From f9af94009bc9d989848198ae4b759d8c9a4cfea7 Mon Sep 17 00:00:00 2001 From: Ye Wang <52801275+wangyems@users.noreply.github.com> Date: Wed, 15 Nov 2023 00:48:51 +0000 Subject: [PATCH 01/38] onboard MoE (#18279) ### Description 1. Introduce MoE CUDA op to ORT based on FT implementation. 2. Upgrade cutlass to 3.1.0 to avoid some build failures on Windows. Remove patch file for cutlass 3.0.0. 3. Sharded MoE implementation will come with another PR limitation: __CUDA_ARCH__ >= 700 ### Motivation and Context --- cgmanifests/generated/cgmanifest.json | 2 +- cmake/deps.txt | 2 +- cmake/external/cutlass.cmake | 1 - cmake/onnxruntime_rocm_hipify.cmake | 1 + cmake/patches/cutlass/cutlass.patch | 92 -- docs/ContribOperators.md | 53 ++ docs/OperatorKernels.md | 1 + .../contrib_ops/cuda/cuda_contrib_kernels.cc | 4 + .../cuda/moe/ft_moe/compute_occupancy.h | 51 ++ .../cuda/moe/ft_moe/cutlass_heuristic.cc | 187 ++++ .../cuda/moe/ft_moe/cutlass_heuristic.h | 39 + .../cuda/moe/ft_moe/epilogue_helpers.h | 133 +++ .../cuda/moe/ft_moe/ft_gemm_configs.h | 58 ++ .../moe/ft_moe/gemm_moe_problem_visitor.h | 79 ++ .../cuda/moe/ft_moe/layout_traits_helper.h | 153 ++++ .../cuda/moe/ft_moe/moe_cutlass_kernel.h | 463 ++++++++++ .../cuda/moe/ft_moe/moe_gemm_kernels.h | 64 ++ .../moe/ft_moe/moe_gemm_kernels_fp16_fp16.cu | 21 + .../moe/ft_moe/moe_gemm_kernels_fp32_fp32.cu | 21 + .../moe/ft_moe/moe_gemm_kernels_template.h | 428 +++++++++ .../contrib_ops/cuda/moe/ft_moe/moe_kernel.cu | 830 ++++++++++++++++++ .../contrib_ops/cuda/moe/ft_moe/moe_kernel.h | 158 ++++ .../cuda/moe/ft_moe/moe_problem_visitor.h | 290 ++++++ .../cuda/moe/ft_moe/tile_interleaved_layout.h | 61 ++ onnxruntime/contrib_ops/cuda/moe/moe.cc | 197 +++++ onnxruntime/contrib_ops/cuda/moe/moe.h | 45 + .../core/graph/contrib_ops/contrib_defs.cc | 21 + onnxruntime/core/graph/contrib_ops/ms_opset.h | 2 + .../python/tools/symbolic_shape_infer.py | 1 + onnxruntime/test/contrib_ops/moe_test.cc | 423 +++++++++ .../python/transformers/test_parity_moe.py | 431 +++++++++ .../templates/download-deps.yml | 4 +- 32 files changed, 4219 insertions(+), 97 deletions(-) delete mode 100644 cmake/patches/cutlass/cutlass.patch create mode 100644 onnxruntime/contrib_ops/cuda/moe/ft_moe/compute_occupancy.h create mode 100644 onnxruntime/contrib_ops/cuda/moe/ft_moe/cutlass_heuristic.cc create mode 100644 onnxruntime/contrib_ops/cuda/moe/ft_moe/cutlass_heuristic.h create mode 100644 onnxruntime/contrib_ops/cuda/moe/ft_moe/epilogue_helpers.h create mode 100644 onnxruntime/contrib_ops/cuda/moe/ft_moe/ft_gemm_configs.h create mode 100644 onnxruntime/contrib_ops/cuda/moe/ft_moe/gemm_moe_problem_visitor.h create mode 100644 onnxruntime/contrib_ops/cuda/moe/ft_moe/layout_traits_helper.h create mode 100644 onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_cutlass_kernel.h create mode 100644 onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_gemm_kernels.h create mode 100644 onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_gemm_kernels_fp16_fp16.cu create mode 100644 onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_gemm_kernels_fp32_fp32.cu create mode 100644 onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_gemm_kernels_template.h create mode 100644 onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_kernel.cu create mode 100644 onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_kernel.h create mode 100644 onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_problem_visitor.h create mode 100644 onnxruntime/contrib_ops/cuda/moe/ft_moe/tile_interleaved_layout.h create mode 100644 onnxruntime/contrib_ops/cuda/moe/moe.cc create mode 100644 
onnxruntime/contrib_ops/cuda/moe/moe.h create mode 100644 onnxruntime/test/contrib_ops/moe_test.cc create mode 100644 onnxruntime/test/python/transformers/test_parity_moe.py diff --git a/cgmanifests/generated/cgmanifest.json b/cgmanifests/generated/cgmanifest.json index 91a80bf4b2578..12fbb291c3a70 100644 --- a/cgmanifests/generated/cgmanifest.json +++ b/cgmanifests/generated/cgmanifest.json @@ -286,7 +286,7 @@ "component": { "type": "git", "git": { - "commitHash": "c4f6b8c6bc94ff69048492fb34df0dfaf1983933", + "commitHash": "6f47420213f757831fae65c686aa471749fa8d60", "repositoryUrl": "https://github.com/NVIDIA/cutlass.git" }, "comments": "cutlass" diff --git a/cmake/deps.txt b/cmake/deps.txt index 33cf5be76a200..49142372ab86e 100644 --- a/cmake/deps.txt +++ b/cmake/deps.txt @@ -51,7 +51,7 @@ pytorch_cpuinfo;https://github.com/pytorch/cpuinfo/archive/959002f82d7962a473d8b re2;https://github.com/google/re2/archive/refs/tags/2022-06-01.zip;aa77313b76e91b531ee7f3e45f004c6a502a5374 safeint;https://github.com/dcleblanc/SafeInt/archive/refs/tags/3.0.28.zip;23f252040ff6cb9f1fd18575b32fa8fb5928daac tensorboard;https://github.com/tensorflow/tensorboard/archive/373eb09e4c5d2b3cc2493f0949dc4be6b6a45e81.zip;67b833913605a4f3f499894ab11528a702c2b381 -cutlass;https://github.com/NVIDIA/cutlass/archive/refs/tags/v3.0.0.zip;0f95b3c1fc1bd1175c4a90b2c9e39074d1bccefd +cutlass;https://github.com/NVIDIA/cutlass/archive/refs/tags/v3.1.0.zip;757f90a795034a89d4f48a79d1f009f7a04c8dee utf8_range;https://github.com/protocolbuffers/utf8_range/archive/72c943dea2b9240cd09efde15191e144bc7c7d38.zip;9925739c9debc0efa2adcb194d371a35b6a03156 extensions;https://github.com/microsoft/onnxruntime-extensions/archive/94142d8391c9791ec71c38336436319a2d4ac7a0.zip;4365ac5140338b4cb75a39944a4be276e3829b3c composable_kernel;https://github.com/ROCmSoftwarePlatform/composable_kernel/archive/a4f72a314a85732ed67d5aa8d1088d207a7e0e61.zip;f57357ab6d300e207a632d034ebc8aa036a090d9 diff --git a/cmake/external/cutlass.cmake b/cmake/external/cutlass.cmake index 8c5d81d638ced..983eecdd88235 100644 --- a/cmake/external/cutlass.cmake +++ b/cmake/external/cutlass.cmake @@ -4,7 +4,6 @@ if (onnxruntime_USE_FLASH_ATTENTION OR onnxruntime_USE_MEMORY_EFFICIENT_ATTENTIO cutlass URL ${DEP_URL_cutlass} URL_HASH SHA1=${DEP_SHA1_cutlass} - PATCH_COMMAND ${Patch_EXECUTABLE} --binary --ignore-whitespace -p1 < ${PROJECT_SOURCE_DIR}/patches/cutlass/cutlass.patch ) FetchContent_GetProperties(cutlass) diff --git a/cmake/onnxruntime_rocm_hipify.cmake b/cmake/onnxruntime_rocm_hipify.cmake index ce27bf756f247..980bd59b22c3f 100644 --- a/cmake/onnxruntime_rocm_hipify.cmake +++ b/cmake/onnxruntime_rocm_hipify.cmake @@ -51,6 +51,7 @@ set(contrib_ops_excluded_files "math/gemm_float8.cc" "math/gemm_float8.cu" "math/gemm_float8.h" + "moe/*" "quantization/attention_quantization.cc" "quantization/attention_quantization.h" "quantization/attention_quantization_impl.cu" diff --git a/cmake/patches/cutlass/cutlass.patch b/cmake/patches/cutlass/cutlass.patch deleted file mode 100644 index bda1de8b46916..0000000000000 --- a/cmake/patches/cutlass/cutlass.patch +++ /dev/null @@ -1,92 +0,0 @@ -diff --git a/include/cute/numeric/complex.hpp b/include/cute/numeric/complex.hpp -index 3790ebd3..cf727d09 100644 ---- a/include/cute/numeric/complex.hpp -+++ b/include/cute/numeric/complex.hpp -@@ -41,10 +41,14 @@ - // With CUDA 11.4, builds show spurious "-Wconversion" warnings - // on line 656 of thrust/detail/type_traits.h. - // These pragmas suppress the warnings. 
-+#ifdef __GNUC__ - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wconversion" -+#endif - #include -+#ifdef __GNUC__ - #pragma GCC diagnostic pop -+#endif - - #include - -diff --git a/include/cutlass/functional.h b/include/cutlass/functional.h -index 59aec46a..8f2a913a 100644 ---- a/include/cutlass/functional.h -+++ b/include/cutlass/functional.h -@@ -89,7 +89,7 @@ struct multiplies { - } - }; - --#if defined(__CUDA_ARCH__) -+#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) - /// Partial specializations needed when __CUDA_NO_HALF2_OPERATORS__ is set - template<> - struct plus<__half2> { -@@ -143,12 +143,12 @@ struct multiplies<__half> { - - - // Maximum with nan propogation --// To propgate the NANs, the "max" of a two element that contains NaNs should also return a NaN -+// To propgate the NANs, the "max" of a two element that contains NaNs should also return a NaN - template - struct maximum_with_nan_propogation { - CUTLASS_HOST_DEVICE - T operator()(T const &lhs, T const &rhs) const { -- return lhs > rhs or std::isnan(lhs) ? lhs : rhs; -+ return lhs > rhs or isnan(lhs) ? lhs : rhs; - } - }; - -@@ -160,7 +160,7 @@ struct maximum_with_nan_propogation { - #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) - asm volatile("max.NaN.f32 %0, %1, %2;\n" : "=f"(res) : "f"(lhs), "f"(rhs)); - #else -- res = lhs > rhs or std::isnan(lhs) ? lhs : rhs; -+ res = lhs > rhs or isnan(lhs) ? lhs : rhs; - #endif - return res; - } -@@ -233,7 +233,7 @@ struct negate { - } - }; - --/// Greater equal -+/// Greater equal - template - struct greater_equal { - CUTLASS_HOST_DEVICE -@@ -242,7 +242,7 @@ struct greater_equal { - } - }; - --/// Greater -+/// Greater - template - struct greater { - CUTLASS_HOST_DEVICE -@@ -251,7 +251,7 @@ struct greater { - } - }; - --/// Less equal -+/// Less equal - template - struct less_equal { - CUTLASS_HOST_DEVICE -@@ -260,7 +260,7 @@ struct less_equal { - } - }; - --/// Less -+/// Less - template - struct less { - CUTLASS_HOST_DEVICE diff --git a/docs/ContribOperators.md b/docs/ContribOperators.md index 169737c9138c3..9c31978c66486 100644 --- a/docs/ContribOperators.md +++ b/docs/ContribOperators.md @@ -54,6 +54,7 @@ Do not modify directly.* * com.microsoft.MatMulIntegerToFloat * com.microsoft.MatMulNBits * com.microsoft.MaxpoolWithMask + * com.microsoft.MoE * com.microsoft.MulInteger * com.microsoft.MultiHeadAttention * com.microsoft.MurmurHash3 @@ -2904,6 +2905,58 @@ This version of the operator has been available since version 1 of the 'com.micr +### **com.microsoft.MoE** + + Mixture of experts. Examples: Switch transformer(https://arxiv.org/pdf/2101.03961.pdf) use top 1, + GLaM(https://arxiv.org/abs/2112.06905) activates top 2 FFN, and Vision MOE(https://arxiv.org/pdf/2106.05974.pdf) + usually uses top 32 experts. + + +#### Version + +This version of the operator has been available since version 1 of the 'com.microsoft' operator set. + +#### Attributes + +
+<dl>
+<dt><tt>activation_type</tt> : string</dt>
+<dd>Activation function to use. Choose from relu, gelu, silu and identity. Default is relu</dd>
+<dt><tt>k</tt> : int</dt>
+<dd>Number of top experts to select from expert pool</dd>
+</dl>
+
+#### Inputs (4 - 6)
+
+<dl>
+<dt><tt>input</tt> : T</dt>
+<dd>2D input tensor with shape (num_rows, hidden_size) or 3D input tensor with shape (batch_size, sequence_length, hidden_size)</dd>
+<dt><tt>router_probs</tt> : T</dt>
+<dd>2D input tensor with shape (num_rows, num_experts)</dd>
+<dt><tt>fc1_experts_weights</tt> : T</dt>
+<dd>3D input tensor with shape (num_experts, hidden_size, inter_size)</dd>
+<dt><tt>fc2_experts_weights</tt> : T</dt>
+<dd>3D input tensor with shape (num_experts, inter_size, hidden_size)</dd>
+<dt><tt>fc1_experts_bias</tt> (optional) : T</dt>
+<dd>2D optional input tensor with shape (num_experts, inter_size)</dd>
+<dt><tt>fc2_experts_bias</tt> (optional) : T</dt>
+<dd>2D optional input tensor with shape (num_experts, hidden_size)</dd>
+</dl>
+
+#### Outputs
+
+<dl>
+<dt><tt>output</tt> : T</dt>
+<dd>2D output tensor with shape (num_rows, hidden_size) or 3D output tensor with shape (batch_size, sequence_length, hidden_size)</dd>
+</dl>
+
+#### Type Constraints
+
+<dl>
+<dt><tt>T</tt> : tensor(float), tensor(float16)</dt>
+<dd>Constrain input and output types to float or float16 tensors.</dd>
+</dl>
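For illustration, here is a minimal sketch of how a model could instantiate this operator with `onnx.helper` from the ONNX Python package. The tensor names, the concrete sizes (`num_rows`, `hidden_size`, `inter_size`, `num_experts`) and the opset version picked for the default domain are placeholders for this example only; the expert weights are declared as plain graph inputs for brevity, whereas a real model would usually store them as initializers.

```python
from onnx import TensorProto, helper

# Illustrative sizes only; any values consistent with the shapes documented above work.
num_rows, hidden_size, inter_size, num_experts = 8, 32, 64, 4

# MoE node using the documented inputs and attributes (com.microsoft domain).
moe_node = helper.make_node(
    "MoE",
    inputs=["input", "router_probs", "fc1_experts_weights", "fc2_experts_weights"],
    outputs=["output"],
    domain="com.microsoft",
    activation_type="gelu",  # one of relu, gelu, silu, identity
    k=1,                     # number of top experts selected per row
)

graph = helper.make_graph(
    [moe_node],
    "moe_example",
    inputs=[
        helper.make_tensor_value_info("input", TensorProto.FLOAT, [num_rows, hidden_size]),
        helper.make_tensor_value_info("router_probs", TensorProto.FLOAT, [num_rows, num_experts]),
        helper.make_tensor_value_info("fc1_experts_weights", TensorProto.FLOAT,
                                      [num_experts, hidden_size, inter_size]),
        helper.make_tensor_value_info("fc2_experts_weights", TensorProto.FLOAT,
                                      [num_experts, inter_size, hidden_size]),
    ],
    outputs=[helper.make_tensor_value_info("output", TensorProto.FLOAT, [num_rows, hidden_size])],
)

# Opset 17 for the default domain is an arbitrary choice; the MoE op itself comes
# from version 1 of the com.microsoft operator set, as stated above.
model = helper.make_model(
    graph,
    opset_imports=[helper.make_opsetid("", 17), helper.make_opsetid("com.microsoft", 1)],
)
```

A model built this way can then be executed with the CUDA execution provider, where the kernel introduced by this change is registered.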
+
+ + ### **com.microsoft.MulInteger** Performs element-wise binary quantized multiplication (with Numpy-style broadcasting support). diff --git a/docs/OperatorKernels.md b/docs/OperatorKernels.md index 8e546b30aa4cb..26b5ebbdbec36 100644 --- a/docs/OperatorKernels.md +++ b/docs/OperatorKernels.md @@ -842,6 +842,7 @@ Do not modify directly.* |LongformerAttention|*in* input:**T**
*in* weight:**T**
*in* bias:**T**
*in* mask:**T**
*in* global_weight:**T**
*in* global_bias:**T**
*in* global:**G**
*out* output:**T**|1+|**T** = tensor(float), tensor(float16)| |MatMulBnb4|*in* A:**T1**
*in* B:**T2**
*in* absmax:**T1**
*out* Y:**T1**|1+|**T1** = tensor(float), tensor(float16)
**T2** = tensor(uint8)| |MatMulNBits|*in* A:**T1**
*in* B:**T2**
*in* scales:**T1**
*in* zero_points:**T2**
*out* Y:**T1**|1+|**T1** = tensor(float), tensor(float16)
**T2** = tensor(uint8)|
+|MoE|*in* input:**T**<br> *in* router_probs:**T**<br> *in* fc1_experts_weights:**T**<br> *in* fc2_experts_weights:**T**<br> *in* fc1_experts_bias:**T**<br> *in* fc2_experts_bias:**T**<br> *out* output:**T**|1+|**T** = tensor(float), tensor(float16)|
|MultiHeadAttention|*in* query:**T**
*in* key:**T**
*in* value:**T**
*in* bias:**T**
*in* key_padding_mask:**M**
*in* relative_position_bias:**T**
*in* past_key:**T**
*in* past_value:**T**
*out* output:**T**
*out* present_key:**T**
*out* present_value:**T**|1+|**T** = tensor(float), tensor(float16)| |NGramRepeatBlock|*in* input_ids:**Tid**
*in* scores:**T**
*out* scores_out:**T**|1+|**T** = tensor(float)
**Tid** = tensor(int64)| |NhwcConv|*in* X:**T**
*in* W:**T**
*in* B:**T**
*out* Y:**T**|1+|**T** = tensor(float), tensor(float16)| diff --git a/onnxruntime/contrib_ops/cuda/cuda_contrib_kernels.cc b/onnxruntime/contrib_ops/cuda/cuda_contrib_kernels.cc index 6162895d00d79..7172a28316f16 100644 --- a/onnxruntime/contrib_ops/cuda/cuda_contrib_kernels.cc +++ b/onnxruntime/contrib_ops/cuda/cuda_contrib_kernels.cc @@ -70,6 +70,8 @@ class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCudaExecutionProvider, kMSDomain, 1 class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCudaExecutionProvider, kOnnxDomain, 1, float, Crop); class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCudaExecutionProvider, kOnnxDomain, 1, double, Crop); class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCudaExecutionProvider, kOnnxDomain, 1, MLFloat16, Crop); +class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCudaExecutionProvider, kMSDomain, 1, float, MoE); +class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCudaExecutionProvider, kMSDomain, 1, MLFloat16, MoE); class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCudaExecutionProvider, kMSDomain, 1, float, MultiHeadAttention); class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCudaExecutionProvider, kMSDomain, 1, MLFloat16, MultiHeadAttention); class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCudaExecutionProvider, kMSDomain, 1, MLFloat16, GroupQueryAttention); @@ -260,6 +262,8 @@ Status RegisterCudaContribKernels(KernelRegistry& kernel_registry) { BuildKernelCreateInfo, BuildKernelCreateInfo, BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, BuildKernelCreateInfo, BuildKernelCreateInfo, BuildKernelCreateInfo, diff --git a/onnxruntime/contrib_ops/cuda/moe/ft_moe/compute_occupancy.h b/onnxruntime/contrib_ops/cuda/moe/ft_moe/compute_occupancy.h new file mode 100644 index 0000000000000..86136ea244e23 --- /dev/null +++ b/onnxruntime/contrib_ops/cuda/moe/ft_moe/compute_occupancy.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#pragma once + +#include + +#include "core/providers/cuda/shared_inc/cuda_call.h" +#include "cutlass/device_kernel.h" + +using namespace onnxruntime; + +namespace ort_fastertransformer { + +template +inline int compute_occupancy_for_kernel() { + int smem_size = int(sizeof(typename GemmKernel::SharedStorage)); + + if (smem_size > (48 << 10)) { + cudaError_t status = + cudaFuncSetAttribute(cutlass::Kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); + if (status == cudaError::cudaErrorInvalidValue) { + // Clear the error bit since we can ignore this. + // This should mean that smem_size > cudaDevAttrMaxSharedMemoryPerBlockOptin. In that case, we return an + // occupancy of 0. This will cause the heuristic to ignore this configuration. 
+ status = cudaGetLastError(); + return 0; + } + CUDA_CALL_THROW(status); + } + + int max_active_blocks = -1; + CUDA_CALL_THROW(cudaOccupancyMaxActiveBlocksPerMultiprocessor(&max_active_blocks, cutlass::Kernel, + GemmKernel::kThreadCount, smem_size)); + + return max_active_blocks; +} + +} // namespace ort_fastertransformer diff --git a/onnxruntime/contrib_ops/cuda/moe/ft_moe/cutlass_heuristic.cc b/onnxruntime/contrib_ops/cuda/moe/ft_moe/cutlass_heuristic.cc new file mode 100644 index 0000000000000..5d4c6793ec995 --- /dev/null +++ b/onnxruntime/contrib_ops/cuda/moe/ft_moe/cutlass_heuristic.cc @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "cutlass_heuristic.h" + +#include +#include +#include + +namespace ort_fastertransformer { + +struct TileShape { + int m; + int n; +}; + +TileShape get_cta_shape_for_config(CutlassTileConfig tile_config) { + switch (tile_config) { + case CutlassTileConfig::CtaShape32x128x64_WarpShape32x32x64: + return TileShape{32, 128}; + case CutlassTileConfig::CtaShape64x128x64_WarpShape32x64x64: + case CutlassTileConfig::CtaShape64x128x64_WarpShape64x32x64: + return TileShape{64, 128}; + case CutlassTileConfig::CtaShape128x128x8_WarpShape64x64x8: + case CutlassTileConfig::CtaShape128x128x64_WarpShape64x32x64: + case CutlassTileConfig::CtaShape128x128x64_WarpShape128x32x64: + return TileShape{128, 128}; + default: + ORT_THROW("[FT Error][get_grid_shape_for_config] Invalid config"); + } +} + +bool is_valid_split_k_factor(const int64_t m, const int64_t n, const int64_t k, const TileShape tile_shape, + const int split_k_factor, const size_t workspace_bytes, const bool is_weight_only) { + // All tile sizes have a k_tile of 64. + static constexpr int k_tile = 64; + + // For weight-only quant, we need k and k_elements_per_split to be a multiple of cta_k + if (is_weight_only) { + if ((k % k_tile) != 0) { + return false; + } + + if ((k % split_k_factor) != 0) { + return false; + } + + const int k_elements_per_split = static_cast(k / split_k_factor); + if ((k_elements_per_split % k_tile) != 0) { + return false; + } + } + + // Check that the workspace has sufficient space for this split-k factor + const int ctas_in_m_dim = static_cast((m + tile_shape.m - 1) / tile_shape.m); + const int ctas_in_n_dim = static_cast((n + tile_shape.n - 1) / tile_shape.n); + const int required_ws_bytes = split_k_factor == 1 ? 
0 : sizeof(int) * ctas_in_m_dim * ctas_in_n_dim; + + if (required_ws_bytes > workspace_bytes) { + return false; + } + + return true; +} + +std::vector get_candidate_tiles(const bool is_weight_only, const bool simt_configs_only) { + std::vector simt_configs{CutlassTileConfig::CtaShape128x128x8_WarpShape64x64x8}; + + std::vector square_configs{CutlassTileConfig::CtaShape32x128x64_WarpShape32x32x64, + CutlassTileConfig::CtaShape64x128x64_WarpShape32x64x64, + CutlassTileConfig::CtaShape128x128x64_WarpShape64x32x64}; + + std::vector quant_B_configs{CutlassTileConfig::CtaShape32x128x64_WarpShape32x32x64, + CutlassTileConfig::CtaShape64x128x64_WarpShape64x32x64, + CutlassTileConfig::CtaShape128x128x64_WarpShape128x32x64}; + + const std::vector allowed_configs = is_weight_only ? quant_B_configs : square_configs; + return simt_configs_only ? simt_configs : allowed_configs; +} + +std::vector get_candidate_configs(int sm, const bool is_weight_only, const bool simt_configs_only) { + std::vector tiles = get_candidate_tiles(is_weight_only, simt_configs_only); + + std::vector candidate_configs; + const int min_stages = 2; + const int max_stages = sm >= 80 ? 4 : 2; + + for (const auto& tile_config : tiles) { + for (int stages = min_stages; stages <= max_stages; ++stages) { + CutlassGemmConfig config{tile_config, SplitKStyle::NO_SPLIT_K, 1, stages}; + candidate_configs.push_back(config); + } + } + + return candidate_configs; +} + +CutlassGemmConfig estimate_best_config_from_occupancies(const std::vector& candidate_configs, + const std::vector& occupancies, const int64_t m, + const int64_t n, const int64_t k, const int64_t, + const int split_k_limit, const size_t workspace_bytes, + const int multi_processor_count, const int is_weight_only) { + if (occupancies.size() != candidate_configs.size()) { + ORT_THROW( + "[FT Error][estimate_best_config_from_occupancies] occpancies and " + "candidate configs vectors must have equal length."); + } + + CutlassGemmConfig best_config; + // Score will be [0, 1]. The objective is to minimize this score. + // It represents the fraction of SM resources unused in the last wave. + float config_score = 1.0f; + int config_waves = INT_MAX; + int current_m_tile = 0; + + const int max_split_k = n >= multi_processor_count * 256 ? 1 : split_k_limit; + for (int ii = 0; ii < candidate_configs.size(); ++ii) { + CutlassGemmConfig candidate_config = candidate_configs[ii]; + TileShape tile_shape = get_cta_shape_for_config(candidate_config.tile_config); + int occupancy = occupancies[ii]; + + if (occupancy == 0) { + continue; + } + + // Keep small tile sizes when possible. 
+ if (best_config.tile_config != CutlassTileConfig::ChooseWithHeuristic && m < current_m_tile && + current_m_tile < tile_shape.m) { + continue; + } + + const int ctas_in_m_dim = static_cast((m + tile_shape.m - 1) / tile_shape.m); + const int ctas_in_n_dim = static_cast((n + tile_shape.n - 1) / tile_shape.n); + + for (int split_k_factor = 1; split_k_factor <= max_split_k; ++split_k_factor) { + if (is_valid_split_k_factor(m, n, k, tile_shape, split_k_factor, workspace_bytes, is_weight_only)) { + const int ctas_per_wave = occupancy * multi_processor_count; + const int ctas_for_problem = ctas_in_m_dim * ctas_in_n_dim * split_k_factor; + + const int num_waves_total = (ctas_for_problem + ctas_per_wave - 1) / ctas_per_wave; + const float num_waves_fractional = ctas_for_problem / float(ctas_per_wave); + const float current_score = float(num_waves_total) - num_waves_fractional; + + const float score_slack = 0.1f; + if (current_score < config_score || + ((config_waves > num_waves_total) && (current_score < config_score + score_slack))) { + config_score = current_score; + config_waves = num_waves_total; + SplitKStyle split_style = split_k_factor > 1 ? SplitKStyle::SPLIT_K_SERIAL : SplitKStyle::NO_SPLIT_K; + best_config = + CutlassGemmConfig{candidate_config.tile_config, split_style, split_k_factor, candidate_config.stages}; + current_m_tile = tile_shape.m; + } else if (current_score == config_score && + (best_config.stages < candidate_config.stages || split_k_factor < best_config.split_k_factor || + current_m_tile < tile_shape.m)) { + // Prefer deeper pipeline or smaller split-k + SplitKStyle split_style = split_k_factor > 1 ? SplitKStyle::SPLIT_K_SERIAL : SplitKStyle::NO_SPLIT_K; + best_config = + CutlassGemmConfig{candidate_config.tile_config, split_style, split_k_factor, candidate_config.stages}; + current_m_tile = tile_shape.m; + config_waves = num_waves_total; + } + } + } + } + + if (best_config.tile_config == CutlassTileConfig::ChooseWithHeuristic) { + ORT_THROW("[FT Error] Heurisitc failed to find a valid config."); + } + + return best_config; +} + +} // namespace ort_fastertransformer diff --git a/onnxruntime/contrib_ops/cuda/moe/ft_moe/cutlass_heuristic.h b/onnxruntime/contrib_ops/cuda/moe/ft_moe/cutlass_heuristic.h new file mode 100644 index 0000000000000..e70efe0503b55 --- /dev/null +++ b/onnxruntime/contrib_ops/cuda/moe/ft_moe/cutlass_heuristic.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include "ft_gemm_configs.h" + +#include +#include +#include + +#include "core/common/common.h" + +using namespace onnxruntime; + +namespace ort_fastertransformer { + +std::vector get_candidate_configs(int sm, const bool is_weight_only, const bool simt_configs_only); + +CutlassGemmConfig estimate_best_config_from_occupancies(const std::vector& candidate_configs, + const std::vector& occupancies, const int64_t m, + const int64_t n, const int64_t k, const int64_t num_experts, + const int split_k_limit, const size_t workspace_bytes, + const int multi_processor_count, const int is_weight_only); + +} // namespace ort_fastertransformer diff --git a/onnxruntime/contrib_ops/cuda/moe/ft_moe/epilogue_helpers.h b/onnxruntime/contrib_ops/cuda/moe/ft_moe/epilogue_helpers.h new file mode 100644 index 0000000000000..78d206bf1d9bc --- /dev/null +++ b/onnxruntime/contrib_ops/cuda/moe/ft_moe/epilogue_helpers.h @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * @file epilogue_helpers.h + * + * This file includes types for the epilogues. The empty structs exist so we can signal to template + * code the type of epilogue we want to run, and let the underlying code specify the details such as + * element types, accumulator type and elements per vector access. 
+ * + */ + +#pragma once + +#include "cutlass/array.h" +#include "cutlass/cutlass.h" +#include "cutlass/epilogue/thread/activation.h" +#include "cutlass/epilogue/thread/scale_type.h" +#include "cutlass/functional.h" +#include "cutlass/half.h" +#include "cutlass/numeric_conversion.h" +#include "cutlass/numeric_types.h" +#include "cutlass/epilogue/thread/linear_combination.h" +#include "cutlass/epilogue/thread/linear_combination_generic.h" +#include "cutlass/epilogue/thread/linear_combination_relu.h" +#include "cutlass/epilogue/thread/linear_combination_silu.h" + +namespace cutlass { +namespace epilogue { +namespace thread { + +__forceinline__ __device__ float copysignf_pos(float a, float b) { + float r; + r = __int_as_float(__float_as_int(a) | (__float_as_int(b) & 0x80000000)); + return r; +} + +__forceinline__ __device__ float tanh_opt(float x) { +#if (__CUDACC_VER_MAJOR__ < 11) || (__CUDA_ARCH__ < 750) + const float exp_val = -1.f * fabs(2 * x); + return copysignf_pos((1.0f - __expf(exp_val)) / (__expf(exp_val) + 1.0f), x); +#else + return fast_tanh(x); +#endif +} + +template <> +struct GELU_taylor { + static const bool kIsHeavy = true; + CUTLASS_DEVICE + float operator()(float const& z) const { + float k0 = float(0.7978845608028654); + float k1 = float(0.044715); + + return float( + cutlass::constants::half() * z * + (cutlass::constants::one() + tanh_opt(k0 * z * (cutlass::constants::one() + k1 * z * z)))); + } + + using Params = LinearCombinationGenericParams; + + CUTLASS_DEVICE + float operator()(float const& scalar, Params const& params_) const { return this->operator()(scalar); } +}; + +} // namespace thread +} // namespace epilogue +} // namespace cutlass + +namespace ort_fastertransformer { + +struct EpilogueOpBiasSilu {}; + +struct EpilogueOpBiasReLU {}; + +struct EpilogueOpBiasFtGelu {}; + +struct EpilogueOpBias {}; + +struct EpilogueOpNoBias {}; + +template +struct Epilogue {}; + +template +struct Epilogue { + using Op = cutlass::epilogue::thread::LinearCombinationSilu; +}; + +template +struct Epilogue { + using Op = cutlass::epilogue::thread::LinearCombinationRelu; +}; + +template +struct Epilogue { + using Op = cutlass::epilogue::thread::LinearCombinationGeneric< + cutlass::epilogue::thread::GELU_taylor, ElementType, ElementsPerVectorAccess, ElementAccumulator, + ElementAccumulator, cutlass::epilogue::thread::ScaleType::NoBetaScaling, + cutlass::FloatRoundStyle::round_to_nearest, true>; +}; + +template +struct Epilogue { + using Op = cutlass::epilogue::thread::LinearCombination; +}; + +template +struct Epilogue { + using Op = + cutlass::epilogue::thread::LinearCombination; +}; + +} // namespace ort_fastertransformer diff --git a/onnxruntime/contrib_ops/cuda/moe/ft_moe/ft_gemm_configs.h b/onnxruntime/contrib_ops/cuda/moe/ft_moe/ft_gemm_configs.h new file mode 100644 index 0000000000000..a5faad423fad9 --- /dev/null +++ b/onnxruntime/contrib_ops/cuda/moe/ft_moe/ft_gemm_configs.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +namespace ort_fastertransformer { +// Note: The shapes are in the format MxNxK. The K shape of the runtime config MUST match the K shape +// in the kernel layout details when doing weight only quantization. +enum class CutlassTileConfig { + // Signals that we should run heuristics do choose a config + Undefined, + + // Signals that we should run heuristics do choose a config + ChooseWithHeuristic, + + // SiMT config + CtaShape128x128x8_WarpShape64x64x8, + + // TensorCore configs CTA_N = 128, CTA_K = 64 + // Warp configs for M=32 + CtaShape32x128x64_WarpShape32x32x64, + + // Warp configs for M=64 + CtaShape64x128x64_WarpShape32x64x64, + CtaShape64x128x64_WarpShape64x32x64, + + // Warp configs for M=128 + CtaShape128x128x64_WarpShape64x32x64, + CtaShape128x128x64_WarpShape128x32x64 +}; + +enum class SplitKStyle { + NO_SPLIT_K, + SPLIT_K_SERIAL, + // SPLIT_K_PARALLEL // Not supported yet +}; + +struct CutlassGemmConfig { + CutlassTileConfig tile_config = CutlassTileConfig::ChooseWithHeuristic; + SplitKStyle split_k_style = SplitKStyle::NO_SPLIT_K; + int split_k_factor = -1; + int stages = -1; +}; + +} // namespace ort_fastertransformer diff --git a/onnxruntime/contrib_ops/cuda/moe/ft_moe/gemm_moe_problem_visitor.h b/onnxruntime/contrib_ops/cuda/moe/ft_moe/gemm_moe_problem_visitor.h new file mode 100644 index 0000000000000..311ed323cb90c --- /dev/null +++ b/onnxruntime/contrib_ops/cuda/moe/ft_moe/gemm_moe_problem_visitor.h @@ -0,0 +1,79 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! 
\file + \brief Scheduler for grouped GEMM +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/kernel/gemm_grouped_problem_visitor.h" +#include "cutlass/matrix_coord.h" + +#include "moe_problem_visitor.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +/// Visitor class to abstract away the algorithm for iterating over tiles +template +struct GemmMoeProblemVisitor + : public MoeProblemVisitor, ThreadblockShape, + GroupScheduleMode_, PrefetchTileCount, ThreadCount> { + static bool const kTransposed = Transposed; + + using ProblemSizeHelper = detail::GemmGroupedProblemSizeHelper; + using Base = + MoeProblemVisitor; + using Params = typename Base::Params; + using SharedStorage = typename Base::SharedStorage; + + // + // Methods + // + CUTLASS_DEVICE + GemmMoeProblemVisitor(Params const& params_, SharedStorage& shared_storage_, int32_t block_idx) + : Base(params_, shared_storage_, block_idx) {} +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/onnxruntime/contrib_ops/cuda/moe/ft_moe/layout_traits_helper.h b/onnxruntime/contrib_ops/cuda/moe/ft_moe/layout_traits_helper.h new file mode 100644 index 0000000000000..eb33a98e4246f --- /dev/null +++ b/onnxruntime/contrib_ops/cuda/moe/ft_moe/layout_traits_helper.h @@ -0,0 +1,153 @@ +/* + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/* + This file exists so that we use the same weight layout for MoE grouped gemm and regular gemm when the weight is + quantized. The preprocessing code reads this template to know how to organize the quantized weight matrices + to be consumed by CUTLASS. + + Note that for int4, ThreadBlockK MUST be 64. + + */ + +#pragma once + +#include "cutlass/layout/matrix.h" +#include "cutlass/numeric_types.h" +#include "cutlass/arch/arch.h" +#include "cutlass/arch/mma.h" +#include "cutlass/platform/platform.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/gemm.h" + +namespace cutlass { +namespace gemm { +namespace kernel { + +template +struct LayoutDetailsB {}; + +// Volta specialiations. Volta will dequantize before STS, so we need a different operator +template +struct LayoutDetailsB { + static constexpr int ThreadblockK = 64; + using Layout = layout::RowMajor; + static constexpr int ElementsPerAccess = 8; + using Operator = cutlass::arch::OpMultiplyAdd; +}; + +// Specializations for Turing+ when B is FP16. These are currently only used for MoE networks. +// TODO - Switch this to column major for weights since gemms should be more performant. 
+template +struct LayoutDetailsB= 75>::type> { + static constexpr int ThreadblockK = 64; + using Layout = layout::RowMajor; + static constexpr int ElementsPerAccess = 128 / cutlass::sizeof_bits::value; + using Operator = cutlass::arch::OpMultiplyAdd; +}; + +template +struct MixedGemmArchTraits {}; + +template +struct MixedGemmArchTraits { + static constexpr int Stages = 2; + using OperatorClass = cutlass::arch::OpClassSimt; + using AccType = float; + using LayoutB = cutlass::layout::RowMajor; + + static constexpr int ElementsPerAccessA = 1; + static constexpr int ElementsPerAccessB = 1; + static constexpr int ElementsPerAccessC = 1; + static constexpr int ThreadblockK = 8; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + + using Operator = cutlass::arch::OpMultiplyAdd; +}; + +// ========================= Volta Traits =========================== +// Volta will always dequantize after the global memory load. +// This will instantiate any HMMA tensorcore kernels for Volta. +template +struct MixedGemmArchTraits< + TypeA, TypeB, cutlass::arch::Sm70, + typename cutlass::platform::enable_if::value>::type> { + private: + using LayoutDetails = LayoutDetailsB; + + public: + static constexpr int ThreadblockK = LayoutDetails::ThreadblockK; + + using OperatorClass = cutlass::arch::OpClassTensorOp; + using AccType = float; + using LayoutB = typename LayoutDetails::Layout; + + static constexpr int ElementsPerAccessA = 128 / cutlass::sizeof_bits::value; + static constexpr int ElementsPerAccessB = LayoutDetails::ElementsPerAccess; + static constexpr int ElementsPerAccessC = 128 / cutlass::sizeof_bits::value; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + using Operator = typename LayoutDetails::Operator; +}; + +// ======================= Turing Traits ============================== +template +struct MixedGemmArchTraits< + TypeA, TypeB, cutlass::arch::Sm75, + typename cutlass::platform::enable_if::value>::type> { + private: + using LayoutDetails = LayoutDetailsB; + + public: + static constexpr int ThreadblockK = LayoutDetails::ThreadblockK; + + using OperatorClass = cutlass::arch::OpClassTensorOp; + using AccType = float; + using LayoutB = typename LayoutDetails::Layout; + + static constexpr int ElementsPerAccessA = 128 / cutlass::sizeof_bits::value; + static constexpr int ElementsPerAccessB = LayoutDetails::ElementsPerAccess; + static constexpr int ElementsPerAccessC = 128 / cutlass::sizeof_bits::value; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + using Operator = typename LayoutDetails::Operator; +}; + +// ======================= Ampere Traits ============================== +template +struct MixedGemmArchTraits< + TypeA, TypeB, cutlass::arch::Sm80, + typename cutlass::platform::enable_if::value>::type> { + private: + using LayoutDetails = LayoutDetailsB; + + public: + static constexpr int ThreadblockK = LayoutDetails::ThreadblockK; + + using OperatorClass = cutlass::arch::OpClassTensorOp; + using AccType = float; + using LayoutB = typename LayoutDetails::Layout; + + static constexpr int ElementsPerAccessA = 128 / cutlass::sizeof_bits::value; + static constexpr int ElementsPerAccessB = LayoutDetails::ElementsPerAccess; + static constexpr int ElementsPerAccessC = 128 / cutlass::sizeof_bits::value; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + using Operator = typename LayoutDetails::Operator; +}; + +} // namespace kernel +} // namespace gemm +} // namespace cutlass \ No newline at end of file diff --git 
a/onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_cutlass_kernel.h b/onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_cutlass_kernel.h new file mode 100644 index 0000000000000..bfe30b71170d8 --- /dev/null +++ b/onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_cutlass_kernel.h @@ -0,0 +1,463 @@ +/*************************************************************************************************** + * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted + * provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, this list of + * conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND + * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +#pragma once + +#include "cutlass/complex.h" +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/semaphore.h" + +#include "cutlass/gemm/kernel/gemm_transpose_operands.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/trace.h" + +#include "gemm_moe_problem_visitor.h" +#include "tile_interleaved_layout.h" +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// +// This section exists to that we can use the same kernel code for regular gemm and dequantizing gemms. +// It will dispatch to the dequantizing gemm if the Mma type has an Iterator for scales in global. 
+template +using void_t = void; + +template +struct use_dq_gemm : platform::false_type {}; + +template +struct use_dq_gemm> : platform::true_type {}; + +// SFINAE overload for dequantizing gemm +template ::value, bool>::type = true> +CUTLASS_DEVICE static void run_mma(Mma mma, int gemm_k_iterations, typename Mma::FragmentC& accum, + typename Mma::IteratorA iterator_A, typename Mma::IteratorB iterator_B, + typename Mma::FragmentC const& src_accum, ElementScale* weight_scale_ptr, + MatrixCoord scale_extent, const int thread_idx, MatrixCoord tb_offset_scale) { + typename Mma::IteratorScale iterator_scale(Mma::IteratorScale::Layout(scale_extent.column()), weight_scale_ptr, + scale_extent, thread_idx, tb_offset_scale); + + mma(gemm_k_iterations, accum, iterator_A, iterator_B, iterator_scale, src_accum); +} + +// SFINAE overload for normal gemm. This completely ignores the scale parameters +template ::value, bool>::type = true> +CUTLASS_DEVICE static void run_mma(Mma mma, int gemm_k_iterations, typename Mma::FragmentC& accum, + typename Mma::IteratorA iterator_A, typename Mma::IteratorB iterator_B, + typename Mma::FragmentC const& src_accum, ElementScale* weight_scale_ptr, + MatrixCoord scale_extent, const int thread_idx, MatrixCoord tb_offset_scale) { + mma(gemm_k_iterations, accum, iterator_A, iterator_B, src_accum); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct MoeFCGemm { + public: + using Mma = Mma_; + using Epilogue = Epilogue_; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + static GroupScheduleMode const kGroupScheduleMode = GroupScheduleMode_; + static bool const kTransposed = false; + + // Optional transpose + using MapArguments = + kernel::detail::MapArguments; + + // Public-facing type definitions related to operand element type, layout, and complex conjugate + // operation. Must interact with the 'kTransposed' notion. + static_assert(!kTransposed, "Transpose problem not supported"); + using ElementA = typename MapArguments::ElementA; + using LayoutA = typename MapArguments::LayoutA; + using ElementB = typename MapArguments::ElementB; + using LayoutB = typename MapArguments::LayoutB; + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename MapArguments::LayoutC; + using ElementScale = ElementC; + + static ComplexTransform const kTransformA = MapArguments::kTransformA; + static ComplexTransform const kTransformB = MapArguments::kTransformB; + + // Type definitions about the mainloop. 
+ using Operator = typename Mma::Operator; + using OperatorClass = typename Mma::Operator::OperatorClass; + using ThreadblockShape = typename Mma::Shape; + using WarpShape = typename Mma::Operator::Shape; + using InstructionShape = typename Mma::Policy::Operator::InstructionShape; + using ArchTag = typename Mma::ArchTag; + + static int const kStages = Mma::kStages; + static int const kAlignmentA = MapArguments::kAlignmentA; + static int const kAlignmentB = MapArguments::kAlignmentB; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + using ProblemVisitor = + GemmMoeProblemVisitor; + + // + // Structures + // + + /// Argument structure + struct Arguments { + // + // Data members + // + + int problem_count; + int threadblock_count; + + typename EpilogueOutputOp::Params output_op; + + ElementA* ptr_A; + ElementB* ptr_B; + ElementScale* weight_scales; + ElementC* ptr_C; + ElementC* ptr_D; + + int64_t* total_rows_before_expert; + int64_t gemm_n; + int64_t gemm_k; + + // Only used by device-level operator + GemmCoord* host_problem_sizes; + + // + // Methods + // + + /// Default ctor + CUTLASS_HOST_DEVICE + Arguments() + : problem_count(0), + threadblock_count(0), + ptr_A(nullptr), + ptr_B(nullptr), + weight_scales(nullptr), + ptr_C(nullptr), + ptr_D(nullptr), + total_rows_before_expert(nullptr), + gemm_n(0), + gemm_k(0), + host_problem_sizes(nullptr) {} + + /// Ctor + CUTLASS_HOST_DEVICE + Arguments(int problem_count, int threadblock_count, typename EpilogueOutputOp::Params output_op, + const ElementA* ptr_A, const ElementB* ptr_B, const ElementScale* weight_scales, const ElementC* ptr_C, + ElementC* ptr_D, int64_t* total_rows_before_expert, int64_t gemm_n, int64_t gemm_k, + GemmCoord* host_problem_sizes = nullptr) + : problem_count(problem_count), + threadblock_count(threadblock_count), + output_op(output_op), + ptr_A(const_cast(ptr_A)), + ptr_B(const_cast(ptr_B)), + weight_scales(const_cast(weight_scales)), + ptr_C(const_cast(ptr_C)), + ptr_D(ptr_D), + total_rows_before_expert(total_rows_before_expert), + gemm_n(gemm_n), + gemm_k(gemm_k), + host_problem_sizes(nullptr) { + if (platform::is_same::value || platform::is_same::value) { + assert(weight_scales); + } + } + }; + + // + // Structure for precomputing values in host memory and passing to kernels + // + + /// Parameters structure + struct Params { + typename ProblemVisitor::Params problem_visitor; + int threadblock_count; + + typename EpilogueOutputOp::Params output_op; + + ElementA* ptr_A; + ElementB* ptr_B; + ElementScale* weight_scales; + ElementC* ptr_C; + ElementC* ptr_D; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + Params() : ptr_A(nullptr), ptr_B(nullptr), weight_scales(nullptr), ptr_C(nullptr), ptr_D(nullptr) {} + + CUTLASS_HOST_DEVICE + Params(Arguments const& args, void* workspace = nullptr, int tile_count = 0) + : problem_visitor(args.total_rows_before_expert, args.gemm_n, args.gemm_k, args.problem_count, workspace, + tile_count), + threadblock_count(args.threadblock_count), + output_op(args.output_op), + ptr_A(args.ptr_A), + ptr_B(args.ptr_B), + weight_scales(args.weight_scales), + ptr_C(args.ptr_C), + ptr_D(args.ptr_D) {} + + CUTLASS_HOST_DEVICE + void update(Arguments const& args, void* workspace = nullptr, int tile_count = 0) { + problem_visitor = typename ProblemVisitor::Params(args.total_rows_before_expert, args.gemm_n, args.gemm_k, + 
args.problem_count, workspace, tile_count); + threadblock_count = args.threadblock_count; + output_op = args.output_op; + ptr_A = args.ptr_A; + ptr_B = args.ptr_B; + weight_scales = args.weight_scales; + ptr_C = args.ptr_C; + ptr_D = args.ptr_D; + } + }; + + /// Shared memory storage structure + union SharedStorage { + typename ProblemVisitor::SharedStorage problem_visitor; + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + }; + + public: + // + // Methods + // + + CUTLASS_DEVICE + MoeFCGemm() {} + + /// Determines whether kernel satisfies alignment + static Status can_implement(cutlass::gemm::GemmCoord const& problem_size) { return Status::kSuccess; } + + static Status can_implement(Arguments const& args) { + if (args.weight_scales != nullptr) { + CUTLASS_TRACE_HOST( + "MoeFCGemm::can_implement() - weight scales are ignored for all types except uint8_t and uint4b_t"); + return Status::kInvalid; + } + return Status::kSuccess; + } + + static size_t get_extra_workspace_size(Arguments const& args, cutlass::gemm::GemmCoord const& grid_tiled_shape) { + return 0; + } + + // The dummy template parameter is not used and exists so that we can compile this code using + // a standard earlier than C++17. Prior to C++17, fully specialized templates HAD to exists in + // a namespace + template + struct KernelRunner { + CUTLASS_DEVICE + static void run_kernel(Params const& params, SharedStorage& shared_storage) { CUTLASS_NOT_IMPLEMENTED(); } + }; + + template + struct KernelRunner { + CUTLASS_DEVICE + static void run_kernel(Params const& params, SharedStorage& shared_storage) { + // + // These types shadow the type-level definitions and support the ability to implement + // a 'transposed' GEMM that computes the transposed problems. + // + using ElementA = typename Mma::IteratorA::Element; + using LayoutA = typename Mma::IteratorA::Layout; + using ElementB = typename Mma::IteratorB::Element; + using LayoutB = typename Mma::IteratorB::Layout; + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename Epilogue::OutputTileIterator::Layout; + static constexpr int kInterleave = Mma::IteratorB::Shape::kRow / Mma::Shape::kK; + static_assert(platform::is_same::value && kInterleave == 1 || + platform::is_same::value && kInterleave >= 1, + "B must be row major/col major OR col major interleaved."); + + // + // Problem visitor. + // + ProblemVisitor problem_visitor(params.problem_visitor, shared_storage.problem_visitor, blockIdx.x); + + const int64_t gemm_k = params.problem_visitor.gemm_k; + const int64_t gemm_n = params.problem_visitor.gemm_n; + int64_t bytes_per_expert_matrix = (gemm_k * gemm_n / 8) * cutlass::sizeof_bits::value; + + // Outer 'persistent' loop to iterate over tiles + while (problem_visitor.next_tile()) { + GemmCoord problem_size = problem_visitor.problem_size(); + int32_t problem_idx = problem_visitor.problem_index(); + int32_t cta_idx = int32_t(problem_visitor.threadblock_idx()); + + GemmCoord grid_shape = problem_visitor.grid_shape(problem_size); + + cutlass::gemm::GemmCoord threadblock_offset(int(cta_idx / grid_shape.n()) * Mma::Shape::kM, + int(cta_idx % grid_shape.n()) * Mma::Shape::kN, 0); + + // Load element pointers. Exchange pointers and strides if working on the transpose + const int64_t rows_to_jump = + problem_idx == 0 ? 
0 : params.problem_visitor.last_row_for_problem[problem_idx - 1]; + ElementA* ptr_A = reinterpret_cast(params.ptr_A) + rows_to_jump * gemm_k; + typename LayoutA::LongIndex ldm_A = gemm_k; + + char* byte_ptr_B = ((char*)params.ptr_B) + problem_idx * bytes_per_expert_matrix; + ElementB* ptr_B = reinterpret_cast(byte_ptr_B); + typename LayoutB::LongIndex ldm_B = + platform::is_same::value ? gemm_n : gemm_k * kInterleave; + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_A{ + threadblock_offset.m(), + 0, + }; + + cutlass::MatrixCoord tb_offset_B{0, threadblock_offset.n() / kInterleave}; + + cutlass::MatrixCoord tb_offset_scale{0, threadblock_offset.n()}; + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A(LayoutA(ldm_A), ptr_A, {problem_size.m(), problem_size.k()}, thread_idx, + tb_offset_A); + + typename Mma::IteratorB iterator_B(LayoutB(ldm_B), ptr_B, + {problem_size.k() * kInterleave, problem_size.n() / kInterleave}, thread_idx, + tb_offset_B); + + typename Mma::FragmentC accumulators; + + accumulators.clear(); + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as warp-uniform. + int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0); + + int lane_idx = threadIdx.x % 32; + + // + // Matrix multiply phase + // + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); + + // Compute threadblock-scoped matrix multiply-add + int gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Wait for all threads to finish their epilogue phases from the previous tile. + __syncthreads(); + + // Compute threadblock-scoped matrix multiply-add + ElementScale* weight_scale_ptr = params.weight_scales + problem_idx * problem_size.n(); + run_mma(mma, gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators, weight_scale_ptr, + {1, problem_size.n()}, thread_idx, tb_offset_scale); + + // + // Epilogue + // + + EpilogueOutputOp output_op(params.output_op); + + ElementC* ptr_C = reinterpret_cast(params.ptr_C) + problem_idx * gemm_n; + ElementC* ptr_D = reinterpret_cast(params.ptr_D) + rows_to_jump * gemm_n; + + LayoutC layout_C(0); + LayoutC layout_D(gemm_n); + + typename Epilogue::OutputTileIterator::Params params_C(layout_C); + typename Epilogue::OutputTileIterator::Params params_D(layout_D); + + // Tile iterator loading from source tensor. + typename Epilogue::OutputTileIterator iterator_C(params_C, ptr_C, problem_size.mn(), thread_idx, + threadblock_offset.mn()); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D(params_D, ptr_D, problem_size.mn(), thread_idx, + threadblock_offset.mn()); + + Epilogue epilogue(shared_storage.epilogue, thread_idx, warp_idx, lane_idx); + + // Execute the epilogue operator to update the destination tensor. + epilogue(output_op, iterator_D, accumulators, iterator_C); + + // Next tile + problem_visitor.advance(gridDim.x); + } + } + }; + + /* + To improve compilation speed, we do not compile the device operator if the CUDA_ARCH does not correspond + to the ArchTag of the cutlass kernel operator. 
+ */ + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const& params, SharedStorage& shared_storage) { +#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700) && (__CUDA_ARCH__ < 750) + static constexpr bool compile_needed = platform::is_same::value; + KernelRunner::run_kernel(params, shared_storage); +#elif defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 750) && (__CUDA_ARCH__ < 800) + static constexpr bool compile_needed = platform::is_same::value; + KernelRunner::run_kernel(params, shared_storage); +#elif defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) && (__CUDA_ARCH__ < 900) + static constexpr bool compile_needed = platform::is_same::value; + KernelRunner::run_kernel(params, shared_storage); +#else + CUTLASS_NOT_IMPLEMENTED(); +#endif + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_gemm_kernels.h b/onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_gemm_kernels.h new file mode 100644 index 0000000000000..60608f462fde5 --- /dev/null +++ b/onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_gemm_kernels.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include +#include "ft_gemm_configs.h" + +namespace ort_fastertransformer { + +enum class ActivationType { Gelu, + Relu, + Silu, + GeGLU, + ReGLU, + SiGLU, + Identity, + InvalidType }; + +template +class MoeGemmRunner { + public: + MoeGemmRunner(); + + void initialize(int sm); + + void moe_gemm_bias_act(const T* A, const WeightType* B, const T* weight_scales, const T* biases, T* C, + int64_t* total_rows_before_expert, int64_t total_rows, int64_t gemm_n, int64_t gemm_k, + int num_experts, ActivationType activation_type, cudaStream_t stream); + + void moe_gemm(const T* A, const WeightType* B, const T* weight_scales, T* C, int64_t* total_rows_before_expert, + int64_t total_rows, int64_t gemm_n, int64_t gemm_k, int num_experts, cudaStream_t stream); + + private: + template + void dispatch_to_arch(const T* A, const WeightType* B, const T* weight_scales, const T* biases, T* C, + int64_t* total_rows_before_expert, int64_t total_rows, int64_t gemm_n, int64_t gemm_k, + int num_experts, CutlassGemmConfig gemm_config, cudaStream_t stream, int* occupancy = nullptr); + + template + void run_gemm(const T* A, const WeightType* B, const T* weight_scales, const T* biases, T* C, + int64_t* total_rows_before_expert, int64_t total_rows, int64_t gemm_n, int64_t gemm_k, int num_experts, + cudaStream_t stream); + + private: + int sm_; + int multi_processor_count_; +}; + +} // namespace ort_fastertransformer diff --git a/onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_gemm_kernels_fp16_fp16.cu b/onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_gemm_kernels_fp16_fp16.cu new file mode 100644 index 0000000000000..1d9a249db4237 --- /dev/null +++ b/onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_gemm_kernels_fp16_fp16.cu @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "moe_gemm_kernels_template.h" + +namespace ort_fastertransformer { +template class MoeGemmRunner; +} // namespace ort_fastertransformer diff --git a/onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_gemm_kernels_fp32_fp32.cu b/onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_gemm_kernels_fp32_fp32.cu new file mode 100644 index 0000000000000..7b250e6ca9060 --- /dev/null +++ b/onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_gemm_kernels_fp32_fp32.cu @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "moe_gemm_kernels_template.h" + +namespace ort_fastertransformer { +template class MoeGemmRunner; +} // namespace ort_fastertransformer diff --git a/onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_gemm_kernels_template.h b/onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_gemm_kernels_template.h new file mode 100644 index 0000000000000..66950c9b65970 --- /dev/null +++ b/onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_gemm_kernels_template.h @@ -0,0 +1,428 @@ +/* + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Ignore CUTLASS warnings about type punning +#ifdef __GNUC__ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wstrict-aliasing" +#endif + +#include "cutlass/array.h" +#include "cutlass/numeric_conversion.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/numeric_types.h" +#include "cutlass/gemm/device/gemm_grouped.h" +#include "cutlass/gemm/kernel/default_gemm_grouped.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/arch/arch.h" +#include "cutlass/epilogue/thread/linear_combination_relu.h" + +#include "compute_occupancy.h" +#include "epilogue_helpers.h" +#include "layout_traits_helper.h" +#include "moe_cutlass_kernel.h" + +#ifdef __GNUC__ +#pragma GCC diagnostic pop +#endif + +#include "cutlass_heuristic.h" +#include "moe_gemm_kernels.h" + +#include +#include +#include +#include + +namespace ort_fastertransformer { + +// ============================= Variable batched Gemm things =========================== +template +void generic_moe_gemm_kernelLauncher(const T* A, const WeightType* B, const T* weight_scales, const T* biases, T* C, + int64_t* total_rows_before_expert, int64_t gemm_n, int64_t gemm_k, int num_experts, + CutlassGemmConfig gemm_config, const int multi_processor_count, + cudaStream_t stream, int* kernel_occupancy = nullptr) { + if (gemm_config.split_k_style != SplitKStyle::NO_SPLIT_K) { + ORT_THROW("[FT Error][MoeGemm] Grouped gemm does not support split-k"); + } + + static_assert(cutlass::platform::is_same::value || cutlass::platform::is_same::value, + "Specialized for half, float"); + + static_assert(cutlass::platform::is_same::value || + cutlass::platform::is_same::value || + cutlass::platform::is_same::value, + ""); + + // The cutlass type for the input elements. This is needed to convert to cutlass::half_t if necessary. + using ElementType_ = + typename cutlass::platform::conditional::value, cutlass::half_t, T>::type; + using ElementType = ElementType_; + + using CutlassWeightType_ = + typename cutlass::platform::conditional::value, cutlass::half_t, + WeightType>::type; + using CutlassWeightType = CutlassWeightType_; + + // We need separate config for each architecture since we will target different tensorcore instructions. For float, + // we do not target TCs. 
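+  // MixedGemmArchTraits (see layout_traits_helper.h) supplies the operator class, instruction shape,
+  // accumulator type, weight layout and per-access alignments consumed by the kernel definition below.
+  // Roughly: half inputs on SM70+ are expected to map to tensor-op mma instructions, while fp32 inputs
+  // stay on the SIMT (CUDA core) path.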
+ using MixedGemmArchTraits = cutlass::gemm::kernel::MixedGemmArchTraits; + using ElementAccumulator = typename MixedGemmArchTraits::AccType; + + using EpilogueOp = + typename Epilogue::Op; + + // Finally, set up the kernel. + using GemmKernel_ = typename cutlass::gemm::kernel::DefaultGemmGrouped< + ElementType, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, MixedGemmArchTraits::ElementsPerAccessA, + CutlassWeightType, typename MixedGemmArchTraits::LayoutB, cutlass::ComplexTransform::kNone, + MixedGemmArchTraits::ElementsPerAccessB, ElementType, cutlass::layout::RowMajor, ElementAccumulator, + typename MixedGemmArchTraits::OperatorClass, arch, ThreadblockShape, WarpShape, + typename MixedGemmArchTraits::InstructionShape, EpilogueOp, + cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle, Stages, + cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly, typename MixedGemmArchTraits::Operator>::GemmKernel; + + using GemmKernel = cutlass::gemm::kernel::MoeFCGemm; + + using GemmGrouped = cutlass::gemm::device::GemmGrouped; + + if (kernel_occupancy != nullptr) { + *kernel_occupancy = compute_occupancy_for_kernel(); + return; + } + int occupancy = std::min(2, GemmGrouped::maximum_active_blocks()); + if (occupancy == 0) { + ORT_THROW("[FT Error][MoE Runner] GPU lacks the shared memory resources to run GroupedGEMM kernel"); + } + const int threadblock_count = multi_processor_count * occupancy; + + typename EpilogueOp::Params epilogue_op(ElementAccumulator(1.f), ElementAccumulator(0.f)); + + typename GemmGrouped::Arguments args( + num_experts, threadblock_count, epilogue_op, reinterpret_cast(A), + reinterpret_cast(B), reinterpret_cast(weight_scales), + reinterpret_cast(biases), reinterpret_cast(C), total_rows_before_expert, gemm_n, + gemm_k); + + GemmGrouped gemm; + + auto can_implement = gemm.can_implement(args); + if (can_implement != cutlass::Status::kSuccess) { + std::string err_msg = + "MoEFC kernel will fail for params. Error: " + std::string(cutlassGetStatusString(can_implement)); + ORT_THROW("[FT Error][MoE Runner] " + err_msg); + } + + auto init_status = gemm.initialize(args); + if (init_status != cutlass::Status::kSuccess) { + std::string err_msg = "Failed to initialize cutlass variable batched gemm. Error: " + + std::string(cutlassGetStatusString(init_status)); + ORT_THROW("[FT Error][MoE Runner] " + err_msg); + } + + auto run_status = gemm.run(stream); + if (run_status != cutlass::Status::kSuccess) { + std::string err_msg = + "Failed to run cutlass variable batched gemm. Error: " + std::string(cutlassGetStatusString(run_status)); + ORT_THROW("[FT Error][MoE Runner] " + err_msg); + } +} + +template +struct dispatch_stages { + static void dispatch(const T* A, const WeightType* B, const T* weight_scales, const T* biases, T* C, + int64_t* total_rows_before_expert, int64_t gemm_n, int64_t gemm_k, int num_experts, + CutlassGemmConfig gemm_config, int multi_processor_count, cudaStream_t stream, + int* occupancy = nullptr) { + std::string err_msg = "Cutlass fpA_intB gemm. 
Not instantiates for arch " + + std::to_string(arch::kMinComputeCapability) + " with stages set to " + std::to_string(Stages); + ORT_THROW("[FT Error][dispatch_stages::dispatch] " + err_msg); + } +}; + +template +struct dispatch_stages { + static void dispatch(const T* A, const WeightType* B, const T* weight_scales, const T* biases, T* C, + int64_t* total_rows_before_expert, int64_t gemm_n, int64_t gemm_k, int num_experts, + CutlassGemmConfig gemm_config, int multi_processor_count, cudaStream_t stream, + int* occupancy = nullptr) { + generic_moe_gemm_kernelLauncher( + A, B, weight_scales, biases, C, total_rows_before_expert, gemm_n, gemm_k, num_experts, gemm_config, + multi_processor_count, stream, occupancy); + } +}; + +template +struct dispatch_stages 2)>::type> { + static void dispatch(const T* A, const WeightType* B, const T* weight_scales, const T* biases, T* C, + int64_t* total_rows_before_expert, int64_t gemm_n, int64_t gemm_k, int num_experts, + CutlassGemmConfig gemm_config, int multi_processor_count, cudaStream_t stream, + int* occupancy = nullptr) { + generic_moe_gemm_kernelLauncher(A, B, weight_scales, biases, C, total_rows_before_expert, gemm_n, gemm_k, + num_experts, gemm_config, multi_processor_count, stream, occupancy); + } +}; + +template +void dispatch_gemm_config(const T* A, const WeightType* B, const T* weight_scales, const T* biases, T* C, + int64_t* total_rows_before_expert, int64_t gemm_n, int64_t gemm_k, int num_experts, + CutlassGemmConfig gemm_config, int multi_processor_count, cudaStream_t stream, + int* occupancy = nullptr) { + switch (gemm_config.stages) { + case 2: + using DispatcherStages2 = dispatch_stages; + DispatcherStages2::dispatch(A, B, weight_scales, biases, C, total_rows_before_expert, gemm_n, gemm_k, num_experts, + gemm_config, multi_processor_count, stream, occupancy); + break; + case 3: + using DispatcherStages3 = dispatch_stages; + DispatcherStages3::dispatch(A, B, weight_scales, biases, C, total_rows_before_expert, gemm_n, gemm_k, num_experts, + gemm_config, multi_processor_count, stream, occupancy); + break; + case 4: + using DispatcherStages4 = dispatch_stages; + DispatcherStages4::dispatch(A, B, weight_scales, biases, C, total_rows_before_expert, gemm_n, gemm_k, num_experts, + gemm_config, multi_processor_count, stream, occupancy); + break; + default: + std::string err_msg = "dispatch_gemm_config does not support stages " + std::to_string(gemm_config.stages); + ORT_THROW("[FT Error][MoE][dispatch_gemm_config] " + err_msg); + break; + } +} + +// This overload will handle tensorop gemms. It is disabled via SFINAE for fp32. +// This overload is only enabled when T == WeightType. 
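+// Dispatch chain, for reference: dispatch_to_arch<arch> -> dispatch_moe_gemm_to_cutlass (selects the
+// threadblock/warp tile from gemm_config.tile_config) -> dispatch_gemm_config (selects the stage count)
+// -> dispatch_stages -> generic_moe_gemm_kernelLauncher, which instantiates and launches the grouped GEMM.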
+template < + typename T, typename WeightType, typename arch, typename EpilogueTag, + typename std::enable_if::value && std::is_same::value>::type* = nullptr> +void dispatch_moe_gemm_to_cutlass(const T* A, const WeightType* B, const T* weight_scales, const T* biases, T* C, + int64_t* total_rows_before_expert, int64_t total_rows, int64_t gemm_n, int64_t gemm_k, + int num_experts, CutlassGemmConfig gemm_config, int sm_version, + int multi_processor_count, cudaStream_t stream, int* occupancy = nullptr) { + switch (gemm_config.tile_config) { + case CutlassTileConfig::CtaShape32x128x64_WarpShape32x32x64: + dispatch_gemm_config, + cutlass::gemm::GemmShape<32, 32, 64>>(A, B, weight_scales, biases, C, + total_rows_before_expert, gemm_n, gemm_k, num_experts, + gemm_config, multi_processor_count, stream, occupancy); + break; + case CutlassTileConfig::CtaShape64x128x64_WarpShape32x64x64: + dispatch_gemm_config, + cutlass::gemm::GemmShape<32, 64, 64>>(A, B, weight_scales, biases, C, + total_rows_before_expert, gemm_n, gemm_k, num_experts, + gemm_config, multi_processor_count, stream, occupancy); + break; + case CutlassTileConfig::CtaShape128x128x64_WarpShape64x32x64: + dispatch_gemm_config, + cutlass::gemm::GemmShape<64, 32, 64>>(A, B, weight_scales, biases, C, + total_rows_before_expert, gemm_n, gemm_k, num_experts, + gemm_config, multi_processor_count, stream, occupancy); + break; + case CutlassTileConfig::Undefined: + ORT_THROW("[FT Error][dispatch_moe_gemm_to_cutlass] gemm config undefined."); + break; + case CutlassTileConfig::ChooseWithHeuristic: + ORT_THROW("[FT Error][dispatch_moe_gemm_to_cutlass] gemm config should have already been set by heuristic."); + break; + default: + ORT_THROW("[FT Error][dispatch_moe_gemm_to_cutlass] Config is invalid for same type MoE tensorop GEMM."); + break; + } +} + +// Tensorop GEMM overload +// Overload for quantize MoE GEMMs. 
We disable some warp configs here since they will not be used and we can improve +// compile time +template < + typename T, typename WeightType, typename arch, typename EpilogueTag, + typename std::enable_if::value && !std::is_same::value>::type* = nullptr> +void dispatch_moe_gemm_to_cutlass(const T* A, const WeightType* B, const T* weight_scales, const T* biases, T* C, + int64_t* total_rows_before_expert, int64_t total_rows, int64_t gemm_n, int64_t gemm_k, + int num_experts, CutlassGemmConfig gemm_config, int sm_version, + int multi_processor_count, cudaStream_t stream, int* occupancy = nullptr) { + switch (gemm_config.tile_config) { + case CutlassTileConfig::CtaShape32x128x64_WarpShape32x32x64: + dispatch_gemm_config, + cutlass::gemm::GemmShape<32, 32, 64>>(A, B, weight_scales, biases, C, + total_rows_before_expert, gemm_n, gemm_k, num_experts, + gemm_config, multi_processor_count, stream, occupancy); + break; + case CutlassTileConfig::CtaShape64x128x64_WarpShape64x32x64: + dispatch_gemm_config, + cutlass::gemm::GemmShape<64, 32, 64>>(A, B, weight_scales, biases, C, + total_rows_before_expert, gemm_n, gemm_k, num_experts, + gemm_config, multi_processor_count, stream, occupancy); + break; + case CutlassTileConfig::CtaShape128x128x64_WarpShape128x32x64: + dispatch_gemm_config, + cutlass::gemm::GemmShape<128, 32, 64>>( + A, B, weight_scales, biases, C, total_rows_before_expert, gemm_n, gemm_k, num_experts, gemm_config, + multi_processor_count, stream, occupancy); + break; + case CutlassTileConfig::Undefined: + ORT_THROW("[FT Error][dispatch_moe_gemm_to_cutlass] gemm config undefined."); + break; + case CutlassTileConfig::ChooseWithHeuristic: + ORT_THROW("[FT Error][dispatch_moe_gemm_to_cutlass] gemm config should have already been set by heuristic."); + break; + default: + ORT_THROW("[FT Error][dispatch_moe_gemm_to_cutlass] Config is invalid for mixed type tensorop GEMM."); + break; + } +} + +// This overload will handle simt gemms. It is disabled via SFINAE for tensorop. 
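+// Note: the fp32 path below only instantiates the CtaShape128x128x8 / WarpShape64x64x8 SIMT tile;
+// all other tile configs are rejected, presumably to keep compile time and binary size in check.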
+template ::value>::type* = nullptr> +void dispatch_moe_gemm_to_cutlass(const T* A, const WeightType* B, const T* weight_scales, const T* biases, T* C, + int64_t* total_rows_before_expert, int64_t total_rows, int64_t gemm_n, int64_t gemm_k, + int num_experts, CutlassGemmConfig gemm_config, int sm_version, + int multi_processor_count, cudaStream_t stream, int* occupancy = nullptr) { + switch (gemm_config.tile_config) { + case CutlassTileConfig::CtaShape128x128x8_WarpShape64x64x8: + dispatch_gemm_config, + cutlass::gemm::GemmShape<64, 64, 8>>(A, B, weight_scales, biases, C, + total_rows_before_expert, gemm_n, gemm_k, num_experts, + gemm_config, multi_processor_count, stream, occupancy); + break; + case CutlassTileConfig::Undefined: + ORT_THROW("[FT Error][dispatch_moe_gemm_to_cutlass][SIMT] gemm config undefined."); + break; + case CutlassTileConfig::ChooseWithHeuristic: + ORT_THROW( + "[FT Error][dispatch_moe_gemm_to_cutlass][SIMT] gemm config should have already been set by heuristic."); + break; + default: + ORT_THROW("[FT Error][dispatch_moe_gemm_to_cutlass][SIMT] Unsupported config for float MoE gemm."); + break; + } +} + +template +MoeGemmRunner::MoeGemmRunner() {} + +template +void MoeGemmRunner::initialize(int sm_version) { + int device{-1}; + cudaGetDevice(&device); + sm_ = sm_version; + cudaDeviceGetAttribute(&multi_processor_count_, cudaDevAttrMultiProcessorCount, device); +} + +template +template +void MoeGemmRunner::dispatch_to_arch(const T* A, const WeightType* B, + const T* weight_scales, const T* biases, T* C, + int64_t* total_rows_before_expert, int64_t total_rows, + int64_t gemm_n, int64_t gemm_k, int num_experts, + CutlassGemmConfig gemm_config, cudaStream_t stream, + int* occupancy) { + if (sm_ >= 70 && sm_ < 75) { + dispatch_moe_gemm_to_cutlass( + A, B, weight_scales, biases, C, total_rows_before_expert, total_rows, gemm_n, gemm_k, num_experts, gemm_config, + sm_, multi_processor_count_, stream, occupancy); + } else if (sm_ >= 75 && sm_ < 80) { + dispatch_moe_gemm_to_cutlass( + A, B, weight_scales, biases, C, total_rows_before_expert, total_rows, gemm_n, gemm_k, num_experts, gemm_config, + sm_, multi_processor_count_, stream, occupancy); + } else if (sm_ >= 80 && sm_ < 90) { + dispatch_moe_gemm_to_cutlass( + A, B, weight_scales, biases, C, total_rows_before_expert, total_rows, gemm_n, gemm_k, num_experts, gemm_config, + sm_, multi_processor_count_, stream, occupancy); + } else { + ORT_THROW("[FT Error][MoE][GEMM Dispatch] Arch unsupported for MoE GEMM"); + } +} + +template +template +void MoeGemmRunner::run_gemm(const T* A, const WeightType* B, const T* weight_scales, + const T* biases, T* C, int64_t* total_rows_before_expert, + int64_t total_rows, int64_t gemm_n, int64_t gemm_k, + int num_experts, cudaStream_t stream) { + static constexpr bool is_weight_only = !std::is_same::value; + static constexpr bool only_simt_configs = std::is_same::value; + std::vector candidate_configs = get_candidate_configs(sm_, is_weight_only, only_simt_configs); + std::vector occupancies(candidate_configs.size()); + + for (size_t ii = 0; ii < candidate_configs.size(); ++ii) { + dispatch_to_arch(A, B, weight_scales, biases, C, total_rows_before_expert, total_rows, gemm_n, gemm_k, + num_experts, candidate_configs[ii], stream, &occupancies[ii]); + } + + static constexpr int workspace_bytes = 0; // No workspace for MoE GEMMs. + static constexpr int split_k_limit = 1; // MoE GEMM does not support split-k. 
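+  // The loop above collects an occupancy estimate for every candidate config; the heuristic below picks
+  // the best config from those occupancies, and the final dispatch_to_arch call runs the GEMM with the
+  // chosen config (split-k is disabled and no extra workspace is used for MoE GEMMs).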
+ CutlassGemmConfig chosen_config = + estimate_best_config_from_occupancies(candidate_configs, occupancies, total_rows, gemm_n, gemm_k, num_experts, + split_k_limit, workspace_bytes, multi_processor_count_, is_weight_only); + + dispatch_to_arch(A, B, weight_scales, biases, C, total_rows_before_expert, total_rows, gemm_n, gemm_k, + num_experts, chosen_config, stream); +} + +template +void MoeGemmRunner::moe_gemm_bias_act(const T* A, const WeightType* B, const T* weight_scales, + const T* biases, T* C, int64_t* total_rows_before_expert, + int64_t total_rows, int64_t gemm_n, int64_t gemm_k, + int num_experts, ActivationType activation_type, + cudaStream_t stream) { + switch (activation_type) { + case ActivationType::Relu: + run_gemm(A, B, weight_scales, biases, C, total_rows_before_expert, total_rows, gemm_n, gemm_k, + num_experts, stream); + break; + case ActivationType::Gelu: + run_gemm(A, B, weight_scales, biases, C, total_rows_before_expert, total_rows, gemm_n, + gemm_k, num_experts, stream); + break; + case ActivationType::Silu: + run_gemm(A, B, weight_scales, biases, C, total_rows_before_expert, total_rows, gemm_n, gemm_k, + num_experts, stream); + break; + case ActivationType::Identity: + run_gemm(A, B, weight_scales, biases, C, total_rows_before_expert, total_rows, gemm_n, gemm_k, + num_experts, stream); + break; + case ActivationType::InvalidType: + ORT_THROW("[FT Error][MoE Runner] Invalid activation type for MoE GEMM"); + break; + default: { + ORT_THROW("[FT Error][MoE Runner] Invalid activation type for MoE GEMM"); + } + } +} + +template +void MoeGemmRunner::moe_gemm(const T* A, const WeightType* B, const T* weight_scales, T* C, + int64_t* total_rows_before_expert, int64_t total_rows, int64_t gemm_n, + int64_t gemm_k, int num_experts, cudaStream_t stream) { + run_gemm(A, B, weight_scales, nullptr, C, total_rows_before_expert, total_rows, gemm_n, gemm_k, + num_experts, stream); +} + +} // namespace ort_fastertransformer diff --git a/onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_kernel.cu b/onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_kernel.cu new file mode 100644 index 0000000000000..398ce4ee9880f --- /dev/null +++ b/onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_kernel.cu @@ -0,0 +1,830 @@ +/* + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include +#include + +// Ignore CUTLASS warnings about type punning +#ifdef __GNUC__ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wstrict-aliasing" +#endif + +#include "cutlass/array.h" +#include "cutlass/numeric_conversion.h" +#include "cutlass/numeric_types.h" + +#ifdef __GNUC__ +#pragma GCC diagnostic pop +#endif + +#include "moe_kernel.h" + +#if CUDA_VERSION >= 11000 +#include +#include +#include +#else +#include "cub/cub.cuh" +#include "cub/device/device_radix_sort.cuh" +#include "cub/util_type.cuh" +#endif + +namespace ort_fastertransformer { + +static constexpr int WARP_SIZE = 32; + +// ====================== Softmax things =============================== +// We have our own implementation of softmax here so we can support transposing the output +// in the softmax kernel when we extend this module to support expert-choice routing. +template +__launch_bounds__(TPB) __global__ + void moe_softmax(const T* input, const bool* finished, T* output, const int num_cols) { + using BlockReduce = cub::BlockReduce; + __shared__ typename BlockReduce::TempStorage tmpStorage; + + __shared__ float normalizing_factor; + __shared__ float float_max; + + const int thread_row_offset = blockIdx.x * num_cols; + + cub::Sum sum; + float threadData(-FLT_MAX); + + // Don't touch finished rows. + if ((finished != nullptr) && finished[blockIdx.x]) { + return; + } + + for (int ii = threadIdx.x; ii < num_cols; ii += TPB) { + const int idx = thread_row_offset + ii; + threadData = max(static_cast(input[idx]), threadData); + } + + const float maxElem = BlockReduce(tmpStorage).Reduce(threadData, cub::Max()); + if (threadIdx.x == 0) { + float_max = maxElem; + } + __syncthreads(); + + threadData = 0; + + for (int ii = threadIdx.x; ii < num_cols; ii += TPB) { + const int idx = thread_row_offset + ii; + threadData += exp((static_cast(input[idx]) - float_max)); + } + + const auto Z = BlockReduce(tmpStorage).Reduce(threadData, sum); + + if (threadIdx.x == 0) { + normalizing_factor = 1.f / Z; + } + __syncthreads(); + + for (int ii = threadIdx.x; ii < num_cols; ii += TPB) { + const int idx = thread_row_offset + ii; + const float val = exp((static_cast(input[idx]) - float_max)) * normalizing_factor; + output[idx] = T(val); + } +} + +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 530 +template +__launch_bounds__(TPB) __global__ void moe_top_k(const T*, const bool*, T*, int*, int*, int, const int) { + // Does not support pre-Kepler architectures + ; +} +#else +template +__launch_bounds__(TPB) __global__ void moe_top_k(const T* inputs_after_softmax, const bool* finished, T* output, + int* indices, int* source_rows, int num_experts, int k) { + using cub_kvp = cub::KeyValuePair; + using BlockReduce = cub::BlockReduce; + __shared__ typename BlockReduce::TempStorage tmpStorage; + + cub_kvp thread_kvp; + cub::ArgMax arg_max; + + int num_rows = gridDim.x; + const int block_row = blockIdx.x; + + const bool should_process_row = finished ? 
!finished[block_row] : true; + const int thread_read_offset = blockIdx.x * num_experts; + for (int k_idx = 0; k_idx < k; ++k_idx) { + thread_kvp.key = 0; + thread_kvp.value = T(-1.f); // This is OK because inputs are probabilities + + cub_kvp inp_kvp; + for (int expert = threadIdx.x; expert < num_experts; expert += TPB) { + const int idx = thread_read_offset + expert; + inp_kvp.key = expert; + inp_kvp.value = inputs_after_softmax[idx]; + + for (int prior_k = 0; prior_k < k_idx; ++prior_k) { + const int prior_winning_expert = indices[k * block_row + prior_k]; + + if (prior_winning_expert == expert) { + inp_kvp = thread_kvp; + } + } + + thread_kvp = arg_max(inp_kvp, thread_kvp); + } + + const cub_kvp result_kvp = BlockReduce(tmpStorage).Reduce(thread_kvp, arg_max); + if (threadIdx.x == 0) { + const int idx = k * block_row + k_idx; + output[idx] = result_kvp.value; + indices[idx] = should_process_row ? result_kvp.key : num_experts; + source_rows[idx] = k_idx * num_rows + block_row; + } + __syncthreads(); + } +} +#endif + +// ====================== TopK softmax things =============================== + +/* + A Top-K gating softmax written to exploit when the number of experts in the MoE layers + are a small power of 2. This allows us to cleanly share the rows among the threads in + a single warp and eliminate communication between warps (so no need to use shared mem). + + It fuses the softmax, max and argmax into a single kernel. + + Limitations: + 1) This implementation is intended for when the number of experts is a small power of 2. + 2) This implementation assumes k is small, but will work for any k. +*/ + +template +__launch_bounds__(WARPS_PER_CTA* WARP_SIZE) __global__ + void topk_gating_softmax(const T* input, const bool* finished, T* output, int num_rows, int* indices, + int* source_rows, int k) { + // We begin by enforcing compile time assertions and setting up compile time constants. + static_assert(VPT == (VPT & -VPT), "VPT must be power of 2"); + static_assert(NUM_EXPERTS == (NUM_EXPERTS & -NUM_EXPERTS), "NUM_EXPERTS must be power of 2"); + static_assert(BYTES_PER_LDG == (BYTES_PER_LDG & -BYTES_PER_LDG), "BYTES_PER_LDG must be power of 2"); + static_assert(BYTES_PER_LDG <= 16, "BYTES_PER_LDG must be leq 16"); + + // Number of bytes each thread pulls in per load + static constexpr int ELTS_PER_LDG = BYTES_PER_LDG / sizeof(T); + static constexpr int ELTS_PER_ROW = NUM_EXPERTS; + static constexpr int THREADS_PER_ROW = ELTS_PER_ROW / VPT; + static constexpr int LDG_PER_THREAD = VPT / ELTS_PER_LDG; + + // Restrictions based on previous section. + static_assert(VPT % ELTS_PER_LDG == 0, "The elements per thread must be a multiple of the elements per ldg"); + static_assert(WARP_SIZE % THREADS_PER_ROW == 0, "The threads per row must cleanly divide the threads per warp"); + static_assert(THREADS_PER_ROW == (THREADS_PER_ROW & -THREADS_PER_ROW), "THREADS_PER_ROW must be power of 2"); + static_assert(THREADS_PER_ROW <= WARP_SIZE, "THREADS_PER_ROW can be at most warp size"); + + // We have NUM_EXPERTS elements per row. We specialize for small #experts + static constexpr int ELTS_PER_WARP = WARP_SIZE * VPT; + static constexpr int ROWS_PER_WARP = ELTS_PER_WARP / ELTS_PER_ROW; + static constexpr int ROWS_PER_CTA = WARPS_PER_CTA * ROWS_PER_WARP; + + // Restrictions for previous section. + static_assert(ELTS_PER_WARP % ELTS_PER_ROW == 0, "The elts per row must cleanly divide the total elt per warp"); + + // ===================== From this point, we finally start computing run-time variables. 
======================== + + // Compute CTA and warp rows. We pack multiple rows into a single warp, and a block contains WARPS_PER_CTA warps. + // This, each block processes a chunk of rows. We start by computing the start row for each block. + const int cta_base_row = blockIdx.x * ROWS_PER_CTA; + + // Now, using the base row per thread block, we compute the base row per warp. + const int warp_base_row = cta_base_row + threadIdx.y * ROWS_PER_WARP; + + // The threads in a warp are split into sub-groups that will work on a row. + // We compute row offset for each thread sub-group + const int thread_row_in_warp = threadIdx.x / THREADS_PER_ROW; + const int thread_row = warp_base_row + thread_row_in_warp; + + // Threads with indices out of bounds should early exit here. + if (thread_row >= num_rows) return; + const bool should_process_row = finished ? !finished[thread_row] : true; + + // We finally start setting up the read pointers for each thread. First, each thread jumps to the start of the + // row it will read. + const T* thread_row_ptr = input + thread_row * ELTS_PER_ROW; + + // Now, we compute the group each thread belong to in order to determine the first column to start loads. + const int thread_group_idx = threadIdx.x % THREADS_PER_ROW; + const int first_elt_read_by_thread = thread_group_idx * ELTS_PER_LDG; + const T* thread_read_ptr = thread_row_ptr + first_elt_read_by_thread; + + // Determine the pointer type to use to read in the data depending on the BYTES_PER_LDG template param. In theory, + // this can support all powers of 2 up to 16. + using AccessType = cutlass::AlignedArray; + + // Finally, we pull in the data from global mem + cutlass::Array row_chunk_input; + AccessType* row_chunk_vec_ptr = reinterpret_cast(&row_chunk_input); + const AccessType* vec_thread_read_ptr = reinterpret_cast(thread_read_ptr); +#pragma unroll + for (int ii = 0; ii < LDG_PER_THREAD; ++ii) { + row_chunk_vec_ptr[ii] = vec_thread_read_ptr[ii * THREADS_PER_ROW]; + } + + using ComputeType = float; + using Converter = cutlass::NumericArrayConverter; + Converter compute_type_converter; + cutlass::Array row_chunk = compute_type_converter(row_chunk_input); + + // First, we perform a max reduce within the thread. We can do the max in fp16 safely (I think) and just + // convert to float afterwards for the exp + sum reduction. + ComputeType thread_max = row_chunk[0]; +#pragma unroll + for (int ii = 1; ii < VPT; ++ii) { + thread_max = max(thread_max, row_chunk[ii]); + } + +// Now, we find the max within the thread group and distribute among the threads. We use a butterfly reduce. +#pragma unroll + for (int mask = THREADS_PER_ROW / 2; mask > 0; mask /= 2) { + thread_max = max(thread_max, __shfl_xor_sync(0xFFFFFFFF, thread_max, mask, THREADS_PER_ROW)); + } + + // From this point, thread max in all the threads have the max within the row. + // Now, we subtract the max from each element in the thread and take the exp. We also compute the thread local sum. + float row_sum = 0; +#pragma unroll + for (int ii = 0; ii < VPT; ++ii) { + row_chunk[ii] = expf(row_chunk[ii] - thread_max); + row_sum += row_chunk[ii]; + } + +// Now, we perform the sum reduce within each thread group. Similar to the max reduce, we use a bufferfly pattern. +#pragma unroll + for (int mask = THREADS_PER_ROW / 2; mask > 0; mask /= 2) { + row_sum += __shfl_xor_sync(0xFFFFFFFF, row_sum, mask, THREADS_PER_ROW); + } + + // From this point, all threads have the max and the sum for their rows in the thread_max and thread_sum variables + // respectively. 
Finally, we can scale the rows for the softmax. Technically, for top-k gating we don't need to + // compute the entire softmax row. We can likely look at the maxes and only compute for the top-k values in the row. + // However, this kernel will likely not be a bottle neck and it seems better to closer match torch and find the + // argmax after computing the softmax. + const float reciprocal_row_sum = 1.f / row_sum; + +#pragma unroll + for (int ii = 0; ii < VPT; ++ii) { + row_chunk[ii] = row_chunk[ii] * reciprocal_row_sum; + } + + // Now, softmax_res contains the softmax of the row chunk. Now, I want to find the topk elements in each row, along + // with the max index.​ + int start_col = first_elt_read_by_thread; + static constexpr int COLS_PER_GROUP_LDG = ELTS_PER_LDG * THREADS_PER_ROW; + + for (int k_idx = 0; k_idx < k; ++k_idx) { + // First, each thread does the local argmax + float max_val = row_chunk[0]; + int expert = start_col; +#pragma unroll + for (int ldg = 0, col = start_col; ldg < LDG_PER_THREAD; ++ldg, col += COLS_PER_GROUP_LDG) { +#pragma unroll + for (int ii = 0; ii < ELTS_PER_LDG; ++ii) { + float val = row_chunk[ldg * ELTS_PER_LDG + ii]; + + // No check on the experts here since columns with the smallest index are processed first and only + // updated if > (not >=) + if (val > max_val) { + max_val = val; + expert = col + ii; + } + } + } + +// Now, we perform the argmax reduce. We use the butterfly pattern so threads reach consensus about the max. +// This will be useful for K > 1 so that the threads can agree on "who" had the max value. That thread can +// then blank out their max with -inf and the warp can run more iterations... +#pragma unroll + for (int mask = THREADS_PER_ROW / 2; mask > 0; mask /= 2) { + float other_max = __shfl_xor_sync(0xFFFFFFFF, max_val, mask, THREADS_PER_ROW); + int other_expert = __shfl_xor_sync(0xFFFFFFFF, expert, mask, THREADS_PER_ROW); + + // We want lower indices to "win" in every thread so we break ties this way + if (other_max > max_val || (other_max == max_val && other_expert < expert)) { + max_val = other_max; + expert = other_expert; + } + } + + // Write the max for this k iteration to global memory. + if (thread_group_idx == 0) { + // The lead thread from each sub-group will write out the final results to global memory. (This will be a + // single) thread per row of the input/output matrices. + const int idx = k * thread_row + k_idx; + output[idx] = T(max_val); + indices[idx] = should_process_row ? expert : NUM_EXPERTS; + source_rows[idx] = k_idx * num_rows + thread_row; + } + + // Finally, we clear the value in the thread with the current max if there is another iteration to run. + if (k_idx + 1 < k) { + const int ldg_group_for_expert = expert / COLS_PER_GROUP_LDG; + const int thread_to_clear_in_group = (expert / ELTS_PER_LDG) % THREADS_PER_ROW; + + // Only the thread in the group which produced the max will reset the "winning" value to -inf. + if (thread_group_idx == thread_to_clear_in_group) { + const int offset_for_expert = expert % ELTS_PER_LDG; + // Safe to set to any negative value since row_chunk values must be between 0 and 1. + row_chunk[ldg_group_for_expert * ELTS_PER_LDG + offset_for_expert] = ComputeType(-10000.f); + } + } + } +} + +namespace detail { +// Constructs some constants needed to partition the work across threads at compile time. 
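+// Worked example (half input, 8 experts): BYTES_PER_LDG = min(16, 2 * 8) = 16, so ELTS_PER_LDG = 8,
+// VPT = 8, THREADS_PER_ROW = 1 and ROWS_PER_WARP = 32 -- each thread owns one full row of expert
+// logits and a warp covers 32 rows per iteration.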
+template +struct TopkConstants { + static constexpr int ELTS_PER_LDG = BYTES_PER_LDG / sizeof(T); + static_assert(EXPERTS / (ELTS_PER_LDG * WARP_SIZE) == 0 || EXPERTS % (ELTS_PER_LDG * WARP_SIZE) == 0, ""); + static constexpr int VECs_PER_THREAD = std::max(1, (int)EXPERTS / (ELTS_PER_LDG * WARP_SIZE)); + static constexpr int VPT = VECs_PER_THREAD * ELTS_PER_LDG; + static constexpr int THREADS_PER_ROW = EXPERTS / VPT; + static constexpr int ROWS_PER_WARP = WARP_SIZE / THREADS_PER_ROW; +}; +} // namespace detail + +template +void topk_gating_softmax_launcher_helper(const T* input, const bool* finished, T* output, int* indices, int* source_row, + int num_rows, int num_experts, int k, cudaStream_t stream) { + static constexpr unsigned long MAX_BYTES_PER_LDG = 16; + + static constexpr int BYTES_PER_LDG = std::min((int)MAX_BYTES_PER_LDG, (int)sizeof(T) * EXPERTS); + using Constants = detail::TopkConstants; + static constexpr int VPT = Constants::VPT; + static constexpr int ROWS_PER_WARP = Constants::ROWS_PER_WARP; + const int num_warps = (num_rows + ROWS_PER_WARP - 1) / ROWS_PER_WARP; + const int num_blocks = (num_warps + WARPS_PER_TB - 1) / WARPS_PER_TB; + + dim3 block_dim(WARP_SIZE, WARPS_PER_TB); + topk_gating_softmax + <<>>(input, finished, output, num_rows, indices, source_row, k); +} + +template +void topk_gating_softmax_kernelLauncher(const T* input, const bool* finished, T* output, T* softmax_temp_output, + int* indices, int* source_row, int num_rows, int num_experts, + int k, cudaStream_t stream) { + static constexpr int WARPS_PER_TB = 4; + + switch (num_experts) { + case 2: { + topk_gating_softmax_launcher_helper(input, finished, output, indices, source_row, num_rows, + num_experts, k, stream); + break; + } + case 4: { + topk_gating_softmax_launcher_helper(input, finished, output, indices, source_row, num_rows, + num_experts, k, stream); + break; + } + case 8: { + topk_gating_softmax_launcher_helper(input, finished, output, indices, source_row, num_rows, + num_experts, k, stream); + break; + } + case 16: { + topk_gating_softmax_launcher_helper(input, finished, output, indices, source_row, num_rows, + num_experts, k, stream); + break; + } + case 32: { + topk_gating_softmax_launcher_helper(input, finished, output, indices, source_row, num_rows, + num_experts, k, stream); + break; + } + case 64: { + topk_gating_softmax_launcher_helper(input, finished, output, indices, source_row, num_rows, + num_experts, k, stream); + break; + } + case 128: { + topk_gating_softmax_launcher_helper(input, finished, output, indices, source_row, num_rows, + num_experts, k, stream); + break; + } + case 256: { + topk_gating_softmax_launcher_helper(input, finished, output, indices, source_row, num_rows, + num_experts, k, stream); + break; + } + default: { + static constexpr int TPB = 256; + moe_softmax<<>>(input, finished, softmax_temp_output, num_experts); + moe_top_k + <<>>(softmax_temp_output, finished, output, indices, source_row, num_experts, k); + } + } +} + +// ========================== CUB Sorting things ==================================== +CubKeyValueSorter::CubKeyValueSorter() : num_experts_(0), num_bits_(sizeof(int) * 8) {} + +CubKeyValueSorter::CubKeyValueSorter(int num_experts) + : num_experts_(num_experts), num_bits_((int)log2(num_experts) + 1) {} + +void CubKeyValueSorter::update_num_experts(int num_experts) { + num_experts_ = num_experts; + num_bits_ = (int)log2(num_experts) + 1; +} + +size_t CubKeyValueSorter::getWorkspaceSize(const size_t num_key_value_pairs) { + num_key_value_pairs_ = 
num_key_value_pairs; + size_t required_storage = 0; + int* null_int = nullptr; + cub::DeviceRadixSort::SortPairs(NULL, required_storage, null_int, null_int, null_int, null_int, + (int)num_key_value_pairs, 0, num_bits_); + return required_storage; +} + +void CubKeyValueSorter::run(void* workspace, const size_t workspace_size, const int* keys_in, int* keys_out, + const int* values_in, int* values_out, const size_t num_key_value_pairs, + cudaStream_t stream) { + size_t expected_ws_size = getWorkspaceSize(num_key_value_pairs); + size_t actual_ws_size = workspace_size; + + if (expected_ws_size > workspace_size) { + ORT_THROW("Error. The allocated workspace is too small to run this problem. Expected workspace size of at least ", + expected_ws_size, " but got problem size ", workspace_size, "\n"); + } + cub::DeviceRadixSort::SortPairs(workspace, actual_ws_size, keys_in, keys_out, values_in, values_out, + (int)num_key_value_pairs, 0, num_bits_, stream); +} + +// ============================== Infer GEMM sizes ================================= +__device__ inline int find_total_elts_leq_target(const int* sorted_indices, const int arr_length, const int target) { + int64_t low = 0, high = arr_length - 1, target_location = -1; + while (low <= high) { + int64_t mid = (low + high) / 2; + + if (sorted_indices[mid] > target) { + high = mid - 1; + } else { + low = mid + 1; + target_location = mid; + } + } + return target_location + 1; +} + +// Sets up the gemm assuming the inputs, experts and outputs are stored in row major order. +// Assumes we want to perform output = matmul(inputs, experts) + bias +__global__ void compute_total_rows_before_expert_kernel(const int* sorted_experts, const int sorted_experts_len, + const int64_t num_experts, int64_t* total_rows_before_expert) { + // First, compute the global tid. We only need 1 thread per expert. + const int expert = blockIdx.x * blockDim.x + threadIdx.x; + if (expert >= num_experts) return; + + // This should construct the last index where each expert occurs. + total_rows_before_expert[expert] = find_total_elts_leq_target(sorted_experts, sorted_experts_len, expert); +} + +template +CutlassMoeFCRunner::CutlassMoeFCRunner(int sm_version) { + moe_gemm_runner_.initialize(sm_version); +} + +template +size_t CutlassMoeFCRunner::getWorkspaceSize(int num_rows, const int hidden_size, + const int inter_size, int num_experts, + int k) { + const int buf_size = static_cast(pad_to_multiple_of_16(k * num_rows * hidden_size)); + const int interbuf_size = static_cast(pad_to_multiple_of_16(k * num_rows * inter_size)); + const int padded_experts = static_cast(pad_to_multiple_of_16(num_experts)); + const int num_moe_inputs = static_cast(pad_to_multiple_of_16(k * num_rows)); + int num_softmax_outs = 0; + + const bool is_pow_2 = (num_experts != 0) && ((num_experts & (num_experts - 1)) == 0); + if (!is_pow_2 || num_experts > 256) { + num_softmax_outs = static_cast(pad_to_multiple_of_16(num_rows * num_experts)); + } + + // softmax output, permuted_rows and permuted_experts have moved to outside of moe kernel, allocate them + // in Encoder or Decoder before invoking FfnLayer forward. 
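+  // Workspace layout (in pointer order, each buffer padded to a multiple of 16): source_rows_ /
+  // permuted_rows_ / permuted_experts_ (k * num_rows ints each), permuted_data_ (k * num_rows * hidden_size
+  // elements), total_rows_before_expert_ (num_experts int64s), then fc1_result_ (k * num_rows * inter_size
+  // elements), which also doubles as the CUB radix-sort workspace, and finally softmax_out_ when
+  // num_experts is not a power of two or exceeds 256.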
+ size_t total_ws_bytes = 3 * num_moe_inputs * sizeof(int); // source_rows_, permuted_rows_, permuted_experts_ + total_ws_bytes += buf_size * sizeof(T); // permuted_data + total_ws_bytes += padded_experts * sizeof(int64_t); // Hold total_rows_before_expert_ + total_ws_bytes += num_softmax_outs * sizeof(T); + const int bytes_for_fc1_result = interbuf_size * sizeof(T); + const int sorter_ws_size_bytes = static_cast(pad_to_multiple_of_16(sorter_.getWorkspaceSize(num_rows))); + sorter_.update_num_experts(num_experts); + + int bytes_for_intermediate_and_sorting = bytes_for_fc1_result; + if (sorter_ws_size_bytes > bytes_for_fc1_result) { + int remaining_bytes = static_cast(pad_to_multiple_of_16(sorter_ws_size_bytes - bytes_for_fc1_result)); + bytes_for_intermediate_and_sorting += remaining_bytes; + } + + total_ws_bytes += bytes_for_intermediate_and_sorting; // intermediate (fc1) output + cub sorting workspace + return total_ws_bytes; +} + +template +void CutlassMoeFCRunner::configure_ws_ptrs(char* ws_ptr, int num_rows, + const int hidden_size, const int inter_size, + int num_experts, int k) { + const int buf_size = static_cast(pad_to_multiple_of_16(k * num_rows * hidden_size)); + const int interbuf_size = static_cast(pad_to_multiple_of_16(k * num_rows * inter_size)); + const int padded_experts = static_cast(pad_to_multiple_of_16(num_experts)); + const int num_moe_inputs = static_cast(pad_to_multiple_of_16(k * num_rows)); + // const int num_softmax_outs = pad_to_multiple_of_16(num_rows * num_experts); + + source_rows_ = (int*)ws_ptr; + permuted_rows_ = source_rows_ + num_moe_inputs; + permuted_experts_ = permuted_rows_ + num_moe_inputs; + permuted_data_ = (T*)(permuted_experts_ + num_moe_inputs); + + total_rows_before_expert_ = (int64_t*)(permuted_data_ + buf_size); + + fc1_result_ = (T*)(total_rows_before_expert_ + padded_experts); + + const bool is_pow_2 = (num_experts != 0) && ((num_experts & (num_experts - 1)) == 0); + if (!is_pow_2 || num_experts > 256) { + softmax_out_ = (T*)(fc1_result_ + interbuf_size); + } else { + softmax_out_ = nullptr; + } +} + +template +void CutlassMoeFCRunner::run_moe_fc( + const T* input_activations, const T* gating_output, const WeightType* fc1_expert_weights, const T* fc1_scales, + const T* fc1_expert_biases, ActivationType fc1_activation_type, const WeightType* fc2_expert_weights, + const T* fc2_scales, int num_rows, const int hidden_size, const int inter_size, int num_experts, + int k, char* workspace_ptr, T* fc2_result, const bool* finished, int active_rows, T* expert_scales, + int* expanded_source_row_to_expanded_dest_row, int* expert_for_source_row, cudaStream_t stream) { + static constexpr bool scales_required = + std::is_same::value || std::is_same::value; + + if (scales_required) { + if (fc1_scales == nullptr) { + ORT_THROW("[FT Error][Run MoE FC] Scales expected but scale for first matmul is a null pointer"); + } else if (fc2_scales == nullptr) { + ORT_THROW("[FT Error][Run MoE FC] Scales expected but scale for second matmul is a null pointer"); + } + } else { + if (fc1_scales != nullptr) { + ORT_THROW("[FT Error][Run MoE FC] Scales are ignored for fp32/fp16/bf16 but received scale for FC1"); + } else if (fc2_scales != nullptr) { + ORT_THROW("[FT Error][Run MoE FC] Scales are ignored for fp32/fp16/bf16 but received scale for FC2"); + } + } + + configure_ws_ptrs(workspace_ptr, num_rows, hidden_size, inter_size, num_experts, k); + topk_gating_softmax_kernelLauncher(gating_output, finished, expert_scales, softmax_out_, expert_for_source_row, + 
source_rows_, num_rows, num_experts, k, stream); + + const int sorter_ws_size_bytes = static_cast(pad_to_multiple_of_16(sorter_.getWorkspaceSize(k * num_rows))); + sorter_.run((void*)fc1_result_, sorter_ws_size_bytes, expert_for_source_row, permuted_experts_, source_rows_, + permuted_rows_, k * num_rows, stream); + + initialize_moe_routing_kernelLauncher(input_activations, permuted_data_, permuted_rows_, + expanded_source_row_to_expanded_dest_row, num_rows, active_rows, hidden_size, k, + stream); + + const int expanded_active_expert_rows = k * active_rows; + compute_total_rows_before_expert(permuted_experts_, expanded_active_expert_rows, num_experts, + total_rows_before_expert_, stream); + + moe_gemm_runner_.moe_gemm_bias_act(permuted_data_, fc1_expert_weights, fc1_scales, fc1_expert_biases, fc1_result_, + total_rows_before_expert_, expanded_active_expert_rows, inter_size, hidden_size, + num_experts, fc1_activation_type, stream); + + moe_gemm_runner_.moe_gemm(fc1_result_, fc2_expert_weights, fc2_scales, fc2_result, total_rows_before_expert_, + expanded_active_expert_rows, hidden_size, inter_size, num_experts, stream); +} + +template +void CutlassMoeFCRunner::run_moe_fc( + const T* input_activations, const T* gating_output, const WeightType* fc1_expert_weights, const T* fc1_scales, + const T* fc1_expert_biases, ActivationType fc1_activation_type, const WeightType* fc2_expert_weights, + const T* fc2_scales, int num_rows, const int hidden_size, const int inter_size, int num_experts, + int k, char* workspace_ptr, T* fc2_result, T* expert_scales, int* expanded_source_row_to_expanded_dest_row, + int* expert_for_source_row, cudaStream_t stream) { + run_moe_fc(input_activations, gating_output, fc1_expert_weights, fc1_scales, fc1_expert_biases, fc1_activation_type, + fc2_expert_weights, fc2_scales, num_rows, hidden_size, inter_size, num_experts, k, workspace_ptr, + fc2_result, nullptr, num_rows, expert_scales, expanded_source_row_to_expanded_dest_row, + expert_for_source_row, stream); +} + +template +void CutlassMoeFCRunner::compute_total_rows_before_expert(const int* sorted_indices, + const int total_indices, + int num_experts, + int64_t* total_rows_before_expert, + cudaStream_t stream) { + const int threads = std::min(1024, num_experts); + const int blocks = (num_experts + threads - 1) / threads; + + compute_total_rows_before_expert_kernel<<>>(sorted_indices, total_indices, num_experts, + total_rows_before_expert); +} + +// ========================== Permutation things ======================================= + +// Duplicated and permutes rows for MoE. In addition, reverse the permutation map to help with finalizing routing. + +// "expanded_x_row" simply means that the number of values is num_rows x k. It is "expanded" since we will have to +// duplicate some rows in the input matrix to match the dimensions. Duplicates will always get routed to separate +// experts in the end. + +// Note that the expanded_dest_row_to_expanded_source_row map referred to here has indices in the range (0, +// k*rows_in_input - 1). However, it is set up so that index 0, rows_in_input, 2*rows_in_input ... (k-1)*rows_in_input +// all map to row 0 in the original matrix. Thus, to know where to read in the source matrix, we simply take the modulus +// of the expanded index. 
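+// Worked example: with num_rows = 4 and k = 2 there are 8 expanded rows; expanded source rows 0 and 4
+// both read original row 0 (4 % 4 == 0), rows 1 and 5 read original row 1, and so on, which is exactly
+// the "% num_rows" used in the kernel below.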
+ +template +__global__ void initialize_moe_routing_kernel(const T* unpermuted_input, T* permuted_output, + const int* expanded_dest_row_to_expanded_source_row, + int* expanded_source_row_to_expanded_dest_row, int num_rows, + int active_rows, int cols) { + // Reverse permutation map. + // I do this so that later, we can use the source -> dest map to do the k-way reduction and unpermuting. I need the + // reverse map for that reduction to allow each threadblock to do 1 k-way reduce without atomics later in MoE. 1 + // thread block will be responsible for all k summations. + const int expanded_dest_row = blockIdx.x; + const int expanded_source_row = expanded_dest_row_to_expanded_source_row[expanded_dest_row]; + if (threadIdx.x == 0) { + expanded_source_row_to_expanded_dest_row[expanded_source_row] = expanded_dest_row; + } + + if (blockIdx.x < active_rows) { + // Duplicate and permute rows + const int source_row = expanded_source_row % num_rows; + + const T* source_row_ptr = unpermuted_input + source_row * cols; + T* dest_row_ptr = permuted_output + expanded_dest_row * cols; + + for (int tid = threadIdx.x; tid < cols; tid += blockDim.x) { + dest_row_ptr[tid] = source_row_ptr[tid]; + } + } +} + +template +void initialize_moe_routing_kernelLauncher(const T* unpermuted_input, T* permuted_output, + const int* expanded_dest_row_to_expanded_source_row, + int* expanded_source_row_to_expanded_dest_row, int num_rows, + int active_rows, int cols, int k, cudaStream_t stream) { + const int blocks = num_rows * k; + const int threads = std::min(cols, 1024); + initialize_moe_routing_kernel + <<>>(unpermuted_input, permuted_output, expanded_dest_row_to_expanded_source_row, + expanded_source_row_to_expanded_dest_row, num_rows, k * active_rows, cols); +} + +// Final kernel to unpermute and scale +// This kernel unpermutes the original data, does the k-way reduction and performs the final skip connection. 
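+// One thread block handles one original row: for every column, each thread accumulates
+// sum over k of scales[row * k + k_idx] * (expanded_permuted_rows[permuted_row][col] + bias[expert][col]),
+// optionally seeded with one or two residual (skip) rows selected by the RESIDUAL_NUM template parameter.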
+#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 530 +template +__global__ void finalize_moe_routing_kernel(const T*, T*, const T*, const T*, const T*, const T*, const int*, + const int*, int, const int) { + // Does not support pre-Kepler architectures + ; +} +#else +template +__global__ void finalize_moe_routing_kernel(const T* expanded_permuted_rows, T* reduced_unpermuted_output, + const T* skip_1, const T* skip_2, const T* bias, const T* scales, + const int* expanded_source_row_to_expanded_dest_row, + const int* expert_for_source_row, int cols, int k) { + const int original_row = blockIdx.x; + int num_rows = gridDim.x; + T* reduced_row_ptr = reduced_unpermuted_output + original_row * cols; + + const T* skip_1_row_ptr = nullptr; + if (RESIDUAL_NUM == 1) { + skip_1_row_ptr = skip_1 + original_row * cols; + } + const T* skip_2_row_ptr = nullptr; + if (RESIDUAL_NUM == 2) { + skip_2_row_ptr = skip_2 + original_row * cols; + } + + for (int tid = threadIdx.x; tid < cols; tid += blockDim.x) { + T thread_output; + if (RESIDUAL_NUM == 0) { + thread_output = T(0); + } else if (RESIDUAL_NUM == 1) { + thread_output = skip_1_row_ptr[tid]; + } else if (RESIDUAL_NUM == 2) { + thread_output = skip_1_row_ptr[tid] + skip_2_row_ptr[tid]; + } + for (int k_idx = 0; k_idx < k; ++k_idx) { + const int expanded_original_row = original_row + k_idx * num_rows; + const int expanded_permuted_row = expanded_source_row_to_expanded_dest_row[expanded_original_row]; + + const int64_t k_offset = original_row * k + k_idx; + const T row_scale = scales[k_offset]; + const T* expanded_permuted_rows_row_ptr = expanded_permuted_rows + expanded_permuted_row * cols; + + const int expert_idx = expert_for_source_row[k_offset]; + const T* bias_ptr = bias + expert_idx * cols; + + thread_output = thread_output + row_scale * (expanded_permuted_rows_row_ptr[tid] + bias_ptr[tid]); + } + reduced_row_ptr[tid] = thread_output; + } +} +#endif + +template +void finalize_moe_routing_kernelLauncher(const T* expanded_permuted_rows, T* reduced_unpermuted_output, const T* bias, + const T* scales, const int* expanded_source_row_to_expanded_dest_row, + const int* expert_for_source_row, int num_rows, int cols, + int k, cudaStream_t stream) { + const int blocks = num_rows; + const int threads = std::min(cols, 1024); + finalize_moe_routing_kernel<<>>( + expanded_permuted_rows, reduced_unpermuted_output, nullptr, nullptr, bias, scales, + expanded_source_row_to_expanded_dest_row, expert_for_source_row, cols, k); +} + +template +void finalize_moe_routing_kernelLauncher(const T* expanded_permuted_rows, T* reduced_unpermuted_output, const T* skip, + const T* bias, const T* scales, + const int* expanded_source_row_to_expanded_dest_row, + const int* expert_for_source_row, int num_rows, int cols, + int k, cudaStream_t stream) { + const int blocks = num_rows; + const int threads = std::min(cols, 1024); + finalize_moe_routing_kernel + <<>>(expanded_permuted_rows, reduced_unpermuted_output, skip, nullptr, bias, scales, + expanded_source_row_to_expanded_dest_row, expert_for_source_row, cols, k); +} + +template +void finalize_moe_routing_kernelLauncher(const T* expanded_permuted_rows, T* reduced_unpermuted_output, const T* skip_1, + const T* skip_2, const T* bias, const T* scales, + const int* expanded_source_row_to_expanded_dest_row, + const int* expert_for_source_row, int num_rows, int cols, + int k, cudaStream_t stream) { + const int blocks = num_rows; + const int threads = std::min(cols, 1024); + if (skip_2 == nullptr) { + finalize_moe_routing_kernel<<>>( + 
expanded_permuted_rows, reduced_unpermuted_output, skip_1, skip_2, bias, scales, + expanded_source_row_to_expanded_dest_row, expert_for_source_row, cols, k); + } else { + finalize_moe_routing_kernel<<>>( + expanded_permuted_rows, reduced_unpermuted_output, skip_1, skip_2, bias, scales, + expanded_source_row_to_expanded_dest_row, expert_for_source_row, cols, k); + } +} + +// ========================= TopK Softmax specializations =========================== +template void topk_gating_softmax_kernelLauncher(const float*, const bool*, float*, float*, int*, int*, int, + int, int, cudaStream_t); +template void topk_gating_softmax_kernelLauncher(const half*, const bool*, half*, half*, int*, int*, int, + int, int, cudaStream_t); + +// ==================== Variable batched GEMM specializations ================================== +template class CutlassMoeFCRunner; +template class CutlassMoeFCRunner; + +// ===================== Specializations for init routing ========================= +template void initialize_moe_routing_kernelLauncher(const float*, float*, const int*, int*, int, int, + int, int, cudaStream_t); +template void initialize_moe_routing_kernelLauncher(const half*, half*, const int*, int*, int, int, + int, int, cudaStream_t); + +// ==================== Specializations for final routing =================================== +template void finalize_moe_routing_kernelLauncher(const float*, float*, const float*, const float*, const int*, + const int*, int, int, int, cudaStream_t); +template void finalize_moe_routing_kernelLauncher(const half*, half*, const half*, const half*, const int*, const int*, + int, int, int, cudaStream_t); +template void finalize_moe_routing_kernelLauncher(const float*, float*, const float*, const float*, const float*, + const int*, const int*, int, int, int, + cudaStream_t); +template void finalize_moe_routing_kernelLauncher(const half*, half*, const half*, const half*, const half*, const int*, + const int*, int, int, int, cudaStream_t); +template void finalize_moe_routing_kernelLauncher(const float*, float*, const float*, const float*, const float*, + const float*, const int*, const int*, int, int, int, + cudaStream_t); +template void finalize_moe_routing_kernelLauncher(const half*, half*, const half*, const half*, const half*, + const half*, const int*, const int*, int, int, int, + cudaStream_t); + +} // namespace ort_fastertransformer diff --git a/onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_kernel.h b/onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_kernel.h new file mode 100644 index 0000000000000..5cefe4fa5dc47 --- /dev/null +++ b/onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_kernel.h @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include "moe_gemm_kernels.h" +#include + +#include "core/common/common.h" + +using namespace onnxruntime; + +namespace ort_fastertransformer { + +static inline size_t pad_to_multiple_of_16(size_t input) { + static constexpr int ALIGNMENT = 16; + return ALIGNMENT * ((input + ALIGNMENT - 1) / ALIGNMENT); +} + +/* + Launches the topk gating softmax required for the MoE layers. + + Params: + input - a [num_rows x num_experts] + finished - [num_rows] vector with 1 if the sentence at this row is done translating and 0 otherwise. + output - a buffer of shape [num_rows x k] containing the top-k values of the softmax for each row. + indices - a matrix of shape [num_rows x k] containing the top-k experts each row should get routed to. + source_rows - a matrix of shape [num_rows x k] used internally for permuting. source_rows[row][k] = k * num_rows + + row. It is constructed like this so we can track where each of the original rows end up in order to perform the + "k-way" reduction later in the routing. + + num_rows - The number of rows in the matrix + num_experts - The number of expert layers present + k - k value in topk +*/ +template +void topk_gating_softmax_kernelLauncher(const T* input, const bool* finished, T* output, T* softmax_temp_out, + int* indices, int* source_row, int num_rows, int num_experts, + int k, cudaStream_t stream); + +class CubKeyValueSorter { + public: + CubKeyValueSorter(); + + CubKeyValueSorter(int num_experts); + + void update_num_experts(int num_experts); + + size_t getWorkspaceSize(const size_t num_key_value_pairs); + + void run(void* workspace, const size_t workspace_size, const int* keys_in, int* keys_out, const int* values_in, + int* values_out, const size_t num_key_value_pairs, cudaStream_t stream); + + private: + size_t num_key_value_pairs_; + int num_experts_; + int num_bits_; +}; + +template +void initialize_moe_routing_kernelLauncher(const T* unpermuted_input, T* permuted_output, + const int* expanded_dest_row_to_expanded_source_row, + int* expanded_source_row_to_expanded_dest_row, int num_rows, + int active_rows, int cols, int k, cudaStream_t stream); + +template +void finalize_moe_routing_kernelLauncher(const T* expanded_permuted_rows, T* reduced_unpermuted_output, const T* bias, + const T* scales, const int* expanded_source_row_to_expanded_dest_row, + const int* expert_for_source_row, int num_rows, int cols, + int k, cudaStream_t stream); + +template +void finalize_moe_routing_kernelLauncher(const T* expanded_permuted_rows, T* reduced_unpermuted_output, const T* skip, + const T* bias, const T* scales, + const int* expanded_source_row_to_expanded_dest_row, + const int* expert_for_source_row, int num_rows, int cols, + int k, cudaStream_t stream); + +template +void finalize_moe_routing_kernelLauncher(const T* expanded_permuted_rows, T* reduced_unpermuted_output, const T* skip_1, + const T* skip_2, const T* bias, const T* scales, + const int* expanded_source_row_to_expanded_dest_row, + const int* expert_for_source_row, int num_rows, int cols, + int k, cudaStream_t stream); + +// Assumes inputs activations are row major. Weights need to be preprocessed by th_op/weight_quantize.cc . +// Nested in a class to avoid multiple calls to cudaGetDeviceProperties as this call can be expensive. +// Avoid making several duplicates of this class. 
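// A minimal host-side reference sketch of the gating contract documented above. It is illustrative
// only (not part of this change): names are hypothetical, k <= num_experts is assumed, and the
// "finished"-row handling and permutation buffers used by the real kernels are omitted.
#include <algorithm>
#include <cmath>
#include <vector>

struct GatingReference {
  std::vector<float> scales;  // [num_rows * k]: softmax probability of each selected expert
  std::vector<int> experts;   // [num_rows * k]: selected expert id per (row, k) slot
};

inline GatingReference topk_gating_softmax_reference(const std::vector<float>& gating_logits,
                                                     int num_rows, int num_experts, int k) {
  GatingReference ref{std::vector<float>(static_cast<size_t>(num_rows) * k),
                      std::vector<int>(static_cast<size_t>(num_rows) * k)};
  std::vector<float> prob(num_experts);
  std::vector<int> order(num_experts);
  for (int row = 0; row < num_rows; ++row) {
    const float* logits = gating_logits.data() + static_cast<size_t>(row) * num_experts;
    const float max_logit = *std::max_element(logits, logits + num_experts);
    float sum = 0.0f;
    for (int e = 0; e < num_experts; ++e) {
      prob[e] = std::exp(logits[e] - max_logit);  // numerically stable softmax numerator
      sum += prob[e];
    }
    for (int e = 0; e < num_experts; ++e) {
      prob[e] /= sum;
      order[e] = e;
    }
    // Top-k by probability, breaking ties toward the lower expert index.
    std::partial_sort(order.begin(), order.begin() + k, order.end(),
                      [&](int a, int b) { return prob[a] != prob[b] ? prob[a] > prob[b] : a < b; });
    for (int i = 0; i < k; ++i) {
      ref.experts[static_cast<size_t>(row) * k + i] = order[i];
      ref.scales[static_cast<size_t>(row) * k + i] = prob[order[i]];
    }
  }
  return ref;
}

// The matching k-way reduction (the finalize step declared above, with no skip connections) is then,
// per row and column c:
//   output[row][c] = sum over i < k of scales[row * k + i] * (expert_output_i[row][c] + bias[experts[row * k + i]][c])
// where expert_output_i is the FC2 result of the i-th selected expert for that row.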
+template +class CutlassMoeFCRunner { + public: + CutlassMoeFCRunner(int sm_version); + + size_t getWorkspaceSize(int num_rows, int hidden_size, int inter_size, int num_experts, int k); + + void run_moe_fc(const T* input_activations, const T* gating_output, const WeightType* fc1_expert_weights, + const T* fc1_scales, const T* fc1_expert_biases, ActivationType fc1_activation_type, + const WeightType* fc2_expert_weights, const T* fc2_scales, int num_rows, int hidden_size, + int inter_size, int num_experts, int k, char* workspace_ptr, T* fc2_result, + T* expert_scales, int* expanded_source_row_to_expanded_dest_row, int* expert_for_source_row, + cudaStream_t stream); + + void run_moe_fc(const T* input_activations, const T* gating_output, const WeightType* fc1_expert_weights, + const T* fc1_scales, const T* fc1_expert_biases, ActivationType fc1_activation_type, + const WeightType* fc2_expert_weights, const T* fc2_scales, int num_rows, int hidden_size, + int inter_size, int num_experts, int k, char* workspace_ptr, T* fc2_result, + const bool* finished, int active_rows, T* expert_scales, + int* expanded_source_row_to_expanded_dest_row, int* expert_for_source_row, cudaStream_t stream); + + void compute_total_rows_before_expert(const int* sorted_indices, int total_indices, int num_experts, + int64_t* total_rows_before_expert, cudaStream_t stream); + + private: + void configure_ws_ptrs(char* ws_ptr, int num_rows, int hidden_size, int inter_size, int num_experts, int k); + + private: + CubKeyValueSorter sorter_; + MoeGemmRunner moe_gemm_runner_; + + // Pointers + int* source_rows_; + int* permuted_rows_; + int* permuted_experts_; + char* sorter_ws_; + T* permuted_data_; + T* softmax_out_; + + int64_t* total_rows_before_expert_; + + T* fc1_result_; +}; + +template +class CutlassMoeFCRunner::value>> { + public: + CutlassMoeFCRunner(int sm_version); + + size_t getWorkspaceSize(int num_rows, int hidden_size, int inter_size, int num_experts, int k) { + return 0; + } +}; + +} // namespace ort_fastertransformer \ No newline at end of file diff --git a/onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_problem_visitor.h b/onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_problem_visitor.h new file mode 100644 index 0000000000000..00f977c615df6 --- /dev/null +++ b/onnxruntime/contrib_ops/cuda/moe/ft_moe/moe_problem_visitor.h @@ -0,0 +1,290 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Base scheduler for grouped problems, using MoE +*/ + +#pragma once + +#include "cutlass/gemm/kernel/grouped_problem_visitor.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Visitor class to abstract away the algorithm for iterating over tiles +template +struct BaseMoeProblemVisitor { + using ThreadblockShape = ThreadblockShape_; + + struct ProblemInfo { + static int32_t const kNoPrefetchEntry = -1; + int32_t problem_idx; + int32_t problem_start; + + CUTLASS_DEVICE + ProblemInfo() : problem_idx(kNoPrefetchEntry), problem_start(kNoPrefetchEntry) {} + + CUTLASS_DEVICE + ProblemInfo(int32_t problem_idx_, int32_t problem_start_) + : problem_idx(problem_idx_), problem_start(problem_start_) {} + }; + + struct Params { + int64_t const* last_row_for_problem; + int64_t gemm_n; + int64_t gemm_k; + int32_t problem_count; + void const* workspace; + int32_t tile_count; + + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + Params() + : last_row_for_problem(nullptr), gemm_n(0), gemm_k(0), problem_count(0), workspace(nullptr), tile_count(0) {} + + /// Ctor + CUTLASS_HOST_DEVICE + Params(int64_t const* last_row_for_problem, int64_t gemm_n, int64_t gemm_k, int32_t problem_count, + void const* workspace = nullptr, int32_t tile_count = 0) + : last_row_for_problem(last_row_for_problem), + gemm_n(gemm_n), + gemm_k(gemm_k), + problem_count(problem_count), + workspace(workspace), + tile_count(tile_count) {} + }; + + Params const& params; + int32_t tile_idx; + int32_t problem_tile_start; + int32_t problem_idx; + + // + // Methods + // + CUTLASS_DEVICE + BaseMoeProblemVisitor(Params const& params_, int32_t block_idx) + : params(params_), tile_idx(block_idx), problem_tile_start(0), problem_idx(0) {} + + /// Get the grid shape + CUTLASS_HOST_DEVICE + static cutlass::gemm::GemmCoord grid_shape(const cutlass::gemm::GemmCoord& problem) { + return cutlass::gemm::GemmCoord(((problem.m() - 1 + ThreadblockShape::kM) / ThreadblockShape::kM), + ((problem.n() - 1 + ThreadblockShape::kN) / ThreadblockShape::kN), 1); + } + + /// Gets the global tile index + CUTLASS_HOST_DEVICE + int32_t tile_index() const { return tile_idx; } + + /// Gets the index of the problem + CUTLASS_HOST_DEVICE + int32_t problem_index() const { return problem_idx; } + + CUTLASS_HOST_DEVICE + int32_t threadblock_idx() const { return tile_idx - problem_tile_start; } + + CUTLASS_DEVICE + void advance(int32_t grid_size) { 
tile_idx += grid_size; } + + CUTLASS_HOST_DEVICE + static void possibly_transpose_problem(cutlass::gemm::GemmCoord& problem) { + ProblemSizeHelper::possibly_transpose_problem(problem); + } + + /// Returns the problem size for the current problem + CUTLASS_HOST_DEVICE + cutlass::gemm::GemmCoord problem_size() const { return problem_size(problem_idx); } + + CUTLASS_HOST_DEVICE + cutlass::gemm::GemmCoord problem_size(int idx) const { + const int64_t prev_problem_row = idx == 0 ? 0 : params.last_row_for_problem[idx - 1]; + const int64_t current_problem_row = params.last_row_for_problem[idx]; + const int64_t gemm_m = current_problem_row - prev_problem_row; + GemmCoord problem(GemmCoord::Index(gemm_m), GemmCoord::Index(params.gemm_n), GemmCoord::Index(params.gemm_k)); + ProblemSizeHelper::possibly_transpose_problem(problem); + return problem; + } + + CUTLASS_HOST_DEVICE + static int32_t tile_count(const cutlass::gemm::GemmCoord& grid) { return ProblemSizeHelper::tile_count(grid); } + + static int32_t group_tile_count(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr, int32_t problem_count) { + int32_t total_tiles = 0; + for (int32_t i = 0; i < problem_count; ++i) { + auto problem = host_problem_sizes_ptr[i]; + possibly_transpose_problem(problem); + auto grid = grid_shape(problem); + total_tiles += tile_count(grid); + } + + return total_tiles; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct MoeProblemVisitor; + +///////////////////////////////////////////////////////////////////////////////////////////////// +// ProblemVisitor that performs all scheduling on device +// +template +struct MoeProblemVisitor : public BaseMoeProblemVisitor { + using Base = BaseMoeProblemVisitor; + using Params = typename Base::Params; + static int const kThreadCount = ThreadCount; + static bool const kRequiresPrecomputation = false; + static int const kThreadsPerWarp = 32; + + struct SharedStorage {}; + + // Final tile of the problem loaded by this thread. Each thread will hold + // a separate value. + int32_t problem_ending_tile; + + SharedStorage& shared_storage; + + // + // Methods + // + CUTLASS_DEVICE + MoeProblemVisitor(Params const& params_, SharedStorage& shared_storage_, int32_t block_idx) + : Base(params_, block_idx), problem_ending_tile(0), shared_storage(shared_storage_) { + this->problem_idx = -1 * kThreadsPerWarp; + this->problem_tile_start = 0; + } + + CUTLASS_DEVICE + bool next_tile() { + // Check whether the tile to compute is within the range of the current problem. + int32_t problem_tile_end = __shfl_sync(0xffffffff, problem_ending_tile, this->problem_idx % kThreadsPerWarp); + if (this->tile_idx < problem_tile_end) { + return true; + } + + // Check whether the tile to compute is within the current group of problems fetched by the warp. + // The last tile for this group is the final tile of the problem held by the final thread in the warp. + int32_t group_tile_end = __shfl_sync(0xffffffff, problem_ending_tile, kThreadsPerWarp - 1); + + // Keep the starting problem for this group in `problem_idx`. This is done to reduce + // register pressure. The starting problem for this group is simply the first problem + // in the group most recently fetched by the warp. + int32_t& group_problem_start = this->problem_idx; + group_problem_start = (this->problem_idx / kThreadsPerWarp) * kThreadsPerWarp; + + // Keep the starting tile for this group in `problem_tile_start`. This is done to reduce + // register pressure. 
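      // (Illustrative worked example, not part of this change.) Suppose kThreadsPerWarp = 32 and the
      // group fetched by the loop below has per-problem tile counts {3, 1, 4, 0, 0, ...}. The warp-wide
      // inclusive prefix sum over problem_ending_tile yields {3, 4, 8, 8, 8, ...}, which become absolute
      // ending tiles once group_tile_start is added. For a tile_idx in
      // [group_tile_start + 4, group_tile_start + 8) exactly two lanes satisfy
      // problem_ending_tile <= tile_idx, so the __ballot_sync/__popc step at the end selects problem 2
      // (0-based) of the group, and its starting tile is the ending tile of problem 1,
      // i.e. group_tile_start + 4.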
+ int32_t& group_tile_start = this->problem_tile_start; + + // Each thread in the warp processes a separate problem to advance until + // reaching a problem whose starting tile is less less than tile_idx. + while (group_tile_end <= this->tile_idx) { + group_problem_start += kThreadsPerWarp; + if (group_problem_start > this->params.problem_count) { + return false; + } + + // Since `group_tile_start` is a reference to `this->problem_tile_start`, this + // also sets `this->problem_tile_start`. The fact that `this->problem_tile_start` + // is also set here is used later in `next_tile`. + group_tile_start = group_tile_end; + + int lane_idx = threadIdx.x % kThreadsPerWarp; + int32_t lane_problem = group_problem_start + lane_idx; + + // Compute the number of tiles in the problem assigned to each thread. + problem_ending_tile = 0; + if (lane_problem < this->params.problem_count) { + cutlass::gemm::GemmCoord problem = this->problem_size(lane_problem); + cutlass::gemm::GemmCoord grid = this->grid_shape(problem); + problem_ending_tile = this->tile_count(grid); + } + + // Compute a warp-wide inclusive prefix sum to compute the ending tile index of + // each thread's problem. + CUTLASS_PRAGMA_UNROLL + for (int i = 1; i < kThreadsPerWarp; i <<= 1) { + int32_t val = __shfl_up_sync(0xffffffff, problem_ending_tile, i); + if (lane_idx >= i) { + problem_ending_tile += val; + } + } + + // The total tile count for this group is now in the final position of the prefix sum + int32_t tiles_in_group = __shfl_sync(0xffffffff, problem_ending_tile, kThreadsPerWarp - 1); + + problem_ending_tile += group_tile_start; + group_tile_end += tiles_in_group; + } + + // The next problem to process is the first one that does not have ending tile position + // that is greater than or equal to tile index. + int32_t problem_idx_in_group = __popc(__ballot_sync(0xffffffff, problem_ending_tile <= this->tile_idx)); + + this->problem_idx = group_problem_start + problem_idx_in_group; + + // The starting tile for this problem is the ending tile of the previous problem. In cases + // where `problem_idx_in_group` is the first problem in the group, we do not need to reset + // `problem_tile_start`, because it is set to the previous group's ending tile in the while + // loop above. + if (problem_idx_in_group > 0) { + this->problem_tile_start = __shfl_sync(0xffffffff, problem_ending_tile, problem_idx_in_group - 1); + } + + return true; + } + + static size_t get_workspace_size(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr, int32_t problem_count, + int32_t block_count) { + return 0; + } + + static void host_precompute(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr, int32_t problem_count, + int32_t block_count, void* host_workspace_ptr) {} +}; + +} // namespace kernel +} // namespace gemm +} // namespace cutlass diff --git a/onnxruntime/contrib_ops/cuda/moe/ft_moe/tile_interleaved_layout.h b/onnxruntime/contrib_ops/cuda/moe/ft_moe/tile_interleaved_layout.h new file mode 100644 index 0000000000000..3505bea24e4d9 --- /dev/null +++ b/onnxruntime/contrib_ops/cuda/moe/ft_moe/tile_interleaved_layout.h @@ -0,0 +1,61 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Defines new layouts needed for MoE +*/ +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/pitch_linear_coord.h" + +namespace cutlass { +namespace layout { + +template +class ColumnMajorTileInterleave { + static constexpr int kRowsPerTile = RowsPerTile; + static constexpr int kColumnsInterleaved = ColumnsInterleaved; +}; + +template +struct IsColumnMajorTileInterleave { + static constexpr bool value = false; +}; + +template +struct IsColumnMajorTileInterleave> { + static constexpr bool value = true; +}; + +} // namespace layout +} // namespace cutlass diff --git a/onnxruntime/contrib_ops/cuda/moe/moe.cc b/onnxruntime/contrib_ops/cuda/moe/moe.cc new file mode 100644 index 0000000000000..6f2ffe7a0cc43 --- /dev/null +++ b/onnxruntime/contrib_ops/cuda/moe/moe.cc @@ -0,0 +1,197 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +#include "core/common/safeint.h" +#include "core/providers/cuda/cuda_common.h" +#include "moe.h" + +using namespace onnxruntime::cuda; +using namespace ::onnxruntime::common; +using namespace ONNX_NAMESPACE; + +namespace onnxruntime { +namespace contrib { +namespace cuda { + +#define REGISTER_KERNEL_TYPED(T) \ + ONNX_OPERATOR_TYPED_KERNEL_EX( \ + MoE, \ + kMSDomain, \ + 1, \ + T, \ + kCudaExecutionProvider, \ + (*KernelDefBuilder::Create()) \ + .MayInplace(0, 0) \ + .TypeConstraint("T", DataTypeImpl::GetTensorType()), \ + MoE); + +REGISTER_KERNEL_TYPED(float) +REGISTER_KERNEL_TYPED(MLFloat16) + +using namespace ONNX_NAMESPACE; + +template +Status MoE::ComputeInternal(OpKernelContext* context) const { + const Tensor* input = context->Input(0); + const Tensor* router_probs = context->Input(1); + const Tensor* fc1_experts_weights = context->Input(2); + const Tensor* fc2_experts_weights = context->Input(3); + const Tensor* fc1_experts_bias_optional = context->Input(4); + const Tensor* fc2_experts_bias_optional = context->Input(5); + + const auto& input_dims = input->Shape().GetDims(); + const auto& router_probs_dims = router_probs->Shape().GetDims(); + const auto& fc1_experts_weights_dims = fc1_experts_weights->Shape().GetDims(); + const auto& fc2_experts_weights_dims = fc2_experts_weights->Shape().GetDims(); + + const int64_t num_rows = input_dims.size() == 2 ? input_dims[0] : input_dims[0] * input_dims[1]; + const int64_t hidden_size = input_dims[input_dims.size() - 1]; + const int64_t num_experts = fc1_experts_weights_dims[0]; + const int64_t inter_size = fc1_experts_weights_dims[2]; + + // TODO: refactor to helper function. + if (fc1_experts_weights_dims.size() != 3) { + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "fc1_experts_weights_dims must be 3D, got ", + fc1_experts_weights_dims.size()); + } + if (fc2_experts_weights_dims.size() != 3) { + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "fc2_experts_weights_dims must be 3D, got ", + fc2_experts_weights_dims.size()); + } + if (fc1_experts_weights_dims[1] != hidden_size) { + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, + "fc1_experts_weights_dims[1] must be equal to hidden_size, got ", + fc1_experts_weights_dims[1], " and ", hidden_size); + } + if (fc2_experts_weights_dims[1] != inter_size) { + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, + "fc2_experts_weights_dims[1] must be equal to inter_size, got ", fc2_experts_weights_dims[1], + " and ", inter_size); + } + if (fc1_experts_weights_dims[2] != inter_size) { + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, + "fc1_experts_weights_dims[2] must be equal to inter_size, got ", fc1_experts_weights_dims[2], + " and ", inter_size); + } + if (fc2_experts_weights_dims[2] != hidden_size) { + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, + "fc2_experts_weights_dims[2] must be equal to hidden_size, got ", + fc2_experts_weights_dims[2], " and ", hidden_size); + } + if (router_probs_dims.size() != 2) { + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "router_probs_dims must be 2D, got ", + router_probs_dims.size()); + } + if (router_probs_dims[0] != num_rows) { + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "router_probs_dims[0] must be equal to num_rows, got ", + router_probs_dims[0], " and ", num_rows); + } + if (router_probs_dims[1] != num_experts) { + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "router_probs_dims[1] must be equal to num_experts, got ", + router_probs_dims[1], " and ", num_experts); + } + if 
(fc1_experts_bias_optional != nullptr && fc2_experts_bias_optional == nullptr) { + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "fc1_experts_bias is set but fc2_experts_bias is not set"); + } + if (fc1_experts_bias_optional == nullptr && fc2_experts_bias_optional != nullptr) { + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "fc1_experts_bias is not set but fc2_experts_bias is set"); + } + if (fc1_experts_bias_optional != nullptr && fc2_experts_bias_optional != nullptr) { + const auto& fc1_experts_bias_dims = fc1_experts_bias_optional->Shape().GetDims(); + const auto& fc2_experts_bias_dims = fc2_experts_bias_optional->Shape().GetDims(); + if (fc1_experts_bias_dims.size() != 2) { + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "fc1_experts_bias_dims must be 2D, got ", + fc1_experts_bias_dims.size()); + } + if (fc2_experts_bias_dims.size() != 2) { + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "fc2_experts_bias_dims must be 2D, got ", + fc2_experts_bias_dims.size()); + } + if (fc1_experts_bias_dims[0] != num_experts) { + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, + "fc1_experts_bias_dims[0] must be equal to num_experts, got ", fc1_experts_bias_dims[0], + " and ", num_experts); + } + if (fc2_experts_bias_dims[0] != num_experts) { + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, + "fc2_experts_bias_dims[0] must be equal to num_experts, got ", fc2_experts_bias_dims[0], + " and ", num_experts); + } + if (fc1_experts_bias_dims[1] != inter_size) { + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, + "fc1_experts_bias_dims[1] must be equal to inter_size, got ", fc1_experts_bias_dims[1], + " and ", inter_size); + } + if (fc2_experts_bias_dims[1] != hidden_size) { + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, + "fc2_experts_bias_dims[1] must be equal to hidden_size, got ", fc2_experts_bias_dims[1], + " and ", hidden_size); + } + } + + typedef typename ToCudaType::MappedType CudaT; + auto stream = context->GetComputeStream(); + + auto& device_prop = GetDeviceProp(); + const int sm = device_prop.major * 10 + device_prop.minor; + + ort_fastertransformer::CutlassMoeFCRunner moe_runner(sm); + + size_t ws_size = + moe_runner.getWorkspaceSize(static_cast(num_rows), static_cast(hidden_size), + static_cast(inter_size), static_cast(num_experts), static_cast(k_)); + size_t fc2_output_size = k_ * num_rows * hidden_size * sizeof(CudaT); + size_t expert_scales_size = k_ * num_rows * sizeof(CudaT); + size_t expanded_source_row_to_expanded_dest_row_size = k_ * num_rows * sizeof(int); + size_t expert_for_source_row_size = k_ * num_rows * sizeof(int); + + AllocatorPtr allocator; + ORT_RETURN_IF_ERROR(context->GetTempSpaceAllocator(&allocator)); + + // TODO: allocate one buffer and reuse it. 
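  // (Sketch only, elaborating the TODO above; not part of this change, helper names are hypothetical.)
  // The five buffers below could come from one pooled allocation that is carved up by offset:
  //   size_t total_bytes = ws_size + fc2_output_size + expert_scales_size +
  //                        expanded_source_row_to_expanded_dest_row_size + expert_for_source_row_size;
  //   IAllocatorUniquePtr<char> pool = IAllocator::MakeUniquePtr<char>(allocator, total_bytes, false, stream);
  //   char* cursor = pool.get();
  //   auto take = [&](size_t bytes) { char* p = cursor; cursor += bytes; return p; };
  //   void* work_space_ptr = take(ws_size);  // then take(fc2_output_size), take(expert_scales_size), ...
  // Each offset would need the same 16-byte padding used elsewhere (see pad_to_multiple_of_16 in
  // moe_kernel.h) so every sub-buffer stays aligned.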
+ IAllocatorUniquePtr work_space = IAllocator::MakeUniquePtr(allocator, ws_size, false, stream); + IAllocatorUniquePtr fc2_output = IAllocator::MakeUniquePtr(allocator, fc2_output_size, false, stream); + IAllocatorUniquePtr expert_scales = + IAllocator::MakeUniquePtr(allocator, expert_scales_size, false, stream); + IAllocatorUniquePtr expanded_source_row_to_expanded_dest_row = + IAllocator::MakeUniquePtr(allocator, expanded_source_row_to_expanded_dest_row_size, false, stream); + IAllocatorUniquePtr expert_for_source_row = + IAllocator::MakeUniquePtr(allocator, expert_for_source_row_size, false, stream); + + // fc1_scales and fc2_scales are used in quantized MoE + const CudaT* fc1_scales_ptr = nullptr; + const CudaT* fc2_scales_ptr = nullptr; + + moe_runner.run_moe_fc(reinterpret_cast(input->template Data()), + reinterpret_cast(router_probs->template Data()), + reinterpret_cast(fc1_experts_weights->template Data()), + std::move(fc1_scales_ptr), + fc1_experts_bias_optional == nullptr + ? nullptr + : reinterpret_cast(fc1_experts_bias_optional->template Data()), + activation_type_, reinterpret_cast(fc2_experts_weights->template Data()), + std::move(fc2_scales_ptr), static_cast(num_rows), static_cast(hidden_size), + static_cast(inter_size), static_cast(num_experts), static_cast(k_), + reinterpret_cast(work_space.get()), reinterpret_cast(fc2_output.get()), + reinterpret_cast(expert_scales.get()), + reinterpret_cast(expanded_source_row_to_expanded_dest_row.get()), + reinterpret_cast(expert_for_source_row.get()), Stream(context)); + + Tensor* output = context->Output(0, input->Shape()); + + ort_fastertransformer::finalize_moe_routing_kernelLauncher( + reinterpret_cast(fc2_output.get()), reinterpret_cast(output->template MutableData()), + fc2_experts_bias_optional == nullptr + ? nullptr + : reinterpret_cast(fc2_experts_bias_optional->template Data()), + reinterpret_cast(expert_scales.get()), + reinterpret_cast(expanded_source_row_to_expanded_dest_row.get()), + reinterpret_cast(expert_for_source_row.get()), static_cast(num_rows), static_cast(hidden_size), + static_cast(k_), Stream(context)); + + return Status::OK(); +} + +} // namespace cuda +} // namespace contrib +} // namespace onnxruntime diff --git a/onnxruntime/contrib_ops/cuda/moe/moe.h b/onnxruntime/contrib_ops/cuda/moe/moe.h new file mode 100644 index 0000000000000..8035568693814 --- /dev/null +++ b/onnxruntime/contrib_ops/cuda/moe/moe.h @@ -0,0 +1,45 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +#pragma once + +#include "contrib_ops/cuda/moe/ft_moe/moe_kernel.h" +#include "core/common/common.h" +#include "core/providers/cuda/cuda_kernel.h" + +namespace onnxruntime { +namespace contrib { +namespace cuda { + +using namespace onnxruntime::cuda; + +template +class MoE final : public CudaKernel { + public: + explicit MoE(const OpKernelInfo& op_kernel_info) : CudaKernel(op_kernel_info) { + ORT_ENFORCE(op_kernel_info.GetAttr("k", &k_).IsOK()); + + std::string activation_type_str; + ORT_ENFORCE(op_kernel_info.GetAttr("activation_type", &activation_type_str).IsOK()); + if (activation_type_str == "relu") { + activation_type_ = ort_fastertransformer::ActivationType::Relu; + } else if (activation_type_str == "gelu") { + activation_type_ = ort_fastertransformer::ActivationType::Gelu; + } else if (activation_type_str == "silu") { + activation_type_ = ort_fastertransformer::ActivationType::Silu; + } else if (activation_type_str == "identity") { + activation_type_ = ort_fastertransformer::ActivationType::Identity; + } else { + ORT_THROW("Unsupported MoE activation type: ", activation_type_str); + } + } + Status ComputeInternal(OpKernelContext* ctx) const override; + + private: + int64_t k_; + ort_fastertransformer::ActivationType activation_type_; +}; + +} // namespace cuda +} // namespace contrib +} // namespace onnxruntime diff --git a/onnxruntime/core/graph/contrib_ops/contrib_defs.cc b/onnxruntime/core/graph/contrib_ops/contrib_defs.cc index 39449bea6303a..db0b13b0e1d27 100644 --- a/onnxruntime/core/graph/contrib_ops/contrib_defs.cc +++ b/onnxruntime/core/graph/contrib_ops/contrib_defs.cc @@ -1375,6 +1375,27 @@ ONNX_MS_OPERATOR_SET_SCHEMA(Sampling, 1, GreedySearchShapeInference(ctx); })); +constexpr const char* MoE_ver1_doc = R"DOC( + Mixture of experts. Examples: the Switch Transformer (https://arxiv.org/pdf/2101.03961.pdf) routes to the top-1 expert, + GLaM (https://arxiv.org/abs/2112.06905) activates the top-2 FFN experts, and Vision MoE (https://arxiv.org/pdf/2106.05974.pdf) + usually uses top-32 experts. + )DOC"; + +ONNX_MS_OPERATOR_SET_SCHEMA(MoE, 1, + OpSchema() + .SetDoc(MoE_ver1_doc) + .Attr("activation_type", "Activation function to use. Choose from relu, gelu, silu and identity.
Default is relu", AttributeProto::STRING, std::string("relu")) + .Attr("k", "Number of top experts to select from expert pool", AttributeProto::INT, static_cast(1)) + .Input(0, "input", "2D input tensor with shape (num_rows, hidden_size) or 3D input tensor with shape (batch_size, sequence_length, hidden_size)", "T") + .Input(1, "router_probs", "2D input tensor with shape (num_rows, num_experts)", "T") + .Input(2, "fc1_experts_weights", "3D input tensor with shape (num_experts, hidden_size, inter_size)", "T") + .Input(3, "fc2_experts_weights", "3D input tensor with shape (num_experts, inter_size, hidden_size)", "T") + .Input(4, "fc1_experts_bias", "2D optional input tensor with shape (num_experts, inter_size)", "T", OpSchema::Optional) + .Input(5, "fc2_experts_bias", "2D optional input tensor with shape (num_experts, hidden_size)", "T", OpSchema::Optional) + .Output(0, "output", "2D output tensor with shape (num_rows, hidden_size) or 3D output tensor with shape (batch_size, sequence_length, hidden_size)", "T") + .TypeConstraint("T", {"tensor(float)", "tensor(float16)"}, "Constrain input and output types to float or float16 tensors.") + .TypeAndShapeInferenceFunction(ONNX_NAMESPACE::propagateShapeAndTypeFromFirstInput)); + ONNX_MS_OPERATOR_SET_SCHEMA(SampleOp, 1, OpSchema() .Input(0, "X", "input", "T") diff --git a/onnxruntime/core/graph/contrib_ops/ms_opset.h b/onnxruntime/core/graph/contrib_ops/ms_opset.h index b35cfc5d12f36..5eef1b33a24dd 100644 --- a/onnxruntime/core/graph/contrib_ops/ms_opset.h +++ b/onnxruntime/core/graph/contrib_ops/ms_opset.h @@ -83,6 +83,7 @@ class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(Microsoft, 1, MatMulInteger16); class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(Microsoft, 1, MatMulFpQ4); #endif class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(Microsoft, 1, MaxpoolWithMask); +class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(Microsoft, 1, MoE); class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(Microsoft, 1, MultiHeadAttention); class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(Microsoft, 1, GroupQueryAttention); class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(Microsoft, 1, MurmurHash3); @@ -189,6 +190,7 @@ class OpSet_Microsoft_ver1 { fn(GetOpSchema()); #endif fn(GetOpSchema()); + fn(GetOpSchema()); fn(GetOpSchema()); fn(GetOpSchema()); fn(GetOpSchema()); diff --git a/onnxruntime/python/tools/symbolic_shape_infer.py b/onnxruntime/python/tools/symbolic_shape_infer.py index a91ff91010e4b..a9cbef98d9165 100755 --- a/onnxruntime/python/tools/symbolic_shape_infer.py +++ b/onnxruntime/python/tools/symbolic_shape_infer.py @@ -157,6 +157,7 @@ def __init__(self, int_max, auto_merge, guess_output_rank, verbose, prefix=""): "MemcpyFromHost": self._pass_on_shape_and_type, "MemcpyToHost": self._pass_on_shape_and_type, "Min": self._infer_symbolic_compute_ops, + "MoE": self._pass_on_shape_and_type, "Mul": self._infer_symbolic_compute_ops, "NonMaxSuppression": self._infer_NonMaxSuppression, "NonZero": self._infer_NonZero, diff --git a/onnxruntime/test/contrib_ops/moe_test.cc b/onnxruntime/test/contrib_ops/moe_test.cc new file mode 100644 index 0000000000000..ebb0261deefa5 --- /dev/null +++ b/onnxruntime/test/contrib_ops/moe_test.cc @@ -0,0 +1,423 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License.
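// Worked shape example for the MoE schema defined above (illustrative; it matches the dimensions used by
// the tests in this file). With num_rows = 4 tokens, hidden_size = 8, inter_size = 16, num_experts = 4
// and attribute k = 1:
//   input               : (4, 8)          router_probs     : (4, 4)
//   fc1_experts_weights : (4, 8, 16)      fc1_experts_bias : (4, 16)
//   fc2_experts_weights : (4, 16, 8)      fc2_experts_bias : (4, 8)
//   output              : (4, 8)  -- same shape and type as the input, which is why the schema uses
//   propagateShapeAndTypeFromFirstInput and symbolic shape inference simply passes the shape through.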
+ +#include "gtest/gtest.h" +#include "test/common/tensor_op_test_utils.h" +#include "test/common/cuda_op_test_utils.h" +#include "test/providers/provider_test_utils.h" + +namespace onnxruntime { +namespace test { + +static void RunMoETest( + const std::vector& input, + const std::vector& router_probs, + const std::vector& fc1_experts_weights, + const std::vector& fc2_experts_weights, + const std::vector& fc1_experts_bias, + const std::vector& fc2_experts_bias, + const std::vector& output_data, + int num_rows, + int num_experts, + int hidden_size, + int inter_size, + std::string activation_type, + bool use_float16 = false) { + int min_cuda_architecture = use_float16 ? 530 : 0; + + bool enable_cuda = HasCudaEnvironment(min_cuda_architecture); + if (enable_cuda) { + OpTester tester("MoE", 1, onnxruntime::kMSDomain); + tester.AddAttribute("k", static_cast(1)); + tester.AddAttribute("activation_type", activation_type); + + std::vector input_dims = {num_rows, hidden_size}; + std::vector router_probs_dims = {num_rows, num_experts}; + std::vector fc1_experts_weights_dims = {num_experts, hidden_size, inter_size}; + std::vector fc2_experts_weights_dims = {num_experts, inter_size, hidden_size}; + std::vector fc1_experts_bias_dims = {num_experts, inter_size}; + std::vector fc2_experts_bias_dims = {num_experts, hidden_size}; + std::vector output_dims = {num_rows, hidden_size}; + + if (use_float16) { + tester.AddInput("input", input_dims, ToFloat16(input)); + tester.AddInput("router_probs", router_probs_dims, ToFloat16(router_probs)); + tester.AddInput("fc1_experts_weights", fc1_experts_weights_dims, ToFloat16(fc1_experts_weights)); + tester.AddInput("fc2_experts_weights", fc2_experts_weights_dims, ToFloat16(fc2_experts_weights)); + tester.AddInput("fc1_experts_bias", fc1_experts_bias_dims, ToFloat16(fc1_experts_bias)); + tester.AddInput("fc2_experts_bias", fc2_experts_bias_dims, ToFloat16(fc2_experts_bias)); + tester.AddOutput("output", output_dims, ToFloat16(output_data)); + } else { + tester.AddInput("input", input_dims, input); + tester.AddInput("router_probs", router_probs_dims, router_probs); + tester.AddInput("fc1_experts_weights", fc1_experts_weights_dims, fc1_experts_weights); + tester.AddInput("fc2_experts_weights", fc2_experts_weights_dims, fc2_experts_weights); + tester.AddInput("fc1_experts_bias", fc1_experts_bias_dims, fc1_experts_bias); + tester.AddInput("fc2_experts_bias", fc2_experts_bias_dims, fc2_experts_bias); + tester.AddOutput("output", output_dims, output_data); + } + + std::vector> execution_providers; + execution_providers.push_back(DefaultCudaExecutionProvider()); + tester.Run(OpTester::ExpectResult::kExpectSuccess, "", {}, nullptr, &execution_providers); + } +} + +TEST(MoETest, MoETest_Gelu) { + int num_rows = 4; + int num_experts = 4; + int hidden_size = 8; + int inter_size = 16; + + const std::vector input = { + -1.1200173f, -0.45884353f, -1.2929888f, 1.0784022f, 0.116372705f, 0.26902613f, -1.8818876f, -0.5457026f, + 0.22222236f, -0.28868636f, 0.6692926f, 1.4944887f, 0.02431708f, -0.49781424f, 0.7378293f, 1.276276f, + -0.15469065f, -0.28456813f, -0.6296439f, -0.24855971f, 0.80565417f, -1.1018785f, -0.74082595f, 0.82407707f, + -0.95033455f, 0.659333f, -0.68629056f, -0.2916592f, 1.869919f, -1.1053563f, -0.14417848f, -0.34625578f}; + const std::vector router_probs = { + -0.84837115f, 0.100507565f, -0.10548311f, 0.40957215f, 1.0159845f, 0.26919764f, 0.021741152f, -0.34184334f, + -0.71324956f, 0.29018253f, -0.18227568f, 0.31496462f, -0.48426327f, -1.006643f, -0.100081146f, 
-0.07692295f}; + const std::vector fc1_experts_weights = { + 0.14731085f, 0.52229995f, 0.14753294f, 0.22475791f, 0.20864725f, 0.6708725f, 0.20204341f, 0.4890914f, + 0.52103406f, 0.8223115f, 0.122039974f, 0.15674388f, 0.20966923f, 0.8499667f, 0.3202675f, 0.92174435f, + 0.6808038f, 0.563313f, 0.496278f, 0.40115923f, 0.5627332f, 0.38582766f, 0.49648678f, 0.5637965f, + 0.10889745f, 0.23793429f, 0.90374637f, 0.09422666f, 0.4640969f, 0.99461937f, 0.6806185f, 0.5141565f, + 0.066695035f, 0.74768895f, 0.14385962f, 0.35806787f, 0.33224183f, 0.4259563f, 0.50546914f, 0.91240376f, + 0.5624194f, 0.9478464f, 0.8058562f, 0.18389302f, 0.72425205f, 0.14655197f, 0.28808743f, 0.64706135f, + 0.66509604f, 0.875114f, 0.33904207f, 0.50080043f, 0.7574118f, 0.016453922f, 0.8614903f, 0.08653879f, + 0.50689125f, 0.41499162f, 0.23666352f, 0.5660855f, 0.91345936f, 0.35384023f, 0.20315295f, 0.31508058f, + 0.0044258237f, 0.725697f, 0.25986814f, 0.16632986f, 0.21194929f, 0.787478f, 0.76478684f, 0.8837609f, + 0.68136156f, 0.33302015f, 0.36027592f, 0.647715f, 0.91101736f, 0.6359461f, 0.26342732f, 0.2649613f, + 0.02726549f, 0.608024f, 0.21940875f, 0.054212093f, 0.93843824f, 0.1752944f, 0.44311923f, 0.64324677f, + 0.51592916f, 0.16355914f, 0.09583914f, 0.8985412f, 0.58141935f, 0.91481227f, 0.3323797f, 0.6472777f, + 0.3856619f, 0.47776443f, 0.1954779f, 0.66910046f, 0.65808296f, 0.4896857f, 0.38754892f, 0.1917851f, + 0.8457724f, 0.12778795f, 0.70483273f, 0.33187324f, 0.258766f, 0.58982253f, 0.24027151f, 0.6152024f, + 0.5981904f, 0.12875527f, 0.5832493f, 0.7129646f, 0.6979155f, 0.43706065f, 0.09010619f, 0.42292297f, + 0.67365384f, 0.31756145f, 0.68979055f, 0.8329813f, 0.2389242f, 0.5049309f, 0.7067495f, 0.5391889f, + 0.54176575f, 0.5624327f, 0.10692614f, 0.5392941f, 0.8462349f, 0.9505569f, 0.79387546f, 0.5670015f, + 0.7335071f, 0.25676018f, 0.08565581f, 0.07003945f, 0.99880487f, 0.8173947f, 0.15438312f, 0.6956213f, + 0.8775838f, 0.9998074f, 0.93719745f, 0.8873769f, 0.38537037f, 0.32452917f, 0.9105244f, 0.7801898f, + 0.19911051f, 0.9495086f, 0.7415793f, 0.77256775f, 0.18661183f, 0.6434499f, 0.32471877f, 0.8906783f, + 0.4100297f, 0.69465625f, 0.5888109f, 0.7127341f, 0.33008623f, 0.7437857f, 0.15076452f, 0.6129275f, + 0.16170406f, 0.006731212f, 0.09847212f, 0.89473504f, 0.7705178f, 0.96910787f, 0.9005606f, 0.053477287f, + 0.15878445f, 0.4192087f, 0.17528385f, 0.84719825f, 0.121996105f, 0.25604928f, 0.016954303f, 0.21612722f, + 0.91123873f, 0.90938f, 0.85791886f, 0.88606364f, 0.94459325f, 0.3719685f, 0.72000104f, 0.9454652f, + 0.6654094f, 0.9998382f, 0.75933146f, 0.81082416f, 0.32500392f, 0.73991376f, 0.5574533f, 0.38059133f, + 0.21814507f, 0.21944171f, 0.11525959f, 0.83566517f, 0.8554656f, 0.44309366f, 0.210657f, 0.88645273f, + 0.81974447f, 0.537167f, 0.26393235f, 0.9595239f, 0.70447034f, 0.12042731f, 0.97854143f, 0.8796869f, + 0.31775457f, 0.78107727f, 0.21590549f, 0.42164284f, 0.9245506f, 0.52065957f, 0.14639091f, 0.33288354f, + 0.36427742f, 0.4035356f, 0.5478503f, 0.9624148f, 0.5267702f, 0.19128f, 0.52562714f, 0.7397436f, + 0.7480201f, 0.04303074f, 0.41052878f, 0.12842774f, 0.2866572f, 0.6801467f, 0.1449349f, 0.68586344f, + 0.92438906f, 0.5327942f, 0.16675615f, 0.32085752f, 0.60918206f, 0.11884099f, 0.74840516f, 0.04606521f, + 0.01935333f, 0.014169693f, 0.39856833f, 0.83621645f, 0.026760519f, 0.91559356f, 0.29998857f, 0.64644206f, + 0.52280146f, 0.049140453f, 0.9146645f, 0.7692217f, 0.99699783f, 0.7526061f, 0.1699655f, 0.9172919f, + 0.5268722f, 0.73710823f, 0.09908545f, 0.35618675f, 0.009061217f, 0.30525374f, 0.6078656f, 
0.10741913f, + 0.6593821f, 0.7684034f, 0.56965464f, 0.16545832f, 0.11234015f, 0.3457417f, 0.7194791f, 0.9931982f, + 0.7875145f, 0.44369537f, 0.6753082f, 0.009468555f, 0.07294935f, 0.73330396f, 0.2167924f, 0.74054784f, + 0.14703393f, 0.25234455f, 0.08815551f, 0.76092035f, 0.44905245f, 0.88480055f, 0.8094361f, 0.7766713f, + 0.51607805f, 0.345411f, 0.39128417f, 0.5664503f, 0.74785477f, 0.14970505f, 0.91963893f, 0.44563496f, + 0.08102721f, 0.22947109f, 0.94240886f, 0.9572636f, 0.036860168f, 0.85264915f, 0.7505796f, 0.79595923f, + 0.9232646f, 0.23052484f, 0.6578879f, 0.7046166f, 0.35225332f, 0.66732657f, 0.3561433f, 0.80913067f, + 0.3612727f, 0.31360215f, 0.6258745f, 0.6773468f, 0.25571418f, 0.54419917f, 0.78976786f, 0.45025164f, + 0.65216696f, 0.3794065f, 0.6752498f, 0.1378029f, 0.2059856f, 0.24620473f, 0.95950544f, 0.36545795f, + 0.49863482f, 0.25775224f, 0.99914503f, 0.9883351f, 0.122906685f, 0.09466505f, 0.12100351f, 0.49758863f, + 0.37254804f, 0.17272717f, 0.32066393f, 0.59446543f, 0.23875463f, 0.61079127f, 0.38534206f, 0.25771832f, + 0.56869274f, 0.9111291f, 0.16196036f, 0.5232172f, 0.31561613f, 0.99065316f, 0.025618374f, 0.0206694f, + 0.9926925f, 0.18365502f, 0.5958617f, 0.45684695f, 0.3946715f, 0.3883261f, 0.8177203f, 0.5238985f, + 0.013192713f, 0.20481992f, 0.32954985f, 0.7516082f, 0.17643315f, 0.9714598f, 0.38863534f, 0.410219f, + 0.891779f, 0.75130385f, 0.92406017f, 0.7892222f, 0.34832305f, 0.1682638f, 0.46279848f, 0.9138188f, + 0.3321901f, 0.036315024f, 0.7049642f, 0.9867357f, 0.3576584f, 0.08598822f, 0.046470165f, 0.6252997f, + 0.46214014f, 0.24750638f, 0.60106593f, 0.6898794f, 0.8976595f, 0.8881911f, 0.42515814f, 0.059116423f, + 0.048188448f, 0.9668448f, 0.7210276f, 0.7179537f, 0.06738949f, 0.96300787f, 0.97367156f, 0.95143014f, + 0.07820749f, 0.3113383f, 0.1561181f, 0.9734828f, 0.28516f, 0.27172273f, 0.76195645f, 0.26870382f, + 0.25373894f, 0.45626426f, 0.45194024f, 0.11051077f, 0.91683406f, 0.27943915f, 0.67735744f, 0.9348918f, + 0.7521582f, 0.57078993f, 0.9254285f, 0.5672131f, 0.2686717f, 0.97299975f, 0.61834025f, 0.012159586f, + 0.3576542f, 0.15941626f, 0.9383765f, 0.41742706f, 0.044237554f, 0.46856833f, 0.81400645f, 0.6299002f, + 0.6581022f, 0.5464366f, 0.68640935f, 0.378174f, 0.3010999f, 0.032645762f, 0.12333155f, 0.71670127f, + 0.20394331f, 0.57173324f, 0.6595957f, 0.53540194f, 0.17582512f, 0.9781642f, 0.20925027f, 0.9112503f, + 0.10224587f, 0.37972575f, 0.7719844f, 0.29570967f, 0.9200215f, 0.15592176f, 0.080114245f, 0.27454042f, + 0.5808252f, 0.96037793f, 0.26129955f, 0.6788141f, 0.37464648f, 0.39156884f, 0.8676517f, 0.112507045f, + 0.55310667f, 0.9702046f, 0.4312939f, 0.88821906f, 0.3460216f, 0.9024811f, 0.016334832f, 0.42793816f, + 0.4121768f, 0.6620425f, 0.6961637f, 0.88390845f, 0.425507f, 0.48017246f, 0.8424056f, 0.36471343f, + 0.9383168f, 0.16709393f, 0.44589508f, 0.47314453f, 0.72310495f, 0.84183806f, 0.4207481f, 0.0857597f, + 0.7477461f, 0.6495659f, 0.70084965f, 0.19156617f, 0.8217978f, 0.9735775f, 0.5433857f, 0.032975793f, + 0.85099494f, 0.12927437f, 0.61493605f, 0.5726589f, 0.26598173f, 0.6740978f, 0.052783668f, 0.61387974f}; + const std::vector fc2_experts_weights = { + 0.18302453f, 0.44593316f, 0.5643144f, 0.9259722f, 0.26143986f, 0.82031804f, 0.4364831f, 0.2625361f, + 0.06460017f, 0.04124081f, 0.98830533f, 0.37530023f, 0.5249744f, 0.63555616f, 0.8398661f, 0.92673707f, + 0.9055086f, 0.12955844f, 0.4198916f, 0.20413119f, 0.21432412f, 0.6186035f, 0.969324f, 0.099448025f, + 0.80260223f, 0.24076664f, 0.40261286f, 0.89688545f, 0.38691485f, 0.5455279f, 0.15048373f, 
0.92562044f, + 0.43536508f, 0.13430476f, 0.64640516f, 0.14449131f, 0.10324633f, 0.5304596f, 0.8964218f, 0.358508f, + 0.73533344f, 0.9296606f, 0.83163047f, 0.23771948f, 0.44519007f, 0.34265757f, 0.09793854f, 0.5002066f, + 0.87621754f, 0.9212578f, 0.54665035f, 0.6135615f, 0.28353918f, 0.8774212f, 0.29194576f, 0.1526736f, + 0.57699674f, 0.7996927f, 0.04920423f, 0.95198375f, 0.67986554f, 0.14969361f, 0.39229625f, 0.93378997f, + 0.11638266f, 0.3538614f, 0.66399014f, 0.06195748f, 0.7740991f, 0.7602738f, 0.81010276f, 0.18122643f, + 0.9980005f, 0.20361924f, 0.99917024f, 0.020154774f, 0.054515004f, 0.80709815f, 0.55225646f, 0.52884465f, + 0.22312081f, 0.29026228f, 0.35380626f, 0.012922287f, 0.52598435f, 0.58842945f, 0.4995767f, 0.66146517f, + 0.9744255f, 0.632942f, 0.3169638f, 0.29422665f, 0.18009722f, 0.15339059f, 0.41947508f, 0.4115672f, + 0.72243124f, 0.2862816f, 0.89860183f, 0.14915991f, 0.5014211f, 0.94945997f, 0.99719256f, 0.21036887f, + 0.5890645f, 0.55906135f, 0.26557416f, 0.32725257f, 0.635427f, 0.1523174f, 0.58249784f, 0.71636236f, + 0.30296493f, 0.9153206f, 0.46709478f, 0.72685635f, 0.9951532f, 0.34716582f, 0.7717041f, 0.3569854f, + 0.4269635f, 0.41526443f, 0.4968937f, 0.3111158f, 0.61719346f, 0.5188402f, 0.8169449f, 0.39879733f, + 0.5501401f, 0.31400484f, 0.08127314f, 0.7023336f, 0.56397897f, 0.29975814f, 0.33094752f, 0.63076067f, + 0.40959156f, 0.82673794f, 0.52832156f, 0.68886834f, 0.7178481f, 0.37731683f, 0.71633244f, 0.86896664f, + 0.5230092f, 0.59784645f, 0.5181678f, 0.8461837f, 0.28890234f, 0.23421508f, 0.7178768f, 0.06484294f, + 0.5080162f, 0.27005446f, 0.8300168f, 0.034480453f, 0.8031663f, 0.9946784f, 0.60117006f, 0.46668667f, + 0.9921749f, 0.28632385f, 0.45993322f, 0.28104752f, 0.43097937f, 0.60866946f, 0.5667807f, 0.40556252f, + 7.969141e-05f, 0.52560204f, 0.48518902f, 0.5752184f, 0.8831251f, 0.9860047f, 0.20335877f, 0.46882278f, + 0.2996632f, 0.03917718f, 0.13617045f, 0.96928054f, 0.79153055f, 0.76857555f, 0.7778716f, 0.102760494f, + 0.5525096f, 0.9653573f, 0.22095704f, 0.94479716f, 0.63141924f, 0.8517718f, 0.28580618f, 0.73050886f, + 0.05675614f, 0.46825224f, 0.6667756f, 0.6499472f, 0.91840404f, 0.99132854f, 0.9548785f, 0.8356961f, + 0.851531f, 0.43548512f, 0.111976564f, 0.31438643f, 0.44386774f, 0.22980672f, 0.75558543f, 0.6755136f, + 0.58067596f, 0.62078035f, 0.93922615f, 0.6821157f, 0.061530292f, 0.13705963f, 0.7203748f, 0.5681396f, + 0.7438458f, 0.0006400347f, 0.038565338f, 0.8066132f, 0.81982285f, 0.047644496f, 0.68979263f, 0.109577894f, + 0.8786539f, 0.6568952f, 0.99439347f, 0.0070040226f, 0.018661916f, 0.838051f, 0.94391155f, 0.80634f, + 0.8324149f, 0.078864336f, 0.8619068f, 0.027926445f, 0.61170083f, 0.17248261f, 0.30140227f, 0.5885344f, + 0.30341f, 0.42088854f, 0.02608782f, 0.02856338f, 0.69368154f, 0.28836077f, 0.19580519f, 0.30270886f, + 0.09121573f, 0.100299895f, 0.79918617f, 0.75412107f, 0.56660175f, 0.22687018f, 0.6663505f, 0.5224626f, + 0.1426636f, 0.6075949f, 0.95527196f, 0.008196831f, 0.0028039217f, 0.5640625f, 0.87651116f, 0.19575512f, + 0.61006856f, 0.85149264f, 0.6541582f, 0.6082054f, 0.998863f, 0.82573634f, 0.21878648f, 0.54321826f, + 0.7554362f, 0.94095474f, 0.002533555f, 0.77075267f, 0.35483408f, 0.010389388f, 0.610987f, 0.22779316f, + 0.5708561f, 0.17537653f, 0.12373549f, 0.4575745f, 0.33203715f, 0.79243237f, 0.54310906f, 0.8902793f, + 0.5937015f, 0.33921933f, 0.8386668f, 0.52732253f, 0.59384584f, 0.3391887f, 0.5017944f, 0.40386343f, + 0.45749134f, 0.110060334f, 0.49692506f, 0.084977865f, 0.3924346f, 0.7897731f, 0.15232486f, 0.16297412f, + 
0.37791175f, 0.36293298f, 0.5846437f, 0.5830078f, 0.75354826f, 0.15555972f, 0.4647144f, 0.7796456f, + 0.93248576f, 0.46352726f, 0.2106899f, 0.6437313f, 0.78473866f, 0.18762505f, 0.20985329f, 0.7209991f, + 0.464967f, 0.02775067f, 0.21170747f, 0.7027664f, 0.33041215f, 0.8451145f, 0.89526993f, 0.57273495f, + 0.46046263f, 0.34128642f, 0.47471708f, 0.59101045f, 0.11807448f, 0.38050216f, 0.08409953f, 0.80687743f, + 0.18158185f, 0.9567719f, 0.3711096f, 0.21356237f, 0.74022657f, 0.57453954f, 0.846228f, 0.70873487f, + 0.018330276f, 0.8162452f, 0.40584308f, 0.27901447f, 0.81752694f, 0.86466515f, 0.060534656f, 0.45478833f, + 0.9106033f, 0.6936434f, 0.92123467f, 0.32865065f, 0.22417879f, 0.9299548f, 0.70841146f, 0.97999126f, + 0.2911517f, 0.17896658f, 0.44139355f, 0.029210031f, 0.6959876f, 0.8687942f, 0.62002844f, 0.45059657f, + 0.74790317f, 0.18262434f, 0.98912156f, 0.0028281808f, 0.021027386f, 0.38184917f, 0.90842223f, 0.5500629f, + 0.69202286f, 0.13349658f, 0.6823429f, 0.44412827f, 0.7004118f, 0.8531213f, 0.7173401f, 0.4574679f, + 0.46920043f, 0.18640989f, 0.31914896f, 0.82491904f, 0.29950172f, 0.8105199f, 0.30173403f, 0.38355058f, + 0.5106411f, 0.04116726f, 0.49500751f, 0.44960213f, 0.45508182f, 0.4000479f, 0.89418864f, 0.8689936f, + 0.16112137f, 0.7322634f, 0.10780871f, 0.07433933f, 0.652841f, 0.50734824f, 0.26674682f, 0.017748117f, + 0.30643195f, 0.66699976f, 0.03719926f, 0.014267266f, 0.56343627f, 0.13979793f, 0.061959863f, 0.3073569f, + 0.41949958f, 0.045647383f, 0.16613615f, 0.5327839f, 0.028514147f, 0.4297228f, 0.17714864f, 0.15338135f, + 0.6965155f, 0.11515516f, 0.1210829f, 0.78514075f, 0.59348315f, 0.9553564f, 0.36635226f, 0.25849247f, + 0.45372677f, 0.5025297f, 0.88132215f, 0.0019600391f, 0.46439964f, 0.7211761f, 0.22465849f, 0.2459296f, + 0.7416339f, 0.020907402f, 0.6184779f, 0.112906754f, 0.7485309f, 0.072479784f, 0.8074024f, 0.026683688f, + 0.07971662f, 0.50736845f, 0.8939942f, 0.0718022f, 0.27697015f, 0.9391413f, 0.4161513f, 0.7071423f, + 0.019000888f, 0.34275955f, 0.24608392f, 0.9215306f, 0.70751995f, 0.13516217f, 0.5806135f, 0.49425328f, + 0.29456508f, 0.21446168f, 0.3340807f, 0.89411324f, 0.14157385f, 0.14382833f, 0.34574044f, 0.50869817f, + 0.63610595f, 0.51500404f, 0.37963718f, 0.19682491f, 0.41028368f, 0.29872334f, 0.9039644f, 0.013295233f, + 0.1810705f, 0.093204916f, 0.4086216f, 0.8896367f, 0.9382696f, 0.06472236f, 0.47833657f, 0.7934831f, + 0.7203987f, 0.9095519f, 0.4861309f, 0.16405362f, 0.83076525f, 0.3285427f, 0.7588931f, 0.37678176f, + 0.71254706f, 0.949713f, 0.96492773f, 0.044967473f, 0.16925985f, 0.2932666f, 0.18114948f, 0.97975004f, + 0.4558406f, 0.16832972f, 0.27750528f, 0.2238177f, 0.7039947f, 0.06387442f, 0.033798456f, 0.007119417f}; + const std::vector fc1_experts_bias = { + 0.71526206f, 0.7472273f, 0.18946046f, 0.6239893f, 0.86909235f, 0.5726507f, 0.3942092f, 0.5369412f, + 0.44638616f, 0.7517496f, 0.16049433f, 0.75355124f, 0.7818118f, 0.19706267f, 0.9082818f, 0.9910924f, + 0.30288565f, 0.3599528f, 0.74917775f, 0.10828978f, 0.697729f, 0.61665237f, 0.81516486f, 0.0656966f, + 0.0846076f, 0.72456455f, 0.6801054f, 0.034616888f, 0.22117025f, 0.042510748f, 0.14178854f, 0.27440017f, + 0.91376925f, 0.40047455f, 0.7871756f, 0.97484046f, 0.7278661f, 0.052394807f, 0.75161135f, 0.6907173f, + 0.8875328f, 0.0067828894f, 0.807508f, 0.9092707f, 0.034817636f, 0.55231315f, 0.92683655f, 0.13634592f, + 0.66405964f, 0.7209387f, 0.63104504f, 0.9971379f, 0.9093898f, 0.9289774f, 0.4376766f, 0.9193563f, + 0.03404367f, 0.23018533f, 0.39305943f, 0.3514716f, 0.96184736f, 0.73583263f, 
0.8219065f, 0.8401047f}; + const std::vector fc2_experts_bias = { + 0.12649822f, 0.4420895f, 0.5730123f, 0.63004625f, 0.7571163f, 0.3010466f, 0.3492328f, 0.91837066f, + 0.36580783f, 0.15267932f, 0.8390199f, 0.83857775f, 0.34321654f, 0.40003997f, 0.13106f, 0.08245313f, + 0.68802476f, 0.28640372f, 0.89804775f, 0.09964341f, 0.43088746f, 0.5107959f, 0.75697356f, 0.90466535f, + 0.83860224f, 0.720098f, 0.2705031f, 0.14292616f, 0.052693605f, 0.5248023f, 0.9849401f, 0.40502876f}; + const std::vector output = { + 0.2552814f, 0.17651685f, 0.0034551744f, -0.123282805f, 0.0073816925f, 0.004265253f, 0.16927283f, -0.05276826f, + 9.555821f, 7.6907287f, 10.626425f, 7.0543795f, 8.10093f, 10.3664465f, 10.925815f, 8.737018f, + 0.565234f, 0.17098689f, 0.10810414f, 0.43916586f, 0.3535297f, 0.45673048f, 0.3853893f, 0.18613164f, + 1.3354061f, 0.5049282f, 0.72775036f, 0.90331376f, 1.2945517f, 0.9123066f, 1.1995136f, 0.7708638f}; + + RunMoETest(input, + router_probs, + fc1_experts_weights, + fc2_experts_weights, + fc1_experts_bias, + fc2_experts_bias, + output, + num_rows, + num_experts, + hidden_size, + inter_size, + "gelu"); +} + +TEST(MoETest, MoETest_Relu) { + int num_rows = 4; + int num_experts = 4; + int hidden_size = 8; + int inter_size = 16; + + const std::vector input = { + 0.7670296f, -0.93721074f, -2.330477f, -0.78088343f, 0.8250065f, 1.2206652f, -0.06297584f, 1.1463639f, + 1.2215378f, -0.31372663f, -0.7234253f, -0.3627346f, 0.44249064f, 0.19418247f, -0.49998695f, -0.55005103f, + 0.023851749f, -1.5203826f, 0.52939993f, -0.39082858f, -1.9291036f, 0.034976702f, -0.48336256f, -1.226073f, + -0.33963847f, 0.0073261578f, -0.0521804f, 1.16749f, 1.7302082f, 2.0561688f, -0.2347232f, -1.3456243f}; + const std::vector router_probs = { + -0.08146476f, -0.40439552f, 1.0100367f, -0.7724162f, -0.08113786f, -0.36328858f, 0.3688482f, -0.013465762f, + -0.32420647f, -0.3815508f, 0.79585606f, 0.14430691f, -0.21869831f, 0.11483674f, -0.11992836f, 0.35216537f}; + const std::vector fc1_experts_weights = { + 0.81960344f, 0.9296998f, 0.45050132f, 0.38805157f, 0.50729614f, 0.47014588f, 0.62020564f, 0.6401168f, + 0.045871615f, 0.31548113f, 0.92106473f, 0.6947775f, 0.4751312f, 0.19854712f, 0.19409746f, 0.052116573f, + 0.3370188f, 0.6688521f, 0.8188108f, 0.73084867f, 0.058027983f, 0.19931877f, 0.42109168f, 0.98367476f, + 0.57232875f, 0.37051463f, 0.7068576f, 0.30955923f, 0.17637217f, 0.8649436f, 0.2726491f, 0.39976662f, + 0.0025978684f, 0.8346353f, 0.8788173f, 0.6822241f, 0.1513629f, 0.0065300465f, 0.093910515f, 0.8728501f, + 0.7400529f, 0.9207522f, 0.76193494f, 0.6265461f, 0.49510366f, 0.11974698f, 0.07161391f, 0.032325685f, + 0.704681f, 0.254516f, 0.3993737f, 0.21224737f, 0.40888822f, 0.14808255f, 0.17329216f, 0.6658554f, + 0.3514018f, 0.8086716f, 0.33959562f, 0.13321638f, 0.41178054f, 0.2576263f, 0.3470292f, 0.024002194f, + 0.77974546f, 0.15189773f, 0.75130886f, 0.7268921f, 0.85721636f, 0.11647397f, 0.8595984f, 0.2636242f, + 0.6855346f, 0.96955734f, 0.42948407f, 0.49613327f, 0.38488472f, 0.08250773f, 0.73995143f, 0.003641069f, + 0.81039995f, 0.87411255f, 0.9728532f, 0.38206023f, 0.08917904f, 0.61241513f, 0.77621365f, 0.0023456216f, + 0.38650817f, 0.20027226f, 0.45626813f, 0.25389326f, 0.2956162f, 0.34127057f, 0.024847984f, 0.91025376f, + 0.9191656f, 0.42156547f, 0.44305897f, 0.29594004f, 0.04846859f, 0.013427794f, 0.6858292f, 0.22547692f, + 0.17856151f, 0.4609884f, 0.33349442f, 0.3382396f, 0.5160656f, 0.3939438f, 0.3278438f, 0.26059705f, + 0.0930863f, 0.9192536f, 0.29990643f, 0.63248974f, 0.32651705f, 0.54063064f, 0.9661502f, 
0.73036134f, + 0.06670016f, 0.6984514f, 0.9746214f, 0.63154167f, 0.83521235f, 0.99294376f, 0.4233855f, 0.6037772f, + 0.15248245f, 0.39696145f, 0.8702919f, 0.7563229f, 0.18360549f, 0.099057496f, 0.15831816f, 0.00656116f, + 0.114180505f, 0.3763513f, 0.8374386f, 0.5836911f, 0.11969727f, 0.09888804f, 0.74873763f, 0.12807935f, + 0.43843627f, 0.739853f, 0.26859397f, 0.44548005f, 0.45647776f, 0.38170832f, 0.24648392f, 0.054280818f, + 0.0958215f, 0.23226917f, 0.98291886f, 0.25849265f, 0.16423601f, 0.6211971f, 0.63780516f, 0.77395487f, + 0.8800602f, 0.7784371f, 0.004249513f, 0.5443443f, 0.80287653f, 0.45378727f, 0.20536041f, 0.9766699f, + 0.31298608f, 0.21532774f, 0.04922247f, 0.52233416f, 0.72156656f, 0.6106814f, 0.59887487f, 0.12080628f, + 0.03305638f, 0.5088047f, 0.95591706f, 0.7884607f, 0.20888287f, 0.43509573f, 0.13140821f, 0.2587883f, + 0.5905492f, 0.77226925f, 0.91418463f, 0.04094696f, 0.8343076f, 0.14735395f, 0.6872336f, 0.92312264f, + 0.5070212f, 0.9549045f, 0.07397425f, 0.3090204f, 0.79162645f, 0.39106607f, 0.39764988f, 0.29160416f, + 0.84465307f, 0.7452516f, 0.66022503f, 0.21901816f, 0.09412521f, 0.5540803f, 0.6481394f, 0.26914406f, + 0.36010116f, 0.83768386f, 0.53982985f, 0.52255917f, 0.37694973f, 0.04720515f, 0.029871285f, 0.26099247f, + 0.2458393f, 0.6557768f, 0.35444462f, 0.30438894f, 0.9767149f, 0.67416143f, 0.85645115f, 0.25794363f, + 0.2957666f, 0.68377024f, 0.16686243f, 0.17314798f, 0.47585016f, 0.31711966f, 0.125171f, 0.7965795f, + 0.90208143f, 0.58111167f, 0.41294336f, 0.036863506f, 0.31788063f, 0.6272928f, 0.73576546f, 0.43679124f, + 0.30232358f, 0.77861303f, 0.10180014f, 0.816009f, 0.30602258f, 0.5076527f, 0.40119207f, 0.5606195f, + 0.3489008f, 0.8635635f, 0.48700142f, 0.89029974f, 0.98074025f, 0.25640452f, 0.13524544f, 0.901151f, + 0.89180696f, 0.11822635f, 0.46134835f, 0.006936848f, 0.09070045f, 0.59657127f, 0.6330173f, 0.6059905f, + 0.36391765f, 0.96128887f, 0.571489f, 0.2049576f, 0.4716931f, 0.6200726f, 0.67509633f, 0.14645958f, + 0.6873948f, 0.24455917f, 0.08452982f, 0.22689629f, 0.9822047f, 0.9274289f, 0.9477422f, 0.7935056f, + 0.87772477f, 0.43307513f, 0.22488606f, 0.7498283f, 0.24090862f, 0.16256708f, 0.34033298f, 0.6049296f, + 0.7573983f, 0.3057955f, 0.20571685f, 0.56744653f, 0.2052834f, 0.17446929f, 0.76062596f, 0.4160077f, + 0.9568925f, 0.9863913f, 0.64955276f, 0.67207885f, 0.61514187f, 0.50783044f, 0.46363378f, 0.50687206f, + 0.6867124f, 0.9648854f, 0.37042046f, 0.2886421f, 0.37891757f, 0.25843787f, 0.58501935f, 0.8732242f, + 0.8909887f, 0.72956276f, 0.13203424f, 0.23164761f, 0.3901443f, 0.40783793f, 0.54112387f, 0.041014254f, + 0.65562236f, 0.11856395f, 0.18362767f, 0.08430874f, 0.9356598f, 0.026530087f, 0.8771834f, 0.48319155f, + 0.4418506f, 0.81273925f, 0.4537862f, 0.81357706f, 0.8615075f, 0.06589496f, 0.692392f, 0.5943895f, + 0.60750586f, 0.5729957f, 0.6367655f, 0.2594666f, 0.43602943f, 0.97506f, 0.83592474f, 0.48121578f, + 0.029734552f, 0.5219139f, 0.15951324f, 0.90659577f, 0.19645631f, 0.4638992f, 0.38902867f, 0.5889769f, + 0.9705138f, 0.5475096f, 0.789582f, 0.8881108f, 0.9036556f, 0.32732427f, 0.38817167f, 0.7409689f, + 0.36356616f, 0.734132f, 0.39076614f, 0.16087383f, 0.70352167f, 0.576659f, 0.7229242f, 0.996743f, + 0.84136647f, 0.97399056f, 0.5267614f, 0.06989372f, 0.14923638f, 0.18941313f, 0.059375823f, 0.24937624f, + 0.039716125f, 0.038692355f, 0.20122272f, 0.0070830584f, 0.19309378f, 0.69065434f, 0.9170264f, 0.3512686f, + 0.3545606f, 0.76697665f, 0.25331455f, 0.26358372f, 0.80806476f, 0.064349174f, 0.5611374f, 0.941691f, + 0.58574325f, 0.6359719f, 
0.20880443f, 0.49310172f, 0.5274922f, 0.62271714f, 0.694273f, 0.9344639f, + 0.11835027f, 0.51498765f, 0.25018185f, 0.10446805f, 0.45996118f, 0.059881568f, 0.8489496f, 0.5579074f, + 0.23052096f, 0.76128954f, 0.02678603f, 0.3066004f, 0.40259063f, 0.07512486f, 0.18205583f, 0.4183907f, + 0.8793823f, 0.9828271f, 0.8181312f, 0.20143801f, 0.17288941f, 0.9363466f, 0.6768587f, 0.51328385f, + 0.56766605f, 0.098151624f, 0.33305728f, 0.98130906f, 0.3766839f, 0.47491795f, 0.08483446f, 0.22029644f, + 0.4897902f, 0.18942028f, 0.4379952f, 0.7034796f, 0.0109113455f, 0.64850605f, 0.16939592f, 0.25597447f, + 0.69195485f, 0.8975601f, 0.36334568f, 0.29471546f, 0.04788208f, 0.24217117f, 0.062181532f, 0.38556474f, + 0.6020277f, 0.03156215f, 0.93655676f, 0.81369543f, 0.010527074f, 0.2611835f, 0.6630776f, 0.3972702f, + 0.44551176f, 0.27424216f, 0.9016098f, 0.22050089f, 0.9146384f, 0.53226113f, 0.6005109f, 0.8900659f, + 0.4176172f, 0.21532834f, 0.4191329f, 0.9055267f, 0.12900633f, 0.6134902f, 0.008604288f, 0.76215106f, + 0.68473387f, 0.5211961f, 0.71459657f, 0.50056237f, 0.7766764f, 0.10418975f, 0.42657375f, 0.7218073f, + 0.9979084f, 0.7546957f, 0.1364128f, 0.8845484f, 0.38850087f, 0.39324278f, 0.04554516f, 0.42129284f, + 0.8536634f, 0.5697224f, 0.20877302f, 0.65390605f, 0.3396778f, 0.956497f, 0.066022694f, 0.34206223f, + 0.017213225f, 0.3030849f, 0.6576238f, 0.9813073f, 0.58397317f, 0.99017924f, 0.59782606f, 0.788768f, + 0.9008311f, 0.91796166f, 0.22013813f, 0.959695f, 0.80288273f, 0.2662105f, 0.26139832f, 0.080626905f}; + const std::vector fc2_experts_weights = { + 0.6255686f, 0.09472537f, 0.71121234f, 0.65789884f, 0.065598905f, 0.63625044f, 0.45933473f, 0.7284089f, + 0.7868948f, 0.0029274821f, 0.95854944f, 0.919321f, 0.6989418f, 0.043019474f, 0.32138962f, 0.35509557f, + 0.37150103f, 0.78196156f, 0.6817853f, 0.89608955f, 0.31273842f, 0.6682699f, 0.6778976f, 0.08370459f, + 0.014990091f, 0.24055547f, 0.84227383f, 0.029270172f, 0.0647831f, 0.7801003f, 0.7697645f, 0.91119635f, + 0.12253064f, 0.13405013f, 0.75649333f, 0.9348151f, 0.7991694f, 0.57832605f, 0.66478735f, 0.97456336f, + 0.17739785f, 0.2729941f, 0.8497335f, 0.15788019f, 0.22429371f, 0.86499554f, 0.65776104f, 0.661535f, + 0.2880798f, 0.49309975f, 0.9576164f, 0.19988996f, 0.5039311f, 0.73779976f, 0.15482187f, 0.98558843f, + 0.25019473f, 0.379932f, 0.36471486f, 0.17417055f, 0.009367704f, 0.7819258f, 0.63283706f, 0.031699598f, + 0.1781866f, 0.994184f, 0.6911175f, 0.7006223f, 0.20085096f, 0.28080195f, 0.42452294f, 0.40856004f, + 0.15737581f, 0.5411925f, 0.549694f, 0.4366895f, 0.5693159f, 0.3018247f, 0.63012594f, 0.6885702f, + 0.2366305f, 0.004210472f, 0.7617172f, 0.61926836f, 0.24570602f, 0.981851f, 0.273876f, 0.8378734f, + 0.75366426f, 0.080795944f, 0.82247066f, 0.040263534f, 0.22299266f, 0.41664255f, 0.16297674f, 0.98845494f, + 0.39971018f, 0.69859487f, 0.053544044f, 0.7878332f, 0.34460813f, 0.11966437f, 0.5731115f, 0.7422309f, + 0.93269855f, 0.19460368f, 0.25394785f, 0.59613144f, 0.6356306f, 0.6922361f, 0.7744376f, 0.38662314f, + 0.7777848f, 0.8686458f, 0.36938924f, 0.8557286f, 0.74428976f, 0.9410264f, 0.21586305f, 0.2530955f, + 0.35543054f, 0.52536315f, 0.8000995f, 0.21456867f, 0.750327f, 0.3208093f, 0.80205464f, 0.47626138f, + 0.061956525f, 0.22487706f, 0.13812399f, 0.74798125f, 0.1647259f, 0.45834088f, 0.6078779f, 0.22580266f, + 0.644235f, 0.011788309f, 0.14224577f, 0.0469383f, 0.34876132f, 0.3178513f, 0.5715967f, 0.40754277f, + 0.735041f, 0.9583977f, 0.67939556f, 0.30301625f, 0.031807184f, 0.68110096f, 0.25227106f, 0.75443816f, + 0.83424246f, 0.69286025f, 
0.9691554f, 0.9748982f, 0.60586995f, 0.13568163f, 0.94672066f, 0.26275212f, + 0.2638232f, 0.9183893f, 0.88740516f, 0.65107566f, 0.5313419f, 0.07941705f, 0.44809794f, 0.9795632f, + 0.6273294f, 0.542809f, 0.3961745f, 0.32560885f, 0.79801136f, 0.53083426f, 0.8252871f, 0.4115007f, + 0.7184546f, 0.70638496f, 0.57973206f, 0.8141865f, 0.81332296f, 0.96346164f, 0.88438797f, 0.37215167f, + 0.0766899f, 0.5914087f, 0.49563587f, 0.3695873f, 0.41627264f, 0.5235164f, 0.86481494f, 0.6558706f, + 0.32245284f, 0.29438752f, 0.37618434f, 0.3067485f, 0.9496114f, 0.76482266f, 0.95148784f, 0.5015968f, + 0.60083544f, 0.67338234f, 0.026723444f, 0.5446483f, 0.466555f, 0.21967298f, 0.112026334f, 0.9426372f, + 0.906533f, 0.73173434f, 0.97712487f, 0.29709607f, 0.41363865f, 0.6893093f, 0.4173867f, 0.4018826f, + 0.086719275f, 0.63433063f, 0.1978364f, 0.5181831f, 0.9874878f, 0.34609234f, 0.34240413f, 0.8016564f, + 0.31617337f, 0.4570613f, 0.96686924f, 0.29501313f, 0.14229488f, 0.22017813f, 0.36137718f, 0.26275063f, + 0.24053413f, 0.70197225f, 0.58496886f, 0.33996922f, 0.11154431f, 0.34257007f, 0.28898042f, 0.33729053f, + 0.048938513f, 0.60771453f, 0.13263822f, 0.11060041f, 0.091483414f, 0.70869184f, 0.19898665f, 0.29362458f, + 0.8919203f, 0.7654821f, 0.7866956f, 0.02524674f, 0.1414501f, 0.3112445f, 0.9130488f, 0.5511502f, + 0.12605143f, 0.5031309f, 0.11166459f, 0.39045036f, 0.36251247f, 0.9328308f, 0.65486836f, 0.41281444f, + 0.5844644f, 0.35566723f, 0.6964502f, 0.6977819f, 0.63427305f, 0.30511153f, 0.92657536f, 0.42781502f, + 0.30534166f, 0.813157f, 0.90752834f, 0.9975799f, 0.64812917f, 0.32955307f, 0.753946f, 0.92897725f, + 0.009582937f, 0.43805653f, 0.15901726f, 0.5931799f, 0.7067924f, 0.39670604f, 0.45817143f, 0.7250554f, + 0.41596514f, 0.08011025f, 0.900068f, 0.24834275f, 0.44507074f, 0.5471632f, 0.46995157f, 0.029657006f, + 0.7294f, 0.27288425f, 0.2406702f, 0.6194577f, 0.23906898f, 0.26892018f, 0.33152503f, 0.3121612f, + 0.29118127f, 0.36515707f, 0.6299379f, 0.095391035f, 0.19735986f, 0.5072957f, 0.56953406f, 0.77614623f, + 0.14877802f, 0.65959847f, 0.7841949f, 0.7776301f, 0.03428924f, 0.3091979f, 0.07021719f, 0.18359429f, + 0.77849144f, 0.42534047f, 0.7123557f, 0.20649683f, 0.57597995f, 0.19757104f, 0.749946f, 0.2813105f, + 0.37462044f, 0.06618434f, 0.50165176f, 0.9747401f, 0.7426891f, 0.23322952f, 0.50672436f, 0.44517577f, + 0.09746289f, 0.89204556f, 0.50806034f, 0.6052985f, 0.2980855f, 0.26604044f, 0.5824448f, 0.68485546f, + 0.612149f, 0.25902748f, 0.9854489f, 0.4263978f, 0.19379246f, 0.26614368f, 0.9922104f, 0.5000241f, + 0.4321279f, 0.2919191f, 0.3689273f, 0.078885734f, 0.10265827f, 0.79264474f, 0.9277247f, 0.9771502f, + 0.13902885f, 0.77043164f, 0.19051671f, 0.7982801f, 0.86077714f, 0.8869355f, 0.86002564f, 0.81278664f, + 0.5097318f, 0.7297412f, 0.32111454f, 0.7177174f, 0.33929902f, 0.49160433f, 0.064810574f, 0.3692627f, + 0.23706353f, 0.3313396f, 0.18070674f, 0.05027789f, 0.53255826f, 0.8244896f, 0.9553747f, 0.7917771f, + 0.24083132f, 0.005495131f, 0.6896569f, 0.78015697f, 0.07074398f, 0.67929304f, 0.9227386f, 0.5302883f, + 0.19877058f, 0.90993816f, 0.71350795f, 0.8311006f, 0.16185725f, 0.79097277f, 0.15846318f, 0.99474716f, + 0.28815013f, 0.80128354f, 0.6001208f, 0.63250524f, 0.4233225f, 0.7053677f, 0.29161406f, 0.028710365f, + 0.30789846f, 0.8917693f, 0.36836517f, 0.6571592f, 0.3151368f, 0.8750746f, 0.7992451f, 0.6765068f, + 0.24441916f, 0.091435075f, 0.5188247f, 0.20667112f, 0.9110969f, 0.019512117f, 0.72343415f, 0.998457f, + 0.7504142f, 0.6704894f, 0.01892668f, 0.9809466f, 0.41447622f, 0.032795787f, 
0.9935814f, 0.29653466f, + 0.4646262f, 0.95763975f, 0.15339965f, 0.14625502f, 0.58130866f, 0.43307304f, 0.6151709f, 0.08064735f, + 0.5149533f, 0.27762014f, 0.25419557f, 0.04218155f, 0.7651092f, 0.59631824f, 0.077278376f, 0.89677596f, + 0.6508104f, 0.5927816f, 0.2064318f, 0.57540226f, 0.9817701f, 0.84294224f, 0.11056489f, 0.9564106f, + 0.5387549f, 0.74048257f, 0.88833815f, 0.9262546f, 0.11023259f, 0.93783194f, 0.16041255f, 0.53748304f, + 0.1506182f, 0.39038336f, 0.47727865f, 0.44018233f, 0.42101204f, 0.53943527f, 0.99320936f, 0.79050577f, + 0.77973497f, 0.7001237f, 0.88709056f, 0.4769255f, 0.5397561f, 0.60289854f, 0.06393474f, 0.09722155f, + 0.5613007f, 0.30437487f, 0.49082512f, 0.3852706f, 0.5778314f, 0.8253078f, 0.33417904f, 0.9004303f, + 0.8947809f, 0.11625093f, 0.11388689f, 0.09546256f, 0.22598988f, 0.30536187f, 0.46236527f, 0.3784039f, + 0.24737573f, 0.3411532f, 0.31912774f, 0.9905191f, 0.31468558f, 0.14199954f, 0.7078488f, 0.47111923f, + 0.882782f, 0.8124163f, 0.9593644f, 0.13382024f, 0.8214317f, 0.9196194f, 0.25308424f, 0.95958996f}; + const std::vector fc1_experts_bias = { + 0.8748215f, 0.5054756f, 0.74107623f, 0.32518923f, 0.0639081f, 0.62639004f, 0.64906263f, 0.17322052f, + 0.7424998f, 0.07288867f, 0.93031204f, 0.9841952f, 0.6361292f, 0.18628561f, 0.7433356f, 0.5852079f, + 0.6359594f, 0.66432667f, 0.88067776f, 0.28508204f, 0.38752747f, 0.63635296f, 0.55448055f, 0.9031888f, + 0.23738074f, 0.48179168f, 0.5934266f, 0.3672055f, 0.84085834f, 0.5546908f, 0.03788501f, 0.44583207f, + 0.27322155f, 0.5485856f, 0.44189203f, 0.00403291f, 0.40888733f, 0.45211035f, 0.35256076f, 0.9593902f, + 0.39090043f, 0.8212086f, 0.62385887f, 0.07793343f, 0.61749303f, 0.9143678f, 0.17294967f, 0.17681253f, + 0.9894245f, 0.901755f, 0.221053f, 0.8008725f, 0.43603396f, 0.007035315f, 0.5375667f, 0.661547f, + 0.35001957f, 0.67394173f, 0.072449565f, 0.84650797f, 0.92626715f, 0.77573335f, 0.58474565f, 0.66467446f}; + const std::vector fc2_experts_bias = { + 0.13822609f, 0.3750633f, 0.45226622f, 0.22175694f, 0.13068998f, 0.8363088f, 0.8393226f, 0.045905888f, + 0.65910596f, 0.7034011f, 0.97498417f, 0.78927684f, 0.95966834f, 0.33630514f, 0.8501932f, 0.9067007f, + 0.027835965f, 0.09864664f, 0.6012027f, 0.7730189f, 0.25159347f, 0.55506724f, 0.49927413f, 0.62655383f, + 0.23132521f, 0.7820195f, 0.8325047f, 0.15307087f, 0.5048437f, 0.5013873f, 0.66055787f, 0.96579224f}; + const std::vector output = { + 1.3775184f, 2.0985768f, 2.091839f, 2.9706357f, 1.9404914f, 1.9915576f, 2.3302228f, 2.3702593f, + 0.51896286f, 0.7936432f, 0.9944805f, 1.3225251f, 0.73894113f, 0.87975955f, 1.0468717f, 1.1585085f, + 0.012911659f, 0.045757107f, 0.27884653f, 0.3585817f, 0.116771236f, 0.25755364f, 0.23161705f, 0.2906256f, + 4.8571277f, 5.649453f, 5.485141f, 5.306299f, 4.767025f, 6.9010167f, 5.3520975f, 6.711155f}; + + RunMoETest(input, + router_probs, + fc1_experts_weights, + fc2_experts_weights, + fc1_experts_bias, + fc2_experts_bias, + output, + num_rows, + num_experts, + hidden_size, + inter_size, + "relu"); +} + +} // namespace test +} // namespace onnxruntime diff --git a/onnxruntime/test/python/transformers/test_parity_moe.py b/onnxruntime/test/python/transformers/test_parity_moe.py new file mode 100644 index 0000000000000..72ca5d9975c05 --- /dev/null +++ b/onnxruntime/test/python/transformers/test_parity_moe.py @@ -0,0 +1,431 @@ +# -------------------------------------------------------------------------- +# Copyright 2020 The HuggingFace Inc. 
team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# ------------------------------------------------------------------------- + +import time +import unittest + +import numpy +import pytest +import torch +import torch.nn as nn +import torch.nn.functional as F +from onnx import TensorProto, helper + +import onnxruntime + +torch.manual_seed(42) +numpy.random.seed(42) + + +ORT_DTYPE = TensorProto.FLOAT16 +NP_TYPE = numpy.float16 if ORT_DTYPE == TensorProto.FLOAT16 else numpy.float32 +THRESHOLD = 3e-2 + + +def value_string_of(numpy_array): + arr = numpy_array.flatten() + lines = ["f, ".join([str(v) for v in arr[i : min(i + 8, arr.size)]]) for i in range(0, arr.size, 8)] + return "{\n " + "f,\n ".join(lines) + "f}" + + +def print_tensor(name, numpy_array): + print(f"const std::vector {name} = {value_string_of(numpy_array)};") + + +def create_moe_onnx_graph( + num_rows, + num_experts, + hidden_size, + inter_size, + fc1_experts_weights, + fc2_experts_weights, + fc1_experts_bias, + fc2_experts_bias, +): + nodes = [ + helper.make_node( + "MoE", + [ + "input", + "router_probs", + "fc1_experts_weights", + "fc2_experts_weights", + "fc1_experts_bias", + "fc2_experts_bias", + ], + ["output"], + "MoE_0", + k=1, + activation_type="gelu", + domain="com.microsoft", + ), + ] + + fc1_shape = [num_experts, hidden_size, inter_size] + fc2_shape = [num_experts, inter_size, hidden_size] + + torch_type = torch.float16 if ORT_DTYPE == TensorProto.FLOAT16 else torch.float32 + + initializers = [ + helper.make_tensor( + "fc1_experts_weights", + ORT_DTYPE, + fc1_shape, + fc1_experts_weights.to(torch_type).flatten().tolist(), + raw=False, + ), + helper.make_tensor( + "fc2_experts_weights", + ORT_DTYPE, + fc2_shape, + fc2_experts_weights.to(torch_type).flatten().tolist(), + raw=False, + ), + ] + + fc1_bias_shape = [num_experts, inter_size] + fc2_bias_shape = [num_experts, hidden_size] + initializers.extend( + [ + helper.make_tensor( + "fc1_experts_bias", + ORT_DTYPE, + fc1_bias_shape, + fc1_experts_bias.to(torch_type).flatten().tolist(), + raw=False, + ), + helper.make_tensor( + "fc2_experts_bias", + ORT_DTYPE, + fc2_bias_shape, + fc2_experts_bias.to(torch_type).flatten().tolist(), + raw=False, + ), + ] + ) + + graph_inputs = [ + helper.make_tensor_value_info("input", ORT_DTYPE, [num_rows, hidden_size]), + ] + + graph_inputs.append( + helper.make_tensor_value_info( + "router_probs", + ORT_DTYPE, + [num_rows, num_experts], + ) + ) + + graph_outputs = [ + helper.make_tensor_value_info("output", ORT_DTYPE, [num_rows, hidden_size]), + ] + + graph = helper.make_graph( + nodes, + "MoE_Graph", + graph_inputs, + graph_outputs, + initializers, + ) + + model = helper.make_model(graph) + return model.SerializeToString() + + +def get_activation_fn(activation): + if activation == "relu": + return nn.ReLU + elif activation == "gelu": + return nn.GELU + else: + raise NotImplementedError + + +class MoEGate(nn.Module): + def __init__(self, num_experts, in_features): + super().__init__() + self.wg_reduction = torch.nn.Linear(in_features, 16, bias=False) + + wg = torch.empty(num_experts, 16) + torch.nn.init.orthogonal_(wg, 
gain=0.32) + self.register_parameter("wg", torch.nn.Parameter(wg)) + + def forward(self, input): + input = self.wg_reduction(input) + with torch.no_grad(): + wg_norm = self.wg.norm(p=2.0, dim=1, keepdim=True) + self.wg.mul_(1.5 / wg_norm) + logits = self._cosine(input, self.wg) + return logits + + def _cosine(self, mat1, mat2, eps=1e-4): + assert mat1.dim() == 2 + assert mat2.dim() == 2 + + mat2 = F.normalize(mat2.float(), p=2.0, dim=1, eps=eps) + return mat1.float().matmul(mat2.transpose(0, 1)).type_as(mat1) + + +class MoERuntimeExperts(nn.Module): + def __init__( + self, + num_experts, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.0, + bias=True, + chunk_size=-1, + ): + super().__init__() + # assert bias is False, "Current bias is not supported" + assert drop == 0.0, "Current drop is not supported" + assert chunk_size == -1, "Current chunk is not supported" + + self.weight1 = nn.Parameter(torch.rand(num_experts, in_features, hidden_features)) + self.weight2 = nn.Parameter(torch.rand(num_experts, hidden_features, out_features)) + + self.bias1 = nn.Parameter(torch.rand(num_experts, hidden_features)) if bias else None + self.bias2 = nn.Parameter(torch.rand(num_experts, in_features)) if bias else None + + self.act = act_layer() + + def forward(self, x, indices_s): + x = x.unsqueeze(1) + x = self.bmm(x, self.weight1, indices_s) + if self.bias1 is not None: + x = x + self.bias1[indices_s].unsqueeze(1) # S x hidden_features + x = self.act(x) + x = self.bmm(x, self.weight2, indices_s) + if self.bias2 is not None: + x = x + self.bias2[indices_s].unsqueeze(1) # S x 1 x in_features + return x + + def bmm(self, x, weight, indices_s): + x = torch.bmm(x, weight[indices_s]) # S x 1 x hidden_features + return x + + +class MoE(nn.Module): + def __init__( + self, + batch_size, + num_rows, + num_experts, + in_features, + hidden_features=None, + out_features=None, + eval_capacity=-1, + activation="gelu", + ): + super().__init__() + self.num_experts = num_experts + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.eval_capacity = eval_capacity # -1 means we route all tokens + + self.gate = MoEGate(num_experts=num_experts, in_features=in_features) + self.moe_experts = MoERuntimeExperts( + num_experts=num_experts, + in_features=in_features, + hidden_features=hidden_features, + out_features=out_features, + act_layer=get_activation_fn(activation), + bias=True, + ) + + self.moe_onnx_graph = create_moe_onnx_graph( + batch_size * num_rows, + num_experts, + in_features, + hidden_features, + self.moe_experts.weight1, + self.moe_experts.weight2, + self.moe_experts.bias1, + self.moe_experts.bias2, + ) + + self.ort_sess = self.create_ort_session() + + self.torch_input = torch.randn(batch_size, num_rows, in_features) + + def create_ort_session(self): + from onnxruntime import InferenceSession, SessionOptions + + sess_options = SessionOptions() + + cuda_providers = ["CUDAExecutionProvider"] + if cuda_providers[0] not in onnxruntime.get_available_providers(): + return None + + sess_options.log_severity_level = 2 + ort_session = InferenceSession(self.moe_onnx_graph, sess_options, providers=["CUDAExecutionProvider"]) + + return ort_session + + def ort_run_with_iobinding(self, ort_inputs, repeat=1000): + iobinding = self.ort_sess.io_binding() + device_id = torch.cuda.current_device() + + iobinding.bind_input( + name="input", + device_type="cuda", + device_id=device_id, + element_type=NP_TYPE, + shape=ort_inputs["input"].shape, + 
buffer_ptr=onnxruntime.OrtValue.ortvalue_from_numpy(ort_inputs["input"], "cuda", device_id).data_ptr(), + ) + iobinding.bind_input( + name="router_probs", + device_type="cuda", + device_id=device_id, + element_type=NP_TYPE, + shape=ort_inputs["router_probs"].shape, + buffer_ptr=onnxruntime.OrtValue.ortvalue_from_numpy( + ort_inputs["router_probs"], "cuda", device_id + ).data_ptr(), + ) + + iobinding.synchronize_inputs() + + iobinding.bind_output( + name="output", + device_type="cuda", + device_id=device_id, + element_type=NP_TYPE, + shape=ort_inputs["input"].shape, + buffer_ptr=onnxruntime.OrtValue.ortvalue_from_numpy( + numpy.zeros(ort_inputs["input"].shape), "cuda", device_id + ).data_ptr(), + ) + iobinding.synchronize_outputs() + + s = time.time() + for _ in range(repeat): + self.ort_sess.run_with_iobinding(iobinding) + e = time.time() + print(f"MoE cuda kernel time: {(e - s) / repeat * 1000} ms") + + def torch_forward(self): + x = self.torch_input + + b, t, c = x.shape + x = x.reshape(-1, c) + logits = self.gate(x) + gates = torch.nn.functional.softmax(logits, dim=1) + ret = torch.max(gates, dim=1) + indices_s = ret.indices # dim: [bs], the index of the expert with highest softmax value + scores = ret.values.unsqueeze(-1).unsqueeze(-1) # S + x = self.moe_experts(x, indices_s) + + x = x * scores + x = x.reshape(b * t, c) + + return x, torch.sum(x) + + def onnx_forward(self, iobinding=False): + x = self.torch_input + + _, _, c = x.shape + y = x.reshape(-1, c) + logits = self.gate(y) + + ort_inputs = { + "input": numpy.ascontiguousarray(y.detach().numpy().astype(NP_TYPE)), + "router_probs": numpy.ascontiguousarray(logits.detach().numpy().astype(NP_TYPE)), + } + + ort_output = None + if self.ort_sess is not None: + if not iobinding: + ort_output = self.ort_sess.run(None, ort_inputs) + else: + self.ort_run_with_iobinding(ort_inputs) + return None + + # print_tensor("input", ort_inputs["input"]) + # print_tensor("router_probs", ort_inputs["router_probs"]) + # print_tensor("fc1_experts_weights", self.moe_experts.weight1.detach().numpy()) + # print_tensor("fc2_experts_weights", self.moe_experts.weight2.detach().numpy()) + # print_tensor("fc1_experts_bias", self.moe_experts.bias1.detach().numpy()) + # print_tensor("fc2_experts_bias", self.moe_experts.bias2.detach().numpy()) + # print_tensor("output", ort_output[0]) + + return ort_output + + def parity_check(self): + torch_out = self.torch_forward() + ort_out = self.onnx_forward() + if ort_out is not None: + # print("max diff", numpy.max(numpy.abs(torch_out[0].detach().numpy() - ort_out[0]))) + assert numpy.allclose(torch_out[0].detach().numpy(), ort_out[0], rtol=THRESHOLD, atol=THRESHOLD) + + def benchmark(self): + self.onnx_forward(iobinding=True) + + +class TestMoE(unittest.TestCase): + def test_moe_small(self): + rt = MoE( + batch_size=2, + num_rows=8, + num_experts=4, + in_features=16, + hidden_features=32, + out_features=16, + ) + rt.parity_check() + + @pytest.mark.slow + def test_moe_large(self): + for batch_size in [1, 8]: + for num_rows in [16, 64]: + for num_experts in [16, 64]: + for in_features in [256]: + for hidden_features in [512]: + print( + f"batch_size={batch_size}, num_rows={num_rows}, num_experts={num_experts}, in_features={in_features}, hidden_features={hidden_features}" + ) + rt = MoE( + batch_size=batch_size, + num_rows=num_rows, + num_experts=num_experts, + in_features=in_features, + hidden_features=hidden_features, + out_features=in_features, + ) + rt.parity_check() + + @pytest.mark.slow + def test_moe_benchmark(self): 
+ for batch_size in [32, 64]: + for num_rows in [128, 512]: + for num_experts in [64, 128]: + for in_features in [256, 512]: + for hidden_features in [1024, 2048]: + print( + f"batch_size={batch_size}, num_rows={num_rows}, num_experts={num_experts}, in_features={in_features}, hidden_features={hidden_features}" + ) + rt = MoE( + batch_size=batch_size, + num_rows=num_rows, + num_experts=num_experts, + in_features=in_features, + hidden_features=hidden_features, + out_features=in_features, + ) + rt.benchmark() + + +if __name__ == "__main__": + unittest.main() diff --git a/tools/ci_build/github/azure-pipelines/templates/download-deps.yml b/tools/ci_build/github/azure-pipelines/templates/download-deps.yml index 6fc076441108c..f2deb2041e06e 100644 --- a/tools/ci_build/github/azure-pipelines/templates/download-deps.yml +++ b/tools/ci_build/github/azure-pipelines/templates/download-deps.yml @@ -11,7 +11,7 @@ steps: packageType: upack feed: '/7424c8e4-5c62-490e-95c4-79446f31017c' definition: '517c4f6f-5437-4392-a70d-4f15ec5be2f0' - version: 1.0.117 + version: 1.0.118 downloadPath: $(Build.BinariesDirectory)/deps # The private ADO project @@ -22,7 +22,7 @@ steps: packageType: upack feed: '/4c7631f5-24c0-4307-8822-1aa8f180c325' definition: 'fd9dd5ad-b73e-4678-890e-edcf680dbc1a' - version: 1.0.117 + version: 1.0.118 downloadPath: $(Build.BinariesDirectory)/deps # You can add more ADO accounts at here. From 05526b354b0d29049aa29427def6d861cdec5faa Mon Sep 17 00:00:00 2001 From: Jian Chen Date: Tue, 14 Nov 2023 19:47:39 -0800 Subject: [PATCH 02/38] Adding new yaml file for downloading cuda, and trt from azure blob (#18443) This also set the Path variable for the downloaded libraries. ### Description ### Motivation and Context --- .../jobs/download_win_gpu_library.yml | 51 +++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 tools/ci_build/github/azure-pipelines/templates/jobs/download_win_gpu_library.yml diff --git a/tools/ci_build/github/azure-pipelines/templates/jobs/download_win_gpu_library.yml b/tools/ci_build/github/azure-pipelines/templates/jobs/download_win_gpu_library.yml new file mode 100644 index 0000000000000..4573c56963e34 --- /dev/null +++ b/tools/ci_build/github/azure-pipelines/templates/jobs/download_win_gpu_library.yml @@ -0,0 +1,51 @@ +parameters: + - name: DownloadCUDA + type: boolean + default: false + - name: DownloadTRT + type: boolean + default: false + - name: CudaVersion + type: string + default: '11.8' + values: + - 11.8 + - 12.2 + +steps: + + - ${{ if eq(parameters.DownloadCUDA, true) }}: + - powershell: | + azcopy.exe cp --recursive https://lotusscus.blob.core.windows.net/models/cuda_sdk/v${{ parameters.CudaVersion }} $(Agent.TempDirectory) + displayName: 'Download CUDA SDK v${{ parameters.CudaVersion }}' + - powershell: | + Write-Host "##vso[task.prependpath]$(Agent.TempDirectory)\v${{ parameters.CudaVersion }}\bin;$(Agent.TempDirectory)\v${{ parameters.CudaVersion }}\extras\CUPTI\lib64" + displayName: 'Append CUDA SDK Directory to PATH' + - task: CmdLine@2 + inputs: + script: | + echo %PATH% + displayName: 'Print PATH' + + - ${{ if eq(parameters.DownloadTRT, true) }}: + - ${{ if eq(parameters.CudaVersion, '11.8') }}: + - powershell: | + azcopy.exe cp --recursive https://lotusscus.blob.core.windows.net/models/local/TensorRT-8.6.1.6.Windows10.x86_64.cuda-11.8 $(Agent.TempDirectory) + displayName: 'Download TensorRT-8.6.1.6.Windows10.x86_64.cuda-11.8' + - powershell: | + Write-Host 
"##vso[task.prependpath]$(Agent.TempDirectory)\TensorRT-8.6.1.6.Windows10.x86_64.cuda-11.8\lib" + displayName: 'Append CUDA SDK Directory to PATH' + + - ${{ if eq(parameters.CudaVersion, '12.2') }}: + - powershell: | + azcopy.exe cp --recursive https://lotusscus.blob.core.windows.net/models/local/TensorRT-8.6.1.6.Windows10.x86_64.cuda-12.0 $(Agent.TempDirectory) + displayName: 'Download TensorRT-8.6.1.6.Windows10.x86_64.cuda-12.0' + - powershell: | + Write-Host "##vso[task.prependpath]$(Agent.TempDirectory)\TensorRT-8.6.1.6.Windows10.x86_64.cuda-12.0\lib" + displayName: 'Append CUDA SDK Directory to PATH' + + - task: CmdLine@2 + inputs: + script: | + echo %PATH% + displayName: 'Print PATH' \ No newline at end of file From d738ff16ecf7ba5c21211e745c492153eea15c59 Mon Sep 17 00:00:00 2001 From: Tianlei Wu Date: Tue, 14 Nov 2023 20:24:32 -0800 Subject: [PATCH 03/38] SDXL demo: consistent opt shape and seed (#18445) ### Description A few refinements: (1) Use fixed optimized shape for dynamic engine of TRT. (2) Use same seed in base and refiner. (3) Save metadata to png file so that it is easy to reproduce. (4) Disable EulerA scheduler for XL since it has issue in refiner with 1.16.2. (5) Limit height and width to be divisible by 64. (6) Update document to add a link of downloading optimized model. --------- Co-authored-by: kunal-vaishnavi <115581922+kunal-vaishnavi@users.noreply.github.com> --- .../models/stable_diffusion/README.md | 31 +++++----- .../models/stable_diffusion/demo_txt2img.py | 27 ++++++++- .../stable_diffusion/demo_txt2img_xl.py | 60 ++++++++++++++----- .../models/stable_diffusion/demo_utils.py | 25 ++++---- .../stable_diffusion/diffusion_models.py | 7 +++ .../engine_builder_ort_cuda.py | 6 +- .../pipeline_stable_diffusion.py | 25 ++++++-- 7 files changed, 125 insertions(+), 56 deletions(-) diff --git a/onnxruntime/python/tools/transformers/models/stable_diffusion/README.md b/onnxruntime/python/tools/transformers/models/stable_diffusion/README.md index 0955fcde6a672..1ec1ca3ba0c83 100644 --- a/onnxruntime/python/tools/transformers/models/stable_diffusion/README.md +++ b/onnxruntime/python/tools/transformers/models/stable_diffusion/README.md @@ -22,12 +22,6 @@ These optimizations are firstly carried out on CUDA EP. They may not work on oth | [optimize_pipeline.py](./optimize_pipeline.py) | Optimize Stable Diffusion ONNX models exported from Huggingface diffusers or optimum | | [benchmark.py](./benchmark.py) | Benchmark latency and memory of OnnxRuntime, xFormers or PyTorch 2.0 on stable diffusion. | -In some example, we run the scripts in source code directory. You can get source code like the following: - -``` -git clone https://github.com/microsoft/onnxruntime -cd onnxruntime/onnxruntime/python/tools/transformers/models/stable_diffusion -``` ## Run demo with docker @@ -36,6 +30,7 @@ cd onnxruntime/onnxruntime/python/tools/transformers/models/stable_diffusion git clone https://github.com/microsoft/onnxruntime cd onnxruntime ``` + #### Launch NVIDIA pytorch container Install nvidia-docker using [these instructions](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker). @@ -44,12 +39,6 @@ Install nvidia-docker using [these instructions](https://docs.nvidia.com/datacen docker run --rm -it --gpus all -v $PWD:/workspace nvcr.io/nvidia/pytorch:23.10-py3 /bin/bash ``` -Optionally, you can update TensorRT from 8.6.1 to latest pre-release. 
-``` -python3 -m pip install --upgrade pip -python3 -m pip install --pre --upgrade --extra-index-url https://pypi.nvidia.com tensorrt -``` - #### Build onnxruntime from source After launching the docker, you can build and install onnxruntime-gpu wheel like the following. ``` @@ -61,6 +50,7 @@ sh build.sh --config Release --build_shared_lib --parallel --use_cuda --cuda_ve --cmake_extra_defines onnxruntime_BUILD_UNIT_TESTS=OFF \ --cmake_extra_defines CMAKE_CUDA_ARCHITECTURES=80 \ --allow_running_as_root +python3 -m pip install --upgrade pip python3 -m pip install build/Linux/Release/dist/onnxruntime_gpu-1.17.0-cp310-cp310-linux_x86_64.whl --force-reinstall ``` @@ -83,7 +73,7 @@ python3 demo_txt2img_xl.py --help For example: `--engine {ORT_CUDA,ORT_TRT,TRT}` can be used to choose different backend engines including CUDA or TensorRT execution provider of ONNX Runtime, or TensorRT. -`--work-dir WORK_DIR` can be used to save models under a specified directory. +`--work-dir WORK_DIR` can be used to load or save models under the given directory. You can download the [optimized ONNX models of Stable Diffusion XL 1.0](https://huggingface.co/tlwu/stable-diffusion-xl-1.0-onnxruntime#usage-example) to save time in running the XL demo. #### Generate an image guided by a text prompt ```python3 demo_txt2img.py "astronaut riding a horse on mars"``` @@ -93,11 +83,12 @@ For example: If you do not provide prompt, the script will generate different image sizes for a list of prompts for demonstration. -It is recommended to use a machine with 64 GB or more memory to run this demo. +## Optimize Stable Diffusion ONNX models for Hugging Face Diffusers or Optimum -## Example of Stable Diffusion 1.5 or XL +If you are able to run the above demo with docker, you can use the docker and skip the following setup and fast forward to [Export ONNX pipeline](#export-onnx-pipeline). -Below is example to optimize Stable Diffusion 1.5 or XL in Linux. For Windows OS, please change the format of path to be like `.\sd` instead of `./sd`. +Below setup does not use docker. We'll use the environment to optimize ONNX models of Stable Diffusion exported by huggingface diffusers or optimum. +For Windows OS, please change the format of path to be like `.\sd` instead of `./sd`. It is recommended to create a Conda environment with Python 3.10 for the following setup: ``` @@ -217,7 +208,13 @@ Example to optimize the exported float32 ONNX models, and save to float16 models python -m onnxruntime.transformers.models.stable_diffusion.optimize_pipeline -i ./sd_v1_5/fp32 -o ./sd_v1_5/fp16 --float16 ``` -For SDXL model, it is recommended to use a machine with 32 GB or more memory to optimize. +In all examples below, we run the scripts in source code directory. You can get source code like the following: +``` +git clone https://github.com/microsoft/onnxruntime +cd onnxruntime/onnxruntime/python/tools/transformers/models/stable_diffusion +``` + +For SDXL model, it is recommended to use a machine with 48 GB or more memory to optimize. 
``` python optimize_pipeline.py -i ./sd_xl_base_onnx -o ./sd_xl_base_fp16 --float16 ``` diff --git a/onnxruntime/python/tools/transformers/models/stable_diffusion/demo_txt2img.py b/onnxruntime/python/tools/transformers/models/stable_diffusion/demo_txt2img.py index 57f7986503a5c..4636f139d4613 100644 --- a/onnxruntime/python/tools/transformers/models/stable_diffusion/demo_txt2img.py +++ b/onnxruntime/python/tools/transformers/models/stable_diffusion/demo_txt2img.py @@ -53,10 +53,31 @@ f"Batch size {len(prompt)} is larger than allowed {max_batch_size}. If dynamic shape is used, then maximum batch size is 4" ) - min_image_size = 512 - max_image_size = 1024 if args.version in ["2.0", "2.1"] else 768 + # For TensorRT, performance of engine built with dynamic shape is very sensitive to the range of image size. + # Here, we reduce the range of image size for TensorRT to trade-off flexibility and performance. + # This range can cover common used shape of landscape 512x768, portrait 768x512, or square 512x512 and 768x768. + min_image_size = 512 if args.engine != "ORT_CUDA" else 256 + max_image_size = 768 if args.engine != "ORT_CUDA" else 1024 pipeline_info = PipelineInfo(args.version, min_image_size=min_image_size, max_image_size=max_image_size) - pipeline = init_pipeline(Txt2ImgPipeline, pipeline_info, engine_type, args, max_batch_size, batch_size) + + # Ideally, the optimized batch size and image size for TRT engine shall align with user's preference. That is to + # optimize the shape used most frequently. We can let user config it when we develop a UI plugin. + # In this demo, we optimize batch size 1 and image size 512x512 (or 768x768 for SD 2.0/2.1) for dynamic engine. + # This is mainly for benchmark purpose to simulate the case that we have no knowledge of user's preference. + opt_batch_size = 1 if args.build_dynamic_batch else batch_size + opt_image_height = pipeline_info.default_image_size() if args.build_dynamic_shape else args.height + opt_image_width = pipeline_info.default_image_size() if args.build_dynamic_shape else args.width + + pipeline = init_pipeline( + Txt2ImgPipeline, + pipeline_info, + engine_type, + args, + max_batch_size, + opt_batch_size, + opt_image_height, + opt_image_width, + ) if engine_type == EngineType.TRT: max_device_memory = max(pipeline.backend.max_device_memory(), pipeline.backend.max_device_memory()) diff --git a/onnxruntime/python/tools/transformers/models/stable_diffusion/demo_txt2img_xl.py b/onnxruntime/python/tools/transformers/models/stable_diffusion/demo_txt2img_xl.py index 364af9e8e9923..974759bb6ae4b 100644 --- a/onnxruntime/python/tools/transformers/models/stable_diffusion/demo_txt2img_xl.py +++ b/onnxruntime/python/tools/transformers/models/stable_diffusion/demo_txt2img_xl.py @@ -48,17 +48,45 @@ def load_pipelines(args, batch_size): # For TensorRT, performance of engine built with dynamic shape is very sensitive to the range of image size. # Here, we reduce the range of image size for TensorRT to trade-off flexibility and performance. + # This range can cover most frequent shape of landscape (832x1216), portrait (1216x832) or square (1024x1024). min_image_size = 832 if args.engine != "ORT_CUDA" else 512 max_image_size = 1216 if args.engine != "ORT_CUDA" else 2048 # No VAE decoder in base when it outputs latent instead of image. 
base_info = PipelineInfo(args.version, use_vae=False, min_image_size=min_image_size, max_image_size=max_image_size) - base = init_pipeline(Txt2ImgXLPipeline, base_info, engine_type, args, max_batch_size, batch_size) + + # Ideally, the optimized batch size and image size for TRT engine shall align with user's preference. That is to + # optimize the shape used most frequently. We can let user config it when we develop a UI plugin. + # In this demo, we optimize batch size 1 and image size 1024x1024 for SD XL dynamic engine. + # This is mainly for benchmark purpose to simulate the case that we have no knowledge of user's preference. + opt_batch_size = 1 if args.build_dynamic_batch else batch_size + opt_image_height = base_info.default_image_size() if args.build_dynamic_shape else args.height + opt_image_width = base_info.default_image_size() if args.build_dynamic_shape else args.width + + base = init_pipeline( + Txt2ImgXLPipeline, + base_info, + engine_type, + args, + max_batch_size, + opt_batch_size, + opt_image_height, + opt_image_width, + ) refiner_info = PipelineInfo( args.version, is_refiner=True, min_image_size=min_image_size, max_image_size=max_image_size ) - refiner = init_pipeline(Img2ImgXLPipeline, refiner_info, engine_type, args, max_batch_size, batch_size) + refiner = init_pipeline( + Img2ImgXLPipeline, + refiner_info, + engine_type, + args, + max_batch_size, + opt_batch_size, + opt_image_height, + opt_image_width, + ) if engine_type == EngineType.TRT: max_device_memory = max(base.backend.max_device_memory(), refiner.backend.max_device_memory()) @@ -96,6 +124,9 @@ def run_base_and_refiner(warmup=False): return_type="latent", ) + # Use same seed in base and refiner. + seed = base.get_current_seed() + images, time_refiner = refiner.run( prompt, negative_prompt, @@ -105,7 +136,7 @@ def run_base_and_refiner(warmup=False): warmup=warmup, denoising_steps=args.denoising_steps, guidance=args.guidance, - seed=args.seed, + seed=seed, ) return images, time_base + time_refiner @@ -160,20 +191,20 @@ def run_dynamic_shape_demo(args): "blue owl, big green eyes, portrait, intricate metal design, unreal engine, octane render, realistic", ] - # batch size, height, width, scheduler, steps, prompt + # batch size, height, width, scheduler, steps, prompt, seed configs = [ - (1, 832, 1216, "UniPC", 8, prompts[0]), - (1, 1024, 1024, "DDIM", 24, prompts[1]), - (1, 1216, 832, "EulerA", 18, prompts[2]), - (2, 1344, 768, "DDIM", 30, prompts[3]), - (2, 640, 1536, "UniPC", 18, prompts[4]), - (2, 1152, 896, "EulerA", 30, prompts[5]), + (1, 832, 1216, "UniPC", 8, prompts[0], None), + (1, 1024, 1024, "DDIM", 24, prompts[1], None), + (1, 1216, 832, "UniPC", 16, prompts[2], None), + (1, 1344, 768, "DDIM", 24, prompts[3], None), + (2, 640, 1536, "UniPC", 16, prompts[4], 4312973633252712), + (2, 1152, 896, "DDIM", 24, prompts[5], 1964684802882906), ] - # Warm up (for cudnn convolution algo search) once before serving. + # Warm up each combination of (batch size, height, width) once before serving. args.prompt = ["warm up"] args.num_warmup_runs = 1 - for batch_size, height, width, _, _, _ in configs: + for batch_size, height, width, _, _, _, _ in configs: args.batch_size = batch_size args.height = height args.width = width @@ -183,17 +214,18 @@ def run_dynamic_shape_demo(args): # Run pipeline on a list of prompts. 
args.num_warmup_runs = 0 - for batch_size, height, width, scheduler, steps, example_prompt in configs: + for batch_size, height, width, scheduler, steps, example_prompt, seed in configs: args.prompt = [example_prompt] args.batch_size = batch_size args.height = height args.width = width args.scheduler = scheduler args.denoising_steps = steps + args.seed = seed base.set_scheduler(scheduler) refiner.set_scheduler(scheduler) print( - f"\nbatch_size={batch_size}, height={height}, width={width}, scheduler={scheduler}, steps={steps}, prompt={example_prompt}" + f"\nbatch_size={batch_size}, height={height}, width={width}, scheduler={scheduler}, steps={steps}, prompt={example_prompt}, seed={seed}" ) prompt, negative_prompt = repeat_prompt(args) run_pipelines(args, base, refiner, prompt, negative_prompt, is_warm_up=False) diff --git a/onnxruntime/python/tools/transformers/models/stable_diffusion/demo_utils.py b/onnxruntime/python/tools/transformers/models/stable_diffusion/demo_utils.py index f28e5963b46d1..ef45b786b9ea3 100644 --- a/onnxruntime/python/tools/transformers/models/stable_diffusion/demo_utils.py +++ b/onnxruntime/python/tools/transformers/models/stable_diffusion/demo_utils.py @@ -68,7 +68,7 @@ def parse_arguments(is_xl: bool, description: str): "--scheduler", type=str, default="DDIM", - choices=["DDIM", "EulerA", "UniPC"], + choices=["DDIM", "UniPC"] if is_xl else ["DDIM", "EulerA", "UniPC"], help="Scheduler for diffusion process", ) @@ -174,9 +174,9 @@ def parse_arguments(is_xl: bool, description: str): ) # Validate image dimensions - if args.height % 8 != 0 or args.width % 8 != 0: + if args.height % 64 != 0 or args.width % 64 != 0: raise ValueError( - f"Image height and width have to be divisible by 8 but specified as: {args.height} and {args.width}." + f"Image height and width have to be divisible by 64 but specified as: {args.height} and {args.width}." 
) if (args.build_dynamic_batch or args.build_dynamic_shape) and not args.disable_cuda_graph: @@ -209,7 +209,9 @@ def repeat_prompt(args): return prompt, negative_prompt -def init_pipeline(pipeline_class, pipeline_info, engine_type, args, max_batch_size, batch_size): +def init_pipeline( + pipeline_class, pipeline_info, engine_type, args, max_batch_size, opt_batch_size, opt_image_height, opt_image_width +): onnx_dir, engine_dir, output_dir, framework_model_dir, timing_cache = get_engine_paths( work_dir=args.work_dir, pipeline_info=pipeline_info, engine_type=engine_type ) @@ -234,9 +236,6 @@ def init_pipeline(pipeline_class, pipeline_info, engine_type, args, max_batch_si engine_dir=engine_dir, framework_model_dir=framework_model_dir, onnx_dir=onnx_dir, - opt_image_height=args.height, - opt_image_width=args.height, - opt_batch_size=batch_size, force_engine_rebuild=args.force_engine_build, device_id=torch.cuda.current_device(), ) @@ -247,9 +246,9 @@ def init_pipeline(pipeline_class, pipeline_info, engine_type, args, max_batch_si framework_model_dir, onnx_dir, args.onnx_opset, - opt_image_height=args.height, - opt_image_width=args.height, - opt_batch_size=batch_size, + opt_image_height=opt_image_height, + opt_image_width=opt_image_width, + opt_batch_size=opt_batch_size, force_engine_rebuild=args.force_engine_build, static_batch=not args.build_dynamic_batch, static_image_shape=not args.build_dynamic_shape, @@ -264,9 +263,9 @@ def init_pipeline(pipeline_class, pipeline_info, engine_type, args, max_batch_si framework_model_dir, onnx_dir, args.onnx_opset, - opt_batch_size=batch_size, - opt_image_height=args.height, - opt_image_width=args.height, + opt_batch_size=opt_batch_size, + opt_image_height=opt_image_height, + opt_image_width=opt_image_width, force_export=args.force_onnx_export, force_optimize=args.force_onnx_optimize, force_build=args.force_engine_build, diff --git a/onnxruntime/python/tools/transformers/models/stable_diffusion/diffusion_models.py b/onnxruntime/python/tools/transformers/models/stable_diffusion/diffusion_models.py index 81e5a0417c001..514205d3b8945 100644 --- a/onnxruntime/python/tools/transformers/models/stable_diffusion/diffusion_models.py +++ b/onnxruntime/python/tools/transformers/models/stable_diffusion/diffusion_models.py @@ -211,6 +211,13 @@ def min_image_size(self): def max_image_size(self): return self._max_image_size + def default_image_size(self): + if self.is_xl(): + return 1024 + if self.version in ("2.0", "2.1"): + return 768 + return 512 + class BaseModel: def __init__( diff --git a/onnxruntime/python/tools/transformers/models/stable_diffusion/engine_builder_ort_cuda.py b/onnxruntime/python/tools/transformers/models/stable_diffusion/engine_builder_ort_cuda.py index 07c675b2ed990..a03ca7ce2912c 100644 --- a/onnxruntime/python/tools/transformers/models/stable_diffusion/engine_builder_ort_cuda.py +++ b/onnxruntime/python/tools/transformers/models/stable_diffusion/engine_builder_ort_cuda.py @@ -159,9 +159,6 @@ def build_engines( framework_model_dir: str, onnx_dir: str, onnx_opset_version: int = 17, - opt_image_height: int = 512, - opt_image_width: int = 512, - opt_batch_size: int = 1, force_engine_rebuild: bool = False, device_id: int = 0, save_fp32_intermediate_model=False, @@ -209,7 +206,8 @@ def build_engines( with torch.inference_mode(): # For CUDA EP, export FP32 onnx since some graph fusion only supports fp32 graph pattern. 
- inputs = model_obj.get_sample_input(opt_batch_size, opt_image_height, opt_image_width) + # Export model with sample of batch size 1, image size 512 x 512 + inputs = model_obj.get_sample_input(1, 512, 512) + torch.onnx.export( + model, diff --git a/onnxruntime/python/tools/transformers/models/stable_diffusion/pipeline_stable_diffusion.py b/onnxruntime/python/tools/transformers/models/stable_diffusion/pipeline_stable_diffusion.py index 51b0966194189..e675c9a7b3bf5 100644 --- a/onnxruntime/python/tools/transformers/models/stable_diffusion/pipeline_stable_diffusion.py +++ b/onnxruntime/python/tools/transformers/models/stable_diffusion/pipeline_stable_diffusion.py @@ -109,7 +109,7 @@ def __init__( self.tokenizer = None self.tokenizer2 = None - self.generator = None + self.generator = torch.Generator(device="cuda") self.actual_steps = None self.current_scheduler = None @@ -181,8 +181,13 @@ def load_resources(self, image_height, image_width, batch_size): self.backend.load_resources(image_height, image_width, batch_size) def set_random_seed(self, seed): - # Initialize noise generator. Usually, it is done before a batch of inference. - self.generator = torch.Generator(device="cuda").manual_seed(seed) if isinstance(seed, int) else None + if isinstance(seed, int): + self.generator.manual_seed(seed) + else: + self.generator.seed() + + def get_current_seed(self): + return self.generator.initial_seed() def teardown(self): for e in self.events.values(): @@ -452,8 +457,18 @@ def save_images(self, images, pipeline, prompt): images = self.to_pil_image(images) random_session_id = str(random.randint(1000, 9999)) for i, image in enumerate(images): + seed = str(self.get_current_seed()) image_path = os.path.join( - self.output_dir, image_name_prefix + str(i + 1) + "-" + random_session_id + ".png" + self.output_dir, image_name_prefix + str(i + 1) + "-" + random_session_id + "-" + seed + ".png" ) print(f"Saving image {i+1} / {len(images)} to: {image_path}") - image.save(image_path) + + from PIL import PngImagePlugin + + metadata = PngImagePlugin.PngInfo() + metadata.add_text("prompt", prompt[i]) + metadata.add_text("batch_size", str(len(images))) + metadata.add_text("denoising_steps", str(self.denoising_steps)) + metadata.add_text("actual_steps", str(self.actual_steps)) + metadata.add_text("seed", seed) + image.save(image_path, "PNG", pnginfo=metadata) From b0699d901c8e8e8dcfe4fd3b72d54ef7bef5bf75 Mon Sep 17 00:00:00 2001 From: Vincent Wang Date: Wed, 15 Nov 2023 13:46:38 +0800 Subject: [PATCH 04/38] Support Graph Input and Initializer for GatherToSplit Fusion (#18412) Support graph inputs and initializers in GatherToSplit fusion. Previously, the fusion required each Gather node to consume the output of some other node, which could not be a graph input or an initializer. This helps training graphs that hit this case: they no longer end up with a GatherGrad node, whose kernel implementation is very inefficient.
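As an illustration only (not part of this change; the tensor names, shapes, and opset below are hypothetical), here is a minimal sketch of the pattern the fusion can now handle: three Gather nodes reading scalar indices 0, 1, 2 along the same size-3 axis of a shared graph input, mirroring the new unit tests.

```python
# Minimal pre-fusion pattern, assuming the standard onnx.helper API.
# The shared producer "data" is a graph input, which this change now supports.
from onnx import TensorProto, helper

axis = 2  # the gathered dimension has size 3, one slice per Gather consumer
indices = [helper.make_tensor(f"idx_{i}", TensorProto.INT64, [], [i]) for i in range(3)]
gathers = [
    helper.make_node("Gather", ["data", f"idx_{i}"], [f"out_{i}"], axis=axis)
    for i in range(3)
]
graph = helper.make_graph(
    gathers,
    "gather_to_split_pattern",
    [helper.make_tensor_value_info("data", TensorProto.FLOAT, [2, 3, 3, 3])],
    [helper.make_tensor_value_info(f"out_{i}", TensorProto.FLOAT, [2, 3, 3]) for i in range(3)],
    indices,
)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 14)])
# With Level1 optimizations, the three Gather nodes consuming the graph input
# "data" are expected to be rewritten into a single Split(axis=2) followed by
# three Squeeze nodes, as verified by the new
# GatherToSplitFusion_Consume_Input / _Consume_Initializer tests.
```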
--- onnxruntime/core/optimizer/gather_fusion.cc | 65 +++-- .../test/optimizer/graph_transform_test.cc | 223 ++++++++++++++++-- 2 files changed, 243 insertions(+), 45 deletions(-) diff --git a/onnxruntime/core/optimizer/gather_fusion.cc b/onnxruntime/core/optimizer/gather_fusion.cc index b994028cbca13..4903bc1d6b961 100644 --- a/onnxruntime/core/optimizer/gather_fusion.cc +++ b/onnxruntime/core/optimizer/gather_fusion.cc @@ -9,7 +9,8 @@ namespace onnxruntime { -bool GatherToSplitFusion::IsSupportedGather(const Graph& graph, const Node& node, int64_t& index, int64_t& axis, int64_t& indices_n_dims) const { +bool GatherToSplitFusion::IsSupportedGather(const Graph& graph, const Node& node, int64_t& index, int64_t& axis, + int64_t& indices_n_dims) const { if (!graph_utils::IsSupportedOptypeVersionAndDomain(node, "Gather", {1, 11, 13}) || !graph_utils::IsSupportedProvider(node, GetCompatibleExecutionProviders())) { return false; @@ -53,6 +54,22 @@ Status GatherToSplitFusion::ApplyImpl(Graph& graph, bool& modified, int graph_le GraphViewer graph_viewer(graph); const auto& node_topology_list = graph_viewer.GetNodesInTopologicalOrder(); + InlinedVector node_args; + for (auto node_arg : graph.GetInputs()) { + if (node_arg && graph.GetConsumerNodes(node_arg->Name()).size() > 1) { + node_args.push_back(node_arg); + } + } + + for (auto entry : graph.GetAllInitializedTensors()) { + if (graph.GetConsumerNodes(entry.first).size() > 1) { + auto node_arg = graph.GetNodeArg(entry.first); + if (node_arg) { + node_args.push_back(node_arg); + } + } + } + for (auto node_index : node_topology_list) { auto* p_node = graph.GetNode(node_index); if (p_node == nullptr) continue; // we removed the node as part of an earlier fusion @@ -73,7 +90,11 @@ Status GatherToSplitFusion::ApplyImpl(Graph& graph, bool& modified, int graph_le size_t output_count = node.GetOutputEdgesCount(); if (output_count <= 1) continue; - auto shape = node.MutableOutputDefs()[0]->Shape(); + node_args.push_back(node.OutputDefs()[0]); + } + + for (const NodeArg* node_arg : node_args) { + auto shape = node_arg->Shape(); if (!shape) continue; int64_t rank = static_cast(shape->dim_size()); @@ -81,11 +102,14 @@ Status GatherToSplitFusion::ApplyImpl(Graph& graph, bool& modified, int graph_le bool first_edge = true; int64_t split_axis = 0; int64_t indices_n_dims = -1; - InlinedVector gather_outputs(output_count, nullptr); + auto consumers = graph.GetConsumerNodes(node_arg->Name()); + size_t consumer_count = consumers.size(); + InlinedVector gather_outputs(consumer_count, nullptr); InlinedVector> nodes_to_fuse; - for (auto it = node.OutputNodesBegin(); it != node.OutputNodesEnd(); ++it) { + for (auto consumer : consumers) { int64_t index, axis, dims; - if (!IsSupportedGather(graph, *it, index, axis, dims)) { + if (!consumer || consumer->InputDefs()[0] != node_arg || + !IsSupportedGather(graph, *consumer, index, axis, dims)) { can_fuse = false; break; } @@ -99,7 +123,7 @@ Status GatherToSplitFusion::ApplyImpl(Graph& graph, bool& modified, int graph_le if (axis < 0) axis += rank; if (first_edge) { auto dim = shape->dim(static_cast(axis)); - if (!utils::HasDimValue(dim) || dim.dim_value() != static_cast(output_count)) { + if (!utils::HasDimValue(dim) || dim.dim_value() != static_cast(consumer_count)) { can_fuse = false; break; } @@ -109,12 +133,12 @@ Status GatherToSplitFusion::ApplyImpl(Graph& graph, bool& modified, int graph_le can_fuse = false; break; } - if (index < 0) index += static_cast(output_count); - if (index < 0 || index >= 
static_cast(output_count) || gather_outputs[static_cast(index)]) { + if (index < 0) index += static_cast(consumer_count); + if (index < 0 || index >= static_cast(consumer_count) || gather_outputs[static_cast(index)]) { can_fuse = false; break; } - Node& gather_node = *graph.GetNode(it->Index()); + Node& gather_node = *graph.GetNode(consumer->Index()); nodes_to_fuse.emplace_back(gather_node); gather_outputs[static_cast(index)] = gather_node.MutableOutputDefs()[0]; } @@ -122,8 +146,8 @@ Status GatherToSplitFusion::ApplyImpl(Graph& graph, bool& modified, int graph_le if (!can_fuse) continue; ONNX_NAMESPACE::TypeProto split_output_type; - const ONNX_NAMESPACE::TensorProto_DataType element_type = static_cast( - node.MutableOutputDefs()[0]->TypeAsProto()->tensor_type().elem_type()); + const ONNX_NAMESPACE::TensorProto_DataType element_type = + static_cast(node_arg->TypeAsProto()->tensor_type().elem_type()); split_output_type.mutable_tensor_type()->set_elem_type(element_type); for (int64_t i = 0; i < rank; ++i) { if (i == split_axis) { @@ -136,16 +160,17 @@ Status GatherToSplitFusion::ApplyImpl(Graph& graph, bool& modified, int graph_le InlinedVector split_outputs; bool add_squeeze_node = indices_n_dims == 0; if (add_squeeze_node) { - for (size_t i = 0; i < output_count; ++i) { + for (size_t i = 0; i < consumer_count; ++i) { split_outputs.emplace_back( &graph.GetOrCreateNodeArg(graph.GenerateNodeArgName("split" + std::to_string(i)), &split_output_type)); } } - Node& split_node = graph.AddNode(graph.GenerateNodeName("Split"), "Split", "Split for Fused Gather nodes", - {node.MutableOutputDefs()[0]}, add_squeeze_node ? split_outputs : gather_outputs); + Node& split_node = + graph.AddNode(graph.GenerateNodeName("Split"), "Split", "Split for Fused Gather nodes", + {graph.GetNodeArg(node_arg->Name())}, add_squeeze_node ? split_outputs : gather_outputs); split_node.AddAttribute("axis", split_axis); - split_node.SetExecutionProviderType(node.GetExecutionProviderType()); + split_node.SetExecutionProviderType(nodes_to_fuse[0].get().GetExecutionProviderType()); // Squeeze-11, Squeee-13, Split-13, Split-18 have different schemas. 
int onnx_opset_version = -1; @@ -155,16 +180,16 @@ Status GatherToSplitFusion::ApplyImpl(Graph& graph, bool& modified, int graph_le if (onnx_opset_version < 13) { if (add_squeeze_node) { - for (size_t i = 0; i < output_count; ++i) { + for (size_t i = 0; i < consumer_count; ++i) { Node& squeeze_node = graph.AddNode(graph.GenerateNodeName("Squeeze" + std::to_string(i)), "Squeeze", "Squeeze for Fused Gather nodes", {split_outputs[i]}, {gather_outputs[i]}); squeeze_node.AddAttribute("axes", std::vector{split_axis}); - squeeze_node.SetExecutionProviderType(node.GetExecutionProviderType()); + squeeze_node.SetExecutionProviderType(nodes_to_fuse[0].get().GetExecutionProviderType()); } } } else { if (onnx_opset_version >= 18) { - split_node.AddAttribute("num_outputs", static_cast(output_count)); + split_node.AddAttribute("num_outputs", static_cast(consumer_count)); } if (add_squeeze_node) { @@ -176,11 +201,11 @@ Status GatherToSplitFusion::ApplyImpl(Graph& graph, bool& modified, int graph_le axes_initializer_proto.set_raw_data(axes_value.data(), axes_value.size() * sizeof(int64_t)); NodeArg* axes_arg = &graph_utils::AddInitializer(graph, axes_initializer_proto); - for (size_t i = 0; i < output_count; ++i) { + for (size_t i = 0; i < consumer_count; ++i) { Node& squeeze_node = graph.AddNode(graph.GenerateNodeName("Squeeze" + std::to_string(i)), "Squeeze", "Squeeze for Fused Gather nodes", {split_outputs[i], axes_arg}, {gather_outputs[i]}); - squeeze_node.SetExecutionProviderType(node.GetExecutionProviderType()); + squeeze_node.SetExecutionProviderType(nodes_to_fuse[0].get().GetExecutionProviderType()); } } } diff --git a/onnxruntime/test/optimizer/graph_transform_test.cc b/onnxruntime/test/optimizer/graph_transform_test.cc index b82f3345dfcd1..17b26ed7ca4ca 100755 --- a/onnxruntime/test/optimizer/graph_transform_test.cc +++ b/onnxruntime/test/optimizer/graph_transform_test.cc @@ -6910,7 +6910,10 @@ TEST_F(GraphTransformationTests, GatherToSplitFusion) { builder.AddNode("Transpose", {gather_out_3}, {transpose_out_3}).AddAttribute("perm", std::vector{0, 2, 1}); }; - auto pre_graph_checker = [&](Graph& graph) { TEST_RETURN_IF_NOT(CountOpsInGraph(graph)["Gather"] == 3); return Status::OK(); }; + auto pre_graph_checker = [&](Graph& graph) { + TEST_RETURN_IF_NOT(CountOpsInGraph(graph)["Gather"] == 3); + return Status::OK(); + }; // OpSet-12 { @@ -6933,8 +6936,8 @@ TEST_F(GraphTransformationTests, GatherToSplitFusion) { }; std::unique_ptr transformer = std::make_unique(); - ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, 12, *logger_, std::move(transformer), TransformerLevel::Level1, 1, - pre_graph_checker, post_graph_checker)); + ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, 12, *logger_, std::move(transformer), + TransformerLevel::Level1, 1, pre_graph_checker, post_graph_checker)); } // OpSet-14 @@ -6962,8 +6965,8 @@ TEST_F(GraphTransformationTests, GatherToSplitFusion) { }; std::unique_ptr transformer = std::make_unique(); - ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, 14, *logger_, std::move(transformer), TransformerLevel::Level1, 1, - pre_graph_checker, post_graph_checker)); + ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, 14, *logger_, std::move(transformer), + TransformerLevel::Level1, 1, pre_graph_checker, post_graph_checker)); } // OpSet-18 @@ -6991,8 +6994,8 @@ TEST_F(GraphTransformationTests, GatherToSplitFusion) { }; std::unique_ptr transformer = std::make_unique(); - ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, 18, *logger_, std::move(transformer), 
TransformerLevel::Level1, 1, - pre_graph_checker, post_graph_checker)); + ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, 18, *logger_, std::move(transformer), + TransformerLevel::Level1, 1, pre_graph_checker, post_graph_checker)); } } @@ -7023,7 +7026,10 @@ TEST_F(GraphTransformationTests, GatherToSplitFusion_NoSqueeze) { builder.AddNode("Transpose", {gather_out_3}, {transpose_out_3}).AddAttribute("perm", std::vector{0, 2, 1}); }; - auto pre_graph_checker = [&](Graph& graph) { TEST_RETURN_IF_NOT(CountOpsInGraph(graph)["Gather"] == 3); return Status::OK(); }; + auto pre_graph_checker = [&](Graph& graph) { + TEST_RETURN_IF_NOT(CountOpsInGraph(graph)["Gather"] == 3); + return Status::OK(); + }; // OpSet-12 { @@ -7042,8 +7048,8 @@ TEST_F(GraphTransformationTests, GatherToSplitFusion_NoSqueeze) { }; std::unique_ptr transformer = std::make_unique(); - ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, 12, *logger_, std::move(transformer), TransformerLevel::Level1, 1, - pre_graph_checker, post_graph_checker)); + ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, 12, *logger_, std::move(transformer), + TransformerLevel::Level1, 1, pre_graph_checker, post_graph_checker)); } // OpSet-14 @@ -7063,8 +7069,8 @@ TEST_F(GraphTransformationTests, GatherToSplitFusion_NoSqueeze) { }; std::unique_ptr transformer = std::make_unique(); - ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, 14, *logger_, std::move(transformer), TransformerLevel::Level1, 1, - pre_graph_checker, post_graph_checker)); + ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, 14, *logger_, std::move(transformer), + TransformerLevel::Level1, 1, pre_graph_checker, post_graph_checker)); } // OpSet-18 @@ -7084,13 +7090,180 @@ TEST_F(GraphTransformationTests, GatherToSplitFusion_NoSqueeze) { }; std::unique_ptr transformer = std::make_unique(); - ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, 18, *logger_, std::move(transformer), TransformerLevel::Level1, 1, - pre_graph_checker, post_graph_checker)); + ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, 18, *logger_, std::move(transformer), + TransformerLevel::Level1, 1, pre_graph_checker, post_graph_checker)); + } +} + +TEST_F(GraphTransformationTests, GatherToSplitFusion_Consume_Input) { + auto build_test_case = [&](ModelTestBuilder& builder) { + auto* data_arg = builder.MakeInput({{2, 3, 3, 3}}); + auto* gather_index_1 = builder.MakeInitializer({}, {static_cast(0)}); + auto* gather_index_2 = builder.MakeInitializer({}, {static_cast(1)}); + auto* gather_index_3 = builder.MakeInitializer({}, {static_cast(2)}); + auto* gather_out_1 = builder.MakeIntermediate(); + auto* gather_out_2 = builder.MakeIntermediate(); + auto* gather_out_3 = builder.MakeIntermediate(); + auto* transpose_out_1 = builder.MakeOutput(); + auto* transpose_out_2 = builder.MakeOutput(); + auto* transpose_out_3 = builder.MakeOutput(); + + builder.AddNode("Gather", {data_arg, gather_index_1}, {gather_out_1}).AddAttribute("axis", static_cast(2)); + builder.AddNode("Gather", {data_arg, gather_index_2}, {gather_out_2}) + .AddAttribute("axis", static_cast(-2)); + builder.AddNode("Gather", {data_arg, gather_index_3}, {gather_out_3}).AddAttribute("axis", static_cast(2)); + builder.AddNode("Transpose", {gather_out_1}, {transpose_out_1}).AddAttribute("perm", std::vector{0, 2, 1}); + builder.AddNode("Transpose", {gather_out_2}, {transpose_out_2}).AddAttribute("perm", std::vector{0, 2, 1}); + builder.AddNode("Transpose", {gather_out_3}, {transpose_out_3}).AddAttribute("perm", std::vector{0, 2, 1}); + }; 
+ + auto pre_graph_checker = [&](Graph& graph) { + TEST_RETURN_IF_NOT(CountOpsInGraph(graph)["Gather"] == 3); + return Status::OK(); + }; + + // OpSet-12 + { + auto post_graph_checker = [&](Graph& graph) { + TEST_RETURN_IF_NOT(CountOpsInGraph(graph)["Gather"] == 0); + TEST_RETURN_IF_NOT(CountOpsInGraph(graph)["Split"] == 1); + TEST_RETURN_IF_NOT(CountOpsInGraph(graph)["Squeeze"] == 3); + for (auto& node : graph.Nodes()) { + if (node.OpType() == "Split") { + auto& attrs = node.GetAttributes(); + TEST_RETURN_IF_NOT(attrs.find("axis") != attrs.end()); + TEST_RETURN_IF_NOT(2 == static_cast(attrs.at("axis").i())); + } else if (node.OpType() == "Squeeze") { + auto& attrs = node.GetAttributes(); + TEST_RETURN_IF_NOT(attrs.find("axes") != attrs.end()); + TEST_RETURN_IF_NOT(2 == static_cast(attrs.at("axes").ints().at(0))); + } + } + return Status::OK(); + }; + + std::unique_ptr transformer = std::make_unique(); + ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, 12, *logger_, std::move(transformer), + TransformerLevel::Level1, 1, pre_graph_checker, post_graph_checker)); + } + + // OpSet-14 + { + auto post_graph_checker = [&](Graph& graph) { + TEST_RETURN_IF_NOT(CountOpsInGraph(graph)["Gather"] == 0); + TEST_RETURN_IF_NOT(CountOpsInGraph(graph)["Split"] == 1); + TEST_RETURN_IF_NOT(CountOpsInGraph(graph)["Squeeze"] == 3); + for (auto& node : graph.Nodes()) { + if (node.OpType() == "Split") { + auto& attrs = node.GetAttributes(); + TEST_RETURN_IF_NOT(attrs.find("axis") != attrs.end()); + TEST_RETURN_IF_NOT(2 == static_cast(attrs.at("axis").i())); + } else if (node.OpType() == "Squeeze") { + const NodeArg& input_arg = *(node.InputDefs()[1]); + const ONNX_NAMESPACE::TensorProto* tensor_proto = + graph_utils::GetConstantInitializer(graph, input_arg.Name()); + TEST_RETURN_IF_NOT(tensor_proto != nullptr); + Initializer init_const{*tensor_proto, graph.ModelPath()}; + TEST_RETURN_IF_NOT(tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_INT64); + TEST_RETURN_IF_NOT(2 == static_cast(*(init_const.data()))); + } + } + return Status::OK(); + }; + + std::unique_ptr transformer = std::make_unique(); + ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, 14, *logger_, std::move(transformer), + TransformerLevel::Level1, 1, pre_graph_checker, post_graph_checker)); + } + + // OpSet-18 + { + auto post_graph_checker = [&](Graph& graph) { + TEST_RETURN_IF_NOT(CountOpsInGraph(graph)["Gather"] == 0); + TEST_RETURN_IF_NOT(CountOpsInGraph(graph)["Split"] == 1); + TEST_RETURN_IF_NOT(CountOpsInGraph(graph)["Squeeze"] == 3); + for (auto& node : graph.Nodes()) { + if (node.OpType() == "Split") { + auto& attrs = node.GetAttributes(); + TEST_RETURN_IF_NOT(attrs.find("axis") != attrs.end()); + TEST_RETURN_IF_NOT(2 == static_cast(attrs.at("axis").i())); + } else if (node.OpType() == "Squeeze") { + const NodeArg& input_arg = *(node.InputDefs()[1]); + const ONNX_NAMESPACE::TensorProto* tensor_proto = + graph_utils::GetConstantInitializer(graph, input_arg.Name()); + TEST_RETURN_IF_NOT(tensor_proto != nullptr); + Initializer init_const{*tensor_proto, graph.ModelPath()}; + TEST_RETURN_IF_NOT(tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_INT64); + TEST_RETURN_IF_NOT(2 == static_cast(*(init_const.data()))); + } + } + return Status::OK(); + }; + + std::unique_ptr transformer = std::make_unique(); + ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, 18, *logger_, std::move(transformer), + TransformerLevel::Level1, 1, pre_graph_checker, post_graph_checker)); } } +TEST_F(GraphTransformationTests, 
GatherToSplitFusion_Consume_Initializer) { + auto build_test_case = [&](ModelTestBuilder& builder) { + auto* data_arg = builder.MakeInitializer({2, 3, 3, 3}, std::vector(54)); + auto* gather_index_1 = builder.MakeInitializer({}, {static_cast(0)}); + auto* gather_index_2 = builder.MakeInitializer({}, {static_cast(1)}); + auto* gather_index_3 = builder.MakeInitializer({}, {static_cast(2)}); + auto* gather_out_1 = builder.MakeIntermediate(); + auto* gather_out_2 = builder.MakeIntermediate(); + auto* gather_out_3 = builder.MakeIntermediate(); + auto* transpose_out_1 = builder.MakeOutput(); + auto* transpose_out_2 = builder.MakeOutput(); + auto* transpose_out_3 = builder.MakeOutput(); + + builder.AddNode("Gather", {data_arg, gather_index_1}, {gather_out_1}).AddAttribute("axis", static_cast(2)); + builder.AddNode("Gather", {data_arg, gather_index_2}, {gather_out_2}) + .AddAttribute("axis", static_cast(-2)); + builder.AddNode("Gather", {data_arg, gather_index_3}, {gather_out_3}).AddAttribute("axis", static_cast(2)); + builder.AddNode("Transpose", {gather_out_1}, {transpose_out_1}).AddAttribute("perm", std::vector{0, 2, 1}); + builder.AddNode("Transpose", {gather_out_2}, {transpose_out_2}).AddAttribute("perm", std::vector{0, 2, 1}); + builder.AddNode("Transpose", {gather_out_3}, {transpose_out_3}).AddAttribute("perm", std::vector{0, 2, 1}); + }; + + auto pre_graph_checker = [&](Graph& graph) { + TEST_RETURN_IF_NOT(CountOpsInGraph(graph)["Gather"] == 3); + return Status::OK(); + }; + + auto post_graph_checker = [&](Graph& graph) { + TEST_RETURN_IF_NOT(CountOpsInGraph(graph)["Gather"] == 0); + TEST_RETURN_IF_NOT(CountOpsInGraph(graph)["Split"] == 1); + TEST_RETURN_IF_NOT(CountOpsInGraph(graph)["Squeeze"] == 3); + for (auto& node : graph.Nodes()) { + if (node.OpType() == "Split") { + auto& attrs = node.GetAttributes(); + TEST_RETURN_IF_NOT(attrs.find("axis") != attrs.end()); + TEST_RETURN_IF_NOT(2 == static_cast(attrs.at("axis").i())); + } else if (node.OpType() == "Squeeze") { + const NodeArg& input_arg = *(node.InputDefs()[1]); + const ONNX_NAMESPACE::TensorProto* tensor_proto = graph_utils::GetConstantInitializer(graph, input_arg.Name()); + TEST_RETURN_IF_NOT(tensor_proto != nullptr); + Initializer init_const{*tensor_proto, graph.ModelPath()}; + TEST_RETURN_IF_NOT(tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_INT64); + TEST_RETURN_IF_NOT(2 == static_cast(*(init_const.data()))); + } + } + return Status::OK(); + }; + + std::unique_ptr transformer = std::make_unique(); + ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, 14, *logger_, std::move(transformer), TransformerLevel::Level1, + 1, pre_graph_checker, post_graph_checker)); +} + TEST_F(GraphTransformationTests, GatherToSplitFusion_Invalid) { - auto pre_graph_checker = [&](Graph& graph) { TEST_RETURN_IF_NOT(CountOpsInGraph(graph)["Gather"] == 3); return Status::OK(); }; + auto pre_graph_checker = [&](Graph& graph) { + TEST_RETURN_IF_NOT(CountOpsInGraph(graph)["Gather"] == 3); + return Status::OK(); + }; auto post_graph_checker = [&](Graph& graph) { TEST_RETURN_IF_NOT(CountOpsInGraph(graph)["Gather"] == 3); TEST_RETURN_IF_NOT(CountOpsInGraph(graph)["Split"] == 0); @@ -7130,8 +7303,8 @@ TEST_F(GraphTransformationTests, GatherToSplitFusion_Invalid) { }; std::unique_ptr transformer = std::make_unique(); - ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, 12, *logger_, std::move(transformer), TransformerLevel::Level1, 1, - pre_graph_checker, post_graph_checker)); + ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, 12, 
*logger_, std::move(transformer), + TransformerLevel::Level1, 1, pre_graph_checker, post_graph_checker)); } // Invalid Gather indices. @@ -7166,8 +7339,8 @@ TEST_F(GraphTransformationTests, GatherToSplitFusion_Invalid) { }; std::unique_ptr transformer = std::make_unique(); - ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, 14, *logger_, std::move(transformer), TransformerLevel::Level1, 1, - pre_graph_checker, post_graph_checker)); + ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, 14, *logger_, std::move(transformer), + TransformerLevel::Level1, 1, pre_graph_checker, post_graph_checker)); } // Invalid Gather axis. @@ -7202,8 +7375,8 @@ TEST_F(GraphTransformationTests, GatherToSplitFusion_Invalid) { }; std::unique_ptr transformer = std::make_unique(); - ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, 14, *logger_, std::move(transformer), TransformerLevel::Level1, 1, - pre_graph_checker, post_graph_checker)); + ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, 14, *logger_, std::move(transformer), + TransformerLevel::Level1, 1, pre_graph_checker, post_graph_checker)); } } @@ -7250,8 +7423,8 @@ TEST_F(GraphTransformationTests, GatherToSliceFusion) { }; std::unique_ptr transformer = std::make_unique(); - ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, 12, *logger_, std::move(transformer), TransformerLevel::Level1, 1, - pre_graph_checker, post_graph_checker)); + ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, 12, *logger_, std::move(transformer), + TransformerLevel::Level1, 1, pre_graph_checker, post_graph_checker)); } // OpSet-14, Tind is int64. @@ -7289,8 +7462,8 @@ TEST_F(GraphTransformationTests, GatherToSliceFusion) { }; std::unique_ptr transformer = std::make_unique(); - ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, 14, *logger_, std::move(transformer), TransformerLevel::Level1, 1, - pre_graph_checker, post_graph_checker)); + ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, 14, *logger_, std::move(transformer), + TransformerLevel::Level1, 1, pre_graph_checker, post_graph_checker)); } } From ed89ca573a83e5adcf86a8e2cae912b6eeb9a335 Mon Sep 17 00:00:00 2001 From: Vincent Wang Date: Wed, 15 Nov 2023 17:16:38 +0800 Subject: [PATCH 05/38] [ORTModule] Support User Config for Triton Codegen, Bugfix for Reduce-to-scalar (#18448) User can provide Triton codegen config JSON through env variable. Also fix some bugs related to reduction to scalar case. --- .../python/training/ort_triton/_codegen.py | 14 +++++--- .../python/training/ort_triton/_ir.py | 6 ++-- .../training/ort_triton/triton_op_executor.py | 8 ++++- .../orttraining_test_ortmodule_triton.py | 33 +++++++++++++++++++ 4 files changed, 53 insertions(+), 8 deletions(-) diff --git a/orttraining/orttraining/python/training/ort_triton/_codegen.py b/orttraining/orttraining/python/training/ort_triton/_codegen.py index cac9b6fc4a2b6..462491365c1fa 100644 --- a/orttraining/orttraining/python/training/ort_triton/_codegen.py +++ b/orttraining/orttraining/python/training/ort_triton/_codegen.py @@ -52,7 +52,8 @@ def codegen(self, node: IRNode, context: CodegenContext, code_buffer: CodeBuffer def _get_elementwise_offset_mask(self, offset_calc: OffsetCalculator, arg_name: str) -> Tuple[str, str]: if offset_calc.is_x_reduced(arg_name): - return "", "" + # Scalar. 
+ return "tl.full([1], 0, tl.int32)", "" if offset_calc.is_same_x_shape(arg_name): return "xindex", "xmask" if offset_calc.requires_x_mask else "" strides = offset_calc.get_input_strides(arg_name) @@ -88,13 +89,16 @@ def _get_reduce_offset_mask(self, offset_calc: OffsetCalculator, arg_name: str) if offset_calc.requires_r_mask: mask_strs.append("rmask") + # If both is_x_reduced and is_r_reduced are True, it's scalar. + if len(offset_strs) == 0: + offset_strs.append("tl.full([1, 1], 0, tl.int32)") return " + ".join(offset_strs), " & ".join(mask_strs) - def _get_offset_mask(self, node: OffsetCalculator, arg_name: str) -> Tuple[str, str]: + def _get_offset_mask(self, offset_calc: OffsetCalculator, arg_name: str) -> Tuple[str, str]: return ( - self._get_reduce_offset_mask(node, arg_name) - if node.is_reduction - else self._get_elementwise_offset_mask(node, arg_name) + self._get_reduce_offset_mask(offset_calc, arg_name) + if offset_calc.is_reduction + else self._get_elementwise_offset_mask(offset_calc, arg_name) ) def IONode(self, node: IONode, context: CodegenContext, code_buffer: CodeBuffer, indent: int): # noqa: N802 diff --git a/orttraining/orttraining/python/training/ort_triton/_ir.py b/orttraining/orttraining/python/training/ort_triton/_ir.py index 628ea822ff55b..50121cbf49804 100644 --- a/orttraining/orttraining/python/training/ort_triton/_ir.py +++ b/orttraining/orttraining/python/training/ort_triton/_ir.py @@ -131,7 +131,7 @@ def register_tensor_arg(self, tensor_arg: TensorArg): input_shape = tensor_arg.shape if tensor_arg.name in self.reduced_args: assert self.is_reduction - reduced_rank = len(input_shape) - len(self.reduce_axes) + reduced_rank = len(self.target_shape) - len(self.reduce_axes) if len(input_shape) < reduced_rank: input_shape = [sympy.Integer(1)] * (reduced_rank - len(input_shape)) + input_shape input_shape = ( @@ -143,7 +143,9 @@ def register_tensor_arg(self, tensor_arg: TensorArg): input_shape = [sympy.Integer(1)] * (len(self.target_shape) - len(input_shape)) + input_shape running_stride = sympy.Integer(1) for i in range(len(self.target_shape) - 1, -1, -1): - if self.target_shape[i] == input_shape[i]: + if self.target_shape[i] == input_shape[i] and not ( + tensor_arg.name in self.reduced_args and i in self.reduce_axes + ): strides.insert(0, running_stride) running_stride = running_stride * input_shape[i] else: diff --git a/orttraining/orttraining/python/training/ort_triton/triton_op_executor.py b/orttraining/orttraining/python/training/ort_triton/triton_op_executor.py index 8a642a1f7f26c..1fe61750e651e 100644 --- a/orttraining/orttraining/python/training/ort_triton/triton_op_executor.py +++ b/orttraining/orttraining/python/training/ort_triton/triton_op_executor.py @@ -87,7 +87,8 @@ def _gen_module(onnx_key: int, onnx_str: bytes, shapes: List[List[Union[int, str def get_config() -> str: """ Get the supported ops and other configs in JSON format to control the Triton fusion on backend side. - All supported ops are from _op_config.py. The Triton fusion will try to fuse subgraphs with connected supported ops. + All supported ops are from user config specified by env ORTMODULE_TRITON_CONFIG_FILE or from _op_config.py. + The Triton fusion will try to fuse subgraphs with connected supported ops. The initializer value can be "none", "scalar", and "all". "none": no initializer will be added to subgraphs. "scalar": only related scalar initializers will be added to subgraphs. 
@@ -95,6 +96,11 @@ def get_config() -> str: The min_nodes is used to control the minimum number of non-no-op nodes in a subgraph. """ + config_file = os.getenv("ORTMODULE_TRITON_CONFIG_FILE", "") + if config_file and os.path.exists(config_file): + with open(config_file, encoding="UTF-8") as f: + return f.read() + config = {"ops": get_supported_ops(), "initializer": "scalar", "min_nodes": 2} return json.dumps(config) diff --git a/orttraining/orttraining/test/python/orttraining_test_ortmodule_triton.py b/orttraining/orttraining/test/python/orttraining_test_ortmodule_triton.py index e66582bda9ed1..0c381d70ca4c1 100644 --- a/orttraining/orttraining/test/python/orttraining_test_ortmodule_triton.py +++ b/orttraining/orttraining/test/python/orttraining_test_ortmodule_triton.py @@ -543,6 +543,8 @@ def _gen_inputs(dtype): ([123, 4, 5, 6], [2], False), ([16, 8, 16, 8], [1, 3], True), ([16, 8, 16, 8], [0, 2], False), + ([16, 8, 16, 8], [0, 1, 2, 3], True), + ([16, 1, 16, 8], [0, 1, 2, 3], False), ], ) def test_reduce_op(op_type, onnx_dtype, input_shape_and_reduce_info): @@ -871,3 +873,34 @@ def _gen_inputs(dtype): return [torch.rand(m_n_k[0], m_n_k[2], dtype=dtype, device=DEVICE, requires_grad=True)] _run_tunable_op_test(NeuralNetGemm, dtype, _gen_inputs, "GemmTunableOp", 2) + + +def test_user_config(): + n, d, h, w = 8, 768, 12, 64 + dtype = torch.float32 + + class NeuralNetElementwise(torch.nn.Module): + def forward(self, input1, input2, input3, input4): + return input1 + input2 - input3 * input4 + + def _gen_inputs(dtype): + return [ + torch.rand(n, d, h, w, dtype=dtype, device=DEVICE, requires_grad=True), + torch.rand(w, dtype=dtype, device=DEVICE, requires_grad=True), + torch.rand(d, 1, 1, dtype=dtype, device=DEVICE, requires_grad=True), + torch.rand(n, 1, h, w, dtype=dtype, device=DEVICE, requires_grad=True), + ] + + user_config = ( + '{"ops": {"Add": {"versions": [13, 14]}, "Mul": {"versions": [13, 14]}}, ' + '"initializer": "scalar", "min_nodes": 2}' + ) + with open("user_config.json", "w", encoding="UTF-8") as f: + f.write(user_config) + os.environ["ORTMODULE_TRITON_CONFIG_FILE"] = "./user_config.json" + + # Mul is not supported, the graph is splited to 2 subgraphs with single Op, which will not be fused to TritonOp. + _run_module_test(NeuralNetElementwise, dtype, _gen_inputs, 0) + + del os.environ["ORTMODULE_TRITON_CONFIG_FILE"] + os.remove(os.path.join(os.getcwd(), "user_config.json")) From 586f06f5a1448e80be19831bb57a23e836af0a33 Mon Sep 17 00:00:00 2001 From: Yulong Wang <7679871+fs-eire@users.noreply.github.com> Date: Wed, 15 Nov 2023 09:16:29 -0800 Subject: [PATCH 06/38] [js/web] set noUnusedParameters to true and fix a few bugs (#18404) ### Description - set tsconfig "noUnusedParameters" to `true` and fix a few bugs discovered by typescript. how unused parameter is fixed: - for most code (webgl), add underscore as prefix, which is the standard ignore pattern for typescript check. - remove unused parameter from function and modify corresponding function calls (jsep) - fix a bug in ArgMinMax: this 2 operators do not have more than one input(s) so the `createArgMinMaxAttributesFromInputs()` is removed. 
- add proxy main.ts into typescript check and fix a bug in parameter passing - fixed `run()` function call and add typecheck fix (hack) --- js/.eslintrc.js | 6 +- .../backends/webgl/glsl-coordinate-lib.ts | 8 +-- .../backends/webgl/ops/concat-packed.ts | 2 +- .../lib/onnxjs/backends/webgl/ops/concat.ts | 4 +- .../lib/onnxjs/backends/webgl/ops/gather.ts | 2 +- .../lib/onnxjs/backends/webgl/ops/im2col.ts | 2 +- .../onnxjs/backends/webgl/ops/image-scaler.ts | 2 +- .../onnxjs/backends/webgl/ops/matmul-pack.ts | 4 +- .../lib/onnxjs/backends/webgl/ops/matmul.ts | 2 +- .../lib/onnxjs/backends/webgl/ops/reduce.ts | 2 +- js/web/lib/onnxjs/backends/webgl/ops/shape.ts | 4 +- js/web/lib/onnxjs/backends/webgl/ops/slice.ts | 4 +- js/web/lib/onnxjs/backends/webgl/ops/split.ts | 4 +- js/web/lib/onnxjs/backends/webgl/ops/sum.ts | 6 +- js/web/lib/onnxjs/backends/webgl/ops/tile.ts | 4 +- .../onnxjs/backends/webgl/ops/transpose.ts | 2 +- .../backends/webgl/texture-data-encoder.ts | 4 +- .../backends/webgl/texture-layout-strategy.ts | 4 +- .../onnxjs/backends/webgl/texture-manager.ts | 2 +- js/web/lib/onnxjs/execution-plan.ts | 2 +- js/web/lib/onnxjs/instrument.ts | 2 +- js/web/lib/onnxjs/util.ts | 2 +- js/web/lib/wasm/jsep/webgpu/ops/argminmax.ts | 17 ++---- js/web/lib/wasm/jsep/webgpu/ops/einsum.ts | 2 +- js/web/lib/wasm/jsep/webgpu/ops/pad.ts | 28 ++++----- .../lib/wasm/jsep/webgpu/ops/reduce-shared.ts | 2 +- js/web/lib/wasm/jsep/webgpu/ops/resize.ts | 58 +++++++++---------- js/web/lib/wasm/proxy-worker/main.ts | 57 ++++++++++++++---- js/web/lib/wasm/proxy-worker/tsconfig.json | 8 +++ js/web/package.json | 2 +- js/web/script/tsconfig.json | 3 +- .../unittests/backends/webgl/test-conv-new.ts | 2 +- .../backends/webgl/test-pack-unpack.ts | 2 +- .../unittests/backends/webgl/test-utils.ts | 4 +- js/web/tsconfig.json | 1 - 35 files changed, 146 insertions(+), 114 deletions(-) create mode 100644 js/web/lib/wasm/proxy-worker/tsconfig.json diff --git a/js/.eslintrc.js b/js/.eslintrc.js index 3a20429cfe8d2..fd30cb96a5bd0 100644 --- a/js/.eslintrc.js +++ b/js/.eslintrc.js @@ -146,9 +146,13 @@ module.exports = { }, { files: ['web/lib/**/*.ts'], excludedFiles: 'web/lib/wasm/proxy-worker/**/*', - parserOptions: { 'project': 'web/tsconfig.json' },rules: { + parserOptions: { 'project': 'web/tsconfig.json' }, + rules: { 'no-underscore-dangle': 'off', } + }, { + files: ['web/lib/wasm/proxy-worker/**/*.ts'], + parserOptions: { 'project': 'web/lib/wasm/proxy-worker/tsconfig.json' }, }, { files: ['web/lib/onnxjs/**/*.ts'], rules: { // TODO: those rules are useful. should turn on them in future (webgl refactor) diff --git a/js/web/lib/onnxjs/backends/webgl/glsl-coordinate-lib.ts b/js/web/lib/onnxjs/backends/webgl/glsl-coordinate-lib.ts index dd3f1b30dfb46..1f2b27c7bdea8 100644 --- a/js/web/lib/onnxjs/backends/webgl/glsl-coordinate-lib.ts +++ b/js/web/lib/onnxjs/backends/webgl/glsl-coordinate-lib.ts @@ -186,7 +186,7 @@ export class CoordsGlslLib extends GlslLib { /** * 1D packed output coordinates. */ - protected getOutputPacked1DCoords(shape: [number], texShape: [number, number]): GlslLibRoutine { + protected getOutputPacked1DCoords(_shape: [number], texShape: [number, number]): GlslLibRoutine { const packedTexShape = texShape; let source = ''; if (packedTexShape[0] === 1) { @@ -331,7 +331,7 @@ export class CoordsGlslLib extends GlslLib { /** * Unpacked 1D output coordinates. 
*/ - protected getOutputUnpacked1DCoords(shape: [number], texShape: [number, number]): GlslLibRoutine { + protected getOutputUnpacked1DCoords(_shape: [number], texShape: [number, number]): GlslLibRoutine { const source = ` int getOutputCoords() { ivec2 resTexRC = ivec2(TexCoords.xy * @@ -641,7 +641,7 @@ export class CoordsGlslLib extends GlslLib { if (outRank < 2 && inRank > 0) { unpackedCoordsSnippet = 'coords'; } else { - unpackedCoordsSnippet = inShape.map((s, i) => `coords.${fields[i + rankDiff]}`).join(', '); + unpackedCoordsSnippet = inShape.map((_s, i) => `coords.${fields[i + rankDiff]}`).join(', '); } let output = 'return outputValue;'; @@ -734,7 +734,7 @@ export class CoordsGlslLib extends GlslLib { if (outRank < 2 && inRank > 0) { unpackedCoordsSnippet = 'coords'; } else { - unpackedCoordsSnippet = inputLayout.unpackedShape.map((s, i) => `coords.${fields[i + rankDiff]}`).join(', '); + unpackedCoordsSnippet = inputLayout.unpackedShape.map((_s, i) => `coords.${fields[i + rankDiff]}`).join(', '); } const source = ` float ${funcName}() { diff --git a/js/web/lib/onnxjs/backends/webgl/ops/concat-packed.ts b/js/web/lib/onnxjs/backends/webgl/ops/concat-packed.ts index 709f883ae12c9..d0e589a428825 100644 --- a/js/web/lib/onnxjs/backends/webgl/ops/concat-packed.ts +++ b/js/web/lib/onnxjs/backends/webgl/ops/concat-packed.ts @@ -12,7 +12,7 @@ import {getChannels, unpackFromChannel} from './packing-utils'; const createPackedConcatProgramMetadata = (inputCount: number, cacheHint: string) => ({ name: 'Concat (packed)', - inputNames: Array.from({length: inputCount}, (v, i) => `X${i}`), + inputNames: Array.from({length: inputCount}, (_v, i) => `X${i}`), inputTypes: Array(inputCount).fill(TextureType.packed), cacheHint }); diff --git a/js/web/lib/onnxjs/backends/webgl/ops/concat.ts b/js/web/lib/onnxjs/backends/webgl/ops/concat.ts index c2b18ef86f814..f85f4032feae1 100644 --- a/js/web/lib/onnxjs/backends/webgl/ops/concat.ts +++ b/js/web/lib/onnxjs/backends/webgl/ops/concat.ts @@ -30,13 +30,13 @@ export const concat: OperatorImplementation = const createUnpackedConcatProgramMetadata = (inputCount: number, cacheHint: string) => ({ name: 'Concat', - inputNames: Array.from({length: inputCount}, (v, i) => `X${i}`), + inputNames: Array.from({length: inputCount}, (_v, i) => `X${i}`), inputTypes: Array(inputCount).fill(TextureType.unpacked), cacheHint }); const createUnpackedConcatProgramInfo = - (handler: WebGLInferenceHandler, metadata: ProgramMetadata, inputs: Tensor[], axis: number): ProgramInfo => { + (_handler: WebGLInferenceHandler, metadata: ProgramMetadata, inputs: Tensor[], axis: number): ProgramInfo => { const inputShape = inputs[0].dims.slice(); if (axis >= inputShape.length || axis < (-1 * inputShape.length)) { throw new Error('axis specified for concat doesn\'t match input dimensionality'); diff --git a/js/web/lib/onnxjs/backends/webgl/ops/gather.ts b/js/web/lib/onnxjs/backends/webgl/ops/gather.ts index 54b6ccd1a3685..bb44a20d75f34 100644 --- a/js/web/lib/onnxjs/backends/webgl/ops/gather.ts +++ b/js/web/lib/onnxjs/backends/webgl/ops/gather.ts @@ -30,7 +30,7 @@ const gatherProgramMetadata = { }; const createGatherProgramInfo = - (handler: WebGLInferenceHandler, metadata: ProgramMetadata, inputs: Tensor[], axis: number): ProgramInfo => { + (_handler: WebGLInferenceHandler, metadata: ProgramMetadata, inputs: Tensor[], axis: number): ProgramInfo => { const inputShape = inputs[0].dims.slice(); const indexDataShape = inputs[1].dims.slice(); const outputShape = new Array(inputShape.length + 
indexDataShape.length - 1); diff --git a/js/web/lib/onnxjs/backends/webgl/ops/im2col.ts b/js/web/lib/onnxjs/backends/webgl/ops/im2col.ts index f74c35b612665..a1da13ec48d70 100644 --- a/js/web/lib/onnxjs/backends/webgl/ops/im2col.ts +++ b/js/web/lib/onnxjs/backends/webgl/ops/im2col.ts @@ -15,7 +15,7 @@ const createIm2ColProgramMetadata = (cacheHint: string) => ({ }); const createIm2ColProgramInfo = - (inferenceHandler: WebGLInferenceHandler, metadata: ProgramMetadata, x: Tensor, w: Tensor, + (_inferenceHandler: WebGLInferenceHandler, metadata: ProgramMetadata, x: Tensor, w: Tensor, outputShape: readonly number[], attributes: ConvAttributes): ProgramInfo => { const xshape = x.dims; const wshape = w.dims; diff --git a/js/web/lib/onnxjs/backends/webgl/ops/image-scaler.ts b/js/web/lib/onnxjs/backends/webgl/ops/image-scaler.ts index 1cd5288251433..efc79f686c960 100644 --- a/js/web/lib/onnxjs/backends/webgl/ops/image-scaler.ts +++ b/js/web/lib/onnxjs/backends/webgl/ops/image-scaler.ts @@ -35,7 +35,7 @@ const imageScalerProgramMetadata = { }; const createImageScalerProgramInfo = - (handler: WebGLInferenceHandler, metadata: ProgramMetadata, inputs: Tensor[], attributes: ImageScalerAttributes): + (_handler: WebGLInferenceHandler, metadata: ProgramMetadata, inputs: Tensor[], attributes: ImageScalerAttributes): ProgramInfo => { const outputShape = inputs[0].dims.slice(); const rank = outputShape.length; diff --git a/js/web/lib/onnxjs/backends/webgl/ops/matmul-pack.ts b/js/web/lib/onnxjs/backends/webgl/ops/matmul-pack.ts index fb3c2357ae8fe..0be6d1ba8bcd2 100644 --- a/js/web/lib/onnxjs/backends/webgl/ops/matmul-pack.ts +++ b/js/web/lib/onnxjs/backends/webgl/ops/matmul-pack.ts @@ -107,10 +107,10 @@ function getBcastSamplerForMatmul( const rankADiff = outRank - inARank; const rankBDiff = outRank - inBRank; - unpackedACoordsSnippet = inAShape.map((s, i) => `coords.${allGlChannels[i + rankADiff]}`); + unpackedACoordsSnippet = inAShape.map((_s, i) => `coords.${allGlChannels[i + rankADiff]}`); unpackedACoordsSnippet[inARank - 1] = 'i*2'; unpackedACoordsSnippet.join(', '); - unpackedBCoordsSnippet = inBShape.map((s, i) => `coords.${allGlChannels[i + rankBDiff]}`); + unpackedBCoordsSnippet = inBShape.map((_s, i) => `coords.${allGlChannels[i + rankBDiff]}`); unpackedBCoordsSnippet[inBRank - 2] = 'i*2'; unpackedBCoordsSnippet.join(', '); diff --git a/js/web/lib/onnxjs/backends/webgl/ops/matmul.ts b/js/web/lib/onnxjs/backends/webgl/ops/matmul.ts index 704128fb4858e..523165f29f852 100644 --- a/js/web/lib/onnxjs/backends/webgl/ops/matmul.ts +++ b/js/web/lib/onnxjs/backends/webgl/ops/matmul.ts @@ -117,7 +117,7 @@ export function getBiasForMatmul( if (outRank < 2 && inRank > 0) { unpackedCoordsSnippet = 'coords'; } else { - unpackedCoordsSnippet = inShape.map((s, i) => `coords.${allGlChannels[i + rankDiff]}`).join(', '); + unpackedCoordsSnippet = inShape.map((_s, i) => `coords.${allGlChannels[i + rankDiff]}`).join(', '); } const broadcastDims = BroadcastUtil.getBroadcastDims(inShape, outShape); const coordsSnippet = broadcastDims.map(d => `coords.${allGlChannels[d + rankDiff]} = 0;`).join('\n'); diff --git a/js/web/lib/onnxjs/backends/webgl/ops/reduce.ts b/js/web/lib/onnxjs/backends/webgl/ops/reduce.ts index 1a2bc7422c833..c9ea460a6f1fc 100644 --- a/js/web/lib/onnxjs/backends/webgl/ops/reduce.ts +++ b/js/web/lib/onnxjs/backends/webgl/ops/reduce.ts @@ -46,7 +46,7 @@ export const parseReduceAttributes: OperatorInitialization = ( }; const createReduceProgramInfo = - (handler: WebGLInferenceHandler, inputs: Tensor[], 
attributes: ReduceAttributes, name: string, reduceOp: ReduceOp, + (_handler: WebGLInferenceHandler, inputs: Tensor[], attributes: ReduceAttributes, _name: string, reduceOp: ReduceOp, reduceProgramMetadata: ProgramMetadata): ProgramInfo => { const outputShape: number[] = []; const iRank = inputs[0].dims.length || 1; diff --git a/js/web/lib/onnxjs/backends/webgl/ops/shape.ts b/js/web/lib/onnxjs/backends/webgl/ops/shape.ts index 51acf5042d8bd..c2d703ed04fa0 100644 --- a/js/web/lib/onnxjs/backends/webgl/ops/shape.ts +++ b/js/web/lib/onnxjs/backends/webgl/ops/shape.ts @@ -4,7 +4,7 @@ import {Tensor} from '../../../tensor'; import {WebGLInferenceHandler} from '../inference-handler'; -export const shape = (inferenceHandler: WebGLInferenceHandler, inputs: Tensor[]): Tensor[] => { +export const shape = (_inferenceHandler: WebGLInferenceHandler, inputs: Tensor[]): Tensor[] => { validateInputs(inputs); return [new Tensor([inputs[0].dims.length], 'int32', undefined, undefined, new Int32Array(inputs[0].dims))]; }; @@ -13,4 +13,4 @@ const validateInputs = (inputs: Tensor[]): void => { if (!inputs || inputs.length !== 1) { throw new Error('Shape requires 1 input.'); } -}; \ No newline at end of file +}; diff --git a/js/web/lib/onnxjs/backends/webgl/ops/slice.ts b/js/web/lib/onnxjs/backends/webgl/ops/slice.ts index d32a76bbc8628..81fc1b7076fdb 100644 --- a/js/web/lib/onnxjs/backends/webgl/ops/slice.ts +++ b/js/web/lib/onnxjs/backends/webgl/ops/slice.ts @@ -42,8 +42,8 @@ export const parseSliceAttributes: OperatorInitialization = (no }; const createSliceProgramInfo = - (inferenceHandler: WebGLInferenceHandler, input: Tensor, attributes: SliceAttributes): ProgramInfo => { - const axes = (attributes.axes.length === 0) ? input.dims.slice(0).map((val, i) => i) : attributes.axes; + (_inferenceHandler: WebGLInferenceHandler, input: Tensor, attributes: SliceAttributes): ProgramInfo => { + const axes = (attributes.axes.length === 0) ? 
input.dims.slice(0).map((_val, i) => i) : attributes.axes; const normalizedAxes = ShapeUtil.normalizeAxes(axes, input.dims.length); const starts = attributes.starts.map((start, i) => { if (start > input.dims[normalizedAxes[i]] - 1) { diff --git a/js/web/lib/onnxjs/backends/webgl/ops/split.ts b/js/web/lib/onnxjs/backends/webgl/ops/split.ts index d1bd00d47eebd..2ab14563d80e2 100644 --- a/js/web/lib/onnxjs/backends/webgl/ops/split.ts +++ b/js/web/lib/onnxjs/backends/webgl/ops/split.ts @@ -49,13 +49,13 @@ export const parseSplitAttributes: OperatorInitialization = (no }; const getProgramCount = - (inferenceHandler: WebGLInferenceHandler, inputs: Tensor[], axis: number, attributes: SplitAttributes): number => { + (_inferenceHandler: WebGLInferenceHandler, inputs: Tensor[], axis: number, attributes: SplitAttributes): number => { const [, offsets] = SplitUtil.splitShape(inputs[0].dims, axis, attributes.split, attributes.numOutputs); return offsets.length; }; const createSplitProgramInfo = - (inferenceHandler: WebGLInferenceHandler, input: Tensor, attributes: SplitAttributes, axis: number, index: number): + (_inferenceHandler: WebGLInferenceHandler, input: Tensor, attributes: SplitAttributes, axis: number, index: number): ProgramInfo => { const [shapes, offsets] = SplitUtil.splitShape(input.dims, axis, attributes.split, attributes.numOutputs); const offset = offsets[index]; diff --git a/js/web/lib/onnxjs/backends/webgl/ops/sum.ts b/js/web/lib/onnxjs/backends/webgl/ops/sum.ts index c05286d16f936..2c25b10c5872c 100644 --- a/js/web/lib/onnxjs/backends/webgl/ops/sum.ts +++ b/js/web/lib/onnxjs/backends/webgl/ops/sum.ts @@ -11,7 +11,7 @@ export const sum = (inferenceHandler: WebGLInferenceHandler, inputs: Tensor[]): const sumProgramMetadata = { name: 'Sum', - inputNames: inputs.map((v, i) => `X${i}`), + inputNames: inputs.map((_v, i) => `X${i}`), inputTypes: new Array(inputs.length).fill(TextureType.unpacked) }; @@ -24,7 +24,7 @@ const createSumProgramInfo = (inferenceHandler: WebGLInferenceHandler, inputs: Tensor[], sumProgramMetadata: ProgramMetadata): ProgramInfo => { const glsl = getGlsl(inferenceHandler.session.backend.glContext.version); const outputShape = inputs[0].dims.slice(); - const sumLine = inputs.map((v, i) => `${glsl.texture2D}(X${i},TexCoords)`).join(' + '); + const sumLine = inputs.map((_v, i) => `${glsl.texture2D}(X${i},TexCoords)`).join(' + '); const shaderSource = ` void main() { vec4 result = ${sumLine}; @@ -65,4 +65,4 @@ const validateInputs = (inputs: Tensor[]): void => { throw new Error('Input types are not matched.'); } } -}; \ No newline at end of file +}; diff --git a/js/web/lib/onnxjs/backends/webgl/ops/tile.ts b/js/web/lib/onnxjs/backends/webgl/ops/tile.ts index 42128c7abc48c..1d2cba7d9d75f 100644 --- a/js/web/lib/onnxjs/backends/webgl/ops/tile.ts +++ b/js/web/lib/onnxjs/backends/webgl/ops/tile.ts @@ -22,7 +22,7 @@ export const tile = (inferenceHandler: WebGLInferenceHandler, inputs: Tensor[]): }; const createTileProgramInfo = - (handler: WebGLInferenceHandler, inputs: Tensor[], tileProgramMetadata: ProgramMetadata): ProgramInfo => { + (_handler: WebGLInferenceHandler, inputs: Tensor[], tileProgramMetadata: ProgramMetadata): ProgramInfo => { const inputShape = inputs[0].dims.slice(); const outputShape = new Array(inputShape.length); @@ -63,4 +63,4 @@ const validateInputs = (inputs: Tensor[]): void => { if (inputs[1].type !== 'int32' && inputs[1].type !== 'int16') { throw new Error('Invalid repeat type.'); } -}; \ No newline at end of file +}; diff --git 
a/js/web/lib/onnxjs/backends/webgl/ops/transpose.ts b/js/web/lib/onnxjs/backends/webgl/ops/transpose.ts index 815ff13f1f925..d3e7b3c0823be 100644 --- a/js/web/lib/onnxjs/backends/webgl/ops/transpose.ts +++ b/js/web/lib/onnxjs/backends/webgl/ops/transpose.ts @@ -36,7 +36,7 @@ export const parseTransposeAttributes: OperatorInitialization createAttributeWithCacheKey({perm: node.attributes.getInts('perm', [])}); const createTransposeProgramInfo = - (inferenceHandler: WebGLInferenceHandler, input: Tensor, perm: number[]): ProgramInfo => { + (_inferenceHandler: WebGLInferenceHandler, input: Tensor, perm: number[]): ProgramInfo => { const inputShape = input.dims; perm = getAdjustedPerm(inputShape, perm); const unpackedOutputShape = getOutputShape(inputShape, perm); diff --git a/js/web/lib/onnxjs/backends/webgl/texture-data-encoder.ts b/js/web/lib/onnxjs/backends/webgl/texture-data-encoder.ts index 5135c63702316..4b0cf3f037921 100644 --- a/js/web/lib/onnxjs/backends/webgl/texture-data-encoder.ts +++ b/js/web/lib/onnxjs/backends/webgl/texture-data-encoder.ts @@ -82,7 +82,7 @@ export class RedFloat32DataEncoder implements DataEncoder { } decode(buffer: Encoder.DataArrayType, dataSize: number): Float32Array { if (this.channelSize === 1) { - const filteredData = (buffer as Float32Array).filter((value, index) => index % 4 === 0).subarray(0, dataSize); + const filteredData = (buffer as Float32Array).filter((_value, index) => index % 4 === 0).subarray(0, dataSize); return filteredData; } return buffer.subarray(0, dataSize) as Float32Array; @@ -119,7 +119,7 @@ export class RGBAFloatDataEncoder implements DataEncoder { } decode(buffer: Encoder.DataArrayType, dataSize: number): Float32Array { if (this.channelSize === 1) { - const filteredData = (buffer as Float32Array).filter((value, index) => index % 4 === 0).subarray(0, dataSize); + const filteredData = (buffer as Float32Array).filter((_value, index) => index % 4 === 0).subarray(0, dataSize); return filteredData; } return buffer.subarray(0, dataSize) as Float32Array; diff --git a/js/web/lib/onnxjs/backends/webgl/texture-layout-strategy.ts b/js/web/lib/onnxjs/backends/webgl/texture-layout-strategy.ts index c89ef3d23638d..f8e370747928c 100644 --- a/js/web/lib/onnxjs/backends/webgl/texture-layout-strategy.ts +++ b/js/web/lib/onnxjs/backends/webgl/texture-layout-strategy.ts @@ -105,7 +105,7 @@ export class PreferLogicalStrategy implements TextureLayoutStrategy { // tensor has 3 rows, we pretend it has 4 rows in order to account for the // fact that the texels containing the third row are half empty. logShape = logShape.map( - (d, i) => i >= logShape.length - 2 ? (logShape[i] % 2 === 0 ? logShape[i] : logShape[i] + 1) : logShape[i]); + (_d, i) => i >= logShape.length - 2 ? (logShape[i] % 2 === 0 ? logShape[i] : logShape[i] + 1) : logShape[i]); // Packed texture height is at least 2 (the channel height of a single // texel). @@ -182,7 +182,7 @@ export function parseAxisParam(axis: number|number[], shape: number[]): number[] const rank = shape.length; // Normalize input - axis = axis == null ? shape.map((s, i) => i) : ([] as number[]).concat(axis); + axis = axis == null ? 
shape.map((_s, i) => i) : ([] as number[]).concat(axis); // Check for valid range assert( diff --git a/js/web/lib/onnxjs/backends/webgl/texture-manager.ts b/js/web/lib/onnxjs/backends/webgl/texture-manager.ts index 1cb113e5fa630..effb65288dc1c 100644 --- a/js/web/lib/onnxjs/backends/webgl/texture-manager.ts +++ b/js/web/lib/onnxjs/backends/webgl/texture-manager.ts @@ -172,7 +172,7 @@ export class TextureManager { throw new Error(`TensorData type ${dataType} is not supported`); } } - toTextureData(dataType: Tensor.DataType, data: Tensor.NumberType|undefined): Encoder.DataArrayType|undefined { + toTextureData(_dataType: Tensor.DataType, data: Tensor.NumberType|undefined): Encoder.DataArrayType|undefined { if (!data) { return undefined; } diff --git a/js/web/lib/onnxjs/execution-plan.ts b/js/web/lib/onnxjs/execution-plan.ts index b95e639817dbf..5599087ab46f5 100644 --- a/js/web/lib/onnxjs/execution-plan.ts +++ b/js/web/lib/onnxjs/execution-plan.ts @@ -114,7 +114,7 @@ export class ExecutionPlan { // resolve downstream nodes const downstreamNodes = new Set(); - outputList.forEach((output, i) => { + outputList.forEach((_output, i) => { const j = thisOp.node.outputs[i]; for (const currentDownstreamNodeIndex of graphValues[j].to) { const currentDownstreamNode = graphNodes[currentDownstreamNodeIndex]; diff --git a/js/web/lib/onnxjs/instrument.ts b/js/web/lib/onnxjs/instrument.ts index 4c543cab157d7..4f865503d50ec 100644 --- a/js/web/lib/onnxjs/instrument.ts +++ b/js/web/lib/onnxjs/instrument.ts @@ -176,7 +176,7 @@ function createCategorizedLogger(category: string): Logger.CategorizedLogger { // NOTE: argument 'category' is put the last parameter beacause typescript // doesn't allow optional argument put in front of required argument. This // order is different from a usual logging API. 
-function logInternal(severity: Logger.Severity, content: string, stack: number, category?: string) { +function logInternal(severity: Logger.Severity, content: string, _stack: number, category?: string) { const config = LOGGER_CONFIG_MAP[category || ''] || LOGGER_CONFIG_MAP['']; if (SEVERITY_VALUE[severity] < SEVERITY_VALUE[config.minimalSeverity]) { return; diff --git a/js/web/lib/onnxjs/util.ts b/js/web/lib/onnxjs/util.ts index 0a76d75e79bbf..d697a8b3138cf 100644 --- a/js/web/lib/onnxjs/util.ts +++ b/js/web/lib/onnxjs/util.ts @@ -967,7 +967,7 @@ export class ReduceUtil { const dims = a.dims.slice(0); // if axes is not set, perform reduce on all axes if (axes.length === 0) { - dims.forEach((d, ind) => axes.push(ind)); + dims.forEach((_d, ind) => axes.push(ind)); } // get a temporary broadcastable output shape const outputDims = ReduceUtil.calcReduceShape(dims, axes, true); diff --git a/js/web/lib/wasm/jsep/webgpu/ops/argminmax.ts b/js/web/lib/wasm/jsep/webgpu/ops/argminmax.ts index 2f8d072c7dd4f..b6c6853c8f222 100644 --- a/js/web/lib/wasm/jsep/webgpu/ops/argminmax.ts +++ b/js/web/lib/wasm/jsep/webgpu/ops/argminmax.ts @@ -27,11 +27,6 @@ export interface ArgMinMaxAttributes extends AttributeWithCacheKey { selectLastIndex: number; } -const createArgMinMaxAttributesFromInputs = - (inputs: readonly TensorView[], attributes: ArgMinMaxAttributes): ArgMinMaxAttributes => - createAttributeWithCacheKey( - {axis: attributes.axis, keepDims: attributes.keepDims, selectLastIndex: attributes.selectLastIndex}); - export const argMin = (context: ComputeContext, attributes: ArgMinMaxAttributes): void => { validateInputs(context.inputs); const argMinMaxOp: ReduceOp = (input, output, axes) => { @@ -51,12 +46,10 @@ export const argMin = (context: ComputeContext, attributes: ArgMinMaxAttributes) ]; }; - const updatedAttributes: ArgMinMaxAttributes = - context.inputs.length === 1 ? attributes : createArgMinMaxAttributesFromInputs(context.inputs, attributes); context.compute( createReduceProgramInfo( - 'ArgMin', {hint: updatedAttributes.cacheKey}, [context.inputs[0]], argMinMaxOp, [updatedAttributes.axis], - DataType.int64, updatedAttributes.keepDims), + 'ArgMin', {hint: attributes.cacheKey}, [context.inputs[0]], argMinMaxOp, [attributes.axis], DataType.int64, + attributes.keepDims), {inputs: [0]}); }; @@ -79,12 +72,10 @@ export const argMax = (context: ComputeContext, attributes: ArgMinMaxAttributes) ]; }; - const updatedAttributes: ArgMinMaxAttributes = - context.inputs.length === 1 ? 
attributes : createArgMinMaxAttributesFromInputs(context.inputs, attributes); context.compute( createReduceProgramInfo( - 'argMax', {hint: updatedAttributes.cacheKey}, [context.inputs[0]], argMinMaxOp, [updatedAttributes.axis], - DataType.int64, updatedAttributes.keepDims), + 'argMax', {hint: attributes.cacheKey}, [context.inputs[0]], argMinMaxOp, [attributes.axis], DataType.int64, + attributes.keepDims), {inputs: [0]}); }; diff --git a/js/web/lib/wasm/jsep/webgpu/ops/einsum.ts b/js/web/lib/wasm/jsep/webgpu/ops/einsum.ts index 357eb5c0b84ad..a233d37a79e65 100644 --- a/js/web/lib/wasm/jsep/webgpu/ops/einsum.ts +++ b/js/web/lib/wasm/jsep/webgpu/ops/einsum.ts @@ -254,7 +254,7 @@ const createEinsumProgramInfo = (inputs: readonly TensorView[], einsumEquation: ${shaderHelper.mainStart()} ${shaderHelper.guardAgainstOutOfBoundsWorkgroupSizes(outputSize)} var outputIndices = ${output.offsetToIndices('global_idx')}; - ${inputVars.map((inputVar, i) => `var input${i}Indices: ${inputVars[i].type.indices};`).join('\n')} + ${inputVars.map((_var, i) => `var input${i}Indices: ${inputVars[i].type.indices};`).join('\n')} ${reduceOps.join('\n')}; ${output.setByOffset('global_idx', 'sum')}; }`; diff --git a/js/web/lib/wasm/jsep/webgpu/ops/pad.ts b/js/web/lib/wasm/jsep/webgpu/ops/pad.ts index 180dab92a453a..18859e253aa02 100644 --- a/js/web/lib/wasm/jsep/webgpu/ops/pad.ts +++ b/js/web/lib/wasm/jsep/webgpu/ops/pad.ts @@ -36,8 +36,8 @@ const validateInputs = (inputs: readonly TensorView[]): void => { }; const getPadConstant = - (output: IndicesHelper, outputDims: readonly number[], inputDims: readonly number[], - inputStrides: readonly number[], pads: number[], dataType: string, constantValue: number): string => { + (output: IndicesHelper, inputDims: readonly number[], inputStrides: readonly number[], pads: number[], + dataType: string, constantValue: number): string => { const inputRank = inputDims.length; let block = ''; @@ -66,8 +66,7 @@ const getPadConstant = }; const getPadReflect = - (output: IndicesHelper, outputDims: readonly number[], inputDims: readonly number[], - inputStrides: readonly number[], pads: number[]): string => { + (output: IndicesHelper, inputDims: readonly number[], inputStrides: readonly number[], pads: number[]): string => { const inputRank = inputDims.length; let block = ''; @@ -97,8 +96,7 @@ const getPadReflect = }; const getPadEdge = - (output: IndicesHelper, outputDims: readonly number[], inputDims: readonly number[], - inputStrides: readonly number[], pads: number[]): string => { + (output: IndicesHelper, inputDims: readonly number[], inputStrides: readonly number[], pads: number[]): string => { const inputRank = inputDims.length; let block = ''; @@ -124,8 +122,7 @@ const getPadEdge = }; const getPadWrap = - (output: IndicesHelper, outputDims: readonly number[], inputDims: readonly number[], - inputStrides: readonly number[], pads: number[]): string => { + (output: IndicesHelper, inputDims: readonly number[], inputStrides: readonly number[], pads: number[]): string => { const inputRank = inputDims.length; let block = ''; @@ -151,18 +148,17 @@ const getPadWrap = }; const getPadSnippet = - (output: IndicesHelper, outputDims: readonly number[], inputDims: readonly number[], - inputStrides: readonly number[], attributes: PadAttributes, dataType: string): string => { + (output: IndicesHelper, inputDims: readonly number[], inputStrides: readonly number[], attributes: PadAttributes, + dataType: string): string => { switch (attributes.mode) { case 0: - return getPadConstant( - output, 
outputDims, inputDims, inputStrides, attributes.pads, dataType, attributes.value); + return getPadConstant(output, inputDims, inputStrides, attributes.pads, dataType, attributes.value); case 1: - return getPadReflect(output, outputDims, inputDims, inputStrides, attributes.pads); + return getPadReflect(output, inputDims, inputStrides, attributes.pads); case 2: - return getPadEdge(output, outputDims, inputDims, inputStrides, attributes.pads); + return getPadEdge(output, inputDims, inputStrides, attributes.pads); case 3: - return getPadWrap(output, outputDims, inputDims, inputStrides, attributes.pads); + return getPadWrap(output, inputDims, inputStrides, attributes.pads); default: throw new Error('Invalid mode'); } @@ -179,7 +175,7 @@ const generatePadCode = const output = outputVariable('output', inputs[0].dataType, outputDims); const input = inputVariable('x', inputs[0].dataType, inputDims); - const padSnippet = getPadSnippet(output, outputDims, inputDims, inputStrides, attributes, dataType); + const padSnippet = getPadSnippet(output, inputDims, inputStrides, attributes, dataType); const padCode = ` ${shaderHelper.declareVariables(input, output)} ${shaderHelper.mainStart()} diff --git a/js/web/lib/wasm/jsep/webgpu/ops/reduce-shared.ts b/js/web/lib/wasm/jsep/webgpu/ops/reduce-shared.ts index 54a7414360c4e..1365d1e9a12a4 100644 --- a/js/web/lib/wasm/jsep/webgpu/ops/reduce-shared.ts +++ b/js/web/lib/wasm/jsep/webgpu/ops/reduce-shared.ts @@ -199,7 +199,7 @@ const reduceCommon = let updatedAxes = updatedAttributes.axes; if (updatedAxes.length === 0 && !updatedAttributes.noopWithEmptyAxes) { - updatedAxes = context.inputs[0].dims.map((s, i) => i); + updatedAxes = context.inputs[0].dims.map((_dim, i) => i); } const normalizeAxes = ShapeUtil.normalizeAxes(updatedAxes, context.inputs[0].dims.length); diff --git a/js/web/lib/wasm/jsep/webgpu/ops/resize.ts b/js/web/lib/wasm/jsep/webgpu/ops/resize.ts index 07cfefb8f191b..9869561a36251 100644 --- a/js/web/lib/wasm/jsep/webgpu/ops/resize.ts +++ b/js/web/lib/wasm/jsep/webgpu/ops/resize.ts @@ -218,32 +218,30 @@ const initOutputShape = return outputShape; }; -const adjustOutputShape = - (inputShape: readonly number[], outputShape: readonly number[], scales: number[], attributes: ResizeAttributes): - number[] => { - const scaleInPolicy = (() => { - switch (attributes.keepAspectRatioPolicy) { - case 'not_larger': - return attributes.axes.length > 0 ? Math.min(...attributes.axes.map(i => scales[i]), Number.MAX_VALUE) : - Math.min(...scales, Number.MAX_VALUE); - case 'not_smaller': - return attributes.axes.length > 0 ? 
Math.max(...attributes.axes.map(i => scales[i]), Number.MIN_VALUE) : - Math.max(...scales, Number.MIN_VALUE); - default: - throw new Error(`Keep aspect ratio policy ${attributes.keepAspectRatioPolicy} is not supported`); - } - })(); - scales.fill(1.0, 0, scales.length); - const adjustedOutputShape = inputShape.slice(); - if (attributes.axes.length > 0) { - attributes.axes.forEach((v) => scales[v] = scaleInPolicy); - attributes.axes.forEach((v) => adjustedOutputShape[v] = Math.round(inputShape[v] * scales[v])); - } else { - scales.fill(scaleInPolicy, 0, scales.length); - adjustedOutputShape.forEach((v, i) => adjustedOutputShape[i] = Math.round(v * scales[i])); - } - return adjustedOutputShape; - }; +const adjustOutputShape = (inputShape: readonly number[], scales: number[], attributes: ResizeAttributes): number[] => { + const scaleInPolicy = (() => { + switch (attributes.keepAspectRatioPolicy) { + case 'not_larger': + return attributes.axes.length > 0 ? Math.min(...attributes.axes.map(i => scales[i]), Number.MAX_VALUE) : + Math.min(...scales, Number.MAX_VALUE); + case 'not_smaller': + return attributes.axes.length > 0 ? Math.max(...attributes.axes.map(i => scales[i]), Number.MIN_VALUE) : + Math.max(...scales, Number.MIN_VALUE); + default: + throw new Error(`Keep aspect ratio policy ${attributes.keepAspectRatioPolicy} is not supported`); + } + })(); + scales.fill(1.0, 0, scales.length); + const adjustedOutputShape = inputShape.slice(); + if (attributes.axes.length > 0) { + attributes.axes.forEach((v) => scales[v] = scaleInPolicy); + attributes.axes.forEach((v) => adjustedOutputShape[v] = Math.round(inputShape[v] * scales[v])); + } else { + scales.fill(scaleInPolicy, 0, scales.length); + adjustedOutputShape.forEach((v, i) => adjustedOutputShape[i] = Math.round(v * scales[i])); + } + return adjustedOutputShape; +}; const calculateOriginalIndicesFromOutputIndices = (output: IndicesHelper, inputShape: readonly number[], outputShape: readonly number[], scales: readonly number[], @@ -314,8 +312,8 @@ const checkInputIndices = (input: IndicesHelper, inputShape: readonly number[]): }`; const bilinearInterpolation = - (input: IndicesHelper, output: IndicesHelper, inputShape: readonly number[], outputShape: readonly number[], - scales: readonly number[], useExtrapolation: boolean, extrapolationValue: number): string => { + (input: IndicesHelper, output: IndicesHelper, inputShape: readonly number[], scales: readonly number[], + useExtrapolation: boolean, extrapolationValue: number): string => { const [batchIdx, heightIdx, widthIdx, channelIdx] = inputShape.length === 2 ? [-1, 0, 1, -1] : (scales[1] === 1.0 ? [0, 2, 3, 1] : [0, 1, 2, 3]); return ` @@ -445,7 +443,7 @@ const createResizeProgramInfo = if (scalesInput.length === 0) { scales = inputShape.map((value, index) => value === 0 ? 
1.0 : outputShape[index] / value); if (attributes.keepAspectRatioPolicy !== 'stretch') { - outputShape = adjustOutputShape(inputShape, outputShape, scales, attributes); + outputShape = adjustOutputShape(inputShape, scales, attributes); } } const output = outputVariable('output', inputTensor.dataType, outputShape); @@ -471,7 +469,7 @@ const createResizeProgramInfo = ${calculateOriginalIndicesFromOutputIndices(output, inputShape, outputShape, scales, roi)}; ${ bilinearInterpolation( - input, output, inputShape, outputShape, scales, useExtrapolation, attributes.extrapolationValue)}; + input, output, inputShape, scales, useExtrapolation, attributes.extrapolationValue)}; `; case 'cubic': return ` diff --git a/js/web/lib/wasm/proxy-worker/main.ts b/js/web/lib/wasm/proxy-worker/main.ts index 1f4595818e5c0..1cb6d9e391e4f 100644 --- a/js/web/lib/wasm/proxy-worker/main.ts +++ b/js/web/lib/wasm/proxy-worker/main.ts @@ -3,7 +3,39 @@ /// -import {OrtWasmMessage} from '../proxy-messages'; +// +// * type hack for "HTMLImageElement" +// +// in typescript, the type of "HTMLImageElement" is defined in lib.dom.d.ts, which is conflict with lib.webworker.d.ts. +// when we use webworker, the lib.webworker.d.ts will be used, which does not have HTMLImageElement defined. +// +// we will get the following errors complaining that HTMLImageElement is not defined: +// +// ==================================================================================================================== +// +// ../common/dist/cjs/tensor-factory.d.ts:187:29 - error TS2552: Cannot find name 'HTMLImageElement'. Did you mean +// 'HTMLLIElement'? +// +// 187 fromImage(imageElement: HTMLImageElement, options?: TensorFromImageElementOptions): +// Promise | TypedTensor<'uint8'>>; +// ~~~~~~~~~~~~~~~~ +// +// node_modules/@webgpu/types/dist/index.d.ts:83:7 - error TS2552: Cannot find name 'HTMLImageElement'. Did you mean +// 'HTMLLIElement'? +// +// 83 | HTMLImageElement +// ~~~~~~~~~~~~~~~~ +// +// ==================================================================================================================== +// +// `HTMLImageElement` is only used in type declaration and not in real code. So we define it as `unknown` here to +// bypass the type check. +// +declare global { + type HTMLImageElement = unknown; +} + +import {OrtWasmMessage, SerializableTensorMetadata} from '../proxy-messages'; import {createSession, createSessionAllocate, createSessionFinalize, endProfiling, extractTransferableBuffers, initRuntime, isOrtEnvInitialized, releaseSession, run} from '../wasm-core-impl'; import {initializeWebAssembly} from '../wasm-factory'; @@ -11,7 +43,7 @@ self.onmessage = (ev: MessageEvent): void => { switch (ev.data.type) { case 'init-wasm': try { - initializeWebAssembly(ev.data.in) + initializeWebAssembly(ev.data.in!) 
.then( () => postMessage({type: 'init-wasm'} as OrtWasmMessage), err => postMessage({type: 'init-wasm', err} as OrtWasmMessage)); @@ -21,10 +53,10 @@ self.onmessage = (ev: MessageEvent): void => { break; case 'init-ort': try { - initRuntime(ev.data.in).then(() => postMessage({type: 'init-ort'} as OrtWasmMessage), err => postMessage({ - type: 'init-ort', - err - } as OrtWasmMessage)); + initRuntime(ev.data.in!).then(() => postMessage({type: 'init-ort'} as OrtWasmMessage), err => postMessage({ + type: 'init-ort', + err + } as OrtWasmMessage)); } catch (err) { postMessage({type: 'init-ort', err} as OrtWasmMessage); } @@ -58,8 +90,7 @@ self.onmessage = (ev: MessageEvent): void => { break; case 'release': try { - const handler = ev.data.in!; - releaseSession(handler); + releaseSession(ev.data.in!); postMessage({type: 'release'} as OrtWasmMessage); } catch (err) { postMessage({type: 'release', err} as OrtWasmMessage); @@ -68,10 +99,16 @@ self.onmessage = (ev: MessageEvent): void => { case 'run': try { const {sessionId, inputIndices, inputs, outputIndices, options} = ev.data.in!; - run(sessionId, inputIndices, inputs, outputIndices, options) + run(sessionId, inputIndices, inputs, outputIndices, new Array(outputIndices.length).fill(null), options) .then( outputs => { - postMessage({type: 'run', out: outputs} as OrtWasmMessage, extractTransferableBuffers(outputs)); + if (outputs.some(o => o[3] !== 'cpu')) { + postMessage({type: 'run', err: 'Proxy does not support non-cpu tensor location.'}); + } else { + postMessage( + {type: 'run', out: outputs} as OrtWasmMessage, + extractTransferableBuffers(outputs as SerializableTensorMetadata[])); + } }, err => { postMessage({type: 'run', err} as OrtWasmMessage); diff --git a/js/web/lib/wasm/proxy-worker/tsconfig.json b/js/web/lib/wasm/proxy-worker/tsconfig.json new file mode 100644 index 0000000000000..ec1044612a569 --- /dev/null +++ b/js/web/lib/wasm/proxy-worker/tsconfig.json @@ -0,0 +1,8 @@ +{ + "extends": "../../../tsconfig.json", + "compilerOptions": { + "lib": ["WebWorker"] + }, + "include": ["main.ts", "../../build-def.d.ts"], + "exclude": [] +} diff --git a/js/web/package.json b/js/web/package.json index 898b0ba3ec5f1..9b4531d7766fe 100644 --- a/js/web/package.json +++ b/js/web/package.json @@ -24,7 +24,7 @@ "build:doc": "node ./script/generate-webgl-operator-md && node ./script/generate-webgpu-operator-md", "pull:wasm": "node ./script/pull-prebuilt-wasm-artifacts", "test:e2e": "node ./test/e2e/run", - "prebuild": "tsc -p . --noEmit", + "prebuild": "tsc -p . 
--noEmit && tsc -p lib/wasm/proxy-worker --noEmit", "build": "node ./script/build", "test": "tsc --build ../scripts && node ../scripts/prepare-onnx-node-tests && node ./script/test-runner-cli", "prepack": "node ./script/build && node ./script/prepack" diff --git a/js/web/script/tsconfig.json b/js/web/script/tsconfig.json index d7dc4cbe2d291..23b1fc96eb558 100644 --- a/js/web/script/tsconfig.json +++ b/js/web/script/tsconfig.json @@ -1,7 +1,6 @@ { "extends": "../../tsconfig.tools.json", "compilerOptions": { - "sourceMap": true, - "noUnusedParameters": false + "sourceMap": true } } diff --git a/js/web/test/unittests/backends/webgl/test-conv-new.ts b/js/web/test/unittests/backends/webgl/test-conv-new.ts index 0fddddf58181c..8c186b9b36451 100644 --- a/js/web/test/unittests/backends/webgl/test-conv-new.ts +++ b/js/web/test/unittests/backends/webgl/test-conv-new.ts @@ -14,7 +14,7 @@ import {conv2d} from './test-conv-utils'; function createRandomArray(size: number): Float32Array { const randomTable = [0, 3, 6, 9, 2, 5, 8, 1, 4, 7]; return new Float32Array( - Array.from({length: size}, (v, k) => randomTable[k % 10] * 0.1 + randomTable[Math.trunc(k / 10) % 10] * 0.01)); + Array.from({length: size}, (_v, k) => randomTable[k % 10] * 0.1 + randomTable[Math.trunc(k / 10) % 10] * 0.01)); } interface TestData { inputShape: number[]; diff --git a/js/web/test/unittests/backends/webgl/test-pack-unpack.ts b/js/web/test/unittests/backends/webgl/test-pack-unpack.ts index 0b70144733227..61c21d4b689fb 100644 --- a/js/web/test/unittests/backends/webgl/test-pack-unpack.ts +++ b/js/web/test/unittests/backends/webgl/test-pack-unpack.ts @@ -291,7 +291,7 @@ describe('#UnitTest# - unpack - Tensor unpack', () => { webglInferenceHandler.session.textureManager.glContext.checkError(); const webglTexture = createTextureFromArray( webglInferenceHandler.session.textureManager.glContext, testData.rawData ? 
testData.rawData : inputData, - gl.RGBA, inputTextureShape[0], inputTextureShape[1]); + inputTextureShape[0], inputTextureShape[1]); webglInferenceHandler.session.textureManager.glContext.checkError(); const packedShape = inputTextureShape; const textureData = { diff --git a/js/web/test/unittests/backends/webgl/test-utils.ts b/js/web/test/unittests/backends/webgl/test-utils.ts index acb3f0002ce2f..092d63cd2ade4 100644 --- a/js/web/test/unittests/backends/webgl/test-utils.ts +++ b/js/web/test/unittests/backends/webgl/test-utils.ts @@ -4,7 +4,7 @@ import {WebGLContext} from '../../../../lib/onnxjs/backends/webgl/webgl-context'; export function createAscendingArray(size: number): Float32Array { - return new Float32Array(Array.from({length: size}, (v, i) => (i + 1))); + return new Float32Array(Array.from({length: size}, (_v, i) => (i + 1))); } // Returns an array by injecting 3 zeros after every element in the input array to be used for creating unpacked @@ -19,7 +19,7 @@ export function generateArrayForUnpackedTexture(input: Float32Array): Float32Arr // create a webgl texture and fill it with the array content export function createTextureFromArray( - glContext: WebGLContext, dataArray: Float32Array, type: GLenum, width: number, height: number): WebGLTexture { + glContext: WebGLContext, dataArray: Float32Array, width: number, height: number): WebGLTexture { const gl = glContext.gl; // create the texture diff --git a/js/web/tsconfig.json b/js/web/tsconfig.json index 0f35367db1d5a..d60d746e9328d 100644 --- a/js/web/tsconfig.json +++ b/js/web/tsconfig.json @@ -3,7 +3,6 @@ "compilerOptions": { "downlevelIteration": true, "declaration": true, - "noUnusedParameters": false, "typeRoots": ["./node_modules/@webgpu/types", "./node_modules/@types", "../node_modules/@types"] }, "include": ["lib", "test"], From 0a4d76d98b8eea5bb863685944577d609c5bac8d Mon Sep 17 00:00:00 2001 From: Edward Chen <18449977+edgchen1@users.noreply.github.com> Date: Wed, 15 Nov 2023 09:31:54 -0800 Subject: [PATCH 07/38] MLAS AArch64 quantized int4 Gemm kernel (#18031) - Implement MLAS function for quantized 4-bit int Gemm (Gemm with float A and quantized 4-bit int B) for ARM NEON. This is an initial implementation. Only the M=1 path (with M being number of rows of A and C) has any optimization attempted so far. More optimization to come in future PRs. - Connect MatMulNBits contrib op to MLAS function. 
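As a rough sketch of what the new kernel computes (not the actual MLAS packing or entry point, which live in `mlas_qnbit.h` / `sqnbitgemm.*`): B is block-quantized along K with 4-bit values and one float scale (plus an optional zero point) per fixed-size block, and the optimized M=1 path is essentially a dot product of the float A row with each dequantized column of B. A naive reference follows, with the layout and names assumed purely for illustration:

```python
import numpy as np

def ref_sqnbit_gemv(a, b_quant, scales, zero_points, blk_len):
    """Reference for the M=1 case: y[n] = sum_k a[k] * dequant(B)[k, n].

    Assumed illustrative layout: b_quant[n, kb, i] is an unsigned 4-bit value
    (0..15) for column n, K-block kb, element i; scales/zero_points are per
    (n, kb). The real MLAS layout is packed differently.
    """
    k = a.shape[0]
    n_cols, n_blks, _ = b_quant.shape
    y = np.zeros(n_cols, dtype=np.float32)
    for n in range(n_cols):
        for kb in range(n_blks):
            start = kb * blk_len
            length = min(blk_len, k - start)
            deq = (b_quant[n, kb, :length].astype(np.float32) - zero_points[n, kb]) * scales[n, kb]
            y[n] += float(np.dot(a[start:start + length], deq))
    return y
```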
--- cmake/onnxruntime_mlas.cmake | 3 + .../core/framework/op_node_proto_helper.h | 68 +-- .../cpu/quantization/matmul_nbits.cc | 111 ++-- onnxruntime/core/common/cpuid_uarch.cc | 28 +- .../core/framework/op_node_proto_helper.cc | 4 +- onnxruntime/core/mlas/.clang-format | 6 +- .../core/mlas/inc/mlas_gemm_postprocessor.h | 33 ++ onnxruntime/core/mlas/inc/mlas_q4.h | 48 +- onnxruntime/core/mlas/inc/mlas_qnbit.h | 79 +++ onnxruntime/core/mlas/lib/mlasi.h | 18 + onnxruntime/core/mlas/lib/platform.cpp | 1 + onnxruntime/core/mlas/lib/q4_dq.cpp | 153 +++++- onnxruntime/core/mlas/lib/q4gemm.h | 2 +- onnxruntime/core/mlas/lib/sqnbitgemm.cpp | 144 ++++++ onnxruntime/core/mlas/lib/sqnbitgemm.h | 287 ++++++++++ .../core/mlas/lib/sqnbitgemm_kernel_neon.cpp | 489 ++++++++++++++++++ .../test/contrib_ops/matmul_4bits_test.cc | 30 +- onnxruntime/test/mlas/bench/bench_q4gemm.cpp | 2 +- onnxruntime/test/mlas/bench/bench_sgemm.cpp | 2 +- .../test/mlas/bench/bench_sqnbitgemm.cpp | 86 +++ .../test/mlas/unittest/test_blockq4.cpp | 14 +- .../test/mlas/unittest/test_halfgemm.h | 14 - onnxruntime/test/mlas/unittest/test_main.cpp | 11 +- onnxruntime/test/mlas/unittest/test_q4gemm.h | 14 - .../test/mlas/unittest/test_q8q4gemm.cpp | 14 - .../test/mlas/unittest/test_sqnbitgemm.cpp | 270 ++++++++++ onnxruntime/test/mlas/unittest/test_util.h | 14 + .../test/onnx/microbenchmark/reduceminmax.cc | 4 + 28 files changed, 1744 insertions(+), 205 deletions(-) create mode 100644 onnxruntime/core/mlas/inc/mlas_gemm_postprocessor.h create mode 100644 onnxruntime/core/mlas/inc/mlas_qnbit.h create mode 100644 onnxruntime/core/mlas/lib/sqnbitgemm.cpp create mode 100644 onnxruntime/core/mlas/lib/sqnbitgemm.h create mode 100644 onnxruntime/core/mlas/lib/sqnbitgemm_kernel_neon.cpp create mode 100644 onnxruntime/test/mlas/bench/bench_sqnbitgemm.cpp create mode 100644 onnxruntime/test/mlas/unittest/test_sqnbitgemm.cpp diff --git a/cmake/onnxruntime_mlas.cmake b/cmake/onnxruntime_mlas.cmake index a62b1b259d109..04efa5c2b4f6d 100644 --- a/cmake/onnxruntime_mlas.cmake +++ b/cmake/onnxruntime_mlas.cmake @@ -33,6 +33,7 @@ onnxruntime_add_static_library(onnxruntime_mlas ${MLAS_SRC_DIR}/qpostprocessor.cpp ${MLAS_SRC_DIR}/qlgavgpool.cpp ${MLAS_SRC_DIR}/qdwconv_kernelsize.cpp + ${MLAS_SRC_DIR}/sqnbitgemm.cpp ) if (NOT onnxruntime_ORT_MINIMAL_BUILD) @@ -68,6 +69,7 @@ function(setup_mlas_source_for_windows) ${MLAS_SRC_DIR}/qgemm_kernel_neon.cpp ${MLAS_SRC_DIR}/qgemm_kernel_udot.cpp ${MLAS_SRC_DIR}/qgemm_kernel_sdot.cpp + ${MLAS_SRC_DIR}/sqnbitgemm_kernel_neon.cpp ) set(mlas_platform_preprocess_srcs @@ -334,6 +336,7 @@ else() ${MLAS_SRC_DIR}/qgemm_kernel_neon.cpp ${MLAS_SRC_DIR}/qgemm_kernel_udot.cpp ${MLAS_SRC_DIR}/qgemm_kernel_sdot.cpp + ${MLAS_SRC_DIR}/sqnbitgemm_kernel_neon.cpp ) if (NOT APPLE) set(mlas_platform_srcs diff --git a/include/onnxruntime/core/framework/op_node_proto_helper.h b/include/onnxruntime/core/framework/op_node_proto_helper.h index 700e1edc0cb7d..e7ac01947af41 100644 --- a/include/onnxruntime/core/framework/op_node_proto_helper.h +++ b/include/onnxruntime/core/framework/op_node_proto_helper.h @@ -10,20 +10,6 @@ #include "core/common/gsl.h" #endif -#ifdef __has_attribute -#define ORT_HAVE_ATTRIBUTE(x) __has_attribute(x) -#else -#define ORT_HAVE_ATTRIBUTE(x) 0 -#endif - -#if ORT_HAVE_ATTRIBUTE(nodiscard) -#define MUST_USE_RESULT [[nodiscard]] -#elif defined(__clang__) && ORT_HAVE_ATTRIBUTE(warn_unused_result) -#define MUST_USE_RESULT __attribute__((warn_unused_result)) -#else -#define MUST_USE_RESULT -#endif - class IMLOpKernel; 
namespace onnxruntime { @@ -43,14 +29,26 @@ class OpNodeProtoHelper { Call this function for a required attribute or when a default value for an optional attribute is specified in the op schema */ template - MUST_USE_RESULT Status GetAttr(const std::string& name, T* value) const; + Status GetAttr(const std::string& name, T* value) const; + + /** + Get a single attribute + Call this function for a required attribute or when a default value for an optional attribute is specified in the op schema + Throws if an attribute with the specified type doesn't exist + */ + template + [[nodiscard]] T GetAttr(const std::string& name) const { + T value; + ORT_THROW_IF_ERROR(GetAttr(name, &value)); + return value; + } /** Get a single attribute Call this function only when a default value for an optional attribute isn't specified in the op schema */ template - T GetAttrOrDefault(const std::string& name, const T& default_value) const { + [[nodiscard]] T GetAttrOrDefault(const std::string& name, const T& default_value) const { T tmp; return GetAttr(name, &tmp).IsOK() ? tmp : default_value; } @@ -70,7 +68,8 @@ class OpNodeProtoHelper { Call this function only when a default value for an optional attribute isn't specified in the op schema */ template - MUST_USE_RESULT std::vector GetAttrsOrDefault(const std::string& name, const std::vector& default_value = std::vector{}) const { + [[nodiscard]] std::vector GetAttrsOrDefault(const std::string& name, + const std::vector& default_value = {}) const { std::vector tmp; return GetAttrs(name, tmp).IsOK() ? tmp : default_value; } @@ -87,11 +86,12 @@ class OpNodeProtoHelper { /// Attribute data in a span, out parameter /// Status template - MUST_USE_RESULT Status GetAttrsAsSpan(const std::string& name, gsl::span& values) const; + Status GetAttrsAsSpan(const std::string& name, gsl::span& values) const; - MUST_USE_RESULT Status GetAttrs(const std::string& name, TensorShapeVector& out) const; + Status GetAttrs(const std::string& name, TensorShapeVector& out) const; - MUST_USE_RESULT TensorShapeVector GetAttrsOrDefault(const std::string& name, const TensorShapeVector& default_value = TensorShapeVector{}) const { + [[nodiscard]] TensorShapeVector GetAttrsOrDefault(const std::string& name, + const TensorShapeVector& default_value = {}) const { TensorShapeVector tmp; return GetAttrs(name, tmp).IsOK() ? 
tmp : default_value; } @@ -100,43 +100,43 @@ class OpNodeProtoHelper { Get repeated attributes */ template - MUST_USE_RESULT Status GetAttrs(const std::string& name, std::vector& values) const; + Status GetAttrs(const std::string& name, std::vector& values) const; template - MUST_USE_RESULT Status GetAttrs(const std::string& name, gsl::span values) const; + Status GetAttrs(const std::string& name, gsl::span values) const; - MUST_USE_RESULT Status GetAttrsStringRefs(const std::string& name, - std::vector>& refs) const; + Status GetAttrsStringRefs(const std::string& name, + std::vector>& refs) const; - uint32_t GetPrimitiveAttrElementCount(ONNX_NAMESPACE::AttributeProto_AttributeType type, - const std::string& name) const noexcept; + [[nodiscard]] uint32_t GetPrimitiveAttrElementCount(ONNX_NAMESPACE::AttributeProto_AttributeType type, + const std::string& name) const noexcept; - bool HasPrimitiveAttribute(ONNX_NAMESPACE::AttributeProto_AttributeType type, - const std::string& name) const noexcept; + [[nodiscard]] bool HasPrimitiveAttribute(ONNX_NAMESPACE::AttributeProto_AttributeType type, + const std::string& name) const noexcept; - uint32_t GetInputCount() const { + [[nodiscard]] uint32_t GetInputCount() const { return gsl::narrow_cast(impl_->getNumInputs()); } - uint32_t GetOutputCount() const { + [[nodiscard]] uint32_t GetOutputCount() const { return gsl::narrow_cast(impl_->getNumOutputs()); } - const ONNX_NAMESPACE::TypeProto* GetInputType(size_t index) const { + [[nodiscard]] const ONNX_NAMESPACE::TypeProto* GetInputType(size_t index) const { return impl_->getInputType(index); } - const ONNX_NAMESPACE::TypeProto* GetOutputType(size_t index) const { + [[nodiscard]] const ONNX_NAMESPACE::TypeProto* GetOutputType(size_t index) const { // Work around lack of a const method from the onnx InferenceContext interface return const_cast(impl_)->getOutputType(index); } // Try to query an attribute, returning nullptr if it doesn't exist - const ONNX_NAMESPACE::AttributeProto* TryGetAttribute(const std::string& name) const { + [[nodiscard]] const ONNX_NAMESPACE::AttributeProto* TryGetAttribute(const std::string& name) const { return impl_->getAttribute(name); } - const ONNX_NAMESPACE::AttributeProto* GetAttribute(const std::string& name) const { + [[nodiscard]] const ONNX_NAMESPACE::AttributeProto* GetAttribute(const std::string& name) const { const ONNX_NAMESPACE::AttributeProto* attr = TryGetAttribute(name); ORT_ENFORCE(attr != nullptr); return attr; diff --git a/onnxruntime/contrib_ops/cpu/quantization/matmul_nbits.cc b/onnxruntime/contrib_ops/cpu/quantization/matmul_nbits.cc index c72d811170a27..320a05bb97dac 100644 --- a/onnxruntime/contrib_ops/cpu/quantization/matmul_nbits.cc +++ b/onnxruntime/contrib_ops/cpu/quantization/matmul_nbits.cc @@ -1,35 +1,38 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
+#include "core/common/narrow.h" #include "core/common/safeint.h" #include "core/framework/op_kernel.h" +#include "core/mlas/inc/mlas.h" +#include "core/mlas/inc/mlas_qnbit.h" +#include "core/mlas/inc/mlas_q4.h" #include "core/providers/cpu/math/matmul_helper.h" #include "core/providers/common.h" -#include "core/mlas/inc/mlas_q4.h" namespace onnxruntime { namespace contrib { class MatMulNBits final : public OpKernel { public: - MatMulNBits(const OpKernelInfo& info) : OpKernel(info) { - ORT_ENFORCE(Status::OK() == info.GetAttr("K", &K_)); - ORT_ENFORCE(Status::OK() == info.GetAttr("N", &N_)); - ORT_ENFORCE(Status::OK() == info.GetAttr("block_size", &block_size_)); - ORT_ENFORCE(Status::OK() == info.GetAttr("bits", &nbits_)); + MatMulNBits(const OpKernelInfo& info) + : OpKernel(info), + K_{narrow(info.GetAttr("K"))}, + N_{narrow(info.GetAttr("N"))}, + block_size_{narrow(info.GetAttr("block_size"))}, + nbits_{narrow(info.GetAttr("bits"))} { ORT_ENFORCE(nbits_ == 4, - "Only 4b quantization is supported for MatMulNBits op," - " additional bits support is planned."); + "Only 4b quantization is supported for MatMulNBits op, additional bits support is planned."); } Status Compute(OpKernelContext* context) const override; private: - int64_t K_; - int64_t N_; - int64_t block_size_; - int64_t nbits_; - bool column_wise_quant_{true}; + const size_t K_; + const size_t N_; + const size_t block_size_; + const size_t nbits_; + const bool column_wise_quant_{true}; }; Status MatMulNBits::Compute(OpKernelContext* ctx) const { @@ -45,11 +48,60 @@ Status MatMulNBits::Compute(OpKernelContext* ctx) const { const auto* scales_data = scales->Data(); const auto* zero_points_data = zero_points == nullptr ? nullptr : zero_points->Data(); + TensorShape b_shape({static_cast(N_), static_cast(K_)}); + + MatMulComputeHelper helper; + ORT_RETURN_IF_ERROR(helper.Compute(a->Shape(), b_shape, false, true)); + + Tensor* y = ctx->Output(0, helper.OutputShape()); + + // Bail out early if the output is going to be empty + if (y->Shape().Size() == 0) + return Status::OK(); + + auto* y_data = y->MutableData(); + + const size_t batch_count = helper.OutputOffsets().size(); + const size_t M = static_cast(helper.M()); + const size_t N = static_cast(helper.N()); + const size_t K = static_cast(helper.K()); + const size_t lda = helper.Lda(false); + + if (MlasIsSQNBitGemmAvailable(nbits_, block_size_)) { + // number of bytes or elements between adjacent matrices + size_t b_data_matrix_stride_in_bytes, b_scale_matrix_stride, b_zero_point_matrix_stride_in_bytes; + MlasBlockwiseQuantizedBufferSizes(static_cast(nbits_), static_cast(block_size_), /* columnwise */ true, + static_cast(K), static_cast(N), + b_data_matrix_stride_in_bytes, b_scale_matrix_stride, + &b_zero_point_matrix_stride_in_bytes); + + const size_t b_matrix_size = K * N; + + InlinedVector data(batch_count); + for (size_t i = 0; i < batch_count; ++i) { + const size_t b_matrix_offset = helper.RightOffsets()[i] / b_matrix_size; + + data[i].A = a_data + helper.LeftOffsets()[i]; + data[i].lda = lda; + data[i].QuantBData = b_data + b_matrix_offset * b_data_matrix_stride_in_bytes; + data[i].QuantBScale = scales_data + b_matrix_offset * b_scale_matrix_stride; + data[i].QuantBZeroPoint = zero_points_data != nullptr + ? 
zero_points_data + b_matrix_offset * b_zero_point_matrix_stride_in_bytes + : nullptr; + data[i].C = y_data + helper.OutputOffsets()[i]; + data[i].ldc = N; + } + + MlasSQNBitGemmBatch(M, N, K, batch_count, nbits_, block_size_, data.data(), thread_pool); + + return Status::OK(); + } + + const size_t ldb = helper.Ldb(true); + AllocatorPtr allocator; - auto status = ctx->GetTempSpaceAllocator(&allocator); - ORT_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(ctx->GetTempSpaceAllocator(&allocator)); auto tmp_b_data_ptr = IAllocator::MakeUniquePtr(allocator, SafeInt(K_) * N_); - // dequantize b, only 4b quantization is supported for now MlasDequantizeBlockwise( tmp_b_data_ptr.get(), // dequantized output @@ -67,29 +119,8 @@ Status MatMulNBits::Compute(OpKernelContext* ctx) const { MlasTranspose(tmp_b_data_ptr.get(), tm_b_data_ptr_trans.get(), N_, K_); #endif - TensorShape b_shape({N_, K_}); - - MatMulComputeHelper helper; - ORT_RETURN_IF_ERROR(helper.Compute(a->Shape(), b_shape, false, true)); - - Tensor* y = ctx->Output(0, helper.OutputShape()); - - // Bail out early if the output is going to be empty - if (y->Shape().Size() == 0) - return Status::OK(); - - auto* y_data = y->MutableData(); - - const size_t max_len = helper.OutputOffsets().size(); - const size_t M = static_cast(helper.M()); - const size_t N = static_cast(helper.N()); - const size_t K = static_cast(helper.K()); - const size_t lda = helper.Lda(false); - const size_t ldb = helper.Ldb(true); - - // TODO: implement with native kernel - std::vector data(max_len); - for (size_t i = 0; i < max_len; i++) { + std::vector data(batch_count); + for (size_t i = 0; i < batch_count; i++) { data[i].BIsPacked = false; data[i].A = a_data + helper.LeftOffsets()[i]; data[i].lda = lda; @@ -101,7 +132,7 @@ Status MatMulNBits::Compute(OpKernelContext* ctx) const { data[i].beta = 0.0f; } MlasGemmBatch(CblasNoTrans, CblasTrans, - M, N, K, data.data(), max_len, thread_pool); + M, N, K, data.data(), batch_count, thread_pool); return Status::OK(); } diff --git a/onnxruntime/core/common/cpuid_uarch.cc b/onnxruntime/core/common/cpuid_uarch.cc index 52baad739441b..16634b2bc8744 100644 --- a/onnxruntime/core/common/cpuid_uarch.cc +++ b/onnxruntime/core/common/cpuid_uarch.cc @@ -3,7 +3,8 @@ #include "core/common/cpuid_uarch.h" -#include "core/common/logging/logging.h" +#include // For std::cerr. + // Writing to stderr instead of logging because logger may not be initialized yet. 
namespace onnxruntime { @@ -137,7 +138,7 @@ void decodeMIDR( break; // #endif /* ARM */ default: - LOGS_DEFAULT(WARNING) << "unknown ARM CPU part 0x" << std::hex << midr_get_part(midr) << " ignored"; + std::cerr << "unknown ARM CPU part 0x" << std::hex << midr_get_part(midr) << " ignored\n"; } } break; @@ -156,7 +157,7 @@ void decodeMIDR( break; // #endif default: - LOGS_DEFAULT(WARNING) << "unknown Broadcom CPU part 0x" << std::hex << midr_get_part(midr) << " ignored"; + std::cerr << "unknown Broadcom CPU part 0x" << std::hex << midr_get_part(midr) << " ignored\n"; } break; // #if (defined(_M_ARM64) || defined(__aarch64__)) && !defined(__ANDROID__) @@ -172,7 +173,7 @@ void decodeMIDR( *uarch = cpuinfo_uarch_thunderx2; break; default: - LOGS_DEFAULT(WARNING) << "unknown Cavium CPU part 0x" << std::hex << midr_get_part(midr) << " ignored"; + std::cerr << "unknown Cavium CPU part 0x" << std::hex << midr_get_part(midr) << " ignored\n"; } break; // #endif @@ -187,7 +188,7 @@ void decodeMIDR( *uarch = cpuinfo_uarch_cortex_a76; break; default: - LOGS_DEFAULT(WARNING) << "unknown Huawei CPU part 0x" << std::hex << midr_get_part(midr) << " ignored"; + std::cerr << "unknown Huawei CPU part 0x" << std::hex << midr_get_part(midr) << " ignored\n"; } break; // #if defined(_M_ARM) || defined(__arm__) @@ -199,7 +200,7 @@ void decodeMIDR( *uarch = cpuinfo_uarch_xscale; break; default: - LOGS_DEFAULT(WARNING) << "unknown Intel CPU part 0x" << std::hex << midr_get_part(midr) << " ignored"; + std::cerr << "unknown Intel CPU part 0x" << std::hex << midr_get_part(midr) << " ignored\n"; } break; // #endif /* ARM */ @@ -215,7 +216,7 @@ void decodeMIDR( *uarch = cpuinfo_uarch_carmel; break; default: - LOGS_DEFAULT(WARNING) << "unknown Nvidia CPU part 0x" << std::hex << midr_get_part(midr) << " ignored"; + std::cerr << "unknown Nvidia CPU part 0x" << std::hex << midr_get_part(midr) << " ignored\n"; } break; #if !defined(__ANDROID__) @@ -225,7 +226,7 @@ void decodeMIDR( *uarch = cpuinfo_uarch_xgene; break; default: - LOGS_DEFAULT(WARNING) << "unknown Applied Micro CPU part 0x" << std::hex << midr_get_part(midr) << " ignored"; + std::cerr << "unknown Applied Micro CPU part 0x" << std::hex << midr_get_part(midr) << " ignored\n"; } break; #endif @@ -297,7 +298,7 @@ void decodeMIDR( break; // #endif /* ARM64 && !defined(__ANDROID__) */ default: - LOGS_DEFAULT(WARNING) << "unknown Qualcomm CPU part 0x" << std::hex << midr_get_part(midr) << " ignored"; + std::cerr << "unknown Qualcomm CPU part 0x" << std::hex << midr_get_part(midr) << " ignored\n"; } break; case 'S': @@ -343,8 +344,9 @@ void decodeMIDR( *uarch = cpuinfo_uarch_exynos_m5; break; default: - LOGS_DEFAULT(WARNING) << "unknown Samsung CPU variant 0x" - << std::hex << midr_get_variant(midr) << " part 0x" << std::hex << midr_get_part(midr) << " ignored"; + std::cerr << "unknown Samsung CPU variant 0x" + << std::hex << midr_get_variant(midr) << " part 0x" << std::hex << midr_get_part(midr) + << " ignored\n"; } break; // #if defined(_M_ARM) || defined(__arm__) @@ -355,12 +357,12 @@ void decodeMIDR( *uarch = cpuinfo_uarch_pj4; break; default: - LOGS_DEFAULT(WARNING) << "unknown Marvell CPU part 0x" << std::hex << midr_get_part(midr) << " ignored"; + std::cerr << "unknown Marvell CPU part 0x" << std::hex << midr_get_part(midr) << " ignored\n"; } break; // #endif /* ARM */ default: - LOGS_DEFAULT(WARNING) << "unknown CPU uarch from MIDR value: 0x" << std::hex << midr; + std::cerr << "unknown CPU uarch from MIDR value: 0x" << std::hex << midr << "\n"; } } diff --git 
a/onnxruntime/core/framework/op_node_proto_helper.cc b/onnxruntime/core/framework/op_node_proto_helper.cc index 38d67eb0e0c72..c3deb94300e78 100644 --- a/onnxruntime/core/framework/op_node_proto_helper.cc +++ b/onnxruntime/core/framework/op_node_proto_helper.cc @@ -182,7 +182,7 @@ ORT_DEFINE_GET_ATTRS_SPAN_SPECIALIZATION(float, floats) ORT_DEFINE_GET_ATTRS_SPAN_SPECIALIZATION(int64_t, ints) template -MUST_USE_RESULT Status OpNodeProtoHelper::GetAttrs(const std::string& name, TensorShapeVector& out) const { +Status OpNodeProtoHelper::GetAttrs(const std::string& name, TensorShapeVector& out) const { gsl::span span; Status status = this->GetAttrsAsSpan(name, span); if (status.IsOK()) { @@ -193,7 +193,7 @@ MUST_USE_RESULT Status OpNodeProtoHelper::GetAttrs(const std::string& na } template -MUST_USE_RESULT Status OpNodeProtoHelper::GetAttrsStringRefs( +Status OpNodeProtoHelper::GetAttrsStringRefs( const std::string& name, std::vector>& refs) const { const AttributeProto* attr = TryGetAttribute(name); diff --git a/onnxruntime/core/mlas/.clang-format b/onnxruntime/core/mlas/.clang-format index 4a89ef98cf049..16ad8bd8a7234 100644 --- a/onnxruntime/core/mlas/.clang-format +++ b/onnxruntime/core/mlas/.clang-format @@ -2,10 +2,12 @@ BasedOnStyle: Google IndentWidth: 4 -ColumnLimit: 100 +# Setting ColumnLimit to 0 so developer choices about where to break lines are maintained. +# Developers are responsible for adhering to the 120 character maximum. +ColumnLimit: 0 +AlignAfterOpenBracket: BlockIndent AlwaysBreakAfterReturnType: TopLevel AlwaysBreakTemplateDeclarations: Yes BinPackParameters: false BreakBeforeBraces: Linux ... - diff --git a/onnxruntime/core/mlas/inc/mlas_gemm_postprocessor.h b/onnxruntime/core/mlas/inc/mlas_gemm_postprocessor.h new file mode 100644 index 0000000000000..7ea29eb091318 --- /dev/null +++ b/onnxruntime/core/mlas/inc/mlas_gemm_postprocessor.h @@ -0,0 +1,33 @@ +/*++ + +Copyright (c) Microsoft Corporation. All rights reserved. + +Licensed under the MIT License. + +Module Name: + + mlas_gemm_postprocessor.h + +Abstract: + + This module contains a base class for custom postprocessing following a + GEMM. 
+ +--*/ + +#pragma once + +template +class MLAS_GEMM_POSTPROCESSOR +{ + public: + virtual void Process(T* C, /**< the address of matrix to process */ + size_t RangeStartM, /**< the start row index of matrix */ + size_t RangeStartN, /**< the start col index of matrix */ + size_t RangeCountM, /**< the element count per row to process */ + size_t RangeCountN, /**< the element count per col to process */ + size_t ldc /**< the leading dimension of matrix */ + ) const = 0; + + virtual ~MLAS_GEMM_POSTPROCESSOR() {} +}; diff --git a/onnxruntime/core/mlas/inc/mlas_q4.h b/onnxruntime/core/mlas/inc/mlas_q4.h index 7c7b729117e4a..316344ad8c214 100644 --- a/onnxruntime/core/mlas/inc/mlas_q4.h +++ b/onnxruntime/core/mlas/inc/mlas_q4.h @@ -21,6 +21,7 @@ Module Name: #pragma once #include "mlas.h" +#include "mlas_gemm_postprocessor.h" #include #include @@ -95,22 +96,6 @@ MlasQ4GemmUnPackB( ); -template -class MLAS_GEMM_POSTPROCESSOR -{ - public: - virtual void Process(T*, /**< the address of matrix to process */ - size_t, /**< the start row index of matrix */ - size_t, /**< the start col index of matrix */ - size_t, /**< the element count per row to process */ - size_t, /**< the element count per col to process */ - size_t /**< the leading dimension of matrix */ - ) const = 0; - - virtual ~MLAS_GEMM_POSTPROCESSOR() {} -}; - - /** * @brief Data parameters for Q4 GEMM routine * C = A * B + Bias @@ -241,7 +226,7 @@ MlasQ8Q4GemmBatch( * matrix shape [rows, columns], compute the shape of the * quantization parameter matrix [meta_rows, meta_cols] */ -template +template void MlasBlockwiseQuantMetaShape( int block_size, @@ -259,6 +244,7 @@ MlasBlockwiseQuantMetaShape( * is in column major layout, with bits packed on the column. * * @tparam T + * @tparam qbits * @param block_size * @param columnwise * @param rows @@ -266,7 +252,7 @@ MlasBlockwiseQuantMetaShape( * @param q_rows * @param q_cols */ -template +template void MlasBlockwiseQuantizedShape( int block_size, @@ -277,6 +263,32 @@ MlasBlockwiseQuantizedShape( int& q_cols ); +/** + * @brief Compute the sizes of the quantized data and quantization parameter buffers. + * + * @param qbits The bit width of each quantized value. + * @param block_size The number of quantized values in a block. + * @param columnwise Whether a block contains values from a matrix column (true) or row (false). + * @param rows Number of matrix rows. + * @param columns Number of matrix columns. + * @param[out] q_data_size_in_bytes The size in bytes of the quantized data. + * @param[out] q_scale_num_elements The size in elements of the scale quantization parameters. + * @param[out] q_zero_point_size_in_bytes The size in bytes of the zero point quantization parameters. Optional. + * + * If the qbits or block_size values are unsupported the output sizes will be zero. + */ +void MLASCALL +MlasBlockwiseQuantizedBufferSizes( + int qbits, + int block_size, + bool columnwise, + int rows, + int columns, + size_t& q_data_size_in_bytes, + size_t& q_scale_num_elements, + size_t* q_zero_point_size_in_bytes +); + /** * @brief Blockwise 4 bits quantization, resulting elements and quantization diff --git a/onnxruntime/core/mlas/inc/mlas_qnbit.h b/onnxruntime/core/mlas/inc/mlas_qnbit.h new file mode 100644 index 0000000000000..9620dd42d1da9 --- /dev/null +++ b/onnxruntime/core/mlas/inc/mlas_qnbit.h @@ -0,0 +1,79 @@ +/*++ + +Copyright (c) Microsoft Corporation. All rights reserved. + +Licensed under the MIT License. 
+ +Module Name: + + mlas_qnbit.h + +Abstract: + + This module contains the public data structures and procedure prototypes + for blocked n-bit quantized GEMM. + + N-bit block quantization is used to compress weight tensors of large + language models. + +--*/ + +#pragma once + +#include "mlas.h" +#include "mlas_gemm_postprocessor.h" + +/** + * @brief Data parameters for float/n-bit quantized int GEMM routine. + */ +struct MLAS_SQNBIT_GEMM_DATA_PARAMS { + const float* A = nullptr; ///< address of A (float32 matrix) + size_t lda = 0; ///< leading dimension of A + const void* QuantBData = nullptr; ///< address of quantized B (quantized n-bit int values) + const float* QuantBScale = nullptr; ///< address of scale values of quantized B, one per block + const void* QuantBZeroPoint = nullptr; ///< optional address of zero point values of quantized B, one per block + bool IsBPacked = false; ///< whether B values are packed in an optimized format for the computation + const float* Bias = nullptr; ///< optional address of Bias, vector size N + float* C = nullptr; ///< address of result matrix + size_t ldc = 0; ///< leading dimension of C + + ///< optional post processing to apply to result matrix + MLAS_GEMM_POSTPROCESSOR* PostProcessor = nullptr; +}; + +/** + * @brief Batched GEMM: C = A * B + Bias + * A must be a float32 matrix + * B must be a quantized and packed n-bit int matrix + * + * @param[in] M row size of matrix A and C + * @param[in] N column size of matrix B and C + * @param[in] K column size of matrix A and row size of matrix B + * @param[in] BatchN number of batches + * @param[in] BlkBitWidth quantized value bit width (e.g., 4 means 4 bit ints) + * @param[in] BlkLen number of quantized values per block + * @param[inout] DataParams An array (size BatchN) of parameter blocks + * @param[in] ThreadPool optional thread pool to use + */ +void MLASCALL +MlasSQNBitGemmBatch( + size_t M, + size_t N, + size_t K, + size_t BatchN, + size_t BlkBitWidth, + size_t BlkLen, + const MLAS_SQNBIT_GEMM_DATA_PARAMS* DataParams, + MLAS_THREADPOOL* ThreadPool = nullptr +); + +/** + * @brief Determines whether a float32/quantized n-bit int GEMM implementation is available on the current platform. + * @param[in] BlkBitWidth quantized value bit width (e.g., 4 means 4 bit ints) + * @param[in] BlkLen number of quantized values per block + */ +bool MLASCALL +MlasIsSQNBitGemmAvailable( + size_t BlkBitWidth, + size_t BlkLen +); diff --git a/onnxruntime/core/mlas/lib/mlasi.h b/onnxruntime/core/mlas/lib/mlasi.h index e0c2772cbb719..6c859e4e4f44b 100644 --- a/onnxruntime/core/mlas/lib/mlasi.h +++ b/onnxruntime/core/mlas/lib/mlasi.h @@ -890,14 +890,30 @@ extern const MLAS_CONV_SYM_DISPATCH MlasConvSymS8DispatchNeon; extern const MLAS_CONV_SYM_DISPATCH MlasConvSymU8DispatchDot; extern const MLAS_CONV_SYM_DISPATCH MlasConvSymS8DispatchDot; +// +// Quantized 8-bit integer/quantized 4-bit integer matrix/matrix multiply dispatch structure. +// + struct MLAS_Q8Q4GEMM_DISPATCH; extern const MLAS_Q8Q4GEMM_DISPATCH MlasQ8Q4GemmDispatchAvx512vnni; +// +// Float/quantized 4-bit integer matrix/matrix multiply dispatch structure. +// + struct MLAS_FPQ4GEMM_DISPATCH; extern const MLAS_FPQ4GEMM_DISPATCH MlasFpQ4GemmDispatchAvx512; +// +// Float/quantized n-bit integer matrix/matrix multiply dispatch structure. +// + +struct MLAS_SQNBIT_GEMM_DISPATCH; + +extern const MLAS_SQNBIT_GEMM_DISPATCH MlasSQNBitGemmDispatchNeon; + // // Quantized depthwise convolution kernels. 
// @@ -1029,6 +1045,8 @@ struct MLAS_PLATFORM { const MLAS_FPQ4GEMM_DISPATCH* FpQ4GemmDispatch{nullptr}; const MLAS_Q8Q4GEMM_DISPATCH* Q8Q4GemmDispatch{nullptr}; + + const MLAS_SQNBIT_GEMM_DISPATCH* SQNBitGemmDispatch{nullptr}; }; inline diff --git a/onnxruntime/core/mlas/lib/platform.cpp b/onnxruntime/core/mlas/lib/platform.cpp index 39586282e00ad..fec56c6ee063f 100644 --- a/onnxruntime/core/mlas/lib/platform.cpp +++ b/onnxruntime/core/mlas/lib/platform.cpp @@ -460,6 +460,7 @@ Return Value: this->SymmQgemmDispatch = &MlasSymmQgemmS8DispatchNeon; this->ConvSymU8S8Dispatch = &MlasConvSymU8DispatchNeon; this->ConvSymS8S8Dispatch = &MlasConvSymS8DispatchNeon; + this->SQNBitGemmDispatch = &MlasSQNBitGemmDispatchNeon; // // Check if the processor supports ASIMD dot product instructions. diff --git a/onnxruntime/core/mlas/lib/q4_dq.cpp b/onnxruntime/core/mlas/lib/q4_dq.cpp index fbd1030de8ab7..48d975a7fd26d 100644 --- a/onnxruntime/core/mlas/lib/q4_dq.cpp +++ b/onnxruntime/core/mlas/lib/q4_dq.cpp @@ -422,6 +422,24 @@ struct BlockwiseQuantizer { q_cols = meta_cols * QuantBlk::kColumn; } + static MLAS_FORCEINLINE void quantizedBufferSizes( + int rows, int columns, size_t& data_bytes, size_t& scale_num_elements, size_t* zero_point_bytes + ) + { + int meta_rows, meta_cols; + quantizeMetaShape(rows, columns, meta_rows, meta_cols); + int q_rows, q_cols; + quantizedShape(rows, columns, q_rows, q_cols); + + data_bytes = q_rows * q_cols; + scale_num_elements = meta_rows * meta_cols; + + if (zero_point_bytes) { + // this works for qbits == 4 but may need to be updated for other qbits values + *zero_point_bytes = ((meta_rows * qbits + 7) / 8) * meta_cols; + } + } + /** * @brief Quantized a Matrix shape [rows, columns], resulting quantized * and packed data are stored in column major (transposed) @@ -621,7 +639,7 @@ struct BlockwiseQuantizer { }; -template +template void MlasBlockwiseQuantMetaShape( int block_size, @@ -635,47 +653,47 @@ MlasBlockwiseQuantMetaShape( switch (block_size) { case 16: { if (columnwise) { - BlockwiseQuantizer::quantizeMetaShape(rows, columns, meta_rows, meta_cols); + BlockwiseQuantizer::quantizeMetaShape(rows, columns, meta_rows, meta_cols); } else { - BlockwiseQuantizer::quantizeMetaShape(rows, columns, meta_rows, meta_cols); + BlockwiseQuantizer::quantizeMetaShape(rows, columns, meta_rows, meta_cols); } break; } case 32: { if (columnwise) { - BlockwiseQuantizer::quantizeMetaShape(rows, columns, meta_rows, meta_cols); + BlockwiseQuantizer::quantizeMetaShape(rows, columns, meta_rows, meta_cols); } else { - BlockwiseQuantizer::quantizeMetaShape( + BlockwiseQuantizer::quantizeMetaShape( rows, columns, meta_rows, meta_cols); } break; } case 64: { if (columnwise) { - BlockwiseQuantizer::quantizeMetaShape(rows, columns, meta_rows, + BlockwiseQuantizer::quantizeMetaShape(rows, columns, meta_rows, meta_cols); } else { - BlockwiseQuantizer::quantizeMetaShape(rows, columns, meta_rows, + BlockwiseQuantizer::quantizeMetaShape(rows, columns, meta_rows, meta_cols); } break; } case 128: { if (columnwise) { - BlockwiseQuantizer::quantizeMetaShape(rows, columns, meta_rows, + BlockwiseQuantizer::quantizeMetaShape(rows, columns, meta_rows, meta_cols); } else { - BlockwiseQuantizer::quantizeMetaShape(rows, columns, meta_rows, + BlockwiseQuantizer::quantizeMetaShape(rows, columns, meta_rows, meta_cols); } break; } case 256: { if (columnwise) { - BlockwiseQuantizer::quantizeMetaShape(rows, columns, meta_rows, + BlockwiseQuantizer::quantizeMetaShape(rows, columns, meta_rows, meta_cols); } else { - 
BlockwiseQuantizer::quantizeMetaShape(rows, columns, meta_rows, + BlockwiseQuantizer::quantizeMetaShape(rows, columns, meta_rows, meta_cols); } break; @@ -689,7 +707,7 @@ MlasBlockwiseQuantMetaShape( -template +template void MlasBlockwiseQuantizedShape( int block_size, @@ -703,42 +721,42 @@ MlasBlockwiseQuantizedShape( switch (block_size) { case 16: { if (columnwise) { - BlockwiseQuantizer::quantizedShape(rows, columns, q_rows, q_cols); + BlockwiseQuantizer::quantizedShape(rows, columns, q_rows, q_cols); } else { - BlockwiseQuantizer::quantizedShape(rows, columns, q_rows, q_cols); + BlockwiseQuantizer::quantizedShape(rows, columns, q_rows, q_cols); } break; } case 32: { if (columnwise) { - BlockwiseQuantizer::quantizedShape(rows, columns, q_rows, q_cols); + BlockwiseQuantizer::quantizedShape(rows, columns, q_rows, q_cols); } else { - BlockwiseQuantizer::quantizedShape( + BlockwiseQuantizer::quantizedShape( rows, columns, q_rows, q_cols); } break; } case 64: { if (columnwise) { - BlockwiseQuantizer::quantizedShape(rows, columns, q_rows, q_cols); + BlockwiseQuantizer::quantizedShape(rows, columns, q_rows, q_cols); } else { - BlockwiseQuantizer::quantizedShape(rows, columns, q_rows, q_cols); + BlockwiseQuantizer::quantizedShape(rows, columns, q_rows, q_cols); } break; } case 128: { if (columnwise) { - BlockwiseQuantizer::quantizedShape(rows, columns, q_rows, q_cols); + BlockwiseQuantizer::quantizedShape(rows, columns, q_rows, q_cols); } else { - BlockwiseQuantizer::quantizedShape(rows, columns, q_rows, q_cols); + BlockwiseQuantizer::quantizedShape(rows, columns, q_rows, q_cols); } break; } case 256: { if (columnwise) { - BlockwiseQuantizer::quantizedShape(rows, columns, q_rows, q_cols); + BlockwiseQuantizer::quantizedShape(rows, columns, q_rows, q_cols); } else { - BlockwiseQuantizer::quantizedShape(rows, columns, q_rows, q_cols); + BlockwiseQuantizer::quantizedShape(rows, columns, q_rows, q_cols); } break; } @@ -752,7 +770,7 @@ MlasBlockwiseQuantizedShape( template void -MlasBlockwiseQuantMetaShape( +MlasBlockwiseQuantMetaShape( int block_size, bool columnwise, int rows, @@ -763,7 +781,7 @@ MlasBlockwiseQuantMetaShape( template void -MlasBlockwiseQuantizedShape( +MlasBlockwiseQuantizedShape( int block_size, bool columnwise, int rows, @@ -773,6 +791,93 @@ MlasBlockwiseQuantizedShape( ); +void MLASCALL +MlasBlockwiseQuantizedBufferSizes( + int qbits, + int block_size, + bool columnwise, + int rows, + int columns, + size_t& q_data_size_in_bytes, + size_t& q_scale_num_elements, + size_t* q_zero_point_size_in_bytes +) +{ + q_data_size_in_bytes = q_scale_num_elements = 0; + if (q_zero_point_size_in_bytes) { + *q_zero_point_size_in_bytes = 0; + } + + if (qbits == 4) { + switch (block_size) { + case 16: + if (columnwise) { + BlockwiseQuantizer::quantizedBufferSizes( + rows, columns, q_data_size_in_bytes, q_scale_num_elements, q_zero_point_size_in_bytes + ); + } else { + BlockwiseQuantizer::quantizedBufferSizes( + rows, columns, q_data_size_in_bytes, q_scale_num_elements, q_zero_point_size_in_bytes + ); + } + break; + + case 32: + if (columnwise) { + BlockwiseQuantizer::quantizedBufferSizes( + rows, columns, q_data_size_in_bytes, q_scale_num_elements, q_zero_point_size_in_bytes + ); + } else { + BlockwiseQuantizer::quantizedBufferSizes( + rows, columns, q_data_size_in_bytes, q_scale_num_elements, q_zero_point_size_in_bytes + ); + } + break; + + case 64: + if (columnwise) { + BlockwiseQuantizer::quantizedBufferSizes( + rows, columns, q_data_size_in_bytes, q_scale_num_elements, 
q_zero_point_size_in_bytes + ); + } else { + BlockwiseQuantizer::quantizedBufferSizes( + rows, columns, q_data_size_in_bytes, q_scale_num_elements, q_zero_point_size_in_bytes + ); + } + break; + + case 128: + if (columnwise) { + BlockwiseQuantizer::quantizedBufferSizes( + rows, columns, q_data_size_in_bytes, q_scale_num_elements, q_zero_point_size_in_bytes + ); + } else { + BlockwiseQuantizer::quantizedBufferSizes( + rows, columns, q_data_size_in_bytes, q_scale_num_elements, q_zero_point_size_in_bytes + ); + } + break; + + case 256: + if (columnwise) { + BlockwiseQuantizer::quantizedBufferSizes( + rows, columns, q_data_size_in_bytes, q_scale_num_elements, q_zero_point_size_in_bytes + ); + } else { + BlockwiseQuantizer::quantizedBufferSizes( + rows, columns, q_data_size_in_bytes, q_scale_num_elements, q_zero_point_size_in_bytes + ); + } + break; + + default: + // Only block size 16, 32, 64, 128, 256 are supported. + break; + } + } +} + + template void MlasQuantizeBlockwise( diff --git a/onnxruntime/core/mlas/lib/q4gemm.h b/onnxruntime/core/mlas/lib/q4gemm.h index 1562f9c0b4236..b1b51dd53c4fc 100644 --- a/onnxruntime/core/mlas/lib/q4gemm.h +++ b/onnxruntime/core/mlas/lib/q4gemm.h @@ -90,7 +90,7 @@ MlasQ4GemmOperation( if (DataParams->OutputProcessor != nullptr) { DataParams->OutputProcessor->Process( - DataParams->C, RangeStartM + RangeCountM - RowsRemaining, RangeStartN, + DataParams->C, RangeStartM + RangeCountM - RowsRemaining, RangeStartN + n, RowsHandled, CountN, ldc); } diff --git a/onnxruntime/core/mlas/lib/sqnbitgemm.cpp b/onnxruntime/core/mlas/lib/sqnbitgemm.cpp new file mode 100644 index 0000000000000..f964b1affec31 --- /dev/null +++ b/onnxruntime/core/mlas/lib/sqnbitgemm.cpp @@ -0,0 +1,144 @@ +/*++ + +Copyright (c) Microsoft Corporation. All rights reserved. + +Licensed under the MIT License. + +Module Name: + + sqnbitgemm.cpp + +Abstract: + + This module implements the float/quantized n-bit integer matrix + multiplication hardware agnostic entrypoint, MlasSQNBitGemmBatch. +--*/ + +#include "sqnbitgemm.h" + +namespace +{ + +// Get quantization variant based on `BlkBitWidth` and `BlkLen`. +// Return -1 if the input values are unsupported. +int32_t +GetDispatchQuantVariant(size_t BlkBitWidth, size_t BlkLen) +{ + int32_t type = -1; + if (BlkBitWidth == 4 && BlkLen == 16) { + type = QuantVariant_BitWidth4_BlockSize16; + } else if (BlkBitWidth == 4 && BlkLen == 32) { + type = QuantVariant_BitWidth4_BlockSize32; + } else if (BlkBitWidth == 4 && BlkLen == 64) { + type = QuantVariant_BitWidth4_BlockSize64; + } else if (BlkBitWidth == 4 && BlkLen == 128) { + type = QuantVariant_BitWidth4_BlockSize128; + } else if (BlkBitWidth == 4 && BlkLen == 256) { + type = QuantVariant_BitWidth4_BlockSize256; + } + + return type; +} + +} // namespace + +void MLASCALL +MlasSQNBitGemmBatch( + const size_t M, + const size_t N, + const size_t K, + const size_t BatchN, + const size_t BlkBitWidth, + const size_t BlkLen, + const MLAS_SQNBIT_GEMM_DATA_PARAMS* DataParams, + MLAS_THREADPOOL* ThreadPool +) +{ + const int32_t QuantVariant = GetDispatchQuantVariant(BlkBitWidth, BlkLen); + MLAS_SQNBIT_GEMM_OPERATION* const Operation = GetMlasPlatform().SQNBitGemmDispatch->Operations[QuantVariant]; + + if (ThreadPool == nullptr) { + for (size_t gemm_i = 0; gemm_i < BatchN; gemm_i++) { + auto Data = &DataParams[gemm_i]; + Operation(K, Data, 0, M, 0, N); + } + return; + } + + // + // Compute the number of target threads given the complexity of the SGEMM + // operation. 
Small requests should run using the single threaded path. + // + + const double Complexity = double(M) * double(N) * double(K) * double(BatchN); + + ptrdiff_t TargetThreadCount = ptrdiff_t(Complexity / double(MLAS_QGEMM_THREAD_COMPLEXITY)) + 1; + + ptrdiff_t MaximumThreadCount = MlasGetMaximumThreadCount(ThreadPool) * 8; + + if (TargetThreadCount >= MaximumThreadCount) { + TargetThreadCount = MaximumThreadCount; + } + + ptrdiff_t ThreadsPerGemm = TargetThreadCount / BatchN; + if (ThreadsPerGemm < 1) { + ThreadsPerGemm = 1; + } + + constexpr size_t StrideM = 128; + + size_t nc = N; + if (ThreadsPerGemm > 1) { + // more than one thread per GEMM + + const size_t BlockedM = MlasDivRoundup(M, StrideM); + const size_t max_nc = MlasDivRoundup(N * BlockedM, ThreadsPerGemm); + if (max_nc < nc) { + nc = std::min( + nc, MlasDivRoundup(max_nc, MLAS_QGEMM_STRIDEN_THREAD_ALIGN) * + MLAS_QGEMM_STRIDEN_THREAD_ALIGN + ); + } + } + const size_t StrideN = nc; + + const size_t ThreadCountM = MlasDivRoundup(M, StrideM); + const size_t ThreadCountN = MlasDivRoundup(N, StrideN); + ThreadsPerGemm = ThreadCountM * ThreadCountN; + + MlasTrySimpleParallel(ThreadPool, ThreadsPerGemm * BatchN, [&](ptrdiff_t tid) { + const auto gemm_i = tid / ThreadsPerGemm; + const auto blk_i = tid % ThreadsPerGemm; + auto Data = &DataParams[gemm_i]; + + const ptrdiff_t ThreadIdN = blk_i / ThreadCountM; + const ptrdiff_t ThreadIdM = blk_i % ThreadCountM; + + const size_t RangeStartM = ThreadIdM * StrideM; + const size_t RangeCountM = std::min(M - RangeStartM, (size_t)StrideM); + + const size_t RangeStartN = ThreadIdN * StrideN; + const size_t RangeCountN = std::min(N - RangeStartN, (size_t)StrideN); + + Operation(K, Data, RangeStartM, RangeCountM, RangeStartN, RangeCountN); + }); +} + +bool MLASCALL +MlasIsSQNBitGemmAvailable( + size_t BlkBitWidth, + size_t BlkLen +) +{ + const int32_t QuantVariant = GetDispatchQuantVariant(BlkBitWidth, BlkLen); + if (QuantVariant == -1) { + return false; + } + + if (GetMlasPlatform().SQNBitGemmDispatch == nullptr || + GetMlasPlatform().SQNBitGemmDispatch->Operations[QuantVariant] == nullptr) { + return false; + } + + return true; +} diff --git a/onnxruntime/core/mlas/lib/sqnbitgemm.h b/onnxruntime/core/mlas/lib/sqnbitgemm.h new file mode 100644 index 0000000000000..f8f7dcd43699f --- /dev/null +++ b/onnxruntime/core/mlas/lib/sqnbitgemm.h @@ -0,0 +1,287 @@ +/*++ + +Copyright (c) Microsoft Corporation. All rights reserved. + +Licensed under the MIT License. + +Module Name: + + sqnbitgemm.h + +Abstract: + + This module includes: + + - Declaration of the set of template functions used to implement a kernel + for a matrix/matrix multiplication, A*B, where A is a float matrix and B is + a n-bit quantized integer matrix (QNBitGemm). + + - A shared kernel driver function template, MlasSQNBitGemmOperation. + + - Kernel dispatch structure. + + The B matrix is block quantized, which means that its values are grouped + into blocks which each have one scale and optional zero point. Each + quantized value in B is n-bits wide. + +--*/ + +#pragma once + +#include "mlas_qnbit.h" +#include "mlasi.h" + +// +// Kernel implementation template declarations +// + +/** + * @brief Multiply float matrix A with quantized n-bit integer matrix B. + * B is block quantized and column major. + * This kernel handles the special case where M, the number of rows of A and C, is 1. + * + * @tparam BlkBitWidth Bit width of each value in a block. + * @tparam BlkLen Number of values in a block. 
+ * @tparam KernelType Hardware-specific kernel type. + * + * @param A Supplies the A matrix. + * @param QuantBData Supplies the quantized B matrix block data. + * @param QuantBScale Supplies the quantized B matrix block scale values. + * @param QuantBZeroPoint Supplies the quantized B matrix block zero point values. Optional. + * @param[out] C Supplies the output C matrix. + * @param CountN Number of columns of B and C. + * @param CountK Number of columns of A and rows of B. + * @param BlockStrideQuantB Number of blocks between adjacent columns of the quantized B matrix. + * @param Bias Bias vector of length N. + */ +template +MLAS_FORCEINLINE void +MlasSQNBitGemmM1Kernel( + const float* A, + const uint8_t* QuantBData, + const float* QuantBScale, + const uint8_t* QuantBZeroPoint, + float* C, + size_t CountN, + size_t CountK, + size_t BlockStrideQuantB, + const float* Bias +); + +/** + * @brief Dequantize B into the format expected by the Sgemm kernel. + * B is block quantized and column major. + * This is equivalent to dequantizing B and then running + * MlasSgemmCopyPackB. + * + * @tparam BlkBitWidth Bit width of each value in a block. + * @tparam BlkLen Number of values in a block. + * @tparam KernelType Hardware-specific kernel type. + * + * @param[out] FpData Supplies the output buffer for the dequantized B float data. + * @param QuantBData Supplies the quantized B matrix block data. + * @param QuantBScale Supplies the quantized B matrix block scale values. + * @param QuantBZeroPoint Supplies the quantized B matrix block zero point values. Optional. + * @param CountN Number of columns of B. + * @param CountK Number of rows of B. + * @param BlockStrideQuantB Number of blocks between adjacent columns of the quantized B matrix. + */ +template +MLAS_FORCEINLINE void +MlasQNBitBlkDequantBForSgemm( + float* FpData, + const uint8_t* QuantBData, + const float* QuantBScale, + const uint8_t* QuantBZeroPoint, + size_t CountN, + size_t CountK, + size_t BlockStrideQuantB +); + +// +// MlasQNBitGemmOperation and helpers +// + +constexpr MLAS_FORCEINLINE size_t +MlasQNBitBlkDataSizeInBytes(size_t BlkBitWidth, size_t BlkLen) +{ + return BlkLen * BlkBitWidth / 8; +} + +template +constexpr MLAS_FORCEINLINE size_t +MlasQNBitZeroPointsForBlksSizeInBytes(size_t BlkCount) +{ + if constexpr (BlkBitWidth <= 4) { + return MlasDivRoundup(BlkCount, 2); // 2 blocks per byte + } else { + return BlkCount; + } +} + +MLAS_FORCEINLINE void +MlasAddBiasForGemm(const float* Bias, float* C, size_t CountM, size_t CountN, size_t ldc) +{ + for (size_t m = 0; m < CountM; m++) { + const float* bias = Bias; + float* sum = C; + for (size_t n = 0; n < CountN; n += 4) { + if (CountN - n < 4) { + for (size_t nn = n; nn < CountN; nn++) { + *sum += *bias; + sum++; + bias++; + } + break; + } + + MLAS_FLOAT32X4 acc_x = MlasLoadFloat32x4(sum); + acc_x = MlasAddFloat32x4(acc_x, MlasLoadFloat32x4(bias)); + MlasStoreFloat32x4(sum, acc_x); + bias += 4; + sum += 4; + } + C += ldc; + } +} + +template +MLAS_FORCEINLINE void MLASCALL +MlasSQNBitGemmOperation( + const size_t K, + const MLAS_SQNBIT_GEMM_DATA_PARAMS* const DataParams, + const size_t RangeStartM, + const size_t RangeCountM, + const size_t RangeStartN, + const size_t RangeCountN +) +{ + const size_t lda = DataParams->lda; + const size_t ldc = DataParams->ldc; + + const size_t k_blks = MlasDivRoundup(K, BlkLen); + const size_t ldb = k_blks * MlasQNBitBlkDataSizeInBytes(BlkBitWidth, BlkLen); + const size_t k_blks_zp_bytes = MlasQNBitZeroPointsForBlksSizeInBytes(k_blks); + + const 
float* A = DataParams->A + RangeStartM * lda; + + const uint8_t* QuantBData = static_cast(DataParams->QuantBData) + RangeStartN * ldb; + const float* QuantBScale = DataParams->QuantBScale + RangeStartN * k_blks; + const uint8_t* QuantBZeroPoint = + (DataParams->QuantBZeroPoint == nullptr) + ? nullptr + : static_cast(DataParams->QuantBZeroPoint) + RangeStartN * k_blks_zp_bytes; + + float* C = DataParams->C + RangeStartM * ldc + RangeStartN; + + const float* Bias = (DataParams->Bias == nullptr) ? nullptr : DataParams->Bias + RangeStartN; + + if (RangeCountM == 1) { + size_t CountN; + for (size_t n = 0; n < RangeCountN; n += CountN) { + CountN = std::min(RangeCountN - n, size_t{128}); + + const float* a_row = A; + const uint8_t* b_col = QuantBData + n * ldb; + const float* b_col_scale = QuantBScale + n * k_blks; + const uint8_t* b_col_zp = + (QuantBZeroPoint == nullptr) ? nullptr : QuantBZeroPoint + n * k_blks_zp_bytes; + float* c_blk = C + n; + const float* bias = (Bias == nullptr) ? nullptr : Bias + n; + + MlasSQNBitGemmM1Kernel( + a_row, b_col, b_col_scale, b_col_zp, c_blk, CountN, K, k_blks, bias + ); + + if (DataParams->PostProcessor != nullptr) { + DataParams->PostProcessor->Process( + DataParams->C, RangeStartM, RangeStartN + n, + RangeCountM, CountN, ldc + ); + } + } + return; + } + + constexpr size_t StrideN = 32; + size_t bufsize = k_blks * BlkLen * StrideN * sizeof(float); + MlasThreadedBufAlloc(bufsize); + auto* dequant_b = reinterpret_cast(ThreadedBufHolder.get()); + // + // Step through each slice of matrix B along the N dimension. + // + + size_t CountN; + for (size_t n = 0; n < RangeCountN; n += CountN) { + CountN = std::min(RangeCountN - n, StrideN); + + // + // Step through each slice of matrix A along the M dimension. + // + const float* a_row = A; + const uint8_t* b_col = QuantBData + n * ldb; + const float* b_col_scale = QuantBScale + n * k_blks; + const uint8_t* b_col_zp = + (QuantBZeroPoint == nullptr) ? nullptr : QuantBZeroPoint + n * k_blks_zp_bytes; + float* c_blk = C + n; + const float* bias = (Bias == nullptr) ? nullptr : Bias + n; + + MlasQNBitBlkDequantBForSgemm( + dequant_b, b_col, b_col_scale, b_col_zp, CountN, K, k_blks + ); + + size_t RowsRemaining = RangeCountM; + while (RowsRemaining > 0) { +#if defined(MLAS_TARGET_AMD64_IX86) || defined(MLAS_TARGET_POWER) + auto RowsHandled = GetMlasPlatform().GemmFloatKernel( + a_row, dequant_b, c_blk, K, RowsRemaining, CountN, lda, ldc, 1.f, true + ); +#else + auto RowsHandled = MlasSgemmKernelZero(a_row, dequant_b, c_blk, K, RowsRemaining, CountN, lda, ldc, 1.f); +#endif + + if (bias) { + MlasAddBiasForGemm(bias, c_blk, RowsHandled, CountN, ldc); + } + if (DataParams->PostProcessor != nullptr) { + DataParams->PostProcessor->Process( + DataParams->C, RangeStartM + RangeCountM - RowsRemaining, RangeStartN, + RowsHandled, CountN, ldc + ); + } + + c_blk += ldc * RowsHandled; + a_row += lda * RowsHandled; + RowsRemaining -= RowsHandled; + } + } +} + +// +// Kernel dispatch structure. +// + +typedef void(MLASCALL MLAS_SQNBIT_GEMM_OPERATION)( + size_t K, + const MLAS_SQNBIT_GEMM_DATA_PARAMS* DataParams, + size_t RangeStartM, + size_t RangeCountM, + size_t RangeStartN, + size_t RangeCountN +); + +enum QuantVariant { + QuantVariant_BitWidth4_BlockSize16, + QuantVariant_BitWidth4_BlockSize32, + QuantVariant_BitWidth4_BlockSize64, + QuantVariant_BitWidth4_BlockSize128, + QuantVariant_BitWidth4_BlockSize256, + QuantVariantCount, // Keep this element last and ensure that its value is the number of other QuantVariant values. 
+ // Its value is used as an array size. +}; + +struct MLAS_SQNBIT_GEMM_DISPATCH { + MLAS_SQNBIT_GEMM_OPERATION* Operations[QuantVariantCount] = { + // Initialized to nullptrs. Overwrite in hardware-specific kernel implementation. + }; +}; diff --git a/onnxruntime/core/mlas/lib/sqnbitgemm_kernel_neon.cpp b/onnxruntime/core/mlas/lib/sqnbitgemm_kernel_neon.cpp new file mode 100644 index 0000000000000..63afe57dd9137 --- /dev/null +++ b/onnxruntime/core/mlas/lib/sqnbitgemm_kernel_neon.cpp @@ -0,0 +1,489 @@ +/*++ + +Copyright (c) Microsoft Corporation. All rights reserved. + +Licensed under the MIT License. + +Module Name: + + sqnbitgemm_kernel_neon.h + +Abstract: + + This module implements the float/quantized n-bit integer matrix + multiplication kernels for ARM NEON. + +--*/ + +#include "sqnbitgemm.h" + +#include + +#include +#include +#include + +// +// Hardware-specific kernel type. +// +struct MLAS_SQNBIT_GEMM_KERNEL_NEON { +}; + +namespace +{ + +template +MLAS_FORCEINLINE void +UnrolledLoopIterations(IterationFn&& f, std::index_sequence /* indices */) +{ + (f(Indices), ...); +} + +template +MLAS_FORCEINLINE void +UnrolledLoop(IterationFn&& f) +{ + UnrolledLoopIterations(std::forward(f), std::make_index_sequence()); +} + +MLAS_FORCEINLINE float32x4_t +FoldAccumulators(float32x4_t a0, float32x4_t a1, float32x4_t a2, float32x4_t a3) +{ + // aN: aN_0 aN_1 aN_2 aN_3 + + float32x4_t b0 = vzip1q_f32(a0, a1); // a0_0 a1_0 a0_1 a1_1 + float32x4_t b1 = vzip2q_f32(a0, a1); // a0_2 a1_2 a0_3 a1_3 + float32x4_t b2 = vzip1q_f32(a2, a3); // a2_0 a3_0 a2_1 a3_1 + float32x4_t b3 = vzip2q_f32(a2, a3); // a2_2 a3_2 a2_3 a3_3 + + // a0_0 a1_0 a2_0 a3_0 + a0 = vreinterpretq_f32_f64(vzip1q_f64(vreinterpretq_f64_f32(b0), vreinterpretq_f64_f32(b2))); + // a0_1 a1_1 a2_1 a3_1 + a1 = vreinterpretq_f32_f64(vzip2q_f64(vreinterpretq_f64_f32(b0), vreinterpretq_f64_f32(b2))); + // a0_2 a1_2 a3_2 a3_2 + a2 = vreinterpretq_f32_f64(vzip1q_f64(vreinterpretq_f64_f32(b1), vreinterpretq_f64_f32(b3))); + // a0_3 a1_3 a2_3 a3_3 + a3 = vreinterpretq_f32_f64(vzip2q_f64(vreinterpretq_f64_f32(b1), vreinterpretq_f64_f32(b3))); + + return vaddq_f32(vaddq_f32(a0, a1), vaddq_f32(a2, a3)); +} + +template +MLAS_FORCEINLINE void +LoadData(const float* src, size_t count, float32x4_t (& dst)[Capacity / 4]) +{ + static_assert(Capacity % 4 == 0, "Capacity must be divisible by 4."); + + assert(count <= Capacity); + + size_t vi = 0; // vector index + + // handle 4 values at a time + while (count > 3) { + dst[vi] = vld1q_f32(src); + + vi += 1; + src += 4; + count -= 4; + } + + // handle remaining values + if (count > 0) { + dst[vi] = vsetq_lane_f32(src[0], dst[vi], 0); + + if (count > 1) { + dst[vi] = vsetq_lane_f32(src[1], dst[vi], 1); + + if (count > 2) { + dst[vi] = vsetq_lane_f32(src[2], dst[vi], 2); + } + } + } +} + +template +MLAS_FORCEINLINE void +ComputeDotProducts( + const float* ARowPtr, + const uint8_t* QuantBDataColPtr, + const float* QuantBScaleColPtr, + const uint8_t* QuantBZeroPointColPtr, + float* SumPtr, + size_t CountK, + size_t StrideQuantBData, + size_t StrideQuantBScale, + size_t StrideQuantBZeroPoint, + const float* BiasPtr +) +{ + static_assert(NCols == 1 || NCols == 4, "NCols must be 1 or 4"); + + const uint8x8_t LowMask = vdup_n_u8(0x0F); + + // Manual conversion to float takes place in two steps: + // 1. Map 4-bit values from [0, 15] to float values from [16.0f, 31.0f]. + // This target float range is convenient because the 4-bit source values can be placed directly into the + // target float bits. + // 2. 
Subtract the conversion offset of 16 from the float result. + + // The high 16 bits of an IEEE 754 32-bit float used as a template for creating float values. + constexpr uint16_t float_high_half_template = 0b0'10000011'0000000; + // sign|exponent|partial mantissa + // +|131: 2^4|~~~~ <- 4 bits go here + + const uint16x8_t float_high_half_template_v = vdupq_n_u16(float_high_half_template); + + float32x4_t acc[NCols]{}; + + const uint8_t* QuantBData = QuantBDataColPtr; + const float* QuantBScale = QuantBScaleColPtr; + size_t QuantBZeroPointIdx = 0; // track half byte increments with this index instead of a pointer + + for (size_t k = 0; k < CountK; k += BlkLen) { + const size_t k_blk_len = std::min(CountK - k, BlkLen); + + float scale[NCols]; + UnrolledLoop( + [&](size_t i) { scale[i] = QuantBScale[i * StrideQuantBScale]; } + ); + + float offset[NCols]; // Includes zero point and float conversion offset of 16. + if (QuantBZeroPointColPtr != nullptr) { + UnrolledLoop([&](size_t i) { + const uint8_t zp_packed = + QuantBZeroPointColPtr[i * StrideQuantBZeroPoint + QuantBZeroPointIdx / 2]; + const uint8_t zp = ((QuantBZeroPointIdx & 1) == 1) ? (zp_packed >> 4) : (zp_packed & 0x0F); + offset[i] = 16.0f + zp; + }); + } else { + UnrolledLoop([&](size_t i) { + constexpr float zp = 8.0f; + offset[i] = 16.0f + zp; + }); + } + + constexpr size_t SubBlkLen = 16; // number of block elements to process in one iteration + + for (size_t k_idx_in_blk = 0; k_idx_in_blk < k_blk_len; k_idx_in_blk += SubBlkLen) { + // load A row vector elements + + // load `SubBlkLen` elements from A, padded with 0's if there aren't enough + const size_t k_subblk_len = std::min(k_blk_len - k_idx_in_blk, SubBlkLen); + float32x4_t av[4]{}; + LoadData(ARowPtr + k + k_idx_in_blk, k_subblk_len, av); + + // load B column vectors + uint8x8_t bv_packed[NCols]; + UnrolledLoop([&](size_t i) { + const size_t b_data_block_offset = k_idx_in_blk * BlkBitWidth / 8; + bv_packed[i] = vld1_u8(QuantBData + i * StrideQuantBData + b_data_block_offset); + }); + + uint8x8_t bv_u8_unzipped[NCols][2]; + UnrolledLoop([&](size_t i) { + bv_u8_unzipped[i][0] = vand_u8(bv_packed[i], LowMask); + bv_u8_unzipped[i][1] = vand_u8(vshr_n_u8(bv_packed[i], 4), LowMask); + }); + + uint8x8_t bv_u8[NCols][2]; + UnrolledLoop([&](size_t i) { + bv_u8[i][0] = vzip1_u8(bv_u8_unzipped[i][0], bv_u8_unzipped[i][1]); + bv_u8[i][1] = vzip2_u8(bv_u8_unzipped[i][0], bv_u8_unzipped[i][1]); + }); + + // dequantize B + + // shift left 3 and widen to 16 bits + uint16x8_t bv_u16[NCols][2]; + UnrolledLoop([&](size_t i) { + constexpr int shift = 3; + bv_u16[i][0] = vshll_n_u8(bv_u8[i][0], shift); + bv_u16[i][1] = vshll_n_u8(bv_u8[i][1], shift); + }); + + // combine 4 bits with float high half template + UnrolledLoop([&](size_t i) { + bv_u16[i][0] = vorrq_u16(bv_u16[i][0], float_high_half_template_v); + bv_u16[i][1] = vorrq_u16(bv_u16[i][1], float_high_half_template_v); + }); + + // `SubBlkLen` floats of B + float32x4_t bv[NCols][4]; + + // shift left 16, widen to 32 bits, and reinterpret as float + UnrolledLoop([&](size_t i) { + constexpr int shift = 16; + bv[i][0] = vreinterpretq_f32_u32(vshll_n_u16(vget_low_u16(bv_u16[i][0]), shift)); + bv[i][1] = vreinterpretq_f32_u32(vshll_high_n_u16(bv_u16[i][0], shift)); + + bv[i][2] = vreinterpretq_f32_u32(vshll_n_u16(vget_low_u16(bv_u16[i][1]), shift)); + bv[i][3] = vreinterpretq_f32_u32(vshll_high_n_u16(bv_u16[i][1], shift)); + }); + + // subtract float conversion offset (16) and zero point + UnrolledLoop([&](size_t i) { + const float32x4_t 
offset_v = vdupq_n_f32(offset[i]); + UnrolledLoop<4>([&](size_t j) { bv[i][j] = vsubq_f32(bv[i][j], offset_v); }); + }); + + // multiply by scale + UnrolledLoop([&](size_t i) { + const float32x4_t scale_v = vdupq_n_f32(scale[i]); + UnrolledLoop<4>([&](size_t j) { bv[i][j] = vmulq_f32(bv[i][j], scale_v); }); + }); + + // c[m,n] += a[m,k] * b[k,n] + UnrolledLoop<4>([&](size_t j) { + UnrolledLoop([&](size_t i) { acc[i] = vfmaq_f32(acc[i], av[j], bv[i][j]); }); + }); + } + + // increment pointers to next block + QuantBData += MlasQNBitBlkDataSizeInBytes(BlkBitWidth, BlkLen); + QuantBScale += 1; + QuantBZeroPointIdx += 1; + } + + if constexpr (NCols == 4) { + float32x4_t sum = FoldAccumulators(acc[0], acc[1], acc[2], acc[3]); + + if (BiasPtr != nullptr) { + sum = vaddq_f32(sum, vld1q_f32(BiasPtr)); + } + + vst1q_f32(SumPtr, sum); + } else { + for (size_t i = 0; i < NCols; ++i) { + SumPtr[i] = vaddvq_f32(acc[i]); + if (BiasPtr != nullptr) { + SumPtr[i] += BiasPtr[i]; + } + } + } +} + +} // namespace + +// +// MlasSQNBitGemmKernel and helpers. +// + +template +MLAS_FORCEINLINE void +MlasSQNBitGemmM1KernelNeon( + const float* A, + const uint8_t* QuantBData, + const float* QuantBScale, + const uint8_t* QuantBZeroPoint, + float* C, + size_t CountN, + size_t CountK, + size_t BlockStrideQuantB, + const float* Bias +) +{ + constexpr size_t NCols = 4; + + const float* ARowPtr = A; + float* CRowPtr = C; + + const size_t BlockCountK = BlockStrideQuantB; + + const size_t StrideQuantBData = BlockCountK * MlasQNBitBlkDataSizeInBytes(BlkBitWidth, BlkLen); + const size_t StrideQuantBScale = BlockCountK; + const size_t StrideQuantBZeroPoint = MlasQNBitZeroPointsForBlksSizeInBytes(BlockCountK); + + const float* BiasPtr = Bias; + + const uint8_t* QuantBDataColPtr = QuantBData; + const float* QuantBScaleColPtr = QuantBScale; + const uint8_t* QuantBZeroPointColPtr = QuantBZeroPoint; + + float* SumPtr = CRowPtr; + + int64_t nblk = static_cast(CountN) - NCols; + + while (nblk >= 0) { + ComputeDotProducts( + ARowPtr, QuantBDataColPtr, QuantBScaleColPtr, QuantBZeroPointColPtr, SumPtr, CountK, + StrideQuantBData, StrideQuantBScale, StrideQuantBZeroPoint, + BiasPtr + ); + + // move to next `NCols` columns + + QuantBDataColPtr += NCols * StrideQuantBData; + QuantBScaleColPtr += NCols * StrideQuantBScale; + if (QuantBZeroPointColPtr != nullptr) { + QuantBZeroPointColPtr += NCols * StrideQuantBZeroPoint; + } + + BiasPtr += BiasPtr != nullptr ? NCols : 0; + SumPtr += NCols; + + nblk -= NCols; + } + + // left over columns less than `NCols`? + nblk += NCols; + for (int64_t n = 0; n < nblk; ++n) { + ComputeDotProducts( + ARowPtr, QuantBDataColPtr, QuantBScaleColPtr, QuantBZeroPointColPtr, SumPtr, CountK, + StrideQuantBData, StrideQuantBScale, StrideQuantBZeroPoint, + BiasPtr + ); + + // move to next column + + QuantBDataColPtr += StrideQuantBData; + QuantBScaleColPtr += StrideQuantBScale; + if (QuantBZeroPointColPtr != nullptr) { + QuantBZeroPointColPtr += StrideQuantBZeroPoint; + } + + BiasPtr += BiasPtr != nullptr ? 
1 : 0; + SumPtr += 1; + } +} + +#define SPECIALIZE_SQNBIT_GEMM_M1_KERNEL(BlkBitWidth, BlkLen) \ + template <> \ + MLAS_FORCEINLINE void \ + MlasSQNBitGemmM1Kernel( \ + const float* A, \ + const uint8_t* QuantBData, \ + const float* QuantBScale, \ + const uint8_t* QuantBZeroPoint, \ + float* C, \ + size_t CountN, \ + size_t CountK, \ + size_t BlockStrideQuantB, \ + const float* Bias \ + ) \ + { \ + return MlasSQNBitGemmM1KernelNeon( \ + A, QuantBData, QuantBScale, QuantBZeroPoint, C, CountN, CountK, \ + BlockStrideQuantB, Bias \ + ); \ + } + +SPECIALIZE_SQNBIT_GEMM_M1_KERNEL(4, 16) +SPECIALIZE_SQNBIT_GEMM_M1_KERNEL(4, 32) +SPECIALIZE_SQNBIT_GEMM_M1_KERNEL(4, 64) +SPECIALIZE_SQNBIT_GEMM_M1_KERNEL(4, 128) +SPECIALIZE_SQNBIT_GEMM_M1_KERNEL(4, 256) + +#undef SPECIALIZE_SQNBIT_GEMM_M1_KERNEL + +// +// MlasQNBitBlkDequantBForSgemm and helpers. +// + +template +MLAS_FORCEINLINE void +MlasQNBitBlkDequantBForSgemmNeon( + float* FpData, + const uint8_t* QuantBData, + const float* QuantBScale, + const uint8_t* QuantBZeroPoint, + size_t CountN, + size_t CountK, + size_t BlockStrideQuantB +) +{ + auto impl0_reference = [&]() { + static_assert(BlkBitWidth == 4); + + float* Dst = FpData; + + const uint8_t* QuantBDataCol = QuantBData; + const float* QuantBScaleCol = QuantBScale; + const uint8_t* QuantBZeroPointCol = QuantBZeroPoint; + + for (size_t n = 0; n < CountN; n += 16) { + const size_t nnlen = std::min(CountN - n, size_t{16}); + + for (size_t nn = 0; nn < nnlen; ++nn) { + for (size_t k = 0, k_blk_idx = 0; k < CountK; k += BlkLen, k_blk_idx += 1) { + const size_t kklen = std::min(CountK - k, BlkLen); + + const uint8_t* b_data = + QuantBDataCol + k_blk_idx * MlasQNBitBlkDataSizeInBytes(BlkBitWidth, BlkLen); + const float b_s = QuantBScaleCol[k_blk_idx]; + const uint8_t b_z = + (QuantBZeroPointCol != nullptr) + ? ((k_blk_idx & 1) == 1) + ? QuantBZeroPointCol[k_blk_idx / 2] >> 4 + : QuantBZeroPointCol[k_blk_idx / 2] & 0x0F + : 8; + + for (size_t kk = 0; kk < kklen; ++kk) { + const uint8_t b_packed = b_data[kk / 2]; + const uint8_t b_byte = ((kk & 1) == 1) ? b_packed >> 4 : b_packed & 0x0F; + const float b_value = (b_byte - b_z) * b_s; + + Dst[(k + kk) * 16 + nn] = b_value; + } + } + + QuantBDataCol += BlockStrideQuantB * MlasQNBitBlkDataSizeInBytes(BlkBitWidth, BlkLen); + QuantBScaleCol += BlockStrideQuantB; + if (QuantBZeroPointCol != nullptr) { + QuantBZeroPointCol += MlasQNBitZeroPointsForBlksSizeInBytes(BlockStrideQuantB); + } + } + + // zero out any remaining columns + + if (nnlen < 16) { + for (size_t k = 0; k < CountK; ++k) { + std::fill_n(Dst + (k * 16) + nnlen, 16 - nnlen, 0.0f); + } + } + + Dst += CountK * 16; + } + }; + + impl0_reference(); +} + +#define SPECIALIZE_QNBIT_BLK_DEQUANT_B_FOR_SGEMM(BlkBitWidth, BlkLen) \ + template <> \ + MLAS_FORCEINLINE void \ + MlasQNBitBlkDequantBForSgemm( \ + float* FpData, \ + const uint8_t* QuantBData, \ + const float* QuantBScale, \ + const uint8_t* QuantBZeroPoint, \ + size_t CountN, \ + size_t CountK, \ + size_t BlockStrideQuantB \ + ) \ + { \ + MlasQNBitBlkDequantBForSgemmNeon( \ + FpData, QuantBData, QuantBScale, QuantBZeroPoint, CountN, CountK, BlockStrideQuantB \ + ); \ + } + +SPECIALIZE_QNBIT_BLK_DEQUANT_B_FOR_SGEMM(4, 16) +SPECIALIZE_QNBIT_BLK_DEQUANT_B_FOR_SGEMM(4, 32) +SPECIALIZE_QNBIT_BLK_DEQUANT_B_FOR_SGEMM(4, 64) +SPECIALIZE_QNBIT_BLK_DEQUANT_B_FOR_SGEMM(4, 128) +SPECIALIZE_QNBIT_BLK_DEQUANT_B_FOR_SGEMM(4, 256) + +#undef SPECIALIZE_QNBIT_BLK_DEQUANT_B_FOR_SGEMM + +// +// Kernel dispatch structure definition. 
+// + +const MLAS_SQNBIT_GEMM_DISPATCH MlasSQNBitGemmDispatchNeon = []() { + MLAS_SQNBIT_GEMM_DISPATCH d; + d.Operations[QuantVariant_BitWidth4_BlockSize16] = MlasSQNBitGemmOperation<4, 16, MLAS_SQNBIT_GEMM_KERNEL_NEON>; + d.Operations[QuantVariant_BitWidth4_BlockSize32] = MlasSQNBitGemmOperation<4, 32, MLAS_SQNBIT_GEMM_KERNEL_NEON>; + d.Operations[QuantVariant_BitWidth4_BlockSize64] = MlasSQNBitGemmOperation<4, 64, MLAS_SQNBIT_GEMM_KERNEL_NEON>; + d.Operations[QuantVariant_BitWidth4_BlockSize128] = MlasSQNBitGemmOperation<4, 128, MLAS_SQNBIT_GEMM_KERNEL_NEON>; + d.Operations[QuantVariant_BitWidth4_BlockSize256] = MlasSQNBitGemmOperation<4, 256, MLAS_SQNBIT_GEMM_KERNEL_NEON>; + return d; +}(); diff --git a/onnxruntime/test/contrib_ops/matmul_4bits_test.cc b/onnxruntime/test/contrib_ops/matmul_4bits_test.cc index 918ee0e6eb976..3c6217915bef0 100644 --- a/onnxruntime/test/contrib_ops/matmul_4bits_test.cc +++ b/onnxruntime/test/contrib_ops/matmul_4bits_test.cc @@ -72,21 +72,17 @@ void RunTest(int64_t M, int64_t N, int64_t K, int64_t block_size, bool has_zerop MlasTranspose(input1_f_vals.data(), input1_f_vals_trans.data(), K, N); #endif - int meta_rows; - int meta_cols; - MlasBlockwiseQuantMetaShape((int)block_size, true, (int)K, (int)N, meta_rows, meta_cols); + int q_rows, q_cols; + MlasBlockwiseQuantizedShape((int)block_size, true, (int)K, (int)N, q_rows, q_cols); - int q_rows; - int q_cols; - MlasBlockwiseQuantizedShape((int)block_size, true, (int)K, (int)N, q_rows, q_cols); + size_t q_data_size_in_bytes, q_scale_size, q_zp_size_in_bytes; + MlasBlockwiseQuantizedBufferSizes(4, static_cast(block_size), /* columnwise */ true, + static_cast(K), static_cast(N), + q_data_size_in_bytes, q_scale_size, &q_zp_size_in_bytes); - std::vector input1_vals(q_rows * q_cols); - std::vector scales(meta_rows * meta_cols); - - // TODO!! 
THIS SHOULD BE PROVIDED BY MLAS - // sub 8b packing always happen on the column dimension - const int packed_meta_rows = (meta_rows * QBits + 7) / 8; - std::vector zp(packed_meta_rows * meta_cols); + std::vector input1_vals(q_data_size_in_bytes); + std::vector scales(q_scale_size); + std::vector zp(q_zp_size_in_bytes); QuantizeDequantize(input1_f_vals, input1_vals, @@ -115,9 +111,9 @@ void RunTest(int64_t M, int64_t N, int64_t K, int64_t block_size, bool has_zerop if (use_float16) { test.AddInput("A", {M, K}, ToFloat16(input0_vals), false); test.AddInput("B", {q_cols, q_rows}, input1_vals, true); - test.AddInput("scales", {meta_cols * meta_rows}, ToFloat16(scales), true); + test.AddInput("scales", {static_cast(q_scale_size)}, ToFloat16(scales), true); if (has_zeropoint) { - test.AddInput("zero_points", {meta_cols * packed_meta_rows}, zp, true); + test.AddInput("zero_points", {static_cast(q_zp_size_in_bytes)}, zp, true); } test.AddOutput("Y", {M, N}, ToFloat16(expected_vals)); @@ -129,9 +125,9 @@ void RunTest(int64_t M, int64_t N, int64_t K, int64_t block_size, bool has_zerop } else { test.AddInput("A", {M, K}, input0_vals, false); test.AddInput("B", {q_cols, q_rows}, input1_vals, true); - test.AddInput("scales", {meta_cols * meta_rows}, scales, true); + test.AddInput("scales", {static_cast(q_scale_size)}, scales, true); if (has_zeropoint) { - test.AddInput("zero_points", {meta_cols * packed_meta_rows}, zp, true); + test.AddInput("zero_points", {static_cast(q_zp_size_in_bytes)}, zp, true); } test.AddOutput("Y", {M, N}, expected_vals); diff --git a/onnxruntime/test/mlas/bench/bench_q4gemm.cpp b/onnxruntime/test/mlas/bench/bench_q4gemm.cpp index cf02d4f3628f9..87e3601612761 100644 --- a/onnxruntime/test/mlas/bench/bench_q4gemm.cpp +++ b/onnxruntime/test/mlas/bench/bench_q4gemm.cpp @@ -33,7 +33,7 @@ void Q4GEMM(benchmark::State& state, MLAS_BLK_QUANT_TYPE qtype) { auto B1 = RandomVectorUniform(static_cast(N * K), -1.0f, 1.0f); std::vector C1(static_cast(M * N)); - std::vector B1_packed(pack_b_size); + std::vector B1_packed(pack_b_size); MlasQ4GemmPackB(qtype, B1_packed.data(), B1.data(), N, K, N); MLAS_Q4_GEMM_DATA_PARAMS params1; diff --git a/onnxruntime/test/mlas/bench/bench_sgemm.cpp b/onnxruntime/test/mlas/bench/bench_sgemm.cpp index baa8f1a830ea1..e6e34bc88ad59 100644 --- a/onnxruntime/test/mlas/bench/bench_sgemm.cpp +++ b/onnxruntime/test/mlas/bench/bench_sgemm.cpp @@ -128,7 +128,7 @@ BENCHMARK_CAPTURE(SGEMM, PACKB_TransA, true, true, false)->Apply(GemmSizeProduct static void GemmLLMSizeProducts(benchmark::internal::Benchmark* b) { b->ArgNames(sgemm_bench_arg_names); - ArgsProduct(b, {{1, 1024, 2048}, {4096}, {4096}}); + ArgsProduct(b, {{1, 1024, 2048}, {4096, 11008}, {4096, 11008}}); } BENCHMARK_CAPTURE(SGEMM, LLM, false, false, true)->Apply(GemmLLMSizeProducts)->UseRealTime(); diff --git a/onnxruntime/test/mlas/bench/bench_sqnbitgemm.cpp b/onnxruntime/test/mlas/bench/bench_sqnbitgemm.cpp new file mode 100644 index 0000000000000..2f2635dab0512 --- /dev/null +++ b/onnxruntime/test/mlas/bench/bench_sqnbitgemm.cpp @@ -0,0 +1,86 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +#include "mlas_q4.h" +#include "mlas_qnbit.h" + +#include + +#include "benchmark/benchmark.h" + +#include "bench_util.h" +#include "core/util/thread_utils.h" + +template +void SQNBITGEMM(benchmark::State& state) { + if (state.range(0) <= 0) throw std::invalid_argument("M must greater than 0!"); + if (state.range(1) <= 0) throw std::invalid_argument("N must greater than 0!"); + if (state.range(2) <= 0) throw std::invalid_argument("K must greater than 0!"); + if (state.range(3) <= 0) throw std::invalid_argument("Threads must greater than 0!"); + + const size_t M = static_cast(state.range(0)); + const size_t N = static_cast(state.range(1)); + const size_t K = static_cast(state.range(2)); + const size_t threads = static_cast(state.range(3)); + + size_t QuantBDataSizeInBytes, QuantBScaleSize, QuantBZeroPointSizeInBytes; + MlasBlockwiseQuantizedBufferSizes( + BlkBitWidth, BlkLen, /* columnwise */ true, + static_cast(K), static_cast(N), + QuantBDataSizeInBytes, QuantBScaleSize, &QuantBZeroPointSizeInBytes); + + OrtThreadPoolParams tpo; + tpo.thread_pool_size = static_cast(threads); + tpo.auto_set_affinity = true; + + std::unique_ptr tp( + onnxruntime::concurrency::CreateThreadPool(&onnxruntime::Env::Default(), + tpo, onnxruntime::concurrency::ThreadPoolType::INTRA_OP)); + + auto A = RandomVectorUniform(static_cast(M * K), -1.0f, 1.0f); + auto B = RandomVectorUniform(static_cast(K * N), -1.0f, 1.0f); + std::vector C(static_cast(M * N)); + + std::vector QuantBData(QuantBDataSizeInBytes); + std::vector QuantBScale(QuantBScaleSize); + std::vector QuantBZeroPoint(Symmetric ? 0 : QuantBZeroPointSizeInBytes); + + MlasQuantizeBlockwise(QuantBData.data(), QuantBScale.data(), + Symmetric ? nullptr : QuantBZeroPoint.data(), + B.data(), BlkLen, /* columnwise */ true, + static_cast(K), static_cast(N), static_cast(N), + tp.get()); + + MLAS_SQNBIT_GEMM_DATA_PARAMS params{}; + params.A = A.data(); + params.lda = K; + params.QuantBData = QuantBData.data(); + params.QuantBScale = QuantBScale.data(); + params.QuantBZeroPoint = Symmetric ? 
nullptr : QuantBZeroPoint.data(); + params.Bias = nullptr; + params.C = C.data(); + params.ldc = N; + + // warm up run + MlasSQNBitGemmBatch(M, N, K, 1, BlkBitWidth, BlkLen, ¶ms, tp.get()); + + for (auto _ : state) { + MlasSQNBitGemmBatch(M, N, K, 1, BlkBitWidth, BlkLen, ¶ms, tp.get()); + } +} + +static void GemmSizeProducts(benchmark::internal::Benchmark* b) { + b->ArgNames({"M", "N", "K", "Threads"}); + ArgsProduct(b, {{1, 1024, 2048}, {4096, 11008}, {4096, 11008}, {8}}); +} + +BENCHMARK(SQNBITGEMM<4, 16, false>)->Apply(GemmSizeProducts)->UseRealTime(); +BENCHMARK(SQNBITGEMM<4, 16, true>)->Apply(GemmSizeProducts)->UseRealTime(); +BENCHMARK(SQNBITGEMM<4, 32, false>)->Apply(GemmSizeProducts)->UseRealTime(); +BENCHMARK(SQNBITGEMM<4, 32, true>)->Apply(GemmSizeProducts)->UseRealTime(); +BENCHMARK(SQNBITGEMM<4, 64, false>)->Apply(GemmSizeProducts)->UseRealTime(); +BENCHMARK(SQNBITGEMM<4, 64, true>)->Apply(GemmSizeProducts)->UseRealTime(); +BENCHMARK(SQNBITGEMM<4, 128, false>)->Apply(GemmSizeProducts)->UseRealTime(); +BENCHMARK(SQNBITGEMM<4, 128, true>)->Apply(GemmSizeProducts)->UseRealTime(); +BENCHMARK(SQNBITGEMM<4, 256, false>)->Apply(GemmSizeProducts)->UseRealTime(); +BENCHMARK(SQNBITGEMM<4, 256, true>)->Apply(GemmSizeProducts)->UseRealTime(); diff --git a/onnxruntime/test/mlas/unittest/test_blockq4.cpp b/onnxruntime/test/mlas/unittest/test_blockq4.cpp index f836da8277bb8..07f0748fb7ed1 100644 --- a/onnxruntime/test/mlas/unittest/test_blockq4.cpp +++ b/onnxruntime/test/mlas/unittest/test_blockq4.cpp @@ -38,13 +38,17 @@ class MlasBlockwiseQdqTest : public MlasTestBase { int meta_rows; int meta_cols; - MlasBlockwiseQuantMetaShape(block_size, columnwise, rows, columns, meta_rows, meta_cols); + MlasBlockwiseQuantMetaShape(block_size, columnwise, rows, columns, meta_rows, meta_cols); int q_rows; int q_cols; - MlasBlockwiseQuantizedShape(block_size, columnwise, rows, columns, q_rows, q_cols); + MlasBlockwiseQuantizedShape(block_size, columnwise, rows, columns, q_rows, q_cols); - uint8_t* elements = InputElements.GetBuffer(q_rows * q_cols, true); + size_t q_data_size_in_bytes, q_scale_size, q_zp_size_in_bytes; + MlasBlockwiseQuantizedBufferSizes(4, block_size, columnwise, rows, columns, + q_data_size_in_bytes, q_scale_size, &q_zp_size_in_bytes); + + uint8_t* elements = InputElements.GetBuffer(q_data_size_in_bytes, true); int v = 7; for (int c = 0; c < columns; c++) { @@ -70,8 +74,8 @@ class MlasBlockwiseQdqTest : public MlasTestBase { } } - float* scales = InputScales.GetBuffer(meta_rows * meta_cols); - uint8_t* zp = symmetric ? nullptr : InputOffsets.GetBuffer(((meta_rows + 1) / 2) * meta_cols, true); + float* scales = InputScales.GetBuffer(q_scale_size); + uint8_t* zp = symmetric ? 
nullptr : InputOffsets.GetBuffer(q_zp_size_in_bytes, true); if (zp) { for (int c = 0; c < meta_cols; c++) { for (int r = 0; r < meta_rows; r += 2) { diff --git a/onnxruntime/test/mlas/unittest/test_halfgemm.h b/onnxruntime/test/mlas/unittest/test_halfgemm.h index 2861b0e746fdc..4db5c2bebca40 100644 --- a/onnxruntime/test/mlas/unittest/test_halfgemm.h +++ b/onnxruntime/test/mlas/unittest/test_halfgemm.h @@ -18,20 +18,6 @@ Module Name: #include "test_fp16.h" -inline bool -CloseEnough(float actual, float expected) { - if (std::isnan(actual)) { - return std::isnan(expected); - } - float diff = std::abs(actual - expected); - float top = std::max(std::abs(actual), std::abs(expected)); - float ratio = 0; - if (top > 0.0001) { - ratio = diff / top; - } - return ratio < 0.005; -} - /** * @brief Test class for half precision GEMM * @tparam AType Data type of A matrix, can be either float or MLFp16 diff --git a/onnxruntime/test/mlas/unittest/test_main.cpp b/onnxruntime/test/mlas/unittest/test_main.cpp index 66b5a6a15db2b..505c0c01dfa90 100644 --- a/onnxruntime/test/mlas/unittest/test_main.cpp +++ b/onnxruntime/test/mlas/unittest/test_main.cpp @@ -1,17 +1,18 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -#include "test_util.h" - -#include #include +#include +#include + +#include "test_util.h" #if !defined(BUILD_MLAS_NO_ONNXRUNTIME) MLAS_THREADPOOL* GetMlasThreadPool(void) { - static MLAS_THREADPOOL* threadpool = new onnxruntime::concurrency::ThreadPool( + static auto threadpool = std::make_unique( &onnxruntime::Env::Default(), onnxruntime::ThreadOptions(), nullptr, 2, true); - return threadpool; + return threadpool.get(); } #else diff --git a/onnxruntime/test/mlas/unittest/test_q4gemm.h b/onnxruntime/test/mlas/unittest/test_q4gemm.h index 58a64491ae80b..97c6969b5bf91 100644 --- a/onnxruntime/test/mlas/unittest/test_q4gemm.h +++ b/onnxruntime/test/mlas/unittest/test_q4gemm.h @@ -19,20 +19,6 @@ Module Name: #include "test_util.h" #include "mlas_q4.h" -inline bool -CloseEnough(float actual, float expected) { - if (std::isnan(actual)) { - return std::isnan(expected); - } - float diff = std::abs(actual - expected); - float top = std::max(std::abs(actual), std::abs(expected)); - float ratio = 0; - if (top > 0.0001) { - ratio = diff / top; - } - return ratio < 0.005; -} - /** * @brief Test class for int4 block quantized GEMM * Note: only 2-D matmul supported for now diff --git a/onnxruntime/test/mlas/unittest/test_q8q4gemm.cpp b/onnxruntime/test/mlas/unittest/test_q8q4gemm.cpp index a78a3261d1f2a..d3f601793a970 100644 --- a/onnxruntime/test/mlas/unittest/test_q8q4gemm.cpp +++ b/onnxruntime/test/mlas/unittest/test_q8q4gemm.cpp @@ -19,20 +19,6 @@ Module Name: #include "test_util.h" #include "mlas_q4.h" -inline bool -CloseEnough(float actual, float expected) { - if (std::isnan(actual)) { - return std::isnan(expected); - } - float diff = std::abs(actual - expected); - float top = std::max(std::abs(actual), std::abs(expected)); - float ratio = 0; - if (top > 0.0001) { - ratio = diff / top; - } - return ratio < 0.005; -} - template static void blkq8_dequant_reference(const int8_t* src, float* dst, size_t M, size_t K) { const size_t num_blks = K / QBlkLen; diff --git a/onnxruntime/test/mlas/unittest/test_sqnbitgemm.cpp b/onnxruntime/test/mlas/unittest/test_sqnbitgemm.cpp new file mode 100644 index 0000000000000..6c97d60301573 --- /dev/null +++ b/onnxruntime/test/mlas/unittest/test_sqnbitgemm.cpp @@ -0,0 +1,270 @@ +/*++ + +Copyright (c) Microsoft Corporation. 
All rights reserved. + +Licensed under the MIT License. + +Module Name: + + test_sqnbitgemm.h + +Abstract: + + Tests for MLAS n-bit int block quantized GEMM. + +--*/ + +#include "test_util.h" +#include "mlas_q4.h" +#include "mlas_qnbit.h" + +/** + * @brief Test class for n-bit int block quantized GEMM + * Note: only 2-D matmul supported for now + */ +template +class MlasSQNBitGemmTest : public MlasTestBase { + private: + MatrixGuardBuffer BufferA; + MatrixGuardBuffer BufferB; + MatrixGuardBuffer BufferQuantBData; + MatrixGuardBuffer BufferQuantBZeroPoint; + MatrixGuardBuffer BufferQuantBScale; + MatrixGuardBuffer BufferDequantizedB; + MatrixGuardBuffer BufferBias; + MatrixGuardBuffer BufferC; + MatrixGuardBuffer BufferCReference; + + void CallGemm(size_t M, + size_t N, + size_t K, + const float* A, + size_t lda, + const uint8_t* QuantBData, + const float* QuantBScale, + const uint8_t* QuantBZeroPoint, + const float* Bias, + float* C, + size_t ldc, + MLAS_THREADPOOL* Threadpool) { + MLAS_SQNBIT_GEMM_DATA_PARAMS params; + params.A = A; + params.lda = lda; + params.Bias = Bias; + params.C = C; + params.ldc = ldc; + params.QuantBData = QuantBData; + params.QuantBScale = QuantBScale; + params.QuantBZeroPoint = QuantBZeroPoint; + params.PostProcessor = nullptr; + + MlasSQNBitGemmBatch(M, N, K, 1, BlkBitWidth, BlkLen, ¶ms, Threadpool); + } + + void CallReferenceGemm(size_t M, + size_t N, + size_t K, + const float* A, + const uint8_t* QuantBData, + const float* QuantBScale, + const uint8_t* QuantBZeroPoint, + const float* Bias, + float* C) { + float* DequantizedBData = BufferDequantizedB.GetBuffer(K * N); + MlasDequantizeBlockwise( + DequantizedBData, QuantBData, QuantBScale, QuantBZeroPoint, BlkLen, /* columnwise */ true, + static_cast(K), static_cast(N), GetMlasThreadPool()); + // Note: DequantizedBData is in column major layout. + + for (size_t m = 0; m < M; m++) { + for (size_t n = 0; n < N; n++) { + const float* a = A + m * K; + const float* b = DequantizedBData + n * K; + float* c = C + (m * N) + n; + + float sum = Bias == nullptr ? 0.0f : Bias[n]; + for (size_t k = 0; k < K; k++) { + sum += (*a) * (*b); + b += 1; + a += 1; + } + *c = sum; + } + } + } + + public: + void Test(size_t M, size_t N, size_t K, + bool WithBias, bool Symmetric, bool WithThreadpool) { + MLAS_THREADPOOL* Threadpool = WithThreadpool ? 
GetMlasThreadPool() : nullptr; + + const float* A = BufferA.GetBuffer(K * M); + + const float* B = BufferB.GetBuffer(N * K); + + const float* Bias = nullptr; + if (WithBias) { + Bias = BufferBias.GetBuffer(N); + } + +#if 0 + auto print_matrix = [](size_t ncols, size_t nrows, const float* data) { + for (size_t row = 0; row < nrows; ++row) { + for (size_t col = 0; col < ncols; ++col) { + std::cout << data[row * nrows + col] << "\t"; + } + std::cout << "\n"; + } + }; + + std::cout << "A:\n"; + print_matrix(M, K, A); + std::cout << "B:\n"; + print_matrix(K, N, B); +#endif + + float* C = BufferC.GetBuffer(N * M, true); + float* CReference = BufferCReference.GetBuffer(N * M, true); + + // pack B + uint8_t* QuantBData = nullptr; + float* QuantBScale = nullptr; + uint8_t* QuantBZeroPoint = nullptr; + { + size_t QuantBDataSizeInBytes, QuantBScaleSize, QuantBZeroPointSizeInBytes; + MlasBlockwiseQuantizedBufferSizes(BlkBitWidth, BlkLen, /* columnwise */ true, + static_cast(K), static_cast(N), + QuantBDataSizeInBytes, QuantBScaleSize, &QuantBZeroPointSizeInBytes); + + QuantBData = BufferQuantBData.GetBuffer(QuantBDataSizeInBytes); + QuantBScale = BufferQuantBScale.GetBuffer(QuantBScaleSize); + if (Symmetric) { + QuantBZeroPoint = BufferQuantBZeroPoint.GetBuffer(QuantBZeroPointSizeInBytes); + } + + MlasQuantizeBlockwise(QuantBData, QuantBScale, QuantBZeroPoint, + B, BlkLen, + /* columnwise */ true, + static_cast(K), static_cast(N), + static_cast(N), + GetMlasThreadPool()); + } + + CallGemm(M, N, K, A, /* lda */ K, QuantBData, QuantBScale, QuantBZeroPoint, Bias, C, /* ldc */ N, Threadpool); + CallReferenceGemm(M, N, K, A, QuantBData, QuantBScale, QuantBZeroPoint, Bias, CReference); + + size_t f = 0; + for (size_t m = 0; m < M; m++) { + for (size_t n = 0; n < N; n++, f++) { + ASSERT_TRUE(CloseEnough(C[f], CReference[f])) + << "Expected: " << CReference[f] << " Actual: " << C[f] << "@[" << m << "x" << n << "], " + << "M=" << M << ", N=" << N << ", K=" << K; + } + } + } + + public: + static const char* GetTestSuiteName() { + static std::string suite_name = std::string("SQNBitGemm") + + "BlkBitWidth" + std::to_string(BlkBitWidth) + + "BlkLen" + std::to_string(BlkLen); + return suite_name.c_str(); + } +}; + +// +// Short Execute() test helper to register each test separately by all parameters. +// +template +class SQNBitGemmShortExecuteTest : public MlasTestFixture> { + public: + explicit SQNBitGemmShortExecuteTest(size_t M, size_t N, size_t K, + bool WithThreadpool, bool Symmetric, bool WithBias) + : M_(M), N_(N), K_(K), WithThreadpool_(WithThreadpool), Symmetric_(Symmetric), WithBias_(WithBias) { + } + + void TestBody() override { + MlasTestFixture>::mlas_tester->Test( + M_, N_, K_, WithThreadpool_, Symmetric_, WithBias_); + } + + static size_t RegisterSingleTest(size_t M, size_t N, size_t K, + bool WithThreadpool, bool Symmetric, bool WithBias) { + std::stringstream ss; + ss << (WithThreadpool ? "SingleThread" : "Threaded") + << "/isSymmetric" << Symmetric + << "/M" << M << "xN" << N << "xK" << K + << "/hasBias" << WithBias; + auto test_name = ss.str(); + + testing::RegisterTest( + MlasSQNBitGemmTest::GetTestSuiteName(), + test_name.c_str(), + nullptr, + test_name.c_str(), + __FILE__, + __LINE__, + // Important to use the fixture type as the return type here. 
+ [=]() -> MlasTestFixture>* { + return new SQNBitGemmShortExecuteTest( + M, N, K, WithThreadpool, Symmetric, WithBias); + }); + + return 1; + } + + static size_t RegisterShortExecuteTests() { + size_t test_registered = 0; + + if (MlasIsSQNBitGemmAvailable(BlkBitWidth, BlkLen)) { + for (bool WithThreadpool : {false, true}) { + for (bool Symmetric : {false, true}) { + for (size_t b = 1; b < 16; b++) { + test_registered += RegisterSingleTest(b, b, b, WithThreadpool, Symmetric, false); + test_registered += RegisterSingleTest(b, b, b, WithThreadpool, Symmetric, true); + } + for (size_t b = 16; b <= 256; b <<= 1) { + test_registered += RegisterSingleTest(b, b, b, WithThreadpool, Symmetric, false); + test_registered += RegisterSingleTest(b, b, b, WithThreadpool, Symmetric, true); + } + for (size_t b = 256; b < 320; b += 32) { + test_registered += RegisterSingleTest(b, b, b, WithThreadpool, Symmetric, true); + } + for (size_t b = 1; b < 96; b++) { + test_registered += RegisterSingleTest(1, b, 32, WithThreadpool, Symmetric, false); + test_registered += RegisterSingleTest(1, 32, b, WithThreadpool, Symmetric, true); + test_registered += RegisterSingleTest(1, b, b, WithThreadpool, Symmetric, false); + } + test_registered += RegisterSingleTest(43, 500, 401, WithThreadpool, Symmetric, true); + + // test_registered += RegisterSingleTest(1001, 1027, 1031, WithThreadpool, Symmetric, false); + } + } + } + + return test_registered; + } + + private: + size_t M_, N_, K_; + bool WithThreadpool_, Symmetric_, WithBias_; +}; + +static size_t SQNBitGemmRegisterAllShortExecuteTests() { + size_t count = 0; + + count += SQNBitGemmShortExecuteTest<4, 16>::RegisterShortExecuteTests(); + count += SQNBitGemmShortExecuteTest<4, 32>::RegisterShortExecuteTests(); + count += SQNBitGemmShortExecuteTest<4, 64>::RegisterShortExecuteTests(); + count += SQNBitGemmShortExecuteTest<4, 128>::RegisterShortExecuteTests(); + count += SQNBitGemmShortExecuteTest<4, 256>::RegisterShortExecuteTests(); + + return count; +} + +static UNUSED_VARIABLE bool added_to_main = AddTestRegister([](bool is_short_execute) { + if (is_short_execute) { + return SQNBitGemmRegisterAllShortExecuteTests() > 0; + } + return false; +}); diff --git a/onnxruntime/test/mlas/unittest/test_util.h b/onnxruntime/test/mlas/unittest/test_util.h index db528ef7291cc..8eecda900ff27 100644 --- a/onnxruntime/test/mlas/unittest/test_util.h +++ b/onnxruntime/test/mlas/unittest/test_util.h @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -253,3 +254,16 @@ inline void ReorderInputNchw(const int64_t* input_shape, const float* S, float* D += spatial_count * nchwc_channel_count; } } + +inline bool CloseEnough(float actual, float expected) { + if (std::isnan(actual)) { + return std::isnan(expected); + } + float diff = std::abs(actual - expected); + float top = std::max(std::abs(actual), std::abs(expected)); + float ratio = 0; + if (top > 0.0001) { + ratio = diff / top; + } + return ratio < 0.005; +} diff --git a/onnxruntime/test/onnx/microbenchmark/reduceminmax.cc b/onnxruntime/test/onnx/microbenchmark/reduceminmax.cc index bd2abadf49b81..d866045ba4962 100644 --- a/onnxruntime/test/onnx/microbenchmark/reduceminmax.cc +++ b/onnxruntime/test/onnx/microbenchmark/reduceminmax.cc @@ -91,6 +91,8 @@ BENCHMARK(BM_FindMinMaxMlasSSE2) ->Arg(98304) ->Arg(160000); +#ifdef MLAS_TARGET_AMD64 + // MLAS avx implementation static void BM_FindMinMaxMlasAvx(benchmark::State& state) { const size_t batch_size = static_cast(state.range(0)); @@ -115,3 +117,5 @@ 
BENCHMARK(BM_FindMinMaxMlasAvx) ->Arg(80000) ->Arg(98304) ->Arg(160000); + +#endif // MLAS_TARGET_AMD64 From cc840c5289e581997ace2f6e414558ca8b886e90 Mon Sep 17 00:00:00 2001 From: Changming Sun Date: Wed, 15 Nov 2023 14:51:42 -0800 Subject: [PATCH 08/38] Fix a bug in SaveInputOutputNamesToNodeMapping function (#18456) ### Description Fix a bug in SaveInputOutputNamesToNodeMapping function. The fix was provided by Scott. ### Motivation and Context --- onnxruntime/core/framework/session_state_utils.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/onnxruntime/core/framework/session_state_utils.cc b/onnxruntime/core/framework/session_state_utils.cc index df3a7afebc176..df11fe8302aef 100644 --- a/onnxruntime/core/framework/session_state_utils.cc +++ b/onnxruntime/core/framework/session_state_utils.cc @@ -455,11 +455,10 @@ common::Status SaveInputOutputNamesToNodeMapping(const onnxruntime::GraphViewer& // utils::CopyOneInputAcrossDevices is happy. auto& input_map = session_state.GetInputNodeInfoMap(); - auto end_map = input_map.cend(); for (const auto& graph_input : graph_inputs) { const auto& name = graph_input->Name(); - if (input_map.find(name) == end_map) { + if (input_map.find(name) == input_map.cend()) { // dummy entry for an input that we didn't find a use of in the graph. log it in case that's a bug. // utils::CopyOneInputAcrossDevices will use the input OrtValue as is given we don't believe it's used anywhere. LOGS(session_state.Logger(), INFO) << (graph.IsSubgraph() ? "Subgraph" : "Graph") << " input with name " From 6f863ae2ad5cf86dabe18100cdf3528b0696c1fe Mon Sep 17 00:00:00 2001 From: Dmitri Smirnov Date: Wed, 15 Nov 2023 16:09:05 -0800 Subject: [PATCH 09/38] Allow optional axes tensor to be null and ignore it as optional (#18423) ### Description Our function inliner converts call nodes to a proto. `Node::ToProto()` function recreates optional NodeArgs into a `NodeProto`. While handling missing input parameters, our inliner simply renames them as empty strings. `Graph::InlineFunctionProto()` recreates missing NodeArgs even though the original call node did not have them. This results in the below mentioned issue. The inlined model has the following entries, notice the second argument is present, but has no value in `ReduceSum` call (from a Dynamo exported model). > InsertedPrecisionFreeCast__inlfunc__aten_linalg_vector_norm_no_dim_onnx_result_12 = ReduceSum (InsertedPrecisionFreeCast__inlfunc_ReduceL1_data_abs, ) We now allow second input to ReduceSum to be nullptr and ignore it as it is optional. 
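Below is a rough illustration of the intended behavior (a standalone toy sketch, not the actual kernel code; `ReduceSumToy` and its signature are made up for this example): an absent optional axes input is treated like an empty axes list, and with `noop_with_empty_axes` set the op degenerates to an identity copy instead of failing an enforce on the missing input.

```cpp
// Toy, self-contained model of the intended semantics (NOT the actual ORT kernel;
// ReduceSumToy and its signature are illustrative only):
//   * axes == nullptr models the optional second input being absent,
//   * an absent/empty axes list with noop_with_empty_axes set means "identity copy",
//   * otherwise the (1-D) data is reduced over all elements.
#include <cstdint>
#include <iostream>
#include <numeric>
#include <vector>

std::vector<float> ReduceSumToy(const std::vector<float>& data,
                                const std::vector<int64_t>* axes,  // nullptr == optional input missing
                                bool noop_with_empty_axes) {
  const bool no_axes = (axes == nullptr) || axes->empty();
  if (no_axes && noop_with_empty_axes) {
    return data;  // nothing to reduce: pass the input through unchanged
  }
  // For this 1-D toy, empty axes and axes == {0} both reduce over everything.
  return {std::accumulate(data.begin(), data.end(), 0.0f)};
}

int main() {
  const std::vector<float> x{1.0f, 2.0f, 3.0f};
  std::cout << ReduceSumToy(x, nullptr, false)[0] << "\n";     // 6
  std::cout << ReduceSumToy(x, nullptr, true).size() << "\n";  // 3 (identity copy)
  return 0;
}
```

The actual kernel-side change below simply skips reading the axes tensor when the optional input comes back null, leaving the axes list empty.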
### Motivation and Context This seeks to address https://github.com/microsoft/onnxruntime/issues/18338 --- .../providers/cpu/reduction/reduction_ops.cc | 22 +- onnxruntime/test/framework/function_test.cc | 25 + .../testdata/transform/gh_issue_18338.onnx | Bin 0 -> 3976 bytes .../test/testdata/transform/gh_issue_18338.py | 859 ++++++++++++++++++ 4 files changed, 896 insertions(+), 10 deletions(-) create mode 100644 onnxruntime/test/testdata/transform/gh_issue_18338.onnx create mode 100644 onnxruntime/test/testdata/transform/gh_issue_18338.py diff --git a/onnxruntime/core/providers/cpu/reduction/reduction_ops.cc b/onnxruntime/core/providers/cpu/reduction/reduction_ops.cc index ce834e371fdef..3c83394fb0bf4 100644 --- a/onnxruntime/core/providers/cpu/reduction/reduction_ops.cc +++ b/onnxruntime/core/providers/cpu/reduction/reduction_ops.cc @@ -688,21 +688,23 @@ FastReduceKind OptimizeShapeForFastReduce(gsl::span input_shape, return FastReduceKind::kNone; } -void ValidateCommonFastReduce(const Tensor* axes_tensor) { - ORT_ENFORCE(axes_tensor != nullptr, "Axes input is null"); - ORT_ENFORCE(axes_tensor->Shape().NumDimensions() == 1, - "An axes tensor must be a vector tensor."); -} - // template bool CommonFastReduceCopy(OpKernelContext* ctx, TensorShapeVector& input_axes, bool noop_with_empty_axes) { if (ctx->InputCount() == 2) { // second input holds the axes. + // the argument is optional const Tensor* axes_tensor = ctx->Input(1); - ValidateCommonFastReduce(axes_tensor); - auto nDims = static_cast(axes_tensor->Shape()[0]); - const auto* data = axes_tensor->Data(); - input_axes.insert(input_axes.begin(), data, data + nDims); + + if (axes_tensor != nullptr) { + ORT_ENFORCE(axes_tensor->Shape().NumDimensions() == 1, + "An axes tensor must be a vector tensor."); + + const auto data_span = axes_tensor->DataAsSpan(); + input_axes.assign(data_span.begin(), data_span.end()); + } else { + input_axes.clear(); + } + if (input_axes.empty() && noop_with_empty_axes) { const Tensor* input = ctx->Input(0); auto* output = ctx->Output(0, input->Shape()); diff --git a/onnxruntime/test/framework/function_test.cc b/onnxruntime/test/framework/function_test.cc index 84d8a9c56df89..9ab78cac3aca4 100644 --- a/onnxruntime/test/framework/function_test.cc +++ b/onnxruntime/test/framework/function_test.cc @@ -589,5 +589,30 @@ TEST(FunctionTest, TestInlinedLocalFunctionNotRemoved) { #endif } +TEST(FunctionTest, TestInlinedFunctionDoesNotReserrectNonExistingArgs) { + // Verify this runs + constexpr const ORTCHAR_T* model_uri = ORT_TSTR("testdata/transform/gh_issue_18338.onnx"); + + SessionOptions session_options; + InferenceSessionWrapper session_object{session_options, GetEnvironment()}; + + ASSERT_STATUS_OK(session_object.Load(model_uri)); + ASSERT_STATUS_OK(session_object.Initialize()); + + // Scalar shape for input_0 and output + const std::string input_names[] = {"input_0"}; + const std::string output_names[] = {"_val_3"}; + TensorShape input_shape; + MLFloat16 input_0_data{684.f}; + + OrtValue input_0; + Tensor::InitOrtValue(DataTypeImpl::GetType(), input_shape, &input_0_data, OrtMemoryInfo(), input_0); + + std::vector fetches(1); + RunOptions run_options; + ASSERT_STATUS_OK(session_object.Run(run_options, AsSpan(input_names), AsSpan({input_0}), + AsSpan(output_names), &fetches, 0)); +} + } // namespace test } // namespace onnxruntime diff --git a/onnxruntime/test/testdata/transform/gh_issue_18338.onnx b/onnxruntime/test/testdata/transform/gh_issue_18338.onnx new file mode 100644 index 
0000000000000000000000000000000000000000..afb499a347ec78706f90956df1dddd8e94ecc782 GIT binary patch literal 3976 zcmb7H&2Jk;6!-cgUcV$wIzS2Om$iigOB;E|b`r0GXiX~=C=yC2Nae6vJDbGB-gUj} zr0uB^LY%n60hDtuaOBLfT(~0jFF;7#5f!}o*xgyHDJF;5o_X_T-tYH*Z{FxSDG%>Q z!Kk~xT%s#ax_aC+=HJwFPs{S*Ib$6A@@ z;5w9h{&0*-K)R9c=yGMt3-{fj=U7Hrel-`N;`aN&(F1Q3Eai^1Ji7Q+lS`>wT3^2G zxRK{O{hsgk_nbqoi-&gnU^Kvg&Q5RO1ipXIp~kDz*qNuGeBgOQT*tRG=b;dcc3#Tm za@USEjppp7;lbVta~XC=yEg2E_< zUVqoDjJ$B%j~v>f3<(;;V=nR3_m!<*`|@-+%><^U^d#v9P3~(-F^C{ z25%FHcea=tRRkA7#_@u#iAWS%9t^xCumfd6?#e*1jLL04ygT+h1{mRczsJ(bX>1qTOQ8GV(S+5^@@yS|nL9!yOEOE5W9nX(?(S7EOnNK>ixHE*{tn`lR;uzF%{i;#8 z-U)H?Dq#tLBWnt2XJA$+ezzaE5lZBd#;nl%5?2l>;V-9wtJaMoqsFX2J_?qwB;?WP z7KnK=!}xHTXxB1Io8hLvYo39H9WO#XJHEH)M!iE%NHQHC24RnnOK)E-fb>I>v~4UG ze5=NJ+{@B<8*CXaN>XGwZeKI77VV6?-iPiz^JUOwH3;#%kF+HDGpw{2E5d`Eu zmfz=LBG6U7)5(FdfD{0dKv@~IbMSOd2D+LC%DA$4oc(N^cVHulQ&)ICSPiCP(N=PRcq$-TFu`m`EHKT|nHP;q)LKG&5UvWwR_Yo`G zQk4yFNFz{;WDc!kXf#nZ{D|D}+*(heV$!a2sD7T1_757E30R`ebb_GI(Nfd5B6zqe z3kMYjADx)SLWRIcwjE1Imh!IMXNGUR~Tps27b}A4E&^n zO>}{t#R`f_!iAUwB1m}nPCGFbJk%f=h^d&5ojse4D=`~cJ5uJrTMP(=k-ujko=7QS zHiD8tHhVu)vNsmB#R$Zpgivo;(`N5_XC6`?0=z#;S@3=q&+*GR!*98V7(E3zqgbV~4OO$Dzh!27z81rJ+9R57pki5x2I zC%+ven92Q&Dwfv_q(($YWEdQwxn&eS3XZVgZAj(dFqy&T!m(HdrE#rhx;eR4D}=JT z$9e?WyqeEZ!EXyi+;q{Vz#Toa7BWdGyRMkSq`A}-U1cY# zCwGMXt0qN7mHV{Cj2irw_#|7BZoq3v8KH^{#id%_PC7TGZqqdtr!^s;PqXT|iFpv0 zC)F=Y6%?$}#VIUj9j{9Q-sdlZtrOo2kG1bLC~jeIZj|_&%!H8_jYmFv-r28!7<@)= ze_r(ViZvcCY3|#`$L@hw#fG8E`u=LL%fIs-s`yF^gVBmS+J8M&@S&0ieKXwdx_x)_ zKTo+NPFZ4=LUh#e>*Gzr2gQ|?rX;>KlZUIkb?w(dtQQ{^$;Omh Z33=ZQ1rW6>)nHWZg;lp2@_?p=;a`|_d6NJD literal 0 HcmV?d00001 diff --git a/onnxruntime/test/testdata/transform/gh_issue_18338.py b/onnxruntime/test/testdata/transform/gh_issue_18338.py new file mode 100644 index 0000000000000..dc5446ac56c09 --- /dev/null +++ b/onnxruntime/test/testdata/transform/gh_issue_18338.py @@ -0,0 +1,859 @@ +import google.protobuf.text_format +import onnx +from numpy import array, float16 + +import onnxruntime as ort + +# Run n times +N = 1 + +onnx_model_text = """ +ir_version: 8 +producer_name: "pytorch" +producer_version: "2.2.0" +graph { + node { + output: "_val_1" + name: "Constant_0" + op_type: "Constant" + attribute { + name: "value_ints" + ints: -1 + type: INTS + } + doc_string: "" + } + node { + input: "input_0" + input: "_val_1" + output: "_val_2" + name: "Reshape_1" + op_type: "Reshape" + attribute { + name: "allowzero" + i: 0 + type: INT + } + doc_string: "" + } + node { + input: "_val_2" + output: "_val_3" + name: "_aten_linalg_vector_norm_no_dim_onnx_2" + op_type: "_aten_linalg_vector_norm_no_dim_onnx" + attribute { + name: "keepdim" + i: 0 + type: INT + } + attribute { + name: "ord" + f: 2.0 + type: FLOAT + } + doc_string: "" + domain: "pkg.onnxscript.torch_lib" + } + name: "main_graph" + input { + name: "input_0" + type { + tensor_type { + elem_type: 10 + shape { + } + } + } + } + output { + name: "_val_3" + type { + tensor_type { + elem_type: 10 + shape { + } + } + } + } + value_info { + name: "_val_1" + type { + tensor_type { + elem_type: 7 + shape { + dim { + dim_value: 1 + } + } + } + } + } + value_info { + name: "_val_2" + type { + tensor_type { + elem_type: 10 + shape { + dim { + dim_value: 1 + } + } + } + } + } +} +opset_import { + domain: "pkg.onnxscript.torch_lib" + version: 1 +} +opset_import { + domain: "" + version: 18 +} +opset_import { + domain: "pkg.onnxscript.torch_lib.common" + version: 1 +} +functions { + name: "_aten_linalg_vector_norm_no_dim_onnx" + input: "self" + output: "result_29" + 
attribute: "ord" + attribute: "keepdim" + node { + input: "self" + output: "tmp" + name: "n0" + op_type: "Shape" + domain: "" + } + node { + input: "tmp" + output: "self_rank" + name: "n1" + op_type: "Size" + domain: "" + } + node { + output: "int64_0" + name: "n2" + op_type: "Constant" + attribute { + name: "value" + t { + data_type: 7 + int64_data: 0 + name: "int64_0" + } + type: TENSOR + } + domain: "" + } + node { + input: "int64_0" + input: "self_rank" + output: "int64_0_cast" + name: "n3" + op_type: "CastLike" + domain: "" + } + node { + input: "self_rank" + input: "int64_0_cast" + output: "cond" + name: "n4" + op_type: "Equal" + domain: "" + } + node { + input: "cond" + output: "self_2" + name: "n5" + op_type: "If" + attribute { + name: "then_branch" + g { + node { + output: "int64_0_1d" + name: "n0" + op_type: "Constant" + attribute { + name: "value" + t { + dims: 1 + data_type: 7 + int64_data: 0 + name: "int64_0_1d" + } + type: TENSOR + } + domain: "" + } + node { + input: "self" + input: "int64_0_1d" + output: "self_0" + name: "n1" + op_type: "Unsqueeze" + domain: "" + } + name: "thenGraph_4" + output { + name: "self_0" + type { + } + } + } + type: GRAPH + } + attribute { + name: "else_branch" + g { + node { + input: "self" + output: "self_1" + name: "n0" + op_type: "Identity" + domain: "" + } + name: "elseGraph_4" + output { + name: "self_1" + type { + } + } + } + type: GRAPH + } + domain: "" + } + node { + input: "self_2" + output: "self_3" + name: "n6" + op_type: "Abs" + domain: "" + } + node { + output: "ord" + name: "n7" + op_type: "Constant" + attribute { + name: "value_float" + type: FLOAT + ref_attr_name: "ord" + } + domain: "" + } + node { + input: "ord" + output: "ord_4" + name: "n8" + op_type: "Cast" + attribute { + name: "to" + i: 1 + type: INT + } + domain: "" + } + node { + input: "ord_4" + output: "cond_5" + name: "n9" + op_type: "IsInf" + attribute { + name: "detect_negative" + i: 0 + type: INT + } + attribute { + name: "detect_positive" + i: 1 + type: INT + } + domain: "" + } + node { + input: "cond_5" + output: "result_24" + name: "n10" + op_type: "If" + attribute { + name: "then_branch" + g { + node { + input: "self_3" + output: "result" + name: "n0" + op_type: "ReduceMax" + attribute { + name: "keepdims" + type: INT + ref_attr_name: "keepdim" + } + domain: "" + } + name: "thenGraph_9" + output { + name: "result" + type { + } + } + } + type: GRAPH + } + attribute { + name: "else_branch" + g { + node { + input: "ord_4" + output: "cond_6" + name: "n0" + op_type: "IsInf" + attribute { + name: "detect_negative" + i: 1 + type: INT + } + attribute { + name: "detect_positive" + i: 0 + type: INT + } + domain: "" + } + node { + input: "cond_6" + output: "result_23" + name: "n1" + op_type: "If" + attribute { + name: "then_branch" + g { + node { + input: "self_3" + output: "result_7" + name: "n0" + op_type: "ReduceMin" + attribute { + name: "keepdims" + type: INT + ref_attr_name: "keepdim" + } + domain: "" + } + name: "thenGraph_11" + output { + name: "result_7" + type { + } + } + } + type: GRAPH + } + attribute { + name: "else_branch" + g { + node { + output: "const" + name: "n0" + op_type: "Constant" + attribute { + name: "value" + t { + data_type: 1 + float_data: 0.0 + name: "const" + } + type: TENSOR + } + domain: "" + } + node { + input: "const" + input: "ord_4" + output: "const_cast" + name: "n1" + op_type: "CastLike" + domain: "" + } + node { + input: "ord_4" + input: "const_cast" + output: "cond_8" + name: "n2" + op_type: "Equal" + domain: "" + } + node { + input: 
"cond_8" + output: "result_22" + name: "n3" + op_type: "If" + attribute { + name: "then_branch" + g { + node { + input: "self_3" + output: "self_bool" + name: "n0" + op_type: "Cast" + attribute { + name: "to" + i: 9 + type: INT + } + domain: "" + } + node { + input: "self_bool" + input: "self_3" + output: "self_0_1" + name: "n1" + op_type: "CastLike" + domain: "" + } + node { + input: "self_0_1" + output: "result_9" + name: "n2" + op_type: "ReduceSum" + attribute { + name: "keepdims" + i: 0 + type: INT + } + domain: "" + } + name: "thenGraph_13" + output { + name: "result_9" + type { + } + } + } + type: GRAPH + } + attribute { + name: "else_branch" + g { + node { + output: "const_10" + name: "n0" + op_type: "Constant" + attribute { + name: "value" + t { + data_type: 1 + float_data: 1.0 + name: "const_10" + } + type: TENSOR + } + domain: "" + } + node { + input: "const_10" + input: "ord_4" + output: "const_10_cast" + name: "n1" + op_type: "CastLike" + domain: "" + } + node { + input: "ord_4" + input: "const_10_cast" + output: "cond_11" + name: "n2" + op_type: "Equal" + domain: "" + } + node { + input: "cond_11" + output: "result_21" + name: "n3" + op_type: "If" + attribute { + name: "then_branch" + g { + node { + input: "self_3" + output: "result_12" + name: "n0" + op_type: "ReduceL1" + attribute { + name: "keepdims" + type: INT + ref_attr_name: "keepdim" + } + domain: "" + } + name: "thenGraph_18" + output { + name: "result_12" + type { + } + } + } + type: GRAPH + } + attribute { + name: "else_branch" + g { + node { + output: "const_13" + name: "n0" + op_type: "Constant" + attribute { + name: "value" + t { + data_type: 1 + float_data: 2.0 + name: "const_13" + } + type: TENSOR + } + domain: "" + } + node { + input: "const_13" + input: "ord_4" + output: "const_13_cast" + name: "n1" + op_type: "CastLike" + domain: "" + } + node { + input: "ord_4" + input: "const_13_cast" + output: "cond_14" + name: "n2" + op_type: "Equal" + domain: "" + } + node { + input: "cond_14" + output: "result_20" + name: "n3" + op_type: "If" + attribute { + name: "then_branch" + g { + node { + input: "self_3" + output: "result_15" + name: "n0" + op_type: "ReduceL2" + attribute { + name: "keepdims" + type: INT + ref_attr_name: "keepdim" + } + domain: "" + } + name: "thenGraph_20" + output { + name: "result_15" + type { + } + } + } + type: GRAPH + } + attribute { + name: "else_branch" + g { + node { + input: "ord_4" + input: "self_3" + output: "ord_float" + name: "n0" + op_type: "CastLike" + domain: "" + } + node { + input: "self_3" + input: "ord_float" + output: "self_pow" + name: "n1" + op_type: "Pow" + domain: "" + } + node { + input: "self_pow" + output: "tmp_16" + name: "n2" + op_type: "ReduceSum" + attribute { + name: "keepdims" + type: INT + ref_attr_name: "keepdim" + } + domain: "" + } + node { + output: "const_17" + name: "n3" + op_type: "Constant" + attribute { + name: "value" + t { + data_type: 1 + float_data: 1.0 + name: "const_17" + } + type: TENSOR + } + domain: "" + } + node { + input: "const_17" + input: "ord_float" + output: "const_17_cast" + name: "n4" + op_type: "CastLike" + domain: "" + } + node { + input: "const_17_cast" + input: "ord_float" + output: "tmp_18" + name: "n5" + op_type: "Div" + domain: "" + } + node { + input: "tmp_16" + input: "tmp_18" + output: "result_19" + name: "n6" + op_type: "Pow" + domain: "" + } + name: "elseGraph_20" + output { + name: "result_19" + type { + } + } + } + type: GRAPH + } + domain: "" + } + name: "elseGraph_18" + output { + name: "result_20" + type { + } + } + } 
+ type: GRAPH + } + domain: "" + } + name: "elseGraph_13" + output { + name: "result_21" + type { + } + } + } + type: GRAPH + } + domain: "" + } + name: "elseGraph_11" + output { + name: "result_22" + type { + } + } + } + type: GRAPH + } + domain: "" + } + name: "elseGraph_9" + output { + name: "result_23" + type { + } + } + } + type: GRAPH + } + domain: "" + } + node { + output: "int64_0_25" + name: "n11" + op_type: "Constant" + attribute { + name: "value" + t { + data_type: 7 + int64_data: 0 + name: "int64_0_25" + } + type: TENSOR + } + domain: "" + } + node { + input: "int64_0_25" + input: "self_rank" + output: "int64_0_25_cast" + name: "n12" + op_type: "CastLike" + domain: "" + } + node { + input: "self_rank" + input: "int64_0_25_cast" + output: "cond_26" + name: "n13" + op_type: "Equal" + domain: "" + } + node { + input: "cond_26" + output: "result_29" + name: "n14" + op_type: "If" + attribute { + name: "then_branch" + g { + node { + input: "result_24" + output: "result_27" + name: "n0" + op_type: "Squeeze" + domain: "" + } + name: "thenGraph_27" + output { + name: "result_27" + type { + } + } + } + type: GRAPH + } + attribute { + name: "else_branch" + g { + node { + input: "result_24" + output: "result_28" + name: "n0" + op_type: "Identity" + domain: "" + } + name: "elseGraph_27" + output { + name: "result_28" + type { + } + } + } + type: GRAPH + } + domain: "" + } + opset_import { + domain: "" + version: 18 + } + domain: "pkg.onnxscript.torch_lib" +} +functions { + name: "Rank" + input: "input" + output: "return_val" + node { + input: "input" + output: "tmp" + name: "n0" + op_type: "Shape" + domain: "" + } + node { + input: "tmp" + output: "return_val" + name: "n1" + op_type: "Size" + domain: "" + } + doc_string: "Take the rank of the input tensor." + opset_import { + domain: "" + version: 18 + } + domain: "pkg.onnxscript.torch_lib.common" +} +functions { + name: "IsScalar" + input: "input" + output: "return_val" + node { + input: "input" + output: "tmp" + name: "n0" + op_type: "Shape" + domain: "" + } + node { + input: "tmp" + output: "tmp_0" + name: "n1" + op_type: "Size" + domain: "" + } + node { + output: "tmp_1" + name: "n2" + op_type: "Constant" + attribute { + name: "value_int" + i: 0 + type: INT + } + domain: "" + } + node { + input: "tmp_0" + input: "tmp_1" + output: "return_val" + name: "n3" + op_type: "Equal" + domain: "" + } + doc_string: "Return whether the input has rank 0, or is a scalar." 
+ opset_import { + domain: "" + version: 18 + } + domain: "pkg.onnxscript.torch_lib.common" +} + +""" + +ort_inputs = {"input_0": array(0.8965, dtype=float16)} + +# Set up the inference session +session_options = ort.SessionOptions() +session_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL +onnx_model = onnx.ModelProto() +google.protobuf.text_format.Parse(onnx_model_text, onnx_model) + +# Uncomment this line to save the model to a file for examination +# onnx.save_model(onnx_model, "test_output_match_opinfo__linalg_vector_norm_cpu_float16.onnx") + +onnx.checker.check_model(onnx_model) +session = ort.InferenceSession(onnx_model.SerializeToString(), session_options, providers=("CPUExecutionProvider",)) + +# Run the model +for _ in range(N): + ort_outputs = session.run(None, ort_inputs) From 6f9f653ada3966e11f454440c066317930d95a5f Mon Sep 17 00:00:00 2001 From: Yulong Wang <7679871+fs-eire@users.noreply.github.com> Date: Wed, 15 Nov 2023 17:51:04 -0800 Subject: [PATCH 10/38] [wasm] increase test max memory from 2G to 4G (#18459) ### Description Increase max memory from 2G to 4G for onnxruntime_test_all in the WebAssembly build. --- cmake/onnxruntime_unittests.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/onnxruntime_unittests.cmake b/cmake/onnxruntime_unittests.cmake index bdb0230a8ebd0..a52e941b235b4 100644 --- a/cmake/onnxruntime_unittests.cmake +++ b/cmake/onnxruntime_unittests.cmake @@ -906,7 +906,7 @@ if (onnxruntime_ENABLE_TRAINING_TORCH_INTEROP) endif() if (CMAKE_SYSTEM_NAME STREQUAL "Emscripten") set_target_properties(onnxruntime_test_all PROPERTIES LINK_DEPENDS ${TEST_SRC_DIR}/wasm/onnxruntime_test_all_adapter.js) - set_target_properties(onnxruntime_test_all PROPERTIES LINK_FLAGS "-s STACK_SIZE=5242880 -s ALLOW_MEMORY_GROWTH=1 --pre-js \"${TEST_SRC_DIR}/wasm/onnxruntime_test_all_adapter.js\" -s \"EXPORTED_RUNTIME_METHODS=['FS']\" --preload-file ${CMAKE_CURRENT_BINARY_DIR}/testdata@/testdata -s EXIT_RUNTIME=1 -s DEMANGLE_SUPPORT=1") + set_target_properties(onnxruntime_test_all PROPERTIES LINK_FLAGS "-s STACK_SIZE=5242880 -s ALLOW_MEMORY_GROWTH=1 -s MAXIMUM_MEMORY=4294967296 --pre-js \"${TEST_SRC_DIR}/wasm/onnxruntime_test_all_adapter.js\" -s \"EXPORTED_RUNTIME_METHODS=['FS']\" --preload-file ${CMAKE_CURRENT_BINARY_DIR}/testdata@/testdata -s EXIT_RUNTIME=1 -s DEMANGLE_SUPPORT=1") if (onnxruntime_ENABLE_WEBASSEMBLY_THREADS) set_property(TARGET onnxruntime_test_all APPEND_STRING PROPERTY LINK_FLAGS " -s DEFAULT_PTHREAD_STACK_SIZE=131072 -s PROXY_TO_PTHREAD=1") endif() From 18a3675bf73f86e05a200428c06c053357bbc51b Mon Sep 17 00:00:00 2001 From: Chi Lo <54722500+chilo-ms@users.noreply.github.com> Date: Thu, 16 Nov 2023 07:39:41 +0000 Subject: [PATCH 11/38] [TensorRT EP] Only instantiate TRT builder once (#18100) The TRT builder instantiation is slow (see [here](https://github.com/microsoft/onnxruntime/issues/18071)). In the current TRT EP, we instantiate a builder object every time we need it. Multiple places need the TRT builder, so this causes a huge performance overhead.
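For illustration only, here is a minimal, self-contained sketch of the caching pattern applied here (`Provider` and `ExpensiveBuilder` are placeholders, not the actual EP types): the expensive object is created lazily on the first call, and every later call returns the same cached instance.

```cpp
// Minimal sketch of lazy, cached initialization (placeholder names, not the EP classes).
// As in this change, first-time creation is assumed to happen from a single thread
// during session initialization, so no locking around the cached pointer is shown.
#include <memory>

struct ExpensiveBuilder {
  ExpensiveBuilder() { /* slow construction (e.g. createInferBuilder) happens only once */ }
};

class Provider {
 public:
  ExpensiveBuilder* GetBuilder() const {
    if (!builder_) {
      builder_ = std::make_unique<ExpensiveBuilder>();  // created lazily on first use
    }
    return builder_.get();  // all later calls reuse the same instance
  }

 private:
  mutable std::unique_ptr<ExpensiveBuilder> builder_;  // cached across calls
};

int main() {
  Provider p;
  return p.GetBuilder() == p.GetBuilder() ? 0 : 1;  // same pointer both times
}
```

In the actual change below, the first-time creation is additionally wrapped in the API lock so that calls into the TensorRT API stay serialized.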
--- .../tensorrt/tensorrt_execution_provider.cc | 25 ++++++++++++++----- .../tensorrt/tensorrt_execution_provider.h | 10 +++++++- 2 files changed, 28 insertions(+), 7 deletions(-) diff --git a/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc b/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc index 020af451cdcd5..3b3732bb716f9 100644 --- a/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc +++ b/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc @@ -1272,6 +1272,20 @@ Status TensorrtExecutionProvider::OnRunEnd(bool sync_stream) { return Status::OK(); } +// Get the pointer to the IBuilder instance. +// Note: This function is not thread safe. Calls to this function from different threads must be serialized +// even though it doesn't make sense to have multiple threads initializing the same inference session. +nvinfer1::IBuilder* TensorrtExecutionProvider::GetBuilder() const { + if (!builder_) { + TensorrtLogger& trt_logger = GetTensorrtLogger(); + { + auto lock = GetApiLock(); + builder_ = std::unique_ptr(nvinfer1::createInferBuilder(trt_logger)); + } + } + return builder_.get(); +} + void TensorrtExecutionProvider::GetCustomOpDomainList(std::vector& custom_op_domain_list) const { if (info_.custom_op_domain_list.empty()) { common::Status status = CreateTensorRTCustomOpDomainList(info_); @@ -1633,7 +1647,7 @@ SubGraphCollection_t TensorrtExecutionProvider::GetSupportedList(SubGraphCollect // Get supported node list recursively SubGraphCollection_t parser_nodes_list; TensorrtLogger& trt_logger = GetTensorrtLogger(); - auto trt_builder = std::unique_ptr(nvinfer1::createInferBuilder(trt_logger)); + auto trt_builder = GetBuilder(); const auto explicitBatch = 1U << static_cast(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH); auto trt_network = std::unique_ptr(trt_builder->createNetworkV2(explicitBatch)); @@ -1985,7 +1999,7 @@ common::Status TensorrtExecutionProvider::Compile(const std::vector(nvinfer1::createInferBuilder(trt_logger)); + auto trt_builder = GetBuilder(); const auto explicitBatch = 1U << static_cast(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH); auto trt_network = std::unique_ptr(trt_builder->createNetworkV2(explicitBatch)); auto trt_config = std::unique_ptr(trt_builder->createBuilderConfig()); @@ -2438,7 +2452,6 @@ common::Status TensorrtExecutionProvider::Compile(const std::vectorallocate_func, context->release_func, context->allocator_handle, context->node_name, - &parsers_[context->node_name], &engines_[context->node_name], &contexts_[context->node_name], &builders_[context->node_name], + *p = {context->allocate_func, context->release_func, context->allocator_handle, context->node_name, builder_.get(), + &parsers_[context->node_name], &engines_[context->node_name], &contexts_[context->node_name], &networks_[context->node_name], input_info_[context->node_name], output_info_[context->node_name], input_shape_ranges_[context->node_name], sync_stream_after_enqueue_, &tensorrt_mu_, fp16_enable_, int8_enable_, int8_calibration_cache_available_, dla_enable_, dla_core_, &max_workspace_size_, trt_node_name_with_precision, engine_cache_enable_, cache_path_, @@ -2490,7 +2503,7 @@ common::Status TensorrtExecutionProvider::Compile(const std::vectorsync_stream_after_enqueue; auto fused_node_name = trt_state->fused_node_name; auto& shape_ranges = trt_state->input_shape_ranges; - auto trt_builder = trt_state->builder->get(); + auto trt_builder = trt_state->builder; auto trt_engine = trt_state->engine->get(); 
auto trt_context = trt_state->context->get(); auto trt_profiles = trt_state->profiles; diff --git a/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.h b/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.h index cda08715ea009..a945d219088aa 100644 --- a/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.h +++ b/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.h @@ -105,10 +105,10 @@ struct TensorrtFuncState { DestroyFunc test_release_func = nullptr; AllocatorHandle allocator = nullptr; std::string fused_node_name; + nvinfer1::IBuilder* builder; tensorrt_ptr::unique_pointer* parser = nullptr; std::unique_ptr* engine = nullptr; std::unique_ptr* context = nullptr; - std::unique_ptr* builder = nullptr; std::unique_ptr* network = nullptr; std::vector> input_info; std::vector> output_info; @@ -245,6 +245,8 @@ class TensorrtExecutionProvider : public IExecutionProvider { std::unordered_set control_flow_op_set_ = {"If", "Loop", "Scan"}; mutable std::unordered_map> subgraph_context_map_; + mutable std::unique_ptr builder_; + // Following maps that hold TRT objects will be accessible by different threads if ORT is using multithreading. // In general, TensorRT objects are not thread safe; accesses to an object from different threads must be serialized by the client. // But there are still some thread safe operations, please see here https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#threading @@ -456,5 +458,11 @@ class TensorrtExecutionProvider : public IExecutionProvider { void CaptureBegin(); void CaptureEnd(); void IncrementRegularRunCountBeforeGraphCapture(); + + /** + * Get the pointer to the IBuilder instance. + * This function only creates the instance at the first time it's being called." + */ + nvinfer1::IBuilder* GetBuilder() const; }; } // namespace onnxruntime From 751aa8d31ad10db97762d1a4a60237aae2705a47 Mon Sep 17 00:00:00 2001 From: guyang3532 <62738430+guyang3532@users.noreply.github.com> Date: Thu, 16 Nov 2023 16:29:00 +0800 Subject: [PATCH 12/38] fix axis of layernorm for UpstreamReshape (#18425) Similar to https://github.com/microsoft/onnxruntime/pull/17255, update the axis attribute of LayerNormalization when a Reshape is upstreamed through it. --- .../compute_optimizer/upstream_gather.cc | 4 +- .../upstream_reshape_actors.cc | 18 ++ .../upstream_reshape_actors.h | 34 ++- .../test/optimizer/compute_optimizer_test.cc | 247 +++++++----------- .../python/orttraining_test_ortmodule_api.py | 21 +- 5 files changed, 155 insertions(+), 169 deletions(-) diff --git a/onnxruntime/core/optimizer/compute_optimizer/upstream_gather.cc b/onnxruntime/core/optimizer/compute_optimizer/upstream_gather.cc index 094ea1e24dd92..9c98ed6d3e114 100644 --- a/onnxruntime/core/optimizer/compute_optimizer/upstream_gather.cc +++ b/onnxruntime/core/optimizer/compute_optimizer/upstream_gather.cc @@ -338,8 +338,8 @@ std::optional IsSupportedGather(Graph& graph, Node& node, auto axis = static_cast(node.GetAttributes().at("axis").i()); axis = axis < 0 ?
axis + data_rank : axis; size_t dim_size = static_cast(indices_shape->dim_size()); - bool is_single_value_1d_tensor = dim_size != 0 && (dim_size == 1 && utils::HasDimValue(indices_shape->dim(0)) && - indices_shape->dim(0).dim_value() == 1); + bool is_single_value_1d_tensor = dim_size == 1 && utils::HasDimValue(indices_shape->dim(0)) && + indices_shape->dim(0).dim_value() == 1; if (dim_size != 0 && !is_single_value_1d_tensor) { if (dim_size == 1 && utils::HasDimValue(data_shape->dim(axis)) && data_shape->dim(axis).dim_value() > indices_shape->dim(0).dim_value()) { diff --git a/onnxruntime/core/optimizer/compute_optimizer/upstream_reshape_actors.cc b/onnxruntime/core/optimizer/compute_optimizer/upstream_reshape_actors.cc index 716b027068ba1..23f7c45fba4ba 100644 --- a/onnxruntime/core/optimizer/compute_optimizer/upstream_reshape_actors.cc +++ b/onnxruntime/core/optimizer/compute_optimizer/upstream_reshape_actors.cc @@ -3,6 +3,7 @@ #ifdef ENABLE_TRAINING +#include #include "core/optimizer/utils.h" #include "core/optimizer/compute_optimizer/upstream_reshape_actors.h" @@ -282,6 +283,23 @@ bool LayerNormalizationReshapeActor::PreCheck( return propagate_input_indices.size() > 0; } +bool LayerNormalizationReshapeActor::PostProcess( + Graph& /* graph */, Node& current_node, const ReshapeInfo& /* info_without_node */, + const logging::Logger& /* logger */, + std::vector& /* propagate_input_indices */, + const std::unordered_map>& /* all_input_cmp_rets */, + const std::unordered_map& /* new_reshape_infos */) { + auto axis = static_cast(current_node.GetAttributes().at("axis").i()); + // When Reshape(from 3D to 2D, with the first two dimensions be merged) upstream a LayerNormalization, + // The axis attribute of LayerNormalization should be decreased by 1 if it is greater than 1. + if (axis > 1) { + auto new_axis = axis - 1; + auto& attributes = current_node.GetMutableAttributes(); + attributes["axis"] = ONNX_NAMESPACE::MakeAttribute("axis", static_cast(new_axis)); + } + return true; +} + template class SimplePointwiseReshapeActor; template class SimplePointwiseReshapeActor; diff --git a/onnxruntime/core/optimizer/compute_optimizer/upstream_reshape_actors.h b/onnxruntime/core/optimizer/compute_optimizer/upstream_reshape_actors.h index 05bcbabe9ba4c..de50a56fd8781 100644 --- a/onnxruntime/core/optimizer/compute_optimizer/upstream_reshape_actors.h +++ b/onnxruntime/core/optimizer/compute_optimizer/upstream_reshape_actors.h @@ -111,13 +111,11 @@ class UpStreamReshapeOperatorActorBase : public UpStreamOperatorActorBase { * So far, we don't have requirements to override PostProcess function. */ - bool PostProcess(Graph& /* graph */, Node& /* current_node */, const ReshapeInfo& /* info_without_node */, - const logging::Logger& /* logger */, - std::vector& /* propagate_input_indices */, - const std::unordered_map>& /* all_input_cmp_rets */, - const std::unordered_map& /* new_reshape_infos */) { - return true; - } + virtual bool PostProcess(Graph& /* graph */, Node& /* current_node */, const ReshapeInfo& /* info_without_node */, + const logging::Logger& /* logger */, + std::vector& /* propagate_input_indices */, + const std::unordered_map>& /* all_input_cmp_rets */, + const std::unordered_map& /* new_reshape_infos */) = 0; }; // The inputs are broad-cast-able. 
The outputs should have the same shape (fully broadcasted shape) @@ -133,6 +131,14 @@ class SimplePointwiseReshapeActor : public UpStreamReshapeOperatorActorBase { std::vector& propagate_input_indices, std::unordered_map>& all_input_cmp_rets, std::function& shape_update_func) override; + + bool PostProcess(Graph& /* graph */, Node& /* current_node */, const ReshapeInfo& /* info_without_node */, + const logging::Logger& /* logger */, + std::vector& /* propagate_input_indices */, + const std::unordered_map>& /* all_input_cmp_rets */, + const std::unordered_map& /* new_reshape_infos */) override { + return true; + } }; class MatMulReshapeActor : public UpStreamReshapeOperatorActorBase { @@ -145,6 +151,14 @@ class MatMulReshapeActor : public UpStreamReshapeOperatorActorBase { std::vector& propagate_input_indices, std::unordered_map>& all_input_cmp_rets, std::function& shape_update_func) override; + + bool PostProcess(Graph& /* graph */, Node& /* current_node */, const ReshapeInfo& /* info_without_node */, + const logging::Logger& /* logger */, + std::vector& /* propagate_input_indices */, + const std::unordered_map>& /* all_input_cmp_rets */, + const std::unordered_map& /* new_reshape_infos */) override { + return true; + } }; class LayerNormalizationReshapeActor : public UpStreamReshapeOperatorActorBase { @@ -157,6 +171,12 @@ class LayerNormalizationReshapeActor : public UpStreamReshapeOperatorActorBase { std::vector& propagate_input_indices, std::unordered_map>& all_input_cmp_rets, std::function& shape_update_func) override; + + bool PostProcess(Graph& /* graph */, Node& current_node, const ReshapeInfo& /* info_without_node */, + const logging::Logger& /* logger */, + std::vector& /* propagate_input_indices */, + const std::unordered_map>& /* all_input_cmp_rets */, + const std::unordered_map& /* new_reshape_infos */) override; }; /** diff --git a/onnxruntime/test/optimizer/compute_optimizer_test.cc b/onnxruntime/test/optimizer/compute_optimizer_test.cc index a03d0da2538d4..9dcedd1fd7681 100644 --- a/onnxruntime/test/optimizer/compute_optimizer_test.cc +++ b/onnxruntime/test/optimizer/compute_optimizer_test.cc @@ -847,7 +847,7 @@ Test graph includes multiple equivalent subgraphs as below. Add an Identity node because currently, we don't allow Gather generates graph output. 
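The axis fix-up applied by LayerNormalizationReshapeActor::PostProcess above can be sanity-checked numerically. The following is a minimal NumPy sketch, not part of the patch: the [4, 32, 1024] shape only mirrors the tests, and the layer_norm helper is a stand-in for ONNX LayerNormalization without scale/bias. It shows that when a Reshape merging the two leading dimensions is moved above the normalization, an axis greater than 1 must be decreased by 1 for the result to stay the same.

```python
# Illustrative sketch (assumed NumPy, assumed shapes): check that LayerNormalization
# over axis=2 of a [B, S, H] tensor matches axis=1 once the leading dims are merged.
import numpy as np

def layer_norm(x, axis, eps=1e-5):
    # Normalize over dims [axis, ..., rank-1], as ONNX LayerNormalization does (no scale/bias here).
    start = axis if axis >= 0 else x.ndim + axis
    axes = tuple(range(start, x.ndim))
    mean = x.mean(axis=axes, keepdims=True)
    var = x.var(axis=axes, keepdims=True)
    return (x - mean) / np.sqrt(var + eps)

x = np.random.rand(4, 32, 1024).astype(np.float32)
before = layer_norm(x, axis=2)                     # LayerNorm, then Reshape([-1, 1024])
after = layer_norm(x.reshape(-1, 1024), axis=1)    # Reshape first, then LayerNorm with axis - 1
assert np.allclose(before.reshape(-1, 1024), after, atol=1e-5)
```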
*/ TEST(ComputeOptimizerTests, GatherLayerNormalization) { - std::vector> test_config_pairs{ + std::vector> test_config_pairs{ // { // is_scalar_slice, // ln_axis_before_propagation, @@ -929,13 +929,6 @@ TEST(ComputeOptimizerTests, GatherLayerNormalization) { const ONNX_NAMESPACE::TensorShapeProto* slice_out_shape = producer_node->OutputDefs()[0]->Shape(); TEST_RETURN_IF_NOT(slice_out_shape != nullptr); - auto& attrs = node.GetAttributes(); - TEST_RETURN_IF_NOT(attrs.find("axis") != attrs.end()); - - auto& axis_attr = attrs.at("axis"); - auto axis_value = (int)axis_attr.i(); - TEST_RETURN_IF_NOT(axis_value == ln_axis_after); - if (is_scalar_slice) { TEST_RETURN_IF_NOT(slice_out_shape->dim_size() == 2); TEST_RETURN_IF_NOT(utils::HasDimValue(slice_out_shape->dim(0)) && @@ -951,10 +944,15 @@ TEST(ComputeOptimizerTests, GatherLayerNormalization) { TEST_RETURN_IF_NOT(utils::HasDimValue(slice_out_shape->dim(2)) && slice_out_shape->dim(2).dim_value() == 256); } - } else { TEST_RETURN_IF_NOT(producer_node == nullptr); } + auto& attrs = node.GetAttributes(); + TEST_RETURN_IF_NOT(attrs.find("axis") != attrs.end()); + + auto& axis_attr = attrs.at("axis"); + auto axis_value = (int)axis_attr.i(); + TEST_RETURN_IF_NOT(axis_value == ln_axis_after); } } @@ -2841,165 +2839,110 @@ Test graph include multiple equivalent subgraphs as below. Add an Identity node because currently we don't allow Reshape generate graph output. */ -TEST(ComputeOptimizerTests, ReshapeLayerNormalization_PropagationOnOneBranch) { - const logging::Logger* logger = &logging::LoggingManager::DefaultLogger(); - auto pre_graph_checker = [](Graph& graph) -> Status { - auto op_count_pre = CountOpsInGraph(graph); - TEST_RETURN_IF_NOT(op_count_pre.size() == 3U); - TEST_RETURN_IF_NOT(op_count_pre["LayerNormalization"] == 1); - TEST_RETURN_IF_NOT(op_count_pre["Reshape"] == 1); - TEST_RETURN_IF_NOT(op_count_pre["Identity"] == 1); - return Status::OK(); - }; - - auto post_graph_checker = [](Graph& graph) { - auto op_count_post = CountOpsInGraph(graph); - TEST_RETURN_IF_NOT(op_count_post.size() == 3U); - TEST_RETURN_IF_NOT(op_count_post["LayerNormalization"] == 1); - TEST_RETURN_IF_NOT(op_count_post["Reshape"] == 1); - TEST_RETURN_IF_NOT(op_count_post["Identity"] == 1); - - for (Node& node : graph.Nodes()) { - if (node.OpType() == "LayerNormalization") { - const auto& input_defs = node.InputDefs(); - - { - auto producer_node = graph.GetProducerNode(input_defs[0]->Name()); - TEST_RETURN_IF_NOT(producer_node != nullptr); - TEST_RETURN_IF_NOT(producer_node->OpType() == "Reshape"); - - InlinedVector values; - constexpr bool require_constant = true; - NodeArg* initializer_node_arg = graph.GetNodeArg(producer_node->InputDefs()[1]->Name()); - TEST_RETURN_IF_NOT(optimizer_utils::AppendTensorFromInitializer(graph, *initializer_node_arg, values, require_constant)); - TEST_RETURN_IF_NOT(values.size() == 2); - TEST_RETURN_IF_NOT(values[0] == -1); - TEST_RETURN_IF_NOT(values[1] == 1024); - } - - { - auto producer_node = graph.GetProducerNode(input_defs[1]->Name()); - TEST_RETURN_IF_NOT(producer_node == nullptr); - } - - { - auto producer_node = graph.GetProducerNode(input_defs[2]->Name()); - TEST_RETURN_IF_NOT(producer_node == nullptr); - } - } - } - return Status::OK(); +TEST(ComputeOptimizerTests, ReshapeLayerNormalization) { + std::vector> test_config_pairs{ + // { + // ln_axis_before_propagation, + // expected_ln_axis_after_propagation, + // expected to propagate + // } + {0, 0, false}, + {1, 1, false}, + {2, 1, true}, + {-3, -3, false}, + {-2, -2, 
false}, + {-1, -1, true}, }; - std::vector fist_dim_values = {-1, 128}; - for (auto first_dim_value : fist_dim_values) { - auto build_test_case = [&first_dim_value](ModelTestBuilder& builder) { - auto* input1_arg = builder.MakeInput({{4, 32, 1024}}); - auto* input2_arg = builder.MakeInput({{1024}}); - auto* input3_arg = builder.MakeInput({{1024}}); - auto* ln_out = builder.MakeIntermediate(); - builder.AddNode("LayerNormalization", {input1_arg, input2_arg, input3_arg}, {ln_out}) - .AddAttribute("axis", static_cast(-1)); - - auto* shape_initializer = builder.MakeInitializer({2}, {first_dim_value, 1024}); - auto* reshape_out = builder.MakeIntermediate(); - builder.AddNode("Reshape", {ln_out, shape_initializer}, {reshape_out}); + for (auto p : test_config_pairs) { + int64_t ln_axis_before = std::get<0>(p); + int64_t ln_axis_after = std::get<1>(p); + bool expected_to_propagate = std::get<2>(p); - auto* identity_out = builder.MakeOutput(); - builder.AddNode("Identity", {reshape_out}, {identity_out}); + const logging::Logger* logger = &logging::LoggingManager::DefaultLogger(); + auto pre_graph_checker = [](Graph& graph) -> Status { + auto op_count_pre = CountOpsInGraph(graph); + TEST_RETURN_IF_NOT(op_count_pre.size() == 3U); + TEST_RETURN_IF_NOT(op_count_pre["LayerNormalization"] == 1); + TEST_RETURN_IF_NOT(op_count_pre["Reshape"] == 1); + TEST_RETURN_IF_NOT(op_count_pre["Identity"] == 1); + return Status::OK(); }; - const std::vector opsets{12, 13, 14}; - for (auto& opset_version : opsets) { - std::unique_ptr transformer = std::make_unique(); - ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, opset_version, *logger, std::move(transformer), - TransformerLevel::Level1, - 1, pre_graph_checker, post_graph_checker)); - } - } -} + auto post_graph_checker = [ln_axis_after, expected_to_propagate](Graph& graph) { + auto op_count_post = CountOpsInGraph(graph); + TEST_RETURN_IF_NOT(op_count_post.size() == 3U); + TEST_RETURN_IF_NOT(op_count_post["LayerNormalization"] == 1); + TEST_RETURN_IF_NOT(op_count_post["Reshape"] == 1); + TEST_RETURN_IF_NOT(op_count_post["Identity"] == 1); -/* -Test graph include multiple equivalent subgraphs as below. - graph input [4, 32, 1024] (float) graph input [1024] (float) graph input [1024] (float) - | | / - \_____________ _______/ __________________________/ - \ / / - LayerNormalization - | - Reshape - | - Identity - | - graph out [128, 1024] (float) + for (Node& node : graph.Nodes()) { + if (node.OpType() == "LayerNormalization") { + const auto& input_defs = node.InputDefs(); -Add an Identity node because currently we don't allow Reshape generate graph output. 
-*/ -TEST(ComputeOptimizerTests, ReshapeLayerNormalization_NoPropagation) { - const logging::Logger* logger = &logging::LoggingManager::DefaultLogger(); - auto pre_graph_checker = [](Graph& graph) -> Status { - auto op_count_pre = CountOpsInGraph(graph); - TEST_RETURN_IF_NOT(op_count_pre.size() == 3U); - TEST_RETURN_IF_NOT(op_count_pre["LayerNormalization"] == 1); - TEST_RETURN_IF_NOT(op_count_pre["Reshape"] == 1); - TEST_RETURN_IF_NOT(op_count_pre["Identity"] == 1); - return Status::OK(); - }; + if (expected_to_propagate) { + auto producer_node = graph.GetProducerNode(input_defs[0]->Name()); + TEST_RETURN_IF_NOT(producer_node != nullptr); + TEST_RETURN_IF_NOT(producer_node->OpType() == "Reshape"); - auto post_graph_checker = [](Graph& graph) { - auto op_count_post = CountOpsInGraph(graph); - TEST_RETURN_IF_NOT(op_count_post.size() == 3U); - TEST_RETURN_IF_NOT(op_count_post["LayerNormalization"] == 1); - TEST_RETURN_IF_NOT(op_count_post["Reshape"] == 1); - TEST_RETURN_IF_NOT(op_count_post["Identity"] == 1); + InlinedVector values; + constexpr bool require_constant = true; + NodeArg* initializer_node_arg = graph.GetNodeArg(producer_node->InputDefs()[1]->Name()); + TEST_RETURN_IF_NOT(optimizer_utils::AppendTensorFromInitializer(graph, *initializer_node_arg, values, require_constant)); + TEST_RETURN_IF_NOT(values.size() == 2); + TEST_RETURN_IF_NOT(values[0] == -1); + TEST_RETURN_IF_NOT(values[1] == 1024); + } else { + auto producer_node = graph.GetProducerNode(input_defs[0]->Name()); + TEST_RETURN_IF_NOT(producer_node == nullptr); + } - for (Node& node : graph.Nodes()) { - if (node.OpType() == "LayerNormalization") { - const auto& input_defs = node.InputDefs(); + { + auto producer_node = graph.GetProducerNode(input_defs[1]->Name()); + TEST_RETURN_IF_NOT(producer_node == nullptr); + } - { - auto producer_node = graph.GetProducerNode(input_defs[0]->Name()); - TEST_RETURN_IF_NOT(producer_node == nullptr); - } + { + auto producer_node = graph.GetProducerNode(input_defs[2]->Name()); + TEST_RETURN_IF_NOT(producer_node == nullptr); + } - { - auto producer_node = graph.GetProducerNode(input_defs[1]->Name()); - TEST_RETURN_IF_NOT(producer_node == nullptr); - } + auto& attrs = node.GetAttributes(); + TEST_RETURN_IF_NOT(attrs.find("axis") != attrs.end()); - { - auto producer_node = graph.GetProducerNode(input_defs[2]->Name()); - TEST_RETURN_IF_NOT(producer_node == nullptr); + auto& axis_attr = attrs.at("axis"); + auto axis_value = (int)axis_attr.i(); + TEST_RETURN_IF_NOT(axis_value == ln_axis_after); } } - } - return Status::OK(); - }; - - std::vector fist_dim_values = {-1, 128}; - for (auto first_dim_value : fist_dim_values) { - auto build_test_case = [&first_dim_value](ModelTestBuilder& builder) { - auto* input1_arg = builder.MakeInput({{4, 32, 1024}}); - auto* input2_arg = builder.MakeInput({{1024}}); - auto* input3_arg = builder.MakeInput({{1024}}); - auto* ln_out = builder.MakeIntermediate(); - builder.AddNode("LayerNormalization", {input1_arg, input2_arg, input3_arg}, {ln_out}) - .AddAttribute("axis", static_cast(1)); - - auto* shape_initializer = builder.MakeInitializer({2}, {first_dim_value, 1024}); - auto* reshape_out = builder.MakeIntermediate(); - builder.AddNode("Reshape", {ln_out, shape_initializer}, {reshape_out}); - - auto* identity_out = builder.MakeOutput(); - builder.AddNode("Identity", {reshape_out}, {identity_out}); + return Status::OK(); }; - const std::vector opsets{12, 13, 14}; - for (auto& opset_version : opsets) { - std::unique_ptr transformer = std::make_unique(); - 
ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, opset_version, *logger, std::move(transformer), - TransformerLevel::Level1, - 1, pre_graph_checker, post_graph_checker)); + std::vector fist_dim_values = {-1, 128}; + for (auto first_dim_value : fist_dim_values) { + auto build_test_case = [ln_axis_before, &first_dim_value](ModelTestBuilder& builder) { + auto* input1_arg = builder.MakeInput({{4, 32, 1024}}); + auto* input2_arg = builder.MakeInput({{1024}}); + auto* input3_arg = builder.MakeInput({{1024}}); + auto* ln_out = builder.MakeIntermediate(); + builder.AddNode("LayerNormalization", {input1_arg, input2_arg, input3_arg}, {ln_out}) + .AddAttribute("axis", ln_axis_before); + + auto* shape_initializer = builder.MakeInitializer({2}, {first_dim_value, 1024}); + auto* reshape_out = builder.MakeIntermediate(); + builder.AddNode("Reshape", {ln_out, shape_initializer}, {reshape_out}); + + auto* identity_out = builder.MakeOutput(); + builder.AddNode("Identity", {reshape_out}, {identity_out}); + }; + + const std::vector opsets{12, 13, 14}; + for (auto& opset_version : opsets) { + std::unique_ptr transformer = std::make_unique(); + ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, opset_version, *logger, std::move(transformer), + TransformerLevel::Level1, + 1, pre_graph_checker, post_graph_checker)); + } } } } diff --git a/orttraining/orttraining/test/python/orttraining_test_ortmodule_api.py b/orttraining/orttraining/test/python/orttraining_test_ortmodule_api.py index 13024b81f4b3c..ad0e5d8beba3d 100644 --- a/orttraining/orttraining/test/python/orttraining_test_ortmodule_api.py +++ b/orttraining/orttraining/test/python/orttraining_test_ortmodule_api.py @@ -5761,6 +5761,7 @@ def run_step(model, input, positions): ("MatMul", 1), ("Dropout", 0), ("LayerNormalization", 0), + ("LayerNormalization", 1), ("Cast", 0), ("BiasGelu", 0), ("Gelu", 0), @@ -5773,12 +5774,18 @@ def test_ops_for_padding_elimination(test_cases): test_op = test_cases[0] case = test_cases[1] + vocab_size, hidden_size = 50265, 768 + batch_size, max_seq_length = 8, 128 + class ToyModel(torch.nn.Module): def __init__(self, vocab_size, hidden_size, pad_token_id): super().__init__() self.word_embeddings = nn.Embedding(vocab_size, hidden_size, padding_idx=pad_token_id) if test_op == "LayerNormalization": - self.LayerNorm = nn.LayerNorm(hidden_size, eps=1e-05) + if case == 0: + self.LayerNorm = nn.LayerNorm(hidden_size, eps=1e-05) + else: + self.LayerNorm = nn.LayerNorm([max_seq_length, hidden_size], eps=1e-05) self.hidden_size = hidden_size # test test_elementwise op for padding elimination @@ -5889,8 +5896,6 @@ def generate_inputs(batch_size, max_seq_length, vocab_size): batched_inputs.append(torch.cat((input_id, padding))) return torch.stack(batched_inputs) - vocab_size, hidden_size = 50265, 768 - batch_size, max_seq_length = 8, 128 device = "cuda" model = ORTModule(ToyModel(vocab_size, hidden_size, 1).to(device)) x = generate_inputs(batch_size, max_seq_length, vocab_size) @@ -5908,7 +5913,7 @@ def generate_inputs(batch_size, max_seq_length, vocab_size): assert len([node.op_type for node in training_model.graph.node if node.op_type == "FlattenAndUnpad"]) == 3 else: assert len([node.op_type for node in training_model.graph.node if node.op_type == "FlattenAndUnpad"]) == 2 - gathergrad_node = next(node for node in training_model.graph.node if node.op_type == "PadAndUnflatten") + recover_pad_node = next(node for node in training_model.graph.node if node.op_type == "PadAndUnflatten") def find_input_node_type(model, arg): result = 
[] @@ -5917,14 +5922,14 @@ def find_input_node_type(model, arg): result.append(node) return result[0].op_type if len(result) == 1 else None - gathergrad_input_optypes = [find_input_node_type(training_model, arg) for arg in gathergrad_node.input] + recover_pad_input_optypes = [find_input_node_type(training_model, arg) for arg in recover_pad_node.input] if test_op == "Add" or test_op == "Mul" or test_op == "Sub": - assert test_op in gathergrad_input_optypes + assert test_op in recover_pad_input_optypes else: if case == 0: - assert test_op in gathergrad_input_optypes + assert test_op in recover_pad_input_optypes else: - assert "ATen" in gathergrad_input_optypes + assert "ATen" in recover_pad_input_optypes del os.environ["ORTMODULE_ENABLE_EMBEDDING_SPARSE_OPTIMIZER"] From 16d7f5519318fc309eccdc957c14eba0bfe5bb10 Mon Sep 17 00:00:00 2001 From: zhijiang <43435212+zhijxu-MS@users.noreply.github.com> Date: Thu, 16 Nov 2023 17:08:06 +0800 Subject: [PATCH 13/38] lora conv1d replacement (#16643) in LoRA code, it will use conv1d to do projection for qkv, while the conv1d calculation is mathematically equivalent to matmul, and matmul is much faster than conv1d. The subsitution of the graph optimizer is: 1 conv1d >> 2 split + 1 squeeze + group_num matmul + 1 concat with this optimizer, we see 10%+ in one 1P model --- .../core/optimizer/conv1d_replacement.cc | 164 ++++++++++++++++++ .../core/optimizer/conv1d_replacement.h | 18 ++ .../core/optimizer/graph_transformer_utils.cc | 2 + .../test/optimizer/graph_transform_test.cc | 98 +++++++++++ 4 files changed, 282 insertions(+) create mode 100644 orttraining/orttraining/core/optimizer/conv1d_replacement.cc create mode 100644 orttraining/orttraining/core/optimizer/conv1d_replacement.h diff --git a/orttraining/orttraining/core/optimizer/conv1d_replacement.cc b/orttraining/orttraining/core/optimizer/conv1d_replacement.cc new file mode 100644 index 0000000000000..0412000e04e1b --- /dev/null +++ b/orttraining/orttraining/core/optimizer/conv1d_replacement.cc @@ -0,0 +1,164 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#include +#include "core/optimizer/initializer.h" +#include "orttraining/core/optimizer/conv1d_replacement.h" +#include "core/graph/graph_utils.h" + +/* + In LoRA code, it will use conv1d to do projection for qkv, + while the conv1d calculation is mathematically equivalent to MatMul, and MatMul is much faster than conv1d in GPU. + The graph transformation is doing the following graph substitution: + 1. The input graph is: + conv_input conv_weight + \ / + \ / + conv1d + + 2. The output graph is as follows, + the number of MatMul is equal to attribute "group" of conv1d + conv_input conv1d.group conv_weight conv1d.group + \ / \ / + \ / Squeeze / + \ / \ / + Split Split + / / ... \ / / ... \ + / / ... \ / / ... \ + / / ... \ / / ... \ + input0 input1 ... inputN weight0 weight1 ... weightN + \ \ \ / / / + \ \ \ / / / + \ \ \ / / / + \ \ X / / + \ \ / \ / / + \ \ / X / + \ X / \ / + \ / \ / \ / + MatMul MatMul ... MatMul + \ | ... 
/ + \ | / + \ | / +*/ +namespace onnxruntime { +bool NodeCanBeReplacedByMatmul(const Node& node) { + // If node type is Conv, and attr "dilations" is 1, "kernel_shape" is 1, "stride" is 1, group is 1 or 2, + // then it can be replaced by MatMul + // Kernel_shape is 1 means it is conv1d + if (!graph_utils::IsSupportedOptypeVersionAndDomain(node, "Conv", {1, 11})) { + return false; + } + const auto* dilations = graph_utils::GetNodeAttribute(node, "dilations"); + const auto* kernel_shape = graph_utils::GetNodeAttribute(node, "kernel_shape"); + const auto* stride = graph_utils::GetNodeAttribute(node, "strides"); + const auto* group = graph_utils::GetNodeAttribute(node, "group"); + if (dilations == nullptr || kernel_shape == nullptr || stride == nullptr || group == nullptr) { + return false; + } + if ((dilations->ints_size() && dilations->ints(0) != 1) || + (kernel_shape->ints_size() && kernel_shape->ints(0) != 1) || + (stride->ints_size() && stride->ints(0) != 1) || + group->i() >= 3) { + return false; + } + + return true; +} + +void Conv1dToMatmul(Graph& graph, Node& conv) { + // Shape of conv1d input: [batch_size, in_channels, in_length] + // Shape of conv1d weight:[output_channels, input_channels/group, kernel_shape], kernel_shape is 1 + // We need to split the input into "group", and squeeze&split the weight, and then do MatMul + const std::string node_description("Conv1dReplacement"); + auto execution_provider_type = conv.GetExecutionProviderType(); + // 1. Split conv input + auto group_attr = graph_utils::GetNodeAttribute(conv, "group"); + int64_t group_num = 1; // default group is 1 from ONNX schema + if (group_attr != nullptr) { + group_num = group_attr->i(); + } + auto conv1d_input = conv.MutableInputDefs()[0]; + std::vector conv1d_input_splitted_outputs; + for (int i = 0; i < group_num; i++) { + conv1d_input_splitted_outputs.push_back(&graph.GetOrCreateNodeArg( + graph.GenerateNodeArgName("input_split_output"), nullptr)); + } + auto& input_split = graph.AddNode(graph.GenerateNodeName("Split"), "Split", node_description, {conv1d_input}, + {conv1d_input_splitted_outputs}); + input_split.SetExecutionProviderType(execution_provider_type); + input_split.AddAttribute("axis", int64_t(1)); + auto onnx_opset_version = graph.DomainToVersionMap().at(kOnnxDomain); + if (onnx_opset_version >= 18) { + input_split.AddAttribute("num_outputs", group_num); + } + // 2. 
Squeeze conv weight + auto conv1d_weight = conv.MutableInputDefs()[1]; + auto weight_squeeze_output = &graph.GetOrCreateNodeArg(graph.GenerateNodeArgName("weight_squeeze_output"), nullptr); + auto& weight_squeeze = graph.AddNode(graph.GenerateNodeName("WeightSqueeze"), "Squeeze", + node_description, {conv1d_weight}, {weight_squeeze_output}); + if (onnx_opset_version > 12) { + // After onnx version 12, squeeze node has axes as input instead of attribute + ONNX_NAMESPACE::TensorProto initializer_proto; + initializer_proto.set_name(graph.GenerateNodeName("ConstAsInitializer")); + initializer_proto.add_dims(static_cast(1)); + initializer_proto.set_data_type(ONNX_NAMESPACE::TensorProto_DataType_INT64); + InlinedVector initializer_proto_value{2}; + initializer_proto.set_raw_data(initializer_proto_value.data(), initializer_proto_value.size() * sizeof(int64_t)); + auto& axes_input = graph_utils::AddInitializer(graph, initializer_proto); + // Squeeze node doesn't have opschema here, so we need to set input args count manually + weight_squeeze.MutableInputArgsCount().resize(2); + graph_utils::AddNodeInput(weight_squeeze, 1, axes_input); + } else { + weight_squeeze.AddAttribute("axes", std::vector{2}); + } + weight_squeeze.SetExecutionProviderType(execution_provider_type); + // 3. Split conv weight + std::vector conv1d_weight_splitted_outputs; + for (int i = 0; i < group_num; i++) { + conv1d_weight_splitted_outputs.push_back(&graph.GetOrCreateNodeArg( + graph.GenerateNodeArgName("weight_split_output"), nullptr)); + } + auto& weight_split = graph.AddNode(graph.GenerateNodeName("Split"), "Split", node_description, + {weight_squeeze_output}, {conv1d_weight_splitted_outputs}); + weight_split.AddAttribute("axis", int64_t(0)); + weight_split.SetExecutionProviderType(execution_provider_type); + if (onnx_opset_version >= 18) { + weight_split.AddAttribute("num_outputs", group_num); + } + // 4. Do MatMul + std::vector matmul_outputs; + for (int i = 0; i < group_num; i++) { + auto matmul_output = &graph.GetOrCreateNodeArg(graph.GenerateNodeArgName("matmul_output"), nullptr); + matmul_outputs.push_back(matmul_output); + auto& matmul = graph.AddNode(graph.GenerateNodeName("Matmul"), "MatMul", node_description, + {conv1d_weight_splitted_outputs[i], conv1d_input_splitted_outputs[i]}, + {matmul_output}); + matmul.SetExecutionProviderType(execution_provider_type); + } + // 5. Concat matmul outputs + auto& concat_node = graph.AddNode(graph.GenerateNodeName("Concat"), "Concat", node_description, + matmul_outputs, {}); + concat_node.SetExecutionProviderType(execution_provider_type); + concat_node.AddAttribute("axis", int64_t(1)); + // 6. 
Clean up - delete the original "conv" node; its output is replaced by concat_node + graph_utils::FinalizeNodeFusion(graph, concat_node, conv); +} + +Status Conv1dReplacement::ApplyImpl(Graph& graph, bool& modified, int graph_level, const logging::Logger& logger) const { + GraphViewer graph_viewer(graph); + const auto& node_topology_list = graph_viewer.GetNodesInTopologicalOrder(); + for (auto node_index : node_topology_list) { + auto* node_ptr = graph.GetNode(node_index); + if (!node_ptr) + continue; // node was removed + auto& node = *node_ptr; + ORT_RETURN_IF_ERROR(Recurse(node, modified, graph_level, logger)); + if (NodeCanBeReplacedByMatmul(node)) { + LOGS(logger, VERBOSE) << "lora conv1d replacement, node name: " + node.Name(); + Conv1dToMatmul(graph, node); + modified = true; + } + } + return Status::OK(); +} +} // namespace onnxruntime diff --git a/orttraining/orttraining/core/optimizer/conv1d_replacement.h b/orttraining/orttraining/core/optimizer/conv1d_replacement.h new file mode 100644 index 0000000000000..740f13c76fd6f --- /dev/null +++ b/orttraining/orttraining/core/optimizer/conv1d_replacement.h @@ -0,0 +1,18 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#pragma once + +#include "core/optimizer/graph_transformer.h" + +namespace onnxruntime { + +class Conv1dReplacement : public GraphTransformer { + public: + Conv1dReplacement(const InlinedHashSet& compatible_execution_providers = {}) noexcept + : GraphTransformer("Conv1dReplacement", compatible_execution_providers) {} + + Status ApplyImpl(Graph& graph, bool& modified, int graph_level, const logging::Logger& logger) const override; +}; + +} // namespace onnxruntime diff --git a/orttraining/orttraining/core/optimizer/graph_transformer_utils.cc b/orttraining/orttraining/core/optimizer/graph_transformer_utils.cc index 57d76577f1ba7..6193a1d10c095 100644 --- a/orttraining/orttraining/core/optimizer/graph_transformer_utils.cc +++ b/orttraining/orttraining/core/optimizer/graph_transformer_utils.cc @@ -72,6 +72,7 @@ #ifdef ENABLE_TRAINING_TORCH_INTEROP #include "orttraining/core/optimizer/pythonop_rewriter.h" #endif +#include "orttraining/core/optimizer/conv1d_replacement.h" namespace onnxruntime { namespace training { @@ -194,6 +195,7 @@ std::vector> GeneratePreTrainingTransformers( // Once we have a CPU kernel for PadAndUnflatten, we can remove the guard.
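The substitution above relies on the fact that a Conv whose kernel_shape is 1 is, per group, a plain matrix multiplication of the squeezed weight with the corresponding slice of input channels. A minimal PyTorch sketch of that equivalence, with assumed shapes and not taken from this patch, is:

```python
# Illustrative sketch: Conv1d with kernel_size=1 equals per-group MatMul(weight, input),
# which is what Conv1dReplacement emits as Squeeze + Split + MatMul + Concat.
import torch

batch, in_ch, length, out_ch, group = 8, 16, 128, 64, 2   # assumed example shapes
x = torch.randn(batch, in_ch, length)
w = torch.randn(out_ch, in_ch // group, 1)                 # Conv1d weight, kernel_shape = 1

conv = torch.nn.functional.conv1d(x, w, bias=None, groups=group)

xs = x.chunk(group, dim=1)                                 # split input along channels
ws = w.squeeze(-1).chunk(group, dim=0)                     # squeeze kernel dim, split weight
matmul = torch.cat([wi @ xi for wi, xi in zip(ws, xs)], dim=1)  # MatMul per group, then Concat

assert torch.allclose(conv, matmul, atol=1e-5)
```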
transformers.emplace_back(std::make_unique(compatible_eps, config.sparse_embedding_input_names)); + transformers.emplace_back(std::make_unique(compatible_eps)); #endif } diff --git a/orttraining/orttraining/test/optimizer/graph_transform_test.cc b/orttraining/orttraining/test/optimizer/graph_transform_test.cc index 20b9354d85745..b774fec11cc8d 100644 --- a/orttraining/orttraining/test/optimizer/graph_transform_test.cc +++ b/orttraining/orttraining/test/optimizer/graph_transform_test.cc @@ -35,6 +35,7 @@ #ifdef ENABLE_TRITON #include "orttraining/core/optimizer/triton_fusion.h" #endif +#include "orttraining/core/optimizer/conv1d_replacement.h" #include @@ -1199,6 +1200,103 @@ TEST_P(QDQFusionTestsParameterized, CheckModelComposition) { ASSERT_EQ(op_to_count_post_fusion["com.microsoft.FakeQuant"], 1); } +TEST_F(GraphTransformationTests, Conv1dReplacement) { + auto pre_graph_checker = [&](Graph& graph) { + auto op_count_map = CountOpsInGraph(graph); + TEST_RETURN_IF_NOT(op_count_map["Conv"] == 1); + return Status::OK(); + }; + + for (auto opset : {11, 12, 13, 14, 15, 16, 17, 18}) { + for (auto group : {1, 2}) { + auto build_test_case = [&](ModelTestBuilder& builder) { + auto [batch_size, in_channel, in_length] = std::make_tuple(8, 16, 128); + auto out_channel = 64; + auto* data_arg = builder.MakeInput({{batch_size, in_channel, in_length}}); + + auto* weight_arg = builder.MakeInitializer({out_channel, in_channel / group, 1}, {-1.0f, 1.0f}); + auto* conv_output = builder.MakeOutput(); + + auto& conv_node = builder.AddNode("Conv", {data_arg, weight_arg}, {conv_output}); + conv_node.AddAttribute("dilations", std::vector{1}); + conv_node.AddAttribute("kernel_shape", std::vector{1}); + conv_node.AddAttribute("strides", std::vector{1}); + conv_node.AddAttribute("group", static_cast(group)); + }; + + auto post_graph_checker = [&](Graph& graph) { + auto op_count_map = CountOpsInGraph(graph); + TEST_RETURN_IF_NOT(op_count_map["Conv"] == 0); + // after graph transformation, the graph should have 1 squeeze, 2 split, group matmul, 1 concat + TEST_RETURN_IF_NOT(op_count_map["Squeeze"] == 1); + TEST_RETURN_IF_NOT(op_count_map["Split"] == 2); + TEST_RETURN_IF_NOT(op_count_map["MatMul"] == group); + TEST_RETURN_IF_NOT(op_count_map["Concat"] == 1); + return Status::OK(); + }; + + std::unique_ptr transformer = std::make_unique(); + ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, opset, *logger_, std::move(transformer), + TransformerLevel::Level1, 1, + pre_graph_checker, post_graph_checker)); + } + } +} + +TEST_F(GraphTransformationTests, Conv1dReplacement_NoTakeEffect) { + auto pre_graph_checker = [&](Graph& graph) { + auto op_count_map = CountOpsInGraph(graph); + TEST_RETURN_IF_NOT(op_count_map["Conv"] == 1); + return Status::OK(); + }; + + // "group" is 3 so conv not replaced + for (auto opset : {11, 12, 13, 14, 15, 16, 17, 18}) { + auto build_test_case = [&](ModelTestBuilder& builder) { + auto [batch_size, in_channel, in_length] = std::make_tuple(8, 16, 128); + auto out_channel = 64; + auto* data_arg = builder.MakeInput({{batch_size, in_channel, in_length}}); + + auto* weight_arg = builder.MakeInitializer({out_channel, in_channel / 3, 1}, {-1.0f, 1.0f}); + auto* conv_output = builder.MakeOutput(); + + auto& conv_node = builder.AddNode("Conv", {data_arg, weight_arg}, {conv_output}); + conv_node.AddAttribute("dilations", std::vector{1}); + conv_node.AddAttribute("kernel_shape", std::vector{1}); + conv_node.AddAttribute("strides", std::vector{1}); + conv_node.AddAttribute("group", static_cast(3)); + }; + 
+ std::unique_ptr transformer = std::make_unique(); + ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, opset, *logger_, std::move(transformer), + TransformerLevel::Level1, 1, + pre_graph_checker, pre_graph_checker)); + } + + // "kernel_shape" is not 1 so conv not replaced + for (auto opset : {11, 12, 13, 14, 15, 16, 17, 18}) { + auto build_test_case = [&](ModelTestBuilder& builder) { + auto [batch_size, in_channel, in_length] = std::make_tuple(8, 16, 128); + auto out_channel = 64; + auto* data_arg = builder.MakeInput({{batch_size, in_channel, in_length}}); + + auto* weight_arg = builder.MakeInitializer({out_channel, in_channel, 1}, {-1.0f, 1.0f}); + auto* conv_output = builder.MakeOutput(); + + auto& conv_node = builder.AddNode("Conv", {data_arg, weight_arg}, {conv_output}); + conv_node.AddAttribute("dilations", std::vector{1}); + conv_node.AddAttribute("kernel_shape", std::vector{2}); + conv_node.AddAttribute("strides", std::vector{1}); + conv_node.AddAttribute("group", static_cast(1)); + }; + + std::unique_ptr transformer = std::make_unique(); + ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, opset, *logger_, std::move(transformer), + TransformerLevel::Level1, 1, + pre_graph_checker, pre_graph_checker)); + } +} + INSTANTIATE_TEST_SUITE_P( QDQFusionTests, QDQFusionTestsParameterized, From 119e86ec161849b2d904b5423cd6612a4ce31194 Mon Sep 17 00:00:00 2001 From: Tianlei Wu Date: Thu, 16 Nov 2023 06:43:18 -0800 Subject: [PATCH 14/38] SDXL demo: Add Option to disable refiner (#18455) Add option to disable refiner and only run base model. --- .../stable_diffusion/demo_txt2img_xl.py | 55 +++++++++++-------- .../models/stable_diffusion/demo_utils.py | 4 ++ 2 files changed, 37 insertions(+), 22 deletions(-) diff --git a/onnxruntime/python/tools/transformers/models/stable_diffusion/demo_txt2img_xl.py b/onnxruntime/python/tools/transformers/models/stable_diffusion/demo_txt2img_xl.py index 974759bb6ae4b..4f9ecf6cbb152 100644 --- a/onnxruntime/python/tools/transformers/models/stable_diffusion/demo_txt2img_xl.py +++ b/onnxruntime/python/tools/transformers/models/stable_diffusion/demo_txt2img_xl.py @@ -53,7 +53,9 @@ def load_pipelines(args, batch_size): max_image_size = 1216 if args.engine != "ORT_CUDA" else 2048 # No VAE decoder in base when it outputs latent instead of image. - base_info = PipelineInfo(args.version, use_vae=False, min_image_size=min_image_size, max_image_size=max_image_size) + base_info = PipelineInfo( + args.version, use_vae=args.disable_refiner, min_image_size=min_image_size, max_image_size=max_image_size + ) # Ideally, the optimized batch size and image size for TRT engine shall align with user's preference. That is to # optimize the shape used most frequently. We can let user config it when we develop a UI plugin. 
@@ -74,25 +76,28 @@ def load_pipelines(args, batch_size): opt_image_width, ) - refiner_info = PipelineInfo( - args.version, is_refiner=True, min_image_size=min_image_size, max_image_size=max_image_size - ) - refiner = init_pipeline( - Img2ImgXLPipeline, - refiner_info, - engine_type, - args, - max_batch_size, - opt_batch_size, - opt_image_height, - opt_image_width, - ) + refiner = None + if not args.disable_refiner: + refiner_info = PipelineInfo( + args.version, is_refiner=True, min_image_size=min_image_size, max_image_size=max_image_size + ) + refiner = init_pipeline( + Img2ImgXLPipeline, + refiner_info, + engine_type, + args, + max_batch_size, + opt_batch_size, + opt_image_height, + opt_image_width, + ) if engine_type == EngineType.TRT: - max_device_memory = max(base.backend.max_device_memory(), refiner.backend.max_device_memory()) + max_device_memory = max(base.backend.max_device_memory(), (refiner or base).backend.max_device_memory()) _, shared_device_memory = cudart.cudaMalloc(max_device_memory) base.backend.activate_engines(shared_device_memory) - refiner.backend.activate_engines(shared_device_memory) + if refiner: + refiner.backend.activate_engines(shared_device_memory) if engine_type == EngineType.ORT_CUDA: enable_vae_slicing = args.enable_vae_slicing @@ -100,7 +105,7 @@ def load_pipelines(args, batch_size): print("Updating enable_vae_slicing to be True to avoid cuDNN error for batch size > 4.") enable_vae_slicing = True if enable_vae_slicing: - refiner.backend.enable_vae_slicing() + (refiner or base).backend.enable_vae_slicing() return base, refiner @@ -109,7 +114,8 @@ def run_pipelines(args, base, refiner, prompt, negative_prompt, is_warm_up=False image_width = args.width batch_size = len(prompt) base.load_resources(image_height, image_width, batch_size) - refiner.load_resources(image_height, image_width, batch_size) + if refiner: + refiner.load_resources(image_height, image_width, batch_size) def run_base_and_refiner(warmup=False): images, time_base = base.run( @@ -121,8 +127,10 @@ def run_base_and_refiner(warmup=False): denoising_steps=args.denoising_steps, guidance=args.guidance, seed=args.seed, - return_type="latent", + return_type="latent" if refiner else "image", ) + if refiner is None: + return images, time_base # Use same seed in base and refiner. 
seed = base.get_current_seed() @@ -173,7 +181,8 @@ def run_demo(args): base, refiner = load_pipelines(args, batch_size) run_pipelines(args, base, refiner, prompt, negative_prompt) base.teardown() - refiner.teardown() + if refiner: + refiner.teardown() def run_dynamic_shape_demo(args): @@ -223,7 +232,8 @@ def run_dynamic_shape_demo(args): args.denoising_steps = steps args.seed = seed base.set_scheduler(scheduler) - refiner.set_scheduler(scheduler) + if refiner: + refiner.set_scheduler(scheduler) print( f"\nbatch_size={batch_size}, height={height}, width={width}, scheduler={scheduler}, steps={steps}, prompt={example_prompt}, seed={seed}" ) @@ -231,7 +241,8 @@ def run_dynamic_shape_demo(args): run_pipelines(args, base, refiner, prompt, negative_prompt, is_warm_up=False) base.teardown() - refiner.teardown() + if refiner: + refiner.teardown() if __name__ == "__main__": diff --git a/onnxruntime/python/tools/transformers/models/stable_diffusion/demo_utils.py b/onnxruntime/python/tools/transformers/models/stable_diffusion/demo_utils.py index ef45b786b9ea3..39ee273a3130d 100644 --- a/onnxruntime/python/tools/transformers/models/stable_diffusion/demo_utils.py +++ b/onnxruntime/python/tools/transformers/models/stable_diffusion/demo_utils.py @@ -145,6 +145,10 @@ def parse_arguments(is_xl: bool, description: str): parser.add_argument("--seed", type=int, default=None, help="Seed for random generator to get consistent results.") parser.add_argument("--disable-cuda-graph", action="store_true", help="Disable cuda graph.") + parser.add_argument( + "--disable-refiner", action="store_true", help="Disable refiner and only run base for XL pipeline." + ) + group = parser.add_argument_group("Options for ORT_CUDA engine only") group.add_argument("--enable-vae-slicing", action="store_true", help="True will feed only one image to VAE once.") From 999752a35d414acc214982b205d16a93768b0699 Mon Sep 17 00:00:00 2001 From: Wanming Lin Date: Fri, 17 Nov 2023 00:01:58 +0800 Subject: [PATCH 15/38] [WebNN EP] Support GreaterOrEqual and LessOrEqual ops (#18411) --- onnxruntime/core/providers/webnn/builders/helper.h | 2 ++ .../providers/webnn/builders/impl/logical_op_builder.cc | 6 ++++++ .../core/providers/webnn/builders/op_builder_factory.cc | 2 ++ 3 files changed, 10 insertions(+) diff --git a/onnxruntime/core/providers/webnn/builders/helper.h b/onnxruntime/core/providers/webnn/builders/helper.h index 46c456556e016..8ae16f0dd21fc 100644 --- a/onnxruntime/core/providers/webnn/builders/helper.h +++ b/onnxruntime/core/providers/webnn/builders/helper.h @@ -156,6 +156,7 @@ static const InlinedHashMap op_map = { {"GlobalMaxPool", "maxPool2d"}, {"GlobalLpPool", "l2Pool2d"}, {"Greater", "greater"}, + {"GreaterOrEqual", "greaterOrEqual"}, {"GroupNormalization", "meanVarianceNormalization"}, {"HardSigmoid", "hardSigmoid"}, {"HardSwish", "hardSwish"}, @@ -164,6 +165,7 @@ static const InlinedHashMap op_map = { {"LayerNormalization", "meanVarianceNormalization"}, {"LeakyRelu", "leakyRelu"}, {"Less", "lesser"}, + {"LessOrEqual", "lesserOrEqual"}, {"Log", "log"}, {"LpPool", "l2Pool2d"}, {"MatMul", "matmul"}, diff --git a/onnxruntime/core/providers/webnn/builders/impl/logical_op_builder.cc b/onnxruntime/core/providers/webnn/builders/impl/logical_op_builder.cc index 4cb49d8f8cd3a..c8f58fa98635f 100644 --- a/onnxruntime/core/providers/webnn/builders/impl/logical_op_builder.cc +++ b/onnxruntime/core/providers/webnn/builders/impl/logical_op_builder.cc @@ -35,8 +35,12 @@ Status LogicalOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, 
cons output = model_builder.GetBuilder().call("equal", input0, input1); } else if (op_type == "Greater") { output = model_builder.GetBuilder().call("greater", input0, input1); + } else if (op_type == "GreaterOrEqual") { + output = model_builder.GetBuilder().call("greaterOrEqual", input0, input1); } else if (op_type == "Less") { output = model_builder.GetBuilder().call("lesser", input0, input1); + } else if (op_type == "LessOrEqual") { + output = model_builder.GetBuilder().call("lesserOrEqual", input0, input1); } else { return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "LogicalOpBuilder::AddToModelBuilderImpl, unknown op: ", op_type); @@ -54,7 +58,9 @@ void CreateLogicalOpBuilder(const std::string& op_type, OpBuilderRegistrations& { "Equal", "Greater", + "GreaterOrEqual", "Less", + "LessOrEqual", }; op_registrations.builders.push_back(std::make_unique()); diff --git a/onnxruntime/core/providers/webnn/builders/op_builder_factory.cc b/onnxruntime/core/providers/webnn/builders/op_builder_factory.cc index 65dc8ddbeaf90..463317a4dafda 100644 --- a/onnxruntime/core/providers/webnn/builders/op_builder_factory.cc +++ b/onnxruntime/core/providers/webnn/builders/op_builder_factory.cc @@ -99,7 +99,9 @@ static OpBuilderRegistrations CreateOpBuilderRegistrations() { { // Logical CreateLogicalOpBuilder("Equal", op_registrations); CreateLogicalOpBuilder("Greater", op_registrations); + CreateLogicalOpBuilder("GreaterOrEqual", op_registrations); CreateLogicalOpBuilder("Less", op_registrations); + CreateLogicalOpBuilder("LessOrEqual", op_registrations); } { // Max/Min From b291b20fa02b14ad243ef94ce6d72223dbe63ee9 Mon Sep 17 00:00:00 2001 From: satyajandhyala Date: Thu, 16 Nov 2023 09:44:13 -0800 Subject: [PATCH 16/38] [JS/Web] Added uniforms support to Slice op. (#18422) ### Description Support uniforms in Slice op ### Motivation and Context Improve performance --- js/web/lib/wasm/jsep/webgpu/ops/common.ts | 10 ++- js/web/lib/wasm/jsep/webgpu/ops/slice.ts | 81 +++++++++++++++++------ js/web/test/data/ops/slice.jsonc | 23 +++++++ 3 files changed, 92 insertions(+), 22 deletions(-) diff --git a/js/web/lib/wasm/jsep/webgpu/ops/common.ts b/js/web/lib/wasm/jsep/webgpu/ops/common.ts index 38dc14f23682e..014d9d02f6f10 100644 --- a/js/web/lib/wasm/jsep/webgpu/ops/common.ts +++ b/js/web/lib/wasm/jsep/webgpu/ops/common.ts @@ -646,6 +646,8 @@ export const outputVariable = (name: string, type: number, shapeOrRank: number|readonly number[], components: 1|2|3|4 = 1): IndicesHelper => createIndicesHelper(name, type, shapeOrRank, false, components); +export type UniformsArrayType = Array<{name: string; type: string}>; + /** * A ShaderHelper is a helper class for generating WGSL code. */ @@ -697,6 +699,7 @@ export interface ShaderHelper { * A helper function to register one uniform. Can be called multiple times to register multiple uniforms.
*/ registerUniform(name: string, type: string): ShaderHelper; + registerUniforms(nameToTypeMap: UniformsArrayType): ShaderHelper; } class ShaderHelperImpl implements ShaderHelper { @@ -755,8 +758,13 @@ class ShaderHelperImpl implements ShaderHelper { return this; } + registerUniforms(additionalUniforms: UniformsArrayType): ShaderHelper { + this.uniforms = this.uniforms.concat(additionalUniforms); + return this; + } + private indicesHelpers: IndicesHelper[] = []; - private uniforms: Array<{name: string; type: string}> = []; + private uniforms: UniformsArrayType = []; private uniformDeclaration(): string { if (this.uniforms.length === 0) { return ''; diff --git a/js/web/lib/wasm/jsep/webgpu/ops/slice.ts b/js/web/lib/wasm/jsep/webgpu/ops/slice.ts index d607351f69b74..7458579bf4340 100644 --- a/js/web/lib/wasm/jsep/webgpu/ops/slice.ts +++ b/js/web/lib/wasm/jsep/webgpu/ops/slice.ts @@ -5,9 +5,9 @@ import {DataType} from '../../../wasm-common'; import {TensorView} from '../../tensor-view'; import {ShapeUtil} from '../../util'; import {AttributeWithCacheKey, createAttributeWithCacheKey} from '../attribute-with-cache-key'; -import {ComputeContext, ProgramInfo, TensorInfo} from '../types'; +import {ComputeContext, ProgramInfo, ProgramUniform, TensorInfo} from '../types'; -import {IndicesHelper, inputVariable, outputVariable, ShaderHelper} from './common'; +import {createTensorShapeVariables, enableShapesUniforms, IndicesHelper, inputVariable, outputVariable, ShaderHelper, UniformsArrayType} from './common'; export interface SliceAttributes extends AttributeWithCacheKey { readonly starts: number[]; @@ -77,17 +77,26 @@ const fixStartEndValues = }; const calculateInputIndicesImpl = - (input: IndicesHelper, output: IndicesHelper, inputShape: readonly number[], outputShape: readonly number[]): - string => `fn calculateInputIndices(outputIndices: ${output.type.indices}) -> ${input.type.indices} { + (input: IndicesHelper, output: IndicesHelper, inputShape: readonly number[], outputShape: readonly number[], + enableInputShapeUniforms: boolean): string => + `fn calculateInputIndices(outputIndices: ${output.type.indices}) -> ${input.type.indices} { var inputIndices: ${input.type.indices}; var carry = 0u; for (var i = ${inputShape.length}; i >= 0; i--) { + let input_shape_i = ${ + enableInputShapeUniforms ? `uniforms.input_shape${inputShape.length > 1 ? '[i]' : ''}` : 'inputShape[i]'}; + let steps_i = ${ + enableInputShapeUniforms ? `uniforms.steps${inputShape.length > 1 ? '[i]' : ''}` : 'steps[i]'}; + let signs_i = ${ + enableInputShapeUniforms ? `uniforms.signs${inputShape.length > 1 ? '[i]' : ''}` : 'signs[i]'}; + let starts_i = ${ + enableInputShapeUniforms ? `uniforms.starts${inputShape.length > 1 ? '[i]' : ''}` : 'starts[i]'}; var outputIndex = ${outputShape.length === 1 ? 'outputIndices' : 'outputIndices[i]'}; - var inputIndex = outputIndex * steps[i] + starts[i] + carry; - carry = inputIndex / inputShape[i]; - inputIndex = inputIndex % inputShape[i]; - if (signs[i] < 0) { - inputIndex = inputShape[i] - inputIndex - 1u + starts[i]; + var inputIndex = outputIndex * steps_i + starts_i + carry; + carry = inputIndex / input_shape_i; + inputIndex = inputIndex % input_shape_i; + if (signs_i < 0) { + inputIndex = input_shape_i - inputIndex - 1u + starts_i; } ${inputShape.length === 1 ? 
'inputIndices' : 'inputIndices[i]'} = inputIndex; } @@ -110,6 +119,10 @@ const createSliceProgramInfo = (inputs: readonly TensorView[], attributes: Slice const ends = attributes.ends.map((end, i) => fixStartEndValues(end, i, inputShape, axes, steps)); + if (axes.length !== starts.length || axes.length !== ends.length) { + throw new Error('start, ends and axes should have the same number of elements'); + } + if (axes.length !== inputShape.length) { for (let i = 0; i < inputShape.length; ++i) { if (!axes.includes(i)) { @@ -131,40 +144,66 @@ const createSliceProgramInfo = (inputs: readonly TensorView[], attributes: Slice array[i] = -step; } }); + // Output rank is expected to be less than or equal to the input rank. + const enableShapeUniforms = enableShapesUniforms(inputs[0].dims.length); + const inputShapeOrRank = enableShapeUniforms ? inputs[0].dims.length : inputs[0].dims; const outputShape = inputShape.slice(0); axes.forEach((axis, _) => { outputShape[axis] = Math.ceil((ends[axis] - starts[axis]) / steps[axis]); }); + const outputShapeOrRank = enableShapeUniforms ? outputShape.length : outputShape; const outputTensorInfo: TensorInfo = {dims: outputShape, dataType: inputs[0].dataType}; - const output = outputVariable('output', inputs[0].dataType, outputShape); - const input = inputVariable('input', inputs[0].dataType, inputShape); + const output = outputVariable('output', inputs[0].dataType, outputShapeOrRank); + const input = inputVariable('input', inputs[0].dataType, inputShapeOrRank); const outputSize = ShapeUtil.size(outputShape); + const programUniforms: ProgramUniform[] = []; + const uniforms: UniformsArrayType = []; + if (enableShapeUniforms) { + uniforms.push({name: 'starts', type: starts.length > 1 ? `vec${starts.length}` : 'u32'}); + uniforms.push({name: 'signs', type: signs.length > 1 ? `vec${signs.length}` : 'i32'}); + uniforms.push({name: 'steps', type: steps.length > 1 ? `vec${steps.length}` : 'u32'}); + programUniforms.push({type: 'uint32', data: starts}); + programUniforms.push({type: 'int32', data: signs}); + programUniforms.push({type: 'uint32', data: steps}); + } + uniforms.push({name: 'outputSize', type: 'u32'}); + programUniforms.push({type: 'uint32', data: outputSize}); + if (enableShapeUniforms) { + programUniforms.push(...createTensorShapeVariables(inputs[0].dims)); + programUniforms.push(...createTensorShapeVariables(outputShape)); + } const getShaderSource = (shaderHelper: ShaderHelper) => ` - ${shaderHelper.declareVariables(input, output)} - const signs = array(${signs.map(i => `${i}i`).join(',')}); - const starts = array(${starts.map(i => `${i}u`).join(',')}); - const ends = array(${ends.map(i => `${i}u`).join(',')}); - const steps = array(${steps.map(i => `${i}u`).join(',')}); - const inputShape = array(${inputShape.map(i => `${i}u`).join(',')}); - - ${calculateInputIndicesImpl(input, output, inputShape, outputShape)} + ${shaderHelper.registerUniforms(uniforms).declareVariables(input, output)} + ${enableShapeUniforms ? 
'' : [ + `const signs = array(${signs.map(i => `${i}i`).join(',')});`, + `const starts = array(${starts.map(i => `${i}u`).join(',')});`, + `const steps = array(${steps.map(i => `${i}u`).join(',')});`, + `const inputShape = array(${inputShape.map(i => `${i}u`).join(',')});` + ].join('\n')} + + ${calculateInputIndicesImpl(input, output, inputShape, outputShape, enableShapeUniforms)} ${shaderHelper.mainStart()} - ${shaderHelper.guardAgainstOutOfBoundsWorkgroupSizes(outputSize)} + ${shaderHelper.guardAgainstOutOfBoundsWorkgroupSizes('uniforms.outputSize')} let outputIndices = ${output.offsetToIndices('global_idx')}; let inputIndices = calculateInputIndices(outputIndices); ${output.setByOffset('global_idx', input.getByIndices('inputIndices'))} }`; return { name: 'Slice', - shaderCache: {hint: `${attributes.cacheKey}|${inputs[4]?.dims ?? ''}`}, + shaderCache: { + hint: enableShapeUniforms ? `${signs.length}_${starts.length}_${steps.length}` : + `${attributes.cacheKey} | ${inputs[4]?.dims ?? ''}`, + inputDependencies: [enableShapeUniforms ? 'rank' : 'dims'] + }, getShaderSource, getRunData: () => ({ outputs: [outputTensorInfo], dispatchGroup: {x: Math.ceil(inputSize / 64 /* workgroup size */)}, + programUniforms }) }; }; diff --git a/js/web/test/data/ops/slice.jsonc b/js/web/test/data/ops/slice.jsonc index 9c90817a80c36..beef154a29932 100644 --- a/js/web/test/data/ops/slice.jsonc +++ b/js/web/test/data/ops/slice.jsonc @@ -21,6 +21,29 @@ } ] }, + { + "name": "Slice float32 with input[0] dim > 4", + "operator": "Slice", + "attributes": [], + "cases": [ + { + "name": "T[1, 1, 1, 1, 5] T[1] T[1] T[1] (float32)", + "inputs": [ + { + "data": [ + 0.3964604139328003, -0.8916832804679871, -1.6578896045684814, 1.960708737373352, 1.181204915046692 + ], + "dims": [1, 1, 1, 1, 5], + "type": "float32" + }, + { "data": [3], "dims": [1], "type": "int64" }, + { "data": [4], "dims": [1], "type": "int64" }, + { "data": [4], "dims": [1], "type": "int64" } + ], + "outputs": [{ "data": [1.960708737373352], "dims": [1, 1, 1, 1, 1], "type": "float32" }] + } + ] + }, { "name": "Slice int32", "operator": "Slice", From 3588fbac1377eb2a74fcf82f8d8768c7c00397d3 Mon Sep 17 00:00:00 2001 From: Chi Lo <54722500+chilo-ms@users.noreply.github.com> Date: Thu, 16 Nov 2023 10:23:08 -0800 Subject: [PATCH 17/38] [TensorRT EP] Fix memory leak for cudnn/cublas (#18467) Free memory for cudnn/cublas instances at TRT EP destruction. 
https://github.com/microsoft/onnxruntime/issues/18466 --- .../core/providers/tensorrt/tensorrt_execution_provider.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc b/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc index 3b3732bb716f9..cd4aa45f83bc8 100644 --- a/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc +++ b/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc @@ -1194,6 +1194,11 @@ TensorrtExecutionProvider::~TensorrtExecutionProvider() { } } + if (external_stream_) { + ORT_IGNORE_RETURN_VALUE(CUBLAS_CALL(cublasDestroy(external_cublas_handle_))); + ORT_IGNORE_RETURN_VALUE(CUDNN_CALL(cudnnDestroy(external_cudnn_handle_))); + } + if (!external_stream_ && stream_) { ORT_IGNORE_RETURN_VALUE(CUDA_CALL(cudaStreamDestroy(stream_))); } From b6b9aff60846f03b4d68193e2e33afeab8c32c57 Mon Sep 17 00:00:00 2001 From: Dmitri Smirnov Date: Thu, 16 Nov 2023 13:15:48 -0800 Subject: [PATCH 18/38] Allow empty shapes and do not validate them for inputs/outputs (#18442) ### Description Allow empty shapes and do not validate them for inputs/outputs at the InferenceSession::ValidateInputsOutputs(). ### Motivation and Context https://github.com/microsoft/onnxruntime/pull/17301 disallowed empty shapes. However, many models depend on them as a way to pass shapes of different ranks. --- onnxruntime/core/session/inference_session.cc | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/onnxruntime/core/session/inference_session.cc b/onnxruntime/core/session/inference_session.cc index ccedc71b9119a..f02d180ab104f 100644 --- a/onnxruntime/core/session/inference_session.cc +++ b/onnxruntime/core/session/inference_session.cc @@ -2025,9 +2025,10 @@ common::Status InferenceSession::ValidateInputsOutputs(gsl::spansecond.tensor_shape.has_value()) { + const auto& opt_shape = iter->second.tensor_shape; + if (opt_shape.has_value() && !opt_shape->GetDims().empty()) { ORT_RETURN_IF_ERROR_SESSIONID_(CheckShapes(name, input_output_tensor.Shape(), - *iter->second.tensor_shape, input_output_moniker)); + *opt_shape, input_output_moniker)); } } else if (input_output_ml_value.IsSparseTensor()) { #if !defined(DISABLE_SPARSE_TENSORS) @@ -2038,9 +2039,10 @@ common::Status InferenceSession::ValidateInputsOutputs(gsl::spansecond.tensor_shape.has_value()) { + const auto& opt_shape = iter->second.tensor_shape; + if (opt_shape.has_value() && !opt_shape->GetDims().empty()) { ORT_RETURN_IF_ERROR_SESSIONID_(CheckShapes(name, sparse_tensor.DenseShape(), - *iter->second.tensor_shape, input_output_moniker)); + *opt_shape, input_output_moniker)); } } else if (is_sparse_initializer(name) && expected_type->IsTensorType()) { @@ -2049,9 +2051,10 @@ common::Status InferenceSession::ValidateInputsOutputs(gsl::spansecond.tensor_shape.has_value()) { + const auto& opt_shape = iter->second.tensor_shape; + if (opt_shape.has_value() && !opt_shape->GetDims().empty()) { ORT_RETURN_IF_ERROR_SESSIONID_(CheckShapes(name, sparse_tensor.DenseShape(), - *iter->second.tensor_shape, input_output_moniker)); + *opt_shape, input_output_moniker)); } } else { return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, input_output_moniker, " with name: '", name, @@ -2061,7 +2064,6 @@ common::Status InferenceSession::ValidateInputsOutputs(gsl::spanIsTensorSequenceType() #if !defined(DISABLE_OPTIONAL_TYPE) From e7a524fea9599dc4b5e5171cb14c16389b7d58a4 Mon Sep 17 00:00:00 2001 From: Scott McKay Date: Fri, 17 Nov 2023 07:20:16 
+1000 Subject: [PATCH 19/38] Update to allow large models to be checked for mobile support. (#18357) ### Description Update usability checker and related infrastructure to support checking models > 2GB. - Add ability to set flag to keep initializers as external data - we optimize the model as part of the checking so need to write out a new copy. - Handle issue with ONNX shape inferencing silently failing - use API that supports large models but requires writing the model to a new file - automate cleanup of that copy of the model ### Motivation and Context Allow analysis of LLMs to determine gaps for mobile usage. --------- Co-authored-by: Edward Chen <18449977+edgchen1@users.noreply.github.com> --- .../check_model_can_use_ort_mobile_pkg.py | 9 ++-- .../util/mobile_helpers/usability_checker.py | 11 ++--- tools/python/util/onnx_model_utils.py | 45 +++++++++++++++++++ 3 files changed, 54 insertions(+), 11 deletions(-) diff --git a/tools/python/util/mobile_helpers/check_model_can_use_ort_mobile_pkg.py b/tools/python/util/mobile_helpers/check_model_can_use_ort_mobile_pkg.py index 113b5398f3981..9eccb7c36455f 100644 --- a/tools/python/util/mobile_helpers/check_model_can_use_ort_mobile_pkg.py +++ b/tools/python/util/mobile_helpers/check_model_can_use_ort_mobile_pkg.py @@ -10,9 +10,8 @@ import sys import onnx -from onnx import shape_inference -from ..onnx_model_utils import get_opsets_imported +from ..onnx_model_utils import ModelProtoWithShapeInfo, get_opsets_imported from ..reduced_build_config_parser import parse_config cpp_to_tensorproto_type = { @@ -265,15 +264,13 @@ def run_check(model_path: pathlib.Path, mobile_pkg_build_config: pathlib.Path, l ) model_file = model_path.resolve(strict=True) - model = onnx.load(str(model_file)) # we need to run shape inferencing to populate that type info for node outputs. # we will get warnings if the model uses ORT contrib ops (ONNX does not have shape inferencing for those), # and shape inferencing will be lost downstream of those. 
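The pattern that the new ModelProtoWithShapeInfo helper (added to tools/python/util/onnx_model_utils.py further below) encapsulates is: try in-memory shape inferencing first, detect the silent empty-graph failure for models over 2GB, and fall back to the file-based API. A condensed sketch of that flow, with placeholder paths and without the helper's deferred cleanup, assuming only the standard onnx Python package:

```python
# Illustrative sketch of the > 2GB shape-inference fallback; "model.onnx" is a placeholder path.
import pathlib
import onnx

model_path = pathlib.Path("model.onnx")
model = onnx.load(str(model_path))
inferred = onnx.shape_inference.infer_shapes(model, strict_mode=True)

if len(model.graph.node) > 0 and len(inferred.graph.node) == 0:
    # In-memory inferencing failed silently (typically a > 2GB model with external data),
    # so use the path-based API, which writes the inferred model to a new file.
    tmp_path = model_path.with_suffix(".temp_with_shapeinf.onnx")
    onnx.shape_inference.infer_shapes_path(str(model_path), str(tmp_path), strict_mode=True)
    inferred = onnx.load(str(tmp_path))
    tmp_path.unlink(missing_ok=True)  # the helper in this patch defers this cleanup to __del__
```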
# TODO: add support for checking ORT format model as it will have full type/shape info for all nodes - model_with_type_info = shape_inference.infer_shapes(model) - - return run_check_with_model(model_with_type_info, mobile_pkg_build_config, logger) + model_wrapper = ModelProtoWithShapeInfo(model_file) + return run_check_with_model(model_wrapper.model_with_shape_info, mobile_pkg_build_config, logger) def main(): diff --git a/tools/python/util/mobile_helpers/usability_checker.py b/tools/python/util/mobile_helpers/usability_checker.py index f8b0bfe707ead..dcb3451a5e0fa 100644 --- a/tools/python/util/mobile_helpers/usability_checker.py +++ b/tools/python/util/mobile_helpers/usability_checker.py @@ -13,6 +13,7 @@ import onnx from ..onnx_model_utils import ( + ModelProtoWithShapeInfo, get_producer_consumer_maps, is_fixed_size_tensor, iterate_graph_per_graph_func, @@ -464,9 +465,9 @@ def check_shapes(graph: onnx.GraphProto, logger: Optional[logging.Logger] = None return dynamic_inputs, num_dynamic_values -def checker(model_path, logger: logging.Logger): - model = onnx.load(model_path) - model_with_shape_info = onnx.shape_inference.infer_shapes(model) +def checker(model_path: pathlib.Path, logger: logging.Logger): + model_with_shape_info_wrapper = ModelProtoWithShapeInfo(model_path) + model_with_shape_info = model_with_shape_info_wrapper.model_with_shape_info # create lookup map for efficiency value_to_shape = {} @@ -541,10 +542,10 @@ def analyze_model(model_path: pathlib.Path, skip_optimize: bool = False, logger: with tempfile.TemporaryDirectory() as tmp: if not skip_optimize: tmp_path = pathlib.Path(tmp) / model_path.name - optimize_model(model_path, tmp_path) + optimize_model(model_path, tmp_path, use_external_initializers=True) model_path = tmp_path - try_eps = checker(str(model_path.resolve(strict=True)), logger) + try_eps = checker(model_path.resolve(strict=True), logger) return try_eps diff --git a/tools/python/util/onnx_model_utils.py b/tools/python/util/onnx_model_utils.py index e662d1623f8bd..5c970430a3a82 100644 --- a/tools/python/util/onnx_model_utils.py +++ b/tools/python/util/onnx_model_utils.py @@ -95,6 +95,7 @@ def optimize_model( output_path: pathlib.Path, level: ort.GraphOptimizationLevel = ort.GraphOptimizationLevel.ORT_ENABLE_BASIC, log_level: int = 3, + use_external_initializers: bool = False, ): """ Optimize an ONNX model using ONNX Runtime to the specified level @@ -103,12 +104,25 @@ def optimize_model( :param level: onnxruntime.GraphOptimizationLevel to use. Default is ORT_ENABLE_BASIC. :param log_level: Log level. Defaults to Error (3) so we don't get output about unused initializers being removed. Warning (2) or Info (1) may be desirable in some scenarios. + :param use_external_initializers: Set flag to write initializers to an external file. Required if model > 2GB. + Requires onnxruntime 1.17+ """ so = ort.SessionOptions() so.optimized_model_filepath = str(output_path.resolve()) so.graph_optimization_level = level so.log_severity_level = log_level + # save using external initializers so models > 2 GB are handled + if use_external_initializers: + major, minor, rest = ort.__version__.split(".", 3) + if (int(major), int(minor)) >= (1, 17): + so.add_session_config_entry("session.optimized_model_external_initializers_file_name", "external_data.pb") + else: + raise ValueError( + "ONNX Runtime 1.17 or higher required to save initializers as external data when optimizing model. " + f"Current ONNX Runtime version is {ort.__version__}" + ) + # create session to optimize. 
this will write the updated model to output_path _ = ort.InferenceSession(str(model_path.resolve(strict=True)), so, providers=["CPUExecutionProvider"]) @@ -366,3 +380,34 @@ def get_optimization_level(level): return ort.GraphOptimizationLevel.ORT_ENABLE_ALL raise ValueError("Invalid optimization level of " + level) + + +class ModelProtoWithShapeInfo: + """ + Class to load an ONNX model and run shape inferencing on it to populate the ValueInfo. + The model_with_shape_info property will contain the updated model. + If the model is > 2GB and uses external data a temporary file is required to run shape inferencing successfully. + This helper class handles automatic removal of the temporary file. + """ + + def __init__(self, model_path: pathlib.Path): + """ + :param model_path: Path to ONNX model to load and run shape inferencing on. + """ + + self.model_path = model_path + + model = onnx.load(str(model_path)) + self.model_with_shape_info = onnx.shape_inference.infer_shapes(model, strict_mode=True) + + # ONNX has a silent failure from the call to infer_shapes when the model is > 2GB. + # We detect that by checking the nodes in the returned model. + self._tmp_model_path = None + if len(model.graph.node) > 0 and len(self.model_with_shape_info.graph.node) == 0: + self._tmp_model_path = pathlib.Path(model_path).with_suffix(".temp_with_shapeinf.onnx") + onnx.shape_inference.infer_shapes_path(str(model_path), str(self._tmp_model_path), strict_mode=True) + self.model_with_shape_info = onnx.load(str(self._tmp_model_path)) + + def __del__(self): + if self._tmp_model_path: + self._tmp_model_path.unlink(missing_ok=True) From 6a4e4488da75b5b482ef449dfff20309e8b15744 Mon Sep 17 00:00:00 2001 From: Hector Li Date: Thu, 16 Nov 2023 13:44:15 -0800 Subject: [PATCH 20/38] [QNN EP] Support Qnn MatMul with 2 dynamic inputs which are uint16 quantized (#18469) ### Description QNN can't run MatMul if both inputs are dynamic inputs with uint16 quantized on v68. Make it run by inserting Convert op to convert 1 input to int8 --- .../selectors_actions/qdq_selectors.cc | 5 +- .../builder/opbuilder/simple_op_builder.cc | 89 +++++++++++++++++++ .../test/providers/qnn/matmul_test.cpp | 39 ++++++-- 3 files changed, 125 insertions(+), 8 deletions(-) diff --git a/onnxruntime/core/optimizer/qdq_transformer/selectors_actions/qdq_selectors.cc b/onnxruntime/core/optimizer/qdq_transformer/selectors_actions/qdq_selectors.cc index 5015e48fdb7b8..3880288bdba2e 100644 --- a/onnxruntime/core/optimizer/qdq_transformer/selectors_actions/qdq_selectors.cc +++ b/onnxruntime/core/optimizer/qdq_transformer/selectors_actions/qdq_selectors.cc @@ -443,7 +443,6 @@ bool InstanceAndLayerNormalizationNodeGroupSelector::Check(const GraphViewer& gr } int32_t dt_input = dq_nodes[0]->InputDefs()[0]->TypeAsProto()->tensor_type().elem_type(); - int32_t dt_scale = dq_nodes[1]->InputDefs()[0]->TypeAsProto()->tensor_type().elem_type(); int32_t dt_bias = 0; bool has_bias = false; // bias is optional for LayerNorm @@ -453,9 +452,9 @@ bool InstanceAndLayerNormalizationNodeGroupSelector::Check(const GraphViewer& gr } int32_t dt_output = q_nodes[0]->OutputDefs()[0]->TypeAsProto()->tensor_type().elem_type(); - // Input, output, and scale need to be the same type. The bias is int32. + // Input, output, need to be the same type. The bias is int32. + // Scale can be different with input for a16w8 case return (dt_input == dt_output) && - (dt_input == dt_scale) && (has_bias ? 
dt_bias == ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT32 : true); } diff --git a/onnxruntime/core/providers/qnn/builder/opbuilder/simple_op_builder.cc b/onnxruntime/core/providers/qnn/builder/opbuilder/simple_op_builder.cc index 4ae59951c5e98..fdc5317419c5b 100644 --- a/onnxruntime/core/providers/qnn/builder/opbuilder/simple_op_builder.cc +++ b/onnxruntime/core/providers/qnn/builder/opbuilder/simple_op_builder.cc @@ -22,6 +22,11 @@ class SimpleOpBuilder : public BaseOpBuilder { ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(SimpleOpBuilder); protected: + Status ProcessInputs(QnnModelWrapper& qnn_model_wrapper, + const NodeUnit& node_unit, + const logging::Logger& logger, + std::vector& input_names, + bool do_op_validation) const override ORT_MUST_USE_RESULT; Status ProcessAttributesAndOutputs(QnnModelWrapper& qnn_model_wrapper, const NodeUnit& node_unit, std::vector&& input_names, @@ -48,6 +53,90 @@ class SimpleOpBuilder : public BaseOpBuilder { static constexpr std::array gridsample_supported_padding_modes = {"zeros", "border", "reflection"}; }; +// Move to qnn_utils if it's re-usable +Status InsertConvertOp(QnnModelWrapper& qnn_model_wrapper, + const std::string& convert_input_name, + const std::string& convert_output_name, + Qnn_DataType_t input_qnn_data_type, + Qnn_DataType_t output_qnn_data_type, + int32_t input_offset, + float input_scale, + const std::vector& output_shape, + bool do_op_validation) { + // Assume input is already handled. + float qmin = 0.0f; + float qmax = 255.0f; + ORT_RETURN_IF_ERROR(qnn::utils::GetQminQmax(input_qnn_data_type, qmin, qmax)); + double value_min = qnn::utils::Dequantize(input_offset, input_scale, qmin); + double value_max = qnn::utils::Dequantize(input_offset, input_scale, qmax); + + Qnn_QuantizeParams_t convert_output_quant_param = QNN_QUANTIZE_PARAMS_INIT; + convert_output_quant_param.encodingDefinition = QNN_DEFINITION_DEFINED; + convert_output_quant_param.quantizationEncoding = QNN_QUANTIZATION_ENCODING_SCALE_OFFSET; + ORT_RETURN_IF_ERROR(qnn::utils::GetQuantParams(static_cast(value_min), + static_cast(value_max), + output_qnn_data_type, + convert_output_quant_param.scaleOffsetEncoding.scale, + convert_output_quant_param.scaleOffsetEncoding.offset)); + + std::vector output_shape_copy = output_shape; + QnnTensorWrapper convert_output_tensorwrapper(convert_output_name, + QNN_TENSOR_TYPE_NATIVE, + output_qnn_data_type, + convert_output_quant_param, + std::move(output_shape_copy)); + ORT_RETURN_IF_NOT(qnn_model_wrapper.AddTensorWrapper(std::move(convert_output_tensorwrapper)), "Failed to add tensor."); + + ORT_RETURN_IF_NOT(qnn_model_wrapper.CreateQnnNode(convert_output_name, + QNN_OP_PACKAGE_NAME_QTI_AISW, + "Convert", + {convert_input_name}, + {convert_output_name}, + {}, + do_op_validation), + "Failed to add node."); + return Status::OK(); +} + +Status SimpleOpBuilder::ProcessInputs(QnnModelWrapper& qnn_model_wrapper, + const NodeUnit& node_unit, + const logging::Logger& logger, + std::vector& input_names, + bool do_op_validation) const { + const std::string& op_type = node_unit.OpType(); + ORT_RETURN_IF_ERROR(BaseOpBuilder::ProcessInputs(qnn_model_wrapper, node_unit, logger, input_names, do_op_validation)); + + if (op_type == "MatMul") { + const auto& inputs = node_unit.Inputs(); + TensorInfo input0_info = {}; + TensorInfo input1_info = {}; + ORT_RETURN_IF_ERROR(qnn_model_wrapper.GetTensorInfo(inputs[0], input0_info)); + ORT_RETURN_IF_ERROR(qnn_model_wrapper.GetTensorInfo(inputs[1], input1_info)); + // Need to insert Convert op if both 
inputs are dynamic inputs and are ufixed_16 + if (!input0_info.is_initializer && !input1_info.is_initializer && + input0_info.qnn_data_type == input1_info.qnn_data_type && + input0_info.qnn_data_type == QNN_DATATYPE_UFIXED_POINT_16) { + // insert Convert op after input1 + std::string convert_input_name = input_names.back(); + input_names.pop_back(); + const std::string& matmul_output_name = node_unit.Outputs()[0].node_arg.Name(); + std::string convert_output_name = convert_input_name + "_convert_" + matmul_output_name; + ORT_RETURN_IF_ERROR(InsertConvertOp(qnn_model_wrapper, + convert_input_name, + convert_output_name, + input1_info.qnn_data_type, + QNN_DATATYPE_UFIXED_POINT_8, + input1_info.quant_param.scaleOffsetEncoding.offset, + input1_info.quant_param.scaleOffsetEncoding.scale, + input1_info.shape, + do_op_validation)); + input_names.push_back(convert_output_name); + } + } + + return Status::OK(); +} + Status SimpleOpBuilder::ExplicitOpCheck(const NodeUnit& node_unit) const { const std::string& op_type = node_unit.OpType(); diff --git a/onnxruntime/test/providers/qnn/matmul_test.cpp b/onnxruntime/test/providers/qnn/matmul_test.cpp index 3073dde9d8e4c..3da3dc858175b 100644 --- a/onnxruntime/test/providers/qnn/matmul_test.cpp +++ b/onnxruntime/test/providers/qnn/matmul_test.cpp @@ -142,11 +142,6 @@ TEST_F(QnnHTPBackendTests, MatMulOp_HTP_u8) { } // Test QDQ MatMul with 16-bit act, 8-bit weights (static) -// TODO: (SLIGHT) Inaccuracy detected for output 'output', element 0. -// Output quant params: scale=0.0015259021893143654, zero_point=0. -// Expected val: 98 -// QNN QDQ val: 97.720298767089844 (err 0.27970123291015625) -// CPU QDQ val: 97.726402282714844 (err 0.27359771728515625) TEST_F(QnnHTPBackendTests, MatMulOp_HTP_A16_W8Static) { std::vector input0_data = {-10.0f, -4.0f, -2.0f, 0.0f, 5.0f, 10.0f}; std::vector input1_data = {-10.0f, -6.0f, -1.0f, 0.0f, 3.0f, 10.0f}; @@ -158,6 +153,40 @@ TEST_F(QnnHTPBackendTests, MatMulOp_HTP_A16_W8Static) { 7e-3f); } +// Test QDQ MatMul with uint16 activation uint16 weights, both dynamic +// Inaccuracy detected for output 'output_0', element 1. +// Output quant params: scale=0.0015259021893143654, zero_point=0. +// Expected val: 40 +// QNN QDQ val: 39.681087493896484 (err 0.31891250610351562) +// CPU QDQ val: 39.99847412109375 (err 0.00152587890625) +TEST_F(QnnHTPBackendTests, DISABLED_MatMulOp_HTP_A16_W16Dynamic) { + std::vector input0_data = {-10.0f, -4.0f, -2.0f, 0.0f, 5.0f, 10.0f}; + std::vector input1_data = {-10.0f, -6.0f, -1.0f, 0.0f, 3.0f, 10.0f}; + RunQDQMatMulOpOpTest(TestInputDef({2, 3}, false, input0_data), + TestInputDef({3, 2}, false, input1_data), + ExpectedEPNodeAssignment::All, + 18, + true, // Use com.microsoft Q/DQ ops + 7e-3f); +} + +// Test QDQ MatMul with uint16 activation uint16 weights, both dynamic +// Inaccuracy detected for output 'output_0', element 1. +// Output quant params: scale=0.71908456087112427, zero_point=1. 
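The quant-parameter math behind the inserted Convert op is easier to see in isolation: take the float range the uint16 input can represent and re-derive an 8-bit scale and zero point over that same range. A hedged Python sketch using the standard ONNX convention `float = scale * (q - zero_point)`; the op builder does the equivalent through `GetQminQmax`, `Dequantize` and `GetQuantParams` in QNN's offset convention.

```python
def requantize_params_u16_to_u8(scale16: float, zero_point16: int):
    """Derive uint8 quant params covering the same float range as a uint16 tensor."""
    # Float range representable by the uint16 input.
    value_min = scale16 * (0 - zero_point16)
    value_max = scale16 * (65535 - zero_point16)

    # Re-derive uint8 params spanning [value_min, value_max].
    scale8 = (value_max - value_min) / 255.0
    zero_point8 = min(255, max(0, int(round(-value_min / scale8))))
    return scale8, zero_point8


# A uint16 tensor spanning roughly [-50, 50] maps to scale ~0.392, zero point 128.
print(requantize_params_u16_to_u8(100.0 / 65535.0, 32768))
```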
+// Expected val: 46848.41015625 +// QNN QDQ val: 46844.04296875 (err 4.3671875) +// CPU QDQ val: 46848.359375 (err 0.05078125) +TEST_F(QnnHTPBackendTests, DISABLED_MatMulOp_HTP_A16_W16DynamicLarge) { + std::vector input0_data = GetFloatDataInRange(-10.0f, 10.0f, 12 * 96 * 512); + std::vector input1_data = GetFloatDataInRange(-10.0f, 10.0f, 12 * 96 * 512); + RunQDQMatMulOpOpTest(TestInputDef({1, 12, 96, 512}, false, input0_data), + TestInputDef({1, 12, 512, 96}, false, input1_data), + ExpectedEPNodeAssignment::All, + 18, + true, // Use com.microsoft Q/DQ ops + 7e-3f); +} + // Test 16-bit QDQ MatMul with static weights // TODO: Inaccuracy detected for output 'output', element 0. // Output quant params: scale=0.0015259021893143654, zero_point=0. From adb56df2e8de61862c0835c985fb0ba748499b05 Mon Sep 17 00:00:00 2001 From: aciddelgado <139922440+aciddelgado@users.noreply.github.com> Date: Thu, 16 Nov 2023 15:01:06 -0800 Subject: [PATCH 21/38] Aciddelgado/gqa local (#18375) ### Description Implement preliminary version of local (sliding window) attention. Currently only supported by Flash Attention (sm >= 80, Linux). Currently only supports sliding attention with a large cached kv. ### Motivation and Context This change enables to run Mistral and other models which use sliding window attention. --- docs/ContribOperators.md | 4 +- .../contrib_ops/cpu/bert/attention_common.h | 4 +- .../cuda/bert/flash_attention/flash.h | 15 + .../cuda/bert/flash_attention/flash_api.cc | 44 +- .../cuda/bert/flash_attention/flash_api.h | 7 +- .../bert/flash_attention/flash_fwd_kernel.h | 375 +++++++++--------- .../flash_fwd_launch_template.h | 117 +++--- .../cuda/bert/flash_attention/kernel_traits.h | 9 +- .../cuda/bert/flash_attention/softmax.h | 23 +- .../cuda/bert/flash_attention/utils.h | 164 ++++++-- .../cuda/bert/group_query_attention.cc | 14 +- .../cuda/bert/group_query_attention.h | 3 +- .../cuda/bert/group_query_attention_impl.cu | 67 +--- .../core/graph/contrib_ops/bert_defs.cc | 10 +- .../python/transformers/test_flash_attn.py | 363 ++++++++--------- 15 files changed, 682 insertions(+), 537 deletions(-) diff --git a/docs/ContribOperators.md b/docs/ContribOperators.md index 9c31978c66486..da900e5c59405 100644 --- a/docs/ContribOperators.md +++ b/docs/ContribOperators.md @@ -2385,7 +2385,7 @@ This version of the operator has been available since version 1 of the 'com.micr Group Query Self/Cross Attention. - Supports different number of heads for q and kv. + Supports different number of heads for q and kv. Only supports causal or local attention. #### Version @@ -2396,6 +2396,8 @@ This version of the operator has been available since version 1 of the 'com.micr
kv_num_heads : int (required)
Number of attention heads for k and v
+local_window_size : int
+left_window_size for local attention (like Mistral). Default value is -1 meaning unused.
num_heads : int (required)
Number of attention heads for q
scale : float
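To make the semantics of the new `local_window_size` attribute concrete, here is a small NumPy sketch of the masking the kernel applies via `apply_mask_local` (query rows are aligned to the end of the key sequence, as in the kernel; a negative window size is treated as unbounded, mirroring `set_params_fprop`). Causal attention is the special case of a right window of 0.

```python
import numpy as np


def local_attention_mask(seqlen_q: int, seqlen_k: int, window_left: int, window_right: int) -> np.ndarray:
    """Boolean mask of (query, key) pairs a local / sliding-window kernel may attend to."""
    if window_left < 0:  # -1 means unbounded on that side
        window_left = seqlen_k
    if window_right < 0:
        window_right = seqlen_k
    i = np.arange(seqlen_q)[:, None]  # query rows
    j = np.arange(seqlen_k)[None, :]  # key columns
    diag = i + seqlen_k - seqlen_q    # column of the "current" token for row i
    return (j >= diag - window_left) & (j <= diag + window_right)


# left_window_size = 3 with causal masking to the right, as a Mistral-style layer would use.
print(local_attention_mask(seqlen_q=6, seqlen_k=6, window_left=3, window_right=0).astype(int))
```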
diff --git a/onnxruntime/contrib_ops/cpu/bert/attention_common.h b/onnxruntime/contrib_ops/cpu/bert/attention_common.h index b693b58c7c40a..a7f83469a768d 100644 --- a/onnxruntime/contrib_ops/cpu/bert/attention_common.h +++ b/onnxruntime/contrib_ops/cpu/bert/attention_common.h @@ -96,9 +96,9 @@ struct GroupQueryAttentionParameters { int kv_num_heads; int num_splits; // number of splits for splitkv bool is_unidirectional; // causal + int local_window_size; bool kv_share_buffer; - bool is_prompt; // determines if seqlens_k is past or kv sequence length tensor - bool left_padding; // copies last token to last index if true + bool is_prompt; // determines if seqlens_k is past or kv sequence length tensor float scale; AttentionQkvFormat qkv_format; AttentionQkvFormat past_kv_format; diff --git a/onnxruntime/contrib_ops/cuda/bert/flash_attention/flash.h b/onnxruntime/contrib_ops/cuda/bert/flash_attention/flash.h index 89e2351428d40..cbe536c6ce45a 100644 --- a/onnxruntime/contrib_ops/cuda/bert/flash_attention/flash.h +++ b/onnxruntime/contrib_ops/cuda/bert/flash_attention/flash.h @@ -69,6 +69,7 @@ struct Flash_fwd_params : public Qkv_params { int seqlen_q_rounded = 0; int seqlen_k_rounded = 0; int d_rounded = 0; + int rotary_dim = 0; // The scaling factors for the kernel. float scale_softmax = 0.0; @@ -92,12 +93,26 @@ struct Flash_fwd_params : public Qkv_params { index_t knew_head_stride = 0; index_t vnew_head_stride = 0; + // The cos and sin matrices for rotary embedding. + void* __restrict__ rotary_cos_ptr = nullptr; + void* __restrict__ rotary_sin_ptr = nullptr; + + // The indices to index into the KV cache. + int* __restrict__ cache_batch_idx = nullptr; + + // Local window size + int window_size_left = -1; + int window_size_right = -1; + bool is_bf16 = false; bool is_causal = false; // If is_seqlens_k_cumulative, then seqlen_k is cu_seqlens_k[bidb + 1] - cu_seqlens_k[bidb]. // Otherwise it's cu_seqlens_k[bidb], i.e., we use cu_seqlens_k to store the sequence lengths of K. bool is_seqlens_k_cumulative = true; + + bool is_rotary_interleaved = false; + int num_splits = 0; // For split-KV version const cudaDeviceProp* dprops = nullptr; diff --git a/onnxruntime/contrib_ops/cuda/bert/flash_attention/flash_api.cc b/onnxruntime/contrib_ops/cuda/bert/flash_attention/flash_api.cc index 89a27c4d2b0d3..76190aad68fdb 100644 --- a/onnxruntime/contrib_ops/cuda/bert/flash_attention/flash_api.cc +++ b/onnxruntime/contrib_ops/cuda/bert/flash_attention/flash_api.cc @@ -35,7 +35,9 @@ void set_params_fprop(Flash_fwd_params& params, void* softmax_lse_d, float softmax_scale, bool is_causal, - bool kv_bsnh = true) { + bool kv_bsnh = true, + int window_size_left = -1, + int window_size_right = -1) { // Set the pointers and strides. params.q_ptr = q; params.k_ptr = k; @@ -102,7 +104,21 @@ void set_params_fprop(Flash_fwd_params& params, params.scale_softmax = softmax_scale; params.scale_softmax_log2 = softmax_scale * M_LOG2E; + // In our API, causal/unidirectional determines if we only look at prior tokens. 
However, the flash API seperates + // local and causal, meaning when we have local window size params.is_causal = is_causal; + if (is_causal && (window_size_left >= 0 || window_size_right != 0)) { + params.is_causal = false; + } + if (window_size_left < 0 && window_size_right >= 0) { + window_size_left = seqlen_k; + } + if (window_size_left >= 0 && window_size_right < 0) { + window_size_right = seqlen_k; + } + params.window_size_left = window_size_left; + params.window_size_right = window_size_right; + params.is_seqlens_k_cumulative = true; } @@ -227,7 +243,8 @@ Status mha_fwd(const cudaDeviceProp& dprops, int num_splits, void* softmax_lse_accum, // num_splits x batch_size x seqlen_q x num_heads void* out_accum, // num_splits x batch_size x seqlen_q x num_heads x head_size_rounded - bool kv_bsnh) { + bool kv_bsnh, + int local_window_size) { auto round_multiple = [](int x, int m) { return (x + m - 1) / m * m; }; const int head_size_rounded = round_multiple(head_size, 32); const int seqlen_q_rounded = round_multiple(seqlen_q, 128); @@ -247,7 +264,9 @@ Status mha_fwd(const cudaDeviceProp& dprops, softmax_lse, softmax_scale, is_causal, - kv_bsnh); + kv_bsnh, + local_window_size, + is_causal ? 0 : -1); params.dprops = &dprops; params.knew_ptr = nullptr; params.vnew_ptr = nullptr; @@ -306,7 +325,10 @@ Status mha_varlen_fwd(const cudaDeviceProp& dprops, nullptr, softmax_lse, softmax_scale, - is_causal); + is_causal, + true, + -1, + is_causal ? 0 : -1); params.dprops = &dprops; params.num_splits = 0; params.softmax_lseaccum_ptr = nullptr; @@ -347,11 +369,11 @@ Status mha_fwd_kvcache(const cudaDeviceProp& dprops, bool past_bsnh, // otherwise bnsh int num_splits, void* softmax_lse_accum, // num_splits x batch_size x seqlen_q x num_heads - void* out_accum // num_splits x batch_size x seqlen_q x num_heads x head_size_rounded -) { - if (seqlen_q == 1) { - is_causal = false; - } // causal=true is the same as causal=false in this case + void* out_accum, // num_splits x batch_size x seqlen_q x num_heads x head_size_rounded + int local_window_size) { + // if (seqlen_q == 1) { + // is_causal = false; + // } // causal=true is the same as causal=false in this case auto round_multiple = [](int x, int m) { return (x + m - 1) / m * m; }; const int head_size_rounded = round_multiple(head_size, 32); @@ -372,7 +394,9 @@ Status mha_fwd_kvcache(const cudaDeviceProp& dprops, softmax_lse, softmax_scale, is_causal, - past_bsnh); + past_bsnh, + local_window_size, + is_causal ? 
0 : -1); params.dprops = &dprops; if (k != nullptr && v != nullptr) { diff --git a/onnxruntime/contrib_ops/cuda/bert/flash_attention/flash_api.h b/onnxruntime/contrib_ops/cuda/bert/flash_attention/flash_api.h index 58f4304251872..efc1f565c4fa0 100644 --- a/onnxruntime/contrib_ops/cuda/bert/flash_attention/flash_api.h +++ b/onnxruntime/contrib_ops/cuda/bert/flash_attention/flash_api.h @@ -54,7 +54,8 @@ Status mha_fwd(const cudaDeviceProp& dprops, int num_splits = 0, void* softmax_lse_accum = nullptr, // num_splits x batch_size x seqlen_q x num_heads void* out_accum = nullptr, // num_splits x batch_size x seqlen_q x num_heads x head_size_rounded - bool kv_bsnh = true); + bool kv_bsnh = true, + int local_window_size = -1); Status mha_varlen_fwd(const cudaDeviceProp& dprops, cudaStream_t stream, @@ -96,8 +97,8 @@ Status mha_fwd_kvcache(const cudaDeviceProp& dprops, bool past_bsnh, // otherwise bnsh int num_splits = 0, void* softmax_lse_accum = nullptr, // num_splits x batch_size x seqlen_q x num_heads - void* out_accum = nullptr // num_splits x batch_size x seqlen_q x num_heads x head_size_rounded -); + void* out_accum = nullptr, // num_splits x batch_size x seqlen_q x num_heads x head_size_rounded + int local_window_size = -1); size_t get_softmax_lse_size(int max_seqlen_q, int batch_size, int num_heads); diff --git a/onnxruntime/contrib_ops/cuda/bert/flash_attention/flash_fwd_kernel.h b/onnxruntime/contrib_ops/cuda/bert/flash_attention/flash_fwd_kernel.h index eb1c794d6df54..028233f66850f 100644 --- a/onnxruntime/contrib_ops/cuda/bert/flash_attention/flash_fwd_kernel.h +++ b/onnxruntime/contrib_ops/cuda/bert/flash_attention/flash_fwd_kernel.h @@ -29,47 +29,6 @@ using namespace cute; //////////////////////////////////////////////////////////////////////////////////////////////////// -template -CUTE_HOST_DEVICE auto -make_tiled_copy_A_warpcontiguousM(Copy_Atom const& copy_atom, - TiledMMA const& tiled_mma) { - using TileShape_MNK = typename TiledMMA::TiledShape_MNK; - using AtomShape_MNK = typename TiledMMA::AtomShape_MNK; - constexpr int AtomShape_M = decltype(cute::size<0>(AtomShape_MNK{}))::value; - constexpr int kNWarps = decltype(cute::size<0>(TileShape_MNK{}))::value / AtomShape_M; - constexpr int MMAStride_M = MMA_M * AtomShape_M; - auto t = make_tile(cute::Layout, cute::Int>, - cute::Stride<_1, cute::Int>>{}, - make_layout(cute::size<2>(TileShape_MNK{}))); - - return make_tiled_copy_impl(copy_atom, tiled_mma.get_layoutA_TV(), t); -} - -//////////////////////////////////////////////////////////////////////////////////////////////////// - -template -CUTE_HOST_DEVICE auto -make_tiled_copy_C_warpcontiguousM(Copy_Atom const& copy_atom, - TiledMMA const& tiled_mma) { - using TileShape_MNK = typename TiledMMA::TiledShape_MNK; - using AtomShape_MNK = typename TiledMMA::AtomShape_MNK; - constexpr int AtomShape_M = decltype(cute::size<0>(AtomShape_MNK{}))::value; - constexpr int kNWarps = decltype(cute::size<0>(TileShape_MNK{}))::value / AtomShape_M; - constexpr int MMAStride_M = MMA_M * AtomShape_M; - auto t = make_tile(cute::Layout, cute::Int>, - cute::Stride<_1, cute::Int>>{}, - // TODO: Shouldn't this be size<1>? 
- make_layout(cute::size<2>(TileShape_MNK{}))); - // if (cute::thread0()) {printf("make_tiled_copy_C_warpcontiguousM "); print(t); printf("\n"); } - return make_tiled_copy_impl(copy_atom, tiled_mma.get_layoutC_TV(), t); -} - -//////////////////////////////////////////////////////////////////////////////////////////////////// - template inline __device__ void softmax_rescale_o(Tensor0& scores, Tensor1& scores_max, Tensor1& scores_sum, Tensor2& acc_o, float softmax_scale_log2) { @@ -123,7 +82,7 @@ inline __device__ void write_softmax_to_gmem( //////////////////////////////////////////////////////////////////////////////////////////////////// -template +template inline __device__ void compute_attn_1rowblock(const Params& params, const int bidb, const int bidh, const int m_block) { using Element = typename Kernel_traits::Element; using ElementAccum = typename Kernel_traits::ElementAccum; @@ -144,12 +103,14 @@ inline __device__ void compute_attn_1rowblock(const Params& params, const int bi const BlockInfo binfo(params, bidb); if (m_block * kBlockM >= binfo.actual_seqlen_q || binfo.actual_seqlen_k == 0) return; + const int n_block_min = !Is_local ? 0 : std::max(0, (m_block * kBlockM + binfo.actual_seqlen_k - binfo.actual_seqlen_q - params.window_size_left) / kBlockN); int n_block_max = cute::ceil_div(binfo.actual_seqlen_k, kBlockN); - if (Is_causal) { - n_block_max = std::min(n_block_max, cute::ceil_div((m_block + 1) * kBlockM + binfo.actual_seqlen_k - binfo.actual_seqlen_q, kBlockN)); + if (Is_causal || Is_local) { + n_block_max = std::min(n_block_max, + cute::ceil_div((m_block + 1) * kBlockM + binfo.actual_seqlen_k - binfo.actual_seqlen_q + params.window_size_right, kBlockN)); // We exit early and write 0 to gO and gLSE. // Otherwise we might read OOB elements from gK and gV. - if (n_block_max <= 0) { + if (n_block_max <= n_block_min) { const index_t row_offset_o = binfo.q_offset(params.o_batch_stride, params.o_row_stride, bidb) + m_block * kBlockM * params.o_row_stride + bidh * params.o_head_stride; const index_t row_offset_lse = (bidb * params.h + bidh) * params.seqlen_q + m_block * kBlockM; Tensor gO = make_tensor(make_gmem_ptr(reinterpret_cast(params.o_ptr) + row_offset_o), @@ -197,7 +158,6 @@ inline __device__ void compute_attn_1rowblock(const Params& params, const int bi const index_t row_offset_k = binfo.k_offset(params.k_batch_stride, params.k_row_stride, bidb) + (n_block_max - 1) * kBlockN * params.k_row_stride + (bidh / params.h_h_k_ratio) * params.k_head_stride; const index_t row_offset_v = binfo.k_offset(params.v_batch_stride, params.v_row_stride, bidb) + (n_block_max - 1) * kBlockN * params.v_row_stride + (bidh / params.h_h_k_ratio) * params.v_head_stride; const index_t row_offset_p = ((bidb * params.h + bidh) * params.seqlen_q_rounded + m_block * kBlockM) * params.seqlen_k_rounded + (n_block_max - 1) * kBlockN; - cute::Tensor gQ = make_tensor(make_gmem_ptr(reinterpret_cast(params.q_ptr) + row_offset_q), cute::Shape, cute::Int>{}, make_stride(params.q_row_stride, _1{})); @@ -332,9 +292,9 @@ inline __device__ void compute_attn_1rowblock(const Params& params, const int bi // If not even_N, then seqlen_k might end in the middle of a block. In that case we need to // mask 2 blocks (e.g. when kBlockM == kBlockN), not just 1. - constexpr int n_masking_steps = !Is_causal + constexpr int n_masking_steps = (!Is_causal && !Is_local) ? 1 - : (Is_even_MN ? cute::ceil_div(kBlockM, kBlockN) : cute::ceil_div(kBlockM, kBlockN) + 1); + : ((Is_even_MN && Is_causal) ? 
cute::ceil_div(kBlockM, kBlockN) : cute::ceil_div(kBlockM, kBlockN) + 1); #pragma unroll for (int masking_step = 0; masking_step < n_masking_steps; ++masking_step, --n_block) { cute::Tensor acc_s = partition_fragment_C(tiled_mma, cute::Shape, cute::Int>{}); // (MMA=4, MMA_M, MMA_N) @@ -364,22 +324,22 @@ inline __device__ void compute_attn_1rowblock(const Params& params, const int bi // We don't put the masking before the matmul S = Q K^T because we don't clear sK // for rows outside actual_seqlen_k. So those rows could have Inf / NaN, and the matmul // can produce Inf / NaN. - if (!Is_causal) { + if (!Is_causal && !Is_local) { if (!Is_even_MN) { flash::apply_mask(scores, binfo.actual_seqlen_k - n_block * kBlockN); } } else { // I can't get the stride from idx_row - flash::apply_mask_causal(scores, n_block * kBlockN, binfo.actual_seqlen_k, - // m_block * kBlockM + get<0>(idx_row(0)), - m_block * kBlockM + (tidx / 32) * 16 + (tidx % 32) / 4, - binfo.actual_seqlen_q, - kNWarps * 16); + flash::apply_mask_local(scores, n_block * kBlockN, binfo.actual_seqlen_k, + // m_block * kBlockM + get<0>(idx_row(0)), + m_block * kBlockM + (tidx / 32) * 16 + (tidx % 32) / 4, + binfo.actual_seqlen_q, kNWarps * 16, + params.window_size_left, params.window_size_right); } flash::cp_async_wait<0>(); __syncthreads(); - if (n_block > 0) { + if (n_block > n_block_min) { // Advance gK tKgK.data() = tKgK.data() + (-int(kBlockN * params.k_row_stride)); flash::copy(gmem_tiled_copy_QKV, tKgK, tKsK, tKVcKV, tKVpKV); @@ -390,8 +350,8 @@ inline __device__ void compute_attn_1rowblock(const Params& params, const int bi // TODO: when we have key_padding_mask we'll need to Check_inf masking_step == 0 - ? softmax_rescale_o(scores, scores_max, scores_sum, acc_o, params.scale_softmax_log2) - : softmax_rescale_o(scores, scores_max, scores_sum, acc_o, params.scale_softmax_log2); + ? 
softmax_rescale_o(scores, scores_max, scores_sum, acc_o, params.scale_softmax_log2) + : softmax_rescale_o(scores, scores_max, scores_sum, acc_o, params.scale_softmax_log2); // Convert scores from fp32 to fp16/bf16 cute::Tensor rP = flash::convert_type(scores); @@ -408,14 +368,14 @@ inline __device__ void compute_attn_1rowblock(const Params& params, const int bi flash::gemm_A_in_regs(acc_o, tOrP, tOrVt, tOsVt, tiled_mma, smem_tiled_copy_V, smem_thr_copy_V); // This check is at the end of the loop since we always have at least 1 iteration - if (n_masking_steps > 1 && n_block <= 0) { + if (n_masking_steps > 1 && n_block <= n_block_min) { --n_block; break; } } // These are the iterations where we don't need masking on S - for (; n_block >= 0; --n_block) { + for (; n_block >= n_block_min; --n_block) { cute::Tensor acc_s = partition_fragment_C(tiled_mma, cute::Shape, cute::Int>{}); // (MMA=4, MMA_M, MMA_N) clear(acc_s); flash::cp_async_wait<0>(); @@ -431,7 +391,7 @@ inline __device__ void compute_attn_1rowblock(const Params& params, const int bi flash::cp_async_wait<0>(); __syncthreads(); - if (n_block > 0) { + if (n_block > n_block_min) { // Advance gK tKgK.data() = tKgK.data() + (-int(kBlockN * params.k_row_stride)); flash::copy(gmem_tiled_copy_QKV, tKgK, tKsK, tKVcKV, tKVpKV); @@ -441,8 +401,15 @@ inline __device__ void compute_attn_1rowblock(const Params& params, const int bi } // Reshape acc_s from (MMA=4, MMA_M, MMA_N) to (nrow=(2, MMA_M), ncol=(2, MMA_N)) - cute::Tensor scores = make_tensor(acc_s.data(), flash::convert_layout_acc_rowcol(acc_s.layout())); - softmax_rescale_o(scores, scores_max, scores_sum, acc_o, params.scale_softmax_log2); + Tensor scores = make_tensor(acc_s.data(), flash::convert_layout_acc_rowcol(acc_s.layout())); + if (Is_local && n_block * kBlockN < (m_block + 1) * kBlockM + binfo.actual_seqlen_k - binfo.actual_seqlen_q + params.window_size_right) { + flash::apply_mask_local( + scores, n_block * kBlockN, binfo.actual_seqlen_k, + m_block * kBlockM + (tidx / 32) * 16 + (tidx % 32) / 4, + binfo.actual_seqlen_q, kNWarps * 16, + params.window_size_left, params.window_size_right); + } + softmax_rescale_o(scores, scores_max, scores_sum, acc_o, params.scale_softmax_log2); cute::Tensor rP = flash::convert_type(scores); // Reshape rP from (nrow=(2, MMA_M), ncol=(2, MMA_N)) to ((2, 2, 2), MMA_M, MMA_N / 2) @@ -543,7 +510,7 @@ inline __device__ void compute_attn_1rowblock(const Params& params, const int bi //////////////////////////////////////////////////////////////////////////////////////////////////// -template +template inline __device__ void compute_attn_1rowblock_splitkv(const Params& params, const int bidb, const int bidh, const int m_block, const int n_split_idx, const int num_n_splits) { using Element = typename Kernel_traits::Element; using ElementAccum = typename Kernel_traits::ElementAccum; @@ -572,11 +539,13 @@ inline __device__ void compute_attn_1rowblock_splitkv(const Params& params, cons if (m_block * kBlockM >= binfo.actual_seqlen_q) return; const int n_blocks_per_split = ((params.seqlen_k + kBlockN - 1) / kBlockN + num_n_splits - 1) / num_n_splits; - const int n_block_min = n_split_idx * n_blocks_per_split; + const int n_block_min = !Is_local + ? 
n_split_idx * n_blocks_per_split + : std::max(n_split_idx * n_blocks_per_split, (m_block * kBlockM + binfo.actual_seqlen_k - binfo.actual_seqlen_q - params.window_size_left) / kBlockN); int n_block_max = std::min(cute::ceil_div(binfo.actual_seqlen_k, kBlockN), (n_split_idx + 1) * n_blocks_per_split); - if (Is_causal) { + if (Is_causal || Is_local) { n_block_max = std::min(n_block_max, - cute::ceil_div((m_block + 1) * kBlockM + binfo.actual_seqlen_k - binfo.actual_seqlen_q, kBlockN)); + cute::ceil_div((m_block + 1) * kBlockM + binfo.actual_seqlen_k - binfo.actual_seqlen_q + params.window_size_right, kBlockN)); } if (n_block_min >= n_block_max) { // This also covers the case where n_block_max <= 0 // We exit early and write 0 to gOaccum and -inf to gLSEaccum. @@ -626,10 +595,9 @@ inline __device__ void compute_attn_1rowblock_splitkv(const Params& params, cons const index_t row_offset_q = binfo.q_offset(params.q_batch_stride, params.q_row_stride, bidb) + m_block * kBlockM * params.q_row_stride + bidh * params.q_head_stride; // We move K and V to the last block. - const index_t row_offset_k = binfo.k_offset(params.k_batch_stride, params.k_row_stride, bidb) + (n_block_max - 1) * kBlockN * params.k_row_stride + (bidh / params.h_h_k_ratio) * params.k_head_stride; - const index_t row_offset_v = binfo.k_offset(params.v_batch_stride, params.v_row_stride, bidb) + (n_block_max - 1) * kBlockN * params.v_row_stride + (bidh / params.h_h_k_ratio) * params.v_head_stride; - const index_t row_offset_knew = binfo.k_offset(params.knew_batch_stride, params.knew_row_stride, bidb) + ((n_block_max - 1) * kBlockN) * params.knew_row_stride + (bidh / params.h_h_k_ratio) * params.knew_head_stride; - const index_t row_offset_vnew = binfo.k_offset(params.vnew_batch_stride, params.vnew_row_stride, bidb) + ((n_block_max - 1) * kBlockN) * params.vnew_row_stride + (bidh / params.h_h_k_ratio) * params.vnew_head_stride; + const int bidb_cache = params.cache_batch_idx == nullptr ? bidb : params.cache_batch_idx[bidb]; + const index_t row_offset_k = binfo.k_offset(params.k_batch_stride, params.k_row_stride, bidb_cache) + (n_block_max - 1) * kBlockN * params.k_row_stride + (bidh / params.h_h_k_ratio) * params.k_head_stride; + const index_t row_offset_v = binfo.k_offset(params.v_batch_stride, params.v_row_stride, bidb_cache) + (n_block_max - 1) * kBlockN * params.v_row_stride + (bidh / params.h_h_k_ratio) * params.v_head_stride; Tensor gQ = make_tensor(make_gmem_ptr(reinterpret_cast(params.q_ptr) + row_offset_q), Shape, Int>{}, @@ -641,16 +609,6 @@ inline __device__ void compute_attn_1rowblock_splitkv(const Params& params, cons Tensor gV = make_tensor(make_gmem_ptr(reinterpret_cast(params.v_ptr) + row_offset_v), Shape, Int>{}, make_stride(params.v_row_stride, _1{})); - // Subtract seqlen_k_cache * row stride so that conceptually gK and gKnew "line up". When we access them, - // e.g. if gK has 128 rows and gKnew has 64 rows, we access gK[:128] and gKNew[128:128 + 64]. - // This maps to accessing the first 64 rows of knew_ptr. 
- Tensor gKnew = make_tensor(make_gmem_ptr(reinterpret_cast(params.knew_ptr) + row_offset_knew - binfo.seqlen_k_cache * params.knew_row_stride), - Shape, Int>{}, - make_stride(params.knew_row_stride, _1{})); - // if (threadIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0) { printf("knew_ptr = %p, row_offset_knew = %d, gKnew_ptr = %p\n", params.knew_ptr, row_offset_knew, gKnew.data()); } - Tensor gVnew = make_tensor(make_gmem_ptr(reinterpret_cast(params.vnew_ptr) + row_offset_vnew - binfo.seqlen_k_cache * params.vnew_row_stride), - Shape, Int>{}, - make_stride(params.vnew_row_stride, _1{})); Tensor sQ = make_tensor(make_smem_ptr(reinterpret_cast(smem_)), typename Kernel_traits::SmemLayoutQ{}); @@ -664,11 +622,9 @@ inline __device__ void compute_attn_1rowblock_splitkv(const Params& params, cons Tensor tQgQ = gmem_thr_copy_QKV.partition_S(gQ); Tensor tQsQ = gmem_thr_copy_QKV.partition_D(sQ); - Tensor tKgK = gmem_thr_copy_QKV.partition_S(gK); // (KCPY, KCPY_N, KCPY_K) - Tensor tKgKnew = gmem_thr_copy_QKV.partition_S(gKnew); // (KCPY, KCPY_N, KCPY_K) + Tensor tKgK = gmem_thr_copy_QKV.partition_S(gK); // (KCPY, KCPY_N, KCPY_K) Tensor tKsK = gmem_thr_copy_QKV.partition_D(sK); - Tensor tVgV = gmem_thr_copy_QKV.partition_S(gV); // (VCPY, VCPY_N, VCPY_K) - Tensor tVgVnew = gmem_thr_copy_QKV.partition_S(gVnew); // (VCPY, VCPY_N, VCPY_K) + Tensor tVgV = gmem_thr_copy_QKV.partition_S(gV); // (VCPY, VCPY_N, VCPY_K) Tensor tVsV = gmem_thr_copy_QKV.partition_D(sV); typename Kernel_traits::TiledMma tiled_mma; @@ -732,17 +688,129 @@ inline __device__ void compute_attn_1rowblock_splitkv(const Params& params, cons } // Prologue + // Copy from Knew to K, optionally apply rotary embedding. + typename Kernel_traits::GmemTiledCopyRotcossin gmem_tiled_copy_rotary; + auto gmem_thr_copy_rotary = gmem_tiled_copy_rotary.get_thread_slice(tidx); + typename Kernel_traits::GmemTiledCopyRotcossinCont gmem_tiled_copy_rotary_cont; + auto gmem_thr_copy_rotary_cont = gmem_tiled_copy_rotary_cont.get_thread_slice(tidx); + if constexpr (Append_KV) { + // Even if we have MQA / GQA, all threadblocks responsible for the same KV head are writing to + // gmem. Technically it's a race condition, but they all write the same content anyway, and it's safe. + // We want to do this so that all threadblocks can proceed right after they finish writing the KV cache. 
+ const index_t row_offset_cossin = ((n_block_max - 1) * kBlockN) * (params.rotary_dim / 2); + Tensor gCos = make_tensor(make_gmem_ptr(reinterpret_cast(params.rotary_cos_ptr) + row_offset_cossin), + Shape, Int>{}, + make_stride(params.rotary_dim / 2, _1{})); + Tensor gSin = make_tensor(make_gmem_ptr(reinterpret_cast(params.rotary_sin_ptr) + row_offset_cossin), + Shape, Int>{}, + make_stride(params.rotary_dim / 2, _1{})); + Tensor gCosCont = make_tensor(make_gmem_ptr(reinterpret_cast(params.rotary_cos_ptr) + row_offset_cossin), + Shape, Int>{}, + make_stride(params.rotary_dim / 2, _1{})); + Tensor gSinCont = make_tensor(make_gmem_ptr(reinterpret_cast(params.rotary_sin_ptr) + row_offset_cossin), + Shape, Int>{}, + make_stride(params.rotary_dim / 2, _1{})); + Tensor tRgCos = gmem_thr_copy_rotary.partition_S(gCos); + Tensor tRgSin = gmem_thr_copy_rotary.partition_S(gSin); + Tensor tRgCosCont = gmem_thr_copy_rotary_cont.partition_S(gCosCont); + Tensor tRgSinCont = gmem_thr_copy_rotary_cont.partition_S(gSinCont); + // if (cute::thread(0, 0)) { printf("rotary_cos_ptr = %p, gCos.data() = %p, tRgCos.data() = %p, rotary_dim = %d\n", params.rotary_cos_ptr, gCos.data(), tRgCos.data(), params.rotary_dim); } + // if (cute::thread(8, 0)) { print_tensor(gCos); } + // if (cute::thread(0, 0)) { print_tensor(tRgCos); } + + const index_t row_offset_knew = binfo.k_offset(params.knew_batch_stride, params.knew_row_stride, bidb) + ((n_block_max - 1) * kBlockN) * params.knew_row_stride + (bidh / params.h_h_k_ratio) * params.knew_head_stride; + const index_t row_offset_vnew = binfo.k_offset(params.vnew_batch_stride, params.vnew_row_stride, bidb) + ((n_block_max - 1) * kBlockN) * params.vnew_row_stride + (bidh / params.h_h_k_ratio) * params.vnew_head_stride; + // Subtract seqlen_k_cache * row stride so that conceptually gK and gKnew "line up". When we access them, + // e.g. if gK has 128 rows and gKnew has 64 rows, we access gK[:128] and gKNew[128:128 + 64]. + // This maps to accessing the first 64 rows of knew_ptr. 
+ Tensor gKnew = make_tensor(make_gmem_ptr(reinterpret_cast(params.knew_ptr) + row_offset_knew - binfo.seqlen_k_cache * params.knew_row_stride), + Shape, Int>{}, + make_stride(params.knew_row_stride, _1{})); + // if (threadIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0) { printf("knew_ptr = %p, row_offset_knew = %d, gKnew_ptr = %p\n", params.knew_ptr, row_offset_knew, gKnew.data()); } + Tensor gVnew = make_tensor(make_gmem_ptr(reinterpret_cast(params.vnew_ptr) + row_offset_vnew - binfo.seqlen_k_cache * params.vnew_row_stride), + Shape, Int>{}, + make_stride(params.vnew_row_stride, _1{})); + Tensor tKgKnew = gmem_thr_copy_QKV.partition_S(gKnew); // (KCPY, KCPY_N, KCPY_K) + Tensor tVgVnew = gmem_thr_copy_QKV.partition_S(gVnew); // (VCPY, VCPY_N, VCPY_K) + + const int n_block_copy_min = std::max(n_block_min, binfo.seqlen_k_cache / kBlockN); + for (int n_block = n_block_max - 1; n_block >= n_block_copy_min; n_block--) { + flash::copy_w_min_idx( + tVgVnew, tVgV, tKVcKV, tKVpKV, binfo.actual_seqlen_k - n_block * kBlockN, binfo.seqlen_k_cache - n_block * kBlockN); + tVgV.data() = tVgV.data() + (-int(kBlockN * params.v_row_stride)); + tVgVnew.data() = tVgVnew.data() + (-int(kBlockN * params.vnew_row_stride)); + if (params.rotary_dim == 0) { + flash::copy_w_min_idx( + tKgKnew, tKgK, tKVcKV, tKVpKV, binfo.actual_seqlen_k - n_block * kBlockN, binfo.seqlen_k_cache - n_block * kBlockN); + } else { + if (params.is_rotary_interleaved) { + // Don't clear OOB_K because we're writing to global memory + flash::copy_rotary_interleaved( + tKgKnew, tKgK, tRgCos, tRgSin, tKVcKV, binfo.actual_seqlen_k - n_block * kBlockN, + binfo.seqlen_k_cache - n_block * kBlockN, params.d, params.rotary_dim); + tRgCos.data() = tRgCos.data() + (-int(kBlockN * params.rotary_dim / 2)); + tRgSin.data() = tRgSin.data() + (-int(kBlockN * params.rotary_dim / 2)); + } else { + // Don't clear OOB_K because we're writing to global memory + flash::copy_rotary_contiguous( + tKgKnew, tKgK, tRgCosCont, tRgSinCont, tKVcKV, binfo.actual_seqlen_k - n_block * kBlockN, + binfo.seqlen_k_cache - n_block * kBlockN, params.d, params.rotary_dim); + tRgCosCont.data() = tRgCosCont.data() + (-int(kBlockN * params.rotary_dim / 2)); + tRgSinCont.data() = tRgSinCont.data() + (-int(kBlockN * params.rotary_dim / 2)); + } + } + tKgK.data() = tKgK.data() + (-int(kBlockN * params.k_row_stride)); + tKgKnew.data() = tKgKnew.data() + (-int(kBlockN * params.knew_row_stride)); + } + // Need this before we can read in K again, so that we'll see the updated K values. + __syncthreads(); + if (n_block_max > n_block_copy_min) { + tKgK.data() = tKgK.data() + (n_block_max - n_block_copy_min) * kBlockN * params.k_row_stride; + tVgV.data() = tVgV.data() + (n_block_max - n_block_copy_min) * kBlockN * params.v_row_stride; + } + } + // Read Q from gmem to smem, optionally apply rotary embedding. Tensor tQrQ = make_fragment_like(tQgQ); - // We don't need to clear the sQ smem tiles since we'll only write out the valid outputs - flash::copy(gmem_tiled_copy_QKV, tQgQ, tQsQ, tQcQ, tQpQ, - binfo.actual_seqlen_q - m_block * kBlockM); + if (!Append_KV || params.rotary_dim == 0) { + // We don't need to clear the sQ smem tiles since we'll only write out the valid outputs + flash::copy(gmem_tiled_copy_QKV, tQgQ, tQsQ, tQcQ, tQpQ, + binfo.actual_seqlen_q - m_block * kBlockM); + } else { + const index_t row_offset_cossin = (binfo.seqlen_k_cache + (Is_causal || Is_local ? 
m_block * kBlockM : 0)) * (params.rotary_dim / 2); + // If not causal, all the queries get the same the cos/sin, taken at location seqlen_k_cache. + // We do this by setting the row stride of gCos / gSin to 0. + Tensor gCos = make_tensor(make_gmem_ptr(reinterpret_cast(params.rotary_cos_ptr) + row_offset_cossin), + Shape, Int>{}, + make_stride(Is_causal || Is_local ? params.rotary_dim / 2 : 0, _1{})); + Tensor gSin = make_tensor(make_gmem_ptr(reinterpret_cast(params.rotary_sin_ptr) + row_offset_cossin), + Shape, Int>{}, + make_stride(Is_causal || Is_local ? params.rotary_dim / 2 : 0, _1{})); + Tensor gCosCont = make_tensor(make_gmem_ptr(reinterpret_cast(params.rotary_cos_ptr) + row_offset_cossin), + Shape, Int>{}, + make_stride(Is_causal || Is_local ? params.rotary_dim / 2 : 0, _1{})); + Tensor gSinCont = make_tensor(make_gmem_ptr(reinterpret_cast(params.rotary_sin_ptr) + row_offset_cossin), + Shape, Int>{}, + make_stride(Is_causal || Is_local ? params.rotary_dim / 2 : 0, _1{})); + Tensor tRgCos = gmem_thr_copy_rotary.partition_S(gCos); + Tensor tRgSin = gmem_thr_copy_rotary.partition_S(gSin); + Tensor tRgCosCont = gmem_thr_copy_rotary_cont.partition_S(gCosCont); + Tensor tRgSinCont = gmem_thr_copy_rotary_cont.partition_S(gSinCont); + if (params.is_rotary_interleaved) { + flash::copy_rotary_interleaved( + tQgQ, tQsQ, tRgCos, tRgSin, tQcQ, binfo.actual_seqlen_q - m_block * kBlockM, + 0, params.d, params.rotary_dim); + } else { + flash::copy_rotary_contiguous( + tQgQ, tQsQ, tRgCosCont, tRgSinCont, tQcQ, binfo.actual_seqlen_q - m_block * kBlockM, + 0, params.d, params.rotary_dim); + } + } int n_block = n_block_max - 1; // We don't need to clear the sK smem tiles since we'll mask out the scores anyway. - flash::copy_2_sources( - gmem_tiled_copy_QKV, tKgK, tKgKnew, tKsK, tKVcKV, tKVpKV, - binfo.actual_seqlen_k - n_block * kBlockN, binfo.seqlen_k_cache - n_block * kBlockN); + flash::copy(gmem_tiled_copy_QKV, tKgK, tKsK, tKVcKV, tKVpKV, + binfo.actual_seqlen_k - n_block * kBlockN); cute::cp_async_fence(); // flash::cp_async_wait<0>(); @@ -760,9 +828,9 @@ inline __device__ void compute_attn_1rowblock_splitkv(const Params& params, cons // If not even_N, then seqlen_k might end in the middle of a block. In that case we need to // mask 2 blocks (e.g. when kBlockM == kBlockN), not just 1. - constexpr int n_masking_steps = !Is_causal + constexpr int n_masking_steps = (!Is_causal && !Is_local) ? 1 - : (Is_even_MN ? cute::ceil_div(kBlockM, kBlockN) : cute::ceil_div(kBlockM, kBlockN) + 1); + : ((Is_even_MN && Is_causal) ? 
cute::ceil_div(kBlockM, kBlockN) : cute::ceil_div(kBlockM, kBlockN) + 1); #pragma unroll for (int masking_step = 0; masking_step < n_masking_steps; ++masking_step, --n_block) { Tensor acc_s = partition_fragment_C(tiled_mma, Shape, Int>{}); // (MMA=4, MMA_M, MMA_N) @@ -770,32 +838,14 @@ inline __device__ void compute_attn_1rowblock_splitkv(const Params& params, cons flash::cp_async_wait<0>(); __syncthreads(); - if constexpr (Append_KV) { - // if (cute::thread0()) { print(tKgK); } - // if (cute::thread0()) { print(tKsK); } - // if (threadIdx.x == 0 && blockIdx.z == 0) { printf("seqlen_k_cache = %d, (nblock + 1) * kBlockN = %d\n", binfo.seqlen_k_cache, (n_block + 1) * kBlockN); } - if (bidh % params.h_h_k_ratio == 0 && binfo.seqlen_k_cache < (n_block + 1) * kBlockN) { - flash::copy_w_min_idx( - tKsK, tKgK, tKVcKV, tKVpKV, binfo.actual_seqlen_k - n_block * kBlockN, binfo.seqlen_k_cache - n_block * kBlockN); - } - // __syncthreads(); - // if (cute::thread0()) { print(tKgK); } - // __syncthreads(); - } - // Advance gV if (masking_step > 0) { tVgV.data() = tVgV.data() + (-int(kBlockN * params.v_row_stride)); - if (Append_KV) { - tVgVnew.data() = tVgVnew.data() + (-int(kBlockN * params.vnew_row_stride)); - } - flash::copy_2_sources( - gmem_tiled_copy_QKV, tVgV, tVgVnew, tVsV, tKVcKV, tKVpKV, 0, binfo.seqlen_k_cache - n_block * kBlockN); + flash::copy(gmem_tiled_copy_QKV, tVgV, tVsV, tKVcKV, tKVpKV); } else { // Clear the smem tiles to account for predicated off loads - flash::copy_2_sources( - gmem_tiled_copy_QKV, tVgV, tVgVnew, tVsV, tKVcKV, tKVpKV, - binfo.actual_seqlen_k - n_block * kBlockN, binfo.seqlen_k_cache - n_block * kBlockN); + flash::copy( + gmem_tiled_copy_QKV, tVgV, tVsV, tKVcKV, tKVpKV, binfo.actual_seqlen_k - n_block * kBlockN); } cute::cp_async_fence(); @@ -810,15 +860,15 @@ inline __device__ void compute_attn_1rowblock_splitkv(const Params& params, cons // We don't put the masking before the matmul S = Q K^T because we don't clear sK // for rows outside actual_seqlen_k. So those rows could have Inf / NaN, and the matmul // can produce Inf / NaN. 
- if (!Is_causal) { + if (!Is_causal && !Is_local) { if (!Is_even_MN) { flash::apply_mask(scores, binfo.actual_seqlen_k - n_block * kBlockN); } } else { - flash::apply_mask_causal(scores, n_block * kBlockN, binfo.actual_seqlen_k, - m_block * kBlockM + (tidx / 32) * 16 + (tidx % 32) / 4, - binfo.actual_seqlen_q, - kNWarps * 16); + flash::apply_mask_local(scores, n_block * kBlockN, binfo.actual_seqlen_k, + m_block * kBlockM + (tidx / 32) * 16 + (tidx % 32) / 4, + binfo.actual_seqlen_q, kNWarps * 16, + params.window_size_left, params.window_size_right); } flash::cp_async_wait<0>(); @@ -826,26 +876,10 @@ inline __device__ void compute_attn_1rowblock_splitkv(const Params& params, cons // if (tidx == 0 && blockIdx.y == 0 && blockIdx.z == 0) { print(tVsV); } // __syncthreads(); - // if (tidx == 0 && blockIdx.y == 1 && blockIdx.z == 0) { printf("n_block = %d, n_block_min = %d\n", n_block, n_block_min); } - if constexpr (Append_KV) { - // if (threadIdx.x == 0 && blockIdx.z == 0) { printf("n_split_idx = %d, bidh = %d, params.h_h_k_ratio = %d, seqlen_k_cache = %d, (nblock + 1) * kBlockN = %d\n", n_split_idx, bidh, params.h_h_k_ratio, binfo.seqlen_k_cache, (n_block + 1) * kBlockN); } - if (bidh % params.h_h_k_ratio == 0 && binfo.seqlen_k_cache < (n_block + 1) * kBlockN) { - flash::copy_w_min_idx( - tVsV, tVgV, tKVcKV, tKVpKV, binfo.actual_seqlen_k - n_block * kBlockN, binfo.seqlen_k_cache - n_block * kBlockN); - } - } - if (n_block > n_block_min) { // Advance gK - // if (tidx == 0 && blockIdx.y == 1 && blockIdx.z == 0) { printf("tKgKnew = %p\n", tKgKnew.data()); } tKgK.data() = tKgK.data() + (-int(kBlockN * params.k_row_stride)); - if (Append_KV) { - tKgKnew.data() = tKgKnew.data() + (-int(kBlockN * params.knew_row_stride)); - } - // if (tidx == 0 && blockIdx.y == 1 && blockIdx.z == 0) { printf("tKgKnew = %p, row_idx_switch = %d\n", tKgKnew.data(), binfo.seqlen_k_cache - (n_block - 1) * kBlockN); } - flash::copy_2_sources( - gmem_tiled_copy_QKV, tKgK, tKgKnew, tKsK, tKVcKV, tKVpKV, 0, - binfo.seqlen_k_cache - (n_block - 1) * kBlockN); + flash::copy(gmem_tiled_copy_QKV, tKgK, tKsK, tKVcKV, tKVpKV); // This cp_async_fence needs to be in the if block, otherwise the synchronization // isn't right and we get race conditions. cute::cp_async_fence(); @@ -853,8 +887,8 @@ inline __device__ void compute_attn_1rowblock_splitkv(const Params& params, cons // We have key_padding_mask so we'll need to Check_inf masking_step == 0 - ? softmax_rescale_o(scores, scores_max, scores_sum, acc_o, params.scale_softmax_log2) - : softmax_rescale_o(scores, scores_max, scores_sum, acc_o, params.scale_softmax_log2); + ? 
softmax_rescale_o(scores, scores_max, scores_sum, acc_o, params.scale_softmax_log2) + : softmax_rescale_o(scores, scores_max, scores_sum, acc_o, params.scale_softmax_log2); // if (cute::thread0()) { print(scores_max); print(scores_sum); print(scores); } // Convert scores from fp32 to fp16/bf16 @@ -879,20 +913,9 @@ inline __device__ void compute_attn_1rowblock_splitkv(const Params& params, cons clear(acc_s); flash::cp_async_wait<0>(); __syncthreads(); - if constexpr (Append_KV) { - // if (threadIdx.x == 0 && blockIdx.z == 0) { printf("n_split_idx = %d, bidh = %d, params.h_h_k_ratio = %d, seqlen_k_cache = %d, (nblock + 1) * kBlockN = %d\n", n_split_idx, bidh, params.h_h_k_ratio, binfo.seqlen_k_cache, (n_block + 1) * kBlockN); } - if (bidh % params.h_h_k_ratio == 0 && binfo.seqlen_k_cache < (n_block + 1) * kBlockN) { - flash::copy_w_min_idx( - tKsK, tKgK, tKVcKV, tKVpKV, binfo.actual_seqlen_k - n_block * kBlockN, binfo.seqlen_k_cache - n_block * kBlockN); - } - } // Advance gV tVgV.data() = tVgV.data() + (-int(kBlockN * params.v_row_stride)); - if (Append_KV) { - tVgVnew.data() = tVgVnew.data() + (-int(kBlockN * params.vnew_row_stride)); - } - flash::copy_2_sources( - gmem_tiled_copy_QKV, tVgV, tVgVnew, tVsV, tKVcKV, tKVpKV, 0, binfo.seqlen_k_cache - n_block * kBlockN); + flash::copy(gmem_tiled_copy_QKV, tVgV, tVsV, tKVcKV, tKVpKV); cute::cp_async_fence(); flash::gemm( @@ -901,22 +924,10 @@ inline __device__ void compute_attn_1rowblock_splitkv(const Params& params, cons flash::cp_async_wait<0>(); __syncthreads(); - if constexpr (Append_KV) { - // if (threadIdx.x == 0 && blockIdx.z == 0) { printf("seqlen_k_cache = %d, (nblock + 1) * kBlockN = %d\n", binfo.seqlen_k_cache, (n_block + 1) * kBlockN); } - if (bidh % params.h_h_k_ratio == 0 && binfo.seqlen_k_cache < (n_block + 1) * kBlockN) { - flash::copy_w_min_idx( - tVsV, tVgV, tKVcKV, tKVpKV, binfo.actual_seqlen_k - n_block * kBlockN, binfo.seqlen_k_cache - n_block * kBlockN); - } - } if (n_block > n_block_min) { // Advance gK tKgK.data() = tKgK.data() + (-int(kBlockN * params.k_row_stride)); - if (Append_KV) { - tKgKnew.data() = tKgKnew.data() + (-int(kBlockN * params.knew_row_stride)); - } - flash::copy_2_sources( - gmem_tiled_copy_QKV, tKgK, tKgKnew, tKsK, tKVcKV, tKVpKV, 0, - binfo.seqlen_k_cache - (n_block - 1) * kBlockN); + flash::copy(gmem_tiled_copy_QKV, tKgK, tKsK, tKVcKV, tKVpKV); // This cp_async_fence needs to be in the if block, otherwise the synchronization // isn't right and we get race conditions. 
cute::cp_async_fence(); @@ -924,7 +935,14 @@ inline __device__ void compute_attn_1rowblock_splitkv(const Params& params, cons // Reshape acc_s from (MMA=4, MMA_M, MMA_N) to (nrow=(2, MMA_M), ncol=(2, MMA_N)) Tensor scores = make_tensor(acc_s.data(), flash::convert_layout_acc_rowcol(acc_s.layout())); - softmax_rescale_o(scores, scores_max, scores_sum, acc_o, params.scale_softmax_log2); + if (Is_local && n_block * kBlockN < (m_block + 1) * kBlockM + binfo.actual_seqlen_k - binfo.actual_seqlen_q + params.window_size_right) { + flash::apply_mask_local( + scores, n_block * kBlockN, binfo.actual_seqlen_k, + m_block * kBlockM + (tidx / 32) * 16 + (tidx % 32) / 4, + binfo.actual_seqlen_q, kNWarps * 16, + params.window_size_left, params.window_size_right); + } + softmax_rescale_o(scores, scores_max, scores_sum, acc_o, params.scale_softmax_log2); Tensor rP = flash::convert_type(scores); // Reshape rP from (nrow=(2, MMA_M), ncol=(2, MMA_N)) to ((2, 2, 2), MMA_M, MMA_N / 2) @@ -1031,7 +1049,7 @@ inline __device__ void compute_attn_1rowblock_splitkv(const Params& params, cons //////////////////////////////////////////////////////////////////////////////////////////////////// -template +template inline __device__ void compute_attn(const Params& params) { const int m_block = blockIdx.x; // The block index for the batch. @@ -1047,12 +1065,12 @@ inline __device__ void compute_attn(const Params& params) { // the attention matrix. This way, as long as we have the batch, head, and the location of // the 16 x 32 block within the attention matrix, we can generate the exact same dropout pattern. - flash::compute_attn_1rowblock(params, bidb, bidh, m_block); + flash::compute_attn_1rowblock(params, bidb, bidh, m_block); } //////////////////////////////////////////////////////////////////////////////////////////////////// -template +template inline __device__ void compute_attn_splitkv(const Params& params) { const int m_block = blockIdx.x; // The block index for the batch. @@ -1061,24 +1079,23 @@ inline __device__ void compute_attn_splitkv(const Params& params) { const int bidh = Split ? blockIdx.z - bidb * params.h : blockIdx.z; const int n_split_idx = Split ? blockIdx.y : 0; const int num_n_splits = Split ? gridDim.y : 1; - flash::compute_attn_1rowblock_splitkv(params, bidb, bidh, m_block, n_split_idx, num_n_splits); + flash::compute_attn_1rowblock_splitkv(params, bidb, bidh, m_block, n_split_idx, num_n_splits); } //////////////////////////////////////////////////////////////////////////////////////////////////// -template +template inline __device__ void combine_attn_seqk_parallel(const Params& params) { using Element = typename Kernel_traits::Element; using ElementAccum = typename Kernel_traits::ElementAccum; using index_t = typename Kernel_traits::index_t; constexpr int kMaxSplits = 1 << Log_max_splits; - constexpr int kBlockM = 16; constexpr int kHeadDim = Kernel_traits::kHeadDim; + constexpr int kNThreads = Kernel_traits::kNThreads; static_assert(kMaxSplits <= 128, "kMaxSplits must be <= 128"); - // static_assert(kMaxSplits <= 8, "kMaxSplits must be <= 8 for now, will extend layer"); - static_assert(kBlockM == 16 || kBlockM == 32, "kBlockM must be 16 or 32"); - static_assert(Kernel_traits::kNThreads == 128, "We assume that each block has 128 threads"); + static_assert(kBlockM == 4 || kBlockM == 8 || kBlockM == 16 || kBlockM == 32, "kBlockM must be 4, 8, 16 or 32"); + static_assert(kNThreads == 128, "We assume that each block has 128 threads"); // Shared memory. 
// kBlockM + 1 instead of kBlockM to reduce bank conflicts. @@ -1094,10 +1111,10 @@ inline __device__ void combine_attn_seqk_parallel(const Params& params) { make_stride(params.b * params.h * params.seqlen_q, _1{})); Tensor gLSE = make_tensor(make_gmem_ptr(reinterpret_cast(params.softmax_lse_ptr) + row_offset_lse), Shape>{}, Stride<_1>{}); - constexpr int kNLsePerThread = (kMaxSplits * kBlockM + Kernel_traits::kNThreads - 1) / Kernel_traits::kNThreads; + constexpr int kNLsePerThread = (kMaxSplits * kBlockM + kNThreads - 1) / kNThreads; // Read the LSE values from gmem and store them in shared memory, then tranpose them. - constexpr int kRowsPerLoadLSE = Kernel_traits::kNThreads / kBlockM; + constexpr int kRowsPerLoadLSE = kNThreads / kBlockM; #pragma unroll for (int l = 0; l < kNLsePerThread; ++l) { const int row = l * kRowsPerLoadLSE + tidx / kBlockM; @@ -1165,7 +1182,12 @@ inline __device__ void combine_attn_seqk_parallel(const Params& params) { Tensor gOaccum = make_tensor(make_gmem_ptr(reinterpret_cast(params.oaccum_ptr) + row_offset_oaccum), Shape, Int>{}, Stride, _1>{}); - typename Kernel_traits::GmemTiledCopyOaccum gmem_tiled_copy_Oaccum; + constexpr int kBlockN = kNThreads / kBlockM; + using GmemLayoutAtomOaccum = Layout, Int>, Stride, _1>>; + using GmemTiledCopyOaccum = decltype(make_tiled_copy(Copy_Atom{}, + GmemLayoutAtomOaccum{}, + Layout>{})); // Val layout, 4 vals per store + GmemTiledCopyOaccum gmem_tiled_copy_Oaccum; auto gmem_thr_copy_Oaccum = gmem_tiled_copy_Oaccum.get_thread_slice(tidx); Tensor tOgOaccum = gmem_thr_copy_Oaccum.partition_S(gOaccum); Tensor tOrO = make_tensor(shape(tOgOaccum)); @@ -1183,8 +1205,7 @@ inline __device__ void combine_attn_seqk_parallel(const Params& params) { tOpOaccum(k) = get<1>(tOcOaccum(0, 0, k)) < params.d; } } -// Load Oaccum in then scale and accumulate to O -#pragma unroll 2 + // Load Oaccum in then scale and accumulate to O for (int split = 0; split < params.num_splits; ++split) { flash::copy( gmem_tiled_copy_Oaccum, tOgOaccum, tOrOaccum, tOcOaccum, tOpOaccum, params.b * params.h * params.seqlen_q - bidx * kBlockM); diff --git a/onnxruntime/contrib_ops/cuda/bert/flash_attention/flash_fwd_launch_template.h b/onnxruntime/contrib_ops/cuda/bert/flash_attention/flash_fwd_launch_template.h index 82dfa59b8f8e7..87d189a803f8a 100644 --- a/onnxruntime/contrib_ops/cuda/bert/flash_attention/flash_fwd_launch_template.h +++ b/onnxruntime/contrib_ops/cuda/bert/flash_attention/flash_fwd_launch_template.h @@ -10,29 +10,30 @@ namespace onnxruntime { namespace flash { -template +template __global__ void flash_fwd_kernel(Flash_fwd_params params) { + static_assert(!(Is_causal && Is_local)); // If Is_local is true, Is_causal should be false #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 - flash::compute_attn(params); + flash::compute_attn(params); #else (void)params; #endif } -template +template __global__ void flash_fwd_splitkv_kernel(Flash_fwd_params params) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 - flash::compute_attn_splitkv(params); + flash::compute_attn_splitkv(params); #else (void)params; #endif } -template +template __global__ void flash_fwd_splitkv_combine_kernel(Flash_fwd_params params) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 static_assert(Log_max_splits >= 1); - flash::combine_attn_seqk_parallel(params); + flash::combine_attn_seqk_parallel(params); #else (void)params; #endif @@ -52,20 +53,25 @@ void run_flash_fwd(Flash_fwd_params& params, cudaStream_t stream) { const bool is_even_K = params.d == 
Kernel_traits::kHeadDim; BOOL_SWITCH(is_even_MN, IsEvenMNConst, [&] { BOOL_SWITCH(is_even_K, IsEvenKConst, [&] { - // Will only return softmax if dropout, to reduce compilation time. - auto kernel = &flash_fwd_kernel; - // auto kernel = &flash_fwd_kernel; - if (smem_size >= 48 * 1024) { - cudaFuncSetAttribute( - kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); - // ORT_ENFORCE(cudaFuncSetAttribute( - // kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size)); - } - // int ctas_per_sm; - // cudaError status_ = cudaOccupancyMaxActiveBlocksPerMultiprocessor( - // &ctas_per_sm, kernel, Kernel_traits::kNThreads, smem_size); - // printf("smem_size = %d, CTAs per SM = %d\n", int(smem_size), ctas_per_sm); - kernel<<>>(params); + BOOL_SWITCH(params.window_size_left >= 0 || params.window_size_right >= 0, Is_local, [&] { + // Will only return softmax if dropout, to reduce compilation time. + // If not IsEvenKConst, we also set IsEvenMNConst to false to reduce number of templates. + // If head dim > 128, set IsEvenMNConst to false to reduce number of templates + // If Is_local, set Is_causal to false + auto kernel = &flash_fwd_kernel < Kernel_traits, Is_causal && !Is_local, Is_local, IsEvenMNConst && IsEvenKConst && !Is_local && Kernel_traits::kHeadDim <= 128, IsEvenKConst, false > ; + // auto kernel = &flash_fwd_kernel; + if (smem_size >= 48 * 1024) { + cudaFuncSetAttribute( + kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); + // ORT_ENFORCE(cudaFuncSetAttribute( + // kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size)); + } + // int ctas_per_sm; + // cudaError status_ = cudaOccupancyMaxActiveBlocksPerMultiprocessor( + // &ctas_per_sm, kernel, Kernel_traits::kNThreads, smem_size); + // printf("smem_size = %d, CTAs per SM = %d\n", int(smem_size), ctas_per_sm); + kernel<<>>(params); + }); }); }); } @@ -82,40 +88,46 @@ void run_flash_splitkv_fwd(Flash_fwd_params& params, cudaStream_t stream) { BOOL_SWITCH(params.is_causal, Is_causal, [&] { BOOL_SWITCH(is_even_MN, IsEvenMNConst, [&] { BOOL_SWITCH(is_even_K, IsEvenKConst, [&] { - BOOL_SWITCH(params.num_splits > 1, Split, [&] { - BOOL_SWITCH(params.knew_ptr != nullptr, Append_KV, [&] { - // If Append_KV, then we must have seqlen_offsets, which means cu_seqlens_k != nullptr. - // printf("About to launch, Split = %d, Append_KV = %d, knew_ptr = %p\n", Split, Append_KV, params.knew_ptr); - auto kernel = &flash_fwd_splitkv_kernel < Kernel_traits, Is_causal, IsEvenMNConst && !Append_KV, IsEvenKConst, Split, Append_KV > ; - // auto kernel = &flash_fwd_splitkv_kernel; - // auto kernel = &flash_fwd_splitkv_kernel; - if (smem_size >= 48 * 1024) { - cudaFuncSetAttribute( - kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); - } - kernel<<>>(params); + BOOL_SWITCH(params.window_size_left >= 0 || params.window_size_right >= 0, Is_local, [&] { + BOOL_SWITCH(params.num_splits > 1, Split, [&] { + BOOL_SWITCH(params.knew_ptr != nullptr, Append_KV, [&] { + // If Append_KV, then we must have seqlen_offsets, which means cu_seqlens_k != nullptr. 
+ // printf("About to launch, Split = %d, Append_KV = %d, knew_ptr = %p\n", Split, Append_KV, params.knew_ptr); + auto kernel = &flash_fwd_splitkv_kernel < Kernel_traits, Is_causal && !Is_local, Is_local, IsEvenMNConst && !Append_KV && IsEvenKConst && !Is_local && Kernel_traits::kHeadDim <= 128, IsEvenKConst, Split, Append_KV > ; + // auto kernel = &flash_fwd_splitkv_kernel; + // auto kernel = &flash_fwd_splitkv_kernel; + if (smem_size >= 48 * 1024) { + cudaFuncSetAttribute( + kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); + } + kernel<<>>(params); + }); }); }); }); }); }); if (params.num_splits > 1) { - dim3 grid_combine((params.b * params.h * params.seqlen_q + 16 - 1) / 16); + // We want kBlockM to be as small as possible for more parallelism. + // With 128 threads we can load 512 elements at a time, so if headdim is divisible by 128, kBlockM = 4. + // If headdim is divisible by 64, then we set kBlockM = 8, etc. + constexpr static int kBlockM = Kernel_traits::kHeadDim % 128 == 0 ? 4 : (Kernel_traits::kHeadDim % 64 == 0 ? 8 : 16); + dim3 grid_combine((params.b * params.h * params.seqlen_q + kBlockM - 1) / kBlockM); BOOL_SWITCH(is_even_K, IsEvenKConst, [&] { if (params.num_splits <= 2) { - flash_fwd_splitkv_combine_kernel<<>>(params); + flash_fwd_splitkv_combine_kernel<<>>(params); } else if (params.num_splits <= 4) { - flash_fwd_splitkv_combine_kernel<<>>(params); + flash_fwd_splitkv_combine_kernel<<>>(params); } else if (params.num_splits <= 8) { - flash_fwd_splitkv_combine_kernel<<>>(params); + flash_fwd_splitkv_combine_kernel<<>>(params); } else if (params.num_splits <= 16) { - flash_fwd_splitkv_combine_kernel<<>>(params); + flash_fwd_splitkv_combine_kernel<<>>(params); } else if (params.num_splits <= 32) { - flash_fwd_splitkv_combine_kernel<<>>(params); + flash_fwd_splitkv_combine_kernel<<>>(params); } else if (params.num_splits <= 64) { - flash_fwd_splitkv_combine_kernel<<>>(params); + flash_fwd_splitkv_combine_kernel<<>>(params); } else if (params.num_splits <= 128) { - flash_fwd_splitkv_combine_kernel<<>>(params); + flash_fwd_splitkv_combine_kernel<<>>(params); } }); } @@ -130,7 +142,7 @@ void run_mha_fwd_splitkv_dispatch(Flash_fwd_params& params, cudaStream_t stream) template void run_mha_fwd_hdim32(Flash_fwd_params& params, cudaStream_t stream) { - constexpr int Headdim = 32; + constexpr static int Headdim = 32; BOOL_SWITCH(params.is_causal, Is_causal, [&] { run_flash_fwd, Is_causal>(params, stream); }); @@ -138,7 +150,7 @@ void run_mha_fwd_hdim32(Flash_fwd_params& params, cudaStream_t stream) { template void run_mha_fwd_hdim64(Flash_fwd_params& params, cudaStream_t stream) { - constexpr int Headdim = 64; + constexpr static int Headdim = 64; BOOL_SWITCH(params.is_causal, Is_causal, [&] { // Using 8 warps is 18% slower for seqlen=2k, 2 warps is 5% slower // Using block size (64 x 256) is 27% slower for seqlen=2k @@ -174,8 +186,8 @@ void run_mha_fwd_hdim96(Flash_fwd_params& params, cudaStream_t stream) { template void run_mha_fwd_hdim128(Flash_fwd_params& params, cudaStream_t stream) { - constexpr int Headdim = 128; - const bool is_sm8x = params.dprops->major == 8 && params.dprops->minor > 0; + constexpr static int Headdim = 128; + bool is_sm8x = params.dprops->major == 8 && params.dprops->minor > 0; BOOL_SWITCH(params.is_causal, Is_causal, [&] { // For sm86 or sm89, 64 x 64 is the fastest for causal (because it's square), // and 128 x 32 (48 KB smem) is the fastest for non-causal since we get 2 CTAs per SM. 
@@ -201,8 +213,8 @@ void run_mha_fwd_hdim128(Flash_fwd_params& params, cudaStream_t stream) { template void run_mha_fwd_hdim160(Flash_fwd_params& params, cudaStream_t stream) { - constexpr int Headdim = 160; - const bool is_sm8x = params.dprops->major == 8 && params.dprops->minor > 0; + constexpr static int Headdim = 160; + bool is_sm8x = params.dprops->major == 8 && params.dprops->minor > 0; BOOL_SWITCH(params.is_causal, Is_causal, [&] { // For A100, H100, 128 x 32 is the fastest. // For sm86 or sm89, 64 x 64 is the fastest for causal (because it's square), @@ -241,12 +253,11 @@ void run_mha_fwd_hdim192(Flash_fwd_params& params, cudaStream_t stream) { template void run_mha_fwd_hdim224(Flash_fwd_params& params, cudaStream_t stream) { - constexpr size_t Headdim = 224; - constexpr size_t threshold = 2 * Headdim * (128 + 2 * 64); - size_t max_smem_per_block = params.dprops->sharedMemPerBlockOptin; + constexpr static int Headdim = 224; + int max_smem_per_block = params.dprops->sharedMemPerBlockOptin; // printf("max_smem_per_block = %d\n", max_smem_per_block); BOOL_SWITCH(params.is_causal, Is_causal, [&] { - if (max_smem_per_block >= threshold) { // 112 KB + if (max_smem_per_block >= 2 * Headdim * (128 + 2 * 64)) { // 112 KB run_flash_fwd, Is_causal>(params, stream); } else { run_flash_fwd, Is_causal>(params, stream); @@ -262,16 +273,14 @@ void run_mha_fwd_hdim224(Flash_fwd_params& params, cudaStream_t stream) { template void run_mha_fwd_hdim256(Flash_fwd_params& params, cudaStream_t stream) { - constexpr size_t Headdim = 256; - constexpr size_t min_threshold = 2 * Headdim * (128 + 2 * 64); - constexpr size_t max_threshold = 4 * Headdim * (64 + 2 * 64); + constexpr static int Headdim = 256; size_t max_smem_per_sm = params.dprops->sharedMemPerMultiprocessor; size_t max_smem_per_block = params.dprops->sharedMemPerBlockOptin; // printf("max_smem_per_sm = %d, max_smem_per_block = %d\n", max_smem_per_sm, max_smem_per_block); BOOL_SWITCH(params.is_causal, Is_causal, [&] { // For A100, we want to run with 128 x 64 (128KB smem). // For H100 we want to run with 64 x 64 (96KB smem) since then we can get 2 CTAs per SM. - if (max_smem_per_block >= min_threshold && max_smem_per_sm < max_threshold) { + if (max_smem_per_block >= 2 * Headdim * (128 + 2 * 64) && max_smem_per_sm < 4 * Headdim * (64 + 2 * 64)) { run_flash_fwd, Is_causal>(params, stream); } else { run_flash_fwd, Is_causal>(params, stream); diff --git a/onnxruntime/contrib_ops/cuda/bert/flash_attention/kernel_traits.h b/onnxruntime/contrib_ops/cuda/bert/flash_attention/kernel_traits.h index 134f159e258c4..1c0ed7f2fc2e8 100644 --- a/onnxruntime/contrib_ops/cuda/bert/flash_attention/kernel_traits.h +++ b/onnxruntime/contrib_ops/cuda/bert/flash_attention/kernel_traits.h @@ -161,7 +161,14 @@ struct Flash_fwd_kernel_traits : public Base { cute::Stride<_16, _1>>>; using GmemTiledCopyOaccum = decltype(make_tiled_copy(Copy_Atom{}, GmemLayoutAtomOaccum{}, - cute::Layout>{})); // Val layout, 4 vals per store + Layout>{})); // Val layout, 4 vals per store + using GmemLayoutAtomRotcossin = GmemLayoutAtom; + using GmemTiledCopyRotcossin = decltype(make_tiled_copy(Copy_Atom, Element>{}, + GmemLayoutAtomRotcossin{}, + Layout>{})); // Val layout, 4 vals per load + using GmemTiledCopyRotcossinCont = decltype(make_tiled_copy(Copy_Atom{}, + GmemLayoutAtomRotcossin{}, + Layout>{})); // Val layout, 8 vals per load }; // Is_V_in_regs is an option to reduce smem usage, but will increase register pressue. 
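A quick sanity check of the tile-size arithmetic behind the head-dim dispatch above (a standalone illustrative sketch, not part of the patch; reading the shared-memory expression as fp16 Q/K/V tiles is an assumption inferred from the constants):

def smem_bytes(head_dim, block_m, block_n):
    # Assumed meaning: a Q tile (block_m x head_dim) plus K and V tiles (block_n x head_dim each),
    # at 2 bytes per fp16 element -- the same expression the dispatch code compares against.
    return 2 * head_dim * (block_m + 2 * block_n)

# hdim224: 128 x 64 tiles -> 2 * 224 * (128 + 2 * 64) = 114688 bytes, the "112 KB" threshold above.
assert smem_bytes(224, 128, 64) == 112 * 1024
# hdim256: 128 x 64 tiles need 128 KB; the 64 x 64 fallback fits in 96 KB (the 2-CTAs-per-SM case).
assert smem_bytes(256, 128, 64) == 128 * 1024 and smem_bytes(256, 64, 64) == 96 * 1024

def combine_kblock_m(head_dim):
    # Smaller kBlockM gives a larger combine grid, hence more parallelism. With 128 threads loading
    # 512 elements per pass, a head_dim that is a multiple of 128 allows kBlockM = 4, a multiple of
    # 64 allows kBlockM = 8, and anything else falls back to 16.
    return 4 if head_dim % 128 == 0 else (8 if head_dim % 64 == 0 else 16)

assert [combine_kblock_m(d) for d in (128, 64, 96)] == [4, 8, 16]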
diff --git a/onnxruntime/contrib_ops/cuda/bert/flash_attention/softmax.h b/onnxruntime/contrib_ops/cuda/bert/flash_attention/softmax.h index 842edf3a98a86..8017f83bbb01d 100644 --- a/onnxruntime/contrib_ops/cuda/bert/flash_attention/softmax.h +++ b/onnxruntime/contrib_ops/cuda/bert/flash_attention/softmax.h @@ -139,10 +139,11 @@ inline __device__ void apply_mask(Tensor& tensor, const int max_ } } -template -inline __device__ void apply_mask_causal(Tensor& tensor, const int col_idx_offset_, - const int max_seqlen_k, const int row_idx_offset_, - const int max_seqlen_q, const int warp_row_stride) { +template +inline __device__ void apply_mask_local(Tensor& tensor, const int col_idx_offset_, + const int max_seqlen_k, const int row_idx_offset_, + const int max_seqlen_q, const int warp_row_stride, + const int window_size_left, const int window_size_right) { // tensor has shape (ncol=(2, MMA_M), nrow=(2, MMA_N)) static_assert(Layout::rank == 2, "Only support 2D Tensor"); const int lane_id = threadIdx.x % 32; @@ -155,14 +156,15 @@ inline __device__ void apply_mask_causal(Tensor& tensor, const i #pragma unroll for (int i = 0; i < size<0, 0>(tensor); ++i) { const int row_idx = row_idx_base + i * 8; - const int col_idx_limit = std::min(max_seqlen_k, row_idx + 1 + max_seqlen_k - max_seqlen_q); + const int col_idx_limit_left = std::max(0, row_idx + max_seqlen_k - max_seqlen_q - window_size_left); + const int col_idx_limit_right = std::min(max_seqlen_k, row_idx + 1 + max_seqlen_k - max_seqlen_q + window_size_right); #pragma unroll for (int nj = 0; nj < size<1, 1>(tensor); ++nj) { const int col_idx_base = col_idx_offset + nj * 8; #pragma unroll for (int j = 0; j < size<1, 0>(tensor); ++j) { const int col_idx = col_idx_base + j; - if (col_idx >= col_idx_limit) { + if (col_idx >= col_idx_limit_right || (HasWSLeft && col_idx < col_idx_limit_left)) { tensor(make_coord(i, mi), make_coord(j, nj)) = -INFINITY; } } @@ -176,6 +178,15 @@ inline __device__ void apply_mask_causal(Tensor& tensor, const i } } +template +inline __device__ void apply_mask_causal(Tensor& tensor, const int col_idx_offset_, + const int max_seqlen_k, const int row_idx_offset_, + const int max_seqlen_q, const int warp_row_stride) { + // Causal masking is equivalent to local masking with window_size_left = infinity and window_size_right = 0 + apply_mask_local(tensor, col_idx_offset_, max_seqlen_k, row_idx_offset_, + max_seqlen_q, warp_row_stride, -1, 0); +} + template inline __device__ void apply_mask_causal_w_idx( Tensor& tensor, Tensor const& idx_rowcol, diff --git a/onnxruntime/contrib_ops/cuda/bert/flash_attention/utils.h b/onnxruntime/contrib_ops/cuda/bert/flash_attention/utils.h index 02042e183f808..271112c5e890a 100644 --- a/onnxruntime/contrib_ops/cuda/bert/flash_attention/utils.h +++ b/onnxruntime/contrib_ops/cuda/bert/flash_attention/utils.h @@ -307,7 +307,7 @@ template inline __device__ void copy(TiledCopy tiled_copy, Tensor const& S, Tensor& D, Tensor const& identity_MN, - Tensor const& predicate_K, int max_MN = 0) { + Tensor const& predicate_K, const int max_MN = 0) { CUTE_STATIC_ASSERT_V(rank(S) == Int<3>{}); CUTE_STATIC_ASSERT_V(rank(D) == Int<3>{}); CUTE_STATIC_ASSERT_V(size<0>(S) == size<0>(D)); // MMA @@ -334,65 +334,161 @@ inline __device__ void copy(TiledCopy tiled_copy, Tensor const //////////////////////////////////////////////////////////////////////////////////////////////////// -template -inline __device__ void copy_2_sources(TiledCopy tiled_copy, Tensor const& S0, - Tensor const& S1, +inline __device__ void 
copy_w_min_idx(Tensor const& S, Tensor& D, Tensor const& identity_MN, Tensor const& predicate_K, - const int max_MN = 0, const int row_idx_switch = 0) { - CUTE_STATIC_ASSERT_V(rank(S0) == Int<3>{} && rank(S1) == Int<3>{}); + const int max_MN = 0, const int min_MN = 0) { + CUTE_STATIC_ASSERT_V(rank(S) == Int<3>{}); CUTE_STATIC_ASSERT_V(rank(D) == Int<3>{}); - CUTE_STATIC_ASSERT_V(size<0>(S0) == size<0>(D) && size<0>(S1) == size<0>(D)); // MMA - CUTE_STATIC_ASSERT_V(size<1>(S0) == size<1>(D) && size<1>(S1) == size<1>(D)); // MMA_M - CUTE_STATIC_ASSERT_V(size<2>(S0) == size<2>(D) && size<2>(S1) == size<2>(D)); // MMA_K - // There's no case where !Clear_OOB_K && Clear_OOB_MN - static_assert(!(Clear_OOB_MN && !Clear_OOB_K)); -// if (threadIdx.x == 0 && blockIdx.y == 1 && blockIdx.z == 0) { printf("Is_2_sources = %d, max_MN = %d, row_idx_switch = %d\n", Is_2_sources, max_MN, row_idx_switch); } -// if (threadIdx.x == 0 && blockIdx.z == 0) { printf("blockIdx.y = %d, Is_2_sources = %d, max_MN = %d, row_idx_switch = %d\n", blockIdx.y, Is_2_sources, max_MN, row_idx_switch); } + CUTE_STATIC_ASSERT_V(size<0>(S) == size<0>(D)); // MMA + CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(D)); // MMA_M + CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(D)); // MMA_K +// if (threadIdx.x == 0 && blockIdx.z == 0) { printf("blockIdx.y = %d, max_MN = %d, min_MN = %d\n", blockIdx.y, max_MN, min_MN); } #pragma unroll - for (int m = 0; m < size<1>(S0); ++m) { - auto& S = !Is_2_sources || get<0>(identity_MN(0, m, 0)) < row_idx_switch ? S0 : S1; - if (Is_even_MN || get<0>(identity_MN(0, m, 0)) < max_MN) { + for (int m = 0; m < size<1>(S); ++m) { + // if (threadIdx.x == 0 && blockIdx.z == 0) { printf("blockIdx.y = %d, m = %d\n", blockIdx.y, get<0>(identity_MN(0, m, 0))); } + if (get<0>(identity_MN(0, m, 0)) >= min_MN && get<0>(identity_MN(0, m, 0)) < max_MN) { +// if (threadIdx.x == 0 && blockIdx.z == 0) { printf("Inner loop, blockIdx.y = %d, m = %d\n", blockIdx.y, get<0>(identity_MN(0, m, 0))); } #pragma unroll - for (int k = 0; k < size<2>(S0); ++k) { + for (int k = 0; k < size<2>(S); ++k) { if (Is_even_K || predicate_K(k)) { - cute::copy(tiled_copy, S(_, m, k), D(_, m, k)); + cute::copy(S(_, m, k), D(_, m, k)); + } + } + } + } +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +template +inline __device__ void copy_rotary_interleaved(Tensor const& S, + Tensor& D, + Tensor const& Cos, + Tensor const& Sin, + Tensor const& identity_MN, + const int max_MN, const int min_MN, + const int dim, const int rotary_dim) { + CUTE_STATIC_ASSERT_V(rank(S) == Int<3>{}); + CUTE_STATIC_ASSERT_V(rank(D) == Int<3>{}); + CUTE_STATIC_ASSERT_V(size<0>(S) == size<0>(D)); // MMA + CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(D)); // MMA_M + CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(D)); // MMA_K + CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(Cos)); // MMA_M + CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(Cos)); // MMA_K + CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(Sin)); // MMA_M + CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(Sin)); // MMA_K + CUTE_STATIC_ASSERT_V(size<0>(Cos) == size<0>(Sin)); // MMA_K + static_assert(decltype(size<0>(S))::value == decltype(size<0>(Cos))::value * 2); + static_assert(decltype(size<0>(Cos))::value % 2 == 0); // Since we do fast conversion from fp16/bf16 to fp32 + Tensor rCos = make_fragment_like(Cos); + Tensor rSin = make_fragment_like(Sin); + Tensor rS = make_fragment_like(S); +#pragma unroll + for (int m = 0; m < size<1>(S); ++m) { + if (get<0>(identity_MN(0, m, 0)) 
>= min_MN && get<0>(identity_MN(0, m, 0)) < max_MN) { +#pragma unroll + for (int k = 0; k < size<2>(S); ++k) { + if (Is_even_K || get<1>(identity_MN(0, 0, k)) < dim) { + cute::copy(S(_, m, k), rS(_, m, k)); + if (get<1>(identity_MN(0, 0, k)) < rotary_dim) { + cute::copy(Cos(_, m, k), rCos(_, m, k)); + cute::copy(Sin(_, m, k), rSin(_, m, k)); + Tensor S_fp32 = convert_type(rS(_, m, k)); + Tensor cos_fp32 = convert_type(rCos(_, m, k)); + Tensor sin_fp32 = convert_type(rSin(_, m, k)); +#pragma unroll + for (int i = 0; i < size<0>(rS) / 2; ++i) { + float real = S_fp32(2 * i) * cos_fp32(i) - S_fp32(2 * i + 1) * sin_fp32(i); + float imag = S_fp32(2 * i) * sin_fp32(i) + S_fp32(2 * i + 1) * cos_fp32(i); + S_fp32(2 * i) = real; + S_fp32(2 * i + 1) = imag; + } + // Idk but I need to copy for the convert_type to work + Tensor S_fp32_copy = make_fragment_like(S_fp32); + cute::copy(S_fp32, S_fp32_copy); + using T = typename Engine0::value_type; + Tensor S_og_type = convert_type(S_fp32_copy); + cute::copy(S_og_type, rS(_, m, k)); + } + cute::copy(rS(_, m, k), D(_, m, k)); } else if (Clear_OOB_K) { cute::clear(D(_, m, k)); } } - } else if (Clear_OOB_MN) { - cute::clear(D(_, m, _)); } } } //////////////////////////////////////////////////////////////////////////////////////////////////// -template -inline __device__ void copy_w_min_idx(Tensor const& S, - Tensor& D, Tensor const& identity_MN, - Tensor const& predicate_K, - const int max_MN = 0, const int min_MN = 0) { +inline __device__ void copy_rotary_contiguous(Tensor const& S, + Tensor& D, + Tensor const& Cos, + Tensor const& Sin, + Tensor const& identity_MN, + const int max_MN, const int min_MN, + const int dim, const int rotary_dim) { CUTE_STATIC_ASSERT_V(rank(S) == Int<3>{}); CUTE_STATIC_ASSERT_V(rank(D) == Int<3>{}); - CUTE_STATIC_ASSERT_V(size<0>(S) == size<0>(D)); // MMA - CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(D)); // MMA_M - CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(D)); // MMA_K -// if (threadIdx.x == 0 && blockIdx.z == 0) { printf("blockIdx.y = %d, max_MN = %d, min_MN = %d\n", blockIdx.y, max_MN, min_MN); } + CUTE_STATIC_ASSERT_V(size<0>(S) == size<0>(D)); // MMA + CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(D)); // MMA_M + CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(D)); // MMA_K + CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(Cos)); // MMA_M + CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(Cos)); // MMA_K + CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(Sin)); // MMA_M + CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(Sin)); // MMA_K + CUTE_STATIC_ASSERT_V(size<0>(S) == size<0>(Cos)); // MMA + CUTE_STATIC_ASSERT_V(size<0>(Cos) == size<0>(Sin)); + static_assert(decltype(size<0>(Cos))::value % 2 == 0); // Since we do fast conversion from fp16/bf16 to fp32 + Tensor rCos = make_fragment_like(Cos); + Tensor rSin = make_fragment_like(Sin); + Tensor rS = make_fragment_like(S); + Tensor rS_other = make_fragment_like(rS(_, 0, 0)); #pragma unroll for (int m = 0; m < size<1>(S); ++m) { - // if (threadIdx.x == 0 && blockIdx.z == 0) { printf("blockIdx.y = %d, m = %d\n", blockIdx.y, get<0>(identity_MN(0, m, 0))); } if (get<0>(identity_MN(0, m, 0)) >= min_MN && get<0>(identity_MN(0, m, 0)) < max_MN) { -// if (threadIdx.x == 0 && blockIdx.z == 0) { printf("Inner loop, blockIdx.y = %d, m = %d\n", blockIdx.y, get<0>(identity_MN(0, m, 0))); } #pragma unroll for (int k = 0; k < size<2>(S); ++k) { - if (Is_even_K || predicate_K(k)) { - cute::copy(S(_, m, k), D(_, m, k)); + if (Is_even_K || get<1>(identity_MN(0, 0, k)) < dim) { + cute::copy(S(_, m, k), rS(_, m, k)); + if 
(get<1>(identity_MN(0, 0, k)) < rotary_dim) { + const bool is_left = get<1>(identity_MN(0, 0, k)) < rotary_dim / 2; + Tensor gS_other = make_tensor(S(_, m, k).data() + (is_left ? rotary_dim / 2 : -rotary_dim / 2), S(_, m, k).layout()); + cute::copy(gS_other, rS_other); + // if (cute::thread0()) { print_tensor(rS(_, m, k)); print_tensor(rS_other); } + Tensor gCos = make_tensor(Cos(_, m, k).data() + (is_left ? 0 : -rotary_dim / 2), Cos(_, m, k).layout()); + Tensor gSin = make_tensor(Sin(_, m, k).data() + (is_left ? 0 : -rotary_dim / 2), Sin(_, m, k).layout()); + cute::copy(gCos, rCos(_, m, k)); + cute::copy(gSin, rSin(_, m, k)); + // if (cute::thread0()) { print_tensor(rCos(_, m, k)); print_tensor(rSin(_, m, k)); } + Tensor S_fp32 = convert_type(rS(_, m, k)); + Tensor S_other_fp32 = convert_type(rS_other); + Tensor cos_fp32 = convert_type(rCos(_, m, k)); + Tensor sin_fp32 = convert_type(rSin(_, m, k)); +#pragma unroll + for (int i = 0; i < size<0>(rS); ++i) { + S_fp32(i) = S_fp32(i) * cos_fp32(i) + S_other_fp32(i) * (is_left ? -sin_fp32(i) : sin_fp32(i)); + } + // Idk but I need to copy for the convert_type to work + Tensor S_fp32_copy = make_fragment_like(S_fp32); + cute::copy(S_fp32, S_fp32_copy); + using T = typename Engine0::value_type; + Tensor S_og_type = convert_type(S_fp32_copy); + cute::copy(S_og_type, rS(_, m, k)); + // if (cute::thread0()) { print_tensor(rS(_, m, k)); } + } + cute::copy(rS(_, m, k), D(_, m, k)); + } else if (Clear_OOB_K) { + cute::clear(D(_, m, k)); } } } diff --git a/onnxruntime/contrib_ops/cuda/bert/group_query_attention.cc b/onnxruntime/contrib_ops/cuda/bert/group_query_attention.cc index f21dff08e0350..93892169f6c79 100644 --- a/onnxruntime/contrib_ops/cuda/bert/group_query_attention.cc +++ b/onnxruntime/contrib_ops/cuda/bert/group_query_attention.cc @@ -44,9 +44,8 @@ GroupQueryAttention::GroupQueryAttention(const OpKernelInfo& info) ORT_ENFORCE(info.GetAttr("kv_num_heads", &kv_num_heads).IsOK() && kv_num_heads > 0 && num_heads % kv_num_heads == 0); num_heads_ = static_cast(num_heads); kv_num_heads_ = static_cast(kv_num_heads); - is_unidirectional_ = true; - // left_padding_ = info.GetAttrOrDefault("left_padding_last_token", 0) == 1; is_past_bsnh_ = false; // info.GetAttrOrDefault("is_past_bsnh", 1) == 1; + local_window_size_ = static_cast(info.GetAttrOrDefault("local_window_size", -1)); scale_ = info.GetAttrOrDefault("scale", 0.0f); #if USE_FLASH_ATTENTION @@ -92,8 +91,7 @@ Status GroupQueryAttention::ComputeInternal(OpKernelContext* context) const { is_past_bsnh_, scale_, device_prop.maxThreadsPerBlock)); - parameters.is_unidirectional = is_unidirectional_; - // parameters.left_padding = left_padding_; + parameters.local_window_size = local_window_size_; int sequence_length = parameters.sequence_length; TensorShapeVector output_shape(3); @@ -139,6 +137,7 @@ Status GroupQueryAttention::ComputeInternal(OpKernelContext* context) const { bool use_memory_efficient_attention = !use_flash_attention && !disable_memory_efficient_attention_ && + local_window_size_ == -1 && (parameters.head_size & 7) == 0 && parameters.sequence_length <= parameters.seqlen_past_kv_cache + parameters.sequence_length && (sizeof(T) == 2 || parameters.sequence_length >= attention::kMinSeqLenForMemoryEfficientAttentionFp32) && @@ -222,6 +221,13 @@ Status GroupQueryAttention::ComputeInternal(OpKernelContext* context) const { data.k = reinterpret_cast(k_buffer.get()); data.v = reinterpret_cast(v_buffer.get()); } + if (k_buffer != nullptr) { + data.k = reinterpret_cast(k_buffer.get()); + data.v 
= reinterpret_cast(v_buffer.get()); + } + if (fmha_buffer != nullptr) { + data.fmha_buffer = reinterpret_cast(fmha_buffer.get()); + } cublasHandle_t cublas = GetCublasHandle(context); diff --git a/onnxruntime/contrib_ops/cuda/bert/group_query_attention.h b/onnxruntime/contrib_ops/cuda/bert/group_query_attention.h index aade0436dc141..54a8127e29e7b 100644 --- a/onnxruntime/contrib_ops/cuda/bert/group_query_attention.h +++ b/onnxruntime/contrib_ops/cuda/bert/group_query_attention.h @@ -22,8 +22,7 @@ class GroupQueryAttention final : public CudaKernel { protected: int num_heads_; // number of attention heads int kv_num_heads_; // different for k and v for group query attention - // bool left_padding_; // shifts last token to end of buffer - bool is_unidirectional_; // causal + int local_window_size_; bool is_past_bsnh_; float scale_; bool disable_flash_attention_; diff --git a/onnxruntime/contrib_ops/cuda/bert/group_query_attention_impl.cu b/onnxruntime/contrib_ops/cuda/bert/group_query_attention_impl.cu index 2d158155eeba9..b22ccb68c1e7b 100644 --- a/onnxruntime/contrib_ops/cuda/bert/group_query_attention_impl.cu +++ b/onnxruntime/contrib_ops/cuda/bert/group_query_attention_impl.cu @@ -468,55 +468,6 @@ Status LaunchGetSeqlenBuff(contrib::GroupQueryAttentionParameters& parameters, i return CUDA_CALL(cudaGetLastError()); } -// // Kernel to append new kv to kv buffer in place -// template -// __global__ void LeftPadLast(const int max_seqlen, -// T* kv_buff, -// const int* seqlens_k) { // refers to kv buff; otherwise bnsh -// const int h = threadIdx.x; -// const int n = blockIdx.x; -// const int b = blockIdx.y; - -// const int num_heads = gridDim.x; -// const int H = blockDim.x; - -// const int present_batch_stride = max_seqlen * num_heads * H; -// const int present_row_stride = num_heads * H; -// const int present_head_stride = H; - -// // kv_buff: BTNH or BNTH with buffered memory for new -// // new_kv: BLNH - -// const int s = seqlens_k[b]; - -// const int in_offset = b * present_batch_stride + s * present_row_stride + n * present_head_stride + h; -// const int out_offset = b * present_batch_stride + (max_seqlen - 1) * present_row_stride + n * present_head_stride + h; -// kv_buff[out_offset] = kv_buff[in_offset]; -// } - -// // Concat new to kv buffer in place -// template -// Status LaunchLeftPadLast(contrib::GroupQueryAttentionParameters& parameters, -// GroupQueryAttentionData& data, -// cudaStream_t stream, -// const int max_threads_per_block) { -// const int batch_size = parameters.batch_size; -// const int sequence_length = parameters.sequence_length; -// const int num_heads = parameters.num_heads; -// const int head_size = parameters.head_size; - -// // Indicates past sequence_length of each sequence -// const int* seqlens_k = reinterpret_cast(data.seqlens_k); - -// const int H = head_size / 4; -// const dim3 grid(num_heads, batch_size, 1); -// const dim3 block(H, 1, 1); -// LeftPadLast<<>>(sequence_length, -// reinterpret_cast(data.output), -// seqlens_k); -// return CUDA_CALL(cudaGetLastError()); -// } - ////////// Launch Kernels #if USE_FLASH_ATTENTION @@ -541,7 +492,7 @@ Status FlashAttention( void* key = reinterpret_cast(const_cast(data.key)); void* value = reinterpret_cast(const_cast(data.value)); - bool is_causal = parameters.is_unidirectional; + bool is_causal = true; // Note: seqlens_k is past sequence length for flash if (parameters.is_prompt) { @@ -579,7 +530,7 @@ Status FlashAttention( seqlens_k, batch_size, num_heads, kv_num_heads, head_size, sequence_length, 
present_sequence_length, kv_sequence_length, scale, is_causal, past_bsnh, parameters.num_splits, reinterpret_cast(data.softmax_lse_accum), - reinterpret_cast(data.out_accum))); + reinterpret_cast(data.out_accum), parameters.local_window_size)); } else { // Not share buffer case // Note that Flash Attention kv-caching operates in place on a buffer... therefore this path is inneficient @@ -611,13 +562,9 @@ Status FlashAttention( seqlens_k, batch_size, num_heads, kv_num_heads, head_size, sequence_length, present_sequence_length, 0, scale, is_causal, past_bsnh, parameters.num_splits, reinterpret_cast(data.softmax_lse_accum), - reinterpret_cast(data.out_accum))); + reinterpret_cast(data.out_accum), parameters.local_window_size)); } - // if (parameters.left_padding && parameters.is_prompt) { - // ORT_RETURN_IF_ERROR(LaunchLeftPadLast(parameters, data, stream, device_prop.maxThreadsPerBlock)); - // } - DUMP_TENSOR_INIT(); DUMP_TENSOR("flash attention output", data.output, batch_size, sequence_length, num_heads, head_size); @@ -704,9 +651,11 @@ Status EfficientAttention( p.max_sequence_length = present_sequence_length; p.qk_head_size = head_size; p.v_head_size = head_size; - p.causal = parameters.is_unidirectional; + p.causal = true; p.scale = scale; p.seqlen_k_ptr = data.seqlens_k_total; // Note: seqlens_k is total sequence length for efficient + p.seqstart_q_ptr = nullptr; + p.seqstart_k_ptr = nullptr; p.query = query; p.key = key; p.value = value; @@ -721,10 +670,6 @@ Status EfficientAttention( p.has_custom_right_padding = true; run_memory_efficient_attention(p); - // if (parameters.left_padding && parameters.is_prompt) { - // ORT_RETURN_IF_ERROR(LaunchLeftPadLast(parameters, data, stream, device_prop.maxThreadsPerBlock)); - // } - DUMP_TENSOR_INIT(); DUMP_TENSOR("efficient attention output", data.output, batch_size, sequence_length, num_heads, head_size); diff --git a/onnxruntime/core/graph/contrib_ops/bert_defs.cc b/onnxruntime/core/graph/contrib_ops/bert_defs.cc index dcde2ddeb8270..a99bb36984538 100644 --- a/onnxruntime/core/graph/contrib_ops/bert_defs.cc +++ b/onnxruntime/core/graph/contrib_ops/bert_defs.cc @@ -991,7 +991,7 @@ ONNX_MS_OPERATOR_SET_SCHEMA( constexpr const char* GroupQueryAttention_ver1_doc = R"DOC( Group Query Self/Cross Attention. -Supports different number of heads for q and kv. +Supports different number of heads for q and kv. Only supports causal or local attention. )DOC"; ONNX_MS_OPERATOR_SET_SCHEMA( @@ -1004,10 +1004,10 @@ ONNX_MS_OPERATOR_SET_SCHEMA( "Custom scale will be used if specified. Default value is 1/sqrt(head_size)", AttributeProto::FLOAT, OPTIONAL_VALUE) - // .Attr("left_padding_last_token", - // "Copy last token to last index of buffer. Default is 0; 1 when true.", - // AttributeProto::INT, - // OPTIONAL_VALUE) + .Attr("local_window_size", + "left_window_size for local attention (like Mistral). 
Default value is -1 meaning unused.", + AttributeProto::INT, + static_cast(-1)) .Input(0, "query", "Query with shape (batch_size, sequence_length, hidden_size)", diff --git a/onnxruntime/test/python/transformers/test_flash_attn.py b/onnxruntime/test/python/transformers/test_flash_attn.py index 99f62ffdb9f53..8a839875de2a2 100644 --- a/onnxruntime/test/python/transformers/test_flash_attn.py +++ b/onnxruntime/test/python/transformers/test_flash_attn.py @@ -183,7 +183,9 @@ def create_multihead_attention_graph(config): return model.SerializeToString() -def create_group_query_attention_graph_prompt(config, past_kv_format=Formats.BSNH, share_buffer=True): +def create_group_query_attention_graph_prompt( + config, past_kv_format=Formats.BSNH, share_buffer=True, local_window_size=-1 +): past_kv_seqlen = config.buffer_sequence_length if share_buffer else 0 present_kv_seqlen = config.buffer_sequence_length if share_buffer else config.kv_sequence_length nodes = [ @@ -202,6 +204,7 @@ def create_group_query_attention_graph_prompt(config, past_kv_format=Formats.BSN "GroupQueryAttention_0", num_heads=config.num_heads, kv_num_heads=config.kv_num_heads, + local_window_size=local_window_size, # is_past_bsnh=1 if past_kv_format == Formats.BSNH else 0, # kv_share_buffer=1 if share_buffer else 0, domain="com.microsoft", @@ -297,6 +300,26 @@ def create_group_query_attention_graph_prompt(config, past_kv_format=Formats.BSN config.head_size, ], ), + helper.make_tensor_value_info( + "present_key", + TensorProto.FLOAT16, + [ + config.batch_size, + config.kv_sequence_length if past_kv_format == Formats.BSNH else config.kv_num_heads, + config.kv_num_heads if past_kv_format == Formats.BSNH else config.kv_sequence_length, + config.head_size, + ], + ), + helper.make_tensor_value_info( + "present_value", + TensorProto.FLOAT16, + [ + config.batch_size, + config.kv_sequence_length if past_kv_format == Formats.BSNH else config.kv_num_heads, + config.kv_num_heads if past_kv_format == Formats.BSNH else config.kv_sequence_length, + config.head_size, + ], + ), ] graph = helper.make_graph( @@ -310,7 +333,9 @@ def create_group_query_attention_graph_prompt(config, past_kv_format=Formats.BSN return model.SerializeToString() -def create_group_query_attention_graph_past(config, past_kv_format=Formats.BSNH, share_buffer=True): +def create_group_query_attention_graph_past( + config, past_kv_format=Formats.BSNH, share_buffer=True, local_window_size=-1 +): past_kv_seqlen = config.kv_sequence_length present_kv_seqlen = ( config.kv_sequence_length if share_buffer else config.kv_sequence_length + config.sequence_length @@ -331,6 +356,7 @@ def create_group_query_attention_graph_past(config, past_kv_format=Formats.BSNH, "GroupQueryAttention_0", num_heads=config.num_heads, kv_num_heads=config.kv_num_heads, + local_window_size=local_window_size, # is_past_bsnh=1 if past_kv_format == Formats.BSNH else 0, # kv_share_buffer=1 if share_buffer else 0, domain="com.microsoft", @@ -636,8 +662,12 @@ def mha_func(q, k, v, config): return output -def gqa_prompt_func(q, k, v, config, new_k, new_v, seqlens_k=None, past_kv_format=Formats.BSNH, share_buffer=True): - onnx_model_str = create_group_query_attention_graph_prompt(config, past_kv_format, share_buffer) +def gqa_prompt_func( + q, k, v, config, new_k, new_v, seqlens_k=None, window_size=-1, past_kv_format=Formats.BSNH, share_buffer=True +): + onnx_model_str = create_group_query_attention_graph_prompt( + config, past_kv_format, share_buffer, local_window_size=window_size + ) q = torch.reshape(q, 
(config.batch_size, config.q_sequence_length, -1)) past_k = k.clone() if share_buffer else None past_v = v.clone() if share_buffer else None @@ -706,8 +736,12 @@ def gqa_prompt_func(q, k, v, config, new_k, new_v, seqlens_k=None, past_kv_forma return output, present_k, present_v -def gqa_past_func(q, k, v, config, new_k, new_v, seqlens_k=None, past_kv_format=Formats.BSNH, share_buffer=True): - onnx_model_str = create_group_query_attention_graph_past(config, past_kv_format, share_buffer) +def gqa_past_func( + q, k, v, config, new_k, new_v, seqlens_k=None, past_kv_format=Formats.BSNH, share_buffer=True, window_size=-1 +): + onnx_model_str = create_group_query_attention_graph_past( + config, past_kv_format, share_buffer, local_window_size=window_size + ) q = torch.reshape(q, (config.batch_size, config.sequence_length, -1)) past_k = k.clone() past_v = v.clone() @@ -796,6 +830,28 @@ def construct_causal_mask(seqlen_q, seqlen_k, query_padding_mask=None, key_paddi return col_idx > row_idx + sk - sq +def construct_local_mask( + seqlen_q, + seqlen_k, + window_size=(-1, -1), # -1 means infinite window size + query_padding_mask=None, + key_padding_mask=None, + device=None, +): + row_idx = rearrange(torch.arange(seqlen_q, device=device, dtype=torch.long), "s -> s 1") + col_idx = torch.arange(seqlen_k, device=device, dtype=torch.long) + sk = seqlen_k if key_padding_mask is None else rearrange(key_padding_mask.sum(-1), "b -> b 1 1 1") + sq = seqlen_q if query_padding_mask is None else rearrange(query_padding_mask.sum(-1), "b -> b 1 1 1") + if window_size[0] < 0: + return col_idx > row_idx + sk - sq + window_size[1] + else: + sk = torch.full_like(col_idx, seqlen_k) if key_padding_mask is None else sk + return torch.logical_or( + col_idx > torch.minimum(row_idx + sk - sq + window_size[1], sk), + col_idx < row_idx + sk - sq - window_size[0], + ) + + def attention_ref( q, k, @@ -805,6 +861,7 @@ def attention_ref( dropout_p=0.0, dropout_mask=None, causal=False, + window_size=(-1, -1), # -1 means infinite window size upcast=True, reorder_ops=False, ): @@ -817,6 +874,8 @@ def attention_ref( key_padding_mask: (batch_size, seqlen_k) dropout_p: float dropout_mask: (batch_size, nheads, seqlen_q, seqlen_k) + causal: whether to apply causal masking + window_size: (int, int), left and right window size upcast: whether to cast all inputs to fp32, do all computation in fp32, then cast output back to fp16/bf16. reorder_ops: whether to change the order of operations (scaling k instead of scaling k, etc.) 
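# (Illustrative aside, not part of the patch.) A worked example of the sliding-window rule that
# construct_local_mask above encodes, for the simple case seqlen_q == seqlen_k with no padding:
# query row i may attend to key columns j with i - window_size[0] <= j <= i + window_size[1],
# and window_size = (-1, 0) reduces to the ordinary causal mask (j <= i).
import torch
seqlen, window = 6, (2, 0)  # left window 2, right window 0
row = torch.arange(seqlen).unsqueeze(-1)
col = torch.arange(seqlen)
masked = (col > row + window[1]) | (col < row - window[0])
# Row 4 can only see columns 2, 3 and 4:
assert masked[4].tolist() == [True, True, False, False, False, True]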
@@ -826,6 +885,8 @@ def attention_ref( output: (batch_size, seqlen_q, nheads, head_dim) attention: (batch_size, nheads, seqlen_q, seqlen_k), softmax after dropout """ + if causal: + window_size = (window_size[0], 0) dtype_og = q.dtype if upcast: q, k, v = q.float(), k.float(), v.float() @@ -839,12 +900,24 @@ def attention_ref( scores = torch.einsum("bthd,bshd->bhts", q, k / math.sqrt(d)) if key_padding_mask is not None: scores.masked_fill_(rearrange(~key_padding_mask, "b s -> b 1 1 s"), float("-inf")) - if causal: - causal_mask = construct_causal_mask(seqlen_q, seqlen_k, query_padding_mask, key_padding_mask, q.device) - scores.masked_fill_(causal_mask, float("-inf")) + if window_size[0] >= 0 or window_size[1] >= 0: + local_mask = construct_local_mask( + seqlen_q, + seqlen_k, + window_size, + query_padding_mask, + key_padding_mask, + q.device, + ) + scores.masked_fill_(local_mask, float("-inf")) attention = torch.softmax(scores, dim=-1) - if causal: # Some rows are completely masked out so we fill them with zero instead of NaN - attention = attention.masked_fill(torch.all(causal_mask, dim=-1, keepdim=True), 0.0) + # Some rows might be completely masked out so we fill them with zero instead of NaN + if window_size[0] >= 0 or window_size[1] >= 0: + attention = attention.masked_fill(torch.all(local_mask, dim=-1, keepdim=True), 0.0) + # We want to mask here so that the attention matrix doesn't have any NaNs + # Otherwise we'll get NaN in dV + if query_padding_mask is not None: + attention = attention.masked_fill(rearrange(~query_padding_mask, "b s -> b 1 s 1"), 0.0) dropout_scaling = 1.0 / (1 - dropout_p) if dropout_mask is not None: attention_drop = attention.masked_fill(~dropout_mask, 0.0) @@ -853,7 +926,6 @@ def attention_ref( output = torch.einsum("bhts,bshd->bthd", attention_drop, v * dropout_scaling) if query_padding_mask is not None: output.masked_fill_(rearrange(~query_padding_mask, "b s -> b s 1 1"), 0.0) - attention = attention.masked_fill(rearrange(~query_padding_mask, "b s -> b 1 s 1"), 0.0) return output.to(dtype=dtype_og), attention.to(dtype=dtype_og) @@ -957,6 +1029,8 @@ def parity_check_mha( def parity_check_gqa_prompt( config, + causal=False, + local=False, past_format=Formats.BSNH, rtol=1e-3, atol=1e-3, @@ -1007,6 +1081,15 @@ def parity_check_gqa_prompt( requires_grad=False, ) + window_size = (-1, -1) + left_window_size = -1 + if local: + left_window_size = random.randint(0, config.kv_sequence_length) + window_size = (left_window_size, 0) + elif causal: + left_window_size = -1 + window_size = (-1, 0) + # Pytorch to compare k_cache_ref = k.clone() v_cache_ref = v.clone() @@ -1033,14 +1116,18 @@ def parity_check_gqa_prompt( k_cache_rep = repeat(k_cache_ref, "b s h d -> b s (h g) d", g=config.num_heads // config.kv_num_heads) v_cache_rep = repeat(v_cache_ref, "b s h d -> b s (h g) d", g=config.num_heads // config.kv_num_heads) key_padding_mask = arange < cache_seqlens_expanded - out_ref, _ = attention_ref(q, k_cache_rep, v_cache_rep, None, key_padding_mask, 0.0, None, causal=True) + out_ref, _ = attention_ref( + q, k_cache_rep, v_cache_rep, None, key_padding_mask, 0.0, None, causal=True, window_size=window_size + ) out_ref = out_ref.detach().cpu().numpy() if past_format == Formats.BNSH: k_cache_ref = k_cache_ref.transpose(1, 2) v_cache_ref = v_cache_ref.transpose(1, 2) # Flash function - out, present_k, present_v = gqa_prompt_func(q, k, v, config, new_k, new_v, cache_seqlens, past_format, True) + out, present_k, present_v = gqa_prompt_func( + q, k, v, config, new_k, new_v, 
cache_seqlens, left_window_size, past_format, True + ) out = torch.squeeze(out, 0) out = torch.reshape(out, (config.batch_size, config.q_sequence_length, config.num_heads, config.head_size)) out = out.detach().cpu().numpy() @@ -1052,6 +1139,10 @@ def parity_check_gqa_prompt( # Compare results print( "KV-buffer", + " causal:", + causal, + " local:", + local, "past kv format:", "BSNH" if past_format == Formats.BSNH else "BNSH", " B:", @@ -1080,6 +1171,8 @@ def parity_check_gqa_prompt( def parity_check_gqa_prompt_no_buff( config, + causal=False, + local=False, past_format=Formats.BSNH, rtol=1e-3, atol=1e-3, @@ -1112,6 +1205,15 @@ def parity_check_gqa_prompt_no_buff( requires_grad=False, ) + window_size = (-1, -1) + left_window_size = -1 + if local: + left_window_size = random.randint(0, config.kv_sequence_length) + window_size = (left_window_size, 0) + elif causal: + left_window_size = -1 + window_size = (-1, 0) + # Pytorch to compare k_cache_ref = new_k.clone() v_cache_ref = new_v.clone() @@ -1132,14 +1234,18 @@ def parity_check_gqa_prompt_no_buff( new_mask = brange < cache_seqlens_expanded k_cache_rep = repeat(k_cache_ref, "b s h d -> b s (h g) d", g=config.num_heads // config.kv_num_heads) v_cache_rep = repeat(v_cache_ref, "b s h d -> b s (h g) d", g=config.num_heads // config.kv_num_heads) - out_ref, _ = attention_ref(q, k_cache_rep, v_cache_rep, None, new_mask, 0.0, None, causal=True) + out_ref, _ = attention_ref( + q, k_cache_rep, v_cache_rep, None, new_mask, 0.0, None, causal=True, window_size=window_size + ) out_ref = out_ref.detach().cpu().numpy() if past_format == Formats.BNSH: k_cache_ref = k_cache_ref.transpose(1, 2) v_cache_ref = v_cache_ref.transpose(1, 2) # Flash function - out, present_k, present_v = gqa_prompt_func(q, None, None, config, new_k, new_v, cache_seqlens, past_format, False) + out, present_k, present_v = gqa_prompt_func( + q, None, None, config, new_k, new_v, cache_seqlens, left_window_size, past_format, False + ) out = torch.squeeze(out, 0) out = torch.reshape(out, (config.batch_size, config.q_sequence_length, config.num_heads, config.head_size)) out = out.detach().cpu().numpy() @@ -1179,6 +1285,8 @@ def parity_check_gqa_prompt_no_buff( def parity_check_gqa_past( config, + causal=False, + local=False, past_format=Formats.BSNH, rtol=1e-3, atol=1e-3, @@ -1228,6 +1336,14 @@ def parity_check_gqa_past( dtype=torch.float16, requires_grad=False, ) + window_size = (-1, -1) + left_window_size = -1 + if local: + left_window_size = random.randint(0, config.kv_sequence_length) + window_size = (left_window_size, 0) + elif causal: + left_window_size = -1 + window_size = (-1, 0) # Pytorch to compare k_cache_ref = k.clone() @@ -1253,14 +1369,18 @@ def parity_check_gqa_past( k_cache_rep = repeat(k_cache_ref, "b s h d -> b s (h g) d", g=config.num_heads // config.kv_num_heads) v_cache_rep = repeat(v_cache_ref, "b s h d -> b s (h g) d", g=config.num_heads // config.kv_num_heads) key_padding_mask = arange < cache_seqlens_expanded + config.sequence_length - out_ref, _ = attention_ref(q, k_cache_rep, v_cache_rep, None, key_padding_mask, 0.0, None, causal=True) + out_ref, _ = attention_ref( + q, k_cache_rep, v_cache_rep, None, key_padding_mask, 0.0, None, causal=True, window_size=window_size + ) out_ref = out_ref.detach().cpu().numpy() if past_format == Formats.BNSH: k_cache_ref = k_cache_ref.transpose(1, 2) v_cache_ref = v_cache_ref.transpose(1, 2) # Flash function - out, present_k, present_v = gqa_past_func(q, k, v, config, new_k, new_v, cache_seqlens, past_format, True) + out, 
present_k, present_v = gqa_past_func( + q, k, v, config, new_k, new_v, cache_seqlens, past_format, True, left_window_size + ) out = torch.squeeze(out, 0) out = torch.reshape(out, (config.batch_size, config.sequence_length, config.num_heads, config.head_size)) out = out.detach().cpu().numpy() @@ -1274,6 +1394,10 @@ def parity_check_gqa_past( "KV-buffer", "past kv format:", "BSNH" if past_format == Formats.BSNH else "BNSH", + " causal:", + causal, + " local:", + local, " B:", config.batch_size, " S:", @@ -1300,6 +1424,8 @@ def parity_check_gqa_past( def parity_check_gqa_past_no_buff( config, + causal=False, + local=False, past_format=Formats.BSNH, rtol=1e-3, atol=1e-3, @@ -1351,6 +1477,15 @@ def parity_check_gqa_past_no_buff( requires_grad=False, ) + window_size = (-1, -1) + left_window_size = -1 + if local: + left_window_size = random.randint(0, config.kv_sequence_length) + window_size = (left_window_size, 0) + elif causal: + left_window_size = -1 + window_size = (-1, 0) + # Pytorch to compare k_cache_ref = k.clone() v_cache_ref = v.clone() @@ -1378,14 +1513,18 @@ def parity_check_gqa_past_no_buff( k_cache_rep = repeat(k_cache_ref, "b s h d -> b s (h g) d", g=config.num_heads // config.kv_num_heads) v_cache_rep = repeat(v_cache_ref, "b s h d -> b s (h g) d", g=config.num_heads // config.kv_num_heads) key_padding_mask = arange < cache_seqlens_expanded + config.sequence_length - out_ref, _ = attention_ref(q, k_cache_rep, v_cache_rep, None, key_padding_mask, 0.0, None, causal=True) + out_ref, _ = attention_ref( + q, k_cache_rep, v_cache_rep, None, key_padding_mask, 0.0, None, causal=True, window_size=window_size + ) out_ref = out_ref.detach().cpu().numpy() if past_format == Formats.BNSH: k_cache_ref = k_cache_ref.transpose(1, 2) v_cache_ref = v_cache_ref.transpose(1, 2) # Flash function - out, present_k, present_v = gqa_past_func(q, k, v, config, new_k, new_v, cache_seqlens, past_format, False) + out, present_k, present_v = gqa_past_func( + q, k, v, config, new_k, new_v, cache_seqlens, past_format, False, window_size=left_window_size + ) out = torch.squeeze(out, 0) out = torch.reshape(out, (config.batch_size, config.sequence_length, config.num_heads, config.head_size)) out = out.detach().cpu().numpy() @@ -1401,142 +1540,10 @@ def parity_check_gqa_past_no_buff( # Compare results print( "NO buff", - "past kv format:", - "BSNH" if past_format == Formats.BSNH else "BNSH", - " B:", - config.batch_size, - " S:", - config.sequence_length, - " kv S:", - config.kv_sequence_length, - " N:", - config.num_heads, - " kv N:", - config.kv_num_heads, - " h:", - config.head_size, - " Mean Error:", - numpy.mean(numpy.abs(out - out_ref)), - numpy.allclose( - out, - out_ref, - rtol=rtol, - atol=atol, - equal_nan=True, - ), - ) - - -def parity_check_gqa_past_no_buff_no_mask( - config, - past_format=Formats.BSNH, - rtol=1e-3, - atol=1e-3, -): - q = torch.randn( - config.batch_size, - config.sequence_length, - config.num_heads, - config.head_size, - device="cuda", - dtype=torch.float16, - requires_grad=False, - ) - k = torch.randn( - config.batch_size, - config.past_sequence_length if past_format == Formats.BSNH else config.kv_num_heads, - config.kv_num_heads if past_format == Formats.BSNH else config.past_sequence_length, - config.head_size, - device="cuda", - dtype=torch.float16, - requires_grad=False, - ) - v = torch.randn( - config.batch_size, - config.past_sequence_length if past_format == Formats.BSNH else config.kv_num_heads, - config.kv_num_heads if past_format == Formats.BSNH else 
config.past_sequence_length, - config.head_size, - device="cuda", - dtype=torch.float16, - requires_grad=False, - ) - new_k = torch.randn( - config.batch_size, - config.sequence_length, - config.kv_num_heads, - config.head_size, - device="cuda", - dtype=torch.float16, - requires_grad=False, - ) - new_v = torch.randn( - config.batch_size, - config.sequence_length, - config.kv_num_heads, - config.head_size, - device="cuda", - dtype=torch.float16, - requires_grad=False, - ) - - # Pytorch to compare - k_cache_ref = k.clone() - v_cache_ref = v.clone() - if past_format == Formats.BNSH: - k_cache_ref = k_cache_ref.transpose(1, 2) - v_cache_ref = v_cache_ref.transpose(1, 2) - k_cache_ref = torch.cat((k_cache_ref, new_k), 1) - v_cache_ref = torch.cat((v_cache_ref, new_v), 1) - k_cache_rep = repeat(k_cache_ref, "b s h d -> b s (h g) d", g=config.num_heads // config.kv_num_heads) - v_cache_rep = repeat(v_cache_ref, "b s h d -> b s (h g) d", g=config.num_heads // config.kv_num_heads) - key_padding_mask = None - out_ref, _ = attention_ref(q, k_cache_rep, v_cache_rep, None, key_padding_mask, 0.0, None, causal=True) - out_ref = out_ref.detach().cpu().numpy() - if past_format == Formats.BNSH: - k_cache_ref = k_cache_ref.transpose(1, 2) - v_cache_ref = v_cache_ref.transpose(1, 2) - - # Flash function - out, present_k, present_v = gqa_past_func(q, k, v, config, new_k, new_v, past_format, False) - out = torch.squeeze(out, 0) - out = torch.reshape(out, (config.batch_size, config.sequence_length, config.num_heads, config.head_size)) - out = out.detach().cpu().numpy() - - # Make sure past-present buffer updating correctly - if past_format == Formats.BSNH: - assert numpy.allclose( - present_k, - k_cache_ref.detach().cpu().numpy(), - rtol=rtol, - atol=atol, - equal_nan=True, - ) - assert numpy.allclose( - present_v, - v_cache_ref.detach().cpu().numpy(), - rtol=rtol, - atol=atol, - equal_nan=True, - ) - else: - assert numpy.allclose( - present_k, - k_cache_ref.detach().cpu().numpy(), - rtol=rtol, - atol=atol, - equal_nan=True, - ) - assert numpy.allclose( - present_v, - v_cache_ref.detach().cpu().numpy(), - rtol=rtol, - atol=atol, - equal_nan=True, - ) - - # Compare results - print( - "Unbuffered", + " causal:", + causal, + " local:", + local, "past kv format:", "BSNH" if past_format == Formats.BSNH else "BNSH", " B:", @@ -1663,10 +1670,11 @@ def test_gqa_no_past(self): for sq, skv in seqs: for n, n2 in num_h: for h in h_sizes: - for past_kv_format in [Formats.BNSH]: - config = PromptConfig(b, sq, skv, sq + skv + 8, n, n2, h) - parity_check_gqa_prompt(config, past_format=past_kv_format) - parity_check_gqa_prompt_no_buff(config, past_format=past_kv_format) + for local in [False, True]: + for past_kv_format in [Formats.BNSH]: + config = PromptConfig(b, sq, skv, sq + skv + 8, n, n2, h) + parity_check_gqa_prompt(config, local=local, past_format=past_kv_format) + parity_check_gqa_prompt_no_buff(config, local=local, past_format=past_kv_format) def test_gqa_past(self): if not torch.cuda.is_available(): @@ -1725,24 +1733,25 @@ def test_gqa_past(self): for s, s2 in seqs: for n, n2 in num_h: for h in h_sizes: - for past_kv_format in [Formats.BNSH]: - sp = random.randint(1, s2 - s) if s2 - s > 0 else 0 - config = Config(b, s, s2, sp, n, n2, h) - parity_check_gqa_past( - config, - past_format=past_kv_format, - rtol=1e-3, - atol=1e-3, - ) - parity_check_gqa_past_no_buff( - config, - past_format=past_kv_format, - rtol=1e-3, - atol=1e-3, - ) + for local in [False, True]: + for past_kv_format in [Formats.BNSH]: + sp = 
random.randint(1, s2 - s) if s2 - s > 0 else 0 + config = Config(b, s, s2, sp, n, n2, h) + parity_check_gqa_past( + config, + local=local, + past_format=past_kv_format, + rtol=1e-3, + atol=1e-3, + ) + parity_check_gqa_past_no_buff( + config, + local=local, + past_format=past_kv_format, + rtol=1e-3, + atol=1e-3, + ) if __name__ == "__main__": unittest.main() - # test_gqa = TestGQA() - # test_gqa.test_gqa_past() From f17b6afe3c5241525c3ee1384f98dbef64bcffbc Mon Sep 17 00:00:00 2001 From: Chi Lo <54722500+chilo-ms@users.noreply.github.com> Date: Thu, 16 Nov 2023 19:56:05 -0800 Subject: [PATCH 22/38] [TensorRT EP] Fix bug for no nodes in subgraph at GetCapability (#18449) It's possible that a subgraph of the "If" control flow op has no nodes. The TRT EP should consider this kind of subgraph to be fully supported by TRT. The Faster R-CNN model mentioned in issue https://github.com/microsoft/onnxruntime/issues/17434 is one such case. --- .../tensorrt/tensorrt_execution_provider.cc | 22 ++++++++++++++----- 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc b/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc index cd4aa45f83bc8..79f84864a5788 100644 --- a/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc +++ b/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc @@ -1829,6 +1829,10 @@ TensorrtExecutionProvider::GetCapability(const GraphViewer& graph, if (sub_graphs.size() != 0) { bool all_subgraphs_are_supported = true; for (auto sub_graph : sub_graphs) { + // TRT EP should consider the empty subgraph is fully supported by TRT. + if (sub_graph->CreateGraphViewer()->NumberOfNodes() == 0) { + continue; + } if (!AllNodesAssignedToSpecificEP(*(sub_graph->CreateGraphViewer()), kTensorrtExecutionProvider)) { all_subgraphs_are_supported = false; break; @@ -1896,27 +1900,33 @@ TensorrtExecutionProvider::GetCapability(const GraphViewer& graph, auto sub_graphs = graph.ParentNode()->GetSubgraphs(); for (auto sub_graph : sub_graphs) { if (sub_graph.get() != &graph.GetGraph()) { - auto sub_graph_veiwer = sub_graph->CreateGraphViewer(); - const int number_of_ort_subgraph_nodes = sub_graph_veiwer->NumberOfNodes(); + auto sub_graph_viewer = sub_graph->CreateGraphViewer(); + const int number_of_ort_subgraph_nodes = sub_graph_viewer->NumberOfNodes(); std::vector subgraph_nodes_vector(number_of_ort_subgraph_nodes); std::iota(std::begin(subgraph_nodes_vector), std::end(subgraph_nodes_vector), 0); SubGraphCollection_t parser_subgraph_nodes_vector = {{subgraph_nodes_vector, false}}; bool subgraph_early_termination = false; - // Another subgraph of "If" control flow has been parsed by GetCapability before and all subgraph's nodes assigned to TRT EP. - if (AllNodesAssignedToSpecificEP(*sub_graph_veiwer, kTensorrtExecutionProvider)) { + // Another subgraph of "If" control flow op has no nodes. + // In this case, TRT EP should consider this empty subgraph is fully supported by TRT. + if (sub_graph_viewer->NumberOfNodes() == 0) { + all_subgraphs_are_supported = true; + break; + } + // Another subgraph of "If" control flow op has been parsed by GetCapability before and all subgraph's nodes assigned to TRT EP. + else if (AllNodesAssignedToSpecificEP(*sub_graph_viewer, kTensorrtExecutionProvider)) { + all_subgraphs_are_supported = true; + break; + } // Another subgraph of "If" control flow has been parsed by GetCapability and not all subgraph's nodes assigned to TRT EP.
// (Note: GetExecutionProviderType() returns "" meaning node has not yet been assigned to any EPs) - else if (!AllNodesAssignedToSpecificEP(*sub_graph_veiwer, "")) { + else if (!AllNodesAssignedToSpecificEP(*sub_graph_viewer, "")) { all_subgraphs_are_supported = false; break; } // Another subgraph of "If" control flow has not yet been parsed by GetCapability. - subgraph_supported_nodes_vector = GetSupportedList(parser_subgraph_nodes_vector, 0, max_partition_iterations_, *sub_graph_veiwer, &subgraph_early_termination); + subgraph_supported_nodes_vector = GetSupportedList(parser_subgraph_nodes_vector, 0, max_partition_iterations_, *sub_graph_viewer, &subgraph_early_termination); all_subgraphs_are_supported = IsSubGraphFullySupported(subgraph_supported_nodes_vector, number_of_ort_subgraph_nodes); break; } From d73073d491d1543fd0fa746bbc8167f85da8488e Mon Sep 17 00:00:00 2001 From: George Wu Date: Thu, 16 Nov 2023 20:44:27 -0800 Subject: [PATCH 23/38] remove full protobuf requirement for tensorrt ep (#18413) TensorRT can work with protobuf-lite. --- cmake/CMakeLists.txt | 4 +--- tools/ci_build/build.py | 4 ++-- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/cmake/CMakeLists.txt b/cmake/CMakeLists.txt index e82219a0aff64..5796db03fed7c 100644 --- a/cmake/CMakeLists.txt +++ b/cmake/CMakeLists.txt @@ -114,9 +114,7 @@ option(onnxruntime_ENABLE_LTO "Enable link time optimization" OFF) option(onnxruntime_CROSS_COMPILING "Cross compiling onnx runtime" OFF) option(onnxruntime_GCOV_COVERAGE "Compile with options necessary to run code coverage" OFF) option(onnxruntime_DONT_VECTORIZE "Do not vectorize operations in Eigen" OFF) - -#It's preferred to turn it OFF when onnxruntime is dynamically linked to PROTOBUF. But Tensort always required the full version of protobuf. -cmake_dependent_option(onnxruntime_USE_FULL_PROTOBUF "Link to libprotobuf instead of libprotobuf-lite when this option is ON" OFF "NOT onnxruntime_USE_TENSORRT" ON) +option(onnxruntime_USE_FULL_PROTOBUF "Link to libprotobuf instead of libprotobuf-lite when this option is ON" OFF) option(tensorflow_C_PACKAGE_PATH "Path to tensorflow C package installation dir") option(onnxruntime_ENABLE_LANGUAGE_INTEROP_OPS "Enable operator implemented in language other than cpp" OFF) option(onnxruntime_DEBUG_NODE_INPUTS_OUTPUTS "Dump debug information about node inputs and outputs when executing the model." OFF) diff --git a/tools/ci_build/build.py b/tools/ci_build/build.py index e0559419ef8c7..6bd3e2533c045 100644 --- a/tools/ci_build/build.py +++ b/tools/ci_build/build.py @@ -1171,9 +1171,9 @@ def generate_build_tree( "-Donnxruntime_USE_OPENVINO_AUTO=" + ("ON" if args.use_openvino.startswith("AUTO") else "OFF"), ] - # TensorRT and OpenVINO providers currently only support + # VitisAI and OpenVINO providers currently only support # full_protobuf option. - if args.use_full_protobuf or args.use_tensorrt or args.use_openvino or args.use_vitisai or args.gen_doc: + if args.use_full_protobuf or args.use_openvino or args.use_vitisai or args.gen_doc: cmake_args += ["-Donnxruntime_USE_FULL_PROTOBUF=ON", "-DProtobuf_USE_STATIC_LIBS=ON"] if args.use_tvm and args.llvm_path is not None: From 5eb5056c610e274494f182c63c06b30ef0761930 Mon Sep 17 00:00:00 2001 From: Changming Sun Date: Thu, 16 Nov 2023 21:37:29 -0800 Subject: [PATCH 24/38] Always run emsdk_env.sh before build.py, even when ccache is disabled (#18477) ### Description Always run emsdk_env.sh before build.py, even when ccache is disabled. This is a follow-up to #18434.
That PR didn't handle the case when ccache was disabled. --- .../templates/build-linux-wasm-step.yml | 12 +++++------ .../templates/linux-wasm-ci.yml | 21 ++++++++++++------- 2 files changed, 20 insertions(+), 13 deletions(-) diff --git a/tools/ci_build/github/azure-pipelines/templates/build-linux-wasm-step.yml b/tools/ci_build/github/azure-pipelines/templates/build-linux-wasm-step.yml index 56f6bd56eeed7..e664cf69dec76 100644 --- a/tools/ci_build/github/azure-pipelines/templates/build-linux-wasm-step.yml +++ b/tools/ci_build/github/azure-pipelines/templates/build-linux-wasm-step.yml @@ -67,9 +67,9 @@ steps: EM_DIR: '$(Build.SourcesDirectory)/cmake/external/emsdk/upstream/emscripten' - ${{if eq(parameters.WithCache, false)}}: - - task: PythonScript@0 - displayName: '${{parameters.DisplayName}}' - inputs: - scriptPath: '$(Build.SourcesDirectory)/tools/ci_build/build.py' - arguments: ${{parameters.Arguments}} - workingDirectory: '$(Build.BinariesDirectory)' + - script: | + set -e -x + source $(Build.SourcesDirectory)/cmake/external/emsdk/emsdk_env.sh + cd '$(Build.BinariesDirectory)' + python3 '$(Build.SourcesDirectory)/tools/ci_build/build.py' ${{parameters.Arguments}} + displayName: ${{parameters.DisplayName}} diff --git a/tools/ci_build/github/azure-pipelines/templates/linux-wasm-ci.yml b/tools/ci_build/github/azure-pipelines/templates/linux-wasm-ci.yml index f81b1ddc8b93b..852d688b2dbb1 100644 --- a/tools/ci_build/github/azure-pipelines/templates/linux-wasm-ci.yml +++ b/tools/ci_build/github/azure-pipelines/templates/linux-wasm-ci.yml @@ -90,13 +90,20 @@ jobs: arguments: --new_dir $(Build.BinariesDirectory)/deps workingDirectory: $(Build.BinariesDirectory) - - script: | - set -ex - cd '$(Build.SourcesDirectory)/cmake/external/emsdk' - ./emsdk install 3.1.44 ccache-git-emscripten-64bit - ./emsdk activate 3.1.44 ccache-git-emscripten-64bit - displayName: 'emsdk install and activate ccache for emscripten' - condition: eq('${{ parameters.WithCache }}', 'true') + - ${{if eq(parameters.WithCache, true)}}: + - script: | + set -ex + cd '$(Build.SourcesDirectory)/cmake/external/emsdk' + ./emsdk install 3.1.44 ccache-git-emscripten-64bit + ./emsdk activate 3.1.44 ccache-git-emscripten-64bit + displayName: 'emsdk install and activate ccache for emscripten' + - ${{if eq(parameters.WithCache, false)}}: + - script: | + set -ex + cd '$(Build.SourcesDirectory)/cmake/external/emsdk' + ./emsdk install 3.1.44 + ./emsdk activate 3.1.44 + displayName: 'emsdk install and activate ccache for emscripten' - template: build-linux-wasm-step.yml parameters: From 1a2946091968fad57e52dd632967a870e0265b06 Mon Sep 17 00:00:00 2001 From: kailums <109063327+kailums@users.noreply.github.com> Date: Fri, 17 Nov 2023 20:38:15 +0800 Subject: [PATCH 25/38] rope support 4D input tensor (#18454) ### Description change RotaryEmbeddings op implementation, add support for 4D input tensor that is with shape of [batch, num_heads, seq_len, head_size]. ### Motivation and Context Current RotaryEmbedding op only support 3d input tensor with shape [batch, seq_len, hidden_size] For llamav2 model, when using FusionRotaryEmbeddings to only fuse RotaryEmbeddings op, there will be a transpose operation for query and key, and then the input tensor of RotaryEmbeddings becomes 4D [batch, num_heads, seq_len, head_size]. This scenario can't be supported by current RotaryEmbeddings implementation. So it needs to support 4D input tensor. 
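As a rough illustration (not part of the change itself), the sketch below mirrors the stride arithmetic the updated kernels use to address an element at (batch b, position s, head n): for the default 3D layout the head stride is head_size and the sequence stride is num_heads * head_size, while for the transposed 4D layout the sequence stride becomes head_size and the head stride becomes sequence_length * head_size. The helper name and the concrete sizes here are illustrative only.

```python
# Illustrative sketch: offset computation for the 3D [batch, seq, hidden] layout
# versus the transposed 4D [batch, num_heads, seq, head_size] layout.
import numpy as np

def block_offset(b, s, n, seq_len, num_heads, head_size, transposed):
    if not transposed:
        # Default layout: [batch, seq_len, num_heads * head_size]
        head_stride = head_size
        seq_stride = num_heads * head_stride
        batch_stride = seq_len * seq_stride
    else:
        # Transposed layout: [batch, num_heads, seq_len, head_size]
        seq_stride = head_size
        head_stride = seq_len * seq_stride
        batch_stride = num_heads * head_stride
    return b * batch_stride + s * seq_stride + n * head_stride

# Sanity check against NumPy's own flattened indexing (sizes are arbitrary).
batch, num_heads, seq_len, head_size = 2, 4, 3, 8
x3d = np.arange(batch * seq_len * num_heads * head_size).reshape(batch, seq_len, num_heads * head_size)
x4d = x3d.reshape(batch, seq_len, num_heads, head_size).transpose(0, 2, 1, 3).copy()

b, s, n = 1, 2, 3
assert x3d.reshape(-1)[block_offset(b, s, n, seq_len, num_heads, head_size, False)] == x3d[b, s, n * head_size]
assert x4d.reshape(-1)[block_offset(b, s, n, seq_len, num_heads, head_size, True)] == x4d[b, n, s, 0]
```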
--- docs/ContribOperators.md | 4 +- .../contrib_ops/cpu/bert/rotary_embedding.cc | 17 +++++-- .../cpu/bert/rotary_embedding_helper.h | 16 +++++-- .../contrib_ops/cuda/bert/rotary_embedding.cc | 3 +- .../cuda/bert/rotary_embedding_impl.cu | 35 ++++++++++---- .../cuda/bert/rotary_embedding_impl.h | 3 +- .../core/graph/contrib_ops/bert_defs.cc | 4 +- .../test_parity_rotary_embedding.py | 47 +++++++++++++++++-- 8 files changed, 103 insertions(+), 26 deletions(-) diff --git a/docs/ContribOperators.md b/docs/ContribOperators.md index da900e5c59405..8565ffbb6c379 100644 --- a/docs/ContribOperators.md +++ b/docs/ContribOperators.md @@ -5023,7 +5023,7 @@ This version of the operator has been available since version 1 of the 'com.micr
input : T
-3D tensor with shape (batch_size, sequence_length, hidden_size)
+3D tensor with shape (batch_size, sequence_length, hidden_size) or 4D with shape (batch_size, num_heads, sequence_length, head_size)
position_ids : M
1D tensor with shape (1) or 2D tensor with shape (batch_size, sequence_length)
cos_cache : T
@@ -5036,7 +5036,7 @@ This version of the operator has been available since version 1 of the 'com.micr
output : T
-3D tensor with shape (batch_size, sequence_length, hidden_size)
+tensor with same shape as input.
#### Type Constraints diff --git a/onnxruntime/contrib_ops/cpu/bert/rotary_embedding.cc b/onnxruntime/contrib_ops/cpu/bert/rotary_embedding.cc index 4a266af789250..47f462d75fcc4 100644 --- a/onnxruntime/contrib_ops/cpu/bert/rotary_embedding.cc +++ b/onnxruntime/contrib_ops/cpu/bert/rotary_embedding.cc @@ -63,6 +63,16 @@ Status RotaryEmbedding::Compute(OpKernelContext* context) const { const int head_size = parameters.head_size; const int position_ids_format = parameters.position_ids_format; const int half_head_size = head_size / 2; + // Default input tensor shape is [batch, seq_len, hidden_size] + int head_stride = head_size; + int seq_stride = num_heads * head_stride; + int batch_stride = sequence_length * seq_stride; + if (parameters.transposed) { + // Transposed input tensor shape is [batch, num_heads, seq_len, head_size] + seq_stride = head_size; + head_stride = sequence_length * seq_stride; + batch_stride = num_heads * head_stride; + } AllocatorPtr allocator; ORT_RETURN_IF_ERROR(context->GetTempSpaceAllocator(&allocator)); @@ -76,11 +86,10 @@ Status RotaryEmbedding::Compute(OpKernelContext* context) const { const int s = static_cast((ptr / num_heads) % sequence_length); const int n = static_cast(ptr % num_heads); - const int block_offset = b * sequence_length * num_heads + s * num_heads + n; - const int data_offset = block_offset * head_size; + const int block_offset = b * batch_stride + s * seq_stride + n * head_stride; - const T* input_data = input_src + data_offset; - T* output_data = output_dest + data_offset; + const T* input_data = input_src + block_offset; + T* output_data = output_dest + block_offset; // Cache is (M, H/2) const int position_id = (position_ids_format == 0) diff --git a/onnxruntime/contrib_ops/cpu/bert/rotary_embedding_helper.h b/onnxruntime/contrib_ops/cpu/bert/rotary_embedding_helper.h index cf8080800e072..7b2e8289f7b06 100644 --- a/onnxruntime/contrib_ops/cpu/bert/rotary_embedding_helper.h +++ b/onnxruntime/contrib_ops/cpu/bert/rotary_embedding_helper.h @@ -18,6 +18,7 @@ struct RotaryParameters { int num_heads; // num_heads = hidden_size / head_size int max_sequence_length; // Sequence length used by cos/sin cache int position_ids_format; // Format of position ids - 0 is (1), 1 is (batch_size, sequence_length) + bool transposed; // Whether the input tensor has been transposed into (batch, num_heads, seq_len, hidden) }; template @@ -33,8 +34,8 @@ Status CheckInputs(const T* input, // Check input const auto& input_dims = input->Shape().GetDims(); - if (input_dims.size() != 3) { - return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input 'x' is expected to have 3 dimensions, got ", + if (input_dims.size() != 3 && input_dims.size() != 4) { + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input 'x' is expected to have 3 or 4 dimensions, got ", input_dims.size()); } // Check position_ids @@ -63,6 +64,14 @@ Status CheckInputs(const T* input, int batch_size = static_cast(input_dims[0]); int sequence_length = static_cast(input_dims[1]); int hidden_size = static_cast(input_dims[2]); + + bool transposed = false; + if (input_dims.size() == 4) { + // input is [batch, num_heads, seq, head_size] + sequence_length = static_cast(input_dims[2]); + hidden_size = static_cast(input_dims[1]) * static_cast(input_dims[3]); + transposed = true; + } int max_sequence_length = static_cast(cos_cache_dims[0]); int head_size = static_cast(cos_cache_dims[1]) * 2; int num_heads = hidden_size / head_size; @@ -111,6 +120,7 @@ Status CheckInputs(const T* input, 
output_parameters->num_heads = num_heads; output_parameters->max_sequence_length = max_sequence_length; output_parameters->position_ids_format = position_ids_format; + output_parameters->transposed = transposed; } return Status::OK(); @@ -118,4 +128,4 @@ Status CheckInputs(const T* input, } // namespace rotary_embedding_helper } // namespace contrib -} // namespace onnxruntime \ No newline at end of file +} // namespace onnxruntime diff --git a/onnxruntime/contrib_ops/cuda/bert/rotary_embedding.cc b/onnxruntime/contrib_ops/cuda/bert/rotary_embedding.cc index b4b5dac1fbe19..2d12e975d88d7 100644 --- a/onnxruntime/contrib_ops/cuda/bert/rotary_embedding.cc +++ b/onnxruntime/contrib_ops/cuda/bert/rotary_embedding.cc @@ -74,7 +74,8 @@ Status RotaryEmbedding::ComputeInternal(OpKernelContext* context) const { parameters.max_sequence_length, parameters.position_ids_format, interleaved, - device_prop.maxThreadsPerBlock); + device_prop.maxThreadsPerBlock, + parameters.transposed); return Status::OK(); } diff --git a/onnxruntime/contrib_ops/cuda/bert/rotary_embedding_impl.cu b/onnxruntime/contrib_ops/cuda/bert/rotary_embedding_impl.cu index c54e72dcfce13..e1b83bd8caf54 100644 --- a/onnxruntime/contrib_ops/cuda/bert/rotary_embedding_impl.cu +++ b/onnxruntime/contrib_ops/cuda/bert/rotary_embedding_impl.cu @@ -27,7 +27,10 @@ __global__ void RotaryEmbeddingBSNH(T* output, // BxSxNxH const int num_heads, const int head_size, const int position_ids_format, - const bool interleaved) { + const bool interleaved, + const int batch_stride, + const int seq_stride, + const int head_stride) { // B = batch size, S = sequence length, N = num heads, H = head size, M = max sequence length // Use .x in innermost loop to access global memory efficiently @@ -37,11 +40,10 @@ __global__ void RotaryEmbeddingBSNH(T* output, // BxSxNxH const int i = threadIdx.x; - const int block_offset = b * sequence_length * num_heads + s * num_heads + n; - const int data_offset = block_offset * head_size; + const int block_offset = b * batch_stride + s * seq_stride + n * head_stride; - const T* input_data = input + data_offset; - T* output_data = output + data_offset; + const T* input_data = input + block_offset; + T* output_data = output + block_offset; // Cache is (M, H/2) const int half_head_size = head_size / 2; @@ -83,7 +85,8 @@ Status LaunchRotaryEmbeddingKernel( const int max_sequence_length, const int position_ids_format, const bool interleaved, - const int max_threads_per_block) { + const int max_threads_per_block, + const bool transposed) { constexpr int smem_size = 0; const dim3 grid(num_heads, sequence_length, batch_size); @@ -94,10 +97,22 @@ Status LaunchRotaryEmbeddingKernel( // and num_heads values, we can create a block as `block(num_heads, head_size, 1)` // instead. This will require kernel changes to support. 
+ // Default input tensor shape is [batch, seq, hidden_size] + int head_stride = head_size; + int seq_stride = num_heads * head_stride; + int batch_stride = sequence_length * seq_stride; + if (transposed) { + // When transposed, input tensor shape is [batch, num_heads, seq, head_size] + seq_stride = head_size; + head_stride = sequence_length * seq_stride; + batch_stride = num_heads * head_stride; + } + assert(head_size <= max_threads_per_block); RotaryEmbeddingBSNH<<>>( output, input, cos_cache, sin_cache, position_ids, - sequence_length, num_heads, head_size, position_ids_format, interleaved + sequence_length, num_heads, head_size, position_ids_format, interleaved, + batch_stride, seq_stride, head_stride ); return CUDA_CALL(cudaGetLastError()); @@ -117,7 +132,8 @@ template Status LaunchRotaryEmbeddingKernel( const int max_sequence_length, const int position_ids_format, const bool interleaved, - const int max_threads_per_block); + const int max_threads_per_block, + const bool transposed); template Status LaunchRotaryEmbeddingKernel( cudaStream_t stream, @@ -133,7 +149,8 @@ template Status LaunchRotaryEmbeddingKernel( const int max_sequence_length, const int position_ids_format, const bool interleaved, - const int max_threads_per_block); + const int max_threads_per_block, + const bool transposed); } // namespace cuda diff --git a/onnxruntime/contrib_ops/cuda/bert/rotary_embedding_impl.h b/onnxruntime/contrib_ops/cuda/bert/rotary_embedding_impl.h index 29ff48a8ad0fb..ee1ccc43dcbff 100644 --- a/onnxruntime/contrib_ops/cuda/bert/rotary_embedding_impl.h +++ b/onnxruntime/contrib_ops/cuda/bert/rotary_embedding_impl.h @@ -24,7 +24,8 @@ Status LaunchRotaryEmbeddingKernel( const int max_sequence_length, const int position_ids_format, const bool interleaved, - const int max_threads_per_block); + const int max_threads_per_block, + const bool transposed); } // namespace cuda } // namespace contrib diff --git a/onnxruntime/core/graph/contrib_ops/bert_defs.cc b/onnxruntime/core/graph/contrib_ops/bert_defs.cc index a99bb36984538..b97fb0d2899fc 100644 --- a/onnxruntime/core/graph/contrib_ops/bert_defs.cc +++ b/onnxruntime/core/graph/contrib_ops/bert_defs.cc @@ -1144,7 +1144,7 @@ ONNX_MS_OPERATOR_SET_SCHEMA( OPTIONAL_VALUE) .Input(0, "input", - "3D tensor with shape (batch_size, sequence_length, hidden_size)", + "3D tensor with shape (batch_size, sequence_length, hidden_size) or 4D with shape (batch_size, num_heads, sequence_length, head_size)", "T") .Input(1, "position_ids", @@ -1160,7 +1160,7 @@ ONNX_MS_OPERATOR_SET_SCHEMA( "T") .Output(0, "output", - "3D tensor with shape (batch_size, sequence_length, hidden_size)", + "tensor with same shape as input.", "T") .TypeConstraint("T", {"tensor(float)", "tensor(float16)"}, "Constrain input and output types to float tensors.") .TypeConstraint("M", {"tensor(int64)"}, "Constrain input and output types to integer tensors") diff --git a/onnxruntime/test/python/transformers/test_parity_rotary_embedding.py b/onnxruntime/test/python/transformers/test_parity_rotary_embedding.py index b17ae5f69aff5..cf8128e0eebcf 100644 --- a/onnxruntime/test/python/transformers/test_parity_rotary_embedding.py +++ b/onnxruntime/test/python/transformers/test_parity_rotary_embedding.py @@ -261,14 +261,15 @@ def get_eps(self): eps = ["CPUExecutionProvider", "CUDAExecutionProvider"] return list(filter(lambda ep: ep in ort.get_available_providers(), eps)) - def run_ort_ep_tests(self, onnx_graph, inputs_ort, expected_output_bsnh): + def run_ort_ep_tests(self, onnx_graph, inputs_ort, 
expected_output_bsnh, transposed=False): eps = self.get_eps() for ep in eps: sess = ort.InferenceSession(onnx_graph, providers=[ep]) output_ort = sess.run(None, inputs_ort)[0] - output_ort = output_ort.reshape( - (self.config.batch_size, inputs_ort["input"].shape[1], self.config.num_heads, self.config.head_size) - ) + if not transposed: + output_ort = output_ort.reshape( + (self.config.batch_size, inputs_ort["input"].shape[1], self.config.num_heads, self.config.head_size) + ) # Compare outputs as BxSxNxH self.assertTrue(np.allclose(expected_output_bsnh, output_ort)) @@ -445,6 +446,44 @@ def test_hf_token_rotary_one_pos_id(self): # Compare outputs as BxSxNxH self.run_ort_ep_tests(onnx_graph, inputs_ort, output_hf.transpose(1, 2).detach().cpu().numpy()) + # Bonus test: Prompt step, interleaved = false, pos ids shape = (1), transposed + def test_hf_prompt_rotary_one_pos_id_transposed(self): + x_bnsh = torch.randn( + self.config.batch_size, self.config.num_heads, self.config.sequence_length, self.config.head_size + ) + cos_hf, sin_hf = self.llama_hf.get_cos_sin_cache(self.config.sequence_length) + pos_hf = torch.stack([torch.arange(0, self.config.sequence_length) for _ in range(self.config.batch_size)]) + output_hf = self.llama_hf(x_bnsh, cos_hf, sin_hf, pos_hf) # output is BxNxSxH + + cos_ms, sin_ms = self.llama_ms.get_cos_sin_cache() + pos_ms = torch.tensor([0]) + onnx_graph = self.create_onnx_graph(x_bnsh.shape, pos_ms.shape, cos_ms, sin_ms, interleaved=False) + inputs_ort = { + "input": x_bnsh.detach().cpu().numpy(), + "position_ids": pos_ms.detach().cpu().numpy(), + } + + # Compare outputs as BxNxSxH + self.run_ort_ep_tests(onnx_graph, inputs_ort, output_hf.detach().cpu().numpy(), transposed=True) + + # Bonus test: Token generation step, interleaved = false, pos ids shape = (1), transposed + def test_hf_token_rotary_one_pos_id_transposed(self): + x_bnsh = torch.randn(self.config.batch_size, self.config.num_heads, 1, self.config.head_size) + cos_hf, sin_hf = self.llama_hf.get_cos_sin_cache(self.config.sequence_length) + pos_ids = torch.stack([torch.tensor([2]) for _ in range(self.config.batch_size)]) + output_hf = self.llama_hf(x_bnsh, cos_hf, sin_hf, pos_ids) # output is BxSxNxH + + cos_ms, sin_ms = self.llama_ms.get_cos_sin_cache() + pos_ms = torch.tensor([2]) + onnx_graph = self.create_onnx_graph(x_bnsh.shape, pos_ms.shape, cos_ms, sin_ms, interleaved=False) + inputs_ort = { + "input": x_bnsh.detach().cpu().numpy(), + "position_ids": pos_ms.detach().cpu().numpy(), + } + + # Set tranposed=True to compare outputs as BxSxNxH + self.run_ort_ep_tests(onnx_graph, inputs_ort, output_hf.detach().cpu().numpy(), transposed=True) + if __name__ == "__main__": unittest.main() From a5537f2f563d4975c7e6121a7eb260bbbfd9455a Mon Sep 17 00:00:00 2001 From: Wanming Lin Date: Sat, 18 Nov 2023 00:01:40 +0800 Subject: [PATCH 26/38] [WebNN Ep] Slice's axes and steps inputs should be constant initializers (#18427) --- .../webnn/builders/impl/slice_op_builder.cc | 28 +++++++++++-------- 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/onnxruntime/core/providers/webnn/builders/impl/slice_op_builder.cc b/onnxruntime/core/providers/webnn/builders/impl/slice_op_builder.cc index 8778bb2414108..e48cf35012652 100644 --- a/onnxruntime/core/providers/webnn/builders/impl/slice_op_builder.cc +++ b/onnxruntime/core/providers/webnn/builders/impl/slice_op_builder.cc @@ -114,6 +114,22 @@ bool SliceOpBuilder::IsOpSupportedImpl(const InitializedTensorSet& initializers, if (!GetShape(*input_defs[0], input_shape, 
logger)) { return false; } + + if (input_defs.size() < 3) { + LOGS(logger, VERBOSE) << op_type << " [" << name << "] requires at least 3 inputs (data, starts, ends) but got " + << input_defs.size(); + return false; + } + + // Inputs: starts, ends, axes, and steps must be constant initializers if present. + for (size_t i = 1; i < input_defs.size(); i++) { + if (!Contains(initializers, input_defs[i]->Name())) { + LOGS(logger, VERBOSE) << "Input [" << input_defs[i]->Name() << "] of " << op_type + << " [" << name << "] must be known as initializer"; + return false; + } + } + if (input_defs.size() == 5) { // Check steps. const auto& steps_tensor = *initializers.at(input_defs[4]->Name()); std::vector unpacked_tensor; @@ -140,18 +156,6 @@ bool SliceOpBuilder::IsOpSupportedImpl(const InitializedTensorSet& initializers, } } - if (input_defs.size() < 3) { - LOGS(logger, VERBOSE) << op_type << " [" << name << "] requires at least 3 inputs (data starts and ends) but got " - << input_defs.size(); - return false; - } - - const auto& starts_name = input_defs[1]->Name(); - const auto& ends_name = input_defs[2]->Name(); - if (!Contains(initializers, starts_name) || !Contains(initializers, ends_name)) { - LOGS(logger, VERBOSE) << op_type << " [" << name << "] need starts and ends as initializer."; - return false; - } return true; } From fac3e33da510c27c7a2631cf44a79923ee14e09f Mon Sep 17 00:00:00 2001 From: Arthur Islamov Date: Sat, 18 Nov 2023 00:23:52 +0400 Subject: [PATCH 27/38] [js/web] JSEP Attention & MultiHeadAttention (#17742) ### Description This is a narrow implementation of Attention/MultiHeadAttention as it does not support: a. inputs 5-7 for MHA b. packed QKV/KV c. past/present d. attention mask But it works well for StableDiffusion and can be extended later. 
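For reference, what this narrow path computes is simply softmax(scale * Q*K^T) * V with the default scale 1/sqrt(head_size). Below is a minimal NumPy sketch of that computation (shapes, the helper name, and the example sizes are assumptions for illustration, not part of this PR):

```python
# Minimal reference of the narrow attention path: no mask, no past/present,
# scale defaults to 1/sqrt(head_size). Shapes here are BNSH for Q/K/V.
import numpy as np

def attention_ref(q, k, v, scale=None):
    # q, k: (batch, num_heads, seq_len, head_size); v: (batch, num_heads, seq_len, v_head_size)
    head_size = q.shape[-1]
    if scale is None:
        scale = 1.0 / np.sqrt(head_size)
    probs = q @ k.transpose(0, 1, 3, 2) * scale            # (B, N, S, S)
    probs = np.exp(probs - probs.max(axis=-1, keepdims=True))
    probs /= probs.sum(axis=-1, keepdims=True)              # row-wise softmax
    out = probs @ v                                          # (B, N, S, H_v)
    # Flatten heads back to (batch, seq_len, num_heads * v_head_size)
    return out.transpose(0, 2, 1, 3).reshape(q.shape[0], q.shape[2], -1)

q = np.random.rand(1, 2, 4, 8).astype(np.float32)
k = np.random.rand(1, 2, 4, 8).astype(np.float32)
v = np.random.rand(1, 2, 4, 8).astype(np.float32)
print(attention_ref(q, k, v).shape)  # (1, 4, 16)
```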
It reduces VRAM usage as it combines many ops into few I've updated demo here https://islamov.ai/stable-diffusion-webgpu/ it takes ~13sec for 1 image with 20 steps on RTX3090Ti and about 25s on M1 Pro VRAM usage is about 8gb if you don't use img2img Going to focus on SDXL now --------- Co-authored-by: Guenther Schmuelling Co-authored-by: Yulong Wang <7679871+fs-eire@users.noreply.github.com> --- js/web/docs/webgpu-operators.md | 2 + .../lib/wasm/jsep/webgpu/op-resolve-rules.ts | 4 + js/web/lib/wasm/jsep/webgpu/ops/attention.ts | 635 ++++++++++++++++++ .../jsep/webgpu/ops/multi-head-attentiion.ts | 335 +++++++++ js/web/script/generate-webgpu-operator-md.ts | 2 + js/web/test/data/ops/attention.jsonc | 557 +++++++++++++++ .../test/data/ops/multi-head-attention.jsonc | 194 ++++++ js/web/test/suite-test-list.jsonc | 2 + onnxruntime/contrib_ops/js/bert/attention.cc | 24 + onnxruntime/contrib_ops/js/bert/attention.h | 47 ++ .../js/bert/multi_head_attention.cc | 24 + .../js/bert/multi_head_attention.h | 36 + .../contrib_ops/js/js_contrib_kernels.cc | 4 + 13 files changed, 1866 insertions(+) create mode 100644 js/web/lib/wasm/jsep/webgpu/ops/attention.ts create mode 100644 js/web/lib/wasm/jsep/webgpu/ops/multi-head-attentiion.ts create mode 100644 js/web/test/data/ops/attention.jsonc create mode 100644 js/web/test/data/ops/multi-head-attention.jsonc create mode 100644 onnxruntime/contrib_ops/js/bert/attention.cc create mode 100644 onnxruntime/contrib_ops/js/bert/attention.h create mode 100644 onnxruntime/contrib_ops/js/bert/multi_head_attention.cc create mode 100644 onnxruntime/contrib_ops/js/bert/multi_head_attention.h diff --git a/js/web/docs/webgpu-operators.md b/js/web/docs/webgpu-operators.md index 0b82a9c031baa..b246e19137888 100644 --- a/js/web/docs/webgpu-operators.md +++ b/js/web/docs/webgpu-operators.md @@ -20,6 +20,7 @@ Do not modify directly.* | Asinh | ai.onnx(9+) | | | Atan | ai.onnx(7+) | | | Atanh | ai.onnx(9+) | | +| Attention | com.microsoft(1+) | need implementing mask and past/present | | AveragePool | ai.onnx(7-9,10,11+); com.ms.internal.nhwc(7-9,10,11+) | need perf optimization; need implementing activation | | BiasAdd | com.microsoft(1+) | | | BiasSplitGelu | com.microsoft(1+) | | @@ -61,6 +62,7 @@ Do not modify directly.* | MemcpyFromHost | ai.onnx(1+) | | | MemcpyToHost | ai.onnx(1+) | | | Mul | ai.onnx(7-12,13,14+) | | +| MultiHeadAttention | com.microsoft(1+) | need implementing mask and past/present | | Neg | ai.onnx(6-12,13+) | | | Not | ai.onnx(1+) | | | Pad | ai.onnx(2-10,11-12,13-17,18,19+) | | diff --git a/js/web/lib/wasm/jsep/webgpu/op-resolve-rules.ts b/js/web/lib/wasm/jsep/webgpu/op-resolve-rules.ts index a4d51e68b6a25..9f5dceb8f4726 100644 --- a/js/web/lib/wasm/jsep/webgpu/op-resolve-rules.ts +++ b/js/web/lib/wasm/jsep/webgpu/op-resolve-rules.ts @@ -2,6 +2,7 @@ // Licensed under the MIT License. 
import {argMax, argMin, parseArgMinMaxAttributes} from './ops/argminmax'; +import {attention, parseAttentionAttributes} from './ops/attention'; import {biasAdd} from './ops/bias-add'; import {biasSplitGelu} from './ops/bias-split-gelu'; import * as binaryOps from './ops/binary-op'; @@ -16,6 +17,7 @@ import {gemm, parseGemmAttributes} from './ops/gemm'; import {instanceNorm, parseInstanceNormAttributes} from './ops/instance-norm'; import {layerNorm, parseLayerNormAttributes} from './ops/layer-norm'; import {matMul} from './ops/matmul'; +import {multiHeadAttention, parseMultiHeadAttentionAttributes} from './ops/multi-head-attentiion'; import {pad, parsePadAttributes} from './ops/pad'; import * as pool from './ops/pool'; import {range} from './ops/range'; @@ -46,6 +48,7 @@ export const WEBGPU_OP_RESOLVE_RULES: Map = new ['Asinh', [unaryOps.asinh]], ['Atan', [unaryOps.atan]], ['Atanh', [unaryOps.atanh]], + ['Attention', [attention, parseAttentionAttributes]], // TODO: support new attributes for AveragePool-10 ['AveragePool', [pool.averagePool, pool.parseAveragePoolAttributes]], ['BiasAdd', [biasAdd]], @@ -86,6 +89,7 @@ export const WEBGPU_OP_RESOLVE_RULES: Map = new // TODO: support new attributes for MaxPool-8 and MaxPool-10 ['MaxPool', [pool.maxPool, pool.parseMaxPoolAttributes]], ['Mul', [binaryOps.mul]], + ['MultiHeadAttention', [multiHeadAttention, parseMultiHeadAttentionAttributes]], ['Neg', [unaryOps.neg]], ['Not', [unaryOps.not]], ['Pad', [pad, parsePadAttributes]], diff --git a/js/web/lib/wasm/jsep/webgpu/ops/attention.ts b/js/web/lib/wasm/jsep/webgpu/ops/attention.ts new file mode 100644 index 0000000000000..e1f2a47301bfb --- /dev/null +++ b/js/web/lib/wasm/jsep/webgpu/ops/attention.ts @@ -0,0 +1,635 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +import {TensorView} from '../../tensor-view'; +import {createAttributeWithCacheKey} from '../attribute-with-cache-key'; +import {ComputeContext, GpuDataType} from '../types'; + +import {castToF32, fillVector, getMaxComponents, inputVariable, outputVariable, ShaderHelper, sumVector, tensorTypeToWsglStorageType} from './common'; + +export const enum AttentionQkvFormat { + unknown, // enum value not set, or depends on qkv projection implementation details + qkvBNSH, // for non-packed qkv, permuted + qkvBSNH, // for non-packed qkv, not permuted, used by memory efficient attention or MultiHeadAttention + qkvBSN3H, // for TRT fused attention, qkv are packed + qkvBNSHqkvBS3NH, // for TRT fused causal attention, data has two formats (qkv is 3BNSH, gemm_buffer is BS3NH) + qKvBSNHxBSN2H, // for TRT fused cross attention, kv are packed + qkvTNH, // for memory efficient attention, qkv are not packed, and paddings are removed. + qkvTN3H, // for TRT fused attention, qkv are packed and paddings are removed +} + +export const enum AttentionMaskType { + none, // No mask + mask1dKeySeqLen, // [batch_size], key sequence length + mask1dEndStart, // [2 * batch_size] with end positions and start positions + mask1DKeySeqLenStart, // [3 * batch_size + 2] with [key_len[0], ..., key_len[batch_size - 1], query_start[0], + // ..., query_start[batch_size - 1], query_end[batch_size - 1], key_start[0], ..., + // key_start[batch_size - 1], key_end[batch_size - 1]] + mask2dDummy, // dummy mask with shape [1, 1] or [batch_size, 1]. It has same effect as no mask. 
+ mask2dKeyPadding, // [batch_size, total_sequence_length] + mask3dAttention, // [batch_size, sequence_length, total_sequence_length] + mask4dMegatron, // Megatron causal mask with shape [batch_size, 1, max_sequence_length, max_sequence_length] + maskUnknown +} + +export interface AttentionParameters { + batchSize: number; + sequenceLength: number; + pastSequenceLength: number; + kvSequenceLength: number; + totalSequenceLength: number; + maxSequenceLength: number; + inputHiddenSize: number; + hiddenSize: number; + vHiddenSize: number; + headSize: number; + vHeadSize: number; + numHeads: number; + isUnidirectional: boolean; + pastPresentShareBuffer: boolean; + maskFilterValue: number; + maskType: AttentionMaskType; + scale: number; + broadcastResPosBias: boolean; + passPastInKv: boolean; + qkvFormat: AttentionQkvFormat; +} + +export interface AttentionAttrs { + numHeads: number; + isUnidirectional: number; + maskFilterValue: number; + scale: number; + doRotary: number; + qkvHiddenSizes: number[]; + pastPresentShareBuffer: boolean; +} + +const validateAttentionInputs = (inputs: readonly TensorView[], attributes: AttentionAttrs): AttentionParameters => { + // Abbreviation and Meanings: + // B: batch_size + // S: sequence_length (input sequence length of query) + // P: past_sequence_length (past sequence length of key or value) + // L: kv_sequence_length (input sequence length of key or value) + // M: max_sequence_length + // T: total_sequence_length = past_sequence_length + kv_sequence_length + // N: num_heads + // H: head size for Q and K, aka q_head_size or k_head_size or qk_head_size + // H_v: v_head_size + // D_i: input hidden size + // D: hidden size for Q and K (D = N * H), aka q_hidden_size or k_hidden_size or qk_hidden_size + // D_v: v_hidden_size = num_heads * v_head_size + + // When past state is used, Q, K and V should have same hidden size (unless we split it into past_key and past_value). + + // Input shapes: + // input (Q/K/V) : (B, S, D_i) + // weights (Q/K/V) : (D_i, D + D + D_v) + // bias (Q/K/V) : (D + D + D_v) + // mask_index : see below + // past (K/V) : (2, B, N, P, H) or NULL + // relative_position_bias : (B, N, S, T) or NULL + + // For mask_index, the following shapes are supported: + // NULL, (B, 1), (1, 1) + // (B), (2 * B), (3 * B + 2) + // (B, T) + // (B, S, T) + // (B, 1, M, M) + // + // When a model is pruned (like some attention heads are removed in Q/K/V), input_hidden_size could be larger + // than hidden dimension of Q, K and V. 
+ + const input = inputs[0]; + const weights = inputs[1]; + const bias = inputs[2]; + const maskIndex = inputs[3]; + const past = inputs[4]; + const relativePositionBias = inputs[5]; + + if (past && relativePositionBias) { + throw new Error('Attention cannot have both past and relative_position_bias'); + } + + if (input.dims.length !== 3) { + throw new Error('Input "input" must have 3 dimensions'); + } + + const batchSize = input.dims[0]; + const sequenceLength = input.dims[1]; + const inputHiddenSize = input.dims[2]; + + if (bias.dims.length !== 1) { + throw new Error('Input "bias" is expected to have 1 dimensions'); + } + + if (weights.dims.length !== 2) { + throw new Error('Input "weights" is expected to have 2 dimensions'); + } + + if (weights.dims[0] !== inputHiddenSize) { + throw new Error('Input 1 dimension 0 should have same length as dimension 2 of input 0'); + } + + if (bias.dims[0] !== weights.dims[1]) { + throw new Error('Input "bias" dimension 0 should have same length as dimension 1 of input "weights"'); + } + + let qHiddenSize = bias.dims[0] / 3; + let kHiddenSize = qHiddenSize; + let vHiddenSize = kHiddenSize; + if (attributes.qkvHiddenSizes.length > 0) { + if (attributes.qkvHiddenSizes.length !== 3) { + throw new Error('qkv_hidden_sizes attribute should have 3 elements'); + } + for (const sz of attributes.qkvHiddenSizes) { + if (sz % attributes.numHeads !== 0) { + throw new Error('qkv_hidden_sizes should be divisible by num_heads'); + } + } + + qHiddenSize = attributes.qkvHiddenSizes[0]; + kHiddenSize = attributes.qkvHiddenSizes[1]; + vHiddenSize = attributes.qkvHiddenSizes[2]; + } + + const kvSequenceLength = sequenceLength; + + if (qHiddenSize !== kHiddenSize) { + throw new Error('qkv_hidden_sizes first element should be same as the second'); + } + + if (bias.dims[0] !== qHiddenSize + kHiddenSize + vHiddenSize) { + throw new Error('Input "bias" dimension 0 should have same length as sum of Q/K/V hidden sizes'); + } + + let pastSequenceLength = 0; + if (past) { + if (kHiddenSize !== vHiddenSize) { + throw new Error('Input "past" expect k_hidden_size == v_hidden_size'); + } + if (past.dims.length !== 5) { + throw new Error('Input "past" must have 5 dimensions'); + } + if (past.dims[0] !== 2) { + throw new Error('Input "past" first dimension must be 2'); + } + if (past.dims[1] !== batchSize) { + throw new Error('Input "past" second dimension must be batch_size'); + } + if (past.dims[2] !== attributes.numHeads) { + throw new Error('Input "past" third dimension must be num_heads'); + } + if (past.dims[4] !== kHiddenSize / attributes.numHeads) { + throw new Error('Input "past" fifth dimension must be k_hidden_size / num_heads'); + } + + if (!attributes.pastPresentShareBuffer) { + pastSequenceLength = past.dims[3]; + } + // TODO: handle past_seq_len + } + + const totalSequenceLength = kvSequenceLength + pastSequenceLength; + const maxSequenceLength = -1; + + const maskType = AttentionMaskType.none; + if (maskIndex) { + // maskType = AttentionMaskType.MASK_UNKNOWN; + // TODO: handle mask + throw new Error('Mask not supported'); + } + + if (past) { + throw new Error('past is not supported'); + } + if (relativePositionBias) { + throw new Error('relativePositionBias is not supported'); + } + + return { + batchSize, + sequenceLength, + pastSequenceLength, + kvSequenceLength, + totalSequenceLength, + maxSequenceLength, + inputHiddenSize, + hiddenSize: qHiddenSize, + vHiddenSize, + headSize: Math.floor(qHiddenSize / attributes.numHeads), + vHeadSize: Math.floor(vHiddenSize / 
attributes.numHeads), + numHeads: attributes.numHeads, + isUnidirectional: false, + pastPresentShareBuffer: false, + maskFilterValue: attributes.maskFilterValue, + maskType, + scale: attributes.scale, + broadcastResPosBias: false, + passPastInKv: false, + qkvFormat: AttentionQkvFormat.qkvBNSH, + }; +}; + +export const parseAttentionAttributes = (attributes: AttentionAttrs): AttentionAttrs => + createAttributeWithCacheKey({...attributes}); + +export const computeInPlaceSoftmax = (context: ComputeContext, input: TensorView, n: number, d: number) => { + const components = getMaxComponents(d); + const inputHelper = outputVariable('x', input.dataType, input.dims, components); + + let threadMaxValue = 'threadMaxVector'; + if (components === 2) { + threadMaxValue = 'max(threadMaxVector.x, threadMaxVector.y)'; + } else if (components === 4) { + threadMaxValue = 'max(max(threadMaxVector.x, threadMaxVector.y), max(threadMaxVector.z, threadMaxVector.w))'; + } + const dataType = tensorTypeToWsglStorageType(input.dataType); + let WG = 64; + const dComp = d / components; + if (dComp < WG) { + WG = 1; + } else if (dComp / 8 < 64) { + WG = Math.ceil(dComp / 8); + } + const elementsPerWG = Math.ceil(d / components / WG); + + const getShaderSource = (shaderHelper: ShaderHelper) => ` + const dInv: ${dataType} = 1 / ${d}; + const dComp = ${d / components}; + var wgMax: array; + var wgSum: array; + + ${shaderHelper.declareVariables(inputHelper)} + @compute @workgroup_size(${WG}, 1, 1) + fn main(@builtin(workgroup_id) workgroup_id : vec3, + @builtin(local_invocation_index) local_index : u32) { + let localOffset = local_index * ${elementsPerWG}; + let offset: u32 = workgroup_id.x * dComp + localOffset; + + var threadMaxVector = ${fillVector('f32', components, '-3.402823e+38f')}; + for (var i: u32 = 0; i < ${elementsPerWG} && i + localOffset < dComp; i++) { + threadMaxVector = max(${castToF32(dataType, components, 'x[offset + i]')}, threadMaxVector); + } + wgMax[local_index] = ${threadMaxValue}; + workgroupBarrier(); + + var maxValue = -3.402823e+38f; + for (var i = 0u; i < ${WG}; i++) { + maxValue = max(wgMax[i], maxValue); + } + + var sumVector = ${fillVector('f32', components, '0')}; + for (var i: u32 = 0; i < ${elementsPerWG} && i + localOffset < dComp; i++) { + sumVector += exp(${castToF32(dataType, components, 'x[offset + i]')} - maxValue); + } + wgSum[local_index] = ${sumVector('sumVector', components)}; + workgroupBarrier(); + + var sum: f32 = 0; + for (var i = 0u; i < ${WG}; i++) { + sum += wgSum[i]; + } + + if (sum == 0) { + for (var i: u32 = 0; i < ${elementsPerWG} && i + localOffset < dComp; i++) { + x[offset + i] = ${fillVector(dataType, components, 'dInv')}; + } + } else { + for (var i: u32 = 0; i < ${elementsPerWG} && i + localOffset < dComp; i++) { + let f32input = ${castToF32(dataType, components, 'x[offset + i]')}; + x[offset + i] = ${inputHelper.type.value}(exp(f32input - maxValue) / sum); + } + } + }`; + + context.compute( + { + name: 'AttentionProbsSoftmax', + shaderCache: {hint: `${d}`}, + getShaderSource, + getRunData: () => ({ + outputs: [], + dispatchGroup: {x: n}, + }), + }, + {inputs: [input], outputs: []}); +}; + +const computeAttentionProbs = + (context: ComputeContext, q: TensorView, key: TensorView, _bias: TensorView|undefined, + parameters: AttentionParameters, attributes: AttentionAttrs) => { + const probsShape = [ + parameters.batchSize, parameters.numHeads, parameters.sequenceLength, + parameters.kvSequenceLength + parameters.pastSequenceLength + ]; + // TODO: handle mask + + 
const alpha = attributes.scale === 0 ? 1.0 / Math.sqrt(parameters.headSize) : attributes.scale; + + const dataType = tensorTypeToWsglStorageType(q.dataType); + + const components = getMaxComponents(parameters.headSize); + const qInput = inputVariable('q', q.dataType, q.dims, components); + const kInput = inputVariable('key', key.dataType, key.dims, components); + const output = outputVariable('output', q.dataType, probsShape); + + const vectorizedHeadSize = parameters.headSize / components; + const M = parameters.sequenceLength; + const N = parameters.totalSequenceLength; + const K = vectorizedHeadSize; + + const TILE_SIZE = 12; + + const dispatch = { + x: Math.ceil(parameters.totalSequenceLength / TILE_SIZE), + y: Math.ceil(parameters.sequenceLength / TILE_SIZE), + z: parameters.batchSize * parameters.numHeads + }; + + const inputs = [q, key]; + const getShaderSource = (shaderHelper: ShaderHelper) => ` + const M: u32 = ${M}u; + const N: u32 = ${N}u; + const K: u32 = ${K}u; + const alpha: ${dataType} = ${alpha}; + const beta: ${dataType} = 1.0; + const TILE_SIZE = ${TILE_SIZE}u; + + var tileQ: array<${qInput.type.storage}, ${TILE_SIZE * TILE_SIZE}>; + var tileK: array<${qInput.type.storage}, ${TILE_SIZE * TILE_SIZE}>; + + ${shaderHelper.declareVariables(qInput, kInput, output)} + + @compute @workgroup_size(${TILE_SIZE}, ${TILE_SIZE}, 1) + fn main(@builtin(workgroup_id) workgroup_id : vec3, + @builtin(local_invocation_id) local_id : vec3, @builtin(local_invocation_index) local_index : u32) { + let global_idx = (workgroup_id.z * ${dispatch.x * dispatch.y}u + + workgroup_id.y * ${dispatch.x}u + workgroup_id.x) * ${TILE_SIZE * TILE_SIZE}u + local_index; + + // x holds the N and y holds the M + let headIdx = workgroup_id.z; + let m = workgroup_id.y * TILE_SIZE; + let n = workgroup_id.x * TILE_SIZE; + let lm = m + local_id.y; + let ln = n + local_id.x; + + let qOffset = ${parameters.sequenceLength * vectorizedHeadSize} * headIdx + m * K; + let kOffset = ${parameters.kvSequenceLength * vectorizedHeadSize} * headIdx + n * K; + + var value = ${fillVector(dataType, components)}; + for (var w: u32 = 0u; w < K; w += TILE_SIZE) { + if (m + local_id.y < M && w + local_id.x < K) { + tileQ[TILE_SIZE * local_id.y + local_id.x] = q[qOffset + local_id.y * K + w + local_id.x]; + } + if (n + local_id.y < N && w + local_id.x < K) { + tileK[TILE_SIZE * local_id.y + local_id.x] = key[kOffset + local_id.y * K + w + local_id.x]; + } + workgroupBarrier(); + + for (var k: u32 = 0u; k ({ + outputs: [{dims: probsShape, dataType: q.dataType, gpuDataType: GpuDataType.default}], + dispatchGroup: dispatch, + }), + getShaderSource, + }, + {inputs, outputs: [-1]})[0]; + + computeInPlaceSoftmax( + context, probs, parameters.batchSize * parameters.numHeads * parameters.sequenceLength, + parameters.totalSequenceLength); + + return probs; + }; + +const computeVxAttentionScore = + (context: ComputeContext, probs: TensorView, v: TensorView, params: AttentionParameters) => { + const outputShape = [params.batchSize, params.sequenceLength, params.vHiddenSize]; + + const probsHelper = inputVariable('probs', probs.dataType, probs.dims); + const vHelper = inputVariable('v', v.dataType, v.dims); + const output = outputVariable('output', probs.dataType, outputShape); + + const dataType = tensorTypeToWsglStorageType(probs.dataType); + + const TILE_SIZE = 12; + const dispatch = { + x: Math.ceil(params.vHeadSize / TILE_SIZE), + y: Math.ceil(params.sequenceLength / TILE_SIZE), + z: params.batchSize * params.numHeads + }; + + const 
getShaderSource = (shaderHelper: ShaderHelper) => ` + const M: u32 = ${params.sequenceLength}u; + const N: u32 = ${params.vHeadSize}u; + const K: u32 = ${params.totalSequenceLength}u; + const numHeads: u32 = ${params.numHeads}u; + const TILE_SIZE = ${TILE_SIZE}u; + + var tileQ: array<${probsHelper.type.storage}, ${TILE_SIZE * TILE_SIZE}>; + var tileK: array<${probsHelper.type.storage}, ${TILE_SIZE * TILE_SIZE}>; + + ${shaderHelper.declareVariables(probsHelper, vHelper, output)} + + @compute @workgroup_size(${TILE_SIZE}, ${TILE_SIZE}, 1) + fn main(@builtin(workgroup_id) workgroup_id : vec3, + @builtin(local_invocation_id) local_id : vec3, @builtin(local_invocation_index) local_index : u32) { + let global_idx = (workgroup_id.z * ${dispatch.x * dispatch.y}u + + workgroup_id.y * ${dispatch.x}u + workgroup_id.x) * ${TILE_SIZE * TILE_SIZE}u + local_index; + + let headIdx = workgroup_id.z; + let m = workgroup_id.y * TILE_SIZE + local_id.y; + let n = workgroup_id.x * TILE_SIZE + local_id.x; + + let offsetA = headIdx * (M * K) + m * K; + let offsetB = headIdx * (N * K) + n; + + var value = ${dataType}(0); + for (var w: u32 = 0u; w < K; w += TILE_SIZE) { + if (m < M && w + local_id.x < K) { + tileQ[TILE_SIZE * local_id.y + local_id.x] = probs[offsetA + w + local_id.x]; + } + if (n < N && w + local_id.y < K) { + tileK[TILE_SIZE * local_id.y + local_id.x] = v[offsetB + (w + local_id.y) * N]; + } + workgroupBarrier(); + for (var k: u32 = 0u; k ({ + outputs: [{dims: outputShape, dataType: probs.dataType, gpuDataType: GpuDataType.default}], + dispatchGroup: dispatch, + }), + getShaderSource, + }, + {inputs: [probs, v], outputs: [0]})[0]; + }; + +export const applyAttention = + (context: ComputeContext, q: TensorView, k: TensorView, v: TensorView, _maskIndex: TensorView|undefined, + _past: TensorView|undefined, _pastKey: TensorView|undefined, _pastValue: TensorView|undefined, + relativePositionBias: TensorView|undefined, parameters: AttentionParameters, attributes: AttentionAttrs) => { + const probs = computeAttentionProbs(context, q, k, relativePositionBias, parameters, attributes); + + computeVxAttentionScore(context, probs, v, parameters); + }; + +const prepare = (context: ComputeContext, parameters: AttentionParameters) => { + const outputShape = [ + parameters.batchSize, + parameters.numHeads, + parameters.sequenceLength, + parameters.headSize, + ]; + + const dataType = tensorTypeToWsglStorageType(context.inputs[0].dataType); + + const M = parameters.sequenceLength; + const K = parameters.inputHiddenSize; + const N = parameters.headSize; + + const TILE_SIZE = 12; + const dispatch = { + x: Math.ceil(parameters.headSize / TILE_SIZE), + y: Math.ceil(parameters.sequenceLength / TILE_SIZE), + z: parameters.batchSize * parameters.numHeads + }; + + const getShaderSource = () => ` + const M: u32 = ${M}u; + const K: u32 = ${K}u; + const N: u32 = ${N}u; + const numHeads: u32 = ${parameters.numHeads}; + const ldb = ${parameters.hiddenSize + parameters.hiddenSize + parameters.vHiddenSize}u; + const TILE_SIZE = ${TILE_SIZE}u; + + var tileInput: array<${dataType}, ${TILE_SIZE * TILE_SIZE}>; + var tileWeightQ: array<${dataType}, ${TILE_SIZE * TILE_SIZE}>; + var tileWeightK: array<${dataType}, ${TILE_SIZE * TILE_SIZE}>; + var tileWeightV: array<${dataType}, ${TILE_SIZE * TILE_SIZE}>; + + @group(0) @binding(0) var input: array<${dataType}>; + @group(0) @binding(1) var weight: array<${dataType}>; + @group(0) @binding(2) var bias: array<${dataType}>; + @group(0) @binding(3) var outputQ: array<${dataType}>; + @group(0) 
@binding(4) var outputK: array<${dataType}>; + @group(0) @binding(5) var outputV: array<${dataType}>; + + @compute @workgroup_size(${TILE_SIZE}, ${TILE_SIZE}, 1) + fn main(@builtin(workgroup_id) workgroup_id : vec3, + @builtin(local_invocation_id) local_id : vec3, @builtin(local_invocation_index) local_index : u32) { + let global_idx = (workgroup_id.z * ${dispatch.x * dispatch.y}u + + workgroup_id.y * ${dispatch.x}u + workgroup_id.x) * ${TILE_SIZE * TILE_SIZE}u + local_index; + + let batchIndex = workgroup_id.z / ${parameters.numHeads}; + let headNumber = workgroup_id.z % ${parameters.numHeads}; + let m = workgroup_id.y * TILE_SIZE + local_id.y; + let n = workgroup_id.x * TILE_SIZE + local_id.x; + + let inputOffset = batchIndex * (M * K) + m * K; + let biasOffsetQ = headNumber * ${parameters.headSize}; + let biasOffsetK = ${parameters.hiddenSize} + biasOffsetQ; + let biasOffsetV = ${parameters.hiddenSize} + biasOffsetK; + + var valueQ = ${dataType}(0); + var valueK = ${dataType}(0); + var valueV = ${dataType}(0); + for (var w: u32 = 0u; w < K; w += TILE_SIZE) { + if (m < M && w + local_id.x < K) { + tileInput[TILE_SIZE * local_id.y + local_id.x] = input[inputOffset + w + local_id.x]; + } + if (n < N && w + local_id.y < K) { + let offset = n + (w + local_id.y) * ldb; + tileWeightQ[TILE_SIZE * local_id.y + local_id.x] = weight[biasOffsetQ + offset]; + tileWeightK[TILE_SIZE * local_id.y + local_id.x] = weight[biasOffsetK + offset]; + tileWeightV[TILE_SIZE * local_id.y + local_id.x] = weight[biasOffsetV + offset]; + } + workgroupBarrier(); + for (var k: u32 = 0u; k ({ + outputs: [ + {dims: outputShape, dataType: context.inputs[0].dataType, gpuDataType: GpuDataType.default}, + {dims: outputShape, dataType: context.inputs[0].dataType, gpuDataType: GpuDataType.default}, + {dims: outputShape, dataType: context.inputs[0].dataType, gpuDataType: GpuDataType.default}, + ], + dispatchGroup: dispatch, + }), + getShaderSource, + }, + {inputs, outputs: [-1, -1, -1]}); +}; + +export const attention = (context: ComputeContext, attributes: AttentionAttrs): void => { + const params = validateAttentionInputs(context.inputs, attributes); + + const [q, k, v] = prepare(context, params); + + return applyAttention( + context, q, k, v, context.inputs[4], undefined, undefined, undefined, context.inputs[5], params, attributes); +}; diff --git a/js/web/lib/wasm/jsep/webgpu/ops/multi-head-attentiion.ts b/js/web/lib/wasm/jsep/webgpu/ops/multi-head-attentiion.ts new file mode 100644 index 0000000000000..b7726a36bcaad --- /dev/null +++ b/js/web/lib/wasm/jsep/webgpu/ops/multi-head-attentiion.ts @@ -0,0 +1,335 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +import {TensorView} from '../../tensor-view'; +import {ShapeUtil} from '../../util'; +import {createAttributeWithCacheKey} from '../attribute-with-cache-key'; +import {ComputeContext, GpuDataType} from '../types'; + +import {applyAttention, AttentionAttrs, AttentionMaskType, AttentionParameters, AttentionQkvFormat} from './attention'; +import {ShaderHelper, tensorTypeToWsglStorageType} from './common'; +import {createTransposeProgramInfo, TransposeAttributes} from './transpose'; + +const validateInputs = (inputs: readonly TensorView[], attributes: AttentionAttrs): AttentionParameters => { + const query = inputs[0]; + const key = inputs[1]; + const value = inputs[2]; + const bias = inputs[3]; + const keyPaddingMask = inputs[4]; + const relativePositionBias = inputs[5]; + const pastKey = inputs[6]; + const pastValue = inputs[7]; + + // Abbreviation and Meanings: + // B: batch_size + // S: sequence_length (input sequence length of query) + // P: past_sequence_length (past sequence length of key or value) + // L: kv_sequence_length (input sequence length of key or value) + // M: max_sequence_length + // T: total_sequence_length = past_sequence_length + kv_sequence_length + // N: num_heads + // H: head size for Q and K, aka q_head_size or k_head_size or qk_head_size + // H_v: v_head_size + // D_i: input hidden size + // D: hidden size for Q and K (D = N * H), aka q_hidden_size or k_hidden_size or qk_hidden_size + // D_v: v_hidden_size = num_heads * v_head_size + + // key_padding_mask (K/V) : (B) or (2*B + 1) or (B, L) or None + // relative_position_bias : (B, 1, S, L) + // past_key : (B, N, S*, H) + // past_value : (B, N, S*, H) + // When no packing for q/k/v: + // query (Q) : (B, S, D) + // key (K) : (B, L, D) or (B, N, S*, H) + // value (V) : (B, L, D_v) or (B, N, S*, H) + // bias (Q/K/V) : (D + D + D_v) + // When packed kv is used: + // query (Q) : (B, S, D) + // key (K) : (B, L, N, 2, H) + // value (V) : None + // bias (Q/K/V) : None + // When packed qkv is used: + // query (Q) : (B, L, N, 3, H) or (B, S, 3*D) + // key (K) : None + // value (V) : None + // bias (Q/K/V) : None or (D + D + D_v) + + if (query.dims.length !== 3 && query.dims.length !== 5) { + throw new Error('Input query is expected to have 3 or 5 dimensions'); + } + + const dmmhaPacking = false; + const batchSize = query.dims[0]; + const sequenceLength = query.dims[1]; + const hiddenSize = query.dims.length === 3 ? (dmmhaPacking ? 
query.dims[2] / 3 : query.dims[2]) : + attributes.numHeads * query.dims[4]; + let kvSequenceLength = sequenceLength; + + let pastSequenceLength = 0; + let maxSequenceLength = 0; + const headSize = Math.floor(hiddenSize / attributes.numHeads); + if (pastKey && pastValue) { + if (pastKey.dims.length !== 4) { + throw new Error('Input "past_key" is expected to have 4 dimensions'); + } + if (pastValue.dims.length !== 4) { + throw new Error('Input "past_value" is expected to have 4 dimensions'); + } + pastSequenceLength = pastKey.dims[2]; + maxSequenceLength = pastKey.dims[2]; + } else if (pastKey || pastValue) { + throw new Error('Input "past_key" and "past_value" shall be both present or both absent'); + } + + let qkvFormat: AttentionQkvFormat; + if (key) { + if (query.dims.length !== 3) { + throw new Error('Input "query" is expected to have 3 dimensions when key is given'); + } + if (key.dims.length < 3 || key.dims.length > 5) { + throw new Error('Input "key" is expected to have 3, 4, or 5 dimensions'); + } + if (query.dims[0] !== key.dims[0]) { + throw new Error('Input "query" and "key" shall have same dim 0 (batch size)'); + } + + if (key.dims.length === 3) { + if (key.dims[2] !== query.dims[2]) { + throw new Error('Input "query" and "key" shall have same dim 2 (hidden_size)'); + } + qkvFormat = AttentionQkvFormat.qkvBSNH; + kvSequenceLength = key.dims[1]; + } else if (key.dims.length === 5) { + if (key.dims[2] !== attributes.numHeads || key.dims[3] !== 2 || key.dims[4] !== headSize) { + throw new Error('Expect "key" shape (batch_size, kv_sequence_length, num_heads, 2, head_size) for packed kv'); + } + if (value) { + throw new Error('Expect "value" be none when "key" has packed kv format.'); + } + qkvFormat = AttentionQkvFormat.qKvBSNHxBSN2H; + kvSequenceLength = key.dims[1]; + } else { // key_dims.size() == 4 (cross-attention with past_key) + if (key.dims[1] !== attributes.numHeads || key.dims[3] !== headSize) { + throw new Error('Expect "key" shape (batch_size, num_heads, kv_sequence_length, head_size) for past_key'); + } + + qkvFormat = AttentionQkvFormat.unknown; + kvSequenceLength = key.dims[2]; + } + } else { // packed QKV + if (query.dims.length !== 3 && query.dims.length !== 5) { + throw new Error('Input "query" is expected to have 3 or 5 dimensions when key is empty'); + } + if (query.dims.length === 5 && (query.dims[2] !== attributes.numHeads || query.dims[3] !== 3)) { + throw new Error('Expect "query" shape (batch_size, kv_sequence_length, num_heads, 3, head_size) for packed kv'); + } + + qkvFormat = AttentionQkvFormat.qkvBSN3H; + } + + if (bias) { + if (bias.dims.length !== 1) { + throw new Error('Input "bias" is expected to have 1 dimension'); + } + + if (value) { + if (query.dims.length === 5 && query.dims[3] === 2) { + throw new Error('bias is not allowed for packed kv.'); + } + } + } + + let maskType: AttentionMaskType = AttentionMaskType.none; + if (keyPaddingMask) { + maskType = AttentionMaskType.maskUnknown; + const maskDims = keyPaddingMask.dims; + if (maskDims.length === 1) { + if (maskDims[0] === batchSize) { + maskType = AttentionMaskType.mask1dKeySeqLen; + } else if (maskDims[0] === 3 * batchSize + 2) { + maskType = AttentionMaskType.mask1DKeySeqLenStart; + } + } else if (maskDims.length === 2 && maskDims[0] === batchSize && maskDims[1] === kvSequenceLength) { + maskType = AttentionMaskType.mask2dKeyPadding; + } + if (maskType === AttentionMaskType.maskUnknown) { + throw new Error('Input "key_padding_mask" shape shall be (batch_size) or (batch_size, 
kv_sequence_length)'); + } + throw new Error('Mask not supported'); + } + + let passPastInKv = false; + let vHiddenSize = hiddenSize; + if (value) { + if (value.dims.length !== 3 && value.dims.length !== 4) { + throw new Error('Input "value" is expected to have 3 or 4 dimensions'); + } + + if (query.dims[0] !== value.dims[0]) { + throw new Error('Input "query" and "value" shall have same dim 0 (batch_size)'); + } + + if (value.dims.length === 3) { + if (kvSequenceLength !== value.dims[1]) { + throw new Error('Input "key" and "value" shall have the same dim 1 (kv_sequence_length)'); + } + vHiddenSize = value.dims[2]; + } else { + if (kvSequenceLength !== value.dims[2]) { + throw new Error('Input "past_key" and "past_value" shall have the same dim 2 (kv_sequence_length)'); + } + vHiddenSize = value.dims[1] * value.dims[3]; + passPastInKv = true; + } + } + + const totalSequenceLength = pastSequenceLength + kvSequenceLength; + const broadcastResPosBias = false; + // if (extraAddQk) { + // if (extraAddQk.dims[0] === 1) { + // broadcastResPosBias = true; + // } + // } + + if (keyPaddingMask) { + throw new Error('Key padding mask is not supported'); + } + if (relativePositionBias) { + throw new Error('extraAddQk is not supported'); + } + if (pastKey) { + throw new Error('pastKey is not supported'); + } + if (pastValue) { + throw new Error('pastValue is not supported'); + } + + return { + batchSize, + sequenceLength, + pastSequenceLength, + kvSequenceLength, + totalSequenceLength, + maxSequenceLength, + inputHiddenSize: 0, + hiddenSize, + vHiddenSize, + headSize, + vHeadSize: Math.floor(vHiddenSize / attributes.numHeads), + numHeads: attributes.numHeads, + isUnidirectional: false, + pastPresentShareBuffer: false, + maskFilterValue: attributes.maskFilterValue, + maskType, + scale: attributes.scale, + broadcastResPosBias, + passPastInKv, + qkvFormat, + }; +}; + + +export const parseMultiHeadAttentionAttributes = (attributes: AttentionAttrs): AttentionAttrs => + createAttributeWithCacheKey({...attributes}); + +const weightTransposeAttribute: TransposeAttributes = createAttributeWithCacheKey({perm: [0, 2, 1, 3]}); + +const addBiasTranspose = + (context: ComputeContext, qkv: TensorView, bias: TensorView, batchSize: number, sequenceLength: number, + hiddenSize: number, biasOffset: number) => { + const outputShape = [batchSize, sequenceLength, hiddenSize]; + const outputSize = ShapeUtil.size(outputShape); + + const dataType = tensorTypeToWsglStorageType(qkv.dataType); + const getShaderSource = (shaderHelper: ShaderHelper) => ` + const biasOffset = ${biasOffset}u; + const hiddenSize = ${hiddenSize}u; + + @group(0) @binding(0) var qkv: array<${dataType}>; + @group(0) @binding(1) var bias: array<${dataType}>; + @group(0) @binding(2) var qkv_with_bias: array<${dataType}>; + + ${shaderHelper.mainStart()} + ${shaderHelper.guardAgainstOutOfBoundsWorkgroupSizes(outputSize)} + let biasOffsetIdx = (global_idx % hiddenSize) + biasOffset; + + qkv_with_bias[global_idx] = qkv[global_idx] + bias[biasOffsetIdx]; + }`; + + return context.compute( + { + name: 'MultiHeadAttentionAddBias', + shaderCache: {hint: JSON.stringify({batchSize, sequenceLength, hiddenSize, biasOffset})}, + getRunData: () => ({ + outputs: [{dims: outputShape, dataType: qkv.dataType, gpuDataType: GpuDataType.default}], + dispatchGroup: {x: Math.ceil(outputSize / 64 /* workgroup size */)}, + }), + getShaderSource, + }, + {inputs: [qkv, bias], outputs: [-1]})[0]; + }; + +const maybeTransposeToBNSHAndAddBias = + (context: ComputeContext, batchSize: 
number, numHeads: number, sequenceLength: number, headSize: number, + input: TensorView, bias?: TensorView, biasOffset?: number) => { + // const newDims = []; + + let reshapedInput = input; + if (!bias) { + if (input.dims.length === 3) { + reshapedInput = input.reshape([batchSize, sequenceLength, numHeads, headSize]); + } + return context.compute( + createTransposeProgramInfo(reshapedInput, weightTransposeAttribute.perm), + {inputs: [reshapedInput], outputs: [-1]})[0]; + } else { + if (sequenceLength === 1) { + throw new Error('AddBiasReshape is not implemented. Please export your model with packed QKV or KV'); + } else { + reshapedInput = + addBiasTranspose(context, input, bias, batchSize, sequenceLength, numHeads * headSize, biasOffset!); + reshapedInput = reshapedInput.reshape([batchSize, sequenceLength, numHeads, headSize]); + return context.compute( + createTransposeProgramInfo(reshapedInput, weightTransposeAttribute.perm), + {inputs: [reshapedInput], outputs: [-1]})[0]; + } + } + }; + +export const multiHeadAttention = (context: ComputeContext, attributes: AttentionAttrs): void => { + const params = validateInputs(context.inputs, attributes); + + if (context.inputs[0].dims.length === 5) { + throw new Error('Packed QKV is not implemented'); + } + + if (context.inputs[1]?.dims.length === 5) { + throw new Error('Packed KV is not implemented'); + } + + // applyAttention expects BNSH inputs + const kvBNSH = context.inputs[1] && context.inputs[2] && context.inputs[1].dims.length === 4 && + context.inputs[2].dims.length === 4; + + const Q = maybeTransposeToBNSHAndAddBias( + context, params.batchSize, params.numHeads, params.sequenceLength, params.headSize, context.inputs[0], + context.inputs[3], 0); + + if (kvBNSH) { + return applyAttention( + context, Q, context.inputs[1], context.inputs[2], context.inputs[4], undefined, undefined, undefined, + context.inputs[5], params, attributes); + } + + const K = maybeTransposeToBNSHAndAddBias( + context, params.batchSize, params.numHeads, params.kvSequenceLength, params.headSize, context.inputs[1], + context.inputs[3], params.hiddenSize); + + const V = maybeTransposeToBNSHAndAddBias( + context, params.batchSize, params.numHeads, params.kvSequenceLength, params.vHeadSize, context.inputs[2], + context.inputs[3], 2 * params.hiddenSize); + + applyAttention( + context, Q, K, V, context.inputs[4], undefined, context.inputs[6], context.inputs[7], context.inputs[5], params, + attributes); +}; diff --git a/js/web/script/generate-webgpu-operator-md.ts b/js/web/script/generate-webgpu-operator-md.ts index 7408f17004f5e..eab8175a941bd 100644 --- a/js/web/script/generate-webgpu-operator-md.ts +++ b/js/web/script/generate-webgpu-operator-md.ts @@ -16,6 +16,8 @@ const COMMENTS: Record = { 'Reshape': 'no GPU kernel', 'Shape': 'no GPU kernel; an ORT warning is generated - need to fix', 'Resize': 'CoordinateTransformMode align_corners is not supported with downsampling', + 'Attention': 'need implementing mask and past/present', + 'MultiHeadAttention': 'need implementing mask and past/present', }; /* eslint-disable max-len */ diff --git a/js/web/test/data/ops/attention.jsonc b/js/web/test/data/ops/attention.jsonc new file mode 100644 index 0000000000000..bd4483027cc25 --- /dev/null +++ b/js/web/test/data/ops/attention.jsonc @@ -0,0 +1,557 @@ +[ + { + "name": "Attention Basic", + "operator": "Attention", + "opset": { "domain": "com.microsoft", "version": 1 }, + "attributes": [{ "name": "num_heads", "data": 1, "type": "int" }], + "cases": [ + { + "name": "T[0]", + 
"inputs": [ + { + "data": [1, 2, 3, 4, 5, 6, 7, 8], + "dims": [1, 2, 4], + "type": "float32" + }, + { + "data": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], + "dims": [4, 3], + "type": "float32" + }, + { + "data": [1, 2, 3], + "dims": [3], + "type": "float32" + } + ], + "outputs": [ + { + "data": [213, 213], + "dims": [1, 2, 1], + "type": "float32" + } + ] + } + ] + }, + { + "name": "Attention Basic Batch 2 with 2 heads", + "operator": "Attention", + "opset": { "domain": "com.microsoft", "version": 1 }, + "attributes": [{ "name": "num_heads", "data": 2, "type": "int" }], + "cases": [ + { + "name": "T[0]", + "inputs": [ + { + "data": [ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16 + ], + "dims": [2, 2, 8], + "type": "float32" + }, + { + "data": [ + 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 + ], + "dims": [8, 6], + "type": "float32" + }, + { + "data": [1, 2, 3, 4, 5, 6], + "dims": [6], + "type": "float32" + } + ], + "outputs": [ + { + "data": [320, 321, 320, 321, 320, 321, 320, 321], + "dims": [2, 2, 2], + "type": "float32" + } + ] + } + ] + }, + { + "name": "Attention Basic", + "operator": "Attention", + "opset": { "domain": "com.microsoft", "version": 1 }, + "attributes": [{ "name": "num_heads", "data": 1, "type": "int" }], + "cases": [ + { + "name": "T[0]", + "inputs": [ + { + "data": [0.3367, 0.1288, 0.2345, 0.2303, -1.1229, -0.1863], + "dims": [1, 3, 2], + "type": "float32" + }, + { + "data": [2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094], + "dims": [2, 3], + "type": "float32" + }, + { + "data": [1.1103, -1.6898, -0.989], + "dims": [3], + "type": "float32" + } + ], + "outputs": [ + { + "data": [-1.328187108039856, -1.297916054725647, -0.8599594831466675], + "dims": [1, 3, 1], + "type": "float32" + } + ] + } + ] + }, + { + "name": "Attention Basic one head, batch 2", + "operator": "Attention", + "opset": { "domain": "com.microsoft", "version": 1 }, + "attributes": [{ "name": "num_heads", "data": 1, "type": "int" }], + "cases": [ + { + "name": "T[0]", + "inputs": [ + { + "data": [0.3367, 0.1288, 0.2345, 0.2303, -1.1229, -0.1863, 2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094], + "dims": [2, 3, 2], + "type": "float32" + }, + { + "data": [2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094], + "dims": [2, 3], + "type": "float32" + }, + { + "data": [1.1103, -1.6898, -0.989], + "dims": [3], + "type": "float32" + } + ], + "outputs": [ + { + "data": [ + -1.328187108039856, -1.297916054725647, -0.8599594831466675, -0.1792980432510376, -0.26380985975265503, + -0.25473490357398987 + ], + "dims": [2, 3, 1], + "type": "float32" + } + ] + } + ] + }, + { + "name": "Attention Basic 2 head, batch 1", + "operator": "Attention", + "opset": { "domain": "com.microsoft", "version": 1 }, + "attributes": [{ "name": "num_heads", "data": 2, "type": "int" }], + "cases": [ + { + "name": "T[0]", + "inputs": [ + { + "data": [0.3367, 0.1288, 0.2345, 0.2303, -1.1229, -0.1863, 2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094], + "dims": [2, 3, 2], + "type": "float32" + }, + { + "data": [2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094, 0.2345, 0.2303, 0.4617, 1.44, -2.22, 3.6643], + "dims": [2, 6], + "type": "float32" + }, + { + "data": [1.1103, -1.6898, -0.989, -0.989, 1.1103, -1.6898], + "dims": [6], + "type": "float32" + } + ], + "outputs": [ + { + "data": [ + 0.8701779842376709, -2.6158859729766846, 0.8710794448852539, -2.5763747692108154, 0.9005484580993652, 
+ -2.182751178741455, 2.1661579608917236, -2.1045265197753906, 1.6716957092285156, -1.797281265258789, + 1.7134947776794434, -1.765358328819275 + ], + "dims": [2, 3, 2], + "type": "float32" + } + ] + } + ] + }, + { + "name": "Attention Basic 5 head, batch 2", + "operator": "Attention", + "opset": { "domain": "com.microsoft", "version": 1 }, + "attributes": [{ "name": "num_heads", "data": 5, "type": "int" }], + "cases": [ + { + "name": "T[0]", + "inputs": [ + { + "data": [ + 0.3367, 0.1288, 0.2345, 0.2303, -1.1229, -0.1863, 2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094, + 0.8701779842376709, 0.9005484580993652, -1.9029953479766846, 0.8710794448852539, -1.9054111242294312, + -1.8803634643554688, 2.1661579608917236, 1.7134947776794434, -1.5250005722045898, 1.6716957092285156, + -1.0069535970687866, -1.486573576927185, -1.328187108039856, -1.297916054725647, -0.8599594831466675, + -0.1792980432510376, -0.26380985975265503, -0.25473490357398987 + ], + "dims": [2, 3, 5], + "type": "float32" + }, + { + "data": [ + 2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094, 0.2345, 0.2303, 0.4617, 1.44, -2.22, 3.6643, + 0.8710794448852539, -1.9054111242294312, 0.9005484580993652, 0.8701779842376709, 0.9005484580993652, + -1.9029953479766846, 0.8710794448852539, -1.9054111242294312, -1.8803634643554688, 2.1661579608917236, + 1.7134947776794434, -1.5250005722045898, 1.6716957092285156, -1.0069535970687866, -1.486573576927185, + -1.328187108039856, -1.297916054725647, -0.8599594831466675, -0.1792980432510376, -0.26380985975265503, + -0.25473490357398987, 2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094, 0.2345, 0.2303, 0.4617, 1.44, -2.22, + 3.6643, 0.8710794448852539, -1.9054111242294312, 0.9005484580993652, 0.8701779842376709, + 0.9005484580993652, -1.9029953479766846, 0.8710794448852539, -1.9054111242294312, -1.8803634643554688, + 2.1661579608917236, 1.7134947776794434, -1.5250005722045898, 1.6716957092285156, -1.0069535970687866, + -1.486573576927185, -1.328187108039856, -1.297916054725647, -0.8599594831466675, -0.1792980432510376, + -0.26380985975265503, -0.25473490357398987, 2.2082, 0.8710794448852539, -1.9054111242294312, + 0.9005484580993652, 1.9029953479766846, 0.8710794448852539, -1.9054111242294312, -1.8803634643554688, + 2.1661579608917236 + ], + "dims": [5, 15], + "type": "float32" + }, + { + "data": [ + 1.1103, -1.328187108039856, -1.297916054725647, -0.8599594831466675, -0.1792980432510376, + -0.26380985975265503, -0.25473490357398987, -1.6898, -0.989, -1.9029953479766846, 0.8710794448852539, + -1.9054111242294312, -1.8803634643554688, 2.1661579608917236, 1.7134947776794434 + ], + "dims": [15], + "type": "float32" + } + ], + "outputs": [ + { + "data": [ + -1.6956915855407715, -2.8863370418548584, 1.3899128437042236, 1.6789076328277588, -1.4083852767944336, + -1.7009180784225464, -3.1053788661956787, 3.5959298610687256, 1.1027096509933472, -0.009643087163567543, + -1.694351315498352, -2.9284396171569824, 1.734721302986145, 2.0606398582458496, -0.2571452260017395, + 3.671973943710327, -5.285338401794434, -6.833454132080078, 1.7506506443023682, -2.262148380279541, + 2.5110034942626953, 1.440049171447754, -0.9423203468322754, 1.7506506443023682, -1.86212158203125, + -0.5036701560020447, -5.732386589050293, -1.5674757957458496, 1.7506510019302368, -2.264472246170044 + ], + "dims": [2, 3, 5], + "type": "float32" + } + ] + } + ] + }, + { + "name": "Attention Basic 5 head, batch 1", + "operator": "Attention", + "opset": { "domain": "com.microsoft", "version": 1 }, + "attributes": [{ "name": "num_heads", 
"data": 5, "type": "int" }], + "cases": [ + { + "name": "T[0]", + "inputs": [ + { + "data": [ + 0.3367, 0.1288, 0.2345, 0.2303, -1.1229, -0.1863, 2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094, + 0.8701779842376709, 0.9005484580993652, -1.9029953479766846 + ], + "dims": [1, 3, 5], + "type": "float32" + }, + { + "data": [ + 2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094, 0.2345, 0.2303, 0.4617, 1.44, -2.22, 3.6643, + 0.8710794448852539, -1.9054111242294312, 0.9005484580993652, 0.8701779842376709, 0.9005484580993652, + -1.9029953479766846, 0.8710794448852539, -1.9054111242294312, -1.8803634643554688, 2.1661579608917236, + 1.7134947776794434, -1.5250005722045898, 1.6716957092285156, -1.0069535970687866, -1.486573576927185, + -1.328187108039856, -1.297916054725647, -0.8599594831466675, -0.1792980432510376, -0.26380985975265503, + -0.25473490357398987, 2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094, 0.2345, 0.2303, 0.4617, 1.44, -2.22, + 3.6643, 0.8710794448852539, -1.9054111242294312, 0.9005484580993652, 0.8701779842376709, + 0.9005484580993652, -1.9029953479766846, 0.8710794448852539, -1.9054111242294312, -1.8803634643554688, + 2.1661579608917236, 1.7134947776794434, -1.5250005722045898, 1.6716957092285156, -1.0069535970687866, + -1.486573576927185, -1.328187108039856, -1.297916054725647, -0.8599594831466675, -0.1792980432510376, + -0.26380985975265503, -0.25473490357398987, 2.2082, 0.8710794448852539, -1.9054111242294312, + 0.9005484580993652, 1.9029953479766846, 0.8710794448852539, -1.9054111242294312, -1.8803634643554688, + 2.1661579608917236 + ], + "dims": [5, 15], + "type": "float32" + }, + { + "data": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], + "dims": [15], + "type": "float32" + } + ], + "outputs": [ + { + "data": [ + -1.5670859813690186, -3.7310283184051514, -2.7460145950317383, 0.8121700286865234, -3.350031852722168, + -1.5735238790512085, -3.7310383319854736, 6.124307632446289, 0.7840213775634766, -0.7250789403915405, + -1.565433382987976, -3.731032371520996, -2.7436347007751465, 1.0472451448440552, -2.7828547954559326 + ], + "dims": [1, 3, 5], + "type": "float32" + } + ] + } + ] + }, + { + "name": "Attention Basic 5 head, batch 3", + "operator": "Attention", + "opset": { "domain": "com.microsoft", "version": 1 }, + "attributes": [{ "name": "num_heads", "data": 5, "type": "int" }], + "cases": [ + { + "name": "T[0]", + "inputs": [ + { + "data": [ + 0.3367, 0.1288, 0.2345, 0.2303, -1.1229, -0.1863, 2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094, + 0.8701779842376709, 0.9005484580993652, -1.9029953479766846, 0.3367, 0.1288, 0.2345, 0.2303, -1.1229, + -0.1863, 2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094, 0.8701779842376709, 0.9005484580993652, + -1.9029953479766846, 0.8710794448852539, -1.9054111242294312, -1.8803634643554688, 2.1661579608917236, + 1.7134947776794434, -1.5250005722045898, 1.6716957092285156, -1.0069535970687866, -1.486573576927185, + -1.328187108039856, -1.297916054725647, -0.8599594831466675, -0.1792980432510376, -0.26380985975265503, + -0.25473490357398987 + ], + "dims": [3, 3, 5], + "type": "float32" + }, + { + "data": [ + 2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094, 0.2345, 0.2303, 0.4617, 1.44, -2.22, 3.6643, + 0.8710794448852539, -1.9054111242294312, 0.9005484580993652, 0.8701779842376709, 0.9005484580993652, + -1.9029953479766846, 0.8710794448852539, -1.9054111242294312, -1.8803634643554688, 2.1661579608917236, + 1.7134947776794434, -1.5250005722045898, 1.6716957092285156, -1.0069535970687866, -1.486573576927185, + -1.328187108039856, 
-1.297916054725647, -0.8599594831466675, -0.1792980432510376, -0.26380985975265503, + -0.25473490357398987, 2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094, 0.2345, 0.2303, 0.4617, 1.44, -2.22, + 3.6643, 0.8710794448852539, -1.9054111242294312, 0.9005484580993652, 0.8701779842376709, + 0.9005484580993652, -1.9029953479766846, 0.8710794448852539, -1.9054111242294312, -1.8803634643554688, + 2.1661579608917236, 1.7134947776794434, -1.5250005722045898, 1.6716957092285156, -1.0069535970687866, + -1.486573576927185, -1.328187108039856, -1.297916054725647, -0.8599594831466675, -0.1792980432510376, + -0.26380985975265503, -0.25473490357398987, 2.2082, 0.8710794448852539, -1.9054111242294312, + 0.9005484580993652, 1.9029953479766846, 0.8710794448852539, -1.9054111242294312, -1.8803634643554688, + 2.1661579608917236 + ], + "dims": [5, 15], + "type": "float32" + }, + { + "data": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], + "dims": [15], + "type": "float32" + } + ], + "outputs": [ + { + "data": [ + -1.5670859813690186, -3.7310283184051514, -2.7460145950317383, 0.8121700286865234, -3.350031852722168, + -1.5735238790512085, -3.7310383319854736, 6.124307632446289, 0.7840213775634766, -0.7250789403915405, + -1.565433382987976, -3.731032371520996, -2.7436347007751465, 1.0472451448440552, -2.7828547954559326, + -1.5670859813690186, -3.7310283184051514, -2.7460145950317383, 0.8121700286865234, -3.350031852722168, + -1.5735238790512085, -3.7310383319854736, 6.124307632446289, 0.7840213775634766, -0.7250789403915405, + -1.565433382987976, -3.731032371520996, -2.7436347007751465, 1.0472451448440552, -2.7828547954559326, + 3.7965505123138428, -2.3799397945404053, -3.9530906677246094, 0.5844926834106445, -2.9756431579589844, + 2.448162794113159, 4.34546422958374, 1.9380426406860352, 0.5870105624198914, -2.7368364334106445, + -0.4769568145275116, 4.255186557769775, -3.9529950618743896, 0.6987408995628357, -2.9756433963775635 + ], + "dims": [3, 3, 5], + "type": "float32" + } + ] + } + ] + }, + { + "name": "Attention Basic 5 head, batch 3", + "operator": "Attention", + "opset": { "domain": "com.microsoft", "version": 1 }, + "attributes": [{ "name": "num_heads", "data": 5, "type": "int" }], + "cases": [ + { + "name": "T[0]", + "inputs": [ + { + "data": [ + 0.3367, 0.1288, 0.2345, 0.2303, -1.1229, -0.1863, 2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094, + 0.8701779842376709, 0.9005484580993652, -1.9029953479766846, 0.3367, 0.1288, 0.2345, 0.2303, -1.1229, + -0.1863, 2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094, 0.8701779842376709, 0.9005484580993652, + -1.9029953479766846, 0.8710794448852539, -1.9054111242294312, -1.8803634643554688, 2.1661579608917236, + 1.7134947776794434, -1.5250005722045898, 1.6716957092285156, -1.0069535970687866, -1.486573576927185, + -1.328187108039856, -1.297916054725647, -0.8599594831466675, -0.1792980432510376, -0.26380985975265503, + -0.25473490357398987, 0.3367, 0.1288, 0.2345, 0.2303, -1.1229, -0.1863, 2.2082, -0.638, 0.4617, 0.2674, + 0.5349, 0.8094, 0.8701779842376709, 0.9005484580993652, -1.9029953479766846, 0.3367, 0.1288, 0.2345, + 0.2303, -1.1229, -0.1863, 2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094, 0.8701779842376709, + 0.9005484580993652, -1.9029953479766846, 0.8710794448852539, -1.9054111242294312, -1.8803634643554688, + 2.1661579608917236, 1.7134947776794434, -1.5250005722045898, 1.6716957092285156, -1.0069535970687866, + -1.486573576927185, -1.328187108039856, -1.297916054725647, -0.8599594831466675, -0.1792980432510376, + -0.26380985975265503, -0.25473490357398987 + 
], + "dims": [3, 3, 10], + "type": "float32" + }, + { + "data": [ + 2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094, 0.2345, 0.2303, 0.4617, 1.44, -2.22, 3.6643, + 0.8710794448852539, -1.9054111242294312, 0.9005484580993652, 0.8701779842376709, 0.9005484580993652, + -1.9029953479766846, 0.8710794448852539, -1.9054111242294312, -1.8803634643554688, 2.1661579608917236, + 1.7134947776794434, -1.5250005722045898, 1.6716957092285156, -1.0069535970687866, -1.486573576927185, + -1.328187108039856, -1.297916054725647, -0.8599594831466675, -0.1792980432510376, -0.26380985975265503, + -0.25473490357398987, 2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094, 0.2345, 0.2303, 0.4617, 1.44, -2.22, + 3.6643, 0.8710794448852539, -1.9054111242294312, 0.9005484580993652, 0.8701779842376709, + 0.9005484580993652, -1.9029953479766846, 0.8710794448852539, -1.9054111242294312, -1.8803634643554688, + 2.1661579608917236, 1.7134947776794434, -1.5250005722045898, 1.6716957092285156, -1.0069535970687866, + -1.486573576927185, -1.328187108039856, -1.297916054725647, -0.8599594831466675, -0.1792980432510376, + -0.26380985975265503, -0.25473490357398987, 2.2082, 0.8710794448852539, -1.9054111242294312, + 0.9005484580993652, 1.9029953479766846, 0.8710794448852539, -1.9054111242294312, -1.8803634643554688, + 2.1661579608917236, 2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094, 0.2345, 0.2303, 0.4617, 1.44, -2.22, + 3.6643, 0.8710794448852539, -1.9054111242294312, 0.9005484580993652, 0.8701779842376709, + 0.9005484580993652, -1.9029953479766846, 0.8710794448852539, -1.9054111242294312, -1.8803634643554688, + 2.1661579608917236, 1.7134947776794434, -1.5250005722045898, 1.6716957092285156, -1.0069535970687866, + -1.486573576927185, -1.328187108039856, -1.297916054725647, -0.8599594831466675, -0.1792980432510376, + -0.26380985975265503, -0.25473490357398987, 2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094, 0.2345, + 0.2303, 0.4617, 1.44, -2.22, 3.6643, 0.8710794448852539, -1.9054111242294312, 0.9005484580993652, + 0.8701779842376709, 0.9005484580993652, -1.9029953479766846, 0.8710794448852539, -1.9054111242294312, + -1.8803634643554688, 2.1661579608917236, 1.7134947776794434, -1.5250005722045898, 1.6716957092285156, + -1.0069535970687866, -1.486573576927185, -1.328187108039856, -1.297916054725647, -0.8599594831466675, + -0.1792980432510376, -0.26380985975265503, -0.25473490357398987, 2.2082, 0.8710794448852539, + -1.9054111242294312, 0.9005484580993652, 1.9029953479766846, 0.8710794448852539, -1.9054111242294312, + -1.8803634643554688, 2.1661579608917236 + ], + "dims": [10, 15], + "type": "float32" + }, + { + "data": [ + -1.5670859813690186, -3.7310283184051514, -2.7460145950317383, 0.8121700286865234, -3.350031852722168, + -1.5735238790512085, -3.7310383319854736, 6.124307632446289, 0.7840213775634766, -0.7250789403915405, + -1.565433382987976, -3.731032371520996, -2.7436347007751465, 1.0472451448440552, -2.7828547954559326 + ], + "dims": [15], + "type": "float32" + } + ], + "outputs": [ + { + "data": [ + -8.01101303100586, -5.782258987426758, 6.016238689422607, 0.26747000217437744, -6.992541313171387, + -8.011263847351074, -5.782248020172119, 5.366001129150391, 0.26747000217437744, -6.99449348449707, + -8.011263847351074, -5.782265663146973, 6.016238689422607, 0.26747000217437744, -6.992537021636963, + -6.102723598480225, -7.28973388671875, -4.578637599945068, 7.2203369140625, -6.028444766998291, + -6.102705478668213, -7.2897748947143555, -3.7882626056671143, 5.393260478973389, -5.754333972930908, + -1.3616288900375366, 
-7.289827823638916, -6.341128349304199, 6.329389572143555, -5.751791954040527, + -2.3945987224578857, -14.532954216003418, 3.969801902770996, 12.744998931884766, -11.1966552734375, + -2.4002532958984375, -14.538958549499512, -6.684961318969727, 12.476543426513672, -9.24352741241455, + -4.787771701812744, -8.640848159790039, 3.969801902770996, -0.6471102833747864, -11.1966552734375 + ], + "dims": [3, 3, 5], + "type": "float32" + } + ] + } + ] + }, + { + "name": "Attention Basic 1 head, batch 3", + "operator": "Attention", + "opset": { "domain": "com.microsoft", "version": 1 }, + "attributes": [{ "name": "num_heads", "data": 1, "type": "int" }], + "cases": [ + { + "name": "T[0]", + "inputs": [ + { + "data": [ + 0.3367, 0.1288, 0.2345, 0.2303, -1.1229, -0.1863, 2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094, + 0.8701779842376709, 0.9005484580993652, -1.9029953479766846, 0.3367, 0.1288, 0.2345, 0.2303, -1.1229, + -0.1863, 2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094, 0.8701779842376709, 0.9005484580993652, + -1.9029953479766846, 0.8710794448852539, -1.9054111242294312, -1.8803634643554688, 2.1661579608917236, + 1.7134947776794434, -1.5250005722045898, 1.6716957092285156, -1.0069535970687866, -1.486573576927185, + -1.328187108039856, -1.297916054725647, -0.8599594831466675, -0.1792980432510376, -0.26380985975265503, + -0.25473490357398987, 0.3367, 0.1288, 0.2345, 0.2303, -1.1229, -0.1863, 2.2082, -0.638, 0.4617, 0.2674, + 0.5349, 0.8094, 0.8701779842376709, 0.9005484580993652, -1.9029953479766846, 0.3367, 0.1288, 0.2345, + 0.2303, -1.1229, -0.1863, 2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094, 0.8701779842376709, + 0.9005484580993652, -1.9029953479766846, 0.8710794448852539, -1.9054111242294312, -1.8803634643554688, + 2.1661579608917236, 1.7134947776794434, -1.5250005722045898, 1.6716957092285156, -1.0069535970687866, + -1.486573576927185, -1.328187108039856, -1.297916054725647, -0.8599594831466675, -0.1792980432510376, + -0.26380985975265503, -0.25473490357398987 + ], + "dims": [3, 3, 10], + "type": "float32" + }, + { + "data": [ + 2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094, 0.2345, 0.2303, 0.4617, 1.44, -2.22, 3.6643, + 0.8710794448852539, -1.9054111242294312, 0.9005484580993652, 0.8701779842376709, 0.9005484580993652, + -1.9029953479766846, 0.8710794448852539, -1.9054111242294312, -1.8803634643554688, 2.1661579608917236, + 1.7134947776794434, -1.5250005722045898, 1.6716957092285156, -1.0069535970687866, -1.486573576927185, + -1.328187108039856, -1.297916054725647, -0.8599594831466675, -0.1792980432510376, -0.26380985975265503, + -0.25473490357398987, 2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094, 0.2345, 0.2303, 0.4617, 1.44, -2.22, + 3.6643, 0.8710794448852539, -1.9054111242294312, 0.9005484580993652, 0.8701779842376709, + 0.9005484580993652, -1.9029953479766846, 0.8710794448852539, -1.9054111242294312, -1.8803634643554688, + 2.1661579608917236, 1.7134947776794434, -1.5250005722045898, 1.6716957092285156, -1.0069535970687866, + -1.486573576927185, -1.328187108039856, -1.297916054725647, -0.8599594831466675, -0.1792980432510376, + -0.26380985975265503, -0.25473490357398987, 2.2082, 0.8710794448852539, -1.9054111242294312, + 0.9005484580993652, 1.9029953479766846, 0.8710794448852539, -1.9054111242294312, -1.8803634643554688, + 2.1661579608917236, 2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094, 0.2345, 0.2303, 0.4617, 1.44, -2.22, + 3.6643, 0.8710794448852539, -1.9054111242294312, 0.9005484580993652, 0.8701779842376709, + 0.9005484580993652, -1.9029953479766846, 0.8710794448852539, 
-1.9054111242294312, -1.8803634643554688, + 2.1661579608917236, 1.7134947776794434, -1.5250005722045898, 1.6716957092285156, -1.0069535970687866, + -1.486573576927185, -1.328187108039856, -1.297916054725647, -0.8599594831466675, -0.1792980432510376, + -0.26380985975265503, -0.25473490357398987, 2.2082, -0.638, 0.4617, 0.2674, 0.5349, 0.8094, 0.2345, + 0.2303, 0.4617, 1.44, -2.22, 3.6643, 0.8710794448852539, -1.9054111242294312, 0.9005484580993652, + 0.8701779842376709, 0.9005484580993652, -1.9029953479766846, 0.8710794448852539, -1.9054111242294312, + -1.8803634643554688, 2.1661579608917236, 1.7134947776794434, -1.5250005722045898, 1.6716957092285156, + -1.0069535970687866, -1.486573576927185, -1.328187108039856, -1.297916054725647, -0.8599594831466675, + -0.1792980432510376, -0.26380985975265503, -0.25473490357398987, 2.2082, 0.8710794448852539, + -1.9054111242294312, 0.9005484580993652, 1.9029953479766846, 0.8710794448852539, -1.9054111242294312, + -1.8803634643554688, 2.1661579608917236 + ], + "dims": [10, 15], + "type": "float32" + }, + { + "data": [ + -1.5670859813690186, -3.7310283184051514, -2.7460145950317383, 0.8121700286865234, -3.350031852722168, + -1.5735238790512085, -3.7310383319854736, 6.124307632446289, 0.7840213775634766, -0.7250789403915405, + -1.565433382987976, -3.731032371520996, -2.7436347007751465, 1.0472451448440552, -2.7828547954559326 + ], + "dims": [15], + "type": "float32" + } + ], + "outputs": [ + { + "data": [ + -8.011263847351074, -5.7822418212890625, 6.016238689422607, 0.26747000217437744, -6.992536544799805, + -8.011263847351074, -5.7822418212890625, 6.016238689422607, 0.26747000217437744, -6.992536544799805, + -8.011263847351074, -5.7822418212890625, 6.016238689422607, 0.26747000217437744, -6.992536544799805, + 1.3541864156723022, -7.813620090484619, -6.758509635925293, 7.597365856170654, -13.926229476928711, + -1.322464108467102, -7.297357559204102, -0.05962071940302849, 6.347561836242676, -5.869992256164551, + -1.3616288900375366, -7.28973388671875, 0.0386197566986084, 6.329389572143555, -5.751791954040527, + -2.400698661804199, -14.538958549499512, -7.898950576782227, 12.744998931884766, -11.1966552734375, + -2.400698661804199, -14.538958549499512, -7.898950576782227, 12.744998931884766, -11.1966552734375, + 1.021930456161499, -2.373898983001709, 3.8501391410827637, -0.6108309626579285, -9.256340980529785 + ], + "dims": [3, 3, 5], + "type": "float32" + } + ] + } + ] + } +] diff --git a/js/web/test/data/ops/multi-head-attention.jsonc b/js/web/test/data/ops/multi-head-attention.jsonc new file mode 100644 index 0000000000000..05687bd482e24 --- /dev/null +++ b/js/web/test/data/ops/multi-head-attention.jsonc @@ -0,0 +1,194 @@ +[ + { + "name": "MultiHeadAttention Basic, one head", + "operator": "MultiHeadAttention", + "opset": { "domain": "com.microsoft", "version": 1 }, + "attributes": [{ "name": "num_heads", "data": 1, "type": "int" }], + "cases": [ + { + "name": "T[0]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5, 6, 7, 8], + "dims": [1, 2, 4], + "type": "float32" + }, + { + "data": [1, 1, 1, 1, 2, 2, 2, 2], + "dims": [1, 2, 4], + "type": "float32" + }, + { + "data": [1, 2, 3, 4, 5, 6, 7, 8], + "dims": [1, 2, 4], + "type": "float32" + } + ], + "outputs": [ + { + "data": [ + 4.973228454589844, 5.973228454589844, 6.973228454589844, 7.973228454589844, 4.999990940093994, + 5.999990940093994, 6.999990940093994, 7.999990940093994 + ], + "dims": [1, 2, 4], + "type": "float32" + } + ] + } + ] + }, + { + "name": "MultiHeadAttention Basic", + "operator": 
"MultiHeadAttention", + "opset": { "domain": "com.microsoft", "version": 1 }, + "attributes": [{ "name": "num_heads", "data": 2, "type": "int" }], + "cases": [ + { + "name": "T[0]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5, 6, 7, 8], + "dims": [1, 2, 4], + "type": "float32" + }, + { + "data": [1, 1, 1, 1, 2, 2, 2, 2], + "dims": [1, 2, 4], + "type": "float32" + }, + { + "data": [1, 2, 3, 4, 5, 6, 7, 8], + "dims": [1, 2, 4], + "type": "float32" + } + ], + "outputs": [ + { + "data": [ + 4.571832656860352, 5.571832656860352, 6.971858501434326, 7.971858501434326, 4.998325824737549, + 5.998325824737549, 6.999900817871094, 7.999900817871094 + ], + "dims": [1, 2, 4], + "type": "float32" + } + ] + } + ] + }, + { + "name": "MultiHeadAttention Basic with bias", + "operator": "MultiHeadAttention", + "opset": { "domain": "com.microsoft", "version": 1 }, + "attributes": [{ "name": "num_heads", "data": 2, "type": "int" }], + "cases": [ + { + "name": "T[0]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5, 6, 7, 8], + "dims": [1, 2, 4], + "type": "float32" + }, + { + "data": [1, 1, 1, 1, 2, 2, 2, 2], + "dims": [1, 2, 4], + "type": "float32" + }, + { + "data": [1, 2, 3, 4, 5, 6, 7, 8], + "dims": [1, 2, 4], + "type": "float32" + }, + { + "data": [1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4], + "dims": [12], + "type": "float32" + } + ], + "outputs": [ + { + "data": [ + 5.943336009979248, 7.94333553314209, 9.999799728393555, 11.999798774719238, 5.9997992515563965, + 7.9997992515563965, 10, 11.999999046325684 + ], + "dims": [1, 2, 4], + "type": "float32" + } + ] + } + ] + }, + { + "name": "MultiHeadAttention two heads", + "operator": "MultiHeadAttention", + "opset": { "domain": "com.microsoft", "version": 1 }, + "attributes": [{ "name": "num_heads", "data": 2, "type": "int" }], + "cases": [ + { + "name": "T[0]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + "dims": [1, 2, 8], + "type": "float32" + }, + { + "data": [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4], + "dims": [1, 2, 8], + "type": "float32" + }, + { + "data": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + "dims": [1, 2, 8], + "type": "float32" + } + ], + "outputs": [ + { + "data": [ + 8.99963665008545, 9.99963665008545, 10.99963665008545, 11.999635696411133, 13, 14, 15, 16, 9, 10, 11, 12, + 13, 14, 15, 16 + ], + "dims": [1, 2, 8], + "type": "float32" + } + ] + } + ] + }, + { + "name": "MultiHeadAttention two heads", + "operator": "MultiHeadAttention", + "opset": { "domain": "com.microsoft", "version": 1 }, + "attributes": [{ "name": "num_heads", "data": 2, "type": "int" }], + "cases": [ + { + "name": "T[1]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + "dims": [1, 2, 8], + "type": "float32" + }, + { + "data": [1, 1, 1, 1, 2, 2, 2, 2], + "dims": [1, 1, 8], + "type": "float32" + }, + { + "data": [1, 2, 3, 4, 5, 6, 7, 8], + "dims": [1, 1, 8], + "type": "float32" + } + ], + "outputs": [ + { + "data": [1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8], + "dims": [1, 2, 8], + "type": "float32" + } + ] + } + ] + } +] diff --git a/js/web/test/suite-test-list.jsonc b/js/web/test/suite-test-list.jsonc index c80f0b04a9abc..37aa9394c7f96 100644 --- a/js/web/test/suite-test-list.jsonc +++ b/js/web/test/suite-test-list.jsonc @@ -1336,6 +1336,7 @@ "add_int32.jsonc", //"and.jsonc", "asin.jsonc", + "attention.jsonc", "bias-add.jsonc", "bias-split-gelu.jsonc", "ceil.jsonc", @@ -1362,6 +1363,7 @@ "matmul-broadcast.jsonc", "mul.jsonc", "mul_int32.jsonc", + "multi-head-attention.jsonc", 
//"neg.jsonc", "neg-int32.jsonc", "not.jsonc", diff --git a/onnxruntime/contrib_ops/js/bert/attention.cc b/onnxruntime/contrib_ops/js/bert/attention.cc new file mode 100644 index 0000000000000..723ff00aa815e --- /dev/null +++ b/onnxruntime/contrib_ops/js/bert/attention.cc @@ -0,0 +1,24 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#include "attention.h" +#include "core/providers/js/js_data_types.h" + +namespace onnxruntime { +namespace contrib { +namespace js { + +using onnxruntime::js::JsepSupportedFloatTypes; + +ONNX_OPERATOR_KERNEL_EX( + Attention, + kMSDomain, + 1, + kJsExecutionProvider, + (*KernelDefBuilder::Create()) + .TypeConstraint("T", JsepSupportedFloatTypes()), + Attention); + +} // namespace js +} // namespace contrib +} // namespace onnxruntime diff --git a/onnxruntime/contrib_ops/js/bert/attention.h b/onnxruntime/contrib_ops/js/bert/attention.h new file mode 100644 index 0000000000000..0fa823befa9b2 --- /dev/null +++ b/onnxruntime/contrib_ops/js/bert/attention.h @@ -0,0 +1,47 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#pragma once + +#include "contrib_ops/cpu/bert/attention_base.h" +#include "core/providers/js/js_kernel.h" + +namespace onnxruntime { +namespace contrib { +namespace js { + +using onnxruntime::contrib::AttentionBase; +using onnxruntime::js::JsKernel; + +class Attention : public JsKernel, AttentionBase { + public: + explicit Attention(const OpKernelInfo& info) : JsKernel(info), AttentionBase(info, false) { + std::vector qkv_sizes(qkv_hidden_sizes_.size()); + if (qkv_hidden_sizes_.size() > 0) { + std::transform(qkv_hidden_sizes_.begin(), qkv_hidden_sizes_.end(), qkv_sizes.begin(), + [](int64_t sz) { return gsl::narrow_cast(sz); }); + } + + JSEP_INIT_KERNEL_ATTRIBUTE(Attention, ({ + "numHeads" : $1, + "isUnidirectional" : $2, + "maskFilterValue" : $3, + "scale" : $4, + "doRotary" : $5, + "qkvHiddenSizes" : $6 ? (Array.from(HEAP32.subarray(Number($7), Number($7) + $6))) : [], + "pastPresentShareBuffer" : !!$8, + }), + static_cast(num_heads_), + static_cast(is_unidirectional_), + static_cast(mask_filter_value_), + static_cast(scale_), + static_cast(do_rotary_), + static_cast(qkv_hidden_sizes_.size()), + reinterpret_cast((qkv_sizes.size() > 0) ? qkv_sizes.data() : nullptr) >> 2, + static_cast(past_present_share_buffer_)); + } +}; + +} // namespace js +} // namespace contrib +} // namespace onnxruntime diff --git a/onnxruntime/contrib_ops/js/bert/multi_head_attention.cc b/onnxruntime/contrib_ops/js/bert/multi_head_attention.cc new file mode 100644 index 0000000000000..c43f8b7f18465 --- /dev/null +++ b/onnxruntime/contrib_ops/js/bert/multi_head_attention.cc @@ -0,0 +1,24 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +#include "multi_head_attention.h" +#include "core/providers/js/js_data_types.h" + +namespace onnxruntime { +namespace contrib { +namespace js { + +using onnxruntime::js::JsepSupportedFloatTypes; + +ONNX_OPERATOR_KERNEL_EX( + MultiHeadAttention, + kMSDomain, + 1, + kJsExecutionProvider, + (*KernelDefBuilder::Create()) + .TypeConstraint("T", JsepSupportedFloatTypes()), + MultiHeadAttention); + +} // namespace js +} // namespace contrib +} // namespace onnxruntime diff --git a/onnxruntime/contrib_ops/js/bert/multi_head_attention.h b/onnxruntime/contrib_ops/js/bert/multi_head_attention.h new file mode 100644 index 0000000000000..6c63a2ffed4b2 --- /dev/null +++ b/onnxruntime/contrib_ops/js/bert/multi_head_attention.h @@ -0,0 +1,36 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#pragma once + +#include "contrib_ops/cpu/bert/attention_base.h" +#include "core/providers/js/js_kernel.h" + +namespace onnxruntime { +namespace contrib { +namespace js { + +using onnxruntime::contrib::AttentionBase; +using onnxruntime::js::JsKernel; + +class MultiHeadAttention : public JsKernel, AttentionBase { + public: + explicit MultiHeadAttention(const OpKernelInfo& info) : JsKernel(info), AttentionBase(info, false) { + JSEP_INIT_KERNEL_ATTRIBUTE(MultiHeadAttention, ({ + "numHeads" : $1, + "isUnidirectional" : $2, + "maskFilterValue" : $3, + "scale" : $4, + "doRotary" : $5, + }), + static_cast(num_heads_), + static_cast(is_unidirectional_), + static_cast(mask_filter_value_), + static_cast(scale_), + static_cast(do_rotary_)); + } +}; + +} // namespace js +} // namespace contrib +} // namespace onnxruntime diff --git a/onnxruntime/contrib_ops/js/js_contrib_kernels.cc b/onnxruntime/contrib_ops/js/js_contrib_kernels.cc index 24d327576ecd9..498a9f5679eb5 100644 --- a/onnxruntime/contrib_ops/js/js_contrib_kernels.cc +++ b/onnxruntime/contrib_ops/js/js_contrib_kernels.cc @@ -7,7 +7,9 @@ namespace onnxruntime { namespace contrib { namespace js { +class ONNX_OPERATOR_KERNEL_CLASS_NAME(kJsExecutionProvider, kMSDomain, 1, Attention); class ONNX_OPERATOR_KERNEL_CLASS_NAME(kJsExecutionProvider, kMSDomain, 1, Gelu); +class ONNX_OPERATOR_KERNEL_CLASS_NAME(kJsExecutionProvider, kMSDomain, 1, MultiHeadAttention); class ONNX_OPERATOR_KERNEL_CLASS_NAME(kJsExecutionProvider, kMSDomain, 1, BiasSplitGelu); class ONNX_OPERATOR_KERNEL_CLASS_NAME(kJsExecutionProvider, kMSDomain, 1, BiasAdd); class ONNX_OPERATOR_KERNEL_CLASS_NAME(kJsExecutionProvider, kMSDomain, 1, SkipLayerNormalization); @@ -21,7 +23,9 @@ KernelCreateInfo BuildKernelCreateInfo() { Status RegisterJsContribKernels(KernelRegistry& kernel_registry) { static const BuildKernelCreateInfoFn function_table[] = { + BuildKernelCreateInfo, BuildKernelCreateInfo, + BuildKernelCreateInfo, BuildKernelCreateInfo, BuildKernelCreateInfo, BuildKernelCreateInfo Date: Fri, 17 Nov 2023 14:14:01 -0800 Subject: [PATCH 28/38] Update NDK version to 26.1.10909125 (#18493) ### Description Similar to #17852 ### Motivation and Context To avoid downloading NDK --- tools/android_custom_build/Dockerfile | 2 +- .../github/azure-pipelines/templates/use-android-ndk.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/android_custom_build/Dockerfile b/tools/android_custom_build/Dockerfile index 66b6a36e5a8c0..754a6633b0c62 100644 --- a/tools/android_custom_build/Dockerfile +++ b/tools/android_custom_build/Dockerfile @@ -55,7 +55,7 @@ WORKDIR /workspace # install Android SDK and tools ENV ANDROID_HOME=~/android-sdk -ENV 
NDK_VERSION=26.0.10792818 +ENV NDK_VERSION=26.1.10909125 ENV ANDROID_NDK_HOME=${ANDROID_HOME}/ndk/${NDK_VERSION} RUN aria2c -q -d /tmp -o cmdline-tools.zip \ diff --git a/tools/ci_build/github/azure-pipelines/templates/use-android-ndk.yml b/tools/ci_build/github/azure-pipelines/templates/use-android-ndk.yml index 8cc7f63a193cc..b8dba89b0b899 100644 --- a/tools/ci_build/github/azure-pipelines/templates/use-android-ndk.yml +++ b/tools/ci_build/github/azure-pipelines/templates/use-android-ndk.yml @@ -3,7 +3,7 @@ parameters: - name: AndroidNdkVersion type: string - default: "26.0.10792818" # LTS version + default: "26.1.10909125" # LTS version steps: - bash: | From cbb85b48749a42d6120ac78e40fcc9930814ab37 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicol=C3=B2=20Lucchesi?= Date: Sat, 18 Nov 2023 02:58:49 +0100 Subject: [PATCH 29/38] [CoreML] Adapt to `MLMultiArray.dataPointer` deprecation (#17726) ### Description This PR addresses https://github.com/microsoft/onnxruntime/issues/17652. The deprecated `MLMultiArray.dataPointer` is replaced with `.getBytesWithHandler`, as suggested by the docs. For now, I am only checking that the output `MLMultiArray` is contiguous, returning unsupported operation when that is not the case. I think this is already better than what we have right now, so we can block unsafe calls to `.dataPointer` (if any..). I would be happy to implement the handling of the non-contiguous case (replacing `memcpy` for such cases) as suggested by @edgchen1, but I am not sure how to reproduce that case to add a corresponding unit-test. Would we have to define a custom `MLCustomLayer` to get a non-contiguous output from a model..? ### Motivation and Context Fix https://github.com/microsoft/onnxruntime/issues/17652. --------- Co-authored-by: nicolo-lucchesi Co-authored-by: Edward Chen <18449977+edgchen1@users.noreply.github.com> --- .../core/providers/coreml/model/model.mm | 107 ++++++++++++------ 1 file changed, 71 insertions(+), 36 deletions(-) diff --git a/onnxruntime/core/providers/coreml/model/model.mm b/onnxruntime/core/providers/coreml/model/model.mm index 60e0b1c061a43..4a6743e9e5c52 100644 --- a/onnxruntime/core/providers/coreml/model/model.mm +++ b/onnxruntime/core/providers/coreml/model/model.mm @@ -8,6 +8,7 @@ #include #include +#include #include #include @@ -169,6 +170,60 @@ Status CreateInputFeatureProvider(const std::unordered_map mlmultiarray_buffer_size) { + if (mlmultiarray_buffer == nullptr) { + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "mlmultiarray_buffer has no data"); + } + + const size_t num_elements = array_info.count; + const auto onnx_data_type = tensor_info->data_type; + switch (onnx_data_type) { + case ONNX_NAMESPACE::TensorProto_DataType_FLOAT: { + const auto output_data_byte_size = num_elements * sizeof(float); + ORT_RETURN_IF_NOT(!mlmultiarray_buffer_size || mlmultiarray_buffer_size == output_data_byte_size, + "CoreML output buffer size and expected output size differ"); + memcpy(tensor_buffer, mlmultiarray_buffer, output_data_byte_size); + break; + } + case ONNX_NAMESPACE::TensorProto_DataType_INT32: { + const auto output_data_byte_size = num_elements * sizeof(int32_t); + ORT_RETURN_IF_NOT(!mlmultiarray_buffer_size || mlmultiarray_buffer_size == output_data_byte_size, + "CoreML output buffer size and expected output size differ"); + memcpy(tensor_buffer, mlmultiarray_buffer, output_data_byte_size); + break; + } + // For this case, since Coreml Spec only uses int32 for model output while onnx provides + // int64 for model output data type. 
We are doing a type casting (int32 -> int64) here + // when copying the model to ORT + case ONNX_NAMESPACE::TensorProto_DataType_INT64: { + ORT_RETURN_IF_NOT(array_info.dataType == MLMultiArrayDataTypeInt32, + "CoreML output data type is not MLMultiArrayDataTypeInt32"); + ORT_RETURN_IF_NOT(!mlmultiarray_buffer_size || mlmultiarray_buffer_size == num_elements * sizeof(int32_t), + "CoreML output buffer size and expected output size differ"); + const auto model_output_span = gsl::span{static_cast(mlmultiarray_buffer), num_elements}; + const auto output_span = gsl::span{static_cast(tensor_buffer), num_elements}; + std::transform(model_output_span.begin(), model_output_span.end(), output_span.begin(), + [](int32_t v) { return static_cast(v); }); + break; + } + default: + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, + "Output data type is not supported, actual type: ", onnx_data_type); + } + return Status::OK(); +} } // namespace NS_ASSUME_NONNULL_BEGIN @@ -298,9 +353,9 @@ - (Status)predict:(const std::unordered_map&)inputs return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "output_features has no value for ", output_name); } - auto* data = [output_value multiArrayValue]; + MLMultiArray* data = [output_value multiArrayValue]; - const auto coreml_static_output_shape = [&]() { + const auto coreml_static_output_shape = [data]() { InlinedVector result; result.reserve(data.shape.count); for (NSNumber* dim in data.shape) { @@ -324,41 +379,21 @@ - (Status)predict:(const std::unordered_map&)inputs ") do not match"); } - const void* model_output_buffer = data.dataPointer; - - if (model_output_buffer == nullptr) { - return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "model_output_buffer has no data for ", output_name); - } - - const auto onnx_data_type = output_tensor_info.data_type; - switch (onnx_data_type) { - case ONNX_NAMESPACE::TensorProto_DataType_FLOAT: { - const auto output_data_byte_size = num_elements * sizeof(float); - memcpy(output_buffer, model_output_buffer, output_data_byte_size); - break; - } - case ONNX_NAMESPACE::TensorProto_DataType_INT32: { - const auto output_data_byte_size = num_elements * sizeof(int32_t); - memcpy(output_buffer, model_output_buffer, output_data_byte_size); - break; - } - // For this case, since Coreml Spec only uses int32 for model output while onnx provides - // int64 for model output data type. 
We are doing a type casting (int32 -> int64) here - // when copying the model to ORT - case ONNX_NAMESPACE::TensorProto_DataType_INT64: { - ORT_RETURN_IF_NOT(data.dataType == MLMultiArrayDataTypeInt32, - "CoreML output data type is not MLMultiArrayDataTypeInt32"); - - const auto model_output_span = gsl::span{static_cast(model_output_buffer), num_elements}; - const auto output_span = gsl::span{static_cast(output_buffer), num_elements}; - std::transform(model_output_span.begin(), model_output_span.end(), output_span.begin(), - [](int32_t v) { return static_cast(v); }); - break; - } - default: - return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, - "Output data type is not supported, actual type: ", onnx_data_type); + ORT_RETURN_IF_NOT(IsArrayContiguous(data), + "Non-contiguous output MLMultiArray is not currently supported"); + __block Status copy_status; + const auto* tensor_info = &output_tensor_info; + // `getBytesWithHandler` replaces deprecated `.dataPointer` on new versions + if (@available(macOS 12.3, iOS 15.4, *)) { + [data getBytesWithHandler:^(const void* bytes, NSInteger size) { + copy_status = CopyMLMultiArrayBuffer(bytes, output_buffer, data, tensor_info, size); + }]; + } else { + // disable size check as old API does not return buffer length + copy_status = CopyMLMultiArrayBuffer(data.dataPointer, output_buffer, data, tensor_info, std::nullopt); } + if (!copy_status.IsOK()) + return copy_status; } } } From 02333293dec94922585a4aed39bd331128b643a6 Mon Sep 17 00:00:00 2001 From: Ashwini Khade Date: Fri, 17 Nov 2023 18:19:21 -0800 Subject: [PATCH 30/38] Removed all the deprecated python training code and related tests and utils (#18333) ### Description Motivation for this PR is code cleanup. 1. Remove all deprecated python code related to orttrainer, old checkpoint, related tests and utils 2. Cleanup orttraining_pybind_state.cc to remove all deprecated bindings. 
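For reference, the deprecated front-end being removed here is the `onnxruntime.capi.ort_trainer` module (`ORTTrainer`, `ModelDescription`, `IODescription`, `LossScaler`, `save_checkpoint`/`load_checkpoint`) together with the `from onnxruntime.capi.training import *` re-export in `onnxruntime/__init__.py`. A rough sketch of the old usage, reconstructed from the deleted tests further down in this patch (illustrative only; these imports no longer exist after this change):

```python
# Deprecated API removed by this PR -- shown only to illustrate what is going away.
# Reconstructed from the deleted onnxruntime_test_ort_trainer.py; the model path and
# optimizer attributes below are placeholders, not values from this repository.
import onnx
import torch
from onnxruntime.capi.ort_trainer import IODescription, ModelDescription, ORTTrainer

model_desc = ModelDescription(
    [IODescription("input_ids", ["batch", "max_seq_len_in_batch"], torch.int64)],
    [IODescription("loss", [], torch.float32)],
)
lr_desc = IODescription("Learning_Rate", [1], torch.float32)

trainer = ORTTrainer(
    onnx.load("model.onnx"),  # training graph (placeholder path)
    None,                     # loss_fn; None when the loss is already part of the graph
    model_desc,
    "LambOptimizer",
    lambda name: {"alpha": 0.9, "beta": 0.999, "lambda": 0.01, "epsilon": 1e-6},
    lr_desc,
    torch.device("cuda", 0),
)
```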
--- cmake/onnxruntime_python.cmake | 13 - onnxruntime/__init__.py | 1 - .../python/onnxruntime_test_ort_trainer.py | 1026 ------- ...e_test_ort_trainer_with_mixed_precision.py | 102 - .../onnxruntime_test_training_unit_tests.py | 95 - ...nnxruntime_test_training_unittest_utils.py | 56 - .../orttraining/python/checkpointing_utils.py | 127 - .../orttraining/python/deprecated/__init__.py | 6 - .../python/deprecated/training_session.py | 68 - orttraining/orttraining/python/ort_trainer.py | 1241 --------- .../python/orttraining_pybind_state.cc | 329 +-- .../python/orttraining_python_module.cc | 4 +- .../orttraining/python/training/__init__.py | 12 +- .../python/training/_checkpoint_storage.py | 107 - .../orttraining/python/training/_utils.py | 138 - .../orttraining/python/training/checkpoint.py | 748 ----- .../python/training/model_desc_validation.py | 408 --- .../orttraining/python/training/orttrainer.py | 1537 ---------- .../python/training/orttrainer_options.py | 692 ----- .../python/training/postprocess.py | 478 ---- .../test/external_transformers_test.py | 144 - .../test_external_transformers.cc | 35 - .../orttraining/test/python/_test_commons.py | 213 -- .../orttraining/test/python/_test_helpers.py | 120 +- .../python/onnxruntime_test_postprocess.py | 325 --- .../python/orttraining_ortmodule_tests.py | 4 +- .../python/orttraining_run_bert_pretrain.py | 801 ------ ...rttraining_run_frontend_batch_size_test.py | 67 - .../test/python/orttraining_run_glue.py | 323 --- .../python/orttraining_run_multiple_choice.py | 281 -- .../orttraining_test_bert_postprocess.py | 6 - .../orttraining_test_checkpoint_storage.py | 257 -- .../python/orttraining_test_data_loader.py | 12 +- .../python/orttraining_test_debuggability.py | 40 - .../test/python/orttraining_test_ort_apis.py | 4 +- ...=> orttraining_test_ort_apis_onnxblock.py} | 0 ... 
orttraining_test_ort_apis_py_bindings.py} | 2 +- ...py => orttraining_test_ortmodule_hooks.py} | 0 ...=> orttraining_test_ortmodule_onnx_ops.py} | 0 ...ttraining_test_orttrainer_bert_toy_onnx.py | 1283 --------- ...ng_test_orttrainer_checkpoint_functions.py | 722 ----- .../orttraining_test_orttrainer_frontend.py | 2460 ----------------- .../python/orttraining_test_transformers.py | 480 ---- .../test/python/orttraining_test_utils.py | 246 -- .../python/orttraining_transformer_trainer.py | 357 --- .../test/python/utils_multiple_choice.py | 269 -- .../mnist_training.py | 200 -- .../orttrainer/mnist/mnist_original.onnx | Bin 1590610 -> 0 bytes .../training/orttrainer/mnist/ort_mnist.py | 174 -- .../orttrainer/mnist/pytorch_mnist.py | 157 -- .../orttrainer/pytorch_transformer/README.md | 33 - .../pytorch_transformer/ort_train.py | 89 - .../pytorch_transformer/ort_utils.py | 47 - .../pytorch_transformer/pt_model.py | 62 - .../pytorch_transformer/pt_train.py | 94 - .../orttrainer/pytorch_transformer/utils.py | 59 - setup.py | 1 - 57 files changed, 21 insertions(+), 16534 deletions(-) delete mode 100644 onnxruntime/test/python/onnxruntime_test_ort_trainer.py delete mode 100644 onnxruntime/test/python/onnxruntime_test_ort_trainer_with_mixed_precision.py delete mode 100644 onnxruntime/test/python/onnxruntime_test_training_unit_tests.py delete mode 100644 onnxruntime/test/python/onnxruntime_test_training_unittest_utils.py delete mode 100644 orttraining/orttraining/python/checkpointing_utils.py delete mode 100644 orttraining/orttraining/python/deprecated/__init__.py delete mode 100644 orttraining/orttraining/python/deprecated/training_session.py delete mode 100644 orttraining/orttraining/python/ort_trainer.py delete mode 100644 orttraining/orttraining/python/training/_checkpoint_storage.py delete mode 100644 orttraining/orttraining/python/training/checkpoint.py delete mode 100644 orttraining/orttraining/python/training/model_desc_validation.py delete mode 100644 orttraining/orttraining/python/training/orttrainer.py delete mode 100644 orttraining/orttraining/python/training/orttrainer_options.py delete mode 100644 orttraining/orttraining/python/training/postprocess.py delete mode 100644 orttraining/orttraining/test/external_transformer/test/external_transformers_test.py delete mode 100644 orttraining/orttraining/test/external_transformer/test_exeternal_transformers/test_external_transformers.cc delete mode 100644 orttraining/orttraining/test/python/onnxruntime_test_postprocess.py delete mode 100644 orttraining/orttraining/test/python/orttraining_run_bert_pretrain.py delete mode 100644 orttraining/orttraining/test/python/orttraining_run_frontend_batch_size_test.py delete mode 100644 orttraining/orttraining/test/python/orttraining_run_glue.py delete mode 100644 orttraining/orttraining/test/python/orttraining_run_multiple_choice.py delete mode 100644 orttraining/orttraining/test/python/orttraining_test_bert_postprocess.py delete mode 100644 orttraining/orttraining/test/python/orttraining_test_checkpoint_storage.py delete mode 100644 orttraining/orttraining/test/python/orttraining_test_debuggability.py rename orttraining/orttraining/test/python/{orttraining_test_onnxblock.py => orttraining_test_ort_apis_onnxblock.py} (100%) rename orttraining/orttraining/test/python/{orttraining_test_python_bindings.py => orttraining_test_ort_apis_py_bindings.py} (99%) rename orttraining/orttraining/test/python/{orttraining_test_hooks.py => orttraining_test_ortmodule_hooks.py} (100%) rename 
orttraining/orttraining/test/python/{orttraining_test_onnx_ops_ortmodule.py => orttraining_test_ortmodule_onnx_ops.py} (100%) delete mode 100644 orttraining/orttraining/test/python/orttraining_test_orttrainer_bert_toy_onnx.py delete mode 100644 orttraining/orttraining/test/python/orttraining_test_orttrainer_checkpoint_functions.py delete mode 100644 orttraining/orttraining/test/python/orttraining_test_orttrainer_frontend.py delete mode 100644 orttraining/orttraining/test/python/orttraining_test_transformers.py delete mode 100644 orttraining/orttraining/test/python/orttraining_test_utils.py delete mode 100644 orttraining/orttraining/test/python/orttraining_transformer_trainer.py delete mode 100644 orttraining/orttraining/test/python/utils_multiple_choice.py delete mode 100644 orttraining/pytorch_frontend_examples/mnist_training.py delete mode 100644 samples/python/training/orttrainer/mnist/mnist_original.onnx delete mode 100644 samples/python/training/orttrainer/mnist/ort_mnist.py delete mode 100644 samples/python/training/orttrainer/mnist/pytorch_mnist.py delete mode 100644 samples/python/training/orttrainer/pytorch_transformer/README.md delete mode 100644 samples/python/training/orttrainer/pytorch_transformer/ort_train.py delete mode 100644 samples/python/training/orttrainer/pytorch_transformer/ort_utils.py delete mode 100644 samples/python/training/orttrainer/pytorch_transformer/pt_model.py delete mode 100644 samples/python/training/orttrainer/pytorch_transformer/pt_train.py delete mode 100644 samples/python/training/orttrainer/pytorch_transformer/utils.py diff --git a/cmake/onnxruntime_python.cmake b/cmake/onnxruntime_python.cmake index a9a78668b4810..cdfb2139730ad 100644 --- a/cmake/onnxruntime_python.cmake +++ b/cmake/onnxruntime_python.cmake @@ -339,9 +339,6 @@ configure_file(${ONNXRUNTIME_ROOT}/python/_pybind_state.py.in ${CMAKE_BINARY_DIR}/onnxruntime/capi/_pybind_state.py) if (onnxruntime_ENABLE_TRAINING) - file(GLOB onnxruntime_python_capi_training_srcs CONFIGURE_DEPENDS - "${ORTTRAINING_SOURCE_DIR}/python/deprecated/*.py" - ) file(GLOB onnxruntime_python_root_srcs CONFIGURE_DEPENDS "${ORTTRAINING_SOURCE_DIR}/python/training/*.py" ) @@ -419,10 +416,6 @@ if (onnxruntime_ENABLE_TRAINING) "${ORTTRAINING_SOURCE_DIR}/python/training/onnxblock/optim/*" ) endif() -else() - file(GLOB onnxruntime_python_capi_training_srcs CONFIGURE_DEPENDS - "${ONNXRUNTIME_ROOT}/python/training/*.py" - ) endif() if (onnxruntime_BUILD_UNIT_TESTS) @@ -577,9 +570,6 @@ add_custom_command( COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_BINARY_DIR}/onnxruntime/capi/_pybind_state.py $/onnxruntime/capi/ - COMMAND ${CMAKE_COMMAND} -E copy - ${onnxruntime_python_capi_training_srcs} - $/onnxruntime/capi/training/ COMMAND ${CMAKE_COMMAND} -E copy $ $/onnxruntime/capi/ @@ -750,9 +740,6 @@ if (onnxruntime_ENABLE_TRAINING) COMMAND ${CMAKE_COMMAND} -E make_directory $/onnxruntime/training/utils COMMAND ${CMAKE_COMMAND} -E make_directory $/onnxruntime/training/utils/data/ COMMAND ${CMAKE_COMMAND} -E make_directory $/onnxruntime/training/utils/hooks/ - COMMAND ${CMAKE_COMMAND} -E copy - ${onnxruntime_python_capi_training_srcs} - $/onnxruntime/capi/training/ COMMAND ${CMAKE_COMMAND} -E copy ${onnxruntime_python_root_srcs} $/onnxruntime/training/ diff --git a/onnxruntime/__init__.py b/onnxruntime/__init__.py index 0ed7d887fc5e5..57219c50f39aa 100644 --- a/onnxruntime/__init__.py +++ b/onnxruntime/__init__.py @@ -61,7 +61,6 @@ from onnxruntime.capi.onnxruntime_inference_collection import OrtDevice # noqa: F401 from 
onnxruntime.capi.onnxruntime_inference_collection import OrtValue # noqa: F401 from onnxruntime.capi.onnxruntime_inference_collection import SparseTensor # noqa: F401 -from onnxruntime.capi.training import * # noqa: F403 # TODO: thiagofc: Temporary experimental namespace for new PyTorch front-end try: # noqa: SIM105 diff --git a/onnxruntime/test/python/onnxruntime_test_ort_trainer.py b/onnxruntime/test/python/onnxruntime_test_ort_trainer.py deleted file mode 100644 index 4cf2e5d7f7588..0000000000000 --- a/onnxruntime/test/python/onnxruntime_test_ort_trainer.py +++ /dev/null @@ -1,1026 +0,0 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. - -import copy -import os -import unittest - -import numpy as np -import onnx -import torch -import torch.nn as nn -import torch.nn.functional as F -from helper import get_name -from numpy.testing import assert_allclose -from torchvision import datasets, transforms - -import onnxruntime -from onnxruntime.capi.ort_trainer import ( - IODescription, - LossScaler, - ModelDescription, - ORTTrainer, - generate_sample, - load_checkpoint, - save_checkpoint, -) - -SCRIPT_DIR = os.path.realpath(os.path.dirname(__file__)) - - -def ort_trainer_learning_rate_description(): - return IODescription( - "Learning_Rate", - [ - 1, - ], - torch.float32, - ) - - -def remove_extra_info(model_desc): - simple_model_desc = copy.deepcopy(model_desc) - for input_desc in simple_model_desc.inputs_: - input_desc.dtype_ = None - input_desc.num_classes_ = None - for output_desc in simple_model_desc.outputs_: - output_desc.dtype_ = None - output_desc.num_classes_ = None - return simple_model_desc - - -def bert_model_description(): - vocab_size = 30528 - input_ids_desc = IODescription( - "input_ids", - ["batch", "max_seq_len_in_batch"], - torch.int64, - num_classes=vocab_size, - ) - segment_ids_desc = IODescription("segment_ids", ["batch", "max_seq_len_in_batch"], torch.int64, num_classes=2) - input_mask_desc = IODescription("input_mask", ["batch", "max_seq_len_in_batch"], torch.int64, num_classes=2) - masked_lm_labels_desc = IODescription( - "masked_lm_labels", - ["batch", "max_seq_len_in_batch"], - torch.int64, - num_classes=vocab_size, - ) - next_sentence_labels_desc = IODescription( - "next_sentence_labels", - [ - "batch", - ], - torch.int64, - num_classes=2, - ) - loss_desc = IODescription("loss", [], torch.float32) - - return ModelDescription( - [ - input_ids_desc, - segment_ids_desc, - input_mask_desc, - masked_lm_labels_desc, - next_sentence_labels_desc, - ], - [loss_desc], - ) - - -def map_optimizer_attributes(name): - no_decay_keys = ["bias", "gamma", "beta", "LayerNorm"] - no_decay = any(no_decay_key in name for no_decay_key in no_decay_keys) - if no_decay: - return {"alpha": 0.9, "beta": 0.999, "lambda": 0.0, "epsilon": 1e-6} - else: - return {"alpha": 0.9, "beta": 0.999, "lambda": 0.01, "epsilon": 1e-6} - - -def generate_sample_batch(desc, batch_size, device): - desc_ = copy.deepcopy(desc) - desc_.shape_[0] = batch_size - sample = generate_sample(desc_, device) - return sample - - -def create_ort_trainer( - gradient_accumulation_steps, - use_mixed_precision, - allreduce_post_accumulation, - use_simple_model_desc=True, - loss_scaler=None, - deepspeed_zero_stage=0, -): - model_desc = bert_model_description() - simple_model_desc = remove_extra_info(model_desc) if use_simple_model_desc else model_desc - learning_rate_description = ort_trainer_learning_rate_description() - device = torch.device("cuda", 0) - - onnx_model = 
onnx.load(get_name("bert_toy_postprocessed.onnx")) - - model = ORTTrainer( - onnx_model, - None, - simple_model_desc, - "LambOptimizer", - map_optimizer_attributes, - learning_rate_description, - device, - gradient_accumulation_steps=gradient_accumulation_steps, - world_rank=0, - world_size=1, - loss_scaler=loss_scaler, - use_mixed_precision=use_mixed_precision, - allreduce_post_accumulation=allreduce_post_accumulation, - deepspeed_zero_stage=deepspeed_zero_stage, - ) - - return model, model_desc, device - - -def run_bert_training_test( - gradient_accumulation_steps, - use_mixed_precision, - allreduce_post_accumulation, - use_simple_model_desc=True, - use_internel_loss_scale=False, -): - torch.manual_seed(1) - onnxruntime.set_seed(1) - - loss_scaler = LossScaler("ort_test_input_loss_scalar", True) if use_internel_loss_scale else None - - model, model_desc, device = create_ort_trainer( - gradient_accumulation_steps, - use_mixed_precision, - allreduce_post_accumulation, - use_simple_model_desc, - loss_scaler, - ) - - if loss_scaler is None: - loss_scaler = LossScaler(model.loss_scale_input_name, True) - - input_ids_batches = [] - segment_ids_batches = [] - input_mask_batches = [] - masked_lm_labels_batches = [] - next_sentence_labels_batches = [] - batch_size = 16 - num_batches = 8 - for _batch in range(num_batches): - input_ids_batches = [ - *input_ids_batches, - generate_sample_batch(model_desc.inputs_[0], batch_size, device), - ] - segment_ids_batches = [ - *segment_ids_batches, - generate_sample_batch(model_desc.inputs_[1], batch_size, device), - ] - input_mask_batches = [ - *input_mask_batches, - generate_sample_batch(model_desc.inputs_[2], batch_size, device), - ] - masked_lm_labels_batches = [ - *masked_lm_labels_batches, - generate_sample_batch(model_desc.inputs_[3], batch_size, device), - ] - next_sentence_labels_batches = [ - *next_sentence_labels_batches, - generate_sample_batch(model_desc.inputs_[4], batch_size, device), - ] - - lr_batch_list = [ - 0.0000000e00, - 4.6012269e-07, - 9.2024538e-07, - 1.3803681e-06, - 1.8404908e-06, - 2.3006135e-06, - 2.7607362e-06, - 3.2208588e-06, - 3.6809815e-06, - ] - - actual_losses = [] - actual_all_finites = [] - - for batch_count in range(num_batches): - input_ids = generate_sample_batch(model_desc.inputs_[0], batch_size, device) - segment_ids = generate_sample_batch(model_desc.inputs_[1], batch_size, device) - input_mask = generate_sample_batch(model_desc.inputs_[2], batch_size, device) - masked_lm_labels = generate_sample_batch(model_desc.inputs_[3], batch_size, device) - next_sentence_labels = generate_sample_batch(model_desc.inputs_[4], batch_size, device) - lr = lr_batch_list[batch_count] - - learning_rate = torch.tensor([lr]).to(device) - training_args = [ - input_ids, - segment_ids, - input_mask, - masked_lm_labels, - next_sentence_labels, - learning_rate, - ] - if use_mixed_precision: - if not use_internel_loss_scale: - loss_scale = torch.tensor([loss_scaler.loss_scale_]).to(device) - training_args.append(loss_scale) - actual_loss = model.train_step(*training_args) - if isinstance(actual_loss, (list, tuple)): - assert len(actual_loss) == 2 - actual_loss, actual_all_finite = actual_loss - if not use_internel_loss_scale: - loss_scaler.update_loss_scale(actual_all_finite.item()) - actual_all_finites = [ - *actual_all_finites, - actual_all_finite.cpu().numpy().item(0), - ] - - actual_losses = [*actual_losses, actual_loss.cpu().numpy().item(0)] - else: - loss = model(*training_args) - actual_losses = [*actual_losses, 
loss.cpu().numpy().item(0)] - - if batch_count == num_batches - 1: - # test eval_step api with fetches at the end of the training. - # if eval_step is called during the training, it will affect the actual training loss (training session is stateful). - eval_loss = model.eval_step( - input_ids, - segment_ids, - input_mask, - masked_lm_labels, - next_sentence_labels, - fetches=["loss"], - ) - eval_loss = eval_loss.cpu().numpy().item(0) - - # If using internal loss scale, all_finites are handled internally too. - if use_mixed_precision and not use_internel_loss_scale: - return actual_losses, actual_all_finites, eval_loss - else: - return actual_losses, eval_loss - - -class MNISTWrapper: - class NeuralNet(nn.Module): - def __init__(self, input_size, hidden_size, num_classes): - super().__init__() - self.fc1 = nn.Linear(input_size, hidden_size) - self.relu = nn.ReLU() - self.fc2 = nn.Linear(hidden_size, num_classes) - self.register_buffer("bias_buffer", torch.tensor(1e-6)) - - def forward(self, x): - out = self.fc1(x) - out = self.relu(out) - out = self.fc2(out) - out = torch.add(out, self.bias_buffer.to(out.dtype)) - return out - - class NeuralNetWithLoss(nn.Module): - def __init__(self, input_size, hidden_size, num_classes): - super().__init__() - self.fc1 = nn.Linear(input_size, hidden_size) - self.relu = nn.ReLU() - self.fc2 = nn.Linear(hidden_size, num_classes) - - def forward(self, x, target): - out = self.fc1(x) - out = self.relu(out) - out = self.fc2(out) - return F.nll_loss(F.log_softmax(out, dim=1), target), out - - def my_loss(x, target): # noqa: N805 - return F.nll_loss(F.log_softmax(x, dim=1), target) - - def train_with_trainer(self, learningRate, trainer, device, train_loader, epoch): - actual_losses = [] - for batch_idx, (data, target) in enumerate(train_loader): - data, target = data.to(device), target.to(device) # noqa: PLW2901 - data = data.reshape(data.shape[0], -1) # noqa: PLW2901 - - loss, _ = trainer.train_step(data, target, torch.tensor([learningRate])) - - args_log_interval = 100 - if batch_idx % args_log_interval == 0: - print( - "Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format( - epoch, - batch_idx * len(data), - len(train_loader.dataset), - 100.0 * batch_idx / len(train_loader), - loss.item(), - ) - ) - actual_losses = [*actual_losses, loss.cpu().numpy().item()] - - return actual_losses - - # TODO: comple this once ORT training can do evaluation. 
- def test_with_trainer(self, trainer, device, test_loader): - test_loss = 0 - correct = 0 - with torch.no_grad(): - for data, target in test_loader: - data, target = data.to(device), target.to(device) # noqa: PLW2901 - data = data.reshape(data.shape[0], -1) # noqa: PLW2901 - output = F.log_softmax(trainer.eval_step((data), fetches=["probability"]), dim=1) - test_loss += F.nll_loss(output, target, reduction="sum").item() # sum up batch loss - pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability - correct += pred.eq(target.view_as(pred)).sum().item() - - test_loss /= len(test_loader.dataset) - - print( - "\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n".format( - test_loss, - correct, - len(test_loader.dataset), - 100.0 * correct / len(test_loader.dataset), - ) - ) - - return test_loss, correct / len(test_loader.dataset) - - def mnist_model_description(): - input_desc = IODescription("input1", ["batch", 784], torch.float32) - label_desc = IODescription( - "label", - [ - "batch", - ], - torch.int64, - num_classes=10, - ) - loss_desc = IODescription("loss", [], torch.float32) - probability_desc = IODescription("probability", ["batch", 10], torch.float32) - return ModelDescription([input_desc, label_desc], [loss_desc, probability_desc]) - - def get_loaders(self): - args_batch_size = 64 - args_test_batch_size = 1000 - - kwargs = {"num_workers": 0, "pin_memory": True} - # set shuffle to False to get deterministic data set among different torch version - train_loader = torch.utils.data.DataLoader( - datasets.MNIST( - os.path.join(SCRIPT_DIR, "data"), - train=True, - download=True, - transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]), - ), - batch_size=args_batch_size, - shuffle=False, - **kwargs, - ) - test_loader = torch.utils.data.DataLoader( - datasets.MNIST( - os.path.join(SCRIPT_DIR, "data"), - train=False, - transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]), - ), - batch_size=args_test_batch_size, - shuffle=False, - **kwargs, - ) - - return train_loader, test_loader - - def get_model(self): - input_size = 784 - hidden_size = 500 - num_classes = 10 - - # warning: changes the pytorch random generator state - model = MNISTWrapper.NeuralNet(input_size, hidden_size, num_classes) - model_desc = MNISTWrapper.mnist_model_description() - return model, model_desc - - def get_model_with_internal_loss(self): - input_size = 784 - hidden_size = 500 - num_classes = 10 - - # warning: changes the pytorch random generator state - model = MNISTWrapper.NeuralNetWithLoss(input_size, hidden_size, num_classes) - model_desc = MNISTWrapper.mnist_model_description() - return model, model_desc - - def get_trainer( - self, - model, - model_desc, - device, - onnx_opset_ver=12, - frozen_weights=[], # noqa: B006 - internal_loss_fn=False, - get_lr_this_step=None, - optimizer="SGDOptimizer", - ): - loss_fn = MNISTWrapper.my_loss if not internal_loss_fn else None - return ORTTrainer( - model, - loss_fn, - model_desc, - optimizer, - None, - IODescription( - "Learning_Rate", - [ - 1, - ], - torch.float32, - ), - device, - _opset_version=onnx_opset_ver, - frozen_weights=frozen_weights, - get_lr_this_step=get_lr_this_step, - ) - - -class TestOrtTrainer(unittest.TestCase): - def run_mnist_training_and_testing(onnx_opset_ver): # noqa: N805 - torch.manual_seed(1) - device = torch.device("cuda") - - mnist = MNISTWrapper() - train_loader, test_loader = mnist.get_loaders() - model, model_desc = 
mnist.get_model() - trainer = mnist.get_trainer(model, model_desc, device, onnx_opset_ver=onnx_opset_ver) - - learningRate = 0.01 # noqa: N806 - args_epochs = 2 - expected_losses = [ - 2.312044143676758, - 0.8018650412559509, - 0.5819257497787476, - 0.47025489807128906, - 0.35800155997276306, - 0.41124576330184937, - 0.2731882333755493, - 0.4201386570930481, - 0.39458805322647095, - 0.38380366563796997, - 0.2722422480583191, - 0.24230478703975677, - 0.23505745828151703, - 0.33442264795303345, - 0.21140924096107483, - 0.31545233726501465, - 0.18556523323059082, - 0.3453553020954132, - 0.29598352313041687, - 0.3595045208930969, - ] - - expected_test_losses = [0.3145490005493164, 0.256188737487793] - expected_test_accuracies = [0.9075, 0.9265] - - actual_losses = [] - actual_test_losses, actual_accuracies = [], [] - for epoch in range(1, args_epochs + 1): - actual_losses = [ - *actual_losses, - *mnist.train_with_trainer(learningRate, trainer, device, train_loader, epoch), - ] - - test_loss, accuracy = mnist.test_with_trainer(trainer, device, test_loader) - actual_test_losses = [*actual_test_losses, test_loss] - actual_accuracies = [*actual_accuracies, accuracy] - - # if you update outcomes, also do so for resume from checkpoint test - # args_checkpoint_epoch = 1 - # if epoch == args_checkpoint_epoch: - # state = {'rng_state': torch.get_rng_state(), 'model': trainer.state_dict()} - # torch.save(state, get_name("ckpt_mnist.pt")) - - print("actual_losses=", actual_losses) - print("actual_test_losses=", actual_test_losses) - print("actual_accuracies=", actual_accuracies) - - # to update expected outcomes, enable pdb and run the test with -s and copy paste outputs - # import pdb; pdb.set_trace() - rtol = 1e-03 - assert_allclose(expected_losses, actual_losses, rtol=rtol, err_msg="loss mismatch") - assert_allclose( - expected_test_losses, - actual_test_losses, - rtol=rtol, - err_msg="test loss mismatch", - ) - assert_allclose( - expected_test_accuracies, - actual_accuracies, - rtol=rtol, - err_msg="test accuracy mismatch", - ) - - def test_mnist_training_and_testing_opset12(self): - TestOrtTrainer.run_mnist_training_and_testing(onnx_opset_ver=12) - - def test_mnist_resume_training_and_testing(self): - torch.manual_seed(1) - device = torch.device("cuda") - - mnist = MNISTWrapper() - train_loader, test_loader = mnist.get_loaders() - model, model_desc = mnist.get_model() - - learningRate = 0.01 # noqa: N806 - args_epochs = 2 - args_checkpoint_epoch = 1 - # should match those in test without checkpointing - expected_losses = [ - 0.26509523391723633, - 0.24135658144950867, - 0.2397943139076233, - 0.3351520597934723, - 0.20998981595039368, - 0.31488314270973206, - 0.18481917679309845, - 0.34727591276168823, - 0.2971782684326172, - 0.3609251379966736, - ] - - expected_test_losses = [0.25632242965698243] - expected_test_accuracies = [0.9264] - - actual_losses = [] - actual_test_losses, actual_accuracies = [], [] - - # restore from checkpoint - resume_trainer = mnist.get_trainer(model, model_desc, device) - checkpoint = torch.load(get_name("ckpt_mnist.pt"), map_location="cpu") - torch.set_rng_state(checkpoint["rng_state"]) - resume_trainer.load_state_dict(checkpoint["model"], strict=True) - - # continue .. 
- for epoch in range(args_checkpoint_epoch + 1, args_epochs + 1): - actual_losses = [ - *actual_losses, - *mnist.train_with_trainer(learningRate, resume_trainer, device, train_loader, epoch), - ] - - test_loss, accuracy = mnist.test_with_trainer(resume_trainer, device, test_loader) - actual_test_losses = [*actual_test_losses, test_loss] - actual_accuracies = [*actual_accuracies, accuracy] - - print("actual_losses=", actual_losses) - print("actual_test_losses=", actual_test_losses) - print("actual_accuracies=", actual_accuracies) - - # to update expected outcomes, enable pdb and run the test with -s and copy paste outputs - # import pdb; pdb.set_trace() - rtol = 1e-03 - assert_allclose(expected_losses, actual_losses, rtol=rtol, err_msg="loss mismatch") - assert_allclose( - expected_test_losses, - actual_test_losses, - rtol=rtol, - err_msg="test loss mismatch", - ) - assert_allclose( - expected_test_accuracies, - actual_accuracies, - rtol=rtol, - err_msg="test accuracy mismatch", - ) - - def test_mnist_state_dict(self): - torch.manual_seed(1) - device = torch.device("cuda") - - mnist = MNISTWrapper() - train_loader, test_loader = mnist.get_loaders() - model, model_desc = mnist.get_model() - - trainer = mnist.get_trainer(model, model_desc, device) - state_dict = trainer.state_dict() - assert state_dict == {} - - learningRate = 0.02 # noqa: N806 - - data, target = next(iter(train_loader)) - data, target = data.to(device), target.to(device) - data = data.reshape(data.shape[0], -1) - - loss, _ = trainer.train_step(data, target, torch.tensor([learningRate])) - - state_dict = trainer.state_dict() - assert state_dict.keys() == { - "fc1.bias", - "fc1.weight", - "fc2.bias", - "fc2.weight", - "bias_buffer", - } - - def test_mnist_save_as_onnx(self): - torch.manual_seed(1) - device = torch.device("cuda") - onnx_file_name = "mnist.onnx" - if os.path.exists(onnx_file_name): - os.remove(onnx_file_name) - - mnist = MNISTWrapper() - train_loader, test_loader = mnist.get_loaders() - model, model_desc = mnist.get_model() - - trainer = mnist.get_trainer(model, model_desc, device) - trainer.save_as_onnx(onnx_file_name) - assert not os.path.exists(onnx_file_name) - - learningRate = 0.02 # noqa: N806 - - data, target = next(iter(train_loader)) - data, target = data.to(device), target.to(device) - data = data.reshape(data.shape[0], -1) - - loss, _ = trainer.train_step(data, target, torch.tensor([learningRate])) - - trainer.save_as_onnx(onnx_file_name) - assert os.path.exists(onnx_file_name) - - def test_mnist_device(self): - torch.manual_seed(1) - device = torch.device("cuda") - - mnist = MNISTWrapper() - train_loader, test_loader = mnist.get_loaders() - model, model_desc = mnist.get_model() - - for model_device in [torch.device("cpu"), torch.device("cuda")]: - model.to(model_device) - trainer = mnist.get_trainer(model, model_desc, device) - learningRate = 0.02 # noqa: N806 - - data, target = next(iter(train_loader)) - data, target = data.to(device), target.to(device) - data = data.reshape(data.shape[0], -1) - - loss, _ = trainer.train_step(data, target, torch.tensor([learningRate])) - - def test_mnist_initializer_names(self): - torch.manual_seed(1) - device = torch.device("cuda") - - mnist = MNISTWrapper() - train_loader, test_loader = mnist.get_loaders() - model, model_desc = mnist.get_model() - - trainer = mnist.get_trainer(model, model_desc, device) - learningRate = 0.02 # noqa: N806 - - data, target = next(iter(train_loader)) - data, target = data.to(device), target.to(device) - data = 
data.reshape(data.shape[0], -1) - - loss, _ = trainer.train_step(data, target, torch.tensor([learningRate])) - - assert ({n.name for n in trainer.onnx_model_.graph.initializer} - {"bias_buffer"}) == { - n for n, t in model.named_parameters() - } - - def test_mnist_initializer_names_with_internal_loss(self): - torch.manual_seed(1) - device = torch.device("cuda") - - mnist = MNISTWrapper() - train_loader, test_loader = mnist.get_loaders() - model, model_desc = mnist.get_model_with_internal_loss() - - def get_lr_this_step(global_step): - learningRate = 0.02 # noqa: N806 - return torch.tensor([learningRate]) - - trainer = mnist.get_trainer( - model, - model_desc, - device, - internal_loss_fn=True, - get_lr_this_step=get_lr_this_step, - ) - - data, target = next(iter(train_loader)) - data, target = data.to(device), target.to(device) - data = data.reshape(data.shape[0], -1) - - loss, _ = trainer.train_step(data, target) - - assert {n.name for n in trainer.onnx_model_.graph.initializer} == {n for n, t in model.named_parameters()} - - def test_mnist_frozen_weight(self): - torch.manual_seed(1) - device = torch.device("cuda") - - mnist = MNISTWrapper() - train_loader, test_loader = mnist.get_loaders() - model, model_desc = mnist.get_model() - - trainer = mnist.get_trainer(model, model_desc, device, frozen_weights=["fc1.weight"]) - - learningRate = 0.02 # noqa: N806 - - data, target = next(iter(train_loader)) - data, target = data.to(device), target.to(device) - data = data.reshape(data.shape[0], -1) - - loss, _ = trainer.train_step(data, target, torch.tensor([learningRate])) - - fc1_trainstep_1 = trainer.state_dict()["fc1.weight"] - fc2_trainstep_1 = trainer.state_dict()["fc2.weight"] - - loss, _ = trainer.train_step(data, target, torch.tensor([learningRate])) - - fc1_trainstep_2 = trainer.state_dict()["fc1.weight"] - fc2_trainstep_2 = trainer.state_dict()["fc2.weight"] - assert np.array_equal(fc1_trainstep_1, fc1_trainstep_2) and not np.array_equal(fc2_trainstep_1, fc2_trainstep_2) - - def test_mnist_torch_buffer(self): - torch.manual_seed(1) - device = torch.device("cuda") - - mnist = MNISTWrapper() - train_loader, test_loader = mnist.get_loaders() - model, model_desc = mnist.get_model() - - trainer = mnist.get_trainer(model, model_desc, device) - - learningRate = 0.02 # noqa: N806 - - data, target = next(iter(train_loader)) - data, target = data.to(device), target.to(device) - data = data.reshape(data.shape[0], -1) - - loss, _ = trainer.train_step(data, target, torch.tensor([learningRate])) - - fc1_trainstep_1 = trainer.state_dict()["fc1.weight"] - bias_buffer_trainstep_1 = trainer.state_dict()["bias_buffer"] - - loss, _ = trainer.train_step(data, target, torch.tensor([learningRate])) - - fc1_trainstep_2 = trainer.state_dict()["fc1.weight"] - bias_buffer_trainstep_2 = trainer.state_dict()["bias_buffer"] - assert not np.array_equal(fc1_trainstep_1, fc1_trainstep_2) and np.array_equal( - bias_buffer_trainstep_1, bias_buffer_trainstep_2 - ) - - def test_mnist_frozen_weight_checkpoint(self): - torch.manual_seed(1) - device = torch.device("cuda") - - mnist = MNISTWrapper() - train_loader, test_loader = mnist.get_loaders() - model, model_desc = mnist.get_model() - - trainer = mnist.get_trainer(model, model_desc, device, frozen_weights=["fc1.weight"]) - - learningRate = 0.02 # noqa: N806 - - # do one train step - data, target = next(iter(train_loader)) - data, target = data.to(device), target.to(device) - data = data.reshape(data.shape[0], -1) - - loss, _ = trainer.train_step(data, target, 
torch.tensor([learningRate])) - - # do one eval step - data, target = next(iter(train_loader)) - data, target = data.to(device), target.to(device) - data = data.reshape(data.shape[0], -1) - - loss, _ = trainer.eval_step(data, target) - - # save checkpoint, load model and compare - state_dict = trainer.state_dict() - - new_model, _ = mnist.get_model() - trainer = mnist.get_trainer(new_model, model_desc, device, frozen_weights=["fc1.weight"]) - trainer.load_state_dict(state_dict) - - ckpt_loss, _ = trainer.eval_step(data, target) - assert loss == ckpt_loss - - loaded_state_dict = trainer.state_dict() - assert state_dict.keys() == loaded_state_dict.keys() - - def test_mnist_training_checkpoint(self): - torch.manual_seed(1) - device = torch.device("cuda") - - mnist = MNISTWrapper() - train_loader, test_loader = mnist.get_loaders() - model, model_desc = mnist.get_model() - - trainer = mnist.get_trainer( - model, - model_desc, - device, - optimizer="LambOptimizer", - frozen_weights=["fc1.weight"], - ) - - learningRate = 0.02 # noqa: N806 - - # do 5 train step - for _i in range(5): - data, target = next(iter(train_loader)) - data, target = data.to(device), target.to(device) - data = data.reshape(data.shape[0], -1) - - loss, _ = trainer.train_step(data, target, torch.tensor([learningRate])) - - # do one eval step - data, target = next(iter(train_loader)) - data, target = data.to(device), target.to(device) - data = data.reshape(data.shape[0], -1) - - loss, _ = trainer.eval_step(data, target) - - # save checkpoint, load model and compare - state_dict = trainer.state_dict() - - new_model, _ = mnist.get_model() - trainer = mnist.get_trainer( - new_model, - model_desc, - device, - optimizer="LambOptimizer", - frozen_weights=["fc1.weight"], - ) - trainer.load_state_dict(state_dict) - - ckpt_loss, _ = trainer.eval_step(data, target) - assert loss == ckpt_loss - - loaded_state_dict = trainer.state_dict() - assert state_dict.keys() == loaded_state_dict.keys() - for key in state_dict: - assert np.array_equal(state_dict[key], loaded_state_dict[key]) - - def test_bert_training_basic(self): - expected_losses = [ - 11.027887, - 11.108191, - 11.055356, - 11.040912, - 10.960277, - 11.02691, - 11.082471, - 10.920979, - ] - expected_eval_loss = [10.958977] - actual_losses, actual_eval_loss = run_bert_training_test( - gradient_accumulation_steps=1, - use_mixed_precision=False, - allreduce_post_accumulation=False, - ) - - # to update expected outcomes, enable pdb and run the test with -s and copy paste outputs - # print('losses expected: ', expected_losses) - # print('losses actual: ', actual_losses) - # print('eval_loss expected: ', expected_eval_loss) - # print('eval_loss actual: ', actual_eval_loss) - # import pdb; pdb.set_trace() - - rtol = 1e-03 - assert_allclose(expected_losses, actual_losses, rtol=rtol, err_msg="loss mismatch") - assert_allclose( - expected_eval_loss, - actual_eval_loss, - rtol=rtol, - err_msg="evaluation loss mismatch", - ) - - def test_bert_training_gradient_accumulation(self): - expected_losses = [ - 11.027887, - 11.108191, - 11.055354, - 11.040904, - 10.960266, - 11.026897, - 11.082475, - 10.920998, - ] - expected_eval_loss = [10.958998] - - actual_losses, actual_eval_loss = run_bert_training_test( - gradient_accumulation_steps=4, - use_mixed_precision=False, - allreduce_post_accumulation=False, - ) - - # to update expected outcomes, enable pdb and run the test with -s and copy paste outputs - # print('losses expected: ', expected_losses) - # print('losses actual: ', actual_losses) - # 
print('eval_loss expected: ', expected_eval_loss) - # print('eval_loss actual: ', actual_eval_loss) - # import pdb; pdb.set_trace() - - rtol = 1e-03 - assert_allclose(expected_losses, actual_losses, rtol=rtol, err_msg="loss mismatch") - assert_allclose( - expected_eval_loss, - actual_eval_loss, - rtol=rtol, - err_msg="evaluation loss mismatch", - ) - - def test_bert_checkpointing_basic(self): - model, _, _ = create_ort_trainer( - gradient_accumulation_steps=1, - use_mixed_precision=False, - allreduce_post_accumulation=True, - use_simple_model_desc=True, - loss_scaler=None, - ) - sd = model.state_dict() - - # modify one of the default values - sd["bert.encoder.layer.0.attention.output.LayerNorm.weight"] += 1 - model.load_state_dict(sd) - - ckpt_dir = "testdata" - save_checkpoint(model, ckpt_dir, "bert_toy_save_test") - del model - - # create new model - model2, _, _ = create_ort_trainer( - gradient_accumulation_steps=1, - use_mixed_precision=False, - allreduce_post_accumulation=True, - use_simple_model_desc=True, - loss_scaler=None, - ) - - # load changed checkpoint - load_checkpoint(model2, ckpt_dir, "bert_toy_save_test") - loaded_sd = model2.state_dict() - - for k, v in loaded_sd.items(): - assert torch.all(torch.eq(v, sd[k])) - - def test_wrap_model_loss_fn_state_dict(self): - torch.manual_seed(1) - device = torch.device("cuda") - - class LinearModel(torch.nn.Module): - def __init__(self): - super().__init__() - self.linear = torch.nn.Linear(2, 4) - - def forward(self, y=None, x=None): - if y is not None: - return self.linear(x) + y - else: - return self.linear(x) + torch.ones(2, 4) - - pt_model = LinearModel() - data = torch.randn(2, 2) - label = torch.tensor([0, 1], dtype=torch.int64) - input_desc = IODescription("x", [2, 2], torch.float32) - label_desc = IODescription( - "label", - [ - 2, - ], - torch.int64, - num_classes=4, - ) - output_desc = IODescription("output", [2, 4], torch.float32) - loss_desc = IODescription("loss", [], torch.float32) - model_desc = ModelDescription([input_desc, label_desc], [loss_desc, output_desc]) - - def loss_fn(x, label): - return F.nll_loss(F.log_softmax(x, dim=1), label) - - def get_lr_this_step(global_step): - learningRate = 0.02 # noqa: N806 - return torch.tensor([learningRate]) - - ort_trainer = ORTTrainer( - pt_model, - loss_fn, - model_desc, - "SGDOptimizer", - None, - IODescription( - "Learning_Rate", - [ - 1, - ], - torch.float32, - ), - device, - get_lr_this_step=get_lr_this_step, - ) - ort_trainer.train_step(x=data, label=label) - state_dict = ort_trainer.state_dict() - assert state_dict.keys() == {"linear.bias", "linear.weight"} - - -if __name__ == "__main__": - unittest.main(module=__name__, buffer=True) diff --git a/onnxruntime/test/python/onnxruntime_test_ort_trainer_with_mixed_precision.py b/onnxruntime/test/python/onnxruntime_test_ort_trainer_with_mixed_precision.py deleted file mode 100644 index 3b994e6f26710..0000000000000 --- a/onnxruntime/test/python/onnxruntime_test_ort_trainer_with_mixed_precision.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
- -import unittest - -from numpy.testing import assert_allclose, assert_array_equal -from onnxruntime_test_ort_trainer import run_bert_training_test - - -class TestOrtTrainer(unittest.TestCase): - def test_bert_training_mixed_precision(self): - expected_losses = [ - 11.034248352050781, - 11.125300407409668, - 11.006105422973633, - 11.047048568725586, - 11.027417182922363, - 11.015759468078613, - 11.060905456542969, - 10.971782684326172, - ] - expected_all_finites = [True, True, True, True, True, True, True, True] - expected_eval_loss = [10.959012985229492] - actual_losses, actual_all_finites, actual_eval_loss = run_bert_training_test( - gradient_accumulation_steps=1, - use_mixed_precision=True, - allreduce_post_accumulation=False, - use_simple_model_desc=False, - ) - - rtol = 1e-02 - assert_allclose(expected_losses, actual_losses, rtol=rtol, err_msg="loss mismatch") - assert_array_equal(expected_all_finites, actual_all_finites, "all_finite mismatch") - assert_allclose( - expected_eval_loss, - actual_eval_loss, - rtol=rtol, - err_msg="evaluation loss mismatch", - ) - - def test_bert_training_mixed_precision_internal_loss_scale(self): - expected_losses = [ - 11.034248352050781, - 11.125300407409668, - 11.006105422973633, - 11.047048568725586, - 11.027417182922363, - 11.015759468078613, - 11.060905456542969, - 10.971782684326172, - ] - expected_eval_loss = [10.959012985229492] - actual_losses, actual_eval_loss = run_bert_training_test( - gradient_accumulation_steps=1, - use_mixed_precision=True, - allreduce_post_accumulation=False, - use_simple_model_desc=False, - use_internel_loss_scale=True, - ) - - rtol = 1e-02 - assert_allclose(expected_losses, actual_losses, rtol=rtol, err_msg="loss mismatch") - assert_allclose( - expected_eval_loss, - actual_eval_loss, - rtol=rtol, - err_msg="evaluation loss mismatch", - ) - - def test_bert_training_gradient_accumulation_mixed_precision(self): - expected_losses = [ - 11.034248352050781, - 11.125300407409668, - 11.006077766418457, - 11.047025680541992, - 11.027434349060059, - 11.0156831741333, - 11.060973167419434, - 10.971841812133789, - ] - expected_all_finites = [True, True] - expected_eval_loss = [10.95903205871582] - actual_losses, actual_all_finites, actual_eval_loss = run_bert_training_test( - gradient_accumulation_steps=4, - use_mixed_precision=True, - allreduce_post_accumulation=False, - use_simple_model_desc=False, - ) - - rtol = 1e-02 - assert_allclose(expected_losses, actual_losses, rtol=rtol, err_msg="loss mismatch") - assert_array_equal(expected_all_finites, actual_all_finites, "all_finite mismatch") - assert_allclose( - expected_eval_loss, - actual_eval_loss, - rtol=rtol, - err_msg="evaluation loss mismatch", - ) - - -if __name__ == "__main__": - unittest.main(module=__name__, buffer=True) diff --git a/onnxruntime/test/python/onnxruntime_test_training_unit_tests.py b/onnxruntime/test/python/onnxruntime_test_training_unit_tests.py deleted file mode 100644 index 540f39b797bdb..0000000000000 --- a/onnxruntime/test/python/onnxruntime_test_training_unit_tests.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
- -import unittest - -import torch -import torch.nn as nn -from numpy.testing import assert_allclose -from onnxruntime_test_ort_trainer import map_optimizer_attributes, ort_trainer_learning_rate_description -from onnxruntime_test_training_unittest_utils import process_dropout - -import onnxruntime -from onnxruntime.capi.ort_trainer import IODescription, ModelDescription, ORTTrainer - - -class TestTrainingDropout(unittest.TestCase): - def setUp(self): - torch.manual_seed(1) - onnxruntime.set_seed(1) - - @unittest.skip( - "Temporarily disable this test. The graph below will trigger ORT to " - "sort backward graph before forward graph which gives incorrect result. " - "https://github.com/microsoft/onnxruntime/issues/16801" - ) - def test_training_and_eval_dropout(self): - class TwoDropoutNet(nn.Module): - def __init__(self, drop_prb_1, drop_prb_2, dim_size): - super().__init__() - self.drop_1 = nn.Dropout(drop_prb_1) - self.drop_2 = nn.Dropout(drop_prb_2) - self.weight_1 = torch.nn.Parameter(torch.zeros(dim_size, dtype=torch.float32)) - - def forward(self, x): - x = x + self.weight_1 - x = self.drop_1(x) - x = self.drop_2(x) - output = x - return output[0] - - dim_size = 3 - device = torch.device("cuda", 0) - # This will drop all values, therefore expecting all 0 in output tensor - model = TwoDropoutNet(0.999, 0.999, dim_size) - input_desc = IODescription("input", [dim_size], torch.float32) - output_desc = IODescription("output", [], torch.float32) - model_desc = ModelDescription([input_desc], [output_desc]) - lr_desc = ort_trainer_learning_rate_description() - model = ORTTrainer( - model, - None, - model_desc, - "LambOptimizer", - map_optimizer_attributes, - lr_desc, - device, - postprocess_model=process_dropout, - world_rank=0, - world_size=1, - ) - input = torch.ones(dim_size, dtype=torch.float32).to(device) - expected_training_output = [0.0] - expected_eval_output = [1.0] - learning_rate = torch.tensor([1.0000000e00]).to(device) - input_args = [input, learning_rate] - train_output = model.train_step(*input_args) - - rtol = 1e-04 - assert_allclose( - expected_training_output, - train_output.item(), - rtol=rtol, - err_msg="dropout training loss mismatch", - ) - - eval_output = model.eval_step(input) - assert_allclose( - expected_eval_output, - eval_output.item(), - rtol=rtol, - err_msg="dropout eval loss mismatch", - ) - - # Do another train step to make sure it's using original ratios - train_output_2 = model.train_step(*input_args) - assert_allclose( - expected_training_output, - train_output_2.item(), - rtol=rtol, - err_msg="dropout training loss 2 mismatch", - ) - - -if __name__ == "__main__": - unittest.main(module=__name__, buffer=True) diff --git a/onnxruntime/test/python/onnxruntime_test_training_unittest_utils.py b/onnxruntime/test/python/onnxruntime_test_training_unittest_utils.py deleted file mode 100644 index 3d3feca06a99b..0000000000000 --- a/onnxruntime/test/python/onnxruntime_test_training_unittest_utils.py +++ /dev/null @@ -1,56 +0,0 @@ -import numpy as np -from onnx import numpy_helper - - -def get_node_index(model, node): - i = 0 - while i < len(model.graph.node): - if model.graph.node[i] == node: - break - i += 1 - return i if i < len(model.graph.node) else None - - -def add_const(model, name, output, t_value=None, f_value=None): - const_node = model.graph.node.add() - const_node.op_type = "Constant" - const_node.name = name - const_node.output.extend([output]) - attr = const_node.attribute.add() - attr.name = "value" - if t_value is not None: - attr.type = 4 - 
attr.t.CopyFrom(t_value) - else: - attr.type = 1 - attr.f = f_value - return const_node - - -def process_dropout(model): - dropouts = [] - index = 0 - for node in model.graph.node: - if node.op_type == "Dropout": - new_dropout = model.graph.node.add() - new_dropout.op_type = "TrainableDropout" - new_dropout.name = "TrainableDropout_%d" % index - # make ratio node - ratio = np.asarray([node.attribute[0].f], dtype=np.float32) - print(ratio.shape) - ratio_value = numpy_helper.from_array(ratio) - ratio_node = add_const( - model, - "dropout_node_ratio_%d" % index, - "dropout_node_ratio_%d" % index, - t_value=ratio_value, - ) - print(ratio_node) - new_dropout.input.extend([node.input[0], ratio_node.output[0]]) - new_dropout.output.extend(node.output) - dropouts.append(get_node_index(model, node)) - index += 1 - dropouts.sort(reverse=True) - for d in dropouts: - del model.graph.node[d] - model.opset_import[0].version = 10 diff --git a/orttraining/orttraining/python/checkpointing_utils.py b/orttraining/orttraining/python/checkpointing_utils.py deleted file mode 100644 index 460b9982297d1..0000000000000 --- a/orttraining/orttraining/python/checkpointing_utils.py +++ /dev/null @@ -1,127 +0,0 @@ -import os - -import torch - - -def list_checkpoint_files(checkpoint_dir, checkpoint_prefix, extension=".ort.pt"): - ckpt_file_names = [f for f in os.listdir(checkpoint_dir) if f.startswith(checkpoint_prefix)] - ckpt_file_names = [f for f in ckpt_file_names if f.endswith(extension)] - ckpt_file_names = [os.path.join(checkpoint_dir, f) for f in ckpt_file_names] - - assert len(ckpt_file_names) > 0, 'No checkpoint files found with prefix "{}" in directory {}.'.format( - checkpoint_prefix, checkpoint_dir - ) - return ckpt_file_names - - -def get_checkpoint_name(prefix, is_partitioned, world_rank=None, world_size=None): - SINGLE_CHECKPOINT_FILENAME = "{prefix}.ort.pt" # noqa: N806 - MULTIPLE_CHECKPOINT_FILENAME = "{prefix}.ZeRO.{world_rank}.{world_size}.ort.pt" # noqa: N806 - - if is_partitioned: - filename = MULTIPLE_CHECKPOINT_FILENAME.format( - prefix=prefix, world_rank=world_rank, world_size=(world_size - 1) - ) - else: - filename = SINGLE_CHECKPOINT_FILENAME.format(prefix=prefix) - - return filename - - -def _split_state_dict(state_dict): - optimizer_keys = ["Moment_1_", "Moment_2_", "Update_Count_", "Step"] - split_sd = {"optimizer": {}, "fp32_param": {}, "fp16_param": {}} - for k, v in state_dict.items(): - mode = "fp32_param" - for optim_key in optimizer_keys: - if k.startswith(optim_key): - mode = "optimizer" - break - if k.endswith("_fp16"): - mode = "fp16_param" - split_sd[mode][k] = v - return split_sd - - -class CombineZeroCheckpoint: - def __init__(self, checkpoint_files, clean_state_dict=None): - assert len(checkpoint_files) > 0, "No checkpoint files passed" - self.checkpoint_files = checkpoint_files - self.clean_state_dict = clean_state_dict - self.world_size = int(self.checkpoint_files[0].split("ZeRO")[1].split(".")[2]) + 1 - assert len(self.checkpoint_files) == self.world_size, f"Could not find {self.world_size} files" - self.weight_shape_map = dict() - self.sharded_params = set() - - def _split_name(self, name: str): - name_split = name.split("_view_") - view_num = None - if len(name_split) > 1: - view_num = int(name_split[1]) - optimizer_key = "" - mp_suffix = "" - if name_split[0].startswith("Moment_1"): - optimizer_key = "Moment_1_" - elif name_split[0].startswith("Moment_2"): - optimizer_key = "Moment_2_" - elif name_split[0].startswith("Update_Count"): - optimizer_key = "Update_Count_" - 
elif name_split[0].endswith("_fp16"): - mp_suffix = "_fp16" - param_name = name_split[0] - if optimizer_key: - param_name = param_name.split(optimizer_key)[1] - param_name = param_name.split("_fp16")[0] - return param_name, optimizer_key, view_num, mp_suffix - - def _update_weight_statistics(self, name, value): - if name not in self.weight_shape_map: - self.weight_shape_map[name] = value.size() # original shape of tensor - - def _reshape_tensor(self, key): - value = self.aggregate_state_dict[key] - weight_name, _, _, _ = self._split_name(key) - set_size = self.weight_shape_map[weight_name] - self.aggregate_state_dict[key] = value.reshape(set_size) - - def _aggregate(self, param_dict): - for k, v in param_dict.items(): - weight_name, optimizer_key, view_num, mp_suffix = self._split_name(k) - if view_num is not None: - # parameter is sharded - param_name = optimizer_key + weight_name + mp_suffix - - if param_name in self.aggregate_state_dict and optimizer_key not in ["Update_Count_"]: - self.sharded_params.add(param_name) - # Found a previous shard of the param, concatenate shards ordered by ranks - self.aggregate_state_dict[param_name] = torch.cat((self.aggregate_state_dict[param_name], v)) - else: - self.aggregate_state_dict[param_name] = v - else: - if k in self.aggregate_state_dict: - assert (self.aggregate_state_dict[k] == v).all(), "Unsharded params must have the same value" - else: - self.aggregate_state_dict[k] = v - self._update_weight_statistics(weight_name, v) - - def aggregate_checkpoints(self): - checkpoint_prefix = self.checkpoint_files[0].split(".ZeRO")[0] - self.aggregate_state_dict = dict() - - for i in range(self.world_size): - checkpoint_name = get_checkpoint_name(checkpoint_prefix, True, i, self.world_size) - rank_state_dict = torch.load(checkpoint_name, map_location=torch.device("cpu")) - if "model" in rank_state_dict: - rank_state_dict = rank_state_dict["model"] - - if self.clean_state_dict: - rank_state_dict = self.clean_state_dict(rank_state_dict) - - rank_state_dict = _split_state_dict(rank_state_dict) - self._aggregate(rank_state_dict["fp16_param"]) - self._aggregate(rank_state_dict["fp32_param"]) - self._aggregate(rank_state_dict["optimizer"]) - - for k in self.sharded_params: - self._reshape_tensor(k) - return self.aggregate_state_dict diff --git a/orttraining/orttraining/python/deprecated/__init__.py b/orttraining/orttraining/python/deprecated/__init__.py deleted file mode 100644 index 6e02db707bc47..0000000000000 --- a/orttraining/orttraining/python/deprecated/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. -# -------------------------------------------------------------------------- -from onnxruntime.capi._pybind_state import TrainingParameters # noqa: F401 -from onnxruntime.capi.training.training_session import TrainingSession # noqa: F401 diff --git a/orttraining/orttraining/python/deprecated/training_session.py b/orttraining/orttraining/python/deprecated/training_session.py deleted file mode 100644 index a6900578e174b..0000000000000 --- a/orttraining/orttraining/python/deprecated/training_session.py +++ /dev/null @@ -1,68 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
-# -------------------------------------------------------------------------- - -import os # noqa: F401 -import sys # noqa: F401 - -from onnxruntime.capi import _pybind_state as C -from onnxruntime.capi.onnxruntime_inference_collection import IOBinding # noqa: F401 -from onnxruntime.capi.onnxruntime_inference_collection import ( - InferenceSession, - Session, - check_and_normalize_provider_args, -) - - -class TrainingSession(InferenceSession): - def __init__(self, path_or_bytes, parameters, sess_options=None, providers=None, provider_options=None): - Session.__init__(self) - - if sess_options: - self._sess = C.TrainingSession(sess_options) - else: - self._sess = C.TrainingSession() - - # providers needs to be passed explicitly as of ORT 1.10 - # retain the pre-1.10 behavior by setting to the available providers. - if providers is None: - providers = C.get_available_providers() - - providers, provider_options = check_and_normalize_provider_args( - providers, provider_options, C.get_available_providers() - ) - - if isinstance(path_or_bytes, str): - config_result = self._sess.load_model(path_or_bytes, parameters, providers, provider_options) - elif isinstance(path_or_bytes, bytes): - config_result = self._sess.read_bytes(path_or_bytes, parameters, providers, provider_options) - else: - raise TypeError(f"Unable to load from type '{type(path_or_bytes)}'") - - self.loss_scale_input_name = config_result.loss_scale_input_name - - self._inputs_meta = self._sess.inputs_meta - self._outputs_meta = self._sess.outputs_meta - - def __del__(self): - if self._sess: - self._sess.finalize() - - def get_state(self): - return self._sess.get_state() - - def get_model_state(self, include_mixed_precision_weights=False): - return self._sess.get_model_state(include_mixed_precision_weights) - - def get_optimizer_state(self): - return self._sess.get_optimizer_state() - - def get_partition_info_map(self): - return self._sess.get_partition_info_map() - - def load_state(self, dict, strict=False): - self._sess.load_state(dict, strict) - - def is_output_fp32_node(self, output_name): - return self._sess.is_output_fp32_node(output_name) diff --git a/orttraining/orttraining/python/ort_trainer.py b/orttraining/orttraining/python/ort_trainer.py deleted file mode 100644 index 5286c087cfb64..0000000000000 --- a/orttraining/orttraining/python/ort_trainer.py +++ /dev/null @@ -1,1241 +0,0 @@ -import io -import os -import warnings - -import numpy as np -import onnx -import torch -import torch.nn -import torch.onnx -from onnx import helper, numpy_helper -from packaging.version import Version as LooseVersion - -import onnxruntime as ort -import onnxruntime.capi.pt_patch -from onnxruntime.tools.symbolic_shape_infer import SymbolicShapeInference - -from ..training import postprocess -from .checkpointing_utils import CombineZeroCheckpoint, get_checkpoint_name, list_checkpoint_files - -DEFAULT_OPSET_VERSION = 14 - - -class IODescription: - def __init__(self, name, shape, dtype=None, num_classes=None): - self.name_ = name - self.shape_ = shape - self.dtype_ = dtype - self.num_classes_ = num_classes - - -class ModelDescription: - def __init__(self, inputs, outputs): - self.inputs_ = inputs - self.outputs_ = outputs - - -def resolve_symbolic_dimensions(inputs, input_descs, output_descs): - import copy - - output_descs_copy = copy.deepcopy(output_descs) - resolved_dims = {} - for input, input_desc in zip(inputs, input_descs): - for i, axis in enumerate(input_desc.shape_): - if isinstance(axis, str): - resolved_dims[axis] = input.size()[i] - 
- for output_desc in output_descs_copy: - for i, axis in enumerate(output_desc.shape_): - if isinstance(axis, str): - output_desc.shape_[i] = resolved_dims[axis] - - if any(isinstance(axis, str) for axis in output_desc.shape_ for output_desc in output_descs): - raise RuntimeError("Cannot run model with unknown output dimensions") - - return output_descs_copy - - -def generate_sample(desc, device=None): - # symbolic dimensions are described with strings. set symbolic dimensions to be 1 - size = [s if isinstance(s, (int)) else 1 for s in desc.shape_] - if desc.num_classes_: - return torch.randint(0, desc.num_classes_, size, dtype=desc.dtype_).to(device) - else: - return torch.randn(size, dtype=desc.dtype_).to(device) - - -def get_device_index(device): - if type(device) == str: # noqa: E721 - # could be 'cuda:0', 'cuda:1', or 'cpu'. with cpu, set index=0 - device = torch.device(device) - return 0 if device.index is None else device.index - - -def input_get_device_index(input): - if isinstance(input, (list, tuple)): - device_index = get_device_index(input[0].device) - else: - device_index = get_device_index(input.device) - - return device_index - - -def get_all_gradients_finite_arg_name(session): - all_fp16_or_fp32_gradients_finite_node_args = [x for x in session._outputs_meta if "all_gradients_finite" in x.name] - if len(all_fp16_or_fp32_gradients_finite_node_args) < 1: - raise RuntimeError( - "Failed to find a group NodeArg with name that matches 'all_gradients_finite'\ - from the training session." - ) - - return all_fp16_or_fp32_gradients_finite_node_args[0].name - - -def get_group_accumulated_gradients_output_node_arg_name(session): - # TODO: get the constant string via pybind. - # optimizer_graph_builder BuildGroupNode with fixed string: 'Group_Accumulated_Gradients' - accumulated_gradients_output_node_args = [ - x for x in session._outputs_meta if "Group_Accumulated_Gradients" in x.name - ] - if len(accumulated_gradients_output_node_args) != 1: - raise RuntimeError( - "Failed to find a group NodeArg with name that matches 'Group_Accumulated_Gradients'\ - from the training session." 
- ) - - return accumulated_gradients_output_node_args[0].name - - -def ort_training_session_run_helper(session, iobinding, inputs, input_descs, output_descs, device, run_options=None): - for input, input_desc in zip(inputs, input_descs): - device_index = input_get_device_index(input) - iobinding.bind_input( - input_desc.name_, - input.device.type, - device_index, - dtype_torch_to_numpy(input.dtype), - list(input.size()), - input.data_ptr(), - ) - - output_descs_resolved = resolve_symbolic_dimensions(inputs, input_descs, output_descs) - torch_outputs = {} - for output_desc in output_descs_resolved: - torch_tensor = torch.zeros( - output_desc.shape_, - device=device, - dtype=output_desc.eval_dtype_ if hasattr(output_desc, "eval_dtype_") else output_desc.dtype_, - ) - iobinding.bind_output( - output_desc.name_, - torch_tensor.device.type, - get_device_index(device), - dtype_torch_to_numpy(torch_tensor.dtype), - list(torch_tensor.size()), - torch_tensor.data_ptr(), - ) - torch_outputs[output_desc.name_] = torch_tensor - - session.run_with_iobinding(iobinding, run_options) - return torch_outputs - - -def FuseSofmaxNLLToSoftmaxCE(onnx_model): # noqa: N802 - nll_count = 0 - while True: - nll_count = nll_count + 1 - nll_loss_node = None - nll_loss_node_index = 0 - for nll_loss_node_index, node in enumerate(onnx_model.graph.node): # noqa: B007 - if node.op_type == "nll_loss" or node.op_type == "NegativeLogLikelihoodLoss": - nll_loss_node = node - break - - if nll_loss_node is None: - break - - softmax_node = None - softmax_node_index = 0 - label_input_name = None - weight_input_name = None - for softmax_node_index, node in enumerate(onnx_model.graph.node): # noqa: B007 - if node.op_type == "LogSoftmax": - # has to be connected to nll_loss - if len(nll_loss_node.input) > 2: - weight_input_name = nll_loss_node.input[2] - if node.output[0] == nll_loss_node.input[0]: - softmax_node = node - label_input_name = nll_loss_node.input[1] - break - elif node.output[0] == nll_loss_node.input[1]: - softmax_node = node - label_input_name = nll_loss_node.input[0] - break - else: - if softmax_node is not None: - break - - if softmax_node is None: - break - - # delete nll_loss and LogSoftmax nodes in order - if nll_loss_node_index < softmax_node_index: - del onnx_model.graph.node[softmax_node_index] - del onnx_model.graph.node[nll_loss_node_index] - else: - del onnx_model.graph.node[nll_loss_node_index] - del onnx_model.graph.node[softmax_node_index] - - probability_output_name = softmax_node.output[0] - node = onnx_model.graph.node.add() - inputs = ( - [softmax_node.input[0], label_input_name, weight_input_name] - if weight_input_name - else [softmax_node.input[0], label_input_name] - ) - node.CopyFrom( - onnx.helper.make_node( - "SparseSoftmaxCrossEntropy", - inputs, - [nll_loss_node.output[0], probability_output_name], - "nll_loss_node_" + str(nll_count), - ) - ) - - return onnx_model - - -def delete_input_with_name(input, name): - index = 0 - for i in input: - if i.name == name: - del input[index] - break - index = index + 1 - - -# reference: -# https://docs.scipy.org/doc/numpy-1.13.0/user/basics.types.html -# https://pytorch.org/docs/stable/tensors.html -# also must map to types accepted by: -# MLDataType NumpyTypeToOnnxRuntimeType(int numpy_type) -def dtype_torch_to_numpy(torch_dtype): - if torch_dtype == torch.float64 or torch_dtype == torch.double: - return np.float64 - elif torch_dtype == torch.float32 or torch_dtype == torch.float: - return np.float32 - elif torch_dtype == torch.float16 or torch_dtype == 
torch.half: - return np.float16 - elif torch_dtype == torch.int64 or torch_dtype == torch.long: - return np.longlong - elif torch_dtype == torch.int32 or torch_dtype == torch.int: - return np.int32 - elif torch_dtype == torch.int16 or torch_dtype == torch.short: - return np.int16 - elif torch_dtype == torch.bool: - return bool - else: - raise Exception("Torch type to numpy type mapping unavailable for: " + str(torch_dtype)) - - -class model_loss_cls(torch.nn.Module): # noqa: N801 - def __init__(self, model, loss_fn): - super().__init__() - self.model_ = model - self.loss_fn_ = loss_fn - - def forward(self, *inputs): - # here we assume input can be unpacked into input and label - input, label = inputs[:-1], inputs[-1] - preds = self.model_(*input) - return self.loss_fn_(preds, label), preds - - -class WrapModel(torch.nn.Module): - def __init__(self, model, loss_fn, input_names): - super().__init__() - self.model_ = model - self.loss_fn_ = loss_fn - self.input_names_ = input_names - - def forward(self, *inputs): - import inspect - - # *inputs is given by torch trace. It is in the order of input_names. - # model_ takes input in a order (which can be obtained via inspect.signature(model.forward)) different than input_names. - sig = inspect.signature(self.model_.forward) - list(sig.parameters.keys()) - - input_dict = {} - for key in sig.parameters: - if key in self.input_names_: - input_dict[key] = inputs[self.input_names_.index(key)] - - model_out = self.model_(**input_dict) - if self.loss_fn_ is None: - return model_out - - label = inputs[-1] - preds = model_out - return self.loss_fn_(preds, label), preds - - -def wrap_for_input_match(model, loss_fn, input_names): - import inspect - - sig = inspect.signature(model.forward) - ordered_list_keys = list(sig.parameters.keys()) - if loss_fn: - sig_loss = inspect.signature(loss_fn) - if len(sig_loss.parameters) != 2: - raise RuntimeError("loss function should take two arguments - predict and label.") - - # label shall be the second input to loss_fn. - ordered_list_keys = [*ordered_list_keys, list(sig_loss.parameters.keys())[1]] - - # name match is needed only when input_names are a subset - # of expected inputs (inputs to model and loss_fn combined). - if len(input_names) > len(ordered_list_keys): - # this is likely the case where input arguments are packed. - # TODO: to unpack the input argument. - return model_loss_cls(model, loss_fn) if loss_fn else model - elif len(input_names) == len(ordered_list_keys): - # in this case, we do not require name match. - return model_loss_cls(model, loss_fn) if loss_fn else model - - if not all(x in ordered_list_keys for x in input_names): - # model desc has name(s) not matching the model signature. We cannot do anything in this case. - # better to warning the user. 
- return model_loss_cls(model, loss_fn) if loss_fn else model - - # if input_names match ordered_list_keys, there is not need for wrapping - match = True - for i, input_name in enumerate(input_names): - if input_name != ordered_list_keys[i]: - match = False - break - - if match: - return model_loss_cls(model, loss_fn) if loss_fn else model - - model = WrapModel(model, loss_fn, input_names) - - return model - - -def convert_model_loss_fn_to_onnx(model, loss_fn, model_desc, device, inputs, opset_version=DEFAULT_OPSET_VERSION): - # example: {input0:{0:'batch'}, input1:{0:'batch'}} - dynamic_axes = {} - for input in model_desc.inputs_: - symbolic_axis = {} - for i, axis in enumerate(input.shape_): - if isinstance(axis, str): - symbolic_axis[i] = axis - if len(symbolic_axis): - dynamic_axes[input.name_] = symbolic_axis - - for output in model_desc.outputs_: - symbolic_axis = {} - for i, axis in enumerate(output.shape_): - if isinstance(axis, str): - symbolic_axis[i] = axis - if len(symbolic_axis): - dynamic_axes[output.name_] = symbolic_axis - - input_names = [input.name_ for input in model_desc.inputs_] - output_names = [output.name_ for output in model_desc.outputs_] - - if isinstance(inputs, torch.Tensor): - inputs = [inputs] - if isinstance(inputs, dict): - sample_inputs = [inputs[k.name_].to(device=device) for k in model_desc.inputs_] - elif isinstance(inputs, (list, tuple)): - sample_inputs = [input.to(device=device) for i, input in enumerate(inputs) if i < len(model_desc.inputs_)] - else: - raise RuntimeError("Unexpected input type. Only torch.Tensor, or dict/list/tuple of torch.Tensor is supported.") - - # pytorch onnx exporter/trace does not try to match argument names. - # e.g. for models with optional inputs, it requires all inputs be present. - # this is a problem because the model graph depends on inputs provided. - model = wrap_for_input_match(model, loss_fn, input_names) - - model.eval() - with torch.no_grad(): - import copy - - # Deepcopy inputs, since input values may change after model run. - sample_inputs_copy = copy.deepcopy(sample_inputs) - try: - # Deepcopy model, in case model is stateful and changes after model run. - model_copy = copy.deepcopy(model) - except Exception: - model_copy = model - warnings.warn( - "This model cannot be deep copied (or pickled), which is a required step for stateful models to be properly exported to ONNX." - " Compute will continue, but unexpected results may occur!" - ) - - sample_outputs = model_copy(*sample_inputs_copy) - if isinstance(sample_outputs, torch.Tensor): - sample_outputs = [sample_outputs] - for sample_output, output_desc in zip(sample_outputs, model_desc.outputs_): - output_desc.dtype_ = sample_output.dtype - model.train() - - f = io.BytesIO() - - # Other export options to use(this is for backward compatibility). - other_export_options = {} - other_export_options["training"] = True - - # This option was added after 1.4 release. - if LooseVersion(torch.__version__) > LooseVersion("1.4.0") and LooseVersion(torch.__version__) < LooseVersion( - "1.10.0" - ): - other_export_options["enable_onnx_checker"] = False - # This option was added after 1.6 release. - if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"): - other_export_options["training"] = torch.onnx.TrainingMode.TRAINING - - # Deepcopy inputs, since input values may change after model run. 
- import copy - - sample_inputs_copy = copy.deepcopy(sample_inputs) - - # Enable contrib ops export from PyTorch - from onnxruntime.tools import pytorch_export_contrib_ops - - pytorch_export_contrib_ops.register() - - torch.onnx._export( - model, - tuple(sample_inputs_copy), - f, - input_names=input_names, - output_names=output_names, - opset_version=opset_version, - dynamic_axes=dynamic_axes, - do_constant_folding=False, - **other_export_options, - ) - - onnx_model = onnx.load_model_from_string(f.getvalue()) - - # Remove 'model_.' prefix introduced by model wrapper for initializers. - if isinstance(model, (WrapModel, model_loss_cls)): - replace_name_dict = {} - for n in onnx_model.graph.initializer: - if n.name.startswith("model_."): - replace_name_dict[n.name] = n.name[len("model_.") :] - n.name = replace_name_dict[n.name] - for n in onnx_model.graph.node: - for i, name in enumerate(n.input): - if name in replace_name_dict: - n.input[i] = replace_name_dict[name] - - return onnx_model - - -def create_ort_training_session_with_optimizer( - model, - device, - training_optimizer_name, - lr_params_feed_name, - map_optimizer_attributes, - world_rank=-1, - world_size=1, - gradient_accumulation_steps=1, - bind_parameters=False, - use_mixed_precision=False, - allreduce_post_accumulation=False, - deepspeed_zero_stage=0, - enable_grad_norm_clip=True, - frozen_weights=[], # noqa: B006 - opset_version=DEFAULT_OPSET_VERSION, - use_deterministic_compute=False, - use_memory_efficient_gradient=False, - enable_adasum=False, - optimized_model_filepath="", -): - output_name = model.graph.output[0].name - ort_parameters = ort.TrainingParameters() - ort_parameters.loss_output_name = output_name - ort_parameters.use_mixed_precision = use_mixed_precision - ort_parameters.world_rank = world_rank - ort_parameters.world_size = world_size - ort_parameters.gradient_accumulation_steps = gradient_accumulation_steps - ort_parameters.allreduce_post_accumulation = allreduce_post_accumulation - ort_parameters.deepspeed_zero_stage = deepspeed_zero_stage - ort_parameters.enable_grad_norm_clip = enable_grad_norm_clip - ort_parameters.set_gradients_as_graph_outputs = False - ort_parameters.use_memory_efficient_gradient = use_memory_efficient_gradient - ort_parameters.enable_adasum = enable_adasum - output_types = {} - for output in model.graph.output: - output_types[output.name] = output.type.tensor_type - - # pybind does not allow to add directly to ort_parameters.weights_to_train. - # Have to work around by using a temporary weights_to_train. 
- torch_params = {} - optimizer_attributes_map = {} - optimizer_int_attributes_map = {} - - unused_frozen_weights = [n for n in frozen_weights if n not in [i.name for i in model.graph.initializer]] - if unused_frozen_weights: - raise RuntimeError(f"{unused_frozen_weights} in frozen_weights not found in model weights.") - - weights_to_train = set() - for initializer in model.graph.initializer: - if initializer.name in frozen_weights: - continue - weights_to_train.add(initializer.name) - if map_optimizer_attributes is not None: - attributes = map_optimizer_attributes(initializer.name) - optimizer_attributes_map[initializer.name] = {} - optimizer_int_attributes_map[initializer.name] = {} - for k, v in attributes.items(): - if isinstance(v, float): - optimizer_attributes_map[initializer.name][k] = v - elif isinstance(v, int): - optimizer_int_attributes_map[initializer.name][k] = v - else: - raise ValueError("Optimizer attributes must be either float or int.") - else: - optimizer_attributes_map[initializer.name] = {} - optimizer_int_attributes_map[initializer.name] = {} - - if bind_parameters: - for initializer in model.graph.initializer: - torch_tensor = torch.nn.Parameter(torch.as_tensor(numpy_helper.to_array(initializer), device=device)) - delete_input_with_name(model.graph.input, initializer.name) - model.graph.input.extend( - [helper.make_tensor_value_info(initializer.name, initializer.data_type, initializer.dims)] - ) - torch_params[initializer.name] = torch_tensor - - del model.graph.initializer[:] - - ort_parameters.weights_to_train = weights_to_train - ort_parameters.training_optimizer_name = training_optimizer_name - ort_parameters.lr_params_feed_name = lr_params_feed_name - ort_parameters.optimizer_attributes_map = optimizer_attributes_map - ort_parameters.optimizer_int_attributes_map = optimizer_int_attributes_map - - sessionOptions = ort.SessionOptions() # noqa: N806 - sessionOptions.use_deterministic_compute = use_deterministic_compute - if len(optimized_model_filepath) > 0: - sessionOptions.optimized_model_filepath = optimized_model_filepath - session = ort.TrainingSession(model.SerializeToString(), ort_parameters, sessionOptions) - train_io_binding = session.io_binding() - eval_io_binding = session.io_binding() - - if bind_parameters: - for param in torch_params: - torch_tensor = torch_params[param] - - train_io_binding.bind_input( - param, - torch_tensor.device.type, - get_device_index(torch_tensor.device), - dtype_torch_to_numpy(torch_params[param].dtype), - list(torch_tensor.size()), - torch_tensor.data_ptr(), - ) - eval_io_binding.bind_input( - param, - torch_tensor.device.type, - get_device_index(torch_tensor.device), - dtype_torch_to_numpy(torch_params[param].dtype), - list(torch_tensor.size()), - torch_tensor.data_ptr(), - ) - - return session, train_io_binding, eval_io_binding, output_name, torch_params, output_types - - -def save_checkpoint( - model, checkpoint_dir, checkpoint_prefix="ORT_checkpoint", checkpoint_state_dict=None, include_optimizer_state=True -): - if checkpoint_state_dict is None: - checkpoint_state_dict = {"model": model.state_dict(include_optimizer_state)} - else: - checkpoint_state_dict.update({"model": model.state_dict(include_optimizer_state)}) - - assert os.path.exists(checkpoint_dir), f"ERROR: Checkpoint directory doesn't exist: {checkpoint_dir}" - - checkpoint_name = get_checkpoint_name( - checkpoint_prefix, model.deepspeed_zero_stage_, model.world_rank, model.world_size - ) - checkpoint_file = os.path.join(checkpoint_dir, checkpoint_name) - - if 
os.path.exists(checkpoint_file): - warnings.warn(f"{checkpoint_file} already exists, overwriting.") - - torch.save(checkpoint_state_dict, checkpoint_file) - - -def _load_single_checkpoint(model, checkpoint_dir, checkpoint_prefix, is_partitioned, strict): - checkpoint_name = get_checkpoint_name(checkpoint_prefix, is_partitioned, model.world_rank, model.world_size) - checkpoint_file = os.path.join(checkpoint_dir, checkpoint_name) - - if is_partitioned: - assert_msg = ( - f"Couldn't find checkpoint file {checkpoint_file}." - "Optimizer partitioning is enabled using ZeRO. Please make sure that the " - f"checkpoint file exists for rank {model.world_rank} of {model.world_size}." - ) - else: - assert_msg = f"Couldn't find checkpoint file {checkpoint_file}." - - assert os.path.exists(checkpoint_file), assert_msg - - checkpoint_state = torch.load(checkpoint_file, map_location="cpu") - - model.load_state_dict(checkpoint_state["model"], strict=strict) - del checkpoint_state["model"] - return checkpoint_state - - -def _load_multi_checkpoint(model, checkpoint_dir, checkpoint_prefix, strict): - checkpoint_files = list_checkpoint_files(checkpoint_dir, checkpoint_prefix) - - ckpt_agg = CombineZeroCheckpoint(checkpoint_files) - aggregate_state_dict = ckpt_agg.aggregate_checkpoints() - - model.load_state_dict(aggregate_state_dict, strict=strict) - - # aggregate other keys in the state_dict. - # Values will be overwritten for matching keys among workers - all_checkpoint_states = {} - for checkpoint_file in checkpoint_files: - checkpoint_state = torch.load(checkpoint_file, map_location="cpu") - del checkpoint_state["model"] - all_checkpoint_states.update(checkpoint_state) - return all_checkpoint_states - - -def load_checkpoint(model, checkpoint_dir, checkpoint_prefix="ORT_checkpoint", strict=False): - checkpoint_files = list_checkpoint_files(checkpoint_dir, checkpoint_prefix) - is_partitioned = False - if len(checkpoint_files) > 1: - warnings.warn( - f"Found more than one file with prefix {checkpoint_prefix} in directory {checkpoint_dir}." - "Attempting to load ZeRO checkpoint." - ) - is_partitioned = True - if (not model.deepspeed_zero_stage_) and is_partitioned: - return _load_multi_checkpoint(model, checkpoint_dir, checkpoint_prefix, strict) - else: - return _load_single_checkpoint(model, checkpoint_dir, checkpoint_prefix, is_partitioned, strict) - - -class ORTTrainer: - def __init__( - self, - model, - loss_fn, - model_desc, - training_optimizer_name, - map_optimizer_attributes, - learning_rate_description, - device, - gradient_accumulation_steps=1, - world_rank=0, - world_size=1, - use_mixed_precision=False, - allreduce_post_accumulation=False, - global_step=0, - get_lr_this_step=None, - loss_scaler=None, - deepspeed_zero_stage=0, - enable_grad_norm_clip=True, - frozen_weights=[], # noqa: B006 - _opset_version=DEFAULT_OPSET_VERSION, - _enable_internal_postprocess=True, - _extra_postprocess=None, - _use_deterministic_compute=False, - use_memory_efficient_gradient=False, - run_symbolic_shape_infer=False, - enable_adasum=False, - optimized_model_filepath="", - ): - super().__init__() - """ - Initialize ORTTrainer. - - Args: - - model: one of - - a PyTorch model (class that inherits from torch.nn.Module) - - a combined PyTorch model and loss function. - Inputs to this combined PyTorch model are a concatenation of the - model's input and the loss function's label input. - Outputs are a concatenation of the loss function's output and the - model's output. - - a combined ONNX model and loss function. 
- loss_fn: one of - - a PyTorch loss function if 'model' is a PyTorch model. A loss - function takes two inputs (prediction, label) and outputs a loss - tensor. - - None if model is already combined with a loss function. - model_desc: Specify input/output shapes, types, and names. - Must be consistent with the training model. - training_optimizer_name: one of - - 'SGDOptimizer' - - 'AdamOptimizer' - - 'LambOptimizer' - map_optimizer_attributes: for optimizers with weight-dependent - parameters. A callable that maps weight name to a set of optimization - parameters. - Defaults to None. - learning_rate_description: the name, shape and type of the learning - rate in form of IODescription(Learning_Rate_Name, [1,], torch.float32). - Because learning_rate is an input to the training model, - Learning_Rate_Name must be specified so that there is no name conflict - within the model. - device: device to store tensors (e.g. 'cpu', 'cuda', 'cuda:'). - gradient_accumulation_steps: number of training steps to accumulate - gradients before averaging and applying them. - Defaults to 1. - world_rank: rank id used for distributed training. - Defaults to 0. - world_size: number of ranks participating in distributed training. - Defaults to 1. - use_mixed_precision: flag to enable mixed precision (aka fp16). - Defaults to False. - allreduce_post_accumulation: controls whether overlaping gradient - computation is applied with allreduce. - Defaults to False. - global_step: training step that is used as input to 'get_lr_this_step'. - Defaults to 0. - get_lr_this_step: functor used as learning rate scheduler. - It uses 'global_step' as input. - Defaults to None. - loss_scaler: updates loss scale automatically when 'use_mixed_precision' - is specified. - Defaults to None. - deepspeed_zero_stage: controls whether to partition state using the DeepSpeed ZeRO technique. Stages 0 and 1 are supported. - Defaults to 0 (disabled). - enable_grad_norm_clip: enables gradient norm clipping. - Defaults to True. - frozen_weights: list of model parameters to be frozen (not trained). - Defaults to []. - _enable_internal_postprocess: whether to run or not the internal postprocesses. - Defaults to True - _extra_postprocess: a callable to postprocess the ONNX model that is converted from PyTorch. - Defaults to None - use_memory_efficient_gradient: use memory aware gradient builder. - Defaults to False - run_symbolic_shape_infer: run symbolic shape inference - Defaults to False - optimized_model_filepath: path to output the optimized training graph. - Defaults to "" (no output). - """ - warnings.warn( - "ORTTrainer is deprecated and will be removed in ort release 1.14. Please use ORTModule instead.", - FutureWarning, - ) - warnings.warn( - "DISCLAIMER: This is an early version of an experimental training API and it is subject to change. DO NOT create production applications with it" - ) - self.is_train = True - - self.torch_model_ = None - self.onnx_model_ = None - self._enable_internal_postprocess = _enable_internal_postprocess - self._extra_postprocess = _extra_postprocess - - if isinstance(model, torch.nn.Module): - self.torch_model_ = model - self.loss_fn_ = loss_fn - self._torch_state_dict_keys = list(model.state_dict().keys()) - else: - self._torch_state_dict_keys = [] - self.onnx_model_ = model - if loss_fn is not None: - warnings.warn("loss_fn is not used when creating ORTTrainer because an ONNX model is provided.") - # TODO: accept loss_fn as an onnx model. 
build self.onnx_model_ with model and loss_fn - self.loss_fn_ = None - - if self._enable_internal_postprocess: - postprocess.run_postprocess(self.onnx_model_) - - if self._extra_postprocess: - self._extra_postprocess(self.onnx_model_) - - self.model_desc_ = model_desc - self.input_desc_with_lr = [*self.model_desc_.inputs_, learning_rate_description] - - self.world_rank = world_rank - self.world_size = world_size - self.use_mixed_precision = use_mixed_precision - - self.session = None - self.device_ = device - self.gradient_accumulation_steps = gradient_accumulation_steps - # we use self.current_step to count calls to train_step. It is used for gradient accumulation. - # gradients are being accumulated when self.current_step is not divisible by gradient_accumulation_steps. - # gradients are updated when self.current_step is divisible by gradient_accumulation_steps. - self.current_step = 0 - - # we use self.global_step_ to count optimizations being performed. - # it is used to calculate learning rate if self.get_lr_this_step_ is provided. - self.global_step_ = global_step - self.get_lr_this_step_ = get_lr_this_step - self.loss_scaler_ = loss_scaler - - if self.get_lr_this_step_ is not None or self.loss_scaler_ is not None: - warnings.warn("It is experimental to use learning rate scheduler and loss scaler inside ORTTrainer.") - self.training_optimizer_name_ = training_optimizer_name - self.learning_rate_description_ = learning_rate_description - self.map_optimizer_attributes_ = map_optimizer_attributes - self.allreduce_post_accumulation_ = allreduce_post_accumulation - self.deepspeed_zero_stage_ = deepspeed_zero_stage - self.enable_grad_norm_clip_ = enable_grad_norm_clip - self.frozen_weights_ = frozen_weights - self.opset_version_ = _opset_version - self.state_dict_ = None - self._use_deterministic_compute = _use_deterministic_compute - self.use_memory_efficient_gradient = use_memory_efficient_gradient - self.run_symbolic_shape_infer = run_symbolic_shape_infer - self.enable_adasum = enable_adasum - self.optimized_model_filepath = optimized_model_filepath - - # use this special string to workaround a corner case that external loss_scale is passed into train_step as kwargs. - # see prepare_input_and_fetches for more details. - self.loss_scale_input_name = "default_loss_scale_input_name" - - self._init_session() - - def _init_session(self): - if self.onnx_model_ is None: - return - - self._verify_fully_optimized_model(self.onnx_model_) - - if self.run_symbolic_shape_infer: - self.onnx_model_ = SymbolicShapeInference.infer_shapes( - self.onnx_model_, auto_merge=True, guess_output_rank=True - ) - - # old ort session may already exists and occupies GPU memory when creating new session, this may cause OOM error. 
- # for example, load_state_dict will be called before returing the function, and it calls _init_session again - del self.session - ( - self.session, - self.train_io_binding, - self.eval_io_binding, - self.output_name, - _, - self.output_types, - ) = create_ort_training_session_with_optimizer( - self.onnx_model_, - self.device_, - self.training_optimizer_name_, - self.learning_rate_description_.name_, - self.map_optimizer_attributes_, - self.world_rank, - self.world_size, - self.gradient_accumulation_steps, - bind_parameters=False, - use_mixed_precision=self.use_mixed_precision, - allreduce_post_accumulation=self.allreduce_post_accumulation_, - deepspeed_zero_stage=self.deepspeed_zero_stage_, - enable_grad_norm_clip=self.enable_grad_norm_clip_, - frozen_weights=self.frozen_weights_, - opset_version=self.opset_version_, - use_deterministic_compute=self._use_deterministic_compute, - use_memory_efficient_gradient=self.use_memory_efficient_gradient, - enable_adasum=self.enable_adasum, - optimized_model_filepath=self.optimized_model_filepath, - ) - - self.loss_scale_input_name = self.session.loss_scale_input_name - - if self.use_mixed_precision: - self.input_desc_with_lr_and_loss_scale = [ - *self.input_desc_with_lr, - IODescription(self.loss_scale_input_name, [], torch.float32), - ] - - # ORT backend has modified model output dtype from float32 to float16. - for o_desc in self.model_desc_.outputs_: - if ( - self.use_mixed_precision - and o_desc.dtype_ == torch.float32 - and not self.session.is_output_fp32_node(o_desc.name_) - ): - o_desc.eval_dtype_ = torch.float16 - else: - o_desc.eval_dtype_ = o_desc.dtype_ - - # gradient accumulation buffers are connected to a single node with a boolean, dimension 1 tensor output. - # add a matching output to drive gradient accumulation. - if self.gradient_accumulation_steps > 1: - self.output_desc_with_group_accumulated_gradients = [ - *self.model_desc_.outputs_, - IODescription(get_group_accumulated_gradients_output_node_arg_name(self.session), [1], torch.bool), - ] - - if self.use_mixed_precision: - # when ready to use accumulated gradient with mixed precision, we need to fetch all_infinite to determine - # if the gradient is usable. - self.output_desc_with_all_fp_16_or_fp32_gradients_finite = [ - *self.model_desc_.outputs_, - IODescription(get_all_gradients_finite_arg_name(self.session), [1], torch.bool), - ] - - if self.state_dict_: - self.load_state_dict(self.state_dict_, self.strict_) - self.state_dict_ = None - - def _init_onnx_model(self, inputs): - if self.onnx_model_ is not None: - return - - if self.torch_model_ is not None: - # NOTE: pt model is moved to cpu to conserve gpu memory. - self.torch_model_.cpu() - # torch buffers created using 'register_buffer' are not meant to be trainable. 
- torch_buffers = list(dict(self.torch_model_.named_buffers()).keys()) - self.frozen_weights_ = self.frozen_weights_ + torch_buffers - self.onnx_model_ = convert_model_loss_fn_to_onnx( - self.torch_model_, - self.loss_fn_, - self.model_desc_, - torch.device("cpu"), - inputs, - opset_version=self.opset_version_, - ) - - if self._enable_internal_postprocess: - postprocess.run_postprocess(self.onnx_model_) - - if self._extra_postprocess: - self._extra_postprocess(self.onnx_model_) - - self._init_session() - - def train(self): - self.is_train = True - - def eval(self): - self.is_train = False - - def _update_onnx_model_initializers(self, state_tensors): - # replace the initializers with new value - new_weights = [] - replace_indices = [] - for i, w in enumerate(self.onnx_model_.graph.initializer): - if w.name in state_tensors: - new_weights.append(numpy_helper.from_array(state_tensors[w.name], w.name)) - replace_indices.append(i) - replace_indices.sort(reverse=True) - for w_i in replace_indices: - del self.onnx_model_.graph.initializer[w_i] - self.onnx_model_.graph.initializer.extend(new_weights) - - def state_dict(self, include_optimizer_state=True): - if not self.session: - warnings.warn( - "ONNXRuntime training session is not initialized yet. " - "Please run train_step or eval_step at least once before calling state_dict()." - ) - return {} - - # extract trained weights - session_state = self.session.get_state() - torch_state = {} - for name in session_state: - torch_state[name] = torch.from_numpy(session_state[name]) - - # extract untrained weights and buffer - for n in self.onnx_model_.graph.initializer: - if n.name not in torch_state: - torch_state[n.name] = torch.from_numpy(numpy_helper.to_array(n)) - - # Need to remove redundant initializers and name suffices to map back to original torch state names - if not include_optimizer_state and self._torch_state_dict_keys: - return {key: torch_state[key] for key in self._torch_state_dict_keys if key in torch_state} - return torch_state - - def load_state_dict(self, state_dict, strict=False): - # Note: It may happen ONNX model has not yet been initialized - # In this case we cache a reference to desired state and delay the restore until after initialization - # Unexpected behavior will result if the user changes the reference before initialization - if not self.session: - self.state_dict_ = state_dict - self.strict_ = strict - return - - # update onnx model from loaded state dict - cur_initializers_names = [n.name for n in self.onnx_model_.graph.initializer] - new_initializers = {} - - for name in state_dict: - if name in cur_initializers_names: - new_initializers[name] = state_dict[name].numpy() - elif strict: - raise RuntimeError(f"Checkpoint tensor: {name} is not present in the model.") - - self._update_onnx_model_initializers(new_initializers) - - # create new session based on updated onnx model - self.state_dict_ = None - self._init_session() - - # load training state - session_state = {name: state_dict[name].numpy() for name in state_dict} - self.session.load_state(session_state, strict) - - def save_as_onnx(self, path): - if not self.session: - warnings.warn( - "ONNXRuntime training session is not initialized yet. " - "Please run train_step or eval_step at least once before calling save_as_onnx()." 
- ) - return - state_tensors = self.session.get_state() - self._update_onnx_model_initializers(state_tensors) - - with open(path, "wb") as f: - f.write(self.onnx_model_.SerializeToString()) - - def _prepare_input_and_fetches( - self, input_desc_with_, internal_learning_rate, internal_loss_scale, *args, **kwargs - ): - fetches = None - if type(args) == tuple and len(args) == 1 and type(args[0]) == list: # noqa: E721 - input = tuple(args[0]) - else: - input = args - - for input_desc in input_desc_with_: - if input_desc.name_ in kwargs: - input = (*input, kwargs[input_desc.name_]) - if internal_learning_rate is not None: - input = (*input, internal_learning_rate) - if internal_loss_scale is not None: - input = (*input, internal_loss_scale) - elif self.use_mixed_precision: - # loss_scale input name is needed to call train_step, for example: - # kwargs[model.loss_scale_input_name] = loss_scale - # outputs = model.train_step(*args, **kwargs) - # However, when first time train_step is called model.loss_scale_input_name is not set. - # To workaround this problem, we use the special name 'default_loss_scale_input_name' to indicate - # the loss_scale. - if "default_loss_scale_input_name" in kwargs: - input = (*input, kwargs["default_loss_scale_input_name"]) - - fetches = None - if "fetches" in kwargs: - fetches = kwargs["fetches"] - - return input, fetches - - def train_step(self, *args, **kwargs): - """ - inputs: model inputs, labels, learning rate, and, if in mixed_precision mode, loss_scale. - outputs: if fetches is not provided, outputs are loss and - (if in mixed mode and is finishing gradient accumulation) all_finite. - if fetches is provided, outputs contains these requested with fetches. - fetches: names of requested outputs - """ - - # inputs to the ONNX model includes inputs to the original PyTorch model - # plus learning rate and loss_scale if self.use_mixed_precision is True. - # 1. when there are internal learning_rate and loss_scale (in fp16 cases) generators, - # *args and **kwargs together contain ONLY and COMPLETE inputs to the PyTorch model. - # In this case, changes to the training script is minimized. - # 2. without internal learning rate and loss scale (in fp16 cases) generators, - # *args and **kwargs passed in from the training script shall contains - # inputs to the PyTorch model plus learning_rate and loss_scale. - # it optionally contains the fetches. - # localized arguments (*args) contains inputs to the ONNX model. 
- # named arguments can contain inputs, learning_rate and loss_scale, as well as the fetches - - learning_rate, loss_scale = None, None - if self.get_lr_this_step_ is not None: - # *args and **kwargs contain inputs to the PyTorch model - lr_this_step = self.get_lr_this_step_(self.global_step_) - learning_rate = torch.tensor([lr_this_step]) - if self.loss_scaler_ is not None and self.use_mixed_precision: - loss_scale = torch.tensor([self.loss_scaler_.loss_scale_]) - - if self.onnx_model_ is None: - sample_input, _ = self._prepare_input_and_fetches(self.model_desc_.inputs_, None, None, *args, **kwargs) - self._init_onnx_model(sample_input) - - if self.use_mixed_precision: - input, fetches = self._prepare_input_and_fetches( - self.input_desc_with_lr_and_loss_scale, learning_rate, loss_scale, *args, **kwargs - ) - assert len(self.input_desc_with_lr_and_loss_scale) == len(input) - input_descs = self.input_desc_with_lr_and_loss_scale - else: - input, fetches = self._prepare_input_and_fetches( - self.input_desc_with_lr, learning_rate, loss_scale, *args, **kwargs - ) - assert len(self.input_desc_with_lr) == len(input) - input_descs = self.input_desc_with_lr - - self.current_step += 1 - - # handle gradient accumulation in fully optimized mode - run_options = None - has_if_all_finite = False - if fetches: - output_desc = [output for fetch in fetches for output in self.model_desc_.outputs_ if output.name_ == fetch] - elif self.current_step % self.gradient_accumulation_steps != 0: - run_options = ort.RunOptions() - run_options.only_execute_path_to_fetches = True - output_desc = self.output_desc_with_group_accumulated_gradients - elif self.use_mixed_precision: - has_if_all_finite = True - output_desc = self.output_desc_with_all_fp_16_or_fp32_gradients_finite - else: - output_desc = self.model_desc_.outputs_ - - if not isinstance(input, (list, tuple)): - input = (input,) - - session_run_results = ort_training_session_run_helper( - self.session, self.train_io_binding, input, input_descs, output_desc, self.device_, run_options - ) - - if has_if_all_finite: - # After a session run with all_fp32_gradients_finite, we need to clear the iobinding's output state. - # Otherwise the next run with only_execute_path_to_fetches will lead to a gradient allreduce - # because all_fp32_gradients_finite is still in the feed. - self.train_io_binding.clear_binding_outputs() - all_finite = session_run_results[self.output_desc_with_all_fp_16_or_fp32_gradients_finite[-1].name_] - if self.loss_scaler_ is not None: - self.loss_scaler_.update_loss_scale(all_finite) - if all_finite: - # the optimization step has completed, increase self.global_step_ - self.global_step_ = self.global_step_ + 1 - elif self.current_step % self.gradient_accumulation_steps == 0: - # the optimization step has completed, increase self.global_step_ - self.global_step_ = self.global_step_ + 1 - - if fetches is not None: - results = [session_run_results[fetch] for fetch in fetches] - elif has_if_all_finite and self.loss_scaler_ is None: - # return the described outputs plus the all_finite flag so that the training script can handle loss scaling.
- results = [ - session_run_results[output_desc.name_] - for output_desc in self.output_desc_with_all_fp_16_or_fp32_gradients_finite - ] - else: - results = [session_run_results[output_desc.name_] for output_desc in self.model_desc_.outputs_] - return results[0] if len(results) == 1 else results - - def __call__(self, *args, **kwargs): - if self.is_train: - return self.train_step(*args, **kwargs) - else: - return self.eval_step(*args, **kwargs) - - def eval_step(self, *args, **kwargs): - """ - inputs: model inputs and/or labels. - outputs: if 'fetches' is not provided, outputs are loss and - (if in mixed mode and is finishing gradient accumulation) all_finite. - if fetches is provided, outputs contain the requested fetches. - fetches: names of requested outputs - """ - - # with model_loss_cls, the last input is the label and the first output is the loss - input, fetches = self._prepare_input_and_fetches(self.model_desc_.inputs_, None, None, *args, **kwargs) - - if self.onnx_model_ is None: - if self.torch_model_ is not None: - self._init_onnx_model(input) - else: - raise RuntimeError( - "Model is uninitialized. Please ensure a valid ONNX model or PyTorch model is provided to this Trainer." - ) - - input_desc = self.model_desc_.inputs_[0 : len(input)] - if fetches is None: - output_desc = self.model_desc_.outputs_ - else: - output_desc = [output for fetch in fetches for output in self.model_desc_.outputs_ if output.name_ == fetch] - - if not isinstance(input, (list, tuple)): - input = (input,) - - run_options = ort.RunOptions() - run_options.only_execute_path_to_fetches = True - run_options.training_mode = False - - session_run_results = ort_training_session_run_helper( - self.session, self.eval_io_binding, input, input_desc, output_desc, self.device_, run_options - ) - - if len(session_run_results) == 1: - return session_run_results[next(iter(session_run_results.keys()))] - else: - return [session_run_results[output_desc.name_] for output_desc in output_desc] - - def _verify_fully_optimized_model(self, model): - assert len(model.graph.output) > 0 - # model's first output must be the loss tensor - if model.graph.output[0].type.tensor_type.elem_type not in { - onnx.TensorProto.FLOAT, - onnx.TensorProto.FLOAT16, - onnx.TensorProto.DOUBLE, - onnx.TensorProto.COMPLEX64, - onnx.TensorProto.COMPLEX128, - onnx.TensorProto.BFLOAT16, - onnx.TensorProto.FLOAT8E4M3FN, - onnx.TensorProto.FLOAT8E4M3FNUZ, - onnx.TensorProto.FLOAT8E5M2, - onnx.TensorProto.FLOAT8E5M2FNUZ, - }: - raise RuntimeError( - "The first output of a model run with the fully optimized ORT backend must be a float type." - ) - if len(model.graph.output[0].type.tensor_type.shape.dim) != 0: - raise RuntimeError( - "The first output of a model run with the fully optimized ORT backend is assumed to be the loss and must be a scalar."
- ) - - -class LossScaler: - def __init__( - self, - loss_scale_input_name, - is_dynamic_scale, - loss_scale=float(1 << 16), - up_scale_window=2000, - min_loss_scale=1.0, - max_loss_scale=float(1 << 24), - ): - super().__init__() - self.loss_scale_input_name_ = loss_scale_input_name - self.is_dynamic_scale_ = is_dynamic_scale - self.initial_loss_scale_ = loss_scale - self.up_scale_window_ = up_scale_window - self.min_loss_scale_ = min_loss_scale - self.max_loss_scale_ = max_loss_scale - self.loss_scale_ = loss_scale - self.stable_steps_ = 0 - - def update_loss_scale(self, is_all_finite): - if not self.is_dynamic_scale_: - return - - if is_all_finite: - self.stable_steps_ += 1 - - if self.stable_steps_ >= self.up_scale_window_: - self.loss_scale_ = min(self.max_loss_scale_, self.loss_scale_ * 2) - self.stable_steps_ = 0 - else: - self.loss_scale_ = max(self.min_loss_scale_, self.loss_scale_ / 2) - self.stable_steps_ = 0 - - def reset(self): - self.loss_scale_ = self.initial_loss_scale_ - self.stable_steps_ = 0 diff --git a/orttraining/orttraining/python/orttraining_pybind_state.cc b/orttraining/orttraining/python/orttraining_pybind_state.cc index a08e8bee99cee..bb1cb4bbd32f7 100644 --- a/orttraining/orttraining/python/orttraining_pybind_state.cc +++ b/orttraining/orttraining/python/orttraining_pybind_state.cc @@ -18,7 +18,6 @@ #include "core/session/environment.h" #include "core/session/custom_ops.h" #include "core/dlpack/dlpack_converter.h" -#include "orttraining/core/session/training_session.h" #include "orttraining/core/agent/training_agent.h" #include "orttraining/core/graph/gradient_config.h" #include "orttraining/core/graph/optimizer_config.h" @@ -113,14 +112,11 @@ struct TrainingParameters { std::unordered_set weights_to_train; std::unordered_set weights_not_to_train; - onnxruntime::training::TrainingSession::ImmutableWeights immutable_weights; - // optimizer std::string training_optimizer_name; std::string lr_params_feed_name = "Learning_Rate"; std::unordered_map> optimizer_attributes_map; std::unordered_map> optimizer_int_attributes_map; - onnxruntime::training::TrainingSession::OptimizerState optimizer_initial_state; std::unordered_map> sliced_schema; std::unordered_map sliced_axes; std::vector sliced_tensor_names; @@ -206,185 +202,6 @@ struct PyGradientGraphBuilderContext { local_registries_(local_registries) {} }; -// TODO: this method does not handle parallel optimization. -TrainingConfigurationResult ConfigureSessionForTraining( - training::PipelineTrainingSession* sess, TrainingParameters& parameters) { - // TODO tix, refactor the mpi related code to populate all fields correctly by default. - ORT_ENFORCE(parameters.data_parallel_size <= parameters.world_size, "data_parallel_size: ", parameters.data_parallel_size, ", world_size: ", parameters.world_size); - ORT_ENFORCE(parameters.horizontal_parallel_size <= parameters.world_size, "horizontal_parallel_size: ", parameters.horizontal_parallel_size, ", world_size: ", parameters.world_size); - ORT_ENFORCE(parameters.pipeline_parallel_size <= parameters.world_size, "pipeline_parallel_size: ", parameters.pipeline_parallel_size, ", world_size: ", parameters.world_size); - - // When DxHxP != the total number of ranks, we try adjusting D so that DxHxP == the total number of ranks. 
- if (parameters.world_size != parameters.data_parallel_size * parameters.horizontal_parallel_size * parameters.pipeline_parallel_size) { - ORT_ENFORCE(parameters.world_size % parameters.horizontal_parallel_size * parameters.pipeline_parallel_size == 0, - "D, H, P sizes are incorrect. To enable automatic correction, total number of ranks must be a divisible by HxP."); - - const auto new_data_parallel_size = parameters.world_size / (parameters.horizontal_parallel_size * parameters.pipeline_parallel_size); - parameters.data_parallel_size = new_data_parallel_size; - - const std::string msg = "Cannot distribute " + std::to_string(parameters.world_size) + " ranks for distributed computation with D=" + std::to_string(parameters.data_parallel_size) + - ", H=" + std::to_string(parameters.horizontal_parallel_size) + ", P=" + std::to_string(parameters.pipeline_parallel_size) + ", so D is automatically changed to " + std::to_string(new_data_parallel_size); - LOGS(*(sess->GetLogger()), WARNING) << msg; - } - - training::PipelineTrainingSession::TrainingConfiguration config{}; - config.weight_names_to_train = parameters.weights_to_train; - config.weight_names_to_not_train = parameters.weights_not_to_train; - config.immutable_weights = parameters.immutable_weights; - config.gradient_accumulation_steps = parameters.gradient_accumulation_steps; - - config.distributed_config.world_rank = parameters.world_rank; - config.distributed_config.world_size = parameters.world_size; - config.distributed_config.local_rank = parameters.local_rank; - config.distributed_config.local_size = parameters.local_size; - config.distributed_config.data_parallel_size = parameters.data_parallel_size; - config.distributed_config.horizontal_parallel_size = parameters.horizontal_parallel_size; - config.distributed_config.pipeline_parallel_size = parameters.pipeline_parallel_size; - config.distributed_config.num_pipeline_micro_batches = parameters.num_pipeline_micro_batches; - config.distributed_config.sliced_schema = parameters.sliced_schema; - config.distributed_config.sliced_axes = parameters.sliced_axes; - config.distributed_config.sliced_tensor_names = parameters.sliced_tensor_names; - - if (parameters.use_mixed_precision) { - training::PipelineTrainingSession::TrainingConfiguration::MixedPrecisionConfiguration mp{}; - mp.use_mixed_precision_initializers = true; - - config.mixed_precision_config = mp; - } - - if (config.distributed_config.pipeline_parallel_size > 1) { - training::PipelineTrainingSession::TrainingConfiguration::PipelineConfiguration pipeline_config; - - // Currently don't support auto-partition. User needs to pass in cut information for pipeline - pipeline_config.do_partition = true; - assert(!parameters.pipeline_cut_info_string.empty()); - - auto process_with_delimiter = [](std::string& input_str, const std::string& delimiter) { - std::vector result; - size_t pos = 0; - while ((pos = input_str.find(delimiter)) != std::string::npos) { - std::string token = input_str.substr(0, pos); - result.emplace_back(token); - input_str.erase(0, pos + delimiter.length()); - } - // push the last split of substring into result. 
- result.emplace_back(input_str); - return result; - }; - - auto process_cut_info = [&](std::string& cut_info_string) { - std::vector cut_list; - const std::string group_delimiter = ","; - const std::string edge_delimiter = ":"; - const std::string consumer_delimiter = "/"; - const std::string producer_consumer_delimiter = "-"; - - auto cut_info_groups = process_with_delimiter(cut_info_string, group_delimiter); - for (auto& cut_info_group : cut_info_groups) { - PipelineTrainingSession::TrainingConfiguration::CutInfo cut_info; - auto cut_edges = process_with_delimiter(cut_info_group, edge_delimiter); - for (auto& cut_edge : cut_edges) { - auto process_edge = process_with_delimiter(cut_edge, producer_consumer_delimiter); - if (process_edge.size() == 1) { - PipelineTrainingSession::TrainingConfiguration::CutEdge edge{process_edge[0]}; - cut_info.emplace_back(edge); - } else { - ORT_ENFORCE(process_edge.size() == 2); - auto consumer_list = process_with_delimiter(process_edge[1], consumer_delimiter); - - PipelineTrainingSession::TrainingConfiguration::CutEdge edge{process_edge[0], consumer_list}; - cut_info.emplace_back(edge); - } - } - cut_list.emplace_back(cut_info); - } - return cut_list; - }; - - pipeline_config.cut_list = process_cut_info(parameters.pipeline_cut_info_string); - config.pipeline_config = pipeline_config; - } - config.loss_name = parameters.loss_output_name; - - if (!parameters.training_optimizer_name.empty()) { - training::PipelineTrainingSession::TrainingConfiguration::OptimizerConfiguration opt{}; - opt.name = parameters.training_optimizer_name; - opt.learning_rate_input_name = parameters.lr_params_feed_name; - opt.weight_attributes_generator = [¶meters](const std::string& weight_name) { - const auto it = parameters.optimizer_attributes_map.find(weight_name); - ORT_ENFORCE( - it != parameters.optimizer_attributes_map.end(), - "Failed to find attribute map for weight ", weight_name); - return it->second; - }; - opt.weight_int_attributes_generator = [¶meters](const std::string& weight_name) { - const auto it = parameters.optimizer_int_attributes_map.find(weight_name); - ORT_ENFORCE( - it != parameters.optimizer_int_attributes_map.end(), - "Failed to find int attribute map for weight ", weight_name); - return it->second; - }; - opt.use_mixed_precision_moments = parameters.use_fp16_moments; - opt.do_all_reduce_in_mixed_precision_type = true; - // TODO: this mapping is temporary. - // For now, nccl allreduce kernel only implements for allreduce_post_accumulation - // hovorod allreduce kernel only implements for not allreduce_post_accumulation. - // eventually we will have one all reduce kernel and let opt to have - // an allreduce_post_accumulation option and remove the use_nccl option. 
- opt.use_nccl = parameters.allreduce_post_accumulation; - opt.deepspeed_zero = onnxruntime::training::ZeROConfig(parameters.deepspeed_zero_stage); - opt.enable_grad_norm_clip = parameters.enable_grad_norm_clip; - - // TODO reduction types - if (parameters.enable_adasum) { -#ifdef USE_CUDA - opt.adasum_reduction_type = training::AdasumReductionType::GpuHierarchicalReduction; -#else - opt.adasum_reduction_type = training::AdasumReductionType::CpuReduction; -#endif - } - - config.optimizer_config = opt; - } - - if (!parameters.optimizer_initial_state.empty()) { - config.init_optimizer_states = parameters.optimizer_initial_state; - } - - config.gradient_graph_config.use_memory_efficient_gradient = parameters.use_memory_efficient_gradient; - config.gradient_graph_config.set_gradients_as_graph_outputs = parameters.set_gradients_as_graph_outputs; - - config.graph_transformer_config.attn_dropout_recompute = parameters.attn_dropout_recompute; - config.graph_transformer_config.gelu_recompute = parameters.gelu_recompute; - config.graph_transformer_config.transformer_layer_recompute = parameters.transformer_layer_recompute; - config.graph_transformer_config.number_recompute_layers = parameters.number_recompute_layers; - config.graph_transformer_config.propagate_cast_ops_config.strategy = parameters.propagate_cast_ops_strategy; - config.graph_transformer_config.propagate_cast_ops_config.level = parameters.propagate_cast_ops_level; - config.graph_transformer_config.propagate_cast_ops_config.allow = parameters.propagate_cast_ops_allow; - - if (!parameters.model_after_graph_transforms_path.empty()) { - config.model_after_graph_transforms_path = ToPathString(parameters.model_after_graph_transforms_path); - } - if (!parameters.model_with_gradient_graph_path.empty()) { - config.model_with_gradient_graph_path = ToPathString(parameters.model_with_gradient_graph_path); - } - if (!parameters.model_with_training_graph_path.empty()) { - config.model_with_training_graph_path = ToPathString(parameters.model_with_training_graph_path); - } - - training::PipelineTrainingSession::TrainingConfigurationResult config_result{}; - - OrtPybindThrowIfError(sess->ConfigureForTraining(config, config_result)); - - TrainingConfigurationResult python_config_result{}; - if (config_result.mixed_precision_config_result.has_value()) { - const auto& mp_config_result = config_result.mixed_precision_config_result.value(); - python_config_result.loss_scale_input_name = mp_config_result.loss_scale_input_name; - } - - return python_config_result; -} - #if defined(USE_MPI) void CopyMPIContextToTrainingParameters(TrainingParameters& parameters, const logging::Logger* logger) { LOGS(*logger, INFO) << "MPIContext::GetInstance().GetWorldRank(): " << MPIContext::GetInstance().GetWorldRank(); @@ -424,7 +241,7 @@ std::unordered_map> Con return py_tensor_state; } -void addObjectMethodsForTraining(py::module& m, ExecutionProviderRegistrationFn ep_registration_fn) { +void addObjectMethodsForTraining(py::module& m) { py::class_(m, "OrtValueCache") .def(py::init<>()) .def("insert", [](const OrtValueCachePtr& cache_ptr, std::string node_arg_name, OrtValue& value) { @@ -451,7 +268,6 @@ void addObjectMethodsForTraining(py::module& m, ExecutionProviderRegistrationFn py::class_ parameters(m, "TrainingParameters", R"pbdoc(Configuration information for training.)pbdoc"); parameters.def(py::init()) .def_readwrite("loss_output_name", &TrainingParameters::loss_output_name) - .def_readwrite("immutable_weights", &TrainingParameters::immutable_weights) 
.def_readwrite("weights_not_to_train", &TrainingParameters::weights_not_to_train) .def_readwrite("weights_to_train", &TrainingParameters::weights_to_train) .def_readwrite("sliced_tensor_names", &TrainingParameters::sliced_tensor_names) @@ -484,25 +300,6 @@ void addObjectMethodsForTraining(py::module& m, ExecutionProviderRegistrationFn .def_readwrite("data_parallel_size", &TrainingParameters::data_parallel_size) .def_readwrite("horizontal_parallel_size", &TrainingParameters::horizontal_parallel_size) .def_readwrite("pipeline_parallel_size", &TrainingParameters::pipeline_parallel_size) - .def("set_optimizer_initial_state", - [](TrainingParameters& parameters, const std::unordered_map>& py_state) -> void { - onnxruntime::training::TrainingSession::OptimizerState optim_state; - for (const auto& weight_it : py_state) { - auto state = weight_it.second; - NameMLValMap state_tensors; - for (auto& initializer : state) { - OrtValue ml_value; - - // InputDeflist is null because parameters havent been tied to session yet - // Likewise, there is no need to specify the name (as the name was previously used to lookup the def list) - CreateGenericMLValue(nullptr, GetAllocator(), "", initializer.second, &ml_value, true); - ThrowIfPyErrOccured(); - state_tensors.emplace(initializer.first, ml_value); - } - optim_state.emplace(weight_it.first, state_tensors); - } - parameters.optimizer_initial_state = optim_state; - }) .def_readwrite("model_after_graph_transforms_path", &TrainingParameters::model_after_graph_transforms_path) .def_readwrite("model_with_gradient_graph_path", &TrainingParameters::model_with_gradient_graph_path) .def_readwrite("model_with_training_graph_path", &TrainingParameters::model_with_training_graph_path) @@ -611,130 +408,6 @@ void addObjectMethodsForTraining(py::module& m, ExecutionProviderRegistrationFn }); #endif - py::class_ config_result(m, "TrainingConfigurationResult", "pbdoc(Configuration result for training.)pbdoc"); - config_result.def(py::init()) - .def_property_readonly("loss_scale_input_name", [](const TrainingConfigurationResult& result) -> py::object { - if (result.loss_scale_input_name.has_value()) { - return py::str{result.loss_scale_input_name.value()}; - } - return py::none(); - }); - - // Thin wrapper over internal C++ InferenceSession to accommodate custom op library management for the Python user - struct PyTrainingSession : public PyInferenceSession { - PyTrainingSession(std::shared_ptr env, const PySessionOptions& so) - : PyInferenceSession(env, std::make_unique(so.value, *env)) { - } - ~PyTrainingSession() = default; - }; - - py::class_ training_session(m, "TrainingSession"); - training_session - .def(py::init([](const PySessionOptions& so) { - auto& training_env = GetTrainingEnv(); - return std::make_unique(training_env.GetORTEnv(), so); - })) - .def(py::init([]() { - auto& training_env = GetTrainingEnv(); - return std::make_unique(training_env.GetORTEnv(), GetDefaultCPUSessionOptions()); - })) - .def("finalize", [](py::object) { -#if defined(USE_MPI) -#ifdef _WIN32 - // https://docs.microsoft.com/en-us/windows/win32/dlls/dynamic-link-library-best-practices - // shutdown_mpi() is not called within MPIContext destructor because of DllMain's restriction - // call shutdown_mpi() here instead. 
- MPIContext::shutdown_mpi(); -#endif -#endif - }) - .def("load_model", [ep_registration_fn](PyTrainingSession* sess, const std::string& path, TrainingParameters& parameters, const std::vector& provider_types, const ProviderOptionsVector& provider_options) { - OrtPybindThrowIfError(sess->GetSessionHandle()->Load(path)); - -#if defined(USE_MPI) - bool use_nccl = parameters.allreduce_post_accumulation; - if (!use_nccl && parameters.world_size > 1) - CopyMPIContextToTrainingParameters(parameters, sess->GetSessionHandle()->GetLogger()); -#endif - const auto config_result = ConfigureSessionForTraining(static_cast(sess->GetSessionHandle()), parameters); - - ProviderOptionsVector merged_options; - ResolveExtraProviderOptions(provider_types, provider_options, merged_options); - - InitializeSession(sess->GetSessionHandle(), ep_registration_fn, provider_types, merged_options); - - return config_result; - }) - .def("read_bytes", [ep_registration_fn](PyTrainingSession* sess, const py::bytes& serialized_model, TrainingParameters& parameters, const std::vector& provider_types, const ProviderOptionsVector& provider_options) { - std::istringstream buffer(serialized_model); - OrtPybindThrowIfError(sess->GetSessionHandle()->Load(buffer)); - -#if defined(USE_MPI) - bool use_nccl = parameters.allreduce_post_accumulation; - if (!use_nccl && parameters.world_size > 1) - CopyMPIContextToTrainingParameters(parameters, sess->GetSessionHandle()->GetLogger()); -#endif - const auto config_result = ConfigureSessionForTraining(static_cast(sess->GetSessionHandle()), parameters); - ProviderOptionsVector merged_options; - ResolveExtraProviderOptions(provider_types, provider_options, merged_options); - - InitializeSession(sess->GetSessionHandle(), ep_registration_fn, provider_types, merged_options); - - return config_result; - }) - .def("get_state", [](PyTrainingSession* sess) { - NameMLValMap state_tensors; - ORT_THROW_IF_ERROR(static_cast(sess->GetSessionHandle())->GetStateTensors(state_tensors)); - auto& data_transfer_manager = sess->GetSessionHandle()->GetDataTransferManager(); - // convert to numpy array - std::map rmap; - for (auto& kv : state_tensors) { - if (kv.second.IsTensor()) { - py::object obj; - const Tensor& rtensor = kv.second.Get(); - GetPyObjFromTensor(rtensor, obj, &data_transfer_manager); - rmap.insert({kv.first, obj}); - } else { - throw std::runtime_error("Non tensor type in session state tensors is not expected."); - } - } - return rmap; - }) - .def("get_model_state", [](PyTrainingSession* sess, bool include_mixed_precision_weights) { - std::unordered_map model_state_tensors; - ORT_THROW_IF_ERROR(static_cast(sess->GetSessionHandle())->GetModelState(model_state_tensors, include_mixed_precision_weights)); - auto& data_transfer_manager = sess->GetSessionHandle()->GetDataTransferManager(); - return ConvertORTTensorMapToNumpy(model_state_tensors, data_transfer_manager); - }) - .def("get_optimizer_state", [](PyTrainingSession* sess) { - std::unordered_map opt_state_tensors; - ORT_THROW_IF_ERROR(static_cast(sess->GetSessionHandle())->GetOptimizerState(opt_state_tensors)); - auto& data_transfer_manager = sess->GetSessionHandle()->GetDataTransferManager(); - return ConvertORTTensorMapToNumpy(opt_state_tensors, data_transfer_manager); - }) - .def("get_partition_info_map", [](PyTrainingSession* sess) { - std::unordered_map>> part_info_map; - ORT_THROW_IF_ERROR(static_cast(sess->GetSessionHandle())->GetPartitionInfoMap(part_info_map)); - return part_info_map; - }) - .def("load_state", [](PyTrainingSession* sess, 
std::unordered_map& state, bool strict) { - NameMLValMap state_tensors; - for (auto initializer : state) { - OrtValue ml_value; - auto px = sess->GetSessionHandle()->GetModelInputs(); - if (!px.first.IsOK() || !px.second) { - throw std::runtime_error("Either failed to get model inputs from the session object or the input def list was null"); - } - CreateGenericMLValue(px.second, GetAllocator(), initializer.first, initializer.second, &ml_value); - ThrowIfPyErrOccured(); - state_tensors.insert(std::make_pair(initializer.first, ml_value)); - } - ORT_THROW_IF_ERROR(static_cast(sess->GetSessionHandle())->SetStateTensors(state_tensors, strict)); - }) - .def("is_output_fp32_node", [](PyTrainingSession* sess, const std::string& output_name) { - return static_cast(sess->GetSessionHandle())->IsGraphOutputFp32Node(output_name); - }); - py::class_(m, "PartialGraphExecutionState") .def(py::init([]() { return std::make_unique(); diff --git a/orttraining/orttraining/python/orttraining_python_module.cc b/orttraining/orttraining/python/orttraining_python_module.cc index 88ef90a7feaa8..4d1db7334f280 100644 --- a/orttraining/orttraining/python/orttraining_python_module.cc +++ b/orttraining/orttraining/python/orttraining_python_module.cc @@ -40,7 +40,7 @@ const ROCMExecutionProviderInfo GetRocmExecutionProviderInfo(ProviderInfo_ROCM* void addGlobalMethods(py::module& m); void addObjectMethods(py::module& m, ExecutionProviderRegistrationFn ep_registration_fn); -void addObjectMethodsForTraining(py::module& m, ExecutionProviderRegistrationFn ep_registration_fn); +void addObjectMethodsForTraining(py::module& m); void addObjectMethodsForEager(py::module& m); #ifdef ENABLE_LAZY_TENSOR void addObjectMethodsForLazyTensor(py::module& m); @@ -339,7 +339,7 @@ PYBIND11_MODULE(onnxruntime_pybind11_state, m) { } #endif - addObjectMethodsForTraining(m, ORTTrainingRegisterExecutionProviders); + addObjectMethodsForTraining(m); #ifdef ENABLE_LAZY_TENSOR addObjectMethodsForLazyTensor(m); diff --git a/orttraining/orttraining/python/training/__init__.py b/orttraining/orttraining/python/training/__init__.py index 73b1f826f68e1..a3c22686a1039 100644 --- a/orttraining/orttraining/python/training/__init__.py +++ b/orttraining/orttraining/python/training/__init__.py @@ -8,26 +8,16 @@ TrainingParameters, is_ortmodule_available, ) -from onnxruntime.capi.training.training_session import TrainingSession - # Options need to be imported before `ORTTrainer`. -from .orttrainer_options import ORTTrainerOptions -from .orttrainer import ORTTrainer, TrainStepInfo -from . import amp, artifacts, checkpoint, model_desc_validation, optim +from . import amp, artifacts, optim __all__ = [ "PropagateCastOpsStrategy", "TrainingParameters", "is_ortmodule_available", - "TrainingSession", - "ORTTrainerOptions", - "ORTTrainer", - "TrainStepInfo", "amp", "artifacts", - "checkpoint", - "model_desc_validation", "optim", ] diff --git a/orttraining/orttraining/python/training/_checkpoint_storage.py b/orttraining/orttraining/python/training/_checkpoint_storage.py deleted file mode 100644 index 7a8ada7dee96b..0000000000000 --- a/orttraining/orttraining/python/training/_checkpoint_storage.py +++ /dev/null @@ -1,107 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
-# -------------------------------------------------------------------------- - -import pickle -from collections.abc import Mapping - -import h5py - - -def _dfs_save(group, save_obj): - """Recursively go over each level in the save_obj dictionary and save values to a hdf5 group""" - - for key, value in save_obj.items(): - if isinstance(value, Mapping): - subgroup = group.create_group(key) - _dfs_save(subgroup, value) - else: - group[key] = value - - -def save(save_obj: dict, path): - """Persists the input dictionary to a file specified by path. - - Saves an hdf5 representation of the save_obj dictionary to a file or a file-like object specified by path. - Values are saved in a format supported by h5py. For example, a PyTorch tensor is saved and loaded as a - numpy object. So, user types may be converted from their original types to numpy equivalent types. - - Args: - save_obj: dictionary that needs to be saved. - save_obj should consist of types supported by hdf5 file format. - if hdf5 does not recognize a type, an exception is raised. - if save_obj is not a dictionary, a ValueError is raised. - path: string representation to a file path or a python file-like object. - if file already exists at path, an exception is raised. - """ - if not isinstance(save_obj, Mapping): - raise ValueError("Object to be saved must be a dictionary") - - with h5py.File(path, "w-") as f: - _dfs_save(f, save_obj) - - -def _dfs_load(group, load_obj): - """Recursively go over each level in the hdf5 group and load the values into the given dictionary""" - - for key in group: - if isinstance(group[key], h5py.Group): - load_obj[key] = {} - _dfs_load(group[key], load_obj[key]) - else: - load_obj[key] = group[key][()] - - -def load(path, key=None): - """Loads the data stored in the binary file specified at the given path into a dictionary and returns it. - - Loads the data from an hdf5 file specified at the given path into a python dictionary. - Loaded dictionary contains numpy equivalents of python data types. For example: - PyTorch tensor -> saved as a numpy array and loaded as a numpy array. - bool -> saved as a numpy bool and loaded as a numpy bool - If a '/' separated key is provided, the value at that hierarchical level in the hdf5 group is returned. - - Args: - path: string representation to a file path or a python file-like object. - if file does not already exist at path, an exception is raised. - key: '/' separated representation of the hierarchy level value that needs to be returned/ - for example, if the saved binary file has structure {a: {b: x, c:y}} and the user would like - to query the value for c, the key provided should be 'a/c'. - the default value of None for key implies that the entire hdf5 file structure needs to be loaded into a dictionary and returned. - - Returns: - a dictionary loaded from the specified binary hdf5 file. - """ - if not h5py.is_hdf5(path): - raise ValueError(f"{path} is not an hdf5 file or a python file-like object.") - - load_obj = {} - with h5py.File(path, "r") as f: - if key: - f = f[key] # noqa: PLW2901 - if isinstance(f, h5py.Dataset): - return f[()] - - _dfs_load(f, load_obj) - - return load_obj - - -def to_serialized_hex(user_dict): - """Serialize the user_dict and convert the serialized bytes to a hex string and return""" - - return pickle.dumps(user_dict).hex() - - -def from_serialized_hex(serialized_hex): - """Convert serialized_hex to bytes and deserialize it and return""" - - # serialized_hex can be either a regular string or a byte string. 
- # if it is a byte string, convert to regular string using decode() - # if it is a regular string, do nothing to it - try: # noqa: SIM105 - serialized_hex = serialized_hex.decode() - except AttributeError: - pass - return pickle.loads(bytes.fromhex(serialized_hex)) diff --git a/orttraining/orttraining/python/training/_utils.py b/orttraining/orttraining/python/training/_utils.py index 4eb79443c8f1a..091274d1d171d 100644 --- a/orttraining/orttraining/python/training/_utils.py +++ b/orttraining/orttraining/python/training/_utils.py @@ -6,11 +6,9 @@ import importlib.util import os import sys -from functools import wraps # noqa: F401 import numpy as np import torch -from onnx import TensorProto # noqa: F401 from packaging.version import Version @@ -23,16 +21,6 @@ def get_device_index(device): return 0 if device.index is None else device.index -def get_device_index_from_input(input): - """Returns device index from a input PyTorch Tensor""" - - if isinstance(input, (list, tuple)): - device_index = get_device_index(input[0].device) - else: - device_index = get_device_index(input.device) - return device_index - - def get_device_str(device): if isinstance(device, str): # could be 'cuda:0', 'cuda:1', or 'cpu'. with cpu, set index=0 @@ -50,24 +38,6 @@ def get_device_str(device): return device -def get_all_gradients_finite_name_from_session(session): - """Find all_gradients_finite node on Session graph and return its name""" - - nodes = [x for x in session._outputs_meta if "all_gradients_finite" in x.name] - if len(nodes) != 1: - raise RuntimeError("'all_gradients_finite' node not found within training session") - return nodes[0].name - - -def get_gradient_accumulation_name_from_session(session): - """Find Group_Accumulated_Gradients node on Session graph and return its name""" - - nodes = [x for x in session._outputs_meta if "Group_Accumulated_Gradients" in x.name] - if len(nodes) != 1: - raise RuntimeError("'Group_Accumulated_Gradients' node not found within training session") - return nodes[0].name - - def dtype_torch_to_numpy(torch_dtype): """Converts PyTorch types to Numpy types @@ -232,111 +202,3 @@ def import_module_from_file(file_path, module_name=None): sys.modules[module_name] = module spec.loader.exec_module(module) return module - - -def state_dict_model_key(): - """Returns the model key name in the state dictionary""" - - return "model" - - -def state_dict_optimizer_key(): - """Returns the optimizer key name in the state dictionary""" - - return "optimizer" - - -def state_dict_partition_info_key(): - """Returns the partition info key name in the state dictionary""" - - return "partition_info" - - -def state_dict_trainer_options_key(): - """Returns the trainer options key name in the state dictionary""" - - return "trainer_options" - - -def state_dict_full_precision_key(): - """Returns the full precision key name in the state dictionary""" - - return "full_precision" - - -def state_dict_original_dimension_key(): - """Returns the original dimension key name in the state dictionary""" - - return "original_dim" - - -def state_dict_sharded_optimizer_keys(): - """Returns the optimizer key names that can be sharded in the state dictionary""" - - return {"Moment_1", "Moment_2"} - - -def state_dict_user_dict_key(): - """Returns the user dict key name in the state dictionary""" - - return "user_dict" - - -def state_dict_trainer_options_mixed_precision_key(): - """Returns the trainer options mixed precision key name in the state dictionary""" - - return "mixed_precision" - - -def 
state_dict_trainer_options_zero_stage_key(): - """Returns the trainer options zero_stage key name in the state dictionary""" - - return "zero_stage" - - -def state_dict_trainer_options_world_rank_key(): - """Returns the trainer options world_rank key name in the state dictionary""" - - return "world_rank" - - -def state_dict_trainer_options_world_size_key(): - """Returns the trainer options world_size key name in the state dictionary""" - - return "world_size" - - -def state_dict_trainer_options_data_parallel_size_key(): - """Returns the trainer options data_parallel_size key name in the state dictionary""" - - return "data_parallel_size" - - -def state_dict_trainer_options_horizontal_parallel_size_key(): - """Returns the trainer options horizontal_parallel_size key name in the state dictionary""" - - return "horizontal_parallel_size" - - -def state_dict_trainer_options_optimizer_name_key(): - """Returns the trainer options optimizer_name key name in the state dictionary""" - - return "optimizer_name" - - -def state_dict_train_step_info_key(): - """Returns the train step info key name in the state dictionary""" - - return "train_step_info" - - -def state_dict_train_step_info_optimization_step_key(): - """Returns the train step info optimization step key name in the state dictionary""" - - return "optimization_step" - - -def state_dict_train_step_info_step_key(): - """Returns the train step info step key name in the state dictionary""" - - return "step" diff --git a/orttraining/orttraining/python/training/checkpoint.py b/orttraining/orttraining/python/training/checkpoint.py deleted file mode 100644 index d0ff0650662b7..0000000000000 --- a/orttraining/orttraining/python/training/checkpoint.py +++ /dev/null @@ -1,748 +0,0 @@ -import os -import tempfile -import warnings -from enum import Enum - -import numpy as np -import onnx -import torch - -from . import _checkpoint_storage, _utils - -################################################################################ -# Experimental Checkpoint APIs -################################################################################ - - -def experimental_state_dict(ort_trainer, include_optimizer_state=True): - warnings.warn( - "experimental_state_dict() will be deprecated soon. Please use ORTTrainer.state_dict() instead.", - DeprecationWarning, - ) - - if not ort_trainer._training_session: - warnings.warn( - "ONNX Runtime training session is not initialized yet. " - "Please run train_step or eval_step at least once before calling state_dict()." - ) - return ort_trainer._state_dict - - # extract trained weights - session_state = ort_trainer._training_session.get_state() - torch_state = {} - for name in session_state: - torch_state[name] = torch.from_numpy(session_state[name]) - - # extract untrained weights and buffer - for n in ort_trainer._onnx_model.graph.initializer: - if n.name not in torch_state and n.name in ort_trainer.options.utils.frozen_weights: - torch_state[n.name] = torch.from_numpy(np.array(onnx.numpy_helper.to_array(n))) - - # Need to remove redundant (optimizer) initializers to map back to original torch state names - if not include_optimizer_state and ort_trainer._torch_state_dict_keys: - return {key: torch_state[key] for key in ort_trainer._torch_state_dict_keys if key in torch_state} - return torch_state - - -def experimental_load_state_dict(ort_trainer, state_dict, strict=False): - warnings.warn( - "experimental_load_state_dict() will be deprecated soon. 
Please use ORTTrainer.load_state_dict() instead.", - DeprecationWarning, - ) - - # Note: It may happen ONNX model has not yet been initialized - # In this case we cache a reference to desired state and delay the restore until after initialization - # Unexpected behavior will result if the user changes the reference before initialization - if not ort_trainer._training_session: - ort_trainer._state_dict = state_dict - ort_trainer._load_state_dict_strict = strict - return - - # Update onnx model from loaded state dict - cur_initializers_names = [n.name for n in ort_trainer._onnx_model.graph.initializer] - new_initializers = {} - - for name in state_dict: - if name in cur_initializers_names: - new_initializers[name] = state_dict[name].numpy() - elif strict: - raise RuntimeError(f"Checkpoint tensor: {name} is not present in the model.") - - ort_trainer._update_onnx_model_initializers(new_initializers) - - # create new session based on updated onnx model - ort_trainer._state_dict = None - ort_trainer._init_session() - - # load training state - session_state = {name: state_dict[name].numpy() for name in state_dict} - ort_trainer._training_session.load_state(session_state, strict) - - -def experimental_save_checkpoint( - ort_trainer, - checkpoint_dir, - checkpoint_prefix="ORT_checkpoint", - checkpoint_state_dict=None, - include_optimizer_state=True, -): - warnings.warn( - "experimental_save_checkpoint() will be deprecated soon. Please use ORTTrainer.save_checkpoint() instead.", - DeprecationWarning, - ) - - if checkpoint_state_dict is None: - checkpoint_state_dict = {"model": experimental_state_dict(ort_trainer, include_optimizer_state)} - else: - checkpoint_state_dict.update({"model": experimental_state_dict(ort_trainer, include_optimizer_state)}) - - assert os.path.exists(checkpoint_dir), f"checkpoint_dir ({checkpoint_dir}) directory doesn't exist" - - checkpoint_name = _get_checkpoint_name( - checkpoint_prefix, - ort_trainer.options.distributed.deepspeed_zero_optimization.stage, - ort_trainer.options.distributed.world_rank, - ort_trainer.options.distributed.world_size, - ) - checkpoint_file = os.path.join(checkpoint_dir, checkpoint_name) - if os.path.exists(checkpoint_file): - msg = f"{checkpoint_file} already exists, overwriting." - warnings.warn(msg) - torch.save(checkpoint_state_dict, checkpoint_file) - - -def experimental_load_checkpoint(ort_trainer, checkpoint_dir, checkpoint_prefix="ORT_checkpoint", strict=False): - warnings.warn( - "experimental_load_checkpoint() will be deprecated soon. Please use ORTTrainer.load_checkpoint() instead.", - DeprecationWarning, - ) - - checkpoint_files = _list_checkpoint_files(checkpoint_dir, checkpoint_prefix) - is_partitioned = False - if len(checkpoint_files) > 1: - msg = ( - f"Found more than one file with prefix {checkpoint_prefix} in directory {checkpoint_dir}." - " Attempting to load ZeRO checkpoint." 
- ) - warnings.warn(msg) - is_partitioned = True - if (not ort_trainer.options.distributed.deepspeed_zero_optimization.stage) and is_partitioned: - return _load_multi_checkpoint(ort_trainer, checkpoint_dir, checkpoint_prefix, strict) - else: - return _load_single_checkpoint(ort_trainer, checkpoint_dir, checkpoint_prefix, is_partitioned, strict) - - -class _AGGREGATION_MODE(Enum): # noqa: N801 - Zero = 0 - Megatron = 1 - - -def _order_paths(paths, D_groups, H_groups): - """Reorders the given paths in order of aggregation of ranks for D and H parallellism respectively - and returns the ordered dict""" - - trainer_options_path_tuples = [] - world_rank = _utils.state_dict_trainer_options_world_rank_key() - - for path in paths: - trainer_options_path_tuples.append( - (_checkpoint_storage.load(path, key=_utils.state_dict_trainer_options_key()), path) - ) - - # sort paths according to rank - sorted_paths = [ - path - for _, path in sorted( - trainer_options_path_tuples, key=lambda trainer_options_path_pair: trainer_options_path_pair[0][world_rank] - ) - ] - - ordered_paths = dict() - ordered_paths["D"] = [[sorted_paths[i] for i in D_groups[group_id]] for group_id in range(len(D_groups))] - ordered_paths["H"] = [[sorted_paths[i] for i in H_groups[group_id]] for group_id in range(len(H_groups))] - - return ordered_paths - - -def _add_or_update_sharded_key( - state_key, state_value, state_sub_dict, model_state_key, state_partition_info, sharded_states_original_dims, mode -): - """Add or update the record for the sharded state_key in the state_sub_dict""" - - # record the original dimension for this state - original_dim = _utils.state_dict_original_dimension_key() - sharded_states_original_dims[model_state_key] = state_partition_info[original_dim] - - axis = 0 - if mode == _AGGREGATION_MODE.Megatron and state_partition_info["megatron_row_partition"] == 0: - axis = -1 - - if state_key in state_sub_dict: - # state_dict already contains a record for this state - # since this state is sharded, concatenate the state value to - # the record in the state_dict - state_sub_dict[state_key] = np.concatenate((state_sub_dict[state_key], state_value), axis) - else: - # create a new entry for this state in the state_dict - state_sub_dict[state_key] = state_value - - -def _add_or_validate_unsharded_key(state_key, state_value, state_sub_dict, mismatch_error_string): - """Add or validate the record for the unsharded state_key in the state_sub_dict""" - - if state_key in state_sub_dict: - # state_dict already contains a record for this unsharded state. 
- # assert that all values are the same for this previously loaded state - assert (state_sub_dict[state_key] == state_value).all(), mismatch_error_string - else: - # create a new entry for this state in the state_sub_dict - state_sub_dict[state_key] = state_value - - -def _aggregate_model_states( - rank_state_dict, sharded_states_original_dims, state_dict, mixed_precision_enabled, mode=_AGGREGATION_MODE.Zero -): - """Aggregates all model states from the rank_state_dict into state_dict""" - - model = _utils.state_dict_model_key() - full_precision = _utils.state_dict_full_precision_key() - partition_info = _utils.state_dict_partition_info_key() - - # if there are no model states in the rank_state_dict, no model aggregation is needed - if model not in rank_state_dict: - return - - if model not in state_dict: - state_dict[model] = {} - - if full_precision not in state_dict[model]: - state_dict[model][full_precision] = {} - - # iterate over all model state keys - for model_state_key, model_state_value in rank_state_dict[model][full_precision].items(): - # ZERO: full precision model states are sharded only when they exist in the partition_info subdict and mixed - # precision training was enabled. for full precision training, full precision model states are not sharded - # MEGATRON : full precision model states are sharded when they exist in the partition_info subdict - if (model_state_key in rank_state_dict[partition_info]) and ( - mode == _AGGREGATION_MODE.Megatron or mixed_precision_enabled - ): - # this model state is sharded - _add_or_update_sharded_key( - model_state_key, - model_state_value, - state_dict[model][full_precision], - model_state_key, - rank_state_dict[partition_info][model_state_key], - sharded_states_original_dims, - mode, - ) - else: - # this model state is not sharded since a record for it does not exist in the partition_info subdict - _add_or_validate_unsharded_key( - model_state_key, - model_state_value, - state_dict[model][full_precision], - f"Value mismatch for model state {model_state_key}", - ) - - -def _aggregate_optimizer_states(rank_state_dict, sharded_states_original_dims, state_dict, mode=_AGGREGATION_MODE.Zero): - """Aggregates all optimizer states from the rank_state_dict into state_dict""" - - optimizer = _utils.state_dict_optimizer_key() - partition_info = _utils.state_dict_partition_info_key() - sharded_optimizer_keys = _utils.state_dict_sharded_optimizer_keys() - - # if there are no optimizer states in the rank_state_dict, no optimizer aggregation is needed - if optimizer not in rank_state_dict: - return - - if optimizer not in state_dict: - state_dict[optimizer] = {} - - # iterate over all optimizer state keys - for model_state_key, optimizer_dict in rank_state_dict[optimizer].items(): - for optimizer_key, optimizer_value in optimizer_dict.items(): - if model_state_key not in state_dict[optimizer]: - state_dict[optimizer][model_state_key] = {} - - if optimizer_key in sharded_optimizer_keys and model_state_key in rank_state_dict[partition_info]: - # this optimizer state is sharded since a record exists in the partition_info subdict - _add_or_update_sharded_key( - optimizer_key, - optimizer_value, - state_dict[optimizer][model_state_key], - model_state_key, - rank_state_dict[partition_info][model_state_key], - sharded_states_original_dims, - mode, - ) - else: - # this optimizer state is not sharded since a record for it does not exist in the partition_info subdict - # or this optimizer key is not one of the sharded optimizer keys - 
_add_or_validate_unsharded_key( - optimizer_key, - optimizer_value, - state_dict[optimizer][model_state_key], - f"Value mismatch for model state {model_state_key} and optimizer state {optimizer_key}", - ) - - -def _reshape_states(sharded_states_original_dims, state_dict, mixed_precision_enabled): - """Reshape model and optimizer states in the state_dict according to dimensions in sharded_states_original_dims""" - - model = _utils.state_dict_model_key() - full_precision = _utils.state_dict_full_precision_key() - optimizer = _utils.state_dict_optimizer_key() - sharded_optimizer_keys = _utils.state_dict_sharded_optimizer_keys() - - for sharded_state_key, original_dim in sharded_states_original_dims.items(): - # reshape model states to original_dim only when mixed precision is enabled - if mixed_precision_enabled and (model in state_dict): - state_dict[model][full_precision][sharded_state_key] = state_dict[model][full_precision][ - sharded_state_key - ].reshape(original_dim) - - # reshape optimizer states to original_dim - if optimizer in state_dict: - for optimizer_key, optimizer_value in state_dict[optimizer][sharded_state_key].items(): - if optimizer_key in sharded_optimizer_keys: - state_dict[optimizer][sharded_state_key][optimizer_key] = optimizer_value.reshape(original_dim) - - -def _aggregate_trainer_options(rank_state_dict, state_dict, partial_aggregation): - """Extracts trainer options from rank_state_dict and loads them accordingly on state_dict""" - trainer_options = _utils.state_dict_trainer_options_key() - state_dict[trainer_options] = {} - - mixed_precision = _utils.state_dict_trainer_options_mixed_precision_key() - zero_stage = _utils.state_dict_trainer_options_zero_stage_key() - world_rank = _utils.state_dict_trainer_options_world_rank_key() - world_size = _utils.state_dict_trainer_options_world_size_key() - optimizer_name = _utils.state_dict_trainer_options_optimizer_name_key() - D_size = _utils.state_dict_trainer_options_data_parallel_size_key() # noqa: N806 - H_size = _utils.state_dict_trainer_options_horizontal_parallel_size_key() # noqa: N806 - - state_dict[trainer_options][mixed_precision] = rank_state_dict[trainer_options][mixed_precision] - state_dict[trainer_options][zero_stage] = 0 - state_dict[trainer_options][world_rank] = rank_state_dict[trainer_options][world_rank] if partial_aggregation else 0 - state_dict[trainer_options][world_size] = 1 - state_dict[trainer_options][optimizer_name] = rank_state_dict[trainer_options][optimizer_name] - state_dict[trainer_options][D_size] = 1 - state_dict[trainer_options][H_size] = 1 - - -def _aggregate_megatron_partition_info(rank_state_dict, state_dict): - """Extracts partition_info from rank_state_dict and loads on state_dict for megatron-partitioned weights""" - partition_info = _utils.state_dict_partition_info_key() - if partition_info not in state_dict: - state_dict[partition_info] = {} - - rank_partition_info = rank_state_dict[partition_info] - for model_state_key, partition_info_dict in rank_partition_info.items(): - if model_state_key not in state_dict[partition_info]: - # add partition info only if weight is megatron partitioned - if partition_info_dict["megatron_row_partition"] >= 0: - state_dict[partition_info][model_state_key] = partition_info_dict - - -def _to_pytorch_format(state_dict): - """Convert ORT state dictionary schema (hierarchical structure) to PyTorch state dictionary schema (flat structure)""" - - pytorch_state_dict = {} - for model_state_key, model_state_value in 
state_dict[_utils.state_dict_model_key()][ - _utils.state_dict_full_precision_key() - ].items(): - # convert numpy array to a torch tensor - pytorch_state_dict[model_state_key] = torch.tensor(model_state_value) - return pytorch_state_dict - - -def _get_parallellism_groups(data_parallel_size, horizontal_parallel_size, world_size): - """Returns the D and H groups for the given sizes""" - num_data_groups = world_size // data_parallel_size - data_groups = [] - for data_group_id in range(num_data_groups): - data_group_ranks = [] - for r in range(data_parallel_size): - data_group_ranks.append(data_group_id + horizontal_parallel_size * r) - data_groups.append(data_group_ranks) - - num_horizontal_groups = world_size // horizontal_parallel_size - horizontal_groups = [] - for hori_group_id in range(num_horizontal_groups): - hori_group_ranks = [] - for r in range(horizontal_parallel_size): - hori_group_ranks.append(hori_group_id * horizontal_parallel_size + r) - horizontal_groups.append(hori_group_ranks) - - return data_groups, horizontal_groups - - -def _aggregate_over_ranks( - ordered_paths, - ranks, - sharded_states_original_dims=None, - mode=_AGGREGATION_MODE.Zero, - partial_aggregation=False, - pytorch_format=True, -): - """Aggregate checkpoint files over set of ranks and return a single state dictionary - - Args: - ordered_paths: list of paths in the order in which they must be aggregated - ranks: list of ranks that are to be aggregated - sharded_states_original_dims: dict containing the original dims for sharded states that are persisted over - multiple calls to _aggregate_over_ranks() - mode: mode of aggregation: Zero or Megatron - partial_aggregation: boolean flag to indicate whether to produce a partially - aggregated state which can be further aggregated over - pytorch_format: boolean flag to select either ONNX Runtime or PyTorch state schema of the returned state_dict - Returns: - state_dict that can be loaded into an ORTTrainer or into a PyTorch model - """ - state_dict = {} - if sharded_states_original_dims is None: - sharded_states_original_dims = dict() - world_rank = _utils.state_dict_trainer_options_world_rank_key() - mixed_precision = _utils.state_dict_trainer_options_mixed_precision_key() - zero_stage = _utils.state_dict_trainer_options_zero_stage_key() - world_size = _utils.state_dict_trainer_options_world_size_key() - optimizer_name = _utils.state_dict_trainer_options_optimizer_name_key() - - loaded_mixed_precision = None - loaded_world_size = None - loaded_zero_stage = None - loaded_optimizer_name = None - - for i, path in enumerate(ordered_paths): - rank_state_dict = _checkpoint_storage.load(path) - - assert _utils.state_dict_partition_info_key() in rank_state_dict, "Missing information: partition_info" - assert _utils.state_dict_trainer_options_key() in rank_state_dict, "Missing information: trainer_options" - assert ( - ranks[i] == rank_state_dict[_utils.state_dict_trainer_options_key()][world_rank] - ), "Unexpected rank in file at path {}. Expected {}, got {}".format( - path, rank, rank_state_dict[_utils.state_dict_trainer_options_key()][world_rank] # noqa: F821 - ) - if loaded_mixed_precision is None: - loaded_mixed_precision = rank_state_dict[_utils.state_dict_trainer_options_key()][mixed_precision] - else: - assert ( - loaded_mixed_precision == rank_state_dict[_utils.state_dict_trainer_options_key()][mixed_precision] - ), f"Mixed precision state mismatch among checkpoint files. 
File: {path}" - if loaded_world_size is None: - loaded_world_size = rank_state_dict[_utils.state_dict_trainer_options_key()][world_size] - else: - assert ( - loaded_world_size == rank_state_dict[_utils.state_dict_trainer_options_key()][world_size] - ), f"World size state mismatch among checkpoint files. File: {path}" - if loaded_zero_stage is None: - loaded_zero_stage = rank_state_dict[_utils.state_dict_trainer_options_key()][zero_stage] - else: - assert ( - loaded_zero_stage == rank_state_dict[_utils.state_dict_trainer_options_key()][zero_stage] - ), f"Zero stage mismatch among checkpoint files. File: {path}" - if loaded_optimizer_name is None: - loaded_optimizer_name = rank_state_dict[_utils.state_dict_trainer_options_key()][optimizer_name] - else: - assert ( - loaded_optimizer_name == rank_state_dict[_utils.state_dict_trainer_options_key()][optimizer_name] - ), f"Optimizer name mismatch among checkpoint files. File: {path}" - - # aggregate all model states - _aggregate_model_states(rank_state_dict, sharded_states_original_dims, state_dict, loaded_mixed_precision, mode) - - if not pytorch_format: - # aggregate all optimizer states if pytorch_format is False - _aggregate_optimizer_states(rank_state_dict, sharded_states_original_dims, state_dict, mode) - - # for D+H aggregation scenario, the first pass of aggregation(partial aggregation) is over D groups - # to aggregate over Zero, and another pass to aggregate Megatron partitioned - # states. Preserve the relevant partition info only for weights that are megatron partitioned for - # a partial aggregation call - if partial_aggregation: - _aggregate_megatron_partition_info(rank_state_dict, state_dict) - - # entry for trainer_options in the state_dict to perform other sanity checks - if _utils.state_dict_trainer_options_key() not in state_dict: - _aggregate_trainer_options(rank_state_dict, state_dict, partial_aggregation) - - # entry for user_dict in the state_dict if not already present - if ( - _utils.state_dict_user_dict_key() not in state_dict - and _utils.state_dict_user_dict_key() in rank_state_dict - ): - state_dict[_utils.state_dict_user_dict_key()] = rank_state_dict[_utils.state_dict_user_dict_key()] - - # for a partial aggregation scenario, we might not have the entire tensor aggregated yet, thus skip reshape - if not partial_aggregation: - # reshape all the sharded tensors based on the original dimensions stored in sharded_states_original_dims - _reshape_states(sharded_states_original_dims, state_dict, loaded_mixed_precision) - - # return a flat structure for PyTorch model in case pytorch_format is True - # else return the hierarchical structure for ORTTrainer - return _to_pytorch_format(state_dict) if pytorch_format else state_dict - - -def _aggregate_over_D_H(ordered_paths, D_groups, H_groups, pytorch_format): # noqa: N802 - """Aggregate checkpoint files and return a single state dictionary for the D+H - (Zero+Megatron) partitioning strategy. - For D+H aggregation scenario, the first pass of aggregation(partial aggregation) is over D groups - to aggregate over Zero, and another pass over the previously aggregated states - to aggregate Megatron partitioned states. 
- """ - sharded_states_original_dims = {} - aggregate_data_checkpoint_files = [] - - # combine for Zero over data groups and save to temp file - with tempfile.TemporaryDirectory() as save_dir: - for group_id, d_group in enumerate(D_groups): - aggregate_state_dict = _aggregate_over_ranks( - ordered_paths["D"][group_id], - d_group, - sharded_states_original_dims, - partial_aggregation=True, - pytorch_format=False, - ) - - filename = "ort.data_group." + str(group_id) + ".ort.pt" - filepath = os.path.join(save_dir, filename) - _checkpoint_storage.save(aggregate_state_dict, filepath) - aggregate_data_checkpoint_files.append(filepath) - - assert len(aggregate_data_checkpoint_files) > 0 - - # combine for megatron: - aggregate_state = _aggregate_over_ranks( - aggregate_data_checkpoint_files, - H_groups[0], - sharded_states_original_dims, - mode=_AGGREGATION_MODE.Megatron, - pytorch_format=pytorch_format, - ) - - return aggregate_state - - -def aggregate_checkpoints(paths, pytorch_format=True): - """Aggregate checkpoint files and return a single state dictionary - - Aggregates checkpoint files specified by paths and loads them one at a time, merging - them into a single state dictionary. - The checkpoint files represented by paths must be saved through ORTTrainer.save_checkpoint() function. - The schema of the state_dict returned will be in the same as the one returned by ORTTrainer.state_dict() - - Args: - paths: list of more than one file represented as strings where the checkpoint is saved - pytorch_format: boolean flag to select either ONNX Runtime or PyTorch state schema of the returned state_dict - Returns: - state_dict that can be loaded into an ORTTrainer or into a PyTorch model - """ - - loaded_trainer_options = _checkpoint_storage.load(paths[0], key=_utils.state_dict_trainer_options_key()) - D_size = _utils.state_dict_trainer_options_data_parallel_size_key() # noqa: N806 - H_size = _utils.state_dict_trainer_options_horizontal_parallel_size_key() # noqa: N806 - world_size = _utils.state_dict_trainer_options_world_size_key() - - D_size = loaded_trainer_options[D_size] # noqa: N806 - H_size = loaded_trainer_options[H_size] # noqa: N806 - world_size = loaded_trainer_options[world_size] - D_groups, H_groups = _get_parallellism_groups(D_size, H_size, world_size) # noqa: N806 - - combine_zero = loaded_trainer_options[_utils.state_dict_trainer_options_zero_stage_key()] > 0 - combine_megatron = len(H_groups[0]) > 1 - - # order the paths in the order of groups in which they must be aggregated according to - # data-parallel groups and H-parallel groups obtained - # eg: {'D': [[path_0, path_2],[path_1, path_3]], 'H': [[path_0, path_1],[path_2, path_3]]} - ordered_paths = _order_paths(paths, D_groups, H_groups) - - aggregate_state = None - if combine_zero and combine_megatron: - aggregate_state = _aggregate_over_D_H(ordered_paths, D_groups, H_groups, pytorch_format) - elif combine_zero: - aggregate_state = _aggregate_over_ranks( - ordered_paths["D"][0], D_groups[0], mode=_AGGREGATION_MODE.Zero, pytorch_format=pytorch_format - ) - elif combine_megatron: - aggregate_state = _aggregate_over_ranks( - ordered_paths["H"][0], H_groups[0], mode=_AGGREGATION_MODE.Megatron, pytorch_format=pytorch_format - ) - - return aggregate_state - - -################################################################################ -# Helper functions -################################################################################ - - -def _load_single_checkpoint(ort_trainer, checkpoint_dir, checkpoint_prefix, 
is_partitioned, strict): - checkpoint_name = _get_checkpoint_name( - checkpoint_prefix, - is_partitioned, - ort_trainer.options.distributed.world_rank, - ort_trainer.options.distributed.world_size, - ) - checkpoint_file = os.path.join(checkpoint_dir, checkpoint_name) - - if is_partitioned: - assert_msg = ( - f"Couldn't find checkpoint file {checkpoint_file}." - " Optimizer partitioning is enabled using ZeRO. Please make sure the checkpoint file exists " - f"for rank {ort_trainer.options.distributed.world_rank} of {ort_trainer.options.distributed.world_size}" - ) - else: - assert_msg = f"Couldn't find checkpoint file {checkpoint_file}." - assert os.path.exists(checkpoint_file), assert_msg - - checkpoint_state = torch.load(checkpoint_file, map_location="cpu") - experimental_load_state_dict(ort_trainer, checkpoint_state["model"], strict=strict) - del checkpoint_state["model"] - return checkpoint_state - - -def _load_multi_checkpoint(ort_trainer, checkpoint_dir, checkpoint_prefix, strict): - checkpoint_files = _list_checkpoint_files(checkpoint_dir, checkpoint_prefix) - - ckpt_agg = _CombineZeroCheckpoint(checkpoint_files) - aggregate_state_dict = ckpt_agg.aggregate_checkpoints() - - experimental_load_state_dict(ort_trainer, aggregate_state_dict, strict=strict) - - # aggregate other keys in the state_dict. - # Values will be overwritten for matching keys among workers - all_checkpoint_states = dict() - for checkpoint_file in checkpoint_files: - checkpoint_state = torch.load(checkpoint_file, map_location="cpu") - del checkpoint_state["model"] - all_checkpoint_states.update(checkpoint_state) - return all_checkpoint_states - - -def _list_checkpoint_files(checkpoint_dir, checkpoint_prefix, extension=".ort.pt"): - ckpt_file_names = [f for f in os.listdir(checkpoint_dir) if f.startswith(checkpoint_prefix)] - ckpt_file_names = [f for f in ckpt_file_names if f.endswith(extension)] - ckpt_file_names = [os.path.join(checkpoint_dir, f) for f in ckpt_file_names] - - assert len(ckpt_file_names) > 0, f"No checkpoint found with prefix '{checkpoint_prefix}' at '{checkpoint_dir}'" - return ckpt_file_names - - -def _get_checkpoint_name(prefix, is_partitioned, world_rank=None, world_size=None): - SINGLE_CHECKPOINT_FILENAME = "{prefix}.ort.pt" # noqa: N806 - MULTIPLE_CHECKPOINT_FILENAME = "{prefix}.ZeRO.{world_rank}.{world_size}.ort.pt" # noqa: N806 - - if is_partitioned: - filename = MULTIPLE_CHECKPOINT_FILENAME.format( - prefix=prefix, world_rank=world_rank, world_size=(world_size - 1) - ) - else: - filename = SINGLE_CHECKPOINT_FILENAME.format(prefix=prefix) - return filename - - -def _split_state_dict(state_dict): - optimizer_keys = ["Moment_1_", "Moment_2_", "Update_Count_", "Step"] - split_sd = {"optimizer": {}, "fp32_param": {}, "fp16_param": {}} - for k, v in state_dict.items(): - mode = "fp32_param" - for optim_key in optimizer_keys: - if k.startswith(optim_key): - mode = "optimizer" - break - if k.endswith("_fp16"): - mode = "fp16_param" - split_sd[mode][k] = v - return split_sd - - -class _CombineZeroCheckpoint: - def __init__(self, checkpoint_files, clean_state_dict=None): - assert len(checkpoint_files) > 0, "No checkpoint files passed" - self.checkpoint_files = checkpoint_files - self.clean_state_dict = clean_state_dict - self.world_size = int(self.checkpoint_files[0].split("ZeRO")[1].split(".")[2]) + 1 - assert len(self.checkpoint_files) == self.world_size, f"Could not find {self.world_size} files" - self.weight_shape_map = {} - self.sharded_params = set() - - def _split_name(self, name: str): - 
name_split = name.split("_view_") - view_num = None - if len(name_split) > 1: - view_num = int(name_split[1]) - optimizer_key = "" - mp_suffix = "" - if name_split[0].startswith("Moment_1"): - optimizer_key = "Moment_1_" - elif name_split[0].startswith("Moment_2"): - optimizer_key = "Moment_2_" - elif name_split[0].startswith("Update_Count"): - optimizer_key = "Update_Count_" - elif name_split[0].endswith("_fp16"): - mp_suffix = "_fp16" - param_name = name_split[0] - if optimizer_key: - param_name = param_name.split(optimizer_key)[1] - param_name = param_name.split("_fp16")[0] - return param_name, optimizer_key, view_num, mp_suffix - - def _update_weight_statistics(self, name, value): - if name not in self.weight_shape_map: - self.weight_shape_map[name] = value.size() # original shape of tensor - - def _reshape_tensor(self, key): - value = self.aggregate_state_dict[key] - weight_name, _, _, _ = self._split_name(key) - set_size = self.weight_shape_map[weight_name] - self.aggregate_state_dict[key] = value.reshape(set_size) - - def _aggregate(self, param_dict): - for k, v in param_dict.items(): - weight_name, optimizer_key, view_num, mp_suffix = self._split_name(k) - if view_num is not None: - # parameter is sharded - param_name = optimizer_key + weight_name + mp_suffix - - if param_name in self.aggregate_state_dict and optimizer_key not in ["Update_Count_"]: - self.sharded_params.add(param_name) - # Found a previous shard of the param, concatenate shards ordered by ranks - self.aggregate_state_dict[param_name] = torch.cat((self.aggregate_state_dict[param_name], v)) - else: - self.aggregate_state_dict[param_name] = v - else: - if k in self.aggregate_state_dict: - assert (self.aggregate_state_dict[k] == v).all(), "Unsharded params must have the same value" - else: - self.aggregate_state_dict[k] = v - self._update_weight_statistics(weight_name, v) - - def aggregate_checkpoints(self): - warnings.warn( - "_CombineZeroCheckpoint.aggregate_checkpoints() will be deprecated soon. 
" - "Please use aggregate_checkpoints() instead.", - DeprecationWarning, - ) - - checkpoint_prefix = self.checkpoint_files[0].split(".ZeRO")[0] - self.aggregate_state_dict = dict() - - for i in range(self.world_size): - checkpoint_name = _get_checkpoint_name(checkpoint_prefix, True, i, self.world_size) - rank_state_dict = torch.load(checkpoint_name, map_location=torch.device("cpu")) - if "model" in rank_state_dict: - rank_state_dict = rank_state_dict["model"] - - if self.clean_state_dict: - rank_state_dict = self.clean_state_dict(rank_state_dict) - - rank_state_dict = _split_state_dict(rank_state_dict) - self._aggregate(rank_state_dict["fp16_param"]) - self._aggregate(rank_state_dict["fp32_param"]) - self._aggregate(rank_state_dict["optimizer"]) - - for k in self.sharded_params: - self._reshape_tensor(k) - return self.aggregate_state_dict diff --git a/orttraining/orttraining/python/training/model_desc_validation.py b/orttraining/orttraining/python/training/model_desc_validation.py deleted file mode 100644 index dd3f4cb95cd59..0000000000000 --- a/orttraining/orttraining/python/training/model_desc_validation.py +++ /dev/null @@ -1,408 +0,0 @@ -from collections import namedtuple - -import cerberus -import torch - -from ._utils import static_vars - -LEARNING_RATE_IO_DESCRIPTION_NAME = "__learning_rate" -ALL_FINITE_IO_DESCRIPTION_NAME = "__all_finite" -LOSS_SCALE_INPUT_IO_DESCRIPTION_NAME = "__loss_scale_input_name" -GRADIENT_ACCUMULATION_IO_DESCRIPTION_NAME = "__gradient_accumulation_name" - - -class _ORTTrainerModelDesc: - def __init__(self, model_desc): - # Keep a copy of original input for debug - self._original = dict(model_desc) - - # Global counter used to validate occurrences of 'is_loss=True' whithin 'model_desc.outputs' - # A stateless validator is used for each tuple, but validation accross the whole list of tuple is needed - # because just one 'is_loss=True' is allowed withing 'model_desc.outputs' list of tuples - _model_desc_outputs_validation.loss_counter = 0 - - # Used for logging purposes - self._main_class_name = self.__class__.__name__ - - # Validates user input - self._validated = dict(self._original) - validator = cerberus.Validator(MODEL_DESC_SCHEMA) - self._validated = validator.validated(self._validated) - if self._validated is None: - raise ValueError(f"Invalid model_desc: {validator.errors}") - - # Normalize inputs to a list of namedtuple(name, shape) - self._InputDescription = namedtuple("InputDescription", ["name", "shape"]) - self._InputDescriptionTyped = namedtuple("InputDescriptionTyped", ["name", "shape", "dtype"]) - for idx, input in enumerate(self._validated["inputs"]): - self._validated["inputs"][idx] = self._InputDescription(*input) - - # Normalize outputs to a list of namedtuple(name, shape, is_loss) - self._OutputDescription = namedtuple("OutputDescription", ["name", "shape", "is_loss"]) - self._OutputDescriptionTyped = namedtuple( - "OutputDescriptionTyped", ["name", "shape", "is_loss", "dtype", "dtype_amp"] - ) - for idx, output in enumerate(self._validated["outputs"]): - if len(output) == 2: - self._validated["outputs"][idx] = self._OutputDescription(*output, False) - else: - self._validated["outputs"][idx] = self._OutputDescription(*output) - - # Hard-code learning rate, all_finite descriptors - self.learning_rate = self._InputDescriptionTyped(LEARNING_RATE_IO_DESCRIPTION_NAME, [1], torch.float32) - - # Convert dict in object - for k, v in self._validated.items(): - setattr(self, k, self._wrap(v)) - - def __repr__(self): - """Pretty representation for a 
model description class""" - - pretty_msg = "Model description:\n" - - # Inputs - inputs = [] - for i_desc in self.inputs: - if isinstance(i_desc, self._InputDescription): - inputs.append(f"(name={i_desc.name}, shape={i_desc.shape})") - elif isinstance(i_desc, self._InputDescriptionTyped): - inputs.append(f"(name={i_desc.name}, shape={i_desc.shape}, dtype={i_desc.dtype})") - else: - raise ValueError(f"Unexpected type {type(i_desc)} for input description") - - pretty_msg += "\nInputs:" - for idx, item in enumerate(inputs): - pretty_msg += f"\n\t{idx}: {item}" - - # Outputs - outputs = [] - for o_desc in self.outputs: - if isinstance(o_desc, self._OutputDescription): - outputs.append(f"(name={o_desc.name}, shape={o_desc.shape})") - elif isinstance(o_desc, self._OutputDescriptionTyped): - outputs.append( - f"(name={o_desc.name}, shape={o_desc.shape}, dtype={o_desc.dtype}, dtype_amp={o_desc.dtype_amp})" - ) - else: - raise ValueError(f"Unexpected type {type(o_desc)} for output description") - pretty_msg += "\nOutputs:" - for idx, item in enumerate(outputs): - pretty_msg += f"\n\t{idx}: {item}" - - # Learning rate - if self.learning_rate: - pretty_msg += "\nLearning rate: " - pretty_msg += ( - f"(name={self.learning_rate.name}, shape={self.learning_rate.shape}, dtype={self.learning_rate.dtype})" - ) - - # Mixed precision - if getattr(self, ALL_FINITE_IO_DESCRIPTION_NAME, None) or getattr( - self, LOSS_SCALE_INPUT_IO_DESCRIPTION_NAME, None - ): - pretty_msg += "\nMixed Precision:" - if getattr(self, ALL_FINITE_IO_DESCRIPTION_NAME, None): - pretty_msg += "\n\tis gradients finite: " - pretty_msg += ( - f"(name={self.all_finite.name}, shape={self.all_finite.shape}, dtype={self.all_finite.dtype})" - ) - if getattr(self, LOSS_SCALE_INPUT_IO_DESCRIPTION_NAME, None): - pretty_msg += "\n\tloss scale input name: " - pretty_msg += f"(name={self.loss_scale_input.name}, shape={self.loss_scale_input.shape}, dtype={self.loss_scale_input.dtype})" - - # Gradient Accumulation steps - if self.gradient_accumulation: - pretty_msg += "\nGradient Accumulation: " - pretty_msg += f"(name={self.gradient_accumulation.name}, shape={self.gradient_accumulation.shape}, dtype={self.gradient_accumulation.dtype})" - - return pretty_msg - - def add_type_to_input_description(self, index, dtype): - """Updates an existing input description at position 'index' with 'dtype' type information - - Args: - index (int): position within 'inputs' description - dtype (torch.dtype): input data type - """ - - assert isinstance(index, int) and index >= 0, "input 'index' must be a positive int" - assert isinstance(dtype, torch.dtype), "input 'dtype' must be a torch.dtype type" - existing_values = (*self.inputs[index],) - if isinstance(self.inputs[index], self._InputDescriptionTyped): - existing_values = (*existing_values[:-1],) - self.inputs[index] = self._InputDescriptionTyped(*existing_values, dtype) - - def add_type_to_output_description(self, index, dtype, dtype_amp=None): - """Updates an existing output description at position 'index' with 'dtype' type information - - Args: - index (int): position within 'inputs' description - dtype (torch.dtype): input data type - dtype_amp (torch.dtype, default is None): input data type for evaluation with mixed precision - """ - - assert isinstance(index, int) and index >= 0, "output 'index' must be a positive int" - assert isinstance(dtype, torch.dtype), "output 'dtype' must be a torch.dtype type" - assert dtype_amp is None or isinstance( - dtype_amp, torch.dtype - ), "output 'dtype_amp' must be either 
None or torch.dtype type" - existing_values = (*self.outputs[index],) - if isinstance(self.outputs[index], self._OutputDescriptionTyped): - existing_values = (*existing_values[:-2],) - self.outputs[index] = self._OutputDescriptionTyped(*existing_values, dtype, dtype_amp) - - @property - def gradient_accumulation(self): - return getattr(self, GRADIENT_ACCUMULATION_IO_DESCRIPTION_NAME, None) - - @gradient_accumulation.setter - def gradient_accumulation(self, name): - self._add_output_description( - self, name, [1], False, torch.bool, None, GRADIENT_ACCUMULATION_IO_DESCRIPTION_NAME, ignore_duplicate=True - ) - - @property - def all_finite(self): - return getattr(self, ALL_FINITE_IO_DESCRIPTION_NAME, None) - - @all_finite.setter - def all_finite(self, name): - self._add_output_description( - self, name, [1], False, torch.bool, None, ALL_FINITE_IO_DESCRIPTION_NAME, ignore_duplicate=True - ) - - @property - def loss_scale_input(self): - return getattr(self, LOSS_SCALE_INPUT_IO_DESCRIPTION_NAME, None) - - @loss_scale_input.setter - def loss_scale_input(self, name): - self._add_input_description( - self, name, [], torch.float32, LOSS_SCALE_INPUT_IO_DESCRIPTION_NAME, ignore_duplicate=True - ) - - def _add_input_description(self, node, name, shape, dtype=None, attr_name=None, ignore_duplicate=False): - """Add a new input description into the node object - - If 'dtype' is specified, a typed input description namedtuple(name, shape, dtype) is created. - Otherwise an untyped input description namedtuple(name, shape) is created instead. - - Args: - node (list or object): node to append input description to. When 'node' is 'self.inputs', - a new input description is appended to the list. - Otherwise, a new input description is created as an attribute into 'node' with name 'attr_name' - name (str): name of input description - shape (list): shape of input description - dtype (torch.dtype): input data type - attr_name (str, default is None): friendly name to allow direct access to the output description - ignore_duplicate (bool, default is False): silently skips addition of duplicate inputs - """ - - assert isinstance(name, str) and len(name) > 0, "'name' is an invalid input name" - not_found = True - if not ignore_duplicate: - if id(node) == id(self.inputs): - not_found = all([name not in i_desc.name for i_desc in node]) - assert not_found, f"'name' {name} already exists in the inputs description" - else: - not_found = attr_name not in dir(self) - assert not_found, f"'attr_name' {attr_name} already exists in the 'node'" - elif not not_found: - return - assert isinstance(shape, list) and all( - [(isinstance(dim, int) or (isinstance(dim, str) and len(dim) > 0)) for dim in shape] - ), "'shape' must be a list of int or str with length at least 1" - assert dtype is None or isinstance(dtype, torch.dtype), "'dtype' must be either None or a torch.dtype type" - if dtype: - new_input_desc = self._InputDescriptionTyped(name, shape, dtype) - else: - new_input_desc = self._InputDescription(name, shape) - - if id(node) == id(self.inputs): - self.inputs.append(new_input_desc) - else: - assert isinstance(attr_name, str) and len(attr_name) > 0, "Invalid 'attr_name'" - setattr(node, attr_name, new_input_desc) - - def _add_output_description( - self, node, name, shape, is_loss, dtype=None, dtype_amp=None, attr_name=None, ignore_duplicate=False - ): - """Add a new output description into the node object as a tuple - - When (name, shape, is_loss, dtype) is specified, a typed output description is created - Otherwise an untyped 
output description (name, shape, is_loss) is created instead - - Args: - node (list or object): node to append output description to. When 'node' is 'self.outputs', - a new output description is appended to the list. - Otherwise, a new output description is created as an attribute into 'node' with name 'attr_name' - name (str): name of output description - shape (list): shape of output description - is_loss (bool): specifies whether this output is a loss - dtype (torch.dtype): input data type - dtype_amp (torch.dtype, default is None): input data type for evaluation with mixed precision. - attr_name (str, default is None): friendly name to allow direct access to the output description - ignore_duplicate (bool, default is False): silently skips addition of duplicate outputs - """ - - assert isinstance(name, str) and len(name) > 0, "'name' is an invalid output name" - assert isinstance(shape, list) and all( - [(isinstance(dim, int) or (isinstance(dim, str) and len(dim) > 0)) for dim in shape] - ), "'shape' must be a list of int or str with length at least 1" - assert isinstance(is_loss, bool), "'is_loss' must be a bool" - - not_found = True - if not ignore_duplicate: - if id(node) == id(self.outputs): - not_found = all([name not in o_desc.name for o_desc in node]) - assert not_found, f"'name' {name} already exists in the outputs description" - assert ( - all([not o_desc.is_loss for o_desc in node]) if is_loss else True - ), "Only one 'is_loss' is supported at outputs description" - else: - not_found = attr_name not in dir(self) - assert not_found, f"'attr_name' {attr_name} already exists in the 'node'" - elif not not_found: - return - - assert dtype is None or isinstance(dtype, torch.dtype), "'dtype' must be either None or a torch.dtype type" - if dtype: - new_output_desc = self._OutputDescriptionTyped(name, shape, is_loss, dtype, None) - else: - new_output_desc = self._OutputDescription(name, shape, is_loss) - - if id(node) == id(self.outputs): - self.outputs.append(new_output_desc) - else: - assert isinstance(attr_name, str) and len(attr_name) > 0, "Invalid 'attr_name'" - setattr(node, attr_name, new_output_desc) - - def _wrap(self, v): - """Add 'v' as self's attribute to allow direct access as self.v""" - if isinstance(v, (list)): - return type(v)([self._wrap(v) for v in v]) - elif isinstance( - v, - ( - self._InputDescription, - self._InputDescriptionTyped, - self._OutputDescription, - self._OutputDescriptionTyped, - ), - ): - return v - elif isinstance(v, (tuple)): - return type(v)([self._wrap(v) for v in v]) - elif isinstance(v, (dict, int, float, bool, str)): - return _ORTTrainerModelDescInternal(self._main_class_name, v) if isinstance(v, dict) else v - else: - raise ValueError( - f"Unsupported type for model_desc ({v})." - "Only int, float, bool, str, list, tuple and dict are supported" - ) - - -class _ORTTrainerModelDescInternal(_ORTTrainerModelDesc): - r"""Internal class used by ONNX Runtime training backend for input validation - - NOTE: Users MUST NOT use this class in any way! - """ - - def __init__(self, main_class_name, model_desc): - # Used for logging purposes - self._main_class_name = main_class_name - - # Convert dict in object - for k, v in dict(model_desc).items(): - setattr(self, k, self._wrap(v)) - - -def _model_desc_inputs_validation(field, value, error): - r"""Cerberus custom check method for 'model_desc.inputs' - - 'model_desc.inputs' is a list of tuples. 
- The list has variable length, but each tuple has size 2 - - The first element of the tuple is a string which represents the input name - The second element is a list of shapes. Each shape must be either an int or string. - Empty list represents a scalar input - - Validation is done within each tuple to enforce the schema described above. - - Example: - - .. code-block:: python - - model_desc['inputs'] = [('input1', ['batch', 1024]), - ('input2', []) - ('input3', [512])] - """ - - if not isinstance(value, tuple) or len(value) != 2: - error(field, "must be a tuple with size 2") - if not isinstance(value[0], str): - error(field, "the first element of the tuple (aka name) must be a string") - if not isinstance(value[1], list): - error(field, "the second element of the tuple (aka shape) must be a list") - else: - for shape in value[1]: - if not isinstance(shape, str) and not isinstance(shape, int) or isinstance(shape, bool): - error(field, "each shape must be either a string or integer") - - -@static_vars(loss_counter=0) -def _model_desc_outputs_validation(field, value, error): - r"""Cerberus custom check method for 'model_desc.outputs' - - 'model_desc.outputs' is a list of tuples with variable length. - The first element of the tuple is a string which represents the output name - The second element is a list of shapes. Each shape must be either an int or string. - Empty list represents a scalar output - The third element is optional and is a flag that signals whether the output is a loss value - - Validation is done within each tuple to enforce the schema described above, but also - throughout the list of tuples to ensure a single 'is_loss=True' occurrence. - - Example: - - .. code-block:: python - - model_desc['outputs'] = [('output1', ['batch', 1024], is_loss=True), - ('output2', [], is_loss=False) - ('output3', [512])] - """ - - if not isinstance(value, tuple) or len(value) < 2 or len(value) > 3: - error(field, "must be a tuple with size 2 or 3") - if len(value) == 3 and not isinstance(value[2], bool): - error(field, "the third element of the tuple (aka is_loss) must be a boolean") - elif len(value) == 3: - if value[2]: - _model_desc_outputs_validation.loss_counter += 1 - if _model_desc_outputs_validation.loss_counter > 1: - error(field, "only one is_loss can be set to True") - if not isinstance(value[0], str): - error(field, "the first element of the tuple (aka name) must be a string") - if not isinstance(value[1], list): - error(field, "the second element of the tuple (aka shape) must be a list") - else: - for shape in value[1]: - if not isinstance(shape, str) and not isinstance(shape, int) or isinstance(shape, bool): - error(field, "each shape must be either a string or integer") - - -# Validation schema for model description dictionary -MODEL_DESC_SCHEMA = { - "inputs": { - "type": "list", - "required": True, - "minlength": 1, - "schema": {"check_with": _model_desc_inputs_validation}, - }, - "outputs": { - "type": "list", - "required": True, - "minlength": 1, - "schema": {"check_with": _model_desc_outputs_validation}, - }, -} diff --git a/orttraining/orttraining/python/training/orttrainer.py b/orttraining/orttraining/python/training/orttrainer.py deleted file mode 100644 index d5a488c436a1d..0000000000000 --- a/orttraining/orttraining/python/training/orttrainer.py +++ /dev/null @@ -1,1537 +0,0 @@ -import copy -import io -import os -import warnings -from functools import partial -from inspect import signature - -import numpy as np -import onnx -import torch - -import onnxruntime as
ort -from onnxruntime.tools.symbolic_shape_infer import SymbolicShapeInference - -from . import _checkpoint_storage, _utils, amp, checkpoint, optim, postprocess -from .model_desc_validation import _ORTTrainerModelDesc -from .orttrainer_options import ORTTrainerOptions - - -class TrainStepInfo: - r"""Private class used to store runtime information from current train step. - - After every train step, :py:meth:`ORTTrainer.train_step` updates the internal instance of - :py:class:`.TrainStepInfo` residing on :py:class:`.ORTTrainer` with relevant information - from the forward pass. - - This class shouldn't be accessed directly by the user, unless they really know what they are doing. - Instead, :py:class:`.ORTTrainer` passes it to relevant class methods automatically, - such as :py:method:`._LRScheduler.get_lr` or :py:class:`.LossScaler.update`. - - Args: - optimizer_config (optim._OptimizerConfig): reference to optimizer config - all_finite (bool, default is True): flag that indicates whether all gradients are still finite after last step - fetches (list of str, default is []): list of output names to fetch from train_step/eval_step. Set it to [] to reset normal behavior. - optimization_step (int): indicates the number of optimizations performed. Used for learning rate scheduling - step (int): indicates current training step. Used for gradient accumulation - - Example: - - .. code-block:: python - - info = TrainStepInfo(optimizer_config=optim.SGDConfig(lr=0.01)) - if info.all_finite: - print(f'Yay, all gradients are finite at {step} step!') - - """ - - def __init__(self, optimizer_config, all_finite=True, fetches=[], optimization_step=0, step=0): # noqa: B006 - assert isinstance(optimizer_config, optim._OptimizerConfig), "optimizer_config must be a optim._OptimizerConfig" - assert isinstance(all_finite, bool), "all_finite must be a bool" - assert isinstance(fetches, list) and all( - [isinstance(item, str) for item in fetches] - ), "fetches must be a list of str" - assert isinstance(optimization_step, int) and optimization_step >= 0, "optimization_step must be a positive int" - assert isinstance(step, int) and step >= 0, "step must be a positive int" - - self.optimizer_config = optimizer_config - self.all_finite = all_finite - self.fetches = fetches - self.optimization_step = optimization_step - self.step = step - - -class ORTTrainer: - r"""Pytorch frontend for ONNX Runtime training - - Entry point that exposes the C++ backend of ORT as a Pytorch frontend. - - Args: - model (torch.nn.Module or onnx.ModelProto): either a PyTorch or ONNX model. - When a PyTorch model and :py:attr:`loss_fn` are specified, :py:attr:`model` and :py:obj:`loss_fn` are combined. - When a ONNX model is provided, the loss is identified by the flag :py:obj:`is_loss=True` in one of the :py:attr:`.model_desc.outputs` entries. - model_desc (dict): model input and output description. - This is used to identify inputs and outputs and their shapes, so that ORT can generate back propagation graph, plan memory allocation for - training, and perform optimizations. - :py:attr:`model_desc` must be consistent with the training :py:attr:`model` and have the following (:py:obj:`dict`) schema - :py:obj:`{ 'inputs': [tuple(name, shape)], 'outputs': [tuple(name, shape, is_loss)]}`. - :py:attr:`name` is a string representing the name of input or output of the model. - For :py:obj:`model_desc['inputs']` entries, :py:attr:`name` must match input names of the original PyTorch model's :py:meth:`torch.nn.Module.forward` method. 
- For ONNX models, both name and order of input names must match. - For :py:obj:`model_desc['outputs']` entries, the order must match the original PyTorch's output as returned by :py:meth:`torch.nn.Module.forward` method. - For ONNX models, both name and order of output names must match. - :py:attr:`shape` is a list of string or integers that describes the shape of the input/output. - Each dimension size can be either a string or an int. String means the dimension size is dynamic, while integers mean static dimensions. - An empty list implies a scalar. - Lastly, :py:attr:`is_loss` is a boolean (default is False) that flags if this output is considered a loss. - ORT backend needs to know which output is loss in order to generate back propagation graph. - Loss output must be specified when either :py:attr:`loss_fn` is specified or when loss is embedded in the model. - Note that only one loss output is supported per model. - optimizer_config (optim._OptimizerConfig): optimizer config. - One of :py:class:`.optim.AdamConfig`, :py:class:`.optim.LambConfig` or :py:class:`.optim.SGDConfig`. - loss_fn (callable, default is None): a PyTorch loss function. - It takes two inputs [prediction, label] and outputs a scalar loss tensor. - If provided, :py:attr:`loss_fn` is combined with the PyTorch :py:attr:`model` to form a combined PyTorch model. - Inputs to the combined PyTorch model are concatenation of the :py:attr:`model`'s input and :py:attr:`loss_fn`'s label input. - Outputs of the combined PyTorch model are concatenation of :py:attr:`loss_fn`'s loss output and :py:attr:`model`'s outputs. - options (ORTTrainerOptions, default is None): options for additional features. - Example: - - .. code-block:: python - - model = ... - loss_fn = ... - model_desc = { - "inputs": [ - ("input_ids", ["batch", "max_seq_len_in_batch"]), - ("attention_mask", ["batch", "max_seq_len_in_batch"]), - ("token_type_ids", ["batch", "max_seq_len_in_batch"]), - ("masked_lm_labels", ["batch", "max_seq_len_in_batch"]), - ("next_sentence_label", ["batch", 1]) - ], - "outputs": [ - ("loss", [], True), - ], - } - optim_config = optim.LambConfig(param_groups = [ { 'params' : ['model_param0'], 'alpha' : 0.8, 'beta' : 0.7}, - { 'params' : ['model_param1' , 'model_param_2'], 'alpha' : 0.0} - ], - alpha=0.9, beta=0.999) - ort_trainer = ORTTrainer(model, model_desc, optim_config, loss_fn) - """ - - def __init__(self, model, model_desc, optim_config, loss_fn=None, options=None): - warnings.warn( - "ORTTrainer is deprecated and will be removed in ort release 1.14. 
Please use ORTModule instead.", - FutureWarning, - ) - - assert model is not None, "'model' is required and must be either a 'torch.nn.Module' or ONNX model" - assert isinstance(model_desc, dict), "'model_desc' must be a 'dict'" - assert isinstance( - optim_config, optim._OptimizerConfig - ), "'optim_config' is required and must be any of 'AdamConfig', 'LambConfig' or 'SGDConfig'" - assert loss_fn is None or ( - callable(loss_fn) and len(signature(loss_fn).parameters) == 2 - ), "'loss_fn' must be either 'None' or a callable with two parameters" - assert options is None or isinstance( - options, ORTTrainerOptions - ), "'options' must be either 'None' or 'ORTTrainerOptions'" - - # Model + Loss validation - # Supported combinarios are - # ---------------------------------------- - # | | Model | Loss | - # ---------------------------------------- - # | 1 | torch.nn.Module | None | - # | 2 | torch.nn.Module | torch.nn.Module | - # | 3 | ONNX | None | - # ---------------------------------------- - self._torch_model = None - self._onnx_model = None - if isinstance(model, torch.nn.Module): - assert loss_fn is None or isinstance( - model, torch.nn.Module - ), "'loss_fn' must be either 'None' or 'torch.nn.Module'" - self._torch_model = model - self.loss_fn = loss_fn - # TODO: Remove when experimental checkpoint functions are removed. - self._torch_state_dict_keys = list(model.state_dict().keys()) - elif isinstance(model, onnx.ModelProto): - assert loss_fn is None, "'loss_fn' must not be specified when 'model' is an ONNX model" - self._onnx_model = model - self.loss_fn = None - else: - raise ValueError("'model' must be either 'torch.nn.Module' or 'onnx.ModelProto'") - - self.model_desc = _ORTTrainerModelDesc(model_desc) - self.optim_config = optim_config - - # ORTTrainerOptions - if not options: - options = ORTTrainerOptions() - self.options = options - if self.options.mixed_precision.enabled and not self.options.mixed_precision.loss_scaler: - # TODO: Move this to model_desc_validation.py - self.options.mixed_precision.loss_scaler = amp.loss_scaler.DynamicLossScaler() - # Post processing ONNX model given as input - if self._onnx_model: - if self.options._internal_use.enable_internal_postprocess: - self._onnx_model = postprocess.run_postprocess(self._onnx_model) - if self.options._internal_use.extra_postprocess: - self._onnx_model = self.options._internal_use.extra_postprocess(self._onnx_model) - assert isinstance(self._onnx_model, onnx.ModelProto), "'extra_postprocess' must return a ONNX model" - - # When input model is already ONNX (and not exported from Pytorch within ORTTrainer), - # append 'dtype' from ONNX into model description's - for idx_i, i_desc in enumerate(self.model_desc.inputs): - dtype = None - for onnx_input in self._onnx_model.graph.input: - if onnx_input.name == i_desc.name: - dtype = _utils.dtype_onnx_to_torch(onnx_input.type.tensor_type.elem_type) - self.model_desc.add_type_to_input_description(idx_i, dtype) - break - assert dtype is not None, f"ONNX model with unknown input type ({i_desc.name})" - for idx_o, o_desc in enumerate(self.model_desc.outputs): - dtype = None - for onnx_output in self._onnx_model.graph.output: - if onnx_output.name == o_desc.name: - dtype = _utils.dtype_onnx_to_torch(onnx_output.type.tensor_type.elem_type) - self.model_desc.add_type_to_output_description(idx_o, dtype) - break - assert dtype is not None, f"ONNX model with unknown output type ({o_desc.name})" - - try: - from torch.utils.cpp_extension import ROCM_HOME - - self.is_rocm_pytorch = 
bool(torch.version.hip is not None and ROCM_HOME is not None) - except ImportError: - self.is_rocm_pytorch = False - - # TODO: Remove when experimental checkpoint functions are removed. - self._state_dict = {} - - self._train_step_info = TrainStepInfo(self.optim_config) - self._training_session = None - self._load_state_dict = None - self._init_session( - provider_options=self.options._validated_opts["provider_options"], - session_options=self.options.session_options, - ) - - def eval_step(self, *args, **kwargs): - r"""Evaluation step method - - Args: - *args: Arbitrary arguments that are used as model input (data only) - **kwargs: Arbitrary keyword arguments that are used as model input (data only) - - Returns: - ordered :py:obj:`list` with model outputs as described by :py:attr:`.ORTTrainer.model_desc` - """ - # Get data. CombineTorchModelLossFn takes label as last input and outputs loss first - sample_input = self._prepare_model_input(self.model_desc.inputs, None, None, *args, **kwargs) - - # Export model to ONNX - if self._onnx_model is None: - if self._torch_model is not None: - self._init_onnx_model(sample_input) - else: - raise RuntimeError("Model is uninitialized. Only ONNX and PyTorch models are supported") - - # Prepare input/output description - inputs_desc = self.model_desc.inputs - outputs_desc = self.model_desc.outputs - if self._train_step_info.fetches: - outputs_desc = [o_desc for o_desc in outputs_desc if o_desc.name in self._train_step_info.fetches] - if len(outputs_desc) != len(self._train_step_info.fetches): - raise RuntimeError("The specified fetches list contains invalid output names") - - # Normalize input - if not isinstance(sample_input, (list, tuple)): - sample_input = (sample_input,) - - # RunOptions - run_options = ort.RunOptions() - run_options.only_execute_path_to_fetches = True - run_options.training_mode = False - - # Run a eval step and return - session_run_results = self._training_session_run_helper( - False, sample_input, inputs_desc, outputs_desc, run_options - ) - - # Output must be returned in the same order as defined in the model description - results = [session_run_results[o_desc.name] for o_desc in outputs_desc] - return results[0] if len(results) == 1 else results - - def save_as_onnx(self, path): - r"""Persists ONNX model into :py:attr:`path` - - The model will be saved as a Google Protocol Buffers (aka protobuf) file as per ONNX standard. - The graph includes full information, including inference and training metadata. - - Args: - path (str): Full path, including filename, to save the ONNX model in the filesystem - - Raises: - RuntimeWarning: raised when neither `train_step` or `eval_step` was called at least once - ValueError: raised when `path` is not valid path - """ - if not self._training_session: - warnings.warn( - "Training session is not initialized yet. " - "'train_step' or 'eval_step' methods must be executed at least once before calling 'save_as_onnx()'." 
- ) - return - state_tensors = self._training_session.get_state() - self._update_onnx_model_initializers(state_tensors) - - assert isinstance(path, str), "'path' must be a valid path string" - dir_name = os.path.dirname(path) - file_name = os.path.basename(path) - if (dir_name and not os.path.exists(dir_name)) or not file_name: - warnings.warn("'path' is not valid or does not exist") - return - - with open(path, "wb") as f: - f.write(self._onnx_model.SerializeToString()) - - def _check_model_export(self, input): - from numpy.testing import assert_allclose - from onnx import TensorProto, helper, numpy_helper # noqa: F401 - - onnx_model_copy = copy.deepcopy(self._onnx_model) - - # Mute the dropout nodes - dropout_nodes = [n for n in onnx_model_copy.graph.node if n.op_type == "Dropout"] - for node in dropout_nodes: - ratio_node = next(n for n in onnx_model_copy.graph.node if node.input[1] in n.output) - training_mode_node = next(n for n in onnx_model_copy.graph.node if node.input[2] in n.output) - - training_mode_node.attribute.pop() - ratio_node.attribute.pop() - new_training_mode_arr = np.array(False, dtype=bool) - new_ratio_arr = np.array(0.0, dtype=np.float32) - new_training_mode = numpy_helper.from_array(new_training_mode_arr) - new_ratio = numpy_helper.from_array(new_ratio_arr) - training_mode_node.attribute.add().t.CopyFrom(new_training_mode) - ratio_node.attribute.add().t.CopyFrom(new_ratio) - training_mode_node.attribute[0].type = 4 - ratio_node.attribute[0].type = 4 - training_mode_node.attribute[0].name = "value" - ratio_node.attribute[0].name = "value" - - _inference_sess = ort.InferenceSession( - onnx_model_copy.SerializeToString(), providers=ort.get_available_providers() - ) - inf_inputs = {} - for i, input_elem in enumerate(input): - inf_inputs[_inference_sess.get_inputs()[i].name] = input_elem.cpu().numpy() - _inference_outs = _inference_sess.run(None, inf_inputs) - for torch_item, ort_item in zip(self.torch_sample_outputs, _inference_outs): - assert_allclose( - torch_item, - ort_item, - rtol=1e-2, - atol=1e-6, - err_msg="Mismatch between outputs of PyTorch model and exported ONNX model. " - "Note that different backends may exhibit small computational differences." - "If this is within acceptable margin, or if there is random generator " - "in the model causing inevitable mismatch, you can proceed training by " - "setting the flag debug.check_model_export to False.", - ) - - def train_step(self, *args, **kwargs): - r"""Train step method - - After forward pass, an ordered list with all outputs described at :py:attr:`ORTTrainer.model_desc` is returned. - Additional information relevant to the train step is maintend by :py:attr:`ORTTrainer._train_step_info`. - See :py:class:`.TrainStepInfo` for details. 
- - Args: - *args: Arbitrary arguments that are used as model input (data only) - **kwargs: Arbitrary keyword arguments that are used as model input (data only) - - Returns: - ordered :py:obj:`list` with model outputs as described by :py:attr:`ORTTrainer.model_desc` - """ - # Export model to ONNX - if self._onnx_model is None: - sample_input = self._prepare_model_input(self.model_desc.inputs, None, None, *args, **kwargs) - self._init_onnx_model(sample_input) - - # Debug Model Export if indicated - if self.options.debug.check_model_export: - self._check_model_export(sample_input) - - # Prepare inputs+lr and output descriptions - inputs_desc = self._model_desc_inputs_with_lr - outputs_desc = self.model_desc.outputs - - # Train step must be incremented *before* gradient accumulation code - # Gradients are accumulated when - # self._train_step_info.step % self.options.batch.gradient_accumulation_steps != 0, - # and they are updated otherwise - self._train_step_info.step += 1 - - # RunOptions - run_options = None - mixed_precision_without_fetches = False - if self._train_step_info.fetches: - outputs_desc = [o_desc for o_desc in outputs_desc if o_desc.name in self._train_step_info.fetches] - if len(outputs_desc) != len(self._train_step_info.fetches): - raise RuntimeError("The specified fetches list contains invalid output names") - elif self._train_step_info.step % self.options.batch.gradient_accumulation_steps != 0: - run_options = ort.RunOptions() - run_options.only_execute_path_to_fetches = True - outputs_desc = self._model_desc_outputs_with_gradient_accumulation - elif self.options.mixed_precision.enabled: - mixed_precision_without_fetches = True - outputs_desc = self._model_desc_outputs_with_all_finite - - # Update Learning Rate if Necessary - lr = self.optim_config.lr - if self.options.lr_scheduler: - lr = self.options.lr_scheduler._step(self._train_step_info)[0] - - # Loss Scale for mixed precision - loss_scale = None - if self.options.mixed_precision.enabled: - loss_scaler = self.options.mixed_precision.loss_scaler - assert loss_scaler, "Loss scaler is required when mixed precision is enabled" - loss_scale = loss_scaler.loss_scale - inputs_desc = self._model_desc_inputs_with_lr_and_loss_scale - - # Get data. CombineTorchModelLossFn takes label as last input and outputs loss first - input = self._prepare_model_input(inputs_desc, lr, loss_scale, *args, **kwargs) - - # Normalize input - if not isinstance(args, (list, tuple)): - args = (args,) - - # Run a train step and return - session_run_results = self._training_session_run_helper(True, input, inputs_desc, outputs_desc, run_options) - if mixed_precision_without_fetches: - # After session run with all_fp32_gradients_finite, we need to clear the training I/O binding's output - # Otherwise next run with only_execute_path_to_fetches will lead to gradient all reduce - # because all_fp32_gradients_finite is still in the feed. 
- self._train_io_binding.clear_binding_outputs() - - is_all_finite = session_run_results[self.model_desc.all_finite.name] - self._train_step_info.all_finite = is_all_finite - if loss_scaler: - loss_scaler.update(self._train_step_info) - if is_all_finite: - # Optimization step must be incremented *after* optimization is successful - self._train_step_info.optimization_step += 1 - elif self._train_step_info.step % self.options.batch.gradient_accumulation_steps == 0: - # Optimization step must be incremented *after* optimization is successful - self._train_step_info.optimization_step += 1 - - # Output must be returned in the same order as defined in the model description - # or in the order specified by TrainStepInfo.fetches, if applicable - if self._train_step_info.fetches: - results = [session_run_results[o_desc] for o_desc in self._train_step_info.fetches] - else: - results = [session_run_results[o_desc.name] for o_desc in self.model_desc.outputs] - return results[0] if len(results) == 1 else results - - def _convert_torch_model_loss_fn_to_onnx(self, inputs, device): - # Dynamic axes - dynamic_axes = {} - for input in self.model_desc.inputs: - symbolic_axis = {} - for i, axis in enumerate(input.shape): - if isinstance(axis, str): - symbolic_axis[i] = axis - if len(symbolic_axis): - dynamic_axes[input.name] = symbolic_axis - for output in self.model_desc.outputs: - symbolic_axis = {} - for i, axis in enumerate(output.shape): - if isinstance(axis, str): - symbolic_axis[i] = axis - if len(symbolic_axis): - dynamic_axes[output.name] = symbolic_axis - - if isinstance(inputs, torch.Tensor): - inputs = [inputs] - if isinstance(inputs, dict): - sample_inputs = [inputs[k.name_].to(device=device) for k in self.model_desc.inputs] - elif isinstance(inputs, (list, tuple)): - sample_inputs = [ - input.to(device=device) for i, input in enumerate(inputs) if i < len(self.model_desc.inputs) - ] - else: - raise RuntimeError( - "Unexpected input type. Only torch.Tensor, or dict/list/tuple of torch.Tensor is supported." - ) - - # PyTorch ONNX exporter does not match argument names - # This is an issue because the ONNX graph depends on all inputs to be specified - - # Validate loss_fn - if self.loss_fn: - sig_loss = signature(self.loss_fn) - if len(sig_loss.parameters) != 2: - raise RuntimeError("loss function should take two arguments - predict and label.") - - # Basic input names from model - input_names = [input.name for input in self.model_desc.inputs] - sig = signature(self._torch_model.forward) - ordered_input_list = list(sig.parameters.keys()) - - # Label from loss_fn goes after model input - if self.loss_fn: - ordered_input_list = [*ordered_input_list, list(sig_loss.parameters.keys())[1]] - - class CombineTorchModelLossFnWrapInput(torch.nn.Module): - def __init__(self, model, loss_fn, input_names): - super().__init__() - self.model = model - self.loss_fn = loss_fn - self.input_names = input_names - - def forward(self, *inputs): - sig = signature(self.model.forward) - - input_dict = {} - for key in sig.parameters: - if key in self.input_names: - input_dict[key] = inputs[self.input_names.index(key)] - - model_out = self.model(**input_dict) - if self.loss_fn is None: - return model_out - - label = inputs[-1] - preds = model_out - return self.loss_fn(preds, label), preds - - model = CombineTorchModelLossFnWrapInput(self._torch_model, self.loss_fn, input_names) - - # Do an inference to grab output types - model.eval() - with torch.no_grad(): - # Deepcopy inputs, since input values may change after model run. 
- sample_inputs_copy = copy.deepcopy(sample_inputs) - try: - # Deepcopy model, in case model is stateful and changes after model run. - model_copy = copy.deepcopy(model) - except Exception: - model_copy = model - warnings.warn( - "This model cannot be deep copied (or pickled), which is a required step for stateful models to be properly exported to ONNX." - " Compute will continue, but unexpected results may occur!" - ) - sample_outputs = model_copy(*sample_inputs_copy) - self.torch_sample_outputs = sample_outputs - model.train() - - if isinstance(sample_outputs, torch.Tensor): - sample_outputs = [sample_outputs] - - # Append 'dtype' for model description's inputs/outputs - for idx_i, sample_input in enumerate(sample_inputs): - if idx_i < len(self.model_desc.inputs): - self.model_desc.add_type_to_input_description(idx_i, sample_input.dtype) - for idx_o, sample_output in enumerate(sample_outputs): - if idx_o < len(self.model_desc.outputs): - self.model_desc.add_type_to_output_description(idx_o, sample_output.dtype) - - # Export the model to ONNX - f = io.BytesIO() - - # Deepcopy inputs, since input values may change after model run. - sample_inputs_copy = copy.deepcopy(sample_inputs) - - # Handle contrib OPs support - from onnxruntime.tools import pytorch_export_contrib_ops - - if self.options._internal_use.enable_onnx_contrib_ops: - pytorch_export_contrib_ops.register() - else: - # Unregister in case they were registered in previous calls. - pytorch_export_contrib_ops.unregister() - - # Export torch.nn.Module to ONNX - torch.onnx.export( - model, - tuple(sample_inputs_copy), - f, - input_names=[input.name for input in self.model_desc.inputs], - output_names=[output.name for output in self.model_desc.outputs], - opset_version=self.options._internal_use.onnx_opset_version, - dynamic_axes=dynamic_axes, - do_constant_folding=False, - training=torch.onnx.TrainingMode.TRAINING, - ) - onnx_model = onnx.load_model_from_string(f.getvalue()) - - # Remove 'model.' 
prefix introduced by CombineTorchModelLossFn class - if isinstance(model, CombineTorchModelLossFnWrapInput): - replace_name_dict = {} - for n in onnx_model.graph.initializer: - if n.name.startswith("model."): - replace_name_dict[n.name] = n.name[len("model.") :] - n.name = replace_name_dict[n.name] - for n in onnx_model.graph.node: - for i, name in enumerate(n.input): - if name in replace_name_dict: - n.input[i] = replace_name_dict[name] - - return onnx_model - - def _create_ort_training_session(self, optimizer_state_dict=None, session_options=None, provider_options=None): - if optimizer_state_dict is None: - optimizer_state_dict = {} - # Validating frozen_weights names - unused_frozen_weights = [ - n - for n in self.options.utils.frozen_weights - if n not in [i.name for i in self._onnx_model.graph.initializer] - ] - if unused_frozen_weights: - raise RuntimeError(f"{unused_frozen_weights} params from 'frozen_weights' not found in the ONNX model.") - - # Get loss name from model description - loss_name = [item.name for item in self.model_desc.outputs if item.is_loss] - assert len(loss_name) == 1, f"Only one loss output is supported ({len(loss_name)} were specified)" - loss_name = loss_name[0] - - # Parse optimizer parameters - optimizer_attributes_map = {} - optimizer_int_attributes_map = {} - trainable_params = set() - for initializer in self._onnx_model.graph.initializer: - if initializer.name in self.options.utils.frozen_weights: - continue # only trainable parameters are passed to the backend - trainable_params.add(initializer.name) - optimizer_attributes_map[initializer.name] = {} - optimizer_int_attributes_map[initializer.name] = {} - not_in_param_groups = True - for param_group in self.optim_config.params: - if initializer.name not in param_group["params"]: - continue # keep looking for a matching param_group - not_in_param_groups = False - for k, v in param_group.items(): - # 'params' is not a hyper parameter, skip it. 
'lr' per weight is not supported - if k == "params" or k == "lr": - continue - if isinstance(v, float): - optimizer_attributes_map[initializer.name][k] = v - elif isinstance(v, int): - optimizer_int_attributes_map[initializer.name][k] = v - else: - raise ValueError("Optimizer attributes must be either float or int.") - - # set default values for params not found in groups - if not_in_param_groups: - for k, v in self.optim_config.defaults.items(): - if k == "lr": - continue - if isinstance(v, float): - optimizer_attributes_map[initializer.name][k] = v - elif isinstance(v, int): - optimizer_int_attributes_map[initializer.name][k] = v - else: - raise ValueError("Optimizer attributes must be either float or int.") - - self.options.distributed.horizontal_parallel_size = max(self.options.distributed.horizontal_parallel_size, 1) - self.options.distributed.data_parallel_size = ( - self.options.distributed.world_size // self.options.distributed.horizontal_parallel_size - ) - - # TrainingParameters - ort_parameters = ort.TrainingParameters() - ort_parameters.loss_output_name = loss_name - ort_parameters.use_mixed_precision = self.options.mixed_precision.enabled - ort_parameters.world_rank = self.options.distributed.world_rank - ort_parameters.world_size = self.options.distributed.world_size - ort_parameters.gradient_accumulation_steps = self.options.batch.gradient_accumulation_steps - ort_parameters.allreduce_post_accumulation = self.options.distributed.allreduce_post_accumulation - ort_parameters.enable_adasum = self.options.distributed.enable_adasum - ort_parameters.deepspeed_zero_stage = self.options.distributed.deepspeed_zero_optimization.stage - ort_parameters.enable_grad_norm_clip = self.options.utils.grad_norm_clip - ort_parameters.set_gradients_as_graph_outputs = False - ort_parameters.use_memory_efficient_gradient = self.options.utils.memory_efficient_gradient - ort_parameters.training_optimizer_name = self.optim_config.name - ort_parameters.lr_params_feed_name = self.model_desc.learning_rate.name - ort_parameters.weights_to_train = trainable_params - ort_parameters.optimizer_attributes_map = optimizer_attributes_map - ort_parameters.optimizer_int_attributes_map = optimizer_int_attributes_map - if bool(optimizer_state_dict): - ort_parameters.set_optimizer_initial_state(optimizer_state_dict) - - ort_parameters.attn_dropout_recompute = self.options.graph_transformer.attn_dropout_recompute - ort_parameters.gelu_recompute = self.options.graph_transformer.gelu_recompute - ort_parameters.transformer_layer_recompute = self.options.graph_transformer.transformer_layer_recompute - ort_parameters.number_recompute_layers = self.options.graph_transformer.number_recompute_layers - - ort_parameters.data_parallel_size = self.options.distributed.data_parallel_size - ort_parameters.horizontal_parallel_size = self.options.distributed.horizontal_parallel_size - ort_parameters.pipeline_parallel_size = self.options.distributed.pipeline_parallel.pipeline_parallel_size - ort_parameters.num_pipeline_micro_batches = ( - self.options.distributed.pipeline_parallel.num_pipeline_micro_batches - ) - ort_parameters.pipeline_cut_info_string = self.options.distributed.pipeline_parallel.pipeline_cut_info_string - # We have special handling for dictionary-typed option. - # sliced_schema._validated_opts is the original dictionary while sliced_schema is a _ORTTrainerOptionsInternal. 
- ort_parameters.sliced_schema = self.options.distributed.pipeline_parallel.sliced_schema._validated_opts - # We have special handling for dictionary-typed option. - # sliced_axes._validated_opts is the original dictionary while sliced_schema is a _ORTTrainerOptionsInternal. - ort_parameters.sliced_axes = self.options.distributed.pipeline_parallel.sliced_axes._validated_opts - ort_parameters.sliced_tensor_names = self.options.distributed.pipeline_parallel.sliced_tensor_names - - ort_parameters.model_after_graph_transforms_path = ( - self.options.debug.graph_save_paths.model_after_graph_transforms_path - ) - ort_parameters.model_with_gradient_graph_path = ( - self.options.debug.graph_save_paths.model_with_gradient_graph_path - ) - ort_parameters.model_with_training_graph_path = ( - self.options.debug.graph_save_paths.model_with_training_graph_path - ) - - # SessionOptions - session_options = ort.SessionOptions() if session_options is None else session_options - session_options.use_deterministic_compute = self.options.debug.deterministic_compute - if ( - self.options.graph_transformer.attn_dropout_recompute - or self.options.graph_transformer.gelu_recompute - or self.options.graph_transformer.transformer_layer_recompute - ): - session_options.execution_order = ort.ExecutionOrder.PRIORITY_BASED - if len(self.options.debug.graph_save_paths.model_with_training_graph_after_optimization_path) > 0: - session_options.optimized_model_filepath = ( - self.options.debug.graph_save_paths.model_with_training_graph_after_optimization_path - ) - - # old ort session may already exists and occupies GPU memory when creating new session, this may cause OOM error. - # for example, load_state_dict will be called before returing the function, and it calls _init_session again - del self._training_session - - # Set provider-specific options if needed - def get_providers(provider_options): - providers = ort.get_available_providers() - if provider_options: - for provider_name in provider_options: - if provider_name in providers: - providers[providers.index(provider_name)] = (provider_name, provider_options[provider_name]) - else: - providers.insert(0, (provider_name, provider_options[provider_name])) - # default: using cuda - elif "cuda" in self.options.device.id.lower(): - gpu_ep_options = {"device_id": _utils.get_device_index(self.options.device.id)} - gpu_ep_name = "ROCMExecutionProvider" if self.is_rocm_pytorch else "CUDAExecutionProvider" - if self.options.device.mem_limit > 0: - gpu_ep_options["gpu_mem_limit"] = self.options.device.mem_limit - - if gpu_ep_name not in providers: - raise RuntimeError( - "ORTTrainer options specify a CUDA device but the {} provider is unavailable.".format( - cuda_ep_name # noqa: F821 - ) - ) - - providers[providers.index(gpu_ep_name)] = (gpu_ep_name, gpu_ep_options) - - return providers - - # TrainingSession - self._training_session = ort.TrainingSession( - self._onnx_model.SerializeToString(), ort_parameters, session_options, get_providers(provider_options) - ) - - # I/O bindings - self._train_io_binding = self._training_session.io_binding() - self._eval_io_binding = self._training_session.io_binding() - - def _init_onnx_model(self, inputs): - if self._onnx_model is not None: - return - - if self._torch_model is not None: - # PyTorch model is moved to cpu to save GPU memory - self._torch_model.cpu() - - # PyTorch buffers (created using 'register_buffer') shouldn't be trained - torch_buffers = list(dict(self._torch_model.named_buffers()).keys()) - 
self.options.utils.frozen_weights.extend(torch_buffers) - - # Export to ONNX - self._onnx_model = self._convert_torch_model_loss_fn_to_onnx(inputs, "cpu") - - # Post processing for ONNX models expported from PyTorch - if self.options._internal_use.enable_internal_postprocess: - self._onnx_model = postprocess.run_postprocess(self._onnx_model) - if self.options._internal_use.extra_postprocess: - self._onnx_model = self.options._internal_use.extra_postprocess(self._onnx_model) - - optimizer_state_dict = {} - if self._load_state_dict: - optimizer_state_dict = self._load_state_dict() - - self._init_session( - optimizer_state_dict, - session_options=self.options.session_options, - provider_options=self.options._validated_opts["provider_options"], - ) - - def _init_session(self, optimizer_state_dict={}, session_options=None, provider_options=None): # noqa: B006 - if self._onnx_model is None: - return - - if self.options.utils.run_symbolic_shape_infer: - self._onnx_model = SymbolicShapeInference.infer_shapes( - self._onnx_model, auto_merge=True, guess_output_rank=True - ) - - # Create training session used by train_step - # pass all optimizer states to the backend - self._create_ort_training_session( - optimizer_state_dict, session_options=session_options, provider_options=provider_options - ) - - # Update model description to update dtype when mixed precision is enabled - # C++ backend modifies model's output dtype from float32 to float16 for mixed precision - # Note that for training we must use float32 and for evaluation we must use float16 - for idx, o_desc in enumerate(self.model_desc.outputs): - if ( - self.options.mixed_precision.enabled - and o_desc.dtype == torch.float32 - and not self._training_session.is_output_fp32_node(o_desc.name) - ): - self.model_desc.add_type_to_output_description(idx, o_desc.dtype, torch.float16) - - # Update model description - self._model_desc_inputs_with_lr = [*self.model_desc.inputs, self.model_desc.learning_rate] - - # Update Mixed Precision, if applicable - if self.options.mixed_precision.enabled: - self.model_desc.loss_scale_input = self._training_session.loss_scale_input_name - self._model_desc_inputs_with_lr_and_loss_scale = [ - *self._model_desc_inputs_with_lr, - self.model_desc.loss_scale_input, - ] - self.model_desc.all_finite = _utils.get_all_gradients_finite_name_from_session(self._training_session) - self._model_desc_outputs_with_all_finite = [*self.model_desc.outputs, self.model_desc.all_finite] - elif self.options.mixed_precision.loss_scaler: - raise ValueError("Loss Scaler cannot be specified when Mixed Precision is not enabled") - - # Update Loss Scaler Input Name, if applicable - if self.options.mixed_precision.enabled and self.options.mixed_precision.loss_scaler: - self.options.mixed_precision.loss_scaler.input_name = self.model_desc.loss_scale_input.name - elif not self.options.mixed_precision.enabled and self.options.mixed_precision.loss_scaler: - raise ValueError("Loss Scaler cannot be specified when Mixed Precision is not enabled") - - # Update Gradient Accumulation, if applicable - if self.options.batch.gradient_accumulation_steps > 1: - self.model_desc.gradient_accumulation = _utils.get_gradient_accumulation_name_from_session( - self._training_session - ) - self._model_desc_outputs_with_gradient_accumulation = [ - *self.model_desc.outputs, - self.model_desc.gradient_accumulation, - ] - - # TODO: Remove when experimental checkpoint functions are removed - if self._state_dict: - checkpoint.experimental_load_state_dict(self, 
self._state_dict, self._load_state_dict_strict) - self._state_dict_debug = self._state_dict - self._state_dict = {} - - def _prepare_model_input(self, inputs_desc, lr, loss_scale, *inputs, **kwargs): - # Normalize input to tuple of samples - if type(inputs) == tuple and len(inputs) == 1 and type(inputs[0]) == list: # noqa: E721 - input = tuple(inputs[0]) - else: - input = inputs - - # Append input from 'kwargs' - for input_desc in inputs_desc: - if input_desc.name in kwargs: - input = (*input, kwargs[input_desc.name]) - - # Append learning rate - extra_inputs = 0 - if lr is not None: - lr = torch.tensor([lr]) - input += (lr,) - extra_inputs += 1 - - # Append loss scale - if loss_scale is not None: - assert self.options.mixed_precision.enabled, "Loss scale cannot be used without mixed precision" - loss_scale = torch.tensor([loss_scale]) - input += (loss_scale,) - extra_inputs += 1 - - # Only assert length of input when fetches is not used - assert self._train_step_info.fetches or len(self.model_desc.inputs) + extra_inputs == len(input) - return input - - def _resolve_symbolic_dimensions(self, inputs, inputs_desc, outputs_desc): - outputs = copy.deepcopy(outputs_desc) - resolved_dims = {} - for input, i_desc in zip(inputs, inputs_desc): - for i_idx, i_axis in enumerate(i_desc.shape): - if isinstance(i_axis, str): - if i_axis not in resolved_dims: - resolved_dims[i_axis] = input.size()[i_idx] - else: - assert resolved_dims[i_axis] == input.size()[i_idx], f"Mismatch in dynamic shape {i_axis}" - - for o_desc in outputs: - for idx_o, o_axis in enumerate(o_desc.shape): - if isinstance(o_axis, str): - o_desc.shape[idx_o] = resolved_dims[o_axis] - - unknown_dim = [o_desc.name for dim in o_desc.shape for o_desc in outputs if isinstance(dim, str)] - if unknown_dim: - raise RuntimeError(f"Cannot execute model with unknown output dimensions ({unknown_dim}") - - return outputs - - def _training_session_run_helper(self, is_train, inputs, inputs_desc, outputs_desc, run_options=None): - # Select IO binding - if is_train: - iobinding = self._train_io_binding - else: - iobinding = self._eval_io_binding - - # Get the list of the actual session inputs because unused inputs can be removed. - input_nodes = self._training_session.get_inputs() - input_node_names = [input_node.name for input_node in input_nodes] - - # Bind input tensors - for input, input_desc in zip(inputs, inputs_desc): - if input_desc.name in input_node_names: - device_index = _utils.get_device_index_from_input(input) - iobinding.bind_input( - input_desc.name, - input.device.type, - device_index, - _utils.dtype_torch_to_numpy(input.dtype), - list(input.size()), - input.data_ptr(), - ) - - # Bind output tensors - outputs_desc_resolved = self._resolve_symbolic_dimensions(inputs, inputs_desc, outputs_desc) - result = {} - for output_desc in outputs_desc_resolved: - target_device = self.options.device.id - if self.options.mixed_precision.enabled and output_desc.name == self.model_desc.all_finite.name: - # Keep all finite flag on CPU to match backend implementation - # This prevents CPU -> GPU -> CPU copies between frontend and backend - target_device = "cpu" - # the self.options.device may be a device that pytorch does not recognize. - # in that case, we temporary prefer to leave the input/output on CPU and let ORT session - # to move the data between device and host. - # so output will be on the same device as input. 
- try: - torch.device(target_device) - except Exception: - # in this case, input/output must on CPU - assert input.device.type == "cpu" - target_device = "cpu" - - torch_tensor = torch.zeros( - output_desc.shape, - device=target_device, - dtype=output_desc.dtype_amp if output_desc.dtype_amp else output_desc.dtype, - ) - iobinding.bind_output( - output_desc.name, - torch_tensor.device.type, - _utils.get_device_index(target_device), - _utils.dtype_torch_to_numpy(torch_tensor.dtype), - list(torch_tensor.size()), - torch_tensor.data_ptr(), - ) - result[output_desc.name] = torch_tensor - - # Run a train/eval step - self._training_session.run_with_iobinding(iobinding, run_options) - return result - - def _update_onnx_model_initializers(self, state_tensors): - r"""Updates ONNX graph initializers with state_tensors's values - - Usually called to save or load an ONNX model. - - The tensors names of state_tensors are compared to all ONNX initializer tensors - and when the name matches, the ONNX graph is updated with the new value. - """ - assert isinstance(state_tensors, dict), "state_tensors must be a dict" - - new_weights = [] - replace_indices = [] - for i, w in enumerate(self._onnx_model.graph.initializer): - if w.name in state_tensors: - new_weights.append(onnx.numpy_helper.from_array(state_tensors[w.name], w.name)) - replace_indices.append(i) - replace_indices.sort(reverse=True) - for w_i in replace_indices: - del self._onnx_model.graph.initializer[w_i] - self._onnx_model.graph.initializer.extend(new_weights) - - def _extract_model_states(self, state_dict, pytorch_format): - """Extract model states from the training session and load into the state_dict""" - - model_states = self._training_session.get_model_state(include_mixed_precision_weights=False) - state_dict[_utils.state_dict_model_key()] = {} - - # extract trained model weights from the training session - for precision in model_states: - state_dict[_utils.state_dict_model_key()][precision] = {} - for model_state_key in model_states[precision]: - if pytorch_format: - state_dict[_utils.state_dict_model_key()][precision][model_state_key] = torch.from_numpy( - model_states[precision][model_state_key] - ) - else: - state_dict[_utils.state_dict_model_key()][precision][model_state_key] = model_states[precision][ - model_state_key - ] - - # extract untrained (frozen) model weights - for node in self._onnx_model.graph.initializer: - if ( - node.name not in state_dict[_utils.state_dict_model_key()][_utils.state_dict_full_precision_key()] - and node.name in self.options.utils.frozen_weights - ): - if pytorch_format: - state_dict[_utils.state_dict_model_key()][_utils.state_dict_full_precision_key()][ - node.name - ] = torch.from_numpy(onnx.numpy_helper.to_array(node)) - else: - state_dict[_utils.state_dict_model_key()][_utils.state_dict_full_precision_key()][ - node.name - ] = onnx.numpy_helper.to_array(node) - - def _extract_trainer_options(self, state_dict): - """Extract relevant trainer configuration and load it into the state_dict""" - - mixed_precision = _utils.state_dict_trainer_options_mixed_precision_key() - zero_stage = _utils.state_dict_trainer_options_zero_stage_key() - world_rank = _utils.state_dict_trainer_options_world_rank_key() - world_size = _utils.state_dict_trainer_options_world_size_key() - optimizer_name = _utils.state_dict_trainer_options_optimizer_name_key() - D_size = _utils.state_dict_trainer_options_data_parallel_size_key() # noqa: N806 - H_size = _utils.state_dict_trainer_options_horizontal_parallel_size_key() # noqa: N806 
- - state_dict[_utils.state_dict_trainer_options_key()] = {} - state_dict[_utils.state_dict_trainer_options_key()][mixed_precision] = self.options.mixed_precision.enabled - state_dict[_utils.state_dict_trainer_options_key()][ - zero_stage - ] = self.options.distributed.deepspeed_zero_optimization.stage - state_dict[_utils.state_dict_trainer_options_key()][world_rank] = self.options.distributed.world_rank - state_dict[_utils.state_dict_trainer_options_key()][world_size] = self.options.distributed.world_size - state_dict[_utils.state_dict_trainer_options_key()][optimizer_name] = self.optim_config.name - state_dict[_utils.state_dict_trainer_options_key()][D_size] = self.options.distributed.data_parallel_size - state_dict[_utils.state_dict_trainer_options_key()][H_size] = self.options.distributed.horizontal_parallel_size - - def _extract_train_step_info(self, state_dict): - """Extract train step info settings and save it into the state_dict""" - - optimization_step = _utils.state_dict_train_step_info_optimization_step_key() - step = _utils.state_dict_train_step_info_step_key() - - state_dict[_utils.state_dict_train_step_info_key()] = {} - state_dict[_utils.state_dict_train_step_info_key()][optimization_step] = self._train_step_info.optimization_step - state_dict[_utils.state_dict_train_step_info_key()][step] = self._train_step_info.step - - def state_dict(self, pytorch_format=False): - """Returns a dictionary with model, train step info and optionally, optimizer states - - The returned dictionary contains the following information: - - Model and optimizer states - - Required ORTTrainerOptions settings - - Distributed training information, such as but not limited to ZeRO - - Train step info settings - - Structure of the returned dictionary: - - When `pytorch_format = False` - schema: - { - "model": - { - type: dict, - schema: - { - "full_precision": - { - type: dict, - schema: - { - model_weight_name: - { - type: array - } - } - } - } - }, - "optimizer": - { - type: dict, - schema: - { - model_weight_name: - { - type: dict, - schema: - { - "Moment_1": - { - type: array - }, - "Moment_2": - { - type: array - }, - "Update_Count": - { - type: array, - optional: True # present if optimizer is adam, absent otherwise - } - } - }, - "shared_optimizer_state": - { - type: dict, - optional: True, # present optimizer is shared, absent otherwise. 
- schema: - { - "step": - { - type: array, - } - } - } - } - }, - "trainer_options": - { - type: dict, - schema: - { - "mixed_precision": - { - type: bool - }, - "zero_stage": - { - type: int - }, - "world_rank": - { - type: int - }, - "world_size": - { - type: int - }, - "optimizer_name": - { - type: str - }, - "data_parallel_size": - { - type: int - }, - "horizontal_parallel_size": - { - type: int - } - } - }, - "partition_info": - { - type: dict, - optional: True, # present if states partitioned, else absent - schema: - { - model_weight_name: - { - type: dict, - schema: - { - "original_dim": - { - type: array - }, - "megatron_row_partition": - { - type: int - } - } - } - } - }, - "train_step_info": - { - type: dict, - schema: - { - "optimization_step": - { - type: int - }, - "step": - { - type: int - } - } - } - } - - When `pytorch_format = True` - schema: - { - model_weight_name: - { - type: tensor - } - } - - Args: - pytorch_format: boolean flag to select either ONNX Runtime or PyTorch state schema - - Returns: - A dictionary with `ORTTrainer` state - """ - if not self._training_session: - warnings.warn( - "ONNX Runtime training session is not initialized yet. " - "Please run train_step or eval_step at least once before calling ORTTrainer.state_dict().", - UserWarning, - ) - return self._load_state_dict.args[0] if self._load_state_dict else {} - - state_dict = {} - - # load training session model states into the state_dict - self._extract_model_states(state_dict, pytorch_format) - if pytorch_format: - if self.options.distributed.deepspeed_zero_optimization.stage > 0: - warnings.warn("Incomplete state_dict: ZeRO enabled", UserWarning) - if self.options.distributed.horizontal_parallel_size > 1: - warnings.warn("Incomplete state_dict: Megatron enabled", UserWarning) - # if pytorch_format is true, return a flat dictionary with only model states - # which is compatible with a PyTorch model - return state_dict[_utils.state_dict_model_key()][_utils.state_dict_full_precision_key()] - - # load training session optimizer states into the state_dict - state_dict[_utils.state_dict_optimizer_key()] = self._training_session.get_optimizer_state() - - # extract the relevant training configuration from the trainer and load them into the state_dict - self._extract_trainer_options(state_dict) - - # Extract train step info settings and load it into the state_dict - self._extract_train_step_info(state_dict) - - # add partition information in case of a distributed run - if ( - self.options.distributed.deepspeed_zero_optimization.stage > 0 - or self.options.distributed.horizontal_parallel_size > 1 - ): - state_dict[_utils.state_dict_partition_info_key()] = self._training_session.get_partition_info_map() - - return state_dict - - def _load_model_states(self, state_dict, strict): - """Load the model states onto the onnx model graph""" - - if _utils.state_dict_model_key() not in state_dict: - return - - # collect all initializer names from the current onnx graph - assert self._onnx_model, "ONNX model graph is not exported" - initializer_names = {node.name for node in self._onnx_model.graph.initializer} - - # loaded_initializers dict will be loaded with all the model states from the state dictionary - # that are found in the initializer_names dictionary - loaded_initializers = {} - - # copy over model states from the input state dict onto the onnx model - for precision, precision_states in state_dict[_utils.state_dict_model_key()].items(): - for state_key, state_value in precision_states.items(): - if state_key 
in initializer_names: - loaded_initializers[state_key] = state_value - elif strict: - raise RuntimeError(f"Unexpected key: {state_key} in state_dict[model][{precision}]") - - # update onnx model from loaded initializers - self._update_onnx_model_initializers(loaded_initializers) - - def _load_optimizer_states(self, current_state_dict, state_dict): - """Load the optimizer states onto the training session state dictionary""" - - def _check_optimizer_mismatch(state_dict): - """Assert that the loaded optimizer has the same config as the current training session config""" - - # the state_dict optimizer_name can be a byte string (if coming from checkpoint file) - # or can be a regular string (coming from user) - optimizer_name = state_dict[_utils.state_dict_trainer_options_key()][ - _utils.state_dict_trainer_options_optimizer_name_key() - ] - - # optimizer_name can be either a regular string or a byte string. - # if it is a byte string, convert to regular string using decode() - # if it is a regular string, do nothing to it - try: # noqa: SIM105 - optimizer_name = optimizer_name.decode() - except AttributeError: - pass - assert self.optim_config.name == optimizer_name, "Optimizer mismatch: expected {}, got {}".format( - self.optim_config.name, optimizer_name - ) - - if _utils.state_dict_optimizer_key() not in state_dict: - return - - # check optimizer config names are the same for current session and the sessino being loaded - _check_optimizer_mismatch(state_dict) - - # create an entry for the optimizer in the training session state dictionary - if _utils.state_dict_optimizer_key() not in current_state_dict: - current_state_dict[_utils.state_dict_optimizer_key()] = {} - - # copy over optimizer states from the input state dict onto the training session state dict - for model_state_key, optimizer_dict in state_dict[_utils.state_dict_optimizer_key()].items(): - if model_state_key not in current_state_dict[_utils.state_dict_optimizer_key()]: - current_state_dict[_utils.state_dict_optimizer_key()][model_state_key] = {} - for optimizer_state_key, optimizer_state_value in optimizer_dict.items(): - current_state_dict[_utils.state_dict_optimizer_key()][model_state_key][ - optimizer_state_key - ] = optimizer_state_value - - def _load_state_dict_impl(self, state_dict, strict=True): - """Load the state dictionary onto the onnx model and on the training session graph""" - - # clear the callable partial - self._load_state_dict = None - - def _mismatch_keys(keys1, keys2, in_error_str, allow_unexpected=False): - """Find out the missing and the unexpected keys in two dictionaries - - Throws a runtime error if missing or unexpected keys are found - - Keys in keys1 not in keys2 will be marked as missing - - Keys in keys2 not in keys1 will be marked as unexpected - """ - keys1 = set(keys1) - keys2 = set(keys2) - missing_keys = list(keys1 - keys2) - unexpected_keys = list(keys2 - keys1) - if len(missing_keys) > 0: - raise RuntimeError(f"Missing keys: {missing_keys} in {in_error_str}") - if len(unexpected_keys) > 0 and not allow_unexpected: - raise RuntimeError(f"Unexpected keys: {unexpected_keys} in {in_error_str}") - - def _check_model_key_mismatch(current_state_dict, state_dict, allow_unexpected=False): - """Check if there is any mismatch in the model sub state dictionary between the two state_dicts""" - - # check unxexpected and missing precision keys in the model state_dict compared to the training - # session model state_dict - _mismatch_keys( - current_state_dict[_utils.state_dict_model_key()], - 
state_dict[_utils.state_dict_model_key()], - "state_dict[model]", - allow_unexpected, - ) - - # check for model state key mismatch - for precision_key in current_state_dict[_utils.state_dict_model_key()]: - _mismatch_keys( - current_state_dict[_utils.state_dict_model_key()][precision_key], - state_dict[_utils.state_dict_model_key()][precision_key], - f"state_dict[model][{precision_key}]", - allow_unexpected, - ) - - def _check_optimizer_key_mismatch(current_state_dict, state_dict, allow_unexpected=False): - """Check if there is any mismatch in the optimizer sub state dictionary between the two state_dicts""" - - # check for model state key mismatch for the optimizer state_dict - _mismatch_keys( - current_state_dict[_utils.state_dict_optimizer_key()], - state_dict[_utils.state_dict_optimizer_key()], - "state_dict[optimizer]", - allow_unexpected, - ) - - # check for optimizer state keys mismatch - for model_state_key in current_state_dict[_utils.state_dict_optimizer_key()]: - _mismatch_keys( - current_state_dict[_utils.state_dict_optimizer_key()][model_state_key], - state_dict[_utils.state_dict_optimizer_key()][model_state_key], - f"state_dict[optimizer][{model_state_key}]", - allow_unexpected, - ) - - def _check_key_mismatch(current_state_dict, state_dict, allow_unexpected=False): - """Check if there is a mismatch in the keys (model and optimizer) in the two state_dicts""" - - # check presence of 'model' in the input state_dict - if _utils.state_dict_model_key() in state_dict: - _check_model_key_mismatch(current_state_dict, state_dict, allow_unexpected) - else: - warnings.warn("Missing key: model in state_dict", UserWarning) - # check presence of 'optimizer' in the input state_dict - if _utils.state_dict_optimizer_key() in state_dict: - _check_optimizer_key_mismatch(current_state_dict, state_dict, allow_unexpected) - else: - warnings.warn("Missing key: optimizer in state_dict", UserWarning) - - # extract state dict from the current training session. this is to persist the states between - # two training sessions. 
- # for example, if user provided only the model states, the optimizer states from the current - # training session must be persisted - current_state_dict = {} - if self._training_session: - current_state_dict = self.state_dict() - if strict: - # for Zero enabled, the current trainer might not have the complete state, and we must allow - # extra keys to be present in the state dict - allow_unexpected = self.options.distributed.deepspeed_zero_optimization.stage > 0 - _check_key_mismatch(current_state_dict, state_dict, allow_unexpected) - - # load the model states from the input state dictionary into the onnx graph - self._load_model_states(state_dict, strict) - - # load the optimizer states from the input state dictionary into the training session states - # dictionary - self._load_optimizer_states(current_state_dict, state_dict) - - return ( - current_state_dict[_utils.state_dict_optimizer_key()] - if _utils.state_dict_optimizer_key() in current_state_dict - else {} - ) - - def _load_train_step_info(self, state_dict): - """Load the train step info settings from state dict""" - - if _utils.state_dict_train_step_info_key() not in state_dict: - warnings.warn("Missing key: train_step_info in state_dict", UserWarning) - return - - optimization_step = _utils.state_dict_train_step_info_optimization_step_key() - step = _utils.state_dict_train_step_info_step_key() - - self._train_step_info.optimization_step = state_dict[_utils.state_dict_train_step_info_key()][optimization_step] - self._train_step_info.step = state_dict[_utils.state_dict_train_step_info_key()][step] - - def load_state_dict(self, state_dict, strict=True): - """Loads state_dict containing model/optimizer states into ORTTrainer - - The state_dict dictionary may contain the following information: - - Model and optimizer states - - Required ORTTrainerOptions settings - - Distributed training information, such as but not limited to ZeRO - - Args: - state_dict: state dictionary containing both model and optimizer states. The structure of this dictionary - should be the same as the one that is returned by ORTTrainer.state_dict for the case when pytorch_format=False - strict: boolean flag to strictly enforce that the input state_dict keys match the keys from ORTTrainer.state_dict - """ - - # if onnx graph has not been initialized, loading of states will be put on hold. - # a copy of the state_dict and other arguments to the function will be stored until the onnx graph has - # been initialized. Once the graph is initialized, the desired states will be loaded onto the grpah - if not self._training_session: - self._load_state_dict = partial(self._load_state_dict_impl, state_dict, strict=strict) - return - - # load the train step info settings - self._load_train_step_info(state_dict) - - # load states onto the frontend onnx graph - optimizer_state_dict = self._load_state_dict_impl(state_dict, strict=strict) - - # create a new training session after loading initializer states onto the onnx graph - # pass the populated states to the training session to populate the backend graph - self._init_session( - optimizer_state_dict, - session_options=self.options.session_options, - provider_options=self.options._validated_opts["provider_options"], - ) - - def save_checkpoint(self, path, user_dict={}, include_optimizer_states=True): # noqa: B006 - """Persists ORTTrainer state dictionary on disk along with user_dict. - - Saves the state_dict along with the user_dict to a file specified by path. 
- - Args: - path: string representation to a file path or a python file-like object. - if file already exists at path, an exception is raised. - user_dict: custom data to be saved along with the state_dict. This data will be returned - to the user when load_checkpoint is called. - include_optimizer_states: boolean flag indicating whether or not to persist the optimizer states. - on load_checkpoint, only model states will be loaded if include_optimizer_states==True - """ - - # extract state_dict to be saved in the checkpoint - state_dict = self.state_dict() - - # if user_dict is provided, serialize to bytes and convert to hex string. - # this helps in loading the types as they are given by the user since hdf5 - # converts to numpy types otherwise - if bool(user_dict): - state_dict[_utils.state_dict_user_dict_key()] = _checkpoint_storage.to_serialized_hex(user_dict) - - # if include_optimizer_states is False, only save the model states in the checkpoint file - if not include_optimizer_states: - if _utils.state_dict_optimizer_key() in state_dict: - del state_dict[_utils.state_dict_optimizer_key()] - - _checkpoint_storage.save(state_dict, path) - - def _aggregation_required(self, loaded_trainer_options): - """Checks if aggregation is required for the loading the state_dict into the ORTTrainer""" - - # To load states in the backend, aggregation is required for every ZeRO - # or Megatron checkpoint - return ( - loaded_trainer_options[_utils.state_dict_trainer_options_zero_stage_key()] > 0 - or loaded_trainer_options[_utils.state_dict_trainer_options_horizontal_parallel_size_key()] > 1 - ) - - def load_checkpoint(self, *paths, strict=True): - """Loads the saved checkpoint state dictionary into the ORTTrainer - - Reads the saved checkpoint files specified by paths from disk and loads the state dictionary - onto the ORTTrainer. - Aggregates the checkpoint files if aggregation is required. 
- - Args: - paths: one or more files represented as strings where the checkpoint is saved - strict: boolean flag to strictly enforce that the saved checkpoint state_dict - keys match the keys from ORTTrainer.state_dict - Returns: - dictionary that the user had saved when calling save_checkpoint - """ - state_dict = {} - - # check if aggregation is required - loaded_trainer_options = _checkpoint_storage.load(paths[0], key=_utils.state_dict_trainer_options_key()) - if self._aggregation_required(loaded_trainer_options): - # if aggregation is required, aggregation logic must be run on the saved checkpoints - state_dict = checkpoint.aggregate_checkpoints(paths, pytorch_format=False) - else: - # if aggregation is not required, there must only be a single file that needs to be loaded - assert len(paths) == 1, f"Expected number of files to load: 1, got {len(paths)}" - state_dict = _checkpoint_storage.load(paths[0]) - - # extract user dict from the saved checkpoint - user_dict = {} - if _utils.state_dict_user_dict_key() in state_dict: - user_dict = _checkpoint_storage.from_serialized_hex(state_dict[_utils.state_dict_user_dict_key()]) - del state_dict[_utils.state_dict_user_dict_key()] - - self.load_state_dict(state_dict, strict=strict) - - return user_dict diff --git a/orttraining/orttraining/python/training/orttrainer_options.py b/orttraining/orttraining/python/training/orttrainer_options.py deleted file mode 100644 index c63ac6f82c87f..0000000000000 --- a/orttraining/orttraining/python/training/orttrainer_options.py +++ /dev/null @@ -1,692 +0,0 @@ -import cerberus - -import onnxruntime as ort -from onnxruntime.capi._pybind_state import PropagateCastOpsStrategy - -from .amp import loss_scaler -from .optim import lr_scheduler - - -class ORTTrainerOptions: - r"""Settings used by ONNX Runtime training backend - - The parameters are hierarchically organized to facilitate configuration through semantic groups - that encompasses features, such as distributed training, etc. - - Input validation is performed on the input dict during instantiation to ensure - that supported parameters and values are passed in. Invalid input results - in :py:obj:`ValueError` exception with details on it. - - Args: - options (dict): contains all training options - _validate (bool, default is True): for internal use only - - Supported schema for kwargs: - - .. 
code-block:: python - - schema = { - 'batch' : { - 'type' : 'dict', - 'required': False, - 'default' : {}, - 'schema' : { - 'gradient_accumulation_steps' : { - 'type' : 'integer', - 'min' : 1, - 'default' : 1 - } - }, - }, - 'device' : { - 'type' : 'dict', - 'required': False, - 'default' : {}, - 'schema' : { - 'id' : { - 'type' : 'string', - 'default' : 'cuda' - }, - 'mem_limit' : { - 'type' : 'integer', - 'min' : 0, - 'default' : 0 - } - } - }, - 'distributed': { - 'type': 'dict', - 'default': {}, - 'required': False, - 'schema': { - 'world_rank': { - 'type': 'integer', - 'min': 0, - 'default': 0 - }, - 'world_size': { - 'type': 'integer', - 'min': 1, - 'default': 1 - }, - 'local_rank': { - 'type': 'integer', - 'min': 0, - 'default': 0 - }, - 'data_parallel_size': { - 'type': 'integer', - 'min': 1, - 'default': 1 - }, - 'horizontal_parallel_size': { - 'type': 'integer', - 'min': 1, - 'default': 1 - }, - 'pipeline_parallel' : { - 'type': 'dict', - 'default': {}, - 'required': False, - 'schema': { - 'pipeline_parallel_size': { - 'type': 'integer', - 'min': 1, - 'default': 1 - }, - 'num_pipeline_micro_batches': { - 'type': 'integer', - 'min': 1, - 'default': 1 - }, - 'pipeline_cut_info_string': { - 'type': 'string', - 'default': '' - }, - 'sliced_schema': { - 'type': 'dict', - 'default': {}, - 'keysrules': {'type': 'string'}, - 'valuesrules': { - 'type': 'list', - 'schema': {'type': 'integer'} - } - }, - 'sliced_axes': { - 'type': 'dict', - 'default': {}, - 'keysrules': {'type': 'string'}, - 'valuesrules': {'type': 'integer'} - }, - 'sliced_tensor_names': { - 'type': 'list', - 'schema': {'type': 'string'}, - 'default': [] - } - } - }, - 'allreduce_post_accumulation': { - 'type': 'boolean', - 'default': False - }, - 'deepspeed_zero_optimization': { - 'type': 'dict', - 'default': {}, - 'required': False, - 'schema': { - 'stage': { - 'type': 'integer', - 'min': 0, - 'max': 1, - 'default': 0 - }, - } - }, - 'enable_adasum': { - 'type': 'boolean', - 'default': False - } - } - }, - 'lr_scheduler' : { - 'type' : 'optim.lr_scheduler', - 'nullable' : True, - 'default' : None - }, - 'mixed_precision' : { - 'type' : 'dict', - 'required': False, - 'default' : {}, - 'schema' : { - 'enabled' : { - 'type' : 'boolean', - 'default' : False - }, - 'loss_scaler' : { - 'type' : 'amp.loss_scaler', - 'nullable' : True, - 'default' : None - } - } - }, - 'graph_transformer': { - 'type': 'dict', - 'required': False, - 'default': {}, - 'schema': { - 'attn_dropout_recompute': { - 'type': 'boolean', - 'default': False - }, - 'gelu_recompute': { - 'type': 'boolean', - 'default': False - }, - 'transformer_layer_recompute': { - 'type': 'boolean', - 'default': False - }, - 'number_recompute_layers': { - 'type': 'integer', - 'min': 0, - 'default': 0 - }, - 'propagate_cast_ops_config': { - 'type': 'dict', - 'required': False, - 'default': {}, - 'schema': { - 'propagate_cast_ops_strategy': { - 'type': 'onnxruntime.training.PropagateCastOpsStrategy', - 'default': PropagateCastOpsStrategy.FLOOD_FILL - }, - 'propagate_cast_ops_level': { - 'type': 'integer', - 'default': 1 - }, - 'propagate_cast_ops_allow': { - 'type': 'list', - 'schema': {'type': 'string'}, - 'default': [] - } - } - } - } - }, - 'utils' : { - 'type' : 'dict', - 'required': False, - 'default' : {}, - 'schema' : { - 'frozen_weights' : { - 'type' : 'list', - 'default' : [] - }, - 'grad_norm_clip' : { - 'type' : 'boolean', - 'default' : True - }, - 'memory_efficient_gradient' : { - 'type' : 'boolean', - 'default' : False - }, - 'run_symbolic_shape_infer' : { - 
'type' : 'boolean', - 'default' : False - } - } - }, - 'debug' : { - 'type' : 'dict', - 'required': False, - 'default' : {}, - 'schema' : { - 'deterministic_compute' : { - 'type' : 'boolean', - 'default' : False - }, - 'check_model_export' : { - 'type' : 'boolean', - 'default' : False - }, - 'graph_save_paths' : { - 'type' : 'dict', - 'default': {}, - 'required': False, - 'schema': { - 'model_after_graph_transforms_path': { - 'type': 'string', - 'default': '' - }, - 'model_with_gradient_graph_path':{ - 'type': 'string', - 'default': '' - }, - 'model_with_training_graph_path': { - 'type': 'string', - 'default': '' - }, - 'model_with_training_graph_after_optimization_path': { - 'type': 'string', - 'default': '' - }, - } - }, - } - }, - '_internal_use' : { - 'type' : 'dict', - 'required': False, - 'default' : {}, - 'schema' : { - 'enable_internal_postprocess' : { - 'type' : 'boolean', - 'default' : True - }, - 'extra_postprocess' : { - 'type' : 'callable', - 'nullable' : True, - 'default' : None - }, - 'onnx_opset_version': { - 'type': 'integer', - 'min' : 12, - 'max' :14, - 'default': 14 - }, - 'enable_onnx_contrib_ops' : { - 'type' : 'boolean', - 'default' : True - } - } - }, - 'provider_options':{ - 'type': 'dict', - 'default': {}, - 'required': False, - 'schema': {} - }, - 'session_options': { - 'type': 'SessionOptions', - 'nullable': True, - 'default': None - }, - } - - Keyword arguments: - batch (dict): - batch related settings - batch.gradient_accumulation_steps (int, default is 1): - number of steps to accumulate before do collective gradient reduction - device (dict): - compute device related settings - device.id (string, default is 'cuda'): - device to run training - device.mem_limit (int): - maximum memory size (in bytes) used by device.id - distributed (dict): - distributed training options. - distributed.world_rank (int, default is 0): - rank ID used for data/horizontal parallelism - distributed.world_size (int, default is 1): - number of ranks participating in parallelism - distributed.data_parallel_size (int, default is 1): - number of ranks participating in data parallelism - distributed.horizontal_parallel_size (int, default is 1): - number of ranks participating in horizontal parallelism - distributed.pipeline_parallel (dict): - Options which are only useful to pipeline parallel. - distributed.pipeline_parallel.pipeline_parallel_size (int, default is 1): - number of ranks participating in pipeline parallelism - distributed.pipeline_parallel.num_pipeline_micro_batches (int, default is 1): - number of micro-batches. We divide input batch into micro-batches and run the graph. - distributed.pipeline_parallel.pipeline_cut_info_string (string, default is ''): - string of cutting ids for pipeline partition. - distributed.allreduce_post_accumulation (bool, default is False): - True enables overlap of AllReduce with computation, while False, - postpone AllReduce until all gradients are ready - distributed.deepspeed_zero_optimization: - DeepSpeed ZeRO options. - distributed.deepspeed_zero_optimization.stage (int, default is 0): - select which stage of DeepSpeed ZeRO to use. Stage 0 means disabled. 
- distributed.enable_adasum (bool, default is False): - enable `Adasum `_ - algorithm for AllReduce - lr_scheduler (optim._LRScheduler, default is None): - specifies learning rate scheduler - mixed_precision (dict): - mixed precision training options - mixed_precision.enabled (bool, default is False): - enable mixed precision (fp16) - mixed_precision.loss_scaler (amp.LossScaler, default is None): - specifies a loss scaler to be used for fp16. If not specified, - :py:class:`.DynamicLossScaler` is used with default values. - Users can also instantiate :py:class:`.DynamicLossScaler` and - override its parameters. Lastly, a completely new implementation - can be specified by extending :py:class:`.LossScaler` class from scratch - graph_transformer (dict): - graph transformer related configurations - graph_transformer.attn_dropout_recompute(bool, default False) - graph_transformer.gelu_recompute(bool, default False) - graph_transformer.transformer_layer_recompute(bool, default False) - graph_transformer.number_recompute_layers(bool, default False) - graph_transformer.propagate_cast_ops_config (dict): - graph_transformer.propagate_cast_ops_config.strategy(PropagateCastOpsStrategy, default FLOOD_FILL) - Specify the choice of the cast propagation optimization strategy, either, NONE, INSERT_AND_REDUCE or FLOOD_FILL. - NONE strategy does not perform any cast propagation transformation on the graph, although other optimizations - locally change cast operations, for example, in order to fuse Transpose and MatMul nodes, the TransposeMatMulFunsion optimization could - interchange Transpose and Cast if the Cast node exists between Transpose and MatMul. - INSERT_AND_REDUCE strategy inserts and reduces cast operations around the nodes with allowed opcodes. - FLOOD_FILL strategy expands float16 regions in the graph using the allowed opcodes, and unlike - INSERT_AND_REDUCE does not touch opcodes outside expanded float16 region. - graph_transformer.propagate_cast_ops_config.level(integer, default 1) - Optimize by moving Cast operations if propagate_cast_ops_level is non-negative. - Use predetermined list of opcodes considered safe to move before/after cast operation - if propagate_cast_ops_level is positive and use propagate_cast_ops_allow otherwise. - graph_transformer.propagate_cast_ops_config.allow(list of str, []) - List of opcodes to be considered safe to move before/after cast operation if propagate_cast_ops_level is zero. - attn_dropout_recompute (bool, default is False): - enable recomputing attention dropout to save memory - gelu_recompute (bool, default is False): - enable recomputing Gelu activation output to save memory - transformer_layer_recompute (bool, default is False): - enable recomputing transformer layerwise to save memory - number_recompute_layers (int, default is 0) - number of layers to apply transformer_layer_recompute, by default system will - apply recompute to all the layers, except for the last one - utils (dict): - miscellaneous options - utils.frozen_weights (list of str, []): - list of model parameter names to skip training (weights don't change) - utils.grad_norm_clip (bool, default is True): - enables gradient norm clipping for 'AdamOptimizer' and 'LambOptimizer' - utils.memory_efficient_gradient (bool, default is False): - enables use of memory aware gradient builder. 
- utils.run_symbolic_shape_infer (bool, default is False): - runs symbolic shape inference on the model - debug (dict): - debug options - debug.deterministic_compute (bool, default is False) - forces compute to be deterministic across runs - debug.check_model_export (bool, default is False) - compares PyTorch model outputs with ONNX model outputs in inference before the first - train step to ensure successful model export - debug.graph_save_paths (dict): - paths used for dumping ONNX graphs for debugging purposes - debug.graph_save_paths.model_after_graph_transforms_path (str, default is "") - path to export the ONNX graph after training-related graph transforms have been applied. - No output when it is empty. - debug.graph_save_paths.model_with_gradient_graph_path (str, default is "") - path to export the ONNX graph with the gradient graph added. No output when it is empty. - debug.graph_save_paths.model_with_training_graph_path (str, default is "") - path to export the training ONNX graph with forward, gradient and optimizer nodes. - No output when it is empty. - debug.graph_save_paths.model_with_training_graph_after_optimization_path (str, default is "") - outputs the optimized training graph to the path if nonempty. - _internal_use (dict): - internal options, possibly undocumented, that might be removed without notice - _internal_use.enable_internal_postprocess (bool, default is True): - enable internal post processing of the ONNX model - _internal_use.extra_postprocess (callable, default is None) - a functor to postprocess the ONNX model and return a new ONNX model. - It does not override :py:attr:`._internal_use.enable_internal_postprocess`, but complements it - _internal_use.onnx_opset_version (int, default is 14): - ONNX opset version used during model exporting. - _internal_use.enable_onnx_contrib_ops (bool, default is True) - enable PyTorch to export nodes as contrib ops in ONNX. - This flag may be removed anytime in the future. - session_options (onnxruntime.SessionOptions): - The SessionOptions instance that TrainingSession will use. - provider_options (dict): - The provider_options for customized execution providers. It is a dict mapping from EP name to - key-value pairs, like {'EP1' : {'key1' : 'val1'}, ....} - - Example: - ..
code-block:: python - - opts = ORTTrainerOptions({ - 'batch' : { - 'gradient_accumulation_steps' : 128 - }, - 'device' : { - 'id' : 'cuda:0', - 'mem_limit' : 2*1024*1024*1024, - }, - 'lr_scheduler' : optim.lr_scheduler.LinearWarmupLRScheduler(), - 'mixed_precision' : { - 'enabled': True, - 'loss_scaler': amp.LossScaler(loss_scale=float(1 << 16)) - } - }) - fp16_enabled = opts.mixed_precision.enabled - """ - - def __init__(self, options={}): # noqa: B006 - # Keep a copy of original input for debug - self._original_opts = dict(options) - - # Used for logging purposes - self._main_class_name = self.__class__.__name__ - - # Validates user input - self._validated_opts = dict(self._original_opts) - validator = ORTTrainerOptionsValidator(_ORTTRAINER_OPTIONS_SCHEMA) - self._validated_opts = validator.validated(self._validated_opts) - if self._validated_opts is None: - raise ValueError(f"Invalid options: {validator.errors}") - - # Convert dict in object - for k, v in self._validated_opts.items(): - setattr(self, k, self._wrap(v)) - - def __repr__(self): - return "{%s}" % str( - ", ".join( - f"'{k}': {v!r}" - for (k, v) in self.__dict__.items() - if k not in ["_original_opts", "_validated_opts", "_main_class_name"] - ) - ) - - def _wrap(self, v): - if isinstance(v, (tuple, list, set, frozenset)): - return type(v)([self._wrap(i) for i in v]) - else: - return _ORTTrainerOptionsInternal(self._main_class_name, v) if isinstance(v, dict) else v - - -class _ORTTrainerOptionsInternal(ORTTrainerOptions): - r"""Internal class used by ONNX Runtime training backend for input validation - - NOTE: Users MUST NOT use this class in any way! - """ - - def __init__(self, main_class_name, options): - # Used for logging purposes - self._main_class_name = main_class_name - # We don't call super().__init__(options) here but still called it "_validated_opts" - # instead of "_original_opts" because it has been validated in the top-level - # ORTTrainerOptions's constructor. 
- self._validated_opts = dict(options) - # Convert dict in object - for k, v in dict(options).items(): - setattr(self, k, self._wrap(v)) - - -class ORTTrainerOptionsValidator(cerberus.Validator): - _LR_SCHEDULER = cerberus.TypeDefinition("lr_scheduler", (lr_scheduler._LRScheduler,), ()) - _LOSS_SCALER = cerberus.TypeDefinition("loss_scaler", (loss_scaler.LossScaler,), ()) - - _SESSION_OPTIONS = cerberus.TypeDefinition("session_options", (ort.SessionOptions,), ()) - - _PROPAGATE_CAST_OPS_STRATEGY = cerberus.TypeDefinition( - "propagate_cast_ops_strategy", (PropagateCastOpsStrategy,), () - ) - - types_mapping = cerberus.Validator.types_mapping.copy() - types_mapping["lr_scheduler"] = _LR_SCHEDULER - types_mapping["loss_scaler"] = _LOSS_SCALER - types_mapping["session_options"] = _SESSION_OPTIONS - types_mapping["propagate_cast_ops_strategy"] = _PROPAGATE_CAST_OPS_STRATEGY - - -def _check_is_callable(field, value, error): - result = False - try: - # Python 3 - result = value is None or callable(value) - except Exception: - # Python 3 but < 3.2 - if hasattr(value, "__call__"): # noqa: B004 - result = True - if not result: - error(field, "Must be callable or None") - - -_ORTTRAINER_OPTIONS_SCHEMA = { - "batch": { - "type": "dict", - "default_setter": lambda _: {}, - "required": False, - "schema": {"gradient_accumulation_steps": {"type": "integer", "min": 1, "default": 1}}, - }, - "device": { - "type": "dict", - "default_setter": lambda _: {}, - "required": False, - "schema": { - "id": {"type": "string", "default": "cuda"}, - "mem_limit": {"type": "integer", "min": 0, "default": 0}, - }, - }, - "distributed": { - "type": "dict", - "default_setter": lambda _: {}, - "required": False, - "schema": { - "world_rank": {"type": "integer", "min": 0, "default": 0}, - "world_size": {"type": "integer", "min": 1, "default": 1}, - "local_rank": {"type": "integer", "min": 0, "default": 0}, - "data_parallel_size": {"type": "integer", "min": 1, "default": 1}, - "horizontal_parallel_size": {"type": "integer", "min": 1, "default": 1}, - "pipeline_parallel": { - "type": "dict", - "default_setter": lambda _: {}, - "required": False, - "schema": { - "pipeline_parallel_size": {"type": "integer", "min": 1, "default": 1}, - "num_pipeline_micro_batches": {"type": "integer", "min": 1, "default": 1}, - "pipeline_cut_info_string": {"type": "string", "default": ""}, - "sliced_schema": { - "type": "dict", - "default_setter": lambda _: {}, - "keysrules": {"type": "string"}, - "valuesrules": {"type": "list", "schema": {"type": "integer"}}, - }, - "sliced_axes": { - "type": "dict", - "default_setter": lambda _: {}, - "keysrules": {"type": "string"}, - "valuesrules": {"type": "integer"}, - }, - "sliced_tensor_names": {"type": "list", "schema": {"type": "string"}, "default": []}, - }, - }, - "allreduce_post_accumulation": {"type": "boolean", "default": False}, - "deepspeed_zero_optimization": { - "type": "dict", - "default_setter": lambda _: {}, - "required": False, - "schema": { - "stage": {"type": "integer", "min": 0, "max": 1, "default": 0}, - }, - }, - "enable_adasum": {"type": "boolean", "default": False}, - }, - }, - "lr_scheduler": {"type": "lr_scheduler", "nullable": True, "default": None}, - "mixed_precision": { - "type": "dict", - "default_setter": lambda _: {}, - "required": False, - "schema": { - "enabled": {"type": "boolean", "default": False}, - "loss_scaler": {"type": "loss_scaler", "nullable": True, "default": None}, - }, - }, - "graph_transformer": { - "type": "dict", - "default_setter": lambda _: {}, - 
"required": False, - "schema": { - "attn_dropout_recompute": {"type": "boolean", "default": False}, - "gelu_recompute": {"type": "boolean", "default": False}, - "transformer_layer_recompute": {"type": "boolean", "default": False}, - "number_recompute_layers": {"type": "integer", "min": 0, "default": 0}, - "propagate_cast_ops_config": { - "type": "dict", - "default_setter": lambda _: {}, - "required": False, - "schema": { - "strategy": { - "type": "propagate_cast_ops_strategy", - "nullable": True, - "default": PropagateCastOpsStrategy.FLOOD_FILL, - }, - "level": {"type": "integer", "min": -1, "default": 1}, - "allow": {"type": "list", "schema": {"type": "string"}, "default": []}, - }, - }, - }, - }, - "utils": { - "type": "dict", - "default_setter": lambda _: {}, - "required": False, - "schema": { - "frozen_weights": {"type": "list", "default": []}, - "grad_norm_clip": {"type": "boolean", "default": True}, - "memory_efficient_gradient": {"type": "boolean", "default": False}, - "run_symbolic_shape_infer": {"type": "boolean", "default": False}, - }, - }, - "debug": { - "type": "dict", - "default_setter": lambda _: {}, - "required": False, - "schema": { - "deterministic_compute": {"type": "boolean", "default": False}, - "check_model_export": {"type": "boolean", "default": False}, - "graph_save_paths": { - "type": "dict", - "default_setter": lambda _: {}, - "required": False, - "schema": { - "model_after_graph_transforms_path": {"type": "string", "default": ""}, - "model_with_gradient_graph_path": {"type": "string", "default": ""}, - "model_with_training_graph_path": {"type": "string", "default": ""}, - "model_with_training_graph_after_optimization_path": {"type": "string", "default": ""}, - }, - }, - }, - }, - "_internal_use": { - "type": "dict", - "default_setter": lambda _: {}, - "required": False, - "schema": { - "enable_internal_postprocess": {"type": "boolean", "default": True}, - "extra_postprocess": {"check_with": _check_is_callable, "nullable": True, "default": None}, - "onnx_opset_version": {"type": "integer", "min": 12, "max": 14, "default": 14}, - "enable_onnx_contrib_ops": {"type": "boolean", "default": True}, - }, - }, - "provider_options": { - "type": "dict", - "default_setter": lambda _: {}, - "required": False, - "allow_unknown": True, - "schema": {}, - }, - "session_options": {"type": "session_options", "nullable": True, "default": None}, -} diff --git a/orttraining/orttraining/python/training/postprocess.py b/orttraining/orttraining/python/training/postprocess.py deleted file mode 100644 index 6c2adb6af7978..0000000000000 --- a/orttraining/orttraining/python/training/postprocess.py +++ /dev/null @@ -1,478 +0,0 @@ -import os.path # noqa: F401 -import struct -import sys # noqa: F401 - -import numpy as np # noqa: F401 -import onnx -from onnx import * # noqa: F403 -from onnx import helper, numpy_helper # noqa: F401 - - -def run_postprocess(model): - # this post pass is not required for pytorch >= 1.5 - # where add_node_name in torch.onnx.export is default to True - model = add_name(model) - - # this post pass is not required for pytorch > 1.6 - model = fuse_softmaxNLL_to_softmaxCE(model) - - model = fix_expand_shape(model) - model = fix_expand_shape_pt_1_5(model) - return model - - -def find_input_node(model, arg): - result = [] - for node in model.graph.node: - for output in node.output: - if output == arg: - result.append(node) - return result[0] if len(result) == 1 else None - - -def find_output_node(model, arg): - result = [] - for node in model.graph.node: - for input in 
node.input: - if input == arg: - result.append(node) - return result[0] if len(result) == 1 else result - - -def add_name(model): - i = 0 - for node in model.graph.node: - node.name = "%s_%d" % (node.op_type, i) - i += 1 - return model - - -# Expand Shape PostProcess - - -def fix_expand_shape(model): - expand_nodes = [n for n in model.graph.node if n.op_type == "Expand"] - model_inputs_names = [i.name for i in model.graph.input] - - for expand_node in expand_nodes: - shape = find_input_node(model, expand_node.input[1]) - if shape.op_type == "Shape": - # an expand subgraph - # Input Input2 - # | | - # | Shape - # | | - # |__ __| - # | | - # Expand - # | - # output - # - # Only if Input2 is one of the model inputs, assign Input2's shape to output of expand. - shape_input_name = shape.input[0] - if shape_input_name in model_inputs_names: - index = model_inputs_names.index(shape_input_name) - expand_out = model.graph.value_info.add() - expand_out.name = expand_node.output[0] - expand_out.type.CopyFrom(model.graph.input[index].type) - return model - - -def fix_expand_shape_pt_1_5(model): - # expand subgraph - # Constant - # + - # ConstantOfShape - # | + | - # | + | - # (Reshape subgraph) Mul | - # |___ _________| | - # + | | | - # + Equal | - # +++++|++++++++++++++|++ - # |____________ | + - # | | + - # (subgraph) Where - # | | - # |_____ ___________| - # | | - # Expand - # | - # output - # - # where the Reshape subgraph is - # - # Input - # | | - # | |___________________ - # | | - # Shape Constant Shape Constant - # | ______| | ______| - # | | | | - # Gather Gather - # | | - # Unsqueeze Unsqueeze - # | | - # | ..Number of dims.. | - # | _________________| - # |...| - # Concat Constant - # | | - # |______ __________________| - # | | - # Reshape - # | - # output - # - # This pass will copy Input's shape to the output of Expand. 
- expand_nodes = [n for n in model.graph.node if n.op_type == "Expand"] - model_inputs_names = [i.name for i in model.graph.input] - - for expand_node in expand_nodes: - n_where = find_input_node(model, expand_node.input[1]) - if n_where.op_type != "Where": - continue - - n_equal = find_input_node(model, n_where.input[0]) - n_cos = find_input_node(model, n_where.input[1]) - n_reshape = find_input_node(model, n_where.input[2]) - - if n_equal.op_type != "Equal" or n_cos.op_type != "ConstantOfShape" or n_reshape.op_type != "Reshape": - continue - - n_reshape_e = find_input_node(model, n_equal.input[0]) - n_mul = find_input_node(model, n_equal.input[1]) - if n_reshape_e != n_reshape or n_mul.op_type != "Mul": - continue - - n_cos_m = find_input_node(model, n_mul.input[0]) - n_constant = find_input_node(model, n_mul.input[1]) - if n_cos_m != n_cos or n_constant.op_type != "Constant": - continue - - n_concat = find_input_node(model, n_reshape.input[0]) - n_constant_r = find_input_node(model, n_reshape.input[1]) - if n_concat.op_type != "Concat" or n_constant_r.op_type != "Constant": - continue - - n_input_candidates = [] - for concat_in in n_concat.input: - n_unsqueeze = find_input_node(model, concat_in) - if n_unsqueeze.op_type != "Unsqueeze": - break - n_gather = find_input_node(model, n_unsqueeze.input[0]) - if n_gather.op_type != "Gather": - break - n_shape = find_input_node(model, n_gather.input[0]) - n_constant_g = find_input_node(model, n_gather.input[1]) - if n_shape.op_type != "Shape" or n_constant_g.op_type != "Constant": - break - n_input = n_shape.input[0] - if n_input not in model_inputs_names: - break - n_input_candidates.append(n_input) - - if not n_input_candidates or not all(elem == n_input_candidates[0] for elem in n_input_candidates): - continue - - index = model_inputs_names.index(n_input_candidates[0]) - expand_out = model.graph.value_info.add() - expand_out.name = expand_node.output[0] - expand_out.type.CopyFrom(model.graph.input[index].type) - return model - - -# LayerNorm PostProcess - - -def find_nodes(graph, op_type): - nodes = [] - for node in graph.node: - if node.op_type == op_type: - nodes.append(node) - return nodes - - -def is_type(node, op_type): - if node is None or isinstance(node, list): - return False - return node.op_type == op_type - - -def add_const(model, name, output, t_value=None, f_value=None): - const_node = model.graph.node.add() - const_node.op_type = "Constant" - const_node.name = name - const_node.output.extend([output]) - attr = const_node.attribute.add() - attr.name = "value" - if t_value is not None: - attr.type = 4 - attr.t.CopyFrom(t_value) - else: - attr.type = 1 - attr.f = f_value - return const_node - - -def layer_norm_transform(model): - # DEPRECATED: This pass is no longer needed as the transform is handled at the backend. 
- # Converting below subgraph - # - # input - # | - # ReduceMean - # | - # Sub Constant - # _||_____ | - # | | | - # | | | - # | (optional) Cast (optional) Cast - # | | | - # | | ____________________| - # | | | - # | Pow - # | | - # | ReduceMean - # | | - # | Add - # | | - # |__ __Sqrt - # | | - # Div (weight) - # | | - # | _____| - # | | - # Mul (bias) - # | | - # | _____| - # | | - # Add - # | - # output - # - # to the below subgraph - # - # input (weight) (bias) - # | | | - # | _______| | - # | | ________________| - # | | | - # LayerNormalization - # | - # output - graph = model.graph - - nodes_ReduceMean = find_nodes(graph, "ReduceMean") # noqa: N806 - - id = 0 - layer_norm_nodes = [] - remove_nodes = [] - for reduce_mean in nodes_ReduceMean: - # check that reduce_mean output is Sub - sub = find_output_node(model, reduce_mean.output[0]) - if not is_type(sub, "Sub"): - continue - - # check that sub output[0] is Div and output[1] is Pow - pow, div = find_output_node(model, sub.output[0]) - if is_type(pow, "Cast"): - # During an update in PyTorch, Cast nodes are inserted between Sub and Pow. - remove_nodes += [pow] - pow = find_output_node(model, pow.output[0]) - if not is_type(pow, "Pow"): - continue - cast_pow = find_input_node(model, pow.input[1]) - if not is_type(cast_pow, "Cast"): - continue - remove_nodes += [cast_pow] - if not is_type(div, "Div") or not is_type(pow, "Pow"): - continue - - # check that pow ouput is ReduceMean - reduce_mean2 = find_output_node(model, pow.output[0]) - if not is_type(reduce_mean2, "ReduceMean"): - continue - - # check that reduce_mean2 output is Add - add = find_output_node(model, reduce_mean2.output[0]) - if not is_type(add, "Add"): - continue - - # check that add output is Sqrt - sqrt = find_output_node(model, add.output[0]) - if not is_type(sqrt, "Sqrt"): - continue - - # check that sqrt output is div - if div != find_output_node(model, sqrt.output[0]): - continue - - # check if div output is Mul - optional_mul = find_output_node(model, div.output[0]) - if not is_type(optional_mul, "Mul"): - optional_mul = None - continue # default bias and weight not supported - - # check if mul output is Add - if optional_mul is not None: - optional_add = find_output_node(model, optional_mul.output[0]) - else: - optional_add = find_output_node(model, div.output[0]) - if not is_type(optional_add, "Add"): - optional_add = None - continue # default bias and weight not supported - - # add nodes to remove_nodes - remove_nodes.extend([reduce_mean, sub, div, pow, reduce_mean2, add, sqrt]) - - # create LayerNorm node - layer_norm_input = [] - layer_norm_output = [] - - layer_norm_input.append(reduce_mean.input[0]) - - if optional_mul is not None: - remove_nodes.append(optional_mul) - weight = optional_mul.input[1] - layer_norm_input.append(weight) - - if optional_add is not None: - remove_nodes.append(optional_add) - bias = optional_add.input[1] - layer_norm_input.append(bias) - - if optional_add is not None: - layer_norm_output.append(optional_add.output[0]) - elif optional_mul is not None: - layer_norm_output.append(optional_mul.output[0]) - else: - layer_norm_output.append(div.output[0]) - - layer_norm_output.append("saved_mean_" + str(id)) - layer_norm_output.append("saved_inv_std_var_" + str(id)) - - epsilon_node = find_input_node(model, add.input[1]) - epsilon = epsilon_node.attribute[0].t.raw_data - epsilon = struct.unpack("f", epsilon)[0] - - layer_norm = helper.make_node( - "LayerNormalization", - layer_norm_input, - layer_norm_output, - "LayerNormalization_" + 
str(id), - None, - axis=reduce_mean.attribute[0].ints[0], - epsilon=epsilon, - ) - layer_norm_nodes.append(layer_norm) - id += 1 - - # remove orphan constant nodes - for constant in graph.node: - if constant.op_type == "Constant" and constant not in remove_nodes: - is_orphan = True - for out_name in constant.output: - out = find_output_node(model, out_name) - if out not in remove_nodes: - is_orphan = False - if is_orphan: - remove_nodes.append(constant) - - all_nodes = [] - for node in graph.node: - if node not in remove_nodes: - all_nodes.append(node) - - for node in layer_norm_nodes: - all_nodes.append(node) # noqa: PERF402 - - graph.ClearField("node") - graph.node.extend(all_nodes) - return model - - -# Fuse SoftmaxCrossEntropy - - -def fuse_softmaxNLL_to_softmaxCE(onnx_model): # noqa: N802 - # Converting below subgraph - # - # (subgraph) - # | - # LogSoftmax (target) (optional weight) - # | | | - # nll_loss/NegativeLogLikelihoodLoss - # | - # output - # - # to the following - # - # (subgraph) (target) (optional weight) - # | | _____| - # | | | - # SparseSoftmaxCrossEntropy - # | - # output - nll_count = 0 - while True: - nll_count = nll_count + 1 - nll_loss_node = None - nll_loss_node_index = 0 - for nll_loss_node_index, node in enumerate(onnx_model.graph.node): # noqa: B007 - if node.op_type == "nll_loss" or node.op_type == "NegativeLogLikelihoodLoss": - nll_loss_node = node - break - - if nll_loss_node is None: - break - - softmax_node = None - softmax_node_index = 0 - label_input_name = None - weight_input_name = None - for softmax_node_index, node in enumerate(onnx_model.graph.node): # noqa: B007 - if node.op_type == "LogSoftmax": - # has to be connected to nll_loss - if len(nll_loss_node.input) > 2: - weight_input_name = nll_loss_node.input[2] - if node.output[0] == nll_loss_node.input[0]: - softmax_node = node - label_input_name = nll_loss_node.input[1] - break - elif node.output[0] == nll_loss_node.input[1]: - softmax_node = node - label_input_name = nll_loss_node.input[0] - break - else: - if softmax_node is not None: - break - - if softmax_node is None: - break - - # delete nll_loss and LogSoftmax nodes in order - if nll_loss_node_index < softmax_node_index: - del onnx_model.graph.node[softmax_node_index] - del onnx_model.graph.node[nll_loss_node_index] - else: - del onnx_model.graph.node[nll_loss_node_index] - del onnx_model.graph.node[softmax_node_index] - - probability_output_name = softmax_node.output[0] - node = onnx_model.graph.node.add() - inputs = ( - [softmax_node.input[0], label_input_name, weight_input_name] - if weight_input_name - else [softmax_node.input[0], label_input_name] - ) - node.CopyFrom( - onnx.helper.make_node( - "SparseSoftmaxCrossEntropy", - inputs, - [nll_loss_node.output[0], probability_output_name], - "nll_loss_node_" + str(nll_count), - ) - ) - - return onnx_model diff --git a/orttraining/orttraining/test/external_transformer/test/external_transformers_test.py b/orttraining/orttraining/test/external_transformer/test/external_transformers_test.py deleted file mode 100644 index f57f55d14eb1b..0000000000000 --- a/orttraining/orttraining/test/external_transformer/test/external_transformers_test.py +++ /dev/null @@ -1,144 +0,0 @@ -import sys -import threading -import time - - -class OutputGrabber: - """ - Class used to grab standard output or another stream. 
- """ - - escape_char = "\b" - - def __init__(self, stream=None, threaded=False): - self.origstream = stream - self.threaded = threaded - if self.origstream is None: - self.origstream = sys.stdout - self.origstreamfd = self.origstream.fileno() - self.capturedtext = "" - # Create a pipe so the stream can be captured: - self.pipe_out, self.pipe_in = os.pipe() - - def __enter__(self): - self.start() - return self - - def __exit__(self, type, value, traceback): - self.stop() - - def start(self): - """ - Start capturing the stream data. - """ - self.capturedtext = "" - # Save a copy of the stream: - self.streamfd = os.dup(self.origstreamfd) - # Replace the original stream with our write pipe: - os.dup2(self.pipe_in, self.origstreamfd) - if self.threaded: - # Start thread that will read the stream: - self.workerThread = threading.Thread(target=self.readOutput) - self.workerThread.start() - # Make sure that the thread is running and os.read() has executed: - time.sleep(0.01) - - def stop(self): - """ - Stop capturing the stream data and save the text in `capturedtext`. - """ - # Print the escape character to make the readOutput method stop: - self.origstream.write(self.escape_char) - # Flush the stream to make sure all our data goes in before - # the escape character: - self.origstream.flush() - if self.threaded: - # wait until the thread finishes so we are sure that - # we have until the last character: - self.workerThread.join() - else: - self.readOutput() - # Close the pipe: - os.close(self.pipe_in) - os.close(self.pipe_out) - # Restore the original stream: - os.dup2(self.streamfd, self.origstreamfd) - # Close the duplicate stream: - os.close(self.streamfd) - - def readOutput(self): - """ - Read the stream data (one byte at a time) - and save the text in `capturedtext`. 
- """ - while True: - char = os.read(self.pipe_out, 1).decode(self.origstream.encoding) - if not char or self.escape_char in char: - break - self.capturedtext += char - - -import os # noqa: E402 -import unittest # noqa: E402 - -import numpy as np # noqa: E402, F401 -import torch # noqa: E402 -import torch.nn as nn # noqa: E402 -import torch.nn.functional as F # noqa: E402 - -from onnxruntime.capi import _pybind_state as torch_ort_eager # noqa: E402, F401 -from onnxruntime.training import optim, orttrainer, orttrainer_options # noqa: E402, F401 - - -def my_loss(x, target): - return F.nll_loss(F.log_softmax(x, dim=1), target) - - -class NeuralNet(nn.Module): - def __init__(self, input_size, hidden_size, num_classes): - super().__init__() - self.fc1 = nn.Linear(input_size, hidden_size) - self.relu = nn.ReLU() - self.fc2 = nn.Linear(hidden_size, num_classes) - - def forward(self, x, target): - out = self.fc1(x) - out = self.relu(out) - out = self.fc2(out) - return my_loss(out, target) - - -class OrtEPTests(unittest.TestCase): - def test_external_graph_transformer_triggering(self): - input_size = 784 - hidden_size = 500 - num_classes = 10 - batch_size = 128 - model = NeuralNet(input_size, hidden_size, num_classes) - - model_desc = { - "inputs": [ - ("x", [batch_size, input_size]), - ( - "target", - [ - batch_size, - ], - ), - ], - "outputs": [("loss", [], True)], - } - optim_config = optim.SGDConfig() - opts = orttrainer.ORTTrainerOptions({"device": {"id": "cpu"}}) - model = orttrainer.ORTTrainer(model, model_desc, optim_config, options=opts) - # because orttrainer is lazy initialized, feed in a random data to trigger the graph transformer - data = torch.rand(batch_size, input_size) - target = torch.randint(0, 10, (batch_size,)) - - with OutputGrabber() as out: - model.train_step(data, target) - assert "******************Trigger Customized Graph Transformer: MyGraphTransformer!" in out.capturedtext - - -if __name__ == "__main__": - unittest.main() diff --git a/orttraining/orttraining/test/external_transformer/test_exeternal_transformers/test_external_transformers.cc b/orttraining/orttraining/test/external_transformer/test_exeternal_transformers/test_external_transformers.cc deleted file mode 100644 index 00e933dd14914..0000000000000 --- a/orttraining/orttraining/test/external_transformer/test_exeternal_transformers/test_external_transformers.cc +++ /dev/null @@ -1,35 +0,0 @@ -#include "core/optimizer/rewrite_rule.h" -#include "orttraining/core/optimizer/graph_transformer_registry.h" -#include "onnx/defs/schema.h" -#include -#include - -namespace onnxruntime { -namespace training { - -class MyRewriteRule : public RewriteRule { - public: - MyRewriteRule() noexcept - : RewriteRule("MyRewriteRule") { - } - std::vector TargetOpTypes() const noexcept override { - return {}; - } - - private: - bool SatisfyCondition(const Graph& /*graph*/, const Node& /*node*/, const logging::Logger& /*logger*/) const override { - return true; - } - - Status Apply(Graph& /*graph*/, Node& /*node*/, RewriteRuleEffect& /*rule_effect*/, const logging::Logger& /*logger*/) const override { - std::cout << "******************Trigger Customized Graph Transformer: MyGraphTransformer!" 
<< std::endl; - return Status::OK(); - } -}; - -void RegisterTrainingExternalTransformers() { - ONNX_REGISTER_EXTERNAL_REWRITE_RULE(MyRewriteRule, Level1, true); -} - -} // namespace training -} // namespace onnxruntime diff --git a/orttraining/orttraining/test/python/_test_commons.py b/orttraining/orttraining/test/python/_test_commons.py index 1413d59096832..fb7e62551de63 100644 --- a/orttraining/orttraining/test/python/_test_commons.py +++ b/orttraining/orttraining/test/python/_test_commons.py @@ -1,26 +1,7 @@ -import copy -import math import os import subprocess import sys -import numpy as np -import onnx -import torch -from numpy.testing import assert_allclose - -import onnxruntime -from onnxruntime.training import _utils, optim - - -def _single_run(execution_file, scenario, checkopint_dir=None): - cmd = [sys.executable, execution_file] - if scenario: - cmd += ["--scenario", scenario] - if checkopint_dir: - cmd += ["--checkpoint_dir", checkopint_dir] - assert subprocess.call(cmd) == 0 - def is_windows(): return sys.platform.startswith("win") @@ -46,197 +27,3 @@ def run_subprocess(args, cwd=None, capture=False, dll_path=None, shell=False, en if log: log.debug("Subprocess completed. Return code=" + str(completed_process.returncode)) return completed_process - - -def legacy_constant_lr_scheduler(global_step, initial_lr, total_steps, warmup): - num_warmup_steps = warmup * total_steps - if global_step < num_warmup_steps: - new_lr = initial_lr * float(global_step) / float(max(1, num_warmup_steps)) - else: - new_lr = initial_lr - return new_lr - - -def legacy_cosine_lr_scheduler(global_step, initial_lr, total_steps, warmup, cycles): - num_warmup_steps = warmup * total_steps - if global_step < num_warmup_steps: - new_lr = initial_lr * float(global_step) / float(max(1, num_warmup_steps)) - else: - progress = float(global_step - num_warmup_steps) / float(max(1, total_steps - num_warmup_steps)) - new_lr = initial_lr * max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(cycles) * 2.0 * progress))) - return new_lr - - -def legacy_linear_lr_scheduler(global_step, initial_lr, total_steps, warmup): - num_warmup_steps = warmup * total_steps - if global_step < num_warmup_steps: - new_lr = initial_lr * float(global_step) / float(max(1, num_warmup_steps)) - else: - new_lr = initial_lr * max(0.0, float(total_steps - global_step) / float(max(1, total_steps - num_warmup_steps))) - return new_lr - - -def legacy_poly_lr_scheduler(global_step, initial_lr, total_steps, warmup, power, lr_end): - num_warmup_steps = warmup * total_steps - if global_step < num_warmup_steps: - new_lr = initial_lr * float(global_step) / float(max(1, num_warmup_steps)) - elif global_step > total_steps: - new_lr = lr_end - else: - lr_range = initial_lr - lr_end - decay_steps = total_steps - num_warmup_steps - pct_remaining = 1 - (global_step - num_warmup_steps) / decay_steps - decay = lr_range * pct_remaining**power + lr_end - new_lr = decay - return new_lr - - -def generate_dummy_optim_state(model, optimizer): - np.random.seed(0) - if not (isinstance(optimizer, (optim.AdamConfig, optim.LambConfig))): - return dict() - - moment_keys = ["Moment_1", "Moment_2"] - uc_key = "Update_Count" - step_key = "Step" - shared_state_key = "shared_optimizer_state" - - optim_state = dict() - weight_shape_map = dict() - if isinstance(model, torch.nn.Module): - weight_shape_map = {name: param.size() for name, param in model.named_parameters()} - elif isinstance(model, onnx.ModelProto): - weight_shape_map = {n.name: n.dims for n in model.graph.initializer} - 
else: - raise ValueError("'model' must be either 'torch.nn.Module' or 'onnx.ModelProto'") - - for weight_name, weight_shape in weight_shape_map.items(): - per_weight_state = dict() - for moment in moment_keys: - per_weight_state[moment] = np.random.uniform(-2, 2, weight_shape).astype(np.float32) - if isinstance(optimizer, optim.AdamConfig): - per_weight_state[uc_key] = np.full([1], 5, dtype=np.int64) - optim_state[weight_name] = copy.deepcopy(per_weight_state) - if isinstance(optimizer, optim.LambConfig): - step_val = np.full([1], 5, dtype=np.int64) - optim_state[shared_state_key] = {step_key: step_val} - return {"optimizer": optim_state, "trainer_options": {"optimizer_name": optimizer.name}} - - -def _load_pytorch_transformer_model(device, dynamic_axes=False, legacy_api=False, data_dir=None): - # Loads external Pytorch TransformerModel into utils - root = "samples" - if not os.path.exists(root): - root = os.path.normpath( - os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "..", "..", "samples") - ) - if not os.path.exists(root): - raise FileNotFoundError("Unable to find folder 'samples', tried %r." % root) - pytorch_transformer_path = os.path.join(root, "python", "training", "orttrainer", "pytorch_transformer") - pt_model_path = os.path.join(pytorch_transformer_path, "pt_model.py") - pt_model = _utils.import_module_from_file(pt_model_path) - ort_utils_path = os.path.join(pytorch_transformer_path, "ort_utils.py") - ort_utils = _utils.import_module_from_file(ort_utils_path) - utils_path = os.path.join(pytorch_transformer_path, "utils.py") - utils = _utils.import_module_from_file(utils_path) - - # Modeling - model = pt_model.TransformerModel(28785, 200, 2, 200, 2, 0.2).to(device) - my_loss = ort_utils.my_loss - if legacy_api: - if dynamic_axes: - model_desc = ort_utils.legacy_transformer_model_description_dynamic_axes() - else: - model_desc = ort_utils.legacy_transformer_model_description() - else: - if dynamic_axes: - model_desc = ort_utils.transformer_model_description_dynamic_axes() - else: - model_desc = ort_utils.transformer_model_description() - - # Preparing data - train_data, val_data, test_data = utils.prepare_data(device, 20, 20, data_dir) - return model, model_desc, my_loss, utils.get_batch, train_data, val_data, test_data - - -def generate_random_input_from_bart_model_desc(desc, seed=1, device="cuda:0"): - """Generates a sample input for the BART model using the model desc""" - - torch.manual_seed(seed) - onnxruntime.set_seed(seed) - dtype = torch.int64 - vocab_size = 30528 - sample_input = [] - for _index, input in enumerate(desc["inputs"]): - size = [] - for s in input[1]: - if isinstance(s, (int)): - size.append(s) - else: - size.append(1) - sample_input.append(torch.randint(0, vocab_size, tuple(size), dtype=dtype).to(device)) - return sample_input - - -def _load_bart_model(): - bart_onnx_model_path = os.path.join("testdata", "bart_tiny.onnx") - model = onnx.load(bart_onnx_model_path) - batch = 2 - seq_len = 1024 - model_desc = { - "inputs": [ - ( - "src_tokens", - [batch, seq_len], - ), - ( - "prev_output_tokens", - [batch, seq_len], - ), - ( - "target", - [batch * seq_len], - ), - ], - "outputs": [("loss", [], True)], - } - - return model, model_desc - - -def assert_all_states_close_ort(state_dict_pre_checkpoint, state_dict_post_checkpoint, reshape_states=False): - """Assert that the two ORTTrainer (hierarchical) state dictionaries are very close for all states""" - - assert ("model" in state_dict_pre_checkpoint) == ("model" in state_dict_post_checkpoint) 
- assert ("optimizer" in state_dict_pre_checkpoint) == ("optimizer" in state_dict_post_checkpoint) - - if "model" in state_dict_pre_checkpoint: - for model_state_key in state_dict_pre_checkpoint["model"]["full_precision"]: - if reshape_states: - assert_allclose( - state_dict_pre_checkpoint["model"]["full_precision"][model_state_key], - state_dict_post_checkpoint["model"]["full_precision"][model_state_key].reshape( - state_dict_pre_checkpoint["model"]["full_precision"][model_state_key].shape - ), - ) - else: - assert_allclose( - state_dict_pre_checkpoint["model"]["full_precision"][model_state_key], - state_dict_post_checkpoint["model"]["full_precision"][model_state_key], - ) - - if "optimizer" in state_dict_pre_checkpoint: - for model_state_key in state_dict_pre_checkpoint["optimizer"]: - for optimizer_state_key in state_dict_pre_checkpoint["optimizer"][model_state_key]: - if reshape_states: - assert_allclose( - state_dict_pre_checkpoint["optimizer"][model_state_key][optimizer_state_key], - state_dict_post_checkpoint["optimizer"][model_state_key][optimizer_state_key].reshape( - state_dict_pre_checkpoint["optimizer"][model_state_key][optimizer_state_key].shape - ), - ) - else: - assert_allclose( - state_dict_pre_checkpoint["optimizer"][model_state_key][optimizer_state_key], - state_dict_post_checkpoint["optimizer"][model_state_key][optimizer_state_key], - ) diff --git a/orttraining/orttraining/test/python/_test_helpers.py b/orttraining/orttraining/test/python/_test_helpers.py index a9a4c7b1cc2ef..8f2a18b5ec00b 100644 --- a/orttraining/orttraining/test/python/_test_helpers.py +++ b/orttraining/orttraining/test/python/_test_helpers.py @@ -1,30 +1,11 @@ import copy import os -import numpy as np import torch from numpy.testing import assert_allclose -from onnxruntime.capi.ort_trainer import ORTTrainer as Legacy_ORTTrainer -from onnxruntime.training import orttrainer - -try: - from onnxruntime.training.ortmodule import ORTModule - from onnxruntime.training.ortmodule._fallback import ORTModuleInitException - from onnxruntime.training.ortmodule._graph_execution_manager_factory import ( # noqa: F401 - GraphExecutionManagerFactory, - ) -except ImportError: - # Some pipelines do not contain ORTModule - pass -except Exception as e: - from onnxruntime.training.ortmodule._fallback import ORTModuleInitException - - if isinstance(e, ORTModuleInitException): - # ORTModule is present but not ready to run - # That is OK because this file is also used by ORTTrainer tests - pass - raise +from onnxruntime.training.ortmodule import ORTModule +from onnxruntime.training.ortmodule._graph_execution_manager_factory import GraphExecutionManagerFactory # noqa: F401 def is_all_or_nothing_fallback_enabled(model, policy=None): @@ -66,103 +47,6 @@ def assert_model_outputs(output_a, output_b, verbose=False, rtol=1e-7, atol=0): assert_allclose(output_a, output_b, rtol=rtol, atol=atol, err_msg="Model output value mismatch") -def assert_onnx_weights(model_a, model_b, verbose=False, rtol=1e-7, atol=0): - r"""Asserts whether weight difference between models a and b differences are within specified tolerance - - Compares the weights of two different ONNX models (model_a and model_b) - and raises AssertError when they diverge by more than atol or rtol - - Args: - model_a, model_b (ORTTrainer): Two instances of ORTTrainer with the same model structure - verbose (bool, default is False): if True, prints absolute difference for each weight - rtol (float, default is 1e-7): Max relative difference - atol (float, default is 1e-4): Max 
absolute difference - """ - assert isinstance(model_a, orttrainer.ORTTrainer) and isinstance(model_b, orttrainer.ORTTrainer) - state_dict_a, state_dict_b = model_a._training_session.get_state(), model_b._training_session.get_state() - assert len(state_dict_a.items()) == len(state_dict_b.items()) - _assert_state_dict_weights(state_dict_a, state_dict_b, verbose, rtol, atol) - - -def assert_legacy_onnx_weights(model_a, model_b, verbose=False, rtol=1e-7, atol=0): - r"""Asserts whether weight difference between models a and b differences are within specified tolerance - - Compares the weights of a legacy model model_a and experimental model_b model - and raises AssertError when they diverge by more than atol or rtol. - - Args: - model_a (ORTTrainer): Instance of legacy ORTTrainer - model_b (ORTTrainer): Instance of experimental ORTTrainer - verbose (bool, default is False): if True, prints absolute difference for each weight. - rtol (float, default is 1e-7): Max relative difference - atol (float, default is 1e-4): Max absolute difference - """ - assert isinstance(model_a, orttrainer.ORTTrainer) and isinstance(model_b, Legacy_ORTTrainer) - state_dict_a, state_dict_b = model_a._training_session.get_state(), model_b.session.get_state() - assert len(state_dict_a.items()) == len(state_dict_b.items()) - _assert_state_dict_weights(state_dict_a, state_dict_b, verbose, rtol, atol) - - -def _assert_state_dict_weights(state_dict_a, state_dict_b, verbose, rtol, atol): - r"""Asserts whether dicts a and b value differences are within specified tolerance - - Compares the weights of two model's state_dict dicts and raises AssertError - when they diverge by more than atol or rtol - - Args: - model_a (ORTTrainer): Instance of legacy ORTTrainer - model_b (ORTTrainer): Instance of experimental ORTTrainer - verbose (bool, default is False): if True, prints absolute difference for each weight. - rtol (float, default is 1e-7): Max relative difference - atol (float, default is 1e-4): Max absolute difference - """ - - for (a_name, a_val), (_b_name, b_val) in zip(state_dict_a.items(), state_dict_b.items()): - np_a_vals = np.array(a_val).flatten() - np_b_vals = np.array(b_val).flatten() - assert np_a_vals.shape == np_b_vals.shape - if verbose: - print(f"Weight name: {a_name}: absolute difference: {np.abs(np_a_vals-np_b_vals).max()}") - assert_allclose(a_val, b_val, rtol=rtol, atol=atol, err_msg=f"Weight mismatch for {a_name}") - - -def assert_optim_state(expected_state, actual_state, rtol=1e-7, atol=0): - r"""Asserts whether optimizer state differences are within specified tolerance - - Compares the expected and actual optimizer states of dicts and raises AssertError - when they diverge by more than atol or rtol. - The optimizer dict is of the form: - model_weight_name: - { - "Moment_1": moment1_tensor, - "Moment_2": moment2_tensor, - "Update_Count": update_tensor # if optimizer is adam, absent otherwise - }, - ... - "shared_optimizer_state": # if optimizer is shared, absent otherwise. - So far, only lamb optimizer uses this. 
- { - "step": step_tensor # int array of size 1 - } - - Args: - expected_state (dict(dict())): Expected optimizer state - actual_state (dict(dict())): Actual optimizer state - rtol (float, default is 1e-7): Max relative difference - atol (float, default is 0): Max absolute difference - """ - assert expected_state.keys() == actual_state.keys() - for param_name, a_state in actual_state.items(): - for k, v in a_state.items(): - assert_allclose( - v, - expected_state[param_name][k], - rtol=rtol, - atol=atol, - err_msg=f"Optimizer state mismatch for param {param_name}, key {k}", - ) - - def is_dynamic_axes(model): # Check inputs for inp in model._torch_module._execution_manager(model._is_training())._onnx_models.optimized_model.graph.input: diff --git a/orttraining/orttraining/test/python/onnxruntime_test_postprocess.py b/orttraining/orttraining/test/python/onnxruntime_test_postprocess.py deleted file mode 100644 index d5298cf8e860e..0000000000000 --- a/orttraining/orttraining/test/python/onnxruntime_test_postprocess.py +++ /dev/null @@ -1,325 +0,0 @@ -import os -import unittest - -import torch -import torch.nn as nn -from orttraining_test_bert_postprocess import postprocess_model -from orttraining_test_data_loader import create_ort_test_dataloader -from orttraining_test_transformers import BertForPreTraining, BertModelTest -from orttraining_test_utils import map_optimizer_attributes - -import onnxruntime -from onnxruntime.capi.ort_trainer import ( # noqa: F401 - IODescription, - LossScaler, - ModelDescription, - ORTTrainer, - generate_sample, -) - -torch.manual_seed(1) -onnxruntime.set_seed(1) - - -class Test_PostPasses(unittest.TestCase): # noqa: N801 - def get_onnx_model( - self, model, model_desc, inputs, device, _enable_internal_postprocess=True, _extra_postprocess=None - ): - lr_desc = IODescription( - "Learning_Rate", - [ - 1, - ], - torch.float32, - ) - model = ORTTrainer( - model, - None, - model_desc, - "LambOptimizer", - map_optimizer_attributes, - lr_desc, - device, - world_rank=0, - world_size=1, - _opset_version=14, - _enable_internal_postprocess=_enable_internal_postprocess, - _extra_postprocess=_extra_postprocess, - ) - - model.train_step(*inputs) - return model.onnx_model_ - - def count_all_nodes(self, model): - return len(model.graph.node) - - def count_nodes(self, model, node_type): - count = 0 - for node in model.graph.node: - if node.op_type == node_type: - count += 1 - return count - - def find_nodes(self, model, node_type): - nodes = [] - for node in model.graph.node: - if node.op_type == node_type: - nodes.append(node) - return nodes - - def get_name(self, name): - if os.path.exists(name): - return name - rel = os.path.join("testdata", name) - if os.path.exists(rel): - return rel - this = os.path.dirname(__file__) - data = os.path.join(this, "..", "..", "..", "..", "onnxruntime", "test", "testdata") - res = os.path.join(data, name) - if os.path.exists(res): - return res - raise FileNotFoundError(f"Unable to find '{name}' or '{rel}' or '{res}'") - - def test_layer_norm(self): - class LayerNormNet(nn.Module): - def __init__(self, target): - super().__init__() - self.ln_1 = nn.LayerNorm(10) - self.loss = nn.CrossEntropyLoss() - self.target = target - - def forward(self, x): - output1 = self.ln_1(x) - loss = self.loss(output1, self.target) - return loss, output1 - - device = torch.device("cpu") - target = torch.ones(20, 10, 10, dtype=torch.int64).to(device) - model = LayerNormNet(target) - input = torch.randn(20, 5, 10, 10, dtype=torch.float32).to(device) - - input_desc = 
IODescription("input", [], "float32") - output0_desc = IODescription("output0", [], "float32") - output1_desc = IODescription("output1", [20, 5, 10, 10], "float32") - model_desc = ModelDescription([input_desc], [output0_desc, output1_desc]) - - learning_rate = torch.tensor([1.0000000e00]).to(device) - input_args = [input, learning_rate] - - onnx_model = self.get_onnx_model(model, model_desc, input_args, device) - - count_layer_norm = self.count_nodes(onnx_model, "LayerNormalization") - count_nodes = self.count_all_nodes(onnx_model) - - assert count_layer_norm == 0 - assert count_nodes == 3 - - def test_expand(self): - class ExpandNet(nn.Module): - def __init__(self, target): - super().__init__() - self.loss = nn.CrossEntropyLoss() - self.target = target - self.linear = torch.nn.Linear(2, 2) - - def forward(self, x, x1): - output = x.expand_as(x1) - output = self.linear(output) - output = output + output - loss = self.loss(output, self.target) - return loss, output - - device = torch.device("cpu") - target = torch.ones(5, 5, 2, dtype=torch.int64).to(device) - model = ExpandNet(target).to(device) - - x = torch.randn(5, 3, 1, 2, dtype=torch.float32).to(device) - x1 = torch.randn(5, 3, 5, 2, dtype=torch.float32).to(device) - - input0_desc = IODescription("x", [5, 3, 1, 2], "float32") - input1_desc = IODescription("x1", [5, 3, 5, 2], "float32") - output0_desc = IODescription("output0", [], "float32") - output1_desc = IODescription("output1", [5, 3, 5, 2], "float32") - model_desc = ModelDescription([input0_desc, input1_desc], [output0_desc, output1_desc]) - - learning_rate = torch.tensor([1.0000000e00]).to(device) - input_args = [x, x1, learning_rate] - - onnx_model = self.get_onnx_model(model, model_desc, input_args, device) - - # check that expand output has shape - expand_nodes = self.find_nodes(onnx_model, "Expand") - assert len(expand_nodes) == 1 - - model_info = onnx_model.graph.value_info - assert model_info[0].name == expand_nodes[0].output[0] - assert model_info[0].type == onnx_model.graph.input[1].type - - def test_bert(self): - device = torch.device("cpu") - - model_tester = BertModelTest.BertModelTester(self) - ( - config, - input_ids, - token_type_ids, - input_mask, - sequence_labels, - token_labels, - choice_labels, - ) = model_tester.prepare_config_and_inputs() - - model = BertForPreTraining(config=config) - model.eval() - - loss, prediction_scores, seq_relationship_score = model( - input_ids, - attention_mask=input_mask, - token_type_ids=token_type_ids, - masked_lm_labels=token_labels, - next_sentence_label=sequence_labels, - ) - - model_desc = ModelDescription( - [ - model_tester.input_ids_desc, - model_tester.attention_mask_desc, - model_tester.token_type_ids_desc, - model_tester.masked_lm_labels_desc, - model_tester.next_sentence_label_desc, - ], - [model_tester.loss_desc, model_tester.prediction_scores_desc, model_tester.seq_relationship_scores_desc], - ) - - from collections import namedtuple - - MyArgs = namedtuple( - "MyArgs", "local_rank world_size max_steps learning_rate warmup_proportion batch_size seq_len" - ) - args = MyArgs( - local_rank=0, - world_size=1, - max_steps=100, - learning_rate=0.00001, - warmup_proportion=0.01, - batch_size=13, - seq_len=7, - ) - - dataset_len = 100 - dataloader = create_ort_test_dataloader(model_desc.inputs_, args.batch_size, args.seq_len, dataset_len, device) - learning_rate = torch.tensor(1.0e0, dtype=torch.float32).to(device) - for b in dataloader: - batch = b - break - learning_rate = torch.tensor([1.00e00]).to(device) - inputs = 
[*batch, learning_rate] - - onnx_model = self.get_onnx_model(model, model_desc, inputs, device, _extra_postprocess=postprocess_model) - - self._bert_helper(onnx_model) - - def _bert_helper(self, onnx_model): - # count layer_norm - count_layer_norm = self.count_nodes(onnx_model, "LayerNormalization") - assert count_layer_norm == 0 - - # get expand node and check output shape - expand_nodes = self.find_nodes(onnx_model, "Expand") - assert len(expand_nodes) == 1 - - model_info = onnx_model.graph.value_info - assert model_info[0].name == expand_nodes[0].output[0] - assert model_info[0].type == onnx_model.graph.input[0].type - - def test_extra_postpass(self): - def postpass_replace_first_add_with_sub(model): - # this post pass replaces the first Add node with Sub in the model. - # Previous graph - # (subgraph 1) (subgraph 2) - # | | - # | | - # |________ ________| - # | | - # Add - # | - # (subgraph 3) - # - # Post graph - # (subgraph 1) (subgraph 2) - # | | - # | | - # |________ ________| - # | | - # Sub - # | - # (subgraph 3) - add_nodes = [n for n in model.graph.node if n.op_type == "Add"] - add_nodes[0].op_type = "Sub" - - class MultiAdd(nn.Module): - def __init__(self, target): - super().__init__() - self.loss = nn.CrossEntropyLoss() - self.target = target - self.linear = torch.nn.Linear(2, 2, bias=False) - - def forward(self, x, x1): - output = x + x1 - output = output + x - output = output + x1 - output = self.linear(output) - loss = self.loss(output, self.target) - return loss, output - - device = torch.device("cpu") - target = torch.ones(5, 2, dtype=torch.int64).to(device) - model = MultiAdd(target).to(device) - - x = torch.randn(5, 5, 2, dtype=torch.float32).to(device) - x1 = torch.randn(5, 5, 2, dtype=torch.float32).to(device) - - input0_desc = IODescription("x", [5, 5, 2], "float32") - input1_desc = IODescription("x1", [5, 5, 2], "float32") - output0_desc = IODescription("output0", [], "float32") - output1_desc = IODescription("output1", [5, 5, 2], "float32") - model_desc = ModelDescription([input0_desc, input1_desc], [output0_desc, output1_desc]) - - learning_rate = torch.tensor([1.0000000e00]).to(device) - input_args = [x, x1, learning_rate] - - onnx_model = self.get_onnx_model( - model, model_desc, input_args, device, _extra_postprocess=postpass_replace_first_add_with_sub - ) - - # check that extra postpass is called, and called only once. - add_nodes = self.find_nodes(onnx_model, "Add") - sub_nodes = self.find_nodes(onnx_model, "Sub") - assert len(add_nodes) == 2 - assert len(sub_nodes) == 1 - - unprocessed_onnx_model = self.get_onnx_model( - model, model_desc, input_args, device, _extra_postprocess=None, _enable_internal_postprocess=False - ) - # check that the model is unchanged. - add_nodes = self.find_nodes(unprocessed_onnx_model, "Add") - sub_nodes = self.find_nodes(unprocessed_onnx_model, "Sub") - assert len(add_nodes) == 3 - assert len(sub_nodes) == 0 - - processed_onnx_model = self.get_onnx_model( - unprocessed_onnx_model, - model_desc, - input_args, - device, - _extra_postprocess=postpass_replace_first_add_with_sub, - ) - # check that extra postpass is called, and called only once. 
- add_nodes = self.find_nodes(processed_onnx_model, "Add") - sub_nodes = self.find_nodes(processed_onnx_model, "Sub") - assert len(add_nodes) == 2 - assert len(sub_nodes) == 1 - - -if __name__ == "__main__": - unittest.main(module=__name__, buffer=True) diff --git a/orttraining/orttraining/test/python/orttraining_ortmodule_tests.py b/orttraining/orttraining/test/python/orttraining_ortmodule_tests.py index 0e7e9d23ee627..5341cd053ac18 100644 --- a/orttraining/orttraining/test/python/orttraining_ortmodule_tests.py +++ b/orttraining/orttraining/test/python/orttraining_ortmodule_tests.py @@ -43,7 +43,7 @@ def run_ortmodule_ops_tests(cwd, log, transformers_cache): env = get_env_with_transformers_cache(transformers_cache) - command = [sys.executable, "-m", "pytest", "-sv", "orttraining_test_onnx_ops_ortmodule.py"] + command = [sys.executable, "-m", "pytest", "-sv", "orttraining_test_ortmodule_onnx_ops.py"] run_subprocess(command, cwd=cwd, log=log, env=env).check_returncode() @@ -146,7 +146,7 @@ def run_data_sampler_tests(cwd, log): def run_hooks_tests(cwd, log): log.debug("Running: Data hooks tests") - command = [sys.executable, "-m", "pytest", "-sv", "orttraining_test_hooks.py"] + command = [sys.executable, "-m", "pytest", "-sv", "orttraining_test_ortmodule_hooks.py"] run_subprocess(command, cwd=cwd, log=log).check_returncode() diff --git a/orttraining/orttraining/test/python/orttraining_run_bert_pretrain.py b/orttraining/orttraining/test/python/orttraining_run_bert_pretrain.py deleted file mode 100644 index eea733684f140..0000000000000 --- a/orttraining/orttraining/test/python/orttraining_run_bert_pretrain.py +++ /dev/null @@ -1,801 +0,0 @@ -# ================== -import dataclasses -import datetime -import glob -import json -import logging -import os -import random -import shutil -import unittest -from concurrent.futures import ProcessPoolExecutor -from dataclasses import dataclass, field -from typing import Any, Dict, Optional - -import h5py -import numpy as np -import torch -import torch.distributed as dist -from torch.utils.data import DataLoader, Dataset, RandomSampler -from torch.utils.tensorboard import SummaryWriter -from tqdm import tqdm -from transformers import BertConfig, BertForPreTraining, HfArgumentParser - -import onnxruntime as ort - -# need to override torch.onnx.symbolic_opset12.nll_loss to handle ignore_index == -100 cases. -# the fix for ignore_index == -100 cases is already in pytorch master. -# however to use current torch master is causing computation changes in many tests. -# eventually we will use pytorch with fixed nll_loss once computation -# issues are understood and solved. -import onnxruntime.capi.pt_patch -from onnxruntime.training import amp, optim, orttrainer -from onnxruntime.training.checkpoint import aggregate_checkpoints -from onnxruntime.training.optim import LinearWarmupLRScheduler, PolyWarmupLRScheduler # noqa: F401 - -# we cannot make full convergence run in nightly pipeling because of its timeout limit, -# max_steps is still needed to calculate learning rate. force_to_stop_max_steps is used to -# terminate the training before the pipeline run hit its timeout. 
-force_to_stop_max_steps = 2500 - -logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO -) -logger = logging.getLogger(__name__) - - -def get_rank(): - if not dist.is_available(): - return 0 - if not dist.is_initialized(): - return 0 - return dist.get_rank() - - -def is_main_process(args): - if hasattr(args, "world_rank"): - return args.world_rank in [-1, 0] - else: - return get_rank() == 0 - - -def bert_model_description(config): - vocab_size = config.vocab_size - new_model_desc = { - "inputs": [ - ( - "input_ids", - ["batch", "max_seq_len_in_batch"], - ), - ( - "attention_mask", - ["batch", "max_seq_len_in_batch"], - ), - ( - "token_type_ids", - ["batch", "max_seq_len_in_batch"], - ), - ( - "masked_lm_labels", - ["batch", "max_seq_len_in_batch"], - ), - ( - "next_sentence_label", - [ - "batch", - ], - ), - ], - "outputs": [ - ("loss", [], True), - ( - "prediction_scores", - ["batch", "max_seq_len_in_batch", vocab_size], - ), - ( - "seq_relationship_scores", - ["batch", 2], - ), - ], - } - return new_model_desc - - -def create_pretraining_dataset(input_file, max_pred_length, args): - train_data = pretraining_dataset(input_file=input_file, max_pred_length=max_pred_length) - train_sampler = RandomSampler(train_data) - train_dataloader = DataLoader( - train_data, sampler=train_sampler, batch_size=args.train_batch_size * args.n_gpu, num_workers=0, pin_memory=True - ) - return train_dataloader, input_file - - -class pretraining_dataset(Dataset): # noqa: N801 - def __init__(self, input_file, max_pred_length): - logger.info("pretraining_dataset: %s, max_pred_length: %d", input_file, max_pred_length) - self.input_file = input_file - self.max_pred_length = max_pred_length - f = h5py.File(input_file, "r") - keys = [ - "input_ids", - "input_mask", - "segment_ids", - "masked_lm_positions", - "masked_lm_ids", - "next_sentence_labels", - ] - self.inputs = [np.asarray(f[key][:]) for key in keys] - f.close() - - def __len__(self): - "Denotes the total number of samples" - return len(self.inputs[0]) - - def __getitem__(self, index): - [input_ids, input_mask, segment_ids, masked_lm_positions, masked_lm_ids, next_sentence_labels] = [ - torch.from_numpy(input[index].astype(np.int64)) - if indice < 5 - else torch.from_numpy(np.asarray(input[index].astype(np.int64))) - for indice, input in enumerate(self.inputs) - ] - - # HF model use default ignore_index value (-100) for CrossEntropyLoss - masked_lm_labels = torch.ones(input_ids.shape, dtype=torch.long) * -100 - index = self.max_pred_length - # store number of masked tokens in index - padded_mask_indices = (masked_lm_positions == 0).nonzero() - if len(padded_mask_indices) != 0: - index = padded_mask_indices[0].item() - masked_lm_labels[masked_lm_positions[:index]] = masked_lm_ids[:index] - return [input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels] - - -import argparse # noqa: E402 - - -def parse_arguments(): - parser = argparse.ArgumentParser() - - # batch size test config parameters - parser.add_argument( - "--enable_mixed_precision", - default=False, - action="store_true", - help="Whether to use 16-bit float precision instead of 32-bit", - ) - - parser.add_argument( - "--sequence_length", - default=512, - type=int, - help="The maximum total input sequence length after WordPiece tokenization. 
\n" - "Sequences longer than this will be truncated, and sequences shorter \n" - "than this will be padded.", - ) - parser.add_argument( - "--max_predictions_per_seq", default=80, type=int, help="The maximum total of masked tokens in input sequence" - ) - parser.add_argument("--max_batch_size", default=32, type=int, help="Total batch size for training.") - - parser.add_argument("--gelu_recompute", default=False, action="store_true") - - parser.add_argument("--attn_dropout_recompute", default=False, action="store_true") - - parser.add_argument("--transformer_layer_recompute", default=False, action="store_true") - - args = parser.parse_args() - return args - - -@dataclass -class PretrainArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. - """ - - input_dir: str = field( - default=None, metadata={"help": "The input data dir. Should contain .hdf5 files for the task"} - ) - - bert_model: str = field( - default=None, - metadata={ - "help": "Bert pre-trained model selected in the list: bert-base-uncased, \ - bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese." - }, - ) - - output_dir: str = field( - default=None, metadata={"help": "The output directory where the model checkpoints will be written."} - ) - - cache_dir: str = field( - default="/tmp/bert_pretrain/", - metadata={"help": "The output directory where the model checkpoints will be written."}, - ) - max_seq_length: Optional[int] = field( - default=512, - metadata={ - "help": "The maximum total input sequence length after tokenization. Sequences longer \ - than this will be truncated, sequences shorter will be padded." - }, - ) - - max_predictions_per_seq: Optional[int] = field( - default=80, metadata={"help": "The maximum total of masked tokens in input sequence."} - ) - - train_batch_size: Optional[int] = field(default=32, metadata={"help": "Batch size for training."}) - - learning_rate: Optional[float] = field(default=5e-5, metadata={"help": "The initial learning rate for Lamb."}) - - num_train_epochs: Optional[float] = field( - default=3.0, metadata={"help": "Total number of training epochs to perform."} - ) - - max_steps: Optional[float] = field(default=1000, metadata={"help": "Total number of training steps to perform."}) - - warmup_proportion: Optional[float] = field( - default=0.01, - metadata={ - "help": "Proportion of training to perform linear learning rate warmup for. \ - E.g., 0.1 = 10%% of training." 
- }, - ) - - local_rank: Optional[int] = field(default=-1, metadata={"help": "local_rank for distributed training on gpus."}) - - world_rank: Optional[int] = field(default=-1) - - world_size: Optional[int] = field(default=1) - - seed: Optional[int] = field(default=42, metadata={"help": "random seed for initialization."}) - - gradient_accumulation_steps: Optional[int] = field( - default=1, metadata={"help": "Number of updates steps to accumualte before performing a backward/update pass."} - ) - - fp16: bool = field(default=False, metadata={"help": "Whether to use 16-bit float precision instead of 32-bit."}) - - gelu_recompute: bool = field( - default=False, metadata={"help": "Whether to enable recomputing Gelu activation output to save memory."} - ) - attn_dropout_recompute: bool = field( - default=False, metadata={"help": "Whether to enable recomputing attention dropout to save memory."} - ) - transformer_layer_recompute: bool = field( - default=False, metadata={"help": "Whether to enable recomputing transformer layerwise to save memory."} - ) - - loss_scale: Optional[float] = field( - default=0.0, metadata={"help": "Loss scaling, positive power of 2 values can improve fp16 convergence."} - ) - - deepspeed_zero_stage: Optional[int] = field(default=0, metadata={"help": "Deepspeed Zero Stage. 0 => disabled"}) - - log_freq: Optional[float] = field(default=1.0, metadata={"help": "frequency of logging loss."}) - - checkpoint_activations: bool = field(default=False, metadata={"help": "Whether to use gradient checkpointing."}) - - resume_from_checkpoint: bool = field( - default=False, metadata={"help": "Whether to resume training from checkpoint."} - ) - - resume_step: Optional[int] = field(default=-1, metadata={"help": "Step to resume training from."}) - - num_steps_per_checkpoint: Optional[int] = field( - default=100, metadata={"help": "Number of update steps until a model checkpoint is saved to disk."} - ) - - save_checkpoint: Optional[bool] = field( - default=False, metadata={"help": "Enable for saving a model checkpoint to disk."} - ) - - init_state_dict: Optional[dict] = field(default=None, metadata={"help": "State to load before training."}) - - phase2: bool = field(default=False, metadata={"help": "Whether to train with seq len 512."}) - - allreduce_post_accumulation: bool = field( - default=False, metadata={"help": "Whether to do allreduces during gradient accumulation steps."} - ) - - allreduce_post_accumulation_fp16: bool = field( - default=False, metadata={"help": "Whether to do fp16 allreduce post accumulation."} - ) - - accumulate_into_fp16: bool = field(default=False, metadata={"help": "Whether to use fp16 gradient accumulators."}) - - phase1_end_step: Optional[int] = field( - default=7038, metadata={"help": "Whether to use fp16 gradient accumulators."} - ) - - tensorboard_dir: Optional[str] = field( - default=None, - ) - - schedule: Optional[str] = field( - default="warmup_poly", - ) - - # this argument is test specific. to run a full bert model will take too long to run. instead, we reduce - # number of hidden layers so that it can show convergence to an extend to help detect any regression. - force_num_hidden_layers: Optional[int] = field( - default=None, metadata={"help": "Whether to use fp16 gradient accumulators."} - ) - - def to_json_string(self): - """ - Serializes this instance to a JSON string. 
- """ - return json.dumps(dataclasses.asdict(self), indent=2) - - def to_sanitized_dict(self) -> Dict[str, Any]: - """ - Sanitized serialization to use with TensorBoard`s hparams - """ - d = dataclasses.asdict(self) - valid_types = [bool, int, float, str, torch.Tensor] - return {k: v if type(v) in valid_types else str(v) for k, v in d.items()} - - -def setup_training(args): - assert torch.cuda.is_available() - - if args.local_rank == -1: - args.local_rank = 0 - args.world_rank = 0 - - print("args.local_rank: ", args.local_rank) - torch.cuda.set_device(args.local_rank) - device = torch.device("cuda", args.local_rank) - args.n_gpu = 1 - - if args.gradient_accumulation_steps < 1: - raise ValueError( - f"Invalid gradient_accumulation_steps parameter: {args.gradient_accumulation_steps}, should be >= 1" - ) - if args.train_batch_size % args.gradient_accumulation_steps != 0: - raise ValueError( - "Invalid gradient_accumulation_steps parameter: {}, batch size {} should be divisible".format( - args.gradient_accumulation_steps, args.train_batch_size - ) - ) - - # args.train_batch_size is per global step (optimization step) batch size - # now make it a per gpu batch size - args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps - args.train_batch_size = args.train_batch_size // args.world_size - - logger.info("setup_training: args.train_batch_size = %d", args.train_batch_size) - return device, args - - -def setup_torch_distributed(world_rank, world_size): - os.environ["RANK"] = str(world_rank) - os.environ["WORLD_SIZE"] = str(world_size) - os.environ["MASTER_ADDR"] = "localhost" - os.environ["MASTER_PORT"] = "12345" - torch.distributed.init_process_group(backend="nccl", world_size=world_size, rank=world_rank) - return - - -def prepare_model(args, device): - config = BertConfig.from_pretrained(args.bert_model, cache_dir=args.cache_dir) - - # config.num_hidden_layers = 12 - if args.force_num_hidden_layers: - logger.info("Modifying model config with num_hidden_layers to %d", args.force_num_hidden_layers) - config.num_hidden_layers = args.force_num_hidden_layers - - model = BertForPreTraining(config) - if args.init_state_dict is not None: - model.load_state_dict(args.init_state_dict) - model_desc = bert_model_description(config) - - lr_scheduler = LinearWarmupLRScheduler(total_steps=int(args.max_steps), warmup=args.warmup_proportion) - - loss_scaler = amp.DynamicLossScaler() if args.fp16 else None - - options = orttrainer.ORTTrainerOptions( - { - "batch": {"gradient_accumulation_steps": args.gradient_accumulation_steps}, - "device": {"id": str(device)}, - "mixed_precision": {"enabled": args.fp16, "loss_scaler": loss_scaler}, - "graph_transformer": { - "attn_dropout_recompute": args.attn_dropout_recompute, - "gelu_recompute": args.gelu_recompute, - "transformer_layer_recompute": args.transformer_layer_recompute, - }, - "debug": { - "deterministic_compute": True, - }, - "utils": {"grad_norm_clip": True}, - "distributed": { - "world_rank": max(0, args.local_rank), - "world_size": args.world_size, - "local_rank": max(0, args.local_rank), - "allreduce_post_accumulation": args.allreduce_post_accumulation, - "deepspeed_zero_optimization": {"stage": args.deepspeed_zero_stage}, - "enable_adasum": False, - }, - "lr_scheduler": lr_scheduler, - } - ) - - param_optimizer = list(model.named_parameters()) - no_decay_keys = ["bias", "gamma", "beta", "LayerNorm"] - params = [ - { - "params": [n for n, p in param_optimizer if any(no_decay_key in n for no_decay_key in no_decay_keys)], - "alpha": 
0.9, - "beta": 0.999, - "lambda": 0.0, - "epsilon": 1e-6, - }, - { - "params": [n for n, p in param_optimizer if not any(no_decay_key in n for no_decay_key in no_decay_keys)], - "alpha": 0.9, - "beta": 0.999, - "lambda": 0.0, - "epsilon": 1e-6, - }, - ] - - optim_config = optim.AdamConfig(params=params, lr=2e-5, do_bias_correction=True) - model = orttrainer.ORTTrainer(model, model_desc, optim_config, options=options) - - return model - - -def get_data_file(f_id, world_rank, world_size, files): - num_files = len(files) - if world_size > num_files: - remainder = world_size % num_files - return files[(f_id * world_size + world_rank + remainder * f_id) % num_files] - elif world_size > 1: - return files[(f_id * world_size + world_rank) % num_files] - else: - return files[f_id % num_files] - - -def main(): - parser = HfArgumentParser(PretrainArguments) - args = parser.parse_args_into_dataclasses()[0] - do_pretrain(args) - - -def do_pretrain(args): - if is_main_process(args) and args.tensorboard_dir: - tb_writer = SummaryWriter(log_dir=args.tensorboard_dir) - tb_writer.add_text("args", args.to_json_string()) - tb_writer.add_hparams(args.to_sanitized_dict(), metric_dict={}) - else: - tb_writer = None - - random.seed(args.seed) - np.random.seed(args.seed) - torch.manual_seed(args.seed) - ort.set_seed(args.seed) - - device, args = setup_training(args) - - model = prepare_model(args, device) - - logger.info("Running training: Batch size = %d, initial LR = %f", args.train_batch_size, args.learning_rate) - - average_loss = 0.0 - epoch = 0 - training_steps = 0 - - pool = ProcessPoolExecutor(1) - while True: - files = [ - os.path.join(args.input_dir, f) - for f in os.listdir(args.input_dir) - if os.path.isfile(os.path.join(args.input_dir, f)) and "training" in f - ] - files.sort() - random.shuffle(files) - - f_id = 0 - train_dataloader, data_file = create_pretraining_dataset( - get_data_file(f_id, args.world_rank, args.world_size, files), args.max_predictions_per_seq, args - ) - - for f_id in range(1, len(files)): - logger.info("data file %s" % (data_file)) - - dataset_future = pool.submit( - create_pretraining_dataset, - get_data_file(f_id, args.world_rank, args.world_size, files), - args.max_predictions_per_seq, - args, - ) - - train_iter = tqdm(train_dataloader, desc="Iteration") if is_main_process(args) else train_dataloader - for _step, batch in enumerate(train_iter): - training_steps += 1 - batch = [t.to(device) for t in batch] # noqa: PLW2901 - input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels = batch - - loss, _, _ = model.train_step( - input_ids, input_mask, segment_ids, masked_lm_labels, next_sentence_labels - ) - average_loss += loss.item() - - global_step = model._train_step_info.optimization_step - if training_steps % (args.log_freq * args.gradient_accumulation_steps) == 0: - if is_main_process(args): - divisor = args.log_freq * args.gradient_accumulation_steps - if tb_writer: - lr = model.options.lr_scheduler.get_last_lr()[0] - tb_writer.add_scalar("train/summary/scalar/Learning_Rate", lr, global_step) - if args.fp16: - tb_writer.add_scalar("train/summary/scalar/loss_scale_25", loss, global_step) - # TODO: ORTTrainer to expose all_finite - # tb_writer.add_scalar('train/summary/scalar/all_fp16_gradients_finite_859', all_finite, global_step) - tb_writer.add_scalar("train/summary/total_loss", average_loss / divisor, global_step) - - print(f"Step:{global_step} Average Loss = {average_loss / divisor}") - - if global_step >= args.max_steps or global_step >= 
force_to_stop_max_steps: - if tb_writer: - tb_writer.close() - - if global_step >= args.max_steps: - if args.save_checkpoint: - model.save_checkpoint(os.path.join(args.output_dir, f"checkpoint-{args.world_rank}.ortcp")) - final_loss = average_loss / (args.log_freq * args.gradient_accumulation_steps) - return final_loss - - average_loss = 0 - - del train_dataloader - - train_dataloader, data_file = dataset_future.result(timeout=None) - - epoch += 1 - - -def generate_tensorboard_logdir(root_dir): - current_date_time = datetime.datetime.today() - - dt_string = current_date_time.strftime("BERT_pretrain_%y_%m_%d_%I_%M_%S") - return os.path.join(root_dir, dt_string) - - -class ORTBertPretrainTest(unittest.TestCase): - def setUp(self): - self.output_dir = "/bert_data/hf_data/test_out/bert_pretrain_results" - self.bert_model = "bert-base-uncased" - self.local_rank = -1 - self.world_rank = -1 - self.world_size = 1 - self.max_steps = 300000 - self.learning_rate = 5e-4 - self.max_seq_length = 512 - self.max_predictions_per_seq = 20 - self.input_dir = "/bert_data/hdf5_lower_case_1_seq_len_128_max_pred_20_masked_lm_prob_0.15_random_seed_12345_dupe_factor_5/books_wiki_en_corpus/train" - self.train_batch_size = 4096 - self.gradient_accumulation_steps = 64 - self.fp16 = True - self.allreduce_post_accumulation = True - self.tensorboard_dir = "/bert_data/hf_data/test_out" - - def test_pretrain_throughput(self, process_args=None): - if process_args.sequence_length == 128: - input_dir = "/bert_data/hdf5_lower_case_1_seq_len_128_max_pred_20_masked_lm_prob_0.15_random_seed_12345_dupe_factor_5/books_wiki_en_corpus/train" - else: - input_dir = "/bert_data/hdf5_lower_case_1_seq_len_512_max_pred_80_masked_lm_prob_0.15_random_seed_12345_dupe_factor_5/books_wiki_en_corpus/train" - - print("process_args.enable_mixed_precision: ", process_args.enable_mixed_precision) - print("process_args.sequence_length: ", process_args.sequence_length) - print("process_args.max_batch_size: ", process_args.max_batch_size) - print("process_args.max_predictions_per_seq: ", process_args.max_predictions_per_seq) - print("process_args.gelu_recompute: ", process_args.gelu_recompute) - print("process_args.attn_dropout_recompute: ", process_args.attn_dropout_recompute) - print("process_args.transformer_layer_recompute: ", process_args.transformer_layer_recompute) - - args = PretrainArguments( - input_dir=input_dir, - output_dir="/bert_data/hf_data/test_out/bert_pretrain_results", - bert_model="bert-large-uncased", - local_rank=self.local_rank, - world_rank=self.world_rank, - world_size=self.world_size, - max_steps=10, - learning_rate=5e-4, - max_seq_length=process_args.sequence_length, - max_predictions_per_seq=process_args.max_predictions_per_seq, - train_batch_size=process_args.max_batch_size, - gradient_accumulation_steps=1, - fp16=process_args.enable_mixed_precision, - gelu_recompute=process_args.gelu_recompute, - attn_dropout_recompute=process_args.attn_dropout_recompute, - transformer_layer_recompute=process_args.transformer_layer_recompute, - allreduce_post_accumulation=True, - # TODO: remove - force_num_hidden_layers=2, - ) - do_pretrain(args) - - def test_pretrain_convergence(self): - args = PretrainArguments( - output_dir=self.output_dir, - bert_model=self.bert_model, - local_rank=self.local_rank, - world_rank=self.world_rank, - world_size=self.world_size, - max_steps=self.max_steps, - learning_rate=self.learning_rate, - max_seq_length=self.max_seq_length, - max_predictions_per_seq=self.max_predictions_per_seq, - 
train_batch_size=self.train_batch_size, - gradient_accumulation_steps=self.gradient_accumulation_steps, - input_dir=self.input_dir, - fp16=self.fp16, - allreduce_post_accumulation=self.allreduce_post_accumulation, - force_num_hidden_layers=self.force_num_hidden_layers, - tensorboard_dir=generate_tensorboard_logdir("/bert_data/hf_data/test_out/"), - ) - final_loss = do_pretrain(args) - return final_loss - - def test_pretrain_zero(self): - assert self.world_size > 0, "ZeRO test requires a distributed run." - setup_torch_distributed(self.world_rank, self.world_size) - per_gpu_batch_size = 32 - optimization_batch_size = per_gpu_batch_size * self.world_size # set to disable grad accumulation - - self.train_batch_size = optimization_batch_size - self.gradient_accumulation_steps = 1 - self.deepspeed_zero_stage = 1 - self.force_num_hidden_layers = 2 - self.max_seq_length = 32 - self.output_dir = "./bert_pretrain_ckpt" - if self.world_rank == 0: - if os.path.isdir(self.output_dir): - shutil.rmtree(self.output_dir) - os.makedirs(self.output_dir, exist_ok=True) - - torch.distributed.barrier() - - assert os.path.exists(self.output_dir) - - # run a few optimization steps - self.max_steps = 200 - args = PretrainArguments( - output_dir=self.output_dir, - bert_model=self.bert_model, - local_rank=self.local_rank, - world_rank=self.world_rank, - world_size=self.world_size, - max_steps=self.max_steps, - learning_rate=self.learning_rate, - max_seq_length=self.max_seq_length, - max_predictions_per_seq=self.max_predictions_per_seq, - train_batch_size=self.train_batch_size, - gradient_accumulation_steps=self.gradient_accumulation_steps, - input_dir=self.input_dir, - fp16=self.fp16, - allreduce_post_accumulation=self.allreduce_post_accumulation, - force_num_hidden_layers=self.force_num_hidden_layers, - deepspeed_zero_stage=self.deepspeed_zero_stage, - save_checkpoint=True, - ) - do_pretrain(args) - - # ensure all workers reach this point before loading the checkpointed state - torch.distributed.barrier() - - # on rank 0, load the trained state - if args.world_rank == 0: - checkpoint_files = glob.glob(os.path.join(self.output_dir, "checkpoint*.ortcp")) - args.init_state_dict = aggregate_checkpoints(checkpoint_files, pytorch_format=True) - - torch.distributed.barrier() - - # run a single step to get the loss, on rank 0 should be lesser than starting loss - args.save_checkpoint = False - args.max_steps = 1 - args.deepspeed_zero_stage = 0 - final_loss = do_pretrain(args) - return final_loss - - -if __name__ == "__main__": - import sys - - logger.warning("sys.argv: %s", sys.argv) - # usage: - # data parallel training - # mpirun -n 4 python orttraining_run_bert_pretrain.py - # - # single gpu: - # python orttraining_run_bert_pretrain.py ORTBertPretrainTest.test_pretrain_throughput - # [batch size test arguments] - # python orttraining_run_bert_pretrain.py ORTBertPretrainTest.test_pretrain_convergence - # - # pytorch.distributed.launch will not work because ort backend requires MPI to broadcast ncclUniqueId - # calling unpublished get_mpi_context_xxx to get rank/size numbers. - try: - # In case ORT is not built with MPI/NCCL, there are no get_mpi_context_xxx internal apis. 
- from onnxruntime.capi._pybind_state import get_mpi_context_local_size # noqa: F401 - from onnxruntime.capi._pybind_state import get_mpi_context_world_rank # noqa: F401 - from onnxruntime.capi._pybind_state import get_mpi_context_local_rank, get_mpi_context_world_size - - has_get_mpi_context_internal_api = True - except ImportError: - has_get_mpi_context_internal_api = False - pass - if has_get_mpi_context_internal_api and get_mpi_context_world_size() > 1: - world_size = get_mpi_context_world_size() - print("get_mpi_context_world_size(): ", world_size) - local_rank = get_mpi_context_local_rank() - - if local_rank == 0: - print("================================================================> os.getpid() = ", os.getpid()) - - test = ORTBertPretrainTest() - test.setUp() - test.local_rank = local_rank - test.world_rank = local_rank - test.world_size = world_size - - if len(sys.argv) >= 2 and sys.argv[1] == "ORTBertPretrainTest.test_pretrain_zero": - logger.info("running ORTBertPretrainTest.test_pretrain_zero()...") - final_loss = test.test_pretrain_zero() - logger.info("ORTBertPretrainTest.test_pretrain_zero() rank = %i final loss = %f", local_rank, final_loss) - if local_rank == 0: - test.assertLess(final_loss, 10.2) - else: - test.assertGreater(final_loss, 11.0) - logger.info("ORTBertPretrainTest.test_pretrain_zero() passed") - elif len(sys.argv) >= 2 and sys.argv[1] == "ORTBertPretrainTest.test_pretrain_convergence": - logger.info("running ORTBertPretrainTest.test_pretrain_convergence()...") - test.max_steps = 200 - test.force_num_hidden_layers = 8 - final_loss = test.test_pretrain_convergence() - logger.info("ORTBertPretrainTest.test_pretrain_convergence() final loss = %f", final_loss) - test.assertLess(final_loss, 8.5) - logger.info("ORTBertPretrainTest.test_pretrain_convergence() passed") - else: - # https://microsoft.sharepoint.com/teams/ONNX2/_layouts/15/Doc.aspx?sourcedoc={170774be-e1c6-4f8b-a3ae-984f211fe410}&action=edit&wd=target%28ONNX%20Training.one%7C8176133b-c7cb-4ef2-aa9d-3fdad5344c40%2FGitHub%20Master%20Merge%20Schedule%7Cb67f0db1-e3a0-4add-80a6-621d67fd8107%2F%29 - # to make equivalent args for cpp convergence test - test.max_seq_length = 128 - test.max_predictions_per_seq = 20 - test.gradient_accumulation_steps = 16 - - # cpp_batch_size (=64) * grad_acc * world_size - test.train_batch_size = 64 * test.gradient_accumulation_steps * test.world_size - test.max_steps = 300000 - - test.force_num_hidden_layers = None - - # already using Adam (e.g. 
AdamConfig) - test.learning_rate = 5e-4 - test.warmup_proportion = 0.1 - - final_loss = test.test_pretrain_convergence() - logger.info("ORTBertPretrainTest.test_pretrain_convergence() final loss = %f", final_loss) - else: - # unittest does not accept user defined arguments - # we need to run this script with user defined arguments - if len(sys.argv) >= 2 and sys.argv[1] == "ORTBertPretrainTest.test_pretrain_throughput": - run_test_pretrain_throughput, run_test_pretrain_convergence = True, False - sys.argv.remove("ORTBertPretrainTest.test_pretrain_throughput") - elif len(sys.argv) >= 2 and sys.argv[1] == "ORTBertPretrainTest.test_pretrain_convergence": - run_test_pretrain_throughput, run_test_pretrain_convergence = False, True - sys.argv.remove("ORTBertPretrainTest.test_pretrain_convergence") - else: - run_test_pretrain_throughput, run_test_pretrain_convergence = True, True - process_args = parse_arguments() - test = ORTBertPretrainTest() - test.setUp() - - if run_test_pretrain_throughput: - logger.info("running single GPU ORTBertPretrainTest.test_pretrain_throughput()...") - test.test_pretrain_throughput(process_args) - logger.info("single GPU ORTBertPretrainTest.test_pretrain_throughput() passed") - - # unittest.main() diff --git a/orttraining/orttraining/test/python/orttraining_run_frontend_batch_size_test.py b/orttraining/orttraining/test/python/orttraining_run_frontend_batch_size_test.py deleted file mode 100644 index 3e2d1a7154bfd..0000000000000 --- a/orttraining/orttraining/test/python/orttraining_run_frontend_batch_size_test.py +++ /dev/null @@ -1,67 +0,0 @@ -import collections -import subprocess -import sys - -Config = collections.namedtuple( - "Config", - [ - "enable_mixed_precision", - "sequence_length", - "max_batch_size", - "max_predictions_per_seq", - "gelu_recompute", - "attn_dropout_recompute", - "transformer_layer_recompute", - ], -) - -configs = [ - Config(True, 128, 46, 20, False, False, False), - Config(True, 512, 8, 80, False, False, False), - Config(False, 128, 26, 20, False, False, False), - Config(False, 512, 4, 80, False, False, False), - Config(True, 128, 50, 20, True, False, False), - Config(True, 128, 50, 20, False, True, False), - Config(True, 128, 76, 20, False, False, True), - Config(True, 512, 8, 80, True, False, False), - Config(True, 512, 9, 80, False, True, False), - Config(True, 512, 15, 80, False, False, True), -] - - -def run_with_config(config): - print( - "##### testing name - {}-{} #####".format( - "fp16" if config.enable_mixed_precision else "fp32", config.sequence_length - ) - ) - print("gelu_recompute: ", config.gelu_recompute) - print("attn_dropout_recompute: ", config.attn_dropout_recompute) - print("transformer_layer_recompute: ", config.transformer_layer_recompute) - - cmds = [ - sys.executable, - "orttraining_run_bert_pretrain.py", - "ORTBertPretrainTest.test_pretrain_throughput", - "--sequence_length", - str(config.sequence_length), - "--max_batch_size", - str(config.max_batch_size), - "--max_predictions_per_seq", - str(config.max_predictions_per_seq), - ] - if config.enable_mixed_precision: - cmds.append("--enable_mixed_precision") - if config.gelu_recompute: - cmds.append("--gelu_recompute") - if config.attn_dropout_recompute: - cmds.append("--attn_dropout_recompute") - if config.transformer_layer_recompute: - cmds.append("--transformer_layer_recompute") - - # access to azure storage shared disk is much slower so we need a longer timeout. 
- subprocess.run(cmds, timeout=1200).check_returncode() # noqa: PLW1510 - - -for config in configs: - run_with_config(config) diff --git a/orttraining/orttraining/test/python/orttraining_run_glue.py b/orttraining/orttraining/test/python/orttraining_run_glue.py deleted file mode 100644 index 794e2f8cc7240..0000000000000 --- a/orttraining/orttraining/test/python/orttraining_run_glue.py +++ /dev/null @@ -1,323 +0,0 @@ -# adapted from run_glue.py of huggingface transformers - -import dataclasses # noqa: F401 -import logging -import os -import unittest -from dataclasses import dataclass, field -from typing import Dict, Optional - -import numpy as np -from numpy.testing import assert_allclose -from transformers import ( - AutoConfig, - AutoModelForSequenceClassification, - AutoTokenizer, - EvalPrediction, - GlueDataset, - GlueDataTrainingArguments, - TrainingArguments, - glue_compute_metrics, - glue_output_modes, - glue_tasks_num_labels, - set_seed, -) - -import onnxruntime -from onnxruntime.capi.ort_trainer import IODescription, LossScaler, ModelDescription, ORTTrainer # noqa: F401 - -try: - from onnxruntime.capi._pybind_state import get_mpi_context_local_size # noqa: F401 - from onnxruntime.capi._pybind_state import get_mpi_context_world_rank # noqa: F401 - from onnxruntime.capi._pybind_state import get_mpi_context_local_rank, get_mpi_context_world_size - - has_get_mpi_context_internal_api = True -except ImportError: - has_get_mpi_context_internal_api = False - pass - - -import torch # noqa: F401 -from orttraining_transformer_trainer import ORTTransformerTrainer - -logger = logging.getLogger(__name__) - - -def verify_old_and_new_api_are_equal(results_per_api): - new_api_results = results_per_api[True] - old_api_results = results_per_api[False] - for key in new_api_results: - assert_allclose(new_api_results[key], old_api_results[key]) - - -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. 
- """ - - model_name_or_path: str = field(metadata={"help": "model identifier from huggingface.co/models"}) - config_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} - ) - tokenizer_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} - ) - cache_dir: Optional[str] = field( - default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} - ) - - -class ORTGlueTest(unittest.TestCase): - def setUp(self): - # configurations not to be changed accoss tests - self.max_seq_length = 128 - self.train_batch_size = 8 - self.learning_rate = 2e-5 - self.num_train_epochs = 3.0 - self.local_rank = -1 - self.world_size = 1 - self.overwrite_output_dir = True - self.gradient_accumulation_steps = 1 - self.data_dir = "/bert_data/hf_data/glue_data/" - self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "glue_test_output/") - self.cache_dir = "/tmp/glue/" - self.logging_steps = 10 - - def test_roberta_with_mrpc(self): - expected_acc = 0.85 - expected_f1 = 0.88 - expected_loss = 0.35 - results = self.run_glue(model_name="roberta-base", task_name="MRPC", fp16=False) - - assert results["acc"] >= expected_acc - assert results["f1"] >= expected_f1 - assert results["loss"] <= expected_loss - - def test_roberta_fp16_with_mrpc(self): - expected_acc = 0.87 - expected_f1 = 0.90 - expected_loss = 0.33 - - results = self.run_glue(model_name="roberta-base", task_name="MRPC", fp16=True) - - assert results["acc"] >= expected_acc - assert results["f1"] >= expected_f1 - assert results["loss"] <= expected_loss - - def test_bert_with_mrpc(self): - if self.local_rank == -1: - expected_acc = 0.83 - expected_f1 = 0.88 - expected_loss = 0.44 - elif self.local_rank == 0: - expected_acc = 0.81 - expected_f1 = 0.86 - expected_loss = 0.44 - - results = self.run_glue(model_name="bert-base-cased", task_name="MRPC", fp16=False) - - if self.local_rank in [-1, 0]: - assert results["acc"] >= expected_acc - assert results["f1"] >= expected_f1 - assert results["loss"] <= expected_loss - - def test_bert_fp16_with_mrpc(self): - expected_acc = 0.84 - expected_f1 = 0.88 - expected_loss = 0.46 - - results = self.run_glue(model_name="bert-base-cased", task_name="MRPC", fp16=True) - - assert results["acc"] >= expected_acc - assert results["f1"] >= expected_f1 - assert results["loss"] <= expected_loss - - def model_to_desc(self, model_name, model): - if model_name.startswith("bert") or model_name.startswith("xlnet"): - model_desc = { - "inputs": [ - ( - "input_ids", - ["batch", "max_seq_len_in_batch"], - ), - ( - "attention_mask", - ["batch", "max_seq_len_in_batch"], - ), - ( - "token_type_ids", - ["batch", "max_seq_len_in_batch"], - ), - ( - "labels", - [ - "batch", - ], - ), - ], - "outputs": [("loss", [], True), ("logits", ["batch", 2])], - } - elif model_name.startswith("roberta"): - model_desc = { - "inputs": [ - ( - "input_ids", - ["batch", "max_seq_len_in_batch"], - ), - ( - "attention_mask", - ["batch", "max_seq_len_in_batch"], - ), - ( - "labels", - [ - "batch", - ], - ), - ], - "outputs": [("loss", [], True), ("logits", ["batch", 2])], - } - else: - raise RuntimeError(f"unsupported base model name {model_name}.") - - return model_desc - - def run_glue(self, model_name, task_name, fp16): - model_args = ModelArguments(model_name_or_path=model_name, cache_dir=self.cache_dir) - data_args = GlueDataTrainingArguments( - 
task_name=task_name, data_dir=os.path.join(self.data_dir, task_name), max_seq_length=self.max_seq_length - ) - - training_args = TrainingArguments( - output_dir=os.path.join(self.output_dir, task_name), - do_train=True, - do_eval=True, - per_gpu_train_batch_size=self.train_batch_size, - learning_rate=self.learning_rate, - num_train_epochs=self.num_train_epochs, - local_rank=self.local_rank, - overwrite_output_dir=self.overwrite_output_dir, - gradient_accumulation_steps=self.gradient_accumulation_steps, - fp16=fp16, - logging_steps=self.logging_steps, - ) - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, - ) - logger.warning( - "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", - training_args.local_rank, - training_args.device, - training_args.n_gpu, - bool(training_args.local_rank != -1), - training_args.fp16, - ) - logger.info("Training/evaluation parameters %s", training_args) - - set_seed(training_args.seed) - onnxruntime.set_seed(training_args.seed) - - try: - num_labels = glue_tasks_num_labels[data_args.task_name] - output_mode = glue_output_modes[data_args.task_name] - except KeyError: - raise ValueError("Task not found: %s" % (data_args.task_name)) # noqa: B904 - - config = AutoConfig.from_pretrained( - model_args.config_name if model_args.config_name else model_args.model_name_or_path, - num_labels=num_labels, - finetuning_task=data_args.task_name, - cache_dir=model_args.cache_dir, - ) - tokenizer = AutoTokenizer.from_pretrained( - model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - ) - - model = AutoModelForSequenceClassification.from_pretrained( - model_args.model_name_or_path, - from_tf=bool(".ckpt" in model_args.model_name_or_path), - config=config, - cache_dir=model_args.cache_dir, - ) - - train_dataset = GlueDataset(data_args, tokenizer=tokenizer) if training_args.do_train else None - - eval_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev") if training_args.do_eval else None - - def compute_metrics(p: EvalPrediction) -> Dict: - if output_mode == "classification": - preds = np.argmax(p.predictions, axis=1) - elif output_mode == "regression": - preds = np.squeeze(p.predictions) - return glue_compute_metrics(data_args.task_name, preds, p.label_ids) - - model_desc = self.model_to_desc(model_name, model) - # Initialize the ORTTrainer within ORTTransformerTrainer - trainer = ORTTransformerTrainer( - model=model, - model_desc=model_desc, - args=training_args, - train_dataset=train_dataset, - eval_dataset=eval_dataset, - compute_metrics=compute_metrics, - world_size=self.world_size, - ) - - # Training - if training_args.do_train: - trainer.train() - trainer.save_model() - - # Evaluation - results = {} - if training_args.do_eval and training_args.local_rank in [-1, 0]: - logger.info("*** Evaluate ***") - - result = trainer.evaluate() - - logger.info(f"***** Eval results {data_args.task_name} *****") - for key, value in result.items(): - logger.info(" %s = %s", key, value) - - results.update(result) - - return results - - -if __name__ == "__main__": - if has_get_mpi_context_internal_api: - local_rank = get_mpi_context_local_rank() - world_size = get_mpi_context_world_size() - else: - local_rank = -1 - world_size = 1 - - if world_size > 1: - # mpi launch - logger.warning("mpirun launch, 
local_rank / world_size: %s : % s", local_rank, world_size) - - # TrainingArguments._setup_devices will call torch.distributed.init_process_group(backend="nccl") - # pytorch expects following environment settings (which would be set if launched with torch.distributed.launch). - - os.environ["RANK"] = str(local_rank) - os.environ["WORLD_SIZE"] = str(world_size) - os.environ["MASTER_ADDR"] = "127.0.0.1" - os.environ["MASTER_PORT"] = "29500" - - from onnxruntime.capi._pybind_state import set_cuda_device_id - - set_cuda_device_id(local_rank) - - test = ORTGlueTest() - test.setUp() - test.local_rank = local_rank - test.world_size = world_size - test.test_bert_with_mrpc() - else: - unittest.main() diff --git a/orttraining/orttraining/test/python/orttraining_run_multiple_choice.py b/orttraining/orttraining/test/python/orttraining_run_multiple_choice.py deleted file mode 100644 index 92db204593bcd..0000000000000 --- a/orttraining/orttraining/test/python/orttraining_run_multiple_choice.py +++ /dev/null @@ -1,281 +0,0 @@ -# adapted from run_multiple_choice.py of huggingface transformers -# https://github.com/huggingface/transformers/blob/master/examples/multiple-choice/run_multiple_choice.py - -import dataclasses # noqa: F401 -import logging -import os -import unittest -from dataclasses import dataclass, field -from typing import Dict, Optional - -import numpy as np -import torch # noqa: F401 -from numpy.testing import assert_allclose # noqa: F401 -from orttraining_run_glue import verify_old_and_new_api_are_equal # noqa: F401 -from orttraining_transformer_trainer import ORTTransformerTrainer -from transformers import HfArgumentParser # noqa: F401 -from transformers import Trainer # noqa: F401 -from transformers import ( - AutoConfig, - AutoModelForMultipleChoice, - AutoTokenizer, - EvalPrediction, - TrainingArguments, - set_seed, -) -from utils_multiple_choice import MultipleChoiceDataset, Split, SwagProcessor - -import onnxruntime -from onnxruntime.capi.ort_trainer import IODescription, LossScaler, ModelDescription, ORTTrainer # noqa: F401 - -logger = logging.getLogger(__name__) - - -def simple_accuracy(preds, labels): - return (preds == labels).mean() - - -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. - """ - - model_name_or_path: str = field(metadata={"help": "model identifier from huggingface.co/models"}) - config_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} - ) - tokenizer_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} - ) - cache_dir: Optional[str] = field( - default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} - ) - - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. - """ - - task_name: str = field(metadata={"help": "The name of the task to train on."}) - data_dir: str = field(metadata={"help": "Should contain the data files for the task."}) - max_seq_length: int = field( - default=128, - metadata={ - "help": "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." 
- }, - ) - overwrite_cache: bool = field(default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}) - - -class ORTMultipleChoiceTest(unittest.TestCase): - def setUp(self): - # configurations not to be changed accoss tests - self.max_seq_length = 80 - self.train_batch_size = 16 - self.eval_batch_size = 2 - self.learning_rate = 2e-5 - self.num_train_epochs = 1.0 - self.local_rank = -1 - self.overwrite_output_dir = True - self.gradient_accumulation_steps = 8 - self.data_dir = "/bert_data/hf_data/swag/swagaf/data" - self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "multiple_choice_test_output/") - self.cache_dir = "/tmp/multiple_choice/" - self.logging_steps = 10 - self.rtol = 2e-01 - - def test_bert_with_swag(self): - expected_acc = 0.75 - expected_loss = 0.64 - - results = self.run_multiple_choice(model_name="bert-base-cased", task_name="swag", fp16=False) - assert results["acc"] >= expected_acc - assert results["loss"] <= expected_loss - - def test_bert_fp16_with_swag(self): - # larger batch can be handled with mixed precision - self.train_batch_size = 32 - - expected_acc = 0.73 - expected_loss = 0.68 - - results = self.run_multiple_choice(model_name="bert-base-cased", task_name="swag", fp16=True) - assert results["acc"] >= expected_acc - assert results["loss"] <= expected_loss - - def run_multiple_choice(self, model_name, task_name, fp16): - model_args = ModelArguments(model_name_or_path=model_name, cache_dir=self.cache_dir) - data_args = DataTrainingArguments( - task_name=task_name, data_dir=self.data_dir, max_seq_length=self.max_seq_length - ) - - training_args = TrainingArguments( - output_dir=os.path.join(self.output_dir, task_name), - do_train=True, - do_eval=True, - per_gpu_train_batch_size=self.train_batch_size, - per_gpu_eval_batch_size=self.eval_batch_size, - learning_rate=self.learning_rate, - num_train_epochs=self.num_train_epochs, - local_rank=self.local_rank, - overwrite_output_dir=self.overwrite_output_dir, - gradient_accumulation_steps=self.gradient_accumulation_steps, - fp16=fp16, - logging_steps=self.logging_steps, - ) - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, - ) - logger.warning( - "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", - training_args.local_rank, - training_args.device, - training_args.n_gpu, - bool(training_args.local_rank != -1), - training_args.fp16, - ) - logger.info("Training/evaluation parameters %s", training_args) - - set_seed(training_args.seed) - onnxruntime.set_seed(training_args.seed) - - try: - processor = SwagProcessor() - label_list = processor.get_labels() - num_labels = len(label_list) - except KeyError: - raise ValueError("Task not found: %s" % (data_args.task_name)) # noqa: B904 - - config = AutoConfig.from_pretrained( - model_args.config_name if model_args.config_name else model_args.model_name_or_path, - num_labels=num_labels, - finetuning_task=data_args.task_name, - cache_dir=model_args.cache_dir, - ) - - tokenizer = AutoTokenizer.from_pretrained( - model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - ) - - model = AutoModelForMultipleChoice.from_pretrained( - model_args.model_name_or_path, - from_tf=bool(".ckpt" in model_args.model_name_or_path), - config=config, - cache_dir=model_args.cache_dir, 
- ) - - # Get datasets - train_dataset = ( - MultipleChoiceDataset( - data_dir=data_args.data_dir, - tokenizer=tokenizer, - task=data_args.task_name, - processor=processor, - max_seq_length=data_args.max_seq_length, - overwrite_cache=data_args.overwrite_cache, - mode=Split.train, - ) - if training_args.do_train - else None - ) - eval_dataset = ( - MultipleChoiceDataset( - data_dir=data_args.data_dir, - tokenizer=tokenizer, - task=data_args.task_name, - processor=processor, - max_seq_length=data_args.max_seq_length, - overwrite_cache=data_args.overwrite_cache, - mode=Split.dev, - ) - if training_args.do_eval - else None - ) - - def compute_metrics(p: EvalPrediction) -> Dict: - preds = np.argmax(p.predictions, axis=1) - return {"acc": simple_accuracy(preds, p.label_ids)} - - if model_name.startswith("bert"): - model_desc = { - "inputs": [ - ( - "input_ids", - ["batch", num_labels, "max_seq_len_in_batch"], - ), - ( - "attention_mask", - ["batch", num_labels, "max_seq_len_in_batch"], - ), - ( - "token_type_ids", - ["batch", num_labels, "max_seq_len_in_batch"], - ), - ( - "labels", - ["batch", num_labels], - ), - ], - "outputs": [("loss", [], True), ("reshaped_logits", ["batch", num_labels])], - } - else: - model_desc = { - "inputs": [ - ( - "input_ids", - ["batch", num_labels, "max_seq_len_in_batch"], - ), - ( - "attention_mask", - ["batch", num_labels, "max_seq_len_in_batch"], - ), - ( - "labels", - ["batch", num_labels], - ), - ], - "outputs": [("loss", [], True), ("reshaped_logits", ["batch", num_labels])], - } - - # Initialize the ORTTrainer within ORTTransformerTrainer - trainer = ORTTransformerTrainer( - model=model, - model_desc=model_desc, - args=training_args, - train_dataset=train_dataset, - eval_dataset=eval_dataset, - compute_metrics=compute_metrics, - ) - - # Training - if training_args.do_train: - trainer.train() - trainer.save_model() - - # Evaluation - results = {} - if training_args.do_eval and training_args.local_rank in [-1, 0]: - logger.info("*** Evaluate ***") - - result = trainer.evaluate() - - logger.info(f"***** Eval results {data_args.task_name} *****") - for key, value in result.items(): - logger.info(" %s = %s", key, value) - - results.update(result) - - return results - - -if __name__ == "__main__": - unittest.main() diff --git a/orttraining/orttraining/test/python/orttraining_test_bert_postprocess.py b/orttraining/orttraining/test/python/orttraining_test_bert_postprocess.py deleted file mode 100644 index 71e6bb8e4d2f2..0000000000000 --- a/orttraining/orttraining/test/python/orttraining_test_bert_postprocess.py +++ /dev/null @@ -1,6 +0,0 @@ -from orttraining_test_layer_norm_transform import layer_norm_transform # noqa: F401 -from orttraining_test_model_transform import add_expand_shape, add_name, fix_transpose # noqa: F401 - - -def postprocess_model(model): - add_name(model) diff --git a/orttraining/orttraining/test/python/orttraining_test_checkpoint_storage.py b/orttraining/orttraining/test/python/orttraining_test_checkpoint_storage.py deleted file mode 100644 index 21372caaf6779..0000000000000 --- a/orttraining/orttraining/test/python/orttraining_test_checkpoint_storage.py +++ /dev/null @@ -1,257 +0,0 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
-# orttraining_test_checkpoint_storage.py - -import os -import pickle -import shutil - -import numpy as np -import pytest -import torch - -from onnxruntime.training import _checkpoint_storage - -# Helper functions - - -def _equals(a, b): - """Checks recursively if two dictionaries are equal""" - if isinstance(a, dict): - return all(not (key not in b or not _equals(a[key], b[key])) for key in a) - else: - if isinstance(a, bytes): - a = a.decode() - if isinstance(b, bytes): - b = b.decode() - are_equal = a == b - return are_equal if isinstance(are_equal, bool) else are_equal.all() - - return False - - -def _numpy_types(obj_value): - """Return a bool indicating whether or not the input obj_value is a numpy type object - - Recursively checks if the obj_value (could be a dictionary) is a numpy type object. - Exceptions are str and bytes. - - Returns true if object is numpy type, str, or bytes - False if any other type - """ - if not isinstance(obj_value, dict): - return isinstance(obj_value, (str, bytes)) or type(obj_value).__module__ == np.__name__ - - return all(_numpy_types(value) for _, value in obj_value.items()) - - -def _get_dict(separated_key): - """Create dummy dictionary with different datatypes - - Returns the tuple of the entire dummy dictionary created, key argument as a dictionary for _checkpoint_storage.load - function and the value for that key in the original dictionary - - For example the complete dictionary is represented by: - { - 'int1':1, - 'int2': 2, - 'int_list': [1,2,3,5,6], - 'dict1': { - 'np_array': np.arange(100), - 'dict2': {'int3': 3, 'int4': 4}, - 'str1': "onnxruntime" - }, - 'bool1': bool(True), - 'int5': 5, - 'float1': 2.345, - 'np_array_float': np.array([1.234, 2.345, 3.456]), - 'np_array_float_3_dim': np.array([[[1,2],[3,4]], [[5,6],[7,8]]]) - } - - if the input key is ['dict1', 'str1'], then the key argument returned is 'dict1/str1' - and the value corresponding to that is "onnxruntime" - - so, for the above example, the returned tuple is: - (original_dict, {'key': 'dict1/str1', "onnxruntime") - """ - test_dict = { - "int1": 1, - "int2": 2, - "int_list": [1, 2, 3, 5, 6], - "dict1": {"np_array": np.arange(100), "dict2": {"int3": 3, "int4": 4}, "str1": "onnxruntime"}, - "bool1": True, - "int5": 5, - "float1": 2.345, - "np_array_float": np.array([1.234, 2.345, 3.456]), - "np_array_float_3_dim": np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]), - } - key = "" - expected_val = test_dict - for single_key in separated_key: - key += single_key + "/" - expected_val = expected_val[single_key] - return test_dict, {"key": key} if len(separated_key) > 0 else dict(), expected_val - - -class _CustomClass: - """Custom object that encpsulates dummy values for loss, epoch and train_step""" - - def __init__(self): - self._loss = 1.23 - self._epoch = 12000 - self._train_step = 25 - - def __eq__(self, other): - if isinstance(other, _CustomClass): - return self._loss == other._loss and self._epoch == other._epoch and self._train_step == other._train_step - - -# Test fixtures - - -@pytest.yield_fixture(scope="function") -def checkpoint_storage_test_setup(): - checkpoint_dir = os.path.abspath("checkpoint_dir/") - if not os.path.exists(checkpoint_dir): - os.makedirs(checkpoint_dir, exist_ok=True) - pytest.checkpoint_path = os.path.join(checkpoint_dir, "checkpoint.ortcp") - yield "checkpoint_storage_test_setup" - shutil.rmtree(checkpoint_dir) - - -@pytest.yield_fixture(scope="function") -def checkpoint_storage_test_parameterized_setup(request, checkpoint_storage_test_setup): - yield 
request.param - - -# Tests - - -@pytest.mark.parametrize( - "checkpoint_storage_test_parameterized_setup", - [ - _get_dict([]), - _get_dict(["int1"]), - _get_dict(["dict1"]), - _get_dict(["dict1", "dict2"]), - _get_dict(["dict1", "dict2", "int4"]), - _get_dict(["dict1", "str1"]), - _get_dict(["bool1"]), - _get_dict(["float1"]), - _get_dict(["np_array_float"]), - ], - indirect=True, -) -def test_checkpoint_storage_saved_dict_matches_loaded(checkpoint_storage_test_parameterized_setup): - to_save = checkpoint_storage_test_parameterized_setup[0] - key_arg = checkpoint_storage_test_parameterized_setup[1] - expected = checkpoint_storage_test_parameterized_setup[2] - _checkpoint_storage.save(to_save, pytest.checkpoint_path) - loaded = _checkpoint_storage.load(pytest.checkpoint_path, **key_arg) - assert _equals(loaded, expected) - assert _numpy_types(loaded) - - -@pytest.mark.parametrize( - "checkpoint_storage_test_parameterized_setup", - [{"int_set": {1, 2, 3, 4, 5}}, {"str_set": {"one", "two"}}, [1, 2, 3], 2.352], - indirect=True, -) -def test_checkpoint_storage_saving_non_supported_types_fails(checkpoint_storage_test_parameterized_setup): - to_save = checkpoint_storage_test_parameterized_setup - with pytest.raises(Exception): # noqa: B017 - _checkpoint_storage.save(to_save, pytest.checkpoint_path) - - -@pytest.mark.parametrize( - "checkpoint_storage_test_parameterized_setup", - [ - ({"int64_tensor": torch.tensor(np.arange(100))}, "int64_tensor", torch.int64, np.int64), - ({"int32_tensor": torch.tensor(np.arange(100), dtype=torch.int32)}, "int32_tensor", torch.int32, np.int32), - ({"int16_tensor": torch.tensor(np.arange(100), dtype=torch.int16)}, "int16_tensor", torch.int16, np.int16), - ({"int8_tensor": torch.tensor(np.arange(100), dtype=torch.int8)}, "int8_tensor", torch.int8, np.int8), - ({"float64_tensor": torch.tensor(np.array([1.0, 2.0]))}, "float64_tensor", torch.float64, np.float64), - ( - {"float32_tensor": torch.tensor(np.array([1.0, 2.0]), dtype=torch.float32)}, - "float32_tensor", - torch.float32, - np.float32, - ), - ( - {"float16_tensor": torch.tensor(np.array([1.0, 2.0]), dtype=torch.float16)}, - "float16_tensor", - torch.float16, - np.float16, - ), - ], - indirect=True, -) -def test_checkpoint_storage_saving_tensor_datatype(checkpoint_storage_test_parameterized_setup): - tensor_dict = checkpoint_storage_test_parameterized_setup[0] - tensor_name = checkpoint_storage_test_parameterized_setup[1] - tensor_dtype = checkpoint_storage_test_parameterized_setup[2] - np_dtype = checkpoint_storage_test_parameterized_setup[3] - - _checkpoint_storage.save(tensor_dict, pytest.checkpoint_path) - - loaded = _checkpoint_storage.load(pytest.checkpoint_path) - assert isinstance(loaded[tensor_name], np.ndarray) - assert tensor_dict[tensor_name].dtype == tensor_dtype - assert loaded[tensor_name].dtype == np_dtype - assert (tensor_dict[tensor_name].numpy() == loaded[tensor_name]).all() - - -@pytest.mark.parametrize( - "checkpoint_storage_test_parameterized_setup", - [ - ({"two_dim": torch.ones([2, 4], dtype=torch.float64)}, "two_dim"), - ({"three_dim": torch.ones([2, 4, 6], dtype=torch.float64)}, "three_dim"), - ({"four_dim": torch.ones([2, 4, 6, 8], dtype=torch.float64)}, "four_dim"), - ], - indirect=True, -) -def test_checkpoint_storage_saving_multiple_dimension_tensors(checkpoint_storage_test_parameterized_setup): - tensor_dict = checkpoint_storage_test_parameterized_setup[0] - tensor_name = checkpoint_storage_test_parameterized_setup[1] - - _checkpoint_storage.save(tensor_dict, 
pytest.checkpoint_path) - - loaded = _checkpoint_storage.load(pytest.checkpoint_path) - assert isinstance(loaded[tensor_name], np.ndarray) - assert (tensor_dict[tensor_name].numpy() == loaded[tensor_name]).all() - - -@pytest.mark.parametrize( - "checkpoint_storage_test_parameterized_setup", [{}, {"a": {}}, {"a": {"b": {}}}], indirect=True -) -def test_checkpoint_storage_saving_and_loading_empty_dictionaries_succeeds(checkpoint_storage_test_parameterized_setup): - saved = checkpoint_storage_test_parameterized_setup - _checkpoint_storage.save(saved, pytest.checkpoint_path) - - loaded = _checkpoint_storage.load(pytest.checkpoint_path) - assert _equals(saved, loaded) - - -def test_checkpoint_storage_load_file_that_does_not_exist_fails(checkpoint_storage_test_setup): - with pytest.raises(Exception): # noqa: B017 - _checkpoint_storage.load(pytest.checkpoint_path) - - -def test_checkpoint_storage_for_custom_user_dict_succeeds(checkpoint_storage_test_setup): - custom_class = _CustomClass() - user_dict = {"tensor1": torch.tensor(np.arange(100), dtype=torch.float32), "custom_class": custom_class} - - pickled_bytes = pickle.dumps(user_dict).hex() - to_save = {"a": torch.tensor(np.array([1.0, 2.0]), dtype=torch.float32), "user_dict": pickled_bytes} - _checkpoint_storage.save(to_save, pytest.checkpoint_path) - - loaded_dict = _checkpoint_storage.load(pytest.checkpoint_path) - assert (loaded_dict["a"] == to_save["a"].numpy()).all() - try: # noqa: SIM105 - loaded_dict["user_dict"] = loaded_dict["user_dict"].decode() - except AttributeError: - pass - loaded_obj = pickle.loads(bytes.fromhex(loaded_dict["user_dict"])) - - assert torch.all(loaded_obj["tensor1"].eq(user_dict["tensor1"])) - assert loaded_obj["custom_class"] == custom_class diff --git a/orttraining/orttraining/test/python/orttraining_test_data_loader.py b/orttraining/orttraining/test/python/orttraining_test_data_loader.py index aa15b44ae0d66..0009d2d3d7e1b 100644 --- a/orttraining/orttraining/test/python/orttraining_test_data_loader.py +++ b/orttraining/orttraining/test/python/orttraining_test_data_loader.py @@ -4,8 +4,6 @@ import torch from torch.utils.data import DataLoader, Dataset -from onnxruntime.capi.ort_trainer import generate_sample - global_rng = random.Random() @@ -41,6 +39,16 @@ def floats_tensor(shape, scale=1.0, rng=None, name=None): return torch.tensor(data=values, dtype=torch.float).view(shape).contiguous() +def generate_sample(desc, device=None): + """Generate a sample based on the description""" + # symbolic dimensions are described with strings. 
set symbolic dimensions to be 1 + size = [s if isinstance(s, (int)) else 1 for s in desc.shape_] + if desc.num_classes_: + return torch.randint(0, desc.num_classes_, size, dtype=desc.dtype_).to(device) + else: + return torch.randn(size, dtype=desc.dtype_).to(device) + + class OrtTestDataset(Dataset): def __init__(self, input_desc, seq_len, dataset_len, device): import copy diff --git a/orttraining/orttraining/test/python/orttraining_test_debuggability.py b/orttraining/orttraining/test/python/orttraining_test_debuggability.py deleted file mode 100644 index 499f0ba7a1ff5..0000000000000 --- a/orttraining/orttraining/test/python/orttraining_test_debuggability.py +++ /dev/null @@ -1,40 +0,0 @@ -import pytest -import torch -from _test_commons import _load_pytorch_transformer_model - -from onnxruntime import set_seed -from onnxruntime.training import optim, orttrainer - -############################################################################### -# Testing starts here ######################################################### -############################################################################### - - -@pytest.mark.parametrize( - "seed, device", - [ - (24, "cuda"), - ], -) -def testORTTransformerModelExport(seed, device): - # Common setup - optim_config = optim.LambConfig() - opts = orttrainer.ORTTrainerOptions( - { - "debug": { - "check_model_export": True, - }, - "device": { - "id": device, - }, - } - ) - - # Setup for the first ORTTRainer run - torch.manual_seed(seed) - set_seed(seed) - model, model_desc, my_loss, batcher_fn, train_data, val_data, _ = _load_pytorch_transformer_model(device) - first_trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=opts) - data, targets = batcher_fn(train_data, 0) - _ = first_trainer.train_step(data, targets) - assert first_trainer._onnx_model is not None diff --git a/orttraining/orttraining/test/python/orttraining_test_ort_apis.py b/orttraining/orttraining/test/python/orttraining_test_ort_apis.py index 506aafbe9f618..a3e666dd404f2 100644 --- a/orttraining/orttraining/test/python/orttraining_test_ort_apis.py +++ b/orttraining/orttraining/test/python/orttraining_test_ort_apis.py @@ -27,7 +27,7 @@ def run_training_apis_python_api_tests(cwd, log): log.debug("Running: ort training api tests") - command = [sys.executable, "-m", "pytest", "-sv", "orttraining_test_python_bindings.py"] + command = [sys.executable, "-m", "pytest", "-sv", "orttraining_test_ort_apis_py_bindings.py"] run_subprocess(command, cwd=cwd, log=log).check_returncode() @@ -37,7 +37,7 @@ def run_onnxblock_tests(cwd, log): log.debug("Running: onnxblock tests") - command = [sys.executable, "-m", "pytest", "-sv", "orttraining_test_onnxblock.py"] + command = [sys.executable, "-m", "pytest", "-sv", "orttraining_test_ort_apis_onnxblock.py"] run_subprocess(command, cwd=cwd, log=log).check_returncode() diff --git a/orttraining/orttraining/test/python/orttraining_test_onnxblock.py b/orttraining/orttraining/test/python/orttraining_test_ort_apis_onnxblock.py similarity index 100% rename from orttraining/orttraining/test/python/orttraining_test_onnxblock.py rename to orttraining/orttraining/test/python/orttraining_test_ort_apis_onnxblock.py diff --git a/orttraining/orttraining/test/python/orttraining_test_python_bindings.py b/orttraining/orttraining/test/python/orttraining_test_ort_apis_py_bindings.py similarity index 99% rename from orttraining/orttraining/test/python/orttraining_test_python_bindings.py rename to 
orttraining/orttraining/test/python/orttraining_test_ort_apis_py_bindings.py index d5c37b3e36ee7..34d8c24ccfab4 100644 --- a/orttraining/orttraining/test/python/orttraining_test_python_bindings.py +++ b/orttraining/orttraining/test/python/orttraining_test_ort_apis_py_bindings.py @@ -11,7 +11,7 @@ import onnx import pytest import torch -from orttraining_test_onnxblock import _get_models +from orttraining_test_ort_apis_onnxblock import _get_models import onnxruntime.training.onnxblock as onnxblock from onnxruntime import OrtValue, SessionOptions diff --git a/orttraining/orttraining/test/python/orttraining_test_hooks.py b/orttraining/orttraining/test/python/orttraining_test_ortmodule_hooks.py similarity index 100% rename from orttraining/orttraining/test/python/orttraining_test_hooks.py rename to orttraining/orttraining/test/python/orttraining_test_ortmodule_hooks.py diff --git a/orttraining/orttraining/test/python/orttraining_test_onnx_ops_ortmodule.py b/orttraining/orttraining/test/python/orttraining_test_ortmodule_onnx_ops.py similarity index 100% rename from orttraining/orttraining/test/python/orttraining_test_onnx_ops_ortmodule.py rename to orttraining/orttraining/test/python/orttraining_test_ortmodule_onnx_ops.py diff --git a/orttraining/orttraining/test/python/orttraining_test_orttrainer_bert_toy_onnx.py b/orttraining/orttraining/test/python/orttraining_test_orttrainer_bert_toy_onnx.py deleted file mode 100644 index 45b87b32f7d64..0000000000000 --- a/orttraining/orttraining/test/python/orttraining_test_orttrainer_bert_toy_onnx.py +++ /dev/null @@ -1,1283 +0,0 @@ -import copy # noqa: F401 -import inspect # noqa: F401 -import math # noqa: F401 -import os -from functools import partial - -import _test_commons -import _test_helpers -import onnx -import pytest -import torch -from numpy.testing import assert_allclose - -import onnxruntime -from onnxruntime.capi.ort_trainer import IODescription as Legacy_IODescription -from onnxruntime.capi.ort_trainer import LossScaler as Legacy_LossScaler -from onnxruntime.capi.ort_trainer import ModelDescription as Legacy_ModelDescription -from onnxruntime.capi.ort_trainer import ORTTrainer as Legacy_ORTTrainer -from onnxruntime.training import amp, optim, orttrainer - -############################################################################### -# Helper functions ############################################################ -############################################################################### - - -def generate_random_input_from_model_desc(desc, seed=1, device="cuda:0"): - """Generates a sample input for the BERT model using the model desc""" - - torch.manual_seed(seed) - onnxruntime.set_seed(seed) - dtype = torch.int64 - vocab_size = 30528 - num_classes = [vocab_size, 2, 2, vocab_size, 2] - dims = {"batch_size": 16, "seq_len": 1} - sample_input = [] - for index, input in enumerate(desc["inputs"]): - size = [] - for s in input[1]: - if isinstance(s, (int)): - size.append(s) - else: - size.append(dims[s] if s in dims else 1) - sample_input.append(torch.randint(0, num_classes[index], tuple(size), dtype=dtype).to(device)) - return sample_input - - -# EXPERIMENTAL HELPER FUNCTIONS - - -def bert_model_description(dynamic_shape=True): - """Creates the model description dictionary with static dimensions""" - - if dynamic_shape: - model_desc = { - "inputs": [ - ("input_ids", ["batch_size", "seq_len"]), - ( - "segment_ids", - ["batch_size", "seq_len"], - ), - ( - "input_mask", - ["batch_size", "seq_len"], - ), - ( - "masked_lm_labels", - 
["batch_size", "seq_len"], - ), - ( - "next_sentence_labels", - [ - "batch_size", - ], - ), - ], - "outputs": [("loss", [], True)], - } - else: - batch_size = 16 - seq_len = 1 - model_desc = { - "inputs": [ - ("input_ids", [batch_size, seq_len]), - ( - "segment_ids", - [batch_size, seq_len], - ), - ( - "input_mask", - [batch_size, seq_len], - ), - ( - "masked_lm_labels", - [batch_size, seq_len], - ), - ( - "next_sentence_labels", - [ - batch_size, - ], - ), - ], - "outputs": [("loss", [], True)], - } - return model_desc - - -def optimizer_parameters(model): - """A method to assign different hyper parameters for different model parameter groups""" - - no_decay_keys = ["bias", "gamma", "beta", "LayerNorm"] - no_decay_param_group = [] - for initializer in model.graph.initializer: - if any(key in initializer.name for key in no_decay_keys): - no_decay_param_group.append(initializer.name) - params = [ - { - "params": no_decay_param_group, - "alpha": 0.9, - "beta": 0.999, - "lambda_coef": 0.0, - "epsilon": 1e-6, - "do_bias_correction": False, - } - ] - - return params - - -def load_bert_onnx_model(): - bert_onnx_model_path = os.path.join("testdata", "bert_toy_postprocessed.onnx") - model = onnx.load(bert_onnx_model_path) - return model - - -class CustomLossScaler(amp.LossScaler): - def __init__(self, loss_scale=float(1 << 16)): - super().__init__(loss_scale) - self._initial_loss_scale = loss_scale - self.loss_scale = loss_scale - - def reset(self): - self.loss_scale = self._initial_loss_scale - - def update(self, train_step_info): - self.loss_scale *= 0.9 - return self.loss_scale - - -# LEGACY HELPER FUNCTIONS - - -class LegacyCustomLossScaler: - def __init__(self, loss_scale=float(1 << 16)): - self._initial_loss_scale = loss_scale - self.loss_scale_ = loss_scale - - def reset(self): - self.loss_scale_ = self._initial_loss_scale - - def update_loss_scale(self, is_all_finite): - self.loss_scale_ *= 0.9 - - -def legacy_model_params(lr, device=torch.device("cuda", 0)): # noqa: B008 - legacy_model_desc = legacy_bert_model_description() - learning_rate_description = legacy_ort_trainer_learning_rate_description() - learning_rate = torch.tensor([lr]).to(device) - return (legacy_model_desc, learning_rate_description, learning_rate) - - -def legacy_ort_trainer_learning_rate_description(): - return Legacy_IODescription( - "Learning_Rate", - [ - 1, - ], - torch.float32, - ) - - -def legacy_bert_model_description(): - input_ids_desc = Legacy_IODescription("input_ids", ["batch", "max_seq_len_in_batch"]) - segment_ids_desc = Legacy_IODescription("segment_ids", ["batch", "max_seq_len_in_batch"]) - input_mask_desc = Legacy_IODescription("input_mask", ["batch", "max_seq_len_in_batch"]) - masked_lm_labels_desc = Legacy_IODescription("masked_lm_labels", ["batch", "max_seq_len_in_batch"]) - next_sentence_labels_desc = Legacy_IODescription( - "next_sentence_labels", - [ - "batch", - ], - ) - loss_desc = Legacy_IODescription("loss", []) - - return Legacy_ModelDescription( - [input_ids_desc, segment_ids_desc, input_mask_desc, masked_lm_labels_desc, next_sentence_labels_desc], - [loss_desc], - ) - - -def legacy_optim_params_a(name): - return {"alpha": 0.9, "beta": 0.999, "lambda": 0.01, "epsilon": 1e-6, "do_bias_correction": False} - - -def legacy_optim_params_b(name): - params = ["bert.embeddings.LayerNorm.bias", "bert.embeddings.LayerNorm.weight"] - if name in params: - return {"alpha": 0.9, "beta": 0.999, "lambda": 0.0, "epsilon": 1e-6, "do_bias_correction": False} - return {"alpha": 0.9, "beta": 0.999, "lambda": 
0.01, "epsilon": 1e-6, "do_bias_correction": False} - - -def legacy_optim_params_c(name): - params_group = optimizer_parameters(load_bert_onnx_model()) - if name in params_group[0]["params"]: - return {"alpha": 0.9, "beta": 0.999, "lambda": 0.0, "epsilon": 1e-6, "do_bias_correction": False} - return {"alpha": 0.9, "beta": 0.999, "lambda": 0.01, "epsilon": 1e-6, "do_bias_correction": False} - - -############################################################################### -# Testing starts here ######################################################### -############################################################################### - - -@pytest.mark.parametrize("dynamic_shape", [(True), (False)]) -def testToyBERTModelBasicTraining(dynamic_shape): - model_desc = bert_model_description(dynamic_shape) - model = load_bert_onnx_model() - - optim_config = optim.LambConfig() - opts = orttrainer.ORTTrainerOptions({}) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, options=opts) - - for _i in range(10): - sample_input = generate_random_input_from_model_desc(model_desc) - output = trainer.train_step(*sample_input) - assert output.shape == torch.Size([]) - - -@pytest.mark.parametrize( - "expected_losses", - [([11.041123, 10.986166, 11.101636, 11.013366, 11.03775, 11.041175, 10.957118, 11.069563, 11.040824, 11.16437])], -) -def testToyBERTDeterministicCheck(expected_losses): - # Common setup - train_steps = 10 - device = "cuda" - seed = 1 - rtol = 1e-3 - torch.manual_seed(seed) - onnxruntime.set_seed(seed) - - # Modeling - model_desc = bert_model_description() - model = load_bert_onnx_model() - optimizer_parameters(model) - optim_config = optim.LambConfig() - opts = orttrainer.ORTTrainerOptions( - { - "debug": {"deterministic_compute": True}, - "device": { - "id": device, - }, - } - ) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, options=opts) - - # Train - experimental_losses = [] - for i in range(train_steps): - sample_input = generate_random_input_from_model_desc(model_desc, i) - experimental_losses.append(trainer.train_step(*sample_input).cpu().item()) - - # Check output - _test_helpers.assert_model_outputs(experimental_losses, expected_losses, rtol=rtol) - - -@pytest.mark.parametrize( - "initial_lr, lr_scheduler, expected_learning_rates, expected_losses", - [ - ( - 1.0, - optim.lr_scheduler.ConstantWarmupLRScheduler, - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], - [ - 10.988012313842773, - 10.99213981628418, - 120.79301452636719, - 36.11647033691406, - 95.83200073242188, - 221.2766571044922, - 208.40316772460938, - 279.5332946777344, - 402.46380615234375, - 325.79254150390625, - ], - ), - ( - 0.5, - optim.lr_scheduler.ConstantWarmupLRScheduler, - [0.0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], - [ - 10.988012313842773, - 10.99213981628418, - 52.69743347167969, - 19.741533279418945, - 83.88340759277344, - 126.39848327636719, - 91.53898620605469, - 63.62016296386719, - 102.21206665039062, - 180.1424560546875, - ], - ), - ( - 1.0, - optim.lr_scheduler.CosineWarmupLRScheduler, - [ - 0.0, - 0.9931806517013612, - 0.9397368756032445, - 0.8386407858128706, - 0.7008477123264848, - 0.5412896727361662, - 0.37725725642960045, - 0.22652592093878665, - 0.10542974530180327, - 0.02709137914968268, - ], - [ - 10.988012313842773, - 10.99213981628418, - 120.6441650390625, - 32.152557373046875, - 89.63705444335938, - 138.8782196044922, - 117.57748413085938, - 148.01927185058594, - 229.60403442382812, - 110.2930908203125, - ], - ), - ( - 1.0, - 
optim.lr_scheduler.LinearWarmupLRScheduler, - [ - 0.0, - 0.9473684210526315, - 0.8421052631578947, - 0.7368421052631579, - 0.631578947368421, - 0.5263157894736842, - 0.42105263157894735, - 0.3157894736842105, - 0.21052631578947367, - 0.10526315789473684, - ], - [ - 10.988012313842773, - 10.99213981628418, - 112.89633178710938, - 31.114538192749023, - 80.94029235839844, - 131.34490966796875, - 111.4329605102539, - 133.74252319335938, - 219.37344360351562, - 109.67041015625, - ], - ), - ( - 1.0, - optim.lr_scheduler.PolyWarmupLRScheduler, - [ - 0.0, - 0.9473684263157895, - 0.8421052789473684, - 0.7368421315789474, - 0.6315789842105263, - 0.5263158368421054, - 0.42105268947368424, - 0.31578954210526317, - 0.21052639473684212, - 0.10526324736842106, - ], - [ - 10.988012313842773, - 10.99213981628418, - 112.89633178710938, - 31.114538192749023, - 80.9402847290039, - 131.3447265625, - 111.43253326416016, - 133.7415008544922, - 219.37147521972656, - 109.66986083984375, - ], - ), - ], -) -def testToyBERTModelLRScheduler(initial_lr, lr_scheduler, expected_learning_rates, expected_losses): - return # TODO: re-enable after nondeterminism on backend is fixed - # Common setup - device = "cuda" - total_steps = 10 - seed = 1 - warmup = 0.05 - cycles = 0.5 - power = 1.0 - lr_end = 1e-7 - rtol = 1e-3 - torch.manual_seed(seed) - onnxruntime.set_seed(seed) - - # Setup LR Schedulers - if ( - lr_scheduler == optim.lr_scheduler.ConstantWarmupLRScheduler - or lr_scheduler == optim.lr_scheduler.LinearWarmupLRScheduler - ): - lr_scheduler = lr_scheduler(total_steps=total_steps, warmup=warmup) - elif lr_scheduler == optim.lr_scheduler.CosineWarmupLRScheduler: - lr_scheduler = lr_scheduler(total_steps=total_steps, warmup=warmup, cycles=cycles) - elif lr_scheduler == optim.lr_scheduler.PolyWarmupLRScheduler: - lr_scheduler = lr_scheduler(total_steps=total_steps, warmup=warmup, power=power, lr_end=lr_end) - else: - raise RuntimeError("Invalid lr_scheduler") - - # Modeling - model_desc = bert_model_description() - model = load_bert_onnx_model() - optim_config = optim.AdamConfig(lr=initial_lr) - opts = orttrainer.ORTTrainerOptions( - { - "debug": {"deterministic_compute": True}, - "device": { - "id": device, - }, - "lr_scheduler": lr_scheduler, - } - ) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, options=opts) - - # Train - losses = [] - learning_rates = [] - for i in range(total_steps): - sample_input = generate_random_input_from_model_desc(model_desc, i) - losses.append(trainer.train_step(*sample_input).cpu().item()) - learning_rates.append(trainer.options.lr_scheduler.get_last_lr()[0]) - - # Check output - _test_helpers.assert_model_outputs(learning_rates, expected_learning_rates, rtol=rtol) - _test_helpers.assert_model_outputs(losses, expected_losses, rtol=rtol) - - -@pytest.mark.parametrize( - "loss_scaler, expected_losses", - [ - ( - None, - [ - 11.041126, - 10.986309, - 11.101673, - 11.013394, - 11.037781, - 11.041253, - 10.957072, - 11.069506, - 11.040807, - 11.164349, - ], - ), - ( - amp.DynamicLossScaler(), - [ - 11.041126, - 10.986309, - 11.101673, - 11.013394, - 11.037781, - 11.041253, - 10.957072, - 11.069506, - 11.040807, - 11.164349, - ], - ), - ( - CustomLossScaler(), - [ - 11.041126, - 10.986309, - 11.101645, - 11.013412, - 11.037757, - 11.041273, - 10.957077, - 11.069525, - 11.040765, - 11.164298, - ], - ), - ], -) -def testToyBERTModelMixedPrecisionLossScaler(loss_scaler, expected_losses): - # Common setup - total_steps = 10 - device = "cuda" - seed = 1 - rtol = 1e-3 - 
torch.manual_seed(seed) - onnxruntime.set_seed(seed) - - # Modeling - model_desc = bert_model_description() - model = load_bert_onnx_model() - optim_config = optim.LambConfig() - opts = orttrainer.ORTTrainerOptions( - { - "debug": {"deterministic_compute": True}, - "device": { - "id": device, - }, - "mixed_precision": {"enabled": True, "loss_scaler": loss_scaler}, - } - ) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, options=opts) - - # Train - losses = [] - for i in range(total_steps): - sample_input = generate_random_input_from_model_desc(model_desc, i) - losses.append(trainer.train_step(*sample_input).cpu().item()) - - # Check output - _test_helpers.assert_model_outputs(losses, expected_losses, rtol=rtol) - - -@pytest.mark.parametrize( - "gradient_accumulation_steps, expected_losses", - [ - ( - 1, - [ - 11.041123, - 10.986166, - 11.101636, - 11.013366, - 11.03775, - 11.041175, - 10.957118, - 11.069563, - 11.040824, - 11.16437, - ], - ), - ( - 4, - [ - 11.041123, - 10.982856, - 11.105512, - 11.006721, - 11.03358, - 11.05058, - 10.955864, - 11.059035, - 11.037753, - 11.162649, - ], - ), - ( - 7, - [ - 11.041123, - 10.982856, - 11.105512, - 11.006721, - 11.036314, - 11.055109, - 10.960751, - 11.05809, - 11.038856, - 11.159635, - ], - ), - ], -) -def testToyBERTModelGradientAccumulation(gradient_accumulation_steps, expected_losses): - # Common setup - total_steps = 10 - device = "cuda" - seed = 1 - rtol = 1e-3 - torch.manual_seed(seed) - onnxruntime.set_seed(seed) - - # Modeling - model_desc = bert_model_description() - model = load_bert_onnx_model() - optim_config = optim.LambConfig() - opts = orttrainer.ORTTrainerOptions( - { - "debug": {"deterministic_compute": True}, - "device": { - "id": device, - }, - "batch": {"gradient_accumulation_steps": gradient_accumulation_steps}, - } - ) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, options=opts) - - # Train - losses = [] - for i in range(total_steps): - sample_input = generate_random_input_from_model_desc(model_desc, i) - losses.append(trainer.train_step(*sample_input).cpu().item()) - - # Check output - _test_helpers.assert_model_outputs(losses, expected_losses, rtol=rtol) - - -def testToyBertCheckpointBasic(): - # Common setup - seed = 1 - torch.manual_seed(seed) - onnxruntime.set_seed(seed) - optim_config = optim.LambConfig() - opts = orttrainer.ORTTrainerOptions({"debug": {"deterministic_compute": True}}) - - # Create ORTTrainer and save initial state in a dict - model = load_bert_onnx_model() - model_desc = bert_model_description() - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, options=opts) - sd = trainer.state_dict() - - ## All initializers must be present in the state_dict - ## when the specified model for ORTTRainer is an ONNX model - for param in trainer._onnx_model.graph.initializer: - assert param.name in sd["model"]["full_precision"] - - ## Modify one of the state values and load into ORTTrainer - sd["model"]["full_precision"]["bert.encoder.layer.0.attention.output.LayerNorm.weight"] += 10 - trainer.load_state_dict(sd) - - ## Save a checkpoint - ckpt_dir = "testdata" - trainer.save_checkpoint(os.path.join(ckpt_dir, "bert_toy_save_test.ortcp")) - del trainer - del model - - # Create a new ORTTrainer and load the checkpoint from previous ORTTrainer - model2 = load_bert_onnx_model() - model_desc2 = bert_model_description() - trainer2 = orttrainer.ORTTrainer(model2, model_desc2, optim_config, options=opts) - trainer2.load_checkpoint(os.path.join(ckpt_dir, 
"bert_toy_save_test.ortcp")) - loaded_sd = trainer2.state_dict() - - # Assert whether original state and the one loaded from checkpoint matches - _test_commons.assert_all_states_close_ort(sd, loaded_sd) - - -def testToyBertCheckpointFrozenWeights(): - # Common setup - seed = 1 - total_steps = 10 - torch.manual_seed(seed) - onnxruntime.set_seed(seed) - opts = orttrainer.ORTTrainerOptions( - { - "debug": {"deterministic_compute": True}, - "utils": {"frozen_weights": ["bert.encoder.layer.0.attention.self.value.weight"]}, - } - ) - - # Create ORTTrainer and save initial state in a dict - model = load_bert_onnx_model() - model_desc = bert_model_description() - optim_config = optim.LambConfig() - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, options=opts) - - # Train for a few steps - for _i in range(total_steps): - sample_input = generate_random_input_from_model_desc(model_desc, seed) - _ = trainer.train_step(*sample_input) - sample_input = generate_random_input_from_model_desc(model_desc, seed + total_steps + 1) - # Evaluate once to get a base loss - loss = trainer.eval_step(*sample_input) - # Save checkpoint - state_dict = trainer.state_dict() - - # Load previous state into another instance of ORTTrainer - model2 = load_bert_onnx_model() - model_desc2 = bert_model_description() - optim_config2 = optim.LambConfig() - trainer2 = orttrainer.ORTTrainer(model2, model_desc2, optim_config2, options=opts) - trainer2.load_state_dict(state_dict) - # Evaluate once to get a base loss - ckpt_loss = trainer2.eval_step(*sample_input) - - # Must match as both trainers have the same dict state - assert_allclose(loss.cpu(), ckpt_loss.cpu()) - loaded_state_dict = trainer2.state_dict() - _test_commons.assert_all_states_close_ort(state_dict, loaded_state_dict) - - -@pytest.mark.parametrize( - "optimizer, mixedprecision_enabled", - [ - (optim.LambConfig(), False), - (optim.AdamConfig(), False), - (optim.LambConfig(), True), - (optim.AdamConfig(), True), - ], -) -def testToyBertLoadOptimState(optimizer, mixedprecision_enabled): - # Common setup - device = "cuda" - seed = 1 - torch.manual_seed(seed) - onnxruntime.set_seed(seed) - optim_config = optimizer - opts = orttrainer.ORTTrainerOptions( - { - "debug": {"deterministic_compute": True}, - "device": {"id": device}, - "mixed_precision": { - "enabled": mixedprecision_enabled, - }, - "distributed": {"allreduce_post_accumulation": True}, - } - ) - - # Create ORTTrainer and save initial state in a dict - model = load_bert_onnx_model() - model_desc = bert_model_description() - dummy_init_state = _test_commons.generate_dummy_optim_state(model, optimizer) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, options=opts) - trainer.load_state_dict(dummy_init_state) - - # Expected values - input_ids = torch.tensor( - [ - [26598], - [21379], - [19922], - [5219], - [5644], - [20559], - [23777], - [25672], - [22969], - [16824], - [16822], - [635], - [27399], - [20647], - [18519], - [15546], - ], - device=device, - ) - segment_ids = torch.tensor( - [[0], [1], [0], [1], [0], [0], [1], [0], [0], [1], [1], [0], [0], [1], [1], [1]], device=device - ) - input_mask = torch.tensor( - [[0], [0], [0], [0], [1], [1], [1], [0], [1], [1], [0], [0], [0], [1], [0], [0]], device=device - ) - masked_lm_labels = torch.tensor( - [ - [25496], - [16184], - [11005], - [16228], - [14884], - [21660], - [8678], - [23083], - [4027], - [8397], - [11921], - [1333], - [26482], - [1666], - [17925], - [27978], - ], - device=device, - ) - next_sentence_labels = 
torch.tensor([0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0], device=device) - - # Actual values - _ = trainer.eval_step(input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels) - - actual_state_dict = trainer.state_dict() - del actual_state_dict["model"] - _test_commons.assert_all_states_close_ort(actual_state_dict, dummy_init_state) - - -@pytest.mark.parametrize( - "model_params", - [ - (["bert.embeddings.LayerNorm.bias"]), - ( - [ - "bert.embeddings.LayerNorm.bias", - "bert.embeddings.LayerNorm.weight", - "bert.encoder.layer.0.attention.output.LayerNorm.bias", - ] - ), - ], -) -def testORTTrainerFrozenWeights(model_params): - device = "cuda" - total_steps = 10 - seed = 1 - - # EXPERIMENTAL API - model_desc = bert_model_description() - model = load_bert_onnx_model() - - optim_config = optim.LambConfig() - # Setup ORTTrainer WITHOUT frozen weights - opts_dict = { - "debug": {"deterministic_compute": True}, - "device": { - "id": device, - }, - } - opts = orttrainer.ORTTrainerOptions(opts_dict) - - torch.manual_seed(seed) - onnxruntime.set_seed(seed) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, options=opts) - - for i in range(total_steps): - sample_input = generate_random_input_from_model_desc(model_desc, i) - trainer.train_step(*sample_input) - - # All model_params must be in the session state - assert trainer._onnx_model is not None - session_state = trainer._training_session.get_state() - assert all([param in session_state for param in model_params]) - - # Setup ORTTrainer WITH frozen weights - opts_dict.update({"utils": {"frozen_weights": model_params}}) - opts = orttrainer.ORTTrainerOptions(opts_dict) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, options=opts) - - for i in range(total_steps): - sample_input = generate_random_input_from_model_desc(model_desc, i) - trainer.train_step(*sample_input) - - # All model_params CANNOT be in the session state - assert trainer._onnx_model is not None - session_state = trainer._training_session.get_state() - assert not any([param in session_state for param in model_params]) - - -def testToyBERTSaveAsONNX(): - device = "cuda" - onnx_file_name = "_____temp_toy_bert_onnx_model.onnx" - if os.path.exists(onnx_file_name): - os.remove(onnx_file_name) - assert not os.path.exists(onnx_file_name) - - # Load trainer - model_desc = bert_model_description() - model = load_bert_onnx_model() - - optim_config = optim.LambConfig() - opts = orttrainer.ORTTrainerOptions( - { - "debug": {"deterministic_compute": True}, - "device": { - "id": device, - }, - } - ) - - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, options=opts) - - trainer.save_as_onnx(onnx_file_name) - assert os.path.exists(onnx_file_name) - - with open(onnx_file_name, "rb") as f: - bin_str = f.read() - reload_onnx_model = onnx.load_model_from_string(bin_str) - os.remove(onnx_file_name) - - # Create a new trainer from persisted ONNX model and compare with original ONNX model - trainer_from_onnx = orttrainer.ORTTrainer(reload_onnx_model, model_desc, optim_config, options=opts) - assert trainer_from_onnx._onnx_model is not None - assert id(trainer_from_onnx._onnx_model) != id(trainer._onnx_model) - for initializer, loaded_initializer in zip( - trainer._onnx_model.graph.initializer, trainer_from_onnx._onnx_model.graph.initializer - ): - assert initializer.name == loaded_initializer.name - assert onnx.helper.printable_graph(trainer_from_onnx._onnx_model.graph) == onnx.helper.printable_graph( - trainer._onnx_model.graph - ) - 
_test_helpers.assert_onnx_weights(trainer, trainer_from_onnx) - - -############################################################################### -# Temporary tests comparing Legacy vs Experimental ORTTrainer APIs ############ -############################################################################### -@pytest.mark.parametrize( - "optimizer_config", - [ - (optim.AdamConfig), - # (optim.LambConfig), # TODO: re-enable after nondeterminism on backend is fixed - (optim.SGDConfig), - ], -) -def testToyBERTModelLegacyExperimentalBasicTraining(optimizer_config): - # Common setup - train_steps = 512 - - device = "cuda" - seed = 1 - torch.manual_seed(seed) - onnxruntime.set_seed(seed) - - # EXPERIMENTAL API - model_desc = bert_model_description() - model = load_bert_onnx_model() - opts = orttrainer.ORTTrainerOptions( - { - "debug": {"deterministic_compute": True}, - "device": { - "id": device, - }, - } - ) - optim_config = optimizer_config(lr=0.01) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, options=opts) - experimental_losses = [] - for i in range(train_steps): - sample_input = generate_random_input_from_model_desc(model_desc, i) - experimental_losses.append(trainer.train_step(*sample_input).cpu().item()) - - # LEGACY IMPLEMENTATION - torch.manual_seed(seed) - onnxruntime.set_seed(seed) - - if optimizer_config == optim.AdamConfig: - legacy_optimizer = "AdamOptimizer" - elif optimizer_config == optim.LambConfig: - legacy_optimizer = "LambOptimizer" - elif optimizer_config == optim.SGDConfig: - legacy_optimizer = "SGDOptimizer" - else: - raise RuntimeError("Invalid optimizer_config") - - device = torch.device(device) - model = load_bert_onnx_model() - legacy_model_desc, learning_rate_description, learning_rate = legacy_model_params(lr=optim_config.lr) - legacy_trainer = Legacy_ORTTrainer( - model, - None, - legacy_model_desc, - legacy_optimizer, - None, - learning_rate_description, - device, - _use_deterministic_compute=True, - ) - legacy_losses = [] - for i in range(train_steps): - sample_input = generate_random_input_from_model_desc(model_desc, i) - leg_loss = legacy_trainer.train_step(*sample_input, learning_rate) - legacy_losses.append(leg_loss.cpu().item()) - - # Check results - _test_helpers.assert_model_outputs(experimental_losses, legacy_losses, True) - - -@pytest.mark.parametrize( - "initial_lr, lr_scheduler, legacy_lr_scheduler", - [ - (1.0, optim.lr_scheduler.ConstantWarmupLRScheduler, _test_commons.legacy_constant_lr_scheduler), - (0.5, optim.lr_scheduler.ConstantWarmupLRScheduler, _test_commons.legacy_constant_lr_scheduler), - (1.0, optim.lr_scheduler.CosineWarmupLRScheduler, _test_commons.legacy_cosine_lr_scheduler), - (1.0, optim.lr_scheduler.LinearWarmupLRScheduler, _test_commons.legacy_linear_lr_scheduler), - (1.0, optim.lr_scheduler.PolyWarmupLRScheduler, _test_commons.legacy_poly_lr_scheduler), - ], -) -def testToyBERTModelLegacyExperimentalLRScheduler(initial_lr, lr_scheduler, legacy_lr_scheduler): - ############################################################################ - # These tests require hard-coded values for 'total_steps' and 'initial_lr' # - ############################################################################ - - # Common setup - total_steps = 128 - device = "cuda" - seed = 1 - warmup = 0.05 - cycles = 0.5 - power = 1.0 - lr_end = 1e-7 - - # Setup both Experimental and Legacy LR Schedulers before the experimental loop - if ( - legacy_lr_scheduler == _test_commons.legacy_constant_lr_scheduler - or legacy_lr_scheduler == 
_test_commons.legacy_linear_lr_scheduler - ): - legacy_lr_scheduler = partial( - legacy_lr_scheduler, initial_lr=initial_lr, total_steps=total_steps, warmup=warmup - ) - elif legacy_lr_scheduler == _test_commons.legacy_cosine_lr_scheduler: - legacy_lr_scheduler = partial( - legacy_lr_scheduler, initial_lr=initial_lr, total_steps=total_steps, warmup=warmup, cycles=cycles - ) - elif legacy_lr_scheduler == _test_commons.legacy_poly_lr_scheduler: - legacy_lr_scheduler = partial( - legacy_lr_scheduler, - initial_lr=initial_lr, - total_steps=total_steps, - warmup=warmup, - power=power, - lr_end=lr_end, - ) - else: - raise RuntimeError("Invalid legacy_lr_scheduler") - if ( - lr_scheduler == optim.lr_scheduler.ConstantWarmupLRScheduler - or lr_scheduler == optim.lr_scheduler.LinearWarmupLRScheduler - ): - lr_scheduler = lr_scheduler(total_steps=total_steps, warmup=warmup) - elif lr_scheduler == optim.lr_scheduler.CosineWarmupLRScheduler: - lr_scheduler = lr_scheduler(total_steps=total_steps, warmup=warmup, cycles=cycles) - elif lr_scheduler == optim.lr_scheduler.PolyWarmupLRScheduler: - lr_scheduler = lr_scheduler(total_steps=total_steps, warmup=warmup, power=power, lr_end=lr_end) - else: - raise RuntimeError("Invalid lr_scheduler") - - # EXPERIMENTAL API - model_desc = bert_model_description() - model = load_bert_onnx_model() - torch.manual_seed(seed) - onnxruntime.set_seed(seed) - optim_config = optim.AdamConfig(lr=initial_lr) - opts = orttrainer.ORTTrainerOptions( - { - "debug": {"deterministic_compute": True}, - "device": { - "id": device, - }, - "lr_scheduler": lr_scheduler, - } - ) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, options=opts) - experimental_losses = [] - for i in range(total_steps): - sample_input = generate_random_input_from_model_desc(model_desc, i) - experimental_losses.append(trainer.train_step(*sample_input).cpu().item()) - assert_allclose(trainer.options.lr_scheduler.get_last_lr()[0], legacy_lr_scheduler(i)) - - # LEGACY IMPLEMENTATION - torch.manual_seed(seed) - onnxruntime.set_seed(seed) - device = torch.device(device) - model = load_bert_onnx_model() - legacy_model_desc, learning_rate_description, learning_rate = legacy_model_params(initial_lr) - legacy_trainer = Legacy_ORTTrainer( - model, - None, - legacy_model_desc, - "AdamOptimizer", - None, - learning_rate_description, - device, - _use_deterministic_compute=True, - get_lr_this_step=legacy_lr_scheduler, - ) - legacy_losses = [] - for i in range(total_steps): - sample_input = generate_random_input_from_model_desc(model_desc, i) - leg_loss = legacy_trainer.train_step(*sample_input) - legacy_losses.append(leg_loss.cpu().item()) - - # Check results - _test_helpers.assert_model_outputs(experimental_losses, legacy_losses) - - -@pytest.mark.parametrize( - "loss_scaler, legacy_loss_scaler", - [ - (None, Legacy_LossScaler("ort_test_input_loss_scaler", True)), - (amp.DynamicLossScaler(), Legacy_LossScaler("ort_test_input_loss_scaler", True)), - (CustomLossScaler(), LegacyCustomLossScaler()), - ], -) -def testToyBERTModelMixedPrecisionLossScalerLegacyExperimental(loss_scaler, legacy_loss_scaler): - # Common setup - total_steps = 128 - device = "cuda" - seed = 1 - - # EXPERIMENTAL IMPLEMENTATION - torch.manual_seed(seed) - onnxruntime.set_seed(seed) - model_desc = bert_model_description() - model = load_bert_onnx_model() - optim_config = optim.AdamConfig(lr=0.001) - opts = orttrainer.ORTTrainerOptions( - { - "debug": {"deterministic_compute": True}, - "device": { - "id": device, - }, - "mixed_precision": 
{"enabled": True, "loss_scaler": loss_scaler}, - } - ) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, options=opts) - experimental_losses = [] - for i in range(total_steps): - sample_input = generate_random_input_from_model_desc(model_desc, i) - experimental_losses.append(trainer.train_step(*sample_input).cpu().item()) - - # LEGACY IMPLEMENTATION - torch.manual_seed(seed) - onnxruntime.set_seed(seed) - device = torch.device(device) - model = load_bert_onnx_model() - legacy_model_desc, learning_rate_description, learning_rate = legacy_model_params(optim_config.lr) - legacy_trainer = Legacy_ORTTrainer( - model, - None, - legacy_model_desc, - "AdamOptimizer", - None, - learning_rate_description, - device, - _use_deterministic_compute=True, - use_mixed_precision=True, - loss_scaler=legacy_loss_scaler, - ) - legacy_losses = [] - for i in range(total_steps): - sample_input = generate_random_input_from_model_desc(model_desc, i) - leg_loss = legacy_trainer.train_step(*sample_input, learning_rate) - legacy_losses.append(leg_loss.cpu().item()) - - # Check results - _test_helpers.assert_model_outputs(experimental_losses, legacy_losses) - - -@pytest.mark.parametrize("gradient_accumulation_steps", [(1), (4), (7)]) -def testToyBERTModelGradientAccumulationLegacyExperimental(gradient_accumulation_steps): - # Common setup - total_steps = 128 - device = "cuda" - seed = 1 - - # EXPERIMENTAL IMPLEMENTATION - torch.manual_seed(seed) - onnxruntime.set_seed(seed) - model_desc = bert_model_description() - model = load_bert_onnx_model() - optim_config = optim.AdamConfig() - opts = orttrainer.ORTTrainerOptions( - { - "debug": {"deterministic_compute": True}, - "device": { - "id": device, - }, - "batch": {"gradient_accumulation_steps": gradient_accumulation_steps}, - } - ) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, options=opts) - experimental_losses = [] - for i in range(total_steps): - sample_input = generate_random_input_from_model_desc(model_desc, i) - loss = trainer.train_step(*sample_input) - experimental_losses.append(loss.cpu().item()) - - # LEGACY IMPLEMENTATION - torch.manual_seed(seed) - onnxruntime.set_seed(seed) - device = torch.device(device) - model = load_bert_onnx_model() - legacy_model_desc, learning_rate_description, learning_rate = legacy_model_params(optim_config.lr) - legacy_trainer = Legacy_ORTTrainer( - model, - None, - legacy_model_desc, - "AdamOptimizer", - None, - learning_rate_description, - device, - _use_deterministic_compute=True, - gradient_accumulation_steps=gradient_accumulation_steps, - ) - legacy_losses = [] - for i in range(total_steps): - sample_input = generate_random_input_from_model_desc(model_desc, i) - leg_loss = legacy_trainer.train_step(*sample_input, learning_rate) - legacy_losses.append(leg_loss.cpu().item()) - - # Check results - _test_helpers.assert_model_outputs(experimental_losses, legacy_losses) - - -@pytest.mark.parametrize( - "params, legacy_optim_map", - [ - # Change the hyper parameters for all parameters - ([], legacy_optim_params_a), - # Change the hyperparameters for a subset of hardcoded parameters - ( - [ - { - "params": ["bert.embeddings.LayerNorm.bias", "bert.embeddings.LayerNorm.weight"], - "alpha": 0.9, - "beta": 0.999, - "lambda_coef": 0.0, - "epsilon": 1e-6, - "do_bias_correction": False, - } - ], - legacy_optim_params_b, - ), - # Change the hyperparameters for a generated set of paramers - (optimizer_parameters(load_bert_onnx_model()), legacy_optim_params_c), - ], -) -def 
testToyBERTModelLegacyExperimentalCustomOptimParameters(params, legacy_optim_map): - # Common setup - total_steps = 128 - device = "cuda" - seed = 1 - - # EXPERIMENTAL API - torch.manual_seed(seed) - onnxruntime.set_seed(seed) - model_desc = bert_model_description() - model = load_bert_onnx_model() - - optim_config = optim.AdamConfig( - params, alpha=0.9, beta=0.999, lambda_coef=0.01, epsilon=1e-6, do_bias_correction=False - ) - opts = orttrainer.ORTTrainerOptions( - { - "debug": {"deterministic_compute": True}, - "device": { - "id": device, - }, - } - ) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, options=opts) - - experimental_losses = [] - for i in range(total_steps): - sample_input = generate_random_input_from_model_desc(model_desc, i) - experimental_losses.append(trainer.train_step(*sample_input).cpu().item()) - - # LEGACY IMPLEMENTATION - torch.manual_seed(seed) - onnxruntime.set_seed(seed) - device = torch.device(device) - model = load_bert_onnx_model() - legacy_model_desc, learning_rate_description, learning_rate = legacy_model_params(trainer.optim_config.lr) - - legacy_trainer = Legacy_ORTTrainer( - model, - None, - legacy_model_desc, - "AdamOptimizer", - legacy_optim_map, - learning_rate_description, - device, - _use_deterministic_compute=True, - ) - legacy_losses = [] - for i in range(total_steps): - sample_input = generate_random_input_from_model_desc(model_desc, i) - legacy_sample_input = [*sample_input, learning_rate] - legacy_losses.append(legacy_trainer.train_step(legacy_sample_input).cpu().item()) - - # Check results - _test_helpers.assert_model_outputs(experimental_losses, legacy_losses) diff --git a/orttraining/orttraining/test/python/orttraining_test_orttrainer_checkpoint_functions.py b/orttraining/orttraining/test/python/orttraining_test_orttrainer_checkpoint_functions.py deleted file mode 100644 index d366f2cb26557..0000000000000 --- a/orttraining/orttraining/test/python/orttraining_test_orttrainer_checkpoint_functions.py +++ /dev/null @@ -1,722 +0,0 @@ -from unittest.mock import Mock, patch - -import numpy as np -import onnx -import pytest -import torch -from _test_commons import _load_pytorch_transformer_model - -from onnxruntime.training import _checkpoint_storage, amp, checkpoint, optim, orttrainer # noqa: F401 - -# Helper functions - - -def _create_trainer(zero_enabled=False): - """Cerates a simple ORTTrainer for ORTTrainer functional tests""" - - device = "cuda" - optim_config = optim.LambConfig(lr=0.1) - opts = {"device": {"id": device}, "debug": {"deterministic_compute": True}} - if zero_enabled: - opts["distributed"] = { - "world_rank": 0, - "world_size": 1, - "horizontal_parallel_size": 1, - "data_parallel_size": 1, - "allreduce_post_accumulation": True, - "deepspeed_zero_optimization": {"stage": 1}, - } - model, model_desc, loss_fn, batcher_fn, train_data, _, _ = _load_pytorch_transformer_model(device) - trainer = orttrainer.ORTTrainer( - model, model_desc, optim_config, loss_fn=loss_fn, options=orttrainer.ORTTrainerOptions(opts) - ) - - return trainer - - -class _training_session_mock: # noqa: N801 - """Mock object for the ORTTrainer _training_session member""" - - def __init__(self, model_states, optimizer_states, partition_info): - self.model_states = model_states - self.optimizer_states = optimizer_states - self.partition_info = partition_info - - def get_model_state(self, include_mixed_precision_weights=False): - return self.model_states - - def get_optimizer_state(self): - return self.optimizer_states - - def 
get_partition_info_map(self): - return self.partition_info - - -def _get_load_state_dict_strict_error_arguments(): - """Return a list of tuples that can be used as parameters for test_load_state_dict_errors_when_model_key_missing - - Construct a list of tuples (training_session_state_dict, input_state_dict, error_arguments) - The load_state_dict function will compare the two state dicts (training_session_state_dict, input_state_dict) and - throw a runtime error with the missing/unexpected keys. The error arguments capture these missing/unexpected keys. - """ - - training_session_state_dict = { - "model": {"full_precision": {"a": np.arange(5), "b": np.arange(7)}}, - "optimizer": { - "a": {"Moment_1": np.arange(5), "Moment_2": np.arange(7)}, - "shared_optimizer_state": {"step": np.arange(5)}, - }, - } - - # input state dictionaries - precision_key_missing = {"model": {}, "optimizer": {}} - precision_key_unexpected = {"model": {"full_precision": {}, "mixed_precision": {}}, "optimizer": {}} - model_state_key_missing = {"model": {"full_precision": {}}, "optimizer": {}} - model_state_key_unexpected = {"model": {"full_precision": {"a": 2, "b": 3, "c": 4}}, "optimizer": {}} - optimizer_model_state_key_missing = {"model": {"full_precision": {"a": 2, "b": 3}}, "optimizer": {}} - optimizer_model_state_key_unexpected = { - "model": {"full_precision": {"a": 2, "b": 3}}, - "optimizer": {"a": {}, "shared_optimizer_state": {}, "b": {}}, - } - optimizer_state_key_missing = { - "model": {"full_precision": {"a": 2, "b": 3}}, - "optimizer": {"a": {}, "shared_optimizer_state": {"step": np.arange(5)}}, - } - optimizer_state_key_unexpected = { - "model": {"full_precision": {"a": 2, "b": 3}}, - "optimizer": { - "a": {"Moment_1": np.arange(5), "Moment_2": np.arange(7)}, - "shared_optimizer_state": {"step": np.arange(5), "another_step": np.arange(1)}, - }, - } - - input_arguments = [ - (training_session_state_dict, precision_key_missing, ["full_precision"]), - (training_session_state_dict, precision_key_unexpected, ["mixed_precision"]), - (training_session_state_dict, model_state_key_missing, ["a", "b"]), - (training_session_state_dict, model_state_key_unexpected, ["c"]), - (training_session_state_dict, optimizer_model_state_key_missing, ["a", "shared_optimizer_state"]), - (training_session_state_dict, optimizer_model_state_key_unexpected, ["b"]), - (training_session_state_dict, optimizer_state_key_missing, ["Moment_1", "Moment_2"]), - (training_session_state_dict, optimizer_state_key_unexpected, ["another_step"]), - ] - - return input_arguments - - -# Tests - - -def test_empty_state_dict_when_training_session_uninitialized(): - trainer = _create_trainer() - with pytest.warns(UserWarning) as user_warning: - state_dict = trainer.state_dict() - - assert len(state_dict.keys()) == 0 - assert ( - user_warning[0].message.args[0] == "ONNX Runtime training session is not initialized yet. " - "Please run train_step or eval_step at least once before calling ORTTrainer.state_dict()." 
- ) - - -@patch("onnx.ModelProto") -def test_training_session_provides_empty_model_states(onnx_model_mock): - trainer = _create_trainer() - training_session_mock = _training_session_mock({}, {}, {}) - trainer._training_session = training_session_mock - trainer._onnx_model = onnx_model_mock() - - state_dict = trainer.state_dict() - assert len(state_dict["model"].keys()) == 0 - - -@patch("onnx.ModelProto") -def test_training_session_provides_model_states(onnx_model_mock): - trainer = _create_trainer() - model_states = {"full_precision": {"a": np.arange(5), "b": np.arange(7)}} - training_session_mock = _training_session_mock(model_states, {}, {}) - trainer._training_session = training_session_mock - trainer._onnx_model = onnx_model_mock() - - state_dict = trainer.state_dict() - assert (state_dict["model"]["full_precision"]["a"] == np.arange(5)).all() - assert (state_dict["model"]["full_precision"]["b"] == np.arange(7)).all() - - -@patch("onnx.ModelProto") -def test_training_session_provides_model_states_pytorch_format(onnx_model_mock): - trainer = _create_trainer() - model_states = {"full_precision": {"a": np.arange(5), "b": np.arange(7)}} - training_session_mock = _training_session_mock(model_states, {}, {}) - trainer._training_session = training_session_mock - trainer._onnx_model = onnx_model_mock() - - state_dict = trainer.state_dict(pytorch_format=True) - assert torch.all(torch.eq(state_dict["a"], torch.tensor(np.arange(5)))) - assert torch.all(torch.eq(state_dict["b"], torch.tensor(np.arange(7)))) - - -@patch("onnx.ModelProto") -def test_onnx_graph_provides_frozen_model_states(onnx_model_mock): - trainer = _create_trainer() - model_states = {"full_precision": {"a": np.arange(5), "b": np.arange(7)}} - training_session_mock = _training_session_mock(model_states, {}, {}) - trainer._training_session = training_session_mock - trainer._onnx_model = onnx_model_mock() - trainer.options.utils.frozen_weights = ["a_frozen_weight", "a_float16_weight"] - trainer._onnx_model.graph.initializer = [ - onnx.numpy_helper.from_array(np.array([1, 2, 3], dtype=np.float32), "a_frozen_weight"), - onnx.numpy_helper.from_array(np.array([4, 5, 6], dtype=np.float32), "a_non_fronzen_weight"), - onnx.numpy_helper.from_array(np.array([7, 8, 9], dtype=np.float16), "a_float16_weight"), - ] - - state_dict = trainer.state_dict() - assert (state_dict["model"]["full_precision"]["a"] == np.arange(5)).all() - assert (state_dict["model"]["full_precision"]["b"] == np.arange(7)).all() - assert (state_dict["model"]["full_precision"]["a_frozen_weight"] == np.array([1, 2, 3], dtype=np.float32)).all() - assert "a_non_fronzen_weight" not in state_dict["model"]["full_precision"] - assert (state_dict["model"]["full_precision"]["a_float16_weight"] == np.array([7, 8, 9], dtype=np.float32)).all() - - -@patch("onnx.ModelProto") -def test_training_session_provides_empty_optimizer_states(onnx_model_mock): - trainer = _create_trainer() - training_session_mock = _training_session_mock({}, {}, {}) - trainer._training_session = training_session_mock - trainer._onnx_model = onnx_model_mock() - - state_dict = trainer.state_dict() - assert len(state_dict["optimizer"].keys()) == 0 - - -@patch("onnx.ModelProto") -def test_training_session_provides_optimizer_states(onnx_model_mock): - trainer = _create_trainer() - optimizer_states = { - "model_weight": {"Moment_1": np.arange(5), "Moment_2": np.arange(7)}, - "shared_optimizer_state": {"step": np.arange(1)}, - } - training_session_mock = _training_session_mock({}, optimizer_states, {}) - 
trainer._training_session = training_session_mock - trainer._onnx_model = onnx_model_mock() - - state_dict = trainer.state_dict() - assert (state_dict["optimizer"]["model_weight"]["Moment_1"] == np.arange(5)).all() - assert (state_dict["optimizer"]["model_weight"]["Moment_2"] == np.arange(7)).all() - assert (state_dict["optimizer"]["shared_optimizer_state"]["step"] == np.arange(1)).all() - - -@patch("onnx.ModelProto") -def test_training_session_provides_optimizer_states_pytorch_format(onnx_model_mock): - trainer = _create_trainer() - model_states = {"full_precision": {"a": np.arange(5), "b": np.arange(7)}} - optimizer_states = { - "model_weight": {"Moment_1": np.arange(5), "Moment_2": np.arange(7)}, - "shared_optimizer_state": {"step": np.arange(1)}, - } - training_session_mock = _training_session_mock(model_states, optimizer_states, {}) - trainer._training_session = training_session_mock - trainer._onnx_model = onnx_model_mock() - - state_dict = trainer.state_dict(pytorch_format=True) - assert "optimizer" not in state_dict - - -@patch("onnx.ModelProto") -def test_training_session_provides_empty_partition_info_map(onnx_model_mock): - trainer = _create_trainer(zero_enabled=True) - training_session_mock = _training_session_mock({}, {}, {}) - trainer._training_session = training_session_mock - trainer._onnx_model = onnx_model_mock() - - state_dict = trainer.state_dict() - assert len(state_dict["partition_info"].keys()) == 0 - - -@patch("onnx.ModelProto") -def test_training_session_provides_partition_info_map(onnx_model_mock): - trainer = _create_trainer(zero_enabled=True) - partition_info = {"a": {"original_dim": [1, 2, 3]}} - training_session_mock = _training_session_mock({}, {}, partition_info) - trainer._training_session = training_session_mock - trainer._onnx_model = onnx_model_mock() - - state_dict = trainer.state_dict() - assert state_dict["partition_info"]["a"]["original_dim"] == [1, 2, 3] - - -@patch("onnx.ModelProto") -def test_training_session_provides_all_states(onnx_model_mock): - trainer = _create_trainer(zero_enabled=True) - model_states = {"full_precision": {"a": np.arange(5), "b": np.arange(7)}} - optimizer_states = { - "model_weight": {"Moment_1": np.arange(5), "Moment_2": np.arange(7)}, - "shared_optimizer_state": {"step": np.arange(1)}, - } - partition_info = {"a": {"original_dim": [1, 2, 3]}} - training_session_mock = _training_session_mock(model_states, optimizer_states, partition_info) - trainer._training_session = training_session_mock - trainer._onnx_model = onnx_model_mock() - - state_dict = trainer.state_dict() - assert (state_dict["model"]["full_precision"]["a"] == np.arange(5)).all() - assert (state_dict["model"]["full_precision"]["b"] == np.arange(7)).all() - assert (state_dict["optimizer"]["model_weight"]["Moment_1"] == np.arange(5)).all() - assert (state_dict["optimizer"]["model_weight"]["Moment_2"] == np.arange(7)).all() - assert (state_dict["optimizer"]["shared_optimizer_state"]["step"] == np.arange(1)).all() - assert state_dict["partition_info"]["a"]["original_dim"] == [1, 2, 3] - - -def test_load_state_dict_holds_when_training_session_not_initialized(): - trainer = _create_trainer() - state_dict = { - "model": {"full_precision": {"a": np.arange(5), "b": np.arange(7)}}, - "optimizer": { - "a": {"Moment_1": np.arange(5), "Moment_2": np.arange(7)}, - "shared_optimizer_state": {"step": np.arange(5)}, - }, - } - assert not trainer._load_state_dict - state_dict = trainer.load_state_dict(state_dict) - assert trainer._load_state_dict - - -@pytest.mark.parametrize( - 
"state_dict, input_state_dict, error_key", - [ - ( - {"model": {}, "optimizer": {}}, - {"model": {}, "optimizer": {}, "trainer_options": {"optimizer_name": "LambOptimizer"}}, - "train_step_info", - ), - ( - {"optimizer": {}, "train_step_info": {"optimization_step": 0, "step": 0}}, - { - "optimizer": {}, - "trainer_options": {"optimizer_name": "LambOptimizer"}, - "train_step_info": {"optimization_step": 0, "step": 0}, - }, - "model", - ), - ( - {"model": {}, "train_step_info": {"optimization_step": 0, "step": 0}}, - { - "model": {}, - "trainer_options": {"optimizer_name": "LambOptimizer"}, - "train_step_info": {"optimization_step": 0, "step": 0}, - }, - "optimizer", - ), - ], -) -def test_load_state_dict_warns_when_model_optimizer_key_missing(state_dict, input_state_dict, error_key): - trainer = _create_trainer() - trainer._training_session = _training_session_mock({}, {}, {}) - trainer.state_dict = Mock(return_value=state_dict) - trainer._update_onnx_model_initializers = Mock() - trainer._init_session = Mock() - with patch("onnx.ModelProto") as onnx_model_mock: - trainer._onnx_model = onnx_model_mock() - trainer._onnx_model.graph.initializer = [] - with pytest.warns(UserWarning) as user_warning: - trainer.load_state_dict(input_state_dict) - - assert user_warning[0].message.args[0] == f"Missing key: {error_key} in state_dict" - - -@pytest.mark.parametrize("state_dict, input_state_dict, error_keys", _get_load_state_dict_strict_error_arguments()) -def test_load_state_dict_errors_when_state_dict_mismatch(state_dict, input_state_dict, error_keys): - trainer = _create_trainer() - trainer._training_session = _training_session_mock({}, {}, {}) - trainer.state_dict = Mock(return_value=state_dict) - with pytest.raises(RuntimeError) as runtime_error: - trainer.load_state_dict(input_state_dict) - - assert any(key in str(runtime_error.value) for key in error_keys) - - -@patch("onnx.ModelProto") -def test_load_state_dict_loads_the_states_and_inits_training_session(onnx_model_mock): - trainer = _create_trainer() - training_session_state_dict = { - "model": {"full_precision": {"a": np.arange(5), "b": np.arange(7)}}, - "optimizer": { - "a": {"Moment_1": np.arange(5), "Moment_2": np.arange(7)}, - "shared_optimizer_state": {"step": np.arange(1)}, - }, - } - - input_state_dict = { - "model": {"full_precision": {"a": np.array([1, 2]), "b": np.array([3, 4])}}, - "optimizer": { - "a": {"Moment_1": np.array([5, 6]), "Moment_2": np.array([7, 8])}, - "shared_optimizer_state": {"step": np.array([9])}, - }, - "trainer_options": {"optimizer_name": "LambOptimizer"}, - } - trainer._training_session = _training_session_mock({}, {}, {}) - trainer.state_dict = Mock(return_value=training_session_state_dict) - trainer._onnx_model = onnx_model_mock() - trainer._onnx_model.graph.initializer = [ - onnx.numpy_helper.from_array(np.arange(20, dtype=np.float32), "a"), - onnx.numpy_helper.from_array(np.arange(25, dtype=np.float32), "b"), - ] - trainer._update_onnx_model_initializers = Mock() - trainer._init_session = Mock() - - trainer.load_state_dict(input_state_dict) - - loaded_initializers, _ = trainer._update_onnx_model_initializers.call_args - state_dict_to_load, _ = trainer._init_session.call_args - - assert "a" in loaded_initializers[0] - assert (loaded_initializers[0]["a"] == np.array([1, 2])).all() - assert "b" in loaded_initializers[0] - assert (loaded_initializers[0]["b"] == np.array([3, 4])).all() - - assert (state_dict_to_load[0]["a"]["Moment_1"] == np.array([5, 6])).all() - assert 
(state_dict_to_load[0]["a"]["Moment_2"] == np.array([7, 8])).all() - assert (state_dict_to_load[0]["shared_optimizer_state"]["step"] == np.array([9])).all() - - -@patch("onnxruntime.training._checkpoint_storage.save") -def test_save_checkpoint_calls_checkpoint_storage_save(save_mock): - trainer = _create_trainer() - state_dict = {"model": {}, "optimizer": {}} - trainer.state_dict = Mock(return_value=state_dict) - - trainer.save_checkpoint("abc") - - save_args, _ = save_mock.call_args - assert "model" in save_args[0] - assert not bool(save_args[0]["model"]) - assert "optimizer" in save_args[0] - assert not bool(save_args[0]["optimizer"]) - assert save_args[1] == "abc" - - -@patch("onnxruntime.training._checkpoint_storage.save") -def test_save_checkpoint_exclude_optimizer_states(save_mock): - trainer = _create_trainer() - state_dict = {"model": {}, "optimizer": {}} - trainer.state_dict = Mock(return_value=state_dict) - - trainer.save_checkpoint("abc", include_optimizer_states=False) - - save_args, _ = save_mock.call_args - assert "model" in save_args[0] - assert not bool(save_args[0]["model"]) - assert "optimizer" not in save_args[0] - assert save_args[1] == "abc" - - -@patch("onnxruntime.training._checkpoint_storage.save") -def test_save_checkpoint_user_dict(save_mock): - trainer = _create_trainer() - state_dict = {"model": {}, "optimizer": {}} - trainer.state_dict = Mock(return_value=state_dict) - - trainer.save_checkpoint("abc", user_dict={"abc": np.arange(4)}) - - save_args, _ = save_mock.call_args - assert "user_dict" in save_args[0] - assert save_args[0]["user_dict"] == _checkpoint_storage.to_serialized_hex({"abc": np.arange(4)}) - - -@patch("onnxruntime.training._checkpoint_storage.load") -@patch("onnxruntime.training.checkpoint.aggregate_checkpoints") -def test_load_checkpoint(aggregate_checkpoints_mock, load_mock): - trainer = _create_trainer() - trainer_options = { - "mixed_precision": np.bool_(False), - "world_rank": np.int64(0), - "world_size": np.int64(1), - "horizontal_parallel_size": np.int64(1), - "data_parallel_size": np.int64(1), - "zero_stage": np.int64(0), - } - state_dict = { - "model": {}, - "optimizer": {}, - "trainer_options": { - "mixed_precision": np.bool_(False), - "world_rank": np.int64(0), - "world_size": np.int64(1), - "horizontal_parallel_size": np.int64(1), - "data_parallel_size": np.int64(1), - "zero_stage": np.int64(0), - }, - } - trainer.load_state_dict = Mock() - - load_mock.side_effect = [trainer_options, state_dict] - trainer.load_checkpoint("abc") - - args_list = load_mock.call_args_list - load_args, load_kwargs = args_list[0] - assert load_args[0] == "abc" - assert load_kwargs["key"] == "trainer_options" - load_args, load_kwargs = args_list[1] - assert load_args[0] == "abc" - assert "key" not in load_kwargs - assert not aggregate_checkpoints_mock.called - - -@patch("onnxruntime.training._checkpoint_storage.load") -@patch("onnxruntime.training.checkpoint.aggregate_checkpoints") -@pytest.mark.parametrize( - "trainer_options", - [ - { - "mixed_precision": np.bool_(False), - "world_rank": np.int64(0), - "world_size": np.int64(4), - "horizontal_parallel_size": np.int64(1), - "data_parallel_size": np.int64(4), - "zero_stage": np.int64(1), - }, - { - "mixed_precision": np.bool_(True), - "world_rank": np.int64(0), - "world_size": np.int64(1), - "horizontal_parallel_size": np.int64(1), - "data_parallel_size": np.int64(1), - "zero_stage": np.int64(1), - }, - { - "mixed_precision": np.bool_(True), - "world_rank": np.int64(0), - "world_size": np.int64(1), - 
"horizontal_parallel_size": np.int64(1), - "data_parallel_size": np.int64(1), - "zero_stage": np.int64(1), - }, - ], -) -def test_load_checkpoint_aggregation_required_zero_enabled(aggregate_checkpoints_mock, load_mock, trainer_options): - trainer = _create_trainer() - trainer.load_state_dict = Mock() - - load_mock.side_effect = [trainer_options] - trainer.load_checkpoint("abc") - - args_list = load_mock.call_args_list - load_args, load_kwargs = args_list[0] - assert load_args[0] == "abc" - assert load_kwargs["key"] == "trainer_options" - assert aggregate_checkpoints_mock.called - call_args, _ = aggregate_checkpoints_mock.call_args - assert call_args[0] == tuple(["abc"]) - - -@patch("onnxruntime.training._checkpoint_storage.load") -@patch("onnxruntime.training.checkpoint.aggregate_checkpoints") -def test_load_checkpoint_user_dict(aggregate_checkpoints_mock, load_mock): - trainer = _create_trainer() - trainer_options = { - "mixed_precision": np.bool_(False), - "world_rank": np.int64(0), - "world_size": np.int64(1), - "horizontal_parallel_size": np.int64(1), - "data_parallel_size": np.int64(1), - "zero_stage": np.int64(0), - } - state_dict = { - "model": {}, - "optimizer": {}, - "trainer_options": { - "mixed_precision": np.bool_(False), - "world_rank": np.int64(0), - "world_size": np.int64(1), - "horizontal_parallel_size": np.int64(1), - "data_parallel_size": np.int64(1), - "zero_stage": np.int64(0), - }, - "user_dict": _checkpoint_storage.to_serialized_hex({"array": torch.tensor(np.arange(5))}), - } - trainer.load_state_dict = Mock() - - load_mock.side_effect = [trainer_options, state_dict] - user_dict = trainer.load_checkpoint("abc") - - assert torch.all(torch.eq(user_dict["array"], torch.tensor(np.arange(5)))) - - -@patch("onnxruntime.training._checkpoint_storage.load") -def test_checkpoint_aggregation(load_mock): - trainer_options1 = { - "mixed_precision": np.bool_(False), - "world_rank": np.int64(0), - "world_size": np.int64(2), - "horizontal_parallel_size": np.int64(1), - "data_parallel_size": np.int64(2), - "zero_stage": np.int64(1), - "optimizer_name": b"Adam", - } - trainer_options2 = { - "mixed_precision": np.bool_(False), - "world_rank": np.int64(1), - "world_size": np.int64(2), - "horizontal_parallel_size": np.int64(1), - "data_parallel_size": np.int64(2), - "zero_stage": np.int64(1), - "optimizer_name": b"Adam", - } - - state_dict1 = { - "model": {"full_precision": {"optimizer_sharded": np.array([1, 2, 3]), "non_sharded": np.array([11, 22, 33])}}, - "optimizer": { - "optimizer_sharded": { - "Moment_1": np.array([9, 8, 7]), - "Moment_2": np.array([99, 88, 77]), - "Step": np.array([5]), - }, - "non_sharded": { - "Moment_1": np.array([666, 555, 444]), - "Moment_2": np.array([6666, 5555, 4444]), - "Step": np.array([55]), - }, - }, - "trainer_options": { - "mixed_precision": np.bool_(False), - "world_rank": np.int64(0), - "world_size": np.int64(1), - "horizontal_parallel_size": np.int64(1), - "data_parallel_size": np.int64(1), - "zero_stage": np.int64(0), - "optimizer_name": b"Adam", - }, - "partition_info": {"optimizer_sharded": {"original_dim": np.array([2, 3])}}, - } - - state_dict2 = { - "model": {"full_precision": {"optimizer_sharded": np.array([1, 2, 3]), "non_sharded": np.array([11, 22, 33])}}, - "optimizer": { - "optimizer_sharded": { - "Moment_1": np.array([6, 5, 4]), - "Moment_2": np.array([66, 55, 44]), - "Step": np.array([5]), - }, - "non_sharded": { - "Moment_1": np.array([666, 555, 444]), - "Moment_2": np.array([6666, 5555, 4444]), - "Step": np.array([55]), - }, - }, - 
"trainer_options": { - "mixed_precision": np.bool_(False), - "world_rank": np.int64(1), - "world_size": np.int64(1), - "horizontal_parallel_size": np.int64(1), - "data_parallel_size": np.int64(1), - "zero_stage": np.int64(0), - "optimizer_name": b"Adam", - }, - "partition_info": {"optimizer_sharded": {"original_dim": np.array([2, 3])}}, - } - - load_mock.side_effect = [trainer_options1, trainer_options2, trainer_options1, state_dict1, state_dict2] - state_dict = checkpoint.aggregate_checkpoints(["abc", "def"], pytorch_format=False) - - assert (state_dict["model"]["full_precision"]["optimizer_sharded"] == np.array([1, 2, 3])).all() - assert (state_dict["model"]["full_precision"]["non_sharded"] == np.array([11, 22, 33])).all() - assert (state_dict["optimizer"]["optimizer_sharded"]["Moment_1"] == np.array([[9, 8, 7], [6, 5, 4]])).all() - assert (state_dict["optimizer"]["optimizer_sharded"]["Moment_2"] == np.array([[99, 88, 77], [66, 55, 44]])).all() - assert (state_dict["optimizer"]["optimizer_sharded"]["Step"] == np.array([5])).all() - assert (state_dict["optimizer"]["non_sharded"]["Moment_1"] == np.array([666, 555, 444])).all() - assert (state_dict["optimizer"]["non_sharded"]["Moment_2"] == np.array([6666, 5555, 4444])).all() - assert (state_dict["optimizer"]["non_sharded"]["Step"] == np.array([55])).all() - - assert state_dict["trainer_options"]["mixed_precision"] is False - assert state_dict["trainer_options"]["world_rank"] == 0 - assert state_dict["trainer_options"]["world_size"] == 1 - assert state_dict["trainer_options"]["horizontal_parallel_size"] == 1 - assert state_dict["trainer_options"]["data_parallel_size"] == 1 - assert state_dict["trainer_options"]["zero_stage"] == 0 - assert state_dict["trainer_options"]["optimizer_name"] == b"Adam" - - -@patch("onnxruntime.training._checkpoint_storage.load") -def test_checkpoint_aggregation_mixed_precision(load_mock): - trainer_options1 = { - "mixed_precision": np.bool_(True), - "world_rank": np.int64(0), - "world_size": np.int64(2), - "horizontal_parallel_size": np.int64(1), - "data_parallel_size": np.int64(2), - "zero_stage": np.int64(1), - "optimizer_name": b"Adam", - } - trainer_options2 = { - "mixed_precision": np.bool_(True), - "world_rank": np.int64(1), - "world_size": np.int64(2), - "horizontal_parallel_size": np.int64(1), - "data_parallel_size": np.int64(2), - "zero_stage": np.int64(1), - "optimizer_name": b"Adam", - } - - state_dict1 = { - "model": {"full_precision": {"sharded": np.array([1, 2, 3]), "non_sharded": np.array([11, 22, 33])}}, - "optimizer": { - "sharded": {"Moment_1": np.array([9, 8, 7]), "Moment_2": np.array([99, 88, 77]), "Step": np.array([5])}, - "non_sharded": { - "Moment_1": np.array([666, 555, 444]), - "Moment_2": np.array([6666, 5555, 4444]), - "Step": np.array([55]), - }, - }, - "trainer_options": { - "mixed_precision": np.bool_(True), - "world_rank": np.int64(0), - "world_size": np.int64(1), - "horizontal_parallel_size": np.int64(1), - "data_parallel_size": np.int64(1), - "zero_stage": np.int64(0), - "optimizer_name": b"Adam", - }, - "partition_info": {"sharded": {"original_dim": np.array([2, 3])}}, - } - - state_dict2 = { - "model": {"full_precision": {"sharded": np.array([4, 5, 6]), "non_sharded": np.array([11, 22, 33])}}, - "optimizer": { - "sharded": {"Moment_1": np.array([6, 5, 4]), "Moment_2": np.array([66, 55, 44]), "Step": np.array([5])}, - "non_sharded": { - "Moment_1": np.array([666, 555, 444]), - "Moment_2": np.array([6666, 5555, 4444]), - "Step": np.array([55]), - }, - }, - "trainer_options": { - 
"mixed_precision": np.bool_(True), - "world_rank": np.int64(1), - "world_size": np.int64(1), - "horizontal_parallel_size": np.int64(1), - "data_parallel_size": np.int64(1), - "zero_stage": np.int64(0), - "optimizer_name": b"Adam", - }, - "partition_info": {"sharded": {"original_dim": np.array([2, 3])}}, - } - - load_mock.side_effect = [trainer_options1, trainer_options2, trainer_options1, state_dict1, state_dict2] - state_dict = checkpoint.aggregate_checkpoints(["abc", "def"], pytorch_format=False) - - assert (state_dict["model"]["full_precision"]["sharded"] == np.array([[1, 2, 3], [4, 5, 6]])).all() - assert (state_dict["model"]["full_precision"]["non_sharded"] == np.array([11, 22, 33])).all() - assert (state_dict["optimizer"]["sharded"]["Moment_1"] == np.array([[9, 8, 7], [6, 5, 4]])).all() - assert (state_dict["optimizer"]["sharded"]["Moment_2"] == np.array([[99, 88, 77], [66, 55, 44]])).all() - assert (state_dict["optimizer"]["sharded"]["Step"] == np.array([5])).all() - assert (state_dict["optimizer"]["non_sharded"]["Moment_1"] == np.array([666, 555, 444])).all() - assert (state_dict["optimizer"]["non_sharded"]["Moment_2"] == np.array([6666, 5555, 4444])).all() - assert (state_dict["optimizer"]["non_sharded"]["Step"] == np.array([55])).all() - - assert state_dict["trainer_options"]["mixed_precision"] is True - assert state_dict["trainer_options"]["world_rank"] == 0 - assert state_dict["trainer_options"]["world_size"] == 1 - assert state_dict["trainer_options"]["horizontal_parallel_size"] == 1 - assert state_dict["trainer_options"]["data_parallel_size"] == 1 - assert state_dict["trainer_options"]["zero_stage"] == 0 - assert state_dict["trainer_options"]["optimizer_name"] == b"Adam" diff --git a/orttraining/orttraining/test/python/orttraining_test_orttrainer_frontend.py b/orttraining/orttraining/test/python/orttraining_test_orttrainer_frontend.py deleted file mode 100644 index fa13625f0ddac..0000000000000 --- a/orttraining/orttraining/test/python/orttraining_test_orttrainer_frontend.py +++ /dev/null @@ -1,2460 +0,0 @@ -import inspect -import os -import tempfile -from functools import partial - -import _test_commons -import _test_helpers -import onnx -import pytest -import torch -import torch.nn.functional as F -from numpy.testing import assert_allclose -from packaging.version import Version as StrictVersion - -from onnxruntime import SessionOptions, set_seed -from onnxruntime.capi.ort_trainer import LossScaler as Legacy_LossScaler -from onnxruntime.capi.ort_trainer import ORTTrainer as Legacy_ORTTrainer -from onnxruntime.training import PropagateCastOpsStrategy, TrainStepInfo, _utils, amp -from onnxruntime.training import model_desc_validation as md_val -from onnxruntime.training import optim, orttrainer, orttrainer_options - -############################################################################### -# Testing starts here ######################################################### -############################################################################### - -pytorch_110 = StrictVersion(".".join(torch.__version__.split(".")[:2])) >= StrictVersion("1.10.0") - - -def get_model_opset(model_onnx): - for op in model_onnx.opset_import: - if op.domain == "": - return op.version - return None - - -@pytest.mark.parametrize( - "test_input", - [({}), ({"batch": {}, "device": {}, "distributed": {}, "mixed_precision": {}, "utils": {}, "_internal_use": {}})], -) -def testORTTrainerOptionsDefaultValues(test_input): - """Test different ways of using default values for incomplete input""" - - 
expected_values = { - "batch": {"gradient_accumulation_steps": 1}, - "device": {"id": "cuda", "mem_limit": 0}, - "distributed": { - "world_rank": 0, - "world_size": 1, - "local_rank": 0, - "data_parallel_size": 1, - "horizontal_parallel_size": 1, - "pipeline_parallel": { - "pipeline_parallel_size": 1, - "num_pipeline_micro_batches": 1, - "pipeline_cut_info_string": "", - "sliced_schema": {}, - "sliced_axes": {}, - "sliced_tensor_names": [], - }, - "allreduce_post_accumulation": False, - "deepspeed_zero_optimization": { - "stage": 0, - }, - "enable_adasum": False, - }, - "lr_scheduler": None, - "mixed_precision": {"enabled": False, "loss_scaler": None}, - "graph_transformer": { - "attn_dropout_recompute": False, - "gelu_recompute": False, - "transformer_layer_recompute": False, - "number_recompute_layers": 0, - "propagate_cast_ops_config": {"strategy": PropagateCastOpsStrategy.FLOOD_FILL, "level": 1, "allow": []}, - }, - "utils": { - "frozen_weights": [], - "grad_norm_clip": True, - "memory_efficient_gradient": False, - "run_symbolic_shape_infer": False, - }, - "debug": { - "deterministic_compute": False, - "check_model_export": False, - "graph_save_paths": { - "model_after_graph_transforms_path": "", - "model_with_gradient_graph_path": "", - "model_with_training_graph_path": "", - "model_with_training_graph_after_optimization_path": "", - }, - }, - "_internal_use": { - "enable_internal_postprocess": True, - "extra_postprocess": None, - "onnx_opset_version": 14, - "enable_onnx_contrib_ops": True, - }, - "provider_options": {}, - "session_options": None, - } - - actual_values = orttrainer_options.ORTTrainerOptions(test_input) - assert actual_values._validated_opts == expected_values - - -@pytest.mark.parametrize( - "input,error_msg", - [ - ( - {"mixed_precision": {"enabled": 1}}, - "Invalid options: {'mixed_precision': [{'enabled': ['must be of boolean type']}]}", - ) - ], -) -def testORTTrainerOptionsInvalidMixedPrecisionEnabledSchema(input, error_msg): - """Test an invalid input based on schema validation error message""" - - with pytest.raises(ValueError) as e: - orttrainer_options.ORTTrainerOptions(input) - assert str(e.value) == error_msg - - -@pytest.mark.parametrize( - "input_dict,input_dtype,output_dtype", - [ - ( - {"inputs": [("in0", [])], "outputs": [("out0", []), ("out1", [])]}, - (torch.int,), - ( - torch.float, - torch.int32, - ), - ), - ({"inputs": [("in0", ["batch", 2, 3])], "outputs": [("out0", [], True)]}, (torch.int8,), (torch.int16,)), - ( - { - "inputs": [ - ("in0", []), - ("in1", [1]), - ("in2", [1, 2]), - ("in3", [1000, "dyn_ax1"]), - ("in4", ["dyn_ax1", "dyn_ax2", "dyn_ax3"]), - ], - "outputs": [("out0", [], True), ("out1", [1], False), ("out2", [1, "dyn_ax1", 3])], - }, - ( - torch.float, - torch.uint8, - torch.bool, - torch.double, - torch.half, - ), - (torch.float, torch.float, torch.int64), - ), - ], -) -def testORTTrainerModelDescValidSchemas(input_dict, input_dtype, output_dtype): - r"""Test different ways of using default values for incomplete input""" - - model_description = md_val._ORTTrainerModelDesc(input_dict) - - # Validating hard-coded learning rate description - assert model_description.learning_rate.name == md_val.LEARNING_RATE_IO_DESCRIPTION_NAME - assert model_description.learning_rate.shape == [1] - assert model_description.learning_rate.dtype == torch.float32 - - # Validating model description from user - for idx, i_desc in enumerate(model_description.inputs): - assert isinstance(i_desc, model_description._InputDescription) - assert len(i_desc) == 
2 - assert input_dict["inputs"][idx][0] == i_desc.name - assert input_dict["inputs"][idx][1] == i_desc.shape - for idx, o_desc in enumerate(model_description.outputs): - assert isinstance(o_desc, model_description._OutputDescription) - assert len(o_desc) == 3 - assert input_dict["outputs"][idx][0] == o_desc.name - assert input_dict["outputs"][idx][1] == o_desc.shape - is_loss = input_dict["outputs"][idx][2] if len(input_dict["outputs"][idx]) == 3 else False - assert is_loss == o_desc.is_loss - - # Set all_finite name and check its description - model_description.all_finite = md_val.ALL_FINITE_IO_DESCRIPTION_NAME - assert model_description.all_finite.name == md_val.ALL_FINITE_IO_DESCRIPTION_NAME - assert model_description.all_finite.shape == [1] - assert model_description.all_finite.dtype == torch.bool - - # Set loss_scale_input and check its description - model_description.loss_scale_input = md_val.LOSS_SCALE_INPUT_IO_DESCRIPTION_NAME - assert model_description.loss_scale_input.name == md_val.LOSS_SCALE_INPUT_IO_DESCRIPTION_NAME - assert model_description.loss_scale_input.shape == [] - assert model_description.loss_scale_input.dtype == torch.float32 - - # Append type to inputs/outputs tuples - for idx, i_desc in enumerate(model_description.inputs): # noqa: B007 - model_description.add_type_to_input_description(idx, input_dtype[idx]) - for idx, o_desc in enumerate(model_description.outputs): # noqa: B007 - model_description.add_type_to_output_description(idx, output_dtype[idx]) - - # Verify inputs/outputs tuples are replaced by the typed counterparts - for idx, i_desc in enumerate(model_description.inputs): - assert isinstance(i_desc, model_description._InputDescriptionTyped) - assert input_dtype[idx] == i_desc.dtype - for idx, o_desc in enumerate(model_description.outputs): - assert isinstance(o_desc, model_description._OutputDescriptionTyped) - assert output_dtype[idx] == o_desc.dtype - - -@pytest.mark.parametrize( - "input_dict,error_msg", - [ - ( - {"inputs": [(True, [])], "outputs": [(True, [])]}, - "Invalid model_desc: {'inputs': [{0: ['the first element of the tuple (aka name) must be a string']}], " - "'outputs': [{0: ['the first element of the tuple (aka name) must be a string']}]}", - ), - ( - {"inputs": [("in1", None)], "outputs": [("out1", None)]}, - "Invalid model_desc: {'inputs': [{0: ['the second element of the tuple (aka shape) must be a list']}], " - "'outputs': [{0: ['the second element of the tuple (aka shape) must be a list']}]}", - ), - ( - {"inputs": [("in1", [])], "outputs": [("out1", [], None)]}, - "Invalid model_desc: {'outputs': [{0: ['the third element of the tuple (aka is_loss) must be a boolean']}]}", - ), - ( - {"inputs": [("in1", [True])], "outputs": [("out1", [True])]}, - "Invalid model_desc: {'inputs': [{0: ['each shape must be either a string or integer']}], " - "'outputs': [{0: ['each shape must be either a string or integer']}]}", - ), - ( - {"inputs": [("in1", [])], "outputs": [("out1", [], True), ("out2", [], True)]}, - "Invalid model_desc: {'outputs': [{1: ['only one is_loss can bet set to True']}]}", - ), - ( - {"inputz": [("in1", [])], "outputs": [("out1", [], True)]}, - "Invalid model_desc: {'inputs': ['required field'], 'inputz': ['unknown field']}", - ), - ( - {"inputs": [("in1", [])], "outputz": [("out1", [], True)]}, - "Invalid model_desc: {'outputs': ['required field'], 'outputz': ['unknown field']}", - ), - ], -) -def testORTTrainerModelDescInvalidSchemas(input_dict, error_msg): - r"""Test different ways of using default values for incomplete 
input""" - with pytest.raises(ValueError) as e: - md_val._ORTTrainerModelDesc(input_dict) - assert str(e.value) == error_msg - - -def testDynamicLossScaler(): - rtol = 1e-7 - default_scaler = amp.loss_scaler.DynamicLossScaler() - - # Initial state - train_step_info = orttrainer.TrainStepInfo(optim.LambConfig()) - assert_allclose(default_scaler.loss_scale, float(1 << 16), rtol=rtol, err_msg="loss scale mismatch") - assert default_scaler.up_scale_window == 2000 - assert_allclose(default_scaler.min_loss_scale, 1.0, rtol=rtol, err_msg="min loss scale mismatch") - assert_allclose(default_scaler.max_loss_scale, float(1 << 24), rtol=rtol, err_msg="max loss scale mismatch") - - # Performing 9*2000 updates to cover all branches of LossScaler.update(train_step_info.all_finite=True) - loss_scale = float(1 << 16) - for cycles in range(1, 10): - # 1999 updates without overflow produces 1999 stable steps - for i in range(1, 2000): - new_loss_scale = default_scaler.update(train_step_info) - assert default_scaler._stable_steps_count == i - assert_allclose(new_loss_scale, loss_scale, rtol=rtol, err_msg=f"loss scale mismatch at update {i}") - - # 2000th update without overflow doubles the loss and zero stable steps until max_loss_scale is reached - new_loss_scale = default_scaler.update(train_step_info) - if cycles <= 8: - loss_scale *= 2 - assert default_scaler._stable_steps_count == 0 - assert_allclose(new_loss_scale, loss_scale, rtol=rtol, err_msg="loss scale mismatch") - - # After 8 cycles, loss scale should be float(1 << 16)*(2**8) - assert_allclose(new_loss_scale, float(1 << 16) * (2**8), rtol=rtol, err_msg="loss scale mismatch") - - # After 9 cycles, loss scale reaches max_loss_scale and it is not doubled from that point on - loss_scale = float(1 << 16) * (2**8) - for count in range(1, 2050): - new_loss_scale = default_scaler.update(train_step_info) - assert default_scaler._stable_steps_count == (count % 2000) - assert_allclose(new_loss_scale, loss_scale, rtol=rtol, err_msg="loss scale mismatch") - - # Setting train_step_info.all_finite = False to test down scaling - train_step_info.all_finite = False - - # Performing 24 updates to half the loss scale each time - loss_scale = float(1 << 16) * (2**8) - for count in range(1, 25): # noqa: B007 - new_loss_scale = default_scaler.update(train_step_info) - loss_scale /= 2 - assert default_scaler._stable_steps_count == 0 - assert_allclose(new_loss_scale, loss_scale, rtol=rtol, err_msg="loss scale mismatch") - - # After 24 updates with gradient overflow, loss scale is 1.0 - assert_allclose(new_loss_scale, 1.0, rtol=rtol, err_msg="loss scale mismatch") - - # After 25 updates, min_loss_scale is reached and loss scale is not halfed from that point on - for count in range(1, 5): # noqa: B007 - new_loss_scale = default_scaler.update(train_step_info) - assert default_scaler._stable_steps_count == 0 - assert_allclose(new_loss_scale, loss_scale, rtol=rtol, err_msg="loss scale mismatch") - - -def testDynamicLossScalerCustomValues(): - rtol = 1e-7 - scaler = amp.loss_scaler.DynamicLossScaler( - automatic_update=False, loss_scale=3, up_scale_window=7, min_loss_scale=5, max_loss_scale=10 - ) - assert scaler.automatic_update is False - assert_allclose(scaler.loss_scale, 3, rtol=rtol, err_msg="loss scale mismatch") - assert_allclose(scaler.min_loss_scale, 5, rtol=rtol, err_msg="min loss scale mismatch") - assert_allclose(scaler.max_loss_scale, 10, rtol=rtol, err_msg="max loss scale mismatch") - assert scaler.up_scale_window == 7 - - -def testTrainStepInfo(): - """Test 
valid initializations of TrainStepInfo""" - - optimizer_config = optim.LambConfig() - fetches = ["out1", "out2"] - step_info = orttrainer.TrainStepInfo( - optimizer_config=optimizer_config, all_finite=False, fetches=fetches, optimization_step=123, step=456 - ) - assert step_info.optimizer_config == optimizer_config - assert step_info.all_finite is False - assert step_info.fetches == fetches - assert step_info.optimization_step == 123 - assert step_info.step == 456 - - step_info = orttrainer.TrainStepInfo(optimizer_config) - assert step_info.optimizer_config == optimizer_config - assert step_info.all_finite is True - assert step_info.fetches == [] - assert step_info.optimization_step == 0 - assert step_info.step == 0 - - -@pytest.mark.parametrize( - "invalid_input", - [ - (-1), - ("Hello"), - ], -) -def testTrainStepInfoInvalidInput(invalid_input): - """Test invalid initialization of TrainStepInfo""" - optimizer_config = optim.LambConfig() - with pytest.raises(AssertionError): - orttrainer.TrainStepInfo(optimizer_config=invalid_input) - - with pytest.raises(AssertionError): - orttrainer.TrainStepInfo(optimizer_config, all_finite=invalid_input) - - with pytest.raises(AssertionError): - orttrainer.TrainStepInfo(optimizer_config, fetches=invalid_input) - - with pytest.raises(AssertionError): - orttrainer.TrainStepInfo(optimizer_config, optimization_step=invalid_input) - - with pytest.raises(AssertionError): - orttrainer.TrainStepInfo(optimizer_config, step=invalid_input) - - -@pytest.mark.parametrize( - "optim_name,lr,alpha,default_alpha", - [ - ("AdamOptimizer", 0.1, 0.2, None), - ("LambOptimizer", 0.2, 0.3, None), - ("SGDOptimizer", 0.3, 0.4, None), - ("SGDOptimizer", 0.3, 0.4, 0.5), - ], -) -def testOptimizerConfig(optim_name, lr, alpha, default_alpha): - """Test initialization of _OptimizerConfig""" - defaults = {"lr": lr, "alpha": alpha} - params = [{"params": ["fc1.weight", "fc2.weight"]}] - if default_alpha is not None: - params[0].update({"alpha": default_alpha}) - else: - params[0].update({"alpha": alpha}) - cfg = optim.config._OptimizerConfig(name=optim_name, params=params, defaults=defaults) - - assert cfg.name == optim_name - rtol = 1e-07 - assert_allclose(defaults["lr"], cfg.lr, rtol=rtol, err_msg="lr mismatch") - - # 1:1 mapping between defaults and params's hyper parameters - for param in params: - for k in param: - if k != "params": - assert k in cfg.defaults, "hyper parameter {k} not present in one of the parameter params" - for k in cfg.defaults: - for param in cfg.params: - assert k in param, "hyper parameter {k} not present in one of the parameter params" - - -@pytest.mark.parametrize( - "optim_name,defaults,params", - [ - ("AdamOptimizer", {"lr": -1}, []), # invalid lr - ("FooOptimizer", {"lr": 0.001}, []), # invalid name - ("SGDOptimizer", [], []), # invalid type(defaults) - (optim.AdamConfig, {"lr": 0.003}, []), # invalid type(name) - ("AdamOptimizer", {"lr": None}, []), # missing 'lr' hyper parameter - ("SGDOptimizer", {"lr": 0.004}, {}), # invalid type(params) - # invalid type(params[i]) - ("AdamOptimizer", {"lr": 0.005, "alpha": 2}, [[]]), - # missing 'params' at 'params' - ("AdamOptimizer", {"lr": 0.005, "alpha": 2}, [{"alpha": 1}]), - # missing 'alpha' at 'defaults' - ("AdamOptimizer", {"lr": 0.005}, [{"params": "param1", "alpha": 1}]), - ], -) -def testOptimizerConfigInvalidInputs(optim_name, defaults, params): - """Test invalid initialization of _OptimizerConfig""" - - with pytest.raises(AssertionError): - optim.config._OptimizerConfig(name=optim_name, 
params=params, defaults=defaults) - - -def testOptimizerConfigSGD(): - """Test initialization of SGD""" - cfg = optim.SGDConfig() - assert cfg.name == "SGDOptimizer" - - rtol = 1e-07 - assert_allclose(0.001, cfg.lr, rtol=rtol, err_msg="lr mismatch") - - cfg = optim.SGDConfig(lr=0.002) - assert_allclose(0.002, cfg.lr, rtol=rtol, err_msg="lr mismatch") - - # SGD does not support params - with pytest.raises(AssertionError) as e: - params = [{"params": ["layer1.weight"], "lr": 0.1}] - optim.SGDConfig(params=params, lr=0.002) - assert_allclose(0.002, cfg.lr, rtol=rtol, err_msg="lr mismatch") - assert str(e.value) == "'params' must be an empty list for SGD optimizer" - - -def testOptimizerConfigAdam(): - """Test initialization of Adam""" - cfg = optim.AdamConfig() - assert cfg.name == "AdamOptimizer" - - rtol = 1e-7 - assert_allclose(0.001, cfg.lr, rtol=rtol, err_msg="lr mismatch") - assert_allclose(0.9, cfg.alpha, rtol=rtol, err_msg="alpha mismatch") - assert_allclose(0.999, cfg.beta, rtol=rtol, err_msg="beta mismatch") - assert_allclose(0.0, cfg.lambda_coef, rtol=rtol, err_msg="lambda_coef mismatch") - assert_allclose(1e-8, cfg.epsilon, rtol=rtol, err_msg="epsilon mismatch") - assert_allclose(1.0, cfg.max_norm_clip, rtol=rtol, err_msg="max_norm_clip mismatch") - assert cfg.do_bias_correction is True, "lambda_coef mismatch" - assert cfg.weight_decay_mode == optim.AdamConfig.DecayMode.BEFORE_WEIGHT_UPDATE, "weight_decay_mode mismatch" - - -def testOptimizerConfigLamb(): - """Test initialization of Lamb""" - cfg = optim.LambConfig() - assert cfg.name == "LambOptimizer" - rtol = 1e-7 - assert_allclose(0.001, cfg.lr, rtol=rtol, err_msg="lr mismatch") - assert_allclose(0.9, cfg.alpha, rtol=rtol, err_msg="alpha mismatch") - assert_allclose(0.999, cfg.beta, rtol=rtol, err_msg="beta mismatch") - assert_allclose(0.0, cfg.lambda_coef, rtol=rtol, err_msg="lambda_coef mismatch") - assert cfg.ratio_min == float("-inf"), "ratio_min mismatch" - assert cfg.ratio_max == float("inf"), "ratio_max mismatch" - assert_allclose(1e-6, cfg.epsilon, rtol=rtol, err_msg="epsilon mismatch") - assert_allclose(1.0, cfg.max_norm_clip, rtol=rtol, err_msg="max_norm_clip mismatch") - assert cfg.do_bias_correction is False, "do_bias_correction mismatch" - - -@pytest.mark.parametrize("optim_name", [("Adam"), ("Lamb")]) -def testOptimizerConfigParams(optim_name): - rtol = 1e-7 - params = [{"params": ["layer1.weight"], "alpha": 0.1}] - if optim_name == "Adam": - cfg = optim.AdamConfig(params=params, alpha=0.2) - elif optim_name == "Lamb": - cfg = optim.LambConfig(params=params, alpha=0.2) - else: - raise ValueError("invalid input") - assert len(cfg.params) == 1, "params should have length 1" - assert_allclose(cfg.params[0]["alpha"], 0.1, rtol=rtol, err_msg="invalid lr on params[0]") - - -@pytest.mark.parametrize("optim_name", [("Adam"), ("Lamb")]) -def testOptimizerConfigInvalidParams(optim_name): - # lr is not supported within params - with pytest.raises(AssertionError) as e: - params = [{"params": ["layer1.weight"], "lr": 0.1}] - if optim_name == "Adam": - optim.AdamConfig(params=params, lr=0.2) - elif optim_name == "Lamb": - optim.LambConfig(params=params, lr=0.2) - else: - raise ValueError("invalid input") - assert str(e.value) == "'lr' is not supported inside params" - - -def testLinearLRSchedulerCreation(): - total_steps = 10 - warmup = 0.05 - - lr_scheduler = optim.lr_scheduler.LinearWarmupLRScheduler(total_steps, warmup) - - # Initial state - assert lr_scheduler.total_steps == total_steps - assert lr_scheduler.warmup == 
warmup - - -@pytest.mark.parametrize( - "lr_scheduler,expected_values", - [ - (optim.lr_scheduler.ConstantWarmupLRScheduler, [0.0, 0.2, 0.4, 0.6, 0.8, 1.0, 1.0, 1.0, 1.0, 1.0]), - ( - optim.lr_scheduler.CosineWarmupLRScheduler, - [ - 0.0, - 0.9763960957919413, - 0.9059835861602854, - 0.7956724530494887, - 0.6563036824392345, - 0.5015739416158049, - 0.34668951940611276, - 0.2068719061737831, - 0.09586187986225325, - 0.0245691111902418, - ], - ), - (optim.lr_scheduler.LinearWarmupLRScheduler, [0.0, 0.2, 0.4, 0.6, 0.8, 1.0, 0.8, 0.6, 0.4, 0.2]), - ( - optim.lr_scheduler.PolyWarmupLRScheduler, - [ - 0.0, - 0.9509018036072144, - 0.9008016032064128, - 0.8507014028056112, - 0.8006012024048097, - 0.750501002004008, - 0.7004008016032064, - 0.6503006012024048, - 0.6002004008016032, - 0.5501002004008015, - ], - ), - ], -) -def testLRSchedulerUpdateImpl(lr_scheduler, expected_values): - # Test tolerance - rtol = 1e-03 - - # Initial state - initial_lr = 1 - total_steps = 10 - warmup = 0.5 - optimizer_config = optim.SGDConfig(lr=initial_lr) - lr_scheduler = lr_scheduler(total_steps, warmup) - - # First half is warmup - for optimization_step in range(total_steps): - # Emulate ORTTRainer.train_step() call that updates its train_step_info - train_step_info = TrainStepInfo(optimizer_config=optimizer_config, optimization_step=optimization_step) - - lr_scheduler._step(train_step_info) - lr_list = lr_scheduler.get_last_lr() - assert len(lr_list) == 1 - assert_allclose(lr_list[0], expected_values[optimization_step], rtol=rtol, err_msg="lr mismatch") - - -def testInstantiateORTTrainerOptions(): - session_options = SessionOptions() - session_options.enable_mem_pattern = False - provider_options = {"EP1": {"key": "val"}} - opts = {"session_options": session_options, "provider_options": provider_options} - opts = orttrainer.ORTTrainerOptions(opts) - assert opts.session_options.enable_mem_pattern is False - assert opts._validated_opts["provider_options"]["EP1"]["key"] == "val" - - -@pytest.mark.parametrize( - "step_fn, lr_scheduler, expected_lr_values, device", - [ - ("train_step", None, None, "cuda"), - ("eval_step", None, None, "cpu"), - ( - "train_step", - optim.lr_scheduler.ConstantWarmupLRScheduler, - [0.0, 0.2, 0.4, 0.6, 0.8, 1.0, 1.0, 1.0, 1.0, 1.0], - "cpu", - ), - ( - "train_step", - optim.lr_scheduler.CosineWarmupLRScheduler, - [ - 0.0, - 0.2, - 0.4, - 0.6, - 0.8, - 1.0, - 0.9045084971874737, - 0.6545084971874737, - 0.34549150281252633, - 0.09549150281252633, - ], - "cuda", - ), - ( - "train_step", - optim.lr_scheduler.LinearWarmupLRScheduler, - [0.0, 0.2, 0.4, 0.6, 0.8, 1.0, 0.8, 0.6, 0.4, 0.2], - "cpu", - ), - ( - "train_step", - optim.lr_scheduler.PolyWarmupLRScheduler, - [0.0, 0.2, 0.4, 0.6, 0.8, 1.0, 0.80000002, 0.60000004, 0.40000006000000005, 0.20000007999999997], - "cuda", - ), - ], -) -def testInstantiateORTTrainer(step_fn, lr_scheduler, expected_lr_values, device): - total_steps = 1 - initial_lr = 1.0 - rtol = 1e-3 - - # PyTorch Transformer model as example - opts = {"device": {"id": device}} - if lr_scheduler: - total_steps = 10 - opts.update({"lr_scheduler": lr_scheduler(total_steps=total_steps, warmup=0.5)}) - opts = orttrainer.ORTTrainerOptions(opts) - optim_config = optim.LambConfig(lr=initial_lr) - model, model_desc, my_loss, batcher_fn, train_data, val_data, _ = _test_commons._load_pytorch_transformer_model( - device - ) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=opts) - - # Run a train or evaluation step - if step_fn == "eval_step": - data, 
targets = batcher_fn(val_data, 0) - elif step_fn == "train_step": - data, targets = batcher_fn(train_data, 0) - else: - raise ValueError("Invalid step_fn") - - # Export model to ONNX - if step_fn == "eval_step": - step_fn = trainer.eval_step - output = trainer.eval_step(data, targets) - elif step_fn == "train_step": - step_fn = trainer.train_step - for i in range(total_steps): - output = trainer.train_step(data, targets) - if lr_scheduler: - lr_list = trainer.options.lr_scheduler.get_last_lr() - assert_allclose(lr_list[0], expected_lr_values[i], rtol=rtol, err_msg="lr mismatch") - else: - raise ValueError("Invalid step_fn") - assert trainer._onnx_model is not None - - # Check output shape after train/eval step - for out, desc in zip(output, trainer.model_desc.outputs): - if trainer.loss_fn and desc.is_loss: - continue - assert list(out.size()) == desc.shape - - # Check name, shape and dtype of the first len(forward.parameters) ORT graph inputs - sig = inspect.signature(model.forward) - for i in range(len(sig.parameters.keys())): - input_name = trainer.model_desc.inputs[i][0] - input_dim = trainer.model_desc.inputs[i][1] - input_type = trainer.model_desc.inputs[i][2] - - assert trainer._onnx_model.graph.input[i].name == input_name - for dim_idx, dim in enumerate(trainer._onnx_model.graph.input[i].type.tensor_type.shape.dim): - assert input_dim[dim_idx] == dim.dim_value - assert input_type == _utils.dtype_onnx_to_torch( - trainer._onnx_model.graph.input[i].type.tensor_type.elem_type - ) - - opset = get_model_opset(trainer._onnx_model) - - # Check name, shape and dtype of the ORT graph outputs - for i in range(len(trainer.model_desc.outputs)): - output_name = trainer.model_desc.outputs[i][0] - output_dim = trainer.model_desc.outputs[i][1] - output_type = trainer.model_desc.outputs[i][3] - - assert trainer._onnx_model.graph.output[i].name == output_name - for dim_idx, dim in enumerate(trainer._onnx_model.graph.output[i].type.tensor_type.shape.dim): - if opset is None or opset <= 12: - assert output_dim[dim_idx] == dim.dim_value - assert output_type == _utils.dtype_onnx_to_torch( - trainer._onnx_model.graph.output[i].type.tensor_type.elem_type - ) - - # Save current model as ONNX as a file - file_name = os.path.join("_____temp_onnx_model.onnx") - trainer.save_as_onnx(file_name) - assert os.path.exists(file_name) - with open(file_name, "rb") as f: - bin_str = f.read() - reload_onnx_model = onnx.load_model_from_string(bin_str) - os.remove(file_name) - - # Create a new trainer from persisted ONNX model and compare with original ONNX model - trainer_from_onnx = orttrainer.ORTTrainer(reload_onnx_model, model_desc, optim_config) - step_fn(data, targets) - assert trainer_from_onnx._onnx_model is not None - assert id(trainer_from_onnx._onnx_model) != id(trainer._onnx_model) - assert trainer_from_onnx._onnx_model == trainer._onnx_model - assert trainer_from_onnx._onnx_model.graph == trainer._onnx_model.graph - assert onnx.helper.printable_graph(trainer_from_onnx._onnx_model.graph) == onnx.helper.printable_graph( - trainer._onnx_model.graph - ) - - -@pytest.mark.parametrize("seed, device", [(0, "cpu"), (24, "cuda")]) -def testORTDeterministicCompute(seed, device): - # Common setup - optim_config = optim.LambConfig() - opts = orttrainer.ORTTrainerOptions( - {"debug": {"deterministic_compute": True}, "device": {"id": device, "mem_limit": 10 * 1024 * 1024}} - ) - - # Setup for the first ORTTRainer run - torch.manual_seed(seed) - set_seed(seed) - model, model_desc, my_loss, batcher_fn, train_data, _, _ = 
_test_commons._load_pytorch_transformer_model(device) - first_trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=opts) - data, targets = batcher_fn(train_data, 0) - _ = first_trainer.train_step(data, targets) - assert first_trainer._onnx_model is not None - - # Setup for the second ORTTRainer run - torch.manual_seed(seed) - set_seed(seed) - model, _, _, _, _, _, _ = _test_commons._load_pytorch_transformer_model(device) - second_trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=opts) - _ = second_trainer.train_step(data, targets) - assert second_trainer._onnx_model is not None - - # Compare two different instances with identical setup - assert id(first_trainer._onnx_model) != id(second_trainer._onnx_model) - _test_helpers.assert_onnx_weights(first_trainer, second_trainer) - - -@pytest.mark.parametrize( - "seed,device,expected_loss,fetches", - [ - (321, "cuda", [10.5774, 10.4403, 10.4175, 10.2886, 10.2760], False), - (321, "cuda", [10.5774, 10.4403, 10.4175, 10.2886, 10.2760], True), - ], -) -def testORTTrainerMixedPrecisionLossScaler(seed, device, expected_loss, fetches): - return # TODO: re-enable after nondeterminism on backend is fixed. update numbers - - rtol = 1e-3 - total_steps = len(expected_loss) - torch.manual_seed(seed) - set_seed(seed) - - # Setup ORTTrainer - loss_scaler = amp.DynamicLossScaler() - options = orttrainer.ORTTrainerOptions( - { - "device": {"id": device}, - "mixed_precision": {"enabled": True, "loss_scaler": loss_scaler}, - "debug": {"deterministic_compute": True}, - } - ) - model, model_desc, my_loss, batcher_fn, train_data, val_data, _ = _test_commons._load_pytorch_transformer_model( - device - ) - optim_config = optim.LambConfig(lr=0.001) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=options) - - # Training loop - actual_loss = [] - for i in range(total_steps): - data, targets = batcher_fn(train_data, i) - if fetches: - trainer._train_step_info.fetches = ["loss"] - loss = trainer.train_step(data, targets) - else: - loss, _ = trainer.train_step(data, targets) - actual_loss.append(loss.cpu()) - - # Eval once just to test fetches in action - val_data, val_targets = batcher_fn(val_data, 0) - if fetches: - trainer._train_step_info.fetches = ["loss"] - loss = trainer.eval_step(val_data, val_targets) - trainer._train_step_info.fetches = [] - loss, _ = trainer.eval_step(val_data, val_targets) - - # Compare loss to ground truth computed from current ORTTrainer API - _test_helpers.assert_model_outputs(expected_loss, actual_loss, True, rtol=rtol) - assert trainer._onnx_model is not None - - -def _recompute_data(): - device_capability_major = torch.cuda.get_device_capability()[0] - if device_capability_major == 7: # V100 for Dev machine - expected_loss = { - 12: [10.5598, 10.4591, 10.3477, 10.2726, 10.1945], - 14: [10.54088, 10.498755, 10.386827, 10.338747, 10.262459], - } - return [ - (False, False, False, 0, expected_loss), # no recompute - (True, False, False, 0, expected_loss), # attn_dropout recompute - (False, True, False, 0, expected_loss), # gelu recompute - (False, False, True, 0, expected_loss), # transformer_layer recompute - (False, False, True, 1, expected_loss), # transformer_layer recompute with 1 layer - ] - elif device_capability_major == 5: # M60 for CI machines - expected_loss = { - 12: [10.5445, 10.4389, 10.3480, 10.2627, 10.2113], - 14: [10.5445, 10.4389, 10.3480, 10.2627, 10.2113], - } - return [ - (False, False, False, 0, 
expected_loss), # no recompute - (True, False, False, 0, expected_loss), # attn_dropout recompute - (False, True, False, 0, expected_loss), # gelu recompute - (False, False, True, 0, expected_loss), # transformer_layer recompute - (False, False, True, 1, expected_loss), # transformer_layer recompute with 1 layer - ] - - -@pytest.mark.parametrize("attn_dropout, gelu, transformer_layer, number_layers, expected_loss", _recompute_data()) -def testORTTrainerRecompute(attn_dropout, gelu, transformer_layer, number_layers, expected_loss): - seed = 321 - device = "cuda" - rtol = 1e-3 - total_steps = len(expected_loss[12]) - torch.manual_seed(seed) - set_seed(seed) - - # Setup ORTTrainer - options = orttrainer.ORTTrainerOptions( - { - "device": {"id": device}, - "graph_transformer": { - "attn_dropout_recompute": attn_dropout, - "gelu_recompute": gelu, - "transformer_layer_recompute": transformer_layer, - "number_recompute_layers": number_layers, - }, - "debug": {"deterministic_compute": True}, - } - ) - model, model_desc, my_loss, batcher_fn, train_data, val_data, _ = _test_commons._load_pytorch_transformer_model( - device - ) - optim_config = optim.LambConfig(lr=0.001) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=options) - - # Training loop - actual_loss = [] - for i in range(total_steps): - data, targets = batcher_fn(train_data, i) - loss, _ = trainer.train_step(data, targets) - actual_loss.append(loss.cpu()) - - # Compare loss to ground truth computed from current ORTTrainer API - assert trainer._onnx_model is not None - opset = get_model_opset(trainer._onnx_model) - _test_helpers.assert_model_outputs(expected_loss[opset], actual_loss, True, rtol=rtol) - - -@pytest.mark.parametrize( - "seed,device,gradient_accumulation_steps,total_steps,expected_loss", - [ - ( - 0, - "cuda", - 1, - 12, - [ - 10.5368022919, - 10.4146203995, - 10.3635568619, - 10.2650547028, - 10.2284049988, - 10.1304626465, - 10.0853414536, - 9.9987659454, - 9.9472427368, - 9.8832416534, - 9.8223171234, - 9.8222122192, - ], - ), - ( - 42, - "cuda", - 3, - 12, - [ - 10.6455879211, - 10.6247081757, - 10.6361322403, - 10.5187482834, - 10.5345087051, - 10.5487670898, - 10.4833698273, - 10.4600019455, - 10.4535751343, - 10.3774127960, - 10.4144191742, - 10.3757553101, - ], - ), - ( - 123, - "cuda", - 7, - 12, - [ - 10.5353469849, - 10.5261383057, - 10.5240392685, - 10.5013713837, - 10.5678377151, - 10.5452117920, - 10.5184345245, - 10.4271221161, - 10.4458627701, - 10.4864749908, - 10.4416503906, - 10.4467563629, - ], - ), - ( - 321, - "cuda", - 12, - 12, - [ - 10.5773944855, - 10.5428829193, - 10.5974750519, - 10.5416746140, - 10.6009902954, - 10.5684127808, - 10.5759754181, - 10.5636739731, - 10.5613927841, - 10.5825119019, - 10.6031589508, - 10.6199369431, - ], - ), - ], -) -def testORTTrainerGradientAccumulation(seed, device, gradient_accumulation_steps, total_steps, expected_loss): - return # TODO: re-enable after nondeterminism on backend is fixed. 
update numbers - rtol = 1e-3 - torch.manual_seed(seed) - set_seed(seed) - - # Setup ORTTrainer - options = orttrainer.ORTTrainerOptions( - { - "device": {"id": device}, - "batch": {"gradient_accumulation_steps": gradient_accumulation_steps}, - "debug": {"deterministic_compute": True}, - } - ) - model, model_desc, my_loss, batcher_fn, train_data, _, _ = _test_commons._load_pytorch_transformer_model(device) - optim_config = optim.LambConfig(lr=0.001) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=options) - - # Training loop - actual_loss = [] - for i in range(total_steps): - data, targets = batcher_fn(train_data, i) - loss, _ = trainer.train_step(data, targets) - actual_loss.append(loss.cpu()) - - # Compare legacy vs experimental APIs - _test_helpers.assert_model_outputs(expected_loss, actual_loss, rtol=rtol) - - -@pytest.mark.parametrize( - "dynamic_axes", - [ - (True), - (False), - ], -) -def testORTTrainerDynamicShape(dynamic_axes): - # Common setup - device = "cuda" - - # Setup ORTTrainer - options = orttrainer.ORTTrainerOptions({}) - model, model_desc, my_loss, batcher_fn, train_data, _, _ = _test_commons._load_pytorch_transformer_model( - device, dynamic_axes=dynamic_axes - ) - optim_config = optim.LambConfig(lr=0.001) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=options) - - # Training loop - total_steps = 10 - for i in range(total_steps): - data, targets = batcher_fn(train_data, i) - if dynamic_axes: - # Forcing batches with different sizes to exercise dynamic shapes - data = data[: -(i + 1)] - targets = targets[: -(i + 1) * data.size(1)] - _, _ = trainer.train_step(data, targets) - - assert trainer._onnx_model is not None - - -@pytest.mark.parametrize( - "enable_onnx_contrib_ops", - [ - (True), - (False), - ], -) -def testORTTrainerInternalUseContribOps(enable_onnx_contrib_ops): - # Common setup - device = "cuda" - - # Setup ORTTrainer - options = orttrainer.ORTTrainerOptions({"_internal_use": {"enable_onnx_contrib_ops": enable_onnx_contrib_ops}}) - model, model_desc, my_loss, batcher_fn, train_data, _, _ = _test_commons._load_pytorch_transformer_model(device) - optim_config = optim.LambConfig(lr=0.001) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=options) - - # Training loop - data, targets = batcher_fn(train_data, 0) - if not enable_onnx_contrib_ops and not pytorch_110: - with pytest.raises(Exception): # noqa: B017 - _, _ = trainer.train_step(data, targets) - else: - _, _ = trainer.train_step(data, targets) - - -@pytest.mark.parametrize( - "model_params", - [ - ( - [ - "decoder.weight", - "transformer_encoder.layers.0.linear1.bias", - "transformer_encoder.layers.0.linear2.weight", - "transformer_encoder.layers.1.self_attn.out_proj.weight", - "transformer_encoder.layers.1.self_attn.out_proj.bias", - ] - ), - ], -) -def testORTTrainerFrozenWeights(model_params): - # Common setup - device = "cuda" - total_steps = 10 - - # Setup ORTTrainer WITHOUT frozen weights - options = orttrainer.ORTTrainerOptions({}) - model, model_desc, my_loss, batcher_fn, train_data, _, _ = _test_commons._load_pytorch_transformer_model(device) - optim_config = optim.LambConfig(lr=0.001) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=options) - for i in range(total_steps): - data, targets = batcher_fn(train_data, i) - _, _ = trainer.train_step(data, targets) - - # All model_params must be in the session state - assert 
trainer._onnx_model is not None - session_state = trainer._training_session.get_state() - assert all([param in session_state for param in model_params]) - - # Setup ORTTrainer WITH frozen weights - options = orttrainer.ORTTrainerOptions({"utils": {"frozen_weights": model_params}}) - model, _, _, _, _, _, _ = _test_commons._load_pytorch_transformer_model(device) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=options) - for i in range(total_steps): - data, targets = batcher_fn(train_data, i) - _, _ = trainer.train_step(data, targets) - - # All model_params CANNOT be in the session state - assert trainer._onnx_model is not None - session_state = trainer._training_session.get_state() - assert not all([param in session_state for param in model_params]) - - -@pytest.mark.parametrize( - "loss_scaler, optimizer_config, gradient_accumulation_steps", - [ - (None, optim.AdamConfig(), 1), - (None, optim.LambConfig(), 1), - (None, optim.SGDConfig(), 1), - (amp.DynamicLossScaler(), optim.AdamConfig(), 1), - (amp.DynamicLossScaler(), optim.LambConfig(), 5), - # (amp.DynamicLossScaler(), optim.SGDConfig(), 1), # SGD doesnt support fp16 - ], -) -def testORTTrainerStateDictWrapModelLossFn(loss_scaler, optimizer_config, gradient_accumulation_steps): - # Common setup - seed = 1 - - class LinearModel(torch.nn.Module): - def __init__(self): - super().__init__() - self.linear = torch.nn.Linear(2, 4) - - def forward(self, y=None, x=None): - if y is not None: - return self.linear(x) + y - else: - return self.linear(x) + torch.ones(2, 4) - - model_desc = { - "inputs": [ - ("x", [2, 2]), - ( - "label", - [ - 2, - ], - ), - ], - "outputs": [("loss", [], True), ("output", [2, 4])], - } - - # Dummy data - data1 = torch.randn(2, 2) - label1 = torch.tensor([0, 1], dtype=torch.int64) - data2 = torch.randn(2, 2) - label2 = torch.tensor([0, 1], dtype=torch.int64) - - # Setup training based on test parameters - opts = { - "debug": {"deterministic_compute": True}, - "batch": {"gradient_accumulation_steps": gradient_accumulation_steps}, - } - if loss_scaler: - opts["mixed_precision"] = {"enabled": True, "loss_scaler": loss_scaler} - opts = orttrainer.ORTTrainerOptions(opts) - - # Training session 1 - torch.manual_seed(seed) - set_seed(seed) - pt_model = LinearModel() - - def loss_fn(x, label): - return F.nll_loss(F.log_softmax(x, dim=1), label) - - trainer = orttrainer.ORTTrainer(pt_model, model_desc, optimizer_config, loss_fn=loss_fn, options=opts) - - # Check state_dict keys before train. 
Must be empty - state_dict = trainer.state_dict() - assert state_dict == {} - - # Train once and check initial state - trainer.train_step(x=data1, label=label1) - state_dict = trainer.state_dict() - assert all([weight in state_dict["model"]["full_precision"] for weight in ["linear.bias", "linear.weight"]]) - - # Initialize training session 2 from state of Training 1 - torch.manual_seed(seed) - set_seed(seed) - trainer2 = orttrainer.ORTTrainer(pt_model, model_desc, optimizer_config, loss_fn=loss_fn, options=opts) - trainer2.load_state_dict(state_dict) - - # Verify state was loaded properly - _test_commons.assert_all_states_close_ort(state_dict, trainer2._load_state_dict.args[0]) - - # Perform a second step in both training session 1 and 2 and verify they match - trainer.train_step(x=data2, label=label2) - state_dict = trainer.state_dict() - trainer2.train_step(x=data2, label=label2) - state_dict2 = trainer2.state_dict() - _test_commons.assert_all_states_close_ort(state_dict, state_dict2) - - -def testORTTrainerNonPickableModel(): - # Common setup - import threading - - seed = 1 - - class UnpickableModel(torch.nn.Module): - def __init__(self): - super().__init__() - self.linear = torch.nn.Linear(2, 4) - self._lock = threading.Lock() - - def forward(self, y=None, x=None): - with self._lock: - if y is not None: - return self.linear(x) + y - else: - return self.linear(x) + torch.ones(2, 4) - - model_desc = { - "inputs": [ - ("x", [2, 2]), - ( - "label", - [ - 2, - ], - ), - ], - "outputs": [("loss", [], True), ("output", [2, 4])], - } - - # Dummy data - data = torch.randn(2, 2) - label = torch.tensor([0, 1], dtype=torch.int64) - - # Setup training based on test parameters - opts = orttrainer.ORTTrainerOptions({"debug": {"deterministic_compute": True}}) - - # Training session - torch.manual_seed(seed) - set_seed(seed) - pt_model = UnpickableModel() - - def loss_fn(x, label): - return F.nll_loss(F.log_softmax(x, dim=1), label) - - optim_config = optim.AdamConfig() - trainer = orttrainer.ORTTrainer(pt_model, model_desc, optim_config, loss_fn=loss_fn, options=opts) - - # Train must succeed despite warning - _, _ = trainer.train_step(data, label) - - -############################################################################### -# Temporary tests comparing Legacy vs Experimental ORTTrainer APIs ############ -############################################################################### - - -@pytest.mark.parametrize("seed,device", [(1234, "cuda")]) -def testORTTrainerLegacyAndExperimentalWeightsCheck(seed, device): - # Common data - rtol = 1e-7 - total_steps = 5 - - # Setup for the experimental ORTTRainer run - torch.manual_seed(seed) - set_seed(seed) - optim_config = optim.LambConfig() - opts = orttrainer.ORTTrainerOptions( - { - "device": {"id": device}, - "debug": {"deterministic_compute": True}, - } - ) - model, model_desc, my_loss, batcher_fn, train_data, _, _ = _test_commons._load_pytorch_transformer_model(device) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=opts) - # Training loop - for i in range(total_steps): - data, targets = batcher_fn(train_data, i) - _ = trainer.train_step(data, targets) - - # Setup for the legacy ORTTrainer run - torch.manual_seed(seed) - set_seed(seed) - model, (model_desc, lr_desc), _, _, _, _, _ = _test_commons._load_pytorch_transformer_model(device, legacy_api=True) - legacy_trainer = Legacy_ORTTrainer( - model, my_loss, model_desc, "LambOptimizer", None, lr_desc, device, _use_deterministic_compute=True - ) - # 
Training loop - for i in range(total_steps): - data, targets = batcher_fn(train_data, i) - _, _ = legacy_trainer.train_step(data, targets, torch.tensor([optim_config.lr])) - - # Compare legacy vs experimental APIs - _test_helpers.assert_legacy_onnx_weights(trainer, legacy_trainer, rtol=rtol) - - -@pytest.mark.parametrize( - "seed,device", - [ - (321, "cuda"), - ], -) -def testORTTrainerLegacyAndExperimentalPrecisionLossScaler(seed, device): - # Common data - total_steps = 128 - - # Setup experimental API - torch.manual_seed(seed) - set_seed(seed) - loss_scaler = amp.DynamicLossScaler() - options = orttrainer.ORTTrainerOptions( - { - "device": {"id": device}, - "mixed_precision": {"enabled": True, "loss_scaler": loss_scaler}, - "debug": { - "deterministic_compute": True, - }, - } - ) - model, model_desc, my_loss, batcher_fn, train_data, _, _ = _test_commons._load_pytorch_transformer_model(device) - optim_config = optim.LambConfig(lr=0.001) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=options) - # Training loop - experimental_loss = [] - experimental_preds_dtype = [] - for i in range(total_steps): - data, targets = batcher_fn(train_data, i) - exp_loss, exp_preds = trainer.train_step(data, targets) - experimental_loss.append(exp_loss.cpu()) - experimental_preds_dtype.append(exp_preds.dtype) - - # Setup legacy API - torch.manual_seed(seed) - set_seed(seed) - model, (model_desc, lr_desc), _, _, _, _, _ = _test_commons._load_pytorch_transformer_model(device, legacy_api=True) - loss_scaler = Legacy_LossScaler("ort_test_input_loss_scalar", True) - legacy_trainer = Legacy_ORTTrainer( - model, - my_loss, - model_desc, - "LambOptimizer", - None, - lr_desc, - device=device, - _use_deterministic_compute=True, - use_mixed_precision=True, - loss_scaler=loss_scaler, - ) - # Training loop - legacy_loss = [] - legacy_preds_dtype = [] - for i in range(total_steps): - data, targets = batcher_fn(train_data, i) - leg_loss, leg_preds = legacy_trainer.train_step(data, targets, torch.tensor([optim_config.lr])) - legacy_loss.append(leg_loss.cpu()) - legacy_preds_dtype.append(leg_preds.dtype) - - # Compare legacy vs experimental APIs - assert experimental_preds_dtype == legacy_preds_dtype - _test_helpers.assert_legacy_onnx_weights(trainer, legacy_trainer) - _test_helpers.assert_model_outputs(legacy_loss, experimental_loss) - - -@pytest.mark.parametrize( - "seed,device,gradient_accumulation_steps,total_steps", - [ - (0, "cuda", 1, 12), - (42, "cuda", 3, 12), - (123, "cuda", 7, 12), - (321, "cuda", 12, 12), - ], -) -def testORTTrainerLegacyAndExperimentalGradientAccumulation(seed, device, gradient_accumulation_steps, total_steps): - # Common data - torch.set_printoptions(precision=10) - - # Setup experimental API - torch.manual_seed(seed) - set_seed(seed) - options = orttrainer.ORTTrainerOptions( - { - "device": {"id": device}, - "batch": {"gradient_accumulation_steps": gradient_accumulation_steps}, - "debug": {"deterministic_compute": True}, - } - ) - model, model_desc, my_loss, batcher_fn, train_data, _, _ = _test_commons._load_pytorch_transformer_model(device) - optim_config = optim.LambConfig(lr=0.001) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=options) - # Training loop - experimental_loss = [] - for i in range(total_steps): - data, targets = batcher_fn(train_data, i) - exp_loss, _ = trainer.train_step(data, targets) - experimental_loss.append(exp_loss.cpu()) - - # Setup legacy API - torch.manual_seed(seed) - 
set_seed(seed) - model, (model_desc, lr_desc), _, _, _, _, _ = _test_commons._load_pytorch_transformer_model(device, legacy_api=True) - legacy_trainer = Legacy_ORTTrainer( - model, - my_loss, - model_desc, - "LambOptimizer", - None, - lr_desc, - device=device, - _use_deterministic_compute=True, - gradient_accumulation_steps=gradient_accumulation_steps, - ) - # Training loop - legacy_loss = [] - for i in range(total_steps): - data, targets = batcher_fn(train_data, i) - leg_loss, _ = legacy_trainer.train_step(data, targets, torch.tensor([optim_config.lr])) - legacy_loss.append(leg_loss.cpu()) - - # Compare legacy vs experimental APIs - _test_helpers.assert_model_outputs(legacy_loss, experimental_loss) - - -@pytest.mark.parametrize( - "seed,device,optimizer_config,lr_scheduler, get_lr_this_step", - [ - ( - 0, - "cuda", - optim.AdamConfig, - optim.lr_scheduler.ConstantWarmupLRScheduler, - _test_commons.legacy_constant_lr_scheduler, - ), - ( - 0, - "cuda", - optim.LambConfig, - optim.lr_scheduler.ConstantWarmupLRScheduler, - _test_commons.legacy_constant_lr_scheduler, - ), - ( - 0, - "cuda", - optim.SGDConfig, - optim.lr_scheduler.ConstantWarmupLRScheduler, - _test_commons.legacy_constant_lr_scheduler, - ), - ( - 42, - "cuda", - optim.AdamConfig, - optim.lr_scheduler.LinearWarmupLRScheduler, - _test_commons.legacy_linear_lr_scheduler, - ), - ( - 42, - "cuda", - optim.LambConfig, - optim.lr_scheduler.LinearWarmupLRScheduler, - _test_commons.legacy_linear_lr_scheduler, - ), - ( - 42, - "cuda", - optim.SGDConfig, - optim.lr_scheduler.LinearWarmupLRScheduler, - _test_commons.legacy_linear_lr_scheduler, - ), - ( - 123, - "cuda", - optim.AdamConfig, - optim.lr_scheduler.CosineWarmupLRScheduler, - _test_commons.legacy_cosine_lr_scheduler, - ), - ( - 123, - "cuda", - optim.LambConfig, - optim.lr_scheduler.CosineWarmupLRScheduler, - _test_commons.legacy_cosine_lr_scheduler, - ), - ( - 123, - "cuda", - optim.SGDConfig, - optim.lr_scheduler.CosineWarmupLRScheduler, - _test_commons.legacy_cosine_lr_scheduler, - ), - ( - 321, - "cuda", - optim.AdamConfig, - optim.lr_scheduler.PolyWarmupLRScheduler, - _test_commons.legacy_poly_lr_scheduler, - ), - ( - 321, - "cuda", - optim.LambConfig, - optim.lr_scheduler.PolyWarmupLRScheduler, - _test_commons.legacy_poly_lr_scheduler, - ), - ( - 321, - "cuda", - optim.SGDConfig, - optim.lr_scheduler.PolyWarmupLRScheduler, - _test_commons.legacy_poly_lr_scheduler, - ), - ], -) -def testORTTrainerLegacyAndExperimentalLRScheduler(seed, device, optimizer_config, lr_scheduler, get_lr_this_step): - # Common data - total_steps = 10 - lr = 0.001 - warmup = 0.5 - cycles = 0.5 - power = 1.0 - lr_end = 1e-7 - torch.set_printoptions(precision=10) - - # Setup experimental API - torch.manual_seed(seed) - set_seed(seed) - if ( - lr_scheduler == optim.lr_scheduler.ConstantWarmupLRScheduler - or lr_scheduler == optim.lr_scheduler.LinearWarmupLRScheduler - ): - lr_scheduler = lr_scheduler(total_steps=total_steps, warmup=warmup) - elif lr_scheduler == optim.lr_scheduler.CosineWarmupLRScheduler: - lr_scheduler = lr_scheduler(total_steps=total_steps, warmup=warmup, cycles=cycles) - elif lr_scheduler == optim.lr_scheduler.PolyWarmupLRScheduler: - lr_scheduler = lr_scheduler(total_steps=total_steps, warmup=warmup, power=power, lr_end=lr_end) - else: - raise RuntimeError("Invalid lr_scheduler") - - options = orttrainer.ORTTrainerOptions( - {"device": {"id": device}, "debug": {"deterministic_compute": True}, "lr_scheduler": lr_scheduler} - ) - model, model_desc, my_loss, batcher_fn, train_data, _, 
_ = _test_commons._load_pytorch_transformer_model(device) - optim_config = optimizer_config(lr=lr) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=options) - # Training loop - experimental_loss = [] - for i in range(total_steps): - data, targets = batcher_fn(train_data, i) - exp_loss, exp_preds = trainer.train_step(data, targets) - experimental_loss.append(exp_loss.cpu()) - - # Setup legacy API - torch.manual_seed(seed) - set_seed(seed) - - if optimizer_config == optim.AdamConfig: - legacy_optimizer_config = "AdamOptimizer" - elif optimizer_config == optim.LambConfig: - legacy_optimizer_config = "LambOptimizer" - elif optimizer_config == optim.SGDConfig: - legacy_optimizer_config = "SGDOptimizer" - else: - raise RuntimeError("Invalid optimizer_config") - - if ( - get_lr_this_step == _test_commons.legacy_constant_lr_scheduler - or get_lr_this_step == _test_commons.legacy_linear_lr_scheduler - ): - get_lr_this_step = partial(get_lr_this_step, initial_lr=lr, total_steps=total_steps, warmup=warmup) - elif get_lr_this_step == _test_commons.legacy_cosine_lr_scheduler: - get_lr_this_step = partial( - get_lr_this_step, initial_lr=lr, total_steps=total_steps, warmup=warmup, cycles=cycles - ) - elif get_lr_this_step == _test_commons.legacy_poly_lr_scheduler: - get_lr_this_step = partial( - get_lr_this_step, initial_lr=lr, total_steps=total_steps, warmup=warmup, power=power, lr_end=lr_end - ) - else: - raise RuntimeError("Invalid get_lr_this_step") - - model, (model_desc, lr_desc), _, _, _, _, _ = _test_commons._load_pytorch_transformer_model(device, legacy_api=True) - legacy_trainer = Legacy_ORTTrainer( - model, - my_loss, - model_desc, - legacy_optimizer_config, - None, - lr_desc, - device=device, - _use_deterministic_compute=True, - get_lr_this_step=get_lr_this_step, - ) - # Training loop - legacy_loss = [] - for i in range(total_steps): - data, targets = batcher_fn(train_data, i) - leg_loss, leg_preds = legacy_trainer.train_step(data, targets) - legacy_loss.append(leg_loss.cpu()) - - # Compare legacy vs experimental APIs - _test_helpers.assert_model_outputs(legacy_loss, experimental_loss) - - -def testLossScalerLegacyAndExperimentalFullCycle(): - orttrainer.TrainStepInfo( - optimizer_config=optim.LambConfig(lr=0.001), all_finite=True, fetches=[], optimization_step=0, step=0 - ) - new_ls = amp.DynamicLossScaler() - old_ls = Legacy_LossScaler("ort_test_input_loss_scaler", True) - - # Initial state - train_step_info = orttrainer.TrainStepInfo(optim.LambConfig()) - assert_allclose(new_ls.loss_scale, old_ls.loss_scale_) - assert new_ls.up_scale_window == old_ls.up_scale_window_ - assert_allclose(new_ls.min_loss_scale, old_ls.min_loss_scale_) - assert_allclose(new_ls.max_loss_scale, old_ls.max_loss_scale_) - - # Performing 9*2000 updates to cover all branches of LossScaler.update(train_step_info.all_finite=True) - for _cycles in range(1, 10): - # 1999 updates without overflow produces 1999 stable steps - for _i in range(1, 2000): - new_loss_scale = new_ls.update(train_step_info) - old_ls.update_loss_scale(train_step_info.all_finite) - old_loss_scale = old_ls.loss_scale_ - assert new_ls._stable_steps_count == old_ls.stable_steps_ - assert_allclose(new_loss_scale, old_loss_scale) - - # 2000th update without overflow doubles the loss and zero stable steps until max_loss_scale is reached - new_loss_scale = new_ls.update(train_step_info) - old_ls.update_loss_scale(train_step_info.all_finite) - old_loss_scale = old_ls.loss_scale_ - assert new_ls._stable_steps_count 
== old_ls.stable_steps_ - assert_allclose(new_loss_scale, old_loss_scale) - - # After 8 cycles, loss scale should be float(1 << 16)*(2**8) - assert_allclose(new_loss_scale, old_loss_scale) - - # After 9 cycles, loss scale reaches max_loss_scale and it is not doubled from that point on - for _count in range(1, 2050): - new_loss_scale = new_ls.update(train_step_info) - old_ls.update_loss_scale(train_step_info.all_finite) - old_loss_scale = old_ls.loss_scale_ - assert new_ls._stable_steps_count == old_ls.stable_steps_ - assert_allclose(new_loss_scale, old_loss_scale) - - # Setting train_step_info.all_finite = False to test down scaling - train_step_info.all_finite = False - - # Performing 24 updates to half the loss scale each time - for _count in range(1, 25): - new_loss_scale = new_ls.update(train_step_info) - old_ls.update_loss_scale(train_step_info.all_finite) - old_loss_scale = old_ls.loss_scale_ - assert new_ls._stable_steps_count == old_ls.stable_steps_ - assert_allclose(new_loss_scale, old_loss_scale) - - # After 24 updates with gradient overflow, loss scale is 1.0 - assert_allclose(new_loss_scale, old_loss_scale) - - # After 25 updates, min_loss_scale is reached and loss scale is not halfed from that point on - for _count in range(1, 5): - new_loss_scale = new_ls.update(train_step_info) - old_ls.update_loss_scale(train_step_info.all_finite) - old_loss_scale = old_ls.loss_scale_ - assert new_ls._stable_steps_count == old_ls.stable_steps_ - assert_allclose(new_loss_scale, old_loss_scale) - - -def testLossScalerLegacyAndExperimentalRandomAllFinite(): - new_ls = amp.DynamicLossScaler() - old_ls = Legacy_LossScaler("ort_test_input_loss_scaler", True) - - # Initial state - train_step_info = orttrainer.TrainStepInfo(optim.LambConfig()) - assert_allclose(new_ls.loss_scale, old_ls.loss_scale_) - assert new_ls.up_scale_window == old_ls.up_scale_window_ - assert_allclose(new_ls.min_loss_scale, old_ls.min_loss_scale_) - assert_allclose(new_ls.max_loss_scale, old_ls.max_loss_scale_) - - import random - - out = [] - for _ in range(1, 64): - train_step_info.all_finite = bool(random.getrandbits(1)) - new_loss_scale = new_ls.update(train_step_info) - old_ls.update_loss_scale(train_step_info.all_finite) - old_loss_scale = old_ls.loss_scale_ - assert new_ls._stable_steps_count == old_ls.stable_steps_ - assert_allclose(new_loss_scale, old_loss_scale) - out.append(new_loss_scale) - assert new_loss_scale > 1e-7 - - -def testORTTrainerRunSymbolicShapeInfer(): - # Common data - seed = 0 - total_steps = 12 - device = "cuda" - torch.set_printoptions(precision=10) - - # Setup without symbolic shape inference - torch.manual_seed(seed) - set_seed(seed) - options = orttrainer.ORTTrainerOptions({"device": {"id": device}, "debug": {"deterministic_compute": True}}) - model, model_desc, my_loss, batcher_fn, train_data, _, _ = _test_commons._load_pytorch_transformer_model(device) - optim_config = optim.LambConfig(lr=0.001) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=options) - # Training loop - expected_loss = [] - for i in range(total_steps): - data, targets = batcher_fn(train_data, i) - loss, _ = trainer.train_step(data, targets) - expected_loss.append(loss.cpu()) - - # Setup with symbolic shape inference - torch.manual_seed(seed) - set_seed(seed) - model, model_desc, my_loss, batcher_fn, train_data, _, _ = _test_commons._load_pytorch_transformer_model(device) - optim_config = optim.LambConfig(lr=0.001) - options.utils.run_symbolic_shape_infer = True - trainer = 
orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=options) - # Training loop - new_loss = [] - for i in range(total_steps): - data, targets = batcher_fn(train_data, i) - loss, _ = trainer.train_step(data, targets) - new_loss.append(loss.cpu()) - - # Setup with symbolic shape inference in legacy API - torch.manual_seed(seed) - set_seed(seed) - model, (model_desc, lr_desc), _, _, _, _, _ = _test_commons._load_pytorch_transformer_model(device, legacy_api=True) - legacy_trainer = Legacy_ORTTrainer( - model, - my_loss, - model_desc, - "LambOptimizer", - None, - lr_desc, - device=device, - run_symbolic_shape_infer=True, - _use_deterministic_compute=True, - ) - # Training loop - legacy_loss = [] - for i in range(total_steps): - data, targets = batcher_fn(train_data, i) - loss, _ = legacy_trainer.train_step(data, targets, torch.tensor([optim_config.lr])) - legacy_loss.append(loss.cpu()) - - # Compare losses - _test_helpers.assert_model_outputs(new_loss, expected_loss) - _test_helpers.assert_model_outputs(legacy_loss, expected_loss) - - -@pytest.mark.parametrize( - "test_input", - [ - ( - { - "distributed": {"enable_adasum": True}, - } - ) - ], -) -def testORTTrainerOptionsEnabledAdasumFlag(test_input): - """Test the enabled_adasum flag values when set enabled""" - - actual_values = orttrainer_options.ORTTrainerOptions(test_input) - assert actual_values.distributed.enable_adasum is True - - -@pytest.mark.parametrize( - "test_input", - [ - ( - { - "distributed": {"enable_adasum": False}, - } - ) - ], -) -def testORTTrainerOptionsDisabledAdasumFlag(test_input): - """Test the enabled_adasum flag values when set disabled""" - - actual_values = orttrainer_options.ORTTrainerOptions(test_input) - assert actual_values.distributed.enable_adasum is False - - -def testORTTrainerUnusedInput(): - class UnusedInputModel(torch.nn.Module): - def __init__(self): - super().__init__() - - def forward(self, x, y): - return torch.mean(x) - - model = UnusedInputModel() - model_desc = {"inputs": [("x", [1]), ("y", [1])], "outputs": [("loss", [], True)]} - optim_config = optim.LambConfig(lr=0.001) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config) - - # Run just one step to make sure there are no iobinding errors for the unused input. 
- try: - trainer.train_step(torch.FloatTensor([1.0]), torch.FloatTensor([1.0])) - except RuntimeError: - pytest.fail("RuntimeError doing train_step with an unused input.") - - -@pytest.mark.parametrize( - "debug_files", - [ - { - "model_after_graph_transforms_path": "transformed.onnx", - "model_with_gradient_graph_path": "transformed_grad.onnx", - "model_with_training_graph_path": "training.onnx", - "model_with_training_graph_after_optimization_path": "training_optimized.onnx", - }, - {"model_after_graph_transforms_path": "transformed.onnx", "model_with_training_graph_path": ""}, - ], -) -def testTrainingGraphExport(debug_files): - device = "cuda" - model, model_desc, my_loss, batcher_fn, train_data, _, _ = _test_commons._load_pytorch_transformer_model(device) - - with tempfile.TemporaryDirectory() as tempdir: - debug_paths = {} - for k, v in debug_files.items(): - debug_paths[k] = os.path.join(tempdir, v) - opts = orttrainer.ORTTrainerOptions({"device": {"id": device}, "debug": {"graph_save_paths": debug_paths}}) - optim_config = optim.AdamConfig() - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=opts) - data, targets = batcher_fn(train_data, 0) - trainer.train_step(data, targets) - for k, v in debug_files.items(): - path = debug_paths[k] - if len(v) > 0: - assert os.path.isfile(path) - saved_graph = onnx.load(path).graph - if k == "model_with_training_graph_path": - assert any("AdamOptimizer" in n.op_type for n in saved_graph.node) - elif k == "model_with_gradient_graph_path": - assert any("Grad" in n.name for n in saved_graph.node) - elif k == "model_after_graph_transforms_path": - assert any("LayerNormalization" in n.op_type for n in saved_graph.node) - elif k == "model_with_training_graph_after_optimization_path": - assert any("FusedMatMul" in n.op_type for n in saved_graph.node) - # remove saved file - os.remove(path) - else: - assert not os.path.isfile(path) - - -def _adam_max_norm_clip_data(): - device_capability_major = torch.cuda.get_device_capability()[0] - if device_capability_major == 7: # V100 for Dev machine - return [ - ( - 0, - "cuda", - 1.0, - 1, - 12, - { - 12: [ - 10.592951, - 10.067989, - 9.619152, - 9.245731, - 8.881137, - 8.578644, - 8.280573, - 8.063023, - 7.797933, - 7.486215, - 7.233806, - 7.011791, - ], - 14: [ - 10.584141, - 10.068119, - 9.581743, - 9.191472, - 8.880169, - 8.5352, - 8.311425, - 8.061202, - 7.773032, - 7.523009, - 7.258711, - 7.02805, - ], - }, - ), - ( - 0, - "cuda", - 0.1, - 1, - 12, - { - 12: [ - 10.592951, - 10.068722, - 9.620503, - 9.247791, - 8.883972, - 8.582286, - 8.285027, - 8.068308, - 7.803638, - 7.492318, - 7.240352, - 7.018665, - ], - 14: [ - 10.584141, - 10.068845, - 9.583107, - 9.193537, - 8.882966, - 8.538839, - 8.315872, - 8.066408, - 7.778978, - 7.529708, - 7.265849, - 7.035439, - ], - }, - ), - ( - 42, - "cuda", - 1.0, - 1, - 12, - { - 12: [ - 10.647908, - 10.144501, - 9.672352, - 9.306980, - 8.956026, - 8.602655, - 8.351079, - 8.088144, - 7.867220, - 7.564082, - 7.289846, - 7.073726, - ], - 14: [ - 10.697515, - 10.229034, - 9.765422, - 9.428294, - 9.080612, - 8.715208, - 8.459574, - 8.169073, - 7.940211, - 7.654147, - 7.390446, - 7.166227, - ], - }, - ), - ( - 42, - "cuda", - 0.1, - 1, - 12, - { - 12: [ - 10.647908, - 10.145191, - 9.673690, - 9.309031, - 8.959020, - 8.606632, - 8.355836, - 8.093478, - 7.873327, - 7.570731, - 7.296772, - 7.0809422, - ], - 14: [ - 10.697515, - 10.22967, - 9.766556, - 9.430037, - 9.083106, - 8.718601, - 8.463726, - 8.17396, - 7.945755, - 7.660188, - 
7.396963, - 7.172944, - ], - }, - ), - ] - elif device_capability_major == 5: # M60 for CI machines (Python Packaging Pipeline) - return [ - ( - 0, - "cuda", - 1.0, - 1, - 12, - { - 12: [ - 10.618382, - 10.08292, - 9.603334, - 9.258133, - 8.917768, - 8.591574, - 8.318401, - 8.042292, - 7.783608, - 7.50226, - 7.236041, - 7.035602, - ], - 14: [ - 10.618382, - 10.08292, - 9.603334, - 9.258133, - 8.917768, - 8.591574, - 8.318401, - 8.042292, - 7.783608, - 7.50226, - 7.236041, - 7.035602, - ], - }, - ), - ( - 0, - "cuda", - 0.1, - 1, - 12, - { - 12: [ - 10.618382, - 10.083632, - 9.604639, - 9.260109, - 8.920504, - 8.595082, - 8.322799, - 8.047493, - 7.78929, - 7.508382, - 7.242587, - 7.042367, - ], - 14: [ - 10.618382, - 10.083632, - 9.604639, - 9.260109, - 8.920504, - 8.595082, - 8.322799, - 8.047493, - 7.78929, - 7.508382, - 7.242587, - 7.042367, - ], - }, - ), - ( - 42, - "cuda", - 1.0, - 1, - 12, - { - 12: [ - 10.68639, - 10.102986, - 9.647681, - 9.293091, - 8.958928, - 8.625297, - 8.351107, - 8.079577, - 7.840723, - 7.543044, - 7.284141, - 7.072688, - ], - 14: [ - 10.68639, - 10.102986, - 9.647681, - 9.293091, - 8.958928, - 8.625297, - 8.351107, - 8.079577, - 7.840723, - 7.543044, - 7.284141, - 7.072688, - ], - }, - ), - ( - 42, - "cuda", - 0.1, - 1, - 12, - { - 12: [ - 10.68639, - 10.103672, - 9.649025, - 9.295167, - 8.961777, - 8.629059, - 8.355571, - 8.084871, - 7.846589, - 7.549438, - 7.290722, - 7.079446, - ], - 14: [ - 10.697515, - 10.22967, - 9.766556, - 9.430037, - 9.083106, - 8.718601, - 8.463726, - 8.17396, - 7.945755, - 7.660188, - 7.396963, - 7.172944, - ], - }, - ), - ] - - -@pytest.mark.parametrize( - "seed,device,max_norm_clip,gradient_accumulation_steps,total_steps,expected_loss", _adam_max_norm_clip_data() -) -def testORTTrainerAdamMaxNormClip(seed, device, max_norm_clip, gradient_accumulation_steps, total_steps, expected_loss): - rtol = 1e-5 - torch.manual_seed(seed) - set_seed(seed) - - # Setup ORTTrainer - options = orttrainer.ORTTrainerOptions( - { - "device": {"id": device}, - "batch": {"gradient_accumulation_steps": gradient_accumulation_steps}, - "debug": {"deterministic_compute": True}, - } - ) - model, model_desc, my_loss, batcher_fn, train_data, _, _ = _test_commons._load_pytorch_transformer_model(device) - optim_config = optim.AdamConfig(lr=0.001, max_norm_clip=max_norm_clip) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=options) - - # Training loop - actual_loss = [] - for i in range(total_steps): - data, targets = batcher_fn(train_data, i) - loss, _ = trainer.train_step(data, targets) - actual_loss.append(loss.cpu().item()) - - # Compare legacy vs experimental APIs - assert trainer._onnx_model is not None - opset = get_model_opset(trainer._onnx_model) - _test_helpers.assert_model_outputs(expected_loss[opset], actual_loss, rtol=rtol) - - -def _lamb_max_norm_clip_data(): - device_capability_major = torch.cuda.get_device_capability()[0] - if device_capability_major == 7: # V100 for Dev machine - return [ - ( - 0, - "cuda", - 1.0, - 1, - 12, - { - 12: [ - 10.592951, - 10.487728, - 10.422251, - 10.350913, - 10.244248, - 10.213003, - 10.129222, - 10.095112, - 10.035983, - 9.974586, - 9.909771, - 9.874278, - ], - 14: [ - 10.584141, - 10.497192, - 10.389251, - 10.286045, - 10.231354, - 10.17018, - 10.066779, - 10.048138, - 9.958029, - 9.8908, - 9.82965, - 9.755484, - ], - }, - ), - ( - 0, - "cuda", - 0.1, - 1, - 12, - { - 12: [ - 10.592951, - 10.452503, - 10.349832, - 10.245314, - 10.106587, - 10.046009, - 9.934781, - 
9.875164, - 9.792067, - 9.704592, - 9.617104, - 9.563070, - ], - 14: [ - 10.584141, - 10.461154, - 10.315399, - 10.178979, - 10.092329, - 9.999928, - 9.869949, - 9.824564, - 9.707565, - 9.61643, - 9.532847, - 9.439593, - ], - }, - ), - ( - 42, - "cuda", - 1.0, - 1, - 12, - { - 12: [ - 10.647908, - 10.566276, - 10.476154, - 10.406275, - 10.311079, - 10.240053, - 10.196469, - 10.113955, - 10.117376, - 10.013077, - 9.930301, - 9.893368, - ], - 14: [ - 10.697515, - 10.631279, - 10.528757, - 10.496689, - 10.411219, - 10.322109, - 10.297314, - 10.215549, - 10.149698, - 10.087336, - 10.010884, - 9.934544, - ], - }, - ), - ( - 42, - "cuda", - 0.1, - 1, - 12, - { - 12: [ - 10.647908, - 10.531957, - 10.405246, - 10.302971, - 10.176583, - 10.075583, - 10.005772, - 9.897825, - 9.875748, - 9.748932, - 9.642885, - 9.586762, - ], - 14: [ - 10.697515, - 10.596729, - 10.457815, - 10.393475, - 10.277581, - 10.158909, - 10.108126, - 10.000326, - 9.912526, - 9.826057, - 9.727899, - 9.633768, - ], - }, - ), - ] - elif device_capability_major == 5: # M60 for CI machines (Python Packaging Pipeline) - return [ - ( - 0, - "cuda", - 1.0, - 1, - 12, - { - 12: [ - 10.618382, - 10.50222, - 10.403347, - 10.35298, - 10.288447, - 10.237399, - 10.184225, - 10.089048, - 10.008952, - 9.972644, - 9.897674, - 9.84524, - ], - 14: [0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4], - }, - ), - ( - 0, - "cuda", - 0.1, - 1, - 12, - { - 12: [ - 10.618382, - 10.466732, - 10.330871, - 10.24715, - 10.150972, - 10.069127, - 9.98974, - 9.870169, - 9.763693, - 9.704323, - 9.605957, - 9.533117, - ], - 14: [1, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4], - }, - ), - ( - 42, - "cuda", - 1.0, - 1, - 12, - { - 12: [ - 10.68639, - 10.511692, - 10.447308, - 10.405255, - 10.334866, - 10.261473, - 10.169422, - 10.107138, - 10.069889, - 9.97798, - 9.928105, - 9.896435, - ], - 14: [2, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4], - }, - ), - ( - 42, - "cuda", - 0.1, - 1, - 12, - { - 12: [ - 10.68639, - 10.477489, - 10.376671, - 10.301725, - 10.200718, - 10.098477, - 9.97995, - 9.890104, - 9.828899, - 9.713555, - 9.639567, - 9.589856, - ], - 14: [3, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4], - }, - ), - ] - - -@pytest.mark.parametrize( - "seed,device,max_norm_clip, gradient_accumulation_steps,total_steps,expected_loss", _lamb_max_norm_clip_data() -) -def testORTTrainerLambMaxNormClip(seed, device, max_norm_clip, gradient_accumulation_steps, total_steps, expected_loss): - rtol = 1e-3 - torch.manual_seed(seed) - set_seed(seed) - - # Setup ORTTrainer - options = orttrainer.ORTTrainerOptions( - { - "device": {"id": device}, - "batch": {"gradient_accumulation_steps": gradient_accumulation_steps}, - "debug": {"deterministic_compute": True}, - } - ) - model, model_desc, my_loss, batcher_fn, train_data, _, _ = _test_commons._load_pytorch_transformer_model(device) - optim_config = optim.LambConfig(lr=0.001, max_norm_clip=max_norm_clip) - trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=options) - - # Training loop - actual_loss = [] - for i in range(total_steps): - data, targets = batcher_fn(train_data, i) - loss, _ = trainer.train_step(data, targets) - actual_loss.append(loss.cpu().item()) - - # Compare legacy vs experimental APIs - opset = get_model_opset(trainer._onnx_model) - _test_helpers.assert_model_outputs(expected_loss[opset], actual_loss, rtol=rtol) diff --git a/orttraining/orttraining/test/python/orttraining_test_transformers.py b/orttraining/orttraining/test/python/orttraining_test_transformers.py deleted file mode 100644 index 
dbaf4a293c466..0000000000000 --- a/orttraining/orttraining/test/python/orttraining_test_transformers.py +++ /dev/null @@ -1,480 +0,0 @@ -import random -import unittest - -import numpy as np -import torch -from numpy.testing import assert_allclose -from orttraining_test_data_loader import BatchArgsOption, ids_tensor -from orttraining_test_utils import get_lr, run_test -from transformers import BertConfig, BertForPreTraining - -import onnxruntime -from onnxruntime.capi.ort_trainer import IODescription, LossScaler, ModelDescription, ORTTrainer # noqa: F401 - - -class BertModelTest(unittest.TestCase): - class BertModelTester: - def __init__( - self, - parent, - batch_size=13, - seq_length=7, - is_training=True, - use_input_mask=True, - use_token_type_ids=True, - use_labels=True, - vocab_size=99, - hidden_size=32, - num_hidden_layers=5, - num_attention_heads=4, - intermediate_size=37, - hidden_act="gelu", - hidden_dropout_prob=0.1, - attention_probs_dropout_prob=0.1, - max_position_embeddings=512, - type_vocab_size=16, - type_sequence_label_size=2, - initializer_range=0.02, - num_labels=3, - num_choices=4, - scope=None, - device="cpu", - ): - self.parent = parent - self.batch_size = batch_size - self.seq_length = seq_length - self.is_training = is_training - self.use_input_mask = use_input_mask - self.use_token_type_ids = use_token_type_ids - self.use_labels = use_labels - self.vocab_size = vocab_size - self.hidden_size = hidden_size - self.num_hidden_layers = num_hidden_layers - self.num_attention_heads = num_attention_heads - self.intermediate_size = intermediate_size - self.hidden_act = hidden_act - self.hidden_dropout_prob = hidden_dropout_prob - self.attention_probs_dropout_prob = attention_probs_dropout_prob - self.max_position_embeddings = max_position_embeddings - self.type_vocab_size = type_vocab_size - self.type_sequence_label_size = type_sequence_label_size - self.initializer_range = initializer_range - self.num_labels = num_labels - self.num_choices = num_choices - self.scope = scope - self.device = device - - # 1. 
superset of bert input/output descs - # see BertPreTrainedModel doc - self.input_ids_desc = IODescription( - "input_ids", ["batch", "max_seq_len_in_batch"], torch.int64, num_classes=self.vocab_size - ) - self.attention_mask_desc = IODescription( - "attention_mask", ["batch", "max_seq_len_in_batch"], torch.int64, num_classes=2 - ) - self.token_type_ids_desc = IODescription( - "token_type_ids", ["batch", "max_seq_len_in_batch"], torch.int64, num_classes=2 - ) - self.position_ids_desc = IODescription( - "position_ids", ["batch", "max_seq_len_in_batch"], torch.int64, num_classes=self.max_position_embeddings - ) - self.head_mask_desc = IODescription( - "head_mask", [self.num_hidden_layers, self.num_attention_heads], torch.int64, num_classes=2 - ) - self.inputs_embeds_desc = IODescription( - "inputs_embeds", ["batch", "max_seq_len_in_batch", self.hidden_size], torch.float32 - ) - - self.encoder_hidden_states_desc = IODescription( - "encoder_hidden_states", ["batch", "max_seq_len_in_batch", self.hidden_size], torch.float32 - ) - self.encoder_attention_mask_desc = IODescription( - "encoder_attention_mask", ["batch", "max_seq_len_in_batch"], torch.float32 - ) - - # see BertForPreTraining doc - self.masked_lm_labels_desc = IODescription( - "masked_lm_labels", ["batch", "max_seq_len_in_batch"], torch.int64, num_classes=self.vocab_size - ) - self.next_sentence_label_desc = IODescription( - "next_sentence_label", - [ - "batch", - ], - torch.int64, - num_classes=2, - ) - - # outputs - self.loss_desc = IODescription( - "loss", - [ - 1, - ], - torch.float32, - ) - self.prediction_scores_desc = IODescription( - "prediction_scores", ["batch", "max_seq_len_in_batch", self.vocab_size], torch.float32 - ) - - self.seq_relationship_scores_desc = IODescription( - "seq_relationship_scores", ["batch", 2], torch.float32 - ) # IODescription('seq_relationship_scores', ['batch', 'max_seq_len_in_batch', 2], torch.float32) - self.hidden_states_desc = IODescription( - "hidden_states", - [self.num_hidden_layers, "batch", "max_seq_len_in_batch", self.hidden_size], - torch.float32, - ) - self.attentions_desc = IODescription( - "attentions", - [ - self.num_hidden_layers, - "batch", - self.num_attention_heads, - "max_seq_len_in_batch", - "max_seq_len_in_batch", - ], - torch.float32, - ) - self.last_hidden_state_desc = IODescription( - "last_hidden_state", ["batch", "max_seq_len_in_batch", self.hidden_size], torch.float32 - ) - self.pooler_output_desc = IODescription("pooler_output", ["batch", self.hidden_size], torch.float32) - - def BertForPreTraining_descs(self): - return ModelDescription( - [ - self.input_ids_desc, - self.attention_mask_desc, - self.token_type_ids_desc, - self.masked_lm_labels_desc, - self.next_sentence_label_desc, - ], - # returns loss_desc if both masked_lm_labels_desc, next_sentence_label are provided - # hidden_states_desc, attentions_desc shall be included according to config.output_attentions, config.output_hidden_states - [ - self.loss_desc, - self.prediction_scores_desc, - self.seq_relationship_scores_desc, - # hidden_states_desc, attentions_desc - ], - ) - - def prepare_config_and_inputs(self): - input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).to(self.device) - - input_mask = None - if self.use_input_mask: - input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2).to(self.device) - - token_type_ids = None - if self.use_token_type_ids: - token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size).to(self.device) - - 
sequence_labels = None - token_labels = None - choice_labels = None - if self.use_labels: - sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size).to(self.device) - token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels).to(self.device) - choice_labels = ids_tensor([self.batch_size], self.num_choices).to(self.device) - - config = BertConfig( - vocab_size=self.vocab_size, - vocab_size_or_config_json_file=self.vocab_size, - hidden_size=self.hidden_size, - num_hidden_layers=self.num_hidden_layers, - num_attention_heads=self.num_attention_heads, - intermediate_size=self.intermediate_size, - hidden_act=self.hidden_act, - hidden_dropout_prob=self.hidden_dropout_prob, - attention_probs_dropout_prob=self.attention_probs_dropout_prob, - max_position_embeddings=self.max_position_embeddings, - type_vocab_size=self.type_vocab_size, - is_decoder=False, - initializer_range=self.initializer_range, - ) - - return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels - - def create_and_check_bert_for_pretraining( - self, - config, - input_ids, - token_type_ids, - input_mask, - sequence_labels, - token_labels, - choice_labels, - option_fp16, - option_allreduce_post_accumulation, - option_gradient_accumulation_steps, - option_split_batch, - option_use_internal_get_lr_this_step=[True], # noqa: B006 - option_use_internal_loss_scaler=[True], # noqa: B006 - ): - seed = 42 - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - onnxruntime.set_seed(seed) - - model = BertForPreTraining(config=config) - model.eval() - loss, prediction_scores, seq_relationship_score = model( - input_ids, - attention_mask=input_mask, - token_type_ids=token_type_ids, - masked_lm_labels=token_labels, - next_sentence_label=sequence_labels, - ) - model_desc = ModelDescription( - [ - self.input_ids_desc, - self.attention_mask_desc, - self.token_type_ids_desc, - self.masked_lm_labels_desc, - self.next_sentence_label_desc, - ], - [self.loss_desc, self.prediction_scores_desc, self.seq_relationship_scores_desc], - ) - - from collections import namedtuple - - MyArgs = namedtuple( - "MyArgs", "local_rank world_size max_steps learning_rate warmup_proportion batch_size seq_len" - ) - - dataset_len = 100 - epochs = 8 - max_steps = epochs * dataset_len - args = MyArgs( - local_rank=0, - world_size=1, - max_steps=max_steps, - learning_rate=0.00001, - warmup_proportion=0.01, - batch_size=13, - seq_len=7, - ) - - def get_lr_this_step(global_step): - return get_lr(args, global_step) - - loss_scaler = LossScaler("loss_scale_input_name", True, up_scale_window=2000) - - for fp16 in option_fp16: - for allreduce_post_accumulation in option_allreduce_post_accumulation: - for gradient_accumulation_steps in option_gradient_accumulation_steps: - for use_internal_get_lr_this_step in option_use_internal_get_lr_this_step: - for use_internal_loss_scaler in option_use_internal_loss_scaler: - for split_batch in option_split_batch: - print("gradient_accumulation_steps:", gradient_accumulation_steps) - print("split_batch:", split_batch) - - seed = 42 - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - onnxruntime.set_seed(seed) - - ( - old_api_loss_ort, - old_api_prediction_scores_ort, - old_api_seq_relationship_score_ort, - ) = run_test( - model, - model_desc, - self.device, - args, - gradient_accumulation_steps, - fp16, - allreduce_post_accumulation, - get_lr_this_step, - 
use_internal_get_lr_this_step, - loss_scaler, - use_internal_loss_scaler, - split_batch, - dataset_len, - epochs, - use_new_api=False, - ) - - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - onnxruntime.set_seed(seed) - if use_internal_get_lr_this_step and use_internal_loss_scaler: - ( - new_api_loss_ort, - new_api_prediction_scores_ort, - new_api_seq_relationship_score_ort, - ) = run_test( - model, - model_desc, - self.device, - args, - gradient_accumulation_steps, - fp16, - allreduce_post_accumulation, - get_lr_this_step, - use_internal_get_lr_this_step, - loss_scaler, - use_internal_loss_scaler, - split_batch, - dataset_len, - epochs, - use_new_api=True, - ) - - assert_allclose(old_api_loss_ort, new_api_loss_ort) - assert_allclose(old_api_prediction_scores_ort, new_api_prediction_scores_ort) - assert_allclose( - old_api_seq_relationship_score_ort, new_api_seq_relationship_score_ort - ) - - def setUp(self): - self.model_tester = BertModelTest.BertModelTester(self) - - def test_for_pretraining_mixed_precision(self): - # It would be better to test both with/without mixed precision and allreduce_post_accumulation. - # However, stress test of all the 4 cases is not stable at least on the test machine. - # There we only test mixed precision and allreduce_post_accumulation because it is the most useful use cases. - option_fp16 = [True] - option_allreduce_post_accumulation = [True] - option_gradient_accumulation_steps = [1] - option_split_batch = [BatchArgsOption.ListAndDict] - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_bert_for_pretraining( - *config_and_inputs, - option_fp16, - option_allreduce_post_accumulation, - option_gradient_accumulation_steps, - option_split_batch, - ) - - def test_for_pretraining_mixed_precision_with_gradient_accumulation(self): - # It would be better to test both with/without mixed precision and allreduce_post_accumulation. - # However, stress test of all the 4 cases is not stable at least on the test machine. - # There we only test mixed precision and allreduce_post_accumulation because it is the most useful use cases. - option_fp16 = [True] - option_allreduce_post_accumulation = [True] - option_gradient_accumulation_steps = [8] - option_split_batch = [BatchArgsOption.ListAndDict] - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_bert_for_pretraining( - *config_and_inputs, - option_fp16, - option_allreduce_post_accumulation, - option_gradient_accumulation_steps, - option_split_batch, - ) - - def test_for_pretraining_full_precision_all(self): - # This test is not stable because it create and run ORTSession multiple times. - # It occasionally gets seg fault at ~MemoryPattern() - # when releasing patterns_. In order not to block PR merging CI test, - # this test is broke into following individual tests. 
- option_fp16 = [False] - option_allreduce_post_accumulation = [True] - option_gradient_accumulation_steps = [1, 8] - option_split_batch = [BatchArgsOption.List, BatchArgsOption.Dict, BatchArgsOption.ListAndDict] - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_bert_for_pretraining( - *config_and_inputs, - option_fp16, - option_allreduce_post_accumulation, - option_gradient_accumulation_steps, - option_split_batch, - ) - - def test_for_pretraining_full_precision_list_input(self): - option_fp16 = [False] - option_allreduce_post_accumulation = [True] - option_gradient_accumulation_steps = [1] - option_split_batch = [BatchArgsOption.List] - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_bert_for_pretraining( - *config_and_inputs, - option_fp16, - option_allreduce_post_accumulation, - option_gradient_accumulation_steps, - option_split_batch, - ) - - def test_for_pretraining_full_precision_dict_input(self): - option_fp16 = [False] - option_allreduce_post_accumulation = [True] - option_gradient_accumulation_steps = [1] - option_split_batch = [BatchArgsOption.Dict] - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_bert_for_pretraining( - *config_and_inputs, - option_fp16, - option_allreduce_post_accumulation, - option_gradient_accumulation_steps, - option_split_batch, - ) - - def test_for_pretraining_full_precision_list_and_dict_input(self): - option_fp16 = [False] - option_allreduce_post_accumulation = [True] - option_gradient_accumulation_steps = [1] - option_split_batch = [BatchArgsOption.ListAndDict] - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_bert_for_pretraining( - *config_and_inputs, - option_fp16, - option_allreduce_post_accumulation, - option_gradient_accumulation_steps, - option_split_batch, - ) - - def test_for_pretraining_full_precision_grad_accumulation_list_input(self): - option_fp16 = [False] - option_allreduce_post_accumulation = [True] - option_gradient_accumulation_steps = [8] - option_split_batch = [BatchArgsOption.List] - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_bert_for_pretraining( - *config_and_inputs, - option_fp16, - option_allreduce_post_accumulation, - option_gradient_accumulation_steps, - option_split_batch, - ) - - def test_for_pretraining_full_precision_grad_accumulation_dict_input(self): - option_fp16 = [False] - option_allreduce_post_accumulation = [True] - option_gradient_accumulation_steps = [8] - option_split_batch = [BatchArgsOption.Dict] - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_bert_for_pretraining( - *config_and_inputs, - option_fp16, - option_allreduce_post_accumulation, - option_gradient_accumulation_steps, - option_split_batch, - ) - - def test_for_pretraining_full_precision_grad_accumulation_list_and_dict_input(self): - option_fp16 = [False] - option_allreduce_post_accumulation = [True] - option_gradient_accumulation_steps = [8] - option_split_batch = [BatchArgsOption.ListAndDict] - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_bert_for_pretraining( - *config_and_inputs, - option_fp16, - option_allreduce_post_accumulation, - option_gradient_accumulation_steps, - option_split_batch, - ) - - -if __name__ == "__main__": - unittest.main() diff --git 
a/orttraining/orttraining/test/python/orttraining_test_utils.py b/orttraining/orttraining/test/python/orttraining_test_utils.py deleted file mode 100644 index 527cfb8a0ba7d..0000000000000 --- a/orttraining/orttraining/test/python/orttraining_test_utils.py +++ /dev/null @@ -1,246 +0,0 @@ -import math - -import torch -from orttraining_test_data_loader import BatchArgsOption, create_ort_test_dataloader, split_batch - -from onnxruntime.capi.ort_trainer import IODescription, ORTTrainer -from onnxruntime.training import amp, optim, orttrainer -from onnxruntime.training.optim import _LRScheduler - - -def warmup_cosine(x, warmup=0.002): - if x < warmup: - return x / warmup - return 0.5 * (1.0 + torch.cos(math.pi * x)) - - -def warmup_constant(x, warmup=0.002): - if x < warmup: - return x / warmup - return 1.0 - - -def warmup_linear(x, warmup=0.002): - if x < warmup: - return x / warmup - return max((x - 1.0) / (warmup - 1.0), 0.0) - - -def warmup_poly(x, warmup=0.002, degree=0.5): - if x < warmup: - return x / warmup - return (1.0 - x) ** degree - - -SCHEDULES = { - "warmup_cosine": warmup_cosine, - "warmup_constant": warmup_constant, - "warmup_linear": warmup_linear, - "warmup_poly": warmup_poly, -} - - -def get_lr(args, training_steps, schedule="warmup_poly"): - if args.max_steps == -1: - return args.learning_rate - - schedule_fct = SCHEDULES[schedule] - return args.learning_rate * schedule_fct(training_steps / args.max_steps, args.warmup_proportion) - - -def map_optimizer_attributes(name): - no_decay_keys = ["bias", "gamma", "beta", "LayerNorm"] - no_decay = any(no_decay_key in name for no_decay_key in no_decay_keys) - if no_decay: - return {"alpha": 0.9, "beta": 0.999, "lambda": 0.0, "epsilon": 1e-6} - else: - return {"alpha": 0.9, "beta": 0.999, "lambda": 0.0, "epsilon": 1e-6} - - -class WrapLRScheduler(_LRScheduler): - def __init__(self, get_lr_this_step): - super().__init__() - self.get_lr_this_step = get_lr_this_step - - def get_lr(self, train_step_info): - return [self.get_lr_this_step(train_step_info.optimization_step)] - - -def run_test( - model, - model_desc, - device, - args, - gradient_accumulation_steps, - fp16, - allreduce_post_accumulation, - get_lr_this_step, - use_internal_get_lr_this_step, - loss_scaler, - use_internal_loss_scaler, - batch_args_option, - dataset_len, - epochs, - use_new_api, -): - dataloader = create_ort_test_dataloader(model_desc.inputs_, args.batch_size, args.seq_len, dataset_len, device) - - if use_new_api: - assert use_internal_loss_scaler, "new api should always use internal loss scaler" - - new_api_lr_scheduler = WrapLRScheduler(get_lr_this_step) - - new_api_loss_scaler = amp.DynamicLossScaler() if fp16 else None - options = orttrainer.ORTTrainerOptions( - { - "batch": {"gradient_accumulation_steps": gradient_accumulation_steps}, - "device": {"id": device}, - "mixed_precision": {"enabled": fp16, "loss_scaler": new_api_loss_scaler}, - "debug": { - "deterministic_compute": True, - }, - "utils": {"grad_norm_clip": True}, - "distributed": {"allreduce_post_accumulation": True}, - "lr_scheduler": new_api_lr_scheduler, - } - ) - - param_optimizer = list(model.named_parameters()) - params = [ - { - "params": [n for n, p in param_optimizer if "bias" in n or "LayerNorm.weight" in n], - "alpha": 0.9, - "beta": 0.999, - "lambda": 0.0, - "epsilon": 1e-6, - }, - { - "params": [n for n, p in param_optimizer if not ("bias" in n or "LayerNorm.weight" in n)], - "alpha": 0.9, - "beta": 0.999, - "lambda": 0.0, - "epsilon": 1e-6, - }, - ] - - vocab_size = 99 - new_model_desc 
= { - "inputs": [ - ( - "input_ids", - ["batch", "max_seq_len_in_batch"], - ), - ( - "attention_mask", - ["batch", "max_seq_len_in_batch"], - ), - ( - "token_type_ids", - ["batch", "max_seq_len_in_batch"], - ), - ( - "masked_lm_labels", - ["batch", "max_seq_len_in_batch"], - ), - ( - "next_sentence_label", - [ - "batch", - ], - ), - ], - "outputs": [ - ( - "loss", - [ - 1, - ], - True, - ), - ("prediction_scores", ["batch", "max_seq_len_in_batch", vocab_size]), - ("seq_relationship_scores", ["batch", 2]), - ], - } - - optim_config = optim.LambConfig(params=params, lr=2e-5) - model = orttrainer.ORTTrainer(model, new_model_desc, optim_config, options=options) - print("running with new frontend API") - else: - model = ORTTrainer( - model, - None, - model_desc, - "LambOptimizer", - map_optimizer_attributes=map_optimizer_attributes, - learning_rate_description=IODescription( - "Learning_Rate", - [ - 1, - ], - torch.float32, - ), - device=device, - _enable_internal_postprocess=True, - gradient_accumulation_steps=gradient_accumulation_steps, - # BertLAMB default initial settings: b1=0.9, b2=0.999, e=1e-6 - world_rank=args.local_rank, - world_size=args.world_size, - use_mixed_precision=fp16, - allreduce_post_accumulation=allreduce_post_accumulation, - get_lr_this_step=get_lr_this_step if use_internal_get_lr_this_step else None, - loss_scaler=loss_scaler if use_internal_loss_scaler else None, - _opset_version=14, - _use_deterministic_compute=True, - ) - print("running with old frontend API") - - # training loop - eval_batch = None - if not use_new_api: - model.train() - for _epoch in range(epochs): - for step, batch in enumerate(dataloader): - if eval_batch is None: - eval_batch = batch - - if not use_internal_get_lr_this_step: - lr = get_lr_this_step(step) - learning_rate = torch.tensor([lr]) - - if not use_internal_loss_scaler and fp16: - loss_scale = torch.tensor([loss_scaler.loss_scale_]) - - if batch_args_option == BatchArgsOption.List: - if not use_internal_get_lr_this_step: - batch = [*batch, learning_rate] # noqa: PLW2901 - if not use_internal_loss_scaler and fp16: - batch = [*batch, loss_scale] # noqa: PLW2901 - outputs = model.train_step(*batch) - elif batch_args_option == BatchArgsOption.Dict: - args, kwargs = split_batch(batch, model_desc.inputs_, 0) - if not use_internal_get_lr_this_step: - kwargs["Learning_Rate"] = learning_rate - if not use_internal_loss_scaler and fp16: - kwargs[model.loss_scale_input_name] = loss_scale - outputs = model.train_step(*args, **kwargs) - else: - args_count = int(len(model_desc.inputs_) / 2) # approx helf args, half kwargs - args, kwargs = split_batch(batch, model_desc.inputs_, args_count) - if not use_internal_get_lr_this_step: - kwargs["Learning_Rate"] = learning_rate - if not use_internal_loss_scaler and fp16: - kwargs[model.loss_scale_input_name] = loss_scale - outputs = model.train_step(*args, **kwargs) - - # eval - if batch_args_option == BatchArgsOption.List: - outputs = model.eval_step(*batch) - elif batch_args_option == BatchArgsOption.Dict: - args, kwargs = split_batch(batch, model_desc.inputs_, 0) - outputs = model.eval_step(*args, **kwargs) - else: - args_count = int(len(model_desc.inputs_) / 2) # approx helf args, half kwargs - args, kwargs = split_batch(batch, model_desc.inputs_, args_count) - outputs = model.eval_step(*args, **kwargs) - - return (output.cpu().numpy() for output in outputs) diff --git a/orttraining/orttraining/test/python/orttraining_transformer_trainer.py 
b/orttraining/orttraining/test/python/orttraining_transformer_trainer.py deleted file mode 100644 index bce726871bacf..0000000000000 --- a/orttraining/orttraining/test/python/orttraining_transformer_trainer.py +++ /dev/null @@ -1,357 +0,0 @@ -# adapted from Trainer.py of huggingface transformers - -import json -import logging -import os -import random -from typing import Callable, Dict, List, NamedTuple, Optional - -import numpy as np -import torch -from torch.utils.data.dataloader import DataLoader -from torch.utils.data.dataset import Dataset -from torch.utils.data.distributed import DistributedSampler -from torch.utils.data.sampler import SequentialSampler -from tqdm import tqdm, trange -from transformers.data.data_collator import DefaultDataCollator -from transformers.modeling_utils import PreTrainedModel -from transformers.training_args import TrainingArguments - -import onnxruntime -from onnxruntime.training import amp, optim, orttrainer - -try: - from torch.utils.tensorboard import SummaryWriter - - _has_tensorboard = True -except ImportError: - try: - from tensorboardX import SummaryWriter # noqa: F401 - - _has_tensorboard = True - except ImportError: - _has_tensorboard = False - - -def is_tensorboard_available(): - return _has_tensorboard - - -logger = logging.getLogger(__name__) - - -def set_seed(seed: int): - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - onnxruntime.set_seed(seed) - - -class EvalPrediction(NamedTuple): - predictions: np.ndarray - label_ids: np.ndarray - - -class PredictionOutput(NamedTuple): - predictions: np.ndarray - label_ids: Optional[np.ndarray] - metrics: Optional[Dict[str, float]] - - -class TrainOutput(NamedTuple): - global_step: int - training_loss: float - - -def get_linear_schedule_with_warmup(num_warmup_steps, num_training_steps, base_lr): - def lr_lambda_linear(current_step): - if current_step < num_warmup_steps: - return float(current_step) / float(max(1, num_warmup_steps)) - return max(0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))) - - def lambda_lr_get_lr(current_global_step): - # LambdaLR increment self.last_epoch at evert sept() - return base_lr * lr_lambda_linear(current_global_step) - - return lambda_lr_get_lr - - -class ORTTransformerTrainer: - """ """ - - model: PreTrainedModel - args: TrainingArguments - train_dataset: Dataset - eval_dataset: Dataset - compute_metrics: Callable[[EvalPrediction], Dict] - - def __init__( - self, - model: PreTrainedModel, - model_desc: dict, - args: TrainingArguments, - train_dataset: Dataset, - eval_dataset: Dataset, - compute_metrics: Callable[[EvalPrediction], Dict], - world_size: Optional[int] = 1, - ): - """ """ - - self.model = model - self.model_desc = model_desc - self.args = args - self.world_size = world_size - self.data_collator = DefaultDataCollator() - self.train_dataset = train_dataset - self.eval_dataset = eval_dataset - self.compute_metrics = compute_metrics - set_seed(self.args.seed) - # Create output directory if needed - if self.args.local_rank in [-1, 0]: - os.makedirs(self.args.output_dir, exist_ok=True) - - def get_train_dataloader(self) -> DataLoader: - if self.train_dataset is None: - raise ValueError("Trainer: training requires a train_dataset.") - train_sampler = ( - SequentialSampler(self.train_dataset) - if self.args.local_rank == -1 - else DistributedSampler(self.train_dataset) - ) - return DataLoader( - self.train_dataset, - batch_size=self.args.train_batch_size, - 
sampler=train_sampler, - collate_fn=self.data_collator.collate_batch, - ) - - def get_eval_dataloader(self) -> DataLoader: - return DataLoader( - self.eval_dataset, - batch_size=self.args.eval_batch_size, - shuffle=False, - collate_fn=self.data_collator.collate_batch, - ) - - def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader: - # We use the same batch_size as for eval. - return DataLoader( - test_dataset, - batch_size=self.args.eval_batch_size, - shuffle=False, - collate_fn=self.data_collator.collate_batch, - ) - - def train(self): - """ - Main training entry point. - """ - train_dataloader = self.get_train_dataloader() - - if self.args.max_steps > 0: - t_total = self.args.max_steps - num_train_epochs = ( - self.args.max_steps // (len(train_dataloader) // self.args.gradient_accumulation_steps) + 1 - ) - else: - t_total = int(len(train_dataloader) // self.args.gradient_accumulation_steps * self.args.num_train_epochs) - num_train_epochs = self.args.num_train_epochs - - lr_scheduler = orttrainer.optim.LinearWarmupLRScheduler(t_total, self.args.warmup_steps / float(t_total)) - - loss_scaler = amp.DynamicLossScaler() if self.args.fp16 else None - device = self.args.device.type - - device = f"{device}:{self.args.device.index}" if self.args.device.index else f"{device}:0" - options = orttrainer.ORTTrainerOptions( - { - "batch": {"gradient_accumulation_steps": self.args.gradient_accumulation_steps}, - "device": {"id": device}, - "mixed_precision": {"enabled": self.args.fp16, "loss_scaler": loss_scaler}, - "debug": { - "deterministic_compute": True, - }, - "utils": {"grad_norm_clip": False}, - "distributed": { - # we are running single node multi gpu test. thus world_rank = local_rank - # and world_size = self.args.n_gpu - "world_rank": max(0, self.args.local_rank), - "world_size": int(self.world_size), - "local_rank": max(0, self.args.local_rank), - "allreduce_post_accumulation": True, - }, - "lr_scheduler": lr_scheduler, - } - ) - - param_optimizer = list(self.model.named_parameters()) - params = [ - { - "params": [n for n, p in param_optimizer if "bias" in n or "LayerNorm.weight" in n], - "weight_decay_mode": 1, - }, - { - "params": [n for n, p in param_optimizer if not ("bias" in n or "LayerNorm.weight" in n)], - "weight_decay_mode": 1, - }, - ] - - optim_config = optim.AdamConfig(params=params, lr=2e-5, do_bias_correction=True) - self.model = orttrainer.ORTTrainer(self.model, self.model_desc, optim_config, options=options) - - # Train! - logger.info("***** Running training *****") - logger.info(" Num examples = %d", len(train_dataloader.dataset)) - logger.info(" Num Epochs = %d", num_train_epochs) - logger.info(" Instantaneous batch size per GPU = %d", self.args.per_gpu_train_batch_size) - logger.info( - " Total train batch size (w. 
parallel, distributed & accumulation) = %d", - self.args.train_batch_size - * self.args.gradient_accumulation_steps - * (torch.distributed.get_world_size() if self.args.local_rank != -1 else 1), - ) - logger.info(" Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps) - logger.info(" Total optimization steps = %d", t_total) - - global_step = 0 - epochs_trained = 0 - steps_trained_in_current_epoch = 0 - - tr_loss = 0.0 - logging_loss = 0.0 - train_iterator = trange( - epochs_trained, - int(num_train_epochs), - desc="Epoch", - disable=self.args.local_rank not in [-1, 0], - ) - - for _epoch in train_iterator: - epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=self.args.local_rank not in [-1, 0]) - for step, inputs in enumerate(epoch_iterator): - # Skip past any already trained steps if resuming training - if steps_trained_in_current_epoch > 0: - steps_trained_in_current_epoch -= 1 - continue - - tr_loss += self._training_step(self.model, inputs) - - if (step + 1) % self.args.gradient_accumulation_steps == 0 or ( - len(epoch_iterator) <= self.args.gradient_accumulation_steps and (step + 1) == len(epoch_iterator) - ): - global_step += 1 - - if self.args.local_rank in [-1, 0]: - if (self.args.logging_steps > 0 and global_step % self.args.logging_steps == 0) or ( - global_step == 1 and self.args.logging_first_step - ): - logs = {} - if self.args.evaluate_during_training: - results = self.evaluate() - for key, value in results.items(): - eval_key = f"eval_{key}" - logs[eval_key] = value - - loss_scalar = (tr_loss - logging_loss) / self.args.logging_steps - - logs["loss"] = loss_scalar - logging_loss = tr_loss - - epoch_iterator.write(json.dumps({**logs, **{"step": global_step}})) - - if self.args.max_steps > 0 and global_step > self.args.max_steps: - epoch_iterator.close() - break - if self.args.max_steps > 0 and global_step > self.args.max_steps: - train_iterator.close() - break - - logger.info("\n\nTraining completed. \n\n") - return TrainOutput(global_step, tr_loss / global_step) - - def _training_step(self, model, inputs: Dict[str, torch.Tensor]) -> float: - for k, v in inputs.items(): - inputs[k] = v.to(self.args.device) - - outputs = model.train_step(**inputs) - loss = outputs[0] # model outputs are always tuple in transformers (see doc) - - return loss.item() - - def save_model(self, output_dir: Optional[str] = None): - output_dir = output_dir if output_dir is not None else self.args.output_dir - os.makedirs(output_dir, exist_ok=True) - self.model.save_as_onnx(os.path.join(output_dir, "transformer.onnx")) - - def evaluate(self) -> Dict[str, float]: - """ - Run evaluation and return metrics. - - Returns: - A dict containing: - - the eval loss - - the potential metrics computed from the predictions - """ - eval_dataloader = self.get_eval_dataloader() - - output = self._prediction_loop(eval_dataloader, description="Evaluation") - return output.metrics - - def predict(self, test_dataset: Dataset) -> PredictionOutput: - """ - Run prediction and return predictions and potential metrics. - - Depending on the dataset and your use case, your test dataset may contain labels. - In that case, this method will also return metrics, like in evaluate(). - """ - test_dataloader = self.get_test_dataloader(test_dataset) - return self._prediction_loop(test_dataloader, description="Prediction") - - def _prediction_loop(self, dataloader: DataLoader, description: str) -> PredictionOutput: - """ - Prediction/evaluation loop, shared by `evaluate()` and `predict()`. 
- - Works both with or without labels. - """ - - logger.info("***** Running %s *****", description) - logger.info(" Num examples = %d", len(dataloader.dataset)) - logger.info(" Batch size = %d", dataloader.batch_size) - eval_losses: List[float] = [] - preds: np.ndarray = None - label_ids: np.ndarray = None - - for inputs in tqdm(dataloader, desc=description): - has_labels = any(inputs.get(k) is not None for k in ["labels", "masked_lm_labels"]) - - for k, v in inputs.items(): - inputs[k] = v.to(self.args.device) - - with torch.no_grad(): - outputs = self.model.eval_step(**inputs) - - if has_labels: - step_eval_loss, logits = outputs[:2] - eval_losses += [step_eval_loss.mean().item()] - else: - logits = outputs[0] - - if preds is None: - preds = logits.detach().cpu().numpy() - else: - preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) - if inputs.get("labels") is not None: - if label_ids is None: - label_ids = inputs["labels"].detach().cpu().numpy() - else: - label_ids = np.append(label_ids, inputs["labels"].detach().cpu().numpy(), axis=0) - - if self.compute_metrics is not None and preds is not None and label_ids is not None: - metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids)) - else: - metrics = {} - if len(eval_losses) > 0: - metrics["loss"] = np.mean(eval_losses) - - return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics) diff --git a/orttraining/orttraining/test/python/utils_multiple_choice.py b/orttraining/orttraining/test/python/utils_multiple_choice.py deleted file mode 100644 index e0febaf2d6334..0000000000000 --- a/orttraining/orttraining/test/python/utils_multiple_choice.py +++ /dev/null @@ -1,269 +0,0 @@ -# adapted from run_multiple_choice.py of huggingface transformers -# https://github.com/huggingface/transformers/blob/master/examples/multiple-choice/utils_multiple_choice.py - -import csv -import glob # noqa: F401 -import json # noqa: F401 -import logging -import os -from dataclasses import dataclass -from enum import Enum -from typing import List, Optional - -import torch -import tqdm -from filelock import FileLock -from torch.utils.data.dataset import Dataset -from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available # noqa: F401 - -logger = logging.getLogger(__name__) - - -@dataclass(frozen=True) -class InputExample: - """ - A single training/test example for multiple choice - - Args: - example_id: Unique id for the example. - question: string. The untokenized text of the second sequence (question). - contexts: list of str. The untokenized text of the first sequence (context of corresponding question). - endings: list of str. multiple choice's options. Its length must be equal to contexts' length. - label: (Optional) string. The label of the example. This should be - specified for train and dev examples, but not for test examples. - """ - - example_id: str - question: str - contexts: List[str] - endings: List[str] - label: Optional[str] - - -@dataclass(frozen=True) -class InputFeatures: - """ - A single set of features of data. - Property names are the same names as the corresponding inputs to a model. 
- """ - - example_id: str - input_ids: List[List[int]] - attention_mask: Optional[List[List[int]]] - token_type_ids: Optional[List[List[int]]] - label: Optional[int] - - -class Split(Enum): - train = "train" - dev = "dev" - test = "test" - - -class DataProcessor: - """Base class for data converters for multiple choice data sets.""" - - def get_train_examples(self, data_dir): - """Gets a collection of `InputExample`s for the train set.""" - raise NotImplementedError() - - def get_dev_examples(self, data_dir): - """Gets a collection of `InputExample`s for the dev set.""" - raise NotImplementedError() - - def get_test_examples(self, data_dir): - """Gets a collection of `InputExample`s for the test set.""" - raise NotImplementedError() - - def get_labels(self): - """Gets the list of labels for this data set.""" - raise NotImplementedError() - - -class MultipleChoiceDataset(Dataset): - """ - This will be superseded by a framework-agnostic approach - soon. - """ - - features: List[InputFeatures] - - def __init__( - self, - data_dir: str, - tokenizer: PreTrainedTokenizer, - task: str, - processor: DataProcessor, - max_seq_length: Optional[int] = None, - overwrite_cache=False, - mode: Split = Split.train, - ): - cached_features_file = os.path.join( - data_dir, - "cached_{}_{}_{}_{}".format( - mode.value, - tokenizer.__class__.__name__, - str(max_seq_length), - task, - ), - ) - - # Make sure only the first process in distributed training processes the dataset, - # and the others will use the cache. - lock_path = cached_features_file + ".lock" - with FileLock(lock_path): - if os.path.exists(cached_features_file) and not overwrite_cache: - logger.info(f"Loading features from cached file {cached_features_file}") - self.features = torch.load(cached_features_file) - else: - logger.info(f"Creating features from dataset file at {data_dir}") - label_list = processor.get_labels() - if mode == Split.dev: - examples = processor.get_dev_examples(data_dir) - elif mode == Split.test: - examples = processor.get_test_examples(data_dir) - else: - examples = processor.get_train_examples(data_dir) - logger.info("Training examples: %s", len(examples)) - # TODO clean up all this to leverage built-in features of tokenizers - self.features = convert_examples_to_features( - examples, - label_list, - max_seq_length, - tokenizer, - pad_on_left=bool(tokenizer.padding_side == "left"), - pad_token=tokenizer.pad_token_id, - pad_token_segment_id=tokenizer.pad_token_type_id, - ) - logger.info("Saving features into cached file %s", cached_features_file) - torch.save(self.features, cached_features_file) - - def __len__(self): - return len(self.features) - - def __getitem__(self, i) -> InputFeatures: - return self.features[i] - - -class SwagProcessor(DataProcessor): - """Processor for the SWAG data set.""" - - def get_train_examples(self, data_dir): - """See base class.""" - logger.info(f"LOOKING AT {data_dir} train") - return self._create_examples(self._read_csv(os.path.join(data_dir, "train.csv")), "train") - - def get_dev_examples(self, data_dir): - """See base class.""" - logger.info(f"LOOKING AT {data_dir} dev") - return self._create_examples(self._read_csv(os.path.join(data_dir, "val.csv")), "dev") - - def get_test_examples(self, data_dir): - """See base class.""" - logger.info(f"LOOKING AT {data_dir} dev") - raise ValueError( - "For swag testing, the input file does not contain a label column. It can not be tested in current code" - "setting!" 
- ) - return self._create_examples(self._read_csv(os.path.join(data_dir, "test.csv")), "test") - - def get_labels(self): - """See base class.""" - return ["0", "1", "2", "3"] - - def _read_csv(self, input_file): - with open(input_file, encoding="utf-8") as f: - return list(csv.reader(f)) - - def _create_examples(self, lines: List[List[str]], type: str): - """Creates examples for the training and dev sets.""" - if type == "train" and lines[0][-1] != "label": - raise ValueError("For training, the input file must contain a label column.") - - examples = [ - InputExample( - example_id=line[2], - question=line[5], # in the swag dataset, the - # common beginning of each - # choice is stored in "sent2". - contexts=[line[4], line[4], line[4], line[4]], - endings=[line[7], line[8], line[9], line[10]], - label=line[11], - ) - for line in lines[1:] # we skip the line with the column names - ] - - return examples - - -def convert_examples_to_features( - examples: List[InputExample], - label_list: List[str], - max_length: int, - tokenizer: PreTrainedTokenizer, - pad_token_segment_id=0, - pad_on_left=False, - pad_token=0, - mask_padding_with_zero=True, -) -> List[InputFeatures]: - """ - Loads a data file into a list of `InputFeatures` - """ - - label_map = {label: i for i, label in enumerate(label_list)} - - features = [] - for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"): - if ex_index % 10000 == 0: - logger.info("Writing example %d of %d" % (ex_index, len(examples))) - choices_inputs = [] - for _ending_idx, (context, ending) in enumerate(zip(example.contexts, example.endings)): - text_a = context - if example.question.find("_") != -1: - # this is for cloze question - text_b = example.question.replace("_", ending) - else: - text_b = example.question + " " + ending - - inputs = tokenizer.encode_plus( - text_a, - text_b, - add_special_tokens=True, - max_length=max_length, - pad_to_max_length=True, - return_overflowing_tokens=True, - ) - if "num_truncated_tokens" in inputs and inputs["num_truncated_tokens"] > 0: - logger.info( - "Attention! you are cropping tokens (swag task is ok). " - "If you are training ARC and RACE and you are poping question + options," - "you need to try to use a bigger max seq length!" - ) - - choices_inputs.append(inputs) - - label = label_map[example.label] - - input_ids = [x["input_ids"] for x in choices_inputs] - attention_mask = ( - [x["attention_mask"] for x in choices_inputs] if "attention_mask" in choices_inputs[0] else None - ) - token_type_ids = ( - [x["token_type_ids"] for x in choices_inputs] if "token_type_ids" in choices_inputs[0] else None - ) - - features.append( - InputFeatures( - example_id=example.example_id, - input_ids=input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - label=label, - ) - ) - - for f in features[:2]: - logger.info("*** Example ***") - logger.info("feature: %s" % f) - - return features diff --git a/orttraining/pytorch_frontend_examples/mnist_training.py b/orttraining/pytorch_frontend_examples/mnist_training.py deleted file mode 100644 index dc9b3f654400c..0000000000000 --- a/orttraining/pytorch_frontend_examples/mnist_training.py +++ /dev/null @@ -1,200 +0,0 @@ -## This code is from https://github.com/pytorch/examples/blob/master/mnist/main.py -## with modification to do training using onnxruntime as backend on cuda device. -## A private PyTorch build from https://aiinfra.visualstudio.com/Lotus/_git/pytorch (ORTTraining branch) is needed to run the demo. 
- -## Model testing is not complete. - -import argparse -import os - -import numpy as np # noqa: F401 -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.optim as optim # noqa: F401 -from mpi4py import MPI -from torchvision import datasets, transforms - -from onnxruntime.capi.ort_trainer import IODescription, ModelDescription, ORTTrainer - -try: # noqa: SIM105 - from onnxruntime.capi._pybind_state import set_cuda_device_id -except ImportError: - pass - - -class NeuralNet(nn.Module): - def __init__(self, input_size, hidden_size, num_classes): - super().__init__() - self.fc1 = nn.Linear(input_size, hidden_size) - self.relu = nn.ReLU() - self.fc2 = nn.Linear(hidden_size, num_classes) - - def forward(self, x): - out = self.fc1(x) - out = self.relu(out) - out = self.fc2(out) - return out - - -def my_loss(x, target): - return F.nll_loss(F.log_softmax(x, dim=1), target) - - -def train_with_trainer(args, trainer, device, train_loader, epoch): - for batch_idx, (data, target) in enumerate(train_loader): - data, target = data.to(device), target.to(device) # noqa: PLW2901 - data = data.reshape(data.shape[0], -1) # noqa: PLW2901 - - learning_rate = torch.tensor([args.lr]) - loss = trainer.train_step(data, target, learning_rate) - - # Since the output corresponds to [loss_desc, probability_desc], the first value is taken as loss. - if batch_idx % args.log_interval == 0: - print( - "Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format( - epoch, - batch_idx * len(data), - len(train_loader.dataset), - 100.0 * batch_idx / len(train_loader), - loss[0], - ) - ) - - -# TODO: comple this once ORT training can do evaluation. -def test_with_trainer(args, trainer, device, test_loader): - test_loss = 0 - correct = 0 - with torch.no_grad(): - for data, target in test_loader: - data, target = data.to(device), target.to(device) # noqa: PLW2901 - data = data.reshape(data.shape[0], -1) # noqa: PLW2901 - output = F.log_softmax(trainer.eval_step(data, fetches=["probability"]), dim=1) - test_loss += F.nll_loss(output, target, reduction="sum").item() # sum up batch loss - pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability - correct += pred.eq(target.view_as(pred)).sum().item() - - test_loss /= len(test_loader.dataset) - - print( - "\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n".format( - test_loss, correct, len(test_loader.dataset), 100.0 * correct / len(test_loader.dataset) - ) - ) - - -def mnist_model_description(): - input_desc = IODescription("input1", ["batch", 784], torch.float32) - label_desc = IODescription( - "label", - [ - "batch", - ], - torch.int64, - num_classes=10, - ) - loss_desc = IODescription("loss", [], torch.float32) - probability_desc = IODescription("probability", ["batch", 10], torch.float32) - return ModelDescription([input_desc, label_desc], [loss_desc, probability_desc]) - - -def main(): - # Training settings - parser = argparse.ArgumentParser(description="PyTorch MNIST Example") - parser.add_argument( - "--batch-size", type=int, default=64, metavar="N", help="input batch size for training (default: 64)" - ) - parser.add_argument( - "--test-batch-size", type=int, default=1000, metavar="N", help="input batch size for testing (default: 1000)" - ) - parser.add_argument("--epochs", type=int, default=10, metavar="N", help="number of epochs to train (default: 10)") - parser.add_argument("--lr", type=float, default=0.01, metavar="LR", help="learning rate (default: 0.01)") - parser.add_argument("--no-cuda", 
action="store_true", default=False, help="disables CUDA training") - parser.add_argument("--seed", type=int, default=1, metavar="S", help="random seed (default: 1)") - parser.add_argument( - "--log-interval", - type=int, - default=10, - metavar="N", - help="how many batches to wait before logging training status", - ) - - args = parser.parse_args() - use_cuda = not args.no_cuda and torch.cuda.is_available() - - torch.manual_seed(args.seed) - - kwargs = {"num_workers": 0, "pin_memory": True} - train_loader = torch.utils.data.DataLoader( - datasets.MNIST( - "../data", - train=True, - download=True, - transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]), - ), - batch_size=args.batch_size, - shuffle=True, - **kwargs, - ) - test_loader = torch.utils.data.DataLoader( - datasets.MNIST( - "../data", - train=False, - transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]), - ), - batch_size=args.test_batch_size, - shuffle=True, - **kwargs, - ) - - comm = MPI.COMM_WORLD - args.local_rank = ( - int(os.environ["OMPI_COMM_WORLD_LOCAL_RANK"]) if ("OMPI_COMM_WORLD_LOCAL_RANK" in os.environ) else 0 - ) - args.world_rank = int(os.environ["OMPI_COMM_WORLD_RANK"]) if ("OMPI_COMM_WORLD_RANK" in os.environ) else 0 - args.world_size = comm.Get_size() - if use_cuda: - torch.cuda.set_device(args.local_rank) - device = torch.device("cuda", args.local_rank) - args.n_gpu = 1 - set_cuda_device_id(args.local_rank) - else: - device = torch.device("cpu") - - input_size = 784 - hidden_size = 500 - num_classes = 10 - model = NeuralNet(input_size, hidden_size, num_classes) - - model_desc = mnist_model_description() - # use log_interval as gradient accumulate steps - trainer = ORTTrainer( - model, - my_loss, - model_desc, - "SGDOptimizer", - None, - IODescription( - "Learning_Rate", - [ - 1, - ], - torch.float32, - ), - device, - 1, - args.world_rank, - args.world_size, - use_mixed_precision=False, - allreduce_post_accumulation=True, - ) - print("\nBuild ort model done.") - - for epoch in range(1, args.epochs + 1): - train_with_trainer(args, trainer, device, train_loader, epoch) - test_with_trainer(args, trainer, device, test_loader) - - -if __name__ == "__main__": - main() diff --git a/samples/python/training/orttrainer/mnist/mnist_original.onnx b/samples/python/training/orttrainer/mnist/mnist_original.onnx deleted file mode 100644 index 15931affb5ccf9723bdd4cfd3b2e9c9605143b26..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1590610 zcmbTddsvL?8^=3SO^4~MMJ+;MutHcN&2ztqmBS#5Bw9IyL>Pw4KXP)Q2pYP}Uy$$vI8EoIT zYunCs8wcu6betl-bktY4TByHCx_$Sqi9+FqbrT(f{5JV-+$A*p-`{ID`3CmmPadd0 z$8YP_)f0y4{qNPmTqy9}vVEg(e;zM%dMcmqU?$XC>$mIw{ohomziX$jG;pR3Kb5Z| z{6)y0+>bwHp#BoSExT7w9H#fbSK)dgf6D)!`1}9oiNF6}Pc+`XbK6?qwVSqV+O_Zh ze)jMG|DN5?;{W`z;{rDA8t1on`?j6C9A26me%7_%&-{PBpyaXn>qcEh67oP<9N9s$ zA1*|X=Q?0uUJKdJE<*D5ZCL#NF>%T}j2(Nl=-}xK2`LL8`Q{z)U0Q9|e7&YIufy*A^jU zt%K498!_r`8x@YN073Ccuzmf2mh5i_laVDb_HG|KkH1JpM(DDEjkmyi#wn~>auof0 z9-wT)Woq)e9{2@S;^&_#p(s`l&0fZ0)GSNX7Josr)&F4GzB1@uZU*J$FVUPDvTN?d zK~!%JRi~Dsd&mO_=~)j6^~GrI!N>6KMl^fU2kxU6qV)b9D4TE}q{a_O-R=wEu_+C+ z`YF-j;9nRrs|P`+A1j+Q6~=0x;J}V|7&vbWq}B{Y(Mbm~ zhPLLJ=pGph+DI{cYZS8XB?|D?Er75$wJ;>R1)ckuP_wmXafT#{3N86;_O*||M*?H` zx=d7s1FQ+wXS+q|#Mvw#ThuLt_q;!L`&JN>;a?#1B%k)aOhSh=XDBjDB5v73(PQ9C z+FbG;_?O2Z&oEQG;6W91zn+cViO)b48j5?L88fq=STLnENuZiC3^aUSn1n`*TURai zXI`Uj{$s5F&4jgmCq-xLVsM;LL)9I5*!nXRXE1WwmGlgRPx*{|mOHg6Tm=WRE!gfx 
z>uHq4sU)y<}UETnD>#smSn6cos`!U!S z-eUg6#JBCPW{UQsTym2yWOaN32Xc2L+I#_<7u4(dVk%@`>IRkPB)m%2ozbq0&(&AR zl*Hc4d1@%+kIaU;GiR|X;R6(HRHAm@e=PaReT*-px!A6qYSlLZtCC(|5%qdX$Q7w^ zsKI;AVqtcIF>jx!kLCCf#KRvs4kHGLMmrEgEp!CccQGe<)ynm~^cUC;r?V9+hpE56 zK&{q@<*)0;Pt@(sk320xWL6OKH4RpjnDU}@5odYuBDBqTii^V!qT<>DnbC3sG~a#z zwBv@UJI-u@z;UNp{mmAzPOGBs*KzE7;}du;H{%LVyTT9K7}!*$Cn&RX$Sc=?(!?)Z zu4ju(RjE?j9V2$_qZ8o&w^+~|y9$X8y&>>WHuHU?hZQ}q5lf>G6H|K<%jiBA@!=5{ zvEnE71b$QZKDifmeZNe64<+6#`;G;p-oV;P`hpABf<9K~Sl5k8xV4$Mf6g~C@b&|4 zcy$ws=BI(@BWEbIQDFT3W*9VTKS+i~qRWk!^#9VxEkC>)tnZ9Mtwze76@KE$#$?E~ z-VXMa7Laz1$Dm~`b5nIt!*DipWmOe+5`^5zScn3rM{s2+@>z=5e>op z$dmBt9IiZ|1F3Tkqssy6+UXEGbMa5dx?#t0$-~d+q_-MJyXx@a3tvM}&^K<1T~8rA zZzuSie9BE~))S`w)&>*DtD&mL9&j6oEIpon9#x()H}dOLV~)NS2`Z-0t;BUL5_e#u}_Y4z~&csz8@%)?st4%XQl@~&xRAdj$QIj(Oo z_e8KvY%mTr#_?R&%t@%U67o_;UxCTtCX~be9mJzPux4z)uuqNHdWeDd0orBweT7pM zhP=OK3dDNo5id6pt!5UYB6lpOI5Lkt{$jw#eEf)#!!z-Edjh8_w4oetJ+s0+sA`;# zn((V^@tJG*%Wx6jalVb-C+YU84>_FMkeBS_`Y3e0ei2qWo`cMRFTm>WQP?~?9ISWE z1MQr@(X!u8>~u^7MPMMOJX?V|sZY>d^Z@cF9>DEuy77zeo5FVIOW-ud3Ed07XQq5VS&h*x<+H!;>dd#JH`rK z$KJ;3^)2WcUQSHu5v()69o=`oh49!)@VrO9!z?c@BPbROP0HcU-7c)yw*VT)en!>5 zyVa6m`yk@{Cy@T6y=sc3I=*l>syH_$Pf)6LmKI`W+(k?sHkCPNn(%QwK4A67JD7E( z8v0tlM5C*8j%?w%eCud%Zkdced(&9O`u=G7CK-!{90P^jVV3i&F~8wp524~sCR&TH z!pdUmzcf50SJ^}s@$M0Kd<1!WydofT_Zl2f^$=}8-(}IpXRxM#dc}H9Ofl7jRp&iI z=`Yi`zI-dTKh48Ie+`1v6de|3F_*Z?pNLUSZs-XCFuw8`c#WBa&fDfez7F*dmMtYm z8AFw?ju8Bl_Hnz~nE8%bq5BBii?djJ?sW`%*~+4K{0D0qm8hJ4m$mL% z1)54%HvV!XDu+FhsnQ~8kMoiXbkx9h?*Qnl(}tcU7tncyF{tf{JL7eo)yMpYcGs?9 z$k{p!n^24P|7EcaOM3~4s$-xrv1j609n7}^;nJ{VNb4>VtV$8h`>o@&HrJSWaXyzS z{>XyYhGOM{_h9k=Y<1rdE-v&V2AVxU&#hB1xt9Vo_j#}|@5N_R#~|uc7(~2{}Q{F$-40HZD z0KRUEK_rTq5I)xgLpJslT<+e5W~=G2A!0wBZ=10B{X@KevX{{19!w5^!Cd%pEe8vG@MtsJa*jI?j#8xO?_8NbhN%-rVV^HtRS}dk~;>nH! zh&`%7`iHTg_rnhA2~4S~(@|S&EXTA`UB3R_1+JCm*-1W~*z1OgU~k-$wfsWgm!_VA zmKfXKe&p`hy`I6PpHTNFx$sJ!pqEP}LhB`n5h>yD@?x-e9l|M0f*^IDFI#iTnEF$7 z6Vi4b$H=X8X6K2&S*ztzO(}o#L?3c9gSkOzVzfVc1XE8;$7_XRJ}avY3sY$ywe<}+ z4_O7`4L(d{Xw7+G723znLix8a&STP4K4~eFY;8a%+gL_^;x7#i-mF9Bov>URLPlVLxk*r`(GnaS?seg7Kht=kQ{+)B= z-TtWlcY80P-nEYTyv;%L^~S=a50q8nOVN&t!^XYwSh>3swKujf+dwt`x~jV{Sz-*^ zA8$gH(Osg;vEH~@{f7MY`R_=DxHl34&_3vap zL19cg%}gdg0#}2SU7VcwXqpHy7n2=>Wo5lE{%Ch0=Rr3*qi+ZC*L<0L<2Kg1;}6iT z`~CkAC-9#o0neH1S<>*cII?aoHb0~OzIg-Z6<+9$?mhOv38=2%yIG5zs0pgNTvd+~4 z`izW5IAKQYf^#rz^aGUlx)0{W{Bc{1+<4ns>Vna}T`otd%nAZ2!>H^@Ov=;O)m8oL zQL#FXlU{$vl=QBMxtoL8HOnDA#||gPwBusRcDseY;zEC(;i#D?6y#rq)R5;Ga7~Gc zH>$ac_fO!Bla3InyofWVB|=MZ6|Ql4g4*=?%=7Ii5{FYR`?oLLkggtl{Eusx`(`WW zJNOGKtBA#P_aOP0qh$|I+yd>lKhVP9H`H{lVv41@;L+6p=0%<4Gx3MALr$O=oD7<2 zi!g!(fm3)3RtyOz-2Tfib@NgUPAnSZt z`^&jBJIIkm9P~!pXbJcRJ%^Nlj|f?Bao3^~)CtLh^7>5Z>bZ-0CL_Sjx(A=WCKgkV z>vEni3pj`3pHQ>M3avvz&}y{6anvVpdH){DMkT<+2lpVt&lRJJKf?7sX;@4dkeSr; zSN-$Yp+X*uxz<5kNAFLN6#5PIOuIp%s)j4ixQ)`h$lT(g$6#+ec{F$W!5iWN%h%7~ zijyzkqG4je9KGEQN}7+oQK_8hcjd03XR3L@o2)bZx?ay#%&c88&>0` z(9^ias~jrGNwWA)0ryF2iSa+3dXt~vOuLA0ygDEHp4R7OUo*)k7YY;U`;p_7gVMRz z(aLriw1j_x@D){HzfTtj`@F(2!?aMJcMRp*wBR!BAvTvu*}SKRG4i1nEykUK8F5BJ z6*~sbM+F!#uLnC7WqlCI=sh~ZYBCCIW ziq(#BMrSW)48DH>y2f$fb5)Ob+fa(0a|UpGuSdd9Vn{}u3Wvb)YOcwx1q`a6V`EJh z6tH%b52=v(kL?8KSFgc`IuDAYVy?R8J;c5m3Uh2tg(R&CQ}uu4lwU?OMN=YI*cJ!| z7C+E)^k-JDR-|z;=lR5(>zsZaOAdWT^Y1~J8@NFxcDsucdsRc! 
zxv@Ctvo-j{b`!=n=nI-vt(;{4Gd%vQM0h=)cuHf&f(`NA;TekAvpWD3zcv?0S{cNGDK_@h_0KALcAfccN?CEsR&D zplVDJb6S3d=?u-s>eqUL(H~+kce+8IB;rv$^1-T&7oj{2Wpo6hHgQtF#WVS z_fAl_dQ*h8!^t_{KQp&%s5i=seK?1tT9EHQ4^{JyVZdDtn(I1&(c2-| z^qF|T@-7T$VQBv$7Hs~}7mB>g=^j)8J~LgxBwd%kaKM;{odsafw*lOLi$aGvUaYy^ zA5^DpIc{xtK5BCyw4I27+H++P;jdxaTYK`^=H%van*!QCCamL`DRoWHpi}2Oh`Klh z6&AX(u#pp4PA6q6|4KrU-x*Lu2`r^I{hiMxfCKT`DkkSa+np-dRHP#`Og7<1S`)h| zyHzIbidFmODCq}Z)K&apgKPt1CHwQQ6s+KqI7*;&-fZ>Ezfguy&G7WuOKh{ zN|3C&3$im;aq|%|Z!qc|79TnUhlgLof?2hodHOfGRrPt*mm*e{N=#aRUnW2KQ&#%> z9+X-fp{%YsSO|+?%uvLG+l+bfj;q|12I?z|jro}0&R{r`g67Y~toG_Uh;a#rs;(rE z9KVOMA=G!b<3LkAmXluI2nx<6ugr>EN~^nMiZN%&?Gg_PQ76`Jcm!4%;jCUZ564F% zE}6Ow6?KapqZF4x+tx#9r2kHo*pEfM(c=SO-)D`@?r?R28E^j19-Us@0E-Ya@Eh2Q znN^!mF?A$t=%d5y-HL(woqn_@2|)8u2c~*-lk@s_4Yp6IL|=RA(%~ev0Os zGPvmLQP52Dx6(nTd~xG$sNa7U*9QG0o=qMXc|8~Z2s05>Ph~P`@K4OAJF{Qo6*O9% z3Ff|Ap(Zs68t%PBRe&@vV-7j6x=#o1gboyGOnBRs!NkL%`~u#=GK1eBb<;dfbgesP z_TLFdu#`1}F^;Ip>%l%4SBxa{n7(JHiYpL4(T6n>o#m zHB7S69b?;LVNL%?w2`#pzKvhe$8#0rm&If4@+P#hKMeytM7%O2N;adZl-^H|)y_UE z;rmzvKK-4Dcm3gvQ)e6U;s;An{^ujqiGD_0Uww6A)MHL*T`pVx;2tKvZ~|BUGx(K! zfBNl1pJ=b15FOtXiA-Tn(%==E7<6L2XX4_kLV_K!pm0rg6oDp;B~VNlznPtrC#xPpg*~7 zUMVWJtxj00EU0xP=np^{)ca)odc0jG2zAUeP(s^d(A;bO?^##x51)NfE z6z6}=9NpCE5PfnF_&xfJZT%Xs*+AaRt{SH`Uu@$X~2Hv88-9b8(4nq1E{XXUsiV(7SSOeD z3uM;M%kafpIwPNt#vB(hpZ+}%jba(>%72SK3(bkgbrj%X0l2L^NtubMYX71FRQgFF z_f!BbnH7lEzcpd&#ckj$-3W~-A@DQGh>x4K0yO_Ia&DG0`H)F6#fEmAzDAFa-&+ce zA$_6AaW}RFv;$L01Zj_*xr-x)U}~x@+J;a^<9jD(v%M7xgJ(nLWx97B>Ecw=t)Q&7 zoiWO)U$RTVf*WeE-E0owmc^KOzZCsn6yfl{UZUr8+NZ?o@-Bx8FeSE}b})x!ffJ`d zocD8__?mh#?iQ?Bw3bu9uY|CRKhSpLO>oMqL94;H;BezznmNbf$ZI_W#Y`DUM;5A` z_wT|)hXb7XWdpuU_XMhU5?51Jj&Bx6q3$LLe{)qkIR9;la=&Ms_sU~fW&Rl(`|X7Y z`(s#^Sj}l>i~_HvTi|}PsUV(op1bhWSjax#fJJ-A;qylx!?^3H9rQ`vZLFE_rU!AO z7w0k0BYE6mw+?bxujS+~zvlj8WhAT^)Ykdx$OS z_&fr+@iVwQm2w&h0mS7nGnUTa9oX7X0-S~-I&9qRc=$r#eqi)0;G2)$$7Gh;Z5Lg$^gt(I}AP%^Q zs`C$7+R)qhxSsYU4b*ME9|MiU+(454jFl94V@a(Emf<6&)f`}+_pXCp(mT+6Naeh? 
zG(yqEgAf?`CszI5f##>TvjKM3(dhFe)Ev0LDOMY@=JA7B-N{RImY#}wlmjjtxE~Z* zUpVu7o48tX4oF8gK+p&?!Pr@sPq}&y?E+U*7w!aT4<7N6Pnln#kK3~K(7wQXED=re}UPXPhT5QA+%4W|7yPjrnvVypL;z~}hj{+8l z14?JpcPz{Un)e$s&pmST94}(chyO%r*JwyiBKK!5ox!GWRR@$FM#;)25PIzhtJnF; zlC>2OVwevluVXM%cwh4i*Y;FgEZMOdNR|GyN=~#C!!>B+Q|A=tajilh3Hon<}#( zX9jUkwt!xf9HWy5qp`_O!|w3C-i{uKPoV+a|@Q7)kCYH7ojX- z25f&f2`z_Ifo;kJ481r4Qa|}KmA5{VEABdmBt(MRmOP?<2ccp!@oLtd!U0a-F-Z22 zMVVAX{;|&xR#i(Z*IsIzy;=f>}7gEo`bPD?`=WiR|Rj2(a)`;~gLt^4}#~DS0%@4$T0o0jD{433cs8UBUtCLU?%g zJ6>rJ3yN>?ET;P&^#0fZ^6FV?8D*LV+qOVdzbJIE(}2r7Ej&3(Zuyy~x#m-5j9}!L z>0|`oN6=pEw3+PQbX{_6UIwH0R^a=s3PT1GD>$0+fU7(5Iv*TF+fi{ivZo=R+W#)o z+%`h{+Z;rYPj}{u4lvgT825{WPhS3o@`Ll3j9eeZolQ_l-x1sM$;4~<0rsT}AT!`3 zi`jP$y1atH%C`gu|4nC@cP*Hs5Kz16g=0zD8??%5Wib=eU}W55Nc{aBiVu+|vp0e^ zm!6a3z4#!{4%YXXFKCCX;F>3S!uCQJa3hvo`+qT9=}Z&B?b&H;7Shq{*ET2{R)=kK zpJS%MP_PKTgjF@Ygpx0Z(QS4UY&b;h@3%qZ0WJXh^lXUQU=G^AW|^XW2T0$3$WyNW zE=%s%M!oMj&=s8nI?pNVw|O>9H!~98kR>_X3t978%C>3yq0Nw8D1Y*eE2BGa-Bs$q z4E>4)D<5$ot-8GXzHHF?jg$@ceuJxtQ{zhgTU*l!sJ+t-RQl0sos;d5{pK3^L>suK z8{aUM&d|0I)6ot-qx&&p2`Tr1cIzvd-jY%1`)Lb^!#z>-?76qi;z zgl*ZzrN47W{}?|UaOgDddX)|4H|pS_rWUd-QZe#IGKBsSif7l+SvPThUd0AI!Luk9 zq+b`Ywf^65qIWi4?~}k%tfO&zu2?9umhd4%_d(mnYgiIB2R(N*qs3lhv{g!YyUCFl z!`i5`sA9@#lH6{;nebz*i=o=X0@)qrV z#x;Jk$j~9a z3+fM<^6!Tl@#c|9Eamc3)LaYV{HOK+TcnbX@c z5S{i-K`;N?C@C<2snbuPb6yxKs-&Fja6~(gdEkApj&?x3!6iBz{C+mjUQGezgC2s_ zsYO`$V>krVhT1w?ch^9%_acQJ1J5Q*Yg85BHU!XQDpm+p+|z`c;6N>=KKm zdyU~eBR(S=A$H0vi0OC+nTwLR5W1r}*HAxqubivl_hQ8L7Za}!3L%z{*SSmdXCd2okq^W{a{j-ac#@yh{_Z{a-MAtzx z@EUf>`l6l-vC5rJa?SC7GVv%U6wU3+v{BV+^@J{zZk`PBYfbs=)*Uc-R2tg&|3^JS zW3(UpgmvXo1@n3w&GHwZ^_(uabEE;ajZ0*j=VO@Fpb$taPQ=V*sa){iohS*uk81`# z!`>5PK>Fx8L>NrO<#w-7n^~+*pJ4??S4$xM@l@30X0pVTEuik8=kV2A*1m&b-}Nm} z*8eGcSXKklA7^2tW(eet2%>v(1UAJ!LyHIdq2QlUSQfjA7zo6B%)NrkJdAnUc~fEh z=LXO&idE-!Z{h}&n+ne7b=D>E0vlpo!Ar@SWa*IgoB zstxCp@|i2#8~|-Y%mlv(fjB<}EO>G&N=LtA%`xx6%A9zMe>JErr#*+}!DCtB)FI%W zeiTGA_c_YT6V$Cey>R<#Q^6=>CMxW2W2KHac=fu9Mgt^Zce4<>3VpDwzLwdV8Spv7 zGQsBde;D~wfM%HoOMGB}_3_=&z9t3J=LbU*Fb~d6n$2ojXeu@bh;K_@*M5jya4*i}+&07d+8TpKUor;P_oz%bTV%ER7K+c2< zaQ*lQbeLWPmgE7`TSnQ~W213bF9xzc1+c5@5l)_Q8f9zBz?EhZSLuo##aG?)L_ox3|DXn_4VQItgGs z4y?ZAvSyzP#9utY`3xezoMaf>QFcMdyC)E}Z#0x#eGhi$hoDQ+UL5#APw>g+vAHD! z^?VngRsV8~->?UJ2k*x#G^3Z#IS!dm@8QkqNAcuVYbY7)i{lqO0Q;wT+?bFL6#Mf- z2~w}Ml{y%v`7HQEA-yXQD-FMZn{yUe$xgA4&H6%PlnunU)MA&F9CEu+M)_NJbo)^V z9`4!R^m2O+lN%G5#Pb=H87#&PjY)WDY!4y-^j&b9N3#&( zib%d4;|9{%zh&E7V&0BaI}Z`_p-1Cbe1QrgKF}F?%wU!kRf^_I0B!7^!qiz=G^d(@ zKJ{^|WTrpLb@lnW`3=x(qKU9gZp?eU_=e8%r|9{70g4myWxsH3(A(b8SF-EqsT5@K2Z7$ zHPd?WQsFzhzT*^|Z&VAvPUz0NtCB!VGZFt6XUOS#m#gK;h1T2%OWln4SVIHYW%CJI zKWsql_M5P0K{o*x^@jCN=!|S;4v;+q&8u!P#q$KFx!Hl@nmUJ+QHLPs!)NN@WZ|0F z18AhNMB6LxnW)W#bGt#k$A)|C$3HA7tT8}PB}CX~GS2u_yQxd`{85a_obGqbJXK4pH|4?Cj#^JAGJvw(W! 
zhJt^ba?>6-->Yq7?TJApENVD69d=nuvgMR<=DgRkN2_k&vLeTG26L|oq!j{RjSXuM~#-!2;qhnH7Dg;^Y2 zUr3%IgE<^Jk3*l&)390Z6jyI1g5bB`F#OLUC{4aa_ONy?)MgOO%zS}_Jd{lhjD_?W zl~8843YYIC-gQ75jtt*}4^B~syJ!+LC(J_YidsxhHb7n4V+CG0#lfFh-YI$uVS6q4)$_n3M$P0n)eZ6?M zX?D<>eh&Vn`RU2+`yiIOHlBAEf}P_qFj>`22t9NbLXHtH>+1#dN|3<-L&VI+Xs$%2 z1k3%MShw;uoRsdv#IX-?boLpTcY8mCT&_oz*Ih2V2b~-15vFdf#xn0lPNC_?6pn9Y z+Xu}C$ty!NT6z`j#+V33OTuu?`dE~UdI}kZjTq*sz)0OAU^QCJv_CF$jpK>s{9a#p zcz}dAt#nt6d@mCXr!H%oH58Rz0p)p9>@fYzCU1&kHf!Ev*%S#c`sElWJys7> z6+h7J!~s@#eg`yx^pvj$(p@Wv|eoH&S^ z+G!xno?svh9@?FkUcbhU%r+7-;?iLlFl5kdaGudQl;M_qzW`%* z0E*-0bE=a5*n8C}a^1v3Gj(Isb!8ATvIu72QBYxBCpte>n=#C+3o0Sn}R!{TjsLDAcWwfGOfYIle_Y%)8bqs11lcs=&FJj}t#2r% z_v;4DJH1%wc-oW=xYs6pyv07W-FE_%ua2^cL~=!ho*E!-2)U4W^T zu$NphR+f*r+@*ms)uG=}wDdT7{u_?9HHag3+yMXk-FThll%1S@AME`FZosAE)I$^T zY2y>2@PIC_`h1Itjtu5p?%A%rl)rmI<4+@U-%eD^%k=ovqbI2gmW3^5FF`aUR;}#60*!i^LGLR` zSlDe1NEiPG&z~6avV~=!z41G!k(cYKy_f zA<>Za>^o%d+yPln@58X8`B3$Rd?_BoK`-QYNcr#utf&6Kk_la)iErhKB6Fdl?HXo3 z)5F?@c~E?lc$vNZIJ-U8Fleh8By~lBq>dQc^xzQ!7z;bH_ zgu4)DT$kYnzbHt~dII&K_gVez4p#PW0-Jo*2sM7Q(P2h6L8q@V-;%0^H{~%jPm1NF zunQ}08)Nw0ArQLIlm+g;$JJg4fZILwgo$xa&~SSMlDZzH8?rgYXno#)z#Qu6{EwnD zkBf0_<9O4)E7_%kjHQz-q0D_f5we6VArf*(gt04xl9VJe5|v2C(n%z#=D8kPl_YhN z$jFkEWRxV5yx05R`NR2qXqxA_@9X;gzF%@@>}%uA#nd}p6{Qf3El~J>x`Cxv+rfXz zeJuK?6ZL!lBFFq7H1i&fYXa%-b%Szs+dHvtiy;KxpAAVRo+vg_VtYLOy&sn0`grmi zuL5{|gL=1p^n|3nzft;fFT0bVBcv?P0mB+iu3|+!t4}iK(jM1fcBCA0mVe^umxJ<6 zg_v;E5xaL93+wOxftyyuGP_uf9+gVwzT+dt2L(X!+&MV=F9U8^h9 zkFc~Sc^2n9=l_k?rOw(-^jPqTZ?6_%ZO41s%kATn+{S~kzX&}hH{#U)(s4(L2!bCv zVn%a`q#|`N+I=j-$i!Pc_&dw7FdE@_s zu#R?+r+?+0md}KiGc^1C<%K)sWdH%iSU1ZHa^{xu(mTg_4<$X5c2Or%O^*v#og?pL zu0$D>tgy*hi^`WuFsDqk#6-l|ir*3&Tf}urN-^vxoskA~=ZeMDF@11=SwlyiHX;IE5ZYw}FdjE@h<w?g(Yi=6EIWp5o5dpAS3oT8gD%Z zf~%HLqbLIh%j4j&mpDI_^Xc8Zp17M^!Q=T_e%Hqf#9O+AeQv$QTaOPx$3-pS2h{R8 z+aAN3MF=5-axm<6A@n{@y^LAWpe)Q%n5e>`p#BM1Ru`g5-;+07mWZ97nz7%jBE+9# z(9tY~-$A*mu#p+8^?pwnVomOw`4ezeF69KX7BbbND_C|)mwQVdWV3tgSzWhznD8+J zhHq{ISB+fqQ_T#uJL`tC`Ck52$TB3tpE@F)3~n=nTog0>AlK{H=*EpbmJyq(soV^%u07 z>_e~dW$^qjn%#P8(L1{YW$MO4hJgj2WA~Y655&DoB7df}B?eKxbb&(}reD$)Qg#VY zaiEU1>kkKa%}`W`w*eS(T1{c%TJKGsYR#HI}wP=E6$rW6kH zS{DwWV^tPgI%Yej{TNGJ_+aKU(i%Th>IfIq)wvSsV|3Hb236m$e8z(76c{^&{=3~! 
zHRJ`$cqCSA-F_S^dfveGTRYM5z+}u^v1wxRUso|?{6#b`Gv$(60zp-Jn#V=eps%g~ zFO7UY{AV#bNKT>aDi4%d^i;&%kkafU6zz(U?Yfo+$$f)pr!3+{H~&N5>^UH5{Df(v zH()UBVcOF3F#4e#=&zp!e$T%!FG&@s)NbQ;>I{`%sH6K(5HBxPv4vhn!slHgF3@2m zZ+tojdzaBUr$vr9U?+%8CA@R|ab~;j2que4VV7??Iu5zdAK3I34}YeIR8a8O5IWBlg>U>ioF<0-HBseB$Eslt+1iS{vkWR$E_acBTHxUz_;&FZ5h`VGm(y zu}r$Rksssp9g`YjG4KZU1ed3iLre?X&&TmQx_gj|BLHokzkvVB7D%1+75W&g1DkAj zupPSxcg(zpIm180?_p{}c{Gp;=P3I>D{i*KPARCUkzHP7jcFuw_0s)=`4tpbmap-?TUfpO#M z960P4&LLky(6q}?Z$x}-kKdqwxQ=hVrG_@QM?vLkUG8>0`L!O`z~VbK;9oof%?De8{^KV8J7+5V`d6Pb zz55Uw&y0bm#8}JdqMShOIkbr(Cy(E6++wIEY)zvs|A+`IIQuX3`df#a9H@<*f(-^{ z+-0Y-^toGUbk8@7me_B9Pv51(iqLuQsAKt>l?53K)~aim)3p)*qW-1b`k_$tM~ON$ z+FYi^CvfO2!8-Cg2CrX$s*MtsOgT(NODo7NQ!!=QMdqVF7n3&4L;oA*v@d=ER-Zm# z=hruhCg+u;fb=|g?T!aLYL<~WmPyk|b# zWZXy`gN*J=c(ZkM2EQB57k}T10XfEkXz6}ldOZ`5VNce z6O-uYnMmFU0#JYEFxLK|x33`GysNwlHgtx_+59WR!E^u$8Nf4W*ieC1i{(5rKA7WGGu{+oGK z=qXmQbQ?*#_+)f)K~Ag13Eu+xiytX(c>^>y_$Bi zG}93v^W4K)KD0ll95J%0V%mTTFBle!dAe#B1W+JYx8x6_BzoBUCZ$2UC3))&;#U}5Qu!KHS z=XdKTmMD&)_lO&iCiSMd+2<^!#d}5fp4vjuq()rbX%88d{h4G*Gpc+U2iNfzy?cUWb0}o1%X#U^oxF#`0N!q56t4XC5X^sep=0@HX6(5a zRi+=9T10<^bjuL*mo7t{Jx@{eV=Ak7cAB~6#lS$Lo>&$2fbM2bneEyGcq7V42sjwU zoEz#S<}-(4O6mZRR;}ip>chdW??wpQ?gnO;bOhV7N%Vf~j=`zwFvyBnVnbti`Qj}Y zI=2=RZPT!Ql)$%-yv(es0K5j&z%kg*D4AM{qv z#P~s%VXD72SFq~`7-l|(u%rU?GizgUTguq#*xeXXsv+PQQ!ZFl53ViOng3dSZpN1a zSbdn7(62AhGcggJ9PgoY_!EhZwUU({3W3JF3uu481|tqHhUmu^!EEFZ=)J8s%1Y)k zyP|o#%Fs}dsc(_gjk(C&2bG~Bv=Hq>YQd10pl@SbQPt}V^PYGU56nzL*Gc=pEX{zw zn5o9q?YaVef@`pH%x}t&we#B!z67huCo$(?C##=U2I9>V`K=`eT$4vI$`AAWwU;W& z(5GXth%$rYW3cny2D})pjg3#1g7jo5I9(+E?UZX^o~6fKogRrnZDEv;othOXO$YbR z6u3FIJEwwrR=%?w_7+o)?>6%3M(-hepGZiF_|7yIP|x7M_t@J$37Y;chR)Mn5IplM zyt$J zI2M6kbE^5$X~Ur3!E#u=Yz?+oma)ku>Zsad2WCG4_)Fz#!n+3YzJ?w`*K04)ZmK?y z5$}of>rQT^uTcKq18_RNjQm_C7|^EAD=#Z0Rcj}p_FEI7Y1VCs3eE$qd2uM6ti;wv z3oNc1$>hBsF+6C3#B;30a9$~DLNIgbqm>F-8x>bo7nX1@WW4U~(XRgW4@ zrkvE~j-p~*A#RJU!>RXgf!gUnlyz2TnQgcN&XRmE%y^9*m7ihIfJ#tIe2e!w9$@3F zSX^qn5pyWh?_F308Zk7#PaT0yIzBLcEam-6^aRz=t?Y-1H&&ep#fsS*(84Yd4e4jy zE32DeZ0C&`qaFAN`4P-pSPsfLD-~5!Xoe+z$17cLQs#UJGdo0g{o~yP=kFWf-i1I2 z40gxG2Wi)LvkVq*eFo}JO@!9N)zJQQAb7_b2p*A107D+p;lAGIUUNu&V{<1)+4fF`}7QZXKAu_=W!6{>6-rLlV{7Rxoq00M}+C^a*nS z|Nfa!(ANr0CqJf+&swltE)s0^?}gUTHgLXrl__nH(Hx{5PUhpxSjp!OCHJlqqN=c?J&DUr~2`XH7>9C6}JBT0R29mg^*ha z>QQP!^0)?gKShr_e@;V??94&0$jijW(G)g)RTFBsSiJb47dZM9P^KZ2*-VUtI64c8 z1|O7I&%KY*U+r0)UnWCFy57WsVf7F^{{~c$f5bR=2UM4JfIbGJ>#SPpwNTG}Xe}t| zzM1~+8Z`R(g4>EL_^VKdQ_DIHndckr3knQTgL3S}W3Tca8THWEd<^iF;V**hb6yZ3TFOx6k^X2eKYA;qQVwxKDx+DfX_IIVu^u;xTKI`7=T zTir;7=x+URV>eyFw?hlnj$EaT$P=_JjfJ$@zp>AykEpdK6nrZw&lOxuyXfnvlt;6$ zx|_ImLU&=mQkN5d&SPep3T*d#!rBueVDYD7lul}!c$ak(l>LcElE~v5pgqrq5B%l* z`ka}`ZZH`ZhrZ7Lfyetk=$yF$RMp;)_b><)#C*t^Jep=&(=sz}_)IjiqAcN-670I& zj6sI?(SmYXRinF8-!BPvq=xXC9ezRPFsl2oSsvQc0LMl?mvz19jXKA z)9cYMU@a!pSE0PuQKrndfw+J{sGnjBGt4W%(dapdt4=6#v~rlTbt~_cQ*t|*ot_?)hfxma&;eGu(}Br3+}aFldDweGR>z@h? z>fZ&DQx8#L5sQ9Xn)tSHTEeSDLjjUY(e=&+=-2HG&Yb%RD|VX0j?D|{-+Y`m8m-5v zhOGzZ>MA~aKnNcK0W^(NMB z8xE4yqyCmWXpc9?lJ#AfBlW?PmjXbvUnNo7)LmlLhqx%+nwjSyLt#_MMNp*=#f+${ zAhT%TeS%6M+_wW{jy@2sf10?pJ)t@vhwj&beDaS7RHx@>anGath28p`bmK33wdX-u z6?W`ccE()iVs$QT`dwx>ZzNccIR+VYj5&R(R9lQ@*W&Qx>f^AxR+o#b`odSz46@O2 z6;{9K#DoR2@a>a>=r>~@6bBH4$bAFs_+5*Adg=?l`Iq^fMcufRt`?T9eF55aMMA4@ zA)5CZ436pfJnjj_=i@hm&DI-ys+xd_FF&H~zB<_J_5>z>8jgv*zoVCYF4nl_K=ceZ zlngqCjd5xqN!Aq>FFc86#+s-<;wf0XO2!z|-!$JpsF0@E@|}NeK(8TXkTRi;ulQ#! 
zs>&lN-1QO1zkGwGXJ#UP3I@w?Ex}~uDe$c!mh<>na2mZ9TsMU9kw#B3Eyxq97Q}$n zs~zM}48rK)JyC4)Qz3I%%bce?0;L6D#j+w6mo6b2sXqN_w)nK4B;^*d+>FnV(^wxO zKes{LQB!Ws)QgaD`#DZ+cn$L7o6){;H7|uUNyhP{%#}x4Q6A;V91@=)jQNZi+@LIR z<`+qlUK=q==kO0))r4!y4LGls3P|xtV*>~3V8r%jyy$I=`g6%&+L{DC`xe3qbLw;~ zxqxTBb`vsNJb9^Y7KT+!!cBn@kYTkP5+hPEp6;-%h9lALha3uLRbe6R9EUAEkHys; zEOWyf^l$xu36I{BN9iNho-`8fb>^Vm(a9+1F0*EREumu@al5SUFvn$gd3an33HJpI ztSw_1%>_)EG?}gTcnpcnCop75JcRdag6~(2x%0H==|kB>(csY%Pja~$-Xadpzo|^# z;~Us19$Cc=jYM#9_n zQ4k;wpq`^6@3BtCTm07otLM!E5A%Ug`*#Vt>0F1(pj@bFyN~Wq^|;UlU63i?#RuP+ zgdO=!;5Vd&e{W;0lhM!aE)&oWs70uqBDu z^gC0aVmzHE=X6kJ*&plRHrDkhM$zO<`wX)ni7!52A_Xt8+*OLf7BrhSX~_~3PhYHz zLZ9imDBG3;#!2Jg*79p03woxoI=&iY8-GL2gP(AqQ&Xt(c*asf#Gu@MI_tt2ZDH{% z%2;gA!uAiBvEuqCV(6su(Ho+Ov6PSUQ~4NuBN3GW`Xkf z4C^EazT=6`i^3(+4b*kA*5ab{Qn9#V9P?jt7c#yxunK4Z=}0S<_v{R&*b##`FBc$` zax9zAz##X9AaCx)9NV*b_n<3apWwkR-ipVR3rY|VOy`Szd8Tsw!KWBB@Slg62!HG9 zaIhi@H(#N-r&}Xh-=J)Jl_zeR9|$q@e3xH$;fq(j<~#htp;fpA;?M$K-%}TyJFhD0 zrSY(hcDRkE-8uNn5qlm_r9R42-hIz|*mk1^6DBOgut0LJ*xiM#snp?8HbB(?BcXNe zZZtpSi_Wigg}5VaNLIEjxmVC1uR~;Wb_ve3rTXGZ2ISS%&V1D`~$i0GEtov*}=c#b>LX?D8o5EQm0;PkgfT-!h7Sbmv{&Np}RHZF7*mi=H( zXMbY**HYB9(H5N3$|W{WHHAL88=*Mdg7=S8Vabcrph(Okcj7uYbj(<|FphQzE{jnX zlg8>i=&pd&cN#+T#T%AP5%3jUomIHVTvxa|u{&3nHV?;(?!%@Zc^Ek5DW9j)jhp_L zI#;o@9h4oDnaxlg@^^VcPm^%UfB%P@giO$Aegdr%OgNAC|Dx@SE^Ie8387250mWSL(ZK;9LD4H-JzN_FD9 zx$lB)X|?p+Z02p%lW~yyCa~InlYTZ0iZ_aA+;-+ISng}U2-8OxZX!eT%SSQcT_(E5 z_r}04$zb}ui@Z-OCOT(?Dl`7xzR2T-!{Ae!xk0V7Ahd_+AYc+#?gFc z7xSO3L)~>XjE=Me>)KGXojiwn#=5w$$bg%+faXj!PcW|07BiE7N<^Ddvz)EE6tcm? z*|;D&Fa6zJNc{N+diDs!PXA}vXJ<3)+7g3{_di3gzAb2e%Y@rFSCgAOD+VkssdJL8 z@6qq<4{|s)!-CR(@m}m#&^b)KsS`Gse6Sundp(0W!!uE=w;w{da_q7xfv`oNc;`z- z6K#TefPUn3Fz+O;2c7HO<}5+kJ<5OljAF{pB1z@0H0Y)@6?$46$Iz~3n%`&$5wZCY z|N1&eZ_Q!RtxFMeyMef=7V|DXKW(6Kt2?0LPQ$ zEVX~lE3MPPTu%p_)3ng~56!#$h$GzY#mh8(?VYC;GYdZ@Ce8Q`Fscu_3fIX8wgfzS zuVQ!dO@-PkwCC&-!ctZl3N{t5_=;V>F}r|znNGE+Fv&7V|%+(V2a_69>ydL|hiG;Y|Ouo}@HCUZn0#=I# zK+g+Fbg!(1lrwX|u0M5}e{W*q7uzK{{imV0c8?;*emu=ZnnC=i1d2aKP~Iq<-EG(7 z%>LR*KabZ;?Q94p9twlty)VFQ-C!oaFb)I#>{!CLE^xW%iz_51+??mJpgNMvYQF_T z#BDi5m1d&FDu?X$Z#6&0rj50ZQUmiIx%bX4hEU`Z5fYwEIKaWOA|3 z*AoVLyo1(t57BD)7w}xs3oVL>bE!3eIdvy~eSjJMSpSP1#+vMH= zwLbUY@OT3uXxSsEGxvwUgkQwjo{9mL*TAc@f~h5+$a2sie*UaZRx+TQFmS3K<>EVt z(M)?_eF3AM6O;K-8!vqse^+&58*lu#41253fMJd=V0x^G%l18w1*cx(@-Moa<6!{? 
zj5XqvopU4w8+X9j-d{1Z`W3lbF0j_oUXXX}6*T(Od0^ci<}vsz`21A{<)=QNJkXoZ zIBm-O|7cUTU>!4GrVXuDKk2(01LDiS6>~nvftqJu=p0Vxxx?1{=vkLQ&GMQ;oSuZ% zM_VwZqKb)&zDvaS{#AInd_cn~VOV)29AqzcU}Yq+yyea8;vY3=*NVo)5*==_MHht3 z$N-m}-r)bY5>NIeUUA5ID4y_u$yTgjhC|;%@}je-I^xgt%`P&#oSSGiR3Jy~4e%Jp z^JYR{upO*LGfizQ{nr%wHJ!$a6>>Dc(~PS95zKh~K}>S}0HTY@d_hAqcC}~;SNo-+ zb?+LGPyNJ)`D&s5)}w6YjW#Gga2ce3ZctXpj``*pa#Jh6z{rinuW#K94z5?BLY?xe ziUC-qua08<0>#z)TKpYXx2?p?m7Cb){TG<{ z?kS0(Chdv8Hb~TlB{1hx+F*c2T%0aBw%&ZgHPeiQ%(<E;E(Ge&NZBQUg;0Ur)%-+QWIg+KH_CK--6%Abh)s6C9_?;md>I-B~3lLpyB4JqNq%MPQEeMbUOei|A+y4BZyb9UH#Lau( zjhmlGOs*}69{-y1Prgz9*s+1nTVIVXfzxo)7V50^E|Ylm`NM*poUkCq4PZ2-1C|1h+4KI2e5eaKg0ujpolux2s&Zas9~Ro5f@^>$F34nm7h^`ZY-Kd9z)qV z${oB&%97>(RwP)A1l8FE*gNwka2JfYVflHOXMGZbE~|6OF1;-GfE%z_Ur*RUew#O+ zPJq)=;%n{I5_}DZ09h^2=bzmezdjZtfB!_cx=K_!rbtwS?3i8pV@NCf25DCpgB@1{ zqMMl#+4GTnL2o$}&$)}m$q)F8lZZQz>WBKnmZ5X?MwC77kOb~3;a!L8fNJ3t-cM@8 zIWIk_aCRjh74#6 ze2G65x}4uJ&VcK;@E+vGsd3&>^z6&@23y0cV7&ed*!Tvq(mSExf43Z^Ci}p{eIS}W zUk0$N4|@JKMGyHYOv|SCRZE3}cC0ibBBqw>AIV}Hx>GIQM)S1e3Ui;!D4JJ?-W!PF zChP~N7;WfKUcfjXP4qV%iM|yYSiLM4CvVcH9^^<~I%Yl$s<;Df?zMO|-$01}k2>}* zdcna89rBO6Obi_Qh50UsWCj1FfV|&m)^PnZ$^tIpc^`FQ^|UpZ)N?LKg`RGNwi(-3wa4^iJo`egy!~h!q`w{AY-A8k;D$LO+<=6ky2CdzuL(`6DppmP` zx!qUe#%B`8@kh?^#JTr6W>Ib=7(WVz1Ltl+ z!RBIOODCbP{2^cGUCX+*#bbwo3NvDF@>;)cLTMbLEHVXxvvNRr^bs`m`T@q(FJaF2 z9GZhXfSq%?37c}CBUaG4vD+{{)4fJv=BVK1@-@89L=Da?o98`5&(S|99IP*AL)i%# zWT?CIT7h2Vu{{ax_lJScn>GwWXSYM( zJ6D`QxyNL_1S$@86SlbPaM^j}-%h6I@u?ToUuczhOvz@gdp*(0{y+3us7AYcx#ImH z>dXH!=0ibS&=GE;pM!|ASn@CU?+%6}SIW!(_ZoeqKHy`n0sUv`bB=0zz-8?U==qYI zIE5*w?_PvzM(25@n-Q_+6k;hhub%hkw??ZAe(Qr z>jUm1Xl7LQ6uc!G+~S$!M##8>OYB8L*s)k%T15GKTe|NL8p@Yz(z(rQ6_yU8cVlTi zI#;ABbWdvtR)Gdk@%$bOqh7+~#9`=o8<2TXck# zYP2t3KtAM7;>u5{VT7T=oQ-d4 z&cS%MPawWw2X%jL;d;-Xn3<$x8T+gFxSmVVcMqLgEHwCj$L^us%^r|8-3DB5xS;Lt zcuaZ~0&yREq34b(pgc}lSyL6|8Wh-hA_MLA(5|px4fyrig$=1jLPoz^5-GcZ#kXr% z!tt9}u-=ATXw<*n_Kn#tS%h`c8|XW+l;#>w`ObiMG{>=#ME_-qDU*U&-R5?daj*)9 zT{@20Q;+d3Kh!XIlO^=ay#$%Felm|y+o1Wv5pb&17rb5;GMfqaSh4a3zRNVGK5qdY z8mY~h$zJk-3o6N@HB#ZxV;H;XrN&h*zmLh2Rd7w$P%!)Ul=bMOozVn!wqtq_J;Lb> zKPXC~KW`a`#UT^TYPUnq&;k_gt(3$qQSyx+%~8t5Gkq@+1}5(X8At!{cO3bNq_Aet zJE$Lj4LkdcN7w#6!3iItW7==5Oeh1tW@AC0=~6#J%z9MQp74wVwz^YB)2j`vhAx9X zJaP7#Qu(~tx1c>yo3nr21~K0faWnZ%;v6f{X8KKDI{PSlK8Kh(F4S$m)P$O&3^?(< zvuw@W25=r0D-m==TtW074DR(F%(gqTO*iXthwEd2W6|V|S<4qEeW4jeFPO1B4R&u* z7gYORGplXgxK4k1W+>-_19d{eEC%BbopsoJ>KLe2=3?XfjnIzwm|bTu)?Pmif!Yti z_lbru^W10fAkIL>^dZEb^M|Nexga{Tj#c&VgU=6rfP+olIFD9MK~nM?O)iCj*)cm% z&VLUnW0#@lU$oO388XpgRsx9rea$@Vvr%Te2KFw{7JfAR1o`JYY#U5@np5uRu{@vm zI{uoUzy2g>NhvS*^R%L3tQJPkIgcJrdq6bZiMQ_k8uu=YLFM?F=p(*|!A*4k`AV~( z_b-?ou_%nc>62e@4VZhU@v7_o;Cii!xaWmfT~~#745@ptb0&(a8P2ac20Q$JLZ6E( zuxss46i=AKCvABI5qG+(jX-av@Rc!;tODT;#oQc%-4#GeiD1+#i$H21iH`n4IX>_Zrc7nR`D zc|7r_XpUz*4V8ZdOX51$pw?zfxNSk4b~y*~nP1R#Q#EUu@)cy&xom%sHWw%Ug|M<0 z2Aj|hVnGvS>BV` z*aM?`De%6QKBpfV4(6J-__|{PMz8M!=I7l|(eNHK`k$5fOxJ}l@5{{Vh7K37GXTs! 
zhk!D?iHR1bDzc|V!-YF)f?3vZn(xV(cWgC?f6w7lJmp|#Z3FU`Z`nojB;syo;P{+o zXx~En$zjAmNqNM)hdqFD*MC9FuL{=~$S|OIAbN0zne?-a_F=>bs67GJU(2C$(pC8L zR3ykAidkDeF%EQo!JONnSok~@re<6Kmys?Qw&XFZFo@y1X3~9hp|?b2v0st)k9rWFQMUF4bE;K&({#^7sQ^Pk7pfin{OtRzt zB-q+{9AU1uaNM!GaP>nxc${6s4lKTfDF%1=#=+#8R>q-n={__c8_18I779DM=U_#| z1KxPmH)xG-BA(+*{fFrgvCH!wWv{TnJygyn?*l-w^{><4P}LDBS*!ZJPcCItOr2 zp8XgMALt8yhmE*aPKUf~_7K-ebNr$oSm>ApPCar_`QR*TBaZ5zxG~WClLL;)Ek);{ zSF!QZQ*f#53A@&qa1}ouKpXXas@;BKSVtpU>b48b$MA4qR1kRf>J1*afv@#A30vcn zVC%bM#N?u`RQfoSj;ofuo%|I3>#HtE5{}~YF5=Q??&fV1Ux0&&38$i7%C-TykWrp2 z$sQNYf=}DyfrnQhVzwbx6m#G#uVa>*==pNC5F&_sDZ6tzYg6A7X!0}&jszGAUfSPz z@vD`5UP%YG*A}yig~5D*=Re@IM20(bcVW-Tkyt1*iJegapwZlj)AEpe5z(M2T(Rel{vGas-txjVQh8?eGX0I$jf ztSdOp*ZkHJ=0Bk>>e%c2n$!>IFaHidL{fC&KH@H|a_Yle!M2g<*r>M|dQUSI%tQB~ zs(lA?V{%Z#}`pACpPo-VO*TZPcKJ69P+IjfV?aP`oKQ1{G;GwWT4HuPPpKys$G z{X$WXU*HXwK^`3qT5&7TO(|e~egT-RA3;nLIkOtE6q2+P(D}X=Yt^H^>#=-ZuA9wg zp02~zJ$Z2Ui?NV)DGOzL!xT~r3v7B+hLT7P!SYEbD7&xZb>G%uVDL*;x$g-&#t1C# zR~4V>wL#J_Oq-KzN`tf#BdDJ80Xqg|K!8sSZ~2Vo!@_MiAFeJqMMXjkWz$#hdWy2O zIttgJ*}T~VV%qs1LF?cc9GN5%ygvNmUj21Bt4VjTDqIT|X#XHrBjekn^62wW zL5%1Y>f3GyU)#P=+Hes>h4&Q2FBgE=;5@oLD@AXOGGZ;f#^k;sSnU&m;$?&R(z1R~ zF#i>lYTMI0DF!3moUlyL?w@_ow*~CE+b@DOpFziB)qd)lIjyV`Y zJcNRv+fXRh5S~?tghiu_g-EyeSVjB2IlB#n0_)l2gWM~z4We$~!YAf{8eHD@ zPUzV?1lHGfg0a_Vu+9ypd|o}P+hY!C7m6@rp@gqaeF}1)fh?{EImZV$vASQ|(7aN(6-XNO7nr5&L;0%|re9u3vuu$> zc4s5{?yDdd+(?j3(Uue!>ht1R*_m?x8B7{>1^tI@#Ra+4q1Ud&3WvM=>aM@hH#wG% zIifB^$Bw`f|J!K%{3q`1mIY4?wK-Rr0(=sUAi-@LTx_Sjq}pus-kpjwvZ_E~t-(1@ z3zs;Lxuhs|+0GZfybNlGu0rr-518?ga+sS2fVc7q%^?KnGmuyzH%cU0mySdGlOi0X z_Z8YL&B#%^6I9(su%tO(QT%j1ugd()Ta^uf(>>_@iN_||rfPE;YkHz|mD;WuKe^l9|KHlsND{LCR^u`TFJ4npLRJJ|8p1C$Q_ z%{*_>GwSjeEV&s2E}njw&ev+3M#-iqwEcb*JxYjwmr%jfoA%oN_+m-Z!R@b5tM@g51Q%cbCH zsso|zO^`x9i#Npnl55Ro?N(zjTq6vx76wD&fEaLIla1o#Qr<-t550%aL#>cN^23ba zU-j)SxV{-d{E9Q^blM-BrZzzCO=H09M!=626Yj}mx)*+EU>~;Way$GC={&oQHBLAQ$~%@!V@?W+&NNA^X$L1$ zd{R{BhteGJG1Tt+33j&K!Oia(u8B>DEnx!d?-59 zhN=`h-b=LH@6!n{qk>C z-XaAmjRGZpcmMGH5{@CR`ib(thfo)$5w^u5XnkIVNh_MME~}jSZ{<)>`Wf(vW+u z;e(5fiZH&Dyr-wUCMpO1l&p9B39Ttx(6;An+|hRjDtQm4?m)Sg2qpTT6hYm$0^X~f z?jzR6`3&1anD22E?>&l#u=PGzHzgO7-VVgpAmZ%lU1YwKW`O(bP;3`n;$07#WAdI- zsHvr#eclsEc10XZ&rU?=X_ip>sUBrN&oObr1wQ8d8?-Sof&+~gVW7GuS~#ACUiY*) zspY_l$!_;xSN9@tKkyDy9si)WO%;07j%PNVe(0|A5Wk!DOcfuuD$>`J-Eq^mz&fSI~2IAk9cF*Fy5$ z9MGWO-OStXpwpl3rq^^h*Zwke^2oxJp7+st^b*PBy>U=w@d%)cp_;!H^BSFn8IMCH zX8LiESDA#yjpM*1MT@(*=nC{+HV(9Ghr@)Mx?EnzVU*Mc62HWl3w|;N^17a*rPY7v zQ@9&CQHoaE&H&JyXYaSiXm+9nonQ2zoU|36aWY7fFlZU#s5Yp#e1!L=-Ng2lI@me3 z6si}{ojqX`Gt1S2)HmdNwXTaor2GH{U4Z1heUe+ZMOkz)CTy1}x z=dCRmX1}4mbrUwnoyFRW97q_q6q6iB!@5aEf_d*Cw6-@D%qnakfjYj4y41mLrdh?= zS&*{80uy2#$mOyktC}mt=nIi(yWEPr8Dl5*dvc0ci|%aDhP#;ZWfJuzrToaACY-4> z84`CNBWB_WUM*G^`k1Uj{ZBD4{=^B)csP%5zL@}K-K`*D*l$=wZe(AN7$(9}MftM= zY#ciir`76kVLEr2*lZq{%@#3pdv{E6A@^HgJ;co~fz+08RN+- zWAr6%?q%Q`6vWJiuV!&YEqqrw@g9jKack325N&RO=AP%!jJn7A!G4Q`GXI~MT`x`EN<9~5^bRM^Ddn~uG`R!exiH@76>NN}FT~xx$LEA4v!J(; zs8RnKK-Y?PfAo8=C}hfB0~A#i2Glc2B!1jLsEB#TBEG!^Ay!wYEA8M1whxEJuE(%A zcNlNyNB5Qf)1ctnd930NW2GkT$7~4wZ5%Z^@ z4@Ae(e~%lF=Cn6*es)k2IiMJw&Xj?>K93%QN5agl2AsTM5riEajV`OgK|0{RB$2+? 
zuKtXeWuXeQoAXfgU=?~kp&94QASP<93L~KfHv(!}nm=u3Ts%4~72R zVE*lDMCFPaUVW&MkmvmbMAJ5)O79MX=;g3_atBuSc@17gx}3QaIhN|nX}0^7JWA7` zJ#h}i4fcWJ66%Ot(HA;h$q}}_hB<1^fkL`pY23Sta*J2!KkFEZ4b@r9wMZ;XJ%{>| za^AMP6r96DSZs!xVE3VfoYuste%NW(DhY?zIa=J}At{(hld_P5r(w!jHNj+EJjgWO zKrrz{d=8GseHJD{@ZoJ}lXse2q6>MgXlID8rFqtD4_@wN4~yGO1?BV`Sx(mjU~j@z zZ0)56g}btFm)w+-4_(PipT~mq>_n6~Us8yY|3;U23t-v-;(EVMXU4nAvEsObU-RfW z7R;66FvSrJE^q{8(O-(Tf5;Iin#wCRqfzT`;tAy31M_jjfDClTWQRAz1S92*^)guK zr7n~iXbbJN0rY1M<9&N_c?Ng!Q{6(rd2K53m+yiY3m~|89m43*piR|UUk+e`_&US z1*b#%8G)5ZH93!n96tMRF)Y@K!njUDZoWqh^)Bx*+14_^`VaX0KXM~=yMpFck*Kvv z4|=bRfX0Ho=(98lw7%(r^5$FC_~bum&sMO;nogKpVE{H4`+(us&wyGJX|JXx7@slV zOmyO5zl{Oc5u(S10Nq7ntWmY{9N+7thLFB2A5yM<16$<-D0!sBnswbcv%MerXt(*q z_xlD9h@&Y@ut&8dQ{Kyj`Updi6~EEub28W9@)4Tch2?r&V$&HYUKffI$_)f|w(+uw z-xY;>h{gE58;_8c;QOgJO{Oio=;>l3_YyjM*W$qD9JnaRS)_7em4@WWpiGC~NCU2_I0(ffzfbi4 zc?!IVool0L!mg|f*g7a5>JkcAwyvoVJYYAuO+6I--n|0Z+&oEX^&s%wk%}s92^&A_ z8Y&$!S);Eym#}yxG3akiEWRGjyt0n*9oCgN{%JlqpXseo1rt*p?i1%FoT(OmLV1s? zeEIie^sLqs%rkA1x(;>Qt9i7*_p;f*CoFAwZWg}_7`YuGGywRWt&LD=H zHe%YLjksXfNARIDXYu@4>MSjS;D~(Kl-q)5p$5a^m*9;3)I*V!C?e=t<3> za_D3_Hc3*J_`$GCp1U*-)6HDTKY}nhhIAZMOnZ) z`V}*gKb)s9qMkF0e2h>IXmq-jNR5t7;O5HIHgljKQN!3r3)zrflfsSsmEOw@>J zkWtWE*uLiy);`FGJkNb-_4oi^J};d)Z#CqmkJA*Qsxolv&N}RgqG$5PTnwLbo3}le zfF%Z(am$!sXmdRPDOzfrg?|hb-0g+BiO*P)Hjh!o>Vi|pLtb9_n&t1j4n2pe=$!tL z|9fB`LFS$$4w}$ga8kL0CQdm}Z~Y77qiUgEwgOr{uLE(Q0O8~FSQ@T|fX#a7)=~?O zUesMSqJ6-Vr|_vqOX#UOi8I%QP-g5WWhW*8E)T=pu_}V@$)C(Q!GKeKH{xmsc!Na9 z1b62kD7-=}(9q>9YJ3o}J6E17cj2EVWue)(U z&^eUm9#qu5%Lns!j?mKVg~l5^nf^;9WL*bzpOuAaR{?EQYaronZ}50B2g<8rS*ddi zB;LOXZO_CI=&~Oyrf(JR?o~?v)_&f*qlvg9CCugG8}L{^8T|V_=4F-(S?0xTG>N>A z#%({aW~GSB`1BYj|7|FQ_sWH~bG`9*KK%_3C!zk_hgcu}9TY_o`0@si%KzOE2UC}1 z^!e8~@_`!X`nCXko45IiSS7s7@-0A0sXsAxC@9lf0(Ab&Qd_-eqw zN^+f4YH|%BG*8wFVB#8GZfDRB6up?H2-#hP7BhPb7N%3#pmX|y$5(QvKU~B*uTT$0 zYmg%MMiuyM?ZLU$>ViabF}j?L#4=7z5UnM4*pv|L^Pb!XquyZtv;wFtx5v<4HslnJ z4hoI6Wli)!1BI=UR_R-U?9LPj12p9z@Dcj9VMYvL8#KVP-N^f1;kl zzJ)0Nc7j;f56B666y>jqq3hNQ+Lh2eY@du})jz<1>5ox4PnC(wWYDw45mS1bV{oH4 z){}om-0Kkx{z$CXrJ9_`Ge_al69cQXRfUq%Y53PM9bxA2vk?5aiw*u-0O6Xy`M3?Q z81-{7RJw>a|93WuR=ncngL&ByJqT{h<9+8x;qdx<5IG-FBRc@X9rCbaYX%=szZr)cUWYgbO_Uxu!GpRt zadeuXfM)nMp9@g2xf`DLQxlYHfBm7x9naL2Ib4;{O%fU!K|l^IX*bY zj$Je2jsOL$ZB9WZbeIZJ!%Gu7y zVrrsepsV|UcfGt4lo#g`$Fl=eriAdznj5g-;v1BV?ZC8IbHVp^BPK0>2mLHWg3iQ!!!4O3T0^KUBF9eoWJG?STGOFlg*Uxjct`W+AE9YBfM~5JRGjKWpYi=b|6XsD zq)~4yzZHI;E5+tt4H#4z&Bk@rg7anovGqxmp3Pv`YXOA1{|m=G&%$u+7L@cSj=Y|3 zj+M(bUiTpfd#$u5o7WKovV&19=Xm&XdUcGsT zl6u-@#;2nE)IWU1sGlgRZK5+tA((8T?C}mct1xPY%PvXeLa$@f$7u*AHyWW{tBZ*y z&gK0$eROLl-sras3{fLi;I(OJy)6{=#QH+4Bn4Y?%y=n%f8e4eINhdu-<~>jle~dm z8>kB)ZUpbR9cX%QGI(UI0KG&ypAGv!3=S`-7;hEb(yXIHC7(;6iRp9fXVkYxZOKxmU4MI%$pbv zpr`p1yT|ttx+W8+cas`s7tx#-7P0!n zp-|q^2APX5(VbZx%!hpk(On;LspvKo`z!Fc5%Hp4Y=;SI>cYg+cRg11=~3O)@A zOrkTN^@?7cbGjD>x7tF>wT=81>fYON`do@l0eHlufnanV5> z^`C$it0OQj?qA57I!<9*bDnoS6a~}sBQWct3fDOK7j(@r;NH$v6QT{rVVj~Ryt3Wr{8Y4muq3mlWm>X`#xCw@W{Hqi59r1)% z;N*%_`^Z~X((dqpFSdmIAkX9y2>W#%O+1gnFiUkIiMl}gC0jt6un?4G!NkuE$0dO^ zVEwo+X5e;|RBXcN`mx}e9>QNbsv_)e>Oosm9YMaem2dfHB7d6h&^DEcIIH^|DlO9a zxcCKNtUr#)nl-o{_qFJD`3RmEbPa8F3z)_DvEt)O%5(OgfaW(TLmY2_1!sDL>$h8& z)}=0ZI9T(mK6Yb^0l7YB-G}Wa#I-rO@wRcc3U-<`@YzE;pfFO7JT5JNFIWa@SK`q4+(Gc) zU&ecfFGlEo4i*a#v#JKOoA*Ur)|uI?#wHeHKU0rN{E!cbSO+^FP>$>SBIZ{VhZ&Nk zsMJhk@-1J(iYVie1{-XqI>a-4^x|vQdeh(HUmVBjasAalQJ!uHE-z8# z8jA0rQ*tSkZ%yQfe%8V2gZJ^PC>q6{H8k^%K>3v2(2{VGcX=6x^S-?%PxcM;pJ4}n zUv^W^GM1Iknu79%Z|t|_YiQpejRRY{Ft}y~v-weiLl+N#nkTwk^qYsEEC?6d{h{2V 
zPCgpf7BTsBb=K%Y%&Dc-@WDo#6SZ_;$9gU9eV3+CGSV8d_7x~XA`d~JB@gn}D)gB0 zn3df9fFG!%n;A-Zh;9SH#7j+(>pX?}3;i&oH5aS~)iZCoH#mi7Gvj4{lY^uQ;_2CR zd-n#FYih*3{_ahTlLWCwsS9rH_=fc}VtJLs(adb&5cC+@8|v#;!l?flanQ>N%pw+G zg#0`XeXk0+)CV=`A|_eP2NrjJ4!Y5IYrD-SludEQDQzP1_tF_WUYl?EoA$r&`tgN5 z2g!*S3T@3F&}Ym&D7>f2X_(FZ|Bk3w^5g|bHaaRIKhgf+`7qv{QSNMJFb2aM-t6a9 zHuFI-nhm*$dbY)wpHv1tCL%8K(i`$^JmiB0heOjo6+v%>5f`)mJ6K*Yho*;pusx>} zU)g(rM|~08I9P}^k*6@49C*6UDeRvxVkdMo;;Tn(n336xQ9o5Vm8pZs&0dFj6OcWQ z=_SN1E=TtPX0YO+5huz^WlFuiOdQdSL1Pc|QR^>4(yA{2TC1_q<~NLLzKy>=>v5@P zY0kaO8Rz9EV%o!juxkH(;&1e2gO_P@rj?7pm7d47Bco9y5LbO@Hj`Csg$n4idRDijH+=N{31fS$t`H^suR65Q@|23Hk#V9yL)&iVLx?2(hhuQ`IZ z(BA~}zEhqx=rM%8uEj8``xwz3j!lEQ@qD*B7qvANid6cs&>j~EIFN)98>+y#V`4esJppE;hJc69YvQ9I zTe#^DsBQiLJ)-H*5$p&<_33PR^)Q6~^#}ch1FY%g7<9{N!pwy&c+Vt}28$BM@IEV(dF5vv-baIc z>Sc!%%EyPqRi_J~;G!u^k8c2xw+CFE1E z)8L&3&%s%7U%~wm`F8?Z$;B(;+S<<(U+)?uzW*2ei$nR*yWW%2FIOCGeF)`^PgunC zgYeSn21t9IS4c}jA$8R?(Ea-y|I+R_PCH9IlTWW8@{s{r{JJgf=v)l-)5Dq9Q+?rX z%q93OCb#|hD3CXL!p=Q!z;BocYObiF-2*So?&3fcn(ArQ!%MlUkHw;@BU32)Fo!` zq<3nZ^`-G(wj+i6@DYW+(84DYSKgGnfI&d{=WcBhi{5C{KfMpZ$^)v$^ z^4SE?GfP0-twzE|M;+???T7M(v=gKmkYsu>Z$&*rU;EFfl${3Cq07+e%r_>g>`{29 zQV;dnJXYJ74Z2UgQRxv39=#{?6)Q{8_x)+io&OZ;cqJk%fP#VJz$54cAN<#AcsW1? z!qcDfqRUmp$kX8@e|~|*rN@wF+l2mMYJxvyh>L>?LE>>6v!1$x-x?9;w>S)X9+FRa z!A0;nUe5GYwYaUfU&6Q-*I))^#blF@=D0pu0QznX*!AlPbVg_kw&%4_vNBSkJK!bL zTUmv_V~GF0D+Llho55hpGRu;RaB$IgR6F*aet*OxokMvs-)F4vkKSBQk~1VvZN#qW z3UpNJkaKMyif*Obw>b?(UA_^+D?@p!V~3fs%~<}%<*&Hzc`X*1ujHK;)$u9I-jdtl zDsSu($m$+mfQc4OC>fp!w*F=4Zu&RmFFgdhr*5;*p>(#W48~_0$y5JKQ{jF-3tHwL z1f_Hl*iHNhfm3e57FB&NdszS!9N38}XLVTIqc5009O+#47Txa`K>$CAdaW9q#{3y* zr1=xg)ci5RUc|M|_zW@K4R|QH6Ww~6p~Smp-Z+L6Rk?^ut3`)FgdC8Qeis)ZrXsdmJON!p2 zm$k0owLv%xmfgavVCEbqQEu`8D_?cbW5O0||k@~Kdvyr8FW zo#KyKw~09_PD1OCeX-*&+Fh^f;JuYRp8cuLO$d?Gesz{Y)cY3Qg-1a^|0Glv{^YG_ zW;lU714Y9F(PUgQ)U1ugn!gNS`<6@OwtEFWcl0TTlm@HGnTqhCOFv{~zmGBu`1bRQYGX03t;L{w3=eCotQvW^r{+bGArk9zkWCf1> z+dweeQp*Rwrk<>PBCK7KjYW1LEcD$^95=lM%zT!hepWuX=Ntz=H3l8uWlU@3Mg02i z0v;pIMiM@N9&Ifg>Tw$So>vu$uUsJ)y}rVD^;FjMWhBW{b5j@LR=iJ9# z1pQ}!V3w&77v)cx!Gf(=v4py~V=9=Zv>RaedMtM>j?rz|qx1&he_<@-bomp`0<0{JTF>Qe!Z2NN)Mel8?#bp$K5$*A;w zXdm+HF1mPMz=Umv*z}hccHU})u(C?bHUEH;LA0yb9LhoujzeW2&sI$}5&|BiL)I+~ zK6g+7WdBwX+K6x9v0yb*9$tq&rluXlk8pYN{ZM7_0Q@MY?e0AY{XB^O_24Qdx;%he#OxcFb{%A{ zpFq^H4O=E?^Fc#3K&HNuj}(suvo;%;n9_x=o8PjQjzmbIjG049EQ$>b1*N$*lUj@H z6Kghu$a)VSwCfC56dqP&Ex#g8*-#DcrKuol{wR)_{}xn;VO@IY1H^c@qS^HjmU1>2 zTd%$bhwId3kc~p`b^l>u;6X5-y%l>Kst6f#E$Q5qpW~PwjBP5k<26o!ezWLzyO}yF zZ>}?q5C6h$gIaPqRS>)73f&=2fOi}5f0rrHde~@k|KuoKt&T$GvFo_?5#k7$mM~b& zfO~c4EvAT)LHT?vKPy2`nD=ldbcAc8?Ak9y-S)G94ujF!wLdYM4r7Xs8Vs~Ni;_M6 zfz)#;nz+`WPpunvzWNCb>n@T5A{G8WpX2GA3oUL}P~%Amwyh@r(B@v~mL`Mp-p5cr z{UwHa-A0FuJ20^P5te@c3BMfZ_fg=079}%K9`^=4Cl+E+>2fT2`vB7)8FCTh zg^9X_XnbN4;PBH(h;u&5LQgRBAx6y8MZEOOHL=OyZ)mbU9i+)(zG(gq$f68@$I)}- zRe6XmqtBsiY_dWtCKKykzlFHki;(c+B1G>S3epu(P!csCqmIxlh{eF@i+?DyKaPdn zcm$!BXqRw(D69Wa#mraN;we*YL9sppTa5cKGtJ#lcZ}}D=U$>;Yb?liw&r*Wh2W7p zllg1}2rxW{-goNpy)B)!&*%%%<$ZF}uH44fk{UF=GM7p3_$gLh%|hpmQ&4Gp5|w?2 zf<;#+8?JR76aEYUMI_~N@`~8+IW=&Ui3E9L3~zOL3>tr2%P%lFiuE(6L-Ngcs5|Wv z${e~C7U%COZaq>HT8Q0vqJ-FdyYG`zY%W{0Pm2@PxaFw1(46d~I=oFb5K0c5rEI}e z2p*peR=5pE4is^ohm3@R_ZuOWTnLVKG}k>-$?)`= zR#JA#1tW=19d-X8>^_}`uFW4=Uc@b4KV1XO-9N)l-!}eprk-H4Hx!;~>N$6HM0;f9A&yzLofV?ag64|YJzqBE|9P5 zhwd+9u&3fF%J(~?lsX)?ZalM^)6QFI$oY8>$Kk`?Mx5*A9kf@-LIr;qz3dI4eEn3= zaJ`7*6vTXy1fgr_9_UH8#f7gCR#{zwWe4uSxKYv6aT<)FuaCgYy)-*y)~ImF#HhdP zAW_#Fy#oJ-;qs#pl2nZrUV(sjfgA@4amkOnv}YR(5rf;%aS)x|?9cOE#`=QyWjmCn 
zTe4`2SrD*@X4n|1_|as*&HAV>Y$*3N{-tt7rr2lwaA;%>1;47&SF6E-8WcFn8kX zjdbw$WfAB2Ac;xG#xS1)0v{Dp2wA==yn~_|>o*Z&dvqDpYraSK_|aey6$8&2LcnK0 zGw(WL0lJP+L-8w``F^84_XiEZYj%IEPJMt@#%XN-U&PYvIS(#_-b3sCcVP3f0G0h4 z74GZxK-nvmFHxP2qtK*f;1z?eoj9l!8%C^0q(;vYshiM z?sGbvZr)z7+bTs}t$bWCDFr&Vmh;)2-^4M(8nAeMo;M19Nc*$HVCqXV#4eD0x&p4-Pte|4m9tve z%Bu`d;k}f}XmMo+rVUd-?;|>#rDQ9_#}F$kI+#tIR0hgruf?kx4Fwzfi>SMtXK7xC z*~Y(`(83`a{ifSd-}GOx^6Ok)>7@zy?KBL3SBnkHjzOJC5By&8k?u=NG48=2NY*eA z=Ka`?p-&BP_+SAOYA#{?hj?PXCqe#T>K9!RGs`RCpy!x}J~y92NydAGt3)Qd6v3Rn zZv(}_!{909;TQ4PB;z&t-S-T*xxVVc^s*ob?(4&QNRIM~8F#Sr^dG)z_BANv523T( zICK%k!1g&8akOa<%KQzvK4t1$=A>I-d~OwH7_0!L197eoKY@Ve?X(B3rLI{92H$?n z*Uxc7C%-^29ljL1w3@&vDyLoj8s}(ha%xej(AMV}WPkRxk4|&}r>kSZXPzEz zzDjfF{Li2}L`zT{KML7NU1HmNNyHkUZpA&y81Jd#t0TKHBV`m0oO=ZZKd*qK=t}e* zSBo~cQ$T5%fFAn)QWyF+zdN}b+83Pwr)$6X2^O(vOg(9pXmeIkngDjmoA}g+H^Jh> zB>U=b+FaV%Yt(uEiW)Xw&`0+NL^o-ImA(PQX2;{a9|0JYc8o77BzNb(6A*vhh5Q={ za7;s;3*LPPtWW3SzS$~*>6QX8%vBRI)PI9$+$M!|M=Y~=>LN}vUx03n7f>Wqik-$v z(ep|QT$UhOoa}~{VGZc~frp~JYkbL(+hC^h9Y#;r<~+@VKpEGln6B~!hCfY0*VFSM zYgs--Bqm{?@fUDiJQgI&e=4k0ih0Z0{t(xC9DUkvqVioou<6tlN}ex7Nkq9g?yr-$ zeLW9uOR4X*C4g@`brWqh<}>ZsT9m{ppgM~>rY}MiquipPVm`6emdBy)lOMd}B@1jh zx|G+Oa2@n*r$I^Ee6Y!U4za2{C@zw-ew7SM4$_(AMF}XBm1uFSzc_r?La@nFqLnfO zJQ8Yn@vHApmsEh(l*^S{uR_Vz5|*;u7~^OzF{<}nu$`-pFXQ6@2K5o*zQ)7O-G+jT z(Kp!XxE=!D6Z2kD369Sr+1JRkpx4xl&V5Ef$D%MkL+=tw`}E^8H$I2*bSso>5y8;S zN5J{gXNbHv0;MM|qU3X~xT(#FW)QZ#@9$}tbt6=faP%B>t}zf4gAalJq<_Gf-hCG2 zH*Gj%NG99-Eq@bQhBDl5%F-TTI!JL;Mueii-eV+zB_tZG8t;vvg zA&YtKKZ%)RUw|y)VNTn!X;|o@%KfJOo|`g~dRt+b*E1eV90owgKLx}WwFFoaj7zTV zLX*JD;NWN=)V4iB$%B5p^%z~jYBWb#)Ofz5dnOJ|h=Z0ITV6S3y?xVrdn|2zgce1c zc#oEW=pI`JwPHVPU2zu=S?CKza%Vm!b`4r)>j^lv6jeidb4918Kw3Z``h7|zo{1mE zHD5==+jr|9 z^<;-{d&qi{-9YPq2QWR^h>PBF9>U+*q4V7HD0AxuiQx-2*eRL%XEo$7J`Y9CBFsuY z#aHw_4}NDmvEWW9)IH2Y!%+vp_vICAIbDEuaTyQsu*Z%NOHq;@RLB4tBStLG%g*gg^blgCq=s_8Xw7 z58Y=kmJA(1F(GL1t|P_0s}5np3_FkJN_%;Oao4U>sf~4@~2=opquH> zU4WALS!W4RvJBDw%35nbqgoc029MpEXk92L@Lauk@LuG4W|_TSeWf0aut zW=nM4-qoE@-iD|!Y0zX*=o}Hz}xm-RCo({E8VL@H8;YFb9 z-UuG6c9D0Y4M9(bx;RNJQVJ2{;mavKA@Sg(-Zm~ zoCNP3Yq9OhZWK9h#lo^UP?_JCZ<${Uez{th=UvBJBtFc^D{n^E1xvBGwFuijSKtY+ zR*062N8dhgLH-!B;358kwp({mKhU-lCTL74;i zj;X^iG>q=?X@k(%-Q1ie9l%*rV zdU_gc*?tiUAKWG8C*3#n0cCH(#jjUu3G(Gbz^sKj8i}XSb<}&%pIeFgnl|8C+Xeb^ zTMT|)!hC;cqHaZBxSJcwE4L|G(;E+zRc44c&Od^+?JW?us*X9@siXB?Lr`Pc)K9BZ zn7-13bq{~ygp|XmWl@Y$w{&LfPMnMF)e5PdKeK&okD{rRPjqy{`rTKUMe7QM{@Q#b;w67ym%6$qsO4+RG?UCw?&L{ zk)U+YvG*)_jVbRQpz8n+I(rb0rg0F=q-=HL^Tb^H*hq#^ee-X-?8{m{Wn(keu|a@i(uwPM7Km@a?TtA z64l#w7XSOqOJAO0)n%XYuB8sfO=;k50$&sJtS^RA?k1r3W03SZB$nzw6w6dTGov5R zz$y^1{8Tbua^DBqKORGW6E#8Vwgx1{M%YR&Dd|uxA!5lrRQy#6xl2;vgu`oeRurQu zWx6DGfgtt!DE7TTIrwKYhzs-trDLWkTE}Fd?Tg#|D%Dcr*MHz;yFRkIq$l_?U>H>V zl>v^#0nB#Y3-JRkV(9iEAhT&y%xkei4PJ(!CeP@bPdwJTCWYR;Z18((kLAR<^XNXq zeD*#D>9u4C7iI8rBX8^>|8iR*U$ zVFKhmyb9&#c)oM$8dQP(y!F;5(71XZ16GA%{LN~}3<}4uhDOgqBy!KoUNU zS9VSochGloS+kDdWblp|?$YCSzq^IO>aHy9W8@{2UH#I1L%=?qS1*Ox%9p89FDC8zQF$+}Dgj4bK$p{PiCs zrGLcYEnScqgOHL`2u?MBFg)TsHof(R{!v?To=lFTj6xwlhd6sH`T#Ec3v}C($no}? 
z_pb?qPZRiLIyH#OkGWuw~>ilpgJ`2-UcOw*KL48hQPCj#PmMUxV@i z73kY~2cvY4VM4PUeRm##++FHiyh;h?r_(om;7PH3`8smPs9@d`f!}!j0;VY)iAmlI zCxshv0rl&lHg+Y-D%yDuI@cELH-;+e`wiA9!|cC^E75a=Sy(crZ1rb`4j~}_K8<;| zbc30m4Rma*LemRzkeKg?MPoyliy{SWiE&*25KGEMPa^RY4sA)gtz7 zsixrC{RW+;=7aLfbyQ&qh(m+1<*c6IJ5x&#onE9!lOm?tzQgQmEqwThO0?T)z`fp2 z@43QSvC7wj5bDtfyoUARv^*c7vCNuTEN>O-J~5)MuMvdK8;$5J1$T2hlsIih`Cnmt zmZv$$?`SeF*Q1a-G7{3%bvcQ{YVophXCYhTm)PlU4|<-|=c<|xL9=EuipDhIvfqc{ zy`zc{yXejTb1}jCKq|UiqArA6F`KhPRghflP{dSqVnDxOEV}yx0|XsT|Mm~ua6yIZ zQMzI6_XXssoB-igQ?bTc9~$nwM94AV{JCJf%-?~vZq)UQ(Zgo)9=N_wMe)@}4D8AQ zk;Df?tA(7t21cCetCuKU%JZ{^wbC<8yD1@6dwqKgacY&L3c@ywX69CVrokwDzQ8)ho7fIyP%FDNbjf4ne;x3 zeTe&rQ4*MVpK>aP^I-B+bxzj!O_BEX8#?zrj+5QAxfcDQth~Y(^=D*Z@t5OR8j+1D zE4wgbM?I9+-^G>+cl>@@OBkf3A&TBc1}k>QwPY);mIE@ zRqX#)m8-B1h2Y|45T7o<&Y6@0TiS?0JI^wUDane`A$KsZ>M@_#xf&#Q3R%THy05w2 z2dCT(H0zGWRdP9APCtiunr3!qt4 zjf{q686vJYJ)b!L`*EJiP3-WA#jJ95%%1y}<~ixiM~5;CSAQ_QXIbdx_X})xJS1M_ z7RcLE0xy^KB@fL->Wl@jpznkDf>r5gw__)7G0OmF-am{{ZU}|u_aO1H9YhAyV(S0s zt~YH3F0efa^@Re8o;0wPpF~{C13hx*y~m>LuLmpj}Z~I4(R<0x6@5AujJZ8)!j2 zmLpnJ_u zx&sUU9LJ=BS~OE$;3Mmj(5RL=Lw-nI-hfF=jA@Pl+oKvxtKce%eqUAK-gg*z=`^HjWnp>99%#>3fd8V$O!oHy%u83WlqGk; zYQ!SUntqYBebXf76LAo8M>FNG%Pfw~MdN=GAv-n@L`U8E?5!IV;TMAVivQh3<8?tG zy;9D;J?$lIc+s2uK{S(pG#ez9uP`^c0X??PVwwMSqt%b`V7=%D!taq7^)J0=&VGg# zyJT{eKZAki)r8=wnY?6826X_BvYrASZ_QK@=G7aa=y->{?4=WHryN+e##)7j*JiLZ z9fomwt01H1Hpuo;SFVsUgjI|1Mr|pM`~8Oa8{z!w6^5MU`uW)LF_y(l(Byn<-%>vD zCq{p%LhTW~xa`MU7128~FwI>bqHlxjpJSegLivP=25sH%X(CYaI z>?S8#k<)ORD|}+v><#bIxt}t4&oRn0j%I7YFv_Tmc9*?)Nka>{`*>tJ!{D2>eOP=*BG$aatrHq){$r00QSb{ zaczS`(0Yj#zWj2GGX7Rj3a_xOl%fCNV_4t#k`MZI5K6`sLSl{!9)ETOMG-4#J~LNQ z9z%VqZL`6q^e|K_YryBq4QyP}35{{YY((n7Ed2=CdT&vp7py3`wFDp6iMWp4%b>aN z4}{i{TUe9|()KBk^dk$CqVADX<|`=avVQG#`q2d=u13q6{|&ENMfr+nxH%C^`; z?&~7*72iawqxTs;jzZ-jV}(`rV?M}Vh1kvQ`ybZ^(G(L1$3* zcpGo*f0A!=y$zCn(f0j`X{XWO7D{f%WB7=xe6?9LsBJ$AmP<^*S!)#5f&*+E_6kcb zm|@E(CoIbmahGWBF0EL}G>$xjmOlew-d#U%o^%D%Z7*WRcq6VPT#Iwxc^FdWT4Mf; z8|X#dDZL+bemy@O^L3ws`O&{XU-}%vJ+_1G*t1xlLGS30Vn{nb1y!hPT(PbJblvXr zmS<$35_L%I*ec?x--coQ07K4rl{-jv&mb$-yRN8iLB|WTrZm`gA!j z#0C5HF<*yxs>gI$M#X-#TeBD4jmKciksMgtF9yALIa61|hVl~{s4=S$IOH+7#r5+X;wef9d z-@xtzSD^XPed1tEVZpU)VRvo<_IM}avn>bV_UJxB#%~EoYyLuM@9u9y135{P{Gtt0yrhD85WLYElzwhgC@s6G3nOcFds@GAmuoleoEx}6F z2W(&6g?jys5La}LIn(^0Ald@6uGuoPe;m;vL{C^ZTSo}q_LkS6-p77VVm5Ci2N?0a z>K|F-zytR{wqP}c+{^=ut7W{^k;lB@u>>ga@Ps((Y+Eg902gEGP<^4y?v_0eGi?+0 zlu`a_{7fh}wZY#*)i}GKZ`jNaA&~gv6AT`A9#eKWK#{{dh>I--{CoqT88Bd2AM{|y zK{=1I7DZ$DFnyZcoYv+tZdBpGZ6TDu+o34w(|{^JM#8xb#BB0CrSOO*rephazFZgx z8Eu|mY(JY>%-YUUCu<4WraD}S{a>`Rslwg)2QcIixo4js-xibs&eKMt<&+udb@Cvw z6^`;-ev5<>gXOr5?l8K|#yIW@QAA2qgkr2hC#T>2(T(*l~@&v_*}xGWmnb9ZI|U z>qf$fr}^~WE(Ral<)Byo1nL(U2p!)NnboGg;7hzB-VE9R1>aRj~Z_VPW>Z%}f>nU@(wLBym)2tWG* z!u!5vX~D-K;cFsBjjG4M{p1?>F9^f;YEn*CODI}?3roU$A@cDeyj&j&QnoQWz^51& zxXHlsPZkoRjY-XjNx1zxSoFKabajZW+eV$iVU0NpO9XIx(F*1->oC-&1Z10Rd2z^5 zbdE8_{tcV(;fdZ{#~4G-RKE(tD6=iqdSl=4S0d)`uf;b}>Vnm#MmFJ@KG%`?f}DHj zaO@Bj!Nochy)Vy3@6Xr4BK;fhE6D@b%e3Eo`2;LldA_=+4E{T8_*@T#ag>lnEuy!bZ$5cGQ)w2jpJ?-^D&!Awzb>k6F<*( zbr{n+`wAqpoE7T|^@Yw=doX+LC*~bMJKXE~oMPlNV$?R{@B#%E)b+#RWhofowhd=S z#zNML0%+bx_lJ%jpkjTHm)$%pu6|1U>}Mqyq1B3!qm0n!t1hQ>Cr&qY=`;0SL)PgS zrnWl`n>UC!SKScya6=~=|2e`eD{FB0^k(=veLpdBr;4qX{ospC(wJ|NzL1r62e%Hq z2h*)T;LEMrf<}MpyPxt0N4tP_BOx=LvXB0sW2S8%I2jr_X&}Pp| z$SM?I;OlO1d>aj-_({CzWVgbqGME^}gPD&c70FwSJ=IMhKX8XHujIku(Piq^I-;=- zvA_y`pa$`VG=gtnz_zDQbn6tze!sxqb18T8pOK*J{F2TYGJF+EJDJ`i#3!7yVB(B! 
z-10e$b}hsj_-MhF^!3GX#ccGcqdD=m-u6R-MRbooiZXGK*u3*6evQ%+EFKJI4yxBs z#rmY!=lW?j^xko_TiOY5`XeSBCl6&9&D2AVW56D5u#PhVm0q#p@=+U!-(4macP8OJ z;$6u;T0-bfV(R@S-kH--P*!ZgDa2z=9M^=>?h<=thYA~Yfii-oy*Q1Lu3-Gzie*3e zj2imQbRSv?En$=3)m15K$Wy4V2Dsvwf#AGEm76(MKr@{ZYzbHlara*EiQB1DUL?1- zXxJ-uQu!M*-b}=C_Z!jQMVpg{Z)dX53(W6N7HqwchY`ll!L3gv&T?+SYP~L;Ie|RK z^c>=!Ua0gw0M^MfK^piKB2$*3#sLO54%MUG*gd>P&0gYC*D=|we*COcZ@{aMh#1zs z5NfpzvO`uXOitbdzXwj3(rY248O_5;V#($=7NVt7JhrF~=i3(Q3AMLqhI@1@N_RQ4 znd`$rLL2~J#~YM2rr(6sV8X`U!o2G{L3-dQBuf59Z`^`yxEE#8l1vWIx=~)JT{2c0VJ%ZAL={(ZYnhTG^rpBz)duA|r8w(`5_XR-#Vz@lp`f?~EjqVD_`MpGG<_Dg zJZi<34{P{QxwJEJ%mDxTcs|ss47Pu-!J;y4$XYuDx>k_4DYuZv=TY$0m3l}M9$?^% zI@H}KXVJG(aQ6-^q5SP(kT}>vz9#+mpKAx3`a;TN*Fi! z?MONlj=2h5-Dj{zV=3hJ{DAr{BQSNLY+i_ppsY3J9R~e^C|@HX$Xt)laxr2yheQH~ zPlEbWU3~91BEfga3h>Yp^IF1l^m?z!$+&~O%K3BbZc;QZ8KEI$n{1@nPiIc#pXoGH zR0H{hMd&#q0pIUb;eMx3H*e{D40sy_ch+if8PvbFQe?A8r@gRoD%~|+J3(XQdt9<) zBRpKziFr<9tY1ufrW<>i)9dGag1s(U9KK@T*{Ut)?3sjd=}muhLl2&sD7F;VB4MGZ1}0T!3hARW5Q#IhgPMjLUWq zn`X8-7*8L@e76a-=Zb~uX-BX^^)$pJ=y3l>(Yc4kw0?2C>E6gxLQZl?MkF1?sb;T@ zIJtxza?QA(T*A17T#`acl1LJXBqhnElc{;vPDvt(B$1JlNEjt4Nq+11r{_G+d6JpE z-*>I=_wz|Fyvfy*->xt|5(FNg%?fbounS;5cM#_= zOT)Pj=m(nR&Jftz3(Sp+fxONjxBZ8O+XZ6Mo6p#IAqe<#Q@&V|h+i)13T4etxxxwe zV8Rpwe)#$nfUk0JsQm#0^~FMw-xt{Vz(`2W4~D)|4x)Hu3Cmpc2(t|RxQ0MWl&WM* zQ#OXH_!){hX=iEIdxRx*7_kSGxf)|b`@iG=VbRuwSeEDm(YcKn7yO%MzT{PVb{{S2 z`=y%O3R3N!(6O0j`U_sO*1(%!Z*m7JZzrH{Pzu_uoq}#H2H03iek#{S7~Sv`;=2BY za|deiTJcA?nI2D3Ua*suhde{`#%t)h?FaWM;{fEYGKF!2ZSlI+HFQeLU>@f-qRv3- zASS-VkIz@N5tIV$A45*LXk$K;G$gEQrfVg)7xcy6l@dg#D6$`m@ zS9)UO&;;5KwWEh#6UfUBb2jr&!|KQ?h?=Y7ie&E0?^*?p8+aZ@ZX$R3k2)52&p;>~ zmPdWU=jg*%fxO0?$x<)DswGb#VdYmW3qH+UC%p!TM~}JYWlqF|D^|ImF9vttdT{OY zh1D6|LzDLOYY^HCxuQ{Ts=546k;*@Y2IjUEp5SXWidi8J4^)DyqK!k*3@;e9yA zuMgl3@fi*Dvk<#vVPDfwkh=6$Ia20PS*9iAk+V;B;5>Ho>B=XqZ{Y%K6R_>ocTTLl zizTfpj9V`j^69=MJ*Jk`tZE0J@BWZGCJ0%0D)0t|Hywm=eG)-u%~2@p zF%p8WJOTHvN10w&A)X^AZcE`uYj81 zi-l#KRha%yH5W!1-a{$mx^&%0T|thavluj?2QX~MO6;%f21yUSF>hBhCrkyKAX1;)Ze?ht*Mipi^T6C{Jacb2i=M}8xrCk7crx=ViU+yj`1@i({OlCxInxjv zBB=Ywq9CixUscqpf%I}UYK;qai953!#M?rtFY-p_aorJv&Hu&sI_aPp`Gw`VSF`40 ze}UcHO(?%|n$zTVWo6rIIqwC#vDf}Jkhb6?#JOieRO1;=rUcqa-o@N-b2Io z0LbAB=sdHWi_#Ne+VCi-J|RZcKlcImHbGfrFlR6{5&hcFfE6(bq0$5F)aGb)*^l_f zQLO*@{iv>!W2Kc6?X$Fn#r1Tq4a!qVZGNgM!rh^J4@16g<|SyO%+>4`u@IpEP2wZEg_-@-=vHiq)>D^&x@wZF z#G?UKi{r`bb{od_))CZUB37Lg0^z4j`22w0$Bg>&2THrG+COwv@@^caME&D`#$yEcq@Isr)pK2L` zBeq<@t+XG$xJSf~Njd}N-Or=vk+;xmF3o)t*JI{`OK24phZiIUyn0)vOhGLD;_L&E zvc3%~rcA*sqX0JN(*=lpDdv03zW}CrmmuoGHLhLr3vypcq3^zzaGMxn6V{TybHIEQ z&pOOjq_jfL(_^R%kHcSYO$1q0Aoy3C3Nca%Iy`fOl8J>dZp(hI`CJF=d`b71*M)4; z?^aBWI}PT8hH*ivOlI+@7=1U6p=@X^_+8OQx$k%=%X-I|SDc4=FNyV5^aDJ4n?Rg% zI14Bzc4#S`vr_Z~o%bn_{5262%^5CrjQH-k^u8MM6vy1pN6~&~nau78bnZ&{KW7h; zCiQ0$$pthI`;Olt=-)ra3&F?>+WL^&)T%psN}i`V{emIEAs{wE&q%Ti(6Uigxg$M z9|Mg3*ND%&o`M~R^!cJKN>1t)pFQ}(H<+OJ1&{vjgmo!Tz;?$;NSHVsqBaf$`*rWI zuxl2Yn>29K%EkQ70adV+>Eb_%|N9J2pzr6SkP&?mTuZJooKDZm=#iZI%LA1`-wKc< z4n-&W?77L0Li*7?C=Gq8N;(+9BtfB|RoRabD;Cu^3b}?!83_0PM)_7(7H6s_g#B0! 
z6An~h#4dfIqaQg|Czhb{KzA^<{*3DW-Ym)qxV8&xAR}}D^nZ2`BBosdjpst>uhkpu zpPvDFdNcR_3(d=HPojvrWFfbsVFh*D7c1W5nB%F~d^HUbD?HJ9e56y z4)zJ3!QuHXa9m8g_N`)UJ{kb&A*m3(&6Iai5I?ixDx_D*z;jG4>-;+#Ek_-P;q69( zZ~G$3@m7JfUYDhRFy&8CM^)!5&9z4IZ1U6Z=(q3?=eX+ww!E!H_0-)=H2<56Jn1<8 z=WIkgS?WZ(zt5g=h5Qrqi#gA8c98XUJsWdY%y<5zj%xor&T+2=7VUe?Ii4-R!puh) zw`3!FENDevyH3Dy&q)zZGQ>=8G>Mq7{{%qP&&XsD^!0;TQmpdoA}CfMvj z`Ii4+TFo(fwuyM3OOfcl!HexXk^<`}k7TX%!irM@X081o6C2;#HuiFaS1E47>*ggEGYL{2)VT;7#Na-S`&$pHYS9NQu?8EO}MNezYyM+XF!n4 zTa>?=&#m)Mfn_Ed%Geo_d-(x|eI!oN${oym%y>LGs0;6(J`?tI>&kmgegU?9QZdlS z7Hr7LEvkHnQu$Rb%yl*dMSf*<`X&$*JPWJzQUNWN<3md$J}N$qID>{Tc|kk6@6j-a zz@^~-aXmz>OjOl8Edx!=e9(ZI;M0~4i~rT*BjQuRzkUiFZGM75yJs;~>;aVcYeSsH zOUSbPz|#KNhwch5X5JajG`ooxarCn+j#!);~z(1lHDm$K_ zLUsbAYaz z&Enb*b6)!quhE`Uq4Uc{(v#*iB?nb+E@%nLB{P^(P{N8$Z_s;!0vqJxVO?b+7NrLf ztNb_Q&A!Zy3A%*QS;RluNzDG9#H8t7#ynpW5A@j}R2vcBGG{GimWCjc8}bcjM`NpL z7@NGS5wt#E;3m?ZDmm1QA9ukVQ(q_G`=t+{NZJkWeWqUb%6M3ukqmOni{Q}xAVi&5 z$Q&wrV3E@e^8HjnZk;u>{=7~cMPv>^daz}xF7If65ImQ9Ls?uK=jI)c-Y?9cA^roF ztV%$e#IC}s3+Kt>FkU8o_d)hF$c!KAsLKa0(h}q?%i!yO)ah~aVx?o7;f>r_a6a{k z%P<=S4(ku&;*3X_{Q58iEHxFLE7oJUp;+>+V%JqPLW2t_}4arp+3sMI5F>wm$d@a}L#E*3>EtI=2&>%|bEQi@0f4Wt?b_l2tu2;jQOSg47gy2;KVw>h9E` zqA;2Z+o{TuIL$9L=aRYmtxQ;oR z%NRJ^1l@PqxLEx82+N1i%))3Ae4&n~-=KZ;?4bL3ZaAlKm*L~>d!gvy1JrErV=*czl<~CRZba@wP3R<^-LcyZy$s~ffKoowqpFW{v9ay8G&j~G>8X{#^~g76wSLX zYdBni*SmcKS&u;6{!Ihbs10_MZ3X|A;QVSijM#A-Pkz`42a?6SY}+lYHOmG&l@VVN z`v4Qyrel$BAS->F13sg+W7eYI5ZbOI%p)G9JDr`AW-cKAnGFQS(=KiEG^SXhVv64e zoWqz*%sW2{`rcHd`Cn32dm#cHzDNXl6*2OB*2B8a7)&}Z$3ucCUs5+5#Pprddo~8s zEAK&FT^%Zi^^%EvXcitS=JKvB0uH(ffj@?DK2x{DR)2lo-+DQ^`pak@n1b`&ec;zl zW4`=Z2t>M34r?m0ng0C_j!rs4RIg#Os1gfR@)rTzbi6R@)N9b}P3F|);VSW}4mRn5 zzEC?Q9D^;oVXw1mpe^oSw5*H5Iih#ulcQ`$b3A^6O_Qr+w6fTF)$LMw6u8j z^3U0d54kX0aR@91nLw(@2qz{;1PjX^;C*Nfz7I^My-gWxAM1ycww(dc9u9UFvcPZ7 z05D(iFG~vE&2eHaL3-fdn zy`b)#6ujc8m-8$ES6^xXkD;$ItnX2n=Ua(key>oGyO)#1`~$AzDmjtvXIbaD8(=Rk zf%>9mNMHRL!VEXVr!wkcM)qTpLOP2Z6kvVw2Xr#m=Sw2Rxa*#dkTky((p3jQ^4^4x za5Uhd)eq;`r$dzXP?j2hmRNwNAXG+7t%_M(N!SGVccc!l&~D?RJoPzsAkD}3y=3@h z5TvnV*y;Tpq?rr2=Np!w<9qt~9JavDDdgLn8KH82be;+Q#eznC3W5S$Fbt2e#AzR~ zK)XBE46(QeTJ|ya(hv@;R-z16(C{0}^+<0iR((kWOsDs7+Gz(5C0W&RA^n z{Q_3MhoO7?XWA{TU=|&tVX#D35Fh8ztK}x_{b|6r+mp|y=Nk;@Pr0jTeVzv3Si0~h zg4ciO=6N2(-*SjEr^Ro|LeLDAa%IbsSly#h=$N_!;;@-HEpbEj{cva=U;(FF41^HX z0UTqQ0A&unF^TJiDW|`II;4{`uh-?%YYSQ4FIV`>TvzyX=@azux{UKFGqdy5b5I^T zMfa~J3^4kIZ?biHtEJDFqscPLdnKbqT`!c5{LU4upj^Bp9i?G6!Sz-flRP{PnzFw* zpSERCmVc5nUi+LJ=%pxi+lK1JhqHa24FpyBCp_8y8`pUjpwf(XTUEMPQr!=%(>>9B z=22Nf-)9h>e;IXJQ&E&T3&IuzaYbYPLqz~(NvG`s_jr5e-O$Oze@sJ<(w;Q;q@Cp9 zXuyXnLE*O>K3#bWowJEM)S5;d&YqyR=O8xp?FJrSiQBmA5VZcMg<&~QFklAx;)7yk zmJ8m3=b7=~Q1K9z^Ri^AK8{fTpbPIy+;{WwuedndAg*m_6O#rk3nLG7 zKa`9GXJ^CTA8C#^s)N1nL3fNhkFdz@G41>H1czIT(B{Z7keg7BRx=R?nTmz}$!B1+ zmIm*X81YF=!D2?n;UGGrE4C$Yii@G#sv2@j#9zd|W*;Ekx{>*(=?Ur(F9upm(8G_I z+K*4;+AGGqoH$TXTe7)3hTVAEozYkpX8`Grlo`rD0?*gcUhZfg7sZGk*!ZW2cfGZm z-0sIY_s~Nqu2{zZaLBOsqnIi2?_fVVHs>PHzRP+1cNQ@E9*jPJ z79iIVo9p*tVkM&axD8NabOj=U^>|qpxi~GgRA=3d`Q5oDyx3C@#51y4m}M%L@n`@h z9yo#-okl#C<$&dvc66HV1?y&ofWx9hW+|Z^*FFiqujemF>azlx^_LJAy^gcumut|k>m!p1o6?o(K>H9M)f$P zinA@}{7lFd@>{}JJMV|E#j~Jw;eXI$@KuOTOh)e_ExuYO7=!xN(#*LN8{2jH?IDJ| zE6#;r$B#I!aRBD(9^m9nUNE}gA>|J#BjT}BrLOuLKUJ%t|Bh(Pcu)2uWHmB zU4Ut;vcThcE&6yF@ruLaaMRfb=;5}~o}ATb?vIjx}=h_}-fHvRbt z{2z`7#la-mx!@?2ELe|Pb8Wa$w~3eWrVgw_)S^Ej>Sb(m6Sov&}pU9dmf}?dX7{MVDDg(^>4{{{_`O$Gh}6mP{OfNhTrgxn)XxpUhhpk&Nal&1YB3(nR8tD7U~^LUIrLph)`axakIo<;8H zDwI7b;<`nO_@oi~=znW1${P!qW4{fkcdHxp9wX-0l~#hIMKrkATwqAuxng-1jVsNB 
zr9LT;d&E#^9f|n7xE&fYRA}*a8WzR>!}(cyiDdb};7|u$bl)&;`l*>MLOSR%t4(8X^V8%`XJh#nQ&y@Gz zJ6QcW;#f{L7B>D$fxpLf73R8W@kO0d&;g_WpQBW{PW{k9(vNs9HrPrtM2F-o%z<<| z#Oab%z@n~+K!i&T&529>vbEgI6Le7A}!K=9UwFw_R;Sjp7c*aeO zKaP_p5L2cn?Mnw7hw#Sd;A3fxi5cY(SR*0t!(}dYT_lP>`$FWfU>La2fKRAcNO_Xy zGU@JFs%|61LaWzpw41*VGZt?`Sa}Vs-`j(7O)A5k-%#?sCvNX}2iFE1#?dRAaGB{E zm{>s_e@!u}vzNfM2aTwiYlihtjRbFEUHCXFA>;ZZ5NG&6@vK}_Ot~uCTWlzF@aEM0 zTM7GGi!ou?2y`Cs0bQ$RfXd=1bUZwSvbEu;ZoDSb8&AB=LEW%*-*Akae-X+))FTj| z*y@ij;4tL?=g{*Mv3+$w++M)BnfFKUfSx!fgt!a*Ow@6cj)@?26ryk9NkG-U_LDcB)&sHaeTkbHDU_fs_MdTrhJ5WPB<4w=e>eXJ19yF zftYn&pvPQY!D(_9(-;on+zofJb()KydU^v|R(IhO535is?r3&}Sp}Ns)$_yCtH#1$F`zWo#k3AA^w(Z#5v`u?QSKTL_iTfCi^RC@TCi&9rCdtApJztI(*e`&>l6&hIgA`+IKJWTIfeZKs3Ha@=Dl@GJoj!PGx zNAGntko-9pwtmf_f8&)Hw$uT9D|8?*q%Q=tx54F0#=QTpN~lmcV~77RxYynet-dF~ zHDCgozCHwRC(>urCS81%&ZLX zwZ8}(OZ50tl5|ize&8xDorkRt{zLgE%8sQ!Mu+oa$S^xZKg$s&EnI;;YEGi9(wGl7 z&cNoaV^J+FRrx<14Cd?Zu&_BhvD2do#dm9&HfP3*%kH9P^<-3Tuuw($AIi3k3Pp#h z!2o&W6$(5L7F`5%UqIgpPhDBkoJ}A#G{8?AV~I&Ulj&@tyZ`0eP9TFnq{ENWFa?uK!FWzOWrGJWJmV zJqi9pEL!(j^ln>r3%q?dpwi}wOlzsJpj_||mvcN518yC_qc+5VcwEm~?%jqt>#suo z=_ZWYpQKWl4TTb^HqB$Q;nTuc81qjD`mX8*MIR0MUIA@5u8oJ@- zH$Q|`3I&|*?#nkaO?;472OKQ+VOx=!KuVfv& zGoj|a7cR>x1|{Wq>rU5misFGVMtlS6KD6M*FeBbPZxq6au0n`4G4*PWfL{&yWd>X$ zet80VnlwTmnJK@__Y0n*{fKm2xU4X~kah{{q2efY#!UV|&-Wi8WOFtoeI3U}?$qN) zOiHK!eug#sjspK(+9)$W4sJgmVASLPSm1jDmi1u0O8x~{aK{D835&8Pj39?*Jvr>R zE$0SR>ku2^2+XJ!@#){&nEP8f%7Z7eeW6(x{rD#ay6;E%@41}!`WK*D^b9m^;hb`x zK6iF1^&{_C!fi=6LF>B|Bd*-Ro~HEg6Gi)(nMzqg(-yq3lQKry!_nKN2h`S?@+bW^ zL+Tb2OqjG9eTyGp(ebWa*d$L>-rcW?{Cpk*3l9Sx^MM|1q2P7!As%(Q4ISF#&7yl) zQt)5Qdh~ColIZbuXRa{K+MQs!lfTf zO5R5vuwFQCh#BNXbD-$=j+=hQNRTKzplzrICrpY(|6w8cZh$ubesc-N=|r)RB9ZW% za#vX&m$HCw`uujE^_Z)Df!SLZQkO4`lfKzWeOx2X|G;_BsV&CqK-{#_@F8x1}X1Ag@}*cbH>B-&z3KPyI2kRcuqOqhx3*d)0_NGjGWN|d+Q#K{FcrHq;)nIUjSjfxC zzZuYnsW19+Zp%l3rgjVSGam(8Tdty5ZxR$)dt;ca4@S*Bz%9-Ug~p~>*siC>rm{z9 ztU3t&7U=L5gH2Iy!Z=J@c^jkR??Ld|5^(U-;U`R~gsNWoSfix|_gzt{h}o3K|Ir0x zUiALDTEJe77YR;(jDR_ZuY%r}R`P)9LG86P(0kcHya`*J)+Ls*l;5!?<{5Br#JpsA z2f&mVi28n0CeM9>ORaSvY<5@hoHPmxPENqey(giDI<09rN$Bu*E;MYIh4Ntri1+Iu zEP=8tJ~5ztdrPKRqUId4MuXI!ifygku;ZYNSk2WKAi4-)JBm@WBNVk7Ou3{(%h-4? zeLnra56stf9(d6Z{dVB#8H}7$=n*# zu$ATEy3O%ut@j3}5zn(Z?;0%Yn1M-i{@|Rh#&JnKr*j#1Hi2J890NTa!QV(%=rQF! zar#H$z6&uh;^TGjpRfX34F_Ogls3BgHbO!U{NexXoD5 z`b!^g4=~|NUb(>-*I3LcJPWCn+p*^3WAyRuORUsCpyg*YntN}7va3_jaYZ0#4vRSp zF9`%~ptFOCIYhaf#-NqUz;SC4E)9zYke>(76>4I_)nqjht^c?$E z5?eG=4y(2w#QO7wd^Fw3WM5-I{O2t=L@(+2HrpZVaXqvg$i=FI`@xB3p3V`QxVPSB zv>PAG1{fIdl3qWsJ*^1R7QUlr#A=X6%9!Zd9#-Y^9BP+d#^UQgz^ft%Dpn7MsI#wF zN536tu{H<1R{bDv{B&7?lMXm7xDMOXHq%a~s}M6N62gW(#Ilv-uL!h7i95S)J6gVUgFW=DDMb!|U!ax(q>)_B9iQ`-EwgEpWX z@u#Y8b2jr98aTadU#unPdco3<B{gX!MgNiI5z;o$szAvnDI$_&@Eg8R>I zEW?FZ;yX6VB>Sddk{dZQoSix8@-QwWHV0f^`(h98r|{@OSKh60BXoVN&Cd$dqPa~S z+D(%{-_Wi?+s_}Io0A1p4WylqEA{OM7qBa-dVK2}>LY*r3`wJpQSLSmtrr#Gi8}_o za-lm|8GMI1BM#%o4jtbA+b&F>>x*U6`hvssOs*`e2Gk8Bp>3juTr`H*5%>(t_H>1U zSqb1g!H4c^#KOpJ;+z$~FmhHPoO#%lR~g5H^w?hJo^p;^_;^9X?>#8F(MaA2BTP-8 zJVSVc%3V!o>tzE$een_bv5$boi@m7WOuU-zQcOI35X&GRb=Xy?AJGE#23Jr+9xG3K zWG6kBq2G;2F4;33)1^(E$G;Jvc%jDh2XRa^ut+7pv4_iBx(EYT_vFHV9K@EL65f5j zl}s&*adEpe6Vk_BA~x6>BAwpgO0H;w{43=h>rD8TYoEb?(`?M_@*WmH)DxEI7zqu! 
z)DhYJnv=i1$JCyQsBX^UlnLD^2Vu-bUQB{jP32%YG94lfkD{jLGw1%e2UCA@hU0mr ze6NKj!uhchq5R`lIC`uYtPcCZWpNjwqmw!ZNxg6ZIj=@wrauoSb!(Y~BLb~}6 z7L7@0{pB)hdfbLGvtcN;KI7v1L5=ofLSWooV;-yb!28dE=x&xTlSB{0PYct)+50P~ z2hV{dSt08+;yY!*UqJlia~MZEgF|Iv%B}<|ql(_Mk{jfxEjfpavh?_({XbD|xSR#5 z&Ou%TEUwp!IdZ!SiAM`n2$u)FbOW*B9_Ao9il_iZ@f`yaLV?9MS>-$#&+_!m^g<>U=&12Z{->#t{=^-VWOKgNSo-*MdUC-gJF%u#M# zeFFTiXM(Rh1swl*j+$CKPLw`DwP!8O2ohd!+buOLYVb5>b+ndCAD_nj8+uaKqE3~0 zfO^X=?btH-C4TNI5&G*!qUEb33>y9ft@~3a+-VJNJ<;7e4PWto2Ktibvg#q-Ne>ZB)I#;usr{}%*pWvxBc!9 zuISE8dVhA}zLfJ2_3{Z0A8-&2$+ht{L0g#j+7c~_2atax7A9MZ__UEUYna(8(`vJJ z@w=6e^N#LCn}|jfdp_jU)-AF~pGfeGc!~j*`PlZm32hD^Ml=k>GQ$GaJear=Wi<1i z-jfABH^fqp?^vPK7F_K&qQl&^&@gE>D2FTrClgCfp^6C6<7OV!x3KQI_{{j3q zgc6q`20RvA0ZH;%NDj$@2{|V)ZeAg0e)I{H810}A)JK$vZiCb25Kc6IgG|1QSi-xX zL0KQ-SIpYM#7l}X{tH+LCKq{ z3J#5glCCw_d7Sc_`xkMhN@GFV_yQDVv$FlZj{?>6x5N$`fz7?@uv6;*7EPGN9CuR& z>XD_&&COK6F-yRI#a)<2_hRdkdJskPP}cu3Dx^}GJjs+bNPDAndJmQ{#tX&64rAE$ zyksIwo{)$&woHX(ZMz`HT-&-DdQrCzNY&gm6A3OzdWIgm){s>mgIt)%1 zM=>wovxpIa#Lt<9UqkeTbJH%OxTA@+Ub~A~p6g}Cq0hlpwxOJZq=EDqF3OcT z1^$8RC5(Q5+f?SdW_*IUg?bqy;eE$LVr#qtv1A=8*B+LYIZeT)-M=AcN()3-NqG6H zaZGCWfSo3Wtny$g=e~UlG2mla*t08GU2Y^utBql?O&E;EG_+f(29Fip(fNu$SCm@M z&9hI%ut$ej(Eb%zmhOT5t{4mFKHGvCHgEyc)fhIbhI&j7QPVA$ecz}pc>MVRPmbLK zf#$Kw$XJsDE@a=qbm=uI9m&w<9h+Jj& zE~5HcJSs;WRoM(~!k|%gXekMV32wLW@t!~sT?pVjWG-M9{Ssh-1nQhhIqyF+f$19X zRSoSh=PQD8q66fP)!|!bY4KrB&1nC+6{;pwVwB*GBi2489*PbxrhcZRPdIMvdJ~>> z*A<$(t_LM$Ei%u%fSs!|AkKvDy_Mf(ni2YJnf(q_)H|STK^pcuBH<;D#PL|Wnd578 z_(2j~UK4ne^OC$lJ$(mIj4h?v!#i-aI}Z(a8$o$iq)HMtaAjfNp=okCDAwN~FZ?s+ zyKo3BuG|mrg(vv@@mCn9>dL#1zn&GA_b>j87xM`}))5aj2Md1EuG#SzOIuBMrb8E5 z{2+3epSsRUH{3(1yO;}dQnM`GAF6zhpOn`fkLg1ypKg@4rvK2 z$$+>oJ3;g~n+tj%W6F>TC>pc`H0CQgCy@iTg@59t|L3*O`T)k$-gJ&^Z#ss^b~t+(7gP5HzvbgmA(qYA#;2bO)@ z2z@`A3g))djk4Yi&EJ25UHD;0`}r0-!zt(7h$@H8^RO~?FJwAX=XcyuQ0v8VR{x&n zrd|4mGvlt&&gL?P39(#!GdVuzQjW;&IdhL{;Kuy*5qCBgVYCtTJeQmD!^egowhzIy zMJX7!e-0?S7^svZl2o>_p`fzScso{u1cu zmP64WrC9K25y(4Ua85VvA+FSl`a0D8_qz$rTfRU^+<%z$`@3q|`T~?RoC0}b1n2vz z9EP7EFQar3yKe9bJ&X2p>*%xMctDSzH@+8G5)h(SCdv=RW)` zdOkme0n=)rEG7kWqeF3;?_)?BUju$~IhysU*!De?$z8Gnf07d<+2}B=>-Q73_W42C z-7FRr`aU~O>j>0;G~+KE(B^}_PGx>Q^2x(4hJclA5adZcp9CIn>_GU^UZ_ugVZWzP1S>&T`=RoU$!B`ZBu|6^2dC0%f_8%jlo& zVDOB*4tIlDua*Fe|9qZ&OgiA_Err@1J`veC5}F2h5sP0($9m?VA>49J|>`v zOk<~7l`z4!5u(D@Gl!AkTu?+CovrM+bRFUmUHy&TY$ZxmBHpQ^f~DO&1<}DpIA>lh zDi6)!4gu7(OD!=oBPKx%YE|gtg!JbyZgVV`X`}+_oqXo>_g~Cn?g?}|GXlqr?!v1x zEI9MOYaq+jhl@D?aoz@ z@i+>djJt6C*(EeD`J0Va=Rz9oHtf>Eh>1ab%WMtWZRrhG@#HIcp)a)bZ3BOuR(MXl zR`t0&m0fdhxZX>J?umQpoHK?^q@AQk(JfS)hsr|q4x-yz@>b;B#InQ0@BDrp6_?2w zbM8L4_51@1jya-yLJsRNumY3V>tUP!a3`5Bz_ z=Qcbi>yE0*2Xt3&;k2qmva-oVFsCpY9W-80ezgk1e%{0g6hNudm*L@6sF&(RUXoqd zR^^X-<{I#Y3i>VE^yaaJ@pS;@J& zlydT27deq;|6&kFeS3wEG06R7?1dwY|`9z%fjcD4BmC^|tl~sI=}F z3Sp)fQIyM+TBT7kpT2*>&KSB=4N{?G;5ZmQE)|t;N>x!l#@w8mUzjvrM)S_4_sp29w@i0+yTJq1WMdSpH5TL>JJxL@OA|U%tkm zUoW{PqwC;(b{EcHX25q0Ujypi`&HAzPeR1k!LTL6l+NO&Onv=b(Q}?nettNCm=72@rO1Y%%Us%N4 zT_7*IMRQj(!C^-c{){mYYMb-WDewyC7PF5SM`MX=a*HWTAK>JtZD8=@7O3~uX1l%C zhpiv7A*}reSPi+%YzJqeL)rs~v^$R#>jbcQ{1hQ-ESmQi$9)=l7n3ggL)57OvewsT z;H`cD0fSSaqu~RVO+AislS`bU-&nM`sm*WG*W;u1cal5(HR|R`1WVUUthxOQm#wP? 
zE4`s*VJmtbe}|sSVxhGn1>@>6&^z=N7I#VIlmj9l=7bJJ95#jyZ{p;)i z=umM3WlrP;Ek_(wWy)LK+sVmZb>+%t@8nz^u0wF2xu{pN7;HA(f!~<|^!GIvBn$nm3JA4`Q!e=c}3cM_3mW+XQ-jjeEuTXx!#4SZa;A1?0S&he8<4J3HO~f`tQo{1oem?+4F!iTVc@*=H)&q?Sf4+{bef;U zE!uq_9PX3$`PquAefN_a^R_1pHhF~8V+{po)8kOaec;54j8*c%FI9n0PC}_6X+v36 z+{S!EzDEbc`pOU3jdV5jSbKD|sNqHpmm};M1!j-hp?UF8G^-yC>Eb#pK0)7cvuaMP zOIk=jDLOjTu*id_F}eQ(Xj*U$2wXO%=AeOcCRzEjCon^#@dxY(?f8oNN)LHZ2Ly1K}6kI!KF4!G?5Aq)YlCm?q zxia7Pa3Il~AF|P02w#)I9DY6C8!P+Hl?|x;p0HI?F4iFz~Qasx_k%mq>ZoX@##m8F~6v#SlaY2 zc5Lkcg|r1D*OYSd>%`wA9bwbD9F)%go3rYnhfa-#o_#W^xbnQ1?9UDl&A3rM@=cqp4<(Ly8q%F{}@8c{6CcU_?0U=Py>xwpKxZ( zUhJ-J#=T4hk%>i2@|oCXM^>@YkN;rTf#sO|t{ffDUICALA0c|~Pu$YBg}NUeaCMUn z1aCnBtpkeCRVd@+)d`$=uxS_#0gsP!>Tk1H z-zP@=sFZb_mn;ljvr@n+X#vv=`i6>L$C$KaF=w#z6?M;+GPP4J7fd~fs##%lN4ShO zq|w$iOotx&pOKH&;X5~X5~Nqf(4d<}IbAmljiLR*{u=fj_zm9r5`)lIA4OY8cb52p z*Y8+}Je*EkTPrMkTZlz5^D#h~$fQ%PP;S^)@}Y`&H@(ux%M_5^e#u18v>FH#Jzk?# zO#pMH47Pj`?) zuB)-;)l!hw8FG=XajfB==@1$34nfT)K{lm|OG=%Nl`n4MT&W2k`}P~2AqEn*5&L6u zE2o_D2do}1XRRscP`WCS(|n46WW8!=qLu$(`&Q~;mOtlS?EDFG%@C$n zr7tvmsKZ3+bE&cUh*2jVV2|l1AmLCwaXbup8;A2)9(od1*DKLVUI8BE0z`M|iMsS2 zEC%u_zPgQFhJg1ojS747`cTw7s z!?oY~4Mo9$;JI5zDD$W#Mu7(78`HSwE6jy`4f?|TYJEOt$TUzd2IlZ@5(JptVXlFN zTwl*07*DLPlKLn#dGs7g&-sJe#0acE3FhJ0v56ciub%8JUOQ8oMwd2uSxC;THO zT&Vz~jY>4*EHHt(5`wJ0Lv5d(EZ_bDbnz1LWrrSPM8*PKl;j6t`%Q(&E3Z-Ou?-hQ z_khIN13^)ua+}*b4`sh!aUoi#Fx&kwmrD6!;}8q}&H*FAZ*V^A_ki*;R%KY{m5X2Q z)PZfqE{F~v4*5Ozu&3T8{O0TCd|%0Jj2E>r8>8+RWsrw^>DDK8e-EyykJxOV1(adw z;Ee3MQaij8-pb*U;U#%u*1V^m)bUzHHuNV_ws_fw>!Bf@qy$ zlm?YRkL(kenj(UDo5864QJX*B`9H|cr+d)A43+dj6ZOzu0{=4dH~ZA6Wao!5lT6Y$ z_djF~hQ{bO?JmmRJ><;3{6`+jAI#cU#5-;e2EU`%AW}3Lj9whz=8hK7!}$S5nqFtA z@kU@V$%M~4r^R=~E<_ykm>}~*dYBA*_vTk9(pMA{B zl-@JLi@=(0m+{d|M{cSC<%b`yE_R$8yg7G>+_CVSdtP0rezbyod>OBx) z+7;Z}{!m`1jLUb5L5E?T$R}KtTegBaQ9=uIef2JpUtkaSH}x?&_j>>i3j#rz^^n>1 z5FoJK5oL~@K&I}=^1Pp*-tpeV=gnqeMP^*b{nuDsG61yx|DFiC$Gv-S1hZ6kP^#_? zpO;zkpO5MBUANrA;|DK8PS!^^X-{LeiS+W!)BzZ%NyqfnZ4h~+3!I_bP<&o8q_3NT zGK)cw=0g6IUL(*bppKPggaUZ_fNjbJY~1=0vu0=Dv`q=buBCmgJE-)?tMW?JStwXb zpRw4ZSljz8x_;J!)h92L4zmWD*AE8i+|Q`_d6gBPD`JtU)nMVPFWA6&*0JUVhVASm zIC{2Y?CNH0P3wa}lboQPE5gtXCn0mKBUF|gM$aZS8nxVKVa;7|=QbTaV0sj!t~&;O zAMZiMxD%>Pz5ZbFuEl6{ARU$;I|A{dJmh%l0a!bTX-=!aD&{kbY}w1H{cbVoxBaYa z@>ecrnyz4%xei{u`3SYu$zXTe6_i?4knX)4QZG2*&KLT8nP3fyFB_O^zer9|TdF$s z+)$AA`UBr2M!eN?8Eo(V2(>02=M=ZX(fpk*@3ZLvd~Gu)|52uEo(}mapReGgb896X z_TM0W;uRRGA~x6EY1n;r6;`C~14X^c&2{c@oVGFyJ$D{N$9>M|J(Nm;4)=i66O9nq z{TO^PC&_ygvmpL*Al~);d_;bPp+`2eobkG5Gak481%FYl*Qt_-rFY zo01Q2`5O>*y$Qv0K684bx4_{);pneB41(`Z$HeMr=vbYCHK9C~b;L1;t<>XPbQ!{* zE#ZoU)esP?1DT_PU`zWiE^mDbgl)OVlKgg%ZzPIq%iIAuV^?rF2}{s@eFC;ceqxWE z&f(l`6L)ez&@&+!}Cx*&YfHI zcP+F>oQ3Sj6RdI{t=$7MHSOH^`T+i_qP3GiJV0gTi<%#LOpc<5+){=D+7~4Go2~8p?I8Z-!E44StgkaRo*- z;CZeCuHLD|?e=f6ugyJl{z5%&Ys>KKz^hV$Yu`UE*#OOjyq>b@#6uU>^b= zTMw3V_LrnKS)XRI@?0z(iwDa^yw@pRA^uGSIGBWhzn6{>{(T;ZYujLTemsw_^HNj6 zdY&(oo=!mFUwz(Dl?o2&^XcF8fH_A;VuH~n)EIom@T_di5bXzLofG>_e3<`t1aC4F zpm~KKx?ZKdPW=O8X7_^9aVIfn^LDgX9sqf0PxLe9F}uH%^C;Mk5wq%1*7%0oQAqbb zcO7BmUK2q|GnjH{PiD3CGKhYwSWe9nPAYL##XQs!`c65G0UOsrSv2ti#aZCd>k{z` z9KpBzE;x*&Oi<=^yeIh#jxkrcdbcKgm$4ITwIU#XM=@7;qZZQ=XwRQTxqWi;xTfW? 
zvbZcJezlt^>6`44U(D486>^Vm8wh^&dm(k&9aNCx$!pCVOgLQvr!G<_Q*JEEIu>#@ z2|q#4M2A-dxxm$_ub_287qC4O53XLFI1jU*EdD(CGCnvk*$zi2tNy@rc2t8^YckZP z>k9V0?w~_;5~hS(3VPP#aLLO3kiBUHvz*At=W336JsY5^Qcq}lwI7pK&cg7!L)m|y zw0NC{8kG2c!D7=HU_vuhSz8RZdX1K_w9SMMj4Qx&ql=(kKaEBIcO0wk3&d8-<@`1# zaE_CP!lpFx2P7#uW!C~uy5UhygOeUFt9A!*)J$~vyazV@eonlI6xB}Z;JENahZnUu zQTNXxlpTtNxM?pTebHZFb<2>A3DV{@pOc7nSR`3M&pzks-lRLZLC}oZn6qhxq{W#s z5-L|HYPW~vzRfV=B6+pL3fa5c+I+DCRS5s^fyZ05h0sq!sGIRN7S~$v)*E`Eqs;|o z_1Al-K4#2s|5yi)W5ZEp@&I1V7Ey;}1oM==2fsAhiydY&dHe$A``{Kzy)8J&_f|AI z>_aT_158o2o_^mY_;?B34$pk$veTxcm9ae+G_EszN}`N#;1%rf?!fe`xm;+-OH6M& z$^Bk-7(5St!^lmZDD_#OO5E288q^%|w6!3m?G8q~5b+`(M^5(g0+ZIygogPcsJ0)* zWgcoqmz#Id>lvfIM-56+GoZ|sX1_aYnAl_^Yw)kbFMz)xic(v z9TF@4209!5Ko>o_sT_OB#r$;~%#L|taLs)tNu|Bjg)&e-in##$OSs(8R1o(%z-{TZ z4l-;aXr9riin`OSfV_)}%6VvWA`umSGjL(99u$z{LH=_L^Hb_VnbLsnRHIqh$qTHe zk2~a)2djL(Yy)pEJz@3MFzlEz5`%sB5oeR;^oyC?T*r8tvr56JOA2O&6Q_x_V8h4- z7^aRum~k1;z0%^dU3699{oPsiQ5S?sw@^Ke_~UymdBx0C;Lu7vk;x5c^O8J09hBSi zHpkhK#XVk22Auf`;pVduSGi+RRxz}1$_BA)B@5EefXq{U zAidviE;zP?t5O^CenH((JlYCcuU_Gb?5{wU-FfmnY{jIJw=sL74=SFOxCMC8t@^_? z_`2C#@c8tG^LT;O={O2XQ+yDN_fXewBo_}aP)mJ8^6I5F-+q%c&c>e-hacY{YMKGx zzLMt0-siXit5Oc<-$u2q4sW|B3$ty?Rq~B&zdx<2t#kq9*nF1Uk^}`k+;Gev6L5Ch0d{?Z(QeN_=(+Je zCOdt_?r9=H`D;8kc=j{W8R%y^r2sp2+(XaHg&1D?9oyuKG4j<14E;=>dE+fy;Td9z zcN-5ycW7VLLr_(Ql&F_%4R z5DRK+OYQq@6Sbn}E$U2{BJ=p~rI}FQyB4F$e}Kom6}~DJB=P`0ymUiA70#w4<>~o7kQ9 zoj~jJIZ1H8-l+Pv3(CVT;nW>^g6vj1^Gdsp81XkozP9%`IjsrS^{aG_MlTVb)9t#u|rW^-4Fqgh=m^q^bW zj&jySbChyzFcbf0 z78aa_q^7CVC-O*@WBG;?^C=9*5+AnvZ+7=CagXnALx;6LAjLlw{hGdEd07E8&$h+T zkW?_pBM*fmfW1p4?vlh|(}r`DcW^)x;SB~CzsMt0#m%-%eT7&7_gf zhG8C-yx-moSY)&vt7ad-+8W|<%|8yQrHEtPiIsaJTT)~C0q4H{it!r?(V=lZ^bOJD zH!XOLk9O$uo*u+oJ3kTk?S2P))s^_+zP1qG#~L#|&!K#cscO^gN*o;b9sP#?U{O~r z1?2-@6qyCFoJqgApw*UqZ2)n$UhN>>;ZmG9zZJr7RuWHQ8+Znn)4gR1hW4ES@7fMx zMeuI)U7Q6ahrVHW`Eo29nap*!n}-2i7C`G?Q_yN_DCghef(!PT^6jTCg3gfLU^Zth z9zQEU;n+whoFt&#Jn9o&On!P#L&(`343SzoLd5*hpw>NsnG^58V7o+63|PRqUY!ID z`=qG%J{iMp{K(rOCI*us_56H-^fjiu%YY6no1qOeKYv5lqZ}u9oe$}M(JizO^|m?| zvm(m>fcPrsa20bTN}MH_IS< zUksQnnE*yB+d;Nc##J~GQ|zu6Ms8{00-kHb?0GtT?O2+DqD6vV&xKep>Lh%^95vn6R`BDp!!~%-2(?#pcd$D@hG3ae@9>Y70!|)$dnfI_t z5aTlN-EavOk2Du#Nz+lebSa!!Kr{CQGcfAb8#T8`UsHX=#8)TrcS{2HxTpfN3Eud5 zKiv_&XwzLj4}AVO5$FB`SP$$44cq!d;gFqZwAliEY%gQtNnJ3ieUDyQu4tNTz}x=a zO1;)|B)!rsh2X|L!#5>C#Kcu-CTL*OUojB4lCu8Z&#~rM zGwrb_Rf_1p$undO4FR_yYWqjh{ui;tk%?IPeH30T%z%K&&1jKNKJP2k4RU2Cw9=g} zzt{hHpz}EKi+1$TVW`(-DU{qy#AzF}gy`^}VBc4P&OZI|Oji>jw7ZGmdHg5_zweBC zpPbRmHx1i`NO1H%3y!z?V1-LhT<3QL;|!9>!$X;7lkXrhyri<){FzbJ6J%~U3e|(n z@%X%N5IQsf;;kDXBGa6=($PiVY3;Cj#2Z-N;{o_g>4TrTlGfNfoh9E*Anhy%i?1O> zCYhpi*$PSVr`2F0qTcMkDHC8_s*)c!C-2T&7W(iw_`Ig~?wcacW*Pl^b?2ixe?P2t zYDb$r_i(_dx7htH@%i&!Va54zn3DPoac?~)-FOSmPV^bupDghk#DVhMW|sYTPt=j6 zpu?}>)FXcmoa^^s`oR9E$!TH5AG1Kb@>H&P)DcyHA^E5?>%ei9Gj@bo;F2DZnEmI7 z#EQJ2!8*sV_IE$DSvs4%pT>}BwG|syUITSPETrE{z_QrY#F0FYd2ZLSbw(;Km~Fx5 z&pU{fvpzwlQvqg=Jj|^&JcrR4k3qg}EJi{zw0&F;XH870G;All+x-rMc56YywLo<8 zEQbY77W~F4(wBF30re2-mtPeRdYufRl6<^2LD5jqrIk5gX)0H? 
z=K`zf;tKM^m26bgHqg^*gyMCHTwiMwA>zV9P^VIN{Sh9k(@pq>jQgm_JS4fkz(8oe zdJZD{9^~x1U!^i{8HBF~C<(oYCIN{!bD)6`N6!`6vdy^g-eHuTZ(%;SS}--{BHVsU z?9f^3&~~AoU?aDJigOw0K0O;E(H&Qxe+cTpy-?&xn$4jX67SR)ver=s^!HWP@p}rf z08er8AH5;r)oCoNIl(kP4zk*pg`iA)fqSnc!ptT2F!Ss$_)K%PVoxh4iQSJnv-AYr zOSBIfb%5CQ6?%`QOn<6UB~Nf?i%7XrsN6VD#U&_{Uf|-}H)B%XSSXI24_zne3zEu@ zkbUt$Zivw>jDBr{(T_Rud`Tr&Cs(5U-_GbF-;ba6Qy*I3S5Cg&51O# zEfDE=i!%!e0>$NhlGeJpU}s6Q^}cN8xa$^I{kMS-dj38!QlBuSyPp5ZA$V`_N9Y|0`nDVV4ir!?PX&{inX&9?A}+hp zn)7`37>d)6&@%dg!ATu}MdOV zC4J&RZ#FC?9+l(jxWV5UicOQy@A)~-vo04E(e02q(+Exc+o5E_DV(c#4WYI_QRQ`= zcxFm&{@YBn>zD)!7nlpuPKm1DH&24q$%!Cq@nE68eo&gEg>}~XDDG2%F>5KaQ`Nzg z6Jp^*g^`fw8HINJe6YFt9Cf)o#HKhcK^nG96>MBi&$W*rJ$Ikmr27Rmcfufipfd}7 zKv|}&Hn-(-V=$b&v{w9QRxAvJ%-Ut3{t=8@;)q$bK!X0(tH+?5q_1s00n&tB+^#1dVB>l- zzE=Ay#x89H>HaO;gv#5n0JZsH=ZNz&+X13KK17*JpYL#sgI9lv_|lFF9Hk0@bu{a{ zkGzF;_Hr!g6ALZt83xxT;OyP2NsGAz!^U64nLgAfa5Mmnu7_ab^p}`g(F2^2IM7vViPxZ2P(ddz?2F?xxba>{$YK6=)Y7#lR&!8&sQb!G!T8kl_@I z0soAnp7d+<`%B0jlRf}TZ(RnX?d0bO?uWDOH(&x~^~IlbB;tz`Rkpp8F)_oGSAF<` zMX3WIDc}aAxxD}fj|bFCa2Stg7D3?f4`7v*&higkz`m5P38Ef*@BjXTBG!W#%7)Nm zZyPoz5etmfvixv!^m?KtEU8q3S2aC9>>E`YcUS7kP@>|I2;l7z^qg5!W2}2m8l<{1bL!%X-t`?Q4SSr&r0q!2Twe#VkBZ+;i>`b}2Z9A!{Iai%*6#C+%y+yX8gGZ?TIgl zeDP*Ct0>8Z$89i%?(7$S z8woMLd!a0_JCn}&uJWjUgF)ryn6UT}F7kK>vVd@|a(E2*p8QGv;l0H9iG;qs#MHU+ zgX>qU$Ga9Kf%k&p zcRRzR*KJS{qkx(RDE3Iw#Fu;@pN_!*+T8}+ z@5K9@8-v-Klq~gpHDzZkKxwHhD0D2qanXK^yc*ABXKr#8+y6$GtR=Yg*$W*hJTdnh zSiIlA;A3fxsn>}yGBZr&I?@R*Z=z>{LoTZJ(Vpl+ofl6@ueU0|Vw$V+R$oBr_bJ3; zImx9@JHefrq0eht>sj)P4CuPUl7D=Ocon_+W3gJpF8F;zr$Ytcdg2aO`fnmcRJ&3a z)hR61rWsB@E;p+6Hu_D_Mrqg1Z27mR*fG2mom)Dg|E*~l_b3g$ebv}8xI4}?`~Xfm zD)=ro;=lK_>T@{oGMdp#Cy8k$|a-i(pp^KV!1XYm|0tRZR#q z5F+muvt5lE%-1yrX?jZTYFQz=j_-t_eVpOR6b;^c^B!IAP2j}2gCK)6Ma^ed&ML=D zNX>eV>YsO1N*^WqXHP_%ni1gj^E@uRX#hok>+{_;zo2Z)7&NNMg`mM|V$;yBnrX)! z`b&>j>)r*;6CJ^2*i$IHQU;BCN@3xuSWpf=h01PIncUcjg+CYur3Q0AuW2=Uh&{0H zmZPZoEW#zJ9r*h~9jg7;gX8Eq)Wd2EX4@mkr&Z%Nn0!7)1FNuQZY}tG+{B1gLojtq z5bUTm7Jj!x!4k@sdu9GW#V>PKrO3j_9Z4XYL0ZJ?->7QLM3)WpY+I4XDKn@K{rNGr z>uNRnHrBzA`C5XrjT4+2WW-nhFccy~YB*8TWJzktVX*SwjnlfH#nNnRknNz&q z4KWySz;lf5oC#KmFM*$~C73WrU=iqaI6aNLQ zhP=(e7s@(F@Jhs>&Gq1ula3PYZ`kDrv1+V;gCgCS2kK~Pc}6|4VY@->5|>*$)&;^V ziEk4`ejwin?8pVs?6`xQHWJHW(Fd6VZn{@1!AWc5V;#8yY=^ z1+}Kawi+$|_qqe1%>D?Xln%fKS7@E8;2IV^A})VC#`ikG{D#u4?A%inE&Z)>d_&oT z0lVEY`}an_k)N2<;RXh*+yKY5DW`nil2>QsNg}=PL#A;D=N=yT}uQLlQ z)u(Q?CT!k7`g@O0xMXt#%Er89q1k{>N?v2c@(d`x+>fP66JYSVH?ZaWH>iD(gIc@9 zpxD=y0r^zZ=M4hopH-aSmng91UxDJp3{c;k2g++7(Q0Nd%4KFyrw}PsAFEiyqA?ix zbsr~>jpfQOeZdieh2WpP2TPtEM@PC@_Knk^$t3a@)*GNlfrdJXx7+T*vSTA5)y^EJU)2@7c8Kwb`!!ho_9%56J%;Q$EtPbGHcRhsEQme? 
zpyO{#^n8?$?w9U>{Kx{yvq9#9@?bH0DTaT+JSG+k0l^KrnqrBLwKOXl~s5d(60Ozn9G2lqS= zXulf<`)dmwCHo%be#1NrAWN&y!i@6cgGL zY%t<&44TdL#8UN4@N@IR#xBIg-}C_o-_6Di)OY8&vIwS+5%Krdi3IiLSzOK9$*64D zNiqG=#D^el$WZ}CUl!n&>p!sSYAfp4RAA{o>KZDX0pa~8u^g9{Tm#+Ce6Ud^vuPUw zsa3<#S#u2HbK5y4RBDAr*Q*KpWFKUY!G1e`eb7be%$L!QfZ z>H+#2U%U;1@D;Oa;0x7eL z!P$|zZVg9aU+V9uc+(S7w8@v&{}3v!&Sn)3mEbs+TIA0D%c@KsLEt%4lzghkMdRC0 z>-jD&Cp(bSx_FYc5ktX6?>veK-vJ}twdpAW#^rwH?d zO);&#Z-bL)=35++5cBVlOIs=z!yJ>U0Q= zV}Un*fM%b-NgMjIV$nHhtsH=6PzA-)&v7=Ni0f7<;x{?9Q;#CeXWQ!7E%FzNS7oxf z{w?Tkkp{~HOF-^i18RRu;f1swx6#h;af7n3sX8Du=?Fb*Xva!B%L>E-gm?PNvRX6H zQF5OXxo?*EOGbmDMI%w~pUKwtpj*ZFs~}w*3He5+SjXs-q_Oq}*OPZabO<5ieiE^K zo`7=vH%{@OilyJ233u&HgevXJo-GEbgzc)Q?+=X)pE$3(35V2cNo}PV$hlz)SM-)|J8Ms z+{#F$w(G>p`uqcjuU&xlRqtWpJX7lJ=qgF4ZjLrTBVp#Ldtj3M0)0G3)84ipf^=Ll zsA)RpHN`^OVd@mQ@2y&tdk1B;-NEVpbx;@tGU@2YlCUBXGYd#YP2o*6@;r#U;;&%W z4Ld=Uk;)QGKTto^E3`84L8BhTSLpy%3^C|s_kB6M-y1|3b{`WZP$B&ddDCaf_XVKNpnrtr5 z_eG6#89-YGE_wSEX6_DyB^^l^=avllRTeN^Fz5X)9%U^wV^r4b@T0nHWmwpRedm9G z$jYbMrwgKXik77%jo?RyKC!(s0-Jvl#7>3E?J%%&Wi%T%}RW z>(yaYHd|t(Q76=_a-}SNI&S&dfr@?Gn3L9LRR2XDoZne!QLW|EWTs~oKS6S57@BM`Hb^2QGh4o{pYtIG^KPa2N5SyUuQe?%NlFV%khe zdoeL=W}THNPW~W`g-5Rxb11v@2nO$<9d+ z?^8#6kfETKe_~lC8t7>DM%QkpEIZ*YXEf(G6mJo6^4r<)V);f4o3@9wj*4c%{leJy z-jCTYRFJuyu1XSM+Hegnt~%T1S`>D}M!gY@;sA`dztRk4yl(C=FV+VmTTk6(4- z6S^`CQD#BtyZ0z}=_~0mvjqzG=D@+bW_u8?6Z~TLUUjpYN7`*>jVRI`AP4W-#IME;3!HJA5RxV5hl-NRb3f;cSi8p^B25s;ui>+ObsfV2rUc3kC`)8bEo+;ns zc|4ZZuf$ZHK_D$kS5@9Vidy@uIfc6s8=QLteNU=Tws;S7JpK}W_PT)AtqKgAM>=6o zZ%!(GNE+~UbpM!!Ru3;T<&qG#e#>nrc^rrB+pl6-|9zm7L>ZC;V_2>GUmzNg&q-sw zQT{`Vi`eRbTk5O0vYiT;aP=;@ema6;w|deLOF=aMJ4BpLMg7h>pjeTUTi`wlqZdiA zdUFrx6m2RrJnIJm+V8n6RViAJGUn&*)e&abu0_p431-z?hu*=vp?|e5?_kT3pRI_K z?Y$389I;j=)uWfKEoeC(V@6lDfOy9)Zp?@7ka{`{hYi^a{%5s?2~W#F`usQ->Cg;; z$A&_(wh14jqmRkgk745M-Uw61W57dmjFhIZVsk6-E6(A1czgw)Dl>2ocnU?eZ{WoQ zLm}ijF`SN1a|_z2z=dn|K&Pw}Q+{8@)Y>`NS340!o&ih@j*vZBtWtb;h9aArxHm2x zoO(B4#nN(U7HjeL#Lp9v?o{^BmRMpU@bA70r4GTl>a$mK8&m&B(U}LtxPEcGY1f{) ziDb=K!dQ}Op7WwhmbuB2aLJNny@swOOCm{x$Vee6B$6y)YM%3^BukPdl#C@Ak#3fd zCHbA-|E`;9=AGv`=llJ9Vj$|{4{|YOfn?lJls4XhX^X2scWM`PeME4KO2CxLLm08p zMkseS5yOa+FuOB#Z`O7qUvN8k+R03;e!mp7qw~O`mS!5h{j^F$CxvQI1XENO!4^v! 
zK~j4k_Y_!&4JLQ7P~I0iH=TfJ+NoGn6(IDegK)3ym&yuvx# zvBB>v8iilNDQ}Tx44&Yz<|c$(+s)#qm!h*Yox6~WP*+(AGIMjZcvsBZueBGH2Piib zX)d&EDTL<0jx=Ms%ac3&W)=f3qtv?_D>tHE*FF<5b1?0bm(2pT`+KeK$`FWDpNGAI zh4}i4u^4mCO6X%)jw>td#FE7`c}U!6w7?21=(PpZe~rP_%ir<>O=~D+Z@|UhMkw4> z%N(Q9KxeU6VK>W2i2N~#|FiBa&Eq%nVu%F)MU_nV$6zcoT7z*P2BU}Eg&WMU=4x)h zQ@UA*qL;Pc_$>*Io^HX5bk1z-xD~5ERAWbCAGo$yfNJy*UNnSWyGwo$o@55esymc} zCqC)ae9*sBbG2s&?T6>vF?Ae0%b&$!X4(vx;^#G%fl&({L7+(*?=v?Y3hSF$*t#g@ z>HHQ{tJPZd{Z*)#--0=pr=!$D#!}v7VRig87;bYLLL{~rkR691zD_uG>Kq7(I|m=W zZ-xBA7ohz15W|$jSuxLmSo$uFT=AAk$Gd~_>s}}hqCIhk9iaS=e9`7DJkYBxTR#33 zrfK?;N7e-oY%If5Dmy`Qx0Ek{9GP`if~?i|+>h6;L5#&d zkRKU*CWw5bZSMJo7KBudS*pUCMh2(Vwto=)K(V{i=(taG~;YsF* zWA;PF#D@s{9u{;s%)0b;hqC3hBTEo@2?b@;lZ zfX7OEtf)4@ihG+d`c)fj6Kx{|-n9~%3mM1?TWPNvNrd?pr?IeH%apOJ^K!Z+qkgv> zA~w}v_K0%un{0xy6Ya3>lA#FpgJJES^H^AV3ab0Q0qq-eG3C-RICRHch`4?piY^hW zy?+}}Cbni7k=A1Oucm^*(at=F$70CRk6bzO2|dGqqSF5w_%9iQo*TX~*@Y);{w3NE z>*t^*!T^#VhGV(&GtkG9TXoG{Uf8#cWnKS`&P(s$n=N%vfA|1(u%CgVT?Q1W)F`yF z76T?U;)QOlMQICV9)8t8ZuUyZo)yWnu2_o`ZoR?e%}%IM?&B7hOobC^zi{mxBXM?( zA?O-(Ool}YZ}NusNh`(;O|4d!^?@(>tDiL z?F}JzPY-xMgF1ljTfq6t9!y?iL|uzO2qQL%%+Z$DM7_f}Q2@7eSK=-Fg#(03;L__S zYC3mBzpSH>*eL-AOegN!ZUgvn_YFY76nwUjIt!hDQ6F}zR=4oDqH1y~tPTALFQy$p zg92mPf$u{9SIDLhv=&Tu{0Dx!OfYkVo#?-3I4*CYp1oZa_IqY1*z_6+W$i9Q?Z*u8 z9=Hx1eFGp-{umqtGr{94%_e{SWX1hymY^5`?(-j^YPvgk1V3UCpJ+b>F)Y3EB9kAa zKE2w;#d!>VBZ(md%y{GMB9Evdwsc3SC@~SPS@UTZNR=5rUr}fuSmGS}urzP`> z1qPz--QTdjPb*=vBl)3Cse|6%06s^Uh({ysg%i8~gDH3ZK($8`Yq}T7ZRT_Yr&;77 zU8;kIp6A)x;+^2OYA9GtFULq@f3CW*iw95G2|G?bg+;%YqWi)^s_MLf=ugM7^nMXI zEQrS7=XT<#xmTfF*T~e(*Sy~Cw-cFO_a zA^1<4fk8ceSnf<7DBG_8G3Y$!a2t!qMXOW zj$1b&Wz{+KKIn&@#}q_C6mg$WXoDuN=+J;9Jm0!M!&G6b|vq)`X%bC-mpbk zR{{MBKy$1}J1UrV)!7Gd?bB=wneq<37yKl58a?}8t5~?1A=X8#!;B$!arsC)G4@It zlUdRCO1FcM>-s{Qvv(?yIRI;HFQC5HTVfFDSw+{*5M=WL3~p0*63)InfjPVvhEPsZQ?*NR{!a^GlDV-EbR!J)-z9=T*=Xgp$h>d! 
ze!vjAD@q-{FgPg@Ym8b6`ftJz;J74`S`KfQ#V`2%NJ$?$l z4gQINd+#w-7ZZq0vPES?KX&3L?X5j0@{qm`xZs>bR7wZ1Wi(%YGg3#r-f37^@C7^$ z_rT?2-DocPS!?ibTZ}zzB&@tyiLRXwL5M+rlntc(-c}>A$YC!UlsNN}lsPf8_M9Wp{HxGJGt%#xpUCMeV@oR45^kJVV$Py?MEk7HE3Z1kAW ziqk0?YC5G8%lAF3ZFL87!fr!!=3&fjnaj^7*$a~1H7sOs9vgkoMD(1yga!83q5DTm z(PV2ZX^(Q_B?v$qNZE#5KJqTf8$=mD#_oF=s$1E=r0=#~MF;w9im< zrgL(joxm+lu18IqQ;HYad(m&k9w__%3DhIf^U~T5heBg=o6dSo-mdwOn`$Zenzt6C zgXGwf^#jy37QFF}B|dp?FIELsqu-b15cswXFWSXJ$%uGnFkQy7k?u@~jl>_Ph>tSX z6?IPvn8|DhXzLJ;O(x?Y{X!+rpGBPb8{6SV`yUY7=`&Y82*A5?dm(n=9NemDL}mPO zo+DGB?EMu*;JcL=`}!ehcTq=GJ{DuL_QKINr5N~k71wMl@ftr}4bGPD_`ep`VnxPv zJYijff$i*=ThtwNl%>KH;~0>Szk;skO+b2VC{wQ+gj4Pe0ad^%W;COP&gbKypmsaF zyJ{?O+Igv;Dy+aryTt%GA?QA374U*9U?#v0yr{FvQ08c@Sy2 zo{c}NhWOI+7`@~Y{7DXJtC!Cq_NkWGLLX7*58S|VCmXi=1Z$l22qs*4hU%+Za{a1~ zkyG57FJb zm1qm50^0QgS;60|m}WF(b;Q3*`jg)&mx#qHsVDR57)#gPhng+a;XS#Y&!ioU|A=;2 zno$WUS@&=QuAHe{XwM6W3$Jd7NNZifMaQ zAo(;);deCy{)xX2n$%1tao@m`*FFc$r2ziNBI;(Fzs*Y?evJngUPjHolUU~6hZwc< zHK3&xF&K$WGdq-*bYBKRi~Pt%se;QE)`I7J3vqS*TWmdEA|821bAzF~wS7Kn$>TT` z1H+1Vx=9ST`S~1*y9A(nt%2x&md;Ec9I)bAD@@#U22<{}z|7kc(Xpqw;Fwy0Nm9zk ztKWl7{bm@hJOpL>IQT~A{qXL;AOQP29@%`ijwgic&V@xa-HaISwA0jFXNd{V>Xssy+wzG#aQ&M8CAVL@}|p_ zJ7kl!vd?XJ+-42tY|8`Xfb)uQ$w^}0oJVifO)wd71e}KnsE#3MmpLDA#$T?m?Y8$PT` zLmT5eG$UKgVh>-3ip^4pJbwccKi-4Hmm8rVVf!Wz2k zdBX6=Xp++xe1g1DzwR@aAKSwmy$+)bz4zgdfe_q!Kk<;&dD_1=LB?0gv38gO%|)3g zA4V%ycTAVO*_6jwa}eV#W|uc-plPNqUR%|iXHcVTp|QkWLB z56;zngcv{awFFryhCMw==d$I-V#AyO$$$>a+?4q$p_%TJmOikZD20_9$gp3nDv=l zS&Ir3bw0!cR`th-d0JwB`?J8MM`22B4yspKle5|gWc}A+gP9+1_A?N7%p_m>$3W&- zOn-;xbFBJy0wUwi@zsr?;Bd7Gu76F({2}k4xr@2DWRs=P7`OsWI=aGa?<0`X`af{j zl|t}DnxU=9#^UwCP)1Dr2su5kjXo%j-Zd0FEF>7)^cvF7<%9Rc?@$<|gxoVrp{(^H zOx7C-B|h6&vsE)}o!p4S4j)2U`UtGg+yo8-zk}3iFw1Q1h`r^uLdX|Aai~&3j4>7$ zZQqQ^9Y>IRUXMfGB%#}~R;YI^W_5!iAo6u7rhhMjYEyCsZP<*GPwQCu)+C%_Isr-o zjiB*sE9~iSFPQv&59+>-2YI8J80R+z zusAjhis#aP)N`^zbG#?FIVFVy`>x=ElP$2kV-t=tE^| zZ8&9EcTiUz(Sq0xcQ_W{jyK6TMR5ok>~}+GX)}g9>7&&= zN9zjN19R}3{sX9t?rQ^Yh-}#7Lp=9aXkPYda$6oZ5t8E`Lq&B3)AuDY-o%&F_98#RdqZ{H7k;)De4|Av7(zcV>yx3J#{NPH3vDxY7>xs2T7=94ga zK?)A(nF{e{RVe?o1JnzzDkRgcF-2ZH43i&YRciW-f1V57?5)J)g&Ck%r-OyKm#f<9 zu;C`{K97c>ANi|}3)bSV08>FW`!;Jkf%2@5Mev$()f=MBd4>l%ojnY) z{qcFYcp98@qa1E>BvA=`(P#>B8b356m?yzMOALkd_0KS2E6w{do-&nk5MEqx5Qcp< z75n(y#8In{(T=X3;CqiyY9qyqe{4c&RtDBiz6md?d!gpfu1rQ*`9Cd8#oBigG3Nr^ zfddXR_dQ1;tYA0xPP3zK%ng)mzLDo2Q^>PY4MmHwdzi_E>loQi!NU7o1?7~tJbmdc zv?p&^q0S4#gqqr_HHjq2P{K%^B%9vUAWOvb>)Nz7`0KE}?hU?umBP;u0dTsJxi*>2O& zW|OU8(cpnc7n4Kw!du80oQ`i^7zxR}s6TaM2k-JP?Rl4GWin-b0{Ofo{sI9aS?S3O6c4m5knH1!QD0$Cj|e8ex;3gPfL94zZx+z zub!)IcQS|N&(WLszBSQ>pqWR#v%a1ze0o>ZJRHLs?rdU_-RS*@HKQRt$=XF z3HV?d3sUNCrcN^!qz7kWS4U$ZHmQai+&2b_g!D9XU;{xMNRtA;?sv>aa5Hy%w6 z%5d$qQZ#u_ovf~3=sw<)is1LKH0nKsTG0J8bU(D5uEv8jXH;q26sT#BA5wQh!oVh! 
zYuaMqfm7Ot$ZXsgPST8*KUyg2-`qxYWKKWM1t9|2k8)X~Rus^ELx~8+OpntDG4G+k>Bj z2W0@rvGv_CPtkn`#^M=twfq}Y^R2ZZJuCR4ab@WD&IkkFhN8i(429GB7~+4l#t-DF zjP21IrmPu@k$>OhfrC!+ zw2R_C^NF?hN{y{u3W2J33RGZw*5nb`!U% z8PBBBS?ks(;*XeN@hF-%eQrRV^;K<1#8f;&xx9xvY()Py#0U9%IF#6=LPK2=RD>Qu z&8u75^xhWWtdOJf!%h}`XemS_-i3nxCm>~8Bh4~TbJuzIA;9|HsQ2HvXj(1hRcd+qmbTDv`5U?& zpdQV^R$LXb7(88SxMKdSKYuz0Blf4E!}o zB4#Q>iM<|)tN*eSHb&TC+leosP4?D0t{27*9wh#(C)1!-k24<4_F zl{K%xm)H}oc8U1X)mjLgXd(>K+6cqDr{ViTb3xWduPr&$4ZZ!(^SWEpFuX%3HsllE z`S5ANT389ro1%z^WCTWa@la>!g^mBU!KOK!7x$*~VgJF9@N_dIH?^Vtk%AYyc;GIX zt3Ud=g7H=-@fu6IJu;WfmM4+M6_Ol%%Uv-sFlNZ);yoNZ2s z&vY10XMVFK`#}0V0kXnAqD}KB(3RACsrGkaNtF$#3i8rE|IbKN@AhT7FqQEq=J{eamp&#Y`hVXrJoh|t zSl8gjKx>wMc|6w+p?>orVsQ`3!nUTi!jz?H7`(m?X_Be1*8U`BYGz?l zwXwMFvW*b#LGRZnM{w{zh?T*#Gwc70ua2zcaZ&ELyG<0TYD!qes%wzDd=U>1Ig9C6 zqbW0VFE2YX48B~p5%oiEK$qEJIKPQ z=7YsZdyE=pC%C_@fjBIrd~2g3cE~t;hgL$#+`-HsqbFX^ zxQ!tn68H{7%A^-9U~vigSU$UvkMyz=riI_ZPHm0FmZU?}Q(S=lvu2{|;#MXtr;cOi z=UVTgH>~`Jnp^GN2-2@Fd8};@VzNje;MYOQN}hxIp^cy%U&N*An0hfwObOHFa_U3%u~GBCj>2h$od0VpchG;ky_-vTJ-C_>==_OQ}uKV+@? z27z9+Txxa-UxZl*IpMTlJ-Qcu(w$(y{Er}Ylas$M&44`R;Kd+AmRN_zZISiUDm zv}Ol`RiDBC{!a|?+stC4JEP1foT(!-85s4)8yDU~gZT?~u~df{Z#Zlmup9CZ?gowH zc7@Np2$WToYn@musBUDFr=bF7YW8A+>s>JGor=nj972X3Wa+WfS$#5{7cI`Bw0IxG zwTE%Mg+$zP&rAqeS;X8O_JS&C0k2<4d+6|oSodNr;B+H!KbwbN`srZm8R||3ttBUV z1s?dBfCW#J8Cd3Efi1Bj?0-X-iRZAhT8&-&%P{&(E;J;(N2Ar>F-5qCQ=bxhrr|I0 z>Av$ym;H$?)9u9TD~#w27KXtxDqFU99DiB=&CxPht&Z#qak``!?-@ zk)I*TWgB>J_odF34L0@Kk8#s>V0~#c8jYj(ZWrnVMeRWK+0INJY$D_)yz|oZy#>xf z1>C5s#;}u-n6Vg9vb2gd_Lm5$u_s_=r%$kKrI}d1T!;F8F7*Dn#+Ek~P=?S-SeY7& zZC+c7+R*E?iy6V)Hx@x~c@-2>mEis!Ye8l^Kl`ajkq<;@QqH_3U%$n zRh(e5;f7fM>?_(B9KszDpFsLHm&v?r6j`DcYN#h98(Yq1SeOgi1#~{znaYwww_!?5 zHafJgMrR8{A!XSfyk%-BHl>ZEjDV+^%NA3j;&A>|rJT*~La^`fn>Q&!~s=g@3WT0}ep>ig)NUa14e{z5$Ktt-*rM z^|89!sQkknd)}4^u%sOfoo6c~MxVlSCOK&Eqa%2v3LvlOkJ6JXvEpqju-Z_7&BPQg z?DLTKc)kg~t^359Hf`q#gR{Z4bu&h{(_!@WM zT@*P>rh~KNB-nB7Cyu&nEF>)pgGxmU7FX;=U%G#1&1(T=@4--izZ%oq&*uG1t%aHt z6Vbm*8ZMh}C}bbCL$eih2l#lHe>`&=EI#)@o7%HQUZ!xLKiYwO z(OKfR*rK#&I7UyKgdK>{lKdqGYzlgV=O42$eW8}$XspNNFDH3<#b0Q!!xGN5dIbUZ zZN-%vKH=mJMgo+KhK-Azc+SgWOjpO_hce2WnsjH*{_oI+^80s*t>L$-7U)hc#E*Ig zYWl7;Xo%AmtT2WOow2Aa%jD|*^I-V?_u#U)0dzLe9h$wlo`x%Cvnj$ zOj-2Op%6W<4VrkIfiTq;j9PIE)UCFm)g1EGZ+)iFT~o7`Za<&^(@-<(By-)p0H!({ ziHrIRi)rIny;zNaYJCBvxw53v4RBIs83LaX7lJD60bkdGdKJ7#dG zxN`#g47>y9Tz_Ky6!Ka|5c|cBGWDA_qua$kF!rT^AU`aF*hN0%Y1t1poyy>V=_ObR z^_Vv98(2)4hUJUHA-UgOp8atX&TMEUCQl2;m=y(3n_mS7CLBb)<3#iyw*|w;jsxdi z)1mUk0&sR7fgbY{A?@;bsB*pukrP()=Ih6>yy`e>D6R%sR5{Zmi`wX&DOl${0i~a+ znf;xI&?HY`9`nY7`+id)`;R2dtysd6+dXB)ZB!6B*$WlpGw?-oCsed5#?Y(v*wjeA z!_}>E<@p?tY@L>;iaE&4-WI@c$Ndo0=?y%1OY_ddSupRDo#6My3@t2QL)qX<>VbZlF*0js9j2)VVtct}@AtaiDM<@+5_ zUA6|)$9{W;?9PJB2a`b3Ho{ACx&#}%X|C4Q6p}ZS7o$@-D!-K}RKL%G{L^Llw1}8< z7utf-P{{M#Jd1mOD`7^TsEfC*6l7c5z^+`05XpQY^I98_^*h2sHqVCHsZmhZZUFj_ z)3?)j8?kh;l@K`Jmldbn!nC=55L;vhW!n_6(eNZ|w4p4qR0j2}>p*{W4_A%qg2@M6 z(feu>w|Je*TR$-nS04Be^rO?LuiiqL$Vikveag+P-{71DhGJr?ArN7lP3(gmkUlpC zmA{Vhu-hUA9x~y%Z{~x>UOagIIQU%;fGIaR-~?k4 zGAqu4->0*vbDFR1*yjyazMhAkBmZT(bJph{P_ma-lBx92>3O>3@^7tO&&S&0W!OHy*Gpq>vY`y{^ z=@;0Pw8MCL66LH)MnJ`gF)-D82E@^Q)}W`YxG_5w3)?M6pQ%m=w;B)*iP)!v<|yh5 zD1WktXMT7MR@SkQzwa%6Orl)d`~F%FyVgQ|p@}fObrr1jk3@B=KvsBUDl~Q(00&g` zJI?9~9;@8AEY3(o$!IKJRE`r8s2@A?DsvvDK=mJ+u_)*Ur2Adr%Z(8N@039L$2~B7 zz$NmaTSEAxS!i*Da=3d2bDRD{>HP8zLxQuJiTpdxj*>v@#a5!nh9W+H(S7VQ>msU? 
z?X+dztwG&f2)5)+Uu4r849vT8qasV8uyYNehDyzFN9Fs>hL*&rO40q1Kjed?i z{oGshSh^oP8gfDZIUnnm+v1>WhGO86_vqIt2^7p)h!M=h#OeDXF4q=?_teSpSq&|} zvf-O{H7H-&(g9@=&9>?l0Z(cFUmL_Vz0PY@7lYV=&KE!r*CF_rop@lv5oj{Ag4ldH z^ewg&4YFgQq4Rihl17sgI}#G48R!|ILxY8zP;>I2R=R0C*PDLj>U*|gQ$ON*7?{A? z{+F^d3q%eZ zNBm-Ip<^nsHi`cun{^I+6bWeOo&kUUYbb`B*noC@Cgc*+e^Qo#sDGc%mmQ&b(9j?Z z?7xw{8Eq!0Y+vT}Sw_34%zKLR^$);$50*q2J|k&pwFM5sQ*8ahvkpN zgsE{*Q8SiMu7%j=JUNPY7ol!l2sCJdczWxdJU_Y|r}%nf51oP~j)bKRwI3$=xT zqbx=5XMU)>`jt5iy+K)?eY`nhAry2<25*x*?v^_Mhd+o%Rn%1|@U|8L<`cKprJC6s zyMTwnTM5Gpl5m1iIY>)vdBclCEZWim)GI8(=ks8YSe@bl592Y%a}d;>x&=*EV=*-8 zKK$BXBu*sfv~F_}xr2XU@^|9bT#SKq|Jxkatpd-lGqI#kADG=>3q8JX1oaYsm>Ka2 zN0BqN;0<-|t**lAj?2-aq62KaxSQ9G+Xe@$jl>Ag7EmoY&VE)I35_{7K=WWMys)<* zZfrQ>nVA?HgrI9_7*&SycLdJsBKDPnc`&p}o+9&2fr`0a+NFgs3-Q6Ct*B5Zmh&dpX)xO#ktPg&XN1#UqM5Zk<`V zCLf0!&BG|OJoL!z&MkB@NN5uag)46G!-LI5^@V%dSjYBQ+=Vi)E6YIoFYV&b_d;xy7N15*;@+F zr1!zYc^{K3((=x6?LcWV29k20LdX$wA>eW>1jZ@2dTySlK~*a)Ws<~Tuid!z%`b3v z2%*0EP*~*j4brLi5fSkN{h$L5+5a1o_m+X?NDnz8d&V=Q0iORpJ+#>%%5u*3JxQJZp$?T}fO;M7VO^&QJu#|dWg?$%*GUpt# zt+x|G{yV``Z}w{QpB;gt_isb)m{%Ay+#RbRpSTc(@Iq-RILZftOApGBUa7^>geYiE z?h49$UlurgGDg2}gXoP$V%MGq;waxNbWQ$_BLd9ChHeVp@!|yx-CuzJJlcmf)JF`j z(x6e-hcMIOEip#U!HzC=LQDEKsBm<{7qV`+Z1xVUTx2ddv(wPK$Wn|AdO|brL9ArC zDO2Y*^Wp^)LAT49;e;6U8P^S6o^FDR%VKHAkgJJK3LxJ=JZ?O1E~=c@K^1R+@*y|q zzxBt+sx`C=`iGUzr0lwC7_onMfh2ITw(Qtq${uT2kErty6#fEyFB%IK(bvIoJ_mS~ zPWQu9(2K@UYSKt9%t;V(C=;9a*TBriyRg;tB}|%PB)C^+LRJRt33`^@x}XIaRh*0|uO zM3meqVES{R>O(@~wudWg0LUME7VX4QRGiYBi(UX`B0Wz{uRk=vL{7`Y$(_ z=a*Wr2>r#p`@dpC{JxR5#!fU_l7y+g$KYIgA)Ni2?um!`LW%tZaz#y{8Twar@Y#$8 z0h@SszArH=!l7iSltnM1`S_41Rx+=WNv$?O_P^bre`6fxwhHIvk(F2%_!1&_{lhZ1 z>#&B7MXHUHAdzP3Er;?^MhtVs_egL))&Y2#nRsv14d`i19e#N^gs=Sr!(6s<{k&F~ zy0;a%(`V%s&bh{QgFK*U_*Y^MS&C&vlx;CP0r5*tgYMBA1#vuPd%^Ka8Z>Q7#TZk>v|aB}wbYO`rFDXu{c+$Qv4cmh<6JWpsAqB;OPA4p zvG^YEbMq}`Ki^1P&k~fpo(I=WQla?e9W1pkK<|ky?8UF`=srk`&axpWdEf@YmoMO- z{?>x>^%&-Qy$!5=(*Wt8!=duvJc#aUEf)Uz#iD&VY9Bs=2A@EbRS<`83GMQ}R>F&O zT`}J48|@;Tu?`O6qQ9;He!c}s(`vw``y34M{>)s-Nzt_7FD$;k89m2_P`849z8n!3 znFipLTWXXhPN5#|8ZI4E13T{Rh48dESa#Q37_Xze(Yr&SvnRLOv)R~u;4F@9pgiWI zkE|i`J2Xj7p{s}}J7gga%W{D)PA%A{#}$-*y}=CL?1FIX^~6ZI%JnlZG9Ar0ETZH% z`)C5OEjr`;WvOTpe1rHr4ruTom7dum4}WVWScaPknjUR<$&*~}`{z^CDNA{HYEMib zx`~$$y#%gqtH5Uc3^Xf?rt`hGw&BMSCb=^bhG}U&d*1~e-MV5ixk~zcEyn=bU#gdW zVcFgz@Z*Xc=+pljM&4bE6R#SHKJRL=((o0Qs!xK+ylLRrG!W(8{)F_iBYD*7yP)aPN8v=xRG@1_pnG4fynlTSAX$#K-W z`xyzN6Tgu+!7MLw#t@J@wZj`P^my=%wa^%_1PTw{02y`coIaB$`&KFzO)tYRBS&-( zqM1kFUWk)fqUzI4l=apz`P+-2pZ5WZhxP~E%-pVh+S~yX(*}pe-9EPr#y{=OO*SOm6hx1e93HIO0!`yo$h$S4>5n-3)%qzO|6t zF9@m*H9@1vJ$P`In9h^NDVm!1b1TZ$YaZ`{z=@2jo4&ItMcXO&>&a9!|?DTm?pmrH8gW_SeFX(#ZwScc^Y#Uw-Q54IZvrR1<^a#W07_r*gO-l zev29<@ZL+ee->{VVT|hIg*a_NIXL}cC^Uxjg}|>$Zjg3@`p}!`y}1BAm(IpIn&CZ+ zd5l4xS8&StJlITg;o@Fxu|#IdWMxBmLQWaPyiY}U!_C+rw_^j$-(#0kk5O4=m{$|| z8N)VRgRI0#EZ(Js%zw;8lMcgC^Hi;!nVbZTU+8slzp6d=^C8$o<>Rt_n;~-S4d!uU z0G>Pj3vj*-IR;NEn%lHs!rUJ?Z1y3jJLv{Nk1k_*kGbe~A{}~OeT$JIF%Zt=Fvl)! 
z(Dm=Xu$lJF%3xyo#;KrOMcK7?qjBwzIL!RF2Q*eXKywZ;YyKlI?8gk8J;7c`IP?Eq z+brhXXbYP9N7_b%6mr@dVyX96?9%@#)QgSa)6WtT=Oja|JQqEqKl6si!OXm;q2M;v z0i5KfLZ3HQV)oSGu#7y0;a(y}FKotJ6O9E`#0aLC{1w$*G|YFyKHNCH1Iw4|iH(|t z{cGa!>@Dguo_fUeOD(autrt4W|3!~y#PrKuu8?kj%iX#Yzcv06n7r)`*^iE*B(anT{n!Vcb`W6+kVuer$4GTVD{=Tayzk3o~AFl(=n7>#~`bTs>a}ruI?8O&}J+b^m892LN$3FR6 zKssX_-E%r<0~^E8dH-7M_nZ3Z+YJTv>W}Dlr$ zd!~Zkg?gL!$R*%^4=lW%GLPpH-kDedDzlY5AU+i&+ET^KiR68xXISuqn|K~>wP4k{*{3wJpm%xzz%`o-7xe(|h!FXOsF6+g(F`oE=Eyv(!wyBss+Z57D zKcK28J})+6DRy#dEqHVu!w1}Z37$tkLABZq^s>I_pEeWYw^o4FG3v6|#z5@qad@s@ z9V(lCvZ8J&aB!BT82NiPb~*bS+g~>joOuR|9$tylRhD8Dbsm+|KQXE9Am{?RU!?5w`4ZOR?QcS9P1}&f7K-i_pSU?@JAzvcF zuHYCXA3`kJcn@+WwuYQi4)R?tn3nPc3Tq#->Ou}i^Xx>wZ@s|tZ74SwxQi*xDA!PW z5gkUWF?p`B;QX!x6FfvHcU=r6UyE_M$35tn_>x#f{aEtEHK3ErAh-6F_S?-y(7yYK z&3=zizKFWAIlrjia2cfgN~rhvk`0>_50dTwGRe3XtR!v=R!BJ{6z#^TGJ7$iUplI` z-6gMVJCscTuNS|FRoUkU%=%y?WKI8x<-g~^l>0p}a%34(kABOxkGB={=S8M_SBN$N z7g3>kffd&&b3L^g%Jx6R$XNqePUcVS<4bprUv{Fj-W>h@+XP=IYu%I=iy7S?g1R;7 zh%C-S#a&|9PI}L`KQI@vLpO7eO&Q!dEQJSm{S2Cqw-l0lYti%X^-LNR#J_~@M@^se zv@dXv9Q|ZGk5Go-JkL^n60w{tMUS< zoHZLv&g{pRenw(ywLRvKr0?h{oyjr&8>Y-4Ctbo8R(xz9Y>6}!^Dj|mJHkYe-#m&% zf7j7-Weon)?+(WOF$V&+(b?Pd0u1q^y!-4yEb@|`t=MWNI<^^(=SuHjqU(9o*v2VF z(SGUtLjy70JdZVQ^}=u-h(I$g$yPBhyY?7=bkj`C`gIfRyS@kIx@QWLrBUebEF;hG zE=+$pn5iAVz^IxlnA3rys<4sztjQR1Z8LhD+>Dj>YcTgw6R*gjpCz1_j>+ zI&Z%X%H>Y@&zc7?Y|u$&-|r#njRvE0kC$9(TMKS&8lcOTF`&L1rWn60n)vnKKx#M@ zz1>S$&Ln#=XZA@fE1L#=7GB34PZ}WEV=&;si|FfJ0w&MyfK6^Cswz$Sx6&s(+xZ=j zK2VRA3+=_+mH%i{PF=+RXR|cbotSq}IC{u@Kr(*?kJgfps!a@P1phqcojZyRJB`Hb zaigHveKXW`Gk~(gm1tODFV^>U{YK*by*RCip&gyBqf?$m`V;u!cMP=tx#)d(2@H=d zgYrj8v|>i0E%_hIPafue>t*n<%L8=$FcgJMYjN75L}GD9YaMzfL1DmMFo|h^fK$}p zT{0D9-;%V=J*MKQi969Vuo16~u@YSTF2U^4He$k)<nW*9m(Sx(9BU^Zfx#d8Pmh z*Cv{4`18c(0jO4Lw4UD2p+~#Js9!k?>#_zw!$^Ck*m4Hb(&vC9eP60uDeud}QH8&Nv-FJcsLryN!wsM77fI}6)z*I;Gc?-ic zZG^yHJ$RF*4VZ)+godAQne$(Ioc8r8wrsKxKlz)8QnTgErsxi)xsSxKh(la8txyry z_#ZA9Y$F!=QLl7>fdHy<(u6ES)zJ;S^aF8z*%DrIWhBR}Ob8xIzN|$bG5S^~Y!0hK z@78vB)RtQ~Wluah4E}`n4vi49&;fqVFc<1> zSqsevmy@636_=0w2(fh+xslT+bSY{?gPE>cx5@+9_~r;UJ}5(%u}5IwDv78st%c!ic&GNR|&m$tZu6u9?dsfBlc5GY^Y#edG8{`@V>ilZ4|8PRNp!dGFT|C!CQE zI%FBUEFFeUh{S0mC8P{Wl9DVfV@amwx!;sz8Hps35g|*6BqiZ@|Ng{vxmsr4d7k_J zem|c8E@Rsf@I1#*EMLw=t>27R*XTXQXG`@PI+$C5nn~`gMvD=&|4e3F{?2MpCi`%z z&r4Y*y|?qdpOfd|IBP1=gDh>Xwr2KJyjJxAMTZ9vZ%D^nI*9-2mWR=^>Gh<{)H+Wm zhxn(TInNk7R_djttp6phl{z;IM)bp#tFB|<_-LrvI-B|!mO@L79>j+!pHXs&Yj7HZ zx~CJFt7;F{3mdSw!W!zSm#=z|17VvxuxiqHY_S*%it#?QN2A%&gE?4xZ3jl}Sr2hi ze@wiR1|>xkam+{?tbACD4u8*q{q)R(&N)!E?O%-hM4g9jpP=s^YyR&JOTJ?+_0gKP zYF<__sR&cuqL)u?oO@5@0(V%%}%=H677_SXE3qvI_ackM#8#@ z^u3Ns28Taava+5!Oc&1qXugJFbiQfuJBL*nmcqKL`yjL0f%|-mo~5#cQEvTap#Ha5 z&iTq|Y^&~pdSaTDv-h~%i9SOHY`})wJwf^I0#le-bE6+!#>n4KqI~djuBpkCQ_Rt@ z{*w-4)aywQw=NhXy^}E2;~-7^l5pl2J^DW!jDtdpKvWzh72glQHdo4XEb-9ZsW28g zcO1l~!@=C490Q@gcm%kG55|U(jVQS~m$lz|#5K##pjdQwRNRc7=#|&Z7W`%dWq0nN z?vo8MU(CpJW($Ev{o(!=>ZwN^Le0|t?0eoP+&s%n=sltv_Wr|!*C~HvV-okG-y!Pq z%FM~9)C}d##)9~#e8GW)!vu9^rW=Zx5W$_}+myF#7eM_!F@>`}E# zq19~J2O(i`SiCfaDfKTwq3SR7ra)BQ<`wAliw^73Zh`vmZy+BOi?U23reC#9+v(f~ z#*E#8iYJ%3uoGI4tG=QC<>~wjH25+6Nm= zG9jjuGA2a=SiSoMfsw`FApg$rQ#P6@p$*GGpd)!XkCO!9TN-N5#FKTRAiy_4~(0}4_ z&b9On1QTaM6PzgZ?tT_(oanqHq0XmU3s)YMk8>sTOj>dh*OC9SwV)lm4M$O+&Y<#p6hAk|Cg$^IU#XJ(*|1{^* z|C)`VcjLL^&+YiC-zjfl-wk(7d5n!;9%7qpJL~b)oR_xl1jAkA(C%e`-L_nUnT4w% z{PZ}8UGoQw$&Kgw7VgBjiGHwPwl&taWY%@Mg^@RE)U$DyT z0?Uky1I1q2FZQrPU;P7|`p0V!4W-wi^FWY{$z#qTH(A`+gAj0g0CCo5X=`I_d9&&_ zux`a$oL52~K+C6`W0bjHMQK_m2`GwhR!&zg#bnqvTnCEy(@@GN5 z|6A-f{0%y0L}IQ7@oeNSUNz2bkPTz|Y@^=ed_h 
z<@FyB`g0c7apNQ^Hrr|?ed9RA;^kmC?hftzE%~VNV)T!1=7Pe`fqG{p3pXLZbbq=( z`|OZP5}s*0<0FZ8G#S?2e-Bl^iFk|5&k$;w15uw2VX)f+sAvg*d%kwU&_T4h9fR z`nd6+xmYPpy1N9z@0CLe7mMk3g?Rt|9JF$)#+)lfm@>18Dbjz{n)(%CxOx(l4tWbE zE_Oo6$m`q&$^@(&UfuNa{&1qV@QU=}vGv-?Gu^tmpuiFDM&)-3M zq892OU!wlSILtH~0#>tLQ}^IK*YbN0h;^uB)6;HarsO=U`{M()`kC|N&lvEM$u~yD zxevsv?~dZav0kV;mBj*Pnc!eYYd*yG7*+(90^ICLJCA#)PP2yM&!@QfEHQsP+f0zJ znZsRcd`)?+Q%v-q3;RI~fj(jLQT0rYCjWPhiFoQGd*k;>0;HPhp-fQ;8?=wvg!_q@ zWbhR80-j>k&=@wJ+41`4##(te%}70r;fc>q5Cs`w&c%AzQa~BC9!p1gu8alW0R`wV zYAXvLuoeRAr-J0i9Id-(1nMswr>x}D!p+}pg;M(_XmmMeD_5X?ksqg4 zFUI^AH4c!7yVe1Vs@7XO14h7lsaW}fr&iy_YeBlPH8EGZx=A7aD3?_nN z%4@U_q<-^OTg>d-g>4C+EPFgwPTe2tTVB+1gS2t zjI*#VUCT1u+A*fqoR4xDihX8{LTJ7L3agt;RX2~B z{zmL>b{S7p7QpD=D8E&b!>QLNv5=qN!I-}i+4ME{QP<&#vkh)T{pS)Wv)4kPvPsLqW)$Z&e+5?cIfEgT zF>Io*V`a)th#lyQswGnjXTNWRHJwq=cL2>f<#)mD>LD~OjX`w_@prG2XTrjkwe^l> zPCqh1KYtM#PP>h=BzyXpxCFas&gqxi7q?uEr9OWS<_&Pe8rBuc*PR5bt! zTTVW$J2qIBQ8x1kw_h*fSJo!Onx2QTdeI4V;I?qjKGJ*gzYrF6_#aRXpUs_{YRuPW zo`iJWGzk15=KJiPiH}`Hg60nsrkBnJ*`F0$%78?s@l#{tu2M|7P{Q7lpW&^T`c?%) zz^%_{JR5okQ+GVU*uswx`t<^9`+5SG?Ge!HuU1%+Cgx>#d!m1;38oehdz{X%QEPi( zNaSX4*O8;^QeUv?u@*yT$8Zw+)7;eNhiJDb4YV$|vFgfO=0Ej4Sg>8-V7{428lFn6 zP77EUfN*sSc>oR>V@Ge=%^8p@jn1DbV??MqNPX{#rZ{`;QxpxRzVVhZTz}%#Wj`_! zUKEOW|GUGe%YOpwtuCQHehZUtOvfT!8|9*|vaD`&XBWa$|$Moeumalp<*dtDXaWY1SRk0 zQ9g!P?eFqXzd->Ny%NxUkph#CW}*0WJ;t6qOgXhgPQB9_Ou3UZyWPi~=@kQpi43qG zfa^V5LA>e$*Ip;b?D0lI%Mk}Gu8U&%r|w|FKesU{A`HVCx4|&$Rwy|Bmdhg^&EoS7 z&@yThy7lS-4eNh`WK9(`T~)JM%G)}sZNTdLTWpftVPX&JPF!t7XTPCPzq<)C1|?%z zEag#uo5jhD|7DYu2K?DaWgvgt!CH=ZVBnn@v|rM|gytxyDoDr7-}-Sir<^cP^bvc{ z-VG|B-C93C3*K~64S6t-IZv)+o-dCu^|E?MdP4iveXl_I?-N!Vbq_Kpx1lR#URBrr zz!2XZ5SN}!GgV8L-ZTpJyZ^z;wTQmsUy^5f1Uj`ay!uXi?cKZ4N6`<)^!OE{ zzJ15eLpHd1vJt=XK?<%qY9^HR4~O6hpWy1w)7UiD7@BOaf@WSWQ{5Y@)$12%6~PLr zrm-idzcY^WE^Y?dg%8>}<_3Jk?!ByAUt(eXX&^Lx{fZ&}cd^yF3LCF|K-UTMJ@33I zeKP15(Az%L>QiIUUv`EyY1TqeRULF5j)P}r(U^5ifm^mxw|ed2QL}a1z}0*?>bBlS zhl>TA>31D&Sh)}@R_0>h#M2nKO$*LLFEE)RoNH?!@8g;*+;c(1n~tG-f#G>hG3GYc zQZfk~uC1jG(@M@|`DyBm4Z;1}%mt_U)YC4i1=EBweA}Z4YVH~cK3jdzX6YQ5xoZI= z+#%Q2nefoAF$ghZdI|+>1X>k>iI| zp)P$GH@k}(e74OZr%Nf;?|Dc;mVaQ~;3`y9HcC^^-Nw$c6gPBJy1=((T?oRp6y&S$1$$|`nhGu;d9V`Y%We@*$^}EO`r@-2=TUr~ zIuuKXF^9-8oIl-x#a*6h`=GoSFu=vEHapX zD0RAne&+(+Ed_OlIpiO?2%cYgZo>m8c3-pM%BK4kQBTQ#QbP( zo_!8@|Efdz9vkMTSb;(3Tp@PIV>a)x5}LYGKYhwJCJWmLtaV~E#?hR?(1lpA+H2e;A6wpdTO72=icxZtyg0qTqWax= zkgr~fXQ`JGcrgn?-(O&2U8Od>$VSl21*uYdk;^D)!?3MSpuw;UWX<~ztN$&buj^mH z12@rqyMa&@f05ne6I!J23*XkL379DaL`@5v>QS5biKx!#yr zF9Szc2N)o-67=6IN4fY@PwC)3u-bEwI#d3b9xw|MZ_v5qy)CaFF_lZy^v6ND`@kW0 zCU@KBDC$yd(D8^7=EV;Ot3Koyp0Ow>yNM?}i@XW#E}T zlDeYQhqKCMO$*c5%fuwOt+WuPW?h69i=ROyU5|-F%+R4`8q1m)gFfzdkVv!OZQI4d z)mCCWE01cY`_^EAWFGYcMstP(XfAZ+3QIZY529Vip>1U`xL-R8yZuNSRa5j*^hs`#8&>Sg9 zR(@jYV{bW&%-D3`mVp*B99Uu1d9;~=QZ1D^Ju!yPu!+hvo`V6$+4TSuna%>r21{E%|(UE3UPCa8ly|ovo&EO?!I$Z2|uD69S-D&5N<_H{AFnU+K5 zHy;RVPsWwA4x&hI!}3#~qQwg>mJ=ht{#p*X;Y~QrwNsoZ&jqosB`Uw2;}QcT;CJUN z49~abmlfCx&T9>5H(<>A*x$pEw~5a_Vh3DaV=naRoCgsXRowpKi^NCX%=87j;Jk*| zl<$-2JP?O9#}H+4|8eqR*23AQS`>+ngYLl&_LVYAaeN_hJfmQ7t{N8hcnoJwm85k0e0s(d44f5*%9djEC$Fe#)fcVu=6H5BGKyH(!*Qi=8+woZ zh*=+(LV|e_pko)ly=egkX75AO^kgXTkV}U=Q`=)P2appu8a&;kCuC)|!OFITCs+s+o5IC6AQ7I$sr>HZ1PNGk`6oW*WsqTe!H5}nJ)xc>wW2#(q`2E zqhz{!=b2{VRH?LYE!OlMjgglhLXcMu$g;>;lW_}&HGe^~*CJlN_b*iJ&(KO5-mzsC z_hJ9}x75FCLY3N<`gz8@tf;(Dx1a+>Q+sldLhO|9f1%7Mo0I?d9^>9Lp(s(zQcP}w zyL>m6ENcUG-{)BO{wRjieyX7L1@&!ikX!CP)RAB5nMVNzou7m+6R!ch+=)@sXA%R` zkXP+H!8v^Di`AkJ(2;%(6)rDe*HS%YAT?Z>_9pmS_5+2dx%OSK5g&JWDKu^FipJ7h zkWE~#4Prkq@sCJsGH4_hrwCNr`*MkU3((#22q+C6qi@V*(Er^o4VbYPyWi@7`#;{G 
zGRuoCc1l67Ummk@X-(*<7{paar{dY$H^4^K4UGo`qNA@3&BvceWzFk2<+36cGSq_D z!?_Ut-5PxU3_@eC{^&EFcm&J0FgWlQtIpJb2X&DgPV9yB^;5um=ta~C3Rbb%4qd4W zrd*wk4L=7!&7K(WvCBX&ScI}J?+g5-&q4Pi5yoz?;Xj4i2^Btbv3%$mXc;>m^P{L^ zzK9rD|CC|HsbcKE#gO-@J%Y&vO@NnPVMAvHIl719m;rIv7POzMmXedjyf1YB+J?c# z_0W?u7yO=0fc%hcxcuk$;9+nT-P)VT1#y-f#nw2!%|h7E;$T{|ov`rWYxEF^wKeBD z3#7b2URX4$^oO)AK8`Iu-gNV4{E^_mCZq|LuPh1bBH_G-sWior(i~Z?E+=ED{NcD!kA(tRoeiGxht1%)x5hVRjk&|^c{a(K_ zk3Y^}!At5?STE-kk0Z1xQYQ?dyT0?cOT>InN3rb^F3|otlLUV%oZ!0~!;OD*OCA$HT)(m?1;~Yjx+h_U|a$ELzf{$Rsv32ZH4t4&3I{7BYw``27=yYlh%LZd)DzP z8D|@ofHHuw{@ZUs@uNQA`Qs?Uve@M|sPp(4p6vb! zf1J1GE!LZ0lx_rgP8rRqjve(hjoC}?>;JUyJPKcopiJ_CrMT4HidV3aOx@N>KD&JM zxnu~6cm2>qPIpH2Y{=|#8I_y8h}SOSKFrwx&08x`8NQNh>!jULAaz>qJcP2{KV#aC z2z3832Se8mf&(HOzT)6u5XZNG9v^8HrTs_k-fkz%oHviU*8ia2>w{o-h1|H*O{nnE zk_Y4y#*Q_F%7N5bu!!Z_ZR~jUSn9)fug8G+A>dB`_rze@Nm1t4Ke3W_$e*-f4|YKL z#>=?hhB(>L#0rQ!44&T$*l}jWrw7e~9L|QH;1>^aHyFAKRx;+fxJW8OuSdcx;k@g844&M^U zLmK>aRK=yo=*&)G+)&K5)8TX&%$-V0?jdx7p(TVaG+EchMh1g9k; zVSi>4G%;J|6=Wem7gNlvT*}SckpMZf$-(|53_1@~!#y+-7XDHJHM@SJZny!T*?S(V z81)I`XfLn0x0hQ+eI6(3(Mpee#nGC0OqoJ{@S{8Nf|oU~zx$qjcx)~xEQf1952c@z zIu$zB7z?dj3V8rKaQc!H_~wTJ-!P?o5?!iLK>1$^;``S+T!|83^MhWq>mGGK?#!guso>5dSI~6UID+d8ez1QGA5G^`iNS z%VBBNS|^kmM$>GlJ0}tSgEiLY!D`4Z*e`se%+V7N=lOtJ=UfQsUI8+F3V8caZ+!G& zPLGvb-ydnHds~fdw+2GnwjQ+i-N`!STOmC@0N3q32cF8QtVr<|ObK%iN1EDi%IOq;+!>oc25HTWyi))#Hnpm~A z%s&CLPK)??lfOacgoSYThoO)ZMBU%RU8EAeQ!BgS%7w0|W-HGa2+GeJnWxoM5Pi4j z96UBNmzED08s!Q(vKz2?+*5QBsQcN-!?K!dkp9~=fW>y`mb{1Q4jF>7U@GU|>;HWF zT%0G$A}kM>PnB>MQRkTQ(mN&@I8Lkk z+@0y{mg3tRSJ1D@j!)=(g3q=_Q&+*5729s(@Mjy|xPWFFFOC4=()8Wd>9 zO=tWnOulU@w8`(YwxhnVZGf?$cNogt4{k@->I|&*B%CT;14LteB3A`OTK{C)CDt_yN`c;D<`N|Jm zc=t5O-9u1iS0XibD5h*Z?JGxbkPaGXC5V=paPjkvgmH(+wQy{gR-CPtHuc?$d5chUzZTE0w-TxXTv4+p51JBIpo;q!RSzCWtB9W@@t?~g zQZ2D$7BTA{UX*ICbmii6YzD33k@E&r;~jfUb;v+S{rd_66C zOMXhdmEary1p;3fL%X#x-~YJ15VYADUb?12+DbF{sw5WDtU7q*ZNXcJ_i>72XIZfB z4CdZ9+w$N%4R>xg;QiJR zOV-V5p?d7J?9ibU2#P# z^WBk*Q;1O)HDwz*%$f}I9=-<8eoBm9b_9a=)nY}WFS@!I!&~a3O3e1*k3eEJ=I3gi z!&gFOXfc9g0;UxE5|@1+4v8}2+c(ndcG5ePEgDexGM>7qYv*fAEqCJ7CsmLVe~ixU zuQ{8gd01Wj0_QpGMn!ZAb&dN=-HzSmJXV|~mO>uQSw6tBl+)DJsDgklJj}e&iVnJa zoTuY#P+k3tyQ(cjYx1Ib{QVjgW+OPyr+qPh_)!%0nhCnPH{|Nfk|th0hbg;0gGXEg z=25P=@_aJ{G7H|JYbK^nzJYB${4n!gFjsMC5kMCe2G$m_h;z=Ea-|s>KBuv^+JTh4 z-2+uyA~~Cf)tFFm8l)As(0_L|R$Hc{+r>XYL$hnQ_IB{o(d%{3I${PyYuk1lz}DEy zI5qe?`4*?3a#tx-ZfJ)Bu0;zIPod&XcTP2;r?h=>Go-CK0bvbhLcw?&A@+Sb=oVRW z;U32zXYE0lcbB@hmb;*%B@s2j)3u3z5-aP@B9{5CCpZ<-xwu<4h^0DdRXIKXM^lC% zj~JCnEm-r`STN3ZMcIWK6i<0Ms`>T>XdF*}KAVnLldt0R&|_GCE|Biq>!5dd1Uh#o z2Sv>!u*e<)p>d|Xtamu4oAjQ^QwMM*x0^Aw;~L}}6{1}G3v<_-VV<8g`riV0NZI{e z!_D}4PsO~))mnUh&6Hmr+6MefGd^WXInGTJ35^cMSoL2eLm)P_GvO6^m4#Ka z1F5$+5GzcppA4zv?;ht-n2h0hKDeDYM+pYcDUZDf-}fQTXxkI4 zx^2g&Px8TrsnmT6%wj29&SGNuHE;{r&N+?03gd1iL4EEYAYSJNvh=@M^WFF8zmN8U z#Wx`KVjK4T1%In#n2%qxMJcjR&-tg*Efgg40;Is)?Y!_ z&!sqjsJSpPzX9d%XeRLCS8O|BfwklTZalvnHREr9XMzj!m87H6uZWv4c`KCUHbJWX z8~A-~Mu)~smb7RSIcfWHl87ywLir2I8z*v$uN^{t+jgmHMg)0nGci5wF``!~>g~U1 zSJv7I4Hq-uS(8|3ynhcH>K1|Pk6Gy4!NCDbk+3Bu0$a+uLsG+2^jYPN({CO|<9)-3 z&3b?f_bkLEgQ#o&JOI@%PjDw%Qy)cT|};(l}5(NpG1bmTaAkAFzM8F~+gA7c(S_JZn)w^Z3Y z3Igp{BiuF_{#Lkn&N+0ya}?Kh zCoj&Y>QOUOnjzh#8b^@3q@>dy-00jh?#V#FSt6{T5XD5$k;G4-E0XhU3m=K;Go5ST>%z&U8*+cj5=CMhuYpF1n4e zYuOmbfU;R9qa{ct6&Bp$RP z!Cb1ELhR63z;O36^!g(l{l>JSVaY8FN$7?$$fDjt1Df911D=r%l==RSZo_ESF!wg+ zIf$GmE|j^Fc4{{i%0V=(3${!YgUnTq^0K{X_`343rf4>VJv)tw)$8uWcLw>JJcOh zYi@z8Vudt7M9h-wuSUu03^}=AN1e$?w2AG7*&Q}~kD5AJjm070W3`9aM|0upcza$k;T6{>`3sco8-u#D zYuLaHDOhAEH$0KI<@{MW6_;ufj+KKno@NX>JAbAW8oKe_U0W>40}SehM%#Tvgi#% 
z4szO!U#Ke;15vsI;GGr<(x4P@$eF-G|1js9#qa~_#gsy*cR!8V7vIp`pcOJNxZ&uAuh5?S5juiz!b$Tl@I2js zuUdABH5A;#6IV@y0p!!VI=lq+d178;W+1pEA4HFN6}aH~G3viN6+G*H3R@x@v0O)I z`AI!tx%ds_ZKLx^t|6%Ftcd?5l2#W{u4c^00=FA?v8}@$G84vfuJo+WbiTkvtakxi zOli6Uwi9p0>{1iP zrhcHEFpt}!iA#5jI1m?%A^gS+3_oiql)W(lS@&dVXoU!kt&Dl4<8TPtGYyyhRtSDy z9)Ny|wX|esD<&`64U&p3(7ieg%r+8ttyaUTZjXno`il^EJ{g~UG2{Dq`QU)g22?!u zf=-Pg#?~xA_j#j1@u^xGc-l}X84Qql+nE#1>duy1KLHgl=M?WgN$-ra7kV8w5}IDk z^ z)mA5_d2(6$GU*^pefA0z=Uh3r%V9XZzLVVEhJ4PJd$8;21<;sRumOa=A7OEI#r2<^AOfX16|LG`^>I`Fd@U+bL?>s%=7*BA;4_bZ(Hwo!2P zR3^E^_JEC-1%_r^6edB3O#U=UUAaE1=*bc+d1gudJ7l!eF?jsWr0hK(LL`Tl+1d`HEH^w_}oLSr$;Ub z28Mjuay=yf_X3x1ZosRX&w&3=U%1lwMttqxluL?DLRYCLMlMaqc_x+6YF$e%*By}f zYZUZ$*a>Z~A~|Jl2{-1416s`9&k|0&fveljVE0NJe#9#BHEcJ5nv*+mL7M?cj<@45 znsMUZaIEb03MDCjF+a^a>^i}apVvVd+HWVJ+r=oDu;(avh~v>wbqs?ZZ^y73Knd)R?7l`*qnk zgZkXDj@QX$eII>@UG`(Ji6EKTi^ckV#pH({(RYxYpl{yKeVuD61iI0=Ty>rETlE~C zKQZIoWpkk=>@phWKP7K3&G}Ye!iWQwyz1_E)UA;ZYCs;O(rhy zFf=qHmVn(hjGkM74G*S)Tk&Qz=G)Opla9maoE>q?f?HX80oqpOa z{E`cX{BnH*+?#A7_`jeIwQU+}Sto+Yn@of*W)^(T*=Lv-Wdy35@tE_h0iGTa3Cl0O zfTnQ7*#|#>@_r-dk+&PfukUIzHmOiQ>Y#SdRC7MMycTQ{R1jys59XD8AU4N($fvns zpKfw&+Mms3-TfD)zidV4L)*E!U3WnwJs~wr`~;zwI0Uf`)K32l84of+|5eOcsBcgg zy+pgt`6z^3Xom&ky6|=Rkr19S4r(*9VEMHUa1J0|ZS*#DJb4r%ruSybwTn5)m3)vF ziG*|HS9Ba~joSAQF#Pyz^r^pt$&=2Y)1HfzW$J|3xmUP_CR*4)4w1#(-ecQ}EOOO7 z1poJ4v0&(8*6#L(Jo9R>YAr?2JJDQba}tJA=D)9a8`vZ!V@20yG|4p+OqN>kL7gGg zakC*OtOr)C?}z)TBO-zGT;|L>IC{!6bTsG!#m~DyxbazxIJN_f9~=d}Q@C^i&APL5 zs84^dmCi~(!~1v}-tl}k=!f0oI^S55d*?EwY~=B+$#sm5Jqkw)4f(T&Dsbpx;t>1Y z=3IUwUUEh-yqjk!6pYwJv*y{vw6bE3@xMT4_l@APF%`APAEH;qdafo-g4*&U*krsL zq(^U|Z+snuzwQEyy~@!(z6tRUE$VvKGhN?yw%{b~bvuesUMj|?hPJ{M^BSmHX~c){ z8{z8X2iQCNBvv>#qn{tK^CtLW@bZVCK6rqYhPQ#Ne>m5;P5@uCvsm!rExSc?6Q3X( zoIA^w?+8|-^V$*&{btUGjkt?1f8UM{FMn`Z5Bfmh_eRh?r(ODieV79|=+-w3E#P0U z2(lFRk8MCzKn<(=QU&c_9$~eH=AHS(8t+SI2<=5^O(_M(19n2b%XZkCYAcLOx{PZ3 z0@RlX_jw8=Xm;f+SgHMu81mgheu-`r?RWzN(oURYCB)S+& z_mfj^#3jmWtU!15UaZ_gU4jMOLE|Il&n>6jTU8#!rzXSnk~Tt4sA%``*!VQ(b3PWlG&y~EJaI1RI6Erh!BSK(8DmEgLFvaUUe{o{C- z>+@FthK)$4`CK5*x3=M1u9Cm+p9?Ijx)akI|Ao%)*I`OwB*yx0#UOcCP?-gD&c?CK zC9E73rv_qUNhNyuZs02B%tg;Pl%04}%4KENfGqGmMt(^`kK~W2ztmP3FmyY3ClDu} z7$f4@=RhXQ&^EeJuKV$HNZ=VdG+coNiw;6vJm!`DfbMUN`EuEN@?TFy%7<`1xoMa< zVFL8-<_fYyg0!*cR4CXohiOJIt-SXNuHfoFEOy>j=9UjAiOkT-zwe-P)Mxa6b`C`M z$yw~4j5D+A&^Y}uBp%-e#s{Y2vmHsWs|#_Wk2j)4pjgm+_ zR_|C&-P5ORYJ&#u?=;~fZ4SY4G3B;Ke!-G1`OMdSC#bB7!D7{I=F+DTB)blxxU2~( z9(!Sgw;AO3)nMg+_b~R|bIzy16r(baVCPV}ysstS;jRLZZhQn<%1oRiKSsjel#hN+ zpV8}x+56}3u;%z)w5Yxh`NT$)Oy5098FGtdF3w<~M`Ah0yB3&o>@-Yt?|?4p@C!jzQt;22LXhC}4+%&-^E zuC4)Z%?j`{?~S|KuOTfZ$fp=8jk1}H`vpTjKWz`jPOM;kOFlzMuMWssu^H8Sqd1AX ziVH0fIE^eqtCUf8F}tC_-`Sab4s=g{s^)5&t6;&JE}-`xh~E9bft5SWgo`(VhW1wu z&1$x~)q;1`t%sLO647`S|rSQE{G>SQx-&;#lsIDOX2>notQO53yuf-fL=X|Q(DrVFir%0 zKM-SZ`6N&`Cvm!m;hbR~OJVr}`h5R%2OGC8hm)$;IQOZMko$mg=>4u!4&)$Myg&%J zehV7QbIJ9oLC>*{P!xU=osV{MzQR6~jB0?ssWmXssU5Z)djqPVC0Z-$wGY!i2B5yh z*=>6-=BPY=WQ3HMMrVKaz++kx^qw!B1Y&8Kzm1|D0}!ToX} zHVmv_Xu1+q&fmcQV-cr&eV>c_WgHG}p!=YGEmtt%xi)hJ<&)iSa$+AHI8Qo{uKG}r zee%v`sX zdYS7`vHdGX1v$|iO@}&@muM!SW$ceB3B*-b+Qx2r5f`YmO474?B|@GWudb9 zIhT2MA2u#n0E&EGYqc+%?gsbZ=4vxO(&95Dc{gIlJPRT6>vL!;+QkNK770;7AE0eE zK!{g8D1Dwn!PM1S&ETX_H3P_ZIWHXy{~~UcCvhYcvDmPrH?-L)VWIbFVi&w&MUUP< z;PxwA;7wbhq^yyJP-a#AI+lxF^Ai>^Re1DbRnG5ihrU!b&DP>w4W?W>B?z5g2TlY)|Mhcw~~S2^sXMQ z^xH7;?Ij#XXX%yuZiCOrI$V}fgAHxe-JfqROwXmCq1c~^tuBlbZNv6gG6P{jwi$Bi>pFu)GV3oMdV1Q<|Noi1~K4fe=1vA}07#&gA?q5dBHLulTE| z(WoJ0a98a1T!)(WOXN`43|7)};O%msSu{T6S_ALEgz**F=uh*oS}XGN{?6%ak^FUQ 
zXkV{}s*$uKAm)Pe3oSO;Z)B2BT5Y#kHz0+c!DrjsP%TYH<=sE2XLOXkps%CUK@W+U z)e!u&6(v=lu-;)67M=eDABNcSL%)$XL1!+c^!N$NGy_pFX=!1>r|(REZxVnYB`oBhy*sLD{BwMlY9gdfvn4ls4gT$HA*3Yr zr#oM@XPf^W^A z6rBD?n&Q$AvSz)4l&RCmRZA?oE;6s;eaBb}Wot@8c*+Uwb$SrSXW8-HS2m*f`C4tW1^xaW843_b3WD; zqhb=l*+~lFGw2y~ya&t7-;A9LFTp(fqnNl}3#IdRqeZe7L#&Bi5wsWOKg|HArkfaR zaEpZpbwK;L1=zI43%Ygb0L`dkZG-+Y)7ROKlEw664MUq)%E(fXk39*tJBg#;^#uD= zW5vrBgmTjhZ%{6@jlriS*p%6iYg*(CCr5ul?SXH2j(W4M^WP$VDub{``>9*7klTLE zoR6F4hAFF_aM9Oy5=Zz2DB6FbS=bM)nm&i3dKH87u;<|XK9U9GiothqC)VufiIIKi zF8#%V{qul2@FnElJhO+LaJvstvPU2uxEi$!v%&aPPq3NW6=e=9u<7AUSiGkJqh^^3 zQ5FNSR+$Bsaw9==_9z!^RgCfpT{wsNm9X&a70|?=(z;pwi{bW9sA^J`LP@fONBi-hmO3FyDXnoqs<45rI= z{*R+`4Tv#)|9I1pN(Ui_L>PzI973A=dML?ZkZjHptqmJHmQZX`NH$3sWJE|LLz|R@|MmO7^TLao>Ur+_y1w7f2c0bgATuBnW$7A>`1yz{?w?}C zK8fTm+ry=MYncDoASel0&cX)eq2#lgj|}_|mf4z$za20WdlR1_kh&x5y3;#t4|Qv| z{sK00|3*EZc-a<{Oi)}7#zl5Cf4zHeT#IoP*DAjXkN;t#zjj4m%?j>?tPFPO{MtTq#Lf8XUAuPnHkXCj2}ab$t7woyLmsiwp` zlaIgk5mmR3qN>|1Y$|^bfz@^(AL7lAPCp50zYV6`<1^O3?+xq|VJ1kL7qG;wbWgau zos}4_MC+3;F?MMWbV;DRPD(YaUH%LMuL`JZ(#OQZ&DPT_VPj~9()tpR(!#zSsCbH zum#p-Qm&$<3%=+`y-1ak+cwG})0kK$7N4*Q+d&tO#1cu6sb)FzWv2GT19pQM3VpGq zZU=MP)(N!f3XS4b5kKmD5whmHq4e(qV9HE|dBZ!4HIo9+>BCI~vk54jQ_F(u??cs$ zH<)whv!>+6V)X1b2b}Mlf!(?LEcvq=`ZvGC`r+dscJ*l^l^I!wcH5p0&igE8P2rL z2BL1K0@W6Xqn-4HieFFBtje7j!{IXX19|j3zKgAc+qmz&Ldc$Yi~Lb>yfokdsnP$%tYHwL!ds)9tv)F;q3hdn0#dwMxDunq$QcOqe?{6 zhJ6q}rvl|!2Q-AdwWpX!Us zH(h0kQ+Gni`tu06nb>nZT|(UrMfWwAz$q*oHOr$=uD6v3e4?E}TmbPoez=<7G#7kb z5DR>oSVEs)$obd@RckC+gwJ)z>9zt(I-4=+q%iJt(Fo$_mSf081#TR-7wuM6LVoxe z)D_+35xcI?J@7TFTkrtX*RRT)ojX&H`wX)ittZC)6AQlK%b0fRZrNH7Qz7N?PPhZ* zczRk$|ZU3;6PJ3DM&WCvLx|NX7G6bFEQ_#0xHJ@c|DQ*#{BQ)1ST$6YgBWj0X z>?aPGD!ay|?MxuqLVN^C=IiWxh!&RFO&rh~Au z{v`QJT(OY7g`~EBA==f8*p3S@d1Ma${=rgonQ;~!&OSu9y2DJ0y;;Efc(h3!!Me7T zgEl5x=98)qUUgkyK(e)@@J$*uo1750Y0@ab?;ad*FPn=z+&L8qroL7ddSbGCz#Q3&OMZ^6la zL!f1HFO2>94h)w4N$yqM7js@v&IYjHhSKHG>N{n$|`2y4L?-IFl% z`f;?_zXu=B@duT37dN_j4%D+`-08P@*j087Ll?gRd|8C^ZO1~OQ4*@H3tUx>mO|sy z60FT{WSY;!2Q51TO6q+?+}{8;xus~gsfoHSQQR|QJ*Q0?dvT9B4f-WHtF-?HXWHoENpNaCNJ6QjFDsWmy-tl=yWL~8@*!a(TOg?o5r!;4P zGI#|qTCxvcG|<=kW)mv+K9f18WI)TxOFV^Kex}a_e7x95(3vIj;_muF)a|<%(z+PJ z)>{j6^Gm`0w29F3Z7N3Bsko0e9|Gu(VLq}2(@!Wda@tDln%ambx0<7L=|GI!VT{!` zvcPWWJ1!kLCs)1eAhB$paj(RYFxjMo*!<^DjE(yXQkJBHBD#Y49$kdvGY*nZXdgK! 
zJ9562WdPc_I&QWW!!O1iL?J*Wfin zmL72d713LGV9Q=Cw;lloaRpfZ*qgi?4lJv$si6D5ik?whVSZ&CI!2zsEc2@vu=+Y5 zLo-V4ph7elmjr4{b7uX#47&C-6y`>gM>%&nnr`2RcK2$yy2wzB+;IVd6E5=FDR+q* zk*7&JD#yg24mkhXIhdDaBuf9D#hhVSge%}&X@`Td z%ZbxTnczD$NUB?u8)wr#Z3Xi@Y9<&|Jcpd(XR?5dBvd>|UI_F@FNlnruoV+I$4-i)(6a{{?kOizKY?*!&=u!+2$2~2)s z9y5J13Ro|FVag&k1bo`Xw`6q?wI9#$lpilpK4c8PKFW~XV<+j1c$@{M?F2iQsp#a^ z0FpyKuxEQ6wE=XO(OR^Hw>lQ|C1?&B3>Ft&Wl$L5IS%j&Q8;S z>S!m??j~i}>3&_&>ms`Bk717uen-1l>SVg-p*q(Rl~r2$`ToG8-QR&iXUl`VwzFz& zJfuEWp~P{CM)gjEX@>M%cJ*M2$Gy2~c3+;Xx&_$f9rPq;=h>gts2<|1k^EaCOSF4{ z;U-C7_u*f#3@{PfPSEcnKgx;|=q`J25eB&a%SV-S7;+d~FIr0QRKwv=czG}l~8 zzhC812E+l$CvVzE)ax6Y<09G55*Iyz6X6zuO_Cm$?)refO>ZDJayC|O=qLcpz$Fcx zgs?H&@w+|o5^WBkJb_`b<4g>zdI%PaM}bGpC)_a+;o#2>Li=$KP;57Z*2WFw8;zlP z{!nP^cMmt-uLBGEe{S~s(0Alm22cX!!Gl578jsn3=!x}f8t@_Q&unfSft38q;AHig zCk|*vcU*<~=Zyrf!v;8PODZ~!-hxJU{xqL_%zT=wSxn7I@ajd(!`e(tc;62&g}hEq z0$RMVh4vgd$~}#R|FMWhJXeB|yMdr-d5Ws?&DdmKOboz$z`U+#*;QXKO7BmcL}Rfv z;~jMae}W`(9ZJHNYLxMA&}6#_ZAN%tvd?tTns1RQojYQh#u(P5h@d^&L!%v+Da)(a z1-_9z`P+}*L2`c-1neQEWlzcseK`a*>z&E3x{r68W+g7u3`E`92`D$cpvhWD9n1SU z?9c;4(c%Py5aJ%nRE4-=riCy=bBeld|qtTM?Gfh^e=yjC#2DFRZ zcr_7J>WT0wma^H!n;^8nS|~^=WKBOOK-G9lAvo(2q@1=EOWLMj&1Wz4UaKeCy5&H{ z^7FW(UoPe(ECuQ0;~IavHVEnQlDEB%rQEX#uiyQhzP47T9OjOTCL4?8e|%t5rpKam zjL2#doXY4-LWP;R zIHKo%@+!@R{7L=Mamio^Z9jv{j*vI@1hFK%^##-Er`d?v`hxia@?kkk1b^2Ja3$GP zP&ssGi++EN+8KXwMfdwu1+*0GW)RPE;~rW6?Jw|XL=x1|=d_dD#^&9Qq3q#v4172S zf_q|Gd?pyDSk)rhaGYTlP>rsflJja?OvxPU6u)$1uQ+ zvU9>x%r}k0J8efHbmTqoLp8{I_@V019WY>N5KZ?E_o6_I+NgodXJt?y7X-c%k(p2G zB!nw>WAU6wDEV&2%m2N=Zw`5fsnO>!vFrodNZl{Tfdr+Ri1=`wo_kld|n4JZQU0%HM!0_9sI!bk3!z1 z(25Rxaait8E;;#K{5{l2C~@h-Z$BX}-u8~3tBj+pfz(%B@88!(Fm+{aFwSi!pflJ+{%FLsn!v$RC7h;NKn4 zIw%lqEv+!JlO8CNj3MQ1J32kLAjZWrc6KbedTeFHnQ8>vy6wGv zSoJvt$%)gUdf-jSF>j?T(L`8gttZ-Sb4BS8{ahzsnsd%GWhI|zXXk#ITi?BaRc(c+ zjC!d#xNHX`y)qH*Y$1p9btSqlyUyn}UBl{GCEzme0Xm)7j165c!UAXmCmYJQ;dNqM zR&w8j42(Kb3BMDIJ40$DdiCufHa&C0{sE;ZKa=kokV#ysshz|z%fF*u`Dt0%+;&tb zXQF+lr;s>!AbmYrnLIxe^_X*WIot6hlYWM`;Aqx;xFe|o`9IOv zRxJ_ahL1EW#~(!V3=P^A{ez~T;+b#CO|DM4DvQp(PYml$V(qhMDjo1a3#%r9a@=>DR z3T_5w!jy@ZFuUOf6VJd0M>D~i=F~Ch!ft$e-JH6h>{*7wY>KV`+;o6#sbwO6)swUB(7>usIep?`N?7yW>!u(of^y zyAex1^+UP)T}{MAEjY4q&@&+!+NKeUZtf<;l$qG{I2mnRig`W#?G;Zp5mr7Xk6-;b zO_t&#wmYnV#6jmkzP(Yl#_thvckj|%Y80e?FGR_g5uo<(gzcrvp)!@`oF@ax;WH5* zuW1JLkqAiYvjY#E-JZM9v64Wji9=n@n=PdX&y^$N2Qc9R!PubMZ$niI}-|9VA{d6>V#?ai#YzoIhw5q|5SAWhY@CH4)S^ z(iel1k8*X*6uNg@E`sJT_kr4L2WY}*agU3caiHVqjBTXSaiskNxk@g;GlCEwkI}FW^p-$-7y#A$blJP zy%pt4xGXb$IV8WE3)x-iEH%jvoX+@Rq(Lr9y4{zJKb8g)ezg!b_Gkr|k^sBQjl}*y zow%2CQ2FwaMtQQoCerc*`9pzO9NtV_&Y!Z9_d`J%lEdOpzenlucA56^2u@U5<@@rQq)q21OTQDA&@= zllMMBlVdRunludT60D#iiI|y@AMl6KE$WJt1431-Lj6tNE3v@9tR+}t`JQO z?xQkzbQ)VMdsY&c6C`tFo7ZpqCqe1h3eAL83-lo?4(kzF{R1{UNs zZXK{0l-FE{IX4K^eM~e8Z3I@!%g}yCJ$g=YhqT2rana#_F!7r|db)X|^UJjml(Ph- z*N(rPabAzhUbXU7E1`4E&pnPpV zE?T;uymvFO^79FL-w#ElQzWK6c?L$hxgha)&3q{vSo%f-9xEvK!@uwx?Q58Noq8p! 
z(z#~Q1@yn|4qKw`f^?N9w)LVM!r*$!w8f%o?-d^X>s2;FVl0MC*K&382p-$K4mY-S z6oTxUFn!8SJp45fvN9LK!Kx?tUzUYnw_+zo4@g11p8L4Rn1fjGYZl8t;fNvYOa<$> z8_?9PlTaV$g=zj>KwE1dcpZ_A7ELa^1KdST4bHFE5-Z;%`I|jl` z4e{k8x3CrDt(GgCd3&4}tiPZqUM0Tc{h?9B)ZE}2yEqhriANRvZ3AXj{e{80aWE#0 z@@O5sz^AT+`~32PgK;GUx;{s#b}Sl&k0JliWlh(w2ch7-GboR}l<9pqD@&X+9n@_D zAX%eD^8^2(+4YeJon<)x&ujSRL$Q%!R427 z5cO6=4rlwO}n1Zbg390XVl=q zg|Dc8c!nvDH_B=a-lN^!hq$u!00ve6jp0U?#L9k0Zqv8WGMo6X=C{zoB7l1K_Dt{G zK_=@`hZ4U!o|wD7QB6iW=#fS`vz07Zbu1L zZcU{f*m&Gh`V|@k6w1-$9YhE^wgpM?`7>t8b*tQksm*>m6Oy7h*u+qs`6P z^p=$j|A}Y+Zp7M=0w|{t6Xrx651ii+GazCb#ig>z};#d^q%ssHGl_y+)hY zgcgy#A^qtAC{OanC1y=$0n)I^rF-6<8R_Zj^}xw+A(GR5s?0x#zwxYqPt9z zEIZ{ZCKe_W1F`@EE*gXF%o{i+fcP2Y9+L+op=3lLs(apoXhwz3hPRzD66O2#a9E)Vuqc3b1=m>9-)0cBKm*!))m(RGHD%GhaQV@r z+}tzrf88|}Kl2+N9{LS#n3##$A*o=0Ef33M=7V}dzRYoGC%C;=Pgw4E6QYZ)gxT_H zP@EjZo&O~^*y@+4b6C!@8qa|9pswgK^*Z|eS<0iwPe%;%MjO+yeCQbi!Dz?}Y&M8P z>DoJ3`J*$gv@{W%UoOKJyY_)+n6*%9n2L!ei&5)n!vY4};*Jvw(6Rqxh zX!Jrj8~zOmy(4fqmq+Q*9K^Xg;Zxb7GHOb)z& z_l4Yj)mf9rB1y0LJFvjvSe;rd+hME4#vEygNOznY;L`cGX^8r6vyBz z@~cEAj>gL8L&2|eI0VJagx#v=n9|`TNPeR(<>U_dBJTvOtV_g_kX@h*)5=z={{ad0 zGHWYdu@6;rpLy2>YBp7%p8i>^8MPU02QLB-;VRa=o`C`5tMTIC99TX(i*l6rS;e%+ zpt`GLUNl#fU(5yV!vCOwItc+f0eaq3<4HJ#UL6;M95!qGBkzI7>GN20^(sK(3aD-B zfv3v!sFk!7(fChP-qL4umWIpb)?&~$a=Ym17(P0McKei=PF4%;*MC7oVAyd(ONfj22$cM-y60pr)2sSJ7c>fcr(23sPp$eLx z8y!aXp(C+sdOXS&=YeiBy>(t$3)1%0=sf5+c)TIM#P)lzRb?R#`)VjiU;M({!>@sJ zZZA|GFJMj=;?aGH3shF!Bads0Y@UIc*!t}YFL>mEEmu>yZwDQ=eZPxNKwd{W$8Fza zEq;DsDFo~>fW*6c=u)@`!;Tw?id7?-=@f8?h5Z>iYY?ILfX&-8u(NgCMFw-kEPv%Y#= zE@*fH1cz3E=}YRo|JoUSO+wJI>q+Qv)j|lk`UoFW7t14L2Lvs(!P@L2kiYL3Pi(h^ z>Tl6l{0s4T0oC+2rlY9+-AWkNJ};r?AApwhmb|Eywy~cyqJUI+CDOQhfmyT z>THa58UZ1G%^-E10};!fv%@!9q1Re-A!NQkrq}I6b5$KZTO?vlWF=_GKWG~AAJfLY zk@@t#2i2Zx%&u;Q@{!M3WjS@b#+SklQ!P3jHxPa2{f^pOHJU`1mrzo+1kWGoASmJ< z$wE3A3a|Qn1h`5W#7z#^bw&q4ZnTH_UK@wSk2;C*m$K3JnE?b%Ax6xsiTH}TQc$TS6JH}C?dad&)F&H<*AnlCs|QP&Q3{H+?}+u1!3TV!XMB%nNb8;eHb1ZMiD9OK@8pwMqkauZ#>Dts{3mzW zmybJ+-vH^5&#d{MB$WDoAa1n_rv2IlPF$dT$iG@t+<$@fzb-|4A06~vXbpc4wGb}r znTaxD_}I;4ICs0D;GXvsQhoL#Uit~jx~cRT=A(L}Pwx2ke<08<3H=;SK=&_{y-4{C z)q0mesWZ{|R~HhOcMJwM+d#r7OEKB~3s{{=#lV9@(A#7;ByJi6zQeaNrAY)PkDh`y zeip=TiRZFM?@|5joTjGbDoR>25${rt@k<*-Ca;6E;WlVn&>v5pw!oaf=`Ov*2~JwX zV}zXUYxi%;6g{$-bnC4iPb9>D~Q+d1*SMV(K)O7-@mE zCmP9P7YrNU#G|fdEg!dh33Irz84_v!<&!WUv`Lhg3!DH$YdZ?r2~n^zEe#YNA-VSM zaqwzn6}Ufq!?oS1`|7ulwf`1`B?GU(LDfANons+}S41#x*n?{2F6OuUFu31;%m+*~ z7eZ~vK-MT?5F6{mj*=Ek8eRv(rrm?U!E@o;Yx-UA*@{!Fvk`jIU1HWJh=2A4Uo>_@ zAD?evQ#FRi-`0WY^<*Bs)C0oxEVyEBgsh(Q2Y^6r1MaOrdhSyRO#c-!Y4cpjbz^}C)xz{5MJ({*O5Yq4Cu z{lhr9`U^Jo_=?7k9mN!{omirm$BRG3VdX{>aRfQPYYt3=FsE`je~o6~vA4ngaU=#v z$>AZV*^KQzOucssyt^ghtG!0zr35qK;M>RO7*UMRryGkFh1sb3(uXxSq=MJ;3y{^G z2EN8`LA5;>ZI0-eBAM>Yw^Z1(E(3$p?$T`bA@fYL0N;Slpk6fuYiHbMb|o}N%e{yZ zkC6K=iG_d_@oZ6_y=ZYD8gZ&C*j|_fE^f)ZHTEa?R2!p{^H-?~`|AV?kB?=ERzJD+ zO|eXV_Pa**l^7uh^XT06lPTz|CU>Wd_1c?Ud*1?08{dHAM{CGBWhRuLE(e$E)3~Fe z0o?kUi}tJUp(^+QmmJ_MF>n|v{I6&vo>ygqu4jU8@pRs{p$#v!8VHA{{D)bxV#vJw z7uLHNLdXD#5c&`GiPAH<_|8y_wXlO!M!cnt(O53r!TBemK=ODPTkB^jn#9FpU;(kj zlm)Igh!O8gUx&wEufS$)7_*zd7PX@;=SI9)L4GY$URQPz0=7Iyzni(B8%O*=Yb}fa z_>Np219?DSImEmpKI4Z?mg?T^l8Hxu{26wlOTV##&5!i*S9C zo>-JM0iu7o&r2qDA>WcQ>O~E}+HtYybm|W;>QV)wPXag z)R%fI*8>z9U!D?8>)Ut!G_#FIk8&qGU7aIy#5`E3e9nTBXy*DyaU@H zYX}~hhcm2eA#f1ookQpz?6;UZe|G|v;d|vA#x2dQ2M@u z5Q{bFMH!eiEB1g#f|1ztxYzjX6?sa_oE0!UAH!&GR_e{nHzy zhOIKaj-8kkx}ir!BdTUcvifO1(fVN(*zL^avC;*&i28cHiT$yle;qcwZiOhl8z{?f z12eY{VuANdzMJOywP%lk_n8<}JiO0#l%`GrI!KheTreSYp~*okx}79Ncm5I58b-QlE!1tDd_czKnhh72e!V;L#Ed~N_grb4ZGuU1u5tCAHfa#_L2u~Wo6~V+3 
zjN5gBS^jSy20DAdx2c-K_P`F_v)4@O!`6Tm&r>jBYfpIdi?|0iWhl=c#TU|!(BZ~I z49F0p`wJ_8pL!fVe}=7#*Fjq^ClEy}#x;TOF-Y_S#Ow{<{z3!2B4)I|4LTN7LBEwB z(0^8cmQi&SI^5Hten=|y%f7`Q0jAvigD*g|e-!U+oeVm=T~Pf)t?(_nj0r!9-_gH< z<#Aci7P=4Zmy>rcNWw}T+ELVV6e}oLN}Bm(tm&tMpgA8Q=XWaj8ya&_yY|D7UwWV( z_AjseV8GoDB|q1&y?pbIIWQ-=2z@8a1ABE(BzF>9HH>mNvvn}8lrjkKBlrXvhv6%< zxR~hQ)Mb^S4BZfl5A2G;WZ)x+!)4GZqRg7tUEXuoIQHRSGj0yh6QtW?!SBp9`a54| zjz|1>o0Z{E5by~${i+5p2Va(~DvEQ`?mr+yDzdjw^ zYz|;t)GSOI-kbmK*8$1%(@>N`bLWR~#Jzk4X4Jzfy+mD6qi9cLA%XNu&3Jx~F@%dd z!Tr4)eo0M*gk_5;%a{oT|Nesbjr-}mdC0Dd5w|~U&=pfa*`-7X^Uwme2vrKlHSGe>33})_5^91s#oY)+M7u#llk(a_Z^+gQ?cNW38^+F-x;Unb<`d?6_4d z-On4g9ibJ%EPUREUtis^3bLcR4Dk{o)5F7Ro%71t<$zT~&pNzz7TK_Y$DWoLHtX*y=yP_Z`G`4X_y>k$sT zqt7|p-GH9k1Ss&agagE({u6r$nu0#JCo~mHXH-Mr%}Czk+zZIF+kmNlzHt7k7%fdT zFuliD2zj!OPuy`I#PJJxu}vEu8Sx3C9_J(cFhGUNbZKHl9?0v-@4>uTL{~d{)r|$! z+>r-y*(s7kr}e=nf!leUoc-xeM(*n`bMZO|Eb4EwbZ zcP=ay!8Zc~2lWPnztWj%)MM;OcuRhaHW+d8JXC0C3J(U~B+pt0JSB!nozE@sToHhY z&9#&b6I1VqgtWqVD0T3Ff^lPT@?Mc33myQc(hZ0;Kz)6ureK;5aex2!6&!zW zgtnWOptdPzF-5dE$Tx<5YzvsYGsE!2Pb}~~oejx7c%6Cf5cJ=9kX)p!_$o*0OriYX z^BB^Pui&sk6CvH~A5_1ctsFm_v>@aCtZ6BATYvl;PGnF&$iZkZsh9<|8u`!`e-2}o zZUfoFdAv+?nK>?b&sT*Tb4P6RiKXg`Bg86f6CN=0Tl&HjztiA8_Yz19CxY~%23H<* z2MF_os(dd#>2xuB`F}di|7fnP+6i0UkXAhIsZ#QIKWm`7m8b6lCJ9_lzQw;-Cym9?bD3Z?3{Js~g}aIfb6KOPJ?>+X0?TfFb20aO;ms;;>Ja`V4G?Nk22eDo~1X zIiz6>NP^f+uffML6J@m`&Yp8Yw_65+h9X+(-n)SLKcF05g9W$+EFgb5`9j`!qtmTA z*7!jrm~7n#7K83X>K-ME#<@}VW(Ds#TaOjo*@Kq`Qzm`*UAm{gfaoXGuV^el#7G-R zx~8B`!|#}w`T}Bqm!W@)Gn&|WpzEMg)VWFirXLT%n>^Ahx>8WKaUvgEtt-U6ZRYKd z-NWk%#DcT9p^P8%8OJZXfq8XhOidn3pC$KLC+mO^$2FieEe^zMliBu3?byD(fmcro z0=EyiRYAUo{*6$YlEX*bBhThT4PoIgqui`%u3vP1Tctuv-$xUC^HiMv1} zHjzcIrhaqY3JuC$p#k?Nta;K+^S44eNWTQa9R@;DP7m~awI4l)Jwc!7dgjo@$d4_R z`e;wb0~x8rob1K2-hQBtxJ~?r-3_2|>=L`@tSjvHIs|d%^~|JoJfUp zb-H(zZ@%>>6nvYC;(_!m2aI3|Y4rF0Hix&broPwKWa?E|mt94fj%CZwU~>PHX!&mh zs2b)$?Z7aQsoW=)4?l`^#Ufmhs0meE3--z050S?2v3apQM7g(vr#zAOa%)AiZwZhy zaz9u$MDnXkqF_cs2l}sE01Y$4A!*WV6tP=4zMY2v zA^+u3@X}R5__+wQ$sb6plVJYfFcEjDNMCUJ(1eO@VwjZumU0|3!Li&MMOPM*7nA0q z-6x=|neO17ExZNsg{5haF^sf4r#_yr&}=NYSSUd!)D(O-X21+p4Ooq<#uSeFq;6eO ziVpsOx{!8opBlz%JtGgg(J=IX7Rv%Zo=3^n(R?&-0VzxMg}k%rsMvUgwYrp{&zi?< zylg-Cniqm9c@o;L4~HIe=>IVG2uuBQ0r~w*F@4?*rZ`7F3AP04&R(ZQ>LB!={6FS9 z^fmDm$AUv#A|Da_4wlsD3pRVsVo9(P(xxkL#b7PL;>R~I8A1EizNew?VHddfe$P)o zaT{M~7;|NX8Q^P7-iDXMFlWLsh#UW!-pw2EBAt_}KSZ2o_hgJK`hhheZ=m*R2ySn9 zL)ud{Q#?Iiz!RI*c0OXCqej+4z|KKhPq;vJUIUk?ZdA_#EMv4)&oIj(-*My zz6;J}x?JG82YkklmAH+14qbnKp+4G;5c~8ih&10SH)tAj3FrQWsJcpA_}m2Zj=lqj z8Q~bvI{;-P*DGz$e54+%TgsG~)PXzd;lwzT*ATlu1&vmJM^C-|tZ}w6x5!A7^OPP0 ztS`qSGhUHLJ}P^fz7AKEz8-7t6+&}iH0ZqA1HM~qAhVsig}QojV}xty-jWAC$xG2e zXB-wDkHXZf=j1gF%T}GrX6kvI)Mq8}s8Y+oDH+)}L5pijiKKI5189HH5M<7uAo1`w zY+CXGwbDs1-u8j7n-u^}hWE)|q$T)o4d6Gv424NnavcAo8zuVxPK>GfM!uR{z{&l= zyW%F@%L-Y1rm@hH_!V_!kyzlj3!`23!^RRFE@ay*j2m_X%kBE2Y>6Bf-6Xzbi7xNC z^(@9uY6pWIKG<>ZD;C7~L+(BUsIEK$k_H!8xFZUDR=s5jqdY-!%?JF|q;36tg#Oj< zQT9hNE4$i+q2uE)XG;o*>$<2zQVUBg^NFD}6brUJ1-k==@7IzfT79 z@rN-f>=rmXe}?Y=3K6Q);dsVn>^b`ZH1Rp;eTcZnn{G&3eXJoiyB#zB-32DO)z}JnQ-@G?0^AcjB=1a=zU5BdYH(?>`hY>+TQPg(<`>?kXtHaDG>+c1Q z*WNOB%_OG2$BsojNNP&J*&CrAP24{1s5FH^`Dy`mzC~)Gv3X1(cI-qq$@* zs1)Rn`5no&`Vw!%XacLsZ-t1rBf#S1dvHHI214V%L9EqX40)p@uFOuB+8%*2`4Oh* ze}#E|v;~7HK3J1B8)cJ*@&)!SG3_9vy>6((VH`HbQPn9Ny^YhA?9v z9J%}*Z0}7c^!0EkKj#3Za`NxIBbMgwcIN)aWh~VWf)LAczVfmjSG@T!_{{7J6*q|+ zubas1-(RDQ81X&!-hd+spTH!Fc(Y~$p;YfA#+f$rUN04-&pAWxoHL+0_%|*j@2pAP zY*5AW*goqWFBy@q^xZH6E&6-}pR}XQp?fsMSieVUCuyoEjkF2!#A14 zVHC<&3gA^40J>#q5N%ilb98E8idz^<43ENw-mO@2?jsrqC!wK}n2pw_Sdhj4b&Nei 
z&x9+yNnbOZa?u>*MZ{*l@f=DgJ?33<%^{=WHD#J==(&2#TS_XR@b6}fo4o`xY+PYZ z=4TKS57^|vP_((VQ~e6VB;T>r%XD8UKKVD)2Sh-y zObg}XX0ha539!eO-ouHfpq15v(eO09=c6YSEt!RZ`(E&Jr`6E5whR^u8hB)SC8}4( zNIR{yaLS1gC@YPm9Clw`8FBy(bh~)>aTi%@;}3NF(L>0%;)OP?A8^^~0=P@>Bb%l3 zz{!}Hnt#%nWI73Q?d5#H1!vIu?+(_y+J)cYD8!|N^4n)?QIGo;HrLLGbJ;rsV(Pz> z_LiiyyF(nnS&PvBodxf;_9HZeO0iG=H_-TbOc^$E9He_v&S*y#hGZ>g?UAGz)nuT0 z+tG>Uwox!-&;j(E?80Yl3Br_Rx}3k(M%r^egyKUg^D~RAUIAEp zlHP0N(O0#&lDCdJ-zQuJv2-$DlsyK!tjz?)q-k(4trNP9av*r(ark{tSFn*yLjOC- zr1fhGPuGcr$d0F2yLS&*oq30>K#zD?Dps6x5GQRsL!7isUX?_7vE^YnVh?rR(+=8= z*+Wv|GiG2$9ck{*F{~kgy2`8hFy$yn!w$?nFNVYWGzFD~88$6w!1I%O;v)SoVD35- z5_eueQRq9i-R=iumA{vccB4**+d5q6zr?DiPMPlIzc5hh0qIL)se|h%xP8_{&$e3r z;9xb*Fswy`&A)hY%OWN|$HVwmeZlWtBC&C$5S;&$Sl2x{Ux%;Ym#ctF>&>{y%M3Xi zRSm+OSPb0I3oT!EGwZdqBiQGNdEaZ8e?_%2$;TB7&PAb4_+F?J4nk(+L%7|nCrql( z0F3}Kyq`#CS;e*N^cLd5Sn;b#J1bD8Pp?uP}-!1{O1>-`IQr2 zXSi@)8dE=83>NGzx-y-V_4|8nEV>_9EOqqGr>q0GwBr;Wc0zBDztm3N;im$we>UJocqD@O zfCnGAPE#16U5+Cp1>im62P)Kwyxrke%=H@p?)R)ww>S-|-J>agwFg!&x{3MI-eHZ< zfm@aDVBy6}7{0ZE4OpkmrLH;)1FW>Uf>FcK%jypG_3mT7hGM9nM$FXUb<8g4Z%|AG z7TVE@X;<}xks8!TE;!@<(I#Ae{2hphECtKYeW7l@9BY>nC%V}SJR838D#>luy0!)E zE*L{Z&?(56=L~0-=?jj2A6fgDtzf@yJQNgPNB5I|Ve`j0Fc=aCei`v7Tl-Nuq+BG# z<@gZm>jLi++6FStlx6(&4U%>gGoM9kF#V4P^xwCf4-nU3T+?+dR5oJM-_2k!?hs!$ z;~ecWXiuYkj$hFf3PICELROEZ=vjCYYjSzeeG!S0Ts>CkZNhb$8wly?dwFylj2a`% z7>xNBlR|^Z!yOIje=Ws9x_O|wkb|m!X7Xt?@24vILQ%tP*uG1TvmD%k_Q^+4F+t4Y z=}sg6n#&t>&c;1Y^jLk?Q_>S0V3S80#2t9UB3>Op^T-%j_U}6^=``n>PPE0|QI#%p{M4k#AaI#aw-8Iu?P^ zn+0@A3&4#UQQ$WCAc($|C?`$*MOh4MzU*l&#_r4@ZuW7Edv+DFc5Dang0q;E{f>G! zo1poWiuYesz~o*%c&i83uq7}PrhK-7jL-4l-#-phbDvOe^>aRae+7h8cJre3jHfmm~!pzY^vD6MwGEbkpK#K!<7<<4xi z)d8?I(i8$WX>)}aav(0CH|_Fi{=GDtm{(ECLH0^8N;*!zR|OgvUxyIa%@}km61^fG z6QgGp93#CW{5=n~eJatmiS#`f&E)4lD&zi+W_4$8VbuM6Jkq}z^Q3;LHZ^9MBURKr zCRfV(evvx=8HUN!tt`{pFAXp2W|<2z!0~B4NOiAcv;QrYzQh*1Y6_@>lHRdyzaj2# zx|6gjvK777kyt0(Pv8Nc};~Zhh`8PweZmnGs$N)4N}C!X^1N&9`7J&gjXX< zqRvZgM?E4PMvF3@)A9Sv4*2kz=EdHRSgA!ki0wJ%8rMkt*SjDot&kQzi^9j`xv%wl zgW<`Pv3+VL%sAT)bI)pUPDVfQ)JRj#*Yi9CNB)8_b4$UX+!S!tC5+Ea!8(Y-hy*vd ze%^>XF73%}8{LyDihBYpJSl7bCLBG6?t`UgH3XINJKMAtA+Fa!?513@zm*+3^71Xl z$aka5KO3QRGxdJnO2Uv`FEC+a8it4pSm6?4ugA{i-C!&PmT92fRW-W&)e7A^I>2Bu zz3W9q%%AVYN}E5R#gS-iULVA3#P7$DgmG-ZK|?`3ZV?mJ?tt>7X^^zxJi0W6VA1C+ z>{zbN4Z7SzIMPHp9oYnkB+rkh;LFGQHsbjDB#e*z3}wzm*!CkCn^Q)?h=s>-)~_Cd ztDUaU8PNgdYsaG9{9EWeL|)Y)n?Z8us8suy9ydj-4{g=N;)xk8OZAuN8lvJS3=051- z@SJ(JwKBQ3ZdR7Nh*LC&f@I)FK7^Q#cfT2Oq7&^fBvJ=l?WljiYBE^POyL7jiyQ5F z9PbYl2}i1~p=jby=B#}I($%*>qj4xaFuIKHOD03p)jae&lT4n8cX*&E5#{&ZvEVBu zm{ZaW3s;Q>dAO&tz{VF)KLhP+$5VH3I3Im?6hyAfz<@y}oQuy-47-*IPdm)GZZ&lU zy#aI|^@DXJiv-KS7nttjM;&$@5Jp)s`7CRctI3PlaEN#QScOH2oALXaN+>M8i}^j| zm~hMyr!PJZ@@bVy5794dSh@{^oG*YVvH-MP6%b&w3{*Gv@~W7TsJ?wmnqNeWxCKAK zVMH+#sq3;Om!~LY+7f>2>O&yua-#lkF-%d$VOzO7EXzL(F1GDhFD?c)!kE)rQwI-b z+(gy=!^~w#J&21RK!slym>rG5@b-(;r*j+SFXpoRze^ygs*9g~Mvqh3bwI;XeKPR)q!FwU;uTJ=vm<_Ta_W*hmSI&11$Q+ySOVnOm z_1H{^BRxMl$&6dFNRN~Ct(0cIPe4h=S7~_rKhU`xG2!(~jHkX3`Mxy1^^+wwmXZIb zY9U6aP=DX6n|yf`>B{N5CpKR_%eVjR;!CY2!kS9r$;-}z@(bk=#_4l*3G1-a(gGbX zeFht1OqAa01A#?XnPZ8WkpHO$&Hi@@ECZjgRy|+bSa}ROKbZDEXJrExwzZwI<9!42?p!gKUTpdHud3XluRzGJUo15P9AoHFQmGFX%M$?oS@mLsZ!G1kSP zkwG5jQYBj*kOMOKm)XB`gQBKzY~6GShYUQAO<{)Ijqx9`OhsSM3B&k(9XPx8C>F%E zVu1{RJX2De&&2!dd-VTjmu1a^m|GELvm`p)^YzqgU~P=QMjLVN2jd~U<2i&jQnu>E zNwBrzu`5xF0%zYn;?hDsosck5qVc`d*`z$~_c>?dY?j}U&+Y`@H z#6`!P0?XbfA?s`t)Gb|umM>0Ybc2>4_xKOhb#J7$T^fS*XdWWUjIeofB1@Xm&XQ46 z2$$vZZ_elnW8PHa>l|IqX2(znoqinhpSGY2S3#L19WZ$Q9{Rl`Pq%6gy3Wwzs%DxA 
z-KIGJ8JTEuh&(wpa!f5d0W&9`X^ayu)*Om5ktp`@;TddIO_|o5Pmg>*Y!0P{GB&vcfRk1@tet` zRWqG6_mr?!dOqUMf5H%hO3aTb0I_Kli}=z)GbeF7gTL|~9r^+s?|`H|+5EDOpI~=N zj;fSA*4jG}da6HQS(5@YeE&jk$RWO{rF2a7TM&I-h)yLHQ1EgLS~mETw)q96iPQxn z+0BAZP!BGBznj;eV9=bvr_~duanNCmUwRJHTz`OS?ILVSAdjr!P>?*iBh^X1OrC?~ z=)A2IONYCmt4=hgPOX9KF=3E&BAkhPeB_n&ZK%5YfDQUk3HD(X086!mQDP0j|CkPE zS?7jZ=U&0|_ZIMdY!SLQrt$&73{xL2gZk&j!V>aHh})zrKj3H$v7-oQirAIE;C)n+qBc9?CG`7e8-Si=x;O)hHUOfxsW3`cntLeWoJ6Z z)zq+e)Ei{}*D0*~5zailw)1(tzhb6IC1|x73jS9PflN@ak>ZcgI&u~G54y#+-+B*o zZ|HFGqbMU{F2;Pl%V_d#0!ZFb-g9F)%B?g|ve+!UgSuPP*Je<@e-$h3QZiSQmteFg z4%K7oAUrC9Jh#;Kw6qVX7oC@S?99W|?M2jEM*fZe-a^xmHz2ZWmoA;F$K}m z?i|F(U;%p%5pfdJ`^uz`XQ9u78ZbXjd8<)Mh{&CcZidvg6)_AQ;39q=)l;x+iiUH8 zsOKqhPPV$^1GxJffa;g=xPUl9M(=Nd$nZ6+zI_|_ygSVNlD}ZV>wJg{iQsjtGr(t^ zA@L&>OpzW3p8MYLEk~Zii>ao9N!oL0RvQZ*&Gb%ly$>2ez0mvH3A{kF;HZHnTzF5q z*PNV$VfCp{o@EV^u_a8dUdqCrug92Xe;78oC-?HtN|5co#`o!?#hv?L!j*2+qYNc= zXI$98>wNdbHsXE6IqlK9nPg7QpyA9$;O{`!9uorzzK=^Ms7)v2^6 zVVdlYCCL)1^W3jxXyOc6LNr-ImgEdV#)OhAA#x;=2uBGONp+t4O-bTNB(yk|a4aE9 zQV75M_Xo^;W=`+>y!Ufo*Y&+XR~zT4>7m6l@)!8T8dRu$p6v%hsLd&0P40AslVnP?^ZAs)lY7)+%KJBMgJ!dne`tej*CZ2^6VA$ ze8o>Jv=jAJap-o_3__~Dq9Px;`*Z~=i8HZ6aT@dU++BVsV z=V*^g{dL^9w}t4m@d_Jx^)Pe#LS9neTRdFr1PLpvunCEW(lA=1)3@gV#l)LCwT-K< zmq4(j8hx))-qD8m9Dxy#?e+@0{wt$Y?0fP@t9kW!E1`8*E%=-_#PJ_?;sLAGAUk@D zo4bDC5&v59lK5Q6IbVbY$IoM3A8+*kJqmqBDsfM*Q#ih#v1ot12RLRvBPLA&H%c@T z@1C#{&-^77CeUua(fbMl8MBEA2_|{=n8=Z=v%lnj!j7Z{C!FnEcxrFgpAkO4J9b^)Qj)q(4`h2S-d@^%Sr@bFY)LFcBGtEMSI zdw)3CpW6au1qG--^PXGh>7lW?JGgF1fiTjg)t_p(s?QjfduKOKuKI<;ZC=5n`f5ly zH5%1bL41_#8ww64qWQs@pnMsbuXNZeKYsHeY<*`VRBe<)iNh2)vXNfBW*Ed0eHXXDl*Rb!g7NU`3Exx_F8GR<%3a&L3C^OQ_i|K4m+?7~=-RofgpD}Z6Du&@J)3CP}ofW(Ep!P9j%KQItvyQ~6l|)0-k0+=KkHet3xzxv; z#NGTu(aBEBqu1O7hZ9-&>^%8$rbbi0cd*29`;;J+>&2ae83DtA=KIuBLWh+hVq4ne%OjD5-U-$ zIZ7U!K;5TX(!eo>I+BUgR{VxCSw^VQj){lZ6LrwM^B9D0*1*W$V$hJfbrg4KU;Qx` ztIP(X>|>}LKZK#dh!pv^FA*q9okPBp#pph7H{)Jy#QB}8(XZAhwcV*7Ds49_{_O-P)}La%zaB$$ zdI-bK4`IXLE<8D>0Jd4cG}6We2|Z2=@aw*$w1I?Opmc?8mE>s(1|Up@{W-7Li_ zp&HIueFx>{1Q;2u!by8l@kzA}?FP?5)Ed%PM=zBRFXI>xF`cPQp2%*Ib>RHe4tD_orMt`!lB31cNSqB6s)f%x^YJME`HkAgzJ? zugeoKc^t9!r{2lW{g#2xI$p<;`19x#Liuu@0j<|x zT!p0HY$-t5yKA0hukJxW$sYEHsi_cDUd60`ZiZ1p5%Cb$VpyJ;C~H2;wpQGxt_4?l zMrt!8{9A#X*n)l!n6dK!*JdTKjLyhn|F}W3#{_&DY$4dQ6d1I-2^>#W z!G7ZK=$?)P_Yy6S)Lz4!Szj^iR3=tWApY{T4s6=;_p~RT2(la9H7CehDL<45(%a2%A;kFyX;8huX?Azm*_|}|it#8RHc)gdWnC=BvJTGsVO0E8 ztoCZc*^_osw`?XxEsufNod#G0%9tR&wh)h?^p46xQ-&9RR zg#)(wsnBx4HE`*bMY{w#IHgM<+V2td&bzQw=`|dhAr)HpZh#Ewa%>(`%%m}GJg-_DhetF< zg3e?euM+ow*}wJZwTHNa;j{P^Ya1cpTH<7Xoth2k4KSLtchxfp+Ta};- z41lKPk!aqd7K#Vz;piVmIJpOP&7FVm>A2+<=vWV~`tz7PwD29M(*$n+DuqvO*MzHs zi4pHiXGXr7%|E1u2L1+;+~|H@TLBlBn23!x!XeLSEtiZ8%(EXK0#5mVqqFl-P@+E{ z6!?>Te^c4cD-pQAmxT~zV zo`m+~Avl|1E5=HKAo9yNh%K5#m|xWsy> zTslq$KAmoY+vGUZ@OcP(Q#4^4D4%JSB#k&LM*lZ7`nAD&USeOLEY~0pi&Qz zcuqvc+rM$z>F@aQaV}sTWnp^B&><@ti>G|0Y=S$usxu+(9ydzFdjmG9K%wtO&C4nCj4}*g^ax$$P=+&uIg!)+iXz<4SlO1a=>NO zwo9Qdi|z8#;c+-Qx*0l5D8i_~O7MHK50uqEuoJOp8rrRg#D3Y>8uTx8oMs|~_EMpb z8F@H=`xopBx3jzx3wdq5t!O~qbxB>WVBQ5oQCj{0FW+e+RQh*;XIqIC_OL6@^LZ;* zzU>QMP2-@&bqz6|Z{;TjUBvH^24dcSFSx9BDefdTXyTG<*pa&QtmEyWi1vf(?VD(B zcE<){;i+6g<J|CctW)6sdN)}9o`%|NIp%)-16`y? 
zF!PW^j2u{lp{u0gBl{ibP7ENYduMt1-=3gYbxOXm*jC6GUI#P&{EpJX$=K)qQ_M9P zgniCSg`p#Ipt|`Bo-mV&vRhuf@M8k*+}H^9ho6D^nj4rOjH1j^4S!Vh4*X4C;(Sd7 zlu;K;QP>^ceCaT=KOe~kZ#EH5iW0#$Z!5!%nUH+&7!LaJ5y99KbUT*dwAc5+{?Hg0 zG~)&_3yIG)cP&e8+KuW%%UF7KKbYUV2}_Qe3IVv6X_n{Wvne|$KXaO?eDg8+U_I9V z<3@dcg;;o(xYaWZ0I%MC!*iND*hD&QK>AQfve*S>iD}fUSAlcmtT3dx6zuP%;LaH-=>B0KU-a!J zDvxA%>IP@RxAJ84*gl)D=q{qu)KYH!egh~+>;T74pD}1O?GgM7G1}OO&Y2>vy}OR* zR7wTs(f>li?>8W9K|FT}JBa4mcf9h)5gc{M8+H}~s#a3`CHWs?I zyo7Yqwpj9J0}lO3+2OK_#6^$6;*Uo#R@;-3$~VE^;Rbx`whap>NyVU_2e@;{zu=pE z5$e3}p*E7{ymyy1%IG0b-1{O7?V15bd4>4MLyMiZl0W2Y7VpV zs?pHgsTYoXa|Cr+|7p~^_AK2i4qJb1!TP4h(9m%)QycrU4*wcsjGvv@+d3Dll%~S8 zDOnh{vW8U__NVU+a2%G20bZq0FtZTPJTnpeGO4%Zof^9pjKlE2dVKyvD(ZYpxO59W zgQOJ&UpkHnt31KiO(G7Bsz8@_h{5N_p}{>TX#DvIx2oK^kXi&vOpHWyu+nxV>l6DDmt#3nCz3;xIB(anXvUw+=`TT9%SiIG@Xyc>J}o(1|j zU)YH-$|LnSi24aPcu{=`6!*!-p&OdPy*>+M10S)Y%MN12jN1^=Wj}c=s`)67CHSZ^ z7xCH*a9c7AJjTDq#L^m6c-)r<$WMbyq^(%g?JpcafHKRyQ%FCPKyaSGh^u3WO@vfSRTCyU-*joMl#=G!|K8awyJm@93Bq`_|)>RU13 z9r>;!mdY1JNQF&DBx3dR5Qu&}92AM7Jj}oh?3dPZpMzULnY)QgtnQ#uVG+*N_JaBq zHjo=|O{0t2t!cP>8>6p`1?x6G*zlr~z0Ep}=9k_PS}AK>w0h;X;(<>viy_#oQPw^DzM@kVr@-IQ)#Hh=3CLCn`p8fCBtI+S08lJ&$s zcF{BQAG0B2Nh0VUj)$O&E8yMVZ3NYBKfY!PX%nhiV&b~<*!6E<_d{dBl-r6WyDDHt z`WMR4UPQJ3aQ1tER0#jn0cRIJMcLO5=(~pVx84UJggnREk#k_wg0Jk%zE_Z3v5hii zOPM_GI9O%ah>nshe7XG!26c^Q4SSWW)ZI#qwY^UM?hV}UbrfXf??C&ZT~NNAbn$1# zg46j?;A?c9r+&2+B}blWBE27jdD%*EOPhe}Vhe z<4`!i2hAHcLZ4anXw*VGNbR2>;Y+wz@BwIDy#X6i!qIozILei`fKcRVqS#*$ zW@CsW>CRY5-#OpXH2!qDxv*7r1b<%G0pnBOLC2m(VrRdj;MaaIWlb&Q{wBLnZE*v) zb^Z*lPrvaVGifde?G7r55lUvP&kq>V5$zKa@TCKF|Bgt-X{OJyG2u6OcG^H}ot8yR zqb(RSa2EAcEXR_$lv9e%g$aLI2tS<*(WO%XWeSNC>R=|=`W%G0e#9U8@dljse+M2) zOgp4q^~8yoe(Eo*-$kB}^o2ah)EQ-`HfXfosW3isD?Z<1B^D*q+0=ZSsY4xj=N&m9 zE4a@>kM@QYjkVyk>OE@x_j2_;0^!`~2??{v%lRM&YVU8SOnVS`z4ZZE<1K8ioq+D1 z10dq+K6G{c$&Efd#n=($lm;vs& zm5^JW%Deo3C)G2T`%Q_beN?%p?0$EUj_^eF)I#?0$WzcvIEdD*lc>A#F1lSTrOpE% zR_9Ed>NyPTANOP>{Z~Sg?f^F`Jp}4siM%d%I%YrEk88&$!Kn+!$bUERb4RY?{~1Oh z(;i^agEbJ=?=Xv5EroT8j7%w{wkvl(7IguMgt-ph+$2UQ5yc7Cul?q;a z`-6JJKRkNHDzyJ_ffXHk36+_|xq5dHoGc6ZeBXP}_-Qiq9xN5KKJj>TWLvSm(hu|g z`NdyuyGYqcV<9hn4Ridh3{1_oLFHlML`D;{FyLe{LQfM$eKkZTA;wzCRFa6V%wyLkS&>3^01o z2ppUI8lx%WRCh*+Ka2R97xaOpzOfd}E(Phxw&kUlsy@j)eCPIA!?HJ|jUTc#v zciaV7r!p4j8|y%aH#F*b-4OrrLrLr%&DwG6i4%($bewt4pBroy!o-$0XQBQ);o0!dj}?23c2P&_dn&sF7P!^T1u{9z_`OQ;1^Hu6BsgivcI zD9<((R3~!XlRU06gFn?g{=XC~X>`Euqv>xyuM5_ftJu7!bZ3fKjWHwlKnI-$n#&G@@24Yt`3>rmd~yeqyO@hge|y#}CcwGSw;+8W zU`Cf}(keKEhx;)x`y@^~V=Oki_`}lp+(#WY;VBO2pw5F}Un!RKD5u?%r68HNj@zX^1l>s+xnfc)>P{SHVU(+M`n?c~ zzFfc#U#ZTKST7m-7?wpCh>{X_RvD7W;yx6@+*j>E_WDGgqiqbd=8mNf(oI$HP&Ls~Fn_a;mj_u0jmM2d zli%Bl4PCmhC>3!*HeH85FPe!?;g4CGpP}G;?iZ@|QrJ0N$7zovZFd&)JO;T=|6iF4z75r?*d{9~CA zR&OGNkN5|!k!DcRm$V>A!LA*5qnq(T$XnM;OzkbqojU51Hqbu!<~C4lcjZxv3{=I~ zX$;0hL!?>@)dw5tS+y3*{f9%r)kw5HHwyFx=0eQ5P#9?*$F}ybg_s$`P&LRIs(y_H z*;xi<4H`I?@*Oloc0;@Cc4E_m&uG7RD8!LhcTrp-+858n;Ptlnd6um>_w-@#uebnB zwbae3H4?Ok$m?yF$YU~Y@}}}wXnF91yQQ1sbJEdVvp(~5yDRkf_yM}N3pF`qbT>P{ zmU^w`V24$;P?RP@{LEG?ThK;uEFz{?>sRn<=Ld5?odn;3Qb<~U5|wdHe163P7@w93 z*`rGFn#M}>7(|Tc&pkmm^ClDS8jG?NnP9nMIfg^GT!`P{GF;Xum2AQ?mZwQEOM zBkNC0gHor?JEzv>VwHn>oB)*BNa zZKC_76i08SoULvJ776D;`KOd;nCfw|3;9jl2J-sv1(Kz5xc-#dQ9bRjE=X~R`cJ@5BS1amMo-oKK??BZA)Og&7 z1rLaw=ie1vS4Ln}V_z_sVl9u9Pt7%zAN*r69G~4ecdX z!sTcS(cNe|j7+ma?Z9ll)+i8m_R~Xwot@xoKs=DKCJ>+q1;62xxp`g=IIa^edc$#c z!4c|lJ4JnguOLyk4-TANi?W(z7F?N!F$-^E!yMr4R}f45u7Nl?@g@v!$Oe^#US4!$ z7{u=_1*4^VuyW5&_H%nCoOLr0;x=5x0VmCcd;ds;vd(4bJ8mblrYwVp!y2ZX)t7q# zb^Wc4#>PVh=;N7zu7>Yn?TVl9*AZJmv-$`spDFmu%3WART)}?C+Ve``sCyxBrQUb8c#6DgrtF!GV~i}tj7Lwn 
zKRba^A33%ruLs?~`!qp*8!#~W4_p&%Biip%GY5|%=vrYUjQX4QE7v~p_;Fu|7i2BG zzhNWbuumAX*9iipUSMBc5AGwzb8WX*5Pz!{&p75{z>smc%+Nyg9pH#E^TYBUZ}&pc zx(3YZvja=)t8i`E8dR!nLD5&sgM_W9{kEKyYYu|SrW04cy9{bOFP{BoI>Z+I4c4>U zLrBCCC{Orom78oq7Bw8lT0X|qlmc)e?pM&no!tD*4Xj++9c5QP zbAzcC5R{UQUcK&Pecb>|nzI=d|2cD$Yv$s$m#xr}Z6o@|#q;{IHemaG6SmIvgvRgW znOnC6y0+^^eO#aAIaQR)e$kERg?y3+J88k^;zU$A=4+-!et|wM51{8f1HrtJI_tW9 zfv#r`LfPjNw5Rljg3fjL@V-QNOI>sR(hHdPVK#)ls$jA~Be;H0F>aee+Ucp;=p?Hj!c+^g`mi7hx=>wpTyTnRbM`Ov}01S*=4!diNMa8{bUbHEPIW0QM!-;ir ziFj4LU2nnoPt_1QQw!PvU(kP#$N7zyV4XoLxb!A(iB&AxXYE88Wf3cF)d26w=Pj3@ zQ~v@c*>zW5DQkzC?>lf^!Zz@VutM3z2W*0un%5``beDH)I}x&rkT`T^ z5WnaUsE*X5_l`nv%D4%k>R3=lS;^&Z$%iW$1sw-8Ls0uF9Cg+i9&LSx@uZ_?cU_LI z3(MHgNDXxDZ-#O+Q=xcs1%%$`;CuEx(@va*L9^<4!~1-g>n=s731jiqD0+r1m*D(L zGckPW3;KTT@hq&|0o7_VL0NiDqcsU*uH+lX%oMC!P=^VEIrXos!)Rd?MEcMjv3DS` zb|cxnvTEY77RYnUD>Qbb=X6^#9TZ`YsGDFQcb_nl8?eMkh2 z^G9?q-$A>@7AX9j2Jvex#PT-7z}{ZOu)hYO|56cS!rp@G-3sRHW`aQ{^O;?G3G7NI zmhgP)*PWONFNa;l25)0_{Bb2j^zVVX4o7*SSv5qce&d5~y~Nf{#5upT0}}FcA-jG# zxF*)pj`5@%URmJI#7I=0cHtf)+KA?n@qCdk7Am%6V&(b!APceJ4GVX%&b!pap{v%o z*i$aOC<7WiBDv47`KVoMB@{axMWyxz-%H%&gg(|nk0d)`)QDgVx~1ad``p6Zjb<1| zdwtC%(q>OChTLnkWBZM`X(f;0Okx{R5t;|NwGz?k_HJ~)I+zELPo<(L8=@UPqr_YR zl}nE^eIRkzI+_SQPne6kH@=`UvgOMyj^a7bM>x7iJi64BLHQCRA$HSIh|TK{wU$pH z-F_GpWszspO2;Z8iB5h zPl8{>L2TIGokdR4Lq?^g;BzJrJ5Tcluqw8_x4p_pGObSx7RG_K^(IJ*+tN`J(zDdPY;O$zoPl`$>=(a z@^)>^F*ogsrcQq!b&)AhHCjWRLSw*mMIJUjnvbb9XRu?^JkZ_0lefVo9|C8Uqi$9m zDCQ(;e8bWp=}ak_Uwn^+zYeh;TWp2Z)z5L1>Lqk_ipPZXX&CeT8ymUS7Si;3m|N8s zpLewqU2WcRT4~Ib}VhHRtaY1Z%{jSB>KhP1N&KRIUHQ?lGibkg6qn>>u^8bxE2FcbAfgCdbw+~qJlgpZ^Eje?AJq4Y zVqWk3A+T~B`X9RlU;eccg67p@;S4);n0gvDCQ(p1DwcJ4RSjVK0yVcEfO5l4lvJGs z{8;^e&jnWuJ*TnWVS>*xsZXolT~v)RWzx)hI8>~Fpk~T9F>8=a=G^*P9}K;1D_Un3 z;m_yW(COv?ym7@&RGg2_&u;hz$<4GMsWpJG%HANql?2LTsSx$x34;4++(671J3$1+ z4OhrscK`||lLzVWFQ&_wF3(s;_l)U(v*3HlVEW&~I~ zf3~7BF9Dtn*^1_{5EauZ_!+x4Lf)Q@tl8e2e7pM)`piMawMHx!qfoiSOz7S30czK5 z;z#YJ!q1MysC#!@9zp;7l^9cTl;#3LbP#FfeOdV8F>s;GQgnZJf(QNBkFG`sA;h{6 zy171qgk;24$4jW&KZG?LG@yQzH1c_TXTuFFg}*WK<)!HGVB_sk_WqJD)tns2fJeWyBZR&OodsWcV0d^Zqm3%9}F z!8T&j>_pz-({S{?{($&N_6+;YKy`5&O!#d-L@s;BWt+b++0DUnk8Ypny&=uNbUlQe zk%*5D+=3#LQsQh^vl$-marTECPzxct&!8&$gZX|+ z<&Fy~VXOQGxDT4dwG#kJVl4#MbEhHZr4)J)54%Bf3>u17U|3>rRGnCv=d|YkcgGs$ zdajHI?RJ8P=GH>V0xx(g&xM4&@$kf?8zk*~$(0>GX}p$x0>AMRq0!?S$bx5pu-!-$ z=`Iy}(VX7BQW)j<1a(FindRUdth;>)gU%AyzTGZQujnJF{kx16B~ixQ`WtD4Z&+C7 zOgye6PH!IREcUcld}4-T$cBcIgQbR z>RBZE&#lHn+oSAa7BL&2+X#@f0Tg=_+_xzNH<)k3HQfxv1f3xy<-TSV!y$J9Zy5Q*DLv-O>26 z-)ZznTg`&io#FN?3$Z0}Ay}I}g67_P!Bu;Q>3<8N4y|91ZK8mtl`YV06~}y5FNJla z7gVm>yGv-A<09eC{n=_wWoVUiDxKr*M{_yG6Yl211L$XXYKD#njKSs8+7y z=81DK>+NBje7GLf53I1%rxqn|3gsS}6}YzGGY%bhAKDX}c3Nf=23Z=SBBxdpsLF)0 zKqE19#vEKb{vGq5mJF(Z&GJp{48`(ErhsMUsI4)8dk)u8qHzW7b9YcYnv3>z4|(O) zavV85miZ5*yZ2uu=%jhU9UHge1q*8-c+^trW?7=?y~0>@a(~OlKi6ZP{Ds`qkM%CvziZZo)P3cc`EF(j#!I8%`|o z-Mn(;MCvmA9Ru2V@utIN^xrlHB;&p19hS!-Y+plX_cCm0X=3sD27*+zojO1Rd9FcU z2>3jVNvenNl^qSl$m)NX!Q!n4W~J(Prl}fM|XkZ*(C^{LwRE3%hYiu#h|G-nSIFx zE}yxHa`d+$X~ZRPPa6pOHVNqasSU0^m=B{nU8Fv{r__C(0gbQBFg7L;D$m^KNy)dk zm$Dk{qRxWL%vVsJHv>}}jb5HC*|BH1HB=!W^gC&~m zWFrB7>yPm{G8zlq2bLG_S4`DD0@H7j|sMhb?2-^zeN_Hh=~Aq zvnq`JF_v;_b)NRa`!n+!15v-=8)d0lH7mEZ5oiB*0h^MjyL(9%=GQk67&sj4^o<` zW-JWFdV@yrqM3=sRI4) zr?^tZ`SFx0C@fB(yuo34l*3KX?(4|S!=t!g@h0+LdVwUOKWokdnEuF69BuI!)31A= z%BED~HC6@po*c#v)P<xr>Td&*L@VE;|kHlc5HWJOu8X@39TLz6q zpmXqK4ZaFqKK%%GUDXF#o5$b`=V!S8grzvy{~c;)(w%9IAq#XJO)OQZpjNFXKbQ=2 zRZQMsr6bF!3U*FV!HyG#LSfSmEIyqNO=~$;wYdWg6ZLG7NdgRnWc)KiA`aPOEJk`P zVlAd^giH3sKs~!2(w$t;>5B~}y}8O_?Q&pL>t%2r`W(%D;#i>X548S^_8k|E#2(Z2 
zI6B}Y_NV@#+zwyV2g*I1$`O4aIw;Q+O@3MwiAI2nhMgOr5NcXK?{a$@nUy9}w{PJ>!3 zMmOTyPcwJ`PA1KuZn9;vB3rrgyEd=3`628KpqzMZe{_14k0v2jg4>d6Oqw+v%0HRG zqSwR-|E&b@(<}(M>xYp$R}m9vB=4uN5z8-TK%HtVJezqHzqOo(Z<908{I8SD=lVbB z7~iD;I{h9;O!OxV~#0=!JjL;HPy@MP-(h`#O$iVF?UoY)Ht z=0}syb$}-5&_`JGu@oz3GH`ZiMAQ5ND4O+#t@x9&{F$FY`*AXK-usf+{@0*nPXtDf z2!NIuPr!VcA;=0>p;3&n*py8^vCZdkROLm8eftTTZ+8UM^YwUdQ#2Z#-w)6JISh@# z8!&3$B{U0Y!B&4ys2ogpiNF&~y)TigMJp_*{fg4xV^NjTlS!q~V18;hQ?|??j-no6 zauhB&DiNDU<$?XqXcm3T3#E4*@U1EWBP;GfMBihqA)0n8ALDrMjmaoc)S`QS7Fw@c zMof;mFuWoYXOANvbH(ZW?#6b4SAXKmjWxy9y>38g#_yo8yTxHg6)`xBar1mjQMt%O zqsv*%%!CH;+4vtQ@;$lz%4zDnF`A8^j%AH&~WCLJrUiOy`%Z7k}LLg z)hqO8U`KkL34?Q2rt!_@?oZFljAU;Y9LbJAE;{e3X&DW&=I3cTH?hon{Rkelnv z-F{01*}MyoWE}~KbcWQ*KGS!40JFcl3LlmK!k}JBT(RIiQ%C;9+5SHvZ~G&bTb(8k zpC1C{i-~I;9gXR&PH6O5kHp5(Pohd?|-tbm#z z*4*tpsP|o>Stl1^ObpaDG?Nd!6W_B(jg8We@NEI@4EisH`45S=rJBPwbo>a~WuGzU z^%E$0PiIldW9E9l9wbMa?#Z-wS0$|2wQ6B9a)!LYVc;a<)0xEJ!TEVhFA)&uaoF`yUKNxn;F0HGK_y!1oo0Z^7Ya0RAWpT>ROEITa2Tey|IUy zaVI3&AEj?-ZpMxB#p^#o>LHG<*2AD<8Fd4toMhdy zzM`HX>v+XV$lh3qh8u~CYf}s>Jd07i|0(*7uLaya2vc7Xd*RPW&BnWiLegzhF*W`Q zs+c_%m-eG*p$cK<#TVTDcjd#{WRKzptUajXO)4M||pPFAecQh{^i`;TgTN zGuuGHnzwk;*i>wqb{%KaPS7V{IqLdd#{wy)JyV4C}wk4wKiA=LCgZ6eG zM6@@0fa;nDusW*_t=oiAKk^A|kywif848Ft3CB5ut%Q<)4ROOF>O<|20mFVt#GvJ` zA@YSUC@$4AiS&+UMQ7R>Xns*yK}STH7euWiw$z2=7s{w4^NPKPXY*f0&|PIW6a9VSUbKEt0yi4^T(rb z#`hI4>^60yX2{v$HCAFo>p^x=Vk4xtnFK%D*a~53+xYB-Rp@oD3>?zP*AeH8VSnG} z_Eo{CNlL;k56wk)>pd*#=54qaSdRrJ*KvG(0w~vSl~3A5S!cItAVx~WfIkdCnz5NQ zi4urZ&f}`)jXb=uD@x*5!kH}EP4|>T%9lUTuB0A(yq1IC%kj{?%0}?r6NCC?l#iTf zBF@tqh&stOo@7?X!h_nOBEvvqwBtR>V=YCOqm;ScafFvY9tGX{engqyCHVII5h@%) zK$i2!(`nWn)a&B-?1!Z|{_z8_sIe718h&8nPBX#Ps(YROF=fjFPHS0C~s&r<`qf#@F2%jjO$m7y*=;X6lW`; zHko{eQCFbQ#(=!P$2GFCf&3=z-79xBp+B+NWhU)$_c@8UcMt8D4%Ts{d86Dx69?}3 zmfZR6b~Jb1%Z+H?oAjF!4@e(D!OoX>_qweZ^p^?xZg;@wnt3Q~>4MQ~dx5TT0&}w2 zLYk{e;}Uuhl)l#R)9MIVdoF=4`=|@*)h=`vr5I>15PnYG1G$SG+1izJ;nAbhkLvFep6WxeL1`ce%9KJ$W&JIq9-?2bI#V>-wx3OwuOJ*c06 z@(f2#!LO^vg28Ml=(km4k=I7ZxcQ7Lj7v1Leb0eRwoen;rHDINn+nosEAW{5ANS*T z;N@>^g_Ko8aqIjfi0^j@!+Y(4+P-FD?zm*ow0^s zJxgQ7O)_fyV%_64tGCKy8c)`asHTU(TBr%dh5*a1rkT!>;B>7$6-#@@K^E~%`9p3NPJ9J+rX}S6l1K=9O z(06BR_YZ@+$3QtZiWMB(i&KX_qRif0{*Bmb>Rng0J{uqK^o$yOzS2-!IU@u#qrK7I zn7o<7emw7f9hBDXP`lwTjQZN2YP2nII-ng@DF$+x`$7!=LM-%o^T@;eh%3^9c&^%1 zu->(Z7pj)P!UxTm+o6Pno;N_dNh+xBe97~VwL;xL1Ibr^0P26KhDG(}!mS(TqOI*W za5XGJJ7E`8Cd#4c#{<-Ceagc1^jQoz7X4a>5}!vnVd_^HrO&}~tKnE_8v+qddqLwF z!R{`OhuMpEf@G`~tH1AqoCVpS9m{=+x|PQu#*b8;odCL=M9GPHZDnB#BER1B0RqWkTx8OiN$8K z{2bI2NW_@2D$<31gY@2}&~%;dvNL-_+;N=vX}!*Ntcv}$`5-oeBZZTv_{ z0Lt+?MD;mFJ52-eYKyTDbNVCJ-|2%!7q(!pFJtLU*Flx_Iq=M_<@NgwKoXR}b&&!u zyz&4}M`oi_pN%x5zYPPTq+-CkQr@u7P|&ZvFSqS3frCR@U|8fStXy@DXyoXx@XNlDr?4wmWR4m>D*4>w~ zteH{heq#g}jnBl%{)S@t5^r2;`6mu_A%1;$K1_X5hS@nEVAhW=!ogl0IHUXk>XIVR zX*l_bu8>B0Ab?p>XL>$U##c0-g%`6DaHzDiIDqn#Ba6C;J}%$Ua?DZWGtC9v{664) zQ-n7YYcMqa4p){%Fhy*lv(z902D~=_TiZN7>Us{%cO#+xr6<;=pU0J_vdGuim!1E- z3qsz;@Wj?G!iT?NAz*9~_}Dt2+h3{J>qa!(?M^w2FJjR-@je_Fmj+F5AAow^a=x^` z3JUYd6Og_D)lUC0|EEO|y!BU9mMn*K)hiZKV{CjbA$gP^^3g|r?m_}^#?v9Wj{ zNaW4r{~Zf292-$-69$dbhB4cdyJ1X>p|JF12B_j&v~?Sv@=%A3EXQ<^JnWw{&^y9R za3(f`%5$Y$pO(SPZyu+di?g7&oGU*N9Sy$^GZUqispx#pOc=QM8`vH<5?dM#(Cd#L zVC(mSFFSPuH+{H-OXeQI;49<}o7_GaijvBIPOy<6&eTo@dVn3sw*s*QhI^wXWy4U z{a`M*F37=qLAOvBZwu~MFQDqLD6QPVR8Xy)C)e4}AYRH(khQW7U++Ev3$@KqzvB|f zZ5}~ry9XHh#$oOq+Nqqq!3+0)fCGJw5Hp#0M*Vv6!o$R-yrbj^@7wWAH&by&{eIAr zo+hzGB1S5Ipn1X?JiqE2ri^<8N|_!;Hj#H2n)&=(Gr{+^Et*}EifKdFVMPw@;dkle ziQcn8vaV2`J5vkS(2kvLy%MviF 
zt{q~Ot~4`G)vAU`iEH&4%YW?z==uV}63<{AWx>LZxHF-4Xi?=h)!?kEsQ%#zJ6Rg4a78+eK6@zHrqPF+vj+-e9fz8KS zD<8;QR|!#q53F3a2VRq3N49vYHj%&QEe9`iZ{k3i1r>tR+ex&?APvs_Xx>yOgU|qh zw36P;^TT;8O!b1C%F*25s|oG~ zj5YU}BSq)X5?)quhB$bW;Gm`hJ%3aZuZ;YXu0BjLdAxk#3JXDg%1n3@T7%%J!(omI zFt$$xtQ&m*WL?+E(?^wZ?asUCYTp183|^yiQYzZjMuM(!J6CUaz?vwkuXp~%Wuv-@ z0fP(qj6TJn-adfqvM(}qt3CA7UBO1^j0NlnCYd*4#f?)a6Z^4Ey^FEF^Jlawr#aID z1r}#Vz~GJDg!C_E+;xd7d8!veaG&uw?2j0%l-~yFn>JAW-C1ayxE0g-84E3aIlkz5 z3;gb$M@N@@kiU8cwpQT~>>iBIGWUTp$eR1^>Wlx9r$BW$6hfzyr%P9f!Im5F-ne(5 zJ8#8oE*XkO8!tj}0R#6aV_~+<0WfH_;+-vso7GNvCCRH$bp~+GusrPS}hQEA9~kG60q>OhM-X zZfuew8GDRP0qc>(S&T1*jQhL6_IWnfuUo8DtAO&oPV+h|6&Uup2#Fu zpA4WJ7|#@SHN5`BK0NuK4MzTLB&v)Pwec}m(ZRn3HL2#Xer_8$+|WSQpd74P^8q&f zQHs`8+nIN8KHj#p6l)i`pnlseY>=4>>1SH_(g#~%n^PyD$l?IzPW=u}#a1xj=iOLm z@g2=>sVRr^IW%?~%65LY5Th!iK=EKNq%B(nsk<5=r{@`MeDUvSZ9nG!_GH>;TO?3E z^b$82@dG4I9nf~2b_uP)= z#MGV7u}AY|l$;cK;LH|i*nJz?XJkOqjZe5K0--eTJf`dS;k}hj;FWWjGCBW(1-t)) zU5wt>-d&la{sBB@rNcD5d`Te$?rr2gPey^~wkME%wjHd; zFJ_LpF_4(Q32nxiiFLd0F-g`~$~F?QSJXc0wSU9fDYvn?jiZsnR!m+v6<#))iObsC zpoe&tW-42mN6mTEWex#Xxps9VT~4IeML0{ z^{vDCdBpBNd4>C?k{4vTGn7oX5@NLTF|_+rJQ5cOIm>oyl}D|))0lH`V2-I^M0c#p zFj&57M-80LyafX_t1$cVU0AyFD#nyB)Y`Uz+%Fuf>n@rL3+d z4kc4PSQG6m6I@EsV4l6UJv|FP4tb6~j=gaDJ@VAGZUv*A-l#COV5+@YJVeu#hb)O> zwdMCQBxoR4x*uj2i91 zl-s*O+Vj_te!T_ML7Taw`wd<{^)NtY5{BCbVvokxAbI#%u6sTe+>h6zWbb9IZgmOI z(e}jC&Yi{1|1%YmlV8E4D|1}#)J~%>OARlE7*b4zUF1)u8D8f-6J3p#7;Y zzV2C!*5Y!m%BqrQd2YnY?(R@4`-JWH5cY;xh{{fRYX(=VDpnP(%;+Aa}>{ThMK zPMV7$!IXcsHh?GYq}kK(Z_xOtC)LNqE48xIF6$qTkJhe1XTP7BB1nTj)E1)si7IsL z8AzGtCy4t{#L65C!O|}oR5Od^y(&#n^L-o-?_U9lQ`0c|S}XKZKSkHY9M#jdqAHm7 zCn`@UUr~jbF`1b0>NunuT9K!Yd}dn%izX=l+ZA6#9>V&?CW0no9)v9K z!wh1=aQ1^Pf^7aqYzjIH>y)IA+tvo^YutT1?*60lgj>34x7D zi0^q5G%I?ue}YYg_E(02bTjR+)*Dkk?t87?E|;qhgaK|@4Fxn;cCQ)+jh9oIrrQCi zUPIh)GaYe7zCodM1Gd-{gSW&~NVsSs=1QCCU00yjCCVq7TEM+Rv!MA+G_-FS3Oe)4 z(9l&X`i(0?&6_axZ0aeH{n)H+a_9q|zG0vweT&R+CfJqSKxzCE{O}vG(f&vI#5B7J zRDFj8OEb}F?tX}Pnh3gQE?oVlng<^zf8FxEa?kf`u<%wsKKpEh`h5NPnB@=9bMHT> zSZc;+xTH`f^*BB-=q{dD)8~G9kgI*kd(pU$Jk?LY=j<1D@aB8e%>Ik%J^cXp4M6t< zPp}l~@0QP3p6#`!t2uw#lJ>{1-U6kFs&ES_-`$7f~kUa-3C0JV3ig z+B#>dOYTc~aNRDLm)k}3rX8wvEivyV*lBIEH{%Clkx#lH5!CIc#v6>=kJh*hv#)g# zMs%mlx>wkc`2k;Blwk1xoIyR&hFSkK zgeN6jL4(h+%=yAGZrk-AmgH6o`cF~Vbo2{Fe^wH+rW2RWD`LenrQmtf9Q4u#Og{Gk zf}+wflQcv6rs=%FA{MoNchS$~9AyRi67T;gbN}NHSa~lSp1JkF4gaTCH0Xf#twvxz z{|D+#)8*?3$^|=n0<(5WQWbsj5yG#9UU0()eBGVI0>3}9Zch^VV?bu zqC;snDDE&7?sdxnTVV#OF}SeUo_A8-vOwzuFSRPCDN!EhynA%c<#~d=t?;^h@{*22CT=p zI5c=|#oE`@V27KUybv4yzaKY=lmB4iUCL`Z!xi7cA!dF#7VkZVGseW=gQrp z2fGUM`bvedHBv#XoX-AyW+)ES|BHI*#XNoNHw@W1kSk()k5?LY2I;;U$|m@M15*}) zN3|ZKVHdb{>n29&jE==n1QOoFF0)8JxrVS z0}QW4vK?zc+1n{{pHHz|VLFQG@n7D7-x)rk33SPn%H zbLzQ_*WvT>KdTxl$-SL=21U3(F;rL9srcRgTNJQ;aQ_4<$z@ozg)@( z4)H>8?S*b-=h6LiIuwuUB-D@E1UL4*MMdNgUNNW&+UJW9Ol-2?0lt{}?J0I_e2%Sc z#-grn2g70sR8H&)Oh|(2qPl&t_VP+Y2sICR+t*k0OEn(N(k^U_gA8KOsxp z5B{&sA!p4Hd39Y2MBVzv0-by@@!Bn}uj&F$8x-K^)4&q9)?%$$8TxC!ppTyv)LUfW z=kXf-etL?wLHW#Y&It^RC1AZr>S6HQBj;pm~I)*rI>Pe9inZZKV|d-X|3* zw{`(nv*9?qmgc({3m;=io6(w%`oEe;`+7=ld~G*WsqbLzCfeY>`pz8^(l9lR?zAv} zUVh~)%=!HTwxmXJUA-44rX_DG?<&rzc!Oo#NE`FpKA5q!2(!9SR?*gJ;7_%1U}-yq zKV1!XgX!F_zrzhapJE-_TM)i38an!a2g%7*EQ|bOMpd7&a^ED7&3Pe@&}3klCJ?3u zJca>feIa^3)hy56p++%-`3${@p{G8;oUd&VJS7Mx#9afKQy*4pABsw!C(JON_5zn8 zv2Ldn>J6@e`s{UXeQg>m%0(a?3~`CpVdA>O*y{p$UN?TmaOxQ(9$Q%W{w8?6;0}&G zT0)s8&w!xHu+iIG)L8ptIMwJC`zk@-H3I#*zk|fD3n1VBB}s7Ra7 z6o+IWi_tQ3mtt`06^M=zbzGWrjb=A}h)L2IoX}lp+>*g>&oB@Y z{;WZLs<+ljRfPQwyNNQRFxFI41+v&;3^6q1E9I9!Hhmy(idYM+tgEOCRdCtk4lE^) z-Hg;aufqtmy8)pxU|~R8O2h 
zk$jEo?$6-TX#=pA^%QJo9Go`p2G8`_sQPaJ%7(Zy<$H5xO`fhI&l))CZ3vqV?}31) zMxc>~fIf1TywK7aUpPF*oB$K%9aVsh!&?AG((J8yKjxGt$@M*)m^x!1Wmlho;8WW{ z6LA33-+tsOoWxoOf8c@3R{?H&k22Tc%-^vC(kK`D*(l;`#mcxYB;ms(}8#IEZwYh-v2l)$LRGl^+tpMe>eazGNXR zk`iHlzM-f}G1JO3DR1zG8)(132Pe~X*giG_gL7-3cr2Zz-OA;gn@mOKW+_e)h*3Ix z6U;nhEVw5Ai3N93Fi_V4OSdWD{5E1${VgR{?m-NR-Hjg?k)QuwIR-urK+nOX@jAW| zG)s1~81;Bi?r`E2(gfl#`D0X^3Irayh{8zBK zX(*2Ro(^blOMPc8ci8w5t#6Iz?jNXM&hQ2smq)OKAA`axoHF+gG28j8Ky_@eyyhy^(! z&*{nr%IQRK`dom~_b3nbl(`VTwjbua4Z|G2B3^laD$N)-VC|%>xa1mTuf6Ss{k)EV z$s>qSw#k?FGG*W8J<_!pI`JxVcq+$_6Weg; zim&K=X90I}`v?^!8R)fVKYBFYg@OV9un!P|6GF`evt}*YZ>z$>m}|^q?IZA*P>2ej zG*)u)7UUf7#}7u{M4NA?v3N4ozJ2Gy?8CIHPHBXTqopGINI3!t#-i;FOQ@?|2aSCV z!QSHnH01BbmIB(p=9_{wWp+nBiRI;vg*@blAzR?pj6(L0fuxIe z)^UB046OSvm$j6f#z4Li>}KtPWubK6_1(qlvhFeI`DQkID&^50J;fIiTg9t30TmaI zLX)=?Ld^eR@|*YCup!@Eont)Z>d|!xZb`WOM8+FX_@iR zueA!k*N`7Fx)Yc``T~k)=GdGN0#P&6tZmgPC|z+A7t!-~+Wj-s?TG~ChE+T(!~ykv z4zWA8x`;;aY|#4k9XMku6_1QE7S#uC%ALA6V^Me!JUU<@rW+Jd9>fJ`zqANddB-6q z!xEfyf5XYGcKAl|9z3!VNkjaD&41Tf2<-F)|^IH+!>1^$8ENUKkv!%zU z;)^epsC!5=&Ahu9x_ufXy{ZP^UxH7qs-Nq zKJpxovI_)FTRMc2_UOjSj}TpNB`o-|0;JX^czR3}q?O%8gY`eL@(GR5;ZhB?*SCUg z-5u_@{WVOWdb@d9J!UPWx^dqR1Ych;pFGR(Aqb{}=rcv+O{H&I;&xy~L zHB_IKv<@$Th3&-B9Zwg+#K~A%o{BMhjfj=)jY`*^TvaxXKb=5K+>i?t=k*zi|MW)V z&sQ;wGfT~#~Ib_ka$Q9I?rI{|22cOeWd>~Xh5%z7twh>Wjn`)GnHm4q~B;{zIS(mJVq)s z+4O{!t9D{yibSy9Cx_y!6i65K7;o4cLl=8NTXG>N|N6vmLMM1x(N%PKTm}{WXx}eh5H$pp#;#1_u$jruoz%KXNGD?c22%!*cW6d`+M&kesebQ<>cIWli95TB zZXVAenFV2JQY-9ULt4r~nP3^N}^%g3g}hFKCZ*me=ro0{NC zy2DbAJfeGbf8Kf0(i$lSK$O`~2$&to7Sl{N#;da+d9?yl_D4e6K8d)o)>sJcdL0~{ zDj~v{zPI(O#*aN7iiR_#LiYZf&`V}4o{r*p1G)$;?Z!~J>@m0R$cMlW%Yk4xpy?gN zLKB@qpQg~3jwH`z&U|)tVpl=Zr;_dXjRT_eXpa7^IEW)*n)~~{dq{KiLkJv67_|S+;#N__Q|Qc;2C}%svdlYUaxv# zvHt*wTx%w@xTWxJ9i2t#+X9Sp2?xC`vCeGd3=dxi^NGY0$?||KyLf2qybk@|AIFsu zk07k~0Z6O859Qs+YfJs2)45)#x;LI}>XwGB_gc7h%|TI@-F6+=nv9B|lBW%YT@O!6~yL_rZ6T_Qy|X zR3;I_Cf-KhH51UUgBT|RwAfgAjh!En3FbbfXI)hf)o z+5kF@CECu&1ea-EZ1K+sGP7e>D=l zLoZ?b@M6q|J79g$3F~R6U)5|WD%m_(P)*#UL$AO!Z#7Wn7}H%mLOi`v%H-Y)q1E}k zZl}m3PakSON@~EGvdTh_j$&io&tSlXeXL+M=}4Ch0iE3#cDLs#Y*AccVFkP4_hfTn zV$$W7g5LD9+%D)aj$BUqkv0I8T?I3o`~hV#UKo|Cf!PkGE=S&UEIh_Nicb8&?O?TnEtN=z0`4=m;9YCMAT~NPJF3-?N#TS1xfzL6iAUhn! zJ*R%1k7d?g>7W~&g|ei1O!H+Ex|9FWegl0k^-3(4j)qXnavtMJd6J_S)BN^-Ab&0q zKN4R>_UaOA0Uei`&{^Sh2^{b5!1K`!XkDDivljjW&Yp%mXm|w7ncs-7o3)_(xeTRl zaESIL(nY=socoYki^0QNrtJ}*7Nt9Kr#JA36ST|3Zs@px>$8IR@Tk20qU8O_{_ zu*cp{P@gjmsyBS6T($_jOTH3a9p#t>Zs!Rr;;Fv8jWO4Lh5@n1Fm=@zjB7rPIQbRI z3dfIkw%N-K|M>yEe*PVOPf%`EJ827c|G<#nP1&(7QZcwl3ah?;rG5D)^z+HU9PLDK zyZH^I6ZE9>sDL{1FPVRd#PWwa9`);6TvhP}qZIV{{(TOG>Q79sFK3Mly0L28CY*4D z_RIg;fGY3~8&KFqC=-3b;k64IMz-Ut{@n%jv=aHn`KDsK*A$2|&qX{p0K&U&g`oFV zB3yUHEXi|>e*Oy9b+Hmu2Lr)x*GbfeeUKafN!dg#zrsN2G^qRE&oJ*&H*x-;jyN_A@522&YMc!et2aGJ< zkj7Gh=}4JbBk6gwzR3paR)S=y9K0JF;gq#RJh<~d?j0)?>Zwi-Ji7%w)=dN7F2f+o zgi-!WIQ9;Dfw+jyU-Fl^^=-xQE5D*9t|xmVCnofpr#N&n@d5A~>ecVSte=wHHJ$@{ix(;ghWMk22F ze~V6zuc7qMYEV0DLPd3md`W*3ahDx2_nW$c`dAL@PuX-gW=I8xnb{yq83>ByQ$ZK? 
z5hnD$4~nI|Q98;J!-(O!tfK{9f99BeZ5tNP?E_l(ytf(D zWyV5o(^4Lp+kk-|zjA%@28bDRk=T3((ZOjYR5_HQdb&Ae$Hqe4-ZVa8`)yEVUSf?G zhJ&(ZE4R0^5d5!ng^TmZt41C{*MuhY{?5Vo6V0b0eulJoeJT{FG9Nz2R$ZcfGj)|b-&VY7`2v7rwqT0 zJBLv5ZXNjg1!B^)SWwtB$^%AZa{KsQsPEeo=B%U4^|~zfXMULuin2Rr@ zdx4Qx)^AP;%HN(u%m2<`{rs2UabhBFAHEZf^l6~zR!zAOHzDVrVE~V;vG6aNG2cG` zO~1UwQT;N|Wl$r;Zy8Q{<^wGFiU>z``Jq`NVNP}r;Js{8p>c5{3)kz3-PQ$j`uEX} zJevsag_=}n{$Wi~KEq6G@jK0RA-fk~fl;!iNVKT@X zum|FEM&PP~Z(x*n0dwZqXeC4Mv%)!BP*X7w<{DdxK1C}rJe!z`@kUyoh&xOzT#-A@ z@<3d9jbu*C^pw+Xhe0`?@bBT~f}^h)C^LRx70a_B z=lfa2kArD`lj&SNhd45)!r8Dv_pr6$GgqbjCZF{BC}~&k@}Os@QFd%F-qu@*ntPA1 z{_RFc%Bx1{v>0A=JQghqx`@65*Ms4rOelOZ2i^bNM)`58aK`cjDE%}PSMC3TTL*O! zW9%1#=YSmU*JdI_|9TMm-LHX-*`&h@Yrx3aWH!bNkL@qha$ z*ZI4P%jPGr?qgTcqpt~`zj6WA&#Zv7qn**ypn&q2(!nwE9?Irtv)(3_!jYF>A&Kg{ zx*v{U-CD&Qc`=U~e3UnLC(W3Z3%vTvO00?NDzx8l#;n8znEw78Y>zqtc9A!*X#lX$Ip2Ed_lsY~vBgZ8jGq7h7S`6%#Q= zPa5lW`?R*KcK2O z9gCV@Vq7Ktj=v{CuYm`NJzWA#nbCWHnOgYtAtQyongtM zWIOQmkv8Zx#}*Z_DNONY8>o5}W8m0&7F7Nn&v!0E_h;`>f1?WvbkxJ?N0;%?Ds!P= z|1qk8mSdXlZZx=Skyj@#!BNR&FmRTRIQ`vuhd0x)BQkExs!wlgJ9VO z$_g)hg*k`*$7OFPFGE#W9S!8t8BH)&dz^SBM_Ji#^nP_-fV{YyuyBBtP!LQyr-228;ngT%~w3e^h29-G2lJ+0w&NrDA!^!%Zhsn;REe4Iz1oTqkcncw|~%Jc51HT zmyr;bZXyJ3(WA9)KQ`<OD%Vx4A#bNob>`PVICP_~U%_N(Jw&*xwTY3}N* z7sC+Jrs;K&c>yew8TQ|ej$2DW<5kD_Oo_1jAIj9WKE-5nSAcSHdR@p!%vH$_gZhr>NyiNt)0c-KM^i#tb`PcJy;ZGAsE_R0JTajH;->a zN3%!J-o*i(Yi!VM??*tYO-IKY2)fuC%*Od0aj*Yo+2b@|YgxjI{-FNAwY%W8kQk$* z2T?}IS9aaUL@0kWmhyl5g0p4_2FsJsdaoO^d6R{X+Cp@YI)Th26PzyohxLcA;wpHB zYW+-*A3qH4w#Dc-lyXZvuVV`BXU$jM2A>jBAg%19Zv7n$bZl%V<-;xkqE1r-XM;oIxBf8*2?8zb}QfEfTRYR>!1;Z+Mk9 z9Y5kVwD#1Y?#Kfcm3RSsC>wr`_ALw=WF(Xm``dcu1itcG5uV?s1)tErVJKxK7M%Zy zytq$zdSVIHUq8To8|^TM<$?0f3-X2qU})HE%C(%rb(w|iMa>=<=|eSOH%n|Ea}Ev0 zrfN%8B;%iS{^_<1WN+vUbiA@11Ln@g{9g^khUawV&N_q-?-+}+av2-2{Q#6tC4cns zNwkkXm}l-p+2@W*oK$cWmd$+&Y4c8@QRp11W4a3};>0?(e`ETJU)iQvS&)C1xQ2n} zv^hO2m|H$+*NwDL?9>ObrU@Gltw+@ke|gKeZVdW&qlZ5x1oT}=#0bRuz9Vy zsDE$AJH{jfS~#N9xG^C2*P`dP{cL1iC(%b4i@|G-<0P+rJke3h7f#v@=^lsCXUi*m z6!rvr;2#6aA% z={U3p4}ftCEd*IZEjE2IhvpB}xV_CtoRC|I`Qh<^`4Z6}F@?uW)=}So9GXL3;EZ1^ z#c2OW;8TzY<@bW%^kMSqCUt_=-Cy|9AYW{lQbf5@nJnm!NT^HIf#VW4gkM}Bd#?`4 zmzaR$_!K^MS|!Le>6q@h7RncFK-*yyr1^VNbn)pmnLT*^AQF zNM66P6TojPG47j{X#MCve4Vuqt(4iA{>&eZ8s>nj{D0W&S&5s9YjD+@mta1i|-;}J2AKthiw7Q8Rrj>=hRn{W-iAsMb`bQSNn8j4vSy z9ybXGzWWY7TT-$8s1mID7vPF3QqkjlEA7ok&tiBb0@bw@Df56TQU{#sa1Bn z(Ja_m*l_)>tgbaC0lpzGgR%pzJUz%2ScUwz<{j80%y zNh&&ttI_QCN3e1$fPyc_Sx1j#*uGN-Q{Q(MJJKmv(QUZa)Aw)k;C!LEHu*Rg#%qIu zEQIPcpUZ8As6P+$45 z>Exdt~kojAK zMEP)h>*4R1GpH91O!t8DKa;u4Z5qT^$AGJ{8mvjr-Er8MeuF}7ALmZuk<)Kc`O8OK z?;v7J(N9=t+>WKmMYy$&*h|x{<<Dn(Ye78tiRZ@+5eh}F75$bw$ejh^`Q(KcOK!+mrZ!U zFw)O-9OOE~&P>0eUGAfZRab^Fm zc+j{7TwbjuR$`pibMJ{GIp=6NwRSDmd-kfRhu9gmph3!}6wj+H#*Fg- z)p>eHhOXxQ=DmZY-}10!Vjh@pr`(~>|3G-td&n7ViPC-rT(quvOG;>`{IQ*Mg!MVmJN_kZMJDLRpWwRh^I-AK zZer=L#OO*PzO&C3xqTJoguac1sLq+JG5;`g@!iIQ{>y^4_?M`j`H6+L6Zcu^0SDWf zuxw`x=!YMOGKcukDy*Hq6>7)5L5EF7IOZo) zF=W6#$S%$X$$+ouqd&k$1^kPOCqr1yIvESzI|s|x{!3XZbv$+bD+sb6{q(MzO!}{$ zMGeSD;@Z$v(vj&ZYAHq+OqR!ayu`_#KRf5p!bjHC~W)g2ku~Vd9Yn zXefP)?ms6%z3wzlS|5cN>;+}iZ~1&b4YH#(<6Ucr2fF+ypYY@gYA2qdzw;J)PQT6- zxntlor}|Rtgr(1(L1Sqn4{uxtW+ji&DE&63Eg)}si>EwwKm+8qsnBNLE_jptnlil1 zgdJx)i_r_-;MfNugu5)pvIPqZ>P#u) z^P2;HBEMpXMgz(3iol(8r{O=Upqge|#UuBSzspk0S&~b;tXq_;qhtEr&8(<(AC3yo z!L+QaDDitWe&|Un@#G~t$SvB$)2t`r)EPQFLN)F}e?3&)+zZJDtFftsW}xMHSbwfR zW_hl}d%s+Sxa1T(uqhH0aE)yy)}_?#71uYGX|=`A!K+&zNFQu21n(ILe!r3DptD3c zvOWl9^(*+tRaY@&Wfaqg-q)7DC++<2q)q(e2@kcHjX9+K(0%bF-4FdurzvM@(q8gg z{ee~X8i@Jaf%vc$bGLL53*adje+`F*3;Qvw^&pnKzW_FM8fe{0eQoA1&@1gYrWaAq 
zbuIuJCs(8Y=32B}n#q0U8VHkCQHI@)k=k_Cg?%PSg$?P(!h(cWc<9qv zD0WkWQ<4Sr?L*$V)@ywG$0RlShaR9 z*zeTPZs!Z6FCK++uHV3dS=O}EG7-KH>?Fe1HxQ zdo}pz`8!Z$O&T9Rb`rX}cCfbdyHT=h6qN4YNxp|XoOZhrhux%1=F&g##ai;9E&K+m zqQ0buv;&*aPQtPc$KYbKxwxdFE11{6LuZd--tKb&)4o`OY9;lkS(f71X9XDK_8sOo zC6eFG1HQPL3F;d&AkK$A`odzEAHI3<33 zm4~(YUx|IWf_9<3F!YrdzZaDOs>f|$+*}4mkMh9XdMJ8ES%{jpwah)-9_q4FSh+lp z=Xew1IpG(&FS8wKHqdlM^*bO@&PIDvSwDhch2{ldfjw>`FE+HvD85*-eHMtgQ54__ryV#;oT!=pw~V- zJVly{-+xHOb~=yE6|~!`U4Wg)XPu)M4N}LkP&h@P8U0I$k1j&l(F$(3xd|ng=0Q-* zc}NV`qsH7yD4Y5LTB7e#P}*Ru`{^v-M7)3ZjWlB&dkv)vo^tg{4bvOlW--5B#O>c7 zfnjwGS}UC(KY17BN}mFwqlTdPY$k*S(`S2bET|(%WASh}{Oc?g{0)2Kge&P--}Ny@ z9X1ySRMInQGLvVIeh9&(m%(pJA&!E_l*Ro&;!u}D`QBzeH1|EI)MMq1LB#YQ(#oPj z-hgLUWbyllgZubRsB>G#vyQ)n^XVE`FuDnP{jZY{HT@}@^D+xOvbz!I_Zo&&MuYBk z3cff(^X(29pgfv%z!&oZCRqw@iu(}v?KOVPEkN^(DrhXPfhS&F1;1aOf@bn1=Ddq? zgp2`CU8j27ZXOPeuoTU%$K&)gBT-7{To31BVjvyHcG7~m1ia^!xB8&3&IqF4X<%88 zs~AwGfY8BHdGp^j*h{$&^soLJ-_)J-ytn>gQ5lu|#=Y<8mtBJmbD!aW-w)%6HQmI7 zP$SXgzvoa_ag^s9>M-a5-P==UqHVqtB)Mq8uU8{fwH-&fwh3*W7lCrp0T^|<9>yBJ zCC>{nH#VtwK~gB!&ECdin~cOKHYS2@+|RtR&olIPsY2_3HLUKX6xwHn!UtlGl&&wt z^wTdO^lB0RurLZ|+wTNh;#FHMp&FoQuG}dw1QWVb{njy-sb06S^OLDZ{nE-#Uh6Ez z)cp#<3YVs;E45Gm`_|e)mR(3hif#HZ|;P3*TY$U(pPA= zUPQT>Q&2JT7)r?(p&yi}b$zPm1(eZXKT#^gPNwJf;2oC!GKBdx>7k--4=jC5wacY^ z@J*$>hK~R_NoTdsI`*T2_7nCaNz2h~04#MW1>5;Mv25mZVtPN7w?CkFXX7pI^!{g9 zXpew@B08HPS8eOT{5IxdV^%xYcP|2YmS83F?7Ywi3H}+c2!}G6;N{_cN(U3R5k3OMI{z<`lCUf|s9xCvAp8FzKgd zam#qcG94~ke+4X^Ed-1`jXrY@u(A-;*#5o&a|{N5KDlFlb>e4EF6Nj|FHvX0^c%ZbDK*(0%i;%Y2fb(ENh z#6@Y{MSI;M?Xt?pAQ?6jWmBfnp2iYPuXhvCePD|KiNRmS+Z4OBeBPAmxBa$4FGSB^{B!@vF zDH)VXBuPn0_}#z1Z69lT=XvkvzOL(gO+P_<;ih8fXrVsTyww%u=?E)DUfY-DMqv(}^9U%aq^7fV;AsNv2xxS)#RAM(3!7 zJR9w8EClEN=fTLP0}XZsg3rc6>~|y+=l*mKhuVsThkiHF@?k!w>Yay2pHshB38KXA zyxO5(J8?2e(`il33vnAu{dCvBahVm&+DE>#0bjt|Cmyr90;wNE5AE;113XB+`geM+ zp*sKPe<$Ny;>8U<+f&GY^ocw`mzWPF^KG}aqR#z9spL+BbinHEU~A+9IYny79ub77 z9*mB`7n%Jk(rQ$1m?W%|Q)cXfk#YmxO40-=(fYje<9q1*T7(zGJ^AMQq`|JZ4FS$U z*kDwOmMNsEkN%fakH3n6?_OY{hQ1IO3FAmNQ!dvP)@{(`ZCz(iA}pARFK@v5XQRMPe;1gj$X|a|4_w9@ z@e3<;1gG#`LatoHR5oj1z(N)E{885Ro;_-o2EyTfdV*-zU1{>VJ1Dj2DXe*yPF|!n zxZukR418UHE^S{>MtQ@4%@^713*D$lO2>;jh19$J3bJj*5MXMEO;H!96C)oU#@xZ& zkOxweflDxPprLSSYY!neZHhX>VmQ2-BjW9dcb~CHhTRve(ffcQ1n#;GrnQMMcg#2H z5U1H(XBvo-HmH+q@1J6Sk_@oer#)0lg@7%ew0{hG?P6$4wilAfbGJmU`>09>>Kr%pOpz7 z!gu&e9iEcHL9PKde}Mbxnc(=(b1vW<>1nGrpus6?%Bntq5p&54Fl0V;|nzW@d&;8D$r)qQ+6ucM2IY^wA%}YjPnSMHbZdnKSl6t zqaI)EKr9D!E6Q`4)T3^`#J0Q%U};^$b>-cHvPoOe#&R+gUUbDBJ7|Y^oizT*^UAI8{ezYwC|dx6!yCzw&72lLnT z;^iL%Y}>UO`~JBPU~L+d+--xM{jw>5``*0-_BjCSc}7JPJ7Duyo&q@Ge=2>Nymy-)7}ELLKO#d+|VJsOLn z&Y)%95(w8CL-;K6m&%WVLbg>ZyYB3&=uQ4afkII(iga$fO0E?#(d|vx<~0$tWAp5GEdKT{)cU7@V{0}uF&qSaFFe5os=J`goJ_vsUVOY^F?e*{$Fk4Ih@p0YY4d+W zotr;NReE;Vop}QMvtu|-({Ehd>qC&CR|eDLwqx#)JgGM5om6o}%DGDmIc?KMC|mpm z$}076Y@gnI?3Y`hIs6l6qxu`0m%YX6dE_VfiGewH05sN*Sm}inEK+{}N!b{6g~AoT z_sxX7CmWdjbS(OP&4gkHAGk#fW95=)~W0gHVfpzUsQ08nVo_7hZpq{AN(_g^H zih5Lj7Gtrb1;?#BigqV!Fj6lJwx2QMwIBLZ2TCF*Uw@m0XXiqh)i*pHWgze#v};N^ zh9#xs4UDyf+BLf|Ezg4Y{^#_1_%4o#2fiZD!xU%tiKw9<2uC2 zPAAcao_nR&E;7RwVd1{@nDuWchE>Mnt6O3rojR)16AxhA>uHcMZZ6JkK8t1EzF2YK z8qWLGgjc>=hD$swc$bRLOdeGYnxZb|@Z%uwnDb(EtG}+ z2hPVGIh{g1P%P|^_KtE;4U@smMqNSvXuq_6#ZBtNF%)hEB%sUPZst4V8XmQvTuJ^? 
z&cM+?=o?7P4V~jG>#!3BrrJWcCwc8_sq5+9UXUENLhsc_ApiFx=o{GzVPA=X?eTxV z%Y9Cs5zQ&q)zG_Kmp5Nz!H=5pmb#-VL3>=r5?4G&{h_A3rbI?eGX5o0P_t+Uo1;W>3bv?jWSuUQ6`Gr!@O#CQxBNIRM;fO5i2=$jA&wi}$$ zM$yEi$@+Zi5O?CqA7YY>_h1sW3;mn7G0E?jSobu_SM@W-$n)1(ZEZ0&*A0WkBdIfV z_DfbjfzJNx6EHp@h2}YLSY)O^pX!;&g3b8mf;O%zsS+(W{mOhU%!9z2PN<0<&g4a1 z(t3+_Xp0zuam!YKWwstCyWlI892-np`ZTQEz7!pPzl7TVDDP2rf|LJggX(V)v->sm1dR=-^)Im9xKS1&HG@J3qH7MJ;gTCiK!B>e1Z>ijgk;g)~!joBCk%dU` zNhb}(PsFc%LtdO&Z`CKH27JUfBVp06df0dHOTe9dL8saR;wndjuggiy?W@aaR!ryC zY!va~k|WsIcmO>6m_x#zJs1?)g4>A86-X=!@!}ciL9mVZ+(aVA6X8WY`s<&1eJK8GOd;Ym9mKK!3R9@&v8DXTudELqWXk zHhP`Ai8eoKS!dNV?0%mNWx=_aE3#2<3fhZJo0C!ewUM%SQZD731jHL@CLX*R9`EkK z%L-_3w&XnXbn=EG{x!N!d(333YN=O;kyrQ&7pNe1%-3r4v6q3`tPrfL_YqI%K6E?1 zLG8IjRC@f1S&O3}am^R_IP^Uzl(Dek;YQeBcM@M5G~jI(J^)WqBN~o;fT0(faO=KD z*t8%4HD)JRwrD8ml$?f>B2!`0=|oU1ab+2US5Yov8u!NBlrJbvW)l|a3wlZgq8o7_ zKUPA_pu>3MClkIhsZkofvKquE@=-mn z5hO#`u=*!%@I9aShzE~Bcx(X%ZKRH#J5h)=vnYeK1hj@@pu;T_efoDnZs|=X?@!F( z7qM)?ZY_Q%Jt=eQbMhGcpEn-tn&Dv~9h{9eGb1?rivQ5E ztAwR)Ifuz9`_R*vx;>_*fFkII>w^5(q*V_Gc`)^5*QwA)R*tfkeNxThREW1!f%e&T zuG-uHGsOK+T8-p`tYzBz84z$x1J6(C3n8oqM8$t&rduPV?jDWZ4!!yCrLm|U7LCrq zbKzEP0ciW1;G3;qFfpVN69Q@9SQZ76G+T9j%1kJElZqL{Gc4Bci9XK{z}Mf!LZ)*J z7UoMa*rpO9w5CjHG7P^P8t_}^zK45u_i?#=KP<7<5i*XvMCX5d;?_rd$yeIJ&R6LR z`LTx)5-wxZ>3py(-^bi%zT&*Dw&3E@H4wk!AiSb`O<30yd^WTlIiX@htrr>vI4C+AN%|(M2CKWGR_rR8^nCKpC@*Ej|YtxF^`@nz{K&r1^Z~? z8NILMY=#*K9x(~HVu>zqzabL>+)VNGs`sdOrwwEia#>*A4fsSHh>lPv?2P%!w08@+ z6*E3SoFL+}_ESz&?nx)$RR3;Z@x(_pIS|QX>19{G?*pM9Zn9}T7{B;`b_cGOrH))uA z=LBZF{f=>+yD@9o3$Ef0z4y;Ws&(8VSx5u1TQg5$$u;uxeZB#fXBL2VQoF17o9*a5 zYXVl>Nk+$4*Wly`BVp_5?I;g@%zOrHMR}@+w5_$L5Ggx{jZ1fqT^_@4(Dkv~~s zQ_2=iibSh%)JgQuX0EaBFWk%P6O$>N`}2+--=6Xc(hkz)XrF~ppLUUJ^Cv$>+BKBa z+>n+{$w6OjEjaE)mUXTNSievc)B81w$9+eQX%`n7dlAdtJ%==z2G-e|2oGNsLEq-X z)Z@^`$sYbgd^{c3=7WPdhhbXCcCbQ;GL}hF6_}qC3E~PpUfZWY`q1MBUYNQa7hL;_ zI|iBZZjX;(?0|QebEgWdi4mn4Z4cOFBG`;?M{#Zx_I+IevaBez&Prmy?`)9DDHoJH zIG&#Me41llQ{GI>Dv4z>d(|uQ`HzMXtQYV8`W9DWKz+J{W5ID{Gn7oJ9?f*CpI!DQJJPNx1!J?}4QPJRPQqX?AHnLdv0 zQE`**ppW-KrW{_xN|NlcajPHH-nfCz`~*%G{~E$#_Q8}@bocvh5mRNy!gdAi+dO|k zm+K#xy?Gn;5{yIW*aqS!#J1jEN8P=8eA&DAOpzML1#JJ!MK*OIaJN9`c?^1OrF|v3 zGwrgM>Nu-+kUuYxIHpk`+q7G{IM5gEUk`vXkFQ*ca{@k(BYs7nBJh113&rnff6y=i zQ|2ebxJ9wxTT%(-@?+>=o=d-T1(y`F8@#Vl-*i`u+G|KRh9z@R{7MEuuRtflUIUmmZ>&^%-izr>KftNVZllxVE2#D?q7E~E*Nb1Oamb+}DA#>}%{Og1iE)T} z(d}3W&g_EbfVI%u9?ZWXnGIK-Z>By4zFOt zNE1GHYJ^(H`=nYFhth)byWtkq_!$dV@I*LJOIM} zxW~2aH-oJU&VyUX0VX^Y35%`$(c`bfX!*G>L{6yT@=G2=iFq8#?w^+`-3O!P`sHZR zUISTEZm=xVVz#278)AwKg_S-Af@gL)+zg?7{g(^idv_n{2>(zYgfFxCkb-S$y5m;u zA{K-#1gxEl;Tm6DzBUD%_7#9CWFYiAL9Dw|-Qeun7cBp_L|fZ0SQUQ-v^fRp%yo&- zP<;{n?C!#XH`lSTt`y-zGWcEJjp2WOf}D#Nh`UpYCSS%9XSNS#=}?TDF0?>w{8PXM z*(mNg5(E1+L$>82te8F?l)H_2JX#E+r_!DE>T-xI@a6hl5kP0EIm_8a-9>XaT=3xq zwDq&c*pf_WSZK-@jv)@%J#&z>{EX3e2f&bNyTC7H1a1vXLZ!6{q;D|e{V#lGZf_FN zIjILW<(vUU+;Wx~O7F~f?@)bDpI@CT;{B6PgWdUWxMKfX$T%tj zHp)a8Iba{`2R&Z;&kb5-5(l}O7%GYmkbi#%e%Bm8b*zL7m>h=vstQ0Ebe01#dFTdQ zXVK*)kX`)={9A%y#qCz8(OnJJ2Pp$mS%zh9H*x)OeSYqIEkwoEL*alY#Fm@D1?Yug z^z97VQT!~em~syn(S6i(M+0bddO>`S0`h%J&~n8!_R`RlZ@$!n`^(oe5v^QX^F)X*Jqs;v@a-CO7yqXd)VFInzEwc5FS zCv2EYT}cH`;dEdV1ipv|ONZ%b_$CSc!>TZH_*Bpc&mw>4P!M5Kzg!2#4X*|t&8>4Uv=}H?M4fy3J3&4NMRvcwS+D|R9 z$X@QCyv=k7XqyhV_Me1;zkb8kL!{{?@5kj=5}`SXXLjH7F~t8V&TV}JqDpsZM~oS7 zz0MC@Y(HS(@-ilO%ZH_%7JPO843yGmZR4h3Bl8vD6MK|RU#i2)B;&Z^!xEH7Ye1)R z*VPwBiD>hFF&GfDZB%FxY<<2Re1exiku_<2yUh4Ld7B_$_YcsJKe{c&50+dggLMt& zLgPQ=9XuLM|NqIHVck9Ot~x|_pad2YLR!Phml%#SP_|-?v}{c>7gBT$Bg5XXjAF zC{v-~R1B8CJ%Dn`y96X!qT>(h3|s91@28mvs!LN?)|nx=zw;z$Z`NL|+LQq9WhI=J 
z-iy%_dVtn#BxgHoGI%Hc0x=ij$gj~KBQ%2{;IBc%N$_Km-)7;?&VA?~U(MOQA&>ok z|DXk4A-`w?)L7gD&A_KzvFB)vnuTyT%Sf<+C~o^B6W;mUEY7#N6)WmSL&butkharU zXpA0&ZnWZco}t&%VceM$Lha-=`2$S<5{+YQ(2ZFy&==?>KQ%6zzJ9D65vt z!v7{tK+hl4h2@Dp2PupGXaqN^ss>Xb2#Un1IMPPMXYW(thOe#6`LIZEne>rWjL63D zIcnTyNW6jfGA?k~eh@Y1NXs_f1JCY{V7Jdu@Y#L}oHy)*tdSOifky)_dZ`2431eRR z3!+SKmpbdpIdF7&&2o$TyJ}QdS@hwJXf>dSd?7{N9o$(% zECb_97*LmqUGA-@{5_H~<%lI0iLL#m6KB8riY6N`vnki#W5My$Ovgq~s=c%koqy>M zwTI53=Ei99gRi4JPzh>oPGQkUN5g{@Dw<%f_>c85Tn0kTmpWgD}AU z8uTe_$CBnOP>kCn?GDi86`Ok_K8*)aLk^dFdJcAOYl9(2&wyv737GW1!#QC9i1JHO z^mG(>SX}^7;A*L&F5cO5cqD}H<1rw*9drM<#o$f=8my%}*y$HoXy%UH^N72krY!M? zUc5HDfIjDXP^}DOYm^yu-+WBov!f7jARZF`T1(v5UC_MaJ@raENO!hZ606RODNm_c zx3xa@GkXf$T;jvuuf(nZB{q8bK>oy!kkhUwL}$g~(!~~nmCYFSjLTCZO{abkv{!-Zz*URsXMngo<@ zF3D3?nt{5tX+dpR3k7n`wseO7JbDPqar?M1d*Y)cUWT@=a#S_aKHUEaF-c|kW-9qN|4ATD^?v9xp&n#) z#FeNg9nsMP0$h|Z%J>uJ=hkDm1?3u@b0MJU0QxW5%YAAx5JG>ae&ZC|_#~ zWup@CA)Lin)dlD$jt1@9R;)J{V^FXXDo)D4rBk18ysL$n;?0oz{tTy8pX2I}uL6gt zWYAw{A=FW4eUh$)Fl>ds;JjAGe13k)#LKL(#+mx*S0#ewy=`nv&m_z$)&bvRw{Xap zvw+pEn3ZDy#lO}<$JECli8`lN=p2&Xnid1H+5e%kP~Z~3d;;gjN%(Dpo&dXDu=x@F zt~;iIqvAD0-yH|I*mU9EFJ&(e$UIs$)y;hWM zjd2a0n~7~Fol>(~}eoas$PYIkDi+{^w5iXT#@8MKM1=9Y1B`Zl05bP%|`)y1$M z)3N3Uc_cKctSj^?tof8eOx%6gMejKuNjc}A`wzB%d;`vU0<%2zfW2Ny-N2)N!E(tf zxD~1+IHnq5l0^w) z9%`l)LdWV1PT_A2775QFzF#vAQ7Lh!nD#nf=7QpMjM{SId~W-51Hs#WCFaflmUr)z zn2*Rc77Cj6ggmcToZrx4ShlB->lYG*X)9AOCLx)ta)}3J#r-p~y z&H+p9C0ra`ft#p@9fq#N_+!-P<7g&Wwj8EBeg|b$%~<%fB{<#PNRa<`g^gL2fSwQM zfPvHx1C0Bi!_F?$?0SyLQyDg1xsC2#S?rv|Sjhh50nXpAp=I(uu70RL#J$quGh&DA z9C;VYmS4ukSAU^KeS`Dga+uRD{K9QzchJ3$CuUwp46k|#-(S~a0G*3n0Z$;rLx(cV zi=m$(bv0Z#!bwVIvFx|gab&dtpXyft`BM^Lf9EL}DN2C6Zw;K=fRmgh`^@bcR}II8 zoAKqVb3wCkD^nM@g4$#Upx0%P>;J+0lL}b(%r8*E&4VSqXvc{kz-MnQq~xnm+ctw) z9`~R-o)gaat&2MB+n9P@E1D)%K!{=yW*omRYf>j|9Ns?RSNydOha zGQnreR*B4c_CZ>FroF%|Ix>bOTgD%eW9@^6ZJnaJ!t| zgY3}+mw1nhaQfg6RNSdU46Z}T!@FEq2K8%gKFEUk28=1zV1)sKWpD-8Hv4b%J8Fny ze>UNF_4|ZQE85VC?$6mHtWZsUJ?-5xX~)YfuKB(hZ~o;Nc+WG07lo8x`!|(%{r4zi zu#VM4g^+GzuWox%gntYb3zkFlIG^+st}2oEZYgp||EkA_{FMeK+p00fzMcGjeNq0a zoonF1)1WvJBn@~H%an)Y*rz!J14h1sl86}C>_>Ut^9I6k5%nUyp&7N6a)Z|b(R)`2 z^$Tx-)S5CB4|xo-VqRie<7p*_`9$3{0;+gax@Ku%_ou^n)I#q|9j9O)c{y4bErj z2d21jIxoodDF$w&tiZ94+`0lo{{Q@oTwj_8Ce6Uy@|`F;^jRtks^X?Q>+`nnx-i0R zAJZFu5kuBCVr+didHo)^YW()F`qCD>xAPr%wg=;_(f6V9+D2^Lag11akGQf92_*LT z0a2X>!o6SGAY|Sd)GnFG<@ch`ZqEtMT0!sgX}K8s^IKHBqjUD5Gi5O!q3G%bX=2Pb zl>D;LHQJRyzg`_E(t9O6aJvP4=EZ{l&7~lp_UUTY3N?gC9Ke0^dv@iSiLik(k@nvP zU|>Dvab6t811mIi_Z|zLCwfCdsT;&j(d8>G%mk;iftYPug=~kZppgCr;ZufzN^%CH zL+5~p_e*G+aESEUx!|z-3`QEMDXV)87o4K!ceWA4b^Z$-N20mfZEsOzen%Zx-UgCi zR9yYFS;Rxg=H~Xe1~EI0vD)q@TpYgz4M?w2{#?V_W;>GJ3Ml?J2(|vt@OEbhDk(d! 
zqf0uMcMCcWf5&yWi74l7i*XriAuCYD#Zr)+kCtw5H0AwzKL)w# zEtmEu?Wh7jyGGO)3Vr`J7Q(}qpiPwqR}_AS_Sw{R+qnW9OZUNP>J5>-T}SyH`h2!D zvG|pTLF1%h-)r_mN_`}>_95-<{A|3UG!nE^?l5JgEy`ksUiE(>VJ0s;(7ELr7q4>} zDn!rFqB0Aj_ zF@LP;xDEcFIr)&GoM+E@SO)nZxjNqU<1w0_uT2B#)++#4Xojg%aNnkigtjr8&`$>t zS3JYW;wzkL&LPmgIK(`znDO0DCty}Roq@VjiRU<1+P1g^Yv0D8wuEPncMYk|xD>r- z*Mj_Wt@@s-04rYGV_EMO&cmw>r}gc{x7zpSC*RfOmv^LNK*=+ZB^WYsTM`zJuYd%F zCsqtS0bN~m_Ovc!L1(VQrfF3Wa%3asp;2?6UCq4HG==khms5sf`Tlzx@_?~%UXzq6Dwr~05+dOdg0_g=hx>j--9 zmxGVx5(x3k0kuy&cusnOAbklH&OK0jCBix3;$bWqCV^Nw2f`M_ak)=+vUL3`m@`7e z|2eWJ?`SW_t*40N=QUE=tvj1EWM6EvJ`VCreh~8bI_A#30@fo`7@0pDCF|52rM}gU z9%o_AF*EX{Q`3n~3Fe*NLOU^U7I5F3SJa-O{nKtPWc>p0G9l`V3U!an{sB4r(xcJd`XngMkaW-2p^V308yx1ju zF}E7D^9|U9lV<#K*Dc{B$*?j7K~8lFR{fdec*dj|Me z;xPID09lwGo=P+3ZPp!OjsH-O%YSQevauQ>57e>7t5>P#J_p-YTcWp~59&~7IB#qw zWN(+@yTxMR?=!sw|C1KfC(*$LP0{Cby>F`Hwr$6%^Q92Jii5OuN5Q|*kgMHW1)gF5 z;P=JZ5anmiOZ&HgDqfCGH9@Eezrb1Lv_b0JS!jCi6CQP>tawrsTeRl{CQW<{n}!jW zQ<1^^rV`u9V*;0(bCxT7z8+PVjro98xt!yW53oX71MXQ*K{nWj_>FQHQ*6eIkmsUo z^?2pJ#Q?g2AkKV(Bmb}UoZF3IT{BRAK}S&B+CbfR^(=p75sJ5ou(0MC&Xo2LBx}8J z&4V3i`Mj2^pGJP^aqF3?y@q8R4ua@^M`7QfZtx%OM39C$EcOwB+$^}zf<8~ zOy})brZ}9&RNZvXh>5`>t5n=%_Z~lJdh%)Y@mMUgfL$j(plpr>_8?DH)D9)4wM3yR zSq9-cJUXXVV&wpR-n=Op!WSNbypB&?qh20xyL<8NXLR}SwcBxfk};p#wu_0jEaGmL zzXbb}Gf+CM8kHSZ;PT@g3{80lR#V=B!f__68TcovPo6=?>bu;%J{Cgp=qWJiy$LV= zbsyx!?Il)qFZ6p!``q5oAxr-OYxImjann_ZezOgxoOuobGkhW4BNkNO(!q1~DB@(^ zfdxBKvH15}#Bq-xFTFMGDZgUb)uE8}*%yPW)u_C)7Lvn$VEKwS5aBwQ>+5?2EC=5} z@#^I$j&VXo&NJ|Hr~TIZfA zWhG3{JO#naHPF8NBHDEr34KGzuO0LrT*T`*1F;8uNHOOFuW=aI{}TkIcHxAvV!`JY zb)H4egnPF>pf+Zx)HHJ+h!zE*f7V0vR2d2G|Fv@}#TRgy(aAXuBAs%u9v}G6UKsUv zHR7LtqUhje)^A86X5S>HJ)>u+eG*0)5?8(cJ)|tAZu9;XT-eT8Xn)`}Y}FC4Fla8w zO#YH8Klxz<{ahg%4x-6&aAtS7j@U6+tv!mPY_Fh$n|Ha%3CS#A^yc@$cvG-q|){jNIUO@~0J!YQbE+K3w>xc!=53 z0Y01eK=A#$_{VfzUX(S8t3I~}%ciCi`}{noq1=(|T)Db2$=`uqe zNNpA+bDKsT!tdTuU^yd#-78c8943Eol?bz~5g*RW#IotT!7=R%I-YyQX)DJ{8}-Ix zY1L6QaGD3QiIjh8Er+yHW4>Rwg7W!OH1`ct7G&M!o_ z7611H$Ds4^OQ_gh1!WEwv3ba2E@ajQRE+`9T0s-;752fh=jP z=Na_8wH9R;l0oVE3rLn7kycs|Z-lz7%gt#o-?17zHs;Vwueq8##+fZQy-&NU*W}yS z%N0-Ghb1xopmQUd#m6#`+`7lbvk0hv`5!b6zJ)&AM7$Yf#;c6az)${$yrrPQvf-wX zb9^`E?sJgJEUtjK`Y?v7D`ACC9aPc#Mv#j6*PD6@OHLRG%G|-&p!^T_htVB8`WFl? 
zyi9zX$LKxD6XFgGK)O-~^>S0lzxoN1OU;GVOeBQ;agMV;HwZp{x()T4d-Cq~rK~Er5x?Cv78FhfT-+2d zY&;bVG1kp2V}Cb{A4)wyrPr`%N)ndY#B%EUiF8+4eKlalea>A*c@pIj5RZ$Z{*n{0 z*yvx7@7c&Y$`e_6O(xV&xX!c_%cb?BDw+Ek2TWBt(f2i$c^o*0;|~4@T5B~6u^xmG zGy0%sfdq3JuS0u+nGiX~o#o#-hXG3ua5}`Qo*qkVhK2`Bo|}Q1$)r7GZU;R_k|4YV!N0qN+Zj`c#p5zDVE7%b z<+n5FJjW7csq;9u3)BY@Y02GNok`_?@4>feFDgs=Lcr>Bs67#j)@8HdtXd;t@Coq$)2dw2k6MniX4xBz43uVp*yur19(VskdQx;K&rKkcIw>`u{ zU-GFN-e-#SYt$N>8DR7D3JWlP%W~?^Vw`$57XIADIqt{=``UQ`mt5%270|ciG5T8f z67bnHde*b9locFf9aTSp#oJF1ce4p|v!B49`^h8Vy`H*Wk6`Na<=FVIEzI5;ibUYV zl*E(R@zjLhE~QMU#UscnS8_uJoCj%TJx(_<<-@z@K*FslU^2QMm-jFfa%aRsAL>)_ z{h>h3$abvr?IDEvKL?TZ7`9~EEv!kTyW72179K?BOt}Y__4NSDjN64>g=azM{BHGR zI(uWe7zi6&%GzI@N6q5NU}7A>ve#`U?e7T|{2s-1pS8qGr;P5iFTAhN)Kqy$iISOwjt%dkDFlvq9I!0FOSEcbc_qvEold9k@5r_W;X(@rS= zp(pI!ugg3A{VOUiSLb;zScNi=pSjeR^NEwR4bqP43eBI6q08@W+?euR=sn^E`0usg zavL}_>D`Mrc%+~V|8P#h6>{>%V>B~0vs(9LOr7fu8-8nskF{lJa+0zGo#V0aeKRxN z@d`>Ts!`|TF4F5pa=CNTxg<+s!Kb^cSDw=4)9xzi~Rn?R4USUpr#agEL zG|IL6_%E<&f`GAqyoboh3Mex>!O2FDliApIL>RHvTfZ;CBGUi4w51 zqdo7~fv|scPoZRC8+zPYy@`hz#`@)^KwzvRL~fdcp|8uqK=&5vh-|pqVJ5sL@g@fc zLqTEN&a~$qNnP%|!$IUPTYPvkWNT7janN6|W_JwSp4EUM3pycpm@j#w_AqT*y;QXR zEsFPE0L{n{md&zYhEFLjc$El2Lp#x6%X!EyK8xOt3JAA*ijw&rQkN6|u+o~H^z-LY z?p${@!+1Dlzt^DYm21QSea^I5pPBNGJ<1QO)ovGif#rwYoN9Os#7!WbW>812UDY0} zCeNK$k1~w<_66-yR0v*gVP+Z4vz_Ja*8!$N+J`bcu(AQXOHQFEhn_3^^J<^*VQ3Xa z-kV=qKy99hxsz@o?mZ9Q)pwyNqZIrvr+_G?fa{s6E6m$&!Cx2a@_}X#&~hsIMw8cZ zk~ldB8V=-f0vh~fE`;=H!pV<_vF}xYrr1-ESH7k`%;(I-`XiKRZ=h-v-Q!PO~9!nVsc{H>~ne)zJH@N&Y`h3`dBBodxbJf551{m6j`PAc1ka4{NCY0;( z@;3`ul?Uldb(dTlU+Sa5Xnov-yTNCisgOUg6I@b)xpBX>gZNuF%t|Ak@|#Uu*eue@ zB8r(I-N{rTUpQ5N+5y^~gP_;1ajS1CR{SGEs|)`@VBbIS!huMLIu#2+GX=;ryn)&e zwOAdTOf1O{91ge&;r`tiUw;Z~bv{F}egJs?T?g`CuVYo?C6ph33q!^k2t~cF!??zS zpy|{=!QwdPbK@~LCXKSD2D;cW*%KXeJfX0#j*WRlGxZgH%&w&j{&OoPI(rMni%-Dh z+q!~uUmH4(rq1s7?T}YUy_Kh2Q8OTuDTKY8DrzV?zkSTf${t9SJyv4hQraurw%~FO z#)853h0t()2RKco`)BLVAYDN|_8W{_DCx~-eEI_xWcLt6H|wO@s8Y4Ub|n|MVL061 zLw!BO;;>nv1KL58rWfT7v^BE6S zkle$B_pdnxjgyx`zm1Q{2h;-Zz8VSk`>%l??Uu_PtjBvdDLZ}Li}Gea!}=;c-f4iL z5O9AB%(z>IH9;;=V_OXwlLIkuZ5S*b`~ggI-+*XFwbXt)^+D@+!>xjNkWQmsjB{2H z?A=VWjq=Ds1jL#9i2a9G$hZLA&Dyt09ds)3gp{d&4*f!-E*m-p2gR zXs_Y)O=@WP0OeH{T(fT#RF-z3)y`a;9q=7Rb2rj=lQFO7HCRhI!2oqSs$x}4b@w)8 zhy6tz_2JUk8*Ol}&kyRCH3sc&SI)pL6hbGQLJ}8oLjz?PdaKos{_ep~ex!iHQGQrf zM9kJcZ|Qwv%0)#T0a?lx>6^V;7&A2kE%g$Z_1U8su|ETI-#ujV3GHYNWN)DX*qsz&E>F1Q# z6nYEY2AiXe=m6)kaVf~WjUeWxjt~_54Vz6*acjs|tv*cNsd?L=g7#c~rH0rXI|F`; zH{uJs#$jTm2_HF#NBfpp&}FwB*Do>=bYAPQ%oY_c3On z0PSzvx#kg*7%uq)vcnB(O~Ok|)32kxBt1;0PVciC@)!10ag%kfeV$g+A%ybNc`W!yjqhZfKzMPPBX1;_@uqwqqXw{AFusUt#g(S;s3=D*7* z`FNNs=-LfSnm!Oa*b%ym*W;$Ax4?4tb6Byw26O*D276qMd2lF!j%k@-w1PU>CQ$U? 
zcuG!y>1-#R6=u#|3y19JjGSaDEIj3ou^(EC`susDpZeQ|!^9TL?ScVP;+C%af?nTW zK|)kpaJ+sAbFBn0I&RHJJt#t*{W#__YBF(FKNFi;iNR_sG}W7l3V%79;8BF-UE{!g zmjo_8vJoYo!?b|~Pf;p+hGTpSamzy^5r!0EQNVYuK3NVv4x?~h_nk0MZ!X5JqYf;6 zmVK=YQAX#2pPk8jzhe*7+FS4`T`pkSi?2L3f}EK@3uP^?HK30s4w0?{kC{a-cI^dO zs&EE&Zn}iQ3)P76X5zyWltulg8Xd^}6Jul}Ms}y{@7n*Np?e-H`$8<4xdmD>1qmzD zBf(_B94Kmgi4Cx7BPPKe@>xCsP3z&P>~936GaIm_x2ce~FBKI(OZnL6jntJ-l?@t5 zJP2ZGuRltk*&GRc%l-}W^Ic?q^L#L9sV_dxsjCz=`h>wL(Z?pqlyLw@W`&tO|xdzS) zUTdHBjfIkZU%_^376kNHV!@8hsLY$2E-f*I#M%pBcSc}2-@ihi#oIx(^#|CwEre~! zAHlqnk?1$-F?X?dLbF;G2DLZA$Q52VsyG!SS6o@yfGdDH+U>hkgKJAM3&@K@2Ulw` zX5B7mIIg0(%zW*?H;K=3D-c$kZX;M{C1V=xw!+uuVdjxgJ~!Axuv29-zxM?^sr){( zTWrjAt}{UwGXN4c?ZBu-k=W33imP3YaED*{n6}6OeM6|v8{HOl{OlHcXdEjQ(h=F(VVS7^qb6!g4P47N1_*Oc~uiJu`p_(aUgSC~m$)I*D0@bce z9(&nDY;sKl^$LF;L9@{b-bc`BP6)&kf3Z~Y5)-e{K3unk_}pslJn$o!OnC*DO`HmFAt+&F>((zM!wCq7Ik7px=u>lCj(B%TPPQ2z+wa6MObEM2;TH!2y zz@-(5XiuHjl#5O1H|Q0s_WTD`Z>C^Hv4FB^xu8p($E}w=CH8+awC+f=L5tm7_xDYv zQ>3tCv);foV-un5uoiRU>|h%4H|_S%KtpUQQ(Rsw>*io1%->4qzG?Tgjpdth!E{s1 z*>wkc_&8$m5o6qWF9PJlktKDXhC#K&FWhjO=_Re9&sb|AqB#==+dc)45pBgszn{Z` z>eiykz}1-8^$SGaoCA&h-}BwXF17vpGD=@^(0h*~XN zFQ%E-F1W&VIpgtZTsG)WR|6&oi*EyAFSC+s>Y-%Gss(1&mZ8oS@zh$W_-VsMWifhkPc5Zwk z0gh+~(xo$+jBdp0bO)^My^#62J2FLlEb4!CV(=cB3Dz@U#)+HHFOeqbpL|C4uSrTR84+itzpUj25x8FgBQO&4$nZ$||p_o&25EZi< zv}Vq+IQGBS=yxp{mNq}cf~l0tUR>BLN z`VQ*VG$;PH4y4nR#CrIYX7}-5)?|DJg1 z!h}sz!0^cdtarV_9gf!G!jCu5&TJ1avQiP7F&s6U{*Z;6D|vu*22%#KVUhWs;MYk0 zi^;uE5i^A8qu(=SgCoS$Sc)M?|1O0)hO6h`%G5|S>feQD%EDpZ=SnbJlZkT51IlMd z;ZW+B52*c&&SzZt#>zIrlLAxW+A|f-?v;r>tWCuA756Y~`xA7QCflTjb;~)$Bv=%=1+l8VJT?_(8oZL!9S*!y!fprUafD}8zmOd<@R^~nOwEe+mnx<&7i67q|haOb)svasl0*g$*Q zpr~HBrk}ZZ{X!d2-tsQJ)Nm65v(|ur>_s$7jRyCrh&$WYpvl)gIC37bJ)X8>&OyX# zf2m@A;~aTngt2HIVj(s=OaqDKFs}OciA8jmh|&(^CEFOu8)j%hp>_hdVIMGZgA-)! 
z+5(;fub^i6ITqn{6Gja)6PqVa#2oVG=JGmRJLm)W8oFTU;0)?<4rK<#1+0DN0L?Re z!LM`_B)dMKd?-V=_75QEES-64tl9RD+nHR|lPwtB2?8@su-lbfnqhxrA(LB?2P+%h zp2dLPc@+QrM=cck*$6Ek8_}uk0eqpXZS1NW2wOkk^zK#IIKPo62z$Z7S1PJkt>xdE z!%;Ve@)dVHnc?*f=1U!nx6(A!lbck(ZW7;Fz6aH&)+}kra){eK0d(7wc&`I*;lPE~ z;wb-pP|@cTz=&Klx@e3#>YRBC{?tKVlrEn&6~Z3O#L^D8F>uB+&ZzN zoszaf!jCTK`Bx$KDY^&2Mb|MP@j6$nFXvLf!`R$21ts}!n56Yht*Yxj?pXAhC3U6j z$lk-~I{6RqEgTEezb1m^78B9$XeXHWoP%Sk0@f-kVb5PV@Wj_j(8E;O%olCNWapdc zAUg{+?N5U;bUruRd<|MG_TbQ5^3f-+XHwx$^f<8|)xqH`Va8%i`W*ocvToE-pUiEh zM_{*hdQc}tf_saOrCd;hI$6op-zJ0K@xDCxqnY5?JQIv&BrxTc?Pzw$SWxwR3(9j3 z`NTD*qR|93uYOkwpI2w2-)=3a+IfS*eZMT={dme)oPgPD^3gw^{AS~BqG4zin|JFp z;^>E{__SUo|GHhKTuAKY$6r}M`(&Q1&Zn8#K+s*HnL0TWKM(s(Irvy~&&>n!gmX9E z2i9Beg%qm_^xPH>&CiKR#>|DYdrd`k&>pbZA{7lzJ6OmoJyt(n1q(8FgP-*=R#w#j zX0wZ+X!|~B7@EZDb$UEEUn)$ZpVjdRwnA9tV03A^ge!;_HZa6YI1_&g75hGGV=sOH zyYAC?58BSmqK>d$FNt`@J5H=`Ld8 zds`vM)d{nfb5NWskZHzrr?Wx}eQ*AeRo_a4qWDG@z4u>y_qY-zyC*Y$6*1p!4|BII z$5C^3Ho~>V5F9lb9R87tBYU04+U1@Y!Km}N-xL~Dl>NLmm$GYXpqsV=;_2spb&iSH z+|vZ7htRBRzz$;Q+`)S19H`z=4%72!jwO49elC>z-+c?4|M~^4Yky-%_aCS`F_PTK zADFvlCpx$3z=QyakkV!s)`mQUh2{6aaODA$kDtd*X0;IqCASqcb@@!P$iOr{gIVI= zpHTTS5L0hy(e=UyR+9c3x9xoj4PSdgWXqqZm|T`VYFILQry7el$pfD05rNMAdZxKH zcY&ZML!fcyS8nJ^yGhq}m=HqunjC8}Z0};M&oGAO)hAF@YXaL|B-8m~G&vi&Y<_lY z!Cig}2k+&G^L=sT+X${29E4`~&!9K)x&1oCGw#NC zk};^1b_5sfgCg}`oo-ek*V_sKqq|^Ia0ZyKe-HA%>t#vP&vO^2iLmqcbx2XYgtYVZ zvZ>Lu2OT$(c`G}B{gi$nIrRZjDsKQmh~a+|jYOj-U9{DswWzcjiu;Ysg(sbj1dYam zC(ew(#9gh$GrvwzZ_pn@*RMpUOY~Wjz2_Sp-ZDd<4q#|w0k)fUP<&F2~Z++ajx>XwWZ~k#Y~+n(XmfoSCTf+Q$Z*j>m?1gE1|h`tEO@gKo|+F!B%6 z`t3XoCRGZ^H7Ebp$M0xcRfUgRS%}KQN^Pg>E6K0;64mgKFEft}IB_7AfaRU6}>(?J_?@W&)r^`1ENms>Ej{~T88mk08y zuI#Cfm|yaP*!*fNgwglcPInpB*FV6d9e-hg^(hDsT#1dHztMZE2#rTsi8<}gLTIlE z$g-@2mP5}`v%#GwPAJ5$TUrYV(Z#5Fk}C5%?1qmz&@=wh9<8+WJvfi(&x`DQz%T4J z3p;!c2f4Qqz7D57^J*9HwEv8X#BXe+8OH(D;aI)9JF2hV;A+Krwx$)$I=4U2w#42* z{ny_N2ld8{J*8sm;rBT2!2!sNdw|Xpt+{W{qqrw05B?crBNTR(h?~c?5<@k9*zJ8L z`0YwyEqimpzdNz)A}IeI?G1h-H=xge-|TM>Tfxhzoa3LPanhmw7_`tGbZ75FQu-5I zdBs9p+4~%(Pu?Y(3=YosP+Kwuf#907>~*j3A1+8V#bkO;F-1$Lgx==t>)PZ_Wg<1VR?vY z51fWb`ESfS{2VmTj%q7D%Axjuz?@GXVqu@{(C_A2dXC*d!`dHQr#OTYa>`*!=XiAR z`GS{cT8ggw9l#|{#5Jo;L}S|TD9ldEG^M3%()pFpX}~f}wnLPw>1=o0mreaqfGfKk z!%^FhfzcW@FW+niX%TbLZuLdp^`cZT8hwO~?cIbi(i5aE zY%~`9*ObzpWg1%VRYD7~5@+rwKIHN#!(C#dP_ipitMs-dciS53=Jf-WQxtRQ+Zz}B zX9>Y6kyz$^5)Gn;x%?i79vQ?-6yb}GKoRdd9!}(0!FLou7BhyE6ci$8^{^)oEV2Gz+9NgK#?W%G3+UYul19GZ|$ggxct_;O%{o=G{KyzDkrH~j$;i>Dcub%?hm2chlkK&%*nm_gEbGIi<~^5 zd+;Tr;kj)^75R3e{bP8@Obt*mTnHD;{Oz*bJiqSR##IPRJON?SSG*w>tL*+=Ot zKNX#WJ42x5a?HIR2a?k-WJRm$(7$s&v0N^(aLY_~_+%YsHkk>deTm2Skz61FRlM-= zMM(MX2kP`I7!aM$cGtHRTNW8Ga3;B6vcE9Lqv0@9P7LhA-?C$MN^)I2WucXmAZbGd z?^H%@tg5T%ykrftj$Mq-ISZM;)L4k^at~v6)?#3X2^d)8g@*hI#7LE(WyAw)zBdNa zLKN8R&QC1Mx`>i-Z*ie)DLN)M;<+s+QK!&hdFPH;wB#^##a96%7I*Wjhm@UeL9e_- z>JYq0w>8PcZDSj$lb*$;JCBhogSdE8tcA=+hjtzF49R7@*m{psD?{E_Y%5tC%S&RPi4?j~Z!+h?#iN-Bh;_e1rUX*kvD zI+&!8$FKV~P&U2QMoEvtZ6`BP7WxBi$dwUnpUKi%FM))O!?E$$Cp3GP1e^aoOlN`w zR^iAX-Te^{-a~GhuG`5UumcSr=sdnV3C9E_p`_6REA#X?V9#BA zQPzrhb1^tFw+qPIABHH)dssVjAD`X11T^8TnaQ%L==C)fLd%zu!^@Gue_NqpZaB|q zq#W;A3y>c;2hSc@2_p~fVY;FnP@$L%I}hE(mQGDrerzXtt@*)oh0Yk<`)|sQ)-!`u zCmg%WT#Q_ngNj;Wntpu8eKprGxYiAw?K|V7anr%E_AU-KynweS4nl3Oc3iRfB21d| zKhV!v4Wq6WqrpLhF;9=uZXzG0R-ZBHR0RsARy5-ufFYIVvAWI!RVMLF(f5v4vHXx$ zpL`9WyavuawGldGWRO4N7kkv0g{lr8Swi}9*qnV04Rc%o8(zY_FYi(3V#mEI*Ms4Y zy)0sE9xP*D=F_wF}(0D_qp2#=bo?>T*tW5{{1N5NxLW#vy&&3cA9@w(ciHHRqd?AQ0H;X zppijMa5g&5S_INTU%78sZ}c86qMhu2sIJQfx0}exxeg>Mpxqo_scfU7KeZ6zk$H$nNzBRmO} z?CQbRqG5RuBz<=RcgOxTPkxDpwh}Z^e?{x6ex3X3A-( 
zcI^OaG2|3t%fU)H?x+>8dOz`2mP?)nk3y*HpSuyShrgZOUYs3yy?X7Uc_k& zUpx;&!Og< zj>nYI4!)}#+=>@LP;p-vT1g%|Te>Sbsj#;8F2wa=;Pv4aM%L}ZqRek#Ht+&2ygVK~ z=6`3B=#6~mz&mJt_kWNZnhSOx7c%XY3piy!G&m+j5jV9<$hq}$tG?$^w!Lb`4AKICEIT>j8DGI0edWm{R zQ*HP%sgRY{#1nfSq4%ANZBBTN0s9&-WnBhruK5AQDo;rLM)#4+dW_8O4EDqO(*4my z>$>3uH`qiIVSFj1c~mh)>RzsR{7N>r8_m7fNj(hCx0s%PMD@P^vC;Hg>GVesI+ZLz z-;t*%|1%!j^*0q9mlm+KetH}^X&1{it)YD<;)H`o!6jS@+LaeD=0yYL;OhB~KWGp5 z;uG%t_aO14cYy!IHi9COSS(zsD^$Ah4C91Ih-M?)|kTdfZ*4()PAzebCB zw(1lt=pzAXr~AbJ-UPV~r*ZAt7nn6V6Ix%l7QIUI_&n)N9BHwKn;m$H;U|Bg^n(;F zCm({^5H;(IU%)T=Fm7LN1$tsUnj8+s&@WRVDc}?f?NEg|sX6$q^Io*mn;<+|06NOg zR(3xH$H<8kQsE30zjNX3+^2Z%_I@l4NJoFmR1Cdz2hZ(@f!TXcgH%l{2JiM5cIE)c z6KRg;8iB{2nhDOS3{?Ra`LZ)6Lf-baqCT&c%l|py|+V z_@2O1G`DO*|E_y+Yd@*5YGrGo*X;wKTWE%XJx`%}=pN``xQdQLE)tXA5=ui3a8>Yg zFl^K@yW-0{Am<9pSs#gt8jUvm>Nd=A-38}j55w7>mZIu(9Cr?}5_jICx$5x|{Pw4| zVpvE3X8qg>PJ07E{dx;{be)GG`cUwa)kCkdrea0GI*9r~8Sd#8?5V&ZY3?&L5e~t< z@|QSiXgT=*)dUI2ZpvHPl(x+n0>f)3o>60jgD z1xsvRLQ{$!67n`f?o@oyOSZ{s#u7sloe=q?=V-JSb_7U`m`a{)?zNo@b2t5{ASf!JT~+Moh>F} z=!kKsNQ-9P*6k^8JC#en-+`wyjzP#5n&I4thmAeALGbllZs)0HDI-^rGlphFwzlG^ z**ZLTeh2#RX(jX+bpYy1o5AOLG^%z`#N_x_3=TwzDMu7H5#r^y@K4X2XS-Ac6w$m zV|G{Q{C0c^cvfeEujNIA%>8)g)-lx0apUSotC+icC(Q6R68mi24kI0Qpu>?5U=-6E zdh9s_)gi0Vc1bbX{rMl4r2WO4M_j-Gg`Y6&ngKiAvk~nsjDqZAW`erca*R08f>M4S zj6GV5ew$`6rGc0*k8Qx&H5(!n2QV}L5<9#zkM0h37=Gpx3$Qj7=k0rmD@Wb}&ngSC zzH=!w<-frHrke;I+^j`MDdxHKXRB;dVN&A6*Zy_aS^4ZXvjgABvL(rJ*}Hg_1gwD~=rQZwvOI@{lXs-ExPr z0-A((19TsA0FUsaU|4G@u0Un;6ti>$L zU)ycMITuZHfhWzqim}Dd@Uz8RY-eX7Lh1#a+0GA#Rue~P-JeWhLLCwqiQyOJ5cts+*H_#| z!#X>REhCo1gEXdyuwl|GEugk=WgQ;xB5(ORKJ?Ndn#rZ&f>jrwc5feWPl;y>PhKY8 zrzk659*RzNlpL5rKmQ&+uX8Nq**^o1bxXk9!$dq* z^%W{s`%}K;7q7R-!|=(W7}?uaEbn^)2fa9k)308Dpi#c${OQCr`-`#hr@6Q@;VJr_ z?S(FN$>813RIJ_F36uLgr5wvt*~1r7aph(``pta8y-x?i+CQIQa_CF&C*Go8!#cij z>Qb8ZokHtJow4E7DM&i@06p)V1$*BiDEa3fu77)ptF$A?A$@=;U!Lc3;wC%)wSy^# zUIWJmX0UWyBDy*)#F&@ILH^i=ssFnKdt=^#!habw4*!7q>@(@|y8mRkP5G4LT#c?< zGr_&QkUH;9=(E0(Db2>fsI{TwAGd?ScV2-Zh)@Otpw$s6I^=QT$mKO27P;v zM}@a9uc53|xrdZ~#$#kbjlRSxn+Cb+j_4IvjtgfD!jLfmU_v_x>F}f2BDsvk26vET z&WA~CCazuj6?gvcE_I7P!-OaIam6xYA*V%&GHE)@)9A3?_aHuZ|Y`Z zRF4ODixQMyT!s2u_xYKfpt)-7WWWye)roU+?{wFC#Fj#{X9xLqa^CDI|KI%^IIUXdW>ZmBY> zucItuk)zKIb4*$D79+P;f#}C}w*10Cl+NA(k8M&>7VpEgKW%}2rRQKprAXL7bDVmH z4VU%&w`0w93&f#iAnR?)Iu>@KWNawN&wOL0W*l`vpN4`;P3qw=z|LD8Smxw_(l*-P zB|2fNdn+W`P`6WmVom(`!8!d*h6nPm#ANS^^0*LA*7#Ji={|Y&D~MB`w-o*35Y2YH zVahl;SXCBE{Lbj2c-t0O_>=rJZ@+?-$V1}X5{%y4O409hFr9}4bT5B{b9Vf~K8g;s zJ@^p^IwaA~e2uE*@FBGNPXbnsq@9($=Y}t)`!%T?vevCcndU<9p79)a5eHFsUnm|G zjQL1I13rAPpz65`M7bhq2M&o z6I-B+*vJNgL$xdCI@J&U`P<>8rM@6rpn-)Y-!XnMc}t96VV0}`=HC}F_BCyEJFNBXC1{6Rq|vn=sf?2?wMi^Y%EYiB0||oI+qv~!S2vJ$lZ4m zLst|MTX_f;MCPE#GCey%qyh z`@#^kZafFhH=Dty%m~5t1o>F!W6x}2YxXFVSbWWgK~qSd*h38Wf-0~MzYfilsH?4{ z4KO?i^~-emT66Ns1y1GM*2h6!40VOw2!-;J2FTS5W_Dejz&gYUB1gFpUn>n>n`J@7 z?K(`|{gQqsS{#192&I+u%&S<3ulkUObIiYvR+-~e^;{K}x+Y;$mra;ywO_R+K8O01 zl5w`HSh#S}fNwbW7qmKS2|=G%!F^}qW3Rl=q}E$hozi7kH`5*W+Z+Wi@lvqv`xr!0 znab%>EjDn(264HDE{m37kYyW`9r(bk!t1$;gacT05jvDyLpMGbZEuIcn|$&v z{tUp3fHWwj**9w(F;#OSp=oG5w!EK(ryfxs&GC)+lV%Hr>o!Tm`U_CUPH+qVe8s0z zjf5di8N?qn;^SRDb5_D4XlS2-X->y6+OC@*%hY7W16sLa)hjNMJjdQvt>|>JjZ?23 zz^x!%KEjVUslzHjD}>l=2U0Epmh3YRMr}BfkD|Ad{vB!gA2j#!*;Ng&}V!8Da#z`Moh09sQgGBai@EN z+NKa>S-rq4=_ePKxC8>Jpwe#?F&$TsC+oQxm)vDNw%4}99R4wyeZRr1kA;Hx-)mqk z)&SLcIlBDp4UrKGv8lm?Z`!(?Ge6&ssxBg-^y5Kv6^}$wcTdUiprgbyoz4O#WuyCt za+s<&2bYPh)1Sl6nlGreBpl-Gl;GW-$ckUgW}U~0CvsE_em4T4!`gta z)z{}&E)R#@Nrr;>9CuDDKG7Yot{8WjJQn5LmbnWj)u#3Nu&@W4gmhK%aRb?!c8ePI^n_J-GrM(c~_mDY^ 
zpzcZ1IM;IDz{)zD5hfaz80+)uK7Co#&_b}=F#v5zLyR@AgL<0N;+GtNPvr63grTV3 zlFa$Oq0W>CuesvFMCy`w$CC11fZ|RZEBkl?%tB(B_#t^i7ED%^E8pY7Y=87{F=x5t zC7N+Hkv<<2VY_V;_rU%PiVGg1Jm@&&wRPaQ3KO9LW^p=O~_>Zz9ASo1*$kCMZFZS105%+mU-X zlOXbxMQnzP2M%HAI0a6nE-3eq8`#=TOqP;V(BF0h6=P*8k!*}AWX311kvLU(uNj6{ z=nFA=kD%Y50T`q#A|Hb%+7`EBamhIJm>0rX{kv18>J|bX>5rIiRUM8_e}q{Z)~ZSe zJAnMY3=DnKDKpasg`BQn@m&v`D(kq8tQRQX#W?4PG#H(b0gBw~EW!w}uK6b#*=X`+ z$7M_!+{(;e{Kp)6KjmER^#a+b@yx}J{MB3q=al0K0e^H677~j_;}ZFkb0EX#7`D!9 zg9r5nyg70IWbOetyv~46?D$Igh@+}CHU+q;Bn<6t9L4D2$3YRlf(t3~g0_$})L3}f zQ9V{ea9Y>KOdjoni~wyx*0MC)ded^0L=p?FVH`;Ran82t_f3-8Zw%_69gM@bPzG$K zDgU4y(e~vZ(ATPAy-4TrXsUw+_J%^z2_h43xsS^f=)R&XSA77#x5;rCI1P$R9hFl!Mor|FC%b zGU)hn3uE`JCYF$l>+D*K1E0}eDmBYd*7Q`RGu;>OH|X=d)>MeQ--b?y|#WC{V+HGJmr74RF zxCzDGYA{It14l(_@y*&L7&i3*_1u%*{ll9*noYj8zzkKL)<-5i`vXch{YJN-FfQ}c zEeE$@YvJ{~CFu9m8Fa}r&_D-U>4k@?*wl@rX%qh;vLEMWTS^^EqgcfjDb#gKijB_Vf-K@bD+d2=9ic2~jMzKCUVqgdkh6QEy5^V1K2((tpGb^JSv@tTj` z??bqYW3HfdXA!h)x`ch4U%}9g)T6w^4$fLm$NYsa>HXNo?6c2f%b;HP+bj*e9~)u7 z%7qZJ)e~|POfbY_Im>(bioI5)W7|5)Gq8@-Zr%VsNT56yk!ap!n-H@~ozV z>>kbDf7_sFix0^EG(hJx5g(rr$t=^Jb1u7H!-#bzf?D!W5_+r|y#_2qC%vy+c&Gv2 zI9rKQ&6S*6iV?pd(Nxf%*vX(@3uu+>0p;A2%wys-2=Kna94;&ZaMHkv&L`mTRv$B6 zt5kiJx`KMUuHdNtOFX8QzavM*%%w`pb_VhoGtTttt;B$!bd ziP8Ga(CIh~`dn)P+YA>dSn&b0W`$x|RaeyWZ-E%+3@ljd2Y!2oq5GtdST^|*E{GZh zZC?*_%76!$XiIwEv`?J+%0+I-8zbJY>n6-wc8i(!dk9wk+00`8c&Ki#2gA`uymI*( zD6ZTD8D(E-hvCGQ2bx0S!CMeWnf%l(pJ2vczd$rGAC$$htgOcll+Gz+iYhJMy*v$L zQVd~v_x_k)tA?zft*U`f?txtXf?X_$!76zJ_V*kPo%U}r`Sd*EkLW|3lMb&m(BoHn zf5QFTT`(s-qUmcpCcM(Zidnh>G(Lo^^=~23^%(dBk3_ecA6T`#4#dfe@U56Ml+e>q zads9uOddpYvpyUl?)dsNk)W_ACa6X#W&|Z*Ro5E0Lf#vl_je(~DjpS~<_OEmF?D1v zY;iFZ@@{Vim+w4y+`htcX72%`Z(4%Y-Fq-~byvZyO@e1P-v#A6H!kpXCn__#azlzw zV8QcY;Mo>K_lyPhJmP@^Q|qYv^gQSB=N9_h90a9-6EUot24tQqg|>$4*f7disM>N9 z` zN6)2LdZvzgC?4VNwhYjqJ~YcqQpUC+0v4x1M}{Evrk=Ekn($ZmJ~ek?r87 zw;FWPlTgECe^y?XF_@8)05!8M;ql&^Xm({LOh|bQ?yKb(Huf`SWOM_aJ)4m;76}fD zzggLG56FBU!4&f)to77q@YvCv*}X3z1`~Op<K-%el-qURv!Yz`*QH{>W=s7qrv0K0O;wZ%j?8eL+7GVkfRpB zVcYzVXW$X#g>)b;6Og=zd?n``} zLwd=MtKhZaImA-lHues6>N^P4P@#OoVwApaP&g^WT`eJ&by?CNmkGB~o29_yFhbpmB^km=@TBm&J3GTCL>N z-%f$UvRqDMRdTkhN&p?dvEY)h5B$E?(fNY7r^GnhMqMC^+e28g^AVI`U#@QOTQ1vM8{{$=HTE#pEu*|u z)?4(Cxqyo|P%p&#Ag*Ca5Dc`*#G%0&Lj1GmOe{MJ1-^;UCcnvQuHj$tX7|lkF*b6ydUqW(^090$u<|5PjLDP_{ zTm^X)zt7j=J2Sd~MQ;HV;w2L4t81zWhfbjKRRoHpDNuL6gcvfXVRg11zp&^hM%Z44 zCFD^k@GAzx*`~aYT|0P2Q?_qn4@lLC<#MPyr*h9mRIKd4N$Z1AIcp^CTnEAaUBO_{ zdo3W1gq8;oj+#e?vjx_E?yS7?z+t9(aOr#!hEl0dqr6af;-v_UaeuD3a zDDa&Wib09`f_lMRF#jtUG8f#3$nX$cOkbbg^*u)S)FkFPabWIGqn=$oP-hQf9{qKA z*_qLBo95B{HFr?wvjl7>-UZR^rIJ$rbJ(ap19nHWgy84qjGI6m&4Z6H^@nE?={h?| zqMyrraS8QPi`bg>lYn=LsdFO?y{C_b_z)woewITF`~yrJZ;v>v7$X&F)M47hL`n6i zO8r0_Z@T6tPKF0k5&xL>Nlm_eIM*2xD18?J$sNyOPI)S3u82@+Eu+6f4Ta34N{R2$ zRy6b`)>Ogriahfb!1?%;8%cYm1rzc4yXt zjLyns18?DgMcRDO8#i1TDxlrF59meOxuVg`v18bMbZF2Jrd3o!+s5ZO_1HOL4W*Gs z;Y@aBLmrD<-&JTVpls*V1l1%0zvbmTQ&Z$ZpJVd-z2l(|`lQ?`@yj^VXd;qOyd4a>X`<#xr7Cc7%hU?pO1&w)KFy=%i*6#cP%J-MC!D$bIXaYv|zeK#P z61=EG^FuN1Z@XON`n(~ge?9rE3iBibweLe(i5_ovk6{K5fJX6m^zBW%q{-P(I2W`%mcOgowG!Oh4E2vr(PA0&;ZKO>+tG;y|m-V zV6_4If|XGq_CHM=>N1)qTC-5sN1IO^83DU!4hit6M7!l9AkvcV5&Q92Jna`3sChb zOe|)8Jr6eeI{cMI8oYYkgsk$nN5NUT7n1Gc(dXZMaJ@4UE!JuZ4pq9mN8uwDY}=F7 zxlV&h3$&JN;?bel`PEr&7lQxC^Fi!u}~n8GRk z=z=&c8#k@H03D;Epmkyalv#*z+J!dgxu+2ds$4-ad?8r%dWz$dR!8!Dq@BsZ{H{n1J6?qcUunmUeZ@VIl>a2mL)270Ztx>Swq6R}V@#(IK@YLox}pD4&_TfH==wo5c0^OUiLSpq{@cZ1|;C zINMJA{-cv{!p`romG<#!n_5n<(}Z_Th{M^Zw1uWz>VCO&o$WzGJ~Aqd&KZT-d1-f; z*BJn_=JW9XKIgKpHO%mmx_$Lk{J)WZHWn#w`eWR?kF282( zG2#kn|A6e#N-ofMKgPSKf!V%TurxOnaxzuW 
zvc(#Y{h#yNq*67Fpl73ObY>aVzPC0kOobs3xC%J*e7*`NRN`qar2=UCxm&Q5lQ_ov1hBx+a7>yv6a$ zzhTEI1Kw&}f+Xb5b)^5KCv)odfzy{! zmeOPpWE2q-gFKLyZBmHqyoD*p1z7$)4qT^nN2`~&AhV?x9@o&}gO^9JmYJ`p7h@6p zoYsx^_~Qc?bi47X)7;U@z!jUO?qM_B!(lM(`t{o*IoE(iU}m-uB{K^^HsKaXjRs-W zunq{>)Xc0toI)R)G@N9$m($oJS6TjZ4%8q1g%`t2g;U!^{M4nFu&LdKIkyJm{)sw* z`dt_1O4)|82lQ-T*Om8(7!FPT$5`gtJ8V{mk#II{7K)4gs6#aYf({gb!arXU?^D3p zo{VP$*ZsuaJ-P_XG`jF#Z|m`5(-kP^XP`Q;Bs=B(A4qjtf|+;vplzm#757`s92Vce zijCpu5N^#}N&B@QoI~up6zEfQ5R!*6+}!#ETn72W#jcN0>)=BJ$YjfnT9d)^|{Ux9;Jp z-&9~wc@oNx=Wq|SuMz9cp0iR<=0cS7K>c)%M0zX)$|sh?=MV#ayhT^y@!bV`|34Tt z^cfnOh=hX7d<;1Ggp-ErN`8T<;Q!Y%C^H)g9xFZAKt2T($KzDGsxJJD*A-adX8~b- zzoP6PBcY9Z2-nAxmwCc{grnE7c0d$FZ*Ire`PSGmtQ<8q>8eD>Dza~kp|khuDXeZf zo%cs>MKAL$Q1(QL(C&xHi|DK$M!SrevoZO)8?O0!4qAMa(49EeWpQ?(oZ1WX7T&

--0E&}edfQk(RF@ByM$aR)MF`e}?{asX@PGvYFJROx$dziBBHW!<} z5(@4d11*;VFrA|&UGh4UP?EII%rCIN^)lMeh=NH;N4bm6DPSE<+Nez%momAC{Nn4g z9MGQ2dKe^WIZ&(BcsPE>Hk7j~dG{^X*A zvmsA>1$rfXg4Qz19>%&-PL=LG+avJvx*;Dsw+XCnU1TeFe1yQtx9Bh@m{Z^R1^xr} zfZ9J;CH;93+CThaipE#j7elVY!o6D{t|0})lP6;GoY}DCZ!y0hN>7lG{yFu53T%Tb zndOpZEVubixi}xtSvLVfelFtN67n$fN^e!f&nmRNT!MZdK0x#7EDZVN$viY(;=&)s zd|O2VWEEDkf_Wa`v*0thKWW1n%G#t1t^v!^58SH7I{cKR5dEAO5K6|SXX_pzW=PZ}`$DEt}EEYo^PvlbCexb(Izd40?B6{WdV60|6 z=#IOLsh4>W*#nByv$>pchJ4G-S)k*!2R+?LmmD#MHFZ15M8g} zrXw_WNe3;*11NoU91x5IMPG8t7(G&_*8bZj6 zubAn4Np+*(GZasm%o!#fNBzRT(Ic~zh3uHe>f&OMIC@xiunYCL^~I)b-SF8PLw>^b z?^x>`jLX*_fq>UDp@n>rW+9`oJe)jG#5++kQwSEcnAwO$5O32Bqhju$|7rw>U0IM? z)&t9Z>OkXbZxkEpV58WScL|(|4rk}FDD4s`aG3#N^4_3GQ9Jg^GZ2b@{@`MN{6h67 zO;mUP#3YX|p;cXsV|htEYE*Z_?fc(wD?SnXej{bG6QV$wJdR6U{TxNleK^&gr+E5a zE&lWr@j**G$VNea+o78z9uKl0ui^;;^@40XVJsvxih1{6nGpX=35Ipg(Clvor#`%v zrH*w1wf;`kSXW&>z^0PT2(N>SZ>Y~_^8vU(zT}Xye_2+Wsn9>0o`?J9ad}c?Wnnj& z=kXS-Dl+0vpVko)e)oV)AC7_T>2~7nHL%l4H3a4z$9_G}L)&&@r1o46J{1mNulF4T zZ@)+B201rjw>5wt@)3Z7lQVn)nZ@LjVXA}DKR_F)w_(5IEy@OmsZcm+6osA9F@V!^J1e9sH0 zOMGgsV{i*`1-b_@4Odf)98UhCjAU%GI|feEb`nd!fW=UMuiKGCPOPwoE_5DpCPuTY z{VeCy>41u3^RgFQh=f(=wRrU_dY2zL&E2R|Vya;!$o_L-S~HIm1F@a6H738o^*buF z@PBa9l~~T|#WGH>nsnOe9G2^B2J$!MT;8IQpdOHd`jJzKQ(}i1%CAG*Bwf)HG;>o;pA!mfl}9+><;Py z?zhAO^Mc_CJz?_oXaKo)0lu;+3wDTwC$EV@qj~cxD=titJA~uF!6GZyH1rKcoL0%(3F*ZRZ z9(xIEdLBTL#;0)_f-(DfS)Z3TJA>j#C+V4;)b(q{s@KqNP3E_&VP5H*U^0RuWy8B6T|}KjDwIV zqfuTs4&x3JgL=?5re87~!h-*x{>?9({{9+FoLY{H-(16ZM;_%KhhcGc2IT{eqRuKk zJ}6m(FCJ%sdihN-`c4`uH2!AlbAFQFi(WvBxED&V>{B%uUx5lUIR@?R1JP^VK;(l| z*cD+c#9X@!E&tWgx1hy~-&}&v?{xTDlT^s>*@gc-w*f}$7U7_|+Jf8ZS76b7B80Km zsA1Vx^*F2e3(9F(;cWk>cH3hG})oJLI~r^wvI z*;Q_YyZ|0T;0|Y4cOJ*r`!SFAU4>S675cW_fjqk=80<$H*+Vs*-winr(_=W}U=>>4 zpv?GP;+PVbro;0Oz=t-pi0y@5>Xm3V&6~4cd=y=VKESf;N12sFJ5xJ7=S0JYbN)lG zVd30KaF1;f}Eu-HY1 zpK#?SZc8#1G9evq$S)8_xP`M?-y6>Vr6HuYJK^TVkFZJ4o$;n(zSPx-uiH!vBttbc z)wW`EQzyXr$CxtcIksQ&g45B&Cs-bZXU8;?hyNw$tXP1}dpgk2>@+yM8^zl58{koC zBFMg(Guex$%;DovHgdTU&siG@-cvq-ogx*agX<+e6|^5~I)f*bMnYoxO}w}20KD+g z7lQjg^&s7M732SW!(F6XPg+3dKojzbFPsMbu?eJU_qeKN;@?MyskZ<1jZ2-H zioM3^3e6`o(60PBS`JF#&NeMY+l$@6Ayk7;{aD3$`R)dbhQnZfwiZ+8(>%Ru2+FsM zgr)>LkdA-K3Xa!fc~A2DPp!v6#4Hh=%Vh;q&ftW+y=ZbkYQQCDhzZYeLJ@Wyyso!5`C!?CVd{dUk?SV)cdS_<9-Oc@e2#?tiqCd zQz1g2e`mSl+5RaQAJNG*1*Jhd%`Z*UZZo^+G%O}gX~{$KE?anUn+h4lDovY)j#rt1)Ev1m4oY16zDC9w`pe&%qjQT;{|l=y#fAXsJrt| z46aGn5ysE@3DG+*QFmkuYbEaF8k;cezx@Wv|Dm7Z3&Q3d)SbIpn_vA?k2ibti4*Oc z&dOJH;S2T+BL0*nZ}$gr;Y?j#_xKT5Ue^`9PZ3XSR~ZW^(u4S>9}u=737|(gCX5z? 
z^l2~Fzlu7ezK(^)iNUD$-Ul)E199L`+Bv*k;pn|?DrN<2lmwoB0mZTvthjL*ES`{o zwKET+#r?w=y+((x(;No_KhgZM)eM5;L&4$MV46jKg4xkCoX3cv+!~kj(0`&gEGBPm zuh~gx;A1NMb}WIE*G*vg@jRS;NB7N>JkENFCTf+_+*7N-Fpo3XV006IXIy|*t`rr0 z!&G&_@1bLGC7M5Jgw8$j)JcY2e|v z2oj$^28a4V@ba0VFv3z#P?4`JXyIefJyik=t{Ot#bn5Sz_>EQTD!{A9JZQam2+Rj} zx1BCas#G?SHl$t10j{T!O5Eq@fgkM{@sg# z*sSA{^{l}26rJhLX<~A;Iri#^2HS^~n3VGgWWQ#CYh?oLep!M3duW#Xpeq!5YVsyt z;ZSMu8an%31}@!zkFU^0+0r=dT)GiEe!hem>#6toAu+=Cm*ArTCm=Ap0!G;C^G!?x zny1x3#T*0L;hp3xdR|1!E=uOLU^+@${^ka*I{|P*ijzWzKyCvuO_z6|!}>!o#PTM3 zkE!NtD?W1RV+3^aYJf~<>R7tu3J!)RXrFonh7Z@`eYeVKpVJBZyA*-!pDkEleH>)C zAI-vd!lZHC(QxB=V%**0>_11L#zy-5&0V-Gs|Hj|T@M~n#%!}R4b+jA*{Q@ESIiiw zk~w$JmbPXvYeQ@FQ)uvyi`o!cj4(6lF4OSSW(%RK5THZeOdDOn-IDe!Tq&f#AWuPX zEt(V_fbsh}IOl26V42bv3`+C`XRTo3o$O@AiwyYN&A0yXZ-<_jsqAU@QHJoZX#Is`~u^Lhr;uakXZ_aABCHFu{s^ zEhdM!(XA<<5gg&5ICp>*D`PoX&2LFToq)Y6C|_@91@TvBqx7GY?6A}gU>7+7&1R~= z!~6t#WYC#y&Sq5X{tN7DKH-g7Cm{9Xf4IHuKXCtDO`PQG=y3FJ$ddoZx%%$GkoxId z%-A{DO84UMa|+55A0rOjTDZ6J3`(66ByP|4b3WA@Ahho-tpAgOjb{&F?EgJsHme}I z-y4(~IHP;#51N~ExX2^x@Zo&w7O^VC+O|p@H$qo%N$H7GyOu-p`ZJLH%@|{+zXY|c ztEAVt8_;mI0CR>@heyjxfPeSn@52e`J^m)A9zIu6*Y6x^IdNwB*PyKTW0)|s_kE_Sau0Q6Dc>)z8e>xq<6ahHna^)rFZ6F7IgU}@!GxbkjS0d`b@d_M6h z{<_T^f>qeitVceg3)p;|p1%pi*VEIY9)lZ|c zU~vg70!(;O<8PF&dVwPfMf@4^%=hoSgUUM=Tv(L>#=cz&iL=|$Re1t3+ug8DejHNg z=#wr=e2*o;*wZ8cR`yQ!PskKHt64~=Z)EQ3QEE7xwud}lu)@L2m zoH__?t^(Wam=D$ysi!_>6Zn6M!lsY+x#^)gyj|b|a9!pOi{FZbmBvvZ`70Yo@ux8> z^t+_d`U6ZnuO+06DFge`R_c+ zy*ilAzpk>|62zn)01xbI~ofhIyM7 zbCGT4biOfSky|{V_KE^`c2c(JlBuI#4e=QL4Fxdpfq=Rs?$KrqLH0A9OG7rCv}Lte}}*_wCZcsjcQW&aXCyyiZ%Ic71}z6IERmNEcmZh*}6 z5Njl_UwrTtZv3h3oNQCIsvu-CSoN=REcV)l63G{^9dV5v`bInlHNF26G??_QfKk=rNB`HZNV0;jtexd2Q7kuQo^?U1;e!Q&B$(oq2dDV zbZuCB65Z?5Q?Rw{8N_{l3jy3IRLtwh`t9-p-L(!P`!QDo1VpQJOX%Qn}~M;q8iQId^V1j>MQXlYx` z4d^r$UeE4;F!2P)(fEle)FtfU_n3)hmPicme?pHndQ5%LgF8Lt9@N=%u#vCCeBHFq z+(lRFejIuO33EQiW3i1Wh=*G+1w=B zeFU)TZ}(teK_vL;xnqF13_7R8g7acML8h3;q~HEh{a>@R`q-%|E-c~N=3nCMJq?82 z%~!E-+HW|!B@g2k?E@E0Cs-4$$Io!Q4CkY~8{K5YCZwS+m z#;nX2@XJ(NNQ@y5OLtq88PDRHO0&WI<6$Vi;>|Vb4CPV`PGLxfCyQB{ z03IfexV^NM4eV74mLc1~dz>l6zl_C^35I+hk0PA3YA^VGm;!T#HN*J50a*HK7s$@- zMP)vZ4&GxpGe1AdbBD4$uQhpjV=|caChg&KJ2dR4eAfG|EcW^a${@63jK%@T+%Pmd z!_N|2o|2B`_Y!=0ZC=^E1`5_XfI{~=GyC*{spB4FfK>q-VJH?NziRV3>qnx~XHSqD z>0|xl7$~dO7rt!M=k3loWAcCcLdGOrs5quA6yzR+h=(FRVrMm&2VaJ;KXzEkUXdU1 z7PkC)fuYX#A-S;>+Pi;a#TK+{JK%^7&(k2pZ5r3sOB-?$ig2W$&ATo?2&s~67O+yx z=ewSVDyu{=JCX=4Pz8#2J<+4v36OsZ<>LRG1ksKikT8by$Z!KmUF2F&J7-Fqxu7z}bPLG|7`w%NKKf_AAuIk+8eylBFh(S_LN zbQm)KC0%>B4!?3w5{}(#B4}tSIi*6Ej~R9eLT0Klb&efk-nNT>$8Rt?^+j94zk<=8JN3UYmE zXT0bkyBAL#5phRQ>3NMOvHL=JU%!bLRt4+Tx3TAc1$RQvU`3Y z_j(TYy{M0CZ8FLion-oB?sCB)>bH_IQVq3K&J>p3L=496E>uf;`R-8=#p zu5Cm0IbSRbOyTOxk&Et~2k{C2f%QG&@MUj+2ivo;er^PHWD!gCK?*AEesO+R`#`D5 zeXv_A5O+z=i49I*h4FlJ4b3La?J>MQyABk4BN^we#RvKz<%Q}fgntozQv(p|?v>kZ9D!@|KKpo_3Q@DoVK8bg`k6qIhfqMGzs3AU8& zalM>_5kKRh&OU@~H>87zYBr?WYY9DRuCEn8K&Ykrz0nHfVG`7fZUqOkQQUZ&6)e?+ z{!ir(TzUEcIOpb|#Rz+pmb_BU_(ja9$pfM3oC&kNoC33B_Cc#=Gj_hn1}_o$mpz`HW3Pwp`S^BDk^SB}@&|6*88&plr1d zm-XV0#QfevRGV-rk1mFse~-ObzE%l}^@G?{>43z_^_3hBDj*z7qbg3@RJTbrRP zxQ;i${j*EKA?OIC7QBYj#g&w;`wb<(bOlwrWKt#Np##&P?rJW##wHh4lrg$3 z*5tjXAq-iPjyd1yxtsfjlU1#OycKgXb=CuD3jT|!d-<~Af4!&v&Owl{bQpR!OoOt1 zq+jmo&dIdzpvv+Jbo!7Ffo5a}nE{h2F00B`)UaHe^I-L`E3x-Bffcs}f})KDC&@gJ z4fbQjb7o;naTIyt&ZF7po7@_69lpQQ3e2lhaV-WLK*`(VcEvl+*`x*h57K0Kl zb|*^RFSD}g4IpnXgqfqMk7!dPQ+G?1gj$&J1a$pAql- z%)b)l`r9aa@C#I?d9dc%LC8!~z#Q9FwB4q{K8Hh4wImGOue`*9UXGY)U5SdGEsk|= zXTj`C4R&nR6-u3!L72G(?V(bL%YFrvK@OBt`WyX-LqEJHeGfB;i*Ru!WQJ#BZFV7^ 
z9Z1|S2UAE%rQP=D!B}J40~1c`^9?b1*mJ-)%>C;iL|*zEBjy!>=LY(m56)rIEx)n@ z4jS{ej~{cvCE?Ih+LdoCq|C?J&DehY9hS=e#=*R{5Hd4>)7Wv6Lmvadp13gj#sk5x zW)dv+`$L{*1L_bl=g!tFM~w*)oO5?g{?{jZcl@D#RPq%^oNb2T7jy+h!4dHOLvvo= zI$V6|7SwmWiz17q7-nF=+vYB!`+X~AwkfC4g>u|S&w+Q3rQkjEGApfd1=-HmSso1n zC%t}A;xc47lz9z+)Sju#yuFQBTaRc@G_*vAEiIg>a^hJlCeA$GTo%p1J$1)rq zcmh{-A&>vU6qeM*g!kTm03E6+llCQ*u3KlA*jtV zDPmzk`>^`heehIK-gM75Zt9UEs9JcBx#)3<{B8@qFeLpIC@YPb z-;-ACZeqmi?4&%%*fbQ@brq}@7qisaJE8trDayn;@O%F!a0{A`ifyx43_TZXD^F40 zbO+$zepsUngTi!O+E2fQK5?C>(U1CGXW6nL+cX8~=sD=Oy)PzC*+V;WQ^^-yQ@+17 zz5o832M;bjg8?VB1@C`uLgbe95T|(s)FmfXTi=`TcA=Hnc>XNv9xp+MYmtx~N%=+y z<@%hbJyGA;aPe;qJ|uHB>b$c=yCYZdULP%9()xtD1qOnlq>HeX`i=|>Rg`CTl~|3j zgzk^^g#mB1d8d&dxSjh&JbvE?RqsnMF(4mC8>Hj*yj5`5Rf~66s{t$5mV;vAJxtI3 z1|NG+|Ilo@w&+ac_wO#qD%g_UFxCa1?a>p|s@vI~`}W|#q}PzVp(pnGSpXTQZ=hA< zDt64+h+j~j2xF*+r*47{Vj=BNe2+0nk65huVuUwTrh*w=CrxK+v1d^yWcBW#-02;( zdNhjln54lsPkIgi4J5BhbQP0KIF5acu7D-P!(El8Fqqy!u34Lj_fA@l!#Bxm-9n5G z{f(JR1^l?(&4nITU+jh)^6J1;jLiyPlHj~twUmashMSId|}_{XVF<035v`Q%<|SG z=>PZ``hO?~eaSw~Wz#;idO*(!vsU8i-o|?)C{Hl)1@*)~MOpc3m1v>|_m+4P>YwJ! z^5{8M_|t$-uT2HThHOrJYXO?|&f_AjA42hn-57G9o=ZRemHbN9;5Iam$-fq%;!zuC z`E4R*XubmP+mD&@3ZUWDqfp-K0_bF)z`D;FT;o3%@yL2Pl%Ly=(tX|%W%s%0ZQl(= zCtq{J7t*ZhKZYeXF=)9t6fHKU;JrU0AvpF3dS5by4&P36l8r;pe~Mt>yD)fgNP`!9 zT>^J+MBQs~sNc^Lt24+KGG#L>-hYZIoR1Pe;FT&~YKXz|U<@$5#wz$rC^tI_?eY$A zi_qs~ju8@|-A6a-Ch=LMM!&aWK4SUIQ}i??WkazGe{4BLi_bx%$&%=Ay zlVEg1E!KJO2D#}|mK-?sABF=N29cN3<(B)^;TJ=fgG;OxISXgBUA@fs>nK5QbS4n56|d^?Qs zRhoh*uZeRx7Kp7gZa`*o5;n#k0k4-!h&Q>PGTAkt_BkP$<8upBXAVKxqz+ZXy=~z6 zuSigD`pka!y+FPK6Cpw+7Cu{bxHlE1R*mBVy@13 zM|<7tpH)`=KTzzfM2|j`8GIm*Zk`Dw-Tz8`1A!=gG)fhpJs2W6FId@7j=qOpU~aPx z&Y~VMH;c)*W@|WhxR4%m%>{Hc(s5Ea?bY!dQ|lp(INv zjN~MdjHQxDrslcdlw?V=BsmO~q=cbDN%FgYf1l3BGUj>i=en=!d!cNJ9h7{`MzPO* z$VkgZ=deMT^WR4JIO7Y-4)0>|H74L~ZVwk+>%nE&eA2}KaaYR^f-)wbyW>&_l5=+< zVqh94-rfr0dL8^LrIX;{@|l^N+=MKrUSR0)0ORXL;4+{WE*xXYhit9Dh>uITWbe69 zb}$CzE^-#od=tGI-$0^CHv1U47h@hwB$mn)cjN8ln7wx&We%O=c#&}Iu_j+ybBj6V<`?*P;W9fOHU zZP-5VGz|Y`B*cs=gw!enLAFlJCYT<^#ZBiRa9$D2Q|Jh$ndfoKi!|)`@d`!TcSxJx z{D%bt>bRQWv7nq}#iG*3L27Ce49PMUwB@vWo;6vmnKDK#YZ=PE*;e6%%Mb8!K6NDS zrrB$nh-H0$j)95@Hg>>CtUA~OeY^6c7hF)6WMrVm@Qr%a+RG3%C}`~sXz z{_L|>LQ*t!P``A-#DFCbJwKC+pB;y)>VxVA1AFLM&`IdgEf?ljk*B4fJ|EX>H`<+V zW9{q6TXFY2`bHkW3cY_(HfkzJE;(b?azpg9%L3ON>Vl9g!6=D4o=r67ue^8wuZwF? 
zQhG<~gv%iMPs&hN7H|Pao?^zQ1dy9mNZX2IQMo>a)!FSrO~E^ERG1atk~0KUXU(W* zd6zoKnReRe&Y)!9NS2{U2b)48-n!u+=h`U&4v%;UQtLEyiXVrm_Lm?rHkW41w_Go@ zz>1I5#pu-qR2~jkYjc(vH>a@aD-L0#!FM>^xfS$WqoL$t9{xql>#c7M_&?WJ@vlc2 z5!dnoG<>-U&2}PU%t}E1pj2v`JQcgWSOsn&)C1hNnZ20$3zb`Ypluh1iG|^4eJdBL z4RWcM<~A2IZZB#se^3WixMSq(c4Rbzwprw1;>l&8GMI-2J?`M>4STr^n}`iM{9z8h%d!yG&P&43O9kW;+m7C?dr+h|5}n;1K(Q;a zx7+qo-k=F3y*VZ^`-=-oHRQua4aVAq#K`tmV{%6$+-4@c-K0az_KBrXHrNNEcvE5L zRAXUJ@ol`eOOH<#-63t3?&9gYz}P<-q?azBWUHZi$g6)*@ph6lzS|=x|9=PXjZNI{ zc+#&+%=wPp8VDk0F8oo14pZ7;pqGiTFuXrF+x!X6owexvQ5QqU8)Cnxd@LUQFP^5o zR!*0VaO1j}5Ndge?&h_il8u48ziGb^GY#dF{cypz)INxQcc(G3LqOWqJ&?gXOH|QU};IE`sgJ2ccm}1=#)3&dC~5r8CM~ zusLWD^%E~dS@}1V>_ln9`2JvAZUZ`#l1Zm*L!IXSU}>l%2HO*kA(-R(>r-eZS9Rxcd{@hM`Ael-`b9R5jpdGdSIH*yl-x#^C?P*paA zV(4BjVdYCKF1EmBvlXB+zQARia|XlHq-m9^avu`k-79u4L|m$2Wk-}uv((bP^ynwh zNfUFekNxrdoJ6?dRD@ocwh&Lgmx>}0v1^QIzN=t!Qr@GBjs;ZN$6)&;0d0P&sl)0p z*xSFy;I!>fT|J39P%Stw`g4W8zszi$k7D!v7zpWl4XmF^SbO+CI3za(n;Z@K<27qQ zaU@^;{Kq1w2=}GTriD$w`G1D8Vpq`V8&K{rG^torKzkL)d`Q%rZ%sDl zi??n@n;AJ^x3GdsS!=?}f{sXK0kPoRrH#3I{sn$B1d#BK<*YGzRxh>A=?^ImFw1z*N7U zyX(a4R|kw8gSzYJ_c?SAD?QVWsv=jszSLA0nL(`RVcBfc4}HG&pO=i-BF2;x7aV$sI4L3phvWW>oata}%5k}jvdbvJgp zlL0DMhyT$_`MQ|cN?nMZ@;t}sD+fUn{zm)mcQIn8D;)mx z1R}oCVsVx)&Pg!gGe$k+#5Goew~GlRxLpR>@*wb@*Me6bHiO-m7Z@zFhT6JzuK14# zOuF`xIMSyfu(K0s+)2`T8A>d9aTW{S0H`*1Qm^v82MW)t>T_Pi6@a~PCFKRm+>>e7 zJOIy6ryS+meHgX30mE*-r&(|tx@M&@X9wDSvt#5*FLj@8VZ`fH?acA>$YBsk8d?L# zm9>+1k^F&ncSxi6bK~|*{1-~LL_jEcAxgiOf_KRTOfD}c?x~D5j;1V0xMWqYLF9eMZpt<5`5XoVrZ{{t5&aSWUV(P$lw6S|ap5O7r_G=)y zBNCUF)0@Svg{!)51(MMx)df3#LNynHHkJB(xLsG)c;Y@~$n*r)yc9Nee>Iw`dO<`Q z&sKJ(e84{Pem3c#p#gcFSE@OWDB?>ru0s@317q!d(<&x88K=a7$I49x<+V#JVGhzzCe|jpq-Z_iT`%+l+ z^kHE8d7a{g~=u;X6ba)@97-4Ea$XB}P+yP*Bf zS9JL_4mPz^!pxQCg3ntkL6LD*8cG>SzY7)cD$11CTv(~L(N{t9;eIfCv%VmjmWu=H{yH7Q;vD+Z}c-6OntX&xe3j?FqRnSIs2M&n-?`u)--^t z4J_xDhiY+elY#J@xK!opy-;-E0!|pQ8^ZJ#gXBXYod2SR3!im(?Mp)lcQ!=51wSD2 zc_Ie%NJH6%Ty@QW&zM+(Y{Ic4($5`1Sx43*Smz9eQ)tC%=GQ(id#l z9!#vkOv(b(s+1?4+39gc#I~$LuahB=FZ>Aw?pL{(7cY zEVa4@1KXAp>y&sP6Dq)E#Yb#~4=C^9$a2b>xX{k!C{4QoidpL*dWpaq7VmQ%VDm`m*c4eK7zz4?vjO84Bw8xze1o!))b);fQ1etkQjowO?P91hhsacKRFde0w~ zqLXhNbwp@EStVy-CoF|_mu)CZc4H^WpIT;N$opLBB$(E>LP9_(nooI!e&##brW0?t zZ&#ZztcxQ`jx414*#<$f7@^l*^bp>_qdVtOQAziP4)L5XGw?30K=AvF z$7;OYt|J7rj6`ir6J-(7anH-=xO9h(kbVC$F3GgwkCTR!O?RJhpHK+D*^`Nv41&r* zI)bcnkJ{BQj{PI0ZqecZv`Q|6=-9DP3crcFPCMPfCl-^= z{fCN&u~KoUF&5vNg>7@vA<5?@mYtghr{6@um7oIX6l20SI}IQ%**lOgxxtm+&jZm_ z>I``@0mP$|h2u7PQB5f-dWV|0FW=A;3(~)@K4SBa0cH@ejzF>=XBE1_9LXQilF*Nu- zv|WBrEKp;9N>(YBHKsDR({iRM9V*?jn`R2i7sAd!9QB|MAk0y*Se~ty) zFJs`E|DffJ3Fd5V0(?gK%cbuyW-s|~9aUUI&kZPP8Nxb3U*dFS9CdL{qqf zo`6b?~ygJa?Bgq2C86-tvRV?|X2yw<4GZ;??r; z9pp#aD$V#k2(NY1<9#1KfHIvBD3fi2=B}%8>vcM6P7O>xunt-bAL6si zV&2c#0iBPuq3nP?)au`1I{o`gZ3oT-zMl~fPM;ujYbncmEJJL(4eh(bA-d%!H&6c# z%APgiftwmgzH=46RbD5LI{8&|#e7kY0iWe!jOtstsQI3!@^$GXYlz525p*7DqoKt~g)(tB81$eB)7ftbh`J8S zk`NX(We%q5QJ>$p1PHpB5AF56(0FnvUL7FfWhqzj`0Yw8xI6-jTkG-u1XH1X<0;Vj z)eU>Cjm4IUq>uXTI$?E>AzN zX`J&J30i+}VdC)~=tr!Ro12Vy*$nE+8ln%H)m0cVY&s{o90ESd67+|->rPwd7lMp;@2WW#2gRA8eP9Z8n-v#u(Sn^X981S1b z*i*_Si1m5vvll^>c1hiwv4L1;I=pY>9uUX5;|zW3q50<(_&(M}Ct_8l#C(AU;a@bq zqv0e!tUzj}!njLk(6#*>H)_~tko>cZExkoEPP7)MtfaR@t%6Io=)kh5Q(Voebd=jx zf;g43L_WKvp~v%39BU=4+FeI|;V0<7Y2C}jJ29xs2kd2Miw$W#u)Y5VIP~fumTgYr zOb-*2V?;LgAzs#JcS|9DBah9}GR&aF?qzp!z6f{&tk1{|e$j4J~Jqe}B7sZKLn- zybD~*^;A@O_HhsGutxD&Uw9tciC1pA&gMlN#?rs~pk&bj>Cj{CpjkCnx~%OUhU(3M zu|gs)@;BktzGu+I-IG4!lTeh|0uPN%`26b%NK83^b;lwp!xI9o^Y&xtd^K0G8gS4w z@)bI&V2zUwpK_@ZCGmdT4BawZth!G#Pc9S987Ou3p?Ck0NY2$EnciRL=-yw&d96vn 
z=+1{YPg5n9Ep$RJ{WsuYQ^1*e%$mf7?-6tU!|SoJB?BxUQg;LQgSr?N!{W|L zY`Jol+ck*xd0i`+x$grg9lZkY{CbMzMZY1fvnd~NG7RwRK(uR_hE_h;;TG-a`<1A` z&+s5|Z-)H;ea1Y#e_>-YFXA0r`u)4M;ILc9LPqidCi8QW)||5B?Z#7=__&P_5&j(t z=+|g>_y=4cjKI?TMpS%$q0ZVJ2#d|0pk&4^7F;67r%TLu=Rj{vR?I++=UkQ?{TWOI z>VHex%#Cdp34zP6afLp2Aw1+f%Gx(!jm3VjUeg(JI`3c!{#sa7bsaPO8A^AY0g-l^ zx^R#IuX-J>PAH@M$iFYhe?;J%&7Uyt@-gr*o(GZzAEg;xb*V4%EZ6R=&sWgdH>aN& zFEbgz#qdgyMW0iyUN_2VZ6-&$3Gx%W4{ov+aLnZ zw0z97kAsMhf3frb?ne1)UH*HRfe=_!$A&H_LhZ;x=5b~wF3cq+mrjCIRqM&TYvt&HE7(kaSrv&KA}C8Xx}<4U1$xeODcEO6a{nLa1`slpT-i=G4wus z543+sI5*=Oykb~~GCFs3dRYj8RU=@_{!d`H^Z_T?`iRMg(e7cZ1xhyb<*GuB!Rirt zC5CKbA!{s!^JnO+{o(?e@v%~wZj@TGejwM$=m(DVj)Aha515^%0vF1`_*P4B#(%Bo zy7~mjFI+}v>$4Cb+`!t{N^Hq{j4$ntclX74&;pf@)KPeOQ;D1x3+%g5Qyc zu;p3-Xs2$Vyw+v-_}Nq#yM)+k^_Q`hbQR64?&_S&b)53uR{VD56y=@+p~oc3ByDK| z&6FVN+DLO@)xv{tSX+c;Pj<4JbNfKnxCNZ9-2!Xr5-#RH>Q?AD3ne`)`NY*{@$^jU z>OIhzW$Bopch^YN{un`e`Udc6E`b=)OmKLWhY=Y!;W531m1n;Ip`uu?U(uL%Q(tJ= zBmuRy23A>9kIm&;D7-}Z@u>n9ux!{fE)f?CS1`%y9Qyoa&Zk?2lLq_&v@eNy^`H>P z`gfqXhZTlSn+sZ-1h{%a%wIjzNf0^9rGA%(v#1LP;Ca7#kRQ)sb3RcYdvp~GZMn#7 zqJGl90Rzrz&=*ve6|?wW#o+il4M$y9;pwA2p?b6jh8Emp0j;Izznu0h*==g)KX0IK zzZ5u5I;LpNICfzP?MDrQQMr32*Rkvc)PIQ2 zx5Sqp1L6fwFq*VzF&7 zq9_xOj5XpVSC+dE4Y-Q=CaIiqz71GA^+DOgy`0Zm;=Z1u4BUsaEc9eDga;?m+abkW z-enRiakvN7_nty(lpLGYK^U+;6J&K(7_A#ky%WTU*ghWX+)vTZc%7!S1j>923tcEFH#zFAcB)U7VV6miz_2^9< zSOz?uW9GcGeNv$_lMT-O3pYhlN)OF8}GIgzwuQu#+sS)#*Miwvdcr*Ig8G7 zRlZuj$6o4|@)Biz_DJph#6s}j)}Y*ahIw0`@FsVrm}{ zb(<|sKXn%K`Wg$SzMs+HPebAV?D7|MzgBqLplC+}r@86Ci8J#-b@de58knP>-vo?0 zArj&*=<@NU)D0*7jI{v|!O`^?SY3UIiN}|r{VQD|XS$Y?Y%yh`E=|(R_i^YFaGZEm z5|pj)u0C#S%GcJN#GEr)7VNVSBWBd&gotdAoXLb`EA@n=I4x@bUBya{Q-A#K{+yNb zeW1l2F+O9tU=a^nKQnXO}wI9sj_=7PV2FZQyu<`$vWw(UF-IMU-u4J?n3uL!UAxy#hqYZ~B9t|9-eJ`G(@ z9)ZHGl%d~9d8qFbm{xU^y1^zx>Wn;$kq@99vw2*y;FDVv>p=29$^vZ zpHi3jXijGkbs%>=%tb|P#65YXP#e4tv%3~UQ0o(vISph^8x~_!rI8Tr?t=2U7o|a3 zCAey0(f;3Ekbflslt$;EVZROeGeknv(-6oyLcaFTjO7<}#yE#K%&ajK?8$Q%v1BqQ zce95n(b(_9f@-vtUI*&7ft-gM z)S)KCFXCt(9^4O|+Pi>uFvm{cu!B@Df2Y=62!-mpEcK%Lu*4B z&7ceFx_{C@Lp!sCA%(=@T*_Kg!0l z&76vSv7)~hV9)eUeCY+s2uxZ9YXXRu?E92@12SRaHbbF`cEiEd=fJ*@Jd!3ar5>v} z*c8*ug_xg2)mvba&ga=Cei5pAQBQolAafe@>L1DRpwBeBSye}x^H!~~lBn|tk)i=oDcv9Ti+QfBYMjOUFw z>dYzX2QKA^pTRcu55b|c)F>(F!}4hlD|#_gowa@qqz^cN&R#0)_!)~O_MHUV_o*P8 zZ~>wQoANTLNv<+9l9jIT20VEeCQMaAvj2NjE~p_6dj@(p$8i-Gc7nMR6WRJV?W-c97-%qptCMx zNCy;0_vdPVtp#uKXguE~4@0j`gDUDM@5K&+-1ZC`x!F*N&9vk*&l~WvH?7hhwwKZ6 zZ8%D1lb2Bc7jBBPo z9XNTI@@0*GK=6?{;ML{^+1oluQ_TgJ5xHoPsmquD_XFh9%GKYj${_gGXOyhFquw;V z1G$I9tg*U_bNX$8L{BTwrcK~tSD6bLzb)C}eDN_9D4GO{lFsnl@&hziyvFwW zhv>gR1NlA)*f{koD9et5@fYglS^f|G+!FSBJpKGT+WX|_g6HvVptQF|SoIi+x9w!2 z(es#xVm&l`GzNiqDdMKFC{egcHRrD6hyn^B@!Ks3;~c?t)?@V2D@WI#VmNcjoNsQL zhz0qRQSqgM81GM*ZDbkvH9iK*)>t$xI0wew>0J4hw~#SC7Nd$^V%F>BX!GU~W(??p z760DChOZ}xAODngCcz**9*Lm_h0OM`4;H^)1j^VEXtyX9vx!A5@pzjXlyL(`|FA+u z3!;~iE-v}UMDP;Rn|kA0&h^56Rv20j)@uc>`9UO>^cVA9LoT9sUq77pvmRaN-A3cW z4d~Glg5p8x=<8uDgx;77QB@NlQ%?O3ywzT=t%@>4pgS$KjC^Yko2#pBy*MQaUf z9b66aLocQBb!*l8srTeiemvTiT*1+IsO$JiBA2u77Ym-4OunNbOjP(Ej=psYQ+H0r z!1Vw3Ni6}}Sjw-hbi}~!B0flh5cTyUw7Rx&{=_kvnj#i<&LJOQHvP;}TbBAW1Im5h zV9_{pP?%10zjHJn?D%ljF{gp}a8uCX4e{f4&EveN<8sU7Oi-DX<@hZ+%gWY_$DH;M z&N*)qX4t6N*Z`5>+<7yoDkg&ezI04BUx+Tj`nYtBzVPvEAqFL6Q672@*t+Qo)+^fC zm4t7oT5G^EPCr2q5>R9@mMaQ%0~@4w8gUWeU<>g~>WLK+0FFIxU?)1$&3nCtfZPSB zv)oEu-FFj|jj866ypMr8>n(chdx@z|Vl2P=8iU1tIAL)itP-_=z5)4Lt45*y%?7$t z>cE(E8hC#KXACdIz`fLW>D?1xc{ctzu#=GC`WS7RXin`FgmxcYP?TAaTf5{L7c+-A zzYj7YZ?P_)-meNv*4@Ww;-yNS6iC~h$cLW!4MO_Yfwe(zuC4Jsn%gg;JgzCHRhn_$ 
ze(&&j`+6LD`!jwV6c3>uXHa$YFwPrt3))RjBV=#H1BFEKJ-Q15g>2}sj02n4Qji=h zU^U6Vaaqz`oHFw^c=Re~*}oI1mo7yO8~%po*z5oI1#@YWO@u9^-OnaAn(aSQjF#L$ zZDlQHSx$kD$CpsGVzP93I?Znf6_8hU4A6fpT94?(obd@CXH92`G}^+GEpTu<^C}$8?>$-T*I|osQo(} zy{Zm_-sn4+H#-5uo2x+}9X5;&JTD3s^hg6j$J!&a{8*MW`8Gzo_4wR=SNo6mRr6shtRxW9PoON~XSuq}9 zmv9sO)*A4Oeo@E6ml$w$umjcCQJm9(^=SKIKXIP>v+dUpfuc{n)b3h8{5I}6nlD*Q zIVUC8GPsC64+{nB?v|Xk(3{D{CFtrfgr%nMMK8B0;94*R<;HQ;BmN)lP?qE2`EOAo z_)1kYgJE7w%toT1JkfRx-@hBh-CLnH!iZOncVOyUN3mh88;DptCyKw%sU&}M zeve(b#9oC^>mUbbYvMl5&S7B`7*NC*sWm1;I0;W(fUbGCB;1HE+ExzUW{)6zQY!S2 z>Ir>zmOJHRqF9u{wE#FyD(!F=y|>PxKPG#)z4JG_=trRhWB=Br#pK_ED5hv2e* z&p>#mb7(tYA8DfDtle%VIIp{lj-RtIfp(}0;{{OkTlaT(8j?hs54yBY0sF+~`fgK`H4RIts%{K_V91eS_ zCs=Z;6Q}y@p!Q#%if;3_vl91Mh?aH5c86+|$JgLpy(XyI9)p=_d(r=@3Rf*SL)j54 zwQ=)TX0sv=R4@f)`Gz!eWBpW6}H66l5afnAJjNUVBOsRuvxVURY}>P zJl6*dx8KJ5rD9%Idc9U!o+733`^aCR?aK~_ZOzL}2HGde^X9ZOn6*G=lEws>qIpPi4%Md(4fp9mJj zRFALcN*ohemqZI8z~C9?WbJ_3wi?dOwHC#TmScEBe~5f`1oYPJ#XMicc(aW(xnb_h{)~duu}$zjm^8gZy>e|QbVk{Z%j{RJrSN?sancfd zay!Oa@LAW)_=b-bSbWeQtlfgpD_XbBxn4g=zWxYd(J(ACMK-1XQLuIX6Xg3uaQ^xcjQO4q)vX!$<(d*CmZLC#ilOlQ z*hcUO{lpX-264`j)zGhr;i^~ru=r94>YXeMfwXs@PEe;UfT$27`u@JwIbqQAVJr3G6Ye2H}J(Hj5!?GqPVn3HN7&+@Te11;9 zX8UsV`TYYW0r$8V^(ox6zmE1YmH6$I0pBpH8je@K!+0Ne^b-s~Z2uHS4AY?D@kB7L zTnc%8&4i+Z{ZVCjl2fdm%(Yy7##|~!VtmO(yt~3&NU1Uul=}J5zOxv98CVIL?GaKP zhn>>QS$cxvz*w&IXA3dx2Gh;mThan#jKMNwI<=C@tC&A`M2`bkVaHZ{Y$%nI19ra>6C=^>cRns_j zsN@JNE;vE?|0=b4-~zOZ>VqkF-b1wCP?SHal?IBhve+UkUOCB(MJH-G#iBHILGf(J z+3*=>k@vgEbPQyXx1h!AD%W$DnIO653=v<6tLOL~Y>s>f`S>9i%(5}?%Oj>ZXoEpp zW#Eyd;zm;MhV!Bb?pv)T@Al>`&XbxF8}ywtugg9td!xg9u3n6ZgTk4{ZyhREuI56G zQ#fn8zu6ieVk=lPn3(nrf+%Nhe8Wmui@UJKfV$U8L#TH^!cwlkgh#C@APRCr(YUYD zu%y-4uwg&WnEw{+ep9Bxf0wi<$pyrBKEbHsAAnI2__Nqp$bX-L(b|<*TZ>$PZzU*1 zJDAE{Uz+?%i;4FErU#`$#L-H2K06A^w|qyLVJ4@Ec9I4iYDHOZP44L1rZ9S@CFD(d ziS4zYuvf$p@El$V!D>GYE4BrvaRZ4ZBvE&yT>{npqg-^!QO-yAA!cNJ#>Kyh@3k=+ zDn=L!ocdqWZ}>OParz@H|OFs17^P0<=buT zp}0>lH{HpM@0j}-gP#5YX2iLPU)>u$wg~W#5A|#q-htA5PaHakI;rNbVIw?W0QNCN zJIy9&Up*h^^|cTdY%vpR*DZm9G&(?@7NEe zl;WxZcWL*-(4tU>x6wI+fz>90?daPOk>kON+(lS%U>YPBhe1N@H}s34_tDCGQc=)# z7IWqSdURZ4IgJa{k{?HMMmZHg&X9Mwef?vQZ8uYwScaqL$1I%VN_v}>m^aR%9Qg|) zKEJISK>B)c4%tjQY*W5v%Wf9(fp3*8N7q z!gTncV<>odE`yevG8nZY0(%{^5T<04_GY@~hU8 zSGM{D*K6ESn7;Nn9_ni(BwR|sfP3i}6-YaA!vW|SqXX8r%s4k?6&E&!&T^z&OpooD z7fEd53&wm;;>}y%IEhnMoA7$=@ff)L1dN?(AY@E_$5hkKGwaUOr}(F?;FsUZ%-4p3 zs??X0biG774L#v-?R~gZYR;>CuDd5+`~@j%-lH}8;lo%{UioAmPH=t>4&BmmsNn;! zJ6X?y+AP6)Grj$^XL5ZPbwWu*7x#=3;_^yA!eh5js7f}HY9<|!+O%Y24RvKDcU=ah z0nt!=v=+kxW?`7>GPsTJEQAcag!0V~u`Id(d<~{y+?yu6vl>B7nX_t#S5SBV8GZ{S z)}1~B(CUI>q!}yAT?d+1;gB#a2MaAzL5W{sD7~+aHwU3!w<;XeEgr(t{$$m942mlo zI9N3VexD~l@xBGn{C)%u^(=(o{SD}I@eSVTc@|w5&A#17!t3!OUJ|{G#jHAk1rtW1 z^rJrSFue#RvsPeCJrCPcD5D>-0_2xxG0$m-sH@>L9Papy?RM_qJ82xaoa~PAHKxR1 zpNqk&R1CGZ1c!I7n0#{w%+fUzbdGeEzB{bLtMulxfV%N`dh0&;*mf3U-1=fjhn}G6 z;|QABc2b9tfl#!o4uco|M!T~6Ty(>4&Th#i>Y()H8ZP|+ab6=xCYeYbcRs=Y?}3#& z2C(K@F&@6#L|ku;+N&#N#%pzX$;1!pdDDvEmMbyDmf7KA=}(NLU8eWud`?_R94zri zO#iPD%k&eu=w+p>usRA0KDFbr$9jBA@c`^Lask+l_!C9X+SPLSt?m_g6dn2(qCC<} zZTmb6-DZ9QtC}ZJd+H`wF5QRC4@}U8zYoW^e8fV^eg#-Gq4*5>Jlp$v#9zs4qPcnT2;Z>G>U!@YtAzM1WW`(1T}_AMO{FsK7Fu5Y3q0ui6Y(`@yk zJDc@EpZ8ht1HCG*V1~(NkhOlqvi~}8M33L#To%UymzhA-h~8NKt_f>D(OsAE7wtvY zVAgd365sn`_4jemKdT+X$upI+S<773ox|3&cQDq1c!WmX(D|SD5M^+km{$?3<@zJ$ zcA=3AEK0?8dPmF5uBy$$yJD)(L5w=70gaatr}Y^Gq1icH)(rBIu6o15UdSM;?QcxT zG!^_~jD-yc4f#E9ZUa2*3odao)a1Fc=Wbu1d21=Wn066j&K<`Q<)*xUF)_N=sdb>cx;pw7D2_dpMz8tF1*S}f(H;FEc+nQLZo7geMP|IFq(E(b%$RS! 
z5CXkASqNM17gOJ6EJSqQ##v9@&Uy4s$K`G&{Jh*~T<-aykW9lpSR z90c7n#>ADoLFZPCRMMkVU3&K?@i(d=W$1TY_s4JWeYPKi)^q^{?Y}kuEM?s?<*2z+ z%-OO25O8B99B9Y|2mPf`_A^Sg2KJ(->=Q;_8QsS^;@I0_4p*nr`E-21f0cE?N;naFvK_eOjqaKtq>_ zzSvaATGJDRz4}7xo?FCdJjSlLKEcHs;$Yr{d`!IR3ZnZNq!nKSZ|)l_I`aq;GILOK zyERwkmBp!{01ga0h=K1Xd?K{|IL)-P3V{? z5`uQTg4XXJ*@LeJLgLOE*h?N4ETL|~qpoaJkEb~ObOMgbz6aLN1Pq?C6cr<1G0n?I z7@!TtB=UY{JHABa-UT3^aE(}f)hbow@$*0t?8imP?vAu4I$)>0pG3V4f(-$lfFy6l~?Yw!apteub(aWK~L|} z**F!a{M!UYw$v-!rvY5%(mTd|9`OltSsC&;*8Ck*gpRzCh+C5xxTnCpuGy<|_l;H(&3my+>M#Y0SQ2ht}IR|XxoQJG|jFtT$ z>&pvx3#Nkh`YUKNZAJ5q-!NkAT}bqpkMhJ{(x&_7LdC-f5dWSD;i-lJIvKivW4xpiXh*kli*t4i_>hF$W>f2$AfE31li)#SQgg>daUlmd);%y zfF0@JP(k?~%^`3ZBjAWwC3ptjCXUxHI5agC(AJdBks_|-Y8>s)lAxIO=A}B6QO?rg z<40wq?`b2!`DQF}1UvJ`-L_!E(`*RrQo!VfeL*$+I@dqwJNaF1p|L|OlZ@5j<=e#A zYH7i*yG#F;HZDTjZ++mKsaWu+T7sen53x3F4~FC@;fsSv$WQ1Gf&ZLA#eZhJx6m6j zH4hgZ@DZ$rT;&brPG%2vMsVTlRv;2efNzmCxUj(V?44s&~&k}>oC zNp$x4iP6rz(XmVkdiAHE;93ULHXcU5p=&wo%mR?7#X->Hi=e8}Q-_duCHkj=n{e_B zc$yEuM9*-R5?l&~3G{uu>4Dpq>hj^+lOdu$hm-l=#Q+2P|1ZRGnrZLVu6jq99(f>I zHpId;Bu`Gg8~#oqj?JK#pnW!lt-NnW{O{>*h1X-i{zxjCcLbm|%L%PtDKUA=PufL2 zqkFLp*WD$*bm{|K+l}33t&}#0@|(J zhYlW_&~W%=&|G{ajp?n=C)h-Tv;J3T@OFS?Z4G)Qu0zB5xtM*!kbg1iAcQUX|1Q2B zwO5-sD?JS)?jmn^$}=u?es@eFj#0qP8o#N$Hn z+fB+-_j->bX6=H+ZjUgG*d<%!*Dx=O{QX@{L4Szxv*ZREV%vKh0 zu$s9UYz3wGAs%phj<8FH5&5f_W}|PeIBy%49rQ)lm*$-I_)Lfxa)ndQDF=^xL&1En z9n?&sF7}-zkl=p;e;OGI%_&Xzkvu;042^{(|CiV-qy64rw_$8VEvQY3v36Y_nDgfs zOqiI7so(lSKd~8ae!vTL+!EcjmfjHYM=s|)iq6f2B38U{0>~;asTJa*P>@?re4HIp z*FU7-H}Eo(3%oS(`%jiPq6F-i|VU9N6U3WU=NF}`?D_eU2E7u@Sl zRGiwwqK?ePpYDddxUU`-@qgp!r-vcFybyvcM&WkdWRQ952nC(WQF(e23$57=oAy?t z#`yu$Ud&-ZcEpTYqyy>a4?^3eS7`mqfKR1v_dbG=;P>%gh_3DhixqUwdoN&-!zv)L z%YNF;yaT-RH!9+;N;Be3`HUXKSL^cD-F3<=F3XcV)eEjMO&sa;R%2jd`%l~+c@?G@ zT?57D%j$rs(=j~F2yNHy!uE#?Fpwt5bgtM%$_gl)1j8EFbmm8i+ya(piu_6*!UIBq&~Q1dBHu zBwy1fu3MK%=<%Y4SlFh5m!^>z-3^fNssviD6|u0yWT==;I(vO6?T>$f^38HClDZCq zNGq5_93IahKG>`Yf{wHQk6mSw z0`_LKOG=%}A0p(o>>lmkNdf(y8D%^D1VCMPSFeJ$Sh8I=#0p;fR~R!Mg4`XMFQ97dE*s@mXeYwRd(94~w)A z_pa`$Ygg47{}2~8`WHGnr=v&WU<~%#f|}i%xE`9Tpx6`!(f+47kNCCP7nfB*A%NHhEW-fO*H&*$@Qnnlj04)V{uIof){eQk1*P9|U8FUl#rF@o2r z)JL7CTKGN`j&RnP54}A3FwL=5zi{z~)bz zq4uqaOG$YJvpWyr+Wu`Q6&Lby&q2&T#1!-Hf0o>_xP>y$68`*iJ;A@j0o6NN;kvU9 z=RT-}c|Gxiwz?dQzjF~b-?)Lnfg@40!yQ#w)X_iY&;J*0D#(Jiu`PuLTxazPyg#{@ z5dTvbZMTGD>NpcF226#RunLS1IDqxd|3RwJGmIzJK-te@47#QLa9|Iw3?seWmFCAK!-{WIkV)6PEa#+QfvmnZ| zuQ_lHJG2h)5t-UxoZSklK1$Yd_Bt+BzarN8JDfPJ7pHaZ1IVsBgMO$U*e|{Xs!bYv z!J7uKd}<1*lU_r|y;O3|zl7$hMa2L6#(aXQYnzGCI>-ixv<-mPt0URG#h*~Jt~VEx z6pn#@K5#9?NZ9$qn7c#0bR3PKx@LwR5tBjv*h7*t`Xuxjm<@2ohu+1mDHFeyqw%s- zELpMuyUACj|J@yQooP0-VhgCBe*$%LoWwp-#T%TayUgKUf+9wPQzsv1VHcjl$aB%? 
z+1LS%J3fKgI1?OJQUG@3ZcKYo4APCYN-axWw0wPnW}|x$H_G5Fd9a(0E&?%Vpv`C*+89ddT`%1+ z!KWELi5-;av=wG#zoCxWH{dVRURqYgHov1zH{~*;W)ZW0;1-xG-w*Nqo#a-%glr`=*~Ir|f$J}&|N`Mn`6<^hb- z?17wtUn$#lm#I65S3BZ1*sK2J=SS!YbwfKK?@+rk`{yI(>R$SimWErs{=aM-C#`zPN2J6G)@n^iv9+Ral@7z zoKkcYn-1Is%LO0cE%k^3st&`d8F|=|d6wC1_a_f^IdgiI$`4d-LACdH$E0&LumKan zvSBzJl{KQ~cWq`SRmDOTua>V=+AXIffh* zKuvrUbIWjOt@^;bYrp1Y7j8s$0`H+{p6+~kknEOvx$hf-_Lrxw* zf7MxXOMO#zJUPY7;v)H~XZuj$VGK@(FEIV;681U%6+}2#LU5xA{Ly48Sk%$y{P=CQ z^@btm+nLDXx_?1Sz8b=1U(sT=lHQy3n9A#S*wF7IcF(p$120Zfk|k&mhx?5Ov#n-Q&O4n0{!|9LCc@-AiDK4)=O_f+_((9ETLVQBp99N z1i-MJzo6pzD0CmV$S=5!uKk4EODiLzzImsIEBThPow>yTd;P2 z8>Br_LPnbyY_n!Tgdxq5PMv{1eRJVle?87C=@|r!GDWvD;b{23e6V)X;XMC}q0gQQ zZ2qKt->FmRmmY`SO7fh(osBX|ycHD%qq^^R-X(t&c#e38Wg{fep>-6+#s6a|Uwpyr zg#**~x{E_!ZiWa&BZ_U-qw2aP@ujHec=d)R|WgFdDI_oj)A@IEq8VF*4y0@LJ z$0(Y4NM}XnZ>zfmaYZFi@Gb$p0=M!x8?_+s#wLk!a38^H^KoePSj7T}FYhb2Mc4AZ z%GN&*GLdd6Y7M!E1LVVTHT6HU*(>xfsl=k+DBrYP#cU*5pmqKS^xthP1kT!mSFG>D z+U;+tFESa5OebNVZkl=B9EZCp%V?Rl2y@)EG0PzGyI(@ByiM@l(2&d8^aC#6FcJJq4};;% zPG}*%sQRrTFORckxigc%^+qZ$EB__29Jd}CJkR5`Kz(jNtsb}L*ICeS3uU&~j}ngw zncJj+DC^(Cik#oDUXI#AoK`gE%pHU6ZAL=R(&v~?UP`h13}xtFda&TRCU=CtfVSkR zs-Y}==^Guv+MyeaEiQsb8u1>bF6>{iiJ*`^l5AL020!=e2(Hn74a{n6z7yi_<6|AGMNAA6f%*O+%rz{0w9T(a$~W zE_Nl{0lB`CVcjNZrhQ`cs!HtW4&$RD!=QQ8VvHF04V?b>JE|5o@>#Xd!M#WuN?sS@ zVsiP0J)xZC)sL8W`h}xI?P`8=Svy+UeZr)by@bVmmqF{=0pyn1f;Q%*Y)VNnhK>4) z1$mk1A=5_x$uw)x7|L8t`Y_)pA29Pu;KjsYbM-#ToK-(~q>N>{x`mkNADKelM`-cU9Sd_*Eh$Q#Rho9DhA}y2SKdYmhZWRaz8Zh zD9O429fe2vm+e+~5;cXi_ZP7{T?#$xE`dZu>=+D3ofPt?jwd&UNfSm~GDh&(LHA5I z9AQq(!R3(V;IXi=XJO=c=^(X4`T)0kYePNZA>2 zfkgal2y?QfJEiCiA2@g$CWR+K+`oyax%VKtIE;hDb;Pi!`wYG%15s}G6IJcL&@3JV z75kT=`@Skr_HBmNKbG;Xo0g&P>`Bnu#6ZaTTN86<&>rOFT~r-iOPu9u?C`vZ@l^^~ z`Mi%%fBiP3^xa1Nvk+cfv^u|F_+jt@HJ@SpALPyH$@l$C?2s58PO&FTx$-^nis|3& zdhfL4Wn2oLf2hk14V(*;Z1uVDf*6Rc(d9<3rJ3aS9Nz5jdW`9sf|jB0_!6hTu`^>M zRI+vm`QIhlamGQ}mtl|~S&e)5X$kU!=1fFgH0%4+{dn#o891<)(D7Kt9A>{q_X|oi z+qRCC#F}7K$5|Mb@Bu~lgP6^`KrDV+4qgGXnExuL%Z%hGeg#>aZ@inEDxAk9Qx05pd+f;u9cE#A}udAdO;E15x=F#|55gtCBxA2E^SH4}#oXPV0d1jm8kxvmda z>0SoGNv*`K`HwGbK8u^~ra{Z(mneF)3gpy>X>ZO0#rUJp#?c(;la?U8zcOE5O7mB9 zI@5?vo+Ca1(yin1@Gw126zGZe{ix%4&I6n*4>A4Jzfe8<9Ig#{gW~=(QUC8gsMB%} zVs7W5xxp3mO--g}**f^&8uA@=9^)fdP_L<=2e!?kx#Xs6*zG3)Tki*ub}tGv`^m7( z(Fdn|i6_76G@2p&2I9u666m)No%cLs(*B<@q;EVlTquI@Wj{gqjr7XCr35tOU{Hm|D=tiGwx7IvtcT{53Rcmmum7p3Ml~tR0 zv=bGNTEs95ocBh=xwaEzW#->aDO*t*4x?{Ep@ zbIxu83*!^Gq2FVaFSX$Fl3!9!Edd8iyo8qdv`_Qg3r)Qr;D%jKQ8KClQyey+*Y;vC z5gTw`^}Bf0qqnT|6>;rWlD`x*h2&kmxk}9w7@QgeKUBR1vF{~*mm+w1|Iuo$q7* z0T!O;$(@o7;$m{Rw^~9o@f^&ixuH$KWt4s}*Oc3AN4>9FQ_ka4 zDVAN_Kz-3xu$!qu=s%A3ZJ*G8WIa~sOv3DgExh7Re`s2riXP=AP&IWZ2JfJ}NgVNc z0`*{agqF}}+Ygl4Me%kPkFjJskG>;ikdhsMg@yewSUL^7N>B134j(aOG5OISAH%R~ z&QCB8_LgN;wzs#I6 zF{&$Y(Og%6`A@LJxRNb;P7dh3W;mcc5{gde^KH|1!P;edT$G-Ysh^CH1gfPFztRa> zdz&z|sU5Uv><6D8-_d&MRggyi!&H8yQ1c-cO8RMH($2S-?eUp!TycW0@abkzHh)6m z2}&%vQKvmjWY zyOezrs&?;1pGU;Eizos2px%OPmnoLHwqx&HQ_humPVt5gp!#nCh_~y)_-K*fzVi@+ zmYI-PL+otoSbJShVvV~i*aXS|>s&d3a=ooMlICBHsUa|Or?!xhI~i8SYHm_5|y2_`?&;q8Fo;=^Hewuwi+(H)Dpc9(<(?b%5e3I>+4O zi-WGC%HSi52p<6+ZInZUf~1ItpmnGbg1aA~_k&a@{r(BWArB;1w69|~ z`R;2zm!a0gfvEaE7v%YMD4$MU`eD!cXzD9hh8~6~&L0r>+{Uc?9q9El9t-!y@cO}@ zP@i`9CB(2;{hJY2JntQdoJLEWCJYD7^5(zx_lD-kDsC zA9D4Dn1xL^;57BbT)*MfJvzdQ(bkx7p7J=w(;d}+#j&D6`7o^NJiX^UcaRO+t5lk$ zLc}D>^{FU#)I6D&s%k)EUoS`*LHj|-V18%vVa3#;5U}|i%bWfZH<)CD@0`ahvR4jP zU)+qc{}K^~6Ju?w2Xi~_4i2w>z`{5c4iPj^{$w9ZrDu}MpkCN`^$fe6Y{Gf>7zpyD zmC!xAi#(6^yux)Hzip`&7i^Ra`nB1-wO>2N)NFwjlEWxzBaT}BRR~Oa0}a3BLB+v0 z*gUVE+$V!^?9u&bbMYLrQ7D;BTLx-`M}Y5-cHS$v0Lzm*s?XcNed{b# 
zhx{Q4nRO5R?@`WSXC2@CoH7CCzvz2al&>BUqqONJ=EaTalC;@ILhgXORvw87#i$j%gsS1W#L#R;<39V)&0_|*UnAGZh{crM z{vX&{5nm#$QxZIO4%V3VK#7$h$|xN>TcJa5edZ_mU5Uq#H*u-Xa}wsW)-EW5IpJ4PI@UkJg*kgR56RW%lZi#12bU zik?&{bN?vEK;k7vKJoJ zjn(FuC(`3$eviU%9~I3oJ;3n%19VR;1+mcPDBt4Fk6E;jGD=~2V&Za3uGOJ<)p4dj z`7*DVLIxZvJ?FynX| z#7rBH!#arjx!@U2W4(nb>u;m|7jo{jo`d?4S%5(WuIdhP%fI#(j9o{7 z+1LmOKkyU`S2m)^>>clNBLUR6e4w>L0;zl2!1>Yy^q4*g?YCH={f4`|_)H&(a_0j~ z)V;xr^|~A%Q4S#YJMB{5wSncdb0~Y~fC)QaW6aY-Q03-?^>q?(9`uN2`5sC;`JHEW|ma@0UL8>CIS|5Q{gkU`P1Tx?o&2^AT+sNqRYs*5ck zQ;mQPwbfYfaTD@fR`VVITA=f@Qgmw2!E+e)Di)kh z84I#3A2jQ*!1f-5k|7^3C&UhVh3g85aqhQzmZF2fL4Q5Jl&LYTY2-GN=fieD5PDw(gS*^COOB{#&|- zC?(=$iy&*(9yqx(1MDS7vDvZ>kIgp~1|05>&UwMmYGuW1%ni#Q7)syWYZcJkIGy~b zl+#&xTiNya8gZhEnf~UN5a2ceBfNvqwnqb8#kG>4<_}w@-gRG&ADnY>uAO7FF!$k=Qnak$l0ga=MnOHk!ytBj~68=z5bWX(iR*7>&A5S zdt3)^nhZJPf>elcc7~2$8TfL_AH+hG=llNo6Wl-k3AqnwMxCjNW~apIAY2@?J!bzd3_XM1H4s547Z_qP_X?w9~| z8*5-pOe}sHYa+}kGUQ6A5862J5}&xd57u0-qvxQOBqb&j0mq|abCfdK!V!q5a;Sg3vi zs&`9h-%T^cxgk(pp)05kK0~v^$EhEZgHdwIkrBI1#kXOE-(;A0iJant&SO(#7J1Ba z$#+zM6_;zFRYJ3vw>A7D>m;x^nT-FuqUOakhhq>$aPK7MCKxJ}nEa z#}}ZN{W8{JzZloAGZZw&TQJ$5Gg0a+XYK}wyJq#`jAzG#;n^k_X#WdkWA-@~>l$-9 zLn<+HPbOBRHn5}1sZP{kCDjK3kfqwen*>$-7Ivu z5q5ko=NmRW!5r%g@N!fb$PZ{?;}w9mg%cszqXn}3UckbA36K^?`{Yr_Sw!bOn4wGj zoTMLY2yvLzFH$j07E3!~C5A|@Lcx?=n%``PnDwSOy> z&c;O1O7QO(kG@meV1I7|VQgkEZcJT1WR!kJuQzpIc3>p*OxEF2=9Xf(qoJT6FS&mo zbEue>f{$+LaBB`fg6#4>!r0^s(5ODmR}|D@@YqG*{$vF3i!`~!E7W}{TEL5|dvRuK z^@NHK7cldPi1UB(8N)aSEc|+uIh571FyRAhIbQ=(V>@u&`VXQV_2Zr}tjGug{a=T1 zidix3_a1;EO;6dC-hv*B1`$7|A2{7P2rhTqA^D^^+BOjbEnAF=&lbv^?{tKoO=ls` z>a*nXKk3-EHvziM%TW5;EIxkaVnSfkteoAY-uZY*kW(l$w1%UY3zf*-p0KWaZ*WFH zC+3bi0}jROnAqsLQcYfx%FS`8HGrIdPJ3W~OC4wo@5K*0nG3mL_tEdP9EU0=g4fnL zETjB3HixP}_A5bBB>xX9#)dP6e>#sU5j4h;PwV147_%@Q&F`CVcrF5PZ$9x8yivOS zuB6)859FOE(D&?D);(TNDEM^}WkcGOeKtP;wY(pS{;QQF4wbVP?=?8D(>qx4l8aDT z#Di;~AMg8R6E=$aqD;tRo9BEazF{d-7}&Ck4Tn%=*PC0jk>)sVGtinG^Ooj$%&IL0 zn`2WUEB+U{rN*MxodXd1DizM?X$dYdLqMZn5`THHwxENV=&Ew!vkyu#Nj z$3jdw|12Xx^?ouN1bsLg?ilq>X$NtI7=zdT19i<6WzLWh*!H^#w9Wqrx$|SOJnJWh z;d?gRLPMCdN0S>lPg5|PUcuI+-ovO%dg%R}9LEbDf!b!bL{)Cid>e@AE*zX6lJwFX4tp;4xfWa_eo+iNS$*B16rZVf? 
z85E}+RBD79K)~Q+@Hk1?gdIj)+Ja~-J1f9f%293(c}kfg>dT~`=L4ohq1y8*lio34 zsUJUK#+T8c4&I>**ro@X#DC7#T84gA&p>>tmbv8VkaJ}=FM9NMeox^;P%jw@VJUgc zK9cfFeGG&n%to)Q9Y_!f`7(?ENdq|rkqM(NdP$f zY{cq*w4c1jW9PgsR`;z5?ZPu4SOzF!Ht2HwBvvevLi+7K0uI(i&m%P`-qBxDfAVkY zx+C~i$AHA2I;pvt%*Ub}quqOPF?O>c=EoNB+F;1%U2TC*e>GdSNW`TPv&MCdB@Di8 zD0tCXFW$a`uN+$m_DUD#=;UvDlu{WQA~MHxBA+bpLe1Gfdgz?gPV^o^xi zRQOds%zr6*oJ|6Sok&nncF<|lY1TMn02F_@i8d!aP^z~}xn}Eigg>W(*0j?Q*Xuiu zeIvu*o$DaU`V=^?eggp-eZgD*9p&a8pjT!;=H+sq51Zi1`w!9)ZjyUEF~Jhcve_+RxVoT>Lo%pD;TUef%{|IAW|3f~A3yKaKX z!#-U1_M=#Noc6aV*=RG;2`vwvgT!HttXXY{vglXL%5Wb9Xx{@O`eI29v9>%T(9A;z z)J5ai6{m~9Xa`-}Whi`J{t?uxSLMf)1>=_K27-P4MJzl{8RP%8@(%GEkkZr;{av5) zw&gK!nx^2^z*-O=KC6}Vq`&unTggV!wb^3;q0i^i8QCasq8C{Ot{ibLqnH3 zxvbh4luy3IIxGGIv$mS=v#uwLvI%^WP zCauHZ`L(DFpv-E>XC_hi=Im8P%x}aPC^6^*wzqC!Oi(O{uKmOE4sQi7AIj2{|AU){ zJ;u-X3ZZ6X4QxC07}Z)|B|5JYAxr)P%|35sF$0!@t@$0?zD$?%`${g>yuVnnMGiT= zG_kq+CA@so8-^XqLR*t!7|2~81TwB_UlSJKsD#O0mIlKp~jvU31nZ{h`I1^}9=wq3v6Iv^GqU~RS z;GFfH{~*!kinm^ZjkN|sH|3maOn8jnHV|Wyg0P@ZH}S+DVcx1BXjK{gA2$cp`xo*t zMVefz&LEck<~DDy9EYPeeutBvk}x}{g+-i^5pzwO^RpWOQFgOoRZkXf?sF)J(|hZ@Y1eJ64h+#uKo5FTzghZs%{&g*Rp76o2R*4vn0Xm8;FS=0G0hx>9W#?v(%Xb6%+6BmYZl^`O%2$c8bM?|I+OL{XJ$m{sFjW z{7vWNGT8kG@%GG1949(eLSgtGK4Xv+ZC?B0d08LMt}z~mRy#vdXcUf#+=mr5-%NmvdSn!0{dT}f`j_!SLOoY~jgF!cvdPD}XSoTt%vrz`ao-;J>tGLa^Zb${Y zw{LOiicDPd6rx=TI{B z3&d>J;(Pj{OvgF6%k)TYCm7O0ywPD@gL_v!3AZ@(nZ|TVg=KNbvgNN1j@V z#4@%6JGXp>yqT|fIbjh zqsWG~Pk14F5}@iulTpse4QA*Q-$E z?2Yin1yZjm$SbfCY66;YfAV*Tx^xW8vw7-dko$xjN;UU-3-Yxc_=lD;XI8(S9XXi} z4r{!i=;k9Po3jgvgn-VfC6J@5i>98bP&T3zsF$Q1d72pZMzo)z4AQucd)T3IgST>O zMoUWpL&o33*;A4sdR!{H9o>Nb&CZ~@UI3cjdqLw{s#3k!f;DX}!-Gq7Ir~|uY=9+k zLPou&yN)5aF0E7g?K%c+u5&PK-8$auKoHn3$zWj{O8Kfk#$%%YL0I{(mhiBzNSHpF zJVL>x5U=22d9jF7)hXDx6ACaNnMCf7IDFV`D)`-g2=cyqT=xzgEbV_0EXz`OtG&k| zFY*PteqYThPqc#~L(Uo(Ci9l9uOK`94vfjYgfX?mqIviT8Z)|ish%T_%cS|F?lqXw zQiO81f6-=31@j%e2cs?)j{1TdGiz(n`;XZNbeVpy@ckv9Pm&qAqUY|NUG5jybc9G)0jTOm+ou=!NL`qG)b^{kE%5K{~>_1PGp_Yba~L++Y0fxO?5 zXlOt23@mknz{hVAx_B%l?^-k}3O%u7+ENU=Uk<@J#&lo&O`<6Momi}g_|U|@eAeL~ z;B9RrcsKvTqKfV03~B&3b90CXqm;+*Xo zoO|eAa)+osrfD=o5bNXW@Lxr@5@6aHIN$Oyx zz9{BvLJoo1_YTTAb+a+S@4xWO;|PCm(C1p5O}OTrwV)B%hnX!{$?M0ec=N*B@U_L5D_T7b%(mR&;}gt@@pKGI zF3ce}!UjAtU4}k70mb0v5UInwQ=q<#$ z(b?A4jTJvd=tu5^n0Lpp^i34tK5edQ*9`o0Yd=ou>;Z9I0t*O!20{NkMXx3W1gEBf z>&z6kDQq9w9yAuba$MMmqclr+MZU}PPf<+V%7spSggN*6a4FH`cF{G##@X{wwfrDX zq8-S@%TXvl@c`BR{!%JRuSmwsd4qet=?lTE7;HziLDC=EoWp`nUUm5;!sTl8bJydH z?}WpwO-91F#nh3K?!@lHv%&p-6qBwp6L8%^lR6^RA{lcQv9He#mVf|$R9G2&|>pspTg5fBTq zQEQbOZF>nC33}kQf_4VUL!tP14Eb*lp?L3KsK^?|dzDx+|A+5kZ1Q<@?N!O#KhRn1 zPh6D9LUQ3~3+kjt%BWx;RL=VZJ&!*lSn8p>W+kuQ*+u7oiuv?=g4SI@xZ&V0nh9Ou zi$-+uy35{zy`~0pz41FA>@yX8cdiBN(Kc8zEd*q~*TMhI-yp5;go5uG;5=zLcyWCo zFTanVbvOw>&wmL~Q_o<0k_AeOym)IxZ>aduhGloDhjgKf#T}^z3q9g)d?Qy>+lK&n9-j(d0o{aTx`({(k}?{Y)c&{a@= zgm#TT%h7W|J+w9tgDF+tz(O~l`l34A^ZojQM&TKUu4lO*bzX;U%lo2#tOH1YoXBgK zQxAK$8VF{&@A2+e1EJou3dhXS=7J?tF=q&|{daGtdEac1ZE$gHvQ0;6^lzXTE@v_u z3(%r$VA(efToZZ`8|;!`PdUxQo;1Umi{z+Fj!`aDUk3AUuPDRw7l;~Gz~aic@OIQ& zOq`%1FX=fbic?~kWGT4z?IY-4%w^#-DKD}qllB4aY^dc*%${ZcRZ%*sgYV;f8hJm$OLfe=V7$|8+nPNGyPmZy(KZqGymkevH z$j7*6Fm{Y)AX4Q>68~L{WuwzDZ|!dW<(g9{D~*Nvqc$<-(!Y<@ve6SAv{MW|>?SWZ z=)s%|-4J?iAlkp(%PU&wuGwp?vLv+z<^8jH%g>p-w8W8LJYzLfEz=PaE8DR9uO%Qm z=tIu)3;3fiIkpVwJ~H+`@3VX?W}6YK|M_iJUz!L0Qcb8?$HPF6U8ss2#!4bgxz^*8 z`PR!r!0cf#%lcbi2-9feYwwzHs<{C;L}E;N)oxS=^_3V8oQgiRMQ8{l5~je|-mc)59n=oI~u=<2ZY2GPaDP{hRAv-s4vdHv8W| zvAde>pWg$4o<8LJ6$zeAH22VE9Z(&X{aU#N(GuL}R&_OQcCk{J<(j(P;w`K-bROG=cMh>9BJ50K5 zA9kIOfwi}~Am{HfXm{fn$Syo*Gp=h1Wt8_=0YkvX#F?@T=P3)b3;H~!TvdKFuMWw> 
zh}YD=pZ66ehEXrTHkOrj9f$fOW3i;84gOuHCxks!vfQ(;$-CW*seb3d^-C=8I>!h+ zj?Ex9J7p1XEyA>`Cve5nIF$c(kky^1UHwf*EUuwUbb1cWJGHo%gH18&xUtY0I0-7B zCZYKna=qM6hlCmHA$Za_H2%IDk5!QysZ5{wMP)+0c@0L*pAK&8@=*52NSv2`85ag7 zfv~(+rK##O4cq2&ICQ&Bo7B=br;O;zDA6=8s`2j z4P4IZa-wN|%0{O}Xj7BG?C&ptZHMndi7XUKK0imlN59c?Rt6s87~I@=4JG|}#08aT z#!unXJ4^(1>n^@=dpkIh4gL%Ze>UYx2SbX7*fTnox7 z{u1$F!{%T|qrPA_M4PJ_Rt+he9^;)Ox6sES7}VjXSp2L<^z46z$>biN=ta9BeY!_f zY(ni_)T!CM0kY)`UEOujsgh<|os*c?qqTUO*W;$d(fy!>Is*dh8C{D1mktFs@I zPw&mkGB2S2Kkw0RKJ^9I_(po8_w`8B_%)nGIQ4?CL(5sm zjc~s8_yTNvb&PMWSPts1Ym{oJmb8Y9qEp<5i>$9Ns z>mlAu?F^ph1#3Zmp|#6>2bPXC+GxZopO6`Ke>1<$d|Iu^_=Z-NK? z4DH#KpvxX%Dx7b=mOP9o$=3c=rts z7@~yZSE#r7p9_1NY$Qlm%9YL&uJ8dqdHiO`M&Gx~p{6nxto!++xn2)gXGvhx_E(_3 zx>u4tB#K#H{vWDpvRLCUn%l;BLPoBWf!U>UO9*P*4$2f`F}aeiEqG5b`GjW5RZS@EzEK# zfzYrt%x(!pGqXxq)wmBEsxE9OADP()Vp9Zj0%_O;68YfWb4~ z_48>Mx|A4S(<7LC!(1%!{sjX+1*6Sg1#cg1f@XB5jIcBmB1V`BYRy?p^{|TXUiy<* zZna9g6?fo)y*7u=7r>!&8Ebs}7~QHTP*-9l(>ZU-$&0@*bnr#-^Y1V+wKr!Ml!jhy z3()n79+zlM9AKY5T(#qWwDYFk_43vp1NEk_pk~osP`M1}7eCS9!d-Ge|ELuN8%!b3?>sOpqtD69lVE-HJZhafg0?UJ z1O2~3`H3lKp`@@1!t@;Y>Pw%X(fJDBs7qPy0)YjQuTrwC4(v}%2j^3r%xgs`6N@_~ z4qEG=NzX{oPo#H!>(G2vpBP9q3uj`jZoX{6A&k5lPwe9BXfZJv+E?6w?v(eal}n5( zdfuKkHRKAL(y?gxH@^Mn4Hz6q?@DP}T#WS|{7UnDx3Jsj?oM2b+ulm5b5P&mA?D61 z2X$IsTrB-UE{P?S*?Gvzw(pf}=o1emN@6FzFoNuYCX88C3XKiKH>-~auP555F;0a3 z(pB_~e}{Jm)!~?gkMQz6eV4^$lsPlx{O{cY+n6_~KL9Xs^?sb25QcU^skrdUQCR2` z0j_6SLA0ls*F2m?Z0IOn>vallY&H?>A3LBwiIJnE0x=aB`i?LJQM(tLJ*fakc3cPV zls)wBaT-EqQ@{207RXKdi52I3h&i2wT@$`gR(3u0?fz4WPwVoZ*WJf%+w0_lRKoTz zl&M@;z_b>xMyyGI;-W~j?=EEXwo&hPNd&!XY70|d7z#t`Qfc=&0eyFMLx}%^p~PmFFQir7yG^bz(~9 zMdD$~Sru{OWtVgFbAHIMcuga=-Kz!HQ7M45uXnAQ!`q9CnK%C()L|5|KrZvV{KrfLq#7Q69_El(FEaz6yB*KPp`-J(%2N zCxo0l3_%AD0xaEv`X#CSqla=(k8F{&)_h_o4ZcI0vVwXJ*jsnJuSn?k~p*`W31;i4Yz;FKcmH6M&;m*>tSkyiS zSHum*p;x`ZIis3w8`Fu?M}|R35$&G)wSmgq7Tb6q@IB}T@#b?eL;n%B6g`IErwQ2h zLIdQh{^3hLO-1#36W;9Kv3$o};;R0WN%!(S81g6*TE|4;L@V-QjOt>p3y3>tIt1M# z-645nAHg@r7R2A5qwSe7=vH)wTvA`5H6@054&Q_Jl541G6ACTg5sLOdgpm3SI>Rf; zDRm4Ro+V)S0EEburoso>-B%zoY(M=4m8UXL?E436nF{(=OUQj$#)7y0hZ>ZXkbe?`TBjFpJLeE2t{+PM zr7P&NX%bZX{{-`xdr)>|7j*O-z?xHKIP{(byTV&QGwU;`opr$2&K4BkY|%`mi*3UX zfvYux^@sF?QrmlYEr6cUXMSJ}ao4&}lz@1al24}h>AU&A&}x$$Et_BP4PNhPE_nuS zQf64TXOOb=z#){vpJ+GsFq+5d3oCtyNq>pv>$UWLerl}5cC8&oB-lXS{GXB{>Ya6e zrQR-O>K~RH38H0}$*0{9$`>n8-1bAFQXFU2zwt0EvJ+J{5%5;$Ejm&E&_>e{8g<>F z6_t?>EJuv}3t_IndgOUM(`S`m@A0cE_AzCa=C$4fA)|b<} zNdL_&`&TXR?tv^U)tPBc@WzOKZkY5c1tL_fs93#UQuxVx4)ZY7<_v8cv0U>jSV}Ymbe#jXA^JkRt1;@&TLR-Yz6IZDRV+z;9ldZn1igL$ z*;adSVivJiihf7)KhA?$jR{wH<~YQS%EJV$Ao7Rl@KM)if{uy@?M=pB&^Cmh~F+4anh=pO5cPE zR^lpv!^c2AV$MbktmptOa{*ipo+?ELyO{WJm@+hQFCTC%j!$si2}wc3eT5)$KYoIw z9%2Q4S4$M5&A`Q@3-zb`;P3u?4ra08ywA6HuqO8|`o7A9j)BIU*ea6gkC_NLH=csO z|8LNywVb+tWqeB20uZJ0^t_TO3%0a?&$yS6c=-)lj_wcEtDk^u=07M}wt`6`-b<{- zvC#N@H>{xjRK_oJkUy`+?8R1SQzu8Y{zSf-^8)pPiBKD$$3>62LG#ox*i21rypA7ea5x=Ef_o}4>VS! 
zSCK%yx!U}X8Cf(_CS%RBPayxJSrTBE3QffSY)KS@$8R&C#qA-qN^@b6^#zzWXAzq8 zFG05e@*hGwx2mWOy+^$UwMi@6eCY~QPWr+%H=6UI#Q84CAI*x1A29x>p`c}ON;M%t z#HVDI!M3$+*v4PsYKIMieFr1Zr%ukQ*O<~>tQ9l^kVU`E1${B`g}PtGtmiLPS!bFH}&y{~AXq}%GxOz;i-h`Jkk(Vb`#^?0ZuVPP{He@l)Tiyosr zIn1L&uF!YF1>-zwQFfqMqO^@uMW}m`exhK?+q1cJ%GP-7C2vebxym{uiW}>+8cG&f zL36(h?3aB5=T_14#JGU7i@F63<{ltfsQ~dPckCxUiB3_2LF)EJHOYkd@k=kFQ3&xY zzLh|Dx;MH{_yXcJ@tk{~Gzi%30^`H0A#d;@F064riZ71PjGjA49kCz-(qQ3)?gz;i>)-zpCvxxFi$o!j&fBv25Pb@z|A6yrH?9R%928r z&CdbU8&V9h8OSQnpsu9|jp`Xz#=2nopl@8my(msgW2^Ely~)WoM}o+060<#V3(f@e z2j8X$F3fWg_#_|VTqa!u)2Sn&#pNVu?uTHS&jVaUH&$6r4aSxj@TRWR1tc86 z7u^I6ZbhufZV$RrH&@9b8K8lMu;_Lo`Znr$Q0~g!PuNMPll-%*;^PY^Pbzy@j|249g^gQ8xS}W(+GK-_L)f{cE9{egP9lcjw$A zd%#;VyNIQqxyFDusMGBx&NDX`+*}!2Yu=+O^)d85QUE!RVjyhWCuVhNI-EJ6k0YKJ zqx#G+&}5~9<^JhdHogaj?>z}E{msZD^>geb?g_}!zfq=g9;zlDMd|B(T-ep2FwG~H z*c8DyUcDE*-D9zIe=7Dii-qYsOa$rR^Qs~rZ9aAfX&GzwV1~RFB4hN0u+G<9kB%3R z)=7-DitDUkmI6QAfG>DavBYF=zv6rXL0{%{Xlgqukqg*=x{we-M{j#p5&e3rKKz z%nI7x!?sHasNQ1cJmA%=t6(8K#+>ZjZ>wIJlzrSPEoCGlX>pY869>hxG zmz#R@0fVfM#2NL1lADh(;)^cGgJZKN>6buM3w1)SZ-kof7eK9DB3Wi>Dp*7|0hUo; zz~cz>$5j=K!n2;5Cl;%dCQTgS_L2bK2Q*jOsfTY>&)A>C+>4<$#5d-jlS8@`s;BDw*4s3LcZLg7o!4mF(ec#4tawyevb-M>pme@)0fjK7!t(&qDm{GZ>%p3u2=O zfVgc3lQjGSbxD(C-p2(Hdi^kFE^vg7_d9XMJRQF2Wi_PCDM7~{y( z0fcqK+Tx?A`dEm%UTeW8qC4~$$v}6$6$I3{gM#>ph0`vgbk9LmnH}A@xDTANxI|_2 zk0){`M8fh}ndtvog;61Gu&&yOcU>0)Ze<8Q;$K|m{Wz?=(M1@LSct8$`4BY!E9X~V z2iD4$EKSpfB9ph-9_}$<+OG&qQXheG_DL?$q6W{#Y6)_h_hrkXY1aM*@^6CV+L|tW z{VRbq@-y6zrKHbiokg!I>O=3pkzHtS$EL#-5ZE*pZg|tZS(C@<{+o?XNoCCLPzuVE zUr6wAATFVOF(5{ZyzY&X5gj*hk{+VES_gw>H*+D=_d?iW1U8NSX2u7}Xmj$Z?GJZu zo%RK1-aZCP58i+!4;65P844BMs?lUaDfIw!Fw3y*93uvf#~({sTFU zpTKWYH$M6#g5|e;)TlPjQfDLVU60NMhx8vfka>QN42ys{TBMH_jRHj#gcG>#oGw zjg3Q%zcpA+?kf1WpXOdIFcK2AU&A#oEk3`7X2QXFoRLL6_|Eagu{&R}w!g`je*KF? zD)QiTFL1yI7z-IiUr}@2j=AaNfa~-~h`&hbfzL0nuYz+tp|*hWeE9V6a4u`vR-93XG~A1tuYoVWr(9Bv;2X+6$U zPsA6t?u#++aL@*F3htxtFbj~UO@y)UK5&}nKe@miW_-fB1QvVA1{yw8VF&q&3S+{c z>UtI0Pm65U1u`+oX{*d`32--bR=ox>)=jXSE2Ia70zRV z7M9%BWyv?1Az-J8FvE!MnT@pjo_L3FZXxcBH51fLi4uRUk7#XCiVm0WbG=?t&&{lP z=pehoN$r1f!S1wEJdlB9`*pOn^#*J6>rCO^%-s554 z25*My)M9j{-no`#H_<&mmvrG;PHUHqO13A0tEqhewymAi-B~QLYMlw`W?Hypi=NO! 
z`V}hMRNT^Sx3FSN0h}z<64as>648lf$Wm;Fh@B@fjXK_4Zob7n)K{dQ+M!DLLq-30 zmoXq~4vw{b!S&ns04z6;;S#Lepe+6<=!AU*>BM`i#W)ZWe$uVA&tzi5nyHTG>he|J zB0&>fit-f?s8fC(&f0hnj#17!;I)q6;dLMO2O9`;XLf>HK_E<8Qi{IvqkxZ7u`S+6 z*z9x#Zf6pk=Gqfoa#}j)5>_YBd;T$8#GhI>2YuYIF*^&gy(Q*qreh z^Nx}>wxucCr@ajD?)Z3B#4tjj~ed0cSUMp#!Y}>X= zDWJ9GJU92zH}D^($A|yJh&OZ$1~jNZ{nCaD4~T(>-nzWomvPwP^8=LuqgnEPQ{mam zSNNmjC3+fff#L@rK;(3f#s4USr6G^ea+y1mtYfIVbRO14J_6OpL*O*^FBUj5fg4hK z8HW2LQCEq+kS{C&>(l-5hxT5OmW_kLsSkp`Nt{|-(s?e zI=J_-lB>2XbWSSgB+`S3#%$^Wi2=(lcBts>fi8dMqhikx6vyccy4}q9r8-Msm}NI% z&lS%t?|W6AMK+^|r9NaFo48*@Rl zVj3GkLio#L=DhFH6X0^78AY{fHe${-*tRYm{AO%`r6*lUAJgSW7c}Deenx`)NjA>y zZNcLcPpnNN?L)^@uzm3_{Ia6ESwD1l$r9*8-GA!S7MzrPjEXfT zC{B9E(g(g}4G%Zqy`LsR$=y4kwv3jPb)Ey9vj$9tl9pq!7;InNfz1y2u;WZuAwplx ziGtSR@`*2rJ4bur%~a0P{S<3RKg~%GBubPO*3f&~XH4yA#Qv*w1kr{a%;(Zf)_!pn z47cwpjx--sSY3f%T@8emi~CUVUn;YddqYP^3oaTQ3QmdvOd*eB4mD??XR)y` zV&GNqStjD#-I9>Er3~WIzu22bGhxH;7WA8S1IE8R4copwMVFbkL2g~o0&hE_+mLMV z7LG$>jTV1&sevFnA1i5aR&zdcKXM5t18{E$V z?wu&ZITzL6b@<#U9YMN% z23s*yn-_0XbH}~(g}JqR(4~AI#77F`yUPW;x?5my?L9iq3W4pMsZiSY8MywJ$a%6C zTp3)4-J)Z-zvMZZEF^wtO)E>)=?8wBJyo1=8u4uib5WbjN@V^HagXXVuEPvWW^s}Yc7wCfJcfue({}k$x&X!rU z3cKo=3e)TnLPlQ!_1!~IyTC&5wrB!*=Sc3ykT0;=@gwQz#~@vEnrXT0m(+Y$piYkr zw0}Su`S4Uo`KkoVmGxY2^61O+oxy0&Y0kH@0IzP+6@Hu6U{IrmC3ww3zvVB$e&qnL zGcL!5UTIwY;TmX(H5X>3bYjFDUA}ktE*P=uD|zHDV5ZwT%#_}P9)^^qNSTkQ-wk?o zGvtk#G4HwE8!fx{#0@9XQENv}HaDRZcOIvnA$LENoc{t3`kV17!S}#Agyy#UeXv2X z7<5nG2DrBmu~uKW{!7GHyYz?V)GD;7s6*?BtAMXkp=ebY@;(N9-WvdP08gD)2l>yTKn1v(tO;V)>_?Ft64?@XEgolLnPyKbL3t zRY_SWlYdaO`3C27=QQjd6OG}PlR%ky0~Zy3gN8!Vpnv$F=oD$E-;r&W%0Y4~2@l$I z6$Q63t$#b}W(STf3YE@9|8`$n(^nR;s3zhJpD{J>Id(0MmOZxUjzeKxwz5Xd1<1TucxuM;}rJoqNu;6lx0@ zQ|^PwX}VYbq`O9u1@~~1KHrk?77f-nqquoJ7xb+LB*uHO##lqzi#=qiR>Jb6dbDr* z47Qg_QEmN*lh~>8$JSa%zt97kU;o7TchB)hECX@2SYm3fCFr;-P}w&I)MJlQ58-Pl zza2%h2XP?xdZDFTZAkBQ~@%pApk zT;digjCr|~q1ySEME%cQ%rCvlRW;oq7TGpb=dIwhybLh2nfPygOPTHdf1!%>-t_q; z80ta#B6(K|aU=fTdmgl&MMzqYyvNftvzR1)1ncguSbAe0D8CNiS`_{`=7T=J-sTuc z&t7EW7gqG0xeFs@ro6a*lEjp_CDL`TAi3}~#y686dV!H-cQtW++`9?wWuI9O@;O$` z{{YG644-$E<_ zIBAzr{+4=Yoz6+*#iOw#(i`Rea`dFkqT@fm(9Q4~MozhpQuP`xWR@;p+MJ6~Td#qy zrUM<8o&ql$1F$b1fKlmRp#1(xto(Bb{x(R2x@9lGDTwEMl$k8%c?(p2NWsU%mT0ka zfxP0AocE#{NND^U#!lG@@xkv<-&>1MP}gu-JLtYSak_JQTNC%>)f%uU%0LG}j*3yw zVfg-3h}vC+!RBkQzsgAPdh-`?b?)NTJ;!mwxEmP0=@Eq8bAh@j;*!U81JSw$)q33n zpzyzog%dBradVnC=2Vf-bv0+WbQI7aEXw)#SbOBs|!=7QoE@xs$ibFzSyXj=Xr zt@Wz7*jdEE_Iv{a46mYOEal@%5T+e9;!Vdofv#&Ch^jxbx*<~_^UhKi;BkN{f&sWEo4xi6qtB*Ox*#BUzHQ<4Ce}7@UyDiBpmYSw>1K zl0=e{OwE0LDakSt35Ai8EJ>D7lKigUf6wbY=jC-~%*^+`uj})9zwbhY*+OW3JRKL% zEIw$>56WZQ#lW*(;0ZK)-)4x~IWZ7Oe_pl!Q#4re4*u4fQ}@vj{ooBZ$nGrI{(X!o zkCI2%{WX+Uc3{qwT=c08$2{xrAUpgCng)zz@=`mG>eIFJWlmqqCUkuaS*;Qwt(}8yg0V2c?;H9P|K-!b?|4(z4nETxprz?BTkt3mo5#L} zeEuBeJKfp3ydSXY-+U-KRtTHrl;y`4l>7C?z7JYZ`dbPQe?{EfyTnX@kK7>sFlhXY z#5Z=IAfY7^i}tk>W4HlU?K2i651nDP8~F_mk_V@g3GoBAqbHBS8!JV$>U#yY^6#+j zJayU^-^Bpe%V0394@!D0L1)kBsDI!WD%Lmg@VN2JzF`-Zog>!dB_~Xo^gC$Y{*QYf zryanYg|N)XLJSR|Se=B&JiivN}n3&uHwx4|TqZ zp!y}9HGFi0zX zL94w>{gFwmwqp?_w%unzjo-lcEp-^z+{5BEkFm+o3L@j|(7@>hq-buS(I+Dzq{mTi zyW118qT0Ele=3ta?N2*};qWjm7kljZOf&vk9%o|$X2W-2*0H}p;xin4cbbU}r(&_$ zPb7Y&7sL-(1=@>^GUW?X)^yzc28Znak3Q&!F|9v)uZB!Kw34jMlwJt`8kR8~P4}^~VEWDq%4B1sq-%VW|0ijIsV5LQY#k%+^xSZlXJJ zpRej(eZn!n;8Q{PKRMJnGY~yCmtnNGNOQuwvij~vP*MAot88Sb>v>j|H~B8A-WoDj z`umHEyMR(ewdQyYsMg)ZptC8EHbxGAIvR@h`)*@1<$9a{o(>Kn+qqP{Sm5vU2lOkU ztjKTm1^D9yhNVSNPOptM>Hb8U5i_BJTnMUJ*O=~>|F}o|H=4hT{Iz%m*SlMY0Xu%8 zZNNe_dc;AydM;Nq3ouzf;7944}-_ z04vDSI|&)-Ux+>LMt6N9VS*wB;;fYzyX^yH?P*2Tk$zy;cm$F=UgDjtokjb)L7@Hb 
zpv?Q=a&#=xK(9Pve_iU2?t4tJP^XjlriyaHWt4sRxthC|oaD0JpP_8bHT3>|0wooH zfMT_VTl?QDWm3?(0KM1naP3*8l!>u#P^6=~t35;*W@e)9xP=KH8^DjAyPoy$ zAu918#(9trX5ty}?C2yW{`)6rZxbW+Vh^Z@Al|yrhQU|fLSo|wF!20>I57*<$+VZ?WEQqk)jdI42; zh|zd$w_8-y8LZ#bz-+^J@nd1+BIEq)J5!JlQ$qrGz?xd*?eKzr9yP+=GcKCkv; z_@gy=;O7-$rKPag57n6aYbN@A>d%(_)EC1WLlFMw0@DkL1<^T@NAB$`rqx}?%Y&#V z)87`icCNvI@Qcu-JjS*TISvDasR*np>D%l)U|w)?;rC6gGMjGuM_v8bFW{pC*cu>R9Zm404uSZWf#GE zN-g&p7tVrrd6Dxw5tW+4pD^862!wY=Vh{;xy8j=UcVJt;Vd58>vdmw4kB&BwA{ zkwQcTuQeP~~rd-eLji~h-hc(+f3DTbrAn?{j zUQHeQ)`M-(G+AT^o?U{ErU|%V2=NQesxYBz1v;$y02A8Gg-n;dSU&3-G?m}yi5B$T z+k1`s^&QSUUAse(0lDaH09(>7v76IBUmPeJHUOF?(%IkoCX0{Xcx<ICu!SF_41mJ)9C}vgKzRFlx*vWaFJvTf18BGSVm-6Yvt?`F8Vl~@`oY*a?GQm- zc~$-qc4NyfY<>8JZ?%7bE*JYV*iDRox)*v*qh0Kx3|<;-Av!cQuoPeF7~?EQHw^2*LkS&-8x=SP#`~|JgIx;B_1otDV%t zyBvj}!z-Y2N-=BIAgX*#{et8`}Y>bDp>vVsrsl)c%S6Goi z-K*7&EO`H2h^5G^-X=ZK9wVX7;S8!bv_s!TC&5;E5)=RUgx0EgQ0qM&+O`xy;mmj_ z+!o6!9QDb2FU2hiV^LGP2}_#GaFCR`w0S?#sbM_~tuPa{hj+`=-EKf|xV~8Az5q18 zJ}{@(XK|pbp6GO&G9C9vGO3Do?%N_+z~7majU(UUFeNsx+=Sg+exTp6x!CBt9m9%u zqGtbfp7G-n7S1*Xt#3EhG>P))25P9=n2j1X32WvIf#UPGz;^F9+-e#D&dW5YX*@_A zsHtkzLtn7|?Kw9u8i*=yU8b3uj_o~8Ky<+_>Q)#C4X@mBhHEEr^)iW=?y(KkuS>{b z)gOxo$Ak6#-Q0R`8T%4#At-ES$=+X4<4vni zuy$}Ft{Qy^#!+^+_Sy!T(@8|npPxXvq)pv0@(U>TpJ7jyeuO%56-nJHUFRxFuw{KV zx;(XD;WGjtmvw=~-FJgfrzhrZCnxZZ-rRC3G2rQSgW zPiQ!hj`^}hEVoBcj86eHynyj_l4%cVi7Ot$bc8ohsnF}HJIh|g(odQA+)9{aiGD$SSr zOj3KDtAM)YPaxXlClB_H!@e&~#E`qi>~@@yP*(65#=4!uygCkvlXZlc;)VEd;z7(k zVkY_|tik|I1qScA2|DRE*u0xT^Ps=5EX7dh7Ltww$5;s7Z~9`ht{1d@Fc%85(&!9U z549hM!(6?+5a9L}r8V8S@gd?wJ&fez$m=41av0B!p=Vp_2{warU8WWKqDwPnx^()0 zXRaf0Vcc=Z>^*QhMn@djKN1%N>I(;cuLa$+LFn`FD1g&4R9wD`P6vNs1?vOH^N443 z^bTaH%D}uU`O}?pD3dh|6gwK!+L|a?>OfP`<*NmEd9H?(p^s=zHd^hP`GWh4I&QXl1!){V}XjWL)`l2A?E)|jzxAi zG5pMTHhj-6{9$e-HV}Wr-ZB;1?cd`YKOMmsa=ZMs1Dc)UKowC0_VKw8 zS#%GpYBplc?t_$JT~e@$vfj}Z*O~R1YIHhmj7svUNLTbn6y6b^zz!9gXcsH>P|GxX zvGU{`9DY$xl&^h>-E2zGYvU<+w%aQl{XiPDVybZmiHg3LFs_3f%DRFy z;6H9*ArS+7IWbW_a4CGJ^JyF^iK&o>X{ggXftB5|fCe~1cQzxzr=J1?I~Ov$oFgdx zy^ngx`=d}lYzb5zal|(23|EiIhH{-NaQa&l95;!Da&mhO%+G^?kc6#6{2^F~C*I6_ zw!!%nXnIcLX0&719qfdBzNwIvO?M5Y1tdOg;gwd0vFW1}GVGF|&|(Le4l)-)FMGj| z)*@)=H4>XNy4d$Z8CGQ5VEc1N2y;o`duXoLp7;zD1Gj*Dhl-8Sw-VAGMuRHMfv=LZ zptfA8*4lhvsg7qL=pJ#c-iO2ScW2NR&#)n;b$#0QYyvW6XhKplr)ZLEHAI@tec1#7zwCcS?|mcAtmMpAweADzcszh$UvSjtu8 zT;1G$oqF$-`+fZhf)3mUFs?d$t2MOT?)|JByy< z<`U2FjjZxQA+ct!b8XZZ&^JAVu`#FMKx-pRXt;?Q^LE~ndKncr@35dwJ?!d5?iPz zc-;Mg)ehlM;eHp7>2?x*i=IJLE}eNsy+dt6KhVvdrOsIL8B6;4f@@GU)9mfaU7UNO zJaQ3)xX(k|5uY*b>2Ex6t(vtFgKLd7M$-d8gg&Dg7)S_nY3W2dhI;&t?V5EZ5I2ncx5WauB3C*AWsYo zC}zv+_Q9%O?;vZ)5uPDAg^jNC8M*4h{N3{~v3ERgw)4Uh&n<;jAzD0~rzeK~Z!ucs z+yPfX!K}-l^Sr^)=<(n?q-+eqfTP*u3DlykuACTR@#vR3kvV&bkWsq{S`JCDWZeiX z^)eD`zkCDPmXoOSCCSYtKN~%Ab6Nb38b}DLz|e*vpqop*$}R#A8C}Hf->gCFXKvtB z>Ir^5R>UImBf&rT>py($a3+td(|Y$xW#1 zwOr=6A`YzwEn+?@GguxM1+V2Qu+No2`aomBTTr5JTORS@u3^@Uxv0$R%`}^)LR@!K z>c|I?qvJ1_Ozf}FdJEC(SQKRXi!ku;ODOl?=&@l9{k9H+j4L@9vy50VcRoSbI0L8- z?IbGj0E_8)A5;IP4yLm?2HIw!&jK|oEIJIiLsGD~@g{~m=nh{!bi|mP;gDGQ6}=}r zgUfL0$t;|SF7F&!ljk!&K&d09M88Dr`Wu92s<>p@37)f+c=B~KP?LL>H@&-oUhgDA zjI*9-n|zl!Hxm~}e2vnTAMz{it665Rvmmh|Utr5oS!#V0#6J5429L~8ZGI5~`)($` zp9d`6pd;4h)4alajI4Ma<&#e62_=>HF?nVl2CYAY=}0^A$pe{=`ysk>CxYMm^(gP> z#Ezeh#+{v-U}*JM3<)&Ea~d6Ct5-eF+F&dc?YzeNKFq^L2mnGAzuK2LJFm)ftwkeQs`5kVsKY~tl_f>Qo#_elU!N2MYbq(Kv>ZXyH zH}xvCJjmv`%_|{ot&|NQ4rTbLaTx482ZH8VR*^IWtyBL4zn{B!*M(++a_3lZzV#A3e_qG_n@3^D^aRvy zA0_h{RE+Z2cF27&8jMpcM3>7K;A`g}kl5!kT5Dpl;)2NPPn7YrxH~*w76^MX4rA1F z>PDbus*w)&B*N}j(nJopZ)a(xpE|Lnfe$q zo2xLb){-?Wr@pMLk=vT?;FB$@pp>#tGT|vC`cV((;(YP~q=Aks5{f=MU}^Dp`u!ZN 
zK08EDbdS-8t!^iwtoMA(`Zf!tmepYPnfh3|C*ktBB&e|5j<)NnV55PR5PmO|XBh4W zJTidMCQmLC#v)rb}_I=urnb@IdXFSHwo&0`B@RDj$ukClcbp+X~r#N30taE~?E z4$v}eaD`Pn&wzBT6WioU*-YbP){Jj)t<^#F^YVpWWjCRb_7IcEuUr>f{@qsG3QBEZjo3}_D@ar+eW1m~oi3t2`6V#pR_0i~UUnEro| zznFT2rH3%`&|^^e>k0Grt%2w-Mnd8e%Cnmv0KeG*tp2SWm2GCItTK}MD7s_hnAe!~ znPx<~pW&mfiBS1xZ?F!10Wup-ES6p1Rrng!#nrH4ay>e!sh?LG0k%U9!=r7)Im_Kv z5UBf(I`L}Ww*LnCGj4+Ov|RMFuw{epSctCs*E98(I*5FfgQ{V4p6Jw>7mtgCegCr% za>bLh&#S{JQerjbEaOVK7qfL)2>wgU@eXl>6`3MBMJgdeSpi#z8H#erQ?+&JS!TcB zJ?tb`phLiam^ikR(4O=LlZO*)$oe<-bw&;vrdSGoM>4VLvjmF|??;y-@vNoA7L#uv zbet&%SR};*zAr$ln*!@*U4^tOif+}u-LB(QvzaL}Pp5B<%8i4X= z(Y(HF2E6VWj|-Dc1&@t&%*X8n_{|CjTQi!ge)}ET4!nnY`6)=*^bw>f=}bEBJ60U5 zXKLzxPdJ>3mVZW|%PD=XD?7`)r-s1Ct=rL~BnRsc=Y!`C56C=l7VCc-i$nd5g%sl@ zjJC5xbJ(&d?pd)vBgZgH;8FM4>3hf;HU>$FlF);IpD0heYIJNsp!I zI`tbC4V#If<9gDLxlB}6J=Vj1_*7pBmY?n_DCmQ2Khz8T^He|bGm~0mOA{4 z*aA&Q$HE};ODfy`;L>LDd1dXuxI1Z(zd9)YYbrU^2Je8NE?JoP!$9=Q-_3`9sR37Y zBjQ3k+KY_OcWF;X>&cH;+~8F7zp)Vf=zYpu`wF*y@4!f#6m0$PH4A=o3evpnc=fyK(sjXL?sLod8W z+xR3-U2yKY@?RFc;Too$e9rQo8jB7m8qx3XLT2W218j}dT-~|@GuB>*@Wwi@UV8^r<@$ncSK?m`%K-<7VTvjF zyo7e|f%8wY`k4sFw;1Eh<}7qt7=Y_k&9s*+1?#7|Smu?BIag{x*;XQxf4s^(ljsbw z*_Sf)=g>pz2^D`H^Uv-yL-BCb!Q2CewG~ zIxO#LA^1stK-~GE=oxYf5@+>?84l6lR1|=&XWnv$%NgkWtrBzR^db)Qtb(m2`eOWb zBQYy#63#pJ8LED*hs+h1QD>ACi=L|=ZI#J`hsHe3H6?%tx!q4OpQ$)My=hC zky=Z^=0QGa&r+8$%|P^_yP5NjeAJA(0AqW8L#IYlF|If9T5>8emU=6hmJgsQuZ7)w zR)?!ry@0UA{kST#CpS=hgI8ZH1g&j^TKaSrSM`d3KE%{)`7#)Khzr5B(nuU~@hH?j znGPNekJ<45{es_($a}h*JbN$Clhd%0SIDm8PU`HWZJNn+hpT0_n{x2)@14cVvtrQE zxfqq}i&6Vnm**b64>P}A#3x5yL1f?~R3%MzE1E5Ws>Mf^IrACvT741bzeAVP`*_9z zdOl#(u!h?fzhRrZrsFEJ1jw5IA7-7s!ld7yy6My0 zcCODuuvtR)!*@%F{UKuew!_$QFcPX4nG4$2j=Xei5hQL-Vqoio`?GdqZr)&WWQ3u6 z)n2R*evKQ}O2n;qn=xzS4}AFN3-Xd2fq!3-S9sM=X#C?Xxy3KZeBK>m=7%C7*I^EI z`dstp?b`qq@P#*A*$cfGu?(7YMV<9wZn`(7Lty9}i21x82Og>c>HA|?F{392>?e0+ z2C@G?5IcO!0N%v+;FHHy81dyYJa}v-_Ua(lCfx&U)wjsWHWdvnnTdveorLEv=;?5k@7SpxhJxQoUrees70NH&haTj~h+0=b8L2WZ?^7d-tB!)p*XX^C)>rEqIPp5- zpQ+Z3VDTS=;P_|*d{g%UV`|4>@}f_;Al6Vw`O6EZ9DPpn)J<$f5$&T?e?UmyEjD$S znV_q4ZcM`{^5ZTILD>Tiy#~I94xR=ve?11(iyi7Gi!!mngj^SW+}QuSovZyGqjE_S zEBSB~2Tnf=+W(Daa^G5+pW+c)lBg?crvHgy&%42&8RkOZxmF(IvJe&HxVrZ32aslc zL9I>*FFW`PLXS}oahb80xX}_M{kGxQxN~T09RMx&Qkd_KVu)Q!*@__t0GJs#o@|Hq z3TMdZ9s%veAsG0aVdl#`ls`EIHshShz4DYRJ}kgZubyJ`+1I?*_%U%=k21$=wczm} zka7`}g{r#&+w8gs(Zmq+bR|C5zvN1cPhd~FlZYs?6(D4&fJE;nUfxG{hgK_4-OobJuE#k1(lw0BP+^?!FsyE--v0{; zYhH2#kH71SjX60OWpN9%iHjk;_B{?fuphOTbXBUsz8EXf7xMqv#L=MzRU_`H`|ikw z6aUi{mOD@n=+IWo=yx8DQCG$Hf{7>%k6_Yw|FOg_Lvcp+DfCY&g>Du{!DfyZ>`c=W zj~hjTGH5yX`rSgX9_UT~JBDSZd_@n#MkcwP0?L0@B2l-=c|S+ zpEKlNd4(PHewq(_jwN;fVukq`GO}t0`i=< zYRy*}c%)zA35{8LNfy|5H+aAO3&VOu!tyOD2(MYp@{>Zitw|+!x3m%*J`cx=|Gr~TZ!KuQ>AEej zdx!m3)PU}{PqNVa>7cE;FY|t!iycBCRz<9Y_ieg@t?>%XHSx!5S;ScFuO?12u^V4r zLO!oe^090@ z&76Jp1c%skh#t!!<;4}clkHMx-HYe;>+_(dVL#@|x`-v@403(-jD^>K;F<#?SP9MQ z{bV#t(ktiY>k2UDkR56VWns|MH4y$j2Sc3VF!pISG34qnvD0bh^)eMoIu?V$q}%u- zQzGo4^TmwpFuE6+)7(Hu48Hy+CLSMvspp;(lVl9^AGREP|9y;!`Fhy=0@0y9ixqVZ zLgm%pp#9kv3~KSBj+QZkjj?#5jpi=1E4dO6VCLMrAT_d=*`HNX=GYsHF1=)yv(JD} zkALWMTg^7@4S`V2Y*Y@YfT9uCne%{B;=J|)@V|na8qI{z^Py0mJ{&a;MGV%bL(zZD z+`ZF2tX`vp_V(^jHlzq_3l6gS8!1fw;+-sQ=~|H7NY)NU!jkcJbOeZ@qO zci8Z;&kw*+%48bdpyy-K4`T1?iv3qE!F8d9&|2~lbY?^V?lQ!Rp>0f>I#wMxZ6ln2 zMf;xlQRqEuE({;~807y{x_Vu&21Twp56}6;PFop^KK;_T?_=`4?KXg{A4hoIkFyXt z&jPltG8Ayy0(1`eiUyabV~Oh@!~x!eiqRuk;9=Se?4-NnpdD)X!pYk3z~11EGy?D!oZ%O>aWjS z#_YkZgT7$p?kaTZO6-qAokXuM4OrV}HC8l$wSt(6W5@?_aR~j!(dUJ7w)>pvY_^+biNj8+m0qWqPwWw< z?OWv*{?7*pcIgE+>jltRVW7@P{)D5icNT32J;d1Z+o*Jrv-qk|`b?&xs4x}3a6{qp 
zD-E`{PJxU@V%5$%%n$bMBFz2Z19?5#z`A!TmKc1+@Lg?Odtx)z=HA7)lmg8EuT3U@ zX3P5yFNfhrKY@?92Jii`4d29m15F?WPKIVeQ2I}-IQf(tWt)oiSt?faeKK*?PI9G! zHeZdz9X>Mwk5XUTIbkQ{x_re|A839#IF7-;17ODBGg!P>Nqo@jptHTRuwg_ig03wV zU2%q>kd5%9Zyos%pR=+lmJn`Nz?GfWsMFKWL)O|zxbf;V49|&%z?v?wbeDywJRBzT znW!sl9V`)5F*{iFk|Ru2RmRl6iXrRSB;u6~V-Cyyg~+-K=;<&Q+lE{MS6IX=l3uaA zqBN|(zaGMOT&66-F_v<@1^w=ig7Wn@!204ICL8$_bQYZESL`IBes=>gF5@JGn8q;Q z)7hBkyc0M7bs6gQgXuiU-~i2_Y}S^c!+@jU9&ROi_cjoG4(-Q*(~@wI7~r+X)WKqHrhezix+H z2)3aXs83wejJcg)(VPTW9-ak-lW0G8-9VV2Rl?Z6@4@CC?;zw^DF)A51`_HSC$@Y+ zoj4=x7bp>ejt~p?V>pW)brG^=u7JQ7ce&1~UCcIVAgDaeWy;LOV72WBlo(#Y$dJBh zaC{zS%rg@MZgoK6nq<1mY(wX5KXGpJ6^Q5^2`y6wgJ0QtIDLy|IA0Xtyf6(^&4ubF zry4x#LjIE@8XhB~c~0CwY+5%97Z`s>?`yQ9bJ_sjD|%tvjA;;frJgG{X}O<|$bDvy z0qMrcaCNnrxIy)WIFA>ZWVtPFc>NlNhmr5)#bL@!0Q%BrCw0a>y2sOzGw_Fz+-XpL^TBb^&Fx%8Hld+^tp6h zhKnMyAp9zw(@r!&+K3cpZ$szT=bf?g%`vQMG#6~2Z=k(`H8#XdMeWg^eD0n85H?x| zeeye~gW7=~OT&nP+@h|S(S$R{+`!Ni?}?>u2s$4PWZgm&K|Wy+D7(3#U&(pYRy%U1 zS@r0@EFUJ+q(Vy9ZwNrH)QyeAv~7}w_Oi$5nNu)ZavYodC@1#y2dkW(PX4^9vQ?|E zpj|w1(d|#5^wa_7@R_^^vO8$~{44JfLVL$4-`Rp5rP$weBXtz~dB^7ykX}SNp2IiM zs-5Hh0OD0w{)W}Axe)TlM7(i?_DJ1Suz0l*)>Ldq{frD$t&Tx^tp>;98LTaz34S}r zfwitDWSl>Q>n1lrc-%&O?Y9#e0^88fWGqITR1;6B*v^yG?Xx~@Cg!E># zU;Ys$ctt^L#&W11Zwb~-H7slK9IX9$1J*tJh@qnfp}ox!2&y0-&#|LSAzcJP|9ge@ zv$ta0f4^hbUAp8e-49_WEX1PJ>!{jVF7s3FW?PS#3OK|FRKrdeG;Dr@6CVDAru`R~ z_ms}yH>nI14Uw{%puuR9GZJ*y&XNV+8GyEzI$%h%x!`_qAG*vug;pjFG^5%JR@(dE zvt}ozJiLJee-&ZiHD@&F;tL~JZv%yWJ!&czz=!q31gIE`i$|J>FJ|ir^|Y6s+f9br zid@h()~gjs)-=me5ML_@`Y)*j|6yg|GCqb&Ct8VV@owBEWC|!Pugc2)>4aN9CqSBa zE|!MBC8kpye)-o#Nd4n5S_?BlCw2$Vzx|X~B&*rF_;Lu#N#c>UE5Ko93wQZrAh8Um z;LNU{!6RoXG!LFk-I^mHME&{94d|2?9dRm3Vf05dt=Sy$&^z+B%%df!`X&1rv z-Vk&yZiLOvSJ76fGKYOqw!m8eEt?McYaOIUJ+!s>hU4%8I@mV!)B)= zkaeU9?bYS5?sP7g-it%%BKM;Yw_bJ!0RDsKX zbYJ-ws7HTA5Fi>>0|8Y;#vT1pW#7|Iu#2b2r)-4hEeK#=OF_g!%ovg)ZX< zGU@hS=rD0HG#xjC#MWdFg~7 zFyYKa9BUK>k{ub;>kI?i9s;Dj9*t4*lMtisOFOo0Xt%E$wEF!8#DV7W+Ggngw-?0R zx``t@XJD1(8r-wSNQepUz{-aeaC(%5Xx%lKH}BRZu1YM|-M5$ z8){hD87o0X_vy;M(O`}n0Nb`gi$C?|){0P{HxFDE4xrcEhKjbA5IK52-p#cXq=$T= z`us)e#^_+iPh#oX(7f9_j7?u)BnDU=#mHBGfd7P23}`YFR-JE!;i*S3ty?m)x6mc- zYzcXX`(v8*JyzJPi!NVE$gMF7Hry(Ny46Q9Rr>+uH462HvJV)X{ulOtH5$g=zlhyt zWS~!W7f77*4}^~m1-as$I{#nlV^WVptB#l5urwCFBwGr6yn!fLmI#SC4=^ap7P~*N z5>^;=5|{PtB=)yy!ByuIuwrZ!_ugLuSrd`~)Z1~vsa+1wg0>? zP^QxEpu7V5R%Oymdq1;#eFC7*Us(6=4k*)PVoBO1YzUc$2ST1>8J#=f-~iNBf0GS| zBnVu70=EV=qE){e5b`C0=MFc8%?|W;eg6u|{(6jQOIpb_au7m)D+L2BIpc}-@afS7 z46J>`l;_X!LhE=IIqVemqXWyH{QEYc>>Z@4!`kyNI?qUs*fl z*R?MdyoD8jnmvV~Rkf7$&&33ztMr}ymwO*g0-wj`fH%`HW?V8h?^q9gjvEStmRX6H zr$s=-%LCLQTgYQ}UqBy|uUs?j9oW+xq+x6$7#t>M%~A4pbZh1xvgwZhD1cP-T(mp88+UfM5^MB&V^nG?hIONPlyfOxFd!ey56wZBkW38ss)O*bYfSDfx^2re z6s!DOaI3PDkY_+M9x_g0|z;dQ+$pY<@P!?!%5)A${fVjJq=lg!1r+7Yu&05M? 
z;dT7G%2+VhZ-(UH&&11>hY2e(65Zf;eG2d=HDlfZg37F7G=R!y#gE||J67lC7kb`lGnPeW|~9Ei@c2bJJ1>vg3WwBOyK z>F7wl>4AlKeBnUY>Pnx_3OVi@WhHt#x}hef2iILPUnV`CBa;spiGAOeqJvK>6uG~G zZcl%r#^@}05bV+5z;U{J#`CpHcVfkSD>o}T0QM2DS$qvXapjZC~Xn(90(=T7boxv}_ z{L)gG`-CzIl>K)-MLg(HZ(#%VxSM*OWLaj{h`mDY;eB&>^^W^^o!D=Y+x|j>&z->j zG6z46Asf5l5zISoAg+CKocNPNp(tT8l;<{svg>nomETJo_@xb;MS3q6hqHji2E-5# z=Ba~dzV_`VsxB2XKg%D`cS8bNJ9TH8<~2Og{4qpKItU}3(?D5LtbWqJ66<&O0PCyk zxO7(yXm{E1Exo#kErt(xup{A*w_K-tgDuD{0h)5=;nzVqu+y|2nuIO@KUFY%`Wsm9 zL7ljs*SKO&JHOoU2_rg@_bc@~Dy)49f_vVF(PhTs#s!om$hrxw-`ZG+QGfLRb`N9h zzL0}66^3lc#M~Q!=&On`4zC8Yl`M}7tqE07?e1SBF^%?f;~o0 zaaEHvwsG21Y2M5lqBP$k}Wm77#?>n)VU@-5@pl;x3*c&kn`dB~^y)DvdQ z%th(;9!$|I35*-w5En`!lX^?pHbYB6xfIljS=1pUkDbmAb8MBo<$a5PL6-L+Ft^VH ze_~ze9M_ZiG#>;_(^dNC_rT|Q2l&vR74bKDPi8ow`Ebg9b~h7T_RipvLD4*a+$UMo z`be~C(~RbahWVGv;yS=vR(DB zZHGzM&BdFCECdV+h5e$jpj=y5K*1Ym+Wj9m)YWjskUIsJm9sFx`OqSYrHgLKZFb}!Of0e!S+o(ID6MZYbSc% zulb3;j&v3V_9SoL_4By=OCdDq-^0psz(9vQlpVf{-ci0F_2jI0LK!%i((bin5`Fib zu%!JL*45kspTO@J|NB;K4kKq&r9I@9=0I&r5Zze~)Y3=3Ow#-(zDaqHp?B6{lZuU)WtT`SFf`=hRPAye+qCjV-t(2YlRz)xs7 zYY3{eeFY=6^!n(v>N{M)3{4BPtThz&#E^I3Q9Z_8CcgB+QWj=O9ln3Mu*kA_*ymv( z1YX#Wx)*%ao)?f@x#ysKU;*_DV%_8k(QX;K4{*q>L_E9MSd9Fw8^k=G0R4YeWA)EO z&@Pz^E>dT%=r@R5rZ;(JVm!9*eFRywcl3z+3w4_M$zlqZV2eRHhtDCPT9(hl&rj#> zF9(3_+gB|A-9}l+%M)C8b}62AsfBRjs_jon0vD?!R`!H=Wn~MP&s2mUw`b&BolM;O zUEnyFb_`+Dq2x>&n7?&{;E-34k^2jEm_F(jPJntl7YJJ4j`=sLWr+_SLfVI3sJm&6 z?486!P&WFjbM5kR6nT~E4+X+83*rc6DY^VacNU%58(ltCVU5W_Xzv&c4S%nIv~hE= z^zTm?G;0}L&MO4@+o^!h{(+X%3@%-LuwZ;%CqWzG%2GC80eQf4%=7yNUe6LCXwF!y zEp7nYUgj{$&`LbuB!c-bJ!rdo8dC?Khd|qxXsZ(de)$6UZ8qcw(ufh#_`|KPrXJ_c zyF<+KuYBAX>J5$}cM6*bF|KApuM6KGlsF&WJ#{c|+8J2>^8qnJjj%*=3k$F8M!DG{ z%bBbqCb%Ok#m`hhdTaGt}Pv!Hr)wKy%1>Nc-mtQ;fJ% z5HsijzKMJbxlWd%AI(8^U(CYjBs0-@xC(TuTv1on!L(6PvY71W5E|7D{0hvWo_=2j zK6?S0;iJ&Lwho?jy$6bEhcNBGlRRd7F*qs5LEy;8JTa<{r#-G>x~Uh`c{+EYPJD(T zj&`6leXO3lbPo)b{DdA?wb*-tfe<=l8M=(p5sPddF~>ozAh(Wz9VbnMoHGw#kYN`g zyrB}!h2^O1J`^=x)LAWU0jwlP^$#nQo_xe_IMYsYyc;&BS!4aUVW`+Jl1Fcd;|Ch+ z(L+(m77aWGO5Gaufo&R4k4(VJ2lk@N@jh(HB8lk!G>SLfroOw*KcKjL5r23Z3;kEu zLYS$k7#UoK%F&3a}um$PV&C?Cc?d=yD;+3K+Het$MekZfz9vv*r91d zZP+x~pyjlK(4ND2lQ*G)&QR^K*P&rjGK4*}VAlP=LizrO7~yjn?EB3FAGZ|{H0v6w z;?By#$KK}G<8%beJ@FW^Igk2=o^G4MpR%kz57_ct6+~b9z-r2f7aDm5w4KA~_uYfJ z9G?dccQ7gky=6_)+dMS2Lc0QH+U7XK#3$Cc`{pQ0rZ+&rWCY`sl zOHe*PjWtZpg>Kys(OGjAugi%=zv~uIk#dGBy4!&M-a_!Dxq(+)H8u{Q%-(@2{M7Rm zI0omTQPyc}d7Z=UM_qw{%3m0D=u7^i|&#Vd`ssek9gX%Qx>}MWeR9|sR@sCDI4-NiVOZ22|+KP!|f6SzLHNx z+lyV0czQL6Jf1_>(gTnz6rnrLC&%VObh@DALf@4`#hy|y+PaMP2oG?I>_--%)M26`_4onitlNk-B_=}XuXPZ-?jDMilR0I~PIl;K9YjT~0JqR#&_8H1T4X-J z#GERK`blR`uie?J+xqY|k-gyBegM2)XK{g}Z=ge|g41w+hHuNr2c#QF+2$i$=ci0m z{Nha+)Oi%kFLAQXRc+Tev;VFTA3j5pen}svZ7}AqNvlzMbUn7;ZH3~I zcOYCbkyo(1~A`$by(lPT)FoADd+&aVKY>x1#;@=yrf@fn+g z($N3qWGJM*hswp8%d_f&N9D=D47K>`540n6)M5)(n(!%uO$DhNvCC3r7`Lk(ojocs z<-VTK^ZR_r`?nBMItGB(@%=3DLL3*nQybOp9+%Wq*1^hyM^WB<9Xq-nKpfD{(xCzq zP9?&APh%l|I6VU#K5_y5hN0QyQfL)6aXy<*nBn5I?8d1bqA_x4rMb}Wq_yl zeY6y-Kq=z5w&k?f4pl;raewqW<;D$+c#5)b3s`&dQrxL(;|gcXF@1(3R2DBqC)Wue zjV+Y;H?9J&d<_Vny#u5UZVGA9eO9S>!-<;>+1RxzkS;bv&)Az-Up1NTV}D^#2zBvt z#z`dK`UvW4H2CsgVzDRV3Ircef&7goOY-?lof%ujrBt42Y zK=n9`Z0N+P#O5v=a0VJ!3e3EfkD&%>xWS89h~@LhXIRbDPF=~4xGF_Xq{E}Ji4f74 ziE4dgB~dFcVg1#O*y?#0d;+r2tL_O`)m@CLgf-+l)#3xKT~Rs4l%INm?(DapqR#S2 z2$*&Om5Cx2_4sdW-M5^3Hq0B3{Y$gC@2??3V;h8@Q5Raeo!IuTGw5<#pSw1|`95xULG^*WP9Zlsg!a_yx;f zzDBtu7uqOam!nrn{n7*ov{Z6>EyjFWWH);4CN|>8K%5>r9H%!Xvhd%uXx=vos@Wif z!gTCuKLk5IXz+TS514qb{OSJ><>u_YnyQ8&qQdrR0QL` zXbA3^bu4FI2z32Mth|lCfQCyw_{Ndr(x3bue;<&@uYBZWyTaM8HIyy=KAG#ZlyONk 
zD$$!&%4Nkja0wruKz03K6kYgR0m2w)*uI}~Q2C(ucPdxVtIfkvG1Se|Lf6%K=)XD{ zGd4WJlvw~1im!tP^-M$0&BW=!^yjCq0a;Q8Ycw?EE8f2V{|p7V{m0NdITOw988by| zDafmSaF*LwgV=&xgWJQP_K!GBv%PVP`iX-TwqIw1OYc#~2=9TA{#OdN znttbYg;5vpwjsz&GbLI9uTaaP0|%bE4xPp0(A7x>QG3oqXES3iG!Mbc3vg7s32)y2 z6;zfSV>ic;!|Sh$5;g5;MM6Us1gw^V^z|m@Vss;WQ;jj-wKE5m|Ggr%W({mHX`^mp znM0EUx*qLczD0+GA~y9_ z5ghYNg6i1^&{c=H3HwJ&yjD(x)lV#mVOa)pqx+C@iWnEclxg}v46U3LXr<0!>l!Pt zKYtyzmldIX?L*jZ@(2w^sqv49`{MqN<{Q4+oYYna-PeC(zHAu=QYRZnY{ee-FJ(r4 zQ0I3zbP@Aaesm@@BvOc{9bEPrO zP0P4cja-b{z+vm)dz@SLI0(36fKwLy$^5lXW4alKK{n&jzho3n8MBk~y7G>*qC9#) z+dz=ktzc!2YP?p5wjkC1l^l`d&`R$wsC#%06_!z`n(#N1n|Z=)=~c9@D}|kkby%R8 z4&5HQyp8c{e9}Spt!cE2m)(VLd%iRGsYxuL_cUZp@K;pU&@=a;KHoU5nLUco7R+2u zv2~4&plP7THx4Pq0E2O0`!<0a67U?w&U;a7T`*JyD#_E8!jkf-o4USEVtEpo-JSPz zW?j#*nXNY}4{`hdyUXjj1c-!SEy$L+-GzFNF_Z8!B%@;Uz{ zD==MqBYNI>g{yB^;=vKRLUWouu1vlOHoPZ9iA{Lhw+BH>eg}p`T!OM6KOx>- zhVa;my3a#l5}j4$eaEqo!2`hlTN{=SiU;2nuTae;9Q_0DgNNZXR4Lsgp%i|xbDX|kTKtX~ro6*>fr~#p4a4sA;I+d?X|`#~tAxia zlY9iSFORa#c2n25rPVV8vF zxM%KhR#6Ku#6FDEGw#d(>S`?1EHo9wZMzg=$paRz?F{9_83`GGkU<48!e)3t#845< z$mUB#d9R>#hB~u*y%NeFB$E@xRIqvJjP7H1;^xj!dRG;qMX@d~b{@_AUZp_KptE3W z;sV2MspAm%l^a-c4V?5!xuk8Hg6!!L5I-2n$pZvp3-^JZh-Q@EKMASK2zuge=;vBY zc}vPO|FVly4g5b(aWY=he*@Bk<&H+({-Bylyw6kQvH6#B3C{+w0uAaRZXKzJQ8qzZ z%`Ip)nu5OhG0evDG0K1R;iYFrOY}sMKMDE;8-Bn@G<+IjSE-vr_%QzcQ2v!J8XSnz7j;4(eSz}=`X z#weqS<=dK--jxO7Yt53Lv0a!rA{8B6`(T7-3HH1`f~hCYG0VyV@_yOUu4xPMr!;s| zgLI5p+Jt!%nlV#*1sB>i510L8D!h5EDMac21hcJoKz#ZYh)*b3QsOgg92CYDEIdL! zs~aFHrEYezIw#&Ok{FFz55xM50*kGaab5On@NZuLo;KmA`cR-qd0vH;UAkm8|DCcp zx44;uyNNxz2qFd?n;Xx#_X`qbh`1$EKNYLKm=`|79`4APj08;oCJx_&1( zZ#5BAgRUuznnj3nIh60%%~@;~P{*he8%(HY`@U;RbA+1Ue2#X|hmJ5KvoH*_P~yO@ zR8U@>##ZI(3bii`dD@?_@W{6ql{5x2jZbh>rB@+!q#G*D{{g@5i@0}(mLOjx!cn>HOXkEUEOFYVBbaDl3+)iE1_qrheF6UHtc4r0+c!8FNRV?W!LRjQ?EO(d>;t}H| zR;9jBIm8Hi&dkOMeqX?B^mlg5nKJ2p`ZK%KjZj!Z^Y-s+IZMHq_upfRd1r63PD@8@ z{4WW1ofA+k+6JQYtH5r51^9W%q3cu$iq9vAObFw!f&9g{O@xHRQtCS-A@{dBFUO0V$}AAw*3!NBuOn>A(|3?aJJ8m7 zlTk%(9XUS+x8yzt+jaNZ*fVe8_6#H5y|4xsoY)Uje~=?&&reiVEkcd!@32!e5-T0z zx#*U16tAk3*gx$dx1kamcb_ zi0!h5VKElyvSTEsUZy@_N-zvOT?5LfQ(U1P`4fi6aeKyT^CHntiF`#pGoU%W&iO)A zS-3;%W)4*fQwSV%4$~{Af^2s)*Rh>EhufY=)Zl{SPU6@#y?jbPdpUNcT}QLVGR*(Kd?Mv*hm!> zSX-nK(wS840g4mj^h;wU+@yG_hdk> zG!d$u`hj#>AHk*05+h36nH%Myn^x)b9V@Sr%OnY1Ud1tctq+*6tr_DZ3fPADbnLuj zEL2XP4Dz|;@w`d?p5MATxpy~n`RK!JXQwjzR865Eg1DC}w`2SGf#lLU3?Z^)ZUd+b zvVL@a|Kqg6`TBRXctgJMW$xY^H9k{^cc+BJ}{ zH5xH<5zYF5eM&YG+NpyWurmN6#y(H48+(KYaS-9)~I7sSBM#>n7e@W=7g;Zfs9bSuzo31zMnld$L)UA{S34PqZ? 
zpebL9mR{8?>`*B<)SO`vr5DlXa3YE#By87?bLhT~_7_LJnHh6pnbC_;V?{Oub>6{3 z>U1o-qsxEW_=ee9-iKy`PAu$v05Kb$Q8wc+di32#`RfKQ;^lGH|A!i{7=IQ*-+MCD zF9hcYCvjV_DKBchrK$C~P;92m8uVrYcFq9LkWn zd0xcr$*-`Y`Z`q3xr{F7DgU234@2{xvgU$oc(_NM@99UKGVY=fGR>6vYwunZ3lEqVL`X(Y^$*nbo>uu>aI%AVuK+#vJY>4g!1jB zJN?aRWgNKAumzjE%> zZThsqM6j5w$4k8E99ftTs)3YS`d*KP2ZG2*yakHCK7|(Ro9Oqx97DdEGat%N>n-`p zg(s$yd+>xr>~V;*$snFI<-Kgrc0qkp0Z2QQ*||1JjMQi7!Ja`*0{QGr$`GvLATQw^ zS{9r}vjKl|i8nmJg?oqPPTKsnFE2qA?ZMK^7K6uS1Kg#j&v(77gw#=Im@3i`$4)Th zr>ooG%oPXmS#&eGm5dZB>4xm-n)>`s=a(R^I7nWvf3R?w4~W|?NUGLFp)znI1bXW8 zE2mdO{Z>cHF3ryNi+>0XigGmboQYO5pFx7b1-z@G|5tWa;(qU6ro6R}i}kNS``lab z?QuO9X=BWbQ}PsMXRe@l_+FG|55v@$L#)wDFwWu~xL4(1 z>a*44=ni9Em8-bwm=KuxG6Pa(&H!1e3#VLc#gxgnm{V5=7co^=C_KwS;KRirH{6T8 zeyzAJtR3y$1t5_N%3d8n+n2}{#vjMP+DV`~wi2wT(CnyRBBu(z2vf+FEI(d^oByG` z{q0w*nS6-bH@%0()i=S8qnxPkc~I&pxY;krwe$Nc6#qL~@{J3IywW--m`VH1$t{pM z>VTqhdI^Vh1F)xRBTlZ=5}Jpefo)Tb`A6!+otmOA80bX7!kjd?{GH}N*B7AH^LPww z$>X|-BiV4M5gT_r0LxpMY~g=}IQ`Fopi=#!uw{2RudemX{q$$%l-a_~3@L^3ui4=B zXegbB$3v2|8e0EcLTBc&XqI9iv`&oRAd%diJFhFmPp)(6NBcoYh#Dtb=cKql*MzVB zZ5r%7*+(cZ(Ffn>CEQ{|BVmKuS(Loa1mB$X7+v`mJ8o2?=Y9hrife_eBhA_J1^L8n zLloh9*62_Nosu&so31Zl*;a5FnBw?JOG}W68u7&m;?l0DWbtWX=tJE)ZtNJrgL$ainyGCweOPEPKUu7{ZUq!8-uSE2j)GYr0-!G(Em=zGg&2VEQq zX74yA;d4-Yphhyp<6rFg`z5$+-b26ZPE@P8#FhWM7{zvrnTyLEN7$>xLQ6Xo$Ndf< z-3L1lyg`G5)a{As0Q=4FK_0CSN$Pbdj$h3Dh8hV!XX^7tzs<%}n-~`HuMOr*3Waof zUwYNdVwqdVvbd5KJeZ-uCrhc{VP%fG%d~~VY5Icyu?*Z*(uKnoo`DgLUuh>WOk(Hw zFCLsuPOFkKMXdD^jIVx!yXcJ6;!9_m)*?>+&WK5d7zwFEY>A~~%vi36X*++?M6a(%4o3J6j6mM!7T*k7J^m$*U4D4)6 zG0)@)1lezenK9Q;t!f{8+9cxLgKc5SWPQPfe=l)9_5oDe$?-Qlh{@b$OQu5<^L;TA z!e97h68IU`~I#8(yJuU)>{oTEs?U11HpTJ zAT0dRj#)21U}Z-soBCW|Sd#n}+TUi5lPN^yQ zRYyW*;tj_p#YGfr1vAl?|4{7n3nyB-id@M&_^f9ToU6-Md`ux{8g-Uaf?!zGzu@0W z3}`*XkcfepHKAJ}(4H;yUt*0uw} z78d1e3jV)S_FKH1#eWG!+3{rZHRnU-N8;4zhr{YmF(^Hd%XYNV^Zf09*!5WnA^!}- z9@}cP9NvXyAsw75rWJZes|)eXQJh%i%Nh&3P!?atmFohekJ*e?wzolTxW1zGd^B-7 zYZZkfMMA>Te3Wh)kF8BbOl43Y5xrCpr)?`rJt#wa_%O=$-eX?DCXmrr#J9G!vk1%k zEOYfaOf^klores;?b>nN*}NNuTu?w@?iD7_T8gT_PoT@?y^;`?hz@&fxTG7m!RML2 z(5_cReVms2^=t_Hm_+DrG}i=u5~QMzC$ z=j~bo!2vNCWzY@E$_bpTv0UPG<_jmD_f=7|wvO)pt2oi@rwXrCIxN072wiu5LHU{$ z*m&&`DB|+*{zS_4R~KW{Hw$dtYRk!+{^3GZ@7To?TD-H(ZkW9&0)qBv3tt2+!M1)4 zy7i&#Z~@r`0E* z@OBOkYqf^O)Ae~C`tmBr>oA|`@t-H@@y!eE!Rgyau4`vBxEamBi0mF#^=~-1zkSWg z##c(#{cgg4Tl|G>-$+iI`?1i-KCp;Co=}&68>G4r&uW<+?S+wJqNn_yQ~+I2@9g9PoQP1ZoyTJC~1kt8c@3(P$_P|A~70o0uwW zBCakoMv+&hHRG6#3A#X3C&{-e-tOKMtw!OQGZ6k05&U1Ja}QF=U-7Z@KXg z2*zj#++qnC%9jw(y%Y=2{|CydFT|Q3&mL{n5L|Y-JEopG%~Uqi5lWAv3mBmD0b&x2 zrv1;nWDMFq8XS6l=Vo4efQ3;dU_0>~*JI&_fnRgbCpZmbUp)ocxJK#_r+~8G2v&aT z5tO|<3lVH3rcCre-wVlT6tfdj)||y_>-J$&r>UUz*@>8^QzhXYmJqQujg$D8@-EU) zi38mWTD|;;ZEzISHYc&x2A;DM-r<%z_i)}{ntYb@wPfsL+9kU6pbyQ~+&8NU5l2cG zmS4{4-*ip;hAAT#;pW_ZT{ch()Nl2#!%k+t5fv5I? 
z)~_P8*}e>wV`st27DIlD&uh*QoL0DA-G^qs_vH^>55?ZvGW2VgLrSqes#!%y zRAytqx_<~t9q%eyUL`^LtDgWV`Ve;RD&6I)B&s^PBb|AG;-@}v-rJOqToQ%$o3kKp zTP4iU5edC<0x?^FGS228ZiwJc?b(RWe)R?Eo)|Fur;~FrBj;6J8Jev>i)G=FSfB3= z%1_ql(2)l&mHD7Z)fF1UW@3ovaVER>RN*|z6D!WV#>@eyaSr>0N|?Z0whUsSxDp(w zBPD)JoR`*GuKIi`OqtTnmgT;KfV0FkxbYhCh6SWtKaauYSFtknI)o3n4%6lmW8g+S z78>k^TP23Vz!m%H-l)ZwPyULgZ*t+cXGg#)65(>+uXxv4lUI8*S`x55idc+EQ)Jcs z$;X$1%BH)Z*BV7Td|Q;od2>g|Y3o{_2aWcxK>0<=WLJJFdWz@_vOxp9h1^Aa;> zjYhTM+R!6)!1jy*C^PO*#22Wc=P4O_J}t%C$|GnMqJ-w!nGki`nBNuB4G!Z=IJX}z zn6C1|%&BH*nY|1pxB*`npwF8{3NikfsXrFh~rse@$2!f=dVCj@Hw!( z-^5u~Ohu7V6O%8`BtGU*v|LHwUU;`3n)GICpZ1S z!(6kkI#~Bm#wqd~7x1kIB;@^$*icW5ues1#{FIxb-o#eA9K$@{N|r@ujLRAwSjJ_b z`&unQDse+O--hiU-=RFc0%U%Nh!aA&_r-^)YxS1XyBz_xc0*xA-8&rktQ#sRSJOjJ zpwz!hKrB1XjTqMgA(}>lzfl?O-zdYrsfURs&tZYarYLz|OrPz4u(#|2NVRr@<$m(S zC@la&^04*fa*R0N$i+^Nz`}9%Am;}`Ya(@g^snKrAaWV~Sini!{~<@q0jB5i4I&QA zp{~Wp>oT|!}sS#0ke(NF9$`a&hBMv&#f07uC{C>8%x2w#g6bylJ9iT*FExU)Vi+iU zQn}9M3aD {7Zn+ESTI|@?<$AI0QLiApc1+fiuzwCMgBTjeW2Ac-* zq^JwEp2YoFmVlkU=^(o?7*cb#fr^~A(z_NCH&q5WBroTJ{kzCT=O_V9;tfyKf~<~L z60cd~n3usKE>P_Z3~B!e1<~KYd%<1QQP<*ww_b-h!*)!J55f(%F2cb!1ke1_ux+oA z(7jPxNHn~Uo#XVt{oWPM%W@nR-^hTr=2WcrY`|p|A7PcfrqDorUK`hRa9OR%`CL7V zRuApLW^p1~9%{v^f_GTxVt`B7T~xcR0~!IBAz+!Yh zx)TEzDwt2U9-pCkjCxX$+^#j+!l&kQXepI}SMLVSX1@ctSWYL_=~CA4stmry7zxRv z4?*?33QS9CfaW*R;Pq|`S8$oSjF*2xhR;!4Ifl`EdjWX>FG4tcLh;PUARBrNC6*6S zt49vuqIH;mS3}5sd=4Y;+=7UIX%79vj&q^8PyPDmw4Xf<8jr&0Tk)OK%721d>wiPD z6=%6r_lGPbRErO5Or{<9=PlSSeU0^VsIRi^w8X*C zp7o!%5xt^_Pig1QnwOsdCwjk!xE|rGc5i~XMRAz^`~ZH^Fy_PCbKs!(3{LG%1?iwQ zj@1pu(DO)wcpEJNkBQe&to>7AYxn>|kIo1A1TlDy%mBX+PayqZ z3hgT|au&yy!m|CVFlO>TTr&6_`u6P$(zW%9VT+7~#9HD^M;9o{xgHEy^8oZpX>OEN z1g&S=xTH-_QR#zRV0;?8HuNRxMN9-F?JbHsP_|O)?c7?hmKHNRD>?edf?l1FZ7ytjI*@* z!c~=g!VQ+S*c4d-wJSctFr7YtvELwbO)9p$lB+{q@Q1+^v@#J+G`H7Yu!vXtuLEE-n`TuONeRx8O=VNVD(4FLV!^-#8viU zm0kpdOfdz~j4e3MPM;Tz`kEck)`MOX1j^Y|C|WfpDN-z zTCU+J?i;%QJix5FX{M3mk3P2^f>IvIYy+>OjWZRreSF`rKJ(15+iplv-vr9lScd$k0MC$1P@x)yV8WMZK9MHcid z2J2$2$U~})dOHlE{Mm0P_wGmY{0R_lx(57<#-hy4D}rB44e&?%MFEU&4D;%V;kDm*WvVt1FUMFmXI}PBRA*mNAMmifaTIfS)(FWN^^oig_4-oOW1$$ zckuEV$;P^$1WcL@5q8D+CWd?){^yzfk7Lx$egZ>S4MYUIMZ2sHh+)Qphkg|X?Ab~V z?+udb+2nr8Sconi2T*FfN74J_3hhF3IIFT&XjnpLjtRFEpUgwRtihPM1RJ2|pe#@; z3&~c=yP4MG5UlbP(2TQSS}RwgcQo}u6&ixoW#aAGt>au~R>F&|BEe?=Rop4xLq4H6 z6p0r>;fpnZ1_hw{&w*2qp}h4A4UG6?h4-Vi`QOO{uPn|%>tD2m@{bR3+iL208%v$qK@VBNKUM^O`%n*Mfrwe&TbUlL)x?y zO%6F|>V6$EO>SbLsSCP{S}IZfF^U-#)9z^LG@yJvIjcT__hftYx^x-b+hRZ#7KF+$ zQ*f~u#r#jCfn~;Rcy-U1cX&7fL+|86MCEaoX?onT|6yH0>^2RO`w&NJrv_ggGZ9y9 zQ4>^;%el;tR(Nns8rD)5=+W^|Fbmxes(Bx=HSG|Sr#GWa_76Aq898FI=Rvh@G$^@a zFrl*&M)lJYLOjni*{5=L@I^5g-MxqSbO8=Y_yoRYfe<6^$8b`aTBC86r)=t)W8l)!!4~ePebxORFsJD?TKA?y%ePygQ%v5U zcP60fw4{FDD6Z05A7lx0!K&~L27a0W-zxq>_o6K5wZ4Lt;elwR{R>q5serw!wFNE5 zH|UkS3u+q*a0=zBR6i!Nvnj@Wlf?r#HRvng9(BIOvK#zL%E(DYeZ!?^xym!IS;Xx@ zSUyPv{*)6h_^yPWZx?ZF*ms!PRE6VzH|AA`n8MQaIK&qv5&PLsQI=1Ao_ON28l2*? 
zT7F6#^p|1q(jxE;9Yjp6KG<5bmD%;@h%M8^l7`V7CSD9~U+64sqN{M37=St>jrk2{ zD1S5jJTcX2AK6#eL8CGq-M<5(8nHhnkR)Ltgph3vjI}3p=-HU{uKp)Qg)9b*oJIYw1Uz zA*BX?t<@7Y`(K32o#~v}*LBSLUO5JCEd`4eMQAtg8;JW1%a(cZj`n{X089TkVxgP| zgZJH7E<1) zG}=hq*gYR1HbVnK_F6;pJqy_UF%;<&liwBGS@M!oP2M}jZ2oDQfeuupKK)E*ib`@|3Y=&o zW0`Fg3ZsR;qwObWWJ*JR_DEBH=jOfCc|C=y9Xi`9kpvJV(m z^A22wUVtr>E!=!YL%1=`~zM8{P5GJ>yij7jd$0rC=6v2}QE`DBVL$y#eMBdi>`9d+|`dd?v*7D*^MV zhoH0nF;tUXP+0t8jqy>GTU5U0!h9cKrgA2P!9}e6>5VBL4pOhdKu{JAV?{B2c=4^J zoZC=$@|{y=+4^^uRQ3~hZAi!2fk$yMWp-d;66R>sgT^Q$KHRq+U6_>F=_@gQf(r9q52E~_0W9IcFiA_Ji#-MVAq#vG`M02ra4QB$DDUmENWy z4}ihp`%pYtB=mUp;WO8tQ=}WaqH5nsPQLdim$J13lxo zH_ur{Igw|I+!M#RUH5baJM9|Wp8AA3k+jp=P42GAkHK}rduZRZ7;-l3$DVb6BG$Qr zNcEYk7?p|nTlIxqfA-+EcY3_82JLM*4@f2+WM#@A$Q^Tp$ser*w{aq<_P>Z>e z|3tj<)h#YLnpm*INzzu@Tu!E;Sxh;c`uZNUZjVQ|;9g7+9Y;6gWjK{O3nEW@ z*x>L65--lcQ-@z;_<|*PEr;CVb^oCK7CKj@rx8PJHgo~afjex_|LpXAXNAWdHW-Hh(a z3t8)Z;(E^PfSR2;g2iu3&^_-vC+#R#G|Y(v=edYtub&FDKd;a`;yH9G>C8CeFjUiX z++_Gcu)Pp~zfdRdQ;d!l;+=d)O##CJy| zo5TJC2MbqDI-!NL+;fqOFdYaj#?&u-sVh`%slmprMd;=k3Auj%atZg}p+oEMXxV;? zbMWkou5S$lso6FbVxq=N?_K6n7e;ZZiMm49IO=g7zX{=!2V)vAs0q3OHy4G0{M`-~ zuAC3)P7UM+&qD*-Oj;^DCAMs)KS8b6NQl4X zhoKMC!Mkz^DnheRK4m0Z@E{%wL=klUdX4>wb=$6bK`y2els=pQYN)PI4Q*!zao0iq z%Vn%JJPL{KcVWnFJ)vgg7gT*#lN1VlLHx5z68EKw_RC8kVO$7Al2{m3?cV+3Kf$%ay#OFAEz)X){x%P1>=uvqC zE$&#O+I<6s<&7TBxv3lbPc`B#HD0jz$4?dEdFTFI*#LZ=4PJsJ|zmp8Nx+)$=0R(yMhGta2-+#n;Na@G>C z6?}tnFeGG%R}zwIywI-!5V{Q+u!|nH;8x zHznfdTewi29mVE2@I=*kVKoIZmKja~(|=lWp7g5#(NAXlir7^i&P#X@xh zxuBpTFg@@CH^`qrL~$Y4KIa99&BwCe>}BX%(Ti>A1(1110&0%unP}&9$IOf7IDMlG zti^o=<*Xiv^{GJ}b&xD9#{Fxf1LEmLrH{37#IBa9dzv@2YJ~-IOAd{ zNF9!WUA!*ceaTz!;x@P^M8PiIlW3Y!OLHhw!DZ%jkZxYcELIs~$c+S+yEhih$9};v z2Z zB4B(Cdg*p!!16yJ=TRZfG}PgPCcVMh5s{E3`jMSFy_e;!$fb<;erC}Vf+J~mmUBgi z_tjG7?O?ah!U(V{|2P|tkfsrpog89uv+GTfQ z?qm%taBGA8L;LW(?=N6%Q8Y#-C4<+Tb#P}JMjYa!0uwh3#Z;d798k$JXC>3_@hoxvM?>uLk+7C>uqkHG@Z&}UK2xyBu3k+Y z^ntX$^{Zj_0425Jmn}>FMVBU#zPBqxa1r9I4h!KaOwRbZrN{@oaF;BE=e}*TY zYV$)DzN8ti6t{;JLD0GkJUK*1kSu=--nUnxbmT2X(%ELpx~n9ji0NEVbQc!v{{YS- zFQdF-8++SW3l@XuTz4{9;(PrSSg+IK~{ zFe4Y*P)VM;6mKaF`0_iGz}ays%1*c{D)-NT^g|Conf^P(g>|brCj72=ozjl zylyrTrk?BpW%FEU@+hJ%-Xw*z*o!5-J&m312e_`0FUUh}$e*Bltoz$noO@3>ySB6h z8$U0CyH7NQ1Px7|73%X-qfb)rZ7DgplfY{GI&fKZEYo$i4qw((4*`$opyc*h-26M` z5=L)<%*K-vi%*ZC-*ye5Gvh9^J3=m%csfIVqFmeEVhBkz;V!@KMA^=8uI7{>uk>BT zTrPK@L+w%=>8B}79a)HD>A4oKOI-l+KxAId#@K>sD7)7=rEug~Xr6xy19%lSB!7gi zn?F!(rKP0PZXd|a>{#)3+TZ-9E>y86u>F4=oqJqN>-Wc-(nY0p$zcu{0{3QoJi+^q$vIv^gO+U z6{h*%OAIMh!VOvuZi1@=u7bn+LNL!1AX;j`=S_Cw+RXZ)7pM@?|s zJ_Kb68r(sZq0sf|0lAB4_Bfle(VDk8@yZ>XQmYhe;5YO*pw7z%2_PmfVpw4y%GcQ9 z=DC%yb@EfnD3*aW?c@{Ro+OusKWDH`0UmK*Iko%Rm}I6xD{Bto%>40E=NE7?>ZaUB z8t38k5*#1=f{G8xsJvgRXmZOy(S|%O!0r_1e8CyA2WE09PQ)%*Iv91g$N)dz2Osu= z%cTzBu=I9F&@7|xxA(+8EP(jtdg$Ecfw`$tPPiK%z99&)F;-0)cNnL+Fw=2>wxp#oQ}QmRDn{dl6V6u@^sUfK+0@noSO4{3&uy zUp~%R&>THC;RJ|3v`C!Qi%|EsRiJtME4HL1QSMjZq%ArM)2vZwQZtTSU*+ zqFmO@Uw|q;8Fwb@@*-CWKzucpei?xFGD8%kMiscVP-1fb7 zO!oXxe(a3XP@JCy*+o~lsP!VS{Q3(VOlbC=X&@wwzXCNOmrsyOo8PoRb4)7S7Nuc(ua}TWGs>o%XPmuc0d`c}Bk%DK zjJkLSJbDLV{P~qo-unfMTk;DB*0q79wJqeYBqv=(D_1tE6)fCOQJ#{)uqH1Y7_l9! 
z^xLuO@44Wdvj~mqboub`(~vw=08jmDkS*QCGF+)^m*Z2Q%Cn=+$sIIL&_ciCrkHCM zf$|tSyM3?1n4B1d6@Q~B+MkQq*vgqy|Ah~RgyMreF~sHdvNbvPi|NH_3EieN6LTm- z&5bRn%qV69tyd#Rqri6WPhyU~VC7P4PIJIqtfbv*>;@}niFKt}f0071`!|Vv*MFc9 za}n02zXi7;sd#nnQ`%qb;7k@Za8Zr=d`7=qVy_zT>(^=tXhu%m+qan7NE7s&^#;4s z`eK*mLF%;MXEs$7XCi$_%< zXRpTl6r6*0?;Nnpeuv4L*HQD&AhbN$o9;!|uxZMEcFs8xQYwrD<7Et%I2!TZH!tJW z@5JU#e8ZLZ3Ir=>;z1ODmZ&BYJG8(Kd^HOpW1u7O2lNDa_AxHEbrIx6MsuYPXx5>+ z18N`BQR|YX;9mFwfwHdwuZMu!ya}k%o{0H3J7`ur3h!Ug5YoyAU(R{ssPDw1rTOU6?xg4XhxBy4t7|W)VU>P}zBe3O(=}F^Ohy z4>-R$#PLsy1&{mRnE9k)Xq|iz#h!;zwk=g6HChDO)Cczq34!Gws7H5v0(SUiaSp^4 z&ikm~M(wEwRq6sxe@`}AWm`a08o36_Dj@Hfp+ZzKUm}eOW0slju!tCnB_+o&Kk_9D zjA#S*^3M=A%bUjXpP1{I7m&x|n8)W6=umzDt#|ejr1MlP@p*5da6$`~&$45lepMJW zWd>N@aVHm?8#s5`frao9K0j>*%V1Mz%bNzGW-oAm*^4jB{0L(mDc|(Q4z(MYa*^b0QR`Ybr}GwmzHewzr{gQjBYfEg^IR8O!PT8ARv7D?$p+rV{sFTN(#5Ugkq zF7IB6i(}P!{QCuD-<*nhi+BRZ#)91=8N?kQ!(l$%c;8sp#abE%=#iz#C7f&u3#GXb$}c{hvw5S)}An zf7Ir)tsbznS{*)rf4d^!(_ye}`oxKD_Av9@Xs(hniNketdFQvm@QY?X4~CzGkmdWB zKum%5CSsHbz2QMzDjpd`+@Np3^)>6oi~qA>4cK8lCvEZ$UQvWDy2s2X{^H`R64?(rQ@nX+Aeb1C zm-KNPmo#8540Ha1!8SRNsjJ2t(|P(xNEuX=9-th!zhwPQ1vV#=lkK`Q+6*|si7v0> z%*#vBW58s(pU=WG3U%HmY$GH;Ps5W@Whj=FfbU<)V9;uSpHp*SPZ~L2tnOpWt3{N( zJFl=AT8Un*(U2L}3J(Sv@->AaJBpk*gpWZ{xKZmhO{0!YM{0Dhc z?563izsXWfC&158Euo@}oH`F8&~3?06z>g_$ktg)>@T!)uH>rk$aBW6TeSu8__egV z%;8imUQlyGKxNN880&cx-TSWr)!u(VT=N^(O#A4@`gsuUvLE!<69?0ra?eIXa8*-0TEzW8i|SN5!;RrYCqF>u#jhB+tOtT*8>kOV znOpNqAU<=AnIE`_%3(547F0o-jvd-odqdu>0xoY?Icv7A2B!_L(KCWv5hhny!t#$0 zskaZDwQj)Y&2`x6TE~^o&F1ub$}#Cu37SP+L<7qN%MN);9*g+25#%5Dv#gLl{9%9kIR{iti;vIe%$Y0gj&g_v(v7kC z;qbrDblvkFnh{-sj2HL7-Q_FID$cQvIfiI{aXPBp!=R{4j_qErz*RRMhb~Bm^72S# zA)&keAGTc6=47M~-;E!a`iR{4J;c9#oJ?jfMWWchNpBQ8F1UyoG zIpy*gMe(Rw_++fkn;cdlPPYQjcV|(#XbGp5dLFiKdCOT=*P+MgZf@|-W*jcp;O+M9 z2l>oQ?nXZmZ*7?c=cHNSx9J{YT{rlr9fPJ)%5$9@DCsyQ#fWFoAbsGg=sx=s?AtxD zKI;f}zh4NcFUN3gH0$sfVb4BDUZPzO&3da>f>eFnv~$Ovfd@VF9(O!gPg4%o+eV>B zg(XXFA~v#q36_4($8gq*|J>&}i0}>P@$@;mPZ9|MPQ3(ak4z#}XiL0(%g{sS3>I~} z!Ajj5vuXAl$)}nmfL6wK9YB6InY=rzc3lh z?rRE`3syp zj|HgqNrUCi{~L0{=-hWI9aJx4Fy8wvj_uV%d^FcnYxp&3OsJUWNP0u`H1EE*nROdn$#6@&~OX~o(Vvg zGRkc|Foj~Xw`@Y*Ikd2UiE<4!ShT$mWWIW=c1|vA2+-$^W{Cv1@0aoSiF$m_uLyMY zdjLtYCm8(v6(sc8k1^|r@mF?(TQE&mnDY5Lm}k9WH3u9}b$%nNO$n21&FjH7Gb6ZO zNPIyHU7mcQoO$bSP`D=rT!;zVV|5Pm(^ZoEkxdG-a>|F(e8>I!KWJ_d3^E-7TxL3f zY*z_uTDAw)=~9guo_-}LLdZ+<=@FWegRA96HICeth~n^XUgaqLQ@q zR)?b4{-9P2Dw#@-yboLzx#C3YoWQ+sBgD4_p^;ezYW*+}tkn+S;kRcoKUc=RC9vL$ z%NqQp>BS&BG8Zb$D^O}4h2B#Y5H;`>tbgzwBdy7O91#wgG0(Afqn=Qn9?r#9jKem? 
zB(S1)rdV?Ui$9}6xkWTFi_Y7W8<{Zq=m<{!x4NMGVT^%P{j2tZM$$Mh$CDrCp>X1!G=b$DSSC zc(GhV@LpsGpJ?{BKK&LXJ-&@0+vF^%=rI8@&K8XWpk$Cx4=v@4WteKuqJZfsx8(6yGc(m$ior0*BkIB zzn>vqxigbF{=q`~zvP@nZ?JA!J9@i*K;MnM1PAR5=vp3%j^{);EbBFNJd$I#Z!p9U zz0BfEPJ==3As}UgaHQ`=O#L*O*>3&DGG-ZLqxB2iGMRXBx^Gxp@FwtTNre#S-_P);K+5%zD_6LL-dM*XS- zaNF`3${Q*aD$6OT8{i1$L#&}vC;_pzJJ%rj3Cq5h(YraWK%6oN;{!*auD=+@7Ive_ z>dUZON5ps9v~bq*X@9^E<)pWQxcty9lCXZi5$nTYT8q5}Y7(bBDp!I%Z=ay~+UbxR zFc%fQf1pXhG|2NLhQT~_8>g@yROXLTbdUHGbcP%A>xnn6u`LhV)-45-lZRma`vm%) znooIq`Yt!wkE&BMnf>WH)J{)Bd~^@9zsu47{x+!kHy9$9K1BnoY{=+68zLVZ1e1f$ zIj2j+|62E6ApJ0;g!HTNRGo#F8 zu&$Ve4HJK1X>koGFV?e&TgxD|av)aif5)sH#PG39LkO$<7oHyTz>*dDpd$X2?pbvx z{>RB~E7JwcE6YMkb--=O(mVP_H*Q#fFBB?1!XoE7^j_?V<;7*-$7$e{H&@VkOf0BXpJR?@ zzOa8_Z(dcK!fbSAGhL%GDC>2Ug?&lD=kNOnGc==E`*QNAHC~2nSu*pm+RjO5R7*?_ zO#+Jx=fH1AfADZ=!=Cw}sH#jQF3NhOgE5*M>j&bM#BQGL57`=i#BI7iEkm;is{HqH zyBcXW(H+P>IK81ihjOwiGh*i7QLG^!b=z2LFkb!uOe&~%u|I{g$(qEO-MWtfMblxu z?<>r|@`Ec{A_9l6mDG#qk)%#$oajalOVF>y{?o(pa3{T=9Gh6eDFx(iI}I`JA5g02 z0S4NSP+oe8ZA}RQ=Tjm$mva%Na)FcG){r!ueTVXSpD<~w6NakjoOR(Cn&^~qPSIU( zEZvBg1*`EhUZq1qu{IA@p#5a zZOC6+r!NS{)oEvupWkWN#SJg$&HLCqK)tavV>Pbjv}@zBXxuqslALBmES&kt-7sIx z7u)SWqHO$LF7&r{G|y{=_JdU@Uho#2B_l8*HGxTezQ79&10iWhHnACqsb-Rl=1)pF z_%a2g+g3<|$M--(DO7`SV^#x{WnZ_sTeYSki?4 zUsEuQy7jW|;n3}p49+4u*uL#KnUpx6to? zQ=}xai?T0)O)Mjj7>pq^Fw`-VIi8V$-`~@5*{h?l?AJpmNg+1xFf**&(gmVuB}S~* z;P02~3Zw441GOmzyn#h;KJ^y$o-W2J)E1j@S&BaV$?gQm-t~#uzb(aj6bX_X0pC*o zuJOt>NLD3*c#}Dk=a(x6k1d9(iXEuxAs*$pn^-=fBFsl`th&<8^`s*nu6)?n|hsG;vK}G*Ao`$lmB*GCKrJRQFW$@ zW&d{!PPW%T!pB?S)qV!?*m@W?U@=xMN{0PL2K-y6cJQC^8kX(R6O2Al$9jG;S8}Kv zN`~j5zv2qah!tbFp+01%^pXyF!G_v(?dYDE{}=-YXrxXt*D`4B!Gbko!?Kwjw>ac?CnfW zvsMJlW`4l2uGE{mc7pXkLCmSXTZygmjAb7qp>jnMxyZkx{o*zjrIQIdYg;gT8!?UFZkH&3 zzsM;i>MTO9mQ(u`M|l<(wBCOOJ#Oq{AI9dx$XnS6yJJ9X<%J=?&Owv&E6ziuF03+o z4o#V~8_AxBk*plVcc}|>?KIfV`-nqE=n2W|uVbNO0kPd*!lJXEF(4+5naS@#kiiBT zc@GDd+#9GY{h?5831=#{kXY`Yp*4Fy2J|iBPS$)Om+d5O(T93iRCk-0)qH_$un&tr zwHw@qmx5E!J_yum$5rOhxb-;oSZ~}0|1m=$Y1%q`+Wj6Aia%ju=_r&Yyi-UG_pt6= z?pPDB2O^C>L22s>Ox-<{D;j*36%EwmCuki1;m%bv@_OKB@uC?>=BFPN)f&cPZ-UI!nfu&& z2Q~k@jzPfUoc3a>v)G%L>mdY`bL7zHZA^;Iqmr#(#*YYBf5T6VIG}7jfSi7J-N-$ z*Hlw5*}V!opEnabBoOwf-3G=nb0-t+xd4&U z6wHgRRGgb|6(7E$-kr~6&V1t%44ifa?T>L7=5YgR=Nv#)mlj)p={(A9k4hp#$(L|> z1uNG43ti4%1fw6<1Av1PgNwg|>6+i6f^0 z*3<1VYVcGrDVji?@Z`Hq=WX{!(CK9-O7$lZoZ;u$g_Kdu1h9j-G=4vnFE< zF_k1OwF@h;957 z!s;}j8}&ibP>v~!41_f5V5+>^(C03XVx3o9!s}C5|93P*PU+1T(!1Jb$^dL!@(xYR zCSdl;JJ`K|cp~f9pzFS`XtTtHb6FbzqV6_Ds*a(I${v}t_R)65uE@QKZHM;zFJQu%2yBlr67rteG1n#0ATRjFwSBDu z*{wHA6rI59f+>1(MklV`@ z?u&NfdfyDpGat^ymp$g%y^F9!*^Z%2TbNt-KGb+rjPbPB9x*|O-<6)_mHYl$ZVpK-A9eTXI2(0-+^piJw9%4=Jv$lmVabUR;j zMV_Z1Z(t4y+zWd+XyWuyuj$Wh_An9425?~AYaxkafw)i&^7~I3bZgn zZ8P=8(@|L>$I$p(&OW4`^J>wBjvF_*Ru3H^aPcGRm=1@h#HQ}LbPw{={kXbi&oSh+ zI%oE&4r&xR*niM%lvbafR+w=CgAIuvo3$MCrW#>bK{w{P*kZt)-hyN9RrK_{2<1Fw z(0#+Xv~&&L^5r&g(XV1l_lSfm$WC`kJr zk%Z;GgcxfE<*!v(yR8DU1)6t_88I!{M}RzwGZNbvdRBrx6mAAfFlep=Ivm~ywbnFO z^%}sGHN>eJdyDwSQ4;%s#5|a|23sD^#=P7LZvUwUs470l%{eL(+-qN8%f4E0USo_= zt-S=x)tf$Md4-2#z7n!#*S+dtm?{RR;c|JENjO@#?$ZU;8P0&XZxYXrz~(8 zaS*JMjzXL22;@bGz-}~gqFTp8wqhIv68Fa0#{^}YE)b{0NXX6~0cMwuf%&vU+{0rk z5Pi%?xskJ?cyBNskMG4Bs4CIwhY1AHoYA7K8k#n~V^O>BW428X2J0NdfT!1)G-3ew zD2mzG&OSnU(gx;w`~ue7orJga|C{PI1amzG;}70|Z*mgRF4x!A#w&&8Jrl9KI?A}U zB{6BA3eHzwn|F*JiOQ#0Z0s)L?WAYIZR$t3`)Tv4YlWPPq6Y$_AA`pMC0PHn9*mBv z2_9P*D;yh**_(fWwZ0|b?@3@_e;nlpKTB#>?MBBV84&TpKyaJN!`7WDP}Y7yW!j8_ z@@8^ZxUPoD&RRlw$!c;kSAjuVKFHPRxu?LT!$pv}`x0nY`GBf^6KfoD8oSCV zYy4*>$R3_mSbwMmxn(h@`*scHosU!~zcwjkUE85{o*~~GY)&^L;9tbd&47QLXE&h8p)-xi1YTa=2f 
zhZUgvbq|*sXo7E@U!ldHM7BuNVG;8T_+>OsiWO<`5$VS{=lvs5boj4=j+>V-U_ufG z_W4G0`U(KodJJ#S<~u@XqDO)zlXorS5?82U)R;1G)S3mWj--HUGj-jvzC%}f3#bCx z&~IEKdK6l4758&+zfL_A8yB%)nrVsFdZG#W^*&V5?xf!{sBx`=iJ7mkl`WFjZJ3(~86-O3{s0%VpQM>jFM41rR$u^Vo)TgeEw!Ng%+(0ltOV43rDsDF= zHm=uM)-odvs_Q9Nvq6uSt6NBNGs0oy#%QbwrFTQD(Oc-u?7$h(vCz*{9Nwl-2`4!7xK!e66PJ2>7@7~dNM~g!*iUKazFBEe7TpG0x}9ND zaXfVMcOm5PJ}&F9y0A8lSc7(t$+_DPpM1TFL4T-$^(1v6a?vBSKaj;-UG;?GV-8@R zmksy-mzBVSpp5*X5Kps{#7~LhRt1D&IC0nmmS!{2$=O($xE*%rX$vkRQkm@ze|Ql_ z-PzZZ0UJ))r|q~pyg1p z=@<4NO?=goLz2#yB0+8TaD{WnU2u>q2u9S4W@T%jlHUlAmTCwK&r^5t{Y(tCyTMIa zNG!d14b%tA1G8QFym$)7&5=C%ADau3W1oQ1a-GCFgU%&8V-$Y<7lP;e9=c=3G0i`S zX{q)IpZvOk$&^2M`rUxnt-j8}{foeb{MeQYA4BMw_ZYkW5|(~cpeM~sTH{ZFGC7;G z`Q45SxEsyf0%GCZkZ^3Ni^t+8?vVKR8GK@yg4)~(bX>NW&iGkaUgU-5R>p$sgKlzy zjAS43L%^kP60{B64HFkOU>@#g%EAi;0}fNyX#63J3Z-t|MkiF=UCgBQ^xhpHXHI9S zkDPrOy|4OUK(A0Pw0Z%{pxs-Tg&LS0I0J>zF_E&HWeD9X zdSWE9wt4v0qmw#(lxzQ~E-7!iz(h|gSj=?+hlU&g>CpLHVOTtv_c_b0npp(mQG2=K zfy*#nOxf0%zd&hnkv&pj!vZG(fLvMrjJ?oVL0 zgRx-q

K$-Uk(~m-@$YqXO!=o6eEC7>$c+OeN7Os2b1!^#}S?w!c+D=}5p|Asau&zPFE z5%XL`KF%Zb*`?Wl#>(5!lYRmsW-(B1ek-Y0qZ!qjT8zz%r~msGob}0T;5Pp;sP_A_ zh*3+?@1PYt*c=O+@7_e!K_lt_>PzGw?n&a`Wx=8s4RBi{4txI56zaV8Lx^D%W?v4* z)WA59?p{%lG3q4*&-w&)ez!4c*g_~j@4(4cRzrVZ>I^?UBMAvefO-RUq4u{Za>hMl z>&K@<>UA@4>Zira&liAZ<9Jy0ECVzRXJXEYNa!dVjjHc9%ph4$$jOd@p6)`-yO}33 zcYVOEitEMqPd)){EgLXwZ!NfgQ0JY9&A8{$88ClS2xpd3*1_xo2H;uB=Jo;IKF2Y{ z+L-H%4WisfZ``a&d7DS;!DHtU(7u}jR%@uiB1?ly1?4EU*v(1qm7`+%9Y~tf1hKQ# zQ2FgR*OXSmb!TjV?!KnbJuVX5%q~I1-{D|;bS3vNnHYFSDEGeP3WlajIr*`-)4Vq7 zprtv@BNND@cpw^6pWOh_=yGsBybdzk&p^berz~@ins6@q5X>D#UdqKZx4JP>GR&Lq zVMR*TZ8U^-cyB?ufoCT9wpdS2pyYGsz}&8!N!s?|CjD^qy^xQ|4wukZr!U_>y&J>+ zzK2#WWe}Az7Q+WefJs&`XWm8c&DkfQX|WMx+=#@1t0_0R?KKxeT|wRUI%c!dnE6cD zfQu%OL$J(-B?|OCa`-d^pQ|E|(RNN**umm)HF|0E5|T#tKhp=8Xou9jlQRn14I#e6(UW=~a=RwZMtX;nF7biQ&TlD$ z=|Z3BbNFFs4Rrk~hR%O|pz7^qmUs9F@xxzAdXw7q(@u2ejnPyi7Rs;ugvK*BF$lyUo74;us}k|-1T{e^ zj#c<4z6UeEXW(mzXl45l9Pijsu7U^oKh$Gg(}1~Gd!yslz8F-00c*t z=n@u&o|YYGFvNhDO)%h+hI*6tH35&#)Z!hlA%0l)1h9TMcx+#eM}PGZ{7y6hEHQ&h z%2P%S+>Re|8_~!s7UhG9?-4HLYzMD{la8q%UOpRSZ__x7+HQ3I?;1{`K5FODA8<{g z#Ru5-gG#qE;M?^KaN;)DT6p(=ymqefnz7LBPt3*QjS!l9nT7rM7Y^;!7VN*S$Ih1? z7%)Ve+-mz-UAi_u#h?N1UNI1C`>uqs5A=9#)eSWH@st~T{ufGqe}X=*wn44kc^KQ5 zoI(3?QDT|~UcKcYo#_Z}>mQ>e_c1C@P2)s;bGflEy1^>(Cd-LD4km57LaOIUc2T0u zJDq)rm7^ySN0a&&-?QOuMFU18{{**jnkcS(rm${V0iflM_G%ZHyl#R-RB;ErJ;&nh zvE`6jYy)|TYQmHO_fYo!Ur8SI9_4C07cq+1gDYhoQshdNl3lh!g%f4*k!CLgM*ourSWT zRTs(K((3}MjZTwzeKp3mam4>>ltOuz6W7y~iUDIy@Rh%@V85-A8GJG10!`t`<7-&4LO{Rv zQuOfaWcK2}Al|PUx-TCG$AOePxUG%>lP=>FWfP?8Y~c1XU7>uj7q`~XSlCCisQ#O) z(8W)J;{G>Ky$7Wpu6>Qn(7d4&rodj4-VNmrkD%c8!`KI(c{J_JzPf&u;yFl1~$);7im<&TcTrHM_jNLmPqZMuB^ zsYWz8IS8Wq?gQ1a8W4}*p{p_pib_Mc)P(^4mBt`5k%6trYUXoRpSOHOZiX8-SZeoe z^k2FfpBUBQ$w$|rZeyLuzIqD-e^xKNQr2e`ISEW6QK#f9gm>?O8NVk`=fQ`&>Jl& ziZlPSiF2gfoJac$PI`GV#0C|>rN9OZe6Gc7G{=MZc{<)3rz_MroCJUQKxi(^!IHKF zoM5dflq6JQ1NGs>=jU)Oe>>o+P8}YvodUh2 z?;5nZag?=(Jcr=f$I-H`0iV3OMI3xT>@N}VL6*d%y>S^`o(F;MfeUCks}DL~Z^D?F zr6~GiIykL+1o@wWxL{}EG8X@kST>r0PjU|X)ZdUdp8N%}yC>q2Vb>t)m^$ydknZIP z$1!zGUo_vG$>r4Shhc_)LcCWlDw6ZjaUAgmj$Tv5uTSB&Q|8%4|1`I1_9u+|Hw8k2 zDbF$ZHrIdmF^F2P#^?WOz>CiX;nUbaRMj2kmOJPQ!IOHx|Ji3q)-1u>9~0qM1F=Z5 zh}rqQp7`3~T+)v~tQ#JRom*Ntd7~5q91d|+b2WJn`Zpy_c@I}?jLFZ^25^m3Ns;6f zn{v27)cY|fr!&gyGnzu6(G?8P$Y=g(S%|Gt>?)#N#iaWzd2AaRZ8zjCzjCnOhw=-3 zT8eAW8 z6)XEhK)?u^snW{n)R4ZsW!Wk6YnOrQ%HRLb#S33}4XwweVB2)+1x}A=;;=1foBk1+ zQa6IqDMzAIR8Q+zbdL)Rd6jh7W&qW8V9x{nlXPvgE|bl@4;_RG&zJ;p_@k{nq($(9@~F$1|J?nKw=8lmYId|f6c@8 zCk@o^AB0VAAEA8IVe-|9$z+Mtm-caO}oLF94x{fJ92YU|8z=tEEKPA zN8dd+U~3k2xw7Y?a%`N!rW(1beRm;fR1Z3jItPLDdr-aJiPzHgc=09of@0My=#yZ? 
z<2`x@E^H>p=2R}vhYTDsg^(~h*;z4BcoUW4rO=1Vfr!LbmBai zm-U9kXfp_ia6wU(9aCmeru9lIT4)?6Ue!LZ9`^&bLo*npUqiS(5cP9E;U8{#g5^_p zP~}thW7t~M%`wFCkpZ9EWz;W`IbG`aQmPnQp&UrkwbkQ+%g90Y|QY_7@VoKn~@IOVFHm9)j{;qJCuo zX5Re@6RxFU>K!#MWc~>(A2yi{t|a$Jpb~X=U0`7s-vD;M1Bbqu;Oei&%NuV<5@#6* zdGb&;XWd!KRg49bY$wj@!ch*I{jlsYIZEDlLvrB-aA|FT&0ju3QORM}`sZ%2al6fF zjJb<`GYY|Hd<+ZPegRJ#84HOw219D=S;*6W3Qv32qts?4n9RS0@v31E|ANk&Nl~2T zXmf1-{R9?o`@noM>T%m9T|uO4uZY-P2vVO6g+IH9-tF`bdwGO8jHDcw743}F!kEjL zF>tY`7oQk?3`h0*348vCKv|QPFgrt+*9;g8wOuz+zq}s%>KgN|uOHBiu7)cWsX@rP zG_E^AoqQci@TnzE`VKX_lOf3QEJpsFP5DGRmlS`KSdP&I(fDS`?Wgf5{TRn=RxJRtLv`44 z_zx`YlLH?PB*5T1U(wO{Ck%63gkEW9p+RATzLRfb*p>Zo?!#jo_U|vWd8pu=R6SU6 z_%fFN*T#vi-s9vpomlnW5f}RO5v-;@7XX@kf3@5DR1 z#3Fi#A#<)4>o-SX-dY_l-~>5}m9(ePImRrzDO+&u5(fQy4n3ZI!maPmVeWMfa|c{T zdCCuTj4`F}tUtN#VRiUm>V7own1q#K9%!kPKzVaS2~WBAv9&mX@``;=h=k7X9bBhQ z4-+lgNDO)6D0|FddBbcu|I3#t7kZ3+RwzI_uo~z3>hncI#ps-S7xxUSgSg|HptR~8 z4i%2VrWH{*%=~Zg|K|(TA2!3}qs<@}>N&jJi5`dMacju)kUO>&haJ8GdpdJbb>E!n z_ad*O?KFt6N@KEB)&&(8GobSp^+lp@DJ%{j0<%WCW6LYKgavUBXh3tPt-ImLU$I~_ zWDIuasi-e5SIp2b0I$Muuo?Fkimig#l<&O+jYq$5@Qeyn9_WThmtrh@5QhQmJY2h{ z!w1vdqξ4AfqSo~sYt97!yLt7G`X9_**NPn znbt)`xLA{)E$H#qy^S!~J_>D@2XU>DVc1jOjf$l;kbk$4b39v&E}3yKE=k0j>(;Wi z(~jV9qllaV>L6N2yF22N1ssS6)tC_QnqW!ywskmXcQx2)r-1aI5lpkQ3*sVV5O6J$ ztC3NcyL}xj3)+pchB*qQ>3Ce_dIs_ag(^DByO@Vze609_{x%B>+oK550Q`d<`*Bs*u|~H%6NRAN= z&%z*dMJ~igm4fjV%KZPETCljKH(zfPiT;$OZLod_;xAq(UlzqVZ?S{6P9OAnvxoaI z><9YvNNG=TOX0j+fJow0?s58rhkrW=d2{bdO1oUJrwlPUxgM0=>Wa0;n*jaY(ffWY zl=tdKxvY_}HH_u~XS~sQwJTWP)aJXt8}r4Lr4an}Gr)B$12ZsgCkmn!5t-PWw zC@vZBRmm}Ex!Ms6)A9ffI$>hi2N>5%42`OHpxL7@1nF%-S#y6deYO`Xe81xMpAB5e zz$grKkAWGw^jz5GqFL4x>{JD#)t^ydYCVu#w(rrix&}=8uI4V>BF4wP`P{P62jTW# zxj5MSE?CxR@cCvflBX#u81|?UpUf_Re0eJzx&IzY?~>bn+#8f#PsMVv9=pmuqsyS3 z^nCTh*v~YFjE;q|lo<-E83N+JeiVpDJ(M_j6YIh62j>KabGLq(S z=bu5^1r6$FlgrXVMV*F=ps+gxPX>Jg-9gVGb#@t6C7eOi#WVvB7*1KkKhdLMCez>2 zNq(G7ta({Am{n-SI^-Is+WCq(47^4T-vHeHekDW&7znW~d%^ zh}@b78hfK~(U&^tOx%p~$>A08Jep|?`hm8uR^Y=OUofRXPcXe^0cGKBXnt%XUOIT2 zm_0gt$G$9F5k`EtdUal@8N<3wj0EwCW?KUzGu-=0hnF-M3K8#eh|@|=%$L4g)ycgW z7P}9m|9+&M_cJbb=pHa2K2?RoV-&~BxQDU#P_*4qq91t|#6Oxao~=gt^%Bk`ObYGC z-htHNm%@>FtZjWJLTda_oEX;wG4GRb@S1mM@As2MRZc;ZTW-Y9r)-$@1U6+&54L5m z#)s}d(aWhH+PGb0V&_arYr-+?in;(Dg~y=t#1GapjW`IEZ?Jzy2>5usqh1!niC^!5 z%YK0da}n6(cMvM4J;X3#al3R>!&dbSuum%CTvN}$gK4`lbd)_r$(}-gPbHR5yNSAs zN?3f=NhZ5s3bM8X)3p90FZJ7eMXpON<{#V1G-F0$faVE|urJ^SdlRq5t&E&XBSEyj zRH9K$v!(HmINjyMMe)i)X}lRY8A)g_+{1?_T2WRV&dtyb;i68R!^t}I8PmH5sq2WX zu;M8Og!RV3i&ZFoH^El5Hka->$2n!rPleL#M?s}#DA+#{n1(?*IUc@&$L4RS`BFmN z&e&-+RxY4cHJF2ChoN?eCLgej$M|2_pqTy!QS|Q`xt|M-t>m`WDp01a$H#c&p}{&e zkoW4r2K{oVw=+h&fuZ0m-U!oUsK=Bw1opTlfoNx~LcFLNEF;Cl!0F4k{AUU!>G7C- zSQm6dsb>^)89Y{2vW{J8%;<)i;G9djumyU;u*LsjDD&b}PQ$T$<{^%q)Z+aj7J|s* z6YCgwhDAJ|MST1-w(PQ=@L|?7ytMZ|Orbn#s%60cQFP{UF|O|)Z?qb1B$1Azlkkm` zC7qc2dJqyLSwfcVI`%9f5~m~)vW$vMDan##Nv7t$o|Gg;B1vJSB$13Il!V{)``61K z$;|WI_jP?f?>EzIGGP+pAvMkY8_!rHC_nvX{Zc5?dG$J)j_M@pEJrZi8Z|3;cm;#= zA~C*uF?f5LQm1u3*Z%3sKYO+l6nrXou#kuv@~+84UDcim^c+7x&sH`D+U}c+U5AIj zp*fAztt_aJx6|N9?g z)GtKEGi$V+kwEjkrRt16Qc)MOl6kls1@jBE8{5;KS$U4;dUZYN9deEP-7bUPmK}uv z?M~Uk8{V*~h;|sGEkQNflky7W@Ng<&ZDTfox&9$^9I;52dh0XD|LKe#gQdc?7~<8- z%>>7rlQ8vIIMmZ!q};9reAg2Xzf{Z1<{e@-zq3H&>W`UIcS1&`52!pYp!CO6R_vh0 zjDlcLL`h{DuTe04(`WqEk_hQ{Zo{=69mN21HB-4w=Wc=j@Qe#O@SO7y*R3Jn=Iuk^ z zr%>^kdc=c|ktbm!2AUCn(sm1m2gG8))xGLDiH3sSAG?^JLwm89+#Y3v+pzN9OKjBL zf`*YJ(LS#TVn+N%-~SCK4&_|tJlO|Rw`K7t`(ngRPf%rjRc3Q`KNh%}i_tPSY#aO+ z+V>7Z%_Z7#9J|KWjVQ$h2YSLI2XkSc2V%%eQ-~L&;(~=S5I)322%Okn%qB+2vi0N| zAKMXBE6Q2M2`8v?`H2OM-_f6v+#`;NNh{i;aY)U*ripbrx&`HK=-i 

P^8-shY zDC>D>7#l%57`aV`ByaC}$UQI&^xpi!gf0P)wY!n)eDn$0H`C6MW^&ehMxojF$EY~1 zMjHYUdEHn|S)^j}?ks@hPk!KD919+Ii7`I=0gG+-NB@rn7{2K;ng#Y{hm&cxb}S7t z=4uM#B$X(h?hb{UjD)qX@4|%})FTv0P_}d`#LD+#(L*!LORT_-Wl7L$z8p;N-Gm`$ zXumXlH4ZTgqZwg4SamDFnWIhl<9BLMKDCvp!ZVcv28ZDa{azSJP8~}vZ=6~~9G9fa zFl4>4uxp?WA3Amv1kLJ6?xU_ksI?)~x(x-(6aV4)gEgq$xPnWTx*}goPFM{cEXmHn zvr|(c_IfNzk7cp|;*tbx>crNU5nw)PI=YP?g%K0~#iq~-lo0P$)FMcNWyCOw-NdQ> zJN#sMTBrg0p;=$;% z^ri}81Lt9F*Fg}t>=D>Tyr5ZUFj_a=#cjSOLiKciwA)YFkHkc*lBltUJao&o40!%b z7r|Qd4n95J4hGaqNL&$4-qovI#-!7@gV-#7|FyFn_NP#+R|RgjAHX1vn0Yg0$_$@# zp#5nyezg7!lK!V5b<`S={$7gBvtPo#-(3_1-C+V4r2j!9OF0DEC1cu6>X|#o;PL!63^{DVORs$8X2c!` zOJW&`zh48_+df#|gSg)o`KWR^hFLut9E}o>!p5N{g2_o@>CL-^CqvgkRBRsR54{4m z!53NYV)9dV{(}LYZ$SRpoU<6Uf%tZGw%Aog%pz^!dkVd0uO0yBuTQ|XmUi?3jM=U} z4+h_2ur@~rlcpO9UblF#k<*UhKrwpQ^#`vH`LrW2;$_R%bJ`Q+u$wa!NS0#ZLGI92TgXq!Q87tkW$J_HfR$fDJE~(|DMpEWP z?|^_USzxv9Bg7S4z-7y0!1+`WvF`^tYWFcDC%O^3noI-LxI3628^U?DUqi2qePH&7 zcnMxFSX0dv>=Aklq&26xyz+8pwn!{ckQ|pSCO65x-Qbr|!`u(Va~FFV3-TssrK*n$ zSLE8nLR*hvkwlL2GZ#$~!Zr_jV~NS#@Q8eI&ayq-9lSDrDvvJFIbDUwpB zzwp45*ARMC1zS~ye1x40?mMVZd%GzgyY3=nbQyt7kBW$8R|7`Vmq5zCu6#toQ7qkb zgo)16xv94%DkjM#32I}gA9WB0tU8J%x#zIQ&gV2!ZAG8vPVjx1gU*!G3*4CmE9RO~ zwzvm+&YOxAT?3%9>=to0+Mtw+gx&|g;*3i_VMQYkE3utB9na6YO}qDtC~JDl68@!k@7i)=8rO2oDMP^^BOP+*$FpAH)o7n$ z3W`(blv4x$V94il7(KrimK~tqN8?lpejN*;+XAawr?78>N#rF(ONqZI2wcfZ_=J@yK=wi9yI?Gf|#fe;;EGPer0H1^xhEQ zL_B%)esB{~O@*V^HHD%DU9mJdp1JK>f;PVaN(R!qH2ehCj@=9{nit818%(~NuEMe= zn%BSS2JP!ae6Yp8+}0^wd9T0Xq3A?4r=`^9FUd3o#b_Uizx)cw0?3&zPG*X=TbbP& zJIvEug>#x77?Krp> z-IGpn+IG~rUVV#6Uvy<6VokVzq0Y{(h0uQMEbNFNu1S~icy`==*zl|x3w}`k>BJF9 z@{noZaqThiY6D)DRKnO(O<`MZ8Tf}ZpzZQf&S;(iuj*$B3r{o=M~D8*p=fZsc?v@x z%zzAI;=jfeEwwv-p-G zP@MPz%Y1*r5CwTB&VEtGRv%|`yJ`w~tlE{U028m?2BO4iDrp=+2)bd*q`)( z)5|sGy)Ca{M8;nrtDyYd1PM73-6+>~8Z1w)1Z(}?SaOB#t^FG@wB8G4XYPUBln}7E zn8rzeDJ9a$3(#*(3#K1mT~)W=Qqa!GJ%OXv2Hr5j`>gRI3ehCRBrO?Be)1JK z{8Xy7A3)@7Qz5piC%BQ9$h6=UmVRz#+G`3i z?w>={aXN&yKc+#g`6!U}sAA^j`a-Hf3m4QQlyVc@gqGV^Va{w~VQxGNr3W@+v*UjN zpERhmrBcqJzOll4g~aMX8RjTYf_lYA)VOI32{ZnNypPWyF8d@t=^2kPD|Li-ng&9` zSNd&w(0TY7M_iGiO6g}Sw)T1y=8t`cMLV^b-O(k0*0C@lZxF~&dUDGOnjqnhEf;)z z0R#{?hjNeH(~JffvHB40&glK;91pqvKCOonJE!5_;L8b<7>G zZoI+S}2cULXQGYbt~Or9PLz6`>sOO z@fEo2s~R(=PbAmsIlLJXf~`TvKr-eS%xQlQswh`3Vxc78}f^Z z8!XXMU5mYmlWgskPVTf9U`yz8`kzlcVnEU9?fUKq2|lD?^?` z{+IU{{FLKt?-rv=vWXzRT+d`v%(wujYDl`E#g~n#1VzPr&a?PB7%XnZn$4eJ|LaRI zzc3Yb*M`8|vCpA?Z4}%+{2dyOgrVKFVZj+gzGK>2$PM`mkF;tDUQLu^ z+LI5-W6j}aWGC2etzpZ?844=bNQwB}Kd2q)j2gWzF}oj>BfIb}+ou@8Xnqpd#hXE= z%SG^CL|&=LeqgfY1SBl@j+L>$!29@g95;^~?T1sjhc4}qx{&73?`0^?a);!OKuGZS z=Zbr13o6gqT;Px>$^^f|u$1oPK>ozVKCOUdaj)p_Y=~y7s$ri*#IMga6|4duqU~BG zd)!Y~khZ^-$h<1Jk3WBcb3exHyuC5^Lkvc#-(gi>kx(+}IHU$GgiMx$7SywIx=UTv zpY7P>R{^47UZ~ie4;emR(8h>z7k*)^BH0tn6vp6nNMIplwHWAg7<-H&kF0(JC_fqqh*vrBod4Tsj2urheZLtHb~OdXx4MB(mqIi<_mZ=dQNLwS zPmntfM2C$vFemXQilILS@BYTcvI-2_(hD30tYSqsr?I7;SC;A29P_PiDP)73Z8TFl+1SO2ym-oa*^} zX7>FQvzA{$MNTp7_^E^S(|4lpTpiv$tq??;?H$d+4sjaax^up1Qfwbx0#4-N@s(-` zo%MIo*>EI2-1-y@eo`JdTbEz^C<~V9841!HE9P|dJ$m{+gkaZSSZy~M2J{Gl??xKD ztp9hAr{^i>TQxzo`5fGFU>_>>x!V2WCox9ER?vu`{MuEQm$`2Ib+eRvX9 zp1h8!QTy20SLK+spqi~Ysl~VZ+=R8UPf_Z$Rx;yFDvDFdgGqb1uo&brRg9B_T*9!` zVVoB?9%=?$0=I*)PzAH%x{lHpfD{^bf<@G7oi##WF=-vY1EmBjUZ4&tmM ztp30)kU!89I{K7D{omo3GWQ4gi5gKczC#l1b|1Ni+QQ)B^xkYt0M8B^C>zj@!CKdu zRdxsl*kwTN<~Fu|y{XW$=_q5lRGaB7jnDXM3N+x>q5<{O859hBJ=)SR(sqD<4+$I~Gz8#?FN)TjT>J1)x_o3P5 z1G>uhLML1$2S*CnbS{NIbM*!5A@f=NKpQ9u*v1N-jET)dce_b9xEb$Gg4d$4P||xd zl*jMEC+V^9aGDA;-ZrAllbA^7P5I;(&%t@xC(irJLa2Y`k40ZUG1b>kFyP2}j9RP2 
zp#D+pSFw&zv}*#G&D()ihpHix7+kd;Z_styeGp#=#SSY8EUf=cdxcAM){X*&-Y^vT zE#uIi_DfDLAwKX9xKoZ?{N*vE875%t^nCQae*{uyoWPu}2Oyxa3*~mo zV8;D$kT*-R-_8^ZZj{S(x_z9wBFAuy7l(7{!<`nr3ZsdS8WwbJC*)vcikAn?&u40KzGPrvj=PatS zh_yBZ&H5WbNu9pno#=*A;*&Sbd4V+%^^hvg;6&zbTz3OK-fpc5y$lPv5y|_Yyz?|Z zw$c-trCV@}(19K?XEFEmP|A@+;qHX4e2_xHk{>idMym|H!sz>C`jXRbDFo@gH=Nlq zeW7)5H~ctFPuMZ!Ah%*f7&f2zjna42Jug`Tag$F%s+Sn-G)_XZ>mN|huweFUCZIX( z!qqoq+-;Qxul?;C+U(a95|*BVtV3H_kBpAg8Wv&o@iMg7{hszwyQ!lw5LG$5BnAx^q4r@G z7J1yjOfxrTF}M$y-Fd^wTX{*-`X(rBPC=QEE0?B0SqXBK+JB9s`O>8<4L#%R9yh*$ zcNm>va<4GEj`Ki0dakgAOUR=rGkt@XpW1i2sNUzR{IawA(3D z4^*N?B=J3x;_;?_E1C{2hG6X+=8_(P3nY5Hh-Nuw5_Nfb?=MQ1JqF~q+zs6}lIK%D zlruWgi50uoQ;z&1v11>xfWM5OZOM1^ygvtQb6=v(Dhay%MZ1BmM%=db=ddYDiRm#_ zn5mb_*}VuQ{zex0Oa0&-I4TK=4ButA7g}2M;&TuOzdqAcL_iv)_y6-de4{jl! zMHz@XZ*yXCA$#aa+_cU|KtwN`KRN|ee4<2Z;G}FnTn;5Q^P$Jmb~+P2#H=%I%qlMt zyax=&jX8#V`BfbVG^FRb<_Bf3(A6LohclzrDhvrT=9~41`L(B_U5x3(W<^fDAO#`HyN=Uk@V=z=0M;M{JHM1|)j){@j!n4!+c-!qMc z*#8nB)xVTUQ}dLme(hXI8gVCPx?sizF`uA_VuPl3<<-8_m6)cDrlK5(J9iE*t=8tF z#|q$`T7juM`am+Zpe8In$RgHULivsY<%A7qphQi**zvP)_M14c+7OQRKPyKcEOXxuR6O}l1+yE>i=`|BK27?fqdO}PZyJno}K$6KyF?IKuPg()Kzk$)kYSiArA z$+mL*0;0H)j%OE(1iR|Hu%lHUDytIU?xtdFo%oy!a~ujMKYc*Aq0y*)X$po}u7kG8 z#=`qY+I-l;?Wj1{2CBiPtcbr4%PyP;=dG3Sf3L6W?z5mQpyzNp$N6feV`$||3|R0G zWxq0T$Z5(iRxW2^qx(=9b_HVH+ps$I9$+71c)MZ{e*E2)xA>gOVxuxay;=g)fZ z%@W)Y^8^OXG2;DR5w~MrB6ATFztr~vPFP1gj-+y8Mb@KwQXC{4p?UVK< zb2v{jVNocS9{C6=|E1*I$#t}87Gq||EXdiP2hkS>gLqB_i_VBcdDUFzZQm6q^dx84 z?^T%jy1-F9pn!`=r#?aBWUgc^WyBsaM^(&FSh#BgmJWEq8gybnYyU&6)pY{sJ{r#t zY@pA}35-(j;h%u6f(zdP^<##C+a67!uzw+T?5Ex0$&Xy>oA*%uokEBV#0$SC&k8w~hT7MpD9vkE z^4qofy!L9&I2>Ejvr#vS{ z61f#S`|9u=Wj5%Rc^9JpPDb}IF zoe7c;PtgD9Wsn$mqFt9{>JZL{GYcr2=g|m?#b2;jimA|HwhevbPJnr0C~CDep&w<# zwNK6DNlU&#iMLlf)x?L?H1`J0+x#ZdXcfN$RSoA_)Q zT;=kwV7F!)29v)HG^E+Wj}!Iz}^R zgPR~zrLbkM+lhq{tK3VOLf^~?(DH8}uK7zen&cVzw=SQ0X)C%;5z$$D40aY>28BkW#5(Vy(p9z{^EUW_#^2{0*H)AvH1#7N z;ytuBbdZR*Z3FlAP;N*0DJ&|c3~|(akVL!&jfx=>>$rtb*FgT^Rpik!Hxgo>{bF-G z9^f{8a%!Fbo0CqRjkEtt#%II}NTY6WAL8Y^MYf^e^Y<)BHxW9fZ^62W;rOXW;{gIF>o2t}XkO^|spm&ItN!jQCF@Ys0(Hryr-z-JeBv*sw&6kmea_-xK$ zl%^nCza6W3+y+^$Gb{f&6KDSl#e%0|-fZb!rmB0xD!aaf@RPIyTCtP$o|6I&T{bY+ zQ|m$fVU_+cen=UyQ3ouRMVZG&a5p|eS+ogQGb$Aw{FZacwUZ#Ae>pS-XQOU+Vzum!mu#*g#+m+Q6q%|d zhKG!K?>@7z>Q@lv6|0&1y%Ln06@nkX0%AlZ<4TmqNe zpxF1&Bn8R)fY zHSNr|VdAG8Vg(q2XQMZDDK0W6%ZJ!Y$CS6c`U-6H#zFn|OQ5kep0hM1j%H3gy2~r@ z+3gqyL*|4)>$)(` zL?aw)U*xd6jSZmJ@f)hj8Zq-gh|>A}Z%(nl7L3eJP>*F3%D|7CDl`&r`WtNerYD>~ zy8zX1UT`1gnvl2VG}ygP1>ZOKLCa5@S9oqESD^*FNc)$@EKUCD^-K))H4*J`v~F$UCq^!c#B-srL_ zi}uc5%w65ajCx4HbnX=hz55rJq*fo5MWFef|4;k{Itqniowy`pf#4}HOB{BQIU zYKi522`1MX2qRq2L;JX+&^+E7#RI=`OP$Dz`%epsrl};l%6<6lr=IYI&T0+IYrxjW z5LE38lw(>`sN>%U7M}iqMyP=E|Bk`ZA)UBP~7mVY+!8pTfAUU6g-g7^r_OdqE_CQ0( z+*~OcVe|<_lvS=CN1X}ExXZqOg)!2{Xuat_W$Yp)^$%a79c7%$d&~pVYuCWy=V;jB zdIi00hl9ldOK|NQinqsf5lk-;zhyk4Rlj0v>-&>sd8r*G#m6wm=@UjcJcC}YwXj3# zj<-g3R=nIq6ThxmOiK2TKZ!Z~#ugMbSw;q3T)Xb7%_IR>R@KV8Dr zb}fNeOBYn6{N~W^3N-&qcjWU0N>OqyrszaM$?_#w^mk7T+p!T-P7|+VpA0RYH!!!V z7ElieL*46!d|Plc+rC;uFlbG~>%^meQr}fD8#0x-Zt#J^6Da_DgU~>#%PYwJRCc-s zXV6Sc{(U)95BB8T4xRyzPhz+_!$1faAB)Z6c1%be#08Ih3{rO!tjaq97DiDlxz}s7 z?s--cJa#=(^>1dOhgQ(sTN5@^d_n7NdnIM{#5O#q;3A4sK^>!s2U31RX3rT+-Te`G z^;m+6M=so)@6TbQyGYRNYbu<*a2gdQx}0C644ppa0Q?Ha)S~y8lTK{lC0mItt06ew ze8;u@*TEEygQ0e*p^)7cIyb@58h}oI}JPP^g-Lz0sJ$rpgMv+gZ`g6)1kLO9Jmeb zEvImk3ycN7(qCw;Hso$EFcNBZGw^t7D%5np4Vi8ktZn66PFnENF*4u+lyqB!r8Xl# zF^@c`i^f2*{03Uza>o3wFDNT^pUu#I1RncG(2RNss5E~;bHWwqFzO(-UvIdXn+-vm zBB(R4g_D0UM7d*ErA5;l>`*Jv_QYvcI*cevc}wxcdb+dBoCbN1uY>!r28=Jdj$xZK 
zVE(O2bn|zByC08$JkNmJvA2YB`HxXHqYTAslF{OC7qA_D0K@*KJjy~-=B0UxQ;)P@ z??cFAJ0h3!D9DAfWs&IY)qpi?KVgbxDzsdson4b2Xe~{_nG1FJfY}_@tG=OZyc86F z^r-_Tg3uG%eEfoAP&+}JH`@1=m>@?m- zxe&KX^w0PMs<-Kqt5=P9b;fwwN#2EV+iRgHbRSo~z!@hzI1ZAk5;ThN0;kM_P_yJG zNN<|s!>OmC)c-g*J*S?Hl?Y$dRU+G{_h$SpDrHloK&fpx5(Cp?wITsUn8;idG!y<3%2-^Io z|8aD#aWSoLA8$IFYAPXzigre{i6qtB*Fwl)B!?t2BIFQzu-oJi3MnHwj1H2NBvO%N zYSy(3h zK$K4vKuUunk@SxuiTVbd;5nU<8kIrogul_=P@OAte+UBu4LKoP-9Y^SlAT=IMDlU`0MGv6(7oAxaMuNF?OvKQvJ)kwD0Nr{JO}tm2 zU*u7?L0gU45|IzGBgvQ@VFZ3lV~M`=DO5bpC5vvsRQBW3w=($p6(P!v$!E72Kz=;p;mM({kiRcDDy|?dFdj0 z1WW;uZ7ddL+(h@Geze0o9LyT?X+Q7_s0P@MsNYg_F@@I6w>?x<| z*PsLOlEav7;R~hx_Ys?S)I$&%PYS9|6MKDa&VimQJWk$bH=gUk^yyhJZP*!hb?j-l za=#Z8xua3_r9Tw!9}b0;1Pe`UA#QyPl+&I@lZ6t$xM*>eYZTq;XQ!YM$EkV z3bw5Ogei2^)40tS#Uq9j_giDIvWACm`e`sRGbj_}$^%d(T@d*TW?}rO&1f8T3o<1^ zI70UaE7`UVy~ibnH}r3O~C2i`wa4~ew#KI7;?NoJ4H{TVNnq6mQ>)X)b&v$rxJQ35H z-k=H+MRJE+;yO)$68AM&Kyxu(9eU8JV@Rl93h?JF%yb)yhsM?6mR{NyEI*0^c_m=F zr2tcCPE;nih=ZDRI4QS_q+MJDDTyUS&|`xeR@}jp(aE4umk3qPkHG`tY3?8%?0qyL zWm+$YgTE24)g@>@%7)Hh$CCSVcuZhL5(!*#oD2=u<%FFRNO7<>YrT1(`)u@h={hhcH$W2o4E8@5c=W>$wEg^nTX!2SI=Q1X9bQOPrGeQ+Pd zCv{k!{T@0$IG86YZ3n(x8l*Uh!N;HW_#4|CV+#meI z`-AH9Qr5(DI}|R>AojHfarAsmuJ>CBZh6CEiNiD45|9VNl-Z)~3%cOYP#&XUa2s8Y z&BsW?0Pr3_=l*lEFo?POzj-Y3th1E4W`>;K-9zXxt(lFP@fdmQioqnK3ezcv!0GD- z%+&56!fS(+mHEd|O&B9Gn{ts|Il++1pk+AyeLu$JP#kf`J_zz2fqvz_829KJrUh$5 z+#~8X+H{*b2^^dfznNl12<5pv_$^Wme~2poBdoA9i_V>WALl zp%LT$-&XlhCBj9V*R7 z*ZN5OvQU@l*hu^I!@D4V?l15vrHqqwGnBZlVqN}p!Jk>wGqpXBy|!0}36-l*Rage5 zSxS^@zQqd*jXB8l{G&F(lpV1lC{`e3qHIHLeRh!Xz)fpT(^D>%j$!80k&O*uEa+E7$F-`XemZ#G{r{5*{ zm)dYo*H6s9Q4czr+DylVGvM_@0A_FM*+4@?zYo;cenFqPO?xAOHga&mAUYR10>|&s zuJE*rtmTANF!maY9Yx7lyQu=(cU(bSHi6FOA319T-3DB=gx;AaDkEC2Zfi3Nj+~xc&6=J!^CuE{vr+2NzXd zm$3`_pASU8>yaq_Q9w-0OxXf@-jf{{!_ZHsFlD7T$}~np-;ht}_RnhwU-AJaHNGZf8#&$O7p{ zKKb@qn-Od71jVc((wNL)$@62_e1|$3HhU83$7k%HWqM52pgSmaqPdCzA6Ri+4o1o& zu=;5lBxYHFO%gmClah2oN4UByyrN2B){Q z6LE#Qyq7(7F1te+%gJ|8e9_K%v`aGzq7S0hA0Br=mpUhx%|OMr!64pPf(^nn@I5;l zRV}}jt&cXaPAY%4boM5ecKA6V8=-97^owAGDU`pRkG)F@F#cT`NN(q|^3~-a7hV+Q zsI-|~-vh99niuT9NIMP=`S{hZ2~-Pyi$+k!TC^Qy!r5ss^5`gK+Ff4^DjNe5#TK?f zWz6-A<*?BWVIIfl|RPl2fE#@gcrelE_vfgsJvN7LbmQ9nd%OZyj_oz zxF{%_ED2mkP{+}VhwR|MZ5TQ}6!|aSunKcFuh%dY0t3e3#;qDmZeI|L6!I9WU0o3T z$B-+1tj|Qj6VwPZq`r!^P}F!2Ma!e1(^Qkgje6ku(U_|{9F5X8h7Ejt410eq#1=^) ztgSwQW>4-D;hAWtR-c2WG~<tV{8cBm+!#pbE9y_ zbrqcSOTxm0@u(9X0*5Xp0(VlMNl4b_ay>tg+3T~xZ=xC(;gJE}sz=Db{EW5Rr2^rl z-=zGH92;Fc@v~V7mJRy`iwyc`PJ&}ys`AO4JDnJqolN>)GU8O+8|B*HeOO+S3cLd) zxK+!TQ61J|92W@DqB0Ca7%AZgSR&tZ5|+-T9>KA@p!HG`E0zg~Ygrbgl?kBLWfMx9 zA7K2Ky|}052i^P4W$in|vDEY^wuU`odoRz!nh$Nz+kO#(-DM>7ohi+eo<-ho5xalG zJ9N`{iCw48!ZvGlChqfd7|O2!zvI7H`L1)uY2ev4_luK--49=eKf zk;2>oTfZ5wj;EGG%l3Sfd|nCVeZN86{fz_%#v#vP2{BuJnN&nnpwMB3NaIHu2A_%` z*2XNTERFH=g{K%8`+(kOI>5#;0KKGX$a}qq?fB;qnk@aqwy0geSDOu)n!b%t5UWA+ zn%6)+a(G_t-fK{MX=a;)qyJMAwa9-E`bI650~ zr7NbhZ_yBbKj$&NVb=jR_QT}rN~}D@;Vu_{h&oh9RV*dShK+nq(wa)bpXqQbDaX{$ z>@4VLA7$1K{7N(o)V zTKacl=ed{Q8t@e(%C5texo_xxelmK8H-OqlYf?#Fmi5(RadbyJJUw_4JSU$*)qtx? 
z7p)BBR|Zm7F?HO<&8Dv5D1gXHaMuikLc2`pI;jB3r!i>z zgch?xOy8b>wzIXkE*Cm;ak~tM9$m)`4+_vP{5KJ%5;!uq9}{u@B$ORhV^pIWa$6{a zuy0y9x{3zS@A+RS36x^#hGQ^IBa{_bJi)FrbhfzjI1HSi#n>2r1&stvyM%B>y%Aj!}Aie)k z24s!xaMo#>6W-OcPUTRl#$JB7fbW#DQugU%GkLvJS!8kdZL2J zEenyuEnuwIF?4DALWF;pV}<=&${EsP%=|{61O56-t(W81_rGyjn>v?v>oml#rB0Pe zDH!~m&IhK>N3~{c7U`Zo^!hIBxHJw9tvm=*+Am>R#|f0a-$#a$D==fs4^&%xSfm&< z2FefSVvXevgxl|7f7u^&`I~xJEVOV@-F@g3)WMwYFR-QLJ4Sd?j;y7C5kAhv%IznJ zhq4IyBV#GMiC$486G3`#E}7=m4~mvmf$U=&ZoI3*rG1Zw2!}uTb>2&KpEng82g^ug zsDO#3`D@;Y9;X!R1UBSb1KCnvhF#B0@l2W)C*L`X_{S}!JFPWX|7t6g{N10cE$Tz5 zm%b7=Y{2AO2O)cZC7NixgOK+$V-i-18vp&qu6^e*#jFm+6t@k?DBs3S>-&5X*#F#NLHsI7}>T_lm1PwI%F+4V#W|F+fw)ZEn(>D(#-@{nB zg}1VM)h1{?u?ljdDgTMgME>d%^+3ksHjV; zKc~u|KH2ncRQVQ@+(p`)bng%+FY6eD^$Sr|{TJ=h-^ev%D3_*M$R1LCz{HsX)H)`G zmOJ`PeabTGE%AXv!bVh?d!vRCgCk`s}t8v%% zPgur01FKQBXuR(>bnGn$qt$$_=J#hb-t-dOZBF41@(6-|4n*1SF_7-0&WykjSZ-Me z8$KL|COcy$K>Iheyp_|t+)%=A{!EeuiJ-P6jC616A^2$&v_5yjWqVgZ>7Hk-+9@Gc zJm|$0&N`gzGJ(>*?-*K9jQN4oFE~?^k?**TlG6+5ox2b_9EvHU6~HfgHfFZgLGcPH zNOkj-Nz2_(aOONZCOfeyzdT6I1?pPeKMd<~XnsEEJg#+k4}v3(=ts{r#UJKj=2~^A zmQKf%fVb$d>@*C#t;^J1PKV648R&MWAEPRpugo>t#YU?CgvYl1m>72rl!hp27wVBS zuX{Y$%u7YDdYao_vkuZnszAC(S6O*q2KC^Y{{1k_*;NU$pDSU&EGRf2ddj*+0W5^Uw%3{zzjV6;SFlXOg z%F~@dP9^`qDT-vsw0wf%L0{OSkrBxI_EH)CT$4$wIRPu<^*OJ3GOV6908+vnD7mk8= z&a_A7&_%Y*O``0z7Ez3GIputX!{Ws{TxrmE;N=HE@2k_`SThC&j_;w)t}WoG?hd98 z@=#t@%clHW$GVJr%Nk1*=w|f^t+*N-;ZFHkPyfZW3k*2x?I$omFcj=R8z67u3ly5k zz@@emeb-%wptMae`#eoaPFxQuM;5VWy`!LdVi9D+C@k4T*(|0&8K9b&GBknpc;gO= z^&z<9)(@DYWz30#Co9wDHo=4QUuoAngAKv?Z19{Uk~VEGEb-7|%6Z|)ySM89z2uoR z|I;z{As@epEulWm=2LRQaC!n8db=(G7#8DRPl)g-5tX6d)!+edA# z>UbXt*RO!Lj24$&unYtK`+=faf6$e059#M`(av2b>+;bK2e${%J@XhWzsuqId+MBQ zY8nc|H=~oYY|K8mm#~u{w8VfIcIw@l$TIv3#eDiWvnt2P#zfXcn_abQ7whUz-DI>`9l1TM3m*gZR z;>rXKCTA*(E|%>i-gg(KZS=;Nul-TunhaI57}3hP`kdDBvDj6WN8)qmf(Z^|9W|e_ z;{Khi?1UXweSbt5Aaf{BQJ3lYla8)En?d)*A5<+CiyU7NqEq(*-cCOQw#oE!U}cj? z;(7;dd+M?D$S7328p^u2m_x{@w=Dm^8^rs;3-p^pv)3O6z!b_<5I7H}?6aRDSNe=* zj#!UX77rlhxCu)0#jNP*5A+>A51q1C`F;?6v$d88tIo2kaYPE9Vb^(b3-+Yvp3Qs6WrV3McPzVVz0 z%4tsmY14EpU1`kX!~i%VHelr2^cV?a#O@eN9p{E^Ft1gEE7d#53XaNPyU`2k5cnG! zb&jz)BdhT<<(Db$91@wFutLzbr5VK>QSfxiSaA4<_8baDwTeXAhuuIVk4$fuKs0J-vDslUl&*s(n*vU% z_cpKT`Be-X)t`$roeOeV7MQd-vGV1E5Z7#h!b3%vEojE=gDIy+Xv|%i^%kQv5ORiI z#lDGI;2NY4Idm>mN>9?#e(@M@MKfb<*D-l@6*S;gM7qnawV*pn3u7Y9^dUZNO;C7x zHnwOPfvr(F@@w)*YmF01_wH7z#0yF7($8Q=DE}#E2`G-y4&aP&SZb-s+2;0=@144g zWs5Iv$;iR$w2NYgUkU@xI^bRrp~EG+XD~L*XtK4emNMwBltcU0Y*=wf8 z)%*y;<{%Bu>odJKe67WLKbo_6T>>6HXTkDM8^W13=wmY=YKtZpF!e6=(KMiH`c>A| z>oXSL{s`GaPl5EDI+pKOXM}1ww7<9m`lbZqMyHZfeUZL5o@o+Fzjf>n-4f2df%BFFln6H)& zb#m%C_oXbgLyOqV_w?T3{S3VC6ypDO<8iyL!Y5NbrtGguDCI?>aCoeUzsMa=ZKpk+ zwmax&>4jz)GFPO%t1PX&<|d*Pl^Y39$UT9{60-W3tWvV1VH?IIu;X$q#D< z@yNZR%Ap#NBzOm^f<)ryy$PpWDuRHvm7r=WQ~okkV~lK1AU`P#_Lb{!H7A;}_2x_T zf6W6+^UYYO^O9XVB7@$)Heu+lg*2m>gu?HMEDSu2#nVPYhh!W?Mo=GR!OdL$jdBb% zH^#2VwQStAALux44p#o|hM24Am`MMht*?#|ai2L`i8ct*4;V1x1r)nmL(R`m(Dd^- zh%$pw_;LbqpDcv^b>aAw_CQ_UG@->!T~3v1M6xH3LH9LUTx2}ud&DbHK^#P?XI`R^ z7cW>h9Tn8ypbR;Kw}dzRC0o3qAI{*qA%%9=L~H}vEE$P2j42nXnVvB#0-&@#fCQCj zLzM^=u$u>;<-)>PIO=d^o+SxR3IPo;{ZmfZ%7H0_E(vNe0 zr-PyLNf7#I4HWG^hGLC_%C*nG01nPVt+F`y+*A*h5wUET{v#}LO2x__MWp8RTM#dO z?`*Pq5nFtdcBx+mJINm`#l;GJ#&&ieSx~3V`03@N^nNkB{>|+L8XrT*qFMq>&*3q2w{B&?= zdP!H_e{ zpj@!hv(Rz13#95_P)n@{Tjoy0p6wlIa{LAn=+X@EF*_(sorJx=Z=vtSbr`hz73^x< z4axS;K=tJ<u5gqL}@bKnf;j9Yf`A4{{&*RCt%F&t6)Q!qh_~S*(G`!)Ngze>p!ff?}|*! 
z^!`QtCIFe(1-XO&Wdmw|f~UzfxOhgF3)guK@(-5?&)-QYqg_6K+C`J*$D?G1Klm+) zW~C(@$|ni(g!fZ*CX)Cb)U&={b9W3C(mU$66Hu7<5?8fQF6@9eD2{!|_HB*?N6%YC+GHvko7{#ehd+SX6UwNV zTZSR#2iW=vd$8$rIaY4uaNG|A&i7dp+FQRt8$LbvT>TGt4cFL|wF}uRA6`>8P9e6; z3By*eB(nPQ0Bp#A1*xsKp?KbHls{aLj_P-?_lhN!{(BWIHr|69zmj2wV>=peZ$*#C z-oPL7jihC+#F}HP!P?~!*t97_XOsT{f-OMh6S}RZK za_(QK`1ToRHx_|Hyp)xm=u`$@xJ;~-8JN580GjQ|C$%RG7-erZwgyxY>A?P^PRsEByNTV>D#%D_K15XUaRK;1}J5YPuBUlpS?BZqCV zkwEF{T$IKRW9zRKpwE9z$a^|Jud79q6F-h&J>vRt^%?&{F24bSUgdxFRs<`uY~06dl7<+82vU>;zXjU#^_^ z7xV}+yALHqoY@lR3MFB2Wik0F1; zUtqGR2dkc)2RHxcnBbg*79YC7y_qt~>obUW$!GMSJ8}(&)3|mx&0v0{4&aWnsOq*C zsRZeuc}Bn~9>t)wUm>pE9f7{J3*qzkL};~Cp{sT^DAJOMc;8?QT4)DF{*74qRR@o3 zG2kT0+lkj=ea`jJU(mRs4zk?~p#3}DS@`x>cK*%dJZ|TaqU$P5bD?~kVA^f_XCLjq zra(*j7$`W%?}_T*5HJ~9fKB)A zVP^Cg7d3{Kavov`Hr4?=vczGsvtu7#2?EZkpu+u2HIf9j1eMi;K9LL<2CEnsrYOE71nVCmIO>Bo zg8~=ub~=JKqvcji26HKPdp zO`>qMeG2}?8E_u`qT$BAcq~l6NiwaBn9ok7n3vMWfnZ14V1|-$OmQ1syAC-tccBB>7E%+QbWxBl|*G*+}o0Mk1qVdgrR` zfGMEM^r`DG$?{%Eo6?M`6K$dmpJ<*w?xaX){s`8-QDEaguQ1a(n$+6dhL{;V4E&-C zK5jCQzK>W~-4VP;%vDep8U)l{gxk-)+og3^xH#x`;H7KLn zf5X(Re2(9C8q5m239p*aodjj~nI1xZo;l7ajKq$r{&zW8G zx2MMR+*@QKb>}$f-2`{r1$fAd$7Jt*iI8*^8_m4H(aaEHGD_*JdNbN>XvWAYKge#@ zW>i_LN$KUkS*tr$SaZ1t3ZH$z&{*2td2|7DOC>;$^VLDnS38dydlR76=K>UNn1VcyudMC3kF3kpEVjCH23Au)eB`uw$hqoq zkhB-1V|$enuN=06G7+q61w=(?NF20quoqL;c{kJEFkT~&y}Ic}gjw1G&Iq-@R5HV_9M0R0m!*w`7# zDqCMd)$BwFOUr^(l)s>50QIg7E8d-3-^>TBA*i*_hZ;f?jlC=Tw&hFq9| zs*Yi7uY&;s*f@X#ilm;8s86)dh+Hu{7dzQU|r!hRH zRC^mF{uKp+j~CId{u-7lHHi5PV{Yu|s}P(kBLX!y=mH7|y0!u9KT`kF(zj^4tB}-QbYaO=p$cSlLw{lxQ^*kF=NUgY9Ze(F5bNne`KX;>HtHM(UEPUR-_^Ob z9nlbC;6#$`j5&>xLQqZ7Rqj#Cg;WQ6Pj?Gv{T5ZCnsR_LxNs;d-?IS(y8A&b(3E&s z=fJM=6?Fc57bX~<13%$P6gxI!l{NKBeZE6%|0*Qy>rzpl`ujZlD7RRU4Y{`8h-Vy~ zZ+bpv1!~{$vrYjPc{jmWMGr`iPK2&m*Xhn8LX^`jgDXpToZrrOY|k45PNmiZwY$1- zzkdk2l{BGfWdc4WNf>`8kh)XpeU@hljV~hE=(%c~NA_73C-1^#6}v&4jZr^J@;4R<0y1G|Lrw`5zqB zr_QyS*`sVwe~e|{q06^)q6k9pq+Iswpmsd8ybW5u#bfC10!YnKf<&;M#CyEMOuva> z?=k`w_tH#JWvw#p%u+gw2iS8LvG86V8#!5@leg!K?7Aq!uh<3>DHA;9*9vIwIe~{x zoB>{UcV36hJY9-Y!RMT)@Y)B0ID`UJ3c~M3n5! z0m;(KL@PrHnP*NA6FToOHFQB)&?~aP0kFIdy-!!Lm9Nn+{!5j?6Eu%s(zIPd9MX(rYxd!5S7qR7T z94^k}b6q~VjCfNV6pWFRN}7{W)~ADXq68$%^}uxOQSh2A;5>#Ulg1JT)jCe0?PWcX z?ikD#%H~1uK^bL5Q}4o@53F?9NR(#;D~p`zZmeS=zNj+dT1<|C-?|>KJx6z9|FEod z702dWd<#jDow#5cp9y?U^K&cHKrvtJBxFiR$FKg-P(+;w>!{!5$A4h9yqQGC1Vj7? 
zHLmiW7j=$hvGH~%(fZR>=sflVOdd25lZralX7x7+VZBJ{5p9^KOLI_TU$VJo^{!iB)gSO7+4bhg>^UMVZ;g}OJ5kTTYth%FlE_kr#+tPL5wN$!sIFoqZF8WB!J*Y(1!T^kXIMLUb@l#(@D9pj`C`#0T?~ z;=8)+EZS!aOsk>ZGO<&Z9_=^#9R#&s39QM>XGH9FkHmdYV|>@ygFL<1Ic1v_raZYz z{Ob<_FY2X8T(XSvEx!=Sn1$eV-yDOE{SAArzXX?%?fAbLxin}5rkA&1nw=+f9X^gD z=Ievv+!FLqrm_uPr6?;@k%Y!4FtLcTP(zNR>L_KA&C}xA$DgJ-s>PUAoCh{@8}N#G z9qLl%d{hO!Q(tQU-@(#x9-w>K0mLayA|V&>AYtQ!zNU`rZQS1_?PB1 zYy7sLP39s9Yzkp#zo~-kAi8&sTms8Ne}P6cy(3YWnZIo*259X=o>dFl2A84AGK-Xs zoD6~`l%wc*6_!|OFpH!;;P&+wxV2?b{=zTP`qc@rYb$tWm%_VAnv6-K1VaYz!Gtaq z&i}z@68Gvt%eXyg^`3Ty9~&s=Y3VZ+%dXqK}}l4*yh`a7F#+=Cr` zKA`%Uf@zhTXF!2Vf2rtIqkaOTL zPm@vWuZ1Q<90+fR6A4@wLZph3#Pw-6O4m5k%(aLW54yzGT=|6L9|1Spy&OF&zCy7$ z0|U24V8E?mQ1n5-6o1VGsl&glN#-Cndqp8ub(K*@Bjwf}$pe@m;3RWiLde{?7`eQZ6=4wy>#{GOvX5hNF7}=aoJ3F*jK3aiZvNG6E z8AClvK^VP*at`zt;VHw1;Am(}#11>C|I{Y0T9t^o7dz1bKH!!K5rWliiP#7#v}F5k;i zw;^D=A$R60z_f)Y34d(``7hp(N!dr=58tb>m9NG2d{*bQ5(*(;hc{Sl zKS;ANAM;A|O3-})0oFi+GmC9N>Ba*fpKQbhGNzIzTJ``e^;cnhejOX;MwtxG*Rk)=5jcF7#~i+-!EHR+3z-iy32#9qQHTqP$0eF| z?Nw)H9Mt7fp2=9XbLZHEB05jQORP@iBiv=T4oc6@!qjOmkSi5%CfpE|`O3TLd%<_J|F1?uWUxbZ16Cv&GL6;dA>BRQw#S zTv)Hm6kGkF4$M;AQj~$2_IJ=jG6a3%%0OT~8N{r~xJ&zxreAoB)Up&)ME{{e)(^I8zof3R z5)p6k%)HKHtynr?8<@oAfV{U0vqv-dxJQ%OLOqLq=3Us@p+*0$C5cqC0^`Is%4Hjc zyhC?MdE6oRQ^n(4%=~a5dkc^8|^yKj+X^y_TS-aL;?Jd0kQqr zV{CB!h`|{bsnfL#G5#EGs5_3ODc9M`9ao5LbO~vm+yl`SMoj3S)v#in0WD+~?ibK+rz@-$goE0bvqT)3N*s&k zV+3`F)d2M(b_El;l22z%^#2rFj2liq#|^V+zU$2-)U7dQw)~WVSkS_jSU&^zwUk?S zaIVr$)=3>;cTw^(3ev-_QMdCD@Hv@}eMNUcvf({R|L}r(VsDUD=^Tjb_}YC)0u4~H{uIBX{2ep`#KlCi{!wPavC7@0E0x%iq z2_ukp%!X!)ct;gIg~M_x6=crL=?zmxLU-vGr=Itxv>3aSIyv>#^$QbR|gc2$o}wt9g%1z}*4XUNnpJpmEb zG>2c1&&rZdfr%;$vwIq_=Fu0}A);BxoLf+d3byVnWzLh+)M3OzN5yuOeo7bV+dqaq zQ;&ee{sHkinhKW&^xkhZVR};ejz0A zDrJlrr-1Z8J8RSw4L%K;T&_tko7S$2y5+5KrCo(thBULZTMrfDfkgJ1GIDBu(~e&x zYZ|c_mNe-x3a=x|0N4cL=t@z2%Y0OD-bBmY3VkmH;%o=X_kJLT&vfS9z2pyxzota* zr@fFeqm+%@wgnyIZbImrSE!Xm9cvp4Kkjm zRcP|63j8 zR_6mT=|pPw%$2MxtZ1^c}@=48||R`+6YYH2%5RZgVU=&u;@z`G+g@yYhU+3 zR_!Nf(fb6#oLcxiryb*ElsEp8K8Ny`B*^O#SVt70R+|s>6sAMe_(Ck_jRUi>wlMP_ zntk(LM}!S}km6xOT+BAXh&o;Db&Ela-GWiIeCGa0W3KYoWbAlFodECqU~FtMK;;RX zolm(!-s{-0j(1^%HT46<{RFMq`r!RB4W@3>54Ml0r+xcD#IZ7i2nQ-KhRXrzLTmPXRX@(&!~%nEe}$6<(O4@y zN#7wxOeO8DSqv$GuGVT&H|Q!T`>S&nPZS`n?PB*od4R=sU$HRy1v~T%<$8{eWqZG9 z!o;_ROw6l`psJ_=`CiI!8AdbBZEYZ8bh)M_{g}e!JJ8>j@+udvgx={7(L^jqtr}-g z%{?a)JHCKeZZ$~j={x@0L!zHR8BT?*N{efGC^H-lh3~RKuuui-r8-QmYm;;Q*aI;8 z_ALzXvBUCbR@5IpocdrFg2XY#ucG5EYZ9610l9Cw zmH(aNaSg|)FK5~mw4v|5*4r0ILD&UQ8P$Q6;3m;fZjL5P%!i| zSZKvT|KmJP=i&<}e>WS0B2J;G>KC^D7(@FG3gZ5H5oIm9vvS`vO0(F7Ag!oW3Ux-Z z3B~%%hE94O?HmiK*YD9g!9ft^wNTgbBQ|Faoe^KShp7^Jc2G+sjX`zvTpuGc9kvJx zc$(mNxIeCrG=R>|yVw|ije0|#qpHe*gj8f;Z|^&B-QADt^rgF^4_3r!o`A7CS%L-5 z*4Ut+zbodwu>W>A%8$#4$xCCjnQ;VsC;kE99Qr&~)Dn-M=TV~Xh{M+DGcH;*!*%Bo z3Ak#&2vu|0`YJb^rntZ+eCUO4TM0UjzX4NduS3ySrR+2cgZ_8*xBxwKjB&EYR!0e| zwx0g(&HFA2yLtp|b@Xv;-6yCNon;fwbVFM47$|hjLs!XtNTDn>zpmX_+^s-|>N?0- z^bv9o@5cD^#So&hBf+-@V(X?3P-@hn{HQr8-qs5FP1K2ar44%~{{-n@|G{~w4#$mG z$2)TG9*+s2=Rf7=cGyhWtT79;nEY9N z@VrHf+v6Ap{G=#0Q9KwO4$;hsei4b#h=J^PCYU+j9$g1XaCUYu)~xvjxjyfeewQYo z>fsT#rScWX4lbb{wg{{`ejKAr;!tihO6fM7x(X-on68o|Y~xok)ExAMUA3(!d~u7! 
zm^h&9NFd}6IUuSS`5c0u>5zc?t6}MWEiSN!hxSFgp|7tKZ*>_kUH`aZSaJ=-4R{EF zud6ZT!*C*wI)~oAP1t>UJ8s)1hsxp}67}ajxHYRW(*ENyPOlbhSFJ@!of#_~VvXov z4W+IrY>rnvH12$WzbPZefBFu(?@B_iZ7$$`PY-PWkD_z&i!p!Sc+=T*7VFeHWgKE- zg=(Jrp_Uwy771%dghbLJHeVY`Qj*A^B&}qWD3VO(b3ZA`VIJn^1FY3 zfY-}R^E{vXb6?l>zNDMLvYNPYcE^~!!k8)enIQXeg+(R@vxs>STyaV|7xDs`cd;R8 z2VCWJ`%hpi)FJjPfOTWc`Q*emkTlR-Ft;d#;`-BEDjwp(Ri^l~DHEL>#-q!$w_L)R zyPzJfhfAJ4!QRwCpdGs$z1IDQUABh8qPRg=ShdbI9tAUVTY00-F{69mv7Ce`@Nf$jlhgH4z00g&U zfPVv;tY(lqU^lnOM*(4Tu7N7YhL``;s%qN*3G}i|P?9M~)w#sa%kqI#B%kjG;v!DB z#K=P&Q=Cwv&%j|2Aj|>F&H#{C@1$O_9zs!b7s|4pvC`Rbpsc>eB2ErP=RUV7Yxxu^ zkF?HAEfGqGVt$VVpP0=kUp1pp^4Cylc#A&YH1->1(tfbCzvFOY`(5&Iek1Qr zBb;=nzG;4tN})F5H%vQ$jRUJ7t)d=k!9sW-w&Z0LU0ytM8Ll*?o!#Z#%%C(kY`A-vmX}HPyj^Bt@ z^qXkgu?Qo~P1yB0#AJ{jgN8`rqt^_^_`3`&ll38Lz-t^sIau+qr5NY^j#ZAlMXcY| zC^K1$Vfqpb8GZ?@H|$3}he%XF9`??EgWg9^Vd|$WF8lFb2wBj?DrZ~+oudQmTS3{> zG5auJQ3Pi@sF>*y$0jJL0YdtgakhwFqQO_p{X+)fw})Vh+JYd^GGx{s1m%-QaWjE=Zo3 zV(@nxq3g_8kSmXIA)JegGEM?=Uo)o7DV7>py@s0eMncnn>uC3`50Zb2q=C1dV)ltZ z7L-GBm53?VOy&ZP4#mdL+2Gg5oOc!p80U8u0xuW~=O?{~ z>fyUEao$7pnLUs89P6aN%(meZ4mRV|gg9*KlZchB&v0s87yi&tpQw>LSl&5@COhIl z_FE=8Jh21Yo}19_AIiGl3n)zWAdkS_uPCa|zN$ELhG}EEQ9f9v3P^vCwzF!u?DvN7 zx38WMqR@v_(uZgZqtazxWUWPya9WBLpE-r*_jONU=MqDH_i#&LS(Y_FxX*sb9`F@? zw-f;KRk&%jfnfV=Jf8f8=5nuROwuuwn@sKpT%rc^%uCq3JPpHTr=k4XTkLvbf?k0!5FXJ5w#`-8HmN^| z%G*)g6pEt3)3M~$J0^McC#O9?`=y2XYy@dm+eTFoQz1~=CFzUt?joUl+EeU2Q41k{ z=T%9D`m~SgC3Sr40qxsO1((PlkUG7d%N=?dn)0h~-nty{aq5HIb{iqKhcU1BN-Wh? z*<&}I1z%hTVE;z3ptCu}g?18a?Vs1AVa&$G-@-Ay@+o|gP!4l%61MxqV#QD`;)zl) z$+-mEGO{r=vWFmAd|u^m(h8E+G*!XI78K1KbJcUS3|yXV0LgDN*4S0*Pvc2H8?f^tZbYQgO1qi6C z;sX;`pl&gW3)# z{QAk&Zzn_Z+<1(~6CjG6Fa7y6{XG|qU~NY}LB;tp*da|sm)d%+qmlLv!^sDCIo?Hl zCW$Q?PV>yaqqzc$2e4;@HSgS=!8PBkMzxbZ=fm5>+)Yt9y;zUebeE&#n-S-;rh*MN z>`6QNshDu&2gIJy=bcOnv0Eh)oa-u>p4d|5N9%y>dl|4{Um`mERf%I(lu|wjahy|6 ze#B}^L96?NW&T1QB>z@!KkbWliLC`saVL!ZOl;!M`n-mvfaiwy%yC5)b1365`)3i@ z{yPSZQfY4JM_izj32^JtRaDf7hy!v3^&Sul$z=yK+BXdyj~@i(o!?m5lm=`X7zX9b z4fr3mA|b{!2P9LMavopyVN)q}tj|is>M>6-e>0t>tPVW1K{#TBjZo0%6`5jb*J=F* zx2-G!ucl9MQb+u>Y2GZd=SB2N%Y~-hg_u9;AUaLYf{4w=f-Usr($kiK;!`$!Iei5~ z(+`0Bi5mm^3OEvDA_P<%(-EMGhba?27p@s-}x>2OFogS76EMuz-Iz6-hrO#|nO za`bp@jqdzc_x#h#p>>tS^K)J5Gn#LoFXoi%w7JCm&k&*$Y= zWfwY`c5N3I5#56rK0iw`6G6=Aui800p*OlEOqtYH0uzvYtz3f$@Igj=_WRUy2Fr(ODe!& zz({NfmZ4>ySV)R`1d_#bx$Kl2sGj_UI%PFDaUpp!y@H`n{w?$zI24jj{tCZ$8wnTl zlTbWs3Kq1dfizMLo!1XzsN)If55y<_vQzdBi;5IDvxAWpj5b@W5kBe(}T=aERU! 
z0kLmc;!*_vYlV0u_dV`-nG71&drTRrx#~GT%vHYl1E;$<54Vv{rm#Ffow*GxZc7|D zm3o5)CyS^n^A+UQ&V`_Fz0j}dKa`2RILW%3;M8j;eTTWw(%&Ceo=S%YZ%;xr#Wf2m zDj;iS8y0zgf`*6Gdqv*Gn2~z{<~~O;y*pFa#KWW~`ofHXC&0gnI>FE1gtDzskX^eQ zKV13*ZOw}@j0w`Y?lCyP-P97l)8XCqa1!J;lVgIQIpd510xSV95t$UB=ukbtqaF$U>fY zOYQxiLYKKIxH|@dcj#waE;@v9qPtx5yaBlEumxXVZo}v6N~r%?&N=fv*}!{mLGu%b zA=e6-eCi4`NP37FKVQa-;Bye-F_Z2P0w?q6VwzK5;in1{ev9KB}sK6+l<>1&rHP%AH^R3GK$s!c5wccuK)FeD?*|Nx!>IlP!6l zRyvdJ&cx_cAF$Wmgh+c}-n%y9mmm>{E@rD1?^_DB?v%?dY)9G8^O;<|k@F#6ytz0A zjbg-nVNa1D4;mp2+T09YFAx-UdP38RyW~wLKKYb<>5(*RL2K8{j5Y(L?z&Bx=L8JV zUgET30}TBHQ0@31nykNp!NEDiP&~}?8-K_2QEPCF?iH5ZSEG$hE?i47;yo>AL1lv$ zoTdsmZ|-B1tw82s84R+D8QkQOCphG39(BN2amyE+#@VE$#Ke|juSQ}*%^8eACy%2C zd2A$ocd5kF)huka5o`@_#`xW_FmF3`&z*Ba(emqsDYNUKYWF^Pm}$muenUY+>uE|!dbObpcqF2ke+OZ9Rom$4P&B16GXP(k|GTX0}K@v=?^ zG25!=Og6KMbFO}Z0p6Zyx2_*-sEbG0R!gpBmki&)F zEl7U1l$INuhsZx?p}N(Ft!$-?m2L^UeBVa!AO@air5XO{Ddu~hrY?++)sTAZUnZJ% zQ|i`7kFvS?Xk-2z^&IWdDRmsirKCZwbsSslWh9hk-bTp`Cz!a7IHgM`u)h6Tapf}d zYxH)9-^^|Ju+T&JXSX#k@4HO3+`v*$hfTzQQ%{-G@dJ?1{|e-gcI(-ihcj2dMzbMi ze07uteE&)XzRrNRo7DpqmzHoggD-;kK?nEawgDfsdlI%@>H^7n;>FG$o zEih_0OWH+GOt?r~r9|outQ&`^+io-GJI7FSE*Hx5H^JSrCj7~Z8E`Q1F!4lUxmMSg zX#0Y^HWrIO(>M=&#=ql4lV@^1{v#no9jVgg=0Jvi8Z?!j0cB0PN@q{L-eH*iXtoYO)w#0YqjS7!+rtX-}+|OJBM_n&eI#sChJHJEd2|H}tItKgwY7aBTvpc9e6NA+ciIdrHB>eruP*BqO5}hQWoYYg4y-Vc=Z*V}PmN58dfj;l> zY#HVrTnOH)CPRhSIZP=!jF|>$IFEFS%9jki#&%)V*vrI_^x#a0ElSIG$Q#{>6)AJ^ zyiN|X=d+khk3Iv<><+88qGt{dsdryP$mo|~`{)ugd6)vSoWaad(Zxk`)5s@f$(K%j z4!TEgP&$J2u10gAYLN+_x4sB-T~9KjoZ+NF(JXuPJbLvt6m)fKAcCH+$U7TZnkFBD zrX7b{{y?d|BgtqwO=zU6z0XjFDN3TGOBmo}eP)>F6JLb7(vMPOwH^{!mg6^4^ zD@{{C^`^@Z`tc2P?YRV122at;gP4KY15x|Am#Z`R)Fr2Dq~-q-^I(37OJUU~*HBe5 zG1)5^ZjfUBh~C)yYb(U%9>Ms{>DYA09a5aFh3YHhqssaS4!4ejd`36;J>&nNI;am!Ag^Fr|8|II?a8+J zd_|WIJ$`p=523~PBzR8GqxZB7MT#0GAJ7wudu4#Lr=0zlK&;2}4EkCH5i6!dn!V~3 zL=SEPPyg)@H{o}57!w7F6DV`6?1$Ul)}#BI4$SRQ$c6kPa&^y=XBQa#@E#uqqXBoah^`AoZQJ31VeV8+?ckly7DsafAK?ABe@(t3#QBYn7# zkq(@;lt+>3nlwC@m{!w{vZ__bG4IJsuzYe4VscfKv7E_WdTz}RY<~w`Qv{TizhRbz zM=>dleoH+S$}fE%;7Wshh-|6B5c4Q0URj6oj(w_qPw3ywi$G6HV}4PD9u}?ogvwQe zFzszQ$Tyy5k!8zSp!OK$>qgkf_};^j5eY2L7j8Qgt@))JZ* z>)r6wBjRGNz0Y+nF&94Mkx%0a%}sxVphTZKfFFG#@4dC)zNnt%89W8^hEANgeGf|d zC0Rey#FW0&-`K>u7Zy@?`ySMN=)>9enMpI08x)Or4RIYUEO-q0 zro;AQ$UdK|1J8ZLs#_n4*(W8YxeT&@qkMR9Am!yIbMoUR;QhuLQm#isWbh{z>0Zuo zRt@~nSPM&Hh-)PmxMstHpjA}D%7R^x?Jp3!%apm_FJX?}G0bR+86@>1p2J455SD)o z<*mt>YI&S1`xu3y8h@-_W5pZI6r*R)n5&AQQ*2lYeRrqcq1e8di)_*`%Te24>OT?C z{va8oHz?D(!WnBj4q}nI5U>U@zxU5jvCtU&+|T18V>3QeUk=&E`50F`1zlFIfw1n& zF!Pr;P&%8s8A7jua)?x=eWYTlTj~OVp*j!CGHjMU|6_}bEjb5#> z2tAh4`E!j){EAd|^qdF3HsjN7-Ng?s?;(HnE07JG2f>QtV1G`F$~E^uRFlJWf|$u? 
zeZ>kN3tn6Hj0^gKkm|e|W>OED;uj5lXv+y!jv_wMwcPlvY)>s%q{F7YEbyZhD=7sn&bF0 zWyHk4-e#R{#53OU0kXFqrEb|k>S=t$WG>lI{xTQ4`^rJR;R5Sg{22q{_cGnmTG*;; zMDszPp-kCBh<_PHI&L(#K{$;kBL>2baTR!5+=9w=d!$AuJkdOeert~BR1GhG!G=Ri zphWqFlf|6JlK$mry@UFyZ)Bs2a!&WQneZi&22K%V%h|pN!jLPp6MAdIFV@q6({S4L zgj7N-`Cmi44RDG5Rj8jd5;Nag3nC9s<{rU=`<^<^qaXuoU-jU_$3CTf^d5{DXv@0N zwxCyO0cz%!ve;{_sQln9ZTxK)-tPYieKvi>+NNe`qPx-L@-|`%3Md)55+Y0HGDWrp zI%^Zq*;dTO)s4pdL_Hznsty;*$q#VU9|Lafg`}n#kTCoN%75HuUAvEiQ4H5CSSxsOtvIE1*KEUMIEc|%g zl$WKPWtNM@yyQqF(@vW2(m#(FI;!2=#TjP8xz8fr=u`%VBnP#koi`LwjR4Nh4g20MJ}w-kUn(50~I%xGH?Awn7n#3OrHM`cU(9D zN=O?J$N${$Vs{nv!<|YPNKG> z-nTloB;qD&FP1^v_7n&mAm&57-%2MBJ4`vfPS*AC475!7h8cUJ;mDaM#PcsfuO}}+ zzR3pXCBFm9om0T>@0qZ%<^ptW--z;od!-2!gA4Lo0h9e2pjkf&9KT(q9?zAo{i9w` zPUID~4EU2+puaP}8xNp)ZV3#IIs#s)pV4xYJ-xbP5!x}F62 z?HX52QY!dH-ACohB<37B06RbDlizp?-nRaRO|vN6Xoi7o6UbWFq><)BSG=occ82QbHVdSYWT>i*{I#?!x zQ@s&-k{@d7g=kn={v1~q)7hg(+q${*ELsHM%xA4w(D@pwmc~Qn=#LOa-5w{&U;m-& z8&;iYhX?dd@?Y>b_*bmMq?=|!xmO94R@}vymJ7sKwZ!$Av}-Q>37kVcILVntso$gf z5M+24lNRbxzqN?hr4+Mp{mH~45wsM8*Ls`fq(q59u?|86=D-(x;Mb!W6M%S4- zjqc(Uwa4; z?t!7NDKm1n9)5kChqkqwF`y-a%b!l~u&Wnb)N>|4@#8e~ZZ3x-1>`H9+6KClJ%|r` zhQ;0>Hq@5g*f#SC$^w635%V*c*%dlxZdYUWl70AYn-L#y`~;`CYR*bOoAKHc0x?H- zpwBurjQHOZ^b9JN79My8g*lDV$tP-{D)KahKI5T!8STRc%;XNzJZPWVLulS!jdH^z z7f0niZqYqYEIvEOY1cnCojp~W3cBz)kXoF< zCDaeYzLVNeGH0f=d`K&LJgb8d8xA0nHWpfAA^2r~B43adlh+B{Kzp$ex#u?XIiAJD zHOFwA-AOvnh{OMj2`?GH6IWF}>Y*MCH!nr`?!{b4U6NEb z?=W};Um>09o-~rUR?gQaGl?C?BGdhucEvg-+jtsf?~igm0h>W@CcTfYbZ{wpX;8Sl zm2-^z3Iuco&pG{A;rFGW?_j`>y{#|&Lw`@b;1jBXTl6`;BYGPC51iwkLi5Bb(xD@m zr++DP`tx^U3=@k0hG6s`&aj5`((-_MI79v$eCvjx8X1J7skm()4nRubJ18o9ghr!x zVgB20wACNX;WBR&oi0+f-6_E_NjG54pJILy5d}6@Dlu}T6KwtVfio$)iMHC|plTC z^nHmN9-8nW&INF*-&5?aB+k;1I~evNiYu|10EMmky!+QJ5Lj`J7>o?h5Bx;zn=MdC zy*;g)OQCpAZ&*Ldg4Zl*<~pBzgbn`~3wGl3(0_tNz*-!*^Q37UT=jd)9-I<|Ul8JdIw{ zpHSD{6^x8~gYv$V#n_yU<93{e_w@bCno60w^amENG=de?w70U3Cw@r=Cs%I8qr3&* zJ}MSvqJe1IVk&H*oI-^yG0pNOsXSNoy*ffI;^Wtw@tS}XRT|f^%^eXOzAVjgE`>p#Sor!pRh(Z=T0^*Jj8~ ztzpXib1e7cD~Phof<Cd81?lh`b*P+w8N@?d@Rby*&dayfGE}XJ>uFELDZ zo6{`pg1pI3A*$s*W^J!Uttst$L%wr%OD{0(XnjykxPbn|%e1|El#3E2V_>2Z16;hJ z#Naaf`LemN{sr9|+!ja`lmU7(!%%3*_=o{1EhsTx4nFEn%uzH0^-}bhk5eUSx|>0x zT8wtCry%>tDilSY<6 zMo+i*s`TU<=opPIq!^ z#NwwXppV{MtlK0O;xZq>kdYgZ813X8S&olSSqlz3?a_8=JZEP#g1n}o^c;-9!V52@ zih^!->rDkzjXI3>KOcY*Yj=Zl)fv#XcS@a>q@iqn2x9y?oT#Qf@40Fm5_<`aY95lG z*@!w$fb-Ecak8W{)UUJ{eIM;X=ffH>&&@~Ye@wV}4USSoGo-2z(jSw)Xp z3vk02O{cb_kzxWSzy5#{vxU&$(101{yP(L9GQ$X|!fQ~!CW(4~-N1h8Gq5~<52LEhgkElje8|f$Y}_E~k+gYCyn!{U^7)7GecpcZ ztj06<&q~O9sKmb8zrq&3QcUQ-9E#$IlOVAY6tDET+PTjlb?#17d!5JZ>9wH!Tt(%F zgR#H%8rXXEf|Q%hkZ{lzzMt0@l9%f9o~3)aOAk!>MCEgAYmj2xzbTOEl1X>Za&UV1 z4;Xy8gsFORh|8iJ0-Xhs8e~Sp@=&vC638}qaGqvwnL#tLslQfY zWN{d$|3>q=k2mPvsG#!@Ws>LQfFiLSCVN@%A@dG%?pxmDua^i z_^fK{s!lE`o;>SvOHOZd45y@d!EetV$|yIngum6;?U9J-+kLR)o0{cLxx}=0FRLon zdO&y=k4-!Gg2UAq@bo3GV#RJQGbxjL;75Sx_E7G`bW>imvYzvN8s@6_D90Onj0HW} zNKDqyOk6h)IvezO_n=a?kzUn{7P(=*x(ySOH(}B?Gk!)8!f(6H`RghXA2De+^LX@- z_7X+tIo?{8cq13Z?S{h4`Hi@8cp7YtSq1T9K0u_<#^pNQVXfQ6ykkZU%b$J_bjL=s zg-O1H?T(gW?}uod3oR5ZB@W$p&4R8Dh$(%t}J%tUgScYRIRK^MJXEW7N~( zCUsJLr;dnkDwh#Dh*xLAz`t*T=nK!#w+#YX4xpVT8Y@ERw_(u1S$+&fUA`fUmAu7H zj{KR%tKx(W5b&|c|3Pv5k^cF3g54jKW??;(@#!IQ)ks2JDU@Yi@ zeg{44V{oOvDKGisHB0SrgbUZsM*Fs_==f_Q%JBhb-(wf~XX+vO-DYe$SHy;$vJ^Bw zY5sr5z|!A&fLXYa;C0|BTs&kW)bG^;+xAPWEsDpWdPCv+1CfyR_9bQaq%Sy5i{~0j?vU3f-8Jk*C3thY zplSIRP-;d>KZuGTH`xb2c)WnzXOm%#H{A)!h=(@B5@bi*nR(A|pux`&c}2&vMLy`c zwVrb!ADQj`TC@xNALVug*DXUYV}jj8Ft@J7(!&}w(isVz#mQ*tb{4Z1b>L!}G0i16 z=yy7oGK)(=F>xB|o>xKtp8KF-Nuxb6wbLIIV;?a2*WIeJ 
zOP^4CcR6RbdLtN3I11k)t%TZBDzIM>kGch)ng80Q(2zR`jaL~7x(N=PV#jXK-Ft&W z7UrYd`JW;8+kK{8vlg5uq(S$_G>q70#>SW(N72sN4Et7sbN)o;9Ug##AN7NM7G}KN zfQuNt^BagYr&$Hv0biZaE3XZ=_ND&Yb$Wc)v2(bkSODE3fr*kHs^pukpi!SXp+3LIL+Qj|?zIPk zC1)URLnOClzW@W%<&ZG=H(a^A7@Tex;qn86P?C2MEMFQ5vI#|;&kSM^w53TyI<+cy z$N!un2J6TE`s7 z%MyQ(?yJAvM$got+*4ojdQSW%{ZeHv_?_%T|4GXs5{fvx1T~Ai`;h6(*Pz4l=@?dX zhNaN%qAk!5J*~S{_S4cqDGZm|o>8!_&@im>+zWQ$)okE#VyqR^sm3g81!c)$X_fv# zbo<2~r;b#h^V}(*?&yZ-_raJe$%E={4J7=s47&E)qTTROkUsuAxFiru^lm?<4qwUI zG*YPcUWD?0pGZ@;ZiDDg$I!UTOmNJ62T@lLoR{Z7sQxz~Koa_A+$Z+T3eI+JJ7%K| z1e=Kk<@4jL|BNS)bm9Uk7hl7hg>+}w^c+C67nFY|LGh(N)E#+(1yHxc#@uTln{UJ4 zvdqMdliTQS(}YHUU4q=Az2K=hgT|e-EAydj$dd2z(p-TNc)?g$#d0QsQj^8)q9Q_CzgqX2gf7Ohwa1P^lbz*tbqjol^}Vu zn1#0a!4beS*ECqSQ1o$wb_^zKH-$wfHNx)nt4IaT%h2+$3nek9ju zko+bDiN zn{G+P21Tr7@LV)H)Q7YT2QFc_9OdulFvWnIC?6Jq?)txgeC`FQ?0XS}TwBljr+vVR zkt@-#)KqwS)>K$-cm!p8Yq4biB`(CgglQI1K6UA7Rfg*x+&Q=hUmNljT(q0nUcfeHoFL-K4-WZ9 zwqcm58OUC3;6mCAxuQ-1E9o3Cv@jADC6?fkV9H$Nyk!k#lh8p&cO~ZvPS^Y^Xuj@Z zANWSB=wAeDUWxceo+3f*JQK?nRuJPh16nQwLx|B>5KYTdrQf+jxs%xc>`rSn)kcD7Ta!xrm9h|D_pt!-N!jjx&oWDG1mA@HP!ZFP6@&MKHYLI}cU~s<<-iRv zD)|Z--yWj6VheY0n4B_ZKAie!Z`=}1dH&gsuz7{PAo;IdwP>`8e%Ik}qMG=;{b{e& z8cTUz6W(C+C-RXW!>CI7J^iNOw6As(+{BO#ch`K*o<qq(R)?i@w3c_BdO^Rlax*s@NQq2VR#3zBcA_|Gf{1AF6P% zya97QnhQN&i}|+m7Q(<^KNGv@IccY6IB;bh!o}JjVCx^veu~r= z?E6{?NruEXu#Ulb6TiTa^BX|<{0gW3@{UvO$-xr0t6brhzf}`^QZ}qQ2XkcBg7;<> zXiuL~`SunIj>WG)*WkkC79Qp@wdBv8un9`H(0u6>f=9wrAh+!x7ANka{^Vm|YyAtG z{Ja$lubB!Zn;0`XOnir_M>*ZN>F84uh^icGAt>xB?0zibr8cB>bjCyb2+HY9a$wPi zeK2>mBlb4DMeG-2Vta;gu`>+$jGwQ9bLnnQdomO3|GEI;{wDZft));hEf8%CKf(8j zmV$1c1*!ukgY%pdT$SoJ79}y9u-Qz={l}C~(o6zCFYvxh8EtJb>$8^nh5DB>o7Q4F zkM2Ss6vGzUYq=Sazd7X%bDW$7V$0LaePSL9&sIQUZyjnMR=~_Vw8yL8gSn~WQL4C$ zqRoFXqhnRXRwQlr3vv2c9B_-Q1ly8gl$|5K>ghIJ_2D%!MbF))yoQC)dgKW>em%mixm1ITKWv4=1{Q+i=1o?t=}$SO zS}c2;2s-$i`6zwCVaE!L{OH9LW2WL5>pYC`IL=qaci}nC~@i7o0jYq@NiVmC{*$Mf}W`XF^1=NarOIy|qfP%g^p>cpYuU=@u#f9f# z(7iTvl3gPH{6Z}7qqA5O$I5atP#y7x+njGEq;(YH)Q~7TflIJ+&N*nE_Z}n{7E0sR zlK<=aBnKpm}hYi>xVSBE2!-IN&}8N%Bx8y}^}wUPPbC!_jg|8)#-<AKpiS8wixlU zYX-bomPk-k59i7@P5}KyVqX1`7*6`DS=jqSoFa)hQdSGlH~A&~{fsa-%Li=rZg3G> zEK&SA7-chM7?Rfw?pMm#lE7-}K#m5kr?uc0)Pa%1oH+S633~r96|(p3!ptXSC~5Fu zsoWP2}Iq2`O9AAy z*)H*AT5+k=U^3;r7XHqCarZ!7RTR@rUyGh2u1npta?}kJgJQfhEK9c#-es5ws(DQq z*Hy}Fe?JE;KNf?e`yIE&+)4m9QoGHO%-;%$F~HivjYbsDA$mi(|}%xT+waj4V=a0sFkpqMXromCq9$ z_!LcrPv6DD|8v?EKbLbw4VL`Vt|V|al(V)aIq2R+jN&>u?QblFj_wWUX>>s9)l!by z!KaynZ9h%u-OZlSxUh6}5Rg(~7>M9+SSc?-1E!959ciHmppk6I{tXDZw} zX3jhDu~^+m=Yny&)N^AFdhq?AvJSC+kBG;KpCRlv>GJglz;=~Ai%qr`{+>+wPv;iO zCY{A4da>AY))xnQ#8E%VAg=z_0d(7bm}|@@CyB{hY^tGNiMx(?bf-ud_)-C$dxom2 z{mQ7n*<9#9CKUpe$1&dXC3Jk$anV96Mw1so=Vm5!bDtnmHjxGR66?6821eLj18v7# z_Mw#g?HfuVNt}tw6_2Ey>U$6wu#K{3ZLnDJ0uq*&fNxq9bQlqfsM}b$9r1$hZ}b)` zf5+LatKrVit;0B9IX5vN9<|qFT|3nY_{qaYNbUGUdfYfwRYe+DZnPE@KV9I|(|>~! zi)q-{;|gXQu7c*NBEeBL2Ub2h50b5qIkoOHi>oVTxVMusU7?+wK-(R?Hie-Lz&WeV3i4rkg@qe7vX;=Nn6#F>^$ibEZ?FZr#bu#vNjRG~ zHU}kBHp8lElnEk_to^hy>Zi_RcD56G(O=Te=rlAYjc>Sz*Xh=$1dV*)Pm)=&6K;Sgpj4z zRTF=1Lc5Ur?8xFX=vErfN#bX-wn0--c7U=)KT@g7ArQC9{(^0G7DC*Rd*}&)*k}8E zaJkf=9v{Uxr%cne+ z^63ZlXJ>)pqzflGcHpYG;~~Dkbpp~BJw^A0EzIPI95w{)qwg{X9rFCq$9E@`*c6~% zv=NkT?!nvDJm+GL8}s572U*3}lZbaW!^k9aLA9j|8>>H~{9Yhr8vTok2R#I5i+yl> zixq!MMmZUA7OMX(fJ2-cQ4~?Q#+Y$*bwOdwjFIHN|tN877S{yknW^G#SVKo z|KKDJi=~--_&ulyUP9ife;_oa8cZhW^YWAumB=apA7t)=w(8NSR7kI8fBAu)b^YP| zK{*6`uoRqo4@ZLq`@!zu2J#tpFiEhKPVvhuY||-_?fnarzRkvpszL~oQ|DZsRvNO| zo~fnVP-EXinEE^s4!k$wD~~_HBuvCFcg46Q@g@}4_J-y;dx`0@0Mg?sz()4~DC90U#*JOjLr_k=&2_yo!~XX! 
zL%wDOE}mc{g#0Oo;7S3HWF5wa+zvDidWm+uqi{$25wxK!i~r?vgw6yQEIm#8)C#u4 ziM*h9h!Zt*KFAX$OM6dIfP)^r|4PzTMuCGt+^Xeb$qOXD8Ou#`?#ZjG`(Y{hk|#!e zhNSb6kPtVKc)$hHkl0?}d#@ThO-u!Yu}zq}kF=V5ju0^A0aq4v9ldJ{Ks-gCZ+23W zFYI5g*~N<2zG_qzn!RSa0vU$B?}lazL*9{mzw@7q_(dNNU{gOgNUr~aDfF2_Pb`PH z2bI{T))h7XljF84*U_=ZJyx~lJ*+=z%}3npi~c9V(I&Y7a(|nT%2~eb7WD-CREz-3 zIR;ZpiZGN91^M1dY;3%VaGBU(nl~>w@swAj6K#afo`n!Sje1>b`$8~vrMh(fjhgL6 z%y~a>wYT1*zx@vUu%nIUuZ6h$Cp*gP-@MxOv>a?NPh$K2FyKqClP`O#6(8LnQEPaf z({+Djijm7%%4G!4Cqucoj%g4l83F^HZepMHCPGMKiz>uwKL@YI!x+<2%Gm1*AsGYE z?pX@wK5Qb299BtdNz*ghUWci3A2CJBNmeqWmGU}rw#8Bl-p6{vV{HQ(G!{U5AVQz- zm+{JEYkraOSX5-BQjWq@5DlLQT9>O#5+|T&xK=7U@SjSfOh@^G6sD`9UN=9wTUzYG zt_??E%}ers_kV=mz4t>?K|FPJB~$-#uR__$S_rwlhI6`~M;x55oXN``d?#OxZvV=VwL z_7H-fv_jf#9aO!OLvi*&XtN#-vYolynp;-<=I1?xQnPp9xc@9<=}d*pZPtSCl_>Oy zyUs#8Ly2uyhw2fDpuW5eMk>F8y}bsck4fKmh{hdX{sW_TXHcGP$T_=RWWkaGXo&fQ zUE}|zJlkN(+xMmCb|kFgZTLqo#Qan8Q8o8w=us-)DGy7c2UNe;gZ(| ze93+hNVt*VnrWMrl%pkD604w?|gwW~KVXl|YA|r^&KT?FZmeoKG?FI@ntcCus z%>`|;j!C?9oOt6P3>;KQ{pYmrz0*ichBsh#ko>Tt(De>oR=NOarl~ zoQ3DEMflu}L6ldhE3@IXqfVQ{gz0McUzpyG!IC@=3vS;hfqv;9o`>bp$VFc^f%Xg)oL3w3=f^+8(s6Cz-Sh_5{bD1CJe{Q<0*Vpz zjzfY$Gwosj0*WzWam4}VdE%`~0YTVy_YJnz)icik_z&UA6NMx)!9I!q8Z67}6pGJ5e$xnOg>uSp$?a z^5ei`-8Gz=be8;`>uA0m$o(HhXC6>v`o-~X`zEC^SsEi;OOhq2?sHxwS*~OWSz?4R z$u%@&2_=anB`HaSiX=B(czNE^gfOw=RS&2=m~M$hh@ zz0hyU4}f8Zq3K%!PCj=O!$U6c-)`s%?#Fe7;M-*o`lX!sz1djZCyMq}KOpXI21G87 z1dGb9qGD8(OtQ9FrMSPCf4rb6Y))JY9+kvZn5QRfj@}CDt&Y^^7>His+CuP&67;m9 zv)I-J+%_}>Tmy8V;K6ycIA@NMG545@w43M>wHK;?HNw`GNBA%!44%fXz|DKAA^o`< zCd9Wx_jw;cV#%1V+a8PwdkYQsT)S6C{Kv*nbe2*6P;$-5@JJvW zoYqA!Yj^^dF_(C57vho+eZ_n>)Z@nHgZQvp1P1g-V=k1LvKwhER#s$VO4>?TVqK07 z)2mpn?q8q|{|{ppQt$ug6XLuc;+YP`m{M=UPy4jOb`4!op1K-qzq?^wmj}qZmmT8jDTHiL=51!-J(lZdvsRthHJ)O&^7zpbsO~lfe8t54N6)ww*!F?ac zk?B9c^Q9q_e4w1)q7vx-IuXX|bwWTyZ#)u6enNE-G^C~j1Y457auUcXd*QMDAP-b` zgV1?hgvrlT;4q+)dT0~5i{5FBk9duG@#Gw~e~j^_wHWj|l~+bgf)S52#q_FAPj-&r7i^0nQRa@5xz`&2AQvip}h00Q~t~vFton`)m&RNF)|VjUnE17&kc|cTCMUq z&Uvnxrnq>4p(uT|o=d+j;aD^c9YROIC6kj_=W-i2-fad&NKRh=8|2_>4x}u?O1kI% z$cukKT>0D=Dye*z%v`PyUOTF>mw!5_|21UAq5C0p7(HD2TFGKmhAm$(G1B#4f z2s+t|x4qC6HV+Gct$(LL-FM35xE0dwe+4&*-iLO5`lGG>4eoV6hcAC`Doi$|3^rxa zB_}KLIzDhvxd%E1&%&xET`~K>ew_BvnE2gsXy17Y zO7t7ShtY-k5l?8?mW|$VxYM5o{zEK(T*?%yZ;iY})e$!&5I~ z&A^^${$wHgmVbsAgG80R?O_O?k%3ljj2NjQvaPqWz>Drt>U3-D9(Wz>hv$Gv^DL}s z?2B6mv}4%adzdw{H^eA%p!`!AcNlgAWaP+_ntP#3(0+2^J_DWKdZWEp8O~jtgRxT% z$yh>m6*f|D_E7OQcPHe(mfvj5BM97?`;>!0j{wMVd=W4{E zhG#fi{_{S>`)q|wBMa~x@BrWTPe8v}yTNyo96f?Wj!$o+)id(A@7WF=sdXSZ=E`l< zN^&b_VbwqM-8$Zaz2b=ndZ{~@O?wTJ4|e=Bu{9)n%6Lpfcdp+4Tosf;`L6JO(6Pe- z=4-r$&Iu+$?PMjK50{9IFKaNy|A0(=A(@>#5J4>W3Z*ppu+wih=xj24D5wW6QWf1C z24Ptz@dfQ9okQxF%sC0X;v$*yrUC8#YFJd@E>QeR;bl6+B^tYcS#4c{?(fT>`{LU$ zKwDS%L}73nVPP!%wX3UGIIRq`obF{klYcWtNK ze*)BAUxOj`&FB&FnW_G1grZ7K^dFmr5xI9zX=;d;w;S0b8-3Ap#{=j=nYNtoFO-49n)euf_lU?%(A}^F01OG@IoS7 zH!z3wY2UCo{v_?qJ$TW#BjC~Yfg9?#@wYY-(fj!|sH{8&7IW#j=TpxhbShfL{$i&m zN0T$*IG0EPbpi}U^)LgLqL>IfF14bM(RKc1p`I9%yq$YXUx2)JEo@%3o_f$PA@frr zi~DE^;Mxl%vnFEPw+E;m>w&EW=@4~04HsTQP(|v970Xhw2ldyj^O ziKX|nv2flUu+5Y}Y+4GahxTM|Za1V|9!-&_CC!& zNQ{J4<8EUjG$HNLK*8X&R%M`_Ya}XNFY@roT?J*`G*q9-ME7~bRPI%Y z<=@{hpVc36>y-N-U4I4?Cw{0b4X;rb@gABujKi^^b?8;;$ZH11LH&@&5a76%SDt=C z=PV9Ohiiek`XR>kIf5yB_u~4C*Wm4oU@Uc80OcQlu!S=@DyQs`h03p^B4{0Nt=t3h zW6iQYbCWRTfG)KDu@UQiU%?Z~0?+Q02&RDtiDR#z4qhoMxY>Zwe_kSn?nrX+CSYYC z#lC#s5!*t^{NECz?0x}{oJ42JI}4c6Kn<}mjhG2np3!r=28#`@fJd=_87lH*^=@Xx zMSD^G*-!P(#Z>gKv4N_ZXQ(*;z_HpuPpEqliyMcfgCe$$E$^l)T%Q`o-kO++t2V~d z??o}BWd_cv9e`rU!n;T6u6Z+=IflJS9G@KlY=38cS*GF_UK75qJ zwmsw2yHk)Rde`Bwj3Q#WF#XUDX 
zLDj5A*!=1ZWSYI=O*U`PBlZf!x4lOB85J?%cY^JUWf&CMgU7C;Jnhl}2q8A0<#z$> zFTcguS#O}Vzkrzq$y{nC$txjV{YgiOX!7?Uw4Kq2_onXx<(a1{InC7cubYX=CsnfG zWs`Vf{z#~fdkXK+P*@%G1C%eiieAI!Ft0TpylM78ID1l0NWQxhefoufZ&`P6xT4A2 zUrs^g=`8e}*bBV6Eo0GIC71*B{L75x2bY-%mQA$x|Cf3_KL4Oe#SW-9J;r)9wt(}# zIn;*@lBKK+M_c1Obl$iEeUeqwJ!qFj%pqn(&uJ`l(>LO)uI2K{#CWVp2DvJS6}<7n zd*{gmZEcJ(BR_Hbf^6{IJci!Q&sg2y4on%P4XbF+XMeH^6CYJ_*Uons@HLL9&mL7( zU(y#8&xbiFo*T2=wDqV?cTo9c4aG#8Otel*#`y{#{N>bD^bhc7{XUiE>o`d68{=s75QjVUQTw#BKk*!s~|I!_xk`LVce}V7Re@JHL9!>*8Bc z68MB|Jo6C}*RDX1=T}*BP&42%88OC@d9F27E^hzRbN5(`!n8^{HE7D~p^4BaT%Nj3c}imx=p&hgX@ ze0Y=<^r?cNjzer@=vOeQj=~h-Up9EsN4Wl95Qc0y2@QkC;fNWbU>msxtS6j-l!LA4 zQPcw24dl6-p~>=#TR`5^9NZ^T4?=zwUDh1|FEi2P3ZnL90VI<@#80nAnm%F2eh7pcl%!f zo*aTn|7H^_dJX)uLre5oL3#g~2@sRrmHE62(j%Bf1TGSmb zUIY!3EeUPKS#dB!c-9>P1yoO6uG-H19o^QIPC3^T2pzYzeyu0KzW@>E3 zKW7<<`SMS&(Y+R|R-cDhA1w-7>OtjpV$vL(iQQWc>G zlCr+Uoi(o-!WYKAffZkj1xb~P$-DKGVXsK+Y^NQW&ohiIkgB(w*b4zO7va`byFtg8vQE??uU{?2L{59LU{BWCHVqT~!kO2JbXNBz1nj~u z0Pg<;VP{E@eoY? z)4+F`F37LS$T89#!Vg*Ew>ILD<%tmRO$FA!Jje~8C1gDO2ssK}F-}8DE}qetIcgu+ z1-wM97nBRRV=gnLU7z9Vr&#W0iGI(%fc%y&#^~(868SJR8(asG)~)264d#Y_-^YRv zfKaL}m<*W(!9iCb=0!UGu~I{H@2w;H{i7kau8)MjPUwi?qYptzMl3T=yoAbtJu-){ zQ+a{1fUD`;AoU3WGu2~Mmlr_HQ#tt0v`0mSHLDK#j;^;~VcVn>2-b6CP0Keio%ME5 zxsG;^*#J!!+rWP8NqBfF0n_N$wJj)Po}-Mg^<5#<%a3yPUtUacbc-yjW)C#2+zCE^ z&&Kdo-i3^7B9}CqPLF8a60G$?1 zkR-UtQuApB@hwlKq1F;Zr>{ZdjTyLp&JQ&CAfpZlF+(QWs8SZrVe_rK2{E~c^P=wT zN6*90K*8TR8W!#Z>tn|-p1Nrn$u%&y{4q2~Bw^y|x4cgu5u3s~m_qjub+>iIww~QV zshOg(`+YRVUXQ~1;$+b9)kgoy?kL|G#;;{-33gAfkXOJPFWa@E-=rVt`tc@Mk}J$5 z*+874)kO%^90PqG5KC))JSzTpD9fE6fYy&@K#LR219!NqIs*G)ukd|1sDB)mUVea9 zXV;)lfDv?`@D(#3CvjINeNirCtIVo@VtBW~nEPxCF`Ep9xO0inm`~aJ-pOdQs~(yh zOnAnt2(%UdfWC4I2yf)<7^N{=Cxzmy4|%x!=Me}L$3mNVB%7FQBIw8cNABy3EMdH^ z=>1zR1a4Uk^WAmDlY$0~s|P@!)fa58Jb?9D_qZ$VpB$@z zX)uKM+SNs<*Dt`@)qxo0eGqf^b`_*=KA>`68Y@VlS@<7SfGg>0Gj1Om*2p1{+5ubh zw8dcQOnRngVpZ02@EOFJ=L$NzF5AbJKGO!Bmn!T%)KmzQmSJt$Ul7xqvLz!rn9}Gf zrUZ^)#pm{-?AJk%8VqL@nM#z`j^l~9AG1uYB6PNCVR0_k;FPzPXx@JUkGq2H7%EG!Zhwx`?*J^+ajY9S9VR#id=ypKxa*lzBJ6QtIiLAKAyk*L8zHjTcb9 zeH>Chnwt+k%ihc)F5D-Yp$zCI&MLeP)t^m8v(jQ5G3yvweRzXjy4_IG@CZDUQo%On zC)2SN=mxoC233 zgHgG`ShTLAyU(-fc`=4bGKcob?CHo&s4;%GLqN`X^oNl zw9xN-C3Jrj1MO->=q_78gIX{?jQ|JICc*Y~1g z;(nIA9-u^ij+vJhN;Js17ChB(sV3S_vxq|SXcc2M`;<5M$>w|hmI7#$uU z7>SQY=!p<=hMOCs{_rQO`U=d?1YdDG?yg*kJae^;LEHCs1MkPJP^{N%G1PcIf>gA7zr_tN6A7aCV_hWYqaED1;1O7_^J3QzLRJP zE<3NFV`DomKmQf8vQiOdk0DM)6O_C+#o*O8c(^M$^8rC<$8gj38!n zFa)Oe!c`Rs;HJ6*7p5ADeeWE^AnQaP`Mw(rTBs#Nwst_$;6rHH+!w8K{-X?L6}Gl& zKz4V1LAjB3$(k!5ajHMo<w%3n1mu69^H?FhA-Y)J0Q1^<6Nz zFfy3;^Agaz7mdNw+)>9Q1xw2kU`-lz+Xipt9dEZm80~Cgn$Lqyt0l&CKczBR5Qd6# zx0rhBVOdy0HhRr85#c?t&_}P}lG1yu;l)Kztx2W1q(3NL^i#RFiXb`U33dncq2kjO zNOpLE1#Ne*D&qzCHVuRw!S_+|pts8M6}ev20@_^7fFA#v3FfaSfZF~Q9?{Yk)pma~ zcT@70E{(w+0rimaD*?m%Mnn1Vc(k;q412_Pj4i*0MVn^8lFPSIxpTj2{JWz(`K2Gk z-aZKO)I2Va%*Ts8b;aP{pYiLDEC4#kLW`cZP`BPxkj!m{m?_(sb@hK}GwCp_uT(*0 z?Ffv4ZPYod{%$&?@W zsPqJys}<_9zWa%97S+b&o|G4$b5q;eO8m0oIdLHeL9os@R;Zs2%C+alSPq_z$%~gk zeUCYyFHCah)K+`>F_2+xc`cex%zn^45 z>v}_*KIa;T;@RGxofz;w9x~hYp?-2Y?y&iQ>avHpDoX{Slip#Ij|z*Nc7WkuhjHPi zYt(nhrk!hNo;d0ryw`hrdQZGZnhMVYXb_){?FLL*bTT!KR1|-H+JbV5L zkQSM;lF_euQP19(GS?P>n~IrjN$6qsn3qRTj@&p$MGPA!^+#Pid+8gLKC}nLv8N6W zF*2s8+n@>_pUcyKk#AV%J}N<*b>Avt)TtmyuJ{TQPj(S@(A;sa>t}TK9Y*}S8=zGZ zfiY!AAmhn#2>#cPg;#xH*8~1QTk@Ut388y5by-VlCh_p&cUXYkSzcyX4CO)k;!MQ} z{L(>AgEx<%ayBt9rWoMWW;1cAAVu|iIX0C|fC=l!Gw2Y3QM>nm!>N1V))5MqSDnXE z#QGbQ7)x2QRV=3K2UT*n&BWQd2DGk+1_Mth)C@v(eY$E~EqMuk=!r?KnnJ;$roL@t^{ti(3D?#l#8bea*h#P;82W*T5%dx~^6XGHNXf|{Y)f9`j 
z{C{rh%`6@f18mnmu0|(|R7b&i(_l?kk|=kr~f;eH?ARDZzce97^d}I zaVxor$94qb3gnu5bHxc0UP<0+rSSmDk<8(xSB65tkQ(--r5&`IBqEF+hd#4Tus(~_ zc*R{JS}lJHVfR$1^pddog)71KbUPm=p>FMUBO&Conb8Gf#X9j#`< z#xZ9x&cslxD>((V25+!w)ds#Jgn0hZ)7aCfEg1ZL0eTFr2izP@SrI2kwbp4IQ{PqS zo=L7s$!TbsT*G7ZcHyi+uQ7C}o>01J6J$0QK=!_Cpc69$66<|nN(lKy@_!On;U?z) zK=OB(V2Mr{^xjKz23a5$Z%o0afflgiOBdmdp^oUa_9d|+Ht|dd1DB)W|L@;H$;t!R z|NSjc)>(rzCm7~`?u5iGMa<9q9h!VJ77pF$B6_sdgT?SYU_I|ATw-zPQM(pnUY~T3 z4yXH>E%E$s?uWPjNoe?SE|f^Kpx(QSklfaeRvTN$ZCMCvAppWRdqGLja$dMT7ekL{ zqSd=wsGK3=hW%rpYFRuKbX$loh`aCB@`5}!mMo_15K5)DWUj+BF?#1mROWPntn~}9 z=OT)-U*&%@Ts#TZjFh=-g>#^q-#(4_AUOlvB`LqB--hGyz&Gc0iQ1&w8?Dz}6) z7#;Hv6gJMxQ2RSZ&M+1|L@at5gz1M{Ak8cRHo9pD1SbXW zCBxD1kRzB)d;vk)rr@Kd{^`8I;68pYm?u)+Xov#~r+ZMg5rXAj8_FyaQ{7GuZncj1`3 zkx=rDahIXTK{hE1J~>swpEtCHOnD+~-qgqvDnsyBt-ct3{TCPx9gT+CXSm6m|Mx0K zLAcHy78$h9`4Sx+MnO6T865XkBq~sfHLc&|W1^e!;)c{jvMaPvG5W z5WL!xj7OC!9xI&(fklQjG_50+u2HJ?z%E_#0+@dPP;#qf$E7JlFw>!ogjqTz$U zaj_R0Z=XUwm6certrF^g|HKO7F7T(1J0a(qfmn-lzxo_2o6=||mX6Ja+qbEsGWRhC zFLK0wE_!0%%kOAz`Uo$4H56mcc4Z~ACo{>TPav`P!*Wvz=v*8~TxlEZ6SoUXPP?H0 z%Zp%~_yP)YT6y{A99CnTgi(9a(7Rs-y6>8f>W4}d)GWh}wegr#a~6~#`Z(4+2*W@5 zLdo7B?&Y+Ob-zX2&s--KGs6nP)5gw5Y8>rB*_C@E_`}2cf`!E}%5>ajA>$9g z$-fMQ)A}@-+4}^&RxiZ-ziwi=#StcVF=FPKdEDXZZ=kqdAe-~ArV!+9%xXf8K|@O} z22U+ygMPmVOO~kdPc##9-?Ty%@z1kVn?b!&$>Uf2O}yg?C`q5rW9p_dL#wagm1coK zOSRGBvkA6c*~gTl#-g-O2RC_p7c4K`<^6waN9p?pu4DTEaFrYbi=W`kA|t_0@(7Ys z9Z~Pld5oP{jlo@hqRU+31GZ2$pj}f`pQ&eRqj;KgJz&bTQ7UWdr3Vjv%WdCZW~p81 z-TaqaWg2uE{KwOdZ>fo}(o|2F<@OU?Jq}^S+n3mC-VMS}wD6GW?J)m%J=BV$V8c*- zQOEoork@`V_X3WCq4q5zNHSrHL63O)^ctM+`W8EmErcd*EtdU=Jnhv_q0za3 zcKM%}V(ogF@ynM`cKs?yw^gtnTiYM&x#QX>5?$Q-YKZN2Y?=E7@=y1o5 zoAdEFaX2GV3h6z25uKA|n7+OU22H#QjT)z6^Cd5|)Afd`Umqd&r%zsHDJ4Cp^#S$7 zGA8$_z$>4%L~rVjdso*Gi=XaxOX*&;i9REf`@vlOT^Rb0Blw4EQfIk0WZKwZ#NPLC zay0qpvs>WoUoSB@pPbL#-azTd-ViW+7ti#Z0Vx)PdEi+&dInqod4dxAEz%N3+P=a5 zp>(c~z7CBME!f@kE&U#5v5IvkP*VRx7BJ#I^J={SS#7@{RxF~BI-QxP8;Z5>Ji%l$ zF+$eebtnrt_v4uGnabVN>-*>zqowW=G7q-ZAWFjG?&D{T}7_=b^mOK;~Y% z8oMz1-m1yjNBr&#i`RIgR#$NKqZ!yvWR|C+dH!Samd@`%9i!k{2lIVgH?>P&Mv3N=mDE z<^_GUw0gv?vZi1o8_sm!4KoMXTsP|UB#jYQ79iwGwCVaKxb+I{`*l&$XuF=ep<<{!_*(=8>9nDypJd*asRowa`?LfX=gi!+}LS}*$WRBd&+$T;% zkHhKYPh17zFR9nszmaD(df}#eV{u3S>)?Cs0|eGn?yUR-db~c$BUR+Pf1NLD@;e4W z(l{=e6exSw`vqG3UXJD6MCPA)9c@qC;r9Q~8F-E;D{Wke|L|0J%Og>FqylGBpR;JP zrT`W8=yxXumaqN@o{V_T^2sW-(GS_$fAz)471>zOdmg&l+n~g^ojZ)9c~@5>?D#VX z3$iO%e)MxFSk=Nxe;$OVbhj&w)PR&>oj8cyL65(jQ4zhLIou=)<*k>XD6LX?-hBYs z^h23hqB>EhAy)q07YepEpzqc|Fo$PYIAJ%reNRETF)@`I?n2gse`0qCpQ7SP+{J0Lv z9}&yl@eCAQO2v*Vt)OgMzMJ?u^~;ev$h)#)?MG;lX@$0yIVjmS1El+FxyIS9dDa^HK$`Fah|4GQ_L0GS zmo;GfcP(`9Rz>?L9lV{RC8%9Ls8R-BMSFaRNoKidFT9WImdO&P@&M+dsrvC|=4`M+z}{of2c*9y!I_NyGYv0kHY)Tb5k^iaZ4p z)!v97xOcF=knLtB2JZTR9XX$&{y%cze$v7!?QHBgQUd-Xyk6p0=7Q?`3Ko~zD7+W9h zgF!Iu8^YP zfm0B)Kp&gGYYQb$hhb2cJs3LiFnsD+1C9R?Yu4`q3-A6BlIVVWFLV!9ezw4TmnO>b zURCLAQGl&I&4X{f#lS1wpr{LRANt*fK>^3H<8msjPri?RtMsA#=zq*-RvvaNEJv3g zKcPqeZbE296C|3f#pHSZpzot0=*&~$`cuzwteKWD$jwX$H>*eWoIsZ1or9&vZ{eGx zM&eHKzdDE`zDwYIan!K~`Tp z0i=^ZF#o)MfW$%4s`~(6o|nQ;;;jFlDKvhR6;#<=z{1>2*f^9Na=td?eKo{d-JPJg z^QX#u{!d=}=_*!fT!+bpcTu@59-6|JqowyZl!qA!CiC<`v3nDbsXiccE^maC4-=s6 zS1x)eDZ@jZ*6Ouq!6C<$+4Y`}DQAAd-jVO1#Los{%65#IGMdXA6Txdq1ee=x1ZQ~*R+fDGPqPajt0jjU=RTU)NjurEh|hjmBKl1K2c0*M1KZn9py=68Wqo%xw6@T$aq~UMFftZ%qd#+p z^Ta!OAAo%VpMt~wDA4(L0G=*v!PQ5bQOhbEWlP?J?NBRR`0fn^4L1;ExjLftq^&S2 zwyW57iu%}ZFXPI;%tWuJ{V{8f7xpc*fc8l*A?A|4sx*5OHfQbuPn$H<_}c=tFq!)B z=h>2-lMG3t(FEQm#O%KN zf_WJ`L-vO2u)OLfELju*mE)gd)7#y!c#@&8q=b6dt(`oCMd2s*6yxgR&=U5rM4zK* z-EJ%Fh4b^j_jA 
z#=k?ro2yXrd?sr;JP#d>Ut>!A5gtLDpcQoQa63=g?nWJv+)&(nJ2`=eoyU6p?QlBm zDRh{6qBP-`>iTp|@^Ksm4acKgJ?lKPFqna@xjUg((rq|B;VV7E`!PjkG+S<03C^RJ zppSnfq>Yb;vdx!pd}B1A-};#MHI)bjixsdo(L_i|3x~}&y9zNYY#ePj(q4=Xc-@`O;)=gC#g5(!z_70) z%8z|eDgOAXvRfYnW4G6Vj>-|2Qy1zMw4>!My8j^&wC){1>pLE3KKlkVuM0sP!~W>{C>-~a+xCma6Y9TM@JNH6 zAo)C-r|<3q8XNbh-1>YXHf9{O{X7PWPck<2phQf!-GG@>&hW@Wed+?70jX`AN;&Kj zl$JdN^ZE*G3Y~}2?=8%5*K6LO-3G$~4aI~zlzDOd4WFW}3IC>R_~Odq+K zaK%zn=m>JA4$Cn}F`vK&1RIIQu^q6`G!!h?RKY9qD7EcA09n58aU$`#rl;tNCJO-P z4laVEhu83n(v0rp|M0U?+OOC-aJBLdyO;O{qW&}z(}x$pm6hZ*nCZfv{`CfnC^MwY zNyq1(j0Aal4{pmMpaIXL)zt=g`!{uMMmDOPzux2DvOn=o!*I&D_d)fDaK1J3HpplD z$=co$v*{%{fm^R(+PLRnckTs*E4oljdlfDjmkI4di9`M-3njw@rrdEzrgS?1V}EOg zCCL)OdRq;+kR!rk9L-K#`*G`^MF3^GbOyBqWtW>QanloSSvVN%g6`o)>s+jQtOnOU z1F)DJS6>zc5l`UYn8M^1^z!y3wt`Hi&NAj1p=Y2~q|g83yS!965fz6L(DHgDlPsPC zPhvGh6XL3V9cm^Tx|xA72<*8azQ2aTMn5)!NlzmkBmwtr)i++IR`6h1u&mu72 zzn(X(+Rd^42^z=JT6da`Xe_0(aNrBTTX`UFIp;WXW*+W6Vkov!2CXhC3UPS>arzIU z--2&gaL*QfYyU;_$+QDM=8v{T6_7xEiPmvXAUEs>%T1{U4SPdoIkOUKJ07Fee#(j+ zz5%To=fV7s4$7gDKjN)1gv@%3?uu;8S{(<)HCkedDfM`4qq$)(75E!2z!<$ND0%-C z8mD$(x@jCFxfg=RybiW%Be5QxcVmjrIEFz_!1DbSv}BF!>AZi@+v+xCRENRoXg$H! zf|zaxe_{G2%JbWH6_!r?9hRq-KYug|(lbCLe=ds%0I9k6xL5`Js9B!|V-oyev1 zi%UDKq5Y3C>|;?1KC=^<>Z6%>L18LXL_GxM45_MVQX{&pyo$v)${=xC4k}JmVJ`Wn zYp)iej`A>=2U2$Z>J}bpI}!#j*A_i5rJ>KF0nq1MD`?yfR+XHb!K%j;qj#!3T-Q4S zlG0y%xSy$DA<-1DFd1TOFL3lsK~E1;LGiYea+s}P0Xd*>`^BWqx0!`y8D5rN0Jn+Z zkR>D9;goEs2-g+8jAvuajce%Ltq~Omrm?!+FR*mdAhe#a3N{v;g#6!$LHaox6J0Br zW1kn4&qq{t4q#1-CSXcPG*b^V5$rrVz|g#ox3AiVF$cdgpY~~puU9}oVNdWH5RZHd zxtdSyMftK-D#gG-5Scy`V*_8oLrr?uciPc@wUqL5IzoD4Gmg>hCddcQQGJO&3f*TM z#L!W4NDSM@!WL(tWz|s(p8pJ`+v&Wp<_x^uTY?(5&oK9+FARXzTqX4eaykJY7;@W zKL<9?qZ!)PBYajV$4tjSoImh2D3@MSxePo?bG>)0lGsfOy@AZx^$IH1yyWT}RZhwG z53FRsXck#?6FqxNaQ+hwA!lqCQJ(u)b{L77+ZhK+$4e?~*MzwdyP+!S7Rnc;%hY!a z1og6wT;pjk=<|CDs=FQJX+6V;_w)-pY4#jD4>Dz+KKL-?5&ysi2%u6Lqb^QL90 zpkMt_{{FR0E+xlNzQ}AX-Y|!+*ID~yV|n-3+IZK+E#foITM`nVx6l#0V~dCq67+d!S|iQ~A&^uZ85CW>2q+=j!b4?Eue zcT6;n<|!wx@@SnJ?A)e?qI7>~S}+OMo4iBMSsIYgavv6+Jb_7G5$FwaoXHbFQTY~2 z4K#^Qyn)Y}UJN)s3@bEpVe7<3l-|l=o)-WTdkjLa?dQQ*egfu<>LNC5^rlLnKlslm z#jx{NaBL#E!&l#h1o>4++1G&z!vM(F34`9`=$1Ne=ay^>ZodB&Pmj6*nKbZQ^P1RT zmuUWGq=S=J+<|1u(WjRzM5&$*XbeuEm)$G!HmYTAU8zIU{g9Jt6V2}v^7%r82=oba z0vA)d>%K@q=gvZI5*!KyhlQMvnnL{L4UoIPky&N0MfX)Dka0f>O9Q*WT;lxWw`Jhq zhpf@15*wb41N{%=g_`(;OYghUz3nem*`OO(Jog!z+?oQu-4jqzcz~w|FG7!{m$)SF zJClD8L;LOp=PipZiJs4-?AX7;)vRBFqdA2HFy}X8e#%Zv8 zPz&gd+mAn~pFa5$vDt<%#bC21m|}C9$E>=;mk)f8OEyb{&}^Dfynh5SQBg8mqcSf2 z(O))ggN|r!;fNZwmZ*?qv#e!$;JxfQZhdnZYkQ9a=YM`-j*XeP%3aqihp;|IVswux zklW@v$=@%5FPjfSpg-+Eimron-f*n4ufn?DcVkP5hUoD=gtrYng?|_s2yKJ*v%2lC zAWU9`K|6F<;Fw#u%8urpM;+1TuNH9B>|lB$${@4wA&V(_!2DLy=VaCt57Nrgf?q!+&-ITMQ%a#e2kD;@f8r*(;bQq&BM8Qcd%cap19Q2 zR4iFC9Kzl2aE(!MXuGQhk_Rf$Wc6rh>s7#)+&Y4u$Ht@NMF|*x-vgQ9L!e2>WFGIH z!~8`nAZpHjNcp)DGpkha^SH4XWM&Ta!w+L=$8yN(xQub57?!JR_?2tMg7Szpe-mvc zM4P>a*yKFu^(_-T@92wT^Wwoyewm!X^Kk3ZBam`6ftQ@$4XKnBPUw;d8Q*lpHbGAq zWNIil^goG`uMc?pfCnI7(}Ncc(1ROFiC{6vObF{yjOp91VqaAPDF5`t%E`AtYim1{ zYh?3PvvkFVtOfMhqCXpDB_vJHImTpyUNd!hm%L*o)dx_1v4gLxk_cX}q|kMXmLLyL z0gWx^WRbH{u=TPoN>_DZKB*(Xa!U;s+->Hru9jdKBgM;qUqrP=J}ch23vy@FVfw3& zknFn?Y~Hs*=g@a(n;8s+at;l-TVd{aa<}wffgRGNa9s$4xOjTDQYR?T)djO}o=5X# z^RRaGd^GNLiu_1FG5IQ;IesQH@0q!v+z>4*Kc2*tOK4?sn zei$@WTc|E+#OjYPA*f~_>WnbJ$dn<(QMn;&(wNDu*4!h1fj%qgy8=t>TB(0mz&c*{ zN2#?7J3Xos6;A)+;0QzUY1B_}(~Tg%$3oC~M)%D0F_hh|%~2=qh2a;BgiWG}m^gJe zxZZEY*3vcL5|j_&w-4gj_u=Swaxaveq}~6$C@gw7A7W?H^Kt7QnSA>%tf1_nd{r_p 
za#DbegA!~#5!2_=JhEG>Ec!2E?TxH~lr4$aL()w!H!opvl@-u#Onm+Bvn2YAHVz;hfy_a?)83!T6kyuCHMedsO6OLF&MCr8eEZOHZB)GLf zVXr+fKV=<+o9*N7N*#39?x6YeQ1-3h1^4^^%>5<9@c-g@^0PengEHFvhDgNx1NWfh z;Z9zvbspvy)PVWG6Hxxg6K?n11tgA{GT*9fNZ&aLE$eHTrGGs3Ez=VQ&~x0iF$g^L za!})L7<2vW8+=Or4z|(RY;)GNgHa*D!}X9uEnmlvp8247B9Bw2!kJY zK-A_`oc2Rg^m^AHI+j?2wbM1&xVHc`mY>320}O z`UYd-@Ns|Yn?8#H<*GZ(OHTuJ(lqdY6rFikjO`c48R}qPpMf8{Je@)PFSwy`p27jiC#+8_`j89d(+kQzF@)PNsr)!g%U4TLW5~JMySQ zFSy;RuH0tVUdXZDh~^8wV>>(YTwYBA^XR*v*4)Ql1J&dnxX353%7j;(cp|+f{-Ual zuz&nF+G`)?vf$HPTe>1+k<}wm`z24O zvvtQ|+PHpvTjzM}J#P`HtG96d)xPMM{~0=rP=giSiB##oP~Gx~dl6@sKRXg#cg91- zkp1B4umj`X9D*aOqOg{BQWAYHKIDNvtd`%x>kkdY0CMyA&dcCIX`xKL`Z@Qt%!gEu zt0=QgLGzGn)Oq_wuHR2bxFn}Lc!$5B=$D#YYQ&pQIt#V!??CSZyC7uLRtReC20BT- z_&fhwkfnE?zx#8JW==y`)X>LVUR8khyOhio0d#e6Y_b0psc5x08vG_%q9n^soVOo1{=mlv${=p@#wHd=({ltpZcdm$iz%Mx0BeKsIE}sF@l)~ zl1qM^Bdop?i#4zHSOxqB*TVqgi;iJQaw+vi7zkN@r&-ngD9C9E#lRagK;w52{X)lM zz~5zv=lWyhiCYlpxdQcVCZOAZ%h0fkzH1G`xMX<~ca->I+Q0KP5er*DC$Sxix1YrF z=FmBNWg(Zp?;t3mv$@Uk_GtcZ0tEfZV6}F?uxL{;lpeo;u30yEjoS=n`;c7!+<|y%204V=9=BoU;JZ8sXkfhCqgV)GGJth?Pe{R9nTUqFF(^T}jbQhgGv$4Q4 z9YZ$Xg-e5XV}0XCOnY@e?saZAiz;1$JB&?450A;LvC0#)0oOqC`YEQ{ZKS@1USNCs z4;1`c#SWb{6V!txIvR2h-eEN=8wfwq2YWBi z1u4tDznuU8gV{DQ&B(6xqV?9lTi%Sdz z!xL{Qmy??@edY&9SsX=spBrFv@i)T~;%cbJB&>}VmCs5wUPq+pyHbsd$Dcs$gb&11 zdO}i4D}?ZEsOa}Yqx~AKi5=#Lo=xMx-_H;~Y8w#b>(G1XLooUN6;jUlfY;_k2zyEH z(Z?|u*oWBQ8ejBkbCxA7i$mLXd!TS-4pjV2T^&VM^gJ*Y`Yxf2r^7l}mSZSviK@b; zWf8dLNDZ@Tie=@Ah@l=Yy8SQESq7v;uc7jkK|RI+QVBqpZUC?E~~w^ zpsRWUVgH$liNWW{6>}P6rbS@3jU8lesz%+Xkq1X_R%jwb-z5fwSMwZgJ!OQ? zLr97@Sp9g51;d;$;6V%~C%gjh59B(tx|AXHzRMD?lw;t1^5=O!BtLmNQ=QFZ!A|+0 zV-cb8k(mmygG=e&Sc(l3JBsG|XR*i+spm5tO5NY%x=H&W>&YP=P^yCF%l&Zj?tF~s z{t+58*J8To8chE83ixfSfZQGXS&h#|UcJ*mY}emZ*lF8}^G{rS5NdbiLS|MK?eU)Br6np*XMEHs^{x1xk8K6>6IZ$RF@VYAQ-~)ri0SA84o_*W z7ru`x0v))yUPm4jc^UTYF%pCNnu_xJ12A%gf#}>f8<*usgrZjiF<@pGBIxD+? 
zC^ui_g&P|2_WbT>=k^UUU*syvO^0Ieg2w_ZhG(r$6JL-Qc%!6oz@%qP3}s;&|;2w7jY#C|1X!e&ZTQ zTAzWf4{!0=1>MEWs(eLhNr>+ibKbN6Ye=2lx*hMrk^Sxv*R_DF_vr$2id9?dPUO-5O@`R$ zec&OvidHFqLG7bhG}%v`$jHx39#f1-G?yRisxAa8NAl>l(X4ImCfM!Jog7IrraoXI zdTL4W-H&eK)K`_{m0tzTtLun6@sR5`cA+cxLkq8&7_f_Wq0fKgNz1-Nrv7mHe>5Tf zP&IUY&=AvguW|jyE1?fV zoWCLf-pS>8l}Hga(nAzXt5>&%!WB);W>IQ_PoU#%%5XOO9>X% zr^BG`hN4nO4!&I}JR+iw9C?>8^n{cc+#<1@zGJ7u3Xqy#m3Z%82NT!OzK|c`ehUEy z9|}f?)Jeo>X@jQj`a+oagziLHaEF-GQ-58-+-%BB-LQe2BkxfEOC(d;m@8~sL-{x~ z6w<$a1EzHke@IR7(MextBdouu7mw2XpYqX6;obaj770Gp*{5k+>YPRhn&<9UI*_3=WpZC z;mvinzvLS(tyu-hqko`O4zS^T87BDFptr|wpcWBD?rB$6y!|3N0nP2maUe|`#7ig8 z@4;9#G3d$x=Gh`a_xKt&7s^lBoOrb3Rq%)0t$Z{#<%Vb zg>s1;jH^FG!5@ba{>lK;LvNVR%_=O9n1%IEHi6rN323%!4<@8O1E)olJG<+Ti&q+n z$Knjcr1Dha4G%=Cx8$cWU%;(CPexOXIOzSh4sC~ygL)S=FjcdJVf#zLW}_}`Wbop+(HOQw57lnRJBIx+5d7Idz#L7mAy`?}xm#PdICHdda0cR+04kGRmc$Ma%ETFkjIGrMv&bppA4UYAoZ{xijHdZ#6M% z)RzhJlvzwRM+cMqZ{ogp3fg0gVH=heK<1sBn7ic%AH1m;&;6w(`uDQMRzo|Cnzb96 zCtkq(8E^3U8UwLZ^@=;3j{xWME5PrW21N9jj`8alMUF%js;hGrQ5z4{i<80P?~iQFp4S*-B1dT-8R(VQfofz2%V@C&W0R$zKeLr9 z)2Qxrtp^zUN8xz(0AI`AxQm zSjWNc)1ml*IdvA^!cv!F2)K2QJ6KNOp;oqFoMaBc8~#JDvqzz{+ZgDrl@78?8+gr` zYH$fX0N*B>2$`-f3e-OhuYc@^zW27{;BTSWnDP|DN^YWwW+(9qU3up}eGs+h;<~=- zV*RLezSu!S{9bjF<~jl@6_*scmXY}6?k%i!)}*si1=#CQ|9!_j$+VjC<|_)_a$S`|JD`;YZYPPx2N2`>n4WhMB`4%B3Nwe2I|G@AuF<7^2xTRsMz@i z?JjHq=XXCD>?T%_;dxeexe#X89YE#DQK2~z&5&y)wC!vy3cFwZs3Axeah(_ zmX3>5x3E$VVdS)Tn4T~K(`&9^{cqcFgF^-s{d*3rcb#DwGq-}Ik!FQ=_Ht{Zv0&RT zorNy=hz*lcFkeE?g-gR>>)A9^=+&b`nHtk4re{{`8Z4TUhikU>5F5X}h2FhNz@qC3 zi*r1W)xBP$nN=}b%t%4A1rx|;ISV|e_7saNjL0*;9-N~B(P_;O%0(oC%d!$Y@2-T+ zp~Wco{>t=*Qtl_w4YP=9B<;TwDz{MIY6bCbt`Spg;|K`bYmM?3p3F~AOLSPGiOQKV zilsA`;J>HHgRJ{i;ilaSVkZ$Ndtn7CMm&P#&J>Uj_W^U+UWl202bzz$LdNy4XlJA@ zgqKi%VZsn*U9pfw|G}BVHEUG;Q^1nL%F+956D|)>7rs&EXZXuDG>1?O`ww~VUpk_y z+5&o~kPpE%4N7Z0F-h47)?NG2{$&ezyf_4Df0bjHLju$cQ)0%KB2Y=LKx683R1VrL z(SJaU)p?DO*J>b?zD`G_@+Egzev$e0Xhr8BDb^2p#mY*HVCnGHC?8}f=$G$-MHw{5 z&&grRs2~iU*T5T2-b8huX9^7JiS9NEXtMr}Z=H#M`sEo*yLbk|dQ8Jf`cV)t*wCQs=mUb0jZji;#7jP?Z-n_6Ixv8m)f&Er>I z>IwSm=vndtp!HQQy$Ak)wEH*k*I#-<{j1raKFt;i=e9wm;gIm|Kuw@C%c)%D;PW?(<{XFQ>F%qJWOkhDxv24<~ zbF6+~!ka=Qvc73`PsLr)=S0gRd(lr+H{WTVfb)fHwGoUU!lvH$S3G(n)Eac4r=)0;BJB@dvJCkGB`Q%(<;Ih*qnX-swCSV7E@HMQ!vU1drzHA?FQFc~L&U>hP!{zWc7CrxFYgN6)0563 z#*{~&_7;pH>0CRG@;9f>^5D_{VuB7PgX%++UHhGXj*Wx(y_&f6%uguXYJrW-#3)_4 z4y;P=zS6Ljq>$3CY`gmib>_kSsc z1kVNxySAG}jQfQ`W|&Fq3`;{Yy=Ij@+cG8WYbBHkRNDcsqnAsm0Tot(k< z;qb$1^!|%O^7#goEh>;~)~0)Pyd(D;@gA#>bih)lW{AIA3ptc&N$y)t-I!`gVUOp$ z>%Zr)VMHwkWZz=m>T7uXng(d699M4rb*`$5X6l0`@WH;>pjmPQWywlOYZZMSg4S@G zo5jRYA#T=Kn=^~sXp8P&PLs>~k7MN28(fi@ z65YB`uGUC_DKU4@6>J9YDqHaSITw?94MwMg<6MtnayFx495T=kAIP zd78pt$1eP=r-v}IHij!}ro8pui7>l=GunoAMTPSbrcSO@ZN9tv|5;fud6*AVuhAKI z&sFBNeJ3Zgxy`kmJ_18#W#STtPqd4tzQ;F4y=z@zdC3aQo%I1lTh}_~p3DN#NhL%Z z7o%YY?GevTfQu&NppCc#_KycKb=7qE$4N(!4+sa(TMyyPjzb{VTgF{UJq7Z?&#~OK zFJvxS55X_CqrJm)>a*>|+AqWtsu_*`u@b4)g(|4N*bOCF&pFxTgSt5ldaakLo3$RVc231W z%Ch_ZI0m-2m!hXHb>5p)thDVX7xShZL(Zf^{+r3@pVh|6mgIA8X9uD0frFraL{5}Z zx7Z}p&zL*zEoXJ`2w2y(V9e*|I3eygUilJ-E=B_|@M|o0r78iT*`ZRRG~UxfMB^mso>GJuK|ufxN+U&K2cZY;Ew>+q7Z)HB&GpzY>P zNSYFWR>xj}OQ{+3ew+s0NnM5IeSeTIq8f)UHRMC*O+l@#^ndI^ob<*NWB%`)?!5hz zo8bLuH#cDv@m200g4Xl-DBgbzR<8bq(ZSDfb~*W4!xCA3SAF5(4^2MBzyNLRmqAkC zEzU;S!AU-?0Qs&5j*5*9kQVwE94Ex3;5d zX&qkKtVB_EJ{N8_ht3VJV8Fc+Xumj~c6;HRrJI(J9=RVSEylwA`9{3x-6vJS(F-6S zI*7HFJqJ}-198CH!Qe|2*o^pj(RCt?V#7=fZgO)6znCVDNe z;DV2ofx4uFEgPxJ@2jQGo$E^Q`J#X+1<^e_XJmIieM2^w_xu-2DWjM&dU zsPEYha?urO&4l4}bsPbH%10Qp@iCO1>W%8=LujU%4S_Nbyl(ax19)F34EYU4dv*DA 
z18v^_Zv)VH8ZOm#&=s~k(-cBY>F3|Mis@BxSl3ogUJmN(8?}ICT^1^5E4Tq|WspAR zD*UudLmQU{e9Vgl*~C|1`$2{YstP2PH{~)5u-5H3RNqVllhupBzhJeh;lVIsskPJI z)C5KT|6z)x3vv0iA@Vz&{X2U(8f@zhE2ir4L3%%7R9S`Qt7N3XSbxzjk7^vW6hXMbXnhI}USp=L?Y3UL0`xp;}kS@Q(nK7d{ZZYcBdos55+DgEt)f1r@$s zg_1!FF#qUi44Tmj_G5Io^}7vu^B?-c9rtd0>HA$=;Le+nJW~Og)WfX4)0cQ@@5o2o zmCsvq8v=J{cI`&^LJgs1{ePsV7C z+4S6hQnep9;^onn73>Y4c{{ zWqNPMkBEoydWOQC_O60G_Tn6REaxPn)~Muj8dVKnoY3`SEzOmlf+Ra0WW-)ij3UQI zo&~H-JpwI*)37~gg3jO)*hPPb0oCMw z+WrRm>xjT*#$ud(qZ3<#ZliY*abb?#a&v5aN&LpNi|c?+BsdzkV&UHK+2 zFEH0pz)EoMi1r=@2uTXATuq7Dm7DHnD)VRMKuh_X;>oJ%RqTpYu++$rUNyc3Uti`-;@Yuk##S!dwE;WBw1r^#4zNG-gexwhtb;u< zW8)rjndUStx1qbtRbg3W9pccbGZKF5-A8gMH{%!l^QrM0nb9oZAh+^!M_|6lj%G3Kp#e2&jM#B zTU=648SdrVAvAXj<}FQ!V2kd8++2g@s>We?zMc>`EE}E-EknsFYi{eOw~&_Dg0kJK zrEsSoy8eC%8fCg*>eB{=R=7NLQTPE`Q ziSnBdID?^d-gqGc+xbi3dih735$igy}f&V;ipdhrX$e7%7sHBI4kb}6v&eImNo-G#_Cd7O93SI+1`Jb15J zjAJaX(mZiLGY-0oh1IW#3%m~f=U?EGZZ~1YCIcbCwi0D0qNSr>U&5x(lQ{psw0Wc3 z`*3qwJ8H+m4~7Cjv;0!v|b@J|R+>_^+D9+-Hk3T&$nqjiBZl*EN$ zT3!XJb>FCD#}icUqmMATXOh%+8}05hUVvnK9h1K@=dSEJ3X#uygGCoVu<5%13Wt_+ zMpLx~SFf@-0E=3t!gpacT6ra_0%3RLz>l=}DSmMM4|@L~I!LF9Y|l-jOz zH!lQnu>xmW8VZHU`4F1!gMkLwSf5=F@(o>BXy5sq{KO9~Z6f7NpBX^dx+0W^i+NF1 z2P8Rcf`Eq&DC?%fVbAVZ`0OzkDG9OHP{N!jMwT|MMBbiFYFM zE0sz*16X|9RXjJM0kcb+!8=rp`}{s&$*g>A`anJa>0PXvCC6sRFvv8c{NGL!mPG6q z*?)7neY-!QdP*GFK|0N~@4^z}Ml?C$!Rp)e1?O!ooM^>M?D+2!29hi&{qd;aoR<(~ZICcGpkTdP3Ue zz09=f9>n@p;-pbA5IT4@SM#|1|BfI0`#$kSUGqR(DupBwGQ%@(VV8{-1SiwEV5BBC zh}VE`R3uz9{Q#Q-+o9(AJuH$HVNzoe+chJe#eDC?bB~I_b?Fyq(SMJ!%Ar|ObtZ9p z#Js2CZ+I`;hi|9{6RGw?{~dp`O6vD{TMY*1-5;5k!#d`if0@-qJRx85P-gh=4_xlM z3ZwQ?hU@5L(2yFbsy_|GlhMXPUd}t{Uo95$LVtnBteep4*MvIv^aQU%*8m14LDc+4 zls~DIYS8z6(v>S`<1uQ`jq(XPkT1?8XLhqqsP`@t5NWI>~UU0$2CCAZ6 zRRN{yNM<|Y1g8JJ4W|w#Kl+Ua5WDmQJlRu%y=&j0dBtJ~C;*uHx3-`jn4Gn4r2(H* z6@t6Am@tc3zpyzb6w#YG@kp2uJvJf=B=|~6M48Wfu zv}?O?3Whwoi{B5w=ECh#-W6j|~KaX&*UtY^f?Ta~>?$tfuql zBXq9IV6L;?PzH+J8~heb%Kpd_NB#z4fh_c#DvnspzffPW)hxr#Zr|YE z?ry?>CGAj{Rt7Kc83?8ubcE=io1m56i|D!xg5Uc9P8o}vXa^$7c!CyNdqK$;x5QpMk-*}=y~w2*w^Vos;y zWQ!hxmqZWLmm5@d`y$w`J6+LZSUkKJWk7%0Jz6?9;qnCyXd~aviA$T&U)f(3b}t3p z2jp;)Zi~1P!38j3_j@?oM!%=Jt5WYX&shGjALOmOp_%}@p?|18Bw7E%It=Oe*5xO@ z^`$QMKk=;N`xSIC4TXr&si@R{uBuL_nd%i$R-D;9>_^D(KFcHE!CoqX1WvY$1)dy$Ld<} z^7bVcoq;fQEA{lld@$i-7KNy+v9NPE1iw_{5lwimu{zJUG*BA8)NB#ykMCAb6+$D~_xxWdFXlvPyYym&F+8bKZ40&+JV z&B8j@127=O6*}*H1RGYxN&3H0S>BGpwB`HZ81YH!Mka&$??}#P=TY#=eb4+fZed-C z8rK)mEYo8NOA0v3{N{J%!w+{A+!uYr@7)72|2Ewn=e801?l3l7e~cv#*C6_p9S%6N z5$foEYVNZUP2&EB(y6~7aQ{WRn^(i3UR`)mA6qWxdjvC4JOQiX2Vh>Q!Kch3CshAA zndQSKpmSCVm*nBfd0)@tEWTaFrdI`A&iwtXZFwk^kDdqS;edUC#d3 zIdmBQ6MS~+^A5dt66<6Zb9P$?{YRaFx^@LC{Q3;lqflB>ybw*twm`bc6Nr!&Lx*)9 zp6PiUE0;x}uek=__OTi)-fY9)dvyixe=EVge=Me?42I16HH5pPZ}er_VeTUiPI)ye z4Bv&z?~cU6nUAS!w-D=+DbqBj1*~iZ@X`z6LRS5NDhY8LCx3>f88^V|7jj; zl`Myq^qp*QamR8OH_Vn+;>FK3AfMRD)B&G3`@7ew+j-P)r)^ z7|{ePp?jr$O3t%J%L`dLc*-72SjCZG|d3D9>1))`hZ6N?w<_HPdqZ{GzH!~LqF zmzNQ~hZFmBHTn%j^2lpo(uP-Hp0%82Xn*69J_%56KASv-`&reO2OwS80m)^_7B?xZ7}5bF=22$i+eHvLY*BTRqrp<5g8ZKE!OM*2l)a>w`+^+oAy-l4B+E3Z?PN(h z;oK-aeL>mlA*cAehI7+jf*m2$ja~Q{W5(YGcRKrt(j28$#s|PF+8KS{+Eb5l6BvX| zL+{}~*h;}O_dtjMoJmpb7 zW5F^7)K`gl_uFTgOMi3f$dz&ZlXtP#_0=>%gE`j z2!P|R)a$gl1Q8X7!FS#^lw%|wKeiR(H2Y%B4Iknxb%SiXM)2zSf+eQ?f+a(v!0x4( z*Ep|5p1nVg!C_R6K2(eCH`5{X<~1xcs=;e#l5vT19h9HGi@PeVpfIeG&0p<>Y6DG` zOShj8e(4u_W*VXaPyB?$HP9Ts9~(|JVjq)keEfqw=v7ZU$W8BHgKH_&<-4G%c@xN6 z?nztsT*Ex$Tj+n693F{Vp#Qo3fYu>kowgjtbpL?q)=`*5JVI5xA+I|24!;lTz|3_k zn0lKIXXa?gZx$mc>?jM(?*qGDDzvk##^{i1pbl|yoVD~2Jh}V^6st3ctzpH5&KiZz 
zXPCH$}u@yd`;)hC5I83^n{LVZhc$9xgYzL~`(yB*Gy>B8-N(u=L_mh*8d^VD2^C9ML+jS7klTg$ zrhSZrenHwoK6&Nj>q{IpcArvBF3=K63qEjVk10Pleh##$T2W^~AV^Lb;gm(!ASoc8 zlax*1ws!hK$oh}a^ehUz57sc-5BD*#zz6gGZif3Sbokc7GEnR|0;u{3g_az8-H2sg zb;jJ7c*-*kHo-P?%Aq*y#PY4VxPE#U-fVX`c@XErhVM@yc`LC)lDC0Z$sd%Tsliet zUd}(c=&)u3x3K3$fTB9E3af*-3H>4Ekru!FO$H?Fdyn$;QuaNTnARtW&kWXV<;_x@ zkWz*&&xo^dECXI-JNkedZ?}Qs}DshN8`8Z}~ za<&t9V!|s!KGSb2F|ORuyyhnuETc2@pogFy_5%hCaKgeOHD~irC)l>10GaK4bZ#x+ zetx6jOxPjY%XRy_BBQ&kr%#>>nWtCT-01K@zT(njWCfYwhL(LahN6Cs3&idQe~QTA0?kk9bHU`-#IS`OscMfufr&uFWQcg zHFr1<$p>`q%DCi;NSN@I*p}ViGv`qsSXpEhBo0{!T9o0Dj@0C{rTby5TX#N9Bau9U zH*xmNhp4u2ma11)(hTf7)LB|XP_7o=s-V8Q6QYgLDVj&z!T@&#_|kU`OgCVImkcaQ zX@A$h1+T}pVWDOi=-cS=CKG+oqr4ENT)R(miUAm~X%M*2o(%=7Qz(y90g|dX);>a$ zPy3tBVw<%1H`6qESEoy8<~kSr6E{f<^EWW{6HTskyfIoX{{p35J2B0613H-x1Nr1s z>9w4FXgzcg7Ti9HX`VS;_VhH^sOlzIbT0+@1O?}YQd~H#6_aKLLtOAJFtMv_xIp`U4rv-v;=+STNL?iW1COY&f<3iIU_G-`Hz{7OOW>A^?ocpR0a;>?@Tdn zF?yKAK)yyFv>3C8HN1Qa?k7vZC6=K)z6VBYnWELC9zuNk4@@JAWQD~iG?n~6KV^t3 zbGz~>rKOm#jm}!<-IW9Otb~p~GttC48Mi`^0O)E~k8%s*)7*9oG8 z1uE->#t>$48&a;;px@oYsFa;hrOX|Okqu8VVtWQ|zNo`HFA+G4?7?6=LyOltz#w#6 zGiUYu2i{+&#bd@8*C!WH0C5^#*kzAE?T5v#@zyBDPAt z4qo^}mljejSrB|q<7LUqKF@#_A$IN~{7``(VX5Br8 zre|Ig7uN#9ij)vfxt2gzLy!;b%be}%QNH7v%G*AI%g%1c_GkMc@MQvceH75!vpcas zN5PYhJE;5&P4mmFU@_&aVj)cf@_c-gG20~739Or-ahic`iPq6E%J}0A; zR}O!OE1W~~tAnes>Gm~9Y@I>5>s?ZF&=XGD>F{pV4{`n1D2O*Z0WPQZfc47`4Cz{m zve)mR>3abyHF$&lM{4nsjRP^KjTo&W>hV3@E&VyByD*jb<7p?m;>sS`=t}cJNw=-4 z(gOpDHPFg6rLKe!j|Ae?58|%awxi;@J8bx?2!j1vG4SSSFq}$V>y-^yxWb(}3r_%b zgTQ0aT?m@fore?pg4tGLXC9jgnc`}xD1Njw$KWtmo<;muILRLFrJO@!y`!yx1SYj~ zVC%YlXkTf;=^dc?%CcbQ?MXcHu`@7<{IeQsk3)KP6AYNL6*pJVpX0{rxTE`aEQwK1aaq#u3Q3a07|ELe+5MFm{H20MDLsG@eHc{iWnV`7e}pxZgqB zjbp)J;zZ)t7K3`pM@}g!VTOD;+FebBA*as45cA{kxG9$Q2pLdl+8+#dJYe~yyC5Z$ zqdiwOsI?7=%k&j9SCW6_bQHcN52AbgPpIvvLiOG}mCNHzkl;i2R?jow_eF&byUt*K zh#h8De}v`#6k`VMgs;rYq&vZOPCc#-rIn{}i>6qxq8WBO(PWc5ObZ4#zJhLn<#(b&E-4%3neEMMF&CBC6TF(CIGJKg{6&`$RqoL z)!Lt=XFP{B)n#$c+l#r-E&pQVI6LqfdI_yO+aY-01X#H%2MX;ca^Kf)1>0zM4A9qy zbN{4(K@IJu+I4sni_NU*(*qXHcgJAf8(Syd$2yZ9yzRijv?JDo)?H%S+f86{_y1tT z9zDLPJ&Y-NPUSsg9M-0Ng*pp8@LlAMO^XXb-ES@a(?yrJvxeN<;Zme!+r&z5l5NHqs5%fv|YqOlVkD&L%!ov zGkQ%G@wuMPC~7HE#nTME(7h3(Yu=+v#UpIGqA8S#DaXG_6J_hiaJD1%p!&jRb{mX% z$>KAST(7$u8$q7m`73T*_uNWQ46!}Nt{!+kJ1$504%ml9Jik;Npnp^3jA zcbQzD{zk1#O!=uR;>S4RfW8oJ_Xvvq+(WVSGg!^p525!u09KBL!jm7dxpyeSAIb&8 z4VWNwfga7X)(}2hYw>=2G=&UJ>eClqfJW8ayfp^WxPej@0&un5bvPGLK-ePS=%L))6aA@&{Zd6OGB znbU5FkNbnp$HpP-y9EPA=YZrwsLI>uD`xb(jH31KoCcluP5(rox&D7>azl&-+lYBL z@ChmpNl^G(UohQoByxC+tQQqJ+awfAsUWB42i$Q~O zI8|NFg8zV-(zvs(SoM`0p3w(b?xqJ^eB)8bWuKX(M-RTKpD}-^mNItET2N|zfNQ^! 
zz&<_K6Z|q0VA8&Fu#^2j=N|u}#@=|ycytr&&uQ}-p*>ifP>yadPY^G=FSvi01Zgep zT=e8F_=WDj=a#*KTwTgt?%#<`rlYxnw6oM}uENkcxhzlbHmI|CE_j^-dMw%n8Fh`| zvf?^4jMN5iUu|qxj7IPOu7WytG1ufm{)=t*;p#y{K4{QeY;`$|Zp~d_l{dMKj5Gy{ z1xA9?65_lq6~LvQ18*;VzNGICC~V)3^S{@D{A9K&bHy)gYF-OB2k8lKCmHkpy=2mt z+eO0riBHJ;Lp%Q=`k2?A2(_E&J3pFo_8QyBOLU(3kKHY89)1xEJ@0ZR=5tt*YzyaK zY77!lN|sl^W+u}xhm1(dptaosGnJfX(5IovtsY&1$U}c;4NDneLc7_c(0cF`%8j3J z4XxclzTkvZvZ;j0b(eDXh80+NfOZP55eUL!NT_LsUpwdyJ=U0SS#ulZTXt~E2d=}> z4>W~X(Q7cAQi4rlN6h(eFBhb^2v3mCY>q*gA21MVKNmxQZZViRcA~gY55322goZAQ zXcjb-EA0E8H7UxN_o_2oVah4^uD2YQe|(5(hfZ^1Vq#_Q(c!)3FNQ0A_rd?`JB;k7 zEu3>r!*Y*a7@G1MT#X|zGo~Rc%H=#1&80c5>?b+l%OLQhE9T8POV1kh5w4}-?IXs5 z#<&D2^_|fqe=(}LsHV=`IO7++3je+bCC<*6)M zqCx5Bgv#-K@yUQ(az2HCGF++(Rv2LWluYKoqDGpy^#iJxsa3*XngUjv!+|ead`F-c z@i?zBGy7=x+nnak3tlmw-!D)dv`S^4e~yi~{T|ZB&^N?Mjrz8KFg@oPoguDd&2}YM z>E*-pZa3o7a;q^W;sDyZ}8%Yw#MnR?H%^7OubejcT82>B0-GFl(|FuN`5?7q(2~ z)Pu-H@79C4W~x&^~m6iCze6vr!cOnSevhlya2z* z1a9);2&eQpj{z&Sg~-YVI5$R^8lExOqJ>u;d|U64(4GkekLR}46C|GZ@hY^Dk=^1>@G>Rfs5erYjB+9z_ z`w?gFULID|65Hm@6U1R|5Vw9D*tBJ^ptV;3hAu;I#UZ9#kt%)cf0n!)+c}x-VYc$p zMe@IBF*jz!2lv(mr(_eXC7&S{7=yh0IaXbcz`Ah)CsXZ_O7fSo9Jc{5;HDDw> z-joG89ets&t248HM;+Xl78beb7E8;c?$_uDOq!R(nZRHUdo2N%>zA=v6$|nk^HrAn zbFh8s1g!B$rN5V@;BWXGo1gB6Bnw{*Eowu_l<8c)<}o<4RK&+Py}``XI)_4+cM$xz z86$O_Q7!w)U2fCl+fS>w()nAk%2tjOj&$QCWwuPqF&W)&+{Va?bgWB6rV9LN8ptI#DT$U0W(D_!1Q(kwha~_{9#|n z8760v{;yO!pBf9sh7BlO?jo4fJqC%n0r?~XQevYzcl2${X$vL=2rBn4ab0Z zOITys4)c%uU`uWmWbW_AmtVMq(h*@$_D>N+?Gx~NT@eP;OtH|q34+!~;fS<1*ra97 z%{KT0-sg3|b?Zq`x!(h4<1+9bF9jKI!bu*JS8sL=lMbNow9S4lZ2&PC%Nw|aGt>i5 z?*mE?cNNq`Lm{(a(Jm)IRKK2;&p80;eOz$B|8oI*O2GQi9xMnl7T=OcIxr9SF+GuON_n)9iWIjXzq7?asJiyXg5Cz-djdvpcg~K z+vg$4#)XsU-QiaCp?7hJ1gxj|L;l3e;FeND{<>^58AY6w)5}m9W2W*NbP#Og*JHlP zKZplX$&b2(1&HW-*xcgi6grR?`I%7a{F>`jXF*1?h1v-!9@Y%D0Vd z)EDCSn2Jb$5EoKlmC^(cdQkhe^(+kABx7h&lC$AyC+dZa?NRuvq{SxA?yQ1xBO(Fbk zHD-4ACgyD+Q96#GZ2BK&J?{ptDgA_F=B2^MtzyC9dJS4!e8mhlP`o!tN7%f#6Cl_C z*T)<{jRHgF$Je9XoNMSYqL>`JM$mHWI$BeHBKOWBNIj~{+s@qxwZHeH#iI3y{kK8s z?<=hI!6z23eHFupm_u);YW(Y}0blc~3a(wEJKx-NW^y5t)%_}ANr&xOdDS>fDtU?~ zTq74AO#4=T8@ZQWFwa-S3kz5Ucz6t|Z#@7z!y<5F_t1Wr9mvLaLEE`qdD-W1t}VX+ z?Q<&d_E00fu)aIc{sRJ{qriVh3V7Q_bIaY9L+9>qXmIZxu^0DZySWj$8!RANB|*8| z7$*%oi)pd((Ac;iE4vioq`!!Vzi2uRc;Enj8yZ0&T!#GFBXGi}yWldg5!|&rvB;I4 z&mVD8i?h1q(VM~wv-feC1O2em_yUC7tHmgKw?&2LpzOvB2aR=+s`Ts;s7UM!Hr6+= zbX`A)|G5w3+L!U#Hewa6dCNTiy@blTH0F7?H@PT?ubxY8k=RmjJ)MchkI8Y^e3#`4 z&5$9@$6aU6F@GCRrl*}nGdcPW3{i9MoVyB=@h3Uu+A`)dDF*$Lo`UtBMofvi3gV^R zXs&L|n{F;g8_I1Pn*0L)al}tcq<+wsQ!K^r8!AVQQu%jyv(Tt#tTsT2$?7-^ZoP}8 z9GzPgGok(KJcw}A6AbhWFxWK<_rGbvD*zf#CI-n3k3p}}N7ME`JwI{dZ8`&WZ2?1n6^sbg6GoeK5pq-O!9A*s3$QthF3TvJmQ3864S=^z zy6`qG|G@PvCFt<|8)yG~CYaQoViM{TJh@i}tra%Z&(C*wuYC&h?TG2$vxyZZcVnWV z520YlL-7BLp26PDsL=Wh`Omw;j%ZCjal&`XO^!ra=MJfNWLLg5ZUvesr=g`)BWC{A z;jO3B9&vaJG~F5u9)ntO!VP-w+Zr+Z`%k&r&wHWM&QOrAldymqeSY6&eZD-kJ1TqJ zycxUh#gtggePG|EAo`haD9Mf`wV%6ZxLgvfJ?LA*Z|uX}w5 zagc!!V)-3g?Tv)A*>5o4Q;oAyPow&Yf~l=vs(xyxW1Bc0&fPr&UwY~C>=Ml~M%Y6j z&HfVHFF{N1R7|MvZIfiqvl$ZdXyP$c60(CUi26E?pU~Ur+pyxgTt|{fXf@aOZ!UXVoFc_BAnu7cD zJj`6WfoqO9Lo-myu9r^+@nmAezI+Blq6^_YJsUZP%s^!D9Qr-&CV1?r!bsvJSzH>2 z>0^sAv)4Kn)OX8EJpX{#~7m4<`Zh4VDzDf*!j|c7q`8G`SR~D=FD|WJNSl0_6mf( zX*&GCqVBv0)PhT(0%JSNFvvoI7HzQ@xo$IjzSCVuesK;%yjs9d@&mmcv$1^1KUh9u zKP2nd{dq6ed;}0;JC=&AQJD_fK z1Xp`97Y0~wN4fK8@V0yj4e7r@UQfK6*<(O?{}#Bs0hHt)p}gBYPLW{;YUlSHTO<;a zcIU7H@k6wTuYjP}`uyVVV&1y>KIHb#WAczWOzoF|ts@f9r2RT)@uv<&i+)QD);eLx zo6}JH=^c38eho2;D``(&3FbkKsPld&yX>YM&p_n787z5^g^LpsRbYY1t?_L^iyV}w3~C?8Mnwe!!g|Ko5F 
zODEv>E?==`tPJ+;c!#e4(4O+=7hKtT1YfFj_~;Ww&?X#&qF=?72cwyBRV@U(?T(7H z7o2$hNSYlta7)G(gGfsyZOo+azo!f6c+%OTp8?G8y8%m+W`g9`H5O|{{oU)ox$aCP z=zStC?|bsvcb?>)y!nko4iKZLt|#=ac!1X9D!`wzrk+Jt$i>%y&e!g9@^w})Hq1bf z-w2huyBeTZ+BnX{SBp+#1GO zKC6g3bx^hN^DmGnU(+uCACMWmfg#hn2u+_tnb+CN5V@ro+pkgYsZkAqZb_Vu-5J<- zyqO%oUm*781vDSmjSpxY46TuWVddgzEZ5ftd{qiQHS{e^N=1dAH+m%1q5JJTj2;~W zYDc20OxcWq^i12lT|v7^LqY8^5n~@IK$6=gE&Qcs-hpA@>2n&IqZ6^vuqX4J>kcW4 zK0uB1F}gm!LtbueteRW`PHFux?t!5&dS41E_VffT`(rfUJH#3+{soPdd!cb12mcUpvlX>T3>t)?lBlI{Ylk zK2=Ouiw+g*SW|jm;?3QH_c~8OzV;9(ppr>-X*YXmF^jTzjMByg^cedD;$!2`E%YX| z_dCs!K6h}vdli9*>QvYH4ls5oK?{BQE^VrV*28%i*;$JH_G|KaYafGh#z`(IPQk7J z5DA+rl@OVFhuN(D2=XaWQsZ+i*zwv5Os*ZLPFpc5oV*}t&To)mDU!$)albVLvzkdL>%Gr0xAGY*H17oO*A|Ep zb8HlM=kqt7MzP;C@<+AfL?;oS`Q!r^`e+l{IKBk8C^u04+Q*HVUP}Fx1aNP>#zMR7 zfcxLHc!NHc5TtbgmGuTZ6l$XlkE~8z#_5=vV(>>|a4+XMqgRRWy{?nlYdwNVKjTnz z_fb|t|9CW!cjXlC24cHK9?BbDNYjSd;e;gW81)?h27lFXI=;7YNlhDCUwwe3p5v(}~;ltY;Z-=D0b_YGy5GO{rx>7|9ex93j@FGS&DrPz#JwZ}F7!r4Og7n=7 zko0mx|7itWS-k>0se@M7z8BN({a|XBufz=<4;R~NA@V;rEFq>@)!c0C_^APMO#^97 zAoc!dZ{sBUU8KvZw8&w3oYU!u1WCnQHYDOWru-U<4W&uAIgGd+)NQx9dYqHg|HleK z6e!W$$Q7(igC`f$;a(YWXlcK{r1v*eZ0Q8qEko(?z01J&UopldPX$Tj9aU1s14!w# zM6cuCEMsULD3_*jv6kdzJ$wN$oE)s&aZaYC&!U48(Ee2(bHAw}7_N)MXNT!7*H+0y zR2|42+Jv*>FM;chNJuk57Fq&aeoq(tYodr-o|JN3k+#;MQOXq^cy9aDF zP-aqfRONo_3xi2VAZd&?m$al8c3+Uozk52J@lRn&@M#FBxeOEh=ni@MIx1d7v)R=*(9h^2rmbv;?43rua_vM_W=cEr z3VzDfL=|JcuMI4B^#YrvH@U$yQ<4m|;sU9!RE?Xal&&r>_|P~ zs3lN9jLq5Iox!8)UQ#ew@ex_O(O)DNVk~b`XX~CSZEfe(*3e z5+vzcSdh>J^iQ6(?6U-|;XLHu=OJa~c(nQ-MQ0ut4bs(WTc&;`N=t%=&$>x$i5m*8d3V*>hRooz8;OfmuA7^p;sZLS2-xs!gEv$|K)Mp$P-cc5O=%ji$!b5 z<9BBZm+Ng}W6LekcE%JmDnH6v9`0ldFVT$FZaMBqCeLiyRp?Rj89n|u01GJFX(k*) zRgW^R^_*BQ`}HYLv;y_6(e883W0X8tM%}(<4Ed!m$mR98YJD`yr#xfc@3L^x*4J=! z(oSf$YQ|DeL-=m|8DJsh9lIqcjr2;G(YbL@>3128{KrD^`3is~oj?=OjfLfE2_7{r zV6^BpKdVdz9b)5Fq!P2m`6^~b-(-Qy_i**VyUO8RvOwZ}m1(`}mZK5|^65&6pjUqj z)%HV_Eg3!Ww5N{f-E{(3eLTZ_m%oFC7wu3S(+-#D9`y2@xzIHAAlCTrCr9T9^j=Nh zrTGsb#MB1$S`WhKX~{VJN+TNlRRDjFrdeak%^Yd_3!a(25d+KeA+rz73+H#|ej8hr zkETU}{NT=!k_mOAzn^M`K-9@I}vD=(_(p78aX}LD8my)<2Qp z$GCFY&@8BaS%Z3T7A0@|A+P)}d%UFqnjh}PCrgNf7WA1v9H=c8bTh|g^i0i|^aSwZ zTdcj_3)dDJivf6~^`|AXzPkDl>)doCouC~y7nx5GH={g?Et-~3cb5S~OAnU5?3veYK6y0l} z9z0P|G*_u}crVOcWhvN|H{r=xa!}$YR=RT`>dkph9hvo@?m;<%%gvDJWhy%KAHg+$ zKH$&x(7w8t6OX(+5!{#SLQB9&Ci!;?7PA7FvLzA&{tm<9Z-=4glD4QE5Jov@ca+|u z&UKbGI^J(W-yCO})fJ-e%R5+FL+&}6-(ik1z}Us;K%6bj2ApFPn)$BZDA(pcIc}=?d!cr833vWJtN$4lV8ms2->zIBQigy~ci^ zLmZ0w0sFut>pe=t%Vcu1RbaUF9evgfnfv9fXgjzbQaYGIcK$nf*7FY-lt_1$ggtat z8_p!Nrb5?|7NX+ddw#fzc9lja(QnZM`J-wDatSfmw{EQ z71z^`qi)JmNGUJ_tFCd(_9=0P4w_)}sWWI__YXXLXDUQLy$PQB`Pgi<3p{LZLEh|C z%+}h%f}i}vg(O5#%_9!dms7Z{QVp7~Qz5rqtS*yfzm;25%kxhKAWvN5MHJ~N+umBvi*Y74i%83O6w zItb3wh-DJez_oHMc)$ko-fp@IzH9Z+@8UoV4Jc;nPLsI8_B?O=-A>(!DCRJ;16TD@ zuzG8nw{F}c%fESx?+i5*ZR8S=9JmeAk)kry!V_(xc46~>>1g9{i$BWP2QpLQSFgVg zN6S0V_e@__@n0Pr{pKpOHuW>889$c?}G;|d^fOLE>WkJeQXpotS&9^V0 zV&X0yXn%_39>@cAqY+cg4&~dtW8w3aXdL@tFAIG#kI$}*hOmZTxb5!;==N(8ltt)^ zS$mBfv#S44`Y!4q4F9798k*mM!^z%kiX;mknHdY&NpEHHiOTcTi!J$Ra2GVt&;- zcuM^YEU27;&h2lRf6)x|SWyf{FOqo$F`lh_uQ2(z1m^Ya0k@3)fT@4Cp)_E*%)SiK zEA2HO-!%oJwaL{D>5z1CEOc_x6MhaQXIwTp%)2FlUJ9{lR+tHm4YzUZ_PtDAvd+ys2ecy{!LWaj7GmySLV^CEwn#26asB0 zLGcbFaM)^y@!=1cvPUAO>n+4=)9uPRv;C1=UtI3Ahc*3aiCLz9LDrNqUejR@Xs%A> zzSq3q?Drke?d2OZziA|>LvOJxpH4E(j8~{rScCa{)I3op0qHehM!l4D9|m6ZeGg`Z z)9Wu(IN6IA#SwfcpYnM>Vc7`_JZ5RS@3ydEQU-u$qKGe=6C%Wmgtd+_4T8`&sZWf zJg42TJ5$enjn}wRBkJdVV|uhAg@;p<+6rxD|QE^OMf$;JG86# 
z@t9i=7QryE3flVWfO>U=EWsiJQwLb%A}@Ltr&FfD=O_!f-ij9kjK!m^k*pcAAT3v4_DSTpt{ClQHGyXMDH7OgwC}4^=<@Zawfi{x3>9j)(Z;f1=l_1|C}`qW$ij+-glYQ`JW^Tep>v zp2{GyYga(?b0s=mqJCoD zKRn1b0DbO-a@D>%_GXN!;Ot)vM;lJyvnn&nGuaUTsXs%?_PQOX*LLU$GE#DIC2!3`cgXf!h0%v3P?8V$ezm@Y#d1UdE#Rq3a-h zGXwe@$c2_We_&)?3$N4G5~2edF}%-yOf4}(zxg`Mk9KkTS4_lozvs-~X(${WsV$Vo z^}{?b9Srx$fKueAof_dC&;j4h zDKe{>6IiWFHq(008~)FWYAZ>@*bF7EI}xb|N`zAK1jr`UplE9%WSuES zU)_%2l%XXKendO7S%q+|tBDwR=LuXOhIQB5#~~}GRA%d&4c)pL2+jjaS;M=h&^02G zI>te$=}kXt<}f}9TVUk!CSra0f{RBRaqTj}_0<`aZ(gBHNjuM~F6k2cP2>*UPJ>@P z=?Zmh$C-#{kgB;=Xz>%$dq27sc{15mK$8_k&#gIEFLB4Q}NUrGr{E1H7ve8 z0;|UBqo&grTo{)GNtge{#aC&k7BrU`#U8}AAc^2-VvQ3&nTwsu^#s4pUu30zHn@0T zJC2|E1)ZDoS;3wk*gWbyHoyA>xNj7u-jITN>6)cp!7R8wR%hfky4Unwxi@Dik7Sq;idlxO^50)BgG&S6`F z?t}gY&6Fcte(kH$Ve1u4SQ>?08}i9Rw}_W+d(Qpb#zExVqx^fx2Uh-a9)5ghAeLX) z0y1+QA@2xfQEuzvF^7*(dOj18ZFc7DR9!Nh-CN!gFl01qE@5bR2w=S7Fey8fZI4y&dZBIK<6_irF{O=a+)F z4UNM1buoNO4E3wG4FO162reNhc$T~;nq3FLarqCX45#xKantHKVDJ&=W8hF*kRSL++2ae?Zz4T=<~D%s zoF$MQ`AL@d^$a@HmBOIQmoWJ8NU)mtig!MI5~pVC3w{rFV0t_`FLEA|kLVOi$c^c^ zzY;IpFcowRI*JQ_Q5N}53E0d%##6N|gyGlMWBmG0%-Ku^#|l5fl2gWFvqczMn_Pn8 zvcVWRG?CdCoC5V+4W@l~j3L8LfLhZTta494am5Ifv!QI8xt7pI87lj%BV3tw8&AWlRoiy*7}4q_;HpwYb( z%)~DP+um70!Jcf4*W|K6oo?ciE;{0jSFa(rAs^XO>Y^k#<6=j0SUGa=ev$=WWG#;ZHIL}uQH^!V=>8$})Yo(X77vx62@DXgxx5KDLdl~B< zs~kHBn(h`{uj>W!`yAjFk9JXR?g@N4qb)c&?!#)G$;3W>2t6p3AO4tfkee-pc&`Nh z_MnldGsr^FacqUbef5PDyZJ1CX+AGY%S4A)V=(RXKCpAx4Yj||bM+Tp_&hfm*G+m4 zu-XM3ybOetihj(_bQ6R)bpct{Ly*VB7WeSRK9U5q>AVW_dUq7dJ~e>V z#RTTIWDG9<)C`JieZk<@5qL!Y9w`VIIHo7K-LM3o4cEZPNn1?O^+S#Ad$jsx#MG<< z{u@Dl(6~b0qx?Ap9j-#DCG|9x*PyEFU6%5>gIH-nT$7M);9S0i{Ks_u#8DWYUkLTd zpFu8Djh6ni5H=^8iNy>3F{RELoQh4wJ{gH<$KT>`drLYyykb30J^;hy7ue>!0KGzk zFnUZoINy+i-?Eq-`Pd8|vPzHnkKKf4MwyGbsi#nzVPLJE zsJd7T&a-?mRrg=?K0gHQo{3PD6$k3eLzo>g(fo~v;Hrdt44vVF(&;(SXJ8U$ZH?#G zdOyG()fL1-X<&-$au!X!N%{Wic=%!m@ml;nu%_J6q8B=1P|hk;nZ00hIeI!Q;(-a>`6cRr(R0x77;lC~KCSPb{Y;x3m7C?2Wv~IFS2h!EggN zXe&R3nouDW>o5yK zPd2fp%d5d9AcMZ!hGEK)S{^?A0fLnmWw3)GZPR;j|G5;yMt%f^o2xR^>pn^rK5=yT z|J`KPE*@h^o$f*PGOx8J{I-R;kh*>rHb~AxN^k^kyO0Emw#lOxT!=#*yHK=R?1)`~kx$QvdRR9j0}zJ3wwy59=Ze|14U)kW~S_z3gX9OtT!cD#*do^^$V z;1fL;FzO&qk3Nc7Z)~9G+BM>EW^>~n2EsyHVg+}MWu-waT!SNdaX0d-n^_P?{LE-Y z-=^y)hntFu{rg7i`gaiOp9s)iVj@T*HZiaAS9tiRp`h9A&gJ8pbL1t_j#HXXV7U8v zG@9g(zBGT^cJ>Ook?(CtYXbDAyqcTa5%Ax&6y1HyAj`KCRCm7#Q+nQj7=J0q$2Y^+ zIXa@^P8AEilFs5!?jjfF8WP*JUCCA3h6(|jaAk`lb8-iG52qfOt3Y{8%6r>>!>~8EKqaR$ z%v39kU71Pj0`hL=OvZY{huHrg3(@FBF;~r}j{T}|?(AC6)G1lyeKp4!6V-GUG8W@w zI-y3lon;ZDt?d}{b`-*L&Y*FF58RP5M!}5y_==Qh`?dwlc zpTGni`b>bbQ?D_@u?D+M_Qn1?V#u|c!-nQ$VoX{j3^Cgckzd>~JiwAVTFCk)X$$tc z5nMWYrc8Z5LK&+mgSFqagoZ8VLi@r?`1=n7ap=GAal~swL3-l?h7R$?;h$SDx$Y2L z`&oj|uFS?Jp*QNeE<=UPgK}a6xx&w$%k`)8c;N-k-k>ceMGwZay>{VtPfKz0iY9cK zTnH^Q#-QHBwfOb9j-Wo9#8YLtm>As`j>T?=ssB>{z^#HQYEoqKbq|ycnbav<^j)R` z%3eqZV(COFWJwkLs0DS$)*eCmubr}l=6IUPdCQ*Nw-mF!=Mam^i$%Z9LS@@8Y%0=2 z#jaQ$?BR#?qf;?v5&ikr6<*QlE_i+INO`cUIKJ{DBwYK6W*a}l>^?N7@^+(HZ3&ic zm*P=LDC%vx0P&0Ib8eiDtGzpjYu1_znmPpxpLHB!x_*G^Ph&B)c?GmAi=aJ069!Sf zI^KH$@x3O(=Eeqau+qk`%Ew?x&k>d0KRmsBJD9wE3hH&|WCbBTscUl|^B!r#vxE@v zKbMJNpK@WGDdi51M1uGFt#GVD zUr@jHm*u$*KqH%hXy)WZ4s6^grmhQpOc zV#|t?TsrZ-(tg5Q^!qb`S%v?{71t6ma#ks~qvlEVt0|~n-zLlRrdd`x?WuoU2ajew zQA;~YImD@>Ab%SQJw}^};SbVqpP9aBtGR?m0oP%jilamIE{vQq4~*idhct2n`rVS` zgpGIrvR9U3``u%hWpxs>(kyK=N6{9M8m!CkzCwb|YaxVlx&r8H4)tVjj{G z%A7(Up!7)&Q!M@|yZiGDW{1sZzNz=Y=krDG&Nibep)(Bscpn`n`*Tn74^-r(Ve6>Z zXtP*>S*^C1mG!U80WH{7`u+>LxdPIv9-&76F%Oy92XRG86K? 
zrjIL2vb;met;~M!%G1X0zG@YzA3y6`+&T!|D|G=T_PhRXFkDB&c zRDZhy{p~WLcH|*w*2#kU>g5=G#0kzuoj}jZFp#gX6irtC!YQBMfZdBF5Ie@4y4Yz< zRdSNI9FovZ@bT!(9!7%e)-Z4yxC;*-O~<^?bhaC~kCztSXQ}h=_?p zHMu6tL_h529E>jSk6<`_2On+<>Du-<<@i#u^eUAc;jJ-GVYHdEB2Z<-_}=f^^zu>~s7+&Arcn-dQ&& zo|1J z*Dsh_orbv+e}R1KdFni^gyg~_V6|);)ViiZkq7nQDGMfXThD9LnwZx6gWRg73W{}J z!PVoYLZ}~QzVCjF} z&9Vkm@BIskCoBVd3nfbTd~tB^t&I|I6&A_LL4MavNSGc1ZptfYcC!|nx*vc`Z!Lw& z@CJ}>?91aj(3$k>eJHK|#H0tG%hXFpG26F`@FJ6lPJ<&b`fM&#-l9C5^#+8n8c3fK z%9@6GV8BfSp=gi-EN@XBsJ}N>t?CGEwgbSyBY{gMdB|4n*#&B+tkJz%Polyl1I2<4 z!judh5qtiD_Fo)u&=wU=e@C5(QAUEwx`_WdUR$WW=mLEx{}tp<``{;SOfo`_)$Tbc zY4XKqBSWAxVJ2FQtbzP5DteyMK1gRL4!!piLn(*TCZ()qVHdm@V@|W~dW@gf3liMu z9OJ30wApxtI1|13#PybjYY}d(^&ZXzo@TH@58Ih z%I2{WQR6-gf^@flNfojDXP-sq0wr(jMb4P4JQkeD!6J$BQ7hk}dsH?iuPuQ7io=xK z-NdxYd$OhtSr}rcK;NMTqU!ca^lf^BXU+Da&%q}w+UXHgo#+dC)80b%CE9VFsifX) zB1<^`9Q{5iN0+w+g4bU!c>WP`Vca+m>6+89ju_c7|2;(MuwSxXr{ch9{1Qy=UWSTy zo0VO=A!<%dWp)eeLDef7{lcBlYScTwJe*1t~TlR>(IqTYvl zp3gx0KTk1lg%e62-IKxLSa6+@3sx3YENt^BteH@N#k=-{+40rnP)&pxvl=0Ihp7Mo zO_1@l5Ofw4low+<%z1eQuP2C1Kw;G=P zjDl5%ECgS-yI?fq5`MQP*3c?)DovzbZgLcs8hvKXQTvJ8G#1;WRGaIYiVj0kz%ghM z6fL=h%MPDM`}4P<&f^9;>$LFrluJy{#TfffJPkG7Vqw8QPqF&91N0kiA}-KLgvxdU zvGcjdxNv_Mj_+~-WsUK7X@}XT7tdHpd5tZ^GaLO3 zQk#CkV(m}hVtpG;0uzAl$>fk9hLZBJe9BlA&R9j+`y^wOge=MS{Z|45WA#Lvkb^vC z(q3$_HmBM1X>PXN72Fs0fk0UfTZYO3Dyx9;VGj@(4M^)V_>o1(?CY#0c^lYKxk?pIO?nouCm*G1C1gr_%}?i(5&ak{6&( zT?<7U_khcwe4JDG25oA#V_@`QCN<{h#IlIFa|o9uAhc$LL3~IX^HJ}lJzyO4C1z;l z!gwfM`4W@tC~yAGQdE3DiEYa)Kwdz9S(S-?Bl@z?3ANyN@CF7No?|gv?I31e5&Hj_ z2XR2_M>oreXK(|@Hl?5K~eE6h-~Og0f7TRcYh95)!${VDM%o`Hk?MD&r;Zb$nouUB~E*w5k2RS|)T z1==!I|MASMZZ#xM{}ZLB=dpr-ItU-R6vmCw5!F%MVF2;F0xQVLcjGG@HTwgnUC|TW z1BmJ5Ya$G|r@?;ux}v6s2hZ9u5w%o3xlx@Z{u*v9nz>$s_^1|EEv!L1?>~s~rz?ic zFyl7&pMi72EzHw94HwNjik1GUSZm(M|090-@aB9h9@+qBePh9Qb5AHAJ0D_t8i;LU zH(>nx0<7ULFuo)RR6`bXxq}|7&C$mOs}z_q>KP`ae1T$9ZK0uqk?<{^GA2no@E>BL zx;beeX$!RK{0k2g0W29Lg|*iHL9)Vq^TL>{T4TMi$wS~-U z8!)ta8vOHL2eE*CMpdV|tnyA78om99{zLlGj>BB^GMWNOk6bX`_yTj9c>z;xH=;E2 z7I{5YJhCnk`{;Z@hcj|W%&u1D%mdIm^so}!_I;+L6gs{a2u&|@9sA|_073OV9O;jthv#y7y z6V1i)VY=e(xgT)0{2utWNXVVAp6=l3m~Th#n`Z(XP*lU=v53uu5!iX@FO&`6k42O2 zp>)R%-lO0t1XG7evXCpW@G<6RRB``D6CrkQF)Zs|gt%o9<=FI?=Id){Ursx00|QZR zcaI$_vJjINtFR!e6ZM6T%2Gd1!PNJX9&Ci29d@Dp8yOUY)uM>Jv<6 z*Dj}E$hcUR_TU^IwfGxHk-seKqv%jP+7T~MemQeg818S>73ORpwkt7~FwhRu9n^Gp z(4hjIF_d=7;r@znOb&YkUF=N+{|Nvpy>MP;@D|#BX$d2H<Vwi;5w5O?R3j9A}Zu zQ=qR+N1<TeEYii;cKB{}^%ZcVepj zcj)DM43h4Zqpx*7j`Y8Yk{yj)s!beysXnT!v}HE;tk6@d5F}3jppjPrwn)%e#LR(P}u-Q?BI;ZkM{$es{>d(`j{0(`;&1FGL zXm=u0fIL8CX5EitvaB6dY27j7ToYC=_=0(H`s5ay2Xa$e{MAKA*u3#3#@gwKi@m8o zvyh&X-RU{;@+f$%Y=!;;ae<1uDbwHApxQHpb+vi|sgzv_O`U|<1_3bqzYFLxm3H{U z5Q{qbd!e$M^I9kvK)r+BTTs&^2c!GM3E3Q? 
z43<+GRCv$VAEx>0T`$^oO~i^jyK%wii;$|{n^@C_Fss;r8^tuCrrQN}D}nk}L#|`{ zKXrI*-aANfpq=8_r_`@;#M<)fIK%K2u))-&^D-8a{fRf7a}-jt?y^3ki_vId2aFHs zj9&i@Vy#oli6Q5JErAbsa%2VGRX;{`#8KKk(EqPn3+AWiz}nd-F?3W0PtlzN%PH%v zb9o2s&$AGnFXn;bZoRDO!UO1&`5b#3HWeK5?sC_Ssc64;Gh`i}&8=R~g|YvffEnf@ zp1El(+6-HO#Z$*YZSV$cymSR@&QZQR*#bu1xP}$l#GU3rD4$Jz%(4$GG;=h!|K}tO ze`SE-i8*NW(*RNq2BY(`cCec@2h{0vbKI7YugZG`Y#!AHJtzw~snbtbXWs}BKg%$4 zq5-;Z$>#EBE12fD96u=ZgxMB95kma1GWRLCbXP$p@z_-z6}&WV1%5wnDYor16=gAq z6>d4`HHPj;{jZ}td7f6D(G}!X+RV4L7Kj}Qk&_pL(Set|x$ZdLYB~tD=S2wJ{*m?R z{1C@#b%Mk=OBi{{T##R%$dzZwrzSrRMR7;K<#ab}xj}Q|V`o9K+l$=pyRk;K3;q9c z#QX(?=+RjM6$h?^Pdsst2cCw1Ty@0Zf87O-ZgdA-c^;$@$7On7HMpdckvKNx2;a7` z6(g7BL8#>w?p;KF>**P2HF`J%7gVD;cD8+rCT_PR{DSX~Xx#zJM9^|W7HU?E%{)KSEyj`;n=FI@071_%9X zB9veG3XvT|mK{d-7|9QCF3cug>u#R1?lCJWF%aah`*Md(yCC7^6-eDN42BSg=T@VU z@OeTA_-Bm#}x}7gD#_S@vKl#W*cW(!+-G`Lff4+5edFz9U?P++_@C4e7-N~ds z7eVX5B#=k6%k1<^P%aywlzQvrs8b_k)k9REKDrF;txLGogdOa#b~$`))DegJ=0WOL zQ_!VsEk^ah;>py3wAX=#sKcn0be=ltL)bQ_ACU5UD97(~j(c$&N+~NT`9NLo(8t_o zT@6}o?E{DJoJYNmi_zm!7wEc)V@$jW9@E|ee$D}uD=cRo)jgrI&_HxwH3Z$qJ-}rw z3am#xhF10q>eEWmw_zH*sn!wGiY0={EmI+4>J3OY6;LnD61^7{VP(ljtS?-JHWwaNHHX`B^A>G~PkaZ$H3cdS8})AOloeukoxI;moM{A!VGe$mHwy!Q%Z7K-0d8 zdmgxrwjtL+-Dw(+FC}h#!xNCMVoY*pfii6E0rYycnfI#H7TZDxpzZw|;FTAI>b6js z@2Xfx+0;%Q?LD%8eRKus%u1!-7|J~QZH5G|SLiGqkIO!sfMw;0kn-PD9uZf9>Q{#R zuIm|WsHFXg-y*13Qwd&sq)fW*nNl+$irG&%$er_l^EA6CoUMHt?oiipp_`@X5Z;f6 zEV5JN${sw4CsVlIXoT2NkL8`?x84`?mH?4cT#_1p%`u8jjTr#~P~UstG{ z-H7!SchTzU1#aF~PfSQFhwn5mi=0}4HG2EOuk$I{s>EcNa^o!8P3}Q^(+jNgR%5a1 zfC=p&UgN;2Mq+S$0T?Z-;wb?=xlMNqDBaY=0uP+xUh(#j+z(T7osI~4WhG**FNSz;Oq_rEq7*n=!>sLPUo~2lt zV2`bj=?qs+{zCpIY_#hr#wQS(N6kau_+MbPU@7r`&*eCdYvbw*+Je_knr)>A<#e5mkp1B`C}wwo zw%#Lgb457V=PZK!)>SB%N68xZwP4t;CopWOnb;#=2~yi$7!Y_Cb-u)*|IhzG(y2_A z@bU>{p1*{?8^~#;4nz5B;?*w6!f3xt$b9UI0X1J>q2v_!%~ayu`|)5?p(`FMZpGNs z>6m`o3YMKtfa02JaQKqI?CT!G_o_C^0DWfO<3C}q3}V#@DO`SMlFY8#YLJIiK4U9Gr)(e}{tgtu(A%e3(H|2q?ZjQ+9q430PJI3zD*6+eS;F z$!IxRy*>uHURBWENS!gWm#~boBJR>v;J(`!e8W~j&HmHy$6^cd;7~2$l1fXsK7w|4 zmnK2$2gGM(uo5-l^{iHyi3u~xzm$%Up4c->#YJ{r=Fr<{jjj9OUX5Gs$B`?!iCrgKE}y==#T0P>-?ZRu^CL z`NjI;y13_H{o*)gU9->GwloAP!rx=~^g`_Ost~2Gw#hd8M8mUVZeXRg8f zdL};)z}Eos<^J748M5jeYtD|t9v?oSNoY9wmC9w&yUsx|IV-bEU$N3l?+tW(qzKt!pZH4fADhE?EdJ0o~B8_!(wh7{;aJ|K=()0J-)vrE2UMNP2S> z3N7CQUY&>)-sen)w|Db%L!V~eAbu*rEf=ml+E4HP$Z9{eFjY$_%W5X}Xym;X1Q=5olq#g zP0tC9i5MAMhu`f3F=ojVa9=Hg^QxhsupGzba6)FbVL5!y{03#RRC0>;K!y4W8qH7S ziRt9d>KMt3T?SzBJ_-6%Ho{rjg|}IcM(^t?I5VK5aH}t!!7f`PZ1I6)m8Ixk`GEL1 z(a@;Sph=qwttKgf4j2%5^eOimI}M^;RT%J=Is{GRjZCP^v~0u(9`# zf^>cZc;Bb*+WvLSA?N{S_BI2JMgm1WAHlnluVA)39j5fB?{iP`4oqOMOlBtNxJrcj zX~yWariy*up)Dvazd_Oc7RqgmVkVm_v0%(M=vw>)Ge16oYfEC#FQAbt_f}F;yAd-x zEQKC>GH~RNFA&*3pI3yvhsu@%#EaU2SM{GH8pR1s*fHVUX4vRuFL;+AH#5thEL02))s4*xwME z62SbvIMHsj3G9=L5X>a_ecTU_7a1yhcIqf78dvbr`svui#!_(B4aTEkBhmiD9n9`f ziw2WpKqK^qrYkzaS9-qU#4_SSm7sLlbl$R!*jU{Xq0M_3P7gW^+0GllA*C-1A>Q@y ztAim_tBGr($kj7O7mA6W?=yZJD01~>PUL`h8=?)7MjzP18=)9H%8dBY5ujeRTj@N1 zITUyI?Vf}4BK?bKRq*s zm84sW@qK9KIOiMXQFFNT-F7Hz-v?R}${$um$>ev5K^*=s9$Bm>`not#W;Q7ta-H(M6iMjW$R+Kq%P0>68L(&xfK z_TXLzp{b7YS&jPC?cW1hmHog`t>z}uTG}O~;qd)6sCDeTOwqWGO%mv%8*>L-m zkzkM*kKMjhVOSSq(d%U-c>2D_4B-jbOP)i5{0oNqZDK7k3UE6V1r^T^B4&hRSGo(@ zo92*@j)Q&wH*D-H+7+6=BB#$z@P2dwrH8VW0ggo&di^VRsjkP?0f``A7s1uT8kE&% zJyFvAf~@UI33+gnnfJa)PVS2}D*gmWtlY8ud{Mp0FBb<)~e=dNmhBM5l1NmZ|+F&Lc3;7mGC|*qe z&a61JDZYzSjBdi9|1+(cyJL?5jo`Aa1Ga897u^=Uhoh!{!rj>x;&z#VsMFepMnxAO ztJ6)GBmW5=Tc!A+$yDeun;bpna+b_Xk!Fpeg<&kZsJdb2j95Hpr6w4?dyL)LDG7Rl-l!*-UDh_*X+sqx zWxhqjieFg2V-LLj%TNgS9|MCXYl|^0qrvW3G=A?U5sLkY#h`r`*J))#SKDL6|JsON 
z>P@tx+^tkiu;B&`2)(9ugsf(L*?$-4^D;6PtdgFxmM%s7?w70JA7>y;nfMU&DC_IK z?mk*IPG>I~v;?!O1sH6%ggR$Sc*_xeQRQ_3wJ!IDSl;W$cwoH;M-$~IXqI*qjCS53FDGRb<|bgDUL6E2-6J{Hwc4UAx(e;4(=+svHOri1 z2;RYgkh#KAsBJRG*l_ZG>z?5+Du^vt_W^CO97`)}aPjvSP+j{OMz%eGXES<3nOQBd z9c>`8!VscfX^DP^5A)hZPnd_dp{SgD9L}Dz5G{J218?Uy;1Y8WQxADTn>}@@FEvA& z^aomAyn%Tqd!bK8J2z5i^AXGF^Zcwr>D6yD|M{5q7ttnA+gS=}TN7Yt@?AJQg4iFO zoFTYWTTm#@fNlIH?A-S)Mvj~cDM=H+0cQh_aXY0Ni*xKH{?7&GfAq(PFSxmbymf9){*Hl==0^^h?G~c!>t&Q|dLi4LYa)gSU9jpG z6vMISzcKCZE6ge@XL?bc zz-#<=jQ)BN(uQBfzLmP-=S#am&rN}jtAen!??;I5GZJhp(|Pk4%Dyh9^H-)FHchC9 zye9|v$9)E3lWY(S4ay~sh_(jpQ>M_IWQ+SLA@)iICOH3wTbc||PbiSx-={5f z{n-`9!g);YEuxdnEAsTb0RIOq5HrFC`?l$bx2}rd@W6%Z70m{(9sgiq^f#DszXg6h zG8LUlicp?jB2zfslexA?gvH&Bh0gZ-adS`%!lW74=JOqEs}foBM-$;4&3IEXZe!JF z>Ty1}39^t+Sik85rp!2lJuVkx#0R=}nVX4+8Vp1we}(ng=V0B{2OwWvgIN-h-MZ6^ zUW0e@mlpbB>O6@s_`bgAK4K;k+d-D}R}omB`2<4}_hZvpU6?bz8l7^r#g*p7sBS(b ztNdLGR!@yFYekeyttlS8xm!BzH}IlLx6`z~x%Y9Od)1Fk-&E2z7*v&cuC&}#it)--e-7LAGnw^tp+tZS~^Evhei zl}LnIm$8`C#R;YuQs;lydko&N8f)`4tec}3+D+MqscpNlmhEPBzn)^PB%E6rQLd;y zl(;vYF}>Ff{<)m)fTpw1Yf(OG;c=x|?psVZ9){H>l+CQlLBC{q&NAvfYIzwbRaSA_ zUa+H%u&&ZkvlM;E->Gx!20ET6_myouD|YORAwQ3E@4RiOuzJkhcl5>hp?6sWoteCk z+=B*12(-Qa0s|(0hv0%vARF=#+Afl-ss9WZZe9fcndt~!kC_W?Cl}zEHMGAknGMf~ zFA&-`4g8jm1zVkb9Q)21LiMI`(b-7+aD%?bnGv`DwgV4XeZ_)xU$AN0S?pm_3evwr zn6Jq+>P*_B-@_ZM`05eZd`CxUic^E$z#HIah|2imb=>RaLQdKibUVKUrSG>v;DTf5 z*JVAV{9D4yPkjTgkK=KnmWjCZtFD+nNCJI^#$g8Wnyj|AbC)nf$U3l}@_;8`S6Bx@ zEmz3;to{X_e^-L4ZZ1l*gYd(4Q(^Yv7l7mIVD^?4uwCv$XE_@jRQ&-POTH7&`Tz_P zn$YvoO>pQXaLFuJC5*X({c{ZktLvLs{p{zM^-m%@NbYONu@<157=P75ERKv`qw5|- zqIC2SWwyZr#OucB{(L`k`kaUk&yY(kxl*lNrffN(VD5MrBYv3+$)k@z>4rNv`+`LD zYIViCN3zHjN!ccMOD2CvTrkCTyv1+f_b!V;)9S#org_M`o*!chX9rdnM?28ecJ32r z2V*xU^4c4F;YK$dA@sj7V3Qb!#rGmX@@x=@)Ys}7(m`lhFc0I0n1a$Z5pCk;K_)SG z4y;Q9pTGG;|`rrjGxks6p5ujj$>FV$6K0D{B zZL$AG>Z4S0JhI$CwEstR6TkW^o5#Ttd?nYCCj`)(w3S7neEuMm8_@po%LoYG zIUk-0MqvFr9y4E($338(SD6s6x;>3=UZ00U2XS0=G8we|yYNzS^f*YAP#P70&g-in z+K^o8vC%v-<|gz0G8wWiM8LDcF~q>V0+Gaaw5~0N+*Lc#5=ucc>=gIBNjrmHvCMj&{ks{i zExQVS7AC}oEaDE{gW*C)V_}&O?LiJslg%zJ#^CFdad@69dU=F!Kjjl;>A!_oyFG)C zA3{EO-yHN={s2ckG7=Z6jWPS9w}ad-H^-(f2FK?Y(2n9N*uQ@NKZ?%9FQ&DNvM$Yms#kds854wAtMkq(kX5*aB;iXurORP(HzlEkP;5*d*s!i1zG zdDr_7`1mw4dq2-wzu))Eg~25Z=y95P?Kuab!>M;#`CX}2pOB`W{)(>m$T>D@B+7im ze0vP}%vO&A$*9nYT9#aV&P2O^ynFVZ$;E^ zIS+%&4}eM~W9J1OL3eExs9)`1@{)M2{TDIC5ZXx96dQ&p1d~)W$NF9_2zqZ86FB5v&X+f8v>;(0aI(b8z3! 
zHO_Owtj`Nz;2vMFn`#4R-(+J^<5$ehPCZT)$ZJ4{+fRD?n#FX$5PBhgXejYI5 z%RX!Y@mW{OX{gI1;x|rb zgM|YiS$tjU-(v;X9;JWx#Bg{VW5$aOhQqeNX0Fcn6?RBIV276$x|*FuiBq-Ix#TeP z?yN#peSh+Y22)>YH|JwH1X8|VMN8#Xs6SP~2Cil(uI8|#g?8+=DKIu7ALKtLph7o8 z*`5A=YFBa;9ozzC^ScP=UeY_H(?Ezxn})rY?ZGiyqj2&fEifTYZNRa^xRdT8w~mrG zy5BWyR$RspjoO0J>K8hsm2)XFKa7d6BCmiPtygFX&52K;dsHPV^DD4+;D6KSqHhqaM!PgdyzPTHlcA>uf3WQ|cavjJS4#zF9-GUjmJ1MFsr`Q6tJq3;%B zT&QayIBDI)7UB`ut@nbUxx+yB)-QDa#-n^yz0~x-1pxPm;g!@~nfT%lEDDmLO^6x` zT<>x9|BfZ@|2S|RCEQzLyjS2?(6D-LMWTo=nwv(V-fdW(dj|^lQMa*$GEy;yIHn|mxU)-`;^_^k zi(@NJuz3OVzg>lMXHEEmaT+YDeS|w#od!kCZE1#k5Jok=$G~HIz&52E+}2gm{$L?~ zk^F|XKUZL;<^X0nQ!ij4^{?$;VB$X~Q0v?o^x?m-5;d{sjH=MdU55VMGSDZ2{PvGb zKpyN3`aS5(s?ib#Rg3t1qhv08z)Mbcb@jx&yVal{vlq&D7GrjRGpa7wF}oi>;L$}o zvvlLR#?w#GcC-wAudhMX%m0{t{9RC=_F{wQoJLvCt*HBl6ZjTs@e|IxV%c*P6Z=iO zNWKym;>HdIh1($v{JR=f7u|t!dnN6`FGEMfEIO~!^KHmPoJ-8u&OKd(nv!;WO24<3 zf63*ydolA|3taq}Gi*Ypp|FMcW-1AtW1{|Mwsx7EHIw0nS_2_z!C2f=Y$EjA(S*8v z7f?3tAEl@38VLtF4j`zC=XX{@;!?Ba-wC)?86*|B%YCp=8$;~iu zDf2w{4IO4&;iP6&`1C>>E{aM8%$*GNChs}zh*E&Z5-<)n;Uk5^pxHSM_g~WCA%uEd zng`01!#-H>Z$5MWhdQ?F^+8?JTWS0I6DAF!oOo3n>JXQrrcWs4rX-A4yaMYVW8k41 zF|N#v`7077?I7ye&px`sIO54T?oURUe-YZ8d;sbtV_39S3h-}5LbKIskTp@hAtwY| z&yRr8;|4UVk}haT!LWB)S0{-tJUbWrcgpynn=EAY1&{}t;hNC5;BBG70jr2BGOPer-_N9ZSS$C+ z;1K8zNrY^JapbsM2%bAjIo0n!S@xKGZocXwyxQLt#aH%1%CK7K%z6(y(|@Dyt~t7ytS9vBy%KVU>;h2%K(^yyy#D?pRO*GI?ZGUz?Qk78 zcFbEma!JICI#+NatFKr!$paU^GUDqU+OgB~I;C9yanBClas=2K`*xC4C?v;^nr zL(qq-XY^D6JN4hFdrKSfVjh~lnTz3Hi#W>{4P1o7U9jF@%KP;#M7hrvE=BtQXCd5z zj595qDzXFHhWSvYvW^S>^_J7v)`0)91bSCKVa~r(uq{cS-}2)WY}ueh&%O!lh*mUQ zf0Kx%4f?{)l*4fL3vrHZBEb4JIj<<|v%*Y|cI)w2^u9mk)8BK;dYK6RYiC19(R(QR z^%0F_Pci9W3%a{gj;_s)N#ioB)^n9P$NsLZ8(- zf_Jyom>6Rre4em}SQGswszuasUTwhDI~m~A&F`Qe7l8wd=sR@&KGQTTVU~8}y?Qeb z<6ZYK|BlVHi`vAMl-$JlzY1{CoD-<{W7+s@!zk|6D?{)d*%QO2p2NKk7JN=>1Awyx zCts03{Qh_@xaTdJjqQ_K?n-0<-5hXp58AyS%RvWq7z@dafhEJu_!OrN#P6C1!B18| ze8X|BIJ_Ko{!*jp=yn`i(u}I>QqHcd9m=fBL0sSs?&A4)?T(S)Vit*Z(hLl$x{Utc ziI4lZ7@YWt5WX`2+UQ&nGtCsk(;8Uhr*g0}X`-CgV3fZc&t+XWje`bs5o`wMK+uYG z97rB@wew%_apf)W>GLmojC+9Xb??D(%qcWFZNT^2orMD)TS3yvLJ&>PVY*W`kwfy~CW+OMf4>{Xr%-P8qC%;@@)-vHZU%Lu7j@=Ch+})6 zIjrmnVPAKH*jt9@yT)KclLC!7>NR{#$1BUeK*+G$^t}#+oEh(>BK9Z7txrWa+XC#& z`w2^@=m-Npl3P6a1El+igrv3ExaNtr5I>YL(HlBjdLPH9)+W4Y{XEX8=^7XFZy%WI zbp%uPerJ%g05p++l2zxVn)8D=*`sWS$%bP1sGWxo`%poOV{S&ZWk@!=3_|npp+%)b~NG1gkEz^AI+1BO;_2x6hOpMgNrgWl-;T#WJ)=raY-jzBGHN9Sm*1MQ3L-xDqX( z#J*iQMj3}YC+qX^eLt{Ol%MueYS3lS5y~;!fZP3BV12U{yT?36dG2(jdajIR^%{>; zQZ4uiE8h{*lFs;&JnZLDiq_ZPL&}BI@X$qHNPia$>Q8B?`@@#J2yd|H?nzA9WkdPt zP3Toq4FOZ5Ain1#>Wpuf1{4sB(Z~(oZEM1C>n^d-Z=(;Hku1FHpXGxm@;$_xiXL$62nqY2;FfF?5V3CIp!v)h~*fjXChSj zKLyW)KX6r-VraYN3&Cr?LB7R4Y@GE1Wp`Jx{G2Y>VXdLh;um`Ac4tAQTe0xQ321iQ z2@XydnT>D;>LwgQ`=Tb?e}Z;!7br9LZYawA2Tx2{MR}MNsjQMwxBQ>pXh(imusDWp z{qJze%!EJeV8n;-9*#>bjreqX>h@3i#>&Qa$H2cXK)2%};e6voEL}*Qzr)nU_24-5 zuC>gu?Hd+c-VR@SF$f(}2hw-ug5RshprHJNgU5N!_wzASMz%st$WNR@yr!_iRAQvP zms(xwhQ*_Onki$Y?ZyiFFFw|E0%)P`#X5&@i2DXZ(JnTfFky&H0|~`R5@Bu z-yPwbuaTg>@(5>!{y+>NX4_~1WT`W_FS~P4*53iczb%1~@mhk)=3nmSN)aDT=Y&-Y zO2KP`k&tHc4I>X}p}gC6%<*5(nM%md;F^Tn96DLHyA!B=Co=uIG~6jUiOY)hg=5iL zg6UWuYYap}khvCQdCZ_bXA;Q&`39{!>_9Hg0*C1{Fm_NAE}6FxeFk-h@X~u+8+CKw z=VeGR*5NxO=IC#wN1U)X=rWJ)jXP$dDDEGn=20n%*OsDWQXX>~{gwH5egOXk7ts21 z6e~*a3gXv)L%w}Hr-_b1S=KPF>7=0$eYXsUCRJgd>7(3+~!IG5=*G zclJ7Q*#?}12_^MRQXQ<6JTTsKJ#=S+FhGRWHR4p~(%Az`hy;F{%vGnQ6hfK`9Ux<;OHr*Qaa z9gCJzHbaU?!>p7hAPM>lMWgOOM#~p4oiQBTzRiLlJuN;vtcbEV@qomFFZl8bJqEOZ z`9ork#{EILLh|13Sq~LvMuOMa_h{Eb8N9UHkaFn|)BMcBoSCc8dbEL%^X<7Z^34p$ 
z2_vq*nGx0pEl1M~YuL5Hgs<@32aCuDAukNzN*u`p(npK$BsZzU+J0F6p&8{{monvU z6F%eG0ZufdNvRpx0V6IJqq;qwdJ1D;(bz|r;!4@W`Q_5ph1pPX#S?>WTJS|b^H6ry zkyY$C&a9{0=EBdlLwMgU=y|#X_DaRVUHeX0xWR(&-zehkUUx^Us7DytV=x+P-k@Dg z1gaOFQHr|1P}VKHgz*P%a@L;*LhV+{!Tzg2{8J9yj*weL%NyprH|B3PYVjh!SxjP+ zARXpTj?`rb(Dq9#c<1cGnm_6(ry9p(JqL1GLp-55Rv&{?E<<3qGmwyb5WTiM0yiNO z;&Kjy-@0e8hTgpk0*NgozmM)0BEYqpI?c3q_a7s{irs+-(?+1~geDk44r}Ruh2+4| zfQ{!_a;=#2>Z`uYuH;{c>e~oqh5und=Ru|=9sn7#HBd7w3>=q>1vfiqw5Btcs#3!$ z;2u-o|E_d6dz5Ba0_)$dFNp5nVdli35_ii3#nH#;xtZJ!-@d@S*hth{L~}u6u6Neb5I?`5^kv{Rxh1$k{bH0Tpelq_)>zqKk1A)K7QB!B^^0 zt47SJKFBfuYAENUm&UxJ=q@vJAJ_gU6V&$(NL~HIz-QTY+MUE=d;e!xu$=m11MY%z z=MOx!&VY{`aE#nOKc#ioPUAN7u6)r*;yD!4P&syze^W>3jOYOQtS+e88O^*5 zX>K)52ijH;v$)6)wGYLBai#@-J*f$70uMrbSw0@gH03?+CxU931y?;!%+H_y9L%Xl z8X9XTSej4duC!Ib!*NEui%ksH?wWwDTo0^pP;;A3<$(ArvFUD%2Io5iK@nKOoG%SQ z9pd%8n_(z8SJ8X;>JwCiilbbP z^(X1|?1va&%0bH7GA_J$G5liMLcn(pPwDIOS>@X>i_W!K4X$v?Pt0$+^$25@b*IeC z7PPf&0Uz~4+^cQO^Gig0q%0Gi+P|=h;YVP^CF%oF|KH8NI|MXDb0hwx?_P)-%>AV+ zB+t<0z3pye@UqLC`>tbnsjm@lPaMp+|7a#yKUV1$KMzC)Ut-GN+c_g*&TZ@Sl4-BV zLElYdp?+v9&ba*$WpjFA`-cnA+~XBwbR!P?tR}AhU>nW;9AMO0E#B{FJoav+Zv5y- zCb4(n8Z8%KMZ!^#y~*T6$!}TFyiA~+B|l?pF^Dca2& zOea5fMt{1yyh>BnFS&^G)jy$P`2@_`&V%~wI!Mxd#tEC7!M8FKJU;)Vy|aOGYW@E` zhVQuwe4FYBD73-}ymEQ_feY(!2 zSW?!a?|bq@+@m|OH>j*OK-jMwEZVUWo9)f{uqg@9w9lLug%Xpwz6d8hC+?WbpR7an zg7|zR;Zz^7AP+BBW)F|$Ha`>d`6uGRR=x@)R=uTP=2T-vgMpA(@dwyO=nbPINWM*W>X??83;6fD{*!JRR$p83O4sltmxVhizOy`Gt#LK*hg?-m7n6L7y_tVty$M`|%p( ze3{g~$5)to;3}$PcCdE$JRlA-++}%Vh%F^LI4L>m5PXj3n zxP+Y4eTcKBg89cPAY)1tD4vO6YSaI7|LdfVP=_h?iQI@OgAtcSpqJ_wh+IJpccHt!^PccXm zOPSgR7}F4f>V4JHzV;@3$M+#1Uv@&8-=fd2v9%C_H~6D?@I?-%5QlirC)_#LnD=Wv zOC1|i{$t)AjOlKH%7-xkrvA+-2wkznczt?qLf{sT>X^^SXHqP-i_tBtktqZONe9Izr(O@)&cN%VZ{4c8VQv{ z9}@rLJZpW;p_@7fQ=3T++mNVwHc4EdA>IBZ$W1=gR6*QZVs?&FwYR6g}I^Y2=Y9{x> z26Lrmx(`76I>=e&qugPrD-3uOjfsDVQ9SHVEE-4e;B5nNYTy?b5EO^=w{>8U@By=q z^una_ER6c|Ej?Q_pxZN#c6_DGv&Vbx(<}qQbJYQEMCWffbHqTX53q&658o(L=g)~u zY*@ec_aOElx0yyC<*zKc|L06J`^&kFlp{GZ>o}^{Qb&LB6(%cU()3@iL3Z%IG*r(7 zTx9n!oVuX3-e1whvlWy2e?p(v*U;wQ76|{i3YCK^p|ZLG#kNCn=+eU|x%B{S%=f|0 z@gl+c^aXO}(KEO5Bs+U7p4_?{pzJfbnZs@ZpKr(q&Zk+)3YteA_TX^LecV_Yip8~M z5Ryi&ywEZTPw@rZc^o}@8}Ke`ACSlP7-ZOA<0OGqQd!{|h&*Nmjk4|-JlL0W(~gDi z3tFkKJ%fGRM)TC)rdTm4pG7#B^4XusA9K%~?-)uQ?CWJLC}|eDzNo>3#*YwXnu!HD zw=v-=?HDG~&a>nNb9h8|Vc$fw8XFAXW3=gaR0PoT3YhAyL8;*{(DbG5RM{-0dW;3T zNzQJicQJOvEW!s9wS=u2V_p$rO>B!(%xlOK^kug&e)@c9S#}!tZ8PAvRCf{VcB}>G zCqJQc*AH;{5DYuNDKPEER9s@9$J-=+!r)%btj8l=!Cto&i-`B?d~Pm`Q5=Qzm#3iL zKT#S(mw0#IEAT8Nc0hGsY&E^j4ZeE_!V3eiM4#?~XU0LA%TQDrWn<9p z85sIsB{3(fm9=H$5N`esyN_sLv%52@uh}S_G*dwmJXBdx{(;M%IS|yhZ^PL8SHTpv zLGOXOya+QUssv?VTb$ta3vBz2#7E>hADnyxWucQ$aoavO z_V!xZPv>W=9GYR<-Z!j#b_diRnFtXPHR!NA4kg>mnIrWI^DiXAQ!69EVc%m6oqvJ+ zd!_U&y@|jJP+S>?8_t^XHA6nbdEzKET0R6A_!n&PrMt?UB2<{J;9@-IK{Fw4_3c`htRF2f_mm+cF?Y?VA(N&Q;=&=0R_^) zElTq8>{Hs^bH|ToqCx4{4639&F6C+@YW%CX=XVSR{Tqpt4|z@fDVo{Me9aBKM(m<7 zHmHuW2gTe1<p8bMg zZ2hUtx0V|49=qBg^2KW`mdBx^B$AvwpK-M)6=KSJq3p(;+{R5WLB1n-LY~TmcaC_6 z^@<;?pl}R&JnDq4?eq*5m2uipDIkv52Zt6T%vch_9D2<}V+HZmBT68{pb@H?iH&r7 z$pqP>F&NXkA2gTCiA&On)@}R1i)L*V#${|o#zFY9|2fw6Z9%tPXNU)OjVZo&V56%6 zY)dc_L}UKUwQPLKsc%)#+`ACcCjA3mtBrX}{dthmkcHvTPO|Xf37lli9P%PYv+(nW zP@d9OzVq+niV+t; z<@c0T8l8lUK2O*d;uclJyn#ErC{tc;47$g2an4{fUfrA6r1x$!zaQqjqRTL@W{wVj zC?9>`_ z!FER*sOTP0p=sml@7^QUe-xKr-W7cfmZ9Y5ZB&I>V$r_C7;-BNHQk4>-a2%SG#BIQ z_E_{({)Lto%^{^e7ON`BxiI4$C;67AoS}UPtJ}lTA@VM4j8i~ppCZ^g%2Y_*)urty?ezMJM`euJ39gX6eb@(`*nyTXJ+U(xgOX6Ac(FeD1P7_;~>SOhg= z-m7L4hVv`ghhjP;Wrqx^y~hW^mQqLYo3MWcJ?%A-)yw;B71&H4NPi3F>~M!dm| 
zPw1IN4vllR&^%@#o%K2~s_Y)L)}Mi__cZ_bZiL0;XA!0Ur_8q0fp;j1k8wv2BeI?w_c5g)PO?Kf0>H0BE_$EwRFqqW}v z%68~7#W4qZF7*J{4Lz{Hd-E9iub*?CzIu5SC zP5_U8GO_oH1E@H=nOXnLW1c%LK%97sD@fi1o*R3y=Z_48DsKyr?;5Ezvla`g>~xeR z__14U7W^y95u2|20r$_F@V-|>yyW6*seH#sh`Q1WfRNN>nbrh)j+6QRgRJs zmsmsH18fF8K09&>s)c1-=ym#gO0VOvS-N~qq=s#I--HtJ8}3`XF|R2J;X(o;;rwC? zKA_JKOglUm4K&};^v7)UwVsCRZWEQ+Q^qsve^NPqpIBJ+elJ)KmvC=m4Fp?veeyG1 zAV*<1ml3Cfx(~d;_;(ac-l7Yt9*ilj^_f^7LZ6BAG1mR}MRJzvvoFqNP|@-LpZ72k z8Xu1Xo1bFJo#>%__6p9};|n@1pu4eR4~ULkp6L7D9wz^B0yQfVpr8+RZcklUG~bQqWnjhAPEBW0}pE?vcrr=2M8)vSzR_aJn2 zAUNwKgV%V(n8L55B#%jy40D!cv%c&}ay-c5@k#&aY( zxVximYAx&3eFoLW3RtMt5gaDYN5xBbEF@-BXx&Eiv?@Rc=_zJgUd>!r{{ty=Z$M@J zYpDAx6g^WKh`DhJGy@ND6${5=1@+$Cj%#q2Hf3iPbm8FfA8<-T{fS!zAfIju_|pOc z=xiI_G!~Zcr2JOF4|Li22GT?~@g4hwX}=d^d;yQ~)y-Vca$CsTsVxkAVTUQl%$Zw1 zS9A%efV8#sFx2}#?hK~gOYwg2FW5{j){9vF_!YR7c3_IS8}S9hl*ttayl35W7T>E3 z(ds01u~T92nfuWFeLO@zy8yoB+aTjp2vfAha9W#(v$m0qtUi7>^gctp)R9&&VV;hV zI_Mp7Yko>?&Yi^pPstCj^OXC4U(+SH1|m&%pd|dLGIEg_zx-zxA^Vp;UvGDx*e~zt zPHYLDXa50*Q-e6-pfHtc0Y-c{hZR>1A?5BE7E}5F-L}y=CiNW4KBnMQ{2uDNWr6ni zCdze4Ifs@W%-z0{I8YLFjhheQ)AN}3tQ}b5`33TNHlVEgFjhZ@a+?h|QT^r?h(D{j ziX;QH{%b3C|9KZbtkLIphTeee7P>>PrrSXrfF@$nDRT2UQ~u5hp&O^?KITBti-;Q4LO_n9yP+NaXe(;t0=K0@8Qwrex3SV~KAkN8qAAZl zcM-~VJOsN_ldz-E0i4F)=9));#>SCD(Nw-2@Gx^%W+KkWJr9H8 zOoUZ3GvV&4CJ-&V!t7oQ0R2l9sI_%3WDnOTZ^k;<-|H>f-qnF3OY)0%%s~TNI={Hs zac-B%v1oddlbbm)&tA)+gYJ3hqn<(p^)ox=sA+e;}kk?H!zEk>f&%Rjch_#5Xg9-fggA z-5zvXb_CVa2f?b72XNlsXE6Bj4KDIrF32Zbp;>Moh@LJ+k3QePC5}2FGpf*fAUWbF z6F8!Y?oz|!xPcxHAS(0Z6gGCbKhs6Rs>SzEyj8^{f6ZWW{j;1rY%Iq3g+V{NRJcrh z_SF^CUoDq`%iR!o_3kKs{F#bLp>tr-s9GpmxgXO+d!Tc51c>@JGhH7oUNtTdV+J3D z6#p??_UXRd6Y4t0PH4eqy|-YMFaiT_+{cP8?jReU4z;6xfPUc_2t4}<0-T6@l1eU< zd)K7avV{x=PltHG3SRvv=LUs)&;PXdd(4LJElFnDz}6#7!fy`}ae z7X9svUIXr;{@obRm@VW&ZqZ%gax1zA+o70!;Hq9eMGanqlmj`~ytFsUZW^-Vu3dzT z#R}Ba?%@2FAH#m4STtWO62hY^Qz7nV(pDu?@VpXMw$2J@FJN5BX;%-OrEa#ETYy=axhe<; zcG^%VQ`3ARX9$egfhLM6WtH?MawN$Iqidne8=)0pcualylkY?1+*VY4=!+Wu zH#TIX!j?u2+Uc7LQG=@>;FJi}yS_+M&R*v<({o^k$yxZ~eh~(&(MSIYcfs;of7WqD zMQjc++gVV7>a-awXm}v5-)_kF)=$TGYu+MfK>6i!$FXBFW&0(80Q_7O*{tOxXP+y_ zI7Oi7^jK!89>c7XGC{LuBxMYFX~Lk}n0GlFGDuLDNfsc8kx@GUNrO45stp`DCykA`(L9>G73%6+_C6!H!F)2=&3{6fH3QN=yy&b$Ifd9zTZWjkcc^>|GB-A(Y(VQHq}^+1;O|^m5?_ z_W2LW!p((#33Qg%wFcPn6XG7Xf!5_n&SBMf=G&(``Yg!C&xh!9SveG(Tx76-93#Q{ z)v)LTIadta=kZ5*sDV?R zX8fYaeGne{ncbOdDm2gSi!;i!gp}xi+4=Mka66yOX`Uy*gp~CxAewl;8+PNo`ZL(7 z^1uz&W_+A>1IA0Av0JZAg!rglFk#RZ)^m&}b(9xU?_?<~2sIZvjdVAWy-y>kVKFQu+s^b2Jm?O0lqiipJ?Ewv3^hn_+# zSRG0vAIoKw4Zq38QXkhxtYpoH3b0{$Ii#i0?&ZNlP??_N+$N1fx7ugq4&4FG(VfJ4 zDu>Uxr$Fn1K65zpfcX~RL8rnOT&=}N^vK)?Q^WQWd%P2^j!`}*VH*^zy~~N(esfmO z<59iJgB#Y@Tqt{}CCrJZUd%>1i;eciV)Z>t*{~GtV(*gUK!IfyzsT=nj9II$qgQbi z_A7ae9cvq)W5aCnR{ue>A`>q7kEP(&QqD5!o7t;(N2q)JSScR80aWTOEPL)lsmsRi zlp||IN!tR>^6eH#p{{4#*oQc5@kg|b{sb##4g%Z9hEOqjJLV6F$C~xju`4lSol6>_ z%r5w zKjbWYFSTD0kMf-+oONIrv7(Q&=9yPO@pdMcZJdaa2d%(InuWmwbKtS95&z+dp-@F0 z)iuRU!1hqb_~Kf~zW)qra@sMt|42@KvyI8eH%e`9>7uD}B5wL|3+9ox)_Tr5EW36c z-P(gtF)4-l_I`+U3rzXlN6AGWR;*OCrQ}*^SE3(r6cXF?`G!MR@VZwf7WT$$aDtuarbApH@G85|FiF2k`u zu`z?*$DmrSltx?M!Z5pZP^7tV8n;Bq^Q3!m=_2sa>wxSogUE444#j20TugHmobMWq zwMXp126Iuo>k@nTSetiiI>?Fk-c$wz?njC9287eoAhdKKrs+LEvuWCbYUVj6TVRqi zq_+wG`Nt{5am!%St2dDCY=hpGj$lyOiR$5&%%ZfFdSrG?Ug!aV?S&xkaf>vT^82e%ORhocrhPeHwP@!wc;IRb0%-hjY;KQZG_5PbY}96h5Ka*~cZ;;zSVv5!Ba zK>oa7sqG`vM2T+o&OuW7RcZoql%*FLT(dTa;XtQ4n2hA;n+A4b2=e0u;-Q7g7oOHmg zlMpYx51|VdVrNg<3#8UUWyw8^Y5S90mS4Fv;>F5Go}b{~Fcph?cjZf>a>3N=Uu+rs z10|cgg1T=B6wTD)!`B6XYc}nnPXxm5&E`-yG#4Ff$el2L0eLk=e9Fb!TxeA{${1%7 
zzh5r(F`>J1K@@Cy)`*1%KY-{|Gx%q2#f~YN;B?_4*E4bzNIzV}yp|ArHrJ zc^DS{^Bu%HZg8@#Dms4^!XkrEP_?viMKSBpf$~S%l$p`?ApZWplR)EF&lJAC(&m(V z$g z%7w&UXkmeK8FY}##Aj(Kd+NOoR5O!7)!3VPc5z~D^J#~8R2#ICM<~5g?nBcuBVO%c z!FlUR(X#&$PUO0n>)%bAS2$k)hatl_TSwZRuM9=G??tH}@v_vGMqKA+9e&<0$`|%e zM{k{6XgQV-cc&9$G~*rXc%y^AwrcSeN6ldR8p`o4B8SyZPns>}V3g-=+&C*4I(x-~ z_)Z$rcp1Wy6C26dz6V0Hj)BQ@1HQ0F3HZ;^!SJ<%pyb*i=NGx$N zI+>+aDYNlx0QHmCxx4$GXB8VFm~T)nWQFRYTY(ceq=mD7r=CIN)i2l<@ehXA2SO|{ zV<%rsrTq3oDB8G-AnA5+tTxH@<9I<#g$QFd<)c?=}$5rIsIJkg?PF@D#zYb#e`VLG>_Ck*d z2&3+kJ7xsA0mhfJYRe<2@$lm!#vTCown~u9{w3XAFbF+kma>c<_wdYEEy1gX`Xon< zd9|M1#07`Mg7}IS-}EZO zypz#-aLG)?O3__7KlK?}4v4_|&3ADuor9Fak|8bY9HcY_QNF7{*-?5JYrL<3XJBt` zV5ms*XIC+&uU|OjmCV|AD*B) zx-SMV_<{~wvN3977D|@QmWrqMg>ANaLe-Ho=)WQxtgqkW=8wuJZ)c}cm3oZRdNr8y zYrTbCbq$5~LD%uo=&pRm*2_#Q=?wXDf}ws`4GZ9Jah<&yz}?PB@NRjBF=Zz~lsTVs z9-WMNJt!mc;vJ`ZdLM+ox&)Fr<;>S@0cI~slUD5CO}-?`U%5oW7CQ!t#xrcjVkP$6 zwh8^;^uc;dTWnqIgi$N&@MyoTe4NKASfL>f&0h~OYyVRexo;uHRhY7O-V3mwaUBE4 zy@%|3sY>e@awI|(wqL8kmx zfoPO1?4f*PKb;n+S*jy=+%)0U>$|b=hqLg%o~A-Z*mbB_6bL@4$PH|qjta*=nCU~} z0}ngOxh@<7Nsj(7!uL3iPS6v&M=MYrSu(MbdRi5#57_v{8C;i5N4ufPIP`KFL}XAm z$Nmr-=BmaH%4oJ6y^ezu=uD8WEr=U0!)QHie*Y3Pq49DW?Sf9Bct$7rbN=PRXPx7M z{FmWRjUm6};V*OzIgAE(UZcbFa%L4w-afAqkp5eQwPT!6IOQyt*+fz$%*Vzg35%vfFc{ zn&CO%`O*{yc6|uy>pzs5n{)8hQZX;q(DUNSb2u_9998FbLQL=3U_d#I{8j^AY+A^L z>eizoz5rJ&KZB8>?@$ypN11W+AY6|u2ZtgLPV1{asBQHpijrECXNK$Z!9$mzh|5z} zbh*gn-mN(Q!E>6M_FU-9awD@jPhxVm}u`b)D5dc`P~0lcG9JZK9_E? zz|)^mni7o>Kij~zf#$t=$GM?~Vc==E5GPM357sGziOq3hA!)!1kX29RTxYFB+g>t= zTXYl3f0w~j%JmK2bPdDjxIx0oPY^V2IHok`!I$F8Al+X~ef~(+C_atO)K8LF_+rry zGv503QDVwmlcv3OMukiXC&LW*j{LV+5j_$&jqDuHG4KZ{oxPyiQnU6 z^@}>FTR<~Kg25sUOmN!?U3~NftAC3i{rz_c`f~(Scza{GS0)r}&g5F=9Y)t)eaRzo z068Z0Repk0X(P<9pls-b&yYyIEa#QOAm7K3 zcXWG;G0Tnl^#5XE{=3^ywax`?cho_AU@&LAHXSM!{sq>GMd+c-p*+(;ShTDO)M5Rx zsK;69gwk{e8k7D;cmU&oS%JHp~{#+y^$F@tT$nk zjm%MseRUI%;+1x<$zHU0aW!axhzP(+ldU=O+ zZaaZb*M0)utv>kCid^^NJIwah3RDEOVEE}GrZ_AXY(IZwudImIn?4W3%fcYXaGTPh zunRdsCSi0yG(=s$hQ>?3LAbsGKhH<;2|Mszz6@h)g3!`(Dg%om z@S^PTT|a%^q23n#XRL;RK+1Mk^@956RLN zI9)>iiBGxsXS|4a&FKPh(+{AH7v*#x9RzRdby#q&j&o?R;vA0Lz=Q>{XzG~-(zW#d zoU<2o-<}26KW#AJ;65~;tuC0u^OOrNb0(O9pLhIa5#3G7+OD}E;AeD`@3L%zx)_8)XF?);n{aJC>r>_Ti}RJTURJqqdN>{$I+PepO0Dsc2C40p}WLnwXr-fe~C@9X(5mota1c8wl8INLeF0Ja59V za%c&P-x&+yJq%jAUqPR1SBXb{9TJ1IaOcxUAo($4;)wR;pxC#M^Xq#W6bUm>w7*l? 
z)MUoX!|5L7mW->_^geT)rwlfq465{jT#Vg)NO*n=?DOA4?$zL(9LW8b6btP9GCD+&-42k#x*F8^C_k1GDZZ;$y zxE`G)wqo=naUo@)*je|;qR;=wyy>gmHV)Ab^?T`evljR8|JP2A4O*#5aafT z@ut0L7qWDt%itziLYeb@DakU(lCEv+qMM8*gxgK2EJ+iUtmC#JL|hGhrur*(C!!G$;5Y1i7;)P-_)DbWse~j?0tpU=_yDmUkIm%d}Q99 z45)tJ31St?;D`$7ipUa#(=02bI-)#%lyy68+>9wEjD@vs8g;hMqR*CFF`pe0ov@x_ z?YjiJEYg~LENe%nJtv{qLyB#48|ZdvJy;&F=E`()KvU65mi%O#ivj&;ja~^2gWnnpVC9pZ5e^|9(nk%Z>S@rTd9!jFYlqLK!aTz5&A0<+y6&LwIpd z$jQYmv{i@o%M5;xJ7d4&r8p~Io^Fg=?_Y)H&rFkZGUsh3Yawk1>t6VL4zim!LezCJ zihe|hJI*$MMNh^{TH!+cg9Lp1F2;D!FlO6eYc7-Br^K0U%)5RU-2Xq5WP4EaxQAH& zhaRt4|4y7Vk6@@z6bh8vVEgUQnDsgzoCD*b=;~n{`Sd+(f3+3!%TE*Uazm7#Nu{dG zvoLA^n>BcFpKI{a;F z%+(CKOtf`xiQG=^whijDR+%Z?I%v^$2_Wxvp7b341LTZ zsCwQdmTe2h3Tq=i%wr$;Oy7V?m){7z{{+vm?7N_M6`e%Pi=mr{io-c1{nH)zSg6bC zv=3G~ujlZO(bin?k4dOrS3v5WJ1|J+CCKy(>54!7Q1jn;qN?nN(iGMQ(lZCc_L(uR z=O(eHMn&a}EvAVyA{n{Yu)+5*R$q-`4BxGIGs=i}jA7^QqAw`FeM@XapJJtZE6l29 zn%e31adxpMXl*I;wpISdVBQ8VPG#?mqccGEdklN0l;D5togv-J?s@l=6B&>ClDZ$2umQ;KVTTw{Pa0pYLl@n0OsrN0gz8dF-usF~8j)Lq24)fMsy~NWJwh zP&nr+g9Lh9>Zo&AyCws|ma;waybfo;<_GfK`{;eEa44H*#<%zSfb%gDWW9XY-N}kJ z6&!`V@;DUL`Ju%SOBloUwVeYiz;8knUOT79#TDuD(iKOrWa$$Sy3K-yjTP|wmwlVo{(g|LB^Xpq?h z1=HLhd9@yvKi`D{pBYqE|5)stTTXq-PtzHVKOsY>jP=K5pwYc&Y@hZAkG75AQ|$=z zeXLQtWG+nd5OB?6Jx)7e24nw-$o(`nx7}AmzZmHAUXA;iH^q!U5^l*U_q~F_4q2@4 zTbFZCFU7seG|2gb`Fy(nt!#4h#ORGZP(!1|9=TO0k{WQEEzLO>H$b4!x;LCiT9Pe_cDg| z03QftRv9bkN?J zfm=Na8Hdk`N++@5{+eBA+@l>_SjJ_f?*mAiS~N83w62yMB6X+EFu z;vf~m*%z30;|jX$2*W7{67kuDXh>h23`={OaxNt|K@MT#BzLZmvKev+{dowbuZlsX zTu5XO)5QkyLveP99@nVPJei6PDoAgk?v@cKe|C+Cre+eM?{ess$^4mT%s??KomTx@ z4#L}$SWiV{o=V%!?q(;zZ!61FHk<+BC=YpSFW}^G+b7Btd<@OqPY4PUHV! zJ*Ri?i0kF^aLzy#^M!W6-}DtYF?Q?Yau`rr40zMn5MR{uwlTeg(>X){FOMrF1$_ae z6Mo|hA4ATcv84r!Eofs`K{M9;Kl7(1t``rIRi{`U=j;!f`{yU6jeH=D&YJ*z`l_(? z&te*|GY6bl=1DIl8S70>p-K^g>Cuho=p2g?_evqmD2=9b#=L6dYr5juHO2+1A)3&* zaoP=RX4U8l!pbw?_wfY9RhuB^OD$AS%7ZE>$E4aX;MN?7I__q?Cc9DW$Y!4AM!LM@ z!%ABG!v4WApK!>NtwhRo3Y;9}nh)931Q zwiZJ{RGT7}Y4o{>Zd&G_2ov{NU4pe{B@k_FjRuZ^=v~KLy-MmKF|P-Kii9jVL-}BaGG-( zqkDLvt8WF09NWa26L;vjtGaxF=U0+FiTMl@DeU?to2quM$y8vhf%dBnyA~CL;kOmRN-if&n!~l`v6@&Vf!AC8Svou zJ)OXZF&JgQF^1U}n!KwUFu9#BP?_)pPP6m4pp_Qf{RXOa=SgZu9E=`g!nKcUMXgx? 
zIuA)9ea2jc_S^#)^86m&oc#Y8)Oi$K{w!X$(}I&&vH8uf0$S63Al~nO8%D3O=2iQ* z;J*{hxe*?EoNrqcIBaM4eal?>EnUcKUbGV2I0J*S?ty5-X%e?j$m^(&VX%okrmbFp z3Govc2YVRih|YkhFCBg7c4Eitoft6U5lo&T;L~%zfo;QCh@RU@JR4dezs!Kk zw-BP|Z*&Tq-X6#uC<45m9_dk;9BAp~GW!jT>#BJx=e{?J)B2Tjp0vhl6AF_@ZB8 zc+UFf85;%$WE@9HOK-ZhPYqU0WLfpu!&nBkmh?$X2C$Xl_HH{_?#`2WQclB|Uy;}` zsu=uE1fWOXQo_F!@(cd9;^X#3Vt7ae-C2ASUA~)Q(jH@Qj{8iz>?1JOX&Xj$T*Vb- z3g}oIf{~A{ICnO)m{N5Ff>kWfFWn6pD_dYvfeyFg-9M1weg(H%f1wVQW0=?Z1bXB- zVFSz0OJ>$9>+ZK>)OCG6#bzwX4@QaQV=gG8OvYe*k9w%seSl>^84t?pCRpg6g{0e+ z*$?))Vp7?R7_ zj$5F7l@wBof0JkbC8MbK1*LT6RI0c6D&z_qn0~$pMxMU~Nxl-4KCmYpH;b??u?~FF z&2gzuC&VsgS@7RS81t58w$|u!9p49GtWh_J+B63}SCpW((t#@Ex*&gKL^IqDkS7Kf zyv>%o;HZ5Lg80uQ;&>v;7bx;pzt!VQCcT6Trh#2@tb?fZC@^Rq2pjvQK^Q%N+2^NY znXUoY*>sUxorc`Xs97~`dk7?+bh85 z-3XQmbcW)a`{8OYYqpk-Lhv$Ybo|neQ%0I_B44di_DlqWz6!Vn12u5kGMaU<%!KX2 z57dhBMZeC`@g= zjpc{021C-tU^Z{g0r~jlxa#ON*4Ox(+-_mL&jZ$A)2zdIE0=ZP4Ac@>A9Hy5RxfDV)kqo`M9X%}8*oxQ~o6Fft4vsK*<_&=3n?)hmG7z zc(KZe@siJiviJzg3svD(J`4@k>0q43lD7-UV&8*VV54>dZSHpVb0)D4AE2=+UrA?K2xK&avRG(&cwJq7g1na&wNhlxZ0M@^^8>@vE7Va-fzM7 zNF?rM9GDZ$6un2Rhvw8zC`nW1H87u}_L?!+pQX4~F9W0=#l*pT8Pxo(%cTYmr8drQ zpfbjYe{O2TWz4kTRpR$l{^u8H?EDStAag#$XcZWYW4SL?3ZA2-P;B%8ni?FS>5~G3 zna4Hfqap8hdI!iOv+_LlbV7HgM`y^QAJryCN=))?<3odFs_& zkC(XJr=f|)oYC)e%uUcjcJ3H-UULg-8rmRk>q#j3@PTo?nUAxo9z>HT5z+e!)&rp- z=dQGa<76e&jrk4I%1&ZXdl}_(w$i5iC3yZl%Q4=#0oor6aE>w^L&vnDYJ>$ApZN!) z=O4tPH&qzC$sDT3XG6TzH>_w-LE-#|n9*Y@+I$~KLp^z%YGus%&Ps*mwTyF7JROp! zTcOs!i%3+xXqk8_<0O?}gzq67P@=#AZ#r;OQ8W9@*)St81@u}E!i&8od{y^c=;gf< zR5mB>&;gqhyU5|(XXv(x%}+Xag4&;*r8gF%=!pVk+h-7idw;Rq z-+E>Evzhed7$Z(?^?-3)CPC|tp`bplgocIj*xc8eOOt-b<<3W-^mYtHeX&CqXLppn z`CB|=cRhwz*OH!kI@}N+A?I3li^vcC3+}TRue{$S)XeE1saHKvHPnN8JO2x+&I?p@ z;VGTsdKemCKSKG!Q6wRsbwL_GhML)zQALN+r369HHBl^g9Yc#0Wvri+fFd!CxSn(X z?TgK7)2x~SMWGsXc79oP?EC3JV=Mp9RmRPdGrD!|n?P+{KWqpwlBt zTy=s&b@DLO=@S7-UB@9~NFm1j7moe67+`8tI|eREN7aEQ3^e`>(xR^T;GV=y+HeO2&wzyGmHuU2-@4z;oO2^oNsau zJjIzPafwhWW2|}2pgVbmD{5Hpa}FJppwA~ohvJBV$3gn!CG&m{rwae`;*OUe@!Zjy zDEiNW4}L7i)Ty0?SH_M_M9JY_C>b?RDf77^mbJeY56#x$kJ%fu`BDW2 zb(Ue$u8XV#SdKn5%aFxR(9Y@q+-P?qQ@e@%nOC=PY!1kbevb<~$?{**s>q64jEg$` z6)AQwfqaKV($T+#bpmY#J0w0+!&Jz58}^2+QEHUgYm{2GE}!1~033R%FsW_?r1m|73odM1#x zdt=bAuL;*-coppq#nSBWUszWn%O%fwNZjVLOhfh#BKpGwf~>ni`u0-XzNM4;%vEDm z@l*`%_nZVzI!rXN^Od4WauDV1z(s%ZY##WFiheG_5 zOLHxG?Ymo;Vb1&n>387CcOiGn`!SUHnWO4KC?w4&rZ$b8D80Zw*ZwcA7M22TZF4Rp zEQ|ro^DpAXZsvTiHUr)xv5Ivq@4-^{Y*g7 z%>z4*u0x|O_idWnNLv|Cg{5_Yry-#`;_?6OuI`^KD;A9tcI4v8@P?s@DgVCS7!6HIxt4q6sj z@jE{iK+>o4@cM@V-~L}3cx%fcB>o2WXWhwu$E!iYG6&JkdEi}rA8MYbqQl)F3^tA- z(P1a(hrK3T%7iaaQyEVj7JtCEPt5s>akp{j&aa?N(jl64r?74EDu{b+!X>@?K`Op} z!D#Dr6t>mVF7*#ke9uvqf7${WYgXcl;z<}=u?Iw(FDTO&r(v(S9~iJ}5h{XM_TOPI ztb8HlE>3xhpzMZz7mh-++j9u3HmAY?og`Xyn2P>+NhNcXRGA>;1$(c;s(WS3({_lc zS^jp)o7)(0(img5WT5kSQ*Q5Z1v(p_2fLo@@bAyYoP1^%^T%g_b3F6Y1l>`>f@X|R zGv00bJV=f0j!AZ1G^Fe-Hi_g=WUs@eY7UU1J!&X^;ty%ld@xLFN+LeJg zMkibGHrbC*Q#ghcyIbOjbe1QZIR@_T*XPQn%|xGRA&f291B1K`dF=Hc)UwQMbHg>% z8Idd=Gq4!L2T52b{9t9ut+lYTZyCymrlaheC8x30U{vo-D7f!L~}be#40_%)%~h;j__E5%Cdm)PKc8?Q~Xy?XO=6e}322McPemAjd?rhqS-34PL>^ZiD!(%p6 zTRX~t6CP%r7flW%LLCh^XBqPzHH`mUJRYvG-gcesawQ5wAZAS^RW?uV$UqeIe?W`x>7n!3IvF@dZ)tjAlNXhG4Djnq4I88VL+Zg*G*@jdfEzAK-Kbef3dkK^hgwwzd~q=1WY;XXU}FP*HjTyk-Al1~=x2!S>J1ILUAUb+6Q7j|qRmdD zRx48=g!L|W49o;kqY2lIu=Dg_N62x^Ku6(X5FY7=ttB(j>8~p=a@#|U3N&M!(S_)2 zszmM6(_q(i4ur8sF~;N|dYflqYtkzy`<{nwu3NxSRtbSa_u{QoimK5Tv}Sq)W^}y) z$DeC(l!qbjd}SQ82X$htn;g3GSl-z31v_6SpmTf+o!>YIidTPT=iALBH&chtZQLdn zx+XF2kPPT-tI$WnI&pM|5z&;-;!?+baA<}(-&)=udd*{;!^3SLYx+TNDfHML 
zI}lQbfwRk1V>GYu$c7HoX+J}p+rxCPYs8-CjRoz_20n0K5syyYg!Ec5L_5r<+%s$ ziAIBe#Kc&L!A{Ry@LFO32HqRN-jH%v+jE%LZpw2y^ko^JvjM)nqXIGi3yy*jzY+orY*T03)n{H5-T>+AE6DWB{j-N%du_Bs!Skv^_I`U~KmzVNJ zW{K3pE~4MSJT`Eq8Y(_?(w_Sw?J|on+_${vBoak1*LyC3+G&!d2XWX(eZ1 zdSf~EU!lUZPABji*Huu-$gi}a1vR5~fn!}LD*Ii5b@wh~S==u4p7R;RoBUYvsaaV6 zR7a5gC*rJnzGEh?IT-T(BewQ`2tzhJ!Wq1gAUkn^8<_JEj_6nliivX|yr({2wDAU{ z^%e1rzo%p0Zx^BHD2ItVPC?K9`h2=~GWUD!F|-f;iOttSS=sGfOgTISrMC|;2PI|p zEux*vI#aQ6cQzKpsBv5qbpU#8!r_}vfT#Hb=5b&$s;`~H4`MB$?9@$^bnC>&%4}3z zUkNenETqrh1-(6E4*OfGztSYTG1@&VNRIU zQ|==p_v2(c3|-rY3+Qo*Ma(^pL6OAQy>}in23$aCcci(_2laGK8fprHna0?jdG;a3 z z_dG<@X>fLQ7eSRnOzPdsF?R1QOt;SAl8&W;?8R_!dS}9$t~2EIin>B+qX=C7&@Lza zDBwp!>J<1&oZ7T_cl+L`dHsXg&I0mPDRGHHo1feehVl0r(dM{ z!{ofxwBTHC;z6&emjq0>%S2og*qL8~kW1gmajhd*ubBW+gGpSyMJ@O?eZ*1Ld02}8 z(mGSgKJpZM$8`~;?E{&7R5H4KBPUMKEoMEx7?Z0)pdWGf(3!joYsY}&#=|f!GaFi} z^g!FqR7f4LAJYdM0+S!qBThKN#jSCnInOo9C6=Jd;5_Adp5|5`Jb)4E`&h8$KP>)s z3p@+Ya_P6`z`{+$&D?j5IjI0LtNNkB&|Q+hG8H`6x5CJlSSZ?a670xXqew5p>8CAu zSLDRC*4avs2h+~i z<2rSmM&m@{-zhhAuu`+9P_f|~WRdsEX`VLcf3iDDUGCCZz7C>RCt}#Ir%d@P8#-fnPpC&*p6y zxmX_}h2vby=6>MWV8ODtGIQ8?*jgwkKU38;FatMo#hC_w}pW?@^9|Zw=)@o?ONm75aiSrzflKRR{`pTAkP}7FIvh7t&9@gp2VHAi!%kXZOk+ zHypMUcGUgA)T}VPn3#;Vsh6?D{UDe0Kk84u9fwxgc3fk| z9hmioVW&k5l(}(Gw!MmT?cPf7^&GfT7l{GmpV3VAf0A?q$^xyR&(?oGCFui7A!nx_ zKA);1qy<~>axMnq(#(WW7xSQi{3VU~@xSwJxva zLRn9J2dp%uyXz5KrQMTv~yzQkg$iJV3kgha)Tyutv+fj^V{fHO+X)xN2mIRzl=!)UbT4f{yEFI6g}NLwcPE0# zp;aAxj~qP*mZ~)?FGyO3)j`&U8_+2If-sSMUk96@x2ztc_l*RO|0-9z^@+W^}T>kG|p~)P-{d+5EHc@uNtn zpgd#R(;1L`>6EkY>?ZP3Pg2W{G%#zgB$&Id78j0)!s_jXF!#tY;w$$9+afn&u9y>} zauqgfjYdWPdFryZ?QrqfLCUlOSHCV1w`^16-mWJ=86JvO|Ltc*x2IvIe-euA%<;-G z;%L0`2Fk$G=iS z^4MAEyfB#aU38Bvh2!9qn#B75>WyyCo6+#i4=DAb?!Mz^%+wo1%y1uxXlgoF^3PKy zzt@(VEk$z2^+#!D2FkRrKq<}khuKv(}1NB|D@hDUOkM*oE=F_#6*wFToxL0%3TO71_&BXEO`S1}v zL+?Xpz$pw}*&U4vI&hV5E_IaOV%VU3_S^Y3mQCsnZW}2NK6*Y{|7$8Z*RNsadWWb# zFb~^zufvvJB0lk23wZGhzsiQYeF)k)j5`B_{4;E|;wJ|g4+o+(Dx=W30% zpz~ZAHm@NEm6IjyWxsHlziB2EH54^Vk4l`HT%li=uDm$rlH|-1Jt6Y+Z|pRt`1Htm zVDl}8dD5Pz!P*6bh&2$k>L+GKw!@cSEm$HM#)&^qWA1;^xmQ1y)mr33pKhl7jtm3g zTu}!!kM}_*-)DGIkNU1O3(5)K#`zBz4Ng`Mxd@$Fv?eyk=P79z;P?!EqhGSV?z@R~ z5|0jJoFISl5wK1h1ip(~pkU537FO^JzFf`#7|BCwN;aH!rhfZ^|EM2y1hvctLa<&C zI5%#gzQ<6o+akuugYO`rF%biy1LKP5bq*bkA$2Ffj`H<(p;GKNnmE7boYYR!_Hd~S z3L$j8E~hdb$x4FEgqt&n8}1{57GiIOUhM*5-(9ez?hI22>$%K94{`qLjZk2F8&oUg z*m$K6`Y)aWdcjs$csB{tQjGa6v@a?$>)8xMwpve8iR z><_5ywh~2y9_Hp39Hd#Q0P7f|9Glt6y`3_g+mT%UgwBp{xJm~jAzJ$#mhw;G z<-dCT!lpR%ojL?xjW^+g9_jNACy6Z~KF0Eg(!OHJMV73b3-x-%v=`H&XJ;OIiqpUZ z_JNbTF6`>AFL*{i;KF9)0REskYwuN1G0`9G*X+dQ`-;J>_y%UbeW7+5It1i@gxmsA zJLkPOntCbJ<)ePoM)N~>|DPLhtFI0(61A~tZ#Pu0DujT0>FDRLhaT5y{;XQWT)jt9 zk2M!#8daFL|2nF4vT^vlJao?PWEmSNSMa73Y6sWA@)d}htKGr(M+P_i+zqtaJ(+8| zpv{kTJO`t?+<*i7w1m*7yV3d|T~y9_1cT30&i8gd5L@|T^vqHWyk{v)PDk1+yvOuk zt5KTOD6w*+ov-V0*0($si-=(@AA6iBYC~{%$Tvt@k^)v)N05fESfb3qnJ-SE{HqBT zy!{5juP;zuW(|ba_2rs(YH`_aqa-PhcB6Q$8_MF&VRO{K=-zuZ6x~GPcmL#UTWJSB zZ!0&e3;9`Qok#J3xsorL`4mB<-NsT8+CLbKB@e#heOWaG^eN;vf2U`;;mO=PQ#xR| zFYQje(}^<_k*j%C!o``nfYMXRy1g(H~vq#5$xiD2|faw1I3L-8zNiYXjrA;zFIE{^jYMVyJ`Q>fZ^ znX7Q|1)YOWD60^!Ue(Q#msDLwT|ZskrvEC?JXfH~sgh~NJmMNIv{B$bhTPd#h@W)_ zrDK0ejG}1npBe|2CVx=W`z~bmHAU%3EiU8kbI_Ymgr1`)FS+X^SoP-ET=(aoh`G&u zdG`fvJ{qFPw^8DJU@2>RbCEd(mqN)=7Z%qt3xkYusB2OPZjuPdGPr;^%Y^ccq0IMg zGwb|i2x*j$ln>YDP50;sn}-?lHX)`MKCBJwg-3L*+{hJ9X~oKQ`%#%A;ypIxq5Q-c zNzWqs^Y@_`M_pIx=p0G@rZ_I|dn!52$1{gX=G4tMWkFZ6!Nl}ExIYiZz^5y~s4<)Q zIves~O#$FGl)8krwQ8%u_nAYmK9sz-;Gz63LZlL%CTF7SLYl+!5-4{s0>$1WiRP+4 z%bsJ!L_=lj-|LKcIJpC+kWZ+z+hoe9cVe7@3bu%2!Bta^GU}VOK6nJO1sz<|L}Iu8 
zDwHVVf;oTh@o4>*A+)WTh>ELs)QMI5ag-tl_Wq?Uloegz)*XI=imEoIij73gx*qr{ zQ;)aW{hQg8ePB)oahUya0ZKtCDNs+t+Ur|D^S7mteI%5#?YRjyefb1(%Y~BewkEt@ zs~2ToLNWQo4A5A3a-MMw5I*n;`V6}ZT8&4!LdWB9_^N2y#Zb?~~yQmyh!pU^W>pOb_-Wk{osec{D zkgEq_{d!|Q{?H-_8FLtKy*1W2P_yM`gT1C*5RDXWLb5f z_<-NHS@`95AZyKmJ0o5~L-q%*?PeaAetHW%dt;&RL+alfgrSYA6PR5+0&>Ik_}h_Y z2xW(Gq}Nf(J*{A(-mMa4{&{A1jvTEQ&ck?r9sajt7&yL(M(DE?Ri7=HPpK1Rm7f9k z?UfjEsSbTJWuVpVc&_W;-(dQR+YtVs3NE>r@b>LTu!82_%cWtMS4s?%k_rgy+Q@a= zq$e~45j@mxJtqCPAEg<_Ou8Gv?3Rh}Wdz+jb7sJFNX6Du`@jyyqksKUtUr)MjtT*@ zvkyxm$DiZu6$VgBcdBQb9-^)H74VI}3!x>aARViqEX*H+iefPB$yw0Eo>IFEu!hI8 zGcfDfH`Iu_LEeN%SkmkR=@T+p(BV+(;M1IAx=XIcZiOT&|2YP)zYK8>_wW*ZTp7TY_<3xoijMZIR}>x7y@2% zM!~v^VZ@2k;gwU2`OdZec)?X$=m6q%?=}^x_3xsiCLXGFqCjy$S5RJ*P}klrH`~Jl z7H-XgQUBhBk*ecRxxEu3&NqTs?<7;4NauPM1j0aH1^T`GLVM)((4qeem1FYo_7jot z+a?F|UN^$kE(ZMSiSN*PS_noT`v3#e4r2Y$X#6(VkT3jDg=?=G3C;SKnM$XMQ@A_? z&8P)vKdqB%Ki|wey{ed`UvYl&M$k;XE@^&kAf$XW5%n*L9P0N!gQe&D<6Wy1!wHO$+7Rl36F6vGaqp1f#sW z5M7%AiWe2kzETIoJyxO1-NT$+ZFg)tbRYfd%FwMx1jtHbF~^`u59L?5~bMq#LHJSU#&ICkF6i1_<3U9N$KFTvR9v6T3S13*I_U|HWo=+{0UnpIm_ z*^5qYOO2`E`dNa0S$5daYY(I@O9rijgF(D@0;sAUaf!XIV{LpnmS1^{ker8+@x`1p zl>ja0`=QmlSFGp7CQyA1<;sl;VaU^)IPhE~I_*Eo&3LZIOU;!M8|uXzu6_<8 zEj5c8`Wu!VA{WZ|2(CvR`C4KxtBV6@k6c$TiO{D!@mAWGCj6GP(VehobsLIO+?ZqY zH&Bd}tD9DS!3ZxiP#s$W1v_%cKd8sVrU!B(X{RQ8GLcgW?JWFfD5$1y#ox8&d`ay# z40^Z+&9rZUW3-4b@mL8Jh7Bm6_Ln62TL?zxJ>)`PS#o((-(geq3Q z{rG{N=?hUL&*!3SBSGu7C4_F%6Mh%HfZ(CGac0kh7`V8aYdP!;=boDhDgQo3qvea> z#11|FZPXKRSr*Mbp_F>Lmm}bL9e&|Pa>F+*Bi3^zT5VawwYE?W$NMhv-a93wE5BpF z=RsiHM%}{1Yn+2iI7nYwaV6WRTNp4C42SCqmX~jX{KHapXwDh1I^E2*oUlf%_|)9F zJ*x0=&}Yc;36Z3Kn~q$cF2bNo^gaAZA@L2VAt#EVP}r=&rmRwsx2@rNzs|vM`1uqrwkZ}164rp~rI@@EY8d$WBwEZk3W}g!K78GJb8TqBg*P&KrA~&S)1h#Y*PxBU5I&&11{uu_E&kp#NdOr>Sd(TeYG8UBMTbXKJ3hPL=6#R+h_m8$7AAKVe zRx3ZEtSSOL_J4(d#pTRF6F z6>*&1G#~O5g{w8`{$nc&-@z!y3t->$mSt@}h}bU*p6z)H{U;AZ(b&mgGy6NF{iHlx z3Oxs|&qE8^*A59i3U(DAF!|3*^ttB(^|8_5v1Aw&(JtIK!jNyHy~fNBlyjU`241!| zK=w(O?|(iNE*6kGWx-%j=GYP!__bQ-L9T0IKa8V$X<&xApgH5i$*YL_UqSPh2L~W_ zPZSi~Sr00!WW<|xcqLSuw<@l~&f#X*-ys3NhUxMR-}_P@#+sY{7Xx|V0VX=&gw`{s zKwq;0NLQZ$kB{##ZD$oKd~R~Cw=%e&4*JBTW!N(FAqE9xfROyoyY8&Tr|rf_e}6=-PBTgU zA>w!iEFjiK+1Tv56P(e9vGkgFgJc7B8={ceZ;!-g@>|>8u@po_FHp7e7`%RMB$(|s zqB}beA1Kck{^}HXPB{fu-JdY!(GY0Oy+fIOfAx+?b0OV46QgEb1dmx8@maJQPRNL3 z_`m&#XN-8U$#E{STNp-${DrFVJ2>&oShRn2mlZqGnfP>1w%x;$_sS-p^ZReC|J{CY zaA;RSS*6Cz+$tCv-U)BNp8<_|Jx2X=07QukQ99;63!4^1`JsHtva$L?O5Pr@8d-`3 zMYAAb^f@rn8iEV)EMy)t=0iqM=6djme$h3H1AqIs!&NK68$1E`uR^OTYJA z7XL*@$h=&MWwQAwpReR%+)`2Yc@=Z1J;S|JYVjRW4H)*#l_|bRS^TqC7?P<(tpx^H zX19wgoy)LK%V`GUVkW9rLHOlOq0ouLbg0}s(IQwehG zHyn&*w5YT|S8MV|GGh%~r5KJOX9g{aH`FPiUX;0h<=J z!n$`yFeUH@CY=6-24kM0C@z5O)Y1phY!9~Z`57>x%!_K?D6Y(O1SFDk)N|lnY2FO0M-} z4PEbWC8nWVLh4JXkbS}Y!=Y^CF=HWnmoDFT!$r{ObIfX(hI7%UGgXZt8!9v71E@bX z%IFP-ykqFmMpLVbwB5l8qVxp3c%`u4@+|FD)hKd+$!;JuH2;`wjMkNvDB?-{?r5O zT}6D%0dqm&dR5X~yorN-Ni;|0@w7Aj{Wpzj`H8+j%FHzYtcJ+q5qhM>Wp zduWp%&XDP0Sno_uz4!$tUTA{y(|xw}%=g&fRRwV)kE2aYI(sxW9Hk5PNJ`G=lBR)Apd&UR!F*e097Wj&{jDKJGb2+22Ul9>yZwy_zC9Ju2pw(F=!?K z!#TyCEKo-Gm* z9ugz_s*1@E8%az(&4t>UI`F-+pS7Mo4ml@2seebmA}61YP>|k?)pzfKk!dD2pWh5l zbs1dqmIs`2rvr|g`3&R!bi?f8zp(7=8Rnka8;exOV93sL`ui=ci2SX+Pt;?&&SsQP zwwKf|BbI0Zy+4;_ayB%Jc5Cl|(wP#}C@*2p(d14HVwm3NGUxa-8_Pe6u+wrA$}cvk zl^J!Y$th#b(M@Q-iLq=pQEhbK89K)=W$Zk4<#XPH@9Mp*<9iEY*g)9wBZ~GH(J0!S z!#qFcfY%le$_mV8etVYF>-7b#mN{^ZGj777C)B0=Z!x$Fi_zkSrQrUS&gFZw!1mxc za2hq3v#;z)3`7fcu#q{R)Fl+o^b_&Q(@9MF>^GE6PQ!M`*Q_(k81t7MLPcdHXg1og zY zU3(bsFU{S^v&kJOl4G65W%m(b7b zh+*A7at&YiaJ6#!s_YrdRcBuSt!aIkJoKhobU#C)6`wpd;D7mC{O1nDF)A>wCbwB? 
zwz~4Sfna-h9n@|`R2_N>yD5KKYMP3+?;jF%{1mqWsbJ5Zi)k2PN{L2e#S zxj`**{xWR1n8o~ZbwS4^4HP4mgC??x;hoJO+EJpeZY_t#Ol?TJPP@&&9&r&rCeqxp z2*6<~V8R%bT)%`0zerqe91acD$t<37NY(fQlz+y9uiX#kw`&M$sUsmk*$-vbtGS8m z&4i-j=jd5#2YX)^p~v{HX!Uvxrc>5u^#mQhC7$5`${e{;#_HLNDteE*V*xg?z(vOV zkjuwl?!zPuSMR}&wKuWk;UFw|vmV8lo{SyU`VvC4D&R;1?Z$I{fm4Mgv{umjdDKCW zXPxAHHWq^Kk;6E zTpt0Q3StP&yMXrbPq}d2NJttI4W2I^adO9Ql9V=jR+?9XQ&&5#A^IAVPR`;s4%6l5 zQjXov{XXWuPT-pAGhoXpV?hy>DX|KSXVS&(>UTFqf?1L?Mh6eU$ZQ4IaJK*#dWS<8 zF@vJcJO!CiH7F+=zvzVGwP4xfh8axox?30^%2Xftspt`Ay`>nVd{;g z)Qz|R5q8}$LF)qyE2Oi3YdV|zDg}az`@_m3+JcL>E&3}{pq0+GmiH^Ld^b7d0xp2r z=1i1L)#58oz9t530j`ki@YB^I!N+teI0fagY%7{)?&&Lu?y&-FX6nJj>3V#YcQx7% zZ%3Q(``qU_DF{EeK*VDDPU+JKGhc+ks?KW|zML4Qi5ocoxC3aV7GcTNVO-{(DcBSL zVC3K|ESz=>({FpA>fZ|te#pRKvlm#I1w-*CV&@r-#Kq2RmdF3MkKs5PJ0ux8C5wmQN>D5`pj#^a5JqO-5S|6mh} z3v4;hm>Mo9oY>4a)mXBt7^_7ld}zBE0)h{5O|PkIA&h51O6uBoB*3dY6TUHDPdFQ3 zD0nQc#nPgskk#)4bjA#aw#!*u@z*d+JoX3UM=b?!&l7m`y$+w^cM(?Y%SD}Uu^;q`M=^}BJjKG{eYtZ50G#EuZNqL_TX60DN z6ryop<**vM8(jeJ;IlAuaU!m^A$IezKU{It70`rasWT%IAv;SaH_A4NvKum*Ok2RX zk-Jd2#SPL|Rd9BH8}LTKX;40VIyx<|Wt%TkHz?sa*7)>=x-LeS5VmjS5i^bgoaNpQVn;3g)7p&Rej**uvcz z+zxX5D2Wz1Q1);fHf-30Ho3>ZHAM}1bzc-LaueS+raP|lW|9DA^oKI};fn*$VEe-swjKMwsH9`<|!qmom z!{n)GIjjIfzgRO@#~o1j+<<>vP=!TVW1z979$dzn^NS61gzEaQXoHEIY~E9J|JM}x z1vJA;cn6x%@73~}LW%tB8dkQs1Fh-?G3znrsNP2|>Ds;6)bl$S^dru|%3)Z1i@d8U zeH{1VJEpObD1+f#+1{S$*Y6cv7;3_cli!1lri|0LeP%{C?lPw#^RcD%Iaa5<#>SQ% z7^O;2~2S9>-ZM+l8Vj+ayEj=P7wt%i`|c17(sgYO3

KQad!8x%(8=%udZouE>T)15Ov%_Z~Fzq`l3;PZWw;#uY>-2>}I~~E1@4&gg9-?At zf<%7f;@HxLPk6ELI;P%=Bfr<9u?=Ofx%=H|=eey?@;X$9mp%Oo{_n#;)%Kc8)uIg7 zu__QJ*Kys=c4O~_A8^GI^7;k!!ia65xX|DgxU`8Zk*BM!7?ev8}^Sz}t%jP;GG*MErdAW$+Jl?)d?Sem3A8);z$b+l9nbCih23 zwmSG+7xa>e(e5_F$8EKft@`Ak|i>vu}=Oi68HQ zw-))cW~?WVW{sp=YK=wtzfipSB5PXy3*?cz)$*{JYE9{LweuE#a5W9VuHQs_&zUPh zoVy*ZM-wkdBFFrl5v=i!KCjvl08Upkv0zUKmc-q{M&((^a(RgkZj-7(568CQ;)(J4f_>ppaML{r%7sbn@sxJ-Ry_hM;XH`Dhvr)4{Kaj)Pi`u=36S|~ z35K2eg$l_PNp~8byKkVosI3yS2UN2B*dq+y(`Vzi4J4X3flnTya$^wsP3(&v^X8Jj zsS!%`*P*1T08{^$06rHlVSj5e#vQZ+vE@*v(tE)<9n{59_W59>dyOr9Vhw>mh+*}v zKP!1Yk`oUrWv#^QFZt}vZ8_75n(yD8Y8noKrq61O%Zoxq-DM`-WyN_;38hYEHiIdH zQTn<+n^_-$j!%f$GME^cC-1VbVLPF0Yc;GsS_KBdI(+)t1b9syqRiempgVsU8@l{q zc460{Il2!^nMb~Y*?!Ii@+&ango%)O^#$bwq|ie1Gkg9An{1m6ZHcqVU1rEc7e=zV zpC3bbSUp%R)Zxp{@5kU9sSv(Ln-4pb1NF4`={@K@x^{U@>@a26nBeh4x4hK{(O$v)j!bt?00Y&x(<96S5trM3K-4&fpVFG z<6d?V%HqgP_^uy$z>8tNBA9rt>%eV3<%Z%+`BvUQpvyU?|J4N@??z(!%nzL7`Mqcr zWy+VF+Q8{8n237L#ORDU!ok)aAU!iuQeaEZmeO#^rCvsSW-of~I;uc1U#?FF>jYHA%-~(2sGXM%Y zO1K`mdVJgV(dhXx4gD(q#-f?pm^efL`*~6r8B5>21qPD(1y`{9t~Xfb7Eg}ZI?8rD zK-qP=vp$%X^PPMwB?X_jvfyD5@-lV%;&_P8r7P;VXWzH61k2e(D&AVXxly;dvAVD z-l|->yLoeV<4>VOoC3@KzKzm#b#zxUr_79*P)i)Z!!z$gZ00-Ec$PxJXdWl^Fz218 zFK6jbBe=ZFh2YV3D%g(M3{vCq5HZ6RrhK6LYDys|3PrUhuoveU-^eK|3t7PS^APpE z2_`Po5t_yc*obu~ULMIQo}SAQYj5YY5>q8=*+H=B+s@>p>%mKBG|h_STqUJV0-=Cde-{U)~GwA<1`*^JQ!6EhJ*d(xX{09<0B!U}x)@$j`n4R7Q zyQ@zVH-OIN<9ks*L!7(tzzJNwD;?L3sfDES!!Ys!v3q+S=6;Vzz|d*YT-zp381vSU z&(1HB_{$5Rax~?Ndd{IexD@r4T?dc6V35~eAM3kbn_ReOncoUia{A?BTk><3wjdNs z->~!RhIM}Vjw`*p2t#^Yr5$M?uKLkMkc>G2=^f@mgY6zHJvj+| zW)Ne-Dp@T#QG#CX&(LY?L?|6R5|Tf9WAoI%+5Ng#z=sO3>90VQ zs?SB5q;g{I3+lL=yRq3wGE3h6YYP|kG6CY{ z8_3h;4bL{GVa|Vz(3Fw`(q7Gy2#ZjNyIcuQj*Gx<1o?jhkC1biTmYX7QIz}xvn@PX zS>g`teeM=)2oeeMwt8mcd7G2;dq_LSJ(xbKnUnT@BU#ub3+s$c_)@kHa1hYG{w`=l z+d%qpEt7qo!ZfSjL-wA%%y(fCCpB2lb;NX#7rj88C+mbVlS6FQa$=4g-inejH$d-D z3Al_p1zz%ln4aFk+WvU~Vk4B4n;!+-7X!64o*wv>SV>v>T=0M;*q1*0KE`xjZI?^@ zny+KJ;uC60Q$hOUFbrQI;$=R^S?J0Wu&L`~)C`UrTRvngbe^RQ$<>Qvchu0ig_sGx zi?xNOyRXq|S~=74GZm(*E`z)y7WF)d{Zp37h3<^w+O1!4>+~L=*5dw}yB5%HzBexdb9Gmyn!XIqjjd8@SVT+3t&bm|y{B7^G^hj;T( zpFA|~Cx?JY2eBpUCr%jGMetp$gVsJ2;0|Ad2J;zlvB?3{X32`D7l6htP0|)?OmmRE z;BRh;4XF{F^ovsBd&Ub2PLD?EgJkBb_>5Mnqg=}=hHV=u1HOAUXS1>ZT1+Q_;@c%= zcQ+Bt+FwEGf;ucbZz-4r9L0jfJIo3OqVnxJ)S(oMqtO%Me;t9czBk#$ULT-g`E+vN ztp@jl#=y&IHe$LJBFJ~A7el>4?>y{~`~>$2R^Yp-8Ek8E(RHFTwj2}*D=oVSiR-^( zmCBSKx4Q!6shJ!?OegUpRsT=agwO&ym2bDD3h-GOt5J_5_LotWkM0B3(O5saKy zVUfa+SJll1$EA-!v0w>APLRV4ZDM82EoJ52Q^9ZKLhReW9PHnff_VaEzE1f-R{K8U z)eU0LW>-;{_ytomd5+C~LC(KxZtP1labM*gSaLBCi`E-MndSzoKk)!v%RNA`alFLs zx(2t5+k;DzMZ)1(Kk(=TOWw12F%W1LZI$2AWMDjaTUZDeJuK9F`lCPe=Sh(wy3fwn-Zxj`}=X zq{Na*4adyj!L;L;qz-PP^ON)`r;Mn9?D}a;waJTm(&MRPNzCqO zpN~yPb%n`KDnZuQ5EkZsMA&A7TTW~9@5rHu1G_=3XWRU2BjYa^q+B(X1yC>X<>KhNKj$qFf-UIJ_N2|OF`pRigx{zLAFB|JB}ZL z$=aFVhkdc^eFxh19|t23MMH;40+epH5Zqnpd0?(U--Ws4GSL-Ua2oA&MZDga<&f^X z08)B{6Sg-MQ-NGFWtp%_Q-Rq!d)1!S`j~ll6BL?-gV$Ca2!B$5)qN}!X3kHE`2`BODau3g)#3wp)Is)NLACiGlzcmlzRkq%yHN`6 zPVLy3xd`>TRY3CQ`?zoMFKj7)hN?~NoJQX{_sa;%OuBx@<$34H^*Dq*m|-kH&3RA^ z%Tdeow~^QR9-F^77#id^xt_gNqur)wna|uKbT#shL0KBu}JxZU> zmV}z7fM_+(eX6h^M*0aBo0A1;^K39b%#;skc#1X$uVCg#AMo5a8|u=`_-B-(OEEkJ z4u6M$x-$=yCz3eL@Bj|F-aw6)5jL!M!lEVwt-J^pqjLadw;9u{B=&mKD1@okG5=x& zyt^V2ZXGldbj%V^*+4FaI^uBi|05QlHx~JD5P05_w|Eyrxs@dKYEvUVqLDg^4*6_& zJ~@*1e@EpyEmZ%$fuTnRq0yU8@GYmi!R}2Uz1oGhs5nQ=DQ!V?ay5Bn#$n`$HPD%E zf+~O7ss7s!hW}zWPSHGP%1N}Ja)4RiCeMR10^NOsvE-jcocEnC zplmnd-^QGUhWcAf@5D_^UvJ5)u3d%UJ&gIv>)%mRnKjm_s+enk_lDcpnT!KR81OmC 
zYbDD6&n;@x^JWs|t{4C0g2z2V`S!5fmcwCau|5@cWatT*zw$6WWepCzWWwu2yJ5`ecAfKbf*jTK4s)E}fhjM(!2 zC!nL|0G)#dpv}_H%u4hHjLz!uL;9S;=zrdUZ&e3*_ExIN4-fJ>Z}7d^f!U#_B*DH@ zK;CMruE>8yesM%crytPp(Sb7cCz)vYBY5I!Dd>bf0HYI2@%M~)bWPbpE=Va`NREqY zvx68gC!4bk`5Vu@{Y`w$w_LKPBigmC1?!D&khA(Vq=a;U?2Zn%{JI%$_Q4nfwC1AN z;)ulAy9OmOO;{5|3>u$IaJZ2Tt@|6WN%{)r{;WieV-A$)?*Y-QBJ$O_g3YeEP-7WE zZmW2R{@sA39g%3W@C;Tr?}EoG?~*fbC9B{05oHxNoQnE|(Gj~bvv*f?xphcsHzCdkWHs z_o?app>FuQiL+WtbH$-1==QLjm?$(W(#&Bcwzgb3<=9ngXMoL(UUQ(lIP!0lR8KWjHyWtj!YIcCMPZ3l8Cy*cSDP)f< zlsJD_gTAPF8y7B^3S`u&eCs`DUHKf zQN>`t|2rqH>V*w`l32jNztL`I8@~NfiqBXyX2f0vxnc{K8hHgIt)*D9--+|l8x5}l z4R|k}vWosD!s<+IUTd@gc)8C56WVWRPEO!%%(LX(a;`zn+7?LfUCxZI&q3L&TkJtN z-6?6VFP}%b>hE=oF7_a9KEO3+&7dGqhp_t&4fxO;rPy&VS{tY_ z?N1dH{UZ_-bZ=1o3d9So)Dx?z1h3JRD7`;T{g86|n(O2`exJd)&Uz1FQ(uBjMmjMC zqiIcvY(C?Deh>pYD3cT4MF+A&?;htAYPKef=me+Z*jhojguf=O0oAz#ylUt4P? zob1;{P|V4|RmEYj?qL;3yB6h!5BLSaKGCQcyjm^o{Dty0^TFxg6Pzl^jCbsF36z5_ zan+4o_yXxn5Lrw`)om+Qe~Nel{SJZcs17Xf)-b0J=TR2lh$)|Q@V?$Nn4b6=igu1g zqbWadkDZYq)%znUw#~xWZnQ65w^yQ*WhmgUE2!GNhWk?T6d~~o+UHHx38u-(k$p=V@YKi`9NZ9f+9;So;m_Y8IZNI~Z(t56we22J+s zp+ZKxXQy{)+wL${MP)#3lP#QS*Fp6683~eZms!m4Nciq*Eau0lRmx7|QNE9%ON9ze zUOvE8y;PtYY9OSq=*%6nBp@G2v(wx=Dz#o2=5Dk{x6=R5lvy<|9_1_MKy|tb#VX># z)uyqzy-q^de(G3qucn@(Ab?~W^j>U=Yg%=$e9 zb=Tg|cX=7O*xRGEdl{dQ@c>g_4u>GaSJ*s1l|?W4jVAlfbB!WUm9m*Q{2xES#!nKV zb?84(S);+Q!YbN>lRxsTmGIoZ5%zmWVRfHzaI&Bl1H(eVWwt9uwfOVmz>~aUPk-`| z#G_M(+X(mR-8dx_oSib6?^G{3msfJN^dV(lkHFk*rYI@?kuUM}2fyjhKzh*#mij4C zVMX00)zP3lD?r(i{t(egO{|OyxbzR=Y9#xB#^4fZ)e$_0cs+j64h{C6=P|gT64_lyuYVB3b#a7RZ>n z9h*0ma@_*zut~hY;^e(hYG=#4G^in85=ow%NC@6v3eb56plKXT>S!i7&T5CYY58#U z4RLu6@8Hsd1JFM~1uafGf~%c9ywS@6_}oJCtd>_w9&qQruQ8S~?02$K!K>ywPi?8i zFPC)%=N=}aNoIS&uB9uQZ!!|BX8eX}fnRaL+4Gq7;WakLS)kqIjwlyap~SJ8CEjjB zy{+eg4{j&;mHy&ERa(48%mirhf`+GZ%wE;NpMJWE>W{=FT1!4``w=kuNEs&oev6S} z7arS`g6e%=h}C`qGo6ls<8tcB^j4yL@FR4|`2!n&n271)hJku6&2Q!HAj_u|FMQG$ z=etCKq9JE4fdjS5OA464A1O49$WWf{mK`Uhw9R5>7Q>-IQ@YNN9ZD(O+ z+j}sWvxoeoI#@rSb_`+UbDE*XuwFhW8Em9#Bp_*$JuCqtuJKJc~E0oZl_Rk57qBF!Cd`joM9} zTExxSngd0bse4QL3xY$I;?;MiVr}dZkbiUG0lW54=JF;VQbnEZ&cwL?LcZCfrgUf9 zior{6^P`7)k>`qhRUN*8*^@YIoKOsx>MX=4+8ZiqA5$rwWvO45g63*ZmD#+bAgy40 zcaN_a_4hKaITpi@>eJur0sT(vEvR<^p@IE4GmF@tQ)1my~X-pdP z28`OrVm?&L?7Hn@ODl<+@$dj>Y~tft#0P&TBgvina4>kTWRt}vQGC1oLf)~b{}=c+2cF2E~tQ$Yjw;IIXGrsv&+ z$gAX0eK{JXzh*FAA9aU?_1@dW71IZWVh)RR z-_GXtu|fh{LRruqh?ZVKyAUIYOKjuL4gJ7}av}W|NW=hL;`6^#VwKQ7SdSg@O~$$o}T|CUm>MgeNHp4{3$0u`r9 zd48J(D0LDFO=RsLVrwsG4dEEBQ;4Gn?Ev*!4|XxCome?@4VGBtvP^gS95xPs#O;sB z4-)SlF@G;{2TE{q(jDl#Z#yJYKSHZOTIYc?5r$;zVyAxvi4hC;O z;+btA@BV_t=)KQM)C}|DVQEQBan6RtlJ8a9v6&~4@6zNL?VsD-A>QBxOmMCMcqfIl zz<7vwzYJOj8Nvk8$)mPbybW#$N;5iGNXw1_c+hKk4 zV@!;B3h9y<;>+yCd=DA;eh38hwlR#4A2#a=sd!(&ekw@p@HyyxgIesmO#`O%JTf?Sg@nMFiLkO%a%TYgr+;7 z=~g5QJim~9S~IDu?=t2;IKkI+e8Ym5m{%(`fbYuyee7}k z2aHAUdRJ6Rx2n?bon-x{Kf%BegRxn^1igA>afb&*aKFSzlw6#`UYhI!n?JKLwPqo< zY`FntRdx9EI5DA&PZwId_knnwEX>}M!RG$C8#_hTk&b;I6lxuoV37csS?z_UG#j=2d~@V+mwJt678S}dy_4;G0IF!!C2 zuZY$f@yp7!vuvh zc_kk5kUzeoTGa_FGmfL;_G5Q>?Qd0b!Y;JEHHf;JPGLj30WqCPqgpbGOV>YhkM}06 zqlMValaHZ-T`92cY70q}5BPmTN63gPBF(c4e)vmANDjCJAnPC|+`5jgm-Hd?)p>C1 zriG^9_Cn>TW6+eE26w6tP_I}Gj`nOqy+ntKz{R1aJ9b&4Ov<6eC$w^d|JaouGG<sCW_?q~u`H5DP zb3O^Z&XyB*cOMk%Z^e)mHz3aOFe^E!WhOgmmpW_()c4U7gFNz~obF}Pc;eK zZli2%Jygc;subRW&r41OUL0pdBBp4#e@#N z@G0^tXcsI8)08J z^77DN_9(Es{FSuZ_M*SFKZNPu#LyfMbp0f9oeX3_ek`v9*Nx(KS1);P0VxLDX4!&pUp8d z!7ct7^BG|*cGu9X6Qbf~`d5gxv=g+mEZDTkCn4eb1eiG8T-dMM6BFir#b~=$Gl)x6!Uf zf|~9&+E0eWwSNr}cQ;_z`f0?^uqOYq74(Yv1e(7rMU%DrG15X$Q1+$I+p-z*c~{vr 
zCnIro@f8TKeTSi&MeuOngOidEK)0S9MAcEsYkT$wssA!`xfBA|9#_HQg|ARge=`&t z#6a2Ezd`YS2jow^$mGuss&ZQr$eT}h!LtKE+rto7xo&~|l2jb>`#kDSrn8MS?)t0L z!P0Yrsw3%j6?8^wc79-zqyBEv3pZ8ic?SHSQbVDO+Y>DK+yDt4SD~)%7sG@7(XLwo z=H5IDPu^JwnoVEaBL^N~+KB^*^>T`DSiBjv|M<}yo`YC9hj{rKE`9eW%o*QaNZ57; z3zlhlDtSrDu244XLly5=T#Id{8Tin^LWr0$8PwK}_~zmrP%k&!Fs}uxD^6kX zdv9p-`b7+w4m{~CQqI?oS$}vBej~P{V!&4Syn>7Hzpg@q$3wyMz)3ED5Wte&d6ulw__`vVP1v)5WB%hbhHe>;almmv-jYUJEK9O9Kau5nuYm4 z?s97vOVMwQKW2B*uxX}dVv|7^h?yLZlZ#(~bfXcJ-Z=?DnyIKvv%{^|%|xMHJ5fIH z3Rquh=4sC@h5sJu2=c51mEz|oUU@-4xobIhd%BFD^ZNw#mO6rFfeTAN;L82Rq+@j~ zgDy4cP;E<${3XU*^X;u$Fl9_Ve9qGhJY8jVh!8T{K4R-wW9X?RmR^mym@@G_-!ax zpJwrp(DV4!(^AX}ISd}R@~C%oJGAP(qVD-&3?2=^rDqZ-|6IsCs-A&~jPBFVhGMbV zF%~}d9^^LDES*w;LuNFS|IL^W-;_mp=MP-5KZYNs{I&0axo9%FfhmGI@H*Xil;+7) zidB7B-oE|dy>JvXof`l~2MaJ%w4gmuu*xRsOLjTP8r< z)#F(7$0;ZZ%Y>RYS&;cCmpq%qd~@xr(#(hh&8?F%y`u(VFZoGgE$?8-c?TK#uZHHo z?(plwOa%vGy;RJXRO6tKRvk2kB0vDqsNmFiQn z+=Ke!(~|QLC*<;RciRiD0L}r#3TXcrCiGWhgUk`uywtEy zpKgH0rZ@CD83TKk&>T}K#SQ(l(f6DHPgQp@{Lpd8oMH%dq~mHIuIJWfeo)ZzO;r}v z8Ptg{ur1>NsQ=px6*GEb@bFGJ+WQbVWg*s3XJPWiCwL&4I^mBIM}P5eaI;*_0{zvn zs?TeX=@^No0j6R^kch!)fv}<0L{uIp|IGIG)Gur#z)V-n4qAggZV&MLlooDPlZEni z5|H$pjnZ-Y+;Ntay5--2{N7KVze*qcY>r^2_zo|v)e%OAeZzp9T5Nt74R~Zeq~6^C zxdR$OE#6{^tB0WDazjD-v^RG5e1)=R@)Focc=(AQC_S`H+ph7$UUx7d9toIpJjnR23(B8Vutu3Zv zzVQ|2S5}HPtA9ZC$o{zCKNF!mqydz#bPG3hzkpx6q{6wHYP4Naj8;7^qtw?4vL42P zMQJvKnUZ!sG*6~XGEjXlT8{;xD`XWnTG4N*J5*BcL+bnn`bHWGZi@q%blYz3*g6ps z_w1k?*E{e`G)BE%3AnCBPe}K<#9T)mh3gHL;-rTs!MR}@*L>_m9r1P4MG}Di7ym+M zo`S7qv`;KHL{0u9Vq>l1c6}Z|>c~)>&Mm}*js~!vn98%QDlquae2@%$!*2iYKD5*m z=D#Wc#l(X=`F0ux>qfFlzrAQ<)c`gRY9VS=71+DlL0ez)*U@?BY-@#$;vUe7?ZA4q zj0KGwM@+;Frc=FxPw1J4@2Q`CX$_qVT@Az;j~9SqcPz}I{>|7~#7l0a9?gTNK`L!6 zOx`sKHD}(b9Ar1pOWT5)6-m$(Pw&y*YuQt~Bq$oalRVZBuuIfw^x!I#NA~0%MJ=d) ze}wsE%}2lGY8F@Zkd2#E3l5(FQ>V-U-&vQ@dBQ8E44uVCUps*kogtI~n+CxyS{}82 zDDyq)2kIB*Frh;UE_!L9*6b_b*nZ7{#Th2VR99TW_F=k{s)O*n3#FGwwy zK;ZWG820CB>eGD>XDzow^pj&4)nNfw$DU?#)d3b&Dq{xYN+5mQGG6d{5%*4^tiCFm z_&~2w^PsIj+U<+G2l;3xucOYO?UcQknh7huoW|tGh5W1|^)H0&<=*-SFyfp8bU0`s zcx?Fwt}#03Fkl$O7{!2Qbg3-rmX39M2lL$psimL9x? 
z7IVjdILKU7miaX;DQG zb@DI1eK75?^8ba_XXD6=_z9aQIz!~{R1B@z2%+z~U`5p=@)i$AlkZv{Gg%j+%6)zIUPB+1C)(*F-e9aS|Fv>WP7*p`V{;C`2#1 zfhP4EAy;PrN-|%_;w9wIsEmfl<-IUw(qmXod7!$#+L+DC7~1DiPrLpnjPm)5l}ssw zvL{m^z2-ibCtgrRR`%j$#4fOzkb?WC_l5kGIe5`jM|6I6fVV;c#8;)$-spj>X;MFs zx5#A8>wHP;rafuWGgP16&EE9gj22z>A^Gt(?sF;{W8IEZ2k#6BcvlQM6MM1)ZZyZ; zNn)D*?|4+{D=wV}7=oK=Z}St?eb=(dep&F}a6KWaP>$ZSGelzbQl znsvY|{^|)wPm-tV%3w5j{~BGiFTk+5z1UQ4B0f#Xgb&`j!g3c&Vf{)Yafts@@V9yd z4rj-K+Sn6Z_vni{qs`#OVoSjzbq7ka{sW7j?}(w%K`?2Zf_<;;Lc0Mq+#oO;Dt43K zV%Qs!Yeke;=CJEu8X;Jkz&+OPMX!kstfDChK5Qq(R^mz0)32avVJXZXo{rX=%xI4O zim8+6-M;WMCbu48`BvHPN?`!*I(Y;ic z`q49yu9psxTg|X8=sxpG_CuavEG(STPMkZ1y3xvRqV@YfdAj#n44(csmYp^hTxPey znlViXdrZ;fp)bt&U?47D)ky5!YRDb*md@w^@)!|A?9&(0yX%?n?P|#CmVnw(Z&^_6 z9T@+oo-n!UJ?$C(Mn(30ncQwJTh#eJD!%MdW%s3f*@+B_df!}d=`sx*zpO>&%k>!epfkwBZDqrUUPcRtERfbvf4b}<`6b^L z`qa>zx%!F9`tc9+Uv4S{**8<>UtebXf_zD4Ut!XuE!e&T^}kn9el@5JrG4hIuLmrJ z?j55jgJ>WqFSKV8zZ#W!J?+;y3ZWlNKA4(U4{f z1`)a=(DUg|sO(#YtIignt3@sryxWM9fwxfiq>-49v_}XGQ_b_&6{G6L@z4r=I7i-8 z(3uY{|B%nIAP7x{z2WMZcmAF=XvHTe4K zi1Fv^AyT8{3+EXKem(ADSaJ&9JZ3567U&BF{=Z}fIq$G8@;MKk?1tILi&1s@DprRM z1*yd#=ryol!Gz|6!bby5HDWI7?535g)#V|>GRLnjG zl7t_!{Juw7--ZgbThkNmXHqxTw3o6<>Nz}XdH|xQY(axBZy@kH!%+WZP*;880b!qs zjoBHJ-;6;Zsc=<&CMp-)L+dZS(bxVL%HNdBG+VClm`7uAQiu|LCk&(S-CQQEATQk0 zt(ZRWG7HR3=IYg7@pXlXpxoYqAupxiJ!BD7eWE^~k+~3Tk&PO!6xGpf4w%OiQFCCu zZ0pKrqy>J!;6G9U>?sp+!3Pes1w*8wD~{M`DrR4K%2H!JVXi)PYPhHJ{r4@2%d4b2 z!@pQiw^&ws{usJ`J5G^9e+a%Jw`6Yljw<~4-#m7ORc?W{Iu~>ib7p9lL0mlo& z;7!$bbc`ASTB8Ogy;!O;k1-LJ#;2iEX$t1~CdK)IyWkQtaM=Y|_7s7X+z_Yz$ zaE9j^#K_MOKeGbeyxLe^pF9k?aRIAmxsv`Eh50kiv&7|B;c=Crm@IB%Yqre9M;fa z@vo@MKJ;QoRG1zDMZsg0^Pa`1TKf$Z|MCS1DQ|!&zAM@hLR_KFXS(oM7p(_H2f67<{jtgXp{$4>Z(4M0OwQs_p?gJ$s?!OWxP6ZJ$M zM~BK_(C$iQF-wO+o&ExFUakbM%U?(rI0zH=lD6fP3j!Vat86Ck#R-LX zP?8)%9t+A}MjT*eF+Kpkp6GnA2-0g;qvpnMrbC>nOZ})nsJ|hqTZgbf`$ldMCPGci zWi;`vVJ*kMf_LXIusf{ARfiL3@0`x%^3@oaOL_Gv{%BvBfc3L=h0>9auqXAUw5_In z^Qaz7`|$`tKtjo(+_|)k^1eP zz66_qHds}a1uD4)n*SRQmOnQUhfTy84_{Nx{TDN02EsY3%cy-z9Bd#ze)$DZKifn1 z_<^YObpwsbM|ZO$DyWR!gxM{~l8c_OIm0A^bLe8Mp}nC@Zx@ucG4xAahF*i3xcm^k z8i%?MB5a7u!>-hE)UDP+@M^B& za?3FwJ=0Jals_8kW^{#Ca|DyA*O<}i6Od-S6FY4(6LR;>#+WR#Jn$WNB2~4M~a&%n|I9<{c?rNIw!VGgU?kah0 z{jX!=x^Lj1HxQx&)DU(05)|2$V@YZ;&nRstE=@KT%LXh5zkB1r=eMy?8uS#T;WvmS z;H;7^JV|`DL9&9|D^z)JRm6?U1m`<%S?NgXBrKi9j((P)=HpdtC5COulU4{kz6dj0 zD8n2+RF$4<2Du;hLS=__;N)-x6HfGmit&^cHStBe<-rhlTO#JS*HL9Jodnj&6S#83 zY2p%?$@~hoV?p&&ESR;tuuR7RE5f}Y+j=}G#>#nW(j54H@hQj;OoGg5mg45P_CorU zzNoGU$AFjjupt3Z-ZF@t-1iJB7m{zLD|Ku88p7N+8JNC(20JPv-efb)_49mCvt}%_ zUT!LuUVM#>*ABw&^>ui_i8#pb=-3%-mlGsR6{#`g#dS&y>aXzE8Zje7?e= z0IiRF;w4#+xX;f$xHzD_;Me~eD;bYm?;g#$CKI@JO&8L~>Av?Pk~PQXp>=o&&-O^= z#clE6)axw<`bD8@auVfL&A6xB23$MKnLK1RSN1c5ugfo?((|Z$+^_cN5kTkdvP15Y z1+!F%rd!eD5V1x5UNFn0hj3L<3dBTD0g0V+rDt~d}Fc8rqhtVt_1yx zKcnAsCw%o=Uv%g*lKN7b(0>qVum9=_5jTf`^fqUbkSYB6)f$vey$I@(NvtVW0j~3^ z(UH2clQtQPL7C-{O?q6=ky$A9xWn~oZa|5khJY!>P<+=|%#M4^cMmTh*5M9V_3Aq0 zUoBv+Z#b8({j75SFQ31x`3?>C;rMl46LjDH1PxBEMMbYT=GY-0$`1E~=IxWvyT=At zI&Kp_j|M3ByoP0CVxamA?Vp|vmz8`z&z}#v0u`NBgX=)jF-q#Wrr#HKbUJaeBi>Ut z<#5*N@KWgh=om0Y2+VgZWD1?2a-!EFn3;c2&fnCZ5kGS92H?>}EC z!&1v%A0>wM7?Xktrw>8)QYFIi%c$7*H`k;NWHQqJZ8NyZy=z=5 zZNj?`w6JvB8Sv9|#TCn1K@xf$t$jREIot%)9UFOYBf~9~R-)s(v)~uB8svu8Ab-F} z)zTl|Fzt_v*cw(3vF#6myw^Ei^+yW0sCR=`w@T)}-UMp9r(#R@SWt%#!Qy9@=ohzz zud4k`y#Z;EJM{`AIL!iX)n1Sk&EOOFTL>lhS3&Zb6If(l37C_ICj2O`xqlt(i3_1D zY*XQu8nAtD08LZvU}Gui8uN3|c8G=Gm+lXC%095Aw!J7x3Q+~G(lD*VC?@}$=r;UZ z29~ehj&aXbOf#iGHfgZFu)C@mRXHgj4Y&f$dqt2somWXu7=d4s8HAAMO*%G3R=T?h zm_DTl1aS#rFis6SCl^3jBe-NVXyVgFxQdhFGV_QSm;f2Mh%8_ 
zm&1e2Mna5c0_F|<1W&JTftH3(sBD|8YB01%R~HM`6w(7PEA&LWABS1YlT5T-&>xf? zkH~CRe!va8Xm9-I5zO9Q#oK?d61;mqM#c6&dBVu=IO;EYKE5ZwP1F;Xdz*dAXb?78wfj)6Zb~ksvnxZF@1v@D@ro13%HtSV+|Qis={D zvT(N>80Zwsat~+Wn*oWSVX>@r+j1~DS;V{dxdOLm7>j`t;wPSOgt|d(Tsv$Z8`oeY z7(^b%>a_>ZbJ%_!cr(k8>~wTB3ic9V ze^6J5D77H(@(xVzHKPctogT{AiZ%-cIVzM`ks@Yv$2VM zy2;GTx|XX$KT_VLGlrc@<OmaC zm)3uWz~HVJ+S?!P?#{yO?VEAKjVAOSr!ThUeZ)_)-;ii?3oEl%fp5-w%35yW(nWfr zbE^uiH_~2id5-KUWzy9%PcXNN<)C=Hz*V9@iaLvjGtK$gl%FzV0bZoHd7oyVj^=>f zeK#C9%0g)NDq(6^!;Ma;F{2~#kM@-F7OR7B?%fTX@Z~W&ZGQKEd@f~$3Al6$frh?x zw=G>jSx53blO6;O8Z=qD5!I95@c9SG!&Qf{#uZRWiaK3f0(qf8+4=Wa9M{Ag6H_Hym1Tp_SUOmL4~Q<&XaPo!!D86 zw$%OiKr1oW(HOr6KjC&Oe{jEIW5MU~UQA9PzSrqnVDsfFgpJK3|3fi))q0?Bjw6vuZz!)nk7aw8LvUvRznPR@ zAHGbLf6tsb6T8zSeLTkH7=z^QJK9xPVA9!5SQ7mJ(sOHgo~@Dia{fK?gI<=w*;S}m z(Lttpe1Vl-y^L+-Lj=h|l$jccKH(>^`*!;I)lv*^O2xW8VZ=Rs02RAlp|_tuGzkHi z|Hm1O>O6o?{+)%bXNmLTL3^FnYF5ovU}JF(Yc`t+)iqg=+&2ikFFe8Cos0#gtP>y7 zM7&*#FKGCaJfPFovE+d}nf?IsImiY=bEkaDroHC!Rc}?+l3*5nuoSFSk&x@*iPHwh zfP>}{*qq$~cUN$9q)f>7ve~d~k3@*or-ShPV$3({Et4l3!8oA>;+~E|a2SfArH`na zk+g^L{{R+xlMi%@Y{;G4n18On44WLlu6!RChgyhX>9={rY&z$E*HX9SCbnj36Q7u3 zE?9VJP=5Jtfli2l7`%u&0UHgmt^6QJ>t>AH}1lKHI2VMhaU`hCT9J25?=y+PVdy5xQZ_ZD+O}@ybOHBpu zXR~nH>L=KJR6fZ2-BL+w-?FxNi7?u~7?8AqxGwF8VbO@;J1;@dvMJbD{s}Cio56L$ zaGGs$Fm34tw4QaDCH1-oimTPcCf^Q=FTa3@8Cu-zM(=9v2GzhqGl2_+qI82UcPW!$ z7rOgKRg8x+>l$dC84E#aa>#nIA3btDqIRE#2ZU^g>WlQVzaFVfoGoZiZ!CD6I)mC@ zyIH}zMKUyuMCGw{5Hfl!R4={`K@L?=bnq;=t~KM*kTM>Nx6$IsaoSg%f|9eBn8T~5 zXqRk`YsRlY=NBiyE_Mm1$D9He(!iuCE|_hd!Td)Dpi8q1wWj(^uVWf$ybT4p6N8{t z7cji;4Hggw!nJHTNN&2ISFs~j&_22`BLSUW?t}TsTTs%`Pv&+ef-SXahQJ=jQHCa> zOSBC>eb`>qT>Vgx^!Iy6DE)}xibDVai$Rm}gGp9Dgw{xN!P%319&su#3cd*?gMv|! z(<-ymE#axgpD?BUWvB?9PMx@S(Pm60>|d0Q8z0g8CGj)wc!Iq5kx`(&(UJSl`2+J4 z`g7_7#Hz?_&}koyIwUsir-9 zOdW)FZfP-JAgK8?#x?ALh$Fwju44*%U3$gBipSx(^v^JVs*Y%%YANcZSz}FI9x9Uj z*mG0rg>EGsW>`M2ZjHoT+CR$M%?7*O1>9?G7E}Mz8G3EGL)}yE7_}K-^p+<$G)PZ8 zu(%#=mNueJyMDax$23shJHbur$Vc0EGPmt~8Z*{bK;MSzU>$ZFs_)!}h6~g~G5rHn zo~h!O5~xr0!C3UyIfpQK9yt8fhOfqxzG$hG$sKbGo2#8s=b$0PyHV#?E62)C?obk* z4^FXXu`25h?9cOsF7qy9@GlF}`#ST|L*)?AwiyDA=0Wd=r2Fi#2hEn#ssu0MAx{W~ z&|`}+bVxn4^&-xY4#=#%zB9A16F7TNGn5>9kH0&b2s-xNRS9|)9^%0TJywgBzdCWC7ctVXGT@>@pA%Kb%DP>lkXpZ?bDN3(41XP=$LOM0FkQd$xUq z;8BS@q{>u~{P$HB6WtTWg}#N*Kb2@U{4Hd!zJXt#+`+gl>PtKG_Tm*T0Jn zZ9mcea$iVo97jDCx~%WCLa<0a4^gidA^GD}M~)B=aSHJUnlEs-vgd4n#SSboPsYH3 zgP}u&uCPCCGx49R;F&+^pjNMBbC1S@TK1EVI%q8VKTyNyro&J&@fCAh{Q@?eO@Uzj zD&{=DJGy-Ern^-I-oI!rpjZMv##Um;pAJyH;}(YA-hmM}w_)nT`DkLa0A(%~f>Qj= zC-^>xwBd)bIcNZk3N#i%FPj7Q^}zf+dwEF16i7=l5Da22!w^n8p|p?OYiJ#O9N0k& zeLM;a=*}l;^`@LFH8Q)m0|%>jp!8h97A?F9>d#AsPiU5 zA}0Jh1_SKVL0cBiE>1EKol~yh=<~neXEy^uO=ov{WDPnm|Bm^8KV}d^eEne;z*?t2 z>YPn;D?4@=29B{3q_0l!m{FzB-2XmXM0cEK7(O zbs*M`xJ9pzApfo>;9D%k4JBEi z?Ptuj*G$ATugj<$YpBZIIR@*twn4;)^=PsshaL8?6yiP`h-x~gG}g}i1CjIsSo(lhFBQN##*^*$HNd1!TBy=g!iEH5?#ib_tKUB8?Pwxc>wo9h z{yqwgAzA?4Lr{7G@tJ5W)VW{h^PN9I(}dTkd>6|C^D{v?N*5K$`ajY${V!cXtMz!E)?>GGK>p351F&hW4^KshrG*%v{wgUhShtJdELj_kZ7=0v_R_8 zPq?=x^XxEcc8 z6-;BC$n)myg1VquUU|QUe_doC1P?odZohw^SLH`G&5At4)3v-}3v~yL^H5DVU@FXi z$kAnC1{C+}g_`vyg5`&Jbd;Hk=N4CmdC^q_3n==WS%{FJ#^+7V~_Dgd>;aFP z2Vin+HJl}$L`WLx)_2W?qy9ZXGBStBKhQq@$3xy${E~W;9?JY?IzxH(H)xnQf;5T- z9-pBrc!f{H=$ss!&}%oGSL+DAi?2ee+Xx80xDpztzQC~HbD%e;3F28E#=QFmT|Vmy zNxgqU=sF{@WM(6OzJE3@$|N5&?LQRJ{}wJdNp}&eSj?{egav26^0+PAA;0$t)#UmH z=-%=PR4-JtTX=*aBhNy*c?@LN)Wh!oo)B)2cz3&xWAnFsmOVyS_}cIb6vwn!UE+!k zQ&e#0Gxgp|LWv3e+uc7uow!;r`JxY}$^Ybslk2~NLsco%lE>Z8vx+P4?~>(qQGibh z^&oW_hQam+LH(@bWg2|=R%xld9RDXEOySG0JMZv}R`I)Y$ zO>*bSEAD~La|@oHyo6i$4uzPJf0IAmnJWiRM7Yl_dyTn%jSWmods 
zSQNu86Js$QiW%H96+_!S$08jqWbSSx{zwuA%DTQ%5k7qs-f14b*$ z;rkaIao4F$#MoR6&grj+=~c(JjMf*dsVB&Jvo7uW$;<2PD)Z}>$)po~dG4%9m_7SE z=3ce}>D`quFtxon!TlO)b3``p8u5=jR-$!OPf%UG03nNeL10V)etTshSni2}U5%!K z`nmyTtTz-54(x}xCS)nWq){SH|c;om_LK_il`7PAkq?f_b>OTfupD#0svJ$S@l zgxlnGFt7OL%<`o%F}9X zj@*O2Jm?-46OYluucAZw9QYc08SMh+VEPv75FOw_Jp;>FvTZNa-uVWfhGdi9e-mER zwGh*7N?`Z%^VDTGoT-UTSTObngyvG#GHEZ*9o&ks4f~<;@fc`YGZIa(5lfO=S?-Gz zY}!+T{*GIzgP!;pGw(u?(^Xu$=MKz0??AfyWxoGMd$GITCj4~p3AFAYkA9II@eGck zqkIJ_!!KdeUPE#Cy++b%=7a6@dyu-$1A4wN5VX@vSWK!GEPfMPFu4Y$i*KpwdM9)F z0^&FSszpcFS{zAiZkyfQ}k}=E;AGY$NmEo&q&18mnCB3u~!(FrQogyuF)O; z1s0^g=b6?&A-QJ~_5C?u)`kR(ux6;(RU~U1o`W0OZ^w+WJ3;;LK$gFOdV!79EM$5- z+UY4!I?x63P3<9j&=7>rc}nEH@s!f=n%Se5h@O?Fb(@zD$1v7I?c-~7StR>kol zaT<8;?T3P&Ix)vBR+zZ35WU}W0XVK@3 zo)85%;D1IxTu3<(D91nYrn z7Io@AHvIC$74M%>zb<6~hIRw%7n_*6-#s)6KY|wb4&XJR3S*uf#^me^IC|+Lw2^** zus##X=eHR&lV!Z$^;h6ik&0UvX|TsB>ZdIo!M!$kV1?cW7;Kq>lBPob-D?Tvb*%>1 zS?$E`bM8YB`BrDOnTvLL9D?K5Q-9fW+3Z6XV9=kYVsQWQ*r{weRQ{tQbbsB3&2z4@ zwCg59!j?b4JCv9oA2;(2G2bwI;GbyVJsP||1Vi5HYv}anN$~di1sdzUFztmNp$&(D zj-3Nm{^|?1w?|;`cL~B&cW~JJ2=W>e(5-$smz#6eoSDX)jbmBV$yH2genX}Br%ffT zJSFpX9|K_?BS16flq}BYZybH_IhM^o3Hv`?!paZ%th#sz1niE*!KV8#rk@s+VzmR;wGV-vRYbsg_?QSl>u?a@v=speDX8Z{CB!@8b<|Q2bcLGRn{wK3=xlR4P zJynSzJJDnfK#=tcP~AI%$+k_rvZ9hXpZCU-F66su)x*qhh0r?785G(zu*Pv7dfmCf zJSP93KB{__{-r3N(CKg)z5KlKPoE5~p;nLul?0bv`LIQo!+H*a3 z(RIQ3%MYM*u)dfyonxG737WWNLgS|t4EZ$*{em{2$LA_k8^vSA@q7pccW98NAl}&k zRkn|D$Pk*V-Rdbj7+ScP-oLq@zQglD|3Pr{Y^Zp37c~9L=-Ii1iFFV0?7ID+zUsl{ z(~~99(H55!EjGpm_5S_cxtLe*1fn7(+U>Uk?a2O2wuT*Kop}Z0KIQ z13(c9%gE!g{Cx)23?*)@!yfXk=djy5bj9ku!!h{fZEVO6$0LW#1P@^=yfh`vDZC%E zKJSXqfjA1~7nvuELdDeOD3$cYu*BP#Yd;taUe`mYM1|6>E#Ut%2W-p}Fh6N3nyiaK z$+IDt`g{aV{@DN}vtID(S~{bCwDL|J!%?$Z<62-|=jP%#2w&Rd5vME?GW+V__n`0a z)SfbfV`FgN0dvuy`&DetHv@w+W}@qcNnqln<^D^mQ69L5sSf3Uqan>??lb7jUI5*9 zx1#3RNP4H{F-Q;~llC;>v(L~@+#ADAoq@W5i(LNyC_3}F8q@cWcUqLARmh~V<;Wy4 z-h&nIub6o%?!PRF))5vgBA&iAt8FMe@6T ze|sT1JO_WnF?N$L~j579M|Y}@`x4nEPP!AG_N?rHovF2uJd;mKfMS{Wm-by zau0BPafUbdwk24KCrtc>^0L={1?LBHe04k=0^B70h0hfTE-W%`MYd;cxZ4tM3{J+~)|AsCTGxGvtZX zE7x?p=U%jr$5!XV;L{zC!QbYv=GZ765#GSUZwH}#HthqnU#g?~6oF>N5_ypQ03N2% zf{^dz_>Pzk8$%_6NBMB#K-;KQPxtVo4L88SdKm-_84TX)!_ctT96X1%L9)Sla2c11 zvXxi)sD)aBxkEh7pJKT~(m7PQHnLLj0E27Oph+ypti{Bk(Rzl-wKeG4?;;W?y*=G`e3|&9E%IF|!gPzmLak>(j(DdIg&OA<)7( z?jQLYKW4{ZSI0%LlF$xfNj6IxA;X4e>mc(^4Gb~90m)NuV-S6P=01-w;`f6bolQlL zC^J+IDpgCDsMuz|77Y93g6w&W4#d-nio(f`!>B8lgemS;Xz)h| zwjcb=vfq}2=d@BVUh@uJ*M4IG*Xp=a!9^VCaSv0>HnKFc(d0ESV2-r!D;(>O1;dXq zWy2p#^C(t*|6>Cvg#SU`mXqXbIL?OsZ9?wsNOo@zWg$+6Fh#MWe7e4Ya97k3q1QRA z)9YZyfhIzl*?5SkjRTdBu^70}5S0!#+&|C~|`PuKoVDH74R-A;&_EFGMejLrT>Hc5Z4e>h$L-Bp$ z6h|gBl=d#UEupVwI$et(X)@>UL6SE31 zrSA$*-V0*B-X+Y~e;B&$IgS~r{%F&_7gY0sd%gMxn{W7I&;)Z_xhjbI9C315Q?l42 zdg7rKSJBFcItC-U(5n6k_zyUb?pL)%l}!Ox+PQ)J;5Rt+o;ntb%Xy0yWpF@epWob$rFDXOf}ei+<|Y=>lM8h z_B(Q4w-B|gmydelwY?M%Hsq;KZ$j1Eb{yQ*OGv401x0X@THM(~2-x$U%Z?9|s{$8; zx#SVV42s43_IIg!S&7bHBeC?sOw|7)8T6ZJzBsB+j#a;Gs5~D_43<0KzTya~#ysE_ zld@5(R4Vs7WCB$cov6BUVuE6GE>-Zz+Fr?Bfqr?)3h9J%_TQy)fl$Bp-OU z0Q}-YacO%rnhm~!U3F8zCgKogMot6Vw;ZM}KLF16ve1UQXz{Yt~^f*z+7JY36Ev_&26ks|4AmA}l#+2N{FT!;ytL zf@@Px@zddZSaIVUY#mOSu+=ZY>s?_jOs})4v6-m2pvU6Y{YTG%Q_RusESG^dKlpyj+7$ zGs>8s?FL-=vk@6*=R%$y?FQ_~8#4EI%usS(^6NGX>7blc@GCyZ#8?bEr3){<9R=4m zYx0Q<;gi!Pl)Kdx+ol-^%Y+qJGKE+=IjQP^zTQyUzZgna{==qIpDS|Sc67P1926U~ zavj%P;SmmZxf^9ivNV^`dzLn!V?XrYk_kG~$WOD$1pBY)MD*KKxD*n_KFnS(nB4gwgQFYA6QdjKSbPT;3 zn;<@^3ang`dE`YMA#mU#2wb!Rx-LmjJ@W@P%n~8~#RRZl8G+b-1GSbVQg8mc{92Bd z5LI~+(i8SWOzCmDe+G&Y5&d|(Z z)Fd>2L!5S_?_6>H@`Uh?2k7E68}18l&?+R2?Y?Fvlp%Hd<;{@s#z4$`-vz3FJmmnp 
z!S>&$bhptJ*RR$U;%{8RA*yQ6TutfXX=Qnc9{u^9p!A0O>Z%;A{z>fe<{#8}HJaR6q z?An9v?mo~(JCEg2Jq6npdq8nJTb=DMA+KdG`K9I}y!W<;;A~KY(>LXUf4DyA1sGxZ z*kho)wiz>ZC!l}W9&VoVGq-!6gUdo*Xj-2;&!;kk8Co?s0+ou8U*IvFZlCdLn4nhVo_WZh`gh z#NJX+*LvE0^nJOOnRPUP^J8M+4ajBfI~u`eV=_89cJissU9f&w4`HFnDRO))m8Tj+ zL#&4<*wF8FA5DxEy~79t=h4~jE*7MoVlgvfp|7L^Z}&10rpNAvW&cuEsx*x$R^+I! z4V&~-qKNUY zi~5s~Lg3^~4A|en2RmAT$LLVhxp@g{hH@ZB8!DZPS>z1zz9;>L3a<@ZUDXM$b51gu zmkBGS96*|%5wXg((Q*1gX1;C-a~`xGbENMu!&wH)y8i^@zKJxapn2H6Nw_fXDW)9$ z3}=7&f${mf!F_NBp6DqR`|?m|*jxoKHW-PTDi0_)b)5U&oC|ero!s8xE;{~hiBC$m z!n{T2aL?aAFd;@)3@r|&o(p*c<%7ZZqBi%fsn1pYIgW?*^24wdOJOJRhg1QZ$ z$sw|U?>N^1Z37>HbJGV9a0i3Z zBKYHCU65t{!O#0^i*3Drf|8;Bm~^ljoWdWn@lTYzYJfxxx|xVh()%o?=OE0qnT}Pp zQgLC#Ug*1^k{lU2XqBvHe&6ySq4frCxc&fM)IGrFJL{N+7{$RQe==$NY4YtI1wDIu z)&@P|q12nRT3v%aQ?kI(&xu#G?Bj}>G;luD6D`x9!=?LoA$&v?W{gol_f~Q}&v=TF z2Z%lE9D}~Ii*ezkCI~-p3N!sx5VpDy`g%XW>XDbQr7Rp2TP)CP(n@SQ^gA@z8-drB zLlACr0j+zdLCziT3A6NDF|yA(bWN|s$f6TC{vRJ)HAq)9Ut~)6g)(kCobDdxW@5_f zYJBpe9?oR+5ZhcI@WlE75IJ%z&iK+xaMVv_W`DncyxQ~Vd8COh%H#}KT!ca0hT`Z#SS;9?pKJ}Q{3>^M>SX%JQ-X@ zJ*S@FZE}UuJ|^uqFprL5-V6RCN5ulJeSXEP(uZ5;3J}8cq%Pim~z@ zqF>cKuv!+(7JAg8)uB(&HES;_Is%9dvzb@?k;VOJ4xBQ2DJF$>K=ZOTF4MXV1wC}Z zW$9C}^FI#duaVdlA>416sd(dT657o=4~|!Qz`U^)SU`L6S&=cQd9qyHBA&$GoM>iS zPP@GSw9(x&5lgHU)bD<&jvq@r$g{uT)TdFP&d#LH$YpN3NgEso?cp0TYf!V+23+PG zgI6BL=*ZK-%GDPg>m#8h-ay#;XA4*^sR7ykYx2_Bf3mi&t32z~5m5cTi8<0Nx2|Xp zcP%>$Ve_@9r!X5%zt9qW6}4&?A4mLR+f&e7Q*oD$DLCHFmwOEwhHdQ^`73)9SoU8# z_#CC4j^inw{O4_ma6iCzU88>Rls_T=5uH25^$t96TL1GY< zW&uyj@WYf>Hq1M2JM|Z|dA*~KkWRfIV~a85COg0ej{5;#KYs8t^YnzRltc6JPJyb= z!H_iSE!3wKqMgeNTxxa;`|r^g3s#44g}DK-Q{?iSWA%kP^ABKo;}qKe-pV{1UgOp& zFTsAzKJdR6#wQz{!sewm^w}K*rR!|yUNnhq-d2sR9x+%_Jp|Rg9zpo%1kl$!qOQmx z^{z)fgzUBE+@p9XF%ctR7`e8BM|MFZ`-<*rBcbby6r=(7Al%_R*ti5i@NEshl==*m zCT4Mb&9$FZ!mJ@3b1J@Y&)zTa z)6fT4?%9f;DGP6YkKT2!Vxd4b2>1N{1rsd?;L={Zv3q|UxOCP*^QybdlJ=agSL&GF z1Io|z*}=Q=B0>M@9#oj_07>tM@`i{2EGK42(BovjvD{Fc^=}xgj44B0?1bY7zU8GI z@7al3V=*IcIB1A_CH1M|E9aMh<4SE7xi24m?zW)U!j))B`#x!69oU)=#%9xn5TqQ8 zP3!aEhAZ_jqV5rg^bp+`l*u4Pzc*Q*ARjVSlG? z4EO6=JXMK`E6~%*5OywN)1+Dn$v*6gF3;}_7vu1c%b8Q z0T(r!2^C|mbJybsaB}m0mJ;w6ldinOwhqY$ujApU|M(922DGUo9zAAtA+)1ysFhn; zRN&#(UZPum4g?pZqqOQJR2%q0OWk)mCyl`P>&0kTT>}pFLtwx;@{0B?Wjg}RgqU0d zA@u7<=)Y|pmP)qplJ#Mb$@h^DPGl7`8_C_fnfY3tQ@^aC?81RoL>ub58SMu7gl@{x zzXPqplUz>R&W1S(%7%^R<^{hIV|olW^*W5U6?1Uyeq#aelKU&%53@!jLaSaOoai(Y zyFN}q&;KZw*0GPP7Tn=5;yS4AS)$jDSk^2t2dmTXF)>+7czgd98lJC&8@>O7`Y&dp z2XQnyR~`gK)G4*=vwS$apaC+HyZGdD2f6QcL!spzF-E+8W4_xiE6O znR>-wb@(Vuu0G70v!}9#Q`aFnI*+^v_h8@{@~9p#6&0OL6Jl@)N}lFYr{<$tYor<4 z-=Sw*_7e1a7KEh(8zFwpO5)jOqu`QIlX_%!tw4}Su4LldEOOAv;QD}=^o zJu$)k11f_igUzNq)SIyg;*06|wu;y|EyTedkc2CVxf6FD$qPq}!@je@HlLgkb3}}M zy&XOs%7NrF)u@=2$I{Ajz_#fI`2Kl>Cyc8mF7s=Yjho3dM-2JOpO4`X+u7)vP{R}p zo&}%>(~&mU?-tMLEo2aZ`CPN^+7Y`AJ8)1&%E14(r$Yp^_qK#Gn-8X<(oRJ zPFxCY9good#bWe3{tXHf7U7MMhhSw+KT|e?K{h6zn^2Z6WpO$vuTJHG5*@K&*Bi{T ziNcIN`yqQa)wyV=taUG#w^G)#{GBV5r60!rhqqyp%MS>D-$QUDUfJ|3i4d{)7EY{b zftbi(d=+dcx|lJP{k;Gxe|*P=C6wQfzrxL{#)9;qo?tK0ppC~VjCs8j%q{OTRm`7! 
z;yJ0<-OviEu}tp&{yL`IcVmj2E_Fr7EsXh-7&+d1@c0^2F=NhWVxMUX-v4`x=JJ1e zywx9YwZcHK=uIveg{~M|`jWV+pC)MHW7IR~Y;7Am03DuP2dU#=oR#(rs`jK%4m^Zq zu6hiyXE%e_)K&%j5)8Jb8r?G`G(4eM`PRGm!527yMEh!LuLvL{7hx+el1=(o-wR zD;dPodMw23!0G5RbQgH&hlB5}PPSonBRN&6f3UL}e}82rY&N|F_IIkF(5V6fjR#@P zi844eJqetr2jTS#MS^%ZBK%lx5bc>Vu60)QINP{07$2iAIhdiPB|QK9BM`WRu87w)KhJJ zbQgMQhN8ObDJHd0Hq5t}OKrVi@<<)nW27&ZHm0zqx!2*z#qy&0Sz-Fh`Gpl zMs6!lpUt7oIES^L>SXq3-!^F|BR7m=N|1TeqKpJ(Bl?`E5q;*a%qp&wbEa zVj!5+Hh_6-7jOUep3bLgCLNg0pG;hhwvVzQ;qM2q!#Ww2wudn0hA~!s3CBp`wqt_Z?qcKaa+-Z=h^J z9Y~uGvg)%MP%a+|)(dyT5U2CtHpUvKw-NIz@epMA)43#n4A1-DE}R6Vv@`{y63Kgfiz!ov3P-s?@#!)eZ7Q16OG0F zlT8KFG5sZKP3Az=DINhD@#K}LndAl!lDeTpej_06jP#^@4{1-~7>sR+O z3%a;OP%bco6k$268A+MdNtfhKkG4bK#c8ng<#Qar&`4-|n+G-~5s*&zTieqn;?%P5 zkS3c7(;|qc-C9R~?|qa-c<@qR4O1nZfMg@eQZLxcCl~I+%z`mcV!0Qm{(T;*CPcv` z+PxKieg<1*xls4Dn(~sA?H(JA)l&(Agh5DQ4s;!Tnpvsxh*xDI_}G!d`lg;JT`&SP zL&KPB(@o}UeT^&j8F5>iBveh`El=+_28+`_ zei!`v&&7z9198KLMzs4_f{}EliE*Pj%Y#+poo1J#^xtH#r(Mj=ZyN9%{2%^YtuOYU z^8tMOH4;~UEmIDi!0Pg3s0iqT%5Q!7$DDZZT3-u8`fP-jv==BF@>*SWz6DZlHltvr zEySm*u>8ygRPXF2mnC&yS5QCrbrQOmHDLy9Mu%N(Ap1*Sygs{^s0#W8o2WPK@^l=u zSxy6|d({|`+{WNa8XE6uffj4ZPF(IKHswZQe0wb`Sxg;4ouibcc?ZgMc_`}{G$Fp{ z5@@?s#m!eOU^ZuT#0vxHzM^A+DFGj`v?~hF9556WZ4vUKd3s{Qo_Uz+lZ1{&e*zBH z6|HJlqq$WrS2p!$bNNX;?Mmm^gYVg{E~)T%n4Wm<4tdH4m_niFI@F2Z2cDi^ zLrdFQusn7P{LcOi#@89nU#TOcS``3V#9?XuJ*N1+5hF8gC})t0zP$~3<|JE8tuDZb zPcQN9_8OFy*ODuZvQ_yK!I9=h*&Eer-;b-g&%Z@*@yuRyj7n$YXBr66rrDrbQptUW zMxxi-8K|837n$X1=BkAs;yss;aF;9YSc@6Gx(iMXJv!FX52kN3Y zw|(UV=#U;P9Vo3Ej!K9_mTF9a>8gUo24w&*cdf)0y<;7gE}sL3D4 zr1!Vcv*pc%!h;vN8=Ye-O24D^!Lty&xfT|wh=p=s4P@^f!mY@?TDOnRC@?lxwGmml z-T?T0S1Y`Ws4e{p=|sEBO!a}pS*s{IlK{Y1Xc{x z7a*HDwR9)VJpTzK?~7RE3msv{|CG9!Hl2u#JY(Ks28M~Ft718nlGKfeT+_HOTrSA9%zM}_y2H3zte2o zXxcq~c4t=CbD7ISiKu#T9+vudm>|#rO@8?5u~OsPZ{BVWmzyrZoNv zZEwb-@4sE@u&^fB)MzHeKOt^NkKIi1CIW6$M}ee04MJp)-*EN~Jcu^8=L{ky{&| z#3!E(M4gT2p&}rP7lhj4HKkPS`s*}gH&w7ge9tQ4J0XFdS&rA|L&ff`IK6_{sf}A` z_Unk=Z~DPqU0tF43EdAR#JxP#L+~4{gsuyl!L?r{8WvKgbH4}sUal>qdFbKc+4N_J zw_?r6MBKmX4#b%EN3AkF?nu0n^#;M98Xm$l4|LGHryT1ljKzq_I`}Fe7BkZJV)+UI z8uA{Zp=&;D-D)WMt$&TH5)6dN_y0qU!(nyAW|3{Zuf~t|JwzO;2Q~9m@FMXs-EUv> z8Se}Qn=_GU`1}q;WH|q4ezt zjMgeg%S+VFunpoP7wOO*vW3O^x5MIFe`C4+0<>@=CYSG1jA)5v605(|&7NuKHR1@E zFSB7ujl>sn3B@}ZJw=!M{m{zk45;tX&+h%Ryy-(3Mw?K7s_qpJyF<^qadztIDakm{ zE(?{WE5LX4b-X|OJ2=W6SlZz=0NcjFi86`UpwWiv*mm;9e#Nr(2AFs^ABu+1Oy_+U z%AUEXy~PgPZek=V%cw{Fbu}y9vjlT?e3ZwWHW13ke86Ve5sWvz#+r|2f&I<-+SR}xf2H|M#oWHw z)%YAe%4kkv`EF{Km~MXYoA-ro!a>WANDK4D?%SEQVWM z2JI(C;^pncWsUh4HD&@|{M1k^PxXKl-3i>gk1bBomW4QbX==rUJQck-^{XICS z)J!~NREF=V|I@D5gw=QDnDL~FNB^LHpN3d?)~V{?{u@!#dX%}ydqMc21gPk@hS(rk zd`3+zX1qFwj^?|W-TIH{tSiF z`eIxcqT?U)F@97Fy0kxsnqO~1bspW9=FetY?T02v_tfJMl|ZxHIZPwA@mZS>K#cKX z2wuIDXLJmJoX1;vxNrxSt#U;d$pcU~v=TdQIX&YZ$#cHHr+u_h?v9WPa8}k!tUvx3 zTB>tUMxHtE6Iw8SVmFUnxt7jXW~%GD4l=E?iLhbeTlBo!2pJugpwK<3R^wzvRr>p@S)BPii1({)ru{QO#;>IS=_l_Hm*{Chm`k^ zxV(5DBqUx&^Ep1eAn6RK-eoW?V%fVoK4;@gK7!t^Hp(w9fp4$$#O5L7O-eQp%Y(C_ z`I!z3G}(uHdcFsr3d*?M`@~-;FQa<`WeFVmg4chBqV#Al49mYqcg1c<>+T_%|E~yb z!%M;Qo(BE(heFp19nqmE1iQ={(LObTS$*)unBYH9_R&yuyEXw_+Rx(DZ;8;ncNly7 z$V^Q3Eyf(vm+Cy)Uq&{5MU4UQ_#aZx?6Oz879C|1BMQMj;1|f2X;09Ujg(7oM`1W8^>54ys9mzJ}F&a>#XjpkS5&#PsNmV&mCRBW0? 
z{G0#Qm6#qle)bDz=YaNxseNVJQ`AWPbtD@H)6qj5(;5Zp9* z0nNQ#`HiZhnDT5Pi<_H8=R9*9c#C#^r!Mm9yfUb`MV;2+KjGjNQVg3N3NCRTs57kw z$_C_8uW$@6Q4Iy#?LLsyoQs)?IjG#ShWY-wRz7nC`Ob6yg6}u=g^Gg(Fj40`7J1b} zylw%H_?bg+=?+M^^#Duy=7Q$@Q*74jgEq;u`)yu?3S%E~ddFZy<2`sjP9pls&3LK7 zS5)n6VL@gp7~g*-6eeC_ITQcl<);s$i`zGl4L4@<-d}^1t#d%LEQ^2KO!<#1dx@`C z04ndbtoncJK*641*ePRZo)^gFdv9aXVMl2BH~>`}_d(i`NVF<$V#a1hV7mMSX0}B@ z-SIlpTc~gKpqpOBL8H-HkGbtLoE! zJ%m+G&FETFigpt}fRFtt$cTTy6hY7AD#bgdPJf7-57OD|Xf0%{`^4*~rD1c)b##>9 zWO^Y+;*HuYyqVdK%dF(+>3SVCOY9(?GAo0X^b3YQ5bsMlHFcl+a#8XH9Jx|zi6J&=wc~vjUVSFElmOod+(1>25V%rRUrjR&K zb?+g+!dP&T)=ye(0i5Ep(li@3{B{Ag zyu;Nc&j(|}Y=7LkDHBTqKR{Rqy~hr7953!9KUW@0^}hm&kzMHTJRC9{BUtuNdvop9 zHsSsy=OFuip4v)s1~Pu7oz(Jav{Q3K)oINHdmj8m+ zwQt~4P}F^9nu9+>+HPI3bTNI-{wgJZY8#Jwc>+8NPk_R1KHuN35}f=WajlQKJnj&^ z(^scKgx7g?U|cUT@bo}>pA*RkW6H5fzRmA;*#!v5Kr!~!$ zhn=L%`Z;I``V0F0TMJ!V4x`nnOo;fJ&$9o$$u*1TtEc|*0AxRfn*9!JuI$4ytxKRd)yYr&q9;}c*I)@HL4c@RT8)@Fd0)!)>x5e&adT8i$~u9FyJjI%4WLvzDDv=-SZE zM;O*Y;%GA=V<5ddPU?$K>~%!-;eF&7Acq#8PaWnutZ1H&ALl24a_usZT`QA^mfVMy zdGt9e?tnSTJ%x`R;V{v(l{{?QvGeaYG>e`KQ)y?B=HiDL$F<3CaTlO+0Ipwf2=(C< zTEf#(eb8{VBNG2TswAx?}i@KnO_Xqcc$bj4yHpNA7UAn z-{v1Z!f3ZL8CCLAthxLbjMX$j^~AN{`!)zQiW&04&{mwfyntN3w^9FEF?fzS2*C~O z7_MkU)%{-p!fNnFYy~>gF0*8)2&$fIFeI#pa3+YDxZ&5qigqnfZ~+`&USpBb=drw; zm{^zQ|HfSqHzFQ2m&ln}(Sps}Ex5ETo(1pB;U(XQ!cgSI5_`Y%C65R~; zk~;8VgH&Muwgcj6?~s0tW*}A_ppXk{Nwycd-8Fy!a!$l{A87RtZ@5JBKpmAntO2+}pPgLG+lX z=whdb@&^}CuZXg=b^G9~MI*NCEx=#j5eI$g7_Qktywo9cI=c+IJu?hcR%Cr|)1kupyug0`K%5L(72L1Cjkjxy^ZXsL~`?qE4fD2WBdRRi9je2&H`4XB#; z9cA%XG5(1WgeTsE?V-JdP`zq255L3B%Dov3HoSnN*(D{~Fj(CP@*f&7v3F z#ojkeME?;Nn0Ipo@kFnKYsh|>o~12ZI@S$x=L-N{7f~^H7IW?KnnBTR2yr$LBPplc zedq>y?Rv{o?&VW9Ad*eb6|u1EA1E~wz+*!KSQzPG%cox0I!A-boGM=R^%z58(yzNrJd!ojZSDj1%*30-fMa6!-({eH}Wt?e{lScPCUe?K2!ZXk@x>q$Ge zM0ni)04T%XgVr^2*u2ePdVB#^j-vZnmm3bG_mcI#yWn85k^bQtaFq7JG58w< zzAJ=_TX*5OZMDSG3nqu7JA9g4hLQgS5tDHgI_OHoq|}vAz3Vlk8TZ6aZBFlK@=_<| zLa%|n#Veo9gz3rA==6j76{}7%RmWdkHd+Od6?FIi%S4_oC2r^y$}KLv42paEaOwXL z^!@h2^d;2y3@yhXMKTXKP{XW}qR|DnfM3pLbn^beH{>-EFEdy^h-R@`FMG4>F9qyg z;%UqrHxPe+*i*=^k>mNd`a+W4cc}On&7&h8f$!SwsJuKLtNm_+>e(NhOpOziBW6>E z>@%jfgrHWIJ~oV)2rfPzXg_EhxV(4_2WS^#WgE(x7DR(AY_-~Z;t_0FM%}=S1jxi< zkQiuq-$6|nd(K2q7(L-tiSOun)LTG*@~%z_2G^_k+(}Z;vz^qm%bJfqNteJ}*pJ>r z{(*&4UtmkNnON#lj4IQ6=(qhbWK27cmTp;~JOtQQM6T|RXLz_$OX%%L3?uaej5MU# zghv)`zVrh^XX^+VmZ#DCs0U~jk6^DtU9nWF9c86Q5OU3(85vZthdgzW-z- zD7WQ9+=V?*5@3%hp=Wr1ZEp;_q(H~7T1;tL&hoBA;s#sF_j-(h$;C13$RR^foopsL zXNKVy$j8LrZbb`81An=3#GX^(6USo_SXH;ZG(=eEIkel zH)hkjzeuieUQgbJbW|Uqy!RYC%CfbvnIn3NhaQ(g3VmKpzv_!>@c@1pWi0$}LT}Og zoddd$(GeWnsJl7t4|(*U>*QnZPyQiae)i`NFxmJt&rX^Qr)LxYptl}Gh-T0-HyKrS zlh89E6*p{q3SPg`-pDmcZIRj&2d^^_y}k5M>#YeK-)tg8-{7dS%H)OC(=cv7&6V!8 zvxvyC&~SMN9{$s8&%+h-CX&mi@C7*2xTaf_jERKOW6{uOUNiH!^qwmiFki3wXu!reg-(QzSJxzdxK_5YK z(I?lyrWL|%^U%t6Af{dRMIE|Rq_jO^mJuPiaMvqTp0h#opDwZ1+89g`ZlnEIatNmp z`#5wI?wR-l*QZm5rJLSE``XBJzK5I!r*Pq>3m}=cf=@k8bNz|+sO-HFuTp+>{C6Fp zxorV&=4;ViZy!ovn%a4-3Cb{t4e263U9344{C14xjk$=0(M#aeL(0Bxxy#bd{|pX; z{s2|rJhjXChgh_$h*);682{%9R4&=XWoH<-T>A-^)(2x-kG(J^>NJ)fcV~rt&vVzK z|6$tHJ`kT1jQh1OKxsrGoSmgFIv@3fSlh4Y@OctcwsvC0r~l!uBc_7m>QlJ9R!7i0 zcj3~LN7#!e)u=qy#5Il4a)r%t_9RRRRyo5Uh#VYt@oiYx_cHDyhOg3VyD;|ib;pI!##(CxPH=c7)Gf&> z7XHk+%fS!`YYM@7kB2zg+fb~^qx^r+e=P78^{g|0u;4>;z>1h~-aipH+VCOe?9ZUf zZBxUnf{TNm;AnjtOJYR$IoMQeNHzh3G1U+oYAiIb z(}vA!O3>o8J(L=oVwGtTWwy?tWTq=$MC`(ff~Rm~5B1ECxZ>ttq~v`hi1xWd7#8#l z=E>e5ULnTm_gI$j`4Gxqy~mUW6_oU%Gm(%f&-+x1+qJcY@fYctvaJV7M)+~q+8d)U zr(>FLG-~x3!oz-&LB*CF?p0LIk{gKcJaH$VIG`G;y-Oj+hWN17bS~cb7-p5mgJwWC z-96LQjyp}6`|5M(rCZHsoNdL#wS91NjG5@L?GoVk_t}wS0R3JD~@}X 
zk4f_a5CX(LrlDq;(F!syOm_Y1KlH z2&wp?T37TnH&OdOi*a=Q_c_l<9*LV1_JWW0M{HS0TszuDnsW!VkYr-S!k%K?$rRok zMy`aE2dwn_9Tu!lzJc^o9${aOEz{L#_4PLlo+F3N*0mTIeF*}5--CJfB8V7#fe|y1 zRZPBwzBMiKqFuyzlIjv?K^sZH0>Q86qF%*3l=bK(>b?Jg+qanteo0y=d;AE41Dv_C z{1IP#sE24-$T8{muaNnBPxPhk?2E6jQTdNEeNJvsk90KHuH6kdkaAL;8<^LtLgrzX zh_AhkgoLTNkYSMu*(SRo7#1xL{ga4)Cpk`cfGNx?Vi}Cke1)V`Z}3xG$Mp7EkjC%kdhH9rucZ)H7Tv&> zpefjob}(THCg6Fb5TuvsEPSk!m(QGn%?Y-wza#b1Z}|cW3n2R6Xs|L%<)KUTXfAEa zUZfqsWMvvY-((_Mp(B{<$yl@AY?e0i8|t?gpzEVZHl&(5g0qYSMQNs5^Y>%8MrSz- zC(7#lLhPTQC*0-fNUUfeUT=;bW^^Cn%@^}o^T~@Wc-k^v>G>FqcYj2?4m!JOrC?nC zXB_hDI`BPTsIr6==VF8}4X*iNSQAmK9o} z<{kCI7mP;7U`Mok-%Hf8mEhLCr%|i<0IPFKXTc}7z>DrIkPd4E_}@VoKB%|Yy_>jo zfrl~JzlHxblz1^tb*NH)1l9BJ)Mp-yk!yo6AukUN8_D0T3g&N5HPX3cENbP1!^eTA zu&;Y1`1VA3$>wq_x_TcgZiT?&vR3R&lL(tXa;P}A6!rF{piIClD{u)X-jgZu^A7&UDDQcART= z=c_+98j309YniF#X^7G`7J^R+e5y?;+FqN7uMYo=_g7Tm=IQUiEA0(a{Nav6yLH6H zAKD#YKX?xMqa5Xx{k>suBxu4V}rG&-rla(+bRZ(+}^DdkmW! z@8Z&#;l#hv;iU%`Fy)FV_{3!!wCqfP?CIm>&VMF>rf?40D}1pzat04;HN<_9x}xW@ z8<<7BH|74{nU>L6=67Z^l>glqVNEP1H7eow4qYK^>{3vs&0=Z8J7JmP9LVPvLS+3~ zl+6?6Zj)v~nMF9Z4RJ!JrJR}1m<)0M>xQy*Nnl5_VdeHXX1r_{s(dG@oyO0FoM#>o zG3g|FkEG1mhO4+=_7>x;+QGkMHiVvigFAF{Ft~L*x7b#J)*f9j{%JMX)jz@LKjW#V z)Xh9=(y$`vHJ4rqV~$&|gY4EzM~!>`30(sWL|ON5tY+>Zaur+$XKi}tU5w$X*_^T7 zy~V`ax}Y~|AH;j^VH$TYwIZhiTD0%Mi-yNoG=t8SZ*E}JKM~|@rMap&^?wKRs zquuY_m^YtzZrb!~x}th%Yfn*0yfpI%``IapfnYbI1vOKgDBzo_^R^=%PM^v+r)$+L#YrzV7hKR)zPgXx6AyL6=+e|50@2 zaWSs%A8%TeDJg@J>;@;v63X1ygOFvAC5enkvUGgMlH`ySvLuO-NK!JEq$EtueLX3a zB}s(HNJ)!gl%yp2-M@d&ALqrGd7k^euFvQF2IW6-=rDahd9HZGc3&?cq|b7)mE{EF zf3j)W#yM>%|A#<2y z7-4M1NmyNB0B;6%=j?aP#_BN(?Db6W`-tm{S6`KbZ2-iBsXwY<;f$`eppsX~JnPoMR54*%18Z zJV}^dhURGn7?u}K2C3hJBJLrXFzO8ynMZ<-eH;`u4Z}@KVo{+nloXA-1B2q0fb{Jh z@S9=6To(N$Ho}|ODn5s`xAS1$a}8s)4+6hJHIzK6wBiE{Szy4) z%9Ye(t`67nbQ$FnkI;Xa2$HuZLM~xfQLm7h>$Idib!S3m<;&B-ncsaR0y@ z6_dw8vh9AzetZH-oO5aInq0hhs~fNBvX;I~VGPRkOxYYU5i%~y$iGLr@=hCGfW~g3 zMDB82jZrLL(oc&eZDE*m?Is!P`U(15e#QjX0`y3DfQmOCCy9rTAOZiVKt9};_%F^x zlWoSFoy9>Cc$n>ivrPD$a32UuX~(bsJB`T5$`@h{;m`Zlc|YGVi@XG97Ye5%i#X6 zE)=$9VMyv45YNb9Oy9lK+Pj|UZ#CryrZX3?=LOURG^%A8YcV$H2@J9O3cI%Ia1&?h z@%iqfz-hyFR4j8QKE~_0j zQ8Htc{l{{xV+O-cwF!^k+4u9{K#9!Cm<%5lieU!USa7kJx#3EnuDUDda9|Mi-8dQt zyw~B|iyEN9!W8_%=c2-Ln)=y$Gp@SG78KWh(89h)!PcP#H3!jAw1#=hsv>AX*BM0f zdn{JQPJrz3R8o3Zfvore6`CScQ%i|WEK7+7M2jw@F( zAovQj(6vS%#Y@!m-kE9JdIaR6VtD?o9^}!Mq(kM8)vHn?x-JoX0-sVh)=e=xc@kF+Hs;#O_4%~*SwN;UpP4Y958rtA50o2?2rF8Uf0fp{+y{rVdVG3e4Tvm?Aog<( zQSRtXM?C!u^23C>-!R750~H{YexjYfyYe?%jkxSfjK#qDQTty#QF*-%-B)J=oMWzH z;c4bD90NKlLM7R0mN+E46^8dKK+!@q=6va-Zr}9S9R4rb51a;m?-&ylv#5jbdG@(= z2aW%6{2OG%Ck?s=!Kn!-jqy~+zSs<^{7fR+xq@ZspTpq}UEX=-XtaMIgLIz|Fu400 z6#nyx>RE5nv2Yn??>%0_~(ugu0F8z25cs1Px zJ(j<454i#b4Oy(?9!p9LQgQ6pR20Om#L{_gSoQD&EcxwcdEEgA}%wNoW7-*GCGS}7=Zl=U)?NkS=(*@!&hm4)Cz!9*5?ZGvlcEOJ-TJ;A zcbaQy&emq4V{=rk+3^pJ+Et1ZP6|2m(P{KFJj)7Blx-HDh4JMB%38Q7~|fCCZ0>l~lc=C_law=CMqV zv;GYfy{!TJnu#b(eT?dECj2YmJy<%B`R;n8FgEsZl(F}M!J^kF7JH!3_ZQ2f{clnl zJI@U2b`LvhLaonCE8@gDQD)HXxP(hBOqA4Z?WFQ`~(28z*Nad^#s)QnW2 za&$7Lo36ld(}jHhp7re9cLY}VW)6_4Z8ULZBRH`Cw_Kva=HrgAL95GK`c*-CGUEk? 
zoxy@673mwwX5I4vV9$rHe6;ih zl-#*XQsQb@r};L7y3K*=uZ%sm+X~8RgQ4)qK`3U-FZV0+&?a>{SUY-SZf7Bu#_VG= z$pY|k?uyIk8m!jLKye>4smI0C`j6X*;yy)otBpvpbZ0m_h^{5!i+CutQ3v* zGT34fEPS>P{U%R>qBWx+#gG!)N1+fsun^(hdl)ZLeRD^yL6bdVhp~`{AVhWE3_pW!;~nNUP(H z)4G1lpJl+lM;Sly)jBgyXwgU}^g544e;D(CTaDPw5#;+d7;z*Oe8y{_UpG_!lA#_K z(B}n|&NKo|{}A+g`Vx~zHe%I`9{e3!<{T=BgoKQ>(Ej=e?_CBWes`+>>s6b&3qChN>{4WFd17 zZAYD@1Bv{tEo2`&h*GyGNqgQel%*MRSx0lwsImo%GB$!bC>jISu;<8(5Q!+M3zfCy z(ZYZtSiOBeIvBnKYq#eFuL~gIR4WYXApuv)b}Y{bYBYzKFWM2Ud-VpR%s-%+zl)Up zP%#%Y%f4ALChh}!%(?j-#kWkTlOz`dZsvpLO*9o(d8=LGGuiX{He)4cW3~4Ka?*q` z7hD(Nggy#{j&C6CI|XB(yo9Og(Zp~0Y?PiXo1|(!jfqd0zwR`1(CS@8+2dOv>HP!> zilZ<*IS0gl^I6XG0;+uGplNdv;P?&Lk(Lxv`!%{g|r>M{}JSI?`{=Z zeZiYI^m*$?o6+vsY6uvAoH$)t3;%lSa@x03u`uT~rhK1<2~As36BZ$<+tkV2zZ%$Y z&-`3}BC-7REvVWTf>ryK5H&Cat=$T+@Zv+1sD)f>%^T(m+K45j7~6h*LXR$gP%(E0 zRo%Fp5opQsLfcm{p5vHImH3QAGj=d+dUOD!(#>jzrGts`^BwB+wSca#V2n#&AE*}h z0YT9`b*1SpmMb~KdIRGz=N6^#p%z5!nbvB0fw=Ae26xz5DNrv8BX+TD$M)W2Q|cG2 z+F{D2cPxQex&Rb5c97k0h`rI`IH|KI4E~4P8j?d6Sov&N5MPDIlT;`{a<6@ zKaVlMCIfTT4Ol*~6cig4W7D8dP;QdS`X@Wcoh8@6A!IP)pb2=#j?Ex)@TOCjAHbaF z>9pXUA2nzggX?qSu>aGOn0zY_qyf5|!J-}Ly6_y$wmyuCl>GB<%4SF z15!WmKNMX%ON;i+z~)Uauv$+C{ET;jWZP@7wW`BedP*2`ip?E*U#Aw6Y(ZAnm3E5b zp|gzjO_zP76|-1w;N&lm1ZCrwCtpx*I+Vt?+$AL`<6+kZmhI`cjaK`I(q(Nsu=7A8 z7W-X<@UtxA=rI6N9_0~@{DtJqmk#EJnW&B}zeSg^oV-wYjWL-Qq15}kB*M{)eJ(6p z8q*EJ3>Krol`7EmQ<6&Cg{;#SPpzl>gTuWWG$&mHuiPo)>|TOw0kaDeQf+u)W~*E2NOwY>|zX= zQVu&4zf#M?t>{;91ykNn1gDHoG-=@zyl82}TRVP7LDEx+T@>T3_4^+xOm|AW=E<;R z3(NH7iLoHUoA`+-EL-J^`GXK(fD){SF2mrFI-tI?4|K{BAMRSR-elIaU>VEj9MxcZ}mMGwz1kL)yPPJ9L$6HAHj zzX!qP-cXd!N>Pi;;w1I+m!NYi>zmEFfW^vi{L=3+crCn*>s$=@sUNPOXJrR_chwQO zA_kO$)=^d45*oMl9K2$8v8{7{qJzs(qHLQ1S=E;@Wkw^_vUgp{q`$-p*iLgW`;KP+ zp_^RHIP;YqD9YbN{sx+H_1kuVX=pQSVLc_Kbv%TG5yr{%L*LtTP;kD0MF~TQlT#`4 zVeqJ4a2QfU7>BFRFbG+;mq^b&WNsJ%Rll^u%wmUm~K)@UAFy}Sl9kfU~13Y3h5V1Q4Bic7X8vC3kKVtLHOFuB@Vi|S& zT)=X@0XSLDkau8RtCR=NsfUv;U*E+10uy_p;&U*lj@qhQ{~W=@cZ}s>;72oFS3|39 z8N@ccq}D~{pmLo?m630VW>-z7cn*@S>)*isWsgD2@|Edng&1i30sn@Wb6RCEQTXW- z@kx$)pM8Q37Rl%``#YqM_zEe5x}iqzqeSXzOX@48Fh}tLOc_!Kf(7;vo0^0HR*{(A zUW^5uBdE>Si70SE9FlSxMd>|RcBu^iePYPlyiuam)e$Nt=y3H-tSjEnmzXeSh&bRA z&C^&wP>wJ_DujCVTV z3`!h91PAX^YkkH5ZcGJ(VmoY&DWP6#%5c+)697{U_>zSescP8(BK+Ky%}aWq&s>%X zQR(p2bx}lm>puvbSqqxs*^=_=3Y@I{j;@Q^;n3*>NO5W<;M@;ggPk zQ|Zu;>iA2#VCWgevJ|eg^okR-GwxLN-iH{bZK9z^6WG0JWTsj38Qi+|1GLm6;G$l6 zFw=u&`webm`<`NyWgaBzH{lTewFo;DEPKE79Zl-Z*bWUz@UW*bH++8%4EpyL3TAEx z&*U~ReYy&BMCqW2d_g@sFQc&FCOp+M;2*sZaJh4I_@Z%#u!(tA(>8yCs*7pN(Gvq{ zBU4Z$Uq!|4Rg#=D-_ZQcb&$HBB4VTGMEGtuYE2@Db7C-T$}{2Q*^{8yS&tiZdJB4t zip0cK-|Upts%$Jl`;6jn<&Qp>_F3ZE3sgL2MLVP$xx+!2oTjacm(a4(^qsO z0q1!pV{@(%#EvomXX|VDpoH=6h8?6H)ko3Kc_Mh_jKk#HaVVS3wHYDQt8Pu3eCkl-dZU5Jl6Zdr`cIywr&OJf6Y0pd4*=0kx z0p0ne`*o;lEW#w0I!y1Qf-1JBjLNTI&Y0hmQfzL3>2TJSt^Sp1I6{RvsxLIImk}2; zwjCZwjQ9~dn%N$(nOfZA8F$Kx6#31Dmy9JM&9EW9_2rN>WEFU>Fyi#~B1i^gq4sSG z(Y{^H_zho4&d6tUlOB(ni2)Awc9vM(R84%Q77&e#8vAc=0#z4x8r#bhJZA|wpQuA{ zeVaKatGr1Se`-Mbs5`)OA5aYJgs`*BIh*|tI>s~RlHqO=_gBb=e(H^hzYn8zkuJEe zIe_W6yMp-VTa>bQ)V!R#@LR;Zi@l$dPkI7AWBz}c!NCJaLcg`375mey`By>G$?|e{ z?_x^-YoIL2rnYR(x9Q1oNFJ4s1snSlP48uD!RQhcM_#0IR{`5Eg`mRerDTtz9^XFY zG}vXXM(LY9q_0DFE^AZ?B>g>uVyOwstIwdb{(FEME*WsD36toMSI5B4+y`G-v95#% z+ZzpHtQMzK68b$7HI{n3+wL?d*fyJV>`aA$wX7p_;0-D=T50k{_O7joK&#K`*s)|0 zXm2uJ)tP6+sb>yEem{ggZkcignztmNB@rvvrb6{5Hg~O_x_OF25Yxckjd1 zfu>OT)15JmWW+$c0v_r=LDlK~XrFuvV%FYf{;r$IzB&BVpy5~(W6JqiErQqqL3k%W z9z@em(Dbp1V6`tDhObm&^|qhH^ZP#Lg`R}4Z6?~6*@4(vpL))JgR}2+;c#vY+W(pZ zvg=voi=3jUFC~g;MruvZ?~Gk#%-gJG%;;TTQ4zF(IJS4NIs98<)wdMum#WaJ|1S`4 
zJyaJNK=Q4>s7E@7nE$@m)Mo_n`YS*`KL--B4};=;0H}0drrCoAWe`TOC-=34 zujF!8hCD?@%yp*sU5fJLf1&wWIvT$W;q$A@z`e2$IDg&@P0fxN)y<=C)nbgCr^~5+ zq%+hya~YOrgJZ~PaNH9LmZQ#qY42P91ic^nx~K~|w?kQB%13Y?I~pC!zoE(Vw_v+X zMqC>QzO=7M_&8jR3*Ws9jJ5uR(l(^?_7o^tJC*rmh2jkEkQ}$QwQn0(~!8Q5cQ3|V&}|B*tu7Y>-y2g z*4|VX>lB!%WY?Y&nRzhIE42Oy$J(P9D)ei>6aMv zg{FU};_w`OE>)m8PPY`_|L&w6FtOEFUE|Fta~(JL1M#P5KNI>d*0nd#&tHsz{l6gG zFok77T3qy~I(}c^6$lKHI&{$AbwT<)tyY7avJ4h<@*G;bUcrRl9T2fX2YfD5c5Pq` z1pFmKKV=rQ6glJgq3uu|G!|FP>5pNLcks@GD)8=1ZH!p;mH&1|oqKJeB3zEw5SCP^ zaKkp;2gSzGn2@#?g3WHDURXH#rdKhafBuK2C+l&rkG^19yc0W57;wotp5R@vgV)-m z&$&6<;K9TKJkVF48+3!XfjcrlKHL%Pp4hW)^K3{knhq6D70_9790FrJpu{zdIzctK z?JWaI%P>}|9fg)#8^y9)Gz&8!;@Sgpt}+!)18^A*)u-m@L+T`90mzdJbTZ7 zgUE&Vq2=A*U_GJ%I(klFw98f~nKp`bO>1Kbc9TF_Re<7qr_jCZ0wyNjMfrj>P=qDI z(K|OG$g~WVvo1qY+yxl?TZh}ZCJvmJeTS0Mt+=K~Q;6woB91EU`hMwga<9w0*RF5q zK0kpOI}hP&kDMk3xIY+I&jOdyJSczf3S@;4QuWMPm}LeAd`v`V`CXRmR0JZEoeaa` zQ2J;m#MJKvlOMZaTg4F=hQ!;tI|@uIRZx~tO1_N=GE=)cranoE1sh5*Y$iRsCS8#w z^#21B@Giz#k&n~N5ws@#0#ka8+uzM`Ur{4@_Gs#UW5 zHwRZ4cXn1vz_AdNLfhrG)eHJNF@eLNeMosBnS2 zr@ill6b!#}nb;1yz(wsE1}dh3bx0wk{(XVia%I$eISYMe5JSo`24qf@4?k3eDm_QU znr_;{j1ync+pP%9#yr4Ior75NS%Xt-KQGSRvmAY%cYxKtRLGik0j*jHV7~7gY&4F*@cukh|4zq(Q*$ulY$kPcqj<~S5*9t;y1n~vKTQ4Q1)ZZQqf%ze zLgswKv>koPIV<7O119~b!!u_jl$s2&nc>5P0O1;))Kp4Z!0h-vzOo5f1>{(CNz z&hI8Z%2?_%yyUmnHSoS~>2uZn3&VQn^8-)WLU~XCCTPBbpa<<>H0vSlB+g;s1e%}t zcEYmO3sCT|5w7VNaYpldu`-SFSN``|)~?%75Z1_YzG!m+yp}Nb$|*>#dB7L!kfZyM z3^wa_IR>q0C6D!36g`*lEq_eIPT%J&=VCwJ{Gb{5r>KLnYAp*}x`U4{xhJ#BE@58V zx}d3kCCbMXz(UHT)|L_9B7GQGucKK+KrK!jPF|fqPeAaYco1FsB5QxM7;{nwqo3&o zu-R>cF135XQaOP4KKw7_3@I14=zhko#h)qDCkLIr2$CU-AVI&9dgpYnxKe^aFXgE4 zS;Q>5i-`qN1d)%Gc%`-py{-OWo9%z&3=w5z7Ez|k#Q?nTf9CDiH1Rs_Hq<$qjvgyV zfS+axgf;%fyA6Mc@|uIF4<;xbPMx-YZsS1x68b#Wv7Gk<(Crc>#aHBj=zq(^3t!x! 
zY_}aVUcMI<3#Rc}XTGCCsfxAx&Y|Y>7))=-M9YBdEQ0cfu@#gnCT5GG_7`i(dIYg1 z#5j*$il0}-VNrn-0W-YLEq68jY`Eu7G>DWV-_ zON1<^%#@jp_>HO;v(R1~0+JnnfX~ey;>1?SGKomsJP2{#!bcc;asrmL*@2|D3uCN~ zVcINmbJcTDwpN?_qW%Cy>wZ9O9Q82YnZuixHoddO5Pce4h2xzoC zi}Sqe!1Y%y2+1m(Yw}T)$PzH7v5u*V_wjn1 z`Gd#LKY>f+A)Y&bP(WeHoW&&;{tazL7L$ra1VC@$?P7m zZf(J!u@%s25rv`}S9UwRFE?(Hs<7g}Bv8a@VzpGC8*y8YYuc;_Sv7PX3_Hx%mLG$i zHs3H}c_TRMd?vrlD^#zlfzr@=ut~E(rMLxRyM|E5L!EXccVw!+&(wjt*zPWfR+7(*_Ht+=N(*0<1el{?9=Nnd*7!B`=yTmR?W87H$I`JVAGz zH9_!Z)k%;{Sj7C-N1;u{Z0wj?LR~Q*P&cZ^pq<-c$Y^!0_D2*NA5*fl6grzX&V!P^ zQ6OztgRT$T!LoE1y4-(*xtcyueW4oaiVM)=u|Dw|M5vP~V;(V4kfZld9KE(H&s$?O zx>VhT_EA?M|7rnLBz8hXBJtgF>1-L5j1hWeX!pScm&s0GomUCSRu;jaS^Lp;QX)jB ztjrCZ$>jy!`H^Q~tiV$3n;@x@vWa$0s6Ou$C_Jvpay||hXN)@sfycf=>hJxq&Hg5o z{z2}~@;w;+-BE@kw&KgLG!xw3hjX5Nk0E)TYg|8oNylr2V))=E&*Hybu>I zI0Vib@1b3NJ#5Z;h^0mbg89k;_@-Nf%PriP*Li*~OA8QjjmH}C_!?pgmq#$;W1o2E zwl{p!M@>*Rgz`$GJhUypgpo(=aFg#vD2$^nh2AFY>tZMvcW`|4pRp|b`C^bXX~J{j ze(btLGxyLA)amTYHIAb$Q|dcn{+~k05MpJh3}tru^ZD?!N~oJaUWL=qvHnKL^9`7Kt%X8sb5MyoEwlc&1nRCl z#bKZS!fnAB;KVNSW_35vw7rzir#+{~R7(hHy#c;P;kQT%))@imvALs0rqL+k^9jDVeglM&dQg3^x=BQNIrm@OKwAM zb+9-*awW80e@od)54L$)4oJ?aGvCkeSa9$u)Sp}iiRSHSxo8n(uM)`BS&27#l7fkOdnO*o3IQ_S}P!QY$%3*B-xV%zdFdo(k ziJ|+^{SOVaT>AsrTqe`0~j|7f4O7suDugI*+YP?c|B`CD}%yYeCGtfSszpCxGbayGW>M1VL* z3GRXiX3ZsLNLwlOSv)cFz&>>Ba}bIR3^?mEYMAg`#?rFxp+qu)>1^1G*4xWbQs0LQ zKeP$`mH$%zW-{y^K{KX!Vyw>`o9Ft&9mLDOQ`YY?*wH*L^3Q(QJL3%AA~&4f?|JAP z?g`UPj)1u68t$5>Blyf}hn&KZd7gqMr@!F@?ex@Ssa5+KEbV}AT-P ztZ64JSg&L@SD&&}Cnu(pJeXWuJD_D^3QP&s6>=1Ph1AzGKz&0SY<_VYj0Y@c5sgvM z6P<|0mBrZKkTP4>iL3wZH?bWvX$PB!@_nTk?`bILm{?<7%3k^oE@q;DhrA2zF3Vf` z3YHD?G2-(>uugala?5V^{DP4X?4vF8U;PB$tkI*+b_^Id6KnQIGs%12qrbHqN~Ww4 z8}Faaq5}+RF5r)bBQK##A@Qk*kH7k;Nbt$JMV!q#)^bu^@Eu)*OB&P!%MD=;(H_ZY zCp*m>)1BA$QWNCIeMQOchrILBDd-kG0_?o+@@`S1QPW3`3AvOreW{0*p|8<9(U^FG zuOQH1qAa;36O@KGm}a?%8`k!n_!Fjl^xKuZ|NLlNvdB;f{a}G+JSOXgS!8;F*U>IGum#-n+MyIS>2@UFq-nFFvhkcQt~E`qkn69V&v^j2d+D zYr!`edSJ6|D5n15!1gy#X80a;qr=t^V@RIY6!QS}^}l0C%OxoMN}2CaTRv!$n0S3R zK$NzDNzeJg(znzv&t3x3Z0cv9ILA8|HX}5igr1I{0OhYCsP8!Zd9DxFcqsuo@i-i9 zAcl04CqL-MOE8)!hj{sE?A`Vr(~fsQfOZRJ{kjL=GRV<$Lyqm6=8#uL$(P%N!1@K0 z*<7nCbe~TIJ-Tb@nAwBm(pHw>Y=UN2kAvq}+D#1TBUmTyLw~#l03Ak_3185G@7jk?C^U*7urn~@2rMd#?8 z=E8gzT)~K?8Iacf2%07s;!A^mTc+Jh1_kevYvF>@7T6Z^-!%ABKS` z+EAeT2%NO28)!cQQd8GpnE7W6xRL<1^0UO_{(%Kw{Gp_|k#{$!0q?!$=xN>tKK^sS z>`ND__?!|y_0Z-LE(bu9;y21S-Q%std4o>UEuO%(ka+J6oH(`*S~*=o{b>uNRTyx- z5+#4R=oaj;)DUb=<)N?D|FCFXHQqIi1LG6TY>fwnyj#z=>JD03<;!Y3eZcyg4JJ%XV~VTO_~#uBu+3G(g^nGHRU5Q~ zn}g+GxItUU`oUxR7I0ChE8!K1 z&t#I*-eUEC^n_JkZeV+bF{rv-0A+a!dXJ!v$>(AS{IPZ|=UzkSdnvy*qqPR)My#px&J8yA3aZZUXEh#Mro zfbV-Xg*GM@mP!Ft6!Iwx(U9LPPH@uZi0 zo#|B^|LZ-(NRE*2_F#1#*cyA0%a}xLtSr(|wj2EYF zlCrQ1uQBAtO$az$fCZ}cIQ?oFWh^g%zK?;R=q{7>9LUAp@xRGukj&rApz$a4K#ErT_xl{^L#Y1!)^bAV!cQVOfcZdv{iV<%p z!++QuFTcnJOa3|EG4Ur35-z~Nkry!Q-!Z7a?ho)Acn~eug^5E?906~~k1Qm0ICiP@ zgFPezO?Lz1a+*9K!unNE`eJ z9c9@lpLdG4Ubz#aJ;l()wPNa1YaZVJ!ssGj_N6i^xAq8#7Iw*ejiiSyh zYhcrxMJVbX4_=lvG_RfluG2nYg6VZo99WMDzla4;)ydHrW=p~u7)3a8dX)9ov)bp8Qst<~d1)iNkh z*$KT95^$5hj!-?B&W4v#G1%-T%HwXcWTSR4HF`#w$SOXlj~G%@W-{ll7VO9C3X1q0 z7`cV^LAfcEW39))s39QT_6J6%cCvvbw%~H~6TnjrWwb}ttw2!dt}; zS@R}zPB%cCYp=nvMS(i2hCrZl4l}dR7L-2pTnb2L)igWKi87QKUOov%4liM!_8XYE zFdvgE_h4!tHK_ky2~FSLQLl*dsb=SiDKdh0UXa0jRkeli0BzK_@&W&ATW}D61X}HW zLv+0X&55S*+duw*6T^})LZ_G2%>NH8b?1nscgz4MG@$=f6O{ib0&DqqEWh6l)SqE< z+b&^2&n)t9ErWb5J)wKR6Hq@)`NgSG=xv*aL1V6f-HzF)Fs+4QH)o>cVRv5phDGQr zq8^#SeO_e0JumEkt*|vpM+kg=NbFs!_sL>0P}quxyx)O4xtwB0-UYp`OPKm*Ho6Se 
z=W^bwb7}FnAi>WIj6ILy{dY8jxVsA7?;XIe$NLDB*T;E9@1SYRXY4=p3=FR_3a?co@Uy)n1=sYg-ia^i#)VbJuo;X@l(AuOLGj|h@@#}Py zY&gb$@pufz7Akys&MC~ftb#i2^c<>R4zu`d3=bQO#(k|J)!9fWiH+u?htA^TtFNG& zxdDvLPlT*<$5DSuKhAW(8Pse#iIH{xpyifVf zH)F4px*%~26JL4WL;WOEma0Q;KgupBZq&${R(L@Drl;uJkjGn(KZ4OW%s`&pjI|lL zAnKMd*Xc#5?~#Wtu{D_dpD#9NW`N1<25|S>4@n~<@W5*kH~pXtm0vEiq|Y?_9WVd$d2s$?UD4o^DO7$tXy!$hTz3F1oF}Hc0#p?K&+`h`pIsE-S8Bow9 zWvQFbLX$We+U=f$jcX#SbC1Vau_saW$6Kh?Bri}?a$ex_!+cI(##FD%pxwL!ZHdh& zX&lbG81F|ek^E%pDQw_!HR3}2fH#SeXa+9;@08)7jggS}S|msupJK@X4RlNC{Wx8U4``(Rr{A<9>Z@y2kGVEnfgZ?-fFih`=pw;~%AL*DX+ zPFKO{+Ihab#T)c`=(|X0V8$CK@jewBA^Bk?M9t5|1m8TG(;Eth1{w&%j}wD#TM?Ef zQRa|Suw_?b(T%bZ%Gs{`c)F)u9C8zKL~#slbve7HflOTzg`U%Wp+hf}pdM`V4^zkWCl2q_6B^x<@Zi_|uwu?tRJQ)ho;y(QMmZkc^>>4c zp%oL;ebwet8C&QV0gAIpY|dOdQ?~K4yRmc@ICvUDKWKA_0S27=CQGof*$)M0euCw{ zohRFL+lcj?#24M2#JH&x- zz+)HA#Y zItf3(B&81*QZ@^8YNujQRTHK}7?ESNHm|6#5Iatu1^H=5h+7#1)pfLMqd9C#@epFE zs^G;Z^zUeo;zb#0x%rAjP}tjvqwl3sC-plvWp6{*$HBO*%pLb=sclS6nXWjDF?o#64bxFY^$0(0RnTCzgS0-`!xDsLP$ZuEN0OfO4xt=Z~PzG+~dt|q9SW*w@ zO`-e$lkDc+* z1uy&DgNT44{{`WHYxRzuDyO`%TN4q@JZ@%v^aVfL#NF!9qph^%u!-z^E4v`H7dddYoc zaUE?7XqQ!WnWg$2XX8t2F!udEnn(Ka67f*+ys$#ZHjROD%0r(JCxBIEGUOBU_2_Q8 zi>sf9l7M;4IOZybn8f0WP7!!V{L3syo1puOyUciA2ea#UilzQ%!K-Y&$W|RLA=Ux| z^SN|Zf4hxIK3rrG%Z#D$?GubnU}BX#3-q#Xg{F@0n6>RVZvH6+nhEocFLgQ788=Xb z#9eRG;V$0K2l<58e1H4P;D7K9c4+>=m^QgXpT>T#iH1uPMzLi)$cLJmiyGxllea`NcA#olmnR8SIbN8~vp1~U6 zer^fMuc&g8n33#DS{^j5asugnb+8+)Do9qoki`y4h8EWVsMniH{M^fU^l%%FAy%7w zkq*}xr^}tVZov6yd_{GewGb>n4d!v-5I2l^NuM5r`p20VG0PWI)t7^K%t4TrRk7+F zUUY7+g51PC{EY+JoU`N+cpN1ThV2^uZqXrLe~1A}W{nivcHV>S>q4ksdxUl&QYibT z&OJ};2ED%5z;06}vm<|`ZR#=AxJI(vokEWhyl0(~xYua1ydyC3pr(!Kf6JKRiN zAQ#)qSRH=`t@Wv|%wD1S^vjr4z6@3W{D2G_ndg7sKr&szB=0tYZBrFmk6#bX!*qlO z9eTf{|> z?$-;DU(bQulwG|07$w&C>jm#;9Z>#Q3zmrc3jdu}<@841fQ~;;qSh$N%bM)Mqa1b4 ze(u8|CYqeaMk4`3XTrd5ebG06C0LzB9Q$u1+NI3I0=0D*Xz^J#!@3<}re1@68Eu&M zcmdt*9r@}Z`-y={517dP_&GKPxBsOt=pS&wmU%nSOaC`GzUj+Vw;Q1Rdno!AePRiJ zEBLv-XP`1o#2G6pnQvDJMz1j8b25*}Jc=Ge&)Y6g`rc;7R~z`k{e1*wz-0D(Njl~P zYYIueiD<2DLVlxCFus2p<-gux;D2*?TWLEgt4sNWRoZAXc|N46`GMm5NRVhIpj(tC z1kcnFR3*1Dm3X5UOX_f7?se?$$;U*eEaLvDPf8plg^)weGz)zqYuVohR+lR2d3OPJ zR^6ksz(JWPVxBB*>2*|O)UZwFMuNM?Rc&dTWo@xvy&8dUuZcC2$JA9zXLj18`Kf&F9AJh++ zjmA|n-su8mYbvJjd&*V0kUl}!e=?6fpPON!Uoq%?E5&W`N0@*6SyXI#!Lt5IfnJu2 z@zXAWD7z4o{!T>kf*zDKx#3u=BM|m{ET)$0qr$o$jGuT5o%5E0=sz)wI7R1`E?-ul zMoxmOVq?%T~(uxU`mB zFmz1B(DharIQgl}t;`4#?nd!02iwW*G#xS79lTD&Lg9<;5Oa%ih2E9)Z-}wv_DGml zUqFBAA@!{ZQ*_T@(Q99eS7kP0NqHQ&PmBf&50W{hMo})!2laiLp#N~1Im_lb6fVhw@mP$S5z%mP`vp`4&X-9a zL_+Ks3HVeK525@v`9CT^-TVZo2Rot_?T}3`eL_n$1( z7V>-2KE?nrkaD{_V_tym()q*`RKodLeTAGix|oXP%=AnIWQ924_7UIFX#X>)Z<2zw zQ3G1~p3eJnjP6Q5J-~Y;u)v-IzNl{vdgRf}=|UGAwLU^_*&^QV+yV%b?PO!0OJSI7 z7baM}1NGC3AmgeUR|4yB`KLZy^v5Oq>cP4~RAd=$pl3*d(+^(#KEg5^avZF=fZHyW z@Dq2u1IeH!X45_tOt&84b5h4LnKPXug0r!C!bND3TB6ARGw-yp59pDfP-ntC&_5Y~ zfyX$06DDDxTO;qSVb0tqB%#_lO`$dSCrZ4W*z=36=yf3yPR-Zi0=lD7I@<;E2mQoi zb8SIXy%v;4b14hfMl6znytkJD_DHEIfMnTgs1@AMf0X$bP!^`7~AkJU~UZh=y-pTzi zEMPiDl$67NdsVm=eilfF%)#_1H6g*El3?pb!W&S-{!#&|*B!#t{ON4PxFmYr-t%(V zcj8<-^Oo_ipvhK_-seBC@~!r${(b^lwVfajy(Q$T{1!`{)`DcauQ*`#X$U^cQ_q2V zR7*O=UMpnOg{qg8^jrhUO3J@@>VT=U6G~bN9Khf>lvjL4dPQZv)g!4#cAwq%zK52h zI{56I7_8V-jfd|2#97z{b$>QvwPX+|4)tY0ZcEWLDuG``9;8C`P|T)TO?l@>a9i~X zj+Q@$i@#zpHRuf_YxrPp+*^n;cm#XQ4TZ?lhak2{U+@^L0^xhsqJQB@h-!I^%F!yA zBO1-$SgOXIaJfpoIO_9^cIB;0qp-4Gk2A8U1f5a)!N&O`>eST0t85+N|2c=bA6gw! 
zoxKnqEx{7^>wMFd{mlQb05Dxu#D?k3f^Z#w_{&R=^BbNE8UGq`%JHjV`{ExUJ$(;~ z=sf$8Jag_jo3Z?iCWL9M<=glCK)uVSF?aVKep%)%6p3HspwFG?-nJRlmkokdvoq1X z${!@-`a{G=2_}|b#h%qSv3k`t`dx2BkUf1Y%b%l=(a^VZ;-0{AM_Pi}3 z;ffyZPW?3n+jL^WETX=!ohz@HeMVdu??n7nW3T{K!P@5!+}dp@-2JA5@x#fZwk{Xb zhS2_H{0FvsZ8$pXhd}hLpScli2Nc{W$IFYVF*R#HhF{SZ62cB)VB_>W^Zoh|G$Rdb z`46B>I?J2v2*zdoPowROEs&g9iMuUNgCh1dUs99}+w@h1f_pABFN=c{*;<0gtydP9 zrHAF$6EL}ok^gZbMh+VZ?lviqBb*VJhQCM4{Cm7&eV*9Zz8;j^JgDEfk$7Y@1JLyY zZ`EPo*6j&v=cscnoGQ30(g7Q?pyTav2yqz%-r?){mf<==jr~$oUGNfv^v~m=Zwicl zlT4o9n}Cr9V3|p-#m!Gp`d<^9m;M7Jk>gne*9n$uzM}E{$+%L#9~U@H#?RW4M!U}Q zcrsdr+qdvKwjMYG(-v!Tj+Afn+Vv4#Z5Cic>UU7p))ETkwRlh^5_aCbhNeR=LQt(M z&am#s*bi*GAr=o_ak$F0#U2a;EK475Z(|<3bl( zu*S1WNZTC@349B6r*6Yj=Xj_qNJoVMGJhcfeO}WMs%R*$9-+dm=-rEU-ow$unf7bX z4>Bve<9M`zGE$$z!M~1Xt0zpz@5~b;E(_Gx20}^lCdSCU)$^?ogM4=3%|i`P-Y7?( zhxgIMfpUQDUc683S5%yt23GE8u<30ZwCDLjk#G%st44xs)-8y-dXHvxi*ay&11?Z* zp5O46FIsh@1(^joTZ-)SWpMF2#Vx>0c zmZOExe+xK;52QVF0w|XkL*g|xLE^B8FWC8&ZTsc{?=KS%^~JxiZxQ8HjxS(wx^$0S z)5>&~oPdSSJzzX$KibS&2X*BB$O={gw-N6^Swek*&LnJCI|3cT7j)U9&LxH!2-Pdf zAj_r`TKj&bd`LBPOt}uJXPsHO>lm;u(c|{q)a6Qctl)iesT;n;0_~<%u)BKnPp^8EhV}fs%V>*uNqad=H=Fv3dw9|E`3HJ(6SnobV?aP5*6Y_|a9tkUA6yC|Zy#Q|$qb_(ER%Wg?%;nXADjI0 za8V$6NZ-WfW)K@e@$sIxBJ&=U&8Iw3LyK(QA_2mZ9au;tF~mNuXP=3Iz5DZLoVkgZ zOjgguI=x@8@pcC8v~NJ`1RdyEqYv9|JYl7W>AU0a(dd3El#izzVDjW!U>7}^x4e)b zw!Ca7+n%S(4Kn+Jmg~pMA}$x9YepTuJE=H|{f6T2MuPo!+CBWZ zi;Y9c3DseSwwq_7Yf%seuHL}g|9s6>ETo^s`KQc$>|v1iT>+~io3M4_Gjw`l1)Y;7 zW6)laP$F0}d1MVQ{+G_P%NJ8F^f7v$@j&x`uELUHEv}Ph8?#Q7qrKN!e)F0a2%*}X z>)AoXn~sABv$ZhnukUy;KO9|7@u1w=N66YY0h-FPSgg4{F$Hx{f0sTtv0e#QKGf0G z))1bRiv%xonk78xg(Q#5ka#EuEmJc1gZ+;}*Sl3{m(-89NnQsL$?epGkEYKf`O8in zp)B%WtajCD5Q&||`k8;i!LMmpy5ulei1HxU#!#46tikm!{sDDYr4T#y1tu!HVQkYe zY#QbO<=H3khN}^`>~}VJxhbLK%voOXF?W5JFbrnzT7@$C~&f&bX6C_D256LejRYJOF*(s3aJy`Gq)=>n6@<&#lt(P*K`Pddo!8w zoppTEt~lOx?k{Y7egeEbC~rQsuV7q-eB`452=Dt4r4~;x=lS1xqMI%lBO?xhfi2j7 z+|2G;9pTlRttcC84!d_n62EmaR{SZ&qWuDVJwwdnpZ!6(<^^xB>|oB;&5+Y`P`q^( zWn*=X1VwNPa~8kD5Pv67y_X0(wI1gJC6tWKLa&rO{G--@Yd4#L<&>w`IqxIZY*OX6O@GSY zfBpgP79ZjRC<8avw;GHCkFwnhub}jh047&=;@ib~T+2gWlwZ!~ZS}~DGs2mAN;(cr zCQ7uTXX~YKH7>0&43jg9Q00*>zi-=7biJa6t7LbA{b5w6&e`+@ z#UPLCL#`!H-s^^jP|~pjcTS`o>f|{f9#?>=KcDcf=Kn&qDFe^9-e6fY7s}%i)VrKe zTI~wuNljpBF+d+833kTx-hZOTxkqR7X{Q6R zV)Wgj46pz1j&w$Bl%-!^Y^=u@5A6 zBItU$2X_Cj3uZ-s!%WJ>Db}Q6kJc;9`nVL0xqrYnGy$viUSjjCBgFNRfaQUs;1&E0 zR-NcXMQ|GXZ>EaSseXucCHkWCh40YZkpY48bHx#-52MzW1`G^d%)B2)^D~Ispe`B& z`|t}W$EdJTe!nnEH5ZOm=yIR?M1%KJOEzOp3vQcm2y<)HILnWV*_YmrxJed{{Y#1E z6|{h*EvL^!MFj8Ilm-bql2N~`0gIo0g91Y(MA;FW&~qWC7X8O0{7?R9Ogko9eSz@T zXURLh3WKBX;lxISkfnxj(Lj#Q8pMv8Yslp2|KBG6f}@rG~T{w z>beGE_hyh+q6idN%qEzMgd8s?=uS_A@TTkV0dc5NiE9`#I`&cp;*OP~H2Mcq&ALLpcV}3*={OV&p9G4d|1sm= zLm|ibBy$oOL&yj@BtM;jy?iI=(Y^*PiSO8em_I)jbFyEE&2F?q;-K?r`Ineql_`Z8 z^j$Ulp|;@O9)l5!U$LjQI$ZnE{&0UlAFlqn5BhDSE@X)g@6)mliZ)kaS|2(i+}py( zZP*2^H(!GBSAi%Z zM6BXE41DLm!t|}z&?ICNL~(UcyYU6N?DPfY%{Ro_>xDsEKVi$AXXvw!a!?wvur0fQ zeC_|B?ZJ)UAtp}VU*~wWOaspP(=$l*z5y+~KEze+K;xpn;b*Isp!icEmYkR%8~ExO z#*Wd2{0rCM>uO!D&D%(D`)4i0d49v{?c*^0-rfJB=*;74Oxrl#(JGa4B$6e`(vYR0 zI`{QZ$a01(Axj98B^*plG)+npNtPp}lE|?n(InNmuO}smBui2_5=kVHB$B+>`@hdj zpE~Dx?)$oazwZ~77iO{bD`PQoV;F{%Ph$akcR(XkIS0;u2knj)kR0|1EW5lT?kBxR z{#+olHX4Z`I~>?_W6I*SM)Agf=0kOtome}u33v7DCYCIFj`FxP*fEvzymxM5@DD9a zsU<%4yfc`f%)~RbrlQ-YV6+|Do!hScM87xUuGz<{6XJ`x(|O{P-wgxHo6o>%gr3m6 z{S5k1@5M1~E(S*#iD~BDfpw)>*_~jndAvHuo4AxZb-F^VYY_I|ED_XuCBoC5k8#|; zJ0Y#FC5F8H3tyhQf?Ic`;Y-IiAiYI9=;;Spaeg(oBW{)?wOJM|I|lNq^;|W|0kS4N z;$@58vBn-6v>n^T7A;MI$%BnWaG4AN8AmaE>|M}u2u25wjS$7V3X8{g67~BU3%YI> 
zA!(it3@b=O&8q2q*b;gdR}6rx<%iTM)H8aSJ_F2qods#73Cr>@V~X&5P-@hIzH63& zZtqpZL6jgGt^=#)!)*NvGtsMe7j8aHiL-tlh0?|j*!txZ=)T^Hn&-`|Z`3bzFz64y zlXpP#sEcS{a}K@M-{Pvdf#?x)mYXaGw9ENQ{j%Ap@3?6ZqDx zn>f+p6Uwi9%4YM+P`b|n+V4@m`!(J1?+_1ZcN|j(CVXrsGG`_Gum(u z?anRk?1rop*)rGOHSFn}6A=6I4L02WhVbEEm{MUXcogag+T**5R$r5Fz?Lq8S0iQ7 z=3F2L{e9{`GU8I<7VZo|=EfPt&RQY`&2-?0SQm4=pI&t1WKR z{R<4gsCneP=ODFA4l0%0#+UT1{%Aa>Xu`M;^4veP_3y!sc8T#&%FaJcOOA+QXF{ByntR5gtXh} zL_23yR!sge+%}*3nqLQl<60{|d&^rG8>1!EytzS+t|Q!ag#ccQr!n{F6ZAJ83gdil zf`8I}c3_a1SnpFwGunaZs=Wp64Ej!%*G- zD%KA(#>LAG#FiC5(Q-sJxEqeg-*0LO-)?qr{e;J8xrdm3pXy-sw|J0CBY8%99Y}i` zKx-WL$H{@23Wa?Op(;#8nTJ&M!J9@x$QlGPSpQR+j{9?A}@M zxpx3#1`NWO>17aO?gCb4qcBOY7b<(+V6}@|So3va@CAo)*Fhh7j3OS~uJ;B{ngwd! zj+ZU}^C>2%DVu*~mn`{QIW!+}!2Sz#po$z2#Se`|`Gg;!bShDM8Ej<2A~R8b@?XrJ zxk=sFV=Q{Q3`dP&yDU0B44RYNu(rYyCF2{_c_t_Mn?vNv8FM>lLJoOC_V4Gm6D(Qw z+78*vt3F_xFcC_v{(yw)yIj^111-NJrFA@7e$S9d7xq zgdCz{Sjqf&@VnpymP&dzZSIeLW)YZs;6A2!8{wg3Vp+!ifMV*iN#w{>S5|PxL{sot zm(WE&LdeZDHq*B0;yeDA6D4Wz|2WitvV4Fof^)-RFCP%PLgLc%|Ur?th zbp%7L6Ri1XFj`wKLjTSAyf)vH&eCVH6XxlIiS{kfJRaig^;cKX=RXsnVg7MA*|Zi8 z?RyC6mMffaR+iFz}QH4+#}e_vQvR|8W3U29gu}&_1*( zC%53>DqQ|F4|hl;!l%Gp6RvZ9nU^qX!P=$`N4Axv~4X&PJaMBx@U`pC{oHB@VZ6hDUB04i=$F`}<(>jRx`Gy&~ zHei;6jXJxkMy-)OR@?UOBAcD?l>QEnoy*o-pS5;Ml`F+$4Psi24@=39goYKNbJ zO1~g*8yJROBPXGzd>>D8FcQZ3>_)?zjB*YPGf#d(uhHB1U+$fRt-48Q8TJazowUTj zm8-zoZ5_G~uY$O`ALua4P+XK62a119F}vd#=&uU~tr&V|k6*^4)ik?pF@;UPl#pMw z2wbaQvB##`!nvxSXqu-Z7EdSkSkGuw`0oL$;0Z9UZ!|{BK9jHa3tDv3#qzw9U}lcs zm$VQgzh$G-e_BFzuML<#^*(5V%D8G_2qyPVr|0`@jOjic@;9u4%=?ByA*0^n~_r=*?Sv}QcP`o!B<+tZh_$Uk}5eS2(gyM#SV zbwu5mt}uPrK2+p9lW8R;GWGH&Y?I!Ss%{P%hMx|Brg0j;2EC@c~f0!yVea-a_l-U_NfgerznE{j@3>@X2ocwT;dP z{k(8Uzz>*B_xizmV)5586ES3$j5*wHMjJ5#Vk|>&n@U#*EqI3}Pe!0})2SR=!x5Oj z|0$XTnh280i?V`n+I6Ot$_&3gc;tgOj>dup~wsaI+a43-C3xkEb!{gbNF)eH>@~g2}+uy2Tn6Ur{|_Z z!vPg&_iBJ05eA}vueta%M^{v&ZNu8_XF?tw(fY(#$yQ13#yto($7B(B3!oJcReS4#E&EQQp5A6T^3bB4Ho( zvGTd=?4eArajZh^I&9Fj=_4nX#bE31$DJ>RzC&Gf8*y?YYzZ?QTMP~oGmtr+!phUMFY$b!j@s--IVy4;C9NP2{0Y9``&Vc< zP8=3LVmFjpg6|dTz3jUQQyM#qS-Md2cMvzoRV<-`Pn}zj;pVG%J)YS%HJ-8J?V4je+AODEYRDJKggl z51%KmxsibWHy1$OoG7N)+K0<6!!d(cf|{-wGR;4<6D&JKtic7m=i28`twX-aZbq`k zVOROQ@(xTlr=IDq6R3%w!AuL37&YT8;L}vxF{qO`t`GTW#WR>@I24^rjoS`Fqx=*+#uI$OzYg&)qd z!c7F5wa20G2l>rr-2soTg{T~!2sQE)ytUmxob|~-G%@Kc2=x6o%s&7&2XqBT>M{>J zrY-8~{(zNJ_R}o$9C|=Fy5+vdn$`%Mw^8fFTs=PBe2}$JG8wl!urt9P~fjC^xd-uAs`8Ty2pWL z?jqLo*E3A{#{{o#H4xV?zJ+0qLs4swF84Jv6P7P6gSXTpEVZqKZ9%_ywCouUx&Hx+ z&70Wh-#ZCfy{w4qPTb<7(@^ntBJ&M&f=}BhuXFAv_sM^V$twzJXEhZar*so09*QGo z5#60?>2LR8DHBI&iS9ps!Gx1~Vs!p_lg5yL6S3vdt`*u zUj087>=VJ1IT76LMmqX7U4jVmHq#>?w4-aF?=Vxr@;3EwOupgSk4fY}%7oxQ2J%Gm zqD1tNV9$B$AZ(*ECizen*)37s-hBajOU(qs(psqUxQlbon+XN58_LFS0mF0G!D;Ld zF0G%B%E}R#HGTxM|9lF%xKRHoF#;p@m2^3{043p5b&?g!n{CYuLR6Pzqgn0YvcWh=`TMHoH>MQr3I2ZJHg@fZ3TObKH6mQzW1N?qL?&qiQ&3h$g_c0JG92t1@ z7y-We`l9SQ-QzEHX9;@_gIBjO)U5R8SGbAWp?`z1qoLsYPdAiL{ld@QJ`1syHR#Y9LuaB3tg);FBF3GCs1}C0=8NHE>P@II ziiKZk+M+V7irZ!N#1v&O2)W?KmA1WD?vJjbZ~S?14Bm&)xnTfTjuJCQix{&?I%jAJ zVL#+hx@!{J-RJ|uDz!x2>M8Jwa?w(^so*;QC-+mB6077FWL-VQH2V|Op4rAiOjabH6 z@uX!PBs8waMu%S*^zJQ2W|rV!r$lHu_#M2yyyD6^zhv@hrA%pafX-klSTfQ~Fb@xd z*89XgJ0PH^xtjP(#M_*IgQX=9*Q4qpr0I3VgD=nF;4xRRc;-tMH>M8SO)4>zysZ)^ z(b?;?3TrQCFs17W-T>8T9kmvr{s=1KcRCg3JcgzjEvN~u1;yQgYTMC|)t0dwqGlC? 
zTj^wQPz|N-?qjuFN6M9EBe|94HrkrZ1qHvOj-2@oJIZOdpqI)l&K#$i-QT?RbQri) znvmZ~n@x{sLC1?Tm}YJh1eQBvwQVOTTy+o^%=`iBcX~om<3TLAeHF{v09&3GqPL#D z7<^|Jckl5#xJ|2vTZSKym*@+{FSm35Zp9Fja2qx+s>Tl5ak?)ILWeq}J@yK^#dQ%X zLza_g?oS++lZ-|y$%(Ou?$`F;S$oZB+P}o0u6!0`dei4LYFv)DOLy@0m4ZpXG~(a| z<5JVFs4mkJbhXYvm~90Z-quE(>I+^8-Wb^54HH_I!}ydB3;PFEO%m=x)7w=uSN8 zN}a$V$A~4dooRZfW2W9+P<`6Z`mRmHrtv=^`E?me6o1P!rj6*ax*ChOzF~>vsZG3H zL0+Ii#IkVW))Rc-T%)ciJ#-S*@A`s1c85`E_#CnZ?80J;Dd0KP9P1zD;@M6W*eLIV z@TL?`dT&5|;?y@TxJvFMPYk*L6|-E@xO`Uuujq9ST;uLC_o6JU+jbso9_or-p{d+w z(lW55yFe5(KpR_$Q0OzC*zW#0Vd*0wb<1hoZ>%k9RZ4S0+^#^uh&I$*b;#MhK_d7i z6Py1*0nVtREUWim?qv1`Iyx7k?~U!~HJLaO%1B7_c7)cN`KX&w1g(+8sv7wpM*b{8 z$F3E8@OLvIA@mhSQVu?9>@&3LrRE`%zTmb2?|DS$iJ;!p3Xb2>c!X^WTVzlPr+#)7 z{bikD$xw-qG3ztBHqXG9QP07u=M%J@@`%`^vKKn?XhyjEcQthrd0 zo6S52l6xusQ_j+d`eI@IEATGqjw8trrT+aEF1>pjv;DjwIrtLw2A}eh+ce|YYK7s% zgK6z{gZthjR_U=3@VZoAOnEVeI$B$}`Iu-_PN|oDqHJr!oO@uAt_Nw=(mN08yGUa;bpkeo4aL~(8P;E*EMPZ8?#yX(&q#Iar z{SGWz5ls$?`v8S{qGH_Ns2HLxIIh17X}Z;1etkwx`;*sT+u1&6UcdWv=V*mgw@3if zNUT+uidibRocOMWg4?tr$^-P_We=J#t&a;fKfg-n@jX~}LR;`s?qyjg&6%#o8d~?o zF|UQknbdFu1vBjcj~s_R^xwZs?uMRMsYkq@e)a|7#L_esjOh2P>~|W|7E-U(t|w?K zqG6ogd&uf$hbg~b2J`!rVN^W^e!Z)3Crei}$6a7G`5`lTlmaOO4`I>_D~OtL3Kh?8 zIhmCcEA#jJ;8FA#6LfyD;J7%ZyvLbRTgolWW~0SO5f9RisD(V3L5o8nt9GaJE&d&> zy|+Tv>hr8-D9sHkUZcixFq*7(LHBVQs9m=S5_;ZelD_^tx-<1P=H>8Pr>OTdAeCw7 zyaB5@yZOpy%0(UYgq4AG=ZdTWX-j98vVA|w{x}ZV>OaxDnr7H_am;>X8q5kig;wTm zSXs3ZcfK|hw+yE{?RIliH)W#g<_L&dv>Iw_=b=4yXXSH?)!&AE=ZanFpgBV3$!TD{^2!RY3qSQ9PM9!4}{F4dqH{o9hax+ zg4geI)@pd1ci3Hr5!*~e^PpsmS$huE`l$ubo7qi2Z}*g)o~}XF!s_8 zEXeLGyd0kimx7H1)U82_E{3Al4JA_sPRTLbb{*TFw_z!9_xqnZi+OLXP<=fD*7J{O z;^2cFrggBfL?U`E3tqZ~mw)iTP(4HaWs5`9unhPo44}j*$EEY1S znOog$##DV1(Qu`g5@@9Ht{?>;KI)dgeLB{~Y1gupLr2 z=0HXhxzf}U{7HMU{<)^2R`zbNig*jE8M!=6dl?KG&{=4{Z;TqGvvbi%Y`u*vWA#3q z-MN7BxUCqm^(RXn`v&a0{$dA5=n5fHcMRTW1O<1RFlt%_6m`0WaSb_Wll6}B099<3 zi>Z*_717(*0RObn6I_RmMjy(k77xA7j9!04|L~*SrprzA`A3^P?BtYElh>nO2OV!5 z$Nc;T_HFHISgx+eXh!!NUlB6nPk`Hp^|bpK!B<{*N{p8pmfiFjbeDOM3rB&v$}2GP z5YaEDyx|jfX^AW6U&1wy4FvPZyAZVWG1$JD$`#@#*@F{RU>S7+W39iV)pn( zBuw0O0JknUjq3-#g5Vwl;MEa{U^t_YIZgY=> zofxo>c!Vc?VPY_`95eKUafg#o{;Dt2jCEwMn3-TLOM`q=p!DZ>7P4R+#QpmUUh2>+ z?u9Nq?U4l9Ef2BmRy|r=e~js&BC514xYe~v?mojAJVu-Vzp5*c#Jc0vKKi0{(+2$a zxv7{kZa+q}=JABh!Kex9qPD#h3;DC&;PTi}5Wn0|R8fCl-l>r|1*SsrznfU{?ysdJ4&StXwMq*GbE>PEAcBHfLezkpD61S~vmZ_KA zpfAT=-wF}Gk2BKkAA1JavgN8Bj z-0Nur)5s>0%OssTd}&}%vHE1ccL3Ov1ZGBb)qmEJ@T2@G0kT&ca z%3%O;LR;4zVO9r?1e?p3AhoEoXt!Yp)N34|f1@te=W9cg^etjZ1A45x!ONERf?>op zb)7d7x3M;q-yXu`*-0{a?k!gJEDUQ-x1-YT2k)2GNidXai8U7~%i%!mj=pNt@`&P& zT*+x+iN#5+9LIb@qlBZN=rxE{Rm4GUw+qb6q$fx(PemWyZ}_6Lv(Px(Oc>UaJd(cT ziSs#zVLga>d-|J`;pAe>T6aulJ@*S}#w=tiCj)Gkdtpf)&6xhC+)cz9(E4`>)~q>- z%B0EanAM#{+%Xv>zn;jXCQrbl&reicnFfB#mSDl+1E?7EL2Yq6AFLifVT(STCohvJ zTQr06a+iop5V-=!JJDV@uLrIt9>22fluSaMIr+b9m{aKsP#VOrV*f!b_0f9}bM*z? 
zFS_C?t}Pah{)~llFQM(^4yK%_0*_ZK(Ehxhn0v1bRND`8|7e;Qnir{UwBJ+y(qGn5 z5`%9~bQ1Rw&qI=wrv@re;Ff%@e$S)_(AMdW+@zEF-U6 zqB>a~hQ4EHN8~UU6aU-=p2fuf>DUVLby39Ld;oIeuFUO@lKgR}p>9zEWL4Zyo4h-U zzH!+w?ocPu*Qgw|^>TUg;uk>0=~hsLHTR9?LDR*+VOFvBRljP&Ky=O#4QG)#|U@YKb?5 zEu8|*zsVru+c~VK&#U2`>#QQ?2{alzqv4EvmNxS$Hrz`AJogtg4ax*Xvbii_NCK({ zd?{oS;w=otfJA(Qkya>A9LCGEtHQmena{6Jk~Ie<{)Lk zkafvHmN=KXLBobJ>%3;_LzZBcRUX>w=c8tKnoNFiD3c$yl+|9@2+A2VS!nr3Oo;g% znmmeNcf7WkRdI~V6GCM2v`y&mOxZi_QEKhraBM$Y1F9uvQ0y{~YqlrxZ?ASh>$s61 zsc4fW*|2|ggos4q{H+g!F;DAZ%#==o{$~RrzuS0- z=v9nfhpNHzwmo9|XUtav_y<2Ek7qcu-|-Yjjx`giRwSd3P+x=ltxQDagiEr`O^@+G9?i}76DRR>QI3li?W&jD%gJiojbRp} zFl&JUQ>o`Lo2UY?)rurP)o4_l_dy@acRbBz4}V(s0(}bU9u>*qY<4_^9rlNmxXDm7 zbU$W28;*u=2ZQGJ5Io!G9Aq62mSr89#f2LNg6+F8>IgcwW?$RR>}D*WdFLB8Jz7hU zZoiEks~bSm%Y=Idj6zN2T##>g4w9*_a?Icw`d{k-K7U1E;obnSv}gpmLneRVmk&Wb z^6}+gz8IKZ3Ldeota+vdY)heCi1Vr(ljm}pz1&eNw3j-6o8Q9c4bVWN-Wy05GZur2 z($Jqci?u#cFpmGirCJ}r=u`$~jZ9TH4p{{aZi(psMxNwr9LrOU&ojG zKZ9M~8XWcaU07dx7;DGIA^uf__LGYFXnN02|8)y&|9t}Yk4c0fg+}6wD|$lN&}8t^ zOavd}b>K2Ik*}X~5v)diK&@5X5hkAm*I`qkX3twxzGzWL21&%SK_-HO?ifg$cn^(? zKZ4a5SJ*$io3OI_IA~G^P~Wcw>w@Wf_yRF%n-54Eoml8=5q-A5V6CGT;<@IZkY>LX z4n2DgOOu}9;_9oL0{d>cjjfldOVH#`{?0U(^*U4*m9d8M zja4wE_zL89D*^My&+u|iIqK(BqWohVDjgrI+jSL~f9DC$>bg*-HSn~$=Uff0l!pM6 zAHy-)so-_$60_aDmB$D>(YIF*a%E9|+cFoI#{5EewhgN~8bEiaAr_7p1})k(sC-Ow zh0BaOn_UIJwruJi&clhlPhbjl;bKf^Ui;)1LEy*!?H8yhwq@zy4;Mk7a?&6l2jiT~9na zF#(JY1fxNKk=W#00&&|~QPJf+4(@&rR1fs{-#-n-_UBun_04Xm@puQ2*BA3{9)YbF zWAT&cY1FzwEF9@FNL@?w)!KS6@jecL#r?s3lncmjo&}RB1z5O?Tz7cFxo^w|+;>f1 zP&~Isw+H5sKc|HIylP=5dUX^3$?Ga)zxoKu_{*|n>pl2^Trv)&Mne8ecbGumts18b zU{Mtg300Ko{dg22mexa=RSB2szvcygrMQ)zO=W$)AOVU5tA9j%F#SE|r)!{itsC>t zbcR5!31I(u7C2HzIBH86X5P2~%8;|_nA4Xrvx}ineCH6eeoy=Arbp;FONT|R`hVwU zJ62G?kP3IGv6HJAoktoBE3x>B70Z|r4|RR_f^BSHa9p3p{|GY`4sE##8J&y3@Zf9q zv)D|q{Gugj%89w)(N&Q9|IKSnyl{=Dp%~fqGAf5f@^Qb4Q1^~C_%@b8OzS07y@+KC zsArwMlb+Z6Z#eJgxx?O9*s3)b zq}oqeo4Kyo_~kv%A2Jix-!>5J$`UYh#ASRP&_z&%USw%8yRc&G4II4u9uC$k0GEXa zF=9qKepA*mX;K?gEeznbe-6X)wS~}H9t^E($3cF33A*+w=V^9&!XeW~7i)WLKca zb0fig`4gNM`Wzioe`6jS9y6m0`=RxqJGie%huS~igL3v;uH1QrMO>W&?imPK+*BO3 zxdi?`U?irvQP$_l61Du$19lE-;M1VfuuW$PMjtX0#*9y*{&5K9Om}06S0#i6Ed?)2 zAM_aW3R^mpb0GEs#JA~)V|-%KLOK+!wTne$Q)LU zZm!kDrc$x`UmjRxTLnHLn?WiK;PU6oU=g{yTmQX^joO`gTXqLNXvl)M)zy$jtdF$z zjS#u#7uK#>j3;0CftBw&p0%YHHkwCp)j!eP;-DS+%Xe`rVqMCYKIF~Abm44210iAF zT{cW=BsksH60?qvaCVneNc-P_3BHr0ZpXe^62GZE#ke?ZXsIF!T{$RhkNgHh*` z7`3abAnh}f%}e+KZH0R=O7a8HY6r^u>c|50KB3dqm#nqvIN1B`hsX`NAXzm>-5Au$ zoE-jSX)C{j=jeIF=z9*utJK{8;0#DSe-$oX>?(TcTJwy@A-Lz?dQ|U@#JWxN|DSxG zr%m?9Hdz78o7@0-RYvGy70)tTQ=oXp4Gi}@fQ^3~h2kDdvGSsZvIqHyl>nBXV$nzQ zohQ!u4_iN;WVL?e8XPwqr2ROw#h=CH=3e0Gas{H=OVJ|hE(BzC6>Dtr=-s*%6aIb6 zQ(6R+=Ftr0$2nQe{1{L|UZ^_k-f;S`{st>c zV!*-W9mqcyg6A)+>S?LW?J6{1>kttLYf%Af`=OD`CFgPxH%A`Flz-(t8^p%}L_qRzP?bQVRj}HZl z(jMr(?HBkaR6$>#&)72O8MxMrK*x=p*_x(TSbUhy$w`N0DybiL|6Goh!y3`n!IZgP zxywHeF%_LW8lX^q8Eu!GQ!6$UslTQFM2oJSMF&*}u2&~w^G0%qAK8M%j?cJat_i>vC{X6*)Q2b{=J*FBSR(J-F+L?%l6c=#oqGSl(*N0gy|ni@TFRPze}kZ#7r=`0IW@a8;m1WIA^F7#Tw~IT(&SGpHlCOO z+q2N?(L?<7TvyNzI!-%~aVQNu$EPgHM)~F|92V(f$S)CxEh&WdOU2+hXegGyxrX|G zor1wq>U$)PXI0wN=~^}zYnLWKz4lpbr!)4jt{PN3Rng9BJu{(mUeek=AfJ9mX8z|- zm{NfI z_kisOnzxg4P!YG48Ek8Z&2i089K8!P^KPi+*PCQh)*Zzia_T!AIYLg01n?{xj%p8@ z*R7hz&AWGi*9{eBD>kx_Oqq zNYHCy`6mO>Yo-RJ#7cN6Sr5vvt|H{n+^-QaVP*~b$giQ}w2|~Px3YlGcR}L+RqYz~ zm`VDivc^7+JcGJKK}b%;)e^DiY(Lsb^oHbrZb0F%U&OJWGiOxMEDOI%CHm zRG8W@`G~D7VdY1@EU=T1wP_I6=Y?X#*|}&1rcgS_4e}dbGn18dfQ}2mKI0R|A>AN7 z? 
zcVO7Ye$xW-*(HZQPLysdYK%=-HQho%KQ7xE3;9ld$K}HR$+g7xO>DprwlV zFLB;DFvUpFZ#@o%y<8BworC5r8{pg49e9;Goi?tNq5D~n%N@sIW$yq??0E|*3<wIMj`e{>XlQZ|LLnKK-}(t}!(Kv&CV(rWmN9SBSV%vU4TkSDSo=^9%e1qZ zYWEOUW;zXP2iAk)pEaml)8wq0Z^ZqRqrfDp3L4ulffui0@|r<%BFIZ(r2MHX&7K> zHD$!>u$gNKWpWqd3|oNVk~Ctj+$X1TKc;NA;q5-%(6Z|u&>nISgGOG0pp<>IC$3~+ zPRpVCNi=n5CxO-70g$`!7h0KE;H{pYp~>$pxlVq8ht)&A|94%{DDncJNQ{?m@!9f& zvt?xltI_EDA#`6{NoS?ms1<&WO`!L#>zF+BTAj{av%d0zZ0Z)Cd`&y(625xhS+uwr zj9RDrpm|~}rUlWtcVDb5W+KNI^jyeO9AT~R3?cUDHuN2+!F_4EqRJx_H_<#g+*Vg; zoxTvV&l`)aPh(N;62~?FY-GvfO3{21aqhZ3OPtT1JAPtD zg&F0g2CJ=lx1b^-AC?YzNLi;uRyx@X+@w_)mVOJHkIsVR!CxWcc@1TZA7MuKK+FjK zjau9n;cgKhOt>gxxw?MonR;9>~0r)>Oo4J!|M zMzhy`yg%(&(|$NX!{qM}>(YVlcT7;xYnfV=W6C9(i|8mFfXnYWV^-G#YUPnUjOv*Q zQ5%fW-{2k_6WRuD=3}YrVkY{I-VDkkuA0Y0wgs6T@V&L3J4DxC)$hE@#b4c0dBLEe210Oxb}-wbkR3#CPz)df(r{cRRhi zc3y(C4NK5++((egZeZif4XAJ{VamzPAh}hL(;oE&4fEb{+o3C+qtA2|9DZ!Tz~`03 zz765FfBJGwnkDE`9-x>%0ObHXcsr8rbtwVpHCDmwLrukytux8RO$@c8dCYLzFQ%I2 z#?Ahu*})q_!DPratp79+oO(BdvfBZ4i zoW29UQHHK29K8nZg2vhikZif*>>Bu#%YUm!UCJ6vEI)*}?H#P2QV)@*%E120C9WA& z15W+lGO5`Uo|fJfY9~C$($2Y<73YGr+da`^S$C}Ka|K*yHnNh&PNIcQ7^;@sX3}C^ zn4DrPNRPYXTSIL@oga)=p|ubvB!Z%q9H0*zaLYVnLEEkrRiSTn| zXEFP8HK;c9U={bMbMT@B%CxRAOY`-!Z>tdB_r}I2+MuX%=3Yn7@sn-q zz_G}cyMI-KLqsKHo$-KvYxTw6r@IQi?eq*kJ_(xO8kF>T4l%|5LGF@REY-OIIAJKJ zPydRx$EX7KgU_X2_EU%l1?rqnh^kM)$_=7r2 ztGb}nK*3bs<$P4;SJZR>fB9P~sJtfO@DpZY_|skJbEy(e&Rarknz&ed%?Zmc0n)V%AT!l}{u&BXET<$p%ZDt-I2c+8Z`hTXPCw-pC zaKb#7Pr>3#w6mTTj|p2I!>gkv!sdy2kfnPJ+pDhPxYvb{GM{Ege$nh)6?s#PKd{x; zF5$}1y%?782AivNph(?WNO-Xt@ZM1z*KiZ_&YOVyv(s3sUJ9wQ`>5#C8IscX!2LMN za9rqz)xTSyK4gbHb5@AQnE$ji$ja72$?GRDd4jGOa$*$SoAYq}fd=^Fzs_RtG+VCB zoW=4VtiiN{2jL)feu9?YgGp!f#i{?j1MKRBJC+&9)R?p!7ozP{ibb=8Zd)t$oZq4;?{O@h~3p-wtl`*g%Y#N?y805m20b1foWM zgbNXd!YsA6xYe0sqzQ+XQ?2mnG4%jDDeS+=`xRZxk zl{lXpO)Egv=#5-*)|aQHorms&x#!3YOpmQ22ZhT|t-VY{}_-lelxOY_k_ zwp!xsh%%Jk+=^#6-9kU%Ivg0OC)lr`Jfitou%D`j9&e2?pm!8bt!x0taw}F_eF;7t zh=Sb~#C4x}1pU)1SV8bkP*!E5z3~^QFC0r}?t@_e`$edoT!*&3&T)@m6VSX(~@`c@Ej%j0N9c-S7f+)Z71}ox0gBu(+;*P1G+B9Z`mg4@$M! zr5b4ecntdPd4;hD{UE#JFCJj^41$QeX+0wl9VZQE8Pn##qJ8@i{;GkK&;B5n7xhqj z&BS_U2gdnE;(vEc#Y5g%;FI(_#Lfx=_q7rj^&kbM4hhh184iY_cBt65P8}3ajN3~u z@udEJ47-;D_Pyn(a2W_wqAuewo)7l*F4$gNb)dg$L)G$-Djp2D@CMe(VhJ z%h``GG#V7QcTvae5o@GApIg79P(8dCqz#OK^r|e-v>yg2Sq@dbDj==bW7c?VB7~)l zfe8mFf09bi-mM40Hhob}+0kp%jW5HfV_Tuw!XC2>tg+!H`3`TIinT9jjCcParW{%i zt=IJg`NII`Nc}J99cU!}h^6fQsg>x|VMYbWw9-U4e$V)=0*(mlQK))E70@p3Y1)5FU$Iky1|u2P(Qd9e70tB-OS7QA1A(D zb^zG#c*M75bQZABXq=vK9I67(!AX;k&^RR;YiI4iVy}NOYe24=q*fSm`yRyT3KJkf%yiBUTc%DM^^_R}MZ%b%Xw2wMv%?o-X*QI|@-A9qZ4VhXuBwNgCu(3^{}3=A zTZc9#)E8I%0VZYtLG8dnkV3@0h=w0v-krQ4PhMev-zb=EycK5#=_CdW@2FRLwF$? 
z3zmL|FlBZUR+$+IZTD)?zHkMYRL?@EG3(KN%{RP94v9vEHg_l@#>cj|koB*r;Ig!~eMEYIUk_i2Z*=?2G)B&?UNfQeeom~nGB2IuLs z<}g1@nyMo{4GV{c@6Tc7`jhD4-ya%vXs+Ufm1X2syO{_thvb37`5iFK=m>c* z+rj&JCx{y!iSa{-DSbU3tP5ywH`Y6+U~eo)=jjM_R+QTwSb(|Y`Rm!$JA_3bM3 zHNFpN9}UGi9S(v0y9kO$Hr!{~CG5$Tqy2U}a!G5@zVUwyorzye{TIh)+DnThOS1Hk zrC+jynD4nrvh*ZNk|mKOPZ$ykNg+#;k`$7X5g{d+ntN_qBtj&Kj1;mYLz0r@cm9FA z)Xcr#?>V2(`~3}Kb!YI|YiGi_!Q{P}+Fc0b`p|pGKv4Z}2}GDBptODzUp99nF1kc} zeTuomIe_3gLs;Xa&qdLS#M}BI#(R)MqjeAJ6ss_QOFpqNqA_MM^-AJ5)Be*KeU4vY zAyR$8=EnfMWp5;$inxXA4_86NiyA^aiooH|5A3WO1EGON*m2a5GZ$xp{ihlhdwecm zXlMgRFBu7)SptZ?y_wDI#rT1;mQMc`D25OJg%Q&Z@zS`PaD#ZsQuF(Y*uo?xeZisQ z!hbN))Bpn23)lhSDrE=3%OdaKzY7~S$&>> zjTnu+KO8`3r|ZNC=m%|Wx1e@YD{e9)#=Fx7iP)iv?pLAb3-ad{W70z{&Lz$XTy|2f z`gjE@-Tz@_-!!>d8cNJROFfiwH;}#mmq}l`@vWbJfLGKe?EGwoc*=x4kZoXmk~o7; zZ6K^}F$Uj%1}fbKwiWcaw2TBOJ$nb*J^r%D&CSrJeH!AOam3wKfeupFA~Pk?NcAB1aPfaUEvoV4f$w7q$Xt=7>vU;_Eo&6>ex zdOooNyYrUohLV@=DcWz^0iE~fLhdg=Nn-c{%pv!OO1p+{o+pQRSL&SJOH{~rO(&;! z5UcI=6TNpwqa7Oo@r||^Z$kbFyZe|jS_RMl(-Gc|O$HCSFQodaK;El}4Y2qF;(%#< z>VMf7Qgi~hKK=j~V#&99ow~F1j%=a*wBooD&BqOe@~=ABvL_Q2mYG<(uo41yj>4Se zuPC>6U``b(*7)i%dBc5p>v!Klx&8t;|N4skzlb@N;R$#!XAkNnuSbZa-s6W-mOi5y zc4*S?C8-rv2Sr%&{U>&I?gQ;nFYwBeC**{GM_#!bxc+?yG3<^gHf=uxANuA)#fdy@ z-&w~?Bfcw4o322{>e*-_p3J(BI*5Jl(7w9r40HMPKYV|Qp7Vc~W3Gv>qVp~7aTS-K zGdU9iZI7@5!$dS)U619@$AbB)g`oHI5WDk2TQK=F3A3X^aOuA%$tSxBYdkN4=k5LA zxFs5MA{4My`vBlB1*R@*z?QTS+ViPn>h0fP6;}oh#}4ALr^cN5$7fI?Uj*az)j6Bm zm7wVz4Cbom%u3RV;YrC3$MF!@4X4cGq-%K|#+po)9Ys8DD0e&p)YVq7GMZ?%v&kE_b`S3~U8QIo@(0DY$|ON=ssE_Z=7Lw$V!^Jx z&}V!XT3furo@2EHw>SlL0v{7+{Ty1xq)Vb!z6W{BIgo@rgr+_MI$I|}W%LUu*E|WD zm1&sxZvb9>-%W7&;|==bPm^P#1anXMfMxPwK1=lu*fvgo;+z6w$op1{OGlwrKs zO>i1+hvp8&lvT@v{5Mxk`v3f1yi)vuE%|00%@8y4ZF$$QbsbT^&P>u}3y z%Ku2bFkmckBIzt;(`y-gQ0}GY<~kPadjm^vr9t(UVdzoh%4;f?VoT9;FuXH@ynY{e zWqMv-nY;@kk=xTfO9ei) z4w&9($VpZo#-L@IoaegNw0A$wM=e>2^Q+4tc-T~Y@Z=dTw9*s0&Q!y}JT{82GR5{>UyB>!w(Guh*MS}K-9^CM0 zpCI@c-JkpZOPS+3iR6Nw5MLsoN6-JE=@COA<@$9rTojLmAupLwuO-MHOd&qW4shDv zp-?;|M*XzOu(?H7Py`l3MuHC5bzjV)zS@FSO%U2I=z^*0gFrgv9d9FQV3r@dSV6;i zw3+#VNo_MHRsYZx>SO+(-K!~BnOY0xCPVP?_;5_@e~}r=GeI7&DKS6q346T`fq7m$ zYnBwj>z_t~kt6lUlxtyLmL@mPi#%_I63})}1!chrNx+ISRF3M0cIR&JmO*w{mQUR+ zsX8~lEFD!t9GUZm$KVmU54Qf7455+3d5a!OJTp{NaA}UDx=5fR;+h|Zr+a{^-X9V^ zt%CZe#MHd`6HV65V~x&#U|v-;WPiE=riU(}ZyaS}23+MG<|Jc@^-|pAM%f11;oBy9 zqxpvmEGYdQ0yhLaF6Z(|#DEZWM&x}vXC&BuTLaSc$C5$by+NeZm-M)!B~*7Cf%Ul$ zq4$nr2>53xG+i?mWHpnpkD9jNH-85tMHvWnb{C=WlMR;4I0@AwszCW@D5jp#=Bm5Y zI5V5W=(DE;%zw>+=>*$oRZ)LC|1v7e8Yfv=&Er)G%P}l=HCBeEz}Hk=t{de9G!G3# z(~G5ODQZwSrw~7W$$m&&FBWFqYry8F0?_OV#j@EoV1D>3dbIz6$?ENNhjW!UTn>Z8 zLnrY=?-;nNNqM8_LzrQSkZ7b~n9tzTx$-Lp-XJ9|5 zjStu$$Kv|^a4|#)Z!MpI)OiC&9M%?U>gc|;b}*)|F++XsBZxo0gt)s8i7%1?r!tSD z&)*BI%&uPi?HTs`l3{4P#>Nsep#k>Iu?142i|!Tl)8sb=bO=Bi)3e8eG8 zsbA)YjnWkQ==Bg{w|(O+eGU>==@A+?6acvR(sS2>kGBv(b^cB){GdHp@jIm4zz8?@qwF*&^X$m;v{FuRfPpHyA{V<3Zw8 zfn_V4Q18klZ0Y+Ll~Og-p!}evZhuMUh6`AdRg4EV#bVr^W0d7o=i-byu;Jb*c;I~i zua4B@3Ze#pUpivjb?O3}kD1ifK>WATQXFqrjKSv*qOvwyVKZV11}-Q@m&!HCd}$;L5G%0P@X^x|1;Y#YM4GacJ>074T;b(rw!eDRN%zz z?eu@y&o9XA&aID%M_C_%wfQ80UHBQ)-&PM2`k7}Z8VjA;w%A)&2W-rnAY@b+c6n$C zR>z1llr4hD!zZvT^$fFJb_hLu%0V=95Sad}!;Nn&!8WxBl=rKaSmzUO-eL+SO*@T# z_D7)gqZ}6Z&=nfB?&JDBUqNqK9(p)WWRV)=D=#R5l;Tg2EYin2YsoEaM}OXyEWCJ8 z1u05fY*?7XYZ?^eutBL9nSUDe+iGxKy(YK(g|6V|b^rs;FT@n*&mg^Z8J!%6fzXHM zKQ*)PV_GYi9;*Vqkvh;2D58F<0YBhkCXSyHg_7+$oc8=moH?;O*M6cE)rS}gW*Myj zf0Cf{mJEWBoVm$$82Rxz=#5_r@xzQbxpyKcb;wz@;5hr>6#-!*e!=3%VDPxQ2`r1( zO3ZIIvGN=`Ysplg_plI@&WjXwfe%0`O6464UxOox1(!E%Xz^_~DjPp4b+}})EVy0%p)S%4)bsD=z*GWvGyw_h8YOs9;$KDVO^8~%aO?V4!&_HV(g~? 
zCTl&dzf48f>+#^I6N5fY#?*I`gNd>N?%Nu24@#1; zT0IOUq5?GbD+k#{byOLLq05!MFu?c+rXReIVy`PuH8Tf-wtV2rYm9}V=$p6K8&f^YYE=Z z>p)(5MiReX%r*I60`pCK7)Xh3(b#*5HoHS z?65fity{l>(`*i1KWM|g9tMJk)gK6`P~rIf*T83EByY8>FL5`6c=OBMG0ZWL_bq)2 z72!Y8aO+wO{Id_7vO;;2$a64gs0mhmRY7>cC-hjel|OK+2=2SkE_mf?MP-~BKwT_k z9L#2>aR@Wr=~=lYidhcd0gJa&{v_z+B(J|U*s$p*b-9P3tLhV8RDMRQlv_A+3}ra4 z-XWIIABFFsa7Zhl8QzO!{8G0{*s6Mg#YxRc@v+E9T(R7V^j<3+#R|XF#m-FZa@y2KG0%^H-dJb|`hE|siz88b_B5;GPoQx`4Jzdx=ol!4NY{Aq z{hR_iC&}^V^%O*A!7tXTv`KAuEtpq8z9Q`JK8kdC00i__TjZ0+w;~z?!W6<{qj5DBbKp% zq79gBvI(V08_^WRf+=-j+@F|X)T6;*R6x1XPuIY(wl`Y#y9Ck$BPCHwzk*0%#LU)) zVRS|uPBaPO7uT*LK1_>3KIDibwr@Dhd7;kL{|tic$R8kG_9)L}T?~qsIw`VYHrP%u zgC?&^Xutdpd^*~Bo)ZeV3s)}RYV-G;hYN#NNiCOUea*)F*O13-c9Cm&<}`$8yr zASK_yBYaq|UJvsW~8nH{yyp zuEz8M{h{reA!k913DXE1=P?!h9;+!B#-0Jg_9@^LQkob0<6Z6+DLp^WJwZI8Bj{VHj8KkPYW-1}qxI8827?={5JATrhC zRUUa|BBo8ZVW{}uZS;B6 z$orz0Q!XjSmdi=#+U5pvM%PdpG8L7E5qT1mPmmpb0Obd7@fXeBVDCK)efvjY)bU!d z?cs@K&4u7$dI+R((O_kxBP5NY9Kg$-uvA)!b`kekTKJ_$j;yaP=e5S=2|L+}UpWBE&&cC6HcDub#c7qbi+dGZ9 z&Wrw@B!*|ZWU=#ZtUPc6!ufnyHeO9A8YAYM?$;Lh1o;=+9K6D9buN4=jfUp{30Mi8O5> zE7ds&PtI#{Eg@1YN$3rCC(&n1?JLj`0`y(Qg4ZM^*amJPK65^pb-aVUy3f&U!EZcD zeE)z9#D)|H+Ce6PY;A8o>s1k2ez?L$da7|d>QXVlnRZqhZy~X-CzvlTXMrzl(D!Ky z)=($Xk1|5>n;B$Z=#Q}-cO|*GuO#Z3dvVxlV)qX`iWX`sAtaLSlG-Qv3~!o66+}Ye zp`AEBHyg8ih2xc8nnLLFJrMZGkgGmO=aeO4%3fF_?r8$oJww3Xm!98I)x6_lJ>fHT z`Gi4fym-5n5zYzOQy8G0m)a<(XxC z_1OxHEv@0BG>W09MvwE@&hw{qM1soyUud(=XC|yHOTTW@b1K3P)4au3cBkJcErt#{d0_$uGq}`{}0h#vxEg_ zY=iWedRX6GgWJ56xHR8l_*&0LAlJ46V$Q()q{rw7^Wo97?n0bz9=d2+Veh8F#F#DQ zKcCRx5{H(u>@`8Mg`p+B8s-T1IZ7 z%~)$ndFH#Tp<}BG8>Q25VQ+F{T|Y}X432M~s>g+iZosYAVlLm|6!;cV=Vd=J;JRM& z&LhLX>0BJ|HRct19A3!>dNFcL8G_{UL9p314qFb1(SB?Rs}7t?o{{IcHuW1uH5-7u zHFwg1zE4nTSYg0NY`lgM)TwptwG%?^;+ReNZzjKI%Lo4I2RWUMV3S`5 z^(jBF{?jgS^*zN(&XmAETgAeDUWW_aM+~c?!))rjeSAdu6Q1! 
z9YIWveV)YG(BekBo`rJhWoWyRi5}Z$^2*V*lKSDnDC^up9tdj|Zg+%yJ&VxN^b9L; zd5pNc@KN7EK6; zI&fuPH<;s1f9owj+1V9(T)*iWLQ@%aKPON&D5M{1X7q)mSufDjm%1op6EN_bIf$Px znp8HaJDM(dgkKxAIB(r*49aK(Pxl&P<6S^a4>dtpqc7Oq9tyL3+OVM?`FL#YnBf$L zl|QoB)_t9n6FQ{G&u&7WX5#)8yC~YGpJQbLk6~K1yzEkUIGU>`l%4Cwp`@H@w3-11_g24om7y!1Cm*JkdX5W?QuZO5Z=k0r3~m>B3Zr<*hjwxN#~3iVmXppQE6J z1z_`H9lAETph@#WRz6RR_tmsHeT_JncRB&xGRT86ZYh57j)Ep0(WAbc2d^OTKl}*8 zjeTKattU$Vj8WjepWvYSg>AEp&~JJjeb@8YhhgMr4S0dBe*d9Ow20hr1$d3zQDy0U zd08bfX2QZGGV*VaiKD;qjbfasmyU(qo@2i94SE=kh2DcE;ht z`q2=+Clr%PQ=!DB7}Ou#g6LJ$?QENbiFB^?@}s?Qu?VHf5xi=~MCSU-7D`HK4;ozz zE0>zXI+3n${D_#d)M!zd90_82PYpr#_bQwBZZA$9eTr`ymKh$E|FOi|w`Ncz(&GGgG=ulvCNv)1iDLOxW|{5HWKq`8Ieh^9SwXw2wK|gYqb^{c zzmvSNQp6>%QC4ym^ah{D^yU&s>T?66`A#hN>n3a(nF)G#@3VN1esIdY6yv_6VAY#2 zNNuBxmb@X)E|>Pu;LVzRv^l49;gaGB37AHEkjX3mK$ydPOzxt+a_l{Y$NQcjoA3hL zXA;L_#d}=itSQ*}igOh5G4nXIW0a5=eV;2@-$Oym;zWrg#*E ze#M$X(5?_D{qzVn{oaqQALwkh;$Nu0(*Xf>>cYfpzMy%^8O*jeVR2Y7;M&LFe!Bqi z$0{rz`W7wp-$O+zdFD)3VecC{T*;J&v$O;ygP z4$$DTb<_m&+w)QUwosDrqL$wIjS^Rj&)~Yy6Eg1o4>Yx!!J~5-pVlnq4*Wye;ai7b z{*H$z+q8)<-EjUmeMs zVR~G^1_9j5@6jG(F(2RmDu#WfUDMUEXy5xAq@H;N9)GeZvsi&D?je{w`iu?1*ZGnS zmC!PQ$Emwc(sM2%Z~oal^0#Yo59zEFHSRT-cGN@Xg$N8wTf)z5J3$V)xs<_UaF{Y} zokNa*^UG4|Bbva|Q#A8+JWWjB#Z3E^0r`)tpt}EjsG$5n(wWz26LtkfJJfMZMK-qC zPXp=P>);PBC>!tNd*gV()&im+YwtNsqINjhgpB+OV>suh(G=rZt^($l>o8ap4BJR}O zPGVVFV;Oa*orfHS@9rAHu;x(mWVC~mzb0`3t5F>EGjDXl9vHpoJc|8qVD#El@b;iP zl+Pw+ue8U;rSG7#o!(;uE6{0A273JDc)jLFpgQ3QCU5#d_QE!}xaJ%BEM15u2VbD- z(I1%F-v~4MJm6ba#DM0*Slkic2pP{)S?{j<;C;RbYxSeBc^b_t#0Qy~Ki$91lFRvH zIQW!)WZ8RWp<#X+lr_#q%b>|@O_Y%kBs4L@zLcj9i>3UF5vTC|i*_~=7Bk=}IIbQG ztz#d6A!W7IpFV*)Z(`T&|A4-`FQNLoF6^Cs5*_`%qL=&=>h+ooiP`i%#M-M>pf8rmPm$6kZUl+UmV?GBYuL69u(k2BX+qe-7F zD1SSW?I?eUem@SO)A%1y<^Bnk<EnqbTml^#1vscy z9pe&2!sE1fG&%Q)H=MBqV<)7eq3Rd}Z|*=l;%f~Zpd|!cyNahyD~KCwgxN*Ep=KZL z0WCJcwI^DF#oB8aJlzvK-^W3^^&sfwM|Z`7!;t!oIQTKYFe>F28cq=jRiiIqb|hsV z0Aa9(F&B0AE-H-@nF%=tGVTDbzxx@CYwn2nZCneH4nk*VLo4RtP#aIpA}L4IWSvu7{fb#GovweLJF3F2<6UK`D|AcY#gm6D8KH;{FjumTS)tyKYhS9cs2MC$IePQ zoY=s*lEg!2+051`wEQv`wY9Va$;dXG`aFSQY$JBoeF2YS!A!buFuzz!EOgObvY@*N zj4UbBu)T|Y4Ad3Uw^ZTG9wK3>dJcf`HB59ljL~}%pfY49Bz-$Wj#tXG%jQYSMr>rt z_8rh}{1tq1PcnJ8TZ-{RmDu8<#|{7Y6RPa;(d6U_RQ21&EI;*?SgKa>wfkE@ZLyXx zKoSlf#t}H$A`IoyWR{`!mjzrnjT$5MIc3Tz-qm_EmX+!9F%EUamg~WfuPR1yYMsQ* zvj8(*ZvxfjkLdZoIM5nMyMbMl6{TH5+87PNX>&A>9||FS=0faNrpZ|}NRm{ym{q; zJ5WZS1*POX*e@p5;NUK1e()KV#mh0m^b5NZp9V*Z)P$&S3(#a)G5IE+PEtmVMY}i8 z=`5}G;;{vUu-P=LtNsFLyp+4)Z&6t*MUbb7bvR~!I)WZFv&a@)sN&5 z@1F9Cw3{v4a|9v^-(k3$GnhCwKtRkIP|QofHbXUBuaSqbGfpsB=QvOfjKkB}nu61G zW9&S42K)wgLC*WznEr4*`RYEh)LA?+7Kj0!S_5h0hzpP&OrD84lx_RK=TQGgUHl8Y z{>!7C_;H0y7LHCc92K#>$=#kVC*ITtme`quNv01_JaP(e)}99uRWfw?hjz04ZYXdX zIefX_nA}Sb;O|nLKUNO#um{NMrA+CasBj#sD?AVEf_c9wYcP1J#3N}M%YLyH+#b?? 
z(yR&ns5hRn@+PW?5g@f6#JqM;?|zjL#O^Q@Zf?}&H21Y*M(1w)>8Z!f+S-mO`qrp< z*&iwnyaw^R8+l{ezJkjXBeZH-2@X#mL&5t4;E{d=JkNYY`w0)2bDv!Fo3S2Tr?f!m z&SNb9)P6K=JdH;ViG}i0Z{dnVDvIjN_}D3S=w_FNgEbGpVBI_@@!EnWjyw6l+0XdV zkHvz+?_a3e;fZ1In?d~8lUZ2of&(|Z3F!16%(qd2_~=GvXkCD{3(w)QdEGdRm5wOY zp04nnb^*VLXr6zO^2BFGLO|L;xVWtYZ3>ov6SofacINU8uM25+t-~pEcSthI2V%~V zF0h{X7HtBREcs+NZoTq76t1N{*wP_+Wy=<^sO(M%s{YK^^LN0-eLI@CID@0h1{B%< z!xon|!H1raAldS}cz3Bp259Uobd24dlC@N-C$6u+HI0 z*x0)Q_vRIYbwNFPK7It9sV~7j&=@lA?*Z{3?_9@uMi_cdgP%FA2=pAjFjKmZ1=}8l z2!1a>g*~$)R_ErCplYKInxh12rPc}3Rx`BQRvnbbW$Z8+m#;RU-Vdla!m_A}V zYOOWkVmIpJf8Xb6IMVw-{5$EeD3y%<`6Po8sf&9 z{dkOce-ACPXM@t^B%fT7fO;Du`DhE`dn(&tslzvjxsr=kFZ7^EPhBv#q%-x%`RLsK z6=TQq;Py8YBV?oTDA>xj}$4GII1nvK1!A7@;J-!Hs?>{>N5PNzY9u zTTK>wVkc9Xg`n)rIhI~MA002|;_J_oPfisDPTptO+7o}#&$0tOB!Mubjd)l$ zA7b%~XmCDt8*ThYLRSWzBYN(Z$RFvml3D*yUh@jIZ7`#ut5jZ1o@i;UjIiH7JJ=)Z{5vIJKmyOTQp`|B^LING+3V#iT$gL1ZlU|xn=)p@Xr60V8LrM=(u&4VUqjRd4cgXMX_m(MKW0OpYnrZ1DNM?n3DLbQU$j8j=I|!!{*3DgGzJw!R}V z+(=!}{5}Zf5d)xRyta^%?FSzNJF%_f1zx;Gv(=*6*t)Nq03K?XJ%0px+jQS^LTu@M~4^+CN^-}$ni zRm?KMpZ1;G6=jxVA>)S@XQEceLMG5>{YIQb8cRE_^S_wYzfznvPD8LAa06iKXzYHF zJm2D#it>B|Y;}2wTTe&Bup?(E6Y7!|KO+XBmgQqX!WnRU?m*{>u1V=tx6$+p&6uv9 z<>LbYVw>Mdww6}m7`+znYqnnGP51c~#1P^dMWPvF`K`34B%k-2RflZ=w_(vT~8g3v+ z-4CER`bM7lj4@DYpkUH|FL=E(4X{g>_Ie}!<5iUpQ8WD#y7o~+-~E>{>)JEuT(u0o z@9M^_9D4*CXKaT=_bk5doDnzXFLjXYw)5`KpTNWy{`9jFCGsz`74E|$XvTgl7t1f8 zYKtzEz9tvPp1XW&K`yAam15Agc7A~RN$kDzJ}U0Nz>u-0KN31rK_wAhG{j^xOPDP!6%!xxldTY_ua5RTbr{# zIRl)F^0AS+UCLX7n0bPX_$Rh`VcahC8aM$`ZmmMItH;6OjXwNknd4j6nDAuHB zz~#qk3YI(Q9qaB1c2;gIOtKax|2T`vJ+mdjBcj1=Bspr_H(|qDQ?SyTh)q%Th|`Xs z^OBR0rk;p0A zR$`r>SOrR5QxaQVhrPaZKQSMU5r-Je7%3JOzYYS+qp4uMEb=`2OWtE)0_6>s9A-a88F4<^!GQhUiP?DvgN;8?zy1||UTr8G z$k67ZT8N=_sT%cv-b2e5>+(k9c`&)Ll#d8VLdPy&H2l#QT|?KR_V!0~$5qFMfeNfl z`iuh-j$l~gZPs{c4PKqD#})Q}&1Zd*f_+2afjSQn=T}4Jk524Rqwhq&QIo<3ZHH6W{ZOd! 
zg^gb<7Uum`<6_H<_=@$!FPot&1U3=7;?K{CJu=0@Tl!wCB&J}GY(vh$zMGK2)VaCo zw6Q-sh2oNW(f^@Qh29bOTcT+4=4|whNC7L2;R~hEXYX(-1(E6!_YR?3lwRL>M<>gRXo`N~sPeOL=APhfJP5ek(a zFK-MupEP7H^~YfJ+87+BI)Z-51F`&cB({f)uuD9Q+t$5wFmp7IX{SEG;4X7G(V1{wX(6n7UxG>>AE35D7-)hl};Eg5El=Xk9>e57%TSd)de?JR)C!lP)J$1G=NF zVd*2HG2jB-PafLy!AF1Ne9HnX%SwjmUYDqwx0+l!zZIQV^@KpFRUlbCUSM+4rDq%Vws1_X(8RKBrDyILPcvFd)DT z6TKy%8q=SAC@*pGo0p&`vB&aWacKCw6t_}OOMKl6y^mGGOs2_6@98P*weRpfQVqCd z!I(3gHxy$hE#y6qU4__X-T9Uc1F_P~7G^FZ-^+uEkf5^?lpoKt){pm4Z}DY*y+#%c zKl}x9atcv=!$UDfaUDGF?**sc-xWz+`dqH;ZQk-l-Gn#`Jwf{EizGJZg<|pB?wq6b zDCqwE2-<69^JXb6ka+I{PS*Q@4qwSfaEG!@tIqRb%kBApPH71pbJe*4F+AYzH=ru$ zWXdcRxGjqW)jbA=!&hTsjvIQkIcS}l>hyeThAV%&bAg2=ywlH1PK23KsLuD0w1xK2i1Y4(E2-p%0a|67H6nq`<(qrz&-3Q+wwff)D6(EAU) zlkOachB;%g`q){V(x5Mdw4Wk&@FVb=@IO4|{TVC9-JUX%|=P=vah@kJNNJ@?}~ zx(+=D`EwLd{ef5@<5Iw>`41aLGrOpp+CtO*ec-u)7>9k{K}6_Y7P|W)?dD}5?S+c0 z;JfIytpvnVwzKSYPjTfOL(cwY6*Jp%2drMsMaQrxsE=KX>p#+aYT-tRnz95-_a4Nu zg|~SVmo#+g_W{dYE704f25c8MK}+Q<^pXUD+;BhIben_$O{Or6GTy1QS6DT!2X~^6 zfw0m`gL9|-lK%AyaMXK&Ao zK>BPfYhU4yTCYTcUUCYQop{M=@B^ONMV${jWHL7%eU{h(|f8kd@ut$1=v<<3+Lr zg`US*Xh~U(6WfEa6;FcXdNZv5^9#B=hN8G;6h<{7M)aEp^8Obmwx55)%blO1^gQw= zJvI4;K^7o8Qp;RRSD@&l{v>Jm58iNEZ@lKH%XKN=z=O{H=(j^nDEqsR=VJ^w{ezSX ziwt5eajw)aIK#4y*1+4!FPL(PJk?`w!U5?80H;1^==mGU;1HUauV*T83m%w%A5F4X zK*ywdY>55|=EM`Vx155?6M4CXrCGT5TRWJHo6lRWh-QiJ9VstNE=i5G;B%OGSnmwM zVR9sz5B|>kEWFL6ul7kiK2WxFLL#hP_Zj75Y$a3Q>vCno+xQ*McW}&xC#YK4$?V^J z#qjg~*lB7c_?X^8>0fWY%4&out^6Y1jtKdd= zu5g|N#@m&HG_QrVX=gI~DYanPGJvnVwFUp7Jm4^|GI%ihJbBmcFz90g1i5>|Gwj9% zoZW{v1T~?Rmh^SI{OvhPe4^2^_cY!$ z=oTnVe)BeY<3MYww$Nzr1eKCf%G~y5i6zSGZO*R`Xlw?HJ+6cLkuwxJ-NoW+m-lmSsOG1#U|Y%lsnAUl>t$r+oIyYUMI6VMQUAhJVhiWMI^$i1R zi9xU=6s2nIVCc~f&E1MIeIxCXE1)ufTU`#|N zR!n?}u4xgVoal(n4|6fy|1-**$(`mg9V#sz@QEwF@$Q9rXuJC;*63*nUp8q7gHu#! 
z3**4DZ{Q>+EsklLn}g184Q`W^x@1El&?!4n;rB}oM4=}pb$o9IzbS1HxuTSMzx9|g zk@%8By@>(#k%dgjLaCEFGcmPgfj5@I_}NVeO=9e{FycZx`|ugXm!MCuF?Zo1<(4(n z@Jd_;3^-OsEa!*l8kWy0?RXxkdlZ-T9Wss>LW>|m)y5)RPF>@$E?Y?2U4$J=&!GF{ zMHoA)T4Fd$lRUVGS=8`UoGEB=b~Sn|)-zSISp6@?Ri#R9$e6fS9HHv3=u11Si`y_EKF3yHWa7{vHuQ0 zpS78MuwySsy%-O*4Gn1f_BWcW-i{!i33}yg;5+?HxZEEGJc@?~=VsVzs6;2VxQVv* zzTnqP|BkvCOm+?9BmezG4ygugH4O*7pm>}p_T?+r-vXyC1~6ax7ZP&GuVP)sTw|W_ zfxC_|@emGW`%f@wjg01`^PyV%8BV^I31uq+_>8e{p@+M!(0ld?%$%dn<(#1z{KG!r zeeD;T{6ih+F~k|Wnu-x|0?3EvfX|ZOSl_=2wjRxa(g(!0n;_zfeGBna;zRIwuOS4C zD8@{y1oBekNHU%TWBy|mYQO6)h@;)}nl*k9moA;5sXAUPtVO4T7L;-O3Y|4ldWTV0 zT0$JxMdi#h`#Xq&4o$k^LmBqtwCgHSoMX?fIHp87^nYkn4FK^DB(;4Y}1F&e5x15mRd0E*38 zu)#7EJ8l-k-tdF4vfBl)-BI>`iq6C@#`O*3O?%TO>DaO)bC6D!kmk8x#K|(q5+_R> zNtSemgh*17NFpPJBukbgl2r5FZzNep5=qHuk(4l&q$K&>zkh)F)bu>}b6?l>z0hZ| znLuoFspj%^$j*zF=CF^-xE?$v_7wQ4<)%V&TswMqT)~Y)bOm|xJjh9@m+E!KgHzNZ zUNn9S$g|SX$L$_Y7?MDlkuypQ+kCKExgY0${)w&!jiBDQ2dvjQf>lx#>iLy{W5Pfv zt&W1I?vyqE_7VTRrzK42Z--6(J)v&mZm^t~51Jn`Ou2)q0TU3sxOI=)>IwEfV5+<2yQ~aP$edANoRUqRyewdy1gAJ^Pm0@ zkE9ZJ?;p~q<{eOK3YI};?n3-9gx>w*zhZm!K5%&OnZ;hnCx+=%=ape%&Z(68K^w>~ zbodk)=FVME>GOEyzoRrSt5|X^_31l^YkJBSef}Mhu=d@i9gb%{3am#!^Gd?Y`I#mVyJOkIdvJ^)5pFncAG+<6I~U z{*J0xCGGT<5VQXz?d}YjXPFmXCk9w~un~y$!dd(dIvb~?D8tN^xZ1H1CVi(&OaESA z`N0q+oA*JC1?A@s-G=z91vrPbLu+g)%Xt_iwb@Vw=J*R+|^ zsD78&1=)AZ9G4AklTC%O8(P?|GGoDKV=^(mf-qZ$bU3z<{l6DT){#IR!0(hE3-!6B zK2N9%B^=vd+(S*|XH@Ku2Wv?uuVs4(y}nYWaLqxK1(ZnLzUE?Z$S3lcJp;cm(o&;O z^U-z@(EZ{9D79$h7u8#E;t}IP;x*Vg)aw8r`uhYnCC`M+l0U)2p$sCDdPA-7FKA}) z#7Ta|t2!SsS^tZ?TwJe|W}HSX)kT&`3=xfRgtyf3=C>44$LgbYXLs=+aA<2|V{M58 zd&!Nd|4U>ZM#t_P-bgK1*dc9Y?7REv3*P|c~=?MmXUSiCukJzyGF(eM%4W5RJ zSli#k{|%PIhX1&R;lv%IOKV`bPw>6mYzG8n*@ zTlB`-$lv&ny`kW=?=tf$p-zEI$Jp=Jx`O4rFi_Ol@s?k*v2gxU$_35CeCc<-Fkv1P z)O>`j&IF7^L(u~(;hMUHpf&332`DmS!)IAo-Ml3^OotdJyu^J4n7Gw4&JB)KZ ziq>nMvb-wVkLAphD$+LQrkdBH3BAe0rZ0FW$=~R?J{)R`nm}~xhEw_alQ6hShjVoK z2C1C`L4M<*GSHl7iMw~Q{6%*#pyDm|UG@nbqc?+F?j;N%tq)%P2e_MdXkOmD;>A>A z;ykAv-ys}*AQoh)ExETYC!)sn2aYJq#21?l_H^n=E2$J;n?rTLoUz zXELAh;}^DkilZJQQ!JdG3c-eN;9jskXMHS5*!O$_t15D;I2 zfz{*C@&U0H!zmwmY8Tw1JGcL41(;4EPvDhLpz5!UUW5OFln5D%k$lFSLru#5Lylp2 zxh^N`YQ=`!^$S=)_!;ZlLm+=C#K7d-Rg%~ z`<`J$?i$P)LH9xr%5qRKEXC@Cf)GI z5oTAe1vx!eG9PC(tI+?5>6^p(iM{EJjXlZg2h_t14ZYhBj%M<553G(CbF$AOWz3$g z=zo>E2txZ();|HyBpt$_&}vjK+5q)~$6(2Qn*T;dcy;Se%3jYUe_aGq zmlk9F+M9ep3GFT}w;UYFA$ zg5JK0BMVt#FFnCMDvLEa_r{3fcPN|jh`e7LK(p*J@1B;*X9f%dt)MoY_Qe#e;8UxLNlQ3u?<=BW1o}Km_ zto1(fTBipv$(ch^QQB2!_j@^fxJf0k+XsPH;CeWJA%*gQ2FgUONLF}bC7|05%0=u$ zpJ>wZM_7V*0`UQ|%(&RyKOin46l$Mmf#=4Rd`NL6Dz~S?snkZOeKHYRA3TKgmyaRr zkRQtab%P?hMMB~ zx>*My3*Mk;&kg13oUd5kY6%`y3iMoI#El@$wfI*S1a`fG-Cz7o{eilC>#AcIRdtIM z%KXuNh%HuCB%x^PRB6olFZgEw>Ea0qD82gv1Isv>8`4w|W`0)kH7xV*MLtP-+ z12Cn&Cn!>nG7NtN;=Z`lle?|55Kw_2(p!K=$Y1yX(}J^pUX+tF&fMg*Ox$R zAnngCKY<9521mTl1?)ExV-L4NaM(!b*g70MJC^a1%MY1}(=R-2qc4mcqsJv?Q74~s zG_P2_n)kdD!Tc=tgCU(~scU~i%8v86Y4B~hRHVy&vB|=y!Yr_vG#e~$N-%2aUKSd7 zgdKB9!HmET2wGV{-l5;l$4;a{pm7R${2mGs zSU>EjHWx-O?SS^D1?akB8_3<%%FL+-!m3d@=<-y}5~hd17b5}8_MOJ={~p6S$Mf)Q zmNw^ebPz6moQKw{w_|<7L{Qw$N6WceFeTOovupCCex1+A$7zNRi>G0DvY7J@z5qdE z$=g}s3D#dqdCljqOkuPIoMz|}!}C0Jj`|7i#;q)T$3+Ns=#T1;qrf`n3M8(ju9)j~ zQ1Sf-CSLo>f_W2gIGe$20|cxbbPQU|bD;CX4V>fgiZa_$rR|PGsG?5Oh@E|KWQ(3q z<28_SbR4sn2`<*py1@sL|uX%U)r~+;gN2xG?zzA9UO2iiR(%!PBW4O%jg( zpSJ)@yM4)NBUAQWp-B_r<0^L0{(D&mjko7&PJoU#XaCnx&@}pj}#5L1F zZeGtzryaolb|=8+$Vt|E?=-}W)rA@3XwSa=CgkOJ;8J2D%HzID3n|YdzSzW~94Y(h zK8COB^9oZQz2#l!{{b#b8LDg-pql(cNm`U2IqHTwtxDn}>a(Vm-XO{U%oNolX}@SJ z_%}QQ&C4q+%sh*)9HqiM$1_;P$6)2IG#FV=?7>KL;l9R57=36bX64?;lo2zjn}ayj 
zPJi-s1J2OTv@6sX?BV5$hQpQpT0&-Xq!kdg^EQ8IuAvasMTDBGwoJaQlK(GI%z4Eu zMTg1fd6x^Vd}$l`)`>AMU#F1Xw#1_wJm|9i~l9ThnVd--| zI`Itn4DH11`C*V}@dT{DzsFJJBXZK$hVJ;Bd^!41wlE)OuhHUs{+$2;$-Wr9e>Y~2 zN>rw083^OLo&w3m;9Sd&PcT}$1~(;q#o}sD3|vk7BZHZkxFZTPt45*grfBqv4TOY1 z(hi25h2lV({q7rajusjyOmct}4@>6z&uwSj;6lRP(-n(k$e5UfLM&3pF2y@#!CJAboH=Dt6^Src)gZJVxHe%`F(& z-+;4RSjwAPS_oN-h-+2w46Qs5!ywvQ%~-8O%kz%B=cHJOu>S|@FKP)LO)8M-Y=h$R z{qV$)yc1!sd7rT3Xy@}ChLgr07&w45E(6a0nt-84#8A=X4O0>?p~bptNUFIFDMfF1 z(a+@&;A()-CmJp9`9o5w8RzMK6MxgW+1hfJj~TcV9j_inMU4t9zbD{aVmIWh*&vl{ z7>Je1SD5}flW(aNaiW@0fTbHT(Pjz;RZ%v7&`mt^?=O^0+k~60N8soynQ*MS9D){8 zSMs^zFhN(rCzk$(f>lSs)y4=5y6ncp1MAURy9$P1ibco6#O+$|&Mf0?(C(j3dKXRQ zy|u~TQ~p9Jw-94}$6}O21;mW8$FRL?Q0`YF4cU@K9W;C3mX$7gdzc8LGgTOwXC`R< zGNYaMN?evO2sJG|So`G?OzHJEd-5q2n`pD<}j8t7gr5gzHTly@h+fVQ~oJ{=U z*I?b>0wRBqFZAtv@M^t+J`PEk@R~ZxGe_}>LyB3dnwUCX$CPfT%($eftsp5$#a>RQ zQS>}jD(M+4&Gt4DWXscdYisf>IuYOXZ7H*9TZ>M0G^}Tp}DmYe1Gad%-K>% z{L+Ey?G=!7Xf5*^(gf-q8!?Y^u$?`3qucn6;OSlf-5ZXeXm*-&{qWPg+v|9g%o(2> zc*_Ix@6N{h-H|Lin|!^3DXO>DGpEJwV8y3{-2X4W{?P`SFBZX!Mm=HDwgc!_c>=RY z5Bt2IxI{5Ys2-rs75@rFC3^&|j#s}(Um^@+X z$JgkyObxQM4Y;UR0ZgKMC2Z{|cFQW3FQT7ed59@xXfLtwkMD^Su2P!qzYLS*T?Ff} zAmSojmDW35;`^?nd;VDkTCLU-vLn#B+;S&0Te#rYARVE+;THCd%%mLpFFt+Xbx5`x z0s(!g&)sn^VDL$J_}W;|TPotBZscJ~raw#Zeu825=0bk?a4ei@E@WxPU|D`V`EVvk zMPHZlGAT&2N^W9w#v4eVN1Wr(Z17lo5vvw7Lg-i#%$jW|1bYXfqF5iB18fn->k3t? zu3_euYv{h@F(32(DXd3x;r$KLTPcJr8YfdmPe{Si>yO!NFUrY^=D?)ird(^xa4-w% zN^`+)W$Tt*IN?%nINzfP(@(6TJ_GU$gbOGi5-9b2TE@Kpyn}(W>1{TBJe%=E%(<^G zhVr|^Kr=vzl|GFy)?h2TJkF-?D{&0mUr~1`}cv>FIzvGg8z5Cpb6 z4x(J^E@iN`GQE~F@bHNq7r&Xhv_H^!DyKKiJ5%YnHBDH(obqRRUKn+eejjO9aL*U& zl)XAknicpROT8Gly$^$cX!1oo83&H53vtz?5~!cG3`Q%PFueCS>~)d4vixJv?T#z; zp;bXIy?Dsj{|#LZ-R6r7HQ;?_C+f|60DWB$EkA4Vk@Lupv1d9)1z%-m=dI93*ox&D z3!!O8G+yXJZ;%RNVm=I}79L^Gu-v15imbL88Rz0EWJDt57GGP+2G(~}rrIGy$&~Wkxod2K@Qnnj_ zI&VKyJ+eh0Gcdeo3 z+hOvWj)JB)nb7?v?PqQbRVuuGD-~ogdfkP1?Z3(U zsL1y1oQq4f=`%f)q1X5|5aL6e*=sc@PhG`J2Fm$lWp|W&YIDAz3(2boK@epMI>->K(LVQ2;S=^8Qos_dqkQ(y$0*&2xGCUxq^4@@oG7XWy}^qo8>)0oDI_JI4?^Hsjbe zkS+c#t?4wQ^YZ|t{`CZtkLF~_6>KbRhZ zgEY%!_dG6j2>%Kp;fJAg?;*%8N8Sk!GSQyDAu!7d(+W;NrZWdo-qmO`d>dLWi(=Ws z=27>-NvQ1`0;TuDz{WHLt)Ks78uv}?L+u{Ccb+-|-)FHIhmNAGZXxq%4s}Ocz3~|}|Vc@QFAPL^bcbsnr z%X0&nqE7`&jn9Gn$m>}Du$hihCPQnqHs8;yLp+s+H(N2ppUfbi z7oplJ7fp=gu!#QVE639gKSWDe=xvXVslBl8`Y2HTLHqRz`u-2roteUe=jkhFqy_{yuSa?fk9OWg!9^10Zw z&k6Ae##!X)?;#loqR<`CUs-vQzb-+0^qs)cGaumz|`!XN=+aH?0Ib-G1uUJR# zv6?qW!Co4MBkp}9T}`R<9Z-nwTOLxTK!-&b(OiAR8&f;mQTFvSi0s!XYXcmi>hN!D zez_fGRTrH@x}5~ek=voQI)}NEpH%*Em9u8ZLLU7az#=FY)aNfKA9+(YZU08{oqXVr zwcp0n@sHs)8VaXw1Yj4Nqu51Df!!xB1n#bmFuHIr2K==H?dEO6@TS-3^2r0PbXo{` zMsL9E@k&U1^MHT$jxvh>w(A!cC(>X{XRTXiyyPCbZ)5mD4XoPa?~>%gbv z1~iQZz>~`;7n#Rnp&P<4H~jR~m_1!g(DXq5XObbdLK1vNAn9za@tHk83Ee zMLfu#(eP}5jxgd&DF#p0LeJwZK&}trPd{zJYT^x?J!KDG%{CU678nY>u2L?1rILAl z`3W+QsVMHV7Z(}l;z!yc%^6&X#Sg}SlyX(-gFCTFI}a+z4_@Dv3q5MJ1jo*6ko}Jn z)b+UwvhzQbK1M%T>0h^Dl{d|nlk}jw#YyD(0v%Q9*gbE*GW#U9Ny-P%2K^a21+=$k3BK4oL62 zb}YeXq@x*(ItLx$f1q#I7(nA`P%x?%MjU#Bb9&x}zS=fDCah#m@^H%k z?!oyll<3)WJIkB-8)T!Etgq`O5FhD*t77RLG%JJp=}ci#ijiQgGYnnjd6*h94(h$) znS3bis9%yF3B38*1Fs=ut+6ouOc$Z);}?{z{>JBNBEiFR2Dk_H!KTHZF|{}f6no}M zgO(Wz+qcmfQjw3=-z^|*I(3TfK7?iaqQS?}9||pK#<_6-3xq>hS~3bVMH!%;oW(){ zGNDvb$0UFEpW)+1C4Q;NyIa2T`nwYcGZPGRwKPb`h90h6S6Xg0GP)h~xb!eg?YO_gV(Q1XujVKy>dt8c!w@>-@|ImU(+1{*2TS#5cQ9zx#VI97`;pM>in7NME>6NgNS; zhn5R%i8bWSjJN0scG?jb(Yhbi+6v`|%sR|Gx*c5m65GIMIj+USUsZH!_{c&Soh+d(LMgras5ur1O|i+=14$)z~~I8#|>xF|nIFXxWf& z*s6*bsa~NB_A-~i)gUfv0oQ|8m|_@>DbdsUaLQ+7zHo-(0y)YfuJMi!UqkVdTyVW! 
[Encoded binary payload omitted: this section of the patch consists of what appears to be base85-encoded binary patch data, further mangled by extraction (multiple encoded lines merged per physical line). The payload is not recoverable as readable text and is summarized by this placeholder.]
z(Afxeai78V_AD5-sRFFx;=qA8r`GM{PobRgxJU!h{`@m8-<8TgMeIYTyqhfJ*m#uf ze4}(vDTSyUx-adHviL90!C5gFvd((4=EKK{>G&7-zpxK{>|A)c{X3XUzGvAzE2&Gw zENpsF%xs5OL+HW~7$2u23X@HR#NolHn(GfgPU(o+M}J~_G379}Pg2h7SqNpX27wv_ znZ*X4}RkXv&*5k&UjRwtN`b4dekfM z9Xc+Tpvqq#)FFN>ZA~QcNLu*|;y#X>bqPny^o2t<=`g;|L>SVyo6!5_4E&UI80_;k zp>5wgs9gFH8tC0wvhWaQxvsQfw3LadQ< z{U~$(g{S>zNX+ZAN-N>HV@Jqy;)+pjdW}SMm3v|8^9<4x-LP1@12lqTvlYdc9OGlX zU`5A8^3b1QX)kVL#H`c!?Osu`0z~JXFAdCEIf9zzPFHTTj`7CA4hFI#5Lt?roqSyK`O2q`jT1^wtb;C`R zNGqAE)oS9RFlOwU36f!N!Qps4w~rCHQ~nR;HM)W89=wkp|BYuk+OgoVWCA}G(Ty@m zt}x+18QQeBgHxBU{8pxhn6@?_Q=aJvRy!Y}|MtN+g>*u%hQ&1RZh+{ytHAm4H&_|( zj2|gmoL|;O$bCNyV(NnF?-7S9lj>nd%_Z3UvxU5kh~aLZO`4ifP1o+l_GUGI_xd(A%f8n~;Af z8?A@bU|=wv0o_FDn79uR_irc;Y;J-5%6Mpw-3J*#HRyBuw6rG2NStb8Do#LRIvRY2 zlx6$DDf}xGeWhJfgEs`dp?<8GSIqK$86?l3xvqR7xAU-t9pR;@xYq^b1rMRJ`|5%ECA>GmH<9OJ9@)t~)a~566r#H_>IsWy&@^Vb-Qs$q%}hYwPxe!aMb-s2zfRf|o$$+J_)pQ~?IJ9`Fvo zg=nd`i>7|*&_e%?`r(aeJLeoIRw8@(^e`B|iG!%(p%8e+1zc`x2?eh?%7u8|VsH{d z%SorBteWl?V#)4xL}Rm1O!VBr^5*3;RmgZQThtpgx+X}SjF-XX<;H?@l@6wUDMzap z&$;_XQz%NV!&?0)2)~nr{u;Z;qwoiO_((kD3$aR{Ip>s3)JHt=sE$yUN&XPGDwKa| z(0KsU;FN}<`%fO;UGq2;*S)a`q{1atUh7F5&Etl*{jKDmLinVUy_s$ojUOC#_3>Q#-Uo**s!GPwl`yHETg*@9#=m z%1HM~Uk3KKPO?twXzNIJ1LNZ1P&y?HG7V0khp8Q3al?Xm+7IYE8p(EKsUb4x0onzu z0X?s`_@<=-d?F7s^`_}O;Isz!JiH%!2bl;NZJ8VAx1@zM1WA3IQsJc+XU3X_Ot8zO?T|ixG=Kn&`l>hK-KzE?I z7n>%x;DBNSVgId64BwdmW_uQbMcEHlr9nMD&nnPfZNSaqhBF_HPUccR2V9o!!0E4y zsrxJoW7}JJ-mg_K?EYQMb?7d<6tB{}F_W2T*fT3rEoRp#!9%@YfX~WKX<_hHFuuMN z9}LkFEk?ZK^$I)ihZERTf1k@@&f@D{-@x|q8r;$23>3xvhNDZ!>u$X)d&E;6ap_tE zQR5DI+02Ov+^8ukMC3|`XfW7O51*g6gUXRI&3$QZ>RG^(hkXEB;wwE?P*3+&>J?kl z2V%!fAjZrB+Q$^5)8kJ}$H-WWX>7$_yEO%`?l$qLg#z%4AU2^e|-bLg!3#<|36T?2CZmOmmf%CZ+t=&pFfpCmF5^v8O~KBrC+@gZMeL(S zNX+cb&t+-~4W+5*b-@u$$5+GXvLGj??@^g?8k(RJ4Vo+X1o(+7Pu=P zRKFEL;dc(9|7a6WPz$t&Or#l+L4Ic0DZ^-)-TZG5gbz9miK@k{$fF0WxceI>MU(bH zxwNL#YF^@f9%GEoVn_HV@H|=pv1UlTm}cfva|12d`TtRL<^eJ8Ul?zZG^ItzO;R!< z$r57T^CHO-vV_RUk}O@u61ODXl0=eaB$8ywl9VJ<^F41$vW#S@L`H-}GPi`13F6a^K$t>+>#~%{Z?kI^-!e;O`jf(!Dwh{P)oRnC1dulUq>wbRd)2r-1RW^C0~b z&Ruv$y;e8dG2~`3HkZzVIhj=u^_4mR>AhF!CWi?*XFh6t?B))Oq&r(syx1@usD;v9B8!Kk(t2z_4w=3yh4$Rw6s_SF~USEng8 zrQ=!b!B~*`ng~_D9)MS$GEOwdnRuNesUz_ihOZEFQFN9OO|9S@dYwm)J@X~bK``enp>xSoQ9%&FkDVJt5DkDkejT$IwjSya@8-Ut(n*HG|mI}`(~KY zeGHbI(&Fv<-Gi$B`DmGICZzQ_1~bMJ*YCJU2wBiYu)KT?Q{TO3QGeX9z4JBHERTUK zngOo%IZFMKJJI~(AUwH=^5XF~P%-2-TBR)ni*X|{bi)P+oHqwWvzI}P{u3^oe8N^! 
zi1+cJlvC%wReJ5oFO^3P6ojJPX)#5T&E;KvLg&Y#0}bD&oqa^e5bTr!7QNX4^KD_J`YcLRJF# zOMXz-+6Ol*$?QN)PAw?13zb&ej)D4+2^V+%EQ)6gW6ND?K@wdF%_G`jS*Rf@bmApx zZ|Xs&ST5Og@H@!9#Zm9K8Rt@Z5nPuCpq1|>w$8F1J@*!%F?q+^>K{PAai)CO$T*Z_ z-UN4t^Qb9%it3SV678#xB+EZ;2d|n(oJX@hCkh!(IVMAXF0pwmC^x4*{g{h7>VP#} zJ)m^v7-FD4X0TMBd|)C$YW0ZI{g3u5JLF?CTDPDJ8^*PF1f%(rXDrg2_(~K~*dax12K5`V zB(j48(82H~Xv+QpRbOpEJbe@=wJFCU$|AJ5EC#qln%U5I#LzCr36%eGFi8WsTYs)# zm_1evjX^CVePzb8qs+OU&JR1D;u{Ms0e`PW%~~IB&fx#Z8_~$wyf_c17v6s>a zi;7>?5>3l`7Phr3-*F%oRkqqpa*49kJ!CA)kDDR^W} zfu^-DL92T*YiGX1#`=zyzdobW#5xFBei9G8e}`JybY=;?#F@96ure>|ccyc0$-Fdd zBQD))ySFvR5IN6jHZGHy) z)4dCC|Lie(_PY#nPKx0^yIIEggP6GPKT!G?f`8A|;Pmb@OEt)1l}ik8Tv8llTxf*v z8`5FSlv=Pqm<8d_j&M<=-)m;IadL-&=xnJkM6h;{n$z#S!Itu7Ht4Nw20>Q}Sc3Cq zRO{!V_UIpw%|C=FYc*<_?UuwXCbreLV3g#)0LS`o5VG(axFy{Ba7;)lX40LV>m)Aq6+cy|%r;>+w?Q6_0yofyyxGrYGEuyaYJg#o#MDwl28 zoUSKiYt}&MKY5VwPFHA2n?!o~2{^J|OYrfn2A9voPriK=BM*1xquji~$D|1pwJ(5J z`~y}y8wmchKVsoOD&}Rm8|BlY;Lsq1?5TFx<32;FWD)t$uYmRXJy4pE0P2VRS>^IB zyoXz~l0-;2gbx9S4MFu2Q*QA!V?J=17?vG30H?k8a7O)o(43x!p*?RyNI^1;t`_k* zT7^(?KM$-=4bdRv+xHzbBb^+<}H^e(4|EjgIB>FA6ldWqpt#K~OpPuB>uP{v9 zEr9F$pIl+)A5KAt#SN>v^7W>jAkj zLPnI%=-k7tIJPerEw8h@75Q>>tj(K9E3ggRAEruCH5xT0BirsWy+Kvkfz=S zcmImP?=i#_`iFQ|Lua%4hbd4^{%nujaZKv&3_bFw$99L2BqrhmtCk-EBlQJb*HM7s zBAR`>n(#H2Rn$3<3Fbb(K%@1U%eH%obyG?p_lj?BB%((=akz>U;FAi>lRd$?nFzCw&&3iolI27-M)GCT6m z&1kK|gvp8Y{xIfbnU!32i8X|}x?|A3vshcW3VRfG7K+n8qo=+DD5{H@`Mw35>{~AL zlw1MN8Ez=nP3H8czJTV988AxkKF;WI0p*8sB*u3|#N00A#5=Y#qkhz<;-_Kq`0L>3 zNt&wOI`;8n4Yc@sq2n|&sTcX8Jo^rdr|&4$NmmHlY9vVAUU6~Op4e+C?a9k` z=UN|$fR4~qaF{U#2ge!+uD=?Y#h*PO3KfHw+dkHo^c)=aRzq>rKV`Kwy3msme@7hlMSxF1(s!m{158_Inn8>YD>kILYlry&bBMB{?0>yJ? zpzLg;654?L4S&FWsS(P*G&9fguefmkcU0?ybJDK|xM-KlsLBpw;kVAQty(-cm%p!-TMn*H1-n8N~i~8 zDfxI4s2eb<9L=bQ$Z=^MUX<0~sAY#xwDB`{`sEH(b?88aET245sZjZO5R8(2L3Ou3 z5>LHd0C$$4td|6a{zLc4C;=M1b~JPSY215())bj^JNqfT80=~ChetVqXZEkg*_=|K2$mv^EVrjK?Y^ z7gS*L&V=s~_?uWJcQLd&4tXAyOxi?HQ$3aA#VVc?=RFb+BgalWDGsrbLg<{BsS*~$gSk^gOi4nJ-} zIBq-?4nFF@d@zVQfHS|b z$xeDghU;G_&ZVA>0WnZfdKk2Z^hM|X8K|0anR9v+&Eonm1(#phEMZ9n%za7>hZ!H3 zo6S`WgIj2$Is*;|%rIyqqQ@aCHov``de~k`R9zKJmhg(R^LYWAT;9T_8~;J*FCP%+ zzF>~Ge!#mD6Fy^w8H5WusNL(gQrCmnIjtR-8*d~i231Nl^W8uO3{^i!r+rFZck^xF zU!;u{I`=?pWUMmaa1Ge>I08oPSFv)^eoW1gF{^Q#$rqPJu({XhtD6Q9F=fz@d;mS9 z0ZQ%k8xr&SDCQA2R9UiIn{WAQA|%s!*!5T*h#yUov}tN_S(qN)n_Z21mRC@+G7{&| ztW{0^D|vWZo^>VZE2rObia)x7se3fa_rGOMvRzow^DcZp^9je%Z{hlQI(P&>r@43o zI5$zx-AeKiIF3Mdo*s%^HlXeH8@NfJJXhrju2j*8@9fnO)AWa{sr-ef^S^cU;?vygu&kF4hAl0@WcwUgcH#u6+kCm$_SX;?-y2nt#a#aQBDA(Q;%Cf$ z2yQ)gql!3A;uA`y{eC$Y6xR>52TI1uUg%-edde7@bSGX)heWSz0lNOajByDXJT;4Q z#HmNYiFDxlunjO}zK)PQ`5^Uloo4NC080}GqvwyM=sW)#cC3v8ky{r&LcEo8+WCcZ zCoSvz5M(*!w3mjE#b{(dOVby$(fd5}1Ew z6X&si7=ABMU~|j@h%xZMWkEWE(_>;^7id9{-wmkc|1b;Po#5Vk5X41agGlQFP;3=L z(bYOSGgqTLKqN7@{KW#CTETnh9VoCjgiYD?@b@w^KHsGZ0+vhzb@gM(j2D;S+b(0i zAlZ=D-n5QIolb|OvU090t0y|hGf`!~h3S7~;D7TvlsW#vX4eKtcKwc(VcGEQFB8H1 z@K-3D|C^QFEoDhFuA;~LecTMUo!~$13Okfrjxk5)qh#nk)GQG*=e0hNp1Btt#~^4{ z?`4YB_L8KL(M+p+H*>zT9A?bc6B4RALCworVu=X<-xpwr=s4UnC!dQA?F{s6us!($ z6irm){RMiCQoH(J)!kj``u+&<)Be$_EheAibt z7`73w-inDIy_FPrXyf+}h3FJQ@8vIboX7KlOzYktPV;3PD2(na)n`U4#luE&$)hi0 z>if4~Y`G75l*dEjqFOBedm+?J*n+q8y9!?&MS|7XNUoONr^{0UVZ~D&UUAU?+uwM@ zn?bfX=FBT5P3ntIe|^H#ZAUp*i?5t2(2)yXy&hCcl0mZP40Vov%99tXI8n0=m<-hy zqz^2y^S?TL+@UMr5m&Fo4+`o8uYvDrq$f2xKv}O4G}Xwl%5)d5-V=_)-!qz8bl<(hH+4yHHlYBl<}b$7-XbB)rx3l56DNGcC)Ay2$n*93LP}LK6jR5u zy5)|f==NT)zeah_%Arhdx{dKb?{T&?A2Ewkh)IBul6SLAO2d(HmN?DI4rN`u8Nzm_osJR`E zTLsJDk zt+j=ur+vXcdmiM^e}JAFso!yZ-_aMMicmY@4oEjB*?Hm>OV`cggdydg;9GQ5h- 
z>_glb_nu5@{Djq(w_u}-wxC$R!@^x*P_*tpj47LrwNZOfd)pLj`zsxDKfZ$@_qq!C zbrmrCIq8iSSCl!k?qbSCJzlzL09TXZhIcFWqNLzF*p0ahBD)gEU-AXCu0*gsb|!*N zWPdteEdbj^?VuXcgv*D%#iTo1AwH2ZhWXo|=Urn#``>D%B0x{5oR9!_ztZkB$%E52 zf27p-G0@$%7sYffhtMPtciy9{ z3VILX`YK7OgB(QTzJO^;EY@Dw$-EyLgWtp|%<%67E8X`LRa_fd( zhKs*7;CST&dgz<-9dl`Z*f~OyT0R3N_9O1%{UPW(G6Hpn67%N604C)oacUQ}L`&Y8 zKV*6r6WVjZ#j-ohP3*$Mpb1#ElHtO4t+;&lI&A2F3S#1(a_!*)Q5@O?b0*%wX5#21bxG#r7puow^tV8lF%vQ4 zVGd}rQ<&509H_rTo4}(#8HOgMU;G_*FIQ6E>;I#N5 zXE|{%NDl19Seu9FA|?Ol2?2~A*HHdH4cFf^;7hIj(cTJDr(XP9*IFA&Xp%+$7CT%4{hlpR_JdeIHoVO)aqlg~hMsS!Vly(B*HS4_C7z)&3> zKC|~+SyTP#Ox_q$7N;F>Q4$*%YK(GV0ue^;_=M;D>z7btUJ>|5N zDQMwm4wB*&yqu#aR6MwZ8AEO`#nDc}JA)LgoHYbByH~LMFDIyH;0sEci1YfYE1%xd z3|_Ujz)p7;xH%MJ+K_l?KJkG%g^A(8Vo)CPfO%|4V_RhgLL%z&wV$WK)>9vG{@c%R zcJeROzTv?l$yX`OwdRIDt^)6Qbf&!U5i4AiKy&{Ar~S4;89MDLb#gbLC^8;!Q!8fZ z@#z9JQ@?@NwhAU+cW$iEpM2#1 z(0tNw1#2~11{pqgI3JU%AiC;B<0C6o{NTY2Fx zVwlw%qBzeU5`IqsS+A3v`>Iw*)tCwnvl78+(-F??iJlNO;2bJWrj03zsKVxfbEr4+ zEVORl1mUTtS=^`#AfGgCjOPkF%-&Rk=3h6mVDoC0T5bgS8;giFUJg}H>Tvl&+V@SV zU`F&lADI0fd+em|D56AJl~9ZI)UTL*@C(MC{Ds5lnu&4R0jGCp(D=nk2=x622bMNc zHoTKyoEL#kpC6)UzymZ>ktQQwi58`oC{KHj752FUR(glI9#%gw|IsHj-_#SUMn49X zO*$(ydCt9mqa(PEXn?Y{|A1@DRct=68If|A^%otmrc)SLmxPFD_{r<&z zqs5@SABB|>QE1~Q2Js#fu%B@rk`AUq^QU7l-|ifS{PP_B9B!e+{wJgk?uqq{A`E|( zz%0BLL6LPQ!M%1P7T#S#xq?ktd-DK@ufGGooKILfpcyn#$xJ)(qeK#*_W+}oYDhRX6p}(J!Rz-a5VarU#O{i`gQ_Fw5Ht^hBD;gt z)FaTc>^vMwQGmSrQb}#!>u9^L8DyTTz~h%GL^OUx^``EazAGOy_LZ@4Su9RjUrswv z9xI&G=%XONPA?lssivK^?Oho7<}E}J2dmI#F!3fMS>R?LYMxMG`&KnXEDqs3lIAEa zhaCbHF=Q>SC(-$C5Qe`X53-27N;+?u*{EjB?+6EV(^KZ1J^{QY7cyAg2cuq+UUFu{ zSZA07ciS(Z*XCWE(ZwV@_PPt7b7&{luDOI+A*E0dFbK*l-*aV$rErt_p=BqAgLc|r zNz%PD+}Ww*y&RqmMgKLUZ}(#Gczj=3*sT&CWznwT=>w(tPa{}3ZyUN37vV%d10i|H zJ&f3<$VXEZ|EyX zTSolLfLbP7c#c_0i$L%E67YO{4BG#4L`9P}=RS2Z=9@>N73C$oKAlIuko!35tPZc% z&0xxeT@Z1{2g|blfl=cwq72iNEzk1NRs&c+s6Wa*$R9-eQrm61kFPPCK?kpJ|t!gsr*Xn0)AcruZ$4(L6H*v#-ylXs@1?^Rue) zzXmlXgSg>CTk(Pw<=(x1Qa|)T$ka9!n#QQ1((g7nzj_a~A9rHJKfxfDRZ!=36L?Y2 z+d}>wc*mEZWk?K)sy0AW+7#IHGqye)jg`E|blM*oXXmVJY+y2irU z<$K`XI`V?8ZKq7nQdkh6C0wKoX#3TNp#5qVYHF;RN3y_FvrCz{YaXk0+ziH#zhZy@ zAbW{?5b?9hwVM$Oja0bT%gy$+**Vvr%>VGDbN3 z%^Xg)fwa*B&G)9jl(`4VH?kJfiR<&d_b#v#Dq+_^172hElelfv1L2>}RU}5DO}Cx2 zx5~nHAM)w?Rx??INHD&gg>|AN@QytTAsb_GND48K$`*l9Xgs`=JwxZA^!{!(6Jj?; zkVj%QSN3lZ$Zj_&L-OjNVyql1z7~O2{Q$1|QU^qxXe4geE|3k_0`3(BII6=;kWah7 z4b!~s*_Z<(oUe}Gq-MdXF-EQ*#CL>>T@sIz!H5PZ}!A-5!Cn6X&cIP z#p$lCRI3zYXW?sFS=bZX~AWKH;=I+m(u!juMq(CvzD(0`A5f1mg*f zU^%lGEK(O>Xtas&DwKYw%^j#s{gO{a)Is{-J~Zv3?8)=HOzZna?umCLRyubFRmmu( z_S=RBWi-pz?L!>b0qUS#5^170xA1rv*q=|p5b}*`7M{ZN!hLAxcmUhG`lH+MLaddT z@Q=xp7!>S<^S2y>z&&~3Io&|W*xnCn`v1g$E`1?z!b9SkWpHKnJ2|ISk09LpEGsqI zhAs2XV9~ioXl>dAO;v?Ra23RA@@}pdp_k$UYkAchv3xw*)@_0u`&LZ2uZ3=xu7H-S zIcvIIhVlnhOuV<<#Y5eT$vc1L;sbYL(9iDFx&9T)s8<+fzQW`mbtv^)iS4#;!D$EM z^a~Am&vSO*BfSJMwNBvSi!5;0IBw0`W8wEwb_kidR3yK^VvBiG*$jdMr6AJkPLJ;5US zerHK*Dp=u}930-e73H17lu7X%#t*rV9y9DEvZZDc#kL1yy`GWx`KU#nzvKvYPm$-n zcRF!9lR@uLA1n?TKz#=$f~T_%pY*mX+753*_apTDY#YP^Le=1*b5pV@{~pLT7IW!( zFVK9^5rDaqF@Duq7*=r#TP~}ZA3bBGkD?&Mn^qwpTf>yB?q8eHY-#LyURv4lb%f_TBP1)LzM8Zbd1mHC9(} zO#V)KX9@)M+|Q{p|798Tf-vn@D5@@v;VctQ;h!U&go}^bpmY)CS({!mkK3J<(py8Z z!SN+b*)3odJ!38GydX8859fO2EBDxUCxmtl0+n$M*1q|}s^{lI5oIMox(l;POHphb z5Bfuh#r2!??%72k>peO*@%%%~z4e`X4Y%jIetJUP+n1p|ZYGRAtS>AJIg3(-hP!is z*gV~5)4M926GzFF?Rl^0*_+RNQukrZ(xo8;U->4e!3qre@lgBv*RcV7*T>3>c`_TclABe|wN(;=5MO>PV0Mbq!?E9fl zSZlov;yeEbqSvIIPp5Or*mg|X`Hqzhuz*8!-fO4xmik{QH=~Ajg{!YYMBOvY@6iM* z7j5qJuX0>A!jzwJs|c5M4~O74)CD+TH|95;gfXm{oADQM4iD?{tAE@;jf67clk#9) 
zA$h`g64PMJ4S@M;!1=!)kh1?IlzQdCm#>C`BCiPa4UWLC{suzM+dGs6GE_<)rb3$} zocQcdFk<3bEE;zc?#@lb=TA-eXCXSmbnDexmO5NBH>o zaY*cO2cLwHR=8;>^jl2**vUG))x1)yJTM8~t`!No7fR9QL?4Wt7(>0mHXvP+h$V@i zP;H+M>Xp>D&~v(Se^pmOW+BFLJuX4^?q%rF{S~^LaYJY^0xjbEmE9(eNWXhr&8>Nu z&>P_1!dGziWg#}3zlG+zB~VR&ub*%WE2)1XvZfKNjL$=eb9jL0)(Ahfi;0AuQm3vBWP zGtHYbuO+w}REAj)Tga zcvR7MT_sOMhq;4MZg)}HUcph%@?AEy`aiU|=SEr88m|5P77VSUGuq=Mrt!amDP1Xh znqJOgth)kqDuL^(&G>iWT?MTfsbj^e1UfUNu+d=#eB8`OSaR?Q+LMlvx_b^WmkouI zfZsUd7|mgmH$(A{jj(QZXQ3_l0!D4g#6W*tA!?{4#)!XiS_k)zZLxj8osBRQGE9s> zmSn{e@6$7MBzXxh60_LlEvm1*z=M^?QKfvr$ybb5#!PDmy2@CR^B=D4aw;pZyvRiK zJa9c2gsIEpFnM7*>Us5q=1X>%TAhV<)hF@T5d(fryS8vBqyZJ@ZCFW3Ci-lTgorq2 z%83plW|$s0CO$#O&d(sOZZ}A7Ttxq1>c895Nr-da0ToB-zCAOWjk`(tp)pH9^0f7AnO`L*lQE@nQRD#+63|}T#N%xHkmq0Nndf_mFMza9}bR-2K_JO8$WNr*L&Ea z_+13ZKT{V>({Pmin+fSfufeL;2t7Pkp>)Yn(3Zc3InC{m-aQLLY%1YR&$FQ54J7h~ z@nfIF-G=&KCvnb;M;K$B!7bl&2jaTu^38X=u`t*VlsWpm`%N7IHjIaNfjQXV&{^0q z(@<#dGZf!VxdMU zAkWxSFUn_FW0e0j=>O4#PZ(Q@E^A%6jI<(_5UDRnzU+jc(Ya`DR*Tnij0D&FABbJE z3eAH;SVk>_vc5Vvys;Q`L+(RRLj~SVia|Tn;)|cRVaoR&s24|r-g3$r=y-7#$OGcu z*&WrFCvm6A|07;$2L-u%P}ZUa!FI+%mcut}Z*nKUV=AY{V;E9jjH_3f@u8>FDf{Mw zT3-*+^|OT7IKJrJbO8_VCy(%X9lqmZCCC;$U|L~fPBk(TGBoX+y6_?=KffKdTy}GJ zgD6{*nTV(BAA^-0ar0JmADiR!6ug&_=VZ7kXJ!5X)-7qo+Rv|8t=UURJ7xl*y_R6r zhZlI3o}m$q4XkEf2b3LYhZH>#xL;e2>hiUcjI)LK{2}Sg-%#Q|>@JshEdvVYUPY14 zb;$2ohQjQw{F{C{g8#08SbOLv6HR-<$`$&2a*rFBddZ0^GaygS2x4)XkcX6VDIOPX zSMe8)zxWsIZ$Ackjt3sgs>AsK#CO|M z0jk7kuE)%aSmdrHX!W#{gr7RW4O~_S^_MC8khw&X?r@*-NyTvJKoL5x$%lql?X(y9 z%+0PR&&ht86%Vh$?^`Z_;TFnXM{k3ahl%9hxQFhYjqzduakK4nQ2N>dY7bq&h+hB! zA9Q$WmJX*Gn#5wR3?BEVg`(>Ykg&%GSMN zLAiUlgs6cijam%yw^KO9K4Zz~Zl=5}gYM9ZBh^9q8b>l6Y*oywi*{D4>}n z^1x;6ZKlg_oJILvb9$~1F5+tE#&7}jjA%a)0pc6Gm8v)~mp?KI99uubln-gJ=140s zUmu|MRmNrfvj8L5FxW!<2vyCuh#@kGi^=JMPyW6~UXL`;j-uUYgqrij67E z3%Yx5P|xTCu69EkR`%(OiZyvGyucPz1Lkm{r^chSS0AoLd4f}~b0XH+GVpDvLEGL8 zjI}9qG4MNcsrEt1aU;RV^(O=_wFKL-1t>ewW$e1Ia@a~7VLeR)D0)udR4)z@$4Cm^ zd#7N7whCoET+t(ICK%_x#)K|MaQt>%K{Q1M$vgI-$81NX>%!+?B+rH^umkA|MMp< zV1hN}L5L-x$r;MUj8(EEx=te;|K(zOAL67(1x~p-3Qs3jknZcNjJSE3)7_qmI5-Kt zbchcv*+O$>7;eobUyuEDWiPGQ=v7bq$2#%^92myMZR;XLi92wanD!n~6JedxKDg_; z55rFxfpmp|Q0qSpZL+MeY2jlW5oabyH?=bLg$iXXb(iTG=tKMu@=RU+#G0=)qUQW6 zcxP%PEE{UV2d8~uy7$kb&n4o~E&0EH@HtLMGe`NxKyF6B9msaQL_MTS;qMj^uUP8> ziv1ML>=uMF>mN#)nH{dlYbPepcUV0>1K<3N!$_kPlzqsgd+6hsRPQLP{U;H<3{GJ3 zj(9wmWh|(hsw6Pdf_RLF@^lSTP*q%kSU(ZcxNOSLk>A$!8KiEW$aQ=>jvkh4A=N61 zJA3{ROz_^%RSd`k*TUb}GT}5+ytoU8>1jWO$vf|k@ovZi-Cs8uJFvB&B`Uh#*EF+5Ft+|eNV zJq-)A&SBVrdRTL%opQ^2iNo<3bZ3e9lArb9UZsWMH{!V5p<7snX%g{9O$F7#6c(d* zoX(!t;j}IJq;1wJ&4U{_i(A@wbxkV%SgFewHmjldniN_zeIWSaD}1+N4@4SVrVN55 zx_;2$!d~s58N40U0}aXhZNep;9uJ8R9+3~_33zVR;zh!H<|R7-=4Wcjd-6{5Zp23@ zzFC1Tv8FlT-vh%AX08d_mcCF=K6$swC0b|zV{X$ z(=0H>m0pH@7-WYN)9{)FQx5AY1nB+7gwJ!af_6UQr}0espbm;F(je$SB5s=T3e-oc zF{4i`^PbudR-d^JUM+Q)tV@1Z>I|Ta+q*u`!N=<-=uM(C(ds$qvGc@O zm+nze={psTcP66W8e-rzUy@e`=|iDVZ?vG_F0)| zW5|CGdI0jljgmJfv;}o|xg?~0AILH#TuWj$SN7mL#t*)TdY1y>_f7JfbRC5*$qP_A zS4XfK`V{q67DFMs$GnpEVvNo~rgm?FB(G{FS-uZjy6N$@4~eVt&<6u^Xy@!%0$!uF z1S|Okym#*vI*cksYx3Wh8LouY6EZ31a}Zt9iy$|?PV%O*3&b6>Ko8Q{ZP%2e>uxY2fndBzjFzVEJ!tA;9i3xGZK&YqAy#y>tyUR&iXw zGHpT2zALnM>dHs{Dxhw}0&L=5Kt|>$%(kz>(8J_YzdDW?5wEXMu^EyU8lWt>5}Y22 zpi)6SjoJ6G!TvDV^*;eI874x~-w8~2=~3`l{SOqbTLLog9r&&v{ms#I;;_|>y{9tZ zi%tm`l`<3E&(MCVXfE4-NLT35Rt&)xeDT8(;t4<4&Q>qkP5HE=7`B1*_JXVAi~EDx z?;4qOZ#XP`+X6ifkmqpCYKRR?AU@wPrNz%ln7XKd)9-i=q65t+KJYJS=lMZW;9YLU zG0%g1E01ls#WI4PL%oi6CPxGy-_#DePrdT)-5 z<2*g4L4XBuV}pL9=UD?7wTZGR55~Ycr5UfXJi|r4k3+jfnGo+wd(6#8v9>CXv*^|Z 
z+QUS2e?($qT{4O#y_h;RLlWOT7T4U@5+dAJakA?wtoKoZ7}XM`=N>Geej>%szMN}C zJua{LioUK1#ND-USaj7>#Z(@8rQE^uucM@Vxri0_SR&-o{3}@J1#8DSdVTxQE zT&IS?n9=JnduBG)F8z;NCnBvy-itVFfYItA43_<3ws!Y0x2BT-g>R`7p$A$G`pT@f z_JHhNyFlZ<42@eSLcTFWf6*yWO&m|&y`^Zr{xMe@SIK!DAe}n=57vJ>i?Jcm;NVK< zKift;McieDDY4v?^cf!{G40hEO14eS1`c=bO-QOrH&XZ{F z7IRUaBf<6jCk&cVzJw^zt+(-l|c? zf4>M3H%ma1JdgAH`~@6$6Stu=^|!^){xbg_%JnH39zPPwUdC{ei*8e?x%_0`2202c>G5uv+Ks? zwtOH?_i7whcos*@)fF~nDY4-2VMto!go@^SSX6czt748|qT&(l+v?G4=~_2XZ;h)d((0Iv>pgjM=c1Q|LLP zCu9^q#G1mf2+ir_?~K5tgT#?7dx45hCd?!M6mi4yQ2ll)R)^HW`58Jqw9xlx*s9c@ zmH=82@ywHcm$2>E;m!Yd{680C=z2fY9@v!UUttU)i5xf3SwJVxP-ts>f?_u_7W6MY z1D8w$?^V4){Gsl1ciKY8^A8{SDNSpb6i8Jd`g6-vtan$|2VBS(e z&rK1pR`kkB5cR)%>9Ec`pn9Q$PlKcE$ z2-%v9?yrcKbxy@;yjzH4@*d-MB!Xs71QRd5#lgND@W_n==afGXRr>}?yenv)5#?$L zodtFC3Fb7nE3a=t_g&7TyiF~|5ExCpPA-=?m-=$dhL`Y;IyW6p5$oi7B&2_R1<4yy zApb}#rq0rE8rfpbBXL4r!^~=^Ax+4qR|1HS7I6WCr-09ZN9f|P7bDWHGHux!)^7J6 zR2_jJ<_xg;ausUE^7zPGODH+^7~BnYc%yD2{>-ON!j5BI1X+9vjx#8QZlt?vPYHvx z`7_bK-(0j_9Rab9ham1$IgCHAF9dt&2~}EOP#*N3@^5!zKF*5#RZ}lQ;j}3jaNG!O z|EfiiRysGKM=BWQR$;LA7yMrNggP(-Adx=+OU{`I1;n^Y>K4h8mQ~{%v#;P0mN`~x z(gw1-2hjXxIP<9gm>2Bzhn>5sCwSza}UtO=d91GYe!=*KW(1UIm~h*T}#fDx3EbNN2rU7*GP$TJbjowfMv zejpL$;1+0O1Y8l5G%xKH|z1{C$!l)WGWuTR0>5^JaG4Iv z_5Li$?%f+24wEL+Siq&zJ~VaacUHE#6`Urm!Jyf?So}Q+<)$|z3fBxt`{`|X5o*Bl zE#)hW)LiMpHXJ=uhu5{d2}yq`ncI*eeE+8tZ*TmPJXPnwe_S1ma{CBQKC?mMb_3(m z`{Jwu5wGPdM#Ut`5MOwTkk*xEtM{0@{ihP}E;=_ALCwq%h$4M!0WkqPWpw4oJ>HG& zP9N}%wHXv{{Yb2(UYPA}$jc(NS@EdO=(*MflHoflvUMd@yZ!@>Ne8EHz7FOde__kF zBv56=bK*YZC05W2DtC2(k7++}R7C-Zmpsn%yf+!FbOWJu{(8_p*3NzZA4O*#RnynT z@vi21l9FUdN|G}C%016U$Z(M%WXKRQB#|UTD5*$FDjCus6-kDqy8AgPO;k!k)0H8K zN>oS#-u?b-Ez7d%oU`}ye7~Ph_f3?Y`x`nO3ur_^Eyi^8fm2u%CJtvzJl4V9#Ae?P z^@#}mQ_*WfEt-9BMn9{?oKD|EVAEwz{Pr_OI2l6lqckY(b)jlzjN7`9<>W@D!q{V~ zL@!d2kCOif^M*ZT*+B=E-AtyJZ4}AD>K~jzekF~Cub`q-groFa(Y9|rm}Rk_tM7Hz zSuUZ%|9sJHQVj;noy38nE#Mi!GB!4+Xn=Sk)XZE2c7N2s{(1SMX@qA5rzI66Vh-Y1rAP0GNH)Edr zAjSu%?iIT_TcW+`!Uhla|D0F_rQZS}=w~iweaVD^sUfU4HdE+P%W_K_ zZgCFgZ>hENEjXOXJX(jJFfRw|5%tZ7_=T5I?XwB^jGK#DKJ%#At_tk*P6OBAWUyOb zihT(UFygodFFeh7BBe)RdWIM>GcIEK=F?R2#FM>`?CATC+2~))gU7vgIQHfu#x!n+ z0gGO+Ub+xn=bb~HS)JfVHiChlBItR21T@;i_7La5_}Sx}(%%NC#=3OP!H;N%^d~M? 
zE{}C26F7;K44-PZ8s<;vLVK$;)OjAve)4KWd)y9c5&8)v6=Q|9jHlo|;DCGWgCOgx zm5qhQQ4lpPWtsZvD0zHpTJ@0y;K`mT$>-x@ryXrr;k*K@kA|Udo*b8ET@9l6OPF+Q z7B)y!iFrU88h*+H|1DqO)y*D=e#IE5s@oy_KKs7MWP(b;MVPMt9a5xnv1Z0|{PI3(h#LklPy` zhNWlHIaiEAr?dW?waRJEy-diNo$vvRA>S~2K|d{RS^)W5GO%Pt5LkCDhbt-?CaPR`_UtR3E%7i*ZZM8E{SNLwM1N)ek;ELHS9Dc>RUmYEvgM?$KQC z-9DD<**Z<6)y!F2pTmk9^C5=ixI`-hg-Rkni1HJo&o0)Gjd_ECZc==vX$Z@dGmd@MHH>Y1h!#ohw7Tyj4ZfSiJ-;B& zc#b8oA#nh`FS7mr+2c^Lll8B+H*rqV%G|pNS5UsA5Us~%L%DMeobwlu_kDX9zqSsY z#y;xz~rpOOyb)d`K`IvR@672h>&d2AEgORx=V0(diBu29D^B)BTt0??$0$ zswUs}ISZ{Wl|jC^hg0gU;9?59slI|VUn*|oyvLrV;jMR2V@4brRyIOn`(Ne^ibSV3 z0hswmf}Zooqx(TEU?>zcwmd_#I~B-&Geu(K!TQXukx;+yD%h$qkBKG^6K2a1Im#H5 zZj-6R)|E!=%>~0c89v&4E^5Aci=tZwP~IPeF=6IV+tmp9p&guis0xI5r=atQNvJ0J ziuHD%AkF+aWM>@U($zaS_&plkO{5tjAyKO zU&a{;xXXN}t?2*e9Ofz{pp#h^C>6b>rHLsZ6&){*z+ZGLupM)m8#tWJ1oL|ud}DSL z^z3GP*8o{QqIClrZzvHbR82&IsE+m|tMK~gUtp@*E>Jsv5)$1$fq0Y>u~TBcNn=fx ziTXxkJm#TF&mY#cRU$idg`ADtkuL>zc!?iRswz?(-B)=-`zJ;zge!eEL7sUD|-Hxh2q}EW^j_n+)d@<%o-CEa%z4 zx;=-7axM=BLHnW#FdK3jJy8awY!vx=`xNNj>Vp!^!*rr9+o>u(0*Rp?mwm#Sbx^v% z_-qqe?q&a{BF2wrZpI{!n>1mY2x6*kKt<|#)OL0TE01T;{ZAK8E@SSPy|Iw`BOVOS zEko=#VE5nu*xkMdtJ!Dt8(MC@7SXIJ|Cza>I1(EnpnqU2uK9O`K4 zmOpTt#>M;(dMJmQF~7!btxYKwU4@?dTKMLr^9?w zo{Z5sZ6V|Gl%imM4!Q<0PxY|!Y~MEtkY#Di?Ttu=lHGZrFYZB~$F~6XDHE0Hp=fMf z#vKd(h(0ewi8{K8rCg8o8=@?@&9gkketa z;l#2S99gME%%6RM-(8<*t%?^^?pEZBOst`F-%(ER*oryPV<27r61Q@9F|7W)3%uF+ z1)pfZY2PF3I}=GTRSuu`GaR~e%5E)rUsYf!W{7&3e6(d~B?>szivr`I-Ik*_*< zHn9#Sp8^iacWK0uYZ#$ajI(DyKuODcxVu)4k8r7mqPK;h^xsF?G0cUQ9LdEeef?;) zFdci0PJ)*Qb4WP8hs2*((YSJ*(0ghS^ZfmQIFW#-(u4T=HTSXTkq8?(31rM-=a$Qo zX`9CDzzHvPK6J<(z+v-n`{`IM9#eVHHP1*g@EiN zRB;PpE^cKqk9|Kpy#(Y&6WiflT!M@~0vjSDQ8(5X(DV*iTUpTm=E{)+VQlB(^N(7@ z2B0^?C}{`9QYq%Sa>^fr@g*8iJuaOpeHq7mn2%_>gfTFO)?v?%@u(d>hhFU54@KjS zuzboz!2$8cG-cg0Rc-?_Atb?7Q< zh57T@Zout1)arRa$3NzmzuN_p1QFL6!dz*iFHz$?YUr4F4wH`jq1s>sPu|Edzkxb& zeff*|x)y;;Y$+%BeMGFg;Rc9ySzw3tb#8LA3^~>ji}|OXqRURigTK2l|9dt_hBVvc zPOZXXC0X9$N-w0Df2Pu@Vk0}h*dPA{t~$mb zIZ-6OJ0S{1jvJ}f)lSs8{TK)TlIGJr3)%eRJqSWNF+=GEt~F_cEVdtaUpFzwLCprF zl^HLKWxK3*4+2s1FpOwaCE8D(g54}OYy4P)233qPH6n=X@lr*HsrRsiIE2dz07&90#KPyy8T4Pmp(&tSabk+{kKFVyb|go_HzSj9M*E>mAq?Mwf_ zh;jurENy0gtpzO*)d%3-e}HLy`NpjtDwhkHe=#U zpa(5CW9g4i@P26r%VO5!vI~@XCofa!V8>PyD(+WVQy{@Vzv ztXYOMiQNs?MWObQk(}nG+o%zMmcs*W$9G^GQwT6yuRA}OLKy&DA=D}-7W z>cs!wRXAAQ$o*wZ`i!Z0*r+beOI~GgYPvNTYj+8fK7`QVv5LfNUod$1KSQYnXE=*T zwKU08lXG8qgStB_^P*YZ;_~H;wbc*Qu#Isg9Te~i%bRAY-xW(1jfD0a)tKRQ4?;G4 zWL|JTR12Ph*;Ou7WG@HqRm?IT$+E)XMO^d0&zzJT%cxWiqf0NxgZBPzNKK3crvN+J zePSrsKP6zF^9i5iccb5e)zmotxmaS+k>kBI2ExlKu;njvH5W#pJK0Iw5Bk9bM@8OT z;R)U!DNQ_OGtg{$3~af@xLs+LjEV7zKJF9{mBMeJ&E28;Px|3-2Xm7tkEErOUr`^n zr*s^5itUlBg%16uj9n%J-51-@-RuB0=JLfVj_UkfRROV9ibLZSvb2@WqkJYwk)$8D zP8lWu~{FNk1ARrgRC5b~3;F`XlH%NsTuf2xQ-1XYk%vM}NO{fxlyJ zf|QONWZ64#skeSWaPn0y$>jlhm*vu;dtV^^yaEK`9(WfhN!KK@k4z%-%nC#JneQ)-}rN2z2hDi zuKWYCZ*3HsDZB#*ODU8+>JA;cTe-f8SI}EvjnbnvcZs$XotlWz+N;6?ax-u^`k>X49wsY?9R50Os zE}LDCg+Yz%-gl}=oNjG~nnzjp-}M1x-syxPmECFe=7Y(C5MgdzLLr* zJ-ooxe%y;@Pr^~(_7U6fnW4}7OW;tL#~qt|0n+aO#bFFmsj(Y}$rWun6Un6t1J}>@MucJ%FLo9NJHP zgX(A0__P^kAgaF?(|*T7EgVJvacuYg`2|Q01&a0A47@096GZQvjD7ixM_{@EbkDzH zJN1(w8rRPG-R5PG+w;q+0FHj4QEGn`0q$A{|`Rq0AIi+(L7y95fxVBf7EMjw+ zwGSSn({`_uNSaqvmb>2>%t$*d#g5OALGb2V3&b3kq^8ABJC;ENahA} z?d=r%1e^z9%1bC5#5$+OKe)~a#v6GQMxBqc?y>rB>h8ZOCttsp^I5SQ{Aa#Eyo_ia zy%f6HH`brA|GKpUpoFAjMFrzTWDf z52;-`<53s-LhGq%=zs47eqP@RE1&&_gEHUIbv2LKL#276O=TER5eZ4h&u}qUB~)j> zB9WRGE$()`$yk0q5MAqsO;ane;=%!_GHU?YOP!GAXASw&-_iU8C$L*5O)%*kx*Sr4 z^nphTV-buk`2?d?KXzd`6ed1fFGdK(@iq*m0Ws5jz{4*NRzZ-Sd 
zbmIO>MG~ye*bw6gY-+oN5%Hfn?}uXAdTk}jheV=U`wqt0YT+~tWl2vt!rlpIFyKTA zBu&txxo)+n+oF#C+H9uqRfVM4rDM#?i~sGIp88K=m1q#Lo1jdFn91YTVr&lyW_SL^M^o9VPMr)CZ39NT~{3@49+Mv&^19O)|qUMiB>~6FaQft3J zLf<2l{QfPTeJ2F&DRx1CnHo_FPD1}ZuTiH%i6~V)p=P%~f#7i~N^8ZUpwJGJ4(~vz zxG_+p7>k=!A`q@ThqcFoQLQl(jPhXH{pr7Hd#()MH}XBG zExe6JC%bT9?kz|&Qy@t$M``M4IjHXCsf$}C_4cU70gsi~_dFkxE*!=9w~EjnUjQwO zFM!L?F6td;h`E-GPk7sr^$GW3zST4MG*g4OIDQSI#@s=ZHUXJ3fiaU}VxZdhHPo9{ zV$v!hm-;~oY|hINr)OR$ao3sVl6V@otUm*B!_@ehQ%%707OUF-dIggBGHl%02d(98 z=%nPwTq^@K!dxV5J|)Zg#(Ox^s&?j?oddoFMqEJ6I>xEbV=P2l)NZimUTbTTh?CnP zy7US2w2Q!MTr1ReWZNPIdsj7$2mkorA7fWdd(()eOy6q!nkjZf8y6E#`Ze(wj&S~Z+=cb>>? zS+B&;v4#YVw1TzFBMWNAk+Z%GTBl47pJO^naUI{Ts_ojKJO zTD-%Q6P&m~3_c?ZQ0Hta#9q_n1#W9-mWzP%+;IuBs)U@O_j{aK9)R&88D7enapTUp z(S*e%*ccoQd6_5BtV9l4JsEq0%@9LLGk$B-AkAmJ*cm8_u6}s{CEtC(bc`w~Fz&=~ zwF*pc9f4Rj7M)C7~;g}d-nUzEIDApk`h{Hjwm($Ka zg>b$ZjC3+ij^kk5_m4{4Ww?$X54pq2r7R;HPZO3egkv_DsBgvY zvof==)p;T44k?3}tG+B}UC+A5x?ujI0%CS6qFLv1u-$YNC9)?lqaq6I)D2MZW)lt7 z_Cf2va(wFagOI=V6F2b$%jll07HbdH*vB&HrH#U>V#a4kn6#bLuy`uU+yO^EZmegWva3EIH&W4z+ z@4=OIt(AOLF#nPrRu3Kq5$|N-a_A{+TKo)C=1P;1A?(cLK92PiQy}zeFJt^mpxIjr z%vya=TH=eARna)vEChARE}+F~0?rM8A*rVZ(>>N==wkMcN?1s9Ju7kQFHQ1uQ!{&( zc2A4=J727`>9MFiteB(C>V&Zn zXx6G5Vv7n(RDL#yJl`WvY}enWqOqr@HQnsPERAsC{ApjoyT}ZJmY)V=4}$)Q*D+)l zkIfd9)c;!mh`Q#`j_J*CIr0p|NLHYM$p{pArQyJ*U2Jao57>@b?7yTEGSXQeUiAw^ zbt>>BsqAikG70|n#X_IaH*}Y)hwNd^bknz+;G~hoc)DXa@$oe9@$tmg0XYoWoCHcO zPG}LscITd#Kr&SY4BC#t@BP21-TEQu-=|2@W%D@6QE3_)l8i;x&+u)V9I-#W5{}u* z@RAf6y82`jK5q+yQmdU1kvEaEJ+KxmG4t+ONiM4>-kG1A?sg`iQ|-@6(8VQ^cYfz2da>|FFY@ zF;eE-#fDN@V*0KZTkF1nwSpAQYRtxq|8`>55+cqjF{b4sV%T@w8oKHg`PM&EpyC7L z8|Zn1!rQNyu8@X~6$@b0o)vKWnmjM%HG*qC^^FECc#PUM3%TYy@6ahsnbW>ZL3W%b zM)cjG9ct6qyPfA+Hl78&x?Sj{83{4>^uX=}^Fo|E$h?Gq(A=j4mD0*+gkcXz2BnJA zk3C>6zC+Y|XDF4(-4GrQ{0e424#VCVr$B%FHOM?2gDV}*g0ZHfP^rES48F|4)ZRug z?>`6n(w*pNn}~UG=V+o$805cTTxQ7v9QCRmQegFxxm=ST zEzi!yj=V8cQu$BpI(!;9$Q814qZ5>V{LWe2$Y*T3RKWMPs<8^P4Tdlz#mmWm&4xfYRDgG!DKI3d2u`CR8l}U9-la5z6(PK|Bl(ucc zzxiiSs{Jf=J$4GCR4zagS-?e`Ie^8E%V1|+51zHdq544BUp*wvOk+;WGb^oZ&_wPE>B!BaRK=L^abJN^Eh(hFpw=j2{lU< zapi0FEo};CIrvKEKYWG3$C@Bz%Ok949|@wPnE=DuA;N4Zm%qdU5-MMSPIMAxk6O>A zD{RJ=;COWWrh^W$%=OSyhc=?`5PGc;W?TM%0q?s|T|bCF?89;~+gX>&JdEXv88>Lh zNjyA80`0yx&>@gg$%9n!lYV(p9`^+US>~spUkn2(3Sd*%!zHyPfb-}^$a;~_3C6gv zUX>g_dDVM7R`3q|epzzyVg+)N?do0EsY7VhEVP@$o}nuVVyD%cQBuPEeb4@bde5im z7Wow&b=Y~DbyT1N|{TVc0!=CwK=5qH` z=Cg+~K9R{7ZXm}SE_U^zq;4$jF73g3+ZX^P7ZC3Hg_5pYIhtm_pvc1<9iHEZpm~hT z^`CHXfrSSCzgqwv{{LCr1a8U_yF;P zE?}wZLF!|HK4a~rKbSth3{xViFz*24@Y`tc zJ`X-ZJP8MNV5V6pbKIv-3$C~b8CO0+gbQ=3%Kt)veHseVkjBi{fz)m9P^yzPNigbRHMl2xpl4bd+h@O`o|S2+WWNyd zpS*?WA7f#mmL{oT?o{^dvQeMFz+*h)wpfZEuT&zj3(7Gm zEsxuDPn8I=yFj>)Ibfue#WgR+K*Bgrbi11i5sPGUq|QVLH<@VgNlR8?MhDA3w;AJ% z$U)?~lPvk!@(Cp`6gi!dKhcNZ4#5U1Fyi+axUfo*L`w}twbp3tGYG^LS2Rg*U^xgx zc{Fv9Hi~SOQ8eu%=ec-4@HZLDckg*Dy157~j2JIzZ5eovpM}!yeb^2?n40t)B3|5t z?s%qW zv20fNvqqTyyp5|k+Kc*&UP5!G4+`98amH^CLrPu}tYsN2eH0L>i)X2ET~Kyu;bq8f zeT!zbtcP32i(95K-<6dP4c>!PRK5=b1{-1G>j&tNuL>1^L?~Fp!*DqTqShb9+=zSO z;Sni*AdP*)OOMbO=UCqR$^-Cpo&v@S-_XEP#CFsF(CUE%Ep{a0#;?+RQTaXA6AJ}{ z$!(ly@H31ZQ-)J#r?Jj#G*k>a4dKddu*q&Hi0-cg!OJzm@z2#r(TTUPX-61}Ka^vJ zbUYf%zkLmSmA-gX~ z^UWVqxvWi7;Ej(gaW1WZvGE$b!P|r2>ZX7y5%*Af&<~99yGNCV-{(XLgSmsQ%>P!` zhDy&^w#z*Znk|wT|Je%?2eW5)HRW7y?`HckEihhCD~1P%9V(kS+5NJ-#e)dhd8~K#9wrZCJ>NXmk8oSW-8)ncNirN5n!Sa>+3a_&MFw2VFS1^9CCVGW z#MY7pm~`zDS3a^H{k+D5-|QpMEjNX^U}Lf1RSlYXupENcA2gmE0xnZam`huO|LxB5 zo!iA+kS^o+CD>x#4^>ih>kJBt)2LBh13HaO0Go_IG`esS91Q)4la~r%`-&&bgDAsy zgfFAoPhG(Jj~@Cod!kWI61e=%LWS*J;9Zf!>3gQayYw@d;UEPy_Cq0{Lck|P?u7Qk 
zh~vqIkDI1hNio-iYaFzG^n%X17mSa)4rS|y z;YAA&PY;_K18C(?OFJ1H~`v^+=pL1Ex>cry8MEGtP$2{|YaAxy4u0$!7^;GpBU!9%J zFFwMx?@}=@-xQS&U*kHaO+txbyx3!o1pXSxk%X#W(Ea=^9-f;3W}+=pw(f?VNc#!X521;%I2W6ZvCbly;avR}Heo3TRN*%|wrjV!6I5u;>QrcnDQ z7x8m!T*Ly2Y8xJZ0QAVz!@i%aS=yaAXDK2q#I4bd82uh z`Z$Q1ACy4S`@3A;(J}D#HgnfJFQcP|v)$&VK=jWjgYb(3IKBH8>mi<}K}9OW@8cBE z&3O#=m2rU8qhN#8FL1Ferd|qhQ0-C8>Gv_--I8dOj%kGRP0D=8wnQ*q+9>{h@+`0% zfLLNWLfG`*|7Yd}P~0IP#zg|LjpPtlUAGAWoQ{Emyc}OW@(IZs_}B+fOyoH6U!H#Ike?j0Wq|0T-tF}H7V z;Ia-$v?{N%J`LR;iG-5W)@kuGFQL`8PY|r40%~?Mu;ZL8XTSFq7J1A-rS&B|1~RAlF1}2U zZA!sD?`9~Cv&HoO1`K*$4^E4gag&o~LF3eJOkbEtqo%RW%q6zZ)MIY8v=Fqmh{Sa5 zEKX$A&RKkzh7mdPP%XO0t(qg?(NdA`VC=))J5~7r_YWZWX@MffSur$iW_OQ`5cMtw zB=$5%B&E%5strY@_w%9TT{K)iBS&oFHc-(q6&i5zDa6B3Fl{fy1TtQ3UIXq|)F8f5V{q~VBQ)-w zg9cxkY50;h5P1X(iw2ECy@dxM_{l|(-FFUUWhP=`eKzB?U*rDYceY=mi@8%yK=+#Q zaPK`rZRR`J_eGYZC%gcsJGOLGNiDi7RdEK(TERJVH3Tq+D48rxOsea!BdnCpVe>(c z3k|67ZxHWVWRH?i0a^&wprp0}(ijs@Kl~fi7)ZmpTE@mxnZzmV{st-=!l8cYPn@pT z!tUdnp-7;O)0bbv+T|YLv-m3O$yuV=`U(uv<5ALm8K3BB@a^7Fu;XqEMkvn)5h~J^ zgWK`){`*imdI$KPl|h*aYW#&i>iqA!|2W@)6b^5)dEfE%=;M1Et=k0DDMl9rgV{OA z@+Wk7*l^mJ?OdbWCk*Z8z+o55?A9E{(EHA;Bak9oef&3Ds@r48eP0}KjD;tCO_;m0 z8!mrIL${DrH2u*F5&gGl#jU9rDrX5#Bx<~>Gs{`Y9>t&hCrI(R1}4?43s&F3`E0xk zW;O~+8iH90+qO)y*u2x(Q)H^x9ExA8td-h};XW>^$HI0o)Q`{te z_|P;o%Zd>wC@K7qW`AAj|9{=vW#{%C`$h3Ff`%1C$?}r-)9htN^BHgTaTyEY&Y7D8 z8IQ;DlhiEWQ)L$(=)T0Q)&DKrAZyK!A8ro^%=)2s#CYDB$iTjZDtzgpWHy8=r~ASe zLhZ}9_q)A#*dMP{JE## zP?HN5WUTohKES{WE{(9kO|wUnhpUokTcIBPr*1{|Db9c|?9!YTV`dYWSp#p6?ttRM zVOaPwnbU6=!)r0f{XUDrX*!3L$YG-t7<;xCgx3b~A8t+|rZ#)vLVF6HN{&LSMU^-x zc{9iq{$W5_J-%G=J{nk$B3HK$Aa?(Np)b0*EHotliod{q8V;8V9I@$TKJI5?nh!sU z0Is&cgJlh-L$AWge%(p19Ik^#n)_! zUUex5uO2^&I*aVNsb|d4-|#UPbK(!?roIqwnxaf*E;As~qet+Wp6Y1f;KzNg>ww#1 z|DaFjFb1c13(qw);Lt%WUa2sb+cHW(7SDF(VrriW4YW?;-f5BOAkB*J1y+2+?^SeA zm?2(B(ucxq_E6L>2iNE77s)vE@wKz7!S0|UW zbLfE@AMkxF&##a6hCkc1xw~_+=%RXc{<#iI3mmIr>hbNPgX9KP#m^>Lo~i`1Fkq|F&@|UfMbvX3n-lnTZJ08RF(b!PhLQUxobh%`G1|4N@>UD$;Alo5x`s{UH!RU5Gg?YP_UcXa znYo%y3m!yLw?~1?&U3J?AOYp?+Q98oEi8;wjmw-y;r`ooG({^LE=sAxBfDMn$xdxz zbSZ>e%LZqzH=;3Xz?{!BmFMS{eTK?i7vSYi2KPZ97U)w#w+D>#jbQZFGk zFc}_{E~P4rsCnp6iZDOLh+PJcau;Q%kZUTZA+TgHzkTl%vL0o4%jLr`cwP$#4^HN( zO(`uquL@Vn>TyokD83z6qV3^gc>la?*b8NAYhw z^zbK^TsP(Ax4cHHCo1ILlxf^UZ&N<*%MEc+%4qyNN`~aLPbMv!#*x>Y4A19|;=TS> z!#2?xRuo*xgctku z`E_-Y?qWWlhY59g@}GC^_6X*~P32sW<>#qWfLH}03= zvDR`3-mk##{mP#28+PK&wpY+GyB(h%dj#v04ujV|1LC@z4TIEw-~rFm*n7yBKN4@j zuVC~@r;Ln7)2ugIh76!PXH~m3GjYkGxxmRAG*hC;Eu21Sg13W zoQ&2ZgBFYFhUxmW+G;FL-*JH+ys;m@2OqlAK7RsTMN5)ry8=FiJg2)uL*R-~Ku-T1N!Ew& zg?Wq5akJPU3vOpYS-^UbuUjb|G~yrJ8Lxt$7yN>00k=RkWSsE+p0_z-c0r!*kdH>7 zN~SnYB3U^G{D&=8&^Iy*J0m{8+uLI3^0gujV`Q;m_85Nd1utx`-^STxsL`f>Bl)VM zXCU{wB{}bY1KVxFVK5V9r|I3ox)4)-*2j17Bhn9RHVxxFDhtIWM@I7R9tlXrE)fpi zW9t0zO+h)+$fUfxl=Fc+@I&6;vXv$>{a3i&mPEeFv*w{ao+-feN8XtPGZOwS1;dhR{`>u&UbB!4sFB=3uI&tv&+xXJ17D>4b=T3x6mU!6Uj!u5&X{{w!n*4q+W`nNvwekQHD&eCiN7i zTHnKfR2jbON)P_q9WG4X+)A&jjN)Ie@4!&I5_FzfC*JtRlzeLpf^8||dE?x6h}s(j zTNvOY(B>a3U;hX^&wl50Tz}xp)=_+S;9_pVhUXA>NRRBBxLe$HtpsN}ND-?(C5 zdAN6c5Q$sZAbx+!f;8}fT!8UZ{#8yW)+*-0?^G4?Nq-2$8HHhtFc~lY90U*K=4M|# zX28FCSjNp!9LwMQc7cX0u_XQL&O>U#Q;bOejF~6q!}ntam@@PXH-`j4)QOYyGyC&# z*%o5AGk@s6&h_|bNiQteXTpzWVWO)|?yzA?1J^s+9S&)m@ZT21;X`p0`YbQNUAa^6 z*`3qy?$2dhHDL@os^p2gR3mY*-5v2V%^Xzy-jBrv`Y`?L5-wb@A6jzjp*gRei7L&= zjG<0w)Rv525E4J?e%O>J0hH-xG-4M;($xM&s7j5&Vdq(cC6;JG|efD7=u6g&vd7 z)4FM6c#&)%SKrfs##fC<_nadbN~aRh#K~mZoyEAa#DKs5L6=uG8P7i&q(s!cQz2ZF z4ZMd`!)C!DybzrRWlO$*>I*$SUKkRwEA z;l#~F5Ej3l+uU~r1ATAelo{>foSc>Dr0vXIT6Y#|ZP~Ycv=v$Mc^A5G|BqjJXaqSF 
z(Zl&DjUbKRGR1{8llXU&Mv;Xdo}t_I4w`!J3tkx;2(B7q`1ftbB!A~Nj8)bq^1pZC z0ir~#osIeFm2EJ4K%d`GV@7;yz2T*%5?R|n1m=EH1F*XVID-j>*4g6kYl#>+%$gtc zaR||ili_Dan3K@$dVK1T8Q7Zk25$W=LA~t%`1K?|b_V0uhK=S7`-KusPc5X5vFV0@WYqgU=-dKrS zK5ZDj&G7@p0vkBT2A3PBT}A&Vs$~50PRJY)%fek+eCQ(!_Rbwd{+*jZlZUC3Jz;hj z+#kRVcgnzGi>o;IzoWG9Mhfiy`wJsw&%?CLX_#_36eHIe!Z$B>Y<)_&tr{$xXwNR# zD*EIigU!F%SjnXqnb`1W&G^iMG5lHuDRQ-C7;hKZh|Oxrct(2|d9m1%pW&~?-&I$^ zxM>O`Cd39?+Qw6B>ER@7PY2a{bDfnb@@c?910pOr0So^R(Wp6%BrO@NtPF&gB~oPE zpEO+L&2e8hm!LvIJsf}Ff(P!gXLaouh_uoqt54s@mNSa{t;g@hEBda$rtE#V=B_Sr z*lJ29hkj>p=wG;4N{`Pj)+b}?w&UA_eL&48k>#T&^2UORxVdN~uf>LvFPD8oapraO zoTHM;VJe7-N<|rv=J!nR^ZR9dapL5DNHb z0alQ0+K=l;HsbZ_2&kJin0NeF2QAN;NUn{^VqYYKdFOSwT7CsCn{E}JIk^IPI+QpP zc0twuj$5A`z=DhtvjY6bGGR+|{^IfwDFs-*4k z2k2a^L6QxpkQIAZ;M4pmeD&@Xu-JSs-}v({EUVE4FZW zj~v-GP(|_BUifj;6zc*X)6+}k$>@U%x##=Zu=mwi(&x|vk69>H{5Y1AJZ=|O8g|qB zX(zBO`#WanOdyFXjQRFYYjE_J|I0)!(fAGQ4C=|zVuw#Ol@+MeW6yIdMla@$rPy=d zEQS;L?WUw5{}BqWcj1kt5{~dXq+R%d%UvMFt8Xobz3gtHxF!-OJX^$-K8c0#%|?9Z znI>-2;yl>#T7?{)RE8#&{c!eAH6$Nw!%Y+JpmXv~sQgd|{|;J^+ACEMbjuH#UfhQx zDnm)!xRK=H*IHb=buTwn^)+gB1VPRYJ^paA2b?+@$LWk3L_Stb;*095At_W7%uiQ{ zHJr{MA9@|FR1Nt2qT-y&0u3;WX~6ZnopI8|ZQ_}0De9NW@u$DBQtg}dOxm%5Tcqp+ z<7)lkZ?ZD~-NAsE$i73qbqE>bSc5mFOeODcUInM*mGJRMDs(-N;ZJM$3^N_gTRKJyQ?Ab9%C_gxZ#we8u*&=+{|C4~;RejC zYZR*Rm+;q6OWsEBtvJ{(S^V%t0QairAofqY4I2jpWV4AmKX70y>S~N9OGgjqZ(Sb_ zSr(Ios>%a!)lrsj_ZUe$vh>LFQWqQ^^aVzZAIeV^kHX=HKhw=+Pr&cy1a9rh5a6zj z!*Wg+T6=EeatB?0zpoP1f~`># z_|oM$)bv6r)mWi<20G2=d}7=Q+WaV;$(X-Tu9eaj zX@rpXTKtAbEUbA6>5P!-cbs|)mC5(6J5$fUp!qy5}}ii1dhb_jejTL#nr8{qoa z-G-~pXRytx3Jotw@J8r482RHg?ma8T$0oXSqash!lOIRH;-GtYNJF1L*Kf{3{K9) z3|1SQh(BBJ!+fP{xImf>KKuP(sZk`i@lhVkz8rvAk^k^;_auIn;ZmA(NgrB-Cj9Tt zIdo5N7zXe^@Udq<+#OMe@>iqKv8M}V)+_TXKk;BOe>gFAH)cVh#axlZo%_j(gW2yV zleicYzLy2d<<`|O8<1l}GG58)E!>f-O z$1ga05Jz}V;0J%NgOXFXfNNLbSE-+*{hl(!BW@eFZ_-0}yE=o*kyj_GV@DG%>IF<) zrpt$38%j=`(m?U&C17*t8RyaQ5DF9XIE?@^a_LbD7b{ZaOXq6v(oT6e^4%n;&$h<% zy_x(Refg*w^0rxgC2{fp*xx+E|}pUnL911%?*^Qm`7^Y%N%tjwrOPE0f) z{q_0y?&y0+V1VS9oDyhgUk@sxWngpjE4N|pM1JFxv3!7(CK`yDI7dsF|FI%~8~mEk zrqw^dYkm?AqAd{jyA8h?vLKjD3%%yp$Ju{RhjGTmw5l?glMPoUQqs++;CU9xNGz8B z{fN<_OW@#$2o$eK1tw8iu=hR63bKkAnpj#m-kJ`OHmE&yYm~&-FrY~e=QxhcNQ#C--z1=4d(OSRWo3k z8;tWmfembC==5nUe|Ae4YE?`n|794$&7}Ew&DH~~jCSLKRobv^>tEEK^aVUVSdmp5 zQz0N=Dl31y0lA@*iOccfcrmUL8r1v1s=^6#7HRX|9mgOtN0C1i*urFRt2vc}27DxW zO1J7pq1bdHxh2kkSIxm-ziv4Fuw02m{Cx-UA%ZJf08a^We~m1w!v9)l_@# zN3Mjsgc}|w{r~*R;1FyX6cHt!%!H`xV!^Nz>z4gbEq_ugdh72>{*v!uPZ_EM5>Q6VY~ zl^IddkQC8S85O1bI?g825DhAt$jC^Nl;Sy`KYp+0`Tcvl@xtZ0j`MSTKJQO3EU;51 zhbL>&d2I*r`rQS%GvpjI+W4W?7E$tJ<5j$_Vhy6k`}uOcHsM24Y0`Dkh(Ey=xzg%} zl>65Jc6gj0jI*Q)YfJFgmMVM{m%_i@^Aj%jX2WD!4?kK4@z30=IOrombMD#Fxdl6U z%^yRkHKmh}-f2iz@A?8O8O*4(xd!I_w1lvk4qnsdE0J&3u`oO943>R3g>fC&zDOSI+Fj(LHhelX-SjWBoSn)z=sJ!$^eKkKYOXcW*H8&KL|G zb|m!^^~oN!D%j#CMXuEb;elOg(4F)JH1-6;EDKB8>LfzzKi>rBH(lUgC`w&4hwH#q^hQ&maOkG;5} zbr)~(O@jP&HKo6bhcM)WDJ_mo!EI|y>4tVaDj>&UMz;p_sX}ZEP{h)XJ`^62qmDuw zvUlM@^zF}p#2JV93bz`}xKha1yRk*>lVdPy^z8{3^z``Upvh<1TFZ_J!FF#ej76lQiWX_AX@M5rmm$?%R zZ%0ZJy7Vw?*rHGCT{Tz-$`EXv<4g{pYzG4)O&WeX2IlW5gdjHu?sbel7<|>Iz01Bs zSamkS9eFxxl!xO`s(?w-bsefN$C$sBPq{;?6=bI_6$HA}+a_8N>SzK$hszPx48Kd3Ak z$61C;kp=SUINjSBKcv({jLL7cyT({V2hX9bf(BgoV1_wOV8ygeUL!`w;#%n@n0&hr z|MTKuGBfk)4LyPtZ7aB6iw?6AHha?l%LYnI_JQFACDJs>os4hY1gd*dxX4dc+^}RG z1eH(1(aTTqM_QP9_Gt<7J$pQ5+@0v!RrVw_P=%P9?Ssoj-ryy(mOrrbAF389@)5RC z7<$T@6q?!*+4W4<^fe%k1#(2HOPPp^od9=uFDO4=4S@|hbfU*%xWB=H%=zXcY@g>q zc9uz#@tziB-7taByXJI^k_`oI9;5o?=m|qH8t~LpSS%9*lCwmJ;iP7WpVkc59||EQ z#1$@YkRWB9C%8Ro9sGipVz`#U&g-Ye$yEoY$t32Y|ExA_oO2BAZ|RY}vrABA!G4e` 
zmZqa|3C`Tt1WW5DL3f%OeWtaDZ(g7ZN{tAaeSkx0iQI?gcx?N22OPrG$iElcQKt7O z@b0=~_C|GD{N^O@Z+M+A*dKIw>wHQY zmTT(c93L~9$~1=3$&SS3=NYgYHyu(er@=uxJwpGofLhFC0bRg=vs)*j%)fHv5@b{d({OuhMvD{g$dT=^#m4tom&BV z)oP?^Pdp32&%r3aD#&on6i(N%Wzcp-*tu57UthEonhWk?kxv6gX6M0?Pi|EIwF2ok zED%1vT!Z_{5@70w1X#`fuEL*%Fn?tp{!s6dXY%Q28;r9U@1WFCIgn}CAK zaxRfH3b=2X2RU&mh8y%{=7Y3!sCk+X8CyZ`2{{{IiSNtb;zkzuDBgFRFQS^1KFfk73_3reVg&b8V;(4;dVqYwOgwnr znjS9P51V8k;ccf9{>kl1zIgH(nEduF{LOGfrMYStC>#U9>L$c}$6aV}{SEIYFmN?A z;gu>22!EhX)=m1w?UFX5?gyRdoR>e)#@CsyzAZ~K2c*bKDGzeKxdl^BN#TEwjHsmj zD4Kn-0^4OWU@n7pd1ij+(%wFRLy`e-@9uF>J$8jFUbYDSD4fOfL53uHivoieuZPwL zyI8pQ8#E6(QVEfJkg%*8{?xq0!kh1y)zSyUCjMpGQ~@q}eMI>E{Z?2~w*gHj8j=~w z2AI8S01H<=0Owad7*tcnX*_lUjU6T=uHA!Hd?vU`s6y*kstM#;RmkUq22^*Q1iWM* z#Q?>1Xud{*zW<)VFR#48i)U((6>BeyQW@8c_x!AB9d`srH{4_;p7AJdiI`|qz@Jui zCo{r6qI<-Cxb;GT%<5mG^I`$xoqc-1iKUCCl&lTjgtV~zL@*qF2ppw$Fk#(x${=b1cnP! zk$8UO{2E>9pJ4@}LJYt*{Uy$j=z^7pQoPD1eT10H7r6UQZrr3FHDLVcmEiEg49wpl zgXLYOG&%A*7{)H;&oSem{*fY3QJ)FE0R~j9%ax2?QvnxGQ+xtxu((c=Ko~e>`{^!BTZ6vW>p-79L#DPw-2|2|44ArVK#Q1$J*vwHTC(|rx(*$=?QmRfS zpNfFR!@aO!hAaIo^+r$-E&|70YPrqc-f(2^2)gsM0eO0Fhyk`9!FV%YSi4<=F4Gyn zJ6mUgl)*_jx~!bv&2&x()1nDQrfBN(3ufED#GmT*+@Ep-`g2<~YFF5kKS>OLH&GSh z$Bf6xtu}(=x@Por=Q!@D%vF@qFh}PjZ!vI8DxY}zCKj<`c8Qr9w8Z^CSU)KQI~KI_ zsW&C4h2lhktI2deWW-WfImwX@ZE8Y&^@~^;kccuxc0`-~A12-V2p1wuh-kq>`12u- zSMD8;6)z8=4lUyK*<6xk5DbZrwCTh=cVcTb5(DEez~H_b+_~!o{Jd^T+h=Kz46o5p zcG;Dxh#13%Y-L^2oFVf)xz5N6ubD=S>^S)3>xzN$fH zT5Do_gSf!Aqr zWNF7_=xF73Thr*MGwAH=L{9GP0;es7c+%IF zZ0ak9OXGII8+{Fd!($Qj4D#Y$1U18}hf-Lq=EeK%Ex{d*3a~O$mY&$FLUu(tQLBD! z`u2_omFVeV<&!4FW4s5C^d&zs-jS>y!-_|L-NpBdjVby38ed6R5?|(niThyy(mEcz z{y$Zswac6wy!(%Lnr#4I9+W})+mUqE{eHN&PM17P`puuZtVqROkHDF)C$Q&}IMisn z(t(wtG*asa=0-9dV{I#>YXoy2BBJo((OGzG<~A;V^+P-+T!lvzO~`+q(nQ%)gnarS zL$}MZ;B~wn`hQ%<@g)jmU)FwTTGs$eu6=>9;+1eEy$FsZX~K_c0%iRlQPEnGE2gcy z>?B24xYC}ipKXUlcTWn&^rvw9WRtl)-Fjr+Qzg1>#uHEvRHJG;Z{z)+X4K2kpTFqX z#P4v81O<;jxK6PU9-h{uKC)VLR89=OFy9BZ+GmAA^?OLq8d67X=0RBf2vsf}gr~13 zg3?S&5~;fp)jZqa#tv(G;gULz*&W9PX{nISqqOP5s?(t8VS*9+6{x3D!OZ|q6Y?## z06!0Pa6l@2 zDrUEv!E74w*QKvq;4Z}Na0Xo(P6*yX6-KJ7!r#_Lu&y;GQ@w3a;074_(plFxLW)9J!bbP7ft7SPkZJdy-@+LD*S zk5SVq8n;H4qUz5^G_vuaFaOMj->K@v|9u8dStdd5#u$>DJr|&FRW$x28KRX$BC=K9q zXA?ZlKD2#59mi7>Y;3B9zI&f}s_y|aSjh;;*^_Y7VD7~gio#Z7^0@aR zYW*pNEo1UvjMPDB94CjCxooaCR;5#BhhX-)$7sLr7Es(*#mZ4;m()0(e!QKV5}38=L2JjiTafw?S<6%%_5^TXWe z#H9x~LyO<2SU3&xHgv&G@ljy*-k4rr+zvaZNs>mXkz`Rf)5M0YXu_UfXv7J)4RU*h z8ZD>rVEb1%tN9ILf~|?Dx)vQ++KbYc4szcgT!l?9;$Xgy3O&@)2p`5R=5&v`&!N?pCkL z`>8nlhYJnT`;5~n%3+k_cKo9(0-lr1NqoG8(Ee#HtWrOU#%@31{+m{8vmZ(Q9%EhEGV*(FAWDDgCu8VI{LsyF8fv~eA=Kzj*N|jQOXYV%lqrF z&QzK1Ym}rSr$&&qtF6d;rW0PccokKn^r_U_clbW`EO_2d$Mmb~pyQYstyDQ8^iRD4 z72C9Ea}e7t&b-crEp386^u)NY)^vHVE?8agAY9pb-1%e|%ssIm+7l8mXn^Sj+RkLe z2`75f@EPdEc47RTk(ha0lyZRzq$lD(e#w?MShs90|8ScrNsf^v+@?2>{^SdnvqYZg z9QuR*%w@o5@n0_uT!~AVN%w-R45cMHWYd>F@Hwsvdm^lH$#YxKDw+c4{0{?)TT#A~ zom)gxA%yLy?8n6L$6{5HD_{ZOqU#d>VPN?F(7!KhaB|RLmpo9;&kjs7j zuNf3ob@1ldXejx#4{}C(a__soKv%s4m2P+oQ!XEY>thU=XVZe37qXq7j5P7x(aivd zF|guK7Cw8-eixESC^7p!&Pp=C^v$_2`E@2FG4GVXQHRX1-N}s@ZAovL7|=6ncad{A zi*0utNc0m8Y8|acvQ(mAR?u>O%*R^({Ulx7BH=>6Y%S*|q}8I>cvVulXTEUL;C)!r zVoUG%yVHcv&Sb*-IatttfFEP=1;=kQ;zHymaq`tMXrpIHf9|=Dqw0;xCTUwx|5DFi zO}8c=-dK=t6^>-1Vlp~S6DJ*aa}n#mgRhP#^%Y!(O>gpqrjkZ9({~fp*t<}raRzWJ z3e~(6yus0-0~LI6Z)S^5X~OxMm1!Pnyyv z8V2;FwiQ{b{04U4IEHu5{DHLAgzpZ^RM^c=g<08R} zd6VKtFGnGqgH`5vxS2DfC+;VKKzASdzut^id(UuhvUq&Xo{bCSt#MT38yL)N6n6dI z;n@(uU@Mxv{OZ7axawa#KVI!7csy|lAmVzU~RWi#2E`XdZZ zb`dVDXS*)BTJ)_^A-&ISX>ZkewAXvZpU7N0>T-}RIK9&#DX|PpIkyn^^hnXA?oK4$ 
zd^_LUXilPi3h`@(A?dTsfUdKwEM@9eoFZjKFD+co+xIxq>|R&8y8Z{m+5F<7Uld_V zwlvXV1tsU(8{t`jJ{lEDQRUf}@yr5EGV!50`Hz*iADl#a-(qQ^WZuFLDWBww=G34@ z;(xgDtRYeVtpcZ)_G3)SDt!5UC%)Tqh#NcMEmyW@6g2hc@V}SXkx6SDX!^iQOqZ3% zvfY+^-i$C5CT(ImW)1vvipA_v27-NwmULk!1G~@v!%A0VsLy;!OdNTe6U;q?&RZzfe?JM{wn+QdoI zjFtRug~#Zr*vH2^tCN)v{b4T)aNV{xpn>kn*j3cdof~zh)J`g#U$Jv9JEwFb-==|RKyr>O&kEubEQWyTI&}3dAQMi_K9N!h&QG;kZy7P+~xmREh58|%lDycHg z^jT8rggFbL@eT9VE%=7{D~mB!$A%uTj>QgBS?q96#KlSF!afTTB0m2(czbl!S7s232`105ZaD$mpRh7{<5p4nf!PpUY%iuD=>n^ZvlM4|n-$*@w)}Q^LWh zD>%l&h$z-elKBa1;OrA`)O4>$u+pL}eh2V;RXaYcxeA^65hx{KWdP#+=oh{ZVvcq} zkL?SzeO<{t8#E&7D>rjD?jY{HS&pX@&+)m|&+zSbNw^o6i@WY8!tP1#G;m%641Y4g zM+XWaE@KVa$sB-;&swCWgIz~ulGN#^71_L?7K=RpVSCH2VN^l1Kss|KCB=m#fl zD~sv&_52|-4I=$cg2b%$7 zS*s+>$er=?Vc|dt?mZTOE}2fgE+u)H`IpI2JB^<_8=XPSjkIhipQG*9=Jb|-4yWw^58rZ)3C_KF2Kw??oH*KvY z`ERcjCciIZzzHKVeZLV2y8I1g)=q<@CywA!Ee$iyyK)w+Y(S*Wjs!38;@@r<3*mb- zsB%vZbc7^g(WeKvOTM3%tZ=7FaSym#zqL@?-3H$un1N5dTk(pj8u6|-r+bRB_#16` zLh}M~azE-3N~Nij!ZqxC^5_YCpRZ0k_xE!PQ$0DqN9!T}t_PjJycjNUrP!>HBK-5y zksjJ_L)_T)HnQFZ&-57(?ZkfgvQd(}{@us5U9AJh<4^GXw*q+iQkJar6DKm8W@5O} z1Mu4s%njYU4Uu-6ylN}%x1I(*ar};~4@MEGg&9gD&qkOmWX;mIH&Ssx*>;oOTJomI#n)hlrb}P}X8Y%)FU_rhZj6JDFukO$Sh4+rcEOaac+}Xom z=3;1jOOZ~FtOZFK6Eel!l16pbpq@9I-(P;gJ>#BZ|LGK1H!v6b!`)$bj5GPynal5J zZ%0p=Ht^E+xoPdCKzzrD3rmf4K%PC{jhRlGFz+|4jgX}tmrvoxn^qL1+hJ3JEv4srwA6xUncq-<=8L&t1g?3lY9{p(_n*z6OJiQKc+BlqSv z3lSz5(wYiC=qcZYl{2kK#$FAWx7LZgWwYahiAP}Gk6OGX9028)#%QarPrHr^p>^|H zSnR(LwmiRqOB;;wdCxC=Z?F?pT^n&_5bq`SDFZGj)Pj7J62zP}pd(yVsd2+)+^p(G zR!x_tw?(SpbmCt0_M?KECMxuH-Ud9g-=`xSjFaIKS>WID^(?;JSy&5NXX8OKs*ZQKCQk!S zWWgMd@z`~4BcQaknz0k zAy-Vy7>xxUPw`Zr71Mst;IN`0RSy3PYcDvH=${PIx3|?R^!+Csx;&lNi9Q8WcDTW{ zY??>o@)Vt1 z)Tmg7JlXG;54js$NYlL+P~JWU#e>eH%*awa{ZxSIf4aFgyCS&$Y8Y$16p7-N-&}~b zEN%Gvf6L%*;|1a=R5fgZjlJ{NdgSymYi{p^s{nC# zQSkx~Kd&f~WD6bYA|*i#3y$Ky!X;eo9$j*5cOC-*o#H3@%Fz1?&H@`tTk@Yq9OmQ( zK}%t!XOF5Cb!~3slz&Npq>}*Bf+ex|DubFndjiX69N-Kl55v)=+Vr`g2NtZigYP$u z$$tT%uywXSI^UcCjcJsXqOh{RJZrjIrJdVkdXY1g>A}%gM$3$Tm2Q^nQ9Wx#X~r>P?y@F9Nn-Z3*8S_3qLu$!#QtM z$+!e{njwA|#J1e!<`$_@Yo$Uk+SbqQZ##*v3=`mUX$L-97X&-Di$F&1MhI)Lhq=4d zXwNwl`hDL)&iPFmOtv|WN5F#0c7Fix?fG2y&WKSp#wsM|6T3DRS(C@MIxu{@1q3szrbKqm_t^H+~py_F)J76+kplqd~f z&;rFL?+Z#AbMOJ1MccO6QNH5~K6AFC>2WUfxKs{|?pEMiv+lxp_#&7Pa2H=>GzmuB z(tre+LYOI1f)~=0aGzz@Pf^RQTDAcHi~IGOifDTh4@yt9vjl&WYr1i01oqvISWq?5LSWERHX0 zKsQ%QE_IA3dH?e!hO}(QkMsldjckFCwMpnc_y(4~wWU>0lwjJJSgfn_=9jJ{JxnOh( z-@TI&%D5fEN~u%0L?xN4DBlbv{f)O`3==`T%Zez^spHNi8S_^%L_tF}0A9JYV2EHT zell?aDDmYshHi(Z@xL*?BOT{FIqEg#nFMVMz9_tH+Qk)ydeBE}v+%~C8>A|@)AhF< zsQ&DW;8atK-9A-Nyfzj--JA_cvwvaVxN>~O<$+ejo?BZNde9B~l_<$;gFiPefazCl z@;FC<*9WGVZ&6_EL?4H5gEL#j@OaQSOh^@@`G?9dvQ`xb)eLKe&inDb4oDEw^EXZ$78H@Ff2=T$PNSSBMb@b1}wRnQSck0Ld!$^je}i zA+mZ{@Usy2TNu-9i&Thmv!FlMUILw84DA2z1US?(KhFeq-`gdNGdgw1%Gsji9Mf5r zF+fach9k8eGr-NY)gwvry0q-II{lI{1+$W^X>jOhNHnvcis>`2}dds@9qm&UxP#vfNrz$QAH?YL}F<)0mSK6@UV4A>`-ds_i_zm(z8jBwn~ zf5qFMbZNv!eLDHF18v={Nx19mQ72@OWK~wMd2bi^ZA`&Q&l1pbNEFMYG|9M^hXkQY z$#8T+GrsgNW+2E)RGF>JohDb{((X2}UUL9tC*H@QusT?h-HRi{KEwVfUKn-V3vLZ$ zas7|GA?f0I=ssgW$NKpqeO8D~pX?#?!4{aUFH48L9BIzw3gM5r)}(x)1DX5i0F*D^ zjUvZfY1DcHa(%D|+s4g>x&E2h^w^YWZ)E%E^{RA+o(XE`Cqd88x8R$oz&vCEIHGpR z%RJv5C;fcOzn)pl&7W^asxCZ5%YJpTbIc^@*)}Mg=uyJ`@{8ur{!qpz)>l#NxhkLk zOrLiDHl!tIh9Pa*Q4luSz|qcAkUrLs{x|jY0!C(d1>2q$Nd7bM5K9 zrMe^{<0#CXqC~?4RnT|rD`aLQf^8nVmzCNwFRc|>0&Jh58G}`g+fjJqE8poaLKOEN zgbW#DdV9td_*)`NBqnCTOUV|v=3Rl=9y1_OVjrJ2z`#u6_h8ZNd$6BMQoME^_5_9E z#q+Kx{e1&8=R|_TrJ3BSu0_0FlMrqbLpU}~nl8L9rb?mP+u8aKJ4mbNf9c?EW`VwJnw8OdOY9=s2@1=nX7 zk)Hi5C;L>F#HL!&;*IRN8MK|t;nhjIE)c@D`(MGP!`>kH9@_=E6fS;pk5u 
z_-iABmoLAJ;U8u}@v|i46pzEeKX#6Ksz%dxpT|by2mApe5AwzF8mFkIO82f2rKdLJ zLVdUl+?%aSr?G3~<77Kp!2TB--qgWscD{Qb_5s&6PUB>2A9G_)>5{KYs$s$*8Je6d zPP83!@Qy@3{I+r-Dr_%jyzDTXc%=yk0=;3`l@1t;mlj5P#KAa=ncVOtdHkDRf#>E7 zW7pGMK0jHBoF4iCdFgiexzB+l+Ud~TnFH|SPA!A89f#fXM)Cfq)`O8tDI~pf!e4q< zVe?WmJiV?=SQV$rAjVeIY-%n(f2W5tD+ajdm;52kF9oaTiIL?p7UY_%C#DKyNm*t+ zI9*dB4sDNlhfzbU-mm1LInm7@p9SPZGp z!`Q&%{PM-QtdOJ-$?_~XYhysX3~lJqrq#f0apOW{BninV!b~+clI$tNmqn*Q6;&r% zW`!7i{1YCMuY<>0rM$zPEcDHm#J!o6+udMJ&Z=DHi(U`k5((3#xz_s`GeVW+lB~$6 zSSdO-FoN6p(H8zQYv8{>)`CkD)&V!a6YmVRV8x2NSlp%sM&gVWAY(v8FGZtqnH-(` z!y9c^%95_C2avoz9NMEVVXC|Bv(kqd*qFb)mv5HyBL10argg0r#I9P|K)#Tp6rQ#>E~&r2<>h9{!tu_gtAq zp3$NTw{L^WZWa2(?GE$JGS)?v6d9{xL8IG52{(~o6Y~}C6wBh(9dluIt_H4Z%0(~5 zV|X|192_n)Cvx!tpuBh(lftzqH^YpC&J)GXE_r(5WEI;u^Q$s4Yz%m&5z*)5Wp7Zomqb$MOz7&5vtwqAg?n!TDW2HmGie za0VRy^lmx?wYR~`5$`ZvW;c9rk|yO9KhSjKUu@i)gm+(FVdbQye8|BnFsk3ir>^91 z+93^6+0cT7N!>BtWx-O;t1lOh$+p7Wi6iNzm0G0N<1c80C%|u2Aznz%!5qzfC=qD|A=Vdh zAf=rP{m+V4OP2|)t(&{xdWUu%zWD5sqRmM%SFejt(ra_K-9bQ|Wh6ef0#4$pWCW%;avTTm;OYY;{Z)$MX zhF6)Uc@IBKFd!DA45;gsV-V~^ac%iMeBSm3=M-ND^H@tdtn+{m-J?bxE+-IKe-dJo zuVSiRC)1X7XwPjMa%M>n;u!{>W0{~c%?{LUnKJpR8;#40(zqwAMBQnRBmZH@ia7qQ zfLQN*^qqDdB4eH4?DIZ;^NxR5Cf^2ouHM8?qYG~Jf+^k0KI2ns=O-PQQwUVR^)-%x#M^(;~c6npT%F^l-wF_g@ott zp!ZicKWm>UH8Y+MR-aiQv)B^VFP!3KS7)Q*;ub-%(+_A3twr1Q+j!?O=LCmuTav)( zilmWwE1m{ktD{ULMWW&gq7Canke{m8Iqp5e#abga_IA z)_TyHW==66WH-~?#onOGxI}*Y^yhFRG#pyzT>4Q3!-~ge}ER2=q zI3#nn_WN;KLpN74su>PGl%xmP&u*GL#9e61$01o$m?SPrj`|#iFo|F=kX?m34tZD> zaGDhg=ivR4jkxOMbo7fhrB%x&q59<<6ibQ*JnTX49%5w;$`>)y!2-n99EjIaec~C= zjqVnrus>as99bqp#SfcdhpZYzy^4dd0x|mSZ4Ca&PXKi`E8AS&hL6t}k*Ti?;LpG% zUiPjmwKUcz@_pZ+W{orXWT;E;FEz)-dVgU|3ezIjyvK}VkFZ{39{g-!zJuLGIPm&8 zFTin7;HZG{za+_{x}Ugc@*7yrG*Eq$Xij3W8BM=41Ue^Gx#bIgvVw)V;OMoU^I@QO zg{|}9VV$QiDN>I+c0!9EdZJG@6La#G+sZdaxpBd@tZ4FXIxMs@phb6zc$eQcu=*W4 zm*?eyp_~$ZBB@ATZ8`y4eT=AJ^AIYk9OEmS<>|+TzlF=)gzze<9NsRIflK3Mh}1lH z^15>_$ar4mLmid)oy(i?YCsjgXjdinO{s>-0WEmx)HM{V_{ZhyJ!b_eRy1R@1V3|~ zJDvV?1l2M;kDK;h;I}ELlcy@~{PcW3d=}_RUah-=7njuDn^FHs?>2xtNeQ5#k@*5q5WO(Y$40owy0A@m0`i=nU3_O`2xtE=}LDq z&dicinV2wN0VcoyiKn+Ip?pdm^lVtkO^yAC_NI&R>F;>-ci0aqb#L+h@lojJUC3Yh zxf`3y&1j~s7Z9x!DBEdIt6bXn%<)BBbH5Sk|M~{u)frem+k`|{9>T}K%A2Kbp!fju zNFOhPd|78Y4;%6Rq-DI~fpC1hd=pq(orEPua#Df8o41C!sA 0: - test_loader = torch.utils.data.DataLoader( - datasets.MNIST( - "./data", - train=False, - transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]), - ), - batch_size=args.test_batch_size, - shuffle=True, - ) - - # Modeling - model = NeuralNet(784, 500, 10) - model_desc = mnist_model_description() - optim_config = optim.SGDConfig(lr=args.lr) - opts = {"device": {"id": device}} - opts = ORTTrainerOptions(opts) - - trainer = ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=opts) - - # Train loop - for epoch in range(1, args.epochs + 1): - train(args.log_interval, trainer, device, train_loader, epoch, args.train_steps) - if args.test_batch_size > 0: - test(trainer, device, test_loader) - - # Save model - if args.save_path: - torch.save(model.state_dict(), os.path.join(args.save_path, "mnist_cnn.pt")) - - -if __name__ == "__main__": - main() diff --git a/samples/python/training/orttrainer/mnist/pytorch_mnist.py b/samples/python/training/orttrainer/mnist/pytorch_mnist.py deleted file mode 100644 index 2e451d85f62e8..0000000000000 --- a/samples/python/training/orttrainer/mnist/pytorch_mnist.py +++ /dev/null @@ -1,157 +0,0 @@ -import argparse -import os - -import torch -import torch.nn as nn -import torch.nn.functional as F 
-import torch.optim as optim -from torchvision import datasets, transforms - - -# Pytorch model -class NeuralNet(nn.Module): - def __init__(self, input_size, hidden_size, num_classes): - super().__init__() - self.fc1 = nn.Linear(input_size, hidden_size) - self.relu = nn.ReLU() - self.fc2 = nn.Linear(hidden_size, num_classes) - - def forward(self, input1): - out = self.fc1(input1) - out = self.relu(out) - out = self.fc2(out) - return out - - -def my_loss(x, target, is_train=True): - if is_train: - return F.nll_loss(F.log_softmax(x, dim=1), target) - else: - return F.nll_loss(F.log_softmax(x, dim=1), target, reduction="sum") - - -# Helpers -def train(args, model, device, train_loader, optimizer, epoch): - model.train() - for batch_idx, (data, target) in enumerate(train_loader): - if batch_idx == args.train_steps: - break - data, target = data.to(device), target.to(device) # noqa: PLW2901 - data = data.reshape(data.shape[0], -1) # noqa: PLW2901 - optimizer.zero_grad() - output = model(data) - loss = my_loss(output, target) - loss.backward() - optimizer.step() - if batch_idx % args.log_interval == 0: - print( - "Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format( - epoch, - batch_idx * len(data), - len(train_loader.dataset), - 100.0 * batch_idx / len(train_loader), - loss.item(), - ) - ) - - -def test(model, device, test_loader): - model.eval() - test_loss = 0 - correct = 0 - with torch.no_grad(): - for data, target in test_loader: - data, target = data.to(device), target.to(device) # noqa: PLW2901 - data = data.reshape(data.shape[0], -1) # noqa: PLW2901 - output = model(data) - # Stats - test_loss += my_loss(output, target, False).item() - pred = output.argmax(dim=1, keepdim=True) - correct += pred.eq(target.view_as(pred)).sum().item() - - test_loss /= len(test_loader.dataset) - - print( - "\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n".format( - test_loss, correct, len(test_loader.dataset), 100.0 * correct / len(test_loader.dataset) - ) - ) - - -def main(): - # Training settings - parser = argparse.ArgumentParser(description="PyTorch MNIST Example") - parser.add_argument( - "--train-steps", - type=int, - default=-1, - metavar="N", - help="number of steps to train. 
Set -1 to run through whole dataset (default: -1)", - ) - parser.add_argument( - "--batch-size", type=int, default=20, metavar="N", help="input batch size for training (default: 20)" - ) - parser.add_argument( - "--test-batch-size", type=int, default=1000, metavar="N", help="input batch size for testing (default: 1000)" - ) - parser.add_argument("--epochs", type=int, default=1, metavar="N", help="number of epochs to train (default: 1)") - parser.add_argument("--lr", type=float, default=0.01, metavar="LR", help="learning rate (default: 0.01)") - parser.add_argument("--no-cuda", action="store_true", default=False, help="disables CUDA training") - parser.add_argument("--seed", type=int, default=1, metavar="S", help="random seed (default: 1)") - parser.add_argument( - "--log-interval", - type=int, - default=10, - metavar="N", - help="how many batches to wait before logging training status", - ) - parser.add_argument("--save-path", type=str, default="", help="Path for Saving the current Model") - - # Basic setup - args = parser.parse_args() - if not args.no_cuda and torch.cuda.is_available(): - device = "cuda" - else: - device = "cpu" - torch.manual_seed(args.seed) - - # Data loader - train_loader = torch.utils.data.DataLoader( - datasets.MNIST( - "./data", - train=True, - download=True, - transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]), - ), - batch_size=args.batch_size, - shuffle=True, - ) - - if args.test_batch_size > 0: - test_loader = torch.utils.data.DataLoader( - datasets.MNIST( - "./data", - train=False, - transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]), - ), - batch_size=args.test_batch_size, - shuffle=True, - ) - - # Modeling - model = NeuralNet(784, 500, 10).to(device) - optimizer = optim.SGD(model.parameters(), lr=args.lr) - - # Train loop - for epoch in range(1, args.epochs + 1): - train(args, model, device, train_loader, optimizer, epoch) - if args.test_batch_size > 0: - test(model, device, test_loader) - - # Save model - if args.save_path: - torch.save(model.state_dict(), os.path.join(args.save_path, "mnist_cnn.pt")) - - -if __name__ == "__main__": - main() diff --git a/samples/python/training/orttrainer/pytorch_transformer/README.md b/samples/python/training/orttrainer/pytorch_transformer/README.md deleted file mode 100644 index cda8cba6ca0ad..0000000000000 --- a/samples/python/training/orttrainer/pytorch_transformer/README.md +++ /dev/null @@ -1,33 +0,0 @@ -# TransformerModel example - -This example was adapted from Pytorch's [Sequence-to-Sequence Modeling with nn.Transformer and TorchText](https://pytorch.org/tutorials/beginner/transformer_tutorial.html) tutorial - -## Requirements - -* PyTorch 1.6+ -* TorchText 0.6+ -* ONNX Runtime 1.5+ - -## Running PyTorch version - -```bash -python pt_train.py -``` - -## Running ONNX Runtime version - -```bash -python ort_train.py -``` - -## Optional arguments - -| Argument | Description | Default | -| :---------------- | :-----------------------------------------------------: | --------: | -| --batch-size | input batch size for training | 20 | -| --test-batch-size | input batch size for testing | 20 | -| --epochs | number of epochs to train | 2 | -| --lr | learning rate | 0.001 | -| --no-cuda | disables CUDA training | False | -| --seed | random seed | 1 | -| --log-interval | how many batches to wait before logging training status | 200 | diff --git a/samples/python/training/orttrainer/pytorch_transformer/ort_train.py 
b/samples/python/training/orttrainer/pytorch_transformer/ort_train.py deleted file mode 100644 index 551e878cc9035..0000000000000 --- a/samples/python/training/orttrainer/pytorch_transformer/ort_train.py +++ /dev/null @@ -1,89 +0,0 @@ -import argparse - -import torch -from ort_utils import my_loss, transformer_model_description_dynamic_axes -from pt_model import TransformerModel -from utils import get_batch, prepare_data - -import onnxruntime - - -def train(trainer, data_source, device, epoch, args, bptt=35): - total_loss = 0.0 - for batch, i in enumerate(range(0, data_source.size(0) - 1, bptt)): - data, targets = get_batch(data_source, i) - - loss, pred = trainer.train_step(data, targets) - total_loss += loss.item() - if batch % args.log_interval == 0 and batch > 0: - cur_loss = total_loss / args.log_interval - print( - "epoch {:3d} | {:5d}/{:5d} batches | loss {:5.2f}".format( - epoch, batch, len(data_source) // bptt, cur_loss - ) - ) - total_loss = 0 - - -def evaluate(trainer, data_source, bptt=35): - total_loss = 0.0 - with torch.no_grad(): - for i in range(0, data_source.size(0) - 1, bptt): - data, targets = get_batch(data_source, i) - loss, pred = trainer.eval_step(data, targets) - total_loss += len(data) * loss.item() - return total_loss / (len(data_source) - 1) - - -if __name__ == "__main__": - # Training settings - parser = argparse.ArgumentParser(description="PyTorch TransformerModel example") - parser.add_argument( - "--batch-size", type=int, default=20, metavar="N", help="input batch size for training (default: 20)" - ) - parser.add_argument( - "--test-batch-size", type=int, default=20, metavar="N", help="input batch size for testing (default: 20)" - ) - parser.add_argument("--epochs", type=int, default=2, metavar="N", help="number of epochs to train (default: 2)") - parser.add_argument("--lr", type=float, default=0.001, metavar="LR", help="learning rate (default: 0.001)") - parser.add_argument("--no-cuda", action="store_true", default=False, help="disables CUDA training") - parser.add_argument("--seed", type=int, default=1, metavar="S", help="random seed (default: 1)") - parser.add_argument( - "--log-interval", - type=int, - default=200, - metavar="N", - help="how many batches to wait before logging training status (default: 200)", - ) - - # Basic setup - args = parser.parse_args() - if not args.no_cuda and torch.cuda.is_available(): - device = "cuda" - else: - device = "cpu" - torch.manual_seed(args.seed) - onnxruntime.set_seed(args.seed) - - # Model - optim_config = onnxruntime.training.optim.SGDConfig(lr=args.lr) - model_desc = transformer_model_description_dynamic_axes() - model = TransformerModel(28785, 200, 2, 200, 2, 0.2).to(device) - - # Preparing data - train_data, val_data, test_data = prepare_data(device, args.batch_size, args.test_batch_size) - trainer = onnxruntime.training.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss) - - # Train - for epoch in range(1, args.epochs + 1): - train(trainer, train_data, device, epoch, args) - val_loss = evaluate(trainer, val_data) - print("-" * 89) - print(f"| end of epoch {epoch:3d} | valid loss {val_loss:5.2f} | ") - print("-" * 89) - - # Evaluate - test_loss = evaluate(trainer, test_data) - print("=" * 89) - print(f"| End of training | test loss {test_loss:5.2f}") - print("=" * 89) diff --git a/samples/python/training/orttrainer/pytorch_transformer/ort_utils.py b/samples/python/training/orttrainer/pytorch_transformer/ort_utils.py deleted file mode 100644 index 73992f5596f5f..0000000000000 --- 
a/samples/python/training/orttrainer/pytorch_transformer/ort_utils.py +++ /dev/null @@ -1,47 +0,0 @@ -import torch - -from onnxruntime.capi.ort_trainer import IODescription as Legacy_IODescription -from onnxruntime.capi.ort_trainer import ModelDescription as Legacy_ModelDescription - - -def my_loss(x, target): - x = x.view(-1, 28785) - return torch.nn.CrossEntropyLoss()(x, target) - - -def transformer_model_description(bptt=35, batch_size=20, ntokens=28785): - model_desc = { - "inputs": [("input1", [bptt, batch_size]), ("label", [bptt * batch_size])], - "outputs": [("loss", [], True), ("predictions", [bptt, batch_size, ntokens])], - } - return model_desc - - -def transformer_model_description_dynamic_axes(ntokens=28785): - model_desc = { - "inputs": [("input1", ["bptt", "batch_size"]), ("label", ["bptt_x_batch_size"])], - "outputs": [("loss", [], True), ("predictions", ["bptt", "batch_size", ntokens])], - } - return model_desc - - -def legacy_transformer_model_description(bptt=35, batch_size=20, ntokens=28785): - input_desc = Legacy_IODescription("input1", [bptt, batch_size]) - label_desc = Legacy_IODescription("label", [bptt * batch_size]) - loss_desc = Legacy_IODescription("loss", []) - predictions_desc = Legacy_IODescription("predictions", [bptt, batch_size, ntokens]) - return ( - Legacy_ModelDescription([input_desc, label_desc], [loss_desc, predictions_desc]), - Legacy_IODescription("__learning_rate", [1]), - ) - - -def legacy_transformer_model_description_dynamic_axes(ntokens=28785): - input_desc = Legacy_IODescription("input1", ["bptt", "batch_size"]) - label_desc = Legacy_IODescription("label", ["bptt_x_batch_size"]) - loss_desc = Legacy_IODescription("loss", []) - predictions_desc = Legacy_IODescription("predictions", ["bptt", "batch_size", ntokens]) - return ( - Legacy_ModelDescription([input_desc, label_desc], [loss_desc, predictions_desc]), - Legacy_IODescription("__learning_rate", [1]), - ) diff --git a/samples/python/training/orttrainer/pytorch_transformer/pt_model.py b/samples/python/training/orttrainer/pytorch_transformer/pt_model.py deleted file mode 100644 index 4f2e03192c6cf..0000000000000 --- a/samples/python/training/orttrainer/pytorch_transformer/pt_model.py +++ /dev/null @@ -1,62 +0,0 @@ -import math - -import torch -import torch.nn as nn - - -class TransformerModel(nn.Module): - def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5): - super().__init__() - from torch.nn import TransformerEncoder, TransformerEncoderLayer - - self.model_type = "Transformer" - self.input1_mask = None - self.pos_encoder = PositionalEncoding(ninp, dropout) - encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout) - self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers) - self.encoder = nn.Embedding(ntoken, ninp) - self.ninp = ninp - self.decoder = nn.Linear(ninp, ntoken) - - self.init_weights() - - def _generate_square_subsequent_mask(self, sz): - mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1) - mask = mask.float().masked_fill(mask == 0, float("-inf")).masked_fill(mask == 1, 0.0) - return mask - - def init_weights(self): - initrange = 0.1 - self.encoder.weight.data.uniform_(-initrange, initrange) - self.decoder.bias.data.zero_() - self.decoder.weight.data.uniform_(-initrange, initrange) - - def forward(self, input1): - if self.input1_mask is None or self.input1_mask.size(0) != input1.size(0): - device = input1.device - mask = self._generate_square_subsequent_mask(input1.size(0)).to(device) - self.input1_mask = mask - - input1 = 
self.encoder(input1) * math.sqrt(self.ninp) - input1 = self.pos_encoder(input1) - output = self.transformer_encoder(input1, self.input1_mask) - output = self.decoder(output) - return output - - -class PositionalEncoding(nn.Module): - def __init__(self, d_model, dropout=0.1, max_len=5000): - super().__init__() - self.dropout = nn.Dropout(p=dropout) - - pe = torch.zeros(max_len, d_model) - position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1) - div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model)) - pe[:, 0::2] = torch.sin(position * div_term) - pe[:, 1::2] = torch.cos(position * div_term) - pe = pe.unsqueeze(0).transpose(0, 1) - self.register_buffer("pe", pe) - - def forward(self, x): - x = x + self.pe[: x.size(0), :] - return self.dropout(x) diff --git a/samples/python/training/orttrainer/pytorch_transformer/pt_train.py b/samples/python/training/orttrainer/pytorch_transformer/pt_train.py deleted file mode 100644 index a197fb50357e9..0000000000000 --- a/samples/python/training/orttrainer/pytorch_transformer/pt_train.py +++ /dev/null @@ -1,94 +0,0 @@ -import argparse - -import torch -import torch.nn as nn -from pt_model import TransformerModel -from utils import get_batch, prepare_data - - -def train(model, data_source, device, epoch, args, bptt=35): - total_loss = 0.0 - model.train() - for batch, i in enumerate(range(0, data_source.size(0) - 1, bptt)): - data, targets = get_batch(data_source, i) - - optimizer.zero_grad() - output = model(data) - loss = criterion(output.view(-1, 28785), targets) - loss.backward() - optimizer.step() - - total_loss += loss.item() - if batch % args.log_interval == 0 and batch > 0: - cur_loss = total_loss / args.log_interval - print( - "epoch {:3d} | {:5d}/{:5d} batches | loss {:5.2f}".format( - epoch, batch, len(data_source) // bptt, cur_loss - ) - ) - total_loss = 0 - - -def evaluate(model, data_source, criterion, bptt=35): - total_loss = 0.0 - model.eval() - with torch.no_grad(): - for i in range(0, data_source.size(0) - 1, bptt): - data, targets = get_batch(data_source, i) - output = model(data) - output_flat = output.view(-1, 28785) - total_loss += len(data) * criterion(output_flat, targets).item() - return total_loss / (len(data_source) - 1) - - -if __name__ == "__main__": - # Training settings - parser = argparse.ArgumentParser(description="PyTorch TransformerModel example") - parser.add_argument( - "--batch-size", type=int, default=20, metavar="N", help="input batch size for training (default: 20)" - ) - parser.add_argument( - "--test-batch-size", type=int, default=20, metavar="N", help="input batch size for testing (default: 20)" - ) - parser.add_argument("--epochs", type=int, default=2, metavar="N", help="number of epochs to train (default: 2)") - parser.add_argument("--lr", type=float, default=0.001, metavar="LR", help="learning rate (default: 0.001)") - parser.add_argument("--no-cuda", action="store_true", default=False, help="disables CUDA training") - parser.add_argument("--seed", type=int, default=1, metavar="S", help="random seed (default: 1)") - parser.add_argument( - "--log-interval", - type=int, - default=200, - metavar="N", - help="how many batches to wait before logging training status (default: 200)", - ) - - # Basic setup - args = parser.parse_args() - if not args.no_cuda and torch.cuda.is_available(): - device = "cuda" - else: - device = "cpu" - torch.manual_seed(args.seed) - - # Model - criterion = nn.CrossEntropyLoss() - lr = 0.001 - model = TransformerModel(28785, 200, 2, 200, 2, 
0.2).to(device) - optimizer = torch.optim.SGD(model.parameters(), lr=lr) - - # Preparing data - train_data, val_data, test_data = prepare_data(device, args.batch_size, args.test_batch_size) - - # Train - for epoch in range(1, args.epochs + 1): - train(model, train_data, device, epoch, args) - val_loss = evaluate(model, val_data, criterion) - print("-" * 89) - print(f"| end of epoch {epoch:3d} | valid loss {val_loss:5.2f} | ") - print("-" * 89) - - # Evaluate - test_loss = evaluate(model, test_data, criterion) - print("=" * 89) - print(f"| End of training | test loss {test_loss:5.2f}") - print("=" * 89) diff --git a/samples/python/training/orttrainer/pytorch_transformer/utils.py b/samples/python/training/orttrainer/pytorch_transformer/utils.py deleted file mode 100644 index 3be8b6cf3f420..0000000000000 --- a/samples/python/training/orttrainer/pytorch_transformer/utils.py +++ /dev/null @@ -1,59 +0,0 @@ -import os - -import torch -from torchtext.data.utils import get_tokenizer -from torchtext.utils import download_from_url, extract_archive -from torchtext.vocab import build_vocab_from_iterator - - -def batchify(data, bsz, device): - # Divide the dataset into bsz parts. - nbatch = data.size(0) // bsz - # Trim off any extra elements that wouldn't cleanly fit (remainders). - data = data.narrow(0, 0, nbatch * bsz) - # Evenly divide the data across the bsz batches. - data = data.view(bsz, -1).t().contiguous() - return data.to(device) - - -def get_batch(source, i, bptt=35): - seq_len = min(bptt, len(source) - 1 - i) - data = source[i : i + seq_len] - target = source[i + 1 : i + 1 + seq_len].view(-1) - return data, target - - -def prepare_data(device="cpu", train_batch_size=20, eval_batch_size=20, data_dir=None): - url = "https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip" - - download_path = ".data_wikitext_2_v1" - extract_path = None - if data_dir: - download_path = os.path.join(data_dir, "download") - os.makedirs(download_path, exist_ok=True) - download_path = os.path.join(download_path, "wikitext-2-v1.zip") - - extract_path = os.path.join(data_dir, "extracted") - os.makedirs(extract_path, exist_ok=True) - - test_filepath, valid_filepath, train_filepath = extract_archive( - download_from_url(url, root=download_path), to_path=extract_path - ) - tokenizer = get_tokenizer("basic_english") - vocab = build_vocab_from_iterator(map(tokenizer, iter(open(train_filepath, encoding="utf8")))) # noqa: SIM115 - - def data_process(raw_text_iter): - data = [torch.tensor([vocab[token] for token in tokenizer(item)], dtype=torch.long) for item in raw_text_iter] - return torch.cat(tuple(filter(lambda t: t.numel() > 0, data))) - - train_data = data_process(iter(open(train_filepath, encoding="utf8"))) # noqa: SIM115 - val_data = data_process(iter(open(valid_filepath, encoding="utf8"))) # noqa: SIM115 - test_data = data_process(iter(open(test_filepath, encoding="utf8"))) # noqa: SIM115 - - device = torch.device(device) - - train_data = batchify(train_data, train_batch_size, device) - val_data = batchify(val_data, eval_batch_size, device) - test_data = batchify(test_data, eval_batch_size, device) - - return train_data, val_data, test_data diff --git a/setup.py b/setup.py index 1c04433c9a7ca..da4943c4ef7ae 100644 --- a/setup.py +++ b/setup.py @@ -398,7 +398,6 @@ def finalize_options(self): "onnxruntime", "onnxruntime.backend", "onnxruntime.capi", - "onnxruntime.capi.training", "onnxruntime.datasets", "onnxruntime.tools", "onnxruntime.tools.mobile_helpers", From 
34c54244567af3157a3d37e6d42b9bb918931fbc Mon Sep 17 00:00:00 2001 From: Yulong Wang <7679871+fs-eire@users.noreply.github.com> Date: Fri, 17 Nov 2023 22:40:51 -0800 Subject: [PATCH 31/38] [js] update a few packages (#18499) ### Description [js] update a few packages - update semver - update reference of onnx_proto to local folder in order to upgrade protobufjs@7.2.4 Resolve AB#18513 --- js/node/package-lock.json | 79 +- js/node/package.json | 3 +- js/node/test/ort-schema/protobuf/.gitignore | 2 + js/node/test/ort-schema/protobuf/README.md | 21 + js/node/test/ort-schema/protobuf/onnx.d.ts | 2627 +++++++ js/node/test/ort-schema/protobuf/onnx.js | 7658 +++++++++++++++++++ js/node/test/test-utils.ts | 3 +- js/package-lock.json | 12 +- 8 files changed, 10341 insertions(+), 64 deletions(-) create mode 100644 js/node/test/ort-schema/protobuf/.gitignore create mode 100644 js/node/test/ort-schema/protobuf/README.md create mode 100644 js/node/test/ort-schema/protobuf/onnx.d.ts create mode 100644 js/node/test/ort-schema/protobuf/onnx.js diff --git a/js/node/package-lock.json b/js/node/package-lock.json index e8968bafc4a9f..c1cf8af4bb80e 100644 --- a/js/node/package-lock.json +++ b/js/node/package-lock.json @@ -22,7 +22,7 @@ "jsonc": "^2.0.0", "minimist": "^1.2.8", "node-addon-api": "^6.0.0", - "onnx-proto": "^8.0.1" + "protobufjs": "^7.2.4" } }, "../common": { @@ -97,12 +97,6 @@ "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==", "dev": true }, - "node_modules/@types/long": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@types/long/-/long-4.0.2.tgz", - "integrity": "sha512-MqTGEo5bj5t157U6fA/BiDynNkn0YknVdh48CMPkTSpFTVmvao5UQmm7uEF6xBEo7qIMAlY/JSleYaE6VOdpaA==", - "dev": true - }, "node_modules/@types/minimist": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/@types/minimist/-/minimist-1.2.2.tgz", @@ -528,9 +522,9 @@ "dev": true }, "node_modules/long": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz", - "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==", + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/long/-/long-5.2.3.tgz", + "integrity": "sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q==", "dev": true }, "node_modules/lru-cache": { @@ -663,15 +657,6 @@ "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, - "node_modules/onnx-proto": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/onnx-proto/-/onnx-proto-8.0.1.tgz", - "integrity": "sha512-ZpPTqp5dneh2bvavk/QpDsf20JJRArjqTkiMfshGmxR8ocjmfTk80fkW00FwLO7qRtybo9NPugcWQrumHYctLQ==", - "dev": true, - "dependencies": { - "protobufjs": "^6.11.2" - } - }, "node_modules/onnxruntime-common": { "resolved": "../common", "link": true @@ -690,9 +675,9 @@ } }, "node_modules/protobufjs": { - "version": "6.11.4", - "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.11.4.tgz", - "integrity": "sha512-5kQWPaJHi1WoCpjTGszzQ32PG2F4+wRY6BmAT4Vfw56Q2FZ4YZzK20xUYQH4YkfehY1e6QSICrJquM6xXZNcrw==", + "version": "7.2.5", + "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.2.5.tgz", + "integrity": "sha512-gGXRSXvxQ7UiPgfw8gevrfRWcTlSbOFg+p/N+JVJEK5VhueL2miT6qTymqAmjr1Q5WbOCyJbyrk6JfWKwlFn6A==", "dev": true, "hasInstallScript": true, "dependencies": { @@ -706,13 +691,11 @@ "@protobufjs/path": "^1.1.2", "@protobufjs/pool": "^1.1.0", "@protobufjs/utf8": "^1.1.0", - "@types/long": "^4.0.1", 
"@types/node": ">=13.7.0", - "long": "^4.0.0" + "long": "^5.0.0" }, - "bin": { - "pbjs": "bin/pbjs", - "pbts": "bin/pbts" + "engines": { + "node": ">=12.0.0" } }, "node_modules/proxy-from-env": { @@ -789,9 +772,9 @@ ] }, "node_modules/semver": { - "version": "7.3.8", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.8.tgz", - "integrity": "sha512-NB1ctGL5rlHrPJtFDVIVzTyQylMLu9N9VICA6HSFJo8MCGVTMW6gfpicwKmmK/dAjTOrqu5l63JJOpDSrAis3A==", + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", + "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", "dev": true, "dependencies": { "lru-cache": "^6.0.0" @@ -1070,12 +1053,6 @@ "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==", "dev": true }, - "@types/long": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@types/long/-/long-4.0.2.tgz", - "integrity": "sha512-MqTGEo5bj5t157U6fA/BiDynNkn0YknVdh48CMPkTSpFTVmvao5UQmm7uEF6xBEo7qIMAlY/JSleYaE6VOdpaA==", - "dev": true - }, "@types/minimist": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/@types/minimist/-/minimist-1.2.2.tgz", @@ -1413,9 +1390,9 @@ "dev": true }, "long": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz", - "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==", + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/long/-/long-5.2.3.tgz", + "integrity": "sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q==", "dev": true }, "lru-cache": { @@ -1523,15 +1500,6 @@ "set-blocking": "^2.0.0" } }, - "onnx-proto": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/onnx-proto/-/onnx-proto-8.0.1.tgz", - "integrity": "sha512-ZpPTqp5dneh2bvavk/QpDsf20JJRArjqTkiMfshGmxR8ocjmfTk80fkW00FwLO7qRtybo9NPugcWQrumHYctLQ==", - "dev": true, - "requires": { - "protobufjs": "^6.11.2" - } - }, "onnxruntime-common": { "version": "file:../common", "requires": { @@ -1549,9 +1517,9 @@ } }, "protobufjs": { - "version": "6.11.4", - "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.11.4.tgz", - "integrity": "sha512-5kQWPaJHi1WoCpjTGszzQ32PG2F4+wRY6BmAT4Vfw56Q2FZ4YZzK20xUYQH4YkfehY1e6QSICrJquM6xXZNcrw==", + "version": "7.2.5", + "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.2.5.tgz", + "integrity": "sha512-gGXRSXvxQ7UiPgfw8gevrfRWcTlSbOFg+p/N+JVJEK5VhueL2miT6qTymqAmjr1Q5WbOCyJbyrk6JfWKwlFn6A==", "dev": true, "requires": { "@protobufjs/aspromise": "^1.1.2", @@ -1564,9 +1532,8 @@ "@protobufjs/path": "^1.1.2", "@protobufjs/pool": "^1.1.0", "@protobufjs/utf8": "^1.1.0", - "@types/long": "^4.0.1", "@types/node": ">=13.7.0", - "long": "^4.0.0" + "long": "^5.0.0" } }, "proxy-from-env": { @@ -1619,9 +1586,9 @@ "dev": true }, "semver": { - "version": "7.3.8", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.8.tgz", - "integrity": "sha512-NB1ctGL5rlHrPJtFDVIVzTyQylMLu9N9VICA6HSFJo8MCGVTMW6gfpicwKmmK/dAjTOrqu5l63JJOpDSrAis3A==", + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", + "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", "dev": true, "requires": { "lru-cache": "^6.0.0" diff --git a/js/node/package.json b/js/node/package.json index 0f8f0e9d2260c..8e591d8f46b9d 100644 --- a/js/node/package.json +++ b/js/node/package.json 
@@ -19,6 +19,7 @@ }, "scripts": { "buildr": "tsc && node ./script/build --config=RelWithDebInfo", + "preprepare": "node -e \"require('node:fs').copyFileSync('./node_modules/long/index.d.ts', './node_modules/long/umd/index.d.ts')\"", "prepare": "tsc --build script test .", "rebuild": "tsc && node ./script/build --rebuild", "rebuildd": "tsc && node ./script/build --rebuild --config=Debug", @@ -39,7 +40,7 @@ "jsonc": "^2.0.0", "minimist": "^1.2.8", "node-addon-api": "^6.0.0", - "onnx-proto": "^8.0.1" + "protobufjs": "^7.2.4" }, "main": "dist/index.js", "os": [ diff --git a/js/node/test/ort-schema/protobuf/.gitignore b/js/node/test/ort-schema/protobuf/.gitignore new file mode 100644 index 0000000000000..092bb6c1c9fb4 --- /dev/null +++ b/js/node/test/ort-schema/protobuf/.gitignore @@ -0,0 +1,2 @@ +!onnx.js +!onnx.d.ts diff --git a/js/node/test/ort-schema/protobuf/README.md b/js/node/test/ort-schema/protobuf/README.md new file mode 100644 index 0000000000000..f5f52c602f1ad --- /dev/null +++ b/js/node/test/ort-schema/protobuf/README.md @@ -0,0 +1,21 @@ +# ONNX protobuf + +This directory contains generated protobuf definition for onnx: + +- onnx.js +- onnx.d.ts + +These files are generated from [a fork of onnx-proto](https://github.com/fs-eire/onnx-proto/tree/update-v9). + +The ONNX protobuf uses protobufjs@7.2.4, which depends on long@5.2.3, the version contains 2 bugs: + +- type export does not work with commonjs. described in https://github.com/dcodeIO/long.js/pull/124. added a "postinstall" script to fix. +- in the generated typescript declaration file 'onnx.d.ts', the following line: + ```ts + import Long = require("long"); + ``` + need to be replaced to fix type import error: + ```ts + import Long from "long"; + ``` + this replacement is done and code format is also applied to file 'onnx.d.ts'. diff --git a/js/node/test/ort-schema/protobuf/onnx.d.ts b/js/node/test/ort-schema/protobuf/onnx.d.ts new file mode 100644 index 0000000000000..c60264dca2a8d --- /dev/null +++ b/js/node/test/ort-schema/protobuf/onnx.d.ts @@ -0,0 +1,2627 @@ +import Long from 'long'; +import * as $protobuf from 'protobufjs'; + +/** Namespace onnx. */ +export namespace onnx { + + /** Version enum. */ + enum Version { + _START_VERSION = 0, + IR_VERSION_2017_10_10 = 1, + IR_VERSION_2017_10_30 = 2, + IR_VERSION_2017_11_3 = 3, + IR_VERSION_2019_1_22 = 4, + IR_VERSION_2019_3_18 = 5, + IR_VERSION_2019_9_19 = 6, + IR_VERSION_2020_5_8 = 7, + IR_VERSION_2021_7_30 = 8, + IR_VERSION = 9 + } + + /** Properties of an AttributeProto. 
*/ + interface IAttributeProto { + /** AttributeProto name */ + name?: (string|null); + + /** AttributeProto refAttrName */ + refAttrName?: (string|null); + + /** AttributeProto docString */ + docString?: (string|null); + + /** AttributeProto type */ + type?: (onnx.AttributeProto.AttributeType|null); + + /** AttributeProto f */ + f?: (number|null); + + /** AttributeProto i */ + i?: (number|Long|null); + + /** AttributeProto s */ + s?: (Uint8Array|null); + + /** AttributeProto t */ + t?: (onnx.ITensorProto|null); + + /** AttributeProto g */ + g?: (onnx.IGraphProto|null); + + /** AttributeProto sparseTensor */ + sparseTensor?: (onnx.ISparseTensorProto|null); + + /** AttributeProto tp */ + tp?: (onnx.ITypeProto|null); + + /** AttributeProto floats */ + floats?: (number[]|null); + + /** AttributeProto ints */ + ints?: ((number | Long)[]|null); + + /** AttributeProto strings */ + strings?: (Uint8Array[]|null); + + /** AttributeProto tensors */ + tensors?: (onnx.ITensorProto[]|null); + + /** AttributeProto graphs */ + graphs?: (onnx.IGraphProto[]|null); + + /** AttributeProto sparseTensors */ + sparseTensors?: (onnx.ISparseTensorProto[]|null); + + /** AttributeProto typeProtos */ + typeProtos?: (onnx.ITypeProto[]|null); + } + + /** Represents an AttributeProto. */ + class AttributeProto implements IAttributeProto { + /** + * Constructs a new AttributeProto. + * @param [properties] Properties to set + */ + constructor(properties?: onnx.IAttributeProto); + + /** AttributeProto name. */ + public name: string; + + /** AttributeProto refAttrName. */ + public refAttrName: string; + + /** AttributeProto docString. */ + public docString: string; + + /** AttributeProto type. */ + public type: onnx.AttributeProto.AttributeType; + + /** AttributeProto f. */ + public f: number; + + /** AttributeProto i. */ + public i: (number|Long); + + /** AttributeProto s. */ + public s: Uint8Array; + + /** AttributeProto t. */ + public t?: (onnx.ITensorProto|null); + + /** AttributeProto g. */ + public g?: (onnx.IGraphProto|null); + + /** AttributeProto sparseTensor. */ + public sparseTensor?: (onnx.ISparseTensorProto|null); + + /** AttributeProto tp. */ + public tp?: (onnx.ITypeProto|null); + + /** AttributeProto floats. */ + public floats: number[]; + + /** AttributeProto ints. */ + public ints: (number|Long)[]; + + /** AttributeProto strings. */ + public strings: Uint8Array[]; + + /** AttributeProto tensors. */ + public tensors: onnx.ITensorProto[]; + + /** AttributeProto graphs. */ + public graphs: onnx.IGraphProto[]; + + /** AttributeProto sparseTensors. */ + public sparseTensors: onnx.ISparseTensorProto[]; + + /** AttributeProto typeProtos. */ + public typeProtos: onnx.ITypeProto[]; + + /** + * Creates a new AttributeProto instance using the specified properties. + * @param [properties] Properties to set + * @returns AttributeProto instance + */ + public static create(properties?: onnx.IAttributeProto): onnx.AttributeProto; + + /** + * Encodes the specified AttributeProto message. Does not implicitly {@link onnx.AttributeProto.verify|verify} + * messages. + * @param message AttributeProto message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: onnx.IAttributeProto, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified AttributeProto message, length delimited. Does not implicitly {@link + * onnx.AttributeProto.verify|verify} messages. 
+ * @param message AttributeProto message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: onnx.IAttributeProto, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an AttributeProto message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns AttributeProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): onnx.AttributeProto; + + /** + * Decodes an AttributeProto message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns AttributeProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): onnx.AttributeProto; + + /** + * Verifies an AttributeProto message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: {[k: string]: any}): (string|null); + + /** + * Creates an AttributeProto message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns AttributeProto + */ + public static fromObject(object: {[k: string]: any}): onnx.AttributeProto; + + /** + * Creates a plain object from an AttributeProto message. Also converts values to other types if specified. + * @param message AttributeProto + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: onnx.AttributeProto, options?: $protobuf.IConversionOptions): {[k: string]: any}; + + /** + * Converts this AttributeProto to JSON. + * @returns JSON object + */ + public toJSON(): {[k: string]: any}; + + /** + * Gets the default type url for AttributeProto + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + namespace AttributeProto { + + /** AttributeType enum. */ + enum AttributeType { + UNDEFINED = 0, + FLOAT = 1, + INT = 2, + STRING = 3, + TENSOR = 4, + GRAPH = 5, + SPARSE_TENSOR = 11, + TYPE_PROTO = 13, + FLOATS = 6, + INTS = 7, + STRINGS = 8, + TENSORS = 9, + GRAPHS = 10, + SPARSE_TENSORS = 12, + TYPE_PROTOS = 14 + } + } + + /** Properties of a ValueInfoProto. */ + interface IValueInfoProto { + /** ValueInfoProto name */ + name?: (string|null); + + /** ValueInfoProto type */ + type?: (onnx.ITypeProto|null); + + /** ValueInfoProto docString */ + docString?: (string|null); + } + + /** Represents a ValueInfoProto. */ + class ValueInfoProto implements IValueInfoProto { + /** + * Constructs a new ValueInfoProto. + * @param [properties] Properties to set + */ + constructor(properties?: onnx.IValueInfoProto); + + /** ValueInfoProto name. */ + public name: string; + + /** ValueInfoProto type. */ + public type?: (onnx.ITypeProto|null); + + /** ValueInfoProto docString. */ + public docString: string; + + /** + * Creates a new ValueInfoProto instance using the specified properties. 
+ * @param [properties] Properties to set + * @returns ValueInfoProto instance + */ + public static create(properties?: onnx.IValueInfoProto): onnx.ValueInfoProto; + + /** + * Encodes the specified ValueInfoProto message. Does not implicitly {@link onnx.ValueInfoProto.verify|verify} + * messages. + * @param message ValueInfoProto message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: onnx.IValueInfoProto, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ValueInfoProto message, length delimited. Does not implicitly {@link + * onnx.ValueInfoProto.verify|verify} messages. + * @param message ValueInfoProto message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: onnx.IValueInfoProto, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a ValueInfoProto message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ValueInfoProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): onnx.ValueInfoProto; + + /** + * Decodes a ValueInfoProto message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ValueInfoProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): onnx.ValueInfoProto; + + /** + * Verifies a ValueInfoProto message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: {[k: string]: any}): (string|null); + + /** + * Creates a ValueInfoProto message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ValueInfoProto + */ + public static fromObject(object: {[k: string]: any}): onnx.ValueInfoProto; + + /** + * Creates a plain object from a ValueInfoProto message. Also converts values to other types if specified. + * @param message ValueInfoProto + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: onnx.ValueInfoProto, options?: $protobuf.IConversionOptions): {[k: string]: any}; + + /** + * Converts this ValueInfoProto to JSON. + * @returns JSON object + */ + public toJSON(): {[k: string]: any}; + + /** + * Gets the default type url for ValueInfoProto + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a NodeProto. */ + interface INodeProto { + /** NodeProto input */ + input?: (string[]|null); + + /** NodeProto output */ + output?: (string[]|null); + + /** NodeProto name */ + name?: (string|null); + + /** NodeProto opType */ + opType?: (string|null); + + /** NodeProto domain */ + domain?: (string|null); + + /** NodeProto attribute */ + attribute?: (onnx.IAttributeProto[]|null); + + /** NodeProto docString */ + docString?: (string|null); + } + + /** Represents a NodeProto. 
*/ + class NodeProto implements INodeProto { + /** + * Constructs a new NodeProto. + * @param [properties] Properties to set + */ + constructor(properties?: onnx.INodeProto); + + /** NodeProto input. */ + public input: string[]; + + /** NodeProto output. */ + public output: string[]; + + /** NodeProto name. */ + public name: string; + + /** NodeProto opType. */ + public opType: string; + + /** NodeProto domain. */ + public domain: string; + + /** NodeProto attribute. */ + public attribute: onnx.IAttributeProto[]; + + /** NodeProto docString. */ + public docString: string; + + /** + * Creates a new NodeProto instance using the specified properties. + * @param [properties] Properties to set + * @returns NodeProto instance + */ + public static create(properties?: onnx.INodeProto): onnx.NodeProto; + + /** + * Encodes the specified NodeProto message. Does not implicitly {@link onnx.NodeProto.verify|verify} messages. + * @param message NodeProto message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: onnx.INodeProto, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified NodeProto message, length delimited. Does not implicitly {@link + * onnx.NodeProto.verify|verify} messages. + * @param message NodeProto message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: onnx.INodeProto, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a NodeProto message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns NodeProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): onnx.NodeProto; + + /** + * Decodes a NodeProto message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns NodeProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): onnx.NodeProto; + + /** + * Verifies a NodeProto message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: {[k: string]: any}): (string|null); + + /** + * Creates a NodeProto message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns NodeProto + */ + public static fromObject(object: {[k: string]: any}): onnx.NodeProto; + + /** + * Creates a plain object from a NodeProto message. Also converts values to other types if specified. + * @param message NodeProto + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: onnx.NodeProto, options?: $protobuf.IConversionOptions): {[k: string]: any}; + + /** + * Converts this NodeProto to JSON. 
+ * @returns JSON object + */ + public toJSON(): {[k: string]: any}; + + /** + * Gets the default type url for NodeProto + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a TrainingInfoProto. */ + interface ITrainingInfoProto { + /** TrainingInfoProto initialization */ + initialization?: (onnx.IGraphProto|null); + + /** TrainingInfoProto algorithm */ + algorithm?: (onnx.IGraphProto|null); + + /** TrainingInfoProto initializationBinding */ + initializationBinding?: (onnx.IStringStringEntryProto[]|null); + + /** TrainingInfoProto updateBinding */ + updateBinding?: (onnx.IStringStringEntryProto[]|null); + } + + /** Represents a TrainingInfoProto. */ + class TrainingInfoProto implements ITrainingInfoProto { + /** + * Constructs a new TrainingInfoProto. + * @param [properties] Properties to set + */ + constructor(properties?: onnx.ITrainingInfoProto); + + /** TrainingInfoProto initialization. */ + public initialization?: (onnx.IGraphProto|null); + + /** TrainingInfoProto algorithm. */ + public algorithm?: (onnx.IGraphProto|null); + + /** TrainingInfoProto initializationBinding. */ + public initializationBinding: onnx.IStringStringEntryProto[]; + + /** TrainingInfoProto updateBinding. */ + public updateBinding: onnx.IStringStringEntryProto[]; + + /** + * Creates a new TrainingInfoProto instance using the specified properties. + * @param [properties] Properties to set + * @returns TrainingInfoProto instance + */ + public static create(properties?: onnx.ITrainingInfoProto): onnx.TrainingInfoProto; + + /** + * Encodes the specified TrainingInfoProto message. Does not implicitly {@link onnx.TrainingInfoProto.verify|verify} + * messages. + * @param message TrainingInfoProto message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: onnx.ITrainingInfoProto, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified TrainingInfoProto message, length delimited. Does not implicitly {@link + * onnx.TrainingInfoProto.verify|verify} messages. + * @param message TrainingInfoProto message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: onnx.ITrainingInfoProto, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a TrainingInfoProto message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns TrainingInfoProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): onnx.TrainingInfoProto; + + /** + * Decodes a TrainingInfoProto message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns TrainingInfoProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): onnx.TrainingInfoProto; + + /** + * Verifies a TrainingInfoProto message. 
+ * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: {[k: string]: any}): (string|null); + + /** + * Creates a TrainingInfoProto message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns TrainingInfoProto + */ + public static fromObject(object: {[k: string]: any}): onnx.TrainingInfoProto; + + /** + * Creates a plain object from a TrainingInfoProto message. Also converts values to other types if specified. + * @param message TrainingInfoProto + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: onnx.TrainingInfoProto, options?: $protobuf.IConversionOptions): {[k: string]: any}; + + /** + * Converts this TrainingInfoProto to JSON. + * @returns JSON object + */ + public toJSON(): {[k: string]: any}; + + /** + * Gets the default type url for TrainingInfoProto + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a ModelProto. */ + interface IModelProto { + /** ModelProto irVersion */ + irVersion?: (number|Long|null); + + /** ModelProto opsetImport */ + opsetImport?: (onnx.IOperatorSetIdProto[]|null); + + /** ModelProto producerName */ + producerName?: (string|null); + + /** ModelProto producerVersion */ + producerVersion?: (string|null); + + /** ModelProto domain */ + domain?: (string|null); + + /** ModelProto modelVersion */ + modelVersion?: (number|Long|null); + + /** ModelProto docString */ + docString?: (string|null); + + /** ModelProto graph */ + graph?: (onnx.IGraphProto|null); + + /** ModelProto metadataProps */ + metadataProps?: (onnx.IStringStringEntryProto[]|null); + + /** ModelProto trainingInfo */ + trainingInfo?: (onnx.ITrainingInfoProto[]|null); + + /** ModelProto functions */ + functions?: (onnx.IFunctionProto[]|null); + } + + /** Represents a ModelProto. */ + class ModelProto implements IModelProto { + /** + * Constructs a new ModelProto. + * @param [properties] Properties to set + */ + constructor(properties?: onnx.IModelProto); + + /** ModelProto irVersion. */ + public irVersion: (number|Long); + + /** ModelProto opsetImport. */ + public opsetImport: onnx.IOperatorSetIdProto[]; + + /** ModelProto producerName. */ + public producerName: string; + + /** ModelProto producerVersion. */ + public producerVersion: string; + + /** ModelProto domain. */ + public domain: string; + + /** ModelProto modelVersion. */ + public modelVersion: (number|Long); + + /** ModelProto docString. */ + public docString: string; + + /** ModelProto graph. */ + public graph?: (onnx.IGraphProto|null); + + /** ModelProto metadataProps. */ + public metadataProps: onnx.IStringStringEntryProto[]; + + /** ModelProto trainingInfo. */ + public trainingInfo: onnx.ITrainingInfoProto[]; + + /** ModelProto functions. */ + public functions: onnx.IFunctionProto[]; + + /** + * Creates a new ModelProto instance using the specified properties. + * @param [properties] Properties to set + * @returns ModelProto instance + */ + public static create(properties?: onnx.IModelProto): onnx.ModelProto; + + /** + * Encodes the specified ModelProto message. Does not implicitly {@link onnx.ModelProto.verify|verify} messages. 
+ * @param message ModelProto message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: onnx.IModelProto, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ModelProto message, length delimited. Does not implicitly {@link + * onnx.ModelProto.verify|verify} messages. + * @param message ModelProto message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: onnx.IModelProto, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a ModelProto message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ModelProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): onnx.ModelProto; + + /** + * Decodes a ModelProto message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ModelProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): onnx.ModelProto; + + /** + * Verifies a ModelProto message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: {[k: string]: any}): (string|null); + + /** + * Creates a ModelProto message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ModelProto + */ + public static fromObject(object: {[k: string]: any}): onnx.ModelProto; + + /** + * Creates a plain object from a ModelProto message. Also converts values to other types if specified. + * @param message ModelProto + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: onnx.ModelProto, options?: $protobuf.IConversionOptions): {[k: string]: any}; + + /** + * Converts this ModelProto to JSON. + * @returns JSON object + */ + public toJSON(): {[k: string]: any}; + + /** + * Gets the default type url for ModelProto + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a StringStringEntryProto. */ + interface IStringStringEntryProto { + /** StringStringEntryProto key */ + key?: (string|null); + + /** StringStringEntryProto value */ + value?: (string|null); + } + + /** Represents a StringStringEntryProto. */ + class StringStringEntryProto implements IStringStringEntryProto { + /** + * Constructs a new StringStringEntryProto. + * @param [properties] Properties to set + */ + constructor(properties?: onnx.IStringStringEntryProto); + + /** StringStringEntryProto key. */ + public key: string; + + /** StringStringEntryProto value. */ + public value: string; + + /** + * Creates a new StringStringEntryProto instance using the specified properties. 
+ * @param [properties] Properties to set + * @returns StringStringEntryProto instance + */ + public static create(properties?: onnx.IStringStringEntryProto): onnx.StringStringEntryProto; + + /** + * Encodes the specified StringStringEntryProto message. Does not implicitly {@link + * onnx.StringStringEntryProto.verify|verify} messages. + * @param message StringStringEntryProto message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: onnx.IStringStringEntryProto, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified StringStringEntryProto message, length delimited. Does not implicitly {@link + * onnx.StringStringEntryProto.verify|verify} messages. + * @param message StringStringEntryProto message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: onnx.IStringStringEntryProto, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a StringStringEntryProto message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns StringStringEntryProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): onnx.StringStringEntryProto; + + /** + * Decodes a StringStringEntryProto message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns StringStringEntryProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): onnx.StringStringEntryProto; + + /** + * Verifies a StringStringEntryProto message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: {[k: string]: any}): (string|null); + + /** + * Creates a StringStringEntryProto message from a plain object. Also converts values to their respective internal + * types. + * @param object Plain object + * @returns StringStringEntryProto + */ + public static fromObject(object: {[k: string]: any}): onnx.StringStringEntryProto; + + /** + * Creates a plain object from a StringStringEntryProto message. Also converts values to other types if specified. + * @param message StringStringEntryProto + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: onnx.StringStringEntryProto, options?: $protobuf.IConversionOptions): + {[k: string]: any}; + + /** + * Converts this StringStringEntryProto to JSON. + * @returns JSON object + */ + public toJSON(): {[k: string]: any}; + + /** + * Gets the default type url for StringStringEntryProto + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a TensorAnnotation. */ + interface ITensorAnnotation { + /** TensorAnnotation tensorName */ + tensorName?: (string|null); + + /** TensorAnnotation quantParameterTensorNames */ + quantParameterTensorNames?: (onnx.IStringStringEntryProto[]|null); + } + + /** Represents a TensorAnnotation. 
*/ + class TensorAnnotation implements ITensorAnnotation { + /** + * Constructs a new TensorAnnotation. + * @param [properties] Properties to set + */ + constructor(properties?: onnx.ITensorAnnotation); + + /** TensorAnnotation tensorName. */ + public tensorName: string; + + /** TensorAnnotation quantParameterTensorNames. */ + public quantParameterTensorNames: onnx.IStringStringEntryProto[]; + + /** + * Creates a new TensorAnnotation instance using the specified properties. + * @param [properties] Properties to set + * @returns TensorAnnotation instance + */ + public static create(properties?: onnx.ITensorAnnotation): onnx.TensorAnnotation; + + /** + * Encodes the specified TensorAnnotation message. Does not implicitly {@link onnx.TensorAnnotation.verify|verify} + * messages. + * @param message TensorAnnotation message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: onnx.ITensorAnnotation, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified TensorAnnotation message, length delimited. Does not implicitly {@link + * onnx.TensorAnnotation.verify|verify} messages. + * @param message TensorAnnotation message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: onnx.ITensorAnnotation, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a TensorAnnotation message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns TensorAnnotation + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): onnx.TensorAnnotation; + + /** + * Decodes a TensorAnnotation message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns TensorAnnotation + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): onnx.TensorAnnotation; + + /** + * Verifies a TensorAnnotation message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: {[k: string]: any}): (string|null); + + /** + * Creates a TensorAnnotation message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns TensorAnnotation + */ + public static fromObject(object: {[k: string]: any}): onnx.TensorAnnotation; + + /** + * Creates a plain object from a TensorAnnotation message. Also converts values to other types if specified. + * @param message TensorAnnotation + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: onnx.TensorAnnotation, options?: $protobuf.IConversionOptions): {[k: string]: any}; + + /** + * Converts this TensorAnnotation to JSON. 
+ * @returns JSON object + */ + public toJSON(): {[k: string]: any}; + + /** + * Gets the default type url for TensorAnnotation + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a GraphProto. */ + interface IGraphProto { + /** GraphProto node */ + node?: (onnx.INodeProto[]|null); + + /** GraphProto name */ + name?: (string|null); + + /** GraphProto initializer */ + initializer?: (onnx.ITensorProto[]|null); + + /** GraphProto sparseInitializer */ + sparseInitializer?: (onnx.ISparseTensorProto[]|null); + + /** GraphProto docString */ + docString?: (string|null); + + /** GraphProto input */ + input?: (onnx.IValueInfoProto[]|null); + + /** GraphProto output */ + output?: (onnx.IValueInfoProto[]|null); + + /** GraphProto valueInfo */ + valueInfo?: (onnx.IValueInfoProto[]|null); + + /** GraphProto quantizationAnnotation */ + quantizationAnnotation?: (onnx.ITensorAnnotation[]|null); + } + + /** Represents a GraphProto. */ + class GraphProto implements IGraphProto { + /** + * Constructs a new GraphProto. + * @param [properties] Properties to set + */ + constructor(properties?: onnx.IGraphProto); + + /** GraphProto node. */ + public node: onnx.INodeProto[]; + + /** GraphProto name. */ + public name: string; + + /** GraphProto initializer. */ + public initializer: onnx.ITensorProto[]; + + /** GraphProto sparseInitializer. */ + public sparseInitializer: onnx.ISparseTensorProto[]; + + /** GraphProto docString. */ + public docString: string; + + /** GraphProto input. */ + public input: onnx.IValueInfoProto[]; + + /** GraphProto output. */ + public output: onnx.IValueInfoProto[]; + + /** GraphProto valueInfo. */ + public valueInfo: onnx.IValueInfoProto[]; + + /** GraphProto quantizationAnnotation. */ + public quantizationAnnotation: onnx.ITensorAnnotation[]; + + /** + * Creates a new GraphProto instance using the specified properties. + * @param [properties] Properties to set + * @returns GraphProto instance + */ + public static create(properties?: onnx.IGraphProto): onnx.GraphProto; + + /** + * Encodes the specified GraphProto message. Does not implicitly {@link onnx.GraphProto.verify|verify} messages. + * @param message GraphProto message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: onnx.IGraphProto, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified GraphProto message, length delimited. Does not implicitly {@link + * onnx.GraphProto.verify|verify} messages. + * @param message GraphProto message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: onnx.IGraphProto, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a GraphProto message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns GraphProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): onnx.GraphProto; + + /** + * Decodes a GraphProto message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns GraphProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): onnx.GraphProto; + + /** + * Verifies a GraphProto message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: {[k: string]: any}): (string|null); + + /** + * Creates a GraphProto message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns GraphProto + */ + public static fromObject(object: {[k: string]: any}): onnx.GraphProto; + + /** + * Creates a plain object from a GraphProto message. Also converts values to other types if specified. + * @param message GraphProto + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: onnx.GraphProto, options?: $protobuf.IConversionOptions): {[k: string]: any}; + + /** + * Converts this GraphProto to JSON. + * @returns JSON object + */ + public toJSON(): {[k: string]: any}; + + /** + * Gets the default type url for GraphProto + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a TensorProto. */ + interface ITensorProto { + /** TensorProto dims */ + dims?: ((number | Long)[]|null); + + /** TensorProto dataType */ + dataType?: (number|null); + + /** TensorProto segment */ + segment?: (onnx.TensorProto.ISegment|null); + + /** TensorProto floatData */ + floatData?: (number[]|null); + + /** TensorProto int32Data */ + int32Data?: (number[]|null); + + /** TensorProto stringData */ + stringData?: (Uint8Array[]|null); + + /** TensorProto int64Data */ + int64Data?: ((number | Long)[]|null); + + /** TensorProto name */ + name?: (string|null); + + /** TensorProto docString */ + docString?: (string|null); + + /** TensorProto rawData */ + rawData?: (Uint8Array|null); + + /** TensorProto externalData */ + externalData?: (onnx.IStringStringEntryProto[]|null); + + /** TensorProto dataLocation */ + dataLocation?: (onnx.TensorProto.DataLocation|null); + + /** TensorProto doubleData */ + doubleData?: (number[]|null); + + /** TensorProto uint64Data */ + uint64Data?: ((number | Long)[]|null); + } + + /** Represents a TensorProto. */ + class TensorProto implements ITensorProto { + /** + * Constructs a new TensorProto. + * @param [properties] Properties to set + */ + constructor(properties?: onnx.ITensorProto); + + /** TensorProto dims. */ + public dims: (number|Long)[]; + + /** TensorProto dataType. */ + public dataType: number; + + /** TensorProto segment. */ + public segment?: (onnx.TensorProto.ISegment|null); + + /** TensorProto floatData. */ + public floatData: number[]; + + /** TensorProto int32Data. */ + public int32Data: number[]; + + /** TensorProto stringData. */ + public stringData: Uint8Array[]; + + /** TensorProto int64Data. */ + public int64Data: (number|Long)[]; + + /** TensorProto name. */ + public name: string; + + /** TensorProto docString. */ + public docString: string; + + /** TensorProto rawData. */ + public rawData: Uint8Array; + + /** TensorProto externalData. */ + public externalData: onnx.IStringStringEntryProto[]; + + /** TensorProto dataLocation. 
*/ + public dataLocation: onnx.TensorProto.DataLocation; + + /** TensorProto doubleData. */ + public doubleData: number[]; + + /** TensorProto uint64Data. */ + public uint64Data: (number|Long)[]; + + /** + * Creates a new TensorProto instance using the specified properties. + * @param [properties] Properties to set + * @returns TensorProto instance + */ + public static create(properties?: onnx.ITensorProto): onnx.TensorProto; + + /** + * Encodes the specified TensorProto message. Does not implicitly {@link onnx.TensorProto.verify|verify} messages. + * @param message TensorProto message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: onnx.ITensorProto, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified TensorProto message, length delimited. Does not implicitly {@link + * onnx.TensorProto.verify|verify} messages. + * @param message TensorProto message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: onnx.ITensorProto, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a TensorProto message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns TensorProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): onnx.TensorProto; + + /** + * Decodes a TensorProto message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns TensorProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): onnx.TensorProto; + + /** + * Verifies a TensorProto message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: {[k: string]: any}): (string|null); + + /** + * Creates a TensorProto message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns TensorProto + */ + public static fromObject(object: {[k: string]: any}): onnx.TensorProto; + + /** + * Creates a plain object from a TensorProto message. Also converts values to other types if specified. + * @param message TensorProto + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: onnx.TensorProto, options?: $protobuf.IConversionOptions): {[k: string]: any}; + + /** + * Converts this TensorProto to JSON. + * @returns JSON object + */ + public toJSON(): {[k: string]: any}; + + /** + * Gets the default type url for TensorProto + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + namespace TensorProto { + + /** DataType enum. 
*/ + enum DataType { + UNDEFINED = 0, + FLOAT = 1, + UINT8 = 2, + INT8 = 3, + UINT16 = 4, + INT16 = 5, + INT32 = 6, + INT64 = 7, + STRING = 8, + BOOL = 9, + FLOAT16 = 10, + DOUBLE = 11, + UINT32 = 12, + UINT64 = 13, + COMPLEX64 = 14, + COMPLEX128 = 15, + BFLOAT16 = 16, + FLOAT8E4M3FN = 17, + FLOAT8E4M3FNUZ = 18, + FLOAT8E5M2 = 19, + FLOAT8E5M2FNUZ = 20 + } + + /** Properties of a Segment. */ + interface ISegment { + /** Segment begin */ + begin?: (number|Long|null); + + /** Segment end */ + end?: (number|Long|null); + } + + /** Represents a Segment. */ + class Segment implements ISegment { + /** + * Constructs a new Segment. + * @param [properties] Properties to set + */ + constructor(properties?: onnx.TensorProto.ISegment); + + /** Segment begin. */ + public begin: (number|Long); + + /** Segment end. */ + public end: (number|Long); + + /** + * Creates a new Segment instance using the specified properties. + * @param [properties] Properties to set + * @returns Segment instance + */ + public static create(properties?: onnx.TensorProto.ISegment): onnx.TensorProto.Segment; + + /** + * Encodes the specified Segment message. Does not implicitly {@link onnx.TensorProto.Segment.verify|verify} + * messages. + * @param message Segment message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: onnx.TensorProto.ISegment, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified Segment message, length delimited. Does not implicitly {@link + * onnx.TensorProto.Segment.verify|verify} messages. + * @param message Segment message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: onnx.TensorProto.ISegment, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a Segment message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns Segment + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): onnx.TensorProto.Segment; + + /** + * Decodes a Segment message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns Segment + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): onnx.TensorProto.Segment; + + /** + * Verifies a Segment message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: {[k: string]: any}): (string|null); + + /** + * Creates a Segment message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns Segment + */ + public static fromObject(object: {[k: string]: any}): onnx.TensorProto.Segment; + + /** + * Creates a plain object from a Segment message. Also converts values to other types if specified. + * @param message Segment + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: onnx.TensorProto.Segment, options?: $protobuf.IConversionOptions): + {[k: string]: any}; + + /** + * Converts this Segment to JSON. 
+ * @returns JSON object + */ + public toJSON(): {[k: string]: any}; + + /** + * Gets the default type url for Segment + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** DataLocation enum. */ + enum DataLocation { DEFAULT = 0, EXTERNAL = 1 } + } + + /** Properties of a SparseTensorProto. */ + interface ISparseTensorProto { + /** SparseTensorProto values */ + values?: (onnx.ITensorProto|null); + + /** SparseTensorProto indices */ + indices?: (onnx.ITensorProto|null); + + /** SparseTensorProto dims */ + dims?: ((number | Long)[]|null); + } + + /** Represents a SparseTensorProto. */ + class SparseTensorProto implements ISparseTensorProto { + /** + * Constructs a new SparseTensorProto. + * @param [properties] Properties to set + */ + constructor(properties?: onnx.ISparseTensorProto); + + /** SparseTensorProto values. */ + public values?: (onnx.ITensorProto|null); + + /** SparseTensorProto indices. */ + public indices?: (onnx.ITensorProto|null); + + /** SparseTensorProto dims. */ + public dims: (number|Long)[]; + + /** + * Creates a new SparseTensorProto instance using the specified properties. + * @param [properties] Properties to set + * @returns SparseTensorProto instance + */ + public static create(properties?: onnx.ISparseTensorProto): onnx.SparseTensorProto; + + /** + * Encodes the specified SparseTensorProto message. Does not implicitly {@link onnx.SparseTensorProto.verify|verify} + * messages. + * @param message SparseTensorProto message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: onnx.ISparseTensorProto, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified SparseTensorProto message, length delimited. Does not implicitly {@link + * onnx.SparseTensorProto.verify|verify} messages. + * @param message SparseTensorProto message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: onnx.ISparseTensorProto, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a SparseTensorProto message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns SparseTensorProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): onnx.SparseTensorProto; + + /** + * Decodes a SparseTensorProto message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns SparseTensorProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): onnx.SparseTensorProto; + + /** + * Verifies a SparseTensorProto message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: {[k: string]: any}): (string|null); + + /** + * Creates a SparseTensorProto message from a plain object. Also converts values to their respective internal types. 
+ * @param object Plain object + * @returns SparseTensorProto + */ + public static fromObject(object: {[k: string]: any}): onnx.SparseTensorProto; + + /** + * Creates a plain object from a SparseTensorProto message. Also converts values to other types if specified. + * @param message SparseTensorProto + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: onnx.SparseTensorProto, options?: $protobuf.IConversionOptions): {[k: string]: any}; + + /** + * Converts this SparseTensorProto to JSON. + * @returns JSON object + */ + public toJSON(): {[k: string]: any}; + + /** + * Gets the default type url for SparseTensorProto + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a TensorShapeProto. */ + interface ITensorShapeProto { + /** TensorShapeProto dim */ + dim?: (onnx.TensorShapeProto.IDimension[]|null); + } + + /** Represents a TensorShapeProto. */ + class TensorShapeProto implements ITensorShapeProto { + /** + * Constructs a new TensorShapeProto. + * @param [properties] Properties to set + */ + constructor(properties?: onnx.ITensorShapeProto); + + /** TensorShapeProto dim. */ + public dim: onnx.TensorShapeProto.IDimension[]; + + /** + * Creates a new TensorShapeProto instance using the specified properties. + * @param [properties] Properties to set + * @returns TensorShapeProto instance + */ + public static create(properties?: onnx.ITensorShapeProto): onnx.TensorShapeProto; + + /** + * Encodes the specified TensorShapeProto message. Does not implicitly {@link onnx.TensorShapeProto.verify|verify} + * messages. + * @param message TensorShapeProto message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: onnx.ITensorShapeProto, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified TensorShapeProto message, length delimited. Does not implicitly {@link + * onnx.TensorShapeProto.verify|verify} messages. + * @param message TensorShapeProto message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: onnx.ITensorShapeProto, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a TensorShapeProto message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns TensorShapeProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): onnx.TensorShapeProto; + + /** + * Decodes a TensorShapeProto message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns TensorShapeProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): onnx.TensorShapeProto; + + /** + * Verifies a TensorShapeProto message. 
+ * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: {[k: string]: any}): (string|null); + + /** + * Creates a TensorShapeProto message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns TensorShapeProto + */ + public static fromObject(object: {[k: string]: any}): onnx.TensorShapeProto; + + /** + * Creates a plain object from a TensorShapeProto message. Also converts values to other types if specified. + * @param message TensorShapeProto + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: onnx.TensorShapeProto, options?: $protobuf.IConversionOptions): {[k: string]: any}; + + /** + * Converts this TensorShapeProto to JSON. + * @returns JSON object + */ + public toJSON(): {[k: string]: any}; + + /** + * Gets the default type url for TensorShapeProto + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + namespace TensorShapeProto { + + /** Properties of a Dimension. */ + interface IDimension { + /** Dimension dimValue */ + dimValue?: (number|Long|null); + + /** Dimension dimParam */ + dimParam?: (string|null); + + /** Dimension denotation */ + denotation?: (string|null); + } + + /** Represents a Dimension. */ + class Dimension implements IDimension { + /** + * Constructs a new Dimension. + * @param [properties] Properties to set + */ + constructor(properties?: onnx.TensorShapeProto.IDimension); + + /** Dimension dimValue. */ + public dimValue?: (number|Long|null); + + /** Dimension dimParam. */ + public dimParam?: (string|null); + + /** Dimension denotation. */ + public denotation: string; + + /** Dimension value. */ + public value?: ('dimValue'|'dimParam'); + + /** + * Creates a new Dimension instance using the specified properties. + * @param [properties] Properties to set + * @returns Dimension instance + */ + public static create(properties?: onnx.TensorShapeProto.IDimension): onnx.TensorShapeProto.Dimension; + + /** + * Encodes the specified Dimension message. Does not implicitly {@link + * onnx.TensorShapeProto.Dimension.verify|verify} messages. + * @param message Dimension message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: onnx.TensorShapeProto.IDimension, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified Dimension message, length delimited. Does not implicitly {@link + * onnx.TensorShapeProto.Dimension.verify|verify} messages. + * @param message Dimension message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: onnx.TensorShapeProto.IDimension, writer?: $protobuf.Writer): + $protobuf.Writer; + + /** + * Decodes a Dimension message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns Dimension + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): onnx.TensorShapeProto.Dimension; + + /** + * Decodes a Dimension message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns Dimension + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): onnx.TensorShapeProto.Dimension; + + /** + * Verifies a Dimension message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: {[k: string]: any}): (string|null); + + /** + * Creates a Dimension message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns Dimension + */ + public static fromObject(object: {[k: string]: any}): onnx.TensorShapeProto.Dimension; + + /** + * Creates a plain object from a Dimension message. Also converts values to other types if specified. + * @param message Dimension + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: onnx.TensorShapeProto.Dimension, options?: $protobuf.IConversionOptions): + {[k: string]: any}; + + /** + * Converts this Dimension to JSON. + * @returns JSON object + */ + public toJSON(): {[k: string]: any}; + + /** + * Gets the default type url for Dimension + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + } + + /** Properties of a TypeProto. */ + interface ITypeProto { + /** TypeProto tensorType */ + tensorType?: (onnx.TypeProto.ITensor|null); + + /** TypeProto sequenceType */ + sequenceType?: (onnx.TypeProto.ISequence|null); + + /** TypeProto mapType */ + mapType?: (onnx.TypeProto.IMap|null); + + /** TypeProto optionalType */ + optionalType?: (onnx.TypeProto.IOptional|null); + + /** TypeProto sparseTensorType */ + sparseTensorType?: (onnx.TypeProto.ISparseTensor|null); + + /** TypeProto denotation */ + denotation?: (string|null); + } + + /** Represents a TypeProto. */ + class TypeProto implements ITypeProto { + /** + * Constructs a new TypeProto. + * @param [properties] Properties to set + */ + constructor(properties?: onnx.ITypeProto); + + /** TypeProto tensorType. */ + public tensorType?: (onnx.TypeProto.ITensor|null); + + /** TypeProto sequenceType. */ + public sequenceType?: (onnx.TypeProto.ISequence|null); + + /** TypeProto mapType. */ + public mapType?: (onnx.TypeProto.IMap|null); + + /** TypeProto optionalType. */ + public optionalType?: (onnx.TypeProto.IOptional|null); + + /** TypeProto sparseTensorType. */ + public sparseTensorType?: (onnx.TypeProto.ISparseTensor|null); + + /** TypeProto denotation. */ + public denotation: string; + + /** TypeProto value. */ + public value?: ('tensorType'|'sequenceType'|'mapType'|'optionalType'|'sparseTensorType'); + + /** + * Creates a new TypeProto instance using the specified properties. + * @param [properties] Properties to set + * @returns TypeProto instance + */ + public static create(properties?: onnx.ITypeProto): onnx.TypeProto; + + /** + * Encodes the specified TypeProto message. Does not implicitly {@link onnx.TypeProto.verify|verify} messages. + * @param message TypeProto message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: onnx.ITypeProto, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified TypeProto message, length delimited. 
Does not implicitly {@link + * onnx.TypeProto.verify|verify} messages. + * @param message TypeProto message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: onnx.ITypeProto, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a TypeProto message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns TypeProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): onnx.TypeProto; + + /** + * Decodes a TypeProto message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns TypeProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): onnx.TypeProto; + + /** + * Verifies a TypeProto message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: {[k: string]: any}): (string|null); + + /** + * Creates a TypeProto message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns TypeProto + */ + public static fromObject(object: {[k: string]: any}): onnx.TypeProto; + + /** + * Creates a plain object from a TypeProto message. Also converts values to other types if specified. + * @param message TypeProto + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: onnx.TypeProto, options?: $protobuf.IConversionOptions): {[k: string]: any}; + + /** + * Converts this TypeProto to JSON. + * @returns JSON object + */ + public toJSON(): {[k: string]: any}; + + /** + * Gets the default type url for TypeProto + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + namespace TypeProto { + + /** Properties of a Tensor. */ + interface ITensor { + /** Tensor elemType */ + elemType?: (number|null); + + /** Tensor shape */ + shape?: (onnx.ITensorShapeProto|null); + } + + /** Represents a Tensor. */ + class Tensor implements ITensor { + /** + * Constructs a new Tensor. + * @param [properties] Properties to set + */ + constructor(properties?: onnx.TypeProto.ITensor); + + /** Tensor elemType. */ + public elemType: number; + + /** Tensor shape. */ + public shape?: (onnx.ITensorShapeProto|null); + + /** + * Creates a new Tensor instance using the specified properties. + * @param [properties] Properties to set + * @returns Tensor instance + */ + public static create(properties?: onnx.TypeProto.ITensor): onnx.TypeProto.Tensor; + + /** + * Encodes the specified Tensor message. Does not implicitly {@link onnx.TypeProto.Tensor.verify|verify} messages. + * @param message Tensor message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: onnx.TypeProto.ITensor, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified Tensor message, length delimited. 
Does not implicitly {@link + * onnx.TypeProto.Tensor.verify|verify} messages. + * @param message Tensor message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: onnx.TypeProto.ITensor, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a Tensor message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns Tensor + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): onnx.TypeProto.Tensor; + + /** + * Decodes a Tensor message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns Tensor + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): onnx.TypeProto.Tensor; + + /** + * Verifies a Tensor message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: {[k: string]: any}): (string|null); + + /** + * Creates a Tensor message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns Tensor + */ + public static fromObject(object: {[k: string]: any}): onnx.TypeProto.Tensor; + + /** + * Creates a plain object from a Tensor message. Also converts values to other types if specified. + * @param message Tensor + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: onnx.TypeProto.Tensor, options?: $protobuf.IConversionOptions): + {[k: string]: any}; + + /** + * Converts this Tensor to JSON. + * @returns JSON object + */ + public toJSON(): {[k: string]: any}; + + /** + * Gets the default type url for Tensor + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a Sequence. */ + interface ISequence { + /** Sequence elemType */ + elemType?: (onnx.ITypeProto|null); + } + + /** Represents a Sequence. */ + class Sequence implements ISequence { + /** + * Constructs a new Sequence. + * @param [properties] Properties to set + */ + constructor(properties?: onnx.TypeProto.ISequence); + + /** Sequence elemType. */ + public elemType?: (onnx.ITypeProto|null); + + /** + * Creates a new Sequence instance using the specified properties. + * @param [properties] Properties to set + * @returns Sequence instance + */ + public static create(properties?: onnx.TypeProto.ISequence): onnx.TypeProto.Sequence; + + /** + * Encodes the specified Sequence message. Does not implicitly {@link onnx.TypeProto.Sequence.verify|verify} + * messages. + * @param message Sequence message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: onnx.TypeProto.ISequence, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified Sequence message, length delimited. Does not implicitly {@link + * onnx.TypeProto.Sequence.verify|verify} messages. 
+ * @param message Sequence message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: onnx.TypeProto.ISequence, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a Sequence message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns Sequence + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): onnx.TypeProto.Sequence; + + /** + * Decodes a Sequence message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns Sequence + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): onnx.TypeProto.Sequence; + + /** + * Verifies a Sequence message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: {[k: string]: any}): (string|null); + + /** + * Creates a Sequence message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns Sequence + */ + public static fromObject(object: {[k: string]: any}): onnx.TypeProto.Sequence; + + /** + * Creates a plain object from a Sequence message. Also converts values to other types if specified. + * @param message Sequence + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: onnx.TypeProto.Sequence, options?: $protobuf.IConversionOptions): + {[k: string]: any}; + + /** + * Converts this Sequence to JSON. + * @returns JSON object + */ + public toJSON(): {[k: string]: any}; + + /** + * Gets the default type url for Sequence + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a Map. */ + interface IMap { + /** Map keyType */ + keyType?: (number|null); + + /** Map valueType */ + valueType?: (onnx.ITypeProto|null); + } + + /** Represents a Map. */ + class Map implements IMap { + /** + * Constructs a new Map. + * @param [properties] Properties to set + */ + constructor(properties?: onnx.TypeProto.IMap); + + /** Map keyType. */ + public keyType: number; + + /** Map valueType. */ + public valueType?: (onnx.ITypeProto|null); + + /** + * Creates a new Map instance using the specified properties. + * @param [properties] Properties to set + * @returns Map instance + */ + public static create(properties?: onnx.TypeProto.IMap): onnx.TypeProto.Map; + + /** + * Encodes the specified Map message. Does not implicitly {@link onnx.TypeProto.Map.verify|verify} messages. + * @param message Map message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: onnx.TypeProto.IMap, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified Map message, length delimited. Does not implicitly {@link + * onnx.TypeProto.Map.verify|verify} messages. 
+ * @param message Map message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: onnx.TypeProto.IMap, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a Map message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns Map + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): onnx.TypeProto.Map; + + /** + * Decodes a Map message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns Map + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): onnx.TypeProto.Map; + + /** + * Verifies a Map message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: {[k: string]: any}): (string|null); + + /** + * Creates a Map message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns Map + */ + public static fromObject(object: {[k: string]: any}): onnx.TypeProto.Map; + + /** + * Creates a plain object from a Map message. Also converts values to other types if specified. + * @param message Map + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: onnx.TypeProto.Map, options?: $protobuf.IConversionOptions): {[k: string]: any}; + + /** + * Converts this Map to JSON. + * @returns JSON object + */ + public toJSON(): {[k: string]: any}; + + /** + * Gets the default type url for Map + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of an Optional. */ + interface IOptional { + /** Optional elemType */ + elemType?: (onnx.ITypeProto|null); + } + + /** Represents an Optional. */ + class Optional implements IOptional { + /** + * Constructs a new Optional. + * @param [properties] Properties to set + */ + constructor(properties?: onnx.TypeProto.IOptional); + + /** Optional elemType. */ + public elemType?: (onnx.ITypeProto|null); + + /** + * Creates a new Optional instance using the specified properties. + * @param [properties] Properties to set + * @returns Optional instance + */ + public static create(properties?: onnx.TypeProto.IOptional): onnx.TypeProto.Optional; + + /** + * Encodes the specified Optional message. Does not implicitly {@link onnx.TypeProto.Optional.verify|verify} + * messages. + * @param message Optional message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: onnx.TypeProto.IOptional, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified Optional message, length delimited. Does not implicitly {@link + * onnx.TypeProto.Optional.verify|verify} messages. 
+ * @param message Optional message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: onnx.TypeProto.IOptional, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an Optional message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns Optional + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): onnx.TypeProto.Optional; + + /** + * Decodes an Optional message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns Optional + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): onnx.TypeProto.Optional; + + /** + * Verifies an Optional message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: {[k: string]: any}): (string|null); + + /** + * Creates an Optional message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns Optional + */ + public static fromObject(object: {[k: string]: any}): onnx.TypeProto.Optional; + + /** + * Creates a plain object from an Optional message. Also converts values to other types if specified. + * @param message Optional + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: onnx.TypeProto.Optional, options?: $protobuf.IConversionOptions): + {[k: string]: any}; + + /** + * Converts this Optional to JSON. + * @returns JSON object + */ + public toJSON(): {[k: string]: any}; + + /** + * Gets the default type url for Optional + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a SparseTensor. */ + interface ISparseTensor { + /** SparseTensor elemType */ + elemType?: (number|null); + + /** SparseTensor shape */ + shape?: (onnx.ITensorShapeProto|null); + } + + /** Represents a SparseTensor. */ + class SparseTensor implements ISparseTensor { + /** + * Constructs a new SparseTensor. + * @param [properties] Properties to set + */ + constructor(properties?: onnx.TypeProto.ISparseTensor); + + /** SparseTensor elemType. */ + public elemType: number; + + /** SparseTensor shape. */ + public shape?: (onnx.ITensorShapeProto|null); + + /** + * Creates a new SparseTensor instance using the specified properties. + * @param [properties] Properties to set + * @returns SparseTensor instance + */ + public static create(properties?: onnx.TypeProto.ISparseTensor): onnx.TypeProto.SparseTensor; + + /** + * Encodes the specified SparseTensor message. Does not implicitly {@link + * onnx.TypeProto.SparseTensor.verify|verify} messages. + * @param message SparseTensor message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: onnx.TypeProto.ISparseTensor, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified SparseTensor message, length delimited. 
Does not implicitly {@link + * onnx.TypeProto.SparseTensor.verify|verify} messages. + * @param message SparseTensor message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: onnx.TypeProto.ISparseTensor, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a SparseTensor message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns SparseTensor + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): onnx.TypeProto.SparseTensor; + + /** + * Decodes a SparseTensor message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns SparseTensor + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): onnx.TypeProto.SparseTensor; + + /** + * Verifies a SparseTensor message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: {[k: string]: any}): (string|null); + + /** + * Creates a SparseTensor message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns SparseTensor + */ + public static fromObject(object: {[k: string]: any}): onnx.TypeProto.SparseTensor; + + /** + * Creates a plain object from a SparseTensor message. Also converts values to other types if specified. + * @param message SparseTensor + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: onnx.TypeProto.SparseTensor, options?: $protobuf.IConversionOptions): + {[k: string]: any}; + + /** + * Converts this SparseTensor to JSON. + * @returns JSON object + */ + public toJSON(): {[k: string]: any}; + + /** + * Gets the default type url for SparseTensor + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + } + + /** Properties of an OperatorSetIdProto. */ + interface IOperatorSetIdProto { + /** OperatorSetIdProto domain */ + domain?: (string|null); + + /** OperatorSetIdProto version */ + version?: (number|Long|null); + } + + /** Represents an OperatorSetIdProto. */ + class OperatorSetIdProto implements IOperatorSetIdProto { + /** + * Constructs a new OperatorSetIdProto. + * @param [properties] Properties to set + */ + constructor(properties?: onnx.IOperatorSetIdProto); + + /** OperatorSetIdProto domain. */ + public domain: string; + + /** OperatorSetIdProto version. */ + public version: (number|Long); + + /** + * Creates a new OperatorSetIdProto instance using the specified properties. + * @param [properties] Properties to set + * @returns OperatorSetIdProto instance + */ + public static create(properties?: onnx.IOperatorSetIdProto): onnx.OperatorSetIdProto; + + /** + * Encodes the specified OperatorSetIdProto message. Does not implicitly {@link + * onnx.OperatorSetIdProto.verify|verify} messages. 
+ * @param message OperatorSetIdProto message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: onnx.IOperatorSetIdProto, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified OperatorSetIdProto message, length delimited. Does not implicitly {@link + * onnx.OperatorSetIdProto.verify|verify} messages. + * @param message OperatorSetIdProto message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: onnx.IOperatorSetIdProto, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an OperatorSetIdProto message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns OperatorSetIdProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): onnx.OperatorSetIdProto; + + /** + * Decodes an OperatorSetIdProto message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns OperatorSetIdProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): onnx.OperatorSetIdProto; + + /** + * Verifies an OperatorSetIdProto message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: {[k: string]: any}): (string|null); + + /** + * Creates an OperatorSetIdProto message from a plain object. Also converts values to their respective internal + * types. + * @param object Plain object + * @returns OperatorSetIdProto + */ + public static fromObject(object: {[k: string]: any}): onnx.OperatorSetIdProto; + + /** + * Creates a plain object from an OperatorSetIdProto message. Also converts values to other types if specified. + * @param message OperatorSetIdProto + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: onnx.OperatorSetIdProto, options?: $protobuf.IConversionOptions): + {[k: string]: any}; + + /** + * Converts this OperatorSetIdProto to JSON. + * @returns JSON object + */ + public toJSON(): {[k: string]: any}; + + /** + * Gets the default type url for OperatorSetIdProto + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** OperatorStatus enum. */ + enum OperatorStatus { EXPERIMENTAL = 0, STABLE = 1 } + + /** Properties of a FunctionProto. 
*/ + interface IFunctionProto { + /** FunctionProto name */ + name?: (string|null); + + /** FunctionProto input */ + input?: (string[]|null); + + /** FunctionProto output */ + output?: (string[]|null); + + /** FunctionProto attribute */ + attribute?: (string[]|null); + + /** FunctionProto attributeProto */ + attributeProto?: (onnx.IAttributeProto[]|null); + + /** FunctionProto node */ + node?: (onnx.INodeProto[]|null); + + /** FunctionProto docString */ + docString?: (string|null); + + /** FunctionProto opsetImport */ + opsetImport?: (onnx.IOperatorSetIdProto[]|null); + + /** FunctionProto domain */ + domain?: (string|null); + } + + /** Represents a FunctionProto. */ + class FunctionProto implements IFunctionProto { + /** + * Constructs a new FunctionProto. + * @param [properties] Properties to set + */ + constructor(properties?: onnx.IFunctionProto); + + /** FunctionProto name. */ + public name: string; + + /** FunctionProto input. */ + public input: string[]; + + /** FunctionProto output. */ + public output: string[]; + + /** FunctionProto attribute. */ + public attribute: string[]; + + /** FunctionProto attributeProto. */ + public attributeProto: onnx.IAttributeProto[]; + + /** FunctionProto node. */ + public node: onnx.INodeProto[]; + + /** FunctionProto docString. */ + public docString: string; + + /** FunctionProto opsetImport. */ + public opsetImport: onnx.IOperatorSetIdProto[]; + + /** FunctionProto domain. */ + public domain: string; + + /** + * Creates a new FunctionProto instance using the specified properties. + * @param [properties] Properties to set + * @returns FunctionProto instance + */ + public static create(properties?: onnx.IFunctionProto): onnx.FunctionProto; + + /** + * Encodes the specified FunctionProto message. Does not implicitly {@link onnx.FunctionProto.verify|verify} + * messages. + * @param message FunctionProto message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: onnx.IFunctionProto, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified FunctionProto message, length delimited. Does not implicitly {@link + * onnx.FunctionProto.verify|verify} messages. + * @param message FunctionProto message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: onnx.IFunctionProto, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a FunctionProto message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns FunctionProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): onnx.FunctionProto; + + /** + * Decodes a FunctionProto message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns FunctionProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): onnx.FunctionProto; + + /** + * Verifies a FunctionProto message. 
+ * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: {[k: string]: any}): (string|null); + + /** + * Creates a FunctionProto message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns FunctionProto + */ + public static fromObject(object: {[k: string]: any}): onnx.FunctionProto; + + /** + * Creates a plain object from a FunctionProto message. Also converts values to other types if specified. + * @param message FunctionProto + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: onnx.FunctionProto, options?: $protobuf.IConversionOptions): {[k: string]: any}; + + /** + * Converts this FunctionProto to JSON. + * @returns JSON object + */ + public toJSON(): {[k: string]: any}; + + /** + * Gets the default type url for FunctionProto + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } +} diff --git a/js/node/test/ort-schema/protobuf/onnx.js b/js/node/test/ort-schema/protobuf/onnx.js new file mode 100644 index 0000000000000..681855132d4e8 --- /dev/null +++ b/js/node/test/ort-schema/protobuf/onnx.js @@ -0,0 +1,7658 @@ +/*eslint-disable block-scoped-var, id-length, no-control-regex, no-magic-numbers, no-prototype-builtins, no-redeclare, no-shadow, no-var, sort-vars*/ +"use strict"; + +var $protobuf = require("protobufjs/minimal"); + +// Common aliases +var $Reader = $protobuf.Reader, $Writer = $protobuf.Writer, $util = $protobuf.util; + +// Exported root namespace +var $root = $protobuf.roots["default"] || ($protobuf.roots["default"] = {}); + +$root.onnx = (function() { + + /** + * Namespace onnx. + * @exports onnx + * @namespace + */ + var onnx = {}; + + /** + * Version enum. + * @name onnx.Version + * @enum {number} + * @property {number} _START_VERSION=0 _START_VERSION value + * @property {number} IR_VERSION_2017_10_10=1 IR_VERSION_2017_10_10 value + * @property {number} IR_VERSION_2017_10_30=2 IR_VERSION_2017_10_30 value + * @property {number} IR_VERSION_2017_11_3=3 IR_VERSION_2017_11_3 value + * @property {number} IR_VERSION_2019_1_22=4 IR_VERSION_2019_1_22 value + * @property {number} IR_VERSION_2019_3_18=5 IR_VERSION_2019_3_18 value + * @property {number} IR_VERSION_2019_9_19=6 IR_VERSION_2019_9_19 value + * @property {number} IR_VERSION_2020_5_8=7 IR_VERSION_2020_5_8 value + * @property {number} IR_VERSION_2021_7_30=8 IR_VERSION_2021_7_30 value + * @property {number} IR_VERSION=9 IR_VERSION value + */ + onnx.Version = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "_START_VERSION"] = 0; + values[valuesById[1] = "IR_VERSION_2017_10_10"] = 1; + values[valuesById[2] = "IR_VERSION_2017_10_30"] = 2; + values[valuesById[3] = "IR_VERSION_2017_11_3"] = 3; + values[valuesById[4] = "IR_VERSION_2019_1_22"] = 4; + values[valuesById[5] = "IR_VERSION_2019_3_18"] = 5; + values[valuesById[6] = "IR_VERSION_2019_9_19"] = 6; + values[valuesById[7] = "IR_VERSION_2020_5_8"] = 7; + values[valuesById[8] = "IR_VERSION_2021_7_30"] = 8; + values[valuesById[9] = "IR_VERSION"] = 9; + return values; + })(); + + onnx.AttributeProto = (function() { + + /** + * Properties of an AttributeProto. 
+ * @memberof onnx + * @interface IAttributeProto + * @property {string|null} [name] AttributeProto name + * @property {string|null} [refAttrName] AttributeProto refAttrName + * @property {string|null} [docString] AttributeProto docString + * @property {onnx.AttributeProto.AttributeType|null} [type] AttributeProto type + * @property {number|null} [f] AttributeProto f + * @property {number|Long|null} [i] AttributeProto i + * @property {Uint8Array|null} [s] AttributeProto s + * @property {onnx.ITensorProto|null} [t] AttributeProto t + * @property {onnx.IGraphProto|null} [g] AttributeProto g + * @property {onnx.ISparseTensorProto|null} [sparseTensor] AttributeProto sparseTensor + * @property {onnx.ITypeProto|null} [tp] AttributeProto tp + * @property {Array.|null} [floats] AttributeProto floats + * @property {Array.|null} [ints] AttributeProto ints + * @property {Array.|null} [strings] AttributeProto strings + * @property {Array.|null} [tensors] AttributeProto tensors + * @property {Array.|null} [graphs] AttributeProto graphs + * @property {Array.|null} [sparseTensors] AttributeProto sparseTensors + * @property {Array.|null} [typeProtos] AttributeProto typeProtos + */ + + /** + * Constructs a new AttributeProto. + * @memberof onnx + * @classdesc Represents an AttributeProto. + * @implements IAttributeProto + * @constructor + * @param {onnx.IAttributeProto=} [properties] Properties to set + */ + function AttributeProto(properties) { + this.floats = []; + this.ints = []; + this.strings = []; + this.tensors = []; + this.graphs = []; + this.sparseTensors = []; + this.typeProtos = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * AttributeProto name. + * @member {string} name + * @memberof onnx.AttributeProto + * @instance + */ + AttributeProto.prototype.name = ""; + + /** + * AttributeProto refAttrName. + * @member {string} refAttrName + * @memberof onnx.AttributeProto + * @instance + */ + AttributeProto.prototype.refAttrName = ""; + + /** + * AttributeProto docString. + * @member {string} docString + * @memberof onnx.AttributeProto + * @instance + */ + AttributeProto.prototype.docString = ""; + + /** + * AttributeProto type. + * @member {onnx.AttributeProto.AttributeType} type + * @memberof onnx.AttributeProto + * @instance + */ + AttributeProto.prototype.type = 0; + + /** + * AttributeProto f. + * @member {number} f + * @memberof onnx.AttributeProto + * @instance + */ + AttributeProto.prototype.f = 0; + + /** + * AttributeProto i. + * @member {number|Long} i + * @memberof onnx.AttributeProto + * @instance + */ + AttributeProto.prototype.i = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * AttributeProto s. + * @member {Uint8Array} s + * @memberof onnx.AttributeProto + * @instance + */ + AttributeProto.prototype.s = $util.newBuffer([]); + + /** + * AttributeProto t. + * @member {onnx.ITensorProto|null|undefined} t + * @memberof onnx.AttributeProto + * @instance + */ + AttributeProto.prototype.t = null; + + /** + * AttributeProto g. + * @member {onnx.IGraphProto|null|undefined} g + * @memberof onnx.AttributeProto + * @instance + */ + AttributeProto.prototype.g = null; + + /** + * AttributeProto sparseTensor. + * @member {onnx.ISparseTensorProto|null|undefined} sparseTensor + * @memberof onnx.AttributeProto + * @instance + */ + AttributeProto.prototype.sparseTensor = null; + + /** + * AttributeProto tp. 
+ * @member {onnx.ITypeProto|null|undefined} tp + * @memberof onnx.AttributeProto + * @instance + */ + AttributeProto.prototype.tp = null; + + /** + * AttributeProto floats. + * @member {Array.} floats + * @memberof onnx.AttributeProto + * @instance + */ + AttributeProto.prototype.floats = $util.emptyArray; + + /** + * AttributeProto ints. + * @member {Array.} ints + * @memberof onnx.AttributeProto + * @instance + */ + AttributeProto.prototype.ints = $util.emptyArray; + + /** + * AttributeProto strings. + * @member {Array.} strings + * @memberof onnx.AttributeProto + * @instance + */ + AttributeProto.prototype.strings = $util.emptyArray; + + /** + * AttributeProto tensors. + * @member {Array.} tensors + * @memberof onnx.AttributeProto + * @instance + */ + AttributeProto.prototype.tensors = $util.emptyArray; + + /** + * AttributeProto graphs. + * @member {Array.} graphs + * @memberof onnx.AttributeProto + * @instance + */ + AttributeProto.prototype.graphs = $util.emptyArray; + + /** + * AttributeProto sparseTensors. + * @member {Array.} sparseTensors + * @memberof onnx.AttributeProto + * @instance + */ + AttributeProto.prototype.sparseTensors = $util.emptyArray; + + /** + * AttributeProto typeProtos. + * @member {Array.} typeProtos + * @memberof onnx.AttributeProto + * @instance + */ + AttributeProto.prototype.typeProtos = $util.emptyArray; + + /** + * Creates a new AttributeProto instance using the specified properties. + * @function create + * @memberof onnx.AttributeProto + * @static + * @param {onnx.IAttributeProto=} [properties] Properties to set + * @returns {onnx.AttributeProto} AttributeProto instance + */ + AttributeProto.create = function create(properties) { + return new AttributeProto(properties); + }; + + /** + * Encodes the specified AttributeProto message. Does not implicitly {@link onnx.AttributeProto.verify|verify} messages. 
+ * @function encode + * @memberof onnx.AttributeProto + * @static + * @param {onnx.IAttributeProto} message AttributeProto message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + AttributeProto.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); + if (message.f != null && Object.hasOwnProperty.call(message, "f")) + writer.uint32(/* id 2, wireType 5 =*/21).float(message.f); + if (message.i != null && Object.hasOwnProperty.call(message, "i")) + writer.uint32(/* id 3, wireType 0 =*/24).int64(message.i); + if (message.s != null && Object.hasOwnProperty.call(message, "s")) + writer.uint32(/* id 4, wireType 2 =*/34).bytes(message.s); + if (message.t != null && Object.hasOwnProperty.call(message, "t")) + $root.onnx.TensorProto.encode(message.t, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.g != null && Object.hasOwnProperty.call(message, "g")) + $root.onnx.GraphProto.encode(message.g, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); + if (message.floats != null && message.floats.length) { + writer.uint32(/* id 7, wireType 2 =*/58).fork(); + for (var i = 0; i < message.floats.length; ++i) + writer.float(message.floats[i]); + writer.ldelim(); + } + if (message.ints != null && message.ints.length) { + writer.uint32(/* id 8, wireType 2 =*/66).fork(); + for (var i = 0; i < message.ints.length; ++i) + writer.int64(message.ints[i]); + writer.ldelim(); + } + if (message.strings != null && message.strings.length) + for (var i = 0; i < message.strings.length; ++i) + writer.uint32(/* id 9, wireType 2 =*/74).bytes(message.strings[i]); + if (message.tensors != null && message.tensors.length) + for (var i = 0; i < message.tensors.length; ++i) + $root.onnx.TensorProto.encode(message.tensors[i], writer.uint32(/* id 10, wireType 2 =*/82).fork()).ldelim(); + if (message.graphs != null && message.graphs.length) + for (var i = 0; i < message.graphs.length; ++i) + $root.onnx.GraphProto.encode(message.graphs[i], writer.uint32(/* id 11, wireType 2 =*/90).fork()).ldelim(); + if (message.docString != null && Object.hasOwnProperty.call(message, "docString")) + writer.uint32(/* id 13, wireType 2 =*/106).string(message.docString); + if (message.tp != null && Object.hasOwnProperty.call(message, "tp")) + $root.onnx.TypeProto.encode(message.tp, writer.uint32(/* id 14, wireType 2 =*/114).fork()).ldelim(); + if (message.typeProtos != null && message.typeProtos.length) + for (var i = 0; i < message.typeProtos.length; ++i) + $root.onnx.TypeProto.encode(message.typeProtos[i], writer.uint32(/* id 15, wireType 2 =*/122).fork()).ldelim(); + if (message.type != null && Object.hasOwnProperty.call(message, "type")) + writer.uint32(/* id 20, wireType 0 =*/160).int32(message.type); + if (message.refAttrName != null && Object.hasOwnProperty.call(message, "refAttrName")) + writer.uint32(/* id 21, wireType 2 =*/170).string(message.refAttrName); + if (message.sparseTensor != null && Object.hasOwnProperty.call(message, "sparseTensor")) + $root.onnx.SparseTensorProto.encode(message.sparseTensor, writer.uint32(/* id 22, wireType 2 =*/178).fork()).ldelim(); + if (message.sparseTensors != null && message.sparseTensors.length) + for (var i = 0; i < message.sparseTensors.length; ++i) + $root.onnx.SparseTensorProto.encode(message.sparseTensors[i], 
writer.uint32(/* id 23, wireType 2 =*/186).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified AttributeProto message, length delimited. Does not implicitly {@link onnx.AttributeProto.verify|verify} messages. + * @function encodeDelimited + * @memberof onnx.AttributeProto + * @static + * @param {onnx.IAttributeProto} message AttributeProto message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + AttributeProto.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an AttributeProto message from the specified reader or buffer. + * @function decode + * @memberof onnx.AttributeProto + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {onnx.AttributeProto} AttributeProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + AttributeProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.onnx.AttributeProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.name = reader.string(); + break; + } + case 21: { + message.refAttrName = reader.string(); + break; + } + case 13: { + message.docString = reader.string(); + break; + } + case 20: { + message.type = reader.int32(); + break; + } + case 2: { + message.f = reader.float(); + break; + } + case 3: { + message.i = reader.int64(); + break; + } + case 4: { + message.s = reader.bytes(); + break; + } + case 5: { + message.t = $root.onnx.TensorProto.decode(reader, reader.uint32()); + break; + } + case 6: { + message.g = $root.onnx.GraphProto.decode(reader, reader.uint32()); + break; + } + case 22: { + message.sparseTensor = $root.onnx.SparseTensorProto.decode(reader, reader.uint32()); + break; + } + case 14: { + message.tp = $root.onnx.TypeProto.decode(reader, reader.uint32()); + break; + } + case 7: { + if (!(message.floats && message.floats.length)) + message.floats = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.floats.push(reader.float()); + } else + message.floats.push(reader.float()); + break; + } + case 8: { + if (!(message.ints && message.ints.length)) + message.ints = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.ints.push(reader.int64()); + } else + message.ints.push(reader.int64()); + break; + } + case 9: { + if (!(message.strings && message.strings.length)) + message.strings = []; + message.strings.push(reader.bytes()); + break; + } + case 10: { + if (!(message.tensors && message.tensors.length)) + message.tensors = []; + message.tensors.push($root.onnx.TensorProto.decode(reader, reader.uint32())); + break; + } + case 11: { + if (!(message.graphs && message.graphs.length)) + message.graphs = []; + message.graphs.push($root.onnx.GraphProto.decode(reader, reader.uint32())); + break; + } + case 23: { + if (!(message.sparseTensors && message.sparseTensors.length)) + message.sparseTensors = []; + message.sparseTensors.push($root.onnx.SparseTensorProto.decode(reader, reader.uint32())); + break; + } + case 15: { + if 
(!(message.typeProtos && message.typeProtos.length)) + message.typeProtos = []; + message.typeProtos.push($root.onnx.TypeProto.decode(reader, reader.uint32())); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an AttributeProto message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof onnx.AttributeProto + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {onnx.AttributeProto} AttributeProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + AttributeProto.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an AttributeProto message. + * @function verify + * @memberof onnx.AttributeProto + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + AttributeProto.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.refAttrName != null && message.hasOwnProperty("refAttrName")) + if (!$util.isString(message.refAttrName)) + return "refAttrName: string expected"; + if (message.docString != null && message.hasOwnProperty("docString")) + if (!$util.isString(message.docString)) + return "docString: string expected"; + if (message.type != null && message.hasOwnProperty("type")) + switch (message.type) { + default: + return "type: enum value expected"; + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + case 11: + case 13: + case 6: + case 7: + case 8: + case 9: + case 10: + case 12: + case 14: + break; + } + if (message.f != null && message.hasOwnProperty("f")) + if (typeof message.f !== "number") + return "f: number expected"; + if (message.i != null && message.hasOwnProperty("i")) + if (!$util.isInteger(message.i) && !(message.i && $util.isInteger(message.i.low) && $util.isInteger(message.i.high))) + return "i: integer|Long expected"; + if (message.s != null && message.hasOwnProperty("s")) + if (!(message.s && typeof message.s.length === "number" || $util.isString(message.s))) + return "s: buffer expected"; + if (message.t != null && message.hasOwnProperty("t")) { + var error = $root.onnx.TensorProto.verify(message.t); + if (error) + return "t." + error; + } + if (message.g != null && message.hasOwnProperty("g")) { + var error = $root.onnx.GraphProto.verify(message.g); + if (error) + return "g." + error; + } + if (message.sparseTensor != null && message.hasOwnProperty("sparseTensor")) { + var error = $root.onnx.SparseTensorProto.verify(message.sparseTensor); + if (error) + return "sparseTensor." + error; + } + if (message.tp != null && message.hasOwnProperty("tp")) { + var error = $root.onnx.TypeProto.verify(message.tp); + if (error) + return "tp." 
+ error; + } + if (message.floats != null && message.hasOwnProperty("floats")) { + if (!Array.isArray(message.floats)) + return "floats: array expected"; + for (var i = 0; i < message.floats.length; ++i) + if (typeof message.floats[i] !== "number") + return "floats: number[] expected"; + } + if (message.ints != null && message.hasOwnProperty("ints")) { + if (!Array.isArray(message.ints)) + return "ints: array expected"; + for (var i = 0; i < message.ints.length; ++i) + if (!$util.isInteger(message.ints[i]) && !(message.ints[i] && $util.isInteger(message.ints[i].low) && $util.isInteger(message.ints[i].high))) + return "ints: integer|Long[] expected"; + } + if (message.strings != null && message.hasOwnProperty("strings")) { + if (!Array.isArray(message.strings)) + return "strings: array expected"; + for (var i = 0; i < message.strings.length; ++i) + if (!(message.strings[i] && typeof message.strings[i].length === "number" || $util.isString(message.strings[i]))) + return "strings: buffer[] expected"; + } + if (message.tensors != null && message.hasOwnProperty("tensors")) { + if (!Array.isArray(message.tensors)) + return "tensors: array expected"; + for (var i = 0; i < message.tensors.length; ++i) { + var error = $root.onnx.TensorProto.verify(message.tensors[i]); + if (error) + return "tensors." + error; + } + } + if (message.graphs != null && message.hasOwnProperty("graphs")) { + if (!Array.isArray(message.graphs)) + return "graphs: array expected"; + for (var i = 0; i < message.graphs.length; ++i) { + var error = $root.onnx.GraphProto.verify(message.graphs[i]); + if (error) + return "graphs." + error; + } + } + if (message.sparseTensors != null && message.hasOwnProperty("sparseTensors")) { + if (!Array.isArray(message.sparseTensors)) + return "sparseTensors: array expected"; + for (var i = 0; i < message.sparseTensors.length; ++i) { + var error = $root.onnx.SparseTensorProto.verify(message.sparseTensors[i]); + if (error) + return "sparseTensors." + error; + } + } + if (message.typeProtos != null && message.hasOwnProperty("typeProtos")) { + if (!Array.isArray(message.typeProtos)) + return "typeProtos: array expected"; + for (var i = 0; i < message.typeProtos.length; ++i) { + var error = $root.onnx.TypeProto.verify(message.typeProtos[i]); + if (error) + return "typeProtos." + error; + } + } + return null; + }; + + /** + * Creates an AttributeProto message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof onnx.AttributeProto + * @static + * @param {Object.} object Plain object + * @returns {onnx.AttributeProto} AttributeProto + */ + AttributeProto.fromObject = function fromObject(object) { + if (object instanceof $root.onnx.AttributeProto) + return object; + var message = new $root.onnx.AttributeProto(); + if (object.name != null) + message.name = String(object.name); + if (object.refAttrName != null) + message.refAttrName = String(object.refAttrName); + if (object.docString != null) + message.docString = String(object.docString); + switch (object.type) { + default: + if (typeof object.type === "number") { + message.type = object.type; + break; + } + break; + case "UNDEFINED": + case 0: + message.type = 0; + break; + case "FLOAT": + case 1: + message.type = 1; + break; + case "INT": + case 2: + message.type = 2; + break; + case "STRING": + case 3: + message.type = 3; + break; + case "TENSOR": + case 4: + message.type = 4; + break; + case "GRAPH": + case 5: + message.type = 5; + break; + case "SPARSE_TENSOR": + case 11: + message.type = 11; + break; + case "TYPE_PROTO": + case 13: + message.type = 13; + break; + case "FLOATS": + case 6: + message.type = 6; + break; + case "INTS": + case 7: + message.type = 7; + break; + case "STRINGS": + case 8: + message.type = 8; + break; + case "TENSORS": + case 9: + message.type = 9; + break; + case "GRAPHS": + case 10: + message.type = 10; + break; + case "SPARSE_TENSORS": + case 12: + message.type = 12; + break; + case "TYPE_PROTOS": + case 14: + message.type = 14; + break; + } + if (object.f != null) + message.f = Number(object.f); + if (object.i != null) + if ($util.Long) + (message.i = $util.Long.fromValue(object.i)).unsigned = false; + else if (typeof object.i === "string") + message.i = parseInt(object.i, 10); + else if (typeof object.i === "number") + message.i = object.i; + else if (typeof object.i === "object") + message.i = new $util.LongBits(object.i.low >>> 0, object.i.high >>> 0).toNumber(); + if (object.s != null) + if (typeof object.s === "string") + $util.base64.decode(object.s, message.s = $util.newBuffer($util.base64.length(object.s)), 0); + else if (object.s.length >= 0) + message.s = object.s; + if (object.t != null) { + if (typeof object.t !== "object") + throw TypeError(".onnx.AttributeProto.t: object expected"); + message.t = $root.onnx.TensorProto.fromObject(object.t); + } + if (object.g != null) { + if (typeof object.g !== "object") + throw TypeError(".onnx.AttributeProto.g: object expected"); + message.g = $root.onnx.GraphProto.fromObject(object.g); + } + if (object.sparseTensor != null) { + if (typeof object.sparseTensor !== "object") + throw TypeError(".onnx.AttributeProto.sparseTensor: object expected"); + message.sparseTensor = $root.onnx.SparseTensorProto.fromObject(object.sparseTensor); + } + if (object.tp != null) { + if (typeof object.tp !== "object") + throw TypeError(".onnx.AttributeProto.tp: object expected"); + message.tp = $root.onnx.TypeProto.fromObject(object.tp); + } + if (object.floats) { + if (!Array.isArray(object.floats)) + throw TypeError(".onnx.AttributeProto.floats: array expected"); + message.floats = []; + for (var i = 0; i < object.floats.length; ++i) + message.floats[i] = Number(object.floats[i]); + } + if (object.ints) { + if (!Array.isArray(object.ints)) + throw TypeError(".onnx.AttributeProto.ints: array expected"); + message.ints = []; + for (var i = 0; i < object.ints.length; ++i) + if ($util.Long) + (message.ints[i] = 
$util.Long.fromValue(object.ints[i])).unsigned = false; + else if (typeof object.ints[i] === "string") + message.ints[i] = parseInt(object.ints[i], 10); + else if (typeof object.ints[i] === "number") + message.ints[i] = object.ints[i]; + else if (typeof object.ints[i] === "object") + message.ints[i] = new $util.LongBits(object.ints[i].low >>> 0, object.ints[i].high >>> 0).toNumber(); + } + if (object.strings) { + if (!Array.isArray(object.strings)) + throw TypeError(".onnx.AttributeProto.strings: array expected"); + message.strings = []; + for (var i = 0; i < object.strings.length; ++i) + if (typeof object.strings[i] === "string") + $util.base64.decode(object.strings[i], message.strings[i] = $util.newBuffer($util.base64.length(object.strings[i])), 0); + else if (object.strings[i].length >= 0) + message.strings[i] = object.strings[i]; + } + if (object.tensors) { + if (!Array.isArray(object.tensors)) + throw TypeError(".onnx.AttributeProto.tensors: array expected"); + message.tensors = []; + for (var i = 0; i < object.tensors.length; ++i) { + if (typeof object.tensors[i] !== "object") + throw TypeError(".onnx.AttributeProto.tensors: object expected"); + message.tensors[i] = $root.onnx.TensorProto.fromObject(object.tensors[i]); + } + } + if (object.graphs) { + if (!Array.isArray(object.graphs)) + throw TypeError(".onnx.AttributeProto.graphs: array expected"); + message.graphs = []; + for (var i = 0; i < object.graphs.length; ++i) { + if (typeof object.graphs[i] !== "object") + throw TypeError(".onnx.AttributeProto.graphs: object expected"); + message.graphs[i] = $root.onnx.GraphProto.fromObject(object.graphs[i]); + } + } + if (object.sparseTensors) { + if (!Array.isArray(object.sparseTensors)) + throw TypeError(".onnx.AttributeProto.sparseTensors: array expected"); + message.sparseTensors = []; + for (var i = 0; i < object.sparseTensors.length; ++i) { + if (typeof object.sparseTensors[i] !== "object") + throw TypeError(".onnx.AttributeProto.sparseTensors: object expected"); + message.sparseTensors[i] = $root.onnx.SparseTensorProto.fromObject(object.sparseTensors[i]); + } + } + if (object.typeProtos) { + if (!Array.isArray(object.typeProtos)) + throw TypeError(".onnx.AttributeProto.typeProtos: array expected"); + message.typeProtos = []; + for (var i = 0; i < object.typeProtos.length; ++i) { + if (typeof object.typeProtos[i] !== "object") + throw TypeError(".onnx.AttributeProto.typeProtos: object expected"); + message.typeProtos[i] = $root.onnx.TypeProto.fromObject(object.typeProtos[i]); + } + } + return message; + }; + + /** + * Creates a plain object from an AttributeProto message. Also converts values to other types if specified. + * @function toObject + * @memberof onnx.AttributeProto + * @static + * @param {onnx.AttributeProto} message AttributeProto + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + AttributeProto.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.arrays || options.defaults) { + object.floats = []; + object.ints = []; + object.strings = []; + object.tensors = []; + object.graphs = []; + object.typeProtos = []; + object.sparseTensors = []; + } + if (options.defaults) { + object.name = ""; + object.f = 0; + if ($util.Long) { + var long = new $util.Long(0, 0, false); + object.i = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.i = options.longs === String ? 
"0" : 0; + if (options.bytes === String) + object.s = ""; + else { + object.s = []; + if (options.bytes !== Array) + object.s = $util.newBuffer(object.s); + } + object.t = null; + object.g = null; + object.docString = ""; + object.tp = null; + object.type = options.enums === String ? "UNDEFINED" : 0; + object.refAttrName = ""; + object.sparseTensor = null; + } + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.f != null && message.hasOwnProperty("f")) + object.f = options.json && !isFinite(message.f) ? String(message.f) : message.f; + if (message.i != null && message.hasOwnProperty("i")) + if (typeof message.i === "number") + object.i = options.longs === String ? String(message.i) : message.i; + else + object.i = options.longs === String ? $util.Long.prototype.toString.call(message.i) : options.longs === Number ? new $util.LongBits(message.i.low >>> 0, message.i.high >>> 0).toNumber() : message.i; + if (message.s != null && message.hasOwnProperty("s")) + object.s = options.bytes === String ? $util.base64.encode(message.s, 0, message.s.length) : options.bytes === Array ? Array.prototype.slice.call(message.s) : message.s; + if (message.t != null && message.hasOwnProperty("t")) + object.t = $root.onnx.TensorProto.toObject(message.t, options); + if (message.g != null && message.hasOwnProperty("g")) + object.g = $root.onnx.GraphProto.toObject(message.g, options); + if (message.floats && message.floats.length) { + object.floats = []; + for (var j = 0; j < message.floats.length; ++j) + object.floats[j] = options.json && !isFinite(message.floats[j]) ? String(message.floats[j]) : message.floats[j]; + } + if (message.ints && message.ints.length) { + object.ints = []; + for (var j = 0; j < message.ints.length; ++j) + if (typeof message.ints[j] === "number") + object.ints[j] = options.longs === String ? String(message.ints[j]) : message.ints[j]; + else + object.ints[j] = options.longs === String ? $util.Long.prototype.toString.call(message.ints[j]) : options.longs === Number ? new $util.LongBits(message.ints[j].low >>> 0, message.ints[j].high >>> 0).toNumber() : message.ints[j]; + } + if (message.strings && message.strings.length) { + object.strings = []; + for (var j = 0; j < message.strings.length; ++j) + object.strings[j] = options.bytes === String ? $util.base64.encode(message.strings[j], 0, message.strings[j].length) : options.bytes === Array ? Array.prototype.slice.call(message.strings[j]) : message.strings[j]; + } + if (message.tensors && message.tensors.length) { + object.tensors = []; + for (var j = 0; j < message.tensors.length; ++j) + object.tensors[j] = $root.onnx.TensorProto.toObject(message.tensors[j], options); + } + if (message.graphs && message.graphs.length) { + object.graphs = []; + for (var j = 0; j < message.graphs.length; ++j) + object.graphs[j] = $root.onnx.GraphProto.toObject(message.graphs[j], options); + } + if (message.docString != null && message.hasOwnProperty("docString")) + object.docString = message.docString; + if (message.tp != null && message.hasOwnProperty("tp")) + object.tp = $root.onnx.TypeProto.toObject(message.tp, options); + if (message.typeProtos && message.typeProtos.length) { + object.typeProtos = []; + for (var j = 0; j < message.typeProtos.length; ++j) + object.typeProtos[j] = $root.onnx.TypeProto.toObject(message.typeProtos[j], options); + } + if (message.type != null && message.hasOwnProperty("type")) + object.type = options.enums === String ? 
$root.onnx.AttributeProto.AttributeType[message.type] === undefined ? message.type : $root.onnx.AttributeProto.AttributeType[message.type] : message.type; + if (message.refAttrName != null && message.hasOwnProperty("refAttrName")) + object.refAttrName = message.refAttrName; + if (message.sparseTensor != null && message.hasOwnProperty("sparseTensor")) + object.sparseTensor = $root.onnx.SparseTensorProto.toObject(message.sparseTensor, options); + if (message.sparseTensors && message.sparseTensors.length) { + object.sparseTensors = []; + for (var j = 0; j < message.sparseTensors.length; ++j) + object.sparseTensors[j] = $root.onnx.SparseTensorProto.toObject(message.sparseTensors[j], options); + } + return object; + }; + + /** + * Converts this AttributeProto to JSON. + * @function toJSON + * @memberof onnx.AttributeProto + * @instance + * @returns {Object.} JSON object + */ + AttributeProto.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for AttributeProto + * @function getTypeUrl + * @memberof onnx.AttributeProto + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + AttributeProto.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/onnx.AttributeProto"; + }; + + /** + * AttributeType enum. + * @name onnx.AttributeProto.AttributeType + * @enum {number} + * @property {number} UNDEFINED=0 UNDEFINED value + * @property {number} FLOAT=1 FLOAT value + * @property {number} INT=2 INT value + * @property {number} STRING=3 STRING value + * @property {number} TENSOR=4 TENSOR value + * @property {number} GRAPH=5 GRAPH value + * @property {number} SPARSE_TENSOR=11 SPARSE_TENSOR value + * @property {number} TYPE_PROTO=13 TYPE_PROTO value + * @property {number} FLOATS=6 FLOATS value + * @property {number} INTS=7 INTS value + * @property {number} STRINGS=8 STRINGS value + * @property {number} TENSORS=9 TENSORS value + * @property {number} GRAPHS=10 GRAPHS value + * @property {number} SPARSE_TENSORS=12 SPARSE_TENSORS value + * @property {number} TYPE_PROTOS=14 TYPE_PROTOS value + */ + AttributeProto.AttributeType = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "UNDEFINED"] = 0; + values[valuesById[1] = "FLOAT"] = 1; + values[valuesById[2] = "INT"] = 2; + values[valuesById[3] = "STRING"] = 3; + values[valuesById[4] = "TENSOR"] = 4; + values[valuesById[5] = "GRAPH"] = 5; + values[valuesById[11] = "SPARSE_TENSOR"] = 11; + values[valuesById[13] = "TYPE_PROTO"] = 13; + values[valuesById[6] = "FLOATS"] = 6; + values[valuesById[7] = "INTS"] = 7; + values[valuesById[8] = "STRINGS"] = 8; + values[valuesById[9] = "TENSORS"] = 9; + values[valuesById[10] = "GRAPHS"] = 10; + values[valuesById[12] = "SPARSE_TENSORS"] = 12; + values[valuesById[14] = "TYPE_PROTOS"] = 14; + return values; + })(); + + return AttributeProto; + })(); + + onnx.ValueInfoProto = (function() { + + /** + * Properties of a ValueInfoProto. + * @memberof onnx + * @interface IValueInfoProto + * @property {string|null} [name] ValueInfoProto name + * @property {onnx.ITypeProto|null} [type] ValueInfoProto type + * @property {string|null} [docString] ValueInfoProto docString + */ + + /** + * Constructs a new ValueInfoProto. + * @memberof onnx + * @classdesc Represents a ValueInfoProto. 
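For reference, a minimal sketch of round-tripping the AttributeProto message generated above, assuming the generated file is loaded as a CommonJS module that exposes the onnx namespace (the require path and the attribute values below are illustrative, not taken from this patch):

    // Illustrative usage of the generated AttributeProto API (module path is an assumption).
    const onnx = require("./onnx").onnx;

    // fromObject accepts enum names ("FLOAT") or their numeric values.
    const attr = onnx.AttributeProto.fromObject({ name: "alpha", type: "FLOAT", f: 0.5 });

    // verify returns null when the message is structurally valid, otherwise a reason string.
    if (onnx.AttributeProto.verify(attr) !== null) throw new Error("invalid AttributeProto");

    // encode returns a protobuf Writer; finish() yields the serialized bytes.
    const bytes = onnx.AttributeProto.encode(attr).finish();
    const decoded = onnx.AttributeProto.decode(bytes);
    console.log(decoded.name, decoded.f); // "alpha" 0.5

The toObject/toJSON methods shown in the generated code apply the usual protobufjs conversion options (longs, enums, bytes) when turning a decoded message back into a plain object.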
+ * @implements IValueInfoProto + * @constructor + * @param {onnx.IValueInfoProto=} [properties] Properties to set + */ + function ValueInfoProto(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ValueInfoProto name. + * @member {string} name + * @memberof onnx.ValueInfoProto + * @instance + */ + ValueInfoProto.prototype.name = ""; + + /** + * ValueInfoProto type. + * @member {onnx.ITypeProto|null|undefined} type + * @memberof onnx.ValueInfoProto + * @instance + */ + ValueInfoProto.prototype.type = null; + + /** + * ValueInfoProto docString. + * @member {string} docString + * @memberof onnx.ValueInfoProto + * @instance + */ + ValueInfoProto.prototype.docString = ""; + + /** + * Creates a new ValueInfoProto instance using the specified properties. + * @function create + * @memberof onnx.ValueInfoProto + * @static + * @param {onnx.IValueInfoProto=} [properties] Properties to set + * @returns {onnx.ValueInfoProto} ValueInfoProto instance + */ + ValueInfoProto.create = function create(properties) { + return new ValueInfoProto(properties); + }; + + /** + * Encodes the specified ValueInfoProto message. Does not implicitly {@link onnx.ValueInfoProto.verify|verify} messages. + * @function encode + * @memberof onnx.ValueInfoProto + * @static + * @param {onnx.IValueInfoProto} message ValueInfoProto message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ValueInfoProto.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); + if (message.type != null && Object.hasOwnProperty.call(message, "type")) + $root.onnx.TypeProto.encode(message.type, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.docString != null && Object.hasOwnProperty.call(message, "docString")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.docString); + return writer; + }; + + /** + * Encodes the specified ValueInfoProto message, length delimited. Does not implicitly {@link onnx.ValueInfoProto.verify|verify} messages. + * @function encodeDelimited + * @memberof onnx.ValueInfoProto + * @static + * @param {onnx.IValueInfoProto} message ValueInfoProto message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ValueInfoProto.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a ValueInfoProto message from the specified reader or buffer. + * @function decode + * @memberof onnx.ValueInfoProto + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {onnx.ValueInfoProto} ValueInfoProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ValueInfoProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.onnx.ValueInfoProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.name = reader.string(); + break; + } + case 2: { + message.type = $root.onnx.TypeProto.decode(reader, reader.uint32()); + break; + } + case 3: { + message.docString = reader.string(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a ValueInfoProto message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof onnx.ValueInfoProto + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {onnx.ValueInfoProto} ValueInfoProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ValueInfoProto.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a ValueInfoProto message. + * @function verify + * @memberof onnx.ValueInfoProto + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ValueInfoProto.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.type != null && message.hasOwnProperty("type")) { + var error = $root.onnx.TypeProto.verify(message.type); + if (error) + return "type." + error; + } + if (message.docString != null && message.hasOwnProperty("docString")) + if (!$util.isString(message.docString)) + return "docString: string expected"; + return null; + }; + + /** + * Creates a ValueInfoProto message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof onnx.ValueInfoProto + * @static + * @param {Object.} object Plain object + * @returns {onnx.ValueInfoProto} ValueInfoProto + */ + ValueInfoProto.fromObject = function fromObject(object) { + if (object instanceof $root.onnx.ValueInfoProto) + return object; + var message = new $root.onnx.ValueInfoProto(); + if (object.name != null) + message.name = String(object.name); + if (object.type != null) { + if (typeof object.type !== "object") + throw TypeError(".onnx.ValueInfoProto.type: object expected"); + message.type = $root.onnx.TypeProto.fromObject(object.type); + } + if (object.docString != null) + message.docString = String(object.docString); + return message; + }; + + /** + * Creates a plain object from a ValueInfoProto message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof onnx.ValueInfoProto + * @static + * @param {onnx.ValueInfoProto} message ValueInfoProto + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ValueInfoProto.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.defaults) { + object.name = ""; + object.type = null; + object.docString = ""; + } + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.type != null && message.hasOwnProperty("type")) + object.type = $root.onnx.TypeProto.toObject(message.type, options); + if (message.docString != null && message.hasOwnProperty("docString")) + object.docString = message.docString; + return object; + }; + + /** + * Converts this ValueInfoProto to JSON. + * @function toJSON + * @memberof onnx.ValueInfoProto + * @instance + * @returns {Object.} JSON object + */ + ValueInfoProto.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ValueInfoProto + * @function getTypeUrl + * @memberof onnx.ValueInfoProto + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ValueInfoProto.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/onnx.ValueInfoProto"; + }; + + return ValueInfoProto; + })(); + + onnx.NodeProto = (function() { + + /** + * Properties of a NodeProto. + * @memberof onnx + * @interface INodeProto + * @property {Array.|null} [input] NodeProto input + * @property {Array.|null} [output] NodeProto output + * @property {string|null} [name] NodeProto name + * @property {string|null} [opType] NodeProto opType + * @property {string|null} [domain] NodeProto domain + * @property {Array.|null} [attribute] NodeProto attribute + * @property {string|null} [docString] NodeProto docString + */ + + /** + * Constructs a new NodeProto. + * @memberof onnx + * @classdesc Represents a NodeProto. + * @implements INodeProto + * @constructor + * @param {onnx.INodeProto=} [properties] Properties to set + */ + function NodeProto(properties) { + this.input = []; + this.output = []; + this.attribute = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * NodeProto input. + * @member {Array.} input + * @memberof onnx.NodeProto + * @instance + */ + NodeProto.prototype.input = $util.emptyArray; + + /** + * NodeProto output. + * @member {Array.} output + * @memberof onnx.NodeProto + * @instance + */ + NodeProto.prototype.output = $util.emptyArray; + + /** + * NodeProto name. + * @member {string} name + * @memberof onnx.NodeProto + * @instance + */ + NodeProto.prototype.name = ""; + + /** + * NodeProto opType. + * @member {string} opType + * @memberof onnx.NodeProto + * @instance + */ + NodeProto.prototype.opType = ""; + + /** + * NodeProto domain. + * @member {string} domain + * @memberof onnx.NodeProto + * @instance + */ + NodeProto.prototype.domain = ""; + + /** + * NodeProto attribute. + * @member {Array.} attribute + * @memberof onnx.NodeProto + * @instance + */ + NodeProto.prototype.attribute = $util.emptyArray; + + /** + * NodeProto docString. 
+ * @member {string} docString + * @memberof onnx.NodeProto + * @instance + */ + NodeProto.prototype.docString = ""; + + /** + * Creates a new NodeProto instance using the specified properties. + * @function create + * @memberof onnx.NodeProto + * @static + * @param {onnx.INodeProto=} [properties] Properties to set + * @returns {onnx.NodeProto} NodeProto instance + */ + NodeProto.create = function create(properties) { + return new NodeProto(properties); + }; + + /** + * Encodes the specified NodeProto message. Does not implicitly {@link onnx.NodeProto.verify|verify} messages. + * @function encode + * @memberof onnx.NodeProto + * @static + * @param {onnx.INodeProto} message NodeProto message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + NodeProto.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.input != null && message.input.length) + for (var i = 0; i < message.input.length; ++i) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.input[i]); + if (message.output != null && message.output.length) + for (var i = 0; i < message.output.length; ++i) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.output[i]); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.name); + if (message.opType != null && Object.hasOwnProperty.call(message, "opType")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.opType); + if (message.attribute != null && message.attribute.length) + for (var i = 0; i < message.attribute.length; ++i) + $root.onnx.AttributeProto.encode(message.attribute[i], writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.docString != null && Object.hasOwnProperty.call(message, "docString")) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.docString); + if (message.domain != null && Object.hasOwnProperty.call(message, "domain")) + writer.uint32(/* id 7, wireType 2 =*/58).string(message.domain); + return writer; + }; + + /** + * Encodes the specified NodeProto message, length delimited. Does not implicitly {@link onnx.NodeProto.verify|verify} messages. + * @function encodeDelimited + * @memberof onnx.NodeProto + * @static + * @param {onnx.INodeProto} message NodeProto message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + NodeProto.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a NodeProto message from the specified reader or buffer. + * @function decode + * @memberof onnx.NodeProto + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {onnx.NodeProto} NodeProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + NodeProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.onnx.NodeProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (!(message.input && message.input.length)) + message.input = []; + message.input.push(reader.string()); + break; + } + case 2: { + if (!(message.output && message.output.length)) + message.output = []; + message.output.push(reader.string()); + break; + } + case 3: { + message.name = reader.string(); + break; + } + case 4: { + message.opType = reader.string(); + break; + } + case 7: { + message.domain = reader.string(); + break; + } + case 5: { + if (!(message.attribute && message.attribute.length)) + message.attribute = []; + message.attribute.push($root.onnx.AttributeProto.decode(reader, reader.uint32())); + break; + } + case 6: { + message.docString = reader.string(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a NodeProto message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof onnx.NodeProto + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {onnx.NodeProto} NodeProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + NodeProto.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a NodeProto message. + * @function verify + * @memberof onnx.NodeProto + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + NodeProto.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.input != null && message.hasOwnProperty("input")) { + if (!Array.isArray(message.input)) + return "input: array expected"; + for (var i = 0; i < message.input.length; ++i) + if (!$util.isString(message.input[i])) + return "input: string[] expected"; + } + if (message.output != null && message.hasOwnProperty("output")) { + if (!Array.isArray(message.output)) + return "output: array expected"; + for (var i = 0; i < message.output.length; ++i) + if (!$util.isString(message.output[i])) + return "output: string[] expected"; + } + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.opType != null && message.hasOwnProperty("opType")) + if (!$util.isString(message.opType)) + return "opType: string expected"; + if (message.domain != null && message.hasOwnProperty("domain")) + if (!$util.isString(message.domain)) + return "domain: string expected"; + if (message.attribute != null && message.hasOwnProperty("attribute")) { + if (!Array.isArray(message.attribute)) + return "attribute: array expected"; + for (var i = 0; i < message.attribute.length; ++i) { + var error = $root.onnx.AttributeProto.verify(message.attribute[i]); + if (error) + return "attribute." + error; + } + } + if (message.docString != null && message.hasOwnProperty("docString")) + if (!$util.isString(message.docString)) + return "docString: string expected"; + return null; + }; + + /** + * Creates a NodeProto message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof onnx.NodeProto + * @static + * @param {Object.} object Plain object + * @returns {onnx.NodeProto} NodeProto + */ + NodeProto.fromObject = function fromObject(object) { + if (object instanceof $root.onnx.NodeProto) + return object; + var message = new $root.onnx.NodeProto(); + if (object.input) { + if (!Array.isArray(object.input)) + throw TypeError(".onnx.NodeProto.input: array expected"); + message.input = []; + for (var i = 0; i < object.input.length; ++i) + message.input[i] = String(object.input[i]); + } + if (object.output) { + if (!Array.isArray(object.output)) + throw TypeError(".onnx.NodeProto.output: array expected"); + message.output = []; + for (var i = 0; i < object.output.length; ++i) + message.output[i] = String(object.output[i]); + } + if (object.name != null) + message.name = String(object.name); + if (object.opType != null) + message.opType = String(object.opType); + if (object.domain != null) + message.domain = String(object.domain); + if (object.attribute) { + if (!Array.isArray(object.attribute)) + throw TypeError(".onnx.NodeProto.attribute: array expected"); + message.attribute = []; + for (var i = 0; i < object.attribute.length; ++i) { + if (typeof object.attribute[i] !== "object") + throw TypeError(".onnx.NodeProto.attribute: object expected"); + message.attribute[i] = $root.onnx.AttributeProto.fromObject(object.attribute[i]); + } + } + if (object.docString != null) + message.docString = String(object.docString); + return message; + }; + + /** + * Creates a plain object from a NodeProto message. Also converts values to other types if specified. + * @function toObject + * @memberof onnx.NodeProto + * @static + * @param {onnx.NodeProto} message NodeProto + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + NodeProto.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.arrays || options.defaults) { + object.input = []; + object.output = []; + object.attribute = []; + } + if (options.defaults) { + object.name = ""; + object.opType = ""; + object.docString = ""; + object.domain = ""; + } + if (message.input && message.input.length) { + object.input = []; + for (var j = 0; j < message.input.length; ++j) + object.input[j] = message.input[j]; + } + if (message.output && message.output.length) { + object.output = []; + for (var j = 0; j < message.output.length; ++j) + object.output[j] = message.output[j]; + } + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.opType != null && message.hasOwnProperty("opType")) + object.opType = message.opType; + if (message.attribute && message.attribute.length) { + object.attribute = []; + for (var j = 0; j < message.attribute.length; ++j) + object.attribute[j] = $root.onnx.AttributeProto.toObject(message.attribute[j], options); + } + if (message.docString != null && message.hasOwnProperty("docString")) + object.docString = message.docString; + if (message.domain != null && message.hasOwnProperty("domain")) + object.domain = message.domain; + return object; + }; + + /** + * Converts this NodeProto to JSON. 
+ * @function toJSON + * @memberof onnx.NodeProto + * @instance + * @returns {Object.} JSON object + */ + NodeProto.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for NodeProto + * @function getTypeUrl + * @memberof onnx.NodeProto + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + NodeProto.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/onnx.NodeProto"; + }; + + return NodeProto; + })(); + + onnx.TrainingInfoProto = (function() { + + /** + * Properties of a TrainingInfoProto. + * @memberof onnx + * @interface ITrainingInfoProto + * @property {onnx.IGraphProto|null} [initialization] TrainingInfoProto initialization + * @property {onnx.IGraphProto|null} [algorithm] TrainingInfoProto algorithm + * @property {Array.|null} [initializationBinding] TrainingInfoProto initializationBinding + * @property {Array.|null} [updateBinding] TrainingInfoProto updateBinding + */ + + /** + * Constructs a new TrainingInfoProto. + * @memberof onnx + * @classdesc Represents a TrainingInfoProto. + * @implements ITrainingInfoProto + * @constructor + * @param {onnx.ITrainingInfoProto=} [properties] Properties to set + */ + function TrainingInfoProto(properties) { + this.initializationBinding = []; + this.updateBinding = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * TrainingInfoProto initialization. + * @member {onnx.IGraphProto|null|undefined} initialization + * @memberof onnx.TrainingInfoProto + * @instance + */ + TrainingInfoProto.prototype.initialization = null; + + /** + * TrainingInfoProto algorithm. + * @member {onnx.IGraphProto|null|undefined} algorithm + * @memberof onnx.TrainingInfoProto + * @instance + */ + TrainingInfoProto.prototype.algorithm = null; + + /** + * TrainingInfoProto initializationBinding. + * @member {Array.} initializationBinding + * @memberof onnx.TrainingInfoProto + * @instance + */ + TrainingInfoProto.prototype.initializationBinding = $util.emptyArray; + + /** + * TrainingInfoProto updateBinding. + * @member {Array.} updateBinding + * @memberof onnx.TrainingInfoProto + * @instance + */ + TrainingInfoProto.prototype.updateBinding = $util.emptyArray; + + /** + * Creates a new TrainingInfoProto instance using the specified properties. + * @function create + * @memberof onnx.TrainingInfoProto + * @static + * @param {onnx.ITrainingInfoProto=} [properties] Properties to set + * @returns {onnx.TrainingInfoProto} TrainingInfoProto instance + */ + TrainingInfoProto.create = function create(properties) { + return new TrainingInfoProto(properties); + }; + + /** + * Encodes the specified TrainingInfoProto message. Does not implicitly {@link onnx.TrainingInfoProto.verify|verify} messages. 
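The generated NodeProto API above composes with AttributeProto in the same way; a short hedged sketch follows (the op type, domain, and tensor names are placeholders, not values defined by this patch):

    // Hypothetical node; attribute plain objects are converted via AttributeProto.fromObject internally.
    const node = onnx.NodeProto.fromObject({
      name: "moe_0",
      opType: "MoE",
      domain: "com.microsoft",
      input: ["input", "router_probs"],
      output: ["output"],
      attribute: [{ name: "k", type: "INT", i: 1 }],
    });
    const nodeBytes = onnx.NodeProto.encode(node).finish();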
+ * @function encode + * @memberof onnx.TrainingInfoProto + * @static + * @param {onnx.ITrainingInfoProto} message TrainingInfoProto message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + TrainingInfoProto.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.initialization != null && Object.hasOwnProperty.call(message, "initialization")) + $root.onnx.GraphProto.encode(message.initialization, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.algorithm != null && Object.hasOwnProperty.call(message, "algorithm")) + $root.onnx.GraphProto.encode(message.algorithm, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.initializationBinding != null && message.initializationBinding.length) + for (var i = 0; i < message.initializationBinding.length; ++i) + $root.onnx.StringStringEntryProto.encode(message.initializationBinding[i], writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.updateBinding != null && message.updateBinding.length) + for (var i = 0; i < message.updateBinding.length; ++i) + $root.onnx.StringStringEntryProto.encode(message.updateBinding[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified TrainingInfoProto message, length delimited. Does not implicitly {@link onnx.TrainingInfoProto.verify|verify} messages. + * @function encodeDelimited + * @memberof onnx.TrainingInfoProto + * @static + * @param {onnx.ITrainingInfoProto} message TrainingInfoProto message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + TrainingInfoProto.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a TrainingInfoProto message from the specified reader or buffer. + * @function decode + * @memberof onnx.TrainingInfoProto + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {onnx.TrainingInfoProto} TrainingInfoProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + TrainingInfoProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.onnx.TrainingInfoProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.initialization = $root.onnx.GraphProto.decode(reader, reader.uint32()); + break; + } + case 2: { + message.algorithm = $root.onnx.GraphProto.decode(reader, reader.uint32()); + break; + } + case 3: { + if (!(message.initializationBinding && message.initializationBinding.length)) + message.initializationBinding = []; + message.initializationBinding.push($root.onnx.StringStringEntryProto.decode(reader, reader.uint32())); + break; + } + case 4: { + if (!(message.updateBinding && message.updateBinding.length)) + message.updateBinding = []; + message.updateBinding.push($root.onnx.StringStringEntryProto.decode(reader, reader.uint32())); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a TrainingInfoProto message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof onnx.TrainingInfoProto + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {onnx.TrainingInfoProto} TrainingInfoProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + TrainingInfoProto.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a TrainingInfoProto message. + * @function verify + * @memberof onnx.TrainingInfoProto + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + TrainingInfoProto.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.initialization != null && message.hasOwnProperty("initialization")) { + var error = $root.onnx.GraphProto.verify(message.initialization); + if (error) + return "initialization." + error; + } + if (message.algorithm != null && message.hasOwnProperty("algorithm")) { + var error = $root.onnx.GraphProto.verify(message.algorithm); + if (error) + return "algorithm." + error; + } + if (message.initializationBinding != null && message.hasOwnProperty("initializationBinding")) { + if (!Array.isArray(message.initializationBinding)) + return "initializationBinding: array expected"; + for (var i = 0; i < message.initializationBinding.length; ++i) { + var error = $root.onnx.StringStringEntryProto.verify(message.initializationBinding[i]); + if (error) + return "initializationBinding." + error; + } + } + if (message.updateBinding != null && message.hasOwnProperty("updateBinding")) { + if (!Array.isArray(message.updateBinding)) + return "updateBinding: array expected"; + for (var i = 0; i < message.updateBinding.length; ++i) { + var error = $root.onnx.StringStringEntryProto.verify(message.updateBinding[i]); + if (error) + return "updateBinding." + error; + } + } + return null; + }; + + /** + * Creates a TrainingInfoProto message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof onnx.TrainingInfoProto + * @static + * @param {Object.} object Plain object + * @returns {onnx.TrainingInfoProto} TrainingInfoProto + */ + TrainingInfoProto.fromObject = function fromObject(object) { + if (object instanceof $root.onnx.TrainingInfoProto) + return object; + var message = new $root.onnx.TrainingInfoProto(); + if (object.initialization != null) { + if (typeof object.initialization !== "object") + throw TypeError(".onnx.TrainingInfoProto.initialization: object expected"); + message.initialization = $root.onnx.GraphProto.fromObject(object.initialization); + } + if (object.algorithm != null) { + if (typeof object.algorithm !== "object") + throw TypeError(".onnx.TrainingInfoProto.algorithm: object expected"); + message.algorithm = $root.onnx.GraphProto.fromObject(object.algorithm); + } + if (object.initializationBinding) { + if (!Array.isArray(object.initializationBinding)) + throw TypeError(".onnx.TrainingInfoProto.initializationBinding: array expected"); + message.initializationBinding = []; + for (var i = 0; i < object.initializationBinding.length; ++i) { + if (typeof object.initializationBinding[i] !== "object") + throw TypeError(".onnx.TrainingInfoProto.initializationBinding: object expected"); + message.initializationBinding[i] = $root.onnx.StringStringEntryProto.fromObject(object.initializationBinding[i]); + } + } + if (object.updateBinding) { + if (!Array.isArray(object.updateBinding)) + throw TypeError(".onnx.TrainingInfoProto.updateBinding: array expected"); + message.updateBinding = []; + for (var i = 0; i < object.updateBinding.length; ++i) { + if (typeof object.updateBinding[i] !== "object") + throw TypeError(".onnx.TrainingInfoProto.updateBinding: object expected"); + message.updateBinding[i] = $root.onnx.StringStringEntryProto.fromObject(object.updateBinding[i]); + } + } + return message; + }; + + /** + * Creates a plain object from a TrainingInfoProto message. Also converts values to other types if specified. + * @function toObject + * @memberof onnx.TrainingInfoProto + * @static + * @param {onnx.TrainingInfoProto} message TrainingInfoProto + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + TrainingInfoProto.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.arrays || options.defaults) { + object.initializationBinding = []; + object.updateBinding = []; + } + if (options.defaults) { + object.initialization = null; + object.algorithm = null; + } + if (message.initialization != null && message.hasOwnProperty("initialization")) + object.initialization = $root.onnx.GraphProto.toObject(message.initialization, options); + if (message.algorithm != null && message.hasOwnProperty("algorithm")) + object.algorithm = $root.onnx.GraphProto.toObject(message.algorithm, options); + if (message.initializationBinding && message.initializationBinding.length) { + object.initializationBinding = []; + for (var j = 0; j < message.initializationBinding.length; ++j) + object.initializationBinding[j] = $root.onnx.StringStringEntryProto.toObject(message.initializationBinding[j], options); + } + if (message.updateBinding && message.updateBinding.length) { + object.updateBinding = []; + for (var j = 0; j < message.updateBinding.length; ++j) + object.updateBinding[j] = $root.onnx.StringStringEntryProto.toObject(message.updateBinding[j], options); + } + return object; + }; + + /** + * Converts this TrainingInfoProto to JSON. 
+ * @function toJSON + * @memberof onnx.TrainingInfoProto + * @instance + * @returns {Object.} JSON object + */ + TrainingInfoProto.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for TrainingInfoProto + * @function getTypeUrl + * @memberof onnx.TrainingInfoProto + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + TrainingInfoProto.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/onnx.TrainingInfoProto"; + }; + + return TrainingInfoProto; + })(); + + onnx.ModelProto = (function() { + + /** + * Properties of a ModelProto. + * @memberof onnx + * @interface IModelProto + * @property {number|Long|null} [irVersion] ModelProto irVersion + * @property {Array.|null} [opsetImport] ModelProto opsetImport + * @property {string|null} [producerName] ModelProto producerName + * @property {string|null} [producerVersion] ModelProto producerVersion + * @property {string|null} [domain] ModelProto domain + * @property {number|Long|null} [modelVersion] ModelProto modelVersion + * @property {string|null} [docString] ModelProto docString + * @property {onnx.IGraphProto|null} [graph] ModelProto graph + * @property {Array.|null} [metadataProps] ModelProto metadataProps + * @property {Array.|null} [trainingInfo] ModelProto trainingInfo + * @property {Array.|null} [functions] ModelProto functions + */ + + /** + * Constructs a new ModelProto. + * @memberof onnx + * @classdesc Represents a ModelProto. + * @implements IModelProto + * @constructor + * @param {onnx.IModelProto=} [properties] Properties to set + */ + function ModelProto(properties) { + this.opsetImport = []; + this.metadataProps = []; + this.trainingInfo = []; + this.functions = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ModelProto irVersion. + * @member {number|Long} irVersion + * @memberof onnx.ModelProto + * @instance + */ + ModelProto.prototype.irVersion = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * ModelProto opsetImport. + * @member {Array.} opsetImport + * @memberof onnx.ModelProto + * @instance + */ + ModelProto.prototype.opsetImport = $util.emptyArray; + + /** + * ModelProto producerName. + * @member {string} producerName + * @memberof onnx.ModelProto + * @instance + */ + ModelProto.prototype.producerName = ""; + + /** + * ModelProto producerVersion. + * @member {string} producerVersion + * @memberof onnx.ModelProto + * @instance + */ + ModelProto.prototype.producerVersion = ""; + + /** + * ModelProto domain. + * @member {string} domain + * @memberof onnx.ModelProto + * @instance + */ + ModelProto.prototype.domain = ""; + + /** + * ModelProto modelVersion. + * @member {number|Long} modelVersion + * @memberof onnx.ModelProto + * @instance + */ + ModelProto.prototype.modelVersion = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * ModelProto docString. + * @member {string} docString + * @memberof onnx.ModelProto + * @instance + */ + ModelProto.prototype.docString = ""; + + /** + * ModelProto graph. 
+ * @member {onnx.IGraphProto|null|undefined} graph + * @memberof onnx.ModelProto + * @instance + */ + ModelProto.prototype.graph = null; + + /** + * ModelProto metadataProps. + * @member {Array.} metadataProps + * @memberof onnx.ModelProto + * @instance + */ + ModelProto.prototype.metadataProps = $util.emptyArray; + + /** + * ModelProto trainingInfo. + * @member {Array.} trainingInfo + * @memberof onnx.ModelProto + * @instance + */ + ModelProto.prototype.trainingInfo = $util.emptyArray; + + /** + * ModelProto functions. + * @member {Array.} functions + * @memberof onnx.ModelProto + * @instance + */ + ModelProto.prototype.functions = $util.emptyArray; + + /** + * Creates a new ModelProto instance using the specified properties. + * @function create + * @memberof onnx.ModelProto + * @static + * @param {onnx.IModelProto=} [properties] Properties to set + * @returns {onnx.ModelProto} ModelProto instance + */ + ModelProto.create = function create(properties) { + return new ModelProto(properties); + }; + + /** + * Encodes the specified ModelProto message. Does not implicitly {@link onnx.ModelProto.verify|verify} messages. + * @function encode + * @memberof onnx.ModelProto + * @static + * @param {onnx.IModelProto} message ModelProto message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ModelProto.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.irVersion != null && Object.hasOwnProperty.call(message, "irVersion")) + writer.uint32(/* id 1, wireType 0 =*/8).int64(message.irVersion); + if (message.producerName != null && Object.hasOwnProperty.call(message, "producerName")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.producerName); + if (message.producerVersion != null && Object.hasOwnProperty.call(message, "producerVersion")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.producerVersion); + if (message.domain != null && Object.hasOwnProperty.call(message, "domain")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.domain); + if (message.modelVersion != null && Object.hasOwnProperty.call(message, "modelVersion")) + writer.uint32(/* id 5, wireType 0 =*/40).int64(message.modelVersion); + if (message.docString != null && Object.hasOwnProperty.call(message, "docString")) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.docString); + if (message.graph != null && Object.hasOwnProperty.call(message, "graph")) + $root.onnx.GraphProto.encode(message.graph, writer.uint32(/* id 7, wireType 2 =*/58).fork()).ldelim(); + if (message.opsetImport != null && message.opsetImport.length) + for (var i = 0; i < message.opsetImport.length; ++i) + $root.onnx.OperatorSetIdProto.encode(message.opsetImport[i], writer.uint32(/* id 8, wireType 2 =*/66).fork()).ldelim(); + if (message.metadataProps != null && message.metadataProps.length) + for (var i = 0; i < message.metadataProps.length; ++i) + $root.onnx.StringStringEntryProto.encode(message.metadataProps[i], writer.uint32(/* id 14, wireType 2 =*/114).fork()).ldelim(); + if (message.trainingInfo != null && message.trainingInfo.length) + for (var i = 0; i < message.trainingInfo.length; ++i) + $root.onnx.TrainingInfoProto.encode(message.trainingInfo[i], writer.uint32(/* id 20, wireType 2 =*/162).fork()).ldelim(); + if (message.functions != null && message.functions.length) + for (var i = 0; i < message.functions.length; ++i) + $root.onnx.FunctionProto.encode(message.functions[i], writer.uint32(/* 
id 25, wireType 2 =*/202).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified ModelProto message, length delimited. Does not implicitly {@link onnx.ModelProto.verify|verify} messages. + * @function encodeDelimited + * @memberof onnx.ModelProto + * @static + * @param {onnx.IModelProto} message ModelProto message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ModelProto.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a ModelProto message from the specified reader or buffer. + * @function decode + * @memberof onnx.ModelProto + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {onnx.ModelProto} ModelProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ModelProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.onnx.ModelProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.irVersion = reader.int64(); + break; + } + case 8: { + if (!(message.opsetImport && message.opsetImport.length)) + message.opsetImport = []; + message.opsetImport.push($root.onnx.OperatorSetIdProto.decode(reader, reader.uint32())); + break; + } + case 2: { + message.producerName = reader.string(); + break; + } + case 3: { + message.producerVersion = reader.string(); + break; + } + case 4: { + message.domain = reader.string(); + break; + } + case 5: { + message.modelVersion = reader.int64(); + break; + } + case 6: { + message.docString = reader.string(); + break; + } + case 7: { + message.graph = $root.onnx.GraphProto.decode(reader, reader.uint32()); + break; + } + case 14: { + if (!(message.metadataProps && message.metadataProps.length)) + message.metadataProps = []; + message.metadataProps.push($root.onnx.StringStringEntryProto.decode(reader, reader.uint32())); + break; + } + case 20: { + if (!(message.trainingInfo && message.trainingInfo.length)) + message.trainingInfo = []; + message.trainingInfo.push($root.onnx.TrainingInfoProto.decode(reader, reader.uint32())); + break; + } + case 25: { + if (!(message.functions && message.functions.length)) + message.functions = []; + message.functions.push($root.onnx.FunctionProto.decode(reader, reader.uint32())); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a ModelProto message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof onnx.ModelProto + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {onnx.ModelProto} ModelProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ModelProto.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a ModelProto message. 
+ * @function verify + * @memberof onnx.ModelProto + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ModelProto.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.irVersion != null && message.hasOwnProperty("irVersion")) + if (!$util.isInteger(message.irVersion) && !(message.irVersion && $util.isInteger(message.irVersion.low) && $util.isInteger(message.irVersion.high))) + return "irVersion: integer|Long expected"; + if (message.opsetImport != null && message.hasOwnProperty("opsetImport")) { + if (!Array.isArray(message.opsetImport)) + return "opsetImport: array expected"; + for (var i = 0; i < message.opsetImport.length; ++i) { + var error = $root.onnx.OperatorSetIdProto.verify(message.opsetImport[i]); + if (error) + return "opsetImport." + error; + } + } + if (message.producerName != null && message.hasOwnProperty("producerName")) + if (!$util.isString(message.producerName)) + return "producerName: string expected"; + if (message.producerVersion != null && message.hasOwnProperty("producerVersion")) + if (!$util.isString(message.producerVersion)) + return "producerVersion: string expected"; + if (message.domain != null && message.hasOwnProperty("domain")) + if (!$util.isString(message.domain)) + return "domain: string expected"; + if (message.modelVersion != null && message.hasOwnProperty("modelVersion")) + if (!$util.isInteger(message.modelVersion) && !(message.modelVersion && $util.isInteger(message.modelVersion.low) && $util.isInteger(message.modelVersion.high))) + return "modelVersion: integer|Long expected"; + if (message.docString != null && message.hasOwnProperty("docString")) + if (!$util.isString(message.docString)) + return "docString: string expected"; + if (message.graph != null && message.hasOwnProperty("graph")) { + var error = $root.onnx.GraphProto.verify(message.graph); + if (error) + return "graph." + error; + } + if (message.metadataProps != null && message.hasOwnProperty("metadataProps")) { + if (!Array.isArray(message.metadataProps)) + return "metadataProps: array expected"; + for (var i = 0; i < message.metadataProps.length; ++i) { + var error = $root.onnx.StringStringEntryProto.verify(message.metadataProps[i]); + if (error) + return "metadataProps." + error; + } + } + if (message.trainingInfo != null && message.hasOwnProperty("trainingInfo")) { + if (!Array.isArray(message.trainingInfo)) + return "trainingInfo: array expected"; + for (var i = 0; i < message.trainingInfo.length; ++i) { + var error = $root.onnx.TrainingInfoProto.verify(message.trainingInfo[i]); + if (error) + return "trainingInfo." + error; + } + } + if (message.functions != null && message.hasOwnProperty("functions")) { + if (!Array.isArray(message.functions)) + return "functions: array expected"; + for (var i = 0; i < message.functions.length; ++i) { + var error = $root.onnx.FunctionProto.verify(message.functions[i]); + if (error) + return "functions." + error; + } + } + return null; + }; + + /** + * Creates a ModelProto message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof onnx.ModelProto + * @static + * @param {Object.} object Plain object + * @returns {onnx.ModelProto} ModelProto + */ + ModelProto.fromObject = function fromObject(object) { + if (object instanceof $root.onnx.ModelProto) + return object; + var message = new $root.onnx.ModelProto(); + if (object.irVersion != null) + if ($util.Long) + (message.irVersion = $util.Long.fromValue(object.irVersion)).unsigned = false; + else if (typeof object.irVersion === "string") + message.irVersion = parseInt(object.irVersion, 10); + else if (typeof object.irVersion === "number") + message.irVersion = object.irVersion; + else if (typeof object.irVersion === "object") + message.irVersion = new $util.LongBits(object.irVersion.low >>> 0, object.irVersion.high >>> 0).toNumber(); + if (object.opsetImport) { + if (!Array.isArray(object.opsetImport)) + throw TypeError(".onnx.ModelProto.opsetImport: array expected"); + message.opsetImport = []; + for (var i = 0; i < object.opsetImport.length; ++i) { + if (typeof object.opsetImport[i] !== "object") + throw TypeError(".onnx.ModelProto.opsetImport: object expected"); + message.opsetImport[i] = $root.onnx.OperatorSetIdProto.fromObject(object.opsetImport[i]); + } + } + if (object.producerName != null) + message.producerName = String(object.producerName); + if (object.producerVersion != null) + message.producerVersion = String(object.producerVersion); + if (object.domain != null) + message.domain = String(object.domain); + if (object.modelVersion != null) + if ($util.Long) + (message.modelVersion = $util.Long.fromValue(object.modelVersion)).unsigned = false; + else if (typeof object.modelVersion === "string") + message.modelVersion = parseInt(object.modelVersion, 10); + else if (typeof object.modelVersion === "number") + message.modelVersion = object.modelVersion; + else if (typeof object.modelVersion === "object") + message.modelVersion = new $util.LongBits(object.modelVersion.low >>> 0, object.modelVersion.high >>> 0).toNumber(); + if (object.docString != null) + message.docString = String(object.docString); + if (object.graph != null) { + if (typeof object.graph !== "object") + throw TypeError(".onnx.ModelProto.graph: object expected"); + message.graph = $root.onnx.GraphProto.fromObject(object.graph); + } + if (object.metadataProps) { + if (!Array.isArray(object.metadataProps)) + throw TypeError(".onnx.ModelProto.metadataProps: array expected"); + message.metadataProps = []; + for (var i = 0; i < object.metadataProps.length; ++i) { + if (typeof object.metadataProps[i] !== "object") + throw TypeError(".onnx.ModelProto.metadataProps: object expected"); + message.metadataProps[i] = $root.onnx.StringStringEntryProto.fromObject(object.metadataProps[i]); + } + } + if (object.trainingInfo) { + if (!Array.isArray(object.trainingInfo)) + throw TypeError(".onnx.ModelProto.trainingInfo: array expected"); + message.trainingInfo = []; + for (var i = 0; i < object.trainingInfo.length; ++i) { + if (typeof object.trainingInfo[i] !== "object") + throw TypeError(".onnx.ModelProto.trainingInfo: object expected"); + message.trainingInfo[i] = $root.onnx.TrainingInfoProto.fromObject(object.trainingInfo[i]); + } + } + if (object.functions) { + if (!Array.isArray(object.functions)) + throw TypeError(".onnx.ModelProto.functions: array expected"); + message.functions = []; + for (var i = 0; i < object.functions.length; ++i) { + if (typeof object.functions[i] !== "object") + throw TypeError(".onnx.ModelProto.functions: object expected"); + message.functions[i] 
= $root.onnx.FunctionProto.fromObject(object.functions[i]); + } + } + return message; + }; + + /** + * Creates a plain object from a ModelProto message. Also converts values to other types if specified. + * @function toObject + * @memberof onnx.ModelProto + * @static + * @param {onnx.ModelProto} message ModelProto + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ModelProto.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.arrays || options.defaults) { + object.opsetImport = []; + object.metadataProps = []; + object.trainingInfo = []; + object.functions = []; + } + if (options.defaults) { + if ($util.Long) { + var long = new $util.Long(0, 0, false); + object.irVersion = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.irVersion = options.longs === String ? "0" : 0; + object.producerName = ""; + object.producerVersion = ""; + object.domain = ""; + if ($util.Long) { + var long = new $util.Long(0, 0, false); + object.modelVersion = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.modelVersion = options.longs === String ? "0" : 0; + object.docString = ""; + object.graph = null; + } + if (message.irVersion != null && message.hasOwnProperty("irVersion")) + if (typeof message.irVersion === "number") + object.irVersion = options.longs === String ? String(message.irVersion) : message.irVersion; + else + object.irVersion = options.longs === String ? $util.Long.prototype.toString.call(message.irVersion) : options.longs === Number ? new $util.LongBits(message.irVersion.low >>> 0, message.irVersion.high >>> 0).toNumber() : message.irVersion; + if (message.producerName != null && message.hasOwnProperty("producerName")) + object.producerName = message.producerName; + if (message.producerVersion != null && message.hasOwnProperty("producerVersion")) + object.producerVersion = message.producerVersion; + if (message.domain != null && message.hasOwnProperty("domain")) + object.domain = message.domain; + if (message.modelVersion != null && message.hasOwnProperty("modelVersion")) + if (typeof message.modelVersion === "number") + object.modelVersion = options.longs === String ? String(message.modelVersion) : message.modelVersion; + else + object.modelVersion = options.longs === String ? $util.Long.prototype.toString.call(message.modelVersion) : options.longs === Number ? 
new $util.LongBits(message.modelVersion.low >>> 0, message.modelVersion.high >>> 0).toNumber() : message.modelVersion; + if (message.docString != null && message.hasOwnProperty("docString")) + object.docString = message.docString; + if (message.graph != null && message.hasOwnProperty("graph")) + object.graph = $root.onnx.GraphProto.toObject(message.graph, options); + if (message.opsetImport && message.opsetImport.length) { + object.opsetImport = []; + for (var j = 0; j < message.opsetImport.length; ++j) + object.opsetImport[j] = $root.onnx.OperatorSetIdProto.toObject(message.opsetImport[j], options); + } + if (message.metadataProps && message.metadataProps.length) { + object.metadataProps = []; + for (var j = 0; j < message.metadataProps.length; ++j) + object.metadataProps[j] = $root.onnx.StringStringEntryProto.toObject(message.metadataProps[j], options); + } + if (message.trainingInfo && message.trainingInfo.length) { + object.trainingInfo = []; + for (var j = 0; j < message.trainingInfo.length; ++j) + object.trainingInfo[j] = $root.onnx.TrainingInfoProto.toObject(message.trainingInfo[j], options); + } + if (message.functions && message.functions.length) { + object.functions = []; + for (var j = 0; j < message.functions.length; ++j) + object.functions[j] = $root.onnx.FunctionProto.toObject(message.functions[j], options); + } + return object; + }; + + /** + * Converts this ModelProto to JSON. + * @function toJSON + * @memberof onnx.ModelProto + * @instance + * @returns {Object.} JSON object + */ + ModelProto.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ModelProto + * @function getTypeUrl + * @memberof onnx.ModelProto + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ModelProto.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/onnx.ModelProto"; + }; + + return ModelProto; + })(); + + onnx.StringStringEntryProto = (function() { + + /** + * Properties of a StringStringEntryProto. + * @memberof onnx + * @interface IStringStringEntryProto + * @property {string|null} [key] StringStringEntryProto key + * @property {string|null} [value] StringStringEntryProto value + */ + + /** + * Constructs a new StringStringEntryProto. + * @memberof onnx + * @classdesc Represents a StringStringEntryProto. + * @implements IStringStringEntryProto + * @constructor + * @param {onnx.IStringStringEntryProto=} [properties] Properties to set + */ + function StringStringEntryProto(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * StringStringEntryProto key. + * @member {string} key + * @memberof onnx.StringStringEntryProto + * @instance + */ + StringStringEntryProto.prototype.key = ""; + + /** + * StringStringEntryProto value. + * @member {string} value + * @memberof onnx.StringStringEntryProto + * @instance + */ + StringStringEntryProto.prototype.value = ""; + + /** + * Creates a new StringStringEntryProto instance using the specified properties. 
+     * @function create
+     * @memberof onnx.StringStringEntryProto
+     * @static
+     * @param {onnx.IStringStringEntryProto=} [properties] Properties to set
+     * @returns {onnx.StringStringEntryProto} StringStringEntryProto instance
+     */
+    StringStringEntryProto.create = function create(properties) {
+        return new StringStringEntryProto(properties);
+    };
+
+    /**
+     * Encodes the specified StringStringEntryProto message. Does not implicitly {@link onnx.StringStringEntryProto.verify|verify} messages.
+     * @function encode
+     * @memberof onnx.StringStringEntryProto
+     * @static
+     * @param {onnx.IStringStringEntryProto} message StringStringEntryProto message or plain object to encode
+     * @param {$protobuf.Writer} [writer] Writer to encode to
+     * @returns {$protobuf.Writer} Writer
+     */
+    StringStringEntryProto.encode = function encode(message, writer) {
+        if (!writer)
+            writer = $Writer.create();
+        if (message.key != null && Object.hasOwnProperty.call(message, "key"))
+            writer.uint32(/* id 1, wireType 2 =*/10).string(message.key);
+        if (message.value != null && Object.hasOwnProperty.call(message, "value"))
+            writer.uint32(/* id 2, wireType 2 =*/18).string(message.value);
+        return writer;
+    };
+
+    /**
+     * Encodes the specified StringStringEntryProto message, length delimited. Does not implicitly {@link onnx.StringStringEntryProto.verify|verify} messages.
+     * @function encodeDelimited
+     * @memberof onnx.StringStringEntryProto
+     * @static
+     * @param {onnx.IStringStringEntryProto} message StringStringEntryProto message or plain object to encode
+     * @param {$protobuf.Writer} [writer] Writer to encode to
+     * @returns {$protobuf.Writer} Writer
+     */
+    StringStringEntryProto.encodeDelimited = function encodeDelimited(message, writer) {
+        return this.encode(message, writer).ldelim();
+    };
+
+    /**
+     * Decodes a StringStringEntryProto message from the specified reader or buffer.
+     * @function decode
+     * @memberof onnx.StringStringEntryProto
+     * @static
+     * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from
+     * @param {number} [length] Message length if known beforehand
+     * @returns {onnx.StringStringEntryProto} StringStringEntryProto
+     * @throws {Error} If the payload is not a reader or valid buffer
+     * @throws {$protobuf.util.ProtocolError} If required fields are missing
+     */
+    StringStringEntryProto.decode = function decode(reader, length) {
+        if (!(reader instanceof $Reader))
+            reader = $Reader.create(reader);
+        var end = length === undefined ? reader.len : reader.pos + length, message = new $root.onnx.StringStringEntryProto();
+        while (reader.pos < end) {
+            var tag = reader.uint32();
+            switch (tag >>> 3) {
+                case 1: {
+                    message.key = reader.string();
+                    break;
+                }
+                case 2: {
+                    message.value = reader.string();
+                    break;
+                }
+                default:
+                    reader.skipType(tag & 7);
+                    break;
+            }
+        }
+        return message;
+    };
+
+    /**
+     * Decodes a StringStringEntryProto message from the specified reader or buffer, length delimited.
+     * @function decodeDelimited
+     * @memberof onnx.StringStringEntryProto
+     * @static
+     * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from
+     * @returns {onnx.StringStringEntryProto} StringStringEntryProto
+     * @throws {Error} If the payload is not a reader or valid buffer
+     * @throws {$protobuf.util.ProtocolError} If required fields are missing
+     */
+    StringStringEntryProto.decodeDelimited = function decodeDelimited(reader) {
+        if (!(reader instanceof $Reader))
+            reader = new $Reader(reader);
+        return this.decode(reader, reader.uint32());
+    };
+
+    /**
+     * Verifies a StringStringEntryProto message.
+     * @function verify
+     * @memberof onnx.StringStringEntryProto
+     * @static
+     * @param {Object.<string,*>} message Plain object to verify
+     * @returns {string|null} `null` if valid, otherwise the reason why it is not
+     */
+    StringStringEntryProto.verify = function verify(message) {
+        if (typeof message !== "object" || message === null)
+            return "object expected";
+        if (message.key != null && message.hasOwnProperty("key"))
+            if (!$util.isString(message.key))
+                return "key: string expected";
+        if (message.value != null && message.hasOwnProperty("value"))
+            if (!$util.isString(message.value))
+                return "value: string expected";
+        return null;
+    };
+
+    /**
+     * Creates a StringStringEntryProto message from a plain object. Also converts values to their respective internal types.
+     * @function fromObject
+     * @memberof onnx.StringStringEntryProto
+     * @static
+     * @param {Object.<string,*>} object Plain object
+     * @returns {onnx.StringStringEntryProto} StringStringEntryProto
+     */
+    StringStringEntryProto.fromObject = function fromObject(object) {
+        if (object instanceof $root.onnx.StringStringEntryProto)
+            return object;
+        var message = new $root.onnx.StringStringEntryProto();
+        if (object.key != null)
+            message.key = String(object.key);
+        if (object.value != null)
+            message.value = String(object.value);
+        return message;
+    };
+
+    /**
+     * Creates a plain object from a StringStringEntryProto message. Also converts values to other types if specified.
+     * @function toObject
+     * @memberof onnx.StringStringEntryProto
+     * @static
+     * @param {onnx.StringStringEntryProto} message StringStringEntryProto
+     * @param {$protobuf.IConversionOptions} [options] Conversion options
+     * @returns {Object.<string,*>} Plain object
+     */
+    StringStringEntryProto.toObject = function toObject(message, options) {
+        if (!options)
+            options = {};
+        var object = {};
+        if (options.defaults) {
+            object.key = "";
+            object.value = "";
+        }
+        if (message.key != null && message.hasOwnProperty("key"))
+            object.key = message.key;
+        if (message.value != null && message.hasOwnProperty("value"))
+            object.value = message.value;
+        return object;
+    };
+
+    /**
+     * Converts this StringStringEntryProto to JSON.
+ * @function toJSON + * @memberof onnx.StringStringEntryProto + * @instance + * @returns {Object.} JSON object + */ + StringStringEntryProto.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for StringStringEntryProto + * @function getTypeUrl + * @memberof onnx.StringStringEntryProto + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + StringStringEntryProto.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/onnx.StringStringEntryProto"; + }; + + return StringStringEntryProto; + })(); + + onnx.TensorAnnotation = (function() { + + /** + * Properties of a TensorAnnotation. + * @memberof onnx + * @interface ITensorAnnotation + * @property {string|null} [tensorName] TensorAnnotation tensorName + * @property {Array.|null} [quantParameterTensorNames] TensorAnnotation quantParameterTensorNames + */ + + /** + * Constructs a new TensorAnnotation. + * @memberof onnx + * @classdesc Represents a TensorAnnotation. + * @implements ITensorAnnotation + * @constructor + * @param {onnx.ITensorAnnotation=} [properties] Properties to set + */ + function TensorAnnotation(properties) { + this.quantParameterTensorNames = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * TensorAnnotation tensorName. + * @member {string} tensorName + * @memberof onnx.TensorAnnotation + * @instance + */ + TensorAnnotation.prototype.tensorName = ""; + + /** + * TensorAnnotation quantParameterTensorNames. + * @member {Array.} quantParameterTensorNames + * @memberof onnx.TensorAnnotation + * @instance + */ + TensorAnnotation.prototype.quantParameterTensorNames = $util.emptyArray; + + /** + * Creates a new TensorAnnotation instance using the specified properties. + * @function create + * @memberof onnx.TensorAnnotation + * @static + * @param {onnx.ITensorAnnotation=} [properties] Properties to set + * @returns {onnx.TensorAnnotation} TensorAnnotation instance + */ + TensorAnnotation.create = function create(properties) { + return new TensorAnnotation(properties); + }; + + /** + * Encodes the specified TensorAnnotation message. Does not implicitly {@link onnx.TensorAnnotation.verify|verify} messages. + * @function encode + * @memberof onnx.TensorAnnotation + * @static + * @param {onnx.ITensorAnnotation} message TensorAnnotation message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + TensorAnnotation.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.tensorName != null && Object.hasOwnProperty.call(message, "tensorName")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.tensorName); + if (message.quantParameterTensorNames != null && message.quantParameterTensorNames.length) + for (var i = 0; i < message.quantParameterTensorNames.length; ++i) + $root.onnx.StringStringEntryProto.encode(message.quantParameterTensorNames[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified TensorAnnotation message, length delimited. Does not implicitly {@link onnx.TensorAnnotation.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof onnx.TensorAnnotation + * @static + * @param {onnx.ITensorAnnotation} message TensorAnnotation message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + TensorAnnotation.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a TensorAnnotation message from the specified reader or buffer. + * @function decode + * @memberof onnx.TensorAnnotation + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {onnx.TensorAnnotation} TensorAnnotation + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + TensorAnnotation.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.onnx.TensorAnnotation(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.tensorName = reader.string(); + break; + } + case 2: { + if (!(message.quantParameterTensorNames && message.quantParameterTensorNames.length)) + message.quantParameterTensorNames = []; + message.quantParameterTensorNames.push($root.onnx.StringStringEntryProto.decode(reader, reader.uint32())); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a TensorAnnotation message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof onnx.TensorAnnotation + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {onnx.TensorAnnotation} TensorAnnotation + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + TensorAnnotation.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a TensorAnnotation message. + * @function verify + * @memberof onnx.TensorAnnotation + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + TensorAnnotation.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.tensorName != null && message.hasOwnProperty("tensorName")) + if (!$util.isString(message.tensorName)) + return "tensorName: string expected"; + if (message.quantParameterTensorNames != null && message.hasOwnProperty("quantParameterTensorNames")) { + if (!Array.isArray(message.quantParameterTensorNames)) + return "quantParameterTensorNames: array expected"; + for (var i = 0; i < message.quantParameterTensorNames.length; ++i) { + var error = $root.onnx.StringStringEntryProto.verify(message.quantParameterTensorNames[i]); + if (error) + return "quantParameterTensorNames." + error; + } + } + return null; + }; + + /** + * Creates a TensorAnnotation message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof onnx.TensorAnnotation + * @static + * @param {Object.} object Plain object + * @returns {onnx.TensorAnnotation} TensorAnnotation + */ + TensorAnnotation.fromObject = function fromObject(object) { + if (object instanceof $root.onnx.TensorAnnotation) + return object; + var message = new $root.onnx.TensorAnnotation(); + if (object.tensorName != null) + message.tensorName = String(object.tensorName); + if (object.quantParameterTensorNames) { + if (!Array.isArray(object.quantParameterTensorNames)) + throw TypeError(".onnx.TensorAnnotation.quantParameterTensorNames: array expected"); + message.quantParameterTensorNames = []; + for (var i = 0; i < object.quantParameterTensorNames.length; ++i) { + if (typeof object.quantParameterTensorNames[i] !== "object") + throw TypeError(".onnx.TensorAnnotation.quantParameterTensorNames: object expected"); + message.quantParameterTensorNames[i] = $root.onnx.StringStringEntryProto.fromObject(object.quantParameterTensorNames[i]); + } + } + return message; + }; + + /** + * Creates a plain object from a TensorAnnotation message. Also converts values to other types if specified. + * @function toObject + * @memberof onnx.TensorAnnotation + * @static + * @param {onnx.TensorAnnotation} message TensorAnnotation + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + TensorAnnotation.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.arrays || options.defaults) + object.quantParameterTensorNames = []; + if (options.defaults) + object.tensorName = ""; + if (message.tensorName != null && message.hasOwnProperty("tensorName")) + object.tensorName = message.tensorName; + if (message.quantParameterTensorNames && message.quantParameterTensorNames.length) { + object.quantParameterTensorNames = []; + for (var j = 0; j < message.quantParameterTensorNames.length; ++j) + object.quantParameterTensorNames[j] = $root.onnx.StringStringEntryProto.toObject(message.quantParameterTensorNames[j], options); + } + return object; + }; + + /** + * Converts this TensorAnnotation to JSON. + * @function toJSON + * @memberof onnx.TensorAnnotation + * @instance + * @returns {Object.} JSON object + */ + TensorAnnotation.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for TensorAnnotation + * @function getTypeUrl + * @memberof onnx.TensorAnnotation + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + TensorAnnotation.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/onnx.TensorAnnotation"; + }; + + return TensorAnnotation; + })(); + + onnx.GraphProto = (function() { + + /** + * Properties of a GraphProto. 
+ * @memberof onnx + * @interface IGraphProto + * @property {Array.|null} [node] GraphProto node + * @property {string|null} [name] GraphProto name + * @property {Array.|null} [initializer] GraphProto initializer + * @property {Array.|null} [sparseInitializer] GraphProto sparseInitializer + * @property {string|null} [docString] GraphProto docString + * @property {Array.|null} [input] GraphProto input + * @property {Array.|null} [output] GraphProto output + * @property {Array.|null} [valueInfo] GraphProto valueInfo + * @property {Array.|null} [quantizationAnnotation] GraphProto quantizationAnnotation + */ + + /** + * Constructs a new GraphProto. + * @memberof onnx + * @classdesc Represents a GraphProto. + * @implements IGraphProto + * @constructor + * @param {onnx.IGraphProto=} [properties] Properties to set + */ + function GraphProto(properties) { + this.node = []; + this.initializer = []; + this.sparseInitializer = []; + this.input = []; + this.output = []; + this.valueInfo = []; + this.quantizationAnnotation = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * GraphProto node. + * @member {Array.} node + * @memberof onnx.GraphProto + * @instance + */ + GraphProto.prototype.node = $util.emptyArray; + + /** + * GraphProto name. + * @member {string} name + * @memberof onnx.GraphProto + * @instance + */ + GraphProto.prototype.name = ""; + + /** + * GraphProto initializer. + * @member {Array.} initializer + * @memberof onnx.GraphProto + * @instance + */ + GraphProto.prototype.initializer = $util.emptyArray; + + /** + * GraphProto sparseInitializer. + * @member {Array.} sparseInitializer + * @memberof onnx.GraphProto + * @instance + */ + GraphProto.prototype.sparseInitializer = $util.emptyArray; + + /** + * GraphProto docString. + * @member {string} docString + * @memberof onnx.GraphProto + * @instance + */ + GraphProto.prototype.docString = ""; + + /** + * GraphProto input. + * @member {Array.} input + * @memberof onnx.GraphProto + * @instance + */ + GraphProto.prototype.input = $util.emptyArray; + + /** + * GraphProto output. + * @member {Array.} output + * @memberof onnx.GraphProto + * @instance + */ + GraphProto.prototype.output = $util.emptyArray; + + /** + * GraphProto valueInfo. + * @member {Array.} valueInfo + * @memberof onnx.GraphProto + * @instance + */ + GraphProto.prototype.valueInfo = $util.emptyArray; + + /** + * GraphProto quantizationAnnotation. + * @member {Array.} quantizationAnnotation + * @memberof onnx.GraphProto + * @instance + */ + GraphProto.prototype.quantizationAnnotation = $util.emptyArray; + + /** + * Creates a new GraphProto instance using the specified properties. + * @function create + * @memberof onnx.GraphProto + * @static + * @param {onnx.IGraphProto=} [properties] Properties to set + * @returns {onnx.GraphProto} GraphProto instance + */ + GraphProto.create = function create(properties) { + return new GraphProto(properties); + }; + + /** + * Encodes the specified GraphProto message. Does not implicitly {@link onnx.GraphProto.verify|verify} messages. 
+ * @function encode + * @memberof onnx.GraphProto + * @static + * @param {onnx.IGraphProto} message GraphProto message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + GraphProto.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.node != null && message.node.length) + for (var i = 0; i < message.node.length; ++i) + $root.onnx.NodeProto.encode(message.node[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.name); + if (message.initializer != null && message.initializer.length) + for (var i = 0; i < message.initializer.length; ++i) + $root.onnx.TensorProto.encode(message.initializer[i], writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.docString != null && Object.hasOwnProperty.call(message, "docString")) + writer.uint32(/* id 10, wireType 2 =*/82).string(message.docString); + if (message.input != null && message.input.length) + for (var i = 0; i < message.input.length; ++i) + $root.onnx.ValueInfoProto.encode(message.input[i], writer.uint32(/* id 11, wireType 2 =*/90).fork()).ldelim(); + if (message.output != null && message.output.length) + for (var i = 0; i < message.output.length; ++i) + $root.onnx.ValueInfoProto.encode(message.output[i], writer.uint32(/* id 12, wireType 2 =*/98).fork()).ldelim(); + if (message.valueInfo != null && message.valueInfo.length) + for (var i = 0; i < message.valueInfo.length; ++i) + $root.onnx.ValueInfoProto.encode(message.valueInfo[i], writer.uint32(/* id 13, wireType 2 =*/106).fork()).ldelim(); + if (message.quantizationAnnotation != null && message.quantizationAnnotation.length) + for (var i = 0; i < message.quantizationAnnotation.length; ++i) + $root.onnx.TensorAnnotation.encode(message.quantizationAnnotation[i], writer.uint32(/* id 14, wireType 2 =*/114).fork()).ldelim(); + if (message.sparseInitializer != null && message.sparseInitializer.length) + for (var i = 0; i < message.sparseInitializer.length; ++i) + $root.onnx.SparseTensorProto.encode(message.sparseInitializer[i], writer.uint32(/* id 15, wireType 2 =*/122).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified GraphProto message, length delimited. Does not implicitly {@link onnx.GraphProto.verify|verify} messages. + * @function encodeDelimited + * @memberof onnx.GraphProto + * @static + * @param {onnx.IGraphProto} message GraphProto message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + GraphProto.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a GraphProto message from the specified reader or buffer. + * @function decode + * @memberof onnx.GraphProto + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {onnx.GraphProto} GraphProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + GraphProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.onnx.GraphProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (!(message.node && message.node.length)) + message.node = []; + message.node.push($root.onnx.NodeProto.decode(reader, reader.uint32())); + break; + } + case 2: { + message.name = reader.string(); + break; + } + case 5: { + if (!(message.initializer && message.initializer.length)) + message.initializer = []; + message.initializer.push($root.onnx.TensorProto.decode(reader, reader.uint32())); + break; + } + case 15: { + if (!(message.sparseInitializer && message.sparseInitializer.length)) + message.sparseInitializer = []; + message.sparseInitializer.push($root.onnx.SparseTensorProto.decode(reader, reader.uint32())); + break; + } + case 10: { + message.docString = reader.string(); + break; + } + case 11: { + if (!(message.input && message.input.length)) + message.input = []; + message.input.push($root.onnx.ValueInfoProto.decode(reader, reader.uint32())); + break; + } + case 12: { + if (!(message.output && message.output.length)) + message.output = []; + message.output.push($root.onnx.ValueInfoProto.decode(reader, reader.uint32())); + break; + } + case 13: { + if (!(message.valueInfo && message.valueInfo.length)) + message.valueInfo = []; + message.valueInfo.push($root.onnx.ValueInfoProto.decode(reader, reader.uint32())); + break; + } + case 14: { + if (!(message.quantizationAnnotation && message.quantizationAnnotation.length)) + message.quantizationAnnotation = []; + message.quantizationAnnotation.push($root.onnx.TensorAnnotation.decode(reader, reader.uint32())); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a GraphProto message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof onnx.GraphProto + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {onnx.GraphProto} GraphProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + GraphProto.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a GraphProto message. + * @function verify + * @memberof onnx.GraphProto + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + GraphProto.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.node != null && message.hasOwnProperty("node")) { + if (!Array.isArray(message.node)) + return "node: array expected"; + for (var i = 0; i < message.node.length; ++i) { + var error = $root.onnx.NodeProto.verify(message.node[i]); + if (error) + return "node." + error; + } + } + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.initializer != null && message.hasOwnProperty("initializer")) { + if (!Array.isArray(message.initializer)) + return "initializer: array expected"; + for (var i = 0; i < message.initializer.length; ++i) { + var error = $root.onnx.TensorProto.verify(message.initializer[i]); + if (error) + return "initializer." 
+ error; + } + } + if (message.sparseInitializer != null && message.hasOwnProperty("sparseInitializer")) { + if (!Array.isArray(message.sparseInitializer)) + return "sparseInitializer: array expected"; + for (var i = 0; i < message.sparseInitializer.length; ++i) { + var error = $root.onnx.SparseTensorProto.verify(message.sparseInitializer[i]); + if (error) + return "sparseInitializer." + error; + } + } + if (message.docString != null && message.hasOwnProperty("docString")) + if (!$util.isString(message.docString)) + return "docString: string expected"; + if (message.input != null && message.hasOwnProperty("input")) { + if (!Array.isArray(message.input)) + return "input: array expected"; + for (var i = 0; i < message.input.length; ++i) { + var error = $root.onnx.ValueInfoProto.verify(message.input[i]); + if (error) + return "input." + error; + } + } + if (message.output != null && message.hasOwnProperty("output")) { + if (!Array.isArray(message.output)) + return "output: array expected"; + for (var i = 0; i < message.output.length; ++i) { + var error = $root.onnx.ValueInfoProto.verify(message.output[i]); + if (error) + return "output." + error; + } + } + if (message.valueInfo != null && message.hasOwnProperty("valueInfo")) { + if (!Array.isArray(message.valueInfo)) + return "valueInfo: array expected"; + for (var i = 0; i < message.valueInfo.length; ++i) { + var error = $root.onnx.ValueInfoProto.verify(message.valueInfo[i]); + if (error) + return "valueInfo." + error; + } + } + if (message.quantizationAnnotation != null && message.hasOwnProperty("quantizationAnnotation")) { + if (!Array.isArray(message.quantizationAnnotation)) + return "quantizationAnnotation: array expected"; + for (var i = 0; i < message.quantizationAnnotation.length; ++i) { + var error = $root.onnx.TensorAnnotation.verify(message.quantizationAnnotation[i]); + if (error) + return "quantizationAnnotation." + error; + } + } + return null; + }; + + /** + * Creates a GraphProto message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof onnx.GraphProto + * @static + * @param {Object.} object Plain object + * @returns {onnx.GraphProto} GraphProto + */ + GraphProto.fromObject = function fromObject(object) { + if (object instanceof $root.onnx.GraphProto) + return object; + var message = new $root.onnx.GraphProto(); + if (object.node) { + if (!Array.isArray(object.node)) + throw TypeError(".onnx.GraphProto.node: array expected"); + message.node = []; + for (var i = 0; i < object.node.length; ++i) { + if (typeof object.node[i] !== "object") + throw TypeError(".onnx.GraphProto.node: object expected"); + message.node[i] = $root.onnx.NodeProto.fromObject(object.node[i]); + } + } + if (object.name != null) + message.name = String(object.name); + if (object.initializer) { + if (!Array.isArray(object.initializer)) + throw TypeError(".onnx.GraphProto.initializer: array expected"); + message.initializer = []; + for (var i = 0; i < object.initializer.length; ++i) { + if (typeof object.initializer[i] !== "object") + throw TypeError(".onnx.GraphProto.initializer: object expected"); + message.initializer[i] = $root.onnx.TensorProto.fromObject(object.initializer[i]); + } + } + if (object.sparseInitializer) { + if (!Array.isArray(object.sparseInitializer)) + throw TypeError(".onnx.GraphProto.sparseInitializer: array expected"); + message.sparseInitializer = []; + for (var i = 0; i < object.sparseInitializer.length; ++i) { + if (typeof object.sparseInitializer[i] !== "object") + throw TypeError(".onnx.GraphProto.sparseInitializer: object expected"); + message.sparseInitializer[i] = $root.onnx.SparseTensorProto.fromObject(object.sparseInitializer[i]); + } + } + if (object.docString != null) + message.docString = String(object.docString); + if (object.input) { + if (!Array.isArray(object.input)) + throw TypeError(".onnx.GraphProto.input: array expected"); + message.input = []; + for (var i = 0; i < object.input.length; ++i) { + if (typeof object.input[i] !== "object") + throw TypeError(".onnx.GraphProto.input: object expected"); + message.input[i] = $root.onnx.ValueInfoProto.fromObject(object.input[i]); + } + } + if (object.output) { + if (!Array.isArray(object.output)) + throw TypeError(".onnx.GraphProto.output: array expected"); + message.output = []; + for (var i = 0; i < object.output.length; ++i) { + if (typeof object.output[i] !== "object") + throw TypeError(".onnx.GraphProto.output: object expected"); + message.output[i] = $root.onnx.ValueInfoProto.fromObject(object.output[i]); + } + } + if (object.valueInfo) { + if (!Array.isArray(object.valueInfo)) + throw TypeError(".onnx.GraphProto.valueInfo: array expected"); + message.valueInfo = []; + for (var i = 0; i < object.valueInfo.length; ++i) { + if (typeof object.valueInfo[i] !== "object") + throw TypeError(".onnx.GraphProto.valueInfo: object expected"); + message.valueInfo[i] = $root.onnx.ValueInfoProto.fromObject(object.valueInfo[i]); + } + } + if (object.quantizationAnnotation) { + if (!Array.isArray(object.quantizationAnnotation)) + throw TypeError(".onnx.GraphProto.quantizationAnnotation: array expected"); + message.quantizationAnnotation = []; + for (var i = 0; i < object.quantizationAnnotation.length; ++i) { + if (typeof object.quantizationAnnotation[i] !== "object") + throw TypeError(".onnx.GraphProto.quantizationAnnotation: object expected"); + message.quantizationAnnotation[i] = $root.onnx.TensorAnnotation.fromObject(object.quantizationAnnotation[i]); + } + } + return message; + }; + + /** + * Creates a plain object from a GraphProto 
message. Also converts values to other types if specified. + * @function toObject + * @memberof onnx.GraphProto + * @static + * @param {onnx.GraphProto} message GraphProto + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + GraphProto.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.arrays || options.defaults) { + object.node = []; + object.initializer = []; + object.input = []; + object.output = []; + object.valueInfo = []; + object.quantizationAnnotation = []; + object.sparseInitializer = []; + } + if (options.defaults) { + object.name = ""; + object.docString = ""; + } + if (message.node && message.node.length) { + object.node = []; + for (var j = 0; j < message.node.length; ++j) + object.node[j] = $root.onnx.NodeProto.toObject(message.node[j], options); + } + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.initializer && message.initializer.length) { + object.initializer = []; + for (var j = 0; j < message.initializer.length; ++j) + object.initializer[j] = $root.onnx.TensorProto.toObject(message.initializer[j], options); + } + if (message.docString != null && message.hasOwnProperty("docString")) + object.docString = message.docString; + if (message.input && message.input.length) { + object.input = []; + for (var j = 0; j < message.input.length; ++j) + object.input[j] = $root.onnx.ValueInfoProto.toObject(message.input[j], options); + } + if (message.output && message.output.length) { + object.output = []; + for (var j = 0; j < message.output.length; ++j) + object.output[j] = $root.onnx.ValueInfoProto.toObject(message.output[j], options); + } + if (message.valueInfo && message.valueInfo.length) { + object.valueInfo = []; + for (var j = 0; j < message.valueInfo.length; ++j) + object.valueInfo[j] = $root.onnx.ValueInfoProto.toObject(message.valueInfo[j], options); + } + if (message.quantizationAnnotation && message.quantizationAnnotation.length) { + object.quantizationAnnotation = []; + for (var j = 0; j < message.quantizationAnnotation.length; ++j) + object.quantizationAnnotation[j] = $root.onnx.TensorAnnotation.toObject(message.quantizationAnnotation[j], options); + } + if (message.sparseInitializer && message.sparseInitializer.length) { + object.sparseInitializer = []; + for (var j = 0; j < message.sparseInitializer.length; ++j) + object.sparseInitializer[j] = $root.onnx.SparseTensorProto.toObject(message.sparseInitializer[j], options); + } + return object; + }; + + /** + * Converts this GraphProto to JSON. + * @function toJSON + * @memberof onnx.GraphProto + * @instance + * @returns {Object.} JSON object + */ + GraphProto.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for GraphProto + * @function getTypeUrl + * @memberof onnx.GraphProto + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + GraphProto.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/onnx.GraphProto"; + }; + + return GraphProto; + })(); + + onnx.TensorProto = (function() { + + /** + * Properties of a TensorProto. 
+ * @memberof onnx + * @interface ITensorProto + * @property {Array.|null} [dims] TensorProto dims + * @property {number|null} [dataType] TensorProto dataType + * @property {onnx.TensorProto.ISegment|null} [segment] TensorProto segment + * @property {Array.|null} [floatData] TensorProto floatData + * @property {Array.|null} [int32Data] TensorProto int32Data + * @property {Array.|null} [stringData] TensorProto stringData + * @property {Array.|null} [int64Data] TensorProto int64Data + * @property {string|null} [name] TensorProto name + * @property {string|null} [docString] TensorProto docString + * @property {Uint8Array|null} [rawData] TensorProto rawData + * @property {Array.|null} [externalData] TensorProto externalData + * @property {onnx.TensorProto.DataLocation|null} [dataLocation] TensorProto dataLocation + * @property {Array.|null} [doubleData] TensorProto doubleData + * @property {Array.|null} [uint64Data] TensorProto uint64Data + */ + + /** + * Constructs a new TensorProto. + * @memberof onnx + * @classdesc Represents a TensorProto. + * @implements ITensorProto + * @constructor + * @param {onnx.ITensorProto=} [properties] Properties to set + */ + function TensorProto(properties) { + this.dims = []; + this.floatData = []; + this.int32Data = []; + this.stringData = []; + this.int64Data = []; + this.externalData = []; + this.doubleData = []; + this.uint64Data = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * TensorProto dims. + * @member {Array.} dims + * @memberof onnx.TensorProto + * @instance + */ + TensorProto.prototype.dims = $util.emptyArray; + + /** + * TensorProto dataType. + * @member {number} dataType + * @memberof onnx.TensorProto + * @instance + */ + TensorProto.prototype.dataType = 0; + + /** + * TensorProto segment. + * @member {onnx.TensorProto.ISegment|null|undefined} segment + * @memberof onnx.TensorProto + * @instance + */ + TensorProto.prototype.segment = null; + + /** + * TensorProto floatData. + * @member {Array.} floatData + * @memberof onnx.TensorProto + * @instance + */ + TensorProto.prototype.floatData = $util.emptyArray; + + /** + * TensorProto int32Data. + * @member {Array.} int32Data + * @memberof onnx.TensorProto + * @instance + */ + TensorProto.prototype.int32Data = $util.emptyArray; + + /** + * TensorProto stringData. + * @member {Array.} stringData + * @memberof onnx.TensorProto + * @instance + */ + TensorProto.prototype.stringData = $util.emptyArray; + + /** + * TensorProto int64Data. + * @member {Array.} int64Data + * @memberof onnx.TensorProto + * @instance + */ + TensorProto.prototype.int64Data = $util.emptyArray; + + /** + * TensorProto name. + * @member {string} name + * @memberof onnx.TensorProto + * @instance + */ + TensorProto.prototype.name = ""; + + /** + * TensorProto docString. + * @member {string} docString + * @memberof onnx.TensorProto + * @instance + */ + TensorProto.prototype.docString = ""; + + /** + * TensorProto rawData. + * @member {Uint8Array} rawData + * @memberof onnx.TensorProto + * @instance + */ + TensorProto.prototype.rawData = $util.newBuffer([]); + + /** + * TensorProto externalData. + * @member {Array.} externalData + * @memberof onnx.TensorProto + * @instance + */ + TensorProto.prototype.externalData = $util.emptyArray; + + /** + * TensorProto dataLocation. 
+ * @member {onnx.TensorProto.DataLocation} dataLocation + * @memberof onnx.TensorProto + * @instance + */ + TensorProto.prototype.dataLocation = 0; + + /** + * TensorProto doubleData. + * @member {Array.} doubleData + * @memberof onnx.TensorProto + * @instance + */ + TensorProto.prototype.doubleData = $util.emptyArray; + + /** + * TensorProto uint64Data. + * @member {Array.} uint64Data + * @memberof onnx.TensorProto + * @instance + */ + TensorProto.prototype.uint64Data = $util.emptyArray; + + /** + * Creates a new TensorProto instance using the specified properties. + * @function create + * @memberof onnx.TensorProto + * @static + * @param {onnx.ITensorProto=} [properties] Properties to set + * @returns {onnx.TensorProto} TensorProto instance + */ + TensorProto.create = function create(properties) { + return new TensorProto(properties); + }; + + /** + * Encodes the specified TensorProto message. Does not implicitly {@link onnx.TensorProto.verify|verify} messages. + * @function encode + * @memberof onnx.TensorProto + * @static + * @param {onnx.ITensorProto} message TensorProto message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + TensorProto.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.dims != null && message.dims.length) { + writer.uint32(/* id 1, wireType 2 =*/10).fork(); + for (var i = 0; i < message.dims.length; ++i) + writer.int64(message.dims[i]); + writer.ldelim(); + } + if (message.dataType != null && Object.hasOwnProperty.call(message, "dataType")) + writer.uint32(/* id 2, wireType 0 =*/16).int32(message.dataType); + if (message.segment != null && Object.hasOwnProperty.call(message, "segment")) + $root.onnx.TensorProto.Segment.encode(message.segment, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.floatData != null && message.floatData.length) { + writer.uint32(/* id 4, wireType 2 =*/34).fork(); + for (var i = 0; i < message.floatData.length; ++i) + writer.float(message.floatData[i]); + writer.ldelim(); + } + if (message.int32Data != null && message.int32Data.length) { + writer.uint32(/* id 5, wireType 2 =*/42).fork(); + for (var i = 0; i < message.int32Data.length; ++i) + writer.int32(message.int32Data[i]); + writer.ldelim(); + } + if (message.stringData != null && message.stringData.length) + for (var i = 0; i < message.stringData.length; ++i) + writer.uint32(/* id 6, wireType 2 =*/50).bytes(message.stringData[i]); + if (message.int64Data != null && message.int64Data.length) { + writer.uint32(/* id 7, wireType 2 =*/58).fork(); + for (var i = 0; i < message.int64Data.length; ++i) + writer.int64(message.int64Data[i]); + writer.ldelim(); + } + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 8, wireType 2 =*/66).string(message.name); + if (message.rawData != null && Object.hasOwnProperty.call(message, "rawData")) + writer.uint32(/* id 9, wireType 2 =*/74).bytes(message.rawData); + if (message.doubleData != null && message.doubleData.length) { + writer.uint32(/* id 10, wireType 2 =*/82).fork(); + for (var i = 0; i < message.doubleData.length; ++i) + writer.double(message.doubleData[i]); + writer.ldelim(); + } + if (message.uint64Data != null && message.uint64Data.length) { + writer.uint32(/* id 11, wireType 2 =*/90).fork(); + for (var i = 0; i < message.uint64Data.length; ++i) + writer.uint64(message.uint64Data[i]); + writer.ldelim(); + } + if (message.docString != null && 
Object.hasOwnProperty.call(message, "docString")) + writer.uint32(/* id 12, wireType 2 =*/98).string(message.docString); + if (message.externalData != null && message.externalData.length) + for (var i = 0; i < message.externalData.length; ++i) + $root.onnx.StringStringEntryProto.encode(message.externalData[i], writer.uint32(/* id 13, wireType 2 =*/106).fork()).ldelim(); + if (message.dataLocation != null && Object.hasOwnProperty.call(message, "dataLocation")) + writer.uint32(/* id 14, wireType 0 =*/112).int32(message.dataLocation); + return writer; + }; + + /** + * Encodes the specified TensorProto message, length delimited. Does not implicitly {@link onnx.TensorProto.verify|verify} messages. + * @function encodeDelimited + * @memberof onnx.TensorProto + * @static + * @param {onnx.ITensorProto} message TensorProto message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + TensorProto.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a TensorProto message from the specified reader or buffer. + * @function decode + * @memberof onnx.TensorProto + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {onnx.TensorProto} TensorProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + TensorProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.onnx.TensorProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (!(message.dims && message.dims.length)) + message.dims = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.dims.push(reader.int64()); + } else + message.dims.push(reader.int64()); + break; + } + case 2: { + message.dataType = reader.int32(); + break; + } + case 3: { + message.segment = $root.onnx.TensorProto.Segment.decode(reader, reader.uint32()); + break; + } + case 4: { + if (!(message.floatData && message.floatData.length)) + message.floatData = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.floatData.push(reader.float()); + } else + message.floatData.push(reader.float()); + break; + } + case 5: { + if (!(message.int32Data && message.int32Data.length)) + message.int32Data = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.int32Data.push(reader.int32()); + } else + message.int32Data.push(reader.int32()); + break; + } + case 6: { + if (!(message.stringData && message.stringData.length)) + message.stringData = []; + message.stringData.push(reader.bytes()); + break; + } + case 7: { + if (!(message.int64Data && message.int64Data.length)) + message.int64Data = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.int64Data.push(reader.int64()); + } else + message.int64Data.push(reader.int64()); + break; + } + case 8: { + message.name = reader.string(); + break; + } + case 12: { + message.docString = reader.string(); + break; + } + case 9: { + message.rawData = reader.bytes(); + break; + } 
+ case 13: { + if (!(message.externalData && message.externalData.length)) + message.externalData = []; + message.externalData.push($root.onnx.StringStringEntryProto.decode(reader, reader.uint32())); + break; + } + case 14: { + message.dataLocation = reader.int32(); + break; + } + case 10: { + if (!(message.doubleData && message.doubleData.length)) + message.doubleData = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.doubleData.push(reader.double()); + } else + message.doubleData.push(reader.double()); + break; + } + case 11: { + if (!(message.uint64Data && message.uint64Data.length)) + message.uint64Data = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.uint64Data.push(reader.uint64()); + } else + message.uint64Data.push(reader.uint64()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a TensorProto message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof onnx.TensorProto + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {onnx.TensorProto} TensorProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + TensorProto.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a TensorProto message. + * @function verify + * @memberof onnx.TensorProto + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + TensorProto.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.dims != null && message.hasOwnProperty("dims")) { + if (!Array.isArray(message.dims)) + return "dims: array expected"; + for (var i = 0; i < message.dims.length; ++i) + if (!$util.isInteger(message.dims[i]) && !(message.dims[i] && $util.isInteger(message.dims[i].low) && $util.isInteger(message.dims[i].high))) + return "dims: integer|Long[] expected"; + } + if (message.dataType != null && message.hasOwnProperty("dataType")) + if (!$util.isInteger(message.dataType)) + return "dataType: integer expected"; + if (message.segment != null && message.hasOwnProperty("segment")) { + var error = $root.onnx.TensorProto.Segment.verify(message.segment); + if (error) + return "segment." 
+ error; + } + if (message.floatData != null && message.hasOwnProperty("floatData")) { + if (!Array.isArray(message.floatData)) + return "floatData: array expected"; + for (var i = 0; i < message.floatData.length; ++i) + if (typeof message.floatData[i] !== "number") + return "floatData: number[] expected"; + } + if (message.int32Data != null && message.hasOwnProperty("int32Data")) { + if (!Array.isArray(message.int32Data)) + return "int32Data: array expected"; + for (var i = 0; i < message.int32Data.length; ++i) + if (!$util.isInteger(message.int32Data[i])) + return "int32Data: integer[] expected"; + } + if (message.stringData != null && message.hasOwnProperty("stringData")) { + if (!Array.isArray(message.stringData)) + return "stringData: array expected"; + for (var i = 0; i < message.stringData.length; ++i) + if (!(message.stringData[i] && typeof message.stringData[i].length === "number" || $util.isString(message.stringData[i]))) + return "stringData: buffer[] expected"; + } + if (message.int64Data != null && message.hasOwnProperty("int64Data")) { + if (!Array.isArray(message.int64Data)) + return "int64Data: array expected"; + for (var i = 0; i < message.int64Data.length; ++i) + if (!$util.isInteger(message.int64Data[i]) && !(message.int64Data[i] && $util.isInteger(message.int64Data[i].low) && $util.isInteger(message.int64Data[i].high))) + return "int64Data: integer|Long[] expected"; + } + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.docString != null && message.hasOwnProperty("docString")) + if (!$util.isString(message.docString)) + return "docString: string expected"; + if (message.rawData != null && message.hasOwnProperty("rawData")) + if (!(message.rawData && typeof message.rawData.length === "number" || $util.isString(message.rawData))) + return "rawData: buffer expected"; + if (message.externalData != null && message.hasOwnProperty("externalData")) { + if (!Array.isArray(message.externalData)) + return "externalData: array expected"; + for (var i = 0; i < message.externalData.length; ++i) { + var error = $root.onnx.StringStringEntryProto.verify(message.externalData[i]); + if (error) + return "externalData." + error; + } + } + if (message.dataLocation != null && message.hasOwnProperty("dataLocation")) + switch (message.dataLocation) { + default: + return "dataLocation: enum value expected"; + case 0: + case 1: + break; + } + if (message.doubleData != null && message.hasOwnProperty("doubleData")) { + if (!Array.isArray(message.doubleData)) + return "doubleData: array expected"; + for (var i = 0; i < message.doubleData.length; ++i) + if (typeof message.doubleData[i] !== "number") + return "doubleData: number[] expected"; + } + if (message.uint64Data != null && message.hasOwnProperty("uint64Data")) { + if (!Array.isArray(message.uint64Data)) + return "uint64Data: array expected"; + for (var i = 0; i < message.uint64Data.length; ++i) + if (!$util.isInteger(message.uint64Data[i]) && !(message.uint64Data[i] && $util.isInteger(message.uint64Data[i].low) && $util.isInteger(message.uint64Data[i].high))) + return "uint64Data: integer|Long[] expected"; + } + return null; + }; + + /** + * Creates a TensorProto message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof onnx.TensorProto + * @static + * @param {Object.} object Plain object + * @returns {onnx.TensorProto} TensorProto + */ + TensorProto.fromObject = function fromObject(object) { + if (object instanceof $root.onnx.TensorProto) + return object; + var message = new $root.onnx.TensorProto(); + if (object.dims) { + if (!Array.isArray(object.dims)) + throw TypeError(".onnx.TensorProto.dims: array expected"); + message.dims = []; + for (var i = 0; i < object.dims.length; ++i) + if ($util.Long) + (message.dims[i] = $util.Long.fromValue(object.dims[i])).unsigned = false; + else if (typeof object.dims[i] === "string") + message.dims[i] = parseInt(object.dims[i], 10); + else if (typeof object.dims[i] === "number") + message.dims[i] = object.dims[i]; + else if (typeof object.dims[i] === "object") + message.dims[i] = new $util.LongBits(object.dims[i].low >>> 0, object.dims[i].high >>> 0).toNumber(); + } + if (object.dataType != null) + message.dataType = object.dataType | 0; + if (object.segment != null) { + if (typeof object.segment !== "object") + throw TypeError(".onnx.TensorProto.segment: object expected"); + message.segment = $root.onnx.TensorProto.Segment.fromObject(object.segment); + } + if (object.floatData) { + if (!Array.isArray(object.floatData)) + throw TypeError(".onnx.TensorProto.floatData: array expected"); + message.floatData = []; + for (var i = 0; i < object.floatData.length; ++i) + message.floatData[i] = Number(object.floatData[i]); + } + if (object.int32Data) { + if (!Array.isArray(object.int32Data)) + throw TypeError(".onnx.TensorProto.int32Data: array expected"); + message.int32Data = []; + for (var i = 0; i < object.int32Data.length; ++i) + message.int32Data[i] = object.int32Data[i] | 0; + } + if (object.stringData) { + if (!Array.isArray(object.stringData)) + throw TypeError(".onnx.TensorProto.stringData: array expected"); + message.stringData = []; + for (var i = 0; i < object.stringData.length; ++i) + if (typeof object.stringData[i] === "string") + $util.base64.decode(object.stringData[i], message.stringData[i] = $util.newBuffer($util.base64.length(object.stringData[i])), 0); + else if (object.stringData[i].length >= 0) + message.stringData[i] = object.stringData[i]; + } + if (object.int64Data) { + if (!Array.isArray(object.int64Data)) + throw TypeError(".onnx.TensorProto.int64Data: array expected"); + message.int64Data = []; + for (var i = 0; i < object.int64Data.length; ++i) + if ($util.Long) + (message.int64Data[i] = $util.Long.fromValue(object.int64Data[i])).unsigned = false; + else if (typeof object.int64Data[i] === "string") + message.int64Data[i] = parseInt(object.int64Data[i], 10); + else if (typeof object.int64Data[i] === "number") + message.int64Data[i] = object.int64Data[i]; + else if (typeof object.int64Data[i] === "object") + message.int64Data[i] = new $util.LongBits(object.int64Data[i].low >>> 0, object.int64Data[i].high >>> 0).toNumber(); + } + if (object.name != null) + message.name = String(object.name); + if (object.docString != null) + message.docString = String(object.docString); + if (object.rawData != null) + if (typeof object.rawData === "string") + $util.base64.decode(object.rawData, message.rawData = $util.newBuffer($util.base64.length(object.rawData)), 0); + else if (object.rawData.length >= 0) + message.rawData = object.rawData; + if (object.externalData) { + if (!Array.isArray(object.externalData)) + throw TypeError(".onnx.TensorProto.externalData: array expected"); + message.externalData = []; + for (var i = 0; 
i < object.externalData.length; ++i) { + if (typeof object.externalData[i] !== "object") + throw TypeError(".onnx.TensorProto.externalData: object expected"); + message.externalData[i] = $root.onnx.StringStringEntryProto.fromObject(object.externalData[i]); + } + } + switch (object.dataLocation) { + default: + if (typeof object.dataLocation === "number") { + message.dataLocation = object.dataLocation; + break; + } + break; + case "DEFAULT": + case 0: + message.dataLocation = 0; + break; + case "EXTERNAL": + case 1: + message.dataLocation = 1; + break; + } + if (object.doubleData) { + if (!Array.isArray(object.doubleData)) + throw TypeError(".onnx.TensorProto.doubleData: array expected"); + message.doubleData = []; + for (var i = 0; i < object.doubleData.length; ++i) + message.doubleData[i] = Number(object.doubleData[i]); + } + if (object.uint64Data) { + if (!Array.isArray(object.uint64Data)) + throw TypeError(".onnx.TensorProto.uint64Data: array expected"); + message.uint64Data = []; + for (var i = 0; i < object.uint64Data.length; ++i) + if ($util.Long) + (message.uint64Data[i] = $util.Long.fromValue(object.uint64Data[i])).unsigned = true; + else if (typeof object.uint64Data[i] === "string") + message.uint64Data[i] = parseInt(object.uint64Data[i], 10); + else if (typeof object.uint64Data[i] === "number") + message.uint64Data[i] = object.uint64Data[i]; + else if (typeof object.uint64Data[i] === "object") + message.uint64Data[i] = new $util.LongBits(object.uint64Data[i].low >>> 0, object.uint64Data[i].high >>> 0).toNumber(true); + } + return message; + }; + + /** + * Creates a plain object from a TensorProto message. Also converts values to other types if specified. + * @function toObject + * @memberof onnx.TensorProto + * @static + * @param {onnx.TensorProto} message TensorProto + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + TensorProto.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.arrays || options.defaults) { + object.dims = []; + object.floatData = []; + object.int32Data = []; + object.stringData = []; + object.int64Data = []; + object.doubleData = []; + object.uint64Data = []; + object.externalData = []; + } + if (options.defaults) { + object.dataType = 0; + object.segment = null; + object.name = ""; + if (options.bytes === String) + object.rawData = ""; + else { + object.rawData = []; + if (options.bytes !== Array) + object.rawData = $util.newBuffer(object.rawData); + } + object.docString = ""; + object.dataLocation = options.enums === String ? "DEFAULT" : 0; + } + if (message.dims && message.dims.length) { + object.dims = []; + for (var j = 0; j < message.dims.length; ++j) + if (typeof message.dims[j] === "number") + object.dims[j] = options.longs === String ? String(message.dims[j]) : message.dims[j]; + else + object.dims[j] = options.longs === String ? $util.Long.prototype.toString.call(message.dims[j]) : options.longs === Number ? 
new $util.LongBits(message.dims[j].low >>> 0, message.dims[j].high >>> 0).toNumber() : message.dims[j]; + } + if (message.dataType != null && message.hasOwnProperty("dataType")) + object.dataType = message.dataType; + if (message.segment != null && message.hasOwnProperty("segment")) + object.segment = $root.onnx.TensorProto.Segment.toObject(message.segment, options); + if (message.floatData && message.floatData.length) { + object.floatData = []; + for (var j = 0; j < message.floatData.length; ++j) + object.floatData[j] = options.json && !isFinite(message.floatData[j]) ? String(message.floatData[j]) : message.floatData[j]; + } + if (message.int32Data && message.int32Data.length) { + object.int32Data = []; + for (var j = 0; j < message.int32Data.length; ++j) + object.int32Data[j] = message.int32Data[j]; + } + if (message.stringData && message.stringData.length) { + object.stringData = []; + for (var j = 0; j < message.stringData.length; ++j) + object.stringData[j] = options.bytes === String ? $util.base64.encode(message.stringData[j], 0, message.stringData[j].length) : options.bytes === Array ? Array.prototype.slice.call(message.stringData[j]) : message.stringData[j]; + } + if (message.int64Data && message.int64Data.length) { + object.int64Data = []; + for (var j = 0; j < message.int64Data.length; ++j) + if (typeof message.int64Data[j] === "number") + object.int64Data[j] = options.longs === String ? String(message.int64Data[j]) : message.int64Data[j]; + else + object.int64Data[j] = options.longs === String ? $util.Long.prototype.toString.call(message.int64Data[j]) : options.longs === Number ? new $util.LongBits(message.int64Data[j].low >>> 0, message.int64Data[j].high >>> 0).toNumber() : message.int64Data[j]; + } + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.rawData != null && message.hasOwnProperty("rawData")) + object.rawData = options.bytes === String ? $util.base64.encode(message.rawData, 0, message.rawData.length) : options.bytes === Array ? Array.prototype.slice.call(message.rawData) : message.rawData; + if (message.doubleData && message.doubleData.length) { + object.doubleData = []; + for (var j = 0; j < message.doubleData.length; ++j) + object.doubleData[j] = options.json && !isFinite(message.doubleData[j]) ? String(message.doubleData[j]) : message.doubleData[j]; + } + if (message.uint64Data && message.uint64Data.length) { + object.uint64Data = []; + for (var j = 0; j < message.uint64Data.length; ++j) + if (typeof message.uint64Data[j] === "number") + object.uint64Data[j] = options.longs === String ? String(message.uint64Data[j]) : message.uint64Data[j]; + else + object.uint64Data[j] = options.longs === String ? $util.Long.prototype.toString.call(message.uint64Data[j]) : options.longs === Number ? new $util.LongBits(message.uint64Data[j].low >>> 0, message.uint64Data[j].high >>> 0).toNumber(true) : message.uint64Data[j]; + } + if (message.docString != null && message.hasOwnProperty("docString")) + object.docString = message.docString; + if (message.externalData && message.externalData.length) { + object.externalData = []; + for (var j = 0; j < message.externalData.length; ++j) + object.externalData[j] = $root.onnx.StringStringEntryProto.toObject(message.externalData[j], options); + } + if (message.dataLocation != null && message.hasOwnProperty("dataLocation")) + object.dataLocation = options.enums === String ? $root.onnx.TensorProto.DataLocation[message.dataLocation] === undefined ? 
message.dataLocation : $root.onnx.TensorProto.DataLocation[message.dataLocation] : message.dataLocation; + return object; + }; + + /** + * Converts this TensorProto to JSON. + * @function toJSON + * @memberof onnx.TensorProto + * @instance + * @returns {Object.} JSON object + */ + TensorProto.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for TensorProto + * @function getTypeUrl + * @memberof onnx.TensorProto + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + TensorProto.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/onnx.TensorProto"; + }; + + /** + * DataType enum. + * @name onnx.TensorProto.DataType + * @enum {number} + * @property {number} UNDEFINED=0 UNDEFINED value + * @property {number} FLOAT=1 FLOAT value + * @property {number} UINT8=2 UINT8 value + * @property {number} INT8=3 INT8 value + * @property {number} UINT16=4 UINT16 value + * @property {number} INT16=5 INT16 value + * @property {number} INT32=6 INT32 value + * @property {number} INT64=7 INT64 value + * @property {number} STRING=8 STRING value + * @property {number} BOOL=9 BOOL value + * @property {number} FLOAT16=10 FLOAT16 value + * @property {number} DOUBLE=11 DOUBLE value + * @property {number} UINT32=12 UINT32 value + * @property {number} UINT64=13 UINT64 value + * @property {number} COMPLEX64=14 COMPLEX64 value + * @property {number} COMPLEX128=15 COMPLEX128 value + * @property {number} BFLOAT16=16 BFLOAT16 value + * @property {number} FLOAT8E4M3FN=17 FLOAT8E4M3FN value + * @property {number} FLOAT8E4M3FNUZ=18 FLOAT8E4M3FNUZ value + * @property {number} FLOAT8E5M2=19 FLOAT8E5M2 value + * @property {number} FLOAT8E5M2FNUZ=20 FLOAT8E5M2FNUZ value + */ + TensorProto.DataType = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "UNDEFINED"] = 0; + values[valuesById[1] = "FLOAT"] = 1; + values[valuesById[2] = "UINT8"] = 2; + values[valuesById[3] = "INT8"] = 3; + values[valuesById[4] = "UINT16"] = 4; + values[valuesById[5] = "INT16"] = 5; + values[valuesById[6] = "INT32"] = 6; + values[valuesById[7] = "INT64"] = 7; + values[valuesById[8] = "STRING"] = 8; + values[valuesById[9] = "BOOL"] = 9; + values[valuesById[10] = "FLOAT16"] = 10; + values[valuesById[11] = "DOUBLE"] = 11; + values[valuesById[12] = "UINT32"] = 12; + values[valuesById[13] = "UINT64"] = 13; + values[valuesById[14] = "COMPLEX64"] = 14; + values[valuesById[15] = "COMPLEX128"] = 15; + values[valuesById[16] = "BFLOAT16"] = 16; + values[valuesById[17] = "FLOAT8E4M3FN"] = 17; + values[valuesById[18] = "FLOAT8E4M3FNUZ"] = 18; + values[valuesById[19] = "FLOAT8E5M2"] = 19; + values[valuesById[20] = "FLOAT8E5M2FNUZ"] = 20; + return values; + })(); + + TensorProto.Segment = (function() { + + /** + * Properties of a Segment. + * @memberof onnx.TensorProto + * @interface ISegment + * @property {number|Long|null} [begin] Segment begin + * @property {number|Long|null} [end] Segment end + */ + + /** + * Constructs a new Segment. + * @memberof onnx.TensorProto + * @classdesc Represents a Segment. 
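The DataType enum above mirrors onnx.TensorProto.DataType from onnx.proto, and together with the create/encode/decode/verify/fromObject/toObject helpers it is the complete static API that protobuf.js generates for TensorProto. A minimal round-trip sketch follows; the module path, variable names, and sample values are illustrative assumptions and are not part of this patch:

    // Hypothetical usage of the protobuf.js static module defined in this hunk.
    const onnx = require("./onnx");   // assumed path to the generated module

    const plain = {
      name: "example",
      dims: [1, 3],
      dataType: onnx.TensorProto.DataType.FLOAT,   // enum value 1
      floatData: [0.5, 1.5, 2.5],                  // exactly representable in float32
    };
    console.log(onnx.TensorProto.verify(plain));   // null => well-formed plain object

    const bytes = onnx.TensorProto.encode(onnx.TensorProto.fromObject(plain)).finish(); // Uint8Array
    const decoded = onnx.TensorProto.decode(bytes);                                     // TensorProto instance
    console.log(decoded.name, Array.from(decoded.floatData));                           // "example" [0.5, 1.5, 2.5]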
+ * @implements ISegment + * @constructor + * @param {onnx.TensorProto.ISegment=} [properties] Properties to set + */ + function Segment(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Segment begin. + * @member {number|Long} begin + * @memberof onnx.TensorProto.Segment + * @instance + */ + Segment.prototype.begin = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * Segment end. + * @member {number|Long} end + * @memberof onnx.TensorProto.Segment + * @instance + */ + Segment.prototype.end = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * Creates a new Segment instance using the specified properties. + * @function create + * @memberof onnx.TensorProto.Segment + * @static + * @param {onnx.TensorProto.ISegment=} [properties] Properties to set + * @returns {onnx.TensorProto.Segment} Segment instance + */ + Segment.create = function create(properties) { + return new Segment(properties); + }; + + /** + * Encodes the specified Segment message. Does not implicitly {@link onnx.TensorProto.Segment.verify|verify} messages. + * @function encode + * @memberof onnx.TensorProto.Segment + * @static + * @param {onnx.TensorProto.ISegment} message Segment message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Segment.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.begin != null && Object.hasOwnProperty.call(message, "begin")) + writer.uint32(/* id 1, wireType 0 =*/8).int64(message.begin); + if (message.end != null && Object.hasOwnProperty.call(message, "end")) + writer.uint32(/* id 2, wireType 0 =*/16).int64(message.end); + return writer; + }; + + /** + * Encodes the specified Segment message, length delimited. Does not implicitly {@link onnx.TensorProto.Segment.verify|verify} messages. + * @function encodeDelimited + * @memberof onnx.TensorProto.Segment + * @static + * @param {onnx.TensorProto.ISegment} message Segment message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Segment.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a Segment message from the specified reader or buffer. + * @function decode + * @memberof onnx.TensorProto.Segment + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {onnx.TensorProto.Segment} Segment + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Segment.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.onnx.TensorProto.Segment(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.begin = reader.int64(); + break; + } + case 2: { + message.end = reader.int64(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a Segment message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof onnx.TensorProto.Segment + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {onnx.TensorProto.Segment} Segment + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Segment.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a Segment message. + * @function verify + * @memberof onnx.TensorProto.Segment + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + Segment.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.begin != null && message.hasOwnProperty("begin")) + if (!$util.isInteger(message.begin) && !(message.begin && $util.isInteger(message.begin.low) && $util.isInteger(message.begin.high))) + return "begin: integer|Long expected"; + if (message.end != null && message.hasOwnProperty("end")) + if (!$util.isInteger(message.end) && !(message.end && $util.isInteger(message.end.low) && $util.isInteger(message.end.high))) + return "end: integer|Long expected"; + return null; + }; + + /** + * Creates a Segment message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof onnx.TensorProto.Segment + * @static + * @param {Object.} object Plain object + * @returns {onnx.TensorProto.Segment} Segment + */ + Segment.fromObject = function fromObject(object) { + if (object instanceof $root.onnx.TensorProto.Segment) + return object; + var message = new $root.onnx.TensorProto.Segment(); + if (object.begin != null) + if ($util.Long) + (message.begin = $util.Long.fromValue(object.begin)).unsigned = false; + else if (typeof object.begin === "string") + message.begin = parseInt(object.begin, 10); + else if (typeof object.begin === "number") + message.begin = object.begin; + else if (typeof object.begin === "object") + message.begin = new $util.LongBits(object.begin.low >>> 0, object.begin.high >>> 0).toNumber(); + if (object.end != null) + if ($util.Long) + (message.end = $util.Long.fromValue(object.end)).unsigned = false; + else if (typeof object.end === "string") + message.end = parseInt(object.end, 10); + else if (typeof object.end === "number") + message.end = object.end; + else if (typeof object.end === "object") + message.end = new $util.LongBits(object.end.low >>> 0, object.end.high >>> 0).toNumber(); + return message; + }; + + /** + * Creates a plain object from a Segment message. Also converts values to other types if specified. + * @function toObject + * @memberof onnx.TensorProto.Segment + * @static + * @param {onnx.TensorProto.Segment} message Segment + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + Segment.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.defaults) { + if ($util.Long) { + var long = new $util.Long(0, 0, false); + object.begin = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.begin = options.longs === String ? "0" : 0; + if ($util.Long) { + var long = new $util.Long(0, 0, false); + object.end = options.longs === String ? 
long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.end = options.longs === String ? "0" : 0; + } + if (message.begin != null && message.hasOwnProperty("begin")) + if (typeof message.begin === "number") + object.begin = options.longs === String ? String(message.begin) : message.begin; + else + object.begin = options.longs === String ? $util.Long.prototype.toString.call(message.begin) : options.longs === Number ? new $util.LongBits(message.begin.low >>> 0, message.begin.high >>> 0).toNumber() : message.begin; + if (message.end != null && message.hasOwnProperty("end")) + if (typeof message.end === "number") + object.end = options.longs === String ? String(message.end) : message.end; + else + object.end = options.longs === String ? $util.Long.prototype.toString.call(message.end) : options.longs === Number ? new $util.LongBits(message.end.low >>> 0, message.end.high >>> 0).toNumber() : message.end; + return object; + }; + + /** + * Converts this Segment to JSON. + * @function toJSON + * @memberof onnx.TensorProto.Segment + * @instance + * @returns {Object.} JSON object + */ + Segment.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for Segment + * @function getTypeUrl + * @memberof onnx.TensorProto.Segment + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + Segment.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/onnx.TensorProto.Segment"; + }; + + return Segment; + })(); + + /** + * DataLocation enum. + * @name onnx.TensorProto.DataLocation + * @enum {number} + * @property {number} DEFAULT=0 DEFAULT value + * @property {number} EXTERNAL=1 EXTERNAL value + */ + TensorProto.DataLocation = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "DEFAULT"] = 0; + values[valuesById[1] = "EXTERNAL"] = 1; + return values; + })(); + + return TensorProto; + })(); + + onnx.SparseTensorProto = (function() { + + /** + * Properties of a SparseTensorProto. + * @memberof onnx + * @interface ISparseTensorProto + * @property {onnx.ITensorProto|null} [values] SparseTensorProto values + * @property {onnx.ITensorProto|null} [indices] SparseTensorProto indices + * @property {Array.|null} [dims] SparseTensorProto dims + */ + + /** + * Constructs a new SparseTensorProto. + * @memberof onnx + * @classdesc Represents a SparseTensorProto. + * @implements ISparseTensorProto + * @constructor + * @param {onnx.ISparseTensorProto=} [properties] Properties to set + */ + function SparseTensorProto(properties) { + this.dims = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * SparseTensorProto values. + * @member {onnx.ITensorProto|null|undefined} values + * @memberof onnx.SparseTensorProto + * @instance + */ + SparseTensorProto.prototype.values = null; + + /** + * SparseTensorProto indices. + * @member {onnx.ITensorProto|null|undefined} indices + * @memberof onnx.SparseTensorProto + * @instance + */ + SparseTensorProto.prototype.indices = null; + + /** + * SparseTensorProto dims. 
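The DataLocation enum just defined distinguishes tensors whose bytes are stored inline (DEFAULT) from tensors whose bytes live outside the model file (EXTERNAL); in the external case, externalData carries StringStringEntryProto key/value pairs such as the file location. A hedged sketch, reusing the illustrative module handle from the earlier example and assuming the conventional "location" key from the ONNX external-data convention:

    const onnx = require("./onnx");   // assumed path to the generated module
    const weight = onnx.TensorProto.fromObject({
      name: "weight",
      dims: [1024, 1024],
      dataType: onnx.TensorProto.DataType.FLOAT,
      dataLocation: "EXTERNAL",        // fromObject also accepts the numeric value 1
      externalData: [{ key: "location", value: "weight.bin" }],
    });
    console.log(weight.dataLocation === onnx.TensorProto.DataLocation.EXTERNAL);  // true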
+ * @member {Array.} dims + * @memberof onnx.SparseTensorProto + * @instance + */ + SparseTensorProto.prototype.dims = $util.emptyArray; + + /** + * Creates a new SparseTensorProto instance using the specified properties. + * @function create + * @memberof onnx.SparseTensorProto + * @static + * @param {onnx.ISparseTensorProto=} [properties] Properties to set + * @returns {onnx.SparseTensorProto} SparseTensorProto instance + */ + SparseTensorProto.create = function create(properties) { + return new SparseTensorProto(properties); + }; + + /** + * Encodes the specified SparseTensorProto message. Does not implicitly {@link onnx.SparseTensorProto.verify|verify} messages. + * @function encode + * @memberof onnx.SparseTensorProto + * @static + * @param {onnx.ISparseTensorProto} message SparseTensorProto message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + SparseTensorProto.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.values != null && Object.hasOwnProperty.call(message, "values")) + $root.onnx.TensorProto.encode(message.values, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.indices != null && Object.hasOwnProperty.call(message, "indices")) + $root.onnx.TensorProto.encode(message.indices, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.dims != null && message.dims.length) { + writer.uint32(/* id 3, wireType 2 =*/26).fork(); + for (var i = 0; i < message.dims.length; ++i) + writer.int64(message.dims[i]); + writer.ldelim(); + } + return writer; + }; + + /** + * Encodes the specified SparseTensorProto message, length delimited. Does not implicitly {@link onnx.SparseTensorProto.verify|verify} messages. + * @function encodeDelimited + * @memberof onnx.SparseTensorProto + * @static + * @param {onnx.ISparseTensorProto} message SparseTensorProto message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + SparseTensorProto.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a SparseTensorProto message from the specified reader or buffer. + * @function decode + * @memberof onnx.SparseTensorProto + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {onnx.SparseTensorProto} SparseTensorProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + SparseTensorProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.onnx.SparseTensorProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.values = $root.onnx.TensorProto.decode(reader, reader.uint32()); + break; + } + case 2: { + message.indices = $root.onnx.TensorProto.decode(reader, reader.uint32()); + break; + } + case 3: { + if (!(message.dims && message.dims.length)) + message.dims = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.dims.push(reader.int64()); + } else + message.dims.push(reader.int64()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a SparseTensorProto message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof onnx.SparseTensorProto + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {onnx.SparseTensorProto} SparseTensorProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + SparseTensorProto.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a SparseTensorProto message. + * @function verify + * @memberof onnx.SparseTensorProto + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + SparseTensorProto.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.values != null && message.hasOwnProperty("values")) { + var error = $root.onnx.TensorProto.verify(message.values); + if (error) + return "values." + error; + } + if (message.indices != null && message.hasOwnProperty("indices")) { + var error = $root.onnx.TensorProto.verify(message.indices); + if (error) + return "indices." + error; + } + if (message.dims != null && message.hasOwnProperty("dims")) { + if (!Array.isArray(message.dims)) + return "dims: array expected"; + for (var i = 0; i < message.dims.length; ++i) + if (!$util.isInteger(message.dims[i]) && !(message.dims[i] && $util.isInteger(message.dims[i].low) && $util.isInteger(message.dims[i].high))) + return "dims: integer|Long[] expected"; + } + return null; + }; + + /** + * Creates a SparseTensorProto message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof onnx.SparseTensorProto + * @static + * @param {Object.} object Plain object + * @returns {onnx.SparseTensorProto} SparseTensorProto + */ + SparseTensorProto.fromObject = function fromObject(object) { + if (object instanceof $root.onnx.SparseTensorProto) + return object; + var message = new $root.onnx.SparseTensorProto(); + if (object.values != null) { + if (typeof object.values !== "object") + throw TypeError(".onnx.SparseTensorProto.values: object expected"); + message.values = $root.onnx.TensorProto.fromObject(object.values); + } + if (object.indices != null) { + if (typeof object.indices !== "object") + throw TypeError(".onnx.SparseTensorProto.indices: object expected"); + message.indices = $root.onnx.TensorProto.fromObject(object.indices); + } + if (object.dims) { + if (!Array.isArray(object.dims)) + throw TypeError(".onnx.SparseTensorProto.dims: array expected"); + message.dims = []; + for (var i = 0; i < object.dims.length; ++i) + if ($util.Long) + (message.dims[i] = $util.Long.fromValue(object.dims[i])).unsigned = false; + else if (typeof object.dims[i] === "string") + message.dims[i] = parseInt(object.dims[i], 10); + else if (typeof object.dims[i] === "number") + message.dims[i] = object.dims[i]; + else if (typeof object.dims[i] === "object") + message.dims[i] = new $util.LongBits(object.dims[i].low >>> 0, object.dims[i].high >>> 0).toNumber(); + } + return message; + }; + + /** + * Creates a plain object from a SparseTensorProto message. Also converts values to other types if specified. + * @function toObject + * @memberof onnx.SparseTensorProto + * @static + * @param {onnx.SparseTensorProto} message SparseTensorProto + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + SparseTensorProto.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.arrays || options.defaults) + object.dims = []; + if (options.defaults) { + object.values = null; + object.indices = null; + } + if (message.values != null && message.hasOwnProperty("values")) + object.values = $root.onnx.TensorProto.toObject(message.values, options); + if (message.indices != null && message.hasOwnProperty("indices")) + object.indices = $root.onnx.TensorProto.toObject(message.indices, options); + if (message.dims && message.dims.length) { + object.dims = []; + for (var j = 0; j < message.dims.length; ++j) + if (typeof message.dims[j] === "number") + object.dims[j] = options.longs === String ? String(message.dims[j]) : message.dims[j]; + else + object.dims[j] = options.longs === String ? $util.Long.prototype.toString.call(message.dims[j]) : options.longs === Number ? new $util.LongBits(message.dims[j].low >>> 0, message.dims[j].high >>> 0).toNumber() : message.dims[j]; + } + return object; + }; + + /** + * Converts this SparseTensorProto to JSON. 
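SparseTensorProto stores the non-zero values and their indices as nested TensorProtos, with the dense shape in dims. The sketch below is illustrative only (assumed module path; the flat, linearized index layout shown is one of the layouts onnx.proto permits):

    const onnx = require("./onnx");   // assumed path to the generated module
    const sparse = onnx.SparseTensorProto.fromObject({
      dims: [3, 3],                   // dense shape of the tensor
      values:  { dims: [2], dataType: onnx.TensorProto.DataType.FLOAT, floatData: [1.5, 2.5] },
      indices: { dims: [2], dataType: onnx.TensorProto.DataType.INT64, int64Data: [0, 8] },  // linearized positions
    });
    const buf = onnx.SparseTensorProto.encode(sparse).finish();   // Uint8Array ready for serialization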
+ * @function toJSON + * @memberof onnx.SparseTensorProto + * @instance + * @returns {Object.} JSON object + */ + SparseTensorProto.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for SparseTensorProto + * @function getTypeUrl + * @memberof onnx.SparseTensorProto + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + SparseTensorProto.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/onnx.SparseTensorProto"; + }; + + return SparseTensorProto; + })(); + + onnx.TensorShapeProto = (function() { + + /** + * Properties of a TensorShapeProto. + * @memberof onnx + * @interface ITensorShapeProto + * @property {Array.|null} [dim] TensorShapeProto dim + */ + + /** + * Constructs a new TensorShapeProto. + * @memberof onnx + * @classdesc Represents a TensorShapeProto. + * @implements ITensorShapeProto + * @constructor + * @param {onnx.ITensorShapeProto=} [properties] Properties to set + */ + function TensorShapeProto(properties) { + this.dim = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * TensorShapeProto dim. + * @member {Array.} dim + * @memberof onnx.TensorShapeProto + * @instance + */ + TensorShapeProto.prototype.dim = $util.emptyArray; + + /** + * Creates a new TensorShapeProto instance using the specified properties. + * @function create + * @memberof onnx.TensorShapeProto + * @static + * @param {onnx.ITensorShapeProto=} [properties] Properties to set + * @returns {onnx.TensorShapeProto} TensorShapeProto instance + */ + TensorShapeProto.create = function create(properties) { + return new TensorShapeProto(properties); + }; + + /** + * Encodes the specified TensorShapeProto message. Does not implicitly {@link onnx.TensorShapeProto.verify|verify} messages. + * @function encode + * @memberof onnx.TensorShapeProto + * @static + * @param {onnx.ITensorShapeProto} message TensorShapeProto message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + TensorShapeProto.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.dim != null && message.dim.length) + for (var i = 0; i < message.dim.length; ++i) + $root.onnx.TensorShapeProto.Dimension.encode(message.dim[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified TensorShapeProto message, length delimited. Does not implicitly {@link onnx.TensorShapeProto.verify|verify} messages. + * @function encodeDelimited + * @memberof onnx.TensorShapeProto + * @static + * @param {onnx.ITensorShapeProto} message TensorShapeProto message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + TensorShapeProto.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a TensorShapeProto message from the specified reader or buffer. 
+ * @function decode + * @memberof onnx.TensorShapeProto + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {onnx.TensorShapeProto} TensorShapeProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + TensorShapeProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.onnx.TensorShapeProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (!(message.dim && message.dim.length)) + message.dim = []; + message.dim.push($root.onnx.TensorShapeProto.Dimension.decode(reader, reader.uint32())); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a TensorShapeProto message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof onnx.TensorShapeProto + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {onnx.TensorShapeProto} TensorShapeProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + TensorShapeProto.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a TensorShapeProto message. + * @function verify + * @memberof onnx.TensorShapeProto + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + TensorShapeProto.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.dim != null && message.hasOwnProperty("dim")) { + if (!Array.isArray(message.dim)) + return "dim: array expected"; + for (var i = 0; i < message.dim.length; ++i) { + var error = $root.onnx.TensorShapeProto.Dimension.verify(message.dim[i]); + if (error) + return "dim." + error; + } + } + return null; + }; + + /** + * Creates a TensorShapeProto message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof onnx.TensorShapeProto + * @static + * @param {Object.} object Plain object + * @returns {onnx.TensorShapeProto} TensorShapeProto + */ + TensorShapeProto.fromObject = function fromObject(object) { + if (object instanceof $root.onnx.TensorShapeProto) + return object; + var message = new $root.onnx.TensorShapeProto(); + if (object.dim) { + if (!Array.isArray(object.dim)) + throw TypeError(".onnx.TensorShapeProto.dim: array expected"); + message.dim = []; + for (var i = 0; i < object.dim.length; ++i) { + if (typeof object.dim[i] !== "object") + throw TypeError(".onnx.TensorShapeProto.dim: object expected"); + message.dim[i] = $root.onnx.TensorShapeProto.Dimension.fromObject(object.dim[i]); + } + } + return message; + }; + + /** + * Creates a plain object from a TensorShapeProto message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof onnx.TensorShapeProto + * @static + * @param {onnx.TensorShapeProto} message TensorShapeProto + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + TensorShapeProto.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.arrays || options.defaults) + object.dim = []; + if (message.dim && message.dim.length) { + object.dim = []; + for (var j = 0; j < message.dim.length; ++j) + object.dim[j] = $root.onnx.TensorShapeProto.Dimension.toObject(message.dim[j], options); + } + return object; + }; + + /** + * Converts this TensorShapeProto to JSON. + * @function toJSON + * @memberof onnx.TensorShapeProto + * @instance + * @returns {Object.} JSON object + */ + TensorShapeProto.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for TensorShapeProto + * @function getTypeUrl + * @memberof onnx.TensorShapeProto + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + TensorShapeProto.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/onnx.TensorShapeProto"; + }; + + TensorShapeProto.Dimension = (function() { + + /** + * Properties of a Dimension. + * @memberof onnx.TensorShapeProto + * @interface IDimension + * @property {number|Long|null} [dimValue] Dimension dimValue + * @property {string|null} [dimParam] Dimension dimParam + * @property {string|null} [denotation] Dimension denotation + */ + + /** + * Constructs a new Dimension. + * @memberof onnx.TensorShapeProto + * @classdesc Represents a Dimension. + * @implements IDimension + * @constructor + * @param {onnx.TensorShapeProto.IDimension=} [properties] Properties to set + */ + function Dimension(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Dimension dimValue. + * @member {number|Long|null|undefined} dimValue + * @memberof onnx.TensorShapeProto.Dimension + * @instance + */ + Dimension.prototype.dimValue = null; + + /** + * Dimension dimParam. + * @member {string|null|undefined} dimParam + * @memberof onnx.TensorShapeProto.Dimension + * @instance + */ + Dimension.prototype.dimParam = null; + + /** + * Dimension denotation. + * @member {string} denotation + * @memberof onnx.TensorShapeProto.Dimension + * @instance + */ + Dimension.prototype.denotation = ""; + + // OneOf field names bound to virtual getters and setters + var $oneOfFields; + + /** + * Dimension value. + * @member {"dimValue"|"dimParam"|undefined} value + * @memberof onnx.TensorShapeProto.Dimension + * @instance + */ + Object.defineProperty(Dimension.prototype, "value", { + get: $util.oneOfGetter($oneOfFields = ["dimValue", "dimParam"]), + set: $util.oneOfSetter($oneOfFields) + }); + + /** + * Creates a new Dimension instance using the specified properties. 
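Each Dimension holds either a concrete dimValue or a symbolic dimParam, and the virtual "value" property defined above reports which member of that oneof is set. A short sketch using the fromObject helper generated further down (module handle is illustrative, as before):

    const onnx = require("./onnx");   // assumed path to the generated module
    const shape = onnx.TensorShapeProto.fromObject({
      dim: [{ dimParam: "batch" }, { dimValue: 224 }],
    });
    console.log(shape.dim[0].value);                 // "dimParam" => symbolic dimension
    console.log(shape.dim[1].dimValue.toString());   // "224" (a Long where long.js is present, otherwise a number)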
+ * @function create + * @memberof onnx.TensorShapeProto.Dimension + * @static + * @param {onnx.TensorShapeProto.IDimension=} [properties] Properties to set + * @returns {onnx.TensorShapeProto.Dimension} Dimension instance + */ + Dimension.create = function create(properties) { + return new Dimension(properties); + }; + + /** + * Encodes the specified Dimension message. Does not implicitly {@link onnx.TensorShapeProto.Dimension.verify|verify} messages. + * @function encode + * @memberof onnx.TensorShapeProto.Dimension + * @static + * @param {onnx.TensorShapeProto.IDimension} message Dimension message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Dimension.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.dimValue != null && Object.hasOwnProperty.call(message, "dimValue")) + writer.uint32(/* id 1, wireType 0 =*/8).int64(message.dimValue); + if (message.dimParam != null && Object.hasOwnProperty.call(message, "dimParam")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.dimParam); + if (message.denotation != null && Object.hasOwnProperty.call(message, "denotation")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.denotation); + return writer; + }; + + /** + * Encodes the specified Dimension message, length delimited. Does not implicitly {@link onnx.TensorShapeProto.Dimension.verify|verify} messages. + * @function encodeDelimited + * @memberof onnx.TensorShapeProto.Dimension + * @static + * @param {onnx.TensorShapeProto.IDimension} message Dimension message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Dimension.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a Dimension message from the specified reader or buffer. + * @function decode + * @memberof onnx.TensorShapeProto.Dimension + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {onnx.TensorShapeProto.Dimension} Dimension + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Dimension.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.onnx.TensorShapeProto.Dimension(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.dimValue = reader.int64(); + break; + } + case 2: { + message.dimParam = reader.string(); + break; + } + case 3: { + message.denotation = reader.string(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a Dimension message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof onnx.TensorShapeProto.Dimension + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {onnx.TensorShapeProto.Dimension} Dimension + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Dimension.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a Dimension message. + * @function verify + * @memberof onnx.TensorShapeProto.Dimension + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + Dimension.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + var properties = {}; + if (message.dimValue != null && message.hasOwnProperty("dimValue")) { + properties.value = 1; + if (!$util.isInteger(message.dimValue) && !(message.dimValue && $util.isInteger(message.dimValue.low) && $util.isInteger(message.dimValue.high))) + return "dimValue: integer|Long expected"; + } + if (message.dimParam != null && message.hasOwnProperty("dimParam")) { + if (properties.value === 1) + return "value: multiple values"; + properties.value = 1; + if (!$util.isString(message.dimParam)) + return "dimParam: string expected"; + } + if (message.denotation != null && message.hasOwnProperty("denotation")) + if (!$util.isString(message.denotation)) + return "denotation: string expected"; + return null; + }; + + /** + * Creates a Dimension message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof onnx.TensorShapeProto.Dimension + * @static + * @param {Object.} object Plain object + * @returns {onnx.TensorShapeProto.Dimension} Dimension + */ + Dimension.fromObject = function fromObject(object) { + if (object instanceof $root.onnx.TensorShapeProto.Dimension) + return object; + var message = new $root.onnx.TensorShapeProto.Dimension(); + if (object.dimValue != null) + if ($util.Long) + (message.dimValue = $util.Long.fromValue(object.dimValue)).unsigned = false; + else if (typeof object.dimValue === "string") + message.dimValue = parseInt(object.dimValue, 10); + else if (typeof object.dimValue === "number") + message.dimValue = object.dimValue; + else if (typeof object.dimValue === "object") + message.dimValue = new $util.LongBits(object.dimValue.low >>> 0, object.dimValue.high >>> 0).toNumber(); + if (object.dimParam != null) + message.dimParam = String(object.dimParam); + if (object.denotation != null) + message.denotation = String(object.denotation); + return message; + }; + + /** + * Creates a plain object from a Dimension message. Also converts values to other types if specified. + * @function toObject + * @memberof onnx.TensorShapeProto.Dimension + * @static + * @param {onnx.TensorShapeProto.Dimension} message Dimension + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + Dimension.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.defaults) + object.denotation = ""; + if (message.dimValue != null && message.hasOwnProperty("dimValue")) { + if (typeof message.dimValue === "number") + object.dimValue = options.longs === String ? 
String(message.dimValue) : message.dimValue; + else + object.dimValue = options.longs === String ? $util.Long.prototype.toString.call(message.dimValue) : options.longs === Number ? new $util.LongBits(message.dimValue.low >>> 0, message.dimValue.high >>> 0).toNumber() : message.dimValue; + if (options.oneofs) + object.value = "dimValue"; + } + if (message.dimParam != null && message.hasOwnProperty("dimParam")) { + object.dimParam = message.dimParam; + if (options.oneofs) + object.value = "dimParam"; + } + if (message.denotation != null && message.hasOwnProperty("denotation")) + object.denotation = message.denotation; + return object; + }; + + /** + * Converts this Dimension to JSON. + * @function toJSON + * @memberof onnx.TensorShapeProto.Dimension + * @instance + * @returns {Object.} JSON object + */ + Dimension.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for Dimension + * @function getTypeUrl + * @memberof onnx.TensorShapeProto.Dimension + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + Dimension.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/onnx.TensorShapeProto.Dimension"; + }; + + return Dimension; + })(); + + return TensorShapeProto; + })(); + + onnx.TypeProto = (function() { + + /** + * Properties of a TypeProto. + * @memberof onnx + * @interface ITypeProto + * @property {onnx.TypeProto.ITensor|null} [tensorType] TypeProto tensorType + * @property {onnx.TypeProto.ISequence|null} [sequenceType] TypeProto sequenceType + * @property {onnx.TypeProto.IMap|null} [mapType] TypeProto mapType + * @property {onnx.TypeProto.IOptional|null} [optionalType] TypeProto optionalType + * @property {onnx.TypeProto.ISparseTensor|null} [sparseTensorType] TypeProto sparseTensorType + * @property {string|null} [denotation] TypeProto denotation + */ + + /** + * Constructs a new TypeProto. + * @memberof onnx + * @classdesc Represents a TypeProto. + * @implements ITypeProto + * @constructor + * @param {onnx.ITypeProto=} [properties] Properties to set + */ + function TypeProto(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * TypeProto tensorType. + * @member {onnx.TypeProto.ITensor|null|undefined} tensorType + * @memberof onnx.TypeProto + * @instance + */ + TypeProto.prototype.tensorType = null; + + /** + * TypeProto sequenceType. + * @member {onnx.TypeProto.ISequence|null|undefined} sequenceType + * @memberof onnx.TypeProto + * @instance + */ + TypeProto.prototype.sequenceType = null; + + /** + * TypeProto mapType. + * @member {onnx.TypeProto.IMap|null|undefined} mapType + * @memberof onnx.TypeProto + * @instance + */ + TypeProto.prototype.mapType = null; + + /** + * TypeProto optionalType. + * @member {onnx.TypeProto.IOptional|null|undefined} optionalType + * @memberof onnx.TypeProto + * @instance + */ + TypeProto.prototype.optionalType = null; + + /** + * TypeProto sparseTensorType. + * @member {onnx.TypeProto.ISparseTensor|null|undefined} sparseTensorType + * @memberof onnx.TypeProto + * @instance + */ + TypeProto.prototype.sparseTensorType = null; + + /** + * TypeProto denotation. 
+ * @member {string} denotation + * @memberof onnx.TypeProto + * @instance + */ + TypeProto.prototype.denotation = ""; + + // OneOf field names bound to virtual getters and setters + var $oneOfFields; + + /** + * TypeProto value. + * @member {"tensorType"|"sequenceType"|"mapType"|"optionalType"|"sparseTensorType"|undefined} value + * @memberof onnx.TypeProto + * @instance + */ + Object.defineProperty(TypeProto.prototype, "value", { + get: $util.oneOfGetter($oneOfFields = ["tensorType", "sequenceType", "mapType", "optionalType", "sparseTensorType"]), + set: $util.oneOfSetter($oneOfFields) + }); + + /** + * Creates a new TypeProto instance using the specified properties. + * @function create + * @memberof onnx.TypeProto + * @static + * @param {onnx.ITypeProto=} [properties] Properties to set + * @returns {onnx.TypeProto} TypeProto instance + */ + TypeProto.create = function create(properties) { + return new TypeProto(properties); + }; + + /** + * Encodes the specified TypeProto message. Does not implicitly {@link onnx.TypeProto.verify|verify} messages. + * @function encode + * @memberof onnx.TypeProto + * @static + * @param {onnx.ITypeProto} message TypeProto message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + TypeProto.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.tensorType != null && Object.hasOwnProperty.call(message, "tensorType")) + $root.onnx.TypeProto.Tensor.encode(message.tensorType, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.sequenceType != null && Object.hasOwnProperty.call(message, "sequenceType")) + $root.onnx.TypeProto.Sequence.encode(message.sequenceType, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.mapType != null && Object.hasOwnProperty.call(message, "mapType")) + $root.onnx.TypeProto.Map.encode(message.mapType, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.denotation != null && Object.hasOwnProperty.call(message, "denotation")) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.denotation); + if (message.sparseTensorType != null && Object.hasOwnProperty.call(message, "sparseTensorType")) + $root.onnx.TypeProto.SparseTensor.encode(message.sparseTensorType, writer.uint32(/* id 8, wireType 2 =*/66).fork()).ldelim(); + if (message.optionalType != null && Object.hasOwnProperty.call(message, "optionalType")) + $root.onnx.TypeProto.Optional.encode(message.optionalType, writer.uint32(/* id 9, wireType 2 =*/74).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified TypeProto message, length delimited. Does not implicitly {@link onnx.TypeProto.verify|verify} messages. + * @function encodeDelimited + * @memberof onnx.TypeProto + * @static + * @param {onnx.ITypeProto} message TypeProto message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + TypeProto.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a TypeProto message from the specified reader or buffer. 
+ * @function decode + * @memberof onnx.TypeProto + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {onnx.TypeProto} TypeProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + TypeProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.onnx.TypeProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.tensorType = $root.onnx.TypeProto.Tensor.decode(reader, reader.uint32()); + break; + } + case 4: { + message.sequenceType = $root.onnx.TypeProto.Sequence.decode(reader, reader.uint32()); + break; + } + case 5: { + message.mapType = $root.onnx.TypeProto.Map.decode(reader, reader.uint32()); + break; + } + case 9: { + message.optionalType = $root.onnx.TypeProto.Optional.decode(reader, reader.uint32()); + break; + } + case 8: { + message.sparseTensorType = $root.onnx.TypeProto.SparseTensor.decode(reader, reader.uint32()); + break; + } + case 6: { + message.denotation = reader.string(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a TypeProto message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof onnx.TypeProto + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {onnx.TypeProto} TypeProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + TypeProto.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a TypeProto message. + * @function verify + * @memberof onnx.TypeProto + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + TypeProto.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + var properties = {}; + if (message.tensorType != null && message.hasOwnProperty("tensorType")) { + properties.value = 1; + { + var error = $root.onnx.TypeProto.Tensor.verify(message.tensorType); + if (error) + return "tensorType." + error; + } + } + if (message.sequenceType != null && message.hasOwnProperty("sequenceType")) { + if (properties.value === 1) + return "value: multiple values"; + properties.value = 1; + { + var error = $root.onnx.TypeProto.Sequence.verify(message.sequenceType); + if (error) + return "sequenceType." + error; + } + } + if (message.mapType != null && message.hasOwnProperty("mapType")) { + if (properties.value === 1) + return "value: multiple values"; + properties.value = 1; + { + var error = $root.onnx.TypeProto.Map.verify(message.mapType); + if (error) + return "mapType." + error; + } + } + if (message.optionalType != null && message.hasOwnProperty("optionalType")) { + if (properties.value === 1) + return "value: multiple values"; + properties.value = 1; + { + var error = $root.onnx.TypeProto.Optional.verify(message.optionalType); + if (error) + return "optionalType." 
+ error; + } + } + if (message.sparseTensorType != null && message.hasOwnProperty("sparseTensorType")) { + if (properties.value === 1) + return "value: multiple values"; + properties.value = 1; + { + var error = $root.onnx.TypeProto.SparseTensor.verify(message.sparseTensorType); + if (error) + return "sparseTensorType." + error; + } + } + if (message.denotation != null && message.hasOwnProperty("denotation")) + if (!$util.isString(message.denotation)) + return "denotation: string expected"; + return null; + }; + + /** + * Creates a TypeProto message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof onnx.TypeProto + * @static + * @param {Object.} object Plain object + * @returns {onnx.TypeProto} TypeProto + */ + TypeProto.fromObject = function fromObject(object) { + if (object instanceof $root.onnx.TypeProto) + return object; + var message = new $root.onnx.TypeProto(); + if (object.tensorType != null) { + if (typeof object.tensorType !== "object") + throw TypeError(".onnx.TypeProto.tensorType: object expected"); + message.tensorType = $root.onnx.TypeProto.Tensor.fromObject(object.tensorType); + } + if (object.sequenceType != null) { + if (typeof object.sequenceType !== "object") + throw TypeError(".onnx.TypeProto.sequenceType: object expected"); + message.sequenceType = $root.onnx.TypeProto.Sequence.fromObject(object.sequenceType); + } + if (object.mapType != null) { + if (typeof object.mapType !== "object") + throw TypeError(".onnx.TypeProto.mapType: object expected"); + message.mapType = $root.onnx.TypeProto.Map.fromObject(object.mapType); + } + if (object.optionalType != null) { + if (typeof object.optionalType !== "object") + throw TypeError(".onnx.TypeProto.optionalType: object expected"); + message.optionalType = $root.onnx.TypeProto.Optional.fromObject(object.optionalType); + } + if (object.sparseTensorType != null) { + if (typeof object.sparseTensorType !== "object") + throw TypeError(".onnx.TypeProto.sparseTensorType: object expected"); + message.sparseTensorType = $root.onnx.TypeProto.SparseTensor.fromObject(object.sparseTensorType); + } + if (object.denotation != null) + message.denotation = String(object.denotation); + return message; + }; + + /** + * Creates a plain object from a TypeProto message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof onnx.TypeProto + * @static + * @param {onnx.TypeProto} message TypeProto + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + TypeProto.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.defaults) + object.denotation = ""; + if (message.tensorType != null && message.hasOwnProperty("tensorType")) { + object.tensorType = $root.onnx.TypeProto.Tensor.toObject(message.tensorType, options); + if (options.oneofs) + object.value = "tensorType"; + } + if (message.sequenceType != null && message.hasOwnProperty("sequenceType")) { + object.sequenceType = $root.onnx.TypeProto.Sequence.toObject(message.sequenceType, options); + if (options.oneofs) + object.value = "sequenceType"; + } + if (message.mapType != null && message.hasOwnProperty("mapType")) { + object.mapType = $root.onnx.TypeProto.Map.toObject(message.mapType, options); + if (options.oneofs) + object.value = "mapType"; + } + if (message.denotation != null && message.hasOwnProperty("denotation")) + object.denotation = message.denotation; + if (message.sparseTensorType != null && message.hasOwnProperty("sparseTensorType")) { + object.sparseTensorType = $root.onnx.TypeProto.SparseTensor.toObject(message.sparseTensorType, options); + if (options.oneofs) + object.value = "sparseTensorType"; + } + if (message.optionalType != null && message.hasOwnProperty("optionalType")) { + object.optionalType = $root.onnx.TypeProto.Optional.toObject(message.optionalType, options); + if (options.oneofs) + object.value = "optionalType"; + } + return object; + }; + + /** + * Converts this TypeProto to JSON. + * @function toJSON + * @memberof onnx.TypeProto + * @instance + * @returns {Object.} JSON object + */ + TypeProto.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for TypeProto + * @function getTypeUrl + * @memberof onnx.TypeProto + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + TypeProto.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/onnx.TypeProto"; + }; + + TypeProto.Tensor = (function() { + + /** + * Properties of a Tensor. + * @memberof onnx.TypeProto + * @interface ITensor + * @property {number|null} [elemType] Tensor elemType + * @property {onnx.ITensorShapeProto|null} [shape] Tensor shape + */ + + /** + * Constructs a new Tensor. + * @memberof onnx.TypeProto + * @classdesc Represents a Tensor. + * @implements ITensor + * @constructor + * @param {onnx.TypeProto.ITensor=} [properties] Properties to set + */ + function Tensor(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Tensor elemType. + * @member {number} elemType + * @memberof onnx.TypeProto.Tensor + * @instance + */ + Tensor.prototype.elemType = 0; + + /** + * Tensor shape. + * @member {onnx.ITensorShapeProto|null|undefined} shape + * @memberof onnx.TypeProto.Tensor + * @instance + */ + Tensor.prototype.shape = null; + + /** + * Creates a new Tensor instance using the specified properties. 
+ * @function create + * @memberof onnx.TypeProto.Tensor + * @static + * @param {onnx.TypeProto.ITensor=} [properties] Properties to set + * @returns {onnx.TypeProto.Tensor} Tensor instance + */ + Tensor.create = function create(properties) { + return new Tensor(properties); + }; + + /** + * Encodes the specified Tensor message. Does not implicitly {@link onnx.TypeProto.Tensor.verify|verify} messages. + * @function encode + * @memberof onnx.TypeProto.Tensor + * @static + * @param {onnx.TypeProto.ITensor} message Tensor message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Tensor.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.elemType != null && Object.hasOwnProperty.call(message, "elemType")) + writer.uint32(/* id 1, wireType 0 =*/8).int32(message.elemType); + if (message.shape != null && Object.hasOwnProperty.call(message, "shape")) + $root.onnx.TensorShapeProto.encode(message.shape, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified Tensor message, length delimited. Does not implicitly {@link onnx.TypeProto.Tensor.verify|verify} messages. + * @function encodeDelimited + * @memberof onnx.TypeProto.Tensor + * @static + * @param {onnx.TypeProto.ITensor} message Tensor message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Tensor.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a Tensor message from the specified reader or buffer. + * @function decode + * @memberof onnx.TypeProto.Tensor + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {onnx.TypeProto.Tensor} Tensor + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Tensor.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.onnx.TypeProto.Tensor(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.elemType = reader.int32(); + break; + } + case 2: { + message.shape = $root.onnx.TensorShapeProto.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a Tensor message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof onnx.TypeProto.Tensor + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {onnx.TypeProto.Tensor} Tensor + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Tensor.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a Tensor message. 
+ * @function verify + * @memberof onnx.TypeProto.Tensor + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + Tensor.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.elemType != null && message.hasOwnProperty("elemType")) + if (!$util.isInteger(message.elemType)) + return "elemType: integer expected"; + if (message.shape != null && message.hasOwnProperty("shape")) { + var error = $root.onnx.TensorShapeProto.verify(message.shape); + if (error) + return "shape." + error; + } + return null; + }; + + /** + * Creates a Tensor message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof onnx.TypeProto.Tensor + * @static + * @param {Object.} object Plain object + * @returns {onnx.TypeProto.Tensor} Tensor + */ + Tensor.fromObject = function fromObject(object) { + if (object instanceof $root.onnx.TypeProto.Tensor) + return object; + var message = new $root.onnx.TypeProto.Tensor(); + if (object.elemType != null) + message.elemType = object.elemType | 0; + if (object.shape != null) { + if (typeof object.shape !== "object") + throw TypeError(".onnx.TypeProto.Tensor.shape: object expected"); + message.shape = $root.onnx.TensorShapeProto.fromObject(object.shape); + } + return message; + }; + + /** + * Creates a plain object from a Tensor message. Also converts values to other types if specified. + * @function toObject + * @memberof onnx.TypeProto.Tensor + * @static + * @param {onnx.TypeProto.Tensor} message Tensor + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + Tensor.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.defaults) { + object.elemType = 0; + object.shape = null; + } + if (message.elemType != null && message.hasOwnProperty("elemType")) + object.elemType = message.elemType; + if (message.shape != null && message.hasOwnProperty("shape")) + object.shape = $root.onnx.TensorShapeProto.toObject(message.shape, options); + return object; + }; + + /** + * Converts this Tensor to JSON. + * @function toJSON + * @memberof onnx.TypeProto.Tensor + * @instance + * @returns {Object.} JSON object + */ + Tensor.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for Tensor + * @function getTypeUrl + * @memberof onnx.TypeProto.Tensor + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + Tensor.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/onnx.TypeProto.Tensor"; + }; + + return Tensor; + })(); + + TypeProto.Sequence = (function() { + + /** + * Properties of a Sequence. + * @memberof onnx.TypeProto + * @interface ISequence + * @property {onnx.ITypeProto|null} [elemType] Sequence elemType + */ + + /** + * Constructs a new Sequence. + * @memberof onnx.TypeProto + * @classdesc Represents a Sequence. 
+ * @implements ISequence + * @constructor + * @param {onnx.TypeProto.ISequence=} [properties] Properties to set + */ + function Sequence(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Sequence elemType. + * @member {onnx.ITypeProto|null|undefined} elemType + * @memberof onnx.TypeProto.Sequence + * @instance + */ + Sequence.prototype.elemType = null; + + /** + * Creates a new Sequence instance using the specified properties. + * @function create + * @memberof onnx.TypeProto.Sequence + * @static + * @param {onnx.TypeProto.ISequence=} [properties] Properties to set + * @returns {onnx.TypeProto.Sequence} Sequence instance + */ + Sequence.create = function create(properties) { + return new Sequence(properties); + }; + + /** + * Encodes the specified Sequence message. Does not implicitly {@link onnx.TypeProto.Sequence.verify|verify} messages. + * @function encode + * @memberof onnx.TypeProto.Sequence + * @static + * @param {onnx.TypeProto.ISequence} message Sequence message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Sequence.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.elemType != null && Object.hasOwnProperty.call(message, "elemType")) + $root.onnx.TypeProto.encode(message.elemType, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified Sequence message, length delimited. Does not implicitly {@link onnx.TypeProto.Sequence.verify|verify} messages. + * @function encodeDelimited + * @memberof onnx.TypeProto.Sequence + * @static + * @param {onnx.TypeProto.ISequence} message Sequence message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Sequence.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a Sequence message from the specified reader or buffer. + * @function decode + * @memberof onnx.TypeProto.Sequence + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {onnx.TypeProto.Sequence} Sequence + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Sequence.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.onnx.TypeProto.Sequence(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.elemType = $root.onnx.TypeProto.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a Sequence message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof onnx.TypeProto.Sequence + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {onnx.TypeProto.Sequence} Sequence + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Sequence.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a Sequence message. + * @function verify + * @memberof onnx.TypeProto.Sequence + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + Sequence.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.elemType != null && message.hasOwnProperty("elemType")) { + var error = $root.onnx.TypeProto.verify(message.elemType); + if (error) + return "elemType." + error; + } + return null; + }; + + /** + * Creates a Sequence message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof onnx.TypeProto.Sequence + * @static + * @param {Object.} object Plain object + * @returns {onnx.TypeProto.Sequence} Sequence + */ + Sequence.fromObject = function fromObject(object) { + if (object instanceof $root.onnx.TypeProto.Sequence) + return object; + var message = new $root.onnx.TypeProto.Sequence(); + if (object.elemType != null) { + if (typeof object.elemType !== "object") + throw TypeError(".onnx.TypeProto.Sequence.elemType: object expected"); + message.elemType = $root.onnx.TypeProto.fromObject(object.elemType); + } + return message; + }; + + /** + * Creates a plain object from a Sequence message. Also converts values to other types if specified. + * @function toObject + * @memberof onnx.TypeProto.Sequence + * @static + * @param {onnx.TypeProto.Sequence} message Sequence + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + Sequence.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.defaults) + object.elemType = null; + if (message.elemType != null && message.hasOwnProperty("elemType")) + object.elemType = $root.onnx.TypeProto.toObject(message.elemType, options); + return object; + }; + + /** + * Converts this Sequence to JSON. + * @function toJSON + * @memberof onnx.TypeProto.Sequence + * @instance + * @returns {Object.} JSON object + */ + Sequence.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for Sequence + * @function getTypeUrl + * @memberof onnx.TypeProto.Sequence + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + Sequence.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/onnx.TypeProto.Sequence"; + }; + + return Sequence; + })(); + + TypeProto.Map = (function() { + + /** + * Properties of a Map. 
+ * @memberof onnx.TypeProto + * @interface IMap + * @property {number|null} [keyType] Map keyType + * @property {onnx.ITypeProto|null} [valueType] Map valueType + */ + + /** + * Constructs a new Map. + * @memberof onnx.TypeProto + * @classdesc Represents a Map. + * @implements IMap + * @constructor + * @param {onnx.TypeProto.IMap=} [properties] Properties to set + */ + function Map(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Map keyType. + * @member {number} keyType + * @memberof onnx.TypeProto.Map + * @instance + */ + Map.prototype.keyType = 0; + + /** + * Map valueType. + * @member {onnx.ITypeProto|null|undefined} valueType + * @memberof onnx.TypeProto.Map + * @instance + */ + Map.prototype.valueType = null; + + /** + * Creates a new Map instance using the specified properties. + * @function create + * @memberof onnx.TypeProto.Map + * @static + * @param {onnx.TypeProto.IMap=} [properties] Properties to set + * @returns {onnx.TypeProto.Map} Map instance + */ + Map.create = function create(properties) { + return new Map(properties); + }; + + /** + * Encodes the specified Map message. Does not implicitly {@link onnx.TypeProto.Map.verify|verify} messages. + * @function encode + * @memberof onnx.TypeProto.Map + * @static + * @param {onnx.TypeProto.IMap} message Map message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Map.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.keyType != null && Object.hasOwnProperty.call(message, "keyType")) + writer.uint32(/* id 1, wireType 0 =*/8).int32(message.keyType); + if (message.valueType != null && Object.hasOwnProperty.call(message, "valueType")) + $root.onnx.TypeProto.encode(message.valueType, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified Map message, length delimited. Does not implicitly {@link onnx.TypeProto.Map.verify|verify} messages. + * @function encodeDelimited + * @memberof onnx.TypeProto.Map + * @static + * @param {onnx.TypeProto.IMap} message Map message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Map.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a Map message from the specified reader or buffer. + * @function decode + * @memberof onnx.TypeProto.Map + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {onnx.TypeProto.Map} Map + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Map.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.onnx.TypeProto.Map(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.keyType = reader.int32(); + break; + } + case 2: { + message.valueType = $root.onnx.TypeProto.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a Map message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof onnx.TypeProto.Map + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {onnx.TypeProto.Map} Map + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Map.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a Map message. + * @function verify + * @memberof onnx.TypeProto.Map + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + Map.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.keyType != null && message.hasOwnProperty("keyType")) + if (!$util.isInteger(message.keyType)) + return "keyType: integer expected"; + if (message.valueType != null && message.hasOwnProperty("valueType")) { + var error = $root.onnx.TypeProto.verify(message.valueType); + if (error) + return "valueType." + error; + } + return null; + }; + + /** + * Creates a Map message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof onnx.TypeProto.Map + * @static + * @param {Object.} object Plain object + * @returns {onnx.TypeProto.Map} Map + */ + Map.fromObject = function fromObject(object) { + if (object instanceof $root.onnx.TypeProto.Map) + return object; + var message = new $root.onnx.TypeProto.Map(); + if (object.keyType != null) + message.keyType = object.keyType | 0; + if (object.valueType != null) { + if (typeof object.valueType !== "object") + throw TypeError(".onnx.TypeProto.Map.valueType: object expected"); + message.valueType = $root.onnx.TypeProto.fromObject(object.valueType); + } + return message; + }; + + /** + * Creates a plain object from a Map message. Also converts values to other types if specified. + * @function toObject + * @memberof onnx.TypeProto.Map + * @static + * @param {onnx.TypeProto.Map} message Map + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + Map.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.defaults) { + object.keyType = 0; + object.valueType = null; + } + if (message.keyType != null && message.hasOwnProperty("keyType")) + object.keyType = message.keyType; + if (message.valueType != null && message.hasOwnProperty("valueType")) + object.valueType = $root.onnx.TypeProto.toObject(message.valueType, options); + return object; + }; + + /** + * Converts this Map to JSON. 
+ * @function toJSON + * @memberof onnx.TypeProto.Map + * @instance + * @returns {Object.} JSON object + */ + Map.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for Map + * @function getTypeUrl + * @memberof onnx.TypeProto.Map + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + Map.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/onnx.TypeProto.Map"; + }; + + return Map; + })(); + + TypeProto.Optional = (function() { + + /** + * Properties of an Optional. + * @memberof onnx.TypeProto + * @interface IOptional + * @property {onnx.ITypeProto|null} [elemType] Optional elemType + */ + + /** + * Constructs a new Optional. + * @memberof onnx.TypeProto + * @classdesc Represents an Optional. + * @implements IOptional + * @constructor + * @param {onnx.TypeProto.IOptional=} [properties] Properties to set + */ + function Optional(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Optional elemType. + * @member {onnx.ITypeProto|null|undefined} elemType + * @memberof onnx.TypeProto.Optional + * @instance + */ + Optional.prototype.elemType = null; + + /** + * Creates a new Optional instance using the specified properties. + * @function create + * @memberof onnx.TypeProto.Optional + * @static + * @param {onnx.TypeProto.IOptional=} [properties] Properties to set + * @returns {onnx.TypeProto.Optional} Optional instance + */ + Optional.create = function create(properties) { + return new Optional(properties); + }; + + /** + * Encodes the specified Optional message. Does not implicitly {@link onnx.TypeProto.Optional.verify|verify} messages. + * @function encode + * @memberof onnx.TypeProto.Optional + * @static + * @param {onnx.TypeProto.IOptional} message Optional message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Optional.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.elemType != null && Object.hasOwnProperty.call(message, "elemType")) + $root.onnx.TypeProto.encode(message.elemType, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified Optional message, length delimited. Does not implicitly {@link onnx.TypeProto.Optional.verify|verify} messages. + * @function encodeDelimited + * @memberof onnx.TypeProto.Optional + * @static + * @param {onnx.TypeProto.IOptional} message Optional message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Optional.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an Optional message from the specified reader or buffer. 
+ * @function decode + * @memberof onnx.TypeProto.Optional + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {onnx.TypeProto.Optional} Optional + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Optional.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.onnx.TypeProto.Optional(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.elemType = $root.onnx.TypeProto.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an Optional message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof onnx.TypeProto.Optional + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {onnx.TypeProto.Optional} Optional + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Optional.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an Optional message. + * @function verify + * @memberof onnx.TypeProto.Optional + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + Optional.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.elemType != null && message.hasOwnProperty("elemType")) { + var error = $root.onnx.TypeProto.verify(message.elemType); + if (error) + return "elemType." + error; + } + return null; + }; + + /** + * Creates an Optional message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof onnx.TypeProto.Optional + * @static + * @param {Object.} object Plain object + * @returns {onnx.TypeProto.Optional} Optional + */ + Optional.fromObject = function fromObject(object) { + if (object instanceof $root.onnx.TypeProto.Optional) + return object; + var message = new $root.onnx.TypeProto.Optional(); + if (object.elemType != null) { + if (typeof object.elemType !== "object") + throw TypeError(".onnx.TypeProto.Optional.elemType: object expected"); + message.elemType = $root.onnx.TypeProto.fromObject(object.elemType); + } + return message; + }; + + /** + * Creates a plain object from an Optional message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof onnx.TypeProto.Optional + * @static + * @param {onnx.TypeProto.Optional} message Optional + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + Optional.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.defaults) + object.elemType = null; + if (message.elemType != null && message.hasOwnProperty("elemType")) + object.elemType = $root.onnx.TypeProto.toObject(message.elemType, options); + return object; + }; + + /** + * Converts this Optional to JSON. + * @function toJSON + * @memberof onnx.TypeProto.Optional + * @instance + * @returns {Object.} JSON object + */ + Optional.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for Optional + * @function getTypeUrl + * @memberof onnx.TypeProto.Optional + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + Optional.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/onnx.TypeProto.Optional"; + }; + + return Optional; + })(); + + TypeProto.SparseTensor = (function() { + + /** + * Properties of a SparseTensor. + * @memberof onnx.TypeProto + * @interface ISparseTensor + * @property {number|null} [elemType] SparseTensor elemType + * @property {onnx.ITensorShapeProto|null} [shape] SparseTensor shape + */ + + /** + * Constructs a new SparseTensor. + * @memberof onnx.TypeProto + * @classdesc Represents a SparseTensor. + * @implements ISparseTensor + * @constructor + * @param {onnx.TypeProto.ISparseTensor=} [properties] Properties to set + */ + function SparseTensor(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * SparseTensor elemType. + * @member {number} elemType + * @memberof onnx.TypeProto.SparseTensor + * @instance + */ + SparseTensor.prototype.elemType = 0; + + /** + * SparseTensor shape. + * @member {onnx.ITensorShapeProto|null|undefined} shape + * @memberof onnx.TypeProto.SparseTensor + * @instance + */ + SparseTensor.prototype.shape = null; + + /** + * Creates a new SparseTensor instance using the specified properties. + * @function create + * @memberof onnx.TypeProto.SparseTensor + * @static + * @param {onnx.TypeProto.ISparseTensor=} [properties] Properties to set + * @returns {onnx.TypeProto.SparseTensor} SparseTensor instance + */ + SparseTensor.create = function create(properties) { + return new SparseTensor(properties); + }; + + /** + * Encodes the specified SparseTensor message. Does not implicitly {@link onnx.TypeProto.SparseTensor.verify|verify} messages. 
+ * @function encode + * @memberof onnx.TypeProto.SparseTensor + * @static + * @param {onnx.TypeProto.ISparseTensor} message SparseTensor message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + SparseTensor.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.elemType != null && Object.hasOwnProperty.call(message, "elemType")) + writer.uint32(/* id 1, wireType 0 =*/8).int32(message.elemType); + if (message.shape != null && Object.hasOwnProperty.call(message, "shape")) + $root.onnx.TensorShapeProto.encode(message.shape, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified SparseTensor message, length delimited. Does not implicitly {@link onnx.TypeProto.SparseTensor.verify|verify} messages. + * @function encodeDelimited + * @memberof onnx.TypeProto.SparseTensor + * @static + * @param {onnx.TypeProto.ISparseTensor} message SparseTensor message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + SparseTensor.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a SparseTensor message from the specified reader or buffer. + * @function decode + * @memberof onnx.TypeProto.SparseTensor + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {onnx.TypeProto.SparseTensor} SparseTensor + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + SparseTensor.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.onnx.TypeProto.SparseTensor(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.elemType = reader.int32(); + break; + } + case 2: { + message.shape = $root.onnx.TensorShapeProto.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a SparseTensor message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof onnx.TypeProto.SparseTensor + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {onnx.TypeProto.SparseTensor} SparseTensor + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + SparseTensor.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a SparseTensor message. 
+ * @function verify + * @memberof onnx.TypeProto.SparseTensor + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + SparseTensor.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.elemType != null && message.hasOwnProperty("elemType")) + if (!$util.isInteger(message.elemType)) + return "elemType: integer expected"; + if (message.shape != null && message.hasOwnProperty("shape")) { + var error = $root.onnx.TensorShapeProto.verify(message.shape); + if (error) + return "shape." + error; + } + return null; + }; + + /** + * Creates a SparseTensor message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof onnx.TypeProto.SparseTensor + * @static + * @param {Object.} object Plain object + * @returns {onnx.TypeProto.SparseTensor} SparseTensor + */ + SparseTensor.fromObject = function fromObject(object) { + if (object instanceof $root.onnx.TypeProto.SparseTensor) + return object; + var message = new $root.onnx.TypeProto.SparseTensor(); + if (object.elemType != null) + message.elemType = object.elemType | 0; + if (object.shape != null) { + if (typeof object.shape !== "object") + throw TypeError(".onnx.TypeProto.SparseTensor.shape: object expected"); + message.shape = $root.onnx.TensorShapeProto.fromObject(object.shape); + } + return message; + }; + + /** + * Creates a plain object from a SparseTensor message. Also converts values to other types if specified. + * @function toObject + * @memberof onnx.TypeProto.SparseTensor + * @static + * @param {onnx.TypeProto.SparseTensor} message SparseTensor + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + SparseTensor.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.defaults) { + object.elemType = 0; + object.shape = null; + } + if (message.elemType != null && message.hasOwnProperty("elemType")) + object.elemType = message.elemType; + if (message.shape != null && message.hasOwnProperty("shape")) + object.shape = $root.onnx.TensorShapeProto.toObject(message.shape, options); + return object; + }; + + /** + * Converts this SparseTensor to JSON. + * @function toJSON + * @memberof onnx.TypeProto.SparseTensor + * @instance + * @returns {Object.} JSON object + */ + SparseTensor.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for SparseTensor + * @function getTypeUrl + * @memberof onnx.TypeProto.SparseTensor + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + SparseTensor.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/onnx.TypeProto.SparseTensor"; + }; + + return SparseTensor; + })(); + + return TypeProto; + })(); + + onnx.OperatorSetIdProto = (function() { + + /** + * Properties of an OperatorSetIdProto. + * @memberof onnx + * @interface IOperatorSetIdProto + * @property {string|null} [domain] OperatorSetIdProto domain + * @property {number|Long|null} [version] OperatorSetIdProto version + */ + + /** + * Constructs a new OperatorSetIdProto. 
+ * @memberof onnx + * @classdesc Represents an OperatorSetIdProto. + * @implements IOperatorSetIdProto + * @constructor + * @param {onnx.IOperatorSetIdProto=} [properties] Properties to set + */ + function OperatorSetIdProto(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * OperatorSetIdProto domain. + * @member {string} domain + * @memberof onnx.OperatorSetIdProto + * @instance + */ + OperatorSetIdProto.prototype.domain = ""; + + /** + * OperatorSetIdProto version. + * @member {number|Long} version + * @memberof onnx.OperatorSetIdProto + * @instance + */ + OperatorSetIdProto.prototype.version = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * Creates a new OperatorSetIdProto instance using the specified properties. + * @function create + * @memberof onnx.OperatorSetIdProto + * @static + * @param {onnx.IOperatorSetIdProto=} [properties] Properties to set + * @returns {onnx.OperatorSetIdProto} OperatorSetIdProto instance + */ + OperatorSetIdProto.create = function create(properties) { + return new OperatorSetIdProto(properties); + }; + + /** + * Encodes the specified OperatorSetIdProto message. Does not implicitly {@link onnx.OperatorSetIdProto.verify|verify} messages. + * @function encode + * @memberof onnx.OperatorSetIdProto + * @static + * @param {onnx.IOperatorSetIdProto} message OperatorSetIdProto message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + OperatorSetIdProto.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.domain != null && Object.hasOwnProperty.call(message, "domain")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.domain); + if (message.version != null && Object.hasOwnProperty.call(message, "version")) + writer.uint32(/* id 2, wireType 0 =*/16).int64(message.version); + return writer; + }; + + /** + * Encodes the specified OperatorSetIdProto message, length delimited. Does not implicitly {@link onnx.OperatorSetIdProto.verify|verify} messages. + * @function encodeDelimited + * @memberof onnx.OperatorSetIdProto + * @static + * @param {onnx.IOperatorSetIdProto} message OperatorSetIdProto message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + OperatorSetIdProto.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an OperatorSetIdProto message from the specified reader or buffer. + * @function decode + * @memberof onnx.OperatorSetIdProto + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {onnx.OperatorSetIdProto} OperatorSetIdProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + OperatorSetIdProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.onnx.OperatorSetIdProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.domain = reader.string(); + break; + } + case 2: { + message.version = reader.int64(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an OperatorSetIdProto message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof onnx.OperatorSetIdProto + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {onnx.OperatorSetIdProto} OperatorSetIdProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + OperatorSetIdProto.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an OperatorSetIdProto message. + * @function verify + * @memberof onnx.OperatorSetIdProto + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + OperatorSetIdProto.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.domain != null && message.hasOwnProperty("domain")) + if (!$util.isString(message.domain)) + return "domain: string expected"; + if (message.version != null && message.hasOwnProperty("version")) + if (!$util.isInteger(message.version) && !(message.version && $util.isInteger(message.version.low) && $util.isInteger(message.version.high))) + return "version: integer|Long expected"; + return null; + }; + + /** + * Creates an OperatorSetIdProto message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof onnx.OperatorSetIdProto + * @static + * @param {Object.} object Plain object + * @returns {onnx.OperatorSetIdProto} OperatorSetIdProto + */ + OperatorSetIdProto.fromObject = function fromObject(object) { + if (object instanceof $root.onnx.OperatorSetIdProto) + return object; + var message = new $root.onnx.OperatorSetIdProto(); + if (object.domain != null) + message.domain = String(object.domain); + if (object.version != null) + if ($util.Long) + (message.version = $util.Long.fromValue(object.version)).unsigned = false; + else if (typeof object.version === "string") + message.version = parseInt(object.version, 10); + else if (typeof object.version === "number") + message.version = object.version; + else if (typeof object.version === "object") + message.version = new $util.LongBits(object.version.low >>> 0, object.version.high >>> 0).toNumber(); + return message; + }; + + /** + * Creates a plain object from an OperatorSetIdProto message. Also converts values to other types if specified. + * @function toObject + * @memberof onnx.OperatorSetIdProto + * @static + * @param {onnx.OperatorSetIdProto} message OperatorSetIdProto + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + OperatorSetIdProto.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.defaults) { + object.domain = ""; + if ($util.Long) { + var long = new $util.Long(0, 0, false); + object.version = options.longs === String ? 
long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.version = options.longs === String ? "0" : 0; + } + if (message.domain != null && message.hasOwnProperty("domain")) + object.domain = message.domain; + if (message.version != null && message.hasOwnProperty("version")) + if (typeof message.version === "number") + object.version = options.longs === String ? String(message.version) : message.version; + else + object.version = options.longs === String ? $util.Long.prototype.toString.call(message.version) : options.longs === Number ? new $util.LongBits(message.version.low >>> 0, message.version.high >>> 0).toNumber() : message.version; + return object; + }; + + /** + * Converts this OperatorSetIdProto to JSON. + * @function toJSON + * @memberof onnx.OperatorSetIdProto + * @instance + * @returns {Object.} JSON object + */ + OperatorSetIdProto.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for OperatorSetIdProto + * @function getTypeUrl + * @memberof onnx.OperatorSetIdProto + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + OperatorSetIdProto.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/onnx.OperatorSetIdProto"; + }; + + return OperatorSetIdProto; + })(); + + /** + * OperatorStatus enum. + * @name onnx.OperatorStatus + * @enum {number} + * @property {number} EXPERIMENTAL=0 EXPERIMENTAL value + * @property {number} STABLE=1 STABLE value + */ + onnx.OperatorStatus = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "EXPERIMENTAL"] = 0; + values[valuesById[1] = "STABLE"] = 1; + return values; + })(); + + onnx.FunctionProto = (function() { + + /** + * Properties of a FunctionProto. + * @memberof onnx + * @interface IFunctionProto + * @property {string|null} [name] FunctionProto name + * @property {Array.|null} [input] FunctionProto input + * @property {Array.|null} [output] FunctionProto output + * @property {Array.|null} [attribute] FunctionProto attribute + * @property {Array.|null} [attributeProto] FunctionProto attributeProto + * @property {Array.|null} [node] FunctionProto node + * @property {string|null} [docString] FunctionProto docString + * @property {Array.|null} [opsetImport] FunctionProto opsetImport + * @property {string|null} [domain] FunctionProto domain + */ + + /** + * Constructs a new FunctionProto. + * @memberof onnx + * @classdesc Represents a FunctionProto. + * @implements IFunctionProto + * @constructor + * @param {onnx.IFunctionProto=} [properties] Properties to set + */ + function FunctionProto(properties) { + this.input = []; + this.output = []; + this.attribute = []; + this.attributeProto = []; + this.node = []; + this.opsetImport = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * FunctionProto name. + * @member {string} name + * @memberof onnx.FunctionProto + * @instance + */ + FunctionProto.prototype.name = ""; + + /** + * FunctionProto input. + * @member {Array.} input + * @memberof onnx.FunctionProto + * @instance + */ + FunctionProto.prototype.input = $util.emptyArray; + + /** + * FunctionProto output. 
+ * @member {Array.} output + * @memberof onnx.FunctionProto + * @instance + */ + FunctionProto.prototype.output = $util.emptyArray; + + /** + * FunctionProto attribute. + * @member {Array.} attribute + * @memberof onnx.FunctionProto + * @instance + */ + FunctionProto.prototype.attribute = $util.emptyArray; + + /** + * FunctionProto attributeProto. + * @member {Array.} attributeProto + * @memberof onnx.FunctionProto + * @instance + */ + FunctionProto.prototype.attributeProto = $util.emptyArray; + + /** + * FunctionProto node. + * @member {Array.} node + * @memberof onnx.FunctionProto + * @instance + */ + FunctionProto.prototype.node = $util.emptyArray; + + /** + * FunctionProto docString. + * @member {string} docString + * @memberof onnx.FunctionProto + * @instance + */ + FunctionProto.prototype.docString = ""; + + /** + * FunctionProto opsetImport. + * @member {Array.} opsetImport + * @memberof onnx.FunctionProto + * @instance + */ + FunctionProto.prototype.opsetImport = $util.emptyArray; + + /** + * FunctionProto domain. + * @member {string} domain + * @memberof onnx.FunctionProto + * @instance + */ + FunctionProto.prototype.domain = ""; + + /** + * Creates a new FunctionProto instance using the specified properties. + * @function create + * @memberof onnx.FunctionProto + * @static + * @param {onnx.IFunctionProto=} [properties] Properties to set + * @returns {onnx.FunctionProto} FunctionProto instance + */ + FunctionProto.create = function create(properties) { + return new FunctionProto(properties); + }; + + /** + * Encodes the specified FunctionProto message. Does not implicitly {@link onnx.FunctionProto.verify|verify} messages. + * @function encode + * @memberof onnx.FunctionProto + * @static + * @param {onnx.IFunctionProto} message FunctionProto message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + FunctionProto.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); + if (message.input != null && message.input.length) + for (var i = 0; i < message.input.length; ++i) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.input[i]); + if (message.output != null && message.output.length) + for (var i = 0; i < message.output.length; ++i) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.output[i]); + if (message.attribute != null && message.attribute.length) + for (var i = 0; i < message.attribute.length; ++i) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.attribute[i]); + if (message.node != null && message.node.length) + for (var i = 0; i < message.node.length; ++i) + $root.onnx.NodeProto.encode(message.node[i], writer.uint32(/* id 7, wireType 2 =*/58).fork()).ldelim(); + if (message.docString != null && Object.hasOwnProperty.call(message, "docString")) + writer.uint32(/* id 8, wireType 2 =*/66).string(message.docString); + if (message.opsetImport != null && message.opsetImport.length) + for (var i = 0; i < message.opsetImport.length; ++i) + $root.onnx.OperatorSetIdProto.encode(message.opsetImport[i], writer.uint32(/* id 9, wireType 2 =*/74).fork()).ldelim(); + if (message.domain != null && Object.hasOwnProperty.call(message, "domain")) + writer.uint32(/* id 10, wireType 2 =*/82).string(message.domain); + if (message.attributeProto != null && message.attributeProto.length) + for (var i = 0; i < 
message.attributeProto.length; ++i) + $root.onnx.AttributeProto.encode(message.attributeProto[i], writer.uint32(/* id 11, wireType 2 =*/90).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified FunctionProto message, length delimited. Does not implicitly {@link onnx.FunctionProto.verify|verify} messages. + * @function encodeDelimited + * @memberof onnx.FunctionProto + * @static + * @param {onnx.IFunctionProto} message FunctionProto message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + FunctionProto.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a FunctionProto message from the specified reader or buffer. + * @function decode + * @memberof onnx.FunctionProto + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {onnx.FunctionProto} FunctionProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + FunctionProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.onnx.FunctionProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.name = reader.string(); + break; + } + case 4: { + if (!(message.input && message.input.length)) + message.input = []; + message.input.push(reader.string()); + break; + } + case 5: { + if (!(message.output && message.output.length)) + message.output = []; + message.output.push(reader.string()); + break; + } + case 6: { + if (!(message.attribute && message.attribute.length)) + message.attribute = []; + message.attribute.push(reader.string()); + break; + } + case 11: { + if (!(message.attributeProto && message.attributeProto.length)) + message.attributeProto = []; + message.attributeProto.push($root.onnx.AttributeProto.decode(reader, reader.uint32())); + break; + } + case 7: { + if (!(message.node && message.node.length)) + message.node = []; + message.node.push($root.onnx.NodeProto.decode(reader, reader.uint32())); + break; + } + case 8: { + message.docString = reader.string(); + break; + } + case 9: { + if (!(message.opsetImport && message.opsetImport.length)) + message.opsetImport = []; + message.opsetImport.push($root.onnx.OperatorSetIdProto.decode(reader, reader.uint32())); + break; + } + case 10: { + message.domain = reader.string(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a FunctionProto message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof onnx.FunctionProto + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {onnx.FunctionProto} FunctionProto + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + FunctionProto.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a FunctionProto message. 
+ * @function verify + * @memberof onnx.FunctionProto + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + FunctionProto.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.input != null && message.hasOwnProperty("input")) { + if (!Array.isArray(message.input)) + return "input: array expected"; + for (var i = 0; i < message.input.length; ++i) + if (!$util.isString(message.input[i])) + return "input: string[] expected"; + } + if (message.output != null && message.hasOwnProperty("output")) { + if (!Array.isArray(message.output)) + return "output: array expected"; + for (var i = 0; i < message.output.length; ++i) + if (!$util.isString(message.output[i])) + return "output: string[] expected"; + } + if (message.attribute != null && message.hasOwnProperty("attribute")) { + if (!Array.isArray(message.attribute)) + return "attribute: array expected"; + for (var i = 0; i < message.attribute.length; ++i) + if (!$util.isString(message.attribute[i])) + return "attribute: string[] expected"; + } + if (message.attributeProto != null && message.hasOwnProperty("attributeProto")) { + if (!Array.isArray(message.attributeProto)) + return "attributeProto: array expected"; + for (var i = 0; i < message.attributeProto.length; ++i) { + var error = $root.onnx.AttributeProto.verify(message.attributeProto[i]); + if (error) + return "attributeProto." + error; + } + } + if (message.node != null && message.hasOwnProperty("node")) { + if (!Array.isArray(message.node)) + return "node: array expected"; + for (var i = 0; i < message.node.length; ++i) { + var error = $root.onnx.NodeProto.verify(message.node[i]); + if (error) + return "node." + error; + } + } + if (message.docString != null && message.hasOwnProperty("docString")) + if (!$util.isString(message.docString)) + return "docString: string expected"; + if (message.opsetImport != null && message.hasOwnProperty("opsetImport")) { + if (!Array.isArray(message.opsetImport)) + return "opsetImport: array expected"; + for (var i = 0; i < message.opsetImport.length; ++i) { + var error = $root.onnx.OperatorSetIdProto.verify(message.opsetImport[i]); + if (error) + return "opsetImport." + error; + } + } + if (message.domain != null && message.hasOwnProperty("domain")) + if (!$util.isString(message.domain)) + return "domain: string expected"; + return null; + }; + + /** + * Creates a FunctionProto message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof onnx.FunctionProto + * @static + * @param {Object.} object Plain object + * @returns {onnx.FunctionProto} FunctionProto + */ + FunctionProto.fromObject = function fromObject(object) { + if (object instanceof $root.onnx.FunctionProto) + return object; + var message = new $root.onnx.FunctionProto(); + if (object.name != null) + message.name = String(object.name); + if (object.input) { + if (!Array.isArray(object.input)) + throw TypeError(".onnx.FunctionProto.input: array expected"); + message.input = []; + for (var i = 0; i < object.input.length; ++i) + message.input[i] = String(object.input[i]); + } + if (object.output) { + if (!Array.isArray(object.output)) + throw TypeError(".onnx.FunctionProto.output: array expected"); + message.output = []; + for (var i = 0; i < object.output.length; ++i) + message.output[i] = String(object.output[i]); + } + if (object.attribute) { + if (!Array.isArray(object.attribute)) + throw TypeError(".onnx.FunctionProto.attribute: array expected"); + message.attribute = []; + for (var i = 0; i < object.attribute.length; ++i) + message.attribute[i] = String(object.attribute[i]); + } + if (object.attributeProto) { + if (!Array.isArray(object.attributeProto)) + throw TypeError(".onnx.FunctionProto.attributeProto: array expected"); + message.attributeProto = []; + for (var i = 0; i < object.attributeProto.length; ++i) { + if (typeof object.attributeProto[i] !== "object") + throw TypeError(".onnx.FunctionProto.attributeProto: object expected"); + message.attributeProto[i] = $root.onnx.AttributeProto.fromObject(object.attributeProto[i]); + } + } + if (object.node) { + if (!Array.isArray(object.node)) + throw TypeError(".onnx.FunctionProto.node: array expected"); + message.node = []; + for (var i = 0; i < object.node.length; ++i) { + if (typeof object.node[i] !== "object") + throw TypeError(".onnx.FunctionProto.node: object expected"); + message.node[i] = $root.onnx.NodeProto.fromObject(object.node[i]); + } + } + if (object.docString != null) + message.docString = String(object.docString); + if (object.opsetImport) { + if (!Array.isArray(object.opsetImport)) + throw TypeError(".onnx.FunctionProto.opsetImport: array expected"); + message.opsetImport = []; + for (var i = 0; i < object.opsetImport.length; ++i) { + if (typeof object.opsetImport[i] !== "object") + throw TypeError(".onnx.FunctionProto.opsetImport: object expected"); + message.opsetImport[i] = $root.onnx.OperatorSetIdProto.fromObject(object.opsetImport[i]); + } + } + if (object.domain != null) + message.domain = String(object.domain); + return message; + }; + + /** + * Creates a plain object from a FunctionProto message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof onnx.FunctionProto + * @static + * @param {onnx.FunctionProto} message FunctionProto + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + FunctionProto.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.arrays || options.defaults) { + object.input = []; + object.output = []; + object.attribute = []; + object.node = []; + object.opsetImport = []; + object.attributeProto = []; + } + if (options.defaults) { + object.name = ""; + object.docString = ""; + object.domain = ""; + } + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.input && message.input.length) { + object.input = []; + for (var j = 0; j < message.input.length; ++j) + object.input[j] = message.input[j]; + } + if (message.output && message.output.length) { + object.output = []; + for (var j = 0; j < message.output.length; ++j) + object.output[j] = message.output[j]; + } + if (message.attribute && message.attribute.length) { + object.attribute = []; + for (var j = 0; j < message.attribute.length; ++j) + object.attribute[j] = message.attribute[j]; + } + if (message.node && message.node.length) { + object.node = []; + for (var j = 0; j < message.node.length; ++j) + object.node[j] = $root.onnx.NodeProto.toObject(message.node[j], options); + } + if (message.docString != null && message.hasOwnProperty("docString")) + object.docString = message.docString; + if (message.opsetImport && message.opsetImport.length) { + object.opsetImport = []; + for (var j = 0; j < message.opsetImport.length; ++j) + object.opsetImport[j] = $root.onnx.OperatorSetIdProto.toObject(message.opsetImport[j], options); + } + if (message.domain != null && message.hasOwnProperty("domain")) + object.domain = message.domain; + if (message.attributeProto && message.attributeProto.length) { + object.attributeProto = []; + for (var j = 0; j < message.attributeProto.length; ++j) + object.attributeProto[j] = $root.onnx.AttributeProto.toObject(message.attributeProto[j], options); + } + return object; + }; + + /** + * Converts this FunctionProto to JSON. 
+ * @function toJSON + * @memberof onnx.FunctionProto + * @instance + * @returns {Object.} JSON object + */ + FunctionProto.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for FunctionProto + * @function getTypeUrl + * @memberof onnx.FunctionProto + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + FunctionProto.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/onnx.FunctionProto"; + }; + + return FunctionProto; + })(); + + return onnx; +})(); + +module.exports = $root; diff --git a/js/node/test/test-utils.ts b/js/node/test/test-utils.ts index 968e8a1881810..3eef90356a335 100644 --- a/js/node/test/test-utils.ts +++ b/js/node/test/test-utils.ts @@ -4,10 +4,11 @@ import assert from 'assert'; import * as fs from 'fs-extra'; import {jsonc} from 'jsonc'; -import * as onnx_proto from 'onnx-proto'; import {InferenceSession, Tensor} from 'onnxruntime-common'; import * as path from 'path'; +import * as onnx_proto from './ort-schema/protobuf/onnx'; + export const TEST_ROOT = __dirname; export const TEST_DATA_ROOT = path.join(TEST_ROOT, 'testdata'); diff --git a/js/package-lock.json b/js/package-lock.json index c87a58a3196d6..c16a8b59a3a6f 100644 --- a/js/package-lock.json +++ b/js/package-lock.json @@ -3391,9 +3391,9 @@ } }, "node_modules/normalize-package-data/node_modules/semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "version": "5.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", + "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", "dev": true, "bin": { "semver": "bin/semver" @@ -7011,9 +7011,9 @@ }, "dependencies": { "semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "version": "5.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", + "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", "dev": true } } From 9364c05170d78c4516886dc91ec86afdce06ad6d Mon Sep 17 00:00:00 2001 From: Changming Sun Date: Fri, 17 Nov 2023 22:49:03 -0800 Subject: [PATCH 32/38] Update web-ci.yml: remove depth=1 (#18500) ### Description It causes our "NPM Packaging Pipeline" to fail. 
### Motivation and Context --- tools/ci_build/github/azure-pipelines/templates/web-ci.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/ci_build/github/azure-pipelines/templates/web-ci.yml b/tools/ci_build/github/azure-pipelines/templates/web-ci.yml index c649883ea0d8b..9982b36509b68 100644 --- a/tools/ci_build/github/azure-pipelines/templates/web-ci.yml +++ b/tools/ci_build/github/azure-pipelines/templates/web-ci.yml @@ -65,7 +65,6 @@ stages: clean: all steps: - checkout: self - fetchDepth: 1 submodules: false - script: | git submodule sync -- cmake/external/onnx From 53917a33536ab8873264c55c9cac8d91d5a8d040 Mon Sep 17 00:00:00 2001 From: RandySheriffH <48490400+RandySheriffH@users.noreply.github.com> Date: Sat, 18 Nov 2023 15:00:54 -0800 Subject: [PATCH 33/38] Move up members in Lite Custom Op hierarchy for possible memleaks. (#18478) Move data members in LiteOpFunc to its parent to avoid possible memory leaks. --------- Co-authored-by: Randy Shuai --- .../core/session/onnxruntime_lite_custom_op.h | 47 ++++++++++++------- 1 file changed, 30 insertions(+), 17 deletions(-) diff --git a/include/onnxruntime/core/session/onnxruntime_lite_custom_op.h b/include/onnxruntime/core/session/onnxruntime_lite_custom_op.h index 443710884743a..0c0af16d4e20c 100644 --- a/include/onnxruntime/core/session/onnxruntime_lite_custom_op.h +++ b/include/onnxruntime/core/session/onnxruntime_lite_custom_op.h @@ -399,6 +399,15 @@ struct TensorArray : public ArgBase { using Variadic = TensorArray; +/* +Note: +OrtLiteCustomOp inherits from OrtCustomOp to bridge between a custom func/struct and ort core. +The lifetime of an OrtLiteCustomOp instance is managed by customer code, not ort, so: +1. DO NOT cast OrtLiteCustomOp to OrtCustomOp and release since there is no virtual destructor in the hierarchy. +2. OrtLiteCustomFunc and OrtLiteCustomStruct, as two sub-structs, can be released in the form of OrtLiteCustomOp since all members are kept in the OrtLiteCustomOp, + hence memory could still be recycled properly. +Further, OrtCustomOp is a C struct bearing no v-table, so offspring structs are, by design, free of virtual functions to maintain cast safety.
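+As the members below show, the callable state (compute_fn_ and compute_fn_return_status_, stored type-erased as void*, plus shape_infer_fn_) lives on OrtLiteCustomOp itself; the sub-structs only cast it back to the typed function pointers inside CreateKernel, so no data is owned solely by a sub-struct.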
+*/ struct OrtLiteCustomOp : public OrtCustomOp { using ConstOptionalFloatTensor = std::optional&>; using OptionalFloatTensor = std::optional>; @@ -774,10 +783,13 @@ struct OrtLiteCustomOp : public OrtCustomOp { OrtLiteCustomOp(const char* op_name, const char* execution_provider, - int start_ver = 1, int end_ver = MAX_CUSTOM_OP_END_VER) : op_name_(op_name), - execution_provider_(execution_provider), - start_ver_(start_ver), - end_ver_(end_ver) { + ShapeInferFn shape_infer_fn, + int start_ver = 1, + int end_ver = MAX_CUSTOM_OP_END_VER) : op_name_(op_name), + execution_provider_(execution_provider), + shape_infer_fn_(shape_infer_fn), + start_ver_(start_ver), + end_ver_(end_ver) { OrtCustomOp::version = ORT_API_VERSION; OrtCustomOp::GetName = [](const OrtCustomOp* op) { return static_cast(op)->op_name_.c_str(); }; @@ -858,8 +870,13 @@ struct OrtLiteCustomOp : public OrtCustomOp { std::vector input_types_; std::vector output_types_; + ShapeInferFn shape_infer_fn_ = {}; + int start_ver_ = 1; int end_ver_ = MAX_CUSTOM_OP_END_VER; + + void* compute_fn_ = {}; + void* compute_fn_return_status_ = {}; }; //////////////////////////// OrtLiteCustomFunc //////////////////////////////// @@ -891,9 +908,8 @@ struct OrtLiteCustomFunc : public OrtLiteCustomOp { ComputeFn compute_fn, ShapeInferFn shape_infer_fn = {}, int start_ver = 1, - int end_ver = MAX_CUSTOM_OP_END_VER) : OrtLiteCustomOp(op_name, execution_provider, start_ver, end_ver), - compute_fn_(compute_fn), - shape_infer_fn_(shape_infer_fn) { + int end_ver = MAX_CUSTOM_OP_END_VER) : OrtLiteCustomOp(op_name, execution_provider, shape_infer_fn, start_ver, end_ver) { + compute_fn_ = reinterpret_cast(compute_fn); ParseArgs(input_types_, output_types_); OrtCustomOp::KernelCompute = [](void* op_kernel, OrtKernelContext* context) { @@ -905,7 +921,8 @@ struct OrtLiteCustomFunc : public OrtLiteCustomOp { OrtCustomOp::CreateKernel = [](const OrtCustomOp* this_, const OrtApi* ort_api, const OrtKernelInfo* info) { auto kernel = std::make_unique(); - kernel->compute_fn_ = static_cast(this_)->compute_fn_; + auto me = static_cast(this_); + kernel->compute_fn_ = reinterpret_cast(me->compute_fn_); Ort::ThrowOnError(ort_api->KernelInfo_GetInputCount(info, &kernel->num_input_)); Ort::ThrowOnError(ort_api->KernelInfo_GetOutputCount(info, &kernel->num_output_)); auto self = static_cast(this_); @@ -931,9 +948,8 @@ struct OrtLiteCustomFunc : public OrtLiteCustomOp { ComputeFnReturnStatus compute_fn_return_status, ShapeInferFn shape_infer_fn = {}, int start_ver = 1, - int end_ver = MAX_CUSTOM_OP_END_VER) : OrtLiteCustomOp(op_name, execution_provider, start_ver, end_ver), - compute_fn_return_status_(compute_fn_return_status), - shape_infer_fn_(shape_infer_fn) { + int end_ver = MAX_CUSTOM_OP_END_VER) : OrtLiteCustomOp(op_name, execution_provider, shape_infer_fn, start_ver, end_ver) { + compute_fn_return_status_ = reinterpret_cast(compute_fn_return_status); ParseArgs(input_types_, output_types_); OrtCustomOp::KernelComputeV2 = [](void* op_kernel, OrtKernelContext* context) -> OrtStatusPtr { @@ -945,7 +961,8 @@ struct OrtLiteCustomFunc : public OrtLiteCustomOp { OrtCustomOp::CreateKernel = [](const OrtCustomOp* this_, const OrtApi* ort_api, const OrtKernelInfo* info) { auto kernel = std::make_unique(); - kernel->compute_fn_return_status_ = static_cast(this_)->compute_fn_return_status_; + auto me = static_cast(this_); + kernel->compute_fn_return_status_ = reinterpret_cast(me->compute_fn_return_status_); Ort::ThrowOnError(ort_api->KernelInfo_GetInputCount(info, 
&kernel->num_input_)); Ort::ThrowOnError(ort_api->KernelInfo_GetOutputCount(info, &kernel->num_output_)); auto self = static_cast(this_); @@ -965,10 +982,6 @@ struct OrtLiteCustomFunc : public OrtLiteCustomOp { }; } } - - ComputeFn compute_fn_ = {}; - ComputeFnReturnStatus compute_fn_return_status_ = {}; - ShapeInferFn shape_infer_fn_ = {}; }; // struct OrtLiteCustomFunc /////////////////////////// OrtLiteCustomStruct /////////////////////////// @@ -1007,7 +1020,7 @@ struct OrtLiteCustomStruct : public OrtLiteCustomOp { OrtLiteCustomStruct(const char* op_name, const char* execution_provider, int start_ver = 1, - int end_ver = MAX_CUSTOM_OP_END_VER) : OrtLiteCustomOp(op_name, execution_provider, start_ver, end_ver) { + int end_ver = MAX_CUSTOM_OP_END_VER) : OrtLiteCustomOp(op_name, execution_provider, {}, start_ver, end_ver) { SetCompute(&CustomOp::Compute); OrtCustomOp::CreateKernel = [](const OrtCustomOp* this_, const OrtApi* ort_api, const OrtKernelInfo* info) { From 97cc40d75a50e4c10c3f9232bb52fb76db5a7f9b Mon Sep 17 00:00:00 2001 From: Akshay Sonawane <111780983+apsonawane@users.noreply.github.com> Date: Sat, 18 Nov 2023 23:39:04 -0800 Subject: [PATCH 34/38] Add fusion patterns for conformer-transducer model (#18461) ### Description Add conformer-transducer model type to optimizer. This PR adds pattern matches for attention shown below: Unfused attention: ![ct_unfused](https://github.com/microsoft/onnxruntime/assets/111780983/46c71ed8-67e0-4607-85b1-bcadba5a2956) Fused attention: ![ct_fused](https://github.com/microsoft/onnxruntime/assets/111780983/fbb91c96-0d4b-4f0b-8674-1ae3b9b9a92e) --- cmake/onnxruntime_python.cmake | 7 + .../tools/transformers/fusion_attention.py | 8 +- .../fusion_conformer_attention.py | 143 +++++ .../transformers/onnx_model_conformer.py | 33 ++ .../python/tools/transformers/optimizer.py | 2 + .../transformers/conformer_model_generator.py | 543 ++++++++++++++++++ .../python/transformers/test_conformer.py | 69 +++ .../conformer/conformer_self_mha_fused.onnx | Bin 0 -> 4212207 bytes 8 files changed, 802 insertions(+), 3 deletions(-) create mode 100644 onnxruntime/python/tools/transformers/fusion_conformer_attention.py create mode 100644 onnxruntime/python/tools/transformers/onnx_model_conformer.py create mode 100644 onnxruntime/test/python/transformers/conformer_model_generator.py create mode 100644 onnxruntime/test/python/transformers/test_conformer.py create mode 100644 onnxruntime/test/python/transformers/test_data/models/conformer/conformer_self_mha_fused.onnx diff --git a/cmake/onnxruntime_python.cmake b/cmake/onnxruntime_python.cmake index cdfb2139730ad..345ef2b504aa4 100644 --- a/cmake/onnxruntime_python.cmake +++ b/cmake/onnxruntime_python.cmake @@ -436,6 +436,9 @@ if (onnxruntime_BUILD_UNIT_TESTS) file(GLOB onnxruntime_python_transformers_testdata_whisper CONFIGURE_DEPENDS "${ONNXRUNTIME_ROOT}/test/python/transformers/test_data/models/whisper/*.onnx" ) + file(GLOB onnxruntime_python_transformers_testdata_conformer CONFIGURE_DEPENDS + "${ONNXRUNTIME_ROOT}/test/python/transformers/test_data/models/conformer/*.onnx" + ) endif() file(GLOB onnxruntime_python_tools_srcs CONFIGURE_DEPENDS @@ -549,6 +552,7 @@ add_custom_command( COMMAND ${CMAKE_COMMAND} -E make_directory $/transformers/test_data/models COMMAND ${CMAKE_COMMAND} -E make_directory $/transformers/test_data/models/whisper COMMAND ${CMAKE_COMMAND} -E make_directory $/eager_test + COMMAND ${CMAKE_COMMAND} -E make_directory $/transformers/test_data/models/conformer COMMAND ${CMAKE_COMMAND} -E copy 
${ONNXRUNTIME_ROOT}/__init__.py $/onnxruntime/ @@ -701,6 +705,9 @@ if (onnxruntime_BUILD_UNIT_TESTS) COMMAND ${CMAKE_COMMAND} -E copy ${onnxruntime_python_transformers_testdata_whisper} $/transformers/test_data/models/whisper/ + COMMAND ${CMAKE_COMMAND} -E copy + ${onnxruntime_python_transformers_testdata_conformer} + $/transformers/test_data/models/conformer/ ) endif() diff --git a/onnxruntime/python/tools/transformers/fusion_attention.py b/onnxruntime/python/tools/transformers/fusion_attention.py index c1b241aa1a5ec..d11cb91d98b0c 100644 --- a/onnxruntime/python/tools/transformers/fusion_attention.py +++ b/onnxruntime/python/tools/transformers/fusion_attention.py @@ -657,7 +657,6 @@ def create_multihead_attention_node( return None graph_input_names = set([node.name for node in self.model.graph().input]) - graph_output_names = set([node.name for node in self.model.graph().output]) mha_node_name = self.model.create_node_name("Attention") # Add initial Q/K/V inputs for MHA @@ -693,12 +692,15 @@ def create_multihead_attention_node( mha_inputs.append("") # Add optional inputs for MHA - if past_k and past_v and past_k in graph_input_names and past_v in graph_input_names: + + if past_k and past_v: mha_inputs.extend([key_padding_mask, add_qk, past_k, past_v]) + elif key_padding_mask or add_qk: + mha_inputs.extend([key_padding_mask, add_qk]) # Add outputs for MHA mha_outputs = [output] - if present_k and present_v and present_k in graph_output_names and present_v in graph_output_names: + if present_k and present_v: mha_outputs.extend([present_k, present_v]) mha_node = helper.make_node( diff --git a/onnxruntime/python/tools/transformers/fusion_conformer_attention.py b/onnxruntime/python/tools/transformers/fusion_conformer_attention.py new file mode 100644 index 0000000000000..6bc681c57444e --- /dev/null +++ b/onnxruntime/python/tools/transformers/fusion_conformer_attention.py @@ -0,0 +1,143 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- +import logging + +from fusion_attention import AttentionMask, FusionAttention +from onnx_model import OnnxModel + +logger = logging.getLogger(__name__) + + +class FusionConformerAttention(FusionAttention): + """ + Fuse Conformer Attention subgraph into one MultiHeadAttention node. + """ + + def __init__( + self, + model: OnnxModel, + hidden_size: int, + num_heads: int, + attention_mask: AttentionMask, + ): + super().__init__(model, hidden_size, num_heads, attention_mask) + + def fuse(self, normalize_node, input_name_to_nodes, output_name_to_node): + # SkipLayerNormalization has two inputs, and one of them is the root input for attention. 
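+ # The walk starts from the output projection (Add + MatMul) and follows Reshape -> Transpose -> MatMul
+ # back to the attention-probs x V product; the V, QK, Q, and K paths (including the past/present
+ # cache Concat nodes) are matched next, and the whole subgraph is then replaced by a single
+ # MultiHeadAttention node.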
+ qkv_nodes = self.model.match_parent_path( + normalize_node, + ["Add", "MatMul", "Reshape", "Transpose", "MatMul"], + [1, 1, 0, 0, 0], + ) + if qkv_nodes is not None: + ( + _, + _, + reshape_qkv, + transpose_qkv, + matmul_qkv, + ) = qkv_nodes + else: + logger.debug("fuse_conformer_attention: failed to match qkv path") + return + + v_nodes = self.model.match_parent_path( + matmul_qkv, + ["Concat", "Transpose", "Reshape", "Add", "MatMul"], + [1, 1, 0, 0, 1], + ) + + add_v = None + if v_nodes is not None: + (concat_v, _, _, add_v, matmul_v) = v_nodes + concat_parent = self.model.get_parent(concat_v, 0, None) + present_v = concat_v.output[0] + past_v = concat_parent.output[0] + else: + logger.debug("fuse_conformer_attention: failed to match v path") + return + + qk_nodes = self.model.match_parent_path(matmul_qkv, ["Softmax", "Add", "MatMul"], [0, 0, 0]) + + if qk_nodes is not None: + _, add_qk, matmul_qk = qk_nodes + else: + logger.debug("fuse_conformer_attention: failed to match qk path") + return + + q_nodes = self.model.match_parent_path( + matmul_qk, + ["Div", "Transpose", "Reshape", "Add", "MatMul"], + [0, 0, 0, 0, 1], + ) + if q_nodes is not None: + _, _, reshape_q, add_q, matmul_q = q_nodes + else: + logger.debug("fuse_conformer_attention: failed to match q path") + return + + k_nodes = self.model.match_parent_path( + matmul_qk, + ["Transpose", "Concat", "Transpose", "Reshape", "Add", "MatMul"], + [1, 0, 1, 0, 0, 1], + ) + + matmul_k = None + if k_nodes is not None: + _, concat_k, _, _, add_k, matmul_k = k_nodes + concat_parent = self.model.get_parent(concat_k, 0, None) + past_k = concat_parent.output[0] + present_k = concat_k.output[0] + else: + logger.debug("fuse_conformer_attention: failed to match k path") + return + + attention_last_node = reshape_qkv + num_heads, hidden_size = self.get_num_heads_and_hidden_size(reshape_q) + + if num_heads <= 0 or hidden_size <= 0 or (hidden_size % num_heads) != 0: + logger.debug("fuse_conformer_attention: failed to detect num_heads or hidden_size") + return + + new_node = self.create_multihead_attention_node( + matmul_q, + matmul_k, + matmul_v, + add_q, + add_k, + add_v, + num_heads, + hidden_size, + attention_last_node.output[0], + add_qk=add_qk.input[1], + past_k=past_k, + past_v=past_v, + present_k=present_k, + present_v=present_v, + ) + + if new_node is None: + logger.debug("fuse_conformer_attention: MultiHeadAttention node creation failed") + return + + self.nodes_to_add.append(new_node) + self.node_name_to_graph_name[new_node.name] = self.this_graph_name + + self.nodes_to_remove.extend([attention_last_node, transpose_qkv, matmul_qkv]) + self.nodes_to_remove.extend(qk_nodes) + + # When using multihead attention, keep MatMul nodes in original graph + if q_nodes[-1].op_type == "MatMul": + q_nodes.pop() + if k_nodes[-1].op_type == "MatMul": + k_nodes.pop() + if v_nodes[-1].op_type == "MatMul": + v_nodes.pop() + + self.nodes_to_remove.extend(k_nodes) + self.nodes_to_remove.extend(v_nodes) + + # Use prune graph to remove mask nodes since they are shared by all attention nodes. + self.prune_graph = True diff --git a/onnxruntime/python/tools/transformers/onnx_model_conformer.py b/onnxruntime/python/tools/transformers/onnx_model_conformer.py new file mode 100644 index 0000000000000..1506d85f53fd4 --- /dev/null +++ b/onnxruntime/python/tools/transformers/onnx_model_conformer.py @@ -0,0 +1,33 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. +# -------------------------------------------------------------------------- +import logging +from typing import Optional + +from fusion_attention import AttentionMask +from fusion_conformer_attention import FusionConformerAttention +from fusion_options import FusionOptions +from onnx_model_bert import BertOnnxModel + +logger = logging.getLogger(__name__) + + +class ConformerOnnxModel(BertOnnxModel): + def __init__(self, model, num_heads, hidden_size): + super().__init__(model, num_heads, hidden_size) + self.attention_mask = AttentionMask(self) + self.attention_fusion = FusionConformerAttention(self, self.hidden_size, self.num_heads, self.attention_mask) + + def optimize(self, options: Optional[FusionOptions] = None, add_dynamic_axes: bool = False): + self.attention_fusion.use_multi_head_attention = False if options is None else options.use_multi_head_attention + self.attention_fusion.disable_multi_head_attention_bias = ( + False if options is None else options.disable_multi_head_attention_bias + ) + super().optimize(options, add_dynamic_axes) + + def fuse_attention(self): + self.attention_fusion.apply() + + def preprocess(self): + self.adjust_reshape_and_expand() diff --git a/onnxruntime/python/tools/transformers/optimizer.py b/onnxruntime/python/tools/transformers/optimizer.py index 94a757320e598..6842a97fe0c77 100644 --- a/onnxruntime/python/tools/transformers/optimizer.py +++ b/onnxruntime/python/tools/transformers/optimizer.py @@ -32,6 +32,7 @@ from onnx_model_bert_keras import BertOnnxModelKeras from onnx_model_bert_tf import BertOnnxModelTF from onnx_model_clip import ClipOnnxModel +from onnx_model_conformer import ConformerOnnxModel from onnx_model_gpt2 import Gpt2OnnxModel from onnx_model_t5 import T5OnnxModel from onnx_model_tnlr import TnlrOnnxModel @@ -56,6 +57,7 @@ "unet": (UnetOnnxModel, "pytorch", 1), # UNet in Stable Diffusion "vae": (VaeOnnxModel, "pytorch", 1), # UAE in Stable Diffusion "vit": (BertOnnxModel, "pytorch", 1), + "conformer": (ConformerOnnxModel, "pytorch", 1), } diff --git a/onnxruntime/test/python/transformers/conformer_model_generator.py b/onnxruntime/test/python/transformers/conformer_model_generator.py new file mode 100644 index 0000000000000..71e4f2b63cf4f --- /dev/null +++ b/onnxruntime/test/python/transformers/conformer_model_generator.py @@ -0,0 +1,543 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +from typing import List + +import numpy as np +import onnx +from bert_model_generator import float_tensor +from onnx import TensorProto, helper, numpy_helper + + +# Adapted from bert_model_generator.py +def get_tensor_and_weight(name: str, shape: List[int], random=False, zeros=False): + low = 0.0 + high = 1.0 + total_elements = 1 + for x in shape: + total_elements *= x + weights = ( + [np.random.uniform(low, high) for _ in range(total_elements)] + if random + else [0.0] * total_elements + if zeros + else [1.0] * total_elements + ) + return helper.make_tensor(name, TensorProto.FLOAT, shape, weights), weights + + +def create_conformer_attention( + hidden_size=512, + num_heads=8, + epsilon=0.000009999999747378752, + add_before_layernorm=False, + fused=False, +): + # Get head size and ensure head size is an integer + assert hidden_size % num_heads == 0 + head_size = hidden_size // num_heads + + # Construct input and output nodes + inputs = [ + helper.make_tensor_value_info("input_0", TensorProto.FLOAT, ["batch_size", 8, 512]), + helper.make_tensor_value_info("input_1", TensorProto.FLOAT, ["batch_size", 8, 512]), + helper.make_tensor_value_info("inp_cache_k", TensorProto.FLOAT, [24, "batch_size", 8, 72, head_size]), + helper.make_tensor_value_info("inp_cache_v", TensorProto.FLOAT, [24, "batch_size", 8, 72, head_size]), + ] + outputs = [ + helper.make_tensor_value_info("output_0", TensorProto.FLOAT, ["batch_size", 8, hidden_size]), + helper.make_tensor_value_info("output_1", TensorProto.FLOAT, ["batch_size", 8, 512]), + helper.make_tensor_value_info("oup_cache_k", TensorProto.FLOAT, ["batch_size", 8, 80, 64]), + helper.make_tensor_value_info("oup_cache_v", TensorProto.FLOAT, ["batch_size", 8, 80, 64]), + ] + nodes = [] + + # Create layernorm (Add + LayerNorm or SkipLayerNorm) + if add_before_layernorm: + nodes.extend( + [ + helper.make_node( + "Add", ["input_0", "input_1"], ["layernorm_output_to_skiplayernorm"], "add_before_layernorm" + ), + helper.make_node( + "LayerNormalization", + ["layernorm_output_to_skiplayernorm", "layernorm_weight", "layernorm_bias"], + ["layernorm_add_output_to_matmul"], + "layernorm", + epsilon=epsilon, + ), + ] + ) + else: + nodes.append( + helper.make_node( + "SkipLayerNormalization", + ["input_0", "input_1", "layernorm_weight", "layernorm_bias"], + ["layernorm_add_output_to_matmul", "", "", "layernorm_add_output_to_skiplayernorm"], + "skiplayernorm", + domain="com.microsoft", + epsilon=epsilon, + ) + ) + + if fused: + fused_q_nodes = [ + helper.make_node( + "MatMul", + ["layernorm_add_output_to_matmul", "q_weight"], + ["q_matmul_output"], + "q_path_matmul", + ), + helper.make_node("Add", ["q_bias", "q_matmul_output"], ["q_add_output"], "q_path_add"), + helper.make_node( + "Reshape", ["q_add_output", "k_attn_heads_output"], ["q_4d_bsnh"], "q_reshape_to_4d", allowzero=0 + ), + helper.make_node("Transpose", ["q_4d_bsnh"], ["q_4d_bnsh"], "q_transpose_to_bnsh", perm=[0, 2, 1, 3]), + helper.make_node( + "Div", + ["q_4d_bnsh", "q_scale"], + ["q_div_output"], + "q_div_by_sqrt_head_size", + ), + ] + nodes.extend(fused_q_nodes) + nodes.extend( + [ + helper.make_node( + "MatMul", + ["layernorm_add_output_to_matmul", "k_weight"], + ["k_matmul_output"], + "k_path_matmul", + ), + helper.make_node( + "MatMul", + ["layernorm_add_output_to_matmul", "v_weight"], + ["v_matmul_output"], + "v_path_matmul", + ), + helper.make_node( + "Reshape", ["q_div_output", "position_embed_output"], ["reshape_pos_emb"], 
"r_pos_emb", allowzero=0 + ), + helper.make_node( + "Transpose", ["reshape_pos_emb"], ["transpose_reshape_pos_emb"], "p_transpose", perm=[1, 0, 2] + ), + helper.make_node( + "MatMul", + ["transpose_reshape_pos_emb", "transpose_reshape_pos_emb"], + ["pos_matmul"], + "pos_embed_matmul", + ), + helper.make_node( + "Transpose", ["pos_matmul"], ["transpose_pos_matmul"], "p_matmul_transpose", perm=[1, 0, 2] + ), + helper.make_node( + "Reshape", + ["transpose_pos_matmul", "position_embed_output"], + ["reshape_position_emb"], + "final_reshape_pos_emb", + allowzero=0, + ), + helper.make_node( + "MultiHeadAttention", + [ + "q_matmul_output", + "k_matmul_output", + "v_matmul_output", + "Attention_0_qkv_bias", + "", + "reshape_position_emb", + "gather_past_k_output", + "gather_past_v_output", + ], + ["attn_output", "oup_cache_k", "oup_cache_v"], + "Attention_0", + domain="com.microsoft", + num_heads=num_heads, + ), + ] + ) + # Create nodes used with qkv concats, reshapes, and transposes + nodes.extend( + [ + helper.make_node("Shape", ["layernorm_add_output_to_matmul"], ["shape_output"], "shape", start=0), + helper.make_node("Gather", ["shape_output", "idx_0"], ["gather_0_output"], "gather_0", axis=0), + helper.make_node( + "Mul", + ["gather_0_output", "num_heads_int"], + ["mul_attn_heads_output"], + "mul_num_heads", + ), + helper.make_node( + "Unsqueeze", + ["mul_attn_heads_output", "unsqueeze_axes_input"], + ["unsqueeze_position_embed"], + "unsqueeze_position_embed", + ), + helper.make_node( + "Concat", + ["unsqueeze_position_embed", "neg_one", "head_size"], + ["position_embed_output"], + "position_embed_concat_output", + axis=0, + ), + helper.make_node( + "Unsqueeze", + ["gather_0_output", "unsqueeze_axes_input"], + ["unsqueeze_attn_heads_output"], + "unsqueeze_num_heads", + ), + helper.make_node( + "Concat", + ["unsqueeze_attn_heads_output", "neg_one", "head_size", "q_bsnh_reshape"], + ["k_attn_heads_output"], + "k_num_heads", + axis=0, + ), + ] + ) + + nodes.extend( + [ + helper.make_node("Gather", ["inp_cache_v", "idx_0"], ["gather_past_v_output"], "gather_past_v", axis=0), + helper.make_node("Gather", ["inp_cache_k", "idx_0"], ["gather_past_k_output"], "gather_past_k", axis=0), + ] + ) + else: + # Create nodes for Q/K/V paths + q_nodes = [ + helper.make_node( + "MatMul", ["layernorm_add_output_to_matmul", "q_weight"], ["q_matmul_output"], "q_path_matmul" + ), + helper.make_node("Add", ["q_bias", "q_matmul_output"], ["q_add_output"], "q_path_add"), + helper.make_node("Reshape", ["q_add_output", "q_attn_heads_output"], ["q_4d_bsnh"], "q_reshape_to_4d"), + helper.make_node("Transpose", ["q_4d_bsnh"], ["q_4d_bnsh"], "q_transpose_to_bnsh", perm=[0, 2, 1, 3]), + helper.make_node( + "Div", + ["q_4d_bnsh", "q_scale"], + ["q_div_output"], + "q_div_by_sqrt_head_size", + ), + ] + k_nodes = [ + helper.make_node( + "MatMul", + ["layernorm_add_output_to_matmul", "k_weight"], + ["k_matmul_output"], + "k_path_matmul", + ), + helper.make_node("Add", ["k_bias", "k_matmul_output"], ["k_add_output"], "k_path_add"), + helper.make_node("Reshape", ["k_add_output", "k_attn_heads_output"], ["k_4d_bsnh"], "k_reshape_to_4d"), + helper.make_node("Transpose", ["k_4d_bsnh"], ["k_4d_bnsh"], "k_transpose_to_bnsh", perm=[0, 2, 1, 3]), + helper.make_node( + "Concat", + ["gather_past_k_output", "k_4d_bnsh"], + ["oup_cache_k"], + "concat_past_k_and_curr_k", + axis=2, + ), + helper.make_node( + "Transpose", + ["oup_cache_k"], + ["k_output_transpose"], + "k_transpose_last_two_dims", + perm=[0, 1, 3, 2], + ), + ] + v_nodes = [ + 
helper.make_node( + "MatMul", + ["layernorm_add_output_to_matmul", "v_weight"], + ["v_matmul_output"], + "v_path_matmul", + ), + helper.make_node("Add", ["v_bias", "v_matmul_output"], ["v_add_output"], "v_path_add"), + helper.make_node("Reshape", ["v_add_output", "v_attn_heads_output"], ["v_4d_bsnh"], "v_reshape_to_4d"), + helper.make_node("Transpose", ["v_4d_bsnh"], ["v_4d_bnsh"], "v_transpose_to_bnsh", perm=[0, 2, 1, 3]), + helper.make_node( + "Concat", + ["gather_past_v_output", "v_4d_bnsh"], + ["oup_cache_v"], + "concat_past_v_and_curr_v", + axis=2, + ), + ] + pos_embed = [ + helper.make_node("Reshape", ["q_div_output", "position_embed_output"], ["reshape_pos_emb"], "r_pos_emb"), + helper.make_node( + "Transpose", ["reshape_pos_emb"], ["transpose_reshape_pos_emb"], "p_transpose", perm=[1, 0, 2] + ), + helper.make_node( + "MatMul", + ["transpose_reshape_pos_emb", "transpose_reshape_pos_emb"], + ["pos_matmul"], + "pos_embed_matmul", + ), + helper.make_node( + "Transpose", ["pos_matmul"], ["transpose_pos_matmul"], "p_matmul_transpose", perm=[1, 0, 2] + ), + helper.make_node( + "Reshape", + ["transpose_pos_matmul", "position_embed_output"], + ["reshape_position_emb"], + "final_reshape_pos_emb", + ), + ] + nodes.extend(q_nodes) + nodes.extend(k_nodes) + nodes.extend(v_nodes) + nodes.extend(pos_embed) + + # Create nodes used with qkv concats, reshapes, and transposes + nodes.extend( + [ + helper.make_node("Shape", ["layernorm_add_output_to_matmul"], ["shape_output"], "shape", start=0), + helper.make_node("Gather", ["shape_output", "idx_0"], ["gather_0_output"], "gather_0", axis=0), + helper.make_node( + "Mul", + ["gather_0_output", "num_heads_int"], + ["mul_attn_heads_output"], + "mul_num_heads", + ), + helper.make_node( + "Unsqueeze", + ["mul_attn_heads_output", "unsqueeze_axes_input"], + ["unsqueeze_position_embed"], + "unsqueeze_position_embed", + ), + helper.make_node( + "Concat", + ["unsqueeze_position_embed", "neg_one", "head_size"], + ["position_embed_output"], + "position_embed_concat_output", + axis=0, + ), + helper.make_node( + "Unsqueeze", + ["gather_0_output", "unsqueeze_axes_input"], + ["unsqueeze_attn_heads_output"], + "unsqueeze_num_heads", + ), + helper.make_node( + "Concat", + ["unsqueeze_attn_heads_output", "neg_one", "head_size", "q_bsnh_reshape"], + ["q_attn_heads_output"], + "q_num_heads", + axis=0, + ), + helper.make_node( + "Concat", + ["unsqueeze_attn_heads_output", "neg_one", "head_size", "q_bsnh_reshape"], + ["k_attn_heads_output"], + "k_num_heads", + axis=0, + ), + helper.make_node( + "Concat", + ["unsqueeze_attn_heads_output", "neg_one", "head_size", "q_bsnh_reshape"], + ["v_attn_heads_output"], + "v_num_heads", + axis=0, + ), + helper.make_node( + "Concat", + ["unsqueeze_attn_heads_output", "neg_one", "head_size"], + ["bsd_format"], + axis=0, + ), + helper.make_node( + "Constant", + inputs=[], + outputs=["q_bsnh_reshape"], + value=numpy_helper.from_array( + np.array([0, 0, num_heads, head_size], dtype="int64"), name="const_tensor" + ), + ), + ] + ) + + nodes.extend( + [ + helper.make_node("Gather", ["inp_cache_v", "idx_0"], ["gather_past_v_output"], "gather_past_v", axis=0), + helper.make_node("Gather", ["inp_cache_k", "idx_0"], ["gather_past_k_output"], "gather_past_k", axis=0), + ] + ) + + # Compute Q x K' + nodes.extend( + [ + helper.make_node( + "MatMul", + [ + "q_div_output", + "k_output_transpose", + ], + ["qk_output"], + "matmul_qk", + ) + ] + ) + + # Create nodes for computing softmax(Q x K') x V + nodes.extend( + [ + helper.make_node( + "Add", + [ + 
"qk_output", + "reshape_position_emb", + ], + ["add_qk_output"], + "add_qk", + ), + helper.make_node( + "Softmax", + ["add_qk_output"], + ["softmax_output"], + "softmax_qk", + axis=2, + ), + helper.make_node( + "MatMul", + ["softmax_output", "oup_cache_v"], + ["qkv_output_(num_heads*batch_size,seq_len,head_size)"], + "matmul_qkv", + ), + helper.make_node( + "Transpose", + ["qkv_output_(num_heads*batch_size,seq_len,head_size)"], + ["qkv_bsnh"], + "transpose_bnsh_to_bsnh", + perm=[0, 2, 1, 3], + ), + helper.make_node("Reshape", ["qkv_bsnh", "bsd_format"], ["attn_output"], "qkv_bsd"), + ] + ) + + # Create final nodes to conclude attention + nodes.append( + helper.make_node( + "MatMul", + ["attn_output", "matmul_after_attn_initializer"], + ["matmul_after_attn_output"], + "matmul_after_attn", + ), + ) + if not fused: + next_sln_inputs = [ + "layernorm_add_output_to_skiplayernorm", + "add_after_attn_output", + "layernorm_weight", + "layernorm_bias", + ] + nodes.extend( + [ + helper.make_node( + "Add", + ["add_after_attn_initializer", "matmul_after_attn_output"], + ["add_after_attn_output"], + "add_after_attn", + ), + helper.make_node( + "SkipLayerNormalization", + next_sln_inputs, + ["output_0", "", "", "output_1"], + "next_skiplayernorm", + domain="com.microsoft", + epsilon=epsilon, + ), + ] + ) + else: + next_sln_inputs = [ + "matmul_after_attn_output", + "layernorm_add_output_to_skiplayernorm", + "layernorm_weight", + "layernorm_bias", + "add_after_attn_initializer", + ] + nodes.append( + helper.make_node( + "SkipLayerNormalization", + next_sln_inputs, + ["output_0", "", "", "output_1"], + "SkipLayerNorm_AddBias_0", + domain="com.microsoft", + epsilon=epsilon, + ) + ) + + # Create initializers + v_weight, v_weight_data = get_tensor_and_weight("v_weight", [hidden_size, hidden_size]) + v_bias, v_bias_data = get_tensor_and_weight("v_bias", [hidden_size]) + q_weight, q_weight_data = get_tensor_and_weight("q_weight", [hidden_size, hidden_size]) + q_bias, q_bias_data = get_tensor_and_weight("q_bias", [hidden_size]) + k_weight, k_weight_data = get_tensor_and_weight("k_weight", [hidden_size, hidden_size]) + k_bias, k_bias_data = get_tensor_and_weight("k_bias", [hidden_size]) + + qkv_bias = helper.make_tensor( + "Attention_0_qkv_bias", + TensorProto.FLOAT, + [3 * hidden_size], + q_bias_data + k_bias_data + v_bias_data, + ) + initializers = [ + float_tensor("layernorm_weight", [hidden_size]), + float_tensor("layernorm_bias", [hidden_size]), + float_tensor("matmul_after_attn_initializer", [hidden_size, hidden_size]), + float_tensor("add_after_attn_initializer", [hidden_size]), + ] + + # Add Q/K/V weight tensors as initializers + if fused: + initializers.extend([q_weight, k_weight, v_weight]) + initializers.extend([q_bias]) + initializers.append(qkv_bias) + initializers.extend( + [ + numpy_helper.from_array(np.array(num_heads, dtype="int64"), name="num_heads_int"), + numpy_helper.from_array(np.array([head_size], dtype="int64"), name="head_size"), + numpy_helper.from_array(np.array(1 / np.sqrt(head_size), dtype="float32"), name="q_scale"), + numpy_helper.from_array(np.array(0, dtype="int64"), name="idx_0"), + numpy_helper.from_array(np.array([-1], dtype="int64"), name="neg_one"), + numpy_helper.from_array(np.array([0], dtype="int64"), name="unsqueeze_axes_input"), + numpy_helper.from_array(np.array([0, 0, num_heads, head_size], dtype="int64"), name="q_bsnh_reshape"), + ] + ) + else: + initializers.extend([q_weight, k_weight, v_weight]) + + initializers.extend([q_bias, k_bias, v_bias]) + + 
initializers.extend( + [ + numpy_helper.from_array(np.array(num_heads, dtype="int64"), name="num_heads_int"), + numpy_helper.from_array(np.array([num_heads], dtype="int64"), name="num_heads"), + numpy_helper.from_array(np.array([head_size], dtype="int64"), name="head_size"), + numpy_helper.from_array(np.array([hidden_size], dtype="int64"), name="hidden_size"), + numpy_helper.from_array(np.array(1 / np.sqrt(head_size), dtype="float32"), name="q_scale"), + numpy_helper.from_array(np.array(0, dtype="int64"), name="idx_0"), + numpy_helper.from_array(np.array(1, dtype="int64"), name="idx_1"), + numpy_helper.from_array(np.array([-1], dtype="int64"), name="neg_one"), + numpy_helper.from_array(np.array([0], dtype="int64"), name="unsqueeze_axes_input"), + ] + ) + + # Construct graph + graph = helper.make_graph(nodes, "conformer_self_mha_graph", inputs, outputs, initializers, doc_string="conformer") + opsetid = helper.make_opsetid("ai.onnx", min(onnx.defs.onnx_opset_version(), 16)) + return helper.make_model(graph, opset_imports=(opsetid,)) + + +if __name__ == "__main__": + np.random.seed(2) + num_heads = 8 + hidden_size = 512 + + model = create_conformer_attention(num_heads=num_heads, hidden_size=hidden_size) + onnx.save(model, "conformer_self_mha.onnx") + + model = create_conformer_attention(num_heads=num_heads, hidden_size=hidden_size, fused=True) + onnx.save(model, "./test_data/models/conformer/conformer_self_mha_fused.onnx") diff --git a/onnxruntime/test/python/transformers/test_conformer.py b/onnxruntime/test/python/transformers/test_conformer.py new file mode 100644 index 0000000000000..471ba9756bcf8 --- /dev/null +++ b/onnxruntime/test/python/transformers/test_conformer.py @@ -0,0 +1,69 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +import os +import unittest + +import onnx +from conformer_model_generator import create_conformer_attention +from parity_utilities import find_transformers_source + +if find_transformers_source(): + from fusion_options import FusionOptions + from onnx_model import OnnxModel + from optimizer import optimize_model +else: + from onnxruntime.transformers.fusion_options import FusionOptions + from onnxruntime.transformers.onnx_model import OnnxModel + from onnxruntime.transformers.optimizer import optimize_model + + +class TestFusion(unittest.TestCase): + def verify_fusion(self, optimized_model, expected_model_filename): + optimized_model.topological_sort(is_deterministic=True) + + expected_model_path = os.path.join( + os.path.dirname(__file__), "test_data", "models", "conformer", expected_model_filename + ) + print("Expected model path = ", expected_model_path) + expected_model = OnnxModel(onnx.load(expected_model_path)) + expected_model.topological_sort(is_deterministic=True) + + nodes = optimized_model.model.graph.node + self.assertEqual(len(nodes), len(expected_model.model.graph.node)) + + for i in range(len(nodes)): + self.assertEqual(nodes[i], expected_model.model.graph.node[i]) + + for expected_initializer in expected_model.model.graph.initializer: + print("Expected initializer initial = ", expected_initializer.name) + self.assertTrue( + OnnxModel.has_same_value( + optimized_model.get_initializer(expected_initializer.name), expected_initializer + ) + ) + + def test_ct_mha_fusion(self): + num_heads = 8 + hidden_size = 512 + model = create_conformer_attention(num_heads=num_heads, hidden_size=hidden_size, add_before_layernorm=False) + dir = "." + model_path = os.path.join(dir, "conformer_self_mha.onnx") + onnx.save(model, model_path) + options = FusionOptions("conformer") + optimized_model = optimize_model( + model_path, + model_type="conformer", + num_heads=num_heads, + hidden_size=hidden_size, + optimization_options=options, + ) + os.remove(model_path) + self.verify_fusion(optimized_model, "conformer_self_mha_fused.onnx") + + +if __name__ == "__main__": + unittest.main() diff --git a/onnxruntime/test/python/transformers/test_data/models/conformer/conformer_self_mha_fused.onnx b/onnxruntime/test/python/transformers/test_data/models/conformer/conformer_self_mha_fused.onnx new file mode 100644 index 0000000000000000000000000000000000000000..9d882751db2652ef6df28981c680364c5bd62e55 GIT binary patch literal 4212207 zcmeF$O>Y~=8316>mMv;zMaN;sO;@){a86xk+ zoh0v^x1+7%-w$uPXVl92%f9OV+GXuKUlhH+pM3u3#pfsO?0J%Bz3$=rVJGd6vUacg z@E`yD(NA8MN}ueX^g2K8WGBPksCSx&$Kf@z3gy9kJ<-8nKGXTstNVkvpXAMXYwhl5 zN&eY+JNz`<9mJCrhpSIS_XhE5>D4fQzG$l5{H$@6a9Oq0fF{;ARK zApW=!KN)qK)nfc%I%+2UbP~po8?~Lsv&+Lu*iG8)-qYvlu=i-|W$9M1bOrG!d!E*A|0271ld!jzVQ*axduwCZ-yoG2%P}vmj(M?h z%&#U1S*#I`oWw`oa^xai1u8z6aUnS~t zf)3^hDos)}NzYf|=22eJ6@|%^XSG`s{vy1~?pMzE zJZv22rf@&F6q;o=st-@IZqi;B#2du_zolyx8V;^#3kO%Iga@DIdD@*KIEn|Yi&?2} zg^jZc56+4mA}u&1qdacSt7E-&Q8UrrwAg3+Vzk#g@5d*}Ni&UGmoF}=dzU6qi;5j5 z&wgEO5X;s{@psQVvt8p+xhVdtyWVeKx9dgsX1mI4F4a5JtJ<9}CR@$ny>MrgC&PTU zulzEs-5c&?jc4&ubTD7{(PBd=FB)ohe>Dxs;a=EIo@JxiFuw~|p9uTQMZ{S*j~*89 zi0j*7b$`;lyt8(DvIDon=9|NV^X_PHo~F;!IC++iigAlqQ1ockTBuoyN*mQ1duqF1 zE=CH^!i~3uoo;#-_qu7=o$TS$9WdIwiB!M8{@L+KuX~c@^YGQ~{(O459<;{wplx2^ z_f{*M8oT^Lsor08uHv}1zE?fIX^G*z;ssc|pccg#ZF-Sa_ga_owsC!*hrJ?0^D4Ml z&{`|0Me$VkFBjUyW@KyQwKk5oDAD!tTARm9!sU_;AI`SQI62LWik}8L>lU0Q2aZ9rhYlp75IKLbh zXP)@z|38gIj|%gr#R;y6-Y9LK#+_yopAD0K^YE{g@_4&as*NjKTjQVkF9r-SzyJdb 
z3^2d|0}L?000Rs#zyJdbFu(u<3^2d|0}L?000Rs#zyJdbFu(u<3^2d|0}L?000Rs# zzyJdbFu(u<3^2d|0}L?000Rs#zyJdbFu(u<3^2d|0}L?000Rs#zyJdbFu(u<3^2d| z0}L?000Rs#zyJdbFu(u<3^2d|0}L?000Rs#zyJdbFu(u<3^2d|0}L?000Rs#zyJdb zFu(u<3^2d|0}L?000Rs#zyJdbFu(u<3^2d|0}L?000Rs#zyJdbFu(u<3^2d|0}L?0 z00Rs#zyJdbFu(u<3^2d|0}L?000Rs#zyJdbFu(u<3^2d|0}L?000Rs#zyJdbFu(u< z3^2d|0}L?000Rs#zyJdbFu(u<3^2d|0}L?000Rs#zyJdbFu(u<3^2d|0}L?000Rs# zzyJdbFu(u<3^2d|0}L?000Rs#zyJdbFu(u<3^2d|0}L?000Rs#zyJdbFu(u<3^2d| z0}L?000Rs#zyJdbFu(u<3^2d|0}L?000Rs#zyJdbFu(u<3^2d|0}L?000Rs#zyJdb zFu(u<3^2d|0}L?000Rs#zyJdbFu(u<3^2d|0}L?000Rs#zyJdbFu(u<3^2d|0}L?0 z00Rs#zyJdbFu(u<3^2d|0}L?000Rs#zyJdbFu(u<3^2d|0}L?000Rs#zyJdbFu(u< z3^2d|0}L?000Rs#zyJdbFu(u<3^2d|0}L?000Rs#zyJdbFu(u<3^2d|0}L?000Rs# zzyJdbFu(u<3^2d|0}L?000Rs#zyJdbFu(u<3^2d|0}L?000Rs#zyJdbFu(u<3^2d| z0}L?000Rs#zyJdbFu(u<3^2d|0}L?000Rs#zyJdbFu(u<3^2d|0}L?000aMP*M54M z2Ezb=GelPknmS2PiG&aivV+c;cr>_^*~xh1@}?V=HAu(U9|b|G_|W?bC3W6~nw&ghuEejW0hCY$Vm@<6?9ix>(9!y5A%bw&}hvnEY#PhC8|Ys72Y?t|2nQttAHdolW4M)vk^B#r+qsl6LVz0t57%G~zYZCNx*#YMQ_46~$SS`~ig*z0v>x literal 0 HcmV?d00001 From dc9ab4f8213cbef1a0ca93d2630b77fbc13d4da3 Mon Sep 17 00:00:00 2001 From: Changming Sun Date: Sun, 19 Nov 2023 22:06:32 -0800 Subject: [PATCH 35/38] Update setup.py: replace libcudart.so.12.0 with libcudart.so.12 (#18501) --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index da4943c4ef7ae..798c8c4b2895b 100644 --- a/setup.py +++ b/setup.py @@ -196,7 +196,7 @@ def run(self): "libcublasLt.so.11", "libcublasLt.so.12", "libcudart.so.11.0", - "libcudart.so.12.0", + "libcudart.so.12", "libcudnn.so.8", "libcufft.so.10", "libcufft.so.11", From 3bcc137eb423ada476118949876611be87636bb4 Mon Sep 17 00:00:00 2001 From: Wei-Sheng Chin Date: Sun, 19 Nov 2023 22:09:11 -0800 Subject: [PATCH 36/38] Tiny change to trigger the update of DORT's CI image (#18507) Recent PyTorch breaks DORT CI and [a patch](https://github.com/pytorch/pytorch/pull/113697) has been merged into PyTorch main. In order to update DORT's CI, we made dummy change in this PR. --- orttraining/orttraining/test/python/orttraining_test_dort.py | 1 + .../github/linux/docker/scripts/manylinux/install_deps_lort.sh | 2 ++ 2 files changed, 3 insertions(+) diff --git a/orttraining/orttraining/test/python/orttraining_test_dort.py b/orttraining/orttraining/test/python/orttraining_test_dort.py index 88d9c00984d3e..2a7012787be6e 100644 --- a/orttraining/orttraining/test/python/orttraining_test_dort.py +++ b/orttraining/orttraining/test/python/orttraining_test_dort.py @@ -19,6 +19,7 @@ class TestTorchDynamoOrt(unittest.TestCase): def setUp(self): # Make computation deterministic. torch.manual_seed(42) + print(f"TestTorchDynamoOrt uses PyTorch version {torch.__version__}") def test_elementwise_model(self): torch._dynamo.reset() diff --git a/tools/ci_build/github/linux/docker/scripts/manylinux/install_deps_lort.sh b/tools/ci_build/github/linux/docker/scripts/manylinux/install_deps_lort.sh index 3bca6413100a2..da8a45e00cc90 100755 --- a/tools/ci_build/github/linux/docker/scripts/manylinux/install_deps_lort.sh +++ b/tools/ci_build/github/linux/docker/scripts/manylinux/install_deps_lort.sh @@ -19,7 +19,9 @@ fi export ONNX_ML=1 export CMAKE_ARGS="-DONNX_GEN_PB_TYPE_STUBS=OFF -DONNX_WERROR=OFF" +# This may install PyTorch, which will be overrided by the PyTorch local build below. 
/opt/python/cp39-cp39/bin/python3.9 -m pip install transformers + # beartype is installed here so that onnxscript installation step won't # install a version PyTorch doesn't like. Once beartype fixes this problem. # We can remove this line. From aa673b6ff8a869d5001a5c045ed6cc59f8fc14ac Mon Sep 17 00:00:00 2001 From: adrianlizarraga Date: Mon, 20 Nov 2023 10:11:28 -0800 Subject: [PATCH 37/38] Treat initializers for Add, Sub, Mul, Div as weights --- .../tools/quantization/execution_providers/qnn/quant_config.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/onnxruntime/python/tools/quantization/execution_providers/qnn/quant_config.py b/onnxruntime/python/tools/quantization/execution_providers/qnn/quant_config.py index 5ab2abb504421..0d98e07804932 100644 --- a/onnxruntime/python/tools/quantization/execution_providers/qnn/quant_config.py +++ b/onnxruntime/python/tools/quantization/execution_providers/qnn/quant_config.py @@ -14,6 +14,7 @@ Q16_TYPES = {QuantType.QInt16, QuantType.QUInt16} Q8_TYPES = {QuantType.QInt8, QuantType.QUInt8} OP_TYPES_TO_EXCLUDE = {"Cast"} +OP_TYPES_INITIALIZER_AS_WEIGHTS = {"MatMul", "Add", "Sub", "Mul", "Div"} def get_qnn_qdq_config( @@ -34,7 +35,7 @@ def get_qnn_qdq_config( for node in model.graph.node: op_types.add(node.op_type) - if node.op_type == "MatMul" and activation_type in Q16_TYPES and weight_type in Q8_TYPES: + if node.op_type in OP_TYPES_INITIALIZER_AS_WEIGHTS and activation_type in Q16_TYPES and weight_type in Q8_TYPES: weight_symmetric = weight_type == QuantType.QInt8 # Override initializers to use the weight_type From 476f10a8da411e50b78912a5acf9364bd07edac0 Mon Sep 17 00:00:00 2001 From: adrianlizarraga Date: Mon, 20 Nov 2023 10:15:52 -0800 Subject: [PATCH 38/38] Add qnn quantization directories to python package --- cmake/onnxruntime_python.cmake | 8 ++++++++ setup.py | 1 + 2 files changed, 9 insertions(+) diff --git a/cmake/onnxruntime_python.cmake b/cmake/onnxruntime_python.cmake index 345ef2b504aa4..b93ccf77d52a2 100644 --- a/cmake/onnxruntime_python.cmake +++ b/cmake/onnxruntime_python.cmake @@ -453,6 +453,9 @@ file(GLOB onnxruntime_python_quantization_operators_src CONFIGURE_DEPENDS file(GLOB onnxruntime_python_quantization_cal_table_flatbuffers_src CONFIGURE_DEPENDS "${ONNXRUNTIME_ROOT}/python/tools/quantization/CalTableFlatBuffers/*.py" ) +file(GLOB onnxruntime_python_quantization_ep_qnn_src CONFIGURE_DEPENDS + "${ONNXRUNTIME_ROOT}/python/tools/quantization/execution_providers/qnn/*.py" +) file(GLOB onnxruntime_python_transformers_src CONFIGURE_DEPENDS "${ONNXRUNTIME_ROOT}/python/tools/transformers/*.py" ) @@ -547,6 +550,8 @@ add_custom_command( COMMAND ${CMAKE_COMMAND} -E make_directory $/onnxruntime/quantization COMMAND ${CMAKE_COMMAND} -E make_directory $/onnxruntime/quantization/operators COMMAND ${CMAKE_COMMAND} -E make_directory $/onnxruntime/quantization/CalTableFlatBuffers + COMMAND ${CMAKE_COMMAND} -E make_directory $/onnxruntime/quantization/execution_providers + COMMAND ${CMAKE_COMMAND} -E make_directory $/onnxruntime/quantization/execution_providers/qnn COMMAND ${CMAKE_COMMAND} -E make_directory $/quantization COMMAND ${CMAKE_COMMAND} -E make_directory $/transformers COMMAND ${CMAKE_COMMAND} -E make_directory $/transformers/test_data/models @@ -617,6 +622,9 @@ add_custom_command( COMMAND ${CMAKE_COMMAND} -E copy ${onnxruntime_python_quantization_cal_table_flatbuffers_src} $/onnxruntime/quantization/CalTableFlatBuffers/ + COMMAND ${CMAKE_COMMAND} -E copy + ${onnxruntime_python_quantization_ep_qnn_src} + 
$/onnxruntime/quantization/execution_providers/qnn/ COMMAND ${CMAKE_COMMAND} -E copy ${onnxruntime_python_transformers_src} $/onnxruntime/transformers/ diff --git a/setup.py b/setup.py index 798c8c4b2895b..2ede39915cc8d 100644 --- a/setup.py +++ b/setup.py @@ -408,6 +408,7 @@ def finalize_options(self): "onnxruntime.quantization", "onnxruntime.quantization.operators", "onnxruntime.quantization.CalTableFlatBuffers", + "onnxruntime.quantization.execution_providers.qnn", "onnxruntime.transformers", "onnxruntime.transformers.models.bart", "onnxruntime.transformers.models.bert",