From f0a36773531bc553de2167a88dae38a6ff19a3fc Mon Sep 17 00:00:00 2001 From: Kaiyu Xie <26294424+kaiyux@users.noreply.github.com> Date: Tue, 20 Aug 2024 02:59:15 +0000 Subject: [PATCH] open source 49402939d007b39393cabaa8fe96c110d16f5b35 --- .gitignore | 4 +- README.md | 9 +- benchmarks/README.md | 2 +- benchmarks/Suite.md | 316 +++++++++ benchmarks/cpp/gptManagerBenchmark.cpp | 109 ++- benchmarks/python/all_reduce.py | 25 +- benchmarks/suite/README.md | 234 ------- benchmarks/suite/requirements.txt | 3 - .../suite/tensorrt_llm_bench/__init__.py | 1 - .../suite/tensorrt_llm_bench/benchmark.py | 125 ---- .../benchmarkers/__init__.py | 30 - .../benchmarkers/pybind_executor.py | 146 ---- .../tensorrt_llm_bench/benchmarkers/static.py | 208 ------ benchmarks/suite/tensorrt_llm_bench/ifb.py | 338 --------- benchmarks/suite/tensorrt_llm_bench/static.py | 69 -- .../tensorrt_llm_bench/utils/benchmarkers.py | 15 - .../tensorrt_llm_bench/utils/dataclasses.py | 189 ----- .../tensorrt_llm_bench/utils/trtllm_config.py | 323 --------- cpp/CMakeLists.txt | 2 +- .../batch_manager/inferenceRequest.h | 17 + .../tensorrt_llm/batch_manager/llmRequest.h | 127 +++- .../batch_manager/trtGptModelOptionalParams.h | 26 +- cpp/include/tensorrt_llm/executor/executor.h | 258 +++++-- .../tensorrt_llm/executor/serialization.h | 10 + cpp/include/tensorrt_llm/executor/types.h | 16 + .../tensorrt_llm/runtime/decodingInput.h | 9 +- .../tensorrt_llm/runtime/decodingOutput.h | 16 +- cpp/include/tensorrt_llm/runtime/gptDecoder.h | 8 +- .../tensorrt_llm/runtime/gptDecoderBatched.h | 11 + cpp/include/tensorrt_llm/runtime/iBuffer.h | 7 + .../tensorrt_llm/runtime/iGptDecoderBatched.h | 7 + .../runtime/iStatefulGptDecoder.h | 5 + cpp/include/tensorrt_llm/runtime/iTensor.h | 1 + .../tensorrt_llm/runtime/lookaheadBuffers.h | 83 +++ .../runtime/speculativeDecodingMode.h | 3 +- cpp/tensorrt_llm/CMakeLists.txt | 4 +- .../libtensorrt_llm_batch_manager_static.a | 4 +- ...sorrt_llm_batch_manager_static.pre_cxx11.a | 
4 +- .../aarch64-linux-gnu/version.txt | 6 +- .../libtensorrt_llm_batch_manager_static.a | 4 +- ...sorrt_llm_batch_manager_static.pre_cxx11.a | 4 +- .../tensorrt_llm_batch_manager_static.lib | 4 +- cpp/tensorrt_llm/common/cublasMMWrapper.cpp | 11 +- cpp/tensorrt_llm/common/cublasMMWrapper.h | 3 - cpp/tensorrt_llm/common/mpiUtils.cpp | 6 + cpp/tensorrt_llm/common/safetensors.cpp | 5 +- .../libtensorrt_llm_executor_static.a | 4 +- ...ibtensorrt_llm_executor_static.pre_cxx11.a | 4 +- .../executor/aarch64-linux-gnu/version.txt | 6 +- .../libtensorrt_llm_executor_static.a | 4 +- ...ibtensorrt_llm_executor_static.pre_cxx11.a | 4 +- .../tensorrt_llm_executor_static.lib | 4 +- .../aarch64-linux-gnu/version.txt | 2 +- .../tensorrt_llm_nvrtc_wrapper.dll | 2 +- cpp/tensorrt_llm/kernels/decodingCommon.h | 24 +- cpp/tensorrt_llm/kernels/gptKernels.cu | 57 +- cpp/tensorrt_llm/kernels/gptKernels.h | 18 +- cpp/tensorrt_llm/kernels/groupGemm.cu | 9 +- cpp/tensorrt_llm/kernels/lora/lora.cpp | 18 +- .../kernels/mixtureOfExperts/moe_kernels.cu | 17 +- cpp/tensorrt_llm/kernels/splitkGroupGemm.cu | 9 +- .../kernels/unfusedAttentionKernels.cu | 16 +- cpp/tensorrt_llm/layers/decodingLayer.cpp | 10 +- cpp/tensorrt_llm/layers/decodingParams.h | 50 +- .../layers/lookaheadAlgorithm.cpp | 7 +- .../layers/lookaheadDecodingLayer.cpp | 246 +++++-- .../layers/lookaheadDecodingLayer.h | 17 +- .../layers/lookaheadDecodingUtils.h | 30 +- cpp/tensorrt_llm/plugins/CMakeLists.txt | 6 +- cpp/tensorrt_llm/plugins/common/plugin.cpp | 83 ++- cpp/tensorrt_llm/plugins/common/plugin.h | 3 + .../plugins/gemmPlugin/gemmPlugin.cpp | 10 +- .../plugins/gemmPlugin/gemmPlugin.h | 2 + .../gptAttentionCommon/gptAttentionCommon.cpp | 26 +- .../gptAttentionPlugin/gptAttentionPlugin.cpp | 3 +- .../plugins/loraPlugin/loraPlugin.cpp | 24 +- .../plugins/loraPlugin/loraPlugin.h | 5 +- .../mixtureOfExpertsPlugin.cpp | 5 + .../plugins/ncclPlugin/allreducePlugin.cpp | 12 +- .../plugins/ncclPlugin/recvPlugin.cpp | 3 + 
.../plugins/ncclPlugin/sendPlugin.cpp | 3 + .../pybind/batch_manager/llmRequest.cpp | 25 +- .../pybind/batch_manager/llmRequest.h | 5 +- cpp/tensorrt_llm/pybind/executor/bindings.cpp | 198 +++--- cpp/tensorrt_llm/pybind/executor/executor.cpp | 12 +- cpp/tensorrt_llm/pybind/executor/executor.h | 2 +- cpp/tensorrt_llm/runtime/CMakeLists.txt | 3 +- cpp/tensorrt_llm/runtime/gptDecoder.cpp | 71 +- .../runtime/gptDecoderBatched.cpp | 41 +- cpp/tensorrt_llm/runtime/gptSession.cpp | 3 +- cpp/tensorrt_llm/runtime/ipcUtils.cpp | 18 +- cpp/tensorrt_llm/runtime/lookaheadBuffers.cpp | 153 +++++ .../runtime/statefulGptDecoder.cpp | 10 +- cpp/tensorrt_llm/runtime/utils/numpyUtils.cpp | 16 +- cpp/tensorrt_llm/runtime/utils/numpyUtils.h | 3 +- cpp/tests/CMakeLists.txt | 3 +- cpp/tests/kernels/ropeTest.cu | 17 +- cpp/tests/layers/lookaheadAlgorithmTest.cpp | 4 +- .../layers/lookaheadDecodingLayerTest.cpp | 161 +++-- cpp/tests/layers/randomLlm.cpp | 13 + cpp/tests/layers/randomLlm.h | 2 + .../data/test_model_lora_config.json | 3 +- .../scripts/build_chatglm_engines.py | 4 +- .../resources/scripts/build_engines_utils.py | 9 +- .../resources/scripts/build_gpt_engines.py | 40 +- .../resources/scripts/build_gptj_engines.py | 6 +- .../resources/scripts/build_llama_engines.py | 30 +- .../resources/scripts/build_mamba_engines.py | 4 +- .../resources/scripts/build_medusa_engines.py | 2 +- .../scripts/build_recurrentgemma_engines.py | 2 +- .../generate_expected_chatglm_output.py | 4 +- .../scripts/generate_expected_gpt_output.py | 12 +- .../scripts/generate_expected_gptj_output.py | 6 +- .../scripts/generate_expected_llama_output.py | 2 +- .../scripts/generate_expected_mamba_output.py | 4 +- .../generate_expected_medusa_output.py | 2 +- ...generate_expected_recurrentgemma_output.py | 2 +- cpp/tests/resources/scripts/test_cpp.py | 23 +- cpp/tests/runtime/gptDecoderTest.cpp | 12 +- cpp/tests/runtime/utilsTest.cpp | 119 ++++ .../architecture/model-weights-loader.md | 254 +++++++ 
docs/source/conf.py | 6 + docs/source/executor.md | 6 +- docs/source/generate_examples.py | 50 ++ .../high-level-api-examples/advanced.md | 140 ++++ .../examples_index.template.rst | 8 + .../high-level-api-examples/introduction.md | 46 ++ docs/source/index.rst | 24 + docs/source/media/picture-08-06-2024.png | Bin 354043 -> 0 bytes docs/source/media/picture-08-13-2024.png | Bin 0 -> 390124 bytes .../source/performance/perf-best-practices.md | 4 +- docs/source/performance/perf-overview.md | 22 +- docs/source/quick-start-guide.md | 30 + examples/apps/requirements.txt | 1 + examples/baichuan/requirements.txt | 2 +- examples/bindings/executor/README.md | 10 + examples/bindings/executor/example_debug.py | 52 ++ .../executor/example_logits_processor.py | 6 +- examples/bloom/requirements.txt | 2 +- examples/chatglm/requirements.txt | 2 +- examples/cpp/executor/CMakeLists.txt | 3 + examples/cpp/executor/README.md | 10 + .../cpp/executor/executorExampleBasic.cpp | 2 +- .../cpp/executor/executorExampleDebug.cpp | 65 ++ .../executorExampleLogitsProcessor.cpp | 9 +- examples/dbrx/requirements.txt | 2 +- examples/dit/sample.py | 4 +- examples/falcon/requirements.txt | 2 +- examples/gemma/requirements.txt | 2 +- examples/gpt/requirements.txt | 2 +- examples/gptj/requirements.txt | 2 +- examples/gptneox/requirements.txt | 2 +- examples/grok/requirements.txt | 2 +- examples/high-level-api/README.md | 13 +- examples/high-level-api/requirements.txt | 2 +- examples/internlm/README.md | 2 +- examples/internlm/requirements.txt | 2 +- examples/jais/requirements.txt | 2 +- examples/llama/README.md | 4 +- examples/llama/requirements.txt | 2 +- examples/mamba/README.md | 34 +- examples/mamba/convert_checkpoint.py | 75 +- examples/mamba/requirements.txt | 2 +- examples/medusa/requirements.txt | 2 +- examples/mixtral/requirements.txt | 2 +- examples/model_api/README.md | 27 + examples/mpt/requirements.txt | 2 +- examples/multimodal/README.md | 14 +- examples/nemotron/requirements.txt | 2 +- 
examples/opt/requirements.txt | 2 +- examples/phi/requirements.txt | 2 +- examples/quantization/requirements.txt | 2 +- examples/qwen/requirements.txt | 2 +- examples/qwenvl/requirements.txt | 2 +- examples/recurrentgemma/requirements.txt | 2 +- examples/run.py | 6 + examples/skywork/requirements.txt | 2 +- examples/smaug/requirements.txt | 2 +- examples/summarize.py | 23 +- examples/utils.py | 7 + examples/whisper/requirements.txt | 2 +- requirements.txt | 3 + scripts/build_wheel.py | 6 +- setup.py | 6 +- tensorrt_llm/__init__.py | 3 + tensorrt_llm/_ipc_utils.py | 25 +- tensorrt_llm/bench/__init__.py | 0 tensorrt_llm/bench/build/__init__.py | 0 tensorrt_llm/bench/build/benchmark_config.yml | 69 ++ tensorrt_llm/bench/build/build.py | 273 ++++++++ tensorrt_llm/bench/build/utils.py | 22 + tensorrt_llm/bench/dataclasses.py | 99 +++ .../utils => tensorrt_llm/bench}/enums.py | 0 tensorrt_llm/bench/run/__init__.py | 0 tensorrt_llm/bench/run/dataclasses.py | 176 +++++ tensorrt_llm/bench/run/run.py | 431 ++++++++++++ tensorrt_llm/bench/run/utils.py | 133 ++++ .../bench}/utils/__init__.py | 6 +- tensorrt_llm/bench/utils/data.py | 137 ++++ tensorrt_llm/bench/utils/tokenize.py | 105 +++ tensorrt_llm/commands/bench.py | 42 ++ tensorrt_llm/commands/build.py | 17 +- tensorrt_llm/executor.py | 27 +- tensorrt_llm/functional.py | 13 +- tensorrt_llm/hlapi/llm.py | 3 +- tensorrt_llm/hlapi/llm_utils.py | 49 +- tensorrt_llm/hlapi/utils.py | 10 - tensorrt_llm/layers/attention.py | 4 +- tensorrt_llm/layers/embedding.py | 14 +- tensorrt_llm/layers/linear.py | 30 +- tensorrt_llm/layers/lora.py | 12 +- tensorrt_llm/layers/mlp.py | 3 +- tensorrt_llm/layers/moe.py | 3 +- tensorrt_llm/layers/ssm.py | 47 +- tensorrt_llm/models/__init__.py | 2 + tensorrt_llm/models/automodel.py | 73 ++ tensorrt_llm/models/baichuan/model.py | 8 +- tensorrt_llm/models/bloom/model.py | 8 +- tensorrt_llm/models/chatglm/model.py | 8 +- tensorrt_llm/models/convert_utils.py | 14 +- tensorrt_llm/models/deci/__init__.py 
| 14 + tensorrt_llm/models/deci/config.py | 207 ++++++ tensorrt_llm/models/deci/convert.py | 365 ++++++++++ tensorrt_llm/models/deci/layer_config.py | 86 +++ tensorrt_llm/models/deci/model.py | 643 ++++++++++++++++++ tensorrt_llm/models/enc_dec/model.py | 4 - tensorrt_llm/models/generation_mixin.py | 16 +- tensorrt_llm/models/mamba/model.py | 33 +- tensorrt_llm/models/model_weights_loader.py | 27 +- tensorrt_llm/models/modeling_utils.py | 9 +- tensorrt_llm/models/opt/model.py | 8 +- tensorrt_llm/models/phi/model.py | 2 + tensorrt_llm/models/qwen/convert.py | 2 +- tensorrt_llm/models/recurrentgemma/model.py | 8 +- tensorrt_llm/plugin/plugin.py | 9 +- tensorrt_llm/quantization/functional.py | 31 + tensorrt_llm/quantization/layers.py | 91 ++- tensorrt_llm/runtime/enc_dec_model_runner.py | 4 +- tensorrt_llm/runtime/generation.py | 63 +- tensorrt_llm/runtime/model_runner.py | 17 + tensorrt_llm/runtime/model_runner_cpp.py | 6 + tensorrt_llm/version.py | 2 +- tests/bindings/test_executor_bindings.py | 226 +++++- tests/bindings/test_gpt_manager.py | 2 +- tests/functional/test_moe.py | 368 +++++++++- tests/functional/test_nccl.py | 30 +- tests/functional/test_reduce_norm.py | 122 ++-- tests/hlapi/apps/_test_llm_server.py | 7 +- tests/hlapi/test_llm.py | 16 +- tests/hlapi/test_llm_models.py | 19 +- tests/hlapi/test_llm_multi_gpu.py | 4 + tests/hlapi/test_llm_perf_evaluator.py | 6 +- tests/microbenchamarks/README.md | 2 + .../microbenchamarks/build_time_benchmark.py | 134 ++++ tests/model/test_decilm.py | 602 ++++++++++++++++ tests/model/test_llama.py | 15 +- tests/model/test_mamba.py | 12 +- tests/model/test_mistral.py | 2 - tests/utils/cpp_paths.py | 2 +- 259 files changed, 7861 insertions(+), 2845 deletions(-) create mode 100644 benchmarks/Suite.md delete mode 100644 benchmarks/suite/README.md delete mode 100644 benchmarks/suite/requirements.txt delete mode 100644 benchmarks/suite/tensorrt_llm_bench/__init__.py delete mode 100644 
benchmarks/suite/tensorrt_llm_bench/benchmark.py delete mode 100644 benchmarks/suite/tensorrt_llm_bench/benchmarkers/__init__.py delete mode 100644 benchmarks/suite/tensorrt_llm_bench/benchmarkers/pybind_executor.py delete mode 100644 benchmarks/suite/tensorrt_llm_bench/benchmarkers/static.py delete mode 100644 benchmarks/suite/tensorrt_llm_bench/ifb.py delete mode 100644 benchmarks/suite/tensorrt_llm_bench/static.py delete mode 100644 benchmarks/suite/tensorrt_llm_bench/utils/benchmarkers.py delete mode 100644 benchmarks/suite/tensorrt_llm_bench/utils/dataclasses.py delete mode 100644 benchmarks/suite/tensorrt_llm_bench/utils/trtllm_config.py create mode 100644 cpp/include/tensorrt_llm/runtime/lookaheadBuffers.h create mode 100644 cpp/tensorrt_llm/runtime/lookaheadBuffers.cpp create mode 100644 cpp/tests/runtime/utilsTest.cpp create mode 100644 docs/source/architecture/model-weights-loader.md create mode 100644 docs/source/generate_examples.py create mode 100644 docs/source/high-level-api-examples/advanced.md create mode 100644 docs/source/high-level-api-examples/examples_index.template.rst create mode 100644 docs/source/high-level-api-examples/introduction.md delete mode 100644 docs/source/media/picture-08-06-2024.png create mode 100644 docs/source/media/picture-08-13-2024.png create mode 100644 examples/bindings/executor/example_debug.py create mode 100644 examples/cpp/executor/executorExampleDebug.cpp create mode 100644 tensorrt_llm/bench/__init__.py create mode 100644 tensorrt_llm/bench/build/__init__.py create mode 100644 tensorrt_llm/bench/build/benchmark_config.yml create mode 100644 tensorrt_llm/bench/build/build.py create mode 100644 tensorrt_llm/bench/build/utils.py create mode 100644 tensorrt_llm/bench/dataclasses.py rename {benchmarks/suite/tensorrt_llm_bench/utils => tensorrt_llm/bench}/enums.py (100%) create mode 100644 tensorrt_llm/bench/run/__init__.py create mode 100644 tensorrt_llm/bench/run/dataclasses.py create mode 100644 
tensorrt_llm/bench/run/run.py create mode 100644 tensorrt_llm/bench/run/utils.py rename {benchmarks/suite/tensorrt_llm_bench => tensorrt_llm/bench}/utils/__init__.py (96%) create mode 100644 tensorrt_llm/bench/utils/data.py create mode 100644 tensorrt_llm/bench/utils/tokenize.py create mode 100644 tensorrt_llm/commands/bench.py create mode 100644 tensorrt_llm/models/automodel.py create mode 100644 tensorrt_llm/models/deci/__init__.py create mode 100644 tensorrt_llm/models/deci/config.py create mode 100644 tensorrt_llm/models/deci/convert.py create mode 100644 tensorrt_llm/models/deci/layer_config.py create mode 100644 tensorrt_llm/models/deci/model.py create mode 100644 tests/microbenchamarks/README.md create mode 100644 tests/microbenchamarks/build_time_benchmark.py create mode 100644 tests/model/test_decilm.py diff --git a/.gitignore b/.gitignore index 3cc202c38..d3ea24ec6 100644 --- a/.gitignore +++ b/.gitignore @@ -5,7 +5,9 @@ __pycache__/ *.cache *.nsys-rep .VSCodeCounter -build*/ +cpp/build* +build +!tensorrt_llm/bench/build !builders/ *.egg-info/ .coverage diff --git a/README.md b/README.md index e981b1d7e..12ebaee18 100644 --- a/README.md +++ b/README.md @@ -17,12 +17,15 @@ TensorRT-LLM
## Latest News -* [2024/08/06] 🗫 Multilingual Challenge Accepted 🗫 -🤖 #TensorRT #LLM boosts low-resource languages like Hebrew, Indonesian and Vietnamese ⚡[➡️ link](https://developer.nvidia.com/blog/accelerating-hebrew-llm-performance-with-nvidia-tensorrt-llm/?linkId=100000278659647) +* [2024/08/13] 🐍 DIY Code Completion with #Mamba ⚡ #TensorRT #LLM for speed 🤖 NIM for ease ☁️ deploy anywhere +[➡️ link](https://developer.nvidia.com/blog/revolutionizing-code-completion-with-codestral-mamba-the-next-gen-coding-llm/)
- +
+* [2024/08/06] 🗫 Multilingual Challenge Accepted 🗫 +🤖 #TensorRT #LLM boosts low-resource languages like Hebrew, Indonesian and Vietnamese ⚡[➡️ link](https://developer.nvidia.com/blog/accelerating-hebrew-llm-performance-with-nvidia-tensorrt-llm/?linkId=100000278659647) + * [2024/07/30] Introducing🍊 @SliceXAI ELM Turbo 🤖 train ELM once ⚡ #TensorRT #LLM optimize ☁️ deploy anywhere [➡️ link](https://developer.nvidia.com/blog/supercharging-llama-3-1-across-nvidia-platforms) diff --git a/benchmarks/README.md b/benchmarks/README.md index 575769842..00f450319 100644 --- a/benchmarks/README.md +++ b/benchmarks/README.md @@ -7,5 +7,5 @@ There are currently three workflows to benchmark TensorRT-LLM: - The recommended workflow that uses TensorRT-LLM C++ API and can take advantage of the latest features of TensorRT-LLM. * [Python benchmarks](./python) - The Python benchmarking scripts can only benchmark the Python runtime, which do not support the latest features, such as in-flight batching. -* [The Python benchmarking suite](./suite) +* [The Python benchmarking suite](./Suite.md) - This benchmarking suite is a current work in progress and is prone to large changes. diff --git a/benchmarks/Suite.md b/benchmarks/Suite.md new file mode 100644 index 000000000..f447b73e7 --- /dev/null +++ b/benchmarks/Suite.md @@ -0,0 +1,316 @@ +# TensorRT-LLM Benchmarking + +> [!WARNING] Work in Progress +> This benchmarking suite is a current work in progress and is prone to large changes. + +TensorRT-LLM provides a packaged benchmarking utility that is accessible via the `trtllm-bench` CLI tool. 
+ +#### Supported Networks for Benchmarking + +- [`tiiuae/falcon-180B`](https://huggingface.co/tiiuae/falcon-180B) +- [`meta-llama/Llama-2-7b-hf`](https://huggingface.co/meta-llama/Llama-2-7b-hf) +- [`meta-llama/Llama-2-70b-hf`](https://huggingface.co/meta-llama/Llama-2-70b-hf) +- [`meta-llama/Meta-Llama-3-8B`](https://huggingface.co/meta-llama/Meta-Llama-3-8B) +- [`meta-llama/Meta-Llama-3-70B`](https://huggingface.co/meta-llama/Meta-Llama-3-70B) +- [`EleutherAI/gpt-j-6b`](https://huggingface.co/EleutherAI/gpt-j-6b) + +#### Supported Quantization Modes + +TensorRT-LLM supports a number of quantization modes. For more information about quantization, see the +[documentation](https://nvidia.github.io/TensorRT-LLM/precision.html). + +- None (no quantization applied) +- W8A16 +- W4A16 +- W4A16_AWQ +- W4A8_AWQ +- W4A16_GPTQ +- FP8 +- INT8 + +> [!NOTE] Please see the supported quantization methods for each network [here](https://nvidia.github.io/TensorRT-LLM/precision.html#support-matrix) + + +## Inflight Benchmarking with a Dataset + +This section covers how to benchmark TensorRT-LLM using inflight batching. + + +### Quickstart + +For this quick start guide, we will focus on running a short max throughput benchmark on +`meta-llama/Llama-2-7b-hf` on a synthetic dataset with a uniform distribution of prompts with ISL:OSL +of 128:128. In order to run the benchmark from start to finish simply run the following commands: + +```shell +python benchmarks/cpp/prepare_dataset.py --stdout --tokenizer meta-llama/Llama-2-7b-hf token-norm-dist --input-mean 128 --output-mean 128 --input-stdev 0 --output-stdev 0 --num-requests 1400 > /tmp/synthetic_128_128.txt +trtllm-bench --model meta-llama/Llama-2-7b-hf build --dataset /tmp/synthetic_128_128.txt --quantization FP8 +trtllm-bench --model meta-llama/Llama-2-7b-hf throughput --dataset /tmp/synthetic_128_128.txt --engine-path /tmp/meta-llama/Llama-2-7b-hf/tp_1_pp_1 +``` + +And that's it! 
Once the benchmark completes, a summary will be printed with summary metrics. + +``` +=========================================================== += ENGINE DETAILS +=========================================================== +Model: meta-llama/Llama-2-7b-hf +Engine Directory: /tmp/meta-llama/Llama-2-7b-hf/tp_1_pp_1 +TensorRT-LLM Version: 0.12.0.dev2024073000 +Dtype: float16 +KV Cache Dtype: FP8 +Quantization: FP8 +Max Input Length: 2048 +Max Sequence Length: 4098 + +=========================================================== += WORLD + RUNTIME INFORMATION +=========================================================== +TP Size: 1 +PP Size: 1 +Max Runtime Batch Size: 4096 +Max Runtime Tokens: 8192 +Scheduling Policy: Guaranteed No Evict +KV Memory Percentage: 99.0% +Issue Rate (req/sec): 3.680275266452667e+18 +=========================================================== += STATISTICS +=========================================================== +Number of requests: 3000 +Average Input Length (tokens): 128.0 +Average Output Length (tokens): 128.0 +Token Throughput (tokens/sec): 23405.927228471104 +Request Throughput (req/sec): 182.8588064724305 +Total Latency (seconds): 16.406100739 +=========================================================== +``` + +### Workflow + +The workflow for `trtllm-bench` is composed of the following steps: + +1. Prepare a dataset to drive the inflight batching benchmark. +2. Build a benchmark engine using `trtllm-bench build` subcommand. +3. Run the max throughput benchmark using the `trtllm-bench throughput` subcommand. + +#### Preparing a Dataset + +The inflight benchmark utilizes a fixed JSON schema so that it is simple and +straightforward to specify requests. The schema is defined as follows: + +| Key | Required | Type | Description | +| :- | :-: | :-: | :- | +| `task_id`| Y | String | Unique identifier for the request. | +| `prompt` | N* | String | Input text for a generation request. 
| +| `logits` | N* | List[Integer] | List of logits that make up the request prompt. | +| `output_tokens` | Y | Integer | Number of generated tokens for this request. | + +> [!NOTE] Prompt and logits are mutually exclusive* +> While having both `prompt` and `logits` is not required, at least one is required. +> If `logits` are specified, the `prompt` entry is ignored for request generation. + +Examples of valid entries for the inflight benchmark are: + +- Entries with a human-readable prompt and no logits. +```json +{"task_id": 1, "prompt": "Generate an infinite response to the following: This is the song that never ends, it goes on and on my friend.", "output_tokens": 1000} +{"task_id": 2, "prompt": "Generate an infinite response to the following: Na, na, na, na", "output_tokens": 1000} +``` + +- Entries which contain logits. +```json +{"task_id":0,"logits":[863,22056,25603,11943,8932,13195,3132,25032,21747,22213],"output_tokens":128} +{"task_id":1,"logits":[14480,13598,15585,6591,1252,8259,30990,26778,7063,30065,21764,11023,1418],"output_tokens":128} +``` + +> [!INFO] A whole entry is on a line! +> To make the passing of data simpler, a complete JSON entry is on each line so that the benchmarker +> can simply read a line and assume a complete entry. When creating a dataset, be sure that a complete +> JSON entry is on every line. + +#### Using `prepare_dataset` to Create Synthetic Datasets + +In order to prepare a synthetic dataset, you can use the provided script in the `benchmarks/cpp` +directory. 
For example, to generate a synthetic dataset of 1000 requests with a uniform ISL/OSL of +128/128 for [Llama-2-7b](https://huggingface.co/meta-llama/Llama-2-7b), simply run: + +```shell +benchmarks/cpp/prepare_dataset.py --stdout --tokenizer meta-llama/Llama-2-7b-hf token-norm-dist --input-mean 128 --output-mean 128 --input-stdev 0 --output-stdev 0 --num-requests 1000 > $PATH_TO_DATASET +``` + +You can pipe the above command to a file to reuse the same dataset, or simply pipe its output to the +benchmark script (example below). + +### Building a Benchmark Engine + +The second thing you'll need once you have a dataset is an engine to benchmark against. In order to +build a pre-configured engine for one of the supported ISL:OSL combinations, you can run the following +using the dataset you generated with `prepare_dataset.py` to build an FP8 quantized engine: + +```shell +trtllm-bench --model $HF_MODEL_NAME build --dataset $PATH_TO_DATASET --quantization FP8 +``` + +or manually set a max sequence length that you plan to run with specifically: + +```shell +trtllm-bench --model $HF_MODEL_NAME build --max_seq_len $MAX_SEQ_LEN --quantization FP8 +``` + +The engine in this case will be written to the `/tmp/$HF_MODEL_NAME/tp_1_pp_1/` directory. + +### Running a Max Throughput Benchmark + +The `trtllm-bench` command line tool provides a max throughput benchmark that is accessible via the +`throughput` subcommand. This benchmark tests a TensorRT-LLM engine under maximum load to provide an +upper bound throughput number. + +#### How the Benchmarker Works + +The benchmarker will read in a data file or standard input (stdin) as a stream where a single line contains +a complete JSON request entry. The process that the benchmarker follows is: + +1. Iterate over all input requests. If `logits` is specified, construct the request using the specified +list of logits. Otherwise, tokenize the `prompt` as specified by `--model $HF_MODEL_NAME`. +2. 
Submit the dataset to the TensorRT-LLM `Executor` API at as fast a rate as possible (offline mode). +3. Wait for all requests to return, compute statistics, then report out results. + +To run the benchmarker, run the following with the engine and dataset generated above: + +``` +trtllm-bench --model $HF_MODEL_NAME throughput --dataset $PATH_TO_DATASET --engine_dir /tmp/$HF_MODEL_NAME/tp_1_pp_1/ +``` + +When the benchmark runs, you will see output similar to the following: + +``` +Preparing to run throughput benchmark... +Setting up benchmarker and infrastructure. +Initializing Throughput Benchmark. [rate=%d req/s] +Ready to start benchmark. +Initializing Executor. +[TensorRT-LLM][INFO] Engine version 0.12.0.dev2024073000 found in the config file, assuming engine(s) built by new builder API. +[TensorRT-LLM][INFO] Initializing MPI with thread mode 3 +[TensorRT-LLM][INFO] Initialized MPI +[TensorRT-LLM][INFO] Engine version 0.12.0.dev2024073000 found in the config file, assuming engine(s) built by new builder API. 
+[TensorRT-LLM][INFO] MPI size: 1, MPI local size: 1, rank: 0 +[TensorRT-LLM][INFO] Rank 0 is using GPU 0 +[TensorRT-LLM][INFO] TRTGptModel maxNumSequences: 4096 +[TensorRT-LLM][INFO] TRTGptModel maxBatchSize: 4096 +[TensorRT-LLM][INFO] TRTGptModel maxBeamWidth: 1 +[TensorRT-LLM][INFO] TRTGptModel maxSequenceLen: 4098 +[TensorRT-LLM][INFO] TRTGptModel maxDraftLen: 0 +[TensorRT-LLM][INFO] TRTGptModel mMaxAttentionWindowSize: 4098 +[TensorRT-LLM][INFO] TRTGptModel enableTrtOverlap: 0 +[TensorRT-LLM][INFO] TRTGptModel normalizeLogProbs: 1 +[TensorRT-LLM][INFO] TRTGptModel maxNumTokens: 8192 +[TensorRT-LLM][INFO] TRTGptModel maxInputLen: 4097 = maxSequenceLen - 1 since chunked context is enabled +[TensorRT-LLM][INFO] Capacity Scheduler Policy: GUARANTEED_NO_EVICT +[TensorRT-LLM][INFO] Context Chunking Scheduler Policy: FIRST_COME_FIRST_SERVED +[TensorRT-LLM][INFO] Loaded engine size: 6214 MiB +[TensorRT-LLM][INFO] [MemUsageChange] Allocated 928.77 MiB for execution context memory. +[TensorRT-LLM][INFO] [MS] Running engine with multi stream info +[TensorRT-LLM][INFO] [MS] Number of aux streams is 1 +[TensorRT-LLM][INFO] [MS] Number of total worker streams is 2 +[TensorRT-LLM][INFO] [MS] The main stream provided by execute/enqueue calls is the first worker stream +[TensorRT-LLM][INFO] [MemUsageChange] TensorRT-managed allocation in IExecutionContext creation: CPU +0, GPU +0, now: CPU 0, GPU 6166 (MiB) +[TensorRT-LLM][INFO] [MS] Running engine with multi stream info +[TensorRT-LLM][INFO] [MS] Number of aux streams is 1 +[TensorRT-LLM][INFO] [MS] Number of total worker streams is 2 +[TensorRT-LLM][INFO] [MS] The main stream provided by execute/enqueue calls is the first worker stream +[TensorRT-LLM][INFO] [MemUsageChange] TensorRT-managed allocation in IExecutionContext creation: CPU +0, GPU +0, now: CPU 0, GPU 6166 (MiB) +[TensorRT-LLM][INFO] Switching optimization profile from: 0 to 1. 
Please ensure there are no enqueued operations pending in this context prior to switching profiles +[TensorRT-LLM][INFO] [MS] Running engine with multi stream info +[TensorRT-LLM][INFO] [MS] Number of aux streams is 1 +[TensorRT-LLM][INFO] [MS] Number of total worker streams is 2 +[TensorRT-LLM][INFO] [MS] The main stream provided by execute/enqueue calls is the first worker stream +[TensorRT-LLM][INFO] [MemUsageChange] TensorRT-managed allocation in IExecutionContext creation: CPU +0, GPU +0, now: CPU 0, GPU 6166 (MiB) +[TensorRT-LLM][INFO] Switching optimization profile from: 0 to 2. Please ensure there are no enqueued operations pending in this context prior to switching profiles +[TensorRT-LLM][INFO] [MS] Running engine with multi stream info +[TensorRT-LLM][INFO] [MS] Number of aux streams is 1 +[TensorRT-LLM][INFO] [MS] Number of total worker streams is 2 +[TensorRT-LLM][INFO] [MS] The main stream provided by execute/enqueue calls is the first worker stream +[TensorRT-LLM][INFO] [MemUsageChange] TensorRT-managed allocation in IExecutionContext creation: CPU +0, GPU +0, now: CPU 0, GPU 6166 (MiB) +[TensorRT-LLM][INFO] Switching optimization profile from: 0 to 3. Please ensure there are no enqueued operations pending in this context prior to switching profiles +[TensorRT-LLM][INFO] [MS] Running engine with multi stream info +[TensorRT-LLM][INFO] [MS] Number of aux streams is 1 +[TensorRT-LLM][INFO] [MS] Number of total worker streams is 2 +[TensorRT-LLM][INFO] [MS] The main stream provided by execute/enqueue calls is the first worker stream +[TensorRT-LLM][INFO] [MemUsageChange] TensorRT-managed allocation in IExecutionContext creation: CPU +0, GPU +0, now: CPU 0, GPU 6166 (MiB) +[TensorRT-LLM][INFO] Switching optimization profile from: 0 to 4. 
Please ensure there are no enqueued operations pending in this context prior to switching profiles +[TensorRT-LLM][INFO] [MS] Running engine with multi stream info +[TensorRT-LLM][INFO] [MS] Number of aux streams is 1 +[TensorRT-LLM][INFO] [MS] Number of total worker streams is 2 +[TensorRT-LLM][INFO] [MS] The main stream provided by execute/enqueue calls is the first worker stream +[TensorRT-LLM][INFO] [MemUsageChange] TensorRT-managed allocation in IExecutionContext creation: CPU +0, GPU +0, now: CPU 0, GPU 6166 (MiB) +[TensorRT-LLM][INFO] Switching optimization profile from: 0 to 5. Please ensure there are no enqueued operations pending in this context prior to switching profiles +[TensorRT-LLM][INFO] [MemUsageChange] Allocated 1.14 GB GPU memory for runtime buffers. +[TensorRT-LLM][INFO] [MemUsageChange] Allocated 4.35 GB GPU memory for decoder. +[TensorRT-LLM][INFO] Memory usage when calculating max tokens in paged kv cache: total: 79.10 GiB, available: 63.62 GiB +[TensorRT-LLM][INFO] Number of blocks in KV cache primary pool: 4607 +[TensorRT-LLM][INFO] Number of blocks in KV cache secondary pool: 0, onboard blocks to primary memory before reuse: true +[TensorRT-LLM][INFO] Max KV cache pages per sequence: 65 +[TensorRT-LLM][INFO] Number of tokens per block: 64. +[TensorRT-LLM][INFO] [MemUsageChange] Allocated 62.99 GiB for max tokens in paged KV cache (294848). +[TensorRT-LLM][INFO] Executor instance created by worker +Starting response daemon...Executor started. + +Request serving started. +Starting statistics collection. +Collecting live stats... +Benchmark started. +Request serving stopped. +Collecting last stats... +Ending statistics collection. +Stop received. +Stopping response parsing. +Collecting last responses before shutdown. +Completed request parsing. +Parsing stopped. +Request generator successfully joined. +Statistics process successfully joined. 
+=========================================================== += ENGINE DETAILS +=========================================================== +Model: meta-llama/Llama-2-7b-hf +Engine Directory: /tmp/meta-llama/Llama-2-7b-hf/tp_1_pp_1 +TensorRT-LLM Version: 0.12.0.dev2024073000 +Dtype: float16 +KV Cache Dtype: FP8 +Quantization: FP8 +Max Input Length: 2048 +Max Sequence Length: 4098 + +=========================================================== += WORLD + RUNTIME INFORMATION +=========================================================== +TP Size: 1 +PP Size: 1 +Max Runtime Batch Size: 4096 +Max Runtime Tokens: 8192 +Scheduling Policy: Guaranteed No Evict +KV Memory Percentage: 99.0% +Issue Rate (req/sec): 3.680275266452667e+18 +=========================================================== += STATISTICS +=========================================================== +Number of requests: 3000 +Average Input Length (tokens): 128.0 +Average Output Length (tokens): 128.0 +Token Throughput (tokens/sec): 23405.927228471104 +Request Throughput (req/sec): 182.8588064724305 +Total Latency (seconds): 16.406100739 +=========================================================== + +Benchmark Shutdown called! +Shutting down ExecutorServer. +[TensorRT-LLM][INFO] Orchestrator sendReq thread exiting +[TensorRT-LLM][INFO] Orchestrator recv thread exiting +Executor shutdown. +[TensorRT-LLM][INFO] Leader sendThread exiting +[TensorRT-LLM][INFO] Leader recvReq thread exiting +``` + +> [!WARNING] Some statistics are not reported. +> There are some statistics that are not reported in the summary (typically as 0.0). These statistics +> are not available currently. 
diff --git a/benchmarks/cpp/gptManagerBenchmark.cpp b/benchmarks/cpp/gptManagerBenchmark.cpp index d3861a2f3..488f71a19 100644 --- a/benchmarks/cpp/gptManagerBenchmark.cpp +++ b/benchmarks/cpp/gptManagerBenchmark.cpp @@ -24,6 +24,7 @@ #include "tensorrt_llm/common/stringUtils.h" #include "tensorrt_llm/executor/executor.h" #include "tensorrt_llm/executor/tensor.h" +#include "tensorrt_llm/executor/types.h" #include "tensorrt_llm/plugins/api/tllmPlugin.h" #include "tensorrt_llm/runtime/common.h" #include "tensorrt_llm/runtime/gptJsonConfig.h" @@ -173,6 +174,9 @@ struct BenchmarkParams // Decoding params std::optional>> medusaChoices; + + std::optional executorLookaheadConfig; + std::optional requestLookaheadConfig; }; class InferenceRequestsAsyncSend @@ -509,6 +513,7 @@ class Recorder { if (!mStreaming) { + TLLM_LOG_DEBUG("response.getResult().outputTokenIds"); auto outputTokenIds = response.getResult().outputTokenIds; int32_t outSeqLen = 0; @@ -824,9 +829,11 @@ class ExecutorServer executorConfig.setMaxNumTokens(benchmarkParams.maxNumTokens.value()); } - executorConfig.setDecodingConfig(texec::DecodingConfig( - benchmarkParams.medusaChoices.has_value() ? texec::DecodingMode::Medusa() : texec::DecodingMode::Auto(), - std::nullopt, benchmarkParams.medusaChoices)); + executorConfig.setDecodingConfig( + texec::DecodingConfig(benchmarkParams.medusaChoices.has_value() ? texec::DecodingMode::Medusa() + : benchmarkParams.executorLookaheadConfig.has_value() ? 
texec::DecodingMode::Lookahead() + : texec::DecodingMode::Auto(), + benchmarkParams.executorLookaheadConfig, benchmarkParams.medusaChoices)); executorConfig.setExtendedRuntimePerfKnobConfig(extendedRuntimePerfKnobConfig); if (executorModelType == texec::ModelType::kDECODER_ONLY) @@ -910,7 +917,7 @@ class ExecutorServer for (auto const& response : responses) { auto const reqId = response.getRequestId(); - + TLLM_LOG_DEBUG("response.getResult().isFinal"); if (response.getResult().isFinal) { mActiveCount--; @@ -1323,7 +1330,8 @@ std::shared_ptr makeRequest(std::uint64_t reqId, Sample const& ITensor::SharedPtr const& beamWidthTensor, ITensor::SharedPtr const& eosId, ITensor::SharedPtr const& padId, BufferManager const& bufferManager, ITensor::SharedPtr const& returnContextLogits = nullptr, ITensor::SharedPtr const& returnGenerationLogits = nullptr, ITensor::SharedPtr const& loraWeights = nullptr, - ITensor::SharedPtr const& loraConfig = nullptr) + ITensor::SharedPtr const& loraConfig = nullptr, + std::optional lookaheadConfig = std::nullopt) { auto request = std::make_shared(reqId); auto const& inputIds = sample.inputIds; @@ -1361,6 +1369,10 @@ std::shared_ptr makeRequest(std::uint64_t reqId, Sample const& { request->setLoraConfig(loraConfig); } + if (lookaheadConfig) + { + request->setLookaheadConfig(lookaheadConfig.value()); + } if (streaming) { request->setIsStreaming(true); @@ -1372,18 +1384,20 @@ texec::Request makeExecutorRequest(Sample const& sample, SizeType32 const& beamW std::optional const& eosId, std::optional const& padId, bool streaming = false, bool const& returnContextLogits = false, bool const& returnGenerationLogits = false, std::optional const& loraConfig = std::nullopt, + std::optional const& lookaheadConfig = std::nullopt, std::optional encoderInputTokenIds = std::nullopt) { auto samplingConfig = texec::SamplingConfig{beamWidth}; auto outputConfig = texec::OutputConfig{false, returnContextLogits, returnGenerationLogits, false}; return 
texec::Request(sample.inputIds, sample.outputLen, streaming, samplingConfig, outputConfig, eosId, padId, - std::nullopt, // badWords - std::nullopt, // stopWords - std::nullopt, // embeddingBias - std::nullopt, // speculativeDecoding - std::nullopt, // pTuning - loraConfig, - std::nullopt, // logitsPostProcessorName + std::nullopt, // badWords + std::nullopt, // stopWords + std::nullopt, // embeddingBias + std::nullopt, // speculativeDecoding + std::nullopt, // pTuning + loraConfig, // loraConfig + lookaheadConfig, // lookaheadConfig + std::nullopt, // logitsPostProcessorName encoderInputTokenIds.has_value() ? encoderInputTokenIds : std::nullopt); } @@ -1429,9 +1443,11 @@ void benchmarkGptManager(std::filesystem::path const& engineDir, TrtGptModelType optionalParams.maxBatchSize = benchmarkParams.maxBatchSize; optionalParams.maxNumTokens = benchmarkParams.maxNumTokens; optionalParams.schedulerConfig = texec::SchedulerConfig{capacitySchedulerPolicy}; - optionalParams.decodingConfig = texec::DecodingConfig( - benchmarkParams.medusaChoices.has_value() ? texec::DecodingMode::Medusa() : texec::DecodingMode::Auto(), - std::nullopt, benchmarkParams.medusaChoices); + optionalParams.decodingConfig + = texec::DecodingConfig(benchmarkParams.medusaChoices.has_value() ? texec::DecodingMode::Medusa() + : benchmarkParams.executorLookaheadConfig.has_value() ? 
texec::DecodingMode::Lookahead() + : texec::DecodingMode::Auto(), + benchmarkParams.executorLookaheadConfig, benchmarkParams.medusaChoices); optionalParams.extendedRuntimePerfKnobConfig = texec::ExtendedRuntimePerfKnobConfig( benchmarkParams.multiBlockMode, benchmarkParams.enableContextFMHAFP32Acc); @@ -1501,8 +1517,8 @@ void benchmarkGptManager(std::filesystem::path const& engineDir, TrtGptModelType ++reqId; if (i == terminateReqId) ++reqId; - auto request = makeRequest( - reqId, samples[0], benchmarkParams.streaming, beamWidthTensor, eosIdTensor, padIdTensor, bufferManager); + auto request = makeRequest(reqId, samples[0], benchmarkParams.streaming, beamWidthTensor, eosIdTensor, + padIdTensor, bufferManager, nullptr, nullptr, nullptr, nullptr, benchmarkParams.requestLookaheadConfig); gptServer->enqueue(request); } gptServer->waitForEmpty(); @@ -1517,7 +1533,8 @@ void benchmarkGptManager(std::filesystem::path const& engineDir, TrtGptModelType for (std::size_t i = 0; i < numSamples; ++i) { auto request = makeRequest(i + 1, samples[i], benchmarkParams.streaming, beamWidthTensor, eosIdTensor, - padIdTensor, bufferManager, returnContextLogitsFlagTensor, returnGenerationLogitsFlagTensor); + padIdTensor, bufferManager, returnContextLogitsFlagTensor, returnGenerationLogitsFlagTensor, nullptr, + nullptr, benchmarkParams.requestLookaheadConfig); gptServer->enqueue(request); if (i < numSamples - 1) @@ -1541,7 +1558,8 @@ void benchmarkGptManager(std::filesystem::path const& engineDir, TrtGptModelType for (std::size_t i = 0; i < numSamples; ++i) { auto request = makeRequest(i + 1, samples[i], benchmarkParams.streaming, beamWidthTensor, eosIdTensor, - padIdTensor, bufferManager, returnContextLogitsFlagTensor, returnGenerationLogitsFlagTensor); + padIdTensor, bufferManager, returnContextLogitsFlagTensor, returnGenerationLogitsFlagTensor, + nullptr, nullptr, benchmarkParams.requestLookaheadConfig); gptServer->enqueue(request); } gptServer->waitForEmpty(); @@ -1644,13 +1662,13 @@ 
void benchmarkExecutor(std::optional const& decoderEngine { Sample s{std::vector{decoderStartTokenId}, 1, static_cast(taskId)}; requests.emplace_back(makeExecutorRequest(s, beamWidth, eosId, padId, false, false, false, - loraConfig, std::vector{1, 2, 3, 4, 5})); + loraConfig, std::nullopt, std::vector{1, 2, 3, 4, 5})); } else { Sample s{std::vector{1, 2, 3, 4, 5}, 1, static_cast(taskId)}; requests.emplace_back( - makeExecutorRequest(s, beamWidth, eosId, padId, false, false, false, loraConfig)); + makeExecutorRequest(s, beamWidth, eosId, padId, false, false, false, loraConfig, std::nullopt)); } } executorServer->enqueue(std::move(requests), true); @@ -1668,12 +1686,14 @@ void benchmarkExecutor(std::optional const& decoderEngine { Sample s{std::vector{decoderStartTokenId}, samples[0].outputLen, samples[0].taskId}; requests.emplace_back(makeExecutorRequest(s, beamWidth, eosId, padId, benchmarkParams.streaming, - returnContextLogits, returnGenerationLogits, std::nullopt, samples[0].inputIds)); + returnContextLogits, returnGenerationLogits, std::nullopt, + benchmarkParams.requestLookaheadConfig, samples[0].inputIds)); } else { requests.emplace_back(makeExecutorRequest(samples[0], beamWidth, eosId, padId, - benchmarkParams.streaming, returnContextLogits, returnGenerationLogits)); + benchmarkParams.streaming, returnContextLogits, returnGenerationLogits, std::nullopt, + benchmarkParams.requestLookaheadConfig)); } } executorServer->enqueue(std::move(requests), true); @@ -1699,12 +1719,14 @@ void benchmarkExecutor(std::optional const& decoderEngine { Sample s{std::vector{decoderStartTokenId}, samples[i].outputLen, samples[i].taskId}; requests.emplace_back(makeExecutorRequest(s, beamWidth, eosId, padId, benchmarkParams.streaming, - returnContextLogits, returnGenerationLogits, loraConfig, samples[i].inputIds)); + returnContextLogits, returnGenerationLogits, loraConfig, benchmarkParams.requestLookaheadConfig, + samples[i].inputIds)); } else { 
requests.emplace_back(makeExecutorRequest(samples[i], beamWidth, eosId, padId, - benchmarkParams.streaming, returnContextLogits, returnGenerationLogits, loraConfig)); + benchmarkParams.streaming, returnContextLogits, returnGenerationLogits, loraConfig, + benchmarkParams.requestLookaheadConfig)); } } @@ -1789,6 +1811,25 @@ std::vector> parseVectorOfVectors(std::string const& inp return result; } +texec::LookaheadDecodingConfig parseLookaheadConfig(std::string const& input) +{ + std::regex regex("\\[ *(\\d+) *, *(\\d+) *, *(\\d+) *\\]"); + std::smatch match; + if (std::regex_match(input, match, regex)) + { + TLLM_CHECK(match.size() == 4); + auto w = std::stoi(match[1]); + auto n = std::stoi(match[2]); + auto g = std::stoi(match[3]); + return texec::LookaheadDecodingConfig(w, n, g); + } + else + { + TLLM_LOG_WARNING("cannot parse lookahead config from '%s'", input.c_str()); + return texec::LookaheadDecodingConfig(); + } +} + } // namespace int main(int argc, char* argv[]) @@ -1898,6 +1939,14 @@ int main(int argc, char* argv[]) options.add_options()("enable_context_fmha_fp32_acc", "Enable FMHA runner FP32 accumulation", cxxopts::value()->default_value("false")); + options.add_options()("executor_lookahead_config", + "lookahead config in the format of [max_window_size, max_ngram_size, max_verification_set_size]", + cxxopts::value()); + + options.add_options()("request_lookahead_config", + "lookahead config in the format of [max_window_size, max_ngram_size, max_verification_set_size], and each <= " + "executor lookahead config", + cxxopts::value()); auto result = options.parse(argc, argv); @@ -2055,6 +2104,16 @@ int main(int argc, char* argv[]) { benchmarkParams.medusaChoices = parseVectorOfVectors(result["medusa_choices"].as()); } + if (result.count("executor_lookahead_config")) + { + benchmarkParams.executorLookaheadConfig + = parseLookaheadConfig(result["executor_lookahead_config"].as()); + } + if (result.count("request_lookahead_config")) + { + 
benchmarkParams.requestLookaheadConfig + = parseLookaheadConfig(result["request_lookahead_config"].as()); + } // Argument: multi_block_mode benchmarkParams.multiBlockMode = result["multi_block_mode"].as(); diff --git a/benchmarks/python/all_reduce.py b/benchmarks/python/all_reduce.py index ae7cb8868..d91cdd0d4 100644 --- a/benchmarks/python/all_reduce.py +++ b/benchmarks/python/all_reduce.py @@ -23,7 +23,6 @@ import tensorrt_llm as tllm from tensorrt_llm import Mapping, Tensor -from tensorrt_llm._ipc_utils import peer_access from tensorrt_llm._utils import OMPI_COMM_TYPE_HOST, mpi_comm from tensorrt_llm.functional import AllReduceStrategy, allreduce from tensorrt_llm.plugin.plugin import current_all_reduce_helper @@ -106,18 +105,18 @@ def allreduce_benchmark(dtype: str, _, start = cuda.cuEventCreate(0) _, stop = cuda.cuEventCreate(0) runtimes = [] - with peer_access(mapping): - tllm.mpi_barrier() - - for _ in range(10): - cuda.cuEventRecord(start, stream.cuda_stream) - session.run(inputs=feed_dict, - outputs={"output": output}, - stream=stream.cuda_stream) - cuda.cuEventRecord(stop, stream.cuda_stream) - torch.cuda.synchronize() - _, ms = cuda.cuEventElapsedTime(start, stop) - runtimes.append(ms) + + tllm.mpi_barrier() + + for _ in range(10): + cuda.cuEventRecord(start, stream.cuda_stream) + session.run(inputs=feed_dict, + outputs={"output": output}, + stream=stream.cuda_stream) + cuda.cuEventRecord(stop, stream.cuda_stream) + torch.cuda.synchronize() + _, ms = cuda.cuEventElapsedTime(start, stop) + runtimes.append(ms) median_ms = sorted(runtimes)[len(runtimes) // 2] assert torch.allclose(output, (input * world_size)**inner_loop) diff --git a/benchmarks/suite/README.md b/benchmarks/suite/README.md deleted file mode 100644 index bba21609e..000000000 --- a/benchmarks/suite/README.md +++ /dev/null @@ -1,234 +0,0 @@ -# TensorRT-LLM Benchmarking - -> [!WARNING] Work in Progress -> This benchmarking suite is a current work in progress and is prone to large changes. 
- -This package is the official benchmarking suite for TensorRT-LLM. This benchmark will be updated -as development of TensorRT-LLM continues. - -## Installation - -From this folder, run `pip install -r requirements.txt` to install the extra dependencies required for this tool. - -### Available Build and Benchmark Options - -The following model options are available for benchmarking models. - -| Option | Required | Default | Description | -| :- | :-: | :-: | :- | -| `--model` | Y | - | The name of the model to benchmark. | -| `--dtype` | N | `float16` | The datatype of the weights. | -| `--max-batch-size` | Y | - | The batch size to build the engine with for the benchmark. | -| `--kv-dtype` | N | `float16` | The datatype to store the KV Cache in. | -| `--kv-cache-free-gpu-mem-fraction` | N | `0.98` | The percentage of free memory that the KV cache is allowed to occupy. | -| `--quantization` | N | `None` |The quantization algorithm to be used when benchmarking. See the [documentation](https://nvidia.github.io/TensorRT-LLM/precision.html) for more information| -| `--workspace` | N | `/tmp` | The directory to store benchmarking intermediate files. | -| `--tensor-parallel-size` | N | `1` | Number of tensor parallel shards to run the benchmark with. | -| `--pipeline-parallel-size` | N | `1` | Number of pipeline parallel shards to run the benchmark with. 
| - -#### Supported Networks for Benchmarking - -- [`tiiuae/falcon-7b`](https://huggingface.co/tiiuae/falcon-7b) -- [`tiiuae/falcon-40b`](https://huggingface.co/tiiuae/falcon-40b) -- [`tiiuae/falcon-180B`](https://huggingface.co/tiiuae/falcon-180B) -- [`meta-llama/Llama-2-7b-hf`](https://huggingface.co/meta-llama/Llama-2-7b-hf) -- [`meta-llama/Llama-2-13b-hf`](https://huggingface.co/meta-llama/Llama-2-13b-hf) -- [`meta-llama/Llama-2-70b-hf`](https://huggingface.co/meta-llama/Llama-2-70b-hf) -- [`EleutherAI/gpt-j-6b`](https://huggingface.co/EleutherAI/gpt-j-6b) - -#### Support Quantization Modes - -TensorRT-LLM supports a number of quanization modes. For more information about quantization, see the -[documentation](https://nvidia.github.io/TensorRT-LLM/precision.html). - -- None (no quantization applied) -- W8A16 -- W4A16 -- W4A16_AWQ -- W4A8_AWQ -- W4A16_GPTQ -- FP8 -- INT8 - -> [!NOTE] Please see the supported quantization methods for each network [here](https://nvidia.github.io/TensorRT-LLM/precision.html#support-matrix) - -## Static Benchmarking a Network - -In order to benchmark a static batch for a network, run a command like the following: - -```shell -cd tensorrt_llm_bench/ -python benchmark.py --model tiiuae/falcon-7b static --isl 128 --osl 128 --max-batch-size 1 -``` - -This command line will build a unique engine for the configuration and run the benchmark using -the `gptSessionBenchmark` binary. 
You need to build the TensorRT-LLM wheel with the `--benchmarks` flag for this binary to be compiled: - -```shell -python3 ./scripts/build_wheel.py --benchmarks -``` - -If you've already compiled the wheel without benchmarks, you can build the benchmarking binaries with the following after the fact: - -```shell -pushd cpp/build/ -make -j benchmarks -popd -``` - -The complete list of arguments for static benchmarking are as follows: -| Option | Required | Default | Description | -| :- | :-: | :-: | :- | -| `--isl` | Y | - | The input sequence length to pass in during benchmark. | -| `--osl` | Y | - | The output sequence length to generate in the benchmark. | -| `--gpt-session-path` | N | `../../cpp/build/benchmarks/gptSessionBenchmark` | The path to the built gptSessionBenchmark binary. | -| `--warm-up-runs` | N | `2` | The number of warm up runs to run before benchmarking actual results. | -| `--num-runs` | N | `10` | The number runs to generate benchmarking results from. | -| `--duration` | N | `60` | The minimum iteration time, in seconds, to measure. | - -> [!WARNING] -> `gptSession` will be deprecated for the 1.0 release of TensorRT-LLM. This command line will change in order to match and update benchmarks accordingly. - - -## Inflight Benchmarking with a Dataset - -This section covers how to benchmark TensorRT-LLM using inflight batching. - -### Workflow - -The workflow for inflight batching is slightly different than the [static scenario](#static-benchmarking-a-network) as it requires a workload of requests instead of a single static batch. The following is the workflow for benchmarking using inflight batching: - -1. Prepare a dataset to drive the inflight batching benchmark. -2. Run the `inflight` benchmarking subcommand and provide the dataset from step 1. - -#### Preparing a Dataset - -The inflight benchmark utilizes a fixed JSON schema so that it is simple and -straightforward to specify requests. 
The schema is defined as follows: - -| Key | Required | Type | Description | -| :- | :-: | :-: | :- | -| `task_id`| Y | String | Unique identifier for the request. | -| `prompt` | N* | String | Input text for a generation request. | -| `logits` | N* | List[Integer] | List of logits that make up the request prompt. | -| `output_tokens` | Y | Integer | Number of generated tokens for this request. | - -> [!NOTE] Prompt and logits are mutually exclusive* -> While having both `prompt` and `logits` is not required, at least one is required. -> If `logits` are specified, the `prompt` entry is ignored for request generation. - -Examples of valid entries for the inflight benchmark are: - -- Entries with a human-readable prompt and no logits. -```json -{"task_id": 1, "prompt": "Generate an infinite response to the following: This is the song that never ends, it goes on and on my friend.", "output_tokens": 1000} -{"task_id": 2, "prompt": "Generate an infinite response to the following: Na, na, na, na", "output_tokens": 1000} -``` - -- Entries which contain logits. -```json -{"task_id":0,"logits":[863,22056,25603,11943,8932,13195,3132,25032,21747,22213],"output_tokens":128} -{"task_id":1,"logits":[14480,13598,15585,6591,1252,8259,30990,26778,7063,30065,21764,11023,1418],"output_tokens":128} -``` - -> [!INFO] A whole entry is on a line! -> To make the passing of data simpler, a complete JSON entry is on each line so that the benchmarker -> can simply read a line and assume a complete entry. When creating a dataset, be sure that a complete -> JSON entry is on every line. - -#### Using `prepare_dataset` to Create Synthetic Datasets - -In order to prepare a synthetic dataset, you can use the provided script in the `benchmarks/cpp` -directory. 
For example, to generate a synthetic dataset of 1000 requests with a uniform ISL/OSL of -128/128 for [Llama-2-7b](https://huggingface.co/meta-llama/Llama-2-7b), simply run: - -```shell -benchmarks/cpp/prepare_dataset.py --tokenizer meta-llama/Llama-2-7b-hf token-norm-dist --input-mean 128 --output-mean 128 --input-stdev 0 --output-stdev 0 --num-requests 1000 --stdout -``` - -You can pipe the above command to a file to reuse the same dataset, or simply pipe its output to the -benchmark script (example below). - -### Running a Dataset with the Benchmarker - -Once you've generated a dataset (see [above](#preparing-a-dataset)), you can run the benchmarker -in one of two ways: - -```shell -benchmarks/suite/tensorrt_llm_bench/benchmark.py --model $HF_MODEL_NAME --max-batch-size $BATCH_SIZE < $DATASET_PATH -``` - -> [!INFO] Alternative to piping. -> There is also a `--dataset` option for `benchmark.py` that can be used instead of piping a file. - -or - -```shell -benchmarks/cpp/prepare_dataset.py --tokenizer $HF_MODEL_NAME --input-mean $ISL --output-mean $OSL --num-requests $NUM_REQUESTS --stdout | benchmarks/suite/tensorrt_llm_bench/benchmark.py --model $HF_MODEL_NAME --max-batch-size $BATCH_SIZE --request-rate $REQUEST_RATE -``` - -#### How the Benchmarker Works - -The benchmarker will read in a data file or standard input (stdin) as a stream where a single line contains -a complete JSON request entry. The process that the benchmarker is as follows: - -1. Iterate over all input requests. If `logits` is specified, construct the request using the specified -list of logits. Otherwise, tokenize the `prompt` with as specified by `--model $HF_MODEL_NAME`. -2. Build the TensorRT-LLM engine. -3. Submit the dataset to the TensorRT-LLM `Executor` API at the request rate specified by `--request-rate $REQUEST_RATE` -4. Wait for all requests to return, compute statistics, then report out results. 
- -When the benchmark runs successfully, you will see a report out of the run similar to the following: - -``` -[RANK 0] Submitting requests... -[RANK 0] Completed request submission. -[RANK 0] Calculating results. -[RANK 0] Reporting... -[RANK 0] JSON: {'benchmark_cmd': '', 'binary': '', 'build_cmd': 'trtllm-build --output_dir /tmp/meta-llama/llama-2-7b-hf --model_config /tmp/generated_config.json --workers 1 --max_batch_size 1024 --max_input_len 128 --max_seq_len 256 --max_num_tokens 8000 --context_fmha enable --gpt_attention_plugin float16 --paged_kv_cache enable --multiple_profiles enable --gemm_plugin float16', 'first_token_latency': 0.0, 'inflight_batching': True, 'kv_mem_fraction': 0.98, 'latency_units': 'ms', 'max_batch_size': 1024, 'max_tokens': 8000, 'model': 'meta-llama/Llama-2-7b-hf', 'peak_gpu_mem_units': 'GB', 'peak_gpu_mem': 0.0, 'scheduler': 'Max Utilization', 'throughput_units': 'tokens/second', 'throughput': 17634.422523488243, 'time_per_output_token': 0.0, 'total_input_tokens': 128000, 'total_latency': 7.258530855178833, 'total_output_tokens': 128000} -=========================================================== -= METADATA -=========================================================== -Model: meta-llama/Llama-2-7b-hf -TP Size: 1 -PP Size: 1 -Scheduling Policy: Max Utilization -In-flight Batcher?: True -Dtype: float16 -KV Cache Dtype: FP8 -Quantization: FP8 -KV Memory Percentage: 98.0% - -=========================================================== -= ENGINE DETAILS -=========================================================== -Engine Directory: /tmp/meta-llama/llama-2-7b-hf -Max Batch Size: 1024 -Total Input Length: 128000 -Total Output Length: 128000 -Max Tokens: 8000 - -=========================================================== -= STATISTICS -=========================================================== -Throughput (tokens/second): 17634.422523488243 -Total Latency (ms): 7258.5309 -First Token Latency (ms): 0.0 -Token-to-token Latency (ms): 0.0 
-Peak GPU Memory Usage (GB): 0.0 - -=========================================================== -= COMMANDS -=========================================================== -Build: trtllm-build --output_dir /tmp/meta-llama/llama-2-7b-hf --model_config /tmp/generated_config.json --workers 1 --max_batch_size 1024 --max_input_len 128 --max_seq_len 256 --max_num_tokens 8000 --context_fmha enable --gpt_attention_plugin float16 --paged_kv_cache enable --multiple_profiles enable --gemm_plugin float16 -Benchmark: - -[RANK 0] Terminating. -``` - -> [!WARNING] Some statistics are not reported. -> There are some statistics that are not reported in the summary (typically as 0.0). These statistics -> are not available currently. - - -That's it! -- you've successfully benchmarked TensorRT-LLM! diff --git a/benchmarks/suite/requirements.txt b/benchmarks/suite/requirements.txt deleted file mode 100644 index e75e33990..000000000 --- a/benchmarks/suite/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -pydantic>=2.2.1 -click-option-group == 0.5.6 -aenum == 3.1.15 diff --git a/benchmarks/suite/tensorrt_llm_bench/__init__.py b/benchmarks/suite/tensorrt_llm_bench/__init__.py deleted file mode 100644 index d6bfb8507..000000000 --- a/benchmarks/suite/tensorrt_llm_bench/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Module for running TensorRT-LLM benchmarks.""" diff --git a/benchmarks/suite/tensorrt_llm_bench/benchmark.py b/benchmarks/suite/tensorrt_llm_bench/benchmark.py deleted file mode 100644 index 65e306459..000000000 --- a/benchmarks/suite/tensorrt_llm_bench/benchmark.py +++ /dev/null @@ -1,125 +0,0 @@ -from pathlib import Path -from typing import get_args - -import click -from ifb import executor_benchmark -from static import static_benchmark -from utils import VALID_CACHE_DTYPES, VALID_COMPUTE_DTYPES, VALID_QUANT_ALGOS -from utils.dataclasses import BenchmarkConfig - - -@click.group(context_settings={'show_default': True}) -@click.option( - "--model", - "-m", - required=True, - type=str, - 
help="The Huggingface name of the model to benchmark.", -) -@click.option( - "--max-batch-size", - hidden=True, - default=0, - type=int, - help="Maximum batch size to build the benchmark engine with.", -) -@click.option( - "--kv-dtype", - type=click.Choice(tuple(get_args(VALID_CACHE_DTYPES))), - default="float16", - help="The dtype to store the KV Cache in.", -) -@click.option( - "--dtype", - type=click.Choice(tuple(get_args(VALID_COMPUTE_DTYPES))), - default="float16", - help="Activation and plugin data type.", -) -@click.option( - "--quantization", - "-q", - type=click.Choice(tuple(get_args(VALID_QUANT_ALGOS))), - default="None", - help= - ("The quantization algorithm to be used when benchmarking. See the " - "documentations for more information.\n" - " - https://nvidia.github.io/TensorRT-LLM/precision.html" - " - https://github.com/NVIDIA/TensorRT-LLM/blob/main/docs/source/blogs/quantization-in-TRT-LLM.md" - ), -) -@click.option( - "--workspace", - "-w", - required=False, - type=click.Path(writable=True, readable=True), - default="/tmp", - help="The directory to store benchmarking intermediate files.", -) -@click.option( - "--tensor-parallel-size", - "-tp", - type=int, - default=1, - required=False, - help="Number of tensor parallel shards to run the benchmark with.", -) -@click.option( - "--pipeline-parallel-size", - "-pp", - type=int, - default=1, - required=False, - help="Number of pipeline parallel shards to run the benchmark with.", -) -@click.option( - "--kv-cache-free-gpu-mem-fraction", - "-kv-mem", - type=float, - default=0.98, - help="The percentage of free memory that the KV Cache is allowed to occupy.", -) -@click.option( - "--build-opts", - type=str, - default="", - required=False, - hidden=True, - help="Passthrough options for trtllm-build to fine-tuning build commands.") -@click.pass_context -def benchmark( - ctx, - model: str, - max_batch_size: int, - workspace: Path, - dtype: str, - kv_dtype: str, - quantization: str, - tensor_parallel_size: int, 
- pipeline_parallel_size: int, - kv_cache_free_gpu_mem_fraction: float, - build_opts: str, -): - """Utility for using TRT-LLM for benchmarking networks from Huggingface.""" - ctx.obj = BenchmarkConfig( - model=model, - max_batch_size=max_batch_size, - workspace=Path(workspace), - dtype=dtype, - cache_dtype=kv_dtype, - quantization=quantization, - tensor_parallel=tensor_parallel_size, - pipeline_parallel=pipeline_parallel_size, - kv_cache_mem_percentage=kv_cache_free_gpu_mem_fraction, - build_overrides=build_opts.split(), - ) - - # Create the workspace where we plan to store intermediate files. - ctx.obj.workspace.mkdir(parents=True, exist_ok=True) - - -# Add nested subcommands to main benchmark CLI. -benchmark.add_command(static_benchmark) -benchmark.add_command(executor_benchmark) - -if __name__ == "__main__": - benchmark() diff --git a/benchmarks/suite/tensorrt_llm_bench/benchmarkers/__init__.py b/benchmarks/suite/tensorrt_llm_bench/benchmarkers/__init__.py deleted file mode 100644 index 2cc2877af..000000000 --- a/benchmarks/suite/tensorrt_llm_bench/benchmarkers/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -from typing import List, Protocol - -from utils.dataclasses import BenchmarkResults, InferenceRequest - - -class Benchmarker(Protocol): - """Protocol for defining benchmarking classes for building/benchmarking.""" - - def build(self) -> None: - """Build a model to be benchmarked.""" - ... - - def benchmark(self) -> BenchmarkResults: - """Benchmark the constructed model container by a benchmarker.""" - ... - - -class DatasetBenchmarker(Protocol): - - def benchmark_dataset(self, - dataset: List[InferenceRequest]) -> BenchmarkResults: - """_summary_ - - Args: - dataset (List[InferenceRequest]): List of inference requests to benchmark. - - Returns: - BenchmarkResults: The results of the benchmark run. - """ - ... 
diff --git a/benchmarks/suite/tensorrt_llm_bench/benchmarkers/pybind_executor.py b/benchmarks/suite/tensorrt_llm_bench/benchmarkers/pybind_executor.py deleted file mode 100644 index 5742e90a5..000000000 --- a/benchmarks/suite/tensorrt_llm_bench/benchmarkers/pybind_executor.py +++ /dev/null @@ -1,146 +0,0 @@ -from datetime import timedelta -from time import sleep, time -from typing import List - -from mpi4py.MPI import COMM_WORLD -from transformers import PreTrainedTokenizer -from utils.dataclasses import BenchmarkConfig, BenchmarkResults -from utils.enums import IFBSchedulingPolicy, ResultsSchedulingPolicy - -from tensorrt_llm.bindings.executor import (Executor, ExecutorConfig, - KvCacheConfig, ModelType, - OutputConfig, Request, - SchedulerConfig) - -from . import InferenceRequest - - -class PybindExecutorBenchmarker: - """Utility class for running inflight benchmarks via the Executor API.""" - - def __init__( - self, - config: BenchmarkConfig, - ): - """Initialize a gptSessionBenchmark instance. - - Args: - config (BenchmarkConfig): Benchmark configuration for build/run. - """ - self.config: BenchmarkConfig = config - - @staticmethod - def get_request(request: InferenceRequest, - tokenizer: PreTrainedTokenizer) -> Request: - return Request( - input_token_ids=request.logits, - max_new_tokens=request.output_tokens, - stop_words=[], - bad_words=[], - streaming=False, - output_config=OutputConfig(exclude_input_from_output=True), - pad_id=tokenizer.pad_token_id, - end_id=tokenizer.eos_token_id, - ) - - def initialize_executor(self) -> Executor: - """ - Initialize an Executor instance. - - Returns: - Executor: An instance of a TensorRT-LLM Executor. 
- """ - policy = IFBSchedulingPolicy(self.config.scheduling_policy).value - executor_config: ExecutorConfig = ExecutorConfig( - max_beam_width=1, - enable_chunked_context=self.config.chunking, - scheduler_config=SchedulerConfig( - capacity_scheduler_policy=policy, ), - kv_cache_config=KvCacheConfig( - free_gpu_memory_fraction=self.config.kv_cache_mem_percentage, ), - ) - - executor: Executor = Executor( - model_path=self.config.engine_path, - model_type=ModelType.DECODER_ONLY, - executor_config=executor_config, - ) - - return executor - - def benchmark_dataset(self, rate: int, - dataset: List[InferenceRequest]) -> BenchmarkResults: - """Benchmark the Executor Pybind interface. - - Args: - dataset (List[InferenceRequest]): List of inference requests to - benchmark with. - - Returns: - BenchmarkResults: Final results from running the specified dataset. - """ - request_ids = [] - num_finished = 0 - num_errored = 0 - num_input_tokens = 0 - num_output_tokens = 0 - delay = 1.0 / float(rate) - last_request = len(dataset) - 1 - bench_result = None - - executor = self.initialize_executor() - if executor.can_enqueue_requests(): - print(f"[RANK {COMM_WORLD.rank}] Submitting requests...") - start = time() - for i, request in enumerate(dataset): - sleep_time = delay if i != last_request else 0 - request_ids.append(executor.enqueue_request(request)) - num_input_tokens += len(request.input_token_ids) - sleep(sleep_time) - print(f"[RANK {COMM_WORLD.rank}] Completed request submission.") - - while num_finished <= last_request: - responses = executor.await_responses(timeout=timedelta( - milliseconds=1)) - for response in responses: - has_error = response.has_error() - num_finished += 1 - num_errored += 1 if has_error else 0 - - if not has_error: - result = response.result - for out_tokens in result.output_token_ids: - num_output_tokens += len(out_tokens) - end = time() - print(f"[RANK {COMM_WORLD.rank}] Calculating results.") - e2e_time = end - start - e2e_time * 1000.0 - policy = 
ResultsSchedulingPolicy( - IFBSchedulingPolicy(self.config.scheduling_policy).value) - - bench_result = BenchmarkResults( - model=self.config.model, - dtype=self.config.dtype.value, - quantization=str(self.config.quantization.value), - max_batch_size=self.config.max_batch_size, - total_input_tokens=num_input_tokens, - total_output_tokens=num_output_tokens, - tp_size=self.config.tensor_parallel, - pp_size=self.config.pipeline_parallel, - kv_mem_fraction=self.config.kv_cache_mem_percentage, - scheduler=policy.value, - max_tokens=self.config.max_tokens, - inflight_batching=True, - total_latency=e2e_time, - first_token_latency=0, - time_per_output_token=0, - latency_units="ms", - throughput=num_output_tokens / e2e_time, - throughput_units="tokens/second", - peak_gpu_mem=0.0, - peak_gpu_mem_units="GB", - build_cmd="", - benchmark_cmd="", - ) - - return bench_result diff --git a/benchmarks/suite/tensorrt_llm_bench/benchmarkers/static.py b/benchmarks/suite/tensorrt_llm_bench/benchmarkers/static.py deleted file mode 100644 index b5b3c49ea..000000000 --- a/benchmarks/suite/tensorrt_llm_bench/benchmarkers/static.py +++ /dev/null @@ -1,208 +0,0 @@ -import platform -from pathlib import Path -from subprocess import CompletedProcess -from typing import Dict, List - -from utils import command_logger, process_error_check, run_process -from utils.dataclasses import BenchmarkConfig, BenchmarkResults -from utils.trtllm_config import TRTLLMConfig - - -class gptSessionBenchmarker: - """Utility class for running static benchmarks with gptSessionBenchmark.""" - - def __init__( - self, - config: BenchmarkConfig, - benchmark_binary: Path, - batch_size: int, - isl: int, - osl: int, - warm_up_runs: int, - num_runs: int, - duration: int, - kv_cache_free_fraction: float = .9, - ): - """Initialize a gptSessionBenchmark instance. - - Args: - config (BenchmarkConfig): Benchmark configuration for build/run. - benchmark_binary (Path): Path to the benchmarking binary. 
- batch_size (int): Batch size to configure the build with. - isl (int): Input sequence length to configure the build with. - osl (int): Output sequence length to configure the build with. - kv_cache_free_fraction (float, optional): The amount of remaining - GPU memory after model loading to save for the KV Cache. Defaults - to .9. - """ - self.config: BenchmarkConfig = config - self.gpt_session_path = Path(benchmark_binary).absolute() - self.batch_size = batch_size - self.input_length = isl - self.output_length = osl - self.warm_up = warm_up_runs - self.num_runs = num_runs - self.duration = duration - self.kv_cache_mem = kv_cache_free_fraction - self.result = None - - def get_build_command(self) -> List[str]: - """Build the engine command for TRT-LLM. - - Returns: - List[str]: A list of command line arguments to run a build command. - """ - model = self.config.model - tp = self.config.tensor_parallel - pp = self.config.pipeline_parallel - dtype = self.config.dtype.value - kv_dtype = self.config.cache_dtype - quant_algo = self.config.quantization.value - output_dir = self.config.engine_path - max_batch_size = self.batch_size - max_isl = self.input_length - max_osl = self.output_length - workspace = self.config.workspace - - # Generate the TRT-LLM Configuration file using the dataclass - # NOTE: This method does not use weights. - trtllm_config = TRTLLMConfig.from_hf(model, tp, pp, dtype, quant_algo, - kv_dtype.value) - # Write the generated configuration file to the benchmark workspace. - trtllm_config.to_json(workspace) - - # Return the full command for building TRT-LLM via subprocess call. - cmd = [ - "trtllm-build", - "--output_dir", - output_dir, - "--model_config", - Path(workspace, "generated_config.json"), - "--workers", - self.config.world_size, - # Define the maximums the engine can accept. 
- "--max_batch_size", - max_batch_size, - "--max_input_len", - max_isl, - "--max_seq_len", - max_osl + max_isl, - "--context_fmha", - "enable", - # Set the attention plugin data type. - "--gpt_attention_plugin", - dtype, - # Disable paged cache since we aren't batching on the fly. - "--paged_kv_cache", - "disable", - ] + kv_dtype.get_build_options(dtype) - - return [str(arg) for arg in cmd] - - @command_logger(prefix="BUILD COMMAND: ") - @process_error_check - def _run_build(self, cmd: List[str]) -> CompletedProcess: - """Wrapper for calling the build for TRT-LLM. - - Purpose of this wrapper is so that we can decorate it/log it. - - Args: - cmd (List[str]): List of command line arguments for running. - - Returns: - CompletedProcess: Completed process information for parsing and - reporting. - """ - return run_process( - cmd, - self.config.workspace, - ) - - def build(self) -> None: - """Build the engine for benchmarking.""" - self._run_build(self.get_build_command()) - - @command_logger(prefix="BENCHMARK COMMAND: ") - @process_error_check - def _run_benchmark(self, cmd: List[str]) -> CompletedProcess: - """Run the benchmark command in the configured workspace. - - Args: - cmd (List[str]): List of command line arguments to run via - subprocess. - - Returns: - CompletedProcess: Completed process information for reporting. 
- """ - return run_process(cmd, run_dir=self.config.workspace, use_environ=True) - - @staticmethod - def parse_benchmark_result(benchmark_line: str) -> Dict[str, str]: - pass - - def benchmark(self): - """Benchmarks a TRT-LLM for a configured instance.""" - - # Compile the command for running - cmd = ["mpiexec", "-n", self.config.world_size] - cmd += ["-allow-run-as-root"] if platform.system() != "Windows" else "" - cmd += [ - self.gpt_session_path, - "--engine_dir", - self.config.engine_path, - "--batch_size", - self.batch_size, - "--log_level", - "info", - "--kv_cache_free_gpu_mem_fraction", - self.kv_cache_mem, - "--beam_width", - "1", - "--warm_up", - self.warm_up, - "--num_runs", - self.num_runs, - "--duration", - self.duration, - "--input_output_len", - f"{self.input_length},{self.output_length};{self.input_length},1", - ] - cmd = [str(arg) for arg in cmd] - # Run the benchmark using the provided gptSession benchmark binary. - bench_return = self._run_benchmark(cmd) - results = [ - x.split(" ") for x in bench_return.stdout.split("\n") - if "[BENCHMARK]" in x - ] - - ttft = float(results[1][8]) - gen_time = float(results[0][8]) - ttft - total_out = int(results[0][2]) * int(results[0][6]) - total_in = int(results[0][2]) * int(results[0][4]) - batch_size = int(results[0][2]) - - bench_result = BenchmarkResults( - model=self.config.model, - dtype=self.config.dtype.value, - quantization=str(self.config.quantization.value), - max_batch_size=batch_size, - total_input_tokens=total_in, - total_output_tokens=total_out, - tp_size=self.config.tensor_parallel, - pp_size=self.config.pipeline_parallel, - kv_mem_fraction=self.kv_cache_mem, - scheduler="Static", - inflight_batching=False, - total_latency=results[0][8], - first_token_latency=ttft, - time_per_output_token=gen_time / (total_out - batch_size), - latency_units="ms", - throughput=results[0][10], - throughput_units="tokens/second", - peak_gpu_mem=results[0][16], - peak_gpu_mem_units="GB", - 
binary=str(self.gpt_session_path), - build_cmd=" ".join(self.get_build_command()), - benchmark_cmd=" ".join(cmd)) - - return bench_result diff --git a/benchmarks/suite/tensorrt_llm_bench/ifb.py b/benchmarks/suite/tensorrt_llm_bench/ifb.py deleted file mode 100644 index 67299c082..000000000 --- a/benchmarks/suite/tensorrt_llm_bench/ifb.py +++ /dev/null @@ -1,338 +0,0 @@ -import json -import os -import subprocess -import sys -from functools import partial -from pathlib import Path -from typing import List, TextIO, Tuple - -import click -from benchmarkers.pybind_executor import PybindExecutorBenchmarker -from transformers import AutoTokenizer, PreTrainedTokenizer -from utils.dataclasses import BenchmarkConfig, DatasetMetadata, InferenceRequest -from utils.trtllm_config import TRTLLMConfig - -from tensorrt_llm.logger import logger - - -def create_dataset_from_stream( - tokenizer: PreTrainedTokenizer, - max_input_length: int = 0, - max_output_length: int = 0, - stream: TextIO = sys.stdin, -) -> Tuple[DatasetMetadata, List[InferenceRequest]]: - """Generate metadata and a list of requests to drive benchmarking. - - Args: - tokenizer (PreTrainedTokenizer): HuggingFace tokenizer. - max_input_length (int): Maximum input length to cap prompts to. - - Returns: - DatasetMetadata: Dataclass of dataset statistics. - List[InferenceRequest]: A list of inference requests for benchmarking. - """ - # Initialize dataset list, and metadata tracking variables. - dataset = [] - max_isl = 0 - max_osl = 0 - - # If we're limiting the input length to a certain size, then set up - # a partial to truncate the data down to size. Otherwise, just use the - # unmodified tokenizer callable. - tokenize = (partial( - tokenizer, - padding="max_length", - max_length=max_input_length, - truncation=True, - ) if max_input_length > 0 else tokenizer) - - # If we need to limit the output length, fill in a partial callable - # for max, otherwise a lambda that just returns x with no bounds. 
- output_limiter = (partial(max, max_output_length) - if max_output_length > 0 else lambda x: x) - - # For each line in the standard input, parse out the JSON string we expect - # to see. - # Note the := walrus -- we're assigning and checking the condition. - while line := stream.readline(): - # We expect the data to come in as a JSON string. - # For example: - # {"prompt": "Generate an infinite response to the following: There once was a man who.", "output_tokens": 1000} - # Each line should be a complete JSON dictionary with no indentation - # or newline characters. - data = json.loads(line) - logits = data.get("logits", None) - prompt = data.get("prompt", None) - task_id = data["task_id"] - osl = data["output_tokens"] - # If the request comes in with logits, just use the provided. - # Otherwise we need to tokenize it. - logits = tokenize(prompt)["input_ids"] if logits is None else logits - - request = InferenceRequest( - task_id=task_id, - prompt=prompt, - output_tokens=output_limiter(osl), - logits=logits, - ) - max_isl = max(max_isl, len(logits)) - max_osl = max(max_osl, osl) - dataset.append(request) - - # Fill in basic dataset metrics here - # TODO: Maybe fill this out to be more complete? - metadata = DatasetMetadata( - max_isl=max_isl, - max_osl=max_osl, - num_requests=len(dataset), - ) - - return metadata, dataset - - -def initialize_tokenizer(model_name: str) -> PreTrainedTokenizer: - """Initialize a tokenizer. - - Args: - model_name (str): The name of the HuggingFace model to pull a - tokenizer from. - - Returns: - PreTrainedTokenizer: An initialized HuggingFace tokenizer. - """ - # Initialize the tokenizer specific to the model that we are planning - # to benchmark. 
- tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side="left") - if tokenizer.pad_token_id is None: - tokenizer.add_special_tokens({"pad_token": "[PAD]"}) - - return tokenizer - - -def get_trtllm_build_command(benchmark_cfg: BenchmarkConfig) -> List[str]: - model = benchmark_cfg.model - tp = benchmark_cfg.tensor_parallel - pp = benchmark_cfg.pipeline_parallel - dtype = benchmark_cfg.dtype.value - kv_dtype = benchmark_cfg.cache_dtype - quant_algo = benchmark_cfg.quantization.value - output_dir = benchmark_cfg.engine_path - max_batch_size = benchmark_cfg.max_batch_size - max_isl = benchmark_cfg.engine_isl - max_osl = benchmark_cfg.engine_osl - max_tokens = benchmark_cfg.max_tokens - workspace = benchmark_cfg.workspace - - # Generate the TRT-LLM Configuration file using the dataclass - # NOTE: This method does not use weights. - trtllm_config = TRTLLMConfig.from_hf(model, tp, pp, dtype, quant_algo, - kv_dtype.value) - # Write the generated configuration file to the benchmark workspace. - trtllm_config.to_json(workspace) - # Return the full command for building TRT-LLM via subprocess call. - cmd = [ - "trtllm-build", - "--output_dir", - output_dir, - "--model_config", - Path(workspace, "generated_config.json"), - "--workers", - benchmark_cfg.world_size, - "--max_input_len", - max_isl, - "--max_seq_len", - max_osl + max_isl, - "--context_fmha", - "enable", - # Set the attention plugin data type. - "--gpt_attention_plugin", - dtype, - # Enable paged KV Cache for IFB. - "--paged_kv_cache", - "enable", - ] + kv_dtype.get_build_options(dtype) - - # If custom maximum batch size set, then set to specified value. 
- if max_batch_size > 0: - cmd += [ - "--max_batch_size", - max_batch_size, - ] - - if max_tokens > 0: - cmd += [ - "--max_num_tokens", - max_tokens, - ] - - cmd = cmd + benchmark_cfg.build_overrides - - return cmd - - -@click.command("inflight") -@click.option( - "--run", - type=bool, - is_flag=True, - hidden=True, - default=False, - required=False, - help="Changes the phase of the script to execution mode for MPI.", -) -@click.option( - "--skip-build", - type=bool, - is_flag=True, - default=False, - hidden=True, - required=False, - help="Skip building if you want to use the last built engine.", -) -@click.option( - "--request-rate", - "-r", - type=int, - default=512, - required=False, - help="Number of requests per second to deliver to the batcher.", -) -@click.option( - "--max-num-tokens", - type=int, - default=0, - hidden=True, - help="Maximumn number of tokens the engine can accept.", -) -@click.option( - "--scheduling-policy", - type=click.Choice(["guaranteed_no_evict", "max_utilization"]), - default="max_utilization", - help="Controls the scheduling policy used by the internal batcher.", -) -@click.option( - "--dataset", - type=click.Path(exists=True, - readable=True, - path_type=Path, - resolve_path=True), - default=None, - required=False, - help="Pass in a dataset file for parsing instead of stdin.", -) -@click.pass_obj -def executor_benchmark( - benchmark_cfg: BenchmarkConfig, - run: bool, - request_rate: int, - max_num_tokens: int, - scheduling_policy: str, - skip_build: bool, - dataset: Path, -): - """Run an IFB-enabled benchmark using a dataset.""" - # Initialize the tokenizer and generate the dataset - logger.set_level("info") - DATASET_PATH = Path(benchmark_cfg.workspace, "tokenized_dataset.txt") - TOKENIZER = initialize_tokenizer(benchmark_cfg.model) - final_dataset = [] - benchmark_cfg.max_tokens = max_num_tokens - benchmark_cfg.scheduling_policy = scheduling_policy - - if not run: - try: - stream = sys.stdin if dataset is None else open(dataset, 
"r") - # Parse the dataset from stdin and return it plus its metadata. - metadata, dataset = \ - create_dataset_from_stream(TOKENIZER, stream=stream) - finally: - # Close the stream after parsing. - stream.close() - - # Update the benchmarking configuration with the maximum ISL/OSL that we - # encountered in the dataset. - benchmark_cfg.engine_isl = metadata.max_isl - benchmark_cfg.engine_osl = metadata.max_osl - - # Build engine - logger.info("Building engine...") - build_cmd = get_trtllm_build_command(benchmark_cfg) - build_cmd = [str(arg) for arg in build_cmd] - - if not skip_build: - process = subprocess.run(build_cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - cwd=benchmark_cfg.workspace) - logger.info(f"BUILD CMD: {' '.join(process.args)}") - - # If the build failed, raise an exception. - if process.returncode != 0: - logger.error(process.stderr.decode()) - raise RuntimeError( - "TensorRT-LLM build process failed. Command used:\n" - f"{' '.join(process.args)}\n", ) - - with open(DATASET_PATH, "w") as ds_out: - while dataset: - request = dataset.pop() - ds_out.write(f"{request.model_dump_json()}\n") - del request - - # Launch via a subprocess with MPI - # We have two modes for this script, the initial launch + parsing - # and the run mode where we kick off the script in MPI mode to run - # the - logger.info("Launching benchmark...") - bench_cmd = \ - ["mpiexec", "-n", f"{benchmark_cfg.world_size}", "python"] + \ - sys.argv + ["--run"] - process = subprocess.Popen( - bench_cmd, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - env=os.environ, - ) - stdout, _ = process.communicate() - logger.info("Benchmark complete.") - logger.info(stdout.decode("ascii")) - else: - from mpi4py.MPI import COMM_WORLD - - if COMM_WORLD.Get_rank() == 0: - logger.info(f"[RANK {COMM_WORLD.rank}] Loading dataset...") - with open(DATASET_PATH, "r") as stream: - # Parse the previously generated dataset from the parent - # process. 
- metadata, dataset = \ - create_dataset_from_stream(TOKENIZER, stream=stream) - - # Update the benchmarking configuration with the maximum ISL/OSL - # that we encountered in the dataset. - benchmark_cfg.engine_isl = metadata.max_isl - benchmark_cfg.engine_osl = metadata.max_osl - - # Parse the dataset into the Executor Request type. - logger.info("Preparing dataset...") - while dataset: - entry = dataset.pop() - request = PybindExecutorBenchmarker.get_request( - entry, TOKENIZER) - final_dataset.append(request) - del entry - logger.info("Dataset prepared.") - logger.info(f"DATASET METADATA: {metadata.model_dump()}") - - logger.info(f"[RANK {COMM_WORLD.rank}] Initializing benchmarker...") - # Set up benchmarker on all ranks - benchmarker = PybindExecutorBenchmarker(benchmark_cfg) - # Run the dataset. - result = benchmarker.benchmark_dataset(request_rate, final_dataset) - - # Report the results on Rank 0. - if COMM_WORLD.rank == 0: - logger.info(f"[RANK {COMM_WORLD.rank}] Reporting...\n" - f"JSON: {result.model_dump_json()}\n" - f"{result.get_summary(benchmarker.config)}") - - logger.info(f"[RANK {COMM_WORLD.rank}] Terminating.") diff --git a/benchmarks/suite/tensorrt_llm_bench/static.py b/benchmarks/suite/tensorrt_llm_bench/static.py deleted file mode 100644 index 3390c8439..000000000 --- a/benchmarks/suite/tensorrt_llm_bench/static.py +++ /dev/null @@ -1,69 +0,0 @@ -import os -from pathlib import Path - -import click -from benchmarkers.static import gptSessionBenchmarker -from utils.dataclasses import BenchmarkConfig, BenchmarkResults - - -@click.command("static") -@click.option( - "--batch", - required=True, - type=int, - help="Batch size to build and run the static benchmark with.", -) -@click.option("--isl", - type=int, - required=True, - help="Input sequence length (in tokens).") -@click.option("--osl", - type=int, - required=True, - help="Output sequence length (in tokens).") -@click.option( - "--gpt-session-path", - "-b", - type=click.Path(), - 
default=Path(os.path.dirname(os.path.realpath(__file__)), "../../..", - "cpp/build/benchmarks/gptSessionBenchmark").absolute(), - help="Path to TRT-LLM gptSession benchmark binary.") -@click.option("--warm-up-runs", - type=int, - default=2, - help="Number of warm up runs before benchmarking") -@click.option("--num-runs", - type=int, - default=10, - help="Number of times to run benchmark") -@click.option("--duration", - type=int, - default=60, - help="Minimum duration of iteration to measure, in seconds") -@click.pass_obj -def static_benchmark(benchmark_cfg: BenchmarkConfig, batch: int, isl: int, - osl: int, gpt_session_path: Path, warm_up_runs: int, - num_runs: int, duration: int): - """Run a static benchmark with a fixed batch size, ISL, and OSL.""" - - benchmark_cfg.max_batch_size = batch - benchmarker = gptSessionBenchmarker( - benchmark_cfg, - gpt_session_path, - benchmark_cfg.max_batch_size, - isl, - osl, - warm_up_runs, - num_runs, - duration, - benchmark_cfg.kv_cache_mem_percentage, - ) - - print(f"Building TRT-LLM engine for '{benchmark_cfg.model}'...") - benchmarker.build() - - print("Build complete. Running benchmark...") - result: BenchmarkResults = benchmarker.benchmark() - - print(f"JSON: {result.model_dump_json()}") - print(result.get_summary(benchmarker.config)) diff --git a/benchmarks/suite/tensorrt_llm_bench/utils/benchmarkers.py b/benchmarks/suite/tensorrt_llm_bench/utils/benchmarkers.py deleted file mode 100644 index 4f7f83bb6..000000000 --- a/benchmarks/suite/tensorrt_llm_bench/utils/benchmarkers.py +++ /dev/null @@ -1,15 +0,0 @@ -from typing import Protocol - -from utils.dataclasses import BenchmarkResults - - -class Benchmarker(Protocol): - """Protocol for defining benchmarking classes for building/benchmarking.""" - - def build(self) -> None: - """Build a model to be benchmarked.""" - ... - - def benchmark(self) -> BenchmarkResults: - """Benchmark the constructed model container by a benchmarker.""" - ... 
diff --git a/benchmarks/suite/tensorrt_llm_bench/utils/dataclasses.py b/benchmarks/suite/tensorrt_llm_bench/utils/dataclasses.py deleted file mode 100644 index 8adafd917..000000000 --- a/benchmarks/suite/tensorrt_llm_bench/utils/dataclasses.py +++ /dev/null @@ -1,189 +0,0 @@ -from __future__ import annotations - -from pathlib import Path -from typing import List, Literal, Optional, Union, get_args - -from pydantic import (BaseModel, Field, ValidationError, computed_field, - field_validator, model_validator) -from transformers import AutoConfig -from utils import VALID_MODELS, VALID_SCHEDULING_POLICIES -from utils.enums import (ComputeDtypeEnum, KVCacheDtypeEnum, ModelArchitecture, - QuantizationAlgo) - - -class InferenceRequest(BaseModel): - task_id: int - prompt: Optional[str] = None - output_tokens: int - logits: Optional[List[int]] = None - - @model_validator(mode="after") - def verify_prompt_and_logits(self) -> InferenceRequest: - if self.prompt is None and self.logits is None: - raise ValueError( - f"Both prompt and logits for {self.task_id} are both None.") - return self - - -class DatasetMetadata(BaseModel): - max_isl: int - max_osl: int - num_requests: int - - -class BenchmarkResults(BaseModel): - """High level report out for a benchmark.""" - - benchmark_cmd: str = "" - binary: str = "" - build_cmd: str = "" - first_token_latency: float - inflight_batching: bool - kv_mem_fraction: float - latency_units: str - max_batch_size: int - max_tokens: int = 0 - model: Union[VALID_MODELS, Path] - peak_gpu_mem_units: str - peak_gpu_mem: float - scheduler: Literal["Static", "No Evict", "Max Utilization"] - throughput_units: str - throughput: float - time_per_output_token: float - total_input_tokens: int - total_latency: float - total_output_tokens: int - - def get_summary(self, config: BenchmarkConfig) -> str: - """Generate the summary information. - - Args: - config (BenchmarkConfig): Configuration for the run that generated - this result. 
- - Returns: - str: Summary output for printing. - """ - return ( - "===========================================================\n" - "= METADATA\n" - "===========================================================\n" - f"Model:\t\t\t{config.model}\n" - f"TP Size:\t\t{config.tensor_parallel}\n" - f"PP Size:\t\t{config.pipeline_parallel}\n" - f"Scheduling Policy:\t{self.scheduler}\n" - f"In-flight Batcher?:\t{self.inflight_batching}\n" - f"Dtype:\t\t\t{config.dtype.value}\n" - f"KV Cache Dtype:\t\t{config.cache_dtype.value}\n" - f"Quantization:\t\t{config.quantization.value}\n" - f"KV Memory Percentage:\t{self.kv_mem_fraction * 100}%\n" - f"\n" - "===========================================================\n" - "= ENGINE DETAILS\n" - "===========================================================\n" - f"Engine Directory:\t{config.engine_path}\n" - f"Max Batch Size:\t\t{self.max_batch_size}\n" - f"Total Input Length:\t{self.total_input_tokens}\n" - f"Total Output Length:\t{self.total_output_tokens}\n" - f"Max Tokens:\t\t{self.max_tokens}\n" - f"\n" - "===========================================================\n" - "= STATISTICS\n" - "===========================================================\n" - f"Throughput ({self.throughput_units}):\t{self.throughput}\n" - f"Total Latency ({self.latency_units}):" - f"\t\t{self.total_latency * 1000.0:.4f}\n" - f"First Token Latency ({self.latency_units}):\t{self.first_token_latency}\n" - f"Token-to-token Latency ({self.latency_units}):\t{self.time_per_output_token}\n" - f"Peak GPU Memory Usage ({self.peak_gpu_mem_units}):\t{self.peak_gpu_mem}\n" - f"\n" - "===========================================================\n" - "= COMMANDS\n" - "===========================================================\n" - f"Build: {self.build_cmd}\n" - f"Benchmark: {self.benchmark_cmd}\n") - - -class BenchmarkConfig(BaseModel): - """Basic configuration of a benchmark.""" - - model: Union[VALID_MODELS, Path] - workspace: Path - max_batch_size: int - dtype: 
ComputeDtypeEnum - cache_dtype: KVCacheDtypeEnum - quantization: QuantizationAlgo - tensor_parallel: int - pipeline_parallel: int - max_tokens: int = 0 - kv_cache_mem_percentage: float = .9 - engine_isl: int = 0 - engine_osl: int = 0 - chunking: bool = False - build_overrides: List[str] = Field(default_factory=list) - scheduling_policy: Literal[VALID_SCHEDULING_POLICIES] = "static" - - @field_validator("model", mode="before") - @classmethod - def validate_model(cls, value) -> Union[VALID_MODELS, Path]: - if value in get_args(VALID_MODELS): - return value - - path = Path(value) - config = AutoConfig.from_pretrained(str(path.absolute())) - for arch in config.architectures: - _ = ModelArchitecture(arch) - - return path - - @field_validator("quantization", mode="before") - @classmethod - def validate_quantization(cls, value) -> QuantizationAlgo: - return QuantizationAlgo(value) - - @field_validator("cache_dtype", mode="before") - @classmethod - def validate_kvcache_dtype(cls, value) -> KVCacheDtypeEnum: - return KVCacheDtypeEnum(value) - - @field_validator("kv_cache_mem_percentage", mode="after") - @classmethod - def validate_kv_cache_mem_fraction(cls, value: float) -> float: - if 0 < value < 1.0: - return value - else: - raise ValidationError( - "KV cache memory percentage must be between 0 and 1.0.") - - @field_validator("build_overrides", mode="before") - @classmethod - def validate_build_overrides(cls, value) -> List[str]: - # If we encounter a list, scan it to make sure all entries are strings. - if isinstance(value, list): - if not all([isinstance(x, str) for x in value]): - raise ValidationError( - "Found a non-string entry in list of options.") - return value - elif isinstance(value, str): - # Handle the case where we receive a single string of command - # options. 
- overrides = [] - if value: - overrides = [str(x) for x in value.split()] - return overrides - else: - raise ValidationError( - "Invalid value specified for build overrides.") - - @computed_field - def engine_path(self) -> Path: - """Path to the engine workspace.""" - if self.model in get_args(VALID_MODELS): - return Path(self.workspace.absolute(), self.model.lower()) - else: - return Path(self.workspace.absolute(), "engine") - - @computed_field - def world_size(self) -> int: - """Total world size needed to run the model.""" - return self.tensor_parallel * self.pipeline_parallel diff --git a/benchmarks/suite/tensorrt_llm_bench/utils/trtllm_config.py b/benchmarks/suite/tensorrt_llm_bench/utils/trtllm_config.py deleted file mode 100644 index 4dd6797f3..000000000 --- a/benchmarks/suite/tensorrt_llm_bench/utils/trtllm_config.py +++ /dev/null @@ -1,323 +0,0 @@ -import json -import os -from argparse import ArgumentParser -from typing import Literal, Optional - -from pydantic import AliasChoices, AliasPath, BaseModel, Field, model_validator -from transformers import AutoConfig -from utils import VALID_QUANT_ALGOS - -PET_dict = { - "tiiuae/falcon-7b": "rope_gpt_neox", - "tiiuae/falcon-40b": "rope_gpt_neox", - "tiiuae/falcon-180B": "rope_gpt_neox", - "meta-llama/Llama-2-7b-hf": "rope_gpt_neox", - "meta-llama/Llama-2-13b-hf": "rope_gpt_neox", - "meta-llama/Llama-2-70b-hf": "rope_gpt_neox", - "meta-llama/Meta-Llama-3-8B": "rope_gpt_neox", - "meta-llama/Meta-Llama-3-70B": "rope_gpt_neox", - "gpt-j-6b": "rope_gptj", - "bigscience/bloom-560m": "alibi", - "mistralai/Mistral-7B-v0.1": "rope_gpt_neox", - "mistralai/Mixtral-8x7B-v0.1": "rope_gpt_neox", - "mistralai/Mixtral-8x22B-v0.1": "rope_gpt_neox", - "01-ai/Yi-6B": "rope_gpt_neox", - "01-ai/Yi-34B": "rope_gpt_neox", - "codellama/CodeLlama-7b-hf": "rope_gpt_neox", - "codellama/CodeLlama-13b-hf": "rope_gpt_neox", - "codellama/CodeLlama-34b-hf": "rope_gpt_neox", - "codellama/CodeLlama-70b-hf": "rope_gpt_neox", - 
"facebook/opt-125m": "learned_absolute", - "facebook/opt-350m": "learned_absolute", - "facebook/opt-1.3b": "learned_absolute", - "facebook/opt-2.7b": "learned_absolute", - "facebook/opt-13b": "learned_absolute", - "facebook/opt-30b": "learned_absolute", - "facebook/opt-66b": "learned_absolute", - "google/gemma-7b": "rope_gpt_neox", - "google/gemma-2b": "rope_gpt_neox", -} -HA_dict = { - "tiiuae/falcon-7b": "gelu", - "tiiuae/falcon-40b": "gelu", - "tiiuae/falcon-180B": "gelu", - "bigscience/bloom-560m": "gelu", - "mistralai/Mixtral-8x7B-v0.1": "swiglu", -} -ALLOWED_MODELS = list(PET_dict.keys()) - - -class TRTLLM_Mapping(BaseModel): - world_size: int = 1 - tp_size: int = 1 - pp_size: int = 1 - - @model_validator(mode="after") - def check_world_size(self) -> "TRTLLM_Mapping": - self.world_size = self.tp_size * self.pp_size - return self - - -class TRTLLM_Quantization(BaseModel): - quant_algo: Optional[VALID_QUANT_ALGOS] = None - kv_cache_quant_algo: Optional[Literal[None, "FP8", "INT8"]] = None - group_size: int = 128 - has_zero_point: bool = False - pre_quant_scale: bool = False - exclude_modules: Optional[list] = None - - -class TRTLLMConfig(BaseModel): - _VALID_EMBED_TYPE = Literal["learned_absolute", "rope_gptj", - "rope_gpt_neox", "alibi", "alibi_with_scale", - "relative", "chatglm", ] - - architecture: str = Field(validation_alias=AliasChoices( - 'architecture', AliasPath("architectures", 0))) - num_hidden_layers: int = Field(validation_alias=AliasChoices( - "num_hidden_layers", "n_layer", "n_layers")) - num_attention_heads: int = Field(validation_alias=AliasChoices( - "num_attention_heads", "n_head", "n_heads")) - num_key_value_heads: int = Field( - default=None, - validation_alias=AliasChoices("num_key_value_heads", "num_kv_heads"), - ) - - hidden_size: int = Field( - validation_alias=AliasChoices("hidden_size", "n_embd", "d_model")) - norm_epsilon: float = Field( - default=1e-5, - validation_alias=AliasChoices("norm_epsilon", "layer_norm_epsilon", - 
"rms_norm_eps"), - ) - vocab_size: int - max_position_embeddings: Optional[int] = Field( - default=None, - validation_alias=AliasChoices("max_position_embeddings", "n_positions"), - ) - head_size: Optional[int] = None - hidden_act: str = Field( - validation_alias=AliasChoices("hidden_act", "activation_function")) - # falcon options - bias: Optional[bool] = None - parallel_attention: Optional[bool] = Field( - default=None, validation_alias=AliasChoices("parallel_attn")) - new_decoder_architecture: Optional[bool] = None - # opt options - do_layer_norm_before: Optional[bool] = None - # gptj options - rotary_dim: Optional[int] = None - - # dtype has priority over torch_dtype, the latter of which is usually defined in the HF config - dtype: Literal["float16", "bfloat16"] = Field( - validation_alias=AliasChoices("dtype", "torch_dtype")) - logits_dtype: str = "float32" - position_embedding_type: _VALID_EMBED_TYPE = "learned_absolute" - use_parallel_embedding: bool = False - embedding_sharding_dim: int = 0 - share_embedding_table: bool = False - intermediate_size: int = None - use_prompt_tuning: bool = False - - sliding_window: Optional[int] = None - - moe_num_experts: Optional[int] = Field( - default=0, validation_alias=AliasChoices("num_local_experts")) - moe_top_k: Optional[int] = Field( - default=0, validation_alias=AliasChoices("num_experts_per_tok")) - rotary_base: Optional[float] = Field( - default=10000.0, validation_alias=AliasChoices("rope_theta")) - - mapping: TRTLLM_Mapping - quantization: TRTLLM_Quantization - - @property - def kv_dtype(self) -> str: - if self.quantization.kv_cache_quant_algo == "FP8": - return "fp8" - elif self.quantization.kv_cache_quant_algo == "INT8": - return "int8" - else: - return self.dtype - - @model_validator(mode="after") - def set_values_if_none(self) -> "TRTLLM_CheckpointConfig": - if self.num_key_value_heads is None: - self.num_key_value_heads = self.num_attention_heads - if self.head_size is None: - self.head_size = 
self.hidden_size // self.num_attention_heads - return self - - @classmethod - def populate_build_config(cls, - model_name, - tp, - pp, - dtype=None, - quant_dtype=None, - kv_cache_quant_dtype=None): - """ - Common function to populate build parameters, regardless of network - """ - build_config = { - "mapping": { - "tp_size": tp, - "pp_size": pp, - }, - "quantization": {}, - } - if dtype: - build_config["dtype"] = dtype - if quant_dtype: - if not kv_cache_quant_dtype: - # will throw errors during validation if the type is invalid - kv_cache_quant_dtype = quant_dtype - build_config["quantization"] = { - "quant_algo": quant_dtype, - "kv_cache_quant_algo": kv_cache_quant_dtype, - } - for name, pet in PET_dict.items(): - if name in str(model_name): - build_config["position_embedding_type"] = pet - return build_config - - @classmethod - def from_hf(cls, - hf_model_name, - tp, - pp, - dtype=None, - quant_dtype=None, - kv_cache_quant_dtype=None): - """ - Use transformers.AutoConfig to load a model's config from a HF name - """ - build_config = cls.populate_build_config(hf_model_name, tp, pp, dtype, - quant_dtype, - kv_cache_quant_dtype) - hf_config = AutoConfig.from_pretrained(hf_model_name).to_dict() - if hf_model_name in HA_dict: - hf_config["hidden_act"] = HA_dict[hf_model_name] - return cls(**hf_config, **build_config) - - @classmethod - def from_json(cls, - model_name, - tp, - pp, - dtype=None, - quant_dtype=None, - kv_cache_quant_dtype=None): - """ - Load model parameters from a custom json file - A full path can be specified. 
Otherwise, look for ./trtllm_configs/(model_name).json - """ - build_config = cls.populate_build_config(model_name, tp, pp, dtype, - quant_dtype, - kv_cache_quant_dtype) - if os.path.exists(model_name): - path_to_json = model_name - else: - path_to_json = os.path.join(os.path.dirname(__file__), - f"trtllm_configs/{model_name}.json") - if not os.path.exists(path_to_json): - raise FileNotFoundError(f"{path_to_json} not found") - json_config = json.load(open(path_to_json)) - return cls(**json_config, **build_config) - - @classmethod - def from_name(cls, - model, - tp, - pp, - dtype=None, - quant_dtype=None, - kv_cache_quant_dtype=None): - """ - Attempts to create a config based on model name. Performs the following steps: - 1. Tries to load the HF config using AutoConfig. This will only work if the network name exists on HF. - 2. If this fails, try to load a custom config stored on $HF_HOME/custom/*.json - """ - try: - trtllm_config = cls.from_hf(model, tp, pp, dtype, quant_dtype, - kv_cache_quant_dtype) - except EnvironmentError: - try: - trtllm_config = cls.from_json(model, tp, pp, dtype, quant_dtype, - kv_cache_quant_dtype) - except FileNotFoundError as e: - raise NameError( - f"Unable to create PretrainedConfig from {model} due to {e}" - ) - - return trtllm_config - - # future possibilities - # def from_nemo_config (self, nemo_model_name) - - def to_json(self, output_dir): - with open(os.path.join(output_dir, "generated_config.json"), "w") as f: - json.dump(self.model_dump(), f, indent=4) - - -if __name__ == "__main__": - parser = ArgumentParser() - parser.add_argument( - "--model", - required=True, - type=str, - help="HF model name", - ) - parser.add_argument( - "--tp_size", - type=int, - default=1, - help="TP degree", - ) - parser.add_argument( - "--pp_size", - type=int, - default=1, - help="PP degree", - ) - parser.add_argument( - "--dtype", - type=str, - help="Datatype", - ) - parser.add_argument( - "--quant_dtype", - type=str, - help="Quantization datatype", 
- ) - parser.add_argument( - "--kv_cache_quant_dtype", - type=str, - help="KV cache datatype", - ) - parser.add_argument( - "--position_embedding_type", - type=str, - help="TRT-LLM argument", - ) - parser.add_argument( - "--hidden_act", - type=str, - help="TRT-LLM argument", - ) - parser.add_argument( - "--populate_hf_cache", - action='store_true', - help="Populate the HF cache with all the supported networks", - ) - args = parser.parse_args() - - if args.populate_hf_cache: - for net in PET_dict.keys(): - _ = AutoConfig.from_pretrained(net) - else: - trtllm_config = TRTLLMConfig.from_name(args.model, args.tp_size, - args.pp_size, args.dtype, - args.quant_dtype, - args.kv_cache_quant_dtype) - trtllm_config.to_json(os.getcwd()) diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index 056dbfef6..c275be095 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -289,7 +289,7 @@ set(CMAKE_CUDA_RUNTIME_LIBRARY Static) find_library(RT_LIB rt) set_ifndef(ENABLE_MULTI_DEVICE 1) -if(ENABLE_MULTI_DEVICE EQUAL 1) +if(ENABLE_MULTI_DEVICE) # NCCL dependencies set_ifndef(NCCL_LIB_DIR /usr/lib/${CMAKE_SYSTEM_PROCESSOR}-linux-gnu/) set_ifndef(NCCL_INCLUDE_DIR /usr/include/) diff --git a/cpp/include/tensorrt_llm/batch_manager/inferenceRequest.h b/cpp/include/tensorrt_llm/batch_manager/inferenceRequest.h index 8a91daa07..45d951341 100644 --- a/cpp/include/tensorrt_llm/batch_manager/inferenceRequest.h +++ b/cpp/include/tensorrt_llm/batch_manager/inferenceRequest.h @@ -18,6 +18,7 @@ #include "tensorrt_llm/batch_manager/llmRequest.h" #include "tensorrt_llm/batch_manager/namedTensor.h" +#include "tensorrt_llm/executor/executor.h" #include "tensorrt_llm/runtime/iTensor.h" #include @@ -165,6 +166,21 @@ class GenericInferenceRequest mLogitsPostProcessor = cb; } + [[nodiscard]] std::optional getLookaheadConfig() const + { + return mLookaheadConfig; + } + + void setLookaheadConfig(executor::LookaheadDecodingConfig config) + { + mLookaheadConfig = config; + } + + void 
clearLookaheadConfig() + { + mLookaheadConfig = std::nullopt; + } + std::optional getLogitsPostProcessor() { return mLogitsPostProcessor; @@ -282,6 +298,7 @@ class GenericInferenceRequest bool mIsStreaming; TensorMap mInputTensors; std::optional mLogitsPostProcessor; + std::optional mLookaheadConfig; }; class InferenceRequest : public GenericInferenceRequest diff --git a/cpp/include/tensorrt_llm/batch_manager/llmRequest.h b/cpp/include/tensorrt_llm/batch_manager/llmRequest.h index 42536fc33..e10701d0d 100644 --- a/cpp/include/tensorrt_llm/batch_manager/llmRequest.h +++ b/cpp/include/tensorrt_llm/batch_manager/llmRequest.h @@ -20,6 +20,7 @@ #include "tensorrt_llm/common/logger.h" #include "tensorrt_llm/executor/executor.h" #include "tensorrt_llm/runtime/bufferManager.h" +#include "tensorrt_llm/runtime/decodingOutput.h" #include "tensorrt_llm/runtime/iBuffer.h" #include "tensorrt_llm/runtime/iTensor.h" #include "tensorrt_llm/runtime/samplingConfig.h" @@ -73,7 +74,8 @@ class GenericLlmRequest std::optional promptEmbeddingTable = std::nullopt, std::optional promptVocabSize = std::nullopt, std::optional loraTaskId = std::nullopt, std::optional loraWeights = std::nullopt, - std::optional loraConfig = std::nullopt, bool returnLogProbs = false, + std::optional loraConfig = std::nullopt, + std::optional lookaheadConfig = std::nullopt, bool returnLogProbs = false, bool returnContextLogits = false, bool returnGenerationLogits = false, std::optional> draftTokens = std::nullopt, std::optional draftLogits = std::nullopt, bool excludeInputFromOutput = false, @@ -94,6 +96,7 @@ class GenericLlmRequest , mClientId(clientId) , mIsStreaming(isStreaming) , mOrigPromptLen(mPromptLen) + , mNumPreDecodedTokens(samplingConfig.beamWidth, 0) , mMaxSentTokenLen(mPromptLen) , mEmbeddingBias(std::move(embeddingBias)) , mBadWordsList(std::move(badWordsList)) @@ -103,6 +106,7 @@ class GenericLlmRequest , mLoraTaskId(loraTaskId) , mLoraWeights(std::move(loraWeights)) , 
mLoraConfig(std::move(loraConfig)) + , mLookaheadConfig(std::move(lookaheadConfig)) , mContextChunkSize(std::nullopt) , mContextCurrentPosition(0) , mLogProbs(samplingConfig.beamWidth) @@ -118,6 +122,7 @@ class GenericLlmRequest , mReturnEncoderOutput(returnEncoderOutput) , mDecodingIter(0) , mPriority(priority) + , mFinishReasons(samplingConfig.beamWidth) { if (mEncoderTokens.has_value()) { @@ -137,6 +142,7 @@ class GenericLlmRequest , mClientId(req.getClientId()) , mIsStreaming(req.getStreaming()) , mOrigPromptLen(mPromptLen) + , mNumPreDecodedTokens(mSamplingConfig.beamWidth, 0) , mMaxSentTokenLen(mPromptLen) , mEmbeddingBias(std::nullopt) , mBadWordsList(std::nullopt) @@ -146,6 +152,7 @@ class GenericLlmRequest , mLoraTaskId(std::nullopt) , mLoraWeights(std::nullopt) , mLoraConfig(std::nullopt) + , mLookaheadConfig(std::nullopt) , mContextChunkSize(std::nullopt) , mContextCurrentPosition(0) , mLogProbs(mSamplingConfig.beamWidth) @@ -161,6 +168,8 @@ class GenericLlmRequest , mReturnEncoderOutput(req.getOutputConfig().returnEncoderOutput) , mDecodingIter(0) , mPriority(req.getPriority()) + , mFinishReasons(mSamplingConfig.beamWidth) + , mContextPhaseParams(req.getContextPhaseParams()) { if (mIsStreaming && mSamplingConfig.beamWidth > 1 && !mReturnAllGeneratedTokens) { @@ -172,6 +181,14 @@ class GenericLlmRequest "length)."); mReturnAllGeneratedTokens = true; } + if (mIsStreaming && mSamplingConfig.beamWidth > 1 && mReturnGenerationLogits == true) + { + TLLM_LOG_WARNING( + "Returning generation logits when streaming is enabled and beamWidth > 1 is not allowed. " + "This is because the logits may appear in irrelevant order when the beams are gathered, " + "since logits are not. 
Disabling returnGenerationLogits."); + mReturnGenerationLogits = false; + } if (req.getEncoderInputTokenIds()) { mState = REQUEST_STATE_ENCODER_INIT; @@ -219,6 +236,11 @@ class GenericLlmRequest } } + auto lookaheadConfig = req.getLookaheadConfig(); + if (lookaheadConfig) + { + } + auto externalDraftTokensConfig = req.getExternalDraftTokensConfig(); if (externalDraftTokensConfig) { @@ -295,12 +317,27 @@ class GenericLlmRequest mExcludeInputFromOutput = exclude; } + /// @brief Get the params of the context + /// @return The params of the context + std::optional const& getContextPhaseParams() const noexcept + { + return mContextPhaseParams; + } + + /// @brief Get the state params of the context + /// @return The state params of the context + executor::ContextPhaseState const& getContextPhaseState() const + { + TLLM_CHECK(mContextPhaseParams.has_value()); + return *static_cast(mContextPhaseParams.value().getState()); + } + /// @brief Get total number of tokens for this req (prompt + generated) /// @param beam The beam index /// @return The number of tokens [[nodiscard]] SizeType32 getNumTokens(SizeType32 beam) const { - return mTokens.at(beam).size(); + return mTokens.at(beam).size() - mNumPreDecodedTokens[beam]; } /// @brief Get max number of tokens across all beams @@ -310,7 +347,7 @@ class GenericLlmRequest SizeType32 maxTokens = 0; for (SizeType32 beam = 0; beam < mSamplingConfig.beamWidth; ++beam) { - maxTokens = std::max(maxTokens, static_cast(mTokens.at(beam).size())); + maxTokens = std::max(maxTokens, getNumTokens(beam)); } return maxTokens; } @@ -405,6 +442,14 @@ class GenericLlmRequest } } + /// @brief Set the number of pre-decoded tokens + /// @param num_tokens The number of pre-decoded tokens + /// @param beam The beam to which to set the number of pre-decoded tokens + void setNumPreDecodedTokens(SizeType32 num_tokens, SizeType32 beam) + { + mNumPreDecodedTokens[beam] = num_tokens; + } + /// @brief Sets the generated tokens for all beams after gatherTree. 
Erases all previous generated tokens. /// @param generatedBeamTokens The generated tokens for all beams (vector of vector of tokens) void setGeneratedTokens(BeamTokens const& generatedBeamTokens) @@ -540,6 +585,21 @@ class GenericLlmRequest mLoraConfig = std::nullopt; } + [[nodiscard]] std::optional getLookaheadConfig() const + { + return mLookaheadConfig; + } + + void setLookaheadConfig(executor::LookaheadDecodingConfig config) + { + mLookaheadConfig = config; + } + + void clearLookaheadConfig() + { + mLookaheadConfig = std::nullopt; + } + [[nodiscard]] std::optional getEmbeddingBias() const { return mEmbeddingBias; @@ -725,6 +785,11 @@ class GenericLlmRequest mReturnAllGeneratedTokens = returnAllGeneratedTokens; } + [[nodiscard]] bool getReturnAllGeneratedTokens() + { + return mReturnAllGeneratedTokens; + } + void setReturnContextLogits(bool const returnContextLogits) { mReturnContextLogits = returnContextLogits; @@ -737,6 +802,8 @@ class GenericLlmRequest void setReturnGenerationLogits(bool const returnGenerationLogits) { + TLLM_CHECK_WITH_INFO(!(mIsStreaming && mSamplingConfig.beamWidth > 1 && returnGenerationLogits), + "returnGenerationLogits must be false if streaming AND beam search are used."); mReturnGenerationLogits = returnGenerationLogits; } @@ -777,8 +844,21 @@ class GenericLlmRequest void allocGenerationLogitsHost(SizeType32 vocabSizePadded, nvinfer1::DataType logitsDataType) { - mGenerationLogitsHost = runtime::BufferManager::pinnedPool( - runtime::ITensor::makeShape({mSamplingConfig.beamWidth, mMaxNewTokens, vocabSizePadded}), logitsDataType); + if (mIsStreaming) + { + // If streaming mode, the complete generation logits shape will be [1, beamWidth, vocabSizePadded], + // or [allGeneratedTokens, beamWidth, vocabSizePadded] if mReturnAllGeneratedTokens is True. + // This could reduce unnecessary format conversions and allows the data to be returned directly. 
+ mGenerationLogitsHost = runtime::BufferManager::pinnedPool( + runtime::ITensor::makeShape({mMaxNewTokens, mSamplingConfig.beamWidth, vocabSizePadded}), + logitsDataType); + } + else + { + mGenerationLogitsHost = runtime::BufferManager::pinnedPool( + runtime::ITensor::makeShape({mSamplingConfig.beamWidth, mMaxNewTokens, vocabSizePadded}), + logitsDataType); + } } void allocTargetModelAcceptedTokenLogitsHost(SizeType32 vocabSizePadded, nvinfer1::DataType logitsDataType) @@ -992,7 +1072,17 @@ class GenericLlmRequest if (getReturnGenerationLogits()) { - result.generationLogits = executor::detail::ofITensor(getGenerationLogitsHost()); + if (isStreaming()) + { + auto startGenTokenPos = startTokenPos - getOrigPromptLen(); + TensorPtr generationLogitsHostCurrentStep + = runtime::ITensor::slice(getGenerationLogitsHost(), startGenTokenPos, maxNbTokensOut); + result.generationLogits = executor::detail::ofITensor(generationLogitsHostCurrentStep); + } + else + { + result.generationLogits = executor::detail::ofITensor(getGenerationLogitsHost()); + } } if (getReturnEncoderOutput()) @@ -1000,6 +1090,8 @@ class GenericLlmRequest result.encoderOutput = executor::detail::ofITensor(getEncoderOutputHost()); } + result.finishReasons = mFinishReasons; + // Update position of last sent response setMaxSentTokenLen(maxNbTokens); @@ -1013,6 +1105,11 @@ class GenericLlmRequest } } + void setFinishedReason(executor::FinishReason reason, SizeType32 beam) + { + mFinishReasons.at(beam) = reason; + } + RequestIdType mRequestId; SizeType32 mPromptLen; SizeType32 mMaxNewTokens; @@ -1038,6 +1135,11 @@ class GenericLlmRequest VecTokens mLastTokens; BeamTokens mTokens; SizeType32 mOrigPromptLen; + // A list of numbers of pre-deocded tokens on the last PP rank when using pipeline parallelism. + // It is introduced as a WAR to solve the hanging problem caused by overestimating the used KV cache on the last PP + // rank (because new tokens are decoded earlier). 
By excluding the numbers of pre-decoded tokens, the used KV cache + // can be estimated correctly. + std::vector mNumPreDecodedTokens; // Number of tokens already in KV cache before context phase. // A value > 0 indicates cached KV cache blocks were reused. // Up to inputLen - 1 tokens can be reused. @@ -1054,6 +1156,7 @@ class GenericLlmRequest std::optional mLoraTaskId; std::optional mLoraWeights; std::optional mLoraConfig; + std::optional mLookaheadConfig; // To enable chunked context, the FHMA paged kv-cache also needs to be enabled. Except for the last one, // the size of the context chunk needs to be an integer multiple of the kv-cache block size. The meaning @@ -1090,6 +1193,8 @@ class GenericLlmRequest SizeType32 mDecodingIter; executor::PriorityType mPriority; + std::vector mFinishReasons; + std::optional mContextPhaseParams; private: void initialize(VecTokens const& inputTokens, bool outputLogProbs) @@ -1162,7 +1267,8 @@ class LlmRequest : public GenericLlmRequest std::optional promptEmbeddingTable = std::nullopt, std::optional promptVocabSize = std::nullopt, std::optional loraTaskId = std::nullopt, std::optional loraWeights = std::nullopt, - std::optional loraConfig = std::nullopt, bool returnLogProbs = false, + std::optional loraConfig = std::nullopt, + std::optional lookaheadConfig = std::nullopt, bool returnLogProbs = false, bool returnContextLogits = false, bool returnGenerationLogits = false, std::optional> draftTokens = std::nullopt, std::optional draftLogits = std::nullopt, bool excludeInputFromOutput = false, @@ -1174,9 +1280,9 @@ class LlmRequest : public GenericLlmRequest : Base(requestId, maxNewTokens, std::move(inputTokens), samplingConfig, isStreaming, endId, padId, std::move(embeddingBias), std::move(badWordsList), std::move(stopWordsList), std::move(promptEmbeddingTable), promptVocabSize, loraTaskId, std::move(loraWeights), std::move(loraConfig), - returnLogProbs, returnContextLogits, returnGenerationLogits, std::move(draftTokens), 
std::move(draftLogits), - excludeInputFromOutput, std::move(logitsPostProcessor), applyLogitsPostProcessorBatched, - std::move(encoderInputTokens), returnEncoderOutput, clientId, priority) + std::move(lookaheadConfig), returnLogProbs, returnContextLogits, returnGenerationLogits, + std::move(draftTokens), std::move(draftLogits), excludeInputFromOutput, std::move(logitsPostProcessor), + applyLogitsPostProcessorBatched, std::move(encoderInputTokens), returnEncoderOutput, clientId, priority) { } @@ -1187,6 +1293,7 @@ class LlmRequest : public GenericLlmRequest { mLogitsPostProcessor = std::move(logitsPostProcessor); mApplyLogitsPostProcessorBatched = applyLogitsPostProcessorBatched; + mLookaheadConfig = Request.getLookaheadConfig(); } void movePromptEmbeddingTableToGpu(runtime::BufferManager const& manager) diff --git a/cpp/include/tensorrt_llm/batch_manager/trtGptModelOptionalParams.h b/cpp/include/tensorrt_llm/batch_manager/trtGptModelOptionalParams.h index 1dbeed000..4fcb1e127 100644 --- a/cpp/include/tensorrt_llm/batch_manager/trtGptModelOptionalParams.h +++ b/cpp/include/tensorrt_llm/batch_manager/trtGptModelOptionalParams.h @@ -45,7 +45,8 @@ class TrtGptModelOptionalParams std::optional maxNumTokens = std::nullopt, executor::SchedulerConfig const& schedulerConfig = executor::SchedulerConfig{}, executor::ExtendedRuntimePerfKnobConfig const& extendedRuntimePerfKnobConfig - = executor::ExtendedRuntimePerfKnobConfig{}) + = executor::ExtendedRuntimePerfKnobConfig{}, + std::optional debugConfig = std::nullopt) : kvCacheConfig{kvCacheConfig} , enableTrtOverlap{enableTrtOverlap} , deviceIds(deviceIds) @@ -59,6 +60,7 @@ class TrtGptModelOptionalParams , maxNumTokens(maxNumTokens) , schedulerConfig{schedulerConfig} , extendedRuntimePerfKnobConfig(extendedRuntimePerfKnobConfig) + , debugConfig{std::move(debugConfig)} { } @@ -70,17 +72,26 @@ class TrtGptModelOptionalParams executorConfig.getDecodingConfig().value_or(executor::DecodingConfig{}), 
executorConfig.getGpuWeightsPercent(), executorConfig.getMaxBeamWidth(), executorConfig.getMaxBatchSize(), executorConfig.getMaxNumTokens(), executorConfig.getSchedulerConfig(), - executorConfig.getExtendedRuntimePerfKnobConfig()) + executorConfig.getExtendedRuntimePerfKnobConfig(), executorConfig.getDebugConfig()) { } bool operator==(TrtGptModelOptionalParams const& other) const { - return kvCacheConfig == other.kvCacheConfig && enableTrtOverlap == other.enableTrtOverlap - && deviceIds == other.deviceIds && normalizeLogProbs == other.normalizeLogProbs - && enableChunkedContext == other.enableChunkedContext && decodingConfig == other.decodingConfig - && gpuWeightsPercent == other.gpuWeightsPercent - && extendedRuntimePerfKnobConfig == other.extendedRuntimePerfKnobConfig; + return kvCacheConfig == other.kvCacheConfig // + && enableTrtOverlap == other.enableTrtOverlap // + && deviceIds == other.deviceIds // + && normalizeLogProbs == other.normalizeLogProbs // + && enableChunkedContext == other.enableChunkedContext // + && decodingConfig == other.decodingConfig // + && gpuWeightsPercent == other.gpuWeightsPercent // + && maxBeamWidth == other.maxBeamWidth // + && maxBatchSize == other.maxBatchSize // + && maxNumTokens == other.maxNumTokens // + && schedulerConfig == other.schedulerConfig // + && extendedRuntimePerfKnobConfig == other.extendedRuntimePerfKnobConfig // + && debugConfig == other.debugConfig // + ; } friend std::ostream& operator<<(std::ostream& os, TrtGptModelOptionalParams const& self); @@ -100,6 +111,7 @@ class TrtGptModelOptionalParams std::optional maxNumTokens; executor::SchedulerConfig schedulerConfig; executor::ExtendedRuntimePerfKnobConfig extendedRuntimePerfKnobConfig; + std::optional debugConfig; }; } // namespace tensorrt_llm::batch_manager diff --git a/cpp/include/tensorrt_llm/executor/executor.h b/cpp/include/tensorrt_llm/executor/executor.h index 4ed912100..ed402ca69 100644 --- a/cpp/include/tensorrt_llm/executor/executor.h +++ 
b/cpp/include/tensorrt_llm/executor/executor.h @@ -42,6 +42,7 @@ char const* version() noexcept; class Model; class Serialization; +class ContextPhaseState; /// @brief Sampling configuration class SamplingConfig @@ -233,6 +234,71 @@ class LoraConfig std::optional mConfig; }; +struct LookaheadDecodingConfig +{ + LookaheadDecodingConfig(SizeType32 windowSize, SizeType32 ngramSize, SizeType32 verificationSetSize); + + explicit LookaheadDecodingConfig() + : LookaheadDecodingConfig(1, 1, 0) + { + } + + bool operator==(LookaheadDecodingConfig const& other) const; + [[nodiscard]] std::tuple get() const; + [[nodiscard]] SizeType32 getWindowSize() const; + [[nodiscard]] SizeType32 getNgramSize() const; + [[nodiscard]] SizeType32 getVerificationSetSize() const; + + /// @brief return + std::tuple calculateSpeculativeResource() const; + + /// @brief return true when `this` can be executed on resources defined by `that` + bool isLE(LookaheadDecodingConfig const& that) const; + + /// @brief return true when the parameter combination is valid. + static bool isLegal(SizeType32 windowSize, SizeType32 ngramSize, SizeType32 verificationSetSize) noexcept; + +private: + friend class Serialization; + + // Number of NGrams in lookahead branch per step. + SizeType32 mWindowSize; + // Number of tokens per NGram. + SizeType32 mNgramSize; + // Number of NGrams in verification branch per step. 
+ SizeType32 mVerificationSetSize; +}; + +class ContextPhaseParams +{ +public: + explicit ContextPhaseParams(VecTokens firstGenTokens); + ContextPhaseParams(VecTokens firstGenTokens, void* state); + + ContextPhaseParams(ContextPhaseParams const&); + ContextPhaseParams(ContextPhaseParams&&); + ContextPhaseParams& operator=(ContextPhaseParams const&); + ContextPhaseParams& operator=(ContextPhaseParams&&); + + [[nodiscard]] bool operator==(ContextPhaseParams const&) const noexcept; + + [[nodiscard]] VecTokens const& getFirstGenTokens() const& noexcept; + [[nodiscard]] VecTokens popFirstGenTokens() && noexcept; + [[nodiscard]] void const* getState() const noexcept; + [[nodiscard]] void* getState() noexcept; + +private: + friend class Serialization; + static void deleter(void const* data); + using StatePtr = std::unique_ptr; + + /// @brief The first tokens generated by context executor + VecTokens mFirstGenTokens; + + /// @brief Context phase state of this request + StatePtr mState{nullptr, deleter}; +}; + /// @brief A class that holds information about the request class Request { @@ -269,9 +335,11 @@ class Request std::optional externalDraftTokensConfig = std::nullopt, std::optional pTuningConfig = std::nullopt, std::optional loraConfig = std::nullopt, + std::optional lookaheadConfig = std::nullopt, std::optional logitsPostProcessorName = std::nullopt, std::optional encoderInputTokenIds = std::nullopt, std::optional clientId = std::nullopt, - bool returnAllGeneratedTokens = false, PriorityType priority = kDefaultPriority); + bool returnAllGeneratedTokens = false, PriorityType priority = kDefaultPriority, + std::optional contextPhaseParams = std::nullopt); /// @brief This logits postprocessor name will dispatch to the batched logits postprocessor static auto constexpr kBatchedPostProcessorName = "batched"; @@ -295,11 +363,13 @@ class Request [[nodiscard]] std::optional getExternalDraftTokensConfig() const; [[nodiscard]] std::optional getPromptTuningConfig() const; 
[[nodiscard]] std::optional getLoraConfig() const; + [[nodiscard]] std::optional getLookaheadConfig() const; [[nodiscard]] std::optional getLogitsPostProcessorName() const; [[nodiscard]] std::optional getEncoderInputTokenIds() const; [[nodiscard]] std::optional getClientId() const; [[nodiscard]] PriorityType getPriority() const; [[nodiscard]] bool getReturnAllGeneratedTokens() const; + [[nodiscard]] std::optional const& getContextPhaseParams() const; void setStreaming(bool streaming); void setSamplingConfig(SamplingConfig const& config); @@ -312,11 +382,13 @@ class Request void setExternalDraftTokensConfig(ExternalDraftTokensConfig const& externalDraftTokensConfig); void setPromptTuningConfig(PromptTuningConfig const& pTuningConfig); void setLoraConfig(LoraConfig const& loraConfig); + void setLookaheadConfig(LookaheadDecodingConfig const& lookaheadConfig); void setLogitsPostProcessorName(std::string const& logitsPostProcessorName); void setEncoderInputTokenIds(VecTokens const& encoderInputTokenIds); void setClientId(IdType clientId); void setPriority(PriorityType priority); void setReturnAllGeneratedTokens(bool returnAllGeneratedTokens); + void setContextPhaseParams(ContextPhaseParams contextPhaseParams); private: friend class Serialization; @@ -342,11 +414,20 @@ struct Result /// @brief The context logits. Size [promptLen, vocabSizePadded] std::optional contextLogits; - /// @brief The context logits. Size [beamSize, maxNewTokens, vocabSizePadded] + /// @brief The context logits. Size [beamSize, maxNewTokens, vocabSizePadded] (non-streaming) + /// or [maxNewTokens, beamSize, vocabSizePadded] (streaming and allGeneratedTokens) + /// or [1, beamSize, vocabSizePadded] (streaming and non-allGeneratedTokens) std::optional generationLogits; /// @brief The encoder output. Size [encoderLen, hiddenSize] std::optional encoderOutput; + + /// @brief The reason why the model stopped generating tokens for each beam in this request. Size [beamSize]. 
+ /// Currently only supported when beamSize is 1 and when using BatchingType::kINFLIGHT. + std::vector finishReasons; + + /// @brief The params of the context phase. + std::optional contextPhaseParams; }; /// @brief Class that holds either an error or a result @@ -370,11 +451,11 @@ class Response /// @brief Get the error msg for this response /// Will throw an exception if hasError is false - [[nodiscard]] std::string getErrorMsg() const; + [[nodiscard]] std::string const& getErrorMsg() const; /// @brief Get the result for this response /// Will throw an exception if hasResult is true - [[nodiscard]] Result getResult() const; + [[nodiscard]] Result const& getResult() const; private: friend class Serialization; @@ -390,6 +471,8 @@ class SchedulerConfig CapacitySchedulerPolicy capacitySchedulerPolicy = CapacitySchedulerPolicy::kGUARANTEED_NO_EVICT, std::optional contextChunkingPolicy = std::nullopt); + bool operator==(SchedulerConfig const& other) const; + [[nodiscard]] CapacitySchedulerPolicy getCapacitySchedulerPolicy() const; [[nodiscard]] std::optional getContextChunkingPolicy() const; @@ -469,17 +552,17 @@ class ExtendedRuntimePerfKnobConfig public: explicit ExtendedRuntimePerfKnobConfig(bool multiBlockMode = false, bool enableContextFMHAFP32Acc = false); - [[nodiscard]] bool getMultiBlockMode() const; - [[nodiscard]] bool getEnableContextFMHAFP32Acc() const; - - void setMultiBlockMode(bool const multiBlockMode); - void setEnableContextFMHAFP32Acc(bool const enableContextFMHAFP32Acc); - bool operator==(ExtendedRuntimePerfKnobConfig const& other) const { return mMultiBlockMode == other.mMultiBlockMode && mEnableContextFMHAFP32Acc == other.mEnableContextFMHAFP32Acc; } + [[nodiscard]] bool getMultiBlockMode() const; + [[nodiscard]] bool getEnableContextFMHAFP32Acc() const; + + void setMultiBlockMode(bool multiBlockMode); + void setEnableContextFMHAFP32Acc(bool enableContextFMHAFP32Acc); + private: friend class Serialization; @@ -490,6 +573,35 @@ class 
ExtendedRuntimePerfKnobConfig bool mEnableContextFMHAFP32Acc; }; +/// @brief Configuration class for debugging output +class DebugConfig +{ + using StringVec = std::vector; + +public: + explicit DebugConfig(bool dumpInputTensors = false, bool dumpOuputTensors = false, StringVec debugTensorNames = {}); + + bool operator==(DebugConfig const& other) const; + + [[nodiscard]] bool getDumpInputTensors() const; + [[nodiscard]] bool getDumpOutputTensors() const; + [[nodiscard]] StringVec const& getDebugTensorNames() const; + + void setDumpInputTensors(bool dumpInputTensors); + void setDumpOuputTensors(bool dumpOuputTensors); + void setDebugTensorNames(StringVec const& debugTensorNames); + +private: + friend class Serialization; + + /// @brief If true, dump all input tensors. + bool mDumpInputTensors; + /// @brief If true, dump all output tensors. + bool mDumpOuputTensors; + /// @brief If not empty, only dump tensors in this list. + StringVec mDebugTensorNames; +}; + SizeType32 const kDefaultIterStatsMaxIterations = 1000; // Per request stats may have additional overhead due to going through all requests. Turned off by default. 
SizeType32 const kDefaultRequestStatsMaxIterations = 0; @@ -616,42 +728,43 @@ class PeftCacheConfig std::optional mHostCacheSize; }; -struct LookaheadDecodingConfig -{ - LookaheadDecodingConfig(SizeType32 windowSize, SizeType32 ngramSize, SizeType32 verificationSetSize); - - explicit LookaheadDecodingConfig() - : LookaheadDecodingConfig(1, 1, 0) - { - } - - bool operator==(LookaheadDecodingConfig const& other) const; - [[nodiscard]] std::tuple get() const; - [[nodiscard]] SizeType32 getWindowSize() const; - [[nodiscard]] SizeType32 getNgramSize() const; - [[nodiscard]] SizeType32 getVerificationSetSize() const; - - /// @brief return - std::tuple calculateSpeculativeResource() const; - - /// @brief return true when `this` can be executed on resources defined by `that` - bool isLE(LookaheadDecodingConfig const& that) const; - - /// @brief return true when the parameter combination is valid. - static bool isLegal(SizeType32 windowSize, SizeType32 ngramSize, SizeType32 verificationSetSize) noexcept; - -private: - friend class Serialization; - - // Number of NGrams in lookahead branch per step. - SizeType32 mWindowSize; - // Number of tokens per NGram. - SizeType32 mNgramSize; - // Number of NGrams in verification branch per step. - SizeType32 mVerificationSetSize; -}; - /// @brief Configuration class for the speculative decoding. 
+// struct LookaheadDecodingConfig +//{ +// LookaheadDecodingConfig(SizeType32 windowSize, SizeType32 ngramSize, SizeType32 verificationSetSize); +// +// explicit LookaheadDecodingConfig() +// : LookaheadDecodingConfig(1, 1, 0) +// { +// } +// +// bool operator==(LookaheadDecodingConfig const& other) const; +// [[nodiscard]] std::tuple get() const; +// [[nodiscard]] SizeType32 getWindowSize() const; +// [[nodiscard]] SizeType32 getNgramSize() const; +// [[nodiscard]] SizeType32 getVerificationSetSize() const; +// +// /// @brief return +// std::tuple calculateSpeculativeResource() const; +// +// /// @brief return true when `this` can be executed on resources defined by `that` +// bool isLE(LookaheadDecodingConfig const& that) const; +// +// /// @brief return true when the parameter combination is valid. +// static bool isLegal(SizeType32 windowSize, SizeType32 ngramSize, SizeType32 verificationSetSize) noexcept; +// +// private: +// friend class Serialization; +// +// // Number of NGrams in lookahead branch per step. +// SizeType32 mWindowSize; +// // Number of tokens per NGram. +// SizeType32 mNgramSize; +// // Number of NGrams in verification branch per step. +// SizeType32 mVerificationSetSize; +// }; + +/// @brief Configuration class for the decoding. 
class DecodingConfig { public: @@ -687,6 +800,29 @@ class DecodingConfig std::optional mMedusaChoices; }; +class LogitsPostProcessorConfig +{ +public: + explicit LogitsPostProcessorConfig(std::optional processorMap = std::nullopt, + std::optional processorBatched = std::nullopt, bool replicate = true); + + [[nodiscard]] std::optional getProcessorMap() const; + [[nodiscard]] std::optional getProcessorBatched() const; + [[nodiscard]] bool getReplicate() const; + + void setProcessorMap(LogitsPostProcessorMap const& processorMap); + void setProcessorBatched(LogitsPostProcessorBatched const& processorBatched); + void setReplicate(bool replicate); + +private: + /// @brief mapping from post processor names to non-batched post processors + std::optional mProcessorMap; + /// @brief single batched post processor + std::optional mProcessorBatched; + /// @brief If set to true, logits post processor will run on all TP ranks in last PP rank + bool mReplicate; +}; + /// @brief Configuration class for the model executor class ExecutorConfig { @@ -699,11 +835,11 @@ class ExecutorConfig std::optional maxNumTokens = std::nullopt, std::optional parallelConfig = std::nullopt, std::optional const& peftCacheConfig = std::nullopt, - std::optional logitsPostProcessorMap = std::nullopt, - std::optional logitsPostProcessorBatched = std::nullopt, - bool replicateLogitsPostProcessor = true, std::optional decodingConfig = std::nullopt, - float gpuWeightsPercent = 1, std::optional maxQueueSize = std::nullopt, - ExtendedRuntimePerfKnobConfig const& extendedRuntimePerfKnobConfig = ExtendedRuntimePerfKnobConfig()); + std::optional logitsPostProcessorConfig = std::nullopt, + std::optional decodingConfig = std::nullopt, float gpuWeightsPercent = 1, + std::optional maxQueueSize = std::nullopt, + ExtendedRuntimePerfKnobConfig const& extendedRuntimePerfKnobConfig = ExtendedRuntimePerfKnobConfig(), + std::optional debugConfig = std::nullopt); [[nodiscard]] SizeType32 getMaxBeamWidth() const; 
[[nodiscard]] SchedulerConfig getSchedulerConfig() const; @@ -717,13 +853,12 @@ class ExecutorConfig [[nodiscard]] std::optional getMaxNumTokens() const; [[nodiscard]] std::optional getParallelConfig() const; [[nodiscard]] std::optional getPeftCacheConfig() const; - [[nodiscard]] std::optional getLogitsPostProcessorMap() const; - [[nodiscard]] std::optional getLogitsPostProcessorBatched() const; - [[nodiscard]] bool getReplicateLogitsPostProcessor() const; + [[nodiscard]] std::optional getLogitsPostProcessorConfig() const; [[nodiscard]] std::optional getDecodingConfig() const; [[nodiscard]] float getGpuWeightsPercent() const; [[nodiscard]] std::optional getMaxQueueSize() const; [[nodiscard]] ExtendedRuntimePerfKnobConfig getExtendedRuntimePerfKnobConfig() const; + [[nodiscard]] std::optional getDebugConfig() const; void setMaxBeamWidth(SizeType32 maxBeamWidth); void setMaxBatchSize(SizeType32 maxBatchSize); @@ -737,13 +872,12 @@ class ExecutorConfig void setBatchingType(BatchingType batchingType); void setParallelConfig(ParallelConfig const& parallelConfig); void setPeftCacheConfig(PeftCacheConfig const& peftCacheConfig); - void setLogitsPostProcessorMap(LogitsPostProcessorMap const& logitsPostProcessorMap); - void setLogitsPostProcessorBatched(LogitsPostProcessorBatched const& logitsPostProcessorBatched); - void setReplicateLogitsPostProcessor(bool const replicateLogitsPostProcessor); + void setLogitsPostProcessorConfig(LogitsPostProcessorConfig const& logitsPostProcessorConfig); void setDecodingConfig(DecodingConfig const& decodingConfig); void setGpuWeightsPercent(float const& gpuWeightsPercent); void setMaxQueueSize(std::optional const& maxQueueSize); - void setExtendedRuntimePerfKnobConfig(ExtendedRuntimePerfKnobConfig const& ExtendedRuntimePerfKnobConfig); + void setExtendedRuntimePerfKnobConfig(ExtendedRuntimePerfKnobConfig const& extendedRuntimePerfKnobConfig); + void setDebugConfig(DebugConfig const& debugConfig); private: friend class Serialization; @@ 
-781,10 +915,9 @@ class ExecutorConfig /// @brief The parallel execution configuration. std::optional mParallelConfig; std::optional mPeftCacheConfig; - std::optional mLogitsPostProcessorMap; - std::optional mLogitsPostProcessorBatched; - /// @brief If set to true, logits post processor will run on all TP ranks in last PP rank - bool mReplicateLogitsPostProcessor; + + /// @brief Logits post processor configuration + std::optional mLogitsPostProcessorConfig; /// @brief Decoding configuration. std::optional mDecodingConfig; @@ -797,6 +930,9 @@ class ExecutorConfig /// @brief Config for perf knobs that can be set in runtime. ExtendedRuntimePerfKnobConfig mExtendedRuntimePerfKnobConfig; + + /// @brief Debugging configuration. + std::optional mDebugConfig; }; /// @brief The executor is responsible for receiving new requests and sending responses, and running the inference diff --git a/cpp/include/tensorrt_llm/executor/serialization.h b/cpp/include/tensorrt_llm/executor/serialization.h index 4dace9acb..5f29afde2 100644 --- a/cpp/include/tensorrt_llm/executor/serialization.h +++ b/cpp/include/tensorrt_llm/executor/serialization.h @@ -53,6 +53,11 @@ class Serialization static void serialize(LoraConfig const& config, std::ostream& os); [[nodiscard]] static size_t serializedSize(LoraConfig const& config); + // ContextPhaseParams + [[nodiscard]] static ContextPhaseParams deserializeContextPhaseParams(std::istream& is); + static void serialize(ContextPhaseParams const& contextPhaseParams, std::ostream& os); + [[nodiscard]] static size_t serializedSize(ContextPhaseParams const& contextPhaseParams); + // Request [[nodiscard]] static Request deserializeRequest(std::istream& is); static void serialize(Request const& request, std::ostream& os); @@ -122,6 +127,11 @@ class Serialization static void serialize(DecodingConfig const& decodingConfig, std::ostream& os); static size_t serializedSize(DecodingConfig const& decodingConfig); + // DebugConfig + static DebugConfig 
deserializeDebugConfig(std::istream& is); + static void serialize(DebugConfig const& debugConfig, std::ostream& os); + static size_t serializedSize(DebugConfig const& debugConfig); + // ExecutorConfig static ExecutorConfig deserializeExecutorConfig(std::istream& is); static void serialize(ExecutorConfig const& executorConfig, std::ostream& os); diff --git a/cpp/include/tensorrt_llm/executor/types.h b/cpp/include/tensorrt_llm/executor/types.h index 4861a39aa..1dce228cd 100644 --- a/cpp/include/tensorrt_llm/executor/types.h +++ b/cpp/include/tensorrt_llm/executor/types.h @@ -348,6 +348,22 @@ struct RequestStatsPerIteration std::vector requestStats; }; +/// @brief The reason why the model stopped generating tokens for a request. +enum class FinishReason +{ + /// @brief The request is not finished. + kNOT_FINISHED = 0, + + /// @brief The request finished because the end id was generated. + kEND_ID = 1, + + /// @brief The request finished because a stop word was generated. + kSTOP_WORDS = 2, + + /// @brief The request finished because the maximum number of tokens was reached. + kLENGTH = 3, +}; + /// @brief mode of the decoder class DecodingMode { diff --git a/cpp/include/tensorrt_llm/runtime/decodingInput.h b/cpp/include/tensorrt_llm/runtime/decodingInput.h index 6c4d7c805..4f92cbd06 100644 --- a/cpp/include/tensorrt_llm/runtime/decodingInput.h +++ b/cpp/include/tensorrt_llm/runtime/decodingInput.h @@ -80,7 +80,7 @@ class DecodingInput batchSlots; //!< [batchSize], address map of the linear batch id to to the seq slots, int32_t, pinned // optional parameters - TensorConstPtr finished; //!< [batchSize, beamWidth], finished states at current iteration. + TensorConstPtr finishReasons; //!< [batchSize, beamWidth], finished states at current iteration. //!< If true for some request, the decoding step of it is skipped, on gpu TensorConstPtr sequenceLimitLength; //!< [batchSize], on gpu. The maximum sequence length for each sequence in the batch. 
@@ -129,9 +129,16 @@ class DecodingInput TensorConstPtr seqSlots; //!< [batchSize] }; + struct LookaheadInputs + { + TensorPtr tokensPerStep; + }; + std::optional medusaInputs; std::optional explicitDraftTokensInputs; + + std::optional lookaheadInputs; }; } // namespace tensorrt_llm::runtime diff --git a/cpp/include/tensorrt_llm/runtime/decodingOutput.h b/cpp/include/tensorrt_llm/runtime/decodingOutput.h index c07ae057b..146db40a4 100644 --- a/cpp/include/tensorrt_llm/runtime/decodingOutput.h +++ b/cpp/include/tensorrt_llm/runtime/decodingOutput.h @@ -20,9 +20,15 @@ #include "tensorrt_llm/runtime/common.h" #include "tensorrt_llm/runtime/explicitDraftTokensBuffers.h" #include "tensorrt_llm/runtime/iTensor.h" +#include "tensorrt_llm/runtime/lookaheadBuffers.h" #include #include +namespace tensorrt_llm::batch_manager +{ +class LookaheadDecodingBuffers; +} + namespace tensorrt_llm::runtime { class DecodingOutput @@ -81,10 +87,10 @@ class DecodingOutput // Vector of views on newTokensSteps for each token // optional parameters - TensorPtr finished; // [BS, BM], set to true by decoding if any of the stop conditions are met or if - // DecodingInput.finished is true. In beam search and to determine whether to stop according to - // DecodingInput.sequenceLimitLength - TensorPtr finishedSum; // [BS], the sum of finished sequences per request, in pinned memory + TensorPtr finishReasons; // [BS, BM], set to FinishedState by decoding if any of the stop conditions are met or if + // DecodingInput.finished is true. 
In beam search and to determine whether to stop + // according to DecodingInput.sequenceLimitLength + TensorPtr finishedSum; // [BS], the sum of finished sequences per request, in pinned memory // mandatory parameters for beam search TensorPtr logProbs; // [BS, BM, MSL], must be float* @@ -110,6 +116,8 @@ class DecodingOutput std::optional speculativeDecodingOutputs; std::optional explicitDraftTokensBuffers; + + std::optional lookaheadOutputs; }; } // namespace tensorrt_llm::runtime diff --git a/cpp/include/tensorrt_llm/runtime/gptDecoder.h b/cpp/include/tensorrt_llm/runtime/gptDecoder.h index 1753f24c6..8b0dc994b 100644 --- a/cpp/include/tensorrt_llm/runtime/gptDecoder.h +++ b/cpp/include/tensorrt_llm/runtime/gptDecoder.h @@ -16,11 +16,13 @@ #pragma once +#include "tensorrt_llm/common/logger.h" #include "tensorrt_llm/executor/types.h" #include "tensorrt_llm/layers/decodingParams.h" #include "tensorrt_llm/runtime/bufferManager.h" #include "tensorrt_llm/runtime/decodingInput.h" #include "tensorrt_llm/runtime/decodingOutput.h" +#include "tensorrt_llm/runtime/request.h" #include "tensorrt_llm/runtime/samplingConfig.h" #include @@ -52,7 +54,8 @@ class IGptDecoder virtual ~IGptDecoder() = default; virtual void setup(SamplingConfig const& samplingConfig, size_t batchSize, TensorConstPtr const& batchSlots, - std::optional const& output = std::nullopt) + std::optional const& output = std::nullopt, + std::optional const> const& requests = std::nullopt) = 0; virtual void forwardAsync(DecodingOutput& output, DecodingInput const& input) = 0; @@ -95,7 +98,8 @@ class GptDecoder : public virtual IGptDecoder std::shared_ptr speculativeDecodingModule = nullptr); void setup(SamplingConfig const& samplingConfig, size_t batchSize, TensorConstPtr const& batchSlots, - std::optional const& output = std::nullopt) override; + std::optional const& output = std::nullopt, + std::optional const> const& requests = std::nullopt) override; void forwardAsync(DecodingOutput& output, DecodingInput 
const& input) override; diff --git a/cpp/include/tensorrt_llm/runtime/gptDecoderBatched.h b/cpp/include/tensorrt_llm/runtime/gptDecoderBatched.h index bbef3ae7a..358826f50 100644 --- a/cpp/include/tensorrt_llm/runtime/gptDecoderBatched.h +++ b/cpp/include/tensorrt_llm/runtime/gptDecoderBatched.h @@ -54,6 +54,8 @@ class GptDecoderBatched : public IGptDecoderBatched void setupExplicitDraftTokens(ExplicitDraftTokensBuffers::Inputs explicitDraftTokensBuffers) override; + void setupLookahead(LookaheadDecodingBuffers lookaheadDecodingBuffers) override; + void newBatch( GenerationInput const& inputs, GenerationOutput const& outputs, SamplingConfig const& samplingConfig) override; @@ -77,6 +79,12 @@ class GptDecoderBatched : public IGptDecoderBatched return {mFinished.begin(), mFinished.begin() + mActualBatchSize}; } + //! @returns [batchSize, beamWidth], FinishedState value, on gpu + [[nodiscard]] TensorPtr getFinishReasons() const override + { + return ITensor::slice(mJointDecodingOutput->finishReasons, 0, mActualBatchSize); + } + //! @param batchIdx index of the batch //! @returns [maxBeamWidth, maxInputLength + maxNewTokens], contains input token ids and generated token ids without //! padding for request `batchIdx`, on gpu. In case of beam search, contains the ungathered data. @@ -242,6 +250,9 @@ class GptDecoderBatched : public IGptDecoderBatched //! @brief Setup buffers for speculative decoding. void setupSpeculativeDecoding(ModelConfig const& modelConfig); + //! @brief Setup buffers for lookahead decoding. + void setupLookahead(ModelConfig const& modelConfig); + //! 
@brief Setups decoder internal tensors for new speculative decoding request void newRequestSpeculativeDecoding( SizeType32 batchIdx, decoder_batch::Request const& request, SamplingConfig const& samplingConfig); diff --git a/cpp/include/tensorrt_llm/runtime/iBuffer.h b/cpp/include/tensorrt_llm/runtime/iBuffer.h index 46fb3972e..5675de5ad 100644 --- a/cpp/include/tensorrt_llm/runtime/iBuffer.h +++ b/cpp/include/tensorrt_llm/runtime/iBuffer.h @@ -18,6 +18,7 @@ #include "tensorrt_llm/common/arrayView.h" #include "tensorrt_llm/common/dataType.h" +#include "tensorrt_llm/kernels/decodingCommon.h" #include "tensorrt_llm/kernels/kvCacheIndex.h" #include @@ -323,6 +324,12 @@ struct TRTDataType static constexpr auto value = TRTDataType::value; }; +template <> +struct TRTDataType +{ + static constexpr auto value = TRTDataType::value; +}; + template <> struct TRTDataType { diff --git a/cpp/include/tensorrt_llm/runtime/iGptDecoderBatched.h b/cpp/include/tensorrt_llm/runtime/iGptDecoderBatched.h index 4495c102a..11464f80e 100644 --- a/cpp/include/tensorrt_llm/runtime/iGptDecoderBatched.h +++ b/cpp/include/tensorrt_llm/runtime/iGptDecoderBatched.h @@ -21,6 +21,7 @@ #include "tensorrt_llm/runtime/explicitDraftTokensBuffers.h" #include "tensorrt_llm/runtime/iStatefulGptDecoder.h" #include "tensorrt_llm/runtime/iTensor.h" +#include "tensorrt_llm/runtime/lookaheadBuffers.h" #include "tensorrt_llm/runtime/request.h" #include "tensorrt_llm/runtime/utils/sessionUtils.h" @@ -100,6 +101,9 @@ class IGptDecoderBatched : public virtual IStatefulGptDecoder //! @brief Setup buffers for ExplicitDraftTokens decoding. virtual void setupExplicitDraftTokens(ExplicitDraftTokensBuffers::Inputs explicitDraftTokensBuffers) = 0; + //! @brief Setup buffers for Lookahead decoding. + virtual void setupLookahead(LookaheadDecodingBuffers lookaheadDecodingBuffers) = 0; + //! @brief Run one step for all requests without blocking the host process and return the token for synchronization. 
virtual TokenPtr forwardAsync(decoder_batch::Output& output, decoder_batch::Input const& input) = 0; @@ -135,6 +139,9 @@ class IGptDecoderBatched : public virtual IStatefulGptDecoder //! @returns [batchSize (actual)], marks finished requests (per batch) [[nodiscard]] virtual std::vector getFinished() const = 0; + //! @returns [batchSize, beamWidth], FinishedState value, on gpu + [[nodiscard]] virtual TensorPtr getFinishReasons() const = 0; + //! @returns [batchSize, beamWidth], cumulative log probabilities (per beam), on gpu [[nodiscard]] virtual TensorPtr getCumLogProbs() const = 0; diff --git a/cpp/include/tensorrt_llm/runtime/iStatefulGptDecoder.h b/cpp/include/tensorrt_llm/runtime/iStatefulGptDecoder.h index f5e0f142d..4719e4902 100644 --- a/cpp/include/tensorrt_llm/runtime/iStatefulGptDecoder.h +++ b/cpp/include/tensorrt_llm/runtime/iStatefulGptDecoder.h @@ -29,6 +29,11 @@ #include +namespace tensorrt_llm::batch_manager +{ +struct DecoderBuffers; +} + namespace tensorrt_llm::runtime { diff --git a/cpp/include/tensorrt_llm/runtime/iTensor.h b/cpp/include/tensorrt_llm/runtime/iTensor.h index 04937fc0d..faa14ca90 100644 --- a/cpp/include/tensorrt_llm/runtime/iTensor.h +++ b/cpp/include/tensorrt_llm/runtime/iTensor.h @@ -50,6 +50,7 @@ class ITensor : virtual public IBuffer using SharedConstPtr = std::shared_ptr; using Shape = nvinfer1::Dims; using DimType64 = std::remove_reference_t; + using TensorMap = runtime::StringPtrMap; static_assert(std::is_same_v, "This version of TRT-LLM requires TensorRT 10.0 or later."); diff --git a/cpp/include/tensorrt_llm/runtime/lookaheadBuffers.h b/cpp/include/tensorrt_llm/runtime/lookaheadBuffers.h new file mode 100644 index 000000000..56504bd94 --- /dev/null +++ b/cpp/include/tensorrt_llm/runtime/lookaheadBuffers.h @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include "tensorrt_llm/executor/executor.h" +#include "tensorrt_llm/runtime/iTensor.h" +#include "tensorrt_llm/runtime/modelConfig.h" +#include "tensorrt_llm/runtime/tllmRuntime.h" +#include "tensorrt_llm/runtime/worldConfig.h" + +namespace tensorrt_llm::runtime +{ + +class LookaheadDecodingBuffers +{ +public: + using SizeType32 = runtime::SizeType32; + using TensorPtr = runtime::ITensor::SharedPtr; + using ITensor = tensorrt_llm::runtime::ITensor; + LookaheadDecodingBuffers( + SizeType32 maxNumSequences, SizeType32 maxTokensPerStep, runtime::BufferManager const& bufferManager); + TensorPtr generationLengths; // [mMaxNumRequests] + TensorPtr positionOffsets; // [mMaxNumRequests, maxTokensPerStep] + TensorPtr packedMasks; // [mMaxNumRequests, maxTokensPerStep, divUp(maxTokensPerStep, 32)] + TensorPtr positionIds; +}; + +class LookaheadRuntimeBuffers +{ +public: + using SizeType32 = tensorrt_llm::runtime::SizeType32; + using ITensor = tensorrt_llm::runtime::ITensor; + using TensorPtr = runtime::ITensor::SharedPtr; + using TensorMap = runtime::StringPtrMap; + + LookaheadRuntimeBuffers(SizeType32 maxBatchSize, SizeType32 maxBeamWidth, runtime::BufferManager const& manager, + runtime::ModelConfig const& modelConfig, runtime::WorldConfig const& worldConfig, + executor::DecodingConfig const& decodingConfig, runtime::TllmRuntime const& runtime); + + void setFromInputs(SizeType32 
numCtxSequences, SizeType32 numGenSequences, runtime::ITensor const& requestTypes, + ITensor const& seqSlots, LookaheadDecodingBuffers const& decoderLookaheadBuffers, + runtime::TllmRuntime const& runtime, runtime::ModelConfig const& modelConfig, + runtime::WorldConfig const& worldConfig) const; + + void reshape(SizeType32 numCtxSequences, SizeType32 numGenSequences, SizeType32 tokensPerStep); + + void insertInputTensors( + TensorMap& inputBuffers, TensorMap& outputBuffers, runtime::WorldConfig const& worldConfig) const; + +public: + TensorPtr packedMasksDevice; // [forwardBatchSize, tokensPerStep, numPackedMasks], on gpu + TensorPtr generationLengthsDevice; // [forwardBatchSize], on gpu + TensorPtr positionOffsetsDevice; // [forwardBatchSize, tokensPerStep], on gpu + TensorPtr positionIdsDevice; // [forwardBatchSize, tokensPerStep], on gpu + + TensorPtr packedMaskHost; + TensorPtr generationLengthsHost; + TensorPtr positionOffsetsHost; + TensorPtr positionIdsHost; + + TensorPtr packedMaskHostCopy; + TensorPtr generationLengthsHostCopy; + TensorPtr positionOffsetsHostCopy; + TensorPtr positionIdsHostCopy; + + TensorPtr batchSlotsHostCopy; +}; + +} // namespace tensorrt_llm::runtime diff --git a/cpp/include/tensorrt_llm/runtime/speculativeDecodingMode.h b/cpp/include/tensorrt_llm/runtime/speculativeDecodingMode.h index e3103ea91..8226c411c 100644 --- a/cpp/include/tensorrt_llm/runtime/speculativeDecodingMode.h +++ b/cpp/include/tensorrt_llm/runtime/speculativeDecodingMode.h @@ -108,8 +108,7 @@ class SpeculativeDecodingMode [[nodiscard]] bool constexpr needsDecoderPrologue() const { - // Potentially lookahead should require it too. 
- return anyBitSet(kExplicitDraftTokens); + return anyBitSet(kExplicitDraftTokens | kLookaheadDecoding); } using UnderlyingType = std::uint8_t; diff --git a/cpp/tensorrt_llm/CMakeLists.txt b/cpp/tensorrt_llm/CMakeLists.txt index df6e62c84..a8b6f276a 100644 --- a/cpp/tensorrt_llm/CMakeLists.txt +++ b/cpp/tensorrt_llm/CMakeLists.txt @@ -22,7 +22,7 @@ set(API_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/include) include_directories(${CMAKE_CURRENT_SOURCE_DIR}/cutlass_extensions/include ${API_INCLUDE_DIR}) -if(ENABLE_MULTI_DEVICE EQUAL 1) +if(ENABLE_MULTI_DEVICE) find_package(MPI REQUIRED) message(STATUS "Using MPI_C_INCLUDE_DIRS: ${MPI_C_INCLUDE_DIRS}") message(STATUS "Using MPI_C_LIBRARIES: ${MPI_C_LIBRARIES}") @@ -269,7 +269,7 @@ set(TRTLLM_LINK_LIBS runtime_src ${DECODER_SHARED_TARGET}) -if(ENABLE_MULTI_DEVICE EQUAL 1) +if(ENABLE_MULTI_DEVICE) set(TRTLLM_LINK_LIBS ${TRTLLM_LINK_LIBS} ${MPI_C_LIBRARIES} ${NCCL_LIB}) endif() diff --git a/cpp/tensorrt_llm/batch_manager/aarch64-linux-gnu/libtensorrt_llm_batch_manager_static.a b/cpp/tensorrt_llm/batch_manager/aarch64-linux-gnu/libtensorrt_llm_batch_manager_static.a index 1781be133..f9fe9ff33 100644 --- a/cpp/tensorrt_llm/batch_manager/aarch64-linux-gnu/libtensorrt_llm_batch_manager_static.a +++ b/cpp/tensorrt_llm/batch_manager/aarch64-linux-gnu/libtensorrt_llm_batch_manager_static.a @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:84a6439038eb0a7d2913c3fe051684ab7779e42635c074bc6df30bfc46807929 -size 4358834 +oid sha256:460b75a97c0de65941839ccd5e0458cf5929574b9345b3cb723a695ae5a056e0 +size 4404838 diff --git a/cpp/tensorrt_llm/batch_manager/aarch64-linux-gnu/libtensorrt_llm_batch_manager_static.pre_cxx11.a b/cpp/tensorrt_llm/batch_manager/aarch64-linux-gnu/libtensorrt_llm_batch_manager_static.pre_cxx11.a index f90fc6d03..555117707 100644 --- a/cpp/tensorrt_llm/batch_manager/aarch64-linux-gnu/libtensorrt_llm_batch_manager_static.pre_cxx11.a +++ 
b/cpp/tensorrt_llm/batch_manager/aarch64-linux-gnu/libtensorrt_llm_batch_manager_static.pre_cxx11.a @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1b321340ea622b28ed7d38fe18ff7707091d2efa414af40f6db516959a4fa2f4 -size 4466694 +oid sha256:645bbbad2c38b573df7c6e56588a6728d356a58444ac7c2f881d773faaca7593 +size 4516944 diff --git a/cpp/tensorrt_llm/batch_manager/aarch64-linux-gnu/version.txt b/cpp/tensorrt_llm/batch_manager/aarch64-linux-gnu/version.txt index 2eff4c1d7..484d3274b 100644 --- a/cpp/tensorrt_llm/batch_manager/aarch64-linux-gnu/version.txt +++ b/cpp/tensorrt_llm/batch_manager/aarch64-linux-gnu/version.txt @@ -1,3 +1,3 @@ -99062b35da1cb99df9c79368da1ff9de libtensorrt_llm_batch_manager_static.a -72e6a44f7636bb6d48b016db1c62cdc7 libtensorrt_llm_batch_manager_static.pre_cxx11.a -90dd0ad72954a5cc7cc2e298495e784906fe49b1 commit \ No newline at end of file +a348613d480961aa14d4e77939be8a34 libtensorrt_llm_batch_manager_static.a +317ec85caec48184c9c8b9cbd3eb44b1 libtensorrt_llm_batch_manager_static.pre_cxx11.a +49402939d007b39393cabaa8fe96c110d16f5b35 commit \ No newline at end of file diff --git a/cpp/tensorrt_llm/batch_manager/x86_64-linux-gnu/libtensorrt_llm_batch_manager_static.a b/cpp/tensorrt_llm/batch_manager/x86_64-linux-gnu/libtensorrt_llm_batch_manager_static.a index a2560a5a6..3e658d50e 100644 --- a/cpp/tensorrt_llm/batch_manager/x86_64-linux-gnu/libtensorrt_llm_batch_manager_static.a +++ b/cpp/tensorrt_llm/batch_manager/x86_64-linux-gnu/libtensorrt_llm_batch_manager_static.a @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1df6689f399313cac54ec1f4422975d6060957edbc698468a96f3a2c2a6542bc -size 4221016 +oid sha256:a785c4459bdb4a7dad9df0c832211f26f699a331ce0b2b9516e7a666f83b895a +size 4272894 diff --git a/cpp/tensorrt_llm/batch_manager/x86_64-linux-gnu/libtensorrt_llm_batch_manager_static.pre_cxx11.a b/cpp/tensorrt_llm/batch_manager/x86_64-linux-gnu/libtensorrt_llm_batch_manager_static.pre_cxx11.a index 
f9251acc4..26532800d 100644 --- a/cpp/tensorrt_llm/batch_manager/x86_64-linux-gnu/libtensorrt_llm_batch_manager_static.pre_cxx11.a +++ b/cpp/tensorrt_llm/batch_manager/x86_64-linux-gnu/libtensorrt_llm_batch_manager_static.pre_cxx11.a @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b118f1bbccd6fe8e5e001916f4de19ec34886e7dcc288b91dcdadf5500f0eb50 -size 4205756 +oid sha256:9a6b98589222f8bf8e82f122110cb1824b1728646bad45a41e8b9ada632539dc +size 4248190 diff --git a/cpp/tensorrt_llm/batch_manager/x86_64-windows-msvc/tensorrt_llm_batch_manager_static.lib b/cpp/tensorrt_llm/batch_manager/x86_64-windows-msvc/tensorrt_llm_batch_manager_static.lib index f69634e79..e3f219a93 100644 --- a/cpp/tensorrt_llm/batch_manager/x86_64-windows-msvc/tensorrt_llm_batch_manager_static.lib +++ b/cpp/tensorrt_llm/batch_manager/x86_64-windows-msvc/tensorrt_llm_batch_manager_static.lib @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:47162c3eaab9b6f60bca8927eef0423c5521d7750f112b87a2f9175156ccb6cd -size 24807904 +oid sha256:7daa6c306a2fb738bbe8b3d30324691c83d59aa933c79b0e48342976edb4e356 +size 25540884 diff --git a/cpp/tensorrt_llm/common/cublasMMWrapper.cpp b/cpp/tensorrt_llm/common/cublasMMWrapper.cpp index 224ca77f6..27f179236 100644 --- a/cpp/tensorrt_llm/common/cublasMMWrapper.cpp +++ b/cpp/tensorrt_llm/common/cublasMMWrapper.cpp @@ -37,16 +37,12 @@ CublasMMWrapper::CublasMMWrapper(std::shared_ptr cublasHandle, { } -CublasMMWrapper::~CublasMMWrapper() -{ - mMutex = nullptr; -} +CublasMMWrapper::~CublasMMWrapper() {} CublasMMWrapper::CublasMMWrapper(CublasMMWrapper const& wrapper) : mCublasHandle(wrapper.mCublasHandle) , mCublasLtHandle(wrapper.mCublasLtHandle) , mStream(wrapper.mStream) - , mMutex(wrapper.mMutex) { } @@ -135,8 +131,6 @@ void CublasMMWrapper::Gemm(cublasOperation_t transa, cublasOperation_t transb, i half h_alpha = (half) (f_alpha); half h_beta = (half) (f_beta); - std::lock_guard lock(*mMutex); - // TODO: default cublas libs 
usingCublasLt = usingCublasLt && (mAType == CUDA_R_16F || mAType == CUDA_R_8F_E4M3); bool isFp16ComputeType = mComputeType == CUBLAS_COMPUTE_16F; @@ -179,8 +173,6 @@ void CublasMMWrapper::stridedBatchedGemm(cublasOperation_t transa, cublasOperati half h_alpha = (half) f_alpha; half h_beta = (half) f_beta; - std::lock_guard lock(*mMutex); - int isFp16ComputeType = mComputeType == CUBLAS_COMPUTE_16F ? 1 : 0; void const* alpha = isFp16ComputeType ? reinterpret_cast(&h_alpha) : reinterpret_cast(&f_alpha); void const* beta = isFp16ComputeType ? reinterpret_cast(&h_beta) : reinterpret_cast(&f_beta); @@ -198,7 +190,6 @@ void CublasMMWrapper::stridedBatchedGemm(cublasOperation_t transa, cublasOperati half h_alpha = (half) f_alpha; half h_beta = (half) f_beta; - std::lock_guard lock(*mMutex); bool isFp16ComputeType = mComputeType == CUBLAS_COMPUTE_16F ? 1 : 0; void const* alpha = isFp16ComputeType ? reinterpret_cast(&h_alpha) : reinterpret_cast(&f_alpha); void const* beta = isFp16ComputeType ? reinterpret_cast(&h_beta) : reinterpret_cast(&f_beta); diff --git a/cpp/tensorrt_llm/common/cublasMMWrapper.h b/cpp/tensorrt_llm/common/cublasMMWrapper.h index 9418b0faf..21062f2f2 100644 --- a/cpp/tensorrt_llm/common/cublasMMWrapper.h +++ b/cpp/tensorrt_llm/common/cublasMMWrapper.h @@ -21,7 +21,6 @@ #include #include #include -#include #include #include @@ -48,8 +47,6 @@ class CublasMMWrapper cublasLtMatrixLayout_t mCDesc{NULL}; cudaStream_t mStream; - //@fixme: we may not need the mutex if we copy the wrapper instead of sharing in GemmPlugin::clone() - std::shared_ptr mMutex{std::make_shared()}; void* mCublasWorkspace = nullptr; diff --git a/cpp/tensorrt_llm/common/mpiUtils.cpp b/cpp/tensorrt_llm/common/mpiUtils.cpp index dce6bf855..720e73b81 100644 --- a/cpp/tensorrt_llm/common/mpiUtils.cpp +++ b/cpp/tensorrt_llm/common/mpiUtils.cpp @@ -234,12 +234,14 @@ void MpiComm::bcast(runtime::IBuffer& buf, int root) const std::shared_ptr MpiComm::sendAsync(void const* buffer, size_t size, 
MpiType dtype, int dest, int tag) const { + TLLM_LOG_DEBUG("start MPI_Isend with size %d", size); std::shared_ptr r = std::make_shared(); #if ENABLE_MULTI_DEVICE MPICHECK(MPI_Isend(buffer, size, getMpiDtype(dtype), dest, tag, mComm, &r->mRequest)); #else TLLM_THROW("Multi device support is disabled."); #endif + TLLM_LOG_DEBUG("end MPI_Isend with size %d", size); return r; } @@ -250,11 +252,13 @@ std::shared_ptr MpiComm::sendAsync(runtime::IBuffer const& buf, int void MpiComm::send(void const* buffer, size_t size, MpiType dtype, int dest, int tag) const { + TLLM_LOG_DEBUG("start MPI_Send with size %d", size); #if ENABLE_MULTI_DEVICE MPICHECK(MPI_Send(buffer, size, getMpiDtype(dtype), dest, tag, mComm)); #else TLLM_THROW("Multi device support is disabled."); #endif // ENABLE_MULTI_DEVICE + TLLM_LOG_DEBUG("end MPI_Send with size %d", size); } void MpiComm::send(runtime::IBuffer const& buf, int dest, int tag) const @@ -264,12 +268,14 @@ void MpiComm::send(runtime::IBuffer const& buf, int dest, int tag) const MPI_Status MpiComm::recv(void* buffer, size_t size, MpiType dtype, int source, int tag) const { + TLLM_LOG_DEBUG("start MPI_Recv with size %d", size); MPI_Status status{}; #if ENABLE_MULTI_DEVICE MPICHECK(MPI_Recv(buffer, size, getMpiDtype(dtype), source, tag, mComm, &status)); #else TLLM_THROW("Multi device support is disabled."); #endif // ENABLE_MULTI_DEVICE + TLLM_LOG_DEBUG("end MPI_Recv with size %d", size); return status; } diff --git a/cpp/tensorrt_llm/common/safetensors.cpp b/cpp/tensorrt_llm/common/safetensors.cpp index b8e73f31e..8637f7f46 100644 --- a/cpp/tensorrt_llm/common/safetensors.cpp +++ b/cpp/tensorrt_llm/common/safetensors.cpp @@ -18,7 +18,6 @@ #include "nlohmann/json.hpp" #include "tensorrt_llm/common/assert.h" #include -#include #include #include #include @@ -153,9 +152,9 @@ class SafeTensor : public ISafeTensor { auto const& value = it->second; int64_t offset = mJsonSize + sizeof(mJsonSize); - return std::shared_ptr(new SafeTensorArray(mFs, 
value["dtype"], value["shape"], + return std::make_shared(mFs, value["dtype"], value["shape"], static_cast(value["data_offsets"][0]) + offset, - static_cast(value["data_offsets"][1]) + offset)); + static_cast(value["data_offsets"][1]) + offset); } TLLM_THROW("Tensor not found: " + std::string(name)); } diff --git a/cpp/tensorrt_llm/executor/aarch64-linux-gnu/libtensorrt_llm_executor_static.a b/cpp/tensorrt_llm/executor/aarch64-linux-gnu/libtensorrt_llm_executor_static.a index 1d94a394e..f8d7e496e 100644 --- a/cpp/tensorrt_llm/executor/aarch64-linux-gnu/libtensorrt_llm_executor_static.a +++ b/cpp/tensorrt_llm/executor/aarch64-linux-gnu/libtensorrt_llm_executor_static.a @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:15d9a383921d4e955112fc69db35a861c6d5b7c72afc3708dc2a32177c9e5dfe -size 1453326 +oid sha256:f75b47945f8bb945a7086a0bcde038490ebfd2fbb406dfa0f3391f262cfac365 +size 1529360 diff --git a/cpp/tensorrt_llm/executor/aarch64-linux-gnu/libtensorrt_llm_executor_static.pre_cxx11.a b/cpp/tensorrt_llm/executor/aarch64-linux-gnu/libtensorrt_llm_executor_static.pre_cxx11.a index b7b6a8ece..51005d6b9 100644 --- a/cpp/tensorrt_llm/executor/aarch64-linux-gnu/libtensorrt_llm_executor_static.pre_cxx11.a +++ b/cpp/tensorrt_llm/executor/aarch64-linux-gnu/libtensorrt_llm_executor_static.pre_cxx11.a @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:729953123d9ef18c78cc10bf72acb8132d70cd9892d18293350ba35b14405728 -size 1482178 +oid sha256:0790f83b79f8ff2a2313d238bdd409d8f082d92edf8e22e6dc75f6f5dfa8327d +size 1553716 diff --git a/cpp/tensorrt_llm/executor/aarch64-linux-gnu/version.txt b/cpp/tensorrt_llm/executor/aarch64-linux-gnu/version.txt index 51b6e7108..e9e3d1275 100644 --- a/cpp/tensorrt_llm/executor/aarch64-linux-gnu/version.txt +++ b/cpp/tensorrt_llm/executor/aarch64-linux-gnu/version.txt @@ -1,3 +1,3 @@ -0b92c82ef47ae42243c50506a0b40583 libtensorrt_llm_executor_static.a -58aff9dae183ea725f3cf1407aa96594 
libtensorrt_llm_executor_static.pre_cxx11.a -90dd0ad72954a5cc7cc2e298495e784906fe49b1 commit \ No newline at end of file +bf15d213c14dcbe75d2116945bd24c82 libtensorrt_llm_executor_static.a +492e0b37b7f004c5b7a7c46d079f354d libtensorrt_llm_executor_static.pre_cxx11.a +49402939d007b39393cabaa8fe96c110d16f5b35 commit \ No newline at end of file diff --git a/cpp/tensorrt_llm/executor/x86_64-linux-gnu/libtensorrt_llm_executor_static.a b/cpp/tensorrt_llm/executor/x86_64-linux-gnu/libtensorrt_llm_executor_static.a index 9feed6caa..3366214ea 100644 --- a/cpp/tensorrt_llm/executor/x86_64-linux-gnu/libtensorrt_llm_executor_static.a +++ b/cpp/tensorrt_llm/executor/x86_64-linux-gnu/libtensorrt_llm_executor_static.a @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8273d6b5add26400e3b4ff032c9cfd754009def88abf55e16efd535fd596753f -size 1501596 +oid sha256:7f5fed27f812506b319a1275a6f00b71e3b8e3c0a8a2f71370b7c4673820306f +size 1588916 diff --git a/cpp/tensorrt_llm/executor/x86_64-linux-gnu/libtensorrt_llm_executor_static.pre_cxx11.a b/cpp/tensorrt_llm/executor/x86_64-linux-gnu/libtensorrt_llm_executor_static.pre_cxx11.a index 19ab55f6b..1a1400544 100644 --- a/cpp/tensorrt_llm/executor/x86_64-linux-gnu/libtensorrt_llm_executor_static.pre_cxx11.a +++ b/cpp/tensorrt_llm/executor/x86_64-linux-gnu/libtensorrt_llm_executor_static.pre_cxx11.a @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:44950c4e09e881c6d43040fbdff75f3e9015d91ac32e4e8b09ab1ec16c0c366e -size 1434742 +oid sha256:c9e712e014960458ae1fbda43fcb882eb98f04f00c9e95afce2d881b29d2c5cf +size 1517700 diff --git a/cpp/tensorrt_llm/executor/x86_64-windows-msvc/tensorrt_llm_executor_static.lib b/cpp/tensorrt_llm/executor/x86_64-windows-msvc/tensorrt_llm_executor_static.lib index bac4ed11f..517415958 100644 --- a/cpp/tensorrt_llm/executor/x86_64-windows-msvc/tensorrt_llm_executor_static.lib +++ b/cpp/tensorrt_llm/executor/x86_64-windows-msvc/tensorrt_llm_executor_static.lib @@ -1,3 +1,3 @@ 
version https://git-lfs.github.com/spec/v1 -oid sha256:212111cf11a06d0ed6ed7f4ac085f6c8bb70a339a1e2771028ed37e0c416b43b -size 14582948 +oid sha256:e433610d288aa1533fd36c467fd67929fefec68043e486f45dd3a774a55667cd +size 16515186 diff --git a/cpp/tensorrt_llm/kernels/decoderMaskedMultiheadAttention/decoderXQAImplJIT/nvrtcWrapper/aarch64-linux-gnu/version.txt b/cpp/tensorrt_llm/kernels/decoderMaskedMultiheadAttention/decoderXQAImplJIT/nvrtcWrapper/aarch64-linux-gnu/version.txt index 215e46c51..f02829789 100644 --- a/cpp/tensorrt_llm/kernels/decoderMaskedMultiheadAttention/decoderXQAImplJIT/nvrtcWrapper/aarch64-linux-gnu/version.txt +++ b/cpp/tensorrt_llm/kernels/decoderMaskedMultiheadAttention/decoderXQAImplJIT/nvrtcWrapper/aarch64-linux-gnu/version.txt @@ -1,2 +1,2 @@ 47b5d2e14616709b1dfb86b16213308e libtensorrt_llm_nvrtc_wrapper.so -90dd0ad72954a5cc7cc2e298495e784906fe49b1 commit \ No newline at end of file +49402939d007b39393cabaa8fe96c110d16f5b35 commit \ No newline at end of file diff --git a/cpp/tensorrt_llm/kernels/decoderMaskedMultiheadAttention/decoderXQAImplJIT/nvrtcWrapper/x86_64-windows-msvc/tensorrt_llm_nvrtc_wrapper.dll b/cpp/tensorrt_llm/kernels/decoderMaskedMultiheadAttention/decoderXQAImplJIT/nvrtcWrapper/x86_64-windows-msvc/tensorrt_llm_nvrtc_wrapper.dll index b0fe7471d..98548f6cf 100644 --- a/cpp/tensorrt_llm/kernels/decoderMaskedMultiheadAttention/decoderXQAImplJIT/nvrtcWrapper/x86_64-windows-msvc/tensorrt_llm_nvrtc_wrapper.dll +++ b/cpp/tensorrt_llm/kernels/decoderMaskedMultiheadAttention/decoderXQAImplJIT/nvrtcWrapper/x86_64-windows-msvc/tensorrt_llm_nvrtc_wrapper.dll @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:698a56ec294b5d82bd3e967b4595dce43baef96306001cd1f23dfe77a701e0d4 +oid sha256:6b16e47ce5d366249f54bf1c5edb46841efa84be58de97d78539ac0ba4fc710b size 1127936 diff --git a/cpp/tensorrt_llm/kernels/decodingCommon.h b/cpp/tensorrt_llm/kernels/decodingCommon.h index 494990994..bd695b35d 100644 --- 
a/cpp/tensorrt_llm/kernels/decodingCommon.h +++ b/cpp/tensorrt_llm/kernels/decodingCommon.h @@ -16,6 +16,7 @@ #pragma once +#include "tensorrt_llm/executor/types.h" #include #include @@ -62,7 +63,7 @@ class FinishedState mState |= kFinishedEos; } - __host__ __device__ bool constexpr isFinishedEOS() + __host__ __device__ bool constexpr isFinishedEOS() const { return anyBitSet(kFinishedEos); } @@ -72,7 +73,7 @@ class FinishedState mState |= kFinishedStopWords; } - __host__ __device__ bool constexpr isFinishedStopWords() + __host__ __device__ bool constexpr isFinishedStopWords() const { return anyBitSet(kFinishedStopWords); } @@ -82,7 +83,7 @@ class FinishedState mState |= kFinishedMaxLength; } - __host__ __device__ bool constexpr isFinishedMaxLength() + __host__ __device__ bool constexpr isFinishedMaxLength() const { return anyBitSet(kFinishedMaxLength); } @@ -107,6 +108,23 @@ class FinishedState return anyBitSet(kSkipDecoding); } + executor::FinishReason toFinishReason() const + { + if (isFinishedEOS()) + { + return executor::FinishReason::kEND_ID; + } + if (isFinishedStopWords()) + { + return executor::FinishReason::kSTOP_WORDS; + } + if (isFinishedMaxLength()) + { + return executor::FinishReason::kLENGTH; + } + return executor::FinishReason::kNOT_FINISHED; + } + using UnderlyingType = uint8_t; private: diff --git a/cpp/tensorrt_llm/kernels/gptKernels.cu b/cpp/tensorrt_llm/kernels/gptKernels.cu index d0d7b96c5..fa61d4cef 100644 --- a/cpp/tensorrt_llm/kernels/gptKernels.cu +++ b/cpp/tensorrt_llm/kernels/gptKernels.cu @@ -71,7 +71,8 @@ template __global__ __launch_bounds__(THREADS_PER_BLOCK) void computeSeqAndPaddingOffsets(BuildDecoderInfoParams params) { // Dynamic shared memory for storing seqOffsets. - extern __shared__ int smemSeqQOffsets[]; + extern __shared__ int smem[]; + int* smemSeqQOffsets = (int*) (smem); // Fixed Q sequence lengths. 
bool const fixed_q_seqlen = params.seqQLengths == nullptr; @@ -82,6 +83,10 @@ __global__ __launch_bounds__(THREADS_PER_BLOCK) void computeSeqAndPaddingOffsets // Whether to calculate cumulative packed mask rows. bool const calculate_packed_mask_row_offsets = params.packedMaskRowOffsets != nullptr; + // Compute the padding offsets for Encoder Inputs. + bool const need_encoder_padding_offsets = (params.encoderPaddingOffsets != nullptr) && calculate_kv_offsets; + [[maybe_unused]] int* smemEncoderSeqQOffsets; + // The implementation of the parallel scan in the thread block (see CUB for details). using BlockScan = cub::BlockScan; @@ -95,6 +100,11 @@ __global__ __launch_bounds__(THREADS_PER_BLOCK) void computeSeqAndPaddingOffsets BlockPrefixCallbackOp prefixMaskOp(0); BlockPrefixCallbackOp prefixKVOp(0); + if (need_encoder_padding_offsets) + { + smemEncoderSeqQOffsets = (int*) (&smemSeqQOffsets[params.batchSize + 1]); + } + // Iterate over the sequences in the batch. // // The loop index does not depend on the thread index to make sure all the threads enter the @@ -140,6 +150,10 @@ __global__ __launch_bounds__(THREADS_PER_BLOCK) void computeSeqAndPaddingOffsets if (batchIdx <= batchSizeBound) { smemSeqQOffsets[batchIdx] = seqQOffset; + if (need_encoder_padding_offsets) + { + smemEncoderSeqQOffsets[batchIdx] = seqKVOffset; + } } // Store the result. @@ -160,27 +174,35 @@ __global__ __launch_bounds__(THREADS_PER_BLOCK) void computeSeqAndPaddingOffsets __syncthreads(); } - // Compute the padding offsets. - // Block x dimension is the batch dimension, while threads iterate all tokens in the sequence. int batchIdx = blockIdx.x; - // The beginning of the sequence. - int seqBegin = smemSeqQOffsets[batchIdx]; - // The offset to the 1st element of the next sequence. - int seqEnd = smemSeqQOffsets[batchIdx + 1]; - // The length of the sequence. - int seqLength = seqEnd - seqBegin; - // The number of padded tokens in the previous sequences. 
- int paddingOffset = batchIdx * params.maxQSeqLength - seqBegin; - bool const need_padding_offsets = params.paddingOffsets != nullptr; - - if (need_padding_offsets) + // Compute the padding offsets. + auto compute_padding_offset = [&](int* smem_offset, int maxSeqLength, int* paddingOffsets) { + // Block x dimension is the batch dimension, while threads iterate all tokens in the sequence. + int seqBegin = smem_offset[batchIdx]; + // The offset to the 1st element of the next sequence. + int seqEnd = smem_offset[batchIdx + 1]; + // The length of the sequence. + int seqLength = seqEnd - seqBegin; + // The number of padded tokens in the previous sequences. + int paddingOffset = batchIdx * maxSeqLength - seqBegin; + // Iterate over the tokens to update the number of padded elements. for (int tokenIdx = threadIdx.x; tokenIdx < seqLength; tokenIdx += blockDim.x) { - params.paddingOffsets[seqBegin + tokenIdx] = paddingOffset; + paddingOffsets[seqBegin + tokenIdx] = paddingOffset; } + }; + + if (params.paddingOffsets != nullptr) + { + compute_padding_offset(smemSeqQOffsets, params.maxQSeqLength, params.paddingOffsets); + } + + if (need_encoder_padding_offsets) + { + compute_padding_offset(smemEncoderSeqQOffsets, params.maxEncoderQSeqLength, params.encoderPaddingOffsets); } // Each block generates the rotary embedding inv_freq tensor for the corresponding sequence. @@ -311,7 +333,10 @@ void invokeBuildDecoderInfo(BuildDecoderInfoParams const& params, cudaStream_ "Rotary embedding dim is assumed to be smaller than 512 and multiple of 2."); TLLM_CHECK_WITH_INFO( !(params.seqKVLengths == nullptr && params.rotaryEmbeddingDim > 0), "KV sequence lengths buffer is invalid."); - const size_t smem_size = (params.batchSize + 1) * sizeof(int); + bool const need_encoder_padding_offsets + = (params.encoderPaddingOffsets != nullptr) && (params.seqKVOffsets != nullptr); + const size_t smem_size + = (need_encoder_padding_offsets ? 
(params.batchSize + 1) * 2 : (params.batchSize + 1)) * sizeof(int); computeSeqAndPaddingOffsets <<>>(params); diff --git a/cpp/tensorrt_llm/kernels/gptKernels.h b/cpp/tensorrt_llm/kernels/gptKernels.h index 2eea65469..53441abf1 100644 --- a/cpp/tensorrt_llm/kernels/gptKernels.h +++ b/cpp/tensorrt_llm/kernels/gptKernels.h @@ -100,8 +100,12 @@ struct BuildDecoderInfoParams int* seqQOffsets; // The offsets to the 1st token in each sequence of KV buffer. Shape: [batchSize+1]. int* seqKVOffsets; - // The number of padded tokens in the corresponding padded tensor before the current token. Shape: [numTokens]. + // The number of padded tokens in the corresponding padded tensor before the current token, for Decoder. Shape: + // [numTokens]. int* paddingOffsets; + // The number of padded tokens in the corresponding padded tensor before the current token, for Encoder. Shape: + // [numTokens]. + int* encoderPaddingOffsets; // The offsets to the 1st row in each sequence of packed mask buffer. Shape: [batchSize+1]. int* packedMaskRowOffsets; @@ -120,8 +124,10 @@ struct BuildDecoderInfoParams // The number of sequences in the batch. int batchSize; - // The maximum query length of a sequence; it includes input and output. + // The maximum query length of a sequence for Decoder (max_input_length), N for ctx phase, 1 for gen phase. int maxQSeqLength; + // The maximum query length of a sequence for Encoder, for cross attention (cross_qkv_length). + int maxEncoderQSeqLength; // Whether remove the input padding or not. bool removePadding; // The kv cache capacity. 
@@ -164,12 +170,20 @@ struct BuildDecoderInfoParams << *(runtime::ITensor::wrap( (void*) paddingOffsets, nvinfer1::DataType::kINT32, runtime::ITensor::makeShape({batchSize}))) << std::endl; + if (encoderPaddingOffsets != nullptr) + { + ss << "encoderPaddingOffsets: " + << *(runtime::ITensor::wrap((void*) encoderPaddingOffsets, nvinfer1::DataType::kINT32, + runtime::ITensor::makeShape({batchSize}))) + << std::endl; + } ss << "attentionMask: " << static_cast(attentionMask) << std::endl; ss << "seqQLengths: " << seqQLengths << std::endl; ss << "seqKVLengths: " << seqKVLengths << std::endl; ss << "fmhaTileCounter: " << fmhaTileCounter << std::endl; ss << "batchSize: " << batchSize << std::endl; ss << "maxQSeqLength: " << maxQSeqLength << std::endl; + ss << "maxEncoderQSeqLength: " << maxEncoderQSeqLength << std::endl; ss << "removePadding: " << std::boolalpha << removePadding << std::endl; ss << "attentionWindowSize: " << attentionWindowSize << std::endl; ss << "sinkTokenLength: " << sinkTokenLength << std::endl; diff --git a/cpp/tensorrt_llm/kernels/groupGemm.cu b/cpp/tensorrt_llm/kernels/groupGemm.cu index 86656f779..b133f8e84 100644 --- a/cpp/tensorrt_llm/kernels/groupGemm.cu +++ b/cpp/tensorrt_llm/kernels/groupGemm.cu @@ -72,13 +72,12 @@ void groupedGemm_(std::vector problem_sizes, std::vect using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; - int const kAlignmentA = 8; - int const kAlignmentB = 8; + int constexpr kAlignment = 8; int problem_count = problem_sizes.size(); using GemmKernel = typename cutlass::gemm::kernel::DefaultGemmGrouped, cutlass::gemm::GemmShape, cutlass::gemm::GemmShape<16, 8, 16>, cutlass::epilogue::thread::LinearCombination::value, @@ -121,9 +120,13 @@ void groupedGemm_(std::vector problem_sizes, std::vect auto problem = problem_sizes.at(i); lda_host[i] = LayoutA::packed({problem.m(), problem.k()}).stride(0); + TLLM_CHECK(lda_host[i] % kAlignment == 0); ldb_host[i] = LayoutB::packed({problem.k(), 
problem.n()}).stride(0); + TLLM_CHECK(ldb_host[i] % kAlignment == 0); ldc_host[i] = LayoutC::packed({problem.m(), problem.n()}).stride(0); + TLLM_CHECK(ldc_host[i] % kAlignment == 0); ldd_host[i] = LayoutC::packed({problem.m(), problem.n()}).stride(0); + TLLM_CHECK(ldd_host[i] % kAlignment == 0); } cutlass::gemm::GemmCoord* problem_sizes_device = reinterpret_cast(gemmParamsWorkSpace); diff --git a/cpp/tensorrt_llm/kernels/lora/lora.cpp b/cpp/tensorrt_llm/kernels/lora/lora.cpp index f275cb559..25a46fec9 100644 --- a/cpp/tensorrt_llm/kernels/lora/lora.cpp +++ b/cpp/tensorrt_llm/kernels/lora/lora.cpp @@ -26,12 +26,14 @@ #include -namespace tk = tensorrt_llm::kernels; using namespace nvinfer1; using namespace tensorrt_llm::common; using tensorrt_llm::kernels::LoraImpl; using tensorrt_llm::kernels::CublasGemmWrapperPtr; +namespace tensorrt_llm::kernels +{ + // TODO should reuse the function in gemmPlugin void _getProblemParams(cublasOperation_t& transa, cublasOperation_t& transb, int& m, int& n, int& k, int& lda, int& ldb, int& ldc, bool transA, bool transB, int M, int N, int K) @@ -103,9 +105,9 @@ int64_t getLowRankWorkSpaceSize(int64_t numTokens, int64_t maxLoraModuleNum, int return divUp(numTokens * maxLoraModuleNum * maxLowRank * typeSize, 16) * 16; } -int64_t getGroupedGemmParamsWorkSpaceSize(int64_t nbReq) +int64_t getGemmParamsWorkSpaceSize(int64_t nbReq) { - return std::max(tk::getSplitkGroupedGemmParamsWorkSpaceSize(nbReq), tk::getGroupedGemmParamsWorkSpaceSize(nbReq)); + return std::max(getSplitkGroupedGemmParamsWorkSpaceSize(nbReq), getGroupedGemmParamsWorkSpaceSize(nbReq)); } int64_t getSplitkGroupedGemmWorkSpaceSize( @@ -129,7 +131,7 @@ size_t LoraImpl::getWorkspaceSize( return (size_t) getGemmWorkSpaceSize(numTokens, mNumLoraModules, mMaxLowRank, mSplitKSlices) + getLowRankWorkSpaceSize(numTokens, mNumLoraModules, mMaxLowRank, typeSize) - + getGroupedGemmParamsWorkSpaceSize(numReqs * mNumLoraModules); + + getGemmParamsWorkSpaceSize(numReqs * 
mNumLoraModules); } void LoraImpl::setBestTactic(std::optional config) @@ -160,7 +162,7 @@ int LoraImpl::run(int64_t numTokens, int64_t numReqs, void const* input, int32_t setGemmConfig(); int64_t GemmWorkSpaceSize = getGemmWorkSpaceSize(numTokens, mNumLoraModules, mMaxLowRank, mSplitKSlices); - int64_t groupGemmParamsWorkSpaceSize = getGroupedGemmParamsWorkSpaceSize(numReqs * mNumLoraModules); + int64_t groupGemmParamsWorkSpaceSize = getGemmParamsWorkSpaceSize(numReqs * mNumLoraModules); void* gemmWorkSpace = workspace; // [gemmWorkSpace, lowrankWorkSpace, groupGemmParamsWorkSpace] void* lowRankWorkSpace = static_cast(gemmWorkSpace) + GemmWorkSpaceSize; void* groupGemmParamsWorkSpace = static_cast(lowRankWorkSpace) @@ -321,11 +323,11 @@ int LoraImpl::run(int64_t numTokens, int64_t numReqs, void const* input, int32_t TLLM_CHECK_WITH_INFO(mTransA == false && mTransB == true, fmtstr("Invalid transA (%d) transB (%d). transA must be false, transB must be true", int(mTransA), int(mTransB))); - tk::splitkGroupedGemm(problem_sizes, ptrA, ptrB, ptrC, ptrD, groupGemmParamsWorkSpace, + splitkGroupedGemm(problem_sizes, ptrA, ptrB, ptrC, ptrD, groupGemmParamsWorkSpace, groupGemmParamsWorkSpaceSize, gemmWorkSpace, GemmWorkSpaceSize, splitkBufferOffsets, true, mType, mSplitKSlices, stream); sync_check_cuda_error(); - tk::groupedGemm(problem_sizes_2, ptrA_2, ptrB_2, ptrC_2, ptrD_2, groupGemmParamsWorkSpace, + groupedGemm(problem_sizes_2, ptrA_2, ptrB_2, ptrC_2, ptrD_2, groupGemmParamsWorkSpace, groupGemmParamsWorkSpaceSize, gemmWorkSpace, GemmWorkSpaceSize, false, mType, stream); sync_check_cuda_error(); } @@ -333,3 +335,5 @@ int LoraImpl::run(int64_t numTokens, int64_t numReqs, void const* input, int32_t return 0; } + +} // namespace tensorrt_llm::kernels diff --git a/cpp/tensorrt_llm/kernels/mixtureOfExperts/moe_kernels.cu b/cpp/tensorrt_llm/kernels/mixtureOfExperts/moe_kernels.cu index a7298820e..c3fe5e475 100644 --- a/cpp/tensorrt_llm/kernels/mixtureOfExperts/moe_kernels.cu 
+++ b/cpp/tensorrt_llm/kernels/mixtureOfExperts/moe_kernels.cu @@ -1746,16 +1746,27 @@ T const* CutlassMoeFCRunner::l auto fc1_lora_impl = lora_params.fc1_lora_impl; int num_reqs = lora_params.num_reqs; - int64_t num_tokens_handled = 0; - T* lora_gated_out = lora_fc1_result_ + expanded_num_rows * inter_size; + T *lora_gated_out = nullptr, *lora_fc1_result = nullptr; + + if (is_gated_activation) + { + lora_gated_out = lora_fc1_result_; + lora_fc1_result = lora_fc1_result_ + expanded_num_rows * inter_size; + } + else + { + lora_fc1_result = lora_fc1_result_; + } + void* lora_workspace = lora_params.workspace; + int64_t num_tokens_handled = 0; // TODO: Remove the weightIndex parameter from the 'loraImpl->run' function and consolidate it into a single // 'groupGEMM' operation. for (int expert_id = 0; expert_id < num_experts_per_node; expert_id += 1) { int64_t expert_num_rows = host_expert_first_token_offset[expert_id + 1] - num_tokens_handled; - void* tmp_lora_fc_result = static_cast(lora_fc1_result_ + num_tokens_handled * inter_size); + void* tmp_lora_fc_result = static_cast(lora_fc1_result + num_tokens_handled * inter_size); fc1_lora_impl->run(expert_num_rows, num_reqs, permuted_data_ + num_tokens_handled * hidden_size, &host_permuted_fc1_lora_ranks[num_tokens_handled], &host_permuted_fc1_weight_ptrs[num_tokens_handled * 2], expert_id + start_expert, &tmp_lora_fc_result, lora_workspace, stream); diff --git a/cpp/tensorrt_llm/kernels/splitkGroupGemm.cu b/cpp/tensorrt_llm/kernels/splitkGroupGemm.cu index b4c49453f..d4bf74a01 100644 --- a/cpp/tensorrt_llm/kernels/splitkGroupGemm.cu +++ b/cpp/tensorrt_llm/kernels/splitkGroupGemm.cu @@ -81,13 +81,12 @@ void splitkGroupedGemm_(std::vector problem_sizes, std using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; - int const kAlignmentA = 8; - int const kAlignmentB = 8; + int constexpr kAlignment = 8; int problem_count = problem_sizes.size(); using GemmKernel = typename 
cutlass::gemm::kernel::DefaultSplitkGemmGrouped, cutlass::gemm::GemmShape, cutlass::gemm::GemmShape<16, 8, 16>, cutlass::epilogue::thread::LinearCombination::value, @@ -141,9 +140,13 @@ void splitkGroupedGemm_(std::vector problem_sizes, std auto problem = problem_sizes.at(i); lda_host[i] = LayoutA::packed({problem.m(), problem.k()}).stride(0); + TLLM_CHECK(lda_host[i] % kAlignment == 0); ldb_host[i] = LayoutB::packed({problem.k(), problem.n()}).stride(0); + TLLM_CHECK(ldb_host[i] % kAlignment == 0); ldc_host[i] = LayoutC::packed({problem.m(), problem.n()}).stride(0); + TLLM_CHECK(ldc_host[i] % kAlignment == 0); ldd_host[i] = LayoutC::packed({problem.m(), problem.n()}).stride(0); + TLLM_CHECK(ldd_host[i] % kAlignment == 0); offset_host[i] = cumulative_offsets; cumulative_offsets += problem.m() * problem.n(); diff --git a/cpp/tensorrt_llm/kernels/unfusedAttentionKernels.cu b/cpp/tensorrt_llm/kernels/unfusedAttentionKernels.cu index 63394a78c..7eafc17a5 100644 --- a/cpp/tensorrt_llm/kernels/unfusedAttentionKernels.cu +++ b/cpp/tensorrt_llm/kernels/unfusedAttentionKernels.cu @@ -1065,8 +1065,10 @@ __global__ void transpose_remove_padding(T const* src, T* dst, int const batch_s // do remove_sequence_length_padding int const bid = blockIdx.x; // batch * seq_len or valid_word_num - int const src_batch_id = (bid + mask_offset[bid]) / seq_len; - int const src_seq_id = (bid + mask_offset[bid]) % seq_len; + int const mask_offset_value = (mask_offset == nullptr) ? 
0 : mask_offset[bid]; + + int const src_batch_id = (bid + mask_offset_value) / seq_len; + int const src_seq_id = (bid + mask_offset_value) % seq_len; int const dst_seq_id = bid; @@ -1166,7 +1168,7 @@ __global__ void add_fusedQKV_bias_transpose_kernel(T* q_buf, T* k_buf, T* v_buf, // k_buf, v_buf: [batch, kv_head_num, seq_len, size_per_head] // For cross attention where q/k/v buffer could be nullptr, writing to split buffer is suppressed when null T* qkv_ptr[3] = {q_buf, k_buf, v_buf}; - bool const has_padding = padding_offset == nullptr; + bool const has_padding = padding_offset != nullptr; int const hidden = head_num * size_per_head; // hidden dim Q int const n = hidden + 2 * kv_head_num * size_per_head; @@ -1175,11 +1177,11 @@ __global__ void add_fusedQKV_bias_transpose_kernel(T* q_buf, T* k_buf, T* v_buf, int const bias_id = index % n; int const token_idx = index / n; - int const token_padded_idx = token_idx + (has_padding ? 0 : padding_offset[token_idx]); + int const token_padded_idx = token_idx + (has_padding ? padding_offset[token_idx] : 0); int const target_batch_id = token_padded_idx / seq_len; int const actual_seq_len = seq_lens[target_batch_id]; int const seq_id = token_padded_idx % seq_len; - bool const valid_seq = seq_id < actual_seq_len || !has_padding; + bool const valid_seq = seq_id < actual_seq_len || has_padding; int qkv_id; int head_id; @@ -1319,12 +1321,12 @@ __global__ void add_fusedQKV_bias_transpose_kernel(T* q_buf, T* k_buf, T* v_buf, int const token_idx = blockIdx.x; int const token_padding_offset = (padding_offset == nullptr || token_idx < 0) ? 
0 : padding_offset[token_idx]; int const tgt_token_idx = token_idx + token_padding_offset; - bool const has_padding = padding_offset == nullptr; + bool const has_padding = padding_offset != nullptr; int const batch_idx = tgt_token_idx / seq_len; int const seq_idx = tgt_token_idx % seq_len; int const actual_seq_len = seq_lens[batch_idx]; - bool const valid_seq = seq_idx < actual_seq_len || !has_padding; + bool const valid_seq = seq_idx < actual_seq_len || has_padding; int const head_idx = blockIdx.y; int const tidx = threadIdx.x; diff --git a/cpp/tensorrt_llm/layers/decodingLayer.cpp b/cpp/tensorrt_llm/layers/decodingLayer.cpp index a33cdd627..505150db8 100644 --- a/cpp/tensorrt_llm/layers/decodingLayer.cpp +++ b/cpp/tensorrt_llm/layers/decodingLayer.cpp @@ -20,6 +20,7 @@ #include "tensorrt_llm/layers/decodingParams.h" #include "tensorrt_llm/layers/explicitDraftTokensLayer.h" #include "tensorrt_llm/layers/layerUtils.h" +#include "tensorrt_llm/layers/lookaheadDecodingLayer.h" #include "tensorrt_llm/layers/medusaDecodingLayer.h" #include "tensorrt_llm/layers/samplingLayer.h" @@ -66,6 +67,7 @@ bool hasDiffRuntimeArgs(std::shared_ptr DecodingLayer::DecodingLayer(executor::DecodingMode const& mode, DecoderDomain const& decoderDomain, std::shared_ptr bufferManager) @@ -88,8 +90,7 @@ DecodingLayer::DecodingLayer(executor::DecodingMode const& mode, DecoderDomai } else if (mDecodingMode.isLookahead()) { - // TODO(nkorobov) add lookahead layer - TLLM_LOG_WARNING("Lookahead decoding is not supported yet."); + mDecodingLayer = std::make_unique>(mDecoderDomain, mBufferManager); } else if (mDecodingMode.isExplicitDraftTokens()) { @@ -134,7 +135,7 @@ void DecodingLayer::setup(SizeType32 batchSize, SizeType32 beamWidth, BufferC else if (mDecodingMode.isLookahead()) { TLLM_CHECK_WITH_INFO(beamWidth == 1, "Decoding mode is Lookahead, but beamWidth != 1 (%d != 1)", beamWidth); - // TODO(nkorobov) add lookahead layer + mDecodingLayer->setup(batchSize, beamWidth, batchSlots, 
setupParams->decodingParams); } else if (mDecodingMode.isExplicitDraftTokens()) { @@ -235,7 +236,8 @@ std::tuple, std::shared_ptr decodingParams; }; -class LookaheadSetupParams : public DecodingSetupParams +struct LookaheadSetupParams : public DecodingSetupParams { -public: + using TensorPtr = runtime::ITensor::SharedPtr; + std::vector prompt; // [batchSize][maxSeqLen] on cpu - std::optional> randomSeed; // [1] or [batchSize] on cpu std::vector algoConfigs; // [1 or batchSize] on cpu + + //! see LookaheadDecodingOutputs::generationLengths + TensorPtr generationLengths; + //! see LookaheadDecodingOutputs::positionOffsets + TensorPtr positionOffsets; + //! see LookaheadDecodingOutputs::attentionPackedMasks + TensorPtr attentionPackedMasks; + //! see LookaheadDecodingOutputs::actualGenerationLengths + TensorPtr actualGenerationLengths; }; class BaseDecodingInputs @@ -396,17 +405,11 @@ class ExplicitDraftTokensInputs : public DecodingInputs class LookaheadDecodingInputs : public DecodingInputs { - using TensorConstPtr = runtime::ITensor::SharedConstPtr; - public: - explicit LookaheadDecodingInputs(TensorPtr endIds, TensorConstPtr batchSlots) + explicit LookaheadDecodingInputs(TensorConstPtr endIds, TensorConstPtr batchSlots) : DecodingInputs{std::move(endIds), std::move(batchSlots)} - //, logits{logits} { } - // TODO(liweim) reuse base logits and curTokensPerStep. - // TensorConstPtr logits; // [batchSize, maxTokensPerStep, vocabSizePadded] on gpu - // TensorConstPtr tokensPerStep; // [maxBatchSize] on gpu }; class BaseDecodingOutputs @@ -527,6 +530,33 @@ class SpeculativeDecodingOutputs : public BaseDecodingOutputs TensorPtr packedMasks; }; +class LookaheadDecodingOutputs : public SpeculativeDecodingOutputs +{ + using TensorPtr = runtime::ITensor::SharedPtr; + +public: + explicit LookaheadDecodingOutputs(TensorPtr outputIds) + : SpeculativeDecodingOutputs{std::move(outputIds)} + { + } + + //! 
for TLLM engine input "spec_decoding_generation_lengths", indicating how many tokens to be generated. + //! currently, the 1st step of generation is 1, set at `setup`, others are maxDecodingTokens, set at `forward`. + //! [maxBatchSize] + TensorPtr generationLengths; + //! for TLLM engine input "spec_decoding_position_offsets", + //! indicating each token position offset base on the last golden token = 0. + //! ABCefgxyz--- // sequence tokens, ABCD: golden; efg, xyz: draft; ---: padding. + //! ***<0>123123--- // positionOffsets. + //! 012<3>456456--- // positionIds. + //! [maxBatchSize, maxDecodingTokens] + TensorPtr positionOffsets; + //! [maxBatchSize, maxDecodingTokens] + TensorPtr positionIds; + //! The actual decoding tokens length, for debug and for future. + TensorPtr actualGenerationLengths; +}; + class ExplicitDraftTokensOutputs : public SpeculativeDecodingOutputs { public: diff --git a/cpp/tensorrt_llm/layers/lookaheadAlgorithm.cpp b/cpp/tensorrt_llm/layers/lookaheadAlgorithm.cpp index 7392c4e8d..5b3062be0 100644 --- a/cpp/tensorrt_llm/layers/lookaheadAlgorithm.cpp +++ b/cpp/tensorrt_llm/layers/lookaheadAlgorithm.cpp @@ -64,9 +64,6 @@ void LookaheadAlgorithm::setup(TensorConstPtr const& prompt, SizeType32 w, SizeT std::copy(std::prev(promptRange.end(), mN - 1), promptRange.end(), goldRange.begin()); mGuessTokens = ITensor::slice(mGuessTokensMax, 0, 0); mFilling = (mN - 1) > 0 ? 
1 : 0; - PRINT_TOKENS(prompt); - PRINT_TOKENS(mPrefills); - PRINT_TOKENS(mPastTokens); TLLM_LOG_TRACE("%s stop", __PRETTY_FUNCTION__); } @@ -271,10 +268,10 @@ void LookaheadAlgorithm::verify(TensorPtr const& accepted, TensorPtr const& acce BufferRange acceptedOffsetsRange(*acceptedOffsets); auto lookSize = 1 + mN - 2 - mFilling + mFilling * mW; - acceptedOffsetsRange[0] = 0; + // acceptedOffsetsRange[0] = 0; for (SizeType32 i = 0; i < maxHit; i++) { - acceptedOffsetsRange[1 + i] = lookSize + hitIdx * (mN - 1) + i; + acceptedOffsetsRange[i] = lookSize + hitIdx * (mN - 1) + i - 1; } *BufferRange(*acceptedLength).begin() = maxHit + 1; diff --git a/cpp/tensorrt_llm/layers/lookaheadDecodingLayer.cpp b/cpp/tensorrt_llm/layers/lookaheadDecodingLayer.cpp index 87098f88b..18697e6c8 100644 --- a/cpp/tensorrt_llm/layers/lookaheadDecodingLayer.cpp +++ b/cpp/tensorrt_llm/layers/lookaheadDecodingLayer.cpp @@ -16,6 +16,7 @@ #include "lookaheadDecodingLayer.h" #include "tensorrt_llm/common/assert.h" +#include "tensorrt_llm/common/cudaUtils.h" #include "tensorrt_llm/common/logger.h" #include "tensorrt_llm/common/memoryUtils.h" #include "tensorrt_llm/executor/executor.h" @@ -32,6 +33,7 @@ #include #include #include +#include #include namespace tensorrt_llm::layers @@ -44,19 +46,28 @@ using namespace tensorrt_llm::runtime; template LookaheadDecodingLayer::CpuAlgorithmResources::CpuAlgorithmResources(DecoderDomain const& decoderDomain) { - auto maxBatchSize = decoderDomain.getBatchSize(); + auto const maxBatchSize = decoderDomain.getBatchSize(); + auto const beamWidth = decoderDomain.getBeamWidth(); + auto const decodingTokens = decoderDomain.getMaxDecodingTokens(); auto lookaheadModule = std::dynamic_pointer_cast(decoderDomain.getSpeculativeDecodingModule()); auto const [maxW, maxN, maxG] = lookaheadModule->getExecutionConfig().get(); + SizeType32 maxTokensPerStep, maxNumNewTokens, maxDraftLen, maxAcceptedDraftLen; + std::tie(maxTokensPerStep, maxNumNewTokens, maxDraftLen, 
maxAcceptedDraftLen) + = executor::LookaheadDecodingConfig(maxW, maxN, maxG).calculateSpeculativeResource(); + TLLM_CHECK_WITH_INFO(beamWidth == 1, "Lookahead requires beam width = 1"); + TLLM_CHECK_WITH_INFO(maxTokensPerStep == decodingTokens, "%d != %d", maxTokensPerStep, decodingTokens); for (SizeType32 id = 0; id < maxBatchSize; id++) { mAlgos.emplace_back(maxW, maxN, maxG, id); } - SizeType32 maxTokensPerStep, maxNumNewTokens, maxDraftLen; - std::tie(maxTokensPerStep, maxNumNewTokens, maxDraftLen, std::ignore) - = executor::LookaheadDecodingConfig(maxW, maxN, maxG).calculateSpeculativeResource(); + mPrompts.reserve(maxBatchSize); + for (auto bi = 0; bi < maxBatchSize; bi++) + { + mPrompts.emplace_back(BufferManager::cpu(ITensor::makeShape({0}), nvinfer1::DataType::kINT32)); + } auto const maxBatchShape1D = ITensor::makeShape({maxBatchSize}); mBatchSlots = BufferManager::cpu(maxBatchShape1D, nvinfer1::DataType::kINT32); @@ -66,14 +77,22 @@ LookaheadDecodingLayer::CpuAlgorithmResources::CpuAlgorithmResources(DecoderD mEndIds = BufferManager::cpu(maxBatchShape1D, nvinfer1::DataType::kINT32); mOutputIds = BufferManager::cpu(ITensor::makeShape({maxBatchSize, maxNumNewTokens}), nvinfer1::DataType::kINT32); - mPathsOffsets = BufferManager::cpu(ITensor::makeShape({maxBatchSize, maxNumNewTokens}), nvinfer1::DataType::kINT32); + mNewTokens = BufferManager::cpu( + ITensor::makeShape({maxTokensPerStep, maxBatchSize, beamWidth}), nvinfer1::DataType::kINT32); + mPathsOffsets + = BufferManager::cpu(ITensor::makeShape({maxBatchSize, maxAcceptedDraftLen}), nvinfer1::DataType::kINT32); mNumNewTokens = BufferManager::cpu(maxBatchShape1D, nvinfer1::DataType::kINT32); mNumNewTokensCumSum = BufferManager::cpu(ITensor::makeShape({maxBatchSize + 1}), nvinfer1::DataType::kINT32); mNextDraftTokens = BufferManager::cpu(ITensor::makeShape({maxBatchSize, maxDraftLen}), nvinfer1::DataType::kINT32); mNextDraftPosIds = BufferManager::cpu(ITensor::makeShape({maxBatchSize, maxDraftLen}), 
nvinfer1::DataType::kINT32); - auto divUp32 = [](SizeType32 x) { return x / 32 + ((x % 32) ? 1 : 0); }; - mPackedMasks = BufferManager::cpu( - ITensor::makeShape({maxBatchSize, maxTokensPerStep, divUp32(maxTokensPerStep)}), nvinfer1::DataType::kINT32); + mGenerationLengths = BufferManager::cpu(maxBatchShape1D, nvinfer1::DataType::kINT32); + mGenerationLengthsMax = BufferManager::cpu(maxBatchShape1D, nvinfer1::DataType::kINT32); + mPositionOffsets + = BufferManager::cpu(ITensor::makeShape({maxBatchSize, maxTokensPerStep}), nvinfer1::DataType::kINT32); + mPositionIds = BufferManager::cpu(ITensor::makeShape({maxBatchSize, maxTokensPerStep}), nvinfer1::DataType::kINT32); + mPackedMask = BufferManager::cpu(ITensor::makeShape({maxBatchSize, maxTokensPerStep, + static_cast(divUp(maxTokensPerStep, 32))}), + nvinfer1::DataType::kINT32); mSamplingMask = BufferManager::cpu(ITensor::makeShape({maxBatchSize, maxDraftLen}), nvinfer1::DataType::kBOOL); mNextDraftLengths = BufferManager::cpu(maxBatchShape1D, nvinfer1::DataType::kINT32); mSequenceLengths = BufferManager::cpu(maxBatchShape1D, nvinfer1::DataType::kINT32); @@ -87,6 +106,10 @@ LookaheadDecodingLayer::LookaheadDecodingLayer( { TLLM_LOG_TRACE("%s start", __PRETTY_FUNCTION__); + auto lookaheadModule + = std::dynamic_pointer_cast(decoderDomain.getSpeculativeDecodingModule()); + auto const [maxW, maxN, maxG] = lookaheadModule->getExecutionConfig().get(); + auto const maxBatchSize = mDecoderDomain.getBatchSize(); auto const maxTokensPerStep = mDecoderDomain.getMaxDecodingTokens(); auto const vocabSizePadded = mDecoderDomain.getVocabSizePadded(); @@ -97,7 +120,6 @@ LookaheadDecodingLayer::LookaheadDecodingLayer( auto workspaceSize = getTopKWorkspaceSize(maxBatchSize, maxTokensPerStep, maxTopK, vocabSizePadded); mSamplingWorkspaceDevice = mBufferManager->gpu(ITensor::makeShape({static_cast(workspaceSize)}), nvinfer1::DataType::kINT8); - TLLM_LOG_DEBUG("workspaceSize=%d", getWorkspaceSize()); mTargetTokensDevice = 
mBufferManager->gpu(maxBatchShape2D, nvinfer1::DataType::kINT32); mRandomSeedsDevice = mBufferManager->gpu(maxBatchShape1D, nvinfer1::DataType::kINT64); mSamplingMaskDevice = mBufferManager->gpu(maxBatchShape2D, nvinfer1::DataType::kBOOL); @@ -119,13 +141,23 @@ void LookaheadDecodingLayer::setup(SizeType32 batchSize, SizeType32 beamWidth auto& algoConfigs = setupParams->algoConfigs; TLLM_CHECK_WITH_INFO(algoConfigs.size() == 1 || algoConfigs.size() == batchSize, "Lookahead runtime configuration size should be either 1 or batchSize"); + + for (auto bi = 0; bi < batchSize; bi++) + { + PRINT_SHAPE(setupParams->prompt[bi]); + PRINT_TOKENS(setupParams->prompt[bi]); + mCpuAlgo->mPrompts[bi]->reshape(setupParams->prompt[bi]->getShape()); + mBufferManager->copy(*setupParams->prompt[bi], *mCpuAlgo->mPrompts[bi]); + } + + mBufferManager->getStream().synchronize(); // sync prompt gpu to cpu + auto const batchSlotsRange = BufferRange(*batchSlots); for (SizeType32 bi = 0; bi < batchSize; bi++) { auto const gbi = batchSlotsRange[bi]; SizeType32 bi1orN = (algoConfigs.size() == 1) ? 
0 : bi; - TLLM_LOG_DEBUG("CPU ALGO [ %d ] setup", gbi); - PRINT_TOKENS(setupParams->prompt[bi]); + TLLM_LOG_DEBUG("CPU ALGO [ %d ] setup prompt %s", gbi, D(mCpuAlgo->mPrompts[bi]).values().c_str()); auto [w, n, g] = algoConfigs[bi1orN].get(); SizeType32 runtimeTokensPerStep; std::tie(runtimeTokensPerStep, std::ignore, std::ignore, std::ignore) @@ -133,8 +165,42 @@ void LookaheadDecodingLayer::setup(SizeType32 batchSize, SizeType32 beamWidth TLLM_CHECK_WITH_INFO(runtimeTokensPerStep <= mDecoderDomain.getMaxDecodingTokens(), "runtime w(%d) n(%d) g(%d) exceeds maxTokensPerStep(%d)", w, n, g, mDecoderDomain.getMaxDecodingTokens()); - mCpuAlgo->mAlgos[gbi].setup(setupParams->prompt[bi], w, n, g); + PRINT_VALUES(mCpuAlgo->mPrompts[bi]); + mCpuAlgo->mAlgos[gbi].setup(mCpuAlgo->mPrompts[bi], w, n, g); } + + for (runtime::SizeType32 bi = 0; bi < batchSize; bi++) + { + SizeType32 gbi = batchSlotsRange[bi]; + (BufferRange(*mCpuAlgo->mGenerationLengths))[gbi] = 1; + BufferLocation(*mCpuAlgo->mPositionOffsets).at(gbi, 0) = 0; + BufferRange packedMaskRange(*ITensor::at(mCpuAlgo->mPackedMask, {gbi})); + for (auto& mask : packedMaskRange) + { + mask = 0; + } + packedMaskRange[0] = 1; + + PRINT_SHAPE(mCpuAlgo->mGenerationLengths); + PRINT_SHAPE(setupParams->generationLengths); + PRINT_SHAPE(mCpuAlgo->mPositionOffsets); + PRINT_SHAPE(setupParams->positionOffsets); + PRINT_SHAPE(mCpuAlgo->mPackedMask); + PRINT_SHAPE(setupParams->attentionPackedMasks); + mBufferManager->copy( + *ITensor::at(mCpuAlgo->mGenerationLengths, {gbi}), *ITensor::at(setupParams->generationLengths, {gbi})); + if (setupParams->actualGenerationLengths) + { + mBufferManager->copy(*ITensor::at(mCpuAlgo->mGenerationLengths, {gbi}), + *ITensor::at(setupParams->actualGenerationLengths, {gbi})); + } + mBufferManager->copy( + *ITensor::at(mCpuAlgo->mPositionOffsets, {gbi}), *ITensor::at(setupParams->positionOffsets, {gbi})); + mBufferManager->copy( + *ITensor::at(mCpuAlgo->mPackedMask, {gbi}), 
*ITensor::at(setupParams->attentionPackedMasks, {gbi})); + } + + mBufferManager->getStream().synchronize(); // sync outputs cpu to gpu } auto curandStatesDevicePtr = reinterpret_cast(bufferCast(*mCurandStatesDevice)); @@ -171,22 +237,14 @@ void LookaheadDecodingLayer::forwardAsync( { TLLM_LOG_TRACE("%s start", __PRETTY_FUNCTION__); auto inputs = std::dynamic_pointer_cast(inputParams); - auto outputs = std::dynamic_pointer_cast(outputParams); + auto outputs = std::dynamic_pointer_cast(outputParams); auto batchSize = inputs->localBatchSize; TLLM_CHECK_WITH_INFO(inputs->batchSlots, "Batch slots must be provided for LookaheadDecoding"); TLLM_CHECK_WITH_INFO(inputs->curTokensPerStep, "curTokensPerStep must be provided for LookaheadDecoding"); TLLM_CHECK_WITH_INFO(outputs->sequenceLength, "sequenceLength must be provided for LookaheadDecoding"); - // TODO(liweim) to be confirmed. - TLLM_CHECK(inputs->logits); - - mBufferManager->copy( - bufferCast(*inputs->batchSlots), *mCpuAlgo->mBatchSlots, runtime::MemoryType::kGPU); - mBufferManager->copy(bufferCast(*inputs->curTokensPerStep.value()), *mCpuAlgo->mTokensPerStep, - runtime::MemoryType::kGPU); - mBufferManager->copy(bufferCast(*inputs->endIds), *mCpuAlgo->mEndIds, runtime::MemoryType::kGPU); - mBufferManager->copy(bufferCast(*outputs->sequenceLength.value()), *mCpuAlgo->mSequenceLengths, - runtime::MemoryType::kGPU); + TLLM_CHECK_WITH_INFO(inputs->logits, "logits must be provided for lookaheadDecoding"); + TLLM_CHECK_WITH_INFO(inputs->localBatchSize > 0, "batchSize must be"); TopKSamplingKernelParams params; params.maxBatchSize = mDecoderDomain.getBatchSize(); @@ -197,7 +255,6 @@ void LookaheadDecodingLayer::forwardAsync( params.maxSeqLen = mDecoderDomain.getMaxDecodingTokens(); params.vocabSizePadded = mDecoderDomain.getVocabSizePadded(); params.batchSlots = bufferCast(*inputs->batchSlots); - TLLM_LOG_DEBUG("batchSize = %d", batchSize); params.logProbs = bufferCastOrNull(inputs->logits); params.outputIds = 
bufferCast(*mTargetTokensDevice); params.workspace = bufferCast(*mSamplingWorkspaceDevice); @@ -215,19 +272,13 @@ void LookaheadDecodingLayer::forwardAsync( // Finished state is not set. invokeBatchTopKSampling(params, getStream()); - mBufferManager->copy(*mTargetTokensDevice, *mCpuAlgo->mTargetTokens); - - TLLM_LOG_TRACE("%s stop", __PRETTY_FUNCTION__); -} - -template -void LookaheadDecodingLayer::forwardSync( - std::shared_ptr const& outputParams, std::shared_ptr const& inputParams) -{ if (mCpuAlgo) { - forwardSyncCPU(outputParams, inputParams); + forwardSyncCPU(outputs, inputs); + mGlobalSteps += 1; } + + TLLM_LOG_TRACE("%s stop", __PRETTY_FUNCTION__); } template @@ -275,33 +326,55 @@ void LookaheadDecodingLayer::posIdsToMask(TensorPtr mask, TensorConstPtr posI template void LookaheadDecodingLayer::forwardSyncCPU( - std::shared_ptr const& outputParams, std::shared_ptr const& inputParams) + std::shared_ptr const& outputs, std::shared_ptr const& inputs) { TLLM_LOG_TRACE("%s start", __PRETTY_FUNCTION__); - auto inputs = std::dynamic_pointer_cast(inputParams); - auto outputs = std::dynamic_pointer_cast(outputParams); + + mCpuAlgo->mBatchSlots->reshape(inputs->batchSlots->getShape()); + mBufferManager->copy(*inputs->batchSlots, *mCpuAlgo->mBatchSlots); + mBufferManager->copy(*inputs->curTokensPerStep.value(), *mCpuAlgo->mTokensPerStep); + mBufferManager->copy(*inputs->curTokensPerStep.value(), *mCpuAlgo->mTokensPerStep); + mBufferManager->copy(*inputs->endIds, *mCpuAlgo->mEndIds); + mBufferManager->copy(*outputs->sequenceLength.value(), *mCpuAlgo->mSequenceLengths); + + mBufferManager->copy(*mTargetTokensDevice, *mCpuAlgo->mTargetTokens); + + mBufferManager->getStream().synchronize(); + auto const batchSize = inputs->localBatchSize; + auto const beamIndex = 0; BufferRange tokensPerStepRange(*mCpuAlgo->mTokensPerStep); + BufferRange endIdsRange(*mCpuAlgo->mEndIds); + BufferLocation newTokensLocation(*mCpuAlgo->mNewTokens); BufferRange 
numNewTokensRange(*mCpuAlgo->mNumNewTokens); BufferRange numNewTokensCumSumRange(*mCpuAlgo->mNumNewTokensCumSum); BufferRange batchSlotsRange(*mCpuAlgo->mBatchSlots); + BufferRange generationLengthsRange(*mCpuAlgo->mGenerationLengths); + BufferRange generationLengthsMaxRange(*mCpuAlgo->mGenerationLengthsMax); BufferRange nextDraftLengthsRange(*mCpuAlgo->mNextDraftLengths); BufferRange sequenceLengthsRange(*mCpuAlgo->mSequenceLengths); + BufferLocation pathsOffsetLocation(*mCpuAlgo->mPathsOffsets); + BufferLocation outputIdsLocation(*mCpuAlgo->mOutputIds); + + mBufferManager->setZero(*mCpuAlgo->mPathsOffsets); + mBufferManager->setZero(*mCpuAlgo->mNumNewTokens); + mBufferManager->setZero(*mCpuAlgo->mNumNewTokensCumSum); for (SizeType32 bi = 0; bi < batchSize; bi++) { SizeType32 gbi = batchSlotsRange[bi]; LookaheadAlgorithm& theAlgo(mCpuAlgo->mAlgos[gbi]); - SizeType32 const tokensPerStep = tokensPerStepRange[gbi]; + SizeType32 const tokensPerStep = generationLengthsRange[gbi]; TensorPtr sampledTokens = ITensor::slice(mCpuAlgo->mTargetTokens, {gbi, 0}, tokensPerStep); + PRINT_VALUES(sampledTokens); if (tokensPerStep == 1) - { // The first step in generation phase has no draft tokens. + { + // The first step in generation phase has no draft tokens. 
theAlgo.accept(sampledTokens); mBufferManager->copy(*sampledTokens, *ITensor::slice(mCpuAlgo->mOutputIds, {gbi, 0}, tokensPerStep)); - BufferLocation(*mCpuAlgo->mPathsOffsets).at(gbi, 0) = 0; numNewTokensRange[gbi] = tokensPerStep; BufferLocation(*mCpuAlgo->mNextDraftLengths).at(gbi) = 0; } @@ -318,7 +391,7 @@ void LookaheadDecodingLayer::forwardSyncCPU( auto maxNumNewTokens = mCpuAlgo->mOutputIds->getShape().d[1]; mBufferManager->copy(*ITensor::at(mCpuAlgo->mOutputIds, {gbi}), - *ITensor::slice(outputs->outputIds, {gbi, sequenceLengthsRange[gbi]}, maxNumNewTokens)); + *ITensor::slice(outputs->outputIds, {gbi, 0, sequenceLengthsRange[gbi]}, maxNumNewTokens)); sequenceLengthsRange[gbi] += numNewTokensRange[gbi]; @@ -330,38 +403,83 @@ void LookaheadDecodingLayer::forwardSyncCPU( ITensor::at(mCpuAlgo->mSequenceLengths, {gbi}), // ITensor::at(mCpuAlgo->mOutputIds, {gbi, numNewTokensRange[gbi] - 1})); - posIdsToMask( // - ITensor::at(mCpuAlgo->mPackedMasks, {gbi}), // + BufferLocation posIdsLocation(*ITensor::at(mCpuAlgo->mPositionIds, {gbi})); + for (auto& posid : posIdsLocation) + { + posid = sequenceLengthsRange[gbi] - 1; + } + mBufferManager->copy(*ITensor::slice(mCpuAlgo->mNextDraftPosIds, {gbi, 0}, nextDraftLengthsRange[gbi]), + *ITensor::slice(mCpuAlgo->mPositionIds, {gbi, 1}, nextDraftLengthsRange[gbi])); + + posIdsToMask( // + ITensor::at(mCpuAlgo->mPackedMask, {gbi}), // ITensor::slice(mCpuAlgo->mNextDraftPosIds, {gbi, 0}, nextDraftLengthsRange[gbi])); + + BufferRange offsetRange(*ITensor::at(mCpuAlgo->mPositionOffsets, {gbi})); + TLLM_CHECK_WITH_INFO( + posIdsLocation.size() == offsetRange.size(), "%ld, %ld", posIdsLocation.size(), offsetRange.size()); + for (auto i = 0; i < posIdsLocation.size(); i++) + { + offsetRange[i] = posIdsLocation[i] - posIdsLocation[0]; + } + TensorPtr accepted = ITensor::slice(mCpuAlgo->mOutputIds, {gbi, 0}, numNewTokensRange[gbi]); + TensorPtr draft = ITensor::slice(mCpuAlgo->mNextDraftTokens, {gbi, 0}, 
nextDraftLengthsRange[gbi]); + + TLLM_LOG_DEBUG("CPU ALGO [ %d ] forward, %s", gbi, D(sampledTokens).values().c_str()); + TLLM_LOG_DEBUG("[%d][%d] CPU ALGO [ %d ] forward, %s, %s", mGlobalSteps, batchSize, gbi, + D(accepted).values().c_str(), D(draft).values().c_str()); } numNewTokensCumSumRange[0] = 0; - for (SizeType32 i = 0; i < numNewTokensRange.size(); i++) + SizeType32 pi = 0; + for (SizeType32 bi = 0; bi < numNewTokensRange.size(); bi++) + { + SizeType32 acceptedDraftLen = numNewTokensRange[bi] <= 1 ? 0 : (numNewTokensRange[bi] - 1); + numNewTokensCumSumRange[bi + 1] = numNewTokensCumSumRange[bi] + acceptedDraftLen; + for (SizeType32 tj = 0; tj < acceptedDraftLen; tj++) + { + pathsOffsetLocation[pi++] = pathsOffsetLocation.at(bi, tj); + } + } + for (; pi < pathsOffsetLocation.size(); pi++) { - numNewTokensCumSumRange[i + 1] = numNewTokensCumSumRange[i] + numNewTokensRange[i]; + pathsOffsetLocation[pi++] = 0; } TLLM_CHECK(outputs->numNewTokens); - mBufferManager->copy(*mCpuAlgo->mSequenceLengths, // - const_cast(outputs->sequenceLength.value()->data()), runtime::MemoryType::kGPU); - mBufferManager->copy(*mCpuAlgo->mPathsOffsets, // - const_cast(outputs->pathsOffsets->data()), runtime::MemoryType::kGPU); - mBufferManager->copy(*mCpuAlgo->mNumNewTokens, // - const_cast(outputs->numNewTokens.value()->data()), runtime::MemoryType::kGPU); - mBufferManager->copy(*mCpuAlgo->mNumNewTokensCumSum, // - const_cast(outputs->numNewTokensCumSum->data()), runtime::MemoryType::kGPU); - mBufferManager->copy(*mCpuAlgo->mNextDraftTokens, // - const_cast(outputs->nextDraftTokens->data()), runtime::MemoryType::kGPU); - mBufferManager->copy(*mCpuAlgo->mNextDraftPosIds, // - const_cast(outputs->nextDraftPosIds->data()), runtime::MemoryType::kGPU); - mBufferManager->copy(*mCpuAlgo->mPackedMasks, // - const_cast(outputs->packedMasks->data()), runtime::MemoryType::kGPU); - mBufferManager->copy(*mCpuAlgo->mNextDraftLengths, // - const_cast(outputs->nextDraftLengths->data()), 
runtime::MemoryType::kGPU); - - // TODO(liweim) do we need this? - // mBufferManager->getStream().synchronize(); + mBufferManager->copy(*mCpuAlgo->mSequenceLengths, *outputs->sequenceLength.value()); + mBufferManager->copy(*mCpuAlgo->mNewTokens, *outputs->newTokens); + + mBufferManager->copy(*mCpuAlgo->mPathsOffsets, *outputs->pathsOffsets); + mBufferManager->copy(*mCpuAlgo->mNumNewTokens, *outputs->numNewTokens.value()); + mBufferManager->copy(*mCpuAlgo->mNumNewTokensCumSum, *outputs->numNewTokensCumSum); // + mBufferManager->copy(*mCpuAlgo->mNextDraftTokens, *outputs->nextDraftTokens); + + mBufferManager->copy(*mCpuAlgo->mPackedMask, *outputs->packedMasks); + + if (outputs->nextDraftLengths) + { + mBufferManager->copy(*mCpuAlgo->mNextDraftLengths, *outputs->nextDraftLengths); + } + + for (SizeType32 bi = 0; bi < batchSize; bi++) + { + SizeType32 gbi = batchSlotsRange[bi]; + generationLengthsRange[gbi] = nextDraftLengthsRange[gbi] + 1; + generationLengthsMaxRange[gbi] = mDecoderDomain.getMaxDecodingTokens(); + } + mBufferManager->copy(*mCpuAlgo->mPackedMask, *outputs->packedMasks); + mBufferManager->copy(*mCpuAlgo->mGenerationLengthsMax, *outputs->generationLengths); + mBufferManager->copy(*mCpuAlgo->mPositionOffsets, *outputs->positionOffsets); + mBufferManager->copy(*mCpuAlgo->mPositionIds, *outputs->positionIds); + + if (outputs->actualGenerationLengths) + { + mBufferManager->copy(*mCpuAlgo->mGenerationLengths, *outputs->actualGenerationLengths); + } + + mBufferManager->getStream().synchronize(); TLLM_LOG_TRACE("%s stop", __PRETTY_FUNCTION__); } diff --git a/cpp/tensorrt_llm/layers/lookaheadDecodingLayer.h b/cpp/tensorrt_llm/layers/lookaheadDecodingLayer.h index 0b6c44761..d68254074 100644 --- a/cpp/tensorrt_llm/layers/lookaheadDecodingLayer.h +++ b/cpp/tensorrt_llm/layers/lookaheadDecodingLayer.h @@ -42,15 +42,12 @@ class LookaheadDecodingLayer : public BaseLayer void forwardAsync(std::shared_ptr const& outputParams, std::shared_ptr const& inputParams) 
override; - void forwardSync(std::shared_ptr const& outputParams, - std::shared_ptr const& inputParams) override; - //! @returns workspace needed for this layer in bytes [[nodiscard]] size_t getWorkspaceSize() const noexcept; private: - void forwardSyncCPU(std::shared_ptr const& outputParams, - std::shared_ptr const& inputParams); + void forwardSyncCPU(std::shared_ptr const& outputs, + std::shared_ptr const& inputs); void posIdsToMask(TensorPtr mask, TensorConstPtr posIds); private: @@ -67,6 +64,7 @@ class LookaheadDecodingLayer : public BaseLayer explicit CpuAlgorithmResources(DecoderDomain const& decoderDomain); std::vector mAlgos; + std::vector mPrompts; TensorPtr mBatchSlots; TensorPtr mTargetTokens; TensorPtr mTokensPerStep; @@ -76,16 +74,23 @@ class LookaheadDecodingLayer : public BaseLayer TensorPtr mPathsOffsets; TensorPtr mNumNewTokens; TensorPtr mNumNewTokensCumSum; + TensorPtr mNewTokens; TensorPtr mNextDraftTokens; TensorPtr mNextDraftPosIds; - TensorPtr mPackedMasks; TensorPtr mSamplingMask; TensorPtr mNextDraftLengths; TensorPtr mSequenceLengths; + TensorPtr mGenerationLengths; + TensorPtr mGenerationLengthsMax; + TensorPtr mPackedMask; + TensorPtr mPositionOffsets; + TensorPtr mPositionIds; }; std::optional mCpuAlgo; + + runtime::SizeType32 mGlobalSteps{0}; }; } // namespace tensorrt_llm::layers diff --git a/cpp/tensorrt_llm/layers/lookaheadDecodingUtils.h b/cpp/tensorrt_llm/layers/lookaheadDecodingUtils.h index f6cabbc34..ce85491c1 100644 --- a/cpp/tensorrt_llm/layers/lookaheadDecodingUtils.h +++ b/cpp/tensorrt_llm/layers/lookaheadDecodingUtils.h @@ -16,6 +16,7 @@ #pragma once +#include "tensorrt_llm/runtime/bufferManager.h" #include "tensorrt_llm/runtime/iTensor.h" #include "tensorrt_llm/runtime/tensorView.h" @@ -182,9 +183,22 @@ class DebugTensor return (BufferLocation(mTensor))[idx]; } + runtime::BufferManager::ITensorPtr copyToHostOptional() + { + runtime::BufferManager::ITensorPtr hostPtr{nullptr}; + if (mTensor.getMemoryType() == 
runtime::MemoryType::kGPU) + { + runtime::BufferManager manager{std::make_shared()}; + hostPtr = manager.copyFrom(mTensor, runtime::MemoryType::kCPU); + manager.getStream().synchronize(); + } + return hostPtr; + } + std::string string(void) { - runtime::BufferRange range(mTensor); + runtime::BufferManager::ITensorPtr hostPtr = copyToHostOptional(); + runtime::BufferRange range(hostPtr ? (*hostPtr) : mTensor); std::string result(range.size(), '\0'); std::copy(range.begin(), range.end(), result.begin()); return result; @@ -195,8 +209,10 @@ class DebugTensor using namespace tensorrt_llm::runtime; std::ostringstream buf; auto shape = mTensor.getShape(); - runtime::BufferRange tensorRange(mTensor); - buf << mName << ": " << shape; + runtime::BufferManager::ITensorPtr hostPtr = copyToHostOptional(); + runtime::BufferRange tensorRange(hostPtr ? (*hostPtr) : mTensor); + + buf << mName << ": " << mTensor.getMemoryTypeName() << ',' << mTensor.getDataTypeName() << ',' << shape; auto line = [&buf](TokenIdType const* array, SizeType32 size) { buf << '['; @@ -249,14 +265,16 @@ class DebugTensor using namespace tensorrt_llm::runtime; std::ostringstream buf; auto shape = mTensor.getShape(); - runtime::BufferRange tensorRange(mTensor); - buf << mName << ": " << shape; + runtime::BufferManager::ITensorPtr hostPtr = copyToHostOptional(); + runtime::BufferRange tensorRange(hostPtr ? 
(*hostPtr) : mTensor); + + buf << mName << ": " << mTensor.getMemoryTypeName() << ',' << mTensor.getDataTypeName() << ',' << shape; auto line = [&buf](T const* array, SizeType32 size) { buf << '['; for (SizeType32 i = 0; i < size; i++) { - buf << array[i]; + buf << static_cast(array[i]); if (i != size - 1) { buf << ','; diff --git a/cpp/tensorrt_llm/plugins/CMakeLists.txt b/cpp/tensorrt_llm/plugins/CMakeLists.txt index 5af9b78ee..045a34d1c 100755 --- a/cpp/tensorrt_llm/plugins/CMakeLists.txt +++ b/cpp/tensorrt_llm/plugins/CMakeLists.txt @@ -60,7 +60,7 @@ foreach(PLUGIN_ITER ${PLUGIN_LISTS}) add_subdirectory(${PLUGIN_ITER}) endforeach(PLUGIN_ITER) -if(ENABLE_MULTI_DEVICE EQUAL 1) +if(ENABLE_MULTI_DEVICE) include_directories(ncclPlugin) add_subdirectory(ncclPlugin) endif() @@ -86,7 +86,7 @@ target_include_directories( PUBLIC ${CUDA_INSTALL_DIR}/include PRIVATE ${TARGET_DIR}) -if(ENABLE_MULTI_DEVICE EQUAL 1) +if(ENABLE_MULTI_DEVICE) target_include_directories(${PLUGIN_SHARED_TARGET} PUBLIC ${MPI_C_INCLUDE_DIRS}) endif() @@ -134,6 +134,6 @@ target_link_libraries( ${CMAKE_DL_LIBS} ${SHARED_TARGET}) -if(ENABLE_MULTI_DEVICE EQUAL 1) +if(ENABLE_MULTI_DEVICE) target_link_libraries(${PLUGIN_SHARED_TARGET} ${MPI_C_LIBRARIES} ${NCCL_LIB}) endif() diff --git a/cpp/tensorrt_llm/plugins/common/plugin.cpp b/cpp/tensorrt_llm/plugins/common/plugin.cpp index efb03b3bc..95401ade4 100644 --- a/cpp/tensorrt_llm/plugins/common/plugin.cpp +++ b/cpp/tensorrt_llm/plugins/common/plugin.cpp @@ -26,6 +26,7 @@ #include #include #include +#include #ifdef _MSC_VER #define FN_NAME __FUNCTION__ @@ -212,11 +213,75 @@ class PerCudaCtxSingletonCreator // CUDA resources are per-context. std::unordered_map> mObservers; }; + +template +class PerThreadSingletonCreator +{ +public: + using CreatorFunc = std::function()>; + using DeleterFunc = std::function; + + // creator returning std::unique_ptr is by design. + // It forces separation of memory for T and memory for control blocks. 
+ // So when T is released, but we still have observer weak_ptr in mObservers, the T mem block can be released. + // creator itself must not own CUDA resources. Only the object it creates can. + PerThreadSingletonCreator(CreatorFunc creator, DeleterFunc deleter) + : mCreator{std::move(creator)} + , mDeleter{std::move(deleter)} + { + } + + std::shared_ptr operator()() + { + std::lock_guard lk{mMutex}; + + std::thread::id thread = std::this_thread::get_id(); + std::shared_ptr result = mObservers[thread].lock(); + + if (result == nullptr) + { + // Create the resource and register with an observer. + result = std::shared_ptr{mCreator().release(), + [this, thread](T* obj) + { + if (obj == nullptr) + { + return; + } + mDeleter(obj); + + // Clears observer to avoid growth of mObservers, in case users creates/destroys cuda contexts + // frequently. + std::shared_ptr observedObjHolder; // Delay destroy to avoid dead lock. + std::lock_guard lk{mMutex}; + // Must check observer again because another thread may created new instance for this ctx just + // before we lock mMutex. We can't infer that the observer is stale from the fact that obj is + // destroyed, because shared_ptr ref-count checking and observer removing are not in one atomic + // operation, and the observer may be changed to observe another instance. + observedObjHolder = mObservers.at(thread).lock(); + if (observedObjHolder == nullptr) + { + mObservers.erase(thread); + } + }}; + mObservers.at(thread) = result; + } + return result; + } + +private: + CreatorFunc mCreator; + DeleterFunc mDeleter; + mutable std::mutex mMutex; + // CUDA resources are per-thread. 
+ std::unordered_map> mObservers; +}; + } // namespace std::shared_ptr getCublasHandle() { - static PerCudaCtxSingletonCreator creator( + static PerThreadSingletonCreator creator( []() -> auto { auto handle = std::unique_ptr(new cublasHandle_t); @@ -233,7 +298,7 @@ std::shared_ptr getCublasHandle() std::shared_ptr getCublasLtHandle() { - static PerCudaCtxSingletonCreator creator( + static PerThreadSingletonCreator creator( []() -> auto { auto handle = std::unique_ptr(new cublasLtHandle_t); @@ -248,6 +313,20 @@ std::shared_ptr getCublasLtHandle() return creator(); } +std::shared_ptr getCublasMMWrapper(std::shared_ptr cublasHandle, + std::shared_ptr cublasltHandle, cudaStream_t stream, void* workspace) +{ + static PerThreadSingletonCreator creator( + [cublasHandle, cublasltHandle, stream, workspace]() -> auto + { + auto wrapper = std::unique_ptr( + new tensorrt_llm::common::CublasMMWrapper(cublasHandle, cublasltHandle, stream, workspace)); + return wrapper; + }, + [](tensorrt_llm::common::CublasMMWrapper* wrapper) { delete wrapper; }); + return creator(); +} + PluginFieldParser::PluginFieldParser(int32_t nbFields, nvinfer1::PluginField const* fields) : mFields{fields} { diff --git a/cpp/tensorrt_llm/plugins/common/plugin.h b/cpp/tensorrt_llm/plugins/common/plugin.h index 39053f1a4..96bd1ef47 100644 --- a/cpp/tensorrt_llm/plugins/common/plugin.h +++ b/cpp/tensorrt_llm/plugins/common/plugin.h @@ -17,6 +17,7 @@ #pragma once +#include "tensorrt_llm/common/cublasMMWrapper.h" #include "tensorrt_llm/common/workspace.h" #include "tensorrt_llm/plugins/api/tllmPlugin.h" #include "tensorrt_llm/plugins/common/checkMacrosPlugin.h" @@ -179,6 +180,8 @@ std::shared_ptr getComm(std::set const& group); //! 
Get cublas and cublasLt handle for current cuda context std::shared_ptr getCublasHandle(); std::shared_ptr getCublasLtHandle(); +std::shared_ptr getCublasMMWrapper(std::shared_ptr cublasHandle, + std::shared_ptr cublasltHandle, cudaStream_t stream, void* workspace); #ifndef DEBUG diff --git a/cpp/tensorrt_llm/plugins/gemmPlugin/gemmPlugin.cpp b/cpp/tensorrt_llm/plugins/gemmPlugin/gemmPlugin.cpp index 5d97a8412..3a1dacdcc 100644 --- a/cpp/tensorrt_llm/plugins/gemmPlugin/gemmPlugin.cpp +++ b/cpp/tensorrt_llm/plugins/gemmPlugin/gemmPlugin.cpp @@ -184,9 +184,9 @@ GemmPlugin::GemmPlugin(void const* data, size_t length, GemmPlugin::PluginProfil void GemmPlugin::init() { - auto cublasHandle = getCublasHandle(); - auto cublasLtHandle = getCublasLtHandle(); - mCublasWrapper = std::make_shared(cublasHandle, cublasLtHandle, nullptr, nullptr); + mcublasHandle = getCublasHandle(); + mcublasLtHandle = getCublasLtHandle(); + mCublasWrapper = getCublasMMWrapper(mcublasHandle, mcublasLtHandle, nullptr, nullptr); mPluginProfiler->setTranspose(mTransA, mTransB); mPluginProfiler->setOutputType(mOutputType); @@ -347,7 +347,9 @@ int GemmPlugin::enqueue(nvinfer1::PluginTensorDesc const* inputDesc, nvinfer1::P // mat2 [K, N] (mTransB = False) // outputs // mat [M, N] - + mcublasHandle = getCublasHandle(); + mcublasLtHandle = getCublasLtHandle(); + mCublasWrapper = getCublasMMWrapper(mcublasHandle, mcublasLtHandle, nullptr, nullptr); setGemmConfig(); int const nbDimsA = inputDesc[0].dims.nbDims; diff --git a/cpp/tensorrt_llm/plugins/gemmPlugin/gemmPlugin.h b/cpp/tensorrt_llm/plugins/gemmPlugin/gemmPlugin.h index bf2b5540f..9ba090882 100644 --- a/cpp/tensorrt_llm/plugins/gemmPlugin/gemmPlugin.h +++ b/cpp/tensorrt_llm/plugins/gemmPlugin/gemmPlugin.h @@ -131,6 +131,8 @@ class GemmPlugin : public BasePlugin // @fixme: seems this is shared across multiple clones. // If we deep copy the wrapper inside clone(), then we may avoid the mutex inside the wrapper? 
CublasGemmWrapperPtr mCublasWrapper; + std::shared_ptr mcublasHandle; + std::shared_ptr mcublasLtHandle; GemmDims mDims{}; GemmIdCublas mGemmId{}; diff --git a/cpp/tensorrt_llm/plugins/gptAttentionCommon/gptAttentionCommon.cpp b/cpp/tensorrt_llm/plugins/gptAttentionCommon/gptAttentionCommon.cpp index b63869341..0a2c5e771 100644 --- a/cpp/tensorrt_llm/plugins/gptAttentionCommon/gptAttentionCommon.cpp +++ b/cpp/tensorrt_llm/plugins/gptAttentionCommon/gptAttentionCommon.cpp @@ -618,9 +618,10 @@ size_t GPTAttentionPluginCommon::getWorkspaceSizeForContext(nvinfer1::DataType t ? max_num_tokens * size_t(local_hidden_units_qo + 2 * local_hidden_units_kv) : 0; size_t const padding_offset_size = mEnableContextFMHA ? 0 : sizeof(int) * max_num_tokens; + size_t const encoder_padding_offset_size = mEnableContextFMHA ? 0 : sizeof(int) * max_num_tokens; size_t const fmha_scheduler_counter = mEnableContextFMHA ? sizeof(uint32_t) : 0; - int const NUM_BUFFERS = 15; + int const NUM_BUFFERS = 16; size_t workspaces[NUM_BUFFERS]; workspaces[0] = CUBLAS_WORKSPACE_SIZE; workspaces[1] = attention_mask_size; @@ -636,7 +637,8 @@ size_t GPTAttentionPluginCommon::getWorkspaceSizeForContext(nvinfer1::DataType t workspaces[11] = qk_buf_float_size; workspaces[12] = fp8_qkv_buffer_size; workspaces[13] = padding_offset_size; - workspaces[14] = fmha_scheduler_counter; + workspaces[14] = encoder_padding_offset_size; + workspaces[15] = fmha_scheduler_counter; context_workspace_size = tc::calculateTotalWorkspaceSize(workspaces, NUM_BUFFERS); return context_workspace_size; @@ -795,9 +797,10 @@ int GPTAttentionPluginCommon::enqueueContext(EnqueueContextParams(nextWorkspacePtr(workspace_byte_ptr, offset, padding_offset_size)); + int* encoder_padding_offset = (mEnableContextFMHA && !isCrossAttention()) + ? 
nullptr + : reinterpret_cast(nextWorkspacePtr(workspace_byte_ptr, offset, encoder_padding_offset_size)); uint32_t* fmha_tile_counter_ptr = reinterpret_cast(nextWorkspacePtr(workspace_byte_ptr, offset, fmha_scheduler_counter)); @@ -836,12 +842,16 @@ int GPTAttentionPluginCommon::enqueueContext(EnqueueContextParams(params.cross_qkv), const_cast(params.qkv_bias), params.encoder_input_lengths, - mRemovePadding ? padding_offset : nullptr, params.batch_size, params.cross_qkv_length, + mRemovePadding ? encoder_padding_offset : nullptr, params.batch_size, params.cross_qkv_length, params.num_encoder_tokens, mNumHeads, mNumKVHeads, getHeadSize(), mRotaryEmbeddingDim, mRotaryEmbeddingBase, mRotaryEmbeddingScaleType, mRotaryEmbeddingScale, mRotaryEmbeddingMaxPositions, position_embedding_type, (float*) nullptr, 0, stream); diff --git a/cpp/tensorrt_llm/plugins/gptAttentionPlugin/gptAttentionPlugin.cpp b/cpp/tensorrt_llm/plugins/gptAttentionPlugin/gptAttentionPlugin.cpp index f921b66af..4411b1473 100644 --- a/cpp/tensorrt_llm/plugins/gptAttentionPlugin/gptAttentionPlugin.cpp +++ b/cpp/tensorrt_llm/plugins/gptAttentionPlugin/gptAttentionPlugin.cpp @@ -659,7 +659,8 @@ int GPTAttentionPlugin::enqueueSome(int32_t seqIdxBeg, int32_t localNbSeq, int32 if (past_key_value_cache != outputs[1]) { auto shape = outputDesc[1].dims; - auto const size = std::accumulate(shape.d, shape.d + shape.nbDims, 1, std::multiplies{}); + auto const size + = cacheElemSize * std::accumulate(shape.d, shape.d + shape.nbDims, 1, std::multiplies{}); cudaMemcpyAsync(outputs[1], past_key_value_cache, size, cudaMemcpyDeviceToDevice, stream); } } diff --git a/cpp/tensorrt_llm/plugins/loraPlugin/loraPlugin.cpp b/cpp/tensorrt_llm/plugins/loraPlugin/loraPlugin.cpp index 60dfe0648..ffc4b7a8d 100644 --- a/cpp/tensorrt_llm/plugins/loraPlugin/loraPlugin.cpp +++ b/cpp/tensorrt_llm/plugins/loraPlugin/loraPlugin.cpp @@ -39,7 +39,7 @@ std::vector LoraPluginCreator::mPluginAttributes; LoraPlugin::LoraPlugin(int 
in_hidden_size, std::vector out_hidden_sizes, int transA, int transB, int num_lora_modules, nvinfer1::DataType type, LoraPlugin::PluginProfilerPtr const& pluginProfiler, - bool remove_input_padding, int max_num_tokens, int max_low_rank, int weight_index) + bool remove_input_padding, int max_low_rank, int weight_index) : mInHiddenSize(in_hidden_size) , mTransA(transA) , mTransB(transB) @@ -47,7 +47,6 @@ LoraPlugin::LoraPlugin(int in_hidden_size, std::vector out_hidden_sizes, in , mType(type) , mPluginProfiler(pluginProfiler) , mRemoveInputPadding(remove_input_padding) - , mMaxNumTokens(max_num_tokens) , mMaxLowRank(max_low_rank) , mWeightIndex(weight_index) { @@ -69,7 +68,6 @@ LoraPlugin::LoraPlugin(void const* data, size_t length, LoraPlugin::PluginProfil read(d, mNumLoraModules); read(d, mType); read(d, mRemoveInputPadding); - read(d, mMaxNumTokens); read(d, mMaxLowRank); read(d, mWeightIndex); mOutHiddenSizes.resize(mNumLoraModules); @@ -212,7 +210,7 @@ size_t LoraPlugin::getWorkspaceSize(nvinfer1::PluginTensorDesc const* inputs, in int const nbReq = inputs[getLoraRanksIdx()].dims.d[0]; auto const type = inputs[getInputTensorIdx()].type; auto const numTokens = getNumTokens(inputs); - return mLoraImpl->getWorkspaceSize(mMaxNumTokens, nbReq, type); + return mLoraImpl->getWorkspaceSize(numTokens, nbReq, type); } int64_t LoraPlugin::getNumTokens(nvinfer1::PluginTensorDesc const* input_tensors) const @@ -233,6 +231,11 @@ int LoraPlugin::enqueue(nvinfer1::PluginTensorDesc const* inputDesc, nvinfer1::P { TLLM_LOG_TRACE("%s start", __PRETTY_FUNCTION__); + if (isBuilding()) + { + return 0; + } + auto const numReqs = inputDesc[getLoraRanksIdx()].dims.d[0]; void const* input = inputs[getInputTensorIdx()]; int const seqLen = mRemoveInputPadding ? 
0 : inputDesc[getInputTensorIdx()].dims.d[1]; @@ -344,8 +347,8 @@ size_t LoraPlugin::getSerializationSize() const noexcept { TLLM_LOG_DEBUG("%s", __PRETTY_FUNCTION__); return sizeof(mInHiddenSize) + sizeof(mTransA) + sizeof(mTransB) + sizeof(mNumLoraModules) + sizeof(mType) - + mPluginProfiler->getSerializationSize(mGemmId) + sizeof(mRemoveInputPadding) + sizeof(mMaxNumTokens) - + sizeof(mMaxLowRank) + sizeof(mWeightIndex) + sizeof(int) * mNumLoraModules; // selected tactics container size + + mPluginProfiler->getSerializationSize(mGemmId) + sizeof(mRemoveInputPadding) + sizeof(mMaxLowRank) + + sizeof(mWeightIndex) + sizeof(int) * mNumLoraModules; // selected tactics container size } void LoraPlugin::serialize(void* buffer) const noexcept @@ -358,7 +361,6 @@ void LoraPlugin::serialize(void* buffer) const noexcept write(d, mNumLoraModules); write(d, mType); write(d, mRemoveInputPadding); - write(d, mMaxNumTokens); write(d, mMaxLowRank); write(d, mWeightIndex); for (int i = 0; i < mNumLoraModules; i++) @@ -414,7 +416,6 @@ IPluginV2* LoraPluginCreator::createPlugin(char const* name, PluginFieldCollecti int num_lora_modules; int in_hidden_size, transA, transB; bool remove_input_padding; - int max_num_tokens; int max_low_rank; int weight_index; // Read configurations from each fields @@ -446,11 +447,6 @@ IPluginV2* LoraPluginCreator::createPlugin(char const* name, PluginFieldCollecti TLLM_CHECK(fields[i].type == PluginFieldType::kINT8); remove_input_padding = static_cast(*(static_cast(fields[i].data))); } - else if (!strcmp(attrName, "max_num_tokens")) - { - TLLM_CHECK(fields[i].type == PluginFieldType::kINT32); - max_num_tokens = *(static_cast(fields[i].data)); - } else if (!strcmp(attrName, "max_low_rank")) { TLLM_CHECK(fields[i].type == PluginFieldType::kINT32); @@ -488,7 +484,7 @@ IPluginV2* LoraPluginCreator::createPlugin(char const* name, PluginFieldCollecti // FIXME enable tactic profiler auto pluginProfiler = gemmPluginProfileManager.createGemmPluginProfiler(/* 
inference */ false, /* skip */ true); auto* obj = new LoraPlugin(in_hidden_size, out_hidden_sizes, transA, transB, num_lora_modules, type, - pluginProfiler, remove_input_padding, max_num_tokens, max_low_rank, weight_index); + pluginProfiler, remove_input_padding, max_low_rank, weight_index); obj->setPluginNamespace(mNamespace.c_str()); return obj; } diff --git a/cpp/tensorrt_llm/plugins/loraPlugin/loraPlugin.h b/cpp/tensorrt_llm/plugins/loraPlugin/loraPlugin.h index 6104abd72..7795f7b7c 100644 --- a/cpp/tensorrt_llm/plugins/loraPlugin/loraPlugin.h +++ b/cpp/tensorrt_llm/plugins/loraPlugin/loraPlugin.h @@ -37,8 +37,8 @@ class LoraPlugin : public BasePlugin LoraPlugin() = delete; LoraPlugin(int in_hidden_size, std::vector out_hidden_sizes, int transA, int transB, int num_lora_modules, - nvinfer1::DataType type, PluginProfilerPtr const& profiler, bool remove_input_padding, int max_num_tokens, - int max_low_rank, int weight_index); + nvinfer1::DataType type, PluginProfilerPtr const& profiler, bool remove_input_padding, int max_low_rank, + int weight_index); LoraPlugin(void const* data, size_t length, PluginProfilerPtr const& profiler); @@ -117,7 +117,6 @@ class LoraPlugin : public BasePlugin int mTransB; nvinfer1::DataType mType; bool mRemoveInputPadding; - int mMaxNumTokens; int mNumLoraModules; int mInHiddenSize; int mMaxLowRank; diff --git a/cpp/tensorrt_llm/plugins/mixtureOfExperts/mixtureOfExpertsPlugin.cpp b/cpp/tensorrt_llm/plugins/mixtureOfExperts/mixtureOfExpertsPlugin.cpp index 279eb825d..7586aa4ba 100644 --- a/cpp/tensorrt_llm/plugins/mixtureOfExperts/mixtureOfExpertsPlugin.cpp +++ b/cpp/tensorrt_llm/plugins/mixtureOfExperts/mixtureOfExpertsPlugin.cpp @@ -647,6 +647,11 @@ int MixtureOfExpertsPlugin::enqueue(nvinfer1::PluginTensorDesc const* inputDesc, nvinfer1::PluginTensorDesc const* outputDesc, void const* const* inputs, void* const* outputs, void* workspace_ptr, cudaStream_t stream) noexcept { + if (isBuilding()) + { + return 0; + } + int64_t const 
num_tokens = getNumTokens(inputDesc); int64_t const num_reqs = getNumLoraRequests(inputDesc); int64_t const num_not_finished = num_tokens; // TODO Take this as an input diff --git a/cpp/tensorrt_llm/plugins/ncclPlugin/allreducePlugin.cpp b/cpp/tensorrt_llm/plugins/ncclPlugin/allreducePlugin.cpp index 110a07000..784055cc5 100644 --- a/cpp/tensorrt_llm/plugins/ncclPlugin/allreducePlugin.cpp +++ b/cpp/tensorrt_llm/plugins/ncclPlugin/allreducePlugin.cpp @@ -46,10 +46,6 @@ AllreducePlugin::AllreducePlugin(std::set group, nvinfer1::DataType type, A , mAffine(affine) , mBias(bias) { - if (std::getenv("FORCE_NCCL_ALL_REDUCE_STRATEGY") != nullptr) - { - mStrategy = AllReduceStrategyType::NCCL; - } } // Parameterized constructor @@ -58,10 +54,6 @@ AllreducePlugin::AllreducePlugin(void const* data, size_t length) char const *d = reinterpret_cast(data), *a = d; read(d, mType); read(d, mStrategy); - if (std::getenv("FORCE_NCCL_ALL_REDUCE_STRATEGY") != nullptr) - { - mStrategy = AllReduceStrategyType::NCCL; - } read(d, mConfig); read(d, mOp); read(d, mEps); @@ -239,7 +231,9 @@ int AllreducePlugin::enqueue(nvinfer1::PluginTensorDesc const* inputDesc, nvinfe kernels::AllReduceStrategyType runtimeStrategy; - if (mStrategy == AllReduceStrategyType::NCCL) + static char* forceNcclAllReduceStrategyChar = std::getenv("FORCE_NCCL_ALL_REDUCE_STRATEGY"); + bool forceNcclAllReduceStrategy = (forceNcclAllReduceStrategyChar != nullptr); + if (forceNcclAllReduceStrategy || mStrategy == AllReduceStrategyType::NCCL) { runtimeStrategy = AllReduceStrategyType::NCCL; } diff --git a/cpp/tensorrt_llm/plugins/ncclPlugin/recvPlugin.cpp b/cpp/tensorrt_llm/plugins/ncclPlugin/recvPlugin.cpp index 96f30363f..62e3e4a6b 100644 --- a/cpp/tensorrt_llm/plugins/ncclPlugin/recvPlugin.cpp +++ b/cpp/tensorrt_llm/plugins/ncclPlugin/recvPlugin.cpp @@ -16,6 +16,7 @@ */ #include "recvPlugin.h" +#include "tensorrt_llm/common/logger.h" #include "tensorrt_llm/common/mpiUtils.h" #include @@ -91,7 +92,9 @@ int 
RecvPlugin::enqueue(nvinfer1::PluginTensorDesc const* inputDesc, nvinfer1::P { size *= inputDesc[0].dims.d[i]; } + TLLM_LOG_DEBUG("start ncclRecv with size %d", size); NCCLCHECK(ncclRecv(outputs[0], size, (*getDtypeMap())[inputDesc[0].type], 0, mComm, stream)); + TLLM_LOG_DEBUG("end ncclRecv with size %d", size); return 0; } diff --git a/cpp/tensorrt_llm/plugins/ncclPlugin/sendPlugin.cpp b/cpp/tensorrt_llm/plugins/ncclPlugin/sendPlugin.cpp index 3d20b9911..de0bce1c6 100644 --- a/cpp/tensorrt_llm/plugins/ncclPlugin/sendPlugin.cpp +++ b/cpp/tensorrt_llm/plugins/ncclPlugin/sendPlugin.cpp @@ -16,6 +16,7 @@ */ #include "sendPlugin.h" +#include "tensorrt_llm/common/logger.h" #include "tensorrt_llm/common/mpiUtils.h" #include @@ -93,7 +94,9 @@ int SendPlugin::enqueue(nvinfer1::PluginTensorDesc const* inputDesc, nvinfer1::P size *= inputDesc[0].dims.d[i]; } + TLLM_LOG_DEBUG("start ncclSend with size %d", size); NCCLCHECK(ncclSend(inputs[0], size, (*getDtypeMap())[inputDesc[0].type], 1, mComm, stream)); + TLLM_LOG_DEBUG("end ncclSend with size %d", size); return 0; } diff --git a/cpp/tensorrt_llm/pybind/batch_manager/llmRequest.cpp b/cpp/tensorrt_llm/pybind/batch_manager/llmRequest.cpp index 0f51ef0d7..05f2b93e1 100644 --- a/cpp/tensorrt_llm/pybind/batch_manager/llmRequest.cpp +++ b/cpp/tensorrt_llm/pybind/batch_manager/llmRequest.cpp @@ -77,8 +77,8 @@ std::shared_ptr LlmRequest::toTrtLlm() const return std::make_shared(mRequestId, mMaxNewTokens, std::make_shared>(mTokens.at(0)), mSamplingConfig, mIsStreaming, mEndId, mPadId, embeddingBias, badWordsList, stopWordsList, promptEmbeddingTable, mPromptVocabSize, mLoraTaskId, loraWeights, - loraConfig, returnLogProbs(), mReturnContextLogits, mReturnGenerationLogits, mDraftTokens, draftLogits, - mExcludeInputFromOutput, callbackAdapter(mLogitsPostProcessor), mApplyLogitsPostProcessorBatched, + loraConfig, mLookaheadConfig, returnLogProbs(), mReturnContextLogits, mReturnGenerationLogits, mDraftTokens, + draftLogits, 
mExcludeInputFromOutput, callbackAdapter(mLogitsPostProcessor), mApplyLogitsPostProcessorBatched, mEncoderTokens, mReturnEncoderOutput, mClientId, mPriority); } @@ -90,21 +90,23 @@ void LlmRequest::initBindings(py::module_& m) std::optional, std::optional, std::optional, std::optional, std::optional, std::optional, std::optional, - std::optional, bool, bool, bool, std::optional, - std::optional, bool, std::optional, bool, - std::optional, bool, std::optional, executor::PriorityType>(), + std::optional, std::optional, bool, bool, + bool, std::optional, std::optional, bool, + std::optional, bool, std::optional, bool, + std::optional, executor::PriorityType>(), py::arg("request_id"), py::arg("max_new_tokens"), py::arg("input_tokens"), py::arg("sampling_config"), py::arg("is_streaming"), py::arg("end_id") = std::nullopt, py::arg("pad_id") = std::nullopt, py::arg("embedding_bias") = std::nullopt, py::arg("bad_words_list") = std::nullopt, py::arg("stop_words_list") = std::nullopt, py::arg("prompt_embedding_table") = std::nullopt, py::arg("prompt_vocab_size") = std::nullopt, py::arg("lora_task_id") = std::nullopt, py::arg("lora_weights") = std::nullopt, py::arg("lora_config") = std::nullopt, - py::arg("return_log_probs") = false, py::arg("return_context_logits") = false, - py::arg("return_generation_logits") = false, py::arg("draft_tokens") = std::nullopt, - py::arg("draft_logits") = std::nullopt, py::arg("exclude_input_from_output") = false, - py::arg("logits_post_processor") = std::nullopt, py::arg("apply_logits_post_processor_batched") = false, - py::arg("encoder_input_tokens") = std::nullopt, py::arg("return_encoder_output") = false, - py::arg("client_id") = std::nullopt, py::arg("priority") = executor::Request::kDefaultPriority) + py::arg("lookahead_config") = std::nullopt, py::arg("return_log_probs") = false, + py::arg("return_context_logits") = false, py::arg("return_generation_logits") = false, + py::arg("draft_tokens") = std::nullopt, py::arg("draft_logits") = 
std::nullopt, + py::arg("exclude_input_from_output") = false, py::arg("logits_post_processor") = std::nullopt, + py::arg("apply_logits_post_processor_batched") = false, py::arg("encoder_input_tokens") = std::nullopt, + py::arg("return_encoder_output") = false, py::arg("client_id") = std::nullopt, + py::arg("priority") = executor::Request::kDefaultPriority) .def("get_num_tokens", &LlmRequest::getNumTokens, py::arg("beam")) .def_property_readonly("max_beam_num_tokens", &LlmRequest::getMaxBeamNumTokens) .def("get_token", &LlmRequest::getToken, py::arg("beam"), py::arg("pos")) @@ -122,6 +124,7 @@ void LlmRequest::initBindings(py::module_& m) .def_property_readonly("lora_task_id", &LlmRequest::getLoraTaskId) .def_property_readonly("lora_weights", &LlmRequest::getLoraWeights) .def_property_readonly("lora_config", &LlmRequest::getLoraConfig) + .def_property_readonly("lookahead_config", &LlmRequest::getLookaheadConfig) .def_property_readonly("embedding_bias", &LlmRequest::getEmbeddingBias) .def_property_readonly("bad_words_list", &LlmRequest::getBadWordsList) .def_property_readonly("stop_words_list", &LlmRequest::getStopWordsList) diff --git a/cpp/tensorrt_llm/pybind/batch_manager/llmRequest.h b/cpp/tensorrt_llm/pybind/batch_manager/llmRequest.h index 84c593ff0..a4415c581 100644 --- a/cpp/tensorrt_llm/pybind/batch_manager/llmRequest.h +++ b/cpp/tensorrt_llm/pybind/batch_manager/llmRequest.h @@ -55,7 +55,8 @@ class LlmRequest : public tb::GenericLlmRequest std::optional promptEmbeddingTable = std::nullopt, std::optional promptVocabSize = std::nullopt, std::optional loraTaskId = std::nullopt, std::optional loraWeights = std::nullopt, - std::optional loraConfig = std::nullopt, bool returnLogProbs = false, + std::optional loraConfig = std::nullopt, + std::optional lookaheadConfig = std::nullopt, bool returnLogProbs = false, bool returnContextLogits = false, bool returnGenerationLogits = false, std::optional draftTokens = std::nullopt, std::optional draftLogits = std::nullopt, 
bool excludeInputFromOutput = false, std::optional logitsPostProcessor = std::nullopt, @@ -64,7 +65,7 @@ class LlmRequest : public tb::GenericLlmRequest executor::PriorityType priority = executor::Request::kDefaultPriority) : Base(requestId, maxNewTokens, std::make_shared>(std::move(inputTokens)), samplingConfig, isStreaming, endId, padId, embeddingBias, badWordsList, stopWordsList, promptEmbeddingTable, - promptVocabSize, loraTaskId, loraWeights, loraConfig, returnLogProbs, returnContextLogits, + promptVocabSize, loraTaskId, loraWeights, loraConfig, lookaheadConfig, returnLogProbs, returnContextLogits, returnGenerationLogits, draftTokens.has_value() ? std::make_shared(std::move(draftTokens.value())) : std::make_shared(), diff --git a/cpp/tensorrt_llm/pybind/executor/bindings.cpp b/cpp/tensorrt_llm/pybind/executor/bindings.cpp index 83aac4e98..4a91d0b8b 100644 --- a/cpp/tensorrt_llm/pybind/executor/bindings.cpp +++ b/cpp/tensorrt_llm/pybind/executor/bindings.cpp @@ -20,17 +20,20 @@ #include #include #include -#include #include "bindings.h" #include "executor.h" #include "streamCaster.h" #include "tensorCaster.h" +#include "tensorrt_llm/common/assert.h" #include "tensorrt_llm/executor/executor.h" #include "tensorrt_llm/executor/tensor.h" #include "tensorrt_llm/executor/types.h" +#include +#include + namespace py = pybind11; namespace tle = tensorrt_llm::executor; using Tensor = tle::Tensor; @@ -54,6 +57,15 @@ void InitBindings(pybind11::module_& m) .value("STATIC", tle::BatchingType::kSTATIC) .value("INFLIGHT", tle::BatchingType::kINFLIGHT); + auto decodingModeGetstate = [](tle::DecodingMode const& self) { return py::make_tuple(self.getState()); }; + auto decodingModeSetstate = [](py::tuple state) + { + if (state.size() != 1) + { + throw std::runtime_error("Invalid state!"); + } + return tle::DecodingMode(state[0].cast()); + }; py::class_(m, "DecodingMode") .def("Auto", &tle::DecodingMode::Auto) .def("TopK", &tle::DecodingMode::TopK) @@ -69,7 +81,8 @@ void 
InitBindings(pybind11::module_& m) .def("isTopKandTopP", &tle::DecodingMode::isTopKandTopP) .def("isBeamSearch", &tle::DecodingMode::isBeamSearch) .def("isMedusa", &tle::DecodingMode::isMedusa) - .def("isLookahead", &tle::DecodingMode::isLookahead); + .def("isLookahead", &tle::DecodingMode::isLookahead) + .def(py::pickle(decodingModeGetstate, decodingModeSetstate)); py::enum_(m, "CapacitySchedulerPolicy") .value("MAX_UTILIZATION", tle::CapacitySchedulerPolicy::kMAX_UTILIZATION) @@ -223,22 +236,30 @@ void InitBindings(pybind11::module_& m) .def_property_readonly("weights", &tle::LoraConfig::getWeights) .def_property_readonly("config", &tle::LoraConfig::getConfig); + py::class_(m, "LookaheadDecodingConfig") + .def(py::init(), py::arg("max_window_size"), py::arg("max_ngram_size"), + py::arg("max_verification_set_size")) + .def_property_readonly("max_window_size", &tle::LookaheadDecodingConfig::getWindowSize) + .def_property_readonly("max_ngram_size", &tle::LookaheadDecodingConfig::getNgramSize) + .def_property_readonly("max_verification_set_size", &tle::LookaheadDecodingConfig::getVerificationSetSize); + py::class_ request(m, "Request"); request .def(py::init const&, std::optional const&, std::optional>, std::optional>, std::optional, std::optional, std::optional, - std::optional, std::optional, std::optional, - std::optional, bool>(), + std::optional, std::optional, + std::optional, std::optional, std::optional, bool>(), py::arg("input_token_ids"), py::arg("max_new_tokens"), py::arg("streaming") = false, py::arg_v("sampling_config", tle::SamplingConfig(), "SamplingConfig()"), py::arg_v("output_config", tle::OutputConfig(), "OutputConfig()"), py::arg("end_id") = py::none(), py::arg("pad_id") = py::none(), py::arg("bad_words") = py::none(), py::arg("stop_words") = py::none(), py::arg("embedding_bias") = py::none(), py::arg("external_draft_tokens_config") = py::none(), py::arg("prompt_tuning_config") = py::none(), py::arg("lora_config") = py::none(), - 
py::arg("logits_post_processor_name") = py::none(), py::arg("encoder_input_token_ids") = py::none(), - py::arg("client_id") = py::none(), py::arg("return_all_generated_tokens") = false) + py::arg("lookahead_config") = py::none(), py::arg("logits_post_processor_name") = py::none(), + py::arg("encoder_input_token_ids") = py::none(), py::arg("client_id") = py::none(), + py::arg("return_all_generated_tokens") = false) .def_property_readonly("input_token_ids", &tle::Request::getInputTokenIds) .def_property_readonly("max_new_tokens", &tle::Request::getMaxNewTokens) .def_property("streaming", &tle::Request::getStreaming, &tle::Request::setStreaming) @@ -254,6 +275,7 @@ void InitBindings(pybind11::module_& m) .def_property( "prompt_tuning_config", &tle::Request::getPromptTuningConfig, &tle::Request::setPromptTuningConfig) .def_property("lora_config", &tle::Request::getLoraConfig, &tle::Request::setLoraConfig) + .def_property("lookahead_config", &tle::Request::getLookaheadConfig, &tle::Request::setLookaheadConfig) .def_property("logits_post_processor_name", &tle::Request::getLogitsPostProcessorName, &tle::Request::setLogitsPostProcessorName) .def_property( @@ -263,6 +285,12 @@ void InitBindings(pybind11::module_& m) &tle::Request::setReturnAllGeneratedTokens); request.attr("BATCHED_POST_PROCESSOR_NAME") = tle::Request::kBatchedPostProcessorName; + py::enum_(m, "FinishReason") + .value("NOT_FINISHED", tle::FinishReason::kNOT_FINISHED) + .value("END_ID", tle::FinishReason::kEND_ID) + .value("STOP_WORDS", tle::FinishReason::kSTOP_WORDS) + .value("LENGTH", tle::FinishReason::kLENGTH); + py::class_(m, "Result") .def(py::init<>()) .def_readwrite("is_final", &tle::Result::isFinal) @@ -271,7 +299,8 @@ void InitBindings(pybind11::module_& m) .def_readwrite("log_probs", &tle::Result::logProbs) .def_readwrite("context_logits", &tle::Result::contextLogits) .def_readwrite("generation_logits", &tle::Result::generationLogits) - .def_readwrite("encoder_output", 
&tle::Result::encoderOutput); + .def_readwrite("encoder_output", &tle::Result::encoderOutput) + .def_readwrite("finish_reasons", &tle::Result::finishReasons); py::class_(m, "Response") .def(py::init(), py::arg("request_id"), py::arg("error_msg")) @@ -421,13 +450,18 @@ void InitBindings(pybind11::module_& m) .def_property_readonly("host_cache_size", &tle::PeftCacheConfig::getHostCacheSize) .def(py::pickle(peftCacheConfigGetstate, peftCacheConfigSetstate)); - py::class_(m, "LookaheadDecodingConfig") - .def(py::init(), py::arg("max_window_size"), py::arg("max_ngram_size"), - py::arg("max_verification_set_size")) - .def_property_readonly("max_window_size", &tle::LookaheadDecodingConfig::getWindowSize) - .def_property_readonly("max_ngram_size", &tle::LookaheadDecodingConfig::getNgramSize) - .def_property_readonly("max_verification_set_size", &tle::LookaheadDecodingConfig::getVerificationSetSize); - + auto decodingConfigGetstate = [](tle::DecodingConfig const& self) + { return py::make_tuple(self.getDecodingMode(), self.getLookaheadDecodingConfig(), self.getMedusaChoices()); }; + auto decodingConfigSetstate = [](py::tuple state) + { + if (state.size() != 3) + { + throw std::runtime_error("Invalid state!"); + } + return tle::DecodingConfig(state[0].cast>(), + state[1].cast>(), + state[2].cast>()); + }; py::class_(m, "DecodingConfig") .def(py::init, std::optional, std::optional>(), @@ -436,7 +470,55 @@ void InitBindings(pybind11::module_& m) .def_property("decoding_mode", &tle::DecodingConfig::getDecodingMode, &tle::DecodingConfig::setDecodingMode) .def_property("lookahead_decoding_config", &tle::DecodingConfig::getLookaheadDecodingConfig, &tle::DecodingConfig::setLookaheadDecoding) - .def_property("medusa_choices", &tle::DecodingConfig::getMedusaChoices, &tle::DecodingConfig::setMedusaChoices); + .def_property("medusa_choices", &tle::DecodingConfig::getMedusaChoices, &tle::DecodingConfig::setMedusaChoices) + .def(py::pickle(decodingConfigGetstate, 
decodingConfigSetstate)); + + auto debugConfigGetstate = [](tle::DebugConfig const& self) + { return py::make_tuple(self.getDumpInputTensors(), self.getDumpOutputTensors(), self.getDebugTensorNames()); }; + auto debugConfigSetstate = [](py::tuple state) + { + if (state.size() != 3) + { + throw std::runtime_error("Invalid state!"); + } + return tle::DebugConfig( + state[0].cast(), state[1].cast(), state[2].cast>()); + }; + py::class_(m, "DebugConfig") + .def(py::init>(), py::arg("dump_input_tensors") = false, + py::arg("dump_output_tensors") = false, py::arg("debug_tensor_names") = py::none()) + .def_property( + "dump_input_tensors", &tle::DebugConfig::getDumpInputTensors, &tle::DebugConfig::setDumpInputTensors) + .def_property( + "dump_output_tensors", &tle::DebugConfig::getDumpOutputTensors, &tle::DebugConfig::setDumpOuputTensors) + .def_property( + "debug_tensor_names", &tle::DebugConfig::getDebugTensorNames, &tle::DebugConfig::setDebugTensorNames) + .def(py::pickle(debugConfigGetstate, debugConfigSetstate)); + + auto logitsPostProcessorConfigGetstate = [](tle::LogitsPostProcessorConfig const& self) + { return py::make_tuple(self.getProcessorMap(), self.getProcessorBatched(), self.getReplicate()); }; + auto logitsPostProcessorConfigSetstate = [](py::tuple state) + { + if (state.size() != 3) + { + throw std::runtime_error("Invalid LogitsPostProcessorConfig state!"); + } + return tle::LogitsPostProcessorConfig(state[0].cast>(), + state[1].cast>(), state[2].cast()); + }; + + py::class_(m, "LogitsPostProcessorConfig") + .def(py::init, std::optional, + bool>(), + py::arg("processor_map") = py::none(), py::arg("processor_batched") = py::none(), + py::arg("replicate") = true) + .def_property("processor_map", &tle::LogitsPostProcessorConfig::getProcessorMap, + &tle::LogitsPostProcessorConfig::setProcessorMap) + .def_property("processor_batched", &tle::LogitsPostProcessorConfig::getProcessorBatched, + &tle::LogitsPostProcessorConfig::setProcessorBatched) + .def_property( 
+ "replicate", &tle::LogitsPostProcessorConfig::getReplicate, &tle::LogitsPostProcessorConfig::setReplicate) + .def(py::pickle(logitsPostProcessorConfigGetstate, logitsPostProcessorConfigSetstate)); auto extendedRuntimePerfKnobConfigSetstate = [](py::tuple state) { @@ -457,71 +539,38 @@ void InitBindings(pybind11::module_& m) &tle::ExtendedRuntimePerfKnobConfig::setEnableContextFMHAFP32Acc) .def(py::pickle(extendedRuntimePerfKnobConfigGetstate, extendedRuntimePerfKnobConfigSetstate)); - auto executorConfigGetState - = [&peftCacheConfigGetstate, &kvCacheConfigGetstate, &schedulerConfigGetstate, ¶llelConfigGetstate, - &extendedRuntimePerfKnobConfigGetstate](tle::ExecutorConfig const& self) + auto executorConfigGetState = [](tle::ExecutorConfig const& self) { - py::object peftCacheConfigState = py::none(); - - if (self.getPeftCacheConfig().has_value()) - { - peftCacheConfigState = peftCacheConfigGetstate(self.getPeftCacheConfig().value()); - } - auto kvCacheConfigState = kvCacheConfigGetstate(self.getKvCacheConfig()); - auto schedulerConfigState = schedulerConfigGetstate(self.getSchedulerConfig()); - auto extendedRuntimePerfKnobConfigState - = extendedRuntimePerfKnobConfigGetstate(self.getExtendedRuntimePerfKnobConfig()); - py::object parallelConfigState = py::none(); - if (self.getParallelConfig().has_value()) - { - parallelConfigState = parallelConfigGetstate(self.getParallelConfig().value()); - } - - return py::make_tuple(self.getMaxBeamWidth(), schedulerConfigState, kvCacheConfigState, + return py::make_tuple(self.getMaxBeamWidth(), self.getSchedulerConfig(), self.getKvCacheConfig(), self.getEnableChunkedContext(), self.getNormalizeLogProbs(), self.getIterStatsMaxIterations(), self.getRequestStatsMaxIterations(), self.getBatchingType(), self.getMaxBatchSize(), self.getMaxNumTokens(), - parallelConfigState, peftCacheConfigState, self.getLogitsPostProcessorMap(), - self.getLogitsPostProcessorBatched(), self.getReplicateLogitsPostProcessor(), 
self.getDecodingConfig(), - self.getGpuWeightsPercent(), self.getMaxQueueSize(), extendedRuntimePerfKnobConfigState); + self.getParallelConfig(), self.getPeftCacheConfig(), self.getLogitsPostProcessorConfig(), + self.getDecodingConfig(), self.getGpuWeightsPercent(), self.getMaxQueueSize(), + self.getExtendedRuntimePerfKnobConfig(), self.getDebugConfig()); }; - auto executorConfigSetState = [&kvCacheConfigSetstate, &peftCacheConfigSetstate, &schedulerConfigSetstate, - ¶llelConfigSetstate, &extendedRuntimePerfKnobConfigSetstate](py::tuple state) + auto executorConfigSetState = [](py::tuple state) { - if (state.size() != 19) + if (state.size() != 18) { throw std::runtime_error("Invalid state!"); } - auto kvCacheConfig = kvCacheConfigSetstate(state[2].cast()); - auto schedulerConfig = schedulerConfigSetstate(state[1].cast()); - auto extendedRuntimePerfKnobConfig = extendedRuntimePerfKnobConfigSetstate(state[18].cast()); - - std::optional peftCacheConfig; - if (state[11].cast() != py::none()) - { - peftCacheConfig = peftCacheConfigSetstate(state[11].cast()); - } - std::optional parallelConfig; - if (state[10].cast() != py::none()) - { - parallelConfig = parallelConfigSetstate(state[10].cast()); - } - - return tle::ExecutorConfig(state[0].cast(), schedulerConfig, kvCacheConfig, state[3].cast(), - state[4].cast(), state[5].cast(), state[6].cast(), - state[7].cast(), state[8].cast>(), - state[9].cast>(), parallelConfig, peftCacheConfig, - state[12].cast>(), - state[13].cast>(), state[14].cast(), - state[15].cast>(), state[16].cast(), - state[17].cast>(), extendedRuntimePerfKnobConfig); + return tle::ExecutorConfig(state[0].cast(), state[1].cast(), + state[2].cast(), state[3].cast(), state[4].cast(), + state[5].cast(), state[6].cast(), state[7].cast(), + state[8].cast>(), state[9].cast>(), + state[10].cast>(), state[11].cast>(), + state[12].cast>(), + state[13].cast>(), state[14].cast(), + state[15].cast>(), state[16].cast(), + state[17].cast>()); }; py::class_(m, 
"ExecutorConfig") .def(py::init, std::optional, std::optional, tle::PeftCacheConfig const&, - std::optional, std::optional, bool, - std::optional, float, std::optional, - tle::ExtendedRuntimePerfKnobConfig const&>(), + std::optional, std::optional, float, + std::optional, tle::ExtendedRuntimePerfKnobConfig const&, + std::optional>(), py::arg("max_beam_width") = 1, py::arg_v("scheduler_config", tle::SchedulerConfig(), "SchedulerConfig()"), py::arg_v("kv_cache_config", tle::KvCacheConfig(), "KvCacheConfig()"), py::arg("enable_chunked_context") = false, py::arg("normalize_log_probs") = true, @@ -531,11 +580,11 @@ void InitBindings(pybind11::module_& m) py::arg("max_batch_size") = py::none(), py::arg("max_num_tokens") = py::none(), py::arg("parallel_config") = py::none(), py::arg_v("peft_cache_config", tle::PeftCacheConfig(), "PeftCacheConfig()"), - py::arg("logits_post_processor_map") = py::none(), py::arg("logits_post_processor_batched") = py::none(), - py::arg("replicate_logits_post_processor") = true, py::arg("decoding_config") = py::none(), + py::arg("logits_post_processor_config") = py::none(), py::arg("decoding_config") = py::none(), py::arg("gpu_weights_percent") = 1.0, py::arg("max_queue_size") = py::none(), py::arg_v("extended_runtime_perf_knob_config", tle::ExtendedRuntimePerfKnobConfig(), - "ExtendedRuntimePerfKnobConfig()")) + "ExtendedRuntimePerfKnobConfig()"), + py::arg("debug_config") = py::none()) .def_property("max_beam_width", &tle::ExecutorConfig::getMaxBeamWidth, &tle::ExecutorConfig::setMaxBeamWidth) .def_property("max_batch_size", &tle::ExecutorConfig::getMaxBatchSize, &tle::ExecutorConfig::setMaxBatchSize) .def_property("max_num_tokens", &tle::ExecutorConfig::getMaxNumTokens, &tle::ExecutorConfig::setMaxNumTokens) @@ -555,12 +604,8 @@ void InitBindings(pybind11::module_& m) "parallel_config", &tle::ExecutorConfig::getParallelConfig, &tle::ExecutorConfig::setParallelConfig) .def_property( "peft_cache_config", 
&tle::ExecutorConfig::getPeftCacheConfig, &tle::ExecutorConfig::setPeftCacheConfig) - .def_property("logits_post_processor_map", &tle::ExecutorConfig::getLogitsPostProcessorMap, - &tle::ExecutorConfig::setLogitsPostProcessorMap) - .def_property("logits_post_processor_batched", &tle::ExecutorConfig::getLogitsPostProcessorBatched, - &tle::ExecutorConfig::setLogitsPostProcessorBatched) - .def_property("replicate_logits_post_processor", &tle::ExecutorConfig::getReplicateLogitsPostProcessor, - &tle::ExecutorConfig::setReplicateLogitsPostProcessor) + .def_property("logits_post_processor_config", &tle::ExecutorConfig::getLogitsPostProcessorConfig, + &tle::ExecutorConfig::setLogitsPostProcessorConfig) .def_property( "decoding_config", &tle::ExecutorConfig::getDecodingConfig, &tle::ExecutorConfig::setDecodingConfig) .def_property("gpu_weights_percent", &tle::ExecutorConfig::getGpuWeightsPercent, @@ -568,6 +613,7 @@ void InitBindings(pybind11::module_& m) .def_property("max_queue_size", &tle::ExecutorConfig::getMaxQueueSize, &tle::ExecutorConfig::setMaxQueueSize) .def_property("extended_runtime_perf_knob_config", &tle::ExecutorConfig::getExtendedRuntimePerfKnobConfig, &tle::ExecutorConfig::setExtendedRuntimePerfKnobConfig) + .def_property("debug_config", &tle::ExecutorConfig::getDebugConfig, &tle::ExecutorConfig::setDebugConfig) .def(py::pickle(executorConfigGetState, executorConfigSetState)); tensorrt_llm::pybind::executor::Executor::initBindings(m); diff --git a/cpp/tensorrt_llm/pybind/executor/executor.cpp b/cpp/tensorrt_llm/pybind/executor/executor.cpp index 87af73ab6..c9e10673e 100644 --- a/cpp/tensorrt_llm/pybind/executor/executor.cpp +++ b/cpp/tensorrt_llm/pybind/executor/executor.cpp @@ -42,11 +42,15 @@ Executor::Executor(std::filesystem::path const& encoderModelPath, std::filesyste mExecutor = std::make_unique(encoderModelPath, decoderModelPath, modelType, executorConfig); } -Executor::Executor(std::string const& engineBuffer, std::string const& jsonConfigStr, 
tle::ModelType modelType, +Executor::Executor(pybind11::buffer engineBuffer, std::string const& jsonConfigStr, tle::ModelType modelType, tle::ExecutorConfig const& executorConfig) { - mExecutor = std::make_unique( - std::vector(engineBuffer.begin(), engineBuffer.end()), jsonConfigStr, modelType, executorConfig); + py::buffer_info info = engineBuffer.request(); + auto begin = reinterpret_cast(info.ptr); + // the buffer is just 1-D array of uint8_t, so .shape[0] == number of bytes + auto end = reinterpret_cast(begin) + info.shape[0]; + mExecutor + = std::make_unique(std::vector(begin, end), jsonConfigStr, modelType, executorConfig); } Executor::Executor(std::string const& encoderEngineBuffer, std::string const& encoderJsonConfigStr, @@ -92,7 +96,7 @@ void Executor::initBindings(py::module_& m) tle::ExecutorConfig const&>(), py::arg("encoder_model_path"), py::arg("decoder_model_path"), py::arg("model_type"), py::arg("executor_config")) - .def(py::init(), + .def(py::init(), py::arg("engine_buffer"), py::arg("json_config_str"), py::arg("model_type"), py::arg("executor_config")) .def(py::init(), diff --git a/cpp/tensorrt_llm/pybind/executor/executor.h b/cpp/tensorrt_llm/pybind/executor/executor.h index 5c950a0ff..e19cfcc77 100644 --- a/cpp/tensorrt_llm/pybind/executor/executor.h +++ b/cpp/tensorrt_llm/pybind/executor/executor.h @@ -34,7 +34,7 @@ class Executor Executor(std::filesystem::path const& encoderModelPath, std::filesystem::path const& decoderModelPath, tle::ModelType modelType, tle::ExecutorConfig const& executorConfig); - Executor(std::string const& engineBuffer, std::string const& jsonConfigStr, tle::ModelType modelType, + Executor(pybind11::buffer engineBuffer, std::string const& jsonConfigStr, tle::ModelType modelType, tle::ExecutorConfig const& executorConfig); Executor(std::string const& encoderEngineBuffer, std::string const& encoderJsonConfigStr, diff --git a/cpp/tensorrt_llm/runtime/CMakeLists.txt b/cpp/tensorrt_llm/runtime/CMakeLists.txt index 
10ddbc3a2..07c1bd6fb 100644 --- a/cpp/tensorrt_llm/runtime/CMakeLists.txt +++ b/cpp/tensorrt_llm/runtime/CMakeLists.txt @@ -20,6 +20,7 @@ set(SRCS utils/debugUtils.cu bufferManager.cpp explicitDraftTokensBuffers.cpp + lookaheadBuffers.cpp layerProfiler.cpp loraManager.cpp loraUtils.cpp @@ -68,6 +69,6 @@ set_property(TARGET runtime_src PROPERTY CUDA_RESOLVE_DEVICE_SYMBOLS ON) target_include_directories(runtime_src PRIVATE ${MPI_C_INCLUDE_DIRS}) -if(ENABLE_MULTI_DEVICE EQUAL 1) +if(ENABLE_MULTI_DEVICE) target_link_libraries(runtime_src PUBLIC ${NCCL_LIB}) endif() diff --git a/cpp/tensorrt_llm/runtime/gptDecoder.cpp b/cpp/tensorrt_llm/runtime/gptDecoder.cpp index 484736afb..39c09ce59 100644 --- a/cpp/tensorrt_llm/runtime/gptDecoder.cpp +++ b/cpp/tensorrt_llm/runtime/gptDecoder.cpp @@ -59,7 +59,8 @@ GptDecoder::GptDecoder(executor::DecodingMode const& mode, size_t maxBatchSiz template void GptDecoder::setup(SamplingConfig const& samplingConfig, size_t batchSize, TensorConstPtr const& batchSlots, - std::optional const& output) + std::optional const& output, + std::optional const> const& requestsOpt) { TLLM_LOG_TRACE("%s start", __PRETTY_FUNCTION__); @@ -135,7 +136,29 @@ void GptDecoder::setup(SamplingConfig const& samplingConfig, size_t batchSize setupParams->decodingParams = explicitDraftTokensParams; } + else if (mDecodingMode.isLookahead()) + { + TLLM_CHECK_WITH_INFO(output.has_value(), "Output tensors must be provided for Lookahead decoding"); + TLLM_LOG_DEBUG("gptDecoder setup lookahead, batchSize=%d", batchSize); + auto lookaheadParams = std::make_shared(); + TLLM_CHECK(requestsOpt); + auto& requests = requestsOpt.value(); + lookaheadParams->prompt.resize(0); + lookaheadParams->prompt.reserve(batchSize); + lookaheadParams->algoConfigs.resize(0); + lookaheadParams->algoConfigs.reserve(batchSize); + for (size_t bi = 0; bi < batchSize; bi++) + { + lookaheadParams->prompt.emplace_back(ITensor::slice(requests[bi].ids, 0, requests[bi].inputLen)); + 
TLLM_CHECK(requests[bi].lookaheadRuntimeConfig); + lookaheadParams->algoConfigs.emplace_back(requests[bi].lookaheadRuntimeConfig.value()); + } + lookaheadParams->generationLengths = output->lookaheadOutputs->generationLengths; + lookaheadParams->positionOffsets = output->lookaheadOutputs->positionOffsets; + lookaheadParams->attentionPackedMasks = output->lookaheadOutputs->packedMasks; + setupParams->decodingParams = std::move(lookaheadParams); + } setupParams->decodingParams->randomSeed = mSamplingConfig.randomSeed; mDynamicDecodeLayer->setup(batchSize, mSamplingConfig.beamWidth, batchSlots, setupParams); @@ -248,6 +271,18 @@ void prepareExplicitDraftTokensInput( TLLM_LOG_TRACE("%s stop", __PRETTY_FUNCTION__); } +void prepareLookaheadInputs( + DecodingInput const& inputs, size_t maxBatchSize, std::shared_ptr& baseInputs) +{ + TLLM_LOG_TRACE("%s start", __PRETTY_FUNCTION__); + + auto inputParams = std::dynamic_pointer_cast(baseInputs); + auto const& lookaheadInputs = inputs.lookaheadInputs.value(); + inputParams->curTokensPerStep = lookaheadInputs.tokensPerStep; + + TLLM_LOG_TRACE("%s stop", __PRETTY_FUNCTION__); +} + template std::shared_ptr prepareInputs(DecodingInput const& input, size_t maxBatchSize, tle::DecodingMode const& decodingMode, std::shared_ptr bufferManager) @@ -272,7 +307,7 @@ std::shared_ptr prepareInputs(DecodingInput const& input } else if (decodingMode.isLookahead()) { - // TODO add lookahead inputs + forwardParams = std::make_shared(input.endIds, input.batchSlots); } else if (decodingMode.isExplicitDraftTokens()) { @@ -319,9 +354,9 @@ std::shared_ptr prepareInputs(DecodingInput const& input forwardParams->stopCriteriaInputs = prepareStopCriteriaInputs(input); - if (input.finished) + if (input.finishReasons) { - forwardParams->finished = input.finished; + forwardParams->finished = input.finishReasons; } // Medusa @@ -336,6 +371,12 @@ std::shared_ptr prepareInputs(DecodingInput const& input prepareExplicitDraftTokensInput(input, maxBatchSize, 
forwardParams); } + if (input.lookaheadInputs) + { + prepareLookaheadInputs(input, maxBatchSize, forwardParams); + forwardParams->localBatchSize = input.batchSize; + } + TLLM_LOG_TRACE("%s stop", __PRETTY_FUNCTION__); return forwardParams; @@ -430,6 +471,15 @@ void prepareSpeculativeDecodingOutputs(DecodingOutput& output, std::shared_ptrgenerationLengthsHost = explicitDraftTokensBuffers->generationLengthsHost; outputParams->maxGenLengthHost = explicitDraftTokensBuffers->maxGenLengthHost; } + if (decodingMode.isLookahead()) + { + TLLM_CHECK(output.lookaheadOutputs); + auto outputParams = std::dynamic_pointer_cast(baseOutputs); + outputParams->packedMasks = output.lookaheadOutputs->packedMasks; + outputParams->positionIds = output.lookaheadOutputs->positionIds; + outputParams->positionOffsets = output.lookaheadOutputs->positionOffsets; + outputParams->generationLengths = output.lookaheadOutputs->generationLengths; + } TLLM_LOG_TRACE("%s stop", __PRETTY_FUNCTION__); } @@ -444,10 +494,14 @@ std::shared_ptr prepareOutputs( { outputParams = std::make_shared(output.ids); } - else if (decodingMode.isMedusa() || decodingMode.isLookahead()) + else if (decodingMode.isMedusa()) { outputParams = std::make_shared(output.ids); } + else if (decodingMode.isLookahead()) + { + outputParams = std::make_shared(output.ids); + } else if (decodingMode.isExplicitDraftTokens()) { outputParams = std::make_shared(output.ids); @@ -470,9 +524,9 @@ std::shared_ptr prepareOutputs( outputParams->parentIds = output.parentIds; } - if (output.finished) + if (output.finishReasons) { - outputParams->finished = output.finished; + outputParams->finished = output.finishReasons; } if (output.finishedSum) @@ -609,8 +663,7 @@ void GptDecoder::gatherTree(DecodingOutput const& decodingOutput, DecodingInp bh.numBeamsCBA = bufferCast(*decodingOutput.beamHypotheses.numBeamsCBA); bh.minNormedScoresCBA = bufferCast(*decodingOutput.beamHypotheses.minNormedScoresCBA); bh.batchDones = 
bufferCast(*decodingOutput.beamHypotheses.batchDones); - bh.finished = reinterpret_cast( - bufferCast(*decodingOutput.finished)); + bh.finished = bufferCast(*decodingOutput.finishReasons); bh.outputIdsUnfinish = bufferCast(*decodingOutput.ids); bh.parentIdsUnfinish = bufferCast(*decodingOutput.parentIds); diff --git a/cpp/tensorrt_llm/runtime/gptDecoderBatched.cpp b/cpp/tensorrt_llm/runtime/gptDecoderBatched.cpp index e7151cca2..b343796db 100644 --- a/cpp/tensorrt_llm/runtime/gptDecoderBatched.cpp +++ b/cpp/tensorrt_llm/runtime/gptDecoderBatched.cpp @@ -20,6 +20,8 @@ #include "tensorrt_llm/kernels/decodingKernels.h" #include "tensorrt_llm/runtime/bufferManager.h" #include "tensorrt_llm/runtime/cudaEvent.h" +#include "tensorrt_llm/runtime/memoryCounters.h" +#include "tensorrt_llm/runtime/runtimeBuffers.h" #include "tensorrt_llm/runtime/runtimeKernels.h" #include @@ -122,6 +124,8 @@ GptDecoderBatched::GptDecoderBatched(std::size_t vocabSize, std::size_t vocabSiz dOutput->cumLogProbs = mBufferManager.emptyTensor(MemoryType::kGPU, nvFloatType); dOutput->logProbs = mBufferManager.emptyTensor(MemoryType::kGPU, nvFloatType); dOutput->beamHypotheses.empty(mBufferManager); + dOutput->finishReasons + = mBufferManager.emptyTensor(MemoryType::kGPU, TRTDataType::value); mNumDraftTokens = mBufferManager.emptyTensor(MemoryType::kGPU, nvSizeType); mCurandStates = mBufferManager.emptyTensor(MemoryType::kGPU, nvinfer1::DataType::kINT8); @@ -180,6 +184,10 @@ void GptDecoderBatched::allocateSpeculativeDecodingBuffers() = mBufferManager.emptyTensor(MemoryType::kGPU, nvinfer1::DataType::kINT32); } } + if (mSpeculativeDecodingMode.isLookaheadDecoding()) + { + dInput->lookaheadInputs = DecodingInput::LookaheadInputs(); + } if (mSpeculativeDecodingMode.needsKVCacheRewind()) { speculativeDecodingOutputs.acceptedTokensLen @@ -204,6 +212,17 @@ void GptDecoderBatched::setupExplicitDraftTokens(ExplicitDraftTokensBuffers::Inp TLLM_LOG_TRACE("%s stop", __PRETTY_FUNCTION__); } +void 
GptDecoderBatched::setupLookahead(LookaheadDecodingBuffers lookaheadDecodingBuffers) +{ + TLLM_LOG_TRACE("%s start", __PRETTY_FUNCTION__); + + TLLM_CHECK(mSpeculativeDecodingMode.isLookaheadDecoding()); + mJointDecodingOutput->lookaheadOutputs = std::move(lookaheadDecodingBuffers); + mJointDecodingInput->lookaheadInputs->tokensPerStep = mJointDecodingOutput->lookaheadOutputs->generationLengths; + + TLLM_LOG_TRACE("%s stop", __PRETTY_FUNCTION__); +} + void GptDecoderBatched::setup(executor::DecodingMode const& mode, SizeType32 maxBatchSize, SizeType32 maxBeamWidth, SizeType32 maxAttentionWindow, SizeType32 sinkTokenLength, SizeType32 maxSequenceLength, SizeType32 maxTokensPerEngineStep, nvinfer1::DataType dtype, ModelConfig const& modelConfig) @@ -270,6 +289,9 @@ void GptDecoderBatched::setup(executor::DecodingMode const& mode, SizeType32 max mFinishedSteps->reshape(maxTokensPerStepXmaxBatchSizeXmaxBeamWidth); mBufferManager.setZero(*mFinishedSteps); + dOutput.finishReasons->reshape(maxBatchSizeXmaxBeamWidth); + mBufferManager.setZero(*dOutput.finishReasons); + mBatchSlotsSetup->reshape(ITensor::makeShape({maxBatchSize})); mBatchSlotsDecoder->reshape(ITensor::makeShape({maxTokensPerEngineStep, maxBatchSize})); mBatchSlotsAcceptTokens->reshape(ITensor::makeShape({maxTokensPerEngineStep, maxBatchSize})); @@ -684,8 +706,15 @@ void GptDecoderBatched::newRequestLookahead(SizeType32 batchIdx, decoder_batch:: { TLLM_LOG_TRACE("%s start", __PRETTY_FUNCTION__); - // TODO(nkorobov) add lookahead layer - TLLM_LOG_WARNING("Lookahead decoding is not supported yet."); + TLLM_CHECK(mJointDecodingOutput->lookaheadOutputs); + + auto& stream = mRuntimeStream; + + // The first generation step only generate 1 token. 
+ TensorPtr curTokensPerStepSlice + = ITensor::slice(constPointerCast(mJointDecodingInput->lookaheadInputs->tokensPerStep), batchIdx, 1); + kernels::invokeFill(*curTokensPerStepSlice, 1, *stream); + TLLM_LOG_TRACE("%s stop", __PRETTY_FUNCTION__); } @@ -747,7 +776,7 @@ void GptDecoderBatched::newRequests(std::vector const& seqSlots, TensorPtr batchSlotsView = ITensor::slice(mBatchSlotsSetup, 0, localBatchSize); auto samplingConfig = SamplingConfig(samplingConfigs); - mDecoder->setup(samplingConfig, localBatchSize, batchSlotsView, {*mJointDecodingOutput}); + mDecoder->setup(samplingConfig, localBatchSize, batchSlotsView, {*mJointDecodingOutput}, {requests}); auto const& stream = mDecoderStream; CudaEvent event{}; @@ -905,7 +934,7 @@ void GptDecoderBatched::forwardDecoder( TensorPtr newTokensStepView = ITensor::slice(dOutput.newTokensSteps, step, mMaxDecodingDecoderTokens); dInput.logitsVec = logitsVec; - dInput.finished = finishedStepsInput; + dInput.finishReasons = finishedStepsInput; if (maxBeamWidth > 1 && input.seqSlots) { @@ -925,7 +954,7 @@ void GptDecoderBatched::forwardDecoder( } dOutput.newTokens = newTokensStepView; - dOutput.finished = finishedStepsOutput; + dOutput.finishReasons = finishedStepsOutput; dOutput.lengths = sequenceLengths; if (localBatchDecoderIdx > 0) @@ -1057,7 +1086,7 @@ CudaEvent GptDecoderBatched::postProcessRequest( slice(dOutput.cumLogProbs, dJointOutput.cumLogProbs); slice(dOutput.cacheIndirection, dJointOutput.cacheIndirection); slice(dOutput.lengths, dJointOutput.lengths); - slice(dOutput.finished, dJointOutput.finished); + slice(dOutput.finishReasons, dJointOutput.finishReasons); slice(dOutput.logProbs, dJointOutput.logProbs); dOutput.newTokens = ITensor::view(dJointOutput.newTokens); diff --git a/cpp/tensorrt_llm/runtime/gptSession.cpp b/cpp/tensorrt_llm/runtime/gptSession.cpp index 63687f042..a871113b5 100644 --- a/cpp/tensorrt_llm/runtime/gptSession.cpp +++ b/cpp/tensorrt_llm/runtime/gptSession.cpp @@ -20,7 +20,6 @@ #include 
"iBuffer.h" #include "tensorrt_llm/batch_manager/kvCacheManager.h" #include "tensorrt_llm/common/logger.h" -#include "tensorrt_llm/common/safetensors.h" #include "tensorrt_llm/common/stringUtils.h" #include "tensorrt_llm/runtime/gptDecoderBatched.h" #include "tensorrt_llm/runtime/ipcUtils.h" @@ -386,7 +385,7 @@ void GptSession::setup(Config const& sessionConfig) if (mModelConfig.getManageWeightsType() != ModelConfig::ManageWeightsType::kDisabled) { TLLM_CHECK_WITH_INFO(sessionConfig.enginePath.has_value(), "Engine path is not set."); - auto weightPath = sessionConfig.enginePath.value().parent_path() + auto weightPath = sessionConfig.enginePath->parent_path() / ("rank" + std::to_string(mWorldConfig.getLocalRank()) + "_managed_weights.safetensors"); mRuntime->loadManagedWeights(weightPath.string()); } diff --git a/cpp/tensorrt_llm/runtime/ipcUtils.cpp b/cpp/tensorrt_llm/runtime/ipcUtils.cpp index 49c4bdca1..727e34a21 100644 --- a/cpp/tensorrt_llm/runtime/ipcUtils.cpp +++ b/cpp/tensorrt_llm/runtime/ipcUtils.cpp @@ -29,7 +29,7 @@ namespace tensorrt_llm::runtime namespace { -bool setPeerAccess(WorldConfig const& worldConfig, bool enable) +bool canAccessPeer(WorldConfig const& worldConfig) { TLLM_LOG_TRACE("%s start", __PRETTY_FUNCTION__); auto const srcDevice = worldConfig.getDevice(); @@ -50,20 +50,6 @@ bool setPeerAccess(WorldConfig const& worldConfig, bool enable) TLLM_LOG_TRACE("%s stop", __PRETTY_FUNCTION__); return false; } - - if (enable) - { - cudaDeviceEnablePeerAccess(destDevice, 0); - } - else - { - cudaDeviceDisablePeerAccess(destDevice); - } - auto const error = cudaGetLastError(); - if (error != cudaErrorPeerAccessAlreadyEnabled && error != cudaErrorPeerAccessNotEnabled) - { - TLLM_CUDA_CHECK(error); - } } TLLM_LOG_TRACE("%s stop", __PRETTY_FUNCTION__); return true; @@ -147,7 +133,7 @@ AllReduceBuffers::AllReduceBuffers(SizeType32 maxBatchSize, SizeType32 maxBeamWi SizeType32 hiddenSize, BufferManager const& manager, WorldConfig const& worldConfig) { 
TLLM_LOG_TRACE("%s start", __PRETTY_FUNCTION__); - auto const isP2pSupported = setPeerAccess(worldConfig, true); + auto const isP2pSupported = canAccessPeer(worldConfig); auto const tpSize = worldConfig.getTensorParallelism(); auto const bufferSize = tpSize diff --git a/cpp/tensorrt_llm/runtime/lookaheadBuffers.cpp b/cpp/tensorrt_llm/runtime/lookaheadBuffers.cpp new file mode 100644 index 000000000..465641bf0 --- /dev/null +++ b/cpp/tensorrt_llm/runtime/lookaheadBuffers.cpp @@ -0,0 +1,153 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: NVIDIA TensorRT Source Code License Agreement + * + * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual + * property and proprietary rights in and to this material, related + * documentation and any modifications thereto. Any use, reproduction, + * disclosure or distribution of this material and related documentation + * without an express license agreement from NVIDIA CORPORATION or + * its affiliates is strictly prohibited. 
+ */ + +#include "tensorrt_llm/runtime/lookaheadBuffers.h" +#include "tensorrt_llm/common/logger.h" +#include "tensorrt_llm/runtime/common.h" + +namespace tensorrt_llm::runtime +{ + +LookaheadDecodingBuffers::LookaheadDecodingBuffers( + SizeType32 maxNumSequences, SizeType32 maxTokensPerStep, runtime::BufferManager const& bufferManager) + : generationLengths(bufferManager.gpu(ITensor::makeShape({maxNumSequences}), nvinfer1::DataType::kINT32)) + , positionOffsets( + bufferManager.gpu(ITensor::makeShape({maxNumSequences, maxTokensPerStep}), nvinfer1::DataType::kINT32)) + , packedMasks(bufferManager.gpu(ITensor::makeShape({maxNumSequences, maxTokensPerStep, + static_cast(common::divUp(maxTokensPerStep, 32))}), + nvinfer1::DataType::kINT32)) + , positionIds( + bufferManager.gpu(ITensor::makeShape({maxNumSequences, maxTokensPerStep}), nvinfer1::DataType::kINT32)) +{ + TLLM_LOG_DEBUG( + "LookaheadDecodingBuffers, maxNumSequences = %d, maxTokensPerStep = %d", maxNumSequences, maxTokensPerStep); +} + +LookaheadRuntimeBuffers::LookaheadRuntimeBuffers(SizeType32 maxBatchSize, SizeType32 maxBeamWidth, + runtime::BufferManager const& manager, runtime::ModelConfig const& modelConfig, + runtime::WorldConfig const& worldConfig, executor::DecodingConfig const& /* decodingConfig */, + runtime::TllmRuntime const& runtime) +{ + TLLM_LOG_TRACE("%s start", __PRETTY_FUNCTION__); + TLLM_CHECK_WITH_INFO(maxBeamWidth == 1, "Lookahead decoding does not support beam search"); + + // auto const tokensPerStep = modelConfig.getMaxTokensPerStep(); + auto const tokensPerStep = modelConfig.getMaxDecodingTokens(); + auto const numPackedMasks = static_cast(tensorrt_llm::common::divUp(tokensPerStep, 32)); + + // Copy buffers to device + packedMasksDevice + = manager.gpu(ITensor::makeShape({maxBatchSize * tokensPerStep, numPackedMasks}), nvinfer1::DataType::kINT32); + positionOffsetsDevice = manager.gpu(ITensor::makeShape({maxBatchSize, tokensPerStep}), nvinfer1::DataType::kINT32); + 
generationLengthsDevice = manager.gpu(ITensor::makeShape({maxBatchSize}), nvinfer1::DataType::kINT32); + positionIdsDevice = manager.gpu(ITensor::makeShape({maxBatchSize, tokensPerStep}), nvinfer1::DataType::kINT32); + + packedMaskHost = manager.cpu(packedMasksDevice->getShape(), nvinfer1::DataType::kINT32); + positionOffsetsHost = manager.cpu(positionOffsetsDevice->getShape(), nvinfer1::DataType::kINT32); + generationLengthsHost = manager.cpu(generationLengthsDevice->getShape(), nvinfer1::DataType::kINT32); + positionIdsHost = manager.gpu(positionOffsetsDevice->getShape(), nvinfer1::DataType::kINT32); + + packedMaskHostCopy = manager.cpu(packedMasksDevice->getShape(), nvinfer1::DataType::kINT32); + positionOffsetsHostCopy = manager.cpu(positionOffsetsDevice->getShape(), nvinfer1::DataType::kINT32); + generationLengthsHostCopy = manager.cpu(generationLengthsDevice->getShape(), nvinfer1::DataType::kINT32); + positionIdsHostCopy = manager.cpu(positionIdsDevice->getShape(), nvinfer1::DataType::kINT32); + + batchSlotsHostCopy = manager.cpu(generationLengthsDevice->getShape(), nvinfer1::DataType::kINT32); + + TLLM_LOG_TRACE("%s stop", __PRETTY_FUNCTION__); +} + +void LookaheadRuntimeBuffers::setFromInputs(SizeType32 numCtxSequences, SizeType32 numGenSequences, + ITensor const& requestTypes, ITensor const& seqSlots, LookaheadDecodingBuffers const& decoderLookaheadBuffers, + TllmRuntime const& runtime, ModelConfig const& modelConfig, WorldConfig const& worldConfig) const +{ + TLLM_LOG_TRACE("%s start", __PRETTY_FUNCTION__); + + auto const& manager = runtime.getBufferManager(); + + auto const tokensPerStep = modelConfig.getMaxDecodingTokens(); + + manager.copy(*decoderLookaheadBuffers.positionOffsets, *positionOffsetsHostCopy); + manager.copy(*decoderLookaheadBuffers.packedMasks, *packedMaskHostCopy); + manager.copy(*decoderLookaheadBuffers.positionIds, *positionIdsHostCopy); + manager.copy(seqSlots, *batchSlotsHostCopy); + 
manager.copy(*decoderLookaheadBuffers.generationLengths, *generationLengthsHostCopy); + + manager.getStream().synchronize(); + + BufferRange batchSlotsRange(*batchSlotsHostCopy); + for (SizeType32 bi = 0; bi < numGenSequences; bi++) + { + SizeType32 gbi = batchSlotsRange[bi + numCtxSequences]; + manager.copy(*ITensor::at(generationLengthsHostCopy, {gbi}), *ITensor::at(generationLengthsHost, {bi})); + manager.copy(*ITensor::at(positionOffsetsHostCopy, {gbi}), *ITensor::at(positionOffsetsHost, {bi})); + manager.copy(*ITensor::slice(packedMaskHostCopy, gbi * tokensPerStep, tokensPerStep), + *ITensor::slice(packedMaskHost, bi * tokensPerStep, tokensPerStep)); + manager.copy(*ITensor::at(positionIdsHostCopy, {gbi}), *ITensor::at(positionIdsHost, {bi})); + } + manager.copy(*ITensor::slice(generationLengthsHost, 0, numGenSequences), + *ITensor::slice(generationLengthsDevice, 0, numGenSequences)); + manager.copy(*ITensor::slice(positionOffsetsHost, 0, numGenSequences), + *ITensor::slice(positionOffsetsDevice, 0, numGenSequences)); + manager.copy(*ITensor::slice(packedMaskHost, 0, numGenSequences * tokensPerStep), + *ITensor::slice(packedMasksDevice, 0, numGenSequences * tokensPerStep)); + manager.copy( + *ITensor::slice(positionIdsHost, 0, numGenSequences), *ITensor::slice(positionIdsDevice, 0, numGenSequences)); + + manager.getStream().synchronize(); + + TLLM_LOG_TRACE("%s stop", __PRETTY_FUNCTION__); +} + +void LookaheadRuntimeBuffers::reshape(SizeType32 numCtxSequences, SizeType32 numGenSequences, SizeType32 tokensPerStep) +{ + TLLM_LOG_TRACE("%s start", __PRETTY_FUNCTION__); + + auto const numSequences = numGenSequences; + + auto packedMaskShape = packedMasksDevice->getShape(); + packedMaskShape.d[0] = numSequences * tokensPerStep; + packedMasksDevice->reshape(packedMaskShape); + packedMaskHost->reshape(packedMaskShape); + + auto generationLengthsShape = generationLengthsDevice->getShape(); + generationLengthsShape.d[0] = numSequences; + 
generationLengthsDevice->reshape(generationLengthsShape); + generationLengthsHost->reshape(generationLengthsShape); + + auto positionOffsetsShape = positionOffsetsDevice->getShape(); + positionOffsetsShape.d[0] = numSequences; + positionOffsetsDevice->reshape(positionOffsetsShape); + positionOffsetsHost->reshape(positionOffsetsShape); + + auto positionIdsShape = positionIdsDevice->getShape(); + positionIdsShape.d[0] = numSequences; + positionIdsDevice->reshape(positionIdsShape); + positionIdsHost->reshape(positionIdsShape); + + auto batchSlotsShape = batchSlotsHostCopy->getShape(); + batchSlotsShape.d[0] = numCtxSequences + numGenSequences; + batchSlotsHostCopy->reshape(batchSlotsShape); + TLLM_LOG_TRACE("%s stop", __PRETTY_FUNCTION__); +} + +void LookaheadRuntimeBuffers::insertInputTensors( + TensorMap& inputBuffers, TensorMap& /* outputBuffers */, runtime::WorldConfig const& /* worldConfig */) const +{ + TLLM_LOG_TRACE("%s start", __PRETTY_FUNCTION__); + inputBuffers.insert_or_assign("spec_decoding_packed_mask", packedMasksDevice); + inputBuffers.insert_or_assign("spec_decoding_generation_lengths", generationLengthsDevice); + inputBuffers.insert_or_assign("spec_decoding_position_offsets", positionOffsetsDevice); + TLLM_LOG_TRACE("%s stop", __PRETTY_FUNCTION__); +} + +} // namespace tensorrt_llm::runtime diff --git a/cpp/tensorrt_llm/runtime/statefulGptDecoder.cpp b/cpp/tensorrt_llm/runtime/statefulGptDecoder.cpp index 48a399e12..8601ad0cc 100644 --- a/cpp/tensorrt_llm/runtime/statefulGptDecoder.cpp +++ b/cpp/tensorrt_llm/runtime/statefulGptDecoder.cpp @@ -55,7 +55,7 @@ StatefulGptDecoder::StatefulGptDecoder(std::size_t vocabSize, std::size_t vocabS dOutput->newTokens = mBufferManager.emptyTensor(MemoryType::kGPU, nvTokenIdType); dOutput->parentIds = mBufferManager.emptyTensor(MemoryType::kGPU, nvTokenIdType); - dOutput->finished + dOutput->finishReasons = mBufferManager.emptyTensor(MemoryType::kGPU, TRTDataType::value); dOutput->finishedSum = 
mBufferManager.pinnedPool(ITensor::makeShape({1}), nvSizeType); dOutput->lengths = mBufferManager.emptyTensor(MemoryType::kGPU, nvSizeType); @@ -129,9 +129,9 @@ void StatefulGptDecoder::reshapeBuffers(SizeType32 batchSize, SizeType32 beamWid dOutput.newTokens->reshape(batchSizeXbeamWidth); mBufferManager.setZero(*dOutput.newTokens); dOutput.parentIds->reshape(outputIdsShape); - dOutput.finished->reshape(batchSizeXbeamWidth); - dInput.finished = ITensor::view(dOutput.finished); - mBufferManager.setZero(*dOutput.finished); + dOutput.finishReasons->reshape(batchSizeXbeamWidth); + dInput.finishReasons = ITensor::view(dOutput.finishReasons); + mBufferManager.setZero(*dOutput.finishReasons); dOutput.finishedSum->reshape(batchSizeShape); mBufferManager.setZero(*dOutput.finishedSum); @@ -266,7 +266,7 @@ void StatefulGptDecoder::newBatch( // output auto& dOutput = *mDecodingOutput; manager.setZero(*dOutput.newTokens); - manager.setZero(*dOutput.finished); + manager.setZero(*dOutput.finishReasons); manager.setZero(*dOutput.finishedSum); // If outputs contains cumLogProbs, use that diff --git a/cpp/tensorrt_llm/runtime/utils/numpyUtils.cpp b/cpp/tensorrt_llm/runtime/utils/numpyUtils.cpp index ceb9b0704..63516f916 100644 --- a/cpp/tensorrt_llm/runtime/utils/numpyUtils.cpp +++ b/cpp/tensorrt_llm/runtime/utils/numpyUtils.cpp @@ -17,6 +17,7 @@ #include "tensorrt_llm/runtime/utils/numpyUtils.h" #include "tensorrt_llm/common/assert.h" +#include "tensorrt_llm/common/logger.h" #include "tensorrt_llm/common/memoryUtils.h" #include "tensorrt_llm/common/stringUtils.h" #include "tensorrt_llm/runtime/bufferManager.h" @@ -50,8 +51,10 @@ std::string getNumpyTypeDesc(nvinfer1::DataType type) return type_map.count(type) > 0 ? 
type_map.at(type) : "x"; } -nvinfer1::DataType typeFromNumpyDesc(std::string type) +nvinfer1::DataType typeFromNumpyDesc(std::string const& type) { + TLLM_LOG_DEBUG("numpy type: %s", type.c_str()); + using dt = nvinfer1::DataType; static const std::unordered_map type_map{{"?", dt::kBOOL}, {"u1", dt::kUINT8}, {"i1", dt::kINT8}, {"i4", dt::kINT32}, {"i8", dt::kINT64}, {"f2", dt::kHALF}, {"f4", dt::kFLOAT}}; @@ -77,6 +80,8 @@ void parseNpyIntro(FILE*& f_ptr, uint32_t& header_len, uint32_t& start_data) n_elems = fread((void*) &npy_major, sizeof(uint8_t), 1, f_ptr); n_elems += fread((void*) &npy_minor, sizeof(uint8_t), 1, f_ptr); + TLLM_LOG_DEBUG("npy format version: %d.%d", npy_major, npy_minor); + if (npy_major == 1) { uint16_t header_len_u16 = 0; @@ -109,11 +114,18 @@ int parseNpyHeader(FILE*& f_ptr, uint32_t header_len, nvinfer1::DataType& type, std::string header(header_c, header_len); free(header_c); + TLLM_LOG_DEBUG("npy header: %s", header.c_str()); + size_t start, end; start = header.find("'descr'") + 7; start = header.find("'", start); + // ignore byte order specifier + if (header[start + 1] == '<' || header[start + 1] == '>' || header[start + 1] == '=') + { + ++start; + } end = header.find("'", start + 1); - type = typeFromNumpyDesc(header.substr(start + 2, end - start - 2)); + type = typeFromNumpyDesc(header.substr(start + 1, end - start - 1)); start = header.find("'fortran_order'") + 15; start = header.find(":", start); diff --git a/cpp/tensorrt_llm/runtime/utils/numpyUtils.h b/cpp/tensorrt_llm/runtime/utils/numpyUtils.h index 6cb93ddbf..b5b253068 100644 --- a/cpp/tensorrt_llm/runtime/utils/numpyUtils.h +++ b/cpp/tensorrt_llm/runtime/utils/numpyUtils.h @@ -25,8 +25,7 @@ namespace tensorrt_llm::runtime::utils { //! \brief Create new tensor from numpy file. 
-[[nodiscard]] ITensor::UniquePtr loadNpy( - BufferManager const& manager, std::string const& npyFile, const MemoryType where); +[[nodiscard]] ITensor::UniquePtr loadNpy(BufferManager const& manager, std::string const& npyFile, MemoryType where); //! \brief Save tensor to numpy file. void saveNpy(BufferManager const& manager, ITensor const& tensor, std::string const& filename); diff --git a/cpp/tests/CMakeLists.txt b/cpp/tests/CMakeLists.txt index bb99db098..035ad18b5 100644 --- a/cpp/tests/CMakeLists.txt +++ b/cpp/tests/CMakeLists.txt @@ -82,7 +82,7 @@ add_gtest(gptDecoderBatchedTest runtime/gptDecoderBatchedTest.cpp) add_gtest(gptSessionTest runtime/gptSessionTest.cpp) target_link_libraries(gptSessionTest PRIVATE modelSpecStatic) add_gtest(memoryUtilsTest common/memoryUtilsTest.cu) -if(ENABLE_MULTI_DEVICE EQUAL 1) +if(ENABLE_MULTI_DEVICE) add_gtest(mpiUtilsTest common/mpiUtilsTest.cpp) endif() add_gtest(quantizationTest common/quantizationTest.cpp) @@ -99,6 +99,7 @@ add_gtest(samplingTest runtime/samplingTest.cpp) add_gtest(samplingConfigTest runtime/samplingConfigTest.cpp) add_gtest(iTensorTest runtime/iTensorTest.cpp) add_gtest(iBufferTest runtime/iBufferTest.cpp) +add_gtest(utilsTest runtime/utilsTest.cpp) add_gtest(worldConfigTest runtime/worldConfigTest.cpp) add_gtest(medusaModuleTest runtime/medusaModuleTest.cpp) add_gtest(mixtureOfExpertsTest kernels/mixtureOfExpertsTest.cu) diff --git a/cpp/tests/kernels/ropeTest.cu b/cpp/tests/kernels/ropeTest.cu index f2ca57eb4..36bc3478c 100644 --- a/cpp/tests/kernels/ropeTest.cu +++ b/cpp/tests/kernels/ropeTest.cu @@ -275,7 +275,8 @@ protected: std::shared_ptr mBufferManager; std::shared_ptr mStream; BufferManager::ITensorPtr cu_q_seqlens_tensor{nullptr}, cu_kv_seqlens_tensor{nullptr}, - padding_offset_tensor{nullptr}, fmha_tile_counter_ptr_tensor{nullptr}, rotary_inv_freq_buf_tensor{nullptr}; + padding_offset_tensor{nullptr}, encoder_padding_offset_tensor{nullptr}, fmha_tile_counter_ptr_tensor{nullptr}, + 
rotary_inv_freq_buf_tensor{nullptr}; std::mt19937 gen; ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // initialize params coming from GPTAttentionPluginCommon @@ -364,9 +365,10 @@ protected: cu_q_seqlens_tensor = mBufferManager->pinned(ITensor::makeShape({cu_seqlens_size}), nvinfer1::DataType::kINT32); cu_kv_seqlens_tensor = mBufferManager->pinned(ITensor::makeShape({cu_seqlens_size}), nvinfer1::DataType::kINT32); - padding_offset_tensor = mBufferManager->pinned( - ITensor::makeShape({batch_size, (mCrossAttention ? cross_qkv_length : input_seq_length)}), - nvinfer1::DataType::kINT32); + padding_offset_tensor + = mBufferManager->pinned(ITensor::makeShape({batch_size, input_seq_length}), nvinfer1::DataType::kINT32); + encoder_padding_offset_tensor + = mBufferManager->pinned(ITensor::makeShape({batch_size, cross_qkv_length}), nvinfer1::DataType::kINT32); fmha_tile_counter_ptr_tensor = mBufferManager->pinned(ITensor::makeShape({mEnableContextFMHA ? 1 : 0}), nvinfer1::DataType::kINT32); rotary_inv_freq_buf_tensor = mBufferManager->pinned( @@ -470,6 +472,7 @@ protected: cu_q_seqlens = bufferCast(*(this->cu_q_seqlens_tensor)); int* cu_kv_seqlens = bufferCast(*(this->cu_kv_seqlens_tensor)); int* padding_offset = bufferCast(*(this->padding_offset_tensor)); + int* encoder_padding_offset = bufferCast(*(this->encoder_padding_offset_tensor)); uint32_t* fmha_tile_counter_ptr = bufferCast(*(this->fmha_tile_counter_ptr_tensor)); rotary_inv_freq_buf = bufferCast(*(this->rotary_inv_freq_buf_tensor)); @@ -478,12 +481,14 @@ protected: decoderParams.seqQOffsets = cu_q_seqlens; decoderParams.seqKVOffsets = cu_kv_seqlens; decoderParams.paddingOffsets = padding_offset; + decoderParams.encoderPaddingOffsets = mCrossAttention ? encoder_padding_offset : nullptr; decoderParams.attentionMask = mCrossAttention ? 
nullptr : attention_mask; // manually set for cross attn // Fixed sequence length offset if not removing the padding (cu_q_seqlens[ii] = ii * seq_length). - decoderParams.seqQLengths = mCrossAttention ? encoder_input_lengths : q_seq_lengths; + decoderParams.seqQLengths = q_seq_lengths; decoderParams.seqKVLengths = mCrossAttention ? encoder_input_lengths : kv_seq_lengths; decoderParams.batchSize = batch_size; - decoderParams.maxQSeqLength = mCrossAttention ? cross_qkv_length : input_seq_length; + decoderParams.maxQSeqLength = input_seq_length; + decoderParams.maxEncoderQSeqLength = mCrossAttention ? cross_qkv_length : 0; decoderParams.removePadding = mRemovePadding; decoderParams.attentionWindowSize = cyclic_attention_window_size; decoderParams.sinkTokenLength = sink_token_length; diff --git a/cpp/tests/layers/lookaheadAlgorithmTest.cpp b/cpp/tests/layers/lookaheadAlgorithmTest.cpp index d3075c8f5..fc70b2bff 100644 --- a/cpp/tests/layers/lookaheadAlgorithmTest.cpp +++ b/cpp/tests/layers/lookaheadAlgorithmTest.cpp @@ -41,9 +41,9 @@ bool verifyAcceptOffsets(TensorPtr output, TensorPtr accepted, TensorPtr accepte BufferRange acceptedRange(*accepted); BufferRange offsetsRange(*acceptedOffsets); bool result = true; - for (SizeType32 i = 0; i < acceptedRange.size(); i++) + for (SizeType32 i = 1; i < acceptedRange.size(); i++) { - result &= outputRange[offsetsRange[i]] == acceptedRange[i]; + result &= outputRange[offsetsRange[i - 1] + 1] == acceptedRange[i]; } return result; } diff --git a/cpp/tests/layers/lookaheadDecodingLayerTest.cpp b/cpp/tests/layers/lookaheadDecodingLayerTest.cpp index 745d36aae..c66735a1f 100644 --- a/cpp/tests/layers/lookaheadDecodingLayerTest.cpp +++ b/cpp/tests/layers/lookaheadDecodingLayerTest.cpp @@ -26,6 +26,7 @@ #include "tensorrt_llm/common/logger.h" #include "tensorrt_llm/executor/executor.h" #include "tensorrt_llm/kernels/samplingTopKKernels.h" +#include "tensorrt_llm/layers/decodingParams.h" #include 
"tensorrt_llm/layers/lookaheadDecodingLayer.h" #include "tensorrt_llm/layers/lookaheadDecodingUtils.h" #include "tensorrt_llm/runtime/common.h" @@ -220,7 +221,6 @@ class LookaheadDecodingLayerTest : public testing::Test TensorPtr mAlgoConfigBatch; - TensorPtr mFinished; TensorPtr mOutputIds; TensorPtr mSequenceLengths; TensorPtr mProbs; @@ -230,14 +230,19 @@ class LookaheadDecodingLayerTest : public testing::Test TensorPtr mBatchSlots; TensorPtr mBatchSlotsMax; + TensorPtr mNewTokens; TensorPtr mNumNewTokens; - TensorPtr mKNumNewTokensCumSum; + TensorPtr mNumNewTokensCumSum; TensorPtr mPathsOffsets; TensorPtr mDraftLengths; TensorPtr mDraftTokens; - TensorPtr mDraftPosIds; TensorPtr mPackedMasks; TensorPtr mPackedMasksBool; + TensorPtr mGenerationLengths; + TensorPtr mGenerationLengthsMax; + TensorPtr mPositionOffsets; + TensorPtr mPositionIds; + TensorPtr mAttentionPackedMask; TensorPtr mInputTokensBatch; TensorPtr mPositionIdsBatch; @@ -279,9 +284,10 @@ void LookaheadDecodingLayerTest::allocateBuffers() TLLM_LOG_TRACE("%s start", __PRETTY_FUNCTION__); auto const maxBatchSize = mTestParam.maxBatchSize; auto const vocabSize = mAscii->getVocabSize(); + auto const maxBeamSize = 1; - SizeType32 maxNumNewTokens, maxDraftLen; - std::tie(mMaxTokensPerStep, maxNumNewTokens, maxDraftLen, std::ignore) + SizeType32 maxNumNewTokens, maxDraftLen, maxAcceptedDraftLen; + std::tie(mMaxTokensPerStep, maxNumNewTokens, maxDraftLen, maxAcceptedDraftLen) = executor::LookaheadDecodingConfig(mTestParam.maxW, mTestParam.maxN, mTestParam.maxG) .calculateSpeculativeResource(); // mMaxTokensPerStep = maxTokensPerStep; @@ -348,12 +354,11 @@ void LookaheadDecodingLayerTest::allocateBuffers() mAlgoConfigBatch = BufferManager::pinnedPool(ITensor::makeShape({maxBatchSize, 3}), nvinfer1::DataType::kINT32); - mFinished = BufferManager::pinnedPool(maxBatchShape1D, TRTDataType::value); mEndIds = BufferManager::pinnedPool(maxBatchShape1D, nvinfer1::DataType::kINT32); mTokensPerStep = 
BufferManager::pinnedPool(maxBatchShape1D, nvinfer1::DataType::kINT32); mOutputIds = BufferManager::pinnedPool( - ITensor::makeShape({maxBatchSize, mMaxSeqLen + mMaxTokensPerStep}), nvinfer1::DataType::kINT32); + ITensor::makeShape({maxBatchSize, maxBeamSize, mMaxSeqLen + mMaxTokensPerStep}), nvinfer1::DataType::kINT32); mSequenceLengths = BufferManager::pinnedPool(maxBatchShape1D, nvinfer1::DataType::kINT32); mProbs = BufferManager::pinnedPool( @@ -366,21 +371,27 @@ void LookaheadDecodingLayerTest::allocateBuffers() mPositionIdsBatch = BufferManager::pinnedPool(ITensor::makeShape({maxBatchSize, mMaxTokensPerStep}), nvinfer1::DataType::kINT32); + mNewTokens = BufferManager::pinnedPool( + ITensor::makeShape({mMaxTokensPerStep, maxBatchSize, 1}), nvinfer1::DataType::kINT32); mNumNewTokens = BufferManager::pinnedPool(maxBatchShape1D, nvinfer1::DataType::kINT32); mDraftLengths = BufferManager::pinnedPool(maxBatchShape1D, nvinfer1::DataType::kINT32); mDraftTokens = BufferManager::pinnedPool(ITensor::makeShape({maxBatchSize, maxDraftLen}), nvinfer1::DataType::kINT32); - mDraftPosIds - = BufferManager::pinnedPool(ITensor::makeShape({maxBatchSize, maxDraftLen}), nvinfer1::DataType::kINT32); - auto divUp32 = [](SizeType32 x) { return x / 32 + ((x % 32) ? 
1 : 0); }; - mPackedMasks = BufferManager::pinnedPool( - ITensor::makeShape({maxBatchSize, mMaxTokensPerStep, divUp32(mMaxTokensPerStep)}), nvinfer1::DataType::kINT32); + auto packedMaskShape = ITensor::makeShape( + {maxBatchSize, mMaxTokensPerStep, static_cast(common::divUp(mMaxTokensPerStep, 32))}); + mPackedMasks = BufferManager::pinnedPool(packedMaskShape, nvinfer1::DataType::kINT32); mPackedMasksBool = BufferManager::pinnedPool( ITensor::makeShape({maxBatchSize, mMaxTokensPerStep, mMaxTokensPerStep}), nvinfer1::DataType::kBOOL); - mKNumNewTokensCumSum - = BufferManager::pinnedPool(ITensor::makeShape({maxBatchSize + 1}), nvinfer1::DataType::kINT32); - mPathsOffsets - = BufferManager::pinnedPool(ITensor::makeShape({maxBatchSize, maxNumNewTokens}), nvinfer1::DataType::kINT32); + mNumNewTokensCumSum = BufferManager::pinnedPool(ITensor::makeShape({maxBatchSize + 1}), nvinfer1::DataType::kINT32); + mPathsOffsets = BufferManager::pinnedPool( + ITensor::makeShape({maxBatchSize, maxAcceptedDraftLen}), nvinfer1::DataType::kINT32); + mGenerationLengths = BufferManager::pinnedPool(maxBatchShape1D, nvinfer1::DataType::kINT32); + mGenerationLengthsMax = BufferManager::pinnedPool(maxBatchShape1D, nvinfer1::DataType::kINT32); + mPositionOffsets + = BufferManager::pinnedPool(ITensor::makeShape({maxBatchSize, mMaxTokensPerStep}), nvinfer1::DataType::kINT32); + mPositionIds + = BufferManager::pinnedPool(ITensor::makeShape({maxBatchSize, mMaxTokensPerStep}), nvinfer1::DataType::kINT32); + mAttentionPackedMask = BufferManager::pinnedPool(packedMaskShape, nvinfer1::DataType::kINT32); mBatchSlotsMax = BufferManager::pinnedPool(maxBatchShape1D, nvinfer1::DataType::kINT32); @@ -390,11 +401,9 @@ void LookaheadDecodingLayerTest::allocateBuffers() mBatchSlots = ITensor::slice(mBatchSlotsMax, 0, batchSize); - trk::invokeFill(*mFinished, uint8_t{0}, *mStream); trk::invokeFill(*mEndIds, mAscii->getEndToken(), *mStream); trk::invokeFill(*mOutputIds, int32_t{0}, *mStream); 
trk::invokeFill(*mSequenceLengths, int32_t{0}, *mStream); - // trk::invokeFill(*mGeneratedLengths, int32_t{0}, *mStream); trk::invokeFill(*mTokensPerStep, mMaxTokensPerStep, *mStream); TLLM_LOG_TRACE("%s stop", __PRETTY_FUNCTION__); @@ -419,7 +428,7 @@ void LookaheadDecodingLayerTest::newRequests(std::vector requestIds) TokenIdType contextToken = mOracle[gbi][len]; SizeType32 contextLen = len + 1; - BufferRange outputRange(*ITensor::at(mOutputIds, {gbi})); + BufferRange outputRange(*ITensor::at(mOutputIds, {gbi, 0})); for (auto& v : outputRange) { v = 0; @@ -430,7 +439,7 @@ void LookaheadDecodingLayerTest::newRequests(std::vector requestIds) BufferLocation(*mDraftLengths).at(gbi) = 0; BufferLocation(*mNumNewTokens).at(gbi) = 0; - mPrompt[gbi] = ITensor::slice(mOutputIds, {gbi, 0}, len + 1); + mPrompt[gbi] = ITensor::slice(mOutputIds, {gbi, 0, 0}, len + 1); for (auto& v : BufferRange(*mHistogram[gbi])) { @@ -455,6 +464,11 @@ void LookaheadDecodingLayerTest::newRequests(std::vector requestIds) setupParams->prompt.emplace_back(mPrompt[gbi]); setupParams->algoConfigs.emplace_back(mTestParam.w, mTestParam.n, mTestParam.g); PRINT_TOKENS(setupParams->prompt[bi]); + setupParams->generationLengths = mGenerationLengthsMax; + setupParams->actualGenerationLengths = mGenerationLengths; + setupParams->positionOffsets = mPositionOffsets; + // setupParams->outputs.positionIds = mPositionIds; + setupParams->attentionPackedMasks = mPackedMasks; } std::vector seed(requestIds.begin(), requestIds.end()); setupParams->randomSeed = std::make_optional(seed); @@ -463,6 +477,8 @@ void LookaheadDecodingLayerTest::newRequests(std::vector requestIds) PRINT_VALUES(mBatchSlotsMax); mDecoder->setup(requestSize, beamSize, newRequestSlots, setupParams); + PRINT_VALUES(mPositionOffsets); + batchSize += requestIds.size(); mBatchSlots = ITensor::slice(mBatchSlotsMax, 0, batchSize); TLLM_LOG_DEBUG("newwRequests mBatchSlots %s", D(mBatchSlots).values().c_str()); @@ -493,7 +509,7 @@ void 
LookaheadDecodingLayerTest::manageBatch(void) SizeType32 gbi = batchSlotsRange[bi]; SizeType32 nbi = newBatchSize; - TensorPtr theSequence = ITensor::at(mOutputIds, {gbi}); + TensorPtr theSequence = ITensor::at(mOutputIds, {gbi, 0}); BufferRange theSequenceRange(*theSequence); auto theSequenceLength = BufferRange(*mSequenceLengths)[gbi]; auto theNumNewTokens = BufferRange(*mNumNewTokens)[gbi]; @@ -520,19 +536,16 @@ void LookaheadDecodingLayerTest::manageBatch(void) } auto theDraftLen = BufferRange(*mDraftLengths)[gbi]; - BufferLocation(*mTokensPerStep).at(gbi) = 1 + theDraftLen; + auto theGenerationLength = BufferRange(*mGenerationLengths)[gbi]; + TLLM_CHECK_DEBUG_WITH_INFO( + theDraftLen + 1 == theGenerationLength, "%d + 1 == %d", theDraftLen, theGenerationLength); + BufferLocation(*mTokensPerStep).at(gbi) = theGenerationLength; - BufferLocation(*mPositionIdsBatch).at(nbi, 0) = theSequenceLength - 1; BufferLocation(*mInputTokensBatch).at(nbi, 0) = theSequenceRange[theSequenceLength - 1]; - - TLLM_LOG_DEBUG("W=%d, N=%d, G=%d, w=%d, n=%d, g=%d, draftLen = %d", mTestParam.maxW, mTestParam.maxN, - mTestParam.maxG, mTestParam.w, mTestParam.n, mTestParam.g, theDraftLen); - PRINT_VALUES(mInputTokensBatch); - mBufferManager->copy(*ITensor::slice(mDraftTokens, {gbi, 0}, theDraftLen), *ITensor::slice(mInputTokensBatch, {nbi, 1}, theDraftLen)); - mBufferManager->copy(*ITensor::slice(mDraftPosIds, {gbi, 0}, theDraftLen), - *ITensor::slice(mPositionIdsBatch, {nbi, 1}, theDraftLen)); + mBufferManager->copy(*ITensor::slice(mPositionIds, {gbi, 0}), *ITensor::slice(mPositionIdsBatch, {nbi, 0})); + BufferLocation(*mPositionIdsBatch).at(nbi, 0) = theSequenceLength - 1; TLLM_LOG_DEBUG("W=%d, N=%d, G=%d, w=%d, n=%d, g=%d, draftLen = %d", mTestParam.maxW, mTestParam.maxN, mTestParam.maxG, mTestParam.w, mTestParam.n, mTestParam.g, theDraftLen); @@ -599,16 +612,38 @@ void LookaheadDecodingLayerTest::llmForward(void) for (SizeType32 bi = 0; bi < batchSize; bi++) { auto gbi = 
BufferRange(*mBatchSlots)[bi]; + auto start = BufferRange(*mSequenceLengths)[gbi] - 1; auto len = BufferRange(*mTokensPerStep)[gbi]; + TLLM_LOG_DEBUG("LookaheadDecodingLayerTest::llmForward input len=%d", len); TensorPtr output = ITensor::slice(mProbs, {bi, 0}, len); TensorPtr golden = ITensor::slice(mGoldenSampledTokens, {gbi, 0}, len); - convertInt32ToBool(ITensor::at(mPackedMasksBool, {gbi}), ITensor::at(mPackedMasks, {gbi})); + BufferRange idRange(*ITensor::slice(mPositionIdsBatch, {bi, 0}, len)); + BufferRange offsetRange(*ITensor::slice(mPositionOffsets, {gbi, 0}, len)); + PRINT_VALUES(ITensor::slice(mPositionIdsBatch, {bi, 0})); + PRINT_VALUES(ITensor::slice(mPositionOffsets, {bi, 0})); + for (auto i = 0; i < idRange.size(); i++) + { + TLLM_CHECK(idRange[i] == start + offsetRange[i]); + } - mLlm[gbi]->forward(output, // - ITensor::slice(mInputTokensBatch, {bi, 0}, len), // - ITensor::slice(mPositionIdsBatch, {bi, 0}, len), // - ITensor::at(mPackedMasksBool, {gbi})); + if (false) + { + convertInt32ToBool(ITensor::at(mPackedMasksBool, {gbi}), ITensor::at(mPackedMasks, {gbi})); + mLlm[gbi]->forward(output, // + ITensor::slice(mInputTokensBatch, {bi, 0}, len), // + ITensor::slice(mPositionIdsBatch, {bi, 0}, len), // + ITensor::at(mPackedMasksBool, {gbi})); + } + else + { + convertInt32ToBool(ITensor::at(mPackedMasksBool, {gbi}), ITensor::at(mPackedMasks, {gbi})); + mLlm[gbi]->forward(output, // + start, // + ITensor::slice(mInputTokensBatch, {bi, 0}, len), // + ITensor::slice(mPositionOffsets, {gbi, 0}, len), // + ITensor::at(mPackedMasksBool, {gbi})); + } mAscii->logitsToTensor(golden, output); TLLM_LOG_DEBUG("batch[%d] LLM golden: '%s'", gbi, D(golden).tokens().c_str()); @@ -627,21 +662,25 @@ void LookaheadDecodingLayerTest::decodeForward(void) auto inputParams = std::make_shared(mEndIds, mBatchSlots); inputParams->localBatchSize = batchSize; inputParams->logits = ITensor::slice(mProbs, 0, batchSize); - inputParams->finished = mFinished; // TODO(liweim) ask 
finished protocol + inputParams->batchSlots = mBatchSlots; inputParams->curTokensPerStep = mTokensPerStep; - auto outputParams = std::make_shared(mOutputIds); + auto outputParams = std::make_shared(mOutputIds); PRINT_VALUES(mSequenceLengths); outputParams->sequenceLength = mSequenceLengths; - outputParams->finished = mFinished; outputParams->nextDraftLengths = mDraftLengths; outputParams->nextDraftTokens = mDraftTokens; - outputParams->nextDraftPosIds = mDraftPosIds; outputParams->packedMasks = mPackedMasks; outputParams->numNewTokens = mNumNewTokens; - outputParams->numNewTokensCumSum = mKNumNewTokensCumSum; + outputParams->newTokens = mNewTokens; + outputParams->numNewTokensCumSum = mNumNewTokensCumSum; outputParams->pathsOffsets = mPathsOffsets; + outputParams->generationLengths = mGenerationLengthsMax; + outputParams->actualGenerationLengths = mGenerationLengths; + outputParams->positionOffsets = mPositionOffsets; + outputParams->positionIds = mPositionIds; + outputParams->packedMasks = mPackedMasks; PRINT_VALUES(mTokensPerStep); @@ -663,28 +702,42 @@ void LookaheadDecodingLayerTest::verifyDecode(void) { auto gbi = BufferRange(*mBatchSlots)[bi]; auto len = BufferRange(*mTokensPerStep)[gbi]; - TensorPtr golden = ITensor::slice(mGoldenSampledTokens, {gbi, 0}, len); auto sequenceLength = BufferLocation(*mSequenceLengths).at(gbi); - auto numNewTokens = BufferLocation(*mNumNewTokens).at(gbi); - TensorPtr newTokens = ITensor::slice(mOutputIds, {gbi, sequenceLength - numNewTokens}, numNewTokens); - TensorPtr pathOffsets = ITensor::slice(mPathsOffsets, {gbi, 0}, numNewTokens); - BufferRange goldenRange(*golden); - BufferRange newTokensRange(*newTokens); - BufferRange offsetsRange(*pathOffsets); - for (SizeType32 i = 0; i < newTokensRange.size(); i++) + auto draftLength = BufferLocation(*mDraftLengths).at(gbi); + auto generationLength = BufferLocation(*mGenerationLengths).at(gbi); + BufferRange posOffsetRange(*ITensor::slice(mPositionOffsets, {gbi, 0}, 
generationLength)); + BufferRange posIdRange(*ITensor::slice(mPositionIds, {gbi, 0}, generationLength)); + TLLM_LOG_DEBUG("generationLength = %d, draftLength = %d", generationLength, draftLength); + TLLM_CHECK(draftLength + 1 == generationLength); + TLLM_CHECK(posOffsetRange[0] == 0); + TLLM_CHECK(posIdRange[0] == sequenceLength - 1); + for (SizeType32 i = 0; i < posIdRange.size(); i++) { - TLLM_CHECK(goldenRange[offsetsRange[i]] == newTokensRange[i]); + TLLM_CHECK(posIdRange[i] == posOffsetRange[i] + sequenceLength - 1); } } - BufferRange cumSumRange(*mKNumNewTokensCumSum); - SizeType32 sum = 0; - TLLM_CHECK(cumSumRange[0] == sum); + + BufferRange cumSumRange(*mNumNewTokensCumSum); + BufferRange pathOffsetsRange(*mPathsOffsets); + PRINT_VALUES(mNumNewTokensCumSum); for (SizeType32 gbi = 0; gbi < mTestParam.maxBatchSize; gbi++) { + SizeType32 pathOffsetBegin = cumSumRange[gbi]; + SizeType32 pathOffsetEnd = cumSumRange[gbi + 1]; + TensorPtr golden = ITensor::at(mGoldenSampledTokens, {gbi}); + auto sequenceLength = BufferLocation(*mSequenceLengths).at(gbi); auto numNewTokens = BufferLocation(*mNumNewTokens).at(gbi); - sum += numNewTokens; - TLLM_CHECK(cumSumRange[gbi + 1] == sum); + TensorPtr newTokens = ITensor::slice(mOutputIds, {gbi, 0, sequenceLength - numNewTokens}, numNewTokens); + BufferRange goldenRange(*ITensor::at(mGoldenSampledTokens, {gbi})); + BufferRange newTokensRange( + *ITensor::slice(mOutputIds, {gbi, 0, sequenceLength - numNewTokens}, numNewTokens)); + + SizeType32 ni = 1; + for (SizeType32 poi = pathOffsetBegin; poi < pathOffsetEnd; poi++) + { + TLLM_CHECK(goldenRange[pathOffsetsRange[poi] + 1] == newTokensRange[ni++]); + } } } diff --git a/cpp/tests/layers/randomLlm.cpp b/cpp/tests/layers/randomLlm.cpp index f644ae797..2116186a6 100644 --- a/cpp/tests/layers/randomLlm.cpp +++ b/cpp/tests/layers/randomLlm.cpp @@ -206,6 +206,19 @@ bool RandomLlm::verify(SizeType32 const offset, TensorConstPtr const& script) co return result; } +void 
RandomLlm::forward(TensorPtr const& output, runtime::SizeType32 startId, TensorConstPtr const& input, + TensorConstPtr const& offsets, TensorConstPtr const mask) const +{ + TensorPtr posIds = BufferManager::cpu(input->getShape(), nvinfer1::DataType::kINT32); + BufferRange idRange(*posIds); + BufferRange offsetRange(*offsets); + for (auto i = 0; i < idRange.size(); i++) + { + idRange[i] = startId + offsetRange[i]; + } + forward(output, input, posIds, mask); +} + void RandomLlm::forward(TensorPtr const& output, TensorConstPtr const& input, TensorConstPtr const& position, TensorConstPtr const mask) const { diff --git a/cpp/tests/layers/randomLlm.h b/cpp/tests/layers/randomLlm.h index 191aa7d1e..a6e898bae 100644 --- a/cpp/tests/layers/randomLlm.h +++ b/cpp/tests/layers/randomLlm.h @@ -109,6 +109,8 @@ class RandomLlm } // simulate forward in a LLM. + void forward(TensorPtr const& output, runtime::SizeType32 startId, TensorConstPtr const& input, + TensorConstPtr const& offsets, TensorConstPtr const mask = nullptr) const; void forward(TensorPtr const& output, TensorConstPtr const& input, TensorConstPtr const& position, TensorConstPtr const mask = nullptr) const; //! 
set inout[i] invalid if mask[i]==false; diff --git a/cpp/tests/resources/data/test_model_lora_config.json b/cpp/tests/resources/data/test_model_lora_config.json index 18affde58..73a598d01 100644 --- a/cpp/tests/resources/data/test_model_lora_config.json +++ b/cpp/tests/resources/data/test_model_lora_config.json @@ -160,7 +160,6 @@ "streamingllm": false }, "use_strip_plan": false, - "max_encoder_input_len": 1024, - "use_fused_mlp": false + "max_encoder_input_len": 1024 } } diff --git a/cpp/tests/resources/scripts/build_chatglm_engines.py b/cpp/tests/resources/scripts/build_chatglm_engines.py index cabe6107c..39829f25c 100644 --- a/cpp/tests/resources/scripts/build_chatglm_engines.py +++ b/cpp/tests/resources/scripts/build_chatglm_engines.py @@ -132,7 +132,7 @@ def build_engines(model_cache: typing.Optional[str] = None, model_spec_obj = model_spec.ModelSpec('input_tokens.npy', _tb.DataType.HALF) - model_spec_obj.set_kv_cache_type(model_spec.KVCacheType.CONTINUOUS) + model_spec_obj.set_kv_cache_type(_tb.KVCacheType.CONTINUOUS) model_spec_obj.use_gpt_plugin() engine_dir = Path( model_dir @@ -142,7 +142,7 @@ def build_engines(model_cache: typing.Optional[str] = None, build_engine(ckpt_dir, engine_dir, False, is_chatglm_6b_or_glm_10b) model_spec_obj.use_packed_input() - model_spec_obj.set_kv_cache_type(model_spec.KVCacheType.PAGED) + model_spec_obj.set_kv_cache_type(_tb.KVCacheType.PAGED) engine_dir = Path( model_dir ) / "rt_engine" / model_name / model_spec_obj.get_model_path( diff --git a/cpp/tests/resources/scripts/build_engines_utils.py b/cpp/tests/resources/scripts/build_engines_utils.py index 46fcce51b..2c57e5a3d 100644 --- a/cpp/tests/resources/scripts/build_engines_utils.py +++ b/cpp/tests/resources/scripts/build_engines_utils.py @@ -65,10 +65,17 @@ def wincopy(source: str, dest: str, isdir: bool, cwd=None) -> None: # Helper function to locate model_spec module. 
-def init_model_spec_module(): +def init_model_spec_module(force_init_trtllm_bindings=True): import os + # model spec depends on tensorrt_llm bindings. This will trigger initialization of bindings. # Rely on unique built model_spec to locate the module. + if force_init_trtllm_bindings: + import tensorrt_llm.bindings as _tb + + # Ensure the KVCacheType enum is available. + assert _tb.KVCacheType('PAGED') is not None + cpp_root_dir = _pl.Path(__file__).parent.resolve().parent.parent.parent found_locations = [] diff --git a/cpp/tests/resources/scripts/build_gpt_engines.py b/cpp/tests/resources/scripts/build_gpt_engines.py index 02bfa0266..6cdf1c7bf 100755 --- a/cpp/tests/resources/scripts/build_gpt_engines.py +++ b/cpp/tests/resources/scripts/build_gpt_engines.py @@ -202,23 +202,21 @@ def build_engines(model_cache: Optional[str] = None, no_kv_cache_args = ['--kv_cache_type=disabled'] def get_ifb_args(kv_cache_type): - if kv_cache_type == model_spec.KVCacheType.DISABLED: + if kv_cache_type == _tb.KVCacheType.DISABLED: return ifb_base_args + no_kv_cache_args - elif kv_cache_type == model_spec.KVCacheType.PAGED: + elif kv_cache_type == _tb.KVCacheType.PAGED: return ifb_base_args + paged_kv_cache_args else: assert False, f"Unsupported kv_cache_type: {kv_cache_type}" model_spec_obj = model_spec.ModelSpec(input_file, _tb.DataType.HALF) model_spec_obj.use_gpt_plugin() - model_spec_obj.set_kv_cache_type(model_spec.KVCacheType.PAGED) + model_spec_obj.set_kv_cache_type(_tb.KVCacheType.PAGED) model_spec_obj.use_packed_input() model_spec_current = model_spec_obj.__copy__() - for kv_cache_type in [ - model_spec.KVCacheType.DISABLED, model_spec.KVCacheType.PAGED - ]: + for kv_cache_type in [_tb.KVCacheType.DISABLED, _tb.KVCacheType.PAGED]: model_spec_current.set_kv_cache_type(kv_cache_type) build_engine( str(fp16_ckpt_dir), @@ -235,7 +233,7 @@ def get_ifb_args(kv_cache_type): str(engine_dir / model_spec_current.get_model_path() / tp_pp_dir), f'--max_draft_len={max_draft_tokens}', 
'--speculative_decoding_mode=draft_tokens_external', - *get_ifb_args(model_spec.KVCacheType.PAGED)) + *get_ifb_args(_tb.KVCacheType.PAGED)) model_spec_current = model_spec_obj.__copy__() model_spec_current.use_multiple_profiles() @@ -243,8 +241,7 @@ def get_ifb_args(kv_cache_type): build_engine( str(fp16_ckpt_dir), str(engine_dir / model_spec_current.get_model_path() / tp_pp_dir), - '--multiple_profiles=enable', - *get_ifb_args(model_spec.KVCacheType.PAGED)) + '--multiple_profiles=enable', *get_ifb_args(_tb.KVCacheType.PAGED)) model_spec_current = model_spec_obj.__copy__() max_input_len = 128 @@ -253,7 +250,7 @@ def get_ifb_args(kv_cache_type): build_engine(str(fp16_ckpt_dir), str(engine_dir / model_spec_current.get_model_path() / tp_pp_dir), - *get_ifb_args(model_spec.KVCacheType.PAGED), + *get_ifb_args(_tb.KVCacheType.PAGED), max_input_len=max_input_len) # Build the target model with return accepted token logits @@ -270,8 +267,7 @@ def get_ifb_args(kv_cache_type): str(engine_dir / model_spec_current.get_model_path() / tp_pp_dir), f'--max_draft_len={max_draft_len}', '--speculative_decoding_mode=draft_tokens_external', - '--gather_generation_logits', - *get_ifb_args(model_spec.KVCacheType.PAGED)) + '--gather_generation_logits', *get_ifb_args(_tb.KVCacheType.PAGED)) # We build almost the same engine twice. 
But this engine has gather_all_token_logits # to extract logits from python runtime and uses context FMHA for generation to match draft model executions, @@ -283,19 +279,7 @@ def get_ifb_args(kv_cache_type): build_engine( str(fp16_ckpt_dir), str(engine_dir / model_spec_current.get_model_path() / tp_pp_dir), - '--gather_all_token_logits', - *get_ifb_args(model_spec.KVCacheType.PAGED)) - - model_spec_current = model_spec_obj.__copy__() - model_spec_current.use_look_ahead_decoding() - max_draft_len = 64 - model_spec_current.set_draft_tokens(max_draft_len) - build_engine( - str(fp16_ckpt_dir), - str(engine_dir / model_spec_current.get_model_path() / tp_pp_dir), - f'--max_draft_len={max_draft_len}', - '--speculative_decoding_mode=lookahead_decoding', - *get_ifb_args(model_spec.KVCacheType.PAGED)) + '--gather_all_token_logits', *get_ifb_args(_tb.KVCacheType.PAGED)) # build engine with lora enabled model_spec_current = model_spec_obj.__copy__() @@ -304,7 +288,7 @@ def get_ifb_args(kv_cache_type): str(fp16_ckpt_dir), str(engine_dir / model_spec_current.get_model_path() / tp_pp_dir), "--lora_target_modules=attn_qkv", '--lora_plugin=float16', - *get_ifb_args(model_spec.KVCacheType.PAGED)) + *get_ifb_args(_tb.KVCacheType.PAGED)) if model_cache: llm_datasets_root = Path(model_cache) / "datasets" @@ -326,9 +310,7 @@ def get_ifb_args(kv_cache_type): model_spec_current.use_packed_input() model_spec_current.set_quant_method(model_spec.QuantMethod.SMOOTH_QUANT) - for kv_cache_type in [ - model_spec.KVCacheType.DISABLED, model_spec.KVCacheType.PAGED - ]: + for kv_cache_type in [_tb.KVCacheType.DISABLED, _tb.KVCacheType.PAGED]: model_spec_current.set_kv_cache_type(kv_cache_type) build_engine( str(fp16_sq_ckpt_dir), diff --git a/cpp/tests/resources/scripts/build_gptj_engines.py b/cpp/tests/resources/scripts/build_gptj_engines.py index 96410da88..65d1a15dd 100755 --- a/cpp/tests/resources/scripts/build_gptj_engines.py +++ b/cpp/tests/resources/scripts/build_gptj_engines.py @@ -133,7 
+133,7 @@ def build_engines(model_cache: _tp.Optional[str] = None, only_fp8=False): get_ckpt_with_modelopt_quant(hf_dir, fp8_ckpt_path, model_cache) model_spec_obj = model_spec.ModelSpec(input_file, _tb.DataType.FP8) model_spec_obj.use_gpt_plugin() - model_spec_obj.set_kv_cache_type(model_spec.KVCacheType.PAGED) + model_spec_obj.set_kv_cache_type(_tb.KVCacheType.PAGED) model_spec_obj.use_packed_input() build_engine(fp8_ckpt_path, engine_dir / model_spec_obj.get_model_path() / tp_pp_dir, @@ -146,7 +146,7 @@ def build_engines(model_cache: _tp.Optional[str] = None, only_fp8=False): print("\nBuilding fp16-plugin engine") model_spec_obj = model_spec.ModelSpec(input_file, _tb.DataType.HALF) model_spec_obj.use_gpt_plugin() - model_spec_obj.set_kv_cache_type(model_spec.KVCacheType.CONTINUOUS) + model_spec_obj.set_kv_cache_type(_tb.KVCacheType.CONTINUOUS) build_engine(fp16_ckpt_path, engine_dir / model_spec_obj.get_model_path() / tp_pp_dir, @@ -163,7 +163,7 @@ def build_engines(model_cache: _tp.Optional[str] = None, only_fp8=False): '--remove_input_padding=enable', "--context_fmha=disable") print("\nBuilding fp16-plugin-packed-paged engine") - model_spec_obj.set_kv_cache_type(model_spec.KVCacheType.PAGED) + model_spec_obj.set_kv_cache_type(_tb.KVCacheType.PAGED) build_engine(fp16_ckpt_path, engine_dir / model_spec_obj.get_model_path() / tp_pp_dir, '--gpt_attention_plugin=float16', diff --git a/cpp/tests/resources/scripts/build_llama_engines.py b/cpp/tests/resources/scripts/build_llama_engines.py index 48f4b2c21..c9a5c8494 100644 --- a/cpp/tests/resources/scripts/build_llama_engines.py +++ b/cpp/tests/resources/scripts/build_llama_engines.py @@ -27,17 +27,18 @@ import tensorrt_llm.bindings as _tb -def build_engine(weight_dir: _pl.Path, engine_dir: _pl.Path, *args): +def build_engine(weight_dir: _pl.Path, engine_dir: _pl.Path, convert_extra_args, + build_extra_args): ckpt_dir = engine_dir / 'ckpt' - covert_cmd = [_sys.executable, "examples/llama/convert_checkpoint.py" - ] + 
([f'--model_dir={weight_dir}'] if weight_dir else []) + [ - f'--output_dir={ckpt_dir}', - '--dtype=float16', - ] + list(args) + convert_cmd = [_sys.executable, "examples/llama/convert_checkpoint.py" + ] + ([f'--model_dir={weight_dir}'] if weight_dir else []) + [ + f'--output_dir={ckpt_dir}', + '--dtype=float16', + ] + convert_extra_args - run_command(covert_cmd) + run_command(convert_cmd) build_args = [ 'trtllm-build', @@ -52,7 +53,7 @@ def build_engine(weight_dir: _pl.Path, engine_dir: _pl.Path, *args): '--log_level=error', '--paged_kv_cache=enable', '--remove_input_padding=enable', - ] + ] + build_extra_args run_command(build_args) @@ -83,7 +84,7 @@ def build_engines(model_cache: str, only_multi_gpu: bool): model_spec_obj = model_spec.ModelSpec('input_tokens.npy', _tb.DataType.HALF) model_spec_obj.use_gpt_plugin() - model_spec_obj.set_kv_cache_type(model_spec.KVCacheType.PAGED) + model_spec_obj.set_kv_cache_type(_tb.KVCacheType.PAGED) model_spec_obj.use_packed_input() tp_pp_sizes = [(1, 1)] @@ -97,7 +98,16 @@ def build_engines(model_cache: str, only_multi_gpu: bool): build_engine(hf_dir, engine_dir / model_spec_obj.get_model_path() / tp_pp_dir, - f'--tp_size={tp_size}', f'--pp_size={pp_size}') + [f'--tp_size={tp_size}', f'--pp_size={pp_size}'], []) + + ## build lookahead engine + model_spec_obj.use_lookahead_decoding() + build_engine(hf_dir, + engine_dir / model_spec_obj.get_model_path() / 'tp1-pp1-gpu', + [], [ + '--max_draft_len=39', + '--speculative_decoding_mode=lookahead_decoding' + ]) print("Done.") diff --git a/cpp/tests/resources/scripts/build_mamba_engines.py b/cpp/tests/resources/scripts/build_mamba_engines.py index b920ab96f..a3a7fed18 100644 --- a/cpp/tests/resources/scripts/build_mamba_engines.py +++ b/cpp/tests/resources/scripts/build_mamba_engines.py @@ -112,7 +112,7 @@ def build_engines(model_cache: _tp.Optional[str] = None): ckpt_dir = models_dir / 'rt_ckpt' / model_name engine_dir = models_dir / 'rt_engine' / model_name model_spec_obj = 
model_spec.ModelSpec('input_tokens.npy', _tb.DataType.HALF) - model_spec_obj.set_kv_cache_type(model_spec.KVCacheType.CONTINUOUS) + model_spec_obj.set_kv_cache_type(_tb.KVCacheType.CONTINUOUS) model_spec_obj.use_tensor_parallelism(tp_size) model_spec_obj.use_pipeline_parallelism(pp_size) @@ -132,7 +132,7 @@ def build_engines(model_cache: _tp.Optional[str] = None): engine_dir / model_spec_obj.get_model_path() / tp_pp_dir, '--remove_input_padding=enable', '--paged_state=disable') print("\nBuilding fp16-plugin-packed-paged engine") - model_spec_obj.set_kv_cache_type(model_spec.KVCacheType.PAGED) + model_spec_obj.set_kv_cache_type(_tb.KVCacheType.PAGED) build_engine(hf_dir, ckpt_dir / model_spec_obj.get_model_path() / tp_pp_dir, engine_dir / model_spec_obj.get_model_path() / tp_pp_dir, '--remove_input_padding=enable', '--paged_state=enable') diff --git a/cpp/tests/resources/scripts/build_medusa_engines.py b/cpp/tests/resources/scripts/build_medusa_engines.py index b6f52c30b..401f7ac0d 100755 --- a/cpp/tests/resources/scripts/build_medusa_engines.py +++ b/cpp/tests/resources/scripts/build_medusa_engines.py @@ -92,7 +92,7 @@ def build_engines(model_cache: str): model_spec_obj = model_spec.ModelSpec('input_tokens.npy', _tb.DataType.HALF) model_spec_obj.use_gpt_plugin() - model_spec_obj.set_kv_cache_type(model_spec.KVCacheType.PAGED) + model_spec_obj.set_kv_cache_type(_tb.KVCacheType.PAGED) model_spec_obj.use_packed_input() model_spec_obj.use_medusa() diff --git a/cpp/tests/resources/scripts/build_recurrentgemma_engines.py b/cpp/tests/resources/scripts/build_recurrentgemma_engines.py index d912455dc..495c849d2 100644 --- a/cpp/tests/resources/scripts/build_recurrentgemma_engines.py +++ b/cpp/tests/resources/scripts/build_recurrentgemma_engines.py @@ -114,7 +114,7 @@ def build_engines(model_cache: _tp.Optional[str] = None): model_spec_obj = model_spec.ModelSpec(input_file, _tb.DataType.HALF) model_spec_obj.use_gpt_plugin() model_spec_obj.use_packed_input() - 
model_spec_obj.set_kv_cache_type(model_spec.KVCacheType.PAGED) + model_spec_obj.set_kv_cache_type(_tb.KVCacheType.PAGED) print("\nBuilding fp16-plugin-packed-paged engine") build_engine(hf_dir, ckpt_dir / model_spec_obj.get_model_path() / tp_pp_dir, diff --git a/cpp/tests/resources/scripts/generate_expected_chatglm_output.py b/cpp/tests/resources/scripts/generate_expected_chatglm_output.py index 000e1e7b1..d90fa1877 100755 --- a/cpp/tests/resources/scripts/generate_expected_chatglm_output.py +++ b/cpp/tests/resources/scripts/generate_expected_chatglm_output.py @@ -55,9 +55,9 @@ def generate_output( model_spec_obj_list = [ model_spec.ModelSpec( input_file, _tb.DataType.HALF).use_gpt_plugin().set_kv_cache_type( - model_spec.KVCacheType.CONTINUOUS), + _tb.KVCacheType.CONTINUOUS), model_spec.ModelSpec(input_file, _tb.DataType.HALF).use_gpt_plugin(). - use_packed_input().set_kv_cache_type(model_spec.KVCacheType.PAGED), + use_packed_input().set_kv_cache_type(_tb.KVCacheType.PAGED), ] for model_spec_obj in model_spec_obj_list: diff --git a/cpp/tests/resources/scripts/generate_expected_gpt_output.py b/cpp/tests/resources/scripts/generate_expected_gpt_output.py index 020d70a9f..4037a236f 100755 --- a/cpp/tests/resources/scripts/generate_expected_gpt_output.py +++ b/cpp/tests/resources/scripts/generate_expected_gpt_output.py @@ -112,7 +112,7 @@ def generate_outputs(num_beams): print('Generating GPT2 FP32 outputs') model_spec_obj = model_spec.ModelSpec(input_name, _tb.DataType.FLOAT) - model_spec_obj.set_kv_cache_type(model_spec.KVCacheType.CONTINUOUS) + model_spec_obj.set_kv_cache_type(_tb.KVCacheType.CONTINUOUS) if num_beams == 1: generate_output(engine=model_spec_obj.get_model_path(), num_beams=num_beams, @@ -126,7 +126,7 @@ def generate_outputs(num_beams): print('Generating GPT2 FP16 outputs') model_spec_obj = model_spec.ModelSpec(input_name, _tb.DataType.HALF) - model_spec_obj.set_kv_cache_type(model_spec.KVCacheType.CONTINUOUS) + 
model_spec_obj.set_kv_cache_type(_tb.KVCacheType.CONTINUOUS) if num_beams == 1: generate_output(engine=model_spec_obj.get_model_path(), num_beams=num_beams, @@ -142,7 +142,7 @@ def generate_outputs(num_beams): num_beams=num_beams, input_name=input_name, model_spec_obj=model_spec_obj) - model_spec_obj.set_kv_cache_type(model_spec.KVCacheType.PAGED) + model_spec_obj.set_kv_cache_type(_tb.KVCacheType.PAGED) model_spec_obj.gather_logits() generate_output(engine=model_spec_obj.get_model_path(), num_beams=num_beams, @@ -163,7 +163,7 @@ def generate_outputs(num_beams): model_spec_obj = model_spec.ModelSpec(input_name, _tb.DataType.HALF) model_spec_obj.use_gpt_plugin() - model_spec_obj.set_kv_cache_type(model_spec.KVCacheType.PAGED) + model_spec_obj.set_kv_cache_type(_tb.KVCacheType.PAGED) model_spec_obj.use_packed_input() generate_output(engine=model_spec_obj.get_model_path(), num_beams=num_beams, @@ -183,7 +183,7 @@ def generate_outputs(num_beams): model_spec_obj = model_spec.ModelSpec(input_name_long, _tb.DataType.HALF) model_spec_obj.use_gpt_plugin() model_spec_obj.use_packed_input() - model_spec_obj.set_kv_cache_type(model_spec.KVCacheType.PAGED) + model_spec_obj.set_kv_cache_type(_tb.KVCacheType.PAGED) generate_output(engine=model_spec_obj.get_model_path(), num_beams=num_beams, input_name=input_name_long, @@ -193,7 +193,7 @@ def generate_outputs(num_beams): model_spec_obj = model_spec.ModelSpec(input_name, _tb.DataType.HALF) model_spec_obj.use_gpt_plugin() model_spec_obj.use_packed_input() - model_spec_obj.set_kv_cache_type(model_spec.KVCacheType.PAGED) + model_spec_obj.set_kv_cache_type(_tb.KVCacheType.PAGED) model_spec_obj.set_quant_method(model_spec.QuantMethod.SMOOTH_QUANT) generate_output(engine=model_spec_obj.get_model_path(), num_beams=num_beams, diff --git a/cpp/tests/resources/scripts/generate_expected_gptj_output.py b/cpp/tests/resources/scripts/generate_expected_gptj_output.py index b18291535..e10c8e42f 100755 --- 
a/cpp/tests/resources/scripts/generate_expected_gptj_output.py +++ b/cpp/tests/resources/scripts/generate_expected_gptj_output.py @@ -70,7 +70,7 @@ def generate_outputs(only_fp8, num_beams): if only_fp8 and num_beams == 1: model_spec_obj = model_spec.ModelSpec(input_file, _tb.DataType.FP8) model_spec_obj.use_gpt_plugin() - model_spec_obj.set_kv_cache_type(model_spec.KVCacheType.PAGED) + model_spec_obj.set_kv_cache_type(_tb.KVCacheType.PAGED) model_spec_obj.use_packed_input() print('Generating GPT-J FP8-kv-cache outputs') @@ -81,7 +81,7 @@ def generate_outputs(only_fp8, num_beams): print('Generating GPT-J FP16 outputs') model_spec_obj = model_spec.ModelSpec(input_file, _tb.DataType.HALF) model_spec_obj.use_gpt_plugin() - model_spec_obj.set_kv_cache_type(model_spec.KVCacheType.CONTINUOUS) + model_spec_obj.set_kv_cache_type(_tb.KVCacheType.CONTINUOUS) generate_output(engine=model_spec_obj.get_model_path(), num_beams=num_beams, model_spec_obj=model_spec_obj) @@ -91,7 +91,7 @@ def generate_outputs(only_fp8, num_beams): num_beams=num_beams, model_spec_obj=model_spec_obj) - model_spec_obj.set_kv_cache_type(model_spec.KVCacheType.PAGED) + model_spec_obj.set_kv_cache_type(_tb.KVCacheType.PAGED) generate_output(engine=model_spec_obj.get_model_path(), num_beams=num_beams, model_spec_obj=model_spec_obj) diff --git a/cpp/tests/resources/scripts/generate_expected_llama_output.py b/cpp/tests/resources/scripts/generate_expected_llama_output.py index d0e51faba..08d904201 100644 --- a/cpp/tests/resources/scripts/generate_expected_llama_output.py +++ b/cpp/tests/resources/scripts/generate_expected_llama_output.py @@ -79,7 +79,7 @@ def generate_outputs(num_beams, only_multi_gpu=False): ) model_spec_obj = model_spec.ModelSpec('input_tokens.npy', _tb.DataType.HALF) model_spec_obj.use_gpt_plugin() - model_spec_obj.set_kv_cache_type(model_spec.KVCacheType.PAGED) + model_spec_obj.set_kv_cache_type(_tb.KVCacheType.PAGED) model_spec_obj.use_packed_input() for tp_size, pp_size in tp_pp_sizes: 
diff --git a/cpp/tests/resources/scripts/generate_expected_mamba_output.py b/cpp/tests/resources/scripts/generate_expected_mamba_output.py index cdfeb2151..080da0ade 100644 --- a/cpp/tests/resources/scripts/generate_expected_mamba_output.py +++ b/cpp/tests/resources/scripts/generate_expected_mamba_output.py @@ -74,7 +74,7 @@ def generate_outputs(num_beams): print('Generating Mamba FP16 outputs') input_name = 'input_tokens.npy' model_spec_obj = model_spec.ModelSpec(input_name, _tb.DataType.HALF) - model_spec_obj.set_kv_cache_type(model_spec.KVCacheType.CONTINUOUS) + model_spec_obj.set_kv_cache_type(_tb.KVCacheType.CONTINUOUS) generate_output(engine=model_spec_obj.get_model_path(), num_beams=num_beams, @@ -96,7 +96,7 @@ def generate_outputs(num_beams): model_spec_obj=model_spec_obj) print('Generating Mamba FP16-plugin-packed-paged outputs') - model_spec_obj.set_kv_cache_type(model_spec.KVCacheType.PAGED) + model_spec_obj.set_kv_cache_type(_tb.KVCacheType.PAGED) generate_output(engine=model_spec_obj.get_model_path(), num_beams=num_beams, input_name=input_name, diff --git a/cpp/tests/resources/scripts/generate_expected_medusa_output.py b/cpp/tests/resources/scripts/generate_expected_medusa_output.py index 4703edd9d..e87a6ab57 100755 --- a/cpp/tests/resources/scripts/generate_expected_medusa_output.py +++ b/cpp/tests/resources/scripts/generate_expected_medusa_output.py @@ -68,7 +68,7 @@ def generate_outputs(): model_spec_obj.use_gpt_plugin() model_spec_obj.set_max_output_length(max_output_len) model_spec_obj.use_packed_input() - model_spec_obj.set_kv_cache_type(model_spec.KVCacheType.PAGED) + model_spec_obj.set_kv_cache_type(_tb.KVCacheType.PAGED) model_spec_obj.use_medusa() generate_output(engine=model_spec_obj.get_model_path(), diff --git a/cpp/tests/resources/scripts/generate_expected_recurrentgemma_output.py b/cpp/tests/resources/scripts/generate_expected_recurrentgemma_output.py index 66fdc6b74..9fa89fc32 100644 --- 
a/cpp/tests/resources/scripts/generate_expected_recurrentgemma_output.py +++ b/cpp/tests/resources/scripts/generate_expected_recurrentgemma_output.py @@ -74,7 +74,7 @@ def generate_outputs(num_beams): input_file = 'input_tokens.npy' model_spec_obj = model_spec.ModelSpec(input_file, _tb.DataType.HALF) model_spec_obj.use_gpt_plugin() - model_spec_obj.set_kv_cache_type(model_spec.KVCacheType.PAGED) + model_spec_obj.set_kv_cache_type(_tb.KVCacheType.PAGED) model_spec_obj.use_packed_input() print('Generating RecurrentGemma FP16-plugin-packed-paged outputs') diff --git a/cpp/tests/resources/scripts/test_cpp.py b/cpp/tests/resources/scripts/test_cpp.py index bb02e5468..6b3a50d61 100755 --- a/cpp/tests/resources/scripts/test_cpp.py +++ b/cpp/tests/resources/scripts/test_cpp.py @@ -406,6 +406,9 @@ def prepare_model_tests(model_name: str, python_exe, str(scripts_dir / f"build_{model_name}_engines.py") ] + model_cache_arg + only_fp8_arg + only_multi_gpu_arg + enc_dec_model_name_arg + + if model_name in ['gpt']: + build_engines += ['--clean'] run_command(build_engines, cwd=root_dir, env=model_env, timeout=1800) model_env["PYTHONPATH"] = "examples" @@ -415,6 +418,10 @@ def prepare_model_tests(model_name: str, ] + only_fp8_arg + only_multi_gpu_arg + enc_dec_model_name_arg if "enc_dec" in model_name: generate_expected_output += model_cache_arg + + if model_name in ['gpt']: + generate_expected_output += ['--clean'] + if only_multi_gpu_arg and model_name != 'enc_dec': for world_size in (2, 4): generate_command = [ @@ -543,6 +550,16 @@ def run_multi_gpu_tests(build_dir: _pl.Path, timeout=1500): ] run_command(mpi_utils_test, cwd=tests_dir, env=cpp_env, timeout=300) + # Cache transceiver tests + cache_trans_test = [ + "mpirun", + "-n", + "2", + "--allow-run-as-root", + "batch_manager/cacheTransceiverTest", + ] + run_command(cache_trans_test, cwd=tests_dir, env=cpp_env, timeout=300) + xml_output_file = build_dir / "results-multi-gpu-real-decoder.xml" trt_model_test = 
produce_mpirun_command( global_commands=["mpirun", "--allow-run-as-root"], @@ -653,7 +670,7 @@ def run_benchmarks(model_name: str, python_exe: str, root_dir: _pl.Path, if model_name == "gpt": input_file = 'input_tokens.npy' model_spec_obj = model_spec.ModelSpec(input_file, _tb.DataType.HALF) - model_spec_obj.set_kv_cache_type(model_spec.KVCacheType.CONTINUOUS) + model_spec_obj.set_kv_cache_type(_tb.KVCacheType.CONTINUOUS) model_spec_obj.use_gpt_plugin() model_engine_path = model_engine_dir / model_spec_obj.get_model_path( ) / "tp1-pp1-gpu" @@ -694,7 +711,7 @@ def run_benchmarks(model_name: str, python_exe: str, root_dir: _pl.Path, if model_name == "gpt": input_file = 'input_tokens.npy' model_spec_obj = model_spec.ModelSpec(input_file, _tb.DataType.HALF) - model_spec_obj.set_kv_cache_type(model_spec.KVCacheType.PAGED) + model_spec_obj.set_kv_cache_type(_tb.KVCacheType.PAGED) model_spec_obj.use_gpt_plugin() model_spec_obj.use_packed_input() model_engine_path = model_engine_dir / model_spec_obj.get_model_path( @@ -887,7 +904,7 @@ def run_benchmarks(model_name: str, python_exe: str, root_dir: _pl.Path, from build_engines_utils import init_model_spec_module - init_model_spec_module() + init_model_spec_module(force_init_trtllm_bindings=False) if test_args.run_all_models: test_args.run_gpt = True diff --git a/cpp/tests/runtime/gptDecoderTest.cpp b/cpp/tests/runtime/gptDecoderTest.cpp index e76eebb47..36ed4a4d2 100644 --- a/cpp/tests/runtime/gptDecoderTest.cpp +++ b/cpp/tests/runtime/gptDecoderTest.cpp @@ -39,7 +39,7 @@ bool forwardAndSync(std::unique_ptr const& decoder, DecodingOutput& BufferManager::ITensorPtr finishedSum; std::int32_t* finishedSumHost = nullptr; - if (input.sequenceLimitLength && output.finished) + if (input.sequenceLimitLength && output.finishReasons) { finishedSumHost = bufferCast(*output.finishedSum); for (SizeType32 bi = 0; bi < maxBatchSize; ++bi) @@ -52,7 +52,7 @@ bool forwardAndSync(std::unique_ptr const& decoder, DecodingOutput& if 
(finishedSumHost) { - auto const numToFinish = output.finished->getSize(); + auto const numToFinish = output.finishReasons->getSize(); TLLM_CUDA_CHECK(::cudaStreamSynchronize(stream->get())); SizeType32 finishedSum = 0; @@ -149,10 +149,10 @@ void testDecoder(nvinfer1::DataType const dtype, SamplingConfig const& samplingC std::vector sequenceLengthsVec(batchSize * beamWidth, maxInputLength); outputs.lengths = manager.copyFrom(sequenceLengthsVec, ITensor::makeShape({batchSize, beamWidth}), MemoryType::kGPU); - outputs.finished = manager.gpu(ITensor::makeShape({batchSize, beamWidth}), + outputs.finishReasons = manager.gpu(ITensor::makeShape({batchSize, beamWidth}), TRTDataType::value); - inputs.finished = ITensor::view(outputs.finished); - manager.setZero(*outputs.finished); + inputs.finishReasons = ITensor::view(outputs.finishReasons); + manager.setZero(*outputs.finishReasons); outputs.finishedSum = BufferManager::pinnedPool(ITensor::makeShape({batchSize}), nvinfer1::DataType::kINT32); auto finishedSumHost = bufferCast(*outputs.finishedSum); for (SizeType32 bi = 0; bi < batchSize; ++bi) @@ -227,7 +227,7 @@ void testDecoder(nvinfer1::DataType const dtype, SamplingConfig const& samplingC { finishedSum += finishedSumHost[bi]; } - EXPECT_EQ(finishedSum, outputs.finished->getSize()); + EXPECT_EQ(finishedSum, outputs.finishReasons->getSize()); } } diff --git a/cpp/tests/runtime/utilsTest.cpp b/cpp/tests/runtime/utilsTest.cpp new file mode 100644 index 000000000..a7a6aa1e6 --- /dev/null +++ b/cpp/tests/runtime/utilsTest.cpp @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "tensorrt_llm/runtime/bufferManager.h" +#include "tensorrt_llm/runtime/iBuffer.h" +#include "tensorrt_llm/runtime/iTensor.h" +#include "tensorrt_llm/runtime/utils/numpyUtils.h" + +#include +#include + +#include +#include +#include +#include + +using namespace tensorrt_llm::runtime; +namespace tc = tensorrt_llm::common; +namespace fs = std::filesystem; + +class UtilsTest : public ::testing::Test // NOLINT(cppcoreguidelines-pro-type-member-init) +{ +protected: + void SetUp() override + { + mDeviceCount = tc::getDeviceCount(); + if (mDeviceCount == 0) + GTEST_SKIP(); + + mStream = std::make_unique(); + mManager = std::make_unique(mStream); + } + + void TearDown() override {} + + int mDeviceCount; + std::unique_ptr mManager; + BufferManager::CudaStreamPtr mStream; +}; + +TEST_F(UtilsTest, LoadNpy) +{ + auto const testResourcePath = fs::path{TOP_LEVEL_DIR} / "cpp/tests/resources"; + auto const inputFile = testResourcePath / "data/input_tokens.npy"; + + auto loadedTensor = utils::loadNpy(*mManager, inputFile.string(), MemoryType::kCPU); + + ASSERT_EQ(loadedTensor->getSize(), 96); + EXPECT_EQ(loadedTensor->getShape().nbDims, 2); + EXPECT_EQ(loadedTensor->getShape().d[0], 8); + EXPECT_EQ(loadedTensor->getShape().d[1], 12); +} + +TEST_F(UtilsTest, LoadStoreNpy) +{ + auto dims = ITensor::makeShape({2, 3, 4}); + auto constexpr dataType = nvinfer1::DataType::kFLOAT; + ITensor::SharedPtr tensor{BufferManager::cpu(dims, dataType)}; + auto tensorRange = BufferRange(*tensor); + std::iota(tensorRange.begin(), tensorRange.end(), 0); + + std::string 
filename{"tensor.npy"}; + utils::saveNpy(*mManager, *tensor, filename); + auto loadedTensor = utils::loadNpy(*mManager, filename, MemoryType::kCPU); + + ASSERT_EQ(loadedTensor->getSize(), tensor->getSize()); + EXPECT_EQ(loadedTensor->getShape().nbDims, tensor->getShape().nbDims); + EXPECT_EQ(loadedTensor->getShape().d[0], tensor->getShape().d[0]); + EXPECT_EQ(loadedTensor->getShape().d[1], tensor->getShape().d[1]); + EXPECT_EQ(loadedTensor->getShape().d[2], tensor->getShape().d[2]); + + auto loadedTensorRange = BufferRange(*loadedTensor); + for (size_t i = 0; i < tensor->getSize(); ++i) + { + EXPECT_EQ(loadedTensorRange[i], tensorRange[i]); + } +} + +TEST_F(UtilsTest, LoadStoreNpyGPU) +{ + auto dims = ITensor::makeShape({2, 3, 4}); + auto constexpr dataType = nvinfer1::DataType::kFLOAT; + ITensor::SharedPtr tensor{BufferManager::cpu(dims, dataType)}; + auto tensorRange = BufferRange(*tensor); + std::iota(tensorRange.begin(), tensorRange.end(), 0); + + auto deviceTensor = mManager->copyFrom(*tensor, MemoryType::kGPU); + + std::string filename{"tensor.npy"}; + utils::saveNpy(*mManager, *deviceTensor, filename); + auto loadedTensor = utils::loadNpy(*mManager, filename, MemoryType::kGPU); + + ASSERT_EQ(loadedTensor->getSize(), tensor->getSize()); + EXPECT_EQ(loadedTensor->getShape().nbDims, tensor->getShape().nbDims); + EXPECT_EQ(loadedTensor->getShape().d[0], tensor->getShape().d[0]); + EXPECT_EQ(loadedTensor->getShape().d[1], tensor->getShape().d[1]); + EXPECT_EQ(loadedTensor->getShape().d[2], tensor->getShape().d[2]); + + auto hostTensor = mManager->copyFrom(*loadedTensor, MemoryType::kCPU); + + auto loadedTensorRange = BufferRange(*hostTensor); + for (size_t i = 0; i < tensor->getSize(); ++i) + { + EXPECT_EQ(loadedTensorRange[i], tensorRange[i]); + } +} diff --git a/docs/source/architecture/model-weights-loader.md b/docs/source/architecture/model-weights-loader.md new file mode 100644 index 000000000..8b5c02369 --- /dev/null +++ 
b/docs/source/architecture/model-weights-loader.md @@ -0,0 +1,254 @@ +# TensorRT-LLM Model Weights Loader + +## Overview + +The weights loader is designed for easily converting and loading external weight checkpoints into TensorRT-LLM models. + +## Workflow + +Weight checkpoints can be generated from all sources, and may have different naming and data layouts compared to TRT-LLM's requirements. E.g.: + +```bash +# HuggingFace LLaMA checkpoints +{ + "model.embed_tokens.weight": torch.Tensor([vocab_size, hidden_size]) + "model.layers.0.input_layernorm.weight": torch.Tensor([hidden_size]), + "model.layers.0.mlp.down_proj.weight": torch.Tensor([hidden_size, inter_size]), + "model.layers.0.mlp.gate_proj.weight": torch.Tensor([inter_size, hidden_size]), + "model.layers.0.mlp.up_proj.weight": torch.Tensor([inter_size, hidden_size]), + "model.layers.0.post_attention_layernorm.weight": torch.Tensor([hidden_size]), + "model.layers.0.self_attn.q_proj.weight": torch.Tensor([hidden_size, hidden_size]), + "model.layers.0.self_attn.k_proj.weight": torch.Tensor([hidden_size, hidden_size]), + "model.layers.0.self_attn.v_proj.weight": torch.Tensor([hidden_size, hidden_size]), + "model.layers.0.self_attn.o_proj.weight": torch.Tensor([hidden_size, hidden_size]), + ..., +} +# TensorRT-LLM expected weights +{ + "transformer.vocab_embedding.weight": torch.Tensor([vocab_size, hidden_size]) + "transformer.layers.0.input_layernorm.weight": torch.Tensor([hidden_size]), + "transformer.layers.0.mlp.down_proj.weight": torch.Tensor([hidden_size, inter_size]), + "transformer.layers.0.mlp.gate_proj.weight": torch.Tensor([inter_size, hidden_size]), + "transformer.layers.0.mlp.up_proj.weight": torch.Tensor([inter_size, hidden_size]), + "transformer.layers.0.post_layernorm.weight": torch.Tensor([hidden_size]), + "transformer.layers.0.attention.qkv.weight": torch.Tensor([hidden_size * 3, hidden_size]), # Different layout + "transformer.layers.0.attention.dense.weight": torch.Tensor([hidden_size, 
Conversion means converting the dictionary of `{external_keys:external_weights}` into `{tllm_keys:tllm_weights}`. It includes changing the naming logic and data layouts, and consists of the following parts:
`{"qkv":[q_proj, k_proj, v_proj]}` | `model.layers.0.self_attn.q_proj.weights`
`model.layers.0.self_attn.k_proj.weights`
`model.layers.0.self_attn.v_proj.weights`| +| `transformer.layers.0.mlp.fc.weight`
`{"weight":[qweight, qzeros, scales]}` | `model.layers.0.mlp.gate_proj.qweight`
`model.layers.0.mlp.gate_proj.qzeros`
`model.layers.0.mlp.gate_proj.scales`| + +The default `tllm_to_externel_key_dict` is based on HF LLaMA as: + +```python +class ModelWeightsLoader: + def __init__(self, model_dir, customized_key_dict: dict = {}) -> None: + ... + self.tllm_to_externel_key_dict = { + "transformer": "model", + "vocab_embedding": "embed_tokens", + "lm_head": "lm_head", + "ln_f": "norm", + "attention": "self_attn", + "qkv": ["q_proj", "k_proj", "v_proj"], + "dense": "o_proj", + "gate": "up_proj", + "proj": "down_proj", + "fc": "gate_proj", + "input_layernorm": "input_layernorm", + "post_layernorm": "post_attention_layernorm", + } + self.tllm_to_externel_key_dict.update(customized_key_dict) + ... +``` + +It can be updated through passing `customized_key_dict` when initializing `ModelWeightsLoader`. + +The dictionary will also get updated according to the layer classes. When iterating over parameters, +if the layer class has attribute `tllm_to_externel_key_dict`, for keywords exist both in the default one and the layer-specified one, +the weight loader will translate according to the layer attribute with higher priority. +This can enable the support for different quantization precisions automatically. + + +### Loading function + +The loading function can load an arbitrary tensor slice according to its `key`, `tp_size`, `tp_dim` and `tp_rank`. + +The template for loading function is as following. + +```python +def load_tensor(self, key, tp_size, tp_dim, tp_rank): + # Retrieve file pointer index + if key in self.shard_map: + ptr_idx = self.shard_map[key] + else: + return None + + # Load tensor from the corresponding shard + if self.format == ModelWeightsFormat.SAFETENSORS: + tensor = self.shards[ptr_idx].get_slice(key) + tensor_shape = tensor.get_shape() + else: + ... + + # Shard and return a tensor slice + slice_shape = ... + return tensor[slice_shape] +``` + +When initializing the `ModelWeightsLoader` object, the file format will be derived from `model_dir` through `detect_format`. 
The following formats are supported for now: + + * Directory contains or file named `*.safetensors` (Recommended, has better performance) + * Directory contains or file named `*.bin` + * Directory contains or file named `*.pth` + +To support other formats or in-memory loaded models, the format need to be claimed in `ModelWeightsFormat`, `detect_format()`, `preload()` and `load_tensor()`. + +### Postprocessing functions + +After translation and loading, a TRT-LLM key will become a tensor or a list of tensors, which is the input of postprocessing functions. \ +Operations including QKV concatenating, MoE weight stacking and weight-only quantization can be handled here. +The template of postprocessing function is: + +```python +# Example for 1-1 weights mapping +class CustomizedModuleA(Module): + def __init__(...): + super().__init__(...) + ... + self.tp_dim = 0 # Need to set or inherit from parent class + + def postprocess(self, tllm_key, weights, **kwargs): + weights = proc(weights) + return {tllm_key: weights} + +# Example for multiple-multiple weights mapping +class CustomizedModuleB(Module): + def __init__(...): + super().__init__(...) + ... 
+    self.tp_dim = 0 # Need to set or inherit from parent class
+    # The default value of "weights" in tllm_to_externel_key_dict will be overridden
+```python +# Using the model weights loader for the LLM part of LLaVA +from tensorrt_llm.models.model_weights_loader import ModelWeightsLoader +llava_dict = { + "transformer": "language_model.model", + "lm_head": "language_model.lm_head" +} +loader = ModelWeightsLoader(external_checkpoint_dir, llava_dict) +loader.generate_tllm_weights(trtllm_model) +``` +Users need to specify the different part from the default `tllm_to_externel_key_dict`. The loader still have support across different precisions. + +### Models with customized weight layout +For models with different weight layout, users can write the conversion loop explicitly and do customized operations. +```python +# Using the model weights loader for BLOOM +from tensorrt_llm.models.model_weights_loader import ModelWeightsLoader +bloom_dict = { + "transformer": "", + "layers": "h", + "ln_f": "ln_f", + "lm_head": "word_embeddings", + "ln_embed": "word_embeddings_layernorm", + "vocab_embedding": "word_embeddings", + "attention": "self_attention", + "qkv": "query_key_value", + "dense": "dense", + "fc": "dense_h_to_4h", + "proj": "dense_4h_to_h", + "post_layernorm": "post_attention_layernorm", +} +loader = ModelWeightsLoader(external_checkpoint_dir, bloom_dict) +# See ModelWeightsLoader.generate_tllm_weights() +loader.update_key_mapping(trtllm_model) +tllm_weights = {} +for tllm_key, _ in tqdm(trtllm_model.named_parameters()): + if tllm_key.endswith("qkv"): + # Passing the callable handle + tllm_weights.update(loader.load(tllm_key, preprocess=customized_preprocess)) + else: + tllm_weights.update(loader.load(tllm_key)) +loader.check(tllm_weights) +``` +This will apply `preprocess` after `load_tensor()` and before `postprocess`, and demonstrates how to convert the loaded shard into default HF layout. The loader still have support for precisions quantized from FP16/BF16 (e.g. INT8-wo/INT4-wo), the other precisions may require special operations, and can be addressed inside the `preprocess` function. 
+ +### Fully customized +If the model weights loader cannot satisfy the requirements, users can write the conversion loop totally on their own. +```python +tllm_weights = {} +for tllm_key, param in tqdm(trtllm_model.named_parameters()): + # Load from external checkpoints + # The load_tensor() function can also be called here + tensor = ... + # Convert tensor and set the values according to the config + if trtllm_model.config.quantization.quant_algo == xxx: + ... + else: + ... + param.value = tensor +``` +In this mode, every precision require user's own support. + +## Trouble shooting +The weights loader is an experimental feature fow now, and is enabled for LLaMA family models by default. + +If users are encountered with failure caused by `ModelWeightsLoader`, a workaround is passing environmental variable `TRTLLM_DISABLE_UNIFIED_CONVERTER=1` to disable the model weights loader and fallback to the legacy path. + +This workaround will be removed in future version after the LLaMA weights conversion is stable. diff --git a/docs/source/conf.py b/docs/source/conf.py index 2455ed8ee..59d60e9c0 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -71,6 +71,12 @@ print('CPP_GEN_DIR', CPP_GEN_DIR) +def setup(app): + from docs.source.generate_examples import generate_examples + + generate_examples() + + def gen_cpp_doc(ofile_name: str, header_dir: str, summary: str): cpp_header_files = [ file for file in os.listdir(header_dir) if file.endswith('.h') diff --git a/docs/source/executor.md b/docs/source/executor.md index 076f718f3..3c7964614 100644 --- a/docs/source/executor.md +++ b/docs/source/executor.md @@ -22,7 +22,7 @@ Users can alter the logits produced by the network, by providing a map of named ``` std::unordered_map)>> ``` -to the `ExecutorConfig`. The map key is the name associated with that logits post-processing callback. Each request can then specify the name of the logits post-processor to use for that particular request, if any. 
+to an instance of `LogitsPostProcessorConfig`. The map key is the name associated with that logits post-processing callback. Each request can then specify the name of the logits post-processor to use for that particular request, if any. The first argument to the callback is the request id, second is the logits tensor, third are the tokens produced by the request so far, fourth is the operation stream used by the logits tensor, and last one is an optional client id. The callback returns a modified tensor of logits. @@ -37,14 +37,14 @@ We also provide a batched version that allows altering logits of multiple reques std::function const&, std::vector&, std::vector> const&, StreamPtr const&, std::vector> const&)> ``` -A single batched callback can be specified in `ExecutorConfig`. Each request can opt to apply this callback by specifying the name of the logits +A single batched callback can be specified in `LogitsPostProcessorConfig`. Each request can opt to apply this callback by specifying the name of the logits post-processor as `Request::kBatchedPostProcessorName`. Note: Neither callback variant is supported with the `STATIC` batching type for the moment. In a multi-GPU run, callback is invoked on all tensor parallel ranks (in last pipeline rank) by default. For correct execution, user should replicate client-side state accessed by callback on all tensor parallel ranks. -If replication is expensive or infeasible, use `ExecutorConfig::setReplicateLogitsPostProcessor(false)` to invoke callback only on first tensor parallel rank. +If replication is expensive or infeasible, use `LogitsPostProcessorConfig::setReplicate(false)` to invoke callback only on first tensor parallel rank. 
### The Request Class diff --git a/docs/source/generate_examples.py b/docs/source/generate_examples.py new file mode 100644 index 000000000..5dc0c6d90 --- /dev/null +++ b/docs/source/generate_examples.py @@ -0,0 +1,50 @@ +from pathlib import Path + + +def underline(title: str, character: str = "=") -> str: + return f"{title}\n{character * len(title)}" + + +def generate_title(filename: str) -> str: + # Turn filename into a title + title = filename.replace("_", " ").title() + # Underline title + title = underline(title) + return title + + +def generate_examples(): + root_dir = Path(__file__).parent.parent.parent.resolve() + + # Source paths + script_dir = root_dir / "examples/high-level-api" + script_paths = sorted(script_dir.glob("*.py")) + + # Destination paths + doc_dir = root_dir / "docs/source/high-level-api-examples" + doc_paths = [doc_dir / f"{path.stem}.rst" for path in script_paths] + + # Generate the example docs for each example script + for script_path, doc_path in zip(script_paths, doc_paths): + if script_path.name == '__init__.py': + continue + script_url = f"https://github.com/NVIDIA/TensorRT-LLM/tree/main/examples/high-level-api/{script_path.name}" + + # Make script_path relative to doc_path and call it include_path + include_path = '../../..' / script_path.relative_to(root_dir) + content = (f"{generate_title(doc_path.stem)}\n\n" + f"Source {script_url}.\n\n" + f".. 
literalinclude:: {include_path}\n" + " :language: python\n" + " :linenos:\n") + with open(doc_path, "w+") as f: + f.write(content) + + # Generate the toctree for the example scripts + with open(doc_dir / "examples_index.template.rst") as f: + examples_index = f.read() + with open(doc_dir / "high_level_api_examples.rst", "w+") as f: + example_docs = "\n ".join(path.stem for path in script_paths + if path.stem.find("__init__") == -1) + + f.write(examples_index.replace(r"%EXAMPLE_DOCS%", example_docs)) diff --git a/docs/source/high-level-api-examples/advanced.md b/docs/source/high-level-api-examples/advanced.md new file mode 100644 index 000000000..a1addcc5f --- /dev/null +++ b/docs/source/high-level-api-examples/advanced.md @@ -0,0 +1,140 @@ +# Advanced Usage + +## Quantization +By simply setting several flags in the `LLM`, TensorRT-LLM can quantize the HuggingFace model automatically. For example, to perform an Int4 AWQ quantization, the following code triggers the model quantization. + +``` python +from tensorrt_llm.hlapi import QuantConfig, QuantAlgo + +quant_config = QuantConfig(quant_algo=QuantAlgo.W4A16_AWQ) + +llm = LLM(, quant_config=quant_config) +``` + + +## Customization + +### Customizing sampling with `SamplingParams` +With SamplingParams, you can customize the sampling strategy, such as beam search, temperature, and so on. 
+ +To enable beam search with a beam size of 4, set the `sampling_params` as follows: + +```python +from tensorrt_llm.hlapi import LLM, SamplingParams, BuildConfig + +build_config = BuildConfig() +build_config.max_beam_width = 4 + +llm = LLM(, build_config=build_config) +# Let the LLM object generate text with the default sampling strategy, or +# you can create a SamplingParams object as well with several fields set manually +sampling_params = SamplingParams(beam_width=4) # current limitation: beam_width should be equal to max_beam_width + +for output in llm.generate(, sampling_params=sampling_params): + print(output) +``` + +`SamplingParams` manages and dispatches fields to C++ classes including: +* [SamplingConfig](https://nvidia.github.io/TensorRT-LLM/_cpp_gen/runtime.html#_CPPv4N12tensorrt_llm7runtime14SamplingConfigE) +* [OutputConfig](https://nvidia.github.io/TensorRT-LLM/_cpp_gen/executor.html#_CPPv4N12tensorrt_llm8executor12OutputConfigE) + +Please refer to these classes for more details. + + +### Build configuration +Apart from the arguments mentioned above, you can also customize the build configuration with the `build_config` class and other arguments borrowed from the lower-level APIs. For example: + +```python +llm = LLM(, + build_config=BuildConfig( + max_new_tokens=4096, + max_batch_size=128, + max_beam_width=4)) +``` + +### Runtime customization +Similar to `build_config`, you can also customize the runtime configuration with the `runtime_config`, `peft_cache_config` or other arguments borrowed from the lower-level APIs. For example: + + +```python +from tensorrt_llm.hlapi import LLM, KvCacheConfig + +llm = LLM(, + kv_cache_config=KvCacheConfig( + max_new_tokens=128, + free_gpu_memory_fraction=0.8)) +``` + +### Tokenizer Customization + +By default, the high-level API uses transformers’ `AutoTokenizer`. You can override it with your own tokenizer by passing it when creating the LLM object. 
For example: + +```python +llm = LLM(, tokenizer=) +``` + +The LLM() workflow should use your tokenizer instead. + +It is also possible to input token IDs directly without Tokenizers with the following code, note that the result will be also IDs without text since the tokenizer is not used. + +``` python +llm = LLM() + +for output in llm.generate([32, 12]): + ... +``` + +### Disable Tokenizer +For performance considerations, you can disable the tokenizer by passing `skip_tokenizer_init=True` when creating `LLM`. In this case, `LLM.generate` and `LLM.generate_async` will expect prompt token ids as input. For example: + +```python +llm = LLM() +for output in llm.generate([[32, 12]], skip_tokenizer_init=True): + print(output) +``` + +You will get something like: +```python +RequestOutput(request_id=1, prompt=None, prompt_token_ids=[1, 15043, 29892, 590, 1024, 338], outputs=[CompletionOutput(index=0, text='', token_ids=[518, 10858, 4408, 29962, 322, 306, 626, 263, 518, 10858, 20627, 29962, 472, 518, 10858, 6938, 1822, 306, 626, 5007, 304, 4653, 590, 4066, 297, 278, 518, 11947, 18527, 29962, 2602, 472], cumulative_logprob=None, logprobs=[])], finished=True) +``` + +Note that the `text` field in `CompletionOutput` is empty since the tokenizer is deactivated. + + +## Generation + +### `asyncio`-based generation +With the high-level API, you can also perform asynchronous generation with the `generate_async` method. For example: + +```python +llm = LLM(model=) + +async for output in llm.generate_async(, streaming=True): + print(output) +``` + +When the `streaming` flag is set to `True`, the `generate_async` method will return a generator that yields the token results as soon as they are available. Otherwise, it will return a generator that yields the final results only. + +### Future-style generation +The result of the `generate_async` method is a Future-like object, it doesn't block the thread unless the `.result()` is called. 
+ +```python +# This will not block the main thread +generation = llm.generate_async() +# Do something else here +# call .result() to explicitly block the main thread and wait for the result when needed +output = generation.result() +``` + +The `.result()` method works like the [result](https://docs.python.org/zh-cn/3/library/asyncio-future.html#asyncio.Future.result) method in the Python Future, you can specify a timeout to wait for the result. + +```python +output = generation.result(timeout=10) +``` + +There is an async version, where the `.aresult()` is used. + +```python +generation = llm.generate_async() +output = await generation.aresult() +``` diff --git a/docs/source/high-level-api-examples/examples_index.template.rst b/docs/source/high-level-api-examples/examples_index.template.rst new file mode 100644 index 000000000..a9ebc60dd --- /dev/null +++ b/docs/source/high-level-api-examples/examples_index.template.rst @@ -0,0 +1,8 @@ +Examples +================================= + +.. toctree:: + :maxdepth: 2 + :caption: Scripts + + %EXAMPLE_DOCS% diff --git a/docs/source/high-level-api-examples/introduction.md b/docs/source/high-level-api-examples/introduction.md new file mode 100644 index 000000000..f25c1f9f0 --- /dev/null +++ b/docs/source/high-level-api-examples/introduction.md @@ -0,0 +1,46 @@ +# High Level API(HLAPI) Introduction + +## Concept + + +## HLAPI Supported Model +* LLaMA (including variants Mistral, Mixtral, InternLM) +* GPT (including variants Starcoder-1/2, Santacoder) +* Gemma-1/2 +* Phi-1/2/3 +* ChatGLM (including variants glm-10b, chatglm, chatglm2, chatglm3, glm4) +* QWen-1/1.5/2 +* Falcon +* Baichuan-1/2 +* GPT-J + +## Model Preparation +The `LLM` class supports the following types of model inputs: + +1. **Hugging Face model name**: triggers a download from the Hugging Face model hub, e.g. `TinyLlama/TinyLlama-1.1B-Chat-v1.0` in the quickstart. +2. **Local Hugging Face models**: uses a locally stored Hugging Face model. +3. 
**Local TensorRT-LLM engine**: built by `trtllm-build` tool or saved by the HLAPI + + +All kinds of the model inputs can be seamlessly integrated with the HLAPI, and the `LLM(model=)` constructor can accommodate models in any of the above formats. + +Let's delve into the preparation of the three kinds of local model formats. + +### Option 1: From Hugging Face models +Given its popularity, the TensorRT-LLM HLAPI chooses to support Hugging Face format as one of the starting points. To use the HLAPI on LLaMA3.1 models, you need to download the model from [LLaMA3.1 8B model page](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B) via the command below +```bash +git clone https://huggingface.co/meta-llama/Meta-Llama-3.1-8B +``` + +### Option 2: From TensorRT-LLM engine +There are two ways to build the TensorRT-LLM engine: + +1. You can build the TensorRT-LLM engine from the Hugging Face model directly with the `trtllm-build` tool, and save the engine to disk for later use. Please consult LLaMA's [README](../llama/README.md). +2. Use the HLAPI to save one: + +```python +llm = LLM() + +# Save engine to local disk +llm.save() +``` diff --git a/docs/source/index.rst b/docs/source/index.rst index bd9c31702..043e684e7 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -84,6 +84,30 @@ Welcome to TensorRT-LLM's Documentation! _cpp_gen/executor.rst _cpp_gen/runtime.rst + +.. toctree:: :maxdepth: 2 :caption: High Level API Examples :hidden: + high-level-api-examples/high_level_api_examples + high-level-api-examples/introduction.md + high-level-api-examples/advanced.md + + +.. toctree:: :maxdepth: 2 :caption: Python API :hidden: + python-api/tensorrt_llm.layers.rst + python-api/tensorrt_llm.functional.rst + python-api/tensorrt_llm.models.rst + python-api/tensorrt_llm.plugin.rst + python-api/tensorrt_llm.quantization.rst + python-api/tensorrt_llm.runtime.rst + + .. 
toctree:: :maxdepth: 2 :caption: Blogs diff --git a/docs/source/media/picture-08-06-2024.png b/docs/source/media/picture-08-06-2024.png deleted file mode 100644 index 5eeacbdf7ec7d93c52244ec4b4a9015883d95bb5..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 354043 zcmZU)1ymf*vN(*hxVtUxEEXWRyX)fa?izwD?(PKWGE;oG&xyGbtov9!M|+)fbh2mTNPgb3JST}RzgBm zPC^2x>gH@^>tG25B^#BNfvBl*j2-mu@63g@Is80E3p3 zhVTqfn5ZhMf2sckfH4f5wP?IdikUUyr6ih^wOHU>L~61U8q1cnA~|*Ym~o8&4439oobg z`2FqAXFS?oU<@J@ZipL|NK8iXHHSys^(24%-zoDeHjRly+cz(p^s0WL&h->!m@Qq zl+J>dav+~vpUE|uHhGv` zvC)xp_IUNY$PbbaM7>d;}bno!ps$L=?UfM zAItmdT$hzihgScg|CAS3$8S0l$Tvs<1Z5eTNfd7x<98pW3tV#u`Yil(=)^X0J3M(%AUm82iEV< zb1~Riz%9ZvDm6P%+4t3Y4hJ+Y6S~P@>TM|(&RnFUfv`)ucGB8#jDA)Zf&ijk_=f?| zHU}KgEt2U=1egjH5(Y=@DvM1ercF5{OQashO?f7Z;RbswV?Neo zk6`SGVDAGu;RHe`DC2=txR~PrG>CG)XFpdzpIVZQqz5em&eNdf;M5@Opx3U}E~aa4 zi6kdwM+%i}Wl2kkn>x-h=`rPTO>S;(Z*Gi1MXst-oI%IBX$OiMtXsk8xP?1s45>embQJq8e z>DNgeHqFht0BPUEXjQZd7N&&Gc)uXOcz(%&ABr{(dQ3GCugFCO5-l}tSJf!>F^x*i zkCi{`qAQ5MHfqgj@qQ&&>#2G6jlU;7B>kR?=AM0MP;eurq%ozSRH$9OSnZ{7*2HhP zp(CX9>8sz@XKJ+A@DAw;>DpnVICo;u2it6?Y{4nBDeCNp786$Y8upq4y_c3+n>gDO zn-}|-7H`{vAAyy{RePl=RZWKF8rG5-RVPKv^BcAP@(&*k!=_8h=O&J=kBj~E{Gk2R zdp`N~`0YK@JtsZGKTka;pvoekhSG$dA#5NNhj)hGpy?3u5O4BQ5p%?<$9fUp5q@Il z;fUuka$dI+bogS`&ojnr%k66;&vC~+#a+YGV;jz8o>`i8#?fVko+FT5o~e^<+!Dl^ z%L2joXL&v}JL;Uoo2tq8A4zr?Yo2UyXeO;!Z$WNks<&(Lvvjl}v+h~w9C^-(D^}{9 zg3du~9d8Y8g>OA;bv~Uu^YKda;yq)$54`_+wsvYP0OkXb-t(_}mhew|8(b`(JuZx` z$nDPWbMHF*%H8f>dqNK#2ADcn7`S}z^y#YhI(B*5|Gl4m<~Mk!!mH56-G)?&_mUg-!Qj%87Bs(vG1CXLU)4y zXOoUb?MCGnpzxdUkZ@1$cyCfKUT?ym+ds*Fi2q3cA^Vf@Qh?rv-~!W%ux@JKLJ>J(u%1NKA@!c2Of}94)t^i{OBlnx*+W@>YiX@6&s2IC|kN#ybW=i5@qOrJZ z5oJ+Yk;;I>fHvd~qA`#G;e~ulxQt&Wy#oH^RwrH|E5*d4(4x4bT&3^i?i3m3x3Zi( z0LOt%9rQ_s*_dkSoE(ZS`pHltcFFc)cAY_-x~XlycFp33j$>5V)WH3QZrsp0n0d2_ zmtiE6HHY!EjX2))@z>z)yl#R3(3{s=WS^lblTZ47zGgOe?rDBEt1rzES1HRuX(DYk zX0I}uY>oV%MEqRs9G)cdyv<8;ktMgIPG#8o= 
zx~hv`s#_h1`AH3vvgN;}pzr(aYaRwn#*GVQm#5N?cQsB1tb|+6T>rWLtQW64t-skL z?mV=L2g{tr{7@63OO~ACnh}Ck&jNWS%=G zuhnjE6sH)QiL;t~{S)~#`5OhqwyCYf#=PDhpO-E&1up{@cZRR4a;uCRE5_uSo2?Tr zOX-HjOzKQdT7Eh_7mKSsrVkN9uR==?erJwVKWmvPpI?hFN4j2KA6hg?bTZCH&SqBL zZ$;;W^9u+?7M5)0Q|AL}@D}cu!WQysyld;6XYH2P0le}gGU&q;-CmH zZrS~n4PCy?y8gJ^jZ%nJsL9A|OPmI?(gq>h7IB$zSSn zdR*uGobu`Bx(2@O*-2)z*Ps4P`Q7ksW5@sa!DwryL*&h5O5${2S0*}PoZMch_HF94 z?J%(XEd^E>Rwo}K;>$it5^&|ecwrolT%ewxE;RQ^Tf~8Ponx2R?d+-lNCGqn8aBrN zQ*=9Wu2WVQ|L6GbDD6k)kD{gIB^+m-UaimiFVj1Q58%<|y$7O;5fR+an(yF;sH@5& z_uC`uy$<6`q2UZ&)UlzQ?Vt+>VMKcnplo+;6@_SL zZ{LF$Vcw(4kIorgVYau4q58X_0Med#cAVlFueMO)s`?OuptCbed#ks+>;-zMDT7W{6i?FAt5LC_pV{?W@+i^O|S=8Ns-^&*^$lE!r9D{&BxK@9~LMis*vV*BR9vQ z;6{f3A#g5lD^|J~I)oF4k}ETc)3H6Zc{ds559`@2MRUfaBr6TqcZ)Ch20>IvIFZA% zap%1259eJ2yo(j?>s)RXlb2P|Rn;8Vn{}LA2pA7INm%qk)YJl7w{HvUXD%lm;LO+4 z`uXKAySJyq_yjr5!rn#lr%*0$WaEp+x9C!7p>qRO*|cVmMy(v z&aat04`1t|%}I=@@DrS@-x`QEEfG@|Km&&jgB08(6}0D&m{98a_-jpyYEZ^FY%B;O zDh{OlH;JB1A61Wn;9$*3I`T4Mur{FK4@CHhlee}7mSA?cxCoZ_Np?XX!v2cXLGb6H zcfDOsU|;CMvnE~)nabobS~>FK$Ca>&`QIDnRz$7hkuAzBS~Hc4Ugq}V{=9q_UsIRG zzR&YMp3Y2qS|lBh^EDz106g_OHqrYVOC$`QSUW=duJB;`|TkZ(yv`q=83h!_Z#0cqny7mV-IOwrBc^Q*vPmrph zPv#Ignn$*WnQmAL>2rgd{UqfTa_pxcfBwu!#ccj3L_HqTH%1NA(QrWYzW#N(*e4n39n+?( z*U7Hvqi*rP>-CR)+L-J}uAR2F@HaHK5G>i=CY;)J3qA4eK1`F(=96^%?2ZW%WG9u$ zA6jr~`rSgRY6>}yT1ZI+BBs*IB< z_9i6yc&-nUBMncNsTg|J8O9b5fDn?lk1uY}Yhh@a5<}3t%~Jeos7wrbTbIh#s*H{p zy62D{q`eGtsrAMV|FLcNrR|_mBYFW#7UJVst*63h!M9JV?44M%SAfhX1t;E?S#D$B z$oSU-ml0rgj6w@G0b7_(#iumpp8=#ql<2&3n2^baz}m!Ss`YD#2mhtK6fRSSF3nZT zIXg;CPCON^Ber{?sor}MkMy1YByJkklh?^qtb-l+s)(fLw*(8P_D$nzpf$M>)nxLq zfrGIAZ^pstxap~?nB~rdZoj7T*&X#7VZi_-kp2~o!RFQcCm&*jF_YY|K~{qlE5lP# zz5(e>OlkV+Uv&5It`brnajW&?sN<+PK#uB{_Ge=%KEj^0pYgGnh-z!Tkd?yzr9abm z21eUzj9u%wsb-dN&1iMT@7d;m0+^Z8vB~*hM${gDPoSLG-S7PPP`?H(s5jMPfx0wg zG>?Se`G>#cKtH2X(njD+0I(`2Nh2Nrv*5$BF^MFar})oP91jnD62>J+zQQl|5#Jy= zpFT85%8wP}Y7DC0QsQ}du8x%;ZO}0!DA8yyX^yT@WAncsZ*ky8ZVTM0*U-KB;edlq 
z7!50BJt_2w^76Q2H>R`Y8Zf?%ZDhA8VV;YVL~H|ig5Zgr;@Ni7Va_esDu}4{ph?#e zx#T3?Pk>%UwZubdDGPwZlGUIKnO(+efSXFsHp7AGk`SffEBWE*9a*026(?HZc{9|v zjOcW%c3W%Dkwy?2G^D0Z;v~(DwQFU74x0DbPbMXc&@q%N{yWZXC`>@Tk?#(hXD*_u z*io_g8UN>;4Y^!l2ihx^7PqH^F0d$`ki{N(D9Czy2QLWL#B2}>G9AmsXb?UD)NM=JMreet&V&KkF>`uGCu`o+jZ$S8DWb4P*TC z_QfWi8Ms!SruGD_1*j8>>j~P@mGnU!riKCY@yy9XWSUywaj>iDpi0RJ3Q0T5Fbpug zQ1>bQxas5({9=B`J)VM3+CB*Ty+M!EpjR?dzarVvXd6FTkt%^4+DoE*bA$8~EiLT7 z!dKrhHKw<*_ia0kc=sNYqJ{5BQ2Flv1U5>S5&$@t(rbJ$N&H9WY7}N0`qdMjiDBPS zgmfd{f1mh7G!iyPxsC*ZFPHZ#Ug;(d}q%%0N1&YXvySyE-A|IB>_mQCm_?Zu87 zZ&VMXbBokd?;g_3sRE=u%*y&h1%9*w6$W~6W&(e%3&`E&E(KTqe?qY+UT_}HuIv_3 zbE{j3BIFUSQOf_c3amBF;^<(=Y<3@Lwt^LsDzE=!mZ4(@8@o&pcULyt$XV?;qBM}C zt$4d?=IXcFV#7!rKgWY(_pvoUUFK_?J)Z-7Vp2FxjfWm$qxBJf=5k z-s0nc%~J)RG1Bq8?;;A(@tfiRU$OOn5wa@Yoihi)Q9`I$L<`nMqx@9LO`Umpbu#DF z!O0hHfpNe*A-uAO7K_ol*MeKUES|Bj|1-oMiA5#1am& z%x0e$2L(opn2A^<=wyk|+HLHPqa)P-#wD<;vrjNR5jv??^S4$p!w@SN*t;ezFnDKD zlHA!kG>zlurxM%(?FVe}SH*vcVVSjTggCtbNKuD>8 z_@f z2a^vOj-{7oa}S4{csc}=LW+4^RPHOZGmytp$QMqz-CE$OQvsnhMzoV>b?D)?MBm-! 
zqYlk|@A+*azd;N(rSN*ETsG~$lGp$IlkpkC#Xe2u-ZoFzK1E>`IB_YY{};X<9meHg zSdd~n_fBK0y8OrDgwIHb0ykyriSVln@YTs&AhDraHOrKB?T2NSCW*$rFijY%;ptfA zgo#aj#&tT?d}4<0;!Im${Fh(t-L4(c>NyGgr=@FH8wT6D5OSn4wH=U0c^ z<_6D!tdd&K4)JCU{MXdshW67d{UN)#-p2ZD80l$R^VFm!ZF$|}8LCPo;A0kJ>{lYyrcmi1}Neqn?{PdRgTA}&aeXYKkpqH6Sq{o)-#CFAt zxi>1t_LMm&+U`H2O{X%pOO3{ANXf_iLHcN%KGyVmY;sLK1B;ETRdn9i%0f&0uDZd1 zdMrTLeo`H-cFXBYmoqN0_6O2|6I$_!Nl68#;Qme-(@*^z1}dNurRs;wYt%{w7Zh6@ zY3fx8QhYg-{%DA~Vie=`EGGN!$5~CK=YJO*kUk_TlXwXl=P1`)lbSXwbpc2_t!Jn= za^+MT#0NX)Kb46iwaGPqvCDw5^}f8?KmLdVBO?_#H)W)OS5^<{V+01RZIEp(>m?kk zK&!3R+mp1QdmALttLZjm(#i+CE;v7YYM4Wdr^wjQpGBrjSMXmdUOR6PIKlQ7n$7>P zJofk>->IN9Ns!pdiIwEES1p4A&MGndXPEz215FbJ-w6)3?~Gq`Owxbwt>SN8JZTQs z$?^`NtSn>lL{pdjn|{~h2G1_Ea#LgHKI0)d@B~}q0oIhMwLFmTn&qTO6aO?=ktQNJ z@~-5KV_e}@mM@R5R>C7{5n`dG#>&BiAM>&S2 zoK3UK1M&bh50J`@yUqCTawe8;aEG7vvAVHG!+Aj7IpK)@?H^tCVz)%!M;i^wxJ zZUcw@n^*+tp4br2YgpES9}70FMmAarK#C88k0ivRl)^&^SMACm>F=c+>HWs!3#h?G zhZ~KYA&8U}cIgcwD>|@V@6K4%X5lINZebhD_VfEsj7Ow^gg$XUkB{+HIGJZ8SX5w1DH9uEcv~oVL40duB+m5a!Q#l4z`kWC zlM%K&Zqe-oo6;y=O+M+bTUAFxosa7i+Uc+gm99aai_{?eE zh4k7l$}~5k%#I3nXStXP-liRmZ5CkcCSsVK7zz-6$(#QNOpU5gDd2kVp4Q^Y?jtB3 z?10S}=`6r#{f`%rz*WDo&PUuW9hBvKgII2ewL#A#9FVkINdSZ=y+$$MKeg<&HQ@Ab!$3t)k;8Ec&`N z-I4(QiZ7uJ_T6oPjYT;(j%r=b>Hcx@j$AeDi)$%E+I$runUKiuxhV z#a5J#lxW3tl~aPBQ*gHvG&q%mcI+|GvXx7XLlos%G@G74b(e+0mXu|#C>qM?8kZC$ zNU1VP2bym!wk-)3WrgJMoav@+>l*MhvDjBfefn(@x0NdQ+&aN(Kr%hWzZz5bMWtiI z=^Ub8I!F}<#JAG~U`XL%6UVC(?PPDW8uGP@twxKPDzQ2`?4%N<+aDPUwGtXfDL|3n z%$cyMQF*YQ2aFH?PAp0N8yWhT0JwQ0OqB|@ql7S@^nvZEPt6!nwgUMhR_iQOKXT1m zRP~)q3U!rk<;tUb%an=JEDXfAo3U)-FcA^wO$KdY^Qsm8siRPWH@vh;YS%zN?&1C> zGX|`jw*N$l6rsSzco3Yo&~BKg>D z(XX>4+mZk=KP!cy#1Rz^{8U#IH-UwNkyNMo4Y@~yP&hA2MkchKCm$jO$6?BP@Sz$D zQua@jVX@&aN-^J=q6WRRkBnpc6rd<`b&r=+0+vPtD7Bcn67+a7AXgxPR>^jqeQOZ-Z)!j%gqG!iP@) zmKtVK+M}8ch~gQIE1AsiS}GWj@f(w{+H* zW9sA4`c=;Vr(R`D=3-A&luemqvu2F7$6l^(4wxuSr;5q~;2d6DqL<|p>Nr#&y}sV^ z1L|)LslABev|}*(Q`|Dsk*VU~I@09xn!G^ET~wH(vxRD$WLFIx5!~) 
z(lnC{I%}DCj`uAv>k;Zd5fZKKA3{r*9h?o^OE9IhVL{98ExobdEu> zQ+8vZadix>-uUrZ(rvt9YMOCEe;LxF=lb0%7d9v@Q%q2G?St6jE zf10Affk%pRsy_^BCO0~IS2My=9->%QJEV#Klsf?|FMT6h>vXFON)ZQQ?WR;sTTJ=^ znzz&RRf2Ei;~GwGBS3n~4wj?Tm2psx9QU-mO?Y8oik^`jzO}aXX$8-zimYMQsP*1K zTKIg)=0bd%>YrxfbET_|!i5U@L$n>HaMjG?zm@v2F_`JFcFC3^ym$oIxbc=^gFHQX zkCxfFu`bb|1+Pzc5JqHP^gnZVdO-BDw@0sp(eBbPc-qJg%DTSF*fn(hR4#twY3c8z zSib9#bIHewTkqh^q0*lz0JxH+cuABP+n=wmvhvBP0348@Z)c0}SwatxJYx05?Y@~! z=k_GZf_aI)c3U)UAuE>B!$pc#v{@Q-3Q;1jtzuJd;wLjtG!Q?Gi>vpyU`7rJ@@(mi zJ5pW=-#V~0nF~f*qsJ72MNcETxz}uti3r-hx*?zOkVv=H)g8byHyhnV;si@nVF#(^ z;+jj>R*o`_@m3}g8uBtahHNOK1;hA={z~3fw(g{na`z~ovyT_8_?Rg@eN@vs zIK;?V+G*>jq0rx4YWfq>y)O&*B-3uimwAn0W(Cz7im)c3NHFK)FS|X*@gYG9>V|_0 zrWGOB#q0o^r%2~{{3pu9|uEK2p27naHt}W#sy;hE!oCc@wU* zX0A-Do*VxfKFrWi18bCjlDV%CE9lWERVYLphf+91UY-frI??oxK|}qsvpuFTQTOPp zS?hIbSlFvP3;|4&h6+=Td7Ns6u8lmg9umN+B*G`;Bc@QImN4ZxFv#uk=o!gmGsL;z#FwTh4Y{H)%Mbmy0T8wd z8(2Is8Hao8vwZrQ>j3Mt8EXAQxn z#xfeEqO@}JOOcDF{fZk+jZOH9g_LY|6-%uo{6}0}196z0{$0>Xz6vX`6#}IvyV#12 zj~Eig zs56U;L&JA4t;zRa?Er&;e4hn)@G|?fKg&`}+sc64OMmAhr+A)neMpwtDC60UV^2jv zl^}(R*Ldj-zmGs|#H-Nj$z<6jK*g{vqA74D&YFvN$hdsTv&U$Qa$Kq0oi~UHcK?Xe zUOcj6Bsa#lEv7zybT$UoRFmW?04V`iR~RqOeHVz&K`|Yr^do#iL5C9C)>99Gb$zo; zDf9Y-*CQ!JXj@!pm}Rj=TOc*wxBciNBiAYOJNcVQ^$T zcB;JngT2L=!*o)ZxQ#nnb_Y*%^JVKy8IX6 zlVlwW#l#|BIAy4x!U|IzVKrv(ZDGf9C?NFsJ_~=)n=`NTDq!Dgt|12vbL*muao4)Z zf9V@xwDpqGwo+jBF9%bfxG+)gEOh*B5oemi!FaxHpK+vdPB~qzYd=f9<7v`GE{Voe z+cs;s68=YbhMdxg+T-0KljK`Ie7L_G73taXe&_hmw;P>`3w}e56ChApi_v*1e{7Jd zrevsvm3i&f%mFiQa4t=$t{>$v-@U}W2M?BSzb5m_r)YT-*WEE+tYnJHBdwbz*2hNL z;9P*Ec5?p7m*%Wn(iGCmMv`Cr!jhas{mg==CJ7>wMTG0O8#$nFL2rf+e=a_|BzSwD zTP=GNiS2mqW;E#HLER8EprG{|G6-S;MbzaD3!Q7T6Y?g1tOu8d4XuEcAMuI80QN`4#z z8_-ct(`!fjZ8<*LI7Xq0WY#)qFG%6C_AH^)H>hcj?g+- z7aYuZ$Krz3g|cwqK_a3jj}p>Ww4QGXt{9zqsN|aBB3HRzzn!jq{2M1~$JcoN)t+j^pe*YLCV^I%|p@~C`Iw2PT^ zm0p+g7zJtoNniO`~&*O zaXwATA7=2HgeID%!;}8$J`DM(fmo}|f*-^4MRUr_P7h`N_SoAQAp7zorl>4*esR|j z%(}n>>od2&;{4FbeUc=O1A`9}g{l9mq#Ys(2o@ay(a;kj$~VkGGnKq1g~6))H 
z>HSWvh|L0ru`TJti4h83Uh)C zcl6R?_b@c8E5P&2Xn^{4S4G(S1_WB4i;3_0HaOKcQAE<)?$M)A%?|y7cj4sZpZs()MbYm3{=4mbV=M5)syOMy{TaSoIZ`^8|GT+})}Z~UkVfhU=8@99 zPt=7f*x4f^CI76oIY;+m2HEO3f4v8{S*R~Y-IRRv0TeMonms> zXW4jBQmtPhqY$26D7@8bF|447Mx-cI>Bb+D+-{fbMcVm=ZpL&uJJ)1-RI~Kh=Ig|q zYCus$sc{mRe`a4ohBOMbB@0|>_=uO&jy%|vL1Az)Q_!)=Tem)N#CyJXSL{C1Fg}QP z2>(s=F-GV7dg!S3^Ue5OU9BMnXl|-J;e01>Kd{@+BPF3A@y+^S)5&kg1PRGEF8(f7yn(b=-i|ZNi#$DR%u-+pj&tm zV=^mw5ev%P(EgiXavspX&jrMHW^UIIyIS@%(dgw*x`EYFSXOA$vM?*Piu_swh?8Me zC(CNe@vJzlma`C|yti0n);gd3ID2idiufDi|1NXQd~(v*Bt=}10LcL5DvgZygAIkJ zFZ{t@d7Fn?r5riQf#-i*D@wHb2RJvo{SJB`4|L9dKe=~nE)AsuSV|Fg1qqGXrw>|A zHcgO+PBPAia%MJ~PVBwoESLGYSjN0wpo6-Hm~ab3-_Egwy%KbCIiVG$g?#c|&7nWO zDwFm6{^XN!1nzYVqNQUy1TJJU3k~!SYWNKLntvCmbt!_28>H_N-7gCoNeq5n7ksPM zDZ4omoQpGE!NLI85yBK4i{3|+FnEpyt>f_*?cPHvoPOwLuFz&2sTMPdal&w_WCH$xmfh_m|vSxycI#$N(zNorKCB=m;d@3AMip)^D;NP%2a z7-AhUedl|umTP(p5=%3Em1t;0bUonj;KPwxwtg}vpE~xNj>J29NH#$lGh^3QW#E|t z38RxE>oS`qT>V054_F~-XZEqPe!LzcGm6MjjJVu0>R1_yI}sk3@N~}a&~dI^|2k3Y zj{w>*fn*%=w>d4p;_4|Jbgx#2(@Xh>e0oa6@2%5(qr)#I$}s0&ejde6C_#Ki(!R;Uj1M7FoMqsg zoi4Y_$5rH2wxxy0LcxT@S8ZVsVbwbhm^^U;qf^U|#WNctGyv3p;6d6js4fR#yr*H9 z6GlkMnAU!@A9XVJ(FFXmZKEQLs+j^M?Q1|txLJ15Gey=iPbf1E1o^99r~A+%GR~0+ zh40zc&t9>OD@ppBViu@7H6E(ow|m18pHS8lyGaF2^4SFkmM%74o&+AQ-eh=tNlXoQ z1roPWI->eU3WMo;dVvEpolP8(Jl!+P-Xnub91n9ECD)0?2^mU4JAa0`{|+}1jnlq*KYQA5uGH_2v4F+E ziM0#G&bW->gC)C9Z6dMhamO3fsGA7?V3nNXuHZ|`As0Z4WXm4!I{oqIP{w+5V;cMS zi%3x2v4Yd4J@}MS?rhXT40yw#7)i9kt576+N@D=`67WTK00SG5_bW&G^;Nwk=$2H{ zXS1ijImuqpyAKyh(YKw@AUM;=gs9ctWh~wwZRR`!*zygg;T4Bv=3WZpCiaM8{b5d^ zL)pd>*kOq(iavDV?N$YBjc-4SaTEk>-7U2 z0U9Ci$E7etB66esWOFfe5t_bgggmzIs!>>jy3K$wFDc?Eqdi2l6Iq^?C)rdX{12?E zr5Jqp67j|=_wa5h0k37;dWis6Y8FNp95@Tob;&;bn`@ZE_cB(@^;GV#G|WrTH>3_K zwPZYsAdmag+?znpPS~+zzu+{~1}0gjW@U&$8q@c3Q?+d4W4sg(HDyl&3tz?Ku}pH% z+cS5}j$qsHPrKgaWTlW+OrAWnbsN6}a(aoSyu{IK6pxt=m&-{{1e!LD2V&5VPal2V zjQ1?t>nN#{GaM|BmT&6gWt|C{din+%;9C2>?5UF=R81Jnh@J3ozVIu> z|J-Tk<X>6ZevM$z5|+m_tTdqj 
zG36lXwnuR}_?X4m#p2I(t|xK_a8*^lpR~8|=p<&&`W4#;b&eh?Gg54Zqi4F5fXy>Y z&{$8~W|a&G^Tt&3HM}gbRTeJbVKG%`XB9GjKCnnNxGT2Y6R$_*cc5BqDuHs+`hpHd zwDZ%BZae^pm`U_umN8oDlG$1?$yZs;T}Q)A@M~!-&dkUA@uBSH4oIvlHdIzMsW;eV zicnj1wQv~37?s~W+*bcKcU&Gv9{T*9*`7lqEyVH_HN(R?V*9xrXo5`beBMf-g>0k18yl@GM03wfrlgNQh9 z5^!dFrurm5B?oiX@i)or20IgcwLv8x2J1PV536{ehkR8MjN7<$J!cb9T3^^kQkePV z()3;B-2C18fzJo=;;OjobAX3P&qX^%!TI_IHNJ*_KQbOz$uX8-+wXS0iPUAKyqwrPt_s0Mzb5>^dTQw4$B0XzX9Fn-PJ|FiCqCuo*IJuWubc++hbjRS zg1hwNc9y_4ZHPeJz)yEBO>u%tatDVZ*<_t>auKwa495Trf1gS__KNje7ndI&7`_J& zo@6KtL32!Y$74^56AlZ>VUC4~jO_H|J6WS3CEU#qB%OfL5(} z2tf^=+qC&2rhg6+GVj^{><=egtd}1@v=cpzDT&4&Wq{4j-o7hss%(^FB%C9W?@r%h zD{oVd+91x64+DPyS06=3;78e`F3P(eF3Gn#31(_HDBgb*bUJ^2@9}>pCqE)T1U*Dt z(wQrn`9Ov8aT6sJs(stiXX@}hFuds;SZ9YRnGVPJ%0BE zY0!OQ*->{LKQ0an&(6hC_wTXTE6iQa{NsPlU4gNn&kDy&=2Ea#lVh8GFxcnp{64{` z(y{YXEc^S4x+mh2<Jq$ zc)&Ze9>UJ#uS4dj=?d2O_7J^A;KahYydXDkk3C&oLy9|H*0uf-X*Bul7+LO+k9p9K zDF$wzOl8AlmODh=k3Jv1t$eoX^F#(n`Mhaj0|sTUHlEv_ya36s!uOw{1l4@ z>iFYy_;wmB%+4ugskmAHdYONDFS&{hBNNmoAG)V}#)ob;LQpu0^cv&TFB^$78i|RQ z1uHK;DH7F(VHI!2YU+3C#@qy3iAdQFNEAwW(G0vefb0?$v~t4ak;{V0+9pMDt${-r z#rjLIYTbmreYc2{7{dWu$Arcu10%fauiu64T!aU25q*NoQxKPY{h}&+>NbAuwfH|r zf?iB3TR$@6Qe6D%QxN=4>Ks9^*ZX$J^|1A4;xn}P>&W@hg@6-xuloZAMME`-PIDn| zhLK@FHLM0b1oa4#?0MpBGk#~#3R5?ARZNye0CwsgtrQD7>h>Lf{IG^OF?-=}GK7+= za(Qeg91O%dEVK1cv2MUvS~h$QcWhReL9NGe{K%ud#QN10VK}N=Xd^+yt^&x z9dF_eE~6D0oN@>z$(bb?i~1@ujOYhweX$b027yYdjkkpTAv%wpx0|{Eq^c1-5m}E^D%damIr%BAMvNaNcR?7_xJNql1Y&-f#-%8UsXn%qn}84dk~2NAAa_{L`3<& zv|Z9|8Fo<0o_U0;1Z;E7;3%sFeF{AZetE@63XJ5=Oy=m%@oCJ4TkTIhLD{qHW?K|o zFER<~83gRzDIN+9M|gp!VEH70K2s z>?{V5Qu0k~bs=rOx}9f(Bw7u5F!8$4+^mkqAGjL5v{8&Hfsl^otc|UXn!+nzx~oqL zDR)IAQN>h)>A_0U<@HmT01w(6B&81TXsDj`B_ zI$MuVCoAf;TehAI!nC&6!-e#D#QWvul(5If(YJ25jJi65hh3NyQme<N83BQyYqYCVc_G7z{4d@nZmYZ_ji}*j%@?lko+Y! 
z!6|p2)E!0yv0kzylPv%ddEXohx`Cp%Xb`qemVXEMA}eAUn8g*~nAow*U-GAZLrx=U zjX~Y;*O#wqmoWkBgrGTQRi11H2~27AYS3OoX_e&g2S+Ar4(S-fdQ%RyfsTE!h<<1Z zZnFwpwtkphYL$1v2lsYM3Oq_-1Hu%m8tT5_52}$5Oz>Z3_`%zgK3C; zBx{UBF$tknqhhdkent?(2}v16p}~rOpp2SRfrA;~NY{TU+Ux!N9@_O;z`gGMGR^;H zLd1=r&e%MAa58S2k=&Dlo&IrJXi9jeY0CQ?`6iy?Z(g$XzEJG{lEWB(1+zJz;63My zQ2raQ3~CR)jjc>K2=MR%QgzHs7vqe{$z!aePgeFRDu1;j>#isB34gjZ5zH?PCtwV% zbN^eCZm71>ejcE0z@&b#$mN|>k(g*knJPbvAIz}zQ07>xe06U1-AEyk|$QH@_2K*|Fn^-X_?E5LAmt}O8Ya65#BDIXD7_v_iEISsum5e!;9Usp6d z>2JhLw@xFPw2=mH+C6Un_6;_auo2owmnfOs#?vAz9G6n)t(o`MWnxb(ZL2bUo9xHU zH5prsWn)EkmiA9kobp}kmliRo2k_4vt)R%!)LqtF5`1Zkv!5C#HBChJN~9rsOY*2g zbT)0pn=ny>ar4J8#0Mp0aHVwIB&50F1_)r|5!~IDLLOcTFKJ(MaZ&}~e|#^ex%A{o z=K7^nJ3WMUc}-C6eJi>d{^-F$x7bN^IA#=#2?M_zCGtyH!g}_Yi^2#$M}#W=9cU4s zpnTahbRE&rY&aLKEM3hG=+~8c`SE-_S@LyyuU-B@Viv4!ByiLZ+TrW>-p|qTd=9 zrkT6NY%;gCwom_BH07Fe+@PQ3nA)24XR4#}sMfQE?G^7OIh&&uZD-l?Odz+ZtREj1 zZ1>}%&-d5MuwB^eROYf{S?uL8Kon372%RkAx6CJdl|U2mvHJnbtKHrI?7nz+<^J$b;W7T~jT>Q_#P9y*JNNBx z-nn;g@7(nE(%nq3q#B8;)GTfoFrK64MSb{&vDXzVNXSUonSk_*bXm2=8HpU)4BHCd z!(_YBjn;4wq&x~H(T^Z+6eA)99h_7*K@CybRS=Zp=wF1oaM@{lDLwl*}5!=(ZPANE5cSh$*$-XQ)@D9rJ+)@ zHry!Dz(xpmNM@t3y#`u!)xH%5j%%P|)zlhd)<{ZZEacV@l_mApd2`OTYNFW&lMpv6 zm_ic*34wMHSa$KF`xByzArOPU|Kp|2<+XYsiaHk)U5pT5d}sllL#ESBQc7Cw(|mY4 z)+|cJdt+`4aytOy>T>V|>6v3rD|rIfsqMp?-2oGtICz8U5k~Jc$K(Am2;9>M>*)?L zueO7EvtYt=kRd+e@6*%DsR2UNGJEDAn50sXUbjWP*Ett2b!otmDFP(%@U-@Nm#!yM z6Hrh1*MnVrp$*2Pj_@@NkYWySeR#$%`vdnh9l4KhKe`X^zjfdJuH(M^`X%t0xu1Uc z=BHOA|91L&hEh-@o4{xp}Tp5$Nhsj*2aBu58pqz zJ3Q*gGjlxdzs48QuCc-bS1y1YeD1iz&k7^I_hRZ}Po711GdBT3Jgq<%gr7FaUML($ z@El8_kq}Qw>F^I8?;c2A4H6VnFnfzJCSBt@?f+f4+6U!TjPG?VVp@ z>ceZKyMlq1eM%46CgQfmR-)%+@){?%o~t|vdigpi`WqXzYc2IGe7FZr4T`2v3?H|7T+G4wA&YPYSJZz#UtSB*w?3?qgRaWx*}ES zdqpDir))byKnB+xm32zRNSA*c{r&>%p3G>lb`0nsIH3-WflFiQQdJ<8c$Vzv5c@at zl?Wi65Ch(E69nXy4u%~~*bB0t$ktAKD6>C>uHbwZ@QMydkilD~AhVP1#65MMT<_Ju z4L`ea!>=z~?{mCpb~$r{rz>~yrt9AQ@ZR13^ngzrKVfDp?8gTy=wTM}2zN5Q%Q|-# 
zlL5qI4DYjhzBT<<69k;;5#}ui{?il8sS@yugS^5E-o0?wm(SoO$4U?PA2AK&2{w?( z(@~6+m}@-7Y7a9IJei>YLOoxh&@7IW{3q7{2{LhpYQ>Er zpNvpfCC1y=I7Xs{om5hc(C9pAOo@grWo!unUhAF|v)|D+y1Q#5d5Bx8*d#U~us#Ce z3!g8WDG??a#^8@X|1~*&L_?=Ykmzcue(Hq0bi^!8GBrRKoUuIZa{!?$2>^@xJMKUZM)|qzPo6bx3`ZcBq{CMl` zzWwCx-+h#q?yj#eg<`}xrZkY|p+n^vyv5#2ynRc;5m zN%7Xit|TZC;+0@ZiMEG;HAJ2Jun@MN#8w9W0g14_{6uds9~JYCngoJmO#Qynd5A?4hL=P4?B=DremQiH-+pjU-{0e_Y>!x0_Y$O;Qxf>%8ea~h zp&*a~$}*+49FMYvkcxYkn&Z#wgD?`+J_s*o4#^93^!Va<2C~QL9lY9n7yLp(e5B_cLagCyh*aIZ-MXhO(<`+^mM*m|?qtJ1O3>-!Mlm#K6@^y|qCZ0ve-` zF(8x4Ls6{1xbh64lo@Urh>hKXt*tPylqA=IVgBl@6`iN5#h3w7jtDMDDl&L!b9nId zD{|>!fzHYJ65hnNV@I* zGz*6R6EDeu%!1L)`dDq^1w6mM{M_Aqb>rsG`fiNr5##=o8}lhQJf#Et>wpY$3c}Nn zOKXiW)nW_}=@g$dM%VQM-r;v0cl#|qW&B};X$3bZfpcmHn2QTigyrahRk(q{mP}dQ zfrL}Mu`Gl@b0a3O*d(`iJ?j5SWva2Y~w zWxc%qxauUX>8G5&DLy7l^oqzx+j;L5wJE$&S+||ZwqGi}u}EKEO_8znB$$SatX?Oj zf+dSK&{Amgp|*3a06QO$G3qJSMq6gH&TWP^*}=MP3U5=7RtT{+(&iSW9gI7`1Qj&fH>woVcf;u-AWi()Uo zdF?KL(ZgI}5aAnqR02eJM31Q03LM}n?PA)%CB5OuH|^mOIaZ$NbZ#+s_}*RLz_W}; zK&M0WXmLfJ^P0-SSO|)Dp zJea@)JeX_O5$u=rTHQ^_tq@{W-2!|!#-&m=L!cOdHj{I(xfhz~qP>gf+pvRuJLT$M zUE&PSczIXMx99s8Mwq$=nW(h+ zw51^9aJ|%JLA0%WUL5nKI?ng)4rcDFFJ8N6zq)cC`tRMx`7OS}h7WbnUd*lK{MpW! 
zFPL%0ENqu)A^b`k$Z!{*EgszBkvLYO7+hi=F?_<;Sa|@SABN);OR^y^U$B#6;Z+YQ zQ>eA}ktLI>=IZ9_gqrYWKfWXf`+>ofBw3vQq_`x*^bnH-d#@y?doTh2C?|6PA|KZd z<|b1v#7gAB%Y~ZGmnlgkrylW_apm|-W|3g!Bg=kENNhfpS=PPWz)Hs@vd`;8ZzAC} z2ClM73#L#f=-s!p4aB zz!nK{yizIWp(7Azpogxup{ySnc?05&rhz4G;N1oN8;{%o*j{5^?5KZ%PZ0CV zYIr>goRMGM>w*~5xjTTY>G%Z3AvKW7t?iP9GAj7zEnBk)o_+lNu1jp*) zpQr%xNZ+!jrvYuFf_7cJU^cTD< z+v;)E^vv%K1X=K;%4ctRPddm?2jly5u7{<@F2_Ch*~JUo1Pt9hUJfJ4b-khMU*QF^ z3lL{`jz4_*1h4N0_Yz*>UiTg^q}}2>Zx2{@Y>pKxKyomz80)qP|C?c5V)@Pm?E-=B z%Eud8HmW8*!;WGSSM0GeXj4gi5x2(eafs^M+c?_MsL zy%f4l)NEPOdFRcP1Lb1+TEJq0HP1OGSjmw-&eZ{$(;`^*0ECzWGe5=5C8ao6%gPdX zp>Bqy$GEA-5eF63DkqX|(;t$8_^mlF)|PN1xH~79Dm#Zqw})lKdb106gQeK6Ft_;Y zmsn}y)s1`o%?sDZvTNVH`y2P$Z+~!)m^=LH?haTyfD~iu#QYKD5%iH>P_DGV7wx<%7@3f2 zCW4Keazamd<^k?fLo77}i8lL7;1ZY*&<4dr8ew{`djMIbXLH6?BJhdg89c>< z>4kiJ;(6zd`{S?v$o<)W`Hj1HId*^f*ZAPX{olBc_*C=t!vn0wA6}@#mn=XYL8eKN zWS;R5gc9?Q;bG<4zFfcz3$_7gPP1r(5R+*&W3Hqeb|fq&cG*fv8OZ|Fi>EP11SVq>{rDTjFQz_&RnhHENM z$NBi2lEN+fl^YVA;YLbtMCmPt7n$L>!BUSgrC39w0f3Yh^P~F3L}J z4arvuLV2Nutq4J@kpA=Of|$;*=q2w{svD zT*+QbeQK~QUQ=WOkyx(>J?ET8UO6Pd`~U;JSbS_w(#t549?I913`m|qp5Q6=pS-Si z4yuW)w@Wcgw~~=aTa~qt*{sBcw53xE8*)EPh;ZXMR(=3cnPES{zJD5;WOBv!t1rG3xV~z>LvSDFLN#_PRfg0lHSh8*ooacDykbhX?16MzX z0Fp+=Q8xeZiD^#dz=qp2(Lyz0d*rkYJ(C0rKa8nCo~H@t%ab%7(3S?Zv|VX z7FEmoLavIh%3A5n+rLucN}^yrLbU?JN)|ccE$8~1CDE4FtzX1>28Fx8GLMjhFg7Z( zIiAyNR_{}b5)C!8D@Ho|B17rOzbsn{$`6~h%%h_YEM4!+2aTkxttC;4geCqJNmefmS`HZY32+P_q zhVy$#!N>u7u$^@U;Z=WpCo z57e=r+9GreMaq(2qG`c=qQqj;YBh+j$ylKD>%IETP=-$z$ z81CpX=L>wCP9ouW84<)r$c?ZuuEsz@&AGK47MO?Uy5_MIF6D5ZbE6_iuUja#u-vCy zagVy@mAQOlnrgaG9kc0^Ov#NE6q8E5#>~(@($1D?o~9w$k?>O}gee zva)>|hF7)rF{|!#$*X2CYmmC288>g#y*Xw{D0{Hj7GT*Y+BTPMDjJqaO{H-(ON|Z{ zxed7u3mQ^yCn#(^dcfPfO8dO_wzQBQt?A-uTNERT_X`d6>;D14ZSVr8H86xi`gTs-UJWgeLdfduqK z@k?{;fk=9T*ORJv^uN3j5Qiwn#TmYwF~dCRIaZax#9;jIxw||4P{rNdcW%&s=N=yZ z&OMHP#z!_DWq{`AsyqA|9hnRW^lXYL6*DZsHo$ygtW(^XU&GUO30W7w`#FvmsoOd= zn%%g-TSSi_AHyrz$?CLH8wCy0cEoZ>q&NiX<9MEPlXf9T{2@V*hLY_Ww*E%Rg=KoA 
z8lVK;M6ghP!HxfL%n!YE$iA5ofAo*aO&7y6Cgi0QdmMS?)s#j>ji|=-`wIjPXnGtp z3-2$dFOd}Uua9~Bcxu6?Gq^VS5*|0EXY*6=xL1UD#Lf33dS?6*OBG}!+D{ZMD&05k zM~PHwLSPRFY(|Lppl~WOA+SvZYIp4%_ct63=xLL!#18y`%4rfL7UD4^Nfx&t23e9l z!?2YHT0_R(qcS{tcu9~1Z1jXL(_tlzEtOcE^&tG0&}d_2H!LC<8#h@*$yw^#caw~NQPKl zAd|9`2Ms7yYA!gdZ>HrG1hgMtU)sJ3!klNx4=B&7`8=U{P)MPy;zocpo*P3szncGW z`-NW+b*Rm~e9=w3u!SDQkz_x?9}-)(9TH;RywF7T?E9Mr;^J%Ddz;8%ecdW3Riu2n zCcv1_$1|7KQdU2zKvT9nPh(^X+&Ti)`mKtW+B(o1nX0DgECzu@h+|Mod7=@pI-6*r zkr`zu!%t18>KM14bmj$t;=m`=PB+B9{| z?o9=*ke{{=;C&myi+eS^ahJoc`}A-ROeR=m19OSTPwwvN4%Fm>8w~HU#xOW7ZX)=& zb$o|;u2^EN4^n*5dxjM%UIF84H-35#cEKqH^yuNQi|GRRSOsQ*#uH*8jlvjFHvXY* zvdJ0Q)CL->UVlXMzie_ZlvO10z%=zf}k3c-l!Cdw}aFVsa@iYe+P>+hOWRM@C%WXCN zJU;9nwAg|qtDN+k5Lg`n-=3Os_?xe$xD?imKr|uNidd}^Xf8U1?hb)aJ%z$|r`cXh zS}@;tE;3kgcwC5IO;-+(I!H3VU)NLzQcRN|Ato8dfB%?T2UE?nS{cWgijV5~sf@>m zC-?UIA6@@?;6DGw7jD?UhEugCkErP}rx%@g;b}i89bW1Jjy-sSXIP%CJG%l!dFiH) zH}2hcQ}^?CKfBT0C5Q~Y!kl)3DFS>U412C`?CVq!TddH<^8#1#pbAA|J#i`l>yp(* z5|!0eLw1=vo;_jEx<@`C>yS;)F^edE@&e;)-{<6>f~$lLut8hKfqE?$`H_by$( zdj%rMum5o#arSf~ds;y$qN(VKlT5DAdtVHL8@A&agcUco58pWd>(mL}u)?R# zI9$8i+E}K?s(idH`5VcB?O98$iBZ(_w-%WKq7abFj14ZZ`F0YBPQhD5Aes>9iDT~uV+OU zDoP*z4?S|EUVV6Q-~IUB{e1hu{o?lCJ$v)oz4+{vcrS@Xw~x;^gS3q?&f_UmY(VOFq}GP*Pdjg048QK{_w9%UVRwj*V!nPn+TSp@{}L8 zIg)rG^3bDC?|knXp7~3>sQ3sEDCcBrcB;u^Vh8@0ZC<{=4WQ23BZj_|UGMYjUFxPl z&xi8~&P1m301u=Y;8W_nh8d1#+^{KNK7T`kdJxObf%Z7plNH)E#ig)T5XjoaqHMmu zkY5^$k?H*C5jY}3jAj^WiGjwuaqVnPE=QW>+Q`7PJei$hLFW~UHRM^j%aviJGsLeE zm5&<@dDa?)D)~WJpt|O#4Qi8EQ~XqN6es8yC3%)d1&J3<=n#kpJxmlykFczeAemXF z&f(-TPt2VcrJ{-=%0~G6kSp|4q349_B6y%o;31G#bCc zq`=rzja4}S3_p#{54>=RI!+V7YIm@M9#H1Pf0)+C zn%Fbs(U$3DX}s6b)KuE3&sVj8`I=*@=@!ov*O;7(lB(yv-=Ji%DZ9p49_wAmFY(K! 
zJn@CtIU3s2h)V!cRvC#*zs0+wa*@nbsvqX|%X|5k8OzCwbqFhz}JOli`Oqy+rDe-TuJiPDrRC{zL=#?15|t_4+XK#_171uArdSh3nvyIc5P6)s)g!t82zf(SDK%Mek0C@pXJm`umEn<5i58_o`xt^Pp zW8%{0&z70DkS&+6YyKtrdZlwMvID_jF|gh95H7V+7j(1iklxw=)-b^=iRdQvC$ zTKwI7)Aql<9H6|7;*Em+xZac`G!4Zh#8#teDx}pgC=;&Wa7?Q@CE>e6z^Y;^t_2PC zLdMhwndShV!RZj$)y=RzJc={EoP$3NFGvcy4`@_Yc=KMty6{^tKUyRoIid^FNfEtW0Kp6%pv8nZyYS)I~0*p zi)3mtD&q`$ty!&8k|WwCDj{F7`D0D3;XbTLwaO85DbXxQBg2$HW?wy2w>e%C!vqPe z!twXN{+)aG(@*Z-{+oa6zW??uyzuZC<8M5oWtbu&?HW=bLMU2ckV z8LRe7*Sgc%W@+*?2QGGEGaL?NItAAcr&dhM8xP)dv`O6Cw|c`8c3VXoDtkQHt^VDQ zJI7)y$H^d|y}0jm*ujwBD77KAXxg&yoB$y;^2lluc;w8y zGT%^=!T%=ng|jh=A&E%L+Z-i~%ul(jc?tn( zL_3l>HJnc_F)lw%0|1Q>`Vjf^=6Fg5OaRI%z*{IKfinL{`LR*f4JZN z<~Q!?_6|$mOjw^cbQ{u9Iu}Gsn)HC~I85J8=*Ecd7uw?0>YjO4RS53*1 zJkg-7a*TQuQjC3%Hx`&f!?@yRSo@6~sww8g)&Gg&n9uqhI@Z_G} zW6H!FUsRjlxU2qa_Xr}~0e;VKu*NX6 zg1MX}&#z?`ver0~tSlP`nQR8hAKG3YFDQ;C_(b-EgwDD5?>@NO+Xwe>kFNpZMxu}P zpXsURlnQ?Nj~0*}3pNxY8@!e<+IL+DX(2Z>o;ESV|=iKO9b*p zY=T9xdCrYDKy9e$+o{)~yx0Dx9FmR?5rI84VG{;z)1`)8N`1~ZONkJd8bUfeTm-B^ zVKc3(1KujKye=G!*rljvnB-SIMw}|aG&-0HMH8_MD-W*qvYI<6rt@Xi^MEU857rdb zN*J}Y^a&Lzq_e~rD96()dF+g{$%rHtpWo;|7cbld^Mm=e{}f-PySnUQk9orI)IMSD zV&Z*qafz4Tq!xq+D{~q_1PkJyF9C;vP4tpDJ)&(-j}PwS#}Ak@{NzUC2b4R4&lPW_ z;nT-m$i{>i=mFA1>;({(gL>yFJ;c~}GsG{#ajM1cHk3uPp^N^ZeCWqN&TXH8sXso9 z+|Bh1H^)a$=;tPTm-(?EyHl*=%&#Z*hRo!(=0Wi%;(gQ^?8|MP$AV8!uW_EsR2trG zU@NCR188a{TK4a_o2u!S0ulnt5HJa`-MI7Ka#vss16R!(VP%kRU z8js{N%tRrZfO*-+n!+GvkB?(F0(KXe$9psExrf=0z;BM1=sND3KlloBVBfoU@OBQc zJlkwc@<$IQH-4>-gcx71!`xyweXYU%Hv}`nS%-e>qO6afFkJ&PF8TvjG|4burUMa% zcUq@t(2IyT=Hs3gL3i3%!+@~=0yzJD*}hZA(pS>3yMYSWPZ`HT7(9-{44FG?d6n?0EtRO!$>M-kidy+_ zEg21iXi*S!HtM%(k)5`Xi6fL*-V7Ma$!htkk(->+$U^^0=R;j{bx9|)6Q97x8+C02O=$>mMP{R5^-Twt{cJf8l;U;WbktN;FAy6=AY zo%?_O&0o6z^dEl*XYjMk8?UQB&@(^BO$mv0h2^rO3XuLJ+h_I7>go=sC>zr@@HqSe z(yK*Mtws>B~vWO#s^jC`}EQqE^ zG?nEc{(+bi$0=7zfOngZ`?-nLTonW9DM9h#O&+kr@@&)j7~~jrFndJ%#!Uvk3fS*p zWe|{M$u9!rN6xE1^>EdwnrgMPx~$dmEmZfanSi6dhp0FcVUH_A*gj_HX3s)R7P}u;Y}VxmZL); 
zmeb&v`7F5$$(v7CEQ^_Cu#{20S(F_br|Dv+sKm%FX|vcxJxc(g!(glsD0UwBqi0u^ z%|eHuo+11q5lIJsbr0+?5^{bfQ;o+XA;Y|u=skkFQ#`}en|bpN7DbrX+oGxz;5Val zi=Vkjiqc$Yb7-cFbe3icuXV#-y=W96=zB5P%OQ9ty|7#hS2j2|J0NlMDL*ybcMtHu zKmU5*zWHara$o-M*M~`y{t5D3Qcma>z+Kjwa74dDr?@_ne zA=i}P_Tb*(y}T|y4#FuKSns%h1-aS=BmveXLv739JewlHL8QE%EN_OyyD4`_YH!j0 zz2PDPvU%?HVOS}?hIg4-(%zD1l7w9(F~91EDeVx4l{pYJ1~KjrI5h$U7w2<+$b^so zSq5+JkaUD8D#PwIA~A;>Q&l?Md-uhc&#~_Fm3zF!M^O5@atBM2(pVtMWnL{E2ciY{ z+t?sEwxbRf52ed+m(wAf*R|hhml&4?5UUQ_qTKAFNz5=mo1e%Y;Ez*mbX{lWV*BIO z$v#1(WD?kpxUnFX!}D@H-HS|x))|DHqLkRM%tVw3GCYn;>N(stC&-OT2fkxhkG$~5VEF&B9AP`E+ zI~#%cd`w=5H>XT2q)n#@fzw2Q7aXtpg$u9LCuDYOrzT&a19_N%_~J+6^pm|5Lt;?R zd5FN7T1sRn1;ayeah6)r#1ji*@d%GC@#P%O0aix%>dUX(o7ZnZh(Dt0fI9wIjk;-F zE!L@zt?r9~I;4kKO!=%`=;8-yJx8pJYAl~2JW-jNPtnrmm6bs6b-ra`p}||Cr2yoh zUAd9jkBjx{@3pR`;S6t z5YYx%3Z1;rY|lj57CY$s{Oj&LVomv2OIm47v8#u%4A!}S36D$LO)!OrpJ{=MZjMMSLWU^(64PpK~=Q zm|D^84&4J@e$xso*BU)@S|2{NYR6(I&pjboj4SG&X&9?|YC6eD1yxCL_<7{m9!Q<# z#X7Vb&Ck8&^`sdA*=G54n1s4WiTU13V(eO*WW*0~%Iqri;(Z=Ii~Tfqzr=@9{@efN zU$|fV;n(i}{)@lCoNjpE-Mtn?o8<<8r?X0egn4=cgBU4C1;K&h3g!srSaJl-Su8ldR zCtfb%h*N|}mRet6qXdcrItfCC3+7kyjDr>9`A3J3yfB9(B*T1Zj)77R;^{52Hc4kC zh=}7{+o-LTM}twCENP0s6`pwh@zcWm+%Q)~=nRGuR%B)!+4!q=tl!19@AGLmr^fgr zt$XAktC6-q66Dh>(e;FeNwif_7gAm>yN5O+d6^%7l9l=R=QjqH&qL&xvKW_@8(W7g zjyfSfX}jo!UsVpaBaTl!1{Xujya3L8dF&EXgg7M#n_%I|paf6?uNIGsBs zRorY`3QY(Y2qZ#mfO9fYp-wk-Y*NKd138J6cK!uOgpq?AH;!#Oq-6wwfr4`gY$iCY z%^DOBOH4HvRRwsf@*6!A%n`M(zAuG*#voU3S?Xj|ZP# z46y2qe;0u(QAkU`4F#wA9K|MW>L@f#N*V}Q{kw0kM)tFeN`&|%j1Gx`nio zVZ|RI*uEU@{V}|pc=7K3(}Vlz-H-0m9X_3X`_cV~;{m=7=p2@`V`>^yCY3nN+mnDG z$?~6*8iZ+Uj+bH~_MtxIp%EV1kQB4c@*_QSc}aA!-T;kUf5g39$2nx_AR&+t*dYRE zK#1Ad#BQ794V2k1P~<5a`pZ0S>4BkZS+0?6X%R~ktRj3N&BE!@A|eumNRfwyD)?hB zUey8_hAWtlj&*r89sfD->Dt1cP$?;rJoqUOlwMxDd)0Q`GDOdqtTB03P%ey{;!T3V zuc`56FM2H1FHCmIdf*=w^EJuitX@RlEKZ_P60Sp^5(yhO$;MRl4$B{s@%lwk8mFaI za3sOe|JZZdMRx8p9TxRE|t=2Cyo5WxWb1E=@+sS>!g!VNTUl-Nd_ 
zHss=JAItP@%GyGhSXQ#DT!iYlYelV!YC{*RyfPZ=9viv_@RkxrQexsS5Y`bLqf9IFzgZ3gWJA8pW6o(0yr%faW%)R5Kt$sPsl1$z$kEbO zL^hLwtwFRB87$D3!M*=OWZr6c<3(K&#pYBF|$`=i#(XoD!3O8`ni&-P1g~}yvTwEA>!%X^4Ncq7 zN6T#1wFZB4vb964GS?Qy47^m0j8@%S`3#X}lx3g39A1=KPTLONyao()gK>Hd89CRE zYK0K5twqX^5ZD_6xY~HZ;rjO%CHMLQ3?3chD_=d3F?RI)_y#xi^e~Z}BETO=EW#vVHj_yefxEk~yt`btRp+ z@5zyl(s*+tL_{jy*B|y$`I?61h*+s#65On4Iba?VO3k8;fh!d&>FJ?dmaR>L9XFXW zU$A0kGe zV+0>b9;yV!AzlZqly+|jl*z%il<&}BF*zU2(_6?n zfJ%sEI70#*%v6_Z$18Fplr;0%kxNUvkv*@%Ru%_NZOTrfPXA9LT15r2R=< zk`Pf=+(v0qMpl+cfxb%0JrO1u<}p(iddHQx<6P2C)-4luM&)CT0?`a7;ludhiW68K zMb;uFe%hA(HN?J!xW;c;i>L`;Ia<55sMZ3M=r@R$_O+Rv4N?hscx$1!aA7It6c9KO zLX2+6x-d?mrq104tA2$TgaEQKY961W+JBM&@liP@P(nWE0NTB8sQm4OdmWpkS#jm` z!NIDW=-`iGZFaHKZrUV&LPhvhu#B)tWs)r7fX7HolBp{ukm%y|H@@{xq9+MxI1@@n z>;_Wz(-~K?z;Q-~*(QYGFgZ3AEt<%=c!jAGSTcz?Rqdk}KfCeEtX9$T>qD@YVu>uLPcR$-fz9hu zymS~yxPxHPGKy?6r?-DSMRRndjq=8V?USE))}`2#{HknXrRs(8T(9)qr1o{ZSejTB z!aE+)#bVO=9ubgc9k~9r>f^4PJ(^T0`a}qE#in#}W(aIDoM5;DTH0H)qxToQc=?12 z^YJ>oI`YVy0Z5K(zA`9irVMo?IKVz(QD3#1*7NA_H>n$Ho2d&5$x5gzjxffFW$?Vp zhf@$nk1YxDa5!+6R~Pbu3D+eV&i;hxQ5-7|FOZG;LSjbKCy-UHa6wNq$tk_PMY%ju zPn?FyOPB}~uQWyoK3%xD1ffQL?>R;^wlXQkDcTpOOK|Jqv%pWXg2tT_&MxIqKL0Wj zo#$uUWUht{$*ua;INmfcp<*VO=Jx5$g5G2(E{d1ey`ju3h??$u3&GN+UL?HQK1hby zHgHn}Qp_7Oc@Y#h)>>bx9n(tJ(xy^7lFd5b2j)96FBNkJ2qZ#$29=ylT#kSYOfpjG zP|kBW?WBVzsp6^Do-Ezf-zPwkBIO=1E+1|Tj`KtB%{B^|2v>3r5z7Fl{P)YfTLaoR(cLWkA-T5gSpKRMY0!K!O106>ocr~=F z%vPKw-O3k^)0l94s6@$;50KSV>YFIZtfec(RkN;l#y;QZoYoUoPi=Sn_4MJ8!rrR7 z4Cer%Ua@?>C{ATSBW@rq9OJ6Ai}?zf;c_>NvFxwvW0zA+E0tzv$YTo|bN_;CJ8KO0U~kKsVFesLlv zc&7+?biRbPRhP3`%#R^)F{n5<>Pm(_L!Uy(!Vw0ma5f!K7joz+F8XjRV!}cj#i94v zXtO%gV6V5dM6Z-Lrc^hG;>%O~@-gR@a!x5%A0P=DxGOLde6p5vhe3{nh%qfTAvIo{ zgcLFvT#1tS($CtUYFXh^L^+2ag!rDjY=i^Cq~@Wv*d=5~ql@1~O9~By4;Bfl zN04vVV>S6IXH>b%m)vk5Gi^J%9xgxO-0h)$WaR_QyOkH&Xc=t9F&i+w`h2@z*ODIK zc#7!=6L{%n?g~>Uc#}iV=WMPov~fo%* z0mh!_)i-U16Kab{crHaYCBjDnJ$oHo3#jN0=1_Mg*RDHxfs|u>u;VA!8{yh$+;RmG 
zTRv_~ymBC4Lla4!UW+&Hdtu&`*k;uS^fTGmYoHr8`YW4sgzh-U2p%&iSgEi+BErY5C zBNUauwa8rIs!*A7J(i%&MM?kLtZOLr$UMX$Gwg8f{F~B*E2$}U3Og+Xq{HCLF4={{ z3rk<5!{>l;rzC6M!enhf=q5@HseHVMN;|D8QmWUv)M5Azk9#@xX>vngdntV$oF8QQ z^7GO`q?#oOwL=lfALzs#2Rg2yOR~t$Qnc}NV3`|Hte>)3axM{0)-b-q*ViuTB}N#h z!>}y~F$IT}c2LetnJaE}WT>YOg-VW)f@PpQS)Ueh)UD4?P=om1!!!K!G?I8$fvkrd z(Xq44RknIHYsjG2cOM^aLEbTU8hX(~Opp2)Uh5DYUZuQ)N~2Y?wo0a>ByTrtgv65c zipPKuH`a4Lu|R!x;5t^qf}1tTQ?H4&728?{w&Lb(ms(Pb_R53w0Mlx(%w|ke=;0u6 z7KHe4;7*!^yW0I=qr{2bKe|EuCn@66S$uLbB`f3~1}yoP4O|G>`Ie8fjU|VMFKA8u zOPdb9SpZ38O)zC;c$D&$4@E}6O{ALWZ#mCK$1-q{KteA5C_SqYA++=eb7}(rajvVF zl#vlmagdcHSdF;J;oJUv67cZwB*gRb3adYGZYR98{4fRy=yW<(&z_XDp5`Rji;xN- zl5pxzd0HnjWdn&xl>3;kJi*-3`}+qWof1c#DGYx)|5!4wr_kD|RX5%g5UU|p*dQi6 zp3~D#&wHTm*Dh+s45UTdUbRA~R-_%o#@>89>nl4(S@f;}Jz$7-inc~PX_wseu#Rn^ zf!MfQ0bzR&W_#Bg1*pQNzjuw20_TK4BE;uZ+lOGQ(Zk8$zhfP#4hadpgzSAui226_ z1w5Bz8j>lcVmOg8=x_$2I;2I2k}(>PYo8s9RfuTbin>r)g+!#s;9kVn^H8Q7TuHBktdKm^E!=dtQJA^g+dC|&)^(To z6m%B^dWtCl%&99<;3$(L!dCoIlqsc{&cHMNl{eA9Kge;M!wdTe&+yYDr-N`sf&o0U zP!-3bh6wO&PNmUy-F$etgyqNCJp-C^reRjEltwiA@CZNR#(|IeIsJqBTnsK;hwmcp za3g7qrHdgo)#f1I%E>|^xZ1ZcuGi7leJ<$aVd;*j@NpFzE~ zzh!&Z_MH>sLpEv+-L!A0XIOrNRnL<`+Ckt92r-(WUSI7Xn4}&x0_=Q+0H8M(vV}*Q z4B&iE3|o2U%s6h^X>;&8jz(3GCQ)CQnI~=BH(OB-Eg!24nqCjhD6W54dF!}fDG%4fRW{$N;ac`3#70!C*0B=Xt>UV)(@{J>63Q3 zLG`r-r#1_3NxhV|2pnP6_NAcsYtUaabxq@a3f`Ua;cJc6>a{aG#FWD-?(y!)MZ)3>Iheh)>~i~FQ8Oa6I1Qr-eJbp#Sj zZ*WJZJ7CHKRzK+RYjQJuKm#Vie+g^H5Ut)FsYj^HCCoTSSkAM0CMKkU{36_FG#1b7 z&CRtNfEY85kFz#d*iXxYy_yZ zJfc-dE@^{2Lb<#-@=q=}MaF73jRs_aqR;vYSt3(hLSPRFg!+`-HtvHa#4C4%NEm{1 zlbVg~q#}CEP1IbH7uW>BA#TA)Q|@x2RV~+JY31{2%!1d9RNHBzx6|)AFl8PxX+QpJ zWO(%jZbd$}E^l7%O?BKfe_1Dg>~+&~*q0L)b|&gA^=SzMhGLvDT^_nHAQu?v`m;qk zIfDEumtb9&^5j@ADvmmp_&oR1X| zK#!3I9$u6^LA~Joe9vL^f^je7T8Vo@4!EWe2P{@o;m2k9E9I>`1(+t4y-> zS`~Zqnpt$VXEKCDLJ__k*B{U|hz~T3@WBtfOn3X~-rat@hyKu2l!fNmywbdGbKQwX zwm}8}z)5bQk6CeFZR5=3LrBQ^q8~li|LL!PjhLCcyts)kYDJCKO(vGF(x}!IY^1NY z^@W7BHpBYTI-B8106+FZEGKwzQ$8b(7kFp*Dj{yq3PCVRZ|jPn@KLp&EvFM1u}z~a 
zsC=Ba!*XD8%)T?URl0~!B7`qq|3*?=FWWYdr5TuNgR@jxKhDB*t+r4t+G?k{l}NWrbd7=`!3RMtcY0;`}OHU*4!LExVQ#yd4mw zO46|+#g-@S?I@*A2wy1)e7sJTTS}IbQC*v)CQ5rpc z$2TE=&i`%4o7z~)(AJA3#V)qf!1t+v9U%Tbwf`R1eU~|C^G)*gm9UwA`*iB%eDZRO z9IH*}9kzny-Q)hG2Q1)E^Z1wan^BUvs7`p8S0R^0T$(dab1>P_5GDwhfEMQ8SJ z+f+hwve;D>P6-s#H2ugnC!uklkT0>8JG;8v3y1srN>>}P(AEHTA#9_*Js0)(u;G{b zJ>eP&BoZI=rN4AqaOR;54aN-hLECz`MUi_( ztd}+neUOKOcSieKQ4_8zubpAbl}mwKB^G+ajQ?<7R@`TgKM5~KufkGR*V%F+R|z>y zAd?)6T||~rQ%r3|O{u+rLFneZ#@5nneAsq%c^z&}js?6&^2oRreT!mK+nd^He5uH4 z)4_QFFne}g z8Gr3iB(7`D^tqi_C4`y4@Qpr%l+{%x zS}blZVK(g}ma6vJT{HHp1OPr@l@Hr4&M(90BYilQwYT1o)XaK1=PP?sfT<5441=`pE8?QATUYp4XBy{gX_%OANflTR~&-n zK%I2PiR35U@?cg{zVES3*Uer0N^?DIQV%0?7j_zW zC=KiY@k43y2gYa9jjyT-P3)WQXjS6-okj4_RCv=(*dd3Paz9-6Jtc*__vL20uHHBS zThq$iN}mC0m6hwj+Fg=c>GGqxGQ-OmwbxK&!PEJA;p#8?YzAQ&oyo+xGfgt;lShGJ zt%IvCC0@OlhsTe)VNWD%WD&Hf1?k8Ncs3c#^`!(G)|bdu2;lttvev4wmC@oNfi0a6 z4)(*rBhlt+%tYK6Woa$yF-Ht!!XZP9rEfksV!dNNu6m^v8(0UKcCv;tjvgkXF;6j< zabtbPVLQ5`u#S!UVHuATLZ`dq~)Q#y;Zu%!4AeJpV39S{*hm*Mkko1F9eXdm{4o3Uo>f$DR^X)gP zpUpY6=CM|Ez^B-W@p)!b4wt$LuACEZdcC$JU8T&MJ)wn3ntDw(gkM7ib$+jZjxb4}s?>gcX?*Ho%^yglPsU?oKfJyq zjHjvu8BZzBWysRr!{{d6IYvo+@g6pAC-Pg@K<5|3|D_l)n+8{1GS>cl{lLv&J1#t3 zZ@9d-W70s?-exQ}TMAd>ihH|F(C(zNGo0vbL03csv> z`xq8F1w%>s^PeWAu2v}+_QGgiRzk@<)q1c&ujUzR%xN_!wRM%7+V3ClHM3gb|4Nok zWW-7dSgy;na{c)=frYUM%J$LVqXL~GPx?qI=dn;aT#bvd1&o!!Ytpaj!gT`mth+D3 zHVl33(y$lw%0Enja5B^i+-Wt4nS6bh(qmt_`$0RwX_>U%ob`PtVb&YVy+P&s$KgsY z3Z0?W+Z|kl#pJuN*HwHWkhmNP6CP_Ns!_OE${j*Z!-sw3V0~zTN3AP-dU_g8P6S%D z!NG4nV_7h=0l5~F#nq?#ZO*Z=LlhA#k9*Hz&k z@3&0q={f;-_#|JU-0FmO)Nfp=;9V|YlSPb@Oo>P9v0>i0k4_D9<&_q?aBwyR!3hj^ zHNn|KqgH*uGAetl0GHMU)`|(ZIIvdmVm=jQ2HUcZu(nGUu%8QH4*GM!X4Zrda1&7J zv1)|S!9ov{t0gPrNHtc914uCW^PTj^@BBeo}HiR?_3k9R%4*uxxATzyf0cZ zGrD^8?Cd;@4o2bdaNlHMskELx;xiv%*fr=H9&EV50|w71^tFGyhu-*$t69hb4OnJn zi5b>m{bLgjHfXqCaDf17Jw2!dJ%qS;z(MMj?nzL8$%|PL(vT)mdA+VH(vlobQI0^* zOXZA7@#DOT(|ROjHvz;tZSCB~V=Zl)Q)@-}Q+b%M@=2x^W?4}y>(}lrg<5zO$rPtS 
z_wMV0(+3r~&wc8HQosk^7`tcKL+HER&>bLtxBKUNmhLrg{@=6oTUD|vrbJdY=uUYW5a~u}SIqCcta2{wA0$I&2I$E`1BrCiee-~Om zc)7Tgg;by#iz`+x0l6DtwWX|^eQmy>4YWgdekIU-rbMi)PsK0#2g6x9!J=*!V*%~5 zor;l{sXoUajQ|^`J=L&vJ2H+2rM2tv+Pu zJ~awOXjeb_8EewAGu08E2`qEu%>URPn_#Inz&yuB1cah?kWKt90a?6`yOeqmT9cc0 zPmXP7e8t{O1>zA({>m|cj*5^)A>$P46zIpYFH`FGdo&qyxhvw- zTTsc|o$NI5UTa_nh~Mk(d$Yn0 z0Bas=`b`s~9ic;zdI4hJmE%ZDC;x_@Miq+)?_g1L?;5NDY(h zTJBB%7ODhIxiw#90Ke$7fbQ^2pq#oW8(ogPY6#w&fXF-#xadX1T_q_IPi z;>9>o9$7{b3mIvQL!Lrjh5`r>L-Gsf)cUZ@N+j>bQF7&DEEg(WLwVXRzSF>mqJbSC z{!n`IEw`h%?>}U{V%j!GRIH&7RA;J-w#2Eh_ZI*I#zQqa6&_CbCTG9!ea=sEE_H8k-ifKJy zf#HFG?EtH!Tq2grUkp_END%qc(=hzSNjUhUC!uqCBk=noeDcRX4i|qr375aR3|C)X zg`iC_`nWzwZ~dYCi}i0R5IhrL>+}wkybMEIMFN2&spMS%$|nKrVeFb)#rg^7!I`h z#)+)Ya+I(SJy>H|)@eO7O!47V-`LjHfO?hj6*1s!Lg*NU04%37C7J6-)M>6mBf7&+ zCjR!NsDFFv3T3ehg?M;>E4vu)v^5_ooGU<^^x!G+H`3dHQM@8l<)l(7oun~lT=;o_ zka#w+dYyi1;`b)gioR*6lu+aepr2UgtYTix0Z!DrsMDlMn-l1TAHmWbQCpi)9^MOG zSytIe?&9n+JbuateB3Vi7}FJRe2ky5p;)1_1hUc1bT$*PKG%vE6CVR+nH9COZ#CtPbg>Pu~0vRI8( zlRmza&L~5`3e2yy+fw+k-c*_cV-;{3%U>IKmRkLZzwN25yOpK@2He|IX75Y?R zAIns3Cs`SC@{r3-j{dAO6O~^ZNnfbVXmS=C;&Ab6J1N4isSQPTSvw7U5E@8VP`xpm zro}q#n?BzG;y10~_m;nI)U`?Yy=}?{!F@W^rOMT6vy~3Ri;u$MO4eln z?V*4o_)y^aAO3g041ap?v+%3`{a4|4|KZCp7dReuwOWEeusn%ntyRr(k66lTjL$IA z@KrjSsj#8OY#~Mph03fwmQ?&Sf!-psAu^rlqrZWCe5dnpes&Q~A03C`9;;`lZu>Az zz)8w5jhVik32q3;Cqm_s;mfUEoL@LE$5Sqn0WGUhSW_!ksN zAWOOWgWdZE(%a$SFdPdIkM!5ohkIjNdTy+?oQH{M^wgohxT1;iQGb}ukVPQbKEA{6 z7;s4a{%~ZZ`oaC8429OqB#W)fT>#V?u_KmSyfvww>;H<$9k$8{R}a@*96$E`ZHLhs|w>V~Rm&kD{@5 zo!hCBDG$H`oB*;sLm+@McMf!hcL=O#FIG&#CcTi46}AkNi5+WT#d%a@ir}l`{tL{( zOv5D$#nKkkm2wpdZ@<^&;|+jjyu=^ii;pYi?N1r|@=?`dYGE>4gpsy7exY@IZ}#Tl z=ue)6pZ(=8!r+gN!sY2IynLkB^m{TIcMig#^xK>5$rt!2%twnbnaCYs`CSNf-DF6P7HWr8NG{*~d8smK3w* zOJ7wjI6++pd4JUNSTt^bIFfbuMjL^gX|;)i@ROhZNPQ?cD4=f73S!H+=?z2;FxToC z3$2NKcyJJ|_2C@jh+XNv{pP!HscoVA+GP*j;y=tW)o;CQeVkfy`_bgJ{-({6K>&NN ztB>jQK_hD`7(%L-@Xjja@FYTBfh?#K$cpPO@iycKQN&ymjPfMb!aKl 
zx2fE63!%~&bBI{Y11`4(%H8LZ$}ihC-c9LTc4l|y+P1nip1J?n;IJPRL;k!CKE|BH z^+P}T1DU=T)foXO~Wpp)_`nxO7H$k7q%x%11M|Qt{vQc)Q3(5q% zo9@zB+|)MX7{~7>x_Vx&x4Y_QlU(_Dsn*$kA9@RbxGKu-?8DcoNWe6)BxK2Q7it5g^t#aC(U7 zg@faR@W=o3=i%U&kHg~c4a3#5S-3uxud23K?g}jK>))}qRbEV1;pSSaDonJ>g3!up zei4QO#$hSzY5%)$@N^KK{(Kd_{I#}W))KkHq3o%0IaunWyBWZvE^16*yt#gJf;?W) zNbQ97uG3-(p3iwml4V*L&s-<8T-uTRiOruDL#kXS0Ox>*g{;?b8tsk3Y_bS@d-||V zp!%DyUW6-wVcNlZ(O8HbP(rO>2kKWO^_6ABAIVXI6N8jhK%z4Ivj;lqw4;ZH>HH@2 zWX=BbfAmaiDR;t)tG)32QmcAsEUn;YeJWaf!ejleKV(pwL7-+O#zq`vvt!hgl@W=_ zhMY=ISunKxNR+k3+7aopoxpJp1vLV3r}Sc<=@Z$ z_>*w_rzhd7qq8tSorUf*f#+kbrZ5h1O#9(V*2~$kTpeC%8)hwIb}YbnET*Tc zNqDq;5xR1XIDC8_RtK`EUI<9{Pr^t`x=p8m68;KdJdz$3l#X>cJ#=_elX7ZhvQyUy zbalqEO6%WB{>*&z))$z)7O2Hi3?RlDybwsId|zuY&$VF(>mg&o?#ncLHNBC`f`IXK z8NUAVTP?SyjXl_QS|8W(ppkU!5=J?T&ddaCiY8HMbL4~t4Ws!?mTNXQxtP8Pr_$Sx z|5Wm+kH%W!?tJ{p@)=J&;tbR-fPRvp@FO-bOuAd)9gd62be&{HFo$8}d%Uy^BVwe4 zP{C>Z&5NhJ&EGWk85jPSBy=fLe^g`=A?45))E39h#QF&hra*2}uu{q>^V9f*+EOF$ zytLJm#&w>ti$Ayq>bkCZhVQ%vc7XVu@1mW6Z&CxAlWcY>r!AlSyw2*hem6OM zLWRG(EdUGhTGqMQ)N2As2C5c$gc4QaG?Bti1U_X)5~Wqnt0kMytV~%Bwt0Y`134^; z0t4D9ZVuQM09?s;7(jSD>V=PwM0K_8v?8J0o|}~n_3L?ihikG>pI%x zqI-Q2_V-?fr`=cKcy<<^$j^A9)ew5-0wH)N3oNT5bmW&j7trgGpx`|cH7P+$>zF?@ z*ttDGq!!d$lhRdo1MnEie|V&5wnwq3po$ESy~oXT=+o0F8U;r+F` zZ07@Q=AjiNUVQt~KBUv25( zAbuZv?Om7ICY*O&NTxBL$n025c+5&J`)xPJwia4(SlgdZq=|NuYSKqIRItcU ziJBxXxcTIs?$4r06p=g%k?5;OqcMsUbCI!O8cRn(N_4LLA{#CP`0n<5`iM)r$Snp_ zStdK-nSk%3qvOy!k&o~pn{fz4YI|bV6rO2Ah@k-Pes2=`2ik$|^hMZPJ`aaC--W@2 zc7^M*d)uSXl{-d%b*K;N_QP^I&^~s2gs0UdWGyEksau0Ni{K)4nB@x+S3p_WHcD(8 zx~vl{z$=Vsz@NSuk0;^zbGddLjly70t{h?w0Jjh607_zxrFJBDiI=h_fBEItl2`4K zo)+4=8dS-{kt;Cz6%$tYgw(A zS`Ye6>w8=Ymdlzg0a?!(JZ!dN${Qmry!3pu^jck1s-Oc>1C5CllBz$gF-Gg!ENnVnfetoJq{NFxkyn9cYtx?8pp;cnWms-W_=sGE`(&T2X0O)NKXyZDiePF4!z;h`u4(MIV68`eltMKUjC_H)kI1KiO zwhcAz6TbB_AGc}s46RJ@;=7mjAswqepqaTI40`BLpwsF@e*Kx0u`M0*?XAlUh=kVu zD-#}m;zvPPTS85MR2`s44}su`K=8T#E=Au9W36Y0ju+j1RgD#x)hEPL^_f4k+9(^d 
zwbh4=>w{E-xyWfExa?|%0yKb4q#)lC6&H>A(19^@S#Y*l!rO_uWU32U?Jw0^>$jaw@2=4gqr?sne;EC^YsCX{>No;CJ<>Jxfdq9x`tLM0QdhSV-LHEHW+!K6Jc<#jFiHO0?XzQSDr zV7a9Rc}~bLIA|qdeUv6^48=_Se%6utt@da_k%BP1k!pJVKTT0lY>>b z-WQ0vkYBDoq~oEWHy>y-jw87m>s?cPyH^C$ziXZbMQc3>NfGPwx{z-tKtdm zjqs$N&88m%0ed+f2tx|+$~}aS(5CVa{`UFvuzxTLr-vuXl@VFAy&fRbKLUL5{AGCg z@}+@DfHgRmJbOS9y~!18PM|nSTNh*fJQhy$##eQ)99>_1g5`cO({6aO4lnz}1;#rQ z$vKmKSi1?z-DSy#f^x3_yxVeOj8j0dj;PD9Ev9bMGH{mw#05N%Y(GRuv45sY%U53h zAV$&eHMsPs%~Tg794er_Xp;+@=pFFu%p2%DZfOr?El?&tD5NB6^*nwHN>743W?KRj z;*kJbO4}VDRs*??tw&;CH`k)^UhAV~x?Sq~r~%y`vy1ZH9jzN<#%7Zuupp$H`5N7Q z>sRIN)vLy}o6=g%jR|}a-WoQ_iY28vY`VE|VXn1fV?E7vR8u$g4YjS+SS{Xi#s^=G zLG0+^_Ry}I_A*GhVce*o0m&u?THT;3jej@KG;XiD_3@ODgJ{e~CgTqu6JhV9zWd0^ zt80CbrMo78qc7_zVFkdDAkI_ar}exji;N5Tf_6Zf1dU^-&dPX>|0+3+ zAfwQa_mY0~e_cmVrxMENPH$t-b&bW+wriGklr?kNL;_3be4<6yx~InTus;Yl)2q;x z1$3b?u^i6ARN`vYVYxo^!l`_Q2l|+7B>;BQc@Zv0zYSdx;39FN^@H~mACDi&b>gwW z+@WMR6v&nhy05wgL}znt@vOKYCkjAF(*X^pf9%pOz?;sk#nQb<3GSE z7R+@(Jk0-FAJwhsQysOSS$O%)O?Yym4*^em`tWZk&`!T;Eo50;=dXt0`s+!UywHwx zvXBp?pSd>0Kw6#{RMCW`xp?z`T*&4!j;2$IjW3}Q`gch(Ho_rSY2u5dg!dUJ}C^1 zmW$JegVuai$5Au4I$W zYeytpm%@oQ%{5RU&s_r=llJaf%;=NW8Vi?dTFbWg6w0sYMN}J#7>~39%o@YSgj^Y$ zI7^D06gg}#I#YYDYUE!gdhsd~wy19m62(VpW3FAt%{iM>btx3V#oyHCg4Es!wrSp2 zN;$bcC<%Y>yR6cZWhGXpaQ8Qt?Jjb{bbY=ntE$VGa&{Ktq=~n5+|)_+!SI$ux>q%u z+Ho(2w-mAqszpKd8)eSQi}0IY@G;IIMrn_=skph7z$@o>Tin@seXb+%)qEFGLv}NT zCjMbUlpqNY*$qTlE)z9EM<`pGQas&%%Uq}Ra*`;vF{d>OyR)m5<(HCsxhR_z%4#cw zgRj4?UutM_dTd7bxk)*@ETFCR%x6a(xqKV@-D;Xl=#Z z70~JT1!(lQ=*msPR2>@|r-!4dvTZ!z`5LpF( zfRNgN<@oo%`yzb$laForI3Q_HJL}B_K3}~&)5mvON=_BBHZVHDiffi!#qr-=56D7I zO@hFxkihunWL|>OP>Ht-oUw{LP4~(N(3xE5d>9V*p2}K$Bp`edPL2e0HB-)R1bU@# z2a@tRHXR_tMD0<(S|u(QZSiyJ_R7m&atTS7Q5H|E{gOv3{9!cDG#yIE;L1H|`K-1}1Oe#7v6Y2E9~={I6-oIX+dD^i>G?#bWev>k?7@`Cd)j{ zSR_8hlnX=s_4VLQmbTq{`;@A+}x*a z$jhxl)^JvjMhE&NPh7CJYW-+HWd;ImOd*2)RYO&#Br~aNA67nvx?iT|7jIw!^cL&Kl4SQFZrNTO7^wS#n}S-)w_@Z}``RUyfg zZvt;thbYQ%v({2eQ1K$L2@vPa4@m{t<_PU+p=gq`g>;v^)4+pipni>{>;J(jc!P{~ 
zfcOn6_ieGZTVmT?agz}DDYJ>+Yv#R?3x^q}K404uQk}-B+v>L&G$|$>w{I9FQgwh4 zs`qyJ#8tL2=bcJH6F*do@|awaXNsFN@U2gUktOLCg~k9!DSuginMTahk=zQ|k?Gw5 z7wK+c!?6VG?L@EVS3P~CC2J!V$Hk3SlemUl6avh8Ssmm75%xv^$b~?nKBSvph8unG zb$z+e+Pq@{UHP`Mjxit3EwpBCXJ1%uPxNftOzl&Bu&1MZbWN^8vpOw;+KQ5k5ZjQ> zm6d=2bSa*)TaP)*NJJ8_h}YhaPoIU~{^qmr07-~#?hX&D=oLTgtUr)gIKr2D_dfHCg zkoqJXJ=zbC9(@{~e?1Q4b5_xi%Zfh86V%toj*?v<8t0V`4?|Wa%0Trt&Zx@+ZO%z7 zRcVl>UpuDpnO=|J!em5FY1C$!Po*@z)Gmjo$}Ovev=&l@P{WV1p61o1{JfY`s-vuo z-zA_!cm8)vr8%>PIC4Z8q)i@{+|$ktS==8dw4ni%@0wXTjo%rOBV{lm|f~OA$*Gg}{dSV8wD(J$ONDH_^!4 zX;c=Tl#9HL0<{Sc%>{@$W`&dmRGV$|N5e3=lr`~6aV?iNJ70vC@+rR1cFueHV6Hcu zgsaJUn6mxc;7V)xPQr!O^Bs@RY}dP`_IK{gwFBJbB;1@I>nQ$!VEkX@`>Mr>)s~|u zOHu+7c~}EpDI*?fll3+7Tf@|}qEKpRwNRp~C_t^}AM5z`1T3fHMfk(#Ux$;^Q|(4K z6_8YWBzE8WQ8n7cfV+gWCbti(I@VvUjarZ+Q+7sB%8LA1hDB33^unJ6&;yA1Dq9~x zlyrt4xSS#6iqc>0hlAxK?FOjTJf!1CpFR%%;4dDlEsw*$`m0}u@2=#k!s;Gc#tpAr zb68O2ORTX9g-(ehHz2jpuU}cE4paRxJ+(7|VdKDq0-PL(0>4wR6fy6rJ@{I>535zC zI&+1jc+!cVqciAC9-VM*f2sn>d?=(cN+h4kjR*qd(ikEumbEk^f>0sJ;RT`g^z66P z5A-}s`n_T90%VjA6E&F|}DV zV*rqNVrddsNW6WfMKNeFY0)QtKqi2|mNJ8h$>dbU&>%ULi^c0!;FL`*7VH`e0e@RuO`zwT7=4V}1RI6yg_HiV zC~m?}|L8dUlmF>a_{lGN;p`<#>amK*NNxr6wYo^axQqWVOS-`~>L%8%dOX3~>XMR| z(!FLMJ0?iwhp`Ve{?_P7adXR1daTDO+=0Va!@xzt+XTIbbaZZ(Zb?ZtrJygR`5$1^ zHN$xhz&Z0YhqYi`t}(32Nb31I8c?<~GVDA?B)ZcXaq)w7T@+;5QTGPjAMyvDf^^FW z@|1e66{3ztIV5W8#9EOL>eSlb+@c>s^P5}#=2IEo2^3SCpOeVhyB&6b_}%WG?WAMG z`5kD=bXp_#oHRvD^&i0=NCah1@0N z5;2vT71=pK*7bn4 zTEW9Qc}z*X%1Qvl>+>9`Z#dey9rO9^ui~f^umD_rnr`6 zcwau?*X%+lG|Y+(UHpe}UlAZSK#EF|S`=*zh*YB~0W*0#D2gb8U-V{vy{fSQ#E~)n z!@QYbLv8$RahWJvn%WMr=+s_vO=HU9ax*a^nO{Rnj}ekxOn!mhG1#@6)sm{Q>lYd? 
zWo1CjTLij3^&2IKd&yfyrHB$`y&6ik=CzKsI+xw~P6H3C0mgN@LLQd$o8@)~K&;Eo zuio1wDlYT}(k*WuYb7`FTqE8-q3vd}J%?>5@1VPF*fr2>+cgbETa(hwyOH3YqkSXK zjhE2Fw-8ynaS0?)OkW>x;UButD`%6lrHv-ZhKqqI%7C^Gd_?E3%qhhLYo&F7Iq`VT z#HPs{2qI*lNgwMGX`Wx?r4p8$$|^C2lx!BQEZeKAkM?Mh13VdRYOqf9?mxX|b`eq@ zU_(B!EGL1*a4EminIMS%UVeTa{^N`P5`Oo$UxYvY2Y(cP_Gcf3(MQ^)Z6%;P6);qL zh9d#R<+E_}>Ph(gv#ao{|M+b<>^%D*96#T|gbB1J3A+q(-c=QDP&jc0XmD8^5M|iH-hf|9E_3X{nt~Yz3%=0u*x= z7At@JBRXlppO~aaTu_ou@=;orAy(E^P4_&jbv@(zNI`Y?p5HeJoH{O&n5IzMYld`v z*4fmmt7Lm#>hGA93RqwISj!8=CEQvy8G|_Xd@W(aJ(dJvk zAiW|ZnYCc_S-KdcwkAsF!LJc3IirWjtc*$nF(8D1$O@oNo+Yu~ufrk0)(T*(59XeK zb{1ZJ`(MMCzkd;a@sEBK9{+e4_J@xJYCjFjv!91=zuXVM`G0T1AO3C;F1~vb4t}Kc zt3$21tSyGe&$S}Oh3F65VyGSKy0+BUW$Vq~<(Eh%C&GC!-6@*#Ej4eV+F%W-x7!Fk zCq^^fIRgWAl4vDR=^7C;fwxL+#y_SM@x-y=Xr0`U4%e1EJ~+g6r9a?S&f2zmVA0yw zw(^Bg?&X;_=&%nIB_6=GuRf!_(v@(hg*iMF(?TKsr){Q}Y8UJH(mq>{8sA~X%}qc) zD`VF$ZZlg z5`MC_SYa<)5hZdhdHVyJm6S|(1qBs$@8St6cuzvxHB}o-x3(rVxpWioaZnayL?o&* z^|fR5YF>w7n|SSmxuG@@--*6~#T}9!viYgnHv)(g^WAZ$fp=X4=|12Slcp(6QZ{$K zj^F|tOnAPL1(riz`1)(y2qDCsn)5&IC4M&nCb%4)pJaR;k?vepm4Yw*4wv2KiuGKT z6G3vP!>e>!EMO%tBQJt*d36^ z_s%UZ3M9Q}tuN|Mypbuzi-K=4f_|`EcDn+NLbx5dI+z7l{UQ7-H;7RMJ^#H`1 z0)t0?j1L9Bp3sv$66)^mI1~Xg>qm|zjGuBZh*^{|U~i2fot*L*3y3D;OWB41FX4O( zCcp7)Abf*K$j}!2Uf7oVLr6zA)vNw4twy!kP_%pbUCQrhzdsG^0P**y`#aUV=>`^O zoICN`5laKvJ&)~o@tL%V&hBXU*N{y$;sF~beSekiGhpFE%!F!IDX975CMwV&PzdJa zbt>`iHJHUU0t6;{lLue`sN%KtOflfQW^q+)*)+s)hMfm#VMfmJjUxfXmiCiW|0>#>{S?d;~pPp8j@NPm! 
zszZ7yC!3gqDq~-w^Az7scg}Z>7Dh3Yv6PaI?Kz3!O&<}H&DR!kZ(s}$WGN=T(o%M_ z2|Cn=jThr^d3hQ3`UhGcS}rbH7ET`80>n4kVemR!&JMIBo-D`eyNMoXZnXXLL@RvE zl|NS>@`j)H0LenUO`Fh+4R0g1vm)l>NW&=-7}NxC>x~4y#PUwi80d#5kDr8(Klw;+ z6fVNc7h0QIyEeM7HlROSZ6XTfR(rKsT*NG2(jwwb-Z?;>&R+(*q1AW0NVN2 zFKxZy-tyycGI$i`Qvoe)1i|JVubzJ${?)(u>+tNOgK+Uee#u(Ecs8FJVD4+fi0N!B z-ulWeO5$<>CM?N*tjmxK(Ku1KZPHJ4))iKw{J~|Q41oQtvY27OsU)%(z56CYA#05} zP4!0-_Y+p2;xgmBjcX<(4^mza z51L$Cz6+m@qDo>~0i949cOiEV5)_87G4SJUjiEK8kvb*%xbd+G^VFw)pSmB<#CvyqI~v#l;7TaZ5O9&T z*gis(t=IsGR+kWIXQ?fIvA7G^uqs1GR^yKRLc3ZuVya!<@K4>_7Z0%<=(YUW^NVoy z;<>bQqL0E(R0k^|cs+B0JQ)lX(3zMc3TGx`JmFmbNYHLJvDiTXZf3jI{|1dY4smB{!C>1H;#J9V09r~s)IO*p21jj-+b~TO-^d5 zB#1taf1@O>KY!p;uClqtjnY{26U!;KIXF?BMT0!7Y`P#-#`7$2M zUwC9ZXIi?952{!tfmi2CfoH1mSNyI!&T}p5v=VO1uMlr}f)tw08mLQ|yd&4Tv!Tqx zCjs=Ztnv^A@Ez{$hx4;9!(adPzYfp8(HhBTy)f!Mm4$xhJI=`(%ZGGx`D)Xyi8ei% z>EB!^bE+lQZkA^{!|qDoT|waulTWcB)h(Lf8f(%Sg&8-nv_WKXegMTRQOEdN%x7Ub znS`-ipD(Zu-w3MfNelmB;|-X!f8rxHTx&T$;41kkKw}h)Ejx)LMTfi`XZEMD>;Z!E zzCdO0wO_L)s}3_UQ0dpG?4pZf#PQmV(lfP<-iblUVct^vN*s=Ezt4l;_KN>Vckf<-xcJPHVg+ zkr8nN)j+Y=sj}R4^p#UzE(I);<^5Dn(m5x1Q%4^wp{#>^Ts7BHV74onX&|s*#35zE zTPX_0Hb^SwM*kCE)xmO%11(p#n(CuMF_t#HfR&|f6jyq@hU_g5R2uj*U5~gD4SHJF zPWJ{}u2soblCMAW76*y17h2yD{527qnYPHZa|gB_-8rsGrxe z<0<3H#$$7Kd#bT-uH*KHg|Z=O98eMCu-=TES8=(tOVyHN6I5efDam^F-RdhjMJJ{i;x@ z`bfNU?Y9@ut+NpDZiB7ly{Ubh@=6YOkD1(U#ahQ26Gi)l*v8J%>sLia%mmgJ?yFNe z#kuCZ%edzXq=A4HSjMUoCtJT^F~P0ENZS)19BK(L+booq90;*)fdj^d#)hc(MS zpi$f}K+#*dhE_60rBgtNh?%*=o<4xGby?ZiL13pf0GPk7KL^i-*;6Mt#p#tZAi(@qc{EJzqI1M4%FUJRKzM!%4^yft) zjq>F>LDR`AxQmy^^;^-oPDM#xMWvQtKQM}Vw`lg`CG!a>%d{v!mm1s!Ll$vTVXMEg z@9A&;fX@IZ*9;}C9jb;RDWFdNWCRyA5Eu~1u@)$Vhzb_vrzP;fU`xO0K?q3}g0hQ+ zK2FrmCO`i4({OZn9KQU+*8<4$!xs3|W0S4az>>v)yt(M-@)NdlM`(2auM4N?22wQEUFq66QwMZ7lLqwgGX90)li z@uBAZefP30EIrw8n9U@28%WJOlv?R?#Bb5PXEnFaQrEJak!iF|R@&E=SC_5Ug0<>u z`NgRT;Aam9hzst!$W8Z zG!nQd2myzhKE27FT7b5a%0VoBozf^EPFcC0cAX=^++RdzzS7W(zuC0K*Xq_$_IRVE z^?*{2bGJ>oenLwW@nWTw?$Ze0~mn?%XtuGkcyiIsDYH0vSY1-Qg(-egE?D7(}v*`-zr 
znB2_6m4MGwO9XTP9R%7H`BJsl4;+iH=bdUzuDX$_iHx)+WnXmphWg0}o$%Lm*k_S}x7EyapJr!cTtskye@z zncNY2J?yTy18EZvJ=n2wM^7#iYK4tu?80nAZEh?8R9avFmbTI+`WXF6#wa8*GND%_ zR2t{CRD{q!e}pmj6O=~XtW=_Z*tYG8Qq%x4X9DJUs&G}=3ml!0x6m7!sa7c&kC934 zAZnX~c!6f*G{CPbNK3|*)?j#W-6rAvT&x>|iI?T1H%K&YCcxVk415cKTYt!Z9zc9^ z!$UPo$>}0SXAz*=z-eQ&OaCA>pzc@j>&9Z9P>#zFveoZTqjrG!{pqWnqz|qE-JST) zByN99(H__emrTHeS(yZ2SR1z^%T!-m$ zx-6b36RnPze_O&}uxHs*`8MLKh!q0ATfl(fb`?iNm&bXU%M0oJE96f!v7%TGDia$~Ukj5{&?>B2@)9c`op4kPn?YPtwqrQ#$x0OTf+Q1B^NiaCM0fxD{}S=p>QA z$<}@WUI1MS28;o(vK7y7%&lM!coUH1fro0G}p^S|VB9I*Z1s z+ghh1{j3!IrT(T!>a%3|;+#@|U+;)gel$toag09163hTKKXUmln@&K$`lAhS!pGkp zPL*bsVYM~6EnQ#D&1C`?1{)LdMWH=CxsUWT_NmGIpt11kUkWCXgy&yd0Dzkn*XWRw z_M6bstGA=EtbDE6br04zH}b{iV?bTIY(I_SkRpNdq&q|Xjm&OO^u8$DmY1;hwKp** z{odn+F1~k3-i`ovo!B+^cIw;JvYiHYfOtDS?WjIb4cJu4L}hpOPo>!qT*+PXQwN9-5yc#q?(^tUMMcfQhG+ zMg6?54|T3D$FdA-Z2*8XmOTIk0L#FbN+Ps@8{%-#50PxbtWHW10Hn$wPx0aijZO+4 zm6L)hMK2F*3zc9gE^0b4M~?_QV>yTisO#sPfLJlfl`x{3i)H~Smnhz zCQrtc(+FLy~V)Rtj3Cu%ZFKor3;q*OM#eO+awTCNHW_={YP@Ryhf=X z?;|_2@mDY4=jNjvsUtDWr6Fo%RJ;b=Vg_?dwo@BjdYwdR_n6BO>fj$sf(s;`s zhJ8viY!)SmBfSZ*~OWd0j;oNRkI>$oA2Xb>Q1$Cv5lX~(m z9%At^I6wh`j2P_JW{kuy9?44abW=?9zcl!wbq zS5`}a7h{xh)DyxqKUN73DC2La)g~Apyz;m4ZT!W+#+e@_jCCaHE&#gn4LqBCrHo~V zBM;h2z3aMgDM|NWyTe(#Vw$;2V5w&H3h5c$NP*OiuQC>S05Kx1Sh>WGxJ)?k$E^rg zromWNZ|%az!=!;=V=rPAOm9L=0(c^)S&!ifO>;W!u z!Lh2W0j?f3g;y)E(H>w4_ki%MjybHCat_1)-FZKhg= z+b?-hv=Fr!&$5hLqwOWrE&5g#TM=yF)-FOT=XGvkat}VO6{L)so>Q__ zmJfh3!(%PwZWpngqWwl&ue^GmCa7v^5_7%2RFCJR{B(?JW}=TtSXOzhY@k%N3v zstjOi;_7LlUfB|2`p`}vxXtAsI2btS0JvCRidQM8Qp)NiXgW#9^_KYL4L1U^5ffPA zmo>}8&{v_r(gY22dsiP!-Dqbx_xq$Cd&TTizZ2=#SQ$^9#Ii-^14c{_5pW<&(OSuF zXZNhT;A7(Z8Na4`@kZ}_(8LmT_>T5^`p8gefR1`_4aYt+WvtT-u?PzU@yfoZV{~Rv zJ;1QJ#)_&5FqwZ^Eg^zSv$%?nX-B}&#POdf zwiMZ#tWVWWc!~mY=h(o#Fn0-nfiKA$N}6G<^EwXW^sM zXW_G7e_`_3_RMD1?rLR;1GR}2F5u@MreQ5cF2E}NeIrmj(&`U`A=YH?DIOqsq^Mqm zq#>SuRUV6ZbD)n*8I~POS={s%i0Jk8g2 zj`eX=&);}n*J)0ibq4xRZh*cHQY0iXA9{Itiz{}2@lHaA(^#^ZB;yEJvCmN 
z`maT#Et1>&v{rqUmohizSE(y!Z=@Z!jZH~Vzo%I#rVE-1F*vV$9oq zvYS+e1fH?3Dv7t2zU1}!DlSjg!&0h_a8TrOf~ zc+7*05+#P&3#B|er#?|sRcwiwplZLyN>5v4YA#Eyr+s~<57PWu&;SlVl%?kEA8$g$o;A0*9tYat zj@4FJl|n-6p#|Ey&d4$i=c>(wR8);xZ+JntpQ{R>(9 z?^*mV%}=yDmx+5Sdspo3Y9I~RrdsoxNy=|ZOL>jQ{H`kk$^*CT>vc`SSa12mI>lr+ zz8Qz3<3pQtmsq2i6eT1#T3(yBh0Susat4Zs>#k)e^luO~z0c^2~@Ye;!+GN&7;@|;!Tl8RK%Nv*b(a>%z%SJt#C zCIu&T7#UNqPrq2t2L^<&3=4$nd^WiW2m6Q85dbg-t2!w|Cxwf`x=fGer}p|C|JIc@ zf>`M9M(g61Ar)lQ!vy((%bB&%!_ZpZ_v^Dq#Fy|I=^6OsixpOh$jm{OZeZ!vFcN|0aC<&3D$X=m1qP z{!M1ZyL#~8VTgyqksbyH!;yHfF1PZecXK~V0#uH)p-JwT+>%8pdjo@#)aU5Lx5ZY5 zGRI?gUE%?ihbafd>ecwbDk`H(>^Wv5)UwxpAfn>;D4-Vy|L>8|J5hh|8rT8iAN&yb z@D#8;=yHGty-iR|a&fvvD|W^VtckCyC6nx2zN7%piTri1ug2lz^dN#Ud>nbDi&d&! zr<-K6JQu{|K!O!WTQZrY%j6V+KI+2aH`A&T=Vw>4s6hi@r-TC$lQqqvq)X;JaAwWr zsDBoWzeoL?yc86(|A4A8mE5&9B{^f!O8z}3c6Neu_)G1?kimc9KT!X$H3jYx_?r%g z07L;J1C>dTcua>4vJ96abGMlH^h&-n*7g-s1H#ii`Ds5Mgu|a4g`*!Ihvj}JybR~s z26-9wJ~;}HA3X_Ie|HsLeDPhF2&Bz481}S=^*}(?R!mTxwq&2HlU@_#RzPR^vnQ%t zy#PecBRy>{1DfDtxkcFP3kWV*{X`E1(#24~5TM5J?eofBV|XSDGUJ2ww4G)8Rozr# zfAH5BvJVFt8PEXYeT-2zV%{FsiTYaIg4J2D-yR+wY7Y*9f4NM&y7)FsX4<1!4+L0v zb+HLFYiz%~3}1ist@#nN+aCI%Jt)Ka6`e^kuE*(A52@GMkc7=_^pPpVkg)-dKUJuCC4cozkfXR~kYtONz%V%l{C5XjcNldHh3Y z+pY~Cz6N%H_`~nxA506=B$FEK=bDUsK1#{0N1yDNVwrF?>ETDJiPd3O1e&}Cm8lOu z$FsAuFdX&5i7drSwq_L=s3IIf7lMb& z`L+61YupYO;c7V!mpyH4@w6Wv{p?v7eL4!0(IT7;#$m4Qxfkk^MJ23ERFO_dV~S=(<*Nxz@83cJmx|2 zXO?aJQ{l&QV;UY>H+!LvD`mkJ7-n;s1^({hfIErym9`H;%`Hd(*)~v#olDjFdDXpn2Y0Nb_eL43qkvTYU3$5scI<&@wK!$(e zel3g8zP5E{ZB!9k6#&=!1eYPsn8u19=gAv!e+8?L#RRIxatRoZ_2G`K(<)G;377J~ z24qdey>yg@lgfnb?A5|Z;0KcGOd3i2SBBd4WsJ0Q_rF;-o^B1Sf@N)4IyKo4PI^pO? 
z#{$Cpq5Gs4W`|nUL!QKAEwQ!|`04i5|FUdi-Ml;yA6cglWnn%&?uVn(!!ZB+JluSK z5oVWKb)YLy%qs+eM?E@uf6+8RlgZ$6xciG*EAU&a_JGi};2(i%RRcW`P(Ry!_vU(F z;-6dzIxKskT6(NPVGm8RSPGCX@N1S;xzsuJnjlgIM;}jEj%_pnb{S(ay(WfEDetnc z=Xr`PktUGa8OUv7NuTNSLR^`p%Y}B%!>_ojhXz{`PHFB6tUk1BE9ya=mH@GJt;RY4 zpS6>x^6ejMsWYt4ef0s?OQ)O0NlmKtF0T@lgQJ?RGXOfkomDgJnp5BLVc*5&g>vzc zmaaASuoy4!9acW~1RYF6CbuX}TdL;KZC~v~lwDaz44FNh*Qc5>g^viVoIq72-DuNK!8|3DM>POAXJFs7l&tnHpMAxz z+~cDoZCCtf;r!w{{LSC|d+?k20Xrl3qY3^z08pyITSwa2=#{L#`e^RaAneN(V((*Z zej*@z)zi*nd{!r3D?qToUB5dD`~Aake0&O$=3SM*~^Jlr90yl|lNeh?*;$@}lDzjB#miDHwigJfN2i zD$<(N@Wg5gz{YLD0Wf`I1$3^Uiehi+1f5A%1I3nWX^nkofVtFPqV{7o@WR8as<7ze`ohPnBGsiN{b=)$_>r8;!{I=n zSQhejAL6QnYiFpWHP|D9jb&Tuf{*&9dW&#%sd_(-V?181YAZ6>L#p*7@+&X(VWneR zD||0AXk)sMzLyQy3H^i9fZny1H?cAn^^oO}Qq8ZPZL2xE!ADNqAGZ(Gm)BXw%UA<9 z?y~m?lqx5NjKQ5`rd)IVWojAoTI%On-SucB&2`#BQct#X+Sk~|nA=!LiB#KOPI>p# z`Fee-DOZP0y455rL!T6MisXHSWwDZc#KM@dS(SI=% zxPVrGK;Xauj?S%!O6%zUi_qvqJS36v=Er~#w6TblOZlSpSGkD*z~K@(j~p>X8l$jk zJVvs|;!%24U4TI9vZZd+KUgmDSv8BB)gPrqXVgjCMcXy#~az#8%Lj**$%{H+b|Y44*y@A3xm-k5A>tI)4>bC+DH_{Hrk7699Z6 z3*uMW{q9CqS=H;F!>n~oyY*3?NriJm)Sq;Obev-?W*n7maJhY)8lH^fQ6m8qsnC&y zlI7y?Th<`3v0&-wS$gFmV!qS^mGp=axj!F;;ru8ZND`Kuo1t$V3%3c$gQ3=BU|>~( zNvV`1A(@IBS((Oa7<>py->p>Mj^$JTs?1y)}99m`I?R2f`D&>U7+ z57Iv$B63kEk0th&Gd(O`%)_Wvn`T%e@G~MXiSsyY@sv8@;c%W+bcWPc1Rto^R_=XZ3FzMIUS3 zkqWEsa@3?mKl(#X6hKLSWmjpM&csqo?RPCZam#a48p_Pm*9vb)$mbLIZO;dNw^84X z(+2S>S)2NzLb!I4$;rUTPS;5D%#C#GB~yyI@aUx4mA5l9t+kMm$4F`p6ly8hT)KYK#-vAdk>50O{uVF$yOL!+p-JRE0T^??u%V5}S;X7_#qwP} z9m$_{U+dEPZgy5!vuAp3&2?_5XO%jui zbZkDGds+jwuZ>eq1bjxg1<*Wwtkn|ekGAk+pbzHcxc&IxD4Y+^!dSk;0Abq%R-X?y+_+{J0zb+4WDKI|Tb(egM<^gy?-aW!R4WO=9Ca%})ufl{vxCh^)( zN*UtVi2lv~fJQDfk&eF7A3nD9Dc2^uK;rhpQgMAPo2H&qTd5bYOPf@>C*B$}0OY00 z(DCk4634djkye$(#w3$#LFkb_W*n)_^6kcwY!9T$r?`!61LR41*G=UfO2w9}mPFOL z3U$I*mpg_e4O1;|cX_6F2ICO+dPmkKJqfE#sw=64zM_n&LPrH>P5+=(9+W-XGVk!? 
zOQl8uYvs^Rao=3aYc5kGYdED5v0)9lBvm=5Gs>lSOTm`pdMUKFO`zWyLyf`uwxTZWmA~vhOEx>Qod({Z2GS^vRkYp99#Vn?uI(mk6*55)^qUFb7a_lzzL@C*miTx} z3v0>uacUpf?d|P_imlH~J7y%yGI3n{Mh3oUQVBUy3kR^t=im z$PaWpzOp5z;AyX6;kAw4G8(7P5@MCrLp`5!PcyJ$Q@E0>{xZ*b=)5MB=~>oP`5|bM z2Fwa%VcpX|ma;R5gb5PxqCfJe&aBAF<<&pCe)X`*c17R!rM0X-rs9IxPauwu%g{CY zW!>l?Fl)M2S?doIje2x$G7_g>^Pmy}h_it}xa!gOUtx)FE-OyVCxV{>o&u6s7`s{} zq1SsQ5IGG`_6Ond;UFB1gu`U<+}oEQ@4-wUPrIoJ@f?oi3UDYZqV#}FD*@4!%-XBP zv1qi-FWS)9VtCnB#R9cV>OKK7swAmS7W%WvRY#<^565C#09d9tei!f;XJ{*3HIhd8NT5d4f-Ngo=v?Jfka)1d zUQBicn2Lt=d?f@iwMRNLO4Ix!sxxL;kJh0BX8xySHO!&s)jkkwf58_QTy z@ssMyWhgQqGGg+!Fmy3nG9NxU-D{gTC!ICr?b3D{cv~9S0pho%>0Lc{(m;OmwD=pJ zIb1yIx~(em0bN+O?A5bYe889Z^5Q}(6X+HC2v8skoB%PPiOCboFz4L(siQvWQ@aCV zCS1jBV%B6F0SV`0*b) zR47WoJLqFCUfPcUVbb-8RVCE+mHdFA>9hKT%!9|W@_wQf5k5NVgr{1Ht>0DpVjM1H z2_C6DZV23LdCTq}V`&8lwa^s?umfEC?U?ZeP$syfel)PG%sD!=@rOPIy7e(y8B$xs zMD0bV1RHbKet;@%(O6`847I7N^^)1;k8pj-$7%G5&M~J-M(abBMQ5BlXjGKWiNPO| z(wUAR+_ivJ97-(y&c`457+J^^3+Tu>dVn5%nJo5Zg6oJe4>1HjGO()iFa) z1Kqt`hR0fMgl*QRTJDW&kq0Jx@VOSzVHp^2j6VYHW^5vdMj>O=dct*yT=W;Kd0bsw z>OP>APWHGS#am;-`wu?O&0^@!WaJFFE!E;lza=$!WhlpKnYN4VG_cda!)w6rzz@%N z$8o2DduhNXhpc4%Jj>cH8Yi{dZ0lv?41Cu*?De%~H@=EznJ1QqB7UKdx%%2Z_NjbI zJG{RYegS~sEXrwCmz`&S9 z8KCjN7yn&ZUBPG_^TidkRB@|KR?7K87E881)@yVC8y{VD?bWt`sQ~6c@saYiC7D2u zmgi#YY{@Z_>%vqn0DF26k%}#tl~urJd39>i04f|%O1cPys(>g8nGR-g#%EbTTmZW_ zk(-2kefLynPu9S$KBNP-cY9~r^x`@k>F=rBAx>m5KCyLhk)h1fMTnZ@S5+Iifg7=-cOQ+^Lts?*qoW|H2>BBR?xznjVQnx*5 zM4-&abY>}6I=d(W6!B*@V5KyAP^}QSe->D(Y;SO&rOx)kzx%&_7yh6B{I|9hc&7r$ z9XQ%(Hk25ZY|?J?!x#Yw^H8JFqrUY&A3NmvAHSZKQW!}+J$SOVi!Fnw{sycYpclCH z2P!GX+NJVw94?kKxfba`=DHsapBRXGc(wscOdNg^UKUv;B9UM&x` zLdSP!U)eVVdm~oO;30*+^s-i-VzE7bOnH_;rpZgckdksOWB0Suz)k~iT?0En{MJ>z ztFTQ2X);Us_4Abeut%SqsLNiV>(uIH5{#28^+hVh9H@W`X7X3;2yhGpiiL{pqn%4_ zm3uPV4Yfl!-j$Uc=1>LGI(4#eJS4taB1xU4|*bG30B@jR)d015dJK=J< zuZIOJh{_WfW?4Ecpw#K@EP)&{;LGam(@D`zkIyA6c*(#yw6dmlwOko~#C?4{wck6D z53ZJ5lf}3{{3h%zufp=h7vY=bS7CankK|;b9KXB>FTcD9%L{Fqaigs{@PU?(c?Zxh 
z8GAi#xvh3D`2Hu1QHr>o0vc9It3;uTdZk()`l$EcaflbVs?07}5 zICiY1w&=Y)yOh;Uuc2kGlpWKoQoNDoEB82C)|#Lx*O+?6?k+(+J^>4kg;%f6ZHwlC ztkIYau{5)E78WU!HF8{Uk8&e{9m*e-NDYhxg;b;i15$|jMoydvmdDhb#76+i{*jT= zhAX_f6nLLr z$wznMAE%)R04ghG5W3=PuuD74^nk_0b>tV*Or? zGyW-}KRlf2bLjvJcM;$NO$w1-ylJohc^+WWQh1=Tf$HT97+Vh1$OoO;s%$9G6Rtz} z68F0MW}U^ogomlQ+*0PNvGOzqkWixc6i@e$n=9Y(?&;G<_7K8Hgggjgkw*{S6(&{L z4$`wcq_JyKa-lOGww{0eT-P%XBm&w3-biY6)+E;&t#5Zy zkou&lfo~t6H%YNe`=M)~0TAmVwTt>SRO7R9GXuXvH0N!s7@=NQu%>Dsg6C~KTJl?x zt5UWQwIo-|*3RxzrpDq;oZ9qpn{4DWc5b6Xz?^2|c!OBv(C1?|kH;y^2;+}ido%&Y z3658>KozV{Sg82O=IHP!^bZB@G))3h`uZTwmc$ZBsmhmxEz{wH-y{~p(cUP0^R;$7 zJHNDIEHO5>UGen2m_F(Q7&A|o0YFint;d=(U9|uKdIOpS7Elec#dFgqU;>b&lS*Zr zdyVd%fR$A+K{TcVtrma_f_(sI;7oaXXi!UKSHl7;eQD{hojZcnerL-$CQL4mElme*n{X2~BvV7nviV%UlP#*{lI*=P!B^+c!xjGCD$K)>{u5Ik4;XrzFu&2Ya}^#v zIt@L!Gtmw%LF$FI1v^quf2xDu;+le?r1CbnyjX^>WO)W8bFH#M20q0E8J;~P-W0cx z64H5jq*`ldnIsvCf}UDC%}8G(+Qzh9R&sbydH;j8Wcy}3NSoW3wDm(0uUf7cx5~C5 zo5H%?y6=t>}Hf28et&5A3qq*(+XV$JQ@WJi5v zqZo^3*#r~_6yXaBK=|?xUkaq?tM=YrlnGt}4FWdE&1AhEs=V`h60LtZuc_C@xes>h z3-P3GEQ6docL4>}hq#966$dQwAmH~*_6%t^X=8)zvo0fLUHI zQ^P{KTCni~z;Gdus}&deoyWp69j)+i6aL;$Pr~oN{7k;CT32?Z52OSlhXSXIIhJx+ zkY`t_PN3N2lnVo|*98zIn$jgXmQK^Lx37$&Y-C4@GSCLft+b-VQtlD>f@77}>b$Z{ z_QF8B5{|H<%KCetb&5N(cz11AG__yVP1veek*WG?CJ@}63Ye)H2h*}RceV4Kt+c^| zfL{O0{bHpD0UH-;2SCWJquiPu2$Sa8lB|eq4aME{5Cm*Xe>j~Rd)9Ra^WHNd{GI~=UZX&10j|D_$;q3gC)Fj!= zHAebWpW1_k$x2=QFp&lQ;!L3U$+6Z7*SOL0b|yBBC&WYLM4e{iK$5epA0XYpuO2ea zUg0;crRk(hK)7vkB8K*MqQc9XW)&1w(VPNN(l69%KVH3^={!q9FI5=}()M)tIP;A7 z0iq^Kt&v-@&6>1ji8n{){n646>8&oS^}!b6td}ecr7=*Zq!`z5y~dQ6#!z`)?ez9? 
z8(6A+4doh!*(M&jMBBvM+JDzN@?L9b#Wtc;XmWn*fVe`sJK1UAhpK@HEH*YDOIAr> zU{Y5%lljefVq3|coF3^_^p*J>veCnIp%183B+ZFl9Dwluv-f6AvL(rVUsP_j-=+6u zFv9^TkQ{CxC>lkRQD)MUjGs`C`UEr?O)oM%kOq+i0U0G~FaR-_wP$*n?!No2HCO$9 z?k6%&W@T2@y?v*b>5Qt(b51NC9v*Qr{>P7php&3M9r@J|xHuWRV*aC#J|=f=K9L%* zalEDt(8q)<&=ga=T!2@~3L*^ogCz(P03=0!IA`&SJi&2h1PB9GLJLfUK%r_M1;7D< zc@PjpNM9}X9p;;KhTDEv92+-a(@(K$>Z{Dd0Q?MW`$=2tdSimO4d z9EK_G-9`+eaD$BNIw|Uw%rQvw7?(B=t#yD-*cJ3cEQ5^$!<|Ckh=pyCJG_*;ZUHu6(gKzngM#OuUqxpT)?>-}=hMjC?5 zvpuwwZ zz^mHKoiKZF$2Q<5fS`{Sw+|r3QWra77RI4DonQ)ausbc^d%sim*^>F~dk5u*|M9$h z{8Ma3so%wS1Iq94+F$MYgauXMo(u4j0ZwFT3Rv9`;sJ)Vs+|e$o6g*sJTEx1z)J-i zJh0>003g*LkfVPYfcWs31%7P)a0%ES9AekH{|37U0-WggF~E0k?_QaW@0GL1{qpS7 zZh8DM@n@QSuwV^S3cXCY``Qez6ar!c>_(7}?3*SgfTDzpEBRLt5sVg>veVXxcMZam6oM2cv`Org$Q4S3=R5So}nEcaWCt3FF<%^lzCgyl;3o)j%-`DcvHS_VZ5T*6mAmhrgGc(tpr|B0$V`* zf&%+CW%nokcRe_1=2XbHv!fH1jn31}%@0jaEfOV~yiALv30zN}J}am9j*431KH4~K zh}D_`A(~pq8#8ih278nnk*D9ouP%V{F>clb3scT7u#w%zyF{2^geDG9Dn_|twAf~O z!Pok}lt8v2!HDP1J^)Nqnh7uXCQ2n(EY^bmT|5&A#KR22_P~z|joke#fED=#@yfx! zp83)8=}lsWKUi&2eK7FVcxWFv_OT1@%>b6TF55)|EWp{ru9jVoX?H1azu75w4`$^F zD0PBcHcckf0FKZY_i(!gqz>NmRlf)e*+%mOee1(QCOPBIWdP4dEOfhgd|E#G(SuT+ zAC%wwgSX0S_t0J+qF3ynmwU&+_ane8w#2=?H$oPt{F?5gksgoXue?=0{&Z44{Lz#0 z)!@P=Rq85~Qs z%RP0BdhKx~1b9PBORw?~@S3MeM9?Np>xKg=SASi#uwyjg+UIJv;}}resx~+@!qla? 
zjc*0wp~#0R0J1jW=-2}UQ;MS+XUDxCza0z42gBBugO0=5@Ojz4)5lbWO>kHwHv>pt z^04T58)z4-YFkY^Tnb`=<X-ntv(%SW?ZJZEjZ&EPwv zZQB(Kz}$9fw?D&F;tW76sNV6dgbye1UqL%riI+yZr=YPm(Bfzy^;xc7tj)WP+)7|8 zfiFt}TR{9}N&1^uPxGUFg>`3+-_hXYblc|TJhl*0mi5{X8eU$nK79BPt(4kCceG>S zR{&xr!YG#c5@?`t1?ZtNWS;Hf3-%Px5>I0CJwCdNmj$+?9kSyY_N-xw0MO7hVGO`T z1@}_Dv6zTyan8(12e#Dg3XH8wFdOc?8e3o>5ZEIZ^j)wy!3^+Y_OuHXv||e_*r1VW z>ePDsin)tTZr{z~)vgIZY%3sQhr(`Kp=m-Nka56vyB)OZclLR;ZB_zJ#sBVv&v_Yv z(E4}@&|5>mIm)=v!;<#Bv7vPV*g8NMeWWRf^#}N)5?B+QDVP1x0p26nhWoQ=`Q+qr zxp?&R^8Futr~Ivd_*!}G^}FR5rxW{k0nYc|LQ3eTN4;{a`?Jy8<(EG@DIfghY5CcY zo|XrnAgdv^&(k~5)pLaaPEZGMgU8sU#0H9}5_U5F5(FD8{0#26nwj7{_j zz)&+gK+W39?^OoVvLDc+5@nCs_N+Tdz@P5Rlt<<P1FgQLK1MYKTUkHII5Of`1;SED@93SK1CHhZrJt=R#bFch&|MH)fC!e2` zKlyimhL#xr!n=oIzB0jVMw1@$EdCrr^Ps-i)cTAET-~ef4_MF__c3ydF`n&`*5aNbZf&6VqE9l> z{PhjR+U&oj$m{uB=JB`0!j|k-0$)1`Yyt7tu1mHIec2MYesWmhh6ym|Q76(xs5Bxr z^7yKECrlm=4)@C2@4b#T3$FvjCIQQuTkToWQrRou)&BVLqo=81!;RbD{fGacy!-as z@% zHQwsjtale1-o3puOc6jMyV&z$r+a+F<{JYvgO{3L;AI2Nkp60?tL`b z9d^u{9pUw$S57CNmyiDP{qnPa`?P%UlV|1R(Q!FKel85V9AoEc}MJ3LUC)?o)cUvBJR z;xl1!p<}~%c`yVpHcne>j=pUWaKH858|9Dx`TOP1|Krc_*72kaFjrv+gyz?_3xe?q z&PPu8=(ISRteZBpyDYwRth%G2W=xMBJuGj(`xn@SLy-}W_eWy0fJEE-lo3TUm+E9V7*%}5pw)=8zv2R6W{rz0m*w(GEE_oZbmB1^L z0Dw5|&N;#SA)Q~|?j-N<+vR=PTwmmOBWB&zmR3fV+h1e9*zZ(}X6tlLd&9|k+24Xd zOnA4*7xgak#aU~zTRju><>HC@bmz{0npggzPrR>CeeX$ zv9J7j;sP;W!>ehwtP2#g-PiOXJesNq4DPgb?{`SU&mTXJz#G zVL7_^JLT{GF$?DIunXPjxLkZXDu44|J}H0mZx70Eetrjy@OR4V?xx3rzu}nI>;Pa* zBc^~}0h~T`^?+cVxlJ>T;k<_A#jBLwg3}zy$+eFUTrY}|pNrb55pQE8(_~sY2zI3$ zI@`!bL5 z`KitAx}gn=6sMePPtEPZC+XS-TRH*oj(Cu`oL-{geH?z2)%Ko02LPL&dVN$=dVY+Q zQ-QIybupE25uF<9!C)UieVO+oZrJvZ9CM5-Xt>?8H3mJ#Pmi$_3;%dXI6FP(emn_q zzRR170H#1$zqHw>@;Y#$KIOa=5wY#B=qLK2-Df^f%5(H;^qVD&yJeV{xtc%v>S}Zo zvpQ}T86~+qrdnyi+x6{S z>y9m2*+y$kz!8%sKg7vB8I_wKfSb2MjuG;6viA2n3og}~KN1-g5@pepuajMw-o?K4 z`IAZc{8Qbq9Rm>Xp{phk5GMegvUp4Y*YDzEx6ed>Iw_AoAC}?y9+NpV`lsb&_*t1? 
zDlqOmrY!zy@l?Pz7PuUd8G*zs$;h z^46d3lsiA5zB3+Te%LMhzy2rX_~ISfaa7KEkU5{6puyI^FfGDHdW47K zv3!%mozk>LRep|5pcrjQ?-O#lV5N>#W0h@da2rKAdNI!CUo@z~!g-u0j)hjl{K~Ni z-MB}Z452M1HUl~1LCOa=A7qd(V_cJj)M%@Th6WEIGZr!S%YA$>zscvg>|sj6#@h5V z9y##gJOTiB`?P(R-rnWW0FCk`_k-iQgZ5k?%I)9?Adc|@N}larlo6VEcLmh-;N|J8 z?Dy{SW&qg=WX(gZzH!*$L8HgGaBL_1wr*O+n0C{gz1}_EFigv5zd0|z_r1Fm;Y|x* zeLQBTLGBApgBbYm?pq8tSvg^|mCt!w1Ihi}WAf2<_{w%yx!5O+G_}=j0k>p5m!l%} zo~m-@7;j;yYwhpN6&lMCXQt{2w4 zig=MQze*k);j3YFF29;%wZ!`C`F*iI`I=aN^YR{TKO05A!ptVRaffsN7+!P?)N!;Z zyAbkfKPK%=57{iTM%}miiVCyfi}JdWe#mA+Dr3aAft$nnUbaZvgl=>B{-_Fg1us!RkirDdMD4C@ z+JicAhpR9q^*(-*HEoE@B&i8@*DV)Ag|;@JXx3cYzVr~aB%tJNIk%Ok33+>K4n1u*a)x-GWgZ zvkJhqrXN`|Xub!GJ9POxP|zQwb+!5S;f8`P9PI*>I{?%j z9$rY~A2SOz$jnILmpbHrK|km-_?Y{E1&4fm%Mk3xLnUS`+K5Bnw+_B3sUW>0p^#2U zh;y!-!5uS*S8*kl<%{#nLtltHLVHUx|rsf(?*~Rbt6fkbRv?qQqtWIG@ z|J4g3f{(eBt#T>IcWQ(!V?gSZk*$<}#;$%ZkJHuE?e$gyTM4`>32XuJtCHkbNXMVx zIk5W*`S`|G9Y7!_vc;sed1AY+o!Jk9w9FN+fH^;7qmBok&&u&VG?)9@-vR^xYytrn zjKxAa7X7#qemFF47f>f4YY6#TI*~wgG#(zEP{V%GqZ%p!g5^wBUjd+q3&0dr`s#WB zcyvLYnl*vB3-%^%KTELyH`*q^UuqdE2uRQ*3yq|jW7cC_;DM;Ce#JDO7nX|;m4sHg z(WQi~s*saE_;w3sHNd7$P|IET;K1inK#Xnj1$>S`%VPl6g!tT9PaxdEKJ>i&{HGt3 z?@Zne{%7G#w(_#+*zlN*A`{oq7&q7u2=h<^Pt*u+*np$eMRAP=agQC+0Kt!3_=k3K zjCP;RIxeZxy!nY%*)k!ry%T>5I;xN*PXt!^Z_LIJnj|Q!hV}Qpglnk%ka0^XG&UAC znl%zu^H?^ZAnBx;Oc$nUgYk|NU?^!_G_)PwR_p?b_t179@QPy}^$z=%prtM405b}= z-R=TnC-`S}iQyqzWc!9GeZHGPeI}WwrkGCKIYQcK^kuPksN!L!_d+jb>V0K)@$%A2Nxhbg;kih!v z^n^URSp&?|rm%ko7)O6WeG#nCe(|G#HU-nN7q8E^sr)v6D}k*9ewz~50^;AMbiXv( z&In&xWxsP3t%|PB`%Z5!6Tq1&f`BPVeK)kF&4%JAWn^@c{F?I1326Q zTEHPvpJE$30$5(UGD(W^sCG>oP$>agtVskN^#RyylZhS-^=SL%=b1zzajm>wXBpd} zhPZh>jJ7T`r~*dX0Mso*^0LEow7DOE*mL~-@;O6Z=YZuQ=OaKWh7zzOZLE<2%+DS_ zW8ooy_``Q-n>L{7zOAygdQ7%#F+ra5EWR~A39yAGc+7`y>g9Djymc|#czQ8KdyNK^ zzExT?!0>pqo7*;%Z^&zVOC=aiK%5UOke^rC$Rz-qk^$`1#5phKp+G!gdG1H#m%+YU z=Dg-V{1Qn6}f5}7YiDkDVWM*j)aVp1|b(Pl8^HF0Hy7=mf+Y6cQ7x(bY&lp zA?!y;JZ1=(JM0cd<$m{o`vAB}TQHy~hnydv^$in*G3F5?AJDAdZOM@;W8a66KK`iP 
zHAM3w?XLy5;f;kF^hgm4>onnrbDN1IdC*CQUIB1| zd^O+kN7<~i)-h*=s+GE6Z2@*`s=4E#s3~TbZ3^ukX&MyrYZs|9iHDv_Xx^7iY!r~E zTGIZ_%n2*KmSY!4`pqBm2EK-dco%RwpwITP6TW|Mx4iy3UOOJ+L4^kil(v|^0Ep?o zUF4?6lHLScZ0x>=sNXed=;C6}ZLJmwFCD%CK}xEHV^N~oTs7_jRkhT} zj|Z6I7TE+J?v#C;hxAj3F8*}$k;f08z}r2{hwMkJz42Uf$;0B=^C6pXu+2EYSdgqK zPH3Th2!IY>{x`vkyjqrjP0wS>oe|LVqp`-NeYP}O39X`OX3>gRImsH;Z>xs9!t(I~qrU(H*6 zH9nUCh;Ye{xJd{+urektqN zxZO2xgumN(m2Ta5D%iywU>7-E0L%nkq1~hzr~NY)iB&*+X{&;Ut-Jzwe0YgJHRe@# z_OYQ2jOD4u2kgr%&9^jGLx8s$SrN40%eY#V=R{CoPX7atr4ycOxq zSmdUCJ+|UAwC_GBU4m^#*pg2H!yOjmxo!5Y9gdCfm~ksd$v-Dm#*7UH03YNU4}GR( zaK|~@({Zy6csPB2#sa$sjGMP{%Z6>Z_X2NgKL7M7W)*0J**P(Ip`ZI`hv_H?r5q~F z7d@spD~)z%yeOq@{;dSI5_pvo_=13#o4VfDFT~aQLhEB+i15|S66H7Nw?ezt_FE-h zE8)1I9-|kNLMuO~36+-Pz7D%9otX_bk000q4E zfB4`@dF}pd<=*}KXpPyC?jf&kAF{11{$1VFKw}BrnQ0#zpj4zQ4L=eCD`<7=f+LpB z69-5FJ;#O^f^Q~$7tUG#<43rvqM$y3cY7?NLyI?^V!{!x`1J*AUj4?REtWn&G;Nn{ z!a_f2S_MX)x5K9R$Rocsc)rphD*D2#0Il8*pY&dUIR`wC)dJJfOMv4@aD+j^MbV!x zWdJ>Di0vyqX9!-8j}OcL@Gt*SdGO?;@)v*d>oR$FaggrP|o=f6RFD zzPTI@%jcgx<($ehVTUK-fAHTKaDkIs9^BQqO zjbBVKuV(n%z~VO{IiyZPqMmS~$!N@k^DS8BdKVu_xedsDcE%{Hwo zWUeW9B`<8)wg>?TC^a~u1mw=ZOFy35L#l) zCe&KP1>hLXGj)ak-GF`cv39342!jdoD1V`_+I!Cdd;CdDEhaqxUzHmr!3Bj)HN;u# zx4`&$cGFjY*HSy89J$*CvIa>@$G2DvWaJsqv)dE+lQ)VIc<(wfyGOy#}+BPkdN!l+Xn}*I3Z!KPL z!$VZN(lNIwXVvS<)}|@L9HGk%Ytzng<2}8cr_xwGuI8)dSMz;U*Q?ENR-fM-8<=`& zK)iw8HhL?8Z%vgsw(#gfeeqn0SXoPdY zqvadza78O~8AKH=xsVV;a4`-QQG|I^jWE{QTi0fee66@BoYzZv+vS$2G*{$aBGb@l z+8{7jQ>$Er`93-*1A(R=-RDhtP`Sh-2kYg^6I`r4y#RoYF->5+KmE;V`Rw06EIB~y znfdDF3muuAV&>!MX_WO_z@YZbTW+VEqb2^$C!YgknY~%K6ASME%DGaW*Y%=eCUdn~ zXK2ObPy0u`P7JV(?e@my-S=KA|Kh*@#PtOSElQqpAsoF3;5HxwD%(SMvzuZC8LWiYimVO;itWQ3rH1J8_HamkB~J zVVP9J^%`~S>A*gms0CGqRtWI&5bnI<$d`e7{gYQ0ed+ou^8}ZXC1hY}+&79ZX72u!ldPJx?Zs@{sO3z1S%yjQJC4eU1ZXxE3cS`A7yI(U&U z`fi=HSIcg`j(bRy9)Z@;AN6W;scp(u0$T~JN?;3!S0%E&{?R3i$d)LdZ(arT_ap4YNFXui6z=%_J^}NeCyS z{yTLVCph)oV?^;~NB}WEwrAQU@FXz{nzQ*bFL4>0wM6Zqc{SdL1de7900KJO8b-Hq 
z77&LfQcx40<~6wSVKWLu1^`KCSC0gkP`LQXMN`jN{5HU)-Ae_T3jUKPPh*F!U^aJC zgE8XBHwPe%9HU%)*sxuE;9Xgp)DB(;362Y(8&U~CmrUff<+8ljHF&m4wWfvmxiC*7 z=T^yx%M?>vc}Qs^ZqukD^Jw~`=9M_wt-CzU5b{CB2bO$nv85wHF<|(V^Ha3GpP{{d zg!z!BC+FA?PuW&kGYVHlx|5@qQJ+kPjh}wQ_C9sviEhVr8#J3dG zHrwx>1Y%CFGa45;rz|F`O)ZY8XFd_G<|3hzAgMjqi7{fnWY%?okPGU%xOsA-)p6B~ z*SCU;AfPhWNtOxj!GlNT&;I<+F|}Z2{@xziWn56CLuHou`E_lBX`K7t03@>n98+Gh*aXYA+Zxd; zd7=RAvt*iwRG!Ek8d2?R^JDqY#063L^+AD3f~5qO)^GBF1#Lqf7JLBs_~XyZ;XO6! z*kQ+8@3bM6-iri*O)h4$y!FXzYWbC&*3JQWWo?lIdsj^(B7!f$k>!+_ayMZSRxTqU zWlC;~#f7;tR$88g{cZrCnpD$+M1CQw=o3psa?@X}tQixJLQTOe<3*FZMQc&ev;bf< zNRtG$`}Ug7(L7VvADgBCh6S2u$m^WqkEZA4F?@G;gK;v%9+$0{b-U-ooz@b%3=BEL zdp6Ipg2_jF_shbZTE*cU90o3Z_K-^F^E630VPj9I}h z^@M!{)TwWZy|?_UC1%(Ff)gBT_hKu-$056>@p<4oB_NF&l`-||Hpl2w`$lT~2dO!| z+fZ1}lXMn5*{%e|%lWofTM29>aI*xqfcRz^ZFBrCOJL2M7L&}4ZC+E5G68(hhA}(G zl`;M~=f!JUfE2TH#Pu|v06+C{`tLF9RsphJXmjeS_8zgaAkNqQ=cjWyB(nrR7NQkpM3$2Mnp9?$Ex}RMURr05F;y zXuF%x&2o)qIRFgj+GXplgzZPVAJXp>y9o8Fa>aoK8qOHS%esO zTsDNSeXx1{sl!4`Q(I?vL zjsV4HwEqkmx}ASEApoAb%i$LjmdP%LOK?gcG;2c^hvM$ z-+%hc^3(r{J3V|A@3Dw1 zhzBMgLi1tFZgK1jhG`GllfxlvuYI`%#Zsade>$d2!1h&1fERW4s zN~%s`b?Ra|Y9Wf8v^G2o&sI4O7b_RGQkez>XUCuC}=wIFi5HA4VEx2CFl3+qG?7zHZ zV+4V+k&RO`2Y|~6;>h2mEY6#90fgomBj1%TlN*_3+R_E}Di`AT4@w9}SXu$LNfBTu z_^s8=6D;54;SgHz=;KCCmIIJVKL(&_9 zFqZ%+`=F|;K}&fqG+u-%)J04Re{mhMZX)YgU!*S18p#o{hUPrmqJz9o+p?c$*>Gar zuL1<;=(&!^4Geq>qOZuNC8oGnUP1;rE?%PGS5d_FE{5n=!NG5}&f5WEe;8NTn7ij$ ziNEPGery~$FB~khsT}-mO1fn|(ZcI!tdYSc(kcj-Lc8p0T05h^e*7=}sKV3Ibvyqp z&#y0kU8AjFS+;XpqP7UFu~DO%ly!%+qis@bTf0xMDc{y+Rl0LtYQ&mjnjqN5)Ra~2 zRtQ?{UuYTC3Z+J{zOv87Z|#Y34RNAYd9ZyQ-v0_bqB%^~?ZJsjBa=4^eJHENB}y<> z;Im!x07;BlV8q~Y*Q`DpbnKMlGGy`H<=J^?k?$6O#FULYc>jFK_PD$KBX<14Mt1K4 zN^YF{X1TWq5X3GS9VtBUx*q_YE~wOOx{-nYegP0L(AmM*Quzm?P5WxUw#TeVim)9_ zw=Kp6kn${aY!RO+-tEY}p|3eX2aVqppgN8B(gH|84Mn?}L`)9J%Vrl8xagq~A7K80 zlsYapWK)iDhs_dt&)MDV4gA+0@_HP(bpc%i7FV%x!_=*SXKahjqNp9@&QdPu-`<9} z4JOo1*EYD%wt#OTCey4Wv&=^c8N#NR=uGsGh*damT|Ey&uIp-OrE?4_s3oCdLPt 
zdw8y|Y~CekelWn0hsMqDc%th-`$$)0r-Mfj!)G>q&~=RQhC&Ug8s42gz<2f;TX3`J z5u09m5)mB3PX`U{DQ_}ZP}pyX&IIiDkR=cL%@21(K-s-R%rExQET5m^ zfA|UZ#*RZhnjjYdvz|?66Yd@E&k1oflE(Wsg=2uzG@r>g7(^37lJK8{UO<}h>%QeV zB~PklJdhvi+>3eM{;de=5iim8gf$~-r?hI2{8CsR-}wl$l%cs;JYrom9{qbO!iM(Vpj=UzH0ohrY^@Qj~YV0{wS^sMsFG% zRCon5tfR3e&R@CPgpf(=cca?6SGha5HvS>y0{^$g4C$0INlAcOAdQ zYqCPJE&nw+mZI7+xXNE?bW`mU#y*$j31STL>?+V?%qhG zj(80ZG-q+0fPI2{D(znnF@EgvD%dS+)f``5p76umMN7=qv8<>UUgl3#)a0)d!5@2p zD1QJ4gNud2JOeNm8Z&=3i1SC@c7_&ApjN?elv@3=rpo(D8qtyBS36rw22C03id?M> z(f&njq}1$Dg)xMn#Bd8lRwD_Qp{ZA%tP*wg-L31%?urXz~>WHQ%4q!Icol+ zO1An#*^q8>y}$wHUb^fHR8zBBSC{b=a-wcc4g$i3kc7oPkO)V8H1X$NpvMuJba&nd zP;yBDw+!#2wVk5TRm-cH!4QAImyE;MxP@`)aJd9v&gJZQvrb6VaDKtV8(Q5iV0wp5 zVlL3UUa+`v0-&A(E~jdaT@cCjm@TeHye;Wq)?nHWs368CNLaR9pqHJ^n6`yc-9ozR zbZgxtpSGc8lj~(fHc&jk4{)RJ1i_g4D zr7UMgcw7zgzZ$oEUHPo}tZ!L@%lW^^Ro3-IC~V7aCGhQ%z!ngHyZU?E+Uq3143ZPo zEkR8)tIg*?uICTe^T;}<*NGaC$As@xJ^^qzx%N=Q?H-`$D zt6+hip+3=Db^Qo%6Ub9XRAW0ROOdXxiUHNulZhP7Bcn!Tfjos^MAg`NW6ZY*nI|fO zhCF6iz;%&J&7Yi!X5?MIBBt_5#`&T@c>%b(!e2LKRhV)mAoA#6f{_NvsXZWX!wqSq zd(qS%no=$@hmnh%aUS`MGshwgU1KFd+SfNIO*QYDkO+BafXXo-%P|p#9)MPD(FMc$ z@$Itc%mth~`n?_=p(Rxddx5_DIo7<-LWA4c>zDJQ(dWcIEcRgnNPJaL?rUiF)X!a7);9m*c8F$jmz)4!xTkN4DQbE4L>f0?^fbyXdY@ z9Xo*F31dhTCV_EmO5=SPc(+!N{U7&YHZ-VbEP3W*h*2c78-n6ky`|R0Z7BXy(QW$6 za(XFp+d^MN0-J}%7ooZ>`wAtn1;npVm|IfcYza7d#O%gNOdj*eZ*#k{$jUEW>vH~j z)i`ED|9m|*I>JVkJ+WKci9n5EnMtYM;v9w*bnDc%mNXhA>{)a%5c!iiPpSM zxPBsm$ysgK6f=aZl36~$vGpftHGUohp-l+jxs+H_I8s3*1sKjUk)P9)VX|e31QB^1 zWg-t}WK^3exHVqLn!q;nn{l1-P!QD7mYPqX6ASJF42xXkAyf0{wSi-lYh4rPU!+=| zkQheUkRlm^fbp9Ul-s~|*d%z5#de&i$DKBP&2|kIUc=HtOWKz;2h_FG}|uttvpe&p2Wf0%ir7f>%JbZy7L|a5RMU;lqB>N3h@* zL`}S=2s?~xUzwwgoniW7p8$FV!l+{d81JB+?l7>2m-rRt0dWUSHD9cgQ)_Fxvfs21 zwyid|8HL$R(Qg@>5vHp{mS4fN{V5N?aLg;F0gT=B29qMhqo#N?!o&oAwC(3MYl1?h ze72CdsEDH=yVi&N0I` z$YoAWox;^bq0vJ-b#Sm3yU?lay12kw>$zGpem1ewbe7K7seqTBG%d?9{?z~`UE|^h z)v99RHVbLa(M;+AAOcKq4JdHq15GP@ovll(3J!z6R(pedRuM_n?k?SKyfZhC3Zwps8ZNH!OLfV$KU=eIIVxh7z8VO2`Gp)1(3e*T*O 
zHtTs#QnWjmYtE$-*Q9uA1Fi)(SIHWaBpbr(v%Jz+C8^!4@(U9JWT)gp#)3fPiD55C z7a=BpE?hJJS$R-KN67OX0Ppv(BmNE>TO6P&PR$Cx^NcwWX&hwKEmTl)CQ z_OpZ66Sub3oJUP@2t0(w!{83o93(ZuiJPpZsTLb*r~aHB?z9D*PxaK2(PR&&?GT~&Zm zCw4(q{EyP))gz?%|t(96mkb*!u+(W7DORnPBu2Wyb%;NE?S5Pb*_AfQe z9)XiixM-s5_Q86Z=wm-AzX68fFOF3M2Tua4^+D=_f(1*lD!a2+W7cIEgE5AN zf6g)nNe3O`YnI68m%LqxoTq-3CrT5efOz%wujyzwvYmkx`|F7Q%v4v?L)aIVkT)LiGu$@4OOZBXtV9s-WSZv_=bTCI4F`j<$ix0~G^H;wtb)H)Wcr!Q|sq+-c>cO*G~<3 zxK@K_?ep{Y^r6uH#vAhG1;AZj-UGHj-z(2?YnbDTapS$`m=4m}-g&J-h_oaKZ<#ML zU-??)bme+uS^IJW-THfTrLN@AZ&_1jHRN@@+8)0w3ABH+SLFnGB;JzR_1TvBzCLd|^^24XaMLFC66Y^lzg4X( zd)v6m>bm^%w07s!$W6!S>Krfa@73(r)py;naAJtEF+|y<1#vT;hX>g&`ACIVC}LxUvCqJ*ieIsSXWlsS(43I z{*qobnTT{(@BR;Rv3AKtlBm;qeH^e&2^9;k_~X?(o;scb@^*Ixd}`l$ z@Th}>MwwgS6Z~==Vo2tr1xsJba6^c8FT}N+i*lPwE@!><744n`V6(p(zG)c}MTSN| z32pHC6ps#vEWmqfuiXEGH_G1YfZn%1#5EdPSl&uZaD#@5lm{_;X2Lk-ZN)hnV{Lxl zV0<01(C(exa>kBv-~H1`dGh#@2fO>RL!2)m+<9>pz?E^OMi*_gZ#G6ec>Vg?+M~TIl$N?2?tS+N`dlKLn zDk0bCmxb_cx?@Z~)i6ZcsMycE7rbW}P!!I45YweoHo3t>>F(XT<@A)rhS>G{@YiMa zp{7pWvpwXL4+4-v&hj95mj)gJZwThIzRVW47#DF5)iz4CMMoLTSze)j-Mp6|xJ9T{ zHtVZAEa!=|R^F?p4f9tMLKbVHiOP1bPqV$tJZ#Fh8h;(Hm0~+j%6-BAQoR(oPM)iE zd)=n(^>yW!>N0Z^HpSQU>yEYlXzL5eu$HT|+dQnMnWvpz%UpL{O9SbAUBW7VResk3 z;;#$yTRyfD`0Yp_rka>*@&;Mlu>NEQNNOrx{DfZ5iIcTIih6hO{ulgC`4!#+h^Yl0 zy2UIL^i-d%PP78K_?6yaOJ;n(Vn^xZI$~x8hA{bRGQh$(z=pxBa$Wx-4mtu;(NED?j{ye_YPa&&zk-eY@;-kMPHg)^UjKGx^l~ zg%=I!@M#(lbY;!Z!!~rnu4TFd+eQ1?r_A${6M!)epaUqG3r;AW0KM|6ef9bfeo<4v zS28$eq_sXkadTqGeSNvjvDOzJF{-AVeo~hDB$f^B(_P@E7Iydh$7Sa|#uqlj7su1` z9w3H|&kSo=>f%ra8CLAzr91cbJO3?WBpHe<;? 
zih_JwXZB5gn=M28x_!Q`Lu$HddGDmXFnnaf7(q-$#4wVprWB_ONfp_p#%q zg4eirkQakEnjEw$Kg-A3^-YmT^xj$c*=_S#`CF#3z1m7(D}mph1h#mrp6nM`vrh_@9?0!_THQ264QrgM>}{KPk_#3wHC80P6IIW)GH0ku3yQdqTNHPnwj2-IGQ*i~P#fzYHGp zJnwDux69Og#71--JCyQ)md|L29H~$HV(tCC4`WduKAIDw%N1iicw(d>bJOR~1`Q|6 z2P>oQ%1WG6UgV*kFPFOhs-by(rfuq00$T~(LIPVrdHrz)ed%(sUXgYU$Z}8xu+dpjBcaasl%xR?UE`NP&+jrcs0@)4N1#qwPr0h+e zusfh81)41JY6e29Vexbot*x&1a$q3W93bSqu6NpLN~b;~Q7mNBPOs-m&V2H29CpiizPP}w zsfRnb{(yb*uq_^raEXXpMDGCsuzeDqTjGJhdq&v{C1;k&nz_-x) zPznTud%CQ0@(op-wK0cEd8S`hwU>Z}&{BI&LT-)kScae%!U;IuXpbeB^~`ApA?T4; z4WYRcd`c&43C%d^+EWXV-JoH&cSJJUU2QjU$%W32^JFGu0vHjH@nislNvp**`pk=R zgnPR``p17%9zT3ke)7|wl_4(fdVrj%3%}g_M4&|DgD~CsHwt)wS8G@a%kF*bR+P5q3(5lZHI6ncuO36R6MZ-DElek*{wa8%k z}2kE9CC1xmnMYmFvE@qTL3?87~+9K8+$>qqs6wk<#pI6$AiLht@SEy z6E4y;XC52(M7`P`w-VS&;H#6s77%}RvftMH6-%H_P|Qd%`8r9)g2kf zdGmJBx~Wxiq0#VSgeM97xT1k_a|1PLF^R@xDow5%(lfMDJy3&QGyr z%uE4@$#u-|ITzQGCQAW)#@S73zYUAfMB*tzog!ymkhP3U*ZoA2B&fBv8Tb9wdzJ7P9N z(L6&IY^oon_ppj-lmFEOLo>5s}@_p;o-GcNl)koB?O9zH)Ur?ZoCid}JG zo8>Mb8u!3FzeZ(Ta3=^28${@>>>-dARFvA)OnFct5gv$u19M z44u<75e?g2`7ox&wTh?hY?vEE~);hA=xUB@X5_lyN*aG5LBE~I|Z?XiO zrsg5$EWQFw)!3yRBrRXpTpl}U;Ci^P5(r)n@v{m@aM4i$OyL$GCP2@aM4=^Ejv$Z_ z*r2&Qz~=M4cfJ?esSkefGYAWMC@PqB@{LaxlTz5Va#C6IXuOj*MbTQDH@>2*R~0NQ zwMV)pnU1!*)+OO2!8eI&!m}I`x@iIlj@tKzpI5Mg6MbrXo%H?Wu4cISY&v+m^!E{3ZzylVUO+!=4#um_s^<~hve@B?NXd*EKh`U%W1`6NWzteGX4+wUA+=5&#nmFs6-;=fJ}`wjHFLNqif&64*-M8!CYXAg&YMD`-gkki`$rRbnqYVMsIR zO0xaC!w-{TLv@i>ziyi3uBP8iwOwX@u6FCCLQclFDLSu<%=Ojcyj)v8^T_olG4as% z^FY})?TBAPZw}(-1zRU;^4HYRBG!xMc%{CT^tcSg4kMt=D+zIZZGDc7Mf+;kh?#O> zmap)m(a>^oBflONcrD*`f^6!D74V#g-&>AqznmO%pRs7vE+Hq0_Xa;-Z zJMi{8anEqI^$41;k=LEaYzv@DK9E5I!)Q2{UaM&oDERHBB6z{Tgkd=3Reb;TvOBv& z{b;}i#Jn=@bOBQRlXAK9yo`3S+db%%Gl1!sEpvBm2U^`dt~;#WpI*?hxU>SX0ll7< z3w-5j<{&!K^_u1YJ1(??M{t@-VnvD6Y5+ppX}rcS*t)`G6V#jb%F*Tb0gSj0+xxVf z-ND~3pl#BzUc4)?L`>Nak<-K*0c|CiFFuo|?<=*ewYiEqtaDwE*VTCYKLJy)+Az5~ zmNT06Hl!En!t_ei^Bk8^3z;vv+O+~!0md|qRtq_mA0uCFIx;&)OMV8Z)0Vo!?sT04 
z&JTv==x~f_4=(QL4|=t9(F&Ifw83Y%s2f0gybqzH(mg^OkM{W4IXlXozgfoTpVHrK z3eEVwU|VJ%1l|2`v~x-wxM*YSJ63rjEJv8W9A4hXiwX}OYV$j1^za!n$Ja4nv(vqg z1{RMN0O<++JOO;WlU@(+D&3s}n!g`i?1}fd2QFE3czNlV1N`$lFv65#uyYr} z36is~v!3irEAy=hMWWt$43ymx}ZE2Mr(2v{Yb`0WBs+~Dq= z*8oOL#w-@x9iaQu+`~&WvL_vY6|ds&fB)Tb@Fv^#;$QahN1vCAPXOqEXEnzIG{OEz zhKYomG|)!dts#i@`=AHL3ScMSFB9yir~`W0BEE@?MS0NF?VIU})jA^o4He}NdLy58 zd9NfHA^*~^?P#AL4LG=eCcIE##~m3u9mS_ zSdfS*xvY3HU6uGt;m1e3q|;8}V2=x@sD$$7L>ZiCNOn|wxefu3eY zE=KjCPMdq_r8_p;lTw_Emoko#DyPg*zm-+yuQ4xnoY&G6OM1o%d}Dj+*i~{rHrVlZ zpvN`MRqO=Yn4D!v<1%FTuEy5X?QtuCtpvU*32XuJS0(vvyB`4|jH0Z>o zw!k?qviTLWS!$*7T;))=9PAy@tX`iu!vZE%}dfGCLvp*$?WQHr9wgVvN&UOscQ z4huA3cQHNO8|*?E7z zoU+^5V-{^aSBdsE4-OwZDNjCpR-QdLE&G#$a=4535#O|$QN)76W(#1Y-UZ+#mu~jZ z2(HER;&gLb2fm`qBC~A+#Q88mWCAm!LGds-G2TBqd{L&>LM8xCbn0A%bXDUeC(GQl zV;b4cb0iw8%n<1hmu-h8$z7KU18 z9_(-9ZFh#CZ-v{imB3a4U%3RffcPu#f-M7I2MN?E$;r?8$yc^DL|V<@6zQbq9}|8b ztx#S3rdC)#x{<|+J7(5Or9HVW;&QU(8v1=~h$EmW4qdLm3`p4P^4fZ~&xDKHF|~mJ zm?@?OJ+GO4^&-H#=+F-Ls@7NgPA2Io@q%Ih+~A{Xhr`o_JlYP9^NCrW38^!6;4=xyb`*gE_Bt=lW8@9FxS$ko}lMiEK%nbT$ zZgF|eVms;`L?<-71(!;kKgTeiXc~Rw?u$x^R?%1aW+Jr2)JH0jmNGbYximf^q!S65 zFHc|YJLD+ssIjTjwjnk}O5?rXmBfH8s(C7iaphfw>oYhEUx_DtojbGY;3J$}57XaG9Wmc~P&}70D z5cdGYd&KvlKMFuhP8OK;N!!5|Q@chii2){P0yz^5ON+vMtSzT$0hBD}>)kqadrUCg znFC?%IRQ?n75n4e0LU|Z{GRNM%h@6AMjL$igV)R8_wSU8do0D`Rr^`tJv+9|`y(pw z1GqT3kC%g^yXD^fJ7s+Dv|OMq9)FIl@d*?A1RH69M%XQDJM14|ULMu-t_>86=B!O5 zl0|v(T0i<$B*T9^R7krykD8h^h)J)$?p{YWAX4Cv_*%+h48_-kMM{F!R$1!|{yZA$ zSdkGSm+dztQxi((d2GuK3X488N6?Z^($da-ik5eR?X>pA2Wod+9Cn7d+0J@|y{``` z$|Pav8Rt&`eIFORrIi^r-c!tCy62cmTwpKE_SYxR&@#t(f)41=w-IUXLtf=Al7%71 zj&|Ah;egF8cE}rBjoWn6bBy_jrl}9(pMpMc8J~AwZEf_^yW<2LI)jD@Bcmw@N`Zf>}fN5{}S%x%(Z-+JBN4)z)o2+0zmBy z@Nf)3J>nJZ2}d!+fVeTQs;fIQ?PFn$8iiB>4W{d`2aq_ZMUWLtXWJI9?agU^!2SFK zAZ|3odzfx$t9;6~zSDQv=;H6)E4x29Dzo1kl#?TDR@u10*XQ@ffN<6vjZXnWy5wRr zh;tquPM>0%x_?pb{@$#-cDGl~KN^+OU!iAyfTnE_B5oza}6S?1H$pMb>Ty(CvmaW3E%+*bRGU#-ipI7Pr4+>{h@*x=2 
zLxv_4T>$?s+p~{(fJh!2UIhLYhG4i7Y>3VKFw(vAw_z)RtpvVm32XuJSKR~K`oA&> zte-5Mh7(xiG{{8nR2Y+5PR{N^*8_y@q8--#*d@M(&D)EGZ+3ek%?3Q zDv32ACx358&jbU4LcYo0XB*vJR`a{D0v3w_OPxYrNBesp(*^tlP>dbN;As-zP}2w& z_K~UM$|Ps*IsXupH?D0{M9=xDSMrM%Oi&sCr45V|GRourz1{NmA7K~qkNf4xThnrK z7Y_qRopJ|2)Cb@UurbzS#}Ii9X~+p+0eDsVhuB-r*yN$G74z|VdF}mk?1}s3lSh|; zF>IxyUNzd-v;rgoD5}}s5W-AlLznq^l^RI_%Sh&0krjb$>0$(CK=)m&2HsixxA_LJl%33LcZd+KxT~! zc5Js-JeYE?X&U10bv?A<$H&LzW$NGeI9*r;N9E;mT^(+>Ygg6*CoTZMYOP%$^$-5(4{(>YQ~u;n z{;x88dQlEoD5a)VOe7MS;U7ntO8>+V`_#4lG-pI-`^{L_=mgY;W1v* z4ll}`U0%!MX+odDJ$TuH*YRGzoZxot{Dk)4s^AzGXyXxD$iX9AuRUb(7QS$C8#eju z5Oa_FwAGC`crXAoOm&auB0Jiu`?XDBKBj0yyB+3_jXN~M;5;?&m=JKSPg#8+=yuHo zVN;0+ROoKKh0WZQfzAwib)dvm6Bo=lHV8Ezr}3KH%GQMdLYWxf?R=HSyaDr*pkz%; zW#oxLsd)+S1%flRt!6SGLxj;N)Z}c)!y2#jF$*pa&Q8k7gI|<;ZyuC?@;|b5Gj_fh zz2Gh6AsXTY#KX?83|R0ud~&D!>i?UTAOHD9dGsN69Oi8AskD6Wk8Y z*rr+G)eShbs|`0~JT&TgWi)1!58iGlEBm3#YCrEb7lJ;0{Ji{c|N8$#n|x6Y_wh)= zcnhx;v??D*$w;Wbuk#)VnJvpHbH2jwkY`99!TM|Iy^?i?yGCT*#s1ozD4#xgR*vx> z?iTH~-Eo$^L5b*le`n}3=-8I}JC)vOsH160rGJ}NB#ng(AbTamZ+v~yvIW$ia$S>o zrL?z2J9sAry(Y5uNjuM`^V;5;e%rE(a>}umzWKN)mw9ihZ=SztbGwhEa8vnfIf7=` z$v2r^ypOM?zBX~*9#UU~_jWl6ugYPyvKOIk*{k%8eSwZOyMZDvBF8Lr{r9WW5U<=g z*H^nuT`z$fgm^>pdYWIi*q3WWmU_9eH%b7qK{+O>CU-_ed^Zetne5eiao%V10biBI z_N^e5yukq;PI@nKUq_9Uo1zIeIClwC(Axz)am0X$bRQsbK>8gtQ^(kTzQ-%tE}Gak z0OuWm`!Sm)Je#q_F1)!hLLcP(xBvce`L92Dt$cd%44N{_dua#;c*<5so%Ji4&4qwD z7PL9xdk(PDl)Sc0lW3O0hdS zFT2m5(wOJSk;Qa_^Kx)`R%R!sxT=Hp^c_Ad6ok!*_SAsxX@}SIM0BU1?oFX3^U8nC z8q+s&%Z)z-BUS4ym{hzT8?a6PSw3mBi4qetaeZ)T(#T)U0IY8@F|^87Mx+|iO&WY8 zP3t2~&jAm$7a?6P^IjL7D38B%Lvc(>8mFXFlQNZe1^9E-~XGR zlt1|UZ>tu zA25`F?K0Hk>Ukty`9iYFb!|vq)+Q}Qw#&CUA=8{0JBPW1z|M$_^`a1>JcAZe7cY1a4jK)nxo?YyZM+Xm`Pji~b8&za?<9 z1YR8=zPYY#j;#c~5fVsST7I$k0Sj1>xHPoHecV;~s`S#G&a|J590?Yk?DBW6rD?u0 zAv;Mc21_n}A#>u^^nrO65E0rx>yIn}jWLM;6ayj-Fq}B{o&E`%O#Bqkc!U`P4l0hj z2e|%48_NQ=3(-)2T6T}AWq`>7+Z;Pd>#Np17l;b$xAg@BS#NTP1jr}Iav_nx za)b(X*i{=mE_c|V;LSbEBlgh_pwskSqy_j@>t{L3WO@0}j3KrB^dEM=v2!4Rc!bc;pTAjtaXu}ln5w+}2mNyA 
zjn~T4^NaH2(`oqtApHC{L2r*Y3ukOTgE<45W;cJCs#T}2dHg}U?y-T$m9waNHxbY2Fu2JB_Fd z>x^15NeF5$@kKwZ4_3g(eu{^I%w=l~!{G&OJq$CEG22|bnMaN>(=$(GZ2Bqze>*-? zx8GX{Y$fpJOJEC#zkHc*Yy7%MfO%*RHke7tUM-jlMPO`W`pz$l;(EM5lvaQD~<(p_=$b` ztB2*+XjKmY!&5vp?C$Kx<`|losAatXpj@Ceo~RY34eEGpr3FgYTM29>@DdW(bn`U#;|o&U0^%1G*f#s`js)UX@~gS0oK$1NVsh91 zPY@`8(o~{@&7-!%I^n+@qV*(R_>vQJ(wN(bRl?+HFv6merZUSl)y@T40hMMAGqrAj zOkBo>Ev_bPw9khAELSJ?Zjs)ETqb^$1Lu$u* z6i*nMJ+N2`KVQSnQ%oqH@JgL+dy(mM3>|39FtLConn)d(NwC))vRCJby)xTVTWf0V zNm?7^FGB?!^%x7#8qhTd#Q?|M3sXzkB?{a>iQ?Whmd?Ki)?IMFpRI zjN}`+5*6H&YR<@8g;M3>wZ$V4={*=R(latMX8O{1Q}0q-UaXdromz)!!ESKGRZZYT zel@A`?U5B99AN&z?})qDnHKG=^%=qcqI^x*9=8(MO5j!!Xl|jkdE<9rZOXNgTR?oR zc(#e(1_{Kx2=E*!{ zyq8Trk3`}lf-}K|uB_75*Eh%$fvbQQUM@hyr^8vna~WImvX~Cr%*#b7|yx;)sYyS>Zrhf*QK)7A< z4r4fW3-m#Syn0R;XdZ;-);A3#V;7D`71FIb{2Mb)T@WUKRRZ!bVvNOR8jMRHDBUjl z0F$2GL)&PJQdS{YBJ8_Zz!`#;xs-r;GiTDaq}{S)4iWUoPsVC8VYxXdw$LjL@|P`@ ztkui5QPB=hoBPHGXMu26k1OnEr_W;f&o+=Tg6*&8+v8ROTM2yC64(Obuet}e^?ywy zuskzqFROPdwhpjMm z%eOnil?f~Stt!w%Q+Zw)*#rm#*x`~%KK`8S`Rt00fbRi_QRtpyJ~BiuyQHza4<0d_ z0DyP!JTS(%`2rJ(v(ZDq{1cSlJWx<(Is%NUA%>*?%_ z0OpiQQ(5G-Ze(yJ1ahj>^0Glty7Wi29hT^=q_y+aU-YH?O@`}gcQV+RF4K#dVyU`p&-JZ})RugLH`*|- z@4EI-+j3i^c71(q2IqCOBiEd-^0<`c2L6`vwk~{m95YZec+{oEKyqad6SBK!S+B2o zcht6`LK~B|0{~#>0yM*evNyoj=mnZHKwQsN@Mzak9(fOhMitY5nCOBU8n>peQj?dm z^4r>(^5Z+NnsWx*rCt@NTdLDj{PO-74-0RztudOt35%0X(C7*rhS(!MC0`di-Ol*1 zTnb_UciQk?_Mextou}mt^MuaqopOMRxPw1gwL7i~VwT`dWDe*gq|wqwoE^boyv5rFF>zhF%7GAJGrrp)L0ifIx|K;zAoqK2g3d z<$T#L@USoYn6bp-MtQWa$+shrrk^@$hXLvXc3!iO`hc`PPXYtLUT9SD%2BXy-k&{W z9H615w2J{xv1#6odyxo28}tg%mICriaFg*!NYO>ea!u~|FUXPfQI-y?*Dnd zNT`KZjted()~_Tu@+<)z(xfFYRTh*qO?`3L67x_MF8vS{mhqe_>U%MJgjMxti08q`&OgZ!()irQMP&BFL(xF7_i*4bOr}JnCM@a zaY&@U3BIYY;jv|O^qoDL5S!(ooO?yP{EjgxKlZY1tT`bcLYyD0FfQa#*{%!Hf#|v< zs4LS}8YC7}JW~oyAX`nd1dodd2-$Nn!xiH2k_Up`tQ;QUe(;?8nX%>X_>_CLgKu$k z=*T&Jdt2wc-snVWSL#{c(wH)8X}oxy`EL5wD~;o3b7-}{l__JhoV={^*UB1ww752> 
zzc1}jnN?Wr_~rBV+|xdfT+xxs`Rmn0Z9*&L?`}p!b=Qp8R&%YtZjS-)qT7Ua+s*GiecRA(chmmJ_UKQ+LVEebATR!q&{6yc zXYu2hsovNE!7jdN`?yfj#&zs!aW#qnKfy-4x($Bq?B)}b6LkF{mZ;Hj(rUYL^UIw6 z@)BRM1H3a_0^m@oxjOx?e)N;_@vj~O#xL;gi+-3zYL^{cvjJu`G(g`OKq?^%wu?+S zmphl`obBO&qVTAemX@-xPEZ*iDid~uNk#)g?EOFAPD^(z>K3ICc;K4(RFor`9>F(QSxkN^HRYpd5czAe3R=nxy86oI}(wna~*tw41hDOjTU&oATwY9Ny8e_qb z+BYUBW}MJZ3$`zxxDkgs#VzM#=8YFi^5G`4aN9(9c_S82SAwxRw=AtBPtNhv5@04F zTed5&P1Vr3O;}dBm4U*oj+%2Ak-9Nhv0v75c^BdgpmZ~sO{4TF^- zzmc#e_S#;PZvEKl;X172snCVKYB)F!SH|3RIA5Q3aGAKi&V~}-C=D>hdiUM`1?tJ4 z6KeigBoOwe*@;^)3~=Ht`zhNp`v}w^jmt^pX~#70lDl(h^T*0GKLRjQ4Q*s)G8AkG z>C&h5ygYvVn78tI>^i=Wn*^p^d=0c@igMksBG(5d8FpGjrULifQ{t>eyvs+w-1sp` zFf6e22R&XW0|*KrFAFKJ4jcdz+8x$H5_hl=?f{CFcX0~X#0ousu~VM-Sezx^cCe_s zm910#=?QIT0|$Z9%h_|RkWX204WN56F8f%6cTV0b&p*4%5?}C{0<0D+uNLbuLj%wX zi!UI*mcANe*P0HVNb1mEnl?85|LruBl#sYygP{iw&7P;X7K93vf%7zOfqO(B{Q zwJU*>ZuEZ0YL(b=S8MLII~&(r3pUj}kNoL-UEAahOg?2a#hXV*F#6=^^>czn{@K%M z`KurQ9b?(0_81%JTS+uX!x)`67S)iD@k55Ut0{9th&Fo4M#Xvmnv-9r>88xrI&ANk zcp|ioH*$t7p&%qeUM|acIaXJSGwvYWmC&06YjD$&R+rm&4(Wpi1`WIs4e-k~gh2zp zNg8k(s#8!Noolt?=zr<7YQ9_0x^$8t!bxlo%d9WRxGi&?lbx`o&57B#DKcx0O*M5c z4}#_^>WuQ0OTLXh=Hm%_Q zg3SQTb1cNZq^2b~O>Tl;T``<|6Kv!fs(n4wc$JcKz_g%uN9!>4r$y0a-JcG#Uw$_#gmIcpT}9h`xQSeIA;uy#-=MMlwxj6}uMi4B)w9oz!KzXDw_@t(#dB;8>O@Oh7WjOwD zml$*IE@&Qiu~Y^i2E4h<+7W$uhYnZ(?B;AjflZV4p(EO;rFg_0Mg_A9VP6S~b8Oh- z8Is00dTRRIv=utfy8#mgAywQ z4JDl%Np{l@*BlnW2u8y)3~&&TyZf9}Y=jH2fVDso2D@09bf9oaHdmxr;5LC{0{~K& z1Gw^tT(3}aIRZEY1Asp*zgert)zDzIurLdT7w~Y)XZh^_EO!B}&)Lx7@BZ>v<=&@v z%ZI=HVfpBH?w0-caCx8&XJ@!?@Iu|QDI05ypR#1v3)Z&9I{V@w>m}bQFFrdeU;J!b zp8N_AQNZzt<<)jvV@?@ylh7_5vTJR$qiBn+E4%O(0DFU=)DyOe_Qfngn*QDX?W3!M z0L2s5XC{wrq_Oq0+Z|vYM0G^J-i61Ul`ixd)?Zb>aoe|tE?HeHR#vd@a*WVs<0cSR z0WS#+SZ=l0C{K_WX;qm~*01D4jA)H}tZ{Q{$WfJN8}jK7|ANZCZv7@5Z-#yu^G&h^ z{5d`%KcpmHkH(DG578z#E@R0*&QlNSKA(OF>|Aem>FNr52jM%udpIhOpRiRot4vJj zhvMcS-s~WQO;%D0t-3%h##;H2;w_ll5+R3IWdn3EFh*_ys%~4 zBOdH2AF!w|O~g&@z=mf6ZmosZ?CKWWFaSCQIMWyN@;NUHJ^gA~9)CV7-}?T&a`XX! 
z`5tXxYw5DsFH7ILXGMvfr}xX^A3rLOKH-(E&j6!e-7U-Gd+~DK2*odW;mS3L<4r8} z5^-g;{{9e z@^XSkGC$tOFc!58FV48Jqmw@V=%ez3Klow!>__wR;t4B{aPy&jUP}SsZ5}!gHN(Ww z9RE!NJQoDZK3Z>tm}xnNJ@Y&=8Y;#x3~P!dZYQkkb(O2yk1kg>WsV;Xnu8#2<}>1V zMOk&jvXTDQEFH*07&I_w;H_(50K{*-3kCsRzXqIe)__EF&YTrl0d$?1nWHfv#$+Aa zu425=_sg8ORB(v-QwbHbwC^dQt0ZR^$Ba8hy?}$54XupO_;A`&mh&sU`=7MvuI>f zmbjKY-;L5rnA`3)GCo_D&rTkf=U=`ocR$=OAN)3M2_Lgu-R=jhGJJ~v@JTuO6@Jct zd$+uN0&Y4x!Ycd#Fq~kNh2>!Sf;w;s(Sit2WnnNrwgDX?gxh9M0TucufB2mbq1`KA zeDbKtM-JM=LwoGK05U!}ag+zFWN|h=2N1hLg&+*Bd=B~e!XdAGg^qX7p&s13)&=;U z3TB#?J4OQJgdTcdq_2QtO8O^p>P+dCKhI*U)eKt(X-n8k@6~YqXbTE!Q5jp&rjX+# zbJhfqPZV8ChuGG|U-Xl}4rN9aQ6SsQIQGVF$;v6e_rvd(-~U%1m*+qGWqJ9`mn@;< zLW>;_jAj_NG{e!&0JLBcqc$v@v$-3P(cL;+91`WJj4cr(I#}EIl%f6Xw{tjO9 zi{X!3{4gpIv9;g1);f1xdDj@NvE9n8t#3uHt>h~iZP}I7>z{1m6FRzTBdBr{kE=+s zrZzdX#X4}%rJ1>U){<|HT&$zxl6lGTl2H@2x@D~63#*NzvcOx=qyGSIzFHub+nUMPK0Wky%cuNs|Ul+VoXG**j#}aA> zcShyO)5ie4Zy_sw$5=X@%w6eWZy#_B@USWRb?%(A9WNkJ{nJFiw%`_NZ2U@qLo^{} z%lZU$u4^GJ-(elZdhKp?=wy!<*R;gzl+PbO zDW_imRv&#B<;M6%Ymp7*Y8B=sKwTx|9Wq&ctox;2i1V2*Fin&1$`=nmykGwHAAeB( zmeh!M1d+v86s>gG&cvl;G;C`-)uUkU9N%(XZ+xjvaGAUR`SiebuCesW0J_6N zTv!;7rh60R(}f-G@O+_67ou42TbCRv3ZGgU<&%%lHT(_rw@5ukyw?%R)meujZkl0# zw8G0|J@nPp%L{&$NiV%d)%mudZ2o1v0(;K9acE?58|N_>-w5CLId_(>i>;0_NZXP& zX{(2-hnkvgtY>fbc$psTp4EK%=rZvx(dku+UCMg{gA01>ji(LdIp#KH+%TRS5QW+KS*EE_FmOZpJwIjWmA-kzjYVKLZ2gX*yT0j@6Hv3Un{=x zRGwRIc%|QV-P{-uhY+<>HcE8;#9+6PC*q^0jah5etc{Pg-0PiQrM_2kybhD=(7v+b z4Fs>7e_Q4CDhldrcihD~>P23~h;znGUSfW)mvQ2C(gNgZsS3}<;AC`<@5>b2p{6jykM>2`M1kx>WUPu z0i5mfCBd|KYaQ0|?LYkN2wBGwVVEedG>;kL>DnNQ(q!Z4&oEQE#C0Y$R7MpUbS>0JvMVqjG-e#;tHqcp9;w*KUA;}uz$VAQ-j8&>WvH6ub z$66E;{eS}iqc2Y46WPrZjwOR6`X=~$D>cJqg%?#_A6nV$TW_)=KS6KSWgkNit^Mw0 zxl-?%m~q<3)z3;zZ^-Bqy@q(GtC@)NcS0Fb4xi}Ex$b=OBOrGB@&Et=W=Ue$5b>Y* zD@My}kp8w=X1kMXG;V7J6%cJAxKt0#`1F_Jwy@MD8D48zUrkr3=(_Zm@RZf%J+r;= z)yq1Vp}s}Aw@cN_xvah|jsALoxL5ZO9W?N6YQT|SFWu$}0m#)=418>BSm2u~wi~k+ 
zaJ)o@?{Gf0zj#&o1HJtR-1-_I8Py6%j5KKh_Bf3otsiN6qc%MbtH<8mKg+@Ji}7hnl3ZcMmLvcAoZ<8Jxn*PoaF_vL?KLyQ?doez1@4X;_i z-@Z$t!Ot2qRN3k|#hH=2HVPiQJl!ppY(AjsT%zn8%Qf3$@CPty$>4G4 zwmIWjpJG=H(eK?&D(oo7KuAk<{Z^$;S43-0l&hg6n)2EVsxzv%@C@+k1)8Grb zu5-TRn!R)xx3?+!Q1_sL-!KgffcQ78!+$fI>qL2RVoYoj({4Prb>bFux;ley2eFe@ ztFR6rT;$XeT_LhrmTd)VT?t5h!H>0ldGYS#RDWAG!8mhGVCH=QT|iPM*gXK~2y5u! z0oLIsPp};G(wUZ9tjtcn0g)`Tty0qkx>~LX0vtgvh2vanw4hi3B%Bq(&dCppAbOL2 z!t!zMOr~%4-W+g)#qeA(w3~pq@=hSEivc|G8`h6^wXcu?ImFqsIcqGlqn+pT#W7$P z|85?=`*__l_NVwqsRdax`tg`u^2ToTftA|eQpr=&+~-bPu)r%?Ls`qIodtj_fEQ;5 zo1K26c^+weSyzDIh7MNz^HtA-FbW%sRQXC#LmwEoX~`%~gSOGT-1Ac@J$5qnQ~P!F z)7RsczDV1Rb?H;aTDQPXjj6l5QHmBdVyI15XW8mM!*qY;|Np&4#iFPlE>p2tkWk+~IuvS5HRvaYcUwGttv2j`aRo|P{`l?dB z{7zqSV;7@~Y}(@svc8w#dgi`F+jeiS^xKR^e_o=us`Ixg<52gYf!Cpd0T90qtqwfD zIT~Q%6G-_Yo1oFj$*pHi%ZY!rDcw9Fe5#XPrRj2ON<8~?(hlpTMTqy$gD`n%T}%!! zB?VdKXfaovm;i_;Sg{4|x_Rtkd7IC38)mS4fDzX^owH8xx4!+5ZIN}EcoF`$<9#M! zz{-4aLRrh;U<@$a8Uhp}1^`s((F#Pm@+2fOpIFdv6QQpzfbxX;m-A;KPi&zK0CFif zH*U^41yx!DIieeZlt4OlYF!5Fu@;s>!w>c0{k!GSv(IqBz|4zfnC6g=4wMbLv^N8G zkw-h^4mSWVP5@-jDgVh?c`#w?V1YTy#<^?W6uWk8SB$@_7UJ|}Wfg=yP&4Pz3r#x{ zv?Ktl=^-H<)Sai?P49*Lh(*X=Tmfq<<)mr&G#ws?sI*B)k_+EmU8`A<-WF8|8?xTl zd7e_&@(m4jXMaRVGCgk?`obeRPH->1FC+Tl=7f57af@&ZW38Y&_`3Tds+)1JrU?Dw z5`KbQeZE<3fiy-Wyy!3eBS_CRudUILIuF0@R3=I0q|~*Y6{H1tx2aXu;PK|Z)^8qa zE0ozyWz2V7ac^J+#Om=aiTdW0GSoC^;2WZW0T6#fI%*K}_BG(Vs-Lbv#mQ-p$Gb}& z-TJbRllPzZpA8&bGVu3&cu&?#teG98TpAy~F6FxFhGZ>DYe7r*<1@)5qq z|LH&cdHL##mu#q!ZWY?&w620hDz31Wm5^}5B!~nUDl0?+xbOm8!DS(Vt!@TTksqKL zV3XE8WQ;gam*X6rF10fIRvX~SSJ_wyEqDa)0665gK!>Om(sujj;O;^BxBvSemrp+b z$MR=?{%7U0Pk&W@^0(iD0uXN9$_Sk<4_b$(01iRXqTFM>Of7msNeS%VV!My!C2abaV1`S+C0|Oww zjzP2CT?SdgY*9 z%RO!=SV1Ei0}o}q2p)sIxqNK(wy>HX?d@qjEsBKnkTP<5@ zw;iQk<&l(qFigG;&|et-={jSQH#q7m0j-vQ-HluqdY2bK-9}sg+4tC_W5mZD6fzS3w<5DOup#R@GJBHlZQvwQo~%dfD5eUUOeR%E_ucb+UynEr{cF=+kD#3cYDYSC8ksg&GH2|yN zRxsR*FcfYqnj9}B3YwViX}XrM1X1$QitJ4X`iGCY7Gyz8o>pT3!_L#_lVi5~qEA2XcrBjUPi<)rRv5~(axyZ81*Cty-Cu{U9UH*tHu8$=?;vUh^ 
zj(*pSJJ#?(x4hU%A34J8%|qC}2Ou6D?3B;OEWdYjQbsJ*xBK)Crg}=_TYTzRKnJ^y zQL33k@Al;0wP=9~SU9C`8 z{6?{6{9L>2RfLoGD&4heTj_B4tg_l5amPKL`qXjw>Vg}1nbh&SLgia{$i?fa$?lDb ztp#2~M*Lp1D$F@yrFk3b47t4m?1x78dHq8C%D&jZ(4?w|E!mrr>(#p{@Aaf@Xxt6t zem$~X;CeI3;)m>JMK4Hj<%G-XyDaa@6|54jv&`R<_0-iep7~g`hWdkF&o4d3xM}zs z@BM&nthF6yf=N!CRJpR^1szVEdYx=}R?Y;k$LSQ%#)ReDTkNhDR<9^irP^3aWVSy* ziq(8-sXUTeiz`YKZzsS=AQ47J1;x2`sX$M;0S^ES=g^(88-oP26wU$Wm{ce10Efki z&DpTB2}*E@nC!3;1zQqR{H$4(VTVW8JNkFBeeROa9+pt++7vnvw4O`dfbZHM>1xwF zMpK#};@T{MZ`^zYD<6kjXMVzULJ%RS3s4Ne#wBDlc@n^CIme<1qF&z!V$x=73c z#B-ORpnt{y;)P(LHjHS3hfJ`1gF>Q3+2|-5WZQKI&4x)T^wd_CELXl;;G!UF!NOMt z#Fp)Nc#E<<^vA_2-8nBt?wsGDG#}g}c7595hANCZeSl*X1)gS1a1WSpBQ$0;kTGj| zkFdtScaO{McMi&vC(H8e6fk(USB@FjJ~5~+HZi(smu?j?)k}4q!ZjN=69spY z%lhMDWm$PVDS}_`#x``*>*sYT>v_LdSDLbNQmMcGF_*4PJ~!3%+OkPmEA?06D6!IjR~B92 zvwCuq_P=>Ryeji>HfZ3jY9M}?nc=E0pFsJ-59^I3^pd>nlCGERB{VU+g`Esm&@s`* z%SX*mw%Jaw3*J`OMZwJvu&+`t0E5Z*=n!DMk5zf`7+?q=mlOjC#!_TAZr5!TtZkQ< zHvlvOEa8iT=WH@kMu1e%{PgSvewTNl>C96!D=jKE@ zSca81He(SWM9Z$%2=%anCaRp*V{=vNes$Re%3SN@Xf%QmfqNXHZ82mUS^e!9!k~dc z18-jg10a6;eK1JymNZc3EB*NPTm`|2nF%>2V`i)s;BrM7gJ;ZqS1hoR3@!~S6LQ3K zsiiphR_3iO3=qjnZh$~7th=Z0EV^Fy9TP@ zVFf>Il;su^tLU<{dj4u3#L5E7MuLXoBVM9Aot~D*FOJKd39`4L$D}E28w6Afz;Lcq zpsuw3VvU<%A;tgX$w$W+Q*ly!AZEV}N zZQHhOH#xEG6P?)3O~3mW)?UwEbIvj5JLWNn^_A1)mAu`9zWtNXRu}(CXkMf!7#)nC zSmTRv(wM>f6KU&5JJTDL@JZnNtM}XR)#HJ-9`XDp=9JtoF_juMv5L)A+~fWevjxU-AsRi+e{yzhF|{FWwIVuS^imT5tW`?>|V&Kd7If zJP_-~Wvju_*@<6v#+$8Z^YaZK4+I{JEhOzU-p**dpVsD(vPY#Irm5PC*ygr0KvMQ`* z-wd*nvwr00M(2uZo~=F*btTwi@$R4}0JCy9LxKzXAOE|U^8YTT|0qeBTl2@ol!UCM z2Qc8MAzd2nUAi@Lu1oK5doB{})nzq#ZOj|UdgQf`$!|fHg&?Yk(my~uKm_d$@<0Tx zVoYkV`C`*Lg+{vy*|28@CaU-dKvDr`uVz(t1nCYI#1)8amm ze}^p_%yiQm-!_mgtaOJCZaCJDx6$d93p949(PkO)pu}rqsH*DGl77#$R>KK1qz8_zXnu z+4{P;mQd>+21Fv+so?K5`?^suAcNr{79Mna{QY)^V|3iHd_;HvrbBTdd{6tFAXwmb zxj5)#QO#IxkA|`U;VCDIrHXqy|C6!-^LyENL|^~nzte86`e^Ys+a%$GHmN>Q{oJ1B zj*r73G%idz)D!NMkjhwivB(ag_d!T=!eJGDMQ2b4ez%0*LUUow9wInK=CumhLgpsZ 
zT!DarKmj-7yLNO6>`?Re$0bj%c~I@N(6lgFXwooZEOzinlru1O|}H zn69Gx^s=r_{)h=c{>xR4Bdw9;vwpgCTxEzwJs+G6LFuPb|DBODmR26~LKbz2qHhqT zCIjH)dTAaI&C)jGmhs19_1tzr=~|5fF^79$`R?q+h`CW>l>_FtB>rTWF9w<;U-hz44jG(Dqp6#juq z$)LYEzRhL6$`Cu@xVH4C)T)f*j6Ys7&8=G8GdK`uq`+eCflQ$aJ@4V&w&_5a_eRMo zHK*^xgoRcMkxy#zrz~fDGY>-D5qIYMnm;N^%9L}$mZetcWTu`Jk=fAY0x@)nop$yw zE94r`e(D+e(xiz_Us8mRqK9`y5kH&!Fi?2$| z9afNPFy+2FK{i$iR45PP1Ae~sPYM8l+1(7YxRgy+UMk-(KK4xyFFhviR@cHIlJ%dB z7zZv{KUTEzq6yCswhAc%8oaFigefoQM=V=c_H!)vCNJYG!1qmEKU367t=^YLK)=6M zsaBbL2S=2TmYJ&?Ue{$_68DLktFKoybxIO)AD5V4of4J%B&t(N@!h)kQmNIdE_k(C z+pV$laHpd`kDHDLPPom!4q$J?o^EL)rq3uLSiSBk2zFEzt5?5rqlhSbl@>5r#M%B< zQ{K5!QNJVbtffPL0Z?gZ9mv>&KfAq=Sk8>!!jxi;IPR6Qs1yyYjyOQ)OV*Q@Gj@Lk zR40NPi?2qU43|%|bCC~T+BNF|^D>&QJybh>5^^g}4gM5dRhbKBS9rUu;A3huO*wUe z`vWpxAFfv}nu5W$@^t8*Ozh~kt}XG&6*#r_F)jo-Jd3KB+y{z`8<42AUQ9w5OhQdl zEDY952AZ}>dR5-DQ4P}0`0EcL;!gN76`OfpM&-j^dA8`NaVpWwy>2azTYYG8h5IZ37MrX^sploz zbrH`V$1C2Juy|+VPH;~wKBb`-vKkf{%e75Yvv$-i;ED~NUO~#U)zn_VUFS@GYJ#3& zZBt}PmU6gnYrrTM`JE2;u$LEAn}tVzipY1P4xbAX)VL@8lD2uFwJ4`#?`lZTqWwGI zwrl0d-A<;f+JEiLZtTv`D;cB+@7VIkQ)44O@wh1(*Zh;_h7()$ma_MTbStB&XGn>D!}?XGKt0kTgmzVnt& z`vD`yT?}$0d>?|Pu{SY};vz22Y9x)7|JLWaBzflnwHkaVV71V>zJ*X7=J{IlyXjA5 zXRP)Yb4_%4?JwQFMu}gA;o?Tv6$SO{>Ax(PrB8)f_xyTzM6y}Ui9#`f(yB5+%SloF z;()p7Ltg=6`gI>Ne537QtAvWW6&@!{i@A_`H^}W7nyY+JYW-LhTvcsiV<0XWKKJuD zc5ddlNB~`HN|CxFF#p5(BWUxcyceKsGQh65&U!hQaCDa)TC^lb6vCtT>J*=D4^a!S z4Ki?o?YWO&b{P$i|2?s8Yum=os->fo&83}R4rfqWED`_Sg1uF+J$gNE;+6o zhd?zTZ$1f1ys17x28utMZHEtcO$hr~9EtTcVu4VWBgtg{VuX4!X{DC}8+ZDT{=IwE zzy4Jjk)y;a-u7%$vKlg&T3n%3tc|JLN;Ah2$;rIn*7%R`*Mo6K1ypGWoh3*{*|ijC zsq^KdikFA#Hxg3arv5?N-)KibsoE7kWwwO{>K+bXmaT^f`%&F!ipb6tkH=8%h;F(7 zqQ5wHtO|@$&>YVq*NLLYiAWL3rVf^HYrdmJ8@uF=Z{ z@#xhb5HP*B;|wc0+c_W+>edu1Fw66+0P~$tDDVa*qSY*cyuX+r^+&x1M+kcKe`AIj zq%u?k>U-~KHU}Uwpi5wslHp5VRoHt|WO08@S~S1Bkn(^1@k0*!rCq$<8PR^h)TxvY zEG-ez)VpBR58Ef6SYHjW&nG%^G=v6&pIBO}vYQn(C>2~S=rAlT4Av96*%;Fo=}jpK z<)w4~Hq|c(NAw$M1X)c97fNax9?PnPo+=O5&!TYX`RFXfkyXj^wKWCk$C}*K8aAR4 
zQq)RJ5}1(&@I!?dxhzRK%Q(QaDwBY{RG>@+1-+!PGsdR!_2d)D~)h`J`) z>39;qCKMv8{NbspTUZ&{KOCI?a8|1S2E2xWs3&ixraHu!bD`$y^zO6k;&FS6zclx( znT-cvd{s%)Eg|q=T8)iWTp@d(<293ld3w||eC?J*p*N1qdHXeY@}mWG{G;lxZbpy= zsk??Toy?%I+8J+&k|C2V-4)$e#4C`?5;_|opk(TwdTJT9DRp6CZfdy2yFJF6PK70` z;X0B-o7;{5^F zAS!M^=rqoZnb1w>F*5f3sZ(ppcY)WFd3Sup{-pF+I#x-sK$y_sDgmq-xFveUbQVpr zx8}|;>d3Wm{LKGL5dR`DU*5#f-9E?&Toaa3l)cw(QK<6&L!xKaEBqe z6a9s5ossCdLwgXlw(b+NN_tbO%B+aitZBI!8DAl4+_w3@tGZb7PookCp;mPF66o|@ zRg+e6$5zb@VlChQA(_1$!nHwakDAcK z(!rzW7^>1C83XCiGr#4&`7Zl-=gd#{zAg9OeYJNP&caUwjpk|^Iu6h?ZGxcUn{E=V z=kx4c`&2z@i5%iwtlr`6F%pdnO`Vd&fGFkzjx{!XnMnZGx|>hC)uXG^ zG=Hf#SI1^hE96fP&`iu%peQUd>hJ3d-#FD)ZPMpHvpR`}V}*a2CqS{(KuC;!yux_7 zA{I)btdTuF?){LRxM)rAJOqDqj<(OR|fy%F{szpUqrG0!*eubyz@ z0g@a4J5DJ!n-6FVX@l408}>^hV2b}c{pAZrQLH9#PG7qPxKo|}R@bKOozZZ>N43$i#rjq73@z&QEaiC@O)oA05KrD=y2x)`L)HcnjQgTSs;@>0y=&c5uvnLGVtd#Ey zw)1u;L0BIVRzJ8%xZ1%h4Qrjv+HV)yUezy)@7!ObyP=pSA;~Tz>4`J?kutFt%o1xb zDJGu|h&8o25+|H2zG&6wf78~bxUuD)JAC&zNZ}L^JE%w<8-?adX}o|+qVl!++!kv^ z5{{T;yP$0J3_3~$0M`fgvU#aU zJ!<2F(V4dtTZHr5oihYe;0Vwm+RRo`8zKtg|I*jeKccBMkZxl}X*W&2+(E74lT#&J zNhZ_{CX_oyTO?@>=7$+%k?gw6OZ|0~CK8V+VSv|0f`nrlMU#s0n2*jXwBkkmes>cV zD?`?zgieG`W>E4%(8_0d?GKSE5z-LzU9WMjJX-y4{n?8aDVW?{u8r=#X!OZ&_x}5n z0VZgEwxYV#p-Ss8EkVm&{m-?TY-v+LJ9E6Ua(!ywsCDklN)Ve;)z1xoCrQyvWmtX3>S zAR@APNb$( z9dhICyg~;O6Z?g$-Md^}WWDSBeP_cVhA05z!dGJ8h2<|R;+03gGuCFzTUmjD=lsk} zq5SK)@_cQU_CdrPA>V9yS2gg~jzd7MKL}JkNZRX`H+{82o=$DLioKjXYqejKmgZ(k z<+EOXtj)NdIj?jB{6UoUfrzD7OX@hj-Cur^n=>PI+AWa$b*&wEO85H2ydqC87UQK} za0*lcKDk?$SdP_~dup4JcM*j9noad^h(T-7-Lf3L?`bByU#*4Tlz&yG@4V`V_jg;b zzH+-=j=&vog6wy16TZO3k>7cfm$R)5!MzMQ-%0+8GZlBjr1!SufcaPsetg1G1jrKH z8kgHq%fJp$I$oRqbbq}B{@V+A&Fk=f*MG@T<+f`2eMYolNUxK07B>DTC;G}vFvd^- zSiGS6sCnB`X`$UzepTjR;*rkz*9&ukOV>hQ4ZQ9#2UR_Bb2?8=%Pzbvu50c6M}>rA zNX|z5(4#KzWx=KXNn^XeuKVH9*7fB<;$m%A7Let_NtD)`vR2MMCxvNzSFi1iPg%r? 
zuVl(yCXEQOZrvo!IG4a|PWW^5sj!8&Bz%p8yZyl4t3w|Icb6 zXurbSt-X!(txQQV1|j?-FzoT8V)2-y_a%b6@bK{U*$Ks~;a47rPtN(Xv`Wi^*i|`O zZSDFMxzEqf4@3g}^E$r3Ky@zUq}mB9`4Pk0H7yeCAKrf;&46NJz7cMUmo-^;jJAmn zlkbn!z|2>`7tYU3iOsIoWb&F3`H_l<{2WFdfudF! zE)w=0Ey}VEokE&VxzR3u-!ilEuFJ!zzKmUZxTa2@1z-z!se0Yr3jm6Z>+KE>wd!F= z65*G|uP$%MZxTGq_<0&ZY~>W~tiZwrbG1|dH`n*lAyAwaU-RcMkuZ6fM(&drDzoVL zvf_jhCqacHM&ADlxQ#M6IKdaCd4WG~>})yn^bAANlFZD5F#A;VGWn}}z|a%DDuxB4 zV`Ncg_#q2ZO}6&Pxd3|7*-w3+IBJUg3^c^_5SJcbt^vS!+79WVl|jm$C>*`(3Dsy$ zUXeZm^G?s3-^JiGCC6WrWl2R`qccb%FU|rKj*?_JP=tj(YDlF z+teaSXFQ(2P=EZmi9p3d6xX+C=-O{Gw_mM-yIIz}G*HXpb61M0IXjP7>e{uC6kz!@ zm{QtNx#~g*y;}*}q7=Yu#>K{%+1kiZii2Eu}!6~iNqkCHA%Zo=sS9KHf@au zI0xFDcDTZ>0DoqL$K%p>T^+jP0P8aowO@8{L2@6oFyKKpA1sQw*brs>IJbio9<
  • ;TKM zbCUJ1w8KzuCvxO7+?#~{ky6~W5Z2lrX0JuCw{^|K<*5htAEt?Sp~nZFO~&_EfR~c& zIx3M*84YYq+v*Y_ItSa^_;?KzjQLK|E-Ht??zC5{_c@;i%SGUUWg9aga6peVLIQlK zK};au;Y3d7yiiK!nPXAtAV~!rMm^q(Fo`$dt46Ip+mgjGE87Ll57r%*hg_JE!-xIX zDE#YjyTlApD>j5a1aURv>_!O;+gpjT_slDXI5AIb;eQ6-+e&oaC5aZ!^%(d}7Nt2gfKWX<(WtbiR2)a&9r$vaU_X3zqtBXF7Y;gr= z)oSC9iVXZ11}A#uM~*7MBS~Umo!1dJEa(XQsvZRASYfSa*@y|QwdW{Tt!q84T8%7Ly*sVsx$QVVmz^lx;1_l~Nxhk}Eaglo zEx+Pe=Y&gW>~wj-(n8T5$iMvY96>412Nw)!NUj*hyLS(T+k%&g0sxwaI}43zi@!SQ zia^a>qD7>&8n6+h9ye%G$EvPxHw9yIFfV!%kl)p5wm1k>Sh{$)l#W+6BGO0bN`AFL z@IYf^C0J1U@ z+5T{l|o2#aUyJ*8CyZ(l(LlsS7WQ$IK_su11)nk*)5QJ@8Z;vSVuT4qbI(& zfADlBpXRDGSkkjspc#uCdZ=>7YVogr8&^(txJ4Rt0M!$hdj9BrIQqGzhKx7$_yNN3#mD0NBL#FA zGY>GHNxzE-N0v9Rjj{#E@`x-|_7_Y!q&yfy#gHWXAsn>0Dv*^SleBJ8YBG_+Sv`l_O49&CLT9|vHwMhE-TTeRvMVNWw zdTn{R(l0hp#$n zKH_EuanjNny3U#)6QldSW8o_$c-#uG5svZ9 zXj#udkFp@g&`0a#t@3n2WPWL~sVE`alG@j;Ol$AysHmlH>HOjEw>C<%T_NpO?1FJB zPXtjP%1roK4xCTD&W%Q{if(QrNN$rl_Og1Z>ub!dAz|7}w<$LDfuS?=@ka1?7{AbP zfNEk_ZyB9|>h_-=pEW1hi9xC4e`c{Q9Te>obHjs)WL8TsIMA)cT?}^30LIs*8Nf+f z4aXEVO(N_%%fcG8+@#R>eGRR#o*6A|uX1(BR!G&6pz= z{A0k3A*;wnj<0nMoRSgsN-xY#012jqY_V2@th}1}F^t_T+;D+YVc%&2&YY?h+o_}l zSy&Ng>5t#9$NZbGXU(Ji!JmQQ!`jTMh@z0#Wr7ewytMUSz7%UEYNn~FAuhVX)G5Tp z!5;G5S0e-IVc65fZboSJ)aamERC~v_h+6xyyX-vmbW`tJv0zd zhixtm;5~|HmM7s{4k##d=UsKE4!z=r7=hLJHnn8a*~gDul2{AL{3TdVHV4QKf^V&* zCU>iY8K^cE6RS#_V#iEkmI$=;SxUiB($1VGdxz^aF=<+KxFns&~2;P@Ih(0284{a~%PZH?VpIOFuJ7|#$OcXpSBbhUO!myx<)+mx^|RKf%2xO_4rIDv36XOWsSlF`Iz^*Yl9L%t3r z51+G|s)-4U*lt)T{HLcVu~u|Dz{dtMj;#ZdY((&%PzYRj z{#C++3+^vu-P_!o9~rJ9-GvL~V}$Bq8(+A37TpSEMP-jkYT1KU2cV_`qbUV@M$YPJ z)_z8*sZWIV>Jjr=1>=7`x^r{H|Cy>E2BP1iW!(3ia1_Ja1`sGU{uHG}quwgbn2#rV zVa*NIGGDt3Q)ah?;VjVb!Eyd_UCZ0M4g-!1Nsow75pnIX{|K-h3-@{cNQ2jk($-lw zGHx7sVB*PWjl8w8=;qC=YpMfQU)FjOggCaM84!_d^p>4ing5cS!>*M!nQZ>i#7=eF zmYYMp@M9IO4p{=sqDAIHjM&k#qzV6XTs=zh&Sj0{)iAsxbcY*qV2~4eF#(K>m7J$f zPhc)H0vJ}GXImuoMr`|?-er;M!_GKZC8~E6BvNDUYxhuSFEa2OYS3a8df2AaWZD-H+JEo&jgV-raEZ=rkNyy7`ys1v)>Ayk#e?FF1?k*LP*Hi6D zM5%ta9hHZ 
z$sk)4MO*|sDI(5J5PF+7i*8Wzb>Nsyvt_-`LLk2eRAVcPL(1`|O;$F-9t%Tha>L+I z#M_?yR8}$FxiR?lcGdmbz}s>U*|i>T66~`)|9xT~0R5tJHyCbg_`X#`L?l}iRTCe=XbB ze`^*prfww%#B)BFcsO5ub}(jlm;kLXDIIxVVC_@+uTgLbw}#;1h#AOCQ{nH9)tue5l>lLO!Qu! zO&56l4I{|*zLenVcig!!(TG}}aUCtfgoijFXu6jwYzd@Hv^e!z+7z`;mN2v|?FW{y z?+tFrD_AEnNfLQtTQWySzh`~7Qp%>Qxaw-~JS9%iPuS9w?_@^8ud8yBW;Zouj1o$2 z`PG~rf=E>_9pRa0`5I|1%TV}}kxkQ6)m-2&xfO!hAk|Oj`54~CRyA(CUS6(J0T9B( z!%awVXKZ<$W@DGH)qg|f{C^$*O97!Lc1+Kip5kTHpuo@y#9qlNd= z4yZ>XnsYBR_ESw09qnzW^!T_YiYO>inn`<4Ge~$ytmPffgCX&|rib#CI#Z(iBBihc z)#L?uN=>s&k$l0~X;o)0W#6k3F~{iS>7+}9HFhqn?V)1spcap11C)>g-6tt?^v%Ji zGErQStB>ggwXh*NRJjHN-pSLc3UzIb&R_0W>2*vQ!5r@r)uTQZBjZk&7o;7e_%M$7FFr*sEf zu3%?@=DR`>15MK8MrUjvL!S1APtZ&>w_W^y-rV5#q}WqQX6aa4L{G+2q+OYj7eg*l zbT`lMo)sXHp^v*J8D^mr%v{xXK zp}K~dNC*(~2<>kcuv^=aPE4y%E{uoWmDa?Pi@ry9#-vNG#eKucG@Md0V?Hb({}(P{ zoNWQ))V2zm`u-4$ViOER13_8Vbr3o2p>#RdncI(%c|BXJG?2xgE6rSy1B8(SGJb{3 z^SX#MaE%(j1G2P!q?%{IwVG?FISdu6#JR(TA!fbEa$>Z{x9 zZq;&Zxs<5JWut}J1a2Dw55fjCHPQ1!l*Fw+BC1`H3(lsQZukEFn!Q0_b?nC^FXDEl z?pmh?Q0-KE`b6Cyz=L$|JB5>Dm_fT&PEp_2)uL*qC`(_L;8s@}In;8bnP+GGWs=;o zS2(~4<$XpLlR4)LZGf(o%C?#hP z*M|%0(?S=K$<=W`h1s_Y4=|1DNJ4I=z)f(+64^;G0HHusvZJe`-1dAWF zbS;j!V-Gua$7!#ogwi*1EGl$H&57YQYkb7h)NlN`FMD12WnQ;gC!kc&k{Sn%9)ID zqvdmINQ^1vj^m0ZG_6^rS)q#=JU{o8Uu(m+^hL@`ItD5YhN zi`0tDTf@?|7Ipb!XGFQUzv}4xPh9F%PV9iJ-1;i75JYHGy70rS93gA|$!OmN8If4U z(B}x@@>n7D^wNo)H?iEgUQQb-{mI%-N~w@B#_c{UeC(mE-|67bPZRsw*Y+|Pf%VlB zqbIN&&M%qJcAC*{qTWd2n!zfocO+8a(`$o0BBS)O2urIwuWux&Vs}Txy5D{P@pa*%^62y^@TW~{%!*u1R9S5Tkew~k z3Zb08?S(%$eV{Ub8db5iLJRJBjTwZAfJ)X!MH%_C1)X!B47sM{1RG|UnKAc~wP5=^ zHxxr%;DdOnssM)wnKQmb%m`7jR>9CZq?M7Kh^u0f{CNWSAmY&{u=I6!H~z}*fJ@GD z2z24um7Qewau`62m<&pg;iFkwibQjQIlKi}pOrw;gCWV5(#fq2#tDzi{0Ecak*1rR z1`#VJ&8Pr$dm^8C+N30KkDwI22sua0$3kS17EI(NTA3SO1*;NDttGDShgcymNvufP z7#U-}S3b?4=>DrJnoIvl)iS4_9lIevAt1TMr`S8VXC>vPfKyu6{`TIwI;X0ri=fZ> z0x~!mkI|iOW7TE8mtV0l^2eIS(!>$@)yb6S3Dx!J?eBf@KJDXIz(GJRxgq#A0O=*0 
z52+V2KCzsZdKEv?@bX6bYQ^Po;eU1D{iZ#NCCCdeI*_mCV;d&)No7^ITfD;R98Pd~j&q_&mg8f(ox3SJv)YuDdMNi$y(M-CO{cIpFh0#BOs0D{ zQlV3|4oc{|2bFV;>CtU0rT##{RQE_PxkB8*D90{VPO@20P%YtZ!`DV4;?`!e>|{f) z^NI+8E%8$kBJ2%)RlYTkB0u|~UjCTd3xuf<X^XF^N=G*db0GvPqVWJ?o}^XqULg(TtjOfYRw!ouF_NV*q=W&yu(? zb9KFmOT(W|r`?bpJF^FP-$d5lo2g_FVit0Mb0seSMgq6h^NXL&1Hpd~7L$Jv7O_*2 zLTe@HSNwAA@A2rEe`IC7XumAdEktWN)h8zs~2j)UC!89L$_TFS%-a?z%>SSe*O$>_I{RqoYuZpSi!65 zSu5Z3BNCE_upDjuS#AZ(ybOP^`rbArTmIKb)bN;R!cv}4X_hWptG0l(*Dh|>%lE@H z4#rTrsyc#-Eh#VUzn)Lf!OUZ&I+V^MtB_ca^{u6_OSUJ+VPPdJU04OIPIk+;NGwr^ z0XV)MY1&rNM_!_h)ef}i)C?kP@0HH~fqdBwDvYHftfQ%VAr9Vv z5u8wd5nGi3fY9Im+ADG6>tk=cv-9_oGL0T(*4uSBYeW%;`>8Q>9z0x6PA>g4dhZ}Q ziC!wbs2R%=ex47na|w%nM`jp@*kSH!cDFd&QJT^{6tB7`;k^*117zna_@-4ebBRR4VP{MOx?T;r~ z_Bpq_<}Q!w{QZxe^Xb3OCO9I3eeMUhV8kLilWjdY42GdC(WKcM^h0#W@*3cukQVI; z_<}*rSvoQjTEYAx+SWs3!pHjEph(Q=xiL#=vcAEq(++)fi*Rfhcc)K5dX0=vybxw6 z_0JFxT^F<@I*ztYeBYNaqn{s~RS zb`bdzN9`$vOe^5Z?WWPJ zsM*A7N8FJ4vAy)9?Xg&Ib{_MnH<9 z5DiQ4hZ^}I9)T_*g=Hk^6(QRS;bPJi z&b+hu0g;vnC$TB_Hy+8uOg7NSmB~k!1*bR$Er^H4Xg3LnJHs+r+;JI4#De&1r~>Fd z8fEe0`56@A=zdPT_LX$)xn@kRCW8?MMr)tzh)D?jfFCEx7=oPk1lT7j;`eROYX4b= z`ZuX&>%5Z{ovzvsQseSsLS%^5u4Un2qm=ryH%^WW%ykxk zdoQO^f^IABG&YF8G*z+FQQM;wsl4I2mpDUc;;03DQ6*ukos zUI=6~OLZc7h<@UV{0+Fh)n)90ukpNlhvQg+f1b2d@R9uHYjTXsH~>-g=vF_^-?x9I zZuIi$J@O7Qw)ZEjUlKQA9l%AIM-EbqPQbWXUlbjZN}Z#ok4h(JZ?C@Bt-1bP`P#wh zc4b$b!Dsv$gRiuuqUvvla`_B#y7|trdo_=H0P22?zP5AX4yR>6?m6e;2v(dR!qLEm z3}}o{PiFk{V+JzI(s)+9n&mxzGC~7mCk;t21PTQ>D0dZt>jaKRnb}2DJhZ>%u5i>= zn2%xmSJ5#3t7tg>n5ZB%ieBPMoqj_ffB{=LJX-HZnpMP`tN-XP%=V%jdf!9VWN+kj z{Z0(!^*W1kLQu$bT0m~hg0F9B7Ybk)ni}xK%D`_92U&QFQr#d#BSaxViNcn!c zmEOLMCo$B^PD#dF1oV`V-52^6m0^`8^ynoV@PS~0^(lk2(F*#Seyg<&f1Vrwl@wQJXP23i9 zi2;hZKH5H2`eVg_1)j{w(f-dHfXQ>-p|39DX}qe(D74#|=u(wJ0@Izr+>Mygcwofe zwKH`CD}O(G4O3Q>1v_sTJBIbynOm)ox7<&EI`KffTTW=cxZru)TULEMzCm0Byr_EW z4T$){;>_#J zsb{;polE~^jD?6GZq#z>-5)J+mYn<1H)^T#v!4pv3iwS0n8f(x@OjH;=7mc{*1(iH!su=4e(jZw9?cyCa4={1Ztg|6{JwsF|+8+YS3Qn%8u%R=A0XHY*H zwV-?4oJNnnKUu#kK3>X^BvyApIn-&LfMkD)Ea 
z=t{RG!ZcxB8iQlta6PYGI7gpS(tf1oL1%JFtNp_yczFg1k+}Rbdoy#8*-C2CR@beU zpq~~p=>8&lQ=d_Hk&@&R#s{6W=NCr^Y>vzJm8D9;dT^lQ*U_(*ntBMqJn3yj5DvD# zQ#w%uxvy5P)G$+4*{g!jJr4S>{5Kz4Jg&-zLB`z=!4`O}gd($9W9IN;8A0VC428N8 ze+0MLFm-SvB`{y1PVTc7($}gv_}L}7s8B!TTpO8xQ_eIb!oTmOCOx4Qh~ka1;%E>B ziPI96C=VITM-vw##Ja!Y>1bOevkMox$1AUmoQKB?mdcfMrVm))s7$8kDt!az^n^8c zL{I3@jSno;d3(bK*#{{ZWDcyPZHDd!hU@Lz`rx23wG;@1!sYf6Y#2&huR!5f@ucrW zf}g)Eb^EWi;*Z$=`_XDIF8XZy z-JAILOII!)t%PFj9sBUrfq~OH*Sr|+86}`v_XYzCli`LMYyQfdaCta)PyZ)DKNxVD zq(DaJWMl?l5^~cvmvc9;6<^;m4CFN72)6=)o5LtJ47k29dGkv?$nrOUY&L$t$-d58 zC(;>nJm z`F00)sZQFh{B{w&pz`9V#6?+jZf$Xk}FVkrR;Xt4FA8@)mAJ*nqbk~_>nUykyEFB~>jaJU3dS&7|8H#CR6ZB3#%Y(@~ZbB-IX#{+2TNpLI%+4v#V%J3F zr!5ef*DuEHvbxRT4wU?eI;}-tSQwH@*m1));5(X4jm>lBA*@AW&UrkTcWIx~(faJ9|kmJe`|Jtw?e`N#<$dZoWJ?oy$CBKnZ_;zM$l@wyXbUe@Q1Q>G%9UUA-Lst zw^TVq%d3>k41g~2qEdLn+KD_b2p*cEEtbYgierz``mO|2I&<8B4p zy36_;)gbh{?;doH$toNAi1Zbmm}6#uY`!6kpQ-u3N$^OoA5x45AYxyqiIZydp4Y#6 zO?5OjOzxU$lSQx>)ev^)!%q9GG%41h-pn~1Js^CeD5YO)nx-m?%3w;T$S7%RvJmx` zdW{>P7@?QTqscd7C(m2|S_*~r^${~gjKTLO4yLr)QUgklxK4y1QjCP%9TxU=HVS(o>Z4~Q3j`8=SW zp|)8|U%Y3YkR~zfyWRK{6Wm|!z{i|z>vs$j+FkG3a(S`r{2u-8u{n9^?KB%n%+Rw< zxF=;-E0yUJ6Q<@F4U}>%AJ8hx%y^t~Aj)jluei`wcXYW`rI0kEi0kimvH){Qa(q0enZFBk*vAB z>2vG}`2V_U_J5-F>fC-0n0P@@cC{Mo8x0+e77a6GEueo-L87B1i0!Tn`H1%0k(s%# z@J`U*L%D!`In-egR{8vL+-b6Ewc{09&2uG>;SI^^@>%`FH^4|{lnYVCEy3~tLsz5M z1E1588^3NNB8oL>9eyul*ARm~$o6Zk<>$p|Fz8p<`jT?OM`(dD=~lSVZL!$?AK`4k?1NTGaLdzMx9>pJPhm zUHt?tGhM8iZzMAc3}W#~T&hLe8^i=;Fx47dRV}e z9*hlMHv)?SnY#W)cQ(-wflws(KY|V2gr9SINYc&GW|B zosETCV?qW1?o>Ha2;16QkfUp1NQsz${XFq@=NnqB_dc}Dd%$W`5R1CRI4HsRABbHXE$Um-VbmO#WOJv8uR74iSYN!x$Vc4WCn$wj59U`;vb1c{u+g@!}hhxsg=cWFtl z?mE_TLd(2fx8Ir1(GY4~p*jBWtMz;R&L1uVzWIBYKGGeaYjSeH(030;*Z~Mlk8iW@ z9FWggQWqmvEm(JyOYje8kW5al`d*XNdo@TC5ODW314oG+Houff=+A|kp}#p%`-DTB z-pY*UCTH1U1mwlsGO1#2(><*gET~m6ss7mYukhoA)O)ar%d)~$pyI5x2>TU?xP%J= z99#T8z3}_tmp|I?mJ?b10ov*iQrSjCOAy#;(+evPl{T)Z98YPn9{}1hnA31(UJY!) 
z+^@L@-`5~~r%}n$g?G;E_xHbgI5H*|J&ZON_jDwu1ItFHI^<%?0MV#pOv4J@aKCwc zgJj*w`=+O{se2t6{BQ>atcAG#fYongGPlnky@xXZd3LXE3{<0vBjyD!q3kOcHnOOu z?k#z(F9N`lbUMoL?GWD8%hZWpA1*S$`|+lSfX4r5daHmo+qP*p!QI`RQrz82aV@mC zy9Rd)?ykit6e;fR?oiy_JruW}=l#~&&t5jJIp>}^CXq~3cz^JQW&`!ARU|WKE0Gf8 zfG*KBy`$)3Jbni*AIg}yud+5DrKVQC+14RS&VR2+RHh8~`A^U>TrG+u+(-VQIR|b- zhysl%0k2tUF6$Gd$;j93tYo7>^Cj<=F7U|&fy9a~A_{AWix_)&pnXjBWRFGh6*ERw zkZzHft-ZXqDOqFnN7*GH zfKU<8rcim55dPeIjk77`Hr~pMI4uBe*dO?Go!Tmr@SN;A!ZrH1gg2)4&v|&< zQ)Y|nyt#=q)?H_h2DJGNEo!e$&>$r!jyySgQ{dASdct8&WtV3Ai%Ez2zRpf7W!^UF zE5`xm1A?dW)re4c>n&ujj6-5_rcd?arS9W`8I#_Bc_2#?ZLG(%zksH4^PJs8e0m|l zM+Q4;F2v`-RqQYnW4F}){>Qy(qOH)DT>odm`ADNRpE#47pSkX@MQ+Zid#UXa>#0J; zUb+VFT6$iS2MOwp@FcA0Ta3(o&AVmKSg0oE5Fie5p@ZG9?}w{FVEtqZ=2C%P0p=@L zYgF4&wv$J#h6u*lHP+7Vbrb~QeNIjPNZ==6nC$()S4K89lh zI!!+83*}GgR;B6?_7AQM6Rhxy?j$i%EcL2TO8c*JREm%w6MrfGqi-%c<*%JS0687gkTW8K9bTV@#;NRA4}s^8V(}G}>d~z2D^CHE}l>%^|{iTVv+E zCzvGoutH|dS7g_+w3*}AQqMSOEu?isBOB4~Hr$y1Ra1uf$wgr^r$(yr6v=-W*+x*) z2VHYxBJGDBo_$Rm3vfiOl2DJq%{|T|0M~|jan30>vt#CPT9`TPaLU9RQpa%KeeW5^ z{B!KFYKFliEc+Zu9z7P19LPuhS2Q;@ElJw&9p3Cc3;!{dwRCGyGgq{5^GcQsdc4Iv zw%Ldzd#kg?BMu*Ow?q?cHx2CDfLV1T>Q3{Tw_n~&(9QI)k`_>h4yRVG%c-%tnPG*# z8e9rJ3~x=X^xIAhKP$(#A%AWI>aduMI@y9+g-g2vz6mcIu;$rIru@X^&KGWaBMymO zT-aeJSa6ca9hG<-JNfV@1x~~MP|rfrejSX?P2uj9NrJA^8=gA!-=7|>f2SQJ9$&60 zetGlbc>)o@urAFT3M10_i@hBYCS7of z?SxBjRc?-cB}-}+^`AplaSGAcy{4v5-zpdFDfCHub@{gSCx3G;yh_QSmVJP{ldf&Z zv!U8zRp05hemY8`-$(O6ZVwG1MP>xii$rd2JKl%u%@?s8)5?5t2=E-Z3euw>7`HrQJ;2Ad7%VF9s9v9ae-$12#RhDIkI68d?VHM4{B`&4~iUWOb101hRt zZZesu4ZQN9u^WVytq1glis?*k%9!I;t~pl^n}DTHl2=i%OM1A{)F4w`VOX(Nhhjbu z9l9<*tj$GM3a!|b(7Fd&y!kQrJD&7f)^sg@?v3}_+SR{)WYl<2=Q|QP>uNx@3;o|O zrwwv-tr++gr|IzBtA~)W^E{6 z97+=prk`X&1tLI+jdyNy!*+?br zhP_easlcXs3%{oLO(|C_ui4HK9bu)-grLxa!JiRHYn2;R67h#d=|%244N|zpc}TWwtkR>gEgHIK&@z`y|l+GlM{F?(tTnX zwqN29!?$Dsfqfy{Lf_UNhIJ1g05%v=-t8A(L)qZ460Mssu91Sr!>7bO2jy?t^WfQL z6)8NRqp=pAHNDT0Cuex1m@E^~rZEV*tGG$)zb}tUoypkMf6dZNMhE>|q>oM>m%nH@ 
zzg?OtBeMQGt;h0_7sGfRQv6z=v2$&aQSP&ffzjA!a?c*OHN@2M``S%l2eu5;?+;)T zwv3p>H;p*%5-{2++}E&!qj}RV#7C6&jP6k{dqUKlt|@fP5X37sH~Ucuk>3kaXcZ$;2a%X0d#MckvDVYN$y`5E)zuU3pr-29Xr+mVn{c)i}qIc&&E`9eC|9wpBMxugo`KvR=ks#)nQT4m1vL|C=rOAfW4A!(@G74 zT}i9c+0AxQQmoC+0k>guEbUmr8M9Vu{bD8tSL%e$@bKXnDT}WY4Td_;(3tf*WBlCz zSt5FW|ASin-XJjbEFePg*zWIwTXA|jqpLxZ$y$RrmdHzQvq$c;(+L&E4%I0X)s!c8 zrl!!>If4xjx{%oIOux&^q5h5b@M#b8-wV$^h|QX-$%yEC$}x#p{0KaPCJxp z;$36M3+;X(C6S)QMpGyePYCT8a&8R-hXcy*@QD`^@w*GaQxlDc-GXP)6X*|>N|Zb{ zt5lmYOBz%pXlJa|+b*!^zSV`LfhvX*yg>c1Lk=*}ZU?}7I@nk;k!F$tIO>s*lwfWe zDc=J1X5nTFp*hblYV|qe4%2KO*!DyE)8N}pJ`7A#;x6g};RXEADO}0r*%o&7 z+^SXdSd?}APRYu=Nm2zbW5&*~%Y6(0aJk+RU(bbFHS{ShxYX-OS+H%T#}nP<$e+*> ze$hHs>Ju^Kw{+yOWN2^ME1VW~j1?!`V7O1^k&A$>Ox%2Vyj+UHdZ zQU7mB)#1tnyU)+=W3y^aEDH%x9b8^`njfj>D$C$qtHM+8c?>%E;h~V^{Z-5JIz6)^ z>3Kisa*f%2Lp1W*BijFN`B;m8ug?D=ah}E@%_HFbYWCF$>b0PYJdM>)gjJc$c|!QG zX!i^NMyD9mzKl@;5$s%9J<&r*sQHVADPpgnG@*w)K4TviS+z_EHQ=AB!OPq!bn05H zJ$yG*jP8f&eN2`#%+83jAgtUAOj+54H*+|*(5s|SO`eXp_VT+Hs9E^GT1*jl_tg9U zsSJvuA_1$$Kf@!_5h|xuNvTO~>c*REiy98-@^%V^WX4Xk=xkUAGLI^Tv!+cSUqbhZ z$Mk#5pst8+5WL?Fi`UHf8=;xflHdVZ)SqWbp=^q`Jl_)2I`lUN&j0@SjHkI!T}?_3 zeL~hdkOrkCOhNKFtKKe2S`-xltk*apWm*E6yZLCp{?TT0kr&2=(JY<^a;c8Vra%7u zIcX4{4AzR2p=Q;k|I+mLIwAMb2T{L%5>Z`G(ZusA@rOY5Kas7_?Y3qSpG)zdK38Nq zu+T0oHJTUDP+EFh!ahOL-tcC*4H;uMlAeLAWquXz7bZr&017CsDSvL}x~XegxU(~- zo7x~r16G~Ifr$wQRk^Md8PC>C)Q}*am1LZzYi7RLV!e90(|Y|fe4tU_7vI2p!oD?w z>wnwrYw!e{W-WD>FV{m%w?ngG^oRS8_|YhM+UY$O!JF6Y6f^Ggb8dIi^}|RzR{gNc z`P@qfLLdCa&@mX1rKGw(r-KQB6a{OLx}n@v=v_h{IYP z+mgV~&t&j+Qttz)`}iem@%{s5S^GHPGWsxqoOzl@r+X6PT2g*j4TSM}=3r6aDnzRy zj2LJV?ScAmEl{HTjYL_0D^=`YdjJiV7k7Z%c%xP-Eg1_zk2IsRzB0DHpB}|h7Izrz z080nn?t(~UFF(s(t$>Ka|Hl|q`wy~`xk7&8cgQsz*pptNDl`kz?c80Rl=T*`1Zn-K zo`l-Gved93S;A5(`SKzX5xD~yA@k_-_+e<*zz%;8m5Vz5p&94}qE2{#;c>a}rkBOe zp}5OqATAzVSQwa#X`sEE6SibqYka7bpgKSy)JKPx{F@0pH|^h7ZTJDZK~$>JJ~6a! 
ztx>dfAE@8QK@kh1z+IU(Xi+ZLRa}-ZT(M@LC*u4xuUP-W$XHxu|GTKS{Q`}w)83=j zn&0&2!2ss-W#OAlcS7nRKGJ}{>t`hg1?hWUB_JcCd33wo?(n*Y#nBpCtPensEbpsH&d` z@jo)k8Ph)Pb-0fto3tR40Fz{KB5`z)f4$-a&QBZH>}2d~GSY3Ze=|T2t6N2_r+tdo zi-#A+f`~3y4I0!F|KV2trsvp9iHxG-B>%P{v{PHdz4bMNKcW?Rv z*AKIV@66sGN&8N@8s=4^`(l?0laj_`yqP(s`%cgk3;i~Yig^FT-Wg=nK$rO7gy3r4 zI~@+$i%6#%U;)^jw;nCqmkrcFA62m`Y#c#MeEBPLZ!BBCp!U3K^Jeovy$aiQaR@m3 z&68uBN4%?NJbqCOtV$+7Bds<G8vWF0Gzh_!?Jl(mby^)FfDIS?8Q zc5sheuZb6r>m!lIE5OmSxH|OoFe;ixSr|3woA|(=D-4sr*~L-rKE|kM3K~%y*$vq5 z8R~zEXGxZ)$iGA9|AnesE$+aVPJzq?N{@`@dvYJ_u(S`3zFV%JgAPUk*v8#mX$-Mu z4!w!27oS`DlE0nwL6(Df_vc}f;P&{$bEvy1)p2c)dKd60Of-S*sn4&q{*hrp&|)5! zIGL*qnK<;88XummfoC}M?LEMONub(qV2YV#rTMeX`oD%gR}RbKu4fT5O*6YUy^J5B z`2+u$gS7R)>H4e+Hx&}Y-?VDlE|`mnL|zEQ#s?=-GSr0}@^A)8fcHVk*A?L#R8X?g zxd--{hFolhW$%^_(IqRfS5_z(bACKP0eM#5bIj>QXiz}OQu9507}+Nc5m9d~k=J$m zvCWrEi+zNo3*}9cw8Pxa?tfu^T0US}WOH2S^j+je92Kff5fh`*2{xz2!HumY@0$a= z$IZ9JcT$HqIG=ZR&v?=aHa@A4!3?m+nWuF$?T|BxJ~W2t{+_@_5emFVu;zOSmz^>P zrSdbsPXaviN`&rEyW`cf*9+2DG}gc2Vva$`$4B(0_;nT%{@9DA<7bqvvnW4Z1v^Hh zG9X2)FDv3_Y7W#?dR9NhhI$FE7uy=5?5RwFVv>iA)hbHAj?xvp4X!FDe+E!NuLj ze~`LM-7o?9{F3S4VLnYp*h^?=mzJ(q7WipBvctZ3CKB`mFfF}`AKsu`$er?lUbWM= zbW4N&p8>YD0`Mid4(`zvXP(E7!@%e3>HZ#gBGW4&FuclmoXbN(LkED2J2EsqefPrm zefYUDnn=4rYoZWj`@%N=@Kip?{-sl!N5h9nV~k3f2|RZF7_AtF9v{Zi|0#@aEhfl$ zEj(muOX3N@U%^tGRl~-BjyBKKDJ6e0(;ozJ2~@lbb}^mX%}c_(>qlyc(}1>XfG#&7 zKa(1ersyXN<7I-gu%uJI`VJfh?xFxhRc!VUJYez=D^q=S!}DugoOczetcae@_2Uyr zDuR5=jsg+$RjuPfVjRL0J!9o>rf&D;1z{sNHyp+j_Mz0E1E{aCE1DSEzk!PmU_g^< znYnIts?7!j)yr%E?i}SOChHOoaFg)ISb@K)RII_NsAu?(4|SVEAYP_@ote)+2iI{sfqYmquVJcz0du4DlHdk6Q2qaNtW2gA;EXFD?~ zq{PbnDGwLU3NF_wyfAyQDQFchKOi;NOZnw~jcluv6=qlBf}{)95zUpaY#wy~W?EsJ zm!ANZq2DNjoWnNLLe3h%_}W@q4UyZR-AS60%QT5pS};$~1cE~&X~Tez-9RL075?dS z#ofGS08iCFi|K(Rpj5x*&uW_=^QF9q;M5hSiBuDlw7kp-=ReDEOFb;?IqAP-b8J|) zLq7v+%Av@FDz$Pe0%5jQpo{>BYc#nBW67>xul#4pJ;RXF1V-SOVj8suiN;OZILh(i zDC6kR($y}<)7BiZYe#V!vf-Cn-fp`d7x!-u@%PVf=Ag=6b%1|_jokG>&=fB8O$7Z#U&bi_6Gj2j*vg%4&w>5uA2}av|HZa=GwRItra}S#{ZH%3 
zjb77$6QC>X8Ndjxj~0BN%+4m}#`(};ielw26jb9pm`dDLw72aGZ$kLq;&U~E`A))Z zy@_c_vS&1Lk6H#K(tCAvW|OW)Cim5?-adrR?bBq2W?4sQ&G@UaS0gDu_%HEtKJmJ2 ztZn+bH`JaF=RMNxC#xBF@3>ATe8$daR3{$2;kNFh&yTkiM?g&9?D9NeFT!ZApT!18 zLg8ww%F_RbBq9|`S~6^cZ9q6LjB=j+9ZvAGk+x*1O~ACLx{lEQcrNU&lpz~3q>ZI2#MAge5> z#_gdL?5B-QqfE$AIfbY$X2T5!P#_HX%RAhh*5eS}~N5xINOHg++1 znU?+-sLK~pF1p@_OFSez#kY^liY?#WK%ses_x^L{CYR!iShbabB`{)4mb~)w-wLM2 z${uDg)hGf}1{dg5PnQuv-ymXw=``9&yqv@*nDsytzp%#54BV?3d27cujI4VZ4i+GZ?1-N$7Z$S^`y^vT;Y;(5Bg z8=bJI`%^|k{opx1Nwg4ACP<*)RuXjwF)Gv9R=b#lx1B;!XWI2}5TS&l5Ebksz6f{a z(1y*!u_D_%YeJpYyw|~jjq=n4r&e$0W>iGy47|dYgX{phWu|UCZ8#oeCybR!6N<*vuI#q!}J=GARZmNGF7~5)I=4C_CSjA-V7W3XCG0n^M_sf zsj1{(E+P(=!N#UU=Pc?t zolbL&5f}Ds7y(3|Vyq*;wfpCd*oQbBC_(HW3g#0qMA1!`Hu9fl6)ZLR*z$oR>C+vp zgrMOf3fIV1i>PImjb$fv!^!DnbSlN~L0`fw1B=1UQH%Hri%b2#Jb8{G(I(k5@m4xQ z#&sHTv-4iM8nbmAg3@(&-1IfsYQiv| zxdSdAJ}vrBlfhRR4!O%jBB&FzTA^CjcfNF(YBbI=Q6OtAizgiWMU%3zQGw=QPz$`g+wltP)A(R1Lcnyj0 zyyMfdQr>~#IXW3?+WGzw{HOpSC&5h$u!k&+0z}Ia2}mK;6SJmUUZXo05G1gQI$ZUDLi+cz_Ki8SW@mCzDFf`lIb_g(KcE%5l2<@-)=sK^~=F_QYC+_ zubzUMD~>aoMX3o$NNm)JrAloi+(m$Vwsofz3sGch3Kn?NJ^K`fcz~6*#MkBdN`xv} zENGlqKw4wMBvp{dz4wy?b-6dw(4%|F(H^*FdiigOCZ%0?S&~NIKEoBeOrWB}<2@W5~l1(3RKBD96{9&}S zRha3_$6QIl5O7?HkX0hQaCa)YlZd3#4=?JkF7h@nZPpW76@^WK)V+lzIA1F?0C$H< zr0hmIv1!chs4Hdv&a6G_jK2lF9TED|3neXsNqdA=;B)xd{I$;OKPCs`-+xr(4IL6DR zkJiuw4lL47+Z#!p0dM(vz9cZw^R56MT?(F}H0&-}X^WcNq&f#2ZXvekx-D`Vbvk;e zX(!7LA}zJ%-)EQi9248d6ho;-Hkbn|DN>ua&usbg2MkCA5alvV;pglt1k#Cu#1Xou9|wLg89u-4Vd}h4M-1gUs(7!Jfc#!4&*4q**s-BGj2Lkc zox>`w^BdYsb}c-W(j2p1sD9HlB&JkjUFjZ0(MN)SlJm=S&q_ayjJ5UONn-Q?ms(Jy z6mL1O8$a%;Q^DBk)oIv2IA^hvYDQYH*=Ik$!}=ng!O&Pn)iX$`aSt+QR5=cOvu>?- zMb!=nUr^)NYu}t+2nr)Hm|(l8}hEeh_SXMcPx~(#;S9<)d%; zMPjtMSq1r6)+!dr-7oBR>S*YLg7LCMDIXATjp2tY|9~HCxC- zH-?AE6OWQiWOtE{C*!k&Q;`*n`?ulCbkf5iGH|-neHWpz(UYuPwRT6ovgnM?BQ1Y6BRK|{^vz9y6~Y#+v4dB zCQH)Kqjq%mgU+qf2Imd%A4O+$29w&l_T6GwyWl)gh1)=Fj&VBcGGVQV1*zyCE0 zV~bk|`;6AmI+{Kh*jOr0@^6TafB1yQ78kI58TW5q;Upi56S@0?hg%fJyz{o)e3lE{ 
z320&!6=bg4)KBhuZX!>i_8FR-$(eY#KpmPIB~;RQe>nbdrbK;)Lv!e-_h8!idev2c z1Vp~L%#nAs~o7Aei+_bn@w6UttxKRY1Q(WZWG59RM(9}!AaAwBWZQv(p zu3$#+lKvwJ?LQl(255p@$2rcsB~BrQ=w%OX*mz|P`_W_(&3|!XiA>-5NX$wVnJ$*$ zUU@AwX1=T*nUi>G1ev4q$EFlw{mZyNKu28m`3Mub=t#jhVU3&XM`Y$)GlDm3w?dUq zogt{+hn9{Bt5Ax?05XBJ`ZDECk;Y?*oCeQas5fuUv$C1wT;q%@uVEb+p3v5%YM^uM z>1o{q0r%E?jbnft*?s4CgL9jfrY`LszvvQ@PZc=h*y{8%Z&w(-=F(=ASNbSgOEYjp z;^fFfv;mAlE9F0>V#TGbIq^tILvekME?E3}cb?0?KBCzy1cpV5I1!i>M5pndIM$UG zVAO?^Nq@QLZ!d}y@)7L5>^?%lk1V{C{$PRHMBy?b;USLdFS_nt`=v{IelE-r-noi% zeJ)L+8Z%5kS==P%pV)FYM*3LsBw0SDf9kul(n4Sia~)1diu3cnz-mMY zW?EsV>=TS{Dbgt*zf^HyG_YIM8|`o>^A}ynUte+EL**U5uY_98pU%0o{_AVC&g)-c zaqmS+>aX*1d4Dljo#djkKy2k>gUB}f57yS+AJvD)iGJx+${?ve>E>iN8*n{utT$eP z<&m4yoMM>f;g7i2w9>Uqx?#3<@OeI3KK}GZuh-R}Y2{eKJ6<8pwp)OdaF^XULROMQ zX^SM>Ty86$ZcJlt&?c@#eN3aN)fKB`vgTAs)aZ*8_AU9__6_95K$w}_X|IB(rfRdD zX)4!WOTWWQjqJp>*8mQXPzC0$%Hw&!Xnwj;F|yyE;cJN0wZ>^8jBZv^`dW44rAB_j zEgmV_42%ry2WdkrmgC)NkYC!rfgqV?a}G~7_!&w)C=`lQSm98frT3i7X8gOZU1WR* zPsW2HU*x~5qy0+a=-<48UAf)meMlEJu5kpv8`TG}kf{i&!4^;DD=ySS^#L(v8D8?N zq$NaQaVDp`b93{4`g8Am6%$)0lOhZp+Li_U@=%O@ZqvUE_Jbxsp8j&cBgx~b4h=fiWXf%@iT?$lVD3$?ezry+JGUin-pmrTG0Kbbsx3la8>vY#qMA{mJOso;W zEyR?H??D5cqY^V}-nEZJNge?Rx~->%oU`oDwZmy5r#k6guz(io5xofY&gN)a6NXcPOQ} zB6_BN34P8+@k~hYHWThm-MlQn_p2(1T1X};Du2?;huDjFcB&r8eN~M<%C0x1B6d!h z=FU1x?ou`9eI>$blJcE}G-16*(5&=illY-zo3Yk+f!y~?q5IuazNcL(t3d&&EVWPF znE=wK{KWdyCvp0(qYa&IxLIsKZ?A0Ug>Vx~dJpdDka-k$Yak1tERf_nkq?h1P)co8 zR!OCgUK4$&neqJF&+bEj)hp7}6#v6^6YJ=;jqgz9=otRcK0&n*$r~&fE(TxC51bC@ z)pCEMqESHwC#IO0K$n}X5OteL?AlvDnX&+Ntc9Bb8nD=;J|hN}F>%lVh!_|cX1Vyc zZk>jOZoEpw1M&2wsoD0SKNsLm!<7zm$T^QX^k;D+p1=%&TRMqxACD?0cb$CKMZW51 z_{Yw1BuU3ynSxIn1|*bN1%U*0Ug`P{M{q;B1#TB{OL>PqI9VKv(l(k;ww0eP11zS- zmRE0_B2xW4-MT-zhL+zw2mE67*dH~#uL8~0bR5r(qQWErl?^o$ciok>W9CnQ%TZM; zQv?-~S1}%5(UVxFzuSUvM)qHlr*M4UMx)YywV=>^*zTG z0n(Otx-i{UjdMqX)Kgh6oMN?fpR5`S%(e6h)pQHsMzzCsDReWrvqtmYIntY)zVAZ# zLpK`P0ym2cB`Q)&Yeb4s>U?6RY@$bsWaiqSIqFYT(A 
z+uN9C$42aMpi};?IGPfeY-78hSd4J>`+5T^z{%Hb3-P;9h)s=g9|@SwLOuZ87#&Ei zw6Wqno=|VGAEl-rSKyj+-MXsQPWLdVSN%8^pJ7zw4NjYNf-07YJhZjQuAaa~okY2{ zpKs(|rc%bE3FD=!A47cSkZ9kD+f&%=zp)is;R}!gosF%$p3=XCEC^X8U=mW{1M^mt zlxR;W%S4I?Q2{lE_&bh6A?;HCv0FYCL&BhFqy*tCf8x)IIGv_wOj(${^alw6xUD7l z+f4?<$@%_lE!lwoJ)pD#);4VZ)=b?AKeKa)_^K3Dk*6e8k!`h@Xm0ocKeanRlal0_ z>D5{>i_~CXvb7#7U7*BZbW}LSG)F7@b!b4vhBU6VD+?UVnrpYvdr$}NjgzXx!>`1& z8{bgeKe5zh-SvzCW)C#fgm#=q(|fo8H%S0+y%A6bZTNI2kfX*5g)OTfT_DhZr{U)< zSCXqr6cf4&pr_5U3Eawnoo(V{8yICP=%2wMSaUlwm)U1m8iPWO?1LN$O)#btxSmr) zje&kcX^-G%;F@1j-taBWRk?0fxsD@7#Ii7#_Sg3{?YjdNY#T}5!!yWEE{#h<4OUUBEUC)UVs{iSv0bTQ_u|_*ga!5)rg$!!m zNlUmPWPTqTEOoE^NQA$rxzu&Stg&iknE+Gun0>QIUc2XO-Sb%pJKDU2 zA3GYPts|}&k4A;4&&#iB%uos=1Qtt7DVT#HqKfSIhhkKVKD4)FgnenyzcCxBwn6I- z{28%w6Hj^9W27G_*t&l@je~#Y2XA5kMt+*iX9Ne)T-5E!sbMN}WfmanL3P8GN$;Z$ zF~2u`xOLY<1(36}-P(JUm`eT-g5Ez&Fp= zXW2){{gOwQvD(t3^e;8bGNS=*9o_j>RbC4IZ6h81313THVlA$Y-3Nr_D-B|UhDJEE zR3{0v31O=3jk!;I6Hm)VoMJH-=~34^R7g8DKb10dZOsY!YL2vju^Vianzr(x5}PPe z;T3vc!G>q?@J-Hj(-Ytwg*K*J&u5KFtSpR$UGc6aEBC0s$QqsvQw%gpn@ogc{UZID zd``*hSVZS>kZ8*5si+e++~T(qeX5?^^fy|W)ZPypvT8_d1^RcNc&QKY$H&SzQ85qK z^Iv*W?UgJ?#>&L7Cv_`}YqdQ+r+k`n!`vYg5HUB9cDXP9@6IAu`R~pGhHUgthY+0xOvkseZDEpRz!u+sfxgU=S5=*4Kk{bo!6c7VDOTh0UeON6 zTZn)M0n81EKi88L!Hq&q5=9n|<0vq;96yU-cB<@B!YUwzzw*a%tTdhw%fvEF7kvWa z_9=cBfx0W-=CRWB93OzSh{-_?JGZ=hrg1qd4^ z;i-^_9ZP-8d^zQ;m3_krMz#j3>jZ?_#?YF5T~^Ox@8m{!9D%DhQKMuh95?du1sRi4 zMXArAeajT! 
zkfI|Jmuwd!P@`S3_D<1>ld!ifGhyfy?EJljDrrk)FeXBjM00t|+tLvCiccw{XEo4K z;`Z}@mWtbdPOH6iENM#aADFE8mpT4Y;l&0z4f7XbZOa}xikrP-js7QsmB}^J;#8~t z9#TIPPNf-(lP{j`Ls5x!=&Mb;L8uP`iDjw>%_oNhGr3sO`fFW3%FQY zTA#{0uEGnlUF#YJ1_T~QbDSc2QwS!6zM z2Il+oY2Aft57~~I&^e6%xS0&=&mmu@0AT1|)h>lq_@d&Xv@mrsjO1k)tE)M%u^Az} zpv-FX!{%^87)eb!KoIQ#vTfe=F?0+$WdI7G^?}pt527ED`s{U&wU#;(EPtY$I*!sb5~$zU`qO zF#mfvAI_~k7m}4GTD{)9Rhwf}Rtlf^>Q3r^xnn{NznUi6dHGpwcGWDVPI<*~vp()a zdgjkn0BocaQ1y}| zF8z#wtH&@U5jI=_6(Yv$#9CKBQSST5&)DYj(wS8#`0}%@!+*CFGjx+^=%89&pjNjG z+AXjX5DC>JB=DG3!5erp<>XOxH1~fTKBb>2meY5g1q6{li*1ex; z&7{2wDT!|g=Tc43w%OA}o8f#$wUvgUnT;>r=g%6= zf3b0FCb2$%C5a@)Xdvr zbv~~#G~LS7+%vUEnxBeqRH;-nEG^C`M9wuoDa^ILH-6f96mOYVxvWako1jL|#L8Sc zy;$MD)Gqp4+P>83Wl+UE6?d+^zi`00vYvRY=K)IVl7A!j&bf!{-_-9r3$=exe7s9< zA^$z<^$PcDI8hS47WaC|a<3TJ%<$H>y9Y{6Vx?cGsJu?b(Nm@kaj01iBrviUqOOCy%PXs%$|6|G=oB_*v}j zUxzUSL znE!j+{=f~Z2X_9YSuEV5Z84!rtI4dZoPNNtDkK{xcMu-^lp0{6eKd8wNCs;64ZJeUi*t8NcaNLZOxMTayjFMr1+ z1PywC%za1J|Rn*{;i?ijyUvnvHL^VnT?Fb<0{XR9>v85;BiHB8`kB)EU>*ha*wzVjS0p zr`sPB4vEI)cN=>zoVgN7ZnZhpcyB|BG{ADieT?wuv$n>7C2z=HHHnC!&$K8ROAQP` z>$h>qsKmG?-EaoGXkX%hdcHzx1+@4MZYj|=irrlLHpB~En0{jz3cDoVF?Lhuf?A*) zV@kZZrF}scsdG*A5*KwiW0x*ZbM-RPXcgj5>h}e{{ml1p%Cbx#>fo^-i;tJuP@-fv zR8v^z56vA<(|%*?VY(?qZO9uKb3!LWo~LcnGN`>6Q8nKBZP+q&wZT}+f1=H|fqB}> zW4u0o&8_jG^nbee>wmgfyno}z(`>3(b|G()X=CvqFLrLyraA^V9xq8mjmOyt0AK6* z_55uc!2GGIt_Byc@F;za z!QA7!AE>FN`zQ11=F51u>1kYV7P&L_-~HV};Gb8hDQuw0^qyutKAgrlM^8+O__G33gVGcoWJ1jLtjfUtq}n()P21MIsd*&UE$?nCzYIdV%mDiz z^`-B^m)72TrgKx?i6@b+{{>N)<^Btz{9+SS^&iD5F$~cXJ&>l`x(n_C2_*c2rdxy| zG>7CUf3!ui5UA^%s5ZO8;lH&OHvllFe$PCT)^J?FPZIrC-Gy5s$JUKs|KtIh zW}?!osu@8w6cB5($)U`=k^<~tf?Xp*J;UML$x-*g0>u^<4;F8LiJXhcJov8deIUP_ zyr=R!7yL9lPH*a#fba1i;dv?IvTvJz>W-COU*%#x5gxv3NMVIluz~q1p-fj#jj2po zx0)(qQO#1E#{B55NfoLn8Rm*7TzU0D%o;r3;f4sa{>YM7 z>^KliP)tto*TlUM+PKG{^qH^1B4DYv*>0YYdjl2Ye3jU$bV<2kMsv1&fNo=l{H>_6Q7ne^yQUrQ+8A4D%*$LEkFH ziyh*Z#)wED0Zn+7WS=r9xjq(vjSWG{0N$A@MU0bG5Ps36zN)v)x{uvM8GSjQ4e 
zj)HxF)?c0erxJfaDZjEuC;U9ZQ%Wjzh(KyWR88Q{_IGcm2yt}zfhbYVchu&u))2!I z!mvudTGF|!XGWgwh)L%Dq9nj>jBiIV)-009?`;dBz3{pqGtk=KwrEcUKf71w_1-cW6GoWxqd(J+8s6XX$_bGgTC!;)ne&Ai^(Ch z3W@Kn+MF!-nr#g$yly}A-PL^6;l#6;cn@1EP4O3H^OCynDFe_j{K-`>P99|Cww^{} zMug}ZGc&%^|MjBXd}{m&=ZU9_C}{H69LtrA^tx63_9R(tZf;!ZPHMle>!Di6=c@c= z%)ToRWdz*2c2$(hB7W&(4J9U_B3?A(5zI^@Em>gHNx{rDXo`IHMJCBeDNF~{hrnOcF)&Vl7IfPq-n!@c`xS%!k^yvMTHqz^*fMayud#iX30#> z4|v78_79oxysTSNh|fuz^8i_J6bEdvWN+@b<9Tbpd_n0Z*!tvR8(eIr``=cU&qu$B zS?FK_K3Gx(zeLDinn-;>L-WsG+)ZC@XJPQaV~!V;S5O3hEV#`ePW%9{ernWfNsqW5 zsNX z2E?H50sU_nKF9{Tvh73m!@kz~RsF;PdR@P|^fTPwYIdbJawptO7O;)Vd>4cMoSxpL zHU8kDs~~prW1Kl}4a|$##QL|;r=Gp)B*%bovVjXjdx^VZ66cldyT9G&<=i@nxak!% zU`gjskw;fDRd|l(jeUECkr%qWTB{)u^`3szW&+yO1;SLAoljk`~w(pZNZV@6Iq}}t6 znkil;J~#0qroi%aK(Ql*0Rk7WH$~73B{o~s<3ZIXr=Jok4uy=6O z62&j&EverLM3*A0-||SJk*6~a_3H87f+X$cDr&x>-YuHbIM5GA^+%_;fNFQ8k^jk8a=^Y?BVm6F&>ME zhw5R1p{bplw>A*I!N=7b6Cxe$j+M~k6ikTZ+H8rWV}ryDC?ad765eF7wKFeZB@Et6 zBQwF_vd_e?uVOK54x&2;2k)qCT*rk>8Or>ui5mPsq$rJ&`;8IY*r7D`Bflty;h11b zw+y~!xE-@Wv-=~s1<|71W+=PA60d(GRT{I3IlgrAQeU8PPpeDJuJ|l%Gls6mlxI}S z+TPGjsH;k}>GPObD?G1&lGfeThHKhJsxzA=Bz(f$5bWg4zqD<*Ev);T*WbN@I^*>O z`!0O@9Fpr;-H59*-xm(gYaDDA+-b}O-E^EARCM%-7dG`SKL0LbZ~Ny1iu->wy#-s8 z|JU_BGjtE#B~sGeB~sFY(%s!%LxZ$5L#If0cS(cv&<)bv`0)E*_wyRQ=W!l;uf5i1 zRh7{`>ofJ9JRWV^`At8f=y^!^@;u}6Y1p)}>`_28IhWD9ltU$q!>5U!Rql`;=`yHi zm$jW~E5?tMLNzWMX<)msyA$E$@vQ4dO`l8AxsPjGcg(fpvwXg&w>G3ZyKgGyQvSEP zdGqx_>E#Kw06d4$)z3B>2QMt`4WD^v=0OjUY+$njesmwjFLKI`F zxLX24hZCmOML*(dV=v#wdn8OHVQ9kZo}&VKPGDq{h~aTayPI`>n2Gn_HP-Aa)qIAb z)?-Yp=C=2LjR;5jSwnO$AJ>DKym3#Yhv~tHKgR&xeq!7zcVB?WKzi%GYsUmbNkExi zg<%?MF1{f@KpvLny&*-R!W<(Fel{K;S4`*xM&K-1xa|3Y)elOGV&twa4{ zF+w4I*tw=40I8GuTOcJHFqn!!2{fauIQ9H8GF=##V`LY1!J^=G+l$ih(+pmCkSw=E z2Cc^vgE5MmmOa)V)sVi*-?E-ucC|%rLKyyFFpg)zGn$vqmqS$BnuN~@Rd;2{-RHE8d!e*#1?KTY#$3Q!U1wd{Ii*6iFTeEyw= z5u4V;-)UUl%3s*G`b_LVFuMZR-8UM-MOA)k%cn2;*%7EI6InAN&DihIc|)-&nA{BahqjS39ONPQZ`3Zh@vZkD}@IOVqu z>%18cF|p|xk&eRxEg-xcg0PM(M!ZOjSm(DCqiPH6u_S8lOiHK>d1J*rY$fB5ErWoa 
z@BDk%Oa?J>FOBv7Y=#Bs+c@at)aV`j_dt`1|1H!mM;DbH9zP%(MM?=CA5^#^3D#|R z8&u%3jHTU;321ZYwt@{H&Q7AcZhTY(P*{gd_^!k2ZPm!%^X*IHe^L)OV*m2my&|(t zHsYH$0hqH8v}3E()}E~fK8H3qT}&Vejra|K9r&gft2KlKy*HpQ;fuJYg;t%L6@%Ct zyoFEUp}u;W^3`&a78K8HnmCE@mNc{l1p~nEr8B|uJ8oDi&p_Cz6JV?3GD6pHwg<~< z7Pw&z5!2FZHzLOvpSq#u08oVCb=r1m#4j9(0o0=bJwKY3vq-c>uXwXougP!_3VWJP zJ`XgjBt@&XOBDc3%6Wf;ktE%rmnI~;zo>sa2P*)q3KAjy*ELNh+*OG^M0QWuG)Fu# zYFw~I!$v+qhLTLvrjL89#kL`6$=KWG3Xfoj_jfjvC)S$_R%7^PW^wMG9(Chkzs9H@ z&`%!ZU*Vpp9Qy365pOqjwVb7q4+KURjX`{*^D&3R=w00|N`|xV3riX)JD={ec^NE5 zeac(>L&E!yCVP+M}LMlMDXS1f46-EC+wxuFsM%>cif{WTFNq3Y-YU2m!mSE?$!os(nNK{2m3@I zJ9Lc#sjQ>D`f;YXfbBQ>EF(a}qYF`z3;7C@Y36tI8R5~ET)ATl>l$zy^l4>`!bl8u zV#~QyNg9bbKaL~%`-;rAUS!iOR*h`IYH}L zRG+G@rud&LkW(1p0@sVEq?le!LWFJaEaZ_`b?4e4xx-LkgNYkhI)a@|0OQSd(>EHb z5Wb21fyUQ7*!CT^y}4G7ewz!=uJ2fDNM2?IJ?;(Wz%{306}PObZ#j_E>gtmqiiJ)S z;h?1tWl=Sc3^SSGldO2;{WlYfDgW>J>ek!7{X?eRNIIscH_wYn2u7n)wny=j-wFXxR+@t1{F{l$|L9Tai8ZJNBK!(za1QZVowYzq!S^Xh^l`6N@^>x(e3$7d}`Z_Ei@D#(kut*1jEQc__g53GSn z+APcPh~U}v?q|~cocwoQoSCzw3J0#=XlT(v2w$v4G>wbVTl`u;+4zg(OQWdWDM(_@ z;_qe7VFHc#m&C4wK;`L5Ds%uh)<4_`Z+Va~%4M(8vQ2w!97t6-P&>uux0zqMcb|b5 zEZMxz?ws-beRytl*q*+YAg1wV-@Vr6+S$&D)x%4GufG4&kyr|!fs4A6*PzPAj_BZI z#LUWX1bnYzB}Fnoj+$*#@&Rf!8Hw{1a%~=^11@N1#^#d_r;n2PPiVwfOIL&;>*QqY zgln6{Dw{2F0j%3JmtUDoq*%ol!Po-ZncMl~T@XEinFa$9f|Z!CDE=aBSeouedpybF zF+GvHs+FZF-izJpt*Xk%&NEXYE2k}Fy#^XY=K6mx#`E8cDObp}-TDgQI7;8Ste-3H zjr`ipp8$OO*fWKPMg@=5Dld73VnETpd9>GQ-^cH1G;+~Pn7ProAjXXJz zB~9sm`u#`#0!rf;_Wn6PtS;mOI0;*(?QX351%!-n>-UB#GxT6wyk&l|kF$!+atbNh zNcMi8GepxqRJ4Dt+2a+xq64RfRh`geBXmDJOVNh$`+%{+ialnbN(x~bJQe`6jnra8g4UdjuReyA5#ccVMDbQnc?qw>zNqr5MBD! 
zZGDN&r&!dJc(<3F;W7WMlxW%!{vxSJU>wTw8H4jWq2V>CLIZm2vU1V!e0g+vbG6@0 zZ4wxg`a^&Y?_dX*9wavu!5NBU=owhG=fd4+|NRQmHa#2TroK(jx%6FEWxx}|Ph`8+ zh({#7nZ1B=llD=jd)PD8;NQStYwj@awz4u9h9zXGkw{n`taRaY+{-fVC2S6xBqKGt zI+XVw2xRq^LV5B)5<@R+r&?*hRB0na`?S=gW*g*XCBdn?3$;qv9LZV!Q)zno?tgBY zNZAc#c<>WVhL~wk=<3w-snoUGVcWye)#>CKPod5HiH+KD)|Er5|5?c+99!Tls$Ele z^AkM%FOtkld$e?y#I7tpd3~1ky@ltyO^3E)-jg<7HTxoySM8?BJS!W%4e~pk+5}A< zbe4JTCO#GP*mo=s!;ylymh4(N(+a<6V`Jnxagg30m@A$K5!jdV0brX(aIr6;;qE`R z;M$R0{Km`WS%`T=^oxefY4R$oMs~SdR_iO)XyXpwRahaa_UNS=0=lw^(2mT*4!3{VW`(O!289^*i*>mJYy7ScMWxemn#|)`GHZvdW3lwX9 zCb-i`!f-sOU7nKOYcpbL&8I2GXMkeM?#3Omn!@V)CZ5JV>0j|tlBU4id8*6aO0+a% zr@?U?F^iPs#-F!8%9HBFyz|A)i{a3xPO%;9A01Yx3k|n)q>Pmhpxk(3lrKXg&`kSv z4a}DW*itG}-8&T2Ly|@N{mBbVA(as+iXM$E36ji}Ne1nmhej> zG$Vt1%C=!AXn~w0#vLlm+;QOJ#Pzd4h|nthH!n@)Tg7Xgm8)`gu2#7w%G&jLXM0)Q zmUgbbWSnK}TQrW_N%IwMt&P%6cRe~6(sg!SHDC4EqZ$p|%C1xMWnyU!RK1EfE`GgV z8>W}dZ*BV*$D&`9!p2uZ2uvKj>v2VGw*ybA>x`kcM-EehMI%1sPA}t6?o#pVrQ3S- z#?Nbl_GI)N;+@mr;SZM%wTs3{%kwigAE|HOKEvfs*vip+gPAc8z1E9`kgV)jjo<~- z<(MsxQ)QYJ8QYWgwu*lfyGFav&&Q2zw-T>l`;khmt_qPOg||NQi&aVpfR5$WJ!+^9 z{Yvtf|JI;>h;nalNXsQW6yk^$jUqhtqb5*xgP$ya7cNOxf5R+@#%f=!n=@kuTe}PH zmS7W_KQ@^c2nopwl0QQ6Q8g%1s}Q;@DkVg1_)R@(I91N8E_#HwYVWzW5J_8m`rGj; zjYT2t?Pj09@nJ)CrRC6Jgy|aXk3R5K15RlZx=qQK6jPr_Io*`mc5E)FOgzKXLpEMO z5Butw?lzy%@{uVJyXh!kggWPi^llpBJMs)^Jiw}C7BJfJF$8DAH(&}d4c6xu})tb)2dreskb+XGw!k2WxU2fHdO}MWJ)6! 
zM~TkQm(98qaM2w9$ZM8dRxIkWiZQ=ojN?z79jlq0N)UqV1scrFa-7+l&{~pwd^Li; zF6VdDT}l++7tDXnECIumv=q|mVBCHX)8KTF;nvGY5M8NR;Z4r@qb9GpPPlxgx9dX?ZQN$&k>f!UP~u<3{nssg zP>(|A#&fwi>Fy-$2;~(8xOGIjIN*-cTnC>Sc)vM;a<*!ao!ge(;F<54wEAGIuMq%9 zE&k4nLjMb(A;C<40WaSzsiI)XlgFl_!sU)hG*6qtJIEy3Ig>pw!6^)?mK&jo54R5j zar{I%-u$I-;>gCEtEyH{xIujei2zVd4NgcjZiK?00gw(LgG zPEd&kdwCIlk_90`NiIrTsK-O z^)K!R;F8T}Y%9jn4^89A#@Mn=Si>j(PW*q9c|6)IvHmZ%otPy4Mu_^cI?wT9_qjW= zA&9Rk5V-zT0)j<D3Qcybzo+(BYZ4&7h1TI%(O$JFhe_@csm54zp_YE0!!%b^sC z2FrBd69iS|(TX6cSjwXzlU{xB?PhW^h{FQJSPZd^f`q1nhb1gARztc^&WHTpTvEMDH~Su{F38rKF&Sb z|1gzRN%v(1m@fggs^>5(*q@o5F3iRHOKm|WGZBRGgGchV_2lBLRF9TC2>sdT3s*d1 zlR(|gnQjb zfx7|}6=W&N$x1oYuuSGCI2f1SB<^o~(l_Q`fji63YFz3`kAj0(*OK*_hh#}BS1Sb! z_DZt1hfuiP?#0FLQolrA$Lrk-n-?z9+jk#L(?4=hgo8*}_-?OUjtl$hL0qV)s%6O+ zsxOKI8be`T$1XuyiPN6#lR1L^l}vT>)%vN61OGdk2Fw5F2=^p(MSmuu?M@=by%F+q ziH#w46|(R#zV;W2`w5(HE zW#0{=V*Wn+U7T2EPSVAzs@HSX(C)=>;^pB58tBjnAc@C;WgA8W4f~ba`08{I$6NEbq6ehV*uiF&5x!waOol$MBr&j^pKg7kDp%GO z6CogNo$iwczR?s_93c}7 zT1NG0p#WS0_dGa&>Y~LSVvB5vIh09rL))BU#>&e*zs3O5yl|FLjT&{41=%z%s0}w4jUtkzQi)-*U z>B8dgw`s}FCCDh2?mY(qIph+4{4ocy5SqSMkW|dpZgg$=0q1rAV7&t+u_8oe2N>L@ ztT&>^0(OT=raKLiy%KNq2#5b~Gs!80zqdO6_`ju+pY{}l9H^}j7L@FPK<`2S4W;2D zbWai2W+UZi5Sj2S?2-f=BWEzqo+z3tDNe8 z0=T%l-|D8@pV$ALQ;v>S>BB|e;(%SI0_h_$&gFe0#Z0ggh%RAX!iev?WdPWG8z}5> z^Z=5NUzu}2Yyq4As*9TGL7xs1*`-f|c8N{A^Rob|&Uyy`ZLW;fPmE(Y-(s)^+HuVo z{qIe(jrk#sQ$|={QyrS8^iu@mux5ABw>C#X%EmJH9Pyvd)s>t$Upj{~iE(8S7g1H< zxqDs3-))*>Dw-;?^g1-+byAA|ybW_mZvyf9^= zv)y@Jof9&b`%f%BAYWzVoFrfQDJb7=5<)joqFDJVtbXTP^%4JCEE)b=6*P?TX);VI~yM?(XX05}ABFryn zJ!6_EW^E$!4r12T2E*(M)su3BJh8U{GAk5@GnDye`W(`LT~AY3s}RNL`N3VOV7*{e~&(s zp|-OQ&21r{CQK7(MplK2^>7>a_8yI{*Dy2^gYA1?UZv7o-Thg z5p(S6#JQFb40%LZ>-2cwHL{UNw^#H0OGin+2orDeVZ}qJ|L%K8^vNW1kA?A!T}zyw zqmhLKbfrvVQWdoI+tA?uX%F-$p2*Wah@W{w7rK}{@?BkG68?N%D<)Ouo8NFq!yS7Z zip}KMT+!TGMQL-1Cp#Y0lc5bzaoD&Evpl>nShs$ie_E)kKfaVM5Gg-I`pYHREcYFsEvxBvy6X{I^#`wDiq zzFuu@k>;L2CJgvS3L@wuremMw%Vw*mZ?97f 
zU1kobc^ni4;SoxL^s#0DE|TF)xvJ1_z-aA-^Y_3L{M^#6Oe#2Dhde!9R!^xoq!_i; zicJqM-ar(X(t8_#CvLT~_%J&0Eg)?=JKo{8U`4x0<7kWV@!fHN#bou@ApK_CPDGhSb8ojpzWupHrYyc_0J`#VWa<)^&lGp% z$H1R>n)rQBD&oy9FL?HEb@t2L_MXuXW7yfU6P7bekVG&s>C5m>boy&C2P|)pm7v`q zG?*i|QTH;P(gfo&{~A+2pS+X#$>n&a;qJsmdn@E3>TJVLfpLVGEIVd&mrQK>7I+!C z{;rh`3E`y4Z~Tt#IEn^N_4bFJ;guI>>h@UKC(8zpSw@U-yoVT>L3Tv}MV8t3CYT_G z1=o*ut{LQvLc`4}7jSxyabi6Mu3Vq&B!(<3d|$5NmNZ&SO>!Hv^PGl zE#I%m)x8L*vP903%r7u$(S$A%V4Xae>7B!%WwVtm@{MCH!(Ao9h{`PzE)aW%9H*FUVxd@HeR#anr_TV}dZ9-l1_K~EqXEE7Y{mZprkHK3vh`0?M^D_qJ<+8#CXUy$_i zEd-3x4smQdxB()j$c%Z&leeorKLotKIE;!`mC8hgTOLf!8~u|rSJ0R~p5~9Ua+i4p zpJX=nXM^5~t#%a7(bG_>rF4K2P5`)v*;!)#ZI;RlG*CS9lR|^@)FIrv*_6kT^>KON zd_!c6#zZYEKs9WQK39d?YGEXqS1|TD27Hrl0sNIyFi!sW?i=L@sP6z92JA*}v*OPQ zeffFY^QEud1fhCc>KUD~#lkW$HBBK}AbRtMsLnaeA3+5d-R75J*RR|Yn@5RNV35%l zDw13U;+lNTB|7eoMwT%u_yJau+iBBZMRYmkOz#?Eyp1$5UUVG;5UC24#CI$~acP9i z_fmS*(>6{D&`B%G#>RqIINL4`8An^{A$*w1kv`yD1H#MfJqpg&D`)sk7Nq{U%`Pg6 zSdoBC5^p1}$A)Xd(gnjhEI5q3cU%*~H{=Rb=^QE+B!oN?WxMeg1oN^23h1{u zNzPJBCNW2bD8-nE0-71v0)`f|DY_ddl;O`}k$E0_EU)D_S=VDrq)*3rUT~oVjJo7% z&tDm+iH7pdHCu>el=|~|S2(@%589k2lZpJS1AL54CfHJHGyQ<84bfiFhT1ROWcSE9 z7d3rb*}B-u*@hFs%Mw+T_PUB6E+zbuYQA1fUd)H!DcU*2*y%x?Q#x5c9pA`NpRK+R zKytuPTu#c#yIplp*ZakjeE)6P+O|VqzHENs-O}5be|&qC{HPsb&*;^pyJTUJCD)$%2#_fH*;W+Se=GXFTntzFbJf_s zQ|o*^KY7mI|77?onhiMLMpgOVhwxKF8N9E?9ltM*UEf>w!0wC6$Hg!rf5HS5hx`J< zHmR$+Zot&8*n1>w@xzU(O~)S>;TbsUh%T<^g%?Tdhi zE|>T0fBWQvok?8ztcc-SHkM4L_lZ964H{GkFDU*j`MIm?NrnnYC)08shnqKxTm=R8>RfB6fi{WO@5=zzee61(WSO3cPRDio;wBH=2}e1Sa)! z_lM*nkUmKT)boCoDU!*&wqsR_bM6&5!_)23z;Uan?I=#M;PogjQ^-TVb52aO3^V3N z0!9$5r_eq2shZ}S3%cTIGSk?!z8LR&K|0edqFRfFWL|E$X{-^2bOxH`O`>*PbEx+s zA{)CzI?9M!O-X4C@iQeFK3=~k#bLosAFkqgrg^3LitPF(Ky2?Ag~4^Xa_uw=?cTL3c9LD2t8K;fZuzh!DvX=8NRO zG+p#RUBmCcr2Be!Qk$SZ4i4Cw-QyI#n1xH&M_ima z`kE?MAMz;&KF%2_XBx4GI+}=BVzHj|!`YxtB_kTozOR^zcurD}*2f00@>JYo%~uSl z*%37tWaKgHHQbU0$&;^{;l5%(cn4jjeb$bwDxRK9bGoa)8*XYDDf(VpD;=N~Y+%$( z!S_?-9_KiaxfHuIVz?3Y9++OwWBX$+ALNJuBuJJh?1^#jvlN&y2?<-uP!Na4HLl8! 
zu^~b}9;2{B`G=Ud3z>)ZEkl5t;mpFZ8`#epbe6hvnYFUW6?MK^cK<*z7ZSuLE}#`` zI#e><8vf8HuKLonFX$!vW&!6!hG8jBUuNaHrkiq|*~M`B@CudJZ4xsZH_vhMAs#*2 zmV7OIT^}o{P5;!VIu(ccM!Zx(93x8m+59h}038AMsbXQIVm;1j1CC$mteg7?ykle7 z{qV(KeyCzHP?YQUEf&tjT>Ncz-mCuXkI|XL?ld z%4HI>LDsSjsd(JPze~#YIZt4yVgaUzwr7=`bQ*o4fsk^Yb&34fdbXpwym_ILEDOol zC-OWqsW$I>ODsnb))KbKk;CPvO8{jbSqP$iFCzX%S=ON}UGo<-PT3Bp?=`?7h^Y~j zwI?M*=(o5f0u(X#W;d4q@?MUITO*zXqG`}8uN#iwXfIx~@v|vT8XLlI(YHdjJ}6(` zTYyA{6H^!4L(O~w%p4+n1MGIGKb4;FL9t{U>t;m>*@*lIFt<@)XV|yv&F@LGse?@> zynQglg$KT6aK#NR65eSYpqno){3%?)LuvCy%Oe*V{IrN2zA`sEQxfD=^?SfSAISLQ zo(Xan#>?%;u9MdhmSZ8KJi7kx?%#Klj<-ghoaZoXmmspKZnz91VsKP((ztW0x{!#( zC`R?1rAasRI|pp_!ZSz!A>8TBo1WI26K)I$@=5ElEIVobOEW6aoetMDT^^uc-%t5sNPbeGdNThB&5>FhpGP~95SxP z-YS0tambvEDRH>*CTj5J4?9Y!9oH`@WF#c`zJuG|$|$16Ct;%7iuuO>&dV+;40}WH zugMH?J;1tC$pYuwc9~U(8mpQ|v)YkKncy|DUVI#*WW*WJmJTo}T8wlk0uWNF_iJTC z3pK7jD4ZC268~~ZfU*XL-Jvwj78B>>pYMaJ%LKyiGt3cF_&R`?oqAXmtc6>OqH~NG ziG(IfU?2Eu-~Rbu7K?}l0&Z}y4IpoA)j7{I0eI5Nr$ZhPxyAwc3darIbc0;}O5FXR2Va zj{~5WO8rmx{ULrC*26WR*E)1Hg0D_V8(P^>E_IU6yD>GTAkldpRZfgZGLrr3qzA6ZcCizW6HP{}k?0JwY#@o{v z-d_+58IJ}oBN#9PY={1??1r+=uzO6!@t~Cn)U%dcBkT!Axro2tgExWXLj~?0+p_>* zABO1Ljs0L;g?E5A2pFxl!`O-d5~4qW<(pb=TkC^4KIF?@!=U^#I22&Q!_&&5l&r>u z>2m-3j&7&fseL+0@j6VYd_v)(?PJ@+0%k0!qIfSwns=|#;&&oS&>wbr(6ct{IV<<| z@JaJcq2%FTtmOfO=*wZzbDwq;mWd!)UnAb(v*yG%HQ>MC(ssP~Q|k3l&-9byro(qJ zSC4zU$n{#fb@EyVCzBo? 
zVg~%Z7C^-!)mH58FyW2(~*JMt`Z8&(yOymnF;b{5N^|)88Op`2_iuls<&Qk_-I=0q}xeh2{`z zAgQyq8O5>PLs*$n|A{c(CTP1f{1J=7s7bVcMIz=eLc68+#cjw$3k1s(?{#UP;-#Xj zg~Lgbdu^Y~bu=2cp%RUIcC#{e#ToD4bLS$SR}6IAk9+!n1a^mQArZxk^5<7xWLcL- zu&0nj%n@_SMW07ZPx;_#!6*wsHiNNhdKVAKJg>S-NC0NX7&68zf@mxZrSGnuZpinh zbz;)&B*zY!$4;JfBS>7pIRLjPKswOri3jM&2%HZfvGo?_;BP~6Hpnbzqv%l_(F9f_tMS8QMFC3S{gf1A;6x_c+A z>%nVqN!coLx8+OkawXJ^ue03vx*4thX-;-@)E)Ni!iM5ro5$=-qf!bLgc0d8zxrC^ zcD&lv1iRE+cSy71NN?hqOopfVMb+$A!u+$~n_5+}`Iwo9)#u+VT>1+O$=M48HnPJc zEEel_rVi?R^07bYzPK4j9k=03v`XsXhe9owLfpsgFq8_?|AyF*yid18qLK-$fY?Q9 ziCgC-QL6MWv@T9Rwyx!-hO@`1NJhIWZSbJl)j&h|@zDvjO(0h)itiK`;TXMu;vSMsElZ(|1*c?G_?ZfFgtPBEG;eeXYBXbk||~ItJ7y zgv<~>GYcq~JcHLL3Nn1d+UiTvhz_!PFl-@Sijuy1arB&!clrzyEnWj#8~6|UlPZG~ zeW@oQ7~v+}E@W&N5!mA1Ur2i4!7c;7MnxS6&N`(x1$HkY!tV`dsXUC#a~sn*H(7v& zZ2&+UapgvrriZnBLqAXy7*6T<21@^VOu5xz!h8;gY=fAqg~+MFKQfogW`v4LFKJTs z%_%ZWOo0#_8=l=P3tpVy7Z^vFBD%Bf^W$BlPTA!!e3&Daxj5`2N+j|nJma8{Zq9<{ z7@c;9wZs3kw6v=gcIGNP;Rb-}BbOm|)6}n<=^w}^K}W<;PI41mQ%dRZh_*@*$YC(8LF>Ff?0TRbZ1UsjG9O?G#rOa+f; zSiAD*GZ{s$)l1L;J{sXdMiT1Ub7B|iLzY4;V^Ah1UEQ4L_pq^ToRe6gOA^W$EbqC> zX2pWnSgY8bJC6d1RKlT_hC-C@dpNN+C#!SkZ6m6&f6BUO_ZA}PKeBgcEf_Xh8JG#X z>>6y7AG>hyHXJcK64m5*+D zwEnu>_T22A>c|xR*lm2Mu<~g6wr0KNmNOUOqwjIJ+Hl{bdg^Dj(!Wj6K@{vOmPuJj zddG+Tkg{D@vfkBYCc`8BKRlOb3H)EeaR=eMo2Ye2zxIlk0-!KukunC*2-HnCmnhl9 zN)y0UKu>>d2Qvkq7%YkljWPi@4&98Xg;%wJ+9DE1j^atcEb%VGNrZkZsr&d@!Ma1~GlAn+g zYwz$pbRnaoB9K$$S46T6K$j6NI#ssO9!K~BF$Fn=@TZxV1Bo=XYwjBUc{nG=xN4~) zhT1&4-|l$=aJ-e8)GCzEfC!)d&|5vSzLM~7f)cg6f7*=^3}EP0BnwucLt4?${p`^2 zl(*@vkD6W`Js?aDDZ|cBxXjAHIGFUr{U%VjpQq+MvYo&e@zKA5^wyKO{ERiv^Z=at z$f0%vakR1FEo5S21EI~rW;kJoXagtin_(-+l%%if*=C#Pi0WKk_dvZdf~LNa^yUT; zyL+t$K*QRCTsaUCN^!BoaB0n%8yUQfp%6Xh>C+uZXTJrN$7T149Uj@0zYl;+n!-9h zmHEUz^zRCEW^-T@TRDjAK+@iva<67uSyX>a)K|U7(@s*Owr8AQ&8jl}zV|EFK<8hH zbzIna>}1}dDoi&7^>qbh!IxI_@6Eq^5LskY@NHB0vp&A8UFz~CIYS-T` zkC3H-kcBfEzy$z81R*lzH@`F=3`>Ct@}xlOTemT^-9UtNokF^)C4*KVsy|lUgdU(D zw1!TVUVs#A8OX*oQuBuQ2wj&XcLQV!LHm-rYJ=y6Xn@IQnV#>QxXCiv$%EI_Wmz;T 
zB3ML!E?|w#p|h0%X^1{e4xKVP=i0bO9i z8W`d~+P@f98ixnel5Ga#-!7?$1T>b>HNTF=2!xR0JXuyd!5|oHIyVGtz&gusOTQb% zQ zbOG6=J^s2%k^Zks_3igN^~W7R!```5wvg$MP36nkt=Vo~DGa>h)dmcZK0Cn=?o>BF5r0nV3MU)6I z^fgaPZs;tthsiwa|E$Jma{t9Lvf)?1Ua2KQxUzFlZ9e=PB+gWCF;FhVCU(MK9NZJA zZ^@89_{>zVV11F6q}KG|#P^nj0$hl6@Oj++4`J|tG-qXoIDa~c#o-7!`zQG?B{t%j z7aZesey2q_l&sj+a-0sKs}{IT+wjh>xW;9QWf?pTb{U~$xke)Q8%A29{mEo>XdmdG zEQRmrEzuj~5Ul$1$SMiq2gL0JtF}^`<8}1gySYuj-AaVT4Ni^>FV(g`KHDt`kte~e z)Qa@_zV9a3z!PC|X1V9u`}zt7xA>C3I$}-HnW*R$12&fa@gE=!_k*{HwUw_(wK8+y3IDr) zNsCf!Rzv}NuXTGyIAbv{Hv?=a3`uuD`l(zU?d!+H0n}9HaJnD*_e`olR%}M;&5W(7 z&|+mznfQI?atn)o_kLH(>*7J~;bHx`oUAsN_{~L_Alhj<&5kkiqs|#(HE_Nqmmcs2 zF}ryB@`_9(dYjZbC&~Pw8aS;Bf0c;9se8fOeHR}qKO_&2a2=bQIiKfdqboH2o}+Th z0cktQ!#6HpCVB(c75d}`d_pxmO8qRtUSFSnEB>SYP$rwX_Sju9>s=bphnvCt9u$w7 zKQYa#VhDMBF~h_5*LQKzw-|R3qASB zNxj8CDOJK6SS92;|6th}=Q7Z%@eFaa4c4E8WT`Rl82?BHcDuqf9T7eThAbk%Pwkir z0rBrf1*&dJO^UGD5`7ju?RfIOB(I4`wtrho%5$5AQkFRDygxrsJ425nwqB(m6aF9q zDL$KL1K4JGHJ93kmdfO)-FYvHT}zdMiE{l;;67@MHUZvI;K_5mG7;Zu^^&h?mbHFuiFD|}9wsCMiVTE8_h|iW`;qR2=B1#}%la^-L@*4U z(vQ1yl=xc9?i!FAE4He91cc`wk--IsGhAL-+RvEVe%G01&DmeM)BNPAIRs9AC33(G zBevh{^nGd6rKlPS@8V}x_6JV+K^g;;69WsJogRwE)_hgggonD$&r&SUriBeCY9j=; z`UK&Hw$NbRh9Poe!qwy7e8l#}aZi%?{zU&>yPBNu?hFlkIF_-wF%hd5_<1)zhl!Y5 zOfGg=i1jkjO7)$O`?u%dqB_l46F0Sg5th|aE1XR%CEu0X|9&u(ss9P_+kfL?p_Ezh zfBU~COt_)XN2UsoRT%5@6ooCCVe!hI;rp+qNoTo!Mav-WPJ*#)sHRba?Y^p3xIV;7 zI3R$;flH`_8eHuN%zzvYfh*~No{M&hn|44#=mIJbmPM!cU^~9m<8H?#hpl}m9L_4J zln8f?6m@|FkQR_23Rh^Rw#CY6fD=un!KwnR1nv-IuFUtDe4-SJ1H!gMa|6Z(D@1tV zPJt^MZJeA|~*z|NWnP0?GH&F%m9`3$;H>Wwe$Oy(X>sc0^ zX54mI@>?wf5gzJCg>UdYdB=PyoS((JJ0bK7d_fz}J0O|h?^7g5mif&2Ke8#c2N1

    =+V*;p5j-OIs)iHKaiP%Caoe70}m3Z6I_0V?n6pI73hT)EUQEwoR zIssLHhgZtBS#^0X;O89eG=0sT{g!wU(PlAv$J6`dhoqkukV}h%v2<A}>U$NBCZQ z02qGtY=aj&R5*A6bhMCu>y=*FPeUo37;WJaU=1L~KkIVJnZAubGzN*>T~IKqMH7b| z7npV67lttUx-%h};fY+CPN}wEH28c_UKyum$-aw0+$&8`UcA9l8yKM7yhS>B~WB81k)3?N>!@eoEz9lqYknyeMz+~Suc#6m$LN}`u z!>P*S;o$Paa{?`DosQvP&*0}|!P#Aq91J7H%v$$R+&S5+`WeY}uYkb}P9&?7b2n0d zHeJs*xfsmY30WT}LxIKO%t9uWGSLyx7CfrB0rE4!_tDUo$TGYw9fXq)MKcd&}p4q=Yl{mstR$z-^2PSG54sOhVrbpkxBzEI2II4oD|R z5a6FR&k|@aTt=;65uM&!w{IXVe!=D->&*+KmmTu%K*s@Qz9bSMCZG*-ROlFQ0OEF@ zuOYqO2$G=EjvPsC}5PJa)<_5o!GdeI2uBFDY-+UFGN}vn)1SVd34z8j@H* ztZ~#QUgMMUr(Zp6ZUeN(=|_7o(9Jd6Qg;pM=p&@ssxgeg1DnS>c%jKPp{onfZX*T1 zj2XFO^b!Q=z7UNIZnRzjps9T70G#Zv?yeVUF!$;nr%^fk(qyv;XaqZ+X92YGSLty5 zI$*o#(uX|rr(f%vFoCcO@dS{zmnQC(m$8XV=m$$K2t-F1D;dkszk(!riH%xj@w0Aj zPrZiJ{}AbAM=3Q}E1S32U*T{4vqZ`7j=^4AdVYKga*^QZx|MJkCxb7D51{akuX)R>h2KF&sR2{gs zU5gHQ8@~i6PUS&vhW?Z>*|(4}>Ne(hQ4jq*?Bn=ZM!p8n_YP1y@&f@JAU^hm`Ff(n z+7|_15I5JcxWZ!3O%_+~ZnI!^f=>YZ@X4JRM+rlQ()WHn3&$#-2g|Y#T(}DkybTBZ zo_yhc&>T29m^g^E2a|i5a*$+*lu)TSj>-fBt20^G4GuhI5O4G7PAY9=252XQ!Sp4s zfx(~rof5ykJDEqXejR|)?#^xu*?0KtT!5ePIoe5UWch7)x8?))=0QmQjnSW1f*kz-AGl(X>u*p81HV>n!I1g@+sh7PrH_f@!v=8yn3&kl+BxvCiKINT7d> zl=%7lVzc#l2hhBe#duF%y#lOn1UM^z)Anzq-~iWsHv3pXH(5Q~8&@u;KV*kDSqklP zLkXgQH0II(YB8NgpVexF?QWX{XabszFHe=G&j8-F%{&Dd6%5-3d2x|>^0@p%yPP#A zez{Ed(T|HrEd#LWC!9fV`L<*Ji+r(I#=O^6+F%t6AB*s2mJBBLmBv!XeZUrddyDed zZ3nW{WLz$4;r53VSohL>~0xuKafs zB)pBJPO%d(5F)}oBS&@Oh-G5%*GXDvX08Fw`Si$AVm?v=rdydo5sm%7VSm+34f)!eh3GzS`VGS2=cg>D!Zl%0*NsAgp=GDtlEb z=8<||X2IX?7K`SPM9*LXZW>^?0igN#_n$ZO()P3yb7Rv;Om_t*lqt~mqqPg2!WV%B zOuB-^)kSo(Nav2|f2udgBkQYb!Y+&QlmrXN1*p-#C9K!NgQSx;wImm(P2hC!WWNe_+Wb~(N=xLT?A1qK-v#L28d4rfTz*h z)(*6FQlDL^@D$%31@@|nC^0`y#m%biL?OLg`)j)pwY{qM@n~;qQ0dpwT0+y)%XJ&Q zTLcVqFDX^bbX=`2jd$}I0fwdcv zCefvx3);|U!ZHM*+blY?&C7gC4*gl2tW!%uJryZ4Eb@6H;RJP=LZ^6|H4y?`w{(^{ z86Q-ipd5+FR|ov#2XzYDnJDXQ*SwEP&r4L3R4pm0U6$HUT&3dTe8GY5 z2M73yS)aJm73xi#r^DWdkCa?IZtsJ$XXM#sI-Kub_u?xVM&831ioPSVF@{YJ2JPdr 
zjb~{wTdf>r5sc}bsM<*-qcxcefXB5>K&aOPBR`(TixXA7D`#o-7|hchwRSTh`Z*hF zMVbCPxwLIjO`^2a*i9?sL;uKey*}3(!)kw|7YYfp)>4 z7A6EZ0!BYPTt$EN`pV^I`|eKj=&Q%gqesu04Ji@QcwBz&1QA^awMz^%a@|3yN2Zm8lbs?F731J&2}>p04(*KfQ~I_V@Vs2 zht?xd#n}P;00I}$nbwwXcH*O^s&Hr#Xuyx3StzCfD78^fI9nkk!%r%E(RT}J%>z%#kCAOE>`RcN*`8jgvz;1a} z%kDO{MW!zrn1ABLGg0s|LVi#EBz{d~G}Bp&GL9VaH49lcFH$qjrR7C2M{j;+cAmaB)@*I;@jL*ECuez)gaa*~ z_zX_bZ}XU1-EWJtLL;4FXYp=i z!i#_(CHl+w>&O?Cklioq1SJJ$qRuqq3cCkyu-{ zpzRo13swB~75`?%GIg1FNWwyalN;2KZzSpMq+0+n!1#!5gqe%RkJ#LynFGLZ1K@FC z(iDdF74VI@zb@xPK*$ud3RfmrC!{n^D;+K%Qo>_FwX7a5^}(fH(psk5;#qz}e;ZAo zT=w1TQ-N=ha+zJ3v`rgH9KcS%>SQ|wkQ>)>1JXw6H~=6ik=I313}}mft6&P+hdJ9u z327FHp%<+4v-c6Yv}!I`mJ_%j5aYegC8T`IAAH#SjN?ysH=BR@^!Ls09zMl3FA`4d z<4$tufUnRKxCb;r_mxF-uRed+eD;V9H(11X73&l~{qV!)D(2fdj{s9xk5Edh6n0j+ z>G%75R$fFJtUX>OwMVlq6g*&41fCbbqC3@Dn$quCXkXvo0H{nhAKbpxTt_0k4;ZqU z?GIb*3bu{OH%*;IFB_gz-DY`^MOe_QR8dP2v#4A|EdsV>LmAMh>Fi(f)Xhl(wDgBw zCHr>Jo>af>3gjKmbFNLDXHic)$SE%#0&F*HaQ8BGmnV72m{0|Q=LY;AV6P41_0BFE zsdQLWh_)$dR4+Ne%-$5B(=16^x+K?e7Md}{P@hfcJmFdj+js^h5%+DxdhXW1szNu5 z|5h$9VuJ5Jv>!IV{^h4lXS>y7C-JQtcbfY*e}r!OYV#Ns7dL4s#6d@Bs%5q1$lUWd z(&Z(L`sKw{9UAhN7dtzC%^1wIt=*vKkh|&WIZUIG6}V%Kl{nSGEm95#`|#Mlz=VKA?TA712rU#?a;DVDDM{nZ!|aJ)S^fgRNH3ODL&4 z_a?iNIf`ky)v2VV#t$#Dvm%V%UPR={KTYwD%;R6YoPDAI( zA6Zms_gdO2-R7M80Mr5`1RzB^L>O77DlM-CrBW9lOrW2s<+=MwOag+ZFmE;Qf+Zx0 zm(ZDAT$lq4VL8BXK1(a;oH|0v*otnP$U+0Zf@7tIN>O|CR$ApiO15;4LYMI%z0ioC z`4<4yh8vHvj8|vzdLId9(kp+HlKMwffKe7TrSfD#krkL3EA3+5{FVPUf13h-7l?V~ zr{A)8EHRTu;T=F-{sA(wCA8p=`Y+7QA@yBFHQ{okfBQ`GyGXw57vh~PcB4LWB0AYz zYjbxU(D(VnC(W0SzsjPlc^2|*;P=_@zwB>f044ZTrtiY6Lq0o4{R344Ul^2_ z7MLkeRZ2UKdd9WpgFCll+H4=4=ME|tYb@T|LPbJ8grBx2ymHJEtazRU1TUhudx>KX zsjx2yVGiM?DJ_hHLMxw2Ma{}~;{K-qb zU^WMiJ?bqMOK#B+FVV^0K>c7H3m;q9Qts>kwg95zh$#=TJfSrZ>tY!esI*iNZyV;` z>jY9@!b=>(bC^!ohLxKVw|LVpxYhY(%jbESF9*w)RpRo26#+B`PB5$u@_)$O#~ku!amXg2w!L?thjjD9@Fll-a$`^WlBrV>#0o?7PrC z&DP$YJvzKB{KG%|l0E=m+q^BZxRSrm!=USVAmjyTyC80w7Z3EyQFHj4zxmtd&;RwG zH^2J$H|YO%n#;>qu@9Q_ 
zd2N2@EAPB?j^gKc&A*H>r>XYU@xuM!IlzcG-~=dsc0~$@9Bm9R`?Ej&N%P?G%c3O)#7QP9RVGwV0lom9ra9)b3B&AP{g}$}N(v9v=!Ny?ZMh60LYkvi0ox<=TM3 z@OH*Gb^c5PW-t~%;$b7gZDUtTjmqpbU$ON)91MNN}C(Zvco^QK$Iv zhuh7UY!CevHddcw!cB0z55rttuL_3k?gkh8f@!QMxJC5;^LM|*9xoDE^o9KhA7Du# z4xZ}GP6F0^amZI^{Ez8Znc$?1{Cp7@L(f)3c?>NsCe(-|q3zw=O;{bXkv9_^*>JVx0{uT-(Y|Gv7f-s3hb_`J5ajQM^vYcABg_I&o zIf;=-^~waf?osQ@T*pgCx09utgdip z3m}h~yyHFe(|HR$=2_wqL|RJP0S89{=%_;}dLXoP_7h$MxAXz4&vBGz+_s#|E^w+m z^d6jnM6bPZoQUn^WG#qCZ+@fGZ122izWDs#H2>%S@o$@3Y*O8uePuSM^ zC$VT^drbk7XBXH=1wp{h&St8mtz};Uu|;QNqW9!&=SDQu~_C}yX#mDzp@L;N5#00WcF^g*+7ZKAn+ z9dlFTt5lm*{)31-_Lu*^*khfVCy`s( zvP~|3#kDiX@R{fBSZr1yEEj!gnM#|8P%fR={e9PZB@#!ib5vc;>>KUG$@4~r$4XoiFxkUZMI0hhP8=} z<{7(iJ!ay6wXvNSqo_Wp-ofS@drHr6E*?|`V4W8yCEs%ZXZwc|ZslqL#Z=U56>gLQ z3vRg#$kbMI0h2`?XBQCs49gVSHjc!#^2&Zkc>>=oj#K*V0f??@Cbow<*#cChz)6pv z{&-Pqe%iX9oDaGn)Jho#nk}!5Uc9{5GhyW$0uMw$qQKslttqvp=wIqLWAYXWIfjG?JnC_4_&Ot-xSZ(wbfggxVwkRIJT1BK4L+g5>~czJec+gRdA|CwlBuz1ZEFcfjphJ6X&?2_W`3tCoDf;%xW zOJ2ONpu;^rF^B22z2=kOeAzts>}m6t|K=B%E4Tap7t!yo&Jbds3exRGo0zIY1v4TQlxDm*A-Pxz=7;(BTZJVABxbXQ7aiBhV zZ$t70AbuNeeJ|tocN<67Qb{Mj4vS9I$(&@eb1U5mKHsZzd4Rs!IEt$=ZN;SHXI}r5 zR>Wi!aS$-D0v`Y}#!7t-vSVUnLj9;|{+EAsnL?P%^I4dbPVQxd>Ti4CT+G4zV=8)T~Y}L!0fv<_OIi(%_fP_TkHBw}V-vIV8r? 
z?8X9|%{!AMe618%M4^$Zf^vyed`|t!nCh%s4wAM0hQ`lxJ-8@-UQ|!owH36fj!<8B z2z_f8QmsT>TN~?2t&qW^y4gMBYxF2o)kC;c&(!ql_!0NJP4fPtYr%ejqO+NqcQ<#@zeGO z(rFj;sR|QGaMHn}&{`};2!y5G291>ftDD9JV0um($gm9SYrJyGJ(sO3SA~b|wIN2% zsrq@gtklfQa?e>nKPIq;=i~d-%XbA=Yhi6eJHslVCbdkCKYb4{%1{BAc7uolKMI`| zUE-HAT2fm2OFeu>h*N2l6rF{%u0Jfg)9`x=o4WdIs?N9DdD{8LRbGH@+(&%|wFv<4 z_AAWyQHMk9>LTi<9T==Q?*ih-d=W(ttmv~_7;$Qu@O#kNrudxl*Uy+WVL4kB!MniI z7cV8-ulTywR*~2>zG+7}Gw#LH#Gz&RyqjO?djMnV8(HcQ$GQ-a0^hWzx973=cIEoV*Ki4Ngxq2adJ_30)O}yAUPR&m`6VIgCHk3 zrm*5M4H%w1UTBt@Yt8)OGAsNAOn|kCP1Gh{Hd~!%&1MJb=Gd<@SqY{*v@=@hKfzE7 zp{!Dxh2r78^ckjWFKYd*ok8B`s@%jcagCoeB?G%qHeaB^f?%Ck-!H(DhweS#55!b@JM^O>7xI zZ=NHy`vRb{M_(5RDt)wA-;ef56JP)+RtHxssN7eea|j^wFOY#`(s4vW%a#1_ka(td z1CAXQsLh~HIt~p=gk5ZB`(;6#bn2+5{i9ncF0HdnfU4x1XR%f~VWo;hDd}53GZ~1M zjN8CkNFtPYHLiDF`>RBn4?WZu4}fn0WQ4Zul{9~{vnGOZwLJDZP@>iZVK*@ z0t*=3`nyB>crRzivTd`|q9!KESz0W>llQqOrxI4$w)H8eHd^&dGV4d*v45D(zOcEu zgId`}DW^t$%kv>j_G^V-F+wk(IC|*hzvlv0!e-c3JYzI(C$Tg_yR5x9Xr4ZO)O`H+ zpEZB=@BSKrz?J6y2e+F)`lCO>%F2(MkN@$rW^)5|HURO#fjwHKGOW7rVkwU=+`HvK z?c>98tz+@9unWI;%Yh3({BHUAJ;}|%RY|bxjhV(+LQ9azBFYXjSZ1eVuz12^%N{e?HS8TA3! zRP1T;&|Wj*CVt)hdq}SvIH(SFKqmX-!B=-|6F1EaewNiMZtyx`DMNZBpwPe# z7a=H@xz&pU-?TrRi}^h*X)Li>$n9IV*rxk-Gu@eQ*4a*b6010f{ZWgdE_{U%uv>?G zkpToxppQQ>=FW!XLmQPPW~C&{C|9K&ODngY-IFb^lULPIznWMtE8gDATi0D|Y@nkn zEj7too{z9dV}Ei7IbXE+p&0YikLjHvRmt$Hp1t4Q8y(xR<%rJoCG1A;utoE3^H0C} z66+LSG=KLupYQ^4t9ki+8$j$@hB<^4aD%?Tf3VAQCfC0YuI+n6tKv8~G~mMXyTSpU zfsB76=KtRd@VC|qJxTnoZH;l+2p$RMpNXZ+U^6glLd$eY!?Ga6pgB~6`J%<)|~2XK$mBb?wt3AcL0FJ8QA zezJNOsU0?Eq03L}$X!Mg0R82I?$+(w}C*m1ec}XtRKr6d=omD7usNFTM#%Ex4EN z{t6iwV#}!;%tiS#+%)!;-gPOf+}H9`I&oUE2>kl4zXnp6ofHSq2W3#EHiDIsffS2% z2+lJ|xt7?Ou7i&IG?Tshrh-cCuG+~L07qk(6T8Wl%D109ZvO5!zs&-?2|)7>y2vx? 
z^%BoC%F1kubaBoutW%vqolaf)KlcTsKnDF>u%-Vldh>#r8LWsLX%9Pk-^OhLH0k6a z#e}V;kKw0!f_dU?A5JNpicg}NBF@m_YH06XX?-sofAgX+{RG}|DI6Nw^7M1@W_BdB zE>zq)As^>ujcek|i(=#pa7q34Q4dfGJ4HgXlae;~6j6UNfz@*B*}%SbO`O7!B$Iz` zu(O9Xg$1_O{?Uhbu%p3~ty#t$bl7)UTTx7&oo_1kMGmerrb0h_N`I*)-FRE` zJAxavcvz0Mms1567#;(Pd8?r@`2whUHm+YhU+0V2I;tisq~*DgSLERr!eh#CUI?Im zU9~U5FF5cW$2YTd08S#M z&P?V9it~_YI0sEJS$V6~1t(Qt2_#6DlP0MF1fHWaX~Dysg^X~$$yb`X1g4%$6|v>I zvG=0+^~e9z{PREi(@0CESv0cCf;H(lojL;*K5gLZYRK-MMD< z*?-1{1J9eo&8PUkB;8sT-5pKsvB;lKvYNcZ6oR^*s=pivTqu9qJe=^LK^MBXF1Jvg zib9+dtspG{wjVKR>~SP&O4WqtDhjx`tN^vrdgiDOtS?B)5#|0w=vNI#{wa-bRT`v& z!!+S=%A-FpNlVArkw8T!QRPY+icjUY5!+ay9rYd&TH79DtgN?G*M#k9$)mMLY#wPJ zUw}xjlzEmHqGNW)yU9xYe^}pZ<^U4=0H=9O-dWlKL`?_;iSO3gQ@l7%s+Wo?hC1k~ z2Lv2ayuI4Rsj(r=;xetbD6!VU#4PE@P@R}k+G+yg*@MDI>QDioFO~io9x1T#a%&pv z2pI)XXbJR3mz(_7K33bJgc+sgLCt7)t;HMxVmH=zn&;2fSZJ(WZh$Fu zJMD^JO0LzPv=w(r^ecd?ro8$et_$va?IL(o^2Wg9O ztlH>YUkcgbf6e(!x{|8*6B077^!)m->KN4TSFg3&avoRP^6k_5BvkI5PCc5|aFae# z8D7$kj(eMWh%AKrXhUJ-$f$nMhKgm=vX7Y za0@9Q;GHde@ojlr2^_=$_x5c`4H^-=4iOvKMMvu3rl22l#ECd z-Fm7W-C5#LML0`bF6{SOXeh4aRm+QCWyU}?`xroYi9ZmbO*%U4WbAGp=65oX`);^l zM5wVUd4>n5xAY9;;Y~eEuj_)tHl}UNrQVT>1AP2E?`ydI-#{%VX!W2Q?#mVB8SIy) zdF%<_xqh#?bMpgUGUnN8cr8-lh54ms9(9y`tVviv06Ujn+oi|?XTinKyFn*$I8N_0Nor|@dNdydT0`uZIFR}CkY10zMKtX*uUR?gbIgq|Of(K`X zoz+c8WcO|K9vFxE2`mqBzt7jcK0hE%LyTyRGY5>XP4h(t-v9^to3~6rewNeMNvcS& z&O?T2=jYWwW}*;d*{MC=YoC$s;iH4TE)>~eQ-n`G`Ly}zM;|g-p*F#%_Dp6c-@r)_ z=|T5lEI>%#o~vb*^8j${MLKm=SRr^q!U%X(zjYiiENjM)9`EhVHgk*Eqea?FC^T;Y zlsB+8u@49U_U`iem`}n7tdwVa6n3*y7a1_Xf*W_>8k_KQy|OH56&&QJr5ltG9bgKp z)Q4x>D*+bp!KwCe8JSJyJud8pE99Zq1ho3%16xtM;U0i2CUzqW%RfXy2j=HYJpq{e zNM8kvsVM$RtO_vBjU~L^$s?lkANO^;6I=uLmU_=n38lDcM+y<_$~()*f-LCuB|t%^ zsuF@6d8|5+)vBsFwyW0y;yqTTxxmjnO8=;o7yA1r0k0G4xn2{kUf2G3GEK>;0AN6$ zzweh!+MG;G!O5hTcblgvx_OxuE4&Y+prqPGySK{#UfJoJYMAQ z*2B_cUeQ0M|Ch+{$S)%N$@dQC9PJxR3oJFgd>x7KHB^Td(09eaFfWJ`d~sS_Sj?_; z9bOjIv-hi)pfi$a&M`-r979F-O4=%aeCZr;^6OkKOioHS$#Eeik0WOHo=1n+dii(Z 
zzB?Qk-4z@I-+f!wJ~^C2qxO2qXZ>@zvl=P-XYZk7oQu$lMCa!K&$f@Z z6Jn%z?HuJp-<>euI(K%CTE4`@V`jnzAwz*%PJo$|y3cs+WufA&OX?*s!_xzm;Cu{; zlRAiV6Z6f-zx-8m@7^6ioVs(+b!Hzf%<1OZ#?8IJ(89ZcEhQ)KTkm=*E5QL!0%Abb z7!&*+dag<)J1nN#-eDokI!Y{+3;) zVAx&srt>8Li7@9WB&u#GArSOSKqTPg?pI=MS_^>vFso^E3Sb1WJo~Q-nOef`~v4DjFw56at zzNKKb+e!ccKmbWZK~(2RRJ}4R^=zN-5E0Ke{YeFhtgbUwY_KS@#rE7^ef0;@8ZdTQLeIgRQP-GA4Sj~gqZhI!|!{hqgwFXfcX2)upfH; ze6$B91w5P+%0zO0?r4T;_3D+b9nNtkM?pp=K7Zoc3A%W8ea~NNn=+uIH)vTOKg|l} zTy&z5{D%)8v76J6nr(J(8iK-lB!m|=-#)HX|8@ec5(Ce@l6%?oUs-#Bmb=yEQ>rP@ zgY>V1D#Sh;Lrkjrhoo1_A8x7b0>@ph#k~javc>kZ0rR98*OVO#VUAoB$z*)2bdBw` z$Ju3!bxtgJ0u;M2FGRH9gd1N{3f;9m00^m%fQEze(qKu~_KQ{u96$^Rp2alZEI@n) zDYa(Ax-rNfu3!tNMfNTXw zF0c)E%;d#Ktz%r`awYonHoxV<>;C)xn;O<19=|pp1&ad`%8(I7mYJ9X2&t=m((NN$RkRCU|6gWkc;u>e*e zytK63Tv@)19_|w9c#&n(k0Teq@)B5fSmRBMEtgwmdHiTAZ0OoTwzi*yly+k1F3#MVkg-Pvo z|H)Pu?rO^6q2CYR(Y3eh{17Znx{CAj zu{xo6l?n2k%n<o-YT)AXH{W@q(30Bv<;rCFFoqQ_f@l1g=o{i1GL)eSq;6`tTey2J3_FrB~F zD{bNlso*4Sqs`&V%PU9>UHwlxwZOWYCKqiU$%|{x9OhmBQcI(?VfAeID3_?_kaQ%K zQ^~XRzrLdJ4;KlxY(tD=f54e0=?XP^mirPlaT3q_a+mQWead?dzci4CYBdUw1;iHR z0yGyWx^Q!48Hq6J5toq+PqXpH))t%7?4TZjba+ev%rVJ}pupLk)i4MRmGo~H>A8ST z8u%^SrF&L;CAkW^Yn%jX!xFXaT5%`_)9$mlpQH65@;xikG!KQyGcprpc z^)Q`o<4oiJbLuBx&Ps9q)+{TxY8o6)p0e@(OaIi!F50yh*FcDIv3tX z!kEve00Qha3$PsXGJ&~Wr^i8HPl zI41rXbTno;W>`kYzI*&}g=Gy=eoRzSmlzWq#JSF=biZdY5K$Zuc4sLycfHIzRKqk zLcP-AdbD`kO`&5x7NKzlb>EAP=Kyy&mT8m@fFI4%4a*axhboS zR1;mCzoi`L_VFR~)$x8v(1q8xj02zmIT*RM4%UcQ7E?%JpQPTQkSrnTtW&&av$<0sFFNm$r^ z;w%jUFelISTYe7Q#3WK|CVhaY62{aBK#|gsfOR_CxmGF+dnp^qm>|P?0gT5KHG8la z42 ztOXIlq+94JwVN5A!?f5Ol49?Xh>@PHFaY-~Qe$DDWqBL|iVpz$huQ)bC>ax01PnuR z`P8j%7wm(mxZE4p+Rn z>m^D0@-l~K>p#VpgB=GE>OVJ6UjPv9v32@1Un|tr)=J9eIxl`me(&AB*WA7J0coe2 zoi*$l?~)02xyO+Pt1h8R#W6dmDHT5S>j}ETZo51t81^m279h!Zy@qD*W1k?u?mWS|*7){t&wz?pfzE-+;}4-nDGtv-4_Frzxu>+&>wxr+CANTl_Usvk z|JlqTo01GK{Oh)Y>4&kvN!6E=5R;<0Pef8AQSY;GCUU|8W>Yuf0&3Om&6`aYt_fmo zLx3-U3?miI*gnhV=8*KFQlYW`LjbE1V+nHN6JUESNG+h9+n{g5;>qMrdXE5d0X87m 
zovzr`aO#Mi_mC1p2?wAN5aZ?+ljN&TZSqy0xt%o2$@Xcd*wh!2tnWAACbOd#AQ$QF z1nNCEuU~K0Sq$~-Pd^959Rikli2y7gKwma-0o2XTvK9HP`rhhxBL~OEY$rQ}0kCL8 zbz;{!D1N7C7z0%USs90@1CFt!9p|MAxpY@ zmlm#JPVlJN#(Ko@F6s~+)g$N+T1}zvs0tw{&H_2_1C)!ua$sE)eCV6r3*hKng)oWK z>1B=V#~kHL5j=>$UiuTB`*Pj&D>oain!ERIH~;<@{~_kN zG{iqWi2;43m+#GiI8HbpM5QznfD}QTlb3pa?p*iVPd){>|5-D|r|n6?q)chC^)K^e zPtsg@)Stb1_wy)#fj*o;nj0t9?ttCiXC_W2=4g6vN~~(Pb}&!tL_SC&O>3Bwxk)Nm zJp$k>B|8>CXllY24Pcj^d%~mDj3h5?rs8xKe&4b{c7w(1FQwbHdwKn?$p=9ZfK_wKR# zA72tC7bt6{dHH0!dBxVj2YUisc4|DtbQ%jr{koxMPx_ovVswQAh@&TL-12Pl{ud_< zj?|a3%Qf+FMOCEGM9b-uD#V|DDd|Q1h-c6QK3AUk@v6;UpOMZwx&xk1i0dYHFah_E z|KXRIhueZC!LrY!85)C9_&RXmE;#TmIWYJP4(8lHj|_(VuV@F}b4yq+efVwvwH=T;#VKt|8N`p5KY(5R(qYXlW8cJ?8!RO1! z(avGEd;a;4e}s-p>mCv;$E(gNvEkb?Jv9Cvtann zjT_DKJTD?_DsiCY51uXARV9yUHTyz)*!<}mx!|&o17*TX3&%H6jo!xT<`KUdn(Rva z(t(m7Y51Y!UYPgwFpmDC1rc3EB-zvxzD+cErLyOxj%N5WKEHZb06B{J^F|7L_K;lGlwbEB=&jn`Q7h+ z-#mW&IEw|dGaY5ARv|dHua(k9XFD=Pn$L3Q&iJfMFTyT3@crb#9|RD8Ygn1Vvv=TCOXpT$-c@SU0(thk4h%)50; zdj*L~`eyj7>4K*YAV!M_+w5BR%YXb8Y7^I+8T4X}Gdsf;)_e{xDkD;LvQgT5Cs6q2 z#4oP`TsUW{H44{37wx$xEry=9im<7V6Xa6^OwN&PTCjMetyNtnzcwN0p^GIm+2c<< zCA6T5Hacw^9b*?JA-(1E{xP4kkI>Z=u#Gd3PYI0KGI|{8raH*hp=_f~78U^5t$YyJ z7FjX?GP=kBKEU+RvG$GuG{i3|6MP0Y+%UwcQK1l+b3mjS7xtvh2kRcfzlk5(CVJC= z6&Eue`Go`kyF9;bz4O810|=IASa?Eo9I3C&y< z<*i~-Vg)sc8KlNXs4;NimSXWH^d-3{i%iU>4KhN@o1*2_gtomg{V29LBxNjQ@g z*IzvRqWSEz-!(t~+0U|=YL2a7m5^G8!Rnjm*)9VRiBct5$c5LT#=#;PR-fYUZ;4;? 
z?@e*SfbL|-!Xx9BG60oi?i?H4UvnrkIdKHUPc47g z$I?zply$aQxhfa*3tfKgBlX&|VI{bpjM{Yx_b$=ei0wUzOJ03>B%b%xwQ&+R@!H;& zIEBzkLkTu*>4WwM!@5UGvl}wq0Ccq{eB;_p%&1*$Hn80B$>zi6V7F;@H;*_bn!PRf zai}RZHr?Q*OqG?qCvl>i!6yFbV`ay=(w8OMC!#=1W6-bLelA^Js}AKt$>}LncmU?M zakfb3%gBBQQ*L|(^JO<9~7;dWbrObG9 zt}=hPiJat=_SFe-cXzkh<4~8_7VNFaemfp%_t#13&fOc$?K`)cf8TtZ{QBmdx6AOY zW7j?;XhUKJEg-jiwsdh_9Yk^_>fitE-#2$|-9hhpwb|HM$9}SE1kPwR9|`EcZV8fs zbi7QEwF4S06>TrtI7xw5d=uBJHcrmvnK7xpf??Z5Y-`A|K*JCvtO7_?Cfc|mHF*{e z_kg$e#5JGfC#|GYZ2>@Is9;T7!Aix8RJcUwyu7HBJZ;a z3zBnR%6yrS|2m?RyzroU;8pk#kMdJu$k!3hj3nel;@p}fLEXE7Op0%zqDx8JE)v-P zS2Aj2^@njCDLZH5C|$pPReQp#0KxHQ?ZtMpLHlE!7d7=+?2;pdXCs=4V=XZ7;^ewc zUiRoo6sgJ%xAddb*v^>>l=@cy?8{&x&{$E)Nv+BYu2wE)XW9Oo7nIpq%m4z6_pvSQ z*RQP|auCR~xYVa7xwl2s5-e{xw^B&?DOD|{@(j4y&*xu!(E@F9QZ=m2Brn&L>F#x} z&$zFQi^~fRTyWsMaDb1XuYJVdX~9mU4qk8Tlx)4ha5OaA!P3ZO&|_$tc)oGO!7MU3 z4|=`Pom;w|92t8gyeZLdR8%)ok-w3*{Eak@9-)W>V5Gby^! z=Ck~~{r1i`Qvud03+w*l|MZWVKmWh}0#i$y&BEN3NY=D5%3f8_!z5_-`Mh7imOW>< zBjyP9obEHF#^$3ApK21?>s4%{q;ecSM5mU?cXE8bSzFs~e({UHZvN%J{Lj&SUTk)D zTpd4+r2*}sqHc?hpxg3l6WC9&dYV(2wkuJEZs%&Hg2ffejMt4NAo3>B{TZY_cZ36_ z+C0i>EEXa9Vhb7s5`eJ+-AcRy&TDD<70J93u`Cvh&MXovq{YWrg*e)uWwkup3o}ZL zv*LezoihAlaCx_xUSeYefN_Iv@!>JzJ$dI2dTb5dK{B?By=iyTTRYl87Zg2PcF3EW zBpxO$Y+1qcJ|M7zE!mxeN%)BoHnuZ9Qg>4ljR0Pad?^Ha7EoV&yl9IQVt<` zVIW;T^IH~aOS{7dfOM%YvHySe-ZaRv&cUOGyi$+$&*nopG>cuKHFxv(l$df z8Zo%{orMPHFork3FqbB6@#5xZeFX72U&H4znWn+4dnbqLJP)EHAIT9sY)1P)d892S zDm-g0FW(y<2kOf5NL##5ss(VrfQp2Z&Aq6K97Lji-PCQA1S=C~sdu;RMlTq!OtWaF=FO1?0y9%=l80>od}z5jh#mb)3t?L-R24+F^)# zsBAq33NfWp4PPuc*{qoAd{6T@z)lRk2ZoE z>Eqwc%P`hk*P|ag%`QX-ja048=FuZR+jQ+H{YK5H%dKhlNViKY2G#LFwLiKt9MsnI_d~EME49t=9FH~f=&HS2VGfm=`^trklS(p(+@Aj=D$I|o9 zzmOh!_(NU|rqhL2MgieG2sm%x3|NhfuQ}62y^301s{!7@QHpGN23E z$OUkve?H2M1AMtJe%-E{Z9(>^2t@pE5Op96smAJhdM8e9RHD*KlOBqXQnoX)8FR^$G!1A!i}pFArIU7IC}Gz zcNuWD$i#%g0#K_ck$|-3v_{yHTd8@dPwDDQU8V0LL>J5{4WAgD#MbhD7Nok}@?Pc& zfMF)oyfOiLW2ZLt+5-#&UbyBMWVy*6EU-F2ZhF@-H?!Qq^1B#`hs39`6;<*-Q2qxpWShin*4>p8hs;xDbf`{Mv^ 
zCH_wAUisc@=ntPsGjsFU(=t?xGOyld8ZUq?$c>G7!OZt=@LjbH1rDp$j;cGD|r zE!fs>NwiSd9x?LkmT9+0?TKvL^ZI(-bkg0*uu%e+Q^tPt+=!-b{#AayCS5DETih+W z?>r#hB7wWXuOtU(Y4Kw4)85E&3P6DLNMJa}Mg?!bbu>LP_#rw}I$V(sFbe#V!?gj3 zZd5Z?uxcEDMfAJxxhLIs|Gnwt@$+f!5{tMZmDj+y8zG6+I+x8y0Va- zefB%)y6X?7+i$-W8Xc&!PGSZXN?7yY9H6oyCs^9f*B+#jWE>GizA~UShv6O(D=&wf z!n=}|gf{O9SOHR_1RfIAIXD6Ugt9sBoU0PIFb{=aR>L4IkloTeI>CUJMZoV8762yp zp<_FxG=>_)VueaIXbB^UCBA%|gpXk*yRRM%*&kb4|Zn;DacW)YgbyxY8p94Ua2ZL8<30+Sy zc0Ke(j5d*_p2isB-F8cACD4NVJRS1A%(1mZqQo2z!ceQW6a1`C-9Rlnf)nxj8GHUWL)N*QwA;kEm-%Yj`Ez=2&r{H|{X zcDQ!3bhdWT1$Fu$CxzwY|< z$Pa%wee>_0U;!7BA%M1>xp~D;3{k`BHOteVq`L-tfdUF%)=w*AO^PSS|-8&9VgG~0rV80wX3CTTGlnx@BW2^>Fxs~EITV+JsL)oH0a1obx24I@ zp|xO^v$RF71Z$L>a_!_Hg`W90-!-19`ly4}2w#F#{t@2$$~`XY{7NKp@;1UZ-5QZi zERG12!|^OvY`Q~y$5+zncHp5ykEM~=Jq?R8)>G98a}vQy=)yg~D{WKYDgX={zzipQ zr}m`1llzejA3#bu7Hwk@)d>ZcgQ`F{>td2=JCB+b3nNWc6(v+0xLO|F1k|?t0Rtz3kxFEB@U@KWhuJcIWu`+zf^eP zt!)*3aMeq2&XRK5IbTIk2|0f1SWKX2*^y2@3BYHO5+lcL(jT7{+WR!0JxH{1l>2fm zVRC>4=vMV>NI(yhmzJlAZ6Axtx0r7r^+t{7`hz#7i|n*`X?6zp$pG}e*Sp6q2X;Bo zH|AdrD-rv{QbG0kl&pN?8;_^6XD-Cx zV(z--qZ-`8ZD)C7ZbjcWOMeH_`HSZR3>-RiD5Oe$UiCAni?19+S5O6UTi+PSgCCif z!6cr8#TB0lM|=!-9LhEPs)s683EuOXImEA{bjwkEio0RNp4KoK0Xl={0Y0sFo|XEA zp9xi5s6*-8yWB4bWeu5ouR`;m*Awqy2g>9p$;Y znpQJ4sZlzL8)rFYXBLo}yGSsXN3UoPGCb0(5{?MERnOj5Zuz|{DdsR$s47hHD`_|L zH(Uv8+L5={x_cu!K$hwpVUls&j) z6QfgU{~lBk0A5q$?C^+%h=n=6BA`F3#fXt1^wC*3=jVHMgtayisuJqdYBo^4;gBBV zuAB8pd$T?>*PCT*kgpHUxejt&L>)m})CpPp4wpL8%0%dUQx@w-5Pbgpd31fxVh!SA zY$L7JiKsg>iAEr3B%$D;6GzfiWg=#8dLejM)W~ zwjYx8cz;T|r0HQEoXVIjVSf9(*3ax`@O6h`VZPd^>cX4Eq?P&@(wrdxpM|T}-*hm2 z^3ji{|KexAz@&aA9Xoz9CZBL6@>-YPh7N_YX276M=M9hUf$HzjhRJL?K#%<7a5wL& zYDzYnr_BVrC0bJJc5RKf`8>O&w~>2mej9~u^}n3BttoBlX$Q00l@M>LM&4b496s&j z@~2QFw{#Hd2EOt7VFr+kY5xH}y8&p*KqFwxgZ?J?L|TH!kQySQy64_I(lG+ndHHKwsL;(13yF= zvL3O6@EB&bR#}+jz*#y)2hXh>#`K72rb7bF)Usc<-yE0U$e&9iprEMnK$`Vufrowq z54;#y-S|LI6qQ~{=d9bcN;(`;L&8X7Y_!0(%qsw%Rm`-_W4daE-R_p=0SAk0yfBHx 
z98+y;4A9HepBPG>M0#dhl}|wxO(NP{NCfpVPs8}by>WXUgp2U;YkM>_KU=MYo|$aj zGt*IvSLlK^{qkc~R=KJ}BMi53U&W6IO^4yt}s&JsmXn5nZ=P9qX8%b9s#=k z32GMs$HRbOZ3-(5&idlM>wreiymOoHd(|e16D5lGKaqARS8kM%hU>&{oG(cBY4h_7 zfZ>U?4yf1y`GmoZ^Jp3>`o=pGg|NS%knZUD{&_#=pRi{LE-F7+fF6F>3 zAbyuteAi+}$6o;o`smPoV*qoJ)KRvqJ%4^W?b$cUz>x<7{R=R5P*Bp>Yj*2r#RWDp zaI0oN)gAy`fA+^emHy*@_>*+*%q52q#s)GLj5P*{&I4Z8GHio~1E>R@lY-y@~{Hse^3|L6B zSm0P-XScCE>`I7!@yG;=?yx?g3~MdYtjeX{buM5p3Z>pntTf6hN9>~WMJ`yWWl}CH{ns*hgaj|VjedD6~xM1+qsgnT0g^-w< zQ`AemhgJHzRPAF8iw1+d$CKlzM@;Pp6z?Gly7t%)USo2Uuo|KT2?4QR5Uj^R>Twl| zA_Cx8z8Q5(ZL79}e6&b~tDG~fjq>*>3?U`6(~+HnG0|TMc=JNo1*~q85_#Wv#|-xJ@-A3KJn;B(?=iqV0!<3cVpsi zUr5Pkr?KhV&pel=Sh0S`9q(mJ=ZkDyF~-2i zHmwZ0?rdhl^3P;L>O^dT*cuwBmw_=O)_#!GbB>29P!wRYb8vSUqF%v z_9}t+oNY9N32Fqfq7JWIUwam zsV;PjLne4cIh!RZcX4eib8*wTgv*KKU)=)mwE(I;+J{;6xzzPTJYHf>rxfvh90>vPUU8$%%xu>7@y{+i$Mc0n`$@C2mc-p(lH%y@! zfc&~;8I54uGHPb|18|qCY5jO4x4N-&!(x+YueiPZHcWeq0j_F5TrW$2<9^%dPI9zP zbjSPd#B8ch=$PhZIL2Vy+MMOJovs)#>kwwi?>v?!UVZiT^r=sOl24k>y}?% z?v|fFb8FN|WNjG*d#mZ=V0D$Gz{rCmntJtuXqp9^ncLlN@}t?J6%^Xy>5p zz!)Z)YD7h{Rk0e)yLzq#lakiQ`cu>){%t|xvF;oIqu%*gPnI-aO)~4?Qhp-?V8V-Q zfM0=&9va`}aH13=l)%FUQvx4Bu|NkY;VOgo+`>G&kO92rSa`Pw^Im}CVbpH~7fv2T z3M1@XI5grad=MCgu=xb&mLUQWD2#mg?Q}Ft)Zk#R;GL4V;%=ob)@5YfH5%DE-_Cx9 z=QPbow-!y;=qnp;6?f`+7+rtdl=3SjUSx4(mK=sN9H&n3&}xK!g4T(NJ!ub0@&iAaH$`4XUBctU3wTO+vfzT)nN8iPuID^qGtas<~NZn>P>8PBvCTVPCrf~XuO zBgkBz;U6*Ef3knOYvUPjfp-}@f)fF_vsBo0oBXTzBsGvQ65B5`W#GM~_k%uDH3ri- zb&+F|ZrX$Cm6u-$Tjgs^3Zk3_CA=vbLjdmNuj665w1GLK^AjU{d1FGju#7aBm%z0N zPFOC@U%)!c2x?pIB*~O=0e{}DeS7z%$3FE)`thImvGmDLJ)HLL+XJQ4oYlaDRP~89 z7Vjw)p2bw;BJUyzK(p^z7kWApdF36O_0T=9Tw5E|dH3ioKE3iYhv-sUV(dQj#u^lj=SgDg~_{s6JpZj!r^wCeGzxvBBGe9FsB%C{#DUES; zctyzNmuZtJWa|!{YikIQ?jo^Mr_ZHtf9q)^MdK_uyqSeL=ivpCBbIlEq?`d#kP(s` zfMiS-HjtoilDw8+S$f@clIlACY5KI@g=z!?sDon+0u1iauepE?43MS2ctg7zmVjsi z57QV3m&=7`mikeySS$tD8H&%p@Cbmkw#=3Ko5d2vJobynFD<6M2liqQc7nQPlK@S# z!AmWA1b~2$n2bf+b9lEjSJ&X(%Ei{F^?avP;aIh$T7Xb?;- 
zZfs{&J~_E{4FK=pZ%#c23O!z2-BpmPB@NPX63hfhvuN{k*p*fn9F-XD2~SP#VKT#) zkd?h*Ds2%2&KY;P}0x(!Gd8+YMpXQ3jQ{)d_Vw4 ze*dydw1I|ycH8JT@-SQ5DSsXTPbq6jYCXTLg9zw6jIsbG0gt}FVOJX>b$5cGMGoCw zd-cuq^i$sn3l5XmU8YOpFu$CqPw_J4o3`JoT@z;yygh&EJZUDZbnF8ZFQn;(8P+CX zJDEwg6*Rvz!&jHtbnu#kfZ!ibzx>O;#O6kKrhNxkxCdQs1cLk%CqSMJMzm42`?$-2cPj^W0r9)F<~LF97zhBSYti}BYcp`Y^wP`e+*uaZ zT|Yo4J4@foCsGF)M_dMhjZ`MIQRgRroLzI$qFApP#|{T3D{zLjRDmszMY z#(~A0R%6E|$lj(PF|t)@)$ZXz>`2Fm;-b4FZ@rcN@-P3t^z*;)bLrZH2h*7| z?0V(G6{I^Oe(FTRwopwHw5pMH6t_-du(w;Z+Sn_l^{_R{yT!&`5CVfMR3car8kkN7 zE`n9=4Q5_|UKC$4P!Rl5|9T972Hb5-P^Co7#Zqn%u!_WdO*3AY(wbuev?zh=6PY{2Ipa{E=PDY zeIqXmiBm4CWg|V>kj*>OLR#K5qebGS9=tB8{slU9a__UWYkfi!YZ?U9g2a)-Z>J}p zcsiZOvc&$WYfzoKlt$^_Ha<$mUNa#A-BHvd{Hk?#fj96<_=Bi=)vaUwzazNUK2pe5WGWq0E4t~7B4QOvq+1l z&o87s2gcAZMo-cW2C$c!M{1Ve0SqE%(rvY3LK+R4N#DbOq?yxw+&l=G1&m<88FvB6B#CZ`}ZCS>k(Gu z!lk@3-Qd7PES@x607G?%0mlhI>~`S@AbGF-;vkU3-83UJ-Ba2gUAwa#tq&pCs z?iEeZ_~Nnl)0Un-jq>JHEq!CcZnLPeij4!h|(ez`V`%Em98yV$m3)V3-_cpMyfNnnEm%hb`=Q0)GS36%(>y}=W z)Q6bJxMl-)a`TH)e7S0&3yfwJy3dPJ8$(ym81%~9hm>Y zuYECn?JM6%FTZ?*?wY{#>;fXUQf(b%u0;9kX;Stg5|s_LK`gk!&lsPNml=fQ%h9)w zr@#Kvm(ph*`&7F2&_wV-{zG4Fb+mfIBx6! 
zMI;aaGm|#4!l`{W7#jVUHYsD?Rz-Gg#(08}GimEb6Bfnw4r-xCV_ugg;FV#C>CO zATQt+Qs4#ji0851GS22Ux7~7c`oM=CNRNE{;qo1Fl_R&1kN21UQr=6mdiv`5JJ*?=owKH?Q-9SL_sdHis5dCyz1ii!yOsmHfcRZo z^}P%3V5LOFj#c#ruOTsE5Pb5mfbz^KfD+7(X;T;zVWV8vme{_Tg_6tI zm0klF3!b$iVt93o5t>FFD;>?d!R0oQ>zj(Lz_3lj9O4M3$LYdu}+*Kt2b6 zZaVe4E1=jgkz_6~54p_x+cu)j>crXdZdKu(%(`)n$FN^|nDEa5Dz(x4jclt>GVf=!5CwKk{&T@PqfK z8*aLuFDp{Gj9&32cu!x$cFcoJFqhb#S$o1GyesayrzBqz_zxf)CAFVozN0Pm+&}AD z@rLW}vCDxS<3L-#*OD*&_SJA~BOtDNk| zD$v=hr)y*K-yxKpQh(HRNbEA5l-}hP$I=8QUAvqah6}tntJ2H*4h)t734m1ILBspkxDDWs8rc{wnc5VOF%aH9g zlH)*F<0y%gbcq8pa0Nbcp-99s4Z@$d4xfNeO1?+Zt+(ESba)D2ArNM95_-|90?1b< z0)lDPC}N=@&vLnORQxK>?KEb}o4)1FYgKJ^r|RBvz_F@0@!7N#%@9#B-dOisI7?U` zGPj8jn`0M0Zj=Vc5X|Jsz^19LFadZ(y04I*M*+y%Fgzn|7bNOGs5%9^m#y;Ica=Jx z!-~oB3}9<7I^{^il#~gwQK(6aL$i^NtL&x3t)L~uFARRhCc%7C2Ej>uuIEmj zKFXADx!2wFvRhFvv7O1AVk*pBq%5LFc=JQgT_B9g)946h@la6;vuX1zlx2dV>9i4~ z`gwsI@ndqs71LtFfu^|^Z~+W`)Y3?{B!PRpMABuZ)sbJU@|3SZ6lIErX5etB!q~P- z-BwO5muB78;}zDfk*2M~1Bx;&Rerh6q)>;@-yJ4nZ2*z&kJbhkAf32=31PCUHJsPq zIGUdS&Ue%46EjTW-Fc6QfKexJ?r<0DBA7H-2O&%#1bCqJGZ&+~&4|)qx}R0jUWK<# zV62b4s9Ab=t`-H(U7Crt3?tY+bfK$!m5%iXc|={*eJb9CQ@wtwFgn%ixgK>J@1u2; z$}L$tfOI?hrHOd!``d1rw7Re7U8{e2%9E4w1Owaqd&^m|ni?qq*Ij%0tqWXpXwARo zvB}%k_|ONvncbb497Pzgsh|VN?DS&#<^Zrj zPruinPWRq(9~upq)B*%+^KuA#KT3ULq%dwsc1p$N#C?(MZ_5Day}(>ELLRQ*e&1a; zr!W4>&!<;jdo{iMy;rl)2_41p@yWEtf-J$L1CUZJKifygZt^9oMt#SK73p^RDX*}8 z_4`KHs{D=TxT%$c4C@^rV-L^h7al!>1npS{pyhPe-S0!= z7v4q37WDg*3)4zdqi~gf5VMJ&dc4-)`IBibPt&J7mw5S_DS|S167?6sYGG&_!7e<5 zClQYOj0FH%$hcS_v=S37fG$8F73fi#W?=*X+pho zsbt`HwLE`fv8cHi_#vot7*#lZTMvmg5^=*L3Q+~g_g$k_q58Q*%Kij{xCsYyiW{!n zOM3+}y|+CS-<+o|9=MW@e)*YS7!wkE;$;BbytYCmh=GZ*m-=40&@w)ixW`D4TX+wK2`{%I59jV#|Alfoy@^cuUp{0bgPN?+= z=Hzn~o#YGW&#@)T#2ZFk)IbC=Y& zlu#74CB}M};$7aD@_e;k?aMkII`?84etF{AG4>8Rb}c~*4fXf2#p5b^*d{=H6_w~} zAf-$Dz8Xl|EDbT>3G6$RMbm0eKlM!dnVhryQ z^k^(P6yQDZzRs z@c8I8xM#4-uEz2Cea+qElmt7O^^AMcl5d>0?*?g5l=&JrRUJsDNBcFqcKhwG-{_WF zahG47>yI#}292tRxaozUc5))`z$p+OXCh{MyEHSOUi$89^mosvb4Xby2l;#v6MgH7 
z+vtFDKDN7QzIIFwP9oh!?Z>Yt)`gqc0756-G8v$&-R%#5-eqrbOqI7i$Nj3H5*R+clE;1?SOAIvmb;tLm-UhoSa+Q0? z{ytc9m6vLlymyKNyMXwe;{5kPzJj%K*nyTo%Z~Ku+b7d&uf53vvJcSVviO9-c4aI} zO7bVW5_7Xxq#AriRLZf0lz5cEu_++fT+W|n!%HA~qvb%!`ERa$nf%9yF-%<|CWxhTO%*&xe;VIWBesRm34**zb zEYY@lwP-o<95eE%5~=RJ$h+RnR{s(?OZ-eX+$wj$vT7&-Vg!+K*fwIa2v1$yNOKF| zJ2a@7J0=*D6QLrpvH%YeRXPt_<#W3%(}r+txvVYWP>}-oaTV7IhOM?Mca*x>&K&9l zH`6cgpXSJ7i+0~X|EYVwSZGgqnjI;hd+tTf?_#B4Iqe%e0AWnVV%}<7hhf&U%z=5< z%hbr;kPgq2t{Y;gex&sjRd;4saQCC1{&f1)fB0Xg#~%AweEpas+~nSYbZ+_*VKpI~ z`;LfTfpq(e&gu;jrR$HX9Ins6Vtksx3P#i)lPA?fZ0F6k)isG-k9V2_G%erxjz=%Q zjuGmP&3B>h`fk79aX=hZ+vwz5*&pLjW58ccv%O zgCDprYzfa{ePVoKG&Wp_#U(9VDlfDHNCi-Iih-mAM4y)3$}X@y^#1+((h`e#?!4=^ z^n3sKAEm>GUrVpOcn;u=AQ9FZeKICtSp`pCl{P6rPiWN^9|3vt|eO7#gPH>y%FezkRelr9R`S`k9G3cf`FyLOmY zsQ3<~>Pu#xLn1&a07kqp0ogL$K{kboYX-$xI#mO8qF}(UpYmhT7Ls39Duw;xwE=c4 zq|%oVUoNqrXmOgA_a>If&RZWzls$=WdoHq|2kHy1p zym^EP0YDYi3H68trdr}?)R*bUyC6SMA7jD}22B%4h&4wyIJBDXxbyDxfrsu-zy9mL zn(n^ucFd%q!TGnHrVnNjTG&4Pm&Ygn=0K-GT zYv2IB4(#=V?n)36e%al{d6Sb2B-Ns;v2i{vKJ-BPum1iorvLSS`9IRhW2e*J@%;qO z1G0eF9i4`ddc>#mNXER|^UCS(#4ls4LLoj&=GFIp>URs(*V)l(dS)g){LqKedvCif zKAT>eodMJjumf1uQ;fw@Ekw$n%E5G7j^bgEb)GFDyFTO@XcupwM2}>%pTIcN;d22U zO3nnhC4NXOBOHO$Mqwd~wEm3&d>S3Al zDgO!$ZK+gYvTMcX2r6%~Bse7N5nz2fxZsn24N^*Aetty}3}*lfAfp}b-vGm*^GbzZPbW^X$qp(cW3{amS*^?k{$U7|POz%C%(fI;t8?>vTNh!YaGdWXwB)=1MU zFCR|NJpDp?@9pnlpcr8h3fqk$`PjP`J-c#5r&J!gT^&#yAn2~qE7g>nfXYvoTCe!| zU-+qX{EfNvZ~x7|OEa^VvK?Cnb#-n-;^@e6wH(nw4}^rrovlP?BiP*)>>qjSXn?vI zfbsioza5FXc17KkBrgzLWyiMlM=o2giQ=nUcF!*L`x0j3YZFp!9JX5sL0ASsby)6# zxn>M$58cxSD9wIr@tGRh)q$l62#N)8&XWYWrX5@F0#=676obCs_R+OuU|(P}l=-Pa z>=(0v!~x8Np_?gBLUL`qoCQv~ZRJN?0nLqj!Eb~#bcx}$KYr$Pd;iub%RDOCMl|#F zY|<0qgB;@pqah&Q`(bnRbDePmtaSyU$catHPXbffk`nOYc9(%ZdWbsXqY^Fe3V zN%sb&uyyPn>p5@*0dcQtTu+EDImXuGbxv*9wVeZ7;jdv^UmBk5eEGNI9vm3sGu(9g z<~N>5pF#q%huy|BFQx(b2^OcQ0^k_rQ2s% zdKuL6!GMHhQ_wdI-%hZ@)%5f<+r3_3vEBV?-~PQJ5qHpY8*6!F%vcb^pzX7M^2USP zd^B#V=YB3Q2+nkl2@?zdItS?B8cCJ9&_61I9I~6Bx8*IT3?&Z%bJQ7=ppX_TNsb8c 
zLv@H%rDv!}Fo9S_g<@^dMVMK?cm;{_b60UUW`zqpN`+=g`R!m}Kz5y5;s$f@DxZS$$}fe%rg^4vJkkgK-}sXu?d zclwJ}gO&)yGv!d~tI0EU?Z+^ordcp2t%BRdc>oq**!y9n?b5&o5NSyXv3w@MNEE3s zy2GK&9e7DTkfZ#NHX@pTR!M0Vsd;C4J^MMj$FuCc4r%IiG$snXLqohTTBjMt zx`wvToq%EakTns<(zC2rIDF(yCa=7UgV*FQRZQZvzrJjnvpsS;Nps@vE4j><3;Vh| z0KN}C@IZR(CqI(zyZ7F7>w9mB_t~B97M9W9C2UxjAsydv7di@j-3r*~6=Qdb>WG=Ku_@@z`;&>k&a~fe9D`PxLeVXcrjC<_ph#kI#!QArbii zoi}4FpXUU{d-fm+VbJh1oreIg^&sswCvD6&!x7JHXiO^*`o24_OTY8q{yMsSucas0 zC29p#iNTPbAZbC~C0J-A7zN9;2hFV+=cnF{v{Q^Nx>>kPL)bTkMnAQB&R$yJv+Q>< z;dDOTfB${yw%cw+hj274F3;dEKv4?iplOyo!`RRfjYrb-znYxr)Xv9n+r&?%u&okG zye`hgyM7Hog+o{(XvFiToGqbi5AugFl-W=6!Y$K^bQrjEdOllZ>emLQbWS_h0t(Ep zt;{XO%6@sOw0O_H$x7M=2t@yQWspgeww`H!0V@Fx0tI>NVx?eaVPLeO%-bkN0E`;B zaVyQ;kKC-PFp&shgZ>V23`T_fCm-O3H0Wj=e{c>!!L9F7@CRPF16czwDer|}>IrKm zY=p_`Dw5t6Ho9>qxrF(&;gI@fP#F1u(88>meD!?^KwPO#Q~)?jhNatgOHv3IXMU%{ zvGG&<8V>nJXO5`Le7oh}$hS&oU1a#u?T86D^*+M86}#ZHwo%m&IP=$1s!Tn-f$@)r@DsM~nSBV4w7pCd30*1P%M$5W3V`BU+t$twC^ z-hmmu+FfK4o@vM+xlFJ-rD(HjB*tAcy-uMCS>;=NJH9r>bGxr4^Oj6=OWp3QwllqY zn{H#@x@>xtrZL%A7i$BDjWXRJ`VXr2(B8rO|6pee*Jn*^^j2PhLiC%Z<4b=>pSn5ZwSgl>FXYXBoz!U26vpzGjDo}!#M>df-K z{#*qiyr5k263am1dyw5KK9G1#oO6pk%Tm$?K0SZT0)HpN(v{3p6kMk#J(y*NZj&fVcJSYQ88CN6q@?dV! zj-rH_(4YXCbgzY*xi}wIQUc7Hv_<6_ptlU(UODw{)Ms=WP0$X-+224M0=NLEfGIwt z6J20K;_ze?s~eN-FgHH32PrW+_~=+KE#QwS!~qu04Xgm{u=lJ@;gA5UBP@?lnTUmR zE^PGFUmnLZWs~Po01II`y+}}gUVp`_+d-68?sW^;!QJe-k$(exK=a&+OG>SpMYvv@K$*{X3hGAL;@9OA}yu$80! 
zJ{Yh0+@rF#l0k>C%{(H*s@^haJxN|#4#OHw=Vunh&i!c^`8 z;vNj{xB8l$RFA|vdbM{@9RqEjV-&ETJc$V45aceis`}#e+4S(kA51sjay`HVBR?#P z^Yft67=f&Vzn|wEpo%mx`i@3PB&2tzs|ln7*GvQ`zKum`Pe1c?I>X9oK~KgRwfH8- z8AB00zmHeLkyTCH)oL}QQ|=OWcKTd8fyIvV=P!oc(?bWZ3mdWXfIBx$aG}i%3vT3{ zC2BdMoR?F&w)9#O@AVKF6QEpk*k&ico+hE5`n!H^&hd;x4AE%>ucuYJcd-}XVT}nw zw2=xpX0-;~Nmj|J3!es22XL|H%muc%WrK;aA*7{~BrnK|_9d_;IVT$2H704%Mr=RQ z<0S+2`XK9l{=>gI0x;?xk+P$JcyU2oucehs6veM{hkRTU|2w$*a2`Pv5^hCl4U?F7>0*}VJ%0%SKR{$ z%NzOZRWJ-l3tp@%aiKc%Dia(GBmBmP{Ojk)ypw+&P7>^vt`hX1kv8L(D>n|Bn%qkp znM>b$>E-mTZ$8c!rz6zY3SVg86;kqL+R_l4#CR;C>M($n2=&{Z*QqK~OQQ9cA*9H|s!nNuxFYT|nH*kM2*M`PV6Y z)RMt3^XS(0H%@jNGT%6pwh-?`o^2#-@VET7mVJreKeRqseU2H0N5DB#xf z6fUf&YZy`xO$eO{>B*-)^$9?R!vPXI^?L!s4i=&eC32&qmBhFcV`W+Z3q9AL;}Z_j zi3NA_?EZE~+PC-G^wbm2kdNlEGBDC#fR#T(=1fd7-|4#pN09Cy>QQoT1rQEYN;L&? zBe6qUN~h1BNpHV(EWp&BJ^N5O*&B=P7La@jj5Fwp#4Gl2Up+SS$+>R&*Cb%jcAX7+ z3SAwpxwU0}xV3JGYXu5Xhj#C{34r4zvN17m!5e1sr2~cPogftu5+wK(E7$Abv8mf zQF`ozUL9caio6K(-Ec!7 z8WMKf4{bBRB<LTrA{RPzwe0f6Ja#I`)e&(HPV<3|~@4y;n*iL`Iu zHSDUkFTjo)MyxHe=nFtBFji8`Cg(`V1<915Qj-t>4^-0j0B3?>C51H}XMkLlup-rW zoPgEIg(CyLe829u&Y*RkI&2tVT`YOrt&R6eP8A8QZ5UrW=`Vfvd+BRm{d#)+wZk!a za*~nxn{~3)zOqi;SZ8@{J}s=wrt5Dyl>X^I{U_;v{Ga|wdiarhSs=HZW*24%=%h#c z*bAhA1X=k2ZM3l{u}n7PZ^h?kd*a=Pug#OX-a1=?PtABHkk7^)`pMdye)`Gpq_2MY z?|3h;WZ{?6+rxO}1w2{FK8m_cIx0#cGo>EoCi3f)Ww(Yy!UbzSmMgn8r3^p!8yZ!b%kdW<(tyO0+!=;4Ku{&06$E|}7&m~FRuJ{poDIv~u z=lr;D43I%$fHKt}lzggh=pv~>@1g(P+4Jf6@e={s#?Zw!r4dz6xQVjF)q^jbvE8YbFkYNBKR9WN-nyu~$9Cg_;IP<1+JfP%GLg#=c}H2wv%GE=EYsCG z{50sTg$qDjR3-v#WvT-Z!-m3%a&5XaO2>_DqJ)43%A#)T7~2f1BB5%?5|%Yo%~)B2 z3M_Dh$^$@0-C)%s;$1*mEEvnuU}(1usw#nm&`;^PE}CWZeT?^ucP#Q33=7O>m|H>TAJV+m2=92W6Uyj`mot^pDDW%AxKk{0V zyF6N3lG=fXZ7A>+rg8&7+@rS%!t~!L%r=$a>gQ#%pncNX90Z$5)$h6{t4FsJuSN|u zZmLux!FI=bj-Bs~p&qP*f>Kz4ode5>6Q|PMci)q4zx}-c52SYJ-f20)&Pr2kZo1G| zNnIzH1Whw{YTi|648Z22{-4AmU1im@)+Nl(f$9D4zX#jIGwH3PZ(>LE3^5tz#Zd+~ z8RD!2FacCRXR-7U5)wCtAiRToJ~9)V13+=Fyf<|r4U=hFhkB79~o%*um&;eNhNlxmiaJVu(fL9?w#|YrH 
zep0R|Gw}tXEvH>{clGnD}j2l1<--_-W0{5Ux|E&8P zyQw*t2Y?3X3HE%}RGVS^6s#l^e>ZE1)%<+&9!7;?05yXJB*k;nY}Jd2vLQgFsuqz8 ze2jJ(Jb{M*g~2s+Z=p-6q4lE)H%P4{(V0-=>ei$?p})LleUIN|Sox3dKEDo=7eul`kW%?9dbP#gNnm%f&sd-_Gx zXefJpCCK429ravH5kH&??}OL%Ady-87`Wq+tEU+zKO3~^)-hr~jwV^>ooPfiC9ixf zPn*kPYoVKI8gFxPZ2@#Iw&+dAiJC$c69gY7DIPH=dhb+Fc`3x#Ng=OgmH;<{`5ln zKmYeX#uCJR>ELw}3@EI)2f&^^cNRdrH$a_3>5{gMC}%PKlWC~E$pYa34P4`%Za;LE z8NwLLwb!YwT4BS!YlqWs{`N1W1J{nFfBw(^Rl0C?me0jXrvWiq84wJS*BS$uRxuiY znKT5a9(f>9x@MaBj3J{&QUu)W&pN#~#o`JkNKs-0<|;34f< z(p95icZn#clO`8H4KT@pt2r!bk1n&J2l1?Hmfj&DTS=GT^Bjv9ueok5O<}5S3LC~l z#9Luw6n;sQ_PxexJW?8P5<`Ce34C2Fnip&7o#`o*MtI}!*BtFy%j!hPww+VtT7e^L z)4Y6D-8C+32BYse#EC);79#{+Xhm>Z>|sHz699Ib8(1a`@jM%of=WFi!UPyLO@VL* zj?!#?9giTnDpkA_wNUtViB${4ZF@X@NwQh5t-)KLg1Q`mUoDdeDxDZ?bvte{ReHiXfuRtg(^j(n2e{DnzF@)(MSRV1ePtYOcY zK1QIujDGMi7AD3CGc$88boxhGgg1nGing)G2J^z+RTeW23|c|Pmx<)!)pFw&PPkSM zao6~%*d2Wp@r={;XeLBHhO#UkG6M&AFHj$bwiR^g&z`F5kW6K_zx9 z$0l;95AE@FuG4$BiF9^@WDaZ~A@0SdQk%WPY@^>6+VeK%+Fw$A)7pclcCy{5>FU9I z-6P4IV6OmOGK3bl*0tLeQ@UV9>B{`lY&vu9L^^b6f4cvH_d7m?nuLRbpRu(%F~L^5 z#r|utI-!4K#lKlbFE3r3Q8vr&VD3ZvIhdyQOtB->o#}n=%_iB-vT(+{Ofz-u_r&ne9IITpdeo`OE}Q)gUdWFvoMtZt6^h$L zR?=tRU_OFjo+*!Q-N|^Ug^;59_Vz*N&*g2GpM3J^^y+J`r5P;KtRWTFrm*S6WRb*z z2FZ~44u`o0(4fs}O6{|sdkhOVQ(<+&O*#bFzGHqQiv^Fg8#o{Rnw!WJzUFsXA6%fl zTYf=8XHvqe_m2@h&7~Z+Ny|B$&LSoM^51?n9e({S3Pm4*n>>`3Xc{F-?b?&h$ZPAw zMmvc%Sj*^&-&S;Tkt3Twwi33Us`2`WOO&nASIe_jiMcs%Hvi3rtL61BxZMTBSE7=8 zw6)eDu`Tm@pVsEp`#tOPbqYFrW!g?ed8YFRAmoKENF31HRPWAJ*-J=;&zwDx?p2z3 z`)%wDwE#F}Tj$wXJ~=M17>LiifR6yfcH#~c{K=PQM+{*N+PoozNH!e9qaq{^@&fDw z2PV^d-g7%Tf48PL-gq+|KY0uq0l+RQBJ>J7M0vz!AxH`R4DMhifCw<$km~023($&4 zJM@!3+nYap;Sx1dRnOSi80MlbVK4P8swpRvpx;4u|NaAUI^md~UyOlVfDkrU9kGp{ zi^MGt#Ho@Deur0pO2gFxx0ks}quqI@o(mw(`Bf$G!kJq?HO z!nBE8mjoNnKkL`F?_YpdNp8$R1S_aNU_D}l6D@frk$n1}V^W9YaZS+6Dfo3^R92B7 zZ&X2a&yrz5BeW?w7W8;u`;#jbXX2V4aSODv`D>`S)Y7fz(eqxg*vJbKJQ~mS{B^6UGkD)0bPd|=jfn_7J-a5^w?`lCtna)oH{wRLc4b{xkKPp75%vzz**7Bf 
zk*E3ji5bA!R{MXz5gu?zMV)Tsn)y#K!2QQ5dBwqZSf{3uo__Qp04IdVX*)Q@Em!@N8kaB)78F*LYjnKTgO?pVa;^f~ z(i%#ANd%w|{^Z)rH5gA7Ahgs74nm^*1W@AaX&d4?b72~(^L)e$X}sYgAuj$wmv!lv6{}b3RF`qV z(=aw-mzVg`GDrQ*f^*qe!v%8#NQwo-1FHbx0K-n?C<|aX%&ehHtZrxn3|l@<(~eVM zT&usRYnj>ak(=e#V~6^a(M%u;F9X({xh|W|u`3_o&aIZMx6x6g=kz;sbMxtq*AHVt z?dkLk+b*9veToSUDh$-0VM8w%U2!GD_AQMf=a;MbEJ^JLQ#OZY!|i<=;_kO<-VX><`TMfVl$b(Cp&pmbaIg^n?JS^ zww|i-`iM)EUi3Mc?s$ZAGe3E}`TU#lI;Q;t?%0i|Lwg5$0OFoZ=~78O<6MU4_f!4# zO>3V#&2PfinHzL&Jx|B~K_ntdNqmuq(V+`zD?Pp-OluNn&zw%zUw)VePO{q^jq%y&Ono8te)jSu&Vln@=RC1zZSNiBLJ9@+w*UG=CM$0He*PF z^WryoKE#5#h4k9%Z>Dd2{hR4YwsCiI=zBbdNjev$F2n*=`hy`c12y&t15yOxRQOsIavM4nYQTl)mZVjhqel$0Z=5r%R$D7TD>lpd_-?0l%QJL5U#M@rGz1mur zipVr{_w4So`Bz|FIfKq#DX)uBkx3Vigv3wIkp$o?^!fs5(C6Yezc3fuI^WGE8ix)Z z#EsASO4>jcAq{4r8buAjPsmD*wGCVK(ruZV1KkA!;kuF*`r|6#$hx55-gNy9H>Nx9 zx*Z$E%jwv$qkzY0tWrz@R`}csxEV%r6nd5n4$aPpgp^`62JSXeuK1&M(N-UoWY4l? zRS2cqnA}2?sbq12#XZw=7t={Be7tq!?R4P+(s}A-6ti@p(n9&Hqzv9ql&&vA)BdS zVr~gb4s&x0s6!n|kAM5g^z_r;K`rWJye+D@IMG-}ZOE1~9BMCGW3g|sy!j|#INEh1 zkK)_B4)FdAsT#pMrj?2J-FaIqX!Q%1s!Ljgi3vLIM=aLl3z(Cj1*FGI)Rnw+TY2BV zfWx)}U#woo2HxGHmYTEQ$cdEqVHw-o-}~;X>C0dG&uMmMo)%#nqHWNQqnFtQ#9i3h zZ)KFa=#p$VXQDORe78W|+?%xKCf0oJ79+cFHeByie$ae(0r8cnQ3W*}+MAtFmKJo* zv_q7(fYFV$x%t@V>l7JwOS3~ZH77p@s~hZ~@25#uFe?R9mpG)q_@6m*imgq@QDe9# z+zqevSP6^j77i{hesgCiKV663WetQyukAB~9KCpzd!1hH>D2uks;ML;g{JQsYC-W$ zH(ig~#(lUCrNeK(nJ%7pwRJTZFj&TbCC#L*N{5E_O|RrsEAIB)T8?tA%gLOZrzJ0- zGyoRN_EWnca0IEMpht-IYXhw(G7?Rpq!oKrEIDUS%+xCBTFO zH2S`2bJsIaEZ5+vS5kEHFeGpRIIV_Rz@d;dt_`EMFt>tL0fCO6>VXqT=G`?=`s2?l ziHJMBxyOu4G9~h$40{j`II-}-7 zQd&v*;dubcNP|Gp_5hL%phlgrNX}z{uRrI{&7c}_BzBk!)reE4DKoaYHL=EoK;Y}U z>^tDQ&Qr8=`_{x{R#@7ei7(Ak_IFa`7-6%)2V-oS&4qh^HKT`-vENJIC{=HDw^vt8tzdWFp)u8!Y!y0=c% zE+F1I&o+z^18!TxjD}5pRb5uct6fyL!zS51Znsz)xJNtho@dwCyoA1of-4UPBR>;{ zBTm}GHEGo&PM$oT4j#IeMRxauBv{bkV0VcX{+8FB?6O|3prH(y#3mgFrYLjyOx@s* z`G#j)^B~ME`DQZAUH`Ro%{BYehadV-y5Z0b>DAX?!AisVNHo;szj8%*K64`>o^ZFX~lkQv6q$_`Jm<=$GdmkDPudubfjB_2ubGXTTSJ1*{n< 
z?p5pY$UMrw`VoK!zfK6YrOkF6Gzv-uS(<4BgwD?{p<_IUPOt6D35KVj&33|MkiXpS z66zLg0HPh@P>T@23jUND3Whb01`uX*1kDvLVXAP&jS>L03AKoX+C<(&Ly2$|H*|*$ z18vY03+612fX(s=fRzyQ*DPas@>=~_pN+E$YC3%#2jWNY%oJ(s^E@@ui6bj1lEK-fMcB*5~7_%R6x zesCLAYklT2}r?z+H~D@H#GV_E!V_Ob@V+$ zEWBJ`*TA{CSyIsUx?4drVWF({k2cA|3$HCld8}yc8qP>H-p5yma5MF|!Q06}maKpK zn@^xx@;D$E!NWZ57eGAV*D(&kd^s{bCE?=Q>AIOP&3MgpCA$=|>3?PK`pu7cg`GVJ zTvvyUis=z)o4u@S`I>pVmGX8E8$~bWC)m!{TYpSyp}W5%(k>wGlX1PGwfA+s!nXRA zR@mx##h&7w=r2#pX!Lkl1V^0t8AryaSj2;astTu0ok|Zs{7?)gYY8M^hYg5x^9j`_ zl)TuvlmXKPOr~s@I0C#BI;-m@0hyASDK_x9_14?dU3cHbf~DDX>JN~tu zN4V+BBn!|Op>gxiqwuN`yz-%CawpIQoQ9eLzzc8-E{bh%UC8C83<5ZTah_0Q{s=-Y zU07mc4yZK_Kv(_cFr|jg+5p0sPg}vJaOesv@kLS``5+-yFE~DF8&5zSVAy-g8-SQ= z0N`ATTpHX|o2dRW^>zQ^volw}5SdfdS2Ue$aiP>-o|rK?VDX#%z#8RL`mWScm4}NL zX42s|kFcxUchmFFy_nu)QP?Fu_rr3+S#N#~@hcBC>!iTkY!8CrILftptX-ncYtP+p zp`Y|@>e~BuBaREx=hG4B5};pm&7rVnBR%#P0`FzkM+nvjh9+Zs`Mvx0h23@O3o5Jj z*fr1YWOt3Chix^EMt@kSU zd-R*%=Hk`Mmdj?kUsdq4`DMH6Ks6ipPbwi z5@gHer+%$Ys6G)#&LH3W+wyEab(-}o&)yfhj0lbZ*!kIYYR?#;_&w?V`|k}^kGGE= zPM6r>PB1(+=5AnE2w-6hLv!det9l{8xmpf8-_|qpT7E0($(>d$3@ni=>Sk*ePl?+} zq{eSD88~(PGz*g!W5CYl=O{Ci0YQLs0VU6tUGNeTb3qOn32O3WB}-KM;*A7%J@o5z zLR08=TOXb!jU2WyFoko~7NU*h;iCL{l~0*h&AQF*HY*drJ@hP9VF1hmHirRg0%hCM z5`3FR9c6xY4qmL$md0WsqtZ3SFs+Es)ci* zCoCxD90U*x4g(BpE>K`xfN)#|k5DU}oKcRTSwP(6oE2Y6DZBOC-lKbp`KuA_qiR1+ zYHdroj&d6dKc(iJg4!h(x*j`vg3UBuNl!oZo%AA`YP^kfcxHN*xT;dwE}Lmu_G*q6 z`B;C=qdRSlcG3*oB{uMYF+Q|Hpt&peyXv$4w^@4J)9_7q?pRjVLq zE$zML2GlDKgldKFR6clbtdm}CzdPAoXe`dTO*i_)Xb-ve=VlkuH@^1G*r`t)blY_R z4(kJ*-FXg;?>KR<8O^Ot$FtM)Y|s9lE&uB zXjSigy=|o&+Z%Ql5N|I(Beh=rUhnLh9aQgd+vFSX#MbkS?p8^a9jdGU-GwdSSEfsA z2rl+oW)O2AbMewEue=hvnIHR+2N?jDkzOoRI(8{dPhSL_p(_dSDw0Z5ichO#5mLCa z*sbGxTpXb(=rXo$P-gx}@}zYPJIVXd1$^k?A7b;6>luJwh|i!)e7n9le%gAr?wMckUEb&5Mb*UT+!UD(} z0OPmcdMgHYCoof(r_-F3z%C?iRT6@}IIF=k09OAE90Ch%nZnKfs(}#YErm8>5L3JA zR@!0E3!n*UtDvC*4kdoD&sLk=GZ;1#Kq%*mAgoeLkf(m~HjNBy2ARw)V_Vy8nq8!+ zPB+^tk4~kF=N8d92D|}i*Hn){3M>#ty_9=EFOug_k5KxIibNuu;8|&K*cJv54=Onh 
zX)%7JElAFi;e5>!Z{I&Dv&~3b#U8Ka!E1*jv^ntW;@u%eyHod=`CT=3D^?p5oX%A7J$pfCd2w3SbH!)$1g_Y2K_RNL!%FC~$x8FXR4j#M~8^!z5wbxz~ zmTnGQdjsnxvZWU%mR5$e+n4mrzget8#3rw_2`fBUofDBq{>@#QM}PS5(u>c1kM57Q zkH9-lxv_+)IrzdSKl$giniLIwwue+8cuh_%p>1-uobMfkB%kj6yo7O!0ds^TA zo9RRyHy>N>dc|uC7;UM=?FaZ_7Z6{8%8dc3uXLLoRA14z8Y$lUt>>A$TRUPqRn;3f z1F%Yf12{t&E4`Kc7G3NhhtZ(gZmDq3rPOd$#XHxJgdfCY_)TTbl1w$l5thu2vW*lzjmTCOL0|?aMO;3 zRct`BfGhsw6@%lfX4{S)O)tLiQhNQ3Hvo8ZOd3XG7d$5?A`!v1lbX<14rw1m6vX9q zt7l8OHvbUw9O}c4+<{2Yp09PD6+U^jUGlX;=__%vuBBy?SH!6#X(4SiO#_Et1v=S8 zLP_jEO!x$3mNov7y5}@kkO*I5(cLs=+7{U&cNpss7+0Z9yE7vyLAb4`B7symB*3Yu zhT-NKf@aOD37Q4&L)@!RtW-DyU_mvs31$Sv0QCac^SxBdhbAv#klsX{1n;6i7HW%U zk9b+lNeM3euc|{p=sT?WG(Y3N_WB#?Ti=P`hrx?>a>9@b|n%#77Ka9~lf;Y{_o_F%S>{2Ktwv!bmNXyjuFkimxOO72o z#{2U&i-zw_?|a{ENQSRr{sP$KeQ_chUs`x)LdRbo_c}K4?iN)Q5|4WFp=%D!7g*r= z*MIpXCLM3GI8&Y~Ay(ytb`^bgRY4JdvjiA!vtgyH88-MRu{(A&^WIvPn~PU3TTb72 zn~S?Odb)@132d9yyDotcdaQHp;6QP9zTP^}u|rO~fcOe9y4DA5G4&oTc#A1*DtNrR zThG%MMCnbGlf`I=v&c>WLRt&}Bf}E`bO!+rN*y&xr0wBLGjr+rm;Nr@efK@-mRoNL z{b5xTRF`m(-3p-1Bm~4Rnu&ps!8H1qfjS^A9S_$4HWkRp0imPmKesRgz?)37=nk(U z?J=(f7W<7)j;7mhzbQTX=%+$D`W!lof>qx+W~x(2@1nuNZkOVKwQ7|mA{(DQxDd~b{E-D;>{y(vYP*m z8052bOVtVN9s`o)n}<4+(y-|cHDYc26Q!p?ImmQ6S94>lRc#cqDr-(ZHX1OYGT6hj zVHc`Ze`TVS2N8f2{&6qMqMZr6_!AfpxgEB&*rW;IB$XAc=qhTb-&V$`VNYq=@Xy)7!&UCDDNTxvU&&g0+5gQ zh_+++xUXkZzi%bA?$h%g%Am{0xpw7Lv=REnlP6B6KmXIeOm7`N7Lzq6U>Oh#qG`|3 zr{v@}&l~gFJhAn3gRY$24*hPP&Q^jp7q4En9Di%PY0b00r`(rqJ?^Iav8I+*002M$ zNklg%T=Pb5-Jm?PaRMH`VapddY5~UtlS0Y7~<3E zxwLQJ-gIehI+76(>+X(rzVHH62O|NAd6lux0j8CHncK=>H>;SPy9A&E)S;uSZnIlV ztM{km?z-y_r2p>se>*+$$Rp`r{)<0M-(~^IrHk`vku7>%nVma5%jhT1T*iE_n{<&S zr*4IwR+?mrhXLycp#o&JyVt|i?n_33vN$S!$9>LG$y6pLUu5PQKx0i*iBEpnQ4j8U_hQB|JG@1b_=Nz{28!;MBrcmWsin)JjtVc)ZN2PzGlKQWj2H=^aztDrKi*A|DI!e@uPf!a1pEe>kI6@ z7fqDV!@OH2p_E-74a0`|Vr@8-ch>6cPO zJE&-EhOwREQ^m^@;QVa7*p1FFD``26&;}rn>R(Laib5|KQjDetPf&ccf@O zX(`QN-DSXqmZTS7SV+SOf^@3hvFZ z7J&K{DEbA=N`^WM?pgtgTzoTD9^_$=2 
z)J(s$_Qx4w*(tRhjgHx#YQ1+`=$(1Yq;%sB?j6b@Wxgz7Ey8ldn^P@@tH{nNs#{s4 zx^%(_fBnomK5}Pz`Wwg6|26ej=^y^)!Svocu0u*W#~`{0&=^QFvwS`UXe=?PUUT3& zq_8bH-e(hxZbaVTZIe(w0WIpH|{IGB%Wc z^m89h@4fT)(;xrwpK$&^>E-9oro|byK{i|rR_clRsgZ$8lLL4t(Z(r2lP22)?4g4T z$Y~zIEK{kS`sOgfD|tv|tPVh5jptFK>bYTVD?XbWzkURj0cz{6vPB6}2Fm(GZWSd0 zGQe_VB5?C;9!fzko?A}S=if@-d+{iX5~tD)H{6hJy78uT@P_?q@BWEcP^9EvJ_b)h zeI^g^f*8vbV6$rT(jku==$%|NfCzP2D>igUi6_KmCInz=P*y1rzgRMUtnql4(c^px2E{!i_QxhIo*`Ts5IKDEM_V2*S}KbGWE`0VBx( zgyYk{z&O++xRPIhU);h!UMBL@Dn-yQZ634_$&n2l`C_8AgYZVtM4U(Ptwg9?GXJt~ z6(XF7N!odX3FBq{=d$N;xCTGC;;ero7baL4yiteHsVaesW;IWD`pg-u%AAd_3}?@t z#{`{#*7#V_U@}mvovLngx~3g~w901g+>CK-_CcTR_R8uMJ07lHdoWE-XpzFf z8`y@V!%je;k~{U%mD}Ui?@QjD-1KuG0oX7ANt5QZ?z;1i^jp96o3WY4pZ=$>rLTVJ zx%B4YH%XRHq%1aD0)YET*I`YmPhPBtHvq}cy?I5wX){na4)TZYaI1hz9^DL6jS}IX zpTewIMknE0sBa{M{eIsfA{ypisgNKkDUze`A_5jl4 zJ)x(aKMf01lx{j{$q+Qs6C5ZZrF;U#05t)OYv9=OMfsZ`SuRy9*`d-)n$S(|Qc)&lqA6VhbslyZirfX&un*{nUmvAVDo_*EjD^@B-UKf#mv1c>Udrp+lDq*sm% zukw9%h!DZ^!o=L%gow}(Ct(Dbmc=q~^$SfDn;$1Hz9;#>kLNkQ0!>d}VAG5XfZ(&S zee!v9@Xwt-&%2sU7pmHjzYbZyRrG#}8GJQjd*qU6{hIk2{5J-bWl1elxLJw1mtUSW zE$Dlv`iA8f;4^(ren0_HQ|BwFc-@@7@N++zKL7b2OZR`^hlqoDJ=YlkFzpK#u|A@T z$T|`upVpsXam`7}7FlB_KjWXjaE9wLE4f$F9?Tp0Szk%A9s4j_t75-2-ng0DB{m1mgx1YvJ09G-e zZ5eX`kVJ9w7vwKcE=UdU$AVDzfp+m#5;o@i(F#?RV`bVH0 z_{q{-q*UOCSQO>NL4ZsB&0L&8U-n`;aqMIo`QAv>%>gz*(cIK^*B!!w$F)e5CsB*R zwlh79(nJULECJ2s@i%~R#hRE*IB1)Pl z5!ee{0Z<{Cw1xseO08kI5KeW7P`!{4{Q;Sla+ zCi>pQoxF;pfIq`TMRFDF<_U-S9K@aNsJsj{8ZscB90OIofUwv7H@nnv}z*Qu5S}<;^xzu1#Wa?_GOLw zmoSd$>FF#;h^Okuf>W=%$1VqUIk27syMTB-PcGl3wa2>r^nQR+(8L!b5rANft(51} zAN~735pkto{k8uR{nY&o28*amECdKsGI`y6>#c-IEh$27?*o43HoK13(c3DUk{kMTxRx$y!meWq;PY z-u1`U=i@)t_U=bs|K+@1TlU(Lv$fL73Y1ApqRdPYfyf|o8cfc)!*t&7_tdTTUU$Fu zru$7G2I!i3eebP!>QvoZzdCj5R5d8h<6{hf3=nY~Z#9VI>}JyTt2d|5{`4o(y&t$U zee>(zPXGSj|3-T8rB|^8z*0FI)beq^0(S=@q{RXufU$6i1>|`p@fdWJj^Wi`p|^@I zII5f>4u3gb@x63cB~=d0Sv5tVka;Rx6%;EbRg$G8g;6Xm96fT9w%MC{`g`KI?v0-4 zwRt0xnRS%}c{S~)#en?D$XqLT=?nFRUkv;1X9_b@s{3YwxqVgH=%Sm?wip 
z$JL4G6esdI!4m+VsWdr~`-O2sx@wbAP_T|Z6LsdHlohy}lg@A^zU=%G;_1*jqN^DLW;Gb{LBui0(%Apl2 zvClo0#->No>Xkj|Lm&D;`pG}@vZU5{#&$D@y6dpd(vEz#sX37U+|lk+}w zpKESjrOJe$>SB;xuaBEsXLz4X;wp=L^*sxzv!3!T`hNK;Q%lWnCfZuh0;t2ud z$eFY0_kZ_~kut7LKlizxVlY5=aMY0#X)Pwx#@G;d>hw`0w3A^wSbLv+NQWJWl<)|! z1y|-7BS9H$YA=y_<=4s>jm`m43k!yOys-dq1vW%A)i%yyAw6i=-*VeE=^Ct2-248! z(r^9d@1-yP*Dt00`;G#LvCzP&RQ*UnXRs!q?d2Hwk&e~N3!-w7F;MwjetP>M2u1{j zaTq{&=X80DllF#u48H_Uup>V#+5uW#>D0;W;#sihhA0PGttY6Xm?dx=Z~{;`d31Mr z`Hk1$VOLr+G?dn@Ux%&M4Qcb{El82CNLOsy6!uzGH}R-xH!y`%k*oqJh#siAB*=k0 zpJ@1S54tm(QxO^U6_+Qolc%Una*lN8S5(^P!jt4Y(}+?VW*k=$_j$!6 zO{@A>q&sf9l~t=xq)&X}YK+(5Es%~}RAByqef zO6zLxl3=6e%A2HLEk-TW8>KSifHyY;4FTE&!yXzk!eEpEIcgbw44xA+>9#v>N`L9I$jPDhWPO0#FN-OGUteVCb&AWd`$kefQmkmv?_=EsasQB1~NKi0Im=dTs!3}UB z5E^51 Ap(yq7mz;g~cJTd1CEUXFN=#Tf<9|wS%r=gqq^1roq1YlDL6`m7S%hTmkhn?ycbA$5R3Z2&SwIk zlb3w8t&SZ(5~oOJD-ehC68l-+P9`Vn&GvT>0MIz*yJt1k*Je0o^Mu^@VuJ67 z6Z17|*I<6HzB*J|YJRn=`Bj}9?o=rfm=B*ml@1;{6q4O5Hg8VTVg8Tz0wVK@2DwFN zi9Z?G)3Fo|7*N_@o#x~0!|>Z?Ux%#d4tZ12C zOArR4>-PsYOkw{)p zndq4vWC?IIKcGrS`84F0n>Je&hj{~rUC<(I+eaQ>H=Yb z0vcWB?FZQPARS}O4|nwNiS#F5`gZz*&;L<+{5wyF%7Cf>Gk~VhZ`MYEfFuALz*3+h zD2RRtO@*G)EtNLnwDeG11U2;AhFhYPgmYq-hdI}r&m6D%Lsf3wqw$WE&>gN6&a?ai zriO&c13$EIpk)ntAHXc?DRd?dKj0FBIk_NF3rRjzEz;(xFlmKUi{POZ1994yy2tC+ zuMa(BrON7#t6ymw2?_*vb!n>0#sC&AT5uP9i3$tA2S$MKYu2oj+{7G`eB&WGf(@4X~`rP7m*iGS5xmW>zd1Pebl?Q z@lB$9_Lpy|5BcNa1SW&tEPWRgDg9OT;TRj>PMta#M|q!QLg$1-X<@8xfSPS;fwa=f zdR~?cuW3l*S;q(?a_-|wCMlbrKv`8MCB;E66^_1ddM$bc#a!k&(^{8i&U_-XT3|h< zbW~j6V&(l+UL{AovP9H=LZEF(zns=u4BwY^Ow1no*n{ckfBt_+_r3p)SQ(mR;+-4I zZv?#<3Gy`k%!%(9_TyYxvKZUU$+^N|t=ROrF>juWc*V~Lopf^_{es&8zKLhY#?r1` zyVCC6yW<_W>E@dV$ABeLWBJM-V%kQLFw!cO4{IFp4)Cqw+ok1{6Nk^FfB5(QdHVeC z{4ssmQ~$K`p^f#KG=R!iyo(aBL>H|jkC*6jJJr8$OHAG}dV5N$7h`_viud!q>(4|e ztXWV%>wcOWbwPr9win~BHs&oY^Q;QCv@{nlw%?KFx!2$C9G<_U&EsAOU%ycAI#**V zeYair?vybO+`4GvRxRCW=9hUofyy*GioHk`r?6Ahp9aAT04UKWy!u!F;@={*9ZjG7 zp$Aj{pa*++kj7YEX=c<>5_0tD(b(K`<&{?=)m;&Og)LdK2@rB{S8CGc`y?=PaM6N= 
zF7%tZ#vqTx&l9t{S@IWL)Ib*>piP_Cr=R(AKbr1;;GXn{|LsfZi(mW`j&j`@G)xpn&`Z(+>Db(FAaXq%e#xh^Yp-GB^e?^Zw)yL-ur#lvj&!QC2B%UV?Tx zM3|#|qod3z{HBR zP6zfMB9AN1O-_k1VBha+m$`WY5I>hYkU@&WUAXxAWe( zEgczFSuTtcZM)DreEim$kgVN^=DVu;L!2@RSfnQgSf6Gk$am10&Ma$GeFNR;3RHOR zzV|)p(?9k@>A?p-g0A}tChDX|;@#Cp0$ zIBuie{08v^&zV<2u|h!yU3;SPqSViZPj2~i%PtZEM=?4@UTBO!5Lx`MNUb&I?gxd)M zM3IJgS~Zv=-_dFbTCiLOHYYpUKVFIS*a^|ZanZD(rx>IbtvtX`%!F4GTyuq)p%JtT=i+OOb&;>kWND=*wj*bQZKFdU8m_sl|SUnlT&q-I5 z1u^n5Px_(=yaB(_7SI>5GI*8BkUWQr=K;98ddl6<9sik1**QMOseQFl4u({ z5%Zn1UETZ^W*TYlF6{O0*s(3$b@v_VV?XdW^uM}Zt$u;RaKTR%N$td zz#I;|y8&^0=NdnDb9l4-a8Wo=z-bv^k(9XHPFb@^WR-#)KRTR#=eNIr)Ob35=CePV zR&hewGyu~hm_6dz0Y{x*LBa0bdjJX(=%8Y+c;!$4sl3_F%}tbLaY~WqI0vl8c5l3| zB0`h;hcUwH74ePdPu*rEEey5{ZdYHwHT{*p`b+7fAOBE#_*+k;FaP1=>ACMdhmPhk zR6PK}j7APyGY(b`(xfYTpCPbg5Om-cpIuVdro6cTX)aY+%1C1VdtgC7hg9?p03ng` z#$hG!#%1Jjcw-X`uz5?#n36XoV|5pe6OXo)?HVdIj@}{3f@|X@9-zFwWQg&sT-OacGo)C5<}eF1=(3kiY+ zz5=7O{FLY_-Bqt$ozRj#gJrEj0Qz86EZM^oAF^cD2?=Gwxv+3SLj{Pp!Gd%pmFkj~ zw!lB$z=n#%v;LK{AloIXZM!wP23{CPH|PDzUO8=be}OL6(l<3!HZ0S3i5E#wt8{gi z6^X9yY`tX5wk_%Y55GVCz$YF|_uh9$+O%aODFK?SK=t%lKV-+#W0^AgU)@!8k_R=H z@8fUuh5TIls-$@DX3>!9Lzk;vwwZm%zIF<`-h23dd757j5^2?gRDbY2vA?(iqhxr} z{t**I`i(%DFqUYdpLg+|*@x|KmgDzD(|78QkDr|&uJ06ef%bU6=C^CDK9}o_TyAWa zhI%w~%Ld=&+hq>4+R!<<;YB@L}F8WU) zu6$?(0Q!}ygO39*l1et^80M*Yx7>O|y7rnK>H8o0XnO3ir_)!y_RaJ(M?oJtcr?Jy zv>>EcDRDIcaPTr8<#qtHSvKliN@uyPoWo2j7gDH-7am(Y)pa&~{39QZ9rs`V#@8{Y zcbcT?{ZiLycjzm%1f;5K3;?OS!e3h-J_bOOC)`HyWvFvd$3s;Gl7y7mxOy@83oyLb zT-cP-_$saCoaD5$3={>6{FD;r?#3x(rp-SmI09*PXU%Vx)5(k{nD9A3mT9YhbL&vA zhkZog5&xontxxVR;g6pxBmrt!e%JJ$eQ6de4*sG}75!0<$jf>fBtN9z{$rV3g9niy ze5|9+aC%kbmowGhcwDV>#Lnr$&qU#J`nn&_C0e<%N|M{c_hGK9imyD)tK>H~-i3M! 
zb7cU=i5aZISZAs{IW-pVV;5>SLzqw7zT@ijzylvZ4eKMkqc_Kp4<|xk=?2%V4!H#yBE_xw<;jQGTu^J@%_|hqhWPm%@SvMlUU?;KItygGpud~r z(N*M%RnX;fk>RxtI-lrwK>g-d>23WSjnRg}w|{#q1L8K#w;XgaIN(de2ibR{ z_Wt<5kLR;U{d%=TK%A4u&!*q~{XgX6-j)8`FFus6+;&As%v>@aVCm6;X97d^lasTd zJ2^ajI;mHzc`qd}8Gw*&36+`dr|*04gKWBcGX3{I`C5APk;gIDb~uf(QBOeJk19wnl2z3P9QXuC)yjJ0cS-)V zWr^+4I)WRL9PHf$H`_Iib&&hgCx7rk0LL}y3t#wRdgZk@0H>2_sBcXic;YzR&xWI{ zi(P6Lj4cn*p3!FV1mDor1qBG+^eKQ+vO@PYz^zjE+y>?*aJ6MJfJSP~BqN}URWXQb z=pBM2RZYmSM$nokBN4^8EUoLJ?@*v+*PKDM$6Q`5jC`%DWqAz{9v<`@^DrI$=9j^1 z6IW zM03~RU#n+h%v^3JLYUU*<|UZ-*ckId+z`k6u5|6K4h7>W~kErQO3oe=o%fj`n>b~t)I-%4dsw3AaV>(R-k?oQ?V6<{&0BIH8(!X7SEfX2g znwGZ7E_GnkuQOV^*pO9oH9py!0JSVvu3x>LrEtwRp_0IW-QT++z4Y2^Y1f`-(+xLV zA2w_qB$PI)Q)lBU6;V>8zfxom?HEFzv@RK$KWIZitqzU#$RH@_^9_!l;Q|1|a(FLN zQ7Il{AQh~*!Ayzt6t+J@+J~7qttM=|Vr{zZ_8Zd2KmK64^_JVvbzPlKu*-ikbfB%H zV1~Q`MCClA?6<)mX)9oE{*^J7NsGGLF86D)t~Y()L-zoPKbAiDp}UaC?O-W*Z`!?Y zFCwj;v}Wac>d8rdA`Q(q%y$7c;3)ZK*`+{&rC>HDD!KJuwE{t~rrume&VK6Is=mU! zUOw6?me>6}^wZf=ZCW3;7Vf>PVarR=c&P;b_F{W1+&6<|sxV1za&FiN-uOa6L1 zn8kekVVbN5-SF}Y>21)WbXN5WKq)5NW|RQOsdO%Hm!E1k-V-Lx(glr{nfr#DG06vD z8#er6Vwz?uG zp9U9g7=%`y<-UOY5_eu+#e1n&>NA&u^2DTucis98q5G>k$|QQkx7~ho`U}7G3+aFU zU;bkHp-(@EE#Ymf8c|>N!qH>^%CWF=Bd_dnzQ1lhn`UA?MVs5V8aKQe$2Q2!*nE7ZF&L6wyWwda6Ucp094;1T${r8D`dUzlg~c zJj-rA{iPnQuel|bp3WTSw_@v3E>uXSGsc0EC0^&Iqh7x`X-gh0ucoV~Z5i!?26R?l z^n>%IgQCyP%Up;<>80n*(kuAx1;nKv`7LkemeAM>l9#*}1iV8$qJFIy*S?N*KwL4& zHzjU4z+vUObZYoiY(x{72o}|;?3wLLZ|#3JJ@Ux6)AsG#)6F;E50wsBC^?n(}KN zW4`ZvpxgKylY#jOs%Obm5;nM^kIBnvfc{|mnV8sV$DKD)*B?%+2G^#S zUVJqjIduZ|32dd$!Pp!c>sapnyjFDp_ywp1@Oo8GR$hBwekG4AO)Ytn1N|m1BLgNy zm<9b1X!`u`e;zY=C)4(;u3~S*%4lnK$nERC zNB$E8kFi2F?gX1vv#>NpttgXfW39=v-3cw{+Y&Y5s3*x$P$9XfJ0z4pSsG|;y;U3=~I=>83`#F%%GYbJ}a zgb%*UpsHSQQqvjCwEjzY548 zDOB?3bJmSDPX`(RoApt$sdNr0k*3Bp_GCM4*|sKq@WbzADfiyA!ueZZu*tL1g=3(;wx>#punl#m&;FSoOV{0S4Ss9@LoHNx}_xxePsO?#=yeX9D zVJY+nfKire^PW}GX&Rn;DWY%kYeJo+QJ>7o+M5V10dVaLH|Q1K%SUf4VH zkN!sgn4z!DaE*gU=%W@N{geh%&v(jqhKhBg5?~`bWip3Z{QPzCK1`B+g4GC%p21%E 
zbT=y#>-*Amw{A~A@)O^m{_5ZOAJgal;?Ja8-g9k2XrIQ(lai^PeO!qzKdv50E#1@V zYy177{RNnUWZzO=LZ03DX8*GNO1o1<`wB7qk)*0JbSiPRzp5TFy&^s4pb zz4q!0s7`&0GDnCzP8Df)-W}CEoUk~l@9)}@rYBaUuYcvK^zb)dNW0!VnVx%k7xsjw z(#_Z3nN|*M3dwN-ICr75KQqfpSQ5$ zqTO5N*+}9g1H|!8&aGQ(`WoXkQoEFU^SBHg?_jLfiw-8SQRP}FTeUyEH7@`wiVm7N>%vKBQ0})5f8?u z6b7S7q$gUXaJkY!&t-lkjeanB&Elh}L`Gd$2gdvn74Z52E+63zcr>Rye8@jRR`@pAH>j^W(6#QUNOD7t7tyYCNS~a|R!aw2h+dOuIhe zs<84{JhOdZ0~>yTk4{|FwjrCwEZg1ZB>ugWu1$Hif0JMKG7LKB|L}-cAOaTFW6l7z zAh;^8_WtCyv+vmtLt06cm=wWe+uV0h^@8jtSfu}oIC1`+o?#*d*j%v^yVRT(x8=$W z=>s3UFa6BV{$%>|fBxq%d-%iX2w%%e43&Y^lu27ML~*)08!1MeJ=VismbeW2NQ z(04RXtV=ocS3suVdWuQmo;|zL)6YDacJF!%FsrV6rQSonlWYKWayT%69d%Y!PP2*l zo8NdiJ^JVqSf4lp&}FiZzVK`4;y?A&vuXXh_3V$>67AwU7ON-hJLuxw(Vn;82F>q{ z4o_fyZ!&%N*_YGPPd?9k$a@uF*vXM}@~hsf+$Pp*DbDunl&MQ8r!npl$|Gu>z9pn< zEUw{x&Nr#Szxl1@by{Yp>6%k=Zr@8?-nl7i8sid^xeSP#wnFjr<8kR+$8Fzn!t<$H zr@ZMbodO*P2?x)U9OL}f&bMM2a?RSc9NMrhzC(FOw*!@G4eqdZ33&!7&Dq$%A{gyQ z&u##dMJ_vQ&dPyJ>0~xt#o&wLX$FoNe0Id4Iql&?{weM2XY)%|^T@-}nl&raO*h?` z?!W(mbpHq5pKiS2T6723fE_qOdh9dk-Z~s6)$|sKwa{v zt{nnmnc305NGDyF(gmZM6xg~3Mh*#lu!+VLD3zZzZ)D@VoM&q*D)R!WcCfwjZ(e^R zJXA#JI5~U%(vj@Dz6~=>Xx(%v7*;=h3R`sclfjkUY4f&q>8|(P0r>sF^b5cEbLr1< z#PUZT`~ZE94Yd6kOi!SPt6D@Ky40>Jm}{&ydo6l8L2QwzI#0Mo;?TCfB(a8rzf6zCXEeq zTr`_)jnk*OLcT}Gjvh~c`lsIxyVKWPb2X}9+GDqE^S%iupO!7i&U)%S=n%gQD1M22 zh}RSl=gG0PTAIs!YH4ZCH+JFwBJv@s6lPbhi%8cZ$}%8s!I(?sef~|jBubY7aa(r9 zC$(*i3mMpP+jpGse5mh~H=U&$z*<3=5n|KJQ2!7E$#~kg??4=Ls;RY&8#kqOYuAP3 z*nxC_Q|?rO$eWpv*r1CX2O4mIffgM!2{wRTS_f!BbJovOTBRh#0Ug&VUrm=s}pjB)~Vs=o>e#OgG+i9UIf$hxGV8>ALH$LHdfWBj8|kbOhj=EhhA$6BwJr zpj*8`fs9fqRVV}(4!#oKi{5G85GFx_6YJFoQY_V<;qxn7g+&ng)uIRj1m~o5;=wZZ zrYqI~P<|Tv*M_7|aG?EMCGzS~Dru9~JGSpgcieF+ZPT0H+QrF!BO?KZH6hi7R4`Le zD+rnjup+ET>nfy{W0{6HJmheaM4FO^0rFz$t~OU#V$xFb5|f>0bW2^j3soVL(SPiL znRfb7Ej#;w;WdwVR1G5c8^R;VcqNWkf3!R3>$K^J&DSdw4Xm5>@Hw z$BbcCVVX%-Y+27l>dy3|Kk>=*XMgs`(@*}?r_)0p`@VGjjaTzt02Tu@GpP+V09P2$ z+trjF{m(ErvlV@72wH-J2T?WOOsK4?nQO)aonLhd*&<-~YI&|mz5d240fhJN-OW4B 
zDhd5sr#jQ@loE1}P*$a7Ae|kZLD%=S^wlqaD;?N-ns?lzpV{Dt>B$6O7TnIDmNmf9 z@ncB7Uw!%Y^wwLu(SKf@uH3pE8Zc|9Vo1F2l*6}CRgkff$@KiQuMmGHe6=pB$7Bg{ zZc5uae|MI^lqzb)lcgs-j}&r#kwARfXhMvc5=+!?L;du*dLHcSSq zZsZXps@I&sF`ykuuF~UfHUJF{VU=O+inL|R73n?ixix+0fxFZFANo+b<(6AmGF}n( zmd~e)zhMSEPT`-tbg z`iSxhJG%TdhxfT(_*o868KhnE#x8-ml5p!b$pplS#~{`ytk)fP-j=T3u_K*ivz@?U z5=n6{;ejm3J@d`>u&n&EymCJ`UK#%Cacl8N5y-~t65YE35Ib3rZQCgNKed^Bw zgnuM`-$Uas7G|(kLLM{5M>=btw-07%ZBbuPT|%AS0){Pg>8|vudL8O{_^$fS zDEalf;YpOLM?Cl3vuWqf*XZx)^j7bICsjHzoy78k)>gcBcMY<+ZaDqvHy=+AfBW%t zq=XJsQA>fE+F zzrvRR@%i!Zyi+fs>Cd|?GU(dc`{QLayb$?aygZz;^CDkpmzV2W<=UBaX|Z(9FT2yW z51qb?z?)WFiB_nV?pwua<(~(yR-p_13y*sHSEEzMZg5UUbAwR~stlmBUDzQ;?{>wi z9`p*|n|}PWKb+ok*DV0rDZuYZhphl*N{f`n>IBqEH{H~=Y11b3cekZAtFf1h&YqIZ zP&;7Ij)8R{N$2p9COA|B5hZ#q+YfA%F9-%O3Kac-%a^Y1k)Tyju4KEvZ$(H@M~5-n zg!+Ld<#z8noSu2=MJ#T7Cq4V@bDVN@kU^Ovl%ZQdpQXa6egHx>8x}g7QG(_3t4Zzb zr2vbdv!S2QdCwZ(w}v=wQj)K;E>VxCyYIU#{lOQ03!B8$jfqGvr+z6VRuXSLw5H%- zt@)^4O@jdtM$SycDTBZMYyTnr)^Gk!8bSBbrQ*CfOj_Ltl;4OLybCE6eF6FlYh(Gk z_P|a3O#-$z1}yqMHZU$ac4I`qMrTDfX0qseixS?na)I4i24?k}984b3H++)+wzt>P z22~lhBb(&vPX8(_JR9%pv24p}Hk3s_hbZ4ICB?nGXRB5X@y_0v?q*fwgQyMNe9QH$ zI;_Q<-^x7q!D@?Y8mc4sJ#aNdQ0nAM;HguMA;1ej*cBh&Zx7|^Qi|#eRde*5#>blf z+#5I06_?{@PAiSoZt=#A>nUf73Edt*@SEwtp#$j*@8{~({irse=ERIz-ChriQQEs= za1Hvslc+naO|QQ4X8PvWA5O2owksy}Ljy{LF&l^3w@`DU@2hh!&rlp8AD-=0eAC7` zUfPYU+F8EuzPr;;f9@0MmfNo5{TV^`_%xH|)kvFh$rjh_-H*^Cbhs9koR}%Fh+H1fHHN ztBV;LTTzV7$Iyibv za2J*ndU}C&9PNzOZMyNs9qGqD^MmQ54?Tdyaz*%0V4GB4E3xzo=^&bc0>d(SY8Gwt z=B;7kNOMdAIrWRPlz6Fa!t3kc6$5Hxi{=kdY4g~)ClS!4gvSrJfY@H((ZUX->I7=e zO+cusgUi8F44Tq5&H*Mn-#ma+>iP70zxVst{oMyRKEPnD#7YRTpBK{2W4^y0%Pjp_B|%jIL9jIB@&q#3hO?8gM%tjD@lg_+xW@jLgT;9-9+sWj}U@;r9gZ%McUdF!2Zo4>crKf(DpS%3EzItA=#d z4MXYYKKE1U!w)Fq`f%PK~-k*Nzr#_pu zZePVvevG3nC6ti1psl-@t5nGH^&vfxgUC}fN7`w>Cwj?jT7-+c>cw-Yu7$D z9iE1EWw|b!I$-C+9LhluS#snCBtZssje!@?!NkFV*t!?VoBY?lYCdUBxvTn~UFqz^ zX_mErEdBbw{rlYezGxA^ftUMeBkP;%5PpK_s9yoS5D!q_`R4BQkN)AW#;JbCk8wmd zG|#YftQ6mTrU3}jC2yrOw}~K$;jf158r#Er>)zV614fo 
zQxD7a%{=n&w5t^S%ltE9#ZM+YwsSEjXx|969{Rp*Fy(~OY^!gwlb@`(P;4%eyMbbn@pr8GVG@YFF-O=pPGPvZ_a| z5gRHhZIz=&<#x{1Z-77R9{r#G?&^^B^R&rX>EWM8DEG4I(|E4JoIN|5_U_x4UVZhI zbmHXk+PmWWvSP&w)FnnpgNg+G75i{$Xkayy*OU$&IG(=n`9J2U=EJO9jiVkh1Z7U_ zod64rse5hWk=2{xy_rKl?`YBAjtK{o=C0#uacD zsD7RDH4iibZtIon79UvZU8}nTp zJE|k7m8iQrh2G`(^k|lk-x4scK3Y#LhG zi~3AoTD7J>-M|L8Pk-iz(qI0cekuLpFa3P_QB0)0{{#1=ZC7o=ezm}RhDj`KihML% zy~urAkR3$X-vtY?x?%kamwZ2}7LLZ!KTlTcS37k?TkuCov+r$Dp9)iN)G-bgA-4VB ze(!h1es_jr;rHy_gH-r+j$M8pz^f@oClj75=V^t#u{uN4n8+#p9q3zyBzQbM@tyCc zKl;P3q+>^h*+|D^0X<=LeN_*l`j!Tn(E!|61bg9^pK@51%tvLbc(hcgCUG*|3ooNi zwVy+OINl!Bug5qt{&Yy8^EWZtw0Z|EE4Oh~7P&3f#W$|dxY$JJ9j`T=^>@EnA+6Ju z*mKeoz4ZMOzO<|5ox9BDd3Ktvd2Z+Sz0~EMo1&&ME-{(!dX^8l0^p+by=Z~k;M%zN~V+qPhtShKrz3p-a&u$ z#@Y1X$KTJf!ka8a4yqV89c_7TxQFOSxEGr#6nn6d-0lf@(X5c-)0GZ{vT=nM%h?zyrAO=>74YO9% z+p%2riV-Y1?B26CJ@*V|pk8?u$?+bfRQuB5!$;EDGc569_USC9-IV%yD2fAnQRgtv zs2BMK+bK@7Z+V#->2VJdDnRyM`{lo!{?EVr*Vze=)E%C>%iksM>;x|Sq%*)gJP#+2 z_=RM|aO;j_nhDDjk3W;tEq>&Y$611&V3QDbSn-F07*naRC;%2Q>U%pNjmG$t{x?Z!xTKX3ki_I zx(fa=;X_gzy6ptU;V<~dN{Q+Vv@6zgNnu}iB9%`pT(^D=<_}k)Te~h@%~8qkecx^A zz3;gbKzuz?;MGjT1iHLC=>O_MzZ^d&PXcc9GS58WEmY_0EE3MpEjK(SindNw3EzD) zjA#9PDp#KDb8TFa68p-!>QE+seR}ZU4Xj6x9!~rA?@b2}9za)jl$8XK`0=+rES4^cElPKlx~+;Anicln6jj83cz@(({T7FeL zco9C(5wf3Z&{BUGcHq>p1pK03eDvrs^d5JG`KFz(9!LlF9pe72wC}(HBuZz}1k!a+ z$%{=l@WA@?ppUGCP`(Qorc^h9tD2q?5dWRO`@f~X{>#4<)+8L1W1<0vEjK&civc(Q zaRJJuK86`DZ!lDHr%vUrw+^N+e({gfZ~yl1a{TklG?F{;m1a7S6Fu6!q6uJ8b5Ugd zx#!%CQA^r*(^>D%H7>Po%lWo0R}1^N7ih*yXhdR z5{Z-Ix-qu~=8ruAz8XrVrBcas24PMVt)uiBvKU7^Clf?!LO~$n*(SUlnZ}kP7@cshOjy5 ziJse{NrFXr*>3(iaLa(WIn$N_adW=-p>El`TO>JO?`1$-=V1AO)O|XG44nhwB3Xoj z08mm=|M&<>9kjJO>PO9wy33htKg+w~0dcF>4yJqVyE8rX@dwald8^7`YrEh)h$vDAFU2k{qJ0a5ao+hfCq?mQM zD6%{N2dd&qF^j#r7qJs(vSv-&NnVRd6HQ#T$Ab8Dni%lw?8t3kMEsQY8y*tc$OC@H z)&4X~pYP=z=}SW^2GRypTDD&W*xj-v-F(~im_Wli3t(8i`?YJ=pg%iETvT(IOe*lq z5@lBt)H`-U2Jog6MH>_Dd*yrLN=Ce^rLROKf{WaFwLj_EPzfJv&hQeJKgDG~woj!eret$e2+_yW-0(#P=`CAYBqd?aFHZ=vO0G&=M`ujM7oQj-f 
zqw6cr9p-epXVRN*VnJfe?*fxmI+GSuW?21*?^E!>6&C6BT_jJ<4$k30q0boJ=eR!A z?*Zd~fhBb$8_3iF;gsjbp>1t8|-=iO5}+#V1&mKTLqf8y)f=y3_| zmjUsGXL4(vUih-kBgbVx+=6+_fVc%?_*`BB-UOIw{6@DLgj-UX1M+}-j!4ad9W}!h zgd;Ke7m_A|m0lG?{7P{6PYfh5N@3V^=Z2S=?yO;i6yYpjMVfdqax6mecVB1}j&tMCWyo!88w+MN%9id`0cT3q-X-Oro0wF8gIk zMw4&`wyqvGs^t%yyeL*H%t}=cM!p!-<%b-}dZOHg?lt5HijaJ{sYpO@`qXebfMn~y z{v(_SxC@Ys1bOfNbm-t=HWHmgpK>&$VpFK4XcPAT{mp-ue(Wbc79hdFHrq$Gu!`Oa z5LW1{@dwy{>$&CXP)~mk}oDBEIkYC^X_@- z9(A*Rm5ZVQEG7HmzsT!50uaOh+;4LG7ho9x68?0-9CdhOLdCP7c0Grys3Np|$JVr! zqk^xyZbty$E3e#2pWg`hU6od^9|}NAi{AOl{R4;ZB{hN=WlzMCMlc$SV( z6X2Tlk?p(H03Gi@bzk)#^@~Z#Z=s9tQmye9b^_o3RxWr0F@jtV?Xhp~-Mc6D5ll^t zK%@0w0*Qp&_ez?rhm%bKvG2?PhXjq&M_+sG&Ghn1FQ;dpIE-|eViRKW^tndYSQpb}p4MXsOaZltO>;0dPUZvfPE3!nVUH8+R6&7d zf?`vK3qXuu!E%=YaeOoK`%(BKmuuXrJeP8r-$qMFTYbNmkm5X}wtg?pGvf=J+`d|S zzb|ao&2n2-nP|q9%5PahTxU*n%EoVW4an5vEat9&&c(!SjH51Gc6{i0N%8>cmBE5% z@p63R1N8|6@(Vx9F6$`{oqUs}E(a zkA6BUTZQv9J)`xE5+)z4uRO?p0uvshKKfauSOWJ$1HXtJ666eMkoz)_DiNLpC`^tc zdE%6;GpEN{LOzuaA3BnDzO@$#*v|Aa$J6fI`Fi>{|Nd9gM?ZQ`8au10I;CLNGYD;< zv!bO|f8z*pxCYo!(hWERNN8EYrFJE2n#5B#bL{kNdgS5f(tr9-znQ-BwXcPA+YNY= zT~`8LcFo!y}IJ5_6h=o5n9=*N^{*I2V| z2sMXoocwkj79Xxnsy=A`Y}4iqY|dJNWVk(ky7t#aBLSMPtWGeZ5HtcNweLjp$V zZsM%J@039Jz=8d&j_kse+KI3vVZJ?BLorTNt18ELCTLWmj8=O;VEDxG)9GIix8!*;$}v`c5{V0OSY& z?21=2vjc1ZUYX8}pG*S)Vc$O|s!E@;>V(<(Eg;WjK%CjfUpegmIo|;LL~ig}yrtdL zI?U3N8G9KJH|JTr3%=`@#$7&uxKn<_0NfcvI|sy_65E>2@&b37bJKL^#hcc&HZ7~> z**IX;JkNO(_4f<5R9Dq8z^o&j0|~%G5a9sfArkL>@2%;lf9A94y?5OTs2fK5$$&C2 zfx3kUWjGKkNz;yQ*86j$jKPwRr<;S;t=)i4)tdnkn_@6C%nz*~AeN&cTTmbkF(`sK z1%5?Wvs6i2%XR0+weEuc`s2L2&Ujh|)p};|r4@_umusOrNxgKj3{Jh=&{Lm0n894y zBXay9EZU>)GxyFCOl|slJ^|wKQ-VFg#=U?2ul`;77r*+iQK1-xo~%C6#kChxRsxU- z)G8n=tzNa3_B>b>A9 zd2ktD-q;QlNJg)#mQExS8l3g2^Pvi>H*-VGxwq3y%$&Gs zgk%N5^vvE+ti66tZ9(+%FA9nwb!jK{Q+!upF%Z=27tes`jdy_ zsiz;^UznPYes@KJqoSvKn1BI=IZ;k45>Gw(Li*}gICgp85y}AcvWZm-616%;HR?PF zo`zp=BJOHYLW-Qd=7rYd&AZDv+lQUx<|&Cy5$xw`xc52Uyu{(#Jj9^<4$e(jjWIldX_XHX)*kO%%b 
zcUod}@C6SZlDntysi)}@^tZnG?etIo$^Vm{eBwEP3_6sqso=_B6hO;i5fvXM3~as%8pvY|(_!|%_*ERp{ndUg!Ap1T z(&Zc*Ik33=W7+~ev1$-N*A195Gfz6DU$qZcb}U~LiW%TDY11qp@2@8$5*QXEcD?l` zo81n^G43L~Y;T$O88F;UBPzU}m^w=K`EU(&ZTkShPPnG0dedvKyv5PTPp9X<`$`%^ z55K>ECG@b7E#}Ux>lg2Ot1372$5v6A`74pnovm4DYrotJTF`2rXp!_C4~VUIO!!-I zpao;j*Spj9@9Z^=Z&_y~cQ)2^R@S9YwG4>c@O&8%w`NYefvPEVhFIZMfV;u8L zN*tBSxs<mpt&*Ph><@w8JwJa=HH z4*7p8(i{AV*a09mgV+GZFg*kS90i07FhSY*=Hc{@|It57U;5Hl<5=9$5p2VHL@j_> z+tjO8uBBa0lUn{(KS}Vu8i2g7ONa{}i4;riKAXU+fLKYKlNcq5IZGwD_6sZ|$2i*8 zFKEtyvhc|XMBGzslt-UY7gcGe{h_QRXy3UHFy~~dm-=Z7dkDSWRjXETSj-w!b5_OX zwhbHBBk>gktM9uujvpT!9K^0Lwr@G)L8)+mKj4mXP{XDQygl|g^-8(V5>sm}&^FBa zLYqLS`YC1Y#I@_ERVho(RzMtdR!o=v;3paS9vbF57Ms+!Eaqk!Oj zVKHKo{Q|k3)nu0ZJx<(wrqK<~`N17{=S0_42m73xPTzcePkQXpr_#juqSc&jts8a}#6(TqQ=RV?qitZ%lm;8BLs~=;*@y3lx?6v@2 z)@QK7^0Y34Vq@{I8sG&et(=2&Yu7qm>ok-dfT7{zP8sb=spr)gt08?oLW?_@-k7#U zwE5l^K{}VmwJ_nPrs*j;hdpBKyZ*_SzK$h|-@$(I)9LIO5@f)M`p0qT1mIXM*M0)p zAVGh9{U`tQEI&gGul&m@12KVIs*>ywa&|sc)320ZW7((Lfgla|TPJ8m1Qr$rgu;@dzCv>x+z`jsg%88cP z)jlNo-WR*v4&H(=CzJl=PdL?pjMoiaIWN5JUlkZOS^>#9#p3_A>!*~^kyCQ3(6pU}#Q`x;cA>EV5!A&qKK-LaY=wF})cAkOdH zLe6yV)0U@p_zpVVG9Yf(9(*|7!EPB4w`tsEK-`8oZGzejiVU@FQnn7dFcM;hJ^+zJ zpa5XjBZhrq$9EUfv(V!NAWyQ{WtwA@*Rq@bmRoP))VKGh_q^wJ?5l1d&MeERN|@BM z1i}I?Cei7%&ROCouy#o|+tYQoxgcoWx^?_m(nhT!fDT~CWn86DljtEknD+DwMVh;d z!P8CwaU(752c-FR5p*qmgcABF-BgS!_gb4Xs{Q?l@dx4PtU82gMOWMk>6(CmHpO@LgUNikcb5~opY)b$8|MAP| zQ=j=5{mFdIN9iwr`ICPXQ$S~cxbiHJ&0sF;$g0KzCPSY$V?gqYERpE){$xOy-=n~> zO0!FmQEAGIaxxX!N=nuXw*1J$;6O&@Z3D zZZfsV?^{l0y{zZx=qM*09!E9eXgYrE1Zoi(1P5KdY))dxCx9>fb)q8kLLyF+`QD-+ zm8Qnlq+PrB0EAykufFm+8}AOsfg2vIA^45ME|~Dx7f6+V@U+{b$y+#SZ0S0c5L&if zZHX?odJB3=pJ#k0SHIWKs_&Jx!&jAAdQ$@#sr*346xtSn?Je4-6Z&|01yzE(Zh*@~sHh;B3-CZ&Rr;{nzrBl-(XP5`bGrMU+tY9U=6^}wdF=7ngxAFh zcXd|)!2GK*ir@VE#%F;KP(si|$I*860HCs0P)>Uf40dC=!BgCH_5-6A4wY*TGg2(~ zlb-yA8u+MBRZHjxw38&e z0@S<)(Ut$(#SbCAo$p^W0(v)l|}%?`U(1!#%uLLz}pk; zCg9C&?PN-|Cjsr?fF~h#hpp>_`}d~jpW?t8Hr?&pw?8aVkQfeP!i-b%dTI06Hvq8q 
zuox#UwqZXUPT04%5{D3G`Cm{5h*vJS0be0D*&8Ak07kxVn)9b+Jh+&GfR=I)e?~ zqv)*eN_%(j#{?M$@HyIf?V9xfXf|@Osm@cXvMpZ?_cO$z3o5=)r%>SIV5>ySrQBhR z{T~A$A3A&}oj!AlGBsDmK#%_CrF8RovDmUh(q-lN#bvPxjaO&=qEi8V_?ImS1h8S_ zlf2Zzhf+fUiJKnRtX;`b!PllcQFC#F+pDj>l%~d&f?96rKV?A6ghWt{5}vYm<>LeC ztYkO=F&nMuD1CsRDL}Al3@T6b0m?t|Ne+B@&-KiMR6dn9!L8*2Pq_w64QaH`RXABH z1&&Q(#t4w78c#iK=>L+305F5+QkQ_e#-t461FGRi07#-(l>OTcoq|7WZXb_BTYR!! z*`t2NPybRvRQr4UM^%a3d~U2{lF17;J|PG`7IuR*nYMpF`oTwzgoN11LDn;tExcP) z)BfT+7`o%~B_zAr5njcQ!z?fmvh$6->6_npIDP%AkEAzVM$O{rDK^SkChsm2@!=K=REjg%sQnrCtsIIecBJ z7oM;#k5st3SIZQ$Y3sI)=#1W%-v5DnIJjU-NGmJsh5L=G<>Rwa)8*eKnybw}tNU*TzZt5(IJu0CX5I?s7YiT`z`caWdo{`%`zHb#|r za=@e|EKaYp(oLLkF1#bpjBJ*ab<{c9v5{6AE7wy}`bmID}VD> z)Bb%2)2UOZ0X|6Ld$DUg4zLWpR84KMN>K4Y8@cNHX1PC(r6C#8Z_K-n?Y(LTS!oI<=!j5Na`Pv zo&6&B4FhujsOO!tsGjGokj#Y2P>yk~zl-e;jeX8MOJDR}2`v?k{#?Qee@eVs`U31t z$2%kcCOBSwWOSGVG>);_awwfRaV$2>QAhxOf$*djU*L!44OM}foWiy^O{mq0em1}f zY9}#|rj1~q$LR~Ny!2X}i1<2(YmAJbV!x!!`{|5&x;Vqd+#UAmv_OhU2(0x zdhODjcUY%9?J!*pfR)O64Q^G}{N&#N6I{J#2<6A1S^=Pt3h~jJWgvA!&FPcF>B*;_ zPESAmEX%CJY?|AMD#Rv^uN_Qh$Iry(FEDUOF+(y%PE&x}Nx-nuD)ow$VksSVfbGWb z5QknIJ$fV^Ja{m4AhngM<$%?zS7wzCq1bQMLn26>vdRie*Br9qX=_`yUXece@lOMy&H#${B8@)@ zXl4T(yq71vET;=zwR7vU|AK!ojqJ=hQ&}t98{xB)5bjt7$Wp8^ZYAm<29-Bh(WgG*O)H*&Bi1n3b)%%VSSvRC4b|H-WMCq5B80$;`$@&Ebu5f}HxlgZ-;{54+McPra1B_+P(- z-uX9J`8muaiGG1S>nXy*T1l-f+ObE5*MHWbm1`H4+r3rF*8YCUI&-9pzLNP~%fohP zmuIKNQ9#>#YXmI3Y0tNX@#{Vv5*uxp({^5Vor=ZXbw+Dvsi3G$KW$HM)oI#Cx@<$2 z0dd==duKO(yYF6GwwzaBr#$U2-GVPh@mQE&fyq`c17LL(1wi}*TzK!_Y!7udtsGdD zM$U|-?|%1%^dzTM9YL)^wTUfPZVpML$Gc8Ws#`dSm5KqrK^X`rAs$ECrL%hOOn!C#9FS8Q69?z!hKCKao&HgGr{ zJ#jRRaQKSm$zpR>ESfYsMl1J=y5>A0ZM*n$fG=r6j92Kb%8vBrr@wwiD^l=yydZy^bh#Q*CTES- zDl{o)zs-|3^m^&*O43z_5%?M=ke2tSPoGYE_w7NA;SE%kup4~v5b6*kP_7DtfLrw@ zCNfk({;KMvdPBDU&|BG}cEU1~#>XgwLp4^e*pMd9&SFFOsq|m};7jS--}p}2x92Ey zpn}7RmXj04IEhL+rBn|0lCkflvqFv~vSDBb!zlINiZ4Y?xDCn?0Awa02 zYhyIUPmhvEzIOl*6$-B1TBzu8Fn5_8T~=(s-hAhdbnm_QraSJqEo{eT({8F<2o%PV 
zObTED2Ix1IQ^(}lG)uJsp!it13tkm~kXRC?zm_UIDtgnVO#r;rVb)EFT0f_X2rT%+ zf59@t8o)xm(J4h^IOFLrP}X@B-j@%5csBeg@fy4?z%6A*pIdrX?Z8_GQ%qMvsz}Q) zxv4^Z`8$t&H~q)|_*>~4U;Q>mP#+_HKa-UefFUG`Y_5upa_R-MtRC}} zXm|OQ`-~iml>-2%{O&=*JT=2$flwHMU3(% zw@Y~{W!9hD+%>FY&uus9ou6+#pBau)UaXs&Z|hX*LPv@4JlDAZEgHIY>qp@5z0z{UHwpJI#hdLo8ON3e()ZZR&jM8bbH+cz~si0Nup_` zK{I^&P@DPgvoEI?UwoC5<&FZ9nT&F(oIu$_TzZ+vX1(IvW*&#W(9Lf}SG5AgW zJ906pS$fJSvvm`<%rLh~MJtzc0^usBhOX8!?^XfJDif_ZgMX*(-$C0;#j?so^R{r* zUv`&02mGS7c{ADsUG`N!R|V#O!3Dj=!M2!8__s_BVnB1C5G)jc>~r0PP9X!ZgOMkQ z&8U;e04(@X8uiQ*ucw!u-<__$<_R`<-Nv!b*QRaTx1{y!SEGfo0<&$SNP)*9t*2@g zAXfmZyXi}l879b3>Q>B2DfLyyc>VhI0PZzuBS%ILVXZ>Hfq`rTc@j`4$S`hk1vmys zvz{RxEunAoYnuV12T84QLG5yhh`*M2b z*%#92*qJoM5zI>c`{1<}1VY+e0opoO>;vUj_pRTR7OVT5-(9o(=Jvx{{&U`a(^YBV zL695~o8?!_;ey@7M8Lj9*->J-!q40{RU?`TD-Z(UGiS~O5caBNh;kC71gTo#hQI*X zfLrqpwU~G}sT$?DlB1hqB}P?={yt8V0~DVfi`$$PhPpgvl@+iLy}s&D>=l`q-ciskRx^4D!HSISc(U&_7LYA_G*EMFb; zDlR)HG7!78o_G9LI*i11c+MMehSJFCvGmFdd(xh_4yW}SpG?qxJU}|?CQ>^*LnP|68!6K*q%Q33qPCg zyZ7$&fBoZMOOHPKWSSXQI$TLpNg6CSi=6%9Sr#OksQgOO2&f)fW!lWY5K*orzjoKg z317*x)%7BL{spP>-z$uCA}5H|&i3%>Q(?l)ep>+WB*#ZwAUK`{t~n^A zR{^FRNE~b#Tv>we6QnUXo*kV^2gZ(Jw(U@QicNUicWe$&cGJz*rX4%B0+!dZ!AKKg zE)QoZXQ3p31?7~b-wXx-;}tM=sI;hNDY{YdoKtmwo*P;0#eRq{&7UmUg~*m3C{praZ}FH-p(JFZHf{`9BP z3!Ff9^z6~p-@PKz%mU8xq$cORINk`>+Qm1lgg2{4DW$}j|J>-`Dt#%VirY$R99c_m zt9(k?on6g!MimIu9nOx8VPbG3B*033$I$^+I;<*$mRIb5g$h?PoV1k5o3*elqdmFL zeZHzAiF~%5RC5>^8BYiIA4#vjzAL@)8m1BV9!w{X53@OrzCOK*HrIrr_P~)gBT*(- zGS7KvqgcDny@-VE8y?bP-dwV)0bd^}SP)Gv89El~4bvaj=A^n(w;b_%{WsU3MnwgN zYtPF!%N%$Ya^Rg0h(n^+x@G5m7Uo2(FI>;@j)%ri;lj{rP+Ayp4xje9wocq(dBK}b z%B%*a)&#W)sRoWVF`5OnYo})Mi(lA;t$~r^>owOqKxd z!Bh=j<3FHHm9XMV~&$$S@*4%Po_PuolH+ZyfbaPYHPab<{Q%;x8Dv>zACL) zdm^k*2pR&cI&Nd=f0E}FD+UAL2@ottA(jBznfG{$AI>_-z;J&{8 zv|?ywNR2mc*pSxqTeWf(>DWxfF8u-?!`gzJi9tWt!7%F>^iVy+;>hZd=5errX0C|S z#}5Cg0F$NeRUe6oh4<18KWmc<8Lj_(XCd?bNZU1vr;>T;RXVS(DAGd#F!$5&LON8L 
z;3mT4Nl({G=mjVv{X2=Zf+L5IvUJY!{OD|2v8o?nHc2b>k)L2X>s8y=WY*9nvD58;#@|EfCh7#d%+Kbmm9oY}3rJ0dmsAPr8}ht)wrn zrhkv81N#qiirgFN4S?|eeTOi`c9e46tO}*Gtd0PH)mzn8OjMm-VEG{c=IZQ~Y6Lx* zrG^%sFf>*yEcLE&f$VJRxe=*f-%WDZM@Z6W7p+YQHe7}tAD^JCi2y1t z8y`G$AONEP+27i=E7H34Ytxo3Tez-ILx5-14;&E0Cb}3La&-a(2}nU20MMC`stJzu zQy01nTms9vU?Fp&@Fy~E`^WeV4z?=YRcfacFN^Ploq#fE6-Zid*)9N+FG?J(kM%fq z^jJE2>@dser_n8iS5ttb{uRWlB*(T9X$zptfYY{Yq{Dm5iii!wRksxos;BZ!SFZV~ z)nC;+3eb&I)~j+gb&m2}O}TShXVq*rrnP&f19gI8aDgy0k zEJbA130gNmx2DuqV%vL&em;xkl@sYZue^|+d+w!>1`nS(OFhutN9`q!Kkv2=+ZS`O znc^rlJn~yses=)k4jVVXK!<7HF3IAPm`Ae(B*k~Fj0Our-x(fC53_ZE zCDtcKzK*{9tqUSM2V|vi*}T+DfQr*dbN6{%>>DGTez!W^aMN|^-h1v&x7>PjTDb~+ zWCs0pYqtX60Ju{~;s72>KedB90I5ol>@Y#KjsA*S7q zq+Pp_s`k7Xsub!VYr$f}dZox4t7D}HBFsF@-{tQ9em4RZz{0Tzp@c3GlAYG zP^^v1wVT#FY8FBp&P+L>piKKRzmQY|e7XVf)^~!XZ^N}iI{ruo6hMc*Pc7M5x}h$i&G`OnA(#ic`K#rt8J5pUyVQKc%hq zU7q7w`0lmdr!^6nL1$hisOJHAC0*`M6Z91TFh3_(u1Fj|$x4P3EldZxdSRKFtS+88 z*a8>8Jgl$4E5dEPe)?qYLt0lsT1z4~&3O`>6BSQ=T#58onlxD`FczdAJAz*D&Nt$q zmG1(Cl?GF69IO#)7N|{_hi2UZ=+h6aX5mo~bbrJe*YxP+W-0oE^jdDiY+UzVmu|;?bAVh7Ie|d)|9{y8X^u z(vGXQg{@m{II*;hg$qqipaUfi^dxlxIVE?N-=p3p z*g=q@nJYIsY424jpHfM6k=L;~PZf)e8#f?PUWYAL0XVz(0YfWRtP1_)$O~Xfb`chJ z^R~D)%g7~4yQ}zP8uM66%>_`>tTa$*A{xX2pu(gRV8ptNBC$Mi;y6}0PH-5X2X`f~ID`7c&4%iAW+qW?-NtWVB8|kTRZX?UAN+!&M zEI@meUv>4uuSkLOmDCaJAAzFa-8kV{vWR?oKyR9l8;2iCi}R_7`D8>TzMkCYe>M#c ztReqtR!uwxy*FriYv-P@5b^3uZ=&jQoc=i+(%^xCwP}crchU`{j@1sY_B$tKg6NpA zQBIg?i`p*#S>`~U1LyUw*0Wi@dxvqLaa4GRRc*OC%N+RLa6oOjW=>2p6*{*W6a;<( zDa;@(QwMgSXCoL_#RI@9Y6+)~Po-~u?TPfYuRfACZ`qKpx%Mig#y12wUbl`N^MGGL z+dyAW0Mv=I=))o<>*ZOU){r6#h$vS8nK$ia;38ZJDJ$v@f)js&Nq-Y8i7Ta>m^cft zJepp8?UksG2Uqk1k_GC5DApjgfx_T$W|sGx&-`0fq8ir+=lh7n{Lm3(n`_ z=0mOl@&sMv3?BtreZsZsS4o3?3fBm7!xO+q z9KoK0s{eA@Dt?C-!CZ}NQ7^O2R9H{jrMMbx;UUk9k5|@1LQI)+Qx(I!i+*`-hI2Q%me+oq?^jAMo3Y}&W?a9D_V<>lAY!2^f$3Ws)~X|tZ5RWWrONB`S* zq?h~z!qb>N%w-71oiw>Bk^KtK-m%x{gO^F|N;kbs$XzD$-N%7tK-{j)+r7ZG(W_ks zZ-cNlbx_um;$<91QSK#US*4u}m>N#c|FQD}Yn7=CJPxYJJ!) 
zK6YrBo%1iJmtJ@c^J*(N4eyF{%{4o)t9xbIvJEq9s5`7)wVE`jFaUJaEA1y;&y!!f3K^SE78GuZHJuxxHF8(u-ZY|PdL2))g zC)ms#N}4aU6Hp1TKt90}1kwS5l#Z(#JTh`7ENv(qu1z|SCMz}9_N<%dlzI!8RH5;( zhAaVA3T#_9{tWZE@ycUy(1I+QUp;MI;$)igD+#>n*V+lOvjfxxptX&pAv*zIjhvci zrc#~=d^6Zp!fXFgTAFQ9`|Q2y4f4T>ffFRv8*;o1UW=NARqp}(>MXXniu!bE6>50!_f#NipF?V@f0$N|#=*sm7&cckrbe(M!qa z%PX=AsUicaye}KX1eAi2*gO@$0rdyLgX#g8Zku75y1NSrDIjKIbS51cJ%e85ne_V0 zd*iT!E3RCL-tYRfZQItgW$P6o!Ck#ZGi&HoqFYHx046pa1z5@8N6;h1O;S)2)}vS!=_Vnz`d)d5uDor#K*ft@i?43Yu8B+Mc? z1PBkGMAhR={EF_hCH3`}O@|qz2MFt}B+M?S1GdyT)f89gI2T~6q_vP8H}h#FP_Kwn zMQpsc@NmuQIX|xwH($-hpVC&rqx=uh2SdG<=eYnD;7gxy0-(P?^{Mp>32i00jeH^; zenh@GZ`LcKRC%i;-Fb|_TBp?6boL9qysvKHAHzm8NOTDNQB81u7re#dNu-m{_?rWh zvq_}S$Bv&)M~@y)d-v>PQgwh$o=4J=LnqRy6P!vHhuEQhfi2N$>^yjE2btg)M2aUGrg>*qun>;i7E@X+zd0h++v}wX9Zn+&V z2G`&Ac`gIu4x1=TZ}MQ#m_>ZB8$I3z_tMnTIj}W#)~!W4yjvz~npcBorwLop(vWrY z+g8(o=CK=n=ku*mV%HdN{(U>XX`Ml!5Fky^Td_aqjnxPS_(*R6e6|Im>VN>%AV>&k z0Pp}v&w^*hM<>yh-36H0i#o&0NRP2SylH*fiUe8JhO4i+3f`^a zjd@CiyLtpt86*iXvSe33ByyT#sJ-w8V)Y8pdr_psjh3(}tBPWqqvpX{kTx1*a+%Iup)t2_CSD#X(Ul)# zqtk2-L$|v-Z!(Oe{vi`}bxl`(CBO1q0By5pCG*xRhY5V^09m54$^uttD2^Dl`1%D; zp;N0I0g923_n}(jedcl1099RMvSvcOEmxQG0I0^%$k|`u+{a|2o4#!Eo)qV)aigQ- zoD?^VPWs`rd-ooszlYL3fbgkPXTtp5FdH6~;+n3936}3eHp!=sIJ_la`w|rM%%fGa zUl5NAPMY+Ln#Q$GXQq|EvS-OIvd@LINZT&c+%H-r?996jEf=A*bI?dXYZ=2fyYv*b zCg*KLOG_e`1Kv&?C?ne2iKfel7GJpf=OI27KE9pa!#a6Q`Pzz9B|ELc8tdFN(>9*1 z^K6rdpmS8CP2x*mVjE(Dt7YKM19rV;K-}DSAX)yGjub7Sq^pW5NT}D!C1`EDVp(B) z7U}VDI(qnYdTZzYuwC3g(8Hk|E7GPbHl(dvH=|;)HC>4$dE>@S$c;I?0LvBK-GVJq zhoIM`YNft_y6_W}P#Qqp@+F~%O?Q2OqM@NxB*~ydK$iiNz)TJl3Vp~RIRm#`PK9bB zO#v-oE)Ik$02BOS`EPtZWpG&DgaAhxD00D4e4q>=vA7U`-4}a@<%`US>l;NjgL2*B*Ln{*xrT5mKo|6NM>W+Lwuo)Y(Fs6Om`b9YFAJuZ0+zE7gIqItkv5A$Dv3WU@`i<01Qxu3*S z{E;WBWe9>x94iKIg23o#qzuN@n$WCrIp$Z=>AOgGjwJH6l{-h$R>|7>UyMB47T7k> zx1%*htwY*odQk$~($c04FA6csnO+tgSO&zGMKfJ`BK?@O{AR@9)H3wa6MH$77~p9h zp$_T|j%{q!liY9om4O|<3WViB$vDKE#ezbWKd=~3GnK}Nr_(r=0{Z)V({c2V-+Fyt 
z00ylstYt&q8qB-hc>DIWdF#foAmOtoxM^2dU^;_Uhi*>Y^4Xr?=BpX3TG%NjkWOoA zt*2Ytzr}nOl0%R}0FfYzh_yNw-~x06U=@%O0F6f_o`MVknSfT1rV~^u5q87ZsZ%GB zI*uTL9fb}o9YCYqFmz+%q`KaaR8AtD92sL`0f-tJTm$G@8`ALCUVANl`O9BPfBNvl zsGFQjgS}Wa7!#OblUWHR09LniJ5um3LVFpF{kGzLs9-^XAXC=VW?*wCJb{ue? zCAuY9257LP;?ex8d74|qBaS5mvn`3+{K^Sxt2hyE?m+ZFmYJ5UFPtU+r4FHDKs@OS z8fm`(nMlsPLMrX6WIgMxtFEJjJmOQf?a|9>fGR!a+0U~!i1!{jnD*@6$K-1-zdh&z zA3<`DJ>s+Kv6B}-w;SExylUXaxS^<{R5y3lS?ASXu2a7E9mxK!qLll%&U=pt#5=ed zx66Mn5eG_hU83Tai&^Hth33FAAimHXUC!@a&w;u$>m00@H-Hp4FelYK&v{8As%LS{ zx^o|$P)%7WAq{b~Ew^lR;sh=*}#+?S-uVW+DD4XY0 z-XAAdD{^($XZ-}=*UaX`mW_yTb5T9ptmLl*|`*tnH}z4cIkLWt#6 z0H*U=0KF9}mCNA)Yzd!1N-D6o?aRi=0JmmA)^Q)J=dAPFi?xy!@UI`5xrfS1*8o)OrsYmvizH*UH<>}&csQM z;!5K=_vk{f;n-O3^%re~_wRHBUScC~z_QrJwm=*L3>YI=i~Ep3LLiOi*6jCtS=n9P zQ<*(g>h9^|mDE#JdA~2SE5CmA@+AxUIO;O0W($iEs?3gb<|bWpuC&4yrh6mBwfvSm z^%=GB14H+K;)f&P2si>~K)?y{89-XvBnvT@#L2OJA0{nb%bX&Ckx;4LjE1pSqQ%!i zjwGfR-=hyHjmkD6knZEOk9qNlNG6^W;`|TEiKSeWe^*jRLMBqdtNe|5FWe)j3-;ma?-5ytlw7k8su)ei}_={A|5oznbn3QvB7;M$y&mj(eR#7ooF?FG$GCjE)MeW^>7BhBlLBA=dBczxeHjEuoz(vb)R z=PDcypgYV2AU}~{MsowmihV>}jEGrv^eCD{krR=HsNL0)GZ8^Ki%?nQ5s}%EZ`wz@ z@&NEKeE7kq;uhLCL8R;I)u&``aWy>k%xd`8fB!}J)vx|TcBC>(uvhjXWFp4mTq?V% z%`t)&7zrjxl@1dR{NF-^_x5(fu0AaKvm`1ETVmQ3U;Rt#DZ+(p+@lG-dgYQx^0Bm* zyW!?Xx56vGe>L2=@vd$28xBQ+MGCPcrtV`@nfi|=)D~|k42?A;3u_MA7Tt+ay9hFg zdjdhtREk8=e+5%w6=r0*C%7f3lU*aQc3JH9v+R0BdUqb~YeD9{aQpThd4ISo4f>DahwrtS zi5A1%zWsv=6}i+8o>gItW?ioZ!pgDA79n0$d*Y8EmCaZc+2zO4BWaTp5#O*ymy%eO zpNJfrI5mi9e}zcAjOYko%2fYWZx&R5;`>;{Jls=Q`ap!aR-g5VvC`04evB0X#=IYn zz#<~xgm@8q)nz(Y1Ry!_s88C%O-gA}xQLZZ68e)r)C+>dM}$K5yzzj%rAa;#C;>Vd z#pc3|oSA)YBuF#?B58*)5r{B#svZc62+i8McAb-!8qF&jKk6})0+|sUqumo3#MHv< zh?VBYySuW#)ox*{Kgv$`-?X^uNbz_rfUs%tQ>F3cI%>&j)3^Wt7|KaRK~(W#eNiS| zTRR|~g!4KcuMB%p8_ZcB+dM=vr?&TqV2>O@{6)x27D&*lHgmWgKGH@GZ(M&TeDMCq z;m&vWM5w7puGLm#_F?7(BGIZ(PHiI&IzxzwlYpk3hABs4$3m_VN7cuMyHeAh)CUj- z!y>g=-KC!zR-lnt^7hzar2$qNGqq5vI^ngxQd-JR;q^1cK#FZEW$jFeEifANhYxl{ 
zcz0!c{ZIt>p_w-PbpNMtUnUNB@7`0`10%$H+C6SxA0yT>`>+LcI-~XF2ZwE-!R8tI zfHmz{6EQx)p4`kWkPXtUE?1Hb5pu*ciOqdqWlOCvE#CtfB6SR(m05+vc>Oi(NW-T; z&MgsdR;&`MRY9cT$BX(hkzq?ifzeb$3ECYL?S~`a2rLx>PKb-$&4adl^dIn~vnRTq z)%mAsn#{_j&OXuBeDsbAI|F-yh9+QfR5tl&0aFh)^%;jOFku zxYyRI%?4O~l!1GaC!THH8+AcUQxw#q0^8u>HM){eQcNOJtOaY)OaLu0r9zbBXI-RI zFW5y|@q?@!B1dSJ*Hz}@$3L5gfz3@3D3yAAye~3Tm8z3@ZBSn7>bD`<7Nb4HZ|<%Z z$Ds{oN3=uP2SZk^ENOyGBdqqK&Be7F_qN?KRY(iDA+iQh`||Uz!+Y=jEnI)?M)>&V zr_yfTQ;m2%P#*QcrpOt@*vv!>7)%CYyHtQkA9CT*LWHv#Lnq27g9^~B4g*H%kVc2H zm^Ta+ye4~DwAJ7ePGmPL6O1j{&psA2Og6)O7kVEfkWtfH4+D1y-D45-W0707Ove0$ zYc%EiBD#CK+Cq76U*-(EcE+THFf_gQ?>)4ww;{O?v<3G=nKrQXGPb?D(%6!3Umv`1 zFaoH~?42QycIbzL>PK>mSNpMUu(5&3g%QpeXT)4Y7n2!=7x%WHlQY_N-1Jcjag-S^ ztYl=g$XIEtt9MNZ$C_i}f%xr4ph50`54slJ1X^jau?B%Og0 zrDftjIg!h@Uc4@h5GSmOsSv3Wac4ggq#<2p0h!DRHn-%dLY^7E{qjM$`QevpO56km zyIf>J6ON`kE&|k!aU!_~BDoO@w7R)y&hA(s^#P?#7gy@P2SF%~i;2(=Y<{Dn4NMkDTw{#DP zn4a{W4ArwUU&CO1D%t)GhsOI{DXJ|s#CkD((<25D@V?nabIJM8GzRi1#u<~7kUC7N zYW$t9lbog8r*jZ8Do3v$o3l?jbX03I&BhchyNvBV-=v2si@G2sj~b#>GPy1_36^$&sU3 zfGJNT5It$l#@AA|Jbvn_XGFd#4-I5zd;r`3E_O+_nS?)0GNE^Jf^FTyVn`uZ3 zjLse#V@Mks#zgF}0llz&QDtG@`#4;>c+s@VUw!pWxc0lZ!t=jur?2?I}*8}V#&>0CbT^te4{!tZ+lC&*qr}y&dh5Et146Vn#ikecX)=$H z?Jhf-t;!Y`4+YDbxL zXjDVJInqNN0Y~7h2sj}=D^i~B%m^gyS5t;W4w~XTY(5Zxz+SoXw7gXC>RxTu2a^D? 
zH2D$MVmU43I@ph{)NJcrTYx3XEDi1nY#C)P>Cxz#%`RjQU%Jx#%0BjM`MrJlem(XhbBaG>x>R!A%+3b==t{vcsZ6tw%GGjV(1~8*RGn4rc_svd>kK zQS(e1%9pu?>Q2XCoMM%?`5-T!**RvbHM4`uL|P)SZR|~?a4*ymm`?LuRw@moUvfo~E77z-4f|p=#CU3mZsuYcY{FvQD#A?e z!SWTNLX8NIB0b5(BsJ|E&q?m4CX~iEKo}NH89mGqa0DEI76_!7q(v4_`XmVOaL+!# zvk^~HuOECI-hJmoxlem5 zeDV3W;n71aRFe;3wxnJ;W;~&R;KB`D|$++H%*Ho-) z5zTQQNei1yCujrLMnugvnl^9{rY49qa}ZCf=&e)+xNyzxd2GK;Eca0^-68|=I>%mC znyR}=&4Il&&y`*TmmE|cBjWv-{?fkb9@Lkq(_H3Fw>3pf!8~_N0ykBctFdWFq^2#M zb~rP95*~hb1VCW?vxVb20)hOah*zoEr`w29=OsEXJ)-9x?cJq|^)){YV{PNFvwLg1 z<%DQW@-V_?b$}!GuA*u3o^b#EjtK9rLgeKDf4*h{KoSVBw8PjKt7&M4ZIPF>!kcSs z_q-Cm*X9|2{orPJ_0>1S^;h2xUwwUBzJs@vcOz`9ZtKjWiTrq5fDXy3)_A7sw-B_T zxIRO6jU4mg40$9`42x&oj`>J7yO7{STvHz~-cF39pRN=>R3C}fYx)C5_(3*>uaWqe z*PaI^3-`5eN4*fmG!fJoHk2?HJAZ!-s9dwBFJr03Qd3L$a?(9fxGF3?Z=a~_8OfZ! zcTLTye=imthbH+wtUUrwh}&c4IVOa_)F+Gy$+76gVh+G&8u-H9*wSVaB6Do8fhont zI(D@zwp*3kw76aG=H^z|-nuLzyA^)Cvlm`_^?G>al|O~|-u+v+bLXD4ziZmm;!1dY zfVNkUB;`koZ1hot*odtW-84e_v?CH>5U&&AbWJ$|UuOFuw>y4-2BKT2yz4 z7YS)^*VknOt9#aPneSYg0IaWF3R_$F>pj-4a^Hrx-+Vv3^xJFU=0~4|J3rjjUL5QQ zw;B$yiPfegTiRHIo#u=Pn=Vyk*5-7`B6tnW{Fo;YY~j`9Z?C0mB)DKFThcCBu^s;O z+ut%X-Y^7i_c z-we+grXpJn?imued_ojBBH9EY;6y|i^j8sv-N(axRYslfo_+Se!`pBD#XKfl*tlW~;Y_2c{A|s%t5pY7>2#W_g0**iqfg@>lk3?h-MPyI(s((W^!t3%Hu`B<>-{1L0 zi{#!3&;8en;k|c0l6~;HG{*8FEDs7YIhLo0bz6wWcE-3CLtDJ11$ekB>)Qc~P#USM z&e-KPuK$G;LmEX0?X2rWmQ&+b=oGb*Lz(&lyBr0 zyIY+@wVd&GWMj@)PK#uT_%vlHJ@*vX37JlrXq+-m8J?~ys3Vzb0yG~6ESN}aZ(h_3 zcXp*)4@dhaa+mgb_~Re1g;)M?EqwIhExAsU2Z)mk(jspJ_Km6V$_%>%V9!KC5q9LYpjCqQ zr{>|*zMO#^XtPtB2O>t^NZ(#dQ@^eS+hq-l5i;b){&0!y?z_p?u8w}{XkVQdG#67s zuwQz^l?p698ya%-gStNZt0zvsp#D`!3!0u({nWF3mFua=w0Yor*m*Earfpv8Y$D)< zcsBdRTh#^uCa~NCk7sb_NvcgF<}q!zX@5;x-}m49FnsdKr{Rt3Z-)Q7bt@e1iCi1b zad*0i2Az^7L0#Kzx}Qh9yv-ocZPYvoc8f7)inxu^bUz7vp4Sm@1kQxO%0I3DXO2po zsQ^zjMg$6raZJ-QQ$00Z4KW)(np_ugzYwJ0A})&l4A^?z`d(#%R+H%z-@qx$@DGb^1x7wvEj@mzG1H?l{t znxKdVsZB8+HUR`4%g6A8pY}A@Xj^4zi}4zPp@2BCnM16}gG4;y1PBSv)0PP|(2HC! 
z1kw+xoLJ42*g~75as_#~Bj5-uF#>7kT4D%X^tnKw@JrvwR)>kNtXZB5RL-T5ID}Zy zBDsr~IguVbDykDNWFkXzUDVi?z6~pyKGZOz%|5EhrC;bJP%qa-3l7jZY@~E zTtYRw8gCiQ&lpwhj{_0k6WK~`ZC?nxd%L!=1`F!eR@b6-*osU8Fx4@9-@abYF)Ik9 zK5f~aQnoZa8jB=jDRU{Vu|Ne*=I2@0wQ-2wMT(qsGa24dz>>MuWO{KX(ep-40Rbn( zQvi89Rg@kJr)#J`CFip}Zg%3Mh7jhsUSoDk1pr?qWzUTe{9VryIf ze5U8|-5#GfsV(%>rtSonIyRCQ!qN*mEq@m*=X2b6tLFuQ?3lE1+#AgEB96Dy5pV>a z5CJE|PYB?AWcP>reDu@2wz3*u@ZhWoijTp0?OkuTBj5-)0?UAa6XIp)WtVYk2tfNz z@+$dqJErcxnAUb6s-^%Q(gIsCgr)FQcHP%;3M<3lv9pdqf3ak%mNGOl-;I^R*iR#> zRb8Ig5pV~dJ@M({iuI==EqY$t$J}OaTyuOQ>@mL(+nLJbxqnZ<7_0ZMw zA;`~mx1;0prdwsoMSWfB6ynXWRhZ?^{^Z+ zK(JPD568rNhrAgV51o4ioDk1_7e1)~&5yn~)%cUIJ-Q&*jA{F_&YGi8U-6xBAz!g- zjSC+dMT9(Zdipgja0HeV0Vl*u+VL*->>)5b+1FcGa*yYHppKMZFDe29-wzFX$Z(x3FRI*=kSo48 zDm^(SQLC5f2+TGDPKal_ehrUYv2JxPwMkGB->!HT zhJ{Bt0yBbu6XF@^2yccXun-7f3p!1bs`_g_CZ(^PWi~?BzQGj=+o| z;DmU_I>ejf2si?czI0BBq!XfbgFJB0RDnr@; P00000NkvXXu0mjfDj|ev diff --git a/docs/source/media/picture-08-13-2024.png b/docs/source/media/picture-08-13-2024.png new file mode 100644 index 0000000000000000000000000000000000000000..02ec7b1a1b5c88aa79fcd561ecd8f50cd2fe9ea0 GIT binary patch literal 390124 zcmZU330%_IySI&%m8F%_Xf9Y*I%TD%=ECAMTGp7-R$6LqQ~u2j zVam!Jx5~-|)Lh9@$jn_pL_t(GK|pbN`QLlrd++Cc`EWkJ!+Fkg4(B;=&hvb~Px=KP z&mG&0w`pl$EQZ8nOT9riZ_s{UK$y`@TEFk2?2U)jeYIs&eL{ zrm`>NXaCp79yZy0diHm9#wL>^;vZ69Q^Zsyy%@V@A7UwE^dvFdTnw~+cBu9z>RJEr z*Z$8>QkJm&Vb{4cs%v+2Mt=eL|B&+K%Wix3U+-xhv0oN1tew1Lxwzv?xBvRuYSNxV z&HFNRw0^u7YvGoWe|NzyE^DFV@hz>tv|lmzIR%2c4KKVr_w|S03C`W!{UF!wbl=R@ z^GhBnZ!TQczViz&_E5LhUpsyWf>ZM|yY{R9zI9+y_w>)Dk=izU6KJDe*crzJZz*yvbQoOX;Qp7p<)I9Jft70>|_IpR! 
zlD$uk?#P|~?eXgZ`<}cWUf#>opXU$%@Zl(d@SBHxJAX^*Pd$js0nV_&)I+si&#%2* z-ei#^9-$K6*24Z=ou>Z!;TAmMH;+5>^W!5L5${${rO}n!xVt&xO6c>R5TfwdmqR}K zkCt@5t=>>n5)mV;Tk>D&q8*Q#+W&|S{j^=%d_wo$*Hk_Kj{s*apgetZ^T<7FH*lN3 zSLc_5ZBzfcZTDBv#{eF`|W_fVRA6sibY&X(UxBq;U_LI2X^+A;%h%^ZS&HaDWc{67W?vcu5jb)I~#x0+VM);EL7`!U&tol#K!SEh5<4A zwrt+D@28`iL4W;l;?BmYdwW8U{rtnBP~G7TM}FBetUvp4KVsv%dwO3t(UZSS7}swC z+#%R*48HTb1d!>zGbQ!soqH~(#_d1w>4&$;Mt|u{>25l8?ZVN2X&rL6IJ>Rt&fuv- z7Y_fiL6|)Il>F4-f??dQ*<{fv?XO$po0qpAu-*Hzb@a8}tsVBEmOXb5Oq@oXAa3Qi z-RDX-@Dfc%Xh&j94;?(8+ooDx6%yQT- zJgXn8E$|GsAlo``-{Vg6zdmleq2TOx{oM2`_eavoKYqU!U<+;88*6g;-q0<%CYvr(3~*FHTbpE*Wwsw;t~2rex~ln&dG~0Ut&yRu!!FJ{(8vx zzUb{avn z+nj%Y6yXtfXtTjm$BmBO4(*PF$xD+vV~Ec^PV8^UwK!Y;{QdLTiv|Ky!G1v%kw|PN zW?X$mybzsrwPE~PgKn&LEcw&tKanRgOrcL6J3Wf(F6&l)f*=0zp2qvkB}}4cK>fZtA}ew(LLHUy9u-OMBb1l^!+nwa zPi;_{FU%wVpSh>j-4U}xvK@X#CyiN77RGj&7c&!#7me_?C+)IN2E)g1f^IzvZ#&uT z6zPD8_|0z7q1U16WK-k=`>=u+g+e<@xLz@^sI0)RDCGT}W5lB=KVgq9^RDw7d-n8J zk+8JKx4Pf;yt(zx^!3H}+uj~|ee-?djoT4su%>}V+H!GLsdr=VhGLx$pFiCFu<3*F z1AM+mh)&2$a1tI?5EK={vH1|7zw;JTh3k0yb5}a*s(4ta8t5K5H~D?aVe-}tae_Lw zq?g>a<=U-3uOeC-(Ui&r0b*%tcB)92*s%z8@~U&F+g!)NSjsla^mNE5pp!d{-xKM{f%Lwiwkut}i}Z!f>~QSXDSK{_E# z&7YeeHScN8S(UFoUNv4lvud`QuO{n#+>H3)!{+g8QMH))L`B5k$cfJ%YfVpB5q(=R zyZ;0n+|1|F@wEb55<&?z~ z-}d|zr<9f)N%pX*bl*3Ji^d~nFLv%Rzhu6+f7Ghcq0z0&HT*`;w11ZWwOfIvkmE|M+ettT9~l$utjf(DBNJ!Dp2p zZW+6p20beJttD4)3O)4)57(3R8C+EMFRRa#w>`L#2R9fCs|!g1*;nX+tfpV#ouhlS ztqZM{Rrf9*s-6wL{kjpc_|CRR7>wK&bD907-O^yixJ1u+8y)GJ7d$$p2u- z!NZNUtlRhjWr6rUTN(YD{W_|>?m9vGVt4W5w~uX1FeWyup=$RfeS_l1Cl{j=MyV*> z=!}Rj!YWeW+;4fm$-H#xdf9_-OPZU|YKoax!qub5pDRYsjUHx>gaH0vefWYHdJ+_J zR&ONo9b#e<_w*OaN?74VQUQF!~GZ`DW2msd(9G>Upf`Th?RzkDH0$QW6d|MLAV ziEQLHFck6qU*B<6dj=Fk_XkRbS~;;GkVPEGj4zYv&vqrW+PfcLn4 z;Yuq0{dT2clpNn2!@6INeF-=d=o_FOyFRw;vyn6tjf!$x%NVeE zZ^glMWRmznY04kJFC*|7a$BrV z5TfO4OyA;Ux>u%GRes6wx~nnN*cCroNRHBV@x#^l=Fx=S=>RO7+DxA#4N;<15PgXP5YRzXoXpPCi3Y5WYMuXd0Xs`Qx`=sZ?oX>zLaP?a6d>xNaww9|6 
z(e@*yxM6HRnc$?@L2<}~ZRCrjC*XekWw%>S<93ryvBIS`zPn9_O;^ZIt54-Lk>AUj z>{Y=cKd-N#@9EIvAp`iy=1WO|YC0!K>Hle%tK2K5x&4^*$GX2VU0TjZ%K0#ELx?2g zb4wBa_IlTn0$~w${nws6MA?}Q8-oxRcWc3KZuq<7hhLgDYejP8=fQ`*%Gd84{$V}6 zj4wJI^TWi1u~r*ZYs;DVJvUGMn!ggM_3MSe6vI10;f<*9wUVL%tJBhbTC6iHqU3K+ z2C&ubMq>857jymlFF!59mqWOlHZHZSZC7jD*ULBj&xLz?YaP+lw`grhjnvwxscp~{ zV@=W0(ti9vOGoqEttlRV|L}jMKhXcy{_pzxe|J3P>wfN>=II+2d*enlG6EiV!kZ`6 z3^f?}hku;E_j%Va`0eA@{)At@aU6Xc@oyC^5ZYB!y?rC@+CKE{ThT~YG}z)ldbn!p z|8BOi*!Le@;-bJ7{@xe%xx-^`>^phf_PDJDWZS-d`#`aOhPz($IP+iXnonShh`2a} ztBnl`g*uM1KMs$D+1R7d)#Z+;qh@`3yXhG^xv=l{GK<^k^eJOH1fZerCFfOza=(y$8Bx?du)v==-;ib z7b4L&Ze8(+ysa^hW)8?n7kkiu^#A`P|1;zN()9lyO(#3&|E>AIB>%7GCFG4*cld41 zlyQ*%*_r=R{@=p?QUuxjd*%Pj68~xD|7g|N8L|yz^WS?0*>>o2XS0^pFIwk3PW^%2 zFy3@G;UF#g7Pe}7ecdDJv$W!8MRfG$M^^^Cv!C;?R&3d~bJs7Ik!NlPjVN2z_(_`g zI@^Z0zFf1uoV2cPsU2XlI>xRsk=~?&T;?arOHa_I9CHe=HQBy5C3j}2t7ai@Jx>Hx zHi&Q1FD1R8WI)M7&+#{McnQok)bgO@%I3hxtXwOeKiL?B$yP@s=Ncq0IrxA(XI{E+ z)}}gSR)G0Y;OnSmYoDmgj?3A-W#;29)KHiC&7!nw?!z$C5Z-gAYY`$vTYJ9?r01^v z0KzYE8FfI+e+UP-U}M)ybc~&Yl4~XhjxP7D4^plOzd0wx@{{f*-DqiB{pYG5^$QxS zBl&riFh9i*=~}Tp7xaA<^ZNO`B<*?w)d>kskfkIKJ!{ZXI*lLvz2O210~LLw-<-n7UXq-#1<@POQyYq;vZn zS*@tVL<#Vve(KB>QQb*L<7Y0?NSmmlx#c5dUX_Ev5cZ1U@s&Na6`%{GW6%gHGMgx> zoz?M#6R^)|xvaR5*YdHbB6C(;z*xLP7YMbWKFqrUw2=|T4x-bt&q0==o5#oHHN5ix z10J(x0ar4mJC~Q=;MXibIyED#2;Dhssi0?ui<(12i7GNF$IY6iyImlI?94&O8p{5@ zk6hHNHz>;P3&Hp!Q*~2ENE?1)@GyD}@29dQY3S;k61Tl&tFdsBJXcxG+UgS)vAdfi zi8gD&$0Qg$J%E2*-74-H^fi#4V-{ei%+#mfdA2;S=GGN4$G;015NqPvk>+P}_{*dT zNl4*c1>2}WCuTHeC^@OKa5*%6N_A+VI=7fh#g^CTHFrpWWX0ARDeDl?6zyR{K zaDZ!F!voAQ^+1D|S_-7h<3?g8@9a_&s)gF*h~Tht6J0j~I^i9gIgWgZLE(ot=}-Q~ zxEd$KbtatgV=sQzW}D>d_3(%U3dY<+e%b%S;`;#IQd1OzJ|q#(CXY;ZGx{`fJ`!L zl&$Z`l7B20pAkRk3hoYSFPsbte%C*42I-BuRm32AkOmtbz>hAaB)_*2XX1O>^Mh}y zB*xnC9nLtKya#GSE7G6R4wQ17A4HieoKjczgg7L@t};u-^`qU(d#s$r`^BG)`5bBC zQi-vyl}<~}>>Ly%+_f1JGBR{Y?a{j3;&^o#M$o>$tSjsxg2HFM#PmX&it83$hj$9D z^nEeX3j{{qk@CWh0-2#vWa8N0(d?B{WAZ_#&NH&G+;N@pn!;pxR`wZ 
zGv(Y=hPrzIwi95C``VZfqM62-mq5*D4}S=sq(0kjlV z>{4=jXG@eqSC0mCJd=|~4s);$LL;R023$ik*7v^PrK$w!RqVmC=5KAaLr2KeXCWMG zaaW(!r04doFf*4&C6tqprRU~Z@!cHD*ojKjL_~SvqXGbALNeoRHyk(bxY!!tCg>?# zMte&yHG?bTqqC7LsI9E2WvQeX)+Ec@e7hd=s<+JZybZ=v$qmbv`KnZQy;0jkK$TDy zZYUzHO4x|0=0=GJt7p-}R#b&xS&yc+O?FWo`X(cUW{aRM!SaGf%X`SVIo<9Rv4!Cz znCtqAl1gqVUUfR3FmKS)90Hm{dNBZ2%8#-2ZkP+QK)y@FZuL9*ssf?U%npONxXxw^ zxB}c{9~mULl4=t(dzw!pPUU`Zsc2MY4o&fyOqk7z<~U8~_|x>Vt$if?mY4^B%1|;u zG`XicSDUeVI8_XQ2p_pZqh4_+XWVmKzMBcG-s&`9yZi2i3`i~q$kRcCadhw)10D6U zsKJ*p97J-Y5S73sk?8f@4b~KFd>QD1x?_^Wmm9RUCgzKrYL2&>I&Dk1sGs?GA42sWIn}1qXQq&O!P39mA&}@g9IqP{T zAKgi@|3c{IHpr&Ifhfl^#~z6(DD%b{2@gtS>9l0?n7wMRs(5w_i;ys~YJmBZR^p5| zKG9_5F{&t=Z5upj_lxJmqZsdu~ElzFsI)}{V$W+9XiDbiYSAtv$>wpHqN_fIgYTJBMH^;TflzXUop@ zyUxyh5^A40FZQ9&D>k9Fr|N(fq4Yc!O4Utde+?Fn+6OaN^~h#vJs!npNdQn-#J5uY zlc(Qxq|4y6HMxjNi_~i(cuwTLY^X5Kk#o7DMdY5>4EXeXE*>+vgW$ripF6dNeqJm} zS`1dTjjy|ZtQECzPxvJzknD?5m~?fDpy|B7X>v_se}~ynFpwoPi};w*$69P_n011^ zH__Zad!aV4>BYWP)zZV`v_2^dYlx@konN3kH^{fHv_R3r0P+EkzA zJ^R`NSFx>E7O6uedwE=@R1Y@#2H)6>Tpy$chV(WrG^zD2Q<=C#574Wo;y0H7&DcQ{ ztM+UBE#@EWfse%?f;MG=Pp_c|EKLrr?g0cRVjj!>G{^pBP=@alIp@-iC`OdeWtcA2 zBu0d~aT%{Zzl@f76xp;4Zlt*4PV(?FX9yX>ezZ6rc}86|3#U}38LF1nve|ElI& zhj=>$o_rnLLo|1*4(Rf{#N8swRT~%YI8UrVV_02G&1++!@k4vh>nv(FEqh z*kWU3S1Ud6Rv++PbMSvVP>om?Zf$;;YwnMg-jJElSveGF?c`qkJ5aCJ`V<&P^yD@T z-;EHf+Q2Z-+Ihgl6`oF`72;rs!_D(oM=8P1=wPX&))F&!k(+KAFB?Zfq@((49WUy> zJ!dynOV5t?{%wv&s}9y3ch)ynCQqMloKRV|xT4P&@eN_;_xV~Wu9t^x&hB!|PtGv+ zxiH>5}bP`^?vCF0P&9^kO5Uhj_U) zlfRAO(CHwakrxDiSt6*i09hz~Zi^>Kz{TX&QgT82O3o|`%J8%k{*p6Xd(k9=1~*fG zC{u>?dbG^e!EQQ~6ps~t&=v%SEK-Xn;DSAj+HQlu zq4>?M2OhL1=NsPRv!;O8q+6qp8QC`uIZ_ArTETQIiXwHQ#u>PTE!l4HeGa|K-nDRU`jYTG$iJM+YO(L;4*vJzeiNxUwC z+4a63icUg%P%oAu^|uFmBKwo!<1Tol@Ep5HUYui2Ca zcCxHn{Eu5?!tfLgyZ-~ENaPI}e38^?M>gFd+l~3At#5uj;F$|uu-;uiDiHO|Smo>i zuz9`)*puQE$eX5#DtQ))nK;{lt13O?S%P1<&>zyd>er!v2{}5Z{ws-aV-7gDP7-5Dc(sh=$n+v#2B@(N(gK@FccDHz{tYj~zuNEGHQoHIn5U+rVzCAA6(){P0~??~iP_!dwPR-bDv!8+ro_pKT5Tmv 
zd3vD8{+eh#px-a`{f0X25OZqO|WJj=ol(c2kB(tXB{y*c?&D%46(Oty~3E0^7T zS0+t(n_*j3x8wCVq4Q}9XopMdo5}0U5`D}Bxz`0nUGaI@bZ^=Y&et`xx$Zz0zG>^dGTp}-P^0!O{Z`lJ2Q-2U znaXhe1;r2I`pGKEU|Bfhoc%T7F8V_l(WR=zw=~Nf6Q_Wi%f8JSuk{~b-LdgdcV(Dm ziRVeOjEz1WImWKe@z-r7Hqv3ZE22@S7=5++uyR(H=|INr-n%h>3+qk1z@&H~udKn^ zhmrK6d2TO&GCn|ko7&K`^~n`bE7Pkb%E5p{VkA+{vYwTP?|sMnD>WBnms$XFrha#k zxc!qu?ka)@^9NrH#mmI8mdrg*Mo@ZGKatDQnVfv}gfD|S#d{1ThN|xKmygb-y7Vvl+`?bQC)+L=x1A>%f)4kO5T) zreCSfaB@bzl5P?kv8FZ+E>{a_J)ywhWdvteKi5HLB`-~G?@eM!So7X-|9}{74O6I+7C_USUJAIOi)xSP)DkVu2F=~18ISN>J3vX z!aLwNt6S>CGP38y?GT|;!`50H#i&PMWZl*>TF}H!s}ei(jJRX)g4MG9Sr&Aq7gaJ& zdBOX;)dn3)t2H8gj1p}#KT6t(1>Ma^*_%G~s^(d%AEvieml1W$#0s764ie>mExZHgqRkBt+%v#=Dq#QJGbVF zG*6`;T6VD(s+8>eCN?J4&(=t0-fHVpJ=}mv4(L{9r{+Fv(K5MmOV9c0lZ{ZKt&-i1 zP3?)8H)H2LP**LlI}rGy3gF*|6hXn5I*t!=ve$kQjC6uen}gRBji0fXK?(NrsC-y2 z`BN=zpu1_Unu{mex7a1^R?Y6dM1`fU+-_wX5t4e{ z<`(o?P*PB0mjn#kAubA4*H=~a?|BDZS(ggYt^0VG%Jj%9AnwkFqIC< z-JH3aK9P>zt9F_iS+t_OPiRy#v1>QpAh*A~QTq!*nOHuPd>6{<89vRMclHA()K7Y^ zk|V5IwyL``{aDt>H0qe*Z*q`euRVkbOMTDixUB~)(6#Er5ApmdpWETyRH6AXVjg2# zk|)X$%P$O`^P{X#t zvSg7}$Wi7d#wX@~2MvClb3t5|U3PZ8j_%)Xi%zdF>sj($g2Ipt+!Z0^c1tF2^{7Z> zGf>t-^%W<)^ps=Wi@_s{-9Vg+C$nIOj0J>Uz5N5lxfYp2_Ovup-Dx3{zFnNioLd8-zZB5 zj{NGnQD4XYI{ZrZ5YK)N%M}Y$qRIi8g2+H8Ew(hd0S0;WA<0xGJt? 
zZgAyH@3=sNM5%Gb>nhmu^>c+lbeBwbv2_zoMisYfi&=M3*Q*xgefDngN=vq{|lA-z;eup{U0@(GIyzSFz8uleK-k$CrxS*ZNhn?5lWvUJdTD z5)>R7d~$(Lc2af7g53Oj2ny0H!>v?Ks-nRbD|0$bL4vaUGO4_^($lHanq}tZj9TGX zF8CW}s_!bZ5#OLTizWTk3Kt+lF&dLJItR9Dndg?V_Zo}6n_O1)OUi&eYdJ{En3|KZx@J(8{4bXs3q!L%xdJ)%YvBxWW-7GrFFCH7uY`m0o zziy3vx#g|WdR#zSs`MkHsKTT?r=%jCE-}D3;cuw?yzMh=vyNsP`)m#A#{C-N(bN*Gt?9B71Fw<;9wDfB#_ zGtwb{>>CItaD9a?zUx6f&GYAOK@rc?24~15*oz^@mBW)9mLfxD%{#f8g>%H-U+?Nc z%e@vre6USp5F3Y%c-0$2TI;yx_sp?7tB$pJgq8!@1+w!ekv8Q+97i?&<4_+MG_hf#2JO({ZNfxC<}tM+3FUsxNdbB zfQn-0GeW<$9cX*g1?J-Yl#+ArAVuG+9FvCZ87xwtsjRWAri?L&$$>38E}gJaI**3D z)>&N{#mE`$Waz98P5I7qfo1J8(HHObL~gEN>^xQrFacwIe?ZNyZk@kLVteyKq-ADf z+CJ3rc9YC4Zo9JIOd*&$Q6jfXwfZrm$t1CzH1l+_9yLmWnp9*Uw})Iw2$*onFIh>( zJdo{4N-ec2q9ht3`mL;tGYrw{Y6QAR4?gL`l6;*Uc%z=Ngubh~oAE(ip4HmUumh_G zKb`_EPB^#e^TvsO3#73{`rN=$Z_7WZ+A`KnWmk|m;jGO#sJEDBQu_b|0P$GeU^fh? zEX=f*SqNm4?xVQXVnk2NXR*q$Y>v4dQpS6yBX)MiM7X;->@=ed@N)BMngl2nsy z%wTDut%JyWDTwr51{c%Vk@Hh$0s8M4*a7mUdJo1SXpT*zs-%E*KHbJ8{+w_7#6i8h}Zx)PesaU5EuOUV*Z zyxET0G@dif&8Qx^Y{ofB2VGYNK>_(8M5{;M>*cujLnVeWFc>b)+FP+D6+Z#>2hCW) zZ`B}7M|Vl(+(F`Tw27$r2B2wzlx#cvpFN>s~?fX<(Xc}=a7Lv zt8=3G24%29vFUADt3f-;O#?((lF_OW4W;vUzAJD-fS-sSJ8QyIVM@y@cy%E9Hj7 zGq{2cYq#64@%>w&(d*w`PEba3&!YF4 zN)(PH694O*v$SAO6Zo=hGCf=q5kU}|^D}WXh~Y%T$SVqkC8PX>4CE)f^{n<6qGjgv z;ulp`kTg`s!uipN2V(Mhve$N}rH)S6!$FEZzzt;3+Uu6kVqQFDuYFHP z%%5TpmyBzWcXr>pC9J54lD|Yp0Y~7A$F1;cU#_Rw?FA51yUxutYzp1RVTayJ^%NyN zyxUhsST=2OcXLs#b|f=Jx7wJ)E&O}r;2g?`)9L^{Oe|f(_suZQIP}a1JzIKa{X^3r zD(G#U7w?*6mYB{aN%eSE1@Gk(8r-?CA3W=!>LS=P0ELe}?T4LoT;7kDXWYG0hGIHB zJ&AFB&%|rh!I7Czmb|Gy1*4eCUE5ZdkOy0X%B72Dmyv5f(t%@3*cRYQiC-eshMMNr z2*gu0n6~I4Wy}UVSE9U{m_EOkPHzFfl0I7#)-Nclt+P>f?0Oj@Az0x*t?-HlfM0vW zCU#uXfW4*Sh~Va*t3_LLAa7PhgDU294O+F+k2=Bm6OMFi8N?lt0WK5!x$X1GNObJv z;uZYjom`dNR%O$(l(~AG5ax}mp@N1mb2Gb``du&*Zt^30abjnU$QDz@s~dWL<9MGb zE}iqv%JrGL!n7K8*Sin$#fHAWaQKYhs>DyXNnVl;Bb^W(5+f~^j7n=Pe(-KUmxC02`OnJ?81WoFDCnU~hV?m5Qx12ZH zJGr;q{LNj8&~6L%08CtWx_;~4Wt~y(3rGa*N6g7}aWz>D-s}r)(dmztlm)-O#I8s3 
zj5G9Ga^=ZYdRIZayGzgMS-qEihfqiuUsixv%iKqNmR-MC0cupd(`ojo|DMg98@rwgT)tQ=XmwJyFZ%vLcTMbq9>$3q}-DXu9I?oUqJJ)Tcfv_$nKW#@2B&X^u9`RX22p{2Sr*k8M zW(VWlh^Fjx>iZziR5ErDsPCrZ9Xods`SryE_Y`*r(Re_;M?wSyPXn6DLb{iVOYoI1 z437d+a%&0@0;y8`s|`9v^VZGJcM}rhr@4l>OBdteOGBS=*(K(nAyEvbUh6!kNEl=-h)o0nDsN# z0Xh0p2DN0&#NZ=?F_Rt6l)X^RF?>`fWKw6jyUg;W4f=JSP2;7^C$P>)G4DNO5nfU; z(qor{Pr%mmL4?(T$av*Uq>Jiu`4A!}8_1U{EeOO;J4nW#D&vDI*{!7BF_{}*G6#xj_*3IZt|SK?;v zP3Dj9=Lyq75=M5x<&*I+9{mVl{zscwht}YdyPi>$@U9>Iu2KUHh^efHUhF>9L8}kY zA?XJ9u*~tMP?s6VKQ(Ucp0!;3rKVuKda~#1hlO96#j^w7tZ@sW`5iL>l!`u|WMva<4fuXnHB%*)XNq(jB&PG|ksOMK0>o5ZT$*Zq?7D($AL=wj7Z`}C z`3Nh0+ZM87uBgFG_$YWt;^(~Tp|JRdJso8(*MRt0G;+FzD5%MlWi0-!OG+lG?uwFl zq7iZgd5UThP7H}NeM zziH<;FZGn=7c@)1SA+oqkf;C|k?*BRyUP+U>|v{9KQ-$6iUxp|B#7uXdmck3m}zBa-uk!qBUBVKs|zSLQ`Y1hTD zzZwmB7Kk+T1q11G=se_o7?P5TlI+RH13&oPbe38;?96AmaKS?XX!Z-b1`79#yNM?)t51ojkfG0$vh zZG9G>ZHbPpM`-dR4l3??KF@hFgXz^AaiqC!+IU`>OVaA%V$B&)p+WUhEAn2U`fl+0 zI}Hp=?TM^bpWhTH%!3dkdhD}9Xo$*l>_+**MtcANy#I;W29D(mvV^Bo0uN6srN;Y1~E{KwMeQMA_miO1`fsD`=I^cMV zqA5grE6x(vR=IQzf&RNJ*8C_#V$fD-CpWwzS#!BW^>{VULYp}!`XMBpY#Xq}Dm$&` zqjh|k;6{Ry3XPhu)MO&mXW3ghLthWH^7oo|C|8H{Fy@3M}<7C@qdBF&JRzC52J2J(9;B(W~GrYB3 zn^?3ZTL=9NW)w1=OM9rhwzv-7El;~T@%f#4=Fi3R?eR%@ZkuwzZT3#9Bud*o{`r+0 z`Ve{k6E#xd*IDLdtSro6bZm74)eo-cF16ey4^YrIp!GPt?+U#$1h55kz zCwO1}#Oak^4BgCGWsI;PBww3NqLp~k-LX+?5w^)K!Xe*Qn zB3%PxcQFH}fh~EKswH59iaA3`Xp)sA_B1Xu%+i2LyprA@ZJ=10oIllcb;xepg9pw# z>XS#zYeKuYxd6QSt(USfdvgf2d@QF?EgX3dI~cMkO*H`MgA<{@gQMPF=`+z)vN~Wn`(Y+nHN_x$E03j)LPP0-WY;GX>tROvs1X}l1N<*>WN{o z`f5N$>gc8geMplvuQE<<#_7r-Hope`Tpo{DY04wqWpvob!A zlxW`U!mnOh0W5AkP5?EC_(34tc4&jxdC>%WpRp=Il#DwPJ9-KdTR~)E+Jh}D)>V}dV_pG(nDt`pT~y8eIeJORk6azmSv^L)Eqz-V`4c`({tEaF+F`EYyD0!u z3&QuBF7hlGzEC7Oq4`4ys(2e22Kvsb-%&kRo#!W<7bjb_hp1EX)^Y>nZo%*5tmO^n z&c*;+=Puz64Mc@|$m<~QHE=3$(F7=6Wq9fB=pj@>hNb;N%RXnRRi=x4H->8C0t$cO z!*EUvn@OLU?8nXZBo`ML*pT$6xMho?WSyuGc`N7cEfz@T6t5bPw1Gmif`wZgz zrf5;E26_Oj)d0eGms&yN=z1N9Zq6pzQ|Nhyi~UzhJ}Aef1p5;}1KN%CH_4e^c>ZuQ 
zF(`2TpQIJ^=oiOkv)-8OGNrehBS8N3uCIZgGyb~zF7RaP@>1vCeHQvBqumz}oSprF z(=!C?M4tqysK+)OvKl(heQ2_S=n>0RS)Ry==xI>5;#tDZcJ&DIez7}!g<`ClVP{Dj zxQ;QRhz9*fU|-ptcw+cT*gxE{0A8}MCzJ%hs5Opsu4@tDtK#U*We<792&eUw?6;15 zv?paW4_KGFRMWE+HEGgbTtGt3hE8~hBN-E%job}h9rhS=TqRP^9Dv=`kJ3xp!!;n7 zub8y_6H>?wZlXn}@+R|&D-$AC1WD9n_moci>hP3b^m9C7m7J28A}LYvs6dG?g`Q|8 zNJ62&cJn6Gf8Zr)~%HqmN8o)V@|Ht&Bj|Ukhvx*p2g0%-UD!J!f|e z0uV3JsTpwjR3Ed?$3E+gQ?o#b6YLV+e*$13?&)}?xa(I%C7c*qdj5v+S^C^_b!Bq9 z*4?`kz8f>b^SH_4BdqDu=Er4eDiLm?ZU3Yo&D?lsW_NehCY1i_5d~3l&s+65OheyR zO1in^wgi-N%|JZMeA;UDXa&j|vIq66H^W-yCvkOw*bqSvJS3q#jCNZHT!b%a(!eOr zEu+HY`FM^2HW)4Ft2^%Eh;1!RIM2YvS_D{($~ha6(bdRz9uC=42vg?RQh*HpX$keL z3E6D4`AdJuZbniCZUR5r9)Mj6>MV^{J=;&ZD$OFRznLVo%ZOlk!vTsn5Z|gvkHphpsqy@N-|Mma!s>+%P^!Zu&_VeSdXjfm0T-HkGvUMH|;jZGIj!_P59w8j*jeB6-h$E^{euy^mOK3Ad|s=P|9#BG?t@^)MIUv#CgB`1_R7vSqQ`xM9|IvkX@dUb zxlcG}4`+E$`?bxx=s)(w*#yj$q>ecbr*NC*T3b_>t2YzovVe8sS+fE$bxCmo{<)@J zD0FR@Qbd$^mYdBX`H}sDpsNnsi2q6w7 z#qj$pMPsQ%e0lnWLMyrUp`_B$NfSD8jNMJ<-PV`%-@)xb^^ucdHVCwa|CHqx=Mu7A z%B_4P#2|69bQId0sL(^fsuWv&az2Hx0t&oS_!0UG-Bv~r$$aj3$X_k;>=H6x(%q`N za;0p2qM)t_liyCP_yhpk#D^(<)fsdg?egW`_CeHlpmW=Igg3?Wl1rgE24KAU+j^w@ ze8O!g@+C<*8dLRNLvnPNQw;T$f<(rh+n?Wp0LCbH$OZ5vQ#l`+fNJx+rdWTW0X{is?5dP#a;K{QL_bXq$W zAaMmSy{5x$;$S(2w1gN*9pMbOH!M!gl(*9|3RrY2@bZjFM(XNC<3G{3co9~Al1waZxYhiOIY+eY!H^jZto}+0}Akuq4jpyN>!F;vW^|mbR{!4P}#aR zg)%!HET61haK-Ml{s4VXZpj7bUg0GR<~U0&Kf~ykqHovF!^Bf89Um1ga`m2oS%Bnu z=Cp5r*AruY@@nGX#!{c2{uOO>tmJIR`bxm+urOmy;))S~7Yx!aqc%_sMh3>w(=j=t z1&+QN+BVgzcU5v|l;Ix-5*m;i=t9~p^~W7&vcoSq>6s_ZdW7|FS5_fO7){s@JKaw5 zz&-?G`p2%pH?>)sBS$%GcDBcwtW({-7j#LbL+X1EXf=RSX=K!MM5+LBP($KH`78db)N%K%Ri7HiOS$!6o7)A*ErvyYj9T9YFSY7 z3wG018NrtYK!8eJ$_4GvKlyad-(I(&v5K;jHvY?Nbz?j`E_d#y0T{8K%w({~Bt45I zq!p?f7E^K_&`&D-0(5?2lr?fJ5l1dq(*xOIJ|5}uokRBS-vY0j zFeB#??u$X}zWEiAJh)`(NDB&mI*D)i=k$UGaSDwUic(L)?(D%9K%u}XLX(n z@=R~QOd08oH5nmJ98o$U6L4!Luse~TO7g=>nm&nx(X_Ee(0Vk9i5A^#d8nvAASEzWJ{#wxCbcn5lZ}7mnlS3bBMPX`FsQP%9ikzIzBfa&Bx0~n6dBkndXjC zYN}||6WH0kYLy==%8K%982fojk+2=9{D|#! 
zQaL4-BxYC|M%a%0m>>ILhGFc7jg2v%y}y^s_j2)dGZEVD6<(N+g z4uUPc?SFbroMxpBWef}nmL4c@Ut`MB@&+%>4B&3j#)E)N3!3y577wZ(Nn_Ku*(v)|k7z}yO7Hi%y zbSWP*1)s~kdP3uSYb&E8U7PTOJDk|x8XPqmy#4#p#rGN7>zX~!cml_Ww$kHLMPFFaJkqxeb zu8-Ivt4JzV-<(|?8?Mo4o>_6C>!!M}5^|{gX`QlAA*#;CnJuGOx>fk2DaXLwNJv*& zZyg3?3&$%h;82(8&k^;1FsSH~S{UIp`>V2UKax_$XzpHB@st0QeZWS9KJ|~Wkpx+a z&m@=N)mDZH0CbOZ2gV9{(M3B;Oek1yrQ1e#!L_i%9j{~#W%gZwa>uacfw6$A=@AJw zW{vh6TR~%=3WSVJ8p~Y+ZbO5JsSTwb27aJOS0}o`V2G*}vJ&$AV>n5Pob_<_86NG> zewgfX?N0mA36}B$1NH@8kird@LYG(z_Y?|qGtD} zYSh7GKfeH5*k*dES6A@y#glh?Q=yx51DBgp+sZ(bE*Z$#Cgo(v5_(}oP~FaDansgzYNOO$?SXCp zW0YDiTS-J)y3Fq|Fpug{yR?ASQMDsM+~dvgHN`i$ZSR2`g#5jBb%h<-2riS-Nc9TC zRl2sMt)U!Z&J*5k?Mz7!b`>2|9WAzO;j`M`97{Q7_+%>qe{8h{)R;lp-sVPh<#-Dha%>nTDfwac(x|v9}G}E)mux$eya(R=M%W3rnK2n=)vD0jW+wW zL-?tJB$W$OkzF?Gd{K7Xbm_ApIwCY)7%3`&Pn)Q70nqM`vGf1SXp(<+_d!OFfu@UU z%PVkAy>+-G_d4c0K7UFZ=MI8uj?udqiL(>1RO8~m0!xU#dMIwJF%7yV=n2haO+DZ} zR4=0Hw~}jY>&qKQ8|^F%7R2*q*xL;p3)u_)n6cxS=I2i``y*ZjOS1D9721);{=kM(x)oz^wbB&IVgO*8zq|bdIa=v!^)lMbshaqvX}k6MGONXyhD*tK zi(t1j2h!$%YLS|A0?2$^A#{4mvPMFcLJ2NOK9ko@;PqvNsMV6lB5!TZU(?X;Q}o8s zhIevRW!LpU6Fbr>Ich&$Gv&H|T?T>^zl`efRSX39U@hDLs%S9-aT9G+S>C?-xRiQf z&FUBI9Px_>W2z8!$xzBmn+pB7r9xRU>ctPs+tytvB%8zYiC;cMo>NZ(+ZgEeG`2yO z6|FJejc*%Epe$>g@-i~a4XFP*5$XTP4f|?pvE;>f!E<{3Z+AG(w+_x6g9zw5X^sKm z!1p;gQohhPN{}1~U&CzjliEsjMX4Q{KDN zMY~7`>r|jJWXQsKpL+0S+cBpc5F32YUb@w_5sZ~J-+~KnE1f!m^YQpku|!2@*fD+O z(3P!EJ1cV#@GEZ8(UiT2&eb~$9vlKr*%9I0+m!dj{MuXAT<5--LDg5>u<;Tdo7X3U zpeq*11bG)hZmoTCv8^2kjTFg74ZZDMi%Vv&H#^W2rnm&cvg=uqwxgPwvG%I!u`1nb zh7%HK=D&U+F5N1KCG%ib-(Sm~efUyWYP>|75jMMf0_HbFD<+?-OzWcoupHbiK;vSD z{QI&}(6}4=z2@cpC-i8Gc?#9!Cc30x&Q}BgR{3cFk0McK(e~or3!HVz(%X!aR20I1 zBkJ&B1oLwVIev01>}AFrzI(MyiNZ~onX_U~iMFoD9!d11tyqkxJXenR z0bQu$^B25$buly0HPL+1@btrEdP)-~F33@_a>tZQ2cdF1jBW4 zune9NS9tW8E?acZc#|l41??xl^jEveF&oTIbH56z9a(xt%K z)iFm|peW6piO1jO)bM6h>_Rqx;8hl#!X9J~ahLdMVaDr@&CR2B&6z_RHaYGtJ_m2m z6WtY4T4noe8*|1@dP?9F)*q%NNpGm`2GH{aEzuls@xX6L&*|0BDnpseJFBGKf*1;Z 
z>Tj+8x6bt$HgA+0(D9?@oz7t0-iJ<{{uS&*%35!;qCZSXogPN&LBZ zT&z+(PQPL&oait4%cxpXGNEr)|XeuL*jV$+}d{=N)7XOiSsv_A&r4V0qTSKjq zI~9zgPPTppjH_QJeROVci;3hSV{+p0Ik#Px^&!fs9a%@bpwBE0(FPm@=Oao=23NEc^#?X@5%QHS@Yl9(#Aor z%FE(Ex!MPOTKf`-0j1ws?-@_lb-&nT-r^~&Plxa9wFK9H`S zh|kcxsbxY3?PGQqq?V!Ax%#X*)z5!SnsxU7|FZz@fF}3iHq}P>%$lk`DHubOVf~in zcr}JNT4XgzD%cP^(>%RBz$Si%GbUUHFGMXmRe*-_Lc!@)AD$}cI(lB%O4na6?KhDk z<#xq@?NzK%rye`@S`?HVPi`IO@3zAv$+q$XgW!XAOHUOqUml`4cHix(MkkWHSoJtT z3+|J+lt$T2Qy@W?R$Tby1yJUDa3z67i#FZCuIY=Pnp_hEpASGN;pZVxwRdEL+`c;y zK@+4;kz}WFQ54C~u=1u=Foqss8Ut1kM!Q}diS-iHCAEPwc$NA>3%rc#D@b+m%wVIdpkIM`zR@IUO2im(P|d9Q*~)Z6MAC#$W_9*DB@MBv}i34~xqN ztvySUw+D}iD*erU83v*)4QJnlR~QSp2f9m-)GownPFhAW%N^ZNE&_D%)_!FxN1}Oz z7x|%b_Asll%bC`M^IXqtJo^;ES$gFPAQHC(NAqJ9aPyeRy~4g>#(QmMUhgVxJIlpT zK`KxN_zGLn65CaSHLXX_?Fd`WI|5anReW@H0B^9h@XObIi!3Su?0WN1Rs3>h+(RV2 z*(92%;aAst_cP~RY2d*`#s;=LDMLra9iEyG`@1Lqc?a%YM+FzSYG7 zjZ-HdVvg8~Bvy`_ygX8(hwW75TBYzcs$aU)h}qsGOtcSaTHB=U0WHut@L}tUQqU+` zwWRr#X;}S4(4*A8*q0Z(AcQ!oG|h2xq7tgy10ff$X1M&O{s+sBbyu(pWi_z}lLJ)| zst4<&B<1;~$5dDkEayf3QumdztyE;VkH)vid!j3~##_5HBEL;qfg+E#EXEe&@#^cV zVWYjy>um)g__a2p+?}rui(2?b8{GJD0}W-mhv@=&Cih^^r|RdTD_3#GKCXBCjFr5j zcrvm3KpRu9^5o(n)1YKqchzyuYJHnsD1xXpaeiRQ^#@cU^{=%A4DLNHc`!bK({Dou z0%%(PNGD0%<5RQHV44y!;u>UCZkH_bUmPS;Up82j8L`o9*D19}H0xUIH=rA4paqypkl3V3zT2!)dy8cPV_!hpgcSt(`nyt%)X^7V;wu`G7+U>}@N# zmB|XS^xNpmV8t%$vQ#o$th}MJuroDDi{B#r0}gwGAmJ%ERu6aYdim+W5=())&$7p5 zp@q=2l8PIF!bQ<^S7&kM2o6=Ppkn(+<{akENzIg%EG)V%Us*J9$x70*40iD8S-OFa zGWL~!@7@ExhwO$xERhB|@)8(mbB=PnFVncW^x}jOls-aSd0JcE=%2E9%zLRQ>m}}& zHP`USipAyb)TR+6aVW_^9ldIU&9f?#Ominba`PO3y7tV;*#rW@zvLvf%p~~^&;yAs zA-F0>>jeAoc8svy!_>({o7(sp$=47M^eEr`B~PvE2c^oC2APVafhnApVNG0vN*lJv z2l_h>uhJLXc2{2>#mESdIlkuFYpD)acd1Yuh|l7XO=pT8_J~ z*W7@3LBkJsbSKeZ5~rTcZ1wVkUTp9Pp|UHANp ziQ2jc+PCzJqnWVDWcLQW9?Uyhs03yX^AN~gGv^;D~_OM6B~2x)e5 zl5p*_H8R|YYwY;}-P8C^XtCk|Qy}xdReO5)K z=v=y>7GGyN;;PL;$sk!$yP!6XaI48<_XJM1s$Nmbc4ya@0>`>(Z6ff1VUz7fB>ouT zjm6XIcWmvcheWdU*|cb{0Q*a2DSvGRLUKjEib)@D9tikD$TLuLr43ATHci}98hrqu 
z2Y*2zpxX0|aZLRjqEUCZI;$%*{=C_aYTrY*J3*O$W*CXD<;4uds(qHO)!yK2c$!?} z>hNqy^X2KX?2cW9ZMs!mDqkD5FNN?trpui2@zkdEvGx_#u`jxX72^C_akg}}N^Q}& z3RORtf6e1ubKOKSRVL2X;u+HnwZ34kTJI+Ll^e#hN)yxcuxp;g>rz*)1H=7#z$$94|>CDq z?$!x=rV2)IUm5<69sJAiSG{OL8^-lCuxLbrizmbo18|q>KAhiWe?vKd!QT7|R#GvmuRzcLSSIlG%4Vx6s^xO0{L}rO{JyiEd3b9Vp1;Cv^dA zJ1EQ&!}2jcAO`rBa%d4|A>4yCpXX7-`qVK(zQAVn=!IBxL-YB~Xhh@>VlM8UM@EzK zF|dx+lV-;jWb0)Y5Nk2-X{*l!vpB)y516@0jZGy41(>>Tr;Y+Hr~HNtgSToT2Oa|( zj=it&C?lu`wBsEibfX1#ybq1OC%7{wL=oOT z(@-5fR^~p@#d$|i>3<5W-!QVYO!7hWdZwis zfIPqGmk$V0=rg-`ok>bf);1XIa@>6tClAXx)wyx3W#~$Z9WDePmpK|lyyjll(qX&L zQTKOdZ;V@)5B=C=(#@Qu@yzaOB!2Rx72M*lx8T02Tsy8mwKl1wbXPKCI85a1ka*1a z|1kx$P!wHY-t{k6FB<7fC+dc&c5l9+p$qskZE623Uxr{ zFbLy80`@0=z0Fkiy>)bv6`%*IsPLAuyn-Y!+3DrU7n2((GrUe8)>6t{W%cmawsiwI zCRBu@qQ)p@9Ye7i91CDstmCflO63%0`?lRS-r>=8e^d>G_0K8spXEHrq$jrqd$?J1 z%umqM2!CtS!#IXc4YPTf{4q ziO?Ga#&y-@#%iy{gRpM&l?q*`WoQxHP4nQfFZywoMlH5S0MXF%55c=l;hIs>Ab6|* zox#d`9DEj~bF48LVG-!5dtncuU)N?t^E<%gKdx~Gdbo<8;A`EwOz6ziKjdCWD<|)u z;z_VXP&I<2s;i&gl7Mw%w`_%7p$pBK1MmFti@VB@o7}5bNB@`VCFz$=G#=up=N}}G z1(KWJfuV^x@tn4vp0YOA9?v(3b@pLVy1y+<{q?e?sryiS;)6zB%Z+TOwzc0Q9H@WX zzakth_essz$pG7Lusu+J%v8?+SlL*3khrlL-?*tV%VbXl5cRm-X1iL$hK!3P$mKQ{ z&g5?dn?X12?6r_5aU(^eYM@W;Vke*5tEjP!No}}P!ALv~Do6%wGNs{K z>>*xDGv;aTQFZ(2XM};k)eY5V|Lm>O@=`@W!JAG^r8C|m4#fSYL8k^ln^Xu>UeWHdxH zpvAiYJ)>T8_Oq{P2O6A&(1d5um&A;or>Z+}b-F`96y1XJiLSeBWKz4{^Oxx(c;Ue9 z5#Ve|yFJToKf1jOtiC+j{xrX6RmSj8n|n&p=R4pG1EEKL^k(up8{F$Kld=Z(15WC+%c1AXU1* zhBSMPq|_v~*mgO*rFO-LykN%+?P%>$KM^@TY||$CUx3r}`_z-}e$87-$}o~gTwCvq zi)}d2itU1;VN})1U1+hwW~ZlFZTBFl2N$RrhI7R$b2~}G=1caok99$-hn1goft32_ z$Q`c8Y}bYNy<5`}DZS*J7n7{{6aE{e?1$iVWy!Oz*JUsyiC+|_fT0QMhec_a^WZ%P_qZ9o?!Ck}di403D^1}e=qe}S4dqZ0Wu@Zc zQmQ-Fg?3r?G3$u~Q1xAg(`W@zR=C_utMvklcshByRl(x*u(9+N>qF>TsC4Px$~z0S zLbnfi+ZLK9N6wuT_*=5?$WfN1Od0Kfk!nl3kBkC9Mt_j0o{ouFhCHIX_^F(2xrAusQ!kfv(k}w&`s%dMRFuX66i9hB0)>ZCVyJfnpQqHz^D4gG*_!#ilZk z`B0}X<^hs+5067CZ~-y42@Rt@ff7}o{2J^6_N?KwkT1`zZ|o{uX?}Sh1xay{*vPZK 
zP_eb%cqap2Mz_l#6t`%AML~vtPEdTI5_tz{#Yb9^avLeYOPd2#w+&0?+7(pW5F^o6 zfR*gJfzdEy&~82`cC+ajAo-a9jGQYrZ}%oUuWfpZo5t5;4g!qb%Hi?~{AkPd_QD7T z-_>S$@(8`iRg}`a>w{H|>HiW{e7c`XxG`47mXyGzJgXwrUuEiHGhcguJ#2gYyyO#Io^Q*V+RYqD8J8?gqmF zTj|eXq$|aT{b%cNHaO%$_gXV3lS|W7fRysa3COC2v|Oa=UsF`h*{l0XBoi&wE+F(H z#xid;FvK(1yB|r5y5s8WK0Gp!DA+(CeDrHq&FOYlYo>OLLOm@qqEy|2zB1LTUh12C zZ(H1^9)S0+UA3vka>P!a<G(_Nf2decl5VUs>-0t=CTx3P4jL^u%I!B z20e?f_Yg38wv#J!QtsCcXH2xqv^Aur|@qOSi<@drtM!SXZ zn?w@p1Z!xt1^a`>5E#BjckA)#!x-PxB2RJWDk`DDHxMAnD&9_p%=*!MQlIVmbTkc^vU|Iu z!8gM%5y-gQA^SmWYXwQQSsm?oB?f2GfE8+s#mMYciX~ z?9biaLh^V^Cc-@zhWn}BqN;H-rp#_j&lztS_!DcDv55_39cdCbaG>BU}06VkKi zIU;7$c1z|GE`fM|;2^khoPMT39^{(rL#)hq;*)DG5kKJhBx2PTk0v!aDOJ9oD@8C?4<$djZ?Q@ zz!-k9r;{-cggb1zQvnPCZ|!@{xp;iD^MgY-`d~qkN6axfKtnMx8EVuvyIqJr>z$r_ zua~iMDEiCo)ch#|JVNghkm8tP8CR;(OehuxK?BCWsEuje5F?4<#6jE0BNY4 znF<3*nwQ)F-8D3Whooq&d?A)d)ebhYLz&WYI8EPxHdt(C)$77$*L~>i3GQwyH;oQ> zV`ge>0>_@)u2-4<_)A>`nwm~CG>qy;$4IWvE~fr5V!f%S5j8L145QDG6oKGRzs8EN zI)k`FsK`Ktui({Xl+^Tm&beMiCW0S@i4&wOQ%-WGICG`a3@l%}4odHVR5`CD)RPa( zD6+GVNILrw-BjF%hc`FrwI*&nEHe zc%)=nnpI*yZBgypHGkkT4)=Co>9P5Mp`TR1?w!N;u+@&SJ{GmwzRPgfl@)zBs5qrf zvwKW7WU;~bL~m-?8pybBn?y)MdWgy+Y*?6Mv5uVCts>jTp92BiN_Mi;i#(ePu%z2s zm~_k0z;d*ex)%u(JtEDr9jmltk>U*9RP!!lv0FDw8sVyXL>dIsVG6UYK*MmzJ%8y8te2}C=^;z;YhN7{YxxfF zesMptVS|ou&Mx*YBfLueM7kDOH))w+`Q2CvMA%qMfOn{u@q%$OzKNwjWVLB~{8+D_ zqNs#zZ__xwm_(N(zA0I59=6&P|DyT>^vx-zscmX=&spy3A|vX*dv^cN<&NSPi2mhv zo1#~1u5ZEaU@VRh0GI@G>baq+!@AGl)sh%t^xT7yx@cA8&G{EV7&3+Q2@3~Zs(Y1F z7z-`dV=vivnT)Fgu}3&8VJygTyOJnd{lMMzSr!Bj8<`A$cxr7tsw^$#^CwD_2+l5(VJSqS z&^3Lr%4)uP?cawoaGM<&mnmG!qz-XDea7EvrRTa;xRl9*!V3x%_(d$xn8_b8#LTfl z2sNnfp(oJEt6|h)95`DhIO?Bh8?d|jpW7^RPBC=0AKbIf;&Cb~VNvk(et=$^Vn+8D z9Q*T2DedSJ4}QYs>uJI3+%GUTMDyLez(@x}GPe6E(R|=$4ezcArqAqcVu$IpE*8j% zxRtAzUh31!uTYj#*!jie|GUDF8>#CL^fYSN>G@#^%YCXcULHAc2DbP=3;?v*riWT6 zuyKR=ro7_bA`7R^`;b;bEQvd7V+2vDAm7|G>$`G(i8_Z67aQ06>o=yfjWwG)JSoFp zcX#xQ3-a!Boa<8*AJ_hd!E24_S&RSXL;tZThT0Ztn9VA8oU1$r#}rFwlG4YMg(o_1 
zhC7UBjyE%^&xVxv5t7c6>8$xWZq?cvPs~2tQFRT4kN-ME^t%4))*Ioy|1QjQQJ%6r ze5=>?A4=abxSw5)*PkxSCHO7s)AfoW{r1t4J=S|09`w%0K2Ty2Nr$PRp1A? z?B!m+mFN;W_DAQY7`K=j)QWTRvh|T8@06L8H?xiDJJC@ecS7g?KA-Yuk3f@|v6CH9 zinwDPJATHtoGIKjuylSl3ODc6Z-$O563idkxL-K!jWe04O&ZxDO zX`GsSifxfOdlfyt%}mJJ+1Y9{ZG{xLg$8km4Tm!28b+(H>Lg{4_X2HRu3P{&nbPL8 z#y{PXYx3+Vv;55^+v_f)%wFC;d#bzw$;9Ti6K(b(;Z=6D_YA+0`wKpQ^;4O;jIs9o z!P5E7)Jq9BVC!!PGMPeuk=*%zOZBxcPK-kvYiOKYjqWPM@Y$A|uCr!;z4-l1!q0fFN!?}>)~ zgzVgsVs2BvAZF*1s=C$pwUQmOV+VuRH;-~mZX;z0QfF>Z);o-pIBZ%%z6S6YFf*4Y z+QLXbCyfgla9#Ut;-Fx<|(P)L8nMp{&(8(i#DAS9Qh`6` zxHl$3nvBgAN$0^K?R{#zVkQYM*-z@`+zYr_v5xn7-AhyZ+-gR2YYWAne&ABfUa02) zA(Tg;MdiE2&FAijC05C@)qphzwPW-~uyr$`ZoJ-hM|)GQplSK2Q~w2J5@Dhs>37en zFeqMeGBCV-e2X6~U>|f#g#Vj2-LSC@kpt*Sm-yVkg`meZ9L4+saaonDsGo36{=oH6 zlx`l&g0q_2q@!T77obvK_nEJ&!ZUY}>*}daj%KeDkP!#}urb5cHlnTJWCQjEt%ie0 z;UA1kw`C2S%wD19h$mi07>Kh?%|fM^OK;`&u-4XIdbTZ{$$`ZSc1$&9*4vj2I2S1N zKhFBTR1C<*eAxdzE8o0vuzFZ5UC`iC`}_?_(2(2`#pV)-aW~i-A6`-(Q4e&Y&sL$j z_M5W7jrx$8w=Z~uuQ$pW^S|fJ}~)j#q zL&S0?%J>^5OmPI?rmAs5b%C7VcD(40S5 zi{QF+U;H4g@!qMNc7Ee>IIus&V*K^R&lKn#9Sl3A>yCviUK2QD3vgqyFxd8 zZ_b88+;}U0ufOee^6VCHyiiyS@PlVD&4eUhHTG?E2_K78%i;3r?M8l_AxfOZOu8co z`OWh0oW8c;3fLj~j~zOiN$ki1veh!s!;a}H&GE(Uc)?K&oJ(Jl?Rt`ORk5D?WIKKg z>J1ya*`m3oZklSpSpr1Q2U&I+tk85!E3WHx?ZhKL3`-Vhz9_j!p8vDiv9ZK;eGH%b zkM`P#Y&^uI)P_>|*1==r9@`pDj?C5n8ErUoXB4;4##-OGjry)FKBTlGOLQf@iogHD za+1JLMMUh8ojnlT2HuzzH~7yk=dWr#YHqNO3{L>j`@<yB7zE~B+eYbN6TQ9->qrN3uhdW;$alQ!< zY`RcYF2)k}K`>hCaVu#R#`i+FjJDD0&`5|6@Am0$_B5X45o1oyQCas+l$ZI0a>maP?Mn3L2H_5$>rnoN;GoRr@rHZkJR_x(Qs(QJRoSDJwj60TY zb=nf%rD~Th^>FQXP!E8S!q^G@$~j9pnx(J)%revwihW7w2(fCp0>rX+ZlbFHZFPDD zis_ysiF*k#il%(rHe24s29w~eSjrP&OATgQd%n}3&w)YY(~Y!_r2-Gfe$YH=qk%X6 z8`yNlTya4&&B%I*_Gz*77J*j{r8t|}7H}-F1lSnyNxC#fIJW38N~=?A^=4EPfrVL6 zS?(j5uTH8=^I>6DMV;{11otva+(X58VZ2-lQ`(iOI}E=$b@A!11T(skr?`y}R`R}LC5uy^OtIEHJEf#tbRW*%IN~{?lE^{-O%E6(SWzVv&-4C(! z35RjkqR0elcrDRjgg-Hy??(8bPS^HS2bC#bvBv}}PWGqA&&;;lLmnE3iWfr;idBx~ zlBM{?=UMaVt_iKcA-CiS$mfeB*@bIy+6aU? 
zLb-emvWiQTPjL<1j0F_GARy*XC`8wIaCtO;vR`#_Y8gF$a9Qb{;Kw zMM|DOZzhlZV;Y$2qt5yyu4FC@8-0-iE@v`{N}Yo=|6q%F+jyS0m+bF?dkoFEFlH1w zKnHZbl}dyoG&U!%vuUwvvL!6Km9j@DsR~kBF7aRx^`KVFg?R&r+itx|+tWs0nnK~* z3RdVDfg;oBMR`YQOw_SEb2WI?>~Yc#Z~SaQWf^s%eOeorSeTOFIbihsK(~A2;a}o~ zUT4!EnC=|0C*?7_b1<7uNIbyp-wWy(DS$DPvp6)*x|8)&%g&*p&}J+seD^(QOT@p{X7N5_?iDEC;dd5`hN6dnwPL`LUi z-&nwr4>l)J68>Z~#KHda-zrZ^>^OEhx*`Tgs=&E@2&c``r3=(+4?SgKia%Og9-z7u zTZ1yUzIWI!{F--Th@Gw5F|)U0ULJMP@*KCAg(9G zk99p^^dQBVo0c@LR57kEV(jFi^G2dm#wOK9OZm)%FXg_S!E*Hz#^4zW!cpra zQ!gYvvTGtFgwn;qdy<;Myt95C6Ue5BWvRXbY;Vn{6So$-0%mwe+S*tD;$;@3##P8q zL23@6+-w3(=JTqP1;|rPD<7*G3gU~ZL%}u zzco8YMpa%JN`xI27V~?`w^(OV`X{?nUqO?BIHD)(=g~Vk8|@b_#u!R5wn<~mwXK0dOMLqUD_~fFm zbz}Dqpkps!RR0G&OBf7M7DOOia20l(iK)E{HdU9`^S6`;AAkSF6F-ygmUO3(<=<_o zZSpgX>=EaJ?1;Vo$h5Sos}L2Tl`)8YP=Qowkvq^d_2emI3(NqqJkPDFxWyleAU2ZY;~rt?Bf zJ5SAw&G?M?>gy0+DhV2BpiQuX1t48X5KREP&cy{$t4g$s!=9Kk`~>^KS1jeyj|&$@ zu?f)gRlgY~U-5c|2+W?etXcC^E@pKhCFNr>MTnvUoZ+U=Rg+FbQ15tRW7~fCAmRgG zxKJu0Xz9HcooW69zj5QXupK=SI((&=u5}1-O7WcR`wI8rJHn5yPg2xN-;a)vH z2u3mLlJ;3{9X6$VS`@vJ%CByVu*#2rbExiIrD(i0pYBXh<+`Uj*+Z-N$k{F+d2DKk zi*w1#H%9Sjvo-9s2z}gMT#bCK_?@ubhaEfcW#z_OY(p>nQN36g-~nm=X!zOsh2Cz5 zq~!OUrWYCQ0Pq)_PaFQ-W^Ibi!NRn>zHTvI+`skN2&uv76rE%%FMWXc)A#fA1wRMd zWG49Esw`&-g7swCkhPj}KPd(lb>PV&>|UBEcq#LRdN5b(zCVCJiid!rXS-}w8?Dt2 z!KB)GTDUChv>LHEW4-vK%SUj~Fcu9k&Su^#HObvJs)Me#8-h;o1Yz#c>zGl0b{jpd zPDUkyeFyb-nCg~yNkmI$-uO9ACfH6(d5Ck{W?B3_62-mbwG)bWP&8FsoExPTaG-cs zJK&38uW%AyQaX51y!B~^50|;lmb{QBWtAe*frN#<1E8~3mIo8ayDQ%)tdfC0rMo6M zqb#*I+R=jISw_T|tGqkP!4~QY!bjt#j>^z||8#H%ZY)&t>7_atdbd2Lu!=f?J$Nxe z@;YFj&wnEK)r$5AgYJ78_I;=MU7{&$lj$*$smB!7Hom>* zPAX42jGOj5W*dvq?Yxs+Ix-c#+HYeHy2f0K=<|_7iu~V-86y2)1VT>!?x#~7CNH#fBp*( zZLwMxw(p+5?rlso=?84{&y74&3L9Pwt&Oll@KetrQI3VEIOwZs<@fl2%dGTv@8l3& zH2vp7T35hc&(WcLEFM{k@#Drm^Zf%UMJ=4QhT(T#({zVrMGgQbZ+RHMO66D4j?5VL zld=K$DLNfQ{vKz0Sf1)@kBHoRXq5-sf%&rE8q8#czL_lNRaWwDODQ|*|D5eo(q|H% z<2vHWA@;R%tsFVTI{~n~edb@Mp;M_P%^p$X!#hzk>E8ye_MYwjt|>nBu`kt&$r|kC 
zZN6K7jw1+o9RvF-dAhgwhy9m0|6VuSP^^_D`{tHbdwum-1$y;uCCfJ70F?N@v!^AU zocQf`?Zd%$pW(!Du!Bu&_mWvWk%q@F%uh^|qtNIkqIP+Hz7>^^3j1A%?YtEh_PsT; zer)A_-ll1x{vU1Y2|@C>HR53C;sqh?AV!2R(>XJkr1K)|O?+J|VO#KOm8pPVxAbKy z?}O)9V14*`@C66gwkJpTe3*Qr9j%ARzrk(`Rwg!0xw(X7G7o8M6vk#?>zK98PN=z^ z4!o(})iiFG#gy06Q?AlVrY{qaqXR1dbOffsGT!_^nW-d-kGO1LoEo@Q(f-BnZ`K1s zgVK&$1B;<*cY%dP8BQvyKVS<#xUR+pf*8>Q^*&nXCt(le0K^-HuF(6DKC@7i)FoT* z$@+rD=ZO&>@yEO3=MmB?mqcXkS#M}=UTRIHAD{ca*N!@X8q1w@j5k2$lWW!++wJK! z7Tg8Jn|TDHwtBk1+-C8QK@)#tU3pO3HgKID`J z#%w)vIqJ*BuFI(X;+_@ewrGfl<6q;u%V^4b%Ob0tAN9zD21lr7Kk3D*Cy_^sW{Yd- zWhe138Dl?6h~LzswnSCcc4Hd1{nE;aHr{b~ZtR%KixSq%MSl--Kk;JEhWr4~o9xt{ zPk;LA&+2ZTRR{+Q-LH$DAO1y8MaYvv+XVbyHXn=s-$Nzb{U|)4;p6R3#k<{OYwT@1 zQQptmO@@nuuA2iK%#|#rWh${>_}HnT>@L-8u7; zzc{Q05_5HWq!T>h;BFFUs2`yl`cm8jw+(2>e+I_3<6d`U6Ic6@?i3??ZFwHSF?#1S z8qKW}_RO{3PIvNrMm~eQWSvtrIpAC@ynSLMF;aGYq;jEj>K58~^q%XX!lX*lmf79> zbdF+V$i@w6P1@#F^D2)XvzaNcyBDB-l`A@#&={wpRswUSS$Vx4q3{PX&Yix+mPwr0#N$zDDgzXrhD z?!3xA<*IVnkDJeVe(|aBEH{7?wr36WH&PT@!sgGm(RLGimg{@T>`O5v@t26D^ z7uGZX(C)6@UEp&xKCUtek{TpDt4WKBYBfucKk{_{(f;VZ;a@?$$6h|lXx%&BSJBGU zuFvcxHT>{;8H*+oKV}8_PWb*!kY%|yJ?cL3HR<`~nSRf#u^t|Ghsaq0rs%2~sX7bA7(;{mkRz zhcI?eqnqVHx7JU&Cqj0~iT!=4=>-&imd8{JgyWTV@RrcQ2jbP~@>@kdGc1H`)GW_; zv(ceslPuPqq9;&@OB1$hs`-gna`KWp*^CY~S4)@Ne=hBuJtge$XJov-SksM-I~h9u zfaZR~a1%}V2H|2mL<&_GoTL0$YiZ?VpHpZD#a86j%48mb@;tB(U2(Tud@56%mEn^* zib99CJ2&+Ub!`5ti5W?+sT^L`(`ePT5?HQKHrqa*f0yryR5$>w%mibYSIU*MxieUTuK zY`WwhJ(0jBUC<9+)Yjs(mbYVp2wsa;FCMi(W1;A$o$~RN_1#k~R(T@&=DnVfvVdhF ztEZ?u31wl*Lh`K^uEg^6)lXtsaLV`QEl*nCY5~lXT1n-;_re!Et+Ie-N3W-=bpD=R z=$nNpOF?0&?iW<#JBt9$sE4W(cg#QB2C11cZcAgr)f6vL{{JVs&gIY0@!xsyTiMRj*BeQzK$A-|(9KDqM;qCj&AO6Ab zUjF_!e|`DcS3kS_j-EPy|KhvLpZ@xPxcuHv{`m4cuYRt+bd3$(T=VlfR9~iL>_d<3 zz;bYJ!`JbFt?_OL6WM#Xg$w?3%ie$Lci&oX?e`WIzQiNX_~Iw$>WOoLs|;x`1Sb9! 
ze`bSUInJ<0{zvLP${!r^(XH1{{+he`lao5Jlh^Hgxyd=>I*JDy8O)iapI{^7i+z?G zKe*(?7dtZkBNv$=oapx}93yUV^edCOc+PfgMcY{rl`u<;|k0OE17d3mi4U_Bm`zZR(DaiIhsnE>PEk1vsn7k^q1$&J^G zJQ?87YcW|*SM?lIizJ?85|_myl*N>FyewSxbQR3_D1e{9jwi2ATF~+o^t~3MaKo36 z39!g59^&}H>M5)ixldY!`JeMtQx=QX%Arp>=gBJzw(7%+YU4C6#z=RJi4|R0@-t{6 z`X{wMasdk-aSi{xE{s)_g{sd7d7Q60Z{(#rNpFHRnt2 z=t0DG?c9jQcv34Ea?sh_cg}4aKmLw4))_i1xjkmyB{#krZyoFD4R0dzt;M~#tMu=; znEfo#;#FVNGmm0>c=bvWZJTe58QXRC3AS4Qg>kwM#NDGCDUNyW6`XP5rXM|dl4TK3 z4|YCwP|rCxgWxnT^S%_n#<%&NIpd-!`#ObdU_i#K7KU+dTM&3^2DUjVWbox5iwtAQ z&}vGP#M#A4wD@Ilp7ak67LS{MEb{ymm&GD`;jN3eez1#0p>!6jUbJe#sf8y`U+W_R zl6*duj|{MQ#qT(aPjdLIs{G{IuFt6QQG&Nm+V#|xr?@O+>&Yz(QJ%K4u%@5tOOG)9 z^b;P&$ikDp1Ss3G7QxIzeF&rS$JOZZdHlliQ4PWD<0&qB&ST93$joVJ1Lk^y`*no$ z9D8wT5_64%V^3NnQj(1t@UYP4lImyQhA_q^0h>f^mhuyVhq^F?!?yY`?}!&R$*|O- zo$tzlGes7$nv!r8Qb*T9QfsTH$KIDOc)(kw@|5AIanq>$&Nr=MZ20Js<2E@A+#y ze5sq<*ui&uZ})?vJ9xsKwM?`#g0Q`#7F z{Vp&MB1h!ZBPH1CzPJhs)#ST^vmgD5>Q6kj|EKR*OO2!1ct_i^5t+YDMXVXf0=0 ztZKpd#KMv1rz}FHYiAMZ#i~B9%3_osu42LZRv#C5_oN++zx6+U*vboE)nMVOy?%JB zeRRP2eD;-vtUl|i#VwbhpVqSY)k4;bR*$O2TjLoEYF~d^ysCdy5tbPlGvI5v<^mU? znlBiblM%B~kz9`j&=I$`K_wa|$&Pfixa5Ly+bY>P_UpAX7Kh|>Z3Jyw*W@gOu5H$3 z(W`&tuvjPj#dfl{lG1X0AX%(=lgPT%c3Z@OFpAfK@l_k=(A)=l;iz8=Ui(I`@D^T2 zlrr2;#$g{Mit&*bLsG|v4kOO&7A|1#Nv-V$3Al6H^Bo3@TqePeug>ouUyaRD`N^r@9~J|QD6AZWX6(u;YH4c z^HK8J@Db*ouNMcq+Q~nvb>>S>2D0+@fj&|%nDB%*JgL`Z&*}b8)l)nCYY+IlFL>&9 z|8x6DJ@UG~^7I<1eTMJYm8aX}vxkpZa?iNIB-ZorXX~72m|$1gbFX4@lo99E`~S#L=G zh=t*=#92=SYw>I*2zUM0(>^^FWC5bBo)5ABt+C9p%48^BxG*Pfp`9bg>MIB@^0e4w zdBYLgnQ(Z#enx2@-=620Wjzh81#B%C$HGzz&04rhvM}_b6kI(O<;m(>EiQRl`sTep zullZjtAIaVpr@^G^fVQ$7p+JwPW^3uFMw;|%MWYe_vd1z*T(}`g0cu@4ym_|)z^T! zuZ?+P*El`7jFCK|Ot$h^t)Xhy5CDw11XFVYKMM}#7&@mrN)EM$F-7@mJ{446El6v= zf#)|%_-XLusseQ#0h)v&aF!dLybAMcE_1yFoj6xjU5bo9n>77rp@)}$wq0wC#X1Kt zV}oe);MeQOBWk3csuCuHr`WzWoh+xnXew@8@>Gs{F9)x&bXh$Qkde~i^h4ERs9N&M zX+GDItw3>+qb+^A_j38GH>us_nj&KEs703?tE;-N)a$KgtrwlIL*d!TVJm*maPvCi z`Kc<{)uuV=VdOhtyVoZ!yq_i`#e&0gP? 
zPrt5nr;io!$5WUCpW{A3&a6YmyJC_TXmGLb$>-wl@g2q2o`Xs4I{)R353A>OOxJ@$ zu6}&kh^^c`*5kcyFuR3wCjyYd@y8~1M23IedJeJPP{+o<;!`I*^qf^s?mcX3bzFSm zh)j*x!FB)2lUVn!K4RetH<&f2<5rF{JO0R(Gydnw^pd{s@g|P!H>gMICifP$&)X61 zUi>I0zO!8V>amO~GWln+=Xait?_*zi*PJzOo^i7!<|m}Po3FiwRh-wPO&v<%Mz6#; zhaB$=5@_=&6$@jpW=1KZ`oE0w?`( zgq+OV(|98<7G;6bu7O79$!INDVMS;0#~W7drSmOz{CvigZ~3!;`zJa>#yGXiEI7vl^3%5oGL$D<&OtwA?uF{m|ykBf|p+Cl|JafI}2xUdb;aprqVfQ zEU5Ktw|dstSzLOY^0INQ)Oe5;t2}05_{>wf@t)1Z!l(IEVtpH#`K~2@@sEmLyuw=N zgHz#Ja&Ug|bM(S1t6zgESaEXdiwmP&vrah7^P2Jr2a8Wq-->ts@KexF`l=+y)`XCN zNz@kJZSgKnhl|tDfg7tv(zMk*t<+K2O=J$ZmTAO@Aq*)0MqErT+OXKdR7SOHDseKe z^dCE43^HhY9aGeR!oplX9Enz;${!Z+!8;3^M^4Gvh_fpPcWSUu_go?eSI(EmK+{UF zZjSUn2}fBF72ADbZy&^kAK9+i7`8q|<1=A8CHaf9nLH|h!Bt)i~`4U>7UI;p2I z`2?-m|MBh2$=b=0zmN4OzH%_;M{sK%cCXdr;p^PHKY((U*h$cjPDm$R@sHR&|A@bH z^gc7F9>1rr{GFrs(Rq&i$?N?jXRQ%`WO8GFtbC--JMt&^kG24&q z%^h%*nd9OnwZzUk$@y5X0d9rx<$5}cXTE|rpAyrH-#)kdD(_bnlwwst``q);iTJ6k zg#LwVE|AI5o;O8M9wXsSwA^!`$ru|sHDAmRW2ji3)G=c` z5iM@4n#)S}8CI;y1ml=#ipwWU>i8uAwJ?H5TXJF8u1EY8_cb;Mb17{fanKcl)8|kE z`96zlCpkW-d~PYrb>%1b`V26aO;zFxo$Y#a&$Gw6##~F7;U2kUm(TtAP_-ExoNT^c zi(vFbBl;@M#j6AJOw8a_E-qMnE~I5VHD5j>qsqV;&)igP>N`oc`+>kA=6tk3PVlWA zcQLReS%uGd6t>|VWF9rHxD7V2iZSWh+(yUXOy20h^+!I{{hcF&+T4<-*-3Yn;_-FM zHIbOe>Zvr?s_)bs6n9xQ=If8^-Z3YRAVT?>E8->#i6lwdoeS3pjbU%VVB~><Ae8SCd9$Gv&F7vDW`PGdPqvD0`b%SlWO?3$PS9wB;_JAhc=}yA?&ZPd7x@9{%5?yF zmMi1#NoTc#Ne@T4!Q7IWmlczFS>vw9%~bz#d-Q!ZjmoH;&geU2cj$VO+3w`22MBKB zRRfR6xd!!bx7pO8!Xy+H#qR*E#ITJ37n?T+=iBMJEF!ar^TL%yqZg%Gdh(Rji_TiG zdLcVL!|LA{Fcz7$DCO5+)dJO@OC3L0_4Y}>gVn!}m8Z2Vbou-%zb1=ci%6MQd0B>+CtZQ=%ej}`0FP0VL8e%NDX6ALAM8QiHH;q;qgaX(^ zvFdsZ*1Z~}=#Vu4>IuyoG!OsD^?&*;ps&T|PuxTvh|9Sy+lNRw6Q(LaeM0)#sJ(u=p4@n>^> z^2pKk;7cNaCUaiNF}L!fOR1IyzUHH9k8W&R-5qdU*z$e%X>X0lbYs@=v(_N zHo?B-iK}!Lul@^I>cbr3^ua&k^1qY6&Hy9fO<)Xj%y*`> zen6~t+GLDWEE)3Z z|H}+?1pOMO7Zv1s^=Kiu%C7`M=G=I20WwtraKw;F-lJbwZ7dA99756Q`!>k?yDgH5RF} z9qNo)DH%8*>auxc(mePctb5JP=9!VrIJ1(x8p46kHjit#t_i)`aF2d>=dAJi+7_tCXSt!w 
z@wPld_;2MM@pOE6SKP=W7A$g{|JKU+Eq9y*@L>;;!Bm zS@~dhbdnJVyvh-x+iJ@cO%AH z^E)sY?bI5wXh^KBvTFX)Pol&;hmmp+E5F9eVzKDuR|iQn-X3YZ;U-YowmA5aEfUnI zC35rQ9B=v+HDc5scBZS+;f zJl3^cvF(}!z4?50M|v=kowafp$k+%r6R*dx_`qark-K(-*ZEIhA+G)hGq}-Ea6D-6 zHiYrB=~CucZ0dj%-u{9VhpQJ6eNw}9M#=zLF!!ECq;_7SKA&FsDqA#Qa?kN8iUHJo0k7iY@3HyvmPj%rKX?g3tC zn1R~nIiM;0Bf!1Bfd+aLGmoq!HT_FuaS_;2;F=KF?qM0U&P8;}0b zte^aTxcu_h|Ig*m{{H{C{N~MH=>}Yj;abRQG3%$eJhiR8es8N5xLWvfQ?8F9)JF$u zL9KkwhUSq)s{d($%9x77S+@NdSkPn6k)BWaI;N1iVy}BpA7)^Rm|#O@Z!w`cq_-(>;U;Y0mKQh zYo5HAOkBi_)o(kdkS)oK@xQ%)$nYwUU~R^sI&nD5o{&M%dosek^px?&Hxj+*R5goA6S6^YD)?9${F4S(iFDG zGXlFfIm>4J;;ZiL^n6T?syzago)!#&)_X}Xmg1i0Ac0ZO9NfEn-aI3IZ3gC(Gy0tz^CGC%G*ol5p$8t+WFB<0 z;#t7jA9Y!vdM;(S1PAYP588-&-$_x@4k+A z(l~TX_utc#cgA}!e~pK4^?xq=h`;x~#@9Kek6?~s>9b?}QH?WjfJsDlEPA#xe~+X0 zoWvrJ>LqqdhO_6VpTv@XX7}7+k@&-fz2@Wl99v#+;acmgc5p~yj*l1k$`$OHKk?_v zPvv_gpK;~fXHs8Q&d}FP3yN(0OpGKJ!v0menZ zHVT+Jj)<*U*I>C^WaBN8f^r<;d2U^8oE+D0AneehOx+j`+Ptu!GXT{gy0PP6?=IPy zb=Xo`PYI1Agm9iA&t1*daJO1p3(!`aEM`>ppsPsL;Al#l)yy~(HJq8#@J@m+9mvtt z3X4T^Q?EREay|)W+vN;wwcxy*XZp)l*HRoJkctIW-?lLkTcbiQE{0McH-d#=5vP*9 z+XP~2Q*=PDn>gx<8g)J@f7mV_&(>I#+w34RqB*w-jAk?5GC@hxf1fYWCC&vg=hF~+ z;}tzJOAM>^fvRzU_Py1MW!=W~3uUr!=3eDGU{!?Mj~BfZG?hu*Ip?Z&-Mh6o_I(Z? 
zys~}Y6`aNLhnMxS9@F5nUpW5JFL^%bkCtfB{9d2I@_m2Qpdk1A7q9epEqU#Vk546= zn|!&L0XFkV%2k_+G2X;0!gbp)1KQsohRdQ}Gr|w99`m)qfNpHbzX7=l9o4!5eT1X) z_P)->epaIMfVnphoxkhhPcKKYVEn{+EfhP6YhxikYVs@=&ZC?(gY5a;-}B=?!r}uP zd}MO4kG7R7cJQ&++%tc0_u|2=UEQmjyvX3OS1x??(J}b4-HZuz#OGXb`geAQ=!W#m zd3jNv@(v#Q&HRzS;brMg`c`+@|NejdZ!iDhAO1Jt1YnGy>i>4b8n*B9@yh2V_Nc*4 zdMueMUtG1IoyqQid!=va^k`jz-zcDeqq_3l42asq#W?)AIC;cKs<-08Gkj8=Ej(N1 z;Dei?23z$;%>D2KF6Gv--WdE#|K;|Ynrog!{(2U?GU?Y@_`U0P9vrq3eS=S2F6UyK z4n^DGo7d3jd$>()TElhFIC#@yB)l2M?Pf-BW&~KEhZtC`m?t2^VgDuyPQN980W_F&};0~|h!P_zDu@A~V%LUJ^x{%$%9L0`& zYg6}XuK3t%9<|t3fAEpSZ}oM?L!Lk5+=*5_$@q_o;G<1H0UHg61DWQ!-Z?w=$e;Y9 zSkH|feC*`mXN%qAGbU%D61~Q6k&n|{U%jk#R<^azHU3;XV?4|ob3=@}9ZR2_-}Kw# z!Sy&CeM`*wtxm_R`caM42g?=5Q|G_^)n8ry{XhJVZqN$?-ZY4%+c6gKT$-L!8gf2Q z3T)tToFCZO*KwqRej}o;;gvbK`eol33nRRrdWhNesbyLntw87wZJVEi8?K1qI#liC z0U<9@NgFj*49-urU8vAH#Wj;to|**dl;<(ZZWD(Ia2B7X)O_2sY{pX?-5u6s!Mwq$ zPsS^s-8xXg=W+N`G9%_qj+H%U+hpYW_pZq_e=*RBuO}2?a}I1)g{Y9#7CFo=a?Mw7 zL|UEbagFmcuSehvQ1SFZj3Ig>wr=5)-?ho*pCohZ4IM>?`9(%W%1J?Dg{qe2vrXb? zpyD;d9DV(a>mG_qcS5n7bI>X1d6o}o9mp9x@PQ$edQdfc&I3-zgyz3?dKqw!+g(B? zK;^Z0#O+EFBPnC8>yciT4aeZKoRbTV1_s`k(l?i!_(zYakvTNFU+@cGMmD7`?tB)0 zeBCnYGk?`UU5AWab{g>*%BXWU;-mEp$$jZ7V{;Wz9|;KYigvFHU}_mZ#iog4NG} zNPE(g=BFP%T;6!`3=iD=j7%-QU#OSYuU}lg`Wf#c+SLm+{7X9KJl0G*NAY>iC}s>! 
z?f&C5`Pl>Rv@t2U@nB!{2>f59U*4=S&~rB!(|C{>T-N{|FykYDLB$zR06`Jty(x z_ZS?p!*?dv{N#4OpE$1sX`mO19vKpHeZh^%T#JM|oUzI8zUbix7ff_y;;G;B5(Bqx zXR+{rXB?lprJpmNUhfR^PwnsHUi(|ev&QfB+PTlLaPH0zce=50%h;@P3eD~A@+W`& zzvwf}PrC2ujs=KE3fg#&!i00);1zOo5PDofIbLa%>tjD8I13YVMN2)gs*bokOPGtZPDHPWv zojJoJfV9ofxsox8P5RKtv12CBpy8c7!*(n=?R?frOPR1YIfy5}ILyV^s|e<(;_Is= zU#X*w^0(Tv-DF%jMnL{vH|t+thr_V0La8;%$d;$`0*H+q3E{7bz4I;Tl+|OvhSwnaBpouH-IfiP&58HUk%tDu6 zOY=tZ>HBw=H{V|_Z+_qhtF@xl9KZg_7nj#Rdv$sLl@`7J13_BwzSKVM_q5Fy(pfO` zl=%B^^aI`Bzt{fO!=i^*Uuog}`rYNLr&pJse*KC+KXQ2`9)Es%%()+kvxrweZ{I#$ zzWL?b%lF^%n}@h*pJ$&J zy1^PJz<7LY{iKvv{kc6n7tcu6oYn55O$^D_=Vv{CJvQ(5l{RwqaRmKf;4i=X6RnkX!_r@&+K}7*Za5tQxZ!-`Ojy#N+vKr?ix5EB zOy5jLm{8SmqFa&2_~e1FH^Sk*667XJitCPo!Bz_cR7Lwdk(4?GDiS)OHviVedQlh7 zEu$I-bio&RI}*RKT}P{v`fSN@qR*R7hI<7vQc!*FQP}X`*)~XxV{$rg5D(#AuN$rk z-EY#7bXC8AzBY{WOpYN`PSTAW7{9JYjodqfyDHHg&e37;I3#^{S`;4CmK&5gu5I&S z4{w44vn!HV5{ILvxcN;R&j_WQCO3>E`U z+F2(Pz9VZ1YhqofPRpULPZ2bZ?V4*&5*9NYma64&$56l9yMz&Q-T+Cj`Agb9d7EX3 ziX23}=F5ebG4UNDzEl1F>5I!-EpmVR4{tBu{Jnl?Tfat=&x`Zt=J|-k>z``jtS84m z)k0M(RTtK`%I8?TXcq3B7R*0<`|k4VzY))`^c#zmpWgt*pQ7i<^;cRj(-*%k`3rH; zn?L@+B3wODk45sEZ{J`3>d(HreEW}Y1k@EmTu-d|Lq9bCL5pNs`SP_svhn&$KOuj~ z=ck3M>vLRJ;&&0!hg#=TJl=99XLO*1*BJA<9pFzEAG5}JH0Qa!_VM-Hu7C1?PYAN+ z=sk33;@#Kv`NM(TiM~$OJ)-1i(7}f%c5spSlM`R$nzP!$Jx8XWHRsH~;_lhzIRWv~ z@JdMHY|JOAe$mx!$ggk1`rLIr97n#yRtfjfcGl-1H#dFV6&UBr7dx`oI^^nrp$BJv zEtbD|^J^8t(#=YDEB|l~W_f(4s@p5E@~1wn8ZbIF=LlBFo=UPqs^XP5-167_1VFrW z-yEfOytDApbHUm?i9G&<-4DRgSdyyzXrlQ1#uWeHcAfYlJ6-ohA2PS|YILj5c>L#L zM~#^#d%Ts|mHEavzJ@a1d4@cG^XHtj$pbZeq8*=am%{MB8IN1iKH+^{Ja!-E^-$1} zsrh#|e&=f>t6CJR*LDx3AGb?j-NqE)CfsvZsG}>#NT{Bb!8pybWm9_uJiqkRufGjh zutHZ>akZjRB{`?W=c}C*`GHjSX2Th0%}Kf(2?hX3T^ultt*Q3J_G|wHBe!83MsKb# zn;Ur+s%)+rQ$>{M|8zA7u%&j2(o;lnU9L8W5m zKFP+z@oO!Z|Kk7r zwVn#U^rDu}5!0I&)q48u8|lk8{(SP6U+OnmXo2gst`@lf`Dn%4H&2(p{L^nQzy0-_ z%U54&Wef-Ns|kFm&z|Ym)nC5*?OQK&-|8b6Uw--Ce11KM4<0X^wUGbwKl$eJ=GVf0 ztR3ZTw3gZtHt`)JcZUm|5f$mA0+gexUO-}0Mu;5u#n;DjL#<-g8|hw 
zMBnWHvA56FLwL69NFyEN>%M#AB?mlv@{^O;>L;EWD<*d5h<+sD(MPF{o9OrKO`|s&ToM&8T{(Ci!;`I`|j^L-K?AM4Jo-wDI&B5fpXj(2Y$K-x}r+19XXYo^E6Yy(EAdP7cvlcA1{TQ3stj5C2Qe(Ts;Xe7sT3`Izd+lg)f)2qlTBmhwx(0H|$@$7X!E^>1Pg zv@=@CG0=%S82D&;|hgMa98c?1lJ!_(aGWbrEWONHq*EkM#(GM za-`qNH)+jH?{w6{MXOgEPUsr0&MR(rJX3boAj>)H-1aeMDL%)PurtL}&dpD1w-D^~ z(pL?2@;K&RCu&n>6H^)abqA^A$vljHA#2$icN(cq<)4jBeE9pg8$naYz%(b+8cZWD z3+LFGCKjp4Z-4c4`SXAGo6A=}eW}k3>+{E7>C@Fb7}i%QJQ;rV;>*hqzt!iTf6$X) zEsj|rS8jEN&i&-?{`~vPKmO$p`WS}33#rBROCC<^DX@S2oXY!)o;NQq-~WamAk@b~ zc(hCd+Pz>7&tH6d`Q~qaxV-$Sa{T#YN#(N+_oo8&hqm}!_V?es_mgQB?eF!2gZ^0- z^Nk^I!?i<|YJ<#QoKH_8njYpvJ^Q*U_j*lD@H3QfblzvAPiXSbWPpjsj!Yl1pDW?* z^Ml?wqesGze%4Roy$|r&yFWTy^|g_dG!&u~>;5Em2`)Lvvj)3AIZo7Jwd21fJKq@& z%q`rbb?@QyGMG0Bbhf-Cj_MnKr?+C+jz>MflR6+%E3)Urw=wQD;^1%M#<}j9y?*@g z^xE-vnR(jz)B{`kIKmWORrwvx^eoU?Z_td%p~Kqod-tOXxdZedbl_$-_(aP8VT&R6 zSR@Yxl7k$Ju8m(l+>VvxraIavX@ss%=eFzAyqpX2bZ}3c7bk27hc>4eYHR`0Bb#|7 zfo7LeZZgV=*uHZ(a~xMJ9#r^ zSjNbCaIW->8Q!+xhOdY1(*gco!)d}ZCAoH;>p5!m0a7>pljF_BoR7Kdxd3%kZsrq| zdmHDtY7@_dj6`b06-kBYya>)Vt}nvb!6}cid9yy%QKp>GdmJicJdWg5O5sICFJKE% z+U;~FIxOXj)yP(Yk`y=~1IS94M?O6O+G;c4aP`Ic{geT5V` zD{Z>RTz%Bd7E

    @aXX6w|LO1Q-IMndrAzo=0Q%4?X7+Q?=S!TH+l-JAJ%4}s)cf$ zY&|8`DwRd=W&B0hx8G=i%7RymR(|cMe|?>bdC{xSNB_MR&aZ!$`4Yt|ExP&qu^Uno z^r8C`pGW@TI~LA-45S(ooqO@eM#S^WKhqQIp9!l8c0b%3+4*>fc6dEzJ(+&U?yrp=xz^zfJ~F7) z?pQFfgTwB7c)A43p4=Y0rGsaS>>Ry*=SVM!6H6ZY?kt5k5&#=^C%_kyU?Yj$+g2?0 z>bu9W_H%|m!sViVwC0MN`J-rE{$-%|?Wn#6+ab@>>(%F+vs%+fx@$?xPKX~tQ zz?se?!Sz~a{NVbQ9C*T&+Sq*z=^^>_m=jH#2LZQ=(pc>5gfhZHXE$}21p@>+AlF0VsPwLK25C;jS7C$N!M@A7AUp;*T= zXse;CnpahJ@M%cN9GvlwGgWmocW!sob1c)N%&qj<}CL2faVe zw+9ER=L(c|wl=Ob`f`>IvqWezc~@D@5p1tb-Xse)z!6IvxAt{@$f^wb%fHQwHUL#? z&=28o9lB?mqbfhDPKXo-Msveo!#V0u6*3MN3z3{Ve`^QTWda~4`%n>bZQaUob0fdD z{+f!fh23bj)ajdwV*By+ND}G2wmN@xoy8Z_US8d+Jw;=lTv7*7FgL?%qwcri zR^_OouS?j;^IqVpxwd_Bi9`J1@^^pnTm1mvJAeMze;jGF$O4xy#B*;)&R-9wMJkJB z7OQ*#%~RmN{?l*d(_fI)XO3ANlV7K~n)CUUiu;vTzu)~MpQF~I8;*Xnw09!<`zKFF^Z%DD1xfrIUsanbZSy2aFEuDzVxP8~gC>T}(Bf|ouXe~&MN?1s5+6m-2=P}QW{mp(#D#SMqW zRl`{y;p=)E{*e==f!LcnD>F;tVREA=Og}FzxUsHO*9rUAAJ?^*F+^|5`ebkY)b+0d+7$0O zrm60FkH!KVPCc9TT9wZ?|E+!ikB?*2CV*f-%{wGx;5U2lH1_TH!q|q61&ls_thMjo z{D<%K5s&dh;>vSPNXi!vR0I+BWUTQ(q*^)xDB8J=a0Q`6zkyh4jeZ8(QOO@D^sx!M?r}k5IRL^>Vq! 
zq^Il1x#!WG)XaFXliPi#m3$Z3N0YzzYK~T{U*v^a`$%s0lfUEO;ues4!L51N!6epY z&RckqYyYl0k`gn%W7}9naMdSwFKQ*P=ro?$%%*j!UV8@xz7CrCWgpwrny+Rfh;zv} zNplGf+9%&Gtpd9eJ${_j+blG-MxC=tP`S>Ivl&)(&>Ia@o@>}?kr+DxZ8O&NKJNuK zv<@S$c}jK7EAMplGd!J{$umy*=XIteV77xlsJVLujCeS5Tq8T0OXXcTK63o8F7(1TrX`TZK{HM-w$)9SFBsv_-q zIZyQh-3sBad*4Q|wWJwR!HtJH;VzD3_1C$Z0V+u1psg5?3JYNO;XMJ3I zjlI7n&W8|u5ajjTH6A-M_}IwoaK;~gWcWMo$OrZ)AOFpTbI1UmZI58ia?hGN#h(Q} z(SO#*(b(e4<^P=0sRPI4nJ=VkCSFo(J#RRi-+AmWuw&$SpMg@sg%$ollH*wJ(!P1t z#g?9lhX;;&{TEy32nPJocEvYp#-IGy8rYr&J1*(`AHnBec;+BSihF7U ztUg;Kjk>{VEP~Cxz|FKYIehqM4B!|Gk_mEbV?Q0+;FvkGna9Jqbd{^f@LE*aQq@Qr zR3Egmwd4N05TTwYC`m&m6?0&~;r2NT+0|24z=LCm9UdR;^j`3doLNBt>pq!;^>5TK zxfE#}sA+GnlLM2AAEX<5<1kveLS+6trYbS&R2jxU%Q0^D>DcJPUosQ4^_p>e1XmL{ zc)}~`zBc;(oPbM-V3HfV$w1wkj5?>1>Vufu(ZLh}%g^VBvl*wA$8Z0h-ztKWGeWO7mvv;Gls7uBvMz2VogT z=8<1{=X*fn&yYuXN4_J<^%{9J4=%7r_#@w1JDAk>9C^}fzJdoAOngTYT=tG%c~iG@ zocX#xx-Zux#~Hh>nzF896Z9`j$+exXZ;h)@eDKIyIUU#ao)g~g+hJqyiW(bQ^6F(+ z0eqb&F?`uO#~C&lWa>mu7u_E`^*ZN@C6=DZv3>Bu2w&pJ=!r#=&igda3i!MkH(iOU z)84@G`DP|5wvj%%aS!NQcbhiy$RO85#lfx5x4*$%yv5^R2{-(t*2c|wtAd^l+Oow6 zg>UFmMkAwa)k_jGB;+&Rf$tDcgK-LuHzRs2P7=m-6E1Uob0waP&79$#rQ}E}DTDM_8!370#wG^`o1A0ZF$<`pLOzFv4H63u z%NGsK;gDqP=^^eKoiNk~N&v-nDdcQ;K(!ekW6I4-4n6ge`}L>6=Kj-TlfsE4YY(0D z-S{TB!N8JpwGjwW^~2G817Fb%1BI`Zfco`EP_}=F&T-`()wNHDAvbn8#>K}|S$~mi zDHW^_6dAobG1qRfWd00tjy0u@bKHD$p!jxMaiy>R+MvI*K|DHR+3-UbmfzdVClrhm zz{8tK6s0(Uaj){{D&%G1Jns2S0Z*>s=jpWJ?n9l8y0GM4fUicY1vpQ!mGActdHFri z{1FxZ!50Df6@_Vsaq`cF;HMV$T``a zucY0O&&7ADyvN0Ni!0d3qh61E;qG$9g%eElTfOxB;KLug=cC6T3BL2Jc4D1Jm(BS$ z&#@rA$a%((7xBe6=s#;Od1XAFiyhfh`#g2sc>%U@#Z|JdOcu`DRyU%}Zd$vLp|sft z+Q!hb=1tCXseo^s9bhgUI@gGAok<%;${=e_Ml1n0)(WgPJlPGym~D!)>5k}7kg}5N z25_wf3O0{~vj^K#24C8*yfiTi4>36%7K!9+qhnV7k(hJfB)8RogKO643NY>aOR41s z3kSNIvP@paZD+tGuUVA4n~Z@NT+Ui~{6;C~@VB>G!>_Kno{vuI*hz4B_!Bdc1J<5( za6jSenfNrW8ljAUXyP#sF~JC%`M@Dh^&*7s-~idsjKrd@Hr&g3WbR!@9+^6RI} 
zg3o&ey5bzB^iybCPTpRxdiJaZFn=+Sz9_s`)-PnqAyTYY5=k-=iGzQm9{7JZc+kAAD4iEeU7wNBwnKJc{0M?K-nF1Ht%MPIANB`S7n>kDtwjF`DE@x71$o zXZ{t3P0by?)yJO`57stH+vqmqNqo%%j?WwQ#?2(Tv5AB0bLE`H=NVf13=WxEk%>{K zYGsZYyKgtnDXq|^hhUIY%X$IG}q6rE(tU$u+_86DL+hw9JVyH?7WDN!b7#PKIK zd8K$OjIV>ZhIHDJOUN0GugK*XKF5o8GMXRX@K-&?wE=U#Ay(Uv^NR%R#0JDQb>P}_ z1L%s)i(}yATQVzcyvJ4GHMH$B&kHZN!h{V-@}jeS`mlcv*b=BS$<%HV1q| z8KcO@Js|TRyy0oHegKf2zW9tZ{QSgV7U87g^1*cVxodpz8?S!+1YCX68(hvw-oRqA z%~+XrtO-1ecH~Ft^z^CQ$8wzIah})M>LWg1hn{TB-;irA<4Y`K1ruD?6Yp}xg)1^~ z?7496q+RdCT@Ok3J)*A{O?Vw<+E{dZ|1CGu^{n#D9PR`^WO5&I^f7A`O zW8&-bEZ+USr=Gj&J%?ByX8>bBoWIwJfn|?>*Z(pG@O@kR>2>?L?EaqL^AC?+{x7$>>OS#mb91!t@^7GK62 z+s$r9k;u-NH`WYuf4vhw!a6U-8VRK)Uh!F4x|mxU!44qgY-9SNvNqm6$H^*uRr4vD zcczNWb1{f>u7^25<4wCctJWMviwxBv9r!;7u9%wM)yx-73K>UUeBl@|Uwe&f9j{>9 z#zt$!TBjDsirHAK5zaZ{h!~*GUq-oDXfWz5) z$oblG!&dx>htJaBx?jf+XRf)*5l^Esj=&(?W~4whR&*jsj7$}z3te$LOWf7NHk#6k zC*$^EC(9t&9V`N!)_8=Ta6rC$~Iax8ZF zsNT!h`dL3c^dsLpVBz2*)7h-)wYg96u`p(#%*QlXIKR^V`e*#bl=}V7=Bcj(oTLob zhu7yr`R+)6Pk=wx;yHrL1C_0!t}DeKtpR~eC*<<0IxaH!-zr!!_^q2gO?Y^rz2}-UUggmx^VQfcRYIW}Ji;m3R zdDgl|w)+?t6S?LbbI|dtzS`+U1igi;v4fj684tKU)B9ZKVb&gR+|aJ)V`4@OOctXxSF;I1B9Oh?%e^}aLhrlr>;Ukr z4%w8-P8f{#Ax3ZrIs15>#(`}l*c`t7IE9g4SLOhdbZ4X9I3e44X*r7fT1wGjQ{ilz zJmeU-NAIr9GZOnDU}-cq6dUr-BE;O>07;(-@mFH%S{hULfexndT)TH}ttyytlt^=f`SY$moF|~|K2NC=Z6OC zulQBJeyvs~KKdy#7#7(g=PwoVE9YK_>0tw9&GOeJ$vGjJvTZsdT_}2dOW<{7wlX(;{bSX<1+Z%Td(R1>YzXd zwhPTGZ|9vxc$53w-jf%?*pcx^&iOl;$Mgd}dUB3>%ZUqyyx@{gFY&E?C7(QRW4^9% zOFU-J*$PwQq+c&U12R}U@ZNlZZC0t>`F>3k> zG7s}QwxLYhi#^3gj)4lEjxji;gu~d>j=$ktS#|6RZmnyO+L-a-h@b3H&^-I_w73dt z8z{`U2qZbOzy=je0XNaXTb}s^?DJC;5cb?H$8qPhMu5&IUt8)Nzmpb@_#(wId&Wl# z-XM=IIdqHb#uW~}fac^d+a2)=o7mxwNo*%WQ2DXowH|C*UG3q|IlY>j*$K*IpX0@o z9R&2Vgq4xg^_mUfSDUY3W0|qZB+}@uhd$3E$59@M!vDIB1 zvg*IyS3puIan4W_w1kl!B_Qh4fXfLFQ)KoUu+PLo!^jA z3s}Ocpr^#If1;^165>^?Lt&(95> z^oh(w&*T%uSjCd-K3a|V*Yx-;g+J<@*y_ioM&@Vb%^k4E$(eN++f~wugZgbhLLK!8 zE`9g>=)^My(T`0^bQ;v&`$Q*yZhoP_v+Xlx7>C-O#=-vKmLNAN7F%U( 
z<1SM2Hoh%!^Nl=2!lRCPi)POE)~w)OY$a&`!!tOFQ?V>gOR+uBT)2QkCEcqUk3okrisX0mV|s1knkuo)K<6Z45<7LaR{dCqttqC1Fa$grH=7L z&XV7nb2cWA;wvJ8+_aUO_!m6y)uQjMYKSM^JT?EFUwmnf zm*TCt9a6){Ht#iYjFSMj&w*3wy>mkDD7$la2m7(%zBcjw>|=>q-dW$Pzt>x1tKI0W z_P%kf__H{;U?aob`_UiE@tI3(CQXNwp3wbQ#6384T@T(?EYA2kf7gj;2cKB?;qM#u z#_Qki)W9I-IAb2$rp0qC!xMRo3hACTId~aE#sNBjOBl!i06+jqL_t(~B;j6PXF0a*U7+{?mZT)0y{+bXrm8cu2IO|T2JxV!I8k` zKCjt#9x!b@&T9vcV_;l&^zPTq$ZK#K-I?LGbA$|GPVJP;fO?(5LQ2Nt^m5EH<`{X7 z7pby_b0_7U@ojqOr@2SvO{_x-3H4m1m-5^*Ci30UMs(sT_cb;%wC_%{K|^jT-^AT_ zKd*r4-ngS7ch~5|j^2lTJ&ym6ym$MxBs=y(61O4t9z@8U-0K+f#%K(FcY(XPyB-@fGQCCW2lTEU_x~d$%FXCGnxz5VH z_o*sMGA66`j#v@j_eHFgxpS|x>zq@i?iRoeZq69(>^&3GXc*z$2$NqP^pTHT+8`k=(WYpm|BE9|I-S&l_d~5sk`%mL{DnH*o`8YPHPh$feyHssb+4%B3(6Ms{ zPy7o%@n(Fw8~1)~cKKfC7uy?e-fchmi|=ord^a}E&*Faa2r3ZE|X2{fDC@c9-F zK0&^F5}z_Zd9i)|%dy#&PJB{a8|NqSZ681J^Pi3134Is;Im*-RX>8`7#jmJ)9#%gv z_zXRI|^bla!tp$m9>^NJhq)V#yQRy>>1uT=eu2L8Gp&bGZ#I*$}J4B`?HWPfpPW) zo*@!fk;s7Og(Az9HXWC00VVH8r>?mzp6jgD7gE~M>!?wSJjPx4 z#P50?;<_s>$J{Ge#cV@}SYLVTsRp@t#8WR>sfGP;0x~zGi%{;!ecR zx0Jb`UZ?x6rK{YvqTK$*RA_*a*SxBi{XME~ih&ug7J#)OMd5ls6mJJTxH-b;^>IN7 zsaMsjAm*N20P;c>$Z}X8)Z^2%oMc9Gt$Cft*qWVW@~{RGNZ=?-icmZf6;46`J2?7e zEx{$N)>uG4vECeop#RciO@?%&M-QfU3osr7U%6b{7%a#mMCmMFy%c$2s>?dBS##rE z`1#n($82K*{^<{%ZBOGrMnUh>kK;c`QD1?N&Gy^xKHk3gmDo7fCf>hM10Bs<&y4EO z{iLsPuC*eE8=(mgv-i%!g5KX(7U`=Y*SMr(4AzfXnD`})8PC{S@bEbg94jR+ELO;?;bwJBqekz3w{f)X(ME7sTV4u!^thjDA9ZyfN_cyN>1+ zF&(Xc#+q+t*3T1u26@eFoxiFZz&?jdygnD>u&!Yp&0ROQM0rgPoEU%?r(LKW)BSl~ ziG%j!sFCc$^Vw+|y%c(s2-)Ig0x6iqBH}n^=R%bq~qmTMdbLA)?r0R_vD-jocx$BYEU(j=eJpR@Wwl_QF-V2_EUfFU47cjrkRa58*%DW?#;IzZNK!NeIfSA@riSMg8lJF z&$drKij8<|#QHa=7|9tik9CKr zD_;vb&3$Nrsd}D2wZ&OAL2Y^n-a%u+I(Ie~YSOdkYv7$&guAZw(|YZ-%+ZPKIS$dW zY+&cpw-5EDi3Fh%qgRT15IMn2ENl*`>UvadNx9h-avC;G`phFRzAgh~*IC(tt>_Sp zv2rbt3sZ6Mkz-BerR{Bk*pEbwv6>#Eazd}Jn>mx??1g9bW=U(=9>@-gHF%~Ps^#M; z=2J+ghV}FfD7f-)R7vC-x%5yNE-0Sb+TRGl- z?~UzqKmO+S6JLKPehbGNsObFBBYNnf6(1hQ4E$f< 
zuQRQ)_(LjP(w}uM)thzZ@vL>rc-COdK_B*S(YeHl%OQ_`qV3zxMS6VIBn|e(az+fyS6dr=#Bbt^=2R7%<&qD8(8((PJ*>~6*%WQvaCl7 z419~(?L8A?di~6dt zT7bBh4>tx%1PNaImQk&xCuZ^}6aB?}{DWVJ&@mFZUKEN$wc?3H4sZ&mpvL1#`XU89 z^F~!Ppu4yGMx~OLy2G-8QAQ4VXz^Gr-F-|7W~CBmaHx~x!y43V4f85dm_Lpqgr9lq zWQ!^iwU`&b7VcO6@t3#X|LyN=-}>ec;uGU%`o7`Me*Vqvy)VA8{n*!Hvm2ij=eJhn zYhwS0kWknCBR&NTv7i3Mcef9I;+^f!zVTiCARoW3?ydNBai5JJApG1H-_%B3YZFhS zFD*ch>6b0^+p&56rCF zPa#IkMA^uWH}b%wN<8B+Zw2I!ogl$>-Oy~JjXkl!QGbn|^}xGODLKIgOR2E@WyqLE zz3FFla`{H1egdbX`V_oEu9f%*j-*KgIObn6d!lH^S`KEP0=wVNj9BVXxaK2ALp_ap z(^~6Iu-eS?nHq%pUNob--aXG?CIGlJjdOjhwBqRiQ83Y3N1v2xB>U{3Qf?t4qG7X2 zftrzC8#mWnhqBk|(!2=fJIO$WQ%xX%TNEQamPGGYT6K*LFz2!hL_LP_@N02hqzQz3 zmc2Q7|##wjlZA3ciyGCMwD{)J!p z;`R%_9G^JH_ak$k5D1lNOod;p-@0XD_yQ;`^9C^X^*$F-;E5NCQN&?z~R!Qv{&`r`>*DGxVuL zyJ&s>T~Lawo9n$5_+ZE%Umu6`axmjv!(iua9lvC+VV!rsuGz;OLoLSSjqNdf$Bmow zj4vJLgK_OMJozUE)irU3&BlUY9EIST--*ukn0zK4jyWCY#@Cn3t^`+@(9#iOAG?>4 zmAEW#H4D2<@ z&Et+&u4=Aut61mT_uM=lHnjVB1b24$W=#I9?qIhy!X2{IQ*Izt?l@8f#q;U7l1)0& zaV~j|VAs%mo5DD6HQ&_W_0;CVhD$frY#2=D5vm?ktP6(^NT!lIDfN`)={S!OPWs7Z z1*hH!Pw2bObS|k_+QuB#E~VAg1T#E)ilnCGsOH<6F}Z~dm909C^`R(-ri_}bYU~AS z$+e_hm&ch$I5c%FvpH!}Zk3SA_9U@}O-zea(Y)4nC3*2wcia0SljayUY1LZHqt8=3 zm#upH&|axC41GnNI@F7xuJyu4?zmDFm+{O}h-XP@7*FCxFw>Q-0p?J5A2T;ngBE^2 zbWWY@8X{d_R?4MSr}KLe`~YSdGYT*b4-$RQ{lknZF?6!ZxOXxfIo!@n$?J z_<K29klc)5U%?Y98M@A0MQ{A3}#bKMH z>O~1MtC~iZ?d6+)&~@B)l3;kiE1$Ks0yU4hoBN!F@FA26ormic14z7q$5y)JiK7q4JoQzY z-3KqsRFAlixsKw-+Zx=g*QfX4bl2>@@|=gz9V@`v&PT=WWmedkJCB3Yd%V|A$6uIl zhh6e>Uiqy4$3%k_ejlPUk5>`bqrI^v8pMde2pWkc=R{unUI$ZDyD<3KOFXxA0IRc= zNrWR%x`7kD;YTxq6fRkdHk}VEA;$G^Lr!TvLtTn)iSpS(l05CRb1f8{>g&|d$~bah zj%*F|8VlU07tN?AbGuIY%Ilv=Ato^I9L%w5MLg$ap|q)8q+_aS?Q4Zv*QDN9Zu4h3 z#&gWrQ|o(d5MzVj^N{ehpsp>ik8KckH?OhLZxhwL3W!c`SFj#vis~n>;e0*p-Sr83 zy?TvA-ha+Afo#1QmAUFoy^e+hLUwObwIG&MA&JDXm|x8ag(FskNxd)z+9q_r&LYm- z^O}x9hyxXz*Ztn^D56~Hc-?9^Fo*HLhEwESmOuvg*pcnm8_~UXc}FvdJ1#jI$N7?$ z&KrKfHQz+*nqm_f4?Znsqx&eXYsUJG6}5S;#Zj$Zhq(5-yw1^y0e|u`soCjXx_{a{ 
z)~^XAC!T%ZB^P@6pk=-Tn!_vB$UrMANPlXu?1?0Gs2H=&8_iJd)HA=I8!+?ujCU!n ze&~sQP7Y(7zJxVqwq2w9;5_cSeQu81&f3=K@k?0aW(+lZUE{Fvk0SeFAAv5i@(+S- zk-gkqiwfY;G0(oWM}BWdmmqOJT57n~G2e;KQH}j-6~uV#e7GmaJvLUh`Z5MP<9hCz zf6#7V;^_8AOW$+PbOfyP1je`!uGD^RI`&?H3V5!EeCzWNf9(T$=B8S|G+rBrIFFpl znhz!Q19$<=8Rjj`pBiXArNUEP1M0u<5%L@@nF=T`4uRCbzP1gf3MJHiYVUR89Kg|y zy2$v1++&3!QeI6^FwfL;j&Z=y7o9YZwu$uIBo+hpQqrjw8OAk0hX*`D;EnS?$Y7>5Kgdp z4ySt7C+3GIIL4wEI*fZTpN}lFWECzgN4>Uy;N-nE>y3396A#1?#$|wRYUgXbUwau> zi{mL+W8tYc66Lgj-LTBdUxag(GnKXF%f<7E{6fz=B$HHqo3#72cmf`zLj)fA_^54*CFMo+IH^_Z>3bHTF* zFjCCsBbU11GK0tUtCbel_d^EBRalI8(&tDk4TwXgFS@Clt1b`|USm9L&!{T1f*$MQO^}NIoMD-|#dTPmv2fI3ToExUGgniH)Pyw{*ceG9b+vIhrfoVFxZy zypzmF%`fLrd790Jr@H8qi+M;Vp1m>CyA(k(@d&7|FZDuSq~g#TmVQR^d4xQN2=6Yd zI}n`~m|S179W&}1U9F8qh(|N`aK=}jvU?v9ta*^u`P3LtJZg>AizW)HAJe%6Q%Cpk zTbV>eQ1dGwzI#9PXTcE2c7ovE2zc|>PDDwHnSPP$ysB<1lCwB=D2!{{21kFws$6_g z>Z48FA(mR_L{1EH918gL077NABMd=~NMu~Aldr;U(Tkg3GH>wK?>-8cAWCA zd}x9&fx0d=(Oey(_FNTo%4UYKkNx^`qzY9ZFmfJfI2aiI>a`g*i@Hb(Iaaa^)pCtB zR4@ao;aUSjy#|UAJ!t(ECSQ*M0})^C1&BWxwxw6ih)R+kG}jC{RqY7^iXxl3JWCvz zloCe1VkICh(Brt|TvZl?t^ti8)-@eo_N_$rkQZSFN;)+?D6OgwFgwTYIsBcGaisIQ z2&a;JU-08kg4CCDw$yr^2f8b5$MgE^v&ib?ox`q2{t8RZ59d3c{(9bh zcZ_+)GByX>=Q#876XyW=Fu){_C0fD*YiF`n>s%m=otr4isb-xJPRrbb^Jf~fCgXm3 z;i(7aTMLUlJE-S5$ksab`tC7+18Y6c*BXuw+iIU;1~_|f!`-dE_CR<|d&I#NcJw`x z4|B}#>+&##3-bw~OY4zSi`zMKsnf?f@nhA;xY7D_PwNABh=r$dVbkQZ;t3XGxy}cr z>Z>@(?*b`v`W$6@tJTt8TS@d>pk8BHXpdKcJ{LHmE0}zpR|iY7s0Y(~V%_#j(68|z zADYdfvc%&t%OA!QH;XsKv4PQ>X*%Ef)G2FG4@#%e%iecNC|{dti&|6TnP5=Z1CB9) z9t_{}?|oPTX&$099`2534WwdPPi8Dcj)=ByJ4>%IO{b@;X}`W3TyBTb)Dd^AI_Ay< z8qudkf|{JGv`G_+Va~gO1 zv4)=KjLo?6l3a-90*UAu+`o4gZ_tOMHiy2$Vdd7ig{#3)X}Zqx%DlHm0oVDEP|kmx zAB(rCTfI#!uhn)DF~`L5p+I=E4tWc)cI5pKgM0w=w*gxp>F&9HtrN{Nh(N;>e?JG+ zSTry?Ufy5O$cO9LmgH+Q==#w`SFt(Y$G7;_+}GpCBF{0~BWfR6$GTWzj)Sy6V|JCS z=bAl^@3?hrJFfmAbuZSF_6Ly*go)B91q_&c<$W>L!hqK?P0~#|ZOu{BiL%ODd&*rK z!hJ}+i#=Q`R(a2734h6XEVbs~745UR%`!jA@q8|*bxU7kYI&c@776;XYFZ~>6-M@c 
zt#Oq$tvrKu*=Jl=f6ClHzxN}FOl@UD>^S#`QoxTO7@arg(Xytf5gL0qJdeU^flwgk zIP&EyoIWwdo$Cxic&($A9fcD|>CC<$k+Usmla5H&k-+{mPHcr*8J{acC}gStGY>@L z!Wcb7Tt0cV()($?_y>_{>Vf7U5q+rle7>yIdkK7>04n$9Txv*jG&;k)S~7V^d7w|K zJ;*Z-@75zPIr?g?m7Jb=kG%k^vp z+h-Jo#ToM9H0ja5V*7?SRkQ+j4>|{pfoMI1n`NqAO9|e482pDjTt~j99?oX@li%$_ zbW$#q|2V)3}w6Ui%wIYYDNF~$@BEZ$*YZozfVgM7U^B*XpR<2Ovtfj{MR z-~;_1#hm*Z(lg`Bz3VCnT>VhRVGi8rIM8Vc20c)|YKK3r)1Zi^b&Y5xvd1o%+P(Jy zM&n+sy@EYkYT%Ct=Z-N?I))RYV-L!JPp9x@fOcwQn`Z)JEAPEmB+MQ=`g@PWOT;y0 zt?Ei)oo^wzi1{JfELen+?6r?l6bR5%r)XJ<=gRj}rDUa2KS7<_ec^kZ-cQ*Yln(-NseKH7i|4G_@X2`91iI1TIOC@gR)H z(7q!1r0YXa-@6(v;_x)*;0jjHIVue;YKo70XABQ(ZGLGcleKV-jL;-?4rB#dMK02c ziG{Cm21g2*2W2(ih#wDXYLyut-HrmJ==SfH}`srg~Aou!Un&6?|$Q`f^w{qQsbA3*Z9cgh0Ap$lDE0^cDS$UqhP5IF3F@QZjSSI6FkMUF8l@Nx~4F#FPyo=Bxz;?=ZkyF0=C{ji=`C36i!hPjV< zG0vW{Ud~Sx`sXL=+naNa@Ap0W(44DB%@EbSyFSOP7tg?}hL9R3C$GKi5j?$ea8A|f zIIoH-m*w`Fe*Lxx3RxFV>x5d0>&&=^u%!6`De-lIgr@;H1F|&zL3K^}naa5Otu+l1 zWW=eT|jYlUDd~VbGs`=R$C_ zo~ThHZ1&C%fl)uH87B~)3WLo9Otz8pLWZ-Z)c_l{!4U0-=bgFALXN=d2by5JH|co5 z+{r0uCnE6|NajX%02Sd}>!^GXh=>y*qWQynxh_UCVVy(KGOGEcSO`R{v{EgeV_v?N zm~)=5y7y5Bt`BloNA9MkH{g+kAnk51qTwO0r9rf#w2vI*8;Nv_#>?dW;msqw6d< zqBzosxTsb4{`pG^^Oi&o^I31~JrKT2p_{`)ul&lcv4(>>kRvjxRbPl?55}rg70eBe zFw_+=ahhXv!pIF}EUCUWN$Ut;)~PPbR8Zns4Nbp6K?)#`(Xac%W-oZii5Ji6iGXOC z$t->x?xp0DK#5QgIWPfDbUc@!UM>P;jl>2g`U`5pq*J>ObXB9n}C8 zhxE`rVg(5VwZPR(ONoHkl~06{B4#dsdD&Bm^Nf8EBAM3CUhk4?YMU@WLOHGj>oiw14}wO^#ip*~>6pYZ8fdA>r3 zS_2z?hEiPp^F@Bd`2t5h)HCK8Cz@UrWBo&mvDPB+>~Z5yBB&kP-%8K-UNCzjxO{;X zY+8&ukM-3IeZ+s)@;rp|Qi^fb9dmsNXQkm7mIsHed$*lS-hwmN*p&tt<5u+@Gsbq- z>bPDHA5M?0xQ-d?I=1l7G@j&%t&67?QbMKLiQSLRSn+;Mh|!A@7+R%WvZoC>f6ew@ zhF?8h<0?--oWK|i{!PSxyFp$ntk3l=EAkNRbv>OmE+{JX%Z9qf99OJTcz}DwK ztYwn1KbPzt2+#C&FtPwdT(O;6tsQ*?xf)*ew6A@y?mF3-pB>GHBU*1JW`fI^JCgO2 zAF+CEkUjK5+@MHgPZ1a~S0-Q|l}RT<`*O9pS1Rl1O>Eb38Dl8ydE~1(GOc&v=J8J6 z`)*SpotjfMFncSzO6S&fwI^xk{!JWZ{)aOTNybwSGp2>nIjcOVmJ;#sR7u6UUm%JQ z)><&&5zlGlS19~gNhKzo7+OXI(uV+gT-z!mZB8GULzx&H=1OA%6iP{-RaMGzwYF+4 
zp1P(0+5njQjpQ#5ie8P@uir3TaVJ;C`>2$N);kHGpoR1q0O6)&v}?(2#-W~T2&$=r_M zwO6d_k$TQ&|21Zfv|$Wjkz~Nre8q6|L*J5(pmxq^)Q)nWs3@NQUNby)wwW!JjHx&G z7rl7>c$qzV^dvUK;Nn4zs*_rgXFa8xujSNV6%9XZ?8;@fd3jDyIKtJ8=Bk zxo01}zkTxYJMmwNKZ(tA)Qe3swfUFAHbwQndP?kNw3A=r!Xj$nuQ~BJR-q07Y4|^V|uNi z<|E(09I`GbS;N&US38y@ z<`|)@Bn`Mo$3{bI%;qQlR8+o{&M0O%@gk8zLWku*mVaefvsAa<;yHn6u9@n@QqYKh2E3WA^kKape`KSj6Jq zn0Sz6Lta^{k-&JUFq| zIY}$rYuv}}3P)V;_Q=x9N!L#|PEa^NqImdh8MZo2350JQqP|~^uz%{`5T4Pc(m+^j0 zjFn31=+WJQF;{()VM{JjLH}oibNpL`dCIRmWO5!jTVNe#)s)VxfhHG#>sq zhZ&3rPCPOhezq3 zc(69`=zvWe0WvZFCwFf}^e+10pWvf|yVx+sA8=1%^9YBZB778m-)Uo)8$Le0R4$r< zM;~lH?^G=HnKxtgfDYI@Y|V*GY+7?2Gq3qXU)mhT9MpN}uy$I<&|_Ux2Su$f^9rnL zRSYicq();#tPS&GUGTUTB8I65X&r)cg2EA2x_F@dtaOa1yXR6Op81PJAoQgQ( z`;nwBJg$bg7eXM2qb~$mgXyR*lv16XDr6fmLMe0UfzWi+E86)9vHH>a#X4|~nTP5Z zzUIZ!@+~W$xqDaNw(}%%a=WD9^H($AUn#Yd6vR(Ajdyc}dT*3lgH;-?z z%=>SW=bp~$_FfL_R7B^w1@G5V+~|0x_g#uL)g3c_h!LD_M0wNB8)+aR$6)-B5c<$c zC2iIqMTQuMPFW22VYU0`FHziLb^WRrNTSM7JMu;_M(_~Ld8kH2beMn$f8&u=BI~(D zP_dX8s;F?_&pk2|#d)SA1|7ZOkU@z@qR5vQQR3iblt>2m3CHo1AuRafWiL4zm=aS9 zq7|CCL~(sP1KK>$p-=g!ix6)_Y(g264hM{C;^(<%O{nplH%9drIbf*5jVIOtjC=7F zuzx=>!08ArU1y<_y;?WrwFz-^LCx;?pp3 zGp3D}bi9#CRW?n`hfVDr{|pUToyG@T8V5gW3{7-{1Dj9nyeXn?^4L)2hL4ZPo|Fc< zZ1VhxCfr;RwCEoW^>aO>EzRtc{&S6(F56D}h>{%ihem7=@u;O!7AZEKPaeO?f&?z3 ziPM=57MogF7_VOnGdBs57bfCL#WP3%u?QggAIho#oIZoOCDU28F$v1u!PW99QXQ~YC!>`rRmav zv!s?f21@Tw_xeVQFNb`joJB1VS)CZqCx)DEUQbmq`x(`%FsfkUTB{SA=YhDf_$99m zb`E%r^>dilTjZdHq&k#DT^F$!yk_2rFRY%#wf!vKOZ2}6vI*B>1aEOr!-wnjUY)}p z=h!ouTVbqqmUB&P@1^5=-1wfy=jgRMF0*^S&!cmUTXE;uJT@0=eQVFyId5+Ln~F{? 
zS69aC!l|gShsAOsar%W8{#miX8H)FvQ4^h>t{TVC0WnU-Jo!irtyK^1w`>2X{8jxQ zWi8g??6{OW;}CyYo;7`buIa4ud9HG!#G5kzLi)yCTn9{3mjgGUylINB>n7JgZxgIX z!G*8&@y(AOk;xlbzOaai6I5-O$p=s8(sj|47S$U;n>aNV65L?x6AB3mh+5|KsYvSN zEHclw!DaU5vkK#}PP{op56^wQ*W77K^FuB${!I<)DspUWq#J>Jr9^dYT4IcrHdM)# zL)=FNR&zqvW2>xjp-rhmd!UGQx1=s9xQ?!`tm{sgmf zxenCTCw}UWY;EW?N5)JyD#X}_*IT)$Lx1#|b7W&0V`6d8UEll{9TKLZ4&&LWc%wqYLItv_p&wZ>*;5yX#Lq8qR5}u2DZKjf*)t}AN+cP~AL-)naHj_Yyzt9*~S9{95w z*0Sv!VqI^>kTc%aF^|oeE%}wUaWj7&gL5tXp6@=nvqQgCBbdtHBUSv2T%=5UA-M9m zASP~awEG8KaWB@pA$ice0sn6Ugf+QzvGp~02)6U=n{Vs*>##Y8yj~x_;Pa{4(8T5- zHx|15_!qP|;Mc#R>+V{XljEBY`k)#3nv(?1)DK{ zny02xk54*?BhDrc{lw+w*>j~HpD;=%`qL+YfmJ;w9(j)JFJ=;&tOXH14NP2Uho755 zP6!*j>a8?l6upy+3||0+<#q$bBgZ1|`w0Dm$XsIdsbaF@nJ)w?9=JSG zn}5QE8|2FDI*R#(LNZ2LPNbgp`_c(qrRc{!?lm)+ES?b~8&9OdFyx_2IBXu^3gA#! zZ`O7G#f1ruLt8y*8MjCAAb8%v;ctxd$g$Uy^^*`u4WI_L$Cc7moM80}A&(teJ*pJ1 zGcu(&dN(HZ)PKONPBY0$Z9m^%b23L{-b08}LrpNq`Djj2C9KX}ejkPMk}+JCu6Ar{ zczL*flg2Yzz6KrL`^dTv=D6!s+rl z=RC%04$-&qm-?`F=gZU8fP(q+tZ3i`mC>GoOjR{ z+k1Ck-oF0sf4qJA>GSQ~&%VEX?%nre8&=<R%~)nO(fdZwT0z%-lQD1K$>TZya(>8#(SgsWqJ00IZ-8uc^9{W~%F$=4 zp^7%VJmQy1r5}(`6pny=>EcIYq~b|DZJ_W#u|5S)`%jyGYIrpgiQ1&Aj-BNvFnGSIfA6eL-_Pw+A}ftV8@6 z)NmfvZI#+!Sztz4Noz}2>L^fufpC8fkg zI8mgsG8a@(Sl5!o5Y|@zmHmX>3V8`n?ma?BuI1^9AP@9kE_}{rShQ`=xB90c2K&DW zDOz14Gfv@1fMk$(<-veTSLoB;L-|gq9lv_HhL>0RHxhpaVI>=pRjG!T!v95?oj4Qf zBeJ4d(O<(TTb|mze=~5iFKc~1iDz@>&b6h@XCo&}TY3%QkvRz=%UL&GNDrneA-do7 zVh|Ne@6Ti7#xOwBe>cS!P|2Q|u=n;q+aV`HUUl7dRq*XTJAOT-p?;p*`OsyBx%oQF zD)B_`depzX#&dEw!KDOb7=^WJc)p19X_yodw_wM3H}4HKjT(df`cy?MX{uo_heP%1mrH3}p~EL;R>?CcKcj zBP>QSdMU1ILP+@);9Wx2d8olXf=g5r>Ad4CKtSn2x zSV9p|S03&}UwZ1W6GBuA6s@PkCnA5dpgKJq2aM(X1b*$NVB*N~XRYzR@m>$jg!-qZ z`hR1eo1_P*v`GOF|5CIi@I(qIes{hoL#%?b;CdbYr{+XbxOJHN%01(!+QUM5A@k)>QGb z8G{2MKr|s?JReLWg4fuxA4JAFlpj_3b`(f;2U(5`8ZA>+kIf7++y4N2tkRpDGHJpq zhhy&PnEVZ&+_^*3muf?;LUr2K+k--_X=F%r#cZ-Dr*`k^yk6X3FS+NV=BA&US+JO z)1yO}S>F_ZTAD|SZ?B;)>7Md)&hAq_QSJP@9mQ$`4T(W2h)TXu;H*BNdo6L6Wo2qq 
zW!U8w2B3;<->cEYgV8<|(S15f5W{?P=}G}7t6wC^5+t162H@FA>X|n5HUdXL^(iZ+ zB}&H>uGT2~3#@|hPl^`$YW)gs2*Ixmr1i%*Nq@3WAz`7TqtdPT>SEEilQF(5Gn}X~ zs_s-2IMw6?U%7aH4)GGR;f=sU#%Ng{^{J%*EI1K7`&E-xxawx8bwAqKjH; z>O_5JvlZsS1?D=`B^~bS43H_#%(L;b(zB+>6_kAWhp`d_DXtiI%)@WNB9cD zhoLO3IM z_e8}w2gN6iS*HlLZlLt*UI5)5)<*V171&^uZx57Jw2?y{Oy|sD%|SE@w@3&#VOa8< zdbA+}k7_9z(&)CKmiDP`QF%xXo5x1*hCpSL4ZtW{fvpk}zMSu78@eeiqbb zY3!9M)Ag>59AjKAh;W@>AInWB=##Hb)Dg3%u`J@O7FpK1l>_C)+F0r72>Qysm{a2J z0xC>r-mYG8*(VPrrTt>BW2Q^Llr%}e$8h_>t+a3i($_O}>c7#@yq}V$=3ba3)UzfYF;(_#7jiZ`HHY$2^f~z2QEDqbhPFFwE!fku{yGG z`8rN9FSXN)0#G}c;+5v$M12A|TeCM>*e`lsVLmrNbXRP37i+fv{KV9lwBs&@B<^I1 zBn>lm%eoDWfZ^Q*RIn8lZ&t>fAOmz1VWb;+G4kU@i%uX^OEtGZlw)IMuTAwUXNiUc zng&hu=-08Kvj|-+aDt->^E5U(xKil8_8T!bb zpy~3yY_^p{GSVFd+1~cuzeS-5HB8_oU=>_KS#~6tnAF?>&!EQ{grb`pTBA|ROn6;% zSOsT5x50!t4La$Iq&#pglIHg;P*IA-_#sa8ywo*8{5yuq$UoRE*$Z2?nCz(o5JnaTV1^B|K+|I4eJQp#2kNx|__QrElFI#(cN{Y(s@B@g;pn zkh3I6s6@-3z^sV?xg1e6+?%9;vhxln}}wE z0_A5D8T%=taj02)X(NL9wG;WsAbil~J^YV-$j;Cwm6_=20{sFSpDZj_M@WY&C!-AO z4LSiyPw{VLEi`<5XhrypVyzXb%O|gq|D|>*Lb#&0dpM!8;17?Xc1> z1ZJIUF>Ej!L?}w0sTdduc7}K~Kr~n&Nwfbi>#Z~oYL`dbGcAxVK4}*lJ&CKb<7YNoH6iSqhBcx0PYJ zZGncQPXaR(9u|f!EbG7qbi9A^(46W)SwwkfTNG2+GX&CX39_wUNSmhU+vo@qgYe=Or;NNu7xii z)iIPlkq@sUz?8f0-40c0a6@;>UwDrK%Xpo!MLu-v;hAm8G;A*woX5x$iRvI?zxoJN z7=38U#FD1^)pl>g8+#5wnXO3}*Y-F1LL^Y&gB36^5b>h<6y<&ngcY5Tgral2#lgBD zDUUpuZm#fNz=CwnWA(;?RG-kwfD931VxU@pkM0onX?iG*JiHEAhdNCAS_3-@B^2!j zir)1dR1SZFSwid})8C02B4`=0jgXO^n!r)n?MAfr&sFL0e)>(%X3@yZHIGXY{+`Fd{!85VeyxLKx%gi zde%DFn#aXM2{2N8=wQ1Szn_d%ZiHwu=Sc*-pB~yh`o}|2F(0cK7dHiU;kloSgp9eq zH&!VLv57(3^FA%VJG07)hba=Ze>rf%BRx^if2x*_7x}gTJwQXnLLE?11;lKYEMm;5 zvp@zYeD-}x2LVDSRtxWQ61N<6*=;F3wYg%*ybG@il~H0Ke(1u{1Z_X$bG{d4AV&UL zWmc21OkmF8ICdKO=}1TCNE6uG8L!%fd($;u6whjUv*}4nC!7XAQc@++>X!Tt5PzXA zP<2Xa#S7C-FThX?&VD}&QWE_^>LruHn-HMxK(-V@$7gO4A!Wg?hqrSQcf z`$*|2jU3VJkO(>q^=Mu0`(2&`tFCzpsP&2pHpCB?A;#>qlOcL61gN?0DX6y38=zPl zvbqwQqTCyLznSY=hZ_a>_5jon 
z>B{Mn`q{9yLoo~oD*Mr7+{#}{e`g=S!rN*HW6p&NqW?-gm@`S#IV^I}1GcaMis3wkNk(|1} zRMVXXV|p$TgX%HR0w8?ToysIVd)9R=+Y zxbxsg>JQWK6iy0kkGeGGf_^vgfI@U_#A+hwj8HhfOgIE>yBEhydw-C~&E4>EW9hv@ zsGo%$JVEIp#AhC%OQA!rYJoFYxztu(4v~91p+l@~h7tZBp%3ANh4FwC;IR2frw^yT z5CMC}zHjrdJ%oG$$!M^cg=~t^_2;l(3i3MWAgtUGNU^;WhK0;tW#_nt?}N$rU5kAa z_seG`BKM!j&Yyd(U4)U8b-^+6LDM1sca?uTO=+Q~Jnv!U*abDn8r zr@&MOR<+gTvSJ#e3-c2qbzP(lPvww!jvIj)ZKA^?F7)NHW#6vNAhez`O~F8wOhJvN z*;J)+72NP8sdiRt0Avt7pI5BZsXY6K`D`lzlYZ&7BT0BHb9f^VSGg}%hdtA6a-I2y zSdrs&@3O%Lh8lTcu()PKLq9X_zRPl>>g7JosOI{noUSvOeb0QLY1VPh?N0PIPSX`W z*{?JumlIti!$9H&p{*Bj$7SvOu>Pcwt1j-dbwZAP~5K4O;>C8vBbURN( ziY7lY1(nOc;kS-+KQB>$70LlD@o&?)j(P6$MhIM|P^9i8)qs6FL~GH+VV)(rp_1ei zKYv=X#eW>_^m*nr5@A zQReuetyvDO9AmvntFfCrxkQb_Q%VJklsXXVcjORtP8fXwK4@od1-EBFX`B=fm~R;@ zwPK8t!xx^PJ~HGTUDWZ7ctGs{%AP0di-&c^kQH@#jWyNIN(79NWae|YxG4=t24s(5 z6&q$G`li(ruK-rGBKVB}RKhfHgr*DvQth;P?upV+YY?}N>dQ9g_F-#&74wRDGc3&> zBgIUF!yAOqdlPU>ybLp$9eNWo6dO&w)U+4J*EBHBy2l6ED&i0`68x>p3*)2Bsk-Yy z75c&Kt*OkGT}1`&t_xcx->M2fLq78p66s&I*x?z^JCY97Zi$&4EgF9PEe$cW}PU#N2pE_98xU9w%Po@j%e)IkVuz@A?XYd%SE z;i+3v!@IY_b~2;g$uh6M@M%|mR0YJY#*M#0U`YNw?Ln`mjx2Z;aWe1(4rO<22cs`M zA>36ZlsWB_C<13t30%{sJse3zQ0OgMCd^|70Q|bV*S6IaCQJ*nM z(qOi5$nRJ5;B-FNu?P^SEBUF#@nD`JQ|CIX3q{~YRPuJ3I3Zl(PU_6@ro{6Dykq)4 z51k_^B~MM}`CP9f1pDc;UKfBZK+ADBj;zvs)AahL6Sqw3xUo5l?|RV0aC+O5leo_F zja!N_zZVwYYG{_XKu+-(6B^<%p?A;9D`!7`&WaH~_9=c#o&+yR7}O+C`sz)*Iib8d{sr@15y?XEc{r^?5LjJWlVCDr z-;&QXvf+d!kY$PuFm}2kNg#7`p^{ECL58O6XZH&1He@8+9P;1i@fX~=FJj;Ma;7wn z64T)W>Al8NEURRGp5$jN?U%s(XXQy;4~+CilpR`G{T&d|@ZBe^n`Pv~aqEjuGU(9G zr!Gw7Kz6reuh!AEEu(K+J$95%RhLvr&X1f>c>uq689$~u-|c!y;9@-e4W@fSki$$q znWRU*!QEAh9#-gX=9VXz5%@&3Za&Z1#gsn~{QhQ7rFaH7#2mD7fD{&HxegY{UNiYI z>)A()D~OTKW%R9Vj-zJweG(dC2V{JgEK7#ySKEC+p_wzFL^PNkX9$q~=FkFsV`Gi6h9$E3JtlZv6 zt6`fKDo!S~V{VIPIQ)?A6eV+EGyBCmEmP@VxxW(#2&|8hcIWDit1OdF1|4LhHIS*S zcu57=TG6H?FemJC?261AE=ij;SW7x0U?Q{X4P?^W+$M6*Nz<&*pL77sxZkr~TwjC3 z)1rrlw?aW3lOBP#!*W!$3v){Y5J+istOv(I6#0CQzqpG-3a7?2aeVi(_?i(csBN$d 
zT0`N*?AK#DZ=kyZm^Ec2G65}3ajogcnk+NSm=SvoGI6nU0KwoLubJM&K`M4AVb=D6 zfMeF_$%NNydS4q!>=4;z=(`A-6R9?8mG<#6lD{w{6N{g&4DoL1gD?>4JoDR5m7jy` zLl*0p#d~*hx-KRhTK(O~^}*bfnuxo3Gm!aR-p?btXLDv_0aRZ!p7#e~MH$#*M%SiR zY5Q_^^l%C}Kmw)z^S7T_0R4MUhFGEQ+93EWR%Hx_RDz|e=D5z#e3x*7!S=h~W?*`5RU2`4EQ2}M4kFj+B`@W>{knI%P#d9<9)q5BSYj|~~z;g~;-tD~Hp zwgc%IIP-v8^+E-eywQaMYM+Kf2Irmd&JfaX5*t`J>DN5kG#yX~St05%|1_zboNGC^ zC8sZpzFXSj!@}LmP_R0qwUx<4u13m?2ZcLAci^$8--X=>h;&e!4@l&hc-|+u@H{KM ztCFn35ooYApB|5Hp!q0VZ+%r1587jBu{d!?4e0}OiC5Ps6z)r~E_~DUvp(XL&t85W z!52%Nro*Od$6psga*?Ii5}R&Vf2Q4htgIPb1n(eMxK6yt;dn^*o!xd<0lk+eop#vx zU0Ek^W_*{Ls=RG9iJNyuf!l1)%|EBo-IZ7x?{i`-J}q`b-PIPL;%3IYZcL zJIh#1=_4{aNUsF)jz&jfJSscnZGw?tV5CtOHS|4&qdCjs;6O$oIF34kTP*ir?l&m} zGFQ}FC6A;P&-m8CoTMK{wRW13&Me#$ft_1?V0oQ6oJOvw$t2yO78pPDgW)P+Qavwj zP+g&@&P2VfuysH`T$=${SzSjRZ8S0!3F*p^EdSowMx66X9$7Z6SuM!(P0x|tAFe6P zk5<=W!ECvjwhX+4r^yAdD{3-?b)ac$Fu%?S%ER&oL*3(H70HEAQ5-|OxI^t_KL@UI zbot4v`Mdnlz@Stj7_{YsRYqn$A8V1V6JfL{uX8VdX@-LZBk#T)V?@YdqxDT#?i&aW0>^!niFx=VDKrY7Lo-X=`>>)CS3pIC1+1l}=(V8zgK z96Qf1>vXN`Ybp^CDWFf)K-!qWv=QDBZkxj9{i@?77*S=3KVp-pbn+6cQA9sk*drVg zGQMA z1Q>3Ki~X9=#DIqOU9Iw@?KH#qauF7^zMLMc%G#S8e#~}sN=O56Oeh7@rqEK+ z^=8$DXA)KU@hki2>Qmwrj)67bM~kj$RjGph`(@NUnAybQm#RbcpBHH>Ny1W`6vM7l;$e`C!lqLt;OlU?+sx zExy2vF%H_Jo)Uw6w?_OuZB!;(9*EVUpp^VqhS|1G>kzh@v0ag)?q6%~!gW=Si%)4U zL`;HbIhx?!_9L_cZ{bkO2}_aUcN@ii?Klv7JfZ-?+Cvq9Kx+geQGEZfam7XELStC2 zsAGmR%l=}=FsDmY6@>}I#-bx}Ok2`j82%5Z$o&qp)~&X@fM39z-lih0Sc~MY*{iGM zViYa&p0PWNkNawCjxkI{_2x8n!xWB=wTv?GlIjn&rT`5JVAX|qQ5~lc=8Vv@YuPO(h`0N!pubAw+7nJBj#0BBi*MeWwzY9u#T1OXcTxu|-8iIIEnog97zQ=A4_G4%vRfT-7$kT9ecT1*CjQ23 zVIC#u-abJkwlD71!z><0orx0DSBJl`&EyYPBy zeWBcH#~%}bICJkL_U8y0)4in88FT)o5;a730cUlT+gYc~ym?2(dI)}EJuErLXJPF2 zNv9=H0V3n*W5aE{Bg*1^n)S*HwvSfW-tk4-$oYm=#wu6-B>c-sA zy^?NCQ8~P@{GYB=^t98yhe}RNgBvY%H)`l3%tB5p0#uXXdBep49LsoOXz~ zPAgAWP+Q5a&pJ2b9inxfn@WV7hR>Kp`QZ7xZDW^g%yp^DcF2W$QFReF8jmudof+l% zAJ>x^ho{)K!MfN&UK(|!{Q2^C`pUDZyufPJka?-y25r+@IeLHYd_w=eLVC7wzuXGB 
zzuqhP)#bm$6PFHaGY%ZOfU!ru+geh@2mXTTKi2%`R-awj16>rgIRR%EKQtJ2EifxF zR;}QftNxUO=7B-6Zrj0W`S{fndcJ#8=!J!A?j~x7ulD%Z98tE6zYu*)Pj^5`HJDSM z8ac(yYns#CmW>BP<#w_K&yk$-gE#$bQm4(tFE?K;W>8HHZJ{48Rd}Och&)KGa3Q># zwF6OY7nOVJ8?2}U-`f(q^Wnf=VRbPi$_Y(Rj7M2VFb7DtqcpSk+0!z0-rU{$m{gy4tE7gaq9(rUeyr$f}+ltN&3#u6$T` zojNpqz43UP_Z_S~J6&=*d{juk7d=7A4lH<^@hkFBrDe;uaxm!W1NO=4YvJ=~@ulzi zYFjHS;POp}*>_H5(W!YOOYJ+T`H6Fqf%+r-ong00%I`1PH+r__KNfpt4Q_R<`1Rvf z#R=f4(8k%jjl&DC%g}j9vgyeKjQXgDDH}*P?CsazI6VC7re@Ky(r5DJv_-QylL^Z$ zyXi~%Wc%V@%WVp<_!Ijg^7_TSpcV~YU%ract2c!AC7%4lh5vMyl_uZrcxe9z9ro>I=>P*mBl)nP+0|-&&eVzG7cl^P}S8_&Vx4(fYR(^`y7Qc^KHA!Rx4VKzDSD zibJLTFXF?0$BBWbcOyBCR;A9dt)I0&k5VIa|H-tG_Gc_t4inEDZ%%x*lfc zFg9WDz_zqvY|(yo*{`=JEBEH&3dgTr zZSm6l)01ROnCY3|yZGMLso_QWV%W;Np|_nfUV^RFhId|xN-DbR=VVwxLbCNhN1l@J}|#9uB*hV^{ogf zI!J>wh9fHf{QF1T!T5G&D?5-uO$0D4IlS;cui*2mnEOeLUWwwWh@n> zdPHF3&sZOmJrRS04a9y<>%M1?R=#e@O1Cr{8swCII%I&Z={~tERNk^9+B|jo>;dSN zAoE}Ml67q3Kofv3$4B=bg(qlR@~>~dmpbVgeU)Ibsd<8b*hc&(BSdkOD zPaXcs)!y(&<-C0)M}c{+jAqjFdQXi^=+n;b;*gNCUy2?Ud9jTiE@%`5zl^{1&jYl@ zCG;Dz(1ATnUZU!Su~&)(VVI=vTJXfY57&|&Z9l4er5%=x;hhM@{js7hiFY&qdqYuj zPo8E&YHLfx>;6@-*lD7R_(ED@aAwMqmPIUjl3c`cZ<6HB{PgWG!=0t&&K1$`U?L)_ zyxxiL$_m${)~q#oie5N!zM_n!Vi$lDeu_5Kg>)@07*{Edb5Gx~4D=S?h+Pn}08p0qsvP9$A9eA+CF&! 
zcQVvsA@z1f&86}5GhsMAhNQntFCrGNY`%HrtbE&P+d$mPhnESCLp zO=Y-%Om(|`+Gu(z6}XTkX_E}0?fO@j)>boT*omCt8JV7azg@6&%4`oaxBxnuyr;m1 zJwD0nE*@;;uKf5*y4PemUXvP_bXLB?Cc81TdKWN?G*-~GG!Wt|a>(ugrHR-_^y zIIr|H%=GB3-j=B~Ej6&vh)FcTf2pK0tZB8}zkgUB+DOSMUw(b+kqYn@xa}Dcw)Qio z3pZRnxsS6;6mPXjZKK$q7y2Tpq^rWV@y3gT7#wJC)8?KLcVAzFy}$7K<$*3VV*H@_ z&s>MW)wJsGK*D!#fg#V%CrfCK1(<;RYc1d;sW*JXTD@$M^|2w3dY+k_Fj;cG_Q;vR z5>=}1#b?d~q8a+yw2ZZ;Xr+)5f(Cjm+CFvXnZz3#{r5PhShifg{94P-@>@6Z&v<57 z#@Wqk+FI+(mZC0W(kNvrWaTf)D~%SfRJQE6|@fcG)!@6^{;_i=DTBSn-<&cQ!SSwLjBj4-D~|6SJ%$%PSXZ4&P$X$ngqZ%$z$W&W$*B{e2Z8$Cq*%rfR= zQleySp&(-`iC*< zqxG?JF;JhmD{~!mo4Kd}@zV3rk9XIBLMwXHyBAOK2&A{rS z7JHB^a>sk8@&%WeRc3{i?rc*B>VZh`O>}fd2RppIsy7!7k<|O5ZjWHhQ5n z-?i>W{Hd4u?aUj%*kzh8(flKji##MrE>2C>}BF8Ac8Bh3Z1*v zN^DHi9fUw?^QfPT zeT6GU!kUp>R2tIkz>D<9o_+l3WaKum%kl}#AU4Hqc1IK+>wGvuqZrdSZTH)r>S)Ee zZeOzCB+4Bp&ejtK-WF4h(^)VQ1$0t5kXYJH6bv+XpHAlXdBpzWehs-ve|h2gRwd|{ z+COi@-<%(=xzTC^(XF)q^{?v@jJ<4LvMR%{R~P83aWXl3&8;!Zu0Kg^zeJod!))FR zN33uo62UUeTE7D;{hEEBogN0;#@PSh(==g-ZiU=*CY#*v4rR~ann+y zOn;4~64)~TWQIilqoxgvC1>k9YRM)ruw34QI@4aQ#zl@v)%}7!T5H;{>aaGo5}8d`=Bw2r5x7e!f=9p?&_RM(tGORaF?}Cq_kw| z;a8wVxlMEO-@D5Sef8S1%C(WG`q;Hw%(q0NpB-jB3&&)C;CnVYTyxExr^a-mzUOe# zYIU}PZVXyI2&y}6QGG1X`y%b`rI&S)TOHG$W&ZdFg+P7t=$mzZ{b8V*1G4=-|8%w$ zM=TF`tCVK1O%zTxEzekTsn@+)OY0ifLeE~I#T*6*9L+&NKBtaK(8M3@HsS228OvGiEd{rOw)8mi3N2Ltupj*L!QmPHlRhYm1$~8xZ$OF5I3v z!%&Zs8BnynEm$wGiZZ1}uYm*4VAWW#o};S2(iiiJ$1=}rdWR9*{V^?zMh$Yz8*KD{ zv0+TjLH(G)B&d@~b3cQs{TqiLkKKvNR63C5TXTR8Yle`PF z$t5o_(WMPizufR4atU~JejRl%3*dTrf6``?|`_ci^M zGdj+A^fM3nRQ`Sk!vH=G-;KSIYg49krYHB@xVQt`7(6!f<2H7 zAgvB)_3-RuFu1WO6wVt;$?IFW9Q`1`so&E3)&yKKUxqG4E??6<&TkhQg5$({II5Mu z`%|h$|J23HzIJBsJ+izPbk#7Lk6E|-9Ng$Ju4JVv-saK9qAhMK72ah%`IU&XW!>IA zAVh~;)+FnhoT(+||4eFG+#J+;C;O!KWnFlX( zAB<6SITV@F(a>pW7`u$i-I_1h`_OB8C_!54?uUc{o8lc~-Ja-f3q!m_l1{l5850HG`v4uj66$oEIoWkvhY_O z7`>;(1@)@CYdaL0cxA)igIkBONB>|CgZfvEj1jjzUrI|QxiXU>j2}_Km?ii*A;oIw zQhPZ^`fpb3Ihgj+!du1*C!3dIeSc)^a=E}ZKO7VCTbomvCRW;jUVm};n4|eFG!3`M 
zye8z_xMZ98-1n|eeIGOrXp2VPQdC{~qv~L0|2Tb%pVh+0ZaOLtd}QbcyL0!rm4yFQ z)JDnn>xQvo{qu&`CPo*iYm%}ae}weiqQ>4C-ZX6Nj+2e?o3lQCglW&@d1xq86PptS@`(pmd=A6*42TZeH(ynuAs|fy^`Ren3tfCmh%4Ei!Asglk z+Z5DAM^lpMgwo(U-e{zqRg7?2$lP9O_4T+iAy|d_VR!F zSB5_QzSFCJqNMA6%hA1{sQXt-ZA+pfUyYhWQCYTiBi@vqr1|dP)Pdl{6G_e(FIXVF z%V@rHHnL`-ic^d@e)Y}5Xw1$}CM&x&yUoc{sWY6jo7CJ@U+A+Yh=||1Gao$sYK-EI zC9mE6dJ6}#A2$Ky<5ZQ>TI5H%>(Z7+yGJM`YuUMpYo+!pJakzZdiGgz&k?#Z&*f3n z+9~(W^diTZ*!_71`o@umRkU^Y9yD_vP(>YaK;dbHaE6dk!Gagf_f86nwR&36=u)<9 zD3;THb8g(?TsJa=q5Po|;Jh~N-(+vgDBehqTI~eCul7d?37z-7yRAT;RT=5o7N^6H zej&oFjcs7Qml{N8&+X#8lwJQ5?yGxV=?S#0=2fz_>fCKr_X|I>#TRsIRGTwp=?#w z?ke9Ud*-9xn$;D`XV%j>CC^8orw)Hb_*A=lOu*jPIP6P#SgU($@ZY+!?dcpX>B8k- z)st&0WqM9}iHoQ8p-!s4am7E4A{7++?&3-5>IBHQ>hGbmQU7^+3jkN7zNgYq9`j!q ze)x#T)g){><@Icv9ZU6Q%!Lb{&1wG~H#phTp?a1RfA3ED17Q%72ovB!8f$>4jTc6ziGuwqdEH!d}jVFb*vRSvINwoj| z8nd63Q&0=%LEm)JD&m)O$iDc7WjAQUg6|6S^t!6;w|*x0uyK7xa-gZV59XDeouVlt zqeF&kHj3k_`eNE_B4~C={`v4_h0$(gp)zGEf;11d>*Tj?DdtK};tTCDWJb z_omvDmQU?lckyuXeB#3BX@r~}e=@lt8LF~P(`b&6x=WU*>5eE8Sqgk&zO?LGa}(!N zf46q~nIAN~|8d9B;MfRuz6)w$rQ)dB+AJSk7R&BzAU^&=$4`{H7fkEfCZ3W{TK1aQ zs_3G>m#SU3D-I%&=6Lml_Rq?f^sG@|OY8&l-`QW-ylD)c6-0o_knMhKOW%k(w> ziS+8_z)!oHZ$4@U@o;=^HsT~n_Mg0qoKHM<#sC2cd2C@jmKQO*g-m!{A!NM~o4P42 zB<$yiKd+U(b=#A2jq?2H34k#x)xhTJIl2Ba?whbbclHCjV0dZd?y@i;G4M6$clS0l zc+IwlFwWj#Lsg zbm=pRSFX*dorC`8>Oa3pok}Y*4tYfN$zE6e&?ID%<3XMr(t#_St*5QrPV|O+iWgR} zlw4?DhwAaSryUas!b;QJxMp=QfOCE9R@IMwHHoX5h1LbwscrSY^pn?m@lHM^?MVZB z%)hA>NDi<<-tMZ63bgJ4DGzESuF1hYS=2;8rpb|0@BVzcmGxdPQ4k29+pkZFJU1;= zE8f|u{LN9+9re~X=X*sq?!0)s2#0NWtZM$O*9`&OhM5W+nSPZe?p_%0URq$XyXbd< z?vswbN&OOTa*Zu}NA~#0-jMG2@N%C13a;R#0pZ50?ZM4raW|!jkkAq6U7z(t$6Wu& zpWGT_z=@Omsv|GL&HFdBfL~@77`YyJcKp^o-VZ2PFzpnJTDt4gbZMS}HYr6fm1rM% z_x>vVM3zc#tPGY8n<2$w$<2i2)-6Y=%jz9!3#%)Y(PwuP7xtvS z`SZH1tzF295=Um)C#sOQQq6M8rHV-kd5O zSGD*57@HGLNAhLY#N@-C%iuxa`OsS0>Co?cc!nflymn<0k$9h)R26lOoX8w zZ2CK=9RFIhmbv^6p_7?Q1r=FI_t(B)=-f#iy67=bdn8i}iVF3%u`iF2M))IU%Y)E@ 
z_z*1;LFO^g8!_~*wYOIp|+UbXoV;Z6-o;`Tu^L(%^jVe6BFzQTZLG;~1O$gc4X4@nZ6( zl=X$es{io>f-hktoJzqi`M=eD0j+Sr=T_CJGINemnno?3#>^sZhMVIin05<|qW=Cc z=cg`F`q1Cilhc;Nu{k$fdi;}|W78HkpYwFwFb3#sD2jiuri*>2KAD+- zy7Wmudf?EG7}cc(pQBAb6Zi`;vV2vC9_DHWRq6hk?{|81if7ZVd84E+C?#DrrAVe- zmmjZvb-`Si^9*XF@}k-~^NXQ?o!GQsb+jPYQp6Pcer^}?L4`+~>_^=6UEL`}Zb^1P zHe1LoGw{qTeL{8U`57iZ_|*8n@Ik9WvOI=gF46ce+Pa6?v|_0u^4Mj$+B_a{B-XXF zGbl0d-fmPX18ai*SVdEdMzz_i$C!aFa0tRCgU_scc?#XriM7_ zo2mXM**abS%lLM9@Zrvt@2BIxvhQXhasGNU+IoeQ#}XfELB#6F6`KYfl{&f)WV5ay z7DBIYiwmw8F{iaojy<70uFR~Z+7h%(gRB>bgERpat6;50-?hiJ_<}+(pXL9N7@lH; zkj95M#mc@-Ko}N8#h5+f$lZ7mE`nlhUItHH8OB{ zao|$r%dhG&x4(S=htlt>U0*#*_lP!Frx2|_GeUzL$}8hai)R^UIlR>qzb)qaY-eFB*l|Lq zsTo*uPs5?FDw5O~S@4jw^9=X! z-li;h3ufv#xc_dC^#?Vk0PiC zmNj5sT%%|V068m^y%8Ouhg>+{$Ym*Br70@mPAUt>b|vhy&kYWr^FbxejeIJCGLo)6 zUp@ar?@MafPU1p|(~exzr-`nQ@@iQ}@#yU@%u3Xo;02xeWmO1pEgzIzdQP}tQfq7@0h6H)63#_q&ob?4R~i`7#!QC7p5 zpvoG(eENdzJlmy&Ew^9XY^vF8>Dp|!jlzx|Gh}1Cfz;I%3Z+YMe0(AYIFr1OHhq_-Mz_5gX%{Q zmQ9POzup{e**a5U{3Ftf84Vh*GvKXzn0{0HwC_`ue_^hS4A-bk`u8#mU%$m9@Z%GQ zwuwnQ0NQf!M#hCK(_N&o0O)Dhp{)qlcH#JUiG?4&p3Cy*%r&D8!F+Kl;}td%rKoXv z+AbmeatP%8V!G^2!JkxJzv-XdjA*vlGG|9NQtg0#@4~{<$%Wtj-_KQXl$a-fli6C_ z`4`mQE4RnBc(T38x>4rUrQZsSe7p)cEy4;5!y^2$tC@k{)ZeHeDhW zCgu^1ak}m^75mgBFsSw?cj=nQt=LXzcueEVSj7ECAqSyPGUzJRNnd7 z!8F5nWe*VAD|#|ix0jR83PMP zRx2uKh54#Q^lowz90F~!@RN>&7w;y~jy%%^QdP zujJIz)#QUe-0X?O=KIV?Dh+gKkdsJqdn&L+MLoFNZs>EwCub2=Jex7%E=TIFv&gJN zvMb+R<+}ZgBGbuP0>V14RqEjvi=G3N~hSV9nLG|0fWMKc&=o}hqN1$W)7UqWUl z0S~+;;|Ih>wOKYTvJhaP;N2${8XM&tkuN+Z&m?n5`)XwW%oJ?8_?m4!{bg#JoOv{? 
zB`XY|w;>z&iyCO`9)CAygvWH^u~tYL#!E43(cy^b$quHGYpi*!g#$M>8DAo~ZlBD3 zt+)|U5qb0wv=rm+MMOGZvh{e-GD#J#;z0X7&@)oJxwb8|EEl%=c0x~>DyMmzby?+t zdEg#fX_}^+hC~Q=%U&pXyLsLQu~MZYly={%Hiw*@NQIy)Ri223Uq3sk!%el9TQFj| z*7qz|ajhDzDBRkiG&LJYv1?oE*OCzr)Egc^LPc#lxeG~b@_2{ro0BQr!O33-zjk4| zM}JqRCKVGD=dZ{6$ZMkui*{Ty7as|Lth9)QYIYxIO!^-j<#k?-N4%&2R;_E*+uqN8 z?Sp+`{(FV6D+;_=trsyy`Ocd@E_+w3*FwijhaspUrR(L$*;lhamv~JF-#W0H2sT3z z@1;RfRj^*2o%vaDF8FNKQ{66CEA!leUF$|jDk0eV_&WP&=q1czutV*awyMKFLg1FW zxRT+uw6_xdW`9^)e$y)i>4bi(slZp;of(Yx zZM=0$U=Q;QXe9SKdKCqdoI*&%b)u)jFKY&Y*dXKe(O@RS{b^Iw81@8;2iN1Z`m_>_xd+ zMr>dS0r*e$wWyb!vy@Xab7klYZTY@Go_`19C{2u* zsyj`ykX0F~^Q3|&4XSP^#g+;J!Fa3To$oGhhaErC!Cl3V6ahbnV9o~XSE1ae-O$-* z_^%d{ter=Po^PQ0wg0~B_2OM@T9xkHjIWM7`=FLOx_nL`5US?LAjU|*bH-|xZavmS z0&N|54zT8nvg(iR=V0+*fpo`5`|w2iYCa^u`4<@`0^UefhRGs2t-&5YYT$YQ70Sb)V3GSPrR<}5( z+&K9?nxv|B^q=S#9@|c+VmKrI_+z9TB|36Oj5`xBAyeCDo0!;qxo;>lkL%G`N}7cZ zK;oaXVz^1-#hP!zv7}2aE*p;+%E`ITFA2B<_O|eX}Y|NB8zk}JzSJ~xvaz1Q( z9utmnUg4beh7Mx}bPJqpmRGqVa4dGb5a6^ku?Fq}95-$O1hpG0`-eNn%)CWF5CUK~ zo}!?f%Whh~c=HtZUdt4!c6ZH`rr#rywGk%kX`Ht5#;)c=T9M!%r!{buc*R^Yq=Rqw z!t$a?<>pX7-=NKK70X)nqjDBm%Gc*{xUaS7`|+kP>|Gsv+hWUi^K%#`rdgqQyW}T2 zXhq6v2er5#TEXf~3umjUD8gVH_Xlo{UMA^z)Bn2s^@u-I@^ORt@p6m2ShhcVUk3jq znTPgn(p6W7U_LxWod1%0iz9?B^|>=FI$6EKTK2`|aQNAqb*W%l4|n8blisf0VFTW^ zFKxU2gKGdK$$65e;x17`sE7WGxN=oi@qqt5>S!#ci)>6>1e;*&#j|7l%J@N3<8d?o zxKPC@o#ZE{)cpG)ZyAd_ThfJ8@QWLFp`itoT@dt|b)^hv^;MgR5i?(9l8L?c6=|eH z$~k!P#d%lN&e5ZIleO>zTs^!HZw-%yo*!6P4*S-6k=o#9qVzC4^m6P==G$o#m-M_v z5#VDQ(WB|bc5k!YBaF-?HWsQ>!Jo)nRSkqw-L8<&?Jh%cYG*=0n6lT6-rFbqwF--O zh*9{PyP;_oYm|j1pQafkW3Kq%PeZsvZr+~nf@=S!kH8Gpkgk3%^XDtasw5kD`|B3x zpv|>y$mUMa%2>*1NKbhaP;dizXdh~T?x!67+*1dyGUZL64u1Fg9(PlD)J;ojNke@} zjc&I|+2}NgB*u=LO+F1U{p^$$YlwpQ)%F>tU;|o>fLm5k3w>f6-Ou<#FO;FJkz*FJ zO8UVUXot#eB@c|)U;!z_%a=p-~DVbgyd-%LJGlAZ4?zu@@ui9Ik9Fxf#QZw<@ zEw-@}ZQskG^?WzK9bKERR?6-MwrmL`@(c_3mVDn9&+v*v^PIOOQ!ae<_Pwv*cPqS9 zyR50^?sDC}!!U(D5CWcs6?u{8XzvU!1pp4FO(yMfbh>Xlo`#x|35}Zg;`N@B99#rP 
zJ--W{Rc^Rvv24V4mp}O98l>@e{PzSKDsbPEN#s!3kehbuz5LzZilMIA$&wrOe&K;H zeN_3wW?6{CJ$dra^i5l)(~uzKv9=X{m6bry#MjFMh4iPsjgGtlw<+7d=sEe}(nO4o zeqTlrRZYFU20aY;VE7gh`tE&h=#FadCs)19AK}>D&gg&_Jmjj!IE3hZOV^5wpst*w zG$(r z1o_X0-!I%9%+`|}j*P%f z_q*%xeQ6KECe7Z!fnc-C$x%;#Aqb%Q#}~kx1H6&d@HAyWhZT_Ut1h8tjrpJuoB}&V z%SoX6QXdETnCffI_&;uF5sxYQ1iRm_HUh&#D|!=<3Z*}_BCo^B_9sG+{`aA{(^#`q zsQlga``3F&=lippRAT&}n&7vlyY~S8y%qduAgD#h-CMzO)Z2q(@S$=%?dOu=zspah z1`BOG(Z-Nt$4{~kK4`KV_l@Plf0)Hqf;7chg2^6XCc5ivzw&-|GfP*{ck;kR7QNP>5b)-Sl-4w3)xJLM#`N8X zkM&go-2~Tfq8c}h?O}Zf+GZnG_ig}X)kop^wufeYI>Fzrq_`W0W>Kt^+AQH`*Vuf2 zKJn}1TXG+kKVGq$&R*0Xzjh+Xh+pq)8OVK9Lw3A+KiOY4Z>Mzj;;bS%>xHlczOsB@ zF5$AS=1oR#YmcOwWX9F%)8-Sth6!$;N)5;e8|i9>S5f7qF8L993!A!hO9xc_}6Hk zt2e?$KqPB593HqH`nzL-vyAvn=Vjgm*j3pHool6kIB09hL zhN=YG4qbKKP`%4mKhK1dq{bTwoGtx2Kj?L}z=5s8Pb0s*!C<@ZIt&_{KA>0pDyEiu z?3`EAl((fsuU)1@e}hmV4R2VcGQ69@i)(0{`cOSiV!d zp}p^=Xk{72n69w^RG{$i@2>j;3a90uo!hPiH033G^lncsVctMQh+^1eVR7An-TIPz z&ofI*E`{J=5zX3gJ*>Ctf#>rFeZ6{;ITt5JZ1RP?b$NUq{#z9l83i!CACe{4naSU^ zUE<)SAleYM=TPc7uOC|>pZKIUYAtRe*e6pdrfq}53sJL@!7p*+HwRyGlrIQoDKko#m zzd}BkgI*vQT~moDzI?c1G2HP-tz3C@uzmhUBFx!dDD8FXB6}A4A4V#vtJr3l`RjzK ziLlxH(ajSp$?zOjH~0Z=ZE)+Vz6>7}y6GLZyx?*89+mUY1I+K&|Efu^4|9LGBos9f zZx0^g3xJg+zL%zddC&TA&9I)Ymk;>#n(&}H!EoN)U2@nftbIOVKw&xH4IXt;y}UM+ z^8*#}+LwJX`wp)&a*qI@B#^nK^_jcl8Iy!>9yYEN%RwM5xe5@W1^ zy{HIWA18^gs4Tnq)Ue2nbn`(~C>TO~=&tDO*cuZygyicpggqzM#mzu;&nKg(X3`#*M@IHRRg0M1WyN^|(6 zIPTMa9K6cVQ9LI9w?oxuZeMb|vG2PsoF!-xk z79slV%_ze9!(%dA18Q;ztP|#aB_!VPcbn59-1SV$H@zF}zVfdBXe+J>N|lC-Ty0f* z0{5N?nyr|KKgeIOfiSk7g(nm=6LLLKiVXj;^^ndxXj+6-9!ut#F3&sQFb`(VS0}%g zr+~X@$}FQpwTD^zz;z?WiV_KesVbP!aMPK!&AgyKjV*w{krRYDsR<%rH!xy``zxs0 zH!9Or&gB5XO{WqclCB*%HA|89bg5NH`#AEM|EM!BW44aUl0N=qr&->maz1s4a3EJ>-zoU-9SNW6UiJ1bB z%f^kAhcf)2h$C7HOmk{jUU0GwT9y^6c6G7Hds@Eu)NVvXuTi40lycm@alSyQBG#ft z*q>hPPrcH_NkVYuV~;fEM!;73zU`j|;?y6m>vb1BOrEL_p@+|Y+ym$U_ z^Les@lqfCdbXTR!z1PvvP0$;NG}$7gWq zuJhumVvv+-gYf0{zZ>-&SN3kJI^D@bCTWa48WOQX5_i>wY{@Hi!53&GP**IiZT!0Cw3r80C 
z#!abMqjJGLWPpX={i)_W`K6Ty)U-&mDk0tdi@T*VCjEE29lt$x41RdJz3&sxIZh9E zrQ=`K??&8M<2)_$@zx0f&>hDtrX3v^5qV)Tzi_BE$J!lu5pB~6V^PdD?l-WQaYhW% zd8c{I%`~OijD+6Mh=$-v?#t?A4`gbhh``a}eY}QjA0-G06A%nrXkS#-WTQ=|=JcRa zn2sRdXqj`E;jaMJ1B;sk00H{gY9>`)j5xfp~$)v zF^L|*Kd=X{w1>LrKWxk>iqOC2f?f$-R~cSof58(!FeA2I(TEWftLq^$keL5IPlvQ# zu^xNRCRD7;wC(;il{pyejIkr~^5D@OHBv-gu8!rny8ZOJ1PH!0@>_C-L@U6F$F0tYniHXLiK@v%4&B|q`Vc6Ll5*Uz?LhO4Zw!o>u{DagrzZK(aI(PgO z;RlqI>L+Ka^mhK4O)73Wm>As2Y$>~p6dwN>G_85vNkMU3Oe?qo=9-(Z(k5@MqCFh; zZ472iOt(_u%ZxR2wtaM2PGWY@O+cx?iX6|>WC`Oh8pX4a-UTKkK6)Lm1$)6-c6!pYY5QQuf^qBkt`1MH>CnDpz>X+uF=y%G8UOz}FLM2-$Z~DQdG7en z;UmOR;ZuBmE&t6->8*z`w}$SiOcYgfAU+n@*fj6IQOG!q%NU8ohc6>eUj3nwms3W4 zmfp}F^0>x+Fcl0nwedFPg*o@8p%0A7Gbgp-!vgYCu`PGfqtEHM-P&~*QmFjdVH=k4 zAT;K|jp|u`o5+*b#tHjBObo={g%wB6Yu)HNS-PyfVkUPrOWa*b z1rJUfDU&I_X{fF5fU;eCBqSAnR7Bh72WM`=1sXT^4+$r_2n@NGrn)tD?fevr_MOrg zgE3E7YofAR2t^fvIZjnU?Z#rAwlfPS1c9MKUdtIT1>D(ThzJBWpp29P2C;)nm2(S1 z!^dI66&PXj#zVufKD*lq5lmI}T4Obl7DP((cA*TCL+8v-&c;!+WL zg2GjtFIh8dW{@+7!N}&BtgBm%{Mb($&F{#2= zgnp9$#r=UYjA5c>-EZN>*9$+eT+3HQ}u2I@S z*p;IS8BFL~S2CFnV`ZCRhwmk~RctA#s4_(4m($>6p}ty%tAxq5_KD*=an zQD3zZ)qI{r9Y*P83TywFu_s>adM}sc^0{AV!75K~CFbIzRC_}KW7Qg|kC84iaQpAy zIgj7HITm~Rx-jU-He{Mk&jt=Rr>UTa*IA zyT}3vX*txsM3D@kSVY%+dUh1~qgO`0-tFlF;PuxV3%co!V3{eNqpp~{c+sngoQC_$ z)T&E7F=&;RccOy&yrn?wfU+EaW@G2Ddnx_darQxQ>#vix znI@&<{ub@r>2RcJg9sZ|`4}CZAas12SAbadKJsg$aD<)Z_YjCE=fL17_y$^kC8D4A7QMWfjrf3Vap_b<#nF>0C++-_lc}9nd8?_=%xNFBTArmt@UK;S_orr z0d-Pxa|Y{}TmT!vY~L&m7wmb?45PfNT5;nASQOD=+oQnll*Q^Hl)h_$SYaKNM-<~^ z1<-U+>Kwlm{-hg15*@1dj3D$`K*XkMZF0e-tHa4lc!w4IUAZ>!P_!1( zVPM7{1*3UrxF?N!zu;SyZ)<{LW0BduOBHR&`$Dsw9D)2HKN<%#p9eKQ{@n35EyiLk zu_yA&*d+E3dH(AqnRdUe*-JU3&$<^J&p1au=({qn{6YR8U()q_;jDPaIUdQW--Ejv z+hs%d&dtTMovQ^-+>?Y|xt&M8X=wVvUB{#?(Cx5H+Fx9OZ2C+b3hVljJfl+iq;7xK zObe-$H5pc-HK6$)HD3w&x0m&cfccJ5XTq{tB-J3|FO_vhD|`B2uzHz~oS$0));g0w z8)V7c+5T=|7SWtA+;%LD5i?mF_+HE-hXg!#3CL_+l7wpz9c6K^=o11w!_p`xQ@y=y zN?6&ulcxYa~rxt8RyEPMv-%;b( 
z_q%wqyD!DY@`pfNyV~U+PU;f!`1X8EQPUc^k()5X;Q_b-@X3ChF^_q>g~Z<}W`In- zp~DVq|E>pqa3}XPtwD@&*t{$N(#zHT!>bpWZ+r*7^SwRZ&gQnLkA&Ten|l9LErz(e zj|e9BU1FvFicMe43`}_b$yLI&tBjvxTBMIFy*K(U&zx;&%VbmN{pJ8ra(fywFnJcg zgNz%$l{M$$w)<3pe**?(ecq9mJ zO=6g>#6KcF5P0avGP3{h1BW{mXur!}nKenNBat~TNM=J8-UhfZVvj1_k!Yzb_Jm^4#m z4{gE{D%mM7i$hCr9C1-r6kUXFg})X2^9=g8{+2g%XLGS?$F^R06|;-3p;aq$;39wV z+@v^u<#)*{fHS!(`E11)&S~}e-|Hdd-fKZT!nxphDr}6SP|W(&EdR&L%}YO*|8WP5_gt3UI+KkAmm0X5KCKHaCP2?PvQIDl-HZHlPpsB~Z>syF zPk(AW@cJOT?%X*y@4AMxg=36hn(@!aO<>t!;r^{RgpbatQUkq%P*thDDd?~2rgvYG zd4fI0atsYK_WjZfa(Z^pO7fy?rXJrh%YAIl^m_^n&m6{@ZHP?8Qw*!8Z5}=^Z%dRB zr5v^i%HQTu0vqYn>p63PR+V0R2Yz?gisID2siC9mM@^MusY^7Qihc1+cey8F#bw^! z$-V>qt=EHaI(N7ttU13GNb~?%>T;kG35IF2!{|^DMpeeq)v6nU|8QSSv3WZ>?qSH!}Gj z(deg2#qX~=COqDf4ZI=CINYVrbLP$DmZu)ws=t5k9jqa0K3YsFqdqAS zupz6N@Tom0rp7q!y>WD0!~S?ZsiEyCBWDVSGW>yK(!nF89dEiqDnsO1;`+}Ui9#zR~)H}KUXqFKNi#uwA{9g18cZ9 zAJiqshE9rL8T%7X3LDFp#`}N#LW{o?ZQZ{u6Sr-Jd$e8|rw{dP=M-YsTGGe6JP932 zpR$p8pP=aTcjJ$n=rnEKo^f9sU0gV0>%CYtb0Z$k<*Rpp<@+l$k=pYkeoNr~GL@|% zCM9Y&{OKotHPvU{hNE8_wLE9wG3h~_3g22E89l?dTa)eKXC~@cWJ#C!6yDn#VFnlP(f+Vp|* zId8k7yp&l;&bvO*ihF^tYoEUx4Ld?yBE55^Yuio-RTEAH_uhpLt0!XwkzDsF5ctzUFgOAPC{%1tFjJ$@mpZ@ zkU#l!UeMyRm%9WU`XP5b@HuFp&J9!d4x86)G`MA@br>f0E^UHZw=3%7yHJ5PO|LEo zNyXgQuPWf-bKur@o?3J2=Q$tmJ~acY@;UdCSS#Beo3Z!h=iulE%1t)9o%~30RL#R! 
z{)qx$Vwgnaw7J-X=$k<&?g{)Iy9Zaeks}*LnZo4Rxf;;op4pmhpW~Hx(klbY`IZAb z(VU536_{a0??RY!l=9a>RzQ5GCWzU)4irHR9e%nODaJ}zgbU|(1UV*Lsp4t)b4#A5 zYd+44cdH7tLH>+-lhU2~Q~) z7{>NlhdM^3h-KA(l1XXP?c9x=Q1N+(LUSQ?OUmb@gKI_jfsCQf%=63I_>o8}&`OW) zXC`~x|0?Necc1ZXyO<~8{M=VJd%noS%6zS5ZJobZsSV}lfV|49(U#g0!r_xmti+yl zJ;mqJPGj{yo1WXQ^w{&geK#+(Alo%Tq$|Te zZ7Qxa5p6JFEHOQ)`j3aaiNFBnI4_8%rHF%M&Zm}=qK|z%B-K<8SG49$@gebICnnJa zKBZ-e#}6$m%oZG0fXYbZ!kpbklf z(y&RXh^*m7+4611ZHcW(@^0N-%|kgZ!JImUJL)LfIy^#LsYiMMI<9>-oD?mVVGEn4~jiQ7y=FkCvgf?~y$D#Z)uyZ9X#FwTb;P5^# zM8?30Fh@`%zCM_EJK)ILAg^hpy+g1BJoS2deJqw52=-k4?RI)EO(S7EiQ&gM(m{FE zduDB>>d4a^ahxaDNfl6`4iJ_F+4FJxDNt}{Ol>O%Gqc-|h-3 zBe}2Mq0)b5fD@qx3Ep%^L%N2cAeZGf0DddF+-E(u%T@+Rex03iLcuwgNzS2jx#C~h z{|vvTAO5(InArr*rO$ z3Vgmx6(ya$bmr`zsa4nWHD_2KYrY?;Y*TqavghsyG6vMW1h!No^hZ&8?1ACRvS zz?U`Z<3N-3_v(1{;c5mUG&oOJXg!eOH&q934&!Xnz)b)SX6WNDoJ3xdez|}2z)M4_ z)ek!cLwFY0_7V;?s}lDgA9D%|!|Z&ZV|pvMb>}?GNQ6aOk_-gvd8 z%bM?Qfs>b|W5mJz2liQ07N7U-=G?>2*m$x#MmA&rTm}o?lO28e9%8NkP4o_4TH>qO zRsQSW58Szt*+*v+L0ut{Igz7~vkIUH-NhLXzFAb|}Qd1?ggT6!)y`%5z}?G{-pbFWbF; zy*v1@3t0HOHq|*4Ci42!Vh<#*06(V|`L1J;pZS7LY(qLm_FFurcGs!l<=awq%0`__ zAR_}S>oDQgYH9gR70LQ0t8gS0jwr4|k#tG2u(`TO5qF``>yilivk7N4_s3>$$Wz}J zjaGRjsoKJJ`rmav5HK4uV7JVIPLErxZaujDD)Yr^@qWF7m&_T%?vCx~3q1%`he#8y z=2)+D91>g%azvo@NUN;$N)bS_GWT}bujI*|hi41z(=ah!Pz4^JcFE{Hu`O3Ka%nq7 zeRVo5aW%6lJu)S#7#3vcgx$JxA2@!RW%axmJ)h4)lKEA@@nly?u4sD>>Hu`#`;c@L zl+gy6cAa-zwQHoD&oaExID% zMq_kp%xj4yNPi;wJ~wb>0_(gt*AaZ(KxVW?hZ5x9+>aZ9^xO$fa2y~P@x!dA1e?W2 zZFDPD%tfg5iVy{QW)CD{%)8K$$8TS1I?@{$q|g{4#!i+^P~@REIL}xI|L-H|YlcnK z!T18a35E`Iq`Eh0B^sQFw;=@vZV|GG!3zqD4I0?G(y71%e?CCLuD<8}U}T^iY|voO z{+KBHx5~#3(*GWH2$_*|O!g|CBz(vY$}Pn2)rWbnw>&0=ejHyZ+=d*Wl2FclKnT$> z?Slbmq`g}_i>4)3=8%NwP#i*AH(m}C*d4b4mjN{+#iu$4$Hr}7R@y-m{osf`aArY$ zJhX|_tc->6t*h>Ldo<4(WFgL@W>`QhlNaEhwEmDlE{q0^3V~oFnjdl>i(kHO26nm- zU9bC3XbQg}d1TZT7hA3wqJZDsd`<(-sjtjCo?(W2>3|@a*omZC`-&g(DsF-rVI*CH z4M4(pr`@s6-nRn-va1s;4ESgIy-3FkSQEGp?9oV;-4p98o*(| 
z8P)!wNE4FYKjV~Tl8UcTuIFjZu0=wl=QgIvEW7``3$1=t4XaoSVJd2V0rWJXIwI=Z z-$bhF7Q4-}NU14oncF)zCT$$?2=zXsZ`w?q6!TIIPy@2Fo-Qgc4C9?LyxA=4)khnniaCwCh(xHcYtb?~6}j9qLnko- z5h;k^c$mhI2_XQ47@kWmoys*=UrZ9@f?eJ@cU5k!#Hw*F=pd_*{R89_l#&P0;csJF z9Kbg)>Q2(fEhn$`}HVwtY};I>nF^4Cs zH4?uLWT?zYPO_@o?#wp?o);*@0rsv|=80s+{8`a!t=Fjj!byz-;VBNa_>yF-uP@;N zuK6aVd^S<2RX-UyhjM1@-5s!AX}W?za6$93DJ2np>78hw(Fkibc8}&g(ll%rLS}l= zB=At(rrYI!s04kHO|Ya}PjuAGTp0at1+~Q!GAjf&_MxUmkHE5V)+Z%exfA)2o25p99B0K}L3KZK%a5h4w z3pE3(uyH4iasjex)r4nR;8C*^xJf_uG(iQ{l4z9tT3&^%>U{HO6M&Q{Z z#S~cDlscs-g5W+?)mi^z+KGg>&G5>-z=2%Q)jr7}Pu!$Dp39Hs9_(Z;&-=XW;Ca84 z*zM5G@E!n^s8iP-QeK}>T15#a-S8%Ol?5_nA%->rhc-Hxx67m5qtwNKh&~Tc0{WVE z!zPgtR0|N^(JdskhcJ1F7(Q6!fvb+{UwQdOsULPi2SEf4*2m+~rGcY$)TKPlaA5_! z-5a#ecxS>&z>JI#8RI%IPAU}VnY*sShEet`*b1SF1qrDpBiE zgS65`D7@lrP%ZPuec}SptXn*F5O;l~F#{xQAKHY-yY;8yprmDGaguA8ER7;3{1;ZP zzP;W`hL!Ec2D5uF8rZ9G+o6}lOpz!>E)Y#e4TMwU_GtEJ%Ax4vv>8chU&3C8&>ZSm zQO&=mVqckia^dgjgMBgOD}&;iP9taPbq5HxlvUS`od)#CdneL!2u#GgRQq|!jKlLX zK-9o;R?5vLaCkT*Z*P+^fe2>DhL3Lxu1KyiueRn>m2YwAGF>X-V%rMl7CW}N7~ihb*#ewc=sHW`9h&xTYIZa@RcPc4}}F(5whh>J|GpjDN!Fr*4$ zqJ1DpW5D+7acd@xQ0oWrdGjHosc}C>sq-FFm;nqcu%4>q4ikF}He@2%`RR!eUSviQ)g3wedYFh$-5%Xy&4Zj2f-!;(1LJoTP|h z9pkF4B)9rqHo(TNHe$Srge=>QUyb$~sVfoTCyic*X4#DK+;z6r1DJb`Jj`BG&m`Kv7p>8 zzL;QS7)g?ne1y2Nt|0`ZHJ5K$O=tK5n&7kHa*UG6xr^03?(7N-?$JS$PHRLgn!P_g zu}}N=3U&%@bY#tMS+GeUQn{46V<0XlZLaeo`o;G&75A`n*d6;GZZ}L^rYi`dgu7R< zr-^>QH~|;fttQSLX2k^|s8v(6>VT#K+V5$DMyz!Nfnh%-+(B})3;k0%rOl;OHQv$# z2rV^ZDxn%eOT{sGz|24f4kZuVU5jPpGkhXVJRb&!m#Q+bhgLK(L`IuNB7VZx?!Z8m zu}SEFbk;#kA%Lks-sqY>2f`>drF74+P-l)*(q=o#&_2kCCVy($u=MRy!lL8e!A28L z&RwYjVent)4mbVedSZ1RigajnR_t3$%0;Y`x?ec;==y+2&{Q4aaOPC6lSucRn@k?* zAHyQ+@FNe$Vy3p$RLz#NS$K*=ps1XwY)2cv$rP&+7VZEtBlia7N z&6H_RJ>Q6hz~fYcVNU5hKXfO;2lReaF{NdNE{66!kn+jJbtqObTUhDiQab#=>0QgB zK!R_1@!_BsNgGo?HTqI7D8SDNy$j_+wt(mdzW^uvGn5is=~DEDVC;BnXxStPn`f3F zS}50V%QuiNOgz8Q@1YT^+|Qfug+{6~`YhlWjeeT>v&fA43KH^wvZP%EAM_C`HEV^R z_i1rW!1lc##)vdmsH7R^}P|J0Y#s(l0W@CyPR;^&F{Db!X 
z_un-P(0*20Arl0aA0R1T!Y>VC6;daEs*@Lja0 zyC&2+!L6|^Y5m}FhDTz~3Z;zT>|ne&yXyI)Qr6=b)}nG*5Yk=8jA<}3UJ)hdntUE2x7Bv*6`Q8AVkmg4#u zwPkaBeOh=1%kj5P%59(ZspnjYzV3>0kRM&NS@Q?wSHE*37;UE(O?sZdO1Y>P#4K$z zhw8d54+N1e=mENdY9j=nUhNCW3YF&2RBRrgNtNGx>qLS(jh;{K*w5%;?rT`a${rc-d_P<_f&BG(d!u%h@3v|5`*0u#MS)*O>8>N)r#%VGbS7GHFEG zI<58^Mvg9O*Z0EX^d3w6F&^o+SRO)VhUu-;Lc&F6C>J^>NPVu22bJqU!Sn3R7_4*f z3uYAvj^kkz1;Hjpv3-T#Vjksv*5*}LWj~2UYg|&0*9@9q*7p}Pk3Ggu?Kk^lYNnOE z$HONJ$Je}8NWj`1a2?7qbk5v|H2XT3H1tmHH;t*xd!7KxI!>hzS7`3@`$nX2`3l*= z#?6>hz??)U^6HE$74EYKubj$aPGQj+{VD*FNN#U@!Cg>kUqb9iiJ2NEI z7)L@I`30g;6Ja_CeVDCqN?4BW2fD85zV&c^%exY!i?S>M*Xt5wwv?|CinMpVBc8TM z@fv=SCFCEc{BgGLZnI|~_?6n$R^&(H+S?vku3iQ?WtK-gYOagNl&eqI!}Y=h)IXFI zsqi!;E{r2xE>cvw?@bxWKzryHGND~xc=^DaxKBei;?SO|jTb%SN6u2Yfuo(g;+U$OsxkZCZZwm;tjuYpUk$rNu+C|@C}A`{VZ}1e>Ub2E%1KFp}w!V&w!H( zRr^jsPyMe}l+DKO?ZFig>Xif#m|1G(_CT1k(!e7)31e?Qz&t_emO z>qfG{kVE|IT~43e1Y0CoZnZ)X@RxVtzFN3C=wm#&7-9G#7ils+U~yE@K+~!*AeRO( z3uh-q`3I=U>55eOH>Xm9#~}oI1w=uwP$KKbKos$o3PwRmtp z^6Y?pv4&(x&qHkpU4Byd^s@#_npSdl4^0k*cQ0-vW*z#wXGMk?v)3sRAxG9~ku3o< z(k9#bo2h~Dd#zc*99WKoQ)Q^0N$qa!4voUc1CwP?GWt$x1E(fpRXo#LDC;D@)!1M} zi^qY-h!Mgu)BsF@idGy3Z(hGyKPSo9O7B58$^kclbpCLz=B@gZX*Y&-$lii=SV#Z^C|Ja?>~SIY_%F=>Gt-Kuo_dua2V^y7$XLJ;I3A%EYzZ zX(f5sGDz0IFsDXw|K>3Z^zw3{#5vZ~4Tl2bfAR$~Qosx!FJVd=3smPC2unpk=rlpusH}KhS%Q+?KZjJYk1>)Wque9gi>L?Q5jVE2?Xb%?s>%G7uj<5X zwVkWVik~OWCU7##T?LJNwr$04*&Wx*L-5po2(IVe)ixjU{Cs>OG-emi&yw#&j?X76velTXJO7A#gsV1>LmwM$_9Y$Ej&x1=qjw%p-~I8^>D~9wr>B}wa_m)i zVqX(PQh$0g?iw(S&3*9ZP2Gb%;?8RNd?T7T7O^O1(u7FG`7ohFItXFJ3{3Tp2|=HAp8X9l6t4;)4n*}|hgZs6riqNUckY-iqsks@T0 zBt9MOZZ;{x+Fo!ZdJauXW<&zciH;<`*Oh5$C&@ner&~?2R}>Yvu8zp7-J4g?2fav%`qcMyq7I~%Sh(t9 zn`QM`caA>R?HXHcj!&86%kk7lAoH$qms^$pvUmIaSF!szDi-~!_^ZU!F|uyi*Fk&B zSI4hn(D7Zh59V{6W$oKMaW;5V@IG55Hg;sQ??uVysrk?vJopvg_pNVN?2FKQ{=R)^ z4&wGLm3vEvqx$x79_p_<{q7v}qaFBSLR;D`nd|F0T}!ol8|Q)SXnLk+lYj3|e>(la zzkW7-%$x6-_*Y{`Zr+eN4sddD#q-cI^S`~xH3?Il(UuKAI+HG-9J|R$tq~VC@S0Qu z)DkiBpSJ`3x$bJy^&?GmiDNxEKAK*B`R4ScFW#TN@R_se>|~lgd;8<*-pM0DnFwnl 
z&4iU~niMi0jK{CZq}luXA3mA>&7VA;zWvtu^zjEwxCNjVrOG}jq1wmCJeDE5YKcj| zY~*Lcp0%W8!W@%(!I_Y=+)#(&LL+Jx@6k=I$wOT-t52-iCnoc{)lb^6%Nm@?)3}5M z7O?>1LMAT+B9K@W8XS=eja*0S|FiU|Jo5k zDf6NN{#H;#M_WkOpEB!-EZ}2d%$RJbVOlYSTgHSDm*bN)laqY>XE3yJZ%D@T_iD!C zKER#hh&gjbU~osu$S)iFmIT{1dzBoKD|*$Kfo5UN5X7m&(fh)qAn4><{PZEnFh`w0 zU1QQj5LR5VI!Q%%Sb%0ZkwK z#kQ38=YFvEts}i%v4V5k(y`fx92v&R=Kv1G9HGGHveelOvsk!h(@NRr2i#*^C%&qW z+SvM~kFkHtZhw`vrbFYvu{hSU@><7N@ij-akMQ>6t8w$h*+JKlyNa&Kwzx-_k!>q} zm)$&;yRf~F7?!?&``X0p%6EvbVn+Gwwu(R~B6*JODcyk%ww{0KE5#I=RsIB&w zF7B(P;&(=nnV$A6-oE{Pf-F!IPJ#pZn}> zP2}%RpSycwdgOX9en{`u2|>O~3m)kEZW@^TzZv9u+x>+IZ|FYG6#_ zM5Aa;e8`!#wF<*W5mA;f*+1%{xTRn4h*Ne?j+57SUWA2)nXuww;=GzTCLgclioQgj zW<$%BIpI!zM6_T%Fl)7`tr(;IKznZESpyVL#quT3w%_HerY>J#A~ zD~JmQIz(-Z1AUARs?Czb&$NBs)h84met0oG{P^kg3`m2;I7HoIpdP3gABEC}4^#dz zgm5mnXr?Vm@yw4G zWN&e@vyk^#7j(xb`XnUy6s-%gxEM)@UPoh@PV$g1riRL)&q9ROV2R_g_Wh3^DIfcB z7Au*1rt9Om_!IyDKmbWZK~$qkwtHBk;M^am>u7$IsJ(}xM;_0XA6tyE#)TZ=W@E|E zwvV>5WLUFjY;MU&8qoZ70*JxVqi*9pcEn^{BxrdN1s)ifZg3pYzw}{YjU(2?hbBjQ zzw!NubFHz5u1SPlmKg8Yr^(+qW1Mp4Z9iLrh4IaPF13*(d#L_^(M!Hhc<%|H&W3U$ z^U2tjh?H)B%4NLR=I{bHh8Pywm9N?ZP->!DFAGofGw&+Mnfr`UBe^~fP#0t8yo>?; zHoq8}i~W!##xrb7OgNt7-}YxV(() zBU=fX)oV|joxnxUj7`CA3?}p*JF*$uXl7xq5r;w5x5aJ#iruoAcNdp2mafyBwj=wT z)i=`3MVxD}4ScT~H{!C*Jbk?VuCa3Cv)#B}_IUe8HDTMb6IZ$R)hjUn4_?ge{>=QG zcy)aH@WJ`?dw;?N^;S%3IJEi1?-=HGoH*7?+rPtHeby}%r97N;upn8AH`-XJheIy? 
z3pBT6%c6?YE*mGxj~-l1U;B&qrn6he)6r{Z(+@tnJKetjbUL~5G!sCmh`bmHE z?Ai2>|Cf(7X{N8|)A5le&2WnAf-nBHywnhtsy#1p(r&GIG-dWERxA{xWf||qpY|*% zrbeCsFbeulJm;>~h?*ANaXw9}Uiu^yujdM!w<(e0iW;I*;#OT@Qw$S%?mROnKl}m~h(`2y(X?b<$5x#<#^P#(9v;3+~>#HNEocOVb;#-J4Es z>H7jVVg4|3m3%kUujso*nY#HjhmX>!@mOOO7c*-4aUJ&^ zg!_8L#dVUzwo;0S$3(oZt7GCGcZw4q@ybOl;BrH^aaQ3>j^POI_>_dU3~k)mZH`s+ zv9LFFd-;e}?_Kto8{Fv^?kHyWilHKnr|S$uv87!BQ3U&kjvKMxvu_!V*L5~q?gTKA zAdRGROr)lfRiUkG2*w!>LDEzb@+wk&kNw=BncG+=4FE4~AxAK%Eo>B#b&xB^1*rjL ze8I$0`I@PS+K;)e=C8hmFZVn4F*>QOuI*C@2da6z?VJa@ZG>63ADc1gUTm|p-O~2T z)BD_ZPRH1e#OB2~OWQ5`c*pj3@FQF0u|BHbzFs5{V`=xcL@ro!)_QC+_ByiWCBEmc{+bVaZTGn=PxVzk>y@Xsxm~64wwd2{ zOWToPvD??TM_AkX7~7Hd^|rn?W3fATA47ZX%L8uaSl*;HuFtd8<`BlT^!NYhW8En{ zj*~4nTqY(~^NFbC;6PiFkY8ns!TWkT!K8(8rS{p-?D)+jK%C$jG225&RDL}pz%AJ0 zS*V23bI#n6)4}+mCVp>y>!a!AyJtEiUrg^lyfeLd_rsP?7X(b|bmy47fAX&$Pv86Y z?RXSIr(Rijd?EW14>&|UjEt6;4ASpxU-;xNFDLia!o)mg3Vz?VJ_HCX_Kb+TaS_gB z3?o=-Sagv^-mF3Sh4&;b#v|e}3dsoxg&v9{|G^+XymgFaoi1+a9-1ad?J<dVvjzkN0xpFW%J+>2ZBS#XYqs5Chdzw7_*cRrrp`gc!-Kaq_+A;&RlS>=Q# zFMy(Uja~92J(aVhkLtzX!LnokRCSJn5cCUyRZ83;o$FM*wpDNlgUksdXz-aIJ?bE1 z@JWVI_N+oJ#+D^xTn@+>2hBMG+a}l#-hO}j!S~(tmn3?~-a<Rw8jcS)Ro*N2Z%?29?5oq4KYwR>?e>l7)!UDz zm(O$oC)D{39m{kKo5v$qqKf%t?sxD~(+z!U@?`qpy(jwQWSX8ld7>!o?-amrMDgqm z)Rpz5OiJEAT*w(GMe*#fxs5o^(Sz70n=SWQ#naC=e7WB-hKg@vK`aZUP=Q?0@P$qq za!liR5(7nkaF*z7drYcHgUBZ1>)=@z7t@)6u)2Vcx|`AZ+}F;&igiOAH*e~As~Z=* zH|bk0jAy>b;B_wROSth_ziY6+%0AXUbN6-QuYA>S`&O*&&S?o}&qKS~z<9CU`LI{p zD7IoN57?2GkC5sjhv-Jt{s17n^5z{i0;CPq40>F9r?yLvy;;}<`9_v7jK zjoZ`PA0JPz+&-R8HL>Pn+lbT&{3D$Z|M*`#(aGaX2W~x*pyyH4%P5$X#D4B3p3k4D1|vR~4tiGXUbq12?*6J}Kd^m;m7CKaLED zkd{AAM4b7^-^tgC}Fh9B5!oIN%|T! 
z17Q-!wt`T0T*;>VNs$@Q<Uw@=WG;U9~b?2H)=lYc6h`XU$ygktKaV8MCopP)JWzFn=Y>(Xb8QZo*@Yo() zWm_wt^*zS`zu)H>qb6_npcl!Q!9|ZSBTx2if%*BKkYE%NP*>A>ZUSwk1nKv+U`V$!T9b#MbJTXqo zQV*15k1>uia*u=8`;6;n{3BnKgFhEzk@_Dc8+UegI=!k-MP7UDrRn}F_f!XyW`0kJ zN3^_{snub#WbjD3hN!doBX2h*(Hq$qoAbOfY~!?W5pG^{nAftUm~n28>aeZy_B@Wa zbY8a|Te-0HZR5B2D{sYMXYH4ft&gkiJ{}+Ih^Jx?!BqUEiF0*Y-&NQ?%vS8aj+l3! z9Py8A@NC(4`KYsx>)Wmo`@VkJKjN^>(!9MK<>_s^IP4qMV&B%7RDZ3@cFXG9(htGz z!f3cx<2`uaMe5xg-ewom+q%Q@@dJ*!vc;rC;k=j2J`l%Uy~IIVC%`!;j)a3riW5TQC z^RS0EU(AvLRER~ak+Y2(`)RW;9MKzhH)4V#kl+bXW6OY00CCnSTV55NCA5jU&KBmlx*W)MW(?9vY-k%k||n2~vKBGd>?GIdY;eySU?hU@B^FyzKXwwWp~cp~Q=ZF{QlJ9r3NtzJLJ29c{mV z<}ndY#{Y-AH+z;OOV0bE?ow;-Jw00wXMn*nLo753B4{EdJ@Zljrylsg2kAkk2hxl* z6GWqtpdbJm0<&>2Gd;a`?RV4nb3b2%$GImm^Hx<40G_H$Kkokd=YCE^MxKlt8QFLn zk4SW7Hy&8@B){@S9*!CD*O>6Mc?YS+9I#E>9Qzt(7SEh{Pt>~bE9OJjrSRJM;g3I# zFE;<)_uid;t~1R?NB5>T?>y2Uf7rhK)j{+gY-WsSW|U*b+4jYb>a?MIFAxk68r0bZR_PIuE$uf z6JT4F`~1fCIeXo>Dm_{IY}N06U)R2T`~2ob?!xZ!RSx&TkH%@g<%qW*qp^p|t@tfF zytUS)&kKMfW<2x0syRBeipqIw`spvvbg<;W&KW)Ul86PRDN}0RNRc2&&`JZfwi#ML zb9aS?tL2*+WLp-sjoJlW%LMJM>9bFsPS2j}G7P=hfA-{9Z=k%rX>2;9!qq1~`hm`g zawgJAu1#9j0b9;&_WbOy&`n}F0x<^JW1q_)qvpBwoUKdAOf6dVwk=R5txQF5R^1E2 z$gK)8tZ|0Oe&Ax>FZE~Co>8L!6|2!W?<^T<+`k(=r)4nR;5lz z-A-bV;C)Tx3RvXGc;a$<@kPNa&FM$eJ72j!z4^v{{bfYAFS+8wT>Z0nD}tG#s|DX* z=(3|9eQ=QrIlIY)TMsw-%ZSvo5?y9~Y0G%hR(%U98FL=;I5}^5o63KK=B$_W!)xC`QzBy56W+cp6J`vLz?|Hs6t#c}zdG@S)@Rv-9ak zKl~f@FQ)gu`A&Wi*82cwo`aTxrFkBL(Il`lhY6fzX7IE2j^=kP$7V`R8ftDusn=tP z!B+ENbF*(tw|yjQuHIjHYi{~9`$rtcnA2G6$Ps7t)87|Y@s+RI_w{S7&R1>3n;W_8 zwfmS4@b`%h2)PES8=&^D0cF_dAH^JMgWC_s^t$a;_89cqdVlrZq+fY!zj7S1J+@*i z&!|6&8`(y&*m|sMwH@6ybKtW-ZS6qA!H0v6WH;Ro4cYTmCtoW2_;Fshs>Dz&F{PHw zsAt*8nzDf{!e=E=V_bs-CoyweSQY|$v#*#Vrs~;eoR1%KImY>Pex~a~AL*0D=)e)gkfjD;b)Ok&KnJ%f6}WjotSXHMES*cG)zfI=fm(i86BCS!lss&yp&^=*ixCIx0SUs_Vl)=7ZFwuJzQSJJUbMw@{%+4NQleaORa=&=Up<q)r&{k zu&2eID)U%^rQwnw4TUF2WChRcRnFLs_=qAG&f4ZFCN5(EvxsgfLe4N^m1m_#K*L8+ zKmf*eB*cw%7$1iJ`iCD+UpzXWbOp!s@sm55Z$8tTHLj1N`eL#0fAiCax;B}=oPfzH 
z>X>p29*d+<;ZI7J5pq{t8^&tB7H&9~2iVMHdg`hV=0iGym2y0$G z#u;|@$)Ei2gX#0n9%~*sp6N2Sd12qS9|UUs_|m%TIp@s?epu^7kGQ<9>ypFH99^?GIy~fhFzQt>v zD&Zti#e^5QS9xrP>CeU=Ry(W+r? z0vt*=&A*M3lx~o|9K8ThqG9fEa;rc2)I@% zDD9oCE+&Y0acM!uHHg3Z)nk3|&=)SXh<@}iIlS$N1^3a1n#^ud=dpkxX&r~-L(1Fl z_Yo2cgY3=EvHjpu9nmMh=OFv$CMIiW^f^O3%=#$_bnTxabQmo<=!ZG?s6 zI|-J!j7h9{K<;Ye*FlR|O~^{QMmh5c=Gt?g$uUJeL4jWPA>F}6mlQFwAOGY7UF*&5 zNaep`@eEcYloNh*q#qRFyi3lKuT>e_yiJ`K?;+c3sKI7o&e?FoPK>q0^m=iv#!~HD z{W|tt7|#_L&yDewqmH%OjUCAvV}Eb6ZfP8{pATb@yB@^mQIDQl1*c_gGqJJ8?2<0d z+`F*&>O>psyp_W?$5gCyVyiL6?6Oyk`<2f&%UXwRU$*SW^r~yv#kb4HxO+cyw`=}9 zSACrABToA`m}OfQjJ6=*>~q^@yYq5gF29=Ri!>Bns7R!7#LFf7cG_com#mTWRou1u z=6D9@$82<`ug&(Xl1g=x+qkvb*}3k|tm{P2bR9Ee=HPvfjXTsuK~aeamV9BB30TU4)HQ4_dk z)x2>Rl_R~0KGs>dGo7(N)BHVsl7~v~FNxRvn)LysoiK`I$-;BZVtuhOOoeaSq$*q2Gt#=`iQw!H4=ag_b0)>`{f%gh&p4azmQOfeBi zy1a&gEhwi-3)^GaCJVB&HIBJnfP>b`i)_ii0^6+6#uF)-VT+o_D8v^t&b;mD}Kvoyt&M8-Eqd*e~3KV6*TfWStYlv z$B*o`)p+{0o5Rw$s@Hyv?XjL9r2EELnhV+Yts84;?3Tac+Vbvef*2$GY7G|avFDC=0P)V@-L2;YvPJG67d98JJ4o|Jnp)$MB1bBP#j0W zhHV8buyIXmbZ*OJ^awN`kz>)_ODQjT)z-LXpf)+4KD&r7{9NeL46XfQd^$<0Mf0Wp zT%M6J?~TiRNGfN9%LsD3L@hRu`x`M7M4IfU9+BSmQ3GB-2nqC@5Phj4qLV_;S#P8X zj=;6>Oe`e|urcG&oX6>82@V#_oOycw{6Y_(W5Bn}z~x2zGFn97vCmSD*+xI}XDO}U zYzfP_@H5(gRIbL61UV*5!Y+HwvfZK{VWG)vsz!P+CV52bV3HD?fvdU|w?Jm>urtTo zn}NxzJ2^jm8h47g)IOLWMvYS-wUJ0?`+oi*Uue@csLeLce=V6f)8_4pxKY7t#!rgB*=FVEE@o4nHuqe$PM0;9t{+A;RGGBdqO7PW_KB&uQ`Z zna(&p+ktYF_KR{)91sy6!0X<0elx6gTOx8BQ&caB{#_F0+_-4EL;w{6Hi);{AcvC|*v zz1`R<={wh!?C~`hzS?@9^T6R0W=r0nlky>!_Yn*;#V}Z>>ac=bd2wV6Tzj295O_`Ud#&!3f-0zt( zVu{1g5=ytR(t`Qf(~Ie;7JmHdUwY)#`@wvf2HQ>!=qrc&mh?uPkzlGu zzv}i!4RWMRL~bABGcxnD&p|}}IRr> z_UxIqKY4JAD>Pjbsp*^PBB;}Bi#REnE}wHBR6o=rBPjQ4oNWYY&CrOci_V+@?YJbG z`RHWydLDx+o>YE|G4bMyfmXHjBd+M9ZZh)R&8~;j7R3aY^FtWnNIEuX>-h43x+iLn9(Ta z%@iBU(c%pY)) z#Gg(0&;Daa*0DYQ99xXT<-637i_v*j?pev2T-$6`hu2mNZK1*%#)UKE7kveeF{Y)x zb`{;GfnIAKYpRU-k?z;|9DNG2&C(NLNfEWbt0-{$ zuzj9={JS`vt76=*zP@H-oV)53ORYS%{1rRGV551=72o%bwe0=qBj1R7gzNc?aXfl` 
z)O^O+R&{LX=0kVu`D{n_9N3JfzizHA7d!a2@;R>h&C5>)zkZK5?L5p{#u3wjI(o90 ziY?c0xFVwHBAFxHSuFF>GhB-u6)hiuYWqk4yQd{iNY_lX`#P&}p?Aw?`f1{4x(xS} zyJ_ej?$bNI;eP}qGY&n-J69MDt(Xam_=yQMU`v(1`8hL*Kwf&N&v=Y{c|D;fXq_#g_%}cEdIHnos@s z10P6y{Vn)8w_ansnjb&q!YR13e&>kATE_Jm#w3Ad;L(uoHxr|l3ET_|lys%!8vQSH$-#v#%X+R$Hdr|4JaG&N zMQAp(F6VfpAN~lE-0+dDq)T7sbo$IoS!2|)7}e{>TS&+iYCg3w zEy45lE@Q)6C|V5n&enHczR(v_^#O#l_?b5YGtWZeeI&W4Vew%bHNDdWF1P^X)JH9I zotWFP&^BcJVra9ExPwmqDl4wmXWrglG4{C@>-*%0!!e`0_Kjr4$DC=7ckz!n4z<-> zTQT#(xdI5Js9sjgzJ5*K574}D^uAu-m*)`wKF(UJ`Kpb09@t#F`y+gfwSP+*Tlu%J z*elMl#`k(}-|`!`CCN#%bblWoeq$_ysCzQb80#G@)U}T{I{vlR>Ri;>W7DSO+Q6^8 z&VrHyPTpuKqV>eMooB?@j(Gs{G0NiFEE{Hyl}?`rIXu@jpX-o2kF(Ba5_Dd>{L^^2 z9OE%})_8gz_i`nU$dl76{g4EIe&q12rntySH~xh&ZQ%$M#%Q^noYdBoGmmR4?ivw{ zU`ng89)EGKx_JiD5i3_w*8bFZKA;7R>znm69eOZlS#BO1ap#xFa#R7Ispn3MVYB3d zyUsyWu|TgCl~Y&&+B#80v1w5j|74H-qqr(z`V!+WCINL!=E{h5jg>It*-K(iiCSuw z>{CxbP&IDalAp^7G^bjSaNl6u_}DLbJb8})e(0~}{VqQ;SoolM1G3Oy#zbR{IVn8%Ii|5Br&vXp9)DOpSTeUo< z#Vkin9)pe(XC_OzH5a0v=gB-hHs;7h5TLeToM&T;c5#kf;+OFS@w}6z=4Z0>RNmI! zhAD;Yfi0FVvp&`Qv1sPkHP;`0oM|novwy4+O)I-c)XMeybXs$bsIJ?-7nQEd@$hAO9%G&n);^^D zmep2qeSb7BJ*IN@`1Iw`CKdvDB%kYX9pL&{^lLT+eDl3q)2ZHH^0>yqtQ^favT*<% z#FDEb(cP}uRnOeuihmgoc7-Q@%Lgu-tCMany8yHp7Im;a)=zN1cC3XnuXN#=58c{T z_a){uin&^x1stWNwzd^_>B66Ch}Jh?#lV7(`HUvnYsfYyHHYUj znR6?IZ4TDbT=$IS)_t6@J2~}y@fwU%P)d3^o!%xulZ#Lxb9kwb?sYpBKRR$zJ(sB6 z^mGC9VH{*&b01&g>T4;2R}<{&(iqbfIc@LKp*B~Y_cdx6n=yXP!hfMJr=D|KFIw(5 zIbO|ezWic>S!_KGE8WQ%Fr#Zi5Z>m?Z5Dt^>=P55fRVz=yIY98Y(eY{x? zM?Uzkb5)M5ym~E6@4k6!dLw=kSa0ZhgKmfJ8ZzvYpUm5b#ugR_RiTj4cTmv{ZP{l| zIusc#CG&upHqVLre2_TNs)Ms8e0lBbZ|lr5zq%GLclq>Pp>KYhpNA%rrjDsoo6Ez@ z!{ugY4m3LRFQ8VJNx}PIpM4o3EqrrW*cvtf2F=Qzj$&src(N@SbDo! 
z6(HtSJj`KU-+ZC#--*m@S^pUo19tUGE&(C5yB4vT?Zv4@xvuK$V^f_??S8 zx3zNCsng)n6aIOdp+u?8Qb8H9E=*d$m5hJMwLl_#QClw})W@6ssIggcBG-|r@mwEL z`xd~0&TgyRc^Pwa^zmDI;EBmousoI^9k4ynPOpK(_m{l!k#iN`+5z7w76395aZtIX$m zh%2%>$L6#INZ$_!T6f-xHwJ0HZCg0|s|4G}SAX9(w`K3I`UqniIqBC&jUDwJi?8;L zHD~o#+#%z9sfDu(`E|%T7!ML#wSO6PIM>WyI6pCY zWU^&LI`M}4rOhs`C#rdC--F(YGtWLhICD8>m%ItK^1NtV<*4JY_80BHYQ9>>7B}CcwHJ6uhVVb0D7ScA>AghnsIGdMjmM7;$URRD|wbe2E} ziwPd*`q0Fp`R={EtRUyGEjYEqy#A&x&3MOKy$G*os@&a_*Ph zi)Y+Z{Z7&W;Ex=$UwXmNKhjTUU+DUL?yzwT9=h1~79bp_aeW$2MKuy?yfy}mZQmWs z!n<^L_kuIGhsV@RyW{W**w+{w_~v;*LG`|n`smsD!$%SNPZ*bNL>@kPbj3NQ+Q+$g z|H-Uusp!g(|CVYTYq@xqo^$mWbbP&?$dS)H3-h{Xf6v$Jea_xrS@GF#e*7a_jiYZo zSobY$L;6wUvANlAiGAcZzL#JRS~yqYnxH2i@F(FPnE){4QS|PsMHZ!P|%AQTK-}KI}7&WBQ=Ye)(!2sB_Zic&N-VSWpZ~+#(|!G!n4kIO4O?$L0_cUOGq_hebHhuJgCGAwfdCDXR>$%(#ek19RIQI> zmEM}MEwteiYfOF-O<{xvBKs5&Ha*298v5zj+p7J(?mc?v?lk?;@9L*rIen@HKTBpl z05ER;4E{&|{77e=;Y$wZLb|58Pm*e^VEXd+^BZPzDql1(VQf|1)KVnlVL zRFYwT$#<$9jzuwNLhsyTLE_nIGR6x2YWl5joW_TjTb%lnJ(}Sy53gAo?nLCX%QM__ zTeo4_h|PJouj;|;+%4yVYldrLw0L2kXk#$XD(?EW&phjx8sf24TlUGM{a44UN4g#5 zt*eXp01`~ySis-E#|J--0lYpy)dz^2qty%1w(#zL@_Yqqkl*u-e;(5Shre=pW~vPg z&78eV@A}!8B`#-x*Zg_h0Y(?(m&WU`zIPPYs=jKczZ#MT8e`Ry$I;`r^igil!N|A8 z)7yzPPw(5ZgR5~P95z2HZ>`mFwjtfGF~-@pg~LC>+TPb=>?o&mA?;tilh{~+^!p^( zM%a-J?5G`r^6l4FmoWB2?)AT?5L!Ehk{bV}TqUcmVMa!FgJm_m7dc=b>zgk$a$YRmT zHN`wQtCkDh^#LRDarC4v)yNM~#HnC-^uQhuy(+Jy^Kc7%#1NOUv7q72E*?5dsxwFT z@86m3>5`LsTG0LW*RQ5;zo%PGacGVOiVlo%W|@%7={J9yMWN2D#v3M?vR-P)%sgj5 zb<}19<9ZtghMA)=sM6dn5!d_;2R8XrW@9P7|oulcUcaNv?&8d64G~< zl9f}+64#g(Lmzr$-E*2_%i{fPIs57q%$YFmYC21!*9LQY60fJL>HY&4J+90Wdy95( z?wS4#&HX#C7J1B6;057O$0wH*1VAs=gO)0}Lj4)8&3eoKPD zwqT292*FbYsLyCJqy^6L;?ll&qGJGm33;Z=VZQn%#|uU1zq~Cn=L6&Gy4>iN&g#br z0_Q(e*19%goC)__X2YWI@;BH}^vCvPA0EQ588^47it=%s`;Bw#jQ5we51FXur|`fI4E0f342Yn{%gmRt8dzUe&Z~S_x#w7w6EIwKA2s&>LW+hE0=kC zf3MdV`qi)VO-maMXz^yR$3RwXz3%ueyZuOGDqr=x4~}M>%x#+hzjvb2 zw@w`M3g29->ZF(j<54ZL@=yvbY9eqRb8NBTrP9zux>TK3VxzU3%rR*4xAGexYvK&^ 
zz56G+G=mR8m(xG`n!R)bA${3zu_P|{+eN46Rx{Yv%hA}utY0s#FdRJv#+A+6Y-Oy9lAdt3S&KQ& zgx=vhuB>@$ z{FT$kh9u40-WG#IIUO;CB}d}&bpszmtaBXu%3MsH&)koLPV!_Ye&?$TmP@)~-jX~VZ@hI!muo!JUjcOe3_t2KZJPO_Eo+hcOBUZan)D*V9;wW$Ka#k?_-QN z)_O*)^V(-=OfUN!_K)IvTdxy~jaGRpuJ!{sc&f2>o7eiGvU0iqqFm%Wr2edpdB_!5 z+!{>tg3R2@L8#VJ`_x)0ZOH-@TgNedB!kimq)wIhKzi<! z`o%1*QMe|OS|e91SjeR!gHHCgh=DpL0CFi;yy=1wKl4kUR3W2;Jl2pox#pS7AYo%$ zWRxJxNRHF{7pXF5Zj*rKQ?rwkS)En&BY7C+sZa^|#kllQGk;0AdtaC7yr#w7$;I^h ze|T&9ZJm*K{m9(!lta4-bEGY&xU1=#?;lM+J-e;j|LF{WOaNHTrRP9>(By~4HDx_H zS1wcTXdbg=THEnDiV;)PB#(6CNFRAxZa{`bJ_^AW{!l_+$Q+T!NxycoRzWp*7;`@0 z>RK$kM6N=jCh4a-1Nz{>y*M+^zWVNa%(u5~#>cg!>gwZLuBVmopML-L^yfc5nLc^= zJO%|d3vY7AYnVyH=wjF~YB|5yLsd-4B`l-n(cBG`RH3xNvM;ThbW-Xc`)BU#La2t8 zI&Ce;bLMRmbixMfoFi)vR>l%7YjpZ~JPV=nBoE_=q6Cg{X{@(T@93{#`X0mmtLe=L zy8onF>=)5f-5C_xZGAww|KMo){4*IfxA5|psd)XheDgjK5ek=mw)K5sW_s6)t@Ins zb*^16Owofk>#{b`J6AB{jYLyBRHLmDsYbzClD zc^f}E&IKGe-m0_})bpF&Vx`BTDc;9*$w$78=}jscQ7w+`0v3BIH{Xl{EOW-?$lLoF z;CP*4h252~$c0F?^2Vpe8SkLr3s|MqamGlrkiLKaV)`flEKlm4Jv)wt`DL797r%Zz z?=lv|ypf7hZzL=RFT}6V*uVIJfH>nWdwdws7Y>p9_yX_NOf&cGh2UIVn1i0P`1+BS z_^=@7rGDj%4+&s{N4L^bNaTcL$-%FAC2!dEL`}h?dI=()TJr-{&If0AGEVk3EirAH ztz#|hj&D!Xn{Q9k+ix6Azwx{0(|5k3`J{f0_3~0@p5~R2QczfHY=;N0UrgWruFl;5 z>`pA6xkWJxYesOZCCiZ9E2xpZ08Y4aH^5SegWr1FQ6f%`wkxo&#TP?y2c+#4B4t}+ zqW*9-NWPxfe-$i8j)gqJ$Npt1_Tj{YS#!SN=@LVHlPyVRO)REU-8lTNuA@D@eG=RD z^|vl`R$t>5UtEUK;)6_2u7UdAe)DqrAOGOq^uPa!?ry*ars*$3Q~L>d>WQM}Z9^^E z_EDQ_XP=;-@$q9I@%oHN;7JjYO=-sb;SkM&COXEO!wP-gaIq6n2(iMFs?DPe(c%so zxYa;L*`cANgOHpGb4ZVaNduOkTLQ+s&v{gz7J52Ys_!q{(?e%T^&0r&@9Qro3U4## zj4KJnSKhljeewBoaY|uscyX_*?8d~lTWA=s_k&_y02{p;2ZY!)G=J0;9 zEXPs~BWW>pdU#H6~x<<>I&vn_CdPC#}jy)yzmD#MkY^ZL|n|l_c z`Iw!hb11HaY7&J&xkyYAaqvr7gYlTd``31x2tjF3NotHRt?MlWNm@ zj(Wij#!+!;O4u_NntAhRu}USxF$clj0L&l!tVzQUGxH<^i)b2)a^I>fX6cfj$PS2P ziwCQRF7ebGr0yN8%UAB*d!{qb8m*>)v;IfgX3Q?Y*p_fKOPKY?!EtQ{o>f; zyZY5JEk0s#&l0Xh6Gs81S!IDA>q5p*T7FpYoKrI-j7u`0tuH;8XZl5tg|!oj+qj@g zEi=l%YXN`OIg>Px<9Pmp9655|xv%^6eqHA&&G%G%Dd}+vygcr>cv4Ny 
zZiY-vBXSG!GZ;NNQuMg$Q zG@4oSj%jx!hTaS@=$1sr$wQioFPu}!S?`KyetaQHtNllAvftIhOv15Vn|-M_SxRx| zL}8CfycM^a5s7MTSK-r}(_!=8eAHhP(5vNX&%1xiFU$lj&X!Qk?PB3H$3yr6Z}j$7 zmci~di4Q87lfJC?zO0qH{#yXF_vtoe@Dxps4}XFOw4#f+;07>HGDxb9w3U9k~ z>ZdfwaO>e3R|NeS`iX--N%SqTQ1 zR+v_wKdY<_XbYG;1eH+5x5PrPC$M)}mOQ+pNb{2&y=)XGU-5gp`#i)5 z+)!$Hs4VxhQ)D{miK0x6&ovzrYk^nYF^L05Z5ckcA>LQ`vG$+7>1Mdb1)zC&uTFf8 z;ro!5DDm-zlCMn0ovZW4#JJB2>&15-?!2Jixo-QMBPPU00iXG?>7?$X!#ssmZ7V;M z7X$5v2P0HRL`mfWa4GzE^PHetcfq|oW1{b$WHiA-PNVn6EPn93 z)+TZbL@D4)|Gawr;n%IT3HBCRT?*E;^7T8Z$s5Uuw^cI2UPX?(e@}fiC5_q$8FP$- z7y|t^q))Ya3usV!e1TmC)6{FWtFOOZ5x0-Zo3a>vbX>29q6EvJl>L>2e4(Ak@x!Ik z;F)fbPqsWFTR7qC)#n_#{IR8qN*u4l?GpTJPd;-!>em|DV&&@eLk{9WitC9 z+Lh5sn(xHr36m+(hf1;a*Uj>|ufPs8%$Jtgn41#TKO=Mh z%|G~h>59fPetb{?^i0q@0YMG0P|{2L&tU-aZL4f&c5DRPVIt(!3(;cXLjUe@F}&~|A<2{QbfnDo=t$3jgg>xQA1D_yuJQMwYD zRAbIt{5?C+%x|*H$OiMyL757wc@b?=CJJ8`quaB!)*LnCOL{)zH8{o)FBo1?h@h+L z;Nql9T*ZL}PLY-TuUT#8DTTvMIx~z}Bk+pIyeg%b1h0b@ix?vY1u5n1yiAmgPa>M= zsX8>a8OKaoQheF)ozPVr?S4*TNvr3)YyFD3MwV@v-vW{bH%>?_?dR=fswGNcN=-QBn83W*V-=yE^I2r1T|&u3PLdBvIFpY zv)$Qq2=lU=a;|ZLKMv83fBBy!+8yj3Z6r_wpH}d{9}q~M`X!^R%|BKFwH`Np``@0a zN@Dl@LzC$ikGYbj>4JiuYYEO=oVRD@nna3UcKi5s3Uy^v$pCfgW=%hT9ohf#9U4A0 z<#y$E22V|oa^w5y#yLUOf) zH;H-&XssHhgG2a4GU#xVxj7Bpz0x_lxpVR3ZLDHg*XQ=Ybj9(|)JEZ1nPz5ZS75tP zBI8o-s~N8P2+G%6Gb0%zZjd-g;z8Q+H_$iOogrNJOlW}gO|@>rfrj-T^Pm&QccMCw z51NXbDEvw%y;kon-9;$Bt8ep3ccHSL1jzU>w$%GMq%#_~m~)%>Muc&|@wm*z=pT=k zPrg0pe$-UFc(7Kle6Rt}(2oUVR;|h32f01`Dq7M@>QEf$Dii6X-gF%h7CdSeLiD-a zZ*&*E*n_RQYPMZ~k-15Ph?zk$26I;WaU;R@sVJX0Jh)hBuyKKt$e zvj8;PfR1c1w&>7GwPLj^JG3Q0T&c^*wl@Lr$Dw3kcVN+hFy~Mj2L22&RVs%uKHD~) zFyep~d!~@P<$#v`xROt$9Z*Y>K$xXfkyuNhfI8e7h<_tih1pC0W)f=^E<$3{Bb3Qf zpGxr+Cgak$_Md7M$H5o(BBG|wz(v33OMqJv?zG-}Py|K!n1-^XO|!DhThbCyquRXOd7dQ|n2?{v!<&CJ*p6 zrcv>2kT%U#QFL0j;k)mVTjTZRg0HE!+M?~du|KWN7_Pw6kxO~$Cb5^Igmyo=L-Y4_Jl=0r4)=%R0SH5vDB zt9Lt>2*N38`fqBWDpFH^bRN8@+zUN!{-H-lOA0MJ;P0S(2^(%^D53rP`Ik@L_RC(~ zDwN@{29DFQLH{hiAS1r2o2_!+?)f-4Fk+^UTK{KbR!iv$=G^v3Zp+8D7s$R@s=K(o 
z)76_MBJLoe<7SH-t?u!r{az8727l5n-z|R<_JdB-PmG3w|*=<3^a)-^CzA?%FRNM-*^G2lbk0>K+j%U7xiG_4s%F{F-*PS1XLUi zdqF?>7svbYdTB9zll8jOLIX7b_WR*qVyt%h_ zb_wz2?!8HRTkOW0dYT+=xZIdzM5b%vs$-XYUY@V|Gfz*d8#2MNK=3;;r67ip2$GbZ zyQhN_w~NAx#v+XiUCmXYQq?YlI4;t#CFPK`R0_!jV(83wxV<)wEhZ8%Ou1(_aZ@tQ ziT3Kv0QJVkTQh(&Zlm=naPUiSP14s+q=-zFtUsyt`0JF)(#Cc(sH_ZnF zE=9%{#|fEzIgw- zYH&)$Ts^lPqR`zX;8pd;%r?aXs(6Nf+?s19YD$(PIeKR7CiIBU?EZUyFNw!rXS9iB zjK@mgwL&zpR(8J@t8FUe;6VW1xgNi14jHpORnqvGS1S^3eL& zf@7`LU0wc3A`BVq`u0A-$***K*AKj=Q!2=KtBxc^w^EfW0s*69{fw1nwf~)4lW}97 zbNRU4u0R9J^t!$CLr+bN{I{UwRtR!c9p|>Xu?1Xea{#tMvB(xa}3VGE`-BHGPq*H=n)7&F`H!vPZ?7No!2Y_5E_ zpOXn<2-~N5=usS`nb)e9+uls7o!-xn41SCM;RD8sxjj5V-b!o1^P7AMHZ=*UZv9sH zFfPsyR~d@_eByC#RgM7D-sK#}=H#8%WXE*;lZO9h-4;0Ev)#3J;(0)XxW~2Tf4QnJ z;Ny&Jy-&$-!tK`t!>_jK-CmWaiE8f$f&vBvMdss@FY%XWQJp@|a_A^u??}5u>&vZ9 z;n-*iWdr_sd9c$1CV86Mrqp!wZY?Xa-^OGIrs)BIf4OvUs5Z>+5vGL`JoBCV(ZHia zqpGichMEf{TkK7jyo{*M`G{gBE}(%ryZS%L*K&&X2*{9KB*&dkg&}!XVbxiTt6!Y6 zP7WHC$85K}P1|+8+p}21Gk=Cs>kH5b9WbUikUdP8TP9xYbD$M=zns1Kx4u;+2v0j` zW9~1+b*-B%NwUCMq%{zsiz}f^1LcJ)40Q~E zsH>tpw~=e?B$S+>RxhFfT3y!;%Xx z-!)p6Hs-4RxxPQUbJj>a8l8Dm)Egi8T5Ls1VXTk$TRld`wIdZRPt-#6VN53tg*Bmw zLR)fLi4bi(l!HU5^*W4ICLNqe>m-4{rf{DLym{ihw&mZbJ3t$gDm8#!ak1{TLM@Kz zo+SoqV@bz(E=*7NMFQ1KpsVk%4ym~&)RCS@;2IOM@m6)uKP&|?&a z`c!8ltwm|x?q;RXLZ4#E=+?&CHyj+?!55y4IiJ`>dg7DtzSln(N}3bZJ8jrb8F5my zNCzkfKTvOOLehbbZ$vv3V)L@*71&DyI=RXH6=Nq}+#ix=fiaTlg&Va-_&_wkz79&? 
zF1B_kawS+@)Y}(T8F}vWZ~lG|09gp+L?O13G+ndkXL=y@NhKlnlprNW##?hLIJCw1 z588rzAP1W(S>*Xbr7=4L{8)?uZL2aSvj+~;wx}LT@mrID#5UwlBK(Tj12a`chOpmi z8E&_Y5(;z1TX=sR#roEH>{mgy-V4G@GgJi5(I^3}n+GWf#f$Ue9m0BzLynm%Ep>1J zW~)+;)1-V};tq?pou6{+j~K{_>agY~anXVQl~mbjpZl+f&6!sLUxgoM$!=Mha$lD# z{~`jfKHY9sbP@!E&GlrYy891*6sDYlRLKofLfoE8BiGa;=X~L#C?HHoLHzjyKx3X@ z1rVwr*x(wDx{LHpqfRYypRp+nA^HwE9wv8%=-_OM--ETQ?D8rbG>PS5OisDSvnnb% zD6mI8-#m_CMs7|escs6>K%OZ+ue$7pF*b$ce9+aqg^kn#B(xhg3N3IxUp`_#!v5A> z$CQg$y8HAvA^FQj531hElcdG^X-aKLcj>YBHM!;3h|Ce4U2wmVW}UUD9u43PM-l z(R2T>BPm@DUI8uP@L5B1T=+mu1i7H0I+e`qv?C?B7umFKX=rLc?jPBBm_~2=*Xd-FOi^`o z#=H$!L*v^0i+}b*r@JKy%3fUaDO~j)8(1=lmGWowG=|w*o#Lvy-NZbYgzUDjsKLPO zizH($haL|%CYYH=k1oa5lnpj2IklGD?74c`_ajDfr4S>qvb`@}!(O0|6|&M+Uh-Ut z|BDfReK5F+0`$|`w)*O)U{UO8>!1pL9`iEoKqbz6KRy`Dr_!le(ex5}j}u3um|T3c z(H3|HjQT0*%Xpp1U6K)Sy6`o9m|-+~h(k1);*BL?AX0R7k>|Fw4xPF3t&VC1Stc7F zMMjO1=>Oz~MMIr4QRaY@Lj2aQgS4h+qI1S$TEUJ*edwOU1Qo=F%ZmlAVd27SqAgos z=3AfJC2ixs8w(E~FOd?tE;3A3BYg%BEuhzpw_^7OjbMKXNnbSEaH&x~J>^pM7SP#b za)W$zAF-I~M)oMeD!hOHmtwMKF37sepX#AEUkci*8!JhRQP8SoWvMz!s=Hz?!!Is} zR;JDvbIn_vwYc(>jV0ecwYm}5+Q_Xi<34WWShaom6;jh{FdAKidr4+Dj?l;}{iHu% zOWkT3--aGg3)nIcz#>Sl3Vuh_0?3f5MwT~y(A%Aw4DX;(FDyy`UZVR1T$ zF8-R$qJfqR!2{+-Od~ykD@u&ruB~-)gc~Ss?6B-roEdU z!M_HwP;&(!gjtNgIYycgQHoqwiO#2Q<|C@uE%<6)$5GZvgFeRBk=U2|Ra@muG2u1g z0!4hnC%?kcVPc@iZ#kC{ODFSjjCxL6ze;E}eE~J{d3jxM{_$C3;q;*bLBzFmE_;3Aw8qrE)9FUg{tDF19QJXW#?bIfJ(>CHlR9MA z)!5%aIyQQ*q|_~H9V3SZuIKsFv>v36hO?ljfwW6(%%V*tk*44UblcWj9DlD`L^y1hdkWx4d;+uW9lS_8qid zmy=Sswk=4jTMEqD6YEF77sdZ39N8>al|1$a`$eOpcO1z0ZF$NPW0|r%9y#5VY!-V4c|RxG8YrX$?P)Mm-9D~#mVIbMABujg_S5NI+BgXB)FEsx zhul9i4{q7o>72vCR1?B7Hbdqub|d!^yGHe!2g zsQi*$NnGUxFd(#887fWhosZip0?yh8tQU~625p@$5d$cQA+;k>+rKv~+sEIv<&~^S z>5%y?*au>^0iX1zL%MLRoGQBPrz~gEbsJoChpX@TlFP}}&cDLwdLtvEU0}yccK&0_ zr8_s&Aq78iMZJPur@YXX0QiJk3l(VMPm_~o8uw@M4^zey%!Q)J8}QNT6|4>hgk!4^ zae_k(Xrs;ChRI4HrL1(ON$4@z^_}AyD`t(3F&~55tM~bQDJh&OAiI{}L8Km)4U$ng zlDx~^q=vyWYhBIV6hB84BVMj#p@@TQZ%<+AwlT6rrGUdcwpb&k$+j*V+B;pQ2=Hx9 
zfn67aWdeO0Hnxy>lNpHkeR|43Q##8)?;}_EJ$E%d#Im)iuaNoVZT{hL8m;UvTPM<| z8r#oy%CHIdL()y`RM>%xVu!@g#WY>5Wi*@F9ExmH<7X?Hwqwa2F!#mOyd+Ogcm(;` z5U^3onPKK>Ki{#uQs-W?pI1{X%E!J}rEsXfYR-x7QFCX^8GY6Nt4UfYBbJ*TclS&h z9B0Tct3NKg#={jY9b=wpRde+Po~?`Vs=d%yyDx<}G!v}OdG%4eB2vS{#i^+$?owEW z_d5bxm+XFDL*kVpX7Db0Erp+3PH|*p6?6uQmi6h0wX^5SA#gnYIcmfhLjAH_^@)Uc zS<-y5-71ljZ;4@xIXgU@j5U0_R>hVNDJSh`mQw_p2u))Lht=P_xS-30UzBuK{!Y*^ zgFqpxy;{uuk?8_|l$t^fkZBWQyrea``51hunPOffn5)PAUAb@zTI65tPMc(#PA4> zrHXM-{e&-1ASgww$Th5v#EmyQkLrjHxZlq>xXrv_DcYf3MDbhn$HcY~8#GHdzg4`e zc?ODv5bPl33xnJx;EHz_#8SmKL+lqNpRP_!;u~J@8(#^Z_~6>wvGKlxl)OdZY}6Q1 z%GHGD&H{CXwj!0`VpT1&;2p5ecd^>}o6@DLSmM@F5=6Scumru)&_2$TqoV6Ud*eAs zaOv>QSgHR=ID-Sy%1mQ7eYUsNb|#HHyurccRE*^fq*-jf+F=UE!OaUY3@7?Xz~xDc zk(68}`*|Kp1zj;l)a#NxoLXyREZDJvU&&+)frDUCFG+6K#({K-n|!41S=u~#{Lk2C zYXN`*+4a0-b$n;Rdu-I*1QJ~{L=fg`L%j9nN<&D!;TcuNC?s2M1hdBnVXZT_-UO@| zr!lThaMINLQW88uW$=ind~dQDIKEWsJy6__Bt{# z$#Tf`pVvW5DF%cY?y(6cYiROM)I7egH0e5sDTP26zM;pMt+D&&9@|Vyfa^*{^iq*B zgJg@0yxdqWsSjx6GDo&-m1_oM*0mWI`IN9mbr9t6ELU|!2wv&@)KGW$xyjg<&-u8- zC%E0sGSRX5N#Y_WL;hC1Q;EHyjE5ou5*ylrXq2+vD6~Fp4@><^F$;GURo+Wr0x?U4 znd8b$YBo2^OTX5TDcrp<#}xoCtKh!)W&e_&W0r3-j9V$f(^-T9_-LBnr&2o8oaz6L z-s=Ds9Qy{X-v1Nj3K*rN7gi6CEKzD6*_ZG4U#|7BK$s=iA)Izi+r<~VH+EZlH`jaT z+z%~*;DMCy(9-V4IL7T*o{g<9Gc|O_F(JHTEIu^p^TN};JsLqM zAbKofb+m-0R}2FnFjx!9+UanW(~G-J{WO>-EDAaxNR+=hzYu4?1{^9!o=2=2RW~9` zgo8IluT4|~>Q^vg5`NjT!T8 zpt^L+m{NMof#WJAZdQdoCXSo)A&6632g2_mhK?+gEMbf9}bw6I}_n;PV?t#A&4X0sDT29s8mI| zEaOKV8<@hKlwkzd5JZ0xMa-Q`%y=i?=(Nn*obFk%(&XuYxv!Fkq=u75?;?i2O16WP ze4n()CGOFJ4sFn-_HvCoNuf_pHLvH@%{@z!$_Sn9q+f^3egneFirbS_I`+rzHzS}1 zZI#`r!J3J)kYVeLyDU+bB`xL#LAtu@7qqm#ZFQ!^8!y?@aF0JN6=A9qT zgJN7`;lX3rDPw#cU$qS&n#|DHh<1$kD->6?u!{{uyRA1z8Go|&cmEHWj^smo7FQ+; z5hE6k*%aO+NoxXzyk4>!=~m{lUi3GalqKaRc%4nvAm|XpA3$b<(JsF^>-Y8XewLu4 zM_-X7?O5-2RcHUO8B#LC0_9(6k;F3}IfknN+8EHzHu#vCv0d`>E-mJ(j zKgHQkijO_#bwDO0}d|_c1d{86uIK=puKwuS5p&y)VnesMJfdDW;b>1v?+4F zo;#Oz(!S~+*WsU~vO5$Wu5QJM&**=(`i~0Dzeu@Fzq^LI=kN#r$dn5zT^n1~4gvxr 
zP64|HzFn67Os6$jYx0wudAm;RnvAH5$Sg0ebS+kK;@K6miPTlCnk35rikt5`HDsTN zc3$c!$iiRdDy~02rz?;lYA8^!z0uLTxijO&-iN8+b)d>~%a~g2WAoZx;DcjgeIkKL zjQfWgT?P#Zt~57BN65&#*Q%FKo;v)np29EQs8o)WPdFwhaX+|^_YHHJs4QuIVg0rH z0AN2E{t*8~*ls3aBPr`lVkWp)c(i15_-xOZqai5vUR56Uf(!ivZYvhM9U6nRE%P1N zZ|Yq~Iknd(+VG~uj4>1DKg;h<{!3Nd$P8AW}MrRp1#*5-=#%Bxm8&*4>?jX2`=|hjx{psy9LS3ex4Ws%j}eOF za#OX+sanZ6-dAB!fE!UMM{ByYe1>@owxsH@nXj?9s7`7@4@;Ye$dzmOSB0Zxzg49D9|R5CwJPY4wtw8z5z(2s0`SJCCO;YoO1mXq-#l(i{m2_D zP5O|=H1#z|FzbVgBTTB7SEy)3LTxs~R3}Jmw2-(XKCc#~M5w?Uvy7tT+B+dDNHTO` zWx-r2s;tqO^ua&NNu@d3p_p-hX`B5tK@p9s0@?2&!?==z42nzr(7y0<1Ws)XH>dG7 z1z$ht)sMr51rvKbNo>PoWr zVM+Ue)4QbEvUM?kKx6D*Z&jw1*KSX6nh0m6vJQGhJKelL6kz#0VDDC9e@Acr%@nAp zo{03<0XxlRVO_sWM~&Z~ZbsEg<+Nv3b=){__Y&PQ?kUy0@iD*E;?ZnkEYgfCH>|f) zX2|SQ**SOI$2**^|LJCkYOzvXd}nu#pMEs*1TQEN^K>Ymf2@DANjB5a&Ms5`XeV~^ zb42&+wI+&qQG7QxUtk4*fUZe*pV=fitR-fR0%rUB`N`=xOqQwoQglr3MU> z)Wec*?OOjty7S;jw{Dq%VK-y43%^A=r0Wf9U*jw<{}XeV&*W5oDpKmGHlM-eP1p;5 zsVTLP{0;gR<>s&c3VmswgTv-Mihib>cLTGLA)ek9P{MuVBv)?0(!#^SuLriH;?rDj zs}MOj=~X3;2nOgxrqtfOGt%392zh*kg4{G^)co}0p+^Me5W(cBKQ{!@UMc9&RML?= z*8zX-mn&rjD@p4~)h^d;JR4z~X@)_%DX#f*XeZ4{X2irWS>PK3T@ug6)1B7a*aA7m za|R5;$mK0HLhyqZiz#;-q3{&DmgB=lPVt!|W|6S6o;}@^Mr$V;oe!GnRKnR1tMWN% zOog+Lo^HHhj~WrM?9{lspodILdW_6nR}XLFB{fY-nokx`8IJq)vHn-N}Yy%2kU z>O(_m8xt2*a|niF_F{q%#(x1^b{v`?EgrJ`sbis}Ij!F@?DL*fwwmPV8a7Jo*^Z8g zHDoe_W;K<#V zDJAPyGb>eg@%?_6zvxohd2bq;`GG6M1@92s-nVak#Tla_0^_MIN;i60al>fW&iIXF zbnt~X=km+iJUo{U7^>@v!B1qqn<$ElrR!%ReSrsC+!^|fN3pAm!two!6z)|+$U)oR zqFWa~%wA&NmA$c}bu_!UISd$2j&Y_%6(oec;m=oHK)Xei=f%De#eFCUIqc0|z5grV zrLTq^ccspw(QXxhR9JYIoVAp8d*`@{XlOZ?!64?;OfvOom(ecFc8QH?oZ-wRtUEjv zs^)ugE)NV;*)%m&gFkt4u*@B2lwYoFF)^;?exMrZVxC5q+*!w+vDl3Tm37B;JOI08DTq9Jp{L*dn;X=>Im=#&*uR62 z$aHJT*3`Wa#BD<#J{H3I``GQ|Ta6|G$|=b%zO9JvQuqev>07aH#Y&!)P!Uc)@BWd; z_zh(6Cnekzq5e+oDLMX_z~Q4!N3>TNv^L&m>smLy4%E(f$;q@IN{jhL%NdSM!&!EJ zeS{s(n+RjInVYx~M&!CvtK(?(U6L>{%f+I<3iRVECOn)Y6+pWzp^%(__L{uiOXFPw<+2IF{!<-hBTbO^spBab 
znHC0GyRyRl*$IwdyF!%$lbw_45Y^|_u!-(iYkk~pC7|B!y%=k+ur}B08hQy~-~Plq zOMf&O-)$xFM;CScGL?50N~{E`yo3nay(>~TrMczo31|hVA-8C`{l++7MI^Jgc(KRJ zAh*uT#D~g^Uh#Ja9xMkJE*>3}%iMaA0VTLa#AHJqgE6u$r`1HZw1MR=(DyB)0K3uS zuU-wr*~JUNWy)N9yR75+fxq;0Tc{F-9?cUDVR{cgy>s-z)ZOU#*N(p~H=NcH^ES(0 zAC4>G{jjQP$L`DhnW@W$)wkwzU+!J_Z@$QN;41a@#b@FeftwMyY;O5X%ja6G-O){Y zu3i7yJ~OpHwR9p`W=kMudlfWL-^qQa^&2L?c0s)NozMHyS3-kUD zI`?^&yjv^Y=|n5c=(@bxSk~~|r3A*J#@hA4ZM=U?cRt%ZBn?e zG$wg^NM4ZpIFtYQb@0k45$Jfo?Wc@~T?1bi?cV(PwdrCiSVtNwNgESl%g z8OcdzD5>Geh&SCSlJmdz7DXh5CNhYJAepV%D>>Zv)@__e#D@bLE==v)bKpKqD5`T6V5CzsEgCXm&-Bownhx8EF|X+U&;aI}9}I<0Y% zpHT1^Cfquna=4NS${^&hT(5mBlCSvPlI$pVOYYo%&qP*DRJ0As=n!XqXT&w94+|%M z{D(=XA9Azgm-Teq6L4KvO$1)Qy79C%PU64oIoan8PLon{FT9pQ9mnu{$&^cUNGK-s z2JdkdMX&v5*Djk2I9!-^m)RvBOsn_fjlm#nOEmaeAWfY5U(nSrb%^sbpsnw`D*#2iLOtgB&df&9RpaODO_JyBHXAMx zQ5n;*PwNg*zH6)LY!pAKe?`6ay7u?PME0z3LQQL@1*W#sQSRK`BaM78keh_LkR;p4 zk5$S}+I%{~;G&w(g&(Od*}fWk%p6thr@$x@DzazRil)y&4LD2h_1pl+%YDgA*}x}- z-O#Ak**OzE-k#m+Y5C^irhufu>9Wj?L?Nduo$s7$?DqSbRSUcQt_0nn26aX~jL?(z z+wAxHJ}b?6rU&+54&uryq4POEAxDQlIz7#*`{0<@gX`hREUUyJZ(V)iN6_=ah?tPD z%FEHwuQ+aLLX>Wv{qWZg?DqR$OjM<2rA_YqX^;kjm83|Nk zz!eT0RSEyxM|-@%DH2WfI$xLT34IJpx>EldlP*qxO;Y{?wHE`r{n;i}@K z{=9rSkYx(}YrlF|kH#IvH)L`?txN~kVKdhF5{Mo zNHy$gr>es*e^c*>w)%l(ZH}2%SytnhoeGJYAIrM6ueD&hUtYq1#ea)6>}Khdi5H~SwN*N|%$mxtKY3uA z5pqXhd8746zujYH-03u7KlgysnW~{Iu`74>a=z*NJ6bT4X9Tyo6cS55>4iaKj+G*05Yh;BFfHOjCA*;fSqXYJT zd{(2cJaRqqo<&=>RC8L|lCaN9*9KpI`*_$)Rf_F#0OVbKER2kGJl(yRk9u-v)$y;{ zGmp*y-&7cV%f{GA!7`XfuijtBOp1*4E8sp>d(qOX35%S!u9XJ1-q7ww>1Uj78lTmG z0A7}VdCYZKBNELkx@fkawC0()`=sJ(sIl~ic-albx1x>`OgRVJ!z_C_@f{I?~ItF!N7Q0vHAU^p1BH_L3)d-G%ZlY*b%4WSO;h2~P0QD0st0uVDLlL$;JxuU? 
zrBTmojZbz}xse;-giafMWCHOQkG0{#USES0aKliihrk znk?%7KJ?^6STEJ-o0;&Iqq>*XPwCgkAgBFOe`l&yMg^L%&R@d6*9I5-X`9j&7n0=k zH~CkIin5G~zVV&ENa&m#&ZpywZ2tU@T;&aF5Qyx%C;g*pT+#(S zZCAK15NJn~%{^LypyUrm1N`&!5C)qz29q-RYrmw0mF|8mxa|;Q+9C1YI&Du@6ZG+k zc7!beLPdZSh|GZkt>%-^=RVspGGrfimZ9FbOW3R`#VKogA2{qSRg2pnCvbABPG!k) zv)Zx`3?f$%$o9S2>DJ9IZrL0nAXDGYm}0j5O;b}c?l$_Wchwa({_hsjJ2p0(BJi4 zb}vAskxE&oaV&vPiZMwVmzDe9mqVM&&VBi*@L5CeqIz6qHpslMTlM`})ubV!<+|%l z*)BqB^DRKS{qbCmJ#>~T17*x*C*fg|iQ3njXf}Y{j6(P9+j|*M39oc!Weq)2`5Ds| zhVJ?&;Xy!UJLQ*Ysu4e_xIfC#1=b7NcIk4N8)FU^X+pQ8Z|eH1noZVfeZ3>pb9Hp- z{a5hbj8ihHHkA8jthj}K+fCE}z_1OG0Y?-$6NGyJ3D zJa}b0(1@oSF_fI;Vlh>pDJ5xSIAtRx#3AE7?I0Q1x;jowY46W@Jl^)|q({x<_i~H8 zk3s8bRi__$FE8@H>A*v}TLe(CAKy6&W!mKrLVEl~Vn=pQQ0H0)H@^XpsuP3RB5U=G z2V8aPk1}a={V=1eSMFWRpYD?lao9T=JlgBsG!CFkS!0(U8)LZs6g~`yk7#-UISe^h zwc=Ee3GCMH8IKMLNER?OSG;qR(DFJsZ>zynNi|+CkO{nVBs zDU+KSGFi2LNBKVkb8FjaEiWIsoq7?80R*@xAEX|wcWef#W@-Ra37bt9PDZvTnLE4* z1u2$aNDp`wwhY(LLr$6K-irxQG|{YyzHXNQy(SH?8^lIFAOHM{(flY; z^7kX_hp0a9rrwyUk+%8P-Expr&{>O1WQJIJh^8x@$mO~7_DnS@$}C<1p8jkFXrShp z)y@atO%CqPEafIbzh5zAW=WO)+@us@Mr7WEq$fuUeA=s5X~BKPhUE<7Z_#=(gX^Lx z)ZWmiHA8BXe>AgrUzS`irxBi2l^8{LkvM{M!c@#kA@ zRW9o1grcjf(-%u8)zUWZ4iS4W=*yVK_o zzwn=l3;ur8ABM1H*FVp@HqKwCY^EKF=;#5s6ebltjwK$^q3JgL6GA(eml?S!p(@^7 z|HT+)te?gUaF9ekQtbwxOoaraKWkBW69nLs0?*sC1>V|!~8g%I)@5eH(Wv-<3ZFk}( z1>}xeb(PnoXd4Z#teQTGSiQULqJ-m`sVvMgci1f=EF0=?xOV>s0=c-fF~Nmoz|jK7K|3*- zFu7#rG9mHeTf+-tCE;oPiNS-$qaHl_$V`swVYhTKltD(_X`QTdsb9M7jQd*{}y*|4YHc*7#!KJEFrleddFZo7P z+<420^8vr~kl7cMOk8joKSn4Ir#xAPL|8R-dff{WjymT0xz=h^=SpQBXWg-0yVEnv zm8v#hpC5(Y2)#}wp2<++-a;dA@Pff3FGDXVia z1D;yLDN#jATUA2AMKwHzt*d>Io1Ox)pe3zOjWMlhp)Ep@iuj+dg(2u*zvi(umCr0$ zh4xN(!^YU>%7(Fjn(tV!0L7A*{$jyCzT>#V+s1GjD;`n!xj4zpE;1R*>Yle`)8zR-4r#j8mC`C7_bf2_rH6uKtwZ~)Km6=|LiLsS`2Dq#{ndwkUXcTWDoCScMxr}T+AcA*V2W9X+`?^}Hn*eg^q(7xl zF2+!+Mzeu_12=N(yvFi91jy*ul1p3-=!{06m%w2td6%{rd<%?<-_E_o#u4`P7T1)h z2Qk2^B)<}g!sDczMoRN zF^#As=5T>U}=7Wa?FD-BhDLRUc87JvL4=q 
ziQ1zITCH1EFH=}aI5LHWYBg@aZz0P{NT6+aGsR@~HUU#iSD)u68i8BQ*;Ro3Hh_c< z!(DeAq%RtFa^}fXe_uG~-`&@ZvDpixvvzk0yp2n z4&EMWUk+W>Kt1*;4vEekIdb*-aqN0i#S@--2@$zCJsf|(|39}i$~^J+LNw!%-9wII znMT1K@bH8FsrE-`!$eG!GTbMe_Fz z-*KQ}0nZ|KfjgQIY3BxkFE7qE8_#zCTe@>#5|)1i4p`lp?a!^f*J6R>`F1LB(gbQ% z{Sg?vw;9gww-@w7`(OK*F5*YT{{!7XBEQLg9TsyoYFJxzxMx(uw;39R=Ju{hgsOQ` zhrK&KiaV#MWtN!pl4($@c~>&2iMZCld0$|`^WM83>%Q3!bpH{}+sUbXI_uo#$@Mwy zEuBgJqyO{6>B*-;pTPtDI9bMI_2hGXOF`*d+hq&R1r?giv0qYGq~j@+;5|{@KYjO; z=`a7{!StP9eyA@ho=*3*$U8k@ai@JE_Lt2I4E9D|NQ@WfBNw8$#h#k`jbEJ z4vvJ$A2Lf#d(!B+z2}A%Y-<`g`(Ms8w#ApZ4a6122%BnnBEDh5LA|+Oq}BRdl+0~g zP_}xl^0s~W;Gl(lGe&jf4HS~rzBuT>RzDcdc>mxBAL~Qb=hNT)yWg0u-nl2+o#~y| zbjDgAEMf?n<73^KgzLn;J?DKa6U@){W5_wuTt(Fh$%?Qr7>=`yXuECY*^YQ|Ppy56 z#bes?+rF=E=c>3vxt(K69>P;GRqr`npE2~~qYj2q8`(!T6IYIEt9r#)ul_Cmk=^l@ zm8Z9jd|=FLzx$52#6I#l&T@poX21Ckk%z<>Gs;_Y^nS-xjBP!(V$hB2?YsJYjB{A- zirwprgCvKaWk2rojW}v<&L+I3AEr3r=C0Ps7k_r4GjNS(#*2tA2F5+cXCq2mP|UU*1JhEcX4SV( zM$m5Sxw3UE4|#o2Teg@qsam1LBgfsGHxlG!Y7VWH4SOL=+W%O+sR z`szGg>O$i`{zLt#{v>O5K4)ZmEv@QlUAY%v)4*$cN%)Z^)cw+&Il}yte%8Wuyg&TQ zkEX|;Y9G-T7rAX@#6w1y_dWQv(BrQVAAbCF`h!1xrty$hJOvZ4DFw!uh;KBlSoJa1 z)8wna<2Vem4DI|K_e6KC5g!+`#P!IUKaWS| zXzU-Z)wU}AI^o!vI*fWKx9{@9UnsK0EYU*&s*f<_t{DC1LT)S<<$fQ$We4Y2%NpD3@Qk=dxN18jrsC0wsWq9y()b={ z`%Prc_Fu(ZQJ=?;w0=n4TiCW6A40K-dGl@EjP0(Du5ldm z!7Pr+ddj{IwW;f6(_T4@rg6%*gr!|^VQP3IYZ3T!yVhXd6gs+K3Ur1ycw_)9fAB;g z!3|K71qxT#qSw5N)dBp8Zaeo>muwuLF4s?T_DVl+r^ejm8%_&4Sman*))20sx3w{} z3Y{Rv*gEL_>tM8BFxQEb#-f2oX~{|cW=D;O4wA;Td5vxk!k*l31q7=j4asI-ub8^W z5X&1m3%n=#1;BIt`T6Pd@oVCe>rrDrWC6Og%-ZLkGd5uL7vHevdGt#Xvs9XE-mxL3 zt;N;tR`qeD=C!=A7d`7=`lIMf$lUUQ;}b20_%L#*Te8J<%n`zVgwl$W_W1Fo&QSBd z!2&xs=`uyjysT~cK4Y%Mw{w>W{(KT|?3?hzwd83#SK77hKh<9)KGIk&^z!`pL;hgQ z82H!>$1)Diupa5hbpG&99#79@j@w9bhYViBt$`+l83`RbO6!G6H(JisGRC#nsI^=$ zXz>Vw?B(`vy1cFdr;o=yN?Tk9(<$ByJWY`qS3ee~nco=pGo zPv4(DL@-V3002M$Nkl!@Rx?`8j-7=ZIA2Oa@`}92CAS9M; zk>eo)`WM|?tL07e?Hb=rxemoss~^OCQ8n!1C4cqDtH_gRSpgm#HWy!U*1=W1=Wwj; 
z`>@V?6KQVO;=ZNrH{mq4)@$3ABwx)5j=nkU_nhFj54|?qEvpZ`&ojb}Y<=8_tIxHo z-{)}Oa#!rAmMuJ3bLPQoj5MdQW7OJu|NgD%tuN`|6$iFFTm}MFk2nzPPoi5*rNv=z z?N1(jD!d&U>jBa_`;0L0gj}-(vuu&$nlE{4XgwTn&Qswr?~~8-Y}VPGHhvcVx_srK z=Egj{uX}!}vg)oH?6tX9KHDN6gDf@Vykp2W6mubU=!`DjS@(d$qbK!)u1VJwxp0It z!Lu)F$T%|i0arW%zT^%X_{#%GtY=XihwNIiG-DE}qc>wY_|U>HS8_>AESz;cbI)+0 zMRnR>vX@}%PPk;kGrJeFfkPSF*&DkdmJDs|!^o`4SW81a=kDJ`dcUbL@Qv(XB9{t? zu}eOF^i&II-9}Ce&b*v9tE=&x(S2Wco;cS^GZwG&My**ri5{J0WNkq2A8XraUW3hx zW=rqu6z_DRVsUq_F}$l^`n=GY@kbwXi4U(87If5+;LM{B4Emt;N8i&eB()&C;t~Um zXE>x70#ErMZOof2*y5iI-hR~=2;oba5!EUQefUHwHRrO#kSu4ri9Te#|44UeA+G=&|KrlC!O4TV zAI(KIPS0D~Ta6T)2hzx!71qc_-{M{ni?)T`vM-T`XT?50B^r-RpK+I=dOKp_buV6+*NmuinXoEQH=dHr)^c5 zdlv`3p2v9XRW~0ws$O&U`Uq!RFAs_BF||H??i=SE?r%xsEXTNI8|T>;4n`dOt{;B- zI>vl?06jUqn!fcNev}~Rl|y&@)7husiCY!vO)8FkYM0K_0!FlIz#vR>&~pV zWvO3zi??THLmkn3`{bCYuZ^!2im=b=kSkjZpxBs8-b`X4kY{(6*yLZTT0CfxKb33q z!a|&@(um~?WA(v3s`FM_EKGckN_=49qqQ+*r|vZhsStjW zoLkIsDNNSYe4s+*t&pEF&wFS?nRcA(N7f{MFw_`>gOz%*E%vx=9UGX#2KLAYHjV7> zRO&g2vEUZZay+90vCbDueE7j5eUX<%^QA6B$zzkuZ5pV7?WJEq)%}`(`U%&>!=K|c z=W5EJhJZ$q(b5>>DgJp*1C!%Lb$FP`l}dYF_v~^e_mK;7@lwOGJrd3FCu?3Jn6bXG zt#W_GQ?Rp|Jc(1h#?PHh{^AFp=veq%w};d(t8(d-qGJ_NAbopK^a=4i^5|*(t_W^S-z~rtiZwvN;F*&an$;{7s~JYs|>D zFK_j4=_B4zUgGs$!3%2Miz+U3`%OyUIsnJoW@&rH1fiacalc~htMSH-Y&F(8I6tse zPVDaEyGg5Y=+sf%U?pyYp^y|DC zXF0<6KHezekh1@{GNNlX4qTVhuYdPMKSZH7D4l(ZGZyhiqSI7ytm6o$N95s9hp`<5 zIpD=XFEOqWDNAbv@?igD+^yRXZFjLz66ByMPw=m}69-2=mdJGtYd%Vd`N#&-H*l?osxI(2y?F-d^Eo;`^rZv2bkBN>CU~-)6GMP!YJhQ!0 z=)|FL;MwP3A!l+{+;yE}RhCT@Z_r3u&Mh3~l70|pgwId#@H&pFx_I`Ve61MHGK7pg zW&O)ITnDc70i!LocKr^Q-sPFN3$2=2Jad2N`E}POA&)<~4L<#-Y(QL#vcbdX-F-yY``wja}JQ^hIi*AmZ zGwZIF1#fM}gfhY|#W@GJ>-*&6&!)$ppX*-NIfh2waoB23`owbt zjz*^f=+^{*YQO6Eh^_J1YODAW&i>wqy|u%3+&j zjjg)A;#kmOeCF9|m;&=!sTi~U}>%=y=nj`kV zwLQlXhwK>NGS8WV;+ob15y`jMowz{2L|)#M^ANy~qVY4ljPaY_Vc~{-iRChupMRY{ z`SNBwA2yP7Nf!MjYMBFX6VmoNeLtKsF@Q_m%0ri`vso^A3VN@)L}yK_{zVP^5m+o-pIotYul2_;_`UOz- zE&oNu$dUA3Kq{rQp 
zFYhUf$2QBw!!{UA+=AHWUyv`RebM}fR@J=&vfs29sHAns>cm%ll*s-=2ifBsKjL;R zHO9JSA7?)MYu`4<^nKe$(mY$f%GG15&w9mR^8BYZ!G z-54KnbZ$82tB)S{UGLhtcyau68it zH6vGj%!e5%U?a2<7#D2eH%6Ye{iz8(SR;=7`M?%@K_D&c(TT8(Y5kUy9%{se?MjQW zThp0ty>+TfHQsug%T)*mL{Vt^cXTm*_ZNlL4?;vajTSLlzRzw-k8F`~HdlphOerZB zTq${mN3MV{2bhhIM1hNXXFG`H-%Qky0c(lOkhJhi(_`+pZ1lqhN5rb<;IJ+JjK!F3 zj&1np>+kP;+n=Z>I-B_5c3%DwmR1A&-cE6LI`SrGeIA?JqAIu+G~2P3y?9bv;4zAo z+vA8!73_)R3wDV&-{Nyfl@V@3>E!-y$9}oIJ{neO*^1m%{8qkI$Kd%DN4ipg6^xXzf7EKJbaNi z$FmNE_ha3CgAW0&ZADl*QR6bC*|V;#nJF<+wRvfMOktj*QPQ%SlK?7KB=MM0OQ3ly z(*ui(43bvJ8kIS*fh9+SMJQ{VWaD}EHB(~^JAVoI;3F-Z_2J|ZSMS70lM>uws;KW( z=`xTYhtI`LS1yzP7En8q;GA$?2GiO`U>x@Z^IU`=#DY2F>wEH`}1# z(hP3*bw_uQ_=o@9@pSLCqv?Bpb~%0asoroE0&T`>l|x@7jEz`VpzK+Wd``oGabEC6 zo{SAZCXP3_;A@=3(gzdsM;`iW^b^~BX`Y`GuPz*sFKY_E8AZ)nu<4DJ9{{+08{UiQ zxBknk>F!-EGL%;*Juya^NBUCauYLP!`h!2en*QoTM&oL-oTb&a#`icp6`g?y;H_u8rM{GijKtBR5_@sTaY znpU1`f$FV@b1a^dC8ou-Q2aG)#tn^2jyAIaXEBnjx%aGRBR#p50+!gBpIB;0j;JR> z)K_hlD{Iv}fc3W09B`XO<|1C_@gpihz*98h9=@e}9kUkoDWyUTRko}F-SoI>ZPc2L zIue&Q$67X65Hwwa5wRuLEyqM2#uDS<{>=?tvBbxs8s2yzr`K~+vFL4Di`fNHLypDu zL~VU(B{IjG^eNJ8NFLHQ_e|niENH1YnE*>`^>Q^8xKEA ze!&-Zxs@M3Pp%Uwo&#dp4$Nnr)Ac^El|i*BmU)RGz?HqNfE(FeLk+4mRy+C#-($Y0 zzUQ##r(U=Jl=Zm=OV@j1#S(yZB&|-m2ZXQUYzOZ+bXxU|`n_#mKJYZ>F~4Pc(_d@E z-eZg%Cq-=cfp%P&yz#<83iUe`O~+_G^-nUxejKEc`M|LPy=vW?$9o<4l< zNPptZ9|5^I*PE7B(O}N?I||$u>OwyL0?e0>E@XSIGZgyryRJb*<_-3*xRsbdz9kku zzJ!vCappw4veD;A?`e-a0K|~M`asR=Qe)?Ak`B0mWd#VXVe^#>Od1Q8Fjxjizl^1Y z*!`2~OlR3nb&1Na{bxthw|WG|!$=b!H$D`4xt$M_6jjfgC&mY&F(z9?{ow=pJLriV+JVt%XyqY_>YwV1Y>U3%N28TgMenI zb=$+)p1@$QK6LE%0ky5hR-1E~r^Y%SS#j2@9o@}b?pu!X;;VLJN3zytz2-7ct*_^C z9QKiJK77>=-o8Gz+VRnBvxKYKqf>D1Bm2DA;4ue%nsLs7t?Kk?#$|7esr(}j$6lA- z2bb~nbzYCTUK5yAojSLU$*d%dk z5gLnV@z4j82TNXCyeut3%$bS=tRU_aLl`B{w53-$98>G0r}JvcMwLRkKc z>sxO{UVSq>{!Fck$j#ZyGyTN)!#A#`H{P74cfNKxedAkK2x_qhr>mM?y=LorO#am*DVOn}Bhe9RksJUDhVtL9iyVIv1E zBIf+^q3eIt22?E;;h`-xdcR3a11dH$YGY(37QAXRe|+OkU8xG!hc$q*)IE(5_ozqrh-5wgnaS%bqnXc3BcFF?zkt 
zHAlweG*`_JZhqYj>jUi?n-=qt?>{|P?R!k@>-_TGb6vN}B}w^%7#<7Usq?eX58phF z>&UrvU6$^Bay(Aw6RY64ui2K>QYW#?7}B5p-WM#C)8Or%D-i-a^TjsjI%j@u^7M?o zn(fbmKbd)Et{>jlFWqUMniu>y3WjbgF4PE0tvnMTve6BY?~N^^4midSx#EwLvn1R++GZsq8YXsDc9Avevj4_|AqIc=}DSFLYt zYv+keHR$XcgafMVWBLKvUyFE|4dB?y=X#v0uK~aDyW+u_1D!VF!frle%v*ichMiXV zs=x9ja>>!xRQ=|xT=u^TS?fKNZ>!!z7 zAvx2lf0vaj@$~(l9#0=U(%Bdm^-9E9cZR@1pC9VT+~^u(s}G{A zn|c{LKFu|C#&~iL0-pnpb!a>Vhy0K@7R;*Q!@%}nv76g5w+RG{iEYMKRL*$QqEAT9 zZ4gjE{E3^G$QXE`f*CBehZ7^b5Vn@=L8(%im2|TorIy#9p6c6v_jPH^sqSdix-a!%^R?HY z&+QS$j`Q^gr_&p{T;siW`DtY?Jg2IxdzqCkC=m%|z{^C*L)0?H>yZsF2%&6EBHY5( z!m%@+BVE$NWg7ft>%LyU>Bd|0=(3Z$Ct754S((S%AW0M{GOCXaSNi3;;ig!EJB=*a zq!21EQz5W_D{BWJ4PAYBsFaDbFnx~s zjB_2d6PplezkNN?i}uai$JgBWjH|U+x9oAXk4`g>Wm=c#t-K53Fs;_K6=95X_Bd+9 zXPo`+Z^apBIl^y^zmIVr>3aHD^Be2BjI*@A%3Z#mx5lA!0Cmfp%vW0A#kR%K=k5E+ z;sY9_I>XFCQ)-^=#Fl>wz0^W8^5GPQ_XM?y{v8og{>=NufW;}^biK7H$*tLbws##toCqFM96{GZ3dSxmY-e2^H{$qI0U|q^KkBwG#~*7CCkc}{l`Ll|9W2u4+0w+%I-1^q=N52n zA(>BgChMLSH}`ZF^feucf9pH9ru({7gBn@u%>#>I0`txGOFHZS=|^{`k3M=Pr0#&S zB%9NqU}k$Vwq;+)eFW|(rA6fa4;FtV%>6ZB)gF4{OM=E$#hNQUOXi$+tj$()$eLrw zsDhe3@fs;LU+Zh4Pj$9AZndcW=Yh`HZ#81M<>I%0>9!Wmy5=etO1xHD>x+?@a>MVJ zGB)6I&`QN~tpzQwb1Cs7B@K~N5#F$%aklwbxBOw>Khu5ZUwaK}?iVd=77t%PogV0n z^kcq2sFq9=w(OtY-ph6f==_9#X}K@e{-Rr3>a`^$_NGoUS(^0U5F zlPoDp-7{xNcAc`kMwlUe%RnDcE_IAN*O~oejtk;hW=&9>q#(CiM(R-|M)o+e{vT~2G z=EUbbBb@!Nug93ja^$bJI>xG34Eio##e&;v9#_S^Y+6ZmV;t>}OO&s5YGQKT@@eAv7 zP*Hc(0}XsU!zG5u#~XvrtYOoGGtqIDHDYp+?D`0yah)CuXwIDKuDJ2$y)4wJF{*Ql zR*T}bkV=00U*lJu$$DvqUYEN0bbd;+nsw$7sb0y~$d@S#C(c8o_a{1 z%E=mBbx<{pYuXue*3_)(A8WjvX@2eg>GY<~fIie0Gk^1!E~l@&DUA5o8jS+2HLUO| zUW>u6eVx1~`jHD_lsHow95T?&uC#5R6f|s#3tME%xW2z)YY+v^`>FjJ&!G)y{Dol- z&<75-f5+?5M$uVtL^!u!-=?`{&%x;o{i!~n=nH?kwd4cMFPA4dUrw6lyX80Mum9HR z^!vZ34|vRn62una1)%M|YB82(nF|(95$o}g0~~v^+oIu&*FkgTSiF7MZpP|1nWuaw z;Vky8uYF5*xzS}P?1ya3WY7H6aQpVf^v$o_p8ohR*fDANvMu$QTN1U*bs$^rbS?3- zug53-wdZ*Vv&Z=`G~NTbW{SXjZkea%iQ`-9%iPUTQergCpxgxVx)on?q@NGB@q;;< 
z;1eD91c~F?f=LSRH}t$Op;tS7H?~wOU3{4{wq7^A;Fs;R*3ISdRh)e{k&d^#N$lSg zY@PdB)0VwrZmP$Z10BSq(fUN*IL0CS33e<#$MT_=MT2{_JzV?kXxBKR;cH1mRKHHEz zuKHZVBJnQ!s)DuP%Qr+H)LdI2+dfe_d%fbR#XPDk4zmkB4tq-%P3h1aq|>MY8lT8uzR6t?`= z$mCd7KZ%bdQIZ+M4u>C+H^;{r?7Z}ZFOO-_qCN}hXF9u`*OY7VtvD9uoQdb`doJ9C zi^X_u)!Qdm(*xZb`|W!daTgP=YmUoqR8CfuqxExc>vS^KyZ5f9Z+!D~dQV>z{OqxQ z6yxbL#peiOdRt)FWT0l|fNjiDUQcQ{KQU1C;&P;jD>Fe@fT=2L&za8WMif!`#D|64 z9@&qMU?gM?8+7?3w%FuazI5556_8_xQ^D$+g1^kn`&Hm_|Hj4~ooG_;+`FSO>&w1c z1-zfO$)Q z+0h68ILo*YM9Qw4Wt3-+Nu!fB$C!!I0#ti(c%zV}nZo0C-`mXg+DEtDc+2XmG4^l8 z*}o+#?+CNycMXoI`L|+>?WOTOr*T_$`wx-SSM$tgoc9GgsN(v9Ob*6+osb$k(j7DM z^|mdn?MUl=Y#j?2H;3uAlEDv&y;K;{e$8UZseAvF=;)Hthp<+Lo6DG0C;7CgW4`x?8elHOYIEXYy@tw;W_vm#n#{ zY8m86Um4e`rWPX+i^%9UHcD>;iUmwUWs@WSr$s*mlYcodia@mx$;Fv+nwHX9;H1We zLXBT*)V;Cug9!U=%im%^*T!~ri@DL8`W-F0zw$NxO!`xP2;;Um#G^#X#Xf*nzQ-G=>UEm}a9;~VpPW-{$xY8_Fa>+fnL}x7ZEloOgM9W2R z{+ix`5TJD!i?VCm9l=(zwn(y2_i-qQqu2p|azkGS7Gq6#Ar8j%4gZ%7E zzx@yHO+Wnp#q^;rhl$0!#>F+yp;H5OvCX{M#1qw&ek^*U&kV3X1JZ^&$~j&rh-7@h zz#x-Xf_xdrmwB~l*5gFi!++;jCtZszuGnvgWAU6za4E8DJLtp0x4(Kez5Ti_J^3Ur zJ;^zWLIRfLna=eyx}eAjmzwp2HHyhlo;~NS@^CCU=vEJ1W}NSxwGx zTVEnI`;i=<+;5{94XwGh1oe=|a|f>Kec$*IUmrKZ*fx*NhJoDT+p-TRR;GTOIL$Tk zt%6^o*lY0 z^bwb_wNB%z&0JMF7F)$yH;?_bUv=ZnZQd&FJ4E(;eGIuPrtkN7`o{07dlXk=tvg?i zkFzt`Lh8+egOM>cUxWSsQZV*<_(kt+b>N<#jA}Dj-aRtoA*=P$(bMsb!5CZ;tv2 z_ZTudZGR_?d|^(a$I9i8JVQX)Jc5;@oeKb@Bi0yIN$Pp6L%!7t@<>9ZwHlZ}Z%? 
zLk3=dj)khsZ|LqC|MfqLv;_kJ6@=BJ&JCZsklhdI)O z-qAJl|HW_IpZ@9Zy{Er?w7daid&O3UC@t(ig=Bv1G=CIT{H{4-D7Da|v)xwBfNPG5 zf`s`pS@tVpq({DRtdq_(^2OXzXCbKm_^}unxQ?95dv5Epo)hgq?>ywg8n0Q-Giqxx zZ#A&+p=O@MfSsGvoO5zu#pg%Hg|+ZImwBAmeeg8r%-;Ih9Mx)`YWtg@k7}*9U3KsB z9Wu6EbJ)kPF4eHJFQ9IuN^YF}b%5sTJ{f)uFpQ(NBxSeyten!9In}Z?A9%@RZe4iTeYLRRUX@rj&}{!W}GqZZ^^{r zZHGV!-kkow+B_$9?$E2<`e=-d+_5u`gRkuUtjvCVa&r8yvFPq+F5V~>OxnKq=XDL& z^HDc-)z-)K{lqdWxxb{A)_87bFvHLMv7mL6ucn*Na6`iUgi{MR7D}<8kuA0WfA|GZ z>t#_BriD{6*{)U9_Jfwl^+Y5|wV-3A8v6@LWC{wD*4ntt%$s)39hhYiz`l`poB)Y0 zYpU}|9Q`=+3aa&Uktl4 zcrKc?cxGLy*jR`+o!VG9pXnBVXS!+g?K{%X*aBIX+^sHZ8?=}!w%oJnSO1QF4C(Ct z^e=z++4R$Q`H5@Y0+97L+nBt7V)GP8advi3x%zSQOKW zQ$&n8W4NchzCe0ki)MX;^ug=X^xeOIp*|nBbnUh-BS5Y%)ex0-u>25kI{o@Lucklw z{=?}%{Nyv8>{e!dXv?}unYB_G?OKnr(iL+UDkU$y^cyr|n}Q$* zojK<%>tz0;#k}^w#TBqpGg)K4C67;ZU-jE3+K*1PXg=3IfRhiu+K=J`Q}XcK&)?!n zEnh3_C#}UasIP^%_M+HEJ|VF!v#_&1BVw-usQKvc_PqalLQE*JI$QxRJlwz`E}^OV?x0 zip54Fwzs?A^VGgEaM*6HYO@bp_0xB=9m%6_<}eQbNFT-a*q)od`HeNE+U=`0^qSMY zTAR5ncli>Nhq?L9oA)feq46eN`OSBT?CabYb5lHgTQRP==B#s2=dWVSDP}hI>V4_@ z9&U-Zy*j6!r)s-iZH{+&>xp4%7PJ%5+H~Zig=uXDxoS*c z!@xWdMh7gK9;ovo3^a`RVi}D=O*Xwr=6+1PIl?*b%YiJfBdLmkFW+F7v)5S*_}qN( z(U5I?()0Fz6sQ(lQEYUs?-v1xkp*5)eZ4tFezRX{$nldxL#7 z*XkN88RHD6{Q6>P<^l{XWnSYegNG6M_{j>$Ih)QRliSN-nnCJZZ^C^gDi+t@>`oSbVAmo?K((g-L z!XF!5g}E%_wFh^mx4wKlz4f-Pz5aXLcU|+M%Wt@4WZa4{BGzMJ8^t1C z<37_Z4FAz@-<^*C)inLdUp=0#boTUM-f%NlFl63k6ojj1`RepAjb z68}ZkHQ&eC8-jsUy$%d}ojiPfADiR1eAQk#ts~6=->AXSm_*SwKRvZ^lPiRu#fr`-(!q-K1;{+eb-@WJ|s5V9cPK%y0KN_GsYbFN6kDHSL2Cs ze-w``bAfAmI~wO<;N}Md=W%WBTaN14!a8OoJx4e3^?4SKWqZMA+%m7s!+N{T80Jgc zH2nmJs;906ra?Pb--Ub0o!*hH#_jW0>_}(((>$*1<@3l^{niUVUX?XP`(n!fT?&A85pvnbA?P*)BG8+x2Y*P=HrU6IuKH5S1v zgww}abj8uvXN35|sB~_lnEmu|Hy5pZJH7QM$I}nqJD)y&r29a#XxAgIai^XjP3GW>GoLC_KK9$# z|HRKCG8!M=l$H;jkxPB{$-FZUKISo@A$U?vS(Z9;Dl5%LoLa#3965lnO=4fC)AYs} zpdf6qxEqI=8e!H|ydae&{vyD2@!TTvw*D;h%U?U0zNE`Ef~TDYW-i;bhRoX9>rnY< zew}8oJ!e@_$}Q$ya~2<8a?%5oarqY)47ty@_zjW!embpN_95o0{$Z^pu=m$kV7bGyi 
zmp0}l4tJ3PykjhP4P`^-X}|hVRzmk?!)wI7>suy01yHt~`Iq?7 zR)T?jg#1gMm(vy^*ARS5Ash5{9Bat_?Xw-bW?Ii@?_2tnPvdFxbD}$;)Ha$k5IF;D zY+CW~(880~ChN~wkgg2UpW)&o~pVi&C_BW8v<*zOcZLg}$icHjnDizffL$e1DYn9)IOS zVvmJ?#AVggN@aml?JTy5Wn_7cI|S5Ld8dFo(2amG|1y!P8}oPE__eXa?cG1%O<|0dEkRG!LhJJQ(RkFPe@Y`lGa zogNgNA7Fa+#81VIP!kfxt~^^+-AeS_?9k- zd3bl4e&y#+rtf@BHvJe5pK;vaVq4X1Z6C7Qjh+1zrWV=b!vT0Ka3bB$I-SQn=BA(6 z+)r%BW^4d9o_b7;ABEVDbZ&6g?eAmlul9|k7+38TXTNh>_ITSZ?RR`Hjk~Ed4{_w+VVh;Ojrd&~K4RRrbWH76Zrf|D zV=B%zq_M;rQ~lOQe#ee5wHEug>V;U88+*tYy%x#oS0=>SpfJX;}K2(SaLs;aLEle{G529$^-AYD-M# zp}3{6m9rl!&K%3~dRhhS>5#_pTTsR_$~(euj%fsb37hk5)wWgdA=_nNS<9U5L+2syb=$u+Ui)80R_>e6Q+Ywf4&}uc6|G-adF{Gwmad%3EtnS;!|a<}hn*N0a~;_B zb+xBV5kBrgeZ&_pC0PnzUFmvp*|`#&bKS0xs(2_O6)h^WRhej27%oR! z5*zvD_xC0^f1Yr~L^!c6f<=8e)d!|Y(E2!Fd}xRTw2H;Y;yTWVqsx{*WW-`yc7Etc z)5SIC{P>YBKW}~Au2EQG!yjkL^UODO-qCgBuj@`FKlf1g@VTD(Vl0W z+bmS!*QaImS)cdy*xp!GR5Z4|^!QS+-{){JXNjoK&OyCyRkv+Rj`GUIY#J998M`nSf& z%WGIXH)?at_1FK1?^;eLyJ_AQH=GM|Zb%=hT+5haFrE#*pOxA5l8@=~I9xw|>+70w zEH!5B|C`s_9?Q*Z-NiMYuS0XR*Gfz;f9l`fXO?T%2iywFMQ2<$kUaCt5KrS}WvjMq zN9+b76IaK}_~_FtvBekL;ueUxC?)eT{CY$h;$)leN&x}Q*EqD2(#SKM&8R@GalVW~ zGbS&i1Cbb(pWcDmufVs@G(zqex0K4+isbh>Uq0R31^+7GS$*J zbKis?Q|2+Zr8&1`A7@-|&s_E{aw|`b--?B0D}KqaXy#lj%eb*sf7L6#>RT9U!uP5g ze5<=^nAk~b!mXklV_9Qt8%gIF`L=9Z+*@3=W@CCe;;c5~sj2c+Os%)t(MQcZRf4Or zjx)ynDvh`7ee@lVq`4;du^BVs!B5-eOT_-?Lpb{STw7(&+L>D&Tw>vj#(`)&-!*om z_c`5PW9y4=cb-wrYp$i2V_Eu#X)KHxZ#FjVTv1NT+~a9lq;Nm3vFS9`&M@Qs#Uv!%}Q*wAgR>$LUW3x_F#h9hZ zfGw}zB@Tyq7B04h$I|nT#^v#o&3=xL@k$pL_bKM}%_2GOJ04|84@oj9(cuX!j(92( zOREts;{;92T734mKjeCeKo9m~+2~T5~}iTwUqTv*d{l9?8{h(KX`BbVmwfUJ7vZaITQkxeeqnw7Az?emOt+}5(?FdqOG+M1NPI4nVpP z@cUvsm>O5PteeyQDtGxR4&61mu0vuSV=iK9-R8C5-0t@|s(;J3WjALp!Sm>A?Qu2t zKA*X2jBSpszLBlgW&bW|zJC0~;IrMh5mz2l>H#eeG9&DvHs{^OGxyY(k8NCzW0(Kt zzR~y?zaLdAMXK1GD>63hii-J`D?p1QwkaW_HWM!R88-W7OH&qX{#v(~bU-+b5c%XXaiC+}CAgx*K1OtAB8hQ6d@|7-g21X-)c zKpZ-)x7B_k_CHrHZ&Pz;|N2-Qvzxuf<<@KUvHwte>lP)|pw+(B7PWM*Z?8FcDS{-U 
zeRBAs3~*$~+LDI-E@_tR9!JHwX7K6v!gP5biSdao#yVGtza9HpcD*twTo2EW^*Lx( zJP+iwj+6deXaC}1{EeEE9#?VgI5sllpyfina#kIL`O)26M~}6CB*7kvuj7L2(6}B; zoNIMW9?w3`Ex+T<=N#^TQF66#CP^)DYmhFcFWUA)BzbDCLt=W&Xp;8TW;+bhix~P%WL9#>{1K9Wz4(xmFql*S=dj07L9RuKE3Uhynd5hEOp$Ab*&kLWE|$b z$@Xy!%&u>pW!yWky% zT$<;>mbpJq%(CwovywAzWsQz&O1T_VuOq9a%%`}Q`2>@-7;S#~Yn%};E7xyIFUv+e zb@to2j^lcc^>)TT!h8IdV4P>iCc2)kvzX7ab6@KoimQRHv)X#yyq3LxU*B`)c=w63 z%^dC{t>Yu6#u@)b%0u!|qid`6x-N4VhrQzLHy$~%VK>K?-}cIHKKsmrq+vJSGCk|( zT|Ogg4%;k=$8Su}N55ivobk4s*V1<5%w@mr^ugM1-G1|Ewwdt6a_mz~Jfx%Y@#QOvaL$+epGwaE2sIY ztzzv*_89w&w;yTUKI9g@x7Qf!>t&d&Ygu!`+4ELEk@UF^O(WBT>*cJ+kuLh+Do^U^ z$LE%I4ixn6MUa*#P^~$*Z-pNeOy1TUHsK&y^rttYu_>ZY}Jomb5tGQh_lD^9Q130 zW5ff-_Fb5si~N3=qw*L(ve`G1J>LD%c!`hKbsJk;uE8Z6d{?Et)Dgr0(?Bf0!ecJt z+_%kr`?m7oufD1~7I{b`wvVUpnhxOra|nJH#$!kJ_4GCD;_0#Oo8SE{8D9>x(}g(U zu8A?m(j14%5%*2FjCIZAtv2g@J+?c)`#sM75&sBVZ6i+efvdXXcE#9-to2mgJR_TZ zyQH}-jUDyzIZyA$*V`)Av0(bv#~EX3o;gkwyhVqdqtbxW*q+<=D&YYaSeC@1jN2D+ zOC*kOaj8RY1HNEGN?BVC)KS@5ZJ1bi_1SJSdjq_xgT#T}|2R>QI}4#V%yerVmVBHy7L;FkII&ai(h6>(@T$5gy?+xM#E z%kb9vUR6DZ@*I-;5DfXiF%Nxn{FcAsjjgtQ`qtc!@>Tw=xPAF5Z{_RbVlFr^s)3v1 z+HPN8pM5P6+Y_o^amH8<;Ipa5)tYVZC4u{R_xYXNI7{2B+~=z`*SL93t8tIs$B=uA zzf#Tjaqf%l32RWV_sRM`_{u*jtlCELa2V73ZSV8Yr#W}UrDykZpJ#-w80(fj9=n?{ z)Ku-&k8Yf${gxh+CHATtXK7!}X&abP9{YQr?KQ4qYL1Gj`iKL6wHt4V&r^xKrJqOj zLW_PoJ^`OwUki?Pj=W!J-qTb!))(W4A018Kd-rns$w$-l^qdzImrLkYG`dC7*~zWx z?DR;w`nrwLsq|z0{pM7+WW|4SLSHx0*KLlDWIN`e+D`NA?b&Z&{NO zog=Aq{uUIAQx?6K!d++qtREc^N8aQ~iahy1uZM3N^m%j@`JX&giSY!U$;mm3 zx?-FSE-~8Hi+8QXzJ#gcFv_{zPiW4mg67()-}*9tw(OTn?ar}3rXrSjwolFlW9Vy% zsh{V5VIV%{giSN>FUXr|=Ix{E{BB|Tn49!-EaUA|bMNEasttSJZo=Eg?&{YXs_vMP z4d2LaTVJc~jx%?U8`*0teqwr?<1Eb!j#m5DjjgiB*Vu}ydgVK$F6XEm=v(%YZ^@I# z%jG4dxzRBQN9SdLIcKM90amM4@mDB#w7|hZ8oX^s@%4HwYb&cv6*&I9a^)~l+<**N4 zw!(6-S!#36U6Ncm7h2M_1KE+jNcU$yI-mZ@?>w0vJwKJ794yh!+wzVhTU)@gf9coOF7Gs@dOh4(%uULV(qZRnWH;$$Uch9D~ zXIImM(~If;-DlHn{RUV5TtI&LkUSbONn@YjEc2xv$`*@n?oG#^!!u$gTxmwL}EkFNE^~{f;M@0$eoaeW`ItWjH#VY6}7{5o(yjOEEO#k8sV^~?M;`Z>>5 
z&=RvoxURKzf6w)9@$P3xgn72+ayQsc7K$66z@3WYka>Qj5Ws6u}EWV z>p6@emWS~bXWhO&kNu+@BP=-QbBv{P7-PF-tEokK(XzwOX(FE8kWecH=9?`7P};XK%Ov5NQr$ z-8bj(KF$W;M4FfwliNn0N8d&^V~DMO^xo!v&Et5-+Go3EwHs?+mCn)I=)0!s>veOF zc);3j+1q-)tv>N?)-7#Ex}K^#xAC=K@s-EeYU??BpZn%?p4zXx_B+nf7{_T6V;=2) zaj9$0v}pdxd(Wr;>3_SLo}8a)!K-bgMH-7m&7K~4=tH2zPrtHnxA~8~Tu;~&qviJV zX?mtd*2{j(vK9;r!^SAGt2pzFLbi+P?9Md3`TE85E5CAYdT{S_y8HBW`s!<+P4~{8 z0cm4yiGqnI7L@wsEG<-j^!>}}2j9Onee$7xD(mVj@TV-+^*GkAWu35~mwtMtdbD`w ztT<=KPc-IS=wE2juhFa6Q(=!eWqu@^`e0dHUtkHScTb-~W75 ze9!3Gd;6h%;t#2z&vU)M&Ugr?BYG~|_sLP*$hMCM|4r&OA2EHt7vXXK5!WF$*Kd7j1=Lc99R%1qWRDadY zFXd@QU_xO|00c6-3jWh*DA;)=^uSaj<$ z7UAPE7Z&W|h+pkG<}w!YB_<4T6&$f5-G5K})Mc3*5{%4V=&+#H- zWP5JK_4d9`{!#o0*VpR)p;&OEeQztiVqIh9a9p*yre0P|jq7nY?Yl;E*7%XF`mL`x zd>E0;BguM=g|&{w9L}}N2iEiGoQ~g;JzwA7imUdTtLhaq(mkf?+f{$%?qjPDKJ!%W zefmwuezGsgK0gJG2GZO16>fjz<4?-VyOv$EG&fitv^sG6ZATi1PK!Levu-qVIL`d; zZ-nhM*VvxF)>Cm18tWRW&AMZZag6QYtZ(^j2TSYw*eZv0OZ+tYyJErMH>Sqh-rL-F zj3r!3$RS6h(R-mM7$ErZ2N%Fa9};WATgUw5>Md5(+NWpeE*Dr^U}z;Q8~g zV2zmYK@>>g__UxJ;}Q_!c(C~9b&5mn>XMPN=+@}lb=<_WU1H&@fwB$B#3J}upG&wb zg+(=zIyn=5$|Achy3ceboW(d;&VZkt-J0&dc0Ap=r_T^~Z%wbiaW&m}?TPT3hh;-3 zZRVja8ueCx)GvFE+GDq5+%)>=?qh2)){ z$hV8T;(J`}U+OqR-;Fr>6@#zx8C#|Cj;}s+`+K|l=C`zcOPUW^n`TijdD+VTW=t`~ zc+BB=*WTCQcuV6Pw-*p; zhvQr~{XVAlE3RTIrq`XX+WWlK=Nyg$TXn}GiL=kre)_cDzR&Mk9Phs4kk!V5_UekW zs4UGmpi6eU5iz^?@Vmjmpa1{;pK}HAar`W;mtW9E4$h2nz>CIGIOl;HmL{DSq73;q z%rjB3It~~$@ej_NE@E&Ai;wvQ&uMPixSZn0KYTv@+?P(KpL_Fo`ryg!>FwLkbRD$g zG9L^dyni+Q>wm%G{YM`D3EBb!Tq%quWE?OS@jMY@obOSCYtz5E#QMQ z#*qOm%;Dha56d=# zwdt)dA5C}eoK8Rg^{3OF`%mU6Y*|<1_T?e)9O;9^lc$%{XOHxyPsyAY&VtAE$70>r zqsOA3?H&t%`8gvW`$2BI_&^lp(`UQUWMIQKp}}X@{g_^Yq(lIE>&p=HL%jAd083w8 z_3PXROxD;A(aAjvWRUZsDD#+k$VKzE&Axflw>$@TbfefPIc}pRTgD)b&v{WnIigk* zd2{>cTpC@a$GOPLyCC6-Yx5!7Oc!HjUd&;2dg55%0{cloyXY^rbp}+cr8@iQj<0yzj9t&xAf|28)uyeT z*>A?o#$7`$F0Q7(_=~60gZrntL_rJWr?;oCo@kNDnQ5xk;)t`pzyEvF^!&5CTHtX8 zn3Z7TkRn8O*sdj)=c>*tepi9)#|Dwd50JZ&L<8tR)t%?p*)Di_Vjp6GOuXXa(@qA( zVhETTHRTy|_j3^m1HJw{Po%Ld-|T#B)${ 
zM-n9?z54IHcRqdZ&(F1JW{mo$GvqNhUcd_%^~DDlSab;q^PyAaoB@x!xack?Tylbb z!lfs%pmv{2O;jnLeYCBPk8kI7;W~3JXgruZ7T|2Rc~LGM6LorFO7>ik=k|+*VzcGK zngNP4KHxA4^~Fv7(PGi;4w3|Q!AzgVd?S~>_0@h65Y-&G#8O}6#TO~C*PNOJb-`uL z0A*6bc^^{Wd2D0<$cCav8&u$MXj^0FQYmJqZ&6dk;MbU`LI0Q^x2$Phhkb`g*IKb2 zhi!g+x&I=v=IQzR{)oS?X@tR6^Hj{C@wJvdruyJ^f0WZc%Ua7J`Y6tMdS9)x*ZW+K zH5dL-O!b??v9TA%YcyZe8E@3Wx8N5aT5oqBo?45e`dW;)A8Cx^E5^1e9b0j>RXOrI zSH+=k+2OFyeC{Kw&HWnF=h@dcw{d+Rd)$uB$qYogm|c3sAEJ9O=C&NgR^O=b8dI^@ z-M7T%F;-i}q94)@iSIGybq%Aw$G8>im@WUvZkxHyfwa%E`YIm3>mFf8w#rp=*BowCx7Z=TEPuPoCydFZs6d>1l7kAv8}>1D@+|;%lai zi`v(Yxz~?|Mb^h3Pt&8%&Zmd>kEf5H98X`-B@3K|Wr4t&@42qa{@$OdcZ*9GIJ?ZY zV_QpI5oZ25Sy31Re1RaiHO>aFINA_nEo4xWv0xFHaQOHabE3s-tdp@sAhP*7j)*Pf z^P*aVIo3B7AANQ?{XhTV>2&Y6#Bp?cdhf5!rmucO-@ptyp0bhK!>5M7*6js<^u3em zLWOXdNZtw%Ec2!~9xUwVD5BXX#G@&=)CZV2dmZyDC`KNU7%BXmp0m?@2>V=ieZbK+ z#Kkq;fs4Ocu-ML!xGY6J7Ti&a^jMq+pDKvHI+_>u2QG1uNF6hNt<+<|-um!VKJsS1 ztdr}=X>5ns7Q$z3DT$}$ET$(G$`OXM#Rm$YY?#_YK0Epl7etyn`DRWlj*ak^P`Zy3 z&JncXZg*|bHk{FoYdIcioZ;kYMzu_}j>Q+RlZIGI%keeyL1L-lAh_z(?AGK8TgILo zz5g)g1@)gIzK&s6e*5Y?RDBu6Slhy4cOyrw$9Bt#wQZN&;u4Sb>BRPT zu0D?buI8~2 zPHbG`+&lL5U6^IXk8IVCehB78Vs6U250BsVzDPZNOyBp$+p^dCs$TJ~v2t18imCBi zIvm8Czv8xR&TDMnx4o~c?;BU+t8PDXUCVNyteC^}t5`TckF$m*c$ya_N_ZUN- znZ`vH+hcUf@!oqE(@%bUF@5u!`atoq7SoT;rq>=k6*kz~bA&4TXnOYSa{BQPxEFSw zA(pXW6Q5VIcr;!r$`|#}PakM1j(T)fKkPA%#wR_J^X8~WU`2j)F1!Apx4F$X&@cs8 zumWK*8yt#_YsXue&|{~G8k(p@c(xyz;@W)avD8P#0vjv~?ObHXmos(h+dlfSz!#Yp z+4$lvE1GPD>XG==$)AgNYNk0Zba=$chmyGSO3V%WY1ZMGFdY2EN^Hmo9$b+QSru_ z3k;9GRe$vv>)az*@gvH9j<7$62M%P$zz20M-8#!t{-WbQxH^vfM_0!*KqrA4k z*YngIJ!g-teul^9&bQEIp*YwDYS>Qw8DEmSfU9dt#%$)^mCqg%Nc-?uM-t#OtLedp zF?~Qh#$u8!VfZ~~EQR>f^rKHNr)O_-9roGu+4=Ew`@{=`ThsS-efD&rg`A+Vb{77M zUrZ^@`7n75mWCM~*0M`~cVTaC~;AkmXST2oyM*|BQ z>5}JK-2LUBKbyY%<&){|-P7s)zrH=a{_x3Q%&=(D{6l^6xH#2WS}tSBFMX0B<{RQ# zH{({HN@6}FW8m1@2FmIfi$YnvCU}`1b|vB4#WZobhn+dcY*juSh(TkEYrRF1WvEX{ zI;-rw-j|rKyp$sn%1>*vtTW%VSb$Ucu~ydXXSo1RorUr}ZHqVF&@B`n+|!p@v0vy6{}bKa<*DKzq3-L= 
z`?@{e-g$;!waD>YgFC-G)?ZjuK}=27rjI2Q7npobmaISX0S`d9q8DAf#>Fld^CDwX z2AU`4jyYNy8@M){s;7ba@qip1??yIrnB9Hk2ydLFF^;o+mpqhv7f0`N9lgEw`@Dz5 z)c7s@$nN>5KI`^X?%r0h)+@f+>bPQ;$lWmZt2t)9LxtHKeQw9#?|rs8)>zARExfju z9l7epBWuhSe~sOh+q{*>u{DozRXPTnvDJp%eRI;Mxj&M|SQ-<7d!FWB4g}}qCbA6) zsqL27`&NB@A-jC$sd;Qej$*O#==0P#$L`~;{G<3oY(1v$@5)i@t9s8d>eul*hb4IH z=C#CU9gKCPn`12%(dOI;@0|Ij;l&OdNy9d}ezqyz6!j)UOYVG=Uh*tE{$g8lE-_T3 z))pM(NysKYatfj+>C36%%^!5kXy|?FUwKkqivdqEq zCwC`b76Pslj75*u^HxLCN|dOkgV zd^vsa;ra9x{b0X$dD+(H$k#{XfF{N*MVF;``%$32>!?M(KEOTKr7<&(dSK7F z`r!{h(FdCQ(=UAMt?Ax(WAlZo8Z<{4gF&si<28g2CvzD*03tz{=&kp*NSNjoX)^_R z#$(BvYD`!&MJY3*6W1z|DUbd5VpH=J{MxoHHut&3?%IhNwR}&3j~E{77IPv~`l&zn zS=&KZ8#o@e*^e}b{S||s=DUFeM)mME7_UH;#%nSEFXN?V#$e85$!GIh_89CmOLQ#8rH?o71s9&V9$YUpXDy z+uV1ob5-fsiXlhsSKYCWtNnO|^TJoWMyy9WvW2b#1ZSjT#fCj!thoT4S**0!heF>j!}%Nc8F%-nB7o%;qzVxyWk zIP%=4%aXk7xCK*aLQNbK!5gzK&v>o{i!M1}a-QmPjm*RIk-jj;hmSgEt;j|iSebHO z(+HkEE$VhBO)*EX(AG7lt$&V;ddjhjxNrwHOdfypw~6rwtTks6A~xtx3LJ%;z9hMJ>rc^T+3jz*o8a!!;b3Oo0*Jt+&Qj- z1E3~$g{rPPe{Ed&S0`Czk9~j$<*PBlPmS$01#iT=ZaS^b9E=Oi$^su_h^XLaOpXbZ zLZ~(xi!~Z`%QREVG=C8hf2$c8>sl`F2p)&BD#-nh@j&cc1F#>baF7#|1d_R~LoF zwuw4)49Zi`AR?Y_T$Z9!6q^k80q=!yU6 zvFMiK1#WB@$0SJ}LzO zvu>^}t@U+=#eGZW&AMf^Rqou6s)c!5=c``IXSDHQiye(|0bslEFFF}tc}DrG9||{W zw$Jt3S7p!X7%)|*UpeW!8CUaI$7jFsNOM_MUyrZ-%3EX5E3W!_`=R|2uCEu&YT@hx zYXLXaAvKR>&(Rmx`(I_hA9Uq*9b0u)dmUHhsJe62xa#jYoWEkM+tfl~(z7JJ0g<}-?UIBE8L0}entka}*H zx{23=JT0lmmT`wa-_{-6j&GgljSjvfYm{ZY3*S=1V(a6IXI34jS4yi{Sr`T_5@uG9 zbsm_U++E|Tei#M(d7N=3hHap;(K;JP*+jRk0%K_;@j==NiQzR1j1NEw_x4?CO?&?g zKCyU25{4#l1fg?fanu@ELLy)KixrRB`i;P~MQl)WhW%>#^do(k&~4>(-OBXI$NG8l zhY{N1;hCmS^~Flg?#ALnbw$QjTa+M-tBe3WQD;Niv?+}>7r+&t95HUmc>U(UgD>Z! 
zQRCOYoOek`+Q8@ip(UB8w)LTmGqx@BJYV97TJ#jQz-6A6wbjvZa}Q;i0n!AI{bA6o zA|pA^kjMYH%;e*bo=rdc;YZUieETgeoViTqj{Y|CnefZL7IgDd<~!2D?bFBS(|`ZZ z-k*N(uI53fk#l*dr6u`vMtL5e3b>_3U+jk}kmVC~a)deO%76e>n7;9~`_sd_XVY7EpG|Mxei|PF zPGVt1HN<-MsxF%r%<eHbJMukS~?RpR5v7t=G{v;E?nu_~Y5tz)sT)8cUrIv+B$ z|8qTg+{Hz-Tt-8xW8uND19AO&hEJ~-!aBl->cGVjg@n?aITJ@h3O01x9&;v7?y~hHpi_yQS6|er$9nq5#QKq&)&V+KDs3kh zYMg9%ng;Q}+S`XdaVGEWsF*-isyZHtGqMWIQpLN;hmo@Z9->2DZ0OmZ8eXbuF?xGx zz=o5waN6oG1l(ftsTLb2dOY6~Jz|cO-hu|7pMF{=<%)+k6`y%9nca;rS%~(u@^)b_ z+rfyn`v4P8`gGmjn>;kA2E%G&b8$o2!#&_ZBjdh!HfMZ+n|#gR{a4YIu%v`F+{Eb!|%>l&{~bxflpo)^AUtm6iZ zlcXy1XLVBj+v@AK`|JQ+v+UqsfPza%HZevY-_--{CePq3py3H~E zS>FAnJY(Wx=D$onb0l8mooCNy*UvoEW7Bbo1^p~X))V%LC)oT6?FEnG;-K>c#)N!Z zb(Vk}i2Wet#8W3Epzst|T|4rGn?IlD%8X|gCesn)sP`)n#F?0g!7tj-mDqE=F8e^| z+VRcGVaSc;&h7K%!986M`_Rs-jTMLx4#a+|Z)t0#^i#=cV>DZCv^PVjirY0#bs{>|qOsaQKQDT#-mT^R_o{F6XLk5WYcr}kWSoR5p5BJY0xcbV zE36zPQZ8s=oqF&QU>Dx#5(jyu!x+}Ki=@W^&q313jXxFht&Drxx#E2HuCB&E9HUVB zELq@r#(l9+u;Nic><4tIly%j#C#w+G`z9;KwuCh7F2uY!aWg|}G2BB?#Z4Q&wLfUc z@Z%x|YmyFA%bLP;t77wCoifJkupL~h^(YLew4*Nvj)exvE|k;r_9Qu9Yxwqp>@MyF&8Y!09(tRUQjhEGfe8I;*wM*+BrMgw(rfwm5t}*z} ze&_p&?|k{iuf4r||KY9W?#cb-)!Pr{2C|9)tF1&$I$EX0>G%Hl+44XBckeCV{)125 z?pnmtMvil^>RH;Yvd4mW*C*&m=HtOIW=q95Y!ERPtBsV$M+dNPOF27Z`qsXwTTxh~ z*X<~>8D(x!;a+mT?UO~g7Tu~Umt1x0k#SXA8*(qyb!zZ;6&CO;`28$%cxyq4JpZ1h>w-G8@9IX+SkSL`EC9z6Ms>AIA|cVXci;{dOlce@1DWmoZZEcU6VoE)C$Iw!J@ znQ?nI-Ttz2xWkE`l)K&RBRDWUwqA~Xm&Go8$vc*E;);H*cl?ZreK31D2RSVz*L0;u z=w60JFW2x%MjCg6UgXR#VvJ$75^hWg)k}2+R6--_KE&d!**cmbquNsWMv#+7mD$p! 
zJNND^Z@i&D|2}`d+`FeAA!(N8N&`B7m~0i67v3zIoGef~h?_+ILO�mY;c3uejEq z{?E0KQ5S*vs*SG_${}*BoeP`QGA|0@ zRoukE)zxRZcl6eYJj>Zl&f+(p=_v+9>N(jwG8d7ppu*ZedaQoHNBkM?eJ2^aa0FK^ zpshN?$v1rDgoZI!{lL&Jdsy25u2ROcYrfh7U+HY6O4%bD3QA4BNn@whSV+V4=WRWM z`-#3#e8OD-$VVr@Pp`7Lb{u6SNih<;%N`4xmo=Uts=gC8oogTNi3+G~!;J|3q{ugJ zk+|PRTL+d9!34kBkJzcNUyK}2d~CU)YaeX#aAjOoU7m)|s+aFrA*=B(94i~eR=Tna zhjXdZl`6XZEW$q0RoRD+o-Plx5Wb-ggY1QV;HqU)nKQpA*+%op|MPp#bjyXl9`7)O-Si$@*Dzf)b2<*54S z-+XuZ!rKp)FTSQ1^L%o)e9a3?zRZSO$E(`3UJZ39x(EN!`SQ2^+jo~AeNSHiZ{P7) zWeo6Y`y_V(UR5BZwa3P|@NyYGltYFsMLyLGJ4CE{m7nRVdCi&2^3bA^qb^L<#p15C zu{Qn!F^;Rc9fXCrqLMG)O2R^0^@PIlck9-f7S=4VYZ2~SOf;tI>bDl$+(XWladPz? zSgwMfo@sv8edM?GS3$mH_@zUVb0wU`crDnuGOiKD;$58nailjei zno%2%csavkiRk8YoG^2<1{!tD79U)S|=^qsqx zqu8ozjJQuBTUUwd~n5{Yh5kr#hiO)5m3WS_JC6;YEABQd*NP zy!xQ?_lfZ*V4lu_UkC1jr)E_ZFegHtcF$Q<4oJnSifmXUv5=>6oD`lteZJgL^}eo< z)80CPoh)y?qnn=pX--s9$kr~^FF0q>r&Cas4;CUgt-b+4Ow~IoyJN_?b2N2BT-Dbm z$tj|sgw2{Y;70ZjmDGeL@ciW4d zZMAQ!yD=kvq#oC;z9M@ND(~GE(aow~CV$w`3)Nj!UL6eY%}+Skhr>j3F$|V?k!)u{|S!Q_SV%#d=~Hdi)YKP|F0kDB|f@kLcg)_K|Mk`82`ZS zTCG&shw#8b#TRniRLqCRl<~r(`R*UhO*IaXLr$uE>nG}IXE5MO7F_i^mk{(%Ly`IL zndV5%f|kLH^(VeXr}m}W<}s>8G)m?MyI>BZoViS0;^CqNuom2I3w8bppL@K0MO%w* z-*zIxQx@F1wdKrL!gV8w7uqbyPrT4(@qSm&#DD#bGv)e$=Y#X*j_yytr5}4%`y32` z=|dl>Q~_E>kp=N4qn^I9$+{`C(LVFw16v@)Up)KvAp&#EM=)mG!R^Hv8JsC&Ki6l! zsV9%mo+r-a=t$kBB>UUgC7)5q;UZneIorqPDmxP&*&Lo_rT}J#pU2S=|DQ{IS!abf zokUyq+7sMmx?nzUep$S;|6mjUG2gKxj}6#fK4yD8Ig`^}5#AA;*-En_2R_C%W?etz zLQxaI7}pI9vgxUgsa1|#E6KLij$7p}_{#CXt_M$D9bh4Qm=v>8p}o}7Rfj@o3a-Kc z;Qmj4>BjQapV4H`LRSk}=y1pVj9Nb{uZ1*M*>!=_SKFWKQD;x2TE?3_8dsCQ zuLy!8l69YyPo!JINHsc}2BwiKpiYKV9rKVU#K1gMFsFsOdgJ|}zUE{|6RX8V!bzDc zTULZGV@+5)BBSxEBgof^tyWIe=-VNj7SH22jJ{CC?J^OMNN_iNZv8q!P<)3n)onQZ z!sYsiO67E=yLLJdK&7c>@PfqwtsC1J1aZ>vH~MSTqTF(v4@~y0`HawkqH*!z2cP(A zEVmim(Ed0WV}l9)AAfwl{P3ehJQNDalDzi_x0nxFWx8kIq=xNSU&vG^W`7@@A{&^l}HAwlF@-! 
za-9tZ<)`{~tJhpm97s*AhJo8qSYu!ELbI*0*!!Vi;FyDI%(IUvyFQHJuey%Q)(6aS z=CJqjXWtHA^B+TonJ|3AxnpIlp>>WwQE39@;sh{koK&a8KI7tp+bVrs1=kZ=B)@n6 z*7COgLh#N{-(SA;j(*(v`q}dKm!2$lALyCmI@c8+Y}HHQuKu-`qnka9zn~DyX!nTp zfTP<{j?WH?d{5V9DU*jcwlg2~;JYs6*rz_{@sWJSM~+?B#phmJvyX0vF1D0s-7M=^ zbV+-5Gj_&YSUArHGkMpC3>oFFi(T?#`q&)P$9^xrj^FTOpLi*EIrW~7=@Mh|Zu5D| zJvO*{3@KmbH@G>LnHzn$l6PCm+l0ZNR&Q$p;t*#=rpZzNR!)hTQj!Wc%kVc4Oll@q zz2+h_Gm*p=WZ<^4G5!m&`bOf(QfM21Da&fZ7?VRxhO=s`+Tga7OhvyqLQ7iB$lSXK zZcLm$A>P!j5jEW{x5;H7GH~YMq~H?+wQ*HMI!)*9D{ZM5!a2cB~{QP-Uu-XVm(iOuksonZ>Y4xR$ipPI` zCK%w_U&G36@B89l-+}X3v$vH#W0FOLyL8o17|g?u2o#ILQ~lFN{6J8Db{1-79OL6m z_gM1c>kDZGO4G}#c#+f%Ix*!W*?9ph&t1H!)y}mc<)j{kC&chczpvI!1_yGFS^Quy z^woFqBJq%0;xZdWRR4$G43?*#8^a+-S#Ro58mtauue}!xh+4sYs#`UlX-u-*~h<(T`VowsbJGq2@MZl-am= zVPmYN1*qE+s^8MaJc^NpZiv!P8=>s|U&oY`d}8nV2CSASMzQ+5*&!E2(RWh_o1cO#J>8SdFzvpKh+ny zhs(GB=ydtoFY3pgzwn0UNZtPTg=KmDEoKbnMfQ7f_dc;?_@bP?qMURni~rHLfa+#> z^F!6gs@3J#eF0bJt9~s{U0xEJd$|81-@_v7eBF01&z!OAoSkP+e=Lp}8(+zz@9|LX z{=yy1tm|}8aFWk<$8w0#V*r3Y%H_NZcE?#V?PM(!{LAB@_7bu>^;$Z0*VYZU#;Y z&}+-nF-~=_a@E*!!a>XZfqDy|eQ?9cDZPX$i|aIqNw&;P5sy3NU(Mffuo z+gu4(&hww2>UIWxDEEZ>r1c%ax+g=g2OV&`!xPo5SI*+w3w9RZtk3nO;+ZbB6Bl1B zxJ3e<6D>@+s#|?cT(kuYJvUmb*ePW^n#AofG$sH>nRZo^-S)LCy%zu_U>`qw^31+o zeFGal^5p&^mKVMfUu%Zq6u>Apgs@4`kV-R*_A&d^^PJ(>7C(Fi(Y&A-UDuZXja0?N zGZf>PsY$~j3K*A=`WDfzE;8el$`yZ{_gv#_cge~%S zF~LJ@m2bXcn=!w~9bK@&RDAf`QpYCdjt4o{H|zaU=j@31-_u72o^r3pf4I7R%E81I zY-E&!OBo%XE=Na-tlOuIZm+z)IWWc$1xWd`BR2YXX=kX5|nXga@$YPv2p>WFyi{~uH$v=Gm zeEGxQf4Y3*cOLpV(!TO6JABrnS%!8sh|%auqQ?k+9XyUGR6EGMP_uvy^3o5KBsxhK zvF}%WCi_!i-uj{z6YM`4H2m@>*Lx3cEFb7)8t49NLXBlN4ou=uG5guZwx^_m9Iv_& z^je3aoXPcwQ}H0neuAAPmkt86|BX=|Qc+(00{>X7$Uk2xSs3!=?SwCi+D+VZ>VBbn=`Y2^Em03YE#Fv7 zUR%steASMvp3>S^5h#zJh&tvNPq2xGE@gNT->VV<2TnG_E~cAZ@r5>WFSZA*JprhKxGFBHk*4ezCG|c{ zEVvh-(}5=;r%m9acU}GGd&`yo%inubl7J8I%gyd~MT@rS<06h!plK%R2fMNWRqSLDgTChf`4 z#Yb>Gk1p{e>g<{{p*@M5xO;p@Mmi=9RO*MIr8KmYI_e7yY8?{j}R`%BT; z`G~-jRff}kg!5l29aB7^s~Gzpaa-XPtiIPsqL?}9pXfQX43O8})H7f14eroM&LZ`d 
zSM?R}`=@^L#yY+#z(S7D$`3x$2ZKoZh|n00J#83}l*}>sP*F!``2{S1!9YI@T)2Qk zRX!=D)_DsZo8)CXkE+3CUm!>cR+ec@Db{Ten_#pmc!w2~a{sS-wHt2zWkY`%@KX|Q z#c*Y`9y@-F%lPosd+$HB;Q&shxHs$TmNvWjcuBo>E8}Lv#((BU_hQsGz&~;6qdNFU z51%X#?&`~et}62^p3;LOmGQ%m`3n%$K}H76nlg4I2*$Pplo*DQ%^6QoJC;t^_VKXw zcJWRCr5Xge)bz~@_fS8 z^JAQmho{?6p5?*MIO@lAdmMXMWP9>MZf+0I{mq)t!pr1ykl@*o$!LT1gWO_fpJ~?E z(4IImKMiEfH3Ngp_GHaXlUCalLt)V<1>0t`yOI*Dn?zjBKd*{KZ?}k&a2ef_k6fz zgU>Vls*%T6J$>>-S8sU=z|H0Uozvx){_LIQKl?A?||V;gwj-V z7++qrbFv>FpOD>LqZ}8Tb<7G{r`fu?&LgPK^gODs|FV8L?>~8CSBV(d0k>{H(=X?n zT*WOu$}J6DlWfHRjgdIH5hye60xV2Z+b?w^X!95$>7JoTHhkcS{-j#vWE;?4!dw;O zZMxRWpJ`*T=H?Im$gLc~Nz{@fsr@27`z`yffmEc86$AIy^F!I{CC2&~ct+IbmR{uH zqkyp?-ujgN2xCQSD2{d@H^SXoD!jAgteCzT9 z?6Ed%ZSU}MoS4bIPp5c#8&G+Cgl5En9rj?^&Dqtdr}K@$77Td{?4~A@_kZ+o`TqAF z>b8w#`S88EU2Y%`!Ppiv=dY!(64iN{_#e>E*;i9Fg~O^mE?Zi!SY;e2iz0cTWaKPvMI!zL3v4 zFp+_)tIhmLT`{f%hw)mxDpix+L247yDhI=7@M4j8R{zL`f%U6#N!h0T=nGxCs$00m z9bOt^ls4&$Le>@?h+^7g9pf^ca!D#}kK_>SiQ9cOd<`gBKCy_W^ig_djZ!?dzlvl~ z#e5@_;zFAbcJ46o;+3*^xvj&Ogio|U#$M5A#^7q~x#Zlc!Q#2@3D^CAcWy3!3`JgM}^U{VLFTl54?GH ztN~+#O^w&;pExZa9eG$TzlVLf?r(gJCe$vMfB>p-ji^}4CuLIQD|9WCHXQ3Q=6yY! 
z1){+Kra)Q0Q}=Q5pprv$9Cv@#vXj-o(Gxxuj>->+Ab+01K0a#!;Jo1L#l%H zb6Zxv!#1z`WB90g`UlSy2YprXkl$EXI6u)3d+Rp=@~dx%=xf|i(DR#h#o1q0wZUc$ zvuFo*@s_Ch^)Vz1AERUJ4+2p6tUG^jrf;@b9l8BC_SN?o0>fHZ8DdMS@@O}8e`Xet zU<+ThszVEQL=^4QP%v>!|_Q|~Fd9NXKRvr%-Jd?eX728uzBOPt)bp)+}@ap4%Sq3msKC~xdrUn9$Bo(Szb z9L-ogVQI(ra!d~MnmoA@%he<2`5T&wc*^v9fT(fAy0W@$3NOWja#vq()v`YZm??ww!XW z$2_uDuh!v~(f~NCGP{&GN!jmJi=6oYi8kc<3{eTH0&e4iZDs^FCkQvY^hbDRbk)Z` zB6k0iR1K*wI;Xx3SeN6Dd`+WY z6V58H0^*9R0Evlsdpa~vExAcn9v|1VckKi8#WvAyic)({7{!(~KTUeExZ&7uv}J3@ zrb~RWH^%+DVX9lZJ!9ywPi${HeAU={|NT#vk3ZI5*7P)^M|zRXwtvD^;~>7t)8}1w zSf(-lZ?R`MIc(AH$0l@QzRCysJxrHTzKkzAf=oHQsk;H%TqO@jx1-$a!H~~!A|rKt zFPFBqvu*0Z_j+VlMjkG7T_1VRbLpkaK2tf42qI;O=K9N!hbMWrjqEc1V6f}u&eO{s zbBq_vF&$i&>AJ6!yU$~?Jxs8d<=yV8xY925lqhx5(zAj&ptU>MvQd%_bqOu8VAPq9 zd`>{qOsJaQg>{3F$_jPKBupj{u*lg`Nw#`Qhw?aP>pr5` z{NuxQ_SxD$g|8pv720FR_fSZ;8p#;wU^IoEB*HzZXSev#Aq(xBdKT)f<*)w7_m@BT zy^sBa!spNQBn~~96S{S886OonHcn8Eq+5k zF1vg0hWwoCzUCLpFZ`Rgmbc#G&*1#on?(qz1w9t$+{W?Fm-GWk`FZ$Ic$!Lk+z=b7 zTCJnEQzD0rRIJg+BqOWXJfI1DS zvcq|xQ63SOzDp;Y^Ipcn-*!BcK^ z{0$}`?33EZ+kSx7aSRIM$KrkK*NP_81&vP_h@fJ1W%P%$LzvZUX5H{m`et(=AJ)X> zb_kdD(I=$nQZJL%CkAbHrPmyzsf@o8eDMC`<^KJ%<&z(4uF-Sz_0>O+F^1^(EoChB zJld`QkQ7gSNvc*k))9B?W83rS$a_jX`mUpVOdcIMF;tv|4<|a(Tu;og0TbC}{=kvK zhfU|0<*AodZNoq39g}kBj%?2NFyW3ab! zYISg0dNH=D%n}sXhV0B<0 zRvWM>%lKZy3k7jGUC;z^kRPDarsM}m_n!8NgoFAp&crQ!Bdzv?NQ&qJOT2R)6A^5u z*WIj$eWQAJuGksR=qMh2a1Z&?TW;+^g zR(aVD>Bu@zqP405WvB9tKetF#LS<~6=st8pxu+j+@)w9Yo`%DitH18+OVK-To-Tj* z16`HYJ<e>&sQ)B; ztkwUSIv^2MxS-S)>F?gJAKLO9az+%}WaOreOY)RKrVCXr1%4G0x>sj-cPNS z8=xGzUU7}6=&ON3^2sN9?zq0qKmJrw+OGa4RV?x7Mkuz_3zKcIIld1At+vnmCw(yc z>RjQ!EDye8!kxV9h;dKeF`YAQj$P^{VWUbF*$m4;hu55 z{+Sof-9ZA{%M-+&?oUFNxVqnC-)Akh{qAkRfpZ=S%pc28OD`0XQHWTpmg{wL~t+;Tb24WGo~%$EjO$q4x>q& ztCV=m$f+mg(t>4eVL36oT>C6BmId33$<#8U6-N-&9hd@Y>6lG;Cio>j8mWrdueK^w zJ4%wibWkgH=-4OOZ-tv>hmM_Scmfw4^+-ag}z?v3bx)ZNwh4Gt3fPG`Nxu% zTb;mH5`sz%$ijPJJlbv$eNYDYVs+%cP?2l;>_2|AS$)Npcxu0{eh5Qy9J`MPpTLq? 
z2|#XS?JJ6fTkTQeTE-i<1Wsj--#Hb%Vlt692fm(F&cazbwdcXB#8!WyC?CZ{nJb~E zr_YzK|J>Q~t#_a3YO}sPDgK-K(gk6>*6j}P5vSx*kSDp%QmKZ!h7VHO<;pcTWlEoz z;lL`cQBmfWBp#9Y=+U#~o?h|nWF^`{vAuU+w;M_QLSGD#6IWeXw?&J*>WABE|B4i0 z+r1Lw;Yb<*)8lqB>ezVtk77U8Zx68mvX#?CuuR&m(VEm!k&jLoO$*==RVv*%!vV6IyF ziW!2>ehLrTUQRvSJr2sVJoSAqn-m|NYtvk8Yjm;M^U-C!9NXmanLP63-L{v}$A9{# z>yPn;cg|xQ&e-=nJ}+H3?*%hEo9l5t%jbH6L*C0X?mG2uAO9~Kd+fTu$U1*7N8fEy zzOMhy*KKF+;E?ZeM}L_v{KxV-#+g{g!WluIbP8}d=M1u%6t+oX^)XE_JBSq;-Q4yS zkiv)~)ywh&F4?mdo9f;Q$r5_4-jaANqliMs#xgAhlz@qHd*=_w0tX?j_s5(5WYmw?W z1SaQ$4+qYP3@=nHsCpFzt$pLgH?gqz^L^LK2F7COS{+&4JNr~GJ-E5tzWro*U5~IM zF5fobylklROyMk`xk7Hg;^P_DTsen3x0F2Bn;LneBUjV8N{n4yWgm6h(KTFoM<)!i zf#siUy}p&u_Nc7KugX%o)#{@YFQx@>-_TE$+#Vd2vvqLM_~}SWV4UQWHYzv~j8$*D504QpK{>FP4JOz9G8w=|%JwJE7w@_FxZU z8pa;$iLZw?^tLnEYT&;X@Z9mo`s)|5_|THN;tE+KI8xkVE1QF^*@-3X-t3MLH!(Kg zV-pS_ku|uBvrY7ocaG@DJ732{p1kX3`}l}F*vQFK4(3=Mf6>o%UaGnV;IF=wyP}i~Rm|i}{k36_EAa!)J|E}wL%GXKZ>)0;q{Jq@q$K+RGVi$kZ zDW~4;DI-scE%lV=JlHvBBKHa03ukd|v@7&L#8``f7&7>XSO3K3lO5%nRJXwy!Y&CF zx-Jhk`7xdS8MHAWBW)5_*UfRR6;cHOgRLuSv`@UNP!-zW_C*W>sd}U%AARQd+=m0T zXEaoETc~FryR7=!|GO;@Y7nm3%)Bag3*N72WX6|<=UPp2)tSYq-Ri3I$;mA(Ciw*v zAoy80&^TFhCIVa4A*VVP7uK6x?b8C&_JF0AuKe>O#Jb)qgshh> z(xJZd)oaTn*Mh4S;gqpa1n4Lm19x%qXZ|eKX_0#ao#ln2A!|J7M%_&su^M9y-J#ws zrL-eafYa6DcOO=h%^xigKVw^0neW^=)754k6?ne9tryf3A1wGFE)$mh@bTtrHc88!m*p!ze@hEz{pn+B-s;x;8%2%#p_L70?nBBIxavi=_RZL9 z*w&Y@vS%a|-@vayRNK?m;SEn!#Mcn6@kFqJS40;v4ijgWA1&|jt{l@YI>x)Tdxvaw zTx8WFdB7kLm9o`i#FEtoW^ve(L$AJuKM^o3XEAbBJP}ZjD>pJ3w6*`(UC& zek^rt$8|1jyKd$RHhDNZF80a0j`E%j^5FN{vga>2Z1+<5_H@T&-DZyS(uMOeLgaIx zsVBe8_9w0DK6|R*eDk$=D~vR0gaxglcC?zbuvGHF&53B#D-t2>Xh@?JCStm#i3iyrA#>jeaRD?i zaYSD0(6KN?P#>m^3IAlOG>Ve1*_f3OgpJpbyxF zDfha88@+URs$-es1sf&h;J7;P1+rYwA5d}S8y%nLx@{#3ZOer5VjVkQEgyB+)B>EG zMX7pKnBpRW;#|JsiN>90Bt?-G>u?oPL#_zVj_b51*q&e4 z?(@Xo{mte0N1h!TOs@ymWxd?v>*a9Gd7WS~86+R5R?VTBNy^9Ws#jqxQ>;qjqGo%a zA;VPTsuU$#al`$F+)AYq(%?x>6}cmzQhJLJugS(kJA^a{u~pc4#Rj7&?na@j$qdYuMHUnI9hE~Sj!>Ya_@f@ 
zb^6D}TCmWuSmRM~)HzX|p6P`sk0m&-`<99G7_H^m3{d*K_MvQ982JAH5YpN~B}=m` z9=dhN$=evQSLbnsVV?w4MX6j0MU9_$Ht!tcV-?2uM9WNz`NnO#dMf$m3-ocMP*{6J- zJ+E6oumi_m7~sOybG>oRedAE|)kZ7dTY9cHKjgf1ru)iY<=$m|Nzm;Y)mrvLYly4~ zf<-fWbPw*IEPwqkK3M+FKe)O4@Z-mR8Vml6;htwBS%gsHo_YG3f7%b+Cn1c>#uW-L z5*kI^Ak5>6Jr~5G)v;gwghzohfc&(*&$_{$>gS39c@YrLuARMAnCUOTD!cPO0= zuF>yg?0VStQrTA@Rln0yRi~6zZMItkCENPTSgInZUY5k|K*qQxZT2g-CYRs~F)&Oz z`(c~u!saq89l`z|{wr23m^Fr&q%uWT@u(v3 zap%5p3}v5s^03bJ8k^BVH^#@{N*zSU!b3{#$`zY#zXzRTo4I=|vo3t$NRB+%J^x)6 zT)283TrhJz*x4^QWV8G6F-3QuG@Db4tj+kwVKSS!R<^+b4}6}V1LoRC-)aKMbQg`qya5-4Hap9?1d;q_-| ziaspjghpPHh;t4eKIEamJpFVDm3?E>?QE+J{TsQ*2XRIKh)z=AplUx!c1i4*8V81o z`l;|*zUT_gi&_DSQYRmt->l0uUREi$N#7|lC%NCGt+7y(RcT5Wlky!+@*!>QtiYnL zCT_L4ggvP@x|d6PeA}_=nNf9L%gE!sLk3aw_+77jdS3w83@kD!-$)dzhFT)VS@Emi zCFH}-W&acgp+utNO_5Y;ouaGc+>gF7sfbN2fQgMO{EUf;1wYu6mz@`RFi7y?{8r^d zhrgTpW#7${@x102S~QQCwsym_rSwzuTJkSH^Y)A7Km9AWmf!eir^_GcDH|U>(hEgc z6Kl~DGXV%Yk@Lkr+ohxPZq{KrxGRmarI#*WA@mgOGAT8-t}E}%->!-$LzYi+ad4ul#}DTmygBrdE`B| zm*q+Pv3u!Q)qXXwK9otGe5&YeFj6xaknoeV$|OK*#NYx|7C~Q?hu722E;#(1`?!w{ z<=X%hO@FdTP0|EelO$JjZfYV`3oCHSuJe?Kc*)eA`fivZTkY2E^}EniFnV8;~@_5P_7Nt_)D3SCS`5LiSp*nQ#~2s@u0Q!>SCDNrlML^ zYhk!8-c_0cepEiB9Rk{P$U1dHNVb?KqB?efU2w44US1$L#X$e=IZxxlKQPDfqq%E1 z@NN;fz2dAdfTdXVTD+FKl2dlOROPk};`5Iuwcl7=SLB7Y2krrPst#u)xJ^J#^wg6w zP$l}h!d*mltgc@Wis+Ipc*c_6`1s0;f?=liogqQ+4E#VDOd{Ya8{$}7lP-% zJ~#3N7wL@Q`?a2T58(9XEiHf_=%Vk= z`DefNk>g-dk5!JZ;;^KVU|R`I5;lk9NA;m(EF_}f*E}E#usIM6X`v7KSB8o=RheI8 zh*Ei!ifO)$B5eTD+t#eFSv2|P3#&0u*ZIy4w2|C?71)`cgmUY)=JWH@<@L8J2F7B> zFYPBK&Yn|Fn)@kD_U|dy4hXvP&V8u4UM==P_j=;I5Z7qWp3N*v`;vDq%Ds;5>@)S) zhd*-aa3n__?4CTbZf|34X1mnUcYNfDJ)AR7aOV$w@OphH215H} z;4FyLpp~GwBir&By=ks0Ra@QrK6t=K)@%JVeywdXPtb@2gfS$=^#|jQ%#73+emyXiS9$-`Z@Q0b4$W1qpaIdyx^AZnUoys zPdVPT2(Q~xO7>U_Zx-F23$z#~uTNTbqWhU|EU#$6`~^Kd;|;yg*rw)o)7SnoQ+lfO1IacPt54`d zbD%6x?J+~Wv1Z^)I*2yNL8U8hHGd;eNtN)uXn#Lz#r9BmIYLy?#s_lfb z22+3jDeyJ6*_Y%!=GkyRE{qM>=(?P8 zm-q5zxah#c9sbm#Pab{hIisb1Wqyn}hO-0YSJ?zZj{U!?l(@)A$9xCZ<2r`znCq0U 
z@_iY{UR>a>ifz^()^k{Epn6v@#f*KTthDkmWUf!U%(Bs^iN9ECx&{uW=fSOc9Ki!n zAF`x1rU;aWvx%-OCI$mV@UEU6`#b;SUA=_k)UR88rW5D6?(Jkj%^S~HJl}h8OXX8N zu1L>m{p5Uk<<%R@U--+sC%6{fYbS}c_|q7?^j6sGy6Kvj=J`|S;tt2 z5tLpSNN}icS2^3t!TU=_oe(;Es=*|KO9P+zV}2B4jB|E0_8SRMoj!KQpZEBstrF(29|irEpf94 zMmw>PVrUQTQAsy*!!vfv0iF=-V@QedRQHzO*5eazYCiwzpVI;<3xNwkPrbh=ycssUpN8V-Oo^8X~ zV>nh0?=gLs&%Waq3_KSW&a*)$+bhrb?jZPM0nfG_*URBKmUryTdCcazIP5$B9^YP@ z_i%f%tGK((Ud(&4J)8Ks9M8m{MNXR#nMg80QLa>Z$tg!O^y2`-7aw)tjF^}P?G;&@ zW4up1&EvxnTr@r=@5lMkT7K<+{*IrO>$jcxDsvt1JYMV0ox97|zW%fN zGv15k{U5zpe*B@Xg6WFHU--+tr^*9;p7yIS+-eb##MOp~hb-06lcVp&Ev}w|v+SVK zQAf(5N=$I4+)Z|5YkP>S>1!`A612FW-Ghe(jBk6gYl^rKuay`)!(KL8|#4E*va9Bf*>TLWF`B!d?dcq zlUK9UA}ypKj2t`^)J?@kpWK67tQ)uO(0hN77aksT>$rudv=oZlB`Q{0nsJca_TUD_ zen#0=>xQvW{($3)v4VY#_wqq}TOE8&{mml&umC@c&W6N@-+k-auLp8(?a+ z5>FmS68I~hH{wbZ7aY z?&*H?@eSXC^5m(mbU)FeU7yFF>dN@zaf^zsg7dUTo}tb{p2fY(x_8BO$rZW#lNi14 zRj^c7tIxR{U#)SUQm%zReLEJ4G=R41<1ZBMAIZ4VuXDudMx#A;QBEa-g{ zuz&T>30t^S;Y(0)NP@|zCk8AI#}&B2gQ1GVS4p1WW*ot9J}}@1Xst>%#D&ujTJPLi zZr{~?>UWprtA9rGmaahCH|+qf4cN#S_q|LEITk9Oc*RH`Kj0guO7zQUM>xpm6h3ra zAK$^J9z61yH<-v{8*Hzqd>x;`_B?UKK6x-b7Gz!D%N-ZaaLl;KXIwbY&$`%jT`=J% zN5<#E!a1N20qU||4(4U@nJ>2CJ*E#1Jt_Fed)_&Exnp|yGOmt0R=&!<$I#22ugi~> z_x$Y1y8Y~<*AL4zAi@U+Y5kKw$GiSTU$%9C=Ppm2@)Q4?O-$h<+% z7`xdy`y+bF>6Mbz3AAjrM;aEr>Q0qs_f)gDjH(d26S%7QNjSaV_amyMD@b#I-DzlzuuJpa(^o7>ETmfehUi*b;YiUv4 zPQ5-iylAAq@=+_am`wk(P-Y$Jmb#77`#2HaWGv{Bsc)d9D=08OtBQa=vr6AID<0+C z(t=Ba#&I`n>Mspis;hqI?rl9b@$Pb0&))te-B-`kV3NwOC`{U8t{CV2zqLvFkPxea za8rDn&4^k8?8`@$Gxow0ikHo2p2XSZk%hba3=Un&U4P7WPaavf4`+POIW}Or4VZ8R z-+59_KI6lQJeV#E9^G7~eqrHk)JtsU;AhhW75P>8pS1oM-z=ZwB;Gw)+HqNanO*nW zamZ8em|mWJcAMzJk$Si9W%T=4W+Ief7&(&#gICor6DWP-RF^*#o+eQB0Y4f+^kkf*IB~uH z7JYzzo1Dw{;I0lTy0M>2vmbkT-g>WkeQ>qRI5{f(P>lWLTH4TTTqHR5PSb~LC2P9b z{HnU?1xj2mN5#oQwlXH)OBu)J6(+FQZ2krpBnifZRq@zz@j%&JuFQ_7DRztNn3;|c zM+nt{n^ZO*9T50ITk$Hz);>5u7RS~fHDb&m$lQ;yg6^_(UECpuy&i}vUf7ZnFUNOe zJ>Rftf8~HPPj-BjgT1J^S|8hOA6{((VlVu8UM;5DUZw+4S4!D`wP@xT^%Z+`{#QKA zM_SSyf#M?l8s2 
zoOe`wby+XRw&N0S$59UUy7J3x!f}jqjwg8X^jmB@w&Uh<>}L5~@3{B~rt6S(eb*tU ze&xcs6U0e!2rknH_a~XBQGb$ge3o{0n`5yZlfO)V%^Y+6Ww?T^iAJ?CX+-XviGs78 za_#&T19Y7NOdyr=RW?Y1EBWkTa1Jh}L(&09DnO-U(p*!EXO$gVZ(@})b`Zky=+kG* z!;dwq`>H@mX=YsTCp#us-|wcYG4%q8WqIohnv|K#Nh7|sdv;=CA317mP?PMZiWj#A zH?fT*w0#}OO}oaBxDRwx;7dP7nShTP1vuqy-yC3T*MOJ1i@1qx?T_@!tgkw3G`_M6 zZ2=^LmE(fj7*?EH-*56k(Z zXw--LAd~sE(sgkm+vXnHo;W#2q*=wd)gv#4vM2CkDfU-K){kv?x7dzx@5^VNeN;*N zoPP}0;|O<`ZDmG~k~Ur#DtF!8Lx~AzEEr3pHr5Z9k^AwO!^Vqn&1p5C@`yX^M?Y)f zy4EFUA&aKGa~lWqT_tqOjkaW<4iY~sd|lQ;RL{?@E6~bCvE4$#f?QXey#uGo6VPL!t7q~s1$dZHGOS2E~ktNR9&GoC~ z(a-tJ6Mxr|4h=-44L(28`!cQ5(t7| z>TQV%pSTuo)vJ&;2T)LLt6(CE*Go5+Pd@Dc9Rfx#J_>|^y35dD%tc@=0tBq>ayFV0Yuz3msPLOT= zg@f5c$yQo|aDdf{3-$rJV^V4Px=*p+`qH$9rBwXQaullaWy08|a>b8Tm|1B)jIE0Eq)TrG`no2umMq>J@Jcv(Kx(>EJm` z5hTa|Hf9d7TdnVZ!rZJ2CuN@c(J_sIx(}*CXR}bc=|p|jFF`du9vBA zDd#VEX){R;YAJk(!sGrMebS)SuR}g#uBvzb&Y3d46HDTVZsv~ss(gT zsRz^JoMY>py&Rj)5&o|0zIyrS!g&s$8>JjasguvLxqcZox^8nUj*f}lzpgy7^jIS! zpXI%tn1fG!FUO7)Y_CVQXE)3C{MUpEl8K8e7b@z=X0r1Up7}%VsRMqnj+k6a82|^g z>J$gnD60k~dM1z2Kek7NN9+^>lc=xmx~!rWuEnUy#?Mbxo~s_1Dy=7l!zfF6Ve6^9 z0u=tk5A^0PO_uypA9J_eq}nJZeBaQc)tKsdjx0aEV&Z*8KMo{TPBIcIw>g%hAL&mP z)aA;}w6<{INySm6-plSI>&rgd@)c_UI9Q_|&LmSGd8`aQ9!QC!_{Gzxsu2=|Q@lM< z^FEy9#^gloirVddq4(53{LqFI5-0ft08WI(S!GmNPof5lE6assA8-mLJb-bTD`vq$ zYD}=oN!%CdrcqXb{&62!+c!jYnCxrB9@|0;Y#ds+`Edi9jAHpLj8$&u#aJBc+SeQ( zBY?qIw%gY2?~qNU5}mTOtEi~U;GoktTpf2s*lCHMSdjI;*z3#0oSZlgOjqgpem7?3T+fZ96xa6Cr+C_a&7eB!VGs~k-9$f0lt0z;_Ed=VLW7tBqy`dtt z^~*besK@S3wQRE;hOTaNMGd#+pW$FkSRxq5CgP}wJM@Tss|2x?ls47A4~jhH8W#>( zUmR}gai5xIoocLd^cSXVm|v-5uU!AeeGpsUNp;Tsz!lv+jZS8W%M1hXCGN? 
zsKxx}|4+ z-k~IY{XYnRF5pk4UMzy5Q`0Z$`=Yyai+57x>YIadjR6C^sR{bvmKs@Li`|+W+-$pQ z2l&8ZRi+6eAZ5fPtl{!B0_Mj+^(S84$Ef{w;ze_*YaHPpJYPgA7IKzQBo^DIINk>#y~NMec>4PD*vPT^K}Wnm6$){n!sPqUtncp8@@B+mEq>@?i0}#&AvWmj`=V zT^;kQUMQ1|VXC8;F)R3zwrrS`M0~K2jy!qAvK^~+LPuNua;y!}7f&4$9Pg=9s7CcZ zEbVN&`HY@?&o25JcNaUY%TrE{Z7+wb`#DzbSUC359yhqoxo3wgw#mCqWP7$9*Lga2 z#&liE$9y2~DLBMk*UQ1=U#~4(3NZ+FPg)F z*T+|UoqOy5_$R*NI&8zNPwc^_UAE5&#Ea&Qr~Dtb`@n7-)Zy;Bl=u8ampqvGi~Lv~ zf7tP%zGvHUaHSsJ*mVD0M>)D)j$QbY2RqjjWAgae%lB--&GOjB=h208fFX%NG2jZ(LGxt^XJ-*qdV=mLt4)nY25lQs$bxh~n$~6HM zcHo(3=ob#yZ4RnBKqaiKYI+@3mLgj9Dql=!N9sTsnnM!{%O7ocb*%@sWTH(OB_K;?oC7FX0J{i`*J1bytGzaoB^inCnoZSBqbZo~xb+)sOk zdomi^W1KN^=7s8LqSf~8Ys?Bje-o}7fdmxB*3_1IGn z-baCXI6fJl8y91>KGONZ2Qvm|mu;@^obV==F7L9G!6v@su_s@XFA%V%=OO{IN#hBw zj^5eNYWJ_O&|`?(v@>l>J$mvzyIDSCQs4Gp^<(D^N5{;1>RmtcQjeZI`eQukyG_d7 z_F4<)%N)rAQSZgzVE1g(h&^2=>Vfuh$9&fERlYwf?{)2dlI`hjImX*D`!UwR+jZ>L zNXfJC(y%ftiyy9RMppK*qQ=w z@N7BL4?2GS7tWVgAMnb)aS3>M>mIIK<_9M~{Qf8P_kUX24ltr&e!HprGH&R{f%GMh zYNBbczpaI{u6pxlKT@^G7MsBRBaL!+qoBR6(a2Sfwy=Y2;IEnCn*Lt-EGXl+4%Lc@ zpye@(4fT{)m;{j}Uv&d7JMIPltv@_-+fVdrW<8ihKfF0#&eVlx{NPf5N#IYW^e=tM z^Pag1&6^od6eFiN_my-v#6{TEj~;=vc6H`+lTJ+evu)jyFgFIgWW;P$*QbmRaJ^yZ zh-Q=4uG?&|_&fZ(Z&6v%@#Zv5E#^xf+z9gK<_fan=O?$gl_>*@X<=CXH zvmU!+x{mGqGrrgN`EoQU_oB3q3&w0dTp*#$B6jm0{=FFDFZpJ5SPDm%ZZe$Z1A>{2 zXtGhQF@-^Ki{E&-Kq5!(}|jbhKmErESTN#WUM+0JAIm z@w>jrf_>GAbc|zfu*&lxe^{?t*=x{E=(M3b#scK|3%#9Bzwr2<{)azWzVofy%e{MN z`jxf*)TRYi{grLR++!oImrPuF;BiVn{y;y~mU9+e2W?P=s|05}tXYxOqmxwBBJS;X zcsvq5F|013x6+Sje9xh<8?MEuc-;rW9VW;m8GT<@SPPNZvTR)YP--Gy*$JZUr%iF_ z$-&P3wvto~)u17C0E0VJZm{%*6fUENarEoIb-q0Ow0_a=r<3r#L;ZZJtMxZ^Px+a4 zDEst8U3^Qc=u_R-c<1)b<(9q|-O?M`ZtLX}{L1~d>SuZ>%_)m&*^!@WL5*!>jLkC^ z=zO8#>dt8Rx|pV)(uv94O!iS4M0}&YLxiq4WPhllS?u6F%- zR*k4r-?mGF*6`SsN#?1AO zJ!YRiiT*lyY-WGgvE9Rj4_)W%@@|K`LV5oPV=(|nIv(1c+9{kK3j^vwdn|B?z zjx!H>9T&{$9)C6d>#vH!Ywl0#J|o+>BiYXry$ed^Kf_hUVDPWrgKs!g}dV-H=u3etNRIO&oSwkM&V+7 zVRIT_KvAZCSw7na8~GkrWXWUmCzYpTep38%+`Yc%YnILR9#?e9qw78*OMXn3w;ctS z9Q@2*IKhswiSKQv@ 
zURv4sV#Pqh_d@cU`e7hxZLM%$cw6}Oo;A7C0{P*ibN$fpeEI03W%=ZjXX3iK{Mlc* zquV?PkI9-}nCt4bN{*EheIP#9*EW=#Hbn0LvQdMpiEF_cI8e|&QucT17P2X05Uo74 z71*c%X)!>XmE*w0$I2zSZX>L!`s7sig5T6H+)1kXUUK^2Dc@DxfBfKYJBhvE625#^ z3v@MFJA^kkvfugK(!!b_X7bmDGp(q#cGe@M^sD&S&X%`cJ6RswIa%)Adb+%Q>+y2u z?6LY4upH~Z$F+hGo$FOl5w)&+ z>f-Fk8ncD#{dPDO@OD|5^Ccp|OpxE{_b%RsA*dr2aB__dVN1*JIi+8PuCZqfHeZxZ~hP zQ%i+R9{Uqn;j$eQ1JyOb2^Sk?57k%=k+XQ$jKZYyoB!y&r^|cq>7HIKy1;Qa-JkonZhL{pLYl=C@!Ab>sJxdlW|ALZynjk! zF4f&XMJh!+lpl+2qY{;L)QUe~Kdgw#F<>7%5Jjr}MWL!ysF8x+i(TpYGFM+z=voxS ztA1dS3T>TEluJ5piAfWT6ETbKMUS1LU#t1-6Zq)Q=gW7#_e_tLy19Jy%XgO7?(5c` zXD7=~=`CP)Pam5%SQ|;=MJ-PFuj)uM5L?Xy3(Ml z-?wgQpWZt4V*XTRUeSAYrpZ*_tI+fPilO3LO_cNK@0+?xe&Z%z%+zMJmn&d=Ny9Ns zsv=(;kXK(IS3pE5F?o#}Ft#%Qtdjd5F0&Pcs2QsIa$%@|FL-ks#}_8*ud+6S`>^j? zXnV@M(J{3TIx1z_NO{CVX@HSa%zPA!%IOnQ>>*LB+<@Yk?cm)zK4zS0e*&AUc%x6= zUGB+VM&GgN&o0|5cb~C2CPSXoF_B$2k00)l4w%8m*HGEmn_d1?|yoWT^`P3`7yi1w#T*C2Jo>-x%&)HaxkRS zXKd=xCy%`6(ZxTy$T}`%bfk{&x-JiP4<8?COE`L7m(ZFdMoqLENY@`LcgK6Y-L5yJ z%m2Sz?(OSw9+QK)Oo#reegbn@>ipqK-*(*|N9o7J$pItKsBs!otquy?x~R<%MH75k z7RR2iRW=SZuIyopA4-4pt&f&p{X5_C!}4*$g2}f?sC=RY7>k(l70<<3GPaUeWp#D2 zAOPlCSp4974=MQHVap5Rg|jblhDC3hy04EGWn1Feq0g5;_~!e|gZppj`J#HTzn&-h zOgHoXlizr@eDO;+moNR4u8!(oU0I9e;Ba54+$jUGaBC}&wKn?DO=$|(y%AWG3i~(J zm39^EI$dE0G2t&~fDN(6gX(|{A85T$t^mv23So0N&Q-{P^P+%m4jND~G8V{;MZRxQKIN8@v2do$Ehr~vx+<G`z*J-Y{;#I4Jo#%!#%aIfIHi( z=DHn|T~X%HuDmYy9&S%|9qy~_I!7;W`=_LP?Fl|UW}E1*%4hEIgg5qGo^o`_v577@ zx-Jj4=kbM2?8uQv*YPO_dli51pCxA;`>4|(c0AO^;2xRli6wF4tC#ocm&Fqw;kc}g zm+597;p_6q;jCL?E1 zY+y=?Zf#Ss9C5V^UgbDUqO1kjiLZhZs6K+c{yK}HfGTGJ^W%?x>;>~Hue`lH z)4kKro<3hb`tgh9AO3Gom%sX-oGth72+LK!(Xqt6c~c2~gvjN)_TdmTWFg8n9P@ba zL_NqhAF;%i_HO<*%}xmd@85}O^m!u0vJl}eeqi`=*Xqj(HLi$miT+m_eQ!Q*Snk%p z0%(!S4sib=FTEML*;)4A*ArOo-g~ip_2=#^AL}U_KYDy;dHbHe936zdRTu-$&rg?M z`+HB8Z~m^FitI!;T(juqo)cdo=Qa*h#HTMs|*xd%@A2D(9;A z30J|$rWWF0Z|MGSKgC2pvUQYW|2cklf5sdV?*O;)vbluzle~|$sc~Df z!)^E~pE;{ue&@05SVpXUM;;y@@s`%4{_;7z7h^c&nK5(y5dE*F)?=vskUszX{yR5p 
z)Jd21Q@5FMa~-bWqa%;3`?}0`Y``B&;S3MBUY8w=8L(v;O?@$%j8o-V)n8=o$J@!!9#h5zuA zc%8?;Te2QA^l630wtOYLH78CuT4LGOo3n8Y(12&3F+MI$sMmOjgnnr@DhGnrwH8t< z+KuZnsL-31{xwmWRCV~=MCt1013(Z+F9i!#fBDkjIMWxMZ-4j6^7<>M%Uf@pF5iE6 zy1b^<B%BQs2{qzK z(s(U13(~h#kaI=XYR0vuu$Ybj06+jqL_t)_yi%B}on2={>3)oK>n`f9-sCv~O!?!6P;i+^YN!WVBZuf4AOzh8T?y!O^3T?JQv ziE>L~X-&ZuZb`jd5_C8&&^8We!_bwb>f1P}axs7-VVizRUM+%tfC_qaTO3_|+-{4t z<1d!?d?(iA-D*$P^*@Pn_d%I7$9Y*DUFS$l;p%){7Ms`ym->v2KKz&E;l2#Br;BfN zog=Yy{cImyuwCDElt~@e%jklcanwnLXZF&+_4pF6FOVFL({N#vG8~lSx63=Om%B~y z$2h?D)bU*&{^VVka%`#Z`Rn@FB!`z2Ombw=Q6{AxAE|d;m(B9nB}d;=e8bUsBEQzc z*}L|d6IgeCSvdhGkUi{Wy5MOP@1XCELl^$w<~nk0uJUmV6Mxu;d*(k^obVIZ@vv9KTy7S*ixiT}MH{AhXWojbY` zsHcHEy}3Nq)yd!g=TDa}zH_?#nXmKXK;l<4`d@###`j1BxmH-+Xp!d0v%WFd4#NSn zPUV=9gJGXJFlBpb-d}qdWEr2}*>^>|2DqBBTY2*$eN_8+s4yTIfZgb4n!~RMhu-|v z#DZe8rs`u?-LPRqrv2d<6(8nA_(HGZ{m%EFFR#6F=7s2wpWj;EI(eel;HiD>bCym5HO%vVs#7aSeYX7K9>XTm2u`#|(Q8|tNN);}*-0@dKV_Lmh$ojG1SU3-3 zz8W~K%}#vE)ex=0bwBp9JkhI#A3o%2GJ0(l*YE#}kC(UJK3jhISKeCQetc7pc6_kB z^Oa9@+lczZ4IbR_MO(xP17n`!qDqYaQ@zIc`SMuf`RQY|M=SRmxChNS{wFlR+a~WTeB}dld>GECV9bv*tZI+&W z2Fnl&0gwPmkRSz%vPf$)Wrb`{rt|!r|MMQdhdOEW&~0Z&hGnW=2@O|a;dDWtgNb9V6zmvOpXgHc&W$40xwEv znT$r9iC|`X{NCq+MH2tS!w4k-k@Bf3`f{==pBD)z;1j6x$1oBR0E*CiU_y&~0U;{Q zY#2<<)b8Bb>+b5^-{-g2x|`am@t9VU2m7eY1IfSj#!+`yZ4>STQR+I-#faiHizh{R z89E3Rm%$E>fXoLo+)WP7VVvb-gt;CF**vy`6l`!QYt(Z%otGj=95KO+a-}@!1HGl> z+WQCHpZ|yJ-4}oO(eClbw4LSk?e4ixN>6Ve>GY-zJ~Wg*Hoch1F(DLQ^LNn*CbH0^>9}rOqjX0jO`|Dr(y!F?Iv-&wtTp%GUam$Inf3?C)%PR*dg13{?KsZ z0wcEzA;pC@T4@v9#K6Qr&lH%*rz6ntg^*>-!Zmg>k}KQQr-6kB&7M-o)V@5(ot(1H zN%~WsbrvDRpZ9stu*?+8wifXR1nO!iKlEw7$~ryg*VLx5rm}N++jNYs`(U`&yGRdS z9IF0eseyyM{Wf)qVHx=5nKsQE!?F3!J6@Jvu`Kk; zGrwY1**Z#C=6GBNjpNLZfkIDeHtZazHT^MI`A87cN;TM&{&D(jY{$0g7&_r3n&kw7BdJe<|b>9P`3`uL!Efue)CTE>%a0=e`}f*7?{{ZE|zIB z75g@0Z*lW2{89lTKIYRvn0kgh`%!P--i`M-bGyD73|B#n`V%&LB+RVTjEVdK_b0%wX#&Y-u#XgedIDwS3mVwgK#u%cD zU|S(3rO3BgH}d9F$I&hZ!Hl+ToYCg#_pUzZzW3dm-S$@=l>%FeHoGUE)oj?acfgMY zYU7~W(+_8C~w$@P?7NV~7B7 
z$_K2{Yuf6>(@`AtH3^1%AWDZ?fIu8Fr1cjoFsZP<2Qf3%2#E z%p6YDF@H{X4rh*L4B|OHw5iQ$zGBR!rx@hNBZai)K&?0jx`;N1vq)A(7qUJoqxw#I z8W#BponRf~IBz}Yku{C&7kA}AT3&(V!jiX4ggEk229 zzhX3MZFby;c?vGj4DVgpQHt4HHK?@kC@-%KTj7Z<@)7Kxr0emLJ#g%3?OEGrhG`4h1E5iut5Vg%FljIl>>TpjlHhL6=$XC|p_ zM0!f_oi`uo?JoDb=U+J2-FRPnzaKl>ZJyO0|2Wu6$s^C?HZ1AlcDH-~>YnySa}mk$ z*kn~_E9{+$&=*F0BuuAH^)FOh$9i)&`%4;9@YFuM*E@aBx0&?ikYt~ZJ>Yp#jL9)e z-BB4_2C-c#ALjMPcH%81D*M^9o885WXS+-K+tyog*jjRUw5z`d0++|F{qq<}eA~?O z4Zqn0`J3Cua6xsUg0Oloz^L==$JB-JbQdb>1hv>=$P|}JeBvDPy1)pc24Q^WA<{ z+fGk;HDA;A0h-gsn9IZSxK?qjb5cCTo0GN7N!XQVztx3CaNxuU+?-|Lp9Uxl0Yd;| zi~~lUf~-?nI(+u z(FP1iFi#KQTzUJx1ltKssVD)y_T}Q1GxzwxRwN(JUtkp)$#AguAo%#C|KW$yoB9IY z-MiOCTGNF4ytaiLb$2%R^~$|nO-a^%tPC+taUlx3fi6b@%&F?^LewyCn2LW0A zNwBXcvb}8(@Kt4%9Id$XAh*dNvZuY~8}U|&Gzk{_ba(D6k;cVf1rcTHOK`yetBF8D z93Ofv;^fA`{NO|d!k1#xsSG>((l zvm6eYl5_;ZF*&6Ffi7HP9YNiZ-u^*A`^}=WUXmZF#4ATQzj<{Mqi4pL();^2v)Tk5;x-sKdki=?j)pm@cp|9E!;& z;dl0qx*K|p;f*``=+W+ar%Q{t3}4eYwV{VId5`;=+LCPUqtYXV7l#;daS$Ry*8V#^9s4@JQBX4HhdZtlqNHbJYFDuf5T| z_R<}lERqHkK4h2BAQ1Z8&V|DXK_~7w(fDM-gqJh9gA`Ei4Hth*GpS);dHbGDU^?N7 z?&5~1j5FAUO`Y(-^y=0ldQ(Fvq##oICjkAADN4f*&-BpL=BD=0-uXZ}tKG)t+3vPZ zv}bQ0bgz7Ozq@d8wY&Ueo9u^g)e)&E1UZwD4tR)>NQPnt?DV?WROQ1*B4XvEU!n@7msc(Y zLnja$Q1}hWpRS891 z^@=SoUWCxjvXBok$|ii{*%18ji#Yv9A>^Dzh7dTmoupm|+3HhcyKtzo8hhu~es}%) zZug8-Y`bt(KMf^{lsPElg1G2t z(pO?_wJAn1q(MV??875$7J z);*`m?Af>=IMC!dz{1z`*Em$g0TbOr^%we&*ss2`*L~}cZgkggA9cICny52L=gM04 znE=O@6!vZRR=(IVPHE+(F6Iexb`RnESqpI~7}0sUZ-hztb%O@n!oQsJ`g2bqyi zB8NXw3CCrvKaGv%zA<2!GsM09-c|J<`Ay%oKIVsV54tC%tc#3oIIcANUa_oe`6>gg z=Bu95RnIhH(@nFT$~y^rj%R<2X&a8GHs&y@edEpP+eXENHqCSP|4?BH^e`aD3KpF< z5svX52KXb$F0wmEcZDxw?qOt?!COYgHmmQZpy|LyxZaLG19C%zF_F=2F#9B}YNXT>JYaB(t7S}_s2 znQu7ZMHVHPpd?EiChv#cb***>tpX_xy4;kxT#>@J+*f(jJ))K3R>G8PcEWw>W__ps z?5X6nV5@t(nmk_@-g>tq%sV%=LbZL^z4Vt4v;wBpKYcPfi;shLM(If6P;)#AqI6_o zlAtt%{B_L)*J^`kNCsYiA_AoeVkck{D};lUB~E4iR@`Q{`iOSkXr>ouU^4SyC-=0n3R%nwR4dZj2OX39P~*pz5;3bobntp3 
zgH`j7Ya%~TBbCAhr6uWT)Dge@5ESRix%??ErDTfC{)T`RQkvgBD8tJ3wGZxgm!H_6 zSh1pf_MDE-KG=6n7WKNu8#ngUZ_CA9RARFcWm3Av4JUd`6N|e77>j}-(Co>dzV9jJ zo*cBP<53Ywj9-lr0ohu*(eVOTwz8`T@eL-erFM+1 zIx!s-9}wwBWk@FS=+NF-_rwldoMjc!_SruD_!+j4Ow5?5lXl-Rh(?+O0Rf-D&s2(t z9|2wd=E8Aa`jN6jf0)x6s;yGFSR`t?8lro@j(6(_Tss7C& zsj(_dNlQ2Mtz%XWW(*eZymP<%SHJkOPRi^b)}W>nc^fAz%n&LZ*<+bRP`u(T_ay53 z6P(|)LUt4@)xFBGM0#+H+!OBK)1Koy-dEU@WsZSGdF<+Bg^%%+zfMY=d>(n6fjIr6 zo)HayiOB>->dgUYn_Fx8sLNfg_TB6*KJvI$z4T#}+g-Qy!DjdJU+#53_+_2E_0Uox z4W5$j=+2(0>MfX@U*Gmj{M z_)$Kx)EB~uLwwBtMswMHeAW6)jI`F5A&jLja0JupK@qu&?vjOQMaT; ziK=K=$qst_#07!vG~MW^=pFBqgQBbyxk}HnRmwDt&!WXuF}_n8j&sXCUkp8ctGr{{RkD!-cqYsM0@b+PIF5H_e+q7cV30Iy_DGG`{UGBWIh|A)cdEe#Ir-AI2>a1EUUb zhar`e7!Mkgab>ps#mIr}pnLG(u=|C7_8onmI#+0z*f3DX@Y%p}EXf04ZGF8vyR{Jy zh|j>kw#o@rc627xB92yo zR^!2DzbFEA8@^_NqKZ+Mex#Y8w2&IKGla0N zINPd;j3VQYh%*_A0}hkX#FgiA;Jz;0_(c4QE)4H$4>P=w(=Q?X7U{0`?ouiJIN`FT z*yI;$X$P#R2NxR5#tkKjW!*@l$OJDWGbJGeB6D2cbl9LT2+~K=`bcH^u>0V>o86O7 zT#7G&#zVjyGQ@wz?Hxq~*?Z+)luLlFpl-;MDu@lubv zY7a(J*ZgB_kzV?#gv0}eq|$7yIHQFFUb_f0ZQytEf)JDJ104egdc%({k_35p@36ah z<9_$Xo9}l|KmB<3l^=Rq`?5DXts!)uxTp(!O`fB@nGDB~i`DcMUxs?o(WCwEvEx(a zxpH;4d+*&ddh3eDc-iPQabDlh=jQe0OTKEkxw)=wYFucv)o)@}!#A|*&Pq6Y!9`>B zn|U#B6yd>a?lSV)&n#co5gtXJx?ln-`*<5&TrqW^TytBo&Kc0629*sSS!oaDksi+& zBuGzpnX?_lPDg_8bRfw<&Ck(j*XTgKFSNUCPQu{Rgwe9-xGz=NIjo9MWh=evA+t!X z>X1gxzEqxR#+}17ui_Y^;#N8HmyL}xwPF7%&KwPXrQ4T<6K4!gm4dLuj}19qLglYa zp&c)JwFbBt5XkZBzNcsfh=CVxpy!z}8t(8jM}^ z@R{4LctEDeTtEN&aI|FG*hwUQeWZwsa(CoO~q)sxGKOvLa{2tj@O;>2##tuGZYvp18ITu6wT6xB% zv1X4Gbi!uyoU%0>`==R0B=dr^Q3Dj)(vwbKC4;U=Q~JCwz-mJw&f&dzanja%@;`%Hy;zZsw$Zf<43%7 zJSO%?(pw=#QS&F=&_pzd^mqQT$|@R@aZUGwhWM4MH@iRhqy6s3zw%7?v{s+rxV_eW zN^hW9WtF&jnBL{&Jo{9$84_#VeO-M0=KpoG`=dX&5g&Rwj4_?@F}I+^YBVD!9a&(E zk?|RLjo%ykjy$gxq@I`{^O<{A@He(J_Q&4u6}_@kc|Bmwq?#>nd}@9}V{lB$mFCr; zOuE@u8jOM^voro;l<#F-)u(DF`};M8Y+Y{V!PU z$L9*uX2%LKv;C107NGp6EnTTL9b3n- z>3^N#T45j6uW31_`8oV5Q`5+PoW4zMPi;;4eV$`E#*IwmX)6!#{7uOmT1-){>6j8y 
zWYk}HNb5I$?al609tLE4M#$=xo(3#d7zms+7*6>591jd`ZEb5ZsfTwoXhPf4fcll6 z*y=7_YJ-29q@ar)2>d!kE}W@UapM5zHDvzOb~tG@#Iew{^16#eru{ z;h6_l8kPzoN0#RR|nG1USRIo-(lF(%E{)2<=A}d^)>;xT|2DqUW zG4_N$(BwK#WXT*IhbW~xih6|f4{=cgE>hwvLw*zjLqGgC-%3qztP=`7E+ToMmUQx? z6uA1Ap1{P&farQekBVe2+)$3J{^HHq zO6*9#)2m1+8)5{!jGzQ&cnb_|6blky>(&rk_9zAC1Gb^$7b_W)bs@tDM>|nP8}we=Y0B5yXCSo{ zC6w0z$q#*rGGa?~S3r}_zLCUi`^H!hS;qFAui|sG%3mfu#i)K-f9gZlRo0DVOmn`* zQyZ0cQhJe|{ju&MIrA6ARqxb^a}UxwcN91d!k+VeNz646hUe&Ww5gM>1jkc;%`bz8 zJaU9Zn9JfU(hu+t@pGDA3n6vfw!?j9h~RLC?;}2QNi~nU;5M2*(X0IsfAJr$b-(+Y zZ^y(@YXV~C;GFie?s90?K&b(9Q!h)tc6%v+N&xH-b(jwn>ZiIfU)|MixgS);Y1*ZqJ8m$w_wCWiaCoJ)c_X+tcLJF;eqb` zyY73-I*!;p<5b{JD<*3yy zE`7x|g6~4W(_duOFX#J|Bs*<|ULlnJUk)f*EzuppzG zClMtSvd&X`=u^oHA4xXl&<$3WJ z7AT1p!hr)<;RHO`)Iin(xp6qw4oxuIw`!#=A1Bo_+LL4U?wtqS-FpwZ3u{_+R%d1R zgAwQ}gq+ZJ-JgB$t|o7BaE4I6J%$cWXywXGVDuCvjAdUagOAPRQRDfKeDaBhW7zV6 zQ4S*0n`uR^^WFndoTeWut^ccfSoyuHH@bJ;z0kez-1+W3?RS4f6Hc~F^rey<__+#B zG3gga-S7Rz_3khK^o}N@n=x69;~dNKJ^EDW;SuKn;ym_<$%DxXb(C0ylxg|_lI3Q* z)fwFx*q5w&tWF(@!$x9&6Ztr&;9k}t<>PpZ)o+dAaZSV+t&i($YSR3u?qojs)CE24 zz1}_c_*QrR;u*c+=)Ud-K8P_q?`DSMIImFC7J%V1ju zeFC;VPAp>vc>6KF7|49iRtZ+3@L*aJ=*gH)!1 z*Xo%4sqeqb0Er)iJ}H_bv^9Z`6%=KdJV3wr2$q@5lIJ*3Qa?-}nKb{-BW|S^-(tWcsLk_pSZz*-x;yI^RITN;50ev~S6TuB z9fgcqCP`7QsMB6z>_zRn_9u>=z#E)lqsE4A-Evv70< zIc|{6<0mJpQAIkwur!SI=~Gq~a;z$+Byw5O*tZ=ef4Y4bqD);Ia=kPCWF2)98dL^%~vez)bhY5Keltcjjii= zu|)QfYQs8AdT;7gR=@c3FKJL>*w7%!q*+f%6VS=qrLE2d#iw>JK5|Z?^VxU1D|D{V0obQDokHbi|^=Bpw?Cp$fn;Z;U>5q2L#Vx_SRme(mrwKhg zN%Et~49O%YmTy1xw=Q=te*U~T8a z!;_jgzo8XSeVUs#VuIsxI+nrsmtgI;m1{&hF(WkLN3gE*5J|%}FGwPkJYodO*#|15 zWwUnl%cL*f72HcDj0nMF+QCc_{M}PpoOh znL8CZ4=)?dwj^uC{4xYWq;%XoF~^xYbR))UJU? 
za;=nRtd!>TTMTEpQpaOYVUGbyF6yu4hhkuzNm^c{6!A zY%&j|%!Y~MQ28BgTiK^gGyxmRaQwpim_LPJ%tDH!=`JrNtRd<6G}MCmCcz`^KlA;e zo#VQk3p56*FWlCO`;D6qR0n(AjR#tJ*KsfoxfeMmlhXP1Z+_6NaDgEq#^~&<7)Pu6 zNZ%Ps1E;@((aS;2zx2`a_=h~0q6r6!WOE^dRzkM*g`lT=jM-9V9A4ML%bMDlF?WMM zE#R<-v#AS49tgdr1&lxV-PgJ|-@K=D!WlhpvKbGr;}iBXP!6XT$F;1-rNVPzD(2kL zz=@qct|Ko7C;BvY>K}T>fnIgyFc;CP9_hLAOlgbk*`GzW=IF+r)3rZU1`NyE56AE< zOL`j6bFdDv)8-MIPI`J|KwCa6(~FizJF$RsFcf6jVA)zx=$c+}Y6VsL$CaM?@Nwau zgflJYG_RUCNj-Rj1f6aACsJ*XmcDoYsQZ`Scu5Zf?ugAxn;78QU?hm>@L&O_u(7_O zuku~cgi;?QV2-U3b5rj({*j;D(DnufZU)U1=C`Ru>~sJv@pJp8o-~hFJ|R5Trhc%q zsy)aI&`g+-;8x#xZPmzJs}b?x1%iIobQ1Zy|M0o)^3z)}Xp1%Uvycr*Dd^(|dIi?` z3%a=Y;A*^A_l`cKaR0s@+R_R7jaT$4Dt*x|J}kgV%;o@G6oy9XC0t*y-0tZCzxu1~ zmZg}`dZ=-t=-I|tKrms5ixW*^^{T!81SmlbPhlFrXlDod#7r^z!7=5KuTn}wYJapD zH#hiz#QCj*?#Di_N4VlzEl()DwWjvSqX*rmKS!Gr^vi!?I4mk(@ICpBNh|$Z0bTu7 zi~vop*~!NYvhgpJA>cyD^}?{A_KYCJPpt{q=#=IOnV<(RA$Sk)o<7>~K>NSXUE*Q5 z08ZKPACmg0-h@C~O+y~L$Ebmlj!${VF_7r}YU0zp`)siBD}0&9Y7nDn>SwfxDktiP zdXfiRK<((g&b*qCxBNs;2uB8Q@Z*m5!RJL4I`X~9gdGtAf8@bwnRy*gE-PNz!|vc0 z=vhZeGjE7n9jh(pYK<{j9_S9)p}ydHPko~6Zt3EE=a7daOAx&4<=VAF-L1o3j!UdW zM^GJsUB7dsN<oxhaNQ3@x#6t^Rzs2mT_!6 z^Puww-7@f|dGo5i<;$w6AmlE$^ZJ*f1I3fubP|>yZv&I`W2Ndml z2)7>~ViiHFke4rSb$|QsKO3)~i+X9;fkQg-fa*QY+McndJ$3J0x!&Enxfieh+R^)E z@7~$(-h73Vd2_B6BkVMr;dh@lC%}UmehyhnW(-|_p~XN#23il(@JinEp5A}_{9*T~KIM1m>`|vz%W9I_b!TXTHQtgQ@2CBWK572MCB1Skd`d_04BaTK zGT{F3kLRb<^MRcZ?tk(EAgji8fO`L@L^%(N-lRex>o)B{Af#saKS(z^{P0ki^tF`6 z;Xxjvu^OMc4(T2pphj^bJ>!_;4F&YA71h-xT{yI>oY3iiV@&Y84)P3^5+7 zl?~cmO(hAusPjD{uQ-Rat>o8@Q|606K`3pljH^-M=t|qTRCXu z${}W2NFV9h{xK*a66Dq=G`RlxpYkx^o0{e+3zwpJ2-Me4E-2L8fYQMAQB}~X-e(E8Vj%Xw@(DhP}egT}5KF+J#09WXKxN z2~qE1;|)=2UgmcXsuW4ZR^vu`aqaq1!oNMO3dt6aT*Q@bny;?h;5|odF!`n(GRBE&YSF(X%pm zVe_DSY|J-@4-fNP*O9Lq6 zN*y&G?1=jxnZ^e~*i;#!-h!Z1)Q2UCtQ-qoYK8tF71_y8l(MIV>wQkP=ai!zl99f7 zOoGOWE3fGDYs}=gK%>6-mEY*FGbg0UBve?X&T=e`Z1xFhpyyE2re&OcX#k|1!ozA} ztfE{K33}R=tx6op2+yuG8H?tal+Z_t% z0O@~hp;J0NM>7pl1x!x8#SVvEErWCvqyGWJ^_4P-Hfr?{qR7;<6r!*75Sa*n#_a$` 
zh>MpY{bWMU^D{4f@4fD+XU=txJi6K4_&|#}+q5lxAmy`d&`G@p(nr1DJ$_?_x_+SJ zaPYzIRU^^2Wg~-^HitT~hHW`-UB{-+#nv_UoQ!G9;8}Jqwyil{rBypMZ5jJo>tvDK zBAb>s7T8l-<=J+X8%><8jKI|BF`#K{S9GN-)*RkMywBQ>?K|!TCJD<=?IE^EcTRQ! z+h-SH^a6i{iPL;rrkwK_lOuUS8S~g@z*Su%69WP>#1(ywi-!SUimeVYs3FAwa12W{ z%}O^n^ac&BG;g2hU8+E6CGyeL?#n-}ea?Ei8w1wG7K3R&FqZ)=WH@WeR`@iMg$q?D zeffIjox4Keq(zo}iZT6)JYswpf>j6x&G1Rm=Pqb))r4bFNc%ZZBlS0b<_W!G?r!(m z%eRCtuVco9dx?>4qjQyqngAH+3q-TTZU-}Uy%4^-9{DD@R?rPe+zp4D5JaT`Fno`{X!e8Fzju~jH^PmTt zQYk^2A5EH>d_<2|6A?rQ=1O{v1G2JK4S=E+9D4>%*T_9?;`JK1!=)K+>-JP3yJJPedAm=8j z&|FUg67`aG=NKMo!cd0h3qHACsFSF_alwsa3~*|Cic5Y`Nc?&IMLMQen>B6zGI1?` zm9O;bq%?-+bSk~3Pr?cwj!lG? z)t~cY0@Y{dWT$qf`CfU+#7X)m@s@;NO3dYSyp)oJ>L5pZjK-p9^94vDysD>__q+f7 zpMF;p89p2WQHab*W4R_qvmStzW!{^*y}jL?JIBL-4600gHoC9<^tL9BOg73140XUb zQ^w#l8Fa9x{|uP!O^(B8OkC2R_pc1^Y7H4&ZKVa8SK`Hcf5Rsx7H8shMZ7;WEhs`T z20wJebR2+8tdQctw14zZKGprR|LHIEmB4&wY20oM>HNW0O0lj7A2&4VyL#n)z4C5T z>3o1?eVr{Edgti2-fO+B1DppdS-ox;;~FTyeI29(zxEvOTxR+s!FW#hxTzE}r~dRx zDH(Gt_(grj1quMz5k-I40lyVjsd0d-sd@K z=#NSI!5$N9y~S6q9%!<~ie;=yQKsfkRw|{#a(rV>{fdh;W%*&^EFNvqO5bSs&Z`LF z$ylqe?Dwd^A7Ym&+EiqM@vt00I;gDaLlJ!beM5INKJ}TE?#X8*8TEtT!(<2(*uk?W zo?P#4-=xp!F95k^P7x3q2luC>A}oAxBpUL~p%(SZ@Uk4@BzVuOi2+}7i|mV@o?$z+ z?W^OUHhmr-e(%+@`FgZz$(T={SH1BT2tKA`KN5oSjWx17X?&1HCgq?_RBAD2 z(nDFMOxx<~Zz`bt$X>XBZ^wP+?IQaJTk()`R9=&fJ7)9%b@&}^S)pC0`XWf8^yMV* zTSDsMPx_QTC|6`!QY6kJkgj(_i+2COA?y4}AJv9)o#lt*;+mx!rx@ zQDs=j@n>QtG^tMo;7tBiGR2cFY4UZgXnSADQ6Mzw{UW#?q7E^?!Zvw=>FZGd<# zRJzkR^9bI*0Z_RFuw2PgRaD>ewjU@Ux@+S*v{?(X01-hKPJ zR{1V#`-;AH*Xrf1SNFS5enwx~(n@qp24iLe$k-q`+pa0oM>cVy1#~PEB;!VAKy2d2 zqpBQrl(yDmR~MKhQqVee!HI(c3BDGr<6g zKy|<70V_W9&9)LI*U0W`qP?%}C}5!j?MT~E*lt1{#kLd8kq@;6g}nc9%@Nm%;d1)k zSCy+Q>PT{tN3)bsMEWdE!hU@|u5(^rgL(AvgYL^;Wpb_%-8|)nnt>yl>hR(ttKCK2 z0lIk|E)Iz|s?$MhoJtJAM2K!ds@HA&WT*d-76w|g^##coFZxT#N1^&UpQ`ZO1Olq(@qB6po%GSJnsWfm* zub9w2YM9z_-q_C1*@0&{_|8|F^^9Yl^X3yToH(O2r+|_Hon9md-(eXU%RWpDZuMu8 zuFKGqIEG%2!8*!-_i^5Z)$R< z^?ER^&c*FxouvCovyw4t 
zrBM%-8K*y3A+=x_ky#m${abp=$AbrZvd0=j@vWutk*ylBwF0&#^vLq@6Xc_Qjty2= zg_j{vn?qmv(M#RyuUzi_{U2V@iANGKQHg43KlqGIot(8z^-+Q?4cHgr%^U1T zWySBxTYBg4r{sZfZKs)&_7INu&vc~fqaLtf>J9Q0*^#ZZ?{^mC3+=|&w# zdn(gtKqfcMAsAH}en=xSDb*0jD5##8Y$C557d<>bpOq>~Ov=t> z)oD|btT^gLgglg}SJdHGZnGE~4HDA1Z(a2iVvVm1Wv6Vhk6T^%KwC?4QmqNICeVAV zTx&x8KwCvwy<^f%oN38u;tikM#!S+7_4bPcO}fDaGv53_?uEd|002M$Nklg1L-{jGq9Zdo*9(IpiKI*>uH#YPIPPW6aj!%742qsJ|L?Kls z)hDm@d+OQE?%wSkT{H~aUo2W<)Y7k{_RUB(`Vx+3+hjg9Vq8d-C}-0~DU#7GqPWUi z-@M*p%AydaLbHemeUL#tj&*J8i7QtTwzXv{ZRYCxU`ksx^o7YM9^dTlysPIUBo#dU zge?*3q*%FTeH``2#+V!!o{YOLz~?W^5*Ek#U;-4z)eqxHi!wolJ@o^W2rbc2MUt)t zVaD;@on3vNJ|B#8TY6-}M_{vu8wy$&10{(k{JMNqWK=KdR9}veYc%T^V;Or@r}vXe zO_$AXv5qg5@wUzp7av(uU1*CLcVgl$mfK>YvLG8{VBuvMqoxBW+b1!dp6V3!NV=Yo zb9yS{yz8i9o~C2ns$<&2#=Rh#S6uV0<9I4#y5q{9(x!H5o^;I{r(&4zc;Upk zDj8@lK26Ss=5!yH1{NV~(#u+b=WuKnS*Ls5IfaLy*^jDHZB%+qFC+6X^?90XwN2W7 zqc_#fJ-&=0IenOnQ4Pui4Rt?^I1EH=N%;4F{C@X4-+D_Ey{#N*G-=K;RC)z4n@&3M z>bdiJ_(EqR4cJHeD%4ZYt#)7f%9apya@9%v*q|7F0I$i%M1lCtFB(D3BV-eW84!HTxfBj`~DVfAN8j$;1rwT+L=`a;E#4zxPyk zIv6_K4h7$cQQ|2hn(?@^0Yih`csPY3+QzHtx%UBFN^?}3O z4$=xFqp}|jKpDhu^~?BhQFnlzePK;^Lp0gnW!p{qn0~ibRvD?mC@hR9bJj-wX1~ii zkPaAO37sK8i{n}X;f``yuhcKRl<&u_-p+uN@`;3o%My)31-C+TfxMv${WBWFwzW5Y zL&sW#hz2?O2A60*{E5}>rFXcrVq(pOH=jlUZm``KEPw`#$m%DCF=ry5x=9~!+&@w@ z%QEQFf0EPdDKTiLovg1gB3@#kHH?4Mrsz_P2f3xdR-|=plVg%R;8FLi$R21M)Jp7K zaZ)Xu804^#x%lPBq=;2xI3;>Yh5Gmhxk(5ZV~OYXjJQo|s|xhALFtrkn6}w^Vy`mP zHRa@EH`=noo^QQUl`(-Wf(emM4Dvlh_~ab?@SzdHPt_-G5yB5UlcZz66=NBD#{fPg zVa{$X%amVvHC@eHr}9agUS%uYe8ZD{x7S{~*ZtZry{21Rn)GroJkQTW{RgWS^CU z8J&RR0jiw%1C1fxEU@VnNUH^N##kYtj<@G7tabnOfA>Q7&;RL5TFubJLk#Kys3=V& zloFj6^dB7TYL)tGcmCY7noMu%LWBL_`U>M~`of?tHn#NowyY;sFJ79($1vg8F;W#t zOYFD|i0x#lWE>g7OS!o+rt{JJC8v+LsqK$CU0UkIEnb^Ny4Z=wWZM@SJiMieoI25= zwt+Bl<^~5{nZ3>QpWKEZd2I=q4Y<%;bJ>w&qILO6ZDrZm>^``9U&bHmrR!Y3Wb6(rRQ)4Ztpq zU0sBo)m;V$%QKN)Rh^yP=7K@vPPDD05~PXS*Oqm$`#5?}(e~!!enc9vL)X}a zGseUx3#ETX8_T5>#frN;I(WDXl$z3@oGnz}qt6zeWr)#9S&j#lT@WGPc*snIP85q 
z*cvAE-vdkaE9y#vXPhwN#B$i(xPH+6i(mL|tbB3e>%-hYB3#}eaaOaw^XJcL^#X-` zt$wX_U-{a4cW%oQ@*X7;dD2ocN1cJ0BifKZAr+$1=LGzoZne5?5`y2s`4ue_@_~pr z_3K2Tg{3%I#4EfcnvzO{nsOYx%{C;5V4EYEr=Hp9e){h`(|zk#-qfUq6)S0Jlol&O zR++4dys3u+A3V6-z5d!Yt?)dhRWaV1%2fGG_tvX>-KRdMJ=p8oR-nDNaRRdgVQ=sv zeXvKz<6cdykd-s3Gn^>${8%L=oX)1waaehgCyE)?$4nZR&^h8|0>yCsTi&!RajT6P zIm=V!(F3TcX1H%l8q-8ly|EKK6b@SLIn=1@lpbT{@hA=*6dTf8k-eA_qnvl!#Zzc9 zM0|uHAEE+p5@tw9fXeZGNqfJy&#h{|zTQHi)$ZJm!b6fevNX|%Kb4XBbEMf?q6{{X zrTn>%Tl`iR&`63C#URa!E}0a`lVke9ujQ4Dinh~j>556{jtC%cm^)rp*rT&LBulsL`+@Dg##H9xnwt6+eJJ!cX-N zZ2R_lHJxvxDVc` z_s%bYSqo{k4;z1+Sj+k_ZG>g@mXZ6ou;WmsX-Q0i*04m;>aA909^60de)Sh$(iVbU zom4qX>!8%hG53Q>S?wKl96)qXedbjA9nPKC8#6Q!WCUUF@E3n{vwPxcR&JUx_Y?X< z3}i9P^e4S06;Vi7uQC>-S%rK5>fKnG%Ui8Ei3q=&UEZj%ria9sWH4D}xId>a{H?}B zpYcux_20ced}#JeBip=IDl3yMEkE+rN4wWvy4`*I&u(OSkCinw<5RI(<-T_9gYN1( z=TvX!V|AWY!#lSQyQ^<&BK|@=dMO;?>x44+VK1O4oN!!)P7_OE{|;!h<3ojqm47Jk zsqo+%X0yvUF_kT4>RISdD`22y)md*UCsjoGQwZ5i9{=r~E3Bqx^SxEIXIu~UqPf}{ zle*?gMBxs-v#k!ce2)|QUSZ{qOXIkGByYE@nRn)|3Ra+XiboYgoS!`48 zqqc&r!#^kH#2PA?93QkV@-dml2PV`9{I$t6Z)!k>JjbCY*c@jpPP26&?6iqDz96R& z1%@z#qC|Qh6Y@w8rI1*(6UR0a;B`%!*(S!DbIxg!`1ljl1^TXm&}g(U6P0yFh5D&4 zo$J2y=5BZGj^6jpg$M<0wnHmbLzlW4syipU;RQ7gTvrdtDP}=A}|K_ZRvatoH8XN8hh{ zYl{<QD${+X4q}T_*sFkxCB;eSs7Lzfie72iNGEE|F>AZ_=A)gI zQ#>x^9Kz!EoR=P`)%HMr_>^(x`jvU0!mj}yVM?zuC#5U)NpdIg<}jA=tCrR2T8@6= zY^I%o!tjr%6whNPYK~E*;^Q9E?0!vYiLa7!9 zSI=UJ)He#%fKHTO8Ph|*WhT_VDX0hPJ^QyU;%C->-6D^e~+(lbj9FqLYLsOGym4H9Vuuq%GGZShi-N?km8@g6Vb7fmA-snmO z9(7KfTUWwRc(bMnA{W4$T4`R_T^&Ao|ETtV^TkPQ<-^Q*K|mdWL!kOuVO>#uUc7MF z{g*#-uKVV{yQ_V$azgC`KPsR_Sw@O3M!^Vn3isH(9(5;P+61uqEn2|aLdbxW5=Am; zvNt+bwaDS%hh!_CZ8+Qs+|+opq6_m+Kf4x|Z8LP0p-jmBz~?u+-~NB@=t2h$wz7R` zKf9cqR-Q@>FvP@jsai1`E2QzycO*<7$0(_;Dozm?>ao?X{O|1*xnoJea&0B^x}&1@ zj#(1KQEfyUxejSBr>A)72kKeKs3QrHbKjckkT&vp>Gk{myT^qe(Nbol}UDyQPAl_GR)JBbK&4Y<3sVUy5xR3}8$Ip3tY5zx-oc zI%#1Cl$n7r2K^kEJ#s{^>S$^zVTZsV^08N^+k{Wu_?62MH z^RN~2JX`G8&Ex{xhay#IaZvU7esbOPGRj}K1wKR&Ig=!minc*k>DU8|R1$*<3(a%Y?HWV<~j+JfUD$R;HuXyCc 
zF1hbJeNLMgNguN>lFxuWE~q)S89BbJPr2)pNZoJ!hn?=GUR{=xJPE~r^<{<*umZS& zKq2pdsSu#=3t45x4($>BEvU$(Poy%*u@%Z9^jrs~oGihGN zIAIFX8KP`rfj6K$uUXUh?)=F@Kt%r&incw==FrW^BiWj zx?!J$O?gpzz&@U;I>)(-Db{n0Bx(F2 zcG`I(S>ANVrW?od@Ewd<{)9d=m+uhr@ zbfP*NudB;18nWEB*1h)Kz3ww#T2~$C3CwnsAg-(d+0}k#J&Sy;fXKHlJ zh?uL`*hFByl=cr1(!7J2b~I^8L`{fB6GDzR+exSF^FSZGAJH@>d{J8# zo_K7d`_9|@-CI|8y4&~lT!LQE9joEO)7kfJHhcs=3><7$4^%Ap=I4W zCda<@nER%SQ9mFvuEYg-lp)Izwmj#JH~q-2h;hhCpE}O>R%1((L`{~rxD&Xou|*G} z{*BLbXRxilM;n0K3_Cuv-~IU4H}!hVUA>+#{m8booXZd%4iz{`4YDILnJh~9S9#Gk zqJ~FNmhpIu4_HhA*X0mLrAJ*=)T{^8(4&9wd<*9++VA`&Y#`71k1o|Hz@CHm~RJR^N&z_m>JFqIhREkCAy^xPubi|A8}wG6ecs{S;M+-c=8 z-|^J$qP+d7{AwS%F^O~HOoPsJCW9c@2jptLXMFU;)KMeO(On43JWM=?_b~e3PuY*+ zE4ruE(`op5SToUUSStLr>-W1~`GwbXi&9-7Mw>~ExVyA?MQ^X*ea-A^UeQNUn7!&v zbls2s)TSn**`4~U`2uAa4}uW~e?0g=NX0*yz~$6eID>P*)t z-PL!m>D{~A-TEhZuu?wir1jul*S+)lLHFz@WliPt8wM~JCrhd&DyTWo{AoG38K@NT zQcu58NmPghhB$N$Q2C)OImazIXjb8Y8mZ=&sfKs{v^c6R45#_1LzGShl255TGSRAI zM4M)&GErqyA@cGe`a+mG%^~>5>uIWm#FU1qR6VXAykkE`8RWAJ$}>~zJJ2!3r3miB zO9@M;PHNpGu2bg+Ec;H8b0s)=)C%KO{^bNZ%g2-}rd<>-DsQMkk8uP3e9B|04z*ZS ze(q7d-cNUs?tV(|<<6nN{kTh&Ok$MEtftm6i9LQ;#ue_Jsp*u$(y2tfkm|$gFuqnyV=SXz z^+==Zy0^abRkrf12d$ z_b^%8c^IAVm+V~GmnnCZsp-LQh9nO8SU1t&f7t!2UwXNF_sx5;D$7ZU6>03devr@q zbYdo<8+wBVAM((xOr3@O0#F}ndGEE6Hf=vz#JS=iXk!sR-K08wCT>L z^zaM)>brV}Gvgpdsv+Fg3XGPQ^HbZc>&dyTZJs!fl1Wi0wNK)EGidviKb&ejvQpB2 z35KldRA?B0dh<*WDttR2ll!SdNm|>6$K8&=g!HGcII>h!M4=#xW8)$P+?e>C7T= zdT6RMg3IX-ic|T_=kZK>v84gyq!jwLQPY&$^wb~fV``VN#C{Aqy?%(RjwO=y${TU0 z-(s)H;7ihT%*yoOzcGpDc+h~tokLyy?jPw3bBZ6)gS;HAXEdQ*VLJ;#nm)!Vy0CP# z1}*kzOP8@T9+DK^o+jA*9q66iyBcHTm2ish?yPE}t#=7)ukDWB&%LWj_YNP#(FA)> zd#iVtjPs>R$;RGpVKJ;7F#(q^{80rfy1>pHuiKOF?DN*&)`rIB?e*@-3wmhzv+Lb+ zm-pqT1lmKNQ3Q(7Le?#9!D|T*^@gB}Iv40Gd-oW}0j9nckyMOr5@1qm(E8v^FDQ92 zO%A!>rH0$ZM{CNqHfl;?p$D)8wn>kJ@hTM>WB=x+>PO@Klh5#uZ%IT!bkYc%hTGdo z1mo2{Gtin9)eqi8J&^`Of5?T2Cbn#pdRexbUPk{k`O0^hpb?y(+F3-O%6%Mp`*)I0 zmcgFoNT1{{_n@1>dB<}Q;jMNzynpq+R%mz# 
zaoqi*oKob&C#=HI^}y5IRBzN^&oi@a0obT`B&j!(dizS4Cy^mEfrvs$Uvbm>tAFml zf1&%v&%dM-sYHV=i6$F;N;YDDrM9))(<|y;dHDmahOpmRpKO*}tTey*@}BnN=}iH8 zLqJ^6pwEeHbPVO7kUmdNCzK-!-3$+2M=ErTMSD2i!JGEvy>RgX0a-JNGD9>CpF&73|J=t_2Lhaco;m%hIm$llkSHP zVYC+=^_ie%3!CoP3e+_;{?ZxJbhVsVWLpe0>!p0=vM%)yV?VCMR8nx*X4GMu1Ziv- zJJUfZKlZ1PGAaEyRCU1eN;=mOCcx)IDM6DnVN3AgV?VJ>I(MD)GQ4g}sCdTyaZ&`v zMPA7#N&WF0N!AO;Hf7`uBs`qAPaG3zeVB$xcTB#ulC4R$K8_kI+z&Lte!zrVd5OfB z_K04q_qZm^PwH!^kDSwco6m~DggGh@KQh!h$hwtJxTjIxN`wvYPNi!4Yjg6!3Q=j47L}1097SF93L7M!yy!Lm(mVJe+xTu0??+}t_!Ay<5?3o?? zX4?Q$6eMAtX*`vyU?=e`XFTVpI#WJ!b2RHat} z54~cnS-=Vl;~8fw5PDOrYP8~3dQDfEDR$3`j<>ktdbuSNw(odJwS{7faB6v$ky&J5 zk?tb-ljzl_sT>b8<=Z1-Xi=wG>HhfNebD{hZ(Y%VqD%086q6>+;o~?aYRYQ7SzFuC ze%y=Nroop0wL)^V);;-Tr+v-aQpv#rAFON$;v7_Bs8ibQou?47dvn%-c04fX`2?VW zPr?s&_Pcv}g_NHxw@&TeE85%U4@KndUVSx9w=Y-pCV@k(3VF2_UE9xi@Y9?uUs|Cw zLs?YDs6$RLhu!DCc%l1=|Kh3c4}VvmQ_Ca=?pY$EFZgmXA8CvMiwzjkP{jK)Q+Y7CVtt^RG9yD|DU!Qy0nG) z)*j@jN9uw;mE6=B>eV{Ll?QIpz;Fq&UW3u_rnvBI-*Mt(r399g(x;56CCd^w(~22) z(8CA(6E-DV%bFq@NIyNDg` zRt1|fNr;Cj$6@f%z79TQQNjDfi!0qf{vXbF|L*s8W6Rk6d+hO+2jU$nLAjA@KO$zm zVB2>au}dCb!+jITHv;oxLX@GU8B`*#g#!)G3NZB7z!+kqjIam|pYmr$7YfjI!NLQ9 z+!Z~qHx->dx2`d{>mGePC(41rNiFs{qnmytJrHDP=nEmxezIRAFMNwjF08BG*;*IA z3>vkL=jwfs#D}z9WBXC%9yUFPVPA}eP1`li@tppYS7j=W)72idDsL>u@lhihV?i*c z*~XjZr*iOZ*F5rT)j5MK8ym>-9|g-8j;GkB$5GH{{_{yJEKe!IGBR$&qV}Pai|uhp zb9J4AFb}A``tptLTi<*&R*<oki&LFE(KPeYlWr(kGtkOUMJnV|l6DL5x#IzA z4}*-qiPg!j_9bzme=6zvw`|%=EYfprJoq$DCFXz+*S>@#4b>br?ub9>Ib&YhvJLAyPOiIs z`e51+AD>L5JMzn&2p_1Is88cCj-q>7eb57~Gguw!Rw!|Cqc2K6vC;j^KUnK--#qB9 zzt5-Owbxr)O!l?Hz03Y>t#nl)rv0JEJ0XFN^`~Ns=u^2lyHnb- zSQWeKO!2B*P0z_qamYVxFs@_k&tXmNjqg=k^RH1{9iDURG=2;qAEHvRbt-m~v0u{$ zF#SKzm_Z}==iR)yr;nVxriajUz{jmTo-Up*72{Z}it3eV8ynlI{`{OaA4~Z1k8N~M zKBd7bW^!#sgfXOGc}VdI8!NqtMx0ebK%YB^jV6&pmQ>%olb0XRN ztW=7<$o(oeM*lE5(E7^!FaW&{V;bWiA=8V?&?|k!gzW$`KL#&XHv3NOw98Umgf|V< z?i_zA@BAX!MRfE|;!XXne$D9)SXoE>LJPe}Q>SR=@i1fzqTgX zY%|#veMdC*bjRdc52N#uoS1ZrA8#xHUzV77$9{3els8t-#fS;G7J4Wr^r#Ds@jOh) 
zf(Y)j-+Sw<-p#EE^P`WgYtf`4gB;M5CNwU>$O)t{vW!iwkE1uXx_sws-q)$}q z%YfV~iem(REI!H%x`jJ6NF_<22@9JdgeG=qVLAoW*+ z^BztzuUrV10|h@B468EfMxT<&UwP-A23Za2On}>JMRg(`UX;T?;26`?H9cK@?p*Hu z8&5#fGjK`SkD7+(Fi59t%_GAk_qi9&YvuWw?$>|iZS6(WnL-yHXZUiJ3^PW@yj|8< z>ES)Jw{PF=Uisd2J-m2Xw-|X)P<5-XdA;+xK4ZSkm(gS;9OJfo7ztm)b`%m~`kyK1 zkd__&o5~JylT^d2EfS>TT)s@M$E@S!^%gl$%Z7~oU#5<-;i3mEf^T&D$w?|nr{kv} z{#ggjejIDa?rYO>^z2DuSvjj6w>Rg5r3YTMQ!z*r;8z*bf~lV>SJTPQ`ph~6&&QSP z*(j&S_+Wgb2cMWQ=UBwp1+u@Om?h9I=(fM&Nbmj6^Mp7OWI;m0q0g9`+SM1f3NkBZ zlAFHg#Q2OR&ieBNTSMfY9-7?WQe<@M6WUxZ%RKYe-d`(+1HA75)EDRYuftek*{gFUh0nIV;4*eDa6DF}ytBytR`~52# zUFAaz^axJ`+2^+Ii8G4wXH#GLWAe;|FU2QH$6&_5i#3rOP}BFL?nl4&X!pizx4ZxN zC%1K(v7^1hK1^j-LRnphTF+5NtK;upx!PTNV>{jpy{c7bHv8@AMDfN;d)q=(nc$`fFkc>N^$l9m(Ow513CIkgmfa)lV6J@Im?F@zGV$oTsVOo<(fc67E=z> zK5GGs47L-~dJhvf#QBSaH;dYFAik+KNMihALtgZ{HdPv)edRr%?GQd zjcvKgw>`^~ciOS_9aou}uDH|mAUCw17b;wr$ea}H53yxU8}eY(ZEwnJBAE7B90_YG zBQe{}ghd_dA1lIfM^kZrXl1AiMhv=dYy$}lW#un#x!^6KN1F|sG9-A&_s@(3Iomtt z-nt717OUJb>DGjqx0kSr&7#h^^LjvYQ#GXViMxxjNWlV+{-=qd9yVZ^BBMPh^ z*OT)#rtG`5|nyh^L-9&jdMe`Z*{ua$Qa?nzk2Kn4tgA%SPj69NlSfG(|%ix@GNZ0(F{8F@5p;h)I+{z<84fbzJ zUzA54oQh@IBAq#U)rWr!W2T&QIE!e;KZZL|u-t^)vTNP@@7?YG@OR$RfW=C&{Nx{f zx>h9w@^J;tr1-3EyKQL|fO=ZfprqFvY0}K6qGR7QlUA;><4KM@KtOiDXM#;+j-ix6 zhL~UYl3zG^^clCL+3oYRx%t14fJ zyB{?_)0afaG(HX?{Zl^ORRW(Jgx^BwHf%lz{C9rth3@Lx-|61FqNNoD_V3|e?#mm> z#l;BQHePx8O|6`~*gf;yrVeT*&u6;(_YUJ>o#$WV0hjcSs;8+4u7^08DPwwat3t847%p;Pxs?VtZK`zzR24gpZ zY*`=UDVx8>cdbITob}S9<2I4Q4QysQJn!LSyAu7QZ;zoI2pQkHHQ$3MaonHdx|W~H z8rytg+o`llHwLshed|qm#F~Rm6PwCaVbt|o= zjbWOxoF_JIDr*_rsB*}fZaK%sfnRmZpX$v{oGtuOVjEwCVMkn;MRKRnPqLFudK$v= zd{=JOn4?wxGU<8+Dg5U%HAlLl z-@TPr^6W+H&ghcM8HAQDk}p8qT%q5U-unh>IHxtrs65oql23;SfKGXfn|Lv{r{)3%(Sg;NA;S-+43XWpU^>v}L-@WwRx3x9n zh1jPTlLehjuD!4Q&KI=b`3a1u9DNPN6g4eVP=(%CrClX)e7Q|Rs{GW72#rLrZb))6 zQeFP(A0Ku1@3L*@AXe|WI60$>C*Baks?VBMlkyYDnpCeRok{lEs{VAsODLv?DU`zl&p5lsdAO?G;)JX_}sIo5i%xZ z-Z@tg+8&2{9*ff|m7{*g2NT8`Vi1ixJDSmPg*zWq#69p0q<^Wo}U# 
zZsl2bF0S@!y6V@oWgJg2O|LpNJ(VYKojG3BO}RO#d0N#)0{tQQSj*Tv>kykZ=nNRN z7Xe7OT~ca!h8~qN*#|5~Y&*6~cTLPex5~Q+j?tT9hu^iR1z@gBg&j#`68E?dc_Am8 z@lXCwm6dmy%rc(y%fwTRsgBEm+(}{1wsoqFshnj_!iHD%=F)RG)$Sa=b^br@-uy|E zI)vpUld7-mI*u zIl9zQY35`sg28v_z!{~aRJus`e(>Z^c?;=dL@z{xNJk`5U(Yim5kG{Sb}nmUZ|e`i zwmFqy516Cg*Za5st0%i({GV_8;~aNsi-L~(+AG$lHtA7k@9^cf^;(DfdS|nuxfShQ zTT>A9+`r%Ho_JzY0iK9i0#FdGgC~r*bw?i#V=6|N>P-m~Oikk&)Bzd8lLKf-ym4bU z9&mJ&E;|~Zu}uu0t!7`Iw}i+jRzyyp6|&)j*2e^;qGIJ>Nn;bOO2JG+d(BCiIl^K! zhJE3ux@Uj~RMKcIIfqdpNdcQ+l8P`NhH>=0aRRF6l}8D_8C) zW@V+fP{2of)F%YOQB1MMk@XRMu@;?MGbO(C2`)b>^7fIeT3Dw|NNi_-TF| z=uQG+WgN{R$aa-_6sH9p-V*=P|Io1 zlqTV$`~Z%vHo-fskK}zol|{aoYj-u_hh-}^%;^poR~}Y~_2s0;HoKqv*+;wI`Rc1$ z_1yK|G=c&Tcc1V&kPATIv$}r0yK!w(E1#zg=1YTby{1>EUAWgh{V@?mMBxWMd>gGf zL&L=~>@O;<-y^8e1j9+)Xln+Uc`LA#Z6~3Df`E4vxbeiclU3wI0Tq9|QA1xh)Lh1j zCLkV6IekW_Nh+!1QDwN&kCaK(&MZ3w$x4v9i77TfP=yp)OX+X^)Fa)?FWl%}eD11j z(E~PZ)qkW#a=jX>AoG=%-_V_cE$w$c-5t?ttmzayU-;f`_wmnbzcX88tbz$!j4e3g zhl0m&vp!9AJeUn@b~`h#5Iv!wNtZAPCBW}T)n4q)744JvxH5u+eaATC!!JA;?_@dH zBRsn)yt!T#8^d;wBWmMH_tev?`gn}`_3B3V&|_V9`ur_+Qbs!@ihil*A13F+t@rc; z#4FFb8^QyaHr`_s&x_A1*a7AOH*`BfBB1nMe+$JlB6Z=gxyhl9H2672oqgVrNe1MD zU{ln?7)L;Y2r=m|9DgQ$I*zHV~tP=}$Sm8mrIq+^M5tvrQF7IqjR{nbKx`Dy#CQJZLLCdC8N% z;wxXJseH9ud4W@(%2-C9v?(WTn(|EH(|XdU9O@~Xa)nbL1~b37FW z9e*h&^MoyuGo`CC4_i)M$%pJkdXpye%9}QUtMH_q<_DpvJZV$aQ}O$ir~D~rzP+5_ z1Oj?MP(jO=e)PP)N_VUK);He(q`(rq(DD_T+oBD`1aGJG6{_3YmldF{b{kvA#DBD7 zrT5~iJNg{-W_RkG1PBJLzuuP@1xeCq=*I$+3dtH8O+Y3`=Akl%CRoXnB>&fgfp6Z_ zmIiL?lej1bZ`RSj>IVbS9u^QlvrpT5<0f|4cfq{$SnWe9K6B~|QLQ+Io7Rb||K5N1 zRQJFCPdB@3SGDzpeYqx#kgQ1(NVaRGRzGdJ??3md_B*d@zq39xgKzZj_N}|!#TOK8 zKEqo&b|qW>W@Q~&t*vYknN8Sfx*2SRheM-%;>oQf_|A7K6tr{ApiO!4px1e2wPQ_< zIqVtP;TcMUg)JXS_w}oWr|qxmN6@y~^vuWBx`!WL=`Ovop)sXxHRo0Wit5WLw+EEMGF&0^IaBM?@|>K6WnOve zyinnkr?}*w%b{IFkF4aG(oj!)ma}ez4wWa%m39$b%1K&mpqw(2FX36A=4pF{Pi5?< zqw>~r)txdbo^t9>om0BRLpz7t1M^spc~GNT3c@Gwzn@g?IEb8V&m!7I@+#kyrs8Tj zbyWJqXPz|VNtZb^6<&FuNqnU-!#;=POBpq_m6vkWSIhJ5<%EtsusW?ZBXr$Q{LI68 
zmD{cEwTssjbO_B!uLRz&4N{%*sy=0P>hyMZ<C!s&Fr7Z(I zC*~kcAqHY}A&*l7iSsG(p*LzAgFX1eqU3KJ&baU%>8@V7E8As!MTxxkfg&ZgYQ%XO z6B_ULT+^pl&D65gi>CBLl!s%|;FI)sg-v+}PD)AyV(8N!A7A;(&poBr0>0>nF4O)X zFttGRgSI=U4Lf=n>T}E54p4*xO*Xd z05=04{weC4kApD$Npl)~ z7?)C%4}jh#V*?I9DbE{MCiTSGTp1EP6Q4AD;SZat#VK_@Px1@)r#l6 z?$$c{ZSQs;`h{V;?!-pKnl|JD1nCfi!@SPui5P+A?KDiz|W&~k0KOQ2vjR#&^%>UTc*6ra7lX*nxviPQ{HJk z{g}2+`KNIBNmF?8ErKWhp!w9!IlldDfoD(PjDg8$q0B~0CZ9RQJpi>pO24e|?UCDv z9+X2;=bWBpIu7EzlgGRN z^e4`D|M%~`qTo#zT|MY^|ETtgdRqr9zRWr}IB|-vLA|4G8+tX}&WdiU>m~E2^g6gV zcJxWpV+wA#h{x(~f1MwjCtpv89#hSPNRVYI+jvKNt*>5YB~tu|k(_jgLm|QV;^8V! zq!}wR!Ool~xX5#2xo%@E-wG^eC^fZ6L$KQcN0jkg(C!TYpvOl)`7@7puf2Lrf43xq zEdt^1;Iq1rs(|ReR%Bnk@Rn9$x4Nf4a;n=<+Xy!A>NSBcKflv`{4?unZv;tj#vOo= zkv2I%RM6vg*-VwNB_K(>G&gUdK&F%Bg%@y#gTq1>Mf5dbc2=vhWrH7HY=d~~jeF{+ zyWROSYu)8bN4s;6s;#t5h0& zUCKvRg(vTRu(YAdujQ1VdDSzeOIhTJOTNr2O_q~3>&tTFkdlXdS*rBOw~#(WOE$ig z4*pHVr{0A!8Y1=1(Isw~tfZOa-w&3&Q~9-?G?_1xxs0aDtYzd;&oq^hINV7bL)SCR7Y<85FJ*sd>X|xmf6l{EqoUH>;8I8CQ{F}JDV^(_)lQt)3{C|K`b6rF|Kwxc#h16cfB)x~+JK7N z)xN&QB~oy`PE31+Po3UXuzFR&%CT;HTQ8m0vh7hGV0-a)cU+%0J${lvg2%bE$yD;K z(UXa*);{tR1e5V-qJcbQB+%q>VzD3g#&v!Gpa2*rQx;WMd4*s^@s)`5AIhWM*|WOv z6N5q9^A-iInEh%(i-1~8OH<-4c|;i&qBm$XTC|dXr2C1#{Z#iW|L{9KZRlundL{xb zg=ez+FaN(+w0+^3?y*OY8KylBdcED>tGGP*zPJbb+vpLekj|UVx^Z}x~tb99`Ps2PiikZ@$$`^YucBtZ}qkw zumtXC=9e~!_AW|&?UOg#b z!M@9)KsLrS^x8thKW!cH#~=g=oUB5J&bQll;0+%1IPjKyuUO$E3|5^$yAQ6lC!79f z+Z1 z+ayuSVP3(;PT|_2fg^lmkJv%jd07RZg}s^MMl|Mq83T`Jt)hq)T|# zD^1ExT-Ga3($CQ(J-DQ+uq;n;z^C$(W(rRk(>&=be9jK=d4AY)vK2bqSasDhJjkEI zfY)>on*C%Tuga{&%ENMK1iB9fpcY= zE5n$E%;DcJ>_N#tsEz4}_iFcH`ICOmzG)}DSDPLfpRX`+vOAE_o>4BqKdmhrZ@qTC zyQFsn$4a3FI%C~qgG|AO>YJM@4q)EBc3HvNhE|^0f2vEVBD$Bpzter-XJ%=DeoTxAk{Do4M+vKW2)cQFSNau%I0es8jtOH;uY1GpP{TOa{$d5rKqx0B z_{PWV*Mwrx{{0!6k_Z2VB5baSU`DOf>gAeVi_M!scugBR6wY8>5cmAH*gpbT75(;~ zz1pom`;4|{=ycasg-5}SIR-i9%`YCA{ z-t*j~#hW@zCDXMEjUW97tL);39jmw)C?g_=B<>k7M);gCXdV^Cddsj*GYP|kyt0Gf z82@3up!sfX0({Q%6(I@uQe&o@y1ngFYes{FmNN(Tc`3DY- 
zVK+JTgi$9=UssDf@0E@^6>KC7@SyT7sl*(ZG+D(p@_CfDLTbOlds5BAo2l`zHC zwoQ2|ZN*VezJzCvuF9A7w4u@^Ec28}J?Sz}nu<$$XtG@Ck~VRfCr$DJPd!;CpVCj| zQBRuW&pdTZ^Ta1##V4-9Y8m_--#`=NUY2`4iS1U)$J2f~k-dm!5qz*`T&nc7T&ZZ^ zA{=!4;ZxQkI%Ln`=3r?DxMgW7b1G|EPg%)FJ#|qY1Vck5Q3^tvcAcp3Cu z*cB8IEU}mN=8fxG5xk`jkgO}P;Uc@z-F#PHwR?HDd*Z_pWQQiP_eMz$kxHHqk{e06 zymt+L!?PJIYmkb0XJ z;3tOImgGTAMrwiqzIki6``W*Jxm(f0in;QLWRt?|6fMF-w1p3Z+*7|@xwNB|{~%gh zU+F&jsWrbB*=@7V;IoKccN3Xe+LrYN`5YM!&bg0mLhC>2nWIH46cGwG+FsYtzUUW% zI<3sAZQHlBlByv3`a3ireo|NgL7muc-(HDV-l>XIhq3Yzb~(YOlr|*>N}mS}4~>gr z3v0~9!6}9iQbR5)mB6@z;UootL4#5g7MUPDP-l|HrKuZDthBTGOAC0zh%8fJihVx9 zZrV_V@TenHWj9W55>eytYo(sLxBUps_Lf-57OCpAnnDEj!b>LsG<}Iqg5`Ml5jbzk zVY~s0X9bQOTknn?U(=lu%^Pky?|jiG?=4qH#Lp&dY6#lmiVHM>|AsZ|s0{SrntT_N zp_dWR@iCM^5`9;()@A@yQR8TDI}dx{ZGAxAH^8htXWF(GpL8KPYN0Je(a%vIC`H@! zWIl(huq;=4%C>!CbA@Gj5iN47%w>8KS9y{*$)}Gi5Y>>=MV7$tt&93P;pwB@$un)m3Ps*8%?T2% zwm>0){@{k?yP-txikD6Oa z%$0n|2y#{<5tXFq*_#HKOx1RUstR1<5u~!Kk}C{*g}3#?f>m__XyXXx;F2zAwr>HZ zuar^*a!5@aHfCQQVGD(kafEVIqD`<05o5r_K~thv%WO8J1Y07+UOz~p{v(q!E^P_P zSRk~Oau5wNfVpNv%;jnxf=xBq8-z)1UZUENDGHU-CZF*#1a|Bp5peUqZQi7lFz9pZ z2p>U0kKZn$Ao}b%?XiFQe0S!oUZtZqo$xT^-Fr7h!M2ZR-`ustFh6{W70~fE6s?r2 zPwvULw-xm7++Fh+S@qcB!As6xbgL{YXp{TjTTE0T$5yow8=c4??5ip;CPas;_;CV` z8jA*$+-~uJhXQ^c6r~6*$6L(FCNh7N#AgBYMMwxMg@soO6mm^V0wf7&RWnuDHebkq z6`~|oz6=MMZFW26`HLvi*7~%Zx~4gFIku+dDx=a=IAvsJT1E#=ZBz2?$CI)Tlcyed zQcvYiKNw#+l$OLaF|?`jEh<-ei)i+PFOsngj?9Dl4>{8h=qrGv^(x+adweAcHDo7`~$UE9z))AkL$&h8yuNFP@4rrWo= z=X_5W*_WU1x=-rE91c*;uZtTOZ=}flm$5r z%2dk@W?7W8EvTdl%qCI_TRq+yE}9*1NHal0s^?7L2wt#T?Gzhj#_Vk^X71|YM^-_- zT}Zm(VK;F}*6y8<*TjZlaph7OK_R^%VA5X?OyoFUgkHdEunyBRr59>DgoThak`Fup z5pt@GEI@Rq0a~3uixqZ z;)VO&YuB{ZNc;3x^syrD67f9mrVe~ra#KHcH#b+EZ>YX5+`7g-`@GlKYNEe21=p+k zQRUYLs=Pu#?Vy^wWMWX6-REwM3G=sFEG>4y)KXA|`G42vJCnIq+Prl5-)pC^+WyVnSTWF;J zTE916RQa`x+$t||sV{L!mpOE`JXcRyb9t2uKI;deO})uOeUXf$t#XrZjwk7%Nx7AG z4p#Bga>`5iw4QLy5_{7kBcRMiEE2d^+WwF#vr?xWiLbc73grjYuJqIU@kz?ae2_H^ 
z@*ITFaDmy}xZnMqfACEAi~q}ayKC3j@63u$P6bQ`F(Dk)N|RzZy#xAf1#Tz1$MpGa z)9UiXEq31T{E<)TgA=@ax6@lXGI)7G0+MX0H#qh9n3l<|Ug7~n z641az#1Swa3|w2&tMyu9MeVfqH+zLDnh_Z3ZQO1hC>hnbBFkFlBV=D8xaceWd1WAN zJEHxme4IiT{O7*&PWRjY^!p0rWnbZeJK8@0xvtYcYx*T}?)=H_JV7&0JgPls&mZZ& z@a0W!XW+uz`(ZF+n0(6Qmi=DH?1PjrXon)bLun}EYl|EcSCI!U4}Oy{sX!0yxpVtS zd(SmN4na~A*?e+WtCV5iV1z9+J}|fi7!+Y|$ZnEK)?_5HI0i}t7E;TCkiKzdeDp!9 z&tyr99a-+ldcA^#Xy*o}Y37@ZvqIj>qCo#-;}em#g0?K|NMPoHGmFL;w)Fk1!MJ)< z?{MF}+kNNTuXkU1_9=ZPUC%Wf+0t(~He&OZ8v;QR@~}Qrc!$+v-r)1E{^CaW2j94^ zS6gbENw=feme*jiavfZL@L2lz$sPN*`5~;}m{n+9{~7_0Ni8UZI^z6Y#A0ma!QTk`Y&eIZ8{`FF@b|>6 zFxDw;;v3}9zK{iZmMebBpEOfFm8aqoo_ZJYFX927ylG3yobrHQMw9f(pK=pUz6h2y z$;$$Ub&)5s@nN*c&?H~xbMy&=Cgmo5=9LB*c2wGmuQn$CvOHx?Wz>3>tL)^f z_M{%@6JN`T&%DabGPvYPx@lf%69;TIa89F>u;yuGg&nFarE2LOmN}hCHHX_HU;*HL z+q*#SUq$fTAaNh)kO496ru%pk2EIN2$VT@QKmB<3d%yLHf?%z~=vojwoN(}I%}50< zUc34*$4f8rinJ$U%Z6mJigWGqz3#F;N_hS;odg}hVchPGeVig7uP%rc5bP_FZ#vh0nKVRXndR(lN=g0+j{?huO+u3bg?R zf0$yE#RHbwbNbrFTiQ$hynj?E48ZbUn8&v5#Z36X$%6_2&YV4^K>4J$pTr8kPC$A>8d%P_T>`;YX&2e)E@G3Uf;n zHwxNtNuvIVma5McnC_}?UViCv_wkQE>g_^Dk8CPLj32Y^r`De>lRT`@JRl>!@t51( zAAD0^{bX{{a6al!q(edg+pJa4jA~@<;%D7ux}&EgJ$CT6klF=rdRC2P64Y&LmgEo= zBTqQyiBaI z_xymME*kyu8c3`aSoX+PEf~-W2go|d-6T6W=N-*d9%ErMc2ASZ4G(|p#lw@h%tKE2 zXw2ccC4`O1~h9qoJ4sA82p1zBCdiX?d4?jTRAT~~IJAl{^grwS0 z%X7-+@W38qs{J*ZYg^}8pDt)r*;Dw%XU}Q7#=G5Le(Q?A%I2A$u@nC_U+V&(iyj|> zxOC|qZJXHap7}UiHbkHc!d(T=uU)vO*VAbw=(yUV_6*l(^u$Uw7jhDM7q=Lh1D4A- zdtdpB2dxHPf(h`X;fsN;`ebAObw1b?(BX&;HH91`MnY& zJKb;p`UQV#o4ui?1jou0b2>>t2RM|)%NOfTozhD4!>9G}8AZt=;MVnL|C5#OJLvWxVF3=RH9O}YYy&2rVK%4{P zzdlgGHG%i4=1vFmGM*qC9R$&Gg>9BM+~QU>&ImzCv=zI>rZzw|B+ui06*(18eQ!cu zBxvzh)C>(!!w=csaf~e~XP_IGLU5(qmt`zKw#8TioaHKSLf+E=hc0-6D*1qsQeI%^ zk4XbBt;PP#&Ec_(ypTCBWq_u<%m>VaDYB<)+cKZx z7u8eVe(hf#IJbd!ux_0TR;Dl-tT-%AE4OwNymiF zl+C+exxg{eb8+Lrq8FckTPva`yN4cO%SK%Ac6RP|ue@-i`xqbQ@XD_)W(x3h8F6mS zp+3|$U(lX~44dC-Vky}$dSwjB?0_C6oPO05}Rk zgN?BoTzqC+&}6&sxec<}ZAU5>Dsq*YqO2#ZdB}l_VhWFr=zl2KlB0djx3$mpn0^>Q 
zfL%=(qP=5<0)MSfO-40!!lF6}??7v>ivTv)O_NjrhjN@>z|2iSEG?)LnN>sY!oX)4 zebq{X#{8<=U$Oxjc&pz8uy2w~=$fK!z3_@^WdsJEQl@1bNfX8ll;~>K1W6Ofd45mh z`t7%G=!czNgUE!SZ5-N_uBM0s|B?%m{IGuQPp)=1c^eD^S$)QqBW#T^AJhUwf2)dq z##APcQ}FsnqMrC9I}*#!K_fC21BN8MN*-s*)0g)Nxb^RjB6?uVFZcEEB6ruE$R*3? ztID2xsO!%kxX}+`$_~C2HM2jPZ6oW7ocXdOtK6}wy(&1k47jz*Ew^nzBmQ+(!ySO% z?~bkMtF5cLv&Krj{LSkT?@38q4!gH2)#?Ps2cr%zE%dgvk{oD_ZRVBZFW{J?R94%^Duv3zVORJ8)vq&~JO z@V#HKfwrY-;vaB(Wu@#&la}-*A&WaFI+6K@UwT{;Px5SIf~PhEuey_NzkE&(JZb+T zJT$3~@?5ICNjrsQecHY%51Kjtq)8cB&phGDpY_C(Cv2)e;m}NR30odGH%5pLlG4be ztxB`;i91w28SL;@+55?=j;OR1KDDpnD$fT|K5$!#AKt5d2jQFgqHlB2pD<2(WS%^| z+WqX`f2#Y{|LyzQ8X=$%T%cInj7c^6jN6plQvcp}Uel|pp6-sbZx?b+{@1P??cR9x ze)rgiVzPF?;cXjbqOrV?CB-N=zYmntMwJ7jRqnU=(wfcZBGGD~hFZOV zhozyPa*mi@L2F3go)`+2ReF9;*F&$-7Ds0hupk$B_)>a<1hA4S*lKgYB|iY*Mg))P zqwaoaD1%2b4Y8PPewPTaV;(>?bZ74>5aR6}R9!&1?2K5U^7`fP{FncD{8qt8v&A4W71 z+P9;p0G5^3Mh!8oG0Q)Jh}gSqr9zHakWD~s9nv4N3Hh|GjbF$5F?;*AYC6ZmPpuKC zQxQM(V^(eD!B>6&@*p!`kUXugus-qVsqT@7PIRY^9qo><-|e2zJIps%Zrf%(Inv!b zs`(>c0PaRaTf%MtygD@ObsB$UHCG59(Dl@T1tyk@Why| zRQ8h;gv}Kba)Hm`lG3{SfjYJ&btY}*2{&&7m8UI$lBS?X#iY%=BCEZrBl9U;!ZWY< zEKhOMI&>9Ad1_aMWjW;^B(H6%^rH2Vh5Q|4v$6ugY)LE)A)@ImQ# z5ONM;=Rs(e*>YI^MT2If!}AOWO+HMLZ4w&MsKO*=oaV=8K6EG$j*Cp? 
zlf;CCK{+uA%_`*UFY5_@q_-0kuDswOgaDnu*?0_8I(D2_XHniO(@i!NSgh`)HRyhs zBvc_Hr^>@Y1kT_7v&-Ed{hzPPrZvG?JtFWO7^0b~tAJ)MUOf18{-HDeoH;Qzd$yl` zW<^_K^b3FoF7Hd00%z={Re2$EMRlj@I~$Y;RE1x9NTuz8GleF{+_+L#)RCw#X2cld zGeD!UU2O^{*exDYvgfYG(A~SM-BTZxSNJ5n$F1a`S7T{&GuodkWmPGnokVL73E&Th zBwSDj3w?bkG*bqEo!Uzr;H|CprTvFru`NlYSvE&Kfdc_{jYpbj)UFXsAlu*CKFCfT zb9v|*`XzJ%Z6>Q^!Ol{`FeZZEa4F1M@(yj};Z40~`}*x}?Tj8;y{E8{jlMkx5@^3* zVtQVmQop6`H{RQCvKFwoZIf|i<*-4xD21+`2Imy|Qj~yfMac5pK(UFBKBOvu{=3x{ z7X;KPx#oEUdn1SpiJEoVT+?YjJgSEsL#E%?5k7z)f2zIlj7bEscXqYXtv3?g*Y>k( z`l0yRYuCC*A3NQBUT-eBds^rDyIbAkdLz*0TD&uxc_nrBZI@-kxfSzH{0Lg}2XwCA z)JKJ`9O-Ue7u{|wZg6LDRS#D31|&Yf#1C_}-?0MD`{LQAf(-BBmNV@e`w?FfBsl4c4^>S{;QWS%rNPh9e){7OR^x<$At?SA#CoU~!emvo2K zy@)>bWRAWpr>?YtdQB-4*aHoms}jm-&=kI>9%s@X@Ph=@Mn6coKS){Aj(sn>4=RHx z$HQhkb~wiyLb+!r5qL6Vf8UYrumAW%-Amf<{Qd7<>&;_|a6*OU{;c3?&kEVz~5{z9Q##v`NY3 z#>=FoAd`pa@^&gbr_R_|B&6y-Z|ugzq~4?UFG%>jM1``#z4qG8?zg}CvTywkPuQo} z(A@K@O3=KnE7=)s&p31DxZ1&%1UbIkH=mls<_IP3Wz5-eq}9Sa`O!?sgA9s(?&>@jG#K z!S#sK7~0TyVmpg|OSzrMbaKuCsST)NZqXTNT(X{?H$bCf_&0?MH5O|?yY7$p<}0PQ zMmFI~aY&8ZSL}{rAoDo*6Ct#%$Q>z$BhpYxNNq}DIY)u(P-Ggb1G(p3Q%I-CZr-@v zojj#|&U(uU!55A2d8|^jMQe^CeBmN*mVtnM&`7dWJp}rHvJG%J-=iTm4|DWyd zUZ%x?*{#xQ5`Ff?|0wOb|Tim(s&h2h?6XSgv?wf3lJ>VU^}E_wkBbeVFj zwV{z<=Q&;~a2h)#f6`3x2_NT8KMhs~gzmwn)SG$oMmw^t0o{)#?U~ZSJqJr&%Wz4P z`P8oT$()QSEi&flQZ{+z$?|?YmH)kz4u;Mse4?HaGW(gmK2uqIssBKwW; zEMgwKMpTB5OplxjU=GLuy-EaKz z3*Ec#-nLErXyCPfM~>oOG&AA0)>)J;V!9&nFyKSy&*+DS=ylKdm{yxU_w1G)4%CIn z^wSvO0AbGSf7$S0h4<4XGpYE4tr7nIr>lS5AA~u;hJn)6*ACNvGfxCl4CY( zkI%t>?6cZpqYF2;$MH4&D@yBL)vDNrJ_CRA2A`q^jm~DD%k%oI8#2?5Y0{QxUErF; z-sEa4z!*~&*wYNPp=8>ouGeF9(8N%HfO_F8#Q+CQtk$*OuQa_CA;o$*K*K@jkR_-X z=EMN|XCkn@s&YC49Yr~k+X zdw@>mjbWlSj~i!He<-LJQ$dsC47r{+8bgfRsKgIK3l0E9{b^Wf*dT{LfHgIGgJYD^ z(y%X9USj|^e$fR-_`;u?SKC))Nc`x2$FHpX!ewf9b96l?yjzw_aO`nj<^HHC_qf2RQt|0<3wt^$oru=v~o%Z9aU(tN+%u zAi-OH2%b0ekn;M5R>JkrGH)Xx=w=mrBLndb=(K{Zb97Y?fAZzlb-nVG{ohBmmCpgZ za)0DT!L~mcFB|P%*~G$!jdNq8?B8f*@i 
zQpmxNQk!dHyGzInk}+MC8}^N)?5WJr%)$0B&H$?JM9;yd^0PiiTjfwr+Jw*L&{SEq zJV#$)NjsGRP2#dlzQ~^BhlT`irU;x>qZ*ErO@)DkEXb^8Mr`FLX66<6DNSGALs&H`Y8cYC??^1DSH~ zZ}j6Y2c%GI0@lBC`byZ({evgFfBMTWDR9*-MgPiXpB?tH-!H}*w`p%&zopgbx4O@L z@lmY^@}mR_eQEG@eO2((O83xXJmpURfx`w7V3LHOtFD{@CZUBgf7`e3>J!Xt_duy2 z{BQcH@D(yvyZt~A2qt7vpFY!WE#irCVMb<1b|LI)mym>DZO{e8M+wbwfPCY90Kxxz z-EV#M#qRZ2Z|Gv7}lO4_~MVoiXHpbF%8>- zMY#&6PVIKLZtClGy7I?Mz(hlUU{8p^Ow^TNOaqO_|ATGxCn`wD4%UDSn7~75UhGjB zDvxPzMG6|LGd%D>_SAqEYnUQy1|%`xud=4_w<~@u6kfqxBY#lnJ)ZGYYF6TXj@0L zrPpd1*)GesTzNa&T6IHR>x^s7wg@eI1BxDNz=tX_nXZ>rx^k#SQ!d(Tj@z6H&iB3RIKeih_^vEd04Yq}? ze9(#};I2Ejce~rScqp6RjPn?AkwOLn{Co-XudW5)c?BS^{#(=AN|*=N6ku;|=`AI! zYV+zp9+Kn(Hw57dx>+pXy1|Mze{3aL(L<8F5^zP63}u4$BW#C}Z3N))@FzZ?Jy=AU zLiP_ZYBI=;1V5zcOHla10!HP5QZfqOdaP1&6F&wmdoO>=uX5f`*?nhSgOssd1AI<* zwv&8{pUO^rg(ocYLEGNaLHUx7oP_LzrJlqkd=VY+q)lF6q^wsNz$0)jXMqXYY7GRW z6dJYtj|=HrBddgkw3TQn zyoS{1VKR>a3T%WAfJuF5C+Mesz7cG9{w&`)bLF3^^&vqNSu*53Nk?!+9y4IaOCpz^RD>AvF zidC!@g}z?woA=x}l9iKRbQ(Vz*x_PD+fwu8p6TPL)QLO$hvzIU}plP@C@ z9HDP83u66|5s-*4_ko68tZv0#bbjE#?_QIuPMo^m{bxVP>J>i>2*of>f+p$sN!=B> zrXOrKpkZ9o3(!Tt9pu!D{jDE*dDtjC7LtOJQsYx*oDi}Sgi+as01P-wX{-Y@sl@7b z^aMs2Bs-9w&8)h@kHs>vw`L52U7{U5XAs$-&>J>W4pc9((WEeJ4Ea4ppc-S~!)N!@ z|K48XpeiMhONQhUU+t)Cc6L!Rkq22zwryfZ*gHPgvUPp`rGN!_(1l*)C13Sl!cqjd z?1zMr&%r7_XtEKdt+R}!T}?=}&%V;${_lVPlD2Jabz5t@-P^a;yT?zk)g@$y-SkzA z8@EYL|HJ?5?e49MY-i#wU*KbVS17VyWu~9GK$^v2ciNS3YP9y^8>Je#<>?!wys=r71Y4d|JV@}S<4-;(J+uhxO^ zUX<_8zrYJ~{l<0+y_=P<0RD}yJlehX$}N5P;Ht(HpRmJfaTl}=YS2U`Cn63wD^Cfwz z=~_bXPHT;*Dw@b>DTvk*HTbXn*qv_s=3PA;7GL+<)oS9do>}A#EPU~AN4t7>2eWqX z^fdZjjvn^Yc&+BEMcxr7mTWu&CJcFVNGwx%J6Xv1xl# zv)2j;+^WG4>;Tr=(@;fLnpQyef1YcDFj2^F*gV(`cHp!u4kBRHnMYqM9_q9)d_#W@ za&4_oo}kS6KQy2_thH_QLsWx!5bZkS93AFCg>4x~YiZopsZjBXzp1Wly*(8e=7+@r zN1pz&U9?yA)Pj9`d8$NM)eimhHbPBvSFYaFHka*ggB?Q6ENRM28#0Humgkt^ouf&aDTBNZoU0(1Go`SsA0)5J2T6-L z8CD0OTP)!N#1vmlwN!^radY)U`*J*k&53-Vu-b1U!3#Ra{Wi7Hv~j^ZeqyEj>A&}Q 
z_fP)OP2C=6wV4T6Tq%zUqy<7ZZrt=+Hjbaz?2c=@!F}CV>LVfqzOPT~FtM|Px#6brq57)(~w&UW%#LsFo3%=sgA#Akn zCY~&`#<74G7L&4F7cGEQjaT%>kKfi74c@VQguUXOqCtXQH0Xal7j1*uwxNje-1$@S zfR*gbH)woBd&2o~;y09daz<;@Y*~G6p`Cr-B*o15w|j-RfA%m0BQ#D{e?-CUz3$XW zE`qG!B&x0F)2A92#IE-oDC;Yea>e#GUz=2*y+fcyu0UIX01K7JkKgZ3C}2LRm1bS< zcG ziByh;R3Es2B&?7|5d5`7HwULS$3S{hPYXH3;V+h|HCXZ-} zd29p`G};~(M6FpPLJUPyIt3|_LLa!r37TmdpSKkoItCuuh3;Mk1!#J}-ZocpP&Q$n zm-4||KQ~h9w34@a>3(V42W=Q1HH%bi1woxF5dnQTH}pvXuby} z86lkcopetRQ1T`c{an`kVWpP&)|>BiufB4u`|Rf)?Vfz{gvRBj=i*c6^jUX*oF@&n z7>o5aEUG@{wI%zZp!yBf**3heKI-`-=9I%cMqz)RVLP@ z+-iHm=B!_an+=?+kg4*-^~yaWVn`$oh5VVfoz#iTyvGYLr=#MN$N7xDW%z}1Mob?- zWj{Nc%BA)#@ZBP7`#xDjun!ztYU+E{aMkp`+B@pEWc8%SXY2R z|Ic1kKo?`edQz5GUA0=Wx}z-{SM^@ft?uKW(3TA*OZ{97p4d24SgB#tH1a?zt?(AtH{1{&4YmLqL%)RTs#uaM-tASJFTy; zo$$Ut9PsetN4w8_UbiXbZ*7F|`4m1@yDc^p~ zP@rJ&)xeXyCeb0ZaCA$|!z%nYCgj2|yn-4^s~QlSe&&IdL~9dz5@^(MRBRD@ z*r(incYjH4-!AzfWj5=m*0v%eQ_=>_ywB|NG*g83i%8w-21njtLoZ!0bVC%fQy&z_ zi8?k9__5zbC?TnaJ5F!wLsTDmT62NsvFmrVvZ^fzDJ$sm8r^k&@lEDPW*zPpwg5X+ z4UV)is`9*r9i=ulh1lS($|ftWN4aC>FdP0J*iee&tGm|QjBxjlX`PJWF5+iA9IdtQ#+D3sjAF`Ez7Ik zTAuRE>7CNldeYC?Gi`tBo#u1$DlE&Xi~3Ywt*73ktNQj0oKx8}?-ej2W>T*-M(~KD z$Bm%l%0ikMebo^fQF+#k1-{4HNcLf5%-A{Cra9OmHPH5cpCOv!D*Y5U!`_1iTBaH< zJwqR-z0kPq$$$eiLmm?r_50nkUq0Wx_`Mt5cmMKj2T=O$?}>v80rH{Y10F~86!nD{ z-_{#8j&+Ycs@GL%*_V}A9^`uI`**reexB7~>7k2t@Z2k0Y{QTkrLb2rH6d~<@9I_E zzGLz;n%uDlSytADY*MHdU%ysn36&YV7@+lwdtG&-bb&mZZ& z^z4S|?loW7Ci}M8B|5`w(ad29GVR{OH94>CE^PL=$ktOAkZ~(}j(Vksi(>f3R`ul= zYjjlXjpPm9^oQ!rhe1lc_6K}TN7Ie=UTouq1btrFYIG-0+}FzW9j|EL-Hq4O?cUKX zV0F32oyH#9LwFZBdvf_7x2HVWvdNjKr$(4s60YrPKn4Hh&f?sJLl z_q_*|kF>)-e{C=H+%jL6&+l|=r`qb_===&co*q4SY^{6#i1zR(_(NB`nr<*4_VBm% z6k9AodU_xET%a}3MsAz!>-$bLrU|JbP#BZtAQBrK4XK%=O_Z=5nL~LsK$>UbcAp5LCA}`ED=uYCX|kTQnFD{%fpZGo zH}{AgQ&j^J18kWSl5Q^F!#AUIjt^LMS}i9o`QDFxDPvJS)w_sp&TorofghGWWmSJ# z#y~DyP!{$UsIij-MCPoqD}B& zUT-H=9)LN0=9IP=9QTvvH0Ol&H-G+FJ$cTLiMR|z6Oq**@Mf|dI?$A~$vBAjOZHt8 z4DAAc3}dHRVA*Jk^V?$f=xVgB#kQ~6*{doBk5uo0TVc~sgcuUI;(li 
z`t^dpMut(7ydh;n2FTvhK$GLN$^cgDp>N>XD{n8vCW!@~4CWytYz-aY1{hN8!@k$z zTuC@`27BQh@X&;wke~LmErD%KyLtuVwtftnPKS^KKYT~^RwLb`(zx}O2KPU9F%L0b zDUrffq!_D1NDQn=8ZOFXc;d;Uo#X?}P!4f=|gj`Rn}MY7_< zP)B{|@4MfAyE}Q}F;TB}*Dh^#XCB@TiQEbF+oLQ=hcL4CMb%}h*2d5~ZWnl2WSbx> z?l5L5%hXdMQy%UxC--m?20zlSW%APBm8O=_JFTZJ6<+mJnYEmJnO9tv6IW#?Ec2vY zMg#49;2dXa$g1U({sH7^@PigTIj{DJuMVnZ2y6YI_CZ(q_9&bI9F~4Y!~+5Cv3<#j zTK(EjV9X`_Riwrb6ZDxgYu#6V?uqW7{~s@RH@BIL_~jmE!_Q^h>vwwl#kF^DbuT>s zR`)5rk91uht>Ck?_Y^!_dXo>6tSESl2?xLBq}MyZGP%fVwfTx(hot>|+?pGi1&wXh z`xo@Yb=twzoLieGPPU26Otmr5G{E~MY%`}0ZXb0i;&bDF`X`sVZ+-oeY>=JJ7HCLV zn&s5NJEl(@KjD4Nn|e<+!8Z@aefCSM+S*daTZoVl{F2;z^MlyGmrDW{$OPQWlx_&) zm{Mp_z!H)<r00Y#D#Dmt~~gg9AajTlL+Di zca%E8Gj{Mtl5J7UaVLK90uSVb7;vA$|IpRBD49_^H9e*E-1}P6%^&W!5nCks7g3YIfUC4Du$T>3mX}G_w~N=JKZsD zciGmn0abt6M;oudtC6H%e@C_Is*x%}1V*2Y8{)G=tZjTvd%)_$hAApq)^L+%+83#g zhS+xrm=GVL8rJU5;PI|;={t-ff9=(GyH}q$(LM9b89f7Wv^#ZPKdyNrQ#+Y#qWl(p zcrf{uwjWn&A|45O`^$L{6mDw~6l3LZzOFKAyNcMpSZRCUAqxB1*ZZ=`oYJTM%v1Fu zI`EaYmMbsi*}&Ojcc9GD2byE#ejV|i42W3;kK_z$BO_9&p^5Ej9zp&xANI=7?K7kby@S{tf z=!>Cg9YhC7G_26tv)DY-?=)|qO(uZ*CAG2z6*FQd%qe#QM)4w)Nh`9JQx1`GYd{Ma z!gA!Ql+EZ_1uzOe6}fZL^9!K&#Ky0d#&4>OU=c!DBnp1rztjo9C6?7`R{tDitGuh% zv3W(C2PO@VbHLk2c+-ez_;X;*k4uSSyH3az+~ZHOxrOd_xEI}~*k_Jj2^L-SLpIhX z+9vu1D3(uC_SqP(ngO(59I}ZqLtwJ5)n;BB#@Ifuhcn|gct{PKZHm}MEE#7{X#c*x zf_LGT&SAz$`-fOke~v`e?T&e_0hVTi6v$|Q!!mIqArZJ_gTeog&ON`!K!ml+a+cdx$mZui6!r=;zkR_%{=TgP}0HWv}Bi}@n_KQ<`+Ui&6tNg4iVK1g6|bzglz z1$1X$9!6yHy%*kPdQxtsNf>#isoq+rJf*Gmsf?7L@MZaaw23F54V*#zpi}CcI_CY@ zk+L#hCM%sXqERRLL_UN+4x($M;{jLDpQ*N~!V^Eur!<2kd%@(>Q$A!c`Nf2%Q#GbW z14OO&^~OahF4F(y&pxW)`FeNZd3};w=&+IL3LTn2xgE{qt$)vb?^WG4d&U=8h=FbE z>v|WSyVHH*bL*ifJLT)lF;`|2+}ryz)EA_Fhl(CqH>XbBe(KBs-|+*!Rn zLLVjIsdw=`^Qn#Q=?`Ql5NZe88c(tK|tB?Mr--%Q^3`WMmO4pV_$sL zcVqQxrTb-~a7;9+Ez+|HK4He;1*F#S<4!G0XyhCdqGAkix82a*X7(u-%HBC@aYICq z3XuQ=$xSY1SZfH>J7+}$nGcK)jT0r)!%CT4;QM;JfP-e037B~cNd(l|t4*DEB@=ji zB^%m?SN*P{aO&PZqPLLv{kZfIe+1%aWL&ji$42xptjGinw5s=D8ikQaQ4%ulkPv{Z 
z`!!%&+M>aOzHYrTkvdr?0SxR4_Bju~{K=!;t8Z(|f!aqJpH~AcO2aYz8nQz{o@bS+ z7(Rei+Dkh^@4?##g(Ug$4=Ho}G588N*apKTPV!v~SgEG0sv}`R-;T(V4AW_zVBByA zWZnC8L8Sun=x1IJ$yT*6OJhz3A=_g?^7z5inlMsj`lgTeBv0leVl-Pd4DsjD(f9%v zHieDQjFa^N+KU!b4E%9a$tl)1*QyEje1Lb_F_CqZvb;{5BLGzR~&FA=& zCUMmtl&AHi%Y0Frr|@Y#`7_V9roPOH*MM4BN}gF83Kw+y(G>DM#e?#Dl1hJ+ z^VzzqPkNd*-KlOJXzuBnUs(zBq|tT%^?(0#_rLy+-|cSQx})|$tN&VcNVb^3cxBa& zKFR%t-qriGKIN^IRz8iT3+vS@_w;^fy`S`H0_~P0sELwl$L;~>iRuK*`SF8p3ekpNk)Am(oOtH1nxO~?`a zy5aape>RVn;6HL8vSp2+`MlnsasHTAnY9;Mf%6kjuXUgNjDB3$-a%>D8Am_C1`7Eh zd?2i)Ww=VGvL!cLZJy1I{%)rC9L+Y79b{|2t8BQ->a_}9 zJx5}FqK#)p+B_^hw8WgCvdOSD-!`|ZjwCX8=}Q=} zM{~Z)F}E5eTnNIp^fXJjWBD=~$|5 zE-a(sln3M-U-ePFvpz?g3KBP`BWcLz@KYG|v|)-%J(*Y86}E^!ZK$-svYvcNN1iEh zX;bom2ll=L=TwjiWmR9SPBF0IraWm}&)5o6x+#8-`~&h;8>ccq@H#U<@e%sw+CSH} zIl9BxdrHEZ`BEbK>hV2 zNEiLfm#=p(U%1>o^YL@~Pze*1=(Q?#@xqQ)hc~pH1M0?z!lviW&V9eJ!Zv}R6?2gr&biR-0r~-kv<{$u|Iao>UO>FMjhv_xejWWS#;8B}{^{ zX)z?qOy2Qs=`&|fYd`mC1^4XJ?z;1ju6CdPd~DZn#3imwp|#cO_!<->>5wnO4ODwe z;}**RCU+#RO1ZJhC;U&H#B)KZYr6& zeOcrre{W|Cc6w=j0pF)Y&ZlW#1TNr|70|HaC0&f>(8EQZ2UV$4*7hF-)EK4HO+hqU zLG%MqW0^qOtJ_|^CYaWrU=F}rz}>$trvlX;aRndVPCH~#h z*mM8`sS+^wpd(pC6c2RFIdy*S3lWx|3KmKY0_61V?`a3-#P_I?@mL=%U?2I80>GQw zce>;9|K1dI1c!X?nz;`rST`~xH%{Y#C%-L;0b>yAM=9$f9Qy>oh;mff+pa+OcWW9o z{grZ3e)48tj0Hm;K27+vUg4A(|LsVHx-HZxN5i7u_BJx{ z(3+RL{Q+B)X4@m;Mes#(rttI^@GO(3oy+9cHYWX4{(Gq}(>W(E>C^r>nuH}S^+n~R zJ!rl(a6YI}iyXKJeo*6npmgnQz_GJ-4`<7P%v$KXJu}U`xLF(KNf$!vAX!MX2ys9= zV=7rBMDQXfe8R+KqU{>`^_!R1QIN^DMWntbBFCz_WpyrnlvZ27a?G3jX~ zW&3V-;d^(wGk-n4wr2*Wqt;KiZteQvn1D+>L5HsdK%_??9=hX`tW*@M+SF60bt%s4 z46>9&afqbLxB?I+TKIS{?CbyU8{MD$(OU}A4{DM;B*JH@jdR0#KUV34`akAjY$a$d9udi zCwO9uPqUD7K>xC93{B&`nn;KWpwYCPI|=O1_QQ^>R#R6{&H22hS4^(xtuHHD^+bQp zNqMf<`8LY50XqFDXNCF6hmUkW^W_uW?|kEW_m1AKv%4ElyYUdA-QlM6VU10U{oUBh zm`ho#Vn@5t?_UJMnZTKFVI)wcFOzqXJ4n952cAfjHce>NZ^(66Z;^RTwb;GCp1mQKIA*E>#t^kD-M{=+=kx3f$ z^z(@PkQI-`)?Wi_`Ul9MJN!{_4b`{D{wjF56DIJ;zv8`lgXd!y$7(h|GMT%Z%)yp} 
zmwZk&a$6w);&KK4;F1ncndcmweVdz@#ASYX>A^9T3p9QDUS-3Vax-6M-<-aK@Xg7b zv$@Kj4V+Wi9P?C33Y)^S(Np-cdg@ywa~bdZ#xX%HOZ(v`yxEonr%$t<`677YYyKc< zr?U3cby{Zzmqsh@wUd;Qhz?oGX>E>>tIznv_> z$Af+PJo4?`?)m55>OQghq=HY~D%M5WZ`07;R0-6~7V@gK*R-FQl`vpw1!OMkfNiUK zFII1&3T)9JKqZaZAe`-L?Ki>90ZvbcZB>h=Uii-S?stCkWp<9pW^OfyZ1hk4!`?Sm z!B3n#);;vdX}xtoUjpPJsE-$X;o0K~ro~KPg6=RAY1C95E>-CbBg&y?DX;?bs>EpqMY3R^{ee{t> zAVk1*PV0A~e}KA=z@sSe0TM*3cCnw6IUrWHEnsqp@en|Mzs4P!qus}z+U@?Etz+G{ zp1;@WRqx%6?GMXsUZL8kNqk-kK^b8{6HdK=bdh@25fEph+cYc^yrYjsm%;&BQm7w4@{ioZ@#|W{mL(V zTNiN!3f^bvtGQQ%XrEKdDD#lc)-kQD>&t*!`uM{Ut#Yg=0{FtSTY5l}l|05Kw%2Z$ zA6p@Oe4Mwr(6sZvhqOBFPrnVgzSMje_39%-5`YW?vcavE)g~@hU@EYd8__7+4?iM) zV3n~?(z6R}X=CoaD)+hDk8^ZLU+t?}1_ud0K2V6?eXE&DjGy4AuG9iy_ph5Fu4b=) zq9I41I0rZFqq!O=Ig$VkT#*_N&)Hr(3SIw>OJ? zzQ}?$;TDaiqKYD4Lfz078QxL;$qxCNDJH3 zMgf#kVYVxv<73do*coFERGSeUHi)~ar)9D@>Z49d`%;ssL>*(f2xz{8z9>}nj<&hn z))?E=>kzq<0%1rICFj&cMcP*TF8v+2F#~6k(^OEJyicBb_8}XglPB$}$b@BHaaoRb zMEikFX{Yt%o6DEcV&_59A{iB)vN8up%6iQ!E@dRX@_-u$&IrY56gJ$PtV)zOWwJ>Ha2Ag>t)O_6|wf<%3^;`9vDKa7~-b&uqo}F_ES$BG>Lxv$<6L>eC5&ZYrk{Rzw(iruU11tox{59yy?|;h-_43ZihhN);HF=^XJcK#W{W*u>JZodhOgJ zkFjI~QDp5GgX*C%aeItOgXJiX*$`oJ(>9p{UotdEb3&d&1D#3}(!?>&OkZRVr1Ey+ zv7*srKhEkItZho=OYx0qi!vKRUE=r98NA>yl`%s$7&_5U3q$9S53Qb&WRTsPY>S8t zmrtw1_v0}>^(%L=)!l|1k4Oa`{Sqd?9Qr~HxWGWl`I6NT+~k@h%b7Pmars-YlKJ+-t6Ci@NgDZsiq&^$Mm4^<|bzjbA{mi@lQW2v78|Lp3Y&v-cJPFZs#FL zeXP*Iw&J1gw&s%j+g7lBQy&ZALm>p$cXUTkja%$C+mFU;rCo0cIhkNU?@Cb*k<#I&u(E?6$@camKx z*Sxe%HnVU4$cnZQY60Vj-qhr8En-&^_3VXz)JM>!H3)4C_|S0p8tf3+AakB6@zr-p zIR^um@XV9G<`svIW%cB%Gzr@;ud-`-Dr;I#xl{Nu8Of75yp?{g49z%j7B_?GAbmMC zE)_4#%ivr{GlNJyG~l2q8?T;qdy?-kGM4eAO-b_~LY{4VP`2T14Iv%cWSIW1-5{91 zswcX!rw5PpePPgebg6r^2XdwRYd`jo_B-p7+<$S^PxX5ga$@?IzG|@1mlXK#$(v~~ zE}n>n#3q6l-J7owIAd2!)o#K1-A<xtC~~xpv>yZNA_8 zt(UsjF5cAVw6)r7QIhM1$Z8uZfr5wKNA&R2MtAn?Sv{P%>4&G-qxj@UR=TG@8ed9u zU!*la6I9Qg1LvB4obwTN1R8wa6F+kbHKilCp0~@HrzBmH4wxMeO#5gv_ zjCPqy=`sYys {ETH4`nj6=s8El>!5ejF<23x50w 
z>)Q&fcN}=@9lu)T-cdwOpv{{|cC?%w4@jzxsd856l0M1qU;76XyO`hP>3m=j+IUk4 zeE33m0Yi$5agbd1gMMrWjf6`c3joLU&iCV5062Z@Uia*$*0gN{9mt5(++qJ3BKeOz zw$?rQ^lJC=OZfm?)5$g*445Qj)HJC|7Zx`tSu~@!0bAXGG)g4ypJ8wf{3z;0wE{4u zaV=qEXpA}36=jWkXvY`_56{2oy)r?mL1wAG?u6+BZ|L3s7&`JAfIJ{v3wmGeFXX@! zd)s?p)&mi~U-`@UD~|GXY#hkI8KV8Oq8w%~z zY18_gvV+jf@%^A-i|ouncX)f4`r2fI51DkRj_0Hh+kBk2_W3{uK~KD3kqQ%mk#}FW(>S3Bk)yxlw*bI%G+`4v5)(0d~1iw>?P(M&M6mt zelSpzV``|f(~QiGgKz}EMMt_n`{rBSxBldcE?TS$8QNVG5d*#K^XtQIc{V)d_5)a6)r^d6xMYRvYul<;>5d^ zJ=dZ_w0nw88B=)H_s8RtIp5e<&FzckmDECxrlFVS08o*#yt@p#1;Z}{NLaO?W;5i<&b>+r)&GA$?1GxlajNbW#bE73c< z>-tmByc1hRwpG5hqqkYGKUT2q+xl{!BJyq3S=nYA$$f%!c=bnqUrU>;j?;qZ7h2|; z^l7!7Dhg#Y&EYTG&*{T+5WIeFYiqqbp{-u$w(fRc*7l3jCut9I26|T}2kzqi=l{it z?yLX!9shWUJ?y->1kGrRT||Z*i|W(ms4hM{}OU++mEi&)bbLs?+KNDT4v9j{zddjTjeGU0~0=1PMMWH=@Lgh z6EJ9#eox?>gn!lYIcL;1rb8-h4^NJGh2$cp{W_w{AGt?sA(&g0#0{*#yezz?eWcQ7}-Iq6YFuUDi!5i<3( zq>Ib8-d%d*x|WET=Clekn5L5dvv*jpozvv#Bp!Tq@(dRWHda7ZWV(*DNN`b3zuV6O z7%${27uv^NM3W&e7ok`w7qHpDG|U5vJPdg5{3)$=bJ4!noj$wPeg4^v&I{s-{AH~u zW6I{D7A48a)S5vxZ4*(?BcZ13amhEy>v-9ZHd#%VeXI&T09I4)CjkS$Si12kfQk}! 
zrHp{&kqZy_tnbZho4_I`U`v>qn1R-etzrA~HTDORW!tCiIGphCI?iGYW(zW}vhXKq zPG9;uX&NC;bsqp9;Y^T+=)?WmQ~SBvknqe?>X1ikLO;3&3=Qr-G$ArxgZWUpF_Dxp0mGhbGZ3Wr4wLhEfBRk|;0r!V(G$-&NB*FC2qgqwgTy|n> zt$X79R`=MM``s5ly55~Rp_u}MMR%Whj=+3!9%UU{pZ>^E{ZL=;zV&t9eUFp)kwDw| zixefv!hbqas(qAx>XE_bBeE(jypae-S}6;O>hpoxAgY5|jcPBjjumuIr1a891rhO( z_?i}TZeEWM_3%*?eo$HEfrJbaYnulslH$*vw4d?AqIquM%u;Pz!ZJ@^lP{yI^jTh% zXFF2f)YgPo{z_BJm1bH_IsZRndzCH>Ah>YRd@0KcTW5> zPsWq^+~?k|9#XWU>OK*1mj5|%A~G|ccp@{evRC!c>E+>@?%X=IVHwk_kH#|(ovJsR zIQIsu^ipyL$J&9hjrUTFuY))k&etWyss5J(@nyu#9kr7m&B>eSEVXZOz;Il1lAz5h zbAVe8^2qK1YhPN(GBtkC#E-WqaZ=KW{g;3B!SKD`dOZB^zxST53GHCOJeYC@AEMZB zquj9ZDV)+t@1qZ%$AwClt;U^%aDh|kzHT+wbyKTOc|v5>iMN@hl|Y;6Vv>{`KILD0 z+COu(LZP4exRi#aEQD{Ns}F3F_Ou!M-o3lSTW>##Z6d6Gv3K)t{q3XSMBAKWwMZvz zU-W7JlvHkU2wE*ekR4wt>}Tv9@B3f^oRzAj8KbK#$&IwJka3GSi1Z7&)gm$zG6Eu5 zZagML;EO}-6-UzoO_KW5ybiOW*JCDSm*o(*Y^YAa4QV~Q`h0-n>vntO^uvu^)2sYi z2CeC+4O!wiN|`k@B0a@-@~Ed7XHHLS(|q1Y6kp^LzsA^Q>(w|W#^gtqm}q35=P_bF ztzz1mDJHhjf7oOtwIwE&qQ{ZU%Adg6`fiLUR-Ri+q(JPD1(`{)j&mwZOmFEiZXvio zXcvb>b$m0il}y-*Uzaq=)_z%j0U!wG1GNX++56RP@r}yW2ipiw_U$AwATn7-7 z=VyA-!>K0KPxWMx%Ufq!S=N&=?(Gc^Z)??=^M<@T*Ap@hbpAUNVBLD`7$Ze;ofYff z{zv!Y71Q7Tqvz7+94cM;UQ;w#9}&)oow+F&BW$h`0=8pmaWfW9JxbF>n)qmUNandyCm%E#ah&&N_J> z^0A${`YZM}m$9Ic+01m~0dhxbg39tjrYWQ@02lF->A&~eZx8Q&|H<%|-}y|CTzqs9 zvc7UIWvu6 zq@r9UsFQT@<8388JwR=Wi+0e3B)hS{S!(pto!k0ZRWCTnCunfvb& zk}0t8!hbL*D}_)NDN}8nM$F8#7^hUK-b`|{vW7X6j6P@%z=(SDTd|C%IxuD=Vqb8A zzOXAXfoQ&=p7go1Oje+0AF9xl!(6L|F8vpRO#q!{IVlr8IEc%+lu4V>0|^$|`Y6fz z_Ro0{i6V4TFQ3%S&m|nwz>tUXDf%(mQ*bRdf|>_`a+drbFU)%_gWU{ZDCDGhB{?c)Bdl-KEKe;nJ zd|msUzjH8rtT(jsbO|QV*vfH1OI3-#7L(E8#B}U4Zmk_-tME|TIUvHrjFgCCv{P?Z z`i_OKh9oXNq*xYOJu8i*h&A7OZjkvf1MJpUC)y*mNFp5E7c+^g#7TP7#Y3{XbH z+K8wYi_B6`s^}M0#`&AtUJL3PeU~NU;eS)-lTqJwI*z2pHs67m<`h{M~>6b zkK)Eah@7C12!s&MpQ?2epjMnyS9r2-QZX_ zy7VbM9Vz$?2jNJ*G*gGCTsqOqVJoHhq;E)RuVDsd`tbQD4-eS}u82Y6e3G}^l&7!RfYINH9VO#Xu}dC9b5V~IsEnw|=ee(q?m#t=^giPjB;0Z^THn}5 z`xH9J32Quemoh#iJv~NGQ^)vT9$)iY=S^RYG8Xoi7Wtgl+iq@0Kf8SY!R7Gx{{FF^ 
z>wGx;>pvfc58r<=@V=R6dOFbQ>A5D-dc2=jy?HY$PaHYd4Uu`%qb$ma>?_LLFv)pL zE!5vdEF#br>UbNCZf1!BNjc}v7WbH3NBblTi@0#qA|A&r=UFh(ZS8JQx3Iy=e~D#N zL|93JO~rMv4z+de_FZk0JGvacp?%`4#J3QK%{VVeZYiN(6R{ZkG#KzPsy`K}EU`w1e^sEj=^Ed;W7q32RBqZX$EL@$&Fkl! zy7DF%&Kf?Et>WPL(pZm1t+c9)g^Wp}^Kz57-#Hon!+-GA;h+3>|5_(se*EMFG!5Q_ z6>jq_FDXr5vhTh7Oeb^S+``E=oy&IM4^PjSzB6(m;(47%dX96-P6u!FebY}1Xj{&d z#CHuH_lbKohP_NfKRP@fUemLhA3nUJx2myzbTItv*9QINa>7e;v~*BhLeEuwodeCJR1hL7IYN{*KHd0sXbrX&5VOg9{; zO#MKwR6o!L(!I#JDMw?EYo8|A?DN)#;8hOX7!{BrMZBZQ^q>k1nBX#gv&82#>{=8W zR; z#BM8eaHoV%jNnMoUvobGLw4%agPd(BLKePOzNug1)!Tpvt;*UrB$yuFG zdApz<%g1E-1#czPc9BzUA<+|ro<7zV66L4Qv`71?ZiIZIzh~N7!cCCu*Vgm@&UBOH z3q7g?jB~xEFm8&AzGQ{m=Qt+X;c$#&#;A64r~e$A;^miR+Q%0~$s^7uV}m>h@cCL1 zuIm#m+kltI^J86ak<~AS_g*>BFMj*OJ72lf6OF*-+>`MH58YjpV~qS5=fKM`>|a#t zv1V*^M;#NDUzd*R5>A2KT3oZYRcQz2b$nX#0UD{tv8?Ijy{vhb?cKGG^?E+_oIP}$ zZ3>;}wW=|uI}JP8k!MvmaP-R~jU&nBnZ|Av<6b50Kwl-dD}M=ZY}>9NMQ)cE7V)hI zAFBs*Uo=2{u5i4a&p@eI(2VTy@DE2ojt8=AL#`r_lJkC-PNQSFZYMHzI-|S(y!iD8RzsfuJ$(86gC2& zQcD@5PA*3~Of0$RgoU`T9Xp}QN659jK&%$+qjL#H-|HIp-n*BYJimz5X7t%p&dr(H z4jzBpjI*yk;RZ>*z#MAZ3NJs4vL@Ka$4sv2H|3%q9GppBy1devKW#YO+ha8w^wxj! 
z-}}ksN&2EZFND-Xw0%fe7d3(t?S~reXmU-ehRuAjf*WlmW*`K1R3Ma;C-OTJ zbrMqa5?9(dOQofX%4J!_WB%a5_1|%Y?org4!PP$}4v@alWU?pLlA-qc?61zw)=VT|<+;u*++2 znZSi^+D(idJ|#|ELTjy$?9vh+#gX6axn9jzxk~R;VyKA5kIUSs$#spV3b0@DTsJ&L z{~$yPfAlK>7cr_44@FHAVpgH~<8F5*$vj3dC)Ut4iB>F`P_u#^a-uQ0-aj~(96F-w z&5ca1_my)K=Dt?7WAd&GW~?g3MU{&y#)-e^@d{#mp{)OLVP*_wucy!Aqa%SDQi-F$ zXC(6?ChbC8%LrLKSB(R7RE}dadKS8IOIgJnZ{~sLvU6mVa3+l>ev=ESQD^ceh321m z>{z{PyMWwYBsOKki#cGc_2-}@!u?!ZMR;P; zsaCUD?LK|VO1CE6Y#Y&D?08MK9tFrb1{@~Xyu^eB?TbBL&7SAg9DUp?lh**w2X|jN z8Qyr~aQON!?hW7gW!-;rSF+=|n09O?+EZiaa!-%C)NMB>;w`4cufyprxxae)uJx<* zuj21&=O%J)JNZ(oF@?V-x1?*?wf3#?El<8?|LTb|4cc{2}uKU67wDJDgWTX z;qV{)!!Hg0{eSZp!&A+j6HSPt?Uc(ZgM1i1`tUSfVu0)=D;kVk2!x9{FUF( zTfX!p4K5KPt~{3UlO)kD|u_e9JsF)-LNw{rb6jgtL5=TO>JVL z$)uM@T?A~LQ_cWQ-)<*pTz$88CI^g-vv@SpR(^s*^YPHDyfqno}(N9PZ19f>dBGR5SW z$Mc=N(5g0DLbUgq73&vz>IjqR_|x_gCfB*GL_E>C;gKyPocE89_0kw^&3Hp^XMOWa z!|!eF zGG{F3OEOo{VoXNd=6+tKx5QubqqZMC*KOPNimA)P6{$IGDTBFQ^KHB;x7L6vLpkJn zVyo|pv7){~u0!{_*t@oS!{|$DOZB&w*X)_TO_qx{N3{nH@o|jc%?|cBSjDvf1{gPj z$PI{rhhW|MTzsSdh$=kkc$D_Prv^$(l*GGM$Wm z@{?!rGZ?<)N~h>GNsviw?v-Uvu_j%bRK0Q^?CNkhB27-*oZ#eTuxWcsVruP6*Z$>49E8&~-1^GD?t;?1nn$1$1CDbm_P@@ii)&|Ki1}}AytJefp zt#4`7noAoOQ0(Q?1Zb_8{6`Pm^0O$D-<)V;g^Qy6Ukvo5}X!i=Z*#W@61Gn^kYN`!Ip$!pOD}{Gyc^H2H&Be8M*U9>GX|;ox}c zL@@!-5nf~+lW~boz#_(Zd*)sHb&e5i8O)>66Jt^A6y zWZ(SihiNsh{AvrCiaWEd(9)b3LyQ~sv}MVI678Mh82W+9QJ(sd>jNABrw}cbIUXb% zqEmmWn)<;_K9la)j-}%c-_pPKEgMq!n0{xQ@o7(w*VF1Y=L`KYael5Zua&xs zp5=SlB|OXac`w^zRry}F@?7p^`nu)F&2w&Et>X`MQtw-pt7YRjH}C3+b6SZZiV}Zo zX&bNV^iEe^EpLH5}uC7@e%8gtinuVL-dmNi}IRlkC+d((fOgwoyE3? 
zyAQNdv45;{5_ow6`iUMz7bm|sQ!`#CL`t;hPtLfm}AN;~KGSlt1^e|Da_ z%k!fB5_KjL_4YwqBNd3sA2C`Rg4dax1(Kll`~v9llIHkaGdw9_I#Zt9sw>bLm(%4; z7f)PZax`k0rd(83xWS0OxYDKpeB&A6+H$GHO27JDKl#R(jERB}<3Atj7i9Y(mFY!&Qa>%=0bIVpd^Oty* zLuOm5e3#K(^20dhxlV3hoSWymu`O$U#j=cZc%&+0J@SfeS?gAQUv{0mPsXe^U3Sia zfosh@`JrQ>oh0&k%b<#&>&2c{tJ#`$sR=S)iZAbpp3RDOv1NpGOItd;${vLFVSg%( za&2ej)i`9C{!i!fOC9424V>U_`}i;(8f$5a%cey2(^zY5LfbrJTF1!7tMpp7T%T85 z^QxZt=GA(YGvB#+&U-rb>T@l_t9rEzuO@itUO92L;5a%f%9iq|w?*cQW-SHaD!&}i zFUmLYt6%n~r!U!Gl3AiH=|V%6RPFcr<~7kcp4&A#%Jw&a%16-Sa-3%)p9YMFsO>v1 z>Tt8#&LNssP`aSIt-qW76qv6gSrVW|_al+(`sR_h; zKX{VuBhJDE)%KV;a6)Bukp!J~9-n9;bi|>MC$?b1*I&8=mP+jm-8>B*vQY;}B4iNA z16vz!Uwi#-+ziNGx&uw(zV?g9!_U0KYkXWj%V&pLC#skW7GJc+kPMCL2pgi=aPaHLH2bc+dbri7t)X-+$yBpwT^ zsv{TU0lplFabTBFmH^u%VoOMX2U5l-bg|8!#+49sUy@!#(P+vpd1rF#Jz@$ctO_Pp znKe<33AQH3OuF+aBCKfh)9`_QbsA2HLm1HM=o28SK${;)6KRa6l_|QyufE4C&m!z(qwDSNZ+6d%Rx%tNE`EDC>ec8Ib z&Qg7w?3!(GY}1&=>FMyzv#e#D!|%(LWrr0Df_3 zW0K>TUn-SyMLBbR6{4QoCOF7Z*t`xHf6cD>HMi{Ke~xzvIysN!6{ki3-=9o;B&5|*z4^^-7(WYKyp}B57>o~XGHd%P2if>$KS5BO5 ze0$fXk)Qgf8lWis)Xjb#gSGR(vG+ZF$;K`+OL}3a#i_^W>(=Lx4&PMBa(v%VGR(uv~%Lq3`ZAswpBdTYpDP3Z)>%g)jI8S;4bf5?0Fk)@iW|@ zCu-9#pylNZ$bUSU_y_XXnz`%3rT4(Z@=VYamk~u=nis#YM(eU8mD>-SF zNgj9f<)1wqe*PQ0S&#{j4*Hm!aZ(2jUo#HX>!i+o8=VNvo+pjYtt!7!uEsW)jZpDw zc}>33h2NV1B8HFQ6}0+GMxJ+&+Dyck*9o^FtsHb>nUi7lz6^>M=ZBpB<`)yhGRnwn zPOHz>n9&(g8ZdXkgUlDl^hDz_2($-HebRMevcHo(+U&m!OwsbfjAlw}2*kKxzxACv zd&ApbIvl?I)nnb{bf|rp!|>?sXBs2IVeIQ)Cg-gxI1ouG$1#$xcgD_`RHPOZ9W$hy zSR^&*cp5BXnPuq44<&u1Y!=2a1+z+nF-nSSluAP>b>=t$CSOu#jsp)mdd5l&=&0nx z$2{bxw23wG`cjppY=10AeTiQAE<5k#drZsKyqD?gyU13ZTJCkLe@k-EEJIE*&E+7j zeDs*LUU976(`*yI{jeSq+M=YX?SeAROq4UqHP!dMEf5f2-~c^BNsjF_y~i6z5AaJJA-}F!T8eklLA7 z=~dY^*uFhEGLUWuHvV#E(hMSwc}_fVTRV{FqMy?G%);Cp>d4i$tIPytmblr33pya0 zD6?`GGZ$!@r0}X+rbVD(=(&D>|MX)vucbe+8YjAag1)DvKqj#17N3{H1Fc}k!~trw zt@@TSv;6djG*I!>g~qqP?;Qv1Q}cNBhGs{o0{;LW_wCUkcbF zB1QdAe-mo7q(V&Ais6yhXtf?LG(U<;6p4521FmtWy|vyeQAZ{nT;guhEMGYbJ~^KD 
ze-bfe0;uEC@tU|U(Qmk$;|~;E$;VD5dcG_r=|M>a#lH1PgJSz(0}D!w_P|aXybY0w z{sn_p^jmuDRt#W$$wM_dO$EyQ<@ixq6YW#IKlzCsz4g&Yr@{O3SB{6v57~FGM`6A7 zIJRy?2*W49NQpAVOeGxyFL?3*H$(rERJj8g~!AvG%$fpSr(~>6O;&Qb%sxR_Qa1yEtaKKEw%>Z@9EaGKd8~u^6ebfDwa+Z@vX5wB{ex zel5?XGIq6HG9DtSqhhP)fih_ull)~S$Ih6p_jdZaICX+8=&PmKE)LdhFcmZ`{*Q^ZeX@Io!RoKm6N& z`!H@q$-c-5O>97DKOsCb7>PfXSrWt1{U_i0R4?Dz z8{T~DaQN^8Z9%%vo69`0j&I3XJ1%>v~-Kcqy~DTC3p;-EoJaTGBoOXKLU-u!xE`_A(i zZFnU!9l51see*2udd+*D>&B*TTGgXmWqMxEw@&~s$+jQNz?HSi~_{MU%*D>8X z&MV!xRoAp7`HAK;0DYe^n#O>AW57-ELpQOTZW}wsB)4zQE6?q%G?(W-YrHiqj+QuV zIIbuxjHTLk(uifg~2)v{&D zohsina?4l;xpAw8nN`rrmDfh3N2WzqbBxUNJ4riZa$h%ntx@J*ndgx-6I;Rz+Jfv% zu~nAzm*k*bM|KTkq%ZilhF|H|j;+QN!z#}u(}iYRe}<3pkCVhWBwgtIMrINU&G_i? zjg|5-g-JO+ZA%Bh2S0hCpUT5y(eay)5LKpesDJrHjXa65?SqNN{fC~0rK9mFyc)Yi z=|0Or7smze2=7D>(tq?wd$8{vYcK2m@bJ;W@N?g|HQdwB^TrIf_`t`jQuAeui{rVb zx4=((`fNV0T6LS8HyexcwLc|a&C<@SV-o;xK#;$)?TbnP5q}E&St}TeEvl|ib_^GV z3vm~@NrwJT<%x#hIS#UQ%jYs=+zQ%*J`~2nL>G7%fTBO~_yTvKn_a*2?Wfwa&-RUj z;r$#hO9USR7+4_|vUP{BHJ6=2kFITho_x0RyEwFQAExPQe z;s}ayAz+5i{`_2N7>%#}r;pwKAE>c=$NVFChp<%#DFx zp-o?y$d{1zWkw(w(-7MDFt3h-UdTMlnr}LF^D519&XMW))S)l&tY;b5`@CXW*12(A zcMhFoeqRpemJjP02i#pTdfmQmAHg$jrP&U7P4>mPdB!l^x#^Z8H;-~}6IxByw+{J4 zLz0*<>V>Pn{54F4lD63}&zMDRELL7Ej{+-&HQ7q1>^3er z06+jqL_t(_Ebkm#`$pa;Xgy}^(XG59IRgn^{fMSMo3|B$?{ zZGpK7+Jw9Kr0@1%T3IkO6X2ix=t->N0gX>cN13_`yfJW}MoicQg&pUyM^~=>>aRZ-zV^*CJ-7LAIMMTh{m8RCT_Rm|XXpXt zkIH#Do1SAG64%&kB@yD0LkACoC`0CFc z44-|hH>*9{*9#c*%ZYIKo02Afs(<$Bp{-t4a2m-p+=bVNMUvHXlM>+*yfcEK7!50&GX(M%&C)=fyOl@r03}$F?YR zPI97+d*gwRzl;-Bg^5MhRT$&NWIKEcr;vz7jQs2aH1N9#Aq8CEi5lc=GgOlM&gCGs z5BF`QEtQS2*|(GQHr^yP_s4YW+75J=&Ff`dcW(Jwr)+)m$(<_CvX--qbIY#Pd%AI~ zXS=Q=@A7qdw$DRg+YnZm*=WBl3j`KC#tN2yVw(LXY)$$U*>cV#&z3PMM6~pD#iE}kx%La2^c|Xl^ zG*|}5Zf9h6CjbY5{AK%V?A11R{;bOVwPi3lZP8U*#yE@W(=0TXr`RWTt? 
z`x4`6Rn zfC*`wQ1b;fw=Bgr5hm1ag3b02jzbPZwums%rXDNUtb7NCOb9-owcg62$0S=-c;xK@v!|K57jp{cn_AGVnJOG~20M%Wm?PajNdTJU>q6ytP_Cc5i}V zogQn=rsXT&Z8e7LOR`mOsf>>Gm-v;|$;UBHJH(*xtEG5tvxSMuUn?*9Y1-<~=uLKg+S&?s17;1j)o1NkJh@s@`2v4w02Ga#i;m5bkGq0IJj=CU zYaBTn&>ocHs^-h_vNbN<+{rw{+~~kbH$w$}Ui4{STnhn+ajDm`>X+@0KVau+JE_+t(F4<0FPZ?&`5%?ETd9#^etd)=Z;s(Kb`FiNV7MkqvK&vE`arGJKXyU~LM;n30i9PU?&ewP6XSY%4hU z!pI_YO`EBU@c6R}tt6l6DJ;iYb=FfdenyY9(=0OEg?}_aZ$^7E3{M`j4T3m9q+M=5 z{((De>(IJ%35RVl9;`{#Cy8`qdA3wr{%gUa!`xP1CC$ z<*IX&vaxEr-QG$w z_7cs!ZTXU&DsMitTCOrK&nC`x)=Y9cX&~~KmB3oVtG?LwHoI#$GO$f8c(ska@UFJ2 zzdgocy+311$bKxzulZ5wE*r=7CA>;ol+#2qE;8{c-}H~v%O+I}wI?pu5pJK1d=X3j zC`(oU#-0%)>Il$Z!pdSGMmg|<2V1&LP8yL;pT8JB`N+@LO}qqw9R_r9)hcjI07ObN z!-H3~;-&J1CNbEY(saQ!%|6w-%VSQz#;inbXk}KsD25;HleW+8!B<)suS>E)1GLFo zhI}oh3mH3?t-q$z(__`L*p0p*nELPb;KRHb9Ov2J<5N9wU2+*0N~Gpn5|b3vu{evP zsU8n)r;W+8+%EUycND_E@Toc07`ejQ{A?4_QAdT*RW&&JTeQhWi%2xIX@y+ICx3~^ zRp$$Bm3aUC7g}jP(8PFOd-XM8)QeFv#nBPPPd?R~5=D*}D_&`V?GE;ZSRn&19$k4y zTcnQT%Y~j{qA#$|gzFgDw(Ef`pTNg|ecjcQZuazIA`W9>#MfSa(F(~%+fz`I#HWbq zwQ_&?W<0>6vrrN?xn#kNvIIgifRG8X6jwR>ir*xViVZlz3}Mx`C(B{S$4A;vJ!rj* zH%b3WlrEPfT8PE9=u6?i!_FT5SbNk|Y)pfb*Q#Kt;>s1`fSDtm^ zJVM`T>`R%>)t~x+yd~qhUUSB~v7lSlxp_;rD~`*Sv!3ZSH?OzhdX=r^%CF@rL)kpr z>G_pzS<6r-AyehdC$|h`%k?<$`m`j|(`FOrYN!YMqUyCl)yC)xfvJoel(8}5E$DCK zt#wL;*+r(xN4Xt*{QCR1e z-qZU!a;CX0&W%BC-_5fQeNR8eCt*-5=$h6&sD>``#t35jSfW{{%2nDLmgSA<-1I*0 z`P8q%Hb$>&`dV&W=aw^$>8_in+jD+coby6uq0^&T#01Sao6#qnFY*G&e$3QPMLzLwPpeZTE>gE7v!^!Re^mtV zSOp6%x}PHf5)k^h!PuRwr#E?KRk>BeGlXirmhF0@nYDpyxm|OBHtZU`2bl!Ju0Cog za;ya>=xum4kfisB+v;G?F_3QfB~W#YhJ0}!-jyi_Yl^kW<%YKnCg_z3-Sv>shw2GQ zLC+z7u5Au_7C8?)dZE?VliQO=jP>(#-Ng5t_0N19+{7)=!1j%+f~7ph4SD{;uk5RU z2w4+Vkq8M2%^$?7RYTYs%xdiZtsA4_NTwR$n2cUNl3;^(P-c9lrg? 
zKN^1i>u<&*ArJO-hc46KOWgy}8rk>v)L!|onJteKG|;OADj>Geez;>{7gKZ z5BCrCEOu=xIpMYIx)Jh7TfL4|KGEApxe=4CCdZmovz_EXb!bfJ_o2!6coK>7cq$40 zz$53WCu*AaqN~O77mW=3QY_<_0Qp+mQ_BKaOuNV|DvCJyLG07FHCfY_>>0;A@}6e> zr5su7)^abq#Ji529&fFFl`ljq#%lDnd|wEi{aUkO`Yv)yGSKX&v2KztVH;w^ODpSincC9-GKtm-y800k8_;+%g@tp!_7y2 zUg~J}RTvJ9ZRT}oYJawMY;lUAL~3!`z({U>R^}|ORcN;|tzt5fvE8OatE=JcDkcIt zZI4}?1#srF+mMroJc+mvhRmb#g?;okri;9Nn2-M^Z&$;wjDTH)6>NR5B_SrxdZ7u& z9_47B^_BAr$$9i4GR;2x&Qu=PSh3H}q4lW&jK^E@a|O1St##?4Y8&BKqsd%D9Tn#F zP>g=dQiIZQ>AoaVfMgy2bp*%uh^Z~pb_sFAf5l8oz)U`#BJXJQ}f8R|!x zv>r=F`<}U>QP|+tCR3&fgSKf5`r^VD>rbET4ZriheLDO<-+87lG-t#43j{RDWkSsU zYVF4+KR!N;3G}fh&P$u)n6dbRqI^due=@PqwxO|Vr~ z`+0^oKZ-Hgo^Fn0y_q8i>)BSvNmVHMSsGONIUX`@wH!*_N7bvWWj*Ys?euKwzp6{Q z$E|hB*?*Ik`8|D-ST6N?t|M(brd8~oSJBO@w577;Dz4>nY_*#DiE9!m&y!jHX?)r@ za>wCi*flO#H^p`Qsy}OfR(iEvb+1!i!>D$Q2W>WSUQ@QhV$O&t2kfHE0&&S9#BSt6 z#lOOz>vRsVHys4krJIzO`~;`Q?eqF({fcQDYPk;Xv$)$mR%q86(axLUZz^v}#yLAr z5u?psiZK*dg#aP&DkQi(8OKFXJl>iJdnCX5wUgmL|1Z9-iSzmJ^znuEiJlLSKYcMg zeR?@Odz$qVJsa-zH;;$c-Z;=rSo^xDYJViJIMzy5xQHED&GKj-hpwHb6XMj430QF1 zv>7RkWCKTg{qZpOFF{6s(NZPiMOkr*FjBrMU2)u?9^)hm%V6|vu9l?_G%`FK`%-l# zM^M7e37)qyc=?SlxE70}(!RCaM=CIslPp{wC#g#A zNf)kGcOO4~q8G$`G<^LVZ|M8KXt(ZY#rTtKSu+y;#)Q53ddbU^&WBI5E&9LuAAU5v z`@wTfhFM|OK5AVpi3pP(^v++r(63yB&N2G7!xKm3Q{0d!Ci5bW1^(@8!oRNxGfxyb z(quYrfNbR>O|oy_=CPH0vCm2NiDYh{@Z9%<;r7AhaIBZoJUD(n+&yAEaJ~YaC}+xc zyJlKnPx*e_&ZRbM?!O#=#3+BYLlRor&YadencmBp-{;1HUUjLP=iD^%wS=sb+u(Yi zS6uU!Xz)yTZh7)*XNjME#)v@J(VF_PVxV7BACtDs-`X2sdHyyPdk5vPUreIEO6TkqSYp6AL>oVo<$ z1wnbN$Z&&0-+uKT7Zvy3%>Ndz=#fq@Y`UwYJhd-PM14ffla2kw7SNImcW&##bD|ZV z`}a9H!PTEul2ZoXrjtHeT&#JlR}|wj@uLWCvXV$yKuHF-DL6|bqQfi9R)!L$kTbv9 z0Eaps;)nc9Ht8XPSeHT`nhL2s2{@*s-HA1o?9)U^A4>hIB7HvI{3l_2iitEnsUJK> z!`JwgU_y+Wz}$W`TdYGOo1Vo|rk(DQ32D8QaR7RXV)z6~p{3`R32)PP&sQYt@jb@2 znT@*2?V*P%#Z+f>YVtWrjEOjEXs;$jFEkn3)9UEqVLq3_is1$JyJWYg?hC+_O4p+d zKV+j>0)j^hqASiDE3^5h4Vr4hk<*gaBaL3;3OeQ^cESl)B zKRVOPU!-F(;Q7~1Bl|JQMo-!r&pa9Ami9tNJ;)wc92c5s>uDpWr#zoquUe=5BIX+! 
z!OV8>%y(R5g8k~FSB9^?bAR~Kn|Fo}Pwr~+d^Ws&=ZPlqIxk-y$`cO=>|!fStE`wZ zu=o{?965OOiT-m;lJZLmv{ZyjuhcP}FOh+6;?kKA^8-cA$e3RD5ERlXvSG~#8YrN(|<`X z#)pd40sflQiWRxdW9TaLC<|fxurQ2wW@6?3yUv|<^`vxB0g2(*r(z8&;1S` ze)y?w7x|zN}|_@9%LV zG&c-(eGj7g1hZo(}|dpkL#7 z-u51^Vdsl4LsD3PF&1bq|5{5M69H0dH$|F79tD@0mOxD*_A#77mol8hNraRPs2LrX zh(oiH0^?`KLC!}?8)jO^aZH;{GMRxr$rD@qWUB3_e@pz7lwQN6UHp3)$-dtNtG2i5 zXUx*SHJK{kmzQ)Lm&&VldW@xdm9IFKU81e=EV~rH%0u6Emv^2x+xY)y^M!-uuVXCe zU=PsVzoh}v%gpPX)Vav$#SC#y-~``JK-7r|j24^K6G5I&;slw8;&ec6_!_ui>(AIt zfAkHLu7nRM@0%E5g})Mti>sv75`9|BCwqfTxy=iq5v3)yZ2u%TLRC_Ox9wmVFyL%o zoEEcD-Et<(>e4H!7J2FUXmi0>CDn3uqUpnStPRNp`RG_TXz2z{R^ON`5aL|Xq9_eb z8qkX|rb$NhlECOYI)J!NVM`~HHxCa#h=hFjA)fRE29U^EiM)ZW9v5jsic<>psDjfH z23obpN;{*V3Mm-SRud>~#hp{VCi~7ke%9YK&PpWvYvaoYS!;`J+e)8LF@qzcp=+s4 zJLdFxwtc1?rlX%|7KZdc&OxD!tswMJrH*1YWsY1#X2p~@DdO9{P(d5pN=&ti%f9fB zK6pI5_4cd7{xKG`f0-3qrO=H$s;YBC;QsK>{@~g0T>6JzT2A)*r(k2e1K(1_pg9vf z=pi>sG#wG;r&oc~C;Y}gZQs$8L%#iIKZz#-ee-J%)jnrnT4uuogh%hs=TM@M??`|TsWlt%lo z6|1=MvE?zbKGNh`e@B{N?`aFkq5krD+N!hSuCYlTE31lKG+z^LXc-E%mA+Eq)93ym z(O*F|mzmS|P?QqNVrbxVJP&Y1l4r*)6~Ni0T# z>g}Xc$A!x=Za7tOQXD16!gQjXSxm_sY~9|YF$$YSDAg-Ut! 
zsoWvbmJ`4w9BlYlDLifoXv zgRteD!wfP6t)S`AYCrdlVYq!y4+Z;d(4KR>$m8kWaQcjwWoU8`fB6y{#v`NUB9?e_ zZ8;_t6eGcvOvFm%m~1msR?jv2e1URE@{5z$A>R;ES+pcB9RskyGcS*A(=b;6@h71T z*=i;tpXE4&Ol~&vnAPf@CL-K~_S&N(O_V@vN=+Vr4i2>uTCtlNG}*?9O)bMHR`NNU z%qYir!l2{SNUByU+^JsAbK&8{GK|DUTx22~jF5Glv+qP-!ZNCa#{Pho9i+S}fj!ya zai5PLpT^(e;fYRRn%w1Xia>Qt@=A-F_PyafO^EO5nbo%sSaqH!bTYnR zupgSo>0J!}=l}J1_^1EV2g7HdOl`t0kJ^iU<}uM_Tq$8!MQ)_#1vKpOK592U9^c*{ zPEHQVM zH+!83=Iz==KV|HnH7bt%gT|lhmhJN%tLJ~-y6xMao8Z|;=f>#krtgwhzmRvm>Q?-| zyfJYegPliU;6NiSfd^yDN3ZMp7L{wUYJ+-3+p^!(EpJ+{b5-3KAxB&m;uO3>zY6p7 z@~cm7m)pK8w&SuX&m`ulPNZ6?(MgNdLQYa-T(2nQb&Hciqn8s;Qu4Hh=5z`xmB4Iu z;-%Yy?_O0L!&v@Y?u#DwOIpKKazL2~r7@k;v^ zr01kH`^LNuRza?D#dFMTPs{B?ZyJ-VJY5WLyrl`@8?5w6M*_@?d6CE28LMHsDNZc@ zo@p*{`eZ+D=zRM4R;<9DYWu=-O|DP%I51YM&(2th&Y?3DZuwbrou2cA?{qbsp zrlQ07+2!!ZdP>Nv_ccqsvp4+c@zL;e57+mSav@-z`mw2!qiuBVwC z@o2si#ZylfiAVNn;(dGqZC^54!Pdn3@Q{f%FR)>|h$ieuY#CuSJGY1Mh4R)d?G0z; zn(ZmHIh^4*B962h=bGcn9pj`m$7!iziu_t~-=$3IRU2-r^|51`bMu^6-?*nH7oKn$ z37pyDkU?5rZMZzO)p1B+^E-%Hwd&3otpdhXGKj9pp=&>!UxjBZ>ssD<zgg?HP`FL2#JMQ^_uq2^uBKT zRXKK-+K8Tg+9q=qeveJlGNgPnKG%u!srJ>~6$}T0(Nxxnfa8AB$4~ec$~^Hn(Pcl` z=Ltzz36$p(F^~<~h!w>O6G%*7#OOhCJqsfQqRG2FO@Z)cL++JwGSYfsfP zi#*z3%+PB#Osm2>mg7@m9;5~i%LNSYI4F|H@pc+>88>^dt`6KNw z*96+HDUZ|JXb<$_)GSJq2aZelGLc7B`|o@TLyiUd-55-0NqlMg(CvNNy zpPcGh-*RqWbRM0@24wCNzzDkT!H+*Z9e(#8|3qKPc$1<=wBXVnvQi@!G@P>jMe9`c zDmat&*iNNmHO96M$kddA3bupI^E1Wy{JCC6#p}*Tsno-c;z&D~Y{y@$XzNjZTBT;C z`h=&8Xp+t9_Q`EcuJuKdiT9x<-Aue$xjtmF%}O?tY)!D4R5S5rD@kq_A!ouZdo(oq zPG#DsGG3)`oD7HsttQ@#4US{h_X!fmpq{YO-k>XH>N6msjBd8cedYcPfsg?&WilMH z!Pw-OkMQ6Z{YyoD;2%COB=ai2mKRihDx@X9dc*dU+%Yn(m!)2L)-lcc)^i=cWy~YD zf0cGs**MOP;X3)o#Q7@F%D2;%_Ib-V^|EkRvEpF*yq@l+|1#Thl|^^#RdT!V)eTdM zlLdEz>oC_No$>^OK%Ro~RATYGI8j5MH0N*D5r_%+C_OViN1*R;u8N=sEqGDkBU0EN z8`C^#nPo=Fj?O7HQ!_>esgUi4Pk*ByO>@@&p=aeL&}X*M(@Q-tqrHu|4f3XA9Zbh< z4a2@Vw~xgivtsLoE0(kSMLtbwBG^)@WKU3@`kmEcUBopr#!V*JOeT4x8>@5d>3pGQ zA@ioUSh3b>H7nMqTDfL&{X!G(;A@rpO!6@ykJW7IT)^a17}tmXiz4HBVT*NY@iRow 
z;pD&Q7OslXrsOeOfzHtq$7n$3MN=FNTfOY$uD(F%5oEvqcbFV7nsP+Z*FLVnI6k@1 zq+(xh8RU&}fP$0Wg&$!z$g1Wfr;$|8;}yAdu=y9xW;s5k5IT)*DeJ&9ZNhd5e;Ry| zgEXcLQXZ}b8kw~)Sz=r?ZnZ}lq(Upr#r6oU?sTf?W1 zWkr)aA{DWxrO9e0qID`mtZ9!B&6MLebvRBn!R8yVzIgI_b!L#^v&7MlJ^aeqD#Gj4d1)1Uuz6lK6KI~l z?`d|UHxEf_tHvDV~!`c4yPj6^rn>0P?6!=3s@6}*>~UMAMxWS_2TTdqE^`u8MO z#s)nuOT$_J=_^i=ob~j-oDindX6?^Ljgv=OMR(Ru2|2oiMu@kAIb=oX2Y$!}e0U>w zY6Je~`NAjDFV}seG^T3ALuy2ZQyj}7uZA38A zr!n3QxV0lQ`knD2N_cN_tKh_K8o-?u5%XFA@pFU%ftv^l1`Kgu3Igig{kqVqJ ztsHAi=3`X}!(QHF^@@KX$aW{{ni|VL9?yBC$BeNB(n*_s&K%3;E)nN%(a2b}C>k^JPfCK0y2$)?o4YAtD{*Bm+ zT*sT^WLtCpYmGMKSP7%!SWg*-(??oye)>!=OwnZWgQthXqubB)2x-}7JT^zewkD{j zdN=qV|I6pPk#Cwb&q8F!stc{`Sf&VmWt*yKwud-n9O#J_wb|H!%ca5Xr;YjOz6@-f z8~8?}Lnppus=%*GXFAY05_n`^oR94z4MHA{1ZA%Qd!WkS2TmG2ZfDiD}7xUNhh^PGL z5rp}rSelB1%WHa-KDV75XpW8htm?RIIoEf|Z4cR9Z1kAdvAHH|x^vs0PI8-__i~os zHLv=%fo?5hYp02GRRY98bea6GDmh?l_G=*4WUivu_Fg4>Q~pi-yot_|e^=x90M}~4 z#ixSMvu5=()-BzY%}-7I`ScM#>xnWu-N4kDGfy*3Qk977=#c1fc5Q>Sp5YBX%4Q*K z^gJC$d^PAXBs|x_*&ZS5AtxNWizP(knzw0o8N^g@(2cr2i+L+@1m?qm#m7hM_3qEpr;n&4qZMe|OIkMfdp;fN9GV57nwCT>hi{bUx4~Ad(mA&EX zzr+nx@+6*9)>_x%hJX5&5IPTE)g#v4-_wm=ia~UNc>G6JQv4z9M3X$pFm0Y!={aWt zLne&L4l-4zlp}t`yTfT!=<{Q1l1m=#Xx-3Z01Yq4MYfVUDLu3X+-h_Gp(bXp@cC2QIOanRu%y$Dj9;D)IA)*eF@8^< zBt7qausv3o+0Mc?5xs!1iufO#~zH0SZTR!T< z7F%f#ALc3A-UnnZf>WZQ%aRdihGP<%-kOU9U2=Z2nFYXA4K(0yd+)#LAMB+u)JkmevBiO{eEo z8yo(`!61z*{OB?J`Zhl|Z1&2Zv%bxCLi=DP);gJ7Jk!2WPUAY+@iVkaXGpaGB@v)?|xGn`$#(r8BQrggod1e@-s7w3(M5d=krUw6gIbCkZ)~ z!rxX7UPK|V$EIvxNFJco1QR8tCWn2BM7#Y@`yr%{(H9fe^Nh9l4SOazIxXt1n5}&? 
z9N;w}j6M&Sxw(`G=VYaq-K=wQO6oXnRGnyIMnE(=ooE%T`0VPC)wX&y)EccUf?YPE(_u7Iql*zETg%dtwravuFrjs9ga_?h-CpTBW;(EC*MWk3@_-8e^EGNCOk4cb5aechK5 zuR9FKgAOX9z3*ck^#;Xp>`*4Wbi)XA1X;<><0{C(gidObZL-Vq3bJXOV%CkDc;Jg} zqblcp5~A@R6=>k-+LL>_b#Nv_S?f=+kzgX7C5AxsT*!#1ty%m6aW;JN@sEce{Oga0 zU--A)&>JQn4(C5%rTb)f^Jf&hum@765r3?kg9{-%5k?u)mAh!3@=)vO8z7A^_E8bo z^r^+5$hg?cLa(&G48ONoW#HGe#Dmr+V_M#Mm9va#eciaGFJV}>&#R7k&`dYYHe4sS zo$F7W7Xnd%|E8Dw;4<=|Z4Qs+NI38tTuv4%vE8uQl*4Ybzk&Bs32q%H5z3q_oIxSo#~)E3a1>F}DdduwEAn^n)iDc2Cy*57Z0ZK#83Nucjs>xW_bl zwTv#Zc`RTT38heL<0K(OGBZU&tQl$|R!o73I4K~}u){7K)sse_>c*tt$L&*1FK=|b zbB7DIBs3m5CdhfIMck;Ui5uRrm-FBtSD5n)0$}Lzk_*P1ib~VT8{@f!gT=VpnrMId zos;3+cb{vedtcbtN&lcUGdmVBo9L7_Y2u`9!}dg%tI~40<#OO48CVe(hjoTyfTa zR?)dZE~J`2F`h(v|2>{W!Pbw~=D-m{ofZF@Y4=@I4^Q}r6<*1tef5sqNEH>-H`z_i zHx#l+sii4K5MXD|H$fX#!)(JA8i1;4H~CO=n-e8`NuR?>`3r@zq*adRM~BrISI#5#n2a_(T0lbZhvj^O1SvYC;@)WkI4_(RQ*~9s&Z9-tz40|$yABVahGaRN(BCe!TuWRlGVAg`U`sf>>0(uLGX ztGYb+poo(vHpK~d(AR&Ts9{BFzUCD`lnD|Fkq6U+M!zaU zM`@Z~r~<_3|J)+7eQFRrd9%JW?v~tLq?y=qS7pU_>f`hIdA?$}bw$nxB5GCyZ#Th;M3SRgz>R3?UzrB7g835hr#HeITO@SYlSFxI8&9gf}keC9Bi@1`9WS=NxHbf&+~h_ zx$L?2I6rs*&Ep{~giOH6@7M0L>hB12#>c7};5LRL=detUV;#GVwUI@Lthm8VhQ~dT zOL_f*p(li#>M?Pb8dTA*87U!*4yxbTKE%X~{@qNIWaUA%LscTnaW2M*NS33LtsW^9 z+IE-_4GDiVO2CBR5~B&hO4Q9B8Lg#ktJZGG>p}VpoixT%1j>B*)f77(ZTTmEq-SO8 zUW2cE_5SebhlhG@HhZJ><)eM^$m79dC-G=|Ci2Or(GFA_4~aHH7^CW&DwjAn|Fj)r zx^6_*otyqLa%^6OTV*U;X={G$BD1CoeNBIjUa|0VN$y4y=KvlLplW7&*-rFQL5Fw) z%pR-Ot34WjRay#A#aPrY2gQ>9qRw9tk+zU_@zb&|*HxdroF{s$-a^t}TrRaL7wcBh zXYMynI67%XH7C((39X4er1}*$Zd~M9qWN|;@wh;WZ-nV2j9sW$;lxG_c~#m8zi5R_ zGQ-6Xa~-DR+0OAzekF^h^U>_}ViAK73OiZq<#DvpdVSZUuUBkX<_pO2ime2(VyChe zK{cV%UIit(6SwALOOd8IEfGtSkGrgc>NLq$J##nnW^|LWLSf>SeYzqLWcT^HOyaB_c7Xzt>hvR zvMQvk@_>;21_f)Bi4w7?%#hgrk$s0R6WW2^#K9Lip6GB_TVy=85{VX&pbx`W-aIUm zb+;}h!WWB|!B>h+Fta+AE5l<4KS)G|7|)XpK~J8)Y_a8p)ln^}ws@&*@sMr(uNz$B zixE$U7@bVn-kNQvZ>|-!iL_dBT(R2YyI)dyYEzCWCZnscg)v7So75yCo`{elcZGH| zU1dovJdIgiio=)8Kl+xQmHpaX$sZ0MymvCZ^_5R0uU~=K4%1TYKgx;CnBzE^a9y`N 
z>JvA=uvUuYocH!zU(2z_KJJLanoyzlb_#1O?jkb^Ur}&8cJZ&b>$>wROpR>hYud%O zO3y);zEzFkPVAN&v8hMkza1Z#9QWrvrDRWCroR?O?>lucTBeiIW#BaOSO^Jivf#V z#w5o_wmtPH+s4{CkDCjdw934hjbD>_%oopt&cyguiP-B{%RkPC`D76$&M#s@t$p5_ z7x;!oCS_-Of(ZOGO}1l;iRADf8os;z(e9YwW4LG@RprRZJ$>O+k1W)C zEd#*v$f4tASdTn?p@ijm>irB1*&?2m7stJPp4lh?pvdH(IPvA^Qhm-I{GcRRytI#8 zZ1JT|9F)jHtTmvv-)$szE{D=We1l#->|*NCs^t{+nlIrE<3{PSTb-V+3l*pJ+svPc zSQSzmZNRzPBU^k~ZTz^2-Hs~vO}n4y#>?-1_ru|vzwmH){Hb29bapiC9cinXQf${y zUgH*wMyqklK1?4*d0YTT*BwTee;KK=#;f_LUx^E4>>JBE_Nmrg?qw^av9`%nd1F>u zZ@cm+oA110)H1Twk6xa7&vzYNl4Y{C{#mhmAF9@kCe9V$Wt8pgDpnrNtL)0CSZ;Ww zSr(duYdQ03Zn>*ym4DOnlC52^*7TOznvyC#D@(h~lfPSSm*q6P+&V{-oBap`Jj zm!L7=u_IQS?56+Jr-Tk$LRdMwywDX-vdHB#LvxZm*_r=5mXqf;xz>|H_{Pq}nl|Pi zb;?{2gc~)P5I*NWT;maClb@4cCWiZ!j5>eWu);|%g9&xy6Q6nNG3Q*>stP!Eki1Vz zv|gsv?RSg=u$wc$u(HRO727*~p=Y~={fyk)?~GeQ!%5$!K3?#(_~By8?zYU4Y1LZu$((S@l2+-S>n1Ds{86XB zFLXSzubWkNkws#n;G>5l3QNtu%)*~&N~4Pq3BUUoXY58ri;j7;RUtWyx7ZzOKl$bG z)>|Cc^o9J8K*dcO&jj8Ze&LGw?g+M2?1j-TrJ)pTFj zmX?@QBeysF;CmnINgEIKtJ#?*&-=rJN1XY!>MVK2Ey-<*O4PFz^Lnuzhh!ok8Q*`J z57lqVqrbDj8F@XouE&`rYrU4?_w|Zlzj_(t_PKRUU(z$Jm$lAPj?OlDV_MD_rf<{j zdDbyT&8zHu;!H>NBcf`ya_``R$kn7~3umrO3o^A4Yx>);9U$jx7)$&m+M51#=r_f^ zuHJR!m)dY$jFfL@3QmTRoKvag9FfJR?~?AZl=bF`vf9__#J{PZ(a?-RYl7Rp z)S`^Q5@(`~V;1NLD_Wa&n6MjFEUT~%;)xrp ztQ;%%v6jb^?dg1^ZoPANfB5(#b%_0)+*2w24GnBcCi|fIk`2RPXaSKLNn!Lb4B$rW zL(D=c8EpwV5X>C0gM1RpnPjpr7Cn-3nNQ~p`ZX6t0_++eQwn!UPhl&d--iX?;>0Et9&ibJ;>TseCzBgwft8R>K*-^d+8ui7f)^DK0Y`fPIN)z!gZ)WUf-+#nqV`jmLZ+^$@t{w z!Z_xl9V+=sWY>c(pH!b7y`~8ZFlfeqaD5U+k(4>=u`(Gr$?<2{jgK0J)Po$yVF=GL z6S~nDSNtjA1O369zGxK_w$sz6il4$G7TFBVc^Cch0T19`Rm!JT`Ks~>F`4M~C zBPzkeSK5vp&N=rV><>Tuey*}}#v##loKL`-KC@+c9H|_B20LXWDpJHsT@e@&Md*`Q z(6j3EQDwRuqsWz6Zr_`0VKnXFbn{p<*z_FLYJCQ^S+OWM#K^h=n|Y)<_RaDY$24-&`+PQWHfUA6vfP}ly1i3N7*&3W|3%TRvb&4#UzBe<<1P%4C0*J= zd#6KtNot)|W3g7YcGzhLifMhmi%hSxTb*r)zZ?Mu&#CNo3@<=k8YY(?2ccfdnnN;igHyj9*7jm$A&0lVHkl(VAPXW>XQyDzh z1NjfQh^w~8e%0o9?3ZJYpb4VKML~5)c`6A?RN_WV*&Jh_J_|V-S5jU~3h10IaPh3u 
zCQ`A@1)BUkJ9|EyJ^!THPvCjqB(}%I=uh<_@WZ>H*1m+$*DFTiZpsw+&sFSguYr{a z%9%B(UFbJczDldcY|V@1wr&iFQ9Vxs@UgB*TpUk2cJ!r^#|oe5#iMDreaT{!ur(wm zvzp#&g*qnLu|mzuXS6j$&)t2_-fQ+`tDdh(=Y~hNkZ5wF9BX`m+vAwKbEp?u><#zy z6pdHzPfu^-vr4 z=DMx$NcfVakPXk=<`5_!04DBooGqFwn}<4a@Fu)ln%sT)b?o4o{85|saz>nO5&!4D zaWeeLztWp}!anV!-DG@7H=7=GeRL`PPh!fC0;lVj>6-Z1o;865x_#}jcfqPMz5ar!k+f#B$1uM05uhue zXBlI--ib!lGCgk_-FVK?&51Kd&_ajz;A7qL&PT@QFW}tY`HU{;3r)LO#<1?_OHHkG5pTI z&;(hvBc6pSdJIji7-NE;pf5CYKGOy2mfqmSk{4?~x8e?VCfIo)LFYg#(FZZn)`a<3 z6KGAWk5oSvo!7q}>L>D8xz;menOrl;W;L5hw&vWiKl4bNQV;jJ=<9W#@zzG%a9dRx zj~jew=jb?5OGdo>`jp2xZ6$?H@S=M4N2sN42vZDrU9{sTWK~4tRk_ceeyDgn7h6v$ z7+3f3H^J2c17l3`H*nd=NiVC( z>_>)wsN*2M!~zp5*NNlL*yh3hYgV#Z#pYPlafcpzeD`%+h40}Dm%3@vW&1#rruLr6 z$BW_3hkL_6{JRf^-~DHw3?Du}9S&t9WRL}igG{t=d~9F#P}&SwaoqN-&p+YmFGTVf zLVoha(!_yPWSxs{$G48*>u>8z`k_|ebuQRvyGv(=m-2k`myd^k`a4H@QHs7ajBy94 z2}{OwTUyGiKeR2*FX-adEO8#f|HcwOOh_ib;J2n~f}g6{t~n6OOED;htX4>4Q9Ps5 z&M1pU8~Q|B##qdl+C#1{W4r;BFH8C6Pc2^{FPv?;VJt^IY{zw@^PzoF9R0NNmfW^` zk)`(aGJSnXXNlJ9QQuW=N!PTswwOR+;%S!i9#bg=9F@@}I=24>b6kqLY|REr11;YwU9>r8z)b8qrW#TJ zkK@E<&CrmG0y?5E#`50$_Z!c5m#2) z-+pI*`0iimTSb~k$J!^zm7MW~BF^Z1O63{Oeol(hacQ*d`+O5Dw3-@rkZhUQWehFp zIJoXSx<{3WcN<^kMUFYU0l_AMx%}ZVS-Gga&@Tm!W?DL-PJ&UY7?fi?bHK0?qMzmd;@$J%L%kyT7C+4ndH_BbWlanc zA6iR3$dqF?PpjfJ5nX<@D8~sV~PwXa}JlieeIV%c{aTH z`tkqE-kbecl4NIo9uXN!F12)3*V0wJvDs`cJxx-a83=lrd64)Z=ugz^ya{@dAP}M? 
zU?gEM#E2Y<1cqXBnHJgXz4pq?*faJ0&auz^xP|+@H=?qtDM*$X*UZlHonz+a?&sPy zHy^(CHaE>-IJ;s7_iA|Qo_4EH3b$jr38VInhlVnczE*plYk%Ko&ko}!aN5BPcm(SN z?5%0!PH{7AP-OYqTNT{i(GC{#rgyC=ziS1}r)N)fFFzICfZQ!I(I!&D1acCf+g`Jg zRrQ1l(Ql#Kj#b=&0x9*a+-jRLd>EUtN$?BcweDE8w2i;2xX>TtI1eR|TZeZboBC#s zfg;Dl0PToPgvQ``fi@N-5>o9PjqxD)2%LhJejzg*r^F2~$wZ%28$ke{f})d{&1Bp# z3v0X5B+(MGi6BFJ^_SvJu4X7ClO<}=Kb7rrYhbq4#FmHIJa+JBGR=ifV_t1O z;|E`mSRV9D4}~-C-2pz9xN-A*n8VF$4SdWRri7+rHrJ@N$u9SX22usKl<%Um1#)ED zw~_Dy3hvw6_+9<5{x(;t&Nf;2#$61YcL6{KquWw#_>%PKxCR5mVHeD5bJVZI7g%;j z0eM^B?&8L6+hd<SiTo!Bh-?CDU%*!3=$sie@Uc&)ToH zd8#FfKm5_TF5=II?|k!QxYmP-KR-JfP99tghngKvpBq3v+4P&I6X0L=rZ8 z*K=lY6~bSx9$W{G_4?;W_Ya5HAM6jm{q5(&x8GqGA*J|W1-aWCNbvy%9r|0pdo=v{ zpBxYW@WV4*6X;<6np|{K>c(-J)Th#T9;1Yhd7N-C`+E|tCH&5WT(l^wK~7%Kh2!It z7Z~iM?~&_Qwv>zJjIKBES(d2TILgy8SD9-{vL+qssg$&6Q)o(*Z5W?vfp8a(l-Y)vROkcN+iigZ%;A|%lUP=aEle6B5q2oW7?>&Aw`FB|Vl6F>yA|OjoZ4@FbM5DnjjBhPv?N#Q)^Q$6MQ_s2{y?sm zwN8&~x-p!aKFbBqNKLo#oiCB1=saPM4$ixmMJ4DwXyL0WoRRS?nI40P0Gr)H&J{rG zP1(cQ#s2VEUxH7ye)j390&eY&@#HK6?^9iSF2v(4sBG`2iRR(HzU03B+Wq01ZypTa z`pWa+^_Mu8wPZy0Xg%$oQt1y7`PJ$0@M`!MzyI>^-~OxX;Y_dT_IVfkt8t!b8I68_ znjmOY{3#2#_T|V`@-!2<%_og(5lsJOF~qBz3Ec1{Go~iQHN#3GxyYmhL1wQw9z!|f za>^xewApQf1URz#H_0V#>ho)Zu5j^g08IoCydzDZt9~E*C*k80y-i~|1{+2Ww4t ziMJ-Zgnb)YZ>!d8`8JryR)1@G6DGV~Hwd4@N3}KjrvYHPO~UJG<}Kx1n3*loeJu52 zh9=vEe3V){XZo@esf~;NLKxR0Fkfn{eE$a;!?Plx#wM@-(JX8u}oj#b;Zc|$u zUqDWt-Ue}!4S!z%&V;yB!pr3j@EEQA$yJ3PdJw8edu+l^OtgrK?OZB$F4TfXBx92Pvi z%PZoS7KyNYO5>Kx%X2LSh+MPSvE)OQ^NT(0AaWhM?wsfg?TLP6IuaJMlgO&ThyLc* zH@7R=0k%$fbiVe@{o%j(uU;Dd_5b*Ic>082sdUZ2RnEERd<&)>v^5~39i9cAA4h?X z=bwT}`oTIDq^^|Y=n>&5$7Q9_N0n0)O#ah<(YTP~!+k#zq7cF~JyU{=xRwR4CSyh3 zlX1NfAy2Mna801i8|?_Hq3sh4 z6ZGq*OL^>^=DxD21Y7?&6t&-KxgRqrn<#w7u>{e_Gw|X_OhT<>T;~>8mWIo5fP1@y z){98JpA+V0pWGMj64%%95;^6KeX|wDnf%C7#ihJC>70=Xv zCtughH6o9yMrWKyjVrmFRh;lNu5q;a8qZEd2hqMr{PwthVXc+~66=_LY~b_;{RyR&3##Jau=f z&AM*s8pU$AhmWqb<)|)N)4@hOucC(&WVQfxTCjhsr>Q3CvCF>Ytv-C}_ 
z_2JYqbufDx^cRsT?wXw{v(2VuoiE9+*;$e`%{sHd86z0i|H{{00Uy}0;D#=>tkDYb5g5ScAEm$5XTf|n`?jHnRWc$Fg zZ@$#WF@zD9!6@9~BgZN05%+Y#jO6jCsxQFV25v8D#q`lo8$bNv6rSdafyg&T-`SA7|xmDN4XM4kI$K0aig?uH{ee!sJ_z!g6DvY63LjE_8r zXf5aFHM75*lbolN+!DO`cz}Uqa;PbpkhS>i$3)s}HfG9`l69he4HLZsEo?qHq5m$mE6IIbW4Nxh zC1!R%txjNDN^xu?Lmy@Xl#iX)j28^|c_A$p>oH#xSRw9;U)7g%Zfk!R`6WBm56ZjX z^g6xXnjNtFWP9e#0%sF@rO#Bi6V5v@JKqnTO}?PBiC;~u$`*_&vnwug=vCcnw=dt2 zYyDxIUVdX8yAr6rx@RXDwBmYciYkdmY<#-t(!OH^bF9XC{Ul#N@3m zfj7s0K)P?{_>xy!S(m8)aZxi?*c5VJjA3%aYe_bi@ZjeIB`Iea$K+6X8`*@Ja^t6X z1Ww~b2W`}i>gm(V;nT-QTJJmzpXw)H)(kTn`&{es!20$6ko8r0Q>Ad z*E&xGKKX=Ma(ouHz<}3(-JrN>v8*F!$#^J-S?dU*6;K~(Y07;)Aoa?t_jF^^%(cEW zJbdYDc;&S->0+SyEiCYzJZ7RE4TGJ&j8ZDNEg{z!h|$hG5D+PI{F^`sgO@BHI zYhY!6rd0Gb=-@TD>O*H?g5FRv&P|cNZ|L!DDb555%{__ZR(@j6I&Da~Yy)M}u-$^k zXe-B_ZiH_elP|RoOtlT_lO1#Cx%I1#^~|dp#;dd?xj61}oSx%LLAIf3l`aar6dIXo zsuDVrV9=s!nsV2Lj}IxIBduEAe1G|SYyjO?l|XtJNmkHSU0t#vqbALZq(&R4vUGigtJ7mhW&EA%GR>^OZk#q zkGrI|Ml+uCHMyRS%^aikQr1gNb^AzO6PK|2+I515-kv{;$A5y2Kcfzoikf*9XDqkz zg7|1FE|ftS#g+=_L_gr8bmB&+i~F@+`0(lDD?K!%ZRvQjUpK#dhb+d+^^ZUNkFuq8 zlM&r2%(b(?WG}^SElgs=Df`?v0xrzvOj1Fcsv(eimYs5UZF67kmwnARbJLF`{4~*y zomkZQ;&P=2row__Jht3O#m|cLn$OYpaHR2q5Wcy+#gvN8kwBJg{%?LT zV`+PHt$7U$>^Pru?^4S?S-wVRpJ%($?oHc$TRpa&drZVJ4=CfQ72N@;SaaK(TUKq= za@B%vOxtyCxvFcPX_g_cG~_G`ua>E=rCyG@eY8$*2YRK|a^)GzIDMXac^>idraZQK zxL3dA+~3z1(PJhZ81vbl2zK|5WSa*Ob*+gYyX_952OJ5oPxUa!nI3R_{3(I9g6pSx z=uv+ed~M_gHi7;2nuF?kHOkR;lc%ojgUtgaG?CKX`DWFX(&2cm4@MbhCmFO8F~F`jW>p zZLA#^(^EDe!ZHg3Y16K0+wt8M2RZxF=f=Qicx(J64D0qhw;#H3Eo=OxdM~>qgZ>(R zk8QjvZ~dBMr`NS@>%!kDaE=jc?UozdV3oZ@tNdD?$;zQfoFTg;S?jkc&=qmWVD~9DZ=^(R--L5 z;!1xNQ`m{b{pkIl^OGVs58q%CmE+R9$%{=G;^LT2L_6Y!mOjv(W0xY6CQ^9or(40z zvNb85<8-3LyFH{Vt}^;+Eh zy@$ga?|vdETA9w~7{}QJ4Y;tD4`y9n@NxtCOJ6>;OoW>v4?~iJ!1`)*39u8A_9Sf# zrjZMCX+u>gnKiqUM@cs~<*Hs&3%!)WT;fk!mbz=cY0S=fw(0z)#q8so0V+7zSN1=3 zvn`Y-Ok>!#>$cX{O|SWq3^bDSDqqXy+rD)+)sdqkBtU9UBm;%%K62I$^`whs2u;^ZwT}k^@9PCK54Cg2 zJ9=@<*S|$m8K+HCM(B&RzBj!dGtg_|ItCv9A_%ABwxK21iZ2Sc_JiAS 
zSI5@$tFN_eJJknh$W+>rj&;dWJU^}dMmi}KB50iQh{YQEGKj|b;VGP~2_Y8Dr@={Rq{AXWUhkZF|M z*9-%%yVlN4v*h+bwCp4>jj+lyHFB=(8n2Uo((cDE{JaB9&`pM$RNURfVI6h`&ik?5 z6@ecYf7TzKY9Cpuae>CLN~6*60TN!$SyV^C=0mha_G_DvG7RO#6nD3YlWh)*a;4Tr zivn%!Yqs%Q;qchzXYh4n&-;viM%K;t2S0eK_0kWA*Iv6GKKbb0@Y0u#hx-q8XO7lk zN1mtIaE3Ddc&YW9AAfK(JXV~2!QEOu-Mhz{X)P6z$=th|U>dr9zSRP3$~(vVZlS`%cmJVeX9SR%prO|!&lzXi*J61Wnu_`lZh_* zED81K>bOpDZBj>c$;WN!7r8+jMkG;&24^^JBZCbX+SgKpL z*EhCxDz7PZn+J2Gknf%kGDXx#x#m>A<$1Y?v-np=yXsO2QuWC8O zPXlOn9{9R>1D!yLz-}Q$J2{uS3mpBZgF3?>eT1&RutS-IjlfW_r&*aRtuJKzRc=0c zLu1V$HxtQf?Q`st)PKmoARNazGBx9 zmXh$MH}vR0jt)P|a>I;iG@_13aiAnXJ$-=eVNq!z{(?R@PSJ%pw~!{jJdj&H-<_ip zW4HO7GZLiiTV~0PNE5GVk2AX9|bG?_h zEOIWByB@^$VGQ$(Ofu}nnEMoyX4Rsfze-nm4Cwb(f;-o}}% zj{JkTEbUw-pw`1Wrc4ZrqV&-8k8e#wGQE98ru@v#bmKKj1y>z146*(=dF z`XYO66!9?~2`Vhn%wOX5ep+^kUVUD|vTn_*TrF1_+gjsS*4NH9UmgG%y zUiGoj*P)S+Uk;qx#URD<(+*S`Wk5z=o_!%wov{O>To1CLZhYfe6fAF@W!hS9jGo_P zZsTwAe-oB@&bQg=`M0gRzc<;lOvQHl*7TNitY38~ugO+9bo;vT`rLRuw&_9&tl)#o zC8=HPz_J|qQVMEZCl7{SdHZ_!zkjBR+M)0&F6Z0~@)9a60NPj&|08Ig&V$j1xadkV z6s?`sLysG0K&9?9Q;GkX>@RiQZ$xK?g`YUJ_K-c2HIozm=;rf`t)=4WKW1Oh$cuf0 zingTpiH_UZI!k1pc+w8BsEIMPFWa6NXfUJXa4-<85|t2;Ov*=#8!lON9XxbeKc*y~ zt$$R?Mq*2JSo&l3_k;H@^>Ub_;VW+)44;0ouh}WBQM|7;-yuXM!SE+Nc<<-;Vy1b} z+Rc65WTAcw(;?pi1+@q1oDtG-SK}@eaWfWxqfR0Acf}V0fI*!XC+HbQ1)_$#|^$)4+mq|9pJF*SKm6Dj} z^?TlU967*oWR(YmZO%1pYs%Z0sbWD&I^;ZqDdiQyyK+DYo3hs?w6o5q+p98PR`+8I z%V)!V)dMk3Q-1y7+~E&@58}FbeRZr^X`Qd}#zq3^46vESeoAm14?VJUgkXA40kQ)3 z*Y>sK>tJ~Mond(W%izfeE`E98POGhN-FEvJ%=WQFoA?^Hy5?jz<9l9TpL0>)G1>Mm z;#JmV{Oi*a&GNnMwtCfD!bQKzxK6$#kF0r1{9vx=R$gz@SZnmEyHtj@95`Eghun<| z(Q@Jjy&S0Qf*k-mkr{sB(fBo95sTuI95i=eFaQ7c`nGoU{o%gYrf0t8-A|^wew)0< z>UFpAk)MX${05C~O?|!cC=(EAROpqq!|-qaLT~ocLooLk!;F;>(1#$Sy`ok0v4$Wr zA)gZRDv8i)5;=Jg@jdeZu$+iN21KYv(8pXXXU4YYTYb9dM`RL$4t2G7NMZBXiy@BF^)a;Y&pl=qgZFIhv%PIF^4XV2&4L5MUE%E1-^DM0k0O{+P)oxQ>}_ znR&;7>})J4FhiCi2#%X2Fg^C<1fB3K%tre z3p|fCr~xm%nx{s}MgImafF$RwCPj~f=-)ixVZ-#4%ui^x-{+QF(lc*Kr>9W|quR2* 
z^J=4(d#p7a%PryAzIo);hUt}7bt<-HDh>U@$86E5bAjt1OF0mV>y^Ttn0eOeLD0;XQk{k|;~XzNiEF36 zq=wFVpHzQp+wA{xp1$UlB7MTI`RHHPG}V5&zLwi3_w}xN#=DKYZJ@iPSNT=OeYm7! zKdOACEtPvaRR$YteAD~9>en)Iq}9OL#;Ve-%NSu0k~f35#`v5RwjHp~$?Pu$)`P;v z`?mX7_jJqm`6hV}tW8*!>2a6pOE{G_mfHoA@y2E8Zb=tYZ58w-{Y>}k?_Ljo`ghC< z5Uglzo-VS1%y4et*xtD5zpFVCrf&)Oy-81R4nJM6Y9NV+zZ@O6>s~w^;H>`QT7~862F4Opg%_SLV zZa+rGZV94z>IW_>Pe0{}e$601I@DU{z2VJw3E-?rp8flgUZws_4-&@1e(K3sCczE1 zS)s{eQaP*nnbs{Z<=9U_(Wrpeg4SfvHvQa;3hhyTdBHLbZqUl2QNfhDj-ev#2&`Ee z!t0$?KhWKsnQQ&epjxxq8GLIzNG{iAYn7>F^TjdXX5Yk&bvW2;;0C<*#VOl9I?|eg zE-lV}%XaXYy&wosqPbb-Q@qyhK2Lr6p-4x^P4&K%W5QqVgwT|oBacj<;HPde)`ZsR zOBm3MUu_z{*QxTAXL;wn53V~mj^!(s%T*S+rBwB68CmN)hh`jTq&^Q~`7qylg*7W~ z;u~Y8P%7K_ajr%4Ij``$eW8r8t2Ss-feBsKX)`n@Jjp_6do#zN*L6RjVjNHXQQWyv z+|)J;BQx5iZ{y2nyO)H;v}*YmneXD?ZS2vG9<#6acx(JMdevW(xr-ka*Jbd1Y;4Q9 zWolk!mjmaOld*#AT4q?}skp1gKR!7+lN%I?kE`ssp@U17#~%O}2`KD(6^SNF~><<59Ofd?orJk^yulATYVSywX0S%&;h$ z;^#{F&aC8vM{2__v}6M{V@NmAAXQv$IG~#wmZF;Ei9Y(#q)QKkHn=ldv(pggnRpZO zI@aX|q*-J_yL0_4`$Svx9qXk9{4~o_mOXvf$*_+ASvC}8vy_9MOLNU8 zUqJ|9GcCEBV{nuRm&SI+adH2J3Gs27P&IwazSgxP=whMM+Lb>A^LUkHBjru$LdW$| z>pA1Tt|bgSn8h_G8$pW*4)_8kipB!#Gjp9a%Rvs}ye9Jq!B6M}E z7)L@QvdEw8sbi<*(eL`8zoX;vfIbr+=OWPe1E_u(Zc6wgbtL4pdK3_Nk7>lXN7}nQG98lx<)3}5e120EU;P~vz_v=as9J)%w6?+y{-PliDU13?c3LFkDTNVTte#CTw8ZZw$l28 zRCz9MvTGUV6|a`z*SgyTeXR^{EmfT*nV#>u^CcbAEK~h8Zz+%Urb~=M1B(Q<%Ojh^ zEA~DYTm=ZPzIrwMm%q0+{J|ez3{SLW*@1$W&;cXH0)a>yXVN90KtF<~sAH(nf#xEo z;EjY{l-igIJn=yb-;klKzevb6Q~b2Z%rY~lUwd6-{k#~0&9Aq=wm6EHGajjT-EC_sl&Gq)>)D> z&+@$w#;EqHOwu_%fzv|A^^J8wm zYL4-=d9$CJW09qeMv-W~bu!N!&L!4q?}F1~;!EwD%6IuTxW?M1)Azw$Y*}Y5uRhdr zm0$Ct%JjO_YjT?_t(U31TD}oDn`xuF0$Fb@cSG0smA*-K$shX~7`1ciSyBz$qP%2p zsl$QnW&8TfZ1HUkZzg|>O2wt4GdO9vNF4SyR>{6q+t z7{(>mP%IQaBb&pZ1sM&4=le3bK&uo2wprz)L%je+FD`iZO~OKgKe(yh!Xpx4lV2%CEFFp3khsXH~J?;Cg0Kqc+m1QkE3+-B}`ul6t~(Q z%4vg#?Py8{eL1!+w6y0L@5xcW@CA+~NLJ4sL-<0b(WEb82l^F*06SiPeta)qk*=Wn zP>Ze)v~>vE^TqmXMcI2SC(!{S;7VhL zKW%}J3zMBZa!Csiv~TEFJWC~3G;YMR+=O3kWqI-`k*wpq$M5AUpYo)aN2AiJAJF?c 
zG}45TWw))$yKH})CxSn-KU{NPqMN@YnUivS=CS9vwChD2amzu^F&NA87*}1gU*v8_ zPjkIfE!Fgdw+50XtZ6Km=Da9(Iz4B(?snPdWvS}R{5VxVD3ici!ZQuIegbh9yHy4( z+pgs*L)kVdTb{C$`R4U?%hlX6(9k16Gu>D>0_R@X^+2c}0uyqjY%H(hENI=;B+qme%rMdSt6$NY z*;k$q?|-^KJl3{HPc9CJQw1O_4>;!)wVLEOQ^0n~o}>z_FLhmHsQ{D9%r3;Q?5c2E z=WHRqqJ4S0&T}YUy&B_ziz@L z%rIvd4ZPX0zIE~YLfcZY%z>o~EMtiz!R%)}ItNMm*$)N>a7wj?4L2pNi!cVq#aCz+ zoS?c=MV|V??adq-S#YMQmqS!`yknZ4FaGEVMkZ0cc69(+`w z@JxTtpR>QVjEwzd-_q3d=?2LYiA3LsM6jPrN%Bj9&kqi01lIIW95iq#XSbw&3$i1C zo-F0BOVr)x=@V_9`f!q8DY(Kc%1|Q5(aHUY?YPrYl)wl7HU&Gr) z7yTtUXimnjH1no>^F?pT#-uwhAAq3x~xP{e5t3sE;ThA+a6u+ zE12FJp70u4#T-wyng0_7)@P#emOp~)3(2Cx-k>}v#wmB5k%21NfBbw;3kVVRCBLV2 z^|bpyPVDIg9ry2RIl}#e;jNdR55M+K&OqC1&go4sZV2O}ZuvMK3V*7RAb-SJ#%IJLf1HiCJsZ_BHnmaXwtD(q z>zn+jn7hhVoq1Vgg$rehzhgFMOfs$a{N z-!%u%*yy-yJH}a~o1cO6!bw{}EnM?O=j+v?2`|c3pwCe5JK;7^eMj9!_luTwAAVjw zZ^K>V+poSqs(dZ?a@23jM;^ONiCL*lc;;E(^(8FgGZsPcitBtaM+RtlkJUq2S}Jj% zpyGjEn8FRw0ZCvIsPV_lv(_SCC|EwzQh_sV-p_3HV+Gg**^d=RoGFetjeuK$xMT^g z^{;xkj z96oqY#E6UX4xcijg>1Hv_TaPor0)ykCLKlkmc8!!%aeWsZN4UA$8|$OboyxhCqnv! 
z1^p4ZHGrv~CatA*(QmCQN53ZxO414efZdWTsl_SU04i28+NT&H+1$&qgE!4tD>|2S zhN|L6G}Q;x8T$vd zo8!*y!%^_t7+3y8OK_f?>vhi83dF_R*N!sxv{Zz>%nxX0zuHKi`jL)MTC-WPLa@oYyp~rz+pzty@_)wDpj68;l99mW}NkzEiJ@Ud6WD z60P!#Q*D@LIp;YHOlH)dv=LFpZvFvzsEQOblh$gjggz==H z{5oc$wM7}hwO&8_RKfJ=`F_k;pK7+6mu0-H-7{W$Su@{y%hr*0i%^W9#Ox|o6f{$9 ziFJGr?%x~!(|`GV_}71+nQ!ef!B6ekF4~mRl++-+YCH%x)6`dBgo=ddizcf)QQ!qy z$idB?NLlRjXS~ocMuQ?VzmRq-YB>N806x-WX zBw_l574D7VBPE4`-uknZ$M%gn?k5pQt$(sYz1av$4PWDq@r4GVYp%N-HKKoVEvMuI zHa=S05)&j+a;93d5WM`NCkeiMF-}#apF&AA`YpOJNXSvhV@SoQq12RvrVloHLHShg zQvUv5{&e{6Z+%4%ZXRoLo2^bxH0Dp!)`Wt>cqRXI(HiU%J>Yn(HQD@eejMpb<%#z5J>rdy@+E?51=sh~x3R>8EgaQ8 zjTQyivHRd5BZ2x$7%ACp1x0QfofUv`aCsdIUJefO-J86(=sQJ zM=y1ndJ~C8McMpGmvK2^f^8oyl~!&cSWu;rZ3UVBd@+9 z%U`9f*{T?Q**2?g#f2W%J}HJxz8h{yRlgUl{=#$J`6k^>JnL6X+wOTiPS3aAE_uZ= z4mos^X~wGbs%t*^8XcaoYi>HZb)7>aje&Dkcl)%F-K0~hZ}1vMFSpIcOn$nGQ;JRzTq!7Jzs$028YI2=+#&2*dG&7Fo_59T#kBB^8*p}-DpV#NCBh5#0hG1 zX`&JVAMmHM@I-ogb2M=JV^2{o;^=X6*Po*B40c2hi|~+|r|iS4;cMT#H~fp=e?I&l z|7L&q_(P#E8_ygf2Eb;KEJ;GVE#bMYM70OKKaV}aBQ`{ zOSWXdXg)&k;s zB^(SNJ#^ET$?w#Xx|A{;thVT(OranBqq+_XFVq2r6l029|H3B;+fK*`B=tjFrV;8AYR z$1%{hP+Du9uQ<#}Wgkmw)n4M@vw&MN4!O(bIj{22sONd%ez4?} zFX>F&d8x;@vnY)1 z`e8%8{B)Z3oX=2i9ibli3?QGM>RMmCD2v8-zJyx`(k8hjT>#8LGrwqe(OJXl>Bg=; z)^acJI!8N>IODUWQZ#8b_la^-VApXp*)^8iauaiY(wcAiaqBS!g0r@d5oX%jwcYFHT{95a=S(B}$BU&e@hCfx!GWVsO#CV)biWtprua>zt%6$h!dWD3WmnyIO# z5)~!!++P{w@sJun;a?4Je&sM{`YoEyM_{zCfJh>dg5B&l(MHDyC^CW%>1g&xvm1~h zjR2`4BxP!Q)>A6wN-N7TzKLO^Ies`bfx0wPCtyph8QHRtr|gw^TkOIAwp1&(S7YK!WYflR*&GCnQ(&Y!{MHS=bWKdt^nI_ zY&?$OS~9HD)*mt_>?RVk*Peur z@*cxFYxFgK@0;Z|bg{{a-roQ=VyAD@yVWYE#-LfEM5$9 zrX?MxD)U|td>(KDf?mqV4vFS>u1A{|Ka1|wJ$v!4hd*AE}!k4 zdl*C)0m1Q&k0tH)Qv!{hT;1N*3r@8|IRj_Tj!lUJRAdb9M$A+<98BvWUmcn0cNJ_@ zy~sJF zcHMaSWoG3h9hfZC(5+bk@3n%9E4@7ALIL)<){>rUZ{#xt(@*1XZ+QIa^Wox@xAX05 zD?VnfSstNVwe(nt5&a%tV$zeCYf}T-(ZXQ6W#mo6{<=No5@VL+xyejNbVL& z^>Pgi9+)MW?L3X##3?FKq+QmDUWrNvj^vD_3U!Wy8fX0gj1RXpDHA`GCb3bLLf_&^ zEIE?{ln6{8KRMTKGgrgQT5G-+Z|BUw5KSMi=vUgPar@_gb2|L=1MS7FfE53E=#xMg 
zZ#hUYW~c=l0sW5w#JNFWEIGxMf!W4nzOX2RWfT-@hY+H~o8-^*1@lbX;GF6tc>alq z`Xz=GGuL56_$=Sh-_g<0@amV2hqu4-((vv(FAtwQJRTmL9}Vw3JRe>@*4~xaV7Ud( zBux4fi232M6t1Juz63;_^H^TVnmF`qj_rgKO>4Ddc3Tp8UeGV#@`3HBKoG8|xY6>; zOa3HZ)S6E6>>JqmC;Li0mTC34x*Q4%A3eWf_Pk!*d~{syWj57&9qQG-b$k52j$GC2 zaeVCxxj05f{>PQO4ZZ)>%T2s0XDnzg_j#4SO&J{gaB|uB&@0X5Ufy-|j9D?@yY76q zr5eS#3Sxt^T(VPBW}G`!j76jEw1N7LfNmLb^G&nfTK=Nts-0c2tIn?FrM|Il&s+1O z(yi0?pXp%NvTfZp--NZq!&m5}^xLDv_1k;WW6trjy{1)k_gm#vnsJ?1-EC!pWo}S3 z)%e?SBpmlmKDgnM=pX(xTdW|0<;~0t$Bb1(j@(ft%}>!%Q4Bgf(pq6|%Apo`s;Yji ztwPxMmw=gv@y<>aSU=XA`?Lo2iT1iaeWIO16j(ofqSuw5DiD|4wRQ^ORd$*8LyNj`9;IXuzBklHHn`2OYa+y7KA=FrdT8Xm&UF_P;^ zF{U=XdOduk_1|*L)m@4Nq$WRdOpw4`jBg2OUAR0kWI6U zaa~`M?R9z_Fsh8}y9Lg+`~vgp%tqKYvUZhsXF1=9y#sSsyK7LNReue)(l^=I#H$#! zyrxs>wY-aNm2ugFuwt3!yz;E)+`Jefana0mx_OcIMb*-qbK6^zg13}xA6_jlVXa|Q zUCV$)xz>@ZH0sMq7njjpE;Qf}u|nj6ty^rwk$TX;U*dkGWtK$imimtNi*UVB~pT7UOgJ2&JT>hOz{#{__E z>w^c+hKG+1hL1l&TU32yAFJN%pa*lR!b3k(9GfVn4bwAEJ54goM$FKJ!s-NER2xz1 zAs^0Y3`in9ZT>(nzF=VTY=XOKB9ZbFIdCTVpmg<)n=06rgMkzBR+{@d*G(@OULSt& zczE~SSJdZxNmF1YESltT9)9Rs%_sWu_2Zvw##e;tq15EguyJIT@K1!+f5F0-fU@r! 
zUyL{pM8!6CNY7d8L9k7xlouP8@fw4eUyV`A^&d*6F|c>R^5;b%{d zhKE|xbbO$nZ&CxukR|uIJbpRmORl~keDv}4@YjEFIehfNwU$t^bcA);$HR$s5job@ zmpuH)`s*XD(`Nj$EoKDNlzG5WIq^OZ9J5s)Yp!Fhw`log(ya||f)*v3*xF57TPw3G z!~Z8ukvNZ9IgUYfE9|9-==up(M7|uDm&Y&r zH)-|LHpoe?SD7Wc<$JmGGqKzbWL!qiHk=zHPFwidxgrZEpk|G;_-fw!*4KN*o?qM0 z%P!S>{UsUGmav!fdpTs>hsGkW^d%h2R-UnqS9w+5a;B~2OZv$E(gNpAj=IrnGB1+8 zYX`nan_mPL2Yg#Un(x8U_l4=bzr9?~|19-2n`@YE!y5lKbhoQw*76dTb>P)vJs&;RQ3-3c^{NoNr`Q@Gg67 zJNEjU!|>Kuo)2ID*1h4&Z^^N?4ts3w*n)JLhq+#VL$l+bYAK2W^xDq6r3rYX;`7+p zpu6s%q7hSW3~1xlIBC)!Iu|o3SBDTBL&>mL{SwUl#P<-75Bor#DQ%~2ge}HTK)X0f zBRMBZF#m^4a+~d@bu+^fGVL$iu})MPZ$ZuBFhq1FI)9l>9qRoYu=VFtWbFtN<1o+bU z;~#&dWl$%>!{2;eUncj5_fEBJ<0S%T=AD;Po-YgtzHq+x-m~Ek|NHlbzx~T|EwAEB zWWG@5Kmk3=tax)Me}~#7t#XsQn>cS?-jX!4HwNxbfCI%_JPCqhIiM*S?qU4ZntwMwYWj;(l6On^C z>k`Rx0leBH?U+OtL8m^Y48W=5YwG2-3L3^U`T8}TZ%LnYurHKq+R9y?$9cl8Y~xiL zEqK=Z`PM=Y{LcxJ*SG%>ze1>I(@#z z@9}!Qo(~SGr$ehe_|&%s&K5MNL8yNo=`Q%_EHxPaHu2!E>A_n{+whijD(x=%ch#wO zU9MPt+xxn8`g}>Q;?{DnTkGKLDqHnjMmG}|YD&Uul-p+ZB~Y4QeYwZ9edq99c5a=D zZ(1*7dN1obISIWrU1N}2uP>*JZi%g(#&Vwg-9KY%HuZh_#}QyZ<-EzpjW?#ap=SWb zOxclsc7FJPn}_J!q~oS|B@BL^Q_!GU=PO0Zm)g_%T(8qU(^8PLCwgd1ameult$Y?ajU6=Reh62<#*h9^e-#$B%Cy*4BIvALs!`J>+<#^3f45 z3sQis{jQI+A0~gtqVW)I{OODE{=t<3ZGvps(4W6r=JJc|(+mJ3AmhCgyKiUwMX~4RTmEW7I@;$G|uwCO-UDK8@HqokHEpO7f zJ5RTO@lrvM(~u$xrNUi`nU*YZ<5VXGecW|GG)VZ$vXB$aiXH1F{^+5w^~S^qutR_w zv;7#1%6xt&xVT=hmbw`1yPdeB^+T$LZLN-S%i_ z`ee$Khpi2KOWHTg;7jzKK3EQPd7-!cMIfdZnINDv$|IxW_o;q$;5ZWOj441;b~ct9 zeUwunY_`18N5+h(3SZosFCk;TXl+!?a{LW(>maYO#|-Yd9)SF-zxw&`rGNHSz4+*0 z`0(s#_{xJ*!Qxe>slBQT>r#8_{ZIehd&9>cXa-n2h$xH|S<8+TY{r9u%}#{MYA;_@ zX*RROd`Sf_Uz8&&ir&xl>&?^jQ20F8Ai6~P@^PTGR)^YVk0n<}3bK!nk0Q`!2Ku<| z9>V_D1li1LpXdR|SQesyT1D+nLvXIZK|%HLv3@;Z<~r)~L=P+O6I?TCjeYfN2H5Uj z(ehLk4~IBsB@^c@I<$p8%D$QoZFT^%AwMw?ZaUH*1NVsrKfl;hasrQjbUpLLraT?1 zxkQ!gl$+peY|fO039Q;2XE0?sknNobMg<)FAvurA4j-0xzJp4L*k+#_$8>~E>#>*U zOSFmsE`OC)ahL2`9=XbQx$=$G`$4_RyWY$9dgv?$&OLyEUS!@ob60FP{I>eNed9Rq 
z@z&_Q+_rjecbojKc6XIqv)3QRZQHfx#~Qubx*-$iTwVy{{ELgyB-OWV;!Gv* zYqGscuVrNWdevXzulZ5wwcO*~wZ4YA&Hvr)~^Doo51==OGQ|FP5Y?F5_jIe-!mHaHz-&6k~Kd*O25m-qapY)M7p)})xW zr}`IB+GcYbf62$Hv#G7{s%*ttqg%%OiUF~$6Wvg5-Q!l&o zOI>eya~&Hs*-lTdGA<)q<@$P+qg-VvBfqB8%NuLgdpDIpQ**j_5dXO&7b4V!G1kOU)%5qS5IW{!DighHqV zCP5%GlJqZoY4gP>L1*rTH60s8RRoRsX2A(BK*xTGT9Wvz{Sm%|;S^GlT3C!PU#_=? zrPOYps2DnMIcLFcfsn=(>$rdM{?p+lZTIq2ap}2!(a;2Lrq*D$n{oChfAmDll6aU; z(BU5j2(Gl9WVAO8HrE4Zbc3nn=1zslRO^c5l2T6!P)z_ScDt$`Q=A=;KmjMFl zi!-*fWX4*Ju*0HYhk%+dLK$GQ9b_&C;n##CmWA+=8TPtX9(!T)_C_9PWNC=jV8dqy zn}->Xj#zW8@e%8)l zM6A$2n3Tsfm9y+VOTIswZC5wuc@88dg4DCWH>&*-m{Xs8zQ*ZO+j73E#U=Tk*4KB% zwp{fOTCHnmc@GwgheWA-e#Pa%`1#nQ6YpxTcG&0R-Gus&$<|$bc@a9d$CxfkT*`7t zGp^gsY-x;7UA|hv52sp&*4KBD>vj72Cfsd2+h59U$GK%b%Q?~elMnE?zv%bn^wHkBJS>#qA0Yd0ms%{YQ&2hj1xMXk5i zPwW>8d@i*c$hEfSyHfp1ds?%Wm*AR!8-Ms2Jx%IHp?};ipJ{C6O`f0^HhId^1WE*s z!4p3dg(kz;Cip~0S^jLVVw4jbozOsqLQ8?Ia`I9(^-|w*nTJ&1;*WLbGF`2+XXE>< z16I&{sfRrya29kwCTY`4+HX7F09dU>f0ag5ljS^Z9ap)N4H}6+%cjB#ZbfMw#Bt(k zi9xR+u5h#r+M{D%H(QvLnzAbq$_w=f~j+anQxg*Khke}$L__&mDa6u zZ)N6MzG&=+KeR>vxdszlA0FhoYLYPv?X&K0XD(( zRqzg&$=0%voVnJFwU&zTr`-c(hb1DiEnC?aG?IF2m)cL?=C#AeL1$s~1+8k=E7cj7 zc^vTl1*X;?@r4jS=^UopqmN#U9nK|RAC2hqtEZd4DZh)(5?=Ld3A6HRc?k>JHW~Oe z*{12WPT6=aqwBJH6_4`ti%!p}bw*?7Ol9A->;|B}=HNE$RlOa2Uec{J%B1S2{i$-M zk+0F=E#W|~sh8P=*YoZE#WDD2K2GGncN_=`Y0C5#mBG z7hz|IeWq6t(3%*E=bSbb^a4K9QV9aX}}9geb)#8q^zo z1Qo%^oJX~MK%eY<^2uogmHT?T-EEQnyX&rYQJH1Kro@Tpw?jV9}EeYYxjVuW{;1)1)k@JP~ zyNn}INa5$Cn8XdAOY+&bSX7@y*Ki(p=Vr52p_lxre3!A=rzM(?>k_ZmU#jowk9BQ_ zyz2IJVtQ+7-SaNVh zbVgp)>wNAzrFLWmta!CtWhk4sgtJ8J^>0((70*811-sYn>lJfXf2t1Us^_xvUUpL* z9n$0@H=-bjNa4jK+i|`N>L%IA-$X)ojkXD==hb>G@8a(lA%jnSJ2%;_vbDTPXA^Jg zlME7iS%y;v&{NW38KU+yM2nGCwWr6&l)}z?h6xa0r6+ zr+|UrI(H3GAg$T!OT7l2J**?JR&dSCwU>>AoZ=0BI%nn&zamI)Ggu6FeAs73O=Z=l zQ$Vz6`*==Juv`>TKt%~Z)qeTE)aUfkm{K(R1f5ogZy&NOUMOk3tiE*chd;*(bl4g& z6)Q0Ps#jOPtXW4*C0-9#>NWWIT;PFCx9sP@O0G`Bh z_Z?=6<%{}&UuVwLZ>Q(lK}7ql=Z@z3KgxC2 
z3aSrT5~3OGdkVhU`}+7kK{m76EDs^**B4>}Y_{qF$-^Z)DQ?8-WPjOJchZaS!=38gn6+Yj(M{>YCh=F3)iha{a~UOKqxF+`c7$tL{>{(kkZvUtaHl zrGB>b(s=dpL59nmQ_>ko%nEr-aYM`DML0kzD(xJB;&GROc1l5KRw;vOCV!b71Me6t z-K;4;*UTiraR$~3y$P@tNU-)b_OMp)e68S`+3HvlqJWz+Ylc~)o${#zo?l8sId?En z9==1ECs~~lXyKd!TU5YWHd7eoZ)&9+KdbZeJP(iU4G$hRs_^=;PN4ky%lGsWk-gy; zAA2?qkDL~~GrO8Mjg~|ZCw=(*MS!G?>r!-(v7>ntso~YJMc{X%zuNU}G$}Y{RnN6= zHM>%99N{&%G)CL?@1qa-rGNli65$tl$|E?pWrq;0oTOu8Q}Sgv+7p7QVS4JCW;DiX z!B??W<%DEH%(Cz%zX+C*#oqW&Wzej4z?Vr0#j#Kygf{)49ZBNLm447k`#g??CbQKV z-}`|9yn$|+XCK{CX>@V& ze+l6(*h@M~v?Y5zZB6gC^oqNtiwwM_;{t8tQjO@w7_2$?tHHaL!x@x0>2^#wx~YMi z_-l>d(ezfa&x*gvcN^c-cFQc~yI@;yN!B#ymc31mE@`a|OZ3~?!vq)HFcs!DUjT07 zzh!svskUXU-KM)wYTdSW&9P^{J`2vS_H4`Yw(mOm8m-D~D)%-v$;QoxW3PVT&uIz6 z{3TxMfuWmOTx)X7f}icl8)ghJro~7y$E`1lD#yA|0tIM6&heOTLEx{^P>LXlj3665 zmM=6__>$*m<$V1!eA+1t$;Xlq1=jlYJKnfReyKo>wbn6n&Fr<-ix6nD);b@46oy=< zJp^*(;6=aSKNg!$%~hOeDYz+Pl5(i&u}jf#(Nn&i`Y}t$Okn)fuANNoYkR@aj4XA7 z0Gj+d(NFDPfBR^7?~`*GA&6=6O(^5E)cugW_9>zcp~$z8TaGyj5YM=EAo{} z;;B3u4&>$!WF%OBjST(C!;C6&Zs??lFQGvYt@%Jx*foST(f2hb-+1HE@YS!sGQ9ET z{o%2e+_dRz4~Q*@=CuA#x_~=D}F7TUvv8fZJVt1mgFjqv7Glj<5WJF zwZ7I*)wXrev%JgXq)EPBfVg;3caG$gD%M2ZUfR@7YgeC5BcJ@7sN4Lg{I#;}IiJu! 
zpHklkOLk4GKKI^KnV#3%fRDWCQ&~65NE~Z|*}Q2%>Se9t9GNNUMlA?0)qw5*GHKX` z%FQZ+W^jU%4E@}iGTiX9-NBxH7!zV@H$TVaAY#Z)jN0&rQbf38%R*0)7(aDmnxCA( zO?RLnK~8Y2KgFCeYfWGs4=%E%ng_ZFu(c3E^ef7m!pqrhbg~?K|F-tIO*vyG!$T1I`^e1^qVX{5P0|M>Hg{p9r<4**~ zpp~UWjK^{OhhX}cHb-Z|>bSNlEXam}?59sJhaddlz2PT6{b=~!Z@)Wy>szl5AN_)z zbWVo1zp9*m{r=}W$> z$ynC8FRWRQtFBqI+;}tM=UA!s*2+ufElc0Hj9e`*>D`598_t(7%)>tAZE49*>#ku{ zooZvbj1NA}lRvgm{j2(wp0ZGCP*5e7%f_?6bVptyeXP=-NiJ^&mIu|mzHEz@b#6J= z$$c2e9n|Nt5Pd2d;f;#4?MUmbgB~+;fuxVan9RLW#2gPa5=3iu`YL_`XXgv8yS~_u z8SI#~X7`W?#uaqLj9z|;h$SJaXMB|H=59fM@P*<;+m5~Z>cipDBewrz-6;=*`AZMJ zkf!@f?hdj)eCMn8hOfVQZ}_{PD0tRe^!ByqFJ@9Y?X_wWgCQJsM$jo{NgV8@bOe%l zutHBqew3@_VEUuE2cHzjz;-MX_)SnWc4`^4w)sNeRF2uYEo}PS$??V)fd|@Af@8<* zXvhSFar%B5jYa=PzcoLqT8;~uKruE=GC6DE3Rctqh9S<-?}~D`sQ1Ar@-0DOSyrT zcz347i8OUEIBwu=F>8ly(%n_AV(g+*JG|irkgas< znr~gxo%cE^uQ4-Ti{|6UdY@O0pZ2-8vDSuOuCMocn{4#Dx2fB2=S#Zg^*&YEZT?mM z8cvm|*nOGnFPDiv85e(DwoG5Ibhpnm@}5?GyNP9cQ@~=ZB|nXS6USON^SM7Q_zJPw z5tM6P?I1*mo3v(ZqhHXKpSt=VDTsZp7gC%M%t-TD%!Dz^sh_?Hsuj>N8_h$CJn)vw zLo{pMmV+>Z%@%VP3O;hpH71xf9<+{hU+YK@hlh{!s%NbsS*)oP9k$#bK6BD#IP~LgbL_fE!)63uYX`f zA0ylJ_w{qYm8SHsw4=&>J$y!*a0N)e>yQ3<&OXTvpRodB_R$Tafs-HbbYNt!mx7;t zisRsA69PNNfi;xtKYpb=(&(5^y(I(yhy-7)#?8^!##+e6Ka)AgG7cKdpjtz~19FX^oL21d_Y(yz8^`7;O3edkp0JC|c{&$V}LRNonMlrO%v z&F|azyGeEvZ<`PBt?%4AuDj=`FD1)6x1RH=Q|+1F;~J;(d)mZvi7QcFje1$zwB(D< zd;cr`8t*nf_3f$nvog)^4$*Dw%&=}*S!?I6Fm9vsUG#1vy9v)Yp{Ya_^JuWAD845+=PT0dp#A;8=sjEA%^;QlENLGDxhq^)HZ()rd` z_J@D|o5#Z+{iOnPR&6jfF8`trQu!ld`&NJez}w4VvBV{THV6vAP5$rl)-u7Fe52;q(FL0_L)Pw<7$fUyq? 
zd`L7#DWvp~$EQ(L$2QL>msteQ-Ry@%H(wKLT*!15;yL zJATsJcny~rFWBt_TXj5kKA1%eEb|L_V{uI<`o7`yzOUKnv6kd|ESy`CgKk;#dwgV^ z)_A+<8iU+6E6o_D^>yf`kw@T6VF}D2R*K`?4(2X8RsXK#UF@uNXtlYEOqHj+i;iWS z_ZZviZXfv3-tYsL7wp(+zZjxI*ZaI$OaeUJbex;vz?dC?D0T2Abq$%B+M|;0JUzJ%5&K{POizh z0hZl0@9~!ODt0Yb*`DsUIj^!VqqB*R+?vcLy`I9s?OtrCZN=<+3+fUPIEte&>=OQKl=2dzVc8?iA3m?eUb}opF zq$=C%)Vgg~c|a`Vd;~6qNk{GNF2oh1sN5(TW0Re#Wcf;~W#d?myr#Zh<*QsRSD9Xa zNe&u1y$_}(hW(m4&UR{NrYvJKXAR9Ww(aL>XBkr&kCX0%G4Uy--RZowA7G{(e4YHQ z>-zoXKx1TgV-sYeg!DM;iQ%%_koEKffAh_S9_wwkG=!kpr!WvpI>LwsVCc#OzHm-v z-z7JFNgY$1kL_g{gJxl6C4%SQ|L6CIUw!9z`2LTsh9CXna`@!@QcFYfb?8^xuaC9a zv^QRqLV$_u(ZsMWc#!f-N5<}H-+hFA^;C2%9=D?W2LxHTA)zvu`joQ!D+rB-8TJ)^ zpqF=iN&5zW{q6SUL-Out9)=vWg&_MKzxDQ^mTMdjAAfu)3tI22FDTJ}Dy5eK%z`ZR zseTCwuc9A9oGE+0?2IY>jbnnSQu<#a$RxK<+Nm~=jZCs#ft-FOC8R^Pfg?^I>0b;b za{Zee#C|&_$VxZ*M@6-2EW>gFzWT%QOAG2t3B{q;?6Ez_wqiA%7XC zW(?P>jLYyymNmW7tiP65ovODBea*gYZNgZRsd#;x%v-{&JoGAdUw0dknSGb)4fgfk zuv`OIJ3VmFnBOdo9h~Bq>r>8o z#Q0@gLd~@9-*o=DJ+_s87$Na`8r+KQx~OBjCWNVE8xtjcre1kBeM*0=*UMtp81`%1 z@kqHu%y8(Qe7GKd_1%Nv8}BHPxqP5kSo4}^ZRMs{J%4(t*H-I|bRVDZ4Np$5hsURb z@@K=-vny@Mrw0mIqpiUDI(7@u@lgF*K`S9!M%Ni=MF7M4ZUT0WS^G!O#jYMKtB=OL&flwmPJ2PUfZzUkK37A!n+;9a^Kt*)Ec4N%hxy&Z~YcR~KNH*at+*L#~I1EDL!=Sgn5wXkuS$^$#=8yru6#4=bKu z>dAtc^Hrw16HD!6`frZ>{@RyzHionI>eA0&4zowo0F3#zjn&9*u|13*IUHiIj< zm@DMVInL`aQ@^JdYrOvYOT$}wsm3?Hyf=LHbp`0Lox}Vmd>?;u*uVWx9}Iu_AI>z3 zF1h%^ufS3~uyYh*WGRL9!pQOV|8Vy%%aSBXdfv%fRdw}crZGJLhr=Nt#E5|4WydXd zTyR0*yK=`Jw|oPNa7Ph>TtFbg>F&y|%99uBr|Msrx`nw%oRgW=Gq9=%HB8?}mTp(Kk~7YB zmn*+py4jx`9oLnOi{0C|KRx{Mhj$O(eiM(d<-!}kaso@4`)K=C9fQFG^L-9K`8c-L&9Ub`=PFix_v!U+U-9{Is4vCqFDUe2l=AH*SJEox zg_L`?^)_F!N2T%|pVy8XeeBwwr*76CNKm|w1U}`;^;o^m+J^bzO zzIphs|L*0(-^IlFRXozU?omz0>Kwv{SM@#gQU2~_Z&aY8vf#`?ovNS z-(%U2qvutT+TWSSJdf;0c=Wyh_xg)7*UkZ_`B10K?b42S?eY%Gz9tSWb;` zjJK}KiSw=_aWWc2ho4LK#Qt14*w>zmI^}BcyRO$)JS&~tN3wHvOy}zLj=5FGk75kl zM}9qySbdLtx?X>Mv2}mzO6+3QDY0dVi`%Q0-^3T#_W_RepWKVem*zO(uYA(Fe=|3= zD<5@Au;!=VJ(3B3=eWn`9?aZ_TR!GaF4k}PJjec2e*4k==JB%KW$(l6+imv2IJB)7 
zpWkZrrS6w(=di9{$K>Q<`)nC2&UB)TW~=F2s{I7}il+JXYW25Px6oI9>v-55w~li@ z(qo>DqkT7C^_$ixEB}f)Fw}a?7qO*+3Gv4l?_e>eq2wykCC%;^B9{;i+?B%)(tl*v7%+nt#EHRqMFrUi>|tTS9VM zNKCN*5G&WekL@5#uz!jlE0V%5<6hPO{I{ERE*hm?Fd{v_vQE!DwUlJ~M_@3!Mu^-Ssl zvxCaA)|fJ+gr_E;1+1JA#Q!Vo4=dKeyFCN17PEy?!s(SuV59w*Bhy(#EIeU@Y#< zXVnk5DaOynqOBgO5A~k!%+YONk4o<=nBMNOW`6nRIQppXIO;P`k7r$sZ8?ND_({;D%=A?gbuY8HIioL=< zMJMOn2leK6#Ca?4m0#!9-gR=`(`Ws|-y^p?`!_K=FMZ(|C9gp^~3-8KmX;!|N6iD!^7|Y;obIQ zHT-uDmp)^tXfc@!3^}49{}7NDHdjlxEheawjz2@LdYA3m~>zYzUBdO<=7Aa-ztQ}IC*H5wPl283ekNX0-gN{tp{Qus`% zI#%NszC`hSeD@O%0pi}|u|GZT@yu=OUT|DfU&%&mV%I(#TA+^k<=Rxy*Xz3De&)i>y_YHlc3*x5ew zYggYnsB7K6T1cro2(T|Y6lu^UG$80+FzIxrnay|_L+{IkFM zi#&J#@-Kc9tIprXk2`*N_`@IL2RFGoRedv;nS0Gy7rmZO_v>7}KI6O1`aKx2D`~%H zoBcS_>vx2k{qAYs;_CKZKjQ9Vv%<$AUl#nt_xcLF#|;E}e9b{wi2C9i=w2J2$*!I2 z^0klf49T{**{EJQ+i7dFFCC*zTpx$RRX@zHI6A{o9l!B;S;g+2a~|Ecs*6{G>GhhF ztUOECv1n?NjHHp5jU8W(f`>Hh?8d16)if!U6oUn-a;hY* zQ{mWv+FRh)qjC&fk`*7H89$FbPwPiE^-A#6b1ZTIWVn{y=i0j0cpSPUVc8TU?oonJESPy6s8R#@`l z_c8t%G;U^}%P?{2;vD7zxN(B*V=gCgy1vJ?e#)B~ARyQ#t&P zuTIz4;gyGceT@vzpDqmL>hj)r zqb2_l?3NwBBTVn>5#}f^di$k3$2!(^uI%39TEZkXC#Abn37?%JL#%U+W0yI7jBO;Z z9{%;edGYYu-@SbJcmLP-5C8AKd-w3eA7aZ$tbVcrihPW{*K8xkEF^B#TpBuO{ zWRPW@yx2RD6UW$+l0R(BNTK6@>R@@NzU6zV(yqx~A;BG&#B9JE*NqMjV)U`5&Zv#B zBN`25I{*Mc07*naR6?I%Ry3~LkW`CzkAHGB{l%8L&2P9}*UWbt3jwH!@jh0VU%smE zh+P5#uVfvE-q$^3>oc91^4{Z~-y-ik{?Dhlc+Om&zt7?CKD}nst+~XC$}MJYGft`Gwk70FB}v$fT;^K2EX-E94BLodF*7rp z*^KW#KYag&*X#XyKcA1Qkx(_*vnGRmHHm6R3q5GvGynqeMPxu7NYesIc7iG4hvj9c}NdsK$U#YE?m z;UBmvY&0Y&pknmPECN&&G_la)_DpcN2hv^udk+iZMr-GF2MO@vJe)B=Wf_yQPXEW ze7DPU06lWJfu0%B84^0G#m;@&AY7;Pu3H$Fl1~tmYF1b*#O`pX1v3fOcSTyRz;ti#Kbf0Dw>4 z9XioWXV9*KIW`m*14w5#-#tu9+*+HN zj*8rR<`U7W^Luao_`QwvxZSsGS==bkOX?^7_AUk}iFXDEwj>U{q3Iz%1nH7dIbp>^J_Q1-MSHY@wL?{{tv>jx|w}7lPdtVRy&3l(0kB%Y|)eAFtjR_R!}d&orUCE z`m*A!0Ro%VCT_|$1ExT+yjjj%a#*d!qD$=n^w?Exoi5uKQG)`!zP6`hzJ*rKZ7VRi z>k-)=|CDcCm)QQDpJ$ANBN`h#XD;OqKGGhtZO|G%lvO2o{$?Z0U;jf#XPVLXt<~Kw 
z118GnH|yDkOt@rMFO0L*Piw^OSPp-<(-`&P_xU}L(;Td%TSlqCEfrtRI!F+dnedF7J{v*#Oxto$JIF{c|s9XBoxbT}i9$$X=gM zu;`qSd|SVUs+O&a8k2I_ziQ^8N0^u`%OQooiJSd2_9R8);E6EE`m%QRZ_F7`cl^5@ zla`6UjWbU48y7tH9!;6lI{Z(~WAInoFDfmfQIYmScc0A~cl~f-b+W$O$dE!vcWq=R zDbNSxsj#;Dypaj`Rx-Y4_!gJ7!y`rGdkg_M94I_5+`W6ftrpucwK%f&5OkqAsERj zF)5edy*Af(toXhB{n2LlquuyOP~udl=0oR_w370j!lnrBdrSSh9h2{g$8%i<{dHfW z{9YGmwW>s93#EDsQA-}uGT@O=tPw0oK`7O&6tS<}2vK2QJT~HhMLV+GOgWM|u0IQ2 zF{L7PaBLNyOsMU;q`Ah<4!`6*5GLPM<74KleEYVEae%Hr>GasxRd@crN_FX|7v@Fl zD_zf}mwJj-lU!I!Jr2YDI?p$Dnf_W9)wp&*9X>!N9EQ((P*kWYslbk$^txsnPz!@S z8r&^Gkui`0Ii)}_kZ<90?9B`(_q^80{zJx%>+4w{ecHXlQeuPCSi%YpCNOoz!rrf; z&m85pqC}^H2(lF;W#=l)B9TWNKDH#}BsAPm)5Ra%va%psv}vM20bJ5#u;SVFqZ=tm z*f}dan!I-PbM++;8syw8CsqAdaPX;|=FmOT`9-DN$|Y zD#cn>XJrym#Lzdls#{aebMR;8OZ$NsPG&yrKM;Vubi^f6AU?&4Tq23l+-y_lJn+Hi zPyD_UEEW5NxUrc_bor&zLsSyw8{)3t2!B5f0oNckFGc9aMVT*r0uyV_B&?Pfm9RQg z?V!XSE561o=5eUzsw%YUK&xk#m&b~=OZkuYFAh?5hAJFt?pS(aXqWPmy*)M&b~Kr# zhAHI(l_x`zi?eu>Mces@ZGL?6Vd@^q{IhiIR`jA^dLOp}oOxZ$x%wMmjuTv#$;wmk z-!AT18V(%|z1H~+D)i^OUDzJSBWD{l5lGamYD2dlviMY9lo>qo=Or`fDJ?|J?RI5DpI_bgd$lg4>}R2|o&Poa?mrs4!sFD3=!X{k z;G?^bRUSN57MIewRp{O(nZ0lOA*v$EwgPtjheF)TYlsS2Wp3KfjqLQG$L;|36VQB< zuhJTm*a$c$Nq{#$6*=0D@f)V}hAHPOe+*sd-I7JP@BG0Id$c=yMP2-CFw>yi``s(^ zUS*MQ(akq%wR(?v(NghX`^q?RsHLG^IKJkG^C{mxLv|J+$0ep|y27-70xG&#Udp$u z%mYnYLtS04q5I1FglP)l0q|Lo3<#%-gknzr1z#)$t z`IOfgDyE8vJb~#^!Ag(gM(4K|)HP)6fO%H7lcq*}bupW6@KFL7y<_m*my!hUwwe2)Ok9YQ^TX3?&l zjhMT;5eBsT?3U&`ewP7hoN9`|@F+$2)w8=y*1OLE-+(lMFx+JC+BcsHcj7tG{pyw* zjqF@b*0st)gA~&Y+T$6Q%h-RF$Z=+8cD_so{wlYt+||PysVZ7EtO<#SsiV89BFW>^ zPT~KW^}ttq?a@A0{Pap1FAj+M%({H^XIRk_DsH2aAeDK%Ih+!ligq%_Hq4Twq-9`}6mUT+$+$kQDrzebDiSEmtdU z6FbDE!v~gx36b?ail24Hrtmb!M%)CI52ACFq{T6S%DVSX$)c?(vZ zk4s4N@a#VSvyGJFe&i4T;BVd!{=cK5j1dL(w_o403VYU+<1WQ0()h6gcCcowgyy0U zx8W6v()Lv9N?{u;bw9sUW6yg-=vvuCdRM0)RX_K_?1Mm)78}m^tPiLo7kd_ty(wi^ z_?xHu4O{54`+ILw{@;)mV^@ma8vVJuCxWQ&DbuAET{iib97(p*EP^bJ;aOD2B$F7%LrR|+_)<(~8am+_FHZw?P4Gz3A4{r7Qz&aGPZM8{H6 
zEF4K@rj)Btxdynv6Q07jvm&ajguu{FH{Z0{M|w{Gd&Qj&eZu>R!)mP!<^*50t3~cs zT`w-yJ#WT%txOrE4I?$$y_DfGOrJhBBA}J&4V?P!$u3l+*+;(9I)niByh}8CI8fia zs6`05upq^m2}?7{?@F04sOi$ly_`&|p&%&l7fZcH7d>aaVvPMA<2wS+!83eg#cKY- zZ>>EoVbhf@F)UuGhDKngMRbK{@Xzu&vo56o*%{IPiS6o&tsrj!cyl$%b5O!^inEoG z>GfQOK4)aZ=VcNhoKe|Gy#I&voMK!A&?~fZr#)2IQ3TNnZOUahh(oYJ8kivgP-X)nw=_u^skQZ%Yfo*3Ru89D6)(cy2!u3wtUp9p3(( zonUpAfXUZj&Z$fuGb*l z(3a00U)fBRPn!lzWcI`d!BF4}fw{`=7)$h;k8jhO_@yHd=g<`Rp>=LXNV?y29W~h3 z{5TwnW7F;v;|o?h$VQ`Hh{=FIxD%{zaW?rZ5mvs#hRQdofk88&V9OOH=cdr-M;C6z ztTl3ew6U-h8_mvhS6D?-o|B)2^rvHhuAhYpqGy519>$#ge#hGp}Me-y!A?{cT?yzM1!QjdY99WoCtwGqu){If@%es`eOWu}5oeLcx&vc}Xnyk40yL0tG%O+|nFR&N>OAtXf&zmprncdx$n}Eqe!@lRsTF9=qXJ!Oh#ELxzC9LTFNQgg zpB2Up!ptvu>KbgH(7L*36YM;Cb@@kwW}0^W8EIdzY(}EVsOX9DH5xVK`uXT|WjBX_ z`)Ub+kHsMV@a6@?SwuI})hCsR3*I<(UbpPSjn4JTt~eaKvamg8eLiN^h}m5}Pag=L zo&Lwag*dBTqp8-Z(u2MJRrt{(>KPk6wRMc%7$}Qwd%W>Mha4WZkP-bMz8_I8wltBe zA+iNZ+#KnTkFBf^L1+-0B}^%0M7e~A`B^ZE#4Xb=YuM9{W0T-^1QbWfXIx>#{5>oP zDnb?YV~;ZD`Zf$>A(HY59Xb65L2;)zR0UZ+0(q(LS#g^+b|Et7?F;x;*ttUKd%}(C zoU7>xQ82k^y?_`&MdV^Au-A|7g)J`IO|g3|HKN zXT2)&p%~JxE#7~6!*<~F z2yx%N{47P5`im>Grbn`fk>DUW)MDvnG0YiZf|*3b5D`*K#OgOP>XnNre*{<}BvfjS z1h$NU$|LV$c8Qv_$=CMu?i{)G3b`D9bp++y7!((Tnn5hS@|%7mC9RNrW6pNnBooMi zQBAGX>B9S%ZLP|%2oGGuI<`oLwa3h8ZTCV9rNM*#o`i_2vE!JSXu1|Dm&dO~-@ur& z+9#q`mk`*Ldzb9`Ij^Q4A@b`h-kyE(QK}-@4q)mZ;f`!RpvaEk&LyM(CX-)EpV>Y( z9b`J>gxNkzrA%d%X7iqB>JHlG!+iSgnT5aX=rF7M(6L4BD{I$*@B<{EiO9%qzKR7! 
zVf6t(=bV?C;GNw&L0%63D6}0;KROt3P3k<$+%W`pmX6Za{-R5k-Z>FIx1B&%K!*(^ zb8Ev)NVV3?kJ_-}mKBfs?UXTamm+xb^h!g`&>s_PMzDELEgPvZQ>^aW!g;;IT6Ukx zXYE&5T^zZz4%1tPk#8`f0L=U(|A7)0!tLv_T)zgJK>!~I({Z=xpPRT$kHwL#eHoE0 zX!c!cGCd*xcw36Ucg0oWRBqGLsWYMmsVBW(>^R)E;WSTO_mcs^tK-elqCwoSuX3`2 zEg9yN&GryHQQ3c=T6+D(!@Ti%NoVQFW7f`it+r-ovZthud7L^X{^HDPnELW|lOO&0 z>igH+6!zw^)1hp08fDb7v~=<1b6SI+slTGX-CW?fVTj%14eejZbc3=PElXjL!L^cg zzffUm%V|iyuGB`Zlnx08`3aqZD8E!;I{j4B>om^w4B7JMn!%@{godlcR}(H9 zDcyAn!ae8kz7Cv%3$C^LP(636=451RY*M$Lp5YV-hW_s2vNa{sPk62iqsmW6&EH}; zLI&*nCy`p79l&beDhgPNi@m~*>Be=1oQyYp>iaKqKfx1y*~-(~LQf;>s9H7c50|4( z439OeKg|45FZ#h|E5pru*r%8m>u2zhHVUNsB9rYs-UKHDF0=vn(5lCKsK0eV!Z%2_ ze_^mto!I9?*!BK73&gH;0x zs$N8`nl|Y|e>*O&#UopGUz-8zz1hEl#86oSpW|=^?M%`zd!H#tLL&BTHTZ8DMgMx5e z?-@J&64&FMb&*r~6c0teoi3MNhvy*_YZ%jMeXgUKj0h~-fZcWvnF#^>$z+H$O0m7B z8%nVy>9D#H=Wum|;400Hzz-B!!Py`cEtDUCXx|)86ORsI$LgxtegF_8nj_N&0KA>W zNU3jr;M7U=Cf*6hgx|bvomQ8ZO1v}&R)0;F+FP8!JBRZn+NMdn{}KbA=VtmlSGrZs z3?VTgfX^~L%hp|r{=)sTPQEhHH@o~>&7H`#EL~!)I9<12&mEFJT&rL{`mXoPk$|0v z&5xR{{*8a(^kwS(g{7k_(lN6yom-pgnGUPRfBE9@KkN0xM{9LeX}hen?MAhu+hd%r z&7WcK8>%1e4A1|r@e0Q3n>1l;HTn-WblYW+wdk1^1L{PVSAIZW z(#KD~iUDd)LXu4rD-&K15PA&v$Q?0s(+St8|5=csl_Jh~e1+6gaHJWV48AjCBnH2! zLEYcth`N`LWqwxF8B))Hhzrx*#dIw{Pt%OyQ+1eom=cTw0Ms?09~WHSFBgTJRNpec zFQR(xVyXBX!fc4Cjwf$<{0#ZIaM)ml{)T@QCoNlg8C=3`^wgQ5JNu1`MNc^#G)Iu* z2oTf>F5KxD-d-+mDhZdQ1(qbl%^3J*buFkf>^$}eRE_Ju%_pZ<{>H+W=9*VN81gBc z^8t1?R_`Ru*yU`EYl9Z;XyidGKiz5_e#c2_wD;rd^_c)i;V}r28 z=GeB4hdrd#CEncFw=t#4UwCCZ342ZYVA8s^+^WKrn57OqZ=}{_Jx4x1^$z@d(mc(b z-}xbL#H9SPscTqcSMkALV~rU~6ddTv-hE$xn>!;aw%?z9$6YV>5ba4>dP}cn*I_0? 
zxS+p}_OzGGTk6k!`uFB%RygrcX?sJxd%b$Ge{V}}T5k&|d4!U=PHbs2PPa0~0(y%K zvSRH}l?@aO2pxPQ7xE%x`@Jm-4F@`Qt5VDIQ$C);DXW_2B2dX}m-Qp|+}|~U?;qRN zC3e?dl*gh5`bw55Q7h3|0j6mf|C9LF(uZq<3&Cn%03S%^u|CVF?xionAHb~a7XkNhEZ;K(V<_V6CbVasn4i$ZjDdc8$o z=eK`w3vYt!|MxN?^PRA|R$&>jHy!r}*9X*KB13Bo@Oj_xz$x7*f%z7iN@D;ieBLt~5*0N3F7xcYaWn z?k)@7Zhj=gPG7b8;93R)R(}jW61vr#IPOaUFd@l?t5~3}=70Ml-b9O3tVYWg_8B^t zj9lu=veCO{;O%?4QJY`kN_}Ql9~WTMb(Z^FBRzC+wl_<0ug)wq;cKjTs&aiZAW7`A z9TNymY)d(Beg*Vz$eR*{TMwLW{XTXxxZL`?*ZNf(E5L)Sg4~h=+y+l&#F<`htR)`z zYcR_)3w4tA`yu3>S47XCXyG3jy2dO^sfXdN&Se*^deX$%m~{mqn>mB0V?6jOgZ-mm zW!OqgnALCDAX}2oy58etx5DE}Jiyj+!2<8UT|}&QGdw`&C-Y?nm##hZrws^&6=3|Q zl)i=bUh(vsymK?D>1lSlgAUDRdEnGc@eR~r`!O@Sak6n!_*#JZqu!>a0))_7AN4R< z2?ouJ9&OQ=XOMKTY+mUDWF@SO%H6&bc+Mlf@U5XJE!h+i_000OS15SX17qdsJzjgq zRA+X?eaPx<{rxcrTBiAM!9yoV_Powns{xWZ=>CrfgY}k&YA4zzU1yh{uzpZfd@^cq z4W?)rS71Cp(8DACMBVuf_Gyu64SQ>P{TD^~UU6 zx*tlqmjhiKOb$GP^Qw6-#fRZHa$BgR0(jF^5q{Vs3wy_7vMg25*%|WZa%=3wGZN5H z`*$&58t}P8&ifFi*ksyQ{uT^8T7D`#6Ii)i_9IK> zHs6E9cf`MD@V|}d7AGpey}+1C{BtNlSWF4R;FF3silF4ET*|GsFIvIgQ_kVz#p3;S zfKsiL{JRGA^y*u8z{#9)lg!1NQ+7p_1ur<9tC$h@j<7bZ*t1<<&>6|mz@@hac0{3> z3eP9?KGb}ea%!nZXCuk6!Uz?h^()c3)$>VX{GWk9xf)uVb3wzw=`1u{0T+)r)C7yx zC?Z($m+>V*b}M4UMh-Hlmm%nw?W`L%Rp8Xv6C>0FnV4*YylgFbRMENZ1$<`)_z7D8 zja?ZneK$@RG0yzLQlF`Fy+=&8?kUGg)CZ??OClSvgl?Cut5w^I>mDr*^SG_dX6Ck+ z%ChNUiR7pZbLbOd+{M?!pzB@C?Mgf6No;JfyYvT=#%ag$U^2&v?1LYj3*YTnR1L}B$F$WF-GT9)F#mtDtA)iaHQ*RVJHv_H(mD#9kWO%=VRx*k{k<* z;gxJ8Dzm?W&H5Sutu=3@x(s7$6g(oFmE-K=zG_bYj;c_sr(%{&x*lJiP$X)nid=9a zOsK49i>RInr;oYB1hBn8!f^m&Xf-;8JSfnx_t0lP@>+eFhCDGSGgGDx(zh8T`1qv?{ z6O9DT@T)%l*RQN$L0o=M8+xYy5TPHUeY2k5b1e+(PJQ#Lr&#;<@LcC<1WNe8;3z9* zUb=v&fSXn$HS-i>A;UR*_^6U`*FgK?)@rlZlplBl)M88xM(I6zMe)1GTck{5scWxR zW17H>J+B{;B@8Uu#*896i6oP1!RV0k%5fBd?$boEBNZ8p-EtO6f1t%PtQBiY@^fmAQ!-Z`$G=nG`x6cuU(zbXQ>rY1s7Cp50%Nr?&`{GOBEz2b zmrkjqgv2FGRIQine)K6KwiA%ydX0c`Y&fY;E5XcJHwA zy+qk%gv74i(ptfzS0hq(m(vY-zO{M7K(7vt;FzTjk#M*`@_7W=*AWdhWhEq=miZPf 
z*M;7=&o7ybQvFQo|F!ARySmm-mEd2HM`JXzoz@28y|h~+CZkumX`{auqD~W^eDtt) zqA`UpNnxMra;3$X8L!aCeLUguE%j8Y$qz51oQ(aaHWfmU8(L$Qb7hsxNc;QI@`d~q zei?wqTBHkSrS6^VM)Y$6E&78hHIP!)9$|0V!_pk5e4p~3Ox+pQJ*mdhLp^rSxJyzU z5lpwmDK}BkLuqZXj3AE@&{?68Y|MJU;IC0m(FB2ehtG7?ROYV^Xw@PZZj6{^gOAZM zS&Dud$GM5+O|N4kp*py;_BXRXTmul2ke^Y)JlY~7i2<7%M-3q8m5pGTMCw#%{}A@f zKWbO<8Z)0=*W}CvjBB-hz_|CI)}&eSsC)?qQ`fSwD);w?-;8T*+cPfdLQ@uj5eWCd zMyU@{A|IEZV^8M^EC*F#B&^QpUwc&I*IO3M>&N}l5~ut2luvf(>r~BG&KN(h%!VN< zwYzIEjX}7TLZoZWxS1zcwqc((a@7oxp{psJBNzaZBE)*EB^H)-eI5iur zFIHI+xD*FxId&PPHW)pY?*ecU&S?G<-q8j3XnnLdwJ&mX@>7H!f@X8}kTm0OWzmG7 z2=v7~!Nl}oL=jfGeBOj@v-XK{xj+WPGDo_`*pp4}cKN}kh^NgCuLGLT@d|j&-ro)8 zPVq1%Qx?^f=+TY>!BV!OP-*5WRf-mwzlhY3!-d0dm5$Ygo4y*3EK@9Rg&+TGFg7LL zlcYn?m>5`*u|H%id)1BVrT)920Fqy~5Uc^kMQnH)Utx7% zJ?Hb&BL$NwBOHcr%XQdnf8bE(&%%&lQt^w5AQbU~FfVxQ84>s$3|3o@#A|M9KOJU^ zWj>#39m4Rmt_a=ZrdkIHK7C2y7DFqgMA2k9Y$*+RVX`X@#OSje3XuGfKT<1=M-B0w z6DsepIKuDSbItypO}#f%pvNQhVf5pa9&4zif5;C?2u#@$6}dP0k(tm)czQ5`T~b8c z=P%IF-M0Ret65}c%Ki+|;5A35{#va?4Q{Y5(~ufGGKzupsQ1+zMFc6=@%x7T>nQ!p zuIKt131?&rs(sAK8coLj@wD}qa?+NPuE?06Md)-mQ06CEpg@q4N3Azy%X;!Vr!NK5 zR8WJk!QqM9Y^|<~iPYI>%~z`Gna9g`Om|%5y3%Y+qEpbFzR>1sN#g#td=pr6(--oE zvG}n*&_75Wo&M^F^b+3A9w&E#i&0tUku#HN9ykvTq0$w*McpO~kMMli)2A88sAo2| zO8TNl{bDCskq|H>I?8t^*K*o3-+i*1*jOhU&NYmw(}QtA@l;=rJ)!y@30}dJ;Y^Ph zOkz>Asqgp*xW1^}J4HXW?X_;c8ULNfwr&x=Gk$&0=4=G>?uLPt`-e?H^ina0vz%>9 zWmx%0AQoNuN{{N2rV1{;mltk9;$bHwTQH`*!0WEXozX>sRF$g`W& z$4wl?uPlWXA$=^i8L$=!tq4+GjTsq9jw3^^kCb0-3!p?TgICN7#bKzGB2FET1OCz$ zHXB8vGmKdO*7G=K>?wPO@N`#24MjZsrkq*(t|@qulvxRwAR7(dxa+V1VT7G`iN8T? z8sq&+FhbBYfZ>`J0-3CbA~K+*c+{rWS2K!{pEn$N3$ zv7L{4X1dy()N&H&1-_zbI1$U6MEQiWuZm_A0PUtyejhN+3|!20_y5};wg2_Qz`YL! 
zDPuteIUuI1yaH3lrMVBhC90`ziNkX0WJS6xbhY+1L%tH`dNmRBT&Y!pzfuXm5AQ6Z z{1UhpwHI?wj{`N@UG39WW~zGPG)m*5O?ald`z7$+h90sZjEnzPTqIbK32pA9{h>|n zH5rgHMx8}n(}3@uKsZ#YC*X<~Xlx&uE*GYZyREa(n#ebLyPbXwdubGjk`p)XHRorX zM4k7I3;3G>Y1WmHp5yRS7R+|-gvk3Cotc-{HDgkM;X&mar6*aS9LW#|zR^L3nX;#E z0x{7TdsYr!Oi~&3(7|3O%tUImV;CZpp?Iq#N2?HcUPI?Bq`~AX|CaN2XkV;1Ui{yvqH7#{h8q;vL40c`bX zP|b>dVe`v^NWw*rsT?z1Dm@gFXo#%Harbh@!)1hd>xlk=oV&Zr76#uX{ zT5G$?DXi|EKy(w@#j*(B;U#$ZoK1M-(o=(GSV9;L)-_mxiUx>1jkJ+3;EsvaBb0*6 zlHqLGaLk;RV~zb$B!i1vAKR-0&YU5Kvs@cK+UV__V?bILlCiL5YqE&UFeT50E~^Y7 z=g~D|QLJG&be9KlFieL0xCclB4V zx19&1g^dKyW%{ILBt40%wN~5+@htb3&MX1_Q@Q{n%&%thJ5(J`F_30CmwKrVEUIG$-{H;LQc;T+!oaQGNB^nqU9Rw zaNimJX-uP|95|rZ?QpeuGO3jGiM z6H#~kL4Ia6b&)y1yY{+K9FJ?g$Ijlmw7Deoq3yRnujCvB+br@M_S*Z$812eCHg@;C zA8EcVb!%n)bu;DA3}CYL~$dZO;|-$>2se^a=y{FFfO6#n$6i&2;X zB@HWC2|2e=X(DFRq|1Z8b~PA|!71YY8B(mTT1;R*GdGLnzUDplB@5IEA-g@Y{yh%f z{`aKlrn&;CEXA1frIIp?l{1D9Z#Sm?nzicHbo7MvFI@ISgpMpY8?6D@zoo7Z&1yfltTzW}-iML(X~vx?8O!hSk_aq{OjeNTq2 zYR)c)*v@2+X4cP7I={|$8_6Z!eQsD3)I`KFWe**PIS|!MRoX3b8t)8GOjv`MR*0su zc6%X~WF{h?36BsMz*MlibPpm;Q$_vv^{?+Nk`mf)wdxjq@2bElNi+L02DL1^oKld| zX(#NOdbt*9-nt2f)X0ko*-mWnhCMvAp5BoRzztygngdTVk<>;v?-|Lm6|> zx5fLBJtAm?ha_VPCbS$;Y~+ z4ErFtkvC=UKH=ggC+)OW>n~!-tg!#?R{mEmFNNt4Z$2KQ@xbs@eboMt%lAnT!Crq6x4o~E%$!JUGG3^2C%)}9VSIa6-m zWzUjz8-h!WE+YvdZ`zbmEGgR(aht1N9=+0Uw`Z3TXAV6X(PE)?ZQt}p$ma989m+48 zPnyYvH0_n-?>4hp(e3;Cd&oW!3s>8H>WW>+=s+@iZQvB+W1L$Tk{ZtqyftSAFj{6n zB&wcvA0pWd9JN;Kl!&8Eh9BFOH`adYTw#V6Zp1tyI0K3DRS%S#KRJ{VLi$RHnfn@+ zPuFd(%jA+ktly{&b%AMHje3wkM(~}g(@f|O2csk^cer4<9%y;+7#QKc?^K^>-xF@& z>d#wKpNArqsZ1XPDXsmAp5m>|GaD$)@7iYu67F}X?&CYk_hwZJhKE?$aEYmE%ue}9 zY$VB?KbgBl-!p2@Ql)k(ukem`{c8EQ2CW@Xt3MSkJVzYT*`OO+sz21PJmRH6vzF1I z>2n>=f$58pEpf^ba^BEug~tRZZd+Zvr)I+`@ogn-wc_sijyc4%C`VBZ%RZ9J2O-WnCC5cPBXp+6YQH>hq;wSFZWk>8yKcmHt_y5%tCUN$9 zP*{h&RxCe9w4y=lw^Fc0vMDc!D@e)GIUyrk$#7)KbsAo-2rtald9s%Xdj1870CmJCuzO`Y*ImGW=}Uttan z{Y|fa$I;R5f&5P6!E2dt-N=L2#9cJl}r{ptWDj1+{EmIEf<6u41&Tpv3eUS63 
zC`_jp?qaRy17X$aiV2Txi)MWoFcsv`yZQ&Zv)dXgX2QTXUd@89sYeAzz1&8OG{o8n>RpvyBH@}99c#T|c>DSoE5L-rBKDVF0<@H6YZ(Myb zM;f5p)P@KM4J!~J@2zkV4XKr_M8}AOOhWit^t@KAz*2|bsf}sS@7?bEVEPt$9o`gm zC9}KxN1oZ`C}(20{g|ao6cW4WdPV=iWI|{T0I~F_;7aCm*p2%!cyWANHHC{TL|4+C zLB8^%1qoZl&p&Eg*uE5tNO~#P#}ZWPEViHU%jZZ<=K7gQ&5%hK6k=`2(@c+@vUZ2ek5`&)GJ@&41L{>`mb( zRvw+oA#68Vc^Is--R+zv%=WW$?woaY@yieLA3cD^!6&6DhsG!)Dotj=Q|E;jQi;J>k_AOa5FQCVz{Xy^8N}te;UgBHhKs+B($vN zlVU_T6PGBg4`N}2=xT!}f>w&|OFMe4dS%U#O9#9C@2cNfNMS}LWLD2bxXm$QQvJZP z;L18B?k)v+OPr|!Z!{peCQxtGr}GD9{&LfSyw=Gts(y7Vb*?CZ8}B_kGAe_Cl_%tx zK0gyogy8M@aT347<(w>v8I8%Qp_cR-vSp zdQr+*6)o(#e!qx?myY4h^ilFC10$X<23Er!COf?^zTHG#{XJkVHhod_Wuo{a(jmbf z>JhN=k{yYZh}K3hJ$_dTYE#?3-;NE%?fYx-#)*qETN-8u^SHm@Y`qQ3Iy6xL8ZQp2 zZ#{@fn<^KEx6Zu%T^}M(V&$xhIW&~JsJgr<|G%T2mIt+4Pu3;0|Gl2F-lOEN>Cwti znqn}U@Jh#>KjlQmB@NDD0C7pS%v&>fpFRamVpJdCG(DuGbYz>5_~CN5w49K2TXYVhYyrQ@oedJeHApeZ~9RgwJ5 z#nQnlpyh@~bKTbb=0v$;xWypH1a~+*bnor4C#BvKvJ_)C{vP>9mU7a|f7>Se?Mb8) zL_L;D0W^dHJ_*MbDzZWlzc!M)okG1*))FsK zy6+3$^_~ChBE0=Y7w-lU9`m9K#7O|;N;-cBZM6_;+y$3X)~vP7q zkD$+J5!mq(SA!zdy<@ke-l6&F54C?(xI-Qhj`iKk1)Q7V2requ(GMK>-7=g7&=0-g z{js1XtFiSqN^MsVtiH0#Q$fB6K!myS9W#T6tIwrG3?$(Ne6tP>z51i3Xo2$gqmk?R zkM&kq)+t|2785M|iU6>R$R6#$8^;1q2@GC1NGu%yA7U7|rB5(Ib-WV%UJ>EjwCY8Y zh^xqQTYT)ka7q^}dDmw8tB~j8>=3|m)G4i&P_h@sLt6AP2l6az5Dc!f$GEB7ORa~0IZsgdS0^ylGc(bnIl~BZZ zCM9b?)jw0KF0)VsscU@Y9*3kS!3T7t==nL!-1Kya zcHpH7W^YGN<4{Xj$_#k6l#WE@I9tiJ*{GxY6`HQ{ZK?14A$P4t0c>WBUcGctoX@>lLzOwfH21YTzv{&(0n;BlR}zos!(0_`N3A3; z3tmWX$LaYuV>z8sM}z)*8SzkqILGRa0f*6^pFUjz6wY6Wt)IuW4Yn;9&Mk%SF=OiT z{vuSD(;=1)nlhzBiU?Lwx8q%Lm~C*cWMKGO^itiVBAE5wGwLtx{~~R; zQRmOI@(wLaG93ZCc)4NOq9VeUmCc?U?7aq+5A1QXN6zd|&ZvR;0zhI_YxUoaDkZJr zPY|&Gq@K5q+2<9zJ{3i?_P<)b3sY&HZTjC1*RWj>_#Q1%1$Q-nq$4qhKNr#zDD2-T ztLC%KF_mJ2hhSYVSlAx-I`iPw;)zR#+LgML8)Y}5d#Si~+2WIS; zcz%oYDs%jSOJIuq$b@=h5{<@a>%#hz7pc5Huk~LG`Jz;%cY)c!O{kkcibEEX!>4k_ zd)nZF#$hc}7&6bSGlpbOs-oKX&%yM*xRA^erY|gTVD2(N#fxc9oHJn~_1VLfpyAIO z3{O)}_U>Z)wZ~^)`*cRc*r_Uy+BOnDp9cM@P-2ckrHqIx75rUD>>bvJ8~||XY&J0= 
zg4>{VmC-A2bld@yodmkO?h z->xCs81C*TY&#yiFUk#%UJu!$B4LL#t~bZmj*m3`cL`VBLyNJ%J3LyeZj6BiBq1N# zl?ctkp>=8YX!fzMAeN~8-Pugz3n|`*3Sd~=*}q?JO}$rjtGi{x-{w*&ZECeKy^sT; za#}FZrNp;qD0-boQb!QVmf_J^teb)+{!?~Hs@9b}P_&v?Plfp-<;F3j`oDFZU!P#v zexCTNyg6UJK*Zgt0_n2XHGF{_o$efQaEp|5VyM9dP;gn)G?( zlmVNQ;86HKWkUQV>V2chy&DD=CTAy?^X{{%z`cdK>%&ok&_j?s2YH&!>Vt7dlHasq#68=HNA}X78Ze1%U^t11sa0NKKi4arO3P?S9@8Bzf$_ah;J>t*)L`M~hHi zf2j*^Q;vdymGofxg8J#M=Q{dBvLut+_m)WPz4gPe>0I~sYFSEw&XubLu&LE^F#w-) zXv?B0m50tBM^@)=F267w_}t&EqOx8yVDb=$*CUnh+oFtae5TGE%E(ENa64*cqu=FQINBBs0RR zX6O4uH0L>(+CywjCFVXZ*)hD8YBl-)Spf2nmk=o89q#X#fsr>`TFgL}I&2LXiVV@S z82yx73RVvANs~EM?99vowMR2WJNPG-?GwGNqW$_V6rz899N7NmJtkbnQT5~J8aslZ zSOZgH0e%L=MoMp88d~`RX+-00`TLZ@j&=q&T=smH1VCnF|tB(#3ed`Xq9_%P1>|2A>P`w3bh< zWoO?ZPX}7@=PmxmyxW+`3)Uy57@?Lf00oa#>f#>**{*~!&0X!S|3}k#|0VVR@Bh`S zVN)wB&Dqq-($pL{;8mGcmR6P{2PTekZ$uQ!hM5YfnHwrIGZk`g&508#2X0bSR1`u4 zWPf7F2*^mHKg2XQ3(TLZ{*(4*O#%Qx^=BMAF0_KXsIy%IGvOG zH)j9K93hqBudEFFN#LJGESm`aO6d{Pjy-eAvGZmG@$bR&?*1Fey1(7Hx~{cGEc;01 zh_E~45Ka+g_yOpzCP>q!>x9pvfjIRN{8=58pDdSS|DNYcCg1|Qe;Z;l*4_tl3~W#r zEGLf^jp+|ua7Kl77u^Q1BGKcowEwwW}>RFb-(2Gi@i> z37f-=&M^qL%`|jN5C(N|h0(=PX~3}oZb4+_0zpsXo05R{o9eUG5dgs?9j3xed=RM~ zQJIIIRl{T#Ot7}YQ&t{ArYAAdZNmD8N|3$}KAh~v0RAz}G?bba9pVlZ(crEU{qcHg zomT5f@wh`?^yh|?Glv&@WXv}+h;P*MU8WNnyu3(}I2Y#ZB-_fR8!t`6E!^qb8aetz zV3J0pDApDzhS|(JiQ0`cK9+Mowfr$Qavw)N@uz)F=(zX@8#`*&-lQ=aV)S+lKGJki`FK1a$QHm!~{8Y#i zB+(>{kQkvVzkbP~`y!z6I^MRJZ*t?^dlx zx%lc*HX^A+D(%o2j_10F0qn|Em$41IPo~no`r02p&xo9X3yW&7`4YEXnM^`d+jID6 zClCFPPaDS56mSEs7WueqIz(zcO}B=jZ1F#egK*C;+v;NL?gs|(!>2R9;pQUf&XMJd zk3m1V4Q4&{rQsg-hV?&6SNStq4l!%`xtCN=29d_&0<1za7&mLzTQx1!!;I(c`}~y8 z260EUfm!a#gV%1W!~BUqPsZZ2?W9xofrMzGsZ8|b^*3V#LE%cRG!sg=+6u4=Bh82$ zROZ%DF*O;|HEEY(|JdC8uv7;8%;41B>}EX*EBl!F7q-)YTm0QM$(dT|cfdf&q56 z(CS8JAi}4q;eMx0SAJP(4AjTv9A25S8=U7|^Ot#)HYlfLt)ZbgqTB0zuer=|=vR*K zi2A?&I=lMJh~)sJQ^)=;1bnaR(9!2;1Cw zSi|0~f89!=lrT(mudjN%3-thL%@HqZP4?Nk5{DXiV><+;iL7(k@bDeLuF;~%UB_o< 
z5@qj^BW#OtbqlLC`Zi3}=RrEELjhrXLp&ZkJ4DXFY3t$RvG6H3g&|RuU+#hvdVMcO zQ4^~HaGMPK9lb=j1SQZDpd3;->cU0qh?+Qe?7Tk4wYo?vQAW- zsJuOJ^Uce!#e7O+pvXRt!`?e2GLhop6vI~Qq(_xIiV~)5x5W*iV;6yb4k)7ec2I2T zUu@IUZEIPLDD#uotp^*H46jyt|5jnq^6tZ-Z(W*^V%=Yea#3(< zXHT=6!4-PQmxbbmO@;Uyu_<>&^#1}l>!GDTS7cD6JHNO+Dx^A2yB&M&N>fOIsn!Hr zMhj&!X=PUFN;PcGI_9{C$byV^v&EFy;F@zP3;V|&AfeZ=GB$V{vHH@b#vZY0Dh zRA@Hro2_6f<$2zkRg-kjN?hx)9k?I~BvIX@LWWwH&`Fa}yW_ZgwL{DmTi~6@o13Xk z3e=~@|K;0GU%&>Etj2j?p{0i5M}+=b)v;KYg-g`R_PwrxiW1u8FZWd<##XEl_-sOK zpIAfXN}poVHx%;>Jmq7=LW_D=NK6~P1vMml`QGRgM=1*EpPP^gZLlWpAs)4|S{Zfv z`pO~Q;l_NI zwk;g2%RkX&XXHl8Ktoqakl}K^LUa@K7IPK1&m6yK@a=9oL@@So+)ZRWcMq_L5cUINJ54&DoNq7^{Z(j3qxQx3Q0*QX1AW!>=7nOt+ znshL&VD=|MRq=Ud_|6=BR%>)eAE0DZSQp}4?!92Lx{=B{;e~w|#J=?oeIIjjrCT-R zx{4_>^Pa43pW<8>%u=F-fi?Nv0%$12mmxnne%K=OMV-2S){4CjA5ikU&mDVq)_m-Z zKHtULOeXD62bHWTIID{Co1@h<16- z<>@!dS&=J{)tltWO4nSm6o|0t6fNr>?Tpr5_@xdIgx|s?t+fnyDHhQ>If^Uq_++#( zb&G7RNda5J4?Tc`){uy4^$Icxjyv$tRJUccN1S$zAhwr3g{efq+mkm#p7q-5<%FcZ zNo}n1Y4JmL8=f&-D9xJes3on0=a*MGDCC(Fgo>H5bb(yGF~zO6dhfXIXvU|-8vf*O z@tR;rG+z@mCN-EzK^G?YvzT|N^4)$*Dn5FsUkXzqqGH=+!U3e~IT_#!G<7zc@??KFKUlp>m+S8Hwag;R+HfTHzYcetK-b*1t!+VO`zvD zVSfW>DeK=xO7=7012Ku2buJ@YN>5SVnsh=Y^Dq-LsjMoTt{f6 znUDX*{J~AMU5k(TjD>i9%;?cewq<6AR4im0Oz z-!P4?B|+|qj&mOAr+ORY9bR#n_C9UOacqSvK-9xrj#Q=t<`zZYz|F|s>Pb+RORH3n z^u-GSbd`JXvbLX#EmIfYMh1WrM@JJ@dp==~rdG|hHsq@UUoJKa@mYNiirs{RHoQ2} z6D=EJ@7!0=MGK1;{Jq7Ge`oR2*Ic~>D#v>q)CyIaUD<~JD4)?J-p$;@?(x}Lc+qGc zZPj!SH=_Yu!*2CK_R;-B*ZWR$2cD5jb0_ni(b!yCXN)gkZmqs&GQtYMf#1rT{5AAA zE&tARW}4M5G!u#`)%oF&_58AoRmk7PXeGHBZ*|ByxvBLRzqvb0)L^!$a{f3d7|@91 z-y~Do@g=m4+^*xRy`Vd~yZ0a8+#Bzed!jS>+%V9#3<0dWS@ z-O5FTwkMK#_b{<->kHL{@G7Knt0xFqS=aIt^QbWZi2FW*A4NxOh(Z=24R0!aW@`Id zEgxtb6QVlTvcHW43=`{;f5qD8RbnZ#A3o@#0|Its%_N%_r(ewvF_Ah5Ab%A!N&XIT zt@By_OK<<`-xy5vUHzA*jW^N2ZaIDgmv<(iG_gwV<*alE(1x5`HrFdBn2(m;x5MGH z;x_kdO_aCtbu~uirVgK)`%YD|(HJe?pD#}%>E?Q8Ox$48jH;>p5kuyp+Nl4&MYei7 zlCV^z$pc!QGd}dKjyKum_s$Kz&q>+iO)RQs5gNuLOU$`;!1>jZwI1qD9Qi_GD`ir; 
z)yD6a?mioMk>HTfQh({V8UdNh({!kR{O(WK=Uz<6ej+k_Dz!!G7y!pMw2(|eZ2eqE zKmg{(_jehn8o(F6zMKE%bW(o(V%GFbfDeqOANqAqCST`xN)R{zO3$z{GZqX*ELqJ| zajs2XAZLdWL#NW9FTdi@8HM2_4*%b8A^+G>U2xAF4+{X3X|sRAc8&`BTVsC_Eud9e zLHecU9~ZiZpHeK$P!9JEzw+A=VgV3EWtDZm6M||7cx7{n5TFj~yVN|`kO%N_4vH(| z=LH3E`A*oy8c>4uB%rg{?i*&|4Ne^@>>6mP8g#k^obsLoB{$pOswG#i2@04VOeqI7nmX~@vQqT9ezU+lj%`|T9qvhTC9y_~NL zRrF%@d8-LzFIM%7nbXJS&mdECd>J7nlm{lp+(H&C&s%~#%g}xD#XKy@is14GUw=3D z5K%FsS!mz#a#N4d{h%*eE3noXv{I61#mP{!Vc&!`tkz*c-*9#FKP<~Zp##7$=?P>z z-smxJ&}wYh9vJ7lXUaOA>_=-cFD@$(2iA zV)*l($4y5s#9W>wbL~7b3!AuQ&4FI}i>w93m%4gdhM&Nh4QAK?(qp6(s`kp>eCmZ9 zH&9;~yZ)k@cvN~-H`1#-Yw8%a(h3_aP)vSCy_YgAO%(5H_Uit?D1V$p&BMjViI|qo z@No`fdXz?qnGrA`)QOOJyEMq9&|^otF;T-?^&2w2@wa8Fg4`12&eW(dHibTib-Kmwy2g1{`W(3Rz4jz?hqkqP zW?9gJGbPh`XV(jP{0*|o$tMR-E?c_FxL>`G%sstL_RneQV_3$a+_>ezxy<$cu;ZMc z!y%=0FUiyojPqURbT}WViVT6_`aN(cM)DF;(o<$}J3oy_l@@%}{MHQaxW_P)8m2 zF-Z=^u7Q*K;94jeOX315!q5VI!gE_9`sQagvhTqr!bxnRcdH1UsIRx%VEaMX|NCe zdM1JxsO_tR$++{F)&QItYaEQcA=+bCw34;)j@+`AJvjduT%)fs++8<+_jwj+pNNLq zO5YNIfW$-V<08(R>OWgdNMfi%MocCs*ke*a8|Qwy)vY|%uH^b0)rO^>#;Ex@{Fu^} zyoLvw@|30i8qs_N_C9`IO5>v8-#F*@Lfw5wOe#{U;*X26q5@q%ruVvH(v%96Z~-bO zaP5|oe{W1P%R;skY5R@SgOXFW2jYFd(op+1Iy8bTr?rccL%Oawr~KH`WhCyHp&aF2 zfAio?()og%yZ6l#xpRl4G_u#_uOb=+AJ>6~6~_ezR6onq?7z zPh>H5?lokxPY zTzTR7JbZDwdVgObZd*K7Zq>xr=Kdkk7=)MANy zoH?KKsS5C=&_nBCH!e&ZEm(-^t?fQFwtVZ=JD>SWEdvMtfn0tLQ?oeVL)$#YVctm${ zeF<`mVS{{OXWpG_e4wCx)#dTK>QE=Nf^6V^wL31NInr zG4Mg_OVfU@!;Of4980=`W||UpxyQ9EsI0;1|HdW`XpNMs3~Ff(>-H`pkD(?}ihf$x zML)_l#-#(nji^;2m$9cEul?hN3#u{=%;zA>1*Jce$+89~Q& z8)4-_R$1pCb(u)CT-5|wr9PvXa`(HWq}H=#%p3O??KVa1E?xWg5;pY{&Cj1ETuVpX zE7r2ucsW%dhbHks4o8T_+}Z4~_qpY*nt@2jR!*4ZWJt2eSjo8a$f7D`D`z}5wk())W*I8~u}O9u5OUrKc55%}2!wW?L)36aEz8mKe1 z09$4)`gQl+Ts1LZLo0vtWTIS`@!Wh4djg|1``lUl|Jb|k7kmJL9a%pYKlyDmcCURJ zJ6DK%6)`vL(2-217_I-CAuag`mGGtEf%Xnm*o(oG%_gbcz~KoDs_ z?CiqNFEbqfqLdVc?PgwqMWJx{p@?Dja&)-WX7kKu&)O+}3+HGBWe1BIKhH!>y@Q%3 z%$r`{@Sn=0%uWWi3_8zs-8VV*t4P>tU*;!0w0=RA4)C&HBS}*$?5uT2>3wNbZPIei 
zDjB3#j2yewl$fJSp{tkL>lYK?5AfS^%G7Z&rLelqe~#Lh(%!=};uaCh>xK7V4sy5| zYKUrps8_wVOuhUeakucQzc~+xwsg`GK0hK+`TVh=@jc!nslbjouB2C~ETfO->@3OT zXqy99Hm~~p^u|Zn?n1ajzqnG=l4ac)3nNAS&MhS_@8zk^7UoOl5vGx^7Y39H@-K$g z)tYUG>rj(T--LX|#~J)t^1;k}nGr=#@&}oxdMZ@n&an1>MyW?BHP7rqZK&##0BE-+ z^O}5_e7=&Yz*HXqwBo1&I}RpFCAJg*5>8{VJaXhwn zmX~j)-20~?N?S|SDzIxijF=#PKCfjI>ZZ4E;Q^uJQ1v)Gc}jSjHe40 zj7N39HFX=f*F~=m&+ATgKaRt`etlr?J%L$OVXZm-Ac(fmj>Bs;`IFXr0`vUj{_RZK z(wlhxwFzeZl$CONXVvNbrrZ?7>s{IgK&@&vD$gm=lE1YJC8cxS*bw~eO1f~O{N|u} z3Hjvn9L)7UYUHzKo56W7E^;tg>jc~6`4rV(XuAM| zT20;vPGW_vZFD_jxLEO#fD)W=O_GhNDMB)LxBl)0r`cM^{^dkJjESIw-jWmzhKg+1 z8aRBs+M|-KIpb5S7d32hi;boK-|bZ16NBM z{v+peKa|*Ct{(h@_*RqFbu-byH&Q9qQ>A9bmcM~w>ND-{`JVh!{Rv|o}=>`H6OMU*5&+Hq#y z*lBtPm#*WJAYop6pf;DsBJr|a?+?ktat;+_z~WpxG_>1z_7A_#8zmrJ1$3v!5t!IL zuZl7sxUk?RlS5`wj{WSCSBi1~im_BAt}H$OrT_`ZS(kUt62WWP(a}~XM2*`8E2d@o zkuf2iWwa9?$DxIjc=cFp$e4$|vGWZZ<+BL^MKk{ECm~{`(}5w}!D_kskw~O;yhbTd zqy&MP0C`%LyMBmt!4+_L_DJk`t++RdhUuY!y$XMY%o73Fe3)PBPHB9;G?f~OioSRY zdrwL%3Q%7HccP!D?d&*}JGoZ=DPIx*FV<)M98up@waN2Lk;99bs_7EB@*u%&Y_w#B z)VjBknt8Cb=rA5N^d{zvg6ptASCvnRnv*d+n;E44#pXF*yNDQ;&FuU%5FO!FWE+9~ zX2rQDeo5m~hvC}Dmu8ENdB%Gl-nbs&Hr|NUd@6QpC4|_D&H_{yfiTx*Tp_K6S8i()>KZeu z&1|Qae^*~dIe-3zzm4h4K5=8)&hsJRUwwU5GcJMIwwSjcX0^W=avmLc->un{&Fb6k ze69brR?j`nqIISFq8Xac^wbVj6Mn1ST5rV&#WYaaS}jRSX~(aMeUE(&n@4>oCW?k2 zHErnzNW0Bz>Y*;RCYBqEb*ghIb$=E9wO4i?XTEGr_uQ_ZO9 z__X7=yY|OdojHIHr23w#LEM;&P{}x{Tw!ZV=26sbjJ|SR_E3%3riTuuSl3%<{C0A>_nP**zC_J4p?H+zwA4+?uIduD&7G7(Bql~QIhHN5H-Q>+ z^}AIj%|D5cBc>GhOorf>|MZWr>WU(?eS=jYaR$!+0Xu6@M&%FPCP8l9ojEdH^FZ2W zi^u6kM3ds)*{$+Ci_Zz_h$v+fL@HUEH(2sMZFmL4Le=id7a&I3j<*d-*H7{A|`&p*i;e0TOd&#bG^Na7! 
zzf>AedG`-&K;87-#E``&2-EOM)t1Qx>blP*YH)M0#)oZ?hVIZhVx1Y$xi+fENHn<3 zy|G<$r0y42_=mC!v047=seYzoj`-BCRn8un9bh+xsMoVwQE??vo866}HqIYcdDZuX z_gRi0*ht_G-=<#;E;d!OcgQH3=~%Y9?SqkDLr0CQ#lCHsE?`s>f%Mts5lG~uEBNfy z&42IRxVU^$?Ei5^?-RDgviF$o0*RZq^%f%^8uCgo#H`oENbM?EJVUQE7TscY4gRvr zkOZOEYS|gbuTh@#tGxfyJ{It6diLkDE)XzP&=j_+M|2+b2UyfUOQaC*S*fb8_0RE~ z5*&cEQeym04pqI0)W(TKMR6hrUu|rI{03kl+9&rRKua>zS_UnpojjJ&Z7b;nBZO4)N(76F5%$dAqAUb-S~E0yC&w88Oq3tsb}rSwVmRZ2 zQ*ARlxbLVz)5qPr--b_Xi4p~H0?~yCBQ6vlP-WdjN=g(rp{Ak2?-9ICJ%$5!EwAlI zSNi30*%2w7lk7e>Z8drDWIB?$T(yHTE+&%<3ZT*ls6tuekQxWOnpFv1?&Zk7XLt*D}k8;JQ zUdYmw1_7#WyF+4kH7F(u>EFz*FCX@w-)kY-zgdVUj0%l#x1xo=w{W-rh^_cMdRt&; zt5o)Q-ubf=^~TePxBCwY97xx;%$S|x8hvjq;Z5Pm2V*#4#;v(<#wNP`G?(BIx(n~x z;v+<^mD6Kvr&WP5<>Q)cPm(TP&nTp~|J6&B(+4R%vq6-B3LZAaON2oZS6Il0Mf7|U zw1DQi%u4_Zwb=}|f+BC<@wgIo3t>LEq=>wu{ZYdwaT}w45ps$^k?=ur{ACIDKgnkl zOqgquN3yb5vCWgQ?;C(s` zGVNw)Rlo8=rV}Cx(_=-Ew{ZuCp>{5>R*J`9**wBFF*GyyEaQn zOSQ04vp=xM>SIq z)>hkXsy(J!`C`j5{7LkGCnPlz>jV&~1y`0k6;jPl07hew=UpFDFd1b^kPGsS8-W;^ zij;hKPjPnfgXJCQ;M`}IkAAu%S7iChFc;VtqyOTCBVZ{j?mMctj_dn=Gj{wcz-~)| zkGX#xMCwsfkP*`#au-eKx#Q$A@OqRID5-^W_iuCD=#!V$E@NnC&P^c^z(>jpNlY(C8S$hM5TQ8d~oP*5bZf6;;TWtxQ=d|OhTiX{Ohk0ac zE$G^KtUPNz>Nlp&fu)Jm6wN*KdtJpnZs_#BKV?IkKybD;=97y*33y=f0{CG4q(nQE ze8}9LWJQPI@9lJ0o#rGagR$wfl04|9T_)h9!;w#@UaATu2`Fw0mkf>SLR-px!t=}$ zEd=8|a$|)k&R@x!iWvh8?=;u^iiGS~^Cy3B0`LU{|5)Zjt>h!hOtJmxwOXlKvak&m zn;sN8T{5-;{=|9s#m|8hw9u?~VX!a=-_S|uiI~K9_>^PP>^;tPq+HQ!uhC4#l$Vo6 zOo!n${Q{-%TqidWMN^^6rV&IJsBhJUa=`@2E7sRoS>HmH%Vye3@GePh)9zdEP zUVFDZpZG6Go4PC}Go1A1^k&w%<{WA%12WM}MEgiz-C=4X!7pMyleu}_753y4Ve2g1X^QDLDzMfvTa#vrY*m8EumC(Wk^f|ouFA~ z>KxwYncRZH`Ku}b0KVtF(3W6U@EuvbTW(;X8aipD($r0`7lMsJ*V` ze6bb+_#(U7dy7=UwvO4veudaN z+hMHmuP@~F>Oa}0ti77rXiLh^Yl~$>l?~NTEw`S2@8^nM^m5G%%tXtLG~4t(Ou#A26*yeXus4W5w{}ggGCEQ z6pKaq5>tT#1<{^4WdarIdF;_{7-$5~yXz67ico|gnH}TZ4$qWU8?Vj%?aKH}zy7?w za+d3fAB9OdcS&nD3eHrMur?G9xyykxn+br-r!#EG%vn0(msj#yo|Pmp8RPbJx65)% zQ#|7s=@A}k5CQVse7#&p&wx~EHlz;Q1?n&M#1e?^;5?`rwa)jJcC$Jp*Wu90_1nq) 
zs6!35_jbR6)iowJ(;Uw0hvyn}IvK8Z8)v%n>;A$^k-Ee{&D!Crxo})bo})v5vxe|c zhGKHb*BxGqVA;ys-wfk z_Lu@k=&-WH>8Fhz_l!m5j*B=Rp;_5}!i*GgMxOFy*T6#5jgw=+fNJ}-o`_n_q*h{9 z12FNu8KxY&W=EK*q{{;+M{$DbadddEf%Nq&^T@(-;LcF-C99 z5_i&N;R+z&QHJpQOIEM!B2s-iUo8gfP7l^n)7i_YZ%-tvf>)_^bE`+pRatjp*cX#i z_36L?70EY=eTrW_T$j++rZYnKLWNQxrnr1*VeA|2g4fd&!Vt?T){RCni&Xbn8WZV? zr!^a1(p(&P8`W`m*zG^g(qG$!Qr(l@-R$Y7 z5ony6FErA*@)Q`( z>&VQ9+~(!XCExS}e1zPEslMW{NT+7la@M6>@(ZHn$#LG&^ z|3=Noda*GQbLwA0)AI%)VWxa`wV?;+56x<6TtRVu6bB_TY`?t(O(!+KE2bEVE?S`{A#X$#p4c)E_%3b-tMNX zpg+T>F%Z@*Ggu8>xK`1ihg=@~qxPJ|FN4WU(p#XSwBBOs%PyC0V8jTzR2;39dZS9f{q!@vn!4pOkjXbkn{~}`QSeoj#_gD?J?5?QY!7U> zy^Za4Dk9|)AD!1<8rA&M=9|g1QPB zo8K>#I{XBbPGnAxm2Pce08Bw@ajjY~A0gbLg@!NL6rrHgyyy!Y!F(y@uq1KN3!cxzLsiJ+ih||VL25b?~&A;8Xkb_!^VI5TU>wo39+ny8td<}udoz;;8 zypBr2@vsV_p~#O)yGhHrOYbl-KTYFq#22cOZkyjLA-^A;%BuFE>^V_mSHq>@(UQc^ zL!aNN6@fZ!uth|GEt+Y*24y(_@;7nrwq)GkNaFHuT0PAw`qfpuP5i-`1q6OlB>ANm z!8Rer+=H&z{&f{H*?3S>n{eSIFZv|;#az9%cSW-sX5*~i+C>qNJNRW< zldqt=GhlNyqtXe090jMFU6OovQ-=w&mf12@ri^-EwhzY3mV>ry>5CF+M2qy!ovV_aqzLWU0JU-JR5+K;jZeEFij6CC>kNsTWFc^pmu+PQix&A|odusO%INLC+n z@Z0X)y{B#=W6W0fljJkO9h}nVGc4W>9i^*V*qv6Wm7m-)=GJxVT(evfQM8ZT-asw& z!D~5A@v_RE$jF%!L|8js&IVcY7%vR+YsbpYsUvF zzw^DB?aYwmaAtf;!rzGL-iV17FY!4WL`RR-Oryb0P3|@N0b`T*u;8P=*d>(4y^DF! 
zR)_UyXXcLW%dm)PyHC?HG2Fy~&3W~`X72o^Fmf+BzL^EdRBvoNHiRjT`;pf=~pz^g&aDdV)1@KmPrl zXI%J!@W(^)p1De%i^7WuHXsLE*IX)5*LmZdeKR>HIHi76;Op!!_d68vPs~>vpQwfT|*a!azJ3?~#;8Ni{Fgo)JIxN1wDa?wX=&w%U+9Wiyj*)qhXhsHT zR@~&bMoLk)K{8G+lOU`F0AkkE~%H zv+{T`X=jVz)_-S<*@HM z3eb5Q{GE*QN#=1T~nqo;1FCilbDmNPn*rfAmBHRfWhl0M{e#C;#5 zuQFr3;{teJp6g!*-Ow=p?(>N$xA-83$r$UxlV5Wk&^t{PS`s5xM9{gl#!ZuNXj7{G z$qKp1-j1oNx7&$z0&cdbEX0Pxl37(<8epHrb>)tAwrXp}KvjJ5rl#7^5k$+<=CVUa z2(jL3$PCEm(Sff(MW!B=?H-HYX$WJ-lqyUneLb{v0%sl_xm7DB1zi7kS;W6Npym^5 zHeVj4MO<9kEIcN+Z}_gL#`n0Uj^FXkihZC%i5-fttq6+es8NMaptxG|NH@&gip1W9 z5t==;o)+!aFj=f`C@Y4Hjxlp*b2Sr*D^srL_wtoZMdz%Ey%ptY%bAZWBL`IE8<}bb z3N;}F%R?G9x|~PtIlrfFGq2={{ViM*k7&|J4Wt;8eJF^UzvZ1rn9`GO_|lfAp+VoT zqT;BI$MbdS+~rTk_Pln>`o}HV+}=H3v!RGd0FBM(@BkV}np3QC2vsGf_0O`&qH87q z*w%&C1%%&19Wbwp=~A;<{2Ow*YoO3f%>$C$MA$a$DdTT$S(Hd7f+P=Dn$YQ6!DyN1 z;LPmShL-r)E>`gxjrmn4f`0dRqpP5rPswF#wDy$eQ%WKlgu(7RZPUWg-AyMLKvf0u zSo!Hd|6jibL^xsHqyE76reb7bo^E-o9=e6v6gK{T@aKip`gDH!jxm}SdMWm#{?(Nk zx-8a**`EYbqzLect7N^mQ4#K6Zv8j~GhRi2za845GVF+Xt(45js{&G}b-}4A_~S8%x$UPp414;D=)F zLR*|dc0v4ZNz;9xxs$ddFow?4Rb=^QR$ORdEyi8p3yzis4w;^Gq0Yr#U?a8_w+w@o zl?UHMbHdKa&H^#2;Ym(=Dxd3Ajwo>Ar<>fKwa2*~n|JDpgpYzEg`Z5l7D1eq>KIU^ z;GtQ1B*Xf|hPX(f**kbyCK~(aQnAdiQ2@F*SG?K19I~yAB=RE^OhOg!*{Yll({& z-w_&j8lQYQKWqcq@JG^W_S@DsE+S%JAz>?IqdiMwaNxCJUE_akY>S2%e&?vDK_w=5 zpi%g)-xT>5$it({@fJ|Rua{aiX0xXiF4{VLFZVl&>Kc&u+%8$BK3wJHT_~FJd~^)o z!%RL=uttyr*xqV%eSginh_e*U>~)B43*rQ;%}?MqI$Qd$$O$sgk@*V~tEV^A#xj-G zPpIpVAS5S=%#JThlwDLdR&gZgsz0e%N0$sQZ2pFwIM~ z4sDnyIDj!p@*X~PQmJ^t=)&AyBto&&@nG5Y878SOBZ&W@E@u)HMx?7h($4&agAY_1 z0-ifou$s6HwY-)jtCilo1Fd#(v!M{n3j(z@mwZNh$iG&rNzt7ROV+cZbcE-&dJ>Fp zl9>vv?+RP!xtX$uvUZ7^o>G5}@Ag!u+3ayUHiY{T>v(!=R*9(E4T_c9+oi87cmSh@N8Qi(#d?Y_TPyXK?QfXfQnu;?i!)C5j9W;{7x zgyACHwawy4XW^IW;+t?DvsN_73IbxGb|J*Y0oT&H%V5P2FenLjVRg(ur zNHUlhkGEI-kM>eH&V*CK3yG2Iwo&wp(1s!xU|ak$%sfQ2Q*>05=);ADQm7@J6rk}J zn;;A`i*jst7{iQ2O zp@)w8``U;$;$l-YUvKG~xG2^Q{^)i0J^5{Bh>C2FhxhN&X6}2{J?MRgnnu>+{w1}s 
z7Z`P=`ap9$UbJjhsA|`3YP##Z?BKif?~%W!QRn)xW9eaJY+Zjb8QRh#ODPdeOAdZ8YDd_|be;Zru$ zY3F^-pRS89cDtqZqg6}=^EvIWKY!$@a{!`>J9&6hy!%Jq`7-W4Ptflqg`$g#&oFeSwg)WXK zh#4z>2j4gf4O|gV97f|{M@8l>k_(-U{mkOUwLdX3D-E%~>N3HgfYj^K59>y;_P8mW zNZH`oJ4dbEcC5NdE0%Wy_jA#9vZW+rz|CIG|$-pYp z>|U;UN(B9}_r>f&qhMg*>zS&=O=&Of8VtL4XY0K&?iat6F>O=)<`<*hDP4U*pRvb_ zx_WP0ql~Vtxns^Y(R!5bv1Wzt1oCG5o8(vf5zkZs6};`4gKab;!hzQis@#hWvqHQ zFHxygbE>4tZJglvmzAMnE0~@V3f^pFxpqtB>GUB#I)8qB7x+6_A6*W2n{6KI;*2ou zu+sZLeSxJBuS9!{#e+h!6ST(YU4(BVYGBvT6)hMoE}>h5Tk|`D zx?Mm^*(!s|&5+>l;`!eXQB5S#WEeM2M=Gja)&8+f$i%Ze=qRu54lvkxInP1VJnKiYq*VBul4WVQa)*>av{V zC`DFzI`Jir*5Fd&XfG}7QlEX8-KhyV50X$NLX4;v7wMDt;px#E2jShWM&f&}(}n)O z4sBa(YLGmcr27AHO?O)F5o%G0BbUgQec^QvDPJtzoX| z3eSCr`qUX?$^oLAobYz)pN3}veiKF$9Pq4E^DI>3oq$S%05CO%@C`=UL;(C^Kvz=QDs9}mEF z?zY9_y)h4h@Y=Al+s1R41L!}gZDqX2dtWIy)ae1QDuBOhjjoKO$KipG#wb!JwBO%( zE@UHdRZvFFRe3FMZo+XH)d1vimh`nj0-acJ<73iJ#!)|~3)7G2;q(gHyP`c$s;4(T zJYC)oBotmcGyOqYD9=@53P!bJ;cGZgwd3YRGyNN{%fYHc>^9fc1x944cze3yY;RN8 zHQ7Zi$R;|b=P{1HtA0b%XAyeRtvEtAMxzb?GOjKgnhk4hDC<|1=gGsr^Jwka3`&VM zU-?<}%Nfa?S%L`S!|=7K`lDwl8NUvSwry$Xz}~Hp+30xoQ`Bo4PCEQt1An#l?saTW zSn|j2rG}tP<&DdyWV=A%&ZjIbQq=CF?Cv2c>rXRgE9hee&A0zp+0;X^e9qf|ruTn$ z2jDkqf6a)_9sd6;fLlygTh4ul^Ef&9mT!fa&Z{4=y}IRgVePfN*g)h0dmBRZWw~bN z<_4hu(<5B`QP9ORi=l6o^iz+$JyxP&Ze_Pk`cH}ZX^|_WLMsYRkK(qzZZMKpKlaO} z(7V<^OV(SEa*!%|RY)b~_4_HoP(uNKU2)hcIP+Zr_Z~OQjVmO*Ed~>4@v; zH{9hY@^Lt=2e0+y*O~z1W0fwD5SeO9q&lYri95OB2%t9=J1_Pmvkq}0#Sj|qz? z%OZX?9mo%yPB$nyrS@U?&3EnxnNO;~#5pw>b|~>- zy=H+qbo^SpCX@dX6;X8U>w!G=GesWz3`=)-x|tEY77iqEAH11g|L)tEO$_3%@%;!nkfCzGM{5ab!e@TBw>!?{yDm2@! 
z*M667=w=|a2|K=kCI-ULh2++2J2&#Ih?{*!T+lIDtfC_fEJ0uSUdcD-R{6bDQ(-<` zdlWv+F8%?#gKOr#+pWHp;(cB@yuKz!=oyfHV%tbJeQNmhZ%tpq>j)0^G8~M)xDPlK zQJffO{{5bJpCQhPaFXU~vqxQwZhIh$xA$ust}Y`xfc^dtc0h^0Tn%z^cK&-^p_*Au z;plBhL=I^2JH|e@PLDk3N==zF%4@i8AqKzF8A)dCyZ)Och zZ)=hU1~A6ET6f*GU!C`$`-bVL`60j03HKM$Z4J)0-jMd1@9Kl%+KDSs=ZQ6I;S$5L z=X>Xef7kD_*}E?IJg*S%y3g$Q?(t*aW8LO-3L{S^r%CPH8nY-}Ha3scw$`z|=BOz9 z_b|w*9gg^0)*}xeU1wkIbj`Knlu=8_=+xQ6MpuZAuey|TL3b|nUQ5NBiyEF~OZ}!D z&-jHWmseN#Utzw-*89|Xp7b4qy!OFqc|X64amSb5g%Q6~7u)IIai?G6XIvbS-j8>j zJ?49cuOWJ_XY5azdVZbn&foj>-7dZJe6RC6&hw^O4qquO3l2?^O;Nqj$g$nU2qu|u z8&i6GPCN71;`7xpd6*~Pjq?PNCx`W9(z&%Hui{rT{2e}CaGc*r-1GJr?`k{ie}~_= zYm#l1OrHF+MX;Z1&^e!E`zP9tdWyNSTN3*#x$G=`mTJa+^rjqpv*WJvZEdV0Py&l< z@ox;%zG=5f_?;3w^rVjqu5Ptr&$Zr>*PQHn-*bGE3-Z1O&puZy8eicEZ|1QxqKm!j zQ`?;_IC|FF5%pg-p39kfl4Is1pzeks(vuzu4ERshw)yShqurxU1vLiN5b%*DS7@-Q%ebSHG?~IgUNclz+X z!#&yS9p}0Ig*gj4=TAhz-Hm56gL!H~*JrJte8he8hXcqnar=1YA71p4=}6?RN4~o{ z7Q6gAaPHVt(~ZuPT^(!k4GR9~5-W2s(Fs3e4R1Z+4fz-nuNdmT<5{m^iN%#XY|1wn zi^ck39XWU1D>*U1NTwf0?ymV23%=xOjPdl&y3oabGTH80PuuL#)vz=QH2mNEK#X&6 zqREdK=e24u?^!!B@y|F}+r&HVnFr@wiJuzQ0SS(~E55dS|J=pv_MKq^Ntq{6eG1D*ZJJ!C`d1Ex6NT3a~2K9WW6a|&Bb ztNt_Ng+a<52cJ5+yraK$_Bw~K_Vq)THAyY1l7#2l#M7hI?nQjfDYgmzw7wGuKjGao z=ck?}rvXUpYo6D2Qf7DcgtGvBU23+)Pf>~~?C9ITMW39Kd;IzcCb7C6*?nBkAF;dS z&(;?o=d)@E&$4w*@GJgq-Dk;h*Y30Q18wkqMBg#YUtJvwProwXF^eC4Fd56*DL$4v z*E^>b>lq2A=Y5AfW4^=RalF!f%6FG5x;i-LUC_QTKi9`BSOh{}o9#8sVGhjhyXMyO zk&U}V_oP;wUBB~V;|kxHvQwq=58|0?+UzkmlBO>ndNo%V)|KnkzN+7C+|c%QUssgW z6nvjY)`-8IKdU7TkWcbnPb~B>{44gTZj`7#7thLeA|)t989{Jh3zFZ5jBh``#fGD? 
zI^xx2V9}1>m?2;Ml{;luLIfN0WFe-u9Li@HS8a|4mMfoi4!7a%kLj<~#@DOc0@_)R zibpNP84#zOxYvH;m^>L3U#Zxq+gd1zX@&jFjV?YNZ@o448wpC~+VA{c>qfpKP-3UA z$;3RRH{8BHT=|Nb8Qp zM=w{udZ*AEORUb{<;3gbnOnDgRSp+^&vCE4`)ZsYg-?!*T*<#asBv>ty}oz7dC3^xkcZ`OHZ1u#KgUJY1!}~wyB8M_me6DqHAH+yyuO5 z&bP@R*oHl;1O=gpR9a!pDxGVN9@TJ zeV4DkiNP3IUag;Alid#HAA9v$dX4|stUiyP{4YNoXo^~l+SRl^8(qtz#RcI-p>Xu$ zp1^Q(jfEY29Uy-bmzuvxU&9D!arky8$&QamAt~m%5Ib(s@B;e#oDyqR<%QCgSq&+V#{i}nv z4&LU5cYe7#&znqcT=qafBIl_o`R;OF-9jubM%7iatC!mMhsE)D<-h=qG7#PVBrGobxbT^)w;b1htR&_}!^+pSy=8 zsO1-lJhR5hHa6>5KG|QkoAa!PdDaMApLaPi`q*>n9Kxuf3!U7S2I;xZ9hceOo5Y7l z;Rw>M)QajzXQ39!t!`#mxJ!-8V@k8yxs-ZAuCvX1eMjWM?9-tl7Z*v=8(9w(nP=sv+^ zJUN^bo#yJ@x90y(kNV+2wPEe#*_1R&^7RBddFY!UqKwn=zenZ!65N%H|idRo4#F>G7tc_x-2L>Fe zD~IxQJm;Mph?P9h@u8gLgB}jqB{N@N0Lh5BIS7N-g&+S-JX$GhbS}1=0f{SxxP45>y-S{uNp6&>2_xDZ`3Pc$cXak(Yf zdrjC!!$NU8#_#061;14)XhZEasJF5XDLo#!Cl?^j2OP2nBnjy)qt^>{u#O3J_~r;~ zkum$G%ei&enc!3=M--9~h@G05-&~!$%YnkxuUXr{z4;zK_;2=3_=Dx1UvlrU?{wXs zbkxJD@y-^^)0h~sC#TFAPt239`yel_?16oKf=}Leaf115^#1+~+w=R>W8u>CrP-y_ z)U~(oIQOC>dryvLjSXtsYT(Vjy)0DtdJ>&SpRLB2^(oQ3O|d)EHEG~Uk&@BMMvmT$ zJ-+b*@i21@hm=j9c}D#I`~zNV+=+Z<9LJSceEKN-c%Vkm95T6>i=S~kSst&VohyaYEB@o3FAiKKDF+Vg;a(8x zP|m{e@caC1v*6H;rvChM@%{8S6gnLHBOG;r{*`yC`QlLRgR>6L6V3W<%=TQBNb1j%IMzu5wtH1G)c*~X8^qVnP)}iZS%eZXb1DVHn;hcNsa~yk8 zbJgdXr@e%5W$ZTMN#A%_&%3Mp$4}oRfnR0IwPVB8b#Bw{gdP1_Bl0lDHm|Yy+^=w_ zZ_Fe2cuT+g#s3_WNBmB%6aQ}RdW}+k6gjTDUc}~c^&x#n-(}{>@zm476%FM5rMzxU zeXsBP)XZxgvrf?5@vXH5v9e0P-4^RRS8Bt{*Yb_#$Bp}ayy_Trw#7`Z7p7;i?_Dgt zj%)#~Pfg5mQ&+{)Z*!_K{_^RezEht0E2)-4tCbL|UTQ&84~)V|K-sRbUrHnRu%wQR zap?1&lYOuKjNgwKi9KT$VdRWG2D+YW_wD#DcYoyWf7&vSKiJoJe(#*^?dP-hRsUDQ=JP?Xx;KugxtQ?e4W@BlR{3GGdZ&w@nUijI!B(JcjHh z-(I+24XYRg(Y0^DXfNZ8J-mkhnfvBfF$sIz`;@U?a7#DFVP~i#DGZY0mvQxD&<#Us zm;d~D1MTzXH@)jtM@wN>TTWg4oJV4j+iaq^(Fj8vV4YX%XQiXqelB)yRP_Ec+U?1N zgL=6sYCSkg4l`(NO~1=h-)jty5fmTgU48lU_uqf|UEn%sD;^yGwGPqaP+bRL4$pcA zx(?GiP{&pd`N)SbVC$V~^8cg0`O&-F6*Iifg~wJyMVl9)bB&ZU)i+Gzom}ZyEQm)< 
z!By1KVY3DZ7+~sa)+G*o{Iha5QV!%fJdIK;J!q~vh?QVy&$*E8A(T{3}64D5v@SMm;U@FkP}9Jl2avqVu|- zKT@>vUgEVjz33bC=XqqcZb*g-uC?B4&wrA~VhzV*M+HsZD_&cSMJ8!s4#Um#9rm65 zhVq+KsqZV!^-&F)r@cG;(!|vq6)+d7?BHh<-=uwG>qgiVfY;_vgpFkp4rVJ%vwVG9 zF^@yo7`cpHcB!n#JTC0UxE|#)4tn=_^&BFP5w7CRyQ|sMymKo{8F^<{SJG~I5HBBj z2_8aR#2{a9^3cfc2VoF%skTz+FqO8{A&tE9Wh^i7!!hq53U8cpF-xe`etGav$=IzX z`d9A`*EpX)z;MASeF=(Jqin<(c0l@vf@|22LKwc`tDcfWCGor7nf3wLKVbOtPrv=f z;kmyf&Bs0d@Znk??I>9v^YGz0y&)W5yb(=(^r5m*R(WtrOULJ|Ra7}hD}WQ`aUsKy z5z8n{z~N6|{;}W=HgKgJII{{=wmb=j*Va4^r)88>zI8!A_mwo`VWc%xb52fkB6Ivb zvaifNYdc7GJjUXq=DMI($B!5?e(-Q|Z_fpXziW3b&Uw{!=Le?i&v|0gci&rAJk^0a zeUH?>G$reb1gXc1ch;KG=#hkxZiM8XSLAqIXx}LC@7FmeYv1x=dNSXr8kpv2xpE@xYKXV zR~}1eNnJ{kGxuJj%<<0^d3b28vW6XVmw$YN%b4C|&KUnYTgT~E*%WKyyw?0H zQheEqk(a+Zx~4-qad-3V?d}XNabl0JM?W9MCpPB6XUx57PJ3A1yWe$@GbYaSxN|+@ zo-I$Y&~vSNo;hAwek+E{-rz}{n>)doM;EdL&KP|kmu+8|-j;<~o?X{vBkaBOVIu}| zjrFFlL~R%*!&75&Fafz|Wn=LauHR%);|ww4#D5u_$JEz5 zxbBFV{SGX-)SP&AAX+IaQpIvz%((n(>1tNfo|BBSQx{J%_OY@=QJ?P`=sV1C}{_ zdtL^p2cuKOm3!kz9&1cI`+xGdabbzuBCy&kn#Af6j6j>Zbhnptz!Xl#n(;so85Fl&VvG;^H6!s;tp%`lOp3-G=wgZ7uk4Q+yY41OwZ1mBl8 zhEjD~xVjFnp4UCgu65hFH>&!X_7ntuiq)CCrd@ND-(p5T@k|}5FFAzuNzaPKW~}$! zt{WDuqvhDQ97Se3kDG0&Ka}XH`+U}N64<`hAo!J|6U%zJZRmUjm-$~dw)IL3Wl{Pq zhngq5UQ6oK9xMV^9HbdLCNWZX=6I}bMf>%!^Cr)XVe9i=RS(-p*U5#6QNnTd_k7Pv)C% z=OG8Uc{dn`krM~c9K`X}!Ab{0=U_f1C5FsJ!L5u?15aZkLV;0poBM_(*7zq~^nJnf zE~;Jd(hbQ54<_Rz7{WZ5$rlIxGsl?Br`YP9E~hgXNKHplIU)r#vc=*gF<<{E@xq4+2fYdXuNaPbcml$BNG|2{&TMbU021YKd>{ zmRp$XaV%rI+^9AY%y~r@e_|s?-|?p|*p7+Z`Jy|=n{?E>uJ7{cf6mjl+x{2W_G=Hq zGus}8e0=XTI?=^Zat{j}_;v;9rc;fr3^~zv0H|zHl zbLGuiW?j5r_bObM-k2$o+2smBGq^iV@qwtEX|O{c9=FQEurA5i*?BsOYAiiXWBi=3 z>&<05eaUkWvdM_o@83^qT{-67X^DM@nCFI2^f+6NDJZ}fW9sub-fWxx{GZw7yvJB? 
z{1{U5wed#xL=>(vh;15sEnvuzwn(qHMvgNK2s#u$e$ct?uNnE1oJ0HvLiC0pgW>Ht@0 z`O^n^!e5;F=48c`??B6I@Ds-@_>F7mxRLg+!~$2*QX5lzob=?&95Vpt$CDfLdg8;W zMm{T^cYLw@%_=aQUl-dTt7_jFc&!L!K zp{HM?HxAF!UN&)Y=%){S5pl}Q21bHXmX4FQ-s#6aY{TF|h1!28#?W=HQLRwSF;A0d zQR(Tg`wazoHa}UxxuqaRMi%u5zQ;vrjy1+!`oL+Pu7PD58{$wK!{d4(RJ^j!=YVF5 zZuvR?mE#~*tmLrJcmB5aJ`GJJ=^ifsV55QW#O(cae!boNh?-^u%ixpC2<8KWVHmH?%f7FF(h7eXL(S*hySnCttGf6Y$oB zdnOXo%%<+ecWF-S_X*ege%vd4yc+So-3F`OwADZsuk2E|ky+#!gEsa) z4p~f*izFN>we&jDm1BIpG$Yo~z(ERh{j2ZC zhR*D82&2w$=n~7&VdS4*6v2rf%t(G6^c*vKv(Uc)lN+w7eCQTr*-9tegpSyR4!#8B zj?=s2E~1eB4paRjA&LPle;$_q%oA8$6H`JCkOeRO@)4cDYTC?0AAt}{f2jP0Rknn7 zIt(*k0DR|D>&2mxSoXqD^QvcHRTJw`|8;-YKd?!V^za*pMb!R375KK#(F<0qUH|8T z1ZPip=b3{u`RH@i{x}CZKD{wNYfXQ~0h)KJm4D{77Vcevyeu_G%!J_erMOC1vXnJN z^E01OnFmp@>w;?#NU6D;>Or;|YNS{G7!S2uhr@tOj#<0dtcI>%!HCld40Ew9!qO4g z!_5CA6Q-2a4t=wo0#q-(BzLb%&!NlI<3QGug2RqS1GElCTptJIYVhK0diT#!;@9`0 zo1mmd%*fDq-_pa~$054jedc*<3qN`i9eI3swY5qOR&meK=gMy6<=Czy#^@BSz@(+< zjdynbOnRi4I7}03n7%Y5mvbbeq?54|2k*VRY7S=C+a4QZ_wV|T}*gO)un;=WV(}IW|LL_x!rtw}m`goy^_;-1qz@Ci>tvMW~WZ z>YYa|R4p&%4mWt-cp;tCYL2zXUle*VC7*8S>$3FDIrCmKy7-*>;2Bq*>P$CDPA(bM z4_Q_Z;>2raV!G$lpFP$2Bij~?^Xy$*`K|bg=FvW@)+b!|s+4A)yFPh!?Jj@9k4N&F ze*h&4H>d)&SQ5!9S7dc@HE^lD&C;-JLhZhyag3FO=5nr0I(BF<3dvz*yx7+LIW&)d<- zsUIBkxnB4lQANO&-oXdFE+aK|9e5n7Z!bC}k5;Uu=qQf-5hq|7e zYS5wChiGvAhC{zLui}BlSN3Ayz|B742OYh(%a01F!71)La$NIV zW?4S+Og{I1f_Os@v_<<({n- z39iSFe|+s@&gf43Igih&@AIDTsqgc5@^8Etvk7uTT!_o~XWPh~Fq`A45yl2pvnFCo zp{M_9)Oz9?TYhft_)p*B7JVIDIAj0(bNz*k^9hJraI8!37UIBF2Oac0>V^<*AZv(^ zusld-gFFcs9cKKQn{3!HD8t34I1rL=JEUly>=vL20IZL^)*dmmB1@*b+`F$s=5huABVTg^jPdPkr}C8B91QP&W6k_cpkU9Uoke5nabE zIqS1D_xP%>{;A$k=UR_*?G~5t5az046ERwI7GU7a8&n)$kMT(_pV56^5hGdb9;q#< zr;a;4Hm=wrqqk|*(t8U&yx|bhc<8ls?vD9HK7D&!s$X45zN?nIyb?3E@P{jQ#vZHd zpX6|bJGjX+6kZqZy|sQ38U4C5?$c_db*T~4M4^v{6^L%o&k&Utb*FY^vsmDz!x*~{A@j<5p$Y9~46^I4VwQ}>^ z#(IQCJ|&EUvovz|CJ+y9i5Cj_Nxj*K<#-}@UL%ni>%un@)&Fi+aJ3=%)2B2|y{L-o z=rmL^rlrmfI@B38HLmLbCvsNab7Li|o`x(XdLafSnm=?HE0)q}1DDPl8Q%asnim|N 
zCX^1JLQ7WyIU#F*(hNZ8?O1}nG$%{|V2A&VG0gHvV}&n+Kv^9!K&O9G=zek^Fv<0R z9hpP3_5d1fFt3<%6N~iPBXHn^A8%7v9(tFW;6MFdhhdWN%H)7ds`}wV4$`#+)<-#f zu-^U<48DBn;0@J~d50S6iXwg{5(nPXvPUoT#JH9LnkH)Ny2!3?1T(r!v6^d=ls^4J zM?UcSqE%jNdK~QWyWDeonGZ2%jm(iw(hs$k_T&;m}|abBo{v)I3iZ-Q-Jrp ztkZP}07sme?}N}|07>iz_*4hi{iB7qkB!M~Fc0Sst-HY;m%JB65{&~;;D{OuanyVw%S6sQzUtxOu zJN|CJlM~~;ZO{9aU*~$l-Rt)lU%TIX)T>+vXOy|A4M<~?OrK{Do>X9jhc91*WLw0#iKjeN9F)?^UGdW0G z#(M8?^Hta+F?^#__BGLeHBk!)MjbXnD|>2puTZK$y(S3LVnD%_JUugKcx110SAs&P zHG(Y48dIM;XC@^DCQ8yteC1m>>dKcmfphrcMQt+FeCYVbMU>uC$^|ZgOBlhj_Nsa0 znl#An&pJGUoo*;cd^bx9Js4Hu$SJ+y&=?m1Q_~2h_@v|!nZvHX$s@-{m~3QuSZC|8_fZ^ z{z@XBpY}&OcxQSXuxk{qFu(HdbmgJ1dcfEC!k|}P=2Vtt*Z=XT(fPS&N@#mO8apmQw$glXElP>uX2+v)&_`2vtW(UcJS$qBS&BKy>pMk^8`TD;Yy3 z0r!P@gJmQak45Jc zp6URAzcpO+TWzB;VkUy&-mAt z(TdlgPpxOqYwmAq)m^jD(~`0zRxZtwR?s`$vVKikdj-~Ykq^(A+8a}CEmLjNa>*z( zlNZ<7Th%dL@*MDj;4rP+55vtgd!z5E_-DLvlPbPUf(=i`(>J)T?>ymRJoWZTug}%q z*Xi6j_Fa$R&WNSU3;Fkve*RDY>3?5KezRd{oQrayp5G^T+L-foEfD>cWBK3NyUy~$ zn=7f7b>z(Fp%F*0o0#$}yF3O#;P}O5#TpAB$wl z*Ix&jVCzu%l#__Z!|F{bsMXYUzTv45g-)Ua?Yv4;`PDZ*oV1X2IJP5wnr9>O8wXO? z!|wxk58zIX;?M&<#ZD^4ryh+eNJUo(Wl}WNPcEulBcD|clpOVqBqMJ4&EJVXy(BFu4D4^(yls5BDTM4LN~$E z%Wg_;b59=mT2Hsveck;(k?-Q(`CV}p*Z(Fwc_!~^C?Pg%$<5?7a_ywtl`2JEj*D$<2S#@bcgxlV5&Ce>2J4xR7 zDo^f2ux$Dg^bf@-!bZhN>|Z%V;KEoiX0c7rAt=7V14BID46?~r02m8XGNeOJ?H;*N zo2?Fhlw_Mtv#9|COGU>$?+jAK$A; zRw*N2eS~4izZ8fPi3<;L^^S@|6siOY1EjU660a#oPcE%ZiLSxg$okAQnJX!^q@qI!7=0Nxnr|GdfW4O`bB?^uW;c%V}_&at@FC^=ycli(3xOJ z=4h|wI}KBK%GEs>vDITk6vo6-2I;ner} zncLNSX;M4>@mto#$y9vz*uh%$ALwLuuUB$r_T^-F7TJJ+w^R(vVS(x;NGnLM9 z@Xxq|NNnQ)9DYih{UH|lh%3Ir+28Nx!pmHaBGc-wsrts^vc9o-FWWEw`_ek+lO3Wg z? 
zCD+8i1KS7XKlJW1{=WB>TXBSw(HFiecXX@%Q5%2a9D@4tN-vCfsbkba0QC$`1qioi z?y(GdkiVrdhp4b4r}b{P%&1A@_S$WLvNmq*s_V|Zh(~B!_aN!fifyp=B$1Xu{e&Yn z@-jK&{ngyHo+|W&7SGwg4YD^tCg;7^viW>4NgWGnK5D+&7V4#R$@N=vRC6oTSnD}0 zz9U9@?Dcq=-L{^6ZSB=_MJ8z1u})Y1b1lFmmurmOf7$ovXHbsy9c!_N(rm>W#`Lm+ ztfk=#pG$CA`vb3qzX|yTRdM;Bd|dx$?qJpPkxufqH_BXH9y&ReJV}CmKVJIz z-bIM??_2%9>eulu*Nr-#hxhbfuPdCX;~aOMeb(0|emml`*`4opaNYo8H&x1mohO!R zLUkh=3~rP)*%x*gI-ZaQ!{zy5+dyt`pp0`7Hrzmr{KzLcBz_{QaRo@|u@l9h(2(SDZDxoW_GVMt^RL@hn$m+e2R=C01Eyg^ zqYEGxhH;5sxRUC77`~0s#}|k!9Qh-!7$gxtB~?Du=Wl@qvGN#lh7~rN35GBJ$T{e< zgGqx@_q5pev~P32z=Va+MPbtv=E1MU+@sb^dxp7vH#>78t#q30 zuM*Y4x%~Aj_G(mo#Z?Ft^-oolcSe|!E2K=x7rh=3wo5seM*N$lb8ZGgZ&&w?ZQb`B zIvg`-+~eTyeb@a)PluoD9i2FplV~_HgeC>SSkDqbep{^O}Rw^|u zP5yVDKU$Z)eex!sUGH_-bw6Sj-fO>~*u4(%TPWAF=8-PUN0Z<8y~nz+DNODqa&Uf$ z?&m;1hur%vBM< z(q2kE)$q!Uz6K<}qq=l5*2C6@S|H7F8rNMzvS{86#0ok>{Mj(qg%@Z2nlMnjaIeMl zA)>rQ2=6*bn+@w;z1S^&f|Q)_VGRE?|Xo#Hxq&P<-bCwL6UD z3wwEMqIJcae>6r!)P}&7eo2%D9U;hPWGX*@M8S~Xe;)^8rZZ=WlUc|jQY?jY?#Z7R zmRi*O(v_CHB)a3L2t@W^~ocB03&ma=$vb0 z)q<9uz(FuwQy8^Ge?Q`Xz1;7vSAO2eBcAI??}Bxv=ENsgm(hvOD-p;V%b2>N^gJ`$ zi$y%qt}*dj0?Vi_x9P3up2Fi8?vty#IH;JrK3to*_vBdnd6c zk^`eoB6q+q>wf;1fB8SMB8PK-0z4bCpe~{)U>Q61Bs>c}=XN$HPX@WJxRpm=geQFW zkG#h6YKIv87Kh9!rxNiu068er5C89M4$;5sclPobQuj#*$l*sm$&Yv6e0ZpvMrT8B z2o4ZAL}Gs(iM3F0S&?Hx#39;0iO|V=PG%#icjuj}FZ?@m8v;s)uREMSE(D!^UTI zbJ>moP36F0WlUuYu$0yEdK7T3kG!-1Wg{=s$V>CmQ6mtn;xxtN2+myQ8DZojvhcD# zm7=@jTxB2Y7MtQsoLnYNC1e-nMSlEXBaHPQW0`Ad=H;54v`e)=M^(qEc^PUq4_>>% z!v!bR>xoBe?s&XOO}@G2H;DDkjk;Bw`ZMD-a@fs&|AjyJGUj}iT0f)+roQ;ddJWF2 zFu8vSk6t+w5UhdHmqKai5q(`*x7g6h)DN_5ym`5Y|W=3`Aq^STuGbion?DLH(HPE9;cpiQLzD&&1DpIe26r?U>JIiFb0OUw162 z=auj5yNPtqz?HSS<2`kWbLyEpcKVQL^$axjt?j%4k>?Bkc0~Lnwg>#j^X&sBJk3es zXZ0k%&-Pa8&UnUT?)q%mYu@)w@;=9KM$f#<%TBEJf;9|x*!X1p1QYC6$5-6r9q0Ar z>^;Fo@CK}sD&Fe_$L?p1fv#!%8M#OTYvxjJ58iy6YEHVJ^YNl~{RBIBn;fDn)3r%=yhPQJvg{jhiXA0+oH>XOVl(To7c~qk-qyXYF(5S917IX z_&{2|oQUcWFBo9`LsufQ2zd99cctsMHfUalsX72~i2j8Gw4Cr+L4l16?&3;n;ACUf)FWgNMxPOMAcW0sZAf&)HL8eX!o|2kFYl 
z;hO!)9I%GbUGZXur2cvBMqM>`^8i>^cdN0bVv#Bfq&c#O#MM@ix%KcHtw&m96P00pZy-oXQOf(@m}RQ+32w*&awM!@^pBg-(3%J?|hAJu5)z4c{4^ed~=R) zgx1!1=c<;c1SXqnXwxZn&Z+!C!d?C69_Sck>`zz6g6Z?f8N+i&2PXEXSm-LTz&(uVbw|T;xUA;nA7blScCO7`s)k1Pt56d8EjeJ9)zt>m_VMe?zJ70nfBciV29v8V&hYm!1 zBdV|iPu&4#Y>H_GjvlTdN@x${mCtc&zXDa55{4{{6DVC7#W!+6U;jMg$vzXD*M*&4 z1-C3n;j*OKmuf+4&jgLcl)mearj+n|ot%F!CG+$^G3{m?`Bgl%7n$$&xYE<4T|T32 z81+wnHKR(dKsCw3arF1m=#guzLH9)0 z!<%~}BX)lk{g6q`-xxV(-ofUAlP_sCB6HP*^dz5aioupV=Hp$&!;v-Vm$lXe6LxEt z!|59y`P~?uLr~u~D*EKU&js`BRfW-g-@^IhFev=I+OcSP9(3(}FRWPUo9h$}ZrUrO z-J1W9hgl5qtAlg?o;@|(A?Zo#R%LCj=N38j-4K@1T=KZN#CdPJL4U4#s%7b&Yie36 z_Z%Z@bz4e|&+gMgxtG07KPLiVph`d6GlVm~{D?mc-e=8ktJPVa*G1<$;TeWA_Y z=a#!4n&sfOPTIXvrNjbF(y|EWVy zz0l|_HR3CpM;#<8T9xQ`HA4v=)vHL}~e{x_#$zhd&LdV5n3YVz5`$84C zFl-0$)g-pgg|b#c%sNB|L|9}v@U39#LqYTIHwwPFNiwsJ!xwVdieu83uk((k*1g0v zb6h~+{JCt0g1)wlII-~!nf4S0fz6_rx*yQjmoE6K78LqMpp@m~H4g+Z97B(q+-Gtn zOc`Tc!{~jo{NlrT)$(wx9Ei!DkvkNSD$iPxI$YCdo$VP}qcZz2%{P#a!F>o%{VSXA z4`hF#=iXp$rTmFh^I*7ll7p1wf;~MeSJ`nGI(NuxW2@R2agL#oe?3&z;+}hjqQ(XN z^%^mT@d}|2@j@7xu@AoEB0u$PaF~wl>skS2U7;K^*x~wih<{?P-lV7I?fJYVb!MX- zvq7Uf7?LR>;CsI2Or4)*wr7%=SZ2@66-I1{JD$bj_S}z=7J0{4P0z-g3?B-~t@&piVMu>+ zFPe4SJ7eF40^7{V!0j=0Z^|DuA@0g`k#=2j>$)z75}?hEO=&Nj<#zT!oaoX4E_v z)Ao6YuOxivCz&*h{S0N=R64R1+&kHhx&S@V*&inf>|sk%Qs(M}|kbddMY3o1<54rDJ{a-3|FSpWQ)W~gfLk&Bhg4*g002M$NklcqKF&!Mr^uiqpaU6 z_k0rp+qx%^?d$V%$^4_n8WZ2|PM2Qy2nToU?eJaqkbTgbPjLnhIfQe2Cl$lZC5~YW z!s}}=_BAJ7P$6^vnPuF4qGfc>J8!o)*~K))5|#IwPD#6#o1pb$8i$Z77Kit!Yg;|- ze!+S_kNc?uN?tX44cPc}%geO-7%uhVKHS0ljZ*?^ZLe#za&Ya=F>&sbijV8`J{Vg^ zj(ulz8Nc2ez|6y!sCOg$J-6`Sd-Zpn!JK?g`rz*J#CFNGCp4T!EJh_a|^TQRFSQ!^$m*DA`bH1a$e>Q(O&;^Y)`*g%kQ-q#S54n7T>((WM z>l~+?`NV-Qcj-5|UM6*NaihB*(H4F!ggDu>Y*_v=POLX95pLGC-&{C2oACe z-|S<;UuXbGK)1gkSeg^e;D%zm)T`2zfjz|ml{au1btpx~7kj>W!)N%l>_rPyV8v&$ z5b8xut%63m7T(+owIoV#pXt%c z=Zv@Jphw}0=B2dqcnO=vp2xkfbGYJGZYy7C?|U1JMqujw;y~r@d|EHQtjnNMpTcLG z0fV*m8-40|S`+zx=^?Q{aq8IBFZU=ld4H@mdEQr_MEq6^Ih|K-o7_WpnA7Lo+_m4Y 
zefU=|%+~dh-^C&Ko08-fjx)|XPVBFIvAyDqy^nC>6J3vid|tt#_eb5%b)&bvzx(s! z$(MQOjjoUH>>c|~o*&hK)VFOO_tZnwn`4~|yj|c@o-GNFMmZS)Z@$xY4p!)<$Cm3JyBn3!f{D0(!=NtcKufY#tXgsNLHna z9Wp}b;7}Wrw8U8>2U`wamDn%!(FTf(~1vvAG~>ovgYH(*g5?4 z)8BsK2e+t!FLJAcjSe|ACkN?$r~nX5@Wd!BgIg!5@D)Q;EA97qpFc?_(*`tpy;8~JDFJ7bh_Y% zfw5os{%t6b%j$pD)qLWokRGrmQJP;RM* z(;r|NMyh#dB_C5kEd=dJ418I`_)<@5!p=R!XxS8Uvsij&*9(1ku-@a$9q-^z-`Jmd z&Xw*S_p)kW@7!FeRi%kKHFkWLV>emqV7BCr7irlN+;cCCd%al#oMEP8U>b>~KG!J5 zu{OB7|A_-UWQ@_XbDeXvAi1(8@p3G%wtww0ET4FFtoO_6sr}EnYja}B`;$mlO!{)Q zoEr6`Ddjo(Z}r}&Ue#`CBoCt3^4@s0aX;T?gr=JIB8?$WOsAyg-dS39Jb_EB#7nri)$)=*9 zUX&R1P)P`|a3u{t|1WaPTpT3V!P$J_1u|`Qs&q_w&A2^27HzpWHr6oxnvP>{Y$VG%T5W6rXV4Uy!%`8*{GqRgb z1I~OPOLYE>zC;<9sTI{g4BrHcLeU8X{*05Z_~@GN9WiMuHY6Vg#RxORCD zl&yIEjj~7nwZXPf=_gTWwz^M`@V!3M_m;!og=!DZVPgaj{O;W=+@QEAu zg^Pv}pZItEvhA!pCi!F}!Bi5YZHdv@*rz8tID6x(Z)P;5e%8zVL}mBI8ENd6)-NnO zw`9~WQpPKFMQkNeZJ`Ejyy^7AgNnV%6@2YQg}oJap7 z#xsZ2qt3o^*%qa#JNoM~L`UQYZcxj#B)Mz4_payC&|Uqp{5|WCJ^gX820Y_tZ)9xH z|JRF`gLC$IMpwX6c|%Y<7k0!A7v-=ExXJ5D8pg9>b6!tm=Jk`BdE_BIIPAhm_Tf;t zp^u%B?(VNOlFPgt_co{nXF9}GnYM75I zq;S?3p#E_fIj{Z>F7^12X%wm)nR95K2j@CeleKkxlTjFba{*Q^H4e=0)KLd-)l+<= zphJJ@>vY7SL5Bcj4h-OG?DZ#I_huXl29N8gY;@8hUsp;Xt&0IePj2-e5M2XPbG=hv zI=zg-jh$MAbq4UEbL;781+@uv*-D48aC!GMvV4IgIAhhSOU`nuY3ZOaqm(ks4}5YS zPG%VK=e)qon0qAmfB(E>UJ%#T1#}60 zPvKmm-bS5@!VsPky7t=A~gW`M>Bcf&jb6y7vjeHDuiz&)DVAnLU1?T9a$M zORNE$H@Z`u@dL;&{x3TTXcjMg$-^?+HBu`#i{W6Jku`o?$n@x*mII6ns`>67h|sMKA3^V zW{xlZ`obKpIW*&(Hl2rzvFGImu^@D8bcqpR{-VbmA{2)I$2Y$-2Oc9LO3t>^m~id0 zMKd(DA|G61N_D^)>w|7EXwK%(i^~A~eC2R$lHu#=?KH%TO~!8OK*V^k?*!6-RE=yH z;-uMR>@1keiiP-3#YK-sJoSOjT*Hml`-L3%^KxnKYjgOns19HJ4%WH(4~~~{h^Moi z>lFqvNWSTj`Xd|c18h#3!-wPHg_{Qa1jBJ@&m+%Y4$;1e5Ee+C|9|w!6)#!6 zD(9y&z87*|kbC}f7uoVcQ{R3^TNu3g?^kQo`^pi>#xGhm@nOon4Djg~={;M$mB*!e zl9&_@?d!0-Xs+Wc7F}xZ04~@oGWEmxxmKG$YqjINTECmY)%aNR*n5d&k{V&_b?y^@ z$&hT@KeP$Sz&Pu#c_TV~4P-j3lK+Xh=Xu)9a>eJ71DNJxymTw6A*he>Sl6SCbgxVJ zVw0Skx8GHl)jj9N-53esS6g$4U#t_CM_r@`(#GAs5KNxpxZ}#-9BSXbaqYP+LVk&h 
zq^~iTlSg3w?Y8&XmSuGJcUHUkl(&6s;Lmr$N-Xc{aQE9eoUbulWBY_D&r@IC#tr5i z-)+EutN#Xj`aAKTeB5LDY5qOeZ~u5XI77~|om=H6ht_u^s_z!O2h!rhzQ(>mqgmucTo^XEe)^*e9J0pRlEY{fqvV1p^sEDJ&|r(9hfxZ-TXFcM7H#9 z=F8mmlQ5sV?MsDPUi;R|Sq%L!x*i@c0jQ|~H1h3t$h#)Uyv}1#a%4_ey>pPBgK72H zp|`-3hPbKe=)q!ls6M z?x=8t&*=%thi=Inp0(dy;8^uOOv;Uma#mK@wi75J`LUq*;(qKWj}v$IwHyqnzGI62 z31E?A9W!MYL?654{OW^TRZO)rZ))DQ^>=?o4EiVnHjOjmSKIQ!QVbLz+nZTL)^B&> zKKYs(d$pkGFFvF?t&04<&z{_PoNYhE)I7k{ckbtFeRrIjt@c}zQF-oi=BMsblS_S4 zmxxw@QfD~+N>xPXDu{f!oaDBrMDEA4wE!`^V}i^TzNb2!n4iYydK7z?b>99`M;U!u zm;D%&OwJtPyS6+dM23!a+56{;?F{{$r@H@U|3?naDaH_$)a7{)saw}c?9t=l9XS<6%B9gJt z2ulzYw_}xR{(}i+5wJQV=^GQ8%8hcdrv4IW%4)bqs!g;28RfP?1F5eVf6(r?&IK zcyi^V9)9N(UzMrP;aQPe=9hCNa3pwN4-c9!=i7Nu6OGd*5W2y+IGS>Jwhci1(7Q=W z3>rr^97sq|x@d?QgP;-tb1ZdL$2apeM&@}i!F3#;$imqDkl9dKnFmtgIYo+(=ozl8 z6oa}RsGWO)wvLPABm~#Vl390Bv;yI4!|+eGO$$Ff^>gQJjfwNF`_6fC7#|moyHCvx z8~GGw`z4mdP@2uK`Kz1F@e<_bJL}7S=zO@wGJN&Rj!Bqnkq!^m$-r^N1X9-yS?&*Z zA0z>Ejaics$$pGYbjXJbT=75#4XYbDmbbFq59tzg9+tB{K=%68ARh1X#FSi%{I-7| z>%nkIGBMm^RkVk}7sFL>?)Zy?H6u3raf0Qn|E$9TOC3$>_8MR9f9_k8cO2D!V#4;s zbxi+`H+;Dt-gCFkty%kb7Ol!V{eMqB|K&Cg8uTpMi_bjRrDz+^jn=kx(VuCr8}h(! zyZGz^@YLfbthb(@geZE9yEw&n&Lc#9E?J$pj}~1&-|BasB(7cIc7>dct+w?D`68at zjk0s%$U!{EBw*2DLk{YdlMkMdO`dk-OKQ!K9Q&O)Pv! zhLkv=cl?Sqhv(#qj)MWRd~2l`lb_##W1fc!A0VXYJJSgypC9eIt#_h*n9xCh!-7r( z|J2{4=FkDI7|H~P*@dGqYjfPPCFj(7G@AEa!boI89#tzxQg`~u^`C;)6BaOPn)tBDSo-MV8(7M1Tg6EIiYlfTa|mPdvc z$Dkf|RH14OCl2INP-5!b`hrCfyTk`mn_V%H6UJ<-2}M2kSz}8awv##WdXM5${4m<5 z$C`Vs6AL!*G$ze_-*dmX$!8DCzyGd7vfhdQ{a^a*w0!*jFa6|0LH&nAGu+~BO!?1x z5|?8Z&)gQc7@yJ~`NrYI%B0CMw2^cLz3t=kSuMw_?Qz`f`GTwgULW+zxiPkWpmvO7 zBL|j)ccWGr;hW(gV{2V(20j=mWH6@VXw5=cVmOmnDD=RQx$F-N%02IXvdNNTck>d! 
zT$heqX0kaiP*2Xdwl+Rehf6gCSspY-r@0Ha<+eET-MrUP<8n?4HnD`~HLi(fc(=as zC5)?wPc!^%+fT@)6a3f-iC1wqbF)o+&28Df890g+#u^yCb0oLfD>A5jukVc<8sQp4 zaVM%A$iRJ?pLWGp*0|`)(Y_e^pJlD$Y1J{@9bdWf)8?(ak6Hr?LJWI;NB({Ddp-a8 zKmYT;K4tgf`CX8A`Q6cz{v<#39k*9m*$HbiXRxcjaVO*J=DBb{DY&1k)h`Zq z9UVMiLqvS%1G!@L;rKWtJIBbQD<*Z!VOw}np(lT2G|eoViMzzV|m4L$SE z>n&U6Q8|R@fz%0WvoYs5iwK8Jb6I2T5pi+RqUWlqDyoZ;qt=LAbM-p3F&i=qAe?P_Xt)KFN_geFq(D9If z={j^RNA(vvyzt$YecD;$>i+8-eP!>km7jg=^Wa$fle70pZLXN+zvD%Jwa1bi<^i}w ze`Alo#&xIF!TEm_9n15yiSO|5=C28EyHmHu+^M+(R&kb2rsvp9;AvySewgj2LAF{T z?-Mse-7=9`ZOnaK>c4L74WA>h?{h9WDneNp%WyP}dB;RQ6GzXu0u4EgolQ@j$5c9u z)V#tnT7oe=%$Y6$8HNJRh!>YE{Eqz8iRd)M!Zn33b$)n|0J!uswj`HPgYXy9 zpsGhsosgdzm)0Qm3C;D>h4}o=tM?0I>|?v?EtK9QwM}fXN(0lz2+RJY$DRoKz!)o8 zKQ@JZYF?IGFY|jYeDo-@O;8b1TgK z0wY&w+3f_uX*ji@8d&W{BPUkStD_Qk>2@&$X>bE zYTJ^|j{B6~$Ej1W$@z}B&f~LzOzbKhvBl>V|I@5Fy!uYQm7j6`#QCUKfR2`BPS_gE zn!o6pQ1W`@`$5=v;|zyz(eF1zEPS}UuSvk0d(W>l&j;tMPjb%qeqR$`V!iU+XWVQ5 zy@T`J+d0=S2hO6M(huXJze($frWsAt*yU?T!ajce?-o(nP$A;3Kz}HbO>{dgWy7=QX z_?*Dh{ZoJ5#A~Z@vfux~i~nN^%N>N{aE)}3_Qycvlb>cI^M7n17=JR)IT6p~=g-0u z11}~d%OP$K6o@9k)Pp%N8#pHH#Ae&*lKDUTd2k7d9UbePJcx_$y<3>m_z^m7rU+s^ z0{#;m+lmoQ?aTxx|JYRcs?xaOEx&Z8q-Ws$h-cZ&8Y~e~a&~JV2uIW&qT+z@xP%wR zl{54|bZSR_@lmp^ShO9-fL>OMyBQLJx!#N12 z{uZUs!b><2sE0xSA4yr(yG5a3;fA~p& z?yqo%H@3JEAYUy+RV!2kych}VX%mSlv@i3Hww8U~#ctimBkzjm$u{*+O!%FI+Art! 
zj$UCk#jSihBBGz*_`0qO>$EA)SP**-putScu^IwGhMb%B(!4MM|!_zjn8}>jdN`T>6`nVf%!e}6VtjNbCeXs8IxTywES(h zSbp0Q>xssLUCJlBg{uSEU8kqnkK^PFEYa_No3{4X`abK=8g_j4+iH)>za>0&(s+_# zZOPshm|op#snxkYQI%r>FM~6tcX?e59wz0ut1buU`%?NwBA2s03Qquwp9QMZ_8Y}c zP{C%04}7c7k(6C-^GJ8+Uj-NKHQYQvgbtQ6F(d{wT(;GLKb^EGeW`h zun#=&&Kk4_NAvvfhl4xDWMFO30h2WRHx~a9XBRG{`9;614#gEHPk>7ASG;HAEo}7bNyD#zn}{G54QF93OTnJ1lyzm`IHEwve9J zUc?lwe2=nQ3WtgPb-wnb z1^8ev^*>yJbLs&Q*hC^n`7&?AX$b=lQ8-#j{2*ryqOXk!haE3xDqtEy@U_pnU3K`l zw(9E}tvMN>+>_*s+$~YW=T+8tyEm$hk3C~l`>-YN$j>aVHrE-r+MVN|TH~wDp4C5+ z#+&-Lj*YQCw(YNu%rFJ7uj^uNMzG&jz*gd25o!#-cgpy4jV9UVjH=`9`Mc4s>+XWDC;4AQ?7R$ z*FQ%$@h$Mj$KR%#A8+u^CVBN6yW|;$RaZ@&6QO{SJ#mg#gVA5`mIC1kH8HOz*0D~| zu%rgzfM|?~O+&u}&Hsvc#JXf0s+BWwqbL1sP95W4EFqoR^09;*5OQeX$v+2d4%TxB zaAYwflH@FQa>9?fT8Dbo9Jz|`T9Ui|t-50F6gm20yIVfUq64uGUeurk2;mF@@;`Hj!e2pNKAYYDL*andTUIr z+{UmU*BCF3vG%N=e|ZoC$oZm&pZ?P9G5d?U06y{oO06%A`N+=Pjd{QtW4FV7+G?B# zr~y?FMl$LDgex&5Io}_HJ9OkmEjkA=+Gd@tMHz zmG5A4zYy~ufAA}>!0;&$J^$Qms(rvhhB5by<1oJQ^xmnvvBz|9^l^jE!{fq!PJV~* zHk@4UQrksHj`%6p=?7Vwoa5i~8EabXf)!oG%+u{h|D-bap@_RLz$KQ8A=0=W@#$~J z7+WCGH})|?zLlPM?GR49CvUAPqX*VCiQdMA{%w7Ig)r4BdHuJqynXBOq~E-IKNVm5 zvwz~)dRKGRj)JC)JKj5YOWm4ouFoTQh-H?^;KnvlJQ94;VSkSglJb`DK7YbAz9+u& zJlWl4XH4Zh?HT{%qxf%e#kYUTx4PHfH-yBWD*X@sQkj5SM8kAB6NPjy>};M-I|RsQ zf98WCMYM+*hyN_@IS%o}fSFng(jM6^?rb6@u&j7Om|XP`5Bx~*c!G=Oa*dB9mlEtv zk94A6qh%}wy4o;V?-2S#L45U{UVoWt)+7GR<%J-ATx{R0Cd`(KXtzaqCB2E9V?fm34+bZ+|Fd8`TX{PVYeFaLfwT7LQayZfDL zB5ACQ=OZIX9V#&an5hOxNOLuO`EVCuYK}Ix#P=`T*-}#dn|Juak^O44E3LdzC>7SJL}F zau26adBNzNCKaiYwO`u1t$oJfuX1ITwjLk;;8iLtGod13(vez#Wn|j{h}i;O8sI~YwK+cX6v1+ z1jy{#V;FYZz9R=qaEVM@7SKQ0Y#Af4y>E!C<5OO~)AGcXClSbL0#D7Rs8^0gx*1P@ z<*Sd4+N02B&B4|F+7F)pP3Y3{_<`se7op!{yUv5pUajrv|H_H&u?Q&DPP=o?xLl0e z?lECsT)B#EUw!Al(wFnxKh2}DY)m#^%hrDFg&!Q3?*fD5*q5hn^Pke!@gwgI_O3>! 
zZS$;Yb*KE4onvhFybQaqZcF~(8eP}_eQf_a6Y=dbs{{eVE}FKjS#k|wZ;#4~J>604 z`Urj=&<)(V3z=nOZARq`7hkatx69@ht$?L?Y*5y5Q|&VmIHoZw(3Il#klA0+a82^nruq zlIh^=&mMDtHb;8f525SRF`!#p?~-x2)DwOVpYg%kFy}x>nxw!eSy6xK<_Q~VQj}KU z<;ep_;lncu4$vU{aSn}g@Rvl%`C}gB*KOY8u4w5o*!y0?OG9oud97r_gTd^zvIY{pVJ|P9FJ{Y-lTXGMA&jC|> z;@~??hZTnUw%h}zH3T1|W#9%g1V$1vut{Ua277EBe!);UHO4f{=Fbx{M;}~k0EVQ| zs)I-CZw&R(pfOh8c*Y=s2}J!3b~l6u2?CGg<9(#@cj1#}`@3;CcKI2u`Y#He=D!@6 zPx;I}hP;1OBWof7hN%LBTazt;`GpOgOAh;A3l` zF7elyah9GEIPkq&=l6aB7VwJ4GC3u-+JzD9i4Uf^);8FCEZgqnB-QToaKqWP3)2}7 zn9R`y8TLV(+O~(IYq!zZXFV&s^Hu(-zxn`4-pYr#JwoOPGWg<3mTpr*cgOy# zx;=RNI_gnbF@L3ht<$w`ckbf5j_3Lp`y{Kb{WqBG>sox@mwT_@_qOklaKd-}sKz_A zmnh|1qBVuQ!f3*QF;8z_ijED!4QV9Ex?vU=dQ(FLpzV#9Io!gWZ6uSd>5n>11Q z&}K)OAC?~O*yb2x0V?o~aBMBAjGRodVKb$(N|zp>p#|s#mpq#k+1WN>y%g^!K9RWg zQicy(kb~|MGV{T>%6gYePnEXyfz-OG|8Np#)^)fhEc@4UtUva^6D221;_gRTq7DoP`+?hJDLkN)m$_Zz(r&^<6ac40KVj{dZ8`bm>>wF02ndQpOrG$0SI{}@Hxr$8g7%o2I9ACJyIH1@`rTUN zNE*Yga7LC3vip4G!`cy&d-a@xU|w~xZMo4Bc=xhV^TE;x~qitfgGF0Us|u zAfJyz_F=pbg0Tnk6OUyg-gvmg38lWL!sA(Xk;9o z`7?XJtLHy=;Q*b#Va-7~`Er2%?Kd79Bvbg@V-9un!8sdWnA3=nk?U%SrMg7FL^G%z z({sFW<5vQ6+fQqT|`g2G|GNdfzc1&UV+I_bYt(yJH%|$#WgIFgq6X zi8&EEOZdmI{gvm7tSZCKh*iiCVTJM{q z>#}tTfl6$@*SL6P0aHeoA$g8swEX^$9 zxt1+GRc&x9q!~2WU~@yWIqv)?Z`O1_V2NE}48yUr!hPzBB5KL|yrQwq>=?|7e5J>L z7;+F9vF9_cXiSsCRhI=oHcwDhkB#|P%||$~L^lQsePva}%!dwii?88A=#Yt1(O^;% zjk{D~niUc*hnUn8w|Vy&s@Y3(;Kg4L%N|co`*6)6ngg}c2Fo7wX z{^0n>KmYv?{r?p{6lmrreSePHljP#EC`S6-I-%FM@b(1&TRI%lFTp9(Sbq09bB2Tj z^yV%HS()j=t?FW8RTSqq;>a1(&RITskeZ@je3A){JMp zD^~AAt9oZ>itz9*d+veEDURzcpjoCQt%n)Bpo&+0F~2M5+L3KOW+P4s{Q_IRh01|r zy!3UIBlMn>W|mERAx^i?BvN0@j1aDSgZ1qFWWDF{7B*d1F&T%mSNop4vKOkk0_{UM zn2E11iery#f1Agcc~OeadD=Shvi8C>e)j|?c@D|??`m~u_Q6^Gzy8pF_M*p{9GIyo z+#IsSq23=?()PUYO_7k<7qRP`IWcPBp5a-SF}n&iyEpS3!p_^{lh%2mm3JNtv3aE} zF2*4u*Pt>3G|vj{;CI~d3odEy{8yd}5*BmLrHt_x;G~i|E=LRL*j?OlYF1*_{MEzh z?Y(yA8dU-!PS5^wFsCz*n@e#ofUg8o(}(cO%L+A4N_O$9iNkyJ6Re^#OJGHEj;#47x@|}7U9_2MCptuzm_Y(6OE}04x&6^Iy)<;e 
z@?86>$Bzs|6comTlFTtIqd{oShzD<99YjEWSI1Ajyu-|A{e%z<9-g%L=Z|jKbmB&j z+oQGark0-h`RyOm3g362FpYlRQF3fJNppQVb;6E3%F695oVtpSsMOCPg=7rriJROc z1H}Ql>*d4G)Z9Ok7Qp9DUGk2iupFp`!P*?~5Eo>Y)d>+uh-1DQE0G;zl$l?Iki`z; zwz|{n6Eis<#fHxwER)VaG1)xx z>L-!}EL>wtpZx@e@5TZxEUT`bNk9vsbDLqtM|=w=ng0n$@2|mi9*t(+#H ziz>$7T$rCJ>%%O{evy*bBXdg!549czdW^IP9A$+JZn@jGPc>W zu3}uprW`xQZ4qF(W4`fc#5-J@Nu zbN^Jo>s(*e-1}zt6YjJ-bHeVNpSY)S@v$M!Jg$JBX$RY17UPpU#g1_?> zwannZN9v$AuMcdR_P`4Vo2l_Q2F-0I1&loJaSka9_%xmn0!D;BF?uwZpE{nxLzou| zc!Wpa`YZ%|#A-jLBEwij2UcKxaJNsG7>CgYFPF6jpj21*EYpB~>DM^xq^Yn5VfOOG zxEx{UNw~na?Zdy1E`+L}=>44;+|NgG)U)Q1DL2|$ zfW}MT?-g)!mkmKoe96gCO%6No758u?$4oq)&D1?uJqI0{uevxs<^@BhE!#y3_s~-)&A^$KUW2+rBo9v+{cn4$M!uyB+h72j{1SdnM_^ z?sbv9B^;P6*yM-dHL>SSv*5F;%VsGusJ!BC0ybjD-j$wQGjfa=Y5@M;T2L}v?MmUI zdjU)$l=K^t5!8$bh==(UBdk1eUE8*fi7h9-UF%)Z=rfmI=fL=rTuJ4jZR~WC;qV+e zn}(s#zx|$G4w>hhj~z%3o4;U9OXM2cwPp>Wdd*kujxh#fk|$R^m}XfbU*jS4rY`5Q zz4{9@0mLV9GUq$lH*%gN$%lW5BX&xdfF&h=m4i{fB+<|Zj3~ia>FMCyiPg#=;@{xZ z0q7(-vKS#aSa8NOlLL`$#+>_Nvwa?>(Wl;%s0_RIVT%W%nd z4XB^y!-*a)zk4rv8WEb!7gtHT{0BptaTX=t&4mhCNE`)J{V_W8uIEcmK z`Rp+{flKfE)k$iuhZ*vq@%ewmmTABX4$pP*Poq2u4|w}xAi3irzES_N{#wML6UuEs z&QStvSKJdzVuYF)a&YDaO5(sL2X(s+<~Z#;Oxxit>&$4kiNk^^l*afWntYRs*33Mw zn8thdraLa-gHD`qw?8nJYi`%h2>T#$&KG&kp37D0AX|MIs|Q;_CcivsnAZ+f7ogs)fbt|_eF%Ku5lYG( z4^MinlPsi_emFqE11~)5-9hx0{hqnTuZ5q z(4_zHe3%+c?ZO0QIdA>)24tZxW5`f#z3MS(Yzyh}CSY_01i^tMBcY^ni6CaZT@1AP zKI(?rwF7SqeH!w{lltPCopCnS%OP1f)V7Nau>=s^#Fju;yo*sF{IduC`MXB>k|c4X ztHbc)A2NW{1^Z=WC?+)HAyEDN8EHFyN1q4(t;%?PlvSQ$~@~ty!mCc=740fI831Qy5d~%?Upeji|O^R5nwZH zvBwbQP@Htd0ogD3CfBYPN;N248xF?I^p$s)qg-{X4v3ub&ErbzTdr*Wgq3%p(I_I~ zbgZ(`AaBwpQs>O$9{XDuj-UMF9b(YP{J(M99d zM?pTut9Km)vAem(L1gx+*r;vRJTZA)Cl-J1+T-dbPUXoz`1G}!ZR>O7=n1R+0zVEg zBC%`#*0#>4kIU4is&FNSxE%fhJe~V?+@`jP85@@s+%L7}fPHfcLHJMEEBPtGp4wtZ z*vo!EN#(ToB2bZFku~$kMj73?G+!bHouuLzF3A#A5?vkzmNLGamxt?;S~0CXHjLY& zdv4b{)c%U?9)+_t-M+5a|MqO>`s%4`j%LUFihC#dWOH&q**C6t9OyqEocAt0_*?KS 
z44c?6A-pZJdd6pAf@ineLLeS=8eE9?rhxC-%o-;s#$jzmS!fP{p74}@D_zt1QL!Owq3f&?`K0Y8mI0T zUZmFHmY}A2U2L+2K0KlyV{6W_RHbQuVv(H<(_4A|!7qd@9R7{rExX8@^7Zq+vantI z#1UT*=v!@8=n<8)FW3@T=pS_%vY5EoPZS)OaZ>w>Ei5DO)hRt?3#_XWMX9e6ceH zYb#zSX@N5Rz_8^3OVL(TIH z>@VWzh!wq~R4mm+hS6@sT0PR{e%fy|u`}EE1hdFgk@4I79i#DyE4j$PfXMvlr(yht zzxHsq;bY6OjeYgIr}+KGraLf*JSVn3+b`P9A1pkT?cCr_Ydx#0ys;&xU7zOQ$Y)Z1 zY|qd4j)w##13#8*2gitbk|Xid=eZ9L@NJk}x|gfm9F^cd^`34kG$MzqyfvaHrgN>T zD+T)Or)&Jx|GoX|T2Z^}e!hErwIS#09@_OebGTRYnDK%(^6oSLUE!x#PHxRNKH zy}dIpCOxLfpE#)&a=uyM_>Ml4;8#)xL^E{+AfDu{TzD`tZBa}S9LF^N6! z3HEButUy4_%#um3vK{lFZ4#XEr7&Igg}E>~+UpuOn&5nuP4`c&@4dMX-VPgM=R1}9 zp$vRdH}9}5-lcW|_D^132hasFPJyKz;V8ZDmSb={5#tFXE}CGX@pO?zNnn7Yl*}0H zH1K_P;CNEM#xZ+Fv2=f=7MrJhueQWG_oMv9er4x8yl%oI90F2YOlb5SYhcyrd3dzM zds@QvVi7L=lb!3N12z9>#G+p*GpDHmSfR_@W6-a6Rhh`v>!q@`+p$Se(*2+Dt9@dF8G6sIQ>rZ5jVc{skMuo zc>Jz9hiiXl&)6QNzSDt%8&NQEb&iS0CW!?@wXPR-j~+*Sozm0p zGH%2Q6!{q|_KqOQYxWvhlJ1xp&Q%-!r&-fdSnMxwqKf zWFJLeaG3rmMpW> zF4L_~&e!;wVII`5@B^G5P?^n5IWTuFB08E@z=hJ~NGJ7{`jiQx&7qC+;8Jb4y!_9@yS32I?AAaqi1N5DNv5Eu_*I?D~ARxTsBHfNy*vpxWa0&gVa@D$S#i|;B3c%Qs1mejLqi@ zxofaLxR)ga*Ve&>ZClpNHx5ESL`;y&C)oHTdi;`u zi8_}cuW_^X5{;U3)zNx|V%o+Q!N;Ri9MzWQghORT~L9;GHu80wNy|&n+$ge_k zjc8L%y<>HE5&zN^Pjq&?WZ5s(J5)`$YaBH>xMFZ@U(U6d(QB$!K6{Gj!aXu8pGd~UI(zc2 zI_4+eA^uy3_qa=r?^Ab=U3PmRUHrhht3*5X-8-uqwl7mZzJwCH4Z_w_kM_`vv#nl;ir0b3F<2bdCoeOGB=pDF>w|6G66uVs(|E;;qP!X}lVkL<(T;c+Qls+Xsxm?_ zP4S)skarwum@a$SGaE|}KyeY#!r}+9X2MEbC+u?~;GT+!oQSnwEGwwc5qR@bMa`|Wv??&^d**P3HM}NG9Tx(E{f8zll zUnq$!BkGWYa}Lsefq={h=N_K*_h@|3Zhh!eLm#x2SvYo|8k1sjOs*Nb_QYVF3+R{} zmt?dFeZ~_rwaU=)UdH56SInii!6s@vV7x{b!ZW_f1>n@rHjXPeOuOdj{%V7(@jdZY z*8UT=u~5U{*ekmZ)zd%8PuV$+-80Y0`80-ySopqkR)<~3#Wc^8v$|7uAAf_n+x+Uh zvA(tQ0z`Y}@uS@X;jh>{)wq;%a6UJCRo6ZnJhFT6NZ;%y8+3~_5FRvW`8=ZU9M9if zu#ZnR?nWKMB}|R`aL0|EW;q*fA@2iwkuW#Mnd3Qru@W%=c|eB0wMk=;7`JTm5>4U& zE%~xzdd_)r;_2Tgz^eiC6PhG2w=MINbCeb3P_kxI)7*skhtFDF&Ps9i%K8V9C$x|+ zn+B4ly?L%Oy}2}OB$TBOSHm2WQ31KET57MmvfX+gcmqIEsJy 
z4EBpRRwe$ixkrNReEd1z8QQTj!ZPtx?ieI4YxzMYbv?>d&!4K)Kd;v?5m^q;;iU|Y zWj1K@O^q@e6&_TBL=u^c0)ce|fIn8l5@CHPcW~G_8V0-Re;GY%u@yqs8Tn}J{~c5Z zVBSsU!0dOG^}*}(6Wd^k1b>Yad&i`Uf1;u}`=7nbA)LSLfyHm?8xOq?(LEsRbKUE( zES=wd?#DXB%RATD@O}B;6NSOI57@(1916jg9Tm^JNTtwU>fvb|=eQ)N>>OLa zm$dNJgKaB+=TChxFFp?N$$5w`!+-wI|NO7*`J(!B6X!Ua`(&W-*|fU~p8|YIP{wDV zryf6xf1LUjI^qETQ?bekg|WT~6>pxshuPX0CzoelM{w9^c>WTEi-GzRN6jC&tdcxm z1+F?&kl2{b16>u5{X0L8;74Iy8(@wsU;Ob=RY##q@Zxi1@hHIhc*}Ax5b4cK}#OA3pb-GQQy6 zuA;Z*%ncGe(B(nx%r`_=Ol866BOc4`a0L@T))hcLI&5|-uM4{K+#efheMqDF8o+Uh z)k&05=%FsY0>my01r>;Q99yPfm&KBo%5iv731qRp#8MwnjebRTn6jYkNB(9sZE*;b zRXJov%moeyvvH?-jR%)nP?K>Ezs6+lURMa_?z)&)~} zw5MGpuh-3sZTDhoUEQlyYRg`0jKL(HjQ3t(4%bsWmC>iMPFbtiz?1m^+?}!2wmweW zyK(J*)wXu!J?$%NkFNN0T>Y>1KjrsXpY?t6d9@?va~=2oEbiXFikn*?qd1| z5pvo$Tz~t)*^YJTHuP3y!z!qYED;`yZU|BZp0N`cyX^UFlIjgUE@#3m&Qmd>v!`WU zoTYIV9YH&Z1zm&ss9?{>h=n+~_#^+#qc-i|}_U z5Lvwo5%J6QTsM@=snz-YZ+;1ogEQYgJZH{ln&EbBHFWK$hT_S8_%myzIR{&ZId$c^ z-t(kkj^;{5%jp6(>an$-1Rk}yiG{`iI)`L`ghTH-|L%9JIT)kkBOqIrceZozMxVnt z7#Dx$6B+jUCU9e?x7W!7|3K2#KvUv7=cZXQ^1b9`66z49&)0I_z?yDJF=29j4^krA z*sZw7h2|P3l2``&d~gPJwv!h=+C8Qd|IN5@-TQBB|6jte=Bdft2MVq=BF@$>F?UZr zt==6Qi*j^U?$*Ea>Yw^+3>%TyGk=HK=!}kE4!o^RSQ26V|pU*wE@5(d_S`b~J|EV=n%Tq5jjOvG?f{yMq}gFC16rkACYf6>1S+ zI@p>Q;ByR0FhmzT)5G4MDP6MquwY+Yn)qzkD*^^%MrF}kM}O!bGYta+7__|zj@Yl5 z`bV1aVSB7?t?qqoylztFV+zC;p43C*p$&c|Xu6aGz{nZt;L+IpF0W+E_HE*^KW%X^ zk$5m+=uOCxs$=Lq!N6a-xTSA=V7BD?1Q1O8kFh$FIjyr!=+6fq@2QEWwyxX6ll<*L zbx|)lv1BjS!rr7JE{%teLqF&KOegv=OqZM(!^#&8sq-ap{M8v#V?ikd%{zdIEe993{SFdHjoUs)S$m7%(?B*BBbL)_6owCp1HmiGc0iFTW|3 zvF4p@@p&x0KYNZ`df$pfqJJ8@chQ5l>TF3;At zHU!p&%+oEhpzdi$!Cq3X%F}p`Z#3r5A2kKdA@`@W#&)-f)2z(&D`u~ zV;-qAak=5xHm?024hJl&sxk7ayQ0F9^#%~$pvBPPl@@7gjXPiN~F zCmOo7&rDa_r60)bV`h_=3v<+b;-FyvUO&nfk$u7}JAL&~ozF(^!*dVP{>vWz_(x*( z;d*{~kvMNG^zoW^QizhDmpCvKz_AA9| z#8%%o17Wt!IewSJBE1Z{-{9)_0?-8c>Fcg%{tf2I?n(A5T#fnfi0|}UUg8UP*P-)k zcG^4IR1pX1sU_pbjG(>geROZMtyRpC`N5(;(R01-cJ!rTNW z)=QyFIhZex6(XDQUb^c>g%eDQaba;d&R?V0UvGliy|2;c+auHZ(nX8?mVX>2;2UIR 
zzU0a0Ro;3SNNv=D_3-g>#`y$j9iDVswjX3qf3C}_|AC&^DtXQ1siH;GN8i{_&C6K6 z%8%PKzHOf;-eZ@U<`SmXMmuEDZ`&pMdVnqOm(k3q3sL$>`(WuuH8#+v?Afq8FK(1p z>|otYian9zCd4;cq#ny*MT;NZvo`iIUheuVS>aXdmyykI!$J+|=;CXSOHi6joJvd;F3 zka&j7XL#3-1N8VXhPVtR=@~R*&81__?U(rHqi4kL!$M3&l&LdCE*UEl)8A&nXk_-~LR z^&FiB6=`wv6Bgxfv6(#X0iDWV!)L4A%rj)FEiLzrQ?MWIk-WeDkAM81|MP!@2)}v6 z#X98gW$y!Vb^J#(`Ez=~^q}p7I4^$YK<$IJxpbfM;)wC_O5zLy9}+E;-EHHKaqX*TrNm( z`n&sX>;`9laze@XtrD*%=8U5WRd&v8qq`wXSj6s&J$tlg-i*&?&Jk31+Ml+*6DP@& z<>WZIAIEd}2rr1|o7vCA*0pl)aF1S=@F}+Dv|{iAU2jr!XS?vAd-EW4+WbA^#_=wH z>48i<&5gB+_-c30>vN1|2xM};+EeWMbakuWakFoe)-oB;HHP5s zK8tbogeelbv^}bX-UnROv9^{S&S&cnmvCdeAcqe)c5Y{VWQF1KGj$L+JYij&$Nh1} z71xabTaHF}yvC0&GJmzLzc3j!bhDKbPjhnbY63NjZiw1(L54uAbZlh2M9LXurja4{D4t_wfqkTw zAfm^wc_vSgmD!dB28thdNcB(al#g~O2R<`>%;EDdA#!-0&sXz~_Ih_3{d{xa?#D-B zXE*DeYh?B7K9h^glZQ+4AfFY!P~k(^w9qF>4~EIBIdVj*b%3L9hfUV6lC#f$dPNN2 z=7r@6cr)%AvcL2;q&7lYEdT}h02bF8H zUsQ^hY#wtK2riz0(ZdfXgxFsiu~<3MH67aphCt06XRR%8FnLF5=0GPv$8rlmsQg$x zltVl|)bS_j$Qhh-SYzd-e{OaK!j2pp^3*Z$8#fo&h{@JdZ`aQJ~$%gOW%%PgZ3PPBo2N?%qzo%56T#ikfA?gyWZs|u9e}isoUY_3_-nl_Z+!w zowxpD5gOL6p&WW|U@UxVgAVg9nT;&)D6tWk?KJfmU4wR|5BXq-TfFR(m}rg+OABHL zkIOv)5D&*9KKsi$fM<8uX5yL;rAU+5H6S{FlZeMM%>BCkQEzly91G+Ys7Hsb9t&cJ z0z87qEFT+tsjZN!9XTdXY|FV&djMW99Q8VD^*G$`G%ErT^zU*1{;%KtU#_#ii0AAX zch0-g9HxoEd6ApH&92%ygB^!P&hH{QC#i=QM7Gq%4IcUE6!xE*%>}~j-iTq>ZF4n` z6Mte)`8h7;o?G}h)Tch!Agk|k)b7duU3Q-a7&zA=^ej;sERtyucXJFc$$^?&M5AE*AbT_4q`aQAc9Zoyv~HGrK+`Ez45kzaD+ zMaWH|p^CJvnXfiu^p$sfm`Hi$&}?z0m@pz^(=cWeq(^Ro<7RHo3vqFV^hJ;Wc|=|9 z0kd>}ZP19{vk0+r4Qkc7g_JlE>T1~(aRJ}6Uo;=^=SIHRTqZ0?BDskF@?@_!?Dld= zbwBu?uCgx@PY$$uFLnR0Kg{X9Bx}Pvfemh5^K3tWLmzs8$G(w1Uy%zFFU36h9M`sR zM6peD9(!pIE=A^A)MVXH!Z_x`Hkc#sW4K6$+kS?$j|r~jIPi-Q8{vpA$WkYpiNJVI zbg^1kxBZ|^N&U_ra@!OWWVn&>o%`LD6P82TZs^cRXii_UX85L|@;I?&9HArzI9Irt zIRSWbpo0QEcGTTp>3k4G?r&-R!5__`P5t1Pm%+o;1fq{m2<*dvT!scJx7Mp4UAgj4 z>;$Ly*|WA&pBD!{77kMwNz-+!1GIKJ!8U-ga*PH9H<8<)A>!fSN24A<6Z77!rz=0o zO7Nfg`_ROK{(RgvUXRBl<&;T$P@{?0M|P3#nj{t+ 
zazM3>uNZfrKJ@A}z@wn$r0=&az?`-=hoHh8PdPC0Png8ZFI(zOXzvrRg?&PzY~e@< z@!@qRmT_s{@dGz|r#>9p!zFZZN9o5+4Q*H9J)sAlM8J9^#EEDjiSgdDw~@wb&+o|Z z=CM!CCs%9n#PVZ(riSnp+a6i{oHE9IS9kiYyt>Z8RCeM&jZb{X+_U9`7TfUVop28|3dN9l$(;Prguc=ul+loJF)?7{<$j#*x9>)fmN5NvaylF z$*s}2FSrp_2l5=plH26Esjhi#U2Io@Ft`5;=T35bUGX+rmt>RsbjC|}3=U39clrlt z4~Zvr*|LsYu1;=!YxgeV({|--@K`M2jrh&qMhU`H{} zn7dE9zrB-WlzENo>fnvQCFxROkqOrKPi zNvZ4%To}y`cOlU=Joq}|8KcwXY3uP~V9;0zYPiA}+9i`tPug)s50>wA{YWn&q^wWZ z&X`_gr(HEO){!BXt|@ad)CE(wuLtUhrLFIa2nCMSg$yv&dmiPP+Cd&?XV5ILsyd;uN9x?<|rJ<%-Df!UA%rLCaO9!iKGU^vTIC zr0`qzps!M)8BL$r1@69k8#qIrpsbF;nw(=lfuk{a(@i{NLGUnlam;27=Y7_ZAminxp z_Lb9j3?F0K$=z{v<+$Tc`F;Fk3#Pu`>5C~}@tr^Q#k7CIR93&u@s!_Xr>*~`6{Vhd5qEiT;yJI8?i?*GKKXcCa8>s~)(!AFuH++AL) zQ{M6E2V6O7bMLEfjGY&E&cU2>Fn52QpV)K!#9JAC`RQv{&g#IOI^@-zcHn5mX13>* z*Y@$318YQI@>BL{{DiO5Pj=(KC9=sdD<}ckin^ zv5eh#@b{C~j-xRZk{FbvpH+4|TmQp|0x6lsWtzieo~IAV9@Yj)YkA6wBr2An805|4 zUERRkTi4-Y3rNJj%2STJkFVxavv06p$HmP$gw#n%c~<-8J`i`U>z0+_ki3Xq1P}K} zF|QA7WqyihR+*c@;nQ6k4@i|H`phewjAojyHpRq#^K>pf=K4m*@Nky>7w8hZw!oIQ zK9n~yPvgIP7kN>pv$Hr|@)?A~ku2ztDu#gC1BfX8Y%s#|8Y_^V0!-t%U1PJHIzFcT zCCBQ{-s5EwpZu+3{{;=?dLY14LV%2~_^B&&t1gpf4`Vj8y$q%EmSCK6<) zJPR0>U^1>G_{u;eR&z8!IDzOwoDAr5h-aEPnymu}SNhICv^CD<%RC^{s%OR+pL0qj z0#l#7#jk$Zu?A`3iB&i@L+!kQ>7Df!?>?e2h;t_Y_HJ9K8^gMb%(L;6Z~bH9LgsG%W{95F|Jr9 ze&!6j^)^)_-I9(PTWpErjD_*`dtz&!99|EE)tlF^hKM1pad-Z0UtizgpX!9&$x+N1 zPi0_RllI7H#h+ZrJ8zGScArQ3dM&Q~D`HE2!c_m&!5LZ%{#)%{z`x=rTk?B`X4PB3 z{Vmt8;^VG*v?CUIIrZJ^b^cShfA-+K9)f;y@Tz})px8KvKA_YrDT3%;Y2Dgy>+eR} z&K|xq`U^QL1ET-Z&#%JfffBRXC*8M=o72jV7uiq9%U1ZXo`wO{ADX?suV@Ii zbVhWP=9t2hCgEK^`~CD;bo<1d16yS;JSYV0i@t^t-1b}hgwZD@n^v`Z_)O5X4&Jxv zLX8aExJmkZZs-Jd$#d|I``|0jDc^SWH!+*z^o73jVyCY^`2w>!$GK$uvYJy~@$7*t zk|P+k^XiE<3z)vkUp)0n1+B$-?A3XT>q4Ky;O@g2TP&)_*nFpriKKwLN@8IG{}}Z# zr*n&KizEjSFwsY5&lZ4ATRbE8yfFxJ!lSF@)xOf0#)_*Wn0J52LTN}UWl`x3E{%I| zU0d5kYCE=}xsRSYmph=;!Ol!S9(hHSK*x`}8RrlR&%Hg#z*xT4%aXh4ug_<15hpb` zeRSU1g#0z$XQ%y<&BkAOhV7_*d5GbT?L6hueKB$SBQvwY$ad|53BJtNesi4uSZ&Sy 
zlb%z4wY|^oG2iERJo6_^<8N#y?g?K#eQk;>S9P5ihn!Y?W?Z^1LW3`!Uqm{+b3*175P1n(_c?S>pP|ponKgH*fY|VZi5Z%7H z7yey520Y-G;p^b6TWI$UdDf9stQWNOH@_ge4yujKJ7*n2d;Zu=DX zoi%fw9C?t;DqiX`d*fw`JQE#K#XBLNwmh8f#u~CVU7thactU_4oEz8%iUZ{HGus0) z;OWL*og26(pVxXB<#k{6I0zW}t~`?S&_Tquw~0#~yWrL5H{5eueFME&5i4@U2VwS6n3A?AJ(Zr)?z2c)r|d zFVU3h>f|rXt`W_K7T>EpPKdd!p`LZ@IOQz%DUa%Krt!mmtDpbhd&NhzuY2~_B6Ag1 zo_TUOIXxau2b(Fe$>p(`BU3tPSk8l~b;M_1 z1&)yqY?Gbz9OBqfNk?*Q)I1+2Ih=>5*#rh2xi0?VLf#e4oahDA%&M`-*l-M>2z$qH zCoZdfiNDHYXXhL8c9b<9wM`R|hq`#ps>U%2^}OJ$_T0;cj^2miz%6c~i?dEnjK=;P zF7#@P8%_%lW`NGa!7^FgwygoZ+Pu*8*x?7wm9?lZ(W*&W@_7N^U+lDoiun)yfVM%3oEn?%_OAsV?`_khu1F-!;rqvs#&7+VhdgPOd$KO*`ar@us|Gq4 zWNFSABEzh|w6rL0gJF!ciN5~!6JGaY!*aC-!&(BS=;n|(Fy}P+aWuSJha`5njzp7P z&zqh)ZlzgULx1#;C?>4m=H#0xJm@qN2mg^qBnZ*)*qXlfV}VdD>tL|(;hd|wF}3d% z52)itFnat`jU1R8MY!S%cak@dC3F~>0T%}A3vcNxok=@lS$b$plDr$|{Qd{-4Rz1O zGmMrzM&5=j%NKvhgWL1U*aNQ|*<<%*#TBx>Z?5BV(if9hW)C;k5>Ghy7+&LP{3eEe zwbRcM&Vc3U+TDhO*xfybtsxiuN78&wza5`8#ho(7PuM$#`BM#wtFC>18f7b8_H%gS zZY=FVo^fm*R}A&rc5(o!?P*Y1ebnaOSC8#EKIeD&>4$NBbzFSq%+GOgl`*H)F0=P@ z?D#JJX zc@_Cui*o_GC`5B|T;vm~*}1DVSe{OLKhacB|H5h*?wxaf2_e{U)ox?Xk+r(O^{JhH zl4sD4LBpEgapgSamA&g*BW$*>&w0G#`fKuLwd-@wBk{e9EeFC&9$A#fdiOk0wmi@7 z8@xP!KBt|}$#Gr#32=5|>;H;@fk-#!iSc%}N|%8YxveS*P4i#tkJyka!R2n1%36^I zdc}(3rEQ#Qi`AHHVAd`1WQ>k;UZ4}fvYWXuEB0cQ!2Hk3*~<&F0)~3E98VT;_$ZiK zxih<6$Hb9%Tz=ukep}2TNP_JtIPbXuo;vS)cq$yu>-_+4ofR--p|^Q`loQJEudBJ4#HujgFj z+*=1`4cDOT7p^7za)8_3o5XbPqf=M9R}AqaFL*k`w)I>NEZRN{S=K0@T5DGH6XsQR zuit&XEwn1cean68|Llmk8#^4tf98O$^WYK(*n2y94xPm8sm4y|HaXM zkV_`LsS*Q@->(wdE~}BN7*{#Emqxv|b3F(QVhA(e^fRmAxj#xTUvtY^h3|DgDh>tP{c(EFc;`UC$g*E1K228cqeF96-A^hJZhMo-IA3GA z*qZ`Y`Ml}8vu;78y5d~ldTZG+DB<(~CW0iPnt_#tG16gXcgXWW4Xjo&;0Mz>TdzxB zJT&&W7)LfL67ZtgLdUVE^@GImId+`W1O}gpL-qz3o#nGP&YA}}M#qG;cBH;Uf3cH@ z_3EdX(K+7>e(YTA`q;8S>t|-}?w({b@sP*b$50KP;Ss)vIiy%~RZl$W;&+)Ju|7mG z%YN7x@2FmM<~;azB!6zyJy~n-)lSs26<^v-xHu{&71=ROs8lUAHh*z}z0ki3c5PmD z-%Gm2E8b-~0k8RA893871Vfp=`5NA)c$XJ}cDDP0; 
z$eYwRSTJYuweePT%&+?7Ld5S|a@qQGVH#f^6sU`Qz*fOFTcg@kaPn2wc4+8XjAI<7hc8(9+ZAk32C)Y4EZi5P3lh=W3F}Uv{0Q}wlN|{=RSKmt_V9N1* z1j#h`)}-~muG!}NDU!Wq*g+hVu+4F)b=};OEQ9UCF?4^>`)fbYK-nkkWgd`xNh3XG zudP1X*ZtKX_Gje5VFhgu%&(ujj>jF-M9^R^imbms$!L!p{pQE2TtEjBda}JN@TD1vi1O9e6P0ZBnb+_ zH~<;u+V#iwlrz6HDJL8C%`q0>a!#H-qj~NkY+6qewF3C1t-Mr-`q+ckX>J?!y>U2TTjwnM;@xei^s6$pcXoVRgN zuDTWbrazx!&hZ=g65m5viZ_F1f2#XR4Spr}lq7 z&aCfZ!+R$)o{fOCU?37_sy(kkV;$8+hUwXUig|J2;6tn5drdx=IId6nPr5ViC!hDY zPfTOI^PimPI=^$FE5}_|8!)u{Jkk#b=Moo~{#f`!@xz)N$e)Kzl+(z>v6S!>06s}drQio_Q0yUneM3_Fx@ph?KejbbbDzSdj?^j})GRxZRl{=~K5-}WYyR8zGknudhmj!Mqa%KVkMM;) z2ja)YvPZY~&B_gZW2rfIr_ABxffh)kn+$BV@0l%)%xYgw$}N32ikF06k1L%W1EiN6 zdN3i5+T|X5@ietTzV8QYsM8C2u~lQ_N~(UN-F#;pB~>pw&TVrfD&05NduSA zGEaW3x%Z|for9@AWd5UGiz@Z0Z~FW>u3b43>)sR5WPey{cL zRV{<#4{(@#|HK3=_f&e6t($>C=V}~zm3Si3#yZ26tkdr;7)xT`yvPFcsb?Hv4F#{ATuobSdbN5}j`stvxi&xYSS zXZAvE+GCe7RlT;xc49%C^J1Rl;Hs|;^UAK?vZg0baVO8+hIw(dJ?E7b_lc{z_II04 zyv5Sf;Gor}y@j{S=?_)s^tX4MS)|tog7bmo#_ef?dD49=BY{smpA!7mhsq`BYkt^o z-|shSaK3W6v~7P8ipLKV|o{QT)B6`qjJxDtcwK6JZ2;9g_A+GAAA$vb2(-OpHJ zdDof``qwyKt(?nU++_d1=H3Nea@}UKy>`!kmi_fCuH6g-7ZIeW+*uF1fBB^F3Isrq z(#|@rsx#)mT9b?M$d~od3%hbXbLEh?;m+FFfU!53Uy6FB^yAd_YyR24snpQaMgr@s z12SvTLcuUbY~vcU@_zKD^!mJZ0XjO#qlWW$e_E>lmcJH}2^Zb7={=~tL26!|`S%K< z%N|+3N8&ui<(JP`_|oV4XS~}bXL!Ul1K_C(-ZgC*(+T_+>+t9LR^BK#PDb?%XUX!{ zgf{nXxVt(xGf*=IMDy4sr_+NY%Tl4W{Ap}B&#u8Uy+jt%Z?VuttunS7v^C*OT`0}O zJ)ARdi2v$5+XFlPa5Gf?TPSL>D@@*5vvOz%(2#b~$E2CKn)BF1Fp9qQ3DyidkF0)H z={}K-U+;%!_|@}m&RY+k@^7)5PtB!Yx~{i*RdC+G{tjL_S$9%y;ug=EaBN8rL+jc1 z4vx1q-u%_eU@A5r%C59t7|*$#=j*gZa{6xdDaK4v5=%*3KKQ7UGIP+C`zu%C(UH`` zhBy7vv5~?lCu8JFf|Go^h>b7K^9w>D-y=iwr9li4eaOWe(u@!!!^v-guP zBgu5x>{u3qR>rxB&7#-#&U2eG>w_mTHJd>Jrn!g4mycSAD(Qy@S8I6QrLeKM1&rQ- zj0(jZA>i~cd%+2YB$fG>-M4L?%=z4}pz}snQ^PxWR)2A@9Hz3fYo<~ISBIj&GlPe% zi0yNVb@sOEHWoYX+3d# z`4}1FU+;cuMPEGW86M^(|6XVIGeuR5vW*?iN}iR+zkYNL+dNL!rA==A3X|FhlQ zM|Ih>p*1F);*%-T@u@wacEDk?NEek3`G!H2wIlM9^WiOoKVr_4Ayy(CV$uEa=Btm3Zkb0(YX 
z#NkK&X&jz6+2X57&FYT}-|3`z&v(P`|n|hOhjCK!eK;Jm5+jG0T0;a~-?029uj$ ze&?_I$Qc!f>AJPPdy#=rU;aYk|8RwK#1&JO`Et!ZU!v>Fx#Be-cLqFXhOgvnU_(6& z#>H|ub8Txc-HQ5=UgKXr6^@HU5u?vAwE|sS2ZO^4T-AmT)R3svn2hCU{itoyxDS4x zDd&1kuGQzi@zu@<(+v@2pFU#W%CGX*T2EHiE`RDW!=B#+-}`;On|RJe!!zXSlXr-z zEy*h4$s%&16PDyNXsl$?++i zoZ}LDWelbIsmR1N{m27pfa^_giH@IGR~D#e-*Zb2)!lh)kGYJYMrbD4N8ie1jRs7v zA)dJtk%6IC#!SO71m`S0#MbL{GqQ|zhLSex(U|h~GkD-dF){gECn{^7T%TgeM-B0& zPMyTcN1Yp%_*okpuA6+TM;#=%n|xxl3Ert^?8fk)_xX%vojfRYsxw%6&-`(WMm+PK zsIaz)nXq!~oXwy$zToujV$0ik zWL+=ujlgUCnnWL8WR697cFN%)P(ka^Q=hrxcW2Lea5m;B>)|TwAQIP_brY2a|ISE{ zLry%sc6za{HK_j_H}#P2VrHvJ;m76p-0xviPq7J+`rDWISz|T0FZ9CebzK6})z7Yf z!S`!@vR;#QGu?pzSi#0cCU5R9ORzK(n{?;+L9u!LZs{KEWc&t#LK1+ zN&P|zucu{PG>wZ&og99J_B!*c6$|c>qf0pt{Pd5LRTysFyLr+^hbyJ>iNp0K!I2M+K8fQ`d0YR9 zKkdvB;w|-?-|FGYup60T)hC%gN0{_1a-iN~M0nb&@O73nWB_*pMJ zUN_PIS2BN5=N@{^V*viA0_XnpUOy-PkNS^i%r(_m_dle=YP*)7+hTH|F?=P~*E*l$ zqfF5j0=6lIBRJz>`Us60(6JxR?q?N${Xw=Q5a&pi3l3TqN5v*yzwfuNXsnX4&0-Gk zhjMZ@&2iE6<#*JnzYSyZ!VEh5vu_}3zQfYLo#wAZQE-6$d&B9=(s{mTZ|2@H71sf& zU^SKLf26Z7ze94%O`rNsJ8Q6J#S{02KmDgI?$dYLV6rwoNnM$fx}(0|{5N0b{AYEY z30?cSvG2S;&kyrBksoaz)Pk_~#WP~r$?|_7tgc29TnUv=92rYpSZrz`GnSkuU*Z#Y z>eNa9$(6AiUh!Mb8}`OWzse!+4$;&k>;4&lDN&R(diyY<=z-n}@8t||4b z-@c5v2Xe$vd8V3QjQC>dFI3UqMD2oeuzP(Z9ZuxynhW?084M?rBw_4Z!fowhOz8Y#HWw1RZC`+`ncp~TuH7Gyr!@F z`jHu5B%@R`sP~Y5NiGci_gVMk3B;>pmww4%j2I&WFyG>mVn=FAMS|b)7YK zpD_i@TdiyKiOkq<=-ET+Nk!l3lg>I$zj!j|4F`redD!r#|J1>dqj+BU(|6NvIDBxX zjZa+Y#O2$5tA-puUu-xF=mYw5E{Kx@|0&I_hSj{KFx2ivotfA|QVk#S^xv>2hTK!0 zx!&LDtz}5}HJ>ZYhpd|0nn!L?UAifkxvPwSv<^t_P6v@zxe-ti<^X{?Jo7nN= z?-T8rda=b;*HUntl69Q^b7f7$%=taw%H)A4mH`#I)<9aU2bZv%M~AZ)>-m6Dw`$Cj zxcMe=l&T(tS-7Qif7y?@@AvBH0&bTyJzJCeLetnwaC4c9v$usd1Du?!+_Te&Dymyt z?(YoNSFS?o>(*o_oA*UfO#l32&h#9@0@j;Alozl?zUP}d!q;H(^uO8 zf2b@k##R_wWdD#_=e};$13gkxe^k>x26k9H!v+0x_#a&VUB{=^@;&o?SLZBF`+KGx zDIQ&8&einP%g624YHj-q1+uo2?*zTK?CUwVv-^;IkHqbK6j1`c-=yrJk577E zbd}o@HskRyY%KA#cpaY2!tf9bM?hEjs;nIKp&wmy2FLJt^u_Vq55FK3Gme>>Q~v#8 
zPbY4{$V>Fim+|lQwbYTaPyI8;ZI+CuIn4-GlhicwTAra8-H3b+wg<=?y=WX;OyyS< z_~dYJbH*7PTpPE*b&YI&d6|o;{2Af-o929pjs4DoX9Iwj^c-@H4neZdX{DjW47@f4 z$PouR2^mkHbp76famKUs(2HT#snc9OeUTcGIgnL_+>xdK@jR@V)Nd;gXl1DI;E~wCwxvsH@{YQ+7|(CJUTJoD1m{FTm#(|ysK zli~5Ss=&mP zKjqf%@SV0``0{MZM_-ewaHo%a{AXTr!y^ekeP?dQz{3Xzek5^haBvaF{z;05oYdu` z#`U*qyb~*t=u&X}g4B_9em3S;;gd#ZM0M+|Jj1{2f$vQ0wK;OyCmx$2BjsnnU?g4l z=eZ?MO=>j4+G}1x>aWGU(Q~Y-Rz0xU$yw>64dr%RUcXAHRk+Pi@9bUk7w!*kp3C8XC9G)F~nU7C-=b99&x-&o?Iqacv zRJJ_1i0(75E(uM!uj2?_%{7}9-8t4eR{T`FM?P|%)jX05xYjy6uxpK_j~nZv7CrUv zoAtg44;SR}F9L4b^cGGEkIIXV28w^8y>Rs2Vms7Fe zEZ=eCEFA9tnz?2Yz5YjfDt;|XGyOc*zg>T=v8MXDc0tDN_j?$7?tfgZ%4N;|xu$1s z?@fhg2lL#hkG!zOctnP$>nMgr4RW`&MGxSzUg>M^L<(#33$t-nQu{JyeFDA|%E=4+ z>=&DQNcqVTPwHn(ad@QXbz)EZ#A0VXe3_eZ`SsD2e)4WIxfEYtoJ9Z`awqUH$ynx$ z7Df~A18FWd3=aW~g3kZ|C8J41K~%EiH%&SF#-6CXmyU}69L{k@8vL6Y{N9Vbp4T^e z*FSR!!lB*Ftx)qRPQgoxfskpke_GK#Oms& zs%Mp{V{MFBBKKn`?e)6!p1J9nhGKc+qh|#@dUh2sUN5*gZhRfr2 z1OK9r^`2=l$}=?WoZfI%Uta$-3RgUA^avMX+Br-2q1kE9=FVAV6U4V3M;>LKSjA?v z8W>-AORAm~*;RY~Va0#^FWJfoR{cjjW}a$PA0u48oTXxxzD+wsF7RgQlo|#(@@-&vnlT`<4GP zC$K)3gL`7eQ{%?}t^fB)YOftk0cKHrNX2eb+2SMD<(j)V=k4q%WAGq}xt-=a&-aZJ z4r1cUy!3-TG1ON)@#(+ifF}njY}O)Ik_#*x@}D_iCBfsvK6BtxoETF7-{`p5xU=9q zG==F+DD!b6o;j&M5|)F;m*Y^faUa72FiaM7Mto{r^clnH`7L3*Gk+a3x_(FU3{q%f6bA!C} zxKF@|J;&WnfA*DnmIi;GQ(8f0&Nxh)`DUE^a9hV)Zuw7wa(+|$f>}NL%gtW*r_TC4 z)CYgo7{W7@eefu@7kYgkD3?8W+Y_iBoHL+2|HuaUl&;awBMAAak596;U@i;&hTC&S z!m)RiGkxUSeD;!D?%|m02^hAxcHYW7+qh)zE;x2C+|NFC4{KqEkSCjMum2~^Ug6Fp z?`NJZZ|=k9OGD4fdR{Vc=mX<n@gZn$U+P$R^9I>M&E|Z*iV|B=0=J1=$~@&SLZ0^ON@Z z(2I{X7E?8potS@TvE|;0>^Xak9tXAp%E=G$amb|xm3v#i{+_@9l;@1g>AqC%xxPtr z6({!yK|kaMtNd&*{kc{@{pq2K%2EwDHFJ@S@~k8ZI~ zIjUR=Jzb|KZRyyOuDP#&`d0bVZU!LaW2fwQiR-d|$>MWF+%Np9`m%AS+a0PZYs(I- ztFAxdpS~fgpJC%uB(Z5r*Ht-Iw|jHA0QC|?E>bb_B%MRA;%P4Fo!9#Q9A>p$HxrD+ zm^;zwAI;F0JtNL|hx}YKuq#O{{lj=xSweg#r+o`j3K--b#iG$+>u}BkDsMypA9D1u zJo9J7hXa20Q8mUb{iQFCLaYi_y5fRWVBeEQDLAY>d{Wu2JJ_8;A$i(+A*@3%5Th}z 
zqx#zqMpYnQoD=Inc`Db*qwUG*8Ffu@bH1xi6U_SN(mk@wT_LYT^u4CeS-$9BKGo{a zL=B94SXmQeoBsN=cs{D8tJh&awxfGIV>dm|lJdO|-4~oUN$$yupB%+cOxoa(ik%$9 zQr>to{z=dJ|M=hk`~Qp;`v6ir0gC0Z5V0Br(|T{+Gn;_`jp;((kp@|KnA>XWnV=OuO|n_O^a;sME@R+24Df zGIYJIb4IS5^olgouimpS*6`%$8ep`oxy{dyH4D!s_v-?jKhG3te4U?<{5PiEN1w=w7N<1T>K=)ehxe|WvxlG7Q2Fyl zJ!Hmi`W<)lcJ_J@-8l>*hAi(`*JG_CV2luBKCyPrP3=n&7(3MxpJG?s+zXfKy$7pp zP^>TgcC7LHt#r*?&WxS6o=y%U^L*u8QZM9DXN!+m&hX#*;aVQLc}nK^jEt4cb4LwH zUF4)B9}IR%@c5D+AM(VW{hTBq(FMy2;JG8Qx5IpH%K+OZg>Zd^sHWd1lsO`b3e#{e;Wd*|#`YeaT_7 zMr2_~6h3m`78X1{N%;-vNc4nR#xGc#)(rep0{%&Po-Fa({H)luge1FfYnqBwFyT|lg z^%?Cincgm8)ww?K?kicj^?-8LjbR3s)Awx1p81^kyc(OmmA`60KrNlKUQ_Dk+T-#i zM*3z$KV?uKk*AqFjxw_+_8ZTwHZ^Xs7>X2z2Lfcbn4oT_}n(Lb|+ z$2lWgwN^d2Vyz=4?57&f*vubmz18((s`K>~5FC;|)0R#xq`5pd#rcr$0nNECXXA&q zyyOWQ-NlS1@ag@H8tQ61S(A8B$+v{*pQGGl&#n+xFQYL%QdsDa~c)wL&vj?Hn zlRnu)@({Z*!ga%^Pq}Z{8*j~N1dt~eImEFgPoHc)#1nJV@trb$tA;k`S*)C6({xsx zY@Gf3eJ4K-z=VR?Gu!fD-fQ>}dHBvcv3+3E}{vEktUGvE=EO>Xk_pL+VeVWh6q<~4KD_=a>*o&?_=1{zq zv}e+f;)>X&;*Hq(!kOPWU1y%P5wx$@yx3|XuD5(cfdilH^wk>Fr)rlWBr_wcR)JAi zR!mCf_L|ML5<{tWeOqThYnMxx`Q-7%Ol48)loPrMFq?C7oUyEN;!pd`Rs2`-;X89~ zwrZUp<)@E4`Ov9x%8Y}rFU~Tw`RAnsh1eX&YnuySX+G++cplIvpYmPHOn>X%3scQL zJmPn~-S@ZZ{wRBg%jN;1YgS&F3;a=vvD!;#kfI~WE>j=t2o)wSq z#bhS?5+)Br9l<@W_vIk-O^TVXpb?^oQR__ces={0DIb)pT*}0|eczS-tIs4?8i5leD8}`D^8t%l z@-5PsH!ZpMXVDq%Y3IJ>oH%j8cgDmFN8;XkOh26ILnj6|{*<>~GXB<2a!VgyocEzX zLUGQg{7Ud)EK8p8QN9lO&LbSfg#PAo&7J4Wqp#C%n_oTb96t>!x$ZriFZ=ry&g8%8 zbi1b6LoT@D`p{d|K8V;GFbcb012%EMMJ!o*L)g?j0W9!_))(Y2Orc%Dk7bo;}VJG+m!L zx4bbNt#4nG-XF7WHPC#_LLJP|>R-OftKe28j9IX-Lyt?A`|$c?_xc5FJblRPUgi&W zIpXS!b{cQ{8h80lKpRDKPU=k;K1qG(hu-NGoZnSin~?4Rt|FW_MR-pX*SOIdvY^AC z>p-tuTAGQC$=)BbD<&eBWn%QKnKfAwFzFUBbP3rTXJEPGLqq3fTaxN z7{aQhLUmQ*T8r!NYg-g#j1=KCQ|3B6Rok?E&TCC0uW*&8dcIuCWLgIw;uAGzq6&S{ zr@e|~RQ1F*eH1UBIlOoCYz$XH9FkQUtim$4pqOZ5ZMdGW`4KtY6(@!&Jqai8=zi4A zGj?J#e#S&XuB76~RbVLOy>XEPPPwTkSL({UU2nN({aZf1Q~p$E{^7tKCK5FRS?WS| zquPYd?Jv(S$F+Eip_7J|b+*7XGEQ$d+^_iloJR}K1O6-3@7QzBLg7Q6nSMt405dCc 
z6Hg2-NiOD*7-D)t`|qahoE5(+%ty%su+o-tWNn~`z4;qL@9p_rO|^90di)gg8TLH= zOt3G<;!S_O|Gdsj6TkPW_des_=)KoMvu650erEFQGe2kHTrhf9N_aFWwrP(@*=cqk1`Ql7!1gM9P<6kq=V z6ajk})O1`bVVDfXDH?EuUEx&R#XW^amE16lj z^7=MIup!q*R^0od{TbEk`3ING-amO1P&(Uj?8rq46Y-K92KU&XkyOI zwH8*@=lUhrC0#sfAg<^%yt}F#aO9ht=h*nd&6?Ts4eE0rTQrQvoaR6TsX63Z=3LrVzY-?G55_jffqm3E z`FN4P)*LVL%Hege6Sy5C&enFM)^>V6*>B(&UaKd@~G?%g>q@QS}4ov$X#(itu%PYYovYzI?7-k@_u_22Sc|0eXcjlfwR-C#`J~TDKrvv26lRAjAwmFFQ9DE+_XC}6KUD+1Aef}EP z>^h%*PpY?Df5M6vK7qhxJ@@T%vappqH)o#g${jc5!lN2oMo%7`Ur1{9dY?(F-YA~E zOIJP|nW=vGoPOKx*t#p2tI^c`RHM<}6WQ+P^Up#!SvO}o`BGOeZ~eUa$=`BuhIGH- zm@4O$UO|8-{!{OnM=n0Es`o-%VBi-veVyC>^hr-1Z2CmXFJ8E1bEPkJ#m}5G4u@pc z!Y`jb+ZSh?&jfF87jZIVl71+1n=|_RI=vu2F?D~vDemh&gil<*6U+LQPw_H%MBbTs z5IKt2nL%v%9*z|$<+i6=TRYqq0Oasmy%obZWi6An5`#c~eCV?CGt9c+!2TWiH#m;E zw12pNHh(*DymX(>+r8d5HSQRD@3F3>xM$i&BXgwpzpe23J@>#EIFn~F4d>D)(6;a5 zRi1NeI^$*5wo=abh3)KRuTYWU`q!m4z5a@%{$6Eg$oIvG^V*C*-vb%j8Phr$XDrC* z(UA*&U^SQe%sf!Y*W3~4KF|HkZ0mb3+CNU3b=S$eerV=7h*)i=VqF*h8Cu}lgTX^w zq^e#1p{TpL^vV&g*R^PtuXwzq1m7awEvmO6qMvT6Y!_ZLgwoqp75VC^U;L?mp=5AJC>u>2>M2L z#M)Wsj6dyXjT56<$@_E0&Tu}6r@f=+L)7Q-3=6Oh5c6Ul*z}7ONj(ZG9_t}bQTJ7- zrQ@@Fwt}bQ;*XE&igLXsRz|G>>(68rIBJBio4b~&8P(h3)6lP}b8RZEdeppQ<)uRH zlQOcm7mkFO`B`Vj5cl)ivEjee=(t`i)zn;yc^s$nN#RD;U6|%s?#VOlK_UJPJ6pZw z;^U0kc0%gpz<;YMUmxmi`}Hk}I7ahlF6;Z2rg~vPM-tqguR*TaKzb;t zp7gmi!A@U7YYt`w`Q&kKzo)v3c?dk!cE4$Z=-k_c(!bUMj(D!m6Pf+ag-I|IPvgk} zm!vt;)90e%gAl$OcF#25Y^6e}pV*sSwIaYQMpxn8Rr=e#PwOY#r(RWEd(r(o`Q_R6 zBp~+B=-|nHRb59SAymbPvj*94y!D+J(UIGE&0IYU6U@n3>sH^hE*#<=NhP?|^EwUT z%xMSw^7B!o_-78HbK?xoTJV5HDu*jE$)6aok~e?mCHAIM5BUkB*sUHp#3a#)A@OHD z`o$+%e>kv|7!D+oj!Em`;g`&dqUl2y50`M*x13Kgab!GoV#%p^+Vt?>kb7;dzh~%} zx@g~9PuiLo-y7`J@2&OB;aXBz)q?w=FV%a@Z8hQlqr#X)FqD0b)wGKXbh-LWIGD3A zSM{UdN1miUde}lE7HiE=>mTy9z6xib$|MKbY+q|V%&${U;C|j7(cy@`tDZ37J5$pB zh}TQlTau?D+!UT*BX3H`pH=+2c=SpL$9j-YcK@z%v)E6<_Abr%QX!^$7C4*0 zB&j9+yvV^%->01%)U0nnH-ZFf!>}#=14w!K0W;7GPZLNA%l3h7?!?d&oJi;OuNcGu zR&`r9+i6oHWpbw#hpf0KLdKCHq!w);*VqMHAoT)BUG3utEY_5Ales2ByEY)cJQ 
zQ|^p>1^2ml|J8Q+#ZlMvPus#=6A)s58ARX*^^I?&Rl^_R#r#wJR-PPWVAk5hcT+E^ zH`tp`bI$#E;*J6X{B3LE@?OF^pYKJW=TWg)gY5=BBdfp48#yyB|CYb;>MG{X8n<5-eZ(eO}0DB$$?tWiP3B=05*AZ2}&fqBXmAW=?$tmU038nu`^x zjT>wE=7LzWCfWM0)oV1B2is8LV;tCxf6=PJTjwwM&E0veZGdIa*K!=n+pay%f|iYQ z7QLPGq}MEN^Qb)=9F}@7-n&rlcmog0{)Ik!SUD_iZHc&gPpUSU*%&=-`6DJfdqtPb zm~>qkM<-T*19SSYQ<8u3;s1oqSnBi-2i(F)PaBLR`iVRJ#3gkp4(Ix<8fPIx4R#V9 zHD%o-YiAdLI1Y-rvBYbm(kA06b5YJA&ztRx)IGc>)Bki2>-u&wx0SbVpUC9w68kXb zgCu7{tm`f|7||s`qZ{M<2+vFBLcV%Fs}_+HiGRxZKp)=!fXO=k9(6OKN%F}$Kg=MZ z7v0&qQ|AIYTzkbM?|6u{1v`F_ZH_QPrKH&%u-^6dQuzx2z5K?CCbYy#v$D)tNI#~| z^4Lgv`bkpv4X4__!g=;8UTW};l<#)j_xGPrzkB`Mu@J8)8orc&&ADK4ea_LuVrm2Y zTAl!s0$<2AZ<@JqL>C+51-ixmw!t}h>*vqvc>uaIisBo^&hxj$kD@t~2QGt0-f8}{ z#zj?%zXYBf*k}K6t3T=3;Ln4$&d37X75BudC-X4bx6T^xwprC*pkK~5R3mY%7m|mA zfMwP^*AFr=(%*34PYE7*#&7nC#a4Yj%EK@HcX~TH#3!CVx#(&8!+{Vgm_-ck9VA(J z`Zya(jBp%0;nC$E);1vf?stYN4(gsr_u=8t!`Yj1(g*J3!p?Z=#D7P!mS=9yIb&yC zP{`|X(4h4Xph*ap87eG({M5}EfprX|q`%xy!?=C`tM3`?8Z?!SrS2+TGqva{G5$(CmQ(@_xy_)&B zCEBrHnQyMX^ zmwfaki65CfHyv&`gu#b?%abpAB`0~ZA2{fX+<)rOBrpjCZu_~h6cstd6run&(l`$v zOxG|9KRMXcL!NoU!%yx{67HXHe>?6z#p=1<){-^PK6zx2iEX_f;PxGw_IS8X{sUEJ z_GtWkpmm7G>m3fiEauDST41r@a2Bv!RRp zw_3I2o%CvDegl}*x#o$uCw2OZcFyyJX|137ew+8n^RPX#hx>0k+<(sw{@a>vKH-0w z5u7)f^GW@N{ZpU#-+KMC+V9WRulB6|&K|WpZ?peZ{@=y_O8wi>J=a7ZH>_fJ{2SVA zpY|~OPj1cjU;jGyuj_dBXWaF;pRX3x${OIr_06SqthJ0JY~{nd;y(}yPfkMNB!)bB zPg@-6OC5f3;eV50|3t!_z2i&X>_<8uVT9KwIq>7dPVVthj&$|ne!7_Klg~b9pRtqEw)mdhin*RqH@hQG&9yy$>D^2B zzh^!DowFll-DVDoSHs!A)=8|eqp!S{HU1FRzyH7ShZyQVSb;u%zt-^oc-{dzZQ^%=P5%oL=4 z03WLAi+6nV&n$u5cq=vm0}SRtDK=n_V9&@Qct#)FTzAua%ni~xyH!gzA28yXd4+qK z$$%(apT+lS9nCL|60T27L}R`B8#eVHRoVW-Dis>Oh>bY^HB1d1;dT-D7+FoPNJ7=&*ZGpMP+DGs0(b;=?eMTA;+FWF>)!EkP%mGiI z#4?Y3`ELH>w`%leKq>4}fm`R0|=UDchnbxx>bM$a0 zB9fZRPGOB#59gsZ#`mArk=S?r><#4ZmzC=R^(N`9SazzFqV=7tU%{a$<{5;y{7 zh#LLQ+6#65tTH!r-&a3;;@bVx)SkC43?F$N8u#H}unw@53#ric>LOeCt*5KM8kjGW ztr^XQ*;wxR#8g^|Y0LM`rJOTw`VRNA_5m0?_V`}sWVGcG-TFK8qhA$MSf6bsf%!^x 
zjqaUu0eO-b+l8?c^E>)yp7lKZ4t3<1=l&88SgG|sJe`Nl%y@tJS@T+c>N&AV`{qx~ zssGXTpJa#489m%(vPR?|9>iOTAAFpfQ%2f#I9F#RS=G8;USI2-${E;`#s6K6=etGk zgKu-Qs(-f6&NHy-ObX}kR3i6H?(Lryot1ODBbuFfJ(Ghxb8CJRP*bns`o;hI-ZgV& zl*!DI-uwG>`n)&OPa3@7uun<+Qw?&21(!ZJPMH|#9^kQDp`-E?%49v`$xcw}FN@@J?tUid+2R6vlKjO6zuN4$o&$VCj3;gD9rlmc4 zC8FwlCn^G=i%PiLdozIyAS;?NLH#xw-x$~|9rGUk&C4a($JLSo9#(&k$ zXY*HcZ}`8>ry4ycuO3^q_z3r_wfo(u>_5BSYG;493V+taub+muK5xDKjxU~)mHzs8 z55(CI^RPeJSSxbT4@JKHXRf01{xpB#zPsE@_xD|vnz(g*X2Xj-rZB_?^2*0Z0Q_dT2CVs*w#&!0PF=1{zCk7mx z*x=|{l6vT1$UW`!D+ZTzut=~e6EJ@@+!=UPwv4f{lW^7O1h{R(FvWoN@izu0iU$(mo0 z?TmXb%?7_-56m8^JzvN{C;zgy21am}gHPeY&jg)^<|SFd{G=aUQfDRYXBWgf(%P1} z-2D(|y!1h~&+CC`M=ffRw*N)*42<8i+G1|cdF?aZOLRVd@)g-RwRWK{IWu|96UdN@ zV`7#KX8KdyLav>tKiBiz{W;sOGkkqs4r=%9DM{bgu^sx6XpEFsQ)YG@2p!n z`#Q7L+lk5ePx^cQd(EFXb@tzR-?G1%&B^PWbKhOz#J<(lF%i!<^~I6AshhuhQS5up z+uP;Yao79x`99fpkDd$j#~KENN30LHz3` is the path to the directly containing the TensorRT engine files. +### executorExampleDebug + +This example shows how you can define which engine IO tensors should be dumped to numpy files. +From the `examples/cpp/executor/build` folder, you can get run the `executorExampleDebug` example with: + +``` +./executorExampleDebug +``` +where `` is the path to the directly containing the TensorRT engine files. + ### executorExampleAdvanced From the `examples/cpp/executor/build` folder, you can also run the `executorExampleAdvanced` example. 
To get the full list of supported input arguments, type diff --git a/examples/cpp/executor/executorExampleBasic.cpp b/examples/cpp/executor/executorExampleBasic.cpp index 993eb7383..b3ae33283 100644 --- a/examples/cpp/executor/executorExampleBasic.cpp +++ b/examples/cpp/executor/executorExampleBasic.cpp @@ -46,7 +46,7 @@ int main(int argc, char* argv[]) auto request = tle::Request(inputTokens, maxNewTokens); // Enqueue the request - auto requestId = executor.enqueueRequest(std::move(request)); + auto requestId = executor.enqueueRequest(request); // Wait for the response auto responses = executor.awaitResponses(requestId); diff --git a/examples/cpp/executor/executorExampleDebug.cpp b/examples/cpp/executor/executorExampleDebug.cpp new file mode 100644 index 000000000..4179f6bd9 --- /dev/null +++ b/examples/cpp/executor/executorExampleDebug.cpp @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "tensorrt_llm/common/logger.h" +#include "tensorrt_llm/executor/executor.h" +#include "tensorrt_llm/plugins/api/tllmPlugin.h" + +#include + +namespace tlc = tensorrt_llm::common; +namespace tle = tensorrt_llm::executor; + +int main(int argc, char* argv[]) +{ + // Register the TRT-LLM plugins + initTrtLlmPlugins(); + + if (argc != 2) + { + TLLM_LOG_ERROR("Usage: %s ", argv[0]); + return 1; + } + + // Create the executor for this engine + tle::SizeType32 beamWidth = 1; + auto executorConfig = tle::ExecutorConfig(beamWidth); + // Select which tensors should be dumped + auto debugConfig = tle::DebugConfig(); + debugConfig.setDebugTensorNames({"host_request_types"}); + executorConfig.setDebugConfig(debugConfig); + + auto trtEnginePath = argv[1]; + auto executor = tle::Executor(trtEnginePath, tle::ModelType::kDECODER_ONLY, executorConfig); + + // Create the request + tle::SizeType32 maxNewTokens = 2; + tle::VecTokens inputTokens{1, 2, 3, 4}; + auto request = tle::Request(inputTokens, maxNewTokens); + + // Enqueue the request + auto requestId = executor.enqueueRequest(request); + + // Wait for the response + auto responses = executor.awaitResponses(requestId); + + // Get outputTokens + auto outputTokens = responses.at(0).getResult().outputTokenIds.at(beamWidth - 1); + + TLLM_LOG_INFO("Output tokens: %s", tlc::vec2str(outputTokens).c_str()); + + return 0; +} diff --git a/examples/cpp/executor/executorExampleLogitsProcessor.cpp b/examples/cpp/executor/executorExampleLogitsProcessor.cpp index e3dff7f4c..0913b77b1 100644 --- a/examples/cpp/executor/executorExampleLogitsProcessor.cpp +++ b/examples/cpp/executor/executorExampleLogitsProcessor.cpp @@ -61,9 +61,12 @@ int main(int argc, char* argv[]) // Create the executor for this engine tle::SizeType32 beamWidth = 1; auto executorConfig = tle::ExecutorConfig(beamWidth); - executorConfig.setLogitsPostProcessorMap( - std::unordered_map{ - {logitsPostProcessorName, logitsPostProcessorFn}}); + + auto 
logitsProcConfig = tle::LogitsPostProcessorConfig(); + logitsProcConfig.setProcessorMap(std::unordered_map{ + {logitsPostProcessorName, logitsPostProcessorFn}}); + executorConfig.setLogitsPostProcessorConfig(logitsProcConfig); + auto trtEnginePath = argv[1]; auto executor = tle::Executor(trtEnginePath, tle::ModelType::kDECODER_ONLY, executorConfig); diff --git a/examples/dbrx/requirements.txt b/examples/dbrx/requirements.txt index 5d81c2f7c..ea7fd8f1e 100644 --- a/examples/dbrx/requirements.txt +++ b/examples/dbrx/requirements.txt @@ -1,5 +1,5 @@ --extra-index-url https://pypi.nvidia.com -tensorrt_llm==0.13.0.dev2024081300 +tensorrt_llm==0.13.0.dev2024082000 datasets~=2.14.5 evaluate~=0.4.1 rouge_score~=0.1.2 diff --git a/examples/dit/sample.py b/examples/dit/sample.py index 8c800ba49..8b8f41641 100644 --- a/examples/dit/sample.py +++ b/examples/dit/sample.py @@ -10,7 +10,6 @@ from torchvision.utils import save_image import tensorrt_llm -from tensorrt_llm._ipc_utils import set_peer_access from tensorrt_llm._utils import str_dtype_to_torch, trt_dtype_to_torch from tensorrt_llm.logger import logger from tensorrt_llm.plugin.plugin import CustomAllReduceHelper @@ -77,11 +76,10 @@ def __init__(self, expected_tensor_names = ['latent', 'timestep', 'label', 'output'] if self.mapping.tp_size > 1: - is_p2p_supported = set_peer_access(self.mapping) self.buffer, self.all_reduce_workspace = CustomAllReduceHelper.allocate_workspace( self.mapping, CustomAllReduceHelper.max_workspace_size_auto( - self.mapping.tp_size), is_p2p_supported) + self.mapping.tp_size)) self.inputs['all_reduce_workspace'] = self.all_reduce_workspace expected_tensor_names += ['all_reduce_workspace'] diff --git a/examples/falcon/requirements.txt b/examples/falcon/requirements.txt index f0c1e9f12..decac46a6 100644 --- a/examples/falcon/requirements.txt +++ b/examples/falcon/requirements.txt @@ -1,5 +1,5 @@ --extra-index-url https://pypi.nvidia.com -tensorrt_llm==0.13.0.dev2024081300 
+tensorrt_llm==0.13.0.dev2024082000 transformers>=4.31.0 datasets~=2.14.5 evaluate~=0.4.1 diff --git a/examples/gemma/requirements.txt b/examples/gemma/requirements.txt index dd5a143ca..c28634625 100644 --- a/examples/gemma/requirements.txt +++ b/examples/gemma/requirements.txt @@ -3,7 +3,7 @@ # WAR the new posting of "nvidia-cudnn-cu12~=9.0". # "jax[cuda12_pip]~=0.4.19" specifies "nvidia-cudnn-cu12>=8.9" but actually requires "nvidia-cudnn-cu12~=8.9". nvidia-cudnn-cu12~=8.9; platform_machine == "x86_64" -tensorrt_llm==0.13.0.dev2024081300 +tensorrt_llm==0.13.0.dev2024082000 flax~=0.8.0 # jax[cuda12_pip]~=0.4.19; platform_system != "Windows" jax~=0.4.19; platform_system == "Windows" diff --git a/examples/gpt/requirements.txt b/examples/gpt/requirements.txt index b71f09c9d..3ff478877 100644 --- a/examples/gpt/requirements.txt +++ b/examples/gpt/requirements.txt @@ -1,5 +1,5 @@ --extra-index-url https://pypi.nvidia.com -tensorrt_llm==0.13.0.dev2024081300 +tensorrt_llm==0.13.0.dev2024082000 datasets~=2.14.5 evaluate~=0.4.1 rouge_score~=0.1.2 diff --git a/examples/gptj/requirements.txt b/examples/gptj/requirements.txt index 8d7092105..a4d628574 100644 --- a/examples/gptj/requirements.txt +++ b/examples/gptj/requirements.txt @@ -1,5 +1,5 @@ --extra-index-url https://pypi.nvidia.com -tensorrt_llm==0.13.0.dev2024081300 +tensorrt_llm==0.13.0.dev2024082000 datasets~=2.14.5 evaluate~=0.4.1 rouge_score~=0.1.2 diff --git a/examples/gptneox/requirements.txt b/examples/gptneox/requirements.txt index 76d7fa110..42ecdcecb 100644 --- a/examples/gptneox/requirements.txt +++ b/examples/gptneox/requirements.txt @@ -1,5 +1,5 @@ --extra-index-url https://pypi.nvidia.com -tensorrt_llm==0.13.0.dev2024081300 +tensorrt_llm==0.13.0.dev2024082000 datasets~=2.14.5 rouge_score~=0.1.2 evaluate~=0.4.1 diff --git a/examples/grok/requirements.txt b/examples/grok/requirements.txt index 12eb5ee0f..e2177ded1 100644 --- a/examples/grok/requirements.txt +++ b/examples/grok/requirements.txt @@ -1,6 +1,6 
@@ -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html --extra-index-url https://pypi.nvidia.com -tensorrt_llm==0.13.0.dev2024081300 +tensorrt_llm==0.13.0.dev2024082000 datasets==2.14.6 evaluate~=0.4.1 rouge_score~=0.1.2 diff --git a/examples/high-level-api/README.md b/examples/high-level-api/README.md index 907b61a3b..15213b438 100644 --- a/examples/high-level-api/README.md +++ b/examples/high-level-api/README.md @@ -2,7 +2,18 @@ We are working on a Python high-level API(HLAPI) for LLM workflow, which is still in incubation and may change later. Here we show you a preview of how it works and how to use it. -Note that the APIs are not stable and only support the LLaMA model. We appreciate your patience and understanding as we improve this API. +Note that the APIs are not stable and we appreciate your patience and understanding as we improve this API. + +## HLAPI Supported Model +* LLaMA (including variants Mistral, Mixtral, InternLM) +* GPT (including variants Starcoder-1/2, Santacoder) +* Gemma-1/2 +* Phi-1/2/3 +* ChatGLM (including variants glm-10b, chatglm, chatglm2, chatglm3, glm4) +* QWen-1/1.5/2 +* Falcon +* Baichuan-1/2 +* GPT-J ## Quick start diff --git a/examples/high-level-api/requirements.txt b/examples/high-level-api/requirements.txt index 46456a250..f7e1fd97f 100644 --- a/examples/high-level-api/requirements.txt +++ b/examples/high-level-api/requirements.txt @@ -1,2 +1,2 @@ --extra-index-url https://pypi.nvidia.com -tensorrt_llm==0.13.0.dev2024081300 +tensorrt_llm==0.13.0.dev2024082000 diff --git a/examples/internlm/README.md b/examples/internlm/README.md index d9b9c0e93..aae120486 100644 --- a/examples/internlm/README.md +++ b/examples/internlm/README.md @@ -18,7 +18,7 @@ The TensorRT-LLM InternLM implementation is based on the LLaMA model. The implem be found in [tensorrt_llm/models/llama/model.py](../../tensorrt_llm/models/llama/model.py). 
The TensorRT-LLM InternLM example code lies in [`examples/llama`](./): -* [`convert_checkpoint.py`](../llama/convert_checkpoint.py) converts the Huggingface Model of Skywork into TensorRT-LLM checkpoint. +* [`convert_checkpoint.py`](../llama/convert_checkpoint.py) converts the Huggingface Model of InternLM into TensorRT-LLM checkpoint. * [`convert_checkpoint.py`] to to convert a checkpoint from the [HuggingFace (HF) Transformers](https://github.com/huggingface/transformers) format to the TensorRT-LLM format In addition, there are two shared files in the parent folder [`examples`](../) for inference and evaluation: diff --git a/examples/internlm/requirements.txt b/examples/internlm/requirements.txt index d3de57f84..211fa8ceb 100644 --- a/examples/internlm/requirements.txt +++ b/examples/internlm/requirements.txt @@ -1,5 +1,5 @@ --extra-index-url https://pypi.nvidia.com -tensorrt_llm==0.13.0.dev2024081300 +tensorrt_llm==0.13.0.dev2024082000 datasets==2.14.5 rouge_score~=0.1.2 sentencepiece~=0.1.99 diff --git a/examples/jais/requirements.txt b/examples/jais/requirements.txt index b71f09c9d..3ff478877 100644 --- a/examples/jais/requirements.txt +++ b/examples/jais/requirements.txt @@ -1,5 +1,5 @@ --extra-index-url https://pypi.nvidia.com -tensorrt_llm==0.13.0.dev2024081300 +tensorrt_llm==0.13.0.dev2024082000 datasets~=2.14.5 evaluate~=0.4.1 rouge_score~=0.1.2 diff --git a/examples/llama/README.md b/examples/llama/README.md index 5bc69c50d..584bc0b51 100644 --- a/examples/llama/README.md +++ b/examples/llama/README.md @@ -85,9 +85,9 @@ The defaults have been carefully tuned for better performance. For example, `gpt Normally `trtllm-build` only requires single GPU, but if you've already got all the GPUs needed for inference, you could enable parallel building to make the engine building process faster by adding `--workers` argument. Please note that currently `workers` feature only supports single node. 
-`--use_fused_mlp` enables GEMM horizontal fusion in gated MLP layer, which reduces input traffic and potentially improves performance. For FP8 PTQ, the downside is slight reduction of accuracy because one of the quantization scaling factors are discarded (accuracy 0.45734 vs 0.45755 for LLaMA-v2 7B using modelopt/examples/hf/instruct_eval/mmlu.py). +`--use_fused_mlp=enable` enables GEMM horizontal fusion in gated MLP layer, which reduces input traffic and potentially improves performance. For FP8 PTQ, the downside is slight reduction of accuracy because one of the quantization scaling factors are discarded (accuracy 0.45734 vs 0.45755 for LLaMA-v2 7B using modelopt/examples/hf/instruct_eval/mmlu.py). -`--use_fused_mlp --gemm_swiglu_plugin ` fuses 2 GEMMs without biases and SwiGLU into one kernel. This is a preview feature and is only supported for dtype `fp8`. The supported architecture is SM90. +`--use_fused_mlp=enable --gemm_swiglu_plugin ` fuses 2 GEMMs without biases and SwiGLU into one kernel. This is a preview feature and is only supported for dtype `fp8`. The supported architecture is SM90. 
Here're some examples: diff --git a/examples/llama/requirements.txt b/examples/llama/requirements.txt index 6ae2e6d9f..4656b4f4c 100644 --- a/examples/llama/requirements.txt +++ b/examples/llama/requirements.txt @@ -1,5 +1,5 @@ --extra-index-url https://pypi.nvidia.com -tensorrt_llm==0.13.0.dev2024081300 +tensorrt_llm==0.13.0.dev2024082000 datasets==2.14.6 evaluate~=0.4.1 rouge_score~=0.1.2 diff --git a/examples/mamba/README.md b/examples/mamba/README.md index b218ae070..90b9f6573 100644 --- a/examples/mamba/README.md +++ b/examples/mamba/README.md @@ -25,10 +25,10 @@ In addition, there are two shared files in the parent folder [`examples`](../) f ## Support Matrix -| Model Name | FP16 | BF16 | -| :--------------: | :---: | :---: | -| Mamba1 | Y | Y | -| Mamba2 | Y | Y | +| Model Name | FP16 | BF16 | TP | +| :--------------: | :---: | :---: | :-: | +| Mamba1 | Y | Y | N | +| Mamba2 | Y | Y | Y | * Mamba2: TensorRT-LLM can only support the pure Mamba model for now, will support the hybrid models later. @@ -78,6 +78,9 @@ git clone https://huggingface.co/mistralai/mathstral-7B-v0.1 ./mamba_model/maths ### 2. Convert weights from HF Transformers to TensorRT-LLM format The [`convert_checkpoint.py`](./convert_checkpoint.py) script converts HF weights to TensorRT-LLM checkpoints. +For the Mamba2 models, if they can support tensor parallelism, you can run them with 1, 2, 4 or 8 GPUs. Here we use +mamba-codestral-7B-v0.1 as an example. + ```bash # mamba-2.8b python convert_checkpoint.py --model_dir ./mamba_model/mamba-2.8b/ \ @@ -103,6 +106,12 @@ python convert_checkpoint.py --model_dir ./mamba_model/mamba2-130m/ \ python convert_checkpoint.py --model_dir ./mamba_model/mamba-codestral-7B-v0.1/ \ --dtype float16 \ --output_dir ./mamba_model/mamba-codestral-7B-v0.1/trt_ckpt/fp16/1-gpu/ + +# mamba-codestral-7B-v0.1 with 2-way tensor parallelism. 
+python convert_checkpoint.py --model_dir ./mamba_model/mamba-codestral-7B-v0.1/ \ + --dtype float16 \ + --world_size 2 \ + --output_dir ./mamba_model/mamba-codestral-7B-v0.1/trt_ckpt/fp16/2-gpu/ ``` ### 3. Build TensorRT engine(s) @@ -153,6 +162,15 @@ trtllm-build --checkpoint_dir ./mamba_model/mamba-codestral-7B-v0.1/trt_ckpt/fp1 --max_input_len 924 \ --max_seq_len 1024 \ --output_dir ./mamba_model/mamba-codestral-7B-v0.1/trt_engines/fp16/1-gpu/ + +# mamba-codestral-7B-v0.1 with 2-way tensor parallelism. +trtllm-build --checkpoint_dir ./mamba_model/mamba-codestral-7B-v0.1/trt_ckpt/fp16/2-gpu/ \ + --paged_kv_cache disable \ + --gemm_plugin auto \ + --max_batch_size 8 \ + --max_input_len 924 \ + --max_seq_len 1024 \ + --output_dir ./mamba_model/mamba-codestral-7B-v0.1/trt_engines/fp16/2-gpu/ ``` Note that when building Mamba models, you need to disable the `paged_kv_cache` as it is used for @@ -200,4 +218,12 @@ python ../summarize.py --test_trt_llm \ --tokenizer_dir ./mamba_model/mathstral-7B-v0.1/ \ --data_type fp16 \ --engine_dir ./mamba_model/mamba-codestral-7B-v0.1/trt_engines/fp16/1-gpu/ + +# mamba-codestral-7B-v0.1 with 2-way tensor parallelism. 
+mpirun -n 2 --allow-run-as-root \ + python ../summarize.py --test_trt_llm \ + --hf_model_dir ./mamba_model/mamba-codestral-7B-v0.1/ \ + --tokenizer_dir ./mamba_model/mathstral-7B-v0.1/ \ + --data_type fp16 \ + --engine_dir ./mamba_model/mamba-codestral-7B-v0.1/trt_engines/fp16/2-gpu/ ``` diff --git a/examples/mamba/convert_checkpoint.py b/examples/mamba/convert_checkpoint.py index 831ceab46..6c7d6ed4c 100644 --- a/examples/mamba/convert_checkpoint.py +++ b/examples/mamba/convert_checkpoint.py @@ -23,6 +23,10 @@ def parse_arguments(): parser = argparse.ArgumentParser() parser.add_argument('--model_dir', type=Path, default=None) + parser.add_argument("--world_size", + type=int, + default=1, + help="world size, only support tensor parallelism now") parser.add_argument('--dtype', type=str, default='float16', @@ -60,6 +64,14 @@ def get_tllm_linear_weight(weight, prefix, bias=None): return results +def split(v, tp_size, idx, dim=0): + assert v.shape[dim] % tp_size == 0 + split_size = v.shape[dim] // tp_size + if tp_size == 1: + return v + return torch.split(v, split_size, dim=dim)[idx] + + def convert_hf_mamba(hf_mamba, rank=0, dtype='float32', @@ -160,13 +172,18 @@ def rename_hf_to_tllm(name: str): return name -def convert_from_hf_checkpoint(model_dir: Union[str, Path], +def convert_from_hf_checkpoint(mamba_config: dict, + model_dir: Union[str, Path], rank=0, dtype: Union[str, torch.dtype] = torch.float32, mamba_version: str = 'Mamba1'): logger.info('Loading weights from HF Mamba...') tik = time.time() + tp_rank = rank + tp_size = mamba_config['mapping']['tp_size'] + d_inner = mamba_config['rnn_hidden_size'] + d_state = mamba_config['state_size'] weights = {} if isinstance(dtype, str): dtype = tensorrt_llm.str_dtype_to_torch(dtype) @@ -196,6 +213,44 @@ def convert_from_hf_checkpoint(model_dir: Union[str, Path], in_proj_params = torch.split(param, param.size(0) // 2, dim=0) weights[tllm_name.replace('proj', 'proj_x')] = in_proj_params[0] 
weights[tllm_name.replace('proj', 'proj_z')] = in_proj_params[1] + elif 'in_proj' in name and mamba_version == 'Mamba2': + nheads = d_inner // mamba_config['rnn_head_size'] + ngroups = mamba_config['ngroups'] + in_proj_z, in_proj_x, in_proj_b, in_proj_c, in_proj_dt = torch.split( + param, [ + d_inner, d_inner, ngroups * d_state, ngroups * d_state, + nheads + ], + dim=0) + in_proj_z = split(in_proj_z, tp_size, tp_rank, dim=0) + in_proj_x = split(in_proj_x, tp_size, tp_rank, dim=0) + in_proj_b = split(in_proj_b, tp_size, tp_rank, dim=0) + in_proj_c = split(in_proj_c, tp_size, tp_rank, dim=0) + in_proj_dt = split(in_proj_dt, tp_size, tp_rank, dim=0) + in_proj = torch.concat( + [in_proj_z, in_proj_x, in_proj_b, in_proj_c, in_proj_dt]) + weights[tllm_name] = in_proj.contiguous() + elif 'conv1d' in name and mamba_version == 'Mamba2': + ngroups = mamba_config['ngroups'] + conv_x, conv_b, conv_c = torch.split( + param, [d_inner, ngroups * d_state, ngroups * d_state], + dim=0) + conv_x = split(conv_x, tp_size, tp_rank, dim=0) + conv_b = split(conv_b, tp_size, tp_rank, dim=0) + conv_c = split(conv_c, tp_size, tp_rank, dim=0) + conv = torch.concat([conv_x, conv_b, conv_c]) + weights[tllm_name] = conv.contiguous() + elif any(keyword in name for keyword in ( + 'mixer.norm.weight', + 'A_log', + 'D', + 'dt_proj.bias', + 'dt_bias', + )) and mamba_version == 'Mamba2': + weights[tllm_name] = split(param, tp_size, tp_rank, dim=0) + elif 'out_proj' in name and mamba_version == 'Mamba2': + weights[tllm_name] = split(param, tp_size, tp_rank, + dim=1).contiguous() else: weights[tllm_name] = param del model_params @@ -205,6 +260,11 @@ def convert_from_hf_checkpoint(model_dir: Union[str, Path], if 'lm_head.weight' not in weights or weights['lm_head.weight'].data_ptr( ) == emb.data_ptr(): weights['lm_head.weight'] = copy.deepcopy(emb) + if mamba_version == 'Mamba2': + weights['lm_head.weight'] = split(weights['lm_head.weight'], + tp_size, + tp_rank, + dim=0) tok = time.time() t = 
time.strftime('%H:%M:%S', time.gmtime(tok - tik)) @@ -218,9 +278,7 @@ def do_convert_from_ckpt(args): def convert(worker_rank, args, convert_args): convert_from_ckpt = do_convert_from_ckpt(args) - world_size = 1 - args.workers = 1 - for rank in range(worker_rank, world_size, args.workers): + for rank in range(worker_rank, args.world_size): if convert_from_ckpt: weights = convert_from_hf_checkpoint(rank=rank, **convert_args) else: @@ -352,13 +410,18 @@ def main(): 'residual_in_fp32': hf_config.residual_in_fp32, 'pad_vocab_size_multiple': hf_config.pad_vocab_size_multiple, 'hidden_act': 'silu', - 'num_attention_heads': 1, + 'num_attention_heads': args.world_size, 'rnn_hidden_size': hf_config.intermediate_size, 'rnn_conv_dim_size': hf_config.intermediate_size, 'state_size': hf_config.state_size, 'conv_kernel': hf_config.conv_kernel, 'use_bias': hf_config.use_bias, 'mamba_version': mamba_version, + 'mapping': { + 'world_size': args.world_size, + 'tp_size': args.world_size, + 'pp_size': 1 + }, } if mamba_version == 'Mamba2': conv_dim = hf_config.intermediate_size + 2 * hf_config.ngroups * hf_config.state_size @@ -377,6 +440,7 @@ def main(): convert_from_ckpt = do_convert_from_ckpt(args) # TODO: Add convert_hf_mamba support for Mamba2 when transformers can support Mamba2 models assert convert_from_ckpt or mamba_version == 'Mamba2', "Mamba2 can only support convert from checkpoints." + assert args.world_size == 1 or mamba_version == 'Mamba2', "Mamba1 can not support tensor parallelism." 
if not convert_from_ckpt: logger.info(f'Convert by using model') hf_mamba = AutoModelForCausalLM.from_pretrained(args.model_dir, @@ -394,6 +458,7 @@ else: convert_args['hf_mamba'] = hf_mamba convert_args['mamba_version'] = mamba_version + convert_args['mamba_config'] = config convert(0, args, convert_args) diff --git a/examples/mamba/requirements.txt b/examples/mamba/requirements.txt index 2fdedd010..465abbad0 100644 --- a/examples/mamba/requirements.txt +++ b/examples/mamba/requirements.txt @@ -1,5 +1,5 @@ --extra-index-url https://pypi.nvidia.com -tensorrt_llm==0.13.0.dev2024081300 +tensorrt_llm==0.13.0.dev2024082000 transformers>=4.39.0 datasets~=2.14.5 evaluate diff --git a/examples/medusa/requirements.txt b/examples/medusa/requirements.txt index e99999e73..774d5ee62 100644 --- a/examples/medusa/requirements.txt +++ b/examples/medusa/requirements.txt @@ -1,5 +1,5 @@ --extra-index-url https://pypi.nvidia.com -tensorrt_llm==0.13.0.dev2024081300 +tensorrt_llm==0.13.0.dev2024082000 datasets~=2.14.5 rouge_score~=0.1.2 sentencepiece~=0.1.99 diff --git a/examples/mixtral/requirements.txt b/examples/mixtral/requirements.txt index e02c93f53..2727c662e 100644 --- a/examples/mixtral/requirements.txt +++ b/examples/mixtral/requirements.txt @@ -1,4 +1,4 @@ --extra-index-url https://pypi.nvidia.com -tensorrt_llm==0.13.0.dev2024081300 +tensorrt_llm==0.13.0.dev2024082000 transformers==4.38.2 accelerate==0.25.0 diff --git a/examples/model_api/README.md b/examples/model_api/README.md index b3fb8d736..065a81847 100644 --- a/examples/model_api/README.md +++ b/examples/model_api/README.md @@ -38,3 +38,30 @@ Using AWQ INT4 weight only algorithm to quantize the given hugging llama model f ```bash python ./llama_quantize.py --hf_model_dir --cache_dir ./llama.awq/ ``` + + +## AutoModelForCausalLM + +The API `tensorrt_llm.AutoModelForCausalLM` can read from a Hugging Face model directory, find the correct TRT-LLM model class and dispatch the `from_hugging_face` method to the
correct TRT-LLM class. + +The following code snippets demonstrate the usage of the `AutoModelForCausalLM` class. + +```python + mapping = Mapping(world_size=world_size, rank=0, tp_size=tp, pp_size=pp) + trtllm_model = AutoModelForCausalLM.from_hugging_face(hf_model_dir, mapping=mapping) + engine = build(trtllm_model, build_config) + executor = GenerationExecutor.create(engine) +``` + +## AutoConfig + +The API `tensorrt_llm.AutoConfig` can read the configuration from a Hugging Face model directory, find and return the correct TRT-LLM configuration class if it's supported, and raise a `NotImplementedError` if not supported. This API is useful when one needs to create a TRT-LLM model object using dummy weights, for things like workflow testing, benchmarks, without reading the real weights from storage, since reading the weights for large models can take a significant amount of time. The usage looks like the snippets below: + +```python + mapping = Mapping(world_size=world_size, rank=0, tp_size=tp, pp_size=pp) + trtllm_config = AutoConfig.from_hugging_face(hf_model_dir, dtype='float16', mapping=mapping) + + # Use the __init__ constructor directly to create a TRT-LLM model object + # instead of using from_hugging_face class method, since from_hugging_face will read the weights + trtllm_model_fake_weights = AutoModelForCausalLM.get_trtllm_model_class(hf_model_dir)(trtllm_config) +``` diff --git a/examples/mpt/requirements.txt b/examples/mpt/requirements.txt index 8d7092105..a4d628574 100644 --- a/examples/mpt/requirements.txt +++ b/examples/mpt/requirements.txt @@ -1,5 +1,5 @@ --extra-index-url https://pypi.nvidia.com -tensorrt_llm==0.13.0.dev2024081300 +tensorrt_llm==0.13.0.dev2024082000 datasets~=2.14.5 evaluate~=0.4.1 rouge_score~=0.1.2 diff --git a/examples/multimodal/README.md b/examples/multimodal/README.md index a6aff6356..c9ff35237 100644 --- a/examples/multimodal/README.md +++ b/examples/multimodal/README.md @@ -300,7 +300,7 @@ Currently, CogVLM only support bfloat16
precision and doesn't support `remove_in --checkpoint_dir tmp/trt_models/${MODEL_NAME}/fp16/1-gpu \ --output_dir tmp/trt_engines/${MODEL_NAME}/fp16/1-gpu \ --gemm_plugin float16 \ - --use_fused_mlp \ + --use_fused_mlp=enable \ --max_batch_size 1 \ --max_input_len 2048 \ --max_seq_len 2560 \ @@ -405,7 +405,7 @@ Currently, CogVLM only support bfloat16 precision and doesn't support `remove_in --checkpoint_dir tmp/trt_models/${MODEL_NAME}/fp16/1-gpu \ --output_dir tmp/trt_engines/${MODEL_NAME}/fp16/1-gpu \ --gemm_plugin float16 \ - --use_fused_mlp \ + --use_fused_mlp=enable \ --max_batch_size 1 \ --max_input_len 2048 \ --max_seq_len 2560 \ @@ -417,7 +417,7 @@ Currently, CogVLM only support bfloat16 precision and doesn't support `remove_in --output_dir tmp/trt_engines/${MODEL_NAME}/fp16/1-gpu \ --gpt_attention_plugin float16 \ --gemm_plugin float16 \ - --use_fused_mlp \ + --use_fused_mlp=enable \ --max_batch_size 1 \ --max_input_len 4096 \ --max_seq_len 5120 \ @@ -427,9 +427,9 @@ Currently, CogVLM only support bfloat16 precision and doesn't support `remove_in # for VILA trtllm-build \ --checkpoint_dir tmp/trt_models/${MODEL_NAME}/fp16/1-gpu \ - --output_dir trt_engines/${MODEL_NAME}/fp16/1-gpu \ + --output_dir tmp/trt_engines/${MODEL_NAME}/fp16/1-gpu \ --gemm_plugin float16 \ - --use_fused_mlp \ + --use_fused_mlp=enable \ --max_batch_size 1 \ --max_input_len 2048 \ --max_seq_len 2560 \ @@ -458,7 +458,7 @@ Currently, CogVLM only support bfloat16 precision and doesn't support `remove_in For VILA, you can use either local file or web url as input images. 
Suppose you have a local image `av.png` downloaded from `https://github.com/Efficient-Large-Model/VILA/blob/main/demo_trt_llm/av.png` and the url of `merlion.png` ```bash - wget -O av.png https://raw.githubusercontent.com/Efficient-Large-Model/VILA/main/demo_trt_llm/av.png + wget -O av.png https://raw.githubusercontent.com/Efficient-Large-Model/VILA/main/demo_images/av.png python run.py \ --max_new_tokens 100 \ @@ -507,7 +507,7 @@ Currently, CogVLM only support bfloat16 precision and doesn't support `remove_in --calib_size 32 ``` - Then follow the same `trtllm-build` and `run.py` steps as before. NOTE: for `trtllm-build` command, do not use `--use_fused_mlp` in these quantization modes. + Then follow the same `trtllm-build` and `run.py` steps as before. NOTE: for `trtllm-build` command, do not use `--use_fused_mlp=enable` in these quantization modes. ## NeVA diff --git a/examples/nemotron/requirements.txt b/examples/nemotron/requirements.txt index 41442cada..286046ed2 100644 --- a/examples/nemotron/requirements.txt +++ b/examples/nemotron/requirements.txt @@ -1,5 +1,5 @@ --extra-index-url https://pypi.nvidia.com -tensorrt_llm==0.13.0.dev2024081300 +tensorrt_llm==0.13.0.dev2024082000 transformers==4.40.2 # https://github.com/NVIDIA/NeMo/issues/9793 huggingface_hub==0.23.5 diff --git a/examples/opt/requirements.txt b/examples/opt/requirements.txt index 8d7092105..a4d628574 100644 --- a/examples/opt/requirements.txt +++ b/examples/opt/requirements.txt @@ -1,5 +1,5 @@ --extra-index-url https://pypi.nvidia.com -tensorrt_llm==0.13.0.dev2024081300 +tensorrt_llm==0.13.0.dev2024082000 datasets~=2.14.5 evaluate~=0.4.1 rouge_score~=0.1.2 diff --git a/examples/phi/requirements.txt b/examples/phi/requirements.txt index cfe76b68c..6402bf4cb 100644 --- a/examples/phi/requirements.txt +++ b/examples/phi/requirements.txt @@ -1,5 +1,5 @@ --extra-index-url https://pypi.nvidia.com -tensorrt_llm==0.13.0.dev2024081300 +tensorrt_llm==0.13.0.dev2024082000 datasets~=2.14.5 evaluate~=0.4.1 
rouge_score~=0.1.2 diff --git a/examples/quantization/requirements.txt b/examples/quantization/requirements.txt index b5c226d7d..bb98915c8 100644 --- a/examples/quantization/requirements.txt +++ b/examples/quantization/requirements.txt @@ -1,5 +1,5 @@ --extra-index-url https://pypi.nvidia.com -tensorrt_llm==0.13.0.dev2024081300 +tensorrt_llm==0.13.0.dev2024082000 datasets>=2.14.4 nemo-toolkit[all]<=1.20.0,>=1.18.0 rouge_score~=0.1.2 diff --git a/examples/qwen/requirements.txt b/examples/qwen/requirements.txt index 52f502ec1..24f07e2c1 100644 --- a/examples/qwen/requirements.txt +++ b/examples/qwen/requirements.txt @@ -1,5 +1,5 @@ --extra-index-url https://pypi.nvidia.com -tensorrt_llm==0.13.0.dev2024081300 +tensorrt_llm==0.13.0.dev2024082000 datasets~=2.16.0 evaluate~=0.4.1 rouge_score~=0.1.2 diff --git a/examples/qwenvl/requirements.txt b/examples/qwenvl/requirements.txt index ef5a14bdd..a5c37c5fa 100644 --- a/examples/qwenvl/requirements.txt +++ b/examples/qwenvl/requirements.txt @@ -1,5 +1,5 @@ --extra-index-url https://pypi.nvidia.com -tensorrt_llm==0.13.0.dev2024081300 +tensorrt_llm==0.13.0.dev2024082000 datasets~=2.16.0 evaluate~=0.4.1 rouge_score~=0.1.2 diff --git a/examples/recurrentgemma/requirements.txt b/examples/recurrentgemma/requirements.txt index f05166baa..11768c51d 100644 --- a/examples/recurrentgemma/requirements.txt +++ b/examples/recurrentgemma/requirements.txt @@ -1,5 +1,5 @@ --extra-index-url https://pypi.nvidia.com -tensorrt_llm==0.13.0.dev2024081300 +tensorrt_llm==0.13.0.dev2024082000 git+https://github.com/google-deepmind/recurrentgemma.git flax>=0.8.2 jax~=0.4.23 diff --git a/examples/run.py b/examples/run.py index 4313e4f59..1f65e14b4 100644 --- a/examples/run.py +++ b/examples/run.py @@ -358,6 +358,12 @@ def main(args): assert args.temperature == 1.0, "Medusa should use temperature == 1.0" assert args.num_beams == 1, "Medusa should use num_beams == 1" runner_kwargs.update(medusa_choices=args.medusa_choices) + if args.lookahead_config is 
not None: + args.lookahead_config = ast.literal_eval(args.lookahead_config) + assert len( + args.lookahead_config + ) == 3, "Lookahead needs [max_window_size, max_ngram_size, max_verification_set_size]" + runner_kwargs.update(lookahead_config=args.lookahead_config) if not args.use_py_session: runner_kwargs.update( max_batch_size=len(batch_input_ids), diff --git a/examples/skywork/requirements.txt b/examples/skywork/requirements.txt index ae491ace9..c2aaea208 100644 --- a/examples/skywork/requirements.txt +++ b/examples/skywork/requirements.txt @@ -1,5 +1,5 @@ --extra-index-url https://pypi.nvidia.com -tensorrt_llm==0.13.0.dev2024081300 +tensorrt_llm==0.13.0.dev2024082000 datasets~=2.16.1 evaluate~=0.4.1 rouge_score~=0.1.2 diff --git a/examples/smaug/requirements.txt b/examples/smaug/requirements.txt index 6ae2e6d9f..4656b4f4c 100644 --- a/examples/smaug/requirements.txt +++ b/examples/smaug/requirements.txt @@ -1,5 +1,5 @@ --extra-index-url https://pypi.nvidia.com -tensorrt_llm==0.13.0.dev2024081300 +tensorrt_llm==0.13.0.dev2024082000 datasets==2.14.6 evaluate~=0.4.1 rouge_score~=0.1.2 diff --git a/examples/summarize.py b/examples/summarize.py index 8ccdb5949..58cc19b9f 100644 --- a/examples/summarize.py +++ b/examples/summarize.py @@ -140,7 +140,7 @@ def main(args): bad_words_list = tensorrt_llm.runtime.decode_words_list( args.bad_words, tokenizer) - # random_seed = 5 + random_seed = args.random_seed temperature = args.temperature num_beams = args.num_beams length_penalty = args.length_penalty @@ -148,6 +148,7 @@ def main(args): repetition_penalty = args.repetition_penalty presence_penalty = args.presence_penalty frequency_penalty = args.frequency_penalty + torch.manual_seed(random_seed) output_dir = Path(args.output_dir) if args.output_dir else None if output_dir is not None: @@ -347,18 +348,28 @@ def eval_hf(datapoint, local_early_stopping = "never" with torch.no_grad(): + hf_config = {} + if num_beams == 1: + hf_config.update({ + "top_k": top_k, + "top_p": 
top_p, + "do_sample": True, + }) + else: + hf_config.update({ + "num_beams": num_beams, + "num_return_sequences": num_beams, + "early_stopping": local_early_stopping, + }) outputs = model.generate(batch_input_ids, max_new_tokens=output_len, - top_k=top_k, temperature=temperature, eos_token_id=end_id, pad_token_id=pad_id, - num_beams=num_beams, - num_return_sequences=num_beams, length_penalty=length_penalty, - early_stopping=local_early_stopping, output_scores=True, - return_dict_in_generate=True) + return_dict_in_generate=True, + **hf_config) if eval_ppl and batch_size == 1: # model.generate cannot return context logits? # Will cause additional latency diff --git a/examples/utils.py b/examples/utils.py index ab20f9ae0..5d975ad94 100644 --- a/examples/utils.py +++ b/examples/utils.py @@ -289,6 +289,13 @@ def add_common_args(parser): help="Medusa choice to use, if not none, will use Medusa decoding." " E.g.: [[0, 0, 0, 0], [0, 1, 0], [1, 0], [1, 1]] for 9 medusa tokens." ) + parser.add_argument( + '--lookahead_config', + type=str, + default=None, + help="lookahead config to use, if not none, will use lookahead decoding." + " E.g.: [5, 6, 7] for [max_window_size, max_ngram_size, max_verification_set_size]." 
+ ) # model arguments parser.add_argument('--engine_dir', type=str, default='engine_outputs') diff --git a/examples/whisper/requirements.txt b/examples/whisper/requirements.txt index bfdd3b026..b426cf51e 100644 --- a/examples/whisper/requirements.txt +++ b/examples/whisper/requirements.txt @@ -1,5 +1,5 @@ --extra-index-url https://pypi.nvidia.com -tensorrt_llm==0.13.0.dev2024081300 +tensorrt_llm==0.13.0.dev2024082000 tiktoken datasets kaldialign diff --git a/requirements.txt b/requirements.txt index ddfddd173..2efbdec26 100644 --- a/requirements.txt +++ b/requirements.txt @@ -28,3 +28,6 @@ optimum evaluate janus mpmath>=1.3.0 +click +click_option_group +aenum diff --git a/scripts/build_wheel.py b/scripts/build_wheel.py index a3d095a0b..0f7e4c750 100755 --- a/scripts/build_wheel.py +++ b/scripts/build_wheel.py @@ -76,6 +76,7 @@ def main(*, trt_root: str = None, nccl_root: str = None, clean: bool = False, + configure_cmake: bool = False, use_ccache: bool = False, fast_build: bool = False, cpp_only: bool = False, @@ -185,7 +186,7 @@ def main(*, source_dir = get_source_dir() with working_directory(build_dir): cmake_def_args = " ".join(cmake_def_args) - if clean or first_build: + if clean or first_build or configure_cmake: build_run( f'cmake -DCMAKE_BUILD_TYPE="{build_type}" -DBUILD_PYT="{build_pyt}" -DBUILD_PYBIND="{build_pybind}"' f' -DNVTX_DISABLE="{disable_nvtx}" -DBUILD_MICRO_BENCHMARKS={build_micro_benchmarks}' @@ -326,6 +327,9 @@ def add_arguments(parser: ArgumentParser): parser.add_argument("--cuda_architectures", "-a") parser.add_argument("--install", "-i", action="store_true") parser.add_argument("--clean", "-c", action="store_true") + parser.add_argument("--configure_cmake", + action="store_true", + help="Always configure cmake before building") parser.add_argument("--use_ccache", "-ccache", default=False, diff --git a/setup.py b/setup.py index 810fe7b71..a3db33e18 100644 --- a/setup.py +++ b/setup.py @@ -116,13 +116,17 @@ def has_ext_modules(self): 
'libs/libtensorrt_llm_nvrtc_wrapper.so', 'libs/libdecoder_attention.so', 'bindings.*.so', - ]) + ['bindings/*.pyi', 'tools/plugin_gen/templates/*'], + ]) + [ + 'bindings/*.pyi', 'tools/plugin_gen/templates/*', + 'bench/build/benchmark_config.yml' + ], }, entry_points={ 'console_scripts': [ 'trtllm-build=tensorrt_llm.commands.build:main', 'trtllm-prune=tensorrt_llm.commands.prune:main', 'trtllm-refit=tensorrt_llm.commands.refit:main', + 'trtllm-bench=tensorrt_llm.commands.bench:main', ], }, scripts=['tensorrt_llm/hlapi/trtllm-hlapi-launch'], diff --git a/tensorrt_llm/__init__.py b/tensorrt_llm/__init__.py index 4eef01cde..8b2bd324d 100644 --- a/tensorrt_llm/__init__.py +++ b/tensorrt_llm/__init__.py @@ -48,12 +48,15 @@ def _add_trt_llm_dll_directory(): from .hlapi.llm import LLM, LlmArgs, SamplingParams from .logger import logger from .mapping import Mapping +from .models.automodel import AutoConfig, AutoModelForCausalLM from .module import Module from .network import Network, net_guard from .parameter import Parameter from .version import __version__ __all__ = [ + 'AutoConfig', + 'AutoModelForCausalLM', 'logger', 'str_dtype_to_trt', 'torch_dtype_to_trt', diff --git a/tensorrt_llm/_ipc_utils.py b/tensorrt_llm/_ipc_utils.py index dc0884055..37fc5a7f8 100644 --- a/tensorrt_llm/_ipc_utils.py +++ b/tensorrt_llm/_ipc_utils.py @@ -15,7 +15,6 @@ import array import struct import sys -from contextlib import contextmanager from typing import List, Tuple from cuda import cudart @@ -31,17 +30,7 @@ def _raise_if_error(error: cudaError_t): raise RuntimeError(error) -@contextmanager -def peer_access(mapping: Mapping): - is_p2p_supported = set_peer_access(mapping, True) - assert is_p2p_supported, "P2P access not supported" - try: - yield - finally: - set_peer_access(mapping, False) - - -def set_peer_access(mapping: Mapping, enabled: bool = True) -> bool: +def can_access_peer(mapping: Mapping) -> bool: src_node = mapping.local_rank for rank in mapping.tp_group: dest_node = 
mapping.get_local_rank(rank) @@ -56,18 +45,6 @@ def set_peer_access(mapping: Mapping, enabled: bool = True) -> bool: logger.info( f"Cannot access peer device from {src_node} to {dest_node}") return False - - if enabled: - cudart.cudaDeviceEnablePeerAccess(dest_node, 0) - else: - cudart.cudaDeviceDisablePeerAccess(dest_node) - error = cudart.cudaGetLastError()[0] - if error not in [ - cudaError_t.cudaSuccess, - cudaError_t.cudaErrorPeerAccessAlreadyEnabled, - cudaError_t.cudaErrorPeerAccessNotEnabled - ]: - raise RuntimeError(error) return True diff --git a/tensorrt_llm/bench/__init__.py b/tensorrt_llm/bench/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tensorrt_llm/bench/build/__init__.py b/tensorrt_llm/bench/build/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tensorrt_llm/bench/build/benchmark_config.yml b/tensorrt_llm/bench/build/benchmark_config.yml new file mode 100644 index 000000000..ff2b32a17 --- /dev/null +++ b/tensorrt_llm/bench/build/benchmark_config.yml @@ -0,0 +1,69 @@ +meta-llama/Llama-2-7b-hf: + tp1_pp1: + general: + max_batch_size: 4096 + max_num_tokens: 8192 +meta-llama/Llama-2-70b-hf: + tp2_pp1: + general: + max_batch_size: 2048 + max_num_tokens: 2048 + tp4_pp1: + general: + max_batch_size: 4096 + max_num_tokens: 8192 + 4224: + max_batch_size: 256 + max_num_tokens: 8192 + tp8_pp1: + general: + max_batch_size: 8192 + max_num_tokens: 16384 + 2176: + max_batch_size: 1024 + max_num_tokens: 16384 +tiiuae/falcon-180B: + tp4_pp1: + general: + max_batch_size: 4096 + max_num_tokens: 8192 + tp8_pp1: + general: + max_batch_size: 2048 + max_num_tokens: 8192 +EleutherAI/gpt-j-6b: + tp1_pp1: + general: + max_batch_size: 128 + max_num_tokens: 2048 + 256: + max_batch_size: 2048 + max_num_tokens: 2048 +meta-llama/Meta-Llama-3-8B: + tp1_pp1: + general: + max_batch_size: 2048 + max_num_tokens: 8192 +meta-llama/Meta-Llama-3-70B: + tp4_pp1: + general: + max_batch_size: 2048 + max_num_tokens: 1024 + tp8_pp1: + 
def derive_model_name(model_name):
    """Resolve a local HF checkpoint directory to its canonical model name.

    If ``model_name`` is a path to an existing directory, its HuggingFace
    config is loaded and the first architecture that appears in
    ``DEFAULT_HF_MODEL_DIRS`` is mapped to the canonical HF hub name used to
    key ``benchmark_config.yml``. Otherwise (or when no architecture is
    recognized) the input is returned unchanged.

    Args:
        model_name (str): HF hub name or path to a local checkpoint dir.

    Returns:
        str: Canonical HF model name when recognized, else ``model_name``.
    """
    model_dir = Path(model_name)
    if model_dir.exists() and model_dir.is_dir():
        hf_config = HFPretrainedConfig.from_pretrained(model_dir)
        # Return as soon as a known architecture is found; no need to keep
        # scanning (architectures lists are typically length 1 anyway).
        for arch in hf_config.architectures:
            if arch in DEFAULT_HF_MODEL_DIRS:
                return DEFAULT_HF_MODEL_DIRS[arch]
    return model_name
def get_benchmark_engine_settings(
    model_name: str,
    tp_size: int,
    pp_size: int,
    max_seq_len: int,
) -> Dict[str, int]:
    """Retrieve benchmark settings for a specific model configuration.

    Args:
        model_name (str): Huggingface model name.
        tp_size (int): Number of tensor parallel shards.
        pp_size (int): Number of pipeline parallel stages.
        max_seq_len (int): The maximum sequence length to compile the engine.

    Raises:
        ValueError: When the model_name is not supported.
        RuntimeError: When the tp_size/pp_size configuration is not found, or
            no settings entry exists for the requested sequence length.

    Returns:
        Dict[str, int]: Dictionary containing engine configuration information
        for engine build (max_num_tokens, max_batch_size).
    """
    # Load up reference configurations so that we can set the appropriate
    # settings.
    settings_yml = Path(__file__).parent / "benchmark_config.yml"
    with open(settings_yml, "r") as config_file:
        configs = yaml.safe_load(config_file)

    model_name = derive_model_name(model_name)
    # Check that the model is a supported benchmark model.
    if model_name not in configs:
        raise ValueError(
            f"'{model_name}' is not a model that is configured for benchmarking."
        )
    # Try and load the configuration TP x PP. If not valid, inform the user.
    # Keep the try body minimal: only the TP/PP lookup can raise KeyError.
    try:
        model_configs = configs[model_name][f"tp{tp_size}_pp{pp_size}"]
    except KeyError:
        # Fix: the two concatenated message fragments were missing a
        # separating space ("configuration.Please").
        raise RuntimeError(
            f"TP-{tp_size} x PP-{pp_size} is not a supported configuration. "
            "Please specify a valid benchmark configuration.")

    # Prefer a sequence-length-specific entry, fall back to "general".
    engine_settings = model_configs.get(max_seq_len)
    if engine_settings is None:
        engine_settings = model_configs.get("general")
    # Fix: previously a missing "general" entry silently returned None,
    # which would fail later with an opaque TypeError at the call site.
    if engine_settings is None:
        raise RuntimeError(
            f"No benchmark settings found for '{model_name}' at "
            f"TP-{tp_size} x PP-{pp_size} with sequence length {max_seq_len}.")

    return engine_settings
@click.command(name="build")
@optgroup.group("Engine Configuration",
                help="Configuration of the TensorRT-LLM engine.")
@optgroup.option(
    "--tp_size",
    "-tp",
    type=int,
    default=1,
    required=False,
    help="Number of tensor parallel shards to run the benchmark with.",
)
@optgroup.option(
    "--pp_size",
    "-pp",
    type=int,
    default=1,
    required=False,
    help="Number of pipeline parallel shards to run the benchmark with.",
)
@optgroup.option(
    "--dtype",
    type=click.Choice(tuple(get_args(VALID_COMPUTE_DTYPES))),
    default="auto",
    required=False,
    help="Activation and plugin data type.",
)
@optgroup.option(
    "--quantization",
    "-q",
    type=click.Choice(tuple(get_args(VALID_QUANT_ALGOS))),
    default=None,
    help=
    ("The quantization algorithm to be used when benchmarking. See the "
     "documentations for more information.\n"
     " - https://nvidia.github.io/TensorRT-LLM/precision.html"
     " - https://github.com/NVIDIA/TensorRT-LLM/blob/main/docs/source/blogs/quantization-in-TRT-LLM.md"
     ),
)
@optgroup.group(
    "Engine IFB Engine Limits",
    cls=AllOptionGroup,
    help="Runtime inflight batching scheduler limits.",
)
@optgroup.option(
    "--max_batch_size",
    default=None,
    type=int,
    help="Maximum batch size to build the benchmark engine with.",
)
@optgroup.option(
    "--max_num_tokens",
    type=int,
    default=None,
    # Fix: typo in user-facing help text ("Maximumn").
    help="Maximum number of tokens the engine can accept.",
)
@optgroup.group(
    "Engine Input Configuration",
    cls=RequiredMutuallyExclusiveOptionGroup,
    help="Input settings for configuring engine limits.",
)
@optgroup.option(
    "--dataset",
    type=click.Path(exists=True,
                    readable=True,
                    path_type=Path,
                    resolve_path=True),
    default=None,
    help="Pass in a dataset file for parsing instead of stdin.",
)
@optgroup.option("--max_seq_length",
                 type=click.IntRange(min=1),
                 default=None,
                 help="Fixed maximum sequence length for engine build.")
@click.pass_obj
def build_command(
    bench_env: BenchmarkEnvironment,
    **params,
) -> None:
    """Build engines for benchmarking.

    Args:
        bench_env (BenchmarkEnvironment): Holds the model name and workspace
            directory the engine is built into.
    """
    logger.set_level("info")

    # Collect configuration parameters from CLI parameters.
    tp_size = params.get("tp_size")
    pp_size = params.get("pp_size")
    dtype = params.get("dtype")
    quantization = params.pop("quantization")
    max_num_tokens = params.pop("max_num_tokens")
    max_batch_size = params.pop("max_batch_size")

    # Dataset options
    dataset_path: Path = params.pop("dataset")
    max_seq_len: int = params.pop("max_seq_length")
    # Non-blocking poll of stdin to detect piped-in dataset data.
    data_on_stdin: bool = bool(len(select([
        stdin,
    ], [], [], 0.0)[0]))

    # Initialize the HF tokenizer for the specified model.
    tokenizer = initialize_tokenizer(bench_env.model)

    # If we are receiving data from a path or stdin, parse and gather metadata.
    if dataset_path or data_on_stdin:
        logger.info("Found dataset.")
        # Cannot set the data file path and pipe in from stdin. Choose one.
        if dataset_path is not None and data_on_stdin:
            raise ValueError(
                "Cannot provide a dataset on both stdin and by --dataset "
                "option. Please pick one.")
        if data_on_stdin:
            metadata, _ = create_dataset_from_stream(tokenizer, stream=stdin)
        else:
            # Fix: the dataset file handle was previously opened and never
            # closed; use a context manager so it is released after parsing.
            with open(dataset_path, "r") as stream:
                metadata, _ = create_dataset_from_stream(tokenizer,
                                                         stream=stream)
        # The max sequence length option for build is the sum of max osl + isl.
        max_seq_len = metadata.max_sequence_length
        # Fix: was `metadata.get_summary_for_logger.info()`, which raises
        # AttributeError -- DatasetMetadata exposes `get_summary_for_print()`.
        logger.info(metadata.get_summary_for_print())

    # We have a specified ISL:OSL combination.
    elif max_seq_len is None:
        raise RuntimeError("Unknown input configuration. Exiting.")

    # Get the config for the engine
    config = get_benchmark_engine_settings(bench_env.model, tp_size, pp_size,
                                           max_seq_len)

    # If specified on the command line, override max batch size or max num
    # tokens from baseline config.
    max_batch_size = max_batch_size if max_batch_size is not None else config[
        "max_batch_size"]
    max_num_tokens = max_num_tokens if max_num_tokens is not None else config[
        "max_num_tokens"]

    # Construct a TRT-LLM build config.
    build_config = BuildConfig(max_batch_size=max_batch_size,
                               max_seq_len=max_seq_len,
                               max_num_tokens=max_num_tokens)

    # Set the compute quantization.
    quant_algo = QuantAlgo(quantization) if quantization is not None else None
    quant_config = QuantConfig()
    quant_config.quant_algo = quant_algo
    # If the quantization is FP8, force the KV cache dtype to FP8.
    quant_config.kv_cache_quant_algo = quant_algo.value \
        if quant_algo == QuantAlgo.FP8 else None

    # Enable multiple profiles and paged context FMHA.
    build_config.plugin_config.multiple_profiles = True
    # build_config.plugin_config._reduce_fusion = True

    # Enable FHMA, and FP8 FMHA if FP8 quantization is enabled.
    # TODO: Revisit, there is an issue with enabling FHMA. If only
    # paged FMHA is enabled with FP8 quantization, the Builder
    # will not enable the FP8 FMHA.
    build_config.plugin_config.use_paged_context_fmha = True
    build_config.plugin_config.use_fp8_context_fmha = True \
        if quant_algo == QuantAlgo.FP8 else False

    # Construct the engine path and report the engine metadata.
    model_name = derive_model_name(bench_env.model)
    engine_dir = Path(bench_env.workspace, model_name,
                      f"tp_{tp_size}_pp_{pp_size}")

    logger.info(
        "\n===========================================================\n"
        "= ENGINE BUILD INFO\n"
        "===========================================================\n"
        f"Model Name:\t\t{bench_env.model}\n"
        f"Workspace Directory:\t{bench_env.workspace}\n"
        f"Engine Directory:\t{engine_dir}\n\n"
        "===========================================================\n"
        "= ENGINE CONFIGURATION DETAILS\n"
        "===========================================================\n"
        f"Max Sequence Length:\t\t{max_seq_len}\n"
        f"Max Batch Size:\t\t\t{max_batch_size}\n"
        f"Max Num Tokens:\t\t\t{max_num_tokens}\n"
        f"Quantization:\t\t\t{quantization}\n"
        "===========================================================\n")

    # Build the LLM engine with the HLAPI.
    logger.set_level("error")
    llm = LLM(bench_env.model,
              tokenizer,
              dtype=dtype,
              tensor_parallel_size=tp_size,
              pipeline_parallel_size=pp_size,
              build_config=build_config,
              quant_config=quant_config)
    # Save the engine.
    llm.save(engine_dir)
    llm.shutdown()
    logger.set_level("info")
    logger.info(
        "\n\n===========================================================\n"
        f"ENGINE SAVED: {engine_dir}\n"
        "===========================================================\n")
class InferenceRequest(BaseModel):
    """One benchmark request: a text prompt or pre-tokenized input ("logits")
    plus the number of output tokens to generate for it."""
    task_id: int
    prompt: Optional[str] = None
    output_tokens: int
    logits: Optional[List[int]] = None

    @model_validator(mode="after")
    def verify_prompt_and_logits(self) -> InferenceRequest:
        # A request must carry at least one input form; reject empty ones.
        if self.prompt is None and self.logits is None:
            # Fix: message previously read "Both prompt and logits for X are
            # both None." (doubled "both").
            raise ValueError(
                f"Prompt and logits for {self.task_id} are both None.")
        return self
"===========================================================\n" + f"Max Input Sequence Length:\t{self.max_isl}\n" + f"Max Output Sequence Length:\t{self.max_osl}\n" + f"Max Sequence Length:\t{self.max_sequence_length}\n" + f"Number of Sequences:\t{self.num_requests}\n" + "===========================================================\n" + f"\n") diff --git a/benchmarks/suite/tensorrt_llm_bench/utils/enums.py b/tensorrt_llm/bench/enums.py similarity index 100% rename from benchmarks/suite/tensorrt_llm_bench/utils/enums.py rename to tensorrt_llm/bench/enums.py diff --git a/tensorrt_llm/bench/run/__init__.py b/tensorrt_llm/bench/run/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tensorrt_llm/bench/run/dataclasses.py b/tensorrt_llm/bench/run/dataclasses.py new file mode 100644 index 000000000..e234b4491 --- /dev/null +++ b/tensorrt_llm/bench/run/dataclasses.py @@ -0,0 +1,176 @@ +from __future__ import annotations + +from importlib.util import find_spec +from pathlib import Path +from typing import Any, List + +from pydantic import (BaseModel, Field, PositiveFloat, computed_field, + model_validator) + +import tensorrt_llm.bindings.executor as trtllm +from tensorrt_llm.bench.enums import IFBSchedulingPolicy + + +class RuntimeConfig(BaseModel): + model: str + engine_dir: Path + sw_version: str + settings_config: ExecutorSettingsConfig + world_config: ExecutorWorldConfig + + def get_config(self) -> trtllm.ExecutorConfig: + return trtllm.ExecutorConfig( + scheduler_config=self.settings_config.get_scheduler_config(), + kv_cache_config=self.settings_config.get_kvcache_config(), + parallel_config=self.world_config.get_parallel_config(), + batching_type=trtllm.BatchingType.INFLIGHT, + iter_stats_max_iterations=0, + request_stats_max_iterations=0, + max_batch_size=self.settings_config.max_batch_size, + max_num_tokens=self.settings_config.max_num_tokens, + enable_chunked_context=self.settings_config.chunking, + ) + + +class ExecutorWorldConfig(BaseModel): + 
pp_size: int = 1 + tp_size: int = 1 + world_size: int = 1 + gpus_per_node: int = 8 + leader_mode: bool = False + + @model_validator(mode="after") + def validate_world_size(self) -> ExecutorWorldConfig: + parallel_world = self.pp_size * self.tp_size + num_gpus = self.world_size * self.gpus_per_node + valid_world = bool(num_gpus >= parallel_world) + + if not valid_world: + raise ValueError( + f"World configuration is invalid, TP * PP ({parallel_world})" + "does not equal the total number of available GPUs" + f"({num_gpus}).") + + return self + + def _get_tensorrt_llm_executor_worker_path(self) -> Path: + module_path = find_spec("tensorrt_llm").loader.get_filename() + exec_path = Path(module_path).parent / 'bin' / 'executorWorker' + return exec_path.absolute() + + def get_parallel_config(self) -> trtllm.ParallelConfig: + if self.leader_mode: + comm_mode = trtllm.CommunicationMode.LEADER + orchestrator_config = None + else: + comm_mode = trtllm.CommunicationMode.ORCHESTRATOR + orchestrator_config = trtllm.OrchestratorConfig( + True, str(self._get_tensorrt_llm_executor_worker_path())) + + return trtllm.ParallelConfig( + trtllm.CommunicationType.MPI, + comm_mode, + orchestrator_config=orchestrator_config, + ) + + +class ExecutorSettingsConfig(BaseModel): + chunking: bool = True + scheduler_policy: IFBSchedulingPolicy = IFBSchedulingPolicy.MAX_UTILIZTION + max_batch_size: int + max_num_tokens: int + kv_cache_percent: PositiveFloat = Field(default=.90, le=1.0) + + def get_kvcache_config(self) -> trtllm.KvCacheConfig: + return trtllm.KvCacheConfig( + free_gpu_memory_fraction=self.kv_cache_percent, ) + + def get_scheduler_config(self) -> trtllm.SchedulerConfig: + return trtllm.SchedulerConfig( + capacity_scheduler_policy=self.scheduler_policy.value, + context_chunking_policy=trtllm.ContextChunkingPolicy. 
class PercentileStats(BaseModel):
    """Percentile summary (p50/p95/p99, min/max, mean) over a set of samples."""
    p50: float
    p95: float
    p99: float
    minimum: float
    maximum: float
    average: float

    @classmethod
    def from_iterable(cls, values: List[Any]) -> PercentileStats:
        """Build percentile statistics from a collection of samples.

        Args:
            values (List[Any]): Numeric samples to summarize.

        Raises:
            ValueError: If *values* is empty.
        """
        if not values:
            # Fix: an empty input previously raised ZeroDivisionError on the
            # average computation; fail with a clear message instead.
            raise ValueError("Cannot compute percentile stats of no samples.")
        # Fix: percentile indexing requires sorted samples. The original
        # indexed the input as given, silently producing wrong percentiles
        # for unsorted data.
        ordered = sorted(values)
        length = len(ordered)
        return cls(
            p50=ordered[int(length * 0.50)],
            p95=ordered[int(length * 0.95)],
            p99=ordered[int(length * 0.99)],
            average=float(sum(ordered)) / length,
            minimum=ordered[0],
            maximum=ordered[-1],
        )
@click.command(name="throughput")
@optgroup.group("Engine run configuration.",
                help="Runtime settings for executing a TensorRT-LLM engine.")
@optgroup.option(
    "--engine_dir",
    type=click.Path(exists=True,
                    readable=True,
                    path_type=Path,
                    resolve_path=True),
    required=True,
    help="Path to a serialized TRT-LLM engine.",
)
@optgroup.option(
    "--max_batch_size",
    type=int,
    help="Maximum runtime batch size to run the engine with.",
)
@optgroup.option(
    "--max_num_tokens",
    type=int,
    help="Maximum runtime tokens that an engine can accept.",
)
@optgroup.option(
    "--beam_width",
    type=int,
    default=1,
    help="Number of search beams.",
)
@optgroup.option(
    "--kv_cache_free_gpu_mem_fraction",
    type=float,
    default=.90,
    help="The percentage of memory to use for KV Cache after model load.",
)
@optgroup.group(
    "Engine Input Configuration",
    help="Input configuration for driving the engine.",
)
@optgroup.option(
    "--dataset",
    type=click.Path(exists=True,
                    readable=True,
                    path_type=Path,
                    resolve_path=True),
    default=None,
    help="Pass in a dataset file for parsing instead of stdin.",
)
@optgroup.option(
    "--request_rate",
    type=int,
    default=-1,
    help="Desired input request rate (number of messages per second).",
    hidden=True,
)
@optgroup.option(
    "--num_requests",
    type=int,
    default=0,
    help="Number of requests to cap benchmark run at. Minimum between value and"
    "length of dataset.",
)
@click.pass_obj
def run_command(
    bench_env: BenchmarkEnvironment,
    **params,
) -> None:
    """Run a throughput test on a TRT-LLM engine.

    Args:
        bench_env (BenchmarkEnvironment): Holds the model name and workspace
            for the benchmark run.
    """

    logger.set_level("info")
    logger.info("Preparing to run throughput benchmark...")
    # Parameters from CLI
    # Model, experiment, and engine params
    dataset_path: Path = params.pop("dataset")
    request_rate: int = params.pop("request_rate")
    num_requests: int = params.pop("num_requests")
    model: str = bench_env.model
    engine_dir: Path = params.pop("engine_dir")
    # Engine configuration parsing
    exec_settings, build_cfg = get_settings_from_engine(engine_dir)
    exec_settings["model"] = model
    engine_bs = exec_settings["settings_config"]["max_batch_size"]
    engine_tokens = exec_settings["settings_config"]["max_num_tokens"]
    engine_max_seq_len = build_cfg["max_seq_len"]

    # Runtime Options: fall back to the engine's build-time limits when no
    # runtime override is supplied on the command line.
    runtime_max_bs = params.pop("max_batch_size")
    runtime_max_bs = runtime_max_bs if runtime_max_bs else engine_bs
    runtime_max_tokens = params.pop("max_num_tokens")
    # Fix: the fallback previously used `runtime_max_bs` (a batch size)
    # instead of the requested token budget when --max_num_tokens was set,
    # and only fell back to engine_tokens otherwise.
    runtime_max_tokens = runtime_max_tokens if runtime_max_tokens \
        else engine_tokens
    kv_cache_percent = params.pop("kv_cache_free_gpu_mem_fraction")
    beam_width = params.pop("beam_width")

    # Update configuration with runtime options
    exec_settings["settings_config"]["kv_cache_percent"] = kv_cache_percent
    exec_settings["settings_config"]["max_batch_size"] = runtime_max_bs
    exec_settings["settings_config"]["max_num_tokens"] = runtime_max_tokens
    exec_settings["settings_config"]["beam_width"] = beam_width
    exec_settings["settings_config"][
        "scheduler_policy"] = IFBSchedulingPolicy.NO_EVICT
    # Construct the runtime configuration dataclass.
    runtime_config = RuntimeConfig(**exec_settings)

    # Dataset Loading and Preparation
    metadata, requests = generate_dataset_from_stream(dataset_path, model,
                                                      num_requests)
    # TODO: Verify that the engine can handle the max/min ISL/OSL.
    if metadata.max_sequence_length > engine_max_seq_len:
        # Fix: the concatenated message fragments were missing a space
        # ("tosupport").
        raise RuntimeError(
            f"Engine supports a max sequence of {engine_max_seq_len}. Provided "
            "dataset contains a maximum sequence of "
            f"{metadata.max_sequence_length}. Please rebuild a new engine to "
            "support this dataset.")
    # Convert benchmark requests into Executor requests, draining the source
    # list as we go to keep peak memory down.
    executor_requests = []
    while requests:
        request = requests.pop()
        executor_requests.append(
            get_executor_request(request, pad_id=-1, eos_id=-1))
        del request

    logger.info("Setting up benchmarker and infrastructure.")
    new_request_queue = mp.Queue()
    response_queue = mp.Queue()
    logger.set_level("error")
    benchmark = ThroughputBenchmark(
        dataset=executor_requests,
        request_rate=request_rate,
        runtime_cfg=runtime_config,
        request_queue=new_request_queue,
        response_queue=response_queue,
    )
    logger.set_level("info")
    try:
        logger.info("Ready to start benchmark.")
        benchmark.start_benchmark()
        benchmark.wait()
        benchmark.stop_benchmark()
        benchmark.report_statistics()
    except KeyboardInterrupt:
        logger.set_level("error")
        benchmark.stop_benchmark()
    finally:
        # Always release the Executor backend, even on error/interrupt.
        logger.set_level("error")
        benchmark.shutdown()
+ + Args: + runtime_cfg (RuntimeConfig): Execution runtime configuration. + response_queue (mp.Queue): Process-safe queue for passing request + responses to main process. + """ + logger.info("Initializing Executor.") + # Runtime related properties. + self.runtime_config: RuntimeConfig = runtime_cfg + self.executor = trtllm.Executor( + self.runtime_config.engine_dir, + trtllm.ModelType.DECODER_ONLY, + executor_config=self.runtime_config.get_config()) + + # Runtime tracking and multiprocessing. + self.responses = response_queue + self._shutdown = Event() + self._resp_daemon_finished = Event() + + self.response_thread = Thread(target=self.response_daemon) + self.response_thread.start() + + def enqueue(self, *requests: trtllm.Request) -> Generator[int]: + """Generate the next request identifier. + + Yields: + Generator[int]: The request identifier of the last queued request. + """ + for request in requests: + req_id = self.executor.enqueue_request(request) + yield req_id, len(request.input_token_ids) + + def stop(self) -> None: + """Stop a running manager.""" + + logger.info("Stopping response parsing.") + self._shutdown.set() + self.response_thread.join() + logger.info("Parsing stopped.") + + def shutdown(self) -> None: + """Shutdown daemon components.""" + + if self.executor is not None: + logger.info("Shutting down ExecutorServer.") + self.executor.shutdown() + + def response_daemon(self) -> None: + """Daemon method for retrieving messages from the Executor.""" + + logger.info("Starting response daemon...") + + def _process_response() -> None: + responses = self.executor.await_responses(timeout=timedelta( + milliseconds=1)) + now = monotonic_ns() + for response in responses: + # logger.info("Pushing response to queue") + self.responses.put( + ResponseRecord( + timestamp=now, + request_id=response.request_id, + has_error=response.has_error(), + is_final=response.result.is_final, + output_tokens=response.result.output_token_ids[0])) + + while not 
self._shutdown.is_set(): + _process_response() + + logger.info("Collecting last responses before shutdown.") + # Reap the last messages before shutting down + _process_response() + self._resp_daemon_finished.set() + logger.info("Completed request parsing.") + + +class ThroughputBenchmark: + """Throughput benchmark utility class.""" + + def __init__( + self, + dataset: List[trtllm.Request], + request_rate: int, + runtime_cfg: RuntimeConfig, + request_queue: mp.Queue, + response_queue: mp.Queue, + ) -> None: + """Initialize the throughput benchmark. + + Args: + dataset (List[trtllm.Request]): A dataset of TRT-LLM requests to + benchmark against. + request_rate (int): Rate to deliver input requests to the backend. + runtime_cfg (RuntimeConfig): Runtime configuration. + request_queue (mp.Queue): Process-safe queue of request identifiers + response_queue (mp.Queue): Process-safe queue for passing request + responses to main process. + """ + logger.info(f"Initializing Throughput Benchmark. [rate=%d req/s]") + # Dataset and input properties. + self.requests = dataset + self.delay_func = lambda x: sleep( + x) if request_rate > 0 else lambda x: None + self.request_delay = 1.0 / request_rate + + # Runtime configuration for Executor + self.runtime_config = deepcopy(runtime_cfg) + self.executor = None + + # Request and response reporting structures + self.new_request_queue = request_queue + self.response_queue = response_queue + + # Benchmark stats and time tracking. + self.start_time = None + self.end_time = None + self.submitted_requests = 0 + self.statistics = StatsKeeper() + + # Multiprocessing for handling request load generation + # and response parsing. 
+ self.stop = mp.Event() + self.parsing_complete = mp.Event() + self.request_thread: Thread = Thread(target=self.enqueue_process) + self.stats_process: Thread = Thread(target=self.collect_statistics) + + def enqueue_process(self) -> None: + """Method for starting enqueueing requests.""" + logger.info("Request serving started.") + + request_generator = self.executor.enqueue(*self.requests) + # Iterate the generator until we run out of requests. + # Note the walrus operator. + while ((request := next(request_generator, False)) + and not self.stop.is_set()): + self.submitted_requests += 1 + timestamp = monotonic_ns() + self.new_request_queue.put((timestamp, request[0], request[1])) + self.delay_func(self.request_delay) + logger.info("Request serving stopped.") + + def start_benchmark(self) -> None: + """Start the benchmark.""" + # Start the ExecutorManager for running the backend. + self.executor = ExecutorManager(self.runtime_config, + self.response_queue) + logger.info("Executor started.") + # Note the time we started the thread. + self.start_time = monotonic_ns() + self.request_thread.start() + # Start the statistics thread. + self.stats_process.start() + logger.info("Benchmark started.") + + def stop_benchmark(self) -> None: + """Stop the benchmark and clean up backend and threads.""" + logger.info("Stop received.") + self.stop.set() + self.executor.stop() + self.request_thread.join() + logger.info("Request generator successfully joined.") + self.stats_process.join() + logger.info("Statistics process successfully joined.") + + def shutdown(self) -> None: + """Shutdown the backend.""" + logger.info("Benchmark Shutdown called!") + if self.executor is not None: + self.executor.shutdown() + logger.info("Executor shutdown.") + + def wait(self) -> bool: + """Wait (blocking) on the benchmark. + + Returns: + bool: Return whether the event is set. 
+ """ + return self.parsing_complete.wait() + + def collect_statistics(self) -> None: + """Collect statistics (daemon method).""" + logger.info("Starting statistics collection.") + + def _process_requests() -> None: + while not self.new_request_queue.empty(): + new_request: Tuple[float, int, + int] = self.new_request_queue.get_nowait() + self.statistics.register_request(new_request[1], new_request[0], + new_request[2]) + + while not self.response_queue.empty(): + response: ResponseRecord = self.response_queue.get_nowait() + self.statistics.register_response(response) + + logger.info("Collecting live stats...") + # TODO: Revisit this conditional, if the request rate is slow enough this + # will probably prematurely trip. We will likely need a conditional that + # captures a new event for submission being complete, with the stop event + # overriding it if detected. + while not self.stop.is_set( + ) and self.statistics.num_complete < self.submitted_requests: + _process_requests() + + logger.info("Collecting last stats...") + _process_requests() + self.end_time = monotonic_ns() + self.parsing_complete.set() + logger.info("Ending statistics collection.") + + def report_statistics(self) -> None: + """Report internal statistics about benchmark.""" + + config_path = self.runtime_config.engine_dir / "config.json" + with open(config_path, "r") as config: + engine_config = json.load(config) + + stats = self.statistics.generate_statistics_summary() + rt_cfg = self.runtime_config + build_cfg = engine_config["build_config"] + pretrain_cfg = engine_config["pretrained_config"] + total_latency_s = stats.total_latency_ns / 1.0e9 + + logger.info( + "\n===========================================================\n" + "= ENGINE DETAILS\n" + "===========================================================\n" + f"Model:\t\t\t{rt_cfg.model}\n" + f"Engine Directory:\t{rt_cfg.engine_dir}\n" + f"TensorRT-LLM Version:\t{rt_cfg.sw_version}\n" + f"Dtype:\t\t\t{pretrain_cfg['dtype']}\n" + f"KV Cache 
Dtype:\t\t{pretrain_cfg['quantization']['kv_cache_quant_algo']}\n" + f"Quantization:\t\t{pretrain_cfg['quantization']['quant_algo']}\n" + f"Max Input Length:\t{build_cfg['max_input_len']}\n" + f"Max Sequence Length:\t{build_cfg['max_seq_len']}\n" + f"\n" + "===========================================================\n" + "= WORLD + RUNTIME INFORMATION \n" + "===========================================================\n" + f"TP Size:\t\t{rt_cfg.world_config.tp_size}\n" + f"PP Size:\t\t{rt_cfg.world_config.pp_size}\n" + f"Max Runtime Batch Size:\t{rt_cfg.settings_config.max_batch_size}\n" + f"Max Runtime Tokens:\t{rt_cfg.settings_config.max_num_tokens}\n" + f"Scheduling Policy:\t{rt_cfg.settings_config.scheduler_policy.values[1]}\n" + f"KV Memory Percentage:\t{rt_cfg.settings_config.kv_cache_percent * 100.0}%\n" + f"Issue Rate (req/sec):\t{stats.issue_rate_ns * 1e9}" + f"\n" + "===========================================================\n" + "= STATISTICS\n" + "===========================================================\n" + f"Number of requests:\t\t{stats.num_requests}\n" + f"Average Input Length (tokens):\t{stats.average_input_length}\n" + f"Average Output Length (tokens):\t{stats.average_output_length}\n" + f"Token Throughput (tokens/sec):\t{stats.total_output_tokens / total_latency_s}\n" + f"Request Throughput (req/sec):\t{stats.num_requests / total_latency_s}\n" + f"Total Latency (seconds):\t{total_latency_s}\n" + "===========================================================\n") diff --git a/tensorrt_llm/bench/run/utils.py b/tensorrt_llm/bench/run/utils.py new file mode 100644 index 000000000..6ed640b1b --- /dev/null +++ b/tensorrt_llm/bench/run/utils.py @@ -0,0 +1,133 @@ +from __future__ import annotations + +import json +from collections import defaultdict +from pathlib import Path +from typing import Dict, Tuple, Union + +import tensorrt_llm.bindings.executor as trtllm +from tensorrt_llm.bench.run.dataclasses import (BenchmarkStatistics, + PercentileStats, 
RequestStats, + ResponseRecord) +from tensorrt_llm.bindings import InferenceRequest + + +def get_executor_request(request: InferenceRequest, + pad_id: int, + eos_id: int, + streaming: bool = False) -> trtllm.Request: + return trtllm.Request( + input_token_ids=request.logits, + max_new_tokens=request.output_tokens, + stop_words=[], + bad_words=[], + streaming=streaming, + output_config=trtllm.OutputConfig(exclude_input_from_output=True), + pad_id=pad_id, + end_id=eos_id, + ) + + +def get_settings_from_engine( + engine_path: Path +) -> Tuple[Dict[str, Union[str, int]], Dict[str, Union[str, int]]]: + config_path = engine_path / "config.json" + runtime_config = {} + + with open(config_path, "r") as config_json: + config = json.load(config_json) + + engine_world_map = config["pretrained_config"]["mapping"] + engine_build_cfg = config["build_config"] + engine_parallel_map = engine_build_cfg["auto_parallel_config"] + + world_config = { + "pp_size": engine_world_map["pp_size"], + "tp_size": engine_world_map["tp_size"], + "world_size": engine_world_map["world_size"], + "gpus_per_node": engine_parallel_map["gpus_per_node"], + } + + executor_settings = { + "max_batch_size": engine_build_cfg["max_batch_size"], + "max_num_tokens": engine_build_cfg["max_num_tokens"], + } + + runtime_config.update({ + "sw_version": config["version"], + "engine_dir": str(engine_path.absolute()), + "settings_config": executor_settings, + "world_config": world_config, + }) + + return runtime_config, engine_build_cfg + + +class StatsKeeper: + + def __init__(self) -> None: + self.requests: RequestStats = {} + self.num_complete: int = 0 + + self._unseen_cache = defaultdict(list) + + def register_request( + self, + request_id: int, + timestamp: float, + num_tokens: int, + ) -> None: + request = RequestStats(request_id=request_id, input_tokens=num_tokens) + request.register_event(False, False, timestamp, 0) + self.requests[request_id] = request + + def register_response(self, response: ResponseRecord) -> 
None: + request_id = response.request_id + + if request_id not in self.requests: + self._unseen_cache[request_id].append(response) + else: + self.requests[request_id].register_event( + is_error=response.has_error, + is_response=True, + timestamp=response.timestamp, + num_tokens=len(response.output_tokens)) + + if response.is_final: + self.num_complete += 1 + + def generate_statistics_summary(self) -> BenchmarkStatistics: + total_output_tokens: int = 0 + total_input_tokens: int = 0 + num_requests = len(self.requests) + total_request_latency: float = 0.0 + start_time = float("inf") + end_time = -1 + + request_latencies = [] + last_queue_time = 0.0 + queue_time_total = 0.0 + + for entry in self.requests.values(): + entry.time_log.sort() + + queue_time_total += entry.time_log[0] - last_queue_time + last_queue_time = entry.time_log[0] + + request_latencies.append(entry.request_latency) + total_output_tokens += entry.num_tokens + total_input_tokens += entry.input_tokens + total_request_latency += entry.request_latency + start_time = min(start_time, entry.time_log[0]) + end_time = max(end_time, entry.time_log[-1]) + + stats = BenchmarkStatistics( + num_requests=num_requests, + total_latency_ns=end_time - start_time, + total_output_tokens=total_output_tokens, + total_input_tokens=total_input_tokens, + request_percentiles=PercentileStats.from_iterable( + request_latencies), + issue_rate_ns=queue_time_total / num_requests) + + return stats diff --git a/benchmarks/suite/tensorrt_llm_bench/utils/__init__.py b/tensorrt_llm/bench/utils/__init__.py similarity index 96% rename from benchmarks/suite/tensorrt_llm_bench/utils/__init__.py rename to tensorrt_llm/bench/utils/__init__.py index b327f7a91..e0f49a822 100644 --- a/benchmarks/suite/tensorrt_llm_bench/utils/__init__.py +++ b/tensorrt_llm/bench/utils/__init__.py @@ -1,6 +1,6 @@ import functools import os -import subprocess +import subprocess # nosec B404 from pathlib import Path from typing import Any, Callable, List, Literal @@ -10,9 +10,9 
@@ "tiiuae/falcon-180B", "meta-llama/Llama-2-7b-hf", "meta-llama/Llama-2-13b-hf", "meta-llama/Llama-2-70b-hf", "EleutherAI/gpt-j-6b", ] -VALID_COMPUTE_DTYPES = Literal["float16", "bfloat16"] +VALID_COMPUTE_DTYPES = Literal["auto", "float16", "bfloat16"] VALID_CACHE_DTYPES = Literal["float16", "float8", "int8"] -VALID_QUANT_ALGOS = Literal["None", f"{QuantAlgo.W8A16}", f"{QuantAlgo.W4A16}", +VALID_QUANT_ALGOS = Literal[f"{QuantAlgo.W8A16}", f"{QuantAlgo.W4A16}", f"{QuantAlgo.W4A16_AWQ}", f"{QuantAlgo.W4A8_AWQ}", f"{QuantAlgo.W4A16_GPTQ}", f"{QuantAlgo.FP8}", f"{QuantAlgo.INT8}"] diff --git a/tensorrt_llm/bench/utils/data.py b/tensorrt_llm/bench/utils/data.py new file mode 100644 index 000000000..4f6380325 --- /dev/null +++ b/tensorrt_llm/bench/utils/data.py @@ -0,0 +1,137 @@ +import json +import sys +from functools import partial +from pathlib import Path +from select import select +from typing import List, TextIO, Tuple + +from transformers import AutoTokenizer, PreTrainedTokenizer + +from tensorrt_llm.bench.dataclasses import DatasetMetadata, InferenceRequest + + +def generate_dataset_from_stream(dataset_path: Path, + model: str, + num_requests: int = 0): + # Check for data on stdin. + data_on_stdin: bool = bool(len(select([ + sys.stdin, + ], [], [], 0.0)[0])) + + # Cannot set the data file path and pipe in from stdin. Choose one. + if dataset_path is not None and data_on_stdin: + raise ValueError( + "Cannot provide a dataset on both stdin and by --dataset option. " + "Please pick one.") + # If we are receiving data from a path or stdin, parse and gather metadata. + stream = sys.stdin if data_on_stdin else open(dataset_path, "r") + tokenizer = initialize_tokenizer(model) + # Parse the dataset from stdin and return it plus its metadata. 
+ metadata, requests = \ + create_dataset_from_stream( + tokenizer, + stream=stream, + num_requests=num_requests + ) + + return metadata, requests + + +def initialize_tokenizer(model_name: str) -> PreTrainedTokenizer: + """Initialize a tokenizer. + + Args: + model_name (str): The name of the HuggingFace model to pull a + tokenizer from. + + Returns: + PreTrainedTokenizer: An initialized HuggingFace tokenizer. + """ + # Initialize the tokenizer specific to the model that we are planning + # to benchmark. + tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side="left") + if tokenizer.pad_token_id is None: + tokenizer.add_special_tokens({"pad_token": "[PAD]"}) + + return tokenizer + + +def create_dataset_from_stream( + tokenizer: PreTrainedTokenizer, + max_input_length: int = 0, + max_output_length: int = 0, + stream: TextIO = sys.stdin, + num_requests: int = 0, +) -> Tuple[DatasetMetadata, List[InferenceRequest]]: + """Generate metadata and a list of requests to drive benchmarking. + + Args: + tokenizer (PreTrainedTokenizer): HuggingFace tokenizer. + max_input_length (int): Maximum input length to cap prompts to. + + Returns: + DatasetMetadata: Dataclass of dataset statistics. + List[InferenceRequest]: A list of inference requests for benchmarking. + """ + # Initialize dataset list, and metadata tracking variables. + dataset = [] + max_isl = 0 + max_osl = 0 + max_sequence = 0 + max_requests = num_requests if num_requests > 0 else float("inf") + + # If we're limiting the input length to a certain size, then set up + # a partial to truncate the data down to size. Otherwise, just use the + # unmodified tokenizer callable. + tokenize = (partial( + tokenizer, + padding="max_length", + max_length=max_input_length, + truncation=True, + ) if max_input_length > 0 else tokenizer) + + # If we need to limit the output length, fill in a partial callable + # for max, otherwise a lambda that just returns x with no bounds. 
+ output_limiter = (partial(max, max_output_length) + if max_output_length > 0 else lambda x: x) + + # For each line in the standard input, parse out the JSON string we expect + # to see. + # Note the := walrus -- we're assigning and checking the condition. + while (line := stream.readline()) and len(dataset) < max_requests: + # We expect the data to come in as a JSON string. + # For example: + # {"prompt": "Generate an infinite response to the following: + # There once was a man who.", "output_tokens": 1000} + # Each line should be a complete JSON dictionary with no indentation + # or newline characters. + data = json.loads(line) + logits = data.get("logits", None) + prompt = data.get("prompt", None) + task_id = data["task_id"] + osl = data["output_tokens"] + # If the request comes in with logits, just use the provided. + # Otherwise we need to tokenize it. + logits = tokenize(prompt)["input_ids"] if logits is None else logits + + request = InferenceRequest( + task_id=task_id, + prompt=prompt, + output_tokens=output_limiter(osl), + logits=logits, + ) + max_isl = max(max_isl, len(logits)) + max_osl = max(max_osl, osl) + max_sequence = max(max_sequence, len(logits) + osl) + dataset.append(request) + + # Fill in basic dataset metrics here + # TODO: Maybe fill this out to be more complete? 
+ metadata = DatasetMetadata( + max_isl=max_isl, + max_osl=max_osl, + max_sequence_length=max_sequence, + num_requests=len(dataset), + ) + + return metadata, dataset diff --git a/tensorrt_llm/bench/utils/tokenize.py b/tensorrt_llm/bench/utils/tokenize.py new file mode 100644 index 000000000..44f04df56 --- /dev/null +++ b/tensorrt_llm/bench/utils/tokenize.py @@ -0,0 +1,105 @@ +import json +import sys +from functools import partial +from typing import List, TextIO, Tuple + +from transformers import AutoTokenizer, PreTrainedTokenizer + +from tensorrt_llm.bench.dataclasses import DatasetMetadata, InferenceRequest + + +def initialize_tokenizer(model_name: str) -> PreTrainedTokenizer: + """Initialize a tokenizer. + + Args: + model_name (str): The name of the HuggingFace model to pull a + tokenizer from. + + Returns: + PreTrainedTokenizer: An initialized HuggingFace tokenizer. + """ + # Initialize the tokenizer specific to the model that we are planning + # to benchmark. + tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side="left") + if tokenizer.pad_token_id is None: + tokenizer.add_special_tokens({"pad_token": "[PAD]"}) + + return tokenizer + + +def create_dataset_from_stream( + tokenizer: PreTrainedTokenizer, + max_input_length: int = 0, + max_output_length: int = 0, + stream: TextIO = sys.stdin, +) -> Tuple[DatasetMetadata, List[InferenceRequest]]: + """Generate metadata and a list of requests to drive benchmarking. + + Args: + tokenizer (PreTrainedTokenizer): HuggingFace tokenizer. + max_input_length (int): Maximum input length to cap prompts to. + + Returns: + DatasetMetadata: Dataclass of dataset statistics. + List[InferenceRequest]: A list of inference requests for benchmarking. + """ + # Initialize dataset list, and metadata tracking variables. + dataset = [] + max_isl = 0 + max_osl = 0 + max_sequence = 0 + + # If we're limiting the input length to a certain size, then set up + # a partial to truncate the data down to size. 
Otherwise, just use the + # unmodified tokenizer callable. + tokenize = (partial( + tokenizer, + padding="max_length", + max_length=max_input_length, + truncation=True, + ) if max_input_length > 0 else tokenizer) + + # If we need to limit the output length, fill in a partial callable + # for max, otherwise a lambda that just returns x with no bounds. + output_limiter = (partial(max, max_output_length) + if max_output_length > 0 else lambda x: x) + + # For each line in the standard input, parse out the JSON string we expect + # to see. + # Note the := walrus -- we're assigning and checking the condition. + while line := stream.readline(): + # We expect the data to come in as a JSON string. + # For example: + # {"prompt": "Generate an infinite response to the following: There once was a man who.", "output_tokens": 1000} + # Each line should be a complete JSON dictionary with no indentation + # or newline characters. + data = json.loads(line) + logits = data.get("logits", None) + prompt = data.get("prompt", None) + task_id = data["task_id"] + osl = data["output_tokens"] + # If the request comes in with logits, just use the provided. + # Otherwise we need to tokenize it. + logits = tokenize(prompt)["input_ids"] if logits is None else logits + + request = InferenceRequest( + task_id=task_id, + prompt=prompt, + output_tokens=output_limiter(osl), + logits=logits, + ) + max_isl = max(max_isl, len(logits)) + max_osl = max(max_osl, osl) + max_sequence = max(max_sequence, len(logits) + osl) + dataset.append(request) + + # Fill in basic dataset metrics here + # TODO: Maybe fill this out to be more complete? 
+ metadata = DatasetMetadata( + max_isl=max_isl, + max_osl=max_osl, + max_sequence_length=max_sequence, + num_requests=len(dataset), + ) + + return metadata, dataset diff --git a/tensorrt_llm/commands/bench.py b/tensorrt_llm/commands/bench.py new file mode 100644 index 000000000..9c48dac3f --- /dev/null +++ b/tensorrt_llm/commands/bench.py @@ -0,0 +1,42 @@ +from pathlib import Path + +import click + +from tensorrt_llm.bench.build.build import build_command +from tensorrt_llm.bench.dataclasses import BenchmarkEnvironment +from tensorrt_llm.bench.run.run import run_command + + +@click.group(name="trtllm-bench", context_settings={'show_default': True}) +@click.option( + "--model", + "-m", + required=True, + type=str, + help="The Huggingface name of the model to benchmark.", +) +@click.option( + "--workspace", + "-w", + required=False, + type=click.Path(writable=True, readable=True), + default="/tmp", # nosec B108 + help="The directory to store benchmarking intermediate files.", +) +@click.pass_context +def main( + ctx, + model: str, + workspace: Path, +) -> None: + ctx.obj = BenchmarkEnvironment(model=model, workspace=workspace) + + # Create the workspace where we plan to store intermediate files. + ctx.obj.workspace.mkdir(parents=True, exist_ok=True) + + +main.add_command(build_command) +main.add_command(run_command) + +if __name__ == "__main__": + main() diff --git a/tensorrt_llm/commands/build.py b/tensorrt_llm/commands/build.py index d78a93be0..8cd5c7264 100644 --- a/tensorrt_llm/commands/build.py +++ b/tensorrt_llm/commands/build.py @@ -33,6 +33,7 @@ from tensorrt_llm.models import MODEL_MAP, PretrainedConfig from tensorrt_llm.models.modeling_utils import SpeculativeDecodingMode from tensorrt_llm.plugin import PluginConfig, add_plugin_argument +from tensorrt_llm.quantization.mode import QuantAlgo def parse_arguments(): @@ -131,15 +132,6 @@ def parse_arguments(): help= 'Deprecated. 
Set this option to enable is equvilient to `--kv_cache_type paged` for transformer based models.' ) - parser.add_argument( - '--use_fused_mlp', - default=False, - action='store_true', - help= - 'Enable horizontal fusion in GatedMLP, reduces layer input traffic and potentially improves performance. ' - 'For FP8 PTQ, the downside is slight reduction of accuracy because one of the quantization scaling factors is discarded. ' - '(An example for reference only: 0.45734 vs 0.45755 for LLaMA-v2 7B using `modelopt/examples/hf/instruct_eval/mmlu.py`).' - ) parser.add_argument( '--gather_all_token_logits', action='store_true', @@ -443,7 +435,7 @@ def main(): kwargs = { 'logits_dtype': args.logits_dtype, - 'use_fused_mlp': args.use_fused_mlp, + 'use_fused_mlp': plugin_config.use_fused_mlp, 'cp_size': args.cp_size, 'tp_size': args.tp_size, 'pp_size': args.pp_size, @@ -466,6 +458,11 @@ def main(): model_config = PretrainedConfig.from_json_file(config_path) + # avoid ValueError if not supported quantization is chosen with use_fused_mlp + quant_algo = model_config.quantization.quant_algo + if quant_algo and quant_algo != QuantAlgo.FP8: + kwargs['use_fused_mlp'] = False + if args.build_config is None: if args.multiple_profiles == "enable" and args.opt_num_tokens is not None: raise RuntimeError( diff --git a/tensorrt_llm/executor.py b/tensorrt_llm/executor.py index c562d7943..3ba8f6b2d 100644 --- a/tensorrt_llm/executor.py +++ b/tensorrt_llm/executor.py @@ -1,6 +1,7 @@ import asyncio import atexit import datetime +import json import math import secrets import threading @@ -369,7 +370,7 @@ async def aget_stats(self): @staticmethod def create( - engine_dir: Path, + engine_object_or_path: Union[Path, "Engine"], executor_config: tllm.ExecutorConfig = tllm.ExecutorConfig(1), model_world_size: int = 1, world_size: int = 0, @@ -387,7 +388,7 @@ def create( f"on {world_size} ranks.") worker_kwargs = { - "engine_dir": engine_dir, + "engine_object_or_path": engine_object_or_path, 
"executor_config": executor_config, } @@ -412,7 +413,7 @@ class WorkerExit(GeneratorExit): def __init__( self, - engine_dir: Path, + engine_object_or_path: Union[Path, "Engine"], executor_config: tllm.ExecutorConfig = tllm.ExecutorConfig(1), ) -> None: super().__init__() @@ -422,10 +423,17 @@ def __init__( self._pending: set = set() self.result_queue = None self.rank = mpi_rank() - - self.engine = tllm.Executor(engine_dir, - tllm.ModelType.DECODER_ONLY, - executor_config=executor_config) + from .builder import Engine + if isinstance(engine_object_or_path, Engine): + self.engine = tllm.Executor( + engine_object_or_path.engine, + json.dumps(engine_object_or_path.config.to_dict()), + tllm.ModelType.DECODER_ONLY, + executor_config=executor_config) + else: + self.engine = tllm.Executor(engine_object_or_path, + tllm.ModelType.DECODER_ONLY, + executor_config=executor_config) self.awaiter_stop_event = threading.Event() self.awaiter_thread = threading.Thread(target=self.awaiter_loop, daemon=True) @@ -678,7 +686,7 @@ def __init__( @print_traceback_on_error @staticmethod def workers_main( - engine_dir: Path, + engine_object_or_path: Union[Path, "Engine"], request_queue_addr: Tuple[str, int, bytes], request_id_queue_addr: Tuple[str, int, bytes], result_queue_addr: Tuple[str, int, bytes], @@ -699,7 +707,8 @@ def workers_main( # TODO[chunweiy]: fix the non-rank0 process failure init_ok = True try: - executor = ExecutorBindingsWorker(engine_dir, executor_config) + executor = ExecutorBindingsWorker(engine_object_or_path, + executor_config) except Exception as e: init_ok = False raise e diff --git a/tensorrt_llm/functional.py b/tensorrt_llm/functional.py index 374f05f18..741ed8eb9 100644 --- a/tensorrt_llm/functional.py +++ b/tensorrt_llm/functional.py @@ -4913,8 +4913,7 @@ def gpt_attention( tp_rank = trt.PluginField("tp_rank", np.array(tp_rank, dtype=np.int32), trt.PluginFieldType.INT32) kv_cache_quant_mode_field = trt.PluginField( - "kv_cache_quant_mode", - 
np.array(np.int8(kv_cache_quant_mode), dtype=np.int32), + "kv_cache_quant_mode", np.array(kv_cache_quant_mode, dtype=np.int32), trt.PluginFieldType.INT32) paged_kv_cache = trt.PluginField( "paged_kv_cache", np.array(paged_kv_cache_flag, dtype=np.int32), @@ -5519,7 +5518,6 @@ def lora_plugin( transa: bool = False, transb: bool = False, host_context_lengths: Tensor = None, # for pad-free input mode - max_num_tokens: int = 0, max_low_rank: int = 0, lora_ranks: List[Tensor] = None, lora_weights_pointers: List[Tensor] = None, @@ -5550,9 +5548,6 @@ def lora_plugin( host_context_lengths: cpu Tensor = None A host tensor that contains the lengths of the different inputs, - max_num_tokens : int - Maximum number of tokens, used to determine the workspace size. - max_low_rank : int Maximum low_rank, used to determine the workspace size. @@ -5600,9 +5595,6 @@ def lora_plugin( "remove_input_padding", np.array(np.int8(default_net().plugin_config.remove_input_padding), dtype=np.int8), trt.PluginFieldType.INT8) - max_num_tokens_field = trt.PluginField( - "max_num_tokens", np.array(max_num_tokens, dtype=np.int32), - trt.PluginFieldType.INT32) max_low_rank_field = trt.PluginField("max_low_rank", np.array(max_low_rank, dtype=np.int32), trt.PluginFieldType.INT32) @@ -5616,8 +5608,7 @@ def lora_plugin( pfc = trt.PluginFieldCollection([ in_hidden_size_field, transa, transb, num_lora_modules_field, pf_type, - remove_input_padding, max_num_tokens_field, max_low_rank_field, - weight_index_field + remove_input_padding, max_low_rank_field, weight_index_field ] + out_hidden_size_field_list) lora_plug = plg_creator.create_plugin("lora", pfc) diff --git a/tensorrt_llm/hlapi/llm.py b/tensorrt_llm/hlapi/llm.py index 2b5e8288d..8725bf382 100644 --- a/tensorrt_llm/hlapi/llm.py +++ b/tensorrt_llm/hlapi/llm.py @@ -261,7 +261,8 @@ def _build_model(self): if self.args.decoding_config is not None: executor_config.decoding_config = self.args.decoding_config if self.args.logits_post_processor_map: - 
executor_config.logits_post_processor_map = self.args.logits_post_processor_map + executor_config.logits_post_processor_config = tllm.LogitsPostProcessorConfig( + processor_map=self.args.logits_post_processor_map) executor_config.normalize_log_probs = self.args.normalize_log_probs executor_config.enable_chunked_context = self.args.enable_chunked_context executor_config.max_beam_width = self.args.build_config.max_beam_width diff --git a/tensorrt_llm/hlapi/llm_utils.py b/tensorrt_llm/hlapi/llm_utils.py index 9383ccd01..cb6c6da2e 100644 --- a/tensorrt_llm/hlapi/llm_utils.py +++ b/tensorrt_llm/hlapi/llm_utils.py @@ -46,9 +46,8 @@ from ..builder import BuildConfig, Engine, EngineConfig, build from ..logger import logger from ..mapping import Mapping -from ..models import MODEL_MAP -from ..models.modeling_utils import (PretrainedConfig, QuantAlgo, QuantConfig, - TopModelMixin) +from ..models.automodel import MODEL_MAP, AutoConfig, AutoModelForCausalLM +from ..models.modeling_utils import PretrainedConfig, QuantAlgo, QuantConfig from ..module import Module from .build_cache import (BuildCache, BuildCacheConfig, CachedStage, get_build_cache_config_from_env) @@ -283,6 +282,11 @@ def __post_init__(self): # The underlying implementation might disable it if it is not supported. self.enable_chunked_context: bool = False + # TODO[chunweiy]: Enable this option in the future + # Currently we want HLAPI to be consistent with the lower APIs in the model building, thus disable this to avoid + # magics. + self.perform_config_arbitration = False + if self.skip_tokenizer_init: self.tokenizer = None else: @@ -386,6 +390,14 @@ def setup(self): self.build_config = self.build_config or BuildConfig() + if self.perform_config_arbitration: + self._perform_config_arbitration() + + def _perform_config_arbitration(self): + ''' + Arbitrate the configurations for the model building. 
The configs between different functional or performance + features might be confilcted, and this method will arbitrate the conflicts and raise errors if necessary. + ''' self._config_arbitrator = _ConfigArbitrator() if self.build_config_mutable: if not self.build_config.max_num_tokens: @@ -415,6 +427,8 @@ def setup(self): kv_cache_config=self.kv_cache_config, build_config=self.build_config) + self._config_arbitrator = None + def _check_model_or_model_dir(self): if not self.model: raise ValueError("model should be provided.") @@ -598,7 +612,8 @@ def __setstate__(self, state): def __getstate__(self): state = self.__dict__.copy() - del state['_config_arbitrator'] + if '_config_arbitrator' in state: + del state['_config_arbitrator'] return state @@ -1012,19 +1027,7 @@ def _download_hf_model(self): def _load_model_from_hf(self): ''' Load a TRT-LLM model from a HF model. ''' assert self._model_dir is not None - - import transformers - hf_config = transformers.AutoConfig.from_pretrained( - self._model_dir, trust_remote_code=True) - architecture = hf_config.architectures[0] - - if architecture not in MODEL_MAP: - raise KeyError(f"Unsupported model architecture: {architecture}") - model_cls = MODEL_MAP[architecture] - if TopModelMixin.__name__ in model_cls.from_hugging_face.__qualname__: - raise NotImplementedError( - f"Unsupported model architecture in HLAPI: {architecture}") - + model_cls = AutoModelForCausalLM.get_trtllm_model_class(self._model_dir) if self.llm_args.quant_config.requires_calibration: assert self.workspace is not None checkpoint_dir = f"{self.workspace}/quantized-checkpoint" @@ -1061,6 +1064,7 @@ def _load_model_from_ckpt(self): os.path.join(self._model_dir, 'config.json')) self.pretrained_config.mapping = self.mapping + #TODO: TRTLLM-1091, change the architecture in the checkpoint to TRT-LLM one, not HF one. 
architecture = self.pretrained_config.architecture assert architecture in MODEL_MAP, \ f"Unsupported model architecture: {architecture}" @@ -1257,16 +1261,7 @@ def get_final_build_config(llm_args: LlmArgs, # The build() doesn't need the real model instance to get a updated BuildConig. What is really needed is the # dtype. That's why the model will be downloaded from HF if necessary to get the accurate dtype. - import transformers - hf_config = transformers.AutoConfig.from_pretrained( - model_dir, trust_remote_code=True) - architecture = hf_config.architectures[0] - - if architecture not in MODEL_MAP: - raise KeyError(f"Unsupported model architecture: {architecture}") - model_cls = MODEL_MAP[architecture] - config_cls = model_cls.config_class - pretrained_config = config_cls.from_hugging_face( + pretrained_config = AutoConfig.from_hugging_face( model_dir, mapping=Mapping(world_size=llm_args.parallel_config.world_size, tp_size=llm_args.parallel_config.tp_size, diff --git a/tensorrt_llm/hlapi/utils.py b/tensorrt_llm/hlapi/utils.py index de0b23dc1..6aaf28119 100644 --- a/tensorrt_llm/hlapi/utils.py +++ b/tensorrt_llm/hlapi/utils.py @@ -1,6 +1,5 @@ import hashlib import os -import signal import sys import tempfile import traceback @@ -326,15 +325,6 @@ def register(self, obj: Any): exception_handler = ExceptionHandler() sys.excepthook = exception_handler - -def sigint_handler(signal, frame): - sys.stderr.write("\nSIGINT received, quit LLM!\n") - sys.exit(1) - - -# Register the signal handler to handle SIGINT -# This helps to deal with user's Ctrl+C -signal.signal(signal.SIGINT, sigint_handler) # Use the system temporary directory to share the cache temp_dir = tempfile.gettempdir() diff --git a/tensorrt_llm/layers/attention.py b/tensorrt_llm/layers/attention.py index a093d006f..6256a4ef7 100644 --- a/tensorrt_llm/layers/attention.py +++ b/tensorrt_llm/layers/attention.py @@ -728,7 +728,6 @@ def forward(self, ], host_request_types=q_lora_params.host_request_types, 
host_context_lengths=q_lora_params.host_context_lengths, - max_num_tokens=q_lora_params.max_num_tokens, max_encoder_context_length=q_lora_params. max_encoder_context_length, host_encoder_input_lengths=q_lora_params. @@ -1470,8 +1469,7 @@ def forward(self, v_lora_params.lora_weights_pointers[0], ], host_request_types=q_lora_params.host_request_types, - host_context_lengths=q_lora_params.host_context_lengths, - max_num_tokens=q_lora_params.max_num_tokens) + host_context_lengths=q_lora_params.host_context_lengths) q_lora, k_lora, v_lora = self.qkv_lora(hidden_states, qkv_lora_params) diff --git a/tensorrt_llm/layers/embedding.py b/tensorrt_llm/layers/embedding.py index 8ade20b07..869a05a43 100644 --- a/tensorrt_llm/layers/embedding.py +++ b/tensorrt_llm/layers/embedding.py @@ -44,8 +44,7 @@ def __init__(self, tp_size: int = 1, tp_group: Optional[list] = None, sharding_dim: int = 0, - tp_rank: Optional[int] = None, - share_embedding_table: bool = False): + tp_rank: Optional[int] = None): super().__init__() # num_embeddings records the total vocab size no matter using TP or not self.num_embeddings = num_embeddings @@ -56,7 +55,6 @@ def __init__(self, self.tp_rank = tp_rank self.dtype = dtype self.tp_dim = sharding_dim - self.share_embedding_table = share_embedding_table if sharding_dim == 1: self.weight = Parameter(shape=(self.num_embeddings, @@ -91,11 +89,12 @@ def weight_loader(self, mapping: Mapping, param: Parameter, shard_size) param.value = loaded_weight - def postprocess(self, tllm_key, weights): + def postprocess(self, tllm_key, weights, **kwargs): + config = kwargs.get("config", None) if weights is None: return {} weights = weights.to(str_dtype_to_torch(self.dtype)) - if self.share_embedding_table: + if config.share_embedding_table: return {} else: weights = weights.clone() @@ -119,10 +118,9 @@ def __init__(self, tp_size=1, tp_group=None, sharding_dim=0, - tp_rank=0, - share_embedding_table=False): + tp_rank=0): super().__init__(num_embeddings, embedding_dim, 
dtype, tp_size, - tp_group, sharding_dim, tp_rank, share_embedding_table) + tp_group, sharding_dim, tp_rank) if vocab_size is None: vocab_size = num_embeddings self.vocab_size = vocab_size diff --git a/tensorrt_llm/layers/linear.py b/tensorrt_llm/layers/linear.py index 0735df1e8..8a150c3c4 100644 --- a/tensorrt_llm/layers/linear.py +++ b/tensorrt_llm/layers/linear.py @@ -342,18 +342,34 @@ def collect_and_bias(self, x, **kwargs): return x - def postprocess(self, - tllm_key, - weights, - using_head_as_leading_dim=False, - num_heads=-1): + def postprocess(self, tllm_key, weights, **kwargs): + using_head_as_leading_dim = kwargs.get("using_head_as_leading_dim", + False) + config = kwargs.get("config", None) if self.is_qkv: if isinstance(weights, list): + if config.remove_duplicated_kv_heads: + head_size = config.hidden_size // config.num_heads if config.head_size is None else config.head_size + k, v = weights[1:] + k = k.reshape([ + k.shape[0] // head_size // 2, 2, head_size, + self.in_features + ]) + v = v.reshape([ + v.shape[0] // head_size // 2, 2, head_size, + self.in_features + ]) + assert (k[:, 0] == k[:, 1]).all() + assert (v[:, 0] == v[:, 1]).all() + k = k[:, 0].reshape([-1, self.in_features]) + v = v[:, 0].reshape([-1, self.in_features]) + weights[1] = k + weights[2] = v weights = torch.cat(weights) if using_head_as_leading_dim: # Reorder [n_head, 3, head_dim, ...] into [3, n_head, head_dim, ...] 
- head_dim = self.out_features // (3 * num_heads) - w = weights.reshape(num_heads, 3, head_dim, -1) + head_dim = self.out_features // (3 * config.num_heads) + w = weights.reshape(config.num_heads, 3, head_dim, -1) w = w.transpose(0, 1) if w.shape[-1] > 1: weights = w.reshape(-1, self.in_features) # Weight diff --git a/tensorrt_llm/layers/lora.py b/tensorrt_llm/layers/lora.py index 6de43e42c..5ad9cac72 100644 --- a/tensorrt_llm/layers/lora.py +++ b/tensorrt_llm/layers/lora.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import List, Optional +from typing import List from .._common import default_net from ..functional import Tensor, lora_plugin @@ -28,7 +28,6 @@ def __init__( lora_weights_pointers: List[Tensor] = None, host_request_types: Tensor = None, host_context_lengths: Tensor = None, - max_num_tokens: Optional[int] = None, max_encoder_context_length: Tensor = None, host_encoder_input_lengths: Tensor = None, weight_index: int = 0, @@ -38,7 +37,6 @@ def __init__( self.lora_weights_pointers = lora_weights_pointers self.host_request_types = host_request_types self.host_context_lengths = host_context_lengths - self.max_num_tokens = max_num_tokens self.max_encoder_context_length = max_encoder_context_length self.host_encoder_input_lengths = host_encoder_input_lengths self.weight_index = weight_index @@ -71,10 +69,6 @@ def forward(self, host_context_lengths=lora_runtime_params.host_context_lengths if not is_cross_attention else lora_runtime_params.host_encoder_input_lengths, - # For cross attention, max_encoder_context_length should be used instead of max_num_tokens - max_num_tokens=lora_runtime_params.max_num_tokens - if not is_cross_attention else - lora_runtime_params.max_encoder_context_length, max_low_rank=self.max_low_rank, lora_ranks=lora_runtime_params.lora_ranks, lora_weights_pointers=lora_runtime_params.lora_weights_pointers, @@ -93,7 +87,6 @@ def __init__( 
lora_ranks=None, # : List[dict[Tensor]] lora_weights_pointers=None, # : List[dict[Tensor]] host_context_lengths: Tensor = None, - max_num_tokens: Optional[int] = None, max_encoder_context_length: Tensor = None, # For cross attention host_request_types: Tensor = None, host_encoder_input_lengths: Tensor = None, # For cross attention @@ -104,7 +97,6 @@ def __init__( self.lora_weights_pointers = lora_weights_pointers self.host_context_lengths = host_context_lengths - self.max_num_tokens = max_num_tokens self.max_encoder_context_length = max_encoder_context_length self.host_request_types = host_request_types self.host_encoder_input_lengths = host_encoder_input_lengths @@ -115,7 +107,6 @@ def get_layer_params(self, layer_idx: int): lora_ranks=[self.lora_ranks[layer_idx]], lora_weights_pointers=[self.lora_weights_pointers[layer_idx]], host_context_lengths=self.host_context_lengths, - max_num_tokens=self.max_num_tokens, max_encoder_context_length=self.max_encoder_context_length, host_request_types=self.host_request_types, host_encoder_input_lengths=self.host_encoder_input_lengths, @@ -133,7 +124,6 @@ def get_runtime_params(self, layer_idx: int, lora_module: str): [f"{lora_module}_lora_weights_pointers"] ], host_context_lengths=self.host_context_lengths, - max_num_tokens=self.max_num_tokens, max_encoder_context_length=self.max_encoder_context_length, host_request_types=self.host_request_types, host_encoder_input_lengths=self.host_encoder_input_lengths, diff --git a/tensorrt_llm/layers/mlp.py b/tensorrt_llm/layers/mlp.py index 77066bd24..25056d757 100644 --- a/tensorrt_llm/layers/mlp.py +++ b/tensorrt_llm/layers/mlp.py @@ -46,8 +46,7 @@ def fc_gate_lora(hidden_states, lora, lora_layer_params): mlp_gate_lora_params.lora_weights_pointers[0] ], host_request_types=mlp_fc_lora_params.host_request_types, - host_context_lengths=mlp_fc_lora_params.host_context_lengths, - max_num_tokens=mlp_fc_lora_params.max_num_tokens) + host_context_lengths=mlp_fc_lora_params.host_context_lengths) 
mlp_fc_lora, mlp_gate_lora = lora(hidden_states, mlp_in_lora_params) mlp_in_result = concat([mlp_gate_lora, mlp_fc_lora], diff --git a/tensorrt_llm/layers/moe.py b/tensorrt_llm/layers/moe.py index 9fa96a12e..40a6b744a 100644 --- a/tensorrt_llm/layers/moe.py +++ b/tensorrt_llm/layers/moe.py @@ -337,7 +337,7 @@ def __init__(self, in_features: int, out_features: int, self.register_parameter('activation_scaling_factor', None) self.register_parameter('weights_scaling_factor', None) - def postprocess(self, tllm_key, weights): + def postprocess(self, tllm_key, weights, **kwargs): if tllm_key.endswith("weight"): if isinstance(weights, torch.Tensor): weights = [weights] @@ -667,7 +667,6 @@ def get_params(module): gate_lora_weights_pointers, }], host_context_lengths=lora_layer_params.host_context_lengths, - max_num_tokens=lora_layer_params.max_num_tokens, max_encoder_context_length=lora_layer_params. max_encoder_context_length, host_request_types=lora_layer_params.host_request_types, diff --git a/tensorrt_llm/layers/ssm.py b/tensorrt_llm/layers/ssm.py index 05ebab307..6feb9a2d0 100644 --- a/tensorrt_llm/layers/ssm.py +++ b/tensorrt_llm/layers/ssm.py @@ -21,7 +21,7 @@ permute, selective_scan, shape, split, view) from ..module import Module from ..parameter import Parameter -from .linear import Linear +from .linear import ColumnLinear, Linear, RowLinear from .normalization import RmsNorm @@ -240,32 +240,40 @@ def __init__(self, chunk_size=256, bias=False, rmsnorm=True, - dtype=None): + dtype=None, + tp_group=None, + tp_size=1): super().__init__() self.d_model = d_model self.d_state = d_state self.d_conv = d_conv - self.d_inner = d_inner + assert d_inner % tp_size == 0 + self.d_inner = d_inner // tp_size self.headdim = headdim - self.ngroups = ngroups + assert ngroups % tp_size == 0 + self.ngroups = ngroups // tp_size self.chunk_size = chunk_size self.rmsnorm = rmsnorm self.dtype = dtype - assert self.d_inner % self.headdim == 0 - self.nheads = self.d_inner // self.headdim + 
assert d_inner % headdim == 0 + nheads = d_inner // headdim + assert nheads % tp_size == 0 + self.nheads = nheads // tp_size self.A = Parameter(shape=(self.nheads, ), dtype="float32") self.D = Parameter(shape=(self.nheads, ), dtype="float32") self.dt_bias = Parameter(shape=(self.nheads, ), dtype="float32") - d_in_proj = 2 * self.d_inner + 2 * self.ngroups * self.d_state + self.nheads - self.in_proj = Linear(self.d_model, - d_in_proj, - bias=bias, - dtype=dtype, - gather_output=False) + d_in_proj = 2 * d_inner + 2 * ngroups * d_state + nheads + self.in_proj = ColumnLinear(d_model, + d_in_proj, + bias=bias, + dtype=dtype, + tp_group=tp_group, + tp_size=tp_size, + gather_output=False) - self.conv_dim = self.d_inner + 2 * self.ngroups * self.d_state + self.conv_dim = (d_inner + 2 * ngroups * d_state) // tp_size self.conv1d = MambaConv1d(self.conv_dim, self.d_conv, pre_stride=self.d_inner, @@ -274,15 +282,16 @@ def __init__(self, if rmsnorm: self.norm = RmsNorm(normalized_shape=self.d_inner, - num_groups=ngroups, + num_groups=self.ngroups, eps=1e-5, dtype=dtype) - self.out_proj = Linear(self.d_inner, - self.d_model, - bias=bias, - dtype=dtype, - gather_output=False) + self.out_proj = RowLinear(d_inner, + d_model, + bias=bias, + dtype=dtype, + tp_group=tp_group, + tp_size=tp_size) def forward(self, hidden_states: Tensor, diff --git a/tensorrt_llm/models/__init__.py b/tensorrt_llm/models/__init__.py index e09ab9eac..39481b3a0 100755 --- a/tensorrt_llm/models/__init__.py +++ b/tensorrt_llm/models/__init__.py @@ -22,6 +22,7 @@ from .cogvlm.model import CogVLMForCausalLM from .dbrx.config import DbrxConfig from .dbrx.model import DbrxForCausalLM +from .deci.model import DeciLMForCausalLM from .dit.model import DiT from .enc_dec.model import DecoderModel, EncoderModel, WhisperEncoder from .falcon.config import FalconConfig @@ -157,4 +158,5 @@ 'RecurrentGemmaForCausalLM': RecurrentGemmaForCausalLM, 'CogVLMForCausalLM': CogVLMForCausalLM, 'DiT': DiT, + 'DeciLMForCausalLM': 
DeciLMForCausalLM, } diff --git a/tensorrt_llm/models/automodel.py b/tensorrt_llm/models/automodel.py new file mode 100644 index 000000000..974064305 --- /dev/null +++ b/tensorrt_llm/models/automodel.py @@ -0,0 +1,73 @@ +from typing import Optional + +from ..mapping import Mapping +from . import MODEL_MAP +from .modeling_utils import QuantConfig + + +class AutoConfig: + + @staticmethod + def from_hugging_face(hf_model_or_dir, + dtype: str = 'auto', + mapping: Optional[Mapping] = None, + quant_config: Optional[QuantConfig] = None, + **kwargs): + import transformers + hf_config = transformers.AutoConfig.from_pretrained( + hf_model_or_dir, trust_remote_code=True) + hf_arch = hf_config.architectures[0] + trtllm_model_cls = MODEL_MAP.get(hf_arch, None) + if trtllm_model_cls is None: + raise NotImplementedError( + f"The given huggingface model architecture {hf_arch} is not supported in TRT-LLM yet" + ) + + if not hasattr(trtllm_model_cls, 'config_class'): + raise NotImplementedError( + f"The given TRT-LLM model class {trtllm_model_cls} does not support AutoConfig" + ) + + trtllm_cfg_cls = getattr(trtllm_model_cls, 'config_class') + if not hasattr(trtllm_cfg_cls, 'from_hugging_face'): + raise NotImplementedError( + f"The given TRT-LLM model class {trtllm_cfg_cls} does not support from_hugging_face" + ) + + return trtllm_cfg_cls.from_hugging_face(hf_model_or_dir, dtype, mapping, + quant_config, **kwargs) + + +class AutoModelForCausalLM: + + @staticmethod + def get_trtllm_model_class(hf_model_or_dir): + import transformers + hf_config = transformers.AutoConfig.from_pretrained( + hf_model_or_dir, trust_remote_code=True) + hf_arch = hf_config.architectures[0] + trtllm_model_cls = MODEL_MAP.get(hf_arch, None) + + if trtllm_model_cls is None: + raise NotImplementedError( + f"The given huggingface model architecture {hf_arch} is not supported in TRT-LLM yet" + ) + return trtllm_model_cls + + @staticmethod + def from_hugging_face(hf_model_or_dir, + dtype: str = 'auto', + mapping: 
Optional[Mapping] = None, + quant_config: Optional[QuantConfig] = None, + **kwargs): + trtllm_model_cls = AutoModelForCausalLM.get_trtllm_model_class( + hf_model_or_dir) + + if not hasattr(trtllm_model_cls, 'from_hugging_face'): + raise NotImplementedError( + f"The given {trtllm_model_cls} does not support from_hugging_face yet" + ) + + return trtllm_model_cls.from_hugging_face(hf_model_or_dir, dtype, + mapping, quant_config, + **kwargs) diff --git a/tensorrt_llm/models/baichuan/model.py b/tensorrt_llm/models/baichuan/model.py index 5fb74e7f0..cd95c57a5 100644 --- a/tensorrt_llm/models/baichuan/model.py +++ b/tensorrt_llm/models/baichuan/model.py @@ -110,11 +110,9 @@ def __init__(self, config: PretrainedConfig): super().__init__() hidden_size = config.hidden_size - self.vocab_embedding = Embedding( - config.vocab_size, - config.hidden_size, - dtype=config.dtype, - share_embedding_table=config.share_embedding_table) + self.vocab_embedding = Embedding(config.vocab_size, + config.hidden_size, + dtype=config.dtype) self.layers = DecoderLayerList(BaichuanDecoderLayer, config) self.ln_f = RmsNorm(normalized_shape=hidden_size, diff --git a/tensorrt_llm/models/bloom/model.py b/tensorrt_llm/models/bloom/model.py index 62ee0e92b..a189f7c3b 100644 --- a/tensorrt_llm/models/bloom/model.py +++ b/tensorrt_llm/models/bloom/model.py @@ -108,11 +108,9 @@ class BloomModel(Module): def __init__(self, config: PretrainedConfig): super().__init__() - self.vocab_embedding = Embedding( - config.vocab_size, - config.hidden_size, - dtype=config.dtype, - share_embedding_table=config.share_embedding_table) + self.vocab_embedding = Embedding(config.vocab_size, + config.hidden_size, + dtype=config.dtype) self.ln_embed = LayerNorm(normalized_shape=config.hidden_size, dtype=config.dtype) self.layers = DecoderLayerList(BloomDecoderLayer, config) diff --git a/tensorrt_llm/models/chatglm/model.py b/tensorrt_llm/models/chatglm/model.py index d52bb66df..a6b82c928 100644 --- 
a/tensorrt_llm/models/chatglm/model.py +++ b/tensorrt_llm/models/chatglm/model.py @@ -176,11 +176,9 @@ def __init__(self, config: ChatGLMConfig): self.chatglm_version = config.chatglm_version norm_cls = RmsNorm if config.rmsnorm else LayerNorm - self.vocab_embedding = Embedding( - config.vocab_size, - config.hidden_size, - dtype=config.dtype, - share_embedding_table=config.share_embedding_table) + self.vocab_embedding = Embedding(config.vocab_size, + config.hidden_size, + dtype=config.dtype) if config.chatglm_version == 'glm': self.position_embedding = Embedding( diff --git a/tensorrt_llm/models/convert_utils.py b/tensorrt_llm/models/convert_utils.py index 06f8699d1..486a47c22 100644 --- a/tensorrt_llm/models/convert_utils.py +++ b/tensorrt_llm/models/convert_utils.py @@ -1,3 +1,4 @@ +import fnmatch import re from pathlib import Path from typing import Dict, Optional, Union @@ -73,12 +74,15 @@ def weight_only_quantize_dict(weights: Dict[str, torch.Tensor], if quant_algo not in [QuantAlgo.W4A16, QuantAlgo.W8A16]: return weights if exclude_modules is None: - exclude_modules = ['shared_expert_gate.weight'] + exclude_modules = ['*shared_expert_gate.weight'] for name in list(weights): - if any([_name in name for _name in exclude_modules]): - continue - if any([_name in name for _name in quant_weights - ]) and weights[name].dtype != torch.int8: + is_excluded = False + for exclude_module in exclude_modules: + if fnmatch.fnmatchcase(name, exclude_module): + is_excluded = True + break + if not is_excluded and any([_name in name for _name in quant_weights + ]) and weights[name].dtype != torch.int8: quant_weight, quant_scale = weight_only_quantize( weight=weights[name], quant_algo=quant_algo, plugin=plugin) weights[name] = quant_weight diff --git a/tensorrt_llm/models/deci/__init__.py b/tensorrt_llm/models/deci/__init__.py new file mode 100644 index 000000000..71bf6d298 --- /dev/null +++ b/tensorrt_llm/models/deci/__init__.py @@ -0,0 +1,14 @@ +# SPDX-FileCopyrightText: 
Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tensorrt_llm/models/deci/config.py b/tensorrt_llm/models/deci/config.py new file mode 100644 index 000000000..b9accc61e --- /dev/null +++ b/tensorrt_llm/models/deci/config.py @@ -0,0 +1,207 @@ +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from dataclasses import asdict +from typing import Any, Dict, List, Optional, Union + +import torch + +from tensorrt_llm._utils import torch_dtype_to_str +from tensorrt_llm.functional import PositionEmbeddingType +from tensorrt_llm.logger import logger +from tensorrt_llm.mapping import Mapping +from tensorrt_llm.models.deci.convert import hf_block_config_to_layer_config +from tensorrt_llm.models.deci.layer_config import (AttentionConfig, + AttentionImplementation, + DeciLayerConfig, FFNConfig) +from tensorrt_llm.models.modeling_utils import PretrainedConfig, QuantConfig + + +class DeciConfig(PretrainedConfig): + + def __init__(self, + *, + architecture: str = 'DeciLMForCausalLM', + dtype: str, + hidden_size: int, + num_hidden_layers: int, + num_attention_heads: int, + vocab_size: int, + hidden_act: str = 'gelu', + logits_dtype: str = 'float32', + norm_epsilon: float = 0.00001, + position_embedding_type: Union[ + PositionEmbeddingType, + str] = PositionEmbeddingType.rope_gpt_neox, + rotary_base: float = 10000.0, + rotary_scaling: Optional[dict] = None, + max_position_embeddings: int, + num_key_value_heads: Optional[int] = None, + intermediate_size: Optional[int] = None, + mapping: Optional[Union[Mapping, dict]] = None, + quantization: Optional[Union[QuantConfig, dict]] = None, + use_parallel_embedding: bool = False, + embedding_sharding_dim: int = 0, + share_embedding_table: bool = False, + head_size: Optional[int] = None, + qk_layernorm: bool = False, + layer_configs: Optional[List[Union[DeciLayerConfig, + Dict[str, + Dict[str, + Any]]]]] = None, + **kwargs): + super().__init__(architecture=architecture, + dtype=dtype, + hidden_size=hidden_size, + num_hidden_layers=num_hidden_layers, + num_attention_heads=num_attention_heads, + vocab_size=vocab_size, + hidden_act=hidden_act, + logits_dtype=logits_dtype, + norm_epsilon=norm_epsilon, + position_embedding_type=position_embedding_type, + max_position_embeddings=max_position_embeddings, + 
num_key_value_heads=num_key_value_heads, + intermediate_size=intermediate_size, + mapping=mapping, + quantization=quantization, + use_parallel_embedding=use_parallel_embedding, + embedding_sharding_dim=embedding_sharding_dim, + share_embedding_table=share_embedding_table, + head_size=head_size, + qk_layernorm=qk_layernorm, + **kwargs) + + self.rotary_base = rotary_base + self.rotary_scaling = rotary_scaling + + if layer_configs is not None: + assert len( + layer_configs + ) == num_hidden_layers, f"num_hidden_layers ({num_hidden_layers}) must match len(layer_configs) ({len(layer_configs)})" + + self.layer_configs = self._ensure_layer_configs(layer_configs) + else: + self.layer_configs = None + + # HACK: this is needed for many parts of the code + self.layer_types = [ + AttentionImplementation( + self.get_layer_config(layer_idx).attention.impl).value + for layer_idx in range(self.num_hidden_layers) + ] + + def _ensure_layer_configs( + self, layer_configs: List[Union[DeciLayerConfig, Dict[str, Any]]] + ) -> List[DeciLayerConfig]: + return [ + DeciLayerConfig.from_dict(c) if isinstance(c, dict) else c + for c in layer_configs + ] + + def to_dict(self): + output = super().to_dict() + if self.layer_configs is not None: + output["layer_configs"] = [asdict(c) for c in self.layer_configs] + return output + + def get_layer_config(self, layer_idx: int) -> DeciLayerConfig: + if self.layer_configs is not None: + conf = self.layer_configs[layer_idx] + else: + conf = DeciLayerConfig() + + attention_impl = conf.attention.impl + num_key_value_heads = conf.attention.num_key_value_heads or self.num_key_value_heads + ffn_impl = conf.ffn.impl + intermediate_size = conf.ffn.intermediate_size or self.intermediate_size + + return DeciLayerConfig( + attention=AttentionConfig(impl=attention_impl, + num_key_value_heads=num_key_value_heads), + ffn=FFNConfig(impl=ffn_impl, intermediate_size=intermediate_size)) + + def get_layer_num_kv_heads(self, layer_idx) -> int: + layer_config = 
self.get_layer_config(layer_idx) + assert layer_config.is_attention_layer, f"Layer {layer_idx} is not an attention layer" + return layer_config.attention.num_key_value_heads or self.num_key_value_heads + + @classmethod + def from_hugging_face( + cls, + hf_config_or_dir: Union[str, 'transformers.PretrainedConfig'], + dtype: str = 'auto', + mapping: Optional[Mapping] = None, + quant_config: Optional[QuantConfig] = None, + trust_remote_code: bool = False, + **kwargs): + import transformers + + if isinstance(hf_config_or_dir, transformers.PretrainedConfig): + hf_config = hf_config_or_dir + else: + hf_config = transformers.AutoConfig.from_pretrained( + hf_config_or_dir, trust_remote_code=trust_remote_code) + + assert hf_config.model_type == "deci", f"Unsupported model type: {hf_config.model_type}" + + block_configs = getattr(hf_config, "block_configs", None) + if block_configs is not None: + layer_configs = [ + hf_block_config_to_layer_config(block_config, + hf_config.num_attention_heads, + hf_config.hidden_size) + for block_config in block_configs + ] + else: + # older deci arch + num_key_value_heads_per_layer = getattr( + hf_config, "num_key_value_heads_per_layer", None) + if num_key_value_heads_per_layer is not None: + layer_configs = [ + DeciLayerConfig(attention=AttentionConfig( + num_key_value_heads=num_key_value_heads)) + for num_key_value_heads in num_key_value_heads_per_layer + ] + else: + layer_configs = None + + if dtype == 'auto': + dtype = getattr(hf_config, 'torch_dtype', "float16") + if isinstance(dtype, torch.dtype): + dtype = torch_dtype_to_str(dtype) + if dtype == 'float32': + dtype = 'float16' + if dtype == 'bfloat16' and torch.cuda.get_device_properties( + 0).major < 8: + logger.warning( + "Pre SM 80 GPUs do not support bfloat16, fallback to float16") + dtype = 'float16' + + return cls(dtype=dtype, + hidden_size=hf_config.hidden_size, + hidden_act=hf_config.hidden_act, + intermediate_size=hf_config.intermediate_size, + 
num_attention_heads=hf_config.num_attention_heads, + num_hidden_layers=hf_config.num_hidden_layers, + num_key_value_heads=hf_config.num_key_value_heads, + norm_epsilon=hf_config.rms_norm_eps, + rotary_scaling=hf_config.rope_scaling, + rotary_base=hf_config.rope_theta, + vocab_size=hf_config.vocab_size, + max_position_embeddings=hf_config.max_position_embeddings, + mapping=mapping, + quantization=quant_config, + layer_configs=layer_configs, + **kwargs) diff --git a/tensorrt_llm/models/deci/convert.py b/tensorrt_llm/models/deci/convert.py new file mode 100644 index 000000000..5f0a58ef5 --- /dev/null +++ b/tensorrt_llm/models/deci/convert.py @@ -0,0 +1,365 @@ +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import enum +import json +import time +from abc import ABC, abstractmethod +from contextlib import contextmanager +from pathlib import Path +from typing import Any, Dict, Iterator, Optional, TypedDict, Union + +import safetensors +import torch + +from tensorrt_llm._utils import pad_vocab_size +from tensorrt_llm.logger import logger +from tensorrt_llm.models.deci.layer_config import (AttentionConfig, + AttentionImplementation, + DeciLayerConfig, FFNConfig, + FFNImplementation) +from tensorrt_llm.models.llama.convert import dup_kv_weight, split +from tensorrt_llm.quantization.mode import QuantAlgo + + +def _ffn_mult_to_intermediate_size(ffn_mult: float, n_embd: int) -> int: + intermediate_size = int(2 * ffn_mult * n_embd / 3) + return _find_multiple(intermediate_size, 256) + + +def _find_multiple(n: int, k: int) -> int: + if n % k == 0: + return n + return n + k - (n % k) + + +# BlockConfig is a custom class defined inside deci huggingface checkpoints, we can't import it +def hf_block_config_to_layer_config(block_config: "BlockConfig", + num_attn_heads: int, + hidden_size: int) -> DeciLayerConfig: + attn = block_config.attention + if attn.no_op: + attn_impl = AttentionImplementation.NO_OP + num_key_value_heads = None + elif attn.replace_with_linear: + attn_impl = AttentionImplementation.LINEAR + num_key_value_heads = None + elif attn.sparsify: + raise NotImplementedError("Sparsification is not supported") + else: + attn_impl = AttentionImplementation.ATTENTION + num_key_value_heads = num_attn_heads // attn.n_heads_in_group + + ffn = block_config.ffn + if ffn.no_op: + ffn_impl = FFNImplementation.NO_OP + intermediate_size = None + elif ffn.replace_with_linear: + ffn_impl = FFNImplementation.LINEAR + intermediate_size = None + elif ffn.sparsify: + raise NotImplementedError("Sparsification is not supported") + else: + ffn_impl = FFNImplementation.MLP + intermediate_size = _ffn_mult_to_intermediate_size( + ffn.ffn_mult, hidden_size) + + return 
DeciLayerConfig(attention=AttentionConfig( + impl=attn_impl, num_key_value_heads=num_key_value_heads), + ffn=FFNConfig(impl=ffn_impl, + intermediate_size=intermediate_size)) + + +@contextmanager +def timed_loading() -> Iterator[None]: + tik = time.time() + yield + + tok = time.time() + t = time.strftime('%H:%M:%S', time.gmtime(tok - tik)) + logger.info(f'Weights loaded. Total time: {t}') + + +class TpDim(enum.IntEnum): + NO_TP = -1 + COLWISE = 0 + ROWWISE = 1 + + +class SafetensorsIndex(TypedDict): + metadata: Dict[str, Any] + weight_map: Dict[str, str] + + +class WeightsLoader(ABC): + + @abstractmethod + def get_weight(self, + name: str, + tp_dim: TpDim = TpDim.NO_TP, + tp_size: int = 1, + tp_rank: int = 0) -> torch.Tensor: + ... + + +class HFModelWeightsLoader(WeightsLoader): + + def __init__(self, *, hf_model: "transformers.PreTrainedModel", + dtype: str) -> None: + self.model_params = dict(hf_model.named_parameters()) + self.dtype = getattr(torch, dtype) + + def get_weight(self, + name: str, + tp_dim: TpDim = TpDim.NO_TP, + tp_size: int = 1, + tp_rank: int = 0) -> torch.Tensor: + weight = self.model_params[name] + if weight.dtype != self.dtype: + weight = weight.to(self.dtype) + weight = weight.detach() + + if tp_dim != TpDim.NO_TP: + weight = split(weight, tp_size, tp_rank, dim=tp_dim) + return weight + + +class SafetensorsWeightsLoader(WeightsLoader): + + def __init__(self, *, model_dir: Path, dtype: str) -> None: + self.model_dir = model_dir + self.dtype = getattr(torch, dtype) + + # the index has a weight map that maps weight names to the files they are found in + safetensor_index_json = self.model_dir / "model.safetensors.index.json" + has_safetensor_index_json = safetensor_index_json.is_file() + if has_safetensor_index_json: + with safetensor_index_json.open("r") as fr: + self.sharding_map: SafetensorsIndex = json.load(fr) + else: + self.sharding_map = SafetensorsIndex(metadata={}, weight_map={}) + + shard_files = {f.name for f in 
self.model_dir.glob("*.safetensors")} + if has_safetensor_index_json: + # only read the files that have weights according to the index + shard_files &= set(self.sharding_map["weight_map"].values()) + self.shard_files = sorted(list(shard_files)) + + self.safetensors_files = { + shard_file: safetensors.safe_open(model_dir / shard_file, + framework="pt", + device="cpu") + for shard_file in shard_files + } + + def get_weight(self, + name: str, + tp_dim: TpDim = TpDim.NO_TP, + tp_size: int = 1, + tp_rank: int = 0) -> torch.Tensor: + shard_filename = self.sharding_map['weight_map'].get( + name, self.shard_files[0]) + if tp_dim == TpDim.NO_TP: + res = self.safetensors_files[shard_filename].get_tensor(name) + else: + tensor_slice = self.safetensors_files[shard_filename].get_slice( + name) + tensor_shape = tensor_slice.get_shape() + if len(tensor_shape) == 1: + if tp_dim == TpDim.COLWISE: + slice_width = tensor_shape[0] // tp_size + res = tensor_slice[slice_width * tp_rank:slice_width * + (tp_rank + 1)] + else: # row-wise, but 1-dimensional ==> no tp + res = tensor_slice[:] + else: + assert tensor_shape[ + tp_dim] % tp_size == 0, f"Current weight shape is invalid for tp_size={tp_size}" + slice_width = tensor_shape[tp_dim] // tp_size + if tp_dim == TpDim.COLWISE: + res = tensor_slice[slice_width * tp_rank:slice_width * + (tp_rank + 1), :] + else: + res = tensor_slice[:, slice_width * tp_rank:slice_width * + (tp_rank + 1)] + + return res.to(self.dtype).contiguous() + + +def load_model_weights(loader: WeightsLoader, + config: "DeciConfig") -> Dict[str, torch.Tensor]: + mapping = config.mapping + num_hidden_layers = config.num_hidden_layers + vocab_size = config.vocab_size + pad_vocab = vocab_size % mapping.tp_size != 0 + vocab_size_padded = pad_vocab_size(vocab_size, mapping.tp_size) + + weights = {} + + def load_weight(name: str, tp_dim: TpDim = TpDim.NO_TP) -> torch.Tensor: + return loader.get_weight(name=name, + tp_dim=tp_dim, + tp_rank=mapping.tp_rank, + 
tp_size=mapping.tp_size) + + with timed_loading(): + if mapping.is_first_pp_rank(): + weights['transformer.vocab_embedding.weight'] = load_weight( + "model.embed_tokens.weight", + TpDim(config.embedding_sharding_dim) + if config.use_parallel_embedding else + TpDim.NO_TP) # vocab_embedding + + if mapping.is_last_pp_rank(): + v = load_weight("lm_head.weight", + TpDim.NO_TP) if pad_vocab else load_weight( + "lm_head.weight", TpDim.COLWISE) # lm_head + if pad_vocab: + v = torch.nn.functional.pad( + v, (0, 0, 0, vocab_size_padded - vocab_size), 'constant', 0) + v = split(v, mapping.tp_size, mapping.tp_rank) + weights['lm_head.weight'] = v + weights['transformer.ln_f.weight'] = load_weight( + "model.norm.weight") # ln_f + + layers_range = mapping.pp_layers(num_hidden_layers) + for l in layers_range: + layer_config = config.get_layer_config(l) + layer_idx = l - layers_range[0] + tllm_prex = f'transformer.layers.{layer_idx}' + + # Attention + if layer_config.is_attention_layer: + weights[f'{tllm_prex}.input_layernorm.weight'] = load_weight( + f"model.layers.{l}.input_layernorm.weight" + ) # input_layernorm + + qkv = {} + for comp in ["q", "k", "v"]: + weight_part = load_weight( + f"model.layers.{l}.self_attn.{comp}_proj.weight", + TpDim.COLWISE) + qkv[comp] = weight_part + + if layer_config.attention.num_key_value_heads < mapping.tp_size: + # duplicate the KV heads up to tensor_parallel + qkv["k"] = dup_kv_weight( + qkv["k"], layer_config.attention.num_key_value_heads, + mapping.tp_size) + qkv["v"] = dup_kv_weight( + qkv["v"], layer_config.attention.num_key_value_heads, + mapping.tp_size) + + weights[f'{tllm_prex}.attention.qkv.weight'] = torch.cat( + [qkv["q"], qkv["k"], qkv["v"]], 0) + weights[f'{tllm_prex}.attention.dense.weight'] = load_weight( + f"model.layers.{l}.self_attn.o_proj.weight", + TpDim.ROWWISE) # attention.dense + + elif layer_config.is_linear_attention_layer: + weights[f'{tllm_prex}.input_layernorm.weight'] = load_weight( + 
f"model.layers.{l}.input_layernorm.weight" + ) # input_layernorm + + weights[f'{tllm_prex}.attention.weight'] = load_weight( + f"model.layers.{l}.self_attn.linear_attn.weight", + TpDim.COLWISE) + + elif not layer_config.is_noop_attention_layer: + raise NotImplementedError( + f"Loading weights for layer with attention of type {layer_config.attention.impl} is not supported" + ) + + # MLP + if layer_config.is_mlp_layer: + weights[f'{tllm_prex}.post_layernorm.weight'] = load_weight( + f"model.layers.{l}.post_attention_layernorm.weight" + ) # post_layernorm + + weights[f'{tllm_prex}.ffn.gate.weight'] = load_weight( + f"model.layers.{l}.mlp.up_proj.weight", + TpDim.COLWISE) # mlp.gate + weights[f'{tllm_prex}.ffn.proj.weight'] = load_weight( + f"model.layers.{l}.mlp.down_proj.weight", + TpDim.ROWWISE) # mlp.proj + weights[f'{tllm_prex}.ffn.fc.weight'] = load_weight( + f"model.layers.{l}.mlp.gate_proj.weight", + TpDim.COLWISE) # mlp.fc + + elif layer_config.is_linear_ffn_layer: + weights[f'{tllm_prex}.post_layernorm.weight'] = load_weight( + f"model.layers.{l}.post_attention_layernorm.weight" + ) # post_layernorm + + weights[f'{tllm_prex}.ffn.weight'] = load_weight( + f"model.layers.{l}.mlp.linear_mlp.weight", TpDim.COLWISE) + + elif not layer_config.is_noop_ffn_layer: + raise NotImplementedError( + f"Loading weights for a layer with FFN of type {layer_config.ffn.impl} is not implemented yet" + ) + + return weights + + +def load_weights_from_hf_model( + hf_model: "transformers.PreTrainedModel", + config: "DeciConfig", + act_range: Optional[dict] = None, + qkv_para: Optional[dict] = None, + smoother: Optional[dict] = None) -> Dict[str, torch.Tensor]: + quant_algo = config.quantization.quant_algo + use_weight_only = quant_algo in [QuantAlgo.W8A16, QuantAlgo.W4A16] + if quant_algo == QuantAlgo.W8A16: + torch.int8 + elif quant_algo == QuantAlgo.W4A16: + torch.quint4x2 + else: + pass + + use_smooth_quant = config.quantization.use_plugin_sq + int8_kv_cache = 
config.quantization.kv_cache_quant_algo == QuantAlgo.INT8 + if use_smooth_quant or int8_kv_cache: + assert act_range is not None + assert qkv_para is not None + assert smoother is not None + + # TODO(oargov): add support for these quants + assert not use_weight_only, "WOQ is not supported yet" + assert not use_smooth_quant, "SmoothQuant is not supported yet" + assert not int8_kv_cache, "INT8 KV cache is not supported yet" + + # TODO(oargov): support moe + moe_config = getattr(config, "moe", None) + assert moe_config is None, "MoE is not supported yet" + + # TODO(oargov): implement resisdual mlp + residual_mlp = getattr(config, "residual_mlp", None) + assert not residual_mlp, "Residual MLP is not supported yet" + + loader = HFModelWeightsLoader(hf_model=hf_model, dtype=config.dtype) + logger.info('Converting weights from Huggingface model...') + return load_model_weights(loader=loader, config=config) + + +def load_weights_from_hf_safetensors( + model_dir: Union[str, Path], + config: "DeciConfig") -> Dict[str, torch.Tensor]: + + if isinstance(model_dir, str): + model_dir = Path(model_dir) + + loader = SafetensorsWeightsLoader(model_dir=model_dir, dtype=config.dtype) + logger.info('Loading weights from Huggingface safetensors...') + return load_model_weights(loader=loader, config=config) diff --git a/tensorrt_llm/models/deci/layer_config.py b/tensorrt_llm/models/deci/layer_config.py new file mode 100644 index 000000000..84ed3487d --- /dev/null +++ b/tensorrt_llm/models/deci/layer_config.py @@ -0,0 +1,86 @@ +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import enum +from dataclasses import dataclass, field +from typing import Any, Dict, Optional + + +class AttentionImplementation(str, enum.Enum): + ATTENTION = "attention" + LINEAR = "linear" + NO_OP = "no_op" + + +class FFNImplementation(str, enum.Enum): + MLP = "mlp" + LINEAR = "linear" + NO_OP = "no_op" + + +@dataclass(frozen=True, kw_only=True) +class AttentionConfig: + impl: AttentionImplementation = AttentionImplementation.ATTENTION + num_key_value_heads: Optional[int] = None + + @property + def needs_kv_cache(self) -> bool: + return self.impl == AttentionImplementation.ATTENTION + + +@dataclass(frozen=True, kw_only=True) +class FFNConfig: + impl: FFNImplementation = FFNImplementation.MLP + intermediate_size: Optional[int] = None + + +@dataclass(frozen=True, kw_only=True) +class DeciLayerConfig: + attention: AttentionConfig = field(default_factory=AttentionConfig) + ffn: FFNConfig = field(default_factory=FFNConfig) + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> "DeciLayerConfig": + assert "attention" in d, "Missing attention configuration" + assert "ffn" in d, "Missing mlp configuration" + + return cls( + attention=AttentionConfig(**d["attention"]), + ffn=FFNConfig(**d["ffn"]), + ) + + @property + def is_attention_layer(self) -> bool: + return self.attention.impl == AttentionImplementation.ATTENTION + + @property + def is_mlp_layer(self) -> bool: + return self.ffn.impl == FFNImplementation.MLP + + @property + def is_noop_attention_layer(self) -> bool: + return self.attention.impl == AttentionImplementation.NO_OP + + @property + def 
is_linear_attention_layer(self) -> bool: + return self.attention.impl == AttentionImplementation.LINEAR + + @property + def is_noop_ffn_layer(self) -> bool: + return self.ffn.impl == FFNImplementation.NO_OP + + @property + def is_linear_ffn_layer(self) -> bool: + return self.ffn.impl == FFNImplementation.LINEAR diff --git a/tensorrt_llm/models/deci/model.py b/tensorrt_llm/models/deci/model.py new file mode 100644 index 000000000..b0d0ded0e --- /dev/null +++ b/tensorrt_llm/models/deci/model.py @@ -0,0 +1,643 @@ +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from dataclasses import dataclass +from typing import List, Optional, Tuple, Type, Union + +from tensorrt_llm.bindings import KVCacheType +from tensorrt_llm.functional import (AllReduceFusionParams, AttentionMaskType, + PositionEmbeddingType, Tensor, + gather_last_token_logits, recv, send) +from tensorrt_llm.layers.attention import (Attention, AttentionParams, + KeyValueCacheParams, + SpecDecodingParams) +from tensorrt_llm.layers.embedding import Embedding +from tensorrt_llm.layers.linear import ColumnLinear +from tensorrt_llm.layers.lora import LoraParams +from tensorrt_llm.layers.mlp import GatedMLP +from tensorrt_llm.layers.normalization import RmsNorm +from tensorrt_llm.mapping import Mapping +from tensorrt_llm.models.convert_utils import has_safetensors +from tensorrt_llm.models.deci.config import DeciConfig +from tensorrt_llm.models.deci.convert import (load_weights_from_hf_model, + load_weights_from_hf_safetensors) +from tensorrt_llm.models.modeling_utils import DecoderModelForCausalLM +from tensorrt_llm.module import Module, ModuleList +from tensorrt_llm.plugin.plugin import init_all_reduce_helper + +from ..._common import default_net +from ..._utils import pad_vocab_size +from ..modeling_utils import QuantConfig, preprocess_weights + + +@dataclass +class DeciLMLayerOutput: + hidden_states: Tensor + present_kv: Optional[Tensor] = None + + +@dataclass +class DeciLMLayerListOutput: + hidden_states: Tensor + present_kvs: List[Tensor] + + +class NoOp(Module): + + def forward(self, hidden_states: Tensor, *args, **kwargs) -> int: + return 0 + + +class NoOpAttention(NoOp): + + def forward(self, + hidden_states: Tensor, + attention_mask=None, + use_cache: bool = False, + *args, + **kwargs) -> Union[int, Tuple[int, None]]: + out = super().forward(hidden_states=hidden_states, + attention_mask=attention_mask, + use_cache=use_cache, + *args, + **kwargs) + if use_cache: + return out, None + return out + + +class LinearAttention(ColumnLinear): + + def forward(self, + 
hidden_states: Tensor, + attention_mask=None, + use_cache: bool = False, + *args, + **kwargs) -> Union[Tensor, Tuple[Tensor, None]]: + out = super().forward(x=hidden_states, + lora_runtime_params=None, + lora_hidden_state=None) + + if use_cache: + return out, None + return out + + +class LinearFFN(ColumnLinear): + + def forward( + self, + hidden_states, + lora_layer_params=None, + reduce_fusion_params: Optional[AllReduceFusionParams] = None + ) -> Tensor: + return super().forward(x=hidden_states, + lora_runtime_params=None, + lora_hidden_state=None) + + +NoOpFFN = NoOp +NoOpLayerNorm = NoOp + + +class DeciLMDecoderLayer(Module): + + def __init__(self, config: DeciConfig, layer_idx: int): + super().__init__() + self.layer_idx = layer_idx + self.config = config + + layers_range = config.mapping.pp_layers(config.num_hidden_layers) + self.local_layer_idx = layer_idx - layers_range[0] + + self.layer_config = self.config.get_layer_config(self.layer_idx) + + layer_type_len = len(config.layer_types) + layer_types = config.layer_types * ((layer_idx + 1) // layer_type_len) + layer_types = layer_types + config.layer_types[0:( + (layer_idx + 1) % layer_type_len)] + + attention_layer_idx = layer_types.count('attention') - 1 + self._init_attention(attention_layer_idx) + self._init_ffn() + + def _init_attention(self, attention_layer_idx) -> None: + """ + Initialize some attention alternative + """ + # normal attention + if self.layer_config.is_attention_layer: + self.input_layernorm = RmsNorm( + normalized_shape=self.config.hidden_size, + eps=self.config.norm_epsilon, + dtype=self.config.dtype, + ) + + self.attention = Attention( + local_layer_idx=attention_layer_idx, + hidden_size=self.config.hidden_size, + attention_head_size=self.config.head_size, + num_attention_heads=self.config.num_attention_heads, + num_kv_heads=self.layer_config.attention.num_key_value_heads, + max_position_embeddings=self.config.max_position_embeddings, + dtype=self.config.dtype, + 
attention_mask_type=AttentionMaskType.causal, + bias=False, + position_embedding_type=PositionEmbeddingType.rope_gpt_neox, + rotary_embedding_base=self.config.rotary_base, + rotary_embedding_scaling=self.config.rotary_scaling, + tp_group=self.config.mapping.tp_group, + tp_size=self.config.mapping.tp_size, + tp_rank=self.config.mapping.tp_rank, + quant_mode=self.config.quant_mode, + ) + + elif self.layer_config.is_noop_attention_layer: + self.input_layernorm = NoOpLayerNorm() + self.attention = NoOpAttention() + + elif self.layer_config.is_linear_attention_layer: + self.input_layernorm = RmsNorm( + normalized_shape=self.config.hidden_size, + eps=self.config.norm_epsilon, + dtype=self.config.dtype, + ) + + self.attention = LinearAttention( + in_features=self.config.hidden_size, + out_features=self.config.hidden_size, + bias=False, + dtype=self.config.dtype, + tp_group=self.config.mapping.tp_group, + tp_size=self.config.mapping.tp_size, + gather_output=True) + + else: + raise NotImplementedError( + f"Attention of type {str(self.layer_config.attention.impl)} is not implemented" + ) + + def _init_ffn(self) -> None: + """ + Initialize some ffn alternative + """ + + if self.layer_config.is_mlp_layer: + intermediate_size = self.layer_config.ffn.intermediate_size or self.config.intermediate_size + mlp_hidden_size = intermediate_size or self.config.hidden_size * 4 + + self.post_layernorm = RmsNorm( + normalized_shape=self.config.hidden_size, + eps=self.config.norm_epsilon, + dtype=self.config.dtype, + ) + + self.ffn = GatedMLP( + hidden_size=self.config.hidden_size, + ffn_hidden_size=mlp_hidden_size, + hidden_act=self.config.hidden_act, + bias=False, + dtype=self.config.dtype, + tp_group=self.config.mapping.tp_group, + tp_size=self.config.mapping.tp_size, + quant_mode=self.config.quant_mode, + ) + + elif self.layer_config.is_noop_ffn_layer: + self.post_layernorm = NoOpLayerNorm() + self.ffn = NoOpFFN() + + elif self.layer_config.is_linear_ffn_layer: + self.post_layernorm = 
RmsNorm( + normalized_shape=self.config.hidden_size, + eps=self.config.norm_epsilon, + dtype=self.config.dtype, + ) + + self.ffn = LinearFFN(in_features=self.config.hidden_size, + out_features=self.config.hidden_size, + bias=False, + dtype=self.config.dtype, + tp_group=self.config.mapping.tp_group, + tp_size=self.config.mapping.tp_size, + gather_output=True) + + else: + raise NotImplementedError( + f"FFN of type {str(self.layer_config.ffn.impl)} is not implemented" + ) + + def forward( + self, + hidden_states: Tensor, + attention_mask: Optional[Tensor] = None, + use_cache: bool = False, + spec_decoding_params=None, + kv_cache_params: Optional[KeyValueCacheParams] = None, + attention_params: Optional[AttentionParams] = None, + lora_layer_params: Optional[LoraParams] = None, + ): + residual = hidden_states + hidden_states = self.input_layernorm(hidden_states) + + attention_output = self.attention( + hidden_states=hidden_states, + attention_mask=attention_mask, + use_cache=use_cache, + spec_decoding_params=spec_decoding_params, + kv_cache_params=kv_cache_params, + attention_params=attention_params, + lora_layer_params=lora_layer_params, + ) + + if use_cache: + attention_output, present_kv = attention_output + else: + present_kv = None + + hidden_states = residual + attention_output + residual = hidden_states + hidden_states = self.post_layernorm(hidden_states) + hidden_states = self.ffn(hidden_states, + lora_layer_params=lora_layer_params) + hidden_states = residual + hidden_states + + return DeciLMLayerOutput(hidden_states=hidden_states, + present_kv=present_kv) + + +class DeciLMDecoderLayerList(ModuleList): + + def __init__(self, cls: Type[DeciLMDecoderLayer], config: DeciConfig): + self.num_hidden_layers = config.num_hidden_layers + # global indices of local layers + self.layer_list = config.mapping.pp_layers(config.num_hidden_layers) + super().__init__([cls(config, idx) for idx in self.layer_list]) + # global indices of local attention layers + 
self.attention_layer_list = [ + self.layer_list[i] for i, layer in enumerate(self) + if layer.layer_config.is_attention_layer + ] + + def forward( + self, + hidden_states: Tensor, + use_cache: bool, + attention_mask: Optional[Tensor], + kv_cache_params: KeyValueCacheParams, + attention_params: Optional[AttentionParams] = None, + position_ids: Optional[Tensor] = None, + lora_params: Optional[LoraParams] = None, + spec_decoding_params: Optional[SpecDecodingParams] = None, + ) -> DeciLMLayerListOutput: + kv_cache_params.fill_none_tensor_list(len(self.layer_list)) + + presents = [] + + # put None where we don't have attention layers + pkv_iter = iter(kv_cache_params.past_key_value) + + past_key_values = [x for x in pkv_iter] + + for layer_idx, (layer, past) in enumerate(zip(self, past_key_values)): + layer_out = layer( + hidden_states=hidden_states, + attention_mask=attention_mask, + attention_params=attention_params, + kv_cache_params=KeyValueCacheParams( + past_key_value=[past], + host_past_key_value_lengths=kv_cache_params. + host_past_key_value_lengths, + host_max_attention_window_sizes=kv_cache_params. + host_max_attention_window_sizes, + host_sink_token_length=kv_cache_params. + host_sink_token_length, + kv_cache_block_offsets=kv_cache_params. + kv_cache_block_offsets, + host_kv_cache_block_offsets=kv_cache_params. + host_kv_cache_block_offsets, + host_kv_cache_pool_pointers=kv_cache_params. 
+ host_kv_cache_pool_pointers, + cache_indirection=kv_cache_params.cache_indirection, + ), + spec_decoding_params=spec_decoding_params, + use_cache=use_cache, + lora_layer_params=lora_params.get_layer_config(layer_idx) + if lora_params is not None + and lora_params.lora_ranks is not None else None) + + hidden_states = layer_out.hidden_states + if use_cache and layer_out.present_kv is not None: + presents.append(layer_out.present_kv) + + return DeciLMLayerListOutput(hidden_states=hidden_states, + present_kvs=presents) + + +class DeciLMModel(Module): + + def __init__(self, config: DeciConfig) -> None: + super().__init__() + init_all_reduce_helper() + + self.mapping = config.mapping + if self.mapping.is_first_pp_rank(): + # first rank in pipeline-parallel handles token embedding + assert config.vocab_size is not None + self.vocab_embedding = Embedding(config.vocab_size, + config.hidden_size, + dtype=config.dtype) + + self.position_embedding_type = config.position_embedding_type + self.layers = DeciLMDecoderLayerList(DeciLMDecoderLayer, config) + + if self.mapping.is_last_pp_rank(): + # last rank in pipeline-parallel handles final norm + self.ln_f = RmsNorm( + normalized_shape=config.hidden_size, + eps=config.norm_epsilon, + dtype=config.dtype, + ) + + def _vocab_embedding(self, + input_ids: Tensor, + prompt_embedding_table: Optional[Tensor] = None, + prompt_tasks: Optional[Tensor] = None, + prompt_vocab_size: Optional[Tensor] = None) -> Tensor: + # prompt tuning + ptuning_args = ([ + prompt_embedding_table, prompt_tasks, prompt_vocab_size + ] if prompt_embedding_table is not None else []) + + hidden_states = self.vocab_embedding(input_ids, *ptuning_args) + return hidden_states + + def forward( + self, + input_ids, + position_ids=None, + use_cache: bool = False, + attention_mask: Optional[Tensor] = None, + spec_decoding_params=None, + kv_cache_params: Optional[KeyValueCacheParams] = None, + attention_params: Optional[AttentionParams] = None, + hidden_states: 
Optional[Tensor] = None, + prompt_embedding_table: Optional[Tensor] = None, + prompt_tasks: Optional[Tensor] = None, + prompt_vocab_size: Optional[Tensor] = None, + lora_params: Optional[LoraParams] = None, + ) -> DeciLMLayerListOutput: + + if self.mapping.is_first_pp_rank(): + # first pipeline rank ==> do prompt embedding + hidden_states = self._vocab_embedding( + input_ids=input_ids, + prompt_embedding_table=prompt_embedding_table, + prompt_tasks=prompt_tasks, + prompt_vocab_size=prompt_vocab_size) + else: + # receive hidden states from prior rank in the pipeline + hidden_states = recv(hidden_states, self.mapping.prev_pp_rank()) + + layers_out = self.layers.forward( + hidden_states, + use_cache=use_cache, + attention_mask=attention_mask, + kv_cache_params=kv_cache_params, + attention_params=attention_params, + lora_params=lora_params, + spec_decoding_params=spec_decoding_params, + ) + + if self.mapping.is_last_pp_rank(): + # last pipeline rank ==> do final norm + hidden_states = self.ln_f(layers_out.hidden_states) + else: + # send hidden states to next rank in the pipeline + hidden_states = send(layers_out.hidden_states, + self.mapping.next_pp_rank()) + + return DeciLMLayerListOutput(hidden_states=hidden_states, + present_kvs=layers_out.present_kvs) + + +class DeciLMForCausalLM(DecoderModelForCausalLM): + config_class = DeciConfig + + def __init__(self, config: DeciConfig): + + transformer = DeciLMModel(config) + vocab_size_padded = pad_vocab_size(config.vocab_size, + config.mapping.tp_size) + + if config.mapping.is_last_pp_rank(): + # last pipeline rank needs to do calculate logits + lm_head = ColumnLinear( + config.hidden_size, + vocab_size_padded, + bias=False, + dtype=config.dtype, + tp_group=config.mapping.tp_group, + tp_size=config.mapping.tp_size, + gather_output=True, + ) + else: + lm_head = None + super().__init__(config, transformer, lm_head) + + # Create constant attention parameters to be reused by all layers. 
+ Attention.create_attention_const_params(self, config) + self.position_embedding_type = config.position_embedding_type + + @classmethod + def from_hugging_face(cls, + hf_model_or_dir: Union[ + str, 'transformers.PreTrainedModel'], + dtype: str = 'auto', + mapping: Optional[Mapping] = None, + quant_config: Optional[QuantConfig] = None, + load_by_shard: bool = False, + load_model_on_cpu: bool = False, + trust_remote_code: bool = False, + **kwargs) -> "DeciLMForCausalLM": + import transformers + + # TODO(oargov): add support for these + assert not load_by_shard, "load_by_shard is not implemented yet" + + use_preloading = isinstance(hf_model_or_dir, + transformers.PreTrainedModel) + if use_preloading: + hf_config_or_dir = hf_model_or_dir.config + else: + hf_config_or_dir = hf_model_or_dir + + config = DeciConfig.from_hugging_face( + hf_config_or_dir, + dtype=dtype, + mapping=mapping, + quant_config=quant_config, + trust_remote_code=trust_remote_code, + **kwargs) + + if use_preloading: + assert not load_by_shard + weights = load_weights_from_hf_model(hf_model_or_dir, config) + elif has_safetensors( + hf_model_or_dir) and not config.quant_mode.has_any_quant(): + weights = load_weights_from_hf_safetensors(hf_model_or_dir, config) + else: + hf_model = transformers.AutoModelForCausalLM.from_pretrained( + hf_model_or_dir, + device_map='auto' if not load_model_on_cpu else 'cpu', + torch_dtype=dtype, + trust_remote_code=trust_remote_code, + ) + weights = load_weights_from_hf_model(hf_model, config) + preprocess_weights(weights, config) + + model = DeciLMForCausalLM(config) + model.load(weights) + return model + + def forward( + self, + input_ids: Tensor, + position_ids: Optional[Tensor] = None, + use_cache: bool = False, + last_token_ids: Optional[Tensor] = None, + attention_mask: Optional[Tensor] = None, + kv_cache_params: Optional[KeyValueCacheParams] = None, + attention_params: Optional[AttentionParams] = None, + hidden_states: Optional[Tensor] = None, + 
prompt_embedding_table: Optional[Tensor] = None, + prompt_tasks: Optional[Tensor] = None, + prompt_vocab_size: Optional[Tensor] = None, + lora_params: Optional[LoraParams] = None, + spec_decoding_params=None, + ): + # fill attention params. + attention_params = Attention.fill_attention_params( + self, attention_params) + + model_out = self.transformer.forward( + input_ids=input_ids, + position_ids=position_ids, + use_cache=use_cache, + attention_mask=attention_mask, + kv_cache_params=kv_cache_params, + attention_params=attention_params, + lora_params=lora_params, + hidden_states=hidden_states, + prompt_embedding_table=prompt_embedding_table, + prompt_tasks=prompt_tasks, + prompt_vocab_size=prompt_vocab_size, + spec_decoding_params=spec_decoding_params) + hidden_states = model_out.hidden_states + + if self.config.mapping.is_last_pp_rank(): + hidden_states = gather_last_token_logits( + hidden_states, + last_token_ids, + default_net().plugin_config.remove_input_padding, + ) + + lm_logits = self.lm_head(hidden_states) + lm_logits.mark_output("logits", self.config.logits_dtype) + else: + hidden_states.mark_output("hidden_states_output", self.config.dtype) + + if use_cache and not default_net().plugin_config.paged_kv_cache: + presents = model_out.present_kvs + for i, present in zip(self.transformer.layers.attention_layer_list, + presents): + present.mark_output(f"present_key_value_{i}", + self.config.kv_dtype) + if self.config.mapping.is_last_pp_rank(): + return (lm_logits, presents, hidden_states) + return (hidden_states, presents) + else: + if self.config.mapping.is_last_pp_rank(): + return lm_logits, hidden_states + return hidden_states + + def prepare_attention_inputs( + self, + *, + max_batch_size: int, + max_beam_width: int, + max_input_len: int, + max_seq_len: int, + num_kv_heads: int, + head_size: int, + num_layers: int, + kv_dtype: str, + kv_cache_type: KVCacheType, + num_profiles: int = 1, + enable_ctx_gen_opt_profiles: bool = False, + remove_input_padding: 
bool = False, + use_gpt_attention_plugin: bool = False, + paged_kv_cache: bool = False, + tokens_per_block: int = 64, + mapping: Mapping = Mapping(), + use_cache: bool = True, + streamingllm: bool = False, + attn_layer_idx: Optional[List[int]] = None, + opt_batch_size: Optional[int] = None, + num_kv_heads_per_layer: Optional[List[int]] = None): + + if attn_layer_idx is None: + attn_layer_idx, num_kv_heads_per_layer = [], [] + for layer_idx in range(self.config.num_hidden_layers): + layer_config = self.config.get_layer_config(layer_idx) + if layer_config.is_attention_layer: + attn_layer_idx.append(layer_idx) + num_kv_heads_per_layer.append( + layer_config.attention.num_key_value_heads) + num_layers = len(attn_layer_idx) + + attention_inputs = super().prepare_attention_inputs( + max_batch_size=max_batch_size, + max_beam_width=max_beam_width, + max_input_len=max_input_len, + max_seq_len=max_seq_len, + num_kv_heads=num_kv_heads, + head_size=head_size, + num_layers=num_layers, + kv_dtype=kv_dtype, + num_profiles=num_profiles, + kv_cache_type=kv_cache_type, + enable_ctx_gen_opt_profiles=enable_ctx_gen_opt_profiles, + remove_input_padding=remove_input_padding, + use_gpt_attention_plugin=use_gpt_attention_plugin, + tokens_per_block=tokens_per_block, + mapping=mapping, + streamingllm=streamingllm, + attn_layer_idx=attn_layer_idx, + opt_batch_size=opt_batch_size, + num_kv_heads_per_layer=num_kv_heads_per_layer) + + kv_idx = 0 + past_key_value = [] + for i in range(self.config.num_hidden_layers): + layer_config = self.config.get_layer_config(i) + if layer_config.is_attention_layer: + past_key_value.append( + attention_inputs['past_key_value'][kv_idx]) + kv_idx += 1 + else: + past_key_value.append(None) + attention_inputs['past_key_value'] = past_key_value + + return attention_inputs diff --git a/tensorrt_llm/models/enc_dec/model.py b/tensorrt_llm/models/enc_dec/model.py index 072900d05..78acb32ff 100644 --- a/tensorrt_llm/models/enc_dec/model.py +++ 
b/tensorrt_llm/models/enc_dec/model.py @@ -667,7 +667,6 @@ def forward(self, def prepare_inputs(self, max_batch_size, max_input_len, - max_num_tokens, prompt_embedding_table_size: int = 0, lora_target_modules: List[str] = None, *args, @@ -890,7 +889,6 @@ def prepare_inputs(self, lora_params = LoraParams( lora_ranks=lora_ranks, lora_weights_pointers=lora_weights_pointers, - max_num_tokens=max_num_tokens, host_request_types=host_request_types, host_context_lengths=host_context_lengths, ) @@ -1226,7 +1224,6 @@ def prepare_inputs(self, max_beam_width, max_decoder_input_len, max_seq_len, - max_num_tokens, max_encoder_input_len, gather_context_logits: bool = False, gather_generation_logits: bool = False, @@ -1596,7 +1593,6 @@ def prepare_inputs(self, lora_ranks=lora_ranks, lora_weights_pointers=lora_weights_pointers, host_context_lengths=host_context_lengths, - max_num_tokens=max_num_tokens, max_encoder_context_length=max_encoder_input_len, host_request_types=host_request_types, host_encoder_input_lengths=host_encoder_input_lengths, diff --git a/tensorrt_llm/models/generation_mixin.py b/tensorrt_llm/models/generation_mixin.py index 17300b3b0..cb12289f8 100644 --- a/tensorrt_llm/models/generation_mixin.py +++ b/tensorrt_llm/models/generation_mixin.py @@ -185,7 +185,8 @@ def prepare_attention_inputs(self, mapping=Mapping(), streamingllm=False, attn_layer_idx=None, - opt_batch_size=None): + opt_batch_size=None, + num_kv_heads_per_layer=None): default_range = GenerationMixin.default_range @@ -258,16 +259,24 @@ def prepare_attention_inputs(self, else: if kv_cache_type != KVCacheType.PAGED: for i in layers_range: + if num_kv_heads_per_layer is not None: + heads_dim_name = f"num_heads_{attn_layer_idx[i]}" + kv_heads = num_kv_heads_per_layer[i] + else: + heads_dim_name = "num_heads" + kv_heads = num_kv_heads + kv_dim_range = OrderedDict([ ('batch_size_beam_width', bb_range), ('kv', [2] * num_profiles), - ('num_heads', [num_kv_heads] * num_profiles), + (heads_dim_name, [kv_heads] 
* num_profiles), ('past_key_len', kv_cache_range), ('head_size', [head_size] * num_profiles), ]) + kv = Tensor(name=f'past_key_value_{attn_layer_idx[i]}', dtype=kv_dtype, - shape=[-1, 2, num_kv_heads, -1, head_size], + shape=[-1, 2, kv_heads, -1, head_size], dim_range=kv_dim_range) past_key_value.append(kv) else: @@ -774,7 +783,6 @@ def prepare_basic_inputs( mapping=mapping, streamingllm=streamingllm, opt_batch_size=opt_batch_size) - for key, value in attention_inputs.items(): basic_inputs[key] = value diff --git a/tensorrt_llm/models/mamba/model.py b/tensorrt_llm/models/mamba/model.py index 977154a7e..7d2aac4d6 100644 --- a/tensorrt_llm/models/mamba/model.py +++ b/tensorrt_llm/models/mamba/model.py @@ -21,7 +21,7 @@ from ..._utils import str_dtype_to_trt from ...functional import (Tensor, arange, cast, concat, expand, gather_last_token_logits, shape, unsqueeze) -from ...layers import Embedding, LayerNorm, Linear, Mamba, Mamba2, RmsNorm +from ...layers import ColumnLinear, Embedding, LayerNorm, Mamba, Mamba2, RmsNorm from ...module import Module, ModuleList from ...plugin import current_all_reduce_helper from ..generation_mixin import GenerationMixin @@ -38,6 +38,7 @@ def __init__(self, config: PretrainedConfig, layer_idx: int): self.last_layer = layer_idx == n_layer - 1 if config.mamba_version == 'Mamba1': + assert config.mapping.tp_size == 1, "Mamba1 can not support tensor parallelism." 
self.ssm = Mamba(config.hidden_size, config.rnn_hidden_size, d_state=config.state_size, @@ -54,7 +55,9 @@ def __init__(self, config: PretrainedConfig, layer_idx: int): chunk_size=config.chunk_size, bias=config.use_bias, rmsnorm=config.ssm_rmsnorm, - dtype=config.dtype) + dtype=config.dtype, + tp_group=config.mapping.tp_group, + tp_size=config.mapping.tp_size) if config.rms_norm: self.input_layernorm = RmsNorm(normalized_shape=config.hidden_size, eps=config.norm_epsilon, @@ -105,17 +108,15 @@ class MambaModel(Module): def __init__(self, config: PretrainedConfig): super().__init__() self.d_conv = config.conv_kernel - self.d_inner = config.rnn_hidden_size + self.d_inner = config.rnn_hidden_size // config.mapping.tp_size n_layer = config.num_hidden_layers self.residual_in_fp32 = config.residual_in_fp32 if config.vocab_size % config.pad_vocab_size_multiple != 0: config.vocab_size += config.pad_vocab_size_multiple - ( config.vocab_size % config.pad_vocab_size_multiple) - self.vocab_embedding = Embedding( - config.vocab_size, - config.hidden_size, - dtype=config.dtype, - share_embedding_table=config.share_embedding_table) + self.vocab_embedding = Embedding(config.vocab_size, + config.hidden_size, + dtype=config.dtype) self.layers = ModuleList( [MambaLayer(config, i) for i in range(n_layer)]) if config.rms_norm: @@ -180,10 +181,10 @@ def __init__(self, config: PretrainedConfig): self.config = config self.mamba_version = config.mamba_version - self.d_inner = config.rnn_hidden_size + self.d_inner = config.rnn_hidden_size // config.mapping.tp_size self.d_conv = config.conv_kernel self.d_state = config.state_size - self.conv_dim = config.rnn_conv_dim_size + self.conv_dim = config.rnn_conv_dim_size // config.mapping.tp_size self.gather_context_logits = False if isinstance(logits_dtype, str): @@ -193,11 +194,13 @@ def __init__(self, config: PretrainedConfig): self._logits_dtype = logits_dtype self.backbone = MambaModel(config) - self.lm_head = Linear(config.hidden_size, - 
config.vocab_size, - bias=False, - dtype=dtype, - gather_output=False) + self.lm_head = ColumnLinear(config.hidden_size, + config.vocab_size, + bias=False, + dtype=dtype, + tp_group=config.mapping.tp_group, + tp_size=config.mapping.tp_size, + gather_output=True) def __post_init__(self): return diff --git a/tensorrt_llm/models/model_weights_loader.py b/tensorrt_llm/models/model_weights_loader.py index a2cca7cd3..3b55f2e30 100644 --- a/tensorrt_llm/models/model_weights_loader.py +++ b/tensorrt_llm/models/model_weights_loader.py @@ -9,8 +9,8 @@ from safetensors import safe_open from tqdm import tqdm -from tensorrt_llm.quantization.layers import (WeightOnlyQuantColumnLinear, - WeightOnlyQuantRowLinear) +from tensorrt_llm.quantization.layers import ( + WeightOnlyGroupwiseQuantColumnLinear, WeightOnlyGroupwiseQuantRowLinear) from .._utils import trt_dtype_to_torch from ..logger import logger @@ -185,6 +185,8 @@ def load_tensor(self, key, tp_size, tp_dim, tp_rank): return tensor[:] else: width = tensor_shape[tp_dim] + if width == 1: + return tensor[:] slice_width = math.ceil(width / tp_size) slice_start = tp_rank * slice_width slice_end = min((tp_rank + 1) * slice_width, width) @@ -196,7 +198,8 @@ def load_tensor(self, key, tp_size, tp_dim, tp_rank): def load(self, tllm_key: str, preprocess: Callable[[int], None] = None, - skip_tp: bool = False): + skip_tp: bool = False, + custom_postprocess_kwargs: dict = {}): """Load tensor from shards This function contains following steps: @@ -226,10 +229,10 @@ def load(self, tllm_to_externel_key_dict = sub_module.tllm_to_externel_key_dict if hasattr( sub_module, "tllm_to_externel_key_dict") else {} tp_dim = sub_module.tp_dim if hasattr(sub_module, "tp_dim") else -1 - require_weight_transpose = (isinstance( - sub_module, WeightOnlyQuantColumnLinear) or isinstance( - sub_module, - WeightOnlyQuantRowLinear)) and tllm_key.endswith("weight") + require_weight_transpose = ( + isinstance(sub_module, WeightOnlyGroupwiseQuantColumnLinear) + or 
isinstance(sub_module, WeightOnlyGroupwiseQuantRowLinear) + ) and tllm_key.endswith("weight") if tp_dim >= 0 and require_weight_transpose: tp_dim = 1 - tp_dim tp_size = sub_module.tp_size if hasattr(sub_module, "tp_size") else 1 @@ -260,7 +263,9 @@ def load(self, else: weight_dict = {tllm_key: v.to(trt_dtype_to_torch(param.dtype))} else: - v = sub_module.postprocess(tllm_key, v) + postprocess_kwargs = {"config": self.model.config} + postprocess_kwargs.update(custom_postprocess_kwargs) + v = sub_module.postprocess(tllm_key, v, **postprocess_kwargs) if isinstance(v, dict): weight_dict = v else: @@ -290,6 +295,9 @@ def check(self, weights): continue w_shape = weights[tllm_key].shape if w_shape != param.shape: + logger.warning( + f'{tllm_key} has invalid shape {w_shape}. Expected {param.shape}.' + ) pad = torch.nn.functional.pad pad_dim = [] for dim in range(weights[tllm_key].dim()): @@ -298,11 +306,12 @@ def check(self, weights): pad_dim.append( max(0, param.shape[current_dim] - w_shape[current_dim])) try: + logger.warning( + f'{tllm_key} is going to be padded by {pad_dim}.') weights[tllm_key] = pad(weights[tllm_key], tuple(pad_dim), value=0) assert weights[tllm_key].shape == param.shape - logger.warning(f'Parameter {tllm_key} is auto padded.') except: raise ValueError( f'Parameter {tllm_key} has invalid shape {weights[tllm_key].shape} compared with expected shape {param.shape}. Auto padding failed.' 
diff --git a/tensorrt_llm/models/modeling_utils.py b/tensorrt_llm/models/modeling_utils.py index 9b78d12ae..70f7a98db 100644 --- a/tensorrt_llm/models/modeling_utils.py +++ b/tensorrt_llm/models/modeling_utils.py @@ -650,7 +650,6 @@ def prepare_inputs( model_inputs['lora_ranks'], model_inputs['lora_weights_pointers'], host_context_lengths=model_inputs['host_context_lengths'], - max_num_tokens=max_num_tokens, host_request_types=model_inputs['host_request_types']) if model_inputs['spec_decoding_params'] is not None: result['spec_decoding_params'] = model_inputs[ @@ -823,6 +822,14 @@ def fuse_gate_mlp( for name, mlp, layer in model.named_modules_with_parent(): if isinstance(mlp, GatedMLP): init_params = get_init_params(mlp) + + hidden_act = init_params["hidden_act"] + if hidden_act not in ["silu", "gelu"]: + logger.warning( + f"fuse_gate_mlp cannot be done for {name} due to unsupported activation {hidden_act}. Skipping." + ) + continue + init_params["inner_layernorm"] = mlp.inner_layernorm is not None fused_layer = FusedGatedMLP(**init_params) diff --git a/tensorrt_llm/models/opt/model.py b/tensorrt_llm/models/opt/model.py index f66bdf11e..45fc5d08b 100644 --- a/tensorrt_llm/models/opt/model.py +++ b/tensorrt_llm/models/opt/model.py @@ -112,11 +112,9 @@ def __init__(self, config: PretrainedConfig): super().__init__() self.do_layer_norm_before = config.do_layer_norm_before - self.vocab_embedding = Embedding( - config.vocab_size, - config.hidden_size, - dtype=config.dtype, - share_embedding_table=config.share_embedding_table) + self.vocab_embedding = Embedding(config.vocab_size, + config.hidden_size, + dtype=config.dtype) self.position_embedding = Embedding(config.max_position_embeddings, config.hidden_size, dtype=config.dtype) diff --git a/tensorrt_llm/models/phi/model.py b/tensorrt_llm/models/phi/model.py index 5cacb835f..71f64a640 100644 --- a/tensorrt_llm/models/phi/model.py +++ b/tensorrt_llm/models/phi/model.py @@ -144,6 +144,8 @@ def forward( class 
PhiForCausalLM(DecoderModelForCausalLM): config_class = PhiConfig + config_class = PhiConfig + def __init__(self, config: PretrainedConfig): self.check_config(config) transformer = PhiModel(config) diff --git a/tensorrt_llm/models/qwen/convert.py b/tensorrt_llm/models/qwen/convert.py index 124313f1a..d78c71a6e 100644 --- a/tensorrt_llm/models/qwen/convert.py +++ b/tensorrt_llm/models/qwen/convert.py @@ -1086,7 +1086,7 @@ def convert_hf_qwen(hf_model, if mapping.is_last_pp_rank(): if hf_model.config.tie_word_embeddings: # lm_head.weight has the same weights as embedding - lm_head_weights = v + lm_head_weights = v.clone() else: lm_head_weights = get_weight(model_params, 'lm_head', dtype) diff --git a/tensorrt_llm/models/recurrentgemma/model.py b/tensorrt_llm/models/recurrentgemma/model.py index 856cf90d1..d555fc5c3 100644 --- a/tensorrt_llm/models/recurrentgemma/model.py +++ b/tensorrt_llm/models/recurrentgemma/model.py @@ -149,11 +149,9 @@ def __init__(self, config: PretrainedConfig) -> None: self.lru_width = config.rnn_hidden_size n_layer = config.num_hidden_layers - self.vocab_embedding = Embedding( - config.vocab_size, - config.hidden_size, - dtype=config.dtype, - share_embedding_table=config.share_embedding_table) + self.vocab_embedding = Embedding(config.vocab_size, + config.hidden_size, + dtype=config.dtype) self.layers = ModuleList( [ResidualLayer(config, layer_idx=i) for i in range(n_layer)]) diff --git a/tensorrt_llm/plugin/plugin.py b/tensorrt_llm/plugin/plugin.py index 15333bd1a..5b24dd85f 100644 --- a/tensorrt_llm/plugin/plugin.py +++ b/tensorrt_llm/plugin/plugin.py @@ -23,7 +23,7 @@ import tensorrt as trt -from .._ipc_utils import IpcMemory +from .._ipc_utils import IpcMemory, can_access_peer from ..logger import logger from ..mapping import Mapping @@ -171,6 +171,7 @@ class PluginConfig(metaclass=PluginConfigMeta): _paged_state: bool = field(default=True, init=False) _streamingllm: bool = field(default=False, init=False) _manage_weights: bool = 
field(default=False, init=False) + _use_fused_mlp: bool = field(default=True, init=False) def update_from_dict(self, config: dict): for name in config.keys(): @@ -297,6 +298,7 @@ def set_nccl_plugin(self, dtype: str = "auto"): "paged_state", "streamingllm", "reduce_fusion", + "use_fused_mlp", ] @@ -378,10 +380,9 @@ def max_workspace_size_auto(tp_size: int) -> int: @staticmethod def allocate_workspace(mapping: Mapping, - size: int, - is_p2p_supported: bool = True - ) -> Tuple[List[IpcMemory], "torch.tensor"]: + size: int) -> Tuple[List[IpcMemory], "torch.tensor"]: import torch + is_p2p_supported = can_access_peer(mapping) ipc_buffers_ping = IpcMemory(mapping, size * mapping.tp_size, is_p2p_supported) ipc_buffers_pong = IpcMemory(mapping, size * mapping.tp_size, diff --git a/tensorrt_llm/quantization/functional.py b/tensorrt_llm/quantization/functional.py index fe32b5525..4b781362c 100644 --- a/tensorrt_llm/quantization/functional.py +++ b/tensorrt_llm/quantization/functional.py @@ -655,3 +655,34 @@ def postprocess_weight_only(tllm_key, weights, quant_mode): } else: return {tllm_key: weights} # Bias + + +def postprocess_fp8_rowwise(tllm_key, weights, **kwargs): + if tllm_key.endswith("per_channel_scale"): + return {} + + config = kwargs.get("config", None) + if weights[1] is not None: + assert weights[0].dtype == torch.float8_e4m3fn + scale = weights[1].to(torch.float32).reshape(-1) + return { + tllm_key: weights[0], + tllm_key.replace("weight", "per_channel_scale"): scale + } + else: + clamp_val = config.quantization.clamp_val + # activation range bound. + x = weights[0].to(torch.float32).clamp(clamp_val[0], clamp_val[1]) + xmax = x.abs().max(-1, keepdim=True).values + # minimum scaling factor. 
+ torch_weight_scales = (xmax / 448.0).clamp(min=1.0 / (448.0 * 512.0)) + out = x / torch_weight_scales + torch_weight_scales = torch_weight_scales.reshape(-1) + out = torch.clamp(out, -448, 448) + processed_torch_weights = out.to(torch.float8_e4m3fn) + processed_torch_weights = processed_torch_weights.to( + torch.float8_e4m3fn) + return { + tllm_key: processed_torch_weights, + tllm_key.replace("weight", "per_channel_scale"): torch_weight_scales + } diff --git a/tensorrt_llm/quantization/layers.py b/tensorrt_llm/quantization/layers.py index 042590103..caf8604b5 100644 --- a/tensorrt_llm/quantization/layers.py +++ b/tensorrt_llm/quantization/layers.py @@ -33,13 +33,15 @@ from ..layers.linear import Linear, RowLinear from ..module import Module from ..parameter import Parameter -from .functional import (change_qkv_leading_dim, dequantize, fp8_rowwise_gemm, - fp8_rowwise_rms_norm, postprocess_weight_only, - quantize, quantize_fp8_per_token, quantize_per_token, - quantize_tensor, smooth_quant_gemm, - smooth_quant_layer_norm, smooth_quant_rms_norm, - weight_only_groupwise_quant_matmul, - weight_only_quant_matmul) + +# isort: off +from .functional import ( + change_qkv_leading_dim, dequantize, fp8_rowwise_gemm, fp8_rowwise_rms_norm, + postprocess_fp8_rowwise, postprocess_weight_only, quantize, + quantize_fp8_per_token, quantize_per_token, quantize_tensor, + smooth_quant_gemm, smooth_quant_layer_norm, smooth_quant_rms_norm, + weight_only_groupwise_quant_matmul, weight_only_quant_matmul) +# isort: on from .mode import QuantMode @@ -443,6 +445,7 @@ def __init__(self, dtype="float32") self.quant_mode = quant_mode + self.tllm_to_externel_key_dict = {"weight": ["weight", "weight_scale"]} def forward(self, x, lora_runtime_params=None): assert lora_runtime_params is None, "lora is not supported on SmoothQuantLinear now" @@ -461,6 +464,9 @@ def forward(self, x, lora_runtime_params=None): return x + def postprocess(self, tllm_key, weights, **kwargs): + return 
postprocess_fp8_rowwise(tllm_key, weights, **kwargs) + Fp8RowwiseColumnLinear = Fp8RowwiseLinear @@ -500,6 +506,7 @@ def __init__( dtype="float32") self.quant_mode = quant_mode + self.tllm_to_externel_key_dict = {"weight": ["weight", "weight_scale"]} def forward(self, x, lora_runtime_params=None, reduce_fusion_params=None): assert lora_runtime_params is None, "lora is not supported on SmoothQuantRowLinear now" @@ -529,6 +536,9 @@ def forward(self, x, lora_runtime_params=None, reduce_fusion_params=None): return x + def postprocess(self, tllm_key, weights, **kwargs): + return postprocess_fp8_rowwise(tllm_key, weights, **kwargs) + class WeightOnlyQuantLinear(Linear): @@ -600,16 +610,10 @@ def forward(self, x, lora_runtime_params=None): return x - def postprocess(self, - tllm_key, - weights, - using_head_as_leading_dim=False, - num_heads=-1): + def postprocess(self, tllm_key, weights, **kwargs): if "per_channel_scale" in tllm_key: return {} - weights = super().postprocess(tllm_key, weights, - using_head_as_leading_dim, - num_heads)[tllm_key] + weights = super().postprocess(tllm_key, weights, **kwargs)[tllm_key] weights = weights.to(str_dtype_to_torch(self.dtype)) return postprocess_weight_only(tllm_key, weights, self.weight_only_quant_mode) @@ -681,7 +685,7 @@ def forward(self, x, lora_runtime_params=None, reduce_fusion_params=None): return x - def postprocess(self, tllm_key, weights): + def postprocess(self, tllm_key, weights, **kwargs): if "per_channel_scale" in tllm_key: return {} weights = weights.to(str_dtype_to_torch(self.dtype)) @@ -749,16 +753,17 @@ def unpack_int32_into_int8(w_packed): def pad_like(w, target_shape, value=0): - if w.shape == target_shape: - return w + if w.shape != target_shape: + pad_dim = [] + for dim in range(len(target_shape)): + current_dim = -1 - dim + pad_dim.append(0) + pad_dim.append( + max(0, target_shape[current_dim] - w.shape[current_dim])) + res = F.pad(w, pad_dim, value=value) + return res else: - if w.dim() == 1: - return 
F.pad(w, (0, max(0, target_shape[-1] - w.shape[-1])), - value=value) - else: - return F.pad(w, (0, max(0, target_shape[-1] - w.shape[-1]), 0, - max(0, target_shape[-2] - w.shape[-2])), - value=value) + return w class WeightOnlyGroupwiseQuantLinear(Linear): @@ -811,7 +816,6 @@ def __init__( scale_shape = (self.in_features // group_size, self.out_features) self.weights_scaling_factor = Parameter(shape=scale_shape, dtype=dtype) - self.transposed_weight = True self.tp_rank = tp_rank if self.is_padded: self.tp_dim = -1 @@ -866,11 +870,11 @@ def forward(self, x, lora_runtime_params=None): return x - def postprocess(self, - tllm_key, - weights, - using_head_as_leading_dim=False, - num_heads=-1): + def postprocess(self, tllm_key, weights, **kwargs): + using_head_as_leading_dim = kwargs.get("using_head_as_leading_dim", + False) + config = kwargs.get("config", None) + num_heads = config.num_heads if not (tllm_key.endswith("bias") or tllm_key.endswith("weight")): return {} if self.is_qkv and type(weights) is list and len(weights) > 3: @@ -921,7 +925,7 @@ def postprocess(self, scales_fp16 = pad_like(scales_fp16, self.weights_scaling_factor.shape, 1) qzeros_unpacked_int32 = pad_like(qzeros_unpacked_int32, - self.zero.shape) + self.zero.shape, 7) zeros_x_scales_fp16 = (-qzeros_unpacked_int32 + 7) * scales_fp16 zeros_x_scales_fp16 = zeros_x_scales_fp16.to( str_dtype_to_torch(self.dtype)) @@ -956,14 +960,6 @@ def __init__( ): multiple = max((128 if use_w4a8_awq else 64), group_size) * tp_size self.is_padded = False - if in_features % multiple > 0: - in_features = math.ceil(in_features / multiple) * multiple - self.is_padded = True - if out_features % multiple > 0: - out_features = math.ceil(out_features / multiple) * multiple - self.is_padded = True - multiple = max((128 if use_w4a8_awq else 64), group_size) * tp_size - self.is_padded = False if in_features % multiple > 0: in_features = math.ceil(in_features / multiple) * multiple self.is_padded = True @@ -991,11 +987,6 @@ def 
__init__( scale_shape = (self.in_features // group_size, self.out_features) self.weights_scaling_factor = Parameter(shape=scale_shape, dtype=dtype) - self.transposed_weight = True - self.tp_rank = tp_rank - if self.is_padded: - self.tp_dim = -1 - self.transposed_weight = True self.tp_rank = tp_rank if self.is_padded: self.tp_dim = -1 @@ -1051,7 +1042,7 @@ def forward(self, x, lora_runtime_params=None, reduce_fusion_params=None): return x - def postprocess(self, tllm_key, weights): + def postprocess(self, tllm_key, weights, **kwargs): if not (tllm_key.endswith("bias") or tllm_key.endswith("weight")): return {} if tllm_key.endswith("bias"): @@ -1088,7 +1079,7 @@ def postprocess(self, tllm_key, weights): scales_fp16 = pad_like(scales_fp16, self.weights_scaling_factor.shape, 1) qzeros_unpacked_int32 = pad_like(qzeros_unpacked_int32, - self.zero.shape) + self.zero.shape, 7) zeros_x_scales_fp16 = (-qzeros_unpacked_int32 + 7) * scales_fp16 zeros_x_scales_fp16 = zeros_x_scales_fp16.to( str_dtype_to_torch(self.dtype)) @@ -1368,13 +1359,13 @@ def forward(self, x, lora_runtime_params=None): lora_runtime_params=lora_runtime_params, lora_hidden_state=lora_hidden_state) - def postprocess(self, tllm_key, weights): + def postprocess(self, tllm_key, weights, **kwargs): # TODO: add FP8 modelopt format support if self.is_qkv: if tllm_key.endswith("scaling_factor"): return 448.0 / max(weights).unsqueeze(0) else: - return super().postprocess(tllm_key, weights) + return super().postprocess(tllm_key, weights, **kwargs) if tllm_key.endswith("scaling_factor"): return 448.0 / weights.unsqueeze(0) else: @@ -1463,7 +1454,7 @@ def forward(self, x, lora_runtime_params=None, reduce_fusion_params=None): reduce_fusion_params=reduce_fusion_params) return ret - def postprocess(self, tllm_key, weights): + def postprocess(self, tllm_key, weights, **kwargs): # TODO: add FP8 modelopt format support if tllm_key.endswith("scaling_factor"): return 448.0 / weights.unsqueeze(0) diff --git 
a/tensorrt_llm/runtime/enc_dec_model_runner.py b/tensorrt_llm/runtime/enc_dec_model_runner.py index b69634728..d61b735a7 100644 --- a/tensorrt_llm/runtime/enc_dec_model_runner.py +++ b/tensorrt_llm/runtime/enc_dec_model_runner.py @@ -7,7 +7,6 @@ import tensorrt as trt from ..logger import logger -from .._ipc_utils import set_peer_access from .._utils import torch_to_numpy, trt_dtype_to_torch, mpi_world_size, mpi_rank from ..plugin.plugin import CustomAllReduceHelper from .generation import ModelConfig, SamplingConfig, LoraManager, GenerationSession @@ -329,11 +328,10 @@ def encoder_run(self, device=self.device).contiguous() if self.encoder_runtime_mapping.tp_size > 1: - is_p2p_supported = set_peer_access(self.encoder_runtime_mapping) ipc_buffers, all_reduce_workspace = CustomAllReduceHelper.allocate_workspace( self.encoder_runtime_mapping, CustomAllReduceHelper.max_workspace_size_auto( - self.encoder_runtime_mapping.tp_size), is_p2p_supported) + self.encoder_runtime_mapping.tp_size)) inputs['all_reduce_workspace'] = all_reduce_workspace if self.encoder_model_config.lora_plugin: diff --git a/tensorrt_llm/runtime/generation.py b/tensorrt_llm/runtime/generation.py index 1fd563cfb..9e8c46c70 100755 --- a/tensorrt_llm/runtime/generation.py +++ b/tensorrt_llm/runtime/generation.py @@ -32,7 +32,6 @@ from tensorrt_llm.runtime.redrafter_utils import * -from .._ipc_utils import set_peer_access from .._utils import (pad_vocab_size, str_dtype_to_torch, torch_to_numpy, trt_dtype_to_torch) from ..bindings import KVCacheType @@ -518,6 +517,7 @@ class ModelConfig: # ReDrafter redrafter_num_beams: int = 0 redrafter_draft_len_per_beam: int = 0 + num_kv_heads_per_layer: Optional[List[int]] = None @dataclass @@ -736,10 +736,12 @@ def __init__(self, self.first_layer:self.last_layer] self.attn_to_general_idx = {} + self.general_to_attn_idx = {} attn_layer_idx = 0 for i in range(self.first_layer, self.last_layer): if self.layer_types[i] == 'attention': 
self.attn_to_general_idx[attn_layer_idx] = i + self.general_to_attn_idx[i] = attn_layer_idx attn_layer_idx += 1 # Cyclic KV cache buffer names. @@ -773,11 +775,10 @@ def __init__(self, self.mapping.pp_size, self.decoder_logits_dtype) if self.mapping.tp_size > 1: - is_p2p_supported = set_peer_access(self.mapping) self.ipc_buffers, self.all_reduce_workspace = CustomAllReduceHelper.allocate_workspace( self.mapping, CustomAllReduceHelper.max_workspace_size_auto( - self.mapping.tp_size), is_p2p_supported) + self.mapping.tp_size)) self.gather_tree = torch.ops.tensorrt_llm.gather_tree @@ -997,8 +998,17 @@ def tokens_per_block(self): def remove_input_padding(self): return self._model_config.remove_input_padding - @property - def num_heads_kv(self): + def get_num_heads_kv(self, layer_idx: Optional[int] = None) -> int: + if layer_idx is None or self._model_config.num_kv_heads_per_layer is None: + return self._model_config.num_kv_heads + + if self._model_config.layer_types: + assert self._model_config.layer_types[ + layer_idx] == "attention", f"Layer {layer_idx} is not an attention layer" + + if self._model_config.num_kv_heads_per_layer: + return self._model_config.num_kv_heads_per_layer[layer_idx] + return self._model_config.num_kv_heads @property @@ -1690,7 +1700,7 @@ def setup(self, num_blocks, self.num_attn_layers, 2, - self.num_heads_kv, + self.get_num_heads_kv(), self.tokens_per_block, self.head_size, ) @@ -1706,7 +1716,7 @@ def setup(self, cross_num_blocks, self.num_layers, 2, - self.num_heads_kv, + self.get_num_heads_kv(), self.tokens_per_block, self.head_size, ) @@ -1714,15 +1724,15 @@ def setup(self, dtype=kv_cache_type, device=self.device) elif self.has_attn_layers: - cache_shape = ( - batch_size, - 2, - self.num_heads_kv, - self.max_attention_window_size, - self.head_size, - ) for i in range(self.first_layer, self.last_layer): if self.layer_types[i] == 'attention': + cache_shape = ( + batch_size, + 2, + self.get_num_heads_kv(self.general_to_attn_idx[i]), + 
self.max_attention_window_size, + self.head_size, + ) self.buffer[f'present_key_value_{i}'] = torch.empty( cache_shape, dtype=kv_cache_type, @@ -1732,7 +1742,7 @@ def setup(self, cross_cache_shape = ( batch_size, 2, - self.num_heads_kv, + self.get_num_heads_kv(), self.encoder_max_input_length, self.head_size, ) @@ -1894,7 +1904,7 @@ def add_tensor_with_bs(x, name, bs): if self.cross_qkv_reuse is None: # see Attention's self.qkv output dim cross_qkv_out_dim = self.num_heads * self.head_size + ( - 2 * self.num_heads_kv * self.head_size) + 2 * self.get_num_heads_kv() * self.head_size) cross_qkv_shape = encoder_output.shape[:-1] + ( cross_qkv_out_dim, ) cross_qkv_reuse = torch.empty(cross_qkv_shape, @@ -1980,7 +1990,9 @@ def add_tensor_with_bs(x, name, bs): for idx in range(self.first_layer, self.last_layer): if not self.use_gpt_attention_plugin and self.layer_types[ idx] == 'attention': - kv_cache_shape = (batch_size, 2, self.num_heads_kv, 0, + kv_cache_shape = (batch_size, 2, + self.get_num_heads_kv( + self.general_to_attn_idx[idx]), 0, self.head_size) # for empty tensor, TRT does not really use the tensor data, so any dtype is fine kv_cache_buffer = torch.zeros((1, ), @@ -1994,7 +2006,7 @@ def add_tensor_with_bs(x, name, bs): if self.cross_attention: cross_kv_cache_shape = (batch_size, 2, - self.num_heads_kv, 0, + self.get_num_heads_kv(), 0, self.head_size) # for empty tensor, TRT does not really use the tensor data, so any dtype is fine cross_kv_cache_buffer = torch.zeros((1, ), @@ -2269,7 +2281,8 @@ def add_tensor_with_shape(x, name, shape): if not self.paged_kv_cache: for attn_idx, layer_idx in self.attn_to_general_idx.items(): if not self.use_gpt_attention_plugin: - next_shape = (batch_size * beam_width, 2, self.num_heads_kv, + next_shape = (batch_size * beam_width, 2, + self.get_num_heads_kv(), max_context_length + step, self.head_size) # We will make current layer's output KV-cache overwrite previous layers input KV-cache # buffer id: ... 5, 6, 7, 8, 9, ... 
@@ -2785,7 +2798,7 @@ def reorder_kv_cache_for_beam_search( assert self.buffer is not None assert self.parent_ids.shape[:2] == (batch_size, beam_width) - cache_shape = (batch_size * beam_width, 2, self.num_heads_kv, + cache_shape = (batch_size * beam_width, 2, self.get_num_heads_kv(), max_context_length + step, self.head_size) import functools @@ -3738,7 +3751,8 @@ def decode(self, self.buffer[f'host_kv_cache_pool_pointers'] = torch.tensor( [self.kv_cache_pool.data_ptr(), 0], dtype=torch.int64) - block_size = self.num_heads_kv * self.tokens_per_block * self.head_size + block_size = self.get_num_heads_kv( + ) * self.tokens_per_block * self.head_size self.kv_cache_manager = KVCacheManager( num_layers=self.num_attn_layers, num_blocks=num_blocks, @@ -3760,7 +3774,8 @@ def decode(self, [self.cross_kv_cache_pool.data_ptr(), 0], dtype=torch.int64) - cross_block_size = self.num_heads_kv * self.tokens_per_block * self.head_size + cross_block_size = self.get_num_heads_kv( + ) * self.tokens_per_block * self.head_size self.cross_kv_cache_manager = KVCacheManager( num_layers=self.num_layers, num_blocks=cross_num_blocks, @@ -3802,7 +3817,7 @@ def decode(self, if self.paged_kv_cache: self.kv_cache_updater.init_paged_kv_cache( - self.num_layers, self.num_heads_kv, self.head_size, + self.num_layers, self.get_num_heads_kv(), self.head_size, kv_cache_type, self.kv_cache_manager, self.buffer[f'host_kv_cache_pool_pointers']) else: @@ -3811,7 +3826,7 @@ def decode(self, for i in range(self.first_layer, self.last_layer) ] self.kv_cache_updater.init_linear_kv_cache( - self.num_layers, self.num_heads_kv, self.head_size, + self.num_layers, self.get_num_heads_kv(), self.head_size, kv_cache_type, past_key_value_list) stop_words_lens = None diff --git a/tensorrt_llm/runtime/model_runner.py b/tensorrt_llm/runtime/model_runner.py index 717c3f76d..6c381fcf0 100644 --- a/tensorrt_llm/runtime/model_runner.py +++ b/tensorrt_llm/runtime/model_runner.py @@ -465,6 +465,22 @@ def from_engine( 'Build 
config doesn\'t have kv_cache_type, you might need to rebuild your enigne.' ) + # TODO(oargov): this is a hack, make it prettier! + if hasattr(pretrained_config, "get_layer_num_kv_heads"): + # each layer has a different number of kv heads + attention_layers = [ + layer_idx for layer_idx, layer_type in enumerate( + pretrained_config.layer_types) if layer_type == "attention" + ] if hasattr(pretrained_config, "layer_types") else list( + range(pretrained_config.num_hidden_layers)) + num_kv_heads_per_layer = [ + pretrained_config.get_layer_num_kv_heads(layer_idx) + if layer_idx in attention_layers else 0 + for layer_idx in range(pretrained_config.num_hidden_layers) + ] + else: + num_kv_heads_per_layer = None + model_config = ModelConfig( max_batch_size=build_config.max_batch_size, max_beam_width=build_config.max_beam_width, @@ -498,6 +514,7 @@ def from_engine( pretrained_config, 'num_medusa_heads') else 0, **rnn_configs_kwargs, gpu_weights_percent=gpu_weights_percent, + num_kv_heads_per_layer=num_kv_heads_per_layer, redrafter_num_beams=pretrained_config.redrafter_num_beams if hasattr(pretrained_config, 'redrafter_num_beams') else 0, redrafter_draft_len_per_beam=pretrained_config. diff --git a/tensorrt_llm/runtime/model_runner_cpp.py b/tensorrt_llm/runtime/model_runner_cpp.py index 516306f65..dd63de63a 100644 --- a/tensorrt_llm/runtime/model_runner_cpp.py +++ b/tensorrt_llm/runtime/model_runner_cpp.py @@ -79,6 +79,7 @@ def from_dir( sink_token_length: Optional[int] = None, kv_cache_free_gpu_memory_fraction: Optional[float] = None, medusa_choices: list[list[int]] | None = None, + lookahead_config: list[int] | None = None, debug_mode: bool = False, lora_ckpt_source: str = "hf", gpu_weights_percent: float = 1, @@ -235,6 +236,11 @@ def from_dir( if multi_block_mode is not None: multi_block_mode = False # Medusa doesn't support multi-block mode. 
+ if lookahead_config is not None: + [w, n, g] = lookahead_config + decoding_config.lookahead_decoding_config = trtllm.LookaheadDecodingConfig( + w, n, g) + if max_batch_size is None: max_batch_size = model_config.max_batch_size else: diff --git a/tensorrt_llm/version.py b/tensorrt_llm/version.py index a68d887f7..cef9e1f45 100644 --- a/tensorrt_llm/version.py +++ b/tensorrt_llm/version.py @@ -12,4 +12,4 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "0.13.0.dev2024081300" +__version__ = "0.13.0.dev2024082000" diff --git a/tests/bindings/test_executor_bindings.py b/tests/bindings/test_executor_bindings.py index 2ff552059..60b70bd5d 100644 --- a/tests/bindings/test_executor_bindings.py +++ b/tests/bindings/test_executor_bindings.py @@ -516,9 +516,16 @@ def validate_results_shapes(result, input_length, max_output_len, assert result.context_logits is None if return_generation_logits: assert len(result.generation_logits.shape) == 3 - assert list(result.generation_logits.shape) == [ - beam_width, max_output_len, vocab_size_padded - ] + if streaming: + assert list(result.generation_logits.shape) == [ + max_output_len, beam_width, vocab_size_padded + ] or list(result.generation_logits.shape) == [ + 1, beam_width, vocab_size_padded + ] + else: + assert list(result.generation_logits.shape) == [ + beam_width, max_output_len, vocab_size_padded + ] def verify_output(beam_tokens, test_data, given_input_lengths): for batch_id, tokens in beam_tokens.items(): @@ -610,6 +617,76 @@ def verify_output(beam_tokens, test_data, given_input_lengths): verify_output(tokens, test_data, given_input_lengths) +@pytest.mark.parametrize("streaming", [False, True]) +@pytest.mark.parametrize("beam_width", [1]) +@skip_pre_ampere # ContextFMHAType with fp32 acc is not supported in pre-ampere architecture +def test_finish_reason(streaming: bool, 
beam_width: int, model_files, + model_path): + if streaming and beam_width > 1: + pytest.skip("Test does not support streaming with beam search") + executor = trtllm.Executor(model_path, trtllm.ModelType.DECODER_ONLY, + trtllm.ExecutorConfig(beam_width)) + requests = [ + # Finish due to length. + trtllm.Request([1, 2, 3, 4], 5, streaming, + trtllm.SamplingConfig(beam_width)), + # Finish due to end id. + trtllm.Request([1, 2, 3, 4], + 5, + streaming, + trtllm.SamplingConfig(beam_width), + end_id=4), + # Finish due to stop word. + trtllm.Request([1, 2, 3, 4], + 5, + streaming, + trtllm.SamplingConfig(beam_width), + stop_words=[[4, 2]]), + ] + req_ids = executor.enqueue_requests(requests) + req_to_batch_id = {req_ids[i]: i for i in range(len(requests))} + + num_finished = 0 + i = 0 + num_responses = 0 + max_wait_ms = 10000 + while num_finished < len(requests) and i < max_wait_ms: + wait_time = datetime.timedelta(milliseconds=1) + responses = executor.await_responses(wait_time) + for response in responses: + num_responses += 1 + assert not response.has_error( + ), f"Request id {response.request_id} failed with err {response.error_msg}" + result = response.result + num_finished += result.is_final + batch_id = req_to_batch_id[response.request_id] + + # Non final results should have "NOT_FINISHED". Revise this when streaming + beam_width > 1 is enabled. + if not result.is_final: + assert all([ + r == trtllm.FinishReason.NOT_FINISHED + for r in result.finish_reasons + ]) + # Check if finish reason is correct. 
+ elif batch_id == 0: + assert all([ + r == trtllm.FinishReason.LENGTH + for r in result.finish_reasons + ]) + elif batch_id == 1: + assert all([ + r == trtllm.FinishReason.END_ID + for r in result.finish_reasons + ]) + elif batch_id == 2: + assert all([ + r == trtllm.FinishReason.STOP_WORDS + for r in result.finish_reasons + ]) + i += 1 + assert i < max_wait_ms + + @skip_pre_ampere # ContextFMHAType with fp32 acc is not supported in pre-ampere architecture def test_gpt_executor_timed_out(model_files, model_path): beam_width = 1 @@ -784,7 +861,9 @@ def test_request(): "external_draft_tokens_config": trtllm.ExternalDraftTokensConfig([1, 2, 3]), "prompt_tuning_config": trtllm.PromptTuningConfig(torch.ones(100, 64)), - "lora_config": trtllm.LoraConfig(1) + "lora_config": trtllm.LoraConfig(1), + "logits_post_processor_name": "my_logits_pp", + "client_id": 1234 } request = trtllm.Request(**kwargs) for k, v in kwargs.items(): @@ -809,12 +888,16 @@ def test_result(): result.log_probs = [[1.0, 2.0, 3.0]] result.context_logits = torch.ones(3, 100) result.generation_logits = torch.ones(1, 3, 100) + result.encoder_output = torch.ones(1, 1) + result.finish_reasons = [trtllm.FinishReason.LENGTH] assert result.is_final == True assert result.output_token_ids == [[1, 2, 3]] assert result.cum_log_probs == [1.0, 2.0, 3.0] assert result.log_probs == [[1.0, 2.0, 3.0]] assert (result.context_logits == torch.ones(3, 100)).all() assert (result.generation_logits == torch.ones(1, 3, 100)).all() + assert (result.encoder_output == torch.ones(1, 1)).all() + assert result.finish_reasons == [trtllm.FinishReason.LENGTH] def test_response(): @@ -973,6 +1056,24 @@ def test_speculative_decoding_config(): assert config.medusa_choices == [[0, 0], [0, 1]] +def test_logits_post_processor_config(): + config = trtllm.LogitsPostProcessorConfig() + assert config.processor_map == None + assert config.processor_batched == None + assert config.replicate == True + + kwargs = { + "processor_map": { + "test_pp": 
None + }, + "processor_batched": None, + "replicate": False + } + config = trtllm.LogitsPostProcessorConfig(**kwargs) + for k, v in kwargs.items(): + assert getattr(config, k) == v + + def test_executor_config(): config = trtllm.ExecutorConfig() assert config.max_beam_width == 1 @@ -986,10 +1087,9 @@ def test_executor_config(): assert config.batching_type == trtllm.BatchingType.INFLIGHT assert config.parallel_config is None assert isinstance(config.peft_cache_config, trtllm.PeftCacheConfig) - assert config.logits_post_processor_map is None - assert config.logits_post_processor_batched is None - assert config.replicate_logits_post_processor == True + assert config.logits_post_processor_config is None assert config.decoding_config is None + assert config.debug_config is None kwargs = { "max_beam_width": @@ -1014,11 +1114,16 @@ def test_executor_config(): trtllm.ParallelConfig(), "peft_cache_config": trtllm.PeftCacheConfig(10), - "logits_post_processor_map": {}, - "replicate_logits_post_processor": - False, + "logits_post_processor_config": + trtllm.LogitsPostProcessorConfig(), "decoding_config": trtllm.DecodingConfig(trtllm.DecodingMode.TopKTopP()), + "extended_runtime_perf_knob_config": + trtllm.ExtendedRuntimePerfKnobConfig(multi_block_mode=True), + "debug_config": + trtllm.DebugConfig(dump_input_tensors=True, + dump_output_tensors=True, + debug_tensor_names=["test"]) } config = trtllm.ExecutorConfig(**kwargs) for k, v in kwargs.items(): @@ -1029,6 +1134,10 @@ def test_executor_config(): assert isinstance(config.kv_cache_config, trtllm.KvCacheConfig) assert isinstance(config.parallel_config, trtllm.ParallelConfig) assert isinstance(config.peft_cache_config, trtllm.PeftCacheConfig) + assert config.extended_runtime_perf_knob_config.multi_block_mode == True + assert isinstance(config.debug_config, trtllm.DebugConfig) + assert isinstance(config.logits_post_processor_config, + trtllm.LogitsPostProcessorConfig) def test_parallel_config(): @@ -1103,9 +1212,8 @@ def 
logits_post_processor(req_id: int, logits: torch.Tensor, # Create executor beam_width = 1 executor_config = trtllm.ExecutorConfig(beam_width) - executor_config.logits_post_processor_map = { - "my_logits_pp": logits_post_processor - } + executor_config.logits_post_processor_config = trtllm.LogitsPostProcessorConfig( + {"my_logits_pp": logits_post_processor}) executor = trtllm.Executor(model_path, trtllm.ModelType.DECODER_ONLY, executor_config) @@ -1162,7 +1270,8 @@ def logits_post_processor_batched( # Create executor beam_width = 1 executor_config = trtllm.ExecutorConfig(beam_width) - executor_config.logits_post_processor_batched = logits_post_processor_batched + executor_config.logits_post_processor_config = trtllm.LogitsPostProcessorConfig( + None, logits_post_processor_batched) executor = trtllm.Executor(model_path, trtllm.ModelType.DECODER_ONLY, executor_config) @@ -1298,18 +1407,99 @@ def test_peft_cache_config_pickle(): assert config.host_cache_size == config_copy.host_cache_size +def test_decoding_config_pickle(): + config = trtllm.DecodingConfig( + decoding_mode=trtllm.DecodingMode.BeamSearch()) + config_copy = pickle.loads(pickle.dumps(config)) + assert config_copy.decoding_mode.isBeamSearch + assert config.lookahead_decoding_config == config_copy.lookahead_decoding_config + assert config.medusa_choices == config_copy.medusa_choices + + +def test_debug_config_pickle(): + config = trtllm.DebugConfig(dump_input_tensors=True, + dump_output_tensors=True, + debug_tensor_names=["test"]) + config_copy = pickle.loads(pickle.dumps(config)) + assert config.dump_input_tensors == config_copy.dump_input_tensors + assert config.dump_output_tensors == config_copy.dump_output_tensors + assert config.debug_tensor_names == config_copy.debug_tensor_names + + +def test_logits_post_processor_config_pickle(): + kwargs = { + "processor_map": { + "test_pp": None + }, + "processor_batched": None, + "replicate": False + } + config = trtllm.LogitsPostProcessorConfig(**kwargs) + 
config_copy = pickle.loads(pickle.dumps(config)) + for k in kwargs: + assert getattr(config, k) == getattr(config_copy, k) + + def test_executor_config_pickle(): beam_width = 2 config = trtllm.ExecutorConfig(beam_width) - config.scheduler_config = trtllm.SchedulerConfig() - config.kv_cache_config = trtllm.KvCacheConfig() - config.parallel_config = trtllm.ParallelConfig() - config.peft_cache_config = trtllm.PeftCacheConfig(1) + + kwargs = { + "max_beam_width": + 2, + "max_batch_size": + 8, + "max_num_tokens": + 128, + "scheduler_config": + trtllm.SchedulerConfig(trtllm.CapacitySchedulerPolicy.MAX_UTILIZATION), + "kv_cache_config": + trtllm.KvCacheConfig(enable_block_reuse=True), + "enable_chunked_context": + True, + "normalize_log_probs": + False, + "iter_stats_max_iterations": + 100, + "batching_type": + trtllm.BatchingType.STATIC, + "parallel_config": + trtllm.ParallelConfig(), + "peft_cache_config": + trtllm.PeftCacheConfig(10), + "logits_post_processor_config": + trtllm.LogitsPostProcessorConfig(), + "decoding_config": + trtllm.DecodingConfig(trtllm.DecodingMode.TopKTopP()), + "extended_runtime_perf_knob_config": + trtllm.ExtendedRuntimePerfKnobConfig(multi_block_mode=True), + "debug_config": + trtllm.DebugConfig(dump_input_tensors=True, + dump_output_tensors=True, + debug_tensor_names=["test"]) + } + config = trtllm.ExecutorConfig(**kwargs) + for k, v in kwargs.items(): + if "config" not in k: + assert getattr(config, k) == v + pickle.dumps(config) config_copy = pickle.loads(pickle.dumps(config)) assert config.max_beam_width == config_copy.max_beam_width + assert config.max_batch_size == config_copy.max_batch_size + assert config.max_num_tokens == config_copy.max_num_tokens assert config.scheduler_config.capacity_scheduler_policy == config_copy.scheduler_config.capacity_scheduler_policy assert config.kv_cache_config.enable_block_reuse == config_copy.kv_cache_config.enable_block_reuse + assert config.enable_chunked_context == config_copy.enable_chunked_context + 
assert config.normalize_log_probs == config_copy.normalize_log_probs + assert config.normalize_log_probs == config_copy.normalize_log_probs + assert config.iter_stats_max_iterations == config_copy.iter_stats_max_iterations + assert config.batching_type == config_copy.batching_type + assert config.parallel_config.communication_type == config_copy.parallel_config.communication_type + assert config.peft_cache_config.num_host_module_layer == config_copy.peft_cache_config.num_host_module_layer + assert config_copy.decoding_config.decoding_mode.isTopKandTopP + assert config.extended_runtime_perf_knob_config.multi_block_mode == config_copy.extended_runtime_perf_knob_config.multi_block_mode + assert config.debug_config.dump_input_tensors == config_copy.debug_config.dump_input_tensors def test_return_full_tokens(): diff --git a/tests/bindings/test_gpt_manager.py b/tests/bindings/test_gpt_manager.py index 947eaeff8..a90c53eb8 100644 --- a/tests/bindings/test_gpt_manager.py +++ b/tests/bindings/test_gpt_manager.py @@ -32,7 +32,7 @@ def get_model_spec() -> model_spec.ModelSpec: model_spec_obj = model_spec.ModelSpec( 'input_tokens.npy', _tb.DataType.HALF).use_gpt_plugin().set_kv_cache_type( - model_spec.KVCacheType.PAGED).use_packed_input() + _tb.KVCacheType.PAGED).use_packed_input() get_model_spec.model_spec_obj = model_spec_obj return get_model_spec.model_spec_obj diff --git a/tests/functional/test_moe.py b/tests/functional/test_moe.py index 1e1e7e0a7..c6819f3f1 100644 --- a/tests/functional/test_moe.py +++ b/tests/functional/test_moe.py @@ -16,6 +16,7 @@ import math import unittest from collections import OrderedDict +from itertools import product import numpy as np @@ -32,6 +33,7 @@ from tensorrt_llm import Tensor from tensorrt_llm._utils import (torch_to_numpy, trt_dtype_to_str, trt_dtype_to_torch) +from tensorrt_llm.layers.lora import Lora, LoraParams from tensorrt_llm.layers.moe import MoeConfig, MoeOOTB from tensorrt_llm.models.modeling_utils import QuantConfig from 
tensorrt_llm.quantization import QuantAlgo, QuantMode @@ -362,6 +364,145 @@ def create_weights(self, num_experts, hidden_size, ffn_hidden_size, bias, self.activation_scaling_factor_1 = None self.activation_scaling_factor_2 = None + def create_lora_weights(self, num_experts, hidden_size, ffn_hidden_size, + dtype, num_reqs, lora_rank): + genfn = torch.randn + + self.lora_rank = lora_rank + + fc1_weight_rescale_1 = math.sqrt(2.0 / lora_rank) + fc1_weight_rescale_2 = math.sqrt(2.0 / ffn_hidden_size) + fc2_weight_rescale_1 = math.sqrt(2.0 / lora_rank) + fc2_weight_rescale_2 = math.sqrt(2.0 / hidden_size) + + self.lora_fc1_weights_1 = (genfn( + (num_experts, lora_rank, hidden_size), + dtype=trt_dtype_to_torch(dtype), + device="cuda", + ) * fc1_weight_rescale_1) + self.lora_fc1_weights_2 = (genfn( + (num_experts, ffn_hidden_size, lora_rank), + dtype=trt_dtype_to_torch(dtype), + device="cuda", + ) * fc1_weight_rescale_2) + + self.lora_fc1_weights_ptrs = torch.tensor( + (self.lora_fc1_weights_1.data_ptr(), + self.lora_fc1_weights_2.data_ptr()), + dtype=torch.int64, + ).repeat(num_reqs, 1) + self.lora_fc1_ranks = torch.tensor((lora_rank, ), + dtype=torch.int32).repeat(num_reqs) + + self.lora_gated_weights_1 = (genfn( + (num_experts, lora_rank, hidden_size), + dtype=trt_dtype_to_torch(dtype), + device="cuda", + ) * fc1_weight_rescale_1) + self.lora_gated_weights_2 = (genfn( + (num_experts, ffn_hidden_size, lora_rank), + dtype=trt_dtype_to_torch(dtype), + device="cuda", + ) * fc1_weight_rescale_2) + + self.lora_gated_weights_ptrs = torch.tensor( + (self.lora_gated_weights_1.data_ptr(), + self.lora_gated_weights_2.data_ptr()), + dtype=torch.int64, + ).repeat(num_reqs, 1) + self.lora_gated_ranks = torch.tensor((lora_rank, ), + dtype=torch.int32).repeat(num_reqs) + + self.lora_fc2_weights_1 = (genfn( + (num_experts, lora_rank, ffn_hidden_size), + dtype=trt_dtype_to_torch(dtype), + device="cuda", + ) * fc2_weight_rescale_1) + self.lora_fc2_weights_2 = (genfn( + (num_experts, 
hidden_size, lora_rank), + dtype=trt_dtype_to_torch(dtype), + device="cuda", + ) * fc2_weight_rescale_2) + + self.lora_fc2_weights_ptrs = torch.tensor( + (self.lora_fc2_weights_1.data_ptr(), + self.lora_fc2_weights_2.data_ptr()), + dtype=torch.int64, + ).repeat(num_reqs, 1) + self.lora_fc2_ranks = torch.tensor((lora_rank, ), + dtype=torch.int32).repeat(num_reqs) + + def create_lora_params(self, num_reqs): + + moe_h_to_4h_weights_pointers = Tensor( + shape=(num_reqs, 2), + dtype=tensorrt_llm.str_dtype_to_trt("int64"), + name="moe_h_to_4h_weights_pointers", + ) + moe_h_to_4h_lora_ranks = Tensor( + shape=(num_reqs, ), + dtype=tensorrt_llm.str_dtype_to_trt("int32"), + name="moe_h_to_4h_lora_ranks", + ) + moe_4h_to_h_weights_pointers = Tensor( + shape=(num_reqs, 2), + dtype=tensorrt_llm.str_dtype_to_trt("int64"), + name="moe_4h_to_h_weights_pointers", + ) + moe_4h_to_h_lora_ranks = Tensor( + shape=(num_reqs, ), + dtype=tensorrt_llm.str_dtype_to_trt("int32"), + name="moe_4h_to_h_lora_ranks", + ) + moe_gate_weights_pointers = Tensor( + shape=(num_reqs, 2), + dtype=tensorrt_llm.str_dtype_to_trt("int64"), + name="moe_gate_weights_pointers", + ) + moe_gate_lora_ranks = Tensor( + shape=(num_reqs, ), + dtype=tensorrt_llm.str_dtype_to_trt("int32"), + name="moe_gate_lora_ranks", + ) + host_context_lengths = Tensor( + shape=(num_reqs, ), + dtype=tensorrt_llm.str_dtype_to_trt("int32"), + name="host_context_lengths", + ) + host_request_types = Tensor( + shape=(num_reqs, ), + dtype=tensorrt_llm.str_dtype_to_trt("int32"), + name="host_request_types", + ) + + self.lora_params = LoraParams( + lora_ranks=[{ + "moe_h_to_4h_lora_ranks": moe_h_to_4h_lora_ranks, + "moe_4h_to_h_lora_ranks": moe_4h_to_h_lora_ranks, + "moe_gate_lora_ranks": moe_gate_lora_ranks, + "mlp_h_to_4h_lora_ranks": moe_h_to_4h_lora_ranks, + "mlp_4h_to_h_lora_ranks": moe_4h_to_h_lora_ranks, + "mlp_gate_lora_ranks": moe_gate_lora_ranks, + }], + lora_weights_pointers=[{ + "moe_h_to_4h_lora_weights_pointers": + 
moe_h_to_4h_weights_pointers, + "moe_4h_to_h_lora_weights_pointers": + moe_4h_to_h_weights_pointers, + "moe_gate_lora_weights_pointers": + moe_gate_weights_pointers, + "mlp_h_to_4h_lora_weights_pointers": + moe_h_to_4h_weights_pointers, + "mlp_4h_to_h_lora_weights_pointers": + moe_4h_to_h_weights_pointers, + "mlp_gate_lora_weights_pointers": + moe_gate_weights_pointers, + }], + host_context_lengths=host_context_lengths, + host_request_types=host_request_types, + weight_index=0, + ) + def create_fp8_scaling_factors(self, max_act1, max_act2): self.activation_scaling_factor_1 = torch.tensor([max_act1 ]).float() / 440. @@ -580,10 +721,176 @@ def MLP(network, trt_key): 'int8': 2e-1, 'int4': 2e-1, } - torch.testing.assert_close(outputs['output'], - outputs['mlp_output'], - rtol=tolerances[dtype_str], - atol=tolerances[dtype_str]) + torch.testing.assert_close( + outputs["output"], + outputs["mlp_output"], + rtol=tolerances[dtype_str], + atol=tolerances[dtype_str], + ) + + @parameterized.expand(list( + product(["float16", "bfloat16", "int4", "int8"], ["gelu", "geglu"], + [True], [32, 64])), + name_func=unittest_name_func) + def test_mlp_lora_comparison(self, dtype_str, actfn, use_plugin, lora_rank): + """This test uses one expert and compares the result to a plain MLP""" + skip_bf16_pre_ampere(dtype_str) + + use_int4_weights = dtype_str == "int4" + weight_dtype = (trt.int8 if use_int4_weights else + tensorrt_llm.str_dtype_to_trt(dtype_str)) + + dtype = weight_dtype + quant_mode = QuantMode(0) + hidden_size = 8 + if dtype_str == "int8" or dtype_str == "int4": + dtype = tensorrt_llm.str_dtype_to_trt("float16") + hidden_size = 64 + quant_mode = QuantMode.use_weight_only( + use_int4_weights=use_int4_weights) + + num_sequences = 4 + sequence_lengths = 4 + num_experts = 1 + top_k = 1 + bias = False + ffn_hidden_size = 4 * hidden_size + self.create_weights( + num_experts, + hidden_size, + ffn_hidden_size, + bias, + dtype, + weight_dtype, + is_gated=is_gated_activation(actfn), + ) 
+ + self.create_lora_weights( + num_experts, + hidden_size, + ffn_hidden_size, + dtype, + num_sequences, + lora_rank, + ) + + input_data = gen_uniform_weights( + (num_sequences, sequence_lengths, hidden_size), + dtype=trt_dtype_to_torch(dtype), + ) + + def MLP(network, trt_key, lora_params): + mlp_type = (tensorrt_llm.layers.GatedMLP if + is_gated_activation(actfn) else tensorrt_llm.layers.MLP) + mlp = mlp_type( + hidden_size=hidden_size, + ffn_hidden_size=ffn_hidden_size, + hidden_act=gated2act(actfn), + bias=bias, + quant_mode=quant_mode, + dtype=dtype, + ) + + mlp.fc.lora = Lora( + in_hidden_size=hidden_size, + out_hidden_sizes=[ffn_hidden_size], + max_low_rank=lora_rank, + ) + + mlp.proj.lora = Lora( + in_hidden_size=ffn_hidden_size, + out_hidden_sizes=[hidden_size], + max_low_rank=lora_rank, + ) + + if is_gated_activation(actfn): + mlp.gate.lora = Lora( + in_hidden_size=hidden_size, + out_hidden_sizes=[ffn_hidden_size], + max_low_rank=lora_rank, + ) + # Quantize the weights manually so the results are comparable + fc1_qd = quant_dequant(self.fc1_weights[0].cpu(), quant_mode) + if is_gated_activation(actfn): + # Note that the MLP uses the opposite convention to the GLU paper for naming, + # the gate is the matrix the activations are NOT applied to + gate, fc1_qd = fc1_qd.chunk(2, dim=0) + mlp.gate.weight.value = np.ascontiguousarray( + torch_to_numpy(gate)) + + mlp.fc.weight.value = np.ascontiguousarray(torch_to_numpy(fc1_qd)) + fc2_qd = quant_dequant(self.fc2_weights[0].cpu(), quant_mode) + mlp.proj.weight.value = np.ascontiguousarray(torch_to_numpy(fc2_qd)) + if bias: + fc1_bias = self.fc1_bias[0].cpu() + + if is_gated_activation(actfn): + gate, fc1_bias = fc1_bias.chunk(2, dim=0) + mlp.gate.bias.value = np.ascontiguousarray( + torch_to_numpy(gate)) + + mlp.fc.bias.value = np.ascontiguousarray( + torch_to_numpy(fc1_bias)) + mlp.proj.bias.value = np.ascontiguousarray( + torch_to_numpy(self.fc2_bias[0].cpu())) + + output = mlp(trt_key, lora_params) + 
output.mark_output("mlp_output", dtype) + + session = self.create_trt_session( + tuple(input_data.shape), + num_experts, + top_k, + hidden_size, + ffn_hidden_size, + actfn, + bias, + dtype, + weight_dtype, + quant_mode, + norm_mode=MoeConfig.ExpertScaleNormalizationMode.NONE, + custom_network=MLP, + use_plugin=use_plugin, + use_lora=True, + ) + + inputs = { + "input_hidden_states": + input_data, + "moe_h_to_4h_weights_pointers": + self.lora_fc1_weights_ptrs, + "moe_h_to_4h_lora_ranks": + self.lora_fc1_ranks, + "moe_4h_to_h_weights_pointers": + self.lora_fc2_weights_ptrs, + "moe_4h_to_h_lora_ranks": + self.lora_fc2_ranks, + "moe_gate_weights_pointers": + self.lora_gated_weights_ptrs, + "moe_gate_lora_ranks": + self.lora_gated_ranks, + "host_context_lengths": + torch.tensor((sequence_lengths, ), + dtype=torch.int32).repeat(num_sequences), + "host_request_types": + torch.tensor((0, ), dtype=torch.int32).repeat(num_sequences), + } + outputs = run_session(session, inputs) + + tolerances = { + "float32": 1e-2, + "float16": (2e-2 if getSMVersion() >= 75 else + 1e-1), # Some issues for geglu on volta + "bfloat16": 1e-1, + "int8": 2e-1, + "int4": 2e-1, + } + torch.testing.assert_close( + outputs["output"], + outputs["mlp_output"], + rtol=tolerances[dtype_str], + atol=tolerances[dtype_str], + ) def set_weight_layer(self, input_weights, @@ -614,21 +921,24 @@ def set_weight_layer(self, moe_weight_wrapper.weight.value = np.ascontiguousarray( torch_to_numpy(input_weights)) - def create_trt_session(self, - input_shape, - num_experts, - top_k, - hidden_size, - ffn_hidden_size, - actfn, - bias, - dtype: trt.DataType, - weight_dtype: trt.DataType, - quant_mode, - norm_mode, - custom_network=None, - use_plugin=True, - max_sizes=None): + def create_trt_session( + self, + input_shape, + num_experts, + top_k, + hidden_size, + ffn_hidden_size, + actfn, + bias, + dtype: trt.DataType, + weight_dtype: trt.DataType, + quant_mode, + norm_mode, + custom_network=None, + use_plugin=True, + 
max_sizes=None, + use_lora=False, + ): builder = tensorrt_llm.Builder() network = builder.create_network() @@ -649,6 +959,13 @@ def create_trt_session(self, network.plugin_config.moe_plugin = trt_dtype_to_str(dtype) + lora_params = None + if use_lora: + network.plugin_config.lora_plugin = trt_dtype_to_str(dtype) + network.plugin_config.remove_input_padding = False + self.create_lora_params(input_shape[0]) + lora_params = self.lora_params + moe_config = MoeConfig(num_experts=num_experts, top_k=top_k, normalization_mode=norm_mode) @@ -662,6 +979,9 @@ def create_trt_session(self, quant_mode=quant_mode) moe.router.weight.value = torch_to_numpy(self.router_weights.cpu()) + if use_lora: + moe.max_low_rank = self.lora_rank + self.set_weight_layer(self.fc1_weights, moe.fc, quant_mode, self.weight_scaling_factor_1) self.set_weight_layer(self.fc2_weights, moe.proj, quant_mode, @@ -682,7 +1002,10 @@ def create_trt_session(self, moe.proj.bias.value = torch_to_numpy(self.fc2_bias.cpu()) if custom_network: - custom_network(network, trt_key) + if use_lora: + custom_network(network, trt_key, lora_params) + else: + custom_network(network, trt_key) if not use_plugin: quant_config = None @@ -692,9 +1015,8 @@ def create_trt_session(self, kv_cache_quant_algo=QuantAlgo.FP8) moe = moe.to(MoeOOTB, quant_config=quant_config) - output = moe(trt_key) - output.mark_output('output', dtype) - + output = moe(trt_key, lora_layer_params=lora_params) + output.mark_output("output", dtype) # trt run session = create_session(builder, network, diff --git a/tests/functional/test_nccl.py b/tests/functional/test_nccl.py index 0ec927d6e..1abdfbf7d 100644 --- a/tests/functional/test_nccl.py +++ b/tests/functional/test_nccl.py @@ -28,7 +28,6 @@ import tensorrt_llm from tensorrt_llm import Mapping, Tensor -from tensorrt_llm._ipc_utils import peer_access from tensorrt_llm.functional import (AllReduceConfig, AllReduceStrategy, allreduce) from tensorrt_llm.plugin.plugin import current_all_reduce_helper @@ -97,25 
+96,24 @@ def test_allreduce(self, dtype: str, strategy: AllReduceStrategy, input = self.reference_tensors[self.rank][:size].to(torch_dtype) inner_loop = 5 - with peer_access(self.mapping): - with tensorrt_llm.net_guard(network): + with tensorrt_llm.net_guard(network): - x = Tensor(name='x', - shape=input.shape, - dtype=tensorrt_llm.str_dtype_to_trt(dtype)) - current_all_reduce_helper().set_workspace_tensor(self.mapping) + x = Tensor(name='x', + shape=input.shape, + dtype=tensorrt_llm.str_dtype_to_trt(dtype)) + current_all_reduce_helper().set_workspace_tensor(self.mapping) - current = x - for i in range(inner_loop): - current = allreduce(current, self.mapping.tp_group, - strategy, config) + current = x + for i in range(inner_loop): + current = allreduce(current, self.mapping.tp_group, strategy, + config) - current.mark_output('output', dtype) + current.mark_output('output', dtype) - # trt run - session = create_session(builder, network, precision=dtype) - inputs = {'x': input, 'all_reduce_workspace': workspace} - outputs = run_session(session, inputs) + # trt run + session = create_session(builder, network, precision=dtype) + inputs = {'x': input, 'all_reduce_workspace': workspace} + outputs = run_session(session, inputs) # compare diff torch.testing.assert_close(outputs['output'], diff --git a/tests/functional/test_reduce_norm.py b/tests/functional/test_reduce_norm.py index c4c13006d..be9dc759c 100644 --- a/tests/functional/test_reduce_norm.py +++ b/tests/functional/test_reduce_norm.py @@ -29,7 +29,6 @@ import tensorrt_llm as tllm from tensorrt_llm import Mapping, Tensor -from tensorrt_llm._ipc_utils import peer_access from tensorrt_llm.functional import (AllReduceConfig, AllReduceFusionOp, AllReduceFusionParams, AllReduceStrategy, allreduce) @@ -105,66 +104,67 @@ def test_allreduce(self, dtype: str, strategy: AllReduceStrategy, input = self.reference_tensors[self.rank][:size].to( torch_dtype).reshape(token_num, hidden_size) - with peer_access(self.mapping): - 
with tllm.net_guard(net): - network = tllm.default_trtnet() - - x = Tensor(name='x', - shape=input.shape, - dtype=tllm.str_dtype_to_trt(dtype)) - y = Tensor(name='y', - shape=bias.shape, - dtype=tllm.str_dtype_to_trt(dtype)) - z = Tensor(name='z', - shape=residual.shape, - dtype=tllm.str_dtype_to_trt(dtype)) - w = Tensor(name='w', - shape=weight.shape, - dtype=tllm.str_dtype_to_trt(dtype)) - current_all_reduce_helper().set_workspace_tensor(self.mapping) - - current = x - current, z = allreduce( - current, - self.mapping.tp_group, - strategy, - config, - reduce_fusion_params=AllReduceFusionParams( - AllReduceFusionOp.RESIDUAL_RMS_NORM, - bias=y, - residual=z, - norm_weight=w, - eps=eps)) - output = current.trt_tensor - - output.name = 'output' - output.dtype = tllm.str_dtype_to_trt(dtype) - network.mark_output(output) - - build_engine = EngineFromNetwork( - (builder.trt_builder, net.trt_network), - config=CreateConfig( - fp16=(dtype == 'float16'), - bf16=(dtype == 'bfloat16'), - precision_constraints='obey', - )) - - output = torch.zeros_like(input) - - stream = torch.cuda.current_stream() - feed_dict = { - 'x': input, - 'y': bias, - 'z': residual, - 'w': weight, - 'all_reduce_workspace': workspace - } - - session = tllm.runtime.Session.from_engine(build_engine()) - session.run(inputs=feed_dict, - outputs={"output": output}, - stream=stream.cuda_stream) - torch.cuda.synchronize() + with tllm.net_guard(net): + network = tllm.default_trtnet() + + x = Tensor(name='x', + shape=input.shape, + dtype=tllm.str_dtype_to_trt(dtype)) + y = Tensor(name='y', + shape=bias.shape, + dtype=tllm.str_dtype_to_trt(dtype)) + z = Tensor(name='z', + shape=residual.shape, + dtype=tllm.str_dtype_to_trt(dtype)) + w = Tensor(name='w', + shape=weight.shape, + dtype=tllm.str_dtype_to_trt(dtype)) + current_all_reduce_helper().set_workspace_tensor(self.mapping) + + current = x + current, z = allreduce( + current, + self.mapping.tp_group, + strategy, + config, + 
reduce_fusion_params=AllReduceFusionParams( + AllReduceFusionOp.RESIDUAL_RMS_NORM, + bias=y, + residual=z, + norm_weight=w, + eps=eps), + ) + output = current.trt_tensor + + output.name = 'output' + output.dtype = tllm.str_dtype_to_trt(dtype) + network.mark_output(output) + + build_engine = EngineFromNetwork( + (builder.trt_builder, net.trt_network), + config=CreateConfig( + fp16=(dtype == 'float16'), + bf16=(dtype == 'bfloat16'), + precision_constraints='obey', + ), + ) + + output = torch.zeros_like(input) + + stream = torch.cuda.current_stream() + feed_dict = { + 'x': input, + 'y': bias, + 'z': residual, + 'w': weight, + 'all_reduce_workspace': workspace + } + + session = tllm.runtime.Session.from_engine(build_engine()) + session.run(inputs=feed_dict, + outputs={"output": output}, + stream=stream.cuda_stream) + torch.cuda.synchronize() close = torch.isclose(allreduce_ref, output, rtol=1e-2, atol=1e-3) if not torch.all(close): diff --git a/tests/hlapi/apps/_test_llm_server.py b/tests/hlapi/apps/_test_llm_server.py index 3d72e6a41..2261f6fa1 100644 --- a/tests/hlapi/apps/_test_llm_server.py +++ b/tests/hlapi/apps/_test_llm_server.py @@ -13,14 +13,17 @@ from test_llm import llama_model_path -@pytest.fixture +@pytest.fixture(scope="module") def client(): llm = LLM(llama_model_path) kv_cache_config = KvCacheConfig() app_instance = LlmServer(llm, kv_cache_config) client = TestClient(app_instance.app) - return client + yield client + + del llm + del app_instance.llm def test_generate(client): diff --git a/tests/hlapi/test_llm.py b/tests/hlapi/test_llm.py index c6e9e6cd0..a75b7b1f1 100644 --- a/tests/hlapi/test_llm.py +++ b/tests/hlapi/test_llm.py @@ -534,15 +534,13 @@ def logits_post_processor(req_id: int, logits: torch.Tensor, @force_ampere def test_generate_block_reuse(): - llm = LLM( - model=llama_model_path, - kv_cache_config=KvCacheConfig(free_gpu_memory_fraction=0.4, - enable_block_reuse=True), - ) - - # Check the configurations are correctly set - assert 
llm.args.build_config.plugin_config.use_paged_context_fmha is True - assert llm.args.build_config.plugin_config.paged_kv_cache is True + build_config = BuildConfig() + build_config.plugin_config._use_paged_context_fmha = True + build_config.plugin_config._paged_kv_cache = True + llm = LLM(model=llama_model_path, + kv_cache_config=KvCacheConfig(free_gpu_memory_fraction=0.4, + enable_block_reuse=True), + build_config=build_config) sampling_params = SamplingParams(max_new_tokens=6) diff --git a/tests/hlapi/test_llm_models.py b/tests/hlapi/test_llm_models.py index b7c74dc17..559629f73 100644 --- a/tests/hlapi/test_llm_models.py +++ b/tests/hlapi/test_llm_models.py @@ -174,10 +174,13 @@ def test_llm_phi_3_mini_4k(): "examples/phi/requirements.txt") command = f"pip install -r {phi_requirement_path}" subprocess.run(command, shell=True, check=True, env=os.environ) - llm_test_harness(phi_3_mini_4k_model_path, - prompts=['A B C'], - references=[' D E F G H I J K L M'], - sampling_params=sampling_params) + phi3_mini_4k_sampling_params = SamplingParams(max_new_tokens=13) + + llm_test_harness( + phi_3_mini_4k_model_path, + prompts=["I am going to Paris, what should I see?"], + references=["\n\nAssistant: Paris is a city rich in history,"], + sampling_params=phi3_mini_4k_sampling_params) @force_ampere @@ -222,7 +225,7 @@ def test_llm_gemma_2b(): sampling_params=sampling_params) -@force_ampere +@pytest.mark.skip(reason="https://nvbugspro.nvidia.com/bug/4800391") def test_llm_gemma_2b_int4weight_only(): quant_config = QuantConfig(quant_algo=QuantAlgo.W4A16) llm_test_harness(gemma_2b_model_path, @@ -232,7 +235,7 @@ def test_llm_gemma_2b_int4weight_only(): quant_config=quant_config) -@force_ampere +@pytest.mark.skip(reason="https://nvbugspro.nvidia.com/bug/4800404") def test_llm_gemma_2_9b_it(): llm_test_harness(gemma_2_9b_it_model_path, prompts=['A B C'], @@ -292,7 +295,7 @@ def test_llm_baichuan2_7b_int4weight_only(): quant_config=quant_config) -@skip_pre_ampere 
+@pytest.mark.skip(reason="https://nvbugspro.nvidia.com/bug/4800424") def test_llm_qwen(): llm_test_harness(qwen_model_path, prompts=['A B C'], @@ -300,7 +303,7 @@ def test_llm_qwen(): sampling_params=sampling_params) -@skip_pre_ampere +@pytest.mark.skip(reason="https://nvbugspro.nvidia.com/bug/4800424") def test_llm_qwen1_5(): llm_test_harness(qwen1_5_model_path, prompts=['A B C'], diff --git a/tests/hlapi/test_llm_multi_gpu.py b/tests/hlapi/test_llm_multi_gpu.py index 8c86db288..385991877 100644 --- a/tests/hlapi/test_llm_multi_gpu.py +++ b/tests/hlapi/test_llm_multi_gpu.py @@ -202,3 +202,7 @@ def test_llm_multi_node(engine_from_checkpoint: tempfile.TemporaryDirectory): command = f"mpirun --allow-run-as-root -n {nworkers} trtllm-hlapi-launch python3 {test_case_file} --model_dir {engine_from_checkpoint.name} --tp_size {nworkers}" subprocess.run(command, shell=True, check=True, env=os.environ) # nosec B603 + + +if __name__ == '__main__': + test_llm_pp2() diff --git a/tests/hlapi/test_llm_perf_evaluator.py b/tests/hlapi/test_llm_perf_evaluator.py index 6155e05c2..a2422add9 100644 --- a/tests/hlapi/test_llm_perf_evaluator.py +++ b/tests/hlapi/test_llm_perf_evaluator.py @@ -5,7 +5,7 @@ import time from pathlib import Path -from tensorrt_llm.hlapi import KvCacheConfig +from tensorrt_llm.hlapi import BuildConfig, KvCacheConfig from tensorrt_llm.hlapi._perf_evaluator import (LLMPerfEvaluator, MemoryContinuousMonitorThread) @@ -50,12 +50,16 @@ def test_perf_evaluator(): # try to set some flags kvcache_config = KvCacheConfig(enable_block_reuse=True) + build_config = BuildConfig() + build_config.plugin_config._use_paged_context_fmha = True + evaluator = LLMPerfEvaluator.create( model=llama_model_path, num_samples=10, samples_path=samples_path, warmup=10, kv_cache_config=kvcache_config, + build_config=build_config, ) assert evaluator report = evaluator.run() diff --git a/tests/microbenchamarks/README.md b/tests/microbenchamarks/README.md new file mode 100644 index 
000000000..276bcb170 --- /dev/null +++ b/tests/microbenchamarks/README.md @@ -0,0 +1,2 @@ +!!! WARNING: This is not intended for external users to benchmark the performance numbers of the TRT-LLM product. +!!! This folder contains the benchmark script used internally to assistant TRT-LLM development. diff --git a/tests/microbenchamarks/build_time_benchmark.py b/tests/microbenchamarks/build_time_benchmark.py new file mode 100644 index 000000000..0ea5026eb --- /dev/null +++ b/tests/microbenchamarks/build_time_benchmark.py @@ -0,0 +1,134 @@ +import argparse +import os +import pathlib +import time + +import tensorrt_llm +from tensorrt_llm import (AutoConfig, AutoModelForCausalLM, BuildConfig, + Mapping, build) + +# model name to the sub dir under the llm-models path +models_name_to_path = { + 'gpt2': ("gpt2", 1, 1), + 'phi2': ('phi-2', 1, 1), + 'llama-7b': ("llama-models/llama-7b-hf", 1, 1), + 'falcon-7b': ("falcon-7b-instruct", 1, 1), + 'gptj-6b': ("gpt-j-6b", 1, 1), + 'llama2-7b': ("llama-models-v2/llama-v2-7b-hf/", 1, 1), + 'llama2-70b.TP4': ("llama-models-v2/llama-v2-70b-hf", 4, 1), + 'mixtral-8x22b.TP4': ("Mixtral-8x22B-v0.1", 4, 1), + 'mixtral-8x7b.TP4': ("Mixtral-8x7B-v0.1", 4, 1), + 'mistral-7b': ("mistral-7b-v0.1", 1, 1) +} + + +def parse_args(): + parser = argparse.ArgumentParser( + description= + "One microbenchmark to measure the engine build time for common models") + + parser.add_argument("--models_root", + type=str, + default=os.environ.get("LLM_MODELS_ROOT"), + help="The llm-models root path") + parser.add_argument("--model", + type=str, + default='gpt2', + choices=list(models_name_to_path.keys()) + ["ALL"], + help="The model subdir under the models_root") + parser.add_argument("--dtype", + type=str, + choices=['auto', 'float32', 'float16', 'bfloat16'], + default='auto', + help="The data type of the fake weights for the model") + parser.add_argument("--verbose", + '-v', + default=False, + action='store_true', + help="Turn on verbose log") + 
parser.add_argument("--load", + default=False, + action='store_true', + help="Load Hugging Face weights") + parser.add_argument("--opt", + default=3, + type=int, + choices=[0, 1, 2, 3, 4, 5], + help="Builder optimization level") + parser.add_argument("--gemm", + type=str, + default='ootb', + choices=['plugin', 'ootb'], + help="Use plugin or TRT for GEMM") + parser.add_argument("--strong_type", + default=False, + action="store_true", + help="Use strong type") + parser.add_argument("--managed_weights", + default=False, + action="store_true", + help="Turn on TRT-LLM managed weights") + return parser.parse_args() + + +def build_from_hf(args, model_tag, hf_model_dir, dtype, load_weights, tp, pp): + '''Build model and init executor using huggingface model config and fake weights, useful for benchmarking + ''' + world_size = tp * pp + # TODO: Only build 1 rank for now, all the ranks shall have similar build time + # shall we build all ranks in parallel? + mapping = Mapping(world_size=world_size, rank=0, tp_size=tp, pp_size=pp) + + phase_and_time = [] + if load_weights: + start = time.time() + trtllm_model = AutoModelForCausalLM.from_hugging_face( + hf_model_dir, dtype, mapping) + phase_and_time.append(('load_and_convert', time.time() - start)) + + else: # fake weights + trtllm_config = AutoConfig.from_hugging_face(hf_model_dir, dtype, + mapping) + trtllm_model = AutoModelForCausalLM.get_trtllm_model_class( + hf_model_dir)(trtllm_config) + + start = time.time() + build_config = BuildConfig(max_input_len=1024, max_batch_size=16) + + build_config.builder_opt = args.opt + build_config.plugin_config.manage_weights = args.managed_weights + if args.gemm == 'plugin': + build_config.plugin_config.gemm_plugin = 'auto' + else: + assert args.gemm == 'ootb' + build_config.plugin_config.gemm_plugin = None + build.strongly_typed = args.strong_type + + engine = build(trtllm_model, build_config) + assert engine is not None + + phase_and_time.append(('build_engine', time.time() - start)) + 
for (p, t) in phase_and_time: + tensorrt_llm.logger.info( + f"===BuildTime==== {p} {model_tag} {t} seconds") + + +if __name__ == "__main__": + args = parse_args() + if args.verbose: + tensorrt_llm.logger.set_level('verbose') + else: + tensorrt_llm.logger.set_level('info') + + target_models = args.model + if target_models == "ALL": + target_models = models_name_to_path.keys() + else: + target_models = [target_models] + + for model in target_models: + model_dir, tp, pp = models_name_to_path[model] + model_dir = pathlib.Path(args.models_root) / model_dir + assert model_dir.exists() + build_from_hf(args, model, str(model_dir), args.dtype, args.load, tp, + pp) diff --git a/tests/model/test_decilm.py b/tests/model/test_decilm.py new file mode 100644 index 000000000..083db6671 --- /dev/null +++ b/tests/model/test_decilm.py @@ -0,0 +1,602 @@ +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import itertools
import os
import sys
import tempfile
import unittest
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np
import tensorrt as trt
import torch
import transformers
from parameterized import parameterized

import tensorrt_llm
from tensorrt_llm import logger
from tensorrt_llm._utils import str_dtype_to_torch
from tensorrt_llm.builder import Builder
from tensorrt_llm.mapping import Mapping
from tensorrt_llm.models.deci.config import DeciConfig, DeciLayerConfig
from tensorrt_llm.models.deci.convert import _ffn_mult_to_intermediate_size
from tensorrt_llm.models.deci.layer_config import (AttentionImplementation,
                                                   FFNImplementation)
from tensorrt_llm.models.deci.model import DeciLMForCausalLM
from tensorrt_llm.network import Network, net_guard
from tensorrt_llm.plugin.plugin import ContextFMHAType
from tensorrt_llm.runtime.generation import _Runtime

sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from utils.llm_data import llm_models_root
from utils.util import unittest_name_func


class TestDeciLM(unittest.TestCase):
    """Tests for the DeciLM / variable-GQA ("nvsmall") model support.

    Covers config round-tripping, HF-checkpoint conversion, and a logits
    comparison of the built TRT engine against the HF reference model.
    """

    def _make_decilm_config(self,
                            layer_configs: List[Union[DeciLayerConfig,
                                                      Dict[str, Dict[str,
                                                                     Any]]]],
                            dtype: str = 'bfloat16',
                            num_attention_heads: int = 32,
                            num_key_value_heads: Optional[int] = None,
                            hidden_size: int = 4096,
                            intermediate_size: int = 16384,
                            vocab_size: int = 32128,
                            max_positions_embedding: int = 1024,
                            norm_epsilon: float = 1e-05) -> DeciConfig:
        """Build a small DeciConfig with the given per-layer configs."""
        config = {
            'architecture': 'DeciLMForCausalLM',
            'num_hidden_layers': len(layer_configs),
            'num_attention_heads': num_attention_heads,
            'num_key_value_heads': num_key_value_heads,
            'dtype': dtype,
            'logits_dtype': dtype,
            'hidden_size': hidden_size,
            'intermediate_size': intermediate_size,
            'vocab_size': vocab_size,
            'position_embedding_type': 'rope_gpt_neox',
            'max_position_embeddings': max_positions_embedding,
            'hidden_act': 'silu',
            'norm_epsilon': norm_epsilon,
            'layer_configs': layer_configs
        }

        config = DeciConfig.from_dict(config)
        return config

    def _gen_tensorrt_llm_network(self, network: Network,
                                  decilm: DeciLMForCausalLM, batch_size: int,
                                  beam_width: int, input_len: int,
                                  output_len: int, rank: int,
                                  tensor_parallel: int, **opt_flags):
        """Trace the DeciLM forward pass into `network` and return it."""
        # NOTE: the original contained a dead `list(range(tensor_parallel))`
        # statement here (result discarded); removed.
        with net_guard(network):
            # optimize_model(decilm, **opt_flags)
            # Prepare
            network.set_named_parameters(decilm.named_parameters())
            inputs = decilm.prepare_inputs(max_batch_size=batch_size,
                                           max_input_len=input_len,
                                           max_seq_len=input_len + output_len,
                                           max_num_tokens=batch_size *
                                           input_len,
                                           use_cache=True,
                                           max_beam_width=beam_width)
            # Forward
            decilm(**inputs)
        return network

    def _gen_tensorrt_llm_engine(
            self,
            rank: int,
            world_size: int,
            decilm: DeciLMForCausalLM,
            model_name: str,
            use_plugin: bool,
            batch_size: int,
            beam_width: int,
            input_len: int,
            output_len: int,
            use_refit: bool,
            use_gemm: bool = False,
            context_fmha_flag: ContextFMHAType = ContextFMHAType.disabled,
            enable_remove_input_padding: bool = False,
            **opt_flags) -> trt.IHostMemory:
        """Build a serialized TRT engine for `decilm` with the given options."""
        builder = Builder()
        dtype = decilm.config.dtype

        with tempfile.TemporaryDirectory():
            builder_config = builder.create_builder_config(
                name=model_name,
                precision=dtype,
                timing_cache='model.cache',
                tensor_parallel=world_size,  # TP only
                use_refit=use_refit,
                strongly_typed=True,
            )
            network = builder.create_network()
            network.plugin_config.to_legacy_setting()
            if use_plugin:
                network.plugin_config.gpt_attention_plugin = dtype
            if use_gemm:
                network.plugin_config.gemm_plugin = dtype
            if enable_remove_input_padding:
                network.plugin_config.remove_input_padding = True
            network.plugin_config.set_context_fmha(context_fmha_flag)

            self._gen_tensorrt_llm_network(network=network,
                                           decilm=decilm,
                                           batch_size=batch_size,
                                           beam_width=beam_width,
                                           input_len=input_len,
                                           output_len=output_len,
                                           rank=rank,
                                           tensor_parallel=world_size,
                                           **opt_flags)
            engine_buffer = builder.build_engine(network, builder_config)
            return engine_buffer

    def _gen_tensorrt_llm_runtime(
            self,
            log_level: str,
            world_size: int,
            rank: int,
            decilm: DeciLMForCausalLM,
            model_name: str,
            use_plugin: bool,
            batch_size: int,
            beam_width: int,
            input_len: int,
            output_len: int,
            use_refit: bool,
            use_gemm: bool = False,
            context_fmha_flag: ContextFMHAType = ContextFMHAType.disabled,
            enable_remove_input_padding: bool = False,
            **opt_flags) -> Tuple[_Runtime, trt.IHostMemory]:
        """Build an engine and wrap it in a `_Runtime` for execution."""
        logger.set_level(log_level)
        mapping = Mapping(world_size, rank, tp_size=world_size)
        engine_buffer = self._gen_tensorrt_llm_engine(
            rank=rank,
            world_size=world_size,
            decilm=decilm,
            model_name=model_name,
            use_plugin=use_plugin,
            batch_size=batch_size,
            beam_width=beam_width,
            input_len=input_len,
            output_len=output_len,
            use_refit=use_refit,
            use_gemm=use_gemm,
            context_fmha_flag=context_fmha_flag,
            enable_remove_input_padding=enable_remove_input_padding,
            **opt_flags)
        runtime = _Runtime(engine_buffer, mapping)
        return runtime, engine_buffer

    def test_config_to_from_dict(self) -> None:
        """DeciConfig survives a to_dict/from_dict round trip."""
        config = self._make_decilm_config(layer_configs=[{
            "attention": {
                "num_key_value_heads": 4
            },
            "ffn": {}
        }, {
            "attention": {
                "num_key_value_heads": 2
            },
            "ffn": {
                "impl": "no_op"
            }
        }, {
            "attention": {
                "impl": "no_op"
            },
            "ffn": {
                "intermediate_size": 8192
            }
        }])

        config2 = DeciConfig.from_dict(config.to_dict())
        self.assertListEqual(config.layer_configs, config2.layer_configs)

    def test_save_load_config(self) -> None:
        """DeciConfig survives a JSON-file save/load round trip."""
        config = self._make_decilm_config(layer_configs=[{
            "attention": {
                "num_key_value_heads": 4
            },
            "ffn": {}
        }, {
            "attention": {
                "num_key_value_heads": 2
            },
            "ffn": {
                "impl": "no_op"
            }
        }, {
            "attention": {
                "impl": "no_op"
            },
            "ffn": {
                "intermediate_size": 8192
            }
        }])

        with tempfile.TemporaryDirectory(
                prefix="test_save_load_checkpoint") as ckpt_dir:
            config_file = f"{ckpt_dir}/config.json"
            config.to_json_file(config_file)
            config2 = DeciConfig.from_json_file(config_file)

        self.assertDictEqual(config.to_dict(), config2.to_dict())
        self.assertListEqual(config.layer_configs, config2.layer_configs)

    def get_loader_test_cases():
        # Plain function (no self): evaluated at class-definition time by the
        # @parameterized.expand decorator below.
        model_root = llm_models_root(check=True)
        test_models_base_path = Path(model_root, "nvsmall/tests")

        models_path = [
            os.path.join(test_models_base_path, x)
            for x in os.listdir(test_models_base_path)
        ]
        test_cases = list(
            itertools.product(models_path, ["bfloat16", "float16"]))

        return test_cases

    @parameterized.expand(get_loader_test_cases, name_func=unittest_name_func)
    def test_allclose_to_hf(self, hf_model_dir, dtype):
        """Compare TRT engine logits against the HF model, context + one
        generation step."""
        if hf_model_dir is None:
            self.skipTest(
                "Missing nvsmall checkpoint, define a valid checkpoint path with the NVSMALL_CKPT environment variable"
            )

        dtype = tensorrt_llm._utils.str_dtype_to_torch(dtype)

        hf_model = transformers.AutoModelForCausalLM.from_pretrained(
            hf_model_dir, trust_remote_code=True, torch_dtype=dtype).cuda()
        decilm = DeciLMForCausalLM.from_hugging_face(hf_model)
        config = decilm.config

        log_level = "warning"
        batch_size = 1
        beam_width = 1
        input_len = 4
        output_len = 2
        max_seq_len = input_len + output_len
        dtype = config.dtype  # rebound: now the config's dtype *string*
        enable_remove_input_padding = False
        use_gpt_plugin = True
        use_gemm = True

        runtime, engine_buffer = self._gen_tensorrt_llm_runtime(
            log_level=log_level,
            decilm=decilm,
            batch_size=batch_size,
            beam_width=beam_width,
            input_len=input_len,
            output_len=output_len,
            rank=0,
            world_size=1,
            model_name="decilm",
            use_gemm=use_gemm,
            use_plugin=use_gpt_plugin,
            use_refit=False)

        key_value_cache_buffers = []
        head_size = config.hidden_size // config.num_attention_heads

        # Only layers with real attention need a KV cache buffer.
        attn_layer_idx = [
            i for i in range(config.num_hidden_layers)
            if config.get_layer_config(i).attention.needs_kv_cache
        ]
        for layer_idx in attn_layer_idx:
            layer_config = config.get_layer_config(layer_idx)
            new_cache = torch.zeros((
                batch_size,
                2,
                layer_config.attention.num_key_value_heads,
                max_seq_len,
                head_size,
            ),
                                    dtype=str_dtype_to_torch(dtype),
                                    device='cuda')
            key_value_cache_buffers.append(new_cache)

        # compare context
        ctx_ids = torch.randint(100, (batch_size, input_len)).int().cuda()
        ctx_context_lengths = input_len * torch.ones(
            (batch_size), dtype=torch.int32, device='cuda')
        ctx_position_ids = torch.tensor(range(input_len),
                                        dtype=torch.int32).reshape([
                                            1, input_len
                                        ]).expand([batch_size,
                                                   input_len]).cuda()
        ctx_last_token_ids = ctx_context_lengths.clone()
        ctx_host_request_types = torch.tensor([0] * batch_size,
                                              dtype=torch.int32)

        # We need sequence_lengths start as context_lengths for step 0,
        # and it will be added one after each step.
        sequence_length_buffer = ctx_context_lengths.detach().clone()

        with torch.no_grad():
            hf_outputs = hf_model.forward(ctx_ids,
                                          output_hidden_states=True,
                                          output_attentions=True)

        torch.cuda.synchronize()
        ref = hf_outputs.logits[:, -1, :]

        if enable_remove_input_padding:
            ctx_ids = ctx_ids.view([batch_size * input_len])
            ctx_position_ids = ctx_position_ids.view([batch_size * input_len])
            ctx_last_token_ids = torch.cumsum(ctx_last_token_ids, dim=0).int()

        cache_indirections = [
            torch.full((
                batch_size,
                beam_width,
                max_seq_len,
            ),
                       0,
                       dtype=torch.int32,
                       device='cuda'),
            torch.full((
                batch_size,
                beam_width,
                max_seq_len,
            ),
                       0,
                       dtype=torch.int32,
                       device='cuda')
        ]  # ping-pong buffers

        perf_knob_tensor_size = 16
        # runtime_perf_knobs is not used in context phase
        context_runtime_perf_knobs = torch.tensor([-1] * perf_knob_tensor_size,
                                                  dtype=torch.int64)

        ctx_buffer = {
            'input_ids': ctx_ids,
            'context_lengths': ctx_context_lengths,
            'position_ids': ctx_position_ids,
            'last_token_ids': ctx_last_token_ids,
            'cache_indirection': cache_indirections[0],
            'host_request_types': ctx_host_request_types,
            'host_runtime_perf_knobs': context_runtime_perf_knobs,
        }
        if enable_remove_input_padding:
            ctx_buffer['host_context_lengths'] = ctx_context_lengths.cpu()

        ctx_shape = {k: v.shape for k, v in ctx_buffer.items()}

        ctx_buffer['host_max_attention_window_sizes'] = torch.tensor(
            [max_seq_len] * len(attn_layer_idx), dtype=torch.int32)
        ctx_shape['host_max_attention_window_sizes'] = (len(attn_layer_idx), )
        for layer_idx, buf in zip(attn_layer_idx, key_value_cache_buffers):
            layer_config = config.get_layer_config(layer_idx)
            kv_shape = (batch_size, 2,
                        layer_config.attention.num_key_value_heads, max_seq_len,
                        head_size)
            ctx_shape[f'past_key_value_{layer_idx}'] = kv_shape
            ctx_buffer[f'past_key_value_{layer_idx}'] = buf
            ctx_buffer[f'present_key_value_{layer_idx}'] = buf

        ctx_buffer['sequence_length'] = sequence_length_buffer
        ctx_shape['sequence_length'] = ctx_buffer['sequence_length'].shape
        ctx_shape['host_past_key_value_lengths'] = (batch_size, )
        ctx_buffer['host_past_key_value_lengths'] = torch.tensor(
            [0] * batch_size, dtype=torch.int32)
        ctx_shape['host_sink_token_length'] = (1, )
        ctx_buffer['host_sink_token_length'] = torch.tensor([0],
                                                            dtype=torch.int32)

        context = runtime.ctx_context
        runtime._set_shape(context, ctx_shape)
        runtime._set_buffer(context, ctx_buffer)
        runtime._run(context)
        torch.cuda.synchronize()

        res = ctx_buffer['logits']
        np.testing.assert_allclose(ref.to(torch.float32).cpu().numpy(),
                                   res.to(torch.float32).cpu().numpy(),
                                   atol=0.12)

        # compare generation
        step = 1
        step1_id = torch.randint(100, (batch_size, 1)).int().cuda()
        gen_context_lengths = ctx_context_lengths.clone()
        gen_position_ids = torch.ones_like(step1_id).int().cuda() * input_len
        gen_last_token_ids = torch.zeros_like(gen_context_lengths).int().cuda()
        gen_host_request_types = torch.tensor([1] * batch_size,
                                              dtype=torch.int32)
        gen_runtime_perf_knobs = torch.tensor([-1] * perf_knob_tensor_size,
                                              dtype=torch.int64)

        with torch.no_grad():
            hf_outputs = hf_model.forward(
                step1_id,
                past_key_values=hf_outputs.past_key_values,
                use_cache=True,
                output_hidden_states=True)

        torch.cuda.synchronize()
        ref = hf_outputs.logits[:, -1, :]

        if enable_remove_input_padding:
            step1_id = step1_id.view([batch_size])
            gen_position_ids = gen_position_ids.view([batch_size])
            gen_last_token_ids = torch.ones_like(
                gen_context_lengths).int().cuda()
            gen_last_token_ids = torch.cumsum(gen_last_token_ids, dim=0).int()

        step1_buffer = {
            'input_ids': step1_id,
            'context_lengths': gen_context_lengths,
            'position_ids': gen_position_ids,
            'last_token_ids': gen_last_token_ids,
            'host_request_types': gen_host_request_types,
            'cache_indirection': cache_indirections[1],
            'host_runtime_perf_knobs': gen_runtime_perf_knobs,
        }
        if enable_remove_input_padding:
            step1_buffer['host_context_lengths'] = gen_context_lengths.cpu()

        step1_shape = {k: v.shape for k, v in step1_buffer.items()}

        sequence_length_buffer = torch.add(sequence_length_buffer, step)
        step1_buffer['host_max_attention_window_sizes'] = torch.tensor(
            [max_seq_len] * len(attn_layer_idx), dtype=torch.int32)
        step1_shape['host_max_attention_window_sizes'] = (
            len(attn_layer_idx), )
        for layer_idx, buf in zip(attn_layer_idx, key_value_cache_buffers):
            layer_config = config.get_layer_config(layer_idx)
            kv_shape = (batch_size, 2,
                        layer_config.attention.num_key_value_heads, max_seq_len,
                        head_size)
            step1_shape[f"past_key_value_{layer_idx}"] = kv_shape
            step1_buffer[f"past_key_value_{layer_idx}"] = buf
            step1_buffer[f"present_key_value_{layer_idx}"] = buf

        step1_buffer['sequence_length'] = sequence_length_buffer
        # NOTE: the original assigned step1_shape['sequence_length'] twice in
        # a row (first from ctx_buffer, then the tuple below); the duplicate
        # first assignment was redundant and has been dropped.
        step1_shape['sequence_length'] = (batch_size, )
        step1_shape['host_past_key_value_lengths'] = (batch_size, )
        step1_buffer[
            'host_past_key_value_lengths'] = sequence_length_buffer.cpu()
        step1_shape['host_sink_token_length'] = (1, )
        step1_buffer['host_sink_token_length'] = torch.tensor([0],
                                                              dtype=torch.int32)

        context = runtime.context_1
        runtime._set_shape(context, step1_shape)
        runtime._set_buffer(context, step1_buffer)
        runtime._run(context)
        torch.cuda.synchronize()
        res = step1_buffer['logits']

        np.testing.assert_allclose(ref.to(torch.float32).cpu().numpy(),
                                   res.to(torch.float32).cpu().numpy(),
                                   atol=0.12)

    @parameterized.expand(
        itertools.product(
            (os.getenv("NVSMALL_CKPT"), ),  # "deci/decilm-7b"),
            (True, False),
            (1, 2),
            (1, 2),
            ("auto", "float16", "bfloat16")))
    def test_convert_config_from_hf(self, ckpt_path: Optional[str],
                                    preloaded: bool, tp_size: int, pp_size: int,
                                    dtype: str) -> None:
        """Converted DeciConfig matches the HF config (old- and new-style)."""
        if ckpt_path is None:
            self.skipTest(
                "Missing nvsmall checkpoint, define a valid checkpoint path with the NVSMALL_CKPT environment variable"
            )

        hf_config = transformers.AutoConfig.from_pretrained(
            ckpt_path, trust_remote_code=True)

        mapping = Mapping(world_size=(tp_size * pp_size),
                          rank=0,
                          gpus_per_node=1,
                          tp_size=tp_size,
                          pp_size=pp_size)

        config = DeciConfig.from_hugging_face(
            hf_config if preloaded else ckpt_path,
            dtype=dtype,
            mapping=mapping,
            trust_remote_code=not preloaded)

        if getattr(hf_config, "num_key_value_heads_per_layer",
                   None) is not None:
            # verify layers for old config
            for layer_idx, num_kv_heads in enumerate(
                    hf_config.num_key_value_heads_per_layer):
                layer_config = config.get_layer_config(layer_idx)
                self.assertEqual(layer_config.attention.impl,
                                 AttentionImplementation.ATTENTION)
                self.assertEqual(num_kv_heads,
                                 layer_config.attention.num_key_value_heads)
                self.assertEqual(layer_config.ffn.impl, FFNImplementation.MLP)
                self.assertEqual(layer_config.ffn.intermediate_size,
                                 config.intermediate_size)

        elif getattr(hf_config, "block_configs", None) is not None:
            # verify layers for new config
            for layer_idx, block_config in enumerate(hf_config.block_configs):
                layer_config = config.get_layer_config(layer_idx)
                if layer_config.attention.impl == AttentionImplementation.ATTENTION:
                    self.assertFalse(block_config.attention.no_op)
                    self.assertFalse(block_config.attention.replace_with_linear)
                    self.assertEqual(
                        config.num_attention_heads //
                        block_config.attention.n_heads_in_group,
                        layer_config.attention.num_key_value_heads)
                elif layer_config.attention.impl == AttentionImplementation.NO_OP:
                    self.assertTrue(block_config.attention.no_op)
                elif layer_config.attention.impl == AttentionImplementation.LINEAR:
                    self.assertTrue(block_config.attention.replace_with_linear)

                if layer_config.ffn.impl == FFNImplementation.MLP:
                    self.assertFalse(block_config.ffn.no_op)
                    self.assertFalse(block_config.ffn.replace_with_linear)
                    self.assertEqual(
                        _ffn_mult_to_intermediate_size(
                            block_config.ffn.ffn_mult, config.hidden_size),
                        layer_config.ffn.intermediate_size)
                elif layer_config.ffn.impl == FFNImplementation.NO_OP:
                    self.assertTrue(block_config.ffn.no_op)
                elif layer_config.ffn.impl == FFNImplementation.LINEAR:
                    self.assertTrue(block_config.ffn.replace_with_linear)

        # verify config is valid enough for model creation
        DeciLMForCausalLM(config)

    @parameterized.expand(
        itertools.product(
            (os.getenv("NVSMALL_CKPT"), ),  # "deci/decilm-7b"),
            (True, False),
            (1, 2),
            (1, 2),
            ("auto", "float16", "bfloat16")))
    def test_convert_model_from_hf(self, ckpt_path: Optional[str],
                                   preloaded: bool, tp_size: int, pp_size: int,
                                   dtype: str) -> None:
        """from_hugging_face succeeds from a loaded model or a directory."""
        if ckpt_path is None:
            self.skipTest(
                "Missing nvsmall checkpoint, define a valid checkpoint path with the NVSMALL_CKPT environment variable"
            )

        if preloaded:
            hf_model_or_dir = transformers.AutoModelForCausalLM.from_pretrained(
                ckpt_path, trust_remote_code=True)
        else:
            hf_model_or_dir = ckpt_path

        mapping = Mapping(world_size=(tp_size * pp_size),
                          rank=0,
                          gpus_per_node=1,
                          tp_size=tp_size,
                          pp_size=pp_size)

        DeciLMForCausalLM.from_hugging_face(hf_model_or_dir=hf_model_or_dir,
                                            dtype=dtype,
                                            mapping=mapping,
                                            trust_remote_code=not preloaded)
convert_from_hf_checkpoint(model_dir=hf_path, dtype=dtype) + weights = convert_from_hf_checkpoint(mamba_config=config, + model_dir=hf_path, + dtype=dtype) else: weights = convert_hf_mamba(hf_mamba, rank=0, dtype=dtype) - + config = tensorrt_llm.models.PretrainedConfig.from_dict(config) tensorrt_llm_mamba = tensorrt_llm.models.MambaForCausalLM(config) tensorrt_llm_mamba.load(weights) return tensorrt_llm_mamba diff --git a/tests/model/test_mistral.py b/tests/model/test_mistral.py index a479d5d10..5000566aa 100644 --- a/tests/model/test_mistral.py +++ b/tests/model/test_mistral.py @@ -84,7 +84,6 @@ def _gen_tensorrt_llm_network(self, network, hf_mistral, "top_k": 0, "normalization_mode": 1, }, - 'use_fused_mlp': False, } # Initialize model @@ -505,7 +504,6 @@ def print_layers(m: tensorrt_llm.models.LLaMAForCausalLM): }, 'use_parallel_embedding': use_parallel_embedding, 'embedding_sharding_dim': embedding_sharding_dim, - 'use_fused_mlp': False, } config = PretrainedConfig.from_dict(config) diff --git a/tests/utils/cpp_paths.py b/tests/utils/cpp_paths.py index 5b4853aa4..02a8abff4 100644 --- a/tests/utils/cpp_paths.py +++ b/tests/utils/cpp_paths.py @@ -45,7 +45,7 @@ def engine_path(resource_path: _pl.Path) -> _pl.Path: def get_base_model_spec() -> model_spec.ModelSpec: model_spec_obj = model_spec.ModelSpec('input_tokens.npy', _tb.DataType.HALF) model_spec_obj.use_gpt_plugin().set_kv_cache_type( - model_spec.KVCacheType.PAGED).use_packed_input() + _tb.KVCacheType.PAGED).use_packed_input() return model_spec_obj

    6D7wjy_2RZSEUAg`#-`N3is}q$WqKR?Sf-iA0M0_5ia?2QZ(4ScY6$)Hvr6a z)Mq?SWdV=5nE?Z!ZTuHxfPXg%dFrmBfY^`^Lhcf;CrS1?umhqnY6JMEbIZ0Bgwef* z$@=)kn;1${nq}frrAw1$#bi5pM5in{Jh0+@vBat#No=9t<%)O(-MizKaJr{!6noLX zl35?TShc(SQF#R?BTioArlJ}Yq$wLcfwe85E~g>OBnVT*~MIuFp)G@yCWCS)QHE0 z70>S3XPNocl{%2${Hwbw_KPKx%g&~+Y>#VI{vp)y=e00FUv~W;%|VR6P(fCD_K7|3 zlGMDO7bX9V@ZP34040|*N{Rr>lMa9-Ha0#k()-WI$Osen>tC5Y@=-PQjG~(Ijulk0 zSdGtWxnMi<2^yw=ghV&7-XvtS$bG5id@x#`_<~ejC>r-NF6Co0Y#q>Z&9` zWVi7+=dg0KmyO5ov1NCTgWJnm^5Wt99ijvlLFVCSGWh$JojWpD_ftgl`n=Rg)+qNl z(J({-?l4QEyd<5UgIUfqMz@PAURLjsrjYHqQoxHYi_$JjTfx>~6|{ZJpw%>EXzJi& z=Tsx6te%XLC7I37@5>@MTm%@>I$m(Opz6C%YNRmroSJ~J_*s2bv%Sc>^It5Zd z{EgVZ)mhQuB4!7#*Wmp)Ev4U%?lBQ%NTueeNeLOApH6eK-FmI4i*lWgZ*7pKY~?1@ z{-XbOYeEGu!X3J*e`zE-eq8Jt`SzF{yPY)-zI?{Uv!&rj>X7f zkS90s_q}kAAq~%7W`?U=US8BS5zTBQN&6Z-wE_CXcJX&=fGiM-?7^)thKOu|=(L(T zl-USrT#NU*Dhk7u#J4YWJR7XS6kRDVknrOJU!cHEyz7OR+UN`<_=lS6Xkp=txoovB zqEnPRASzt4&)#X49z4L${4n_pluuG3&BjieGwD|Hb=CyGDbH5QI242On`h4#n%9r1Z-*ZwV+ z%t14@YsWm9W&cax{lSY%uZk=WT~%iDKqlwcwXZrV%VR~({U~V{amT|+G^2)JRtNMp zpZ4FbDO5&EQHz=x-hr)KJe8eJ;?)w=xQD}Yb()s>CEVB3@zn+oK87jC!8e#bGft_mfGFhmMdOc zlu@GGdYSZ&77Bs2L0?tN(DE%*uvB_3tKy7+h{foSyf@?igs&+FviwqNUJtxqvOF4f z^v-WMm?o}?M#sO0BgX%K0Fgj$zYRXn_JKfn=&uRDRod?Cvn_5r-N9EZ`z)kGG^uaa z-he~Zasr^Q)QAErd?5_}n%Z;1slA49eB0Q3nV0!cE1PS?cLTr^JPJ0Hks3${yInH9 z#%uzU2LM%*eDs9y?v8W3u!b|=J+#2>b!>FohpDr1g8eS$6h2b#vL*2OAuhd6vEfB) zySatO1-1l6hJE^d)HqEjfDs#DN*Uob;&PL6?0nbQLn$|s12nEX@Qia~EMfuF63Qd2|6OIQPu>ob;S!MTXw3}{AAkA01L8r(V?(RH zFeqHWxKfXnqMoJ5i1}6MqooDhZg$ZIJEs7M-UR3}uG=`m+XdS64dxZ#xN*SE7Vg>l zr#uPh0?f489X;hv1Rx0wkhG%VgQhsN#AJgldyK^zT7A7*sNr;j4Q-Kat2g=pDAnsn z8>ui?qGx!$fXz0WhNKU;%i}W|Jp+1wjS0*#rUPi|*`PxiqTi;6K9PV{6aUTo+_(+! 
z+CdwwE~Yd61E#{1LZnA5pIPO6bv*L9u4|H7$b=wMoeO0MN)hEmyx|>#ZN<5m`3X044CFey4(fU@ zwDHI?%$Z_R!ugQ#SP+7HiSv=S+^cAfmXsmHqqC;vp2obo-tEYbGL)a~LdncIi*hMt zTZGy5AQH=0@LH!K)!K1pwUoa0Tc_<4lW|>*gZcernlaADRiTtIqT9D}&O-L?neKzxUyiaP+t2ZxgQiG|qfc;6CiEOUP zWm0HCEUuml@@A7K!m4lMK3i(`IBGH0p7pD+lE5EV0`)W5I46bYwkJruC{tY>C#f!$ zW@_p{DK1zoA46+1tTzdg!e-bkwS7_pgJpuG5w(pfR{eTxG;+sfjkrjvDGu#$AMGhx zMqRC)oSdZBuh-N5!2@2H5|r95fP}~CP4Wc>YO6K1$O|87g79cNTKfrKxkK8iDu?Ks z>=UugY;#E*dy1B55lqs-1Ymo+k?!4br?!Kzfn|%jcp*%X+gS6(F>G7c@qr6i?kQxn zyxQ{)0N@Q>xuF5?p{Y|_D_~sH?HXX&4LQQUFKO;P*9BBJc?qn8MtEy?J#B6C$bX-g z`B0?lZ zHEy^Mg z0vv};aj?Lb!-h#o1}Wq@aF$`-gb{~E$Cf2UfEqW4fJK)KU9#nn=cjWt17dFiyNvH9 zz@9p4P-FT@$Wnklgxt~{9ydAy=m5yxINHZN3>EGCGf4_&rt+o^EI3<4f}W7`LS!w- zi$2e*WExLjP^EkG+fq}^JoV;Rjd2DI^JwN?b}r<^V0E3bq#7%4Qo=f#zB4aL1m;Jc?Hni62}vm;G?91_+@o zU(_0P*y>qd#c0KQXmqgz##6&dTHC}Xc@HatU3P_gz)N9jPo;yFyx#-Np)p=x--s9U zl(8@mMl*Jjf1umsF2>GHhYXpOW0pZE`$E3V5|VPNaY|O=wjFbFUZ$K0hfM$gKmbWZ zK~z^|#X+_!H$Tq-k1I5p@4yhy5RI{3EZiEpjmF!}>jt_;Lo18PHSXGQ-_~RMZ?*sr zZxm>QW7806QBltpzWAti*m){N5wYN2`OBkoaDXiGbk1Cd#um+aeqy2t4>Anqt!pF$ zVu=V6N6gPY8fxX(LH2EIKAP~*#-wF{y|`OlYnwd)bayy++RW*pJThNHV0FQSbWFIG z7&+xj$%RI+DwfY<$khd23QJAC7v$Mi(5uQzBf4oDM2eRYT_Ae?YeEs}^m{zuwYDt0sP}5^&ig@Z|zpmj7vPU17_|96>+^nl6-T zY;~h$&|cFQx!i&`gW*iqJSoKL>>3!WbhJEKA34-lYYu7z0Y0|F8%jgkxayw)a@@|D zMVCdnPa8)km+9=U=jr9^cj??6&3MtS!K3IQnm)Jc4S&c1ilqZ+v2HX=zePaNV>v7B zk{yitRmp}}R0`A=2XUMsLq5*^K`v2Qx_kt~0xgbWa}t3upcc;1H_{I^jiNhVvcgCK zKm_C!l)=|fZ7`4GNBS4$rMZz>U~G)F8tEa60h;F?yWRC#r+_Qep)G8VJGgDbbz5t% zm-ev{-n)a#Fl=6X8(nS^=r>w(?RNG0plq}gZUDxVWsEyx5~7JN(ca`Bce3`Kvt|mE zLw{hJc)O(LG7KY?T&*(sxJ2_TtMQZ#p^GoqEin2_T-~d)%6p zC;f|S4YKk#-$4F!ZpTfnA}a~3B=E@+SOMZs7Tv1i??M7DN_K>=@j}JnWQi&ZnPpt? 
z_>g2BHJ`p@D;B%np}LAKgn zA^->;Wdpj~m`SXy;ais7#NPGZfG4nP1XQY=&i-jNT~J5Istu?f`3OAtr!j$=kHLeN zapMsQ+*b8(I;YMk99fW%`>0&1V`5=X4QNoy7*@ofOEIZ+w1f~Csr4Spll-DJ*0ov> zHwsrhw8QQU2R+R_`u2;y4sqM%PIg^@;t&AU$98zo!A=;j5d$`gxWLOnVsnk{2khX7 zjpxQDpqZDw+}62pw}xgI_`(e#vF36D{HNtxa+W+1Fn{aQ&{6hn~jyAlB`D&M~lP^C2 znNP7i*HZ^}+o`wCZgSWU53oaKce)XtEA$ndr!x%72lG~=hCYB;?-;&Z7+^X2pm~sE z;{0@6^PE&(REnSS>kXot&or^|d5!<*L$;)T`|=%1`HdLw4qD=`{^?n|;AZa4k37kU zH~G4NciNN4+(1LNH5E$3IYaLADOHa1SWZ=`9h7skmb5S`#h}fv^>i25L(}ceuS2c9 zu{XuxK!d(hVuqIe^6clYnj2+`M8VAROr6!?A4LML5B{yfmzTf_5P$hGSaI;hB`{f$ zV!>jy3V<}eNRc1tp<39~HnIT5oBNa%0QC8QTPWuPcxsa#n341#!-B8YZyW@>ke#{^ zhIWqf6+5xW(=K{WKo1~>NB(-P0L!6uV-faIEr6)q0qn2{`^454`26_TT>j7fT@lqS@WM&A1L+3K{K;se<^UipVxhc6&h>J3AF+RWr+~8`E{`TkZLz8;h z!}b|%aTl|dCr=)x|Lgz$Z|N_8{w}?H{Sv;V4cXjfA~y&j?^4dn!DYxgYpF}x1b~*( z<6j!hwZN?s)XmeAlUJ9H@+U{oHqa+Y~Zi(!5sY=Q?p}bJh@iLJLgKY z?F}M%e>dsR9MbcNeQK&q z#FcOffs%g{hL@1JP1ddZU3k1rBR49$jPPQCWDV@%{KZW#YR|<|fKW*$7BH8stP%2M zIc+6fBz**}&9GWW0Zaxl@FF`@FD(RjE{1`Ur8Uj3OJgaU`SsWoJ^(|2|=V&=k*V5@BfcRK#GJVA}MHhgJKLX@{wz6~1h7s5$v%B1#C+x6+$;1Fq z=#FA)>-%^vSl__&1R8WfTSK5|f0RFrhTSZD6;SWZf<3k0$|d)G((~EaSay^T(}-~a zg^;Btl?av@=T}K4RN@a=#|=uE7x0xCp%9RXahJaVN=z z=fMk%xW3y)`-%+D-{Vc=0@rMpyy({5!TT8|IDHBY*alh^G&e1%;GyqL6B}nZ$GDmC zMZT$iztULOw&}CG6k%9dPsGlZtg*59;IC^ex=*^)9$M+qg`3*}?Gf(}VQ4 zzkQ!BySyaIRvvaW=4bgOH48@p`Ke2<5?Z8_6~85|9Q$jir{|RNopToDJC`SU<0y=V zwjSB|GN4n*F%ckZg5)*9d(kXAdCui+Gml}RXM=k68tb`JW%f2#|8m(o8r}C`x zDfOm^eB!~EYIixig{xbL|2n}!e_{me3J<%`h_14zJj zXTAEE1f9rZ z*{9lgu?~%y#DGdZ_lKVVsVs(SY_rX<3%TF`x&RbXqkQ%@k=qi31Yv*pE1U^PvYQy| zMa}riM?w(F!b1R;csA=X3$iar0W@p5xmX#E9J;&MHh8-r@ z=PWbIM70bXEf-~>mpP&^1=MoclaK*m=IUaQzWesabnx&X-5cFYd-rzZk$sP?gOAUK zfLgSc>mO2k^9|+=Yyip4LBcs(7x>!$aw&(rZcH`&+?4fy3cn@F(9Mi=?% zQKG#!Oi%U>&=4bYwqe%ZRr3f+M0^c9V|TM#!yAA+MnCupPU)vS=b)^JSK{%3U7$6I!-2Ii{6LV=V3b2k3QdVqDzv*#~-R{564*4&xVW(i`Ta z<$63ta5Id?7M9s;=&`=nPM3#W%qV#IkeiLZ&Z~LxZf$^>1sdTITR?l$q=yQyFxq3< z2oo3hac))3goYTP9HWhFd;ri0Fbx~QEjNQAbV8FGHf@oc^3GqF7BF`&F^N%&eTm_4 
zACnoi!!dVgBhSRBp1v$o1=$OWDjv zwjs{5BKLqBKKwsl;$dTRyMq~%FTlzV>HabRg58@kJ%`?yms(yuEt8&{hqYqk=;R!o7M^tS9Gy-dSG+|e)44^3TNYiJ zOx#jntKxqg3B+>FqU*1qQrwiS5m@9|=*Y?!PS5i|x)dOuPy2k8E7!W5R;|(HqFI%l zlu?uGQHDdb$X{Mi1K477>&s2LVGATA1J$L52UjNZ<0}-)Xvcd@ntCbU$?t)??F< zCct40jcx}U*EYXBHsrWCS>w}8@84XdLx8Ye6Gqs*HhiwH&*9O#bT)7^2wu+PWw{n< z%0kU=kDGuIKr7!+WSIm*GAA>1Dnw4yU?0m6c&udNVjdT6`WV$pVasMYoFEF`K})0*x+|kaMfG!;=T*HFq3?E5Nt5@v5Su8OPmm zr5(Votq6?;#mdHUx6j^C>C3pKO)Vw_n&!9|s@2xNa_|VtK4GAC{w>ooI+OcDW)<=| z=fOBB;?FU|J`}3Cri4q%47Hg<%XcyNQG2Z1de}$n^+itA}lB$$gs93dFSY5zedj-SU z?vv#lEv#TB0~#`7(X^8BNvJQUlT>5mL>oC8DS$kX9Z`XVTnRf21^mc|V240C0cvOr zD>`r#{AN%bg=nMRjs=A-g-=0>bu3FL{1~s&qBpMEHNYQYqHva*g()6&rOY33W!7Ui zwJTn#xnmXOZAS1Z^284`<|}P9$LB|wrSQ_-Y44C@Hp>`dTg-S| zUE%MU-R9hBaERTn_QY8g%r8448dq*Z(&aaH+p}4k6e9cWOLgni-`rq_y(VT8oF|u; zU1(!BZeObYQrK$wIJ%PiG$})`W1KkG#)CnHh}-CNm`J3g2T=}c4VElZp;Om@)v}_E zJPw71xzHK&G28y6jXpNSP0V&Q6H?=DFRi8-g*PX`j>Sur7_V|*`Oh2je3?V@=WB9K zW2>Z<1b!P5SOMbS#yG9G{23$=>sz(R0Yp4x@o4ZkJ4_#lYYvg${1*g} z5S57sn02M8FQoa%RErAYjK{HQgmGzXV{zm}g1|xAnSL%?zyymi?WrY>$JY`V2TMwA zOmUo(Bc#_oMrfWB z%rUK!pzN755)Q=?qGHD?5XhV4vCM`ur;nPqtMa+ErWXN+yP@O~E zG@v=xo5v6z&>h^gZDB^S#!GkF7!TR_q=zQIdwEWOk#P%KY+R-ZUUlCmATB%j4e_Wl z5+GAj8{wGiVHS|vUc=OW2| zODp%y?--}tr2w&W!7PQW9xnq%oghX^mRMDN>ZeY3Dg#EBk71#0k4ijK@I2`A1VryM zno0C|;ZDEddZq{gL~*{n&hVePo@)KG9V`UHws4*G&(`jW&`JWoRSB#B@o#nHR($__ z5~$X=02-hrv1zzi<5yq4J|gxnuWf#_tkqRtx8)2HnWDCwR<5 zjqaObM1WAq+miUEh1h$Eh`jx^>X6L0F-4oszx&JCT5zEEYcwo zDcljn#w&6g9%7ifL@ zy$vO`_-crC)y9DG8TrvB9lFYREp9#l%# z+N1K3mi@?OV_3{GRKNb1o9AwIKuWG*I{6`ol0unVhiG9*t-{6iBHlFj*pviI9yX)V zC%7*|<`*G-`OYAZ3{u4H*SIe^W%a$1z)AvNNCGQB{DlO+YWCM70T-lVCmIVft5htk z6^q&c`XZg$A75Bg3l*-*s)bzm#Ii>EvObl}Y9X9cbQC5Nv|6n;kHN9)<)d=Yg@1rg zTptsMyx>VinCxoT+vnxC*kunCr2IO7V{Ke7j?U8g=_!4{bb<<<4tpwtpw0oqy*66p z`@MAk>&>)^!v1`AJbEb<5`a;GgM<3)(FsqClQODP@ z;g__4v@%y3ARdChK$F~1|A{U-um?I zAf10eYkYww8P$IO@)A2>+QnU+bIlk2*1RdUC@@v{8D3>TklCd1G%93Z>IhCA9c0A!t`BR30rJv>5al&8^=>;Onw|GBv!vG39KaWg(UFFfY?jV zr<}^4TJHxyV@+nHHW*6T|% 
z=vMT@IYcbCj90bTX`ib7usp;fYgqwu0iqDhYjHKk^ZHoQ_^sCMIvWu$MLdXjFiuR+ z6F^qrD-1H2=||vHLP@iBiJNDU0)WUzk1-#=;`MXvSCuOFH}u zFm#jFV;X*h7wL?<^pUbR&A!gtO*pwJ_3P!90%GrS4PLxUC~+CR&TJ_)^IMyUK34s- zAP*Bhq%moqC2l&a)&zOu#l4nXMVFn#Q(&WZ!M;vqI>iP1TBw*U^1GH#f{z@_MQYNw zT+&5+QSg!PviwDDDwoAnmtWvq+6;BtB|gi0oo^IPpA^u$FdXwM=dqvP6Nf$?@wy;K zbwMi&6OY~1G`3jaTQJqd?yt$Co6QBm#fIeQa8< z*lljKe!+Ij{j|mA8helMle^B#b^vLSlrn0n<&{V3L_qV46RU|7|1p;T}HW~)be zHKIAk^IS1x`2kq?(|>LRGCc3HWGzL!vOf-GGi3eEIc5vzB#llMk%9vmgZw#X=uw#F ze2J6377Tfi$HX~l`hf8_PL%e6!yr!MIfZWyU7bi$09m zEQQ@w7~>=@$sL?J@LA*hWIS`#E+cA%StLikS#?gZ#$fn(e6@v>r*JBV$}RL;(k8=H5;?9d*Yxvkqo=d(R1bIiVYTtYso zr<5F8p$$J#r&c7^wQ*i9UtjB_In_$gsL`F`V3HnuF1QX)N>x3m@#E0mSf-9$t!|zN zHR?0G)^cW|naEVX>x5@Ym&1^ZbL=Le#!J4&+USl9->w8WYhnS-5MinxahAqcF5@ZhxmKNTx=hkkz>_C7x$tU00gtO9Q{-~R z`8hKMKkH%DgNo+#(e;Mv{CV;AxLm~RdBk;P2Eeh~1HgF~0DRHqaXLWqig4wAs@(>^>0#QA~~|h z<89nB<1`|YRYoC2Cy<#Hp*W1=QY&YaBT7cFi1W=~2}XV#L_)M-ES_47Wnoi6JlZoJ z)MLI!;n41zI8G;7ek~a-q78$4_*Qnz;%=SyhlZ3KiDXG%PA;4UGA%zTocFkKo?h9tgkEJ? 
zcx+jx(M!^tP#Fd)rj^+7nn4h@{-#H8bfS_(F~6O#7$JD0w-VI8mWQQ zBSffCS@h8gkDBM0N~E;Czn=CV?5C}JXb16nFd#3$i`q`n0BvMol|iCUUGmtOZ#H14 z3|=JhRf65C4mI~MNiWi#+kDRu=q9>#8IQxzd~|@{&z6D1bn$W;x49)>g}> zL_CgA<(qDu$oH!hfWQ>U38GOl8VJCjDOKchMN3R#9FP#e-$dd8wyaSd2N<&>Ft4&a z#Nn@F$W6}vgRQiSo3^v}eZVm<x zN8zc0UGUWK3W{karsmkSpZX|e#rwh z{c7V29Jg%&@Q_anyKVguXLBFB&Nx^6W)NHTwHn2!8(oes59fsOs7BuB0K;<{StYF` z@Q0AV3K0JRhI!dV@|V`2d}(pJ=H@Vtlmu2Nq~Y3ZmfA9G0o9DTJ+5|;VLNHwvh^vb zSzwlasZ^zBbT?D3lnEj?{+d}5XX?%JVRlIa2vwaKKh7k}_%HyWi@T;J3WaTx0l0%6 zfEK_qz@($UhOKA^jc^N(5^SKM2Z$kdk^>%l4>ef;B(IS^BK-=@af2P@w(j=R{=L1l zdv^ynU}%sj*AqOWMRs?&z@q}R;88G4+lVDJqZQjB-T1OERVZEZ(0C=v0*F1HjD1z7 z{D?}-_p2ZjlFAT`NlP;lky<2L-g;SzW}6=*tUoCp^U0LZQb*k?$vOndN} zuF&dArSc&Mmh~ksfv4jgZQ3`-!4$?Nwj<_I{~dtt_FgNU zez-_y$4BY>d_T2tDc0(6GvlLwZWjAJZi)u<4-GSq10aJrPIy(YWm?lTApA!&KjNiG z05S7qEjvrj434{H9p|3EvmcTo)YDDycwShPKCf&oZOlt8?S|2)>Q}{L0AOU%rCPn&^p71E+VjdSTMz{u2I|KG zosWltIskEY=Vh+4Vll_0L30Vg@dywr;0-V=Al7bJlLPJ zLGT_g%x!G5@dmcSZZq5)s5PdAa190@$XYs+-dv5BW5%g!|*~f!d)9vAqzqF zY!ELCdQrjnh!(hUawk2s{^7lX8_swSqAql*1c6##Cw;JYW{fkdMxxUAE6ZVgI1{06 zt@=!+BK`CbmF;*XXPe^Ga=vEM6mRkz<&styp;tr&$#GK?9F)Ap?3DAMlqkm`_mX&< zUzqrKtG6MzD!Y=vZ$knrK>XVnrxll0+UflDGVuCrBA%EwDc=5~?DFb3l=vd#E zWWs$?!H?sWR?79IqfXcKQb9^3&op`9<4>)A)*hL5JuF+?+@#`qsM;9wP};X-G+g~= z`4l;+6%?q5VgxIpEherFaJflPVwFnoblID_KEW)J51C|#FnZOkF=+}lYH z9v-Bf`v9=bQ|yp0(KUxBwJM~;xje69*@ZdeG+_&PTKr?17X=1g$n4H z5wO-`mX#m{{3*mHz?LLvl{So+SDK1u_;NI&R8`RYNVU9`2$nGvaGwuye&m^Ld5fdb z^~J*IXY_#*%q4Gxdi=KVNTK;euYbyo3$MMg{qx=Z4SX1pfE&@>RTQNt{Q%oFX*^}L|1Idg01E_(5cc|~-zns1&XG827qE>z4$C*Z5%|4ptKQ4N71Ih<}5Fwqp6V67VuUeJL*~{uV7?rkX*A;4fS! zVLz!VK&DZjmoz4K?(PUFnVN2AZoO0qc%|0P!jN+o)g_ItdXeMV$0; zouH`*GU$vdl}0*bL%lF{@E}A86@HyJe!!LExZ=xxA#8vQfg6>i%*4s0tIp)3NXl7( zQY*gfhgqeclFBJ#zN6n!y&_0_EQ^YG0AAZRkH2xBO$soEjC~pqHzPPIu-@lj(0Pv8%JkeIxkQND<2E%2(dKCNz&SO*j<^{&mdb)Wj?L#Nx;M?Z9lS}nX}O!wT9sKz;EyeVrGU6B9CZrL8={@t+sNv+QlD}0R? 
zV#wChN-Je9mAsN(|1D+eX*Y>yk0kd}d9!x!mT zyoZ3{CNIKRM&KrY{@GIgCK925V_FH4xkPRKrc`hc3uUfT_?-;g@;Ow>=i2q}Q2}rD z#9+j+u-y`>xfcC)4TfekFJvPgnmtlkyalW5z(79FzFWBKB2A1ck88N>iXGzsy=$Fr z0OCR8ETwfHi+9uJb~8Qs>PdR=Xg@T>gAtG3-J}CLE!}M)3)^TqE&oT z09*lJ(rS4kCwntHSMs<}KW7Gj^=NtwIIh#F3!K7PLtNUyDIGp!&v}u|$N3%XWv|$* zLan(kkB!)U?DXO^y*+%N-W(pLlXJAC?ns8nfPa_~w9%w)r-S|dbg;LVwzqIM7YLdg zD*X1gpgClVYv+}qRaP`yn4QZpM)s-dno&#EgBt1*;{2M<4jGgZA*b9(fU`0d0^m$m z?YuI#7nVl$%ojgu@SA|+)HuaWA@26p*@pYsZrVB6PRDQ9g8L2oZ#UU-aIit&&<0;Q zVSv)OB5NbR!R0V*?y)=G)oGa5w0t_imd^T_mhU)=6a*)zW=dguOZ(n{j^_tlMPKZ* zk?5;m|BUb6s93RgInPR!vN?P(LqnS%q{x$W~0c>sgUfZ)ZV~YjRsu?)K@-wLCPL zmn8(2@i~3%TSDX(Sxeh<9?gVhv#^<0hLvrrnPi?1Ozr^e3 zNxjjffVf_86<QlQyU#plC1P}Fv%_=pl zQq`JplM^Nv(bz(G0qv#9W7a0LTs)^jl&R86d6>%!k0_2jfd?8q5DfqnpaS@7VCx$% zyZNFVe_?j&Bi}HapbUU`*c9wur6%SP_aEO$_a7ak&L%);otNnnFW&K4i%kh|VTb?P z%$tk5KqpH$4~nEFV{Q3z(Jy5rTGyISXfmG8 zlK`BVR-VH}$DOsDz&gqy5sF3)=7RG@t^XQN2s*q-cfe;8bBYfy0n2CXLU@j^;q`5R zF!mKVKRy%qZnV<-!-YRQCAa|baT(>`waO(S z11J_L07)$P0t6Q5IuKyZBE(mK3m53TIGbJt2xdUx(q1TMjax1eEk!ghftLB4VU`}p zvK+h`$?fO9aOhUNRet!GRlUi_N8Pcj7Qi>*C8Ce*=a7#;SwFS9SQFr``63-I*BW@5 z*t@%#9z4C1b`Q{m#tU_QHhiFtAHlZTXW0v{0+}tLPhNb@kT-!>6oPF>f^zU~N+9w0 zz5bQW8AzuumK`5QQl@jgmb7zf(2t6_zNu<(-Fro_(g&={_7IbiSMT4Z|NPru)6v;k zy6V|C%sVtGAkV$#)Oa_DucJNH)z%PO=_7#k`PEgrxay`y_aCH#ojqL9WgwinQ$w6@ z4s2V#qSrAvrYs8?P(=wQcXZ=g$H*P06?xJ|j7jt!(J6Za^qj|PHDmIS5`1v1yomSN zc_hlivz|s8>`K_$Wn+%*7XGB!wB- zKrlBN+NOI$z}tA-D5xQ4etFkLp6v#3<3_{O+`bYXvsUqDLhgsLTZKY4t`WDYd4s~$ z1hXW4@jZmWkQ+$upqwLXJRR^!LtLL~MXwWP%xc}M$FG-NrK}{dlE9}*Uhv56I}$gcsE>Nw}u%S6U! 
z`^syjeUM~6%ESwJ@D@P6NG%^r;vK@5oFW}?Gy%mu02_-iuDA3Wp}lY;Y=)^0xa~JC z@fCn}4}0OgdmHKA6F?sJq^(W2VaAOb9~XHsRf8SOY+Y~&uF0@V9sme}L8TlJErvke zO{IWks=frv^=_F}4+ZR*CR(VDbCn=eFMTZ@V@om1mu4OLEkNxRnqDlW0sXD?>hN9q zH^8vk;Wc)DTi-%^44@O#d%x9byNe$4ND~Mijg!Sa1h(gZo<8XM)w}occzo?yBi-LW z2u*RX>#lOnJzA_Lneqs2cg4)ou3I-`Ci4}5tmH;k=}{Jo=|wj3Y(votKML8S#nL^>BkQY!z zuVR50@CtrN;y3V6weW|jLuiD7AEt#S7l`8{abK9z7j1cYj$ge4Xi70yI}0HE^!cN- z4-ifr^90G#@P#;mS=C=rG4ZmHhZl=r)Z2{=a)fm98)YnSTv`DXGM!~(VQ+KDV^)sU z6fdh+YAKf}Ij%^Kw!QR4z`n7$p5C4vrGNYD-_lF$g-3YDXab14nv_tJ?uMPS-OxX; z{qRM>=mMFV0R-43>FD${z4-BEY6E}|b`PLKF#vN+wF;~jFdQFDicI{xq>4Nowi=mUh^4&6;Eu&Esaep{_|z%jJ3*^;MUDe_F7oqXs5eqgzrAuN?SV}?1ll* zm`~_CK-Xy~zi3WS2$+eN$ZQk%)WEhDz~mx09KAp)&njMp+(0M0{mF)S6;4!HZA6Kz9XT6~zAV_FWoupa^T~jp=<_Mm9Vk4Lnk`()oAD@_3Xkc~NnV4Lv$&;CFX7)6V*S zx;()P$*FUV`azg^1@E#yQll(iVXRGoin(Al544PYa*$lu?`MLA{3p??#F%*u*-L%G zus7YZ>`RPIwBl=+P6&vrrsuXfoyTa`hGK4oRpLqlD+zp(1Xh6flf<^F^xKla$5)U5 z3aa{Cghh4yhwG$RqNjUXEpN5NoD$!XsO3zeCG~pkNtw?Wn*uQaAx$DA&sUf;_^S=E zw!m=gqyMmD)w2XzW4Bz+TlfOzcqHD$F8KcayXooIPuOh^?WsG|p+WBU+0qw3!cA<8 zTO9x(kGI7$no6h@Q2rX-stspOQB`6L8{4NO#OJ%(4(;g+`fDBmKW$=h4#A-eL+nVo7aXrk3m*10bxP` zFxGvU@iu@rLf0D%HvI+|u4SWSOeluwn3wI2_fFF0I$jxQqpl^)vRKS_y(gJ%z9}C4 zAakmIB?o`T@~zUs&LfB^Z}XL$GS4(Z^wC2H59$?5E&463L6|Y1$<;iBJT0g-H)=4a z8YJ`v7qKx*lNT)aciQP*<54<#eU;w7{eY&pneHFlPfs6yogVBzN&o(@e@?Hj4v`U0 zCNL|o4*`IgIOC@ZnMru8g2x28&RX?xPE}y}Pspq4lQpa8@=8A;SFafXh>@k!x+ z2|)OUXO2&&(#Q6ws zLth=wx&cPxfhU9V>{Cai3W%x9KWC!d4ozTWXDwx!LrrRaO1Q)px`YrL2ho;tI!SRK%J^nGRKiWx~$WQ>TZEehR>J+=+m`U&_i`=G!6o!s# zb!>Sq;v5;uFR@89XPG;brbm@}sOQ_Bjpmz@JRc*CltCpc<=`xefrob7+Qf-vW#+DS z(Ui$0ur0s;Z)xLxBRxes?0L4kiBIF^+b~CY)BamJ-ne4?x6rU}rZyW_ti`#DUHbY- z8f+XR08A~YJHjp-fd_w#27k8f4NMEUa8ttXh^G{5%>A{s^WY^o@~K7ep8FFsEY3%z z>AA&s95X{S+xUc_=G*f#c5-yWDMUDoYfjMQWlFX{*F>btpD(Wt+DDYae{-3GRl6a| znY3A!@=VAhGn-e=t=V*sgR}Y7<`o~ONi)yFDv7Qw3RC@|t?+BUsPK~X7?+wH>U8Vz zSrQ?eCH#%c%Ueh!r$oHg-niUHB3Uvs*QuMd58cmkQ(uUB}QS1oGQ*35Zj7NxoGW`z0}RPp+F*Xyq|#C2(`;$KVx^^^9OWWw_! 
zUmRvmR<6SS-8Mvx8B#Z=!NoLCi3Fg;0xk^7Ars9fa!D({xr9J3ylRM&Vo_GRMj8Mr zv}~dM&Bq$Pwl};X5EcU*&njq8?Yk+?#wn&bNwkpUE%1%fHvZr|QpStGS#CipIZ4uV>ny314dNh5-L7 zJVTru<3xI9ht+pi-Dw+cf7yhO-=KLJ>CumC1oV zv*Va_nPtA_`IhTtrgQB{2Pjv`(=?Cjv%<7n1ql$UpBSr2ob)*jTit*bLfvA zmd&X@x@=bc|GW}d0piaq&lR;>NWcqSUJPzg>2nulg_4KDN4fZxMeO#?O=#V=Zq~Nh zvS5hc0JVh^7sm22N<7x~4AG)3%7+GCv=`u)$Mx6aeG5zoSN#NIu8$>Kethr6hLY_FVkh6_=HZ1X3l!6QG)HjaZyVu1>- z=a-(&=0`Q;%+K}wvtksgs2@2 zKuS6~!O&4Q(Kru5;CLmIdZY&+mL~skpQ5OVhwtGt{5P}1Tf2?sJ&I)R{EiQ4!Dmq8 zz;tA%eRGBA$u~XcMS)vyQ{(`xAvXkD zXiG12)5bhRuij!CXbiRi!h&b=HNnx_1ohWQ3+X!u$UL932$tKC;8nYLz;~48JGkXQ za6g`#kvr)}$+6$ot8`1mZWn?>2o#^<(lSK)IA#Gsd~QFV4lz^UHD418XWnAv{6Z;+ z)V@l|TGCIB{v0%aGE+gI$n5h_TTxj_U|IqzKs+sv)zQx^0WWA}(XgpFE&%CgZhH}V z7xA)?f61l5m)SzY74mS=uVh8c$K@I9av<>KI6zT-ovbv&0fqw*3x0Y3Ur!Mnhu4VM zPy_G)`EK(#(%bA9X8r(#KzqM{N4Wb38|m3MkJ8g8#bbaNgOnu;PpuRT| zlr2C;%&Ge{U$j(9Y<4MT9go!rYjzRZceRmJ8iW>|q8Y0NZ`zA$kDOl?L#)v%8fZ4F zP#Y?cw)et0#VYNPg*-&FRpZ2+2c}sff;`aXqEMB(mYrK%lneEG7X6AR4^pvWY1`;b zSj>q9xrKmvwc0*w7~+v4yi?$1;=xzjX=|HppkJNiqnI0o{v~qSh?nnDmm7vY0NMFX zaE!be8Ji9vTiK0kiFYgVt67h?Dclg{d1Z;*DQlBKAEQ>J=NwB^(w!0jMQ$20iwjFf zo29X8Zq~Qgwqm&^`unBA!lvUy_%dA0%47EU%W$=7^~MtTa?|<7EUp0YjYYL8__-ty z0Ed%f3_`1`Z#n*4?c9WpZN~PhrEpmZNY4vt$++R~PdMp?M0w553wEG~@dE7vE^$6; z+A?a2V?l-?>>mXUf?>Qw1QLf9z-(&CSo>0%3Tiib%C8h6X8@fF9FakRO55NKC3g5PPm z%ImUF0RC(l``?_=p4ZD1*_X80)Y1b7bsauKaZ!$#Qmi=+LtkvHo)9Rg z7=OMj%U?ciBGJ*00#4KX@?qxrbAQ`fxy}`t>b{OvY1fp@o5!n=B`^zoXOCBM`fW&{ z%=ce2w<|#WYaX57Id5Lr%Hr^)x@!9c6xT1bd6~RDSGhl*QhU5&(rKrF~xNdh+CP zIyl%(8{4?Z0z-sNu^>dmAQ0s7ZtN!JAs!LrZ6RogMV*3#kOE&tt{zu#F7ji^H<3EA z{@Z7R$NpprK}aAyH{_yovrL@4a3;O#l3dXe6h+`iGV!U}&hKpv(^>C|n*(z29H9<9 z_^=>125zb_1aS{Y6y{0Ic;wj@<<_c44JwjykBHISQ`g zEAtdQmM>`%ud<=A41$s?uX94kVo@gL1>reRl4x)ytw=6;Rgv0{bQcAE7uzFU(*9Wf<+3n4=`*87MWLz7xu>yiK z$?<_OU&sU5ny$RVV~qW@Tri(uusmADo|KPi5h%a77p2hrUtfaFBJ#UXi4>0E0N#ph ztEt8*CQqKj1ggZ1S;!wgD3q@$Is1?PU9Kg6bUCd0|MN&-1&Dthxvr=#kifMIylX~T zFBa7KxszkTnE_+JG*fx8t_z~XSrm=>3C);s0IeD}KI+^AyoPY0#tU?#@FC2@Xf(sa 
z7B6n8CC2SRyfg=(5H$P3`}6m5X06+jqL_t*jA}HSghjCmLC}|XkU_Z=@UwVlc;*GBf`0NK7vp!{T zw;?EUT!c6U5C=G;UyMQkF`Xm`wEK9i=&$P)LDLCo2x9YWkS7kSpUkA+S^nj?^d^^^ z{$4CMTbM{g)z@q-)8y{`r+FJ6_8mj^;2`raD>)PVR30ps37gdi^vjxkAvsUS++}%#| zMvJrU)!MmDbegU;r)iIQzR?wlOGmz{j#-${pQalv-KrB4+GC!^t;~;1+P%+&IY4mZ zD*4WqJI-C=IU?}_-Gj&X(pTSn#byb(`%**6i*}k@2*iYIA(r^Up%riW+o19viQ<$B zU?#NDl!;=b#u7R?h~r4EzH)c1KS`(^MdH*LTVeLZDs^Xmr4#dnI>9INNLEZMuAbsp za|hGNQwt}!*Yw1l)&#N*)Z=cHpr7Es{NwNGKfnE!O*D>3CylaQ(%rSLkIRET^Fweq zKr7iq9*V|a?=*b8F9+&FeWV|nV2<@qtng7v;sj&zy zN)ErYYp9ng@vH@o?CW2ER|E|IJh_PZ%nx(fKDo7vzOc<7;Fc_=IkxS4Y~HcAvzd-w zou-#BKBTMOQQFwJ$0jGcsegWzdRMr%Yi&Sg9giKDWb=~Z2zPBA?2>~^Y_vTi10;vf z-#B?}mTmy1E73V0a? zn67iah-RJNdi*Bki^)~J-n{s{^KD+ad1)W3`?36d>Mj`DIStP3_sy$MQLK;GT)I=G zZ+5Vh&JsQgFZKG$Zlxi+mj; zZ}Y1uyZd;bjWr&pyY~(NTlfxkJ7$qbpbus$WQ7GqBd9(tw$F{LER4yQo%4ky8TCJ3 zfpa~MCt>O!CsVe*C{4=dSX`unhskliVA<&M5GCZrk8-29^jc_N^%T+N;|^+q+j!v( zP6ocj2mtnmsM+eI)AO_R*T4Kd{rS&-NiX}SX?GtV!FyYzUmzg>?q)lk;wsIJDPp{6 z1X%4?$YKCthsX9Eq8u{t?jzhjn{-@xqs7+GYMU>4sqTArzk73dobEGFe|q>ZJ$?E# z?eFhP1hCgl7mTYnXC3%!bJG(WhV%vEz-(#3gCO31mnKb93^xmz1{1ETUn(lWMIo0_ z%B9s9yG6yT^zcC+<1+z#(Wp-~ASqR*bIg5rM~uzAv2(_fB0cyTbBg;L>D}v#^!A6N zbbPMw@I5rkTL5D`R$QD?cOz}??(hI)J#{ac)ocyD#&|p6@uYyUS<^=`jbvVE(0=o= zak+{xr|Y@Lj>=CD(kS$dI9|&_ebJsRH>NY2@sZ~{bMBeddw3G!HgaYFOLg zL3Eu>I^3O3zQnv4LuI-3wG$3-%IyW3eMTyDubOj{5j?3S?owAXS4FBUgIC7+iZ)-J zo~Lu%yZ!z3%k<#hz4Y+GgLLN(H!iyyk^tNd(gpU+4Zxk-KC4w6xFs~;xEaF9^dOw+ z#yHNGS4oYVu8I$fD`Kigh?}iaaOSrv5*6asqiS=e*tNqlg!VxhW&(CjIcBOcZtxUi zr~uKzhba)++$}Wb9YF9l+dY4HdzMZ=U}FprZlXnQsU=1O-RC9I9vxzkKTLyx<$40oySVdZ?Tn+OHJ1HNksw0 z%*vn^$3&g0UF_M1*mSo$Tj|czEp8Or>1gjPoxJDvkDG};+Vs?Ai}AHB`moJSV<)zU z22VpkZ8~l7@%&(GIQpH=GVpA z^a6-MRNkl!Fa0j~k5AURhPw%nQ+(UpKU>VJ5 zw}%@mw9QSC6o6m|xN@zPgUoRTD6#NUF0`0r1PG>#K&UJ#rH2^|OCzVm@v;dZu_a+b zFSi>-1yF#Zg~9x=Kj#Ra=2Do z>mYP&Sk>fe&al~|uYh2|@y_N(x`!8zE6gsAPEKJ=`O${r6D+})AAwU3&9k!T&`k~* zrp&i-?TOu#<1Qu<&!0R^-IMe5=Et|`@)Q?iY*1huJP_{?FJ|hn+*HyA2g#ej*X7B_42tOnB%>UA;vB$THlCMaB8i1yQ7zgr0kPq*S>eEd*noh9v 
zvH-=9Yn&@p!Kfn-%PK~$C2ZIWfT0{4TU5L7pKJW8zZZ4kXJ$GNUDY^0#$rr9Hkdy% zUn}yzJ_)P<@vm=8Rvi6~B`~X98y|CVjveFdyYC)pgsO>zW)y0C-AztlTzyOv6boul znxVlwXBi!)djR2QUq4C@AK%4|8sKWpx9qJi&~qusbp#mtLRdpXIVA2X;i78^Ly3pI zrE7G66UtK{9{X2ls=P=t2Erd*WIkFT+Ky5Hkpeg;AR*YB`VxfL1y{N9(&h*tJLs!- z#BLX@_=lsz^yBMS>E+uu>FDe=B+go~09f z;{NUL-zRjmV;Lz=MH-^88Z79FO_PZXX);(2ZxqPTv<>-|Kq>>$;|=N>x0+n z`0a=E{n6|6!`UG(KJr4HcFnv{2JTCzm~C9Ve3RZ#W`BDtJpmX$d+;dT(asq8XfnYD z$C(F303W7LMv~)2Aqrau0q}2~GLB_j=T%kSCD_9;)Dt)^3Fzh{H{foRwNdAk<6J5Z zE7@`5M>e+|+{`iO$d-DT62Z4Oemy>&omP6dzLR$MHq!AMw%LA*CjXS17VP6&YrI$| z5kPN)n-T9#q!B=w7ZgFm90>jMJjvYNFbj7R^rXiGU8-qI9QMSaQ68ns-es6YJi|QVAs!@l z_BJUG000aJ;0w?P&=3Tx8D?1+=pIZkLSNLGxljn^^7=}gG=Yx4fl!PDojM#ca~e#T zvNR~3WdQ~PAknr#{z9Tl7XA#$@|b7>5*5%0t0DgLU;Z<_efK`S`S3noP}5vg&s-z4 z!B=REb*H8&g<9k;AX+=&ZM+)%^Vd(pmiOT99kj})>HF_qq|>7_l*pN%0*>S@_>K1B7?+{*W7%-SiFM7By#jG<=-CJAad2ynLPBzds5S z4fz+~D}x^6b$EQ5PT=ADzrRQi4i3`OCy)5OhduF5Zd<@s$w^+8hZ;O7MFSYy8$$Jy z4`#nhh1}fIqP40;%kS+%X+85PD5XjG&&Pa|prqr&m^RS#uBm+&JaaQ6tIp?`$Jh(+ z-q}FDd+Ch(gyYxjc=w){5qa5e4Q~@MR(+m~P$zg5T^p2_PeN=5&>Uk$vb^{SZWy>R z4wk9$0R>j!wUCpz4@6gekCjefJ)PA>?G7GP7))Q=}Z+Km+*pqomNGg ztekK0>H01{c_|u`x?#je%K??Hm7i|mS~d&Yva!hhugh#Q7P*g7tjo+kO!{}dyYQO|RJsz?40h$A}tv(+2acv{Cp_C1^<8_**ENe4s*PP|bh)*?d1A4-I z^pbXl0c<&PvjLcEws~x>evy~s)L8cLBh>^DZ*Jj^Yjgn_)Rt`?u?|Oq|5B zMy|)c*ca)N!PYYDF)41H5kSSV;Zv6W#km;AyV+T7>NU<^iR zYybOS|08|(;uZ4723Qf2etw1r54=-sV5%~_QX5Hmczs9#?UT3hQ1R~aJdGH;OUAi} zORe_iTH4;mrkQd5@#U-3xYI~a?mk7xjdZWEo$lO!oc`P16@d6X?foOYdh-?#&p0ES z0W!LBY#G_NcoX^d!wF^`uhPS}JL!Mldz>Ee65c*01o46$XguaRH$3zXyK&D}#s%&4 z2z%u|+EHD{ZSx}9z}@mFtjjay;@Bu(WzB18F%Aq<{OaLCxnJ=c<|vf+*Wk5Mt*RPa z(>#})2TeU-5aPy+t(Z%2ra5;)X!OZwOwsu9K+_R}CKR^MS8j@*Ow$`D=Z)vv)sUN& z%`3KlMqhr9KH$0I@Xa6{y+KQipW+sqc{KZM=5Yx?9&%GM+)TqhAltc(2u4@%OIw~7 zg72Pwj%C0$`CVhsLwio2yoO}nGRwxRQ=E-pI2h+r86u^u=~0#m?e)lB8uTtRVy7JE zzvr1RX2v;3UY3!CEO|Y|^|26|%`O+u*>sM*)wQkLhp4%r8r&}8#TrZaS=d3U`Q+wP zLatZqqr6r8)kkHPwg}}~eoThhlJkypdrP~zXfP{sxo(Z`nPxtgKT~|};f-ZFGoF=- 
zWK6Rzr(ASWp`I7LsdcFq9n+c{nUEW2Ph@*zD&x8(5>HF7RrmbA7Os`5m6*d_Eq8u& zmYy_b)8^NlN?lrQS?035sTRJ#fpU<)n}$VlD}Aif%CzG`J+3y6)EcbUsULQ))64g1XBYpstlT|Zskwnh zYpa>=J-eTt@HqeR!!9p>ZSv9x4M-8KVM0ipy_5)7aONa_eA1)PA90VWN; zY-YReidX6f9Fs3(r+?NC-Xa8ReO{33q80ZhVhxS%&h}0^##_XT7vJMQ_`CFi7t%B} zSVQ}pHUY87KmaJ+7WLNw9~(f7_Z=QUecGq{)usckbos;12 zU;gdi(yP~Rc}cMe@OG@AN#A#Oc)9Cf51_u5&H$$aT&nf)7NIZMb^6ypBe{v3PA(lQ zyh1Q0?o!vKf7{SFzhoyiK-=#6owU_K!>ceZ`)OmXlm6++H|d|Af0JIneVhLB-M8t7 zH!suS+QLkRS*0B@9~XCbV6+d;Eq6HY;|j0m z3z&>UP%a2BScsNvEr4I$;f zp;Vs9h~!0i!N!8j89MdDsb*|ofihG6m{fVgC>epeFsbxI&Km1Uv&sVv6D zHeYdFz~aOC_LELIk>mmjw2&_v{P=cz*7kM&X3rF zxsRQ23(pY`@EGyfOuzrH?^B50Sx*&_NVP8kC5ZVG5|`WF*(JfPhJRuKUq}Q zmU~*hL%VGe|Kg${ERCZ$wwxe87W<5G1H^i)?JF5UehiWT=>2%hA26wjM(lVes8QpV z2QSBMN9o`lAOJ<~x8P?D0DILQ0CMpMtnDwF+SU%5W;CrAy|eU>Z~tfd7BKwc&1;;@ z@xm3}G@1ZM-I;AdPaxVxOV|PEt?P5xcCSY1|NQ5FPR}1bO^*&9Q3?%o7i}}P!cBN{ z>*kklU#EZh_kRz2<0jrJ=vsij%Zpxm{OB&)L=+>Up-F` z_wND90nY&KQEy0jZnEHa$e0WcPXhRMeCf=735Fda_-tTP(n20;jlEI1VqSUk)5hj{ z19OtI0E~H@tr@cgHL`zo&2S4>fZh_R#g85Q9AAaFB$Rb3mvaurlnNfLR)BD!6W0RL z^(Ae*w7t!^(^IaJw#dhj?|)0xrCkb+{$%I-z9pT?sb~0l$8Wl68O9ln5>qc zm+p$%N&=r%0OtDOzl)z?Y)dhv z=mB;_q3*oZR-1=5Hb1uJ5s>~^CyYdU(IwkMyD6|Jpdv5A_DjC_sj(e(s*2eznKE_} zO`v{U1$^qb)$nF*FC;<>Zl$a-wvS7NPqwFB|eW87Mf=?ke8iq z+QF8%k0$!v@rShD*-U%GZC<$3*REy^Xm`VSQuYx2>+t|a8%6Qq&vTxBjb+{A@Ho=sqqg1=;dzM56F;ReB|;8XdvI>=L#4EnEI zIiC!K*$(B#Pa~q%{IAJ*CvbFrGKS7Y=MH=qAZ+b&TxG{B7r5)SLU@ZGHeOB~@${s_ zZg=f;wu83#)mb_|Ji|j9Hx+IJ-DKAfZW2b^Ai4WsgE6%~Lu3#H1+37^J2NVfj|BM~ zpE#aTo=cu%L_Od0AxTCGt$Yra^D6pVvOe{^A)bW{>R_`BmD!96zwfKC+NpESfIR( zxxw=%_;%jjPoqm(gx?K-sK8i&D9c@Z;iTaAtM{Qi^l`t`ezle}HB-SA1{EbZI_ zi~*oe*m3RE(OW<!m^v5ChqTG6*?j_(4dAK$%)z4HBZ2cO0pnBq7`I?NkQNrsG< zv!U6{Z3@hkeca?Ak1ls5N9QNFse6;&1H>=c1}?Ud4glEz4AWPD{08&w0qyLt3Cbo~ za!0}&3FlxNkl6qzMIOwD|udpx1fARU*N$Op&r5&aeKxXC%+ItTB+$dmh&If|gJdpxm?Nm9v ziIz)*^FNCFWvt5Zdcr+lgzATLrRt>#7Ui#8%ls99-#o~WpIe5)_${k_YVp#)Pp!YI zx01jwA%PVj{w1WmYU8s?z>8~rF}xY^`3pzTiS8~sh3Nq$ 
zglJ=Gw&P+bK=tD@YBa%lJ7(Y#8*H@D_6^Y>b{lL(3n+f@>>z#f4ZF-exR-=~+99vb7k=2#4g4-UecJ7zoaOT2pl#i@cQDQmnLOm~( z7pE+b+7Vk_9`ysxPuVr>$9FGzne9!w!1Uk(J6TNtv^90xVQotVCUw(Rhhn?T6Eou%C5XUbkY-uZ&go0bQD! zZ18x09c|_R`ta-Y5Fq&I?nAV)Z0ifKUF-4@aG@Q}yOaeQ4|yD&AESAGk^b|$?->td z1i%%PYHk5Xcy+jw4gl0`?g9jOckpcRVCP=Ka02-`hu|44u?StC!_V(MPA~Re0D9js z&d7hl_{Y*eE}zAr>4VN9T&;DzNO)kN3xin4(~=@!@qkLo@8gBQcf?n3~@P$3x6R z^w`lHodMF{q(|5=Kf!EcAMo7-=zEhh;GESYX2FzlCPfIFdt;u229aiP2eb;8ThrCN;iz*<$RpQ}2?dJ@><}chVkDTi&wU-TPOU>5QAKywhNdvYZrd z?;H(4$o7%F+&dq=iIg?C$n6(!?D-`RRl58bwYq0ro*P~ls*FfCClV-#9BtEzeyU_k z|CYiE`2LngYsKrV1Xh4}RwAq8U!??QLGbMH$C-{LYHqQ-M!BF>Wq#6PG{16Q2wCS` zP5J0TO_1uw8>0Zmpc1sfg9eXs@iuYy;bwaJPtVh%r+0a2Oi(1W2KuSdgU^;baiLWn zfMlBds;v!(2as_)TIGYHkS@tj<|%-9)kk~k+wBqfiS^%-CIyR@V8ISxQk65sg_{CH zHQY2H7(N?ZV7l;v7r_3;E^F`Og)hNhQ((IZ=#mkR-5jF_sOrA&r9*aPd%yJ|J$>*D zTiFNL_OcZ*eFJX<&|*Km^bQdD;^hzFHR9zJ_QBZVx+w#AB>+Tcfc-ME_`ZaxlEferw+*~2!LGHYl?iygFbQ*t9T(Rv(tdH6Q{%m4fjw4&G| zqixj{n?O!b{D>`;)qrYmtQ+VKCI$x^dugkQ`2%*vZG_?GA8pdF@I0Y>?rrU-Z?IE7 zIzB=X4xk0}3Jy=O6K(-^JDb`r<2eG`U^nx4^5}8cAonqaa7*Y$xV2y`F=^;%^9nyB z9_M4^kak*_S?qQ&iP%f0_s)?$+Vj`1(+h4$&f!;(7rI|wAVl$*e&B}X?GNYauV{L= zc?t7>+s6)_7YYw{FsA@Cg2=q9fIno)oP2@Z@?T$ko8Fy%;0BF$eV3AXIKm4`%qb>I z27sh7m#psGd5QM=7|r+NhmX<|KHkXnF-r+BNsaj-gW8hq_(TfDOO0G2ANlREAlN4` zPV;!l6S>JolyWlj5p;U)1XyDp`jViVcRb|AW{0iJPhJhu$xF1^@7TCT$!bQ?1ISx9 z=0BCHFs99MhQM%PW1iEcP_682T4V)VeqFC175}2yne6$8x%|AmJT~)kzD!!ke?il? 
zS8yyKv`St{;3r6+RTe^Nc>P!!IJ?UU{;{Hq=tO%9DlbT0MCxZxughk(v5(4XwnBBB zFABAu)*FfBN#1SSn57*T)&+c&WXsGCpME)AcHx`f*2h!lT`+IbPRL*)(n&)uV1S|U zT7VYKH|u>g3>t7{jr6>t0-zid}D2m#0t;{M{ zm0fO+-RSP=!Azsa?nYx8#$bAWXbjMpKV$x>!Jq*HGlrM8%Vpr2OB!h z*aHyn9+q%VyPt-y52n#OgQz!>q7JDaXalmyMt}*pgI)*XJ*0DPG!t`LL=69wCm0JL zS`-|e2`LvU4TpfeEKvqoLaRsAmA)9&ij4g7y7^qGu_@0)slHg20W9Qrf+?i%s1t#AmM?^;sbN$?Ow8 zQ@B*d5&}HhL66wYae@>rK3I9dd83}r(4p>W?LcBWfI7hyq`_=9#MQBSv6|geM_Ea* zmK^Fp)5uPvO_UglE4m^P%3O`=}1zk~V<{j?sbyzhe0A9nI*$xa|G&x=#!&*>wlK+>Uql3iiN#W;JG|r(2mtJr`J@WCxWZo ztDaxwJ4e}A*lW|8^L`b&7=n(9=Iliv=s0fWYP?J+c~r?JUOq3A#dj8`3ZYT{H)*Bg ze2>p;ck?_Q^6FJRk)G3N(^{mSh<>Up29Iyt6HjIMo4W7;rY=W3a2OEe_zQO!b=*j# zo~gjYUe9>xGr^Rb z2sIyOeq8|JJ9loTq3(fn8LJANjcs^%$lvGdruywc~mC$kbo*3K7hyeiB9x}2T_%1Lq}F1ERR}|M0cVK-6Wa@I4QxE zPd;zD#MYWclnL!!fM!jhEpC!G%lK}rbMv0+4^04H#NN0T5SDm+}Qy&7^HZhqtG%A1fCnr0Z-xWE0-p^F>V6>39S> z$!8zBwz0;hO2o6zF#p!}AdRuv@CJR&O=VwAGQq)p^ged5<$G*~B%xT?Nu>QF0G!Va zHC0VWn(R;Nn1kEXKIJW}tfaP%=jgy+q8f>tsg+&+0k{L=wB z55i(~fnRfw9?$G|hfIQW51pR}*oX>+F3_9UXz6n|IsGs#Om3$3eO#P#uf7SVHcnFM zV^oLAP%ocV>TIVuLm?47iqQOPFNmXf4|u_I9=s`2EDs`kvjrR%*Euv2L>7@(k$r@m z`7)E&I%;KFBSVu_EAut8tWnyxcne~y(Oa#4P-7cbPxx}=<~+{Etwzi@FE5u=EB~{! 
zRKL!-RLiScc=i0M-YJ-5TNLM2Bfrdzm!pk0?Zt7yfeIX;{~W#8&)sHds`1h|7yWw| zaPr~s+yM#6ks}^QQMYpgaNMo^mD!^7$9w^yM{nKJW#+*a9y3r1ja-^-WEuC6!FV65 z5^Dgn_2;{3X{RY|?ysgBSMPwk&=pp96)>rkSU{o#vjH%YrIQZ)ga;UASzlB_u|Wgi zlgI!pBzO$`p{7A~L+6+mj+25o#5G(W)x99Q+}`nS5>#QT3|KCpSKuTNX@FKGo6;$8Q}U-IudbDiXwbE=zL=gaO=E^^m1}4OsO-WItuUzL z+6-__C3q^mstgc5h!cvpZ{ALCjo(gPO_(mjPFgc{^P^<=Q0X4a=M8L73<)u13Q%oa z!JUG*ZNU*BeuGU{_ntgR_opV2paL#YG0;SrlaMwfrkByV?eFdZU?ZW`K?96ZnyM;5 zfG+~-W1}IGW_c$_1pJAIjNi%T!K*_^iI<**D#Zq1YZn{6rH#!ra&;(et&~EgV2=bV zrHxQIXlZLp9W5PZ7!!g3+|&!w=_{EA`;JyVQ0|Cdb;9?^Thyv5M<>=V2GDW7eR(u3 zV3O_0)5$dZ42v9?qfE|mLPcc@J>&!EbHb&)XtAz#y64d^;y~(=Kq*jtZi7Lw%A$2eFiL`OpxWU-kRTnFQtX{ObCIbQka zMEdXFsgxGM!@1Bws+ld zwf1NU9n>{+QSYRY!ExXZ>K#~dP%_!f4*$j-!3h8u0MNkhc)_wuz72rmDBbF?M68O4 z60!iqs7`pk$MSI<(n5~{6bl`dUYO`oCykGeV~L?H?QiV|7(Up<34|N?D7OjlBVf}ETZ3&a7%Bf*4yRG}783cEp4Rl$ zy|2=}M-PY(kBHu&dH@^P+E91=J!4lk&SAW(kGKR8St z0)H~%S^(Qn&qglHwDq&eY?1bC$E%(?!b?l5ss37h>gwpg`Nsl4t(o?0Op7etH_kR< zOZWyG1Br`skD%XnEcct@cOPCKQeQVL+Ge^k-owhp!8YI!I=Y}+vts>29ciq0C@qXE zrWwq@Wmnd?h{mSzE|U(;^C@L5VVy$rf7YQMFswJjZNi(VkDL2KZ38AX05lr`$eMDK z*D(ozN}4NZwT%5crM!7TGKj!zXLuGAgTWR!LQRle*7!=znpfkG;*nP2#GOt*?RO#e`B)Y zb7~p%3+Iy_=51&^37lB2S26ps=EZ%3BRzGEun35B#*KJ!|9t(UPDbbOJeQcSFTQ`c z94Pu~QG>JBMfk<2=EdR3j%t?WCL7CU&&LIf$__u>H6px zZk30KhopSdcH0NE0z?6qn*364+09$(HESiI8INjB`|594D-M|szXjr&a%)0T_-JY( zJ$U>um2mo@GY_S#x>;_=k;g!PFXqayP669W()0dR{*GV zSe?*%)Br3}T?g&-UcGWPJwdk@3vD5F-9iGrgr~XN)T@N$i4F9MNgQdHQ8k!r#bU$Y zFrXNf1ti4^Q6oR|Y(Dsf@ z`1a%^;|)J|7)frOZlq%3V)VSC6S+oRt*+0c&qA&TWy%-J%F#^r+DFu&NYl<)Q5a$<&O2CoDej>&wZyW~q(i z5%2;KQFVMik<sSCt#0LPQbdPlT}o+Gf3sfcXu$_k2!f##sjD&9n%iIWFOo;KDeT z>WRQu{nC1oCP&yd<}@~E0Wfy~EZYEMfu(w_A>|VULwgUle#b_xr7M>%r(vXc&Ezd8 zBoSacgHwTOY%rt_LW?>8dIOul+&r~&xSm=LP`7|K?<(n2LLAa^&ezz)_Q~g;;2dKi zz;OxFT(-TE-OB@)(EG(a8J^^n3Jbckv>V;w_6~p(&ME+UdC6Q5B*>-|S;n8zV&_&$ zP?g$huUM-KTE19iBBA|bv^&!5%v`#OedH@x#dtFN3_Hcx%q4HCg4Fm~)OmPdIQnYz z&)}kE;WyPd$gi^{fBJ8+dzkVV)_x*DFDax 
zt3#yb>I`qQ8BSiS>Y_!HI{MZ=eKW$WA;3MP-&8ns?`;>$@}GVrt!`SB2@L@HH8$~W zV3)cBz3cEQN51yGs7LuhI|8-kx}EwVZq#@-Md_QF1=ry@}9nT1NgYc6S9n%+QaX%vmK{qn*pYNclbZJ+8(nVyXn6iu1dEk^AQ{7mDkeeg8Q6#o4#afeS$V zEi?EBo&C`p`gAQ~xcEN3uz3^5z<-*|d?lSRkQFzJ*LME(J6=lVy2jXS8gHB!z>3r_ z{~I}_b2)P@DXVgVP25X&2@Nai}iq}S5QGpsW#rj?Cn z=_(S_ANSp4FbA&jsn?+Zz$MoH$Py!@rp+W#5I6#^xAIkIv zRl$cy95fnM0ti3H6Iuhy`$~H^v5?T-)lPCit>6y{rJ014fPxKV?}01~Ac?vL5><6m zl_Ux*1$Nr{)zg{wYp)GnO(O#%skgm9wE)N(5QD0!P)A%%o7F=n0%3AP$Cf;C$g;}T3Dgy8B7$Q9bu)H>M@L7}Q#|WAsZc*xssAj} z_B-#slluF1(lgW@Bve4BuJ00-AUfFSrQKlZmh|GVRie3^9E9#Ta90qjG#OfC`C&+_ zgG$Z;sbpZ4oCGxi{CZnD(~rjAN!PAjMThua`t`%F*|5ok1Ll$w2f0uWb#d)SEjoKa zt)hWCYh5EImBCNBEIqad6D_67WUxcuwQZFMZvysrJMajI6!ws?P|+cuu%bcQ+{-NU z?7^LCH%VU{K?e2JPO?@yc33@m@bGSW@by#pxIumBU+M{~J5D++x2P*^Ka7b5^~-(M zJPStiTa;mN6qm~^Vjcf-=0BfE&53_;6z9=^InTU8WKqVB6cN=veROzQ&f+NS)XC|z zr*56h=k$&8Rn3emrjX84^i_#<5qzEl1^6xQoV_l>FMfY>95@Y#dA3eGil?Y5-U&ru z9$u zPncNs;8wM+p)}N_sznG9l#qFZE-Am7<7!MSiK{!vKqX^V%XXI*o`IY~+ULX@Dy-YSP!kuLBJBZgajzJ18xs6yidU5@A~e5TaUz79CvP9|8b>aO;CK z(mRrx_Shh|$C5kxyh`=LRWRiUeC6XdX7XHam$wZ7q2M{R*OOP?>?MHC_x3=6f*y5a zH|n<1Cl5bKAJ5-U?fvSPVs(IgJ819T{nqs1`1=6gHuBck08(T6j*@HNBVD*rZbB#5 zu=o%jlL+G4QyQpSNM>zAy2Un~ftte^r1${bI|Db;bmw%ML`uAmdA&UxvD{smN+YG~ z=|=l#YVB=Ilh3Ep4$fIrC0W?Rp~n*7wedROwHpv>Jybctry;b}Gl|gIh(O%-J5VA` z*)&76+rI70BqJPXK%4w>BB3O|ZKnlKZO{vV>}_ME!3~$_9yfL1l!MyjcMk0<4`@I2 z)V0&O%`;-#dfygxauOYd0H~u+aF00Lk_h?}y6Q}V)&c*`BWay3U>sF6)p6KS&QI?x$r;_-$e%8m58k zSsj5N>d;FULF7Tc(5xPGy=`}hRTApC&qs?S1ugOX!dLkaGbFw}^83;$tg1QG-&L1L zWF<}vucTEG*_Me3sCgOR*;~A4N|eVFL8r5yN?+8^Y-B>UBW_i`FKdF7Drw_>m3vi# zUn^10t^78rn03v2K8RJ0TH|nj$JZdZGTY>Q=6#hiYXzSgKYcDv9%tI|WbtRCo=jHj zycXZ=L&b4?mP2Z%D^i{RJ)6(@7?ndZt~;H&W}SUr;(*el^U8Ubqn){VqJ~9QXJS;k zJn_VOyL!r#yJT}r<5rHJUvI{$aM&p-CnGX9ihE}|k2i{26>`mKE8g%Wef&TLZ6|vG zYCx60V#PG&z?v;qBjL;De*)(2L}% zK9%Y*b0u&EJSdfGz~RF-z+q>b!JBJ0=jhzQ%}=`kz+EgWY_oCb@y1u!lwD1w{Zbm~ zd?yVw-$b>S2>^Pws!+C|ORIEs8=$`fz&-#J)&uJ5QA+^IlCOCS;PR8Aj&0Og7SqwB 
z-)pO`H17c5pmhi>LLlpn%~l6)%p)J;$T91G*gzb{c7ZQ@_W_Fl8YUoNNur$AsJ1~J zAWG{I;)+)4-Qwmz61MutYT|-c)hSeSkP@+?hs*Ta z06XjWcy=;<@$_Lj?5a=m+a+|gp?jNtuvM4t4Btq1`fsKuTQj5rObhb`o8;RJ&y)^_ zG&Wu-`Aru+#PThRhuCX%d*vb!3^GU$r-8&6=QvDvu8gOdr!!1|;2-Ynp6`~hQ~WI5 z8XHe_18hRuTuRS2pJRd#fJ(|mR2jM&aUwB@BM$hok3_zKNr;kACC#yc&vvAY19?LG zW-tZyY`Rp}T2QJm!p(6pX#-e?qmNy@&vCy22O&)j^a;w_!KsM4;Y7^`_pMtKK}HRG&wVkF1ViRpaJ6Lht?xhmx+fR zx@^4|*X%ExJ81~OYdj{Swwupfac~v+*}nPk9`&YQ`xcSnLuZ8am6$5Tud$6^0BN3i z`z(2r8Ns#(!!W0ED&8+XL>ykrk6Q2Af|5w^9ejP0@}h24Xslhw>ZNAhR?alP%_`rk z6`EQ$9={3(Yd2yU=gcQg5q9{-*R866yp?6k4ei>Yot4s7i68}@oQ2^mZm(L z`t0?olJiB-{o;5&N<1j1GdOd3`ex;;Gk~~qq>IqsJO_%-od*-?p_|eRPQ0&w^F$UD zyhcYXl71V%KG4M=mLF~iEWZ$904`n@1n0Q&l{3tr<%hUjA;ONMI-K$sxBM!99C$%t z+L_gino=$Ag)<2fuL5qG@!qCxV?zrPXq+7!)*R?0LlUu z0dtf^{z}YTl~#XNiP9P%vTl7AYLJ#5Y^1~PB|tIK#`ZzV>0*N3%IE()Fwy~ z{2{#nCwA)7Sp|pzh{;cBv??1eT}y=Kuo~da9+_{TH(f{m2LMJb00b}%7)BidTOq zw}slo)ztr>H_f8=Hkno=R)OYeBrD?n8O>ibG75B3ECsNA4<`U;*E)phs9 zMI6&P*~>@fW%NI8XZz74=NS?R>pW+8csOltuu_65hJd>X^_BtLQL796|NiCwm%g}r zH}unWajpeA>*-69dbsrfzeXii>eN!+&~RVs9qven?WOeK`7+KTwphLf zdH@IoWdST?+6U-HYP((9!UDv8 zYOhl=3w`9DCAKm?^C7RG*bYKB_(`J#F7%4B#apF=t+a*Y|I>$`VGkJTYZodftJ^s9 z=nd0j-A(Q3{kPspot+(NVP+xdluw~s4KFk~*4YjX0_1Z03pR^>VE2fYk#U5F^z#JJ ziF?#j3lQx9+4tXlH$D4@zYV=Yc6^bdW zLUf=K(B0O7jbE8APJ31K?GyA+L_WK;pZ4 zpq_VZri-*rFj$o!ee}No&_e8dL|8n7(c^;$z1@AGGNV1^E^JT>?z^yU+||>SKK|t6 z^p}7A*C8$M?(0aa>l-1B)`Z;_^>;%fWa+1J3UN`Du%Ww3_xT zz;PsscBRf7MvDoU%qHA+&c{(HHcSy{mYEQKY=`n(?)d(9bS8Bd$D8Ir?q?@+wT-{c zv+ob*bnD+%B{yw(K3uy8oI0nTpi2QxKUF-dDJes@`)wNy<#fx9*X$-lPUxXEf z$V8>303YI6q*$08BTz}@ z%El5p!40S}Ao*+DOPzQmyMFs-8XX%>+f56pr5k4khjVGV#O65kIi+bApx7kBc7Tp= z%I8G71zwHqY;0qhcV%}KfV2(B+DTA=O3gpY-C*g8s?H&6}uYLX3BbB|LU-T>t*Dy5+&(S@${yJL6K^()t@ z(<&+pYk3l4XW(Bcf2(RIdX+05$5OrAA`vlaNJSvfwuxTi?Pxd3q^|X-Gq@@68eZY9 zU=#Q0@_h7#T_!~jXCI}Zv7yw1m4~aBM$<0NNM`Udw}%RY4ocQhiMc;Lk?xG&OkM3= z=vHUD%P#X5($ttRfO3Q9_FMXmWqN4ILr`mz24`7dOFQfMPf&F1^!$yC|nbFwByLcX!JD!?SU&P(!!rd 
zoN;Phf@j|2w2_T`s#^$7TXo6J1aS~&DbuJ${Ka4XMViGLhm%eF#rED7suN7o&^zB? zLg^;D{H~Om^1*D$5(2|o7N6YI&`+7_x;qIlTsq<%5J}}@(X(x5dzT^QsMP$_5rr6H z44-A{5YKGxe>uD;<5bGu!VCB&lYfVf-?jSwHZ<)8ApU{0Vt$w8fmV`>hb_PCzh9Lo zXsbw7j6xOfln1WL_Y4(KRXa`$cju#=dJ*$d_S(TLevhMjEqS4n8B$RYlafuAtz%4! zsOtPVFgmE_0ni=&LO+H%@=%^{1=jmr~CV zl3M`m&fW&LYTMIRT4V6vB4?JC0hIJFr7ky%Uhl@-g)|I7EdaSRxQMf4~Z52dG4vD1wQNKTZe$ zwZgpChR!sK6^NNxyqa{dFd^KHe!g&IcM?tVcL$ zS39(y3+ggUYLVN4QqLb!1Qh{>@;R6O2MAO`E5L3G3cvV0(Ig z1ru=fEvc`)FL>;xw>doA9a4`&fc@;sTxy+bNw=@v4!v6SDV(fXQnU{A2C#(iEIEf0 z#F4}5b1Mm^BGj>tzP87P%rz{A%%|Ds&(h4?9DQvQM;7RtJ5hmVa>a-Toqx@l9;tH(g4!>w)2sk^%?0Jj-tj1JYr5y;9R`5I3aeJ=NJ`OInt zlJI8SXLli?*CgBq-u(XN@BSv;e{dgq+1zRE=xDAWY%|%|!I6n-Htnb+?Wzwgd1fSU zXc&26kwLn@L^4=8P-(@~$d35&ryQJl!tXJc}1t5Mk ze*MN{`*1l}6vx@?BK-UNTZ|O1Mq#ANBj>q?J% z8XDY+q9L z2ZRaU6a#mLSYhFoIE^^L=tv_&R{_Rf2F=@ZK;G3JktRB!N7y;jB zU`3ef462j}C)C&7M|GeH9bK2q1ynNtwdX5~>A~bg8Xvfdj_)8I--g32akC!Sv`m52 zvko3bGxwi$V&534)p z5jKN8d4|2=PNbUjmx;;8=}O;~5Y%_Dk?z{?C_LK3h4)f`(K>XjpDnYJ0GPkle=SVX z!N{n8R+$JEaBZ1RQnD(`p;jNL*B-!Z9evwXRu3kZXVN^pna63y4r(v;fMgwbY;Cb= z4?T8srMdX^N#UC|vkYxKb3xxEPr!EGcqXXKeW(F?bnc-Jv+lEZQJJ{Z)rC1dTx>HL z*x7MLliSHjQdo^3t$p1-XSPKf`rFsrhtrfMCW<@h(St|nlTSZR&leYv90NdgrUH@b zF2@9ni9;Q{4ZGIVsd%=FwjgIK#J99sxlFezHhwJI=Hfq&+>BlofVg~vpPXFoaKIb6 zB)=lyCH$^qkBn+D!h11zo-C3VZG;_XdcLaOtDJEymYpo6I6p3> z2s!_oOM01@=hL3twSDextt=fdYTZAVS%rWg+)VN?lr!X-aKTRO{N~K1?J(TW04;d2 zT)~X7YztpVi*k5Q;I_w5_j1+>U#!G4BFSj)%Gx|!N9>WgsAhxrGNi43L zT9H6XE+7>f6#%;pE>_b)2@tV0o4VUa&_lhP1{&X?1VEBH&d{KYRe-#Y^$Lrzo-S)^ zt4;Z%qqzWF{ZBpRT?WuSc>aWVfDt5zsy+yul+?+?;0?(O39%LqLN686;byB{Egx{M zyE{Z1MNWN)K;y^ZHMHwe}fujzozf~cyvsjj^R zM;us~(3Wu4Uk>J2KpfWd=qNwGxfmtl)+V=`ZK`z>g@po7oqmjujHTyzP!ptYBO#xj zpTkY}N^BIfaeF&^!ZAr{ZWBPi64LDhHrq|jPUEGoBaL1f0S&UN(mn~GrG`u_5IIwp zy3_lhQbF+^Qg78FW&zVvbF*n-X$hCgYoXc`F0Y}-_Ewj>3Ef)(W&>6%^5#k#QPXBj zB*;@&&R&P4Tm(t|!bBNmJJHDC)L;4|(%M!g6vH?r(L#sP?9e%eFL{EIpDb-W=e{jy zB#e&}k&ZT2P3Xt7sM6fKe=kiunxK#2R=W$U6HF9#*bHc&b15S1JyQq!fPY6|B*>2t 
zV&+n$pMO)v#LP{wJcs~k)lj5?T;YG@;e$p@zfJ95-bh$(d09}-IZnMaj$szUFPBj# z%QxJ7?MvE;_MKnQYW#dIeK|L)^twlp@9U=jKBW5w)vrnZo6h%5=U0>X^37Uh9{0tU z$*ESHb3wzZ(V1Z|Js!_eq$sJ4)vuLK(P1rSR3DEp<2a;gL(ikSE^VX5WbJ4 zi5PNeN6ng9SwWN5AOvXk8~@^n9|uliDIG-8>A0$db{jjW>TT-yV3)i$$Xg2)E??vO z4t>=r0G0$DeKcVUS5T^yGE7nd6a-w| zc+Pu=O=wfA063PdTYJ%Y=FlB;kO_@We%xrMMF4r%g2Yw5-K^6pfRV9+u*1{__!7@* zals?Zr$PT7^%5X$HSeam?d7!gd?U@WY4Bn51S$n1={geLf$jk$r2y8*OQ|}XA~ONH zlHrFllOY*3ADim{-R!RV_VlwK{UnVIUIQFAq-C5&Y~vI|X{8cJ!JeBI+gjTI!~!|m zfc!Hc4iYPTS*{iLR{5NK;)*ck*azg(M88r`x*6*h!+k^P5*x;*H|N;gMty0+`x6ss zeDDVCNLrrX{+@pHhd0vJT(+*@20z>684_(RWDIs(3NWndfFMz6u;5tR&znrN){qE4 zUt3B~=V$rMAtA;CB7Lh7DY$(|Ua89~;T@RIsD>3KGv7h z+|INoYB<)-Nu7N)+@8xjR%yb?4Kq~lhp9U1Vn2=vvn80VnR!?-fpc+9n&Jr75^kGF zX3!g?nj7^Y!ing)ymCI@1kiqyv<2dY7#MsAyR70r&}CkalHMkwI{!IyhQ} zWSX1DFTjFs?va&X1s51;3qX8`qYjXICm|%596)byN}rm|Ivs2(xcE#!#QR4i%7H_yAYf7KuX>L7I*&rhVv0DHfa@ts13A*PGR~xQKK2WoaUb5fP+bi z?euhRCcy6X!Ljh}*2ZSE%e_MY_D#a)Gk63nYIk~iVFqb`U+`)hl>#SbZJljMRkzY} zq`|X`&(g!GCrHQ_18fU!_mmnFw+9ChJzOJzH3m z>iDRF;2|H1RT7`4xJ%{imoZIba?^^=^*)YdcCij}b?8!h_r@Kh@%^cB9nI{}cc$0F z-`IExJzCMw^|Y2x`ucm)aQ9$Z+*(M#`uLaW(UZq%fxac!*VA9U{XnyCO>WGC7|IO+ zF65~?WcwB> zIXz%gsv8fjw+A`p-)Qj64{6Hp7w=jiZ5;)3#jnqSeIWNKnP-1-btMCNQQkYyGZgaa-h}>JEUY)%l?jEjZqw5 zjvB?e2rZ9{Wh%KY08WJD$Sxo6vGYFaigdFPy-KA#ZaCY<naHYPy_(L%iYq-JUXlsX>4&S zy*K{zG}w79ZNh^RfOHpWt?CioT}Y4**4V(b1Ry!Ui(5-t+t|j=aZCE6pZp@!HvayT ze+W-xZS3&3T+MDBG2IFRSTwb!LkS&lxG@VO8vqS}HFkFg0JZ%9yIa4wmDX0)(wAS} z1sqQWU{*(2DRdXoOYJ7RAx>My8vvKJmG$(zw45eppM=!<-P>=aJ2!8~=Cl9y(ch=_ z)~(bAKyHL@d!@b9jt9W&m#?Ouz5mnLoVSS@!Rp2u?x#1wB}h#L#Gn;<+TIBu#$|Gt zT7!N!+T}h(&B{$+cd@_QMuMGippsiGq}*WMNMrmgVWuqrR&LaDQqtMnfpv?qbbs+N z(%e=0l#`8=etq{dz~q(aD+lP@w$-(yt9>J=ha99iEK}@a5>P$gCDb||J$sa{jiJ-s z+loEj-86-6@S~~6Y2xVmewgF6~FCSD#KPNUbciwp=BjJ9^Q z#^lBggt7V%s|!qY;GZiGd)RMQ@>?7Q5O$L2pL*A>0%${83n6b`yOpjFj;2N&P_zIn zw=o|l6zpUYq?ERVB?$XM51UOdVRd7Vdi~AEe~YKNdpu*C=r&`UyWI&|Gx2%ul-%Mp zq@6YuKnm3BVY2=(SY(Md(519YH-8@&A1NRAe8kffby258x9EDP>&Bej0N(nRrnl4T 
zvl8{py7Ki-F0oj#wY3o&I-8g%`pgxtj}zHO_Ldh%{N}RaeaJf_A5iUtiG;?Dd|;xq zdO&fGR{r)nz;Ti9sk>Y-$vU0lY=n#JA36tWjkR=+i{lM)-~tf8L1z9=WpzBloUe9` zhZI@K`S>pp604yv5$|l2Vz9{MB7r#?vAk2(A&Cm0;5pRdkVD<%edCyt zx|mb|UD7QyyY`j5e)jO=^mJ+($?1pb&h-z|06>4cRF8zRls313$Ve{(3?PSYKxefT zLiUgV)+5pUVC?;nbpHCQ&(ae>n3D*>OB0gEW+a(fk#HlG7AOvNJi_L#LnLkTxV^D8 z^>*~64mRqQ7B*7{JRIr0l5YOvV78Yov!ep)#N&hHmsq-2|rEK@$Lc zoymy+a|@~B^H2XS{lga@0bbD6ZiEh2;Wq(sZHItj6s$+ld;ZDI4^v-rFFMyaLjh3j zsjdRhFMtkco27iD(^x5JMtYf@UBChWa|}+jOYk55=4QzJm&1a+0Et|P8)MlXUKu}t z5$#Vh@&^zD_KbUZ;Bp!s7)tZAt7#LpjgF4CG%@`I^@hnberXIl$v6=KOt!WRq{Chm zUvS1Thd#I}Alf0G-I&1`H9%|SFg=3*kEfo}S2x3Db4v&O51YkG_fzV?^Pf7s+A{v} z?RU~JQu!Vv!&^xH_dvRCWJTJ;_xBClIbs z2+sD94BHQHT^&t7{n3xpP|qbiCJH*4rXh`PBdt8O{kCiu=olOtWObr5-Fy6H`iGDH zAuS>W-$oB#HKQh+wYWP^$*sU1GEy6LYDP6_6TNTHH1(n{3OM-{9%f;7M(qU*wPns8 z>T|2h-b91+4fdwdzB_4ict1@)T1d|xt)-1MHn?sw@vHAn?OokShXK%TIHb>eM4R%P z`#}EIZFW4j-uKV0VS_`reJ6t(X)E_7BM|TVbYyIv>+`B~MIV3FL_eVT--I_mplbeB zkeZ*hSF6eo2N3%*sa7{$D&MbHN;N_+!hcg7$aZ#>y*YrD2c~$to$OV|y~uIOC0T}7 z00r)CQGMtcXirziE~oKZW9jDY>#3)|8*r>z1d=`OI~dgK$&7^BmE8e|>;sh5KNL&| zUbF)n%eXEb^Eo`=5fluok7@COV1e(69-Uvp1alHS-6;GK5YY^cbYzkBJ4vookimH$ z$)^`eE_Ip_$@Bn+bg-cXz>9>;@x84FsqY%r81DT|T3mjb-n#K#y4*LKy6~K~yogj0 zkl51F4KDyZ)T4U!h{q(i*1azFv5OZEE_PoJA$z?xXwZ`tY?-vuI)SprYMNodaom z7=7o3t+aHw7AE4hQ2BWL#gnuS2;SI&dvpR#%hjX1zD7NpDM^oloz!O#edY11V`=pA zm2|105A^_~pBqdZnDFcZDyL^>(}S-cBS{9tGO2TOsgoP&7W|?ukak%;P@);CEL2VP zp=LUfQs=I|ZY18L>E_kzslUB9wbH+}7`+b|R?ViFi9i5!m_3MvjZU0vtf2bxOVlHt zPCrTun4LqM%%-^(SX57a@We;E=rE)W+uhnvZbWUujib$&VcX}qaYDC4-)aOjZ)2v; z4W0)8<>>2qT2eFt?PRzu7H@&vp2X3ua}$#1o{`}+*n2tcj5no+4<^&oiFq8jIC(?- zOk272R9*1g4{{mDEnE}>!O$K@`KHfpY(9;$4WySuCXypPnW9W{zAgtI^{u$R_`2Z0 z1qZ%04*YNc@wdi;i;REh9LV6XdM%V29giwX0{I}d$>Bx16N6BBz*Ew>4+!XF8Troj zvGmUSx6{qHuBVRfmb8U*bq{M1CBT$>@xV|qumKmzn`KqTJa?`)H@G1SIWZE>ZOu)Q zcZj;6!QVP9H~<=ypyg54!BXJl0BW9*;;05eWqANCD2GJy0E`1X4lLNL7`*F|__!cX&2l%u{>igXaR_lQ-MD%uy*vJU*iObt z#2#*rH{kSM8v;Ahv3bsohOKzMlO`p}mpd+}%kK}RmFtUXX>Ac52kh~hxsDlrdh@60y|MSw 
zB#t%iP2Ip0aw_jJ1fS^-%$=X0xLYxnC=8(?Rc6TiD##jCzIFP7=D%C+=!&OQQqB=?38XG8+(;%4?Zq7qyqr zqs6L3BdP#%b5GO#uOA|zMIXGA4y|MsaBh{1*9bVie5o&W@8A>!F>F|!;HG@)0GxK< zG~;srr8Lw#m^$bed(c$}U~O+eA04p08+y2?+#uoa>h5+%kRDDvOdo&tDM0u+*MM3k zX+A@mhSQdJTPOOvy7Wfnqmf@%XIlBFPuv7x2dk&8(r(&pU;d`s4Gr9TsuW8JmvPi_Ri8H7z(r1n^iuO?F3Y-}Er>j*+f4mKt?9*@OVXu5oL zF!lCzF=#tjV-uDE-X&}Ec0g4HLCK$z*du*FK_UPtO=kDU!vaBH`GoDSHh((v;c4qpl(9- zbaiS#WPmXyumCa-X7AxH_!*ne-by#Fy^FMXC>?GiE!%*K9VD^TPsrrP!xpytt6xg> zQ(s$es6mXSvCwNifKP{jwS}$cp^rJSG#TX_=+_y{xrpPCrJZE}j=8ilaX3I)joWIZ ziFFX&1W-Oi-Jz?#GrfQL{jk6=y)=^^&p%2tYjf$z(ljzQ)y)QGWi3**?eX@x_Pf2Jy-QnTao0qBau{}xB{r90iyuQI>!78lF6C=4oJYP!DM`#oKfStv5w*8~8AZ~R7ACZx!_NBK6 zZl%?^6?C|_0^n^RE5DC^@W9|u+C^%*#%9R*)%i5>{Be5ndh3+!|(fl`% z_Np#22%uLdb_^gqI&dXS%(c@04wxPS{|x?fo;8bUIr8`Vy{UirQX0QgN{{YOqE0cB7U!3vUST0ZC2>AlA@RKuis|Oh!{=lt zRs*Wg+?M^YaSG!*;Ja}3wLdAQP>rgT3*}&IKT8qxw_s$aVa@Q{;vq}UpQq`kc zKQsa9YNV{1Wx4m)xPSWA>y>ewM@>MS-x|k}Po2F4h&_%EnPp1Pv&Dz@#nli5IOtS9 zj`QbjawZBT)@sCav5NpDMZTzBA3<<2MCC5|DsKz(zXwPr%cM}?qQ(PIEO6;Bt}R9i-QrP9jO30 zNX2#-5I41hix;qZMt|Tg4m7&c*zM7D`~9nIRvL*s*4J0q`M!?MBf6*n2`j0|rV!=H z{}>d3n&b_djJ_Hs%qZKxEUi*{%uvYkbe-nOLVZBQXkya(nb53~) zpF#!r@S%Z^QfLp$i2+q+2*=ucyV`1wbBkX2X>Hf3iN!Cbk|NWOSk%%A{m{1F0YYs}t}*)3@xvchyTum?1lC zW%J))y4g0KX18b4WN9*eKFub)rP+|y%e#Fy#UZ)u0+4yWh4fLo&&#WK(^r_B>zr>* zR{)ExsA+UJ;Xw_lY6Gey&BRwnS)FMuim2b+h#}v4oSuYfKKN6r!_9FciVZmZ2wvc%S<&|Fkq91KGq#JEx=@I7S9;{9x zQKuX%hCE!xp~{>NQ1;SeR4QgxpR+1~w>>~xJ1|x7yA7R(wl(FnA;BEMuJQ-t@1)Vr zOR2BEgZi-kv65lz=zm6#*^U| zPYb-ig3aTxzFTQ&rwWj++iagd( zuO{xMnVHG3cv9C2UsyqKLPMtsbq6u3^9d)HKE1L}>2N#!v6J5(uDd|P&GD=qq)obo z<^eUwyx~d>pB(`50iOn4l}E>Sl*vyaA%xL)ojlU(jmHzfhE4SSeGX~YZOkIN^0NzA zZ|*>1e7hrcUAdjc-no<>fBh&;PE4nj=hzWvHK+j-ezDR+TkpV2t&%iAUpVZcUOthHrnXY9 z@tFCduW)3(RK8as`stu5d7h3Gj|oAqSw=at+JN|Mi5J&DI1bpC@YH1wdw3JIFEyvn|YC?W>;&*s)kQ5@kscz*0qAD0uM;G1Eol z(+9!D#O?I+pX2*|s($hIDQ6LB^Zkfj&LB&gGDS`%kMba+lVwEzDBm>PPcxujX`9mJ zCYIP&+2FRa{AF5LTuko_9%Av}R_YsQi>3edb(h}BKWLH<@W#zf{51w>AwvfC79_7% 
zE?q`Pvp=mqUk)kj7D(Lk);HJF^2Smur8{R4G%nz>_;PDM_Iafpo(m2*cXFe*v^u1U zy*<5AuP;#X5U_?r5NPf|FIVYbJ#i%=+dgJ8KzSS3;oU}EU~yq3b>S|1psNp8$vvER zhMsZTFqS1~PpuW$XZ*7Vg{2T0z6q-#D8Qs8tXs@IpG!-N%jAc&)Hw~HN@;r=(r&%U zb+>h;0qh80LX9Bn4pUh`qIJ-emvs$0yhxkd(NVws_RTc$tET~;cTgu;+ucZ?e*PJJ z2F_#EM2UI_`oRL+EgXsH`K}(add}tUNdUlyG9aR9FSJ<9P%N^_* zk?6W2ve$r)JD~gSgD;WTqQ6YvHZS3!VBN(H-B^3B4)WVKZ&5r$2G64T2>ct-6;gF( z58w&WQAf+!XSJanHHty%t?vBN(sS$ye};|aujxbcY>cBlnS}0OYOR^gm-5$qq8HO% z0oZ8ou=~t~ymCdOiOrME&8#H2VKdqrIsvgf!qf|%@?3|xM4!dzkI+Tl_Qtb^cg`N6 zmSTtDP^H32r#7FPsdGCUY}-dr3qif*)~%)V@b05DaepeUFSE)3KdiSBVv02WYf0?4)jNAnD*jp^rq_+I+&{`t=tsL@RXXsOG%vAxcKhU?{y*3`oi zH0r1<$7(hTs|Ert4+c%gO(C0f_|7jViTkheMZ#i0F)#CpypB$|=0*&r)Foo6v*}%LH0~02?FYMpw8V?{fTo^f_t}v(MOQ2N;J&?b|9H z4#10*3&FJ&bCE$1qNGj}dOHA}4OAvJ0p7Yp?rQH$B_zM~1E|WhU7;B2g7?2Itb_>a z>W~gAU7tta^(iU_6SI%mfQb5xlLq>jbq`y@NSYgw_-bdk6}6AWRdlKGK6eFs!)?uN zvNJaSi72&q9A@fpJW|(wnEKGsZO4}KCjD+p^LPNfH8!~Ez@ih-E805P00i>s!9v5; zkt<`D-p1%C*3tVx}g=#_<8z|kxCRh}_ZtwV9(l&t%%e^z6E6-^$`>-GTI9*nt zPFJG_T=0u<9!V~^(J3EzcCJ60OH>m>#G&~{dw5F zjh9W9T2`xj_3%R^FQ?zBN?eXZGLSpIE^;~qlp*M2AZDNk)a_tRU~6v`5L{v-P(%8~ zAHSFW!=L|KcE`6d_+bNqO=Y3;isVZt5Bm&&j`a>!>PqghiOF5^f*7v_N^XF2+0{|~ zxPG>ypOZ8h=M@jKh=HGcLMm6zFAwStp!pMkBJLIC@oQRv#Zewb*+m=xF!#+%faIs3 zSBYdBpx^p!tZqh}4wk-KP-DPeS(=#tJWVb-Mu0n+ME0=7^+XaZ2(9=)ADyZ>uI*c_k>pbdXrZeLzsMlW`OO;qq2WY&ZgiG{^E zKpeaOan_(Z%H~cfuu)Sd>Ktrrd^kJ768!zxcolAfksu17wpVx3Kl|~|(4Xy3k0vM6 z-D_t&%ERU=`qS-A=*yx{%`C)~13`CP2{}17rH7HQUweNvjf@~| zr+!NK??1pjHPZWLKzkGEG?h0w^CUH2y`H+*?6y?`tWlyXCY#uFZezpYY-u8W^!3MK z4Mct2_V(tqgrvT=r#r16jh5d#fbniN@!h<39rY5V-l(hW;WbY{r_EtiWA^n8&E`-g z;N93B?(A+2&^$%keDdpGr6-dU^dmONVMkh1Yb8vXi8I{@NzI%OMIDRY>8eZak@Ri5 zDyt3Jg;w=t2Oa1QRBfn6UVWh7Y&o$3^lur5xUo86pQay>lD=yup4->+GiaaBAImy< zSTg}NlR6@VM?WStP9y}`O7m?K_0Lto8W|r<*T(t^z}-6F@Zld7$&NHgC*2U|3vdSsmQqJ0i@KY2$Gyv=F5xQSDnMya!64`$ zS5TL04oC_Z=N%WMQAJ>V9T{loVCp0z@`OIaHuu-lEE~l(@Pc=UMB7bbS*DchDG%eB zp}Sa6^qXQFjRGybd~h1?cP}HjxMv>bSHvsMrBNMNme{HPCg4RcbAn^b+(lA$fOOCC 
zU8f08rY6$@9?e>Caop9}9r~i0KHEYqVG}QR^?)@^n7L$6W%wZJ1Oy5WpRX>!cK{#3 zTJvCL5njMLS!!wo>Fny z4*F9!du?MK5WkIyIaC{Zd)UyoohGN1(B4mV1FwDqmj-ZjvXXxF(ch)N{pDY$IRGM>BQznK-FSvLLq&pB8+g9UWXVmY zp_5)K#*b)8CG@6UE4JA?42W=y7VT#cW zoKC8CSl#I8YEM7;`G-6+Y~C)xPbj8Qq5y<$n2Jq3sA6d0V(S1vyf}m8ZawsD>)9CSdrxUx zV$b>`P*1n#eY+4L}(7}?wsu;Vd03d`+O_PtpSbc?Zk$ zF4cGHm2amm4fhPCpS|}p%za_oc+<^S*x$t**yz=(X`sI!z8zwXp#&()4JM7YCtMB? zu4g-?vn!jphMt{DPXTo^%d_d};&htDOWdB;F#tm$1%zgo!re&aih|4QilCdA04Oz9 z=hgd8gS1UuLP}ji%Ds+_=Eu`d(x-PnPZLv5(gHf{+o(sh1I9WqlcqFORgb+!02F}O zO?vN)-bsJ>-XCB+;u4$1pdD1WgAL*ibi7r?aK5;>xRf?<$t@_?N=6AuvEa9(tBd@Q zyc6!Gtu4j(()(a1I>-Zq{aAijh{wt%X@mZEuH8xFz1LIwURP?~#e^Cv zM~6r|w`fx*IQ{*7*yKi~2wK0q_iJ1(|6Q7R@-=-IH4rx5?a)`6vH5KInuc>iXrI?9 zOfwU&Q1_s)(DSwnQf{`4l6xvoTn?na+D>km+hSs-S-A~>@GAP?>uab#t)pgw2{v6u z@4J}}X>lB+bJJ_!N`eAlMj^x@>kNqBX5-%={S&i-Y&K+lh~5C0MV$%-n0VNH9*PNV zJI+B4n5>kLsQVl=Bhh#DVrUQ@Zfr5j=RLr>cDn_`0&6GG`FY511|L%=%6rsh`Vp?f zk#vVtI-cMBj2=D9xhChoi)a1(ofn4W5?&bXl_F%`R!-u3`@22L?f*s#r4QJVDjxa0 zT=CWm1FPlvf`sute?goIw_mfgV(6%l;|qem>!yDJIn{3aq*uE~_z$EOCu(4A-Wb)j zSaYJxIDI39Qed?*YrITeSm!rF+nEa2%ClM>B8yr#t7ocOLLM9#*i=zqfXODW08@e) z0ILJ18-qd@(H8}S?8YuqvbKYCEHb6AEgg|as!az7>8alfPozq3NU~bfQb)Y^_(0{JpPK!d0*kF@DX5|2_c^Uv?JUAKQ!B>eP}z6 zJdOznZS5+I=FSAhg8!mV=e|lOg*M{~TKAEJYfVFb)st6D66G)HcAYQ*&UbLi5!OBc z!|uVgZerq`Z+;f+(;h`xmT~4Ois#J@lum?I84}3S6Nu-e*9kFpk9k+PI@E&dj!HP| z+bc{Mw{Q)PwE68DX>{y*y8qRqbno+tG(WdWn>tUT?liR^*mnR=)6sKkW&JZxCE(UO zij+K$^7|uSyu$a{XT3;jy(mlrXQq92`{FEBzX zGqrMPl`KvLzo@)Zkzf9zR<^HC?8}$`24dE#!W+o`L>9TE#{BX*5q{$Iq%`S7aGcie zh_AwJUsPvvKfZq94EZ=}J|nYg*}ZCB)pI&qXntLu4Sm&Xr4Q8(2isFG$deIkvK!iJ z-anbq+4E|gBwn>lO;Y1UR3pdgc}8(8Aqx}(7(1${c^5-UEJwRcsi`n`uDfAJomH>< zE;py2{Nmm8KmXakNWb`JKTq9#q=6p}cn(pjE`&EOmok+GkuUhqTVkj))g*xa0q)OqK^n zrA(Q70c-PV@PsI*k4R^PpyKGBO;)7!PQ;+C42zg$@N5F5;1(z8d58RQ`R1!2RNCtS zs{1%ZfTICUVF->r?xq#gAx4L9rt2ej08WDeR!ZwkKA1$b0_4;I{Se@K4|}vFq{PkW zsqO;$4p|A9TUunpSa%w1!F@FO9Z=3r9rplO&)1))$IqXn`%{ncRA;dONl-=|znw^g 
z-x+^9-MoAw;d2DAimC!SuRuQdgp8K5w^!*WNR9P^cl$~^Cf-WeJVvJ+0JyPULj7Y2 z({4Du&>R?2-~ho$Sa*XTs3*{U_UK#9^phs?Z_=@fUjN{f0E)$S!T?f~DQFhty8&>A z{-!#N-}|WKNS0F2&?UB`f%;m=Zy5E2k*+K0`q+5XbM7wm*k_14GdIU3zD3k6kkq0( zqMKwWppN;zeNc-Tt39jf*~%QN4NNj<-(@8C_4Bi7?AjRVSJEe6eu@GSY9sQEhH!)E z_3?2e%5Fql1Rci)xNf@GLSg*rB(8i7>~Hko497@cPvrA&ODrk*_k z-~>XZnE(Jl07*naRH7o&LL|^A&@bL*;NRe(Z4eVrdmW%MEI8QfqTfTm{g7Mvus?E- zBl>)q_q*^&-S0zI0cc$Mijz?KACsvACsY7qU4pABA&53V)6?|jymOv8-^f0`?8irw z*y!=}YbMBsyKF=b%YFX`QYhS%2%j+o+HX0gNAq z4dbQxRVHQheR<;~$38@#adpTQ1Wm!k=1s`lMS@;DcTj&!?3uITop2^NUs|tXDuq?b{7sg5l>cki<0yTF6Z>bi&t7Nc@_=27*0 zE2pX*8o9kDK`+~8wJES~<`_tbFOJ`C4)_v(^G_SY(VJ&m(JL$D?4}ge8Qi?qq}ekD zlNbmYNVMguKH}a1zzPtvhjgzE*S4|Db)+|02<<;xOeEF&YwWn zBX*pVNQs%Ir?v!UEYWWrmXL&@^9(q4u#fSX__8ME87G?Z_+>y~xjBkfIf~|qf#m?x zIM7GD;P>eW0gj^}(pXo%!~5M)h6IF|EAv5?)gck2X8buZV51@TSGVTVmuygbhBWfV z*jwq!kgl#fkQn23g0|WQ*z}{b`Qck1roa05Z&Fh)Qes%6gNHSw#2ZNNFCl%?6>k%O zZAZ`zJ0CutU^CwoY9#==upk2TL~5z`xp!{7mEIY9CqSw8gPlYQ?CSw>PMEe?PTxcI zLQj1{RyRoM?BnXyE1~AGUD`@hPp8uC%xs#d$C?I?Jp`}~NN-hf09>QPBb2bm#=qxC zpV8;09!p5w_n1V?&CaD6HU{dpyA6ZAZe|?6jt4t<@YNUhLeIAq6^vH6*xlTLn#S9y zX%Ag)R1*ehuc5BNRBFRX2s*M4?tYoRe)uKUSQf!{!hSd6QRy9_ zpNzA(Z=dq20C2|5*PI03?Om{#tmt>;N7bZGSOW0!TnpG$lZZ#!gl3ToA$uZo9&K_I zZY0CJ;hE)DIt(v9_JN$n)dE561}hj&)O-3m=?6NyVM0LvY~p_ZP;Yu)>HE#`^wF)<{PJ4oua}oo6$wQ3l98FaNq(E z|3I4AS6c>b=LzE@m=oDUd@(aix{kQc)T;Dz&W zbWyE}j)-xfW6SdS7Qkeqv=P#zR+jEVq6@goQrsvq&QAmBay}6+ z1Crwj6nR8^^X1P<$n-M57`C}8CKL?lvJl`VI97VtzL-pyTm=mluLY#QeqT6a)d ztSjth(9yx>K^nU}n%=wlUK;2aNOgF%YXQ^=Oyr%QaSy*h4QQMt!CtGp{ zbf%OsgH|KK9cVExZ6y!m1?>_RI!1bi(*QKIsk<6#6Rd9F%;GB2*%dbV?On$|4_aL-)AjM^%a_ho2rMrygq#;@7QhiL`3%zLPSsHTE{ zlr%azlJ4BPO3K}o(FTXm*94Eo`mUzo_DiW1QM{Y&1jHpKE1%uFi&cwz={dU9 z4VW2gKs{oIjjMI)Rv)2JI}RTv3D!A-Vt(SFUq!dG)5Slb(Zk~IasTwve4<;yQ+|C^ zsizRqX_#?gb&lY#o7ep1un5a3KA!k&nkLGKd(z0qa^k~J#iM+mI$55Ra;3sMl318I(5_lXNGw^id{u9bNtDPkR3_ef8^m>FY1=r{(7>#83@M506-S+;e;8+z)<}S z7W%$3>94S4u|T=S<~IA&Nf4~`Qi-g6hlTS1;gCn+w7Sdul 
zw;(K#BA~37v{x?erBy6fh!0mb)-cP)W;tkW?P3y%MT$!R@OQ7jg>{9l^y!zMF_|pI z`R2kllJk~yID0$$ZXJR%D5 zo#6S*m^6Pp&*>vP=rOb6F#XAE!wvJ}23zu`EKSm>p3%yNM$HT!z;{(G_AHqGptBxL z4{C2&izZDSjp_G(@qW634dajg;gdA^cnTU($6(WBv7f*)Vm}1!?_rPlDQ7ccStB&?chXt}OSL$42tt9BehH7_x`gec+@6vz%FaI%(-Rg)jQ*&!ANN}}W zu(Z64NicVgJ3yfW8fq8PiI`Fl;wjRt_&Fl~=){|RJODW%l0rp7=?-~IcV{Q+07&IZ zNQyhV0V>ev61w!d*=7rA-8P_pb!{2Ckp{ENMor73F^PA#gJYH``XVpC%pDB*r~@kn z!J{&N^LdWxfGH$8O2;{=a#;C|aw4QWARgB$(vn0vz&bY=@;oK?t9E|E(j7h^bX36fSMa+3AL3gQH({65>)7|oqKOiquGIX!p1Q>2% zlA<(N2NUMEgGGl;)KT`>02ua=?Mvk0hPO^O?RDWS#HMg#))p!n3OT!)dQ*2R)-C|8 zAE!^#8tOPgvXDNK)@m!6RYy2|<6NS1>hHtijt?cB95B;tYu-1fyhWJC0-CN(2 zTDRGh2O#dn0f}n_>XEw)zfWDR^j=P*gCl8aawQB0YKh{({G;>^Y9n{rZ$}^7hA2&w zHDVp%09|M&d#y;HuOUgs<8}J<(U)n73ELi^*43gdbnG#soBrtSKVZ^0l%_E8_w4z6 z=-+Q3Q9eLodF%Fj>3{fxf5D`sIrM`+{~C)PGY{B2hP4*dkyHuVVN;wN%v!N5qDe6) zg|6V_=RbEA`Rx(<{n3{Uw=W4w<5uoIaW7x{9ruhAe+KxBl0c*4gxL;iXXMok`zTPK zYw@G!rjNA6dBpYJ@#hrCBs?F{nak()^YV4{^P*L;w~Y#lcRI1nt@@V0*o~F@=#yKr zhdskTS2I``iiz$f`rKDsODR&JGZ@#5?6JISY#LB%A{}SJr5M?B zSn-|PL~xK}2i;|9SyY8!yVtSg$eheSx2M=8HaC%xIRvw#u(CWj0=*$n$UF*F3?!xy zPdJtwklJf~Vh!EuHmpyy1D4#lwv7(3K-Fz$JJ?tL?8}eS%*m9>>LHG^whbhIB&PXR0dedHfNfzs>`9~nmtgS-Kn&3cPt zU0txF#I(MN$p|V3rOj37Qtc+wFK}1fDf>Q@&B{;=1-bs zXPw{4H}lPXugcn&uI}nptz9j(5E23eg9jK11eh6OAUwbj!EhNy|lDgHcUR2e!S61%(o0)Ge{(PQuZ!T5&W>scal~hul%=_MZ z&prG4Jd`W^PFZk475465v@_a) zNG-4-0KNhxx}9AF`Eo1snUy9@(*{I&$n5Rz>|!e&DlUtyHYnJq4xLIDk#aG-3a@S_ z{MmrkSKfRjJ^tRa(Z)Qa=Z*;#NgCv1CSEhje;4wC=Q+%%q>5%)hiGRMA!6k^)0xAk zX|45i@xsNR@+w=Ief~5s(3L*^qaRIm78U2-crCs9+RJGYjgCuI*j&1hO^xIhf+jo` zAC#2~PNhJ!wZr%90+H;-3WL(ItpuOG4G$`OBr4jJZfvWTVg|R`XmZoWmWL>1>co46 zev+xZIMNg0Y*6ZXmX9}6TKSxdBm6~ph7my;hF8QC_l@LeRhk0G`)bRM)~!P>6{*<9 z)ZW+vmA(wuqm`?+AKJNjUWVeXW=qBUwbRx@lN}4iC3b~Wp7Q)hK9q(I9Z7%l*ME~P zw~wLFCbJ9WaRm@9yNT^8C)T9w&6v-<`FQC1VF~6O&;#d)lHsw-E~adEup(cs7J42@{ZCeJ!BBJSqfz&es`dCEmhj_ z80O~p9qbh*Te=PKub;vn?@m^xCk~zQ=&L|u733&IPbCpbB3OlI_~6lj^xX6BO8@%5 z{)Ke%F?c}R*q(O#*zIe;liT(J3Ikb0!GwUbgM)C!vEm7?(v(@|T|l_`YFfOu8n=c$ 
z$i!hdCPrnwR=Mi%IJ?!^%EUK50VNYYVbu=@AQIg25RxdMq^)x}s#HW_T+OyY!c^g_ zk}gODX>fA4oGla6no||O8_CLc8!0RvGN`vzW?ykHz7cekhZC(1HgjuC8sjb9*Dj}-cTJ~`rXHf8MhM4NVtBUCI=d#5 zur_@6!keg#o(R5S9f$egF>q{PSyNBUe*7lvK0N;ym@sVfU$G33IBQ`#km)~V=3frJa`{a_y zc4Hr=?r6hHw53k?vRxR-n=WNoMK6vi%A;0diDQquL|u$GmYWA>O}F*u*i;X#iOuPe zvnSKffBI)r?egc-cfS1wJl?aoqk>tW$@<|ED-vks`9PZ@89e3GlIgHF4`CvcJcZNp z#c;Nm!tu>Lc#DgB@MwRy2f==y1-VfVx1_7_m!g7i@o^^>`wn%ty4WqyJ4w{{iJM!q zajVVNr24hR@uQJ(u+hZ8UN|Znbzqso?&eztH8=>|(p^W>~j&-#ZJ7j#y@r z{A?={GLxsW?QzB%<>EkiCuUoicqr&;4ZITYE@j@p)9!vEtqpzd@&SsJCz}MfQa1yv z3OCG=m)|XeVld_@3IsAr@U(nGuvi6y+nEACX7iBPu6bH{_V^JCXmjqOdC%IhU0Kv+zrET z0Kf{U>c;jp`EBN> z1@NpmvKuT+Ca8}^+L^ly9(F@y1>mnE9hB#|PTPmtXWMfj)dEkteChLCwF+A6;N4we zp{2^;HKa{QMb+I3b*Y32MXXhoTERiM$_9v`?d6Ifukshu?pVatIT*L9(BK{p4RScG z56ye4JTEq*tfK|0A?4A+YC@?QQjry{w#tFc!$_StJNRh2aUD6tEW*}d&6q-c@|VWH zlHT>mv+2;n(ab^sbzJq*OMG8#T}p3W{7!oH@{7D9D;kRP3i{)!@q}Kv^!3G; zQ+*m)$58JNz@Oj60$^c!9u|n@^!YFTXUdXx(3RUl>?)8~Ma->Q$F?IIeLQ-XN{w~& z5njnA%U9fZ#Rok^IU8d1y>Jmh<@fMQ+bO~@j@Z432OD1DkG7#3yP=;ndI@h@@?f(J zupA#4lWpI0hUjXv&sA0xYov`{8ST=PsZ|j;OT984nOE{wfd}2gf@{d}Q|`D2iFC^6Z1}XLrKV}bsx+m8XWG)Q|Ia^@ z{`^nY)BpSDUrf_i>Z!WaliJogqkTaGVAycnqEK1vI8N1Rvl1mLq$|>5hB1x_rpteg z*7kDSqjiz*_@MiiJ1yDe_x5+A&i9%O3cASZEj~j^&n>pQS>%wSB#ZLg&GjwcXluJ- zhDHe`pe*g^mJn}i;po!P>=Iu?KYTD3jZBS%xXrzE ztFIrX=V-t`Oc}m8ueJh_VbL1AMXRwx?9?e3e6Y>~u9P8--A9|!PkE1z#gE`gd;ANA z;xSWRbP%wNG`Q6+Ww^qYqr&#rnS7MmumtsWt%~vlC@tP$+ozw9yG13f}LH8HH3*l9W99xD$YNj0Sr8Ea+ zDe|*ZCd^~50n?dHu9b~4>`|zgz-t8Et2Yt@|#h z^-~w56|9=ve?sTZU0@xqhc(VEVke=9vc^r##g5fAFskdaIn9nJ9j zUQXY9^)(Ka>YGfW-nj!x_p!l4q4dPHnF>v6vq+&(1Q`6&mk`O;QBZp25iSp6mh~9z zhqRUN`3c*yD!p&KmX@2JH*34WlLc0KE5h0ckg_eUG8pD9|K}l#GAQl6^PjJc^lDew z=VZpaQJNnYtL|X1It^fNJy;v=8fX-+s7uA+xv+~di#pmY4O4fgCYPW-%3j-z+VfBT z!Y`&X$DT-k{UEs#Y3X22`L$YARDmBXZ$~-w?xq{wE5zlvKobbF#K3;>6 z;h1|p?nL_BxK1dAsu^;!(wyr z7>~|hOAD*hsn!h31rxLfluWsj3d~71x%|twxZ^0ZdgQc`82lCGTR1eFEj7dUY_lz# zxBEhnWgNDUFx;Ywyho~xchkfh-;|cn+YgoZ#$>uNKCf;J4kA^98rQ{PNEZk#P$%0U 
zDIY&@EDdy=2vDhJvMS|1PZ=urwH?Y^8!Q&GXb6hcdKa9&d{b1P6!|Lb9NoR$G<{0R zWRgI6xEB|RcYYEI|7@C(|67VKV#Rv;0YNh?Um?8R67Ao%(BI#ijvYM` zGMJUZA!p6Z%xt=L^=dkM__4^tk_RF4i!{HfHA z#<`^#sPL3^4Vl;1E)T8SC&j7=}`Y(yIFnKG;`uK|R^I_lQ`nkmB>(iUC}B-&w_V3TLU za~5G?P!u9N2`!$&gl(j&ja>%%vWwlkIHm1zP_Ys_SN6tojENWs$b|UZ*hG;K;WnON z{5S-zz*}C0z3U@C_Mvp**x~fY|M4^FYhU;#GK*p3S?h|iqltxJ>`vjQ!q```aYO({ zOE^;0i^sT^KZUiUJI-?1mERuT_fP`X*B+cdboyYGz(YWMpX)0+(_QXSF$CsuaF4ms zQ%Om6pk%l#!x6qQ- zn~uXK^5%IYdT?Oz!j-G(+I9GgYs+bMaWx$~(2HE;L$DReE|g6)=}5Uv(9D<%au)fD zr}%2_Y!}vTlu54QaGTxd{BKy?=D{!`s-ld2-n{&l(;+yHXW1xt+2+PHm30swtc${Z zJegoFzS*zq@cAyuwgdY^OG4TPb`p(Y)(;M(v$?ad zOzVDH4Zil{jV$ahzP;sryX*a?)oCw-QNwd!&Jky zY&;jX*++&%x@tyNaD4KEKp1ubH;4^i5LrP~)(YF!@|iBIOsAHP_4Lt?e<=M=|I2Tu zXW#c|8axbNFc@N4(NTC?;&4jDE)XujaGoGciSd$`Vj|$>79+3_aLbb5#2Rf1uL3k@ z_)=KD?vxfW4<*;_Q~8DSK{?I`lyOYNJeS!ka#W_< zdtBPbv;GSAMZxRWuq>U@U)X3L0f*=8+A!j^xEFqW%i+dvylp-~N!MLm#A6tgqp1N(lnrX1|=X1i*s>! zOi30M__mceu?U4#Wy@-HvtCqop+tqq2hBsiRJ)0jq<~lUh-EZ=sNF6AFz-sU2&$zh z8uyEU>TaYloH}$o9UVNDU~UdY93B`%21+cp1m}ye&A8x_I!p^CepnuQ0Ax^%FJ8Gq zIw%v^a+YUm=}2`b4IDp^=35ri7he5xdi~n@1V0Jj4W4WG(vS2ErJwk~N7K7cJe!6( z2T|BDo&LWUK9^p;dKpC$w0+9QHN&yP18Hbr0Dt7!3c}8kv#R1n3yic;?X1%c6OZ{l zmv^rJ4S!plZurT;<<~9GCD;{Z`W#(xV_#d3RqYkDF%8GO`8ava)sT zY+}J&ek#(JI|5;#dC)Tni}gA9?CYs#5E<3)en)CW8pZjyE+f*;MKy~lwkX)_h&CU` zC|$8x8%6n=uXrrp%jn8`S!Fr@Whw0CPWTPNF5_r)-`$^S*t^N~+$jC~OsL7&WzNVg zXYXm<)kmZFqF5T2jf?O5YTibXHcDsC!QIbC+14AyyOX_0O*+Y(?4jHImS zRQFP+1;zPDlPY#+Q6SLV4wbcqZQY*M^ogJP(e&HD^XYW#9q91tLi-qiq#pd3s7z(j z>Qcomzyqry{vurwNg3}(Lhkq>yaPrQnya1Kb4m@+5|cCgOoP7C=ocK#4ZxN=OiT zsH7v$=*C=_^3eg2x^iMD)1g~IW>BMWz6nq^Dv0ksmw}Xl-sxR`rQhC9zOWQ9DKTk}6!AFUpfc zV!qqvEQ6>|?aiTei9J;o-=f83uX?|8gImMV3%4zocG<#jIquD%ZDYF6ZH${?_#R$- z=E3xsy0YK!=7QLEB!73X)+iniO7JQ>K=ymOD{ea3rO1cz%zu_U@^y=5nrCAn9-GK9E z+ZZ+IZ9tR|bXM{oZ-a zYyf~=z2u>5S0@cV-dXs!dd@jCmpfgzx_|pN`z=$!s1ycacVUrW3q3c>=#m_$^Eh z$UFTfKmU>RyZ`>v>Fm=(sO|-rGAk)E%S-Zbwb~vBgY;Y}DJ2FU#yk~QX|=Mdg))wGg9=g&)>>i5K*jDV(h)q6rHH58l8e^0 
zrD=r?@1{L2QqNtgCFp7QHPF)%6x3@Y?%sHuwm1z zJrt<&h91D?```IqsNJWLbkWV#l{&eW0A9=T%NNCCvFbH;E{J88^w!{2uh35H2V)as zh=8MaLNQcu$_mmTUb^sVnt;M6xwje0@6n$A^pU6Em!5m_-RaV$x6)U?@imm2Os1DF zyp~30k)A>KRc)|~h3?L_pb`tld%7qW>MabN*{A$QImJ5(D@>7G-tyDtyAaX8Y$8A9 zOJTS1v3xJ?0a|(9aLz*>Q*!)1$F~5qJ{M^%Q@PxEOFZC)#&J>*&bkm}KQyX=K z<(%=(f8^Ciu1#YBm{v^O{kY9Re7ch#`}ls$e*NtyuIy)j=?uROz2-K!bDA9gcK&i( z+p^=|*5P&s8{_GA>wEXM-~8RX6g3QaKV`EkAdbncK@IO#f7#1EN)C_v@YS$D$`AJ@ z)H3v2@3BRUwRySu+V3EAYb6`bZaz*I#;iQzf`H_wQus&c}96$*_0rH(1Ez8ST&vJ9*? z&8I^ry3@b zz57h@#x^@;W(c+wSxj=e3b0tljb-pma<_~Hjw>z-nCzD+ODVzY0NK&W>Ga0AYp^K* z{sFbNMp;by`;an%4Cyxf*5!w7ryN-{jE!7H0n0Sf7Y+txxQV)BRS~j+XHmj2w>Xs+ zp}MV8XR}Z}7XZ^zYuj2nQ!7$FWN9%Y<{+}cT_j;0dwA+t-!a6Z^`*g~{%8|le(@{F z0iGZ)IwUIUUd?uvX)Ba$0gPnPs`U#tR|H!dTL9i6|Hc8jp}_<2Hy=Xb1&biWskuep zfx?e2_=S51;3tNMn#M*lXb+UJR9eg5?Rho9DWPHkc8V4#c`LYgCy(P{v|ZIpLEvWZ*C zyOL2nP^U<^0C{#LOx#LJw~OIu$9%9nOsgQRcn#na_IYH9wniNb)JhYew|L)bUw)6k z{6PM^mDkqqrCsn2+YYhNE+bbKEW@%{b!DyL8Kotr@7jpz_w6# zwwGoO(N&Q}O(TO!toS`uf9LZ8$`yAxAN9mts`!fICh^)oTx{9qjV$8ZNW4iTJzSAL zUYA+?TCYun4d)p49Bd?XcMRq^?D(#CKaw7Q{48uf!;B{*IKVB+Xe?pbZGtt2cB3?v z6?RUHFc#^Shmqql078tzE=6;B6iF}{8<(zizxT^9_Tz7NKhYodlOFSJdUm*S%b=g_ z5N@>ESTyXAF2dScR@hm2tlh+&e7X^{aE z;FSR}H)aecVJQ#_{crU^5efq;0ye;!lw5f@-CB+Tfed@NOfcC3RV`103^p;SFkuvT z-gDNfDa)iQ zUry&4kO@dF^4#uth^}CF0KDcIGA5=xVbW2s3)#TjO|$P#PEr|X)oO;wE!8l)eJ(ZE zMM0z$up-4=$|1iuA|qL3V7N1Bk0E1e&BeZu=|pT4cKHH5tt2f{4jB zKbrK&U(^-PTIi=Jy`tQ((Vm|&Je_K(|BXA3vEb8jVJzwJ#pItCCpMw{-EZbuEM1f?BX;42C;bfZ3E<}qPYXgUy!8%Lc*Dn5^A1$x_i=r z!68Jl4TQ?(Bb@fXF*%&R_VU-#rR!Ji7EuHvNDROKb^CXA>F;#YHCJ8h{0zELGfDAsS4Eqr;= z-FXmTMZ0C2MM);JvfcoK>hL_*T{xjIqZ{^$DtYUt%^mB6Z2~cNGZRy>5LrjA^Tmnl z>GJG2;@Bt?(ks=pi^7x&?_}w_bF(0%jYW$G`a((y_ST!_1nPnst{eLjciRD#Q&O|7 z!T6?q@vL-@>@Y0DU;fGOxBNyLBIF!Le%y3sWoUF4_xv8|DH3I`vByU1)KM%pOkG~U z#hC@#KouDL=!vr^hB?T=wJzMzmH@$mVb1`p^G-*^AMKlPT$sp8Vd(PrH^bj;v0zCV zNhsij%P2!BOHoc`pTv5b(SF6mHiBUIN7cbl1bZ+W`v9OsiK6-w}b75&7qu z0E4QJRrAt1M}FS-+`FJ&E~W3h_6q!qNQ+=1US(zkssQSwltP{0uv-RIIlOKC2dgR+ 
zvCwWsyn^8s1lczs+G?#*OYeTqqv^N*?LSY?f9#pm*3DoJwREvQ&-@E$3|0hqjv3)& z>?i|fvdL4=4rQa8JVSEJ-6KyDPKf5+iHv6^4ynQ(WLkhnREn@bRbVeQSQIDqH-Isd zV*nH;ODTuF?U1ino`-6Eii=rVZ5ALSBCIk1!hw60_X5h9n9YEaRc zbK`F2iYtHxellRcDSwe2GHAZavbf2~y?Zs@?WLY4djWrB-J->bklTD^z`$=j$0CKT ze|aM(;d2!Xk54T`D-7x|^`TFBD(8?k(b)+LiXe=w_QeJULo7)--sBY<-#((uWw!;aAj`0hZ2Rwv;5 zD5`n8d%DxuO2KdZ|_AKM9>3Mhb$BT;uTmcl&Y}|pS&u4 zm*9DhG*I7Fq^~@3;&d9F9^;@Nhg3O0*AB09h5XcMf31M%D&TjCvRXm`h}-?mU1|X& z&aoRTJY9s+s%1xkRbpt6gR0cg5(oa|V^&GZ7}^#UJ0|#<1I=m{YtI(8LPMjA_}kPbrh4U06bv}|1qFiOW7uJE&-6$H+yijC z;LWQoB3iA=#Ef{OCisJ&|L{B0Z~dEJOCS8mlc@?ni7?q3{w&$ zF@S~J;A~$nAwTxoRSwC)7d#5@Yi(Ztq!*wpYlJ+0CQ){RkPS+h(xOb4s$A8QWtdG; zp6G>OBtB8-&h_^!VRgJG(@hdgdDKAe3CC|1oF*%m5Lm{pnGd)PYSfL?U z@3akpShbUP4qC2t0#-Yba>9ab647$D*>*&#^>o4Wikn3ij!m?;xd|jwz_UKo1^WZL zI_5?0qGkYdy}FvNkKUlH0Lau%3vtLVd;r#nqlboqrDR3*=H{JtybPsr60v~xp)SC* z0Lz7|@bfqk^&XjzjxWd1;Y-L}Q4y;S<60!?hn%`s1FVSZP6E@n{RQ z-Iey6kwudwuT&fL&bY+F>w0Y@EkfWu(0d3L5)SLq7p*7B;f@Ekqf8C94eVX=7oJuSGH%%BT7gR8y@wXFDzs;vfa)t&!r=0 z4yIrItxu&&S5x}@pZ-Hy1lq(AdQ1+ql`$ zcf_{im$zrRpR^d`e*L|@)IRk6;FiEcKztACt{8uI3^|63do^zC7~?LMF)%U#$I$KY z?d3G*B*Jv=mLX}CGFMvxG(ur|&j(MZ(~locKlbtWr!W8GchaB#-+z_9^^I4cjsQT= z61KE3hfmRibPGHSN)59Zo7~!)V^VKs+jp%QmVouS^x@~VmF4W;EQivfmax9W-JJnq3$&`p=MQnLE=;l zSf=vick>=9Xo){lf$4TqrbuJnj`dL>R@BAD-9jr0jwi#;ro0!g_xWaxngUZ6&o&aC zsd?eZ!^)m2hNXqsbjQe}0byy#b^zLUOr>E(1|VFd8*?f`j` zG|KhZsYlX#o_tR_(tRW?kd_X>Z19h>W1>d$;HNIT6IJRVti!!W0iXa|BwWk@;5*^j zRfhKP=ymemnjRT=EOoJavVu6nC1fFcaFi$mh&GcRHOK|^7@*eI+?&ShV*$G7IcO&( zcxn`&JGYX$;VXqU&Ela2N;)D5VIgrLpvbOac;f91t@b12-nPRIG0@usD+g=>l;Hwp zD;q(bzFXP&?fI^Et`dOTXUj-^HWu&pL)}ChwT^girhF8AIE)CyNtBz+4WPDr zu)>0A05%$UsWD3Qu>FRz9KwnAfW3?Ano?1U#dS;1V3dQrybiBQ-7sPvv)RbEQRl)4 zkPHi&Mw<+!9SgFdB~ZRBa99Xge^q#0x(~OdPyNfEPHpwp^tXTh_sHO$O}&i2bBsyK z+BU*90DG|VL@+rGj!0(vc9=NF{P6c-3H)%Cz(YWMFY4Qow~;9Z!A76&uf2n$V9rU4 zG&>qgwG2=U#f|`MK$E|_vRKzS@YVsvX>5Ers9T+ie>!_Koj!Xw{q)a$ApPU#UrB%b zfBt29@hh*enFx3wkLzqX$}60W!*DiPW-Rs`8e2laz>k08`SjoX5C1Yf`Hn~6A4QBB 
zqShAc@XyNk$+q(<6P8pwW*(@hQm8CjFNbw6Cl-ICh8PbMab%=;b}}YCUK*)u^MK$P zz_9$nvV&+sasZRKK?m<9@f(ZO;3{C>ZRZgH+_^X2N+T#Us6ioKfnU^kTLCgkys!*2 zy9I0@#_6FazQ+MYC&fs1rk+Z=1AY+$?#P#!kgdW~<~d(Z+N7N%m1*Y##y~VVKl%AS z{}OfV0C2aiEq6g+P~8DQD~qtYz=EO}HKjYK=)pSaU@lK!v#nQB#sXCrQ=5q!)nk8CHO!bxSJHT!j=Om85V++E?=EO99uhJx&~k+4IK7V zNk@ftvRGS#g<*~ZXlv;JYK#vbJemdu`qR;YV`-@SXlQF&Spd}GR?6q9lwH!P;sv#V z)#mEyi*Noz8U_rnp(I7VW*1C;}?Nx;JkTSJ?N;?NXl z`;~Xu1(ss8lwKh_P9HqX$BrCJ^=t5m%j-#AmjUEAMu$`X`XE$BfVGk%EKlMyPJRP~ zH^UOr4U5TOFY<<4`_lPuol6sAQ|apDNScG4qdtx7T7aE0m@9xQ)&*$+)zdIE^UlHz zp7?2YL8J=fBEbE@LtRLhfT~F!Fr5Q^h=D_zMcuSh)`H_XsK$u{kMde8dt19vc2q12 zMFcuZs3EWoiob{Q`g%DeNI9sbQHo}q5h&WMJ0gNnjoqj0q413arubLr^m0S>!;9G6UlU|J{5R)F9$&zwp>^G`mJ zPCxQQnq-n52Iy3zfaWQOAt{3<2PdgwEp1JRf4Tvc`T6wU|Bt_!p1{A9k0lOFwY5WK z!i|%eU{cDBQY8dCk-iwn8L$NLk>+C1+bB~(t#Nb3z%M^AZ%o$8pq-nWP2=P6b8^Wy zELBTYwepP$=*(MySv<7?klNcja651LlcfLvKmbWZK~$B#_SF~Di(mg1)Zs~3LdcJl zL;zVUho+hVQR@H}Sw@`jD;$i|CW-4jb*AoU3BGdwZLHz?~9 z)9?*bCT)PHI^RuOe{V;6?9o%{z=8f$1DpvO1=p7s76XJ(B(dGU#L4?XcwejV3RjRd zynx!>c|af71nMUIp45l3hEb#qm9IDTbq}Wgu7hdNgHs%QS|v?NQ|N|Awo3YKRYAtYj?%f|q?>O^xYFPyMtafmKtvxj@RZzLy7Ya6N zq(SPck|X3DUjsmD@ETWVA5>s~)wSX4G!SGe19EGmy}jO^2HK!5B2h&45v7kPmz!-A zsMpPiYwJq~4-OGVC4Kke>*V4@xrSXWi11qn$*7@MHhju;AI(4dZ(_ z$nNr^p%kX@t9|xYgdMBn?51!+zri0bo3TeP&+&UE0P%>2Q3u|Zs59mCA&+i_Mb7Bx z2>C^_2$A%3ccRoq(3&atE&z=?k1a~RA#~nn%s;L+B~0Wr^VzPnX|}zXyuSF>);yNaH{Gp|-O}Y?yJusF zyN230#C^p_?)K@gaYAnMWuMu*O{n+e^L}cuxxeL|zn^DXyvK}thj+5zpcrEfg|zMN zw+2^kGIzUFw|4ql%hus<=Re%;CC_*2mfp`G^1G|ws0JrK z%ODOwLDhw6F(_iI9CE8ykc%m}bJWNF$l=53pMC11ymQb8c84`68S?+GaaGVL_ryDo zrl0?nkEc_Q4ZwFfmTpX64oL~}dDhs9Y~oC!K}%$Ka_TU*Gjwmtc8BU37{uT ze9X^&D~(j90KQmguXBk;eEQhgA4#1{9KN%! 
z0BjWpxP~N@Wu<~NQ(u5{7j)P}eG3|6_d(uesDN#204o+!O#s=sr77x}MNr2=8e5(Y zwvdi~)h(~5?p0I;*E&+`YA3Rg52cR&-ZTv_$P2H3KK;kf`~j5VVHRtdYAsiS3jy~R z5+HsacTI|ruzm5j3VX!F=q%-g8`{?d3yiLV9qBBf*+Xv?5fE1_4n0%`AXj7HJQU$+ zt_2n=N=aPBzKO-lI*WuRMFGm+3_zD}d!6?t>UN#?1=x({S-=o#9NLp5#Ftfo+^S%V z_M!9|3W{gKiMlqe^kvzFNQ-P*Ia!3QyTS;L_>?ch(AaiqD*>F0Gz{;N0xWRv7?R`T zBkuEIGd{4GP+7{cp%ZCwhW@((h~%JLWxWScc|B16ThjRT#WX(5w|XC}Hpg+_$9KW8 z3qgA*qUz8R*o4~k4%#}aEf#;X9MW5XEv1b`GgNrgbJHPc3mkq_TpIb7XWep;Z@W&} ztc9c>@`gK7l`s1W?aT#IGvmk_GN_y2<%a7$wXSuf$;w=6Jzh!A{K%82^?oH?ynGQN z1!ChE!CHuGoduQW6)auL-`ze%ycxGV?mHQaGmjUhZTn@}#UPL!MOlVjyc3$)-0?C{ z>Uu|iertTi+!TKGzn80Vn5NO@v(pu6**O!9GMDWgj~yIs<2PL~M>t=_yye`rdC9Bp z46@^2%*}jha3|u1CNCs*{v1S^m_JDKO?Qj9%zpHt;(Meo`sGF^+d1$ZY$Ke#d!tNl zt)pC4_LJSN-M=>S**wtluHWAw{kOGpa?GW6Tc_I`WMBT?O=G9sPk8(Fw|iW?21MH1SaY>%WZetQx zxH3L>D&$F4B}AB5lQEb{1*PUbKjCylcP3 zkI#jTwj1A#F1~QLj-?2$Q@8@#w=NH-tBChnW|9p#l>j8Slv{$T>NDjkFqE3e6(F1M zL0vX>Ku#N5_D2pMNJkG3g};gMi8M6{V3e(adYolJvnCZ8w-%paFGKNEN`m2=ycWVx zWLbw27qr7|HwVlFcxoS09`XDvb;%Aw7YFX73TAK(n?T!gx-xwsed(1ir72V!gGbXc z6z0Q#=jK{ddg{~@>4VRHFm<-@0gA1Z+a|)X%{4)B4gPlfj?Ig>$!?@c{^WSzeXTeZ z*arJim4kds^oe=+bOlA#7PiBQPtVaQ);NW0Td9Th~OJ8{D3+b=^?yuAE z_zeyX0-6L$ltT|xLltbu>f>QfjqD0TLkEItKRPynf(O}ni*;E5 z_@F330X3ydIku+j@X0IIOTJ-kgW_3GgUy@o!O{_V)j#FUrI9*p7mg!%+?(0PZ)Wis z-I?&I7JhZh6=R6L01+7D@;h1Kp2PFKG#j8d?DBWKL>mgL;@NN`EI!3JWmts+?TZ&8 zBENvkqX$o<)dlK}g^RM%*8#=G8{nHVRsQhw^bAzv(V!H|9%Ft~tYdy1YnI^;cW1)G zp|MDzk67N8Z!7MtE91_GYTfxFJ_=K57cML{SVghK%S}H;ZnxQbICXArJ`RgM_udbs zF=U=!yLcU$$<$kvt^JJBj&>>M87vuu;BH5>*=?cS;mb}5xvke-GIm?P50Ha>mCg4c ze_Q9Jo1%}o1-q?)I=W8oKsUL*53l#JOdkT`T?#ulf}JaT7jMO2zq7yZFWWK~Ij1p6 z1rT+Gi=C8MXEt)A$futEYOqYSLw(C4lmwxMkuL}H7|@yAx>2)y_OVmxz3+cEJ@VLL z24y5iAQ$%ZrGj6JMYTDZ8n6?l_rg9t_zWmv za2LDF%U6Ngh$ zb>L(xyMr=>zk7ZZaK}L`CT9Vahv)>XaVN{NJqW_QD>Wp*6yJk#N_ki(E{YBT{@iY# zLlm3fLK(kK<^eG9w-kK8(9f`|0z&5D_w+DV7b41(nJWkuSbJE^1CBk&IF7At+m?Wh zxK^ZmqNHPLY8p^VTS0n7dtX}`u3bxCdHpNt%^T;_I()YYD*X`-?Vad9oE{lGl|J~y 
zbLnXRNx%|~mIG^Ut*-|lrU8qLWYgh_Ell%dNm%!^+u+gnP8wv(F-=wY<1NEQc#qfE zvJb@@@E)(gQ+xn^XL+b0Byte$2GsZS>BZN-nZEGi7t>qUE~K^|;->w$SPkSwcj0&O^7Y{3 z7heZxqx}o$e3k+q^-DV_SpzCih-IOYax5P!l?RO+X%*zVf_&sQc#B&ok5F|@n0Alc zHC*6i=t^` z&5$Vhi)DWi6x%i@Cs4G*LZXX>bUV^X7CG3c(ik-#u5s2t7NU^KK}MDsgOotxf;aaM z`g^3z_CebG!K}u(7LWO-D2pvNF?4MC7>}~wI0{RJc9D1L_$kJ)>kt7ZA{y&UO_v_} zb6hl7bEH_!jRM*l$(=msB;Se4EpBqmTP%JsANN%@`!9-({=U~Bw$4kp@f-chPWinu z_pwar6+alJQFXnovfQl^6a(20M(ORG3kPc{B#xqPF&>6x!a+3z^)u*OwoYYPa)J=i zT3~w>@`wj)l%09}bYD7p@+0Z}A9yx>``a(4u7Lq4#P1AX;1+HNl&(4=)l_aF6*-U6 zgdBkzIVL)<7F=aT3qUkFjf;tw`BO@lW&styQ&Ur%2bv5(r4#_Enx;}}j?_A~LBWd9qm%f$W8oij>`{C(rT1?eN0Og?r>CA!C>Dfc?PRH9% zp@4&p>`*LYD<{qDk_doFYP7?RMO(JP(%xa4%MO5)LUa0$i-OiP)OQlT-b%W3^&)Hu zlc|dZXB*NqZcJU}z#s=e(cpOT>XmeLY$UyQ;k)VT#1)j)K)D1AFR&Ome6Tw`dg>^w zCcSAD{_J^qvMJ{!`rkSLZ3HU2okJ~p(L1T9b~?#H`C(nuz|XGz3KhB{k3MBI%Pz{o z(rAEWb7oxzCExlZBfeJ!iLxM2m-5~Veg(o6q-=P}?x4LARBF`6q-O(AQ$46#`i~V< z=aQoBAwN}K?{{LSI=6L2-)}J+mOS`O(rqKfn=x53%WRB~Gi`>O{;G)!0Ccb0J#ah{gOKlTp#y#S^spFWdLoIV_sr{J9fohZ&o%3e$g zCc-q7Q^7_NlfDf%SO$_}kT2Y>LY-8YzDgEUq%j2uo|qhEQef+w2`s=F+x)tbcbG(x zOsQN|K-eNc_=w=G2}{9xAMx@DSN@H07@#Z{IfwBCd+XE zEAnpXm4(4m6cex!ziQc1I)U{lP+FyJgs3ZgC_h>-vRH$yfN&Jm)`GhwSOuhfs?4H| z#ZZ%JAj~cRbbnWOs-V%NJfv=t_j?m0p0P zh^{4gKe;~i)br_h$0JY{;4svh2g4%5s5{%b2cmchu;U_8YG}UeUf?hF z5DKJnVsYABL7W-~;G{q{RmoQ?J0MsrXu7hqRfgtf#ye9Ptl;@*x!i4PUiP?cRk zT6d8l>TPCjAzekwB8iMJGF4cBXqz|vEF#T~ppkNK1_(F9c2tGZUWF1|c@}zVeD8x-GzD`@D}6XL<~2r^x1XB$tEb+iXn`Wam>DNCf8-7 z=r+dK9UPQ(w!^3IJ1hIg17Wt#OFJYun%?HmMj!m1OG49u2 zgWzs!%}^5qe2zCyN_cW|i9Z3M{KI+Rl-IV(ff{T9KV@2Ntv;6*+OhyLd3z3j_-%pQ zC52fQ235^=bO0=fLj??3d=YaFGhk3MA{>H?4ojF!CK`r*!eT-SQ^XnXPCUF5t~{kP zfZ*{7B>LEX$Dd`BZOc3t$@1oiv2_NIPrB1YNcr&r{*~T1K0TL4r;w;X-KwQVY~jap74YaN?ES$Akm`W6R^DS%k!UyaG zYCVVrrvPz1D39oX-`B1DHri|tqOZ_J0bpo?ML^K0Bno)4L82;_Fs;a?Ky=wmI(Owt znqID@YL9haN&Q3JsdsH4ogI2Ao#=WTFpNy&Wu!_}x&iL&VzAhgMl5)Re}ar!BHZQM z0W^!YBS+D<9AbH+tv1Pb3^gyB2RzXN|Nj0S#GQ4e^CPwN()shjH@#3>NFzu$m`9y? 
z1@?nW<5vO3JOd_`lrn(W!!sw3q=P+3wt!dE*!t*GhX?!85?kdh9Hv_bbS(gsnvj-I zR0&FRs_E!~fpoZk01$=b82Uzt#KYYZ;VSoA&6Jv;^b6uCXNkcYx&Zm0?C5LcCS!jr zTR7yXdn+p>DS~@r=$e}gCu#4C=c81nj81hKC#T6x2G>&+V zNd0MSaIO!H<4O*yw8{2cf3u6NHk;2yR%2mpy$M!h*U51ZFd25lAWwP~DKsmvyu9`1 zl{7Umo6bCPD)k>749OiXaF^?75u~i#?T`Ymy61&?Vy6u%v%L*XgzgmB{B6yf##H3@ zt+vsQbmZs4tilS`*V~tdua6+R`zSKr7dYe5kv{TM9}3NgfBuJmm4?rc#DXOl{;5TG z#T?V=ZDla`;ciaK5n->@agUNZ4hJwYhhhE@NSI9W_qK|Ga&#l6n*AI?4hI;POQfv zu%fr+)H2BHFAA*mEDa`4tb$Juj#T08TtnK#avkLz0D~+F4N46&uHVb4EYcL|h!9AR zVH0iu5C+wV)Od1S9jLNhs4WIncxDDoUdsX|hF>P@Rj7bf`0Lf{g@{ z_DF9ec8f#bO9ZgR`T=^Z0iY%j#U;{`4Pk+Wh~mK7sE23*mQa~(>dtNaW*R{pbQ6r4 zeT<;1tV5p^@g*Rx>z=lv5*LAPJBmD1sVfyti4$Y62DCwuoCjzHg$&B)0zAj{4)`Rg zFI6#jBQ8%qVl~pKrg_!H7)mczL?@(WiFUD!{NZ!(Th>^>iS}xMn<~7O*Dj62B5@>r z>b;*#ZIvGS0C6KFU}kC#uqlACU6^#1hJ+RJqN!8w(Ox2dPW}Y|2>xQ*Ep3NGS1q)w z4tVT)VO^*IoF>;%5HmHJ#;&7DZhp>H11u;V=?Y3al&&$4EZ`O;I?xX*w9QkehSI5_ z!_bxKQ)H*bg(ZuHgYX_-9)Y@x37|_lEn)$&T}V05eH=3EqCA)Jv(CHx!&TT{lqyk! z1x8j3sm%2J$eUr|op9{WnSVRlKY3L`h17N>rI;s*hC_<5x4?E%1rQ6cyXkq#7oS38 zn{M1aYOVt^(XQ5nG!24?FkD~Q26Ai>VZ7b;hje9sx4LX(ughq9w|WU1so@pML>4Ri zx$HbH0BpaCT=Y62(Q2oJ>84*UQ>14GXF)>=qvG zTv9aMycz)6MPbLP8MdD|6wIP>)0lEg{brNxw&u%vb%8O6w3TVbzOJ4gr0CE;81w#% zU-$$-c{u&WwZEc~zy?Nrv+{}E7?=A&6}UNyd-fi!Xn&!b$p^yhRA=`(;k%X1-Ad1H zaop?t-QV!E??iNQa_Dn-~%%JL~1Po6x@49pTS z$>wcXL8him%VxOQig&i71?9^8Z9$Zg2k4}@2(p)%%w>@nA0H)NL>)n)a$8cV7M^}r z{1I}YkQLhx@89M02mkw@rmuhHCBXe!div4Rh*nawo_wf)VJ2_IxiynVXge_pZlsH3 zgg7WtyIcJ?Zo@^s%4|lSOK_aOMf)(G*!Cu7+}cLBc?o~__=gC$=YuyA<0aUV`Y4Np z?}9*88z`y6!)}5k)4R2A>4q9)4yHZu)$$9F`T2T_D@00|52fV^&~nu-Hk z2f)?%cKpyGcxopRJ2soT2D{TVTG~`9F$q{b2-xdx>qy;bUQ_<9i>x(RETqI9=|_W)7wf~o_z9TIzRqKdh*aya8W?j?X-P&z?VKF zmZ5M2zqVt+4!|Z(?E^Dd+{sEH*pamAVa`=3i>lKNx1_ZjKHzQ^4~vK!yl~@94kf;l zCJ_tQ0h`AnEDRn*tI6uXqD;+uaWKk7DS0`CB#x7VL#e0Q1;}U7Mw!@qRGM*MU=Uz5 zo+hu2h<-_=b>P7+%vW#sBg zyedthxY}$Ox=`l^F)pn=$L-Qn6ezDF% z6#%}BsLM8X^nUTzKAEmx;=t(Nek1i&251(Dl4ExOG3t4xWI%f{Mr1;EFxh(NZ#y{O 
zW@TO)nP__rKJMnPftNe5w%+f+*&dwa@b(vIw8sWHqkHzfRNRuyiza116Y}))dCi79t>oX@p4# zyVyzu(3A)FwkIaPm07s?+t%#)Ek?pJpIaSkXTBdijn zsZ<5qg)zv3tWt<$9NH8>NU@gE*(OETVkQAysBMBJ}+XQs> zA;RqRiK9?&n`l>Wq}j3SX{h&DI&t99)Ym?gs)u{i2z;4NQf_xUTFWWn!8C9cpiusL zl*|BLok-*pt^hD4LHCoVPJrs-3WpJgUr%pc{dSs|x{Q0+HK1Z2J$xh%o6Xg0p#uHN z$Q0n2#UJg|5DuY;QV&j;3-D>#g_eO@)Hu|*9 ze(Qu=PX6Ar>{p$Vcajw1DXGUCtm*gJ61Fv)X4t1WJUBhMkiPliw^2@WI6d|RY!p2Q zP(p*MZOTOUE#11X7QD(UA3GT=6l5(?dW9?+j%D_ZY>JY}Q51HBViE+)Q4YAZp>sKw zH4#3HVrg)wC;jHX`HeJzLZPwq?4GGOrr71!7nr=WBHc*i=7Sz@%E0EwU7z-q&Rs*i z-ACK`?cChG??!bWqq}|6*=3NbM-E2_4tduKFH5jQv@(geGI>jxa*$kv%AzKz z830aKXE*Pt3q~|oPgfsXhIwlc+E==C@oM_xKm5b=!slKDxxjacf`oOzjM65W)J)|h zM_N7Yu7;>Q!DTyMZZes2lr2r%O=%{rQH{=4k>Sz}WucYsi%&QC^y33zoTy_WEb`kSWAe1y5Ny0e+ak4P%*hre?UzuMJoQ7znUd z0crxvR<@WAuswVonXuzXm=H)>q=M;L+NMT%#O_kae$hf(Sphhz9=fZ&D{6Ofag2j& zXl^4-QsF!ZNQPiPsH7vk{pp!gXZXPPWtCY>vx6|57Um|>(BKKcIJ~Dc;?;~Yk5 zd-M0O*AEHWc;-F)m0|gz{O+Rw-6ep@VGJzX3g|--P{gstJ$_4>zRH323_=ZA=9kVR z512*A$x}yQp+FLdQa#wgvd@GZbinZ<{Nkf5`l>ASdsr|jKF~$~3jIO;Wfxf?%Fj6X z+n!s~plu|%$O7eyg}Yo!0PbE6sXqDane<=&`Y)yb@W1|Vu!YScmJr@$Pd;wVK4ILk z*(l?id~7X}n?6~uwuSwf-2&cUi1)^q>3mSqdT&bLK@atZ0ub+~-`}%ly=!82{rEj< zYYe!Ku#BFyS`9V?4);{LnEkevkk94&*8BGS_?AI~St0vzV&;3Y-8&^+7eUwPrn>2sg``}EqY7XVcNVW`C( zs`IX5j(liOs*M-ZO`c77GHw$QiQ|3;W}9>Um6v{taH1@8SW&L#zr3>b1MpksrMR0r zCrSSUJOGAywgd%kmZOwLr7p0Xi88jfU3v(7m6a>#5Q1c&&7=-hGk^)8LCTrHP9R}u zs?hC(`ag#HXBizbbb$vL_JT=R8iqJzB-Bz)a>c_;fN)tDT*^#w7;qfb(RI?b!j?UW zia1>s^}}j$2tZ6L!p}^w4ybftl>=>Kvscn=ZH7CCmyk=`(LE4UY_+#7)yM+^DpcyQ zio9-8kKfi%8qxueH96qm-{o{;bU59B;yg7o0`Q;XpcCnXEo2Els$fYST2&k3G9g7m|tq1E#s(Xm9;~8gBzG`|?mUyOG@&N;}pFYHX3YWai%Up_lYl70= zL^@56^`&wT*o9GyjC=ATbzMp`(ZwPv5S;1Mar|%(pPP5BH^6W_Z@SQxX6yG2?}D4eKahh9SPty8zK4~qlQi6X{D@}*{S;O8y7iyF_)fr z@=WR-=wgS1_CTAgQ|DxvGPQha>B{+Y@P?m8nF{K;4V9?C)zRi7wTv5I!nPRdCyI@`` zzLw5!si3mLbYMJemBYhahYiAQau2)ABU@H5?Bwe9qV87fQeN7abh^@&%Qw>BeD-tc z@BZ>jX?~m&^()e7 z)+9S~?U*M&YBs}+Hdb2akKgyd!M-A3#RLE@^6Rb4x;YF$Au7YtEV?_dkrHnU@sOM# 
zEQjHj3@@Zqx}|Krs#ZAIFk}tDeXvk~!c+&W)C2)m)IJfcK;^wLI-X8IWmRRg+w#in z^)CPN6hQdW)!{S-zwtWKF-(WRP%2y(;SIowp@z5_4yFwZ@g4hRc*+HiiWaM{0j6h` z;X~$o{}CiMJc9I#Lr~mkn}|cBNg-maHIy~9A#tGv^~tpwn#0B~qRH&vIbPwlRgg+BYvFe(Rw0HN<+X&m*bAx?rJE1t42@Dj%7L#iNJ%l1E&L zBynJkMqnEe{NW5^s@hF#upp#nA>`RuNhmND*#sxt6(S8dwxjXic1VUo#{WKeS-XYRR%Uw>;Zt?Aqgu7y8 zGkj>i_a*Q{0EoZ$h5VqVz=6T-O9x%4bAk~8aSZnK;0zo$W(KlZJTp?q1mTDs(8qiJ zCc8p@=G_QlvcW38>t@Ty5Tk_e02&A?@+d0LR&~1`L~-))Fwrt6r#lhpv@p+P064ga zeBA%^pZ>4(;@4kFEi(g9LpWqd9z3+Nz=?gg6RQB2^$LMjKrw>ipjVm-#0kfQxC$`YvWwF+{sITShtP`q#j3hg+2l%$E^Lj0Or z(_L&U5A^p2kFC-u=24;C+R+PDmbl@!Tt%{ql-R6X1h50h7@fL=!jFk`Xy7Co>`rqy zsvkfon@CT9+m-cMc#yB8%Qs#FfL{)Z@S0i)0q9cA-J!@0LdhH)hJqA){_2%9Iz0_F z6%a!LD}Z9t+1JNn&O?Q;LCjWX;Zp@{1CUoqhtx~KtmUc5w2_f9#1Af~X7ZG&^Mpg$ z)Bx8bEEYPB!KW+`#=S-!RM{L{Sjsw5rCtq3_Lk_4)Qe%eXfQ3BN@fTy#Mk9|S*Gll zaU{bC`TJn@$_CXKY$$DX%aXi6!v?n8t=ZLBJ@!Q7ZwwLg; zVWZ#DA$LNJ-)vQ6*uaa&Elvw3n;(KrfmeqhpM(F|@N8$~Z`0-WIM=ABDvP4b=788a z7CNfEpPHOYufKMl@pB$o>_>t(+!nvgDoPPZ7n-K$a1-H?F;2ZN#?35x9!R+>;0if44ZdM1MCPW18E0RosoJABJ&L2|NVEHxAeD+g_8Au%a9DCZH9v~NV?)Jaa6u=6yU-+95hAiKmj-?`$s z%)58o#A_KK13&LdyMRu`_q7m$-`y?W@FJ>I6$gimVSvI5fBU8MCx7&rbn(IsfHJ`> zalogkFZ`^4s@pY~JJ8$wnY-yhYqGTKQV0kLL}eo15*+G?$LH zyD7o)&Av+455isEYpGgRiWESN#betR-~`OIT?lme7F+u9-51*h3miXZmqDfFCn(5y z;Z;v#u)xey-zzARP_$cDZ!ezFHpn8#cIbi0zTPghu}zEkB+c!aVD& z8fD=UfCU|7!8Hm6+#A4Ha6F8NwJSG9(mY_lg*by{0x&g%C^f0D9z<)QEsg{Bmywx# z{1kBkUS$($xcBZ5PpF4K}2`zK!@h48FzJBDPQ$HR&wf;&q z&=)UWv^%6DM~=pVBFB`&kAjZax5ixpVbf}=c&F0rH}aCVw{@db2Jwj0^LyU^Z2H+x z{!IF#|L_-SeX0!e&hM7R4z+XV0X%g1umrx3OQ2;P9shSue{6X4{WJ+pS*t-X_MFu-Q6Xb(W5RH$!)F(}XBBKZYRtu-g2I z@Hby$cny$zlik)=!-Bmb*M@O;0^3OdE;sjK~l%2f^7pM=m3|8o<(o@;faOjMYVRyd>t)wg^PsE{7F>um=S19yR-lqA8<3aW!wsTV1V9^w)20;~rUv$Pc`nNHwtDX-vunP;1o zG)BQ>8)#1Xvx4AdffV0EWEfsNB&YNVfv)1yq*5;B2AyhvPT69b7n@T@UG=}TMJbtp z-xgQ|R&aY5d9UNhC00(ahsdP<-XLAq;UzwG;y4SYgtC@}^tDS@k@pL=7%3K266YbN zwwcxR=+T4e2vPuAI2@;T#WggoO+m%&g2{4bc8v6_qVcXLw0y0=Y9n=Ae%x3HlHOVq 
z5*<*WabsaPwXFl7*>I?#f_i3d25RYY>}JSvk?P1&UZrgjZ^%U^5A*b)l^#?#!&m&Z z(aUL+a617pEvOD3K;g!-kDuTG;UJPIP(jXND0Rm)L$w}bM`ZoRbUHEw6|t$Eb_1ZI zV&1%R4Y6&MI)EBj3=o%39*QO9-3lwnB4qZfNUk^zmAHoJttJ54BEZ#yj4JX_xjq{51gT#$e#ybn+U%GE6V_B z>Ow5u+B}R)h&fz>YP<$WZh~4JN+M_j#6*|`XV@E;{oEJY6CbtLCCw^7W6@;ei|^WH zHvxFVn%ynWJ1}0z^Q`>}{n|bipqH_SFT9gh$}m7PZOok}W47hTcN2y?pChuepA?Jv zOcACHvB-|Zg{$nJN9ut#%XlNz_WFg%(2Do;JDz0hI+mt6wAtNqmG(BlP8VfUn@+&6FuueT_vMy9<$~C=Y%jeL0P*ELapJDL|jEjlt87 zY-GmKr$2l$J@fLJ^y=p?r1=>ZfUUi-(5!NBlf%;Pl+jO=w4`%ZJ?@@R>F2oPM)MF;Qq#aM9FKPaplU z=h8c#eFVT})7{#9!&&^~vN2IRU7PMUpKf-y`Q?_UNSn4r+%y(Nu&tu?>eR`@=|?~E z{LfRn_w2Ozu3!E(OuX|3f+%fOnh_%J09A8{7c)=V zsl8BYpj5BSXPmiB^|uBnDkW3M_bP9sWh4Ko)G^((6NZP-JdhM&U}<>EfT|jNne$M) z7Uo$n0Lr3mifRDbv)E^u$Rtw`2%u5+@Bq|R*(_ds`8y%QxFRSIw1qpG`0)dM=`2bV z2I0wFBn;Ehtb_$cUy&XMR5jO`p5_2dT3V*0o%EYV0i*HBQX%kIgwMCews>62w{Ih*^}RNclEReaOD;Mkco`6jF07Gr@eMnQ~o${mJsJlP>E< zjf2*u2mF?h%;Dn1Lxq|wfr7MAN6n;9cNQhZ$TRg=ya#&*l3v#3aiKlTPr;7F^O2$c zG=O?xMa9X(uk-~MK~Z+3!J#H!6q?2eG};?cVRRG5yWfhdu#G&HZo(;rrkZ6B1(spD z81*k29m!O6^YNimX>E}{fwYe}7#FNvII++Rz^^2Uve!hXiKIp>IxLbf<2i@Wj(R`;$XwqOE7+k1%4;C-_S`ZT= zAw~Su!m5N%zEQjw?LrQC2bt5QeHM z>K#q^+wn_S>UTe8_nCMJE5Ii35=G0aP;p6hRl18obLwar)6?sb>(1 zFubO~8p4$4FN1)sc`K7~XLo#tXG@NV3gg_WjgymrSn9k$SjDy;d|9mk$^gXZw}ogA zKo%GSn8UsP;?Fw5zd!dA5O1w(o|U4|48T+!a*YF1wiPGeY|OJ%ynJUi=QWpwAgIov zD(wW6VNYeW0XkJ-T$MLKfQdW7h^m+c&x`D2NmW$Smj|;HO(jmL0ti*uF~@elqSXZM z0U(*!+gSizzj`fQ86KxxX&;0u6 z|GFXqUG(s0J71ombAi>hW=hnFJ%)V&oq-zvaPW>&Q0V2EX#j zL#NX4rO|Z$+*{Na?N;R}jD2x{)PQZX1{`79;`3r4Vu%9W5>ffMJ^Io%?(wNe=e7@b z@RHf`?!e*q+=;o|r4V*YU$n{H96aFP_E{|E`le=m=LNBkI3EJyU5a%QuGQ{CvnR94-0@6A0==FR-($&)9s2(YGH zuObKnrXhgq^yFx|HsMKHi0?RX0~kbFod9GxyL-gxfL=~S1q;??u(QaEC+Eoo3_k*B zfY}j)|LiPB853R(vKs-d*)9Oi8>|d`>O zst;MNtRTatVoeBig$3HI;~x#V#I zq2*I70+w9 zo+DqZ1_+7^P?!_&J(@E=#OiBU`F-Goc+G z7)^isgCB)jI7LKkQ|{C7;FfZ7TjPjR_G~PxRD6D>)zr$q;`!w?sNPy~Ez&glqSjJ; zzM+ct*-E0e=ax(h;jRF26H2#0&nB_GPbV@{c7E?Orh4!v0I_MneNc?Tew|KGC=)5e z(R35qYah9pe&=_8lU?lCmIUG~-FLMNd8 
z`t_0Y@sIyf`i=koR{|Ixz`0srW0IUL&7D_n0*H~F3l6dIl}m$CyOplkaMD9CX!i2m zw1P(DOFg7}$a3r-!4Vb=1A5~ihfbE7RRvc7;sZIv0f>#G{QKDhVyi*!ZztS#_2bHQ zbr6+%>vDc{(zr{>F3Rf?tSF#7aKZOzRn%8+Y#bYkV}%C*07C$%LE`LHe1HKx!~+0V z>&UR@@MD1% z``U4zTUky&c>CSV+2+VyP)-_xp1*MCrS!_pTYyQ`qOmy+nPmZ_AXq)Zf*ApCaH@&Y z{LZ66V+HLR$^t98;zZ-x_y{WDP%(%+@F751?z$Xu)86NDrTv-{8plAUfPt2gxZ=86G@7$#M=gO@jf32mRZ_+HUii8_& zSbDpfvAn#RzV;X2MsvzgoK|REhmLe7&9wVX_CNG+NJ|_^ZkDLWka;G~qKJ`l)DcgS zDlduvX>8Y)*mQk1%n>VRlNCF)X>ZOG7j{D8#z1& zY+TuQ6CazlWL8JQWx4Pdpdjwq>3t=9$$?#t7XTguLglR9M4gjzkG*rtX?cxfhvod* z25b+M3A{@2XhYZqhm3j=xQ&kvpgL^;@ausSsN<6R$Z2YWfU^;}VTuc^uCAxm_4Tv_ zpvwRz15r8D>h6_es}MsUYR2SdDue6~d0`1u4Pd-LC(p$|X9^7G_*p@VkE39*>oE!Uk0Is$hMFetgC>#DPvWI}=)A;;u zaF5`)2}H@4`gvD2*VEhg*hGiQxNbCFcz{SZ<+w93k#0{;p(8dSo4pGcuSXab;vDU8 zScjzvAK%Q}JQ@R#HH8M-EV<7kDk0cCG7sc?mQHGu0N{<~@ME`4Bj( zA)|MaO@MHp<7jE>RNnRma>eR0*1wfU)x~#lOgy&9D2@g7*&Bb7Nc+;wLpp{Bd(%x$ z6dY$mVh4ayzf5VNi9!95UWBHsaW|%T_8pH6muW9wf_T4MW(lN4hNG74u&&IL6p&6&+Fsdwa42vEh(9=W^=xzMhiu0e#!l7FkM7V; zF*rDa88*pmZUrDNJaUVG_19E` zHaNohF&tP9I~W9jG4K^zrvltWME+2m9Ba+y(jFX;_gprPMRP?Y3V=bH!HoeFnSnfT zTOH3TC-1LF4v@$ueepac{js7D%briejTF_Z8Z;6u2} z>bq6d?;ab=`rzaWW)ERUD<&bQd2FaJee8|b0%$5nY@0b`;Fr#^`Hgsx6i%a@bho#; z1`t&=1ThQBdW6629ZqW8=}ny+v1`?cNxbA(=SdiORgDH|3AqH}V5yiK2%hDrD&BDS z5pvBO{?QHBPfo2e)wj_m@%qi1sb95S0QEsY>^@}$;{*14;XDJ>0BqV1A=0o%8<)Gd z0kGXAZgu{;D&pqF$??$u?FY~UA&+=gq^M(G3TG~MyPz2GI|+?xd~lwj1K}0PxOaC( zibdVIwC@2ZF89agM(L0LAV5@2Zo`(-*4s~+CP#;u>&<&zNH@g-M z#B;J;_A%zo>dwX-6gFB$!17l3;->XULq@{r*x=4{=~DNf1KSR519J1*q=TyZ0D{)3 zMb%wU8~~KemLR$w&sk5#U8b1R5tx?Wc3$PkOa>1VYCGsnGY^*27eDuPxVS0(;;+0! 
zU7$S$5ZsQ4%j(KPB+?JJy${V1p8n@XJL|oXYB|lEUdV@&UsVsdSFQNzU|^Hm<-oAI zybi!*uop}?$l-2zaAn{G&BbyhwDXj|IAu-#rKpWTFKZy6T-wRbhL%yy&{jH?w2FI0RvL2k+#Un` zS?{Firne|J0Ie6T9oHs?)5O?N$eJpTYa6j#8o^~RfUeu6Vt~gwoXx!gUmG2-gvHIcXO4^Xrvv7m$-3_w5(LQz;4oe>)t)U@M4H1Yy-Gy#sMB*zHuWBlQ(JU zghSBjCPCY^Tw(L(_t{OF>fhcbt@-7Zw8{jP75@1C6jvDWSZhG89A`JM*~a87zI1as zZQp;GR+*^uji7mge0zj-Kj7XCgL3TWmzL7oKYbW&P(8)_$YFP(3FA7GovF$3m^`ek ztirK(QVeh+|JH+tPxL8v0sV*AK-UfMQ`{v^!o#24Nt^wL-vrNDeHt{ktk^l};l_`Z zz(ZVw(NbE(EkiPVU~<^pZ6uky%=5JdDLkFDyLv&jwL^XQR{BZB5 zv+47H`V~0Izmr~ieS&nh@k97S`j73c^yuMzG?f5~`$pj6yU|&Yo!f}q-{^~rB+K%c zYR+3NBjlX6PhYshndATXpZ$aM=|A~gdh=U9<=au$yPHR?Cle;G_U{o?iu=vbXe&)F zW2JKo4t}3488Tg`eoHYt8HlV@=En9{?fPU~mtYf3uZ4JQ50`D?d#s38>k6leZ1{~B zLTXx0^%pVKWm%HruNJdt|E*Hwwjxk8(TDhilWBHR^isvx>;kQ z3FBNZXZD*vYkPdb8tyd@EVnWcyFo{Ny^Rg^WGV(YX@kZ4GHz&c*SG4La)(^TuiP8@ zTL)ZXbl;A<@>Pz1$Wf?p;%0x!rmxM+NnzmFQTHk!N?FMm@QkMD`0E+83ohfY+dlCG z@f;pbdX;(tVFwdMB-})#W(4Gc1>m*8CC63{TJ^Yd_g^D<-Qk6K%9QaJ&_+gs3Qnf5 z5+JL8wukE&@TR3@^oq8IE>%I=A(}i6kc@Vi-=RK!ByU)ZWW(!GUK2m^}E@Kv<&Tg0Brqi+%p3UY(CrV*$8!H zlVf9%=TQAcc#sP76qlIWLv&#d(S$X)bZ#QtMebWb+oKvYzSB0|SJt=F?93b+_ZDbV z$o!J_(Jn-Qd*`!&d1QnM0-_MZZ0vmeXqF?K=QFT^aEIuA9Yt)!qo&6>c%(d_;Xpz` z2fsjpv6NOjiYJeAdS7~mOyQ5gMFe$}=01GWays3ItLli--L#^lq@ za@NNp`kF_lw$l_G=QZf~;PG6@PsgS`@+aM{qnhsK)CA9ff9iN(axzU%IT=MXgm^@; z(o2rLfNY=oQocOEF!XY=7Zpz_^1&NLKJgQMZ?qffS>~Q7G1<+MKWQXS5TyuP`m)}2 z2FpZaq8U;nq?PZKv3lR+gP-;1jhi}SP0{|3^lul6Bx z+>35%>%tA4OhBjy>6TabOtdVyckQ*yM*Zf;oYuKTTk59o{3rkH_tXFJlYg$xJyuu{ ztw6uEUy;308O*YVDJ_b)YB!Q8Q>@ut!b3sfONi-tL{UYY=aKeB)41fkUUGD~HC=k7 zO^Nh^0pg~4eeRCU!1A-mOw;tDvpKuFiH_%Q#_&f_ene$CF#qVIJL4w+p$pa2l#&TX|L zbZ~l@W3?0&MA_F1Vq#!+Aop4O3KQhc8OlEJ8vYri9r%NT&Qhh^!(C)I<@5zdIDCia zikM$~YjKEBM^lnW4qyO6KpI0d<##h(rb9``2q$d;acX231Td=MK`|UBI!AyuH6GXk zH(0vgf$JLI1aZkV6-o#c2U#}02FR3k)(a>Y1t{E~n?vT9(+A6P#An@cnR~6(+9+DTfR)u(igRyGTY-?mM@r>LB2hT zxE=7{#l|=(qO7TPFIYXK-gnUryaZRZok`jv;v{NvqK36h1XF2q*m)8{_()$|YlG3q0442Rmlon7jUIp)gJLRuaf z=g^jGS=@wVBR(R^^yd9kR-SmJK|85Zr=)5gQMpL3D&z 
zimxsr46<4YEf~YL7ShBry<14O#UN((Tne@r_f@bLnF1dYK-~JL`1~ucDzAQ^Rc_|8 zncGE5QN&|!-PkEa|DcTFE*qWJi17IWVgZgPX$fvUQLi=_(P_K$-wiQ}By4a(*c{+! zd70Co7^EG*+ZoKV?prvFfE7X05gf+=!pbgCR6&D>H3aI4vNoY7?!xluf8`^i`t6bYx@lWxZNqMBTGb0-Z-EFfrwno%Z!VNeBd7UDFx zh3Y2;;1Fk^9_0E5h^B70)VP7)F2ob8h8@K%0F;WH$g%Wfx`+=Ndbouomvh_)Aoc(g zIkX$dEFTlU6C*V|$UW`qg=-1;^z^k+HmS7(LIu3*OjOq9*jPsR!JUEFur`P|Mt>(u zAMP4WqR6ofpes%&>7 z%ihX0-XJdmz3Wrs90)Omb_w7&oH^U3VNGiv&>5ODD3@~8u7U(dm^3<+muA&t6E``R z%3Lc~-Amm4T*?ExiE7Y;FyK}T;AC`;3=DFD;~<;*09H^_F^MHqvhBV1Fs*NJ#2$WP zWrOxRh%E5+YjC#V#2+Gy+(CZbOnAV?L{${pUY+z1gbedNL2vKvG=>0WVQ0%O-t~s4 zfNr%5KL!h5{*=b@gg@`aRk|6U;dJL7fpOz4`;dU~T)fx9)gQveh|e+vw`^G(126cC zLuCNQ9c+SS4I;jMCoTTv)sY;R;d1LQBK9?mEn0&==#!U%vGX}7NZ-ZE)gW3y){tHA z;izRLZNB?AZ?p0>n11hn{;f1TKEVlqk0bqk+V0%UL*%vL;-Z;C8rtq`KTf(86${#s zUR5#u78Mz@&8Y6Lv%4+*{oniT^!>m7KFe%sF<}J<&a(|t@iLx5P{O;Vt}>M_I7HP{ z1#Hp2Xn|*juC<9XT$_bE!+YJza@5~WJUxr@ok-y7<(X69Ll20nKAPN@`T1YmRK{nqz(jx974gp8?{(+j;Z?a+YJEK zpBz&y-}>p@v;a_5q@i6ss@MfE=16J958NE@gvxfHoNEW%+yfAzz;gpG{yMU_i%jxH zI0$2cQ|pxFb@P%c$~plZJ;=o_Z*ojE`eNr+SK-(K)B%(ItTYG;1r|qu3O5QKlV+U0 zCuoyC(gDB9*#dXr?7~|LhL1q`+Y^(aq z`OiR!au6s#M-0I!hOHm++P$Bq%|0sZQM`5SMA{#m33ij;hOb z+E_0~(M!WUj_;=S(zpKd$7vK5djI3!|BW=%HA|rY-GTxb0^?s zp364rdrpYkDP!qa+Mw-{7}qhY3xDU=e>r{ZS3jD*{k0#ZzIIM|#LlvrQNOpcL^k3Dr zD?7_GW3CQ%ruWh^x}4}pOPhu}9e35YU+;_?{5L#ShivfF_!tRM2=hX^(Rmi`a^lvJ zMSR{S)Ct)wGhlj%g`1y(6A0j3U0qHK3-bV$Wq?6mu0LSVV9W~c;?6FdLj5DZ>Ol}) z?CS1<6A0podQ{J-tTsV~T+Zm!3ENoi)8&=sL01#uIRF6MKy}u7?5!tJ#jbVSUB=dw z16Dam<@kfxrB(QQ(SHQDs`#SVwKiFlBFdQ6^hrl-owV>2l-9o-&M2Vap#bryb68xW z3RJnNa(-1Kwg=D*y}kN3D!98I9MK726Zpz0o1a}w?>(GLySNK<1+fBY>%&ccM}SRb zVFh#oS(m*PnK=SrtARrBZJXMo-qgBq2q<6gSWSZfRwq=0$o>wo8E=`%%>wOyoe81o z8^04V!K3oh)^Tj+3lR;bSftkl)&S`bLD0MRNu~ zSz4RvLCQXcrjJLc$U7h}a>KJ05V+EEyorATx#0=QtXeuxb7YN07G#tox8=;mmm_YO zN|{xY;6IWoWuK*Q07e|#JSP^^Qw-L%XY8~r-g6|`DqIW~VTy3^u0>(5r&C5JGvXaG zo-mQNHsL9}W%pAa6?2ki$I(A1vyEmC8mVnr(S7{M6;uU=poP8ShSHJkz$jx`AU;oXI#iV|Be*>n#HZtj` 
zN*kI={T%NsD2~+$%51hzWh7^(;a7%0J8WoWzNuu6j~+^6)cbG$z28hf`1YG{js1oY z6>{=pxiFT-sPp%$&6T;Lz|R*2t^jef`m_hAGN(GvDsLCIuSzpI)rv%`!jy?u1S_4N zycZF2i#|T?ITBBr+6ELncSHH*Q-rpQ7)aa@vcaIg1Q1?~O;|^&O9GrY-~@oCLkAlG zR^URy9o}5oN_XGA7bin8w#5LQkF2I_{+H$Cp5ZPJXBcxL7Y8g-qfacCyQ~cWQ>%e_ zaM@MR?0}hp1A{4cXPE%kXF0rpc2cFJj{{_G89AfD5!Fr_LZr4pTMqvCih$B353>C9Zx1#vK5%0f8s7xN@~5<0kF!C-2-3KxZ-sfET%^Z319| z12qFcDsFMaBxP?8che;K9S3~D3~ru;7h^tP$BklcfSf@q2@NL=vyrVG&^ycO!#a9> zBVSZ}fGY0!Xlc3O87K&1&5Qm5u#zV+g{u%w22|m;$I`pu)w8^d#)jif05HHLj;E&0 zdT@l?Yr$k_oWQR@R80%?=dul+`4t0xS%od3xt&ojsr|2lH70(b%=`N z=3Vg^jb(}|<~@KgFM{Eq1ug-I!$S^-_aZ#ue2&mLo+41;v7D6SsZ68h+i*oje5{#> zu=aNPq0*HyT1@HZii49Sr~oYxL!ex~ZzI!fjcNxIHIk3Xv;n;^L5X-KB|nBRKIgv4 zd(95thfF-W+;B>84-e{4jE2Y$m{6@S(f`sHzLv(P2h;0sT+cC)xApav=>OU|;Vw>% zgutp1G!HpHL9Qa5lQ-Qhj~gLv7h79!r;$7V*e|`2UV8afdhj-C-pFH~+~%<6*!&gM zbTYyZ$y;vW6@|5$i~6tdab^92Q=nx)Tyh1@DqnQ4XE|Ue=is>-I?z^AEc*E~v)jz= z%m9UDv%KV-mYwIRSnR+~S=C>SwL0diaQOr#UFqsLmN|Fha&(Gj5ox-)A}{-QcPf+E zkyhvNkHD=bt|_yol6ZltvTK@1&w6f1*yZssfPpZgg~+r1vYShNayJ4CQwN4w9dC`3h24u}%)US^ibUNa&i@m36U5;o1m zqcjIcw-sut6Lk;p6Mpgr`^4z;jlI31D} zmfb@QY|neN6Yp!B5ccNXyJ;NJj3YM8!4TwhJc1Ip0HqtyY54_Ro*dT&XSbVE{2s9q zG7sq8#a}-Ma2y~vJpp&|<4*C2p8~?UDH~?iB&M`8h0OaYh z@$~S$8N@Qk6Zpv@sYzpMbTEDN#&qh9@?&q|sOLlS9vokaCWsOw6;2jRFJTw9zUv4L zB&nS8j0X)Tq#{WCdC!+^B*&TK3gC&WzKuJ+1MbG-d&Eb)zW84JP-hMA`YTe%=~Gs1 z^sl{{<5JwE6LAG)zz;YiM{yT9{nlp(SDUL0)J3#Mj&$!}Wyy)mHgr|Y$0M)}>v8NJ z>fy*MvyTfAs+_T)%kJTzmaK?4L1EJ}{+)z4iR$SZP0RPU(ii^A z_tMz^ej`oXx|X(&7RdWXntymVjSdcnIzH=Nx%_>PcBU`(0H7#m5Rx*He;R3A(#q_W z*8y_j2ekK*>%-~eoa*`Bz0X4&CWGj%wqxu^9^F3907Z8#A)hco-Y0Cc1v#j2X>OhC z>})^H`$*|uVaRAzUW;n02zZ`#ZsX_qx`=zT^5+V^NNm}!pHC~MDZQJ-8Q*---Y=eR zYcy?|*NRL;pI71bKCEqORn)0!hVLV}W4?y!Vh#-bu(qfDt4SyIciUF4ZXPTT(^IUA8Lq633 zgV15(Zk)VXyvRJaoR$`v!gm=cTb}xD>UhfU^LovYHfR)uBi+a*^oR#5(SLvZt-seM z%Y}X68X0%8BhXZRn$bwx(3~LNfcvi)jGN_l5MQ`AGn;16quY%pg*||sA~b@BA;bn= zL#OW;hkvZxpG^xKrlH70&>wIeD%Jpex35p9>7n5`>|+&u$gB9Ji6`LIe03dC!G|}QCwq^&3_((p}yQv4o5jclZM<) z!MGYS)TYsec*GDJgB 
z`*2-C!@GTmN%JT@Og>x0L&LL_@bp80Gp8%6r<{W`AURRe@1gdsyKN{f&uoVH#6S99 zejCjo)RU*!vAJ?(X%1c1V z=N+zfnzokHX9S3=$a!U-JB5nMYOQTQ8=^L3F*;SihJ(XC9rYSZwhoGd(U7yF`w)DF zzsSuQ1rca!8sg0s#V_vu**R+jtj1?2A3LYvY^@+Gya;D-lg%e?T9b2q$Qrqv;n|r- zh(+|L363`1-cod8Fs-3y^)vtR6X`F%`n~k<(L5X!FWX37CR->Wj1 z^A*2jQXa5IfUyIZQni}hXx3AzTRk4jRDUy6X>uIp+Xw4ziE^n*x@B;ihki>|)}^E> zX&^?*v@7enqygc06od&<+$@Nc$>zIFCUg;NL}pU-$M+d7Z5nWE`LGs zCLrekx!z#n6HXAY4@Xdu7)xvZLoCC*@~pAl=7(s(v|C#O&sy_iJN7wT372W%_hf8x zuPIv*MK5Jg(?xG5a_%B;zAL9$hul}02re-R@X(iT%DuC_ABTE)XhwW1#3LtIvkb^r zCOOMm@5P2d4@B8SZQI*-9|D%M2m{#(z$4w2Q{Ld@!GS^+*-y3(it7cAaz**-WsGtf zS9XlM<&F2E?5Ew#Ctmz-F!@ct%YIp?fMU19xJrwyC3D1HeCHU-w@6|&~kG%hHyh@-(Wv=x1RpyZ@!mqy*814m^0iI>!%^P!eN)rZ zX=r2=z1^9MYhF8;*g27r2AQhTgNxNYx%DwQ=RNo1OgI729-Q;~M?aEYy7N-{@#0Sd z5IZqEAYPkeG~SBBkn>FX3irk`Rd~5%>u4}7#`*kn+hRtqf?Xm7t^jdUVwT-VQ%{%X z7@cUX>$*U+kll zT`SVa5W|=3Cl0T`+9gZXrTM3(hQ-D8kQH8AN2eEC z8|J$^$I{<^=gstq|Mbc9%SfUyVAIb_fmK)1o|ksFg!Ey_r`wX)(6gDE*X zZAw2LxFNXqNLwcc4yw5h4PngE&@y^Ogb{zxhu3m^uG0%|A2?$<>$H+KazLZl7P+{> zhi<;>p9jmIIr7=O`WCDuzemJGQ?dcIZSnoYtlDE$>(9|ikS2wKb z-M%@MUcGY*jSa*@yLbsM@b#IQXgh)f)7?bG;_>`)oXVy~3EP<`(@nOIr47LL5}B|}3hfC*4avP{eGp@)Fd7otSn_3Ty*ZlUt zDIVqYIaezLZ#F%TpjQ9!<9Rd_AZoM*ZQO(xz~45?rFg&wCjidRFQxw5*Mq+1KPs#& z6aFk)X?%{d^HAsfW)G1^!|_-AN3b&QwL7+DJo%pAwY2VY_yW+C=@l7>w8?$=JrPgN zgDuTCZD_7$lxbQf`zvSLYG>$SbDa9g5151<(=Vt&TI+V5q(H_hldiL{c}^U}L` zoEHyqPO*Njoar4dnLt;7xM>5qWNJ4}OUD+|D|@|j<3GbglO$s>GKd`^HwTdp^i#%> zyX-)BaEK*390TAu@Zh((DSP`yUuJ>DzX-&4jI(Bbl@Vi%rN zC-;npdPd5uTOP_%WppXILvn~>fI-&NdvMHoj?mJ=qDjA^jvC3Wp9}~scU5XHw!Qfd z5G)8VtG;`SGS}U*Bc;bNXuxu<6Nntjjtveapu+)J_|wluHsev|w%uj@p`NtP#Q>GVAV83_N4t9W{=+o= zYab^waDTD$l(-S}6!(uIi;GO=5*%X>#8@G{xuccHw`Izw$WdQR)&%7NdIWCNuK-q+ zd+J%X1z64is@g}-Z8$7(5&Tmu^vt3i0#SwT4zvphg~`h?;G>KyWRC&-Ze(3Wj#)Yx z)jpFzk4YW}1R0;Q#)oi(rRSis*noXcZOpogvE_T3U0`D%@pSaTT_#V08|7s+ic~nc zSVrXN_U#))nE|AQ$XU-1`WE_Yh$6}uVh$FTH=pCKn?dx;Lw*iVS{7HC1{R{mF#6@H zz3PXTnnmFd?@SuvuXN98>%%z0M1FEz)Y}@>Nx7{%x%BN$mZ%sf5Tj^qcxmdjw6ofg 
z?%#Vr8`#E;Z<{{EDAkDKB-p+%_jT5{8#XBb$q#h(WqUsi0z`jcK;N5~EV?CgArnrz z*=0m2c#u3@9$n6%9WxIX0ma`>|I_dPHX=hFv%JTtoDbLx_cEGR1|lstl9DarbBH$< zVvIakdxCE4X}9*V(XRu#4?1k8M`-|!6CeG>*SUWsEu+`HpOv?Ez8&|R1ZP=J@r4aF z)tm&CG3I)!bgp*D#H)6%aw+LvE32fRW3) zE2}G^=B)z`uS?^h*O$A4lzDNZoy(jqd%pGNgY>CC{!;p@um1oJp>mk)yyuiDMl3pg z1wU!-ZYqHjBbbiChqpXXn6!yk<+Ywo!Ek^#z?y@qD*tu(}Z|3Tl;b z^W?+1MUG|OOoM<7H^<3E?WPU&@O})ZuooH49pdj>??pw~BAhum#sUcPoCi1Z&HDks z7{MW$7yt{ftELMlcKqjRrA-Ki1<%2x!%wWLfQHAA_q~aThpNu@HxWnTC}#so1A+u- zbJ8XpT1NNxCR{>ghvjM--L>Ej!?6U6Q3TS1)`WMO(GMtAo_G&%Z8_v}Zb6Sd;_d_x z@1hB1oDE^Bfm=t@MaWjueAnTIFQS)u6dlCICzP}nNv`;yXOaEJCX0dCAL{_-H5zmzRDv+aiJp$um1SYyD!pG(kBZS-m{SzOAf$uZq!2lpbX)I4!c z@NH*(c#MoL+8-Re+-P-t1nfEL1N4rk#o3+oxj+4S`se@WAEa-5`8({|XSow?7zgNg zmFuQnPZtg4CaT52_>7}0U*y-30aM&~C|+X3;HBX%M`#~zrgX8q{z)0ROu76;nT+tX zDWZz_3%8)(X@4K6cS=wrTtPaiVp%CicFKOlCZ(NsUks~gfJX*L7-(v{R3_66k^{(3 zt|2qtJKT>>;32qRs%X=Po1)s%%eSY|EHMRV6%hgISTKADcNDCeh7dz|8O;eDaBbxT zyZRuPcMERdy+<740#~&Q4z23b+T`pid&%Xu;XxL|aLbe}6)*~*Tv=#@E`ApQlm)SX zQT?eySpce5iqNARpcnu~8I}1o-*R{F+_{B_qSqj=JeEK4?0a_S9!}X)t}W}8c(PS- z$Hlk{y9iIXGOZkGSl~y$#+ps-iSRy0t|Ff7l7Edm$A2O&{43%h(6bJm%u4r7R-+!# zE@lw5DOZ2wBAae066NH#C`K(Ox;xQvqU@wS{ObxGqdvJeH6>WhrQnw`71{1YrhJ># zk#1B+9;=`Ds5`B%?4@u2)epHNLc>g~j{{id*l4=XcTytP=;I(lUhOJ}dZ}XjRs@0- z0yia2mmY%dDHqScWd* z3@q&^y)qcg9e+J0!`i*`C(vPOx&%*IURdrH92h$0eZYxe+s#96oRdqeaRA!eTgMOF zmv&Ta9k79g4RVNlrXT(2QTpU3K9hdch+y=0h<4gg&9b9(I4rY!;q1Tfozu<(? 
z;maLv9HY!rc}4vY)_2R4Wf}9kK-huO!OxOeE(b16l-YQEqV1G)%Yfhbv%mN)XsTB^ zfL>t=Po;AaYeT@F!K1|?*~osBk7wJ){Na15M**cu~WjN|`uvJN?_706e&y9Y*+V;!u zf^3FG+;_ z#a4_Z%fA+pg6pE~@fsX^=x3Ysw9D<)H(8D74KWoXR(!>FV7rX6)Y`GG5Y)<}KQT9Z zOj;~oTc1ahyH=6&;)+LzgqbVajF;umJ?A-dhm9r6``YhjAEvFIYU^`fZY?^BkN7_B zzBQBH`N{qCD}U!T^bj*?q>Zk#qSnV|QA!i-FY*_uM2n0x0}vN9G;WF)Z8|qtMT*!s zIMkP3d*hY#RrDXD<1N~+aoTosNr~z#V}UA7Zby~Q6%KhaR^iyxI#ai)mkLL#^x~%V z^rDKY60~7e7~6T3v2H7k-(02Ri{;R?jF+6%X99>XIgdZX=$=uNsc5I!y27z36fpuvJ8cp2E;_$G)I6dS&Wfn6;Uo1223zO=ob(icAW_4H?-`ciuE?i?I4 zfXVR?@#lf04R8{a^amiE2Vpv7-s#rKP0nXzgrAz<+H=?xdC%@y78w;>0(EIqnC!E7 zsaqG3W()mzW$;{jl1@BFTv9&|kTGmvXIV<}o=Bre>UrIw?oBM}G?$|+Puz=_$Rj^- zFZ~-Vp1s!{EnO@tQ~&!19`wY! zcGAl&G@yEcLq46Ah(5ryhj;8#CvurZz*d>SSRK7?dIRqePVQ^w7zcFHlbkgojH{R= zInCP!bS`p|-0U3c1JMZK0T~B~F~r1@yvWTx=DxMDmF5yX2n&4l+FC9GqW|K)q$ys(eTtH?O$X)h?x?Lu<9-Tb~Fka^9Yq{6PVL2DjiIVR*Jpx`N<@ zBy*{;rfhp#N9p_D`f;o@ba6O`;$W+Y){IY0(|C!LZ-I3OBB9*8W{OFJ-2a-VFp<9B z3m4@%LN%ajd#KafUWb#(rpU2@o-{Es7^>E`(44Td#imNa31X$Oa*yJG zZYE*9fW8~orUF1KQE3%e3-VKE7M9WqD;^%>d?4qT&3x^Hy{Qk)6Wagu!Gko9EU6Ge z5f0UK8HeH+B4WP*^DclbkDAWn5{gI&j=6C2#4$Nt$`i|dHUsO3F+2vOt0l!_bc2IT z{3tp|t4stA04Hj>SOPE$#t0hePLQ{WfuYnzG@S$z_Be@Dc9qtOC6i2wc^{e5N3)1O zkO$S+9dKG7`c=|6oar9&vJQCKp^QVkD_S9UTL5RBEOV0G#^yHppR}1zm3OBlb^${A^Eg2kmVk66~PP~;k^kKrAM}l*nJFe?OWlO z*tOGSa0(a|3`5@lclhf7ICdh3?j}if?YgN`UBi~st6as3NS@+HPQ3uwRir&G-^HDL#CP6&n11rZd-Qs2$^-y!(x!IMO&t1#DO27j z!QC(cj!l6-qRUjw@8-=Ea#~Irj?&FL*VFW^NqR^6QTo%UHb1H>b72ZZtMoglxGo&w zQv|3=E54hj2>)U9a>XJ3FxL77%N!pN9lKq%ou&iDIP%Hc!V~D$IhkvyZ#b5CKl|CQq<`}-{xH4!HXBUZ zM;Hi@k+hSBJC}1O%+W;m&K>X@dH03003XlML#x9WWS2Dg%twE};srxutcq002aZ zp^E{3pGlIyRF3ZyI(I$#nC6&v+PBgk6RBr$54Ml^9Pub%D@0xuG(wc_? 
z0Qmkyz?!H0IX1jDdu&YZguhb`o1ny$#m8{v4`9*+d}EfB3#{D#gm!!vM=*qm^eJBe8DC9I^3zp zyZ?xQPLfepO(Qmt`Fg@?axKR_08OOa_s1v)tUK28j(iZnafx#XxU& z?Jn-{!QmNl)3&$ZdNFWMu>^SU#|!EI`NSWlPk;KqrM+!LC^`m%v)0Xq6FFq$%8>#x z3V^+1$X|ZU_IN7>fZ{!mcH#BpPYKM)cYu4j!*YDs^hJ5v0Ur){)uu?l+QSFiB}P`t zD1CXhe#0tCRoMHin+_VE`D<5vB`oshGdI$y294QI9+2VIiB=xYEpY$`a=U;r)vk2` z=njz|9st}g5L+}L8n8e%%5%ZMc)wy_Au_%T3o4j?Q5?gN<0b@u39>wXA8seK7F0xuDd0NjG9 zwo1&^fu9Vb1clKmgY#{=wrOQ|J@R~UaW$C>2t%Kd-OQkFajXCMu4UZ)#EZoJi zomyb!aWigIB3`rco+1_ow5s$!K5L12bdUS;0VeU_7WK59l((}SaMPnk?;TwH{T8P z52ghG%2&Vo&Gg5A@U`^rTkkQzAX0%Jm!SK5`-AhvVs&)Zrd#jb^MH{@GX}Q|S;&#) zDSP$8e{DoC!?_L|hkcUM?FJ~pu%;BHh?k~6rlav*rqfuzg?;$>VEtH%#*#GVKHLh& zxbb5hJD57z3AzFf+-~GXE>Dh=v*kc*Ep@PZu!h|10!#L4Jm^BBgg~svDg*glpB_nf z&@FsO*j@Y$GH9z&LD}Bz&<~sCHalH~fcqEX1OUTMbj?nTjij;Bp|rDvMhn`o4A~A2 zc$udy@4od8vZH&%p;!z+62KJlvgB32iW7K_nRc3@h>_<2!%@dh5UE!#Z{R8lmA?f; zxe-u+-2+Fqua|>3XvcDQ^}B&eH96O!Hcz1&fkXa4Ei0BunP>bABFZriaP1Q8lTYP( zfpFj_xYR879|M{dKe_*C22i{X?bw7x`|aWT8b>5Tv5_uly}yS1KWQCNHpPNfIk`WA zGYVm9(VoceQ@ptGSYuiOamB2h%s2r$-`HmvILDFuET=nI);PK~YGBVK61~J-y`AaH zU;IY;-QW8kXwRL&WQa|80Dr@^Q7*nLBj%sG^g#qdV{G=>@sLTCX%Umu z6^3iZp=ZO!lW!+YW7bZ(#9KwEGkji-m393*jikv$s!Fw*0?jrKT0O0q>hyAjw-)Qq zbEqpo{M>7|StHf0)z;rE-e!e7C%rSGRb*n70au{nj(wM(1w1hd&?7hyI4A`Of~)4U zo8y$fbpw&l#t{RTqc9cV002M$Nkl}JdDOL{5^{*G zOY2S!tRf1J;SLKdI~dTn-~#tT_fBX&IXRkMzIBTYWg}_*cr`er?bNmH$X)!36l}6& zED-Dm^azG*0gm4tfQ)WLaIQ~H0D#wn>*|uZ2Yh(k?dINEZX4vGbATg`0i=dQ>lAn5 zCl3RFS+y*9YM2N$IB@t0#vA^;3mXI=IbS4BSi!7qtRK1O>(>FLwDS&t^&xHDZ^cQL z<=>%d#QBZP%6(q;rOuNpW{TU1n{ms9CtlONpN4nXe~6HE^rnO|Is zas}ie-hMP$+(wO-8&g#&r^v-3YTP#Vk;f)qZrEC8gI^E+Mn?M6{K8u3CpO<}h@@-} zqyL%H?_$F&A(|P>Ns*)6K3F&TmAlQMW*S}$5LeByc*uS-x6Wr{I&E_Nr)|sk9IfFp zI98>YGU%#To1be(PI{9%u+PCqBoVm{|BWd|Yj3nD6F7BumCXLZtQtm|+21+Ii8xOA zufN)F)!#~5<+~2KzjE?ONosy$-vkx4%cT7qU;k$M?|$c(*of)s+gh4{q2Nk}Gzdch zb2%&smkqph^Bt2p#8~ob-+uef?buYh#Qu#g{O65zr~1kWX;j3XqbY1o-O56pvVK;t z7Oh!!o>jVM#T%`(M(ZbiG^N{<-k-{*R_|x|w(egT>YSCqpN$<|X_H!d1_a9uw3TMs 
zET7GY*Sv(yLSD%2DYJLJxAT2Gk*{;fS>=JH-xzMo7<1b3TL$r7d|AQ$vb}%=HkaY1 z>;YtZpfCD?J7HJJAwDQ}`O6jIKV;Da!rL6Au*(wfuDa5QQ_L&(!lO!MNx0!d_krHA zw7z_hzVWs1ra$}im(q8?^CQG7IL(XgKyuhRJvf2E!5#74oxOxN4m)TfEjwJYXc-(h z{EH%l4gTBMJ(Sy~~(qrk_r~GWN zc;+4x1});s@dq$0LY84n?%5b~OMak9uh>kMmBN}#Mw*zcaeuZ!0!^rDyt^!7%*&gvbP6t zc?>7GBYot~bhncq8_D3lj-hSg1|keMrlvyk!sFSw*o-#|VBH2d zcLAzPIb9g8Oxgg|l8W})&PqlPaVn~C^BPA6+)ly7xOyHjw? z1Yr&>4{p>G)T;TQAKBq9t`Z>2Wp=LTk-8huT3XEjgdfeXgr4OdR$I1_5f;SXgsVJ> z*uxQ%4}tU$@f@;|Zhg&_m<-<3+;L2Kpr$R3uI3w2e8oDI ztB(Sl2~9gf921&A@aW@q5KpU6+VYbUL;o3|X1cZ_aC zT>crUd5Vp&dZupncF>|RzZs`rDn8SJUSBb6h*m&<+eQGxqMb+{l_+Ud5@s>*2Ho)h#e zXTsPTs!esId2&+~vN~KXB+$NN)sP+rE6;rd1>I{ zm61a<%WaeVL}fH_uXK94d|s?Fo5e}_nmJeI)27y9z+&m_%%~k0bTDc==IEUHJO#un z+&TQBvrkcoT>uH)_aOr*y|o8nw9~O4V!zEH8>&>}ASF0k08N&zUC#4_F*!A%+Y~uq zk3$_Fnod9b{@wJsPk$r*#h1RD9=-<`@Nk5~71#lPfS@CYs%!yn0&xJQizi=2K7Fm3 z+>J!mv1_cO{ET~)hvEr_iN=A!_yhd$plFC3o&bBsQN-aVE3kG_qLA|dwCi_-CSApW;;3I$-{G&H&4%k}m{2|44&T5cnka^MIphP; zi33i>8{*q_lb-U@YM~I^3A{AcfhGq3c%iH~nXHqWFr_Db`-}&>XnYnPa$GgfC#RV|V!Hp9ig-mf7 z#++nB2}Q@mcf8l0))%+ZqjzW0M}GO`w8zPZhipi8iGv;*NK9P%3qt2yO8{RZH^!4o z9g{H2YTfpr=X;!Q;O+Ligmt!eGXG^x%Db^VPZNE0lLRaC16hN%XN1k|tX7L>_-JA! 
zL0d4^NK2;G^z4&cO|zwp+WyWCSdPbMyS@+Giu9VrcqKB?j58J~il=?H- z?4k%Byde zJ>?E7_Z(d(mV(**@m9oV?u;{H(2q|=`r4mCOM|@7#yl5!F9ETD?tsDDW&8lTy2F9h zJp=w6#xXYZ*Ajss+IZBUAjj;`gCY<|abuI~6bo#!^x6Y4?=lj{W-)+oI~d`mn^Woa zn>XOrBE`#OObsM%3len>qa8v$y{b(E)rB?)!So6Tyetd$SeaNxM8bn6l+l$_EWQv( zxB)Nfg!Zd3?>zw3$r~f-*7y`^-A2;X;4mC)Kma;|VlE z9=)>^v=!6}N}Uu80B+j=>>Io5L7y#XVBFmRmQCV(G&2{c(Me_}acZ5IKu-3Cr}UAQ z>cnE8rxHp{2qTDJ$ah?&KFquHP$Z5`~qf}bzqFsJ)t*bP~n{$wD+`uR(j(#Mp00~T9B12`Mr@j|) zQwCG)qP=ZzuckMD{FC(B$L<7!!9#>Rn8mEw6H)4%lmx(NA5Z@1hkJgkm%E>d)Aj4u z>D!rP5zbHk=fXr;E$tb`#(!llodS96JR{noJ)Yt6+^wzv@pC8n2P{DwYth+9&Uv`n*j*u4Ib#5 zO!G6V=|B9tKTMze&!1r#cMpvLodD(D7|0LXI0}`68?vgQXGFUwQwBvb2vTHsh|7@^ z5ZgVL173Eo=t!T)UhY^OUOvcOtYyyS_s*axD1xI3&Z3T>n_$$D(n1!^#Q`|{;w^$s*u&3FZH5cshg=+QyQ-;X81mKM&{Y=I-;lRmV-SwGvxd;H>L?mGiF`P30ZET>^DCgNjI&wMX2JZvBT_y1N z=0P@}{qnDTEKPTDN*f&0W4N|X#`@q6)6iohUne359$wK-*omRxG(9m9AXJS4>!?E8 z_kaz`XMVdNf?V0)&XTsO<&F{K2>sS_qp=fcs0E{cWEkxZs+@y63{4fqaD$(dfPQG@ z#zH3;PS!T|w__9F5t~yNDf=3_n5C~(un%W&2oZ;yY(5mos69ii7lK{uBGM*hLHaRZ zTz$D7w8Pg67-oV?S-W=qW+@{h#4fVVPB12qR3ip3+JSDwW!}x3tA2{Lh_(V~VqY`b zYs?s?d@a039N9(3{8WP07(lhdb2ycOORw7FQ2Hx^*Zk%%rF-44{$m53yi;f@2Q_Pe z$eD~^ej+&=af6BPpiHR2zI@EzNlw$+h@2PxJ@BI!dOJZWb(x=xy^Q7Q?`Ds#H5d_l zw8!`E-bcUmdK#V@rtPo-L)y{)%}Wr={)|Ew72*`2mQNy!KiZL^5oWuaZ|2smTiK7? 
zZxd`%{-@9R<@k&;wGx$dBY#9JJvi(%Bdm1FtW&SpHn+rL;ylkcI^QDvd^Fe zl$~3xW2GE%kb++HW>qCec;G+0F*Glzg{O~?n+ zegtmH%kB|hH+gU=UJVNBHkM1OoUz={X*g?w*)9-aPn3g}0Du-g2zKL?McVx#;P(*F ztO&`lq6~1mxA_g(Y5cKt5+JeU3Q7m*)gh;vv~yp`hB}@%0i84R3ySCmx44)1gzyvS z0v=YqAb=-C2LPk{Alr4mFZBq|qcy_#1^Obqe&mKHCPvYyF@l`(cC=qry-5S*qeE(t8r{gVX60Ulny?Z+)I-xTqh@=(CaFu2SnfiBsdM{1Ca)XIp77a2lk^d7}G=HX> z(>9+GQGPWsKruq+}+Hap_B7}kMOiy{RG07pe6V7^ik@-|lFF(4CpMcfXm%kY(bkS?t(Cp>U;pTn=|BJBpQYJ{Y}h#L1Aw6F%8h!A>)pyJ9w8q} zylrp}jj`;!Pi3d|*Mckpy-X)50uW(h!fblne?#Wp4Jhm zSp$Ua_7V@=M^94gfg{-u*!|dRH`A*e3~;c{COHXC!)OP<^#WdxabKtX&2a$54062l zaK%;thC7H42&x4Ko%j`K3V5xs1GudnERheQn^#V-T-lW+RIUkJVdDzwwgJ=kXJ*qc zzA^!Kjr=1{;Nc-hf@Z`PdI&2w+VZ!fw>CsVNDuxoKpyb}xZiE0(@h@Rh#yEo(giSy zMnj7#=!sJXaOM$^vB6{JP2O24YQa(sP3=q>nZ&;I%FV(As(E#t_T++ApIQe20nXF%urO zO?2&daliwz)8uD`4SO3*fQOm5^mg@ufT{DCFqW-B5)&c+6t6HoVR5F>ZZ6X`2 zEU;WOfl*)(1`az_IU8=Y>ul>rrn4=*`J;EzAARC8=}TYuCLAn)L}y;!YsWnXD?x~y zEc!v0$mOOT$u_IXZj1e8q-FUUjCQJ6%bV#KMod;O{Ny3Id1B!j^ztbWhCbD-Dhtt& zi5KkAI*%*L;joz}C>;!}){UIWb~(0k9iz_S@G|(ziB&U*hp`Bdw*g(7$7m$!RhKQt zf3x#`kqOV?0OA1sa)(*Q-#t#(;BbEIwdwTHTeoR5U@s;}2dYt{UF}g98!SC9F3zW! 
z<>dgvOK^olmX&-elAw5orjrKLBVMOX3S2$0ZfasMy~F_>*AR11)5G_F@MfBWtEMce zAX?26s(o9ZSZ6aQ`ghg3K-vK)0iTBe8iBT4MPd%FtNMu*{WwBAqaRSxfuOBwumVWi z1^}d;T(L|~UJNN(iEo8;wbc*a!nI&*U)Y;jvR9jViv;y@LfcCmUg$1 z7hXGbaU-4x8y>Q+A_%vbSopO?Sl?@Y1$H?XVU3@V;l`^i znW6^W;z18lx^Rp@S(T&lW+RQGAo<6@;gE0Sn|4rI6fO~uc77j+w!d87iIl!k0OT3hznB)GV+iW)uIG;PL77pc&f^33JU)f;2B zKg;>&t_8_hzN~2izXEma2bq+aXXA2}Pd)I@*A(SHBT=)RGk=C-6RWHZC5L{(TTG5B z+$7SKxiAH4pW}r?KHmV%TXwTj6dl3Evs=~Bo+hRyjjn4s zk$gqyYU^fcHVK7OYjiy~y^4BPf2{ggaHo?ZaUE=BQ;s|CqUIJhT*<(8Wu673v04ixIf83<@ zD@kL$I3Q&DSW(%lx7_)g7DI|)j)WcLJ*GMA8WPOG8Z0X4Qkjtohn&AgN7E=T?H#m9 zy~uZ?&r5r5C|0Bw$<{5)HX^;e1XZ03&H^d*t9tZrFKWEn0FRz*w#t#j>wwf9Hnb^z zk|#tg^P|&dy9;?-2IM1v-6Ftl8`f&)5L{oD=#LS%86oXA0IR?J@=I}~@fsVEI$5^g z2N|!R!+L&s1BVy23joLb(^zqz$8hP!t;He?7L!5AtiCcZY`#k5#nr#LkOuG?q z>vp^cH{14#?~u_2RHy+X+7xyHiUC0(B^~rM`{-Y0Gu=Gpl1uGmP(VBY5WNm9l>hbU zWL2s40$evY;C8Q|xd0BWS0_ZUiS%)RLO+~Gfwmeev@Md>ID?{5uOh;zr>7AaU<9VOaz83yKsYp~6Fpht1DP&JqWvY;qvVAJV_O5^H- z@d{K0tjA1J*U%X}Kg-FBOh{vbif8My4x`J^`A+L8N>*z`MqHdpbh#=NMm8qmR0UB{g zRrn9BeG6@)DIl(B6G7kV>N}78&0=j9wn>V1B$Yv{T`Ge-g+sGO`V_&>^wO*pX9jr2 zmQ^2{6y0UOU16De6B$L1`E&{J0C5LpH`#F$+`*2}6HGsi>VWTm`_1&nfAs0}S6_LQ zmQWFu4*D5HGesA+kBJw;S2|zC%{GuVsdKP$>>7EsbZVuoc zl?;)BF&dsw217m`hNwA?QA5vi`c|V zh$;voZ4as?lTHF~{aDZP=FEmo!l&TXIayG&gEl3Eb_1Y(0{9Rv20^htMhQ+IOgy+__J z%&*2FM^5yQeW;Bl(*mF6vNJjv}^e(AB^Tq z0+b#u|NaDRP9)JBkui6p>XJz>W zYRWomJjE(xhs!~zl`rbNL<{$_-)y1ZeaaM`p*J$^0(=KDvhJE!(}N-q^Bhz$zlzs&OPhnV^_3iX=$tHiT`}58MUtJch07QNSHcq^`qd{^jXg>BhilfUudB#q=YN zA%4ioX|qen&LSdkAg2o%S4Zst3*fK=Hnw$>hMV68;3iLS0K!0DZ;Z`@9PjPPa2`m3 zY!9coaSGdTLJI?8I&i7woZh~9EiJusFSw;WaH>5qZW)oFw;w)Czxs>60GAb+VL)OJ zx_{+%y38G*0uuYx3C;z-8sBX;;v)WJd(Zb-)>S8Qu@K zD2wSwS+(*RXIBT*JuX0(>n!)#eE@=HIqDjJ1Iz@S@_R|Ing=~`j!CkWE|CM+v&ac9;AQ!Z~h&!!rw@1oIcmv!ER+lC%V`@uY%bl zWESmoLqmbRp6r?!6zs(8%&?1o``LAB4n_hyIn(=q2$wQ7F-Vo;QP^DGk8ukSPnc7$ zC){4J7u7rUv8k;M-KWa$?yYkaGWp*CfE|K(J8)AYLg;v9FqLB}DC%V69Fj2{1q;Us zj%KGEW5OM(a~g4qkHW2;0;DY=AA4_RKE3^DKHWvHuN&j`0Bi>er$V=^O+j%0H9T;- 
z8fi`p_oi#8^t&}Vm9CAW1`V-;Mf4MUS{pe5!^tg0I|`t9vtuhxu~WpOi*|eC`ZQ_H zq!rq;9irDZjryGjkJ78Jy^LJ2q7Z;N>;-h*ZJoAOpKT^y0gQ1_{6Z7W2mhs!Yit;e zoMRV2j-b!d<~kSpx(OjCx)V>6fcgnem)i$e(m*M@C(%Kdg@uqYR-EBTF$&zKDEBlQ zpQQ12$2M&k4liZdgzljlkt~=CvvNSWoKy(BH`$D+4rC_}{Y+AOJ({|!CyfHEAI>ku zDv&bG(!ASzu^cZaKR~@G7<-$s@9|0=`H$1{6rnOrY0^*p=GVH;hHzv?_nIl+y%*1o zK5Bk(D^2Ds`|c$mw2b}~K5BR4H1RSI-$ljv;wNU0(!?lo91ZkFD)N?BlH+)B} z1b-32vdrXT)g#dQ_W`=x-gwsL_4!XDSWh3Ldk|4bC+;cJ?G`VwaD)j{0nQsF?vbeR8-kF_AOH4X?;0Pa5jvnZ8i(_?f zPL9JJC(pz?04EuJcIY!ko)~buH$Z(L*X_x3uHv|9&}I5gxbteN*ao<&YEIhrA?7fM zhKf#>$k*4<@IqeO0aO+<=Bm<=zbKl8x0sg0Kksqzjg!CNfb%V_A#Z#DcYSn}LqPCA zl*Z;y-4L#{I^|MY<1YQOw>)pofs8M}ZbQI$%n9M<8~O8QW{Y?%R1TZt#T7#X)R0fV zNxd7^|7>!9QG0E03psy%ON0s&{Y?>>{fr0HbOm?5NW-9JH2OJL$g+v@lrM3w+bE3U zp(ycZ$7)SNpmpT<>fcRNw>_v~i)sOq!a=k!bVr2uzL0Mbn;N(X2 zvMk*Vw-D9CX?A%r{p)}A@6&($_n&}Uybae1wM`syu)E1Xe9Y!Lw6sQl&ySo3Wf-NU z^1JN})Ok?K9Wb`M%h?brROaSfk~a6?Af352a%_BVGNv_;>P%NF^mN|oaEnCBdwrlf zGm;m|@)*#?*@0;uZ47VTokyKoN&{@T;g~g6)uAQ8Om$xZVKdW>$G_GPq}|Q zyPh1pf!&|~`Ipmw`Vap#eeEmXV$%)>cA!N9ei6&J=pSxFbx%@j z1UiR_+)mpKCNgA|$?QP(xM0a}!!o!Tq>nnPoQr2w|8VO_-vKJ}#fTpR!ZReXW8!Jb z7USeIO!e}jK4$q_@q%|}PC(gAcJy%!^5|$9 z=P2VHRz2iEhB`AiolEE`o}XJt6SwaK820qJ0J>C|I4U@g;F+@i+;j(S&%lvq0krwj zpIkxJ!s%Hct>}U}l+~o+(bIZ0Z#Pj3=aC$3w8N$89E%Ayc#+Jlqq zhP`&`I2$0A^~j5yddm-#tGNYOraZ0$_;*Mp35qSF+(x0=4wGHwiu;f$mP@RGBxjlo zgiA+znv*62w;V^!B|Q0Kzr>x(9wy?#HtI-M?R6)3c8h?uXwLrb^`FZV?k9uP!sdsj zj^U*!KJ|p=5&In4L}_;-vRbCKwLjD}0_SW~7A`i-g_VVa=G3#=Eb3uS=j|j7tvdo1 zkxC-|1nOiiRP6EX1Xay!-CNE7)k8^vD&m}^cj>!+uL`G+^TO6+dHSbj>AjyctKzM) zfBGCh51;gUm!Hw#7})WgX{zkc^xR$r5OZq#gr z^>f@C)rN$rySa&K#LW0|^VR3ioB#QL`d7{W{=fd~=H-(uR++D|Djj`H7a<)YAsm~@ zBYlMW!;~9tMA}e3^%aW_>V;~itq%Mx#a(_tCxiTGcUG8nG(CrdxlwcN$8smS$Uw4) z%ls>lFjbvwynH*?(o@UoPuvE@E7#Ih$a2E9-13`?UdlF|Eb-C_E3Mx8#}ogOzL%{3 zD$SX)x+SzJ`eXH78&{sS&h$9|Uh8W+*rdhc0Dc~-NVwvCf=}$@F~tCNwC9mXinbl2 z-6uVm)Rkd5wfp&Aiv}+`EdIi}!7CP6YW56yR5L|>Ztd(BU{%h4{^5;g6^ZC&^lHZm 
z>tOYuv(K(FI{mFC6MSC|ll2Gj|(#k<3fc@LjIZ|>c?-ONFZn{PxV1N!Bnwpy)+;4fX&0%#2L=IYR7p2iS{y!(3Vi)I3!;eNIiOqbq&n1Z3148 zP%|;#1?u#{4Rob1VWdv=6rU^7!i3bzexAOR#-f)~cYC|-UuChc_@P(Fn^ zhE^@QkG83j*7PN>T#%LI_s1IuPoxs3y-fUa4Q-@NZY{0$Q3>@VeW95!+bAzNiEp2D zOlZd;>XL{hcIfOq50z+gGoCqH_v($$DMysb7mHJ}t2#*?XFbP16q>qZR>KSRCv&9^ zy!;4PvrvzUTL=5FDjD+L#g{IEP3B2XIaEAcpuob7TApS48l$7vK%ZYU{1Qm_pg|3h z$|0`uVMx%|@fuNDo8?)dr7ym1+2>1D$r(XUVd!X5?Citb>(UKN^9FvSimOS_S1f&M zM8@eFHS>3PE>Fz6%kaGbVjq|h&HFVdC&!bc^e}I(`|8QwndFJ?3_MBY9^lm6SDM^ za*=X@PM8>!7VZH!oZgF0G;|SPjnKDuR!q8EzFPhX7H4os)#L@!k#WKgPM+KAQg%rk zmKlw(^n>oXgeQ@gHEkW3x}T}r0A}oNv>LbPQ;O{V7SLmv_M>cGLEO$ZUj`m+u)Q*% zk^C`S&`i!L@h?C`9b5q_!dNC}zL+cZB~M?DX6Oco6WGrsT-(BHEUp}5mpFw{9YCn+ z77IIvY(jz6i0Rp8VR^Q>Jh#-`I=UP+9aRH-p;Gli^r2;t01}N5>!_Ui!a7@U9)06Q zz~VHUY&-{)?F0M_74vD$Oi(JQy~3ijTi7Um^f`;`b~c(ZHe=hNPfnos{Q3H;<~C;0 zZZEAibLd3OLgS<$oWcTx$;-ur2OI(-w<}kQY=08`dzODn&fVqC9r^5!k5Lg>M{+8? z(}3RvB-?i{vmlU6_SIo#=em#4VSdTxB}YhXl_t&-cJIpN=KhuI&BC!8b?j!-67`ZT zqt^XEaXQYiNIS+Z=&L=&ZY>X%*m3S?dcJwZ8^;#X^k+!SeIaqDI#b(I@_34bXbjRzo|!VlO1_e z=RC%gVvLkzWn?zxWq}^of?K~_g%9`}t_d@#&i2fLK>6TUKrr17U3Ah>u4QDi6|7+# z(g~(4 zf1H=rY-Vzb5IxS~&FhLAoz|8*kE`8vcv?9=y3FBeitng=liT@79bKHy^61cWg#d^z z+;@co4(21>yY6fw3v#GXdUe?R;unA2{Pq9$x6OkuAF#;hn2iAzuv5B?6j=2KX{|&t z!1$!N;`tBi`e`bJD6#R#M2T0-YC(G_AN6#fSqD?rqf9TEZIWRxp|?e{RQ!^H^3uZG zShSOD0l)Z`2;B^90|(j6PYEt@%Eauo;ktUw*)->zB$8G8qwct$-Va2SNT0MrJAouI zlRBVJ)rn$<7ZAndt2%(_Lom9dISS$klqZxVgSg!7&W%_0Q3X(vb#wJnB*9CV?wbV6 z?r);+iuDqumB;EM1Z);!QY(Mr$gf`K3>^SxKTRv`U0z;da}T#C?!*Q!2t6tcZZx5_ ziKR=In!9&zHEV3t;iTvbfYuTAkitIv>QQrRc{Mu7*`$M_DvRyazSIaF1&Bh&G*2Ol z-!gE!1L8CiQa8WYVxikp0I_-4_XJ5RNQN&#!z7FCr1J|bUu*){wG5%jI(MYI;UYR0 z+#w}Cg8xdJ-B6}HZ}Pomf%PZf$_`1PsGcM2z;XfM3)pI&9>YS6i%K!qmGVHfC2cuN zqmuBF2mLx7k0SK2%$zNcKkjL%050>$!H)~=1bt;3YpJObdX$~IGD<0?)W`hwD|K$K z+f;duiI8xkw1>gDt^s<)o${yw@8z6Ymqzar6bh_VC~WysX)6g@S}Lu*IKkFkPU~O7 zR2^DaSjeKiv>kl!cPXxyO$ln-w^xcUwmd&PC$@YS^mj8dW-pVPt{XgX(T;@!eit_v z9Qbx|-~teTyV~QN)Hn&9Jsh1gH!n1Q$Z>T>fb!I}DMZc2E!I&3t+ 
zCWd(lQ%YLx09W32>mZHfQ)?8`L8!j)2&^xmI-yEH-J07n)hXU`F&XLu_T8&jR{_Rf zHqQ>xqZMpYcXy_9=en1-U$LF>l`JY#Dk)zgAuWhFX4I4{YJWL>z2%n#DXlv|5;zU; zKG-`13_r_`ega+p1hSXWVZOxS_P$>{del6?-mrF>Bf$imuPn|rci6e^3f41Rq$pjf zthDIbQ)#T?Re$A44UQjlN~R$7h)SqO>~o;LXw;9}gr+IrmU^{0s7u`CbldLMH-F$R zUxuJ7?wk0a!nigeD6osyrrX$g!sthwqBBO;VwAvUFN_AApS15-XDne zEJ8!_H_PW%KUp8KC@S_Y8Ak;x8Q8scgwBLi2+c&(KWV)c{cXN)m4?V=Z0{TOnW~}P zxyK2@Nz_l4E=F>C_Rl2m%~7V4R*x-|mI;M&Ex5g{pf`Sc>u;ig@ItM08-bv5E*IYQ zoPPb8^+JYUk)<3r)83p{JO#WSp)sX82?KCLr`<}!N3ckbS7e)l`nCeZ6mJpqgA8$WpTu(^f> zh#7Qwt#G8l`6((Qhg5djt;hM~Uge{)(@oj7ly>IaTw}Yli#fL^ZQp|oDx$64Ra7C= z1O4*pljaK+@+q;MXDjq^UKAFQ_}ynA;f=**^sC{OmL>$m|YJ zOfwExvfkQ67Q8Ak_RU18_&!Khx_q2-CUcunDR{P$HXGtvq`xNh+ULo9GDdJOC#2FN zFHMA*Ux;ecdG|Uu0CwBRDKFO+o=k^iV=TfXu+Qag(5vtf;!b$?rTTi8QkcADK7}KGSn}DUE&Z#2IBl97rG}jS zhbCWOeSzs`rO(W6bn;7jYmii>B8i)qL+LLK>(vj|4eO_s8S?nTy(I_e`{`FkZPXu($e=kV}8c z$vEv@JZf+oZp?(6=FOZ_uj&sepns;sU$%`2jvcg^h}GXz{Q>+KP+TTnB1Y<7;tPrn zc!BC@%-9{A11O2(=XCT;$I&r80synr4M{3`kpN*A;-w7BX-P?qs0&$BwhdJ(Y&bst z#0iL<)C=gqj2XakX<;!Y!L;DuqB0kB3bc0s$#VeH>!>6wJpLTXF#sJDKZ9COZJhYmoJvc>O0zP!-qPFg3BI)HHkT8TVWx#JI8uKxC^X06s(@{0K zRQyYz;fdA8H9^-eVQ2r?rxIUa=LLm%mRBxln+7~(=QSoxL79uloIf=s=4Vzxk$j9! ztAy%Lgk!|W9rTIEn52)@CnknZB);yUXsFQi*3o`6zyl5H0Def9V$03Cf_ zc}zVP7K(Iu6OjDXSC7j=I~LyM%>wm`8>^QA#F&{I1DK<#qLqz+VbQpfdQf-x1D#s= zus!5OsX67#ci$XNIJe9m=Os3%o-l2KQ%%%LRGZ)>L4HCvzpWZXks{h&@~Y-Y?%}Ox zo&J$DN(a})x3qBT0MB_(WJYUYl>Cw#h<@m3xoQ z+&wH_T5PXD=e=BP>F`0&4XnXPYMwF1Uic`@_(|U`{XSnx2&;6r(Gp(9sOocZzTm)jmjf4o_{2caYs!%U zy(s6ndT+U&8dNKB3}NlKT7AxQ?x#Dex*4OLKsEW}0=wdv^dnv3ZeZjzb(@1${mu#BPq3@<@~C)FhCd~Lb%QfVwu zE0JyOfMW9y097!@JJS)?3xIK!(47h;Ipk^C`gUIA2U=$5BH-Zv@#Z&jo4? 
zu)ziqVC+-JSKAxSv$gf8V+dq4MYqiEaMJhikzja>e0YvXcE}>amHFA`9(IYB*?dGW z48loxtXoidU5Vhmk&3rT+UwS%-@kcru8wZS=slcHCgu&5Kvmn>*X?dMJ954 zT(vKaukkR>@WtU8o7T+EmBr2WL7x!|E7`XtcgyN!S!uGq_F8H4D`4ohEt3PWj98ud z^xs$izVncx`a;itX}=m(_G?Hwf1wvCzqcIt4gq4j_q%_r$^-rlwr`%`QRSVtehgmc zc=}VCqNO)+84TNqCIQ&;alDmol(y+fsfl1clXoI_OE@>bMLr{Ft-7_{Li#VHTM}@Y zwgbqe!c81za~z^mS|%go@`)Ykm=f=Eu(TA^_gg2Hv`;eB2d9Sz;K#`()&j(kZ5$^^ zDXuQ*>rH#s&loM_^P*&Hc=928^RG9pe;edoe-Y)r^$bm>D+Pqck_~)A$8?d&_Sq}! z`qC_#Sj&r$`m>~0`dKU=SQ3>2Id`RjN~nl`fI_Omt7G(W{dC`v-vnv9cnjOVUp@J4 zvpD^EbLYltbLaZi<~lpYDVY|u)*6Mzm|LTK(%H}ngE8_?pfF*-!By$9*Gg1v=Vd-A zyU1%}Ym54!_CS~$i)g>K!F1ql)EGW{@B{z|SjXS3jGwP>ut~-ftUX`@n0&?$0g)_} zo12_RrykI%8LVa_fVi{1mg=itFZkWT#MVO==Lwc0v4bzu@bb#a<>uM57XV_mj<)~7 zpAHKOFCjVp$=%yoM0dQyD@ItSTp6_!K@=YOIsoIsJN>M+jezr%!;00;f=Buok)rF1 zNw(~onVVo(<3nKI{{R3$07*naR3h>|`PfBh zVoTQfYkQ`O@KjK*bkU7Jmf3hEPo4yp=;^1WuP6EIe@I)}(sNtjuvDpUEyq$%JK2&K z3)&|AH)9&4ld)3&yv$fh^Ww0Fl;Hu7S2>5a*t145eE-9HQU7ViPzmCtKQ@Y!UwPRwi{eV( z@w$?iYiP93@1jQZ3Om?ZHQC*wU5(Tqk=t+=0^{A*%{)%IG}tNMH}E)7;6RGn#si+Y z8P?{fV-4}?OK5w)EBB`TycGHF%6(V#{}A%|4guo6Uiv+5nc+kCYbwlOd_L}+FIAzY zKW+c@Fj{@MUl(Usx;OPB?-3JGw^Rq?j5PA9_zJGvns#u27i4s!z9|837*Dt+9YoJj zpj{bsD}4r48jRG^lB;gAB)1;@gJ+3tf}|>^HeF8L2z{Qz{Q80iE@d0>-A?#23w9oS z^$PQ0EP%T_%f=DdR7JAr#t%jlRA{p__Hi?jTaX9-fMT~eW(r4#7obNQD5>ut{pgq}so-JUBB0H;*pO8YU43hvhcVrVK33A0J(NC@) zFT79bV$^UhbgH*&*Vx2dpf5pgA8t5E5Rf~~qPFQ~6Whlop1>-GQr^#55ce|x$MVAE z=8%Pd3-e3ZW1a;Vqx!Hi1pt{u|9Bhe`Em0C`^B2Aa_6@fPuJMpdW+3UO1rzA^!-~` zBc;|Rv4C55C>UmY>L1-?Gm=YMlmH}`AaQ2sPDK1*W6#+cKyjsi!{Zv#f_?q9KoBqu z2;AP^2^e-02eVf~JPVzw!)V@4ehC6?%a}{!-aK6>7lv3IWty*#A6X`imNTT%ozf1` z0iWV4(mbZ*1a9A$YkAo%584V}x<1?wapi#;67UUp_0#ON$H3Trl7G|)kUND$*q0ff zp?%aQR#ui+7q`)I8 z6M(Twp%QN9ms1nx+cSzBnM^XbW>WJ$UOoRh@y5Tz<+7xyVcq*eL6wBpc%HlUYSWZc zTXsn{@N1elA_jQ3wXf-F@X52%jhK0(z+-@;TPWYSy3nkD!Om`g+8H*I5X|;}PFGT_ z1zZ568G*21TD?{7r=*LsaRhE&c>bInZ)kts>!_rtatz9|4HnempDA+5!QWYv?GufNB3=uX)O@bK8L6 zSmU7HWAH$I<~39(UT&Z_Efk05dD7iPWkZ#bMHb>6v$#)+BZY-4pFup5arjjiGD9%1!wl;?*Dg 
zUal)SEs;_kuP(us+DBMBPuPA_sOcO4gxLp>_Bw8E-?`P?yLTH2G&$rMY~}YRH`BZz zIS!hmp0+cmwya8uVcWdGX=Os&<8!TEzv+g#WIaG{f2`7n`hEJZi%_87=i>T;1K&Ic zE&%a2&+~6liFN3E6Owvyo%~*p+lkamLIIAre+b9Kl0jNvz$DO^DBYwZibXc?2h5)yyzPo7QbB)j;$UGl=Ed$I-CW01+LtePnppteBmhWI9QBXzkb2Tt z%C2pkq2FE0>=&L~9EN$~W|B#Jmi%0BwT%w1Qdq&~%yd~fue!kj`R^iae2J9x!PE8T z@};NE?HgC3>w9Ho8A}YPE&wEzqzV!=C#D+30_qGYEiBJZ;Suynry#)3!HX=UJ77`U z9(7aFJT6Uud^gK@f%JGsU1KD1krHDK28KyI_gEGu-^C1?HJbcxVU;l4%o({-RQ772aHbpO@`2OMv$7` z)XquYBt^r@`tAT?m{}giVb!?sJ3j|T^wLuq4#q2k=c!01JcbRNgZZ3D@a}IV!M+p8 zpR;%C@T&txfHIN1_d1iR_ciXa!NJ-460h$*8Uz$|gN9E$?wWa2#AKY!`FuC^R*)#NPpEs}AJx*XN zaFhodfJ(uYx~&52H2~`7@4jk&_xa=I3ev~>tg63#{d&|K)CYC3*a8dDnn+jMkwYGa zLVmdLTFJ6%5-ze6I38@G5I`S@b|>IuadsBIzmYCfO{lxlVI}L+NUNuiNH34Gqu;A7B<5_l3=mT7C1PIcLiVUv`>c4-qUx>U zZ$0A(*J{T8)?TxRF8C_?tT(S;p{^{8^rhk*yKqB>c$A+~Lhj3DLR!9@4D2TDMg7BE zrCr;$4Z*gjebk?px25F==*xmq+S&eU+b^I3^wXbyz?TPJF6lc3T>BbZ#=eXZDo1zo zB*^RVD+Lx|2VR;a+(JUUzRn_0+uCTxtK-qS7k?Os0 zz(Mvb=cqYs0hUWZ&l5mv^!#fo*Pmr%FJd*Qv`w zSiEyd8h2zEIx@z@d8^3*2 z7@i?H{)`>wesqt|#kX#-@Nc@=*v5n!pOeGbP*r}4RIu4{kYB8$PE?`=2RN?QSk(}G+;$nSptp!xZ&pEgTKsKK034N*T@^@t-DN%^T+$?S`l zFPmr2UzJ5{L{$2%-tPGLen6>`S^@IhrMZCmE7&=9E9{t8qibqQLN$p1VyfT*MF;Lk zh`pD$w1tQHx|85OD;C`uPKmeu*~7N;B`WrYX{KZBLA#~(JTDFcVm`0v9+TBK;;b!o z%pVMDT^%btLv8gQJlnVb5KfX&uhAf1aZ-Ph%Cw>p%RyVH4&zIW(toA=H*VdG#8|0u zuZoVJHQ$nZo=rQCj+0m6n(fH-HVf)D(Sd*U@>K~7eMY`;7aaJ0a^T$p;yg|Kywd?C znYP^Tzy9_%NSS)Rz4Q%GmI2W=)3@bsPVvC6o@;Q4Q-F?XPCMqBGhEH@%t&qe6LC)5 zo>9n&pju{~AWLv>a-^0{>bL?pOZZ#;c8h>WuDiFn_ETeVc{B_R;x0W!AQ9eo{oBZ% zYQnCU`7>b=H$mi6CB@w+$*+W)WhJKdPP7zN1Cr6Hg1XraTnMfJby}L(g#wNK-vc~p zXZN#*&j69CDwMn;%?_1-eb}4N=m4lWq@>5hTSAif&p!HbbBhfpCX@z)v`o3O=(e-- z0cDTb;`qVS7tLo#j0M5IU>pIEw~;oYF^T>#l369U0PzhZs(<^BzimGG#G)i$8``OCFz@}ZQ~9rvbKV77}y{RnBY zVAutLd+>A>Abg7#1LU;L-o`F^$x8hIJOCZ^F@VK_sz;zeCxYe%e z5mAkZT15L|LL12k`>IlPFcuZaOXpJ{=)HOEYVzOJ`otD0D>95{L?C=P4KU^` zO`46Xiz2~hfb_9uXMKn5uVA2if1ojxGEp+c@d&cY4=hsNcV~RgIte)(HKc 
z)z5tD!G*jtWB+RO5`z-uA4mP-QnRqV3zQsN+_MreHoPx5roXW;Ivz%%(dI@2?grPia<4$wAmW3J~(H;~C0H)m6A z$^E_VjEo36FP&%8aCVvBbm)2O`1W+335d^PStYT83X87Sk-&_2aP%(kpQni}L;RqB z%`*+}X!VUcr=MQE-bvACG4-6C``la+rFbCQV#=do7(AJBnJ)3?BiykvJVA&=%L__k z>s?zg{`|%<*>#=7IOxWmrtC=Z60SV(CC_$hXOgbzh>$_NmtMKadED`_T^vA)!TLyF z=tC@_A~$TsGGS<9&Bb3cQvjYlb`Jw!WnmbT%&4ZSl=O!Wz96t$A7%Q7u~+!W73G$l z%iPHiJ~MHZ!`l1F-9yk|Jdy!8N;^=B9=X@Y$*shj@OiL4(SzpFzm}*n_YfD4;%7Qa z^1jPR(%ZK2==qv$b^7M8D6dIxJ@${$wS^|I5#V;G`Ps@s^8o<&1ro)l=)`XCBAo>M zm9On22HZ_?T%sA4Q8~DN_d2_2U2f)4M;JR|B8OmD9w)up9-aY!T|rv>5xZ{vvzHr< zmKa>nwzCHSM9M7bZX&Tqf<0JtG>d68lv|&W(Px-9n_v;!pDwR7*8qN3*@$Bvs}Zqj zOgZkbr|z=gZ<37UmwcT-Z+D6`Un(V@zZ8JGjFkEQ3{v1%00hgT(reUx9iaN}9(~+g z|BwZ6Qwyo%tKH+~DQP$Mcbk2>`d3?<&F`MSXtpS;<4YH|m-ktir$p9`O4L<82E6`! zH`x8I0|?fJ@&VFe znmH0~`?hAh(oNv3z*|)g54WqHf(MH1vzS!=6X%FNXgQsoPIHxAQWj>eH7_wW_X5r@<CClb?*!^x4kA4r7_SIx@Z-ZLt83eECvA z2d1yUZ}{Wkc%+Db+!q=8nY_}hpyRTX=c^N*F05Rm-yZXl_59iM=+#qw+Q50DE#COv z^xY3!_oC(MBxm}A&kBnP$NtFu*R3F<->Z8~efvr4`oHdiy4OmbMc2t}N@=ez5w4T{ z+3^O-Jv-`2GP?Klr0(^}3&)gEu7|l!#|SB}q5X6!y5VS25`A=;CVWleVFjMk&+Ujt zCROtE&K3^MyFq!7^>(siupQ=zd)9-9{HaYfy59}U_I>y% zLG!b+i>Q1cQ)T-nf2s0_{$BuNAN{A|kve>6S(nV(Yi3mc%45e>JK?ANXm58`nE*WIA7yz%>_36Fq`@r9Y6uVngf8{ zWQVt_*RBAF_p=a9DXT9W@^{hCxk!FzKu;cix>whg894;I!No9V-Z#*zUA=mxxy!~C zUp;x&JbL;P6K2S$X+ur49Vih4B&$v_jkyG^R_t!>HlMCPMlbm#i{r400VuwCWwlvh z;n+0sw7OtgCx&$+cTKtLHoeXupJR?;Jo<%M2hZB%^Lq<g+;%~ov z(9GSqi-Z_3{(OsVnRfub6V2w~e)AY$z6qVaWLZDKtbCiMT@L}BN6>xs@)CSQ{~4