From 408a3b2d540f5ad838dfdc262f8b4672e668f89d Mon Sep 17 00:00:00 2001
From: fpetrini15
Date: Wed, 31 Jul 2024 15:51:16 -0700
Subject: [PATCH] Purge PA from Client Repo

---
 CMakeLists.txt | 41 +-
 README.md | 37 +-
 src/c++/CMakeLists.txt | 39 +-
 src/c++/library/CMakeLists.txt | 24 +-
 src/c++/perf_analyzer/CMakeLists.txt | 262 -
 src/c++/perf_analyzer/README.md | 195 +-
 .../perf_analyzer/base_queue_ctx_id_tracker.h | 67 -
 .../client_backend/CMakeLists.txt | 113 -
 .../client_backend/client_backend.cc | 582 --
 .../client_backend/client_backend.h | 675 --
 .../client_backend/mock_client_backend.h | 660 --
 .../client_backend/openai/CMakeLists.txt | 60 -
 .../client_backend/openai/http_client.cc | 301 -
 .../client_backend/openai/http_client.h | 172 -
 .../client_backend/openai/openai_client.cc | 319 -
 .../client_backend/openai/openai_client.h | 182 -
 .../openai/openai_client_backend.cc | 112 -
 .../openai/openai_client_backend.h | 111 -
 .../openai/openai_infer_input.cc | 103 -
 .../openai/openai_infer_input.h | 75 -
 .../tensorflow_serving/CMakeLists.txt | 122 -
 .../tensorflow_serving/CompileProto.cmake | 116 -
 .../tfserve_client_backend.cc | 200 -
 .../tfserve_client_backend.h | 150 -
 .../tensorflow_serving/tfserve_grpc_client.cc | 729 --
 .../tensorflow_serving/tfserve_grpc_client.h | 220 -
 .../tensorflow_serving/tfserve_infer_input.cc | 112 -
 .../tensorflow_serving/tfserve_infer_input.h | 76 -
 .../client_backend/torchserve/CMakeLists.txt | 58 -
 .../torchserve/torchserve_client_backend.cc | 126 -
 .../torchserve/torchserve_client_backend.h | 112 -
 .../torchserve/torchserve_http_client.cc | 409 -
 .../torchserve/torchserve_http_client.h | 175 -
 .../torchserve/torchserve_infer_input.cc | 109 -
 .../torchserve/torchserve_infer_input.h | 76 -
 .../client_backend/triton/CMakeLists.txt | 62 -
 .../triton/test_triton_client_backend.cc | 142 -
 .../triton/triton_client_backend.cc | 855 --
 .../triton/triton_client_backend.h | 344 -
 .../triton_c_api/CMakeLists.txt | 59 -
 .../triton_c_api/c_api_infer_results.h | 62 -
 .../triton_c_api/scoped_defer.cc | 53 -
 .../triton_c_api/scoped_defer.h | 44 -
 .../triton_c_api/shared_library.cc | 91 -
 .../triton_c_api/shared_library.h | 44 -
 .../triton_c_api/shared_memory_manager.cc | 208 -
 .../triton_c_api/shared_memory_manager.h | 141 -
 .../triton_c_api/triton_c_api_backend.cc | 401 -
 .../triton_c_api/triton_c_api_backend.h | 224 -
 .../triton_c_api/triton_loader.cc | 1274 ---
 .../triton_c_api/triton_loader.h | 519 --
 src/c++/perf_analyzer/command_line_parser.cc | 2017 -----
 src/c++/perf_analyzer/command_line_parser.h | 182 -
 .../concurrency_ctx_id_tracker.h | 48 -
 src/c++/perf_analyzer/concurrency_manager.cc | 193 -
 src/c++/perf_analyzer/concurrency_manager.h | 145 -
 src/c++/perf_analyzer/concurrency_worker.cc | 208 -
 src/c++/perf_analyzer/concurrency_worker.h | 122 -
 src/c++/perf_analyzer/constants.h | 51 -
 .../perf_analyzer/ctx_id_tracker_factory.h | 67 -
 src/c++/perf_analyzer/custom_load_manager.cc | 178 -
 src/c++/perf_analyzer/custom_load_manager.h | 137 -
 src/c++/perf_analyzer/data_loader.cc | 744 --
 src/c++/perf_analyzer/data_loader.h | 246 -
 src/c++/perf_analyzer/docs/README.md | 55 -
 src/c++/perf_analyzer/docs/benchmarking.md | 250 -
 src/c++/perf_analyzer/docs/cli.md | 663 --
 .../docs/inference_load_modes.md | 100 -
 src/c++/perf_analyzer/docs/input_data.md | 306 -
 src/c++/perf_analyzer/docs/install.md | 106 -
 .../docs/measurements_metrics.md | 225 -
 src/c++/perf_analyzer/docs/quick_start.md | 114 -
 src/c++/perf_analyzer/doctest.h | 7824 -----
 src/c++/perf_analyzer/fifo_ctx_id_tracker.h | 48 -
 src/c++/perf_analyzer/genai-perf/.gitignore | 1 -
 src/c++/perf_analyzer/genai-perf/README.md | 558 --
 ...ce_lengths_to_output_sequence_lengths.jpeg | Bin 45885 -> 0 bytes
 .../docs/assets/request_latency.jpeg | Bin 39579 -> 0 bytes
 .../docs/assets/time_to_first_token.jpeg | Bin 34944 -> 0 bytes
 ...first_token_vs_input_sequence_lengths.jpeg | Bin 48427 -> 0 bytes
 ...oken_latency_vs_output_token_position.jpeg | Bin 84372 -> 0 bytes
 .../perf_analyzer/genai-perf/docs/compare.md | 251 -
 .../genai-perf/docs/embeddings.md | 93 -
 .../perf_analyzer/genai-perf/docs/files.md | 129 -
 src/c++/perf_analyzer/genai-perf/docs/lora.md | 53 -
 .../genai-perf/docs/multi_modal.md | 122 -
 .../perf_analyzer/genai-perf/docs/rankings.md | 100 -
 .../perf_analyzer/genai-perf/docs/tutorial.md | 301 -
 .../genai-perf/genai_perf/.gitignore | 2 -
 .../genai-perf/genai_perf/__init__.py | 27 -
 .../genai-perf/genai_perf/constants.py | 38 -
 .../genai-perf/genai_perf/exceptions.py | 21 -
 .../export_data/console_exporter.py | 107 -
 .../genai_perf/export_data/csv_exporter.py | 119 -
 .../export_data/data_exporter_factory.py | 42 -
 .../export_data/data_exporter_interface.py | 33 -
 .../genai_perf/export_data/exporter_config.py | 77 -
 .../genai_perf/export_data/json_exporter.py | 77 -
 .../genai_perf/export_data/output_reporter.py | 61 -
 .../genai_perf/llm_inputs/__init__.py | 13 -
 .../genai_perf/llm_inputs/farewell.txt | 104 -
 .../genai_perf/llm_inputs/llm_inputs.py | 1585 ----
 .../llm_inputs/source_images/dlss.png | Bin 150094 -> 0 bytes
 .../llm_inputs/source_images/h100.jpeg | Bin 152564 -> 0 bytes
 .../llm_inputs/source_images/h200.jpeg | Bin 101670 -> 0 bytes
 .../llm_inputs/source_images/jensen.jpeg | Bin 109460 -> 0 bytes
 .../llm_inputs/synthetic_image_generator.py | 82 -
 .../llm_inputs/synthetic_prompt_generator.py | 125 -
 .../genai-perf/genai_perf/logging.py | 99 -
 .../genai-perf/genai_perf/main.py | 170 -
 .../genai-perf/genai_perf/metrics/__init__.py | 29 -
 .../genai_perf/metrics/llm_metrics.py | 108 -
 .../genai-perf/genai_perf/metrics/metrics.py | 88 -
 .../genai_perf/metrics/statistics.py | 196 -
 .../genai-perf/genai_perf/parser.py | 834 --
 .../genai-perf/genai_perf/plots/__init__.py | 26 -
 .../genai-perf/genai_perf/plots/base_plot.py | 82 -
 .../genai-perf/genai_perf/plots/box_plot.py | 77 -
 .../genai-perf/genai_perf/plots/heat_map.py | 100 -
 .../genai_perf/plots/plot_config.py | 57 -
 .../genai_perf/plots/plot_config_parser.py | 211 -
 .../genai_perf/plots/plot_manager.py | 87 -
 .../genai_perf/plots/scatter_plot.py | 82 -
 .../profile_data_parser/__init__.py | 31 -
 .../llm_profile_data_parser.py | 299 -
 .../profile_data_parser.py | 152 -
 .../genai-perf/genai_perf/tokenizer.py | 78 -
 .../genai-perf/genai_perf/utils.py | 110 -
 .../genai-perf/genai_perf/wrapper.py | 147 -
 .../perf_analyzer/genai-perf/pyproject.toml | 97 -
 .../genai-perf/tests/__init__.py | 0
 .../genai-perf/tests/test_artifacts.py | 59 -
 .../genai-perf/tests/test_cli.py | 855 -
 .../genai-perf/tests/test_console_exporter.py | 176 -
 .../genai-perf/tests/test_csv_exporter.py | 213 -
 .../tests/test_data_exporter_factory.py | 83 -
 .../genai-perf/tests/test_json_exporter.py | 274 -
 .../genai-perf/tests/test_library.py | 32 -
 .../genai-perf/tests/test_llm_inputs.py | 882 --
 .../tests/test_llm_inputs_embeddings.py | 172 -
 .../tests/test_llm_inputs_rankings.py | 182 -
 .../genai-perf/tests/test_llm_metrics.py | 106 -
 .../tests/test_llm_profile_data_parser.py | 742 -
 .../genai-perf/tests/test_metrics.py | 64 -
 .../genai-perf/tests/test_plot_configs.py | 112 -
 .../tests/test_profile_data_parser.py | 297 -
 .../tests/test_synthetic_image_generator.py | 123 -
 .../genai-perf/tests/test_tokenizer.py | 76 -
 .../genai-perf/tests/test_wrapper.py | 175 -
 src/c++/perf_analyzer/ictx_id_tracker.h | 51 -
 src/c++/perf_analyzer/idle_timer.h | 115 -
 src/c++/perf_analyzer/iinfer_data_manager.h | 63 -
 src/c++/perf_analyzer/infer_context.cc | 356 -
 src/c++/perf_analyzer/infer_context.h | 222 -
 src/c++/perf_analyzer/infer_data.h | 64 -
 src/c++/perf_analyzer/infer_data_manager.cc | 210 -
 src/c++/perf_analyzer/infer_data_manager.h | 96 -
 .../perf_analyzer/infer_data_manager_base.cc | 189 -
 .../perf_analyzer/infer_data_manager_base.h | 152 -
 .../infer_data_manager_factory.h | 88 -
 .../perf_analyzer/infer_data_manager_shm.cc | 384 -
 .../perf_analyzer/infer_data_manager_shm.h | 164 -
 src/c++/perf_analyzer/inference_profiler.cc | 1867 ----
 src/c++/perf_analyzer/inference_profiler.h | 818 --
 src/c++/perf_analyzer/ischeduler.h | 42 -
 src/c++/perf_analyzer/iworker.h | 38 -
 src/c++/perf_analyzer/load_manager.cc | 288 -
 src/c++/perf_analyzer/load_manager.h | 182 -
 src/c++/perf_analyzer/load_worker.cc | 132 -
 src/c++/perf_analyzer/load_worker.h | 159 -
 src/c++/perf_analyzer/main.cc | 48 -
 src/c++/perf_analyzer/metrics.h | 44 -
 src/c++/perf_analyzer/metrics_manager.cc | 174 -
 src/c++/perf_analyzer/metrics_manager.h | 94 -
 .../perf_analyzer/mock_concurrency_worker.h | 69 -
 src/c++/perf_analyzer/mock_data_loader.h | 98 -
 src/c++/perf_analyzer/mock_infer_context.h | 69 -
 .../perf_analyzer/mock_infer_data_manager.h | 150 -
 .../perf_analyzer/mock_inference_profiler.h | 123 -
 src/c++/perf_analyzer/mock_load_manager.h | 37 -
 src/c++/perf_analyzer/mock_model_parser.h | 78 -
 .../mock_profile_data_collector.h | 54 -
 .../mock_profile_data_exporter.h | 95 -
 .../perf_analyzer/mock_request_rate_worker.h | 79 -
 src/c++/perf_analyzer/mock_sequence_manager.h | 91 -
 src/c++/perf_analyzer/model_parser.cc | 467 -
 src/c++/perf_analyzer/model_parser.h | 254 -
 src/c++/perf_analyzer/mpi_utils.cc | 251 -
 src/c++/perf_analyzer/mpi_utils.h | 85 -
 src/c++/perf_analyzer/perf_analyzer.cc | 473 -
 src/c++/perf_analyzer/perf_analyzer.h | 202 -
 .../perf_analyzer/perf_analyzer_exception.h | 54 -
 .../perf_analyzer/perf_analyzer_unit_tests.cc | 39 -
 src/c++/perf_analyzer/perf_utils.cc | 416 -
 src/c++/perf_analyzer/perf_utils.h | 140 -
 .../periodic_concurrency_manager.cc | 122 -
 .../periodic_concurrency_manager.h | 92 -
 .../periodic_concurrency_worker.cc | 71 -
 .../periodic_concurrency_worker.h | 80 -
 .../perf_analyzer/profile_data_collector.cc | 85 -
 .../perf_analyzer/profile_data_collector.h | 122 -
 .../perf_analyzer/profile_data_exporter.cc | 302 -
 src/c++/perf_analyzer/profile_data_exporter.h | 102 -
 src/c++/perf_analyzer/rand_ctx_id_tracker.h | 58 -
 src/c++/perf_analyzer/rate_schedule.h | 66 -
 src/c++/perf_analyzer/report_writer.cc | 391 -
 src/c++/perf_analyzer/report_writer.h | 108 -
 src/c++/perf_analyzer/request_rate_manager.cc | 305 -
 src/c++/perf_analyzer/request_rate_manager.h | 172 -
 src/c++/perf_analyzer/request_rate_worker.cc | 168 -
 src/c++/perf_analyzer/request_rate_worker.h | 126 -
 src/c++/perf_analyzer/request_record.h | 101 -
 src/c++/perf_analyzer/sequence_manager.cc | 178 -
 src/c++/perf_analyzer/sequence_manager.h | 218 -
 src/c++/perf_analyzer/sequence_status.h | 51 -
 src/c++/perf_analyzer/tensor_data.h | 40 -
 .../perf_analyzer/test_command_line_parser.cc | 1904 ----
 .../perf_analyzer/test_concurrency_manager.cc | 941 --
 src/c++/perf_analyzer/test_ctx_id_tracker.cc | 146 -
 .../perf_analyzer/test_custom_load_manager.cc | 431 -
 src/c++/perf_analyzer/test_dataloader.cc | 1639 ----
 src/c++/perf_analyzer/test_idle_timer.cc | 94 -
 src/c++/perf_analyzer/test_infer_context.cc | 178 -
 .../perf_analyzer/test_inference_profiler.cc | 1132 ---
 src/c++/perf_analyzer/test_load_manager.cc | 460 -
 .../perf_analyzer/test_load_manager_base.h | 305 -
 src/c++/perf_analyzer/test_metrics_manager.cc | 137 -
 src/c++/perf_analyzer/test_model_parser.cc | 365 -
 src/c++/perf_analyzer/test_perf_utils.cc | 375 -
 .../test_profile_data_collector.cc | 161 -
 .../test_profile_data_exporter.cc | 327 -
 src/c++/perf_analyzer/test_report_writer.cc | 93 -
 .../test_request_rate_manager.cc | 2242 -----
 .../perf_analyzer/test_sequence_manager.cc | 298 -
 src/c++/perf_analyzer/test_utils.h | 112 -
 src/c++/perf_analyzer/thread_config.h | 58 -
 src/python/CMakeLists.txt | 1 -
 237 files changed, 56 insertions(+), 61041 deletions(-)
 delete mode 100644 src/c++/perf_analyzer/CMakeLists.txt
 delete mode 100644 src/c++/perf_analyzer/base_queue_ctx_id_tracker.h
 delete mode 100644 src/c++/perf_analyzer/client_backend/CMakeLists.txt
 delete mode 100644 src/c++/perf_analyzer/client_backend/client_backend.cc
 delete mode 100644 src/c++/perf_analyzer/client_backend/client_backend.h
 delete mode 100644 src/c++/perf_analyzer/client_backend/mock_client_backend.h
 delete mode 100644 src/c++/perf_analyzer/client_backend/openai/CMakeLists.txt
 delete mode 100644 src/c++/perf_analyzer/client_backend/openai/http_client.cc
 delete mode 100644 src/c++/perf_analyzer/client_backend/openai/http_client.h
 delete mode 100644 src/c++/perf_analyzer/client_backend/openai/openai_client.cc
 delete mode 100644 src/c++/perf_analyzer/client_backend/openai/openai_client.h
 delete mode 100644 src/c++/perf_analyzer/client_backend/openai/openai_client_backend.cc
 delete mode 100644 src/c++/perf_analyzer/client_backend/openai/openai_client_backend.h
 delete mode 100644 src/c++/perf_analyzer/client_backend/openai/openai_infer_input.cc
 delete mode 100644 src/c++/perf_analyzer/client_backend/openai/openai_infer_input.h
 delete mode 100644 src/c++/perf_analyzer/client_backend/tensorflow_serving/CMakeLists.txt
 delete mode 100644 src/c++/perf_analyzer/client_backend/tensorflow_serving/CompileProto.cmake
 delete mode 100644 src/c++/perf_analyzer/client_backend/tensorflow_serving/tfserve_client_backend.cc
 delete mode 100644 src/c++/perf_analyzer/client_backend/tensorflow_serving/tfserve_client_backend.h
 delete mode 100644 src/c++/perf_analyzer/client_backend/tensorflow_serving/tfserve_grpc_client.cc
 delete mode 100644 src/c++/perf_analyzer/client_backend/tensorflow_serving/tfserve_grpc_client.h
 delete mode 100644 src/c++/perf_analyzer/client_backend/tensorflow_serving/tfserve_infer_input.cc
 delete mode 100644 src/c++/perf_analyzer/client_backend/tensorflow_serving/tfserve_infer_input.h
 delete mode 100644 src/c++/perf_analyzer/client_backend/torchserve/CMakeLists.txt
 delete mode 100644 src/c++/perf_analyzer/client_backend/torchserve/torchserve_client_backend.cc
 delete mode 100644 src/c++/perf_analyzer/client_backend/torchserve/torchserve_client_backend.h
 delete mode 100644 src/c++/perf_analyzer/client_backend/torchserve/torchserve_http_client.cc
 delete mode 100644 src/c++/perf_analyzer/client_backend/torchserve/torchserve_http_client.h
 delete mode 100644 src/c++/perf_analyzer/client_backend/torchserve/torchserve_infer_input.cc
 delete mode 100644 src/c++/perf_analyzer/client_backend/torchserve/torchserve_infer_input.h
 delete mode 100644 src/c++/perf_analyzer/client_backend/triton/CMakeLists.txt
 delete mode 100644 src/c++/perf_analyzer/client_backend/triton/test_triton_client_backend.cc
 delete mode 100644 src/c++/perf_analyzer/client_backend/triton/triton_client_backend.cc
 delete mode 100644 src/c++/perf_analyzer/client_backend/triton/triton_client_backend.h
 delete mode 100644 src/c++/perf_analyzer/client_backend/triton_c_api/CMakeLists.txt
 delete mode 100644 src/c++/perf_analyzer/client_backend/triton_c_api/c_api_infer_results.h
 delete mode 100644 src/c++/perf_analyzer/client_backend/triton_c_api/scoped_defer.cc
 delete mode 100644 src/c++/perf_analyzer/client_backend/triton_c_api/scoped_defer.h
 delete mode 100644 src/c++/perf_analyzer/client_backend/triton_c_api/shared_library.cc
 delete mode 100644 src/c++/perf_analyzer/client_backend/triton_c_api/shared_library.h
 delete mode 100644 src/c++/perf_analyzer/client_backend/triton_c_api/shared_memory_manager.cc
 delete mode 100644 src/c++/perf_analyzer/client_backend/triton_c_api/shared_memory_manager.h
 delete mode 100644 src/c++/perf_analyzer/client_backend/triton_c_api/triton_c_api_backend.cc
 delete mode 100644 src/c++/perf_analyzer/client_backend/triton_c_api/triton_c_api_backend.h
 delete mode 100644 src/c++/perf_analyzer/client_backend/triton_c_api/triton_loader.cc
 delete mode 100644 src/c++/perf_analyzer/client_backend/triton_c_api/triton_loader.h
 delete mode 100644 src/c++/perf_analyzer/command_line_parser.cc
 delete mode 100644 src/c++/perf_analyzer/command_line_parser.h
 delete mode 100644 src/c++/perf_analyzer/concurrency_ctx_id_tracker.h
 delete mode 100644 src/c++/perf_analyzer/concurrency_manager.cc
 delete mode 100644 src/c++/perf_analyzer/concurrency_manager.h
 delete mode 100644 src/c++/perf_analyzer/concurrency_worker.cc
 delete mode 100644 src/c++/perf_analyzer/concurrency_worker.h
 delete mode 100644 src/c++/perf_analyzer/constants.h
 delete mode 100644 src/c++/perf_analyzer/ctx_id_tracker_factory.h
 delete mode 100644 src/c++/perf_analyzer/custom_load_manager.cc
 delete mode 100644 src/c++/perf_analyzer/custom_load_manager.h
 delete mode 100644 src/c++/perf_analyzer/data_loader.cc
 delete mode 100644 src/c++/perf_analyzer/data_loader.h
 delete mode 100644 src/c++/perf_analyzer/docs/README.md
 delete mode 100644 src/c++/perf_analyzer/docs/benchmarking.md
 delete mode 100644 src/c++/perf_analyzer/docs/cli.md
 delete mode 100644 src/c++/perf_analyzer/docs/inference_load_modes.md
 delete mode 100644 src/c++/perf_analyzer/docs/input_data.md
 delete mode 100644 src/c++/perf_analyzer/docs/install.md
 delete mode 100644 src/c++/perf_analyzer/docs/measurements_metrics.md
 delete mode 100644 src/c++/perf_analyzer/docs/quick_start.md
 delete mode 100644 src/c++/perf_analyzer/doctest.h
 delete mode 100644 src/c++/perf_analyzer/fifo_ctx_id_tracker.h
 delete mode 100644 src/c++/perf_analyzer/genai-perf/.gitignore
 delete mode 100644 src/c++/perf_analyzer/genai-perf/README.md
 delete mode 100644 src/c++/perf_analyzer/genai-perf/docs/assets/distribution_of_input_sequence_lengths_to_output_sequence_lengths.jpeg
 delete mode 100644 src/c++/perf_analyzer/genai-perf/docs/assets/request_latency.jpeg
 delete mode 100644 src/c++/perf_analyzer/genai-perf/docs/assets/time_to_first_token.jpeg
 delete mode 100644 src/c++/perf_analyzer/genai-perf/docs/assets/time_to_first_token_vs_input_sequence_lengths.jpeg
 delete mode 100644 src/c++/perf_analyzer/genai-perf/docs/assets/token-to-token_latency_vs_output_token_position.jpeg
 delete mode 100644 src/c++/perf_analyzer/genai-perf/docs/compare.md
 delete mode 100644 src/c++/perf_analyzer/genai-perf/docs/embeddings.md
 delete mode 100644 src/c++/perf_analyzer/genai-perf/docs/files.md
 delete mode 100644 src/c++/perf_analyzer/genai-perf/docs/lora.md
 delete mode 100644 src/c++/perf_analyzer/genai-perf/docs/multi_modal.md
 delete mode 100644 src/c++/perf_analyzer/genai-perf/docs/rankings.md
 delete mode 100644 src/c++/perf_analyzer/genai-perf/docs/tutorial.md
 delete mode 100644 src/c++/perf_analyzer/genai-perf/genai_perf/.gitignore
 delete mode 100644 src/c++/perf_analyzer/genai-perf/genai_perf/__init__.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/genai_perf/constants.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/genai_perf/exceptions.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/genai_perf/export_data/console_exporter.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/genai_perf/export_data/csv_exporter.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/genai_perf/export_data/data_exporter_factory.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/genai_perf/export_data/data_exporter_interface.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/genai_perf/export_data/exporter_config.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/genai_perf/export_data/json_exporter.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/genai_perf/export_data/output_reporter.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/__init__.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/farewell.txt
 delete mode 100644 src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/llm_inputs.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/source_images/dlss.png
 delete mode 100644 src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/source_images/h100.jpeg
 delete mode 100644 src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/source_images/h200.jpeg
 delete mode 100644 src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/source_images/jensen.jpeg
 delete mode 100644 src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/synthetic_image_generator.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/synthetic_prompt_generator.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/genai_perf/logging.py
 delete mode 100755 src/c++/perf_analyzer/genai-perf/genai_perf/main.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/genai_perf/metrics/__init__.py
 delete mode 100755 src/c++/perf_analyzer/genai-perf/genai_perf/metrics/llm_metrics.py
 delete mode 100755 src/c++/perf_analyzer/genai-perf/genai_perf/metrics/metrics.py
 delete mode 100755 src/c++/perf_analyzer/genai-perf/genai_perf/metrics/statistics.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/genai_perf/parser.py
 delete mode 100755 src/c++/perf_analyzer/genai-perf/genai_perf/plots/__init__.py
 delete mode 100755 src/c++/perf_analyzer/genai-perf/genai_perf/plots/base_plot.py
 delete mode 100755 src/c++/perf_analyzer/genai-perf/genai_perf/plots/box_plot.py
 delete mode 100755 src/c++/perf_analyzer/genai-perf/genai_perf/plots/heat_map.py
 delete mode 100755 src/c++/perf_analyzer/genai-perf/genai_perf/plots/plot_config.py
 delete mode 100755 src/c++/perf_analyzer/genai-perf/genai_perf/plots/plot_config_parser.py
 delete mode 100755 src/c++/perf_analyzer/genai-perf/genai_perf/plots/plot_manager.py
 delete mode 100755 src/c++/perf_analyzer/genai-perf/genai_perf/plots/scatter_plot.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/genai_perf/profile_data_parser/__init__.py
 delete mode 100755 src/c++/perf_analyzer/genai-perf/genai_perf/profile_data_parser/llm_profile_data_parser.py
 delete mode 100755 src/c++/perf_analyzer/genai-perf/genai_perf/profile_data_parser/profile_data_parser.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/genai_perf/tokenizer.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/genai_perf/utils.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/genai_perf/wrapper.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/pyproject.toml
 delete mode 100644 src/c++/perf_analyzer/genai-perf/tests/__init__.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/tests/test_artifacts.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/tests/test_cli.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/tests/test_console_exporter.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/tests/test_csv_exporter.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/tests/test_data_exporter_factory.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/tests/test_json_exporter.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/tests/test_library.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/tests/test_llm_inputs.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/tests/test_llm_inputs_embeddings.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/tests/test_llm_inputs_rankings.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/tests/test_llm_metrics.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/tests/test_llm_profile_data_parser.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/tests/test_metrics.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/tests/test_plot_configs.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/tests/test_profile_data_parser.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/tests/test_synthetic_image_generator.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/tests/test_tokenizer.py
 delete mode 100644 src/c++/perf_analyzer/genai-perf/tests/test_wrapper.py
 delete mode 100644 src/c++/perf_analyzer/ictx_id_tracker.h
 delete mode 100644 src/c++/perf_analyzer/idle_timer.h
 delete mode 100644 src/c++/perf_analyzer/iinfer_data_manager.h
 delete mode 100644 src/c++/perf_analyzer/infer_context.cc
 delete mode 100644 src/c++/perf_analyzer/infer_context.h
 delete mode 100644 src/c++/perf_analyzer/infer_data.h
 delete mode 100644 src/c++/perf_analyzer/infer_data_manager.cc
 delete mode 100644 src/c++/perf_analyzer/infer_data_manager.h
 delete mode 100644 src/c++/perf_analyzer/infer_data_manager_base.cc
 delete mode 100644 src/c++/perf_analyzer/infer_data_manager_base.h
 delete mode 100644 src/c++/perf_analyzer/infer_data_manager_factory.h
 delete mode 100644 src/c++/perf_analyzer/infer_data_manager_shm.cc
 delete mode 100644 src/c++/perf_analyzer/infer_data_manager_shm.h
 delete mode 100644 src/c++/perf_analyzer/inference_profiler.cc
 delete mode 100644 src/c++/perf_analyzer/inference_profiler.h
 delete mode 100644 src/c++/perf_analyzer/ischeduler.h
 delete mode 100644 src/c++/perf_analyzer/iworker.h
 delete mode 100644 src/c++/perf_analyzer/load_manager.cc
 delete mode 100644 src/c++/perf_analyzer/load_manager.h
 delete mode 100644 src/c++/perf_analyzer/load_worker.cc
 delete mode 100644 src/c++/perf_analyzer/load_worker.h
 delete mode 100644 src/c++/perf_analyzer/main.cc
 delete mode 100644 src/c++/perf_analyzer/metrics.h
 delete mode 100644 src/c++/perf_analyzer/metrics_manager.cc
 delete mode 100644 src/c++/perf_analyzer/metrics_manager.h
 delete mode 100644 src/c++/perf_analyzer/mock_concurrency_worker.h
 delete mode 100644 src/c++/perf_analyzer/mock_data_loader.h
 delete mode 100644 src/c++/perf_analyzer/mock_infer_context.h
 delete mode 100644 src/c++/perf_analyzer/mock_infer_data_manager.h
 delete mode 100644 src/c++/perf_analyzer/mock_inference_profiler.h
 delete mode 100644 src/c++/perf_analyzer/mock_load_manager.h
 delete mode 100644 src/c++/perf_analyzer/mock_model_parser.h
 delete mode 100644 src/c++/perf_analyzer/mock_profile_data_collector.h
 delete mode 100644 src/c++/perf_analyzer/mock_profile_data_exporter.h
 delete mode 100644 src/c++/perf_analyzer/mock_request_rate_worker.h
 delete mode 100644 src/c++/perf_analyzer/mock_sequence_manager.h
 delete mode 100644 src/c++/perf_analyzer/model_parser.cc
 delete mode 100644 src/c++/perf_analyzer/model_parser.h
 delete mode 100644 src/c++/perf_analyzer/mpi_utils.cc
 delete mode 100644 src/c++/perf_analyzer/mpi_utils.h
 delete mode 100644 src/c++/perf_analyzer/perf_analyzer.cc
 delete mode 100644 src/c++/perf_analyzer/perf_analyzer.h
 delete mode 100644 src/c++/perf_analyzer/perf_analyzer_exception.h
 delete mode 100644 src/c++/perf_analyzer/perf_analyzer_unit_tests.cc
 delete mode 100644 src/c++/perf_analyzer/perf_utils.cc
 delete mode 100644 src/c++/perf_analyzer/perf_utils.h
 delete mode 100644 src/c++/perf_analyzer/periodic_concurrency_manager.cc
 delete mode 100644 src/c++/perf_analyzer/periodic_concurrency_manager.h
 delete mode 100644 src/c++/perf_analyzer/periodic_concurrency_worker.cc
 delete mode 100644 src/c++/perf_analyzer/periodic_concurrency_worker.h
 delete mode 100644 src/c++/perf_analyzer/profile_data_collector.cc
 delete mode 100644 src/c++/perf_analyzer/profile_data_collector.h
 delete mode 100644 src/c++/perf_analyzer/profile_data_exporter.cc
 delete mode 100644 src/c++/perf_analyzer/profile_data_exporter.h
 delete mode 100644 src/c++/perf_analyzer/rand_ctx_id_tracker.h
 delete mode 100644 src/c++/perf_analyzer/rate_schedule.h
 delete mode 100644 src/c++/perf_analyzer/report_writer.cc
 delete mode 100644 src/c++/perf_analyzer/report_writer.h
 delete mode 100644 src/c++/perf_analyzer/request_rate_manager.cc
 delete mode 100644 src/c++/perf_analyzer/request_rate_manager.h
 delete mode 100644 src/c++/perf_analyzer/request_rate_worker.cc
 delete mode 100644 src/c++/perf_analyzer/request_rate_worker.h
 delete mode 100644 src/c++/perf_analyzer/request_record.h
 delete mode 100644 src/c++/perf_analyzer/sequence_manager.cc
 delete mode 100644 src/c++/perf_analyzer/sequence_manager.h
 delete mode 100644 src/c++/perf_analyzer/sequence_status.h
 delete mode 100644 src/c++/perf_analyzer/tensor_data.h
 delete mode 100644 src/c++/perf_analyzer/test_command_line_parser.cc
 delete mode 100644 src/c++/perf_analyzer/test_concurrency_manager.cc
 delete mode 100644 src/c++/perf_analyzer/test_ctx_id_tracker.cc
 delete mode 100644 src/c++/perf_analyzer/test_custom_load_manager.cc
 delete mode 100644 src/c++/perf_analyzer/test_dataloader.cc
 delete mode 100644 src/c++/perf_analyzer/test_idle_timer.cc
 delete mode 100644 src/c++/perf_analyzer/test_infer_context.cc
 delete mode 100644 src/c++/perf_analyzer/test_inference_profiler.cc
 delete mode 100644 src/c++/perf_analyzer/test_load_manager.cc
 delete mode 100644 src/c++/perf_analyzer/test_load_manager_base.h
 delete mode 100644 src/c++/perf_analyzer/test_metrics_manager.cc
 delete mode 100644 src/c++/perf_analyzer/test_model_parser.cc
 delete mode 100644 src/c++/perf_analyzer/test_perf_utils.cc
 delete mode 100644 src/c++/perf_analyzer/test_profile_data_collector.cc
 delete mode 100644 src/c++/perf_analyzer/test_profile_data_exporter.cc
 delete mode 100644 src/c++/perf_analyzer/test_report_writer.cc
 delete mode 100644 src/c++/perf_analyzer/test_request_rate_manager.cc
 delete mode 100644 src/c++/perf_analyzer/test_sequence_manager.cc
 delete mode 100644 src/c++/perf_analyzer/test_utils.h
 delete mode 100644 src/c++/perf_analyzer/thread_config.h

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 752dbe79b..be401781b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -38,17 +38,11 @@ set(CMAKE_CXX_STANDARD_REQUIRED ON)
 # Options
 #
 set(TRITON_VERSION "0.0.0" CACHE STRING "Version for the clients")
-set(PERF_ANALYZER_VERSION ${TRITON_VERSION} CACHE STRING "Build Version for Perf Analyzer")
 option(TRITON_ENABLE_CC_HTTP "Build C++ HTTP client libraries" OFF)
 option(TRITON_ENABLE_CC_GRPC "Build C++ GRPC client libraries" OFF)
 option(TRITON_ENABLE_PYTHON_HTTP "Enable Python HTTP client libraries" OFF)
 option(TRITON_ENABLE_PYTHON_GRPC "Enable Python GRPC client libraries" OFF)
 option(TRITON_ENABLE_JAVA_HTTP "Enable JAVA HTTP client libraries" OFF)
-option(TRITON_ENABLE_PERF_ANALYZER "Enable Performance Analyzer" OFF)
-option(TRITON_ENABLE_PERF_ANALYZER_C_API "Enable Performance Analyzer C API" OFF)
-option(TRITON_ENABLE_PERF_ANALYZER_TFS "Enable TensorFlow Serving support for Performance Analyzer" OFF)
-option(TRITON_ENABLE_PERF_ANALYZER_TS "Enable TorchServe support for Performance Analyzer" OFF)
-option(TRITON_ENABLE_PERF_ANALYZER_OPENAI "Enable OpenAI support for Performance Analyzer" OFF)
 option(TRITON_ENABLE_EXAMPLES "Include examples in build" OFF)
 option(TRITON_ENABLE_TESTS "Include tests in build" OFF)
 option(TRITON_ENABLE_GPU "Enable GPU support in libraries" OFF)
@@ -131,28 +125,15 @@ else()
   set(_FINDPACKAGE_PROTOBUF_CONFIG_DIR "${TRITON_THIRD_PARTY_INSTALL_PREFIX}/protobuf/${LIB_DIR}/cmake/protobuf")
 endif()
 
-if(TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_CC_GRPC OR TRITON_ENABLE_PERF_ANALYZER OR TRITON_ENABLE_PERF_ANALYZER_C_API)
+if(TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_CC_GRPC)
   set(_cc_client_depends re2)
   if(${TRITON_ENABLE_CC_HTTP})
     set(_cc_client_depends ${_cc_client_depends} curl)
   endif() # TRITON_ENABLE_CC_HTTP
 
-  if(${TRITON_ENABLE_CC_GRPC} OR ${TRITON_ENABLE_PERF_ANALYZER})
+  if(${TRITON_ENABLE_CC_GRPC})
     set(_cc_client_depends ${_cc_client_depends} grpc protobuf)
-  endif() # TRITON_ENABLE_CC_GRPC OR TRITON_ENABLE_PERF_ANALYZER
-
-  if(NOT ${TRITON_ENABLE_PERF_ANALYZER} AND ${TRITON_ENABLE_PERF_ANALYZER_C_API})
-    message(FATAL_ERROR "TRITON_ENABLE_PERF_ANALYZER_C_API=ON requires TRITON_ENABLE_PERF_ANALYZER=ON")
-  endif() # NOT TRITON_ENABLE_PERF_ANALYZER AND TRITON_ENABLE_PERF_ANALYZER_C_API
-  if(NOT ${TRITON_ENABLE_PERF_ANALYZER} AND ${TRITON_ENABLE_PERF_ANALYZER_TFS})
-    message(FATAL_ERROR "TRITON_ENABLE_PERF_ANALYZER_TFS=ON requires TRITON_ENABLE_PERF_ANALYZER=ON")
-  endif() # NOT TRITON_ENABLE_PERF_ANALYZER AND TRITON_ENABLE_PERF_ANALYZER_TFS
-  if(NOT ${TRITON_ENABLE_PERF_ANALYZER} AND ${TRITON_ENABLE_PERF_ANALYZER_TS})
-    message(FATAL_ERROR "TRITON_ENABLE_PERF_ANALYZER_TS=ON requires TRITON_ENABLE_PERF_ANALYZER=ON")
-  endif() # NOT TRITON_ENABLE_PERF_ANALYZER AND TRITON_ENABLE_PERF_ANALYZER_TS
-  if(NOT ${TRITON_ENABLE_PERF_ANALYZER} AND ${TRITON_ENABLE_PERF_ANALYZER_OPENAI})
-    message(FATAL_ERROR "TRITON_ENABLE_PERF_ANALYZER_OPENAI=ON requires TRITON_ENABLE_PERF_ANALYZER=ON")
-  endif() # NOT TRITON_ENABLE_PERF_ANALYZER AND TRITON_ENABLE_PERF_ANALYZER_OPENAI
+  endif() # TRITON_ENABLE_CC_GRPC
 
 ExternalProject_Add(cc-clients
   PREFIX cc-clients
@@ -172,14 +153,8 @@ if(TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_CC_GRPC OR TRITON_ENABLE_PERF_ANALYZER
       -DTRITON_REPO_ORGANIZATION:STRING=${TRITON_REPO_ORGANIZATION}
       -DTRITON_COMMON_REPO_TAG:STRING=${TRITON_COMMON_REPO_TAG}
       -DTRITON_CORE_REPO_TAG:STRING=${TRITON_CORE_REPO_TAG}
-      -DPERF_ANALYZER_VERSION:STRING=${PERF_ANALYZER_VERSION}
       -DTRITON_ENABLE_CC_HTTP:BOOL=${TRITON_ENABLE_CC_HTTP}
       -DTRITON_ENABLE_CC_GRPC:BOOL=${TRITON_ENABLE_CC_GRPC}
-      -DTRITON_ENABLE_PERF_ANALYZER:BOOL=${TRITON_ENABLE_PERF_ANALYZER}
-      -DTRITON_ENABLE_PERF_ANALYZER_C_API:BOOL=${TRITON_ENABLE_PERF_ANALYZER_C_API}
-      -DTRITON_ENABLE_PERF_ANALYZER_TFS:BOOL=${TRITON_ENABLE_PERF_ANALYZER_TFS}
-      -DTRITON_ENABLE_PERF_ANALYZER_TS:BOOL=${TRITON_ENABLE_PERF_ANALYZER_TS}
-      -DTRITON_ENABLE_PERF_ANALYZER_OPENAI:BOOL=${TRITON_ENABLE_PERF_ANALYZER_OPENAI}
       -DTRITON_ENABLE_EXAMPLES:BOOL=${TRITON_ENABLE_EXAMPLES}
       -DTRITON_ENABLE_TESTS:BOOL=${TRITON_ENABLE_TESTS}
       -DTRITON_ENABLE_GPU:BOOL=${TRITON_ENABLE_GPU}
@@ -189,16 +164,13 @@ if(TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_CC_GRPC OR TRITON_ENABLE_PERF_ANALYZER
       -DCMAKE_INSTALL_PREFIX:PATH=${TRITON_INSTALL_PREFIX}
     DEPENDS ${_cc_client_depends}
   )
-endif() # TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_CC_GRPC OR TRITON_ENABLE_PERF_ANALYZER
+endif() # TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_CC_GRPC
 
 if(TRITON_ENABLE_PYTHON_HTTP OR TRITON_ENABLE_PYTHON_GRPC)
   set(_py_client_depends re2)
   if(${TRITON_ENABLE_PYTHON_GRPC})
     set(_py_client_depends ${_py_client_depends} grpc protobuf)
   endif() # TRITON_ENABLE_PYTHON_GRPC
-  if(${TRITON_ENABLE_PERF_ANALYZER})
-    set(_py_client_depends ${_py_client_depends} cc-clients)
-  endif() # TRITON_ENABLE_PERF_ANALYZER
 
   ExternalProject_Add(python-clients
     PREFIX python-clients
@@ -219,11 +191,6 @@ if(TRITON_ENABLE_PYTHON_HTTP OR TRITON_ENABLE_PYTHON_GRPC)
       -DTRITON_VERSION:STRING=${TRITON_VERSION}
       -DTRITON_ENABLE_PYTHON_HTTP:BOOL=${TRITON_ENABLE_PYTHON_HTTP}
       -DTRITON_ENABLE_PYTHON_GRPC:BOOL=${TRITON_ENABLE_PYTHON_GRPC}
-      -DTRITON_ENABLE_PERF_ANALYZER:BOOL=${TRITON_ENABLE_PERF_ANALYZER}
-      -DTRITON_ENABLE_PERF_ANALYZER_C_API:BOOL=${TRITON_ENABLE_PERF_ANALYZER_C_API}
-      -DTRITON_ENABLE_PERF_ANALYZER_TFS:BOOL=${TRITON_ENABLE_PERF_ANALYZER_TFS}
-      -DTRITON_ENABLE_PERF_ANALYZER_TS:BOOL=${TRITON_ENABLE_PERF_ANALYZER_TS}
-      -DTRITON_ENABLE_PERF_ANALYZER_OPENAI:BOOL=${TRITON_ENABLE_PERF_ANALYZER_OPENAI}
       -DTRITON_ENABLE_EXAMPLES:BOOL=${TRITON_ENABLE_EXAMPLES}
       -DTRITON_ENABLE_TESTS:BOOL=${TRITON_ENABLE_TESTS}
       -DTRITON_PACKAGE_PERF_ANALYZER:BOOL=${TRITON_PACKAGE_PERF_ANALYZER}
diff --git a/README.md b/README.md
index ea710bbb7..bfe36fb77 100644
--- a/README.md
+++ b/README.md
@@ -146,24 +146,9 @@ The components of the install packages are:
   * grpc [ `service_pb2`, `service_pb2_grpc`, `model_config_pb2` ]
   * utils [ linux distribution will include `shared_memory` and `cuda_shared_memory`]
 
-The Linux version of the package also includes the
-[perf_analyzer](src/c++/perf_analyzer/README.md)
-binary. The perf_analyzer binary is built on Ubuntu 20.04 and may not
-run on other Linux distributions. To run the perf_analyzer the
-following dependency must be installed:
-
-```bash
-$ sudo apt update
-$ sudo apt install libb64-dev
-```
-
-To reiterate, the installation on windows will not include perf_analyzer
-nor shared_memory/cuda_shared_memory components.
-
 ### Download From GitHub
 
-The client libraries and the perf_analyzer executable can be
-downloaded from the [Triton GitHub release
+The client libraries can be downloaded from the [Triton GitHub release
 page](https://github.com/triton-inference-server/server/releases)
 corresponding to the release you are interested in. The client
 libraries are found in the "Assets" section of the release page in a
@@ -186,15 +171,6 @@ include/, the Python wheel files in python/, and the jar
 files in java/. The bin/ and python/ directories contain the built
 examples that you can learn more about below.
 
-The perf_analyzer binary is built on Ubuntu 20.04 and may not run on
-other Linux distributions. To use the C++ libraries or perf_analyzer
-executable you must install some dependencies.
-
-```bash
-$ apt-get update
-$ apt-get install curl libcurl4-openssl-dev libb64-dev
-```
-
 ### Download Docker Image From NGC
 
 A Docker image containing the client libraries and examples is
@@ -254,17 +230,6 @@ because Triton on Windows does not yet support all the build options.
 
 Use *cmake* to configure the build. You should adjust the flags depending on
 the components of Triton Client you are working and would like to build.
-For example, if you want to build Perf Analyzer with Triton C API, you can use \
-`-DTRITON_ENABLE_PERF_ANALYZER=ON -DTRITON_ENABLE_PERF_ANALYZER_C_API=ON`. You can
-also use `TRITON_ENABLE_PERF_ANALYZER_TFS` and `TRITON_ENABLE_PERF_ANALYZER_TS` flags
-to enable/disable support for TensorFlow Serving and TorchServe backend respectively in perf analyzer. \
-The following command demonstrate how to build client with all the features:
-
-```
-$ mkdir build
-$ cd build
-$ cmake -DCMAKE_INSTALL_PREFIX=`pwd`/install -DTRITON_ENABLE_CC_HTTP=ON -DTRITON_ENABLE_CC_GRPC=ON -DTRITON_ENABLE_PERF_ANALYZER=ON -DTRITON_ENABLE_PERF_ANALYZER_C_API=ON -DTRITON_ENABLE_PERF_ANALYZER_TFS=ON -DTRITON_ENABLE_PERF_ANALYZER_TS=ON -DTRITON_ENABLE_PYTHON_HTTP=ON -DTRITON_ENABLE_PYTHON_GRPC=ON -DTRITON_ENABLE_JAVA_HTTP=ON -DTRITON_ENABLE_GPU=ON -DTRITON_ENABLE_EXAMPLES=ON -DTRITON_ENABLE_TESTS=ON ..
-```
 
 If you are building on a release branch (or on a development branch
 that is based off of a release branch), then you must also use
diff --git a/src/c++/CMakeLists.txt b/src/c++/CMakeLists.txt
index a54253172..71c433850 100644
--- a/src/c++/CMakeLists.txt
+++ b/src/c++/CMakeLists.txt
@@ -39,7 +39,6 @@ set(CMAKE_CXX_STANDARD_REQUIRED ON)
 #
 option(TRITON_ENABLE_CC_HTTP "Build C++ HTTP client libraries" OFF)
 option(TRITON_ENABLE_CC_GRPC "Build C++ GRPC client libraries" OFF)
-option(TRITON_ENABLE_PERF_ANALYZER "Enable Performance Analyzer" OFF)
 option(TRITON_ENABLE_EXAMPLES "Include examples in build" OFF)
 option(TRITON_ENABLE_TESTS "Include tests in build" OFF)
 option(TRITON_ENABLE_GPU "Enable GPU support in libraries" OFF)
@@ -71,26 +70,16 @@ FetchContent_Declare(
   URL https://github.com/google/googletest/archive/9406a60c7839052e4944ea4dbc8344762a89f9bd.zip
 )
 
-if(TRITON_ENABLE_CC_GRPC OR TRITON_ENABLE_PERF_ANALYZER)
+if(TRITON_ENABLE_CC_GRPC)
   set(TRITON_COMMON_ENABLE_PROTOBUF ON)
   set(TRITON_COMMON_ENABLE_GRPC ON)
+endif() # TRITON_ENABLE_CC_GRPC
 
-  if(TRITON_ENABLE_PERF_ANALYZER)
-    FetchContent_Declare(
-      repo-core
-      GIT_REPOSITORY ${TRITON_REPO_ORGANIZATION}/core.git
-      GIT_TAG ${TRITON_CORE_REPO_TAG}
-      GIT_SHALLOW ON
-    )
-    FetchContent_MakeAvailable(repo-core)
-  endif() # TRITON_ENABLE_PERF_ANALYZER
-endif() # TRITON_ENABLE_CC_GRPC OR TRITON_ENABLE_PERF_ANALYZER
-
-if(NOT TRITON_ENABLE_PERF_ANALYZER AND NOT TRITON_ENABLE_CC_HTTP AND NOT TRITON_ENABLE_EXAMPLES)
+if(NOT TRITON_ENABLE_CC_HTTP AND NOT TRITON_ENABLE_EXAMPLES)
   set(TRITON_COMMON_ENABLE_JSON OFF)
 endif()
 
-if(TRITON_ENABLE_TESTS OR TRITON_ENABLE_PERF_ANALYZER)
+if(TRITON_ENABLE_TESTS)
   FetchContent_MakeAvailable(googletest)
 endif()
 FetchContent_MakeAvailable(repo-common)
@@ -111,33 +100,33 @@ endif() # TRITON_ENABLE_GPU
 #
 # libcurl
 #
-if(TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_PERF_ANALYZER)
+if(TRITON_ENABLE_CC_HTTP)
   find_package(CURL REQUIRED)
   message(STATUS "Using curl ${CURL_VERSION}")
-endif() # TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_PERF_ANALYZER
+endif() # TRITON_ENABLE_CC_HTTP
 
 #
 # Protobuf
 #
-if(TRITON_ENABLE_CC_GRPC OR TRITON_ENABLE_PERF_ANALYZER)
+if(TRITON_ENABLE_CC_GRPC)
   set(protobuf_MODULE_COMPATIBLE TRUE CACHE BOOL "protobuf_MODULE_COMPATIBLE" FORCE)
   find_package(Protobuf CONFIG REQUIRED)
   message(STATUS "Using protobuf ${Protobuf_VERSION}")
   include_directories(${Protobuf_INCLUDE_DIRS})
-endif() # TRITON_ENABLE_CC_GRPC OR TRITON_ENABLE_PERF_ANALYZER
+endif() # TRITON_ENABLE_CC_GRPC
 
 #
 # GRPC
 #
-if(TRITON_ENABLE_CC_GRPC OR TRITON_ENABLE_PERF_ANALYZER)
+if(TRITON_ENABLE_CC_GRPC)
   find_package(gRPC CONFIG REQUIRED)
   message(STATUS "Using gRPC ${gRPC_VERSION}")
   include_directories($)
-endif() # TRITON_ENABLE_CC_GRPC OR TRITON_ENABLE_PERF_ANALYZER
+endif() # TRITON_ENABLE_CC_GRPC
 
-if(TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_CC_GRPC OR TRITON_ENABLE_PERF_ANALYZER)
+if(TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_CC_GRPC)
   add_subdirectory(library)
-endif() # TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_CC_GRPC OR TRITON_ENABLE_PERF_ANALYZER
+endif() # TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_CC_GRPC
 
 if(TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_CC_GRPC)
   if(TRITON_ENABLE_EXAMPLES)
@@ -148,7 +137,3 @@ if(TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_CC_GRPC)
     add_subdirectory(tests)
   endif() # TRITON_ENABLE_TESTS
 endif() # TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_CC_GRPC
-
-if(TRITON_ENABLE_PERF_ANALYZER)
-  add_subdirectory(perf_analyzer)
-endif() # TRITON_ENABLE_PERF_ANALYZER
diff --git a/src/c++/library/CMakeLists.txt b/src/c++/library/CMakeLists.txt
index 7a62971e5..a0ed94374 100644
--- a/src/c++/library/CMakeLists.txt
+++ b/src/c++/library/CMakeLists.txt
@@ -45,7 +45,7 @@ target_include_directories(
 #
 # json_utils
 #
-if(TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_PERF_ANALYZER OR TRITON_ENABLE_EXAMPLES)
+if(TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_EXAMPLES)
   find_package(RapidJSON CONFIG REQUIRED)
   add_library(
       json-utils-library EXCLUDE_FROM_ALL OBJECT
@@ -111,7 +111,7 @@ if(TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_PERF_ANALYZER OR TRITON_ENABLE_EXAMPLE
     RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
   )
 
-endif() # TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_PERF_ANALYZER OR TRITON_ENABLE_EXAMPLES
+endif() # TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_EXAMPLES
 
 #
 # shm_utils
@@ -176,7 +176,7 @@ install(
   RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
 )
 
-if(TRITON_ENABLE_CC_GRPC OR TRITON_ENABLE_PERF_ANALYZER)
+if(TRITON_ENABLE_CC_GRPC)
   #
   # libgrpcclient.so and libgrpcclient_static.a
   #
@@ -350,9 +350,9 @@ if(TRITON_ENABLE_CC_GRPC OR TRITON_ENABLE_PERF_ANALYZER)
       ${CMAKE_CURRENT_SOURCE_DIR}/grpc_client.h
     DESTINATION include
   )
-endif() # TRITON_ENABLE_CC_GRPC OR TRITON_ENABLE_PERF_ANALYZER
+endif() # TRITON_ENABLE_CC_GRPC
 
-if(TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_PERF_ANALYZER)
+if(TRITON_ENABLE_CC_HTTP)
   if(${TRITON_ENABLE_ZLIB})
     find_package(ZLIB REQUIRED)
   endif() # TRITON_ENABLE_ZLIB
@@ -494,9 +494,9 @@ if(TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_PERF_ANALYZER)
       ${CMAKE_CURRENT_SOURCE_DIR}/http_client.h
     DESTINATION include
   )
-endif() # TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_PERF_ANALYZER
+endif() # TRITON_ENABLE_CC_HTTP
 
-if(TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_CC_GRPC OR TRITON_ENABLE_PERF_ANALYZER)
+if(TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_CC_GRPC)
   install(
     FILES
       ${CMAKE_CURRENT_SOURCE_DIR}/common.h
@@ -508,7 +508,7 @@ if(TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_CC_GRPC OR TRITON_ENABLE_PERF_ANALYZER
   include(GNUInstallDirs)
   set(INSTALL_CONFIGDIR ${CMAKE_INSTALL_LIBDIR}/cmake/TritonClient)
 
-  if(TRITON_ENABLE_CC_GRPC OR TRITON_ENABLE_PERF_ANALYZER)
+  if(TRITON_ENABLE_CC_GRPC)
     install(
       TARGETS
         grpcclient
@@ -519,9 +519,9 @@ if(TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_CC_GRPC OR TRITON_ENABLE_PERF_ANALYZER
       ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
       RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
     )
-  endif() # TRITON_ENABLE_CC_GRPC OR TRITON_ENABLE_PERF_ANALYZER
+  endif() # TRITON_ENABLE_CC_GRPC
 
-  if(TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_PERF_ANALYZER)
+  if(TRITON_ENABLE_CC_HTTP)
     install(
       TARGETS
         httpclient
@@ -532,7 +532,7 @@ if(TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_CC_GRPC OR TRITON_ENABLE_PERF_ANALYZER
       ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
       RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
     )
-  endif() # TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_PERF_ANALYZER
+  endif() # TRITON_ENABLE_CC_HTTP
 
   install(
     EXPORT
@@ -573,4 +573,4 @@ if(TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_CC_GRPC OR TRITON_ENABLE_PERF_ANALYZER
 
 export(PACKAGE TritonClient)
 
-endif() # TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_CC_GRPC OR TRITON_ENABLE_PERF_ANALYZER
+endif() # TRITON_ENABLE_CC_HTTP OR TRITON_ENABLE_CC_GRPC
diff --git a/src/c++/perf_analyzer/CMakeLists.txt b/src/c++/perf_analyzer/CMakeLists.txt
deleted file mode 100644
index b81795e38..000000000
--- a/src/c++/perf_analyzer/CMakeLists.txt
+++ /dev/null
@@ -1,262 +0,0 @@
-# Copyright 2020-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#  * Redistributions of source code must retain the above copyright
-#    notice, this list of conditions and the following disclaimer.
-#  * Redistributions in binary form must reproduce the above copyright
-#    notice, this list of conditions and the following disclaimer in the
-#    documentation and/or other materials provided with the distribution.
-#  * Neither the name of NVIDIA CORPORATION nor the names of its
-#    contributors may be used to endorse or promote products derived
-#    from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-cmake_minimum_required (VERSION 3.18)
-
-if(WIN32)
-  message("perf_analyzer is not currently supported on Windows because "
-          "is requires functionalities that are UNIX specific.")
-else()
-
-add_subdirectory(client_backend)
-
-find_package(Git REQUIRED)
-
-execute_process(WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
-                COMMAND "${GIT_EXECUTABLE}" log -n 1 --abbrev-commit --format=format:%h
-                RESULT_VARIABLE RETURN_CODE
-                OUTPUT_VARIABLE GIT_SHA)
-if(NOT RETURN_CODE EQUAL "0")
-  set(GIT_SHA "unknown")
-endif()
-
-set(
-  PERF_ANALYZER_SRCS
-  command_line_parser.cc
-  perf_analyzer.cc
-  model_parser.cc
-  perf_utils.cc
-  load_manager.cc
-  data_loader.cc
-  concurrency_manager.cc
-  request_rate_manager.cc
-  load_worker.cc
-  concurrency_worker.cc
-  request_rate_worker.cc
-  custom_load_manager.cc
-  infer_context.cc
-  inference_profiler.cc
-  report_writer.cc
-  mpi_utils.cc
-  metrics_manager.cc
-  infer_data_manager_base.cc
-  infer_data_manager.cc
-  infer_data_manager_shm.cc
-  sequence_manager.cc
-  profile_data_collector.cc
-  profile_data_exporter.cc
-  periodic_concurrency_manager.cc
-  periodic_concurrency_worker.cc
-)
-
-set(
-  PERF_ANALYZER_HDRS
-  command_line_parser.h
-  perf_analyzer.h
-  model_parser.h
-  perf_utils.h
-  load_manager.h
-  data_loader.h
-  concurrency_manager.h
-  request_rate_manager.h
-  custom_load_manager.h
-  iworker.h
-  load_worker.h
-  request_rate_worker.h
-  concurrency_worker.h
-  infer_context.h
-  inference_profiler.h
-  report_writer.h
-  mpi_utils.h
-  doctest.h
-  constants.h
-  metrics.h
-  metrics_manager.h
-  infer_data_manager_factory.h
-  iinfer_data_manager.h
-  infer_data_manager.h
-  infer_data_manager_shm.h
-  infer_data_manager_base.h
-  infer_data.h
-  sequence_manager.h
-  sequence_status.h
-  ictx_id_tracker.h
-  concurrency_ctx_id_tracker.h
-  fifo_ctx_id_tracker.h
-  rand_ctx_id_tracker.h
-  request_record.h
-  profile_data_collector.h
-  profile_data_exporter.h
-  periodic_concurrency_manager.h
-  periodic_concurrency_worker.h
-  thread_config.h
-)
-
-add_executable(
-  perf_analyzer
-  main.cc
-  ${PERF_ANALYZER_SRCS}
-  ${PERF_ANALYZER_HDRS}
-  $
-)
-target_link_libraries(
-  perf_analyzer
-  PRIVATE
-    client-backend-library
-    -lb64
-    ${CMAKE_DL_LIBS}
-)
-
-target_compile_definitions(
-  perf_analyzer
-  PRIVATE
-    PERF_ANALYZER_VERSION=${PERF_ANALYZER_VERSION}
-    GIT_SHA=${GIT_SHA}
-)
-
-# If gpu is enabled then compile with CUDA dependencies
-if(TRITON_ENABLE_GPU)
-  target_compile_definitions(
-    perf_analyzer
-    PUBLIC TRITON_ENABLE_GPU=1
-  )
-
-  target_link_libraries(
-    perf_analyzer
-    PRIVATE CUDA::cudart
-  )
-endif()
-
-if(TRITON_ENABLE_PERF_ANALYZER_C_API)
-  target_compile_definitions(
-    client-backend-library
-    PUBLIC TRITON_ENABLE_PERF_ANALYZER_C_API=1
-  )
-endif()
-
-if(TRITON_ENABLE_PERF_ANALYZER_TFS)
-  target_compile_definitions(
-    client-backend-library
-    PUBLIC TRITON_ENABLE_PERF_ANALYZER_TFS=1
-  )
-endif()
-
-if(TRITON_ENABLE_PERF_ANALYZER_TS)
-  target_compile_definitions(
-    client-backend-library
-    PUBLIC TRITON_ENABLE_PERF_ANALYZER_TS=1
-  )
-endif()
-
-if(TRITON_ENABLE_PERF_ANALYZER_OPENAI)
-  target_compile_definitions(
-    client-backend-library
-    PUBLIC TRITON_ENABLE_PERF_ANALYZER_OPENAI=1
-  )
-endif()
-
-install(
-  TARGETS perf_analyzer
-  RUNTIME DESTINATION bin
-)
-
-target_compile_definitions(perf_analyzer PUBLIC DOCTEST_CONFIG_DISABLE)
-
-# Creating perf_client link to perf_analyzer binary for backwards compatibility.
-install(CODE "execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink ./perf_analyzer perf_client
-  WORKING_DIRECTORY ${CMAKE_INSTALL_PREFIX}/bin/)")
-install(CODE "message(\"-- Created symlink: perf_client -> ./perf_analyzer\")")
-
-
-
-set(PERF_ANALYZER_UNIT_TESTS_SRCS ${PERF_ANALYZER_SRCS})
-list(PREPEND PERF_ANALYZER_UNIT_TESTS_SRCS perf_analyzer_unit_tests.cc)
-set(PERF_ANALYZER_UNIT_TESTS_HDRS ${PERF_ANALYZER_HDRS})
-
-add_executable(
-  perf_analyzer_unit_tests
-  ${PERF_ANALYZER_UNIT_TESTS_SRCS}
-  ${PERF_ANALYZER_UNIT_TESTS_HDRS}
-  mock_inference_profiler.h
-  mock_model_parser.h
-  test_utils.h
-  client_backend/mock_client_backend.h
-  mock_concurrency_worker.h
-  mock_data_loader.h
-  mock_infer_context.h
-  mock_infer_data_manager.h
-  mock_request_rate_worker.h
-  mock_sequence_manager.h
-  mock_profile_data_collector.h
-  mock_profile_data_exporter.h
-  test_dataloader.cc
-  test_inference_profiler.cc
-  test_command_line_parser.cc
-  test_idle_timer.cc
-  test_load_manager_base.h
-  test_load_manager.cc
-  test_model_parser.cc
-  test_metrics_manager.cc
-  test_perf_utils.cc
-  test_report_writer.cc
-  client_backend/triton/test_triton_client_backend.cc
-  test_request_rate_manager.cc
-  test_concurrency_manager.cc
-  test_custom_load_manager.cc
-  test_sequence_manager.cc
-  test_infer_context.cc
-  test_ctx_id_tracker.cc
-  test_profile_data_collector.cc
-  test_profile_data_exporter.cc
-  $
-)
-
-# -Wno-write-strings is needed for the unit tests in order to statically create
-# input argv cases in the CommandLineParser unit test
-#
-set_target_properties(perf_analyzer_unit_tests
-  PROPERTIES COMPILE_FLAGS "-Wno-write-strings")
-
-target_link_libraries(
-  perf_analyzer_unit_tests
-  PRIVATE
-    gmock
-    client-backend-library
-    -lb64
-)
-
-target_include_directories(
-  perf_analyzer_unit_tests
-  PRIVATE
-    client_backend
-)
-
-install(
-  TARGETS perf_analyzer_unit_tests
-  RUNTIME DESTINATION bin
-)
-
-endif()
diff --git a/src/c++/perf_analyzer/README.md b/src/c++/perf_analyzer/README.md
index e910f4663..1686f99f5 100644
--- a/src/c++/perf_analyzer/README.md
+++ b/src/c++/perf_analyzer/README.md
@@ -1,171 +1,30 @@
-# Triton Performance Analyzer
-
-Triton Performance Analyzer is CLI tool which can help you optimize the
-inference performance of models running on Triton Inference Server by measuring
-changes in performance as you experiment with different optimization strategies.
-
-<br>
-
-# Features
-
-### Inference Load Modes
-
-- [Concurrency Mode](docs/inference_load_modes.md#concurrency-mode) simlulates
-  load by maintaining a specific concurrency of outgoing requests to the
-  server
-
-- [Request Rate Mode](docs/inference_load_modes.md#request-rate-mode) simulates
-  load by sending consecutive requests at a specific rate to the server
-
-- [Custom Interval Mode](docs/inference_load_modes.md#custom-interval-mode)
-  simulates load by sending consecutive requests at specific intervals to the
-  server
-
-### Performance Measurement Modes
-
-- [Time Windows Mode](docs/measurements_metrics.md#time-windows) measures model
-  performance repeatedly over a specific time interval until performance has
-  stabilized
-
-- [Count Windows Mode](docs/measurements_metrics.md#count-windows) measures
-  model performance repeatedly over a specific number of requests until
-  performance has stabilized
-
-### Other Features
-
-- [Sequence Models](https://github.com/triton-inference-server/server/blob/main/docs/user_guide/architecture.md#stateful-models),
-  [Ensemble Models](https://github.com/triton-inference-server/server/blob/main/docs/user_guide/architecture.md#ensemble-models),
-  and
-  [Decoupled Models](https://github.com/triton-inference-server/server/blob/main/docs/user_guide/decoupled_models.md)
-  can be profiled in addition to standard/stateless/coupled models
-
-- [Input Data](docs/input_data.md) to model inferences can be auto-generated or
-  specified as well as verifying output
-
-- [TensorFlow Serving](docs/benchmarking.md#benchmarking-tensorflow-serving) and
-  [TorchServe](docs/benchmarking.md#benchmarking-torchserve) can be used as the
-  inference server in addition to the default Triton server
-
-<br>
-
-# Quick Start
-
-The steps below will guide you on how to start using Perf Analyzer.
-
-### Step 1: Start Triton Container
-
-```bash
-export RELEASE=<yy.mm> # e.g. to use the release from the end of February of 2023, do `export RELEASE=23.02`
-
-docker pull nvcr.io/nvidia/tritonserver:${RELEASE}-py3
-
-docker run --gpus all --rm -it --net host nvcr.io/nvidia/tritonserver:${RELEASE}-py3
-```
-
-### Step 2: Download `simple` Model
-
-```bash
-# inside triton container
-git clone --depth 1 https://github.com/triton-inference-server/server
-
-mkdir model_repository ; cp -r server/docs/examples/model_repository/simple model_repository
-```
-
-### Step 3: Start Triton Server
-
-```bash
-# inside triton container
-tritonserver --model-repository $(pwd)/model_repository &> server.log &
-
-# confirm server is ready, look for 'HTTP/1.1 200 OK'
-curl -v localhost:8000/v2/health/ready
-
-# detach (CTRL-p CTRL-q)
-```
-
-### Step 4: Start Triton SDK Container
-
-```bash
-docker pull nvcr.io/nvidia/tritonserver:${RELEASE}-py3-sdk
-
-docker run --gpus all --rm -it --net host nvcr.io/nvidia/tritonserver:${RELEASE}-py3-sdk
-```
-
-### Step 5: Run Perf Analyzer
-
-```bash
-# inside sdk container
-perf_analyzer -m simple
-```
-
-See the full [quick start guide](docs/quick_start.md) for additional tips on
-how to analyze output.
-
-<br>
-
-# Documentation
-
-- [Installation](docs/install.md)
-- [Perf Analyzer CLI](docs/cli.md)
-- [Inference Load Modes](docs/inference_load_modes.md)
-- [Input Data](docs/input_data.md)
-- [Measurements & Metrics](docs/measurements_metrics.md)
-- [Benchmarking](docs/benchmarking.md)
-
-<br>
-
-# Contributing
-
-Contributions to Triton Perf Analyzer are more than welcome. To contribute
-please review the [contribution
-guidelines](https://github.com/triton-inference-server/server/blob/main/CONTRIBUTING.md),
-then fork and create a pull request.
-
-<br>
-
-# Reporting problems, asking questions
-
-We appreciate any feedback, questions or bug reporting regarding this
-project. When help with code is needed, follow the process outlined in
-the Stack Overflow (https://stackoverflow.com/help/mcve)
-document. Ensure posted examples are:
-
-- minimal - use as little code as possible that still produces the
-  same problem
-
-- complete - provide all parts needed to reproduce the problem. Check
-  if you can strip external dependency and still show the problem. The
-  less time we spend on reproducing problems the more time we have to
-  fix it
-
-- verifiable - test the code you're about to provide to make sure it
-  reproduces the problem. Remove all other problems that are not
-  related to your request/question.
+Perf Analyzer documentation has been relocated to
+[here](https://github.com/triton-inference-server/perf_analyzer/blob/main/README.md).
diff --git a/src/c++/perf_analyzer/base_queue_ctx_id_tracker.h b/src/c++/perf_analyzer/base_queue_ctx_id_tracker.h
deleted file mode 100644
index ba0f17813..000000000
--- a/src/c++/perf_analyzer/base_queue_ctx_id_tracker.h
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#pragma once
-
-#include <queue>
-
-#include "ictx_id_tracker.h"
-
-namespace triton { namespace perfanalyzer {
-
-// Base class for CtxIdTrackers that track available IDs via a queue
-//
-class BaseQueueCtxIdTracker : public ICtxIdTracker {
- public:
-  BaseQueueCtxIdTracker() = default;
-
-  void Restore(size_t id) override { free_ctx_ids_.push(id); }
-
-  size_t Get() override
-  {
-    if (!IsAvailable()) {
-      throw std::runtime_error("free ctx id list is empty");
-    }
-
-    size_t ctx_id = free_ctx_ids_.front();
-    free_ctx_ids_.pop();
-    return ctx_id;
-  }
-
-  bool IsAvailable() override { return free_ctx_ids_.size() > 0; }
-
- protected:
-  std::queue<size_t> free_ctx_ids_;
-
-  // Erase all entries in the tracking queue
-  //
-  void Clear()
-  {
-    std::queue<size_t> empty;
-    std::swap(free_ctx_ids_, empty);
-  }
-};
-
-}};  // namespace triton::perfanalyzer
diff --git a/src/c++/perf_analyzer/client_backend/CMakeLists.txt b/src/c++/perf_analyzer/client_backend/CMakeLists.txt
deleted file mode 100644
index 2c780ee22..000000000
--- a/src/c++/perf_analyzer/client_backend/CMakeLists.txt
+++ /dev/null
@@ -1,113 +0,0 @@
-# Copyright 2020-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#  * Redistributions of source code must retain the above copyright
-#    notice, this list of conditions and the following disclaimer.
-#  * Redistributions in binary form must reproduce the above copyright
-#    notice, this list of conditions and the following disclaimer in the
-#    documentation and/or other materials provided with the distribution.
-#  * Neither the name of NVIDIA CORPORATION nor the names of its
-#    contributors may be used to endorse or promote products derived
-#    from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
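The `BaseQueueCtxIdTracker` removed above was the building block for the concrete trackers deleted alongside it, such as `fifo_ctx_id_tracker.h`. As a rough sketch of that pattern, not the deleted implementation itself, a FIFO-style tracker only needs to define how the queue is (re)filled; the `ICtxIdTracker` interface below is a hypothetical stand-in for the removed `ictx_id_tracker.h`:

```cpp
#include <cstddef>
#include <queue>
#include <stdexcept>
#include <utility>

// Hypothetical stand-in for the removed ictx_id_tracker.h interface;
// the real header may have differed.
class ICtxIdTracker {
 public:
  virtual ~ICtxIdTracker() = default;
  virtual void Reset(std::size_t count) = 0;
  virtual void Restore(std::size_t id) = 0;
  virtual std::size_t Get() = 0;
  virtual bool IsAvailable() = 0;
};

// FIFO-style tracker in the spirit of the deleted fifo_ctx_id_tracker.h:
// Reset() refills the queue with IDs 0..count-1, after which Get() and
// Restore() hand contexts out in round-robin order.
class FifoCtxIdTrackerSketch : public ICtxIdTracker {
 public:
  void Reset(std::size_t count) override
  {
    std::queue<std::size_t> empty;
    std::swap(free_ctx_ids_, empty);  // drop any stale IDs
    for (std::size_t i = 0; i < count; ++i) {
      free_ctx_ids_.push(i);
    }
  }

  void Restore(std::size_t id) override { free_ctx_ids_.push(id); }

  std::size_t Get() override
  {
    if (!IsAvailable()) {
      throw std::runtime_error("free ctx id list is empty");
    }
    std::size_t id = free_ctx_ids_.front();
    free_ctx_ids_.pop();
    return id;
  }

  bool IsAvailable() override { return !free_ctx_ids_.empty(); }

 private:
  std::queue<std::size_t> free_ctx_ids_;
};
```

Because `Restore()` pushes returned IDs onto the back of the queue, round-robin reuse falls out of the queue ordering for free.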
-
-cmake_minimum_required (VERSION 3.18)
-
-# fixme
-add_definitions(-DCURL_STATICLIB)
-
-add_subdirectory(triton)
-
-if(TRITON_ENABLE_PERF_ANALYZER_C_API)
-  add_subdirectory(triton_c_api)
-endif()
-
-if(TRITON_ENABLE_PERF_ANALYZER_TFS)
-  add_subdirectory(tensorflow_serving)
-endif()
-
-if(TRITON_ENABLE_PERF_ANALYZER_TS)
-  add_subdirectory(torchserve)
-endif()
-
-if(TRITON_ENABLE_PERF_ANALYZER_OPENAI)
-  add_subdirectory(openai)
-endif()
-
-set(
-  CLIENT_BACKEND_SRCS
-  client_backend.cc
-)
-
-set(
-  CLIENT_BACKEND_HDRS
-  client_backend.h
-)
-
-if(TRITON_ENABLE_PERF_ANALYZER_C_API)
-  set(CAPI_LIBRARY $)
-  set(CAPI_TARGET_LINK_LIBRARY PUBLIC $)
-  set(CAPI_TARGET_INCLUDE_DIRECTORY PRIVATE $)
-endif()
-
-if(TRITON_ENABLE_PERF_ANALYZER_TFS)
-  set(TFS_LIBRARY $)
-  set(TFS_TARGET_LINK_LIBRARY PUBLIC $)
-  set(TFS_TARGET_INCLUDE_DIRECTORY PRIVATE $)
-endif()
-
-if(TRITON_ENABLE_PERF_ANALYZER_TS)
-  set(TS_LIBRARY $)
-  set(TS_TARGET_LINK_LIBRARY PUBLIC $)
-  set(TS_TARGET_INCLUDE_DIRECTORY PRIVATE $)
-endif()
-
-if(TRITON_ENABLE_PERF_ANALYZER_OPENAI)
-  set(OPENAI_LIBRARY $)
-  set(OPENAI_TARGET_LINK_LIBRARY PUBLIC $)
-  set(OPENAI_TARGET_INCLUDE_DIRECTORY PRIVATE $)
-endif()
-
-add_library(
-  client-backend-library
-  ${CLIENT_BACKEND_SRCS}
-  ${CLIENT_BACKEND_HDRS}
-  $
-  $
-  ${CAPI_LIBRARY}
-  ${TFS_LIBRARY}
-  ${TS_LIBRARY}
-  ${OPENAI_LIBRARY}
-)
-
-target_link_libraries(
-  client-backend-library
-  PUBLIC triton-common-json # from repo-common
-  PUBLIC $
-  ${CAPI_TARGET_LINK_LIBRARY}
-  ${TFS_TARGET_LINK_LIBRARY}
-  ${TS_TARGET_LINK_LIBRARY}
-  ${OPENAI_TARGET_LINK_LIBRARY}
-)
-
-target_include_directories(
-  client-backend-library
-  PRIVATE $
-  ${CAPI_TARGET_INCLUDE_DIRECTORY}
-  ${TFS_TARGET_INCLUDE_DIRECTORY}
-  ${TS_TARGET_INCLUDE_DIRECTORY}
-  ${OPENAI_TARGET_INCLUDE_DIRECTORY}
-)
diff --git a/src/c++/perf_analyzer/client_backend/client_backend.cc b/src/c++/perf_analyzer/client_backend/client_backend.cc
deleted file mode 100644
index 09af5e5e5..000000000
--- a/src/c++/perf_analyzer/client_backend/client_backend.cc
+++ /dev/null
@@ -1,582 +0,0 @@
-// Copyright 2020-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "client_backend.h" - -#include "triton/triton_client_backend.h" - -#ifdef TRITON_ENABLE_PERF_ANALYZER_C_API -#include "triton_c_api/triton_c_api_backend.h" -#endif // TRITON_ENABLE_PERF_ANALYZER_C_API - -#ifdef TRITON_ENABLE_PERF_ANALYZER_OPENAI -#include "openai/openai_client_backend.h" -#endif // TRITON_ENABLE_PERF_ANALYZER_OPENAI - -#ifdef TRITON_ENABLE_PERF_ANALYZER_TFS -#include "tensorflow_serving/tfserve_client_backend.h" -#endif // TRITON_ENABLE_PERF_ANALYZER_TFS - -#ifdef TRITON_ENABLE_PERF_ANALYZER_TS -#include "torchserve/torchserve_client_backend.h" -#endif // TRITON_ENABLE_PERF_ANALYZER_TS - -namespace triton { namespace perfanalyzer { namespace clientbackend { - -//================================================ - -const Error Error::Success("", pa::SUCCESS); -const Error Error::Failure("", pa::GENERIC_ERROR); - -Error::Error() : msg_(""), error_(pa::SUCCESS) {} - -Error::Error(const std::string& msg, const uint32_t err) - : msg_(msg), error_(err) -{ -} - -Error::Error(const std::string& msg) : msg_(msg) -{ - error_ = pa::GENERIC_ERROR; -} - -std::ostream& -operator<<(std::ostream& out, const Error& err) -{ - if (!err.msg_.empty()) { - out << err.msg_ << std::endl; - } - return out; -} - -//================================================ - -std::string -BackendKindToString(const BackendKind kind) -{ - switch (kind) { - case TRITON: - return std::string("TRITON"); - break; - case TENSORFLOW_SERVING: - return std::string("TENSORFLOW_SERVING"); - break; - case TORCHSERVE: - return std::string("TORCHSERVE"); - break; - case TRITON_C_API: - return std::string("TRITON_C_API"); - break; - case OPENAI: - return std::string("OPENAI"); - break; - default: - return std::string("UNKNOWN"); - break; - } -} - -grpc_compression_algorithm -BackendToGrpcType(const GrpcCompressionAlgorithm compression_algorithm) -{ - switch (compression_algorithm) { - case COMPRESS_DEFLATE: - return grpc_compression_algorithm::GRPC_COMPRESS_DEFLATE; - case COMPRESS_GZIP: - return grpc_compression_algorithm::GRPC_COMPRESS_GZIP; - default: - return grpc_compression_algorithm::GRPC_COMPRESS_NONE; - } -} - -//================================================ - -// -// ClientBackendFactory -// -Error -ClientBackendFactory::Create( - const BackendKind kind, const std::string& url, const std::string& endpoint, - const ProtocolType protocol, const SslOptionsBase& ssl_options, - const std::map> trace_options, - const GrpcCompressionAlgorithm compression_algorithm, - std::shared_ptr http_headers, - const std::string& triton_server_path, - const std::string& model_repository_path, const bool verbose, - const std::string& metrics_url, const cb::TensorFormat input_tensor_format, - const cb::TensorFormat output_tensor_format, - std::shared_ptr* factory) -{ - factory->reset(new ClientBackendFactory( - kind, url, endpoint, protocol, ssl_options, trace_options, - compression_algorithm, http_headers, triton_server_path, - model_repository_path, verbose, metrics_url, 
input_tensor_format, - output_tensor_format)); - return Error::Success; -} - -Error -ClientBackendFactory::CreateClientBackend( - std::unique_ptr* client_backend) -{ - RETURN_IF_CB_ERROR(ClientBackend::Create( - kind_, url_, endpoint_, protocol_, ssl_options_, trace_options_, - compression_algorithm_, http_headers_, verbose_, triton_server_path, - model_repository_path_, metrics_url_, input_tensor_format_, - output_tensor_format_, client_backend)); - return Error::Success; -} - -const BackendKind& -ClientBackendFactory::Kind() -{ - return kind_; -} - -// -// ClientBackend -// -Error -ClientBackend::Create( - const BackendKind kind, const std::string& url, const std::string& endpoint, - const ProtocolType protocol, const SslOptionsBase& ssl_options, - const std::map> trace_options, - const GrpcCompressionAlgorithm compression_algorithm, - std::shared_ptr http_headers, const bool verbose, - const std::string& triton_server_path, - const std::string& model_repository_path, const std::string& metrics_url, - const TensorFormat input_tensor_format, - const TensorFormat output_tensor_format, - std::unique_ptr* client_backend) -{ - std::unique_ptr local_backend; - if (kind == TRITON) { - RETURN_IF_CB_ERROR(tritonremote::TritonClientBackend::Create( - url, protocol, ssl_options, trace_options, - BackendToGrpcType(compression_algorithm), http_headers, verbose, - metrics_url, input_tensor_format, output_tensor_format, - &local_backend)); - } -#ifdef TRITON_ENABLE_PERF_ANALYZER_OPENAI - else if (kind == OPENAI) { - RETURN_IF_CB_ERROR(openai::OpenAiClientBackend::Create( - url, endpoint, protocol, http_headers, verbose, &local_backend)); - } -#endif // TRITON_ENABLE_PERF_ANALYZER_OPENAI -#ifdef TRITON_ENABLE_PERF_ANALYZER_TFS - else if (kind == TENSORFLOW_SERVING) { - RETURN_IF_CB_ERROR(tfserving::TFServeClientBackend::Create( - url, protocol, BackendToGrpcType(compression_algorithm), http_headers, - verbose, &local_backend)); - } -#endif // TRITON_ENABLE_PERF_ANALYZER_TFS -#ifdef TRITON_ENABLE_PERF_ANALYZER_TS - else if (kind == TORCHSERVE) { - RETURN_IF_CB_ERROR(torchserve::TorchServeClientBackend::Create( - url, protocol, http_headers, verbose, &local_backend)); - } -#endif // TRITON_ENABLE_PERF_ANALYZER_TS -#ifdef TRITON_ENABLE_PERF_ANALYZER_C_API - else if (kind == TRITON_C_API) { - RETURN_IF_CB_ERROR(tritoncapi::TritonCApiClientBackend::Create( - triton_server_path, model_repository_path, verbose, &local_backend)); - } -#endif // TRITON_ENABLE_PERF_ANALYZER_C_API - else { - return Error("unsupported client backend requested", pa::GENERIC_ERROR); - } - - *client_backend = std::move(local_backend); - - return Error::Success; -} - -Error -ClientBackend::ServerExtensions(std::set* server_extensions) -{ - return Error( - "client backend of kind " + BackendKindToString(kind_) + - " does not support ServerExtensions API", - pa::GENERIC_ERROR); -} - -Error -ClientBackend::ModelMetadata( - rapidjson::Document* model_metadata, const std::string& model_name, - const std::string& model_version) -{ - return Error( - "client backend of kind " + BackendKindToString(kind_) + - " does not support ModelMetadata API", - pa::GENERIC_ERROR); -} - -Error -ClientBackend::ModelConfig( - rapidjson::Document* model_config, const std::string& model_name, - const std::string& model_version) -{ - return Error( - "client backend of kind " + BackendKindToString(kind_) + - " does not support ModelConfig API", - pa::GENERIC_ERROR); -} - -Error -ClientBackend::Infer( - InferResult** result, const InferOptions& options, - const 
std::vector& inputs, - const std::vector& outputs) -{ - return Error( - "client backend of kind " + BackendKindToString(kind_) + - " does not support Infer API", - pa::GENERIC_ERROR); -} - -Error -ClientBackend::AsyncInfer( - OnCompleteFn callback, const InferOptions& options, - const std::vector& inputs, - const std::vector& outputs) -{ - return Error( - "client backend of kind " + BackendKindToString(kind_) + - " does not support AsyncInfer API", - pa::GENERIC_ERROR); -} - -Error -ClientBackend::StartStream(OnCompleteFn callback, bool enable_stats) -{ - return Error( - "client backend of kind " + BackendKindToString(kind_) + - " does not support StartStream API", - pa::GENERIC_ERROR); -} - -Error -ClientBackend::AsyncStreamInfer( - const InferOptions& options, const std::vector& inputs, - const std::vector& outputs) -{ - return Error( - "client backend of kind " + BackendKindToString(kind_) + - " does not support AsyncStreamInfer API", - pa::GENERIC_ERROR); -} - -Error -ClientBackend::ClientInferStat(InferStat* infer_stat) -{ - return Error( - "client backend of kind " + BackendKindToString(kind_) + - " does not support ClientInferStat API", - pa::GENERIC_ERROR); -} - -Error -ClientBackend::ModelInferenceStatistics( - std::map* model_stats, - const std::string& model_name, const std::string& model_version) -{ - return Error( - "client backend of kind " + BackendKindToString(kind_) + - " does not support ModelInferenceStatistics API", - pa::GENERIC_ERROR); -} - -Error -ClientBackend::Metrics(triton::perfanalyzer::Metrics& metrics) -{ - return Error( - "client backend of kind " + BackendKindToString(kind_) + - " does not support Metrics API", - pa::GENERIC_ERROR); -} - -Error -ClientBackend::UnregisterAllSharedMemory() -{ - return Error( - "client backend of kind " + BackendKindToString(kind_) + - " does not support UnregisterAllSharedMemory API", - pa::GENERIC_ERROR); -} - -Error -ClientBackend::RegisterSystemSharedMemory( - const std::string& name, const std::string& key, const size_t byte_size) -{ - return Error( - "client backend of kind " + BackendKindToString(kind_) + - " does not support RegisterSystemSharedMemory API", - pa::GENERIC_ERROR); -} - -Error -ClientBackend::RegisterCudaSharedMemory( - const std::string& name, const cudaIpcMemHandle_t& handle, - const size_t byte_size) -{ - return Error( - "client backend of kind " + BackendKindToString(kind_) + - " does not support RegisterCudaSharedMemory API", - pa::GENERIC_ERROR); -} - -Error -ClientBackend::RegisterCudaMemory( - const std::string& name, void* handle, const size_t byte_size) -{ - return Error( - "client backend of kind " + BackendKindToString(kind_) + - " does not support RegisterCudaMemory API", - pa::GENERIC_ERROR); -} - -Error -ClientBackend::RegisterSystemMemory( - const std::string& name, void* memory_ptr, const size_t byte_size) -{ - return Error( - "client backend of kind " + BackendKindToString(kind_) + - " does not support RegisterCudaMemory API", - pa::GENERIC_ERROR); -} - -// -// Shared Memory Utilities -// -Error -ClientBackend::CreateSharedMemoryRegion( - std::string shm_key, size_t byte_size, int* shm_fd) -{ - return Error( - "client backend of kind " + BackendKindToString(kind_) + - " does not support CreateSharedMemoryRegion()", - pa::GENERIC_ERROR); -} - - -Error -ClientBackend::MapSharedMemory( - int shm_fd, size_t offset, size_t byte_size, void** shm_addr) -{ - return Error( - "client backend of kind " + BackendKindToString(kind_) + - " does not support MapSharedMemory()", - pa::GENERIC_ERROR); -} - 
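The `ClientBackend::Create` dispatch above is the single point where the selected service kind fans out to a concrete backend, with every API an implementation does not override falling back to a "does not support" `Error`. A minimal sketch of driving it through `ClientBackendFactory`, under assumed defaults; the include path, URL, and ports are illustrative, not values taken from the deleted sources:

```cpp
#include <map>
#include <memory>
#include <string>
#include <vector>

#include "client_backend/client_backend.h"  // path assumed

namespace cb = triton::perfanalyzer::clientbackend;

// Sketch: build a factory for the TRITON kind and create one backend.
cb::Error
MakeTritonBackend(std::unique_ptr<cb::ClientBackend>* backend)
{
  cb::SslOptionsBase ssl_options;  // all-default SSL settings
  std::map<std::string, std::vector<std::string>> trace_options;
  std::shared_ptr<cb::ClientBackendFactory> factory;

  RETURN_IF_CB_ERROR(cb::ClientBackendFactory::Create(
      cb::TRITON, "localhost:8000", /*endpoint=*/"", cb::HTTP, ssl_options,
      trace_options, cb::COMPRESS_NONE, std::make_shared<cb::Headers>(),
      /*triton_server_path=*/"", /*model_repository_path=*/"",
      /*verbose=*/false, /*metrics_url=*/"localhost:8002/metrics",
      cb::TensorFormat::BINARY, cb::TensorFormat::BINARY, &factory));

  // Each worker thread asks the shared factory for its own backend.
  RETURN_IF_CB_ERROR(factory->CreateClientBackend(backend));
  return cb::Error::Success;
}
```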
- -Error -ClientBackend::CloseSharedMemory(int shm_fd) -{ - return Error( - "client backend of kind " + BackendKindToString(kind_) + - " does not support CloseSharedMemory()", - pa::GENERIC_ERROR); -} - -Error -ClientBackend::UnlinkSharedMemoryRegion(std::string shm_key) -{ - return Error( - "client backend of kind " + BackendKindToString(kind_) + - " does not support UnlinkSharedMemoryRegion()", - pa::GENERIC_ERROR); -} - -Error -ClientBackend::UnmapSharedMemory(void* shm_addr, size_t byte_size) -{ - return Error( - "client backend of kind " + BackendKindToString(kind_) + - " does not support UnmapSharedMemory()", - pa::GENERIC_ERROR); -} - - -ClientBackend::ClientBackend(const BackendKind kind) : kind_(kind) {} - -// -// InferInput -// -Error -InferInput::Create( - InferInput** infer_input, const BackendKind kind, const std::string& name, - const std::vector& dims, const std::string& datatype) -{ - if (kind == TRITON) { - RETURN_IF_CB_ERROR(tritonremote::TritonInferInput::Create( - infer_input, name, dims, datatype)); - } -#ifdef TRITON_ENABLE_PERF_ANALYZER_OPENAI - else if (kind == OPENAI) { - RETURN_IF_CB_ERROR( - openai::OpenAiInferInput::Create(infer_input, name, dims, datatype)); - } -#endif // TRITON_ENABLE_PERF_ANALYZER_OPENAI -#ifdef TRITON_ENABLE_PERF_ANALYZER_TFS - else if (kind == TENSORFLOW_SERVING) { - RETURN_IF_CB_ERROR(tfserving::TFServeInferInput::Create( - infer_input, name, dims, datatype)); - } -#endif // TRITON_ENABLE_PERF_ANALYZER_TFS -#ifdef TRITON_ENABLE_PERF_ANALYZER_TS - else if (kind == TORCHSERVE) { - RETURN_IF_CB_ERROR(torchserve::TorchServeInferInput::Create( - infer_input, name, dims, datatype)); - } -#endif // TRITON_ENABLE_PERF_ANALYZER_TS -#ifdef TRITON_ENABLE_PERF_ANALYZER_C_API - else if (kind == TRITON_C_API) { - RETURN_IF_CB_ERROR(tritoncapi::TritonCApiInferInput::Create( - infer_input, name, dims, datatype)); - } -#endif // TRITON_ENABLE_PERF_ANALYZER_C_API - else { - return Error( - "unsupported client backend provided to create InferInput object", - pa::GENERIC_ERROR); - } - - return Error::Success; -} - -Error -InferInput::SetShape(const std::vector& shape) -{ - return Error( - "client backend of kind " + BackendKindToString(kind_) + - " does not support SetShape() for InferInput", - pa::GENERIC_ERROR); -} - -Error -InferInput::Reset() -{ - return Error( - "client backend of kind " + BackendKindToString(kind_) + - " does not support Reset() for InferInput", - pa::GENERIC_ERROR); -} - -Error -InferInput::AppendRaw(const uint8_t* input, size_t input_byte_size) -{ - return Error( - "client backend of kind " + BackendKindToString(kind_) + - " does not support AppendRaw() for InferInput", - pa::GENERIC_ERROR); -} - -Error -InferInput::SetSharedMemory( - const std::string& name, size_t byte_size, size_t offset) -{ - return Error( - "client backend of kind " + BackendKindToString(kind_) + - " does not support SetSharedMemory() for InferInput", - pa::GENERIC_ERROR); -} - -Error -InferInput::RawData(const uint8_t** buf, size_t* byte_size) -{ - return Error( - "client backend of kind " + BackendKindToString(kind_) + - " does not support RawData() for InferInput", - pa::GENERIC_ERROR); -} - -InferInput::InferInput( - const BackendKind kind, const std::string& name, - const std::string& datatype) - : kind_(kind), name_(name), datatype_(datatype) -{ -} - -// -// InferRequestedOutput -// -Error -InferRequestedOutput::Create( - InferRequestedOutput** infer_output, const BackendKind kind, - const std::string& name, const std::string& datatype, - const size_t 
class_count) -{ - if (kind == TRITON) { - RETURN_IF_CB_ERROR(tritonremote::TritonInferRequestedOutput::Create( - infer_output, name, class_count, datatype)); - } -#ifdef TRITON_ENABLE_PERF_ANALYZER_OPENAI - else if (kind == OPENAI) { - RETURN_IF_CB_ERROR(openai::OpenAiInferRequestedOutput::Create( - infer_output, name, datatype)); - } -#endif // TRITON_ENABLE_PERF_ANALYZER_OPENAI -#ifdef TRITON_ENABLE_PERF_ANALYZER_TFS - else if (kind == TENSORFLOW_SERVING) { - RETURN_IF_CB_ERROR( - tfserving::TFServeInferRequestedOutput::Create(infer_output, name)); - } -#endif // TRITON_ENABLE_PERF_ANALYZER_TFS -#ifdef TRITON_ENABLE_PERF_ANALYZER_C_API - else if (kind == TRITON_C_API) { - RETURN_IF_CB_ERROR(tritoncapi::TritonCApiInferRequestedOutput::Create( - infer_output, name, class_count, datatype)); - } -#endif // TRITON_ENABLE_PERF_ANALYZER_C_API - else { - return Error( - "unsupported client backend provided to create InferRequestedOutput " - "object", - pa::GENERIC_ERROR); - } - - return Error::Success; -} - -Error -InferRequestedOutput::SetSharedMemory( - const std::string& region_name, size_t byte_size, size_t offset) -{ - return Error( - "client backend of kind " + BackendKindToString(kind_) + - " does not support SetSharedMemory() for InferRequestedOutput", - pa::GENERIC_ERROR); -} - -InferRequestedOutput::InferRequestedOutput( - const BackendKind kind, const std::string& name, - const std::string& datatype) - : kind_(kind), name_(name), datatype_(datatype) -{ -} - -}}} // namespace triton::perfanalyzer::clientbackend diff --git a/src/c++/perf_analyzer/client_backend/client_backend.h b/src/c++/perf_analyzer/client_backend/client_backend.h deleted file mode 100644 index 06f68c2e3..000000000 --- a/src/c++/perf_analyzer/client_backend/client_backend.h +++ /dev/null @@ -1,675 +0,0 @@ -// Copyright 2020-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
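`InferInput::Create` and `InferRequestedOutput::Create` above apply the same kind switch to request tensors. A sketch of typical input/output preparation for the TRITON kind; the tensor names, shape, and datatype here are invented for illustration:

```cpp
#include <cstdint>
#include <memory>
#include <vector>

#include "client_backend/client_backend.h"  // path assumed

namespace cb = triton::perfanalyzer::clientbackend;

// Sketch: prepare one INT32 input tensor and request one output.
cb::Error
PrepareIO(
    std::unique_ptr<cb::InferInput>* input,
    std::unique_ptr<cb::InferRequestedOutput>* output)
{
  cb::InferInput* raw_input = nullptr;
  RETURN_IF_CB_ERROR(cb::InferInput::Create(
      &raw_input, cb::TRITON, "INPUT0", {1, 16}, "INT32"));
  input->reset(raw_input);

  // AppendRaw() attaches the bytes backing the tensor; lifetime rules
  // depend on the concrete backend, so real callers keep the buffer
  // alive for the duration of the request.
  std::vector<int32_t> data(16, 0);
  RETURN_IF_CB_ERROR((*input)->AppendRaw(
      reinterpret_cast<const uint8_t*>(data.data()),
      data.size() * sizeof(int32_t)));

  cb::InferRequestedOutput* raw_output = nullptr;
  RETURN_IF_CB_ERROR(cb::InferRequestedOutput::Create(
      &raw_output, cb::TRITON, "OUTPUT0", /*datatype=*/"INT32"));
  output->reset(raw_output);
  return cb::Error::Success;
}
```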
-#pragma once - -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "../constants.h" -#include "../metrics.h" -#include "../perf_analyzer_exception.h" -#include "ipc.h" - -namespace pa = triton::perfanalyzer; - -namespace triton { namespace perfanalyzer { namespace clientbackend { - -#define RETURN_IF_CB_ERROR(S) \ - do { \ - const triton::perfanalyzer::clientbackend::Error& status__ = (S); \ - if (!status__.IsOk()) { \ - return status__; \ - } \ - } while (false) - -#define RETURN_IF_ERROR(S) \ - do { \ - triton::perfanalyzer::clientbackend::Error status__ = (S); \ - if (!status__.IsOk()) { \ - return status__; \ - } \ - } while (false) - -#define FAIL_IF_ERR(X, MSG) \ - { \ - triton::perfanalyzer::clientbackend::Error err = (X); \ - if (!err.IsOk()) { \ - std::cerr << "error: " << (MSG) << ": " << err << std::endl; \ - exit(err.Err()); \ - } \ - } \ - while (false) - -#define THROW_IF_ERROR(S, MSG) \ - do { \ - triton::perfanalyzer::clientbackend::Error status__ = (S); \ - if (!status__.IsOk()) { \ - std::cerr << "error: " << (MSG) << ": " << status__ << std::endl; \ - throw PerfAnalyzerException(GENERIC_ERROR); \ - } \ - } while (false) - -//============================================================================== -/// Error status reported by backends -/// -class Error { - public: - /// Create an error - explicit Error(); - - /// Create an error with the specified message and error code. - /// \param msg The message for the error - /// \param err The error code for the error - explicit Error(const std::string& msg, const uint32_t err); - - /// Create an error with the specified message. - /// \param msg The message for the error - explicit Error(const std::string& msg); - - /// Accessor for the message of this error. - /// \return The message for the error. Empty if no error. - const std::string& Message() const { return msg_; } - - /// Accessor for the error code. - /// \return The error code for the error. 0 if no error. - const uint32_t Err() const { return error_; } - - /// Does this error indicate OK status? - /// \return True if this error indicates "ok"/"success", false if - /// error indicates a failure. - bool IsOk() const { return error_ == 0; } - - /// Convenience "success" value. Can be used as Error::Success to - /// indicate no error. - static const Error Success; - - /// Convenience "failure" value. Can be used as Error::Failure to - /// indicate a generic error. - static const Error Failure; - - private: - friend std::ostream& operator<<(std::ostream&, const Error&); - std::string msg_{""}; - uint32_t error_{pa::SUCCESS}; -}; - -//=================================================================================== - -class ClientBackend; -class InferInput; -class InferRequestedOutput; -class InferResult; - -enum BackendKind { - TRITON = 0, - TENSORFLOW_SERVING = 1, - TORCHSERVE = 2, - TRITON_C_API = 3, - OPENAI = 4 -}; -std::string BackendKindToString(const BackendKind kind); - -enum ProtocolType { HTTP = 0, GRPC = 1, UNKNOWN = 2 }; -enum GrpcCompressionAlgorithm { - COMPRESS_NONE = 0, - COMPRESS_DEFLATE = 1, - COMPRESS_GZIP = 2 -}; -enum class TensorFormat { BINARY, JSON, UNKNOWN }; -typedef std::map Headers; - -using OnCompleteFn = std::function; -using ModelIdentifier = std::pair; - -struct InferStat { - /// Total number of requests completed. - size_t completed_request_count; - - /// Time from the request start until the response is completely - /// received. 
- uint64_t cumulative_total_request_time_ns; - - /// Time from the request start until the last byte is sent. - uint64_t cumulative_send_time_ns; - - /// Time from receiving first byte of the response until the - /// response is completely received. - uint64_t cumulative_receive_time_ns; - - /// Create a new InferStat object with zero-ed statistics. - InferStat() - : completed_request_count(0), cumulative_total_request_time_ns(0), - cumulative_send_time_ns(0), cumulative_receive_time_ns(0) - { - } -}; - -// Per model statistics -struct ModelStatistics { - uint64_t success_count_; - uint64_t inference_count_; - uint64_t execution_count_; - uint64_t queue_count_; - uint64_t compute_input_count_; - uint64_t compute_infer_count_; - uint64_t compute_output_count_; - uint64_t cache_hit_count_; - uint64_t cache_miss_count_; - uint64_t cumm_time_ns_; - uint64_t queue_time_ns_; - uint64_t compute_input_time_ns_; - uint64_t compute_infer_time_ns_; - uint64_t compute_output_time_ns_; - uint64_t cache_hit_time_ns_; - uint64_t cache_miss_time_ns_; -}; - -/// -/// Structure to hold Request parameter data for Inference Request. -/// -struct RequestParameter { - std::string name; - std::string value; - std::string type; -}; - -//============================================================================== -/// Structure to hold options for Inference Request. -/// -struct InferOptions { - explicit InferOptions(const std::string& model_name) - : model_name_(model_name), model_version_(""), request_id_(""), - sequence_id_(0), sequence_id_str_(""), sequence_start_(false), - sequence_end_(false), triton_enable_empty_final_response_(true) - { - } - /// The name of the model to run inference. - std::string model_name_; - /// The version of the model. - std::string model_version_; - /// The model signature name for TF models. - std::string model_signature_name_; - /// An identifier for the request. - std::string request_id_; - /// The unique identifier for the sequence being represented by the - /// object. Default value is 0 which means that the request does not - /// belong to a sequence. If this value is set, then sequence_id_str_ - /// MUST be set to "". - uint64_t sequence_id_; - /// The unique identifier for the sequence being represented by the - /// object. Default value is "" which means that the request does not - /// belong to a sequence. If this value is set, then sequence_id_ MUST - /// be set to 0. - std::string sequence_id_str_; - /// Indicates whether the request being added marks the start of the - /// sequence. Default value is False. This argument is ignored if - /// 'sequence_id' is 0. - bool sequence_start_; - /// Indicates whether the request being added marks the end of the - /// sequence. Default value is False. This argument is ignored if - /// 'sequence_id' is 0. - bool sequence_end_; - /// Whether to tell Triton to enable an empty final response. 
- bool triton_enable_empty_final_response_; - - /// Additional parameters to pass to the model - std::unordered_map request_parameters_; -}; - -struct SslOptionsBase { - bool ssl_grpc_use_ssl = false; - std::string ssl_grpc_root_certifications_file = ""; - std::string ssl_grpc_private_key_file = ""; - std::string ssl_grpc_certificate_chain_file = ""; - long ssl_https_verify_peer = 1L; - long ssl_https_verify_host = 2L; - std::string ssl_https_ca_certificates_file = ""; - std::string ssl_https_client_certificate_file = ""; - std::string ssl_https_client_certificate_type = ""; - std::string ssl_https_private_key_file = ""; - std::string ssl_https_private_key_type = ""; -}; - -// -// The object factory to create client backends to communicate with the -// inference service -// -class ClientBackendFactory { - public: - /// Create a factory that can be used to construct Client Backends. - /// \param kind The kind of client backend to create. - /// \param url The inference server url and port. - /// \param endpoint The endpoint on the inference server to send requests to - /// \param protocol The protocol type used. - /// \param ssl_options The SSL options used with client backend. - /// \param compression_algorithm The compression algorithm to be used - /// on the grpc requests. - /// \param http_headers Map of HTTP headers. The map key/value - /// indicates the header name/value. The headers will be included - /// with all the requests made to server using this client. - /// \param triton_server_path Only for C api backend. Lbrary path to - /// path to the top-level Triton directory (which is typically - /// /opt/tritonserver) Must contain libtritonserver.so. - /// \param model_repository_path Only for C api backend. Path to model - /// repository which contains the desired model. - /// \param verbose Enables the verbose mode. - /// \param metrics_url The inference server metrics url and port. - /// \param input_tensor_format The Triton inference request input tensor - /// format. - /// \param output_tensor_format The Triton inference response output tensor - /// format. - /// \param factory Returns a new ClientBackend object. - /// \return Error object indicating success or failure. - static Error Create( - const BackendKind kind, const std::string& url, - const std::string& endpoint, const ProtocolType protocol, - const SslOptionsBase& ssl_options, - const std::map> trace_options, - const GrpcCompressionAlgorithm compression_algorithm, - std::shared_ptr http_headers, - const std::string& triton_server_path, - const std::string& model_repository_path, const bool verbose, - const std::string& metrics_url, const TensorFormat input_tensor_format, - const TensorFormat output_tensor_format, - std::shared_ptr* factory); - - const BackendKind& Kind(); - - /// Create a ClientBackend. - /// \param backend Returns a new Client backend object. 
- virtual Error CreateClientBackend(std::unique_ptr* backend); - - private: - ClientBackendFactory( - const BackendKind kind, const std::string& url, - const std::string& endpoint, const ProtocolType protocol, - const SslOptionsBase& ssl_options, - const std::map> trace_options, - const GrpcCompressionAlgorithm compression_algorithm, - const std::shared_ptr http_headers, - const std::string& triton_server_path, - const std::string& model_repository_path, const bool verbose, - const std::string& metrics_url, const TensorFormat input_tensor_format, - const TensorFormat output_tensor_format) - : kind_(kind), url_(url), endpoint_(endpoint), protocol_(protocol), - ssl_options_(ssl_options), trace_options_(trace_options), - compression_algorithm_(compression_algorithm), - http_headers_(http_headers), triton_server_path(triton_server_path), - model_repository_path_(model_repository_path), verbose_(verbose), - metrics_url_(metrics_url), input_tensor_format_(input_tensor_format), - output_tensor_format_(output_tensor_format) - { - } - - const BackendKind kind_; - const std::string url_; - const std::string endpoint_; - const ProtocolType protocol_; - const SslOptionsBase& ssl_options_; - const std::map> trace_options_; - const GrpcCompressionAlgorithm compression_algorithm_; - std::shared_ptr http_headers_; - std::string triton_server_path; - std::string model_repository_path_; - const bool verbose_; - const std::string metrics_url_{""}; - const TensorFormat input_tensor_format_{TensorFormat::UNKNOWN}; - const TensorFormat output_tensor_format_{TensorFormat::UNKNOWN}; - - -#ifndef DOCTEST_CONFIG_DISABLE - protected: - ClientBackendFactory() - : kind_(BackendKind()), url_(""), protocol_(ProtocolType()), - ssl_options_(SslOptionsBase()), - trace_options_(std::map>()), - compression_algorithm_(GrpcCompressionAlgorithm()), verbose_(false) - { - } -#endif -}; - -// -// Interface for interacting with an inference service -// -class ClientBackend { - public: - static Error Create( - const BackendKind kind, const std::string& url, - const std::string& endpoint, const ProtocolType protocol, - const SslOptionsBase& ssl_options, - const std::map> trace_options, - const GrpcCompressionAlgorithm compression_algorithm, - std::shared_ptr http_headers, const bool verbose, - const std::string& library_directory, const std::string& model_repository, - const std::string& metrics_url, const TensorFormat input_tensor_format, - const TensorFormat output_tensor_format, - std::unique_ptr* client_backend); - - /// Destructor for the client backend object - virtual ~ClientBackend() = default; - - /// Get the backend kind - BackendKind Kind() const { return kind_; } - - /// Get the server metadata from the server - virtual Error ServerExtensions(std::set* server_extensions); - - /// Get the model metadata from the server for specified name and - /// version as rapidjson DOM object. - virtual Error ModelMetadata( - rapidjson::Document* model_metadata, const std::string& model_name, - const std::string& model_version); - - /// Get the model config from the server for specified name and - /// version as rapidjson DOM object. - virtual Error ModelConfig( - rapidjson::Document* model_config, const std::string& model_name, - const std::string& model_version); - - /// Issues a synchronous inference request to the server. - virtual Error Infer( - InferResult** result, const InferOptions& options, - const std::vector& inputs, - const std::vector& outputs); - - /// Issues an asynchronous inference request to the server. 
- virtual Error AsyncInfer( - OnCompleteFn callback, const InferOptions& options, - const std::vector& inputs, - const std::vector& outputs); - - /// Established a stream to the server. - virtual Error StartStream(OnCompleteFn callback, bool enable_stats); - - /// Issues an asynchronous inference request to the underlying stream. - virtual Error AsyncStreamInfer( - const InferOptions& options, const std::vector& inputs, - const std::vector& outputs); - - /// Gets the client side inference statistics from the client library. - virtual Error ClientInferStat(InferStat* infer_stat); - - /// Gets the server-side model inference statistics from the server. - virtual Error ModelInferenceStatistics( - std::map* model_stats, - const std::string& model_name = "", - const std::string& model_version = ""); - - /// Gets the server-side metrics from the server. - /// \param metrics Output metrics object. - /// \return Error object indicating success or failure. - virtual Error Metrics(Metrics& metrics); - - /// Unregisters all the shared memory from the server - virtual Error UnregisterAllSharedMemory(); - - /// Registers a system shared memory from the server - virtual Error RegisterSystemSharedMemory( - const std::string& name, const std::string& key, const size_t byte_size); - - /// Registers cuda shared memory to the server. - virtual Error RegisterCudaSharedMemory( - const std::string& name, const cudaIpcMemHandle_t& handle, - const size_t byte_size); - - /// Registers cuda memory to the server. - virtual Error RegisterCudaMemory( - const std::string& name, void* handle, const size_t byte_size); - - /// Registers a system memory location on the server. - virtual Error RegisterSystemMemory( - const std::string& name, void* memory_ptr, const size_t byte_size); - - // - // Shared Memory Utilities - // - // FIXME: These should probably move to a common area with shm_utils not - // tied specifically to inferenceserver. Create a shared memory region of - // the size 'byte_size' and return the unique identifier. - virtual Error CreateSharedMemoryRegion( - std::string shm_key, size_t byte_size, int* shm_fd); - - // Mmap the shared memory region with the given 'offset' and 'byte_size' and - // return the base address of the region. - // \param shm_fd The int descriptor of the created shared memory region - // \param offset The offset of the shared memory block from the start of the - // shared memory region - // \param byte_size The size in bytes of the shared memory region - // \param shm_addr Returns the base address of the shared memory region - // \return error Returns an error if unable to mmap shared memory region. - virtual Error MapSharedMemory( - int shm_fd, size_t offset, size_t byte_size, void** shm_addr); - - // Close the shared memory descriptor. - // \param shm_fd The int descriptor of the created shared memory region - // \return error Returns an error if unable to close shared memory descriptor. - virtual Error CloseSharedMemory(int shm_fd); - - // Destroy the shared memory region with the given name. - // \return error Returns an error if unable to unlink shared memory region. - virtual Error UnlinkSharedMemoryRegion(std::string shm_key); - - // Munmap the shared memory region from the base address with the given - // byte_size. - // \return error Returns an error if unable to unmap shared memory region. 
- virtual Error UnmapSharedMemory(void* shm_addr, size_t byte_size); - - protected: - /// Constructor for client backend - ClientBackend(const BackendKind kind); - // The kind of the backend. - const BackendKind kind_{TRITON}; - -#ifndef DOCTEST_CONFIG_DISABLE - public: - ClientBackend() = default; -#endif -}; - - -// -// Interface for preparing the inputs for inference to the backend -// -class InferInput { - public: - /// Create a InferInput instance that describes a model input. - /// \param infer_input Returns a new InferInput object. - /// \param kind The kind of the associated client backend. - /// \param name The name of input whose data will be described by this object. - /// \param dims The shape of the input. - /// \param datatype The datatype of the input. - /// \return Error object indicating success or failure. - static Error Create( - InferInput** infer_input, const BackendKind kind, const std::string& name, - const std::vector& dims, const std::string& datatype); - - virtual ~InferInput() = default; - - /// Gets name of the associated input tensor. - /// \return The name of the tensor. - const std::string& Name() const { return name_; } - - /// Gets datatype of the associated input tensor. - /// \return The datatype of the tensor. - const std::string& Datatype() const { return datatype_; } - - /// Gets the shape of the input tensor. - /// \return The shape of the tensor. - virtual const std::vector& Shape() const = 0; - - /// Set the shape of input associated with this object. - /// \param dims the vector of dims representing the new shape - /// of input. - /// \return Error object indicating success or failure of the - /// request. - virtual Error SetShape(const std::vector& dims); - - /// Prepare this input to receive new tensor values. Forget any - /// existing values that were set by previous calls to SetSharedMemory() - /// or AppendRaw(). - /// \return Error object indicating success or failure. - virtual Error Reset(); - - /// Append tensor values for this input from a byte array. - /// \param input The pointer to the array holding the tensor value. - /// \param input_byte_size The size of the array in bytes. - /// \return Error object indicating success or failure. - virtual Error AppendRaw(const uint8_t* input, size_t input_byte_size); - - /// Set tensor values for this input by reference into a shared memory - /// region. - /// \param name The user-given name for the registered shared memory region - /// where the tensor values for this input is stored. - /// \param byte_size The size, in bytes of the input tensor data. Must - /// match the size expected for the input shape. - /// \param offset The offset into the shared memory region upto the start - /// of the input tensor values. The default value is 0. - /// \return Error object indicating success or failure - virtual Error SetSharedMemory( - const std::string& name, size_t byte_size, size_t offset = 0); - - /// Get access to the buffer holding raw input. Note the buffer is owned by - /// InferInput instance. Users can copy out the data if required to extend - /// the lifetime. - /// \param buf Returns the pointer to the start of the buffer. - /// \param byte_size Returns the size of buffer in bytes. - /// \return Error object indicating success or failure of the - /// request. 
- virtual Error RawData(const uint8_t** buf, size_t* byte_size); - - protected: - InferInput( - const BackendKind kind, const std::string& name, - const std::string& datatype_); - - const BackendKind kind_; - const std::string name_; - const std::string datatype_; -}; - - -// -// Interface for preparing the inputs for inference to the backend -// -class InferRequestedOutput { - public: - virtual ~InferRequestedOutput() = default; - - /// Create a InferRequestedOutput instance that describes a model output being - /// requested. - /// \param infer_output Returns a new InferOutputGrpc object. - /// \param kind The kind of the associated client backend. - /// \param name The name of output being requested. - /// \param datatype The datatype of the output - /// \param class_count The number of classifications to be requested. The - /// default value is 0 which means the classification results are not - /// requested. - /// \return Error object indicating success or failure. - static Error Create( - InferRequestedOutput** infer_output, const BackendKind kind, - const std::string& name, const std::string& datatype, - const size_t class_count = 0); - - /// Gets name of the associated output tensor. - /// \return The name of the tensor. - const std::string& Name() const { return name_; } - - /// Gets datatype of the associated output tensor. - /// \return The datatype of the tensor - const std::string& Datatype() const { return datatype_; } - - /// Set the output tensor data to be written to specified shared - /// memory region. - /// \param region_name The name of the shared memory region. - /// \param byte_size The size of data in bytes. - /// \param offset The offset in shared memory region. Default value is 0. - /// \return Error object indicating success or failure of the - /// request. - virtual Error SetSharedMemory( - const std::string& region_name, const size_t byte_size, - const size_t offset = 0); - - protected: - InferRequestedOutput( - const BackendKind kind, const std::string& name, - const std::string& datatype = ""); - const BackendKind kind_; - const std::string name_; - const std::string datatype_; -}; - -// -// Interface for accessing the processed results. -// -class InferResult { - public: - virtual ~InferResult() = default; - - /// Get the id of the request which generated this response. - /// \param id Returns the request id that generated the result. - /// \return Error object indicating success or failure. - virtual Error Id(std::string* id) const = 0; - - - /// Returns the status of the request. - /// \return Error object indicating the success or failure of the - /// request. - virtual Error RequestStatus() const = 0; - - /// Returns the raw data of the output. - /// \return Error object indicating the success or failure of the - /// request. - virtual Error RawData( - const std::string& output_name, const uint8_t** buf, - size_t* byte_size) const = 0; - - /// Get final response bool for this response. - /// \return Error object indicating the success or failure. - virtual Error IsFinalResponse(bool* is_final_response) const - { - return Error("InferResult::IsFinalResponse() not implemented"); - }; - - /// Get null response bool for this response. - /// \return Error object indicating the success or failure. 
- virtual Error IsNullResponse(bool* is_null_response) const - { - return Error("InferResult::IsNullResponse() not implemented"); - }; -}; - -}}} // namespace triton::perfanalyzer::clientbackend - -namespace cb = triton::perfanalyzer::clientbackend; diff --git a/src/c++/perf_analyzer/client_backend/mock_client_backend.h b/src/c++/perf_analyzer/client_backend/mock_client_backend.h deleted file mode 100644 index 483af914d..000000000 --- a/src/c++/perf_analyzer/client_backend/mock_client_backend.h +++ /dev/null @@ -1,660 +0,0 @@ -// Copyright 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
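The `InferResult` interface that closes `client_backend.h` above deliberately defaults `IsFinalResponse()` and `IsNullResponse()` to `Error`s so that only decoupled-capable backends opt in. A sketch of a stream callback consuming those defaults; it assumes, as in the Triton C++ client library, that the callback takes ownership of the raw result pointer:

```cpp
#include <iostream>
#include <string>

#include "client_backend/client_backend.h"  // path assumed

namespace cb = triton::perfanalyzer::clientbackend;

// Sketch of a stream callback matching OnCompleteFn.
void
OnStreamResponse(cb::InferResult* result)
{
  std::string id;
  if (!result->RequestStatus().IsOk() || !result->Id(&id).IsOk()) {
    std::cerr << "stream request failed" << std::endl;
    delete result;
    return;
  }

  bool is_final = false;
  bool is_null = false;
  // Backends without decoupled-response support return an Error from
  // these defaults, so treat failure as "single, final response".
  if (!result->IsFinalResponse(&is_final).IsOk()) {
    is_final = true;
  }
  result->IsNullResponse(&is_null);

  if (!is_null) {
    std::cout << "response for request " << id
              << (is_final ? " (final)" : "") << std::endl;
  }
  delete result;
}
```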
- -#pragma once - -#include -#include -#include -#include - -#include "../doctest.h" -#include "client_backend.h" -#include "gmock/gmock.h" - -namespace triton { namespace perfanalyzer { namespace clientbackend { - -// Holds information (either the raw data or a shared memory label) for an -// inference input -// -struct TestRecordedInput { - TestRecordedInput(int32_t data_in, size_t size_in) - : shared_memory_label(""), data(data_in), size(size_in) - { - } - - TestRecordedInput(std::string label_in, size_t size_in) - : shared_memory_label(label_in), data(0), size(size_in) - { - } - - std::string shared_memory_label; - int32_t data; - size_t size; -}; - -/// Mock class of an InferInput -/// -class MockInferInput : public InferInput { - public: - MockInferInput( - const BackendKind kind, const std::string& name, - const std::vector& dims, const std::string& datatype) - : InferInput(kind, name, datatype), dims_(dims) - { - } - - const std::vector& Shape() const override { return dims_; } - - Error Reset() override - { - recorded_inputs_.clear(); - return Error::Success; - } - - Error AppendRaw(const uint8_t* input, size_t input_byte_size) override - { - if (input) { - int32_t val = *reinterpret_cast(input); - recorded_inputs_.push_back(TestRecordedInput(val, input_byte_size)); - } - ++append_raw_calls_; - return Error::Success; - } - - Error SetSharedMemory( - const std::string& name, size_t byte_size, size_t offset = 0) - { - recorded_inputs_.push_back(TestRecordedInput(name, byte_size)); - ++set_shared_memory_calls_; - return Error::Success; - } - - const std::vector dims_{}; - std::vector recorded_inputs_{}; - std::atomic append_raw_calls_{0}; - std::atomic set_shared_memory_calls_{0}; -}; - -/// Mock class of an InferResult -/// -class MockInferResult : public InferResult { - public: - MockInferResult(const InferOptions& options) : req_id_(options.request_id_) {} - - Error Id(std::string* id) const override - { - *id = req_id_; - return Error::Success; - } - Error RequestStatus() const override { return Error::Success; } - Error RawData( - const std::string& output_name, const uint8_t** buf, - size_t* byte_size) const override - { - return Error::Success; - } - - Error IsFinalResponse(bool* is_final_response) const override - { - if (is_final_response == nullptr) { - return Error("is_final_response cannot be nullptr"); - } - *is_final_response = true; - return Error::Success; - } - - Error IsNullResponse(bool* is_null_response) const override - { - if (is_null_response == nullptr) { - return Error("is_null_response cannot be nullptr"); - } - *is_null_response = false; - return Error::Success; - } - - private: - std::string req_id_; -}; - -/// Class to track statistics of MockClientBackend -/// -class MockClientStats { - public: - enum class ReqType { SYNC, ASYNC, ASYNC_STREAM }; - - struct SeqStatus { - // Set of all unique sequence IDs observed in requests - // - std::set used_seq_ids; - - // Map of all "live" sequence IDs (sequences that have started and not - // ended) to their current length (how many requests have been sent to that - // sequence ID since it started) - // - std::map live_seq_ids_to_length; - - // Map of sequence ID to how many requests have been received for it. 
- // - std::map seq_ids_to_count; - - // Map of sequence IDs to how many are "inflight" for that sequence ID - // (inflight means the request has been received, response has not been - // returned) - // - std::map seq_ids_to_inflight_count; - - // Maximum observed number of live sequences (sequences that have started - // and not ended) - // - uint32_t max_live_seq_count = 0; - - // Maximum observed number of inflight requests for a sequence - // - uint32_t max_inflight_seq_count = 0; - - std::vector seq_lengths; - - bool IsSeqLive(uint64_t seq_id) - { - return ( - live_seq_ids_to_length.find(seq_id) != live_seq_ids_to_length.end()); - } - void HandleSeqStart(uint64_t seq_id) - { - used_seq_ids.insert(seq_id); - live_seq_ids_to_length[seq_id] = 0; - if (live_seq_ids_to_length.size() > max_live_seq_count) { - max_live_seq_count = live_seq_ids_to_length.size(); - } - } - void HandleSeqEnd(uint64_t seq_id) - { - uint32_t len = live_seq_ids_to_length[seq_id]; - seq_lengths.push_back(len); - auto it = live_seq_ids_to_length.find(seq_id); - live_seq_ids_to_length.erase(it); - } - - void HandleSeqRequest(uint64_t seq_id) - { - live_seq_ids_to_length[seq_id]++; - - if (seq_ids_to_count.find(seq_id) == seq_ids_to_count.end()) { - seq_ids_to_count[seq_id] = 1; - } else { - seq_ids_to_count[seq_id]++; - } - - if (seq_ids_to_inflight_count.find(seq_id) == - seq_ids_to_inflight_count.end()) { - seq_ids_to_inflight_count[seq_id] = 1; - } else { - seq_ids_to_inflight_count[seq_id]++; - } - if (seq_ids_to_inflight_count[seq_id] > max_inflight_seq_count) { - max_inflight_seq_count = seq_ids_to_inflight_count[seq_id]; - } - } - - void Reset() - { - // Note that live_seq_ids_to_length is explicitly not reset here. - // This is because we always want to maintain the true status of - // live sequences - - used_seq_ids.clear(); - max_live_seq_count = 0; - seq_lengths.clear(); - seq_ids_to_count.clear(); - } - }; - - std::atomic num_infer_calls{0}; - std::atomic num_async_infer_calls{0}; - std::atomic num_async_stream_infer_calls{0}; - std::atomic num_start_stream_calls{0}; - - std::atomic num_active_infer_calls{0}; - - std::atomic num_append_raw_calls{0}; - std::atomic num_set_shared_memory_calls{0}; - // Struct tracking shared memory method calls - // - struct SharedMemoryStats { - std::atomic num_unregister_all_shared_memory_calls{0}; - std::atomic num_register_system_shared_memory_calls{0}; - std::atomic num_register_cuda_shared_memory_calls{0}; - std::atomic num_register_cuda_memory_calls{0}; - std::atomic num_register_system_memory_calls{0}; - std::atomic num_create_shared_memory_region_calls{0}; - std::atomic num_map_shared_memory_calls{0}; - std::atomic num_close_shared_memory_calls{0}; - std::atomic num_unlink_shared_memory_region_calls{0}; - std::atomic num_unmap_shared_memory_calls{0}; - - // bool operator==(const SharedMemoryStats& lhs, const SharedMemoryStats& - // rhs) - bool operator==(const SharedMemoryStats& rhs) const - { - if (this->num_unregister_all_shared_memory_calls == - rhs.num_unregister_all_shared_memory_calls && - this->num_register_system_shared_memory_calls == - rhs.num_register_system_shared_memory_calls && - this->num_register_cuda_shared_memory_calls == - rhs.num_register_cuda_shared_memory_calls && - this->num_register_cuda_memory_calls == - rhs.num_register_cuda_memory_calls && - this->num_register_system_memory_calls == - rhs.num_register_system_memory_calls && - this->num_create_shared_memory_region_calls == - rhs.num_create_shared_memory_region_calls && - 
this->num_map_shared_memory_calls == - rhs.num_map_shared_memory_calls && - this->num_close_shared_memory_calls == - rhs.num_close_shared_memory_calls && - this->num_unlink_shared_memory_region_calls == - rhs.num_unlink_shared_memory_region_calls && - this->num_unmap_shared_memory_calls == - rhs.num_unmap_shared_memory_calls) { - return true; - } - return false; - } - }; - - /// Determines how long the backend will delay before sending a "response". - /// If a single value vector is passed in, all responses will take that long. - /// If a list of values is passed in, then the mock backend will loop through - /// the values (and loop back to the start when it hits the end of the vector) - /// - void SetDelays(std::vector times) - { - response_delays_.clear(); - for (size_t t : times) { - response_delays_.push_back(std::chrono::milliseconds{t}); - } - } - - /// Determines the return status of requests. - /// If a single value vector is passed in, all responses will return that - /// status. If a list of values is passed in, then the mock backend will loop - /// through the values (and loop back to the start when it hits the end of the - /// vector) - /// - void SetReturnStatuses(std::vector statuses) - { - response_statuses_.clear(); - for (bool success : statuses) { - if (success) { - response_statuses_.push_back(Error::Success); - } else { - response_statuses_.push_back(Error("Injected test error")); - } - } - } - - std::chrono::milliseconds GetNextDelay() - { - std::lock_guard lock(mtx_); - - auto val = response_delays_[response_delays_index_]; - response_delays_index_++; - if (response_delays_index_ == response_delays_.size()) { - response_delays_index_ = 0; - } - return val; - } - - Error GetNextReturnStatus() - { - std::lock_guard lock(mtx_); - - auto val = response_statuses_[response_statuses_index_]; - response_statuses_index_++; - if (response_statuses_index_ == response_statuses_.size()) { - response_statuses_index_ = 0; - } - return val; - } - - bool start_stream_enable_stats_value{false}; - - std::vector> - request_timestamps; - SeqStatus sequence_status; - SharedMemoryStats memory_stats; - - // Each entry in the top vector is a list of all inputs for an inference - // request. 
-  // request. If there are multiple inputs due to batching and/or the model
-  // having multiple inputs, all of those from the same request will be in the
-  // same second level vector
-  std::vector<std::vector<int32_t>> recorded_inputs{};
-
-  void CaptureRequest(
-      ReqType type, const InferOptions& options,
-      const std::vector<InferInput*>& inputs,
-      const std::vector<const InferRequestedOutput*>& outputs)
-  {
-    num_active_infer_calls++;
-
-    std::lock_guard<std::mutex> lock(mtx_);
-    auto time = std::chrono::system_clock::now();
-    request_timestamps.push_back(time);
-
-    // Group all values across all inputs together into a single vector, and
-    // then record it
-    std::vector<int32_t> request_inputs;
-    for (const auto& input : inputs) {
-      auto recorded_inputs =
-          static_cast<const MockInferInput*>(input)->recorded_inputs_;
-      request_inputs.insert(
-          request_inputs.end(), recorded_inputs.begin(), recorded_inputs.end());
-    }
-    recorded_inputs.push_back(request_inputs);
-
-    UpdateCallCount(type);
-    UpdateSeqStatus(options);
-    AccumulateInferInputCalls(inputs);
-  }
-
-  void CaptureRequestEnd(const InferOptions& options)
-  {
-    num_active_infer_calls--;
-
-    if (options.sequence_id_ != 0) {
-      sequence_status.seq_ids_to_inflight_count[options.sequence_id_]--;
-    }
-  }
-
-  void CaptureStreamStart()
-  {
-    std::lock_guard<std::mutex> lock(mtx_);
-    num_start_stream_calls++;
-  }
-
-
-  void Reset()
-  {
-    std::lock_guard<std::mutex> lock(mtx_);
-    num_infer_calls = 0;
-    num_async_infer_calls = 0;
-    num_async_stream_infer_calls = 0;
-    num_start_stream_calls = 0;
-    request_timestamps.clear();
-    sequence_status.Reset();
-  }
-
- private:
-  std::vector<std::chrono::milliseconds> response_delays_{
-      std::chrono::milliseconds{0}};
-  std::vector<Error> response_statuses_{Error::Success};
-  std::atomic<size_t> response_delays_index_{0};
-  std::atomic<size_t> response_statuses_index_{0};
-
-  std::mutex mtx_;
-
-  void UpdateCallCount(ReqType type)
-  {
-    if (type == ReqType::SYNC) {
-      num_infer_calls++;
-    } else if (type == ReqType::ASYNC) {
-      num_async_infer_calls++;
-    } else {
-      num_async_stream_infer_calls++;
-    }
-  }
-
-  void UpdateSeqStatus(const InferOptions& options)
-  {
-    // Seq ID of 0 is reserved for "not a sequence"
-    //
-    if (options.sequence_id_ != 0) {
-      // If a sequence ID is not live, it must be starting
-      if (!sequence_status.IsSeqLive(options.sequence_id_)) {
-        REQUIRE(options.sequence_start_ == true);
-      }
-
-      // If a new sequence is starting, that sequence ID must not already be
-      // live
-      if (options.sequence_start_ == true) {
-        REQUIRE(sequence_status.IsSeqLive(options.sequence_id_) == false);
-        sequence_status.HandleSeqStart(options.sequence_id_);
-      }
-
-      sequence_status.HandleSeqRequest(options.sequence_id_);
-
-      // If a sequence is ending, it must be live
-      if (options.sequence_end_) {
-        REQUIRE(sequence_status.IsSeqLive(options.sequence_id_) == true);
-        sequence_status.HandleSeqEnd(options.sequence_id_);
-      }
-    }
-  }
-
-  void AccumulateInferInputCalls(const std::vector<InferInput*>& inputs)
-  {
-    for (const auto& input : inputs) {
-      const MockInferInput* mock_input =
-          static_cast<const MockInferInput*>(input);
-      num_append_raw_calls += mock_input->append_raw_calls_;
-      num_set_shared_memory_calls += mock_input->set_shared_memory_calls_;
-    }
-  }
-};
-
-/// Mock implementation of ClientBackend interface
-///
-class NaggyMockClientBackend : public ClientBackend {
- public:
-  NaggyMockClientBackend(std::shared_ptr<MockClientStats> stats) : stats_(stats)
-  {
-    ON_CALL(*this, AsyncStreamInfer(testing::_, testing::_, testing::_))
-        .WillByDefault(
-            [this](
-                const InferOptions& options,
-                const std::vector<InferInput*>& inputs,
-                const std::vector<const InferRequestedOutput*>& outputs)
-                -> Error {
-              stats_->CaptureRequest(
-                  MockClientStats::ReqType::ASYNC_STREAM, options, inputs,
-                  outputs);
-
-              LaunchAsyncMockRequest(options, stream_callback_);
-
-              return stats_->GetNextReturnStatus();
-            });
-  }
-
-  MOCK_METHOD(
-      Error, ModelConfig,
-      (rapidjson::Document*, const std::string&, const std::string&),
-      (override));
-  MOCK_METHOD(
-      Error, AsyncStreamInfer,
-      (const InferOptions&, const std::vector<InferInput*>&,
-       const std::vector<const InferRequestedOutput*>&),
-      (override));
-
-  Error Infer(
-      InferResult** result, const InferOptions& options,
-      const std::vector<InferInput*>& inputs,
-      const std::vector<const InferRequestedOutput*>& outputs) override
-  {
-    stats_->CaptureRequest(
-        MockClientStats::ReqType::SYNC, options, inputs, outputs);
-
-    std::this_thread::sleep_for(stats_->GetNextDelay());
-
-    local_completed_req_count_++;
-    stats_->CaptureRequestEnd(options);
-
-    return stats_->GetNextReturnStatus();
-  }
-
-  Error AsyncInfer(
-      OnCompleteFn callback, const InferOptions& options,
-      const std::vector<InferInput*>& inputs,
-      const std::vector<const InferRequestedOutput*>& outputs) override
-  {
-    stats_->CaptureRequest(
-        MockClientStats::ReqType::ASYNC, options, inputs, outputs);
-
-    LaunchAsyncMockRequest(options, callback);
-
-    return stats_->GetNextReturnStatus();
-  }
-
-  Error StartStream(OnCompleteFn callback, bool enable_stats)
-  {
-    stats_->CaptureStreamStart();
-    stats_->start_stream_enable_stats_value = enable_stats;
-    stream_callback_ = callback;
-    return stats_->GetNextReturnStatus();
-  }
-
-  Error ClientInferStat(InferStat* infer_stat) override
-  {
-    infer_stat->completed_request_count = local_completed_req_count_;
-    return Error::Success;
-  }
-
-  Error UnregisterAllSharedMemory() override
-  {
-    stats_->memory_stats.num_unregister_all_shared_memory_calls++;
-    return Error::Success;
-  }
-
-  Error RegisterSystemSharedMemory(
-      const std::string& name, const std::string& key,
-      const size_t byte_size) override
-  {
-    stats_->memory_stats.num_register_system_shared_memory_calls++;
-    return Error::Success;
-  }
-
-  Error RegisterCudaSharedMemory(
-      const std::string& name, const cudaIpcMemHandle_t& handle,
-      const size_t byte_size) override
-  {
-    stats_->memory_stats.num_register_cuda_shared_memory_calls++;
-    return Error::Success;
-  }
-
-  Error RegisterCudaMemory(
-      const std::string& name, void* handle, const size_t byte_size) override
-  {
-    stats_->memory_stats.num_register_cuda_memory_calls++;
-    return Error::Success;
-  }
-
-  Error RegisterSystemMemory(
-      const std::string& name, void* memory_ptr,
-      const size_t byte_size) override
-  {
-    stats_->memory_stats.num_register_system_memory_calls++;
-    return Error::Success;
-  }
-
-  Error CreateSharedMemoryRegion(
-      std::string shm_key, size_t byte_size, int* shm_fd) override
-  {
-    stats_->memory_stats.num_create_shared_memory_region_calls++;
-    return Error::Success;
-  }
-
-  Error MapSharedMemory(
-      int shm_fd, size_t offset, size_t byte_size, void** shm_addr) override
-  {
-    stats_->memory_stats.num_map_shared_memory_calls++;
-    return Error::Success;
-  }
-
-  Error CloseSharedMemory(int shm_fd) override
-  {
-    stats_->memory_stats.num_close_shared_memory_calls++;
-    return Error::Success;
-  }
-
-  Error UnlinkSharedMemoryRegion(std::string shm_key) override
-  {
-    stats_->memory_stats.num_unlink_shared_memory_region_calls++;
-    return Error::Success;
-  }
-
-  Error UnmapSharedMemory(void* shm_addr, size_t byte_size) override
-  {
-    stats_->memory_stats.num_unmap_shared_memory_calls++;
-    return Error::Success;
-  }
-
-  OnCompleteFn stream_callback_;
-
- private:
-  void LaunchAsyncMockRequest(const InferOptions options, OnCompleteFn callback)
-  {
-    std::thread([this, options, callback]() {
-      std::this_thread::sleep_for(stats_->GetNextDelay());
-      local_completed_req_count_++;
-
-      InferResult* result = new MockInferResult(options);
-      callback(result);
-
-      stats_->CaptureRequestEnd(options);
-    }).detach();
-  }
-
-  // Total count of how many requests this client has handled and finished
-  size_t local_completed_req_count_ = 0;
-
-  std::shared_ptr<MockClientStats> stats_;
-};
-
-using MockClientBackend = testing::NiceMock<NaggyMockClientBackend>;
-
-/// Mock factory that always creates a MockClientBackend instead
-/// of a real backend
-///
-class MockClientBackendFactory : public ClientBackendFactory {
- public:
-  MockClientBackendFactory(std::shared_ptr<MockClientStats> stats)
-  {
-    stats_ = stats;
-  }
-
-  Error CreateClientBackend(std::unique_ptr<ClientBackend>* backend) override
-  {
-    std::unique_ptr<MockClientBackend> mock_backend(
-        new MockClientBackend(stats_));
-    *backend = std::move(mock_backend);
-    return Error::Success;
-  }
-
- private:
-  std::shared_ptr<MockClientStats> stats_;
-};
-
-}}}  // namespace triton::perfanalyzer::clientbackend
diff --git a/src/c++/perf_analyzer/client_backend/openai/CMakeLists.txt b/src/c++/perf_analyzer/client_backend/openai/CMakeLists.txt
deleted file mode 100644
index 93963e378..000000000
--- a/src/c++/perf_analyzer/client_backend/openai/CMakeLists.txt
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#  * Redistributions of source code must retain the above copyright
-#    notice, this list of conditions and the following disclaimer.
-#  * Redistributions in binary form must reproduce the above copyright
-#    notice, this list of conditions and the following disclaimer in the
-#    documentation and/or other materials provided with the distribution.
-#  * Neither the name of NVIDIA CORPORATION nor the names of its
-#    contributors may be used to endorse or promote products derived
-#    from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
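For context on how the mock classes above are exercised: a doctest-style unit test can inject a MockClientStats into the factory, create a backend, and assert on the recorded counters. The sketch below uses only interfaces from the deleted header; the single-argument InferOptions constructor is an assumption about client_backend.h, not something shown in this patch.

  #include <memory>
  #include <vector>

  TEST_CASE("mock backend records a sync inference")
  {
    auto stats = std::make_shared<MockClientStats>();
    MockClientBackendFactory factory(stats);

    std::unique_ptr<ClientBackend> backend;
    REQUIRE(factory.CreateClientBackend(&backend).IsOk());

    stats->SetDelays({5});             // every mock response takes 5 ms
    stats->SetReturnStatuses({true});  // every mock response succeeds

    InferOptions options("mock_model");  // assumed: model-name-only ctor
    std::vector<InferInput*> inputs;
    std::vector<const InferRequestedOutput*> outputs;

    // The mock sleeps for the injected delay, then bumps its counters.
    InferResult* result = nullptr;
    REQUIRE(backend->Infer(&result, options, inputs, outputs).IsOk());
    CHECK(stats->num_infer_calls == 1);
  }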
- -cmake_minimum_required (VERSION 3.18) - -set( - OPENAI_CLIENT_BACKEND_SRCS - http_client.cc - openai_client_backend.cc - openai_client.cc - openai_infer_input.cc -) - -set( - OPENAI_CLIENT_BACKEND_HDRS - http_client.h - openai_client_backend.h - openai_client.h - openai_infer_input.h -) - -add_library( - openai-client-backend-library EXCLUDE_FROM_ALL OBJECT - ${OPENAI_CLIENT_BACKEND_SRCS} - ${OPENAI_CLIENT_BACKEND_HDRS} -) - -target_link_libraries( - openai-client-backend-library - PUBLIC CURL::libcurl - PUBLIC httpclient_static -) - -if(${TRITON_ENABLE_GPU}) - target_include_directories(openai-client-backend-library PUBLIC ${CUDA_INCLUDE_DIRS}) - target_link_libraries(openai-client-backend-library PRIVATE ${CUDA_LIBRARIES}) -endif() # TRITON_ENABLE_GPU diff --git a/src/c++/perf_analyzer/client_backend/openai/http_client.cc b/src/c++/perf_analyzer/client_backend/openai/http_client.cc deleted file mode 100644 index 17fb42e08..000000000 --- a/src/c++/perf_analyzer/client_backend/openai/http_client.cc +++ /dev/null @@ -1,301 +0,0 @@ -// Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
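One note before the implementation below: libcurl pulls upload bytes through a read callback, and the OpenAI client later in this patch builds that callback on top of HttpRequest::GetNextInput. A minimal sketch of the pattern, assuming the HttpRequest* has been registered as the callback's user pointer:

  #include <cstddef>
  #include <cstdint>

  // Possible CURLOPT_READFUNCTION built on HttpRequest::GetNextInput.
  static size_t
  ReadCallback(char* buffer, size_t size, size_t nitems, void* userdata)
  {
    auto* request = static_cast<HttpRequest*>(userdata);
    size_t copied = 0;
    // Copies at most size * nitems bytes and reports the amount copied;
    // returning 0 tells libcurl the upload is complete.
    request->GetNextInput(
        reinterpret_cast<uint8_t*>(buffer), size * nitems, &copied);
    return copied;
  }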
- -#include "http_client.h" - -#include -#include - -namespace triton { namespace perfanalyzer { namespace clientbackend { -namespace openai { - -HttpRequest::HttpRequest( - std::function&& completion_callback, const bool verbose) - : completion_callback_(std::move(completion_callback)), verbose_(verbose) -{ -} - -HttpRequest::~HttpRequest() -{ - if (header_list_ != nullptr) { - curl_slist_free_all(header_list_); - header_list_ = nullptr; - } -} - -void -HttpRequest::AddInput(uint8_t* buf, size_t byte_size) -{ - data_buffers_.push_back(std::pair(buf, byte_size)); - total_input_byte_size_ += byte_size; -} - -void -HttpRequest::GetNextInput(uint8_t* buf, size_t size, size_t* input_bytes) -{ - *input_bytes = 0; - - while (!data_buffers_.empty() && size > 0) { - const size_t csz = std::min(data_buffers_.front().second, size); - if (csz > 0) { - const uint8_t* input_ptr = data_buffers_.front().first; - std::copy(input_ptr, input_ptr + csz, buf); - size -= csz; - buf += csz; - *input_bytes += csz; - - data_buffers_.front().first += csz; - data_buffers_.front().second -= csz; - } - if (data_buffers_.front().second == 0) { - data_buffers_.pop_front(); - } - } -} - -std::mutex HttpClient::curl_init_mtx_{}; -HttpClient::HttpClient( - const std::string& server_url, bool verbose, - const HttpSslOptions& ssl_options) - : url_(server_url), verbose_(verbose), ssl_options_(ssl_options) -{ - // [TODO TMA-1670] uncomment below and remove class-wise mutex once confirm - // curl >= 7.84.0 will always be used - // auto* ver = curl_version_info(CURLVERSION_NOW); - // if (ver->features & CURL_VERSION_THREADSAFE == 0) { - // throw std::runtime_error( - // "HTTP client has dependency on CURL library to have thread-safe " - // "support (CURL_VERSION_THREADSAFE set)"); - // } - { - std::lock_guard lk(curl_init_mtx_); - if (curl_global_init(CURL_GLOBAL_ALL) != 0) { - throw std::runtime_error("CURL global initialization failed"); - } - } - - multi_handle_ = curl_multi_init(); - - worker_ = std::thread(&HttpClient::AsyncTransfer, this); -} - -HttpClient::~HttpClient() -{ - { - std::lock_guard lock(mutex_); - exiting_ = true; - } - - curl_multi_wakeup(multi_handle_); - - // thread not joinable if AsyncInfer() is not called - // (it is default constructed thread before the first AsyncInfer() call) - if (worker_.joinable()) { - worker_.join(); - } - - curl_multi_cleanup(multi_handle_); - - { - std::lock_guard lk(curl_init_mtx_); - curl_global_cleanup(); - } -} - -const std::string& -HttpClient::ParseSslCertType(HttpSslOptions::CERTTYPE cert_type) -{ - static std::string pem_str{"PEM"}; - static std::string der_str{"DER"}; - switch (cert_type) { - case HttpSslOptions::CERTTYPE::CERT_PEM: - return pem_str; - case HttpSslOptions::CERTTYPE::CERT_DER: - return der_str; - } - throw std::runtime_error( - "Unexpected SSL certificate type encountered. Only PEM and DER are " - "supported."); -} - -const std::string& -HttpClient::ParseSslKeyType(HttpSslOptions::KEYTYPE key_type) -{ - static std::string pem_str{"PEM"}; - static std::string der_str{"DER"}; - switch (key_type) { - case HttpSslOptions::KEYTYPE::KEY_PEM: - return pem_str; - case HttpSslOptions::KEYTYPE::KEY_DER: - return der_str; - } - throw std::runtime_error( - "unsupported SSL key type encountered. 
Only PEM and DER are " - "supported."); -} - -void -HttpClient::SetSSLCurlOptions(CURL* curl_handle) -{ - curl_easy_setopt( - curl_handle, CURLOPT_SSL_VERIFYPEER, ssl_options_.verify_peer); - curl_easy_setopt( - curl_handle, CURLOPT_SSL_VERIFYHOST, ssl_options_.verify_host); - if (!ssl_options_.ca_info.empty()) { - curl_easy_setopt(curl_handle, CURLOPT_CAINFO, ssl_options_.ca_info.c_str()); - } - const auto& curl_cert_type = ParseSslCertType(ssl_options_.cert_type); - curl_easy_setopt(curl_handle, CURLOPT_SSLCERTTYPE, curl_cert_type.c_str()); - if (!ssl_options_.cert.empty()) { - curl_easy_setopt(curl_handle, CURLOPT_SSLCERT, ssl_options_.cert.c_str()); - } - const auto& curl_key_type = ParseSslKeyType(ssl_options_.key_type); - curl_easy_setopt(curl_handle, CURLOPT_SSLKEYTYPE, curl_key_type.c_str()); - if (!ssl_options_.key.empty()) { - curl_easy_setopt(curl_handle, CURLOPT_SSLKEY, ssl_options_.key.c_str()); - } -} - -void -HttpClient::Send(CURL* handle, std::unique_ptr&& request) -{ - { - std::lock_guard lock(mutex_); - - if (exiting_) { - return; - } - - auto insert_result = new_async_requests_.emplace(std::make_pair( - reinterpret_cast(handle), std::move(request))); - if (!insert_result.second) { - curl_easy_cleanup(handle); - throw std::runtime_error( - "Failed to insert new asynchronous request context."); - } - } - curl_multi_wakeup(multi_handle_); -} - -void -HttpClient::AsyncTransfer() -{ - int messages_in_queue = 0; - int still_running = 0; - int numfds = 0; - CURLMsg* msg = nullptr; - AsyncReqMap ongoing_async_requests; - - do { - { - // Check for new requests and add them to ongoing requests - - std::lock_guard lock(mutex_); - - for (auto& pair : new_async_requests_) { - curl_multi_add_handle( - multi_handle_, reinterpret_cast(pair.first)); - - ongoing_async_requests[pair.first] = std::move(pair.second); - } - new_async_requests_.clear(); - } - - CURLMcode mc = curl_multi_perform(multi_handle_, &still_running); - - if (mc != CURLM_OK) { - std::cerr << "Unexpected error: curl_multi failed. Code:" << mc - << std::endl; - continue; - } - - while ((msg = curl_multi_info_read(multi_handle_, &messages_in_queue))) { - if (msg->msg != CURLMSG_DONE) { - // Something wrong happened. 
- std::cerr << "Unexpected error: received CURLMsg=" << msg->msg - << std::endl; - continue; - } - - uintptr_t identifier = reinterpret_cast(msg->easy_handle); - auto itr = ongoing_async_requests.find(identifier); - // This shouldn't happen - if (itr == ongoing_async_requests.end()) { - std::cerr << "Unexpected error: received completed request that is not " - "in the list of asynchronous requests" - << std::endl; - curl_multi_remove_handle(multi_handle_, msg->easy_handle); - curl_easy_cleanup(msg->easy_handle); - continue; - } - - uint32_t http_code = 400; - if (msg->data.result == CURLE_OK) { - curl_easy_getinfo(msg->easy_handle, CURLINFO_RESPONSE_CODE, &http_code); - } else if (msg->data.result == CURLE_OPERATION_TIMEDOUT) { - http_code = 499; - } - - itr->second->http_code_ = http_code; - itr->second->completion_callback_(itr->second.get()); - ongoing_async_requests.erase(itr); - curl_multi_remove_handle(multi_handle_, msg->easy_handle); - curl_easy_cleanup(msg->easy_handle); - } - - - // Wait for activity on existing requests or - // explicit curl_multi_wakeup call - // - // If there are no descriptors in the multi_handle_ - // then curl_multi_poll will wait until curl_multi_wakeup - // is called - // - // curl_multi_wakeup is called when adding a new request - // or exiting - - mc = curl_multi_poll(multi_handle_, NULL, 0, INT_MAX, &numfds); - - if (mc != CURLM_OK) { - std::cerr << "Unexpected error: curl_multi failed. Code:" << mc - << std::endl; - } - - } while (!exiting_); - - for (auto& request : ongoing_async_requests) { - CURL* easy_handle = reinterpret_cast(request.first); - curl_multi_remove_handle(multi_handle_, easy_handle); - curl_easy_cleanup(easy_handle); - } - - for (auto& request : new_async_requests_) { - CURL* easy_handle = reinterpret_cast(request.first); - curl_easy_cleanup(easy_handle); - } -} - -}}}} // namespace triton::perfanalyzer::clientbackend::openai diff --git a/src/c++/perf_analyzer/client_backend/openai/http_client.h b/src/c++/perf_analyzer/client_backend/openai/http_client.h deleted file mode 100644 index 7ff9bb14e..000000000 --- a/src/c++/perf_analyzer/client_backend/openai/http_client.h +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#pragma once
-
-#include <curl/curl.h>
-
-#include <deque>
-#include <functional>
-#include <map>
-#include <memory>
-#include <mutex>
-#include <string>
-#include <thread>
-
-namespace triton { namespace perfanalyzer { namespace clientbackend {
-namespace openai {
-
-// The options for authorizing and authenticating SSL/TLS connections.
-struct HttpSslOptions {
-  enum CERTTYPE { CERT_PEM = 0, CERT_DER = 1 };
-  enum KEYTYPE {
-    KEY_PEM = 0,
-    KEY_DER = 1
-    // TODO TMA-1645: Support loading private key from crypto engine
-    // KEY_ENG = 2
-  };
-  explicit HttpSslOptions()
-      : verify_peer(1), verify_host(2), cert_type(CERTTYPE::CERT_PEM),
-        key_type(KEYTYPE::KEY_PEM)
-  {
-  }
-  // This option determines whether curl verifies the authenticity of the peer's
-  // certificate. A value of 1 means curl verifies; 0 (zero) means it does not.
-  // Default value is 1. See here for more details:
-  // https://curl.se/libcurl/c/CURLOPT_SSL_VERIFYPEER.html
-  long verify_peer;
-  // This option determines whether libcurl verifies that the server cert is for
-  // the server it is known as. The default value for this option is 2 which
-  // means that the certificate must indicate that the server is the server to
-  // which you meant to connect, or the connection fails. See here for more
-  // details: https://curl.se/libcurl/c/CURLOPT_SSL_VERIFYHOST.html
-  long verify_host;
-  // File holding one or more certificates to verify the peer with. If not
-  // specified, client will look for the system path where cacert bundle is
-  // assumed to be stored, as established at build time. See here for more
-  // information: https://curl.se/libcurl/c/CURLOPT_CAINFO.html
-  std::string ca_info;
-  // The format of client certificate. By default it is CERT_PEM. See here for
-  // more details: https://curl.se/libcurl/c/CURLOPT_SSLCERTTYPE.html
-  CERTTYPE cert_type;
-  // The file name of your client certificate. See here for more details:
-  // https://curl.se/libcurl/c/CURLOPT_SSLCERT.html
-  std::string cert;
-  // The format of the private key. By default it is KEY_PEM. See here for more
-  // details: https://curl.se/libcurl/c/CURLOPT_SSLKEYTYPE.html.
-  KEYTYPE key_type;
-  // The private key. See here for more details:
-  // https://curl.se/libcurl/c/CURLOPT_SSLKEY.html.
-  std::string key;
-};
-
-// HttpRequest object representing the context of an HTTP transaction. Currently
-// it is also designed to be the placeholder for response data, but how the
-// response is stored can be revisited later.
-// 'completion_callback' doesn't transfer ownership of HttpRequest, caller must
-// not keep the reference and access HttpRequest object after
-// 'completion_callback' returns.
-class HttpRequest {
- public:
-  HttpRequest(
-      std::function<void(HttpRequest*)>&& completion_callback,
-      const bool verbose = false);
-  virtual ~HttpRequest();
-
-  // Adds the input data to be delivered to the server, note that the HTTP
-  // request does not own the buffer.
-  void AddInput(uint8_t* buf, size_t byte_size);
-
-  // Helper function for CURL.
-  // Copy into 'buf' up to 'size' bytes of input data. Return the
-  // actual amount copied in 'input_bytes'.
-  void GetNextInput(uint8_t* buf, size_t size, size_t* input_bytes);
-
-  // Buffer that accumulates the response body.
-  std::string response_buffer_;
-
-  size_t total_input_byte_size_{0};
-
-  // HTTP response code for the inference request
-  uint32_t http_code_{200};
-
-  std::function<void(HttpRequest*)> completion_callback_{nullptr};
-
-  // Pointer to the list of the HTTP request header, keep it such that it will
-  // be valid during the transfer and can be freed once transfer is completed.
-  struct curl_slist* header_list_{nullptr};
-
- protected:
-  const bool verbose_{false};
-
-  // Pointers to the input data.
-  std::deque<std::pair<uint8_t*, size_t>> data_buffers_;
-};
-
-// Base class for common HTTP functionalities
-class HttpClient {
- public:
-  enum class CompressionType { NONE, DEFLATE, GZIP };
-
-  virtual ~HttpClient();
-
- protected:
-  void SetSSLCurlOptions(CURL* curl_handle);
-
-  HttpClient(
-      const std::string& server_url, bool verbose = false,
-      const HttpSslOptions& ssl_options = HttpSslOptions());
-
-  // Note that this function does not block
-  void Send(CURL* handle, std::unique_ptr<HttpRequest>&& request);
-
- protected:
-  void AsyncTransfer();
-
-  bool exiting_{false};
-
-  std::thread worker_;
-  std::mutex mutex_;
-
-  // The server url
-  const std::string url_;
-  // The options for authorizing and authenticating SSL/TLS connections
-  HttpSslOptions ssl_options_;
-
-  using AsyncReqMap = std::map<uintptr_t, std::unique_ptr<HttpRequest>>;
-  // curl multi handle for processing asynchronous requests
-  void* multi_handle_;
-  // map to record new asynchronous requests with pointer to easy handle
-  // or tag id as key
-  AsyncReqMap new_async_requests_;
-
-  bool verbose_;
-
- private:
-  const std::string& ParseSslKeyType(HttpSslOptions::KEYTYPE key_type);
-  const std::string& ParseSslCertType(HttpSslOptions::CERTTYPE cert_type);
-  static std::mutex curl_init_mtx_;
-};
-}}}}  // namespace triton::perfanalyzer::clientbackend::openai
diff --git a/src/c++/perf_analyzer/client_backend/openai/openai_client.cc b/src/c++/perf_analyzer/client_backend/openai/openai_client.cc
deleted file mode 100644
index 9b167fae1..000000000
--- a/src/c++/perf_analyzer/client_backend/openai/openai_client.cc
+++ /dev/null
@@ -1,319 +0,0 @@
-// Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Include this first to make sure we are a friend of common classes. -#define TRITON_INFERENCE_SERVER_CLIENT_CLASS InferenceServerHttpClient -#include "openai_client.h" - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "common.h" - -#ifdef TRITON_ENABLE_ZLIB -#include -#endif - -extern "C" { -#include "cencode.h" -} - -#ifdef _WIN32 -#define strncasecmp(x, y, z) _strnicmp(x, y, z) -#undef min // NOMINMAX did not resolve std::min compile error -#endif //_WIN32 - -namespace triton { namespace perfanalyzer { namespace clientbackend { -namespace openai { - -//============================================================================== - -void -ChatCompletionRequest::SendResponse(bool is_final, bool is_null) -{ - final_response_sent_ = is_final; - response_callback_(new ChatCompletionResult( - http_code_, std::move(response_buffer_), is_final, is_null, request_id_)); -} - -ChatCompletionClient::ChatCompletionClient( - const std::string& url, const std::string& endpoint, bool verbose, - const HttpSslOptions& ssl_options) - : HttpClient(std::string(url + "/" + endpoint), verbose, ssl_options) -{ -} - -size_t -ChatCompletionClient::RequestProvider( - void* contents, size_t size, size_t nmemb, void* userp) -{ - auto request = reinterpret_cast(userp); - - size_t input_bytes = 0; - request->GetNextInput( - reinterpret_cast(contents), size * nmemb, &input_bytes); - - request->timer_.CaptureTimestamp( - triton::client::RequestTimers::Kind::SEND_END); - - return input_bytes; -} - -size_t -ChatCompletionClient::ResponseHeaderHandler( - void* contents, size_t size, size_t nmemb, void* userp) -{ - auto request = reinterpret_cast(userp); - - char* buf = reinterpret_cast(contents); - size_t byte_size = size * nmemb; - - std::string hdr(buf, byte_size); - std::transform(hdr.begin(), hdr.end(), hdr.begin(), [](unsigned char c) { - return std::tolower(c); - }); - if (hdr.find("content-type") != std::string::npos && - hdr.find("text/event-stream") != std::string::npos) { - request->is_stream_ = true; - } - return byte_size; -} - -size_t -ChatCompletionClient::ResponseHandler( - void* contents, size_t size, size_t nmemb, void* userp) -{ - // [TODO TMA-1666] verify if the SSE responses received are complete, or the - // response need to be stitched first. To verify, print out the received - // responses from SendResponse() to make sure the OpenAI server doesn't chunk - // the HTTP responses in the way that misaligns with the SSE responses. Reason - // of not stitching responses now is that it is a bit complicated that to make - // the write callback bulletproof is to assume the response can be chunked at - // arbitrary position, then bake in checking for SSE style (data:.*\n\n) by - // iterating all received buffer character by character. - size_t result_bytes = size * nmemb; - // return early if the response is empty as the response handling is - // triggered by the content of the response. 
- if (result_bytes == 0) { - return result_bytes; - } - - auto request = reinterpret_cast(userp); - if (request->timer_.Timestamp( - triton::client::RequestTimers::Kind::RECV_START) == 0) { - request->timer_.CaptureTimestamp( - triton::client::RequestTimers::Kind::RECV_START); - } - - char* buf = reinterpret_cast(contents); - request->response_buffer_.append(buf, result_bytes); - // Send response now if streaming, otherwise wait until request has been - // completed - if (request->is_stream_) { - auto done_signal = - (request->response_buffer_.find("data: [DONE]") != std::string::npos); - request->SendResponse( - done_signal /* is_final */, done_signal /* is_null */); - } - - // ResponseHandler may be called multiple times so we overwrite - // RECV_END so that we always have the time of the last. - request->timer_.CaptureTimestamp( - triton::client::RequestTimers::Kind::RECV_END); - - return result_bytes; -} - - -Error -ChatCompletionClient::AsyncInfer( - std::function callback, - std::string& serialized_request_body, const std::string& request_id, - const Headers& headers) -{ - if (callback == nullptr) { - return Error( - "Callback function must be provided along with AsyncInfer() call."); - } - - auto completion_callback = [this](HttpRequest* req) { - auto request = static_cast(req); - request->timer_.CaptureTimestamp( - triton::client::RequestTimers::Kind::REQUEST_END); - UpdateInferStat(request->timer_); - - // Send final response on request completion - // if it has not already been sent. - // (e.g. in the case of seeing [DONE] in streaming case) - if (!request->IsFinalResponseSent()) { - request->SendResponse(true /* is_final */, false /* is_null */); - } - }; - std::unique_ptr request(new ChatCompletionRequest( - std::move(completion_callback), std::move(callback), request_id, - verbose_)); - auto raw_request = static_cast(request.get()); - raw_request->timer_.CaptureTimestamp( - triton::client::RequestTimers::Kind::REQUEST_START); - request->AddInput( - reinterpret_cast(serialized_request_body.data()), - serialized_request_body.size()); - - CURL* multi_easy_handle = curl_easy_init(); - Error err = PreRunProcessing(multi_easy_handle, raw_request, headers); - if (!err.IsOk()) { - curl_easy_cleanup(multi_easy_handle); - return err; - } - - raw_request->timer_.CaptureTimestamp( - triton::client::RequestTimers::Kind::SEND_START); - Send(multi_easy_handle, std::move(request)); - return Error::Success; -} - -Error -ChatCompletionClient::PreRunProcessing( - CURL* curl, ChatCompletionRequest* request, const Headers& headers) -{ - curl_easy_setopt(curl, CURLOPT_URL, url_.c_str()); - curl_easy_setopt(curl, CURLOPT_USERAGENT, "libcurl-agent/1.0"); - curl_easy_setopt(curl, CURLOPT_POST, 1L); - curl_easy_setopt(curl, CURLOPT_TCP_NODELAY, 1L); - - if (verbose_) { - curl_easy_setopt(curl, CURLOPT_VERBOSE, 1L); - } - - const long buffer_byte_size = 16 * 1024 * 1024; - curl_easy_setopt(curl, CURLOPT_UPLOAD_BUFFERSIZE, buffer_byte_size); - curl_easy_setopt(curl, CURLOPT_BUFFERSIZE, buffer_byte_size); - - // request data provided by RequestProvider() - curl_easy_setopt(curl, CURLOPT_READFUNCTION, RequestProvider); - curl_easy_setopt(curl, CURLOPT_READDATA, request); - - // response headers handled by ResponseHeaderHandler() - curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, ResponseHeaderHandler); - curl_easy_setopt(curl, CURLOPT_HEADERDATA, request); - - // response data handled by ResponseHandler() - curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, ResponseHandler); - curl_easy_setopt(curl, 
CURLOPT_WRITEDATA, request); - - const curl_off_t post_byte_size = request->total_input_byte_size_; - curl_easy_setopt(curl, CURLOPT_POSTFIELDSIZE_LARGE, post_byte_size); - - SetSSLCurlOptions(curl); - - struct curl_slist* list = nullptr; - list = curl_slist_append(list, "Expect:"); - list = curl_slist_append(list, "Content-Type: application/json"); - - for (const auto& pr : headers) { - std::string hdr = pr.first + ": " + pr.second; - list = curl_slist_append(list, hdr.c_str()); - } - - curl_easy_setopt(curl, CURLOPT_HTTPHEADER, list); - - // The list will be freed when the request is destructed - request->header_list_ = list; - - return Error::Success; -} - -Error -ChatCompletionClient::UpdateInferStat( - const triton::client::RequestTimers& timer) -{ - const uint64_t request_time_ns = timer.Duration( - triton::client::RequestTimers::Kind::REQUEST_START, - triton::client::RequestTimers::Kind::REQUEST_END); - const uint64_t send_time_ns = timer.Duration( - triton::client::RequestTimers::Kind::SEND_START, - triton::client::RequestTimers::Kind::SEND_END); - const uint64_t recv_time_ns = timer.Duration( - triton::client::RequestTimers::Kind::RECV_START, - triton::client::RequestTimers::Kind::RECV_END); - - if ((request_time_ns == std::numeric_limits::max()) || - (send_time_ns == std::numeric_limits::max()) || - (recv_time_ns == std::numeric_limits::max())) { - return Error( - "Timer not set correctly." + - ((timer.Timestamp(triton::client::RequestTimers::Kind::REQUEST_START) > - timer.Timestamp(triton::client::RequestTimers::Kind::REQUEST_END)) - ? (" Request time from " + - std::to_string(timer.Timestamp( - triton::client::RequestTimers::Kind::REQUEST_START)) + - " to " + - std::to_string(timer.Timestamp( - triton::client::RequestTimers::Kind::REQUEST_END)) + - ".") - : "") + - ((timer.Timestamp(triton::client::RequestTimers::Kind::SEND_START) > - timer.Timestamp(triton::client::RequestTimers::Kind::SEND_END)) - ? (" Send time from " + - std::to_string(timer.Timestamp( - triton::client::RequestTimers::Kind::SEND_START)) + - " to " + - std::to_string(timer.Timestamp( - triton::client::RequestTimers::Kind::SEND_END)) + - ".") - : "") + - ((timer.Timestamp(triton::client::RequestTimers::Kind::RECV_START) > - timer.Timestamp(triton::client::RequestTimers::Kind::RECV_END)) - ? (" Receive time from " + - std::to_string(timer.Timestamp( - triton::client::RequestTimers::Kind::RECV_START)) + - " to " + - std::to_string(timer.Timestamp( - triton::client::RequestTimers::Kind::RECV_END)) + - ".") - : "")); - } - - infer_stat_.completed_request_count++; - infer_stat_.cumulative_total_request_time_ns += request_time_ns; - infer_stat_.cumulative_send_time_ns += send_time_ns; - infer_stat_.cumulative_receive_time_ns += recv_time_ns; - - return Error::Success; -} - -//============================================================================== - -}}}} // namespace triton::perfanalyzer::clientbackend::openai diff --git a/src/c++/perf_analyzer/client_backend/openai/openai_client.h b/src/c++/perf_analyzer/client_backend/openai/openai_client.h deleted file mode 100644 index 00ccbd5fa..000000000 --- a/src/c++/perf_analyzer/client_backend/openai/openai_client.h +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#pragma once - -#include -#include - -#include "../client_backend.h" -#include "common.h" -#include "http_client.h" - - -namespace triton { namespace perfanalyzer { namespace clientbackend { -namespace openai { - -class ChatCompletionResult : public InferResult { - public: - ChatCompletionResult( - uint32_t http_code, std::string&& serialized_response, bool is_final, - bool is_null, const std::string& request_id) - : http_code_(http_code), - serialized_response_(std::move(serialized_response)), - is_final_(is_final), is_null_(is_null), request_id_(request_id) - { - } - virtual ~ChatCompletionResult() = default; - - /// Get the id of the request which generated this response. - /// \param id Returns the request id that generated the result. - /// \return Error object indicating success or failure. - Error Id(std::string* id) const override - { - *id = request_id_; - return Error::Success; - } - - - /// Returns the status of the request. - /// \return Error object indicating the success or failure of the - /// request. - Error RequestStatus() const override - { - if ((http_code_ >= 400) && (http_code_ <= 599)) { - return Error( - "OpenAI response returns HTTP code " + std::to_string(http_code_)); - } - return Error::Success; - } - - /// Returns the raw data of the output. - /// \return Error object indicating the success or failure of the - /// request. - Error RawData( - const std::string& output_name, const uint8_t** buf, - size_t* byte_size) const override - { - // There is only a single output (and it has no defined name), so we can - // disregard output_name - *buf = reinterpret_cast(serialized_response_.c_str()); - *byte_size = serialized_response_.size(); - return Error::Success; - } - - /// Get final response bool for this response. - /// \return Error object indicating the success or failure. - Error IsFinalResponse(bool* is_final_response) const override - { - *is_final_response = is_final_; - return Error::Success; - }; - - /// Get null response bool for this response. 
- /// \return Error object indicating the success or failure. - Error IsNullResponse(bool* is_null_response) const override - { - *is_null_response = is_null_; - return Error::Success; - }; - - private: - const uint32_t http_code_{200}; - const std::string serialized_response_; - const bool is_final_{false}; - const bool is_null_{false}; - const std::string request_id_; -}; - - -class ChatCompletionRequest : public HttpRequest { - public: - virtual ~ChatCompletionRequest() {} - ChatCompletionRequest( - std::function&& completion_callback, - std::function&& response_callback, - const std::string& request_id, const bool verbose = false) - : HttpRequest(std::move(completion_callback), verbose), - response_callback_(std::move(response_callback)), - request_id_(request_id) - { - } - bool IsFinalResponseSent() { return final_response_sent_; }; - void SendResponse(bool is_final, bool is_null); - bool is_stream_{false}; - std::function response_callback_{nullptr}; - // The timers for infer request. - triton::client::RequestTimers timer_; - const std::string request_id_; - bool final_response_sent_{false}; -}; - -class ChatCompletionClient : public HttpClient { - public: - virtual ~ChatCompletionClient() = default; - - /// Create a client that can be used to communicate with the server. - /// \param server_url The inference server name, port, optional - /// scheme and optional base path in the following format: - /// host:port/. - /// \param endpoint The name of the endpoint to send requests to - /// \param verbose If true generate verbose output when contacting - /// the inference server. - /// \param ssl_options Specifies the settings for configuring - /// SSL encryption and authorization. Providing these options - /// do not ensure that SSL/TLS will be used in communication. - /// The use of SSL/TLS depends entirely on the server endpoint. - /// These options will be ignored if the server_url does not - /// expose `https://` scheme. - ChatCompletionClient( - const std::string& server_url, const std::string& endpoint, - bool verbose = false, - const HttpSslOptions& ssl_options = HttpSslOptions()); - - /// Simplified AsyncInfer() where the request body is expected to be - /// prepared by the caller, the client here is responsible to communicate - /// with a OpenAI-compatible server in both streaming and non-streaming case. - Error AsyncInfer( - std::function callback, - std::string& serialized_request_body, const std::string& request_id, - const Headers& headers); - - const InferStat& ClientInferStat() { return infer_stat_; } - - private: - // setup curl handle - Error PreRunProcessing( - CURL* curl, ChatCompletionRequest* request, const Headers& headers); - - static size_t ResponseHandler( - void* contents, size_t size, size_t nmemb, void* userp); - static size_t RequestProvider( - void* contents, size_t size, size_t nmemb, void* userp); - static size_t ResponseHeaderHandler( - void* contents, size_t size, size_t nmemb, void* userp); - - Error UpdateInferStat(const triton::client::RequestTimers& timer); - InferStat infer_stat_; -}; - -}}}} // namespace triton::perfanalyzer::clientbackend::openai diff --git a/src/c++/perf_analyzer/client_backend/openai/openai_client_backend.cc b/src/c++/perf_analyzer/client_backend/openai/openai_client_backend.cc deleted file mode 100644 index 15bbbdc68..000000000 --- a/src/c++/perf_analyzer/client_backend/openai/openai_client_backend.cc +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "openai_client_backend.h" - -#include "openai_infer_input.h" - -namespace triton { namespace perfanalyzer { namespace clientbackend { -namespace openai { - -//============================================================================== - -Error -OpenAiClientBackend::Create( - const std::string& url, const std::string& endpoint, - const ProtocolType protocol, std::shared_ptr http_headers, - const bool verbose, std::unique_ptr* client_backend) -{ - if (protocol == ProtocolType::GRPC) { - return Error( - "perf_analyzer does not support gRPC protocol with OpenAI endpoints"); - } - std::unique_ptr openai_client_backend( - new OpenAiClientBackend(http_headers)); - - openai_client_backend->http_client_.reset( - new ChatCompletionClient(url, endpoint, verbose)); - - *client_backend = std::move(openai_client_backend); - - return Error::Success; -} - -Error -OpenAiClientBackend::AsyncInfer( - OnCompleteFn callback, const InferOptions& options, - const std::vector& inputs, - const std::vector& outputs) -{ - if (inputs.size() != 1) { - return Error("Only expecting one input"); - } - - auto raw_input = dynamic_cast(inputs[0]); - raw_input->PrepareForRequest(); - RETURN_IF_CB_ERROR(http_client_->AsyncInfer( - callback, raw_input->GetRequestBody(), options.request_id_, - *http_headers_)); - return Error::Success; -} - - -Error -OpenAiClientBackend::ClientInferStat(InferStat* infer_stat) -{ - *infer_stat = http_client_->ClientInferStat(); - return Error::Success; -} - -//============================================================================== - -Error -OpenAiInferRequestedOutput::Create( - InferRequestedOutput** infer_output, const std::string& name, - const std::string& datatype) -{ - OpenAiInferRequestedOutput* local_infer_output = - new OpenAiInferRequestedOutput(name, datatype); - - tc::InferRequestedOutput* openai_infer_output; - RETURN_IF_TRITON_ERROR(tc::InferRequestedOutput::Create( - &openai_infer_output, name, 0, datatype)); - local_infer_output->output_.reset(openai_infer_output); - - *infer_output = 
local_infer_output; - - return Error::Success; -} - -OpenAiInferRequestedOutput::OpenAiInferRequestedOutput( - const std::string& name, const std::string& datatype) - : InferRequestedOutput(BackendKind::OPENAI, name, datatype) -{ -} - -//============================================================================== - - -}}}} // namespace triton::perfanalyzer::clientbackend::openai diff --git a/src/c++/perf_analyzer/client_backend/openai/openai_client_backend.h b/src/c++/perf_analyzer/client_backend/openai/openai_client_backend.h deleted file mode 100644 index 2d475eacf..000000000 --- a/src/c++/perf_analyzer/client_backend/openai/openai_client_backend.h +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#pragma once - -#include - -#include "../../perf_utils.h" -#include "../client_backend.h" -#include "openai_client.h" -#include "openai_infer_input.h" - -#define RETURN_IF_TRITON_ERROR(S) \ - do { \ - const tc::Error& status__ = (S); \ - if (!status__.IsOk()) { \ - return Error(status__.Message()); \ - } \ - } while (false) - -namespace tc = triton::client; -namespace cb = triton::perfanalyzer::clientbackend; - -namespace triton { namespace perfanalyzer { namespace clientbackend { -namespace openai { - - -//============================================================================== -/// OpenAiClientBackend is used to generate load on the serving instance, -/// which supports OpenAI Chat Completions API -/// -class OpenAiClientBackend : public ClientBackend { - public: - /// Create an OpenAI client backend which can be used to interact with the - /// server. - /// \param url The inference server url and port. - /// \param endpoint The endpoint on the inference server to send requests to - /// \param protocol The protocol type used. - /// \param http_headers Map of HTTP headers. The map key/value indicates - /// the header name/value. - /// \param verbose Enables the verbose mode. - /// \param client_backend Returns a new OpenAiClientBackend - /// object. 
- /// \return Error object indicating success or failure. - static Error Create( - const std::string& url, const std::string& endpoint, - const ProtocolType protocol, std::shared_ptr http_headers, - const bool verbose, std::unique_ptr* client_backend); - - /// See ClientBackend::AsyncInfer() - Error AsyncInfer( - OnCompleteFn callback, const InferOptions& options, - const std::vector& inputs, - const std::vector& outputs) override; - - /// See ClientBackend::ClientInferStat() - Error ClientInferStat(InferStat* infer_stat) override; - - private: - OpenAiClientBackend(std::shared_ptr http_headers) - : ClientBackend(BackendKind::OPENAI), http_headers_(http_headers) - { - } - - std::unique_ptr http_client_; - std::shared_ptr http_headers_; -}; - -//============================================================== -/// OpenAiInferRequestedOutput is a wrapper around -/// InferRequestedOutput object of triton common client library. -/// -class OpenAiInferRequestedOutput : public InferRequestedOutput { - public: - static Error Create( - InferRequestedOutput** infer_output, const std::string& name, - const std::string& datatype); - /// Returns the raw InferRequestedOutput object required by OpenAi client - /// library. - tc::InferRequestedOutput* Get() const { return output_.get(); } - - private: - explicit OpenAiInferRequestedOutput( - const std::string& name, const std::string& datatype); - - std::unique_ptr output_; -}; - -}}}} // namespace triton::perfanalyzer::clientbackend::openai diff --git a/src/c++/perf_analyzer/client_backend/openai/openai_infer_input.cc b/src/c++/perf_analyzer/client_backend/openai/openai_infer_input.cc deleted file mode 100644 index dcf213fc2..000000000 --- a/src/c++/perf_analyzer/client_backend/openai/openai_infer_input.cc +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
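As context for the input implementation below: OpenAiClientBackend::AsyncInfer above expects exactly one input whose appended raw bytes are the serialized JSON request body. A sketch of the caller-side assembly; the input name, dims, and datatype here are illustrative placeholders, not values taken from this patch:

  #include <cstdint>
  #include <string>

  std::string body =
      R"({"messages": [{"role": "user", "content": "hi"}]})";

  InferInput* input = nullptr;
  Error err = OpenAiInferInput::Create(&input, "payload", {1}, "BYTES");
  if (err.IsOk()) {
    // AppendRaw() does not copy, so 'body' must outlive the request. The
    // backend later calls PrepareForRequest() to make the bytes contiguous
    // and GetRequestBody() to hand them to the HTTP client.
    err = input->AppendRaw(
        reinterpret_cast<const uint8_t*>(body.data()), body.size());
  }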
-
-#include "openai_infer_input.h"
-
-namespace triton { namespace perfanalyzer { namespace clientbackend {
-namespace openai {
-
-Error
-OpenAiInferInput::Create(
-    InferInput** infer_input, const std::string& name,
-    const std::vector<int64_t>& dims, const std::string& datatype)
-{
-  OpenAiInferInput* local_infer_input =
-      new OpenAiInferInput(name, dims, datatype);
-
-  *infer_input = local_infer_input;
-  return Error::Success;
-}
-
-Error
-OpenAiInferInput::SetShape(const std::vector<int64_t>& shape)
-{
-  shape_ = shape;
-  return Error::Success;
-}
-
-Error
-OpenAiInferInput::Reset()
-{
-  data_str_.clear();
-
-  bufs_.clear();
-  buf_byte_sizes_.clear();
-  byte_size_ = 0;
-  return Error::Success;
-}
-
-Error
-OpenAiInferInput::AppendRaw(const uint8_t* input, size_t input_byte_size)
-{
-  data_str_.clear();
-
-  byte_size_ += input_byte_size;
-
-  bufs_.push_back(input);
-  buf_byte_sizes_.push_back(input_byte_size);
-  return Error::Success;
-}
-
-Error
-OpenAiInferInput::RawData(const uint8_t** buf, size_t* byte_size)
-{
-  // TMA-1775 - handle multi-batch case
-  *buf = bufs_[0];
-  *byte_size = buf_byte_sizes_[0];
-  return Error::Success;
-}
-
-Error
-OpenAiInferInput::PrepareForRequest()
-{
-  // Reset position so request sends entire input.
-  if (data_str_.empty() && (byte_size_ != 0)) {
-    for (size_t i = 0; i < bufs_.size(); ++i) {
-      data_str_.append(
-          reinterpret_cast<const char*>(bufs_[i]), buf_byte_sizes_[i]);
-    }
-  }
-  return Error::Success;
-}
-
-OpenAiInferInput::OpenAiInferInput(
-    const std::string& name, const std::vector<int64_t>& dims,
-    const std::string& datatype)
-    : InferInput(BackendKind::OPENAI, name, datatype), shape_(dims)
-{
-}
-
-}}}}  // namespace triton::perfanalyzer::clientbackend::openai
diff --git a/src/c++/perf_analyzer/client_backend/openai/openai_infer_input.h b/src/c++/perf_analyzer/client_backend/openai/openai_infer_input.h
deleted file mode 100644
index 93a12b519..000000000
--- a/src/c++/perf_analyzer/client_backend/openai/openai_infer_input.h
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#pragma once
-
-#include <string>
-
-#include "../../perf_utils.h"
-#include "../client_backend.h"
-
-
-namespace triton { namespace perfanalyzer { namespace clientbackend {
-namespace openai {
-
-//==============================================================
-/// OpenAiInferInput instance holds the information regarding
-/// model input tensors and their corresponding generated data.
-///
-class OpenAiInferInput : public InferInput {
- public:
-  static Error Create(
-      InferInput** infer_input, const std::string& name,
-      const std::vector<int64_t>& dims, const std::string& datatype);
-  /// See InferInput::Shape()
-  const std::vector<int64_t>& Shape() const override { return shape_; }
-  /// See InferInput::SetShape()
-  Error SetShape(const std::vector<int64_t>& shape) override;
-  /// See InferInput::Reset()
-  Error Reset() override;
-  /// See InferInput::AppendRaw()
-  Error AppendRaw(const uint8_t* input, size_t input_byte_size) override;
-  /// See InferInput::RawData()
-  Error RawData(const uint8_t** buf, size_t* byte_size) override;
-  /// Prepare the input to be in the form expected by an OpenAI client;
-  /// must be called before accessing the data.
-  Error PrepareForRequest();
-  /// Get the contiguous request body string
-  std::string& GetRequestBody() { return data_str_; }
-
- private:
-  explicit OpenAiInferInput(
-      const std::string& name, const std::vector<int64_t>& dims,
-      const std::string& datatype);
-
-  std::vector<int64_t> shape_;
-  size_t byte_size_{0};
-
-  std::vector<const uint8_t*> bufs_;
-  std::vector<size_t> buf_byte_sizes_;
-  std::string data_str_;
-};
-
-}}}}  // namespace triton::perfanalyzer::clientbackend::openai
diff --git a/src/c++/perf_analyzer/client_backend/tensorflow_serving/CMakeLists.txt b/src/c++/perf_analyzer/client_backend/tensorflow_serving/CMakeLists.txt
deleted file mode 100644
index ba1c2fa40..000000000
--- a/src/c++/perf_analyzer/client_backend/tensorflow_serving/CMakeLists.txt
+++ /dev/null
@@ -1,122 +0,0 @@
-# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#  * Redistributions of source code must retain the above copyright
-#    notice, this list of conditions and the following disclaimer.
-#  * Redistributions in binary form must reproduce the above copyright
-#    notice, this list of conditions and the following disclaimer in the
-#    documentation and/or other materials provided with the distribution.
-#  * Neither the name of NVIDIA CORPORATION nor the names of its
-#    contributors may be used to endorse or promote products derived
-#    from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-cmake_minimum_required (VERSION 3.18)
-
-FetchContent_Declare(tensorflow-serving-repo
-  PREFIX tensorflow-serving-rep
-)
-FetchContent_GetProperties(tensorflow-serving-repo)
-if(NOT tensorflow-serving-repo_POPULATED)
-  FetchContent_Populate(tensorflow-serving-repo
-    GIT_REPOSITORY "https://github.com/tensorflow/serving.git"
-    GIT_TAG "2.3.0"
-    SOURCE_DIR "${CMAKE_CURRENT_BINARY_DIR}/tensorflow-serving-repo/src/tensorflow_serving"
-)
-endif()
-
-FetchContent_Declare(tensorflow-repo
-  PREFIX tensorflow-repo
-  SOURCE_DIR "${CMAKE_CURRENT_BINARY_DIR}/tensorflow-repo/src/tensorflow"
-)
-FetchContent_GetProperties(tensorflow-repo)
-if(NOT tensorflow-repo_POPULATED)
-  FetchContent_Populate(tensorflow-repo
-    GIT_REPOSITORY "https://github.com/tensorflow/tensorflow.git"
-    GIT_TAG "v2.3.0"
-    SOURCE_DIR "${CMAKE_CURRENT_BINARY_DIR}/tensorflow-repo/src/tensorflow"
-)
-endif()
-
-
-set(TENSORFLOW_PATH ${CMAKE_CURRENT_BINARY_DIR}/tensorflow-repo/src/tensorflow)
-set(TFSERVE_PATH ${CMAKE_CURRENT_BINARY_DIR}/tensorflow-serving-repo/src/tensorflow_serving)
-
-# Copy the repos to a proto staging area.
-file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/protos)
-execute_process(COMMAND ${CMAKE_COMMAND} -E copy_directory ${TENSORFLOW_PATH}/tensorflow
-                ${CMAKE_BINARY_DIR}/protos/tensorflow)
-execute_process(COMMAND ${CMAKE_COMMAND} -E copy_directory ${TFSERVE_PATH}/tensorflow_serving
-                ${CMAKE_BINARY_DIR}/protos/tensorflow_serving)
-
-# Protobuf compiler dependency.
-include(CompileProto.cmake)
-
-# Protobuf sources of TensorFlow Serving to be compiled without the gRPC plugin.
-file(GLOB_RECURSE TFSERVING_PROTOS ${CMAKE_BINARY_DIR}/protos/tensorflow_serving/*.proto)
-file(GLOB TF_EXAMPLE_PROTOS ${CMAKE_BINARY_DIR}/protos/tensorflow/core/example/*.proto)
-file(GLOB TF_FW_PROTOS ${CMAKE_BINARY_DIR}/protos/tensorflow/core/framework/*.proto)
-file(GLOB TF_PROTOBUF_PROTOS ${CMAKE_BINARY_DIR}/protos/tensorflow/core/protobuf/*.proto)
-
-# This is a dirty hack to prevent unnecessarily leaking a dependency.
-list(FILTER TF_PROTOBUF_PROTOS EXCLUDE REGEX "autotuning.proto$|conv_autotuning.proto$")
-
-# Compiling CPP sources from proto files.
-compile_proto(0 "${CMAKE_BINARY_DIR}/protos" "${CMAKE_CURRENT_BINARY_DIR}/compiled" PB_SOURCES PB_HEADERS
-              ${TFSERVING_PROTOS} ${TF_EXAMPLE_PROTOS} ${TF_FW_PROTOS} ${TF_PROTOBUF_PROTOS})
-
-# Compiling CPP sources with gRPC plugin.
-compile_proto(1 "${CMAKE_BINARY_DIR}/protos" "${CMAKE_CURRENT_BINARY_DIR}/compiled" PB_GRPC_SOURCES PB_GRPC_HEADERS
-              ${CMAKE_BINARY_DIR}/protos/tensorflow_serving/apis/prediction_service.proto)
-
-set(
-  TFS_CLIENT_BACKEND_SRCS
-  tfserve_client_backend.cc
-  tfserve_infer_input.cc
-  tfserve_grpc_client.cc
-  ${PB_SOURCES}
-  ${PB_GRPC_SOURCES}
-)
-
-set(
-  TFS_CLIENT_BACKEND_HDRS
-  tfserve_client_backend.h
-  tfserve_infer_input.h
-  tfserve_grpc_client.h
-  ${PB_HEADERS}
-  ${PB_GRPC_HEADERS}
-)
-
-add_library(
-  tfs-client-backend-library EXCLUDE_FROM_ALL OBJECT
-  ${TFS_CLIENT_BACKEND_SRCS}
-  ${TFS_CLIENT_BACKEND_HDRS}
-)
-
-target_include_directories(tfs-client-backend-library PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/compiled)
-
-target_link_libraries(
-  tfs-client-backend-library
-  PUBLIC gRPC::grpc++
-  PUBLIC gRPC::grpc
-  PUBLIC protobuf::libprotobuf
-  PUBLIC grpcclient_static
-)
-
-if(${TRITON_ENABLE_GPU})
-  target_include_directories(tfs-client-backend-library PUBLIC ${CUDA_INCLUDE_DIRS})
-  target_link_libraries(tfs-client-backend-library PRIVATE ${CUDA_LIBRARIES})
-endif() # TRITON_ENABLE_GPU
diff --git a/src/c++/perf_analyzer/client_backend/tensorflow_serving/CompileProto.cmake b/src/c++/perf_analyzer/client_backend/tensorflow_serving/CompileProto.cmake
deleted file mode 100644
index 79de28e4e..000000000
--- a/src/c++/perf_analyzer/client_backend/tensorflow_serving/CompileProto.cmake
+++ /dev/null
@@ -1,116 +0,0 @@
-# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#  * Redistributions of source code must retain the above copyright
-#    notice, this list of conditions and the following disclaimer.
-#  * Redistributions in binary form must reproduce the above copyright
-#    notice, this list of conditions and the following disclaimer in the
-#    documentation and/or other materials provided with the distribution.
-#  * Neither the name of NVIDIA CORPORATION nor the names of its
-#    contributors may be used to endorse or promote products derived
-#    from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# A function that creates CPP sources from proto files.
-function(COMPILE_PROTO USE_GRPC PROTO_PATH OUT_PATH SRCS HDRS)
-
-  # Checking args.
-  if(NOT ARGN)
-    message(SEND_ERROR "Error: COMPILE_PROTO() called without any proto files")
-    return()
-  endif()
-
-  # To collect paths to created sources and headers.
-  set(${SRCS})
-  set(${HDRS})
-
-
-
-  # Getting actual absolute paths to all protos location and output directory.
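  # (Both are needed so that each proto's path relative to PROTO_PATH can be
  # mirrored under OUT_PATH, which keeps the generated headers reachable via
  # their original include paths.)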
-  get_filename_component(ABS_PROTO_PATH "${PROTO_PATH}" ABSOLUTE)
-  get_filename_component(ABS_OUT_PATH "${OUT_PATH}" ABSOLUTE)
-
-  # Launching sources generation for all proto files.
-  foreach(FIL ${ARGN})
-
-    # Getting the absolute path and filename without extension for the current proto file.
-    get_filename_component(ABS_FIL "${FIL}" ABSOLUTE)
-    get_filename_component(FIL_WE "${FIL}" NAME_WE)
-
-    # Getting the relative dir of the proto file (relative to the protos root dir).
-    file(RELATIVE_PATH REL_FIL_TO_PROTO "${ABS_PROTO_PATH}" "${ABS_FIL}")
-    get_filename_component(REL_DIR_TO_PROTO "${REL_FIL_TO_PROTO}" DIRECTORY)
-
-    # Preparing a path to label created sources from proto.
-    set(COMPILED_NAME_TEMPLATE "${ABS_OUT_PATH}/${REL_DIR_TO_PROTO}/${FIL_WE}")
-
-
-
-    # Firing sources generation command with gRPC application.
-    if(${USE_GRPC})
-      set(_GRPC_CPP_PLUGIN_EXECUTABLE $<TARGET_FILE:gRPC::grpc_cpp_plugin>)
-
-      # Marking created files for CMake.
-      list(APPEND ${SRCS} "${COMPILED_NAME_TEMPLATE}.grpc.pb.cc")
-      list(APPEND ${HDRS} "${COMPILED_NAME_TEMPLATE}.grpc.pb.h")
-
-      # Launching proto compilation command.
-      add_custom_command(
-        COMMAND ${CMAKE_COMMAND} -E make_directory "${ABS_OUT_PATH}"
-        OUTPUT
-          "${COMPILED_NAME_TEMPLATE}.grpc.pb.cc"
-          "${COMPILED_NAME_TEMPLATE}.grpc.pb.h"
-        COMMAND
-          ${Protobuf_PROTOC_EXECUTABLE}
-        ARGS
-          --grpc_out=${ABS_OUT_PATH}
-          --plugin=protoc-gen-grpc=${_GRPC_CPP_PLUGIN_EXECUTABLE}
-          --proto_path=${ABS_PROTO_PATH}
-          ${ABS_FIL}
-        DEPENDS
-          ${ABS_FIL} ${Protobuf_PROTOC_EXECUTABLE}
-        COMMENT
-          "Running gRPC C++ protocol buffer compiler on ${FIL}"
-        VERBATIM)
-
-    # Without gRPC.
-    else()
-      list(APPEND ${SRCS} "${COMPILED_NAME_TEMPLATE}.pb.cc")
-      list(APPEND ${HDRS} "${COMPILED_NAME_TEMPLATE}.pb.h")
-      add_custom_command(
-        COMMAND ${CMAKE_COMMAND} -E make_directory "${ABS_OUT_PATH}"
-        OUTPUT
-          "${COMPILED_NAME_TEMPLATE}.pb.cc"
-          "${COMPILED_NAME_TEMPLATE}.pb.h"
-        COMMAND
-          ${Protobuf_PROTOC_EXECUTABLE}
-        ARGS
-          --cpp_out=${ABS_OUT_PATH}
-          --proto_path=${ABS_PROTO_PATH}
-          ${ABS_FIL}
-        DEPENDS
-          ${ABS_FIL} ${Protobuf_PROTOC_EXECUTABLE}
-        COMMENT
-          "Running C++ protocol buffer compiler on ${FIL}"
-        VERBATIM)
-    endif()
-  endforeach()
-
-  # Returning generated sources list.
-  set_source_files_properties(${${SRCS}} ${${HDRS}} PROPERTIES GENERATED TRUE)
-  set(${SRCS} ${${SRCS}} PARENT_SCOPE)
-  set(${HDRS} ${${HDRS}} PARENT_SCOPE)
-endfunction()
diff --git a/src/c++/perf_analyzer/client_backend/tensorflow_serving/tfserve_client_backend.cc b/src/c++/perf_analyzer/client_backend/tensorflow_serving/tfserve_client_backend.cc
deleted file mode 100644
index 1fde3e5a8..000000000
--- a/src/c++/perf_analyzer/client_backend/tensorflow_serving/tfserve_client_backend.cc
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "tfserve_client_backend.h"
-
-#include "json_utils.h"
-
-namespace triton { namespace perfanalyzer { namespace clientbackend {
-namespace tfserving {
-
-//==============================================================================
-
-Error
-TFServeClientBackend::Create(
-    const std::string& url, const ProtocolType protocol,
-    const grpc_compression_algorithm compression_algorithm,
-    std::shared_ptr<Headers> http_headers, const bool verbose,
-    std::unique_ptr<ClientBackend>* client_backend)
-{
-  if (protocol == ProtocolType::HTTP) {
-    return Error(
-        "perf_analyzer does not support http protocol with TF serving");
-  }
-  std::unique_ptr<TFServeClientBackend> tfserve_client_backend(
-      new TFServeClientBackend(compression_algorithm, http_headers));
-
-  RETURN_IF_CB_ERROR(GrpcClient::Create(
-      &(tfserve_client_backend->grpc_client_), url, verbose));
-
-  *client_backend = std::move(tfserve_client_backend);
-
-  return Error::Success;
-}
-
-Error
-TFServeClientBackend::ModelMetadata(
-    rapidjson::Document* model_metadata, const std::string& model_name,
-    const std::string& model_version)
-{
-  tensorflow::serving::GetModelMetadataResponse metadata_proto;
-  RETURN_IF_CB_ERROR(grpc_client_->ModelMetadata(
-      &metadata_proto, model_name, model_version, *http_headers_));
-
-  std::string metadata;
-  ::google::protobuf::util::JsonPrintOptions options;
-  options.preserve_proto_field_names = true;
-  options.always_print_primitive_fields = true;
-  ::google::protobuf::util::MessageToJsonString(
-      metadata_proto, &metadata, options);
-
-  RETURN_IF_TRITON_ERROR(tc::ParseJson(model_metadata, metadata));
-
-  return Error::Success;
-}
-
-Error
-TFServeClientBackend::Infer(
-    cb::InferResult** result, const InferOptions& options,
-    const std::vector<InferInput*>& inputs,
-    const std::vector<const InferRequestedOutput*>& outputs)
-{
-  tfs::InferResult* tfserve_result;
-  RETURN_IF_CB_ERROR(grpc_client_->Infer(
-      &tfserve_result, options, inputs, outputs, *http_headers_,
-      compression_algorithm_));
-
-  *result = new TFServeInferResult(tfserve_result);
-
-  return Error::Success;
-}
-
-Error
-TFServeClientBackend::AsyncInfer(
-    OnCompleteFn callback, const InferOptions& options,
-    const std::vector<InferInput*>& inputs,
-    const std::vector<const InferRequestedOutput*>& outputs)
-{
-  auto wrapped_callback = [callback](tfs::InferResult* client_result) {
-    cb::InferResult* result = new TFServeInferResult(client_result);
-    callback(result);
-  };
-
-  RETURN_IF_CB_ERROR(grpc_client_->AsyncInfer(
-      wrapped_callback, options, inputs, outputs, *http_headers_,
-      compression_algorithm_));
-
-  return Error::Success;
-}
-
-
-Error
-TFServeClientBackend::ClientInferStat(InferStat* infer_stat)
-{
-  // Reusing the common library utilities to collect and report the
-  // client side statistics.
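  // tc::InferStat accumulates cumulative nanosecond counters inside the
  // shared client library; ParseInferStat() below copies them field by field
  // into the backend-neutral InferStat that perf_analyzer consumes.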
-  tc::InferStat client_infer_stat;
-
-  RETURN_IF_TRITON_ERROR(grpc_client_->ClientInferStat(&client_infer_stat));
-
-  ParseInferStat(client_infer_stat, infer_stat);
-
-  return Error::Success;
-}
-
-void
-TFServeClientBackend::ParseInferStat(
-    const tc::InferStat& tfserve_infer_stat, InferStat* infer_stat)
-{
-  infer_stat->completed_request_count =
-      tfserve_infer_stat.completed_request_count;
-  infer_stat->cumulative_total_request_time_ns =
-      tfserve_infer_stat.cumulative_total_request_time_ns;
-  infer_stat->cumulative_send_time_ns =
-      tfserve_infer_stat.cumulative_send_time_ns;
-  infer_stat->cumulative_receive_time_ns =
-      tfserve_infer_stat.cumulative_receive_time_ns;
-}
-
-//==============================================================================
-
-Error
-TFServeInferRequestedOutput::Create(
-    InferRequestedOutput** infer_output, const std::string& name)
-{
-  TFServeInferRequestedOutput* local_infer_output =
-      new TFServeInferRequestedOutput(name);
-
-  tc::InferRequestedOutput* tfserve_infer_output;
-  RETURN_IF_TRITON_ERROR(
-      tc::InferRequestedOutput::Create(&tfserve_infer_output, name));
-  local_infer_output->output_.reset(tfserve_infer_output);
-
-  *infer_output = local_infer_output;
-
-  return Error::Success;
-}
-
-TFServeInferRequestedOutput::TFServeInferRequestedOutput(
-    const std::string& name)
-    : InferRequestedOutput(BackendKind::TENSORFLOW_SERVING, name)
-{
-}
-
-//==============================================================================
-
-TFServeInferResult::TFServeInferResult(tfs::InferResult* result)
-{
-  result_.reset(result);
-}
-
-Error
-TFServeInferResult::Id(std::string* id) const
-{
-  id->clear();
-  return Error::Success;
-}
-
-Error
-TFServeInferResult::RequestStatus() const
-{
-  RETURN_IF_CB_ERROR(result_->RequestStatus());
-  return Error::Success;
-}
-
-Error
-TFServeInferResult::RawData(
-    const std::string& output_name, const uint8_t** buf,
-    size_t* byte_size) const
-{
-  return Error(
-      "Output retrieval is not currently supported for TFS client backend");
-}
-
-//==============================================================================
-
-
-}}}}  // namespace triton::perfanalyzer::clientbackend::tfserving
diff --git a/src/c++/perf_analyzer/client_backend/tensorflow_serving/tfserve_client_backend.h b/src/c++/perf_analyzer/client_backend/tensorflow_serving/tfserve_client_backend.h
deleted file mode 100644
index bd6b5db8b..000000000
--- a/src/c++/perf_analyzer/client_backend/tensorflow_serving/tfserve_client_backend.h
+++ /dev/null
@@ -1,150 +0,0 @@
-// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#pragma once
-
-#include <string>
-
-#include "../../perf_utils.h"
-#include "../client_backend.h"
-#include "tfserve_grpc_client.h"
-
-#define RETURN_IF_TRITON_ERROR(S)        \
-  do {                                   \
-    const tc::Error& status__ = (S);     \
-    if (!status__.IsOk()) {              \
-      return Error(status__.Message()); \
-    }                                    \
-  } while (false)
-
-namespace tc = triton::client;
-namespace cb = triton::perfanalyzer::clientbackend;
-namespace tfs = triton::perfanalyzer::clientbackend::tfserving;
-
-namespace triton { namespace perfanalyzer { namespace clientbackend {
-namespace tfserving {
-
-
-//==============================================================================
-/// TFServeClientBackend is used to generate load on the TF serving instance
-///
-class TFServeClientBackend : public ClientBackend {
- public:
-  /// Create a TFserving client backend which can be used to interact with the
-  /// server.
-  /// \param url The inference server url and port.
-  /// \param protocol The protocol type used.
-  /// \param compression_algorithm The compression algorithm to be used
-  /// on the grpc requests.
-  /// \param http_headers Map of HTTP headers. The map key/value indicates
-  /// the header name/value.
-  /// \param verbose Enables the verbose mode.
-  /// \param client_backend Returns a new TFServeClientBackend
-  /// object.
-  /// \return Error object indicating success or failure.
-  static Error Create(
-      const std::string& url, const ProtocolType protocol,
-      const grpc_compression_algorithm compression_algorithm,
-      std::shared_ptr<Headers> http_headers, const bool verbose,
-      std::unique_ptr<ClientBackend>* client_backend);
-
-  /// See ClientBackend::ModelMetadata()
-  Error ModelMetadata(
-      rapidjson::Document* model_metadata, const std::string& model_name,
-      const std::string& model_version) override;
-
-  /// See ClientBackend::Infer()
-  Error Infer(
-      cb::InferResult** result, const InferOptions& options,
-      const std::vector<InferInput*>& inputs,
-      const std::vector<const InferRequestedOutput*>& outputs) override;
-
-  /// See ClientBackend::AsyncInfer()
-  Error AsyncInfer(
-      OnCompleteFn callback, const InferOptions& options,
-      const std::vector<InferInput*>& inputs,
-      const std::vector<const InferRequestedOutput*>& outputs) override;
-
-  /// See ClientBackend::ClientInferStat()
-  Error ClientInferStat(InferStat* infer_stat) override;
-
- private:
-  TFServeClientBackend(
-      const grpc_compression_algorithm compression_algorithm,
-      std::shared_ptr<Headers> http_headers)
-      : ClientBackend(BackendKind::TENSORFLOW_SERVING),
-        compression_algorithm_(compression_algorithm),
-        http_headers_(http_headers)
-  {
-  }
-
-  void ParseInferStat(
-      const tc::InferStat& tfserve_infer_stat, InferStat* infer_stat);
-
-  std::unique_ptr<GrpcClient> grpc_client_;
-
-  grpc_compression_algorithm compression_algorithm_;
-  std::shared_ptr<Headers> http_headers_;
-};
-
-//==============================================================
-/// TFServeInferRequestedOutput is a wrapper around
-/// InferRequestedOutput object of triton common client library.
-///
-class TFServeInferRequestedOutput : public InferRequestedOutput {
- public:
-  static Error Create(
-      InferRequestedOutput** infer_output, const std::string& name);
-  /// Returns the raw InferRequestedOutput object required by TFserving client
-  /// library.
-  tc::InferRequestedOutput* Get() const { return output_.get(); }
-
- private:
-  explicit TFServeInferRequestedOutput(const std::string& name);
-
-  std::unique_ptr<tc::InferRequestedOutput> output_;
-};
-
-//==============================================================
-/// TFServeInferResult is a wrapper around the
-/// TF serving InferResult object.
-///
-class TFServeInferResult : public cb::InferResult {
- public:
-  explicit TFServeInferResult(tfs::InferResult* result);
-  /// See InferResult::Id()
-  Error Id(std::string* id) const override;
-  /// See InferResult::RequestStatus()
-  Error RequestStatus() const override;
-  /// See InferResult::RawData()
-  Error RawData(
-      const std::string& output_name, const uint8_t** buf,
-      size_t* byte_size) const override;
-
- private:
-  std::unique_ptr<tfs::InferResult> result_;
-};
-
-}}}}  // namespace triton::perfanalyzer::clientbackend::tfserving
diff --git a/src/c++/perf_analyzer/client_backend/tensorflow_serving/tfserve_grpc_client.cc b/src/c++/perf_analyzer/client_backend/tensorflow_serving/tfserve_grpc_client.cc
deleted file mode 100644
index f53e4d179..000000000
--- a/src/c++/perf_analyzer/client_backend/tensorflow_serving/tfserve_grpc_client.cc
+++ /dev/null
@@ -1,729 +0,0 @@
-// Copyright 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "tfserve_grpc_client.h"
-
-#include <climits>
-#include <fstream>
-#include <iostream>
-#include <map>
-#include <mutex>
-#include <sstream>
-
-#include "tfserve_client_backend.h"
-
-/// Type alias for string-TensorProto map.
-typedef google::protobuf::Map<std::string, tensorflow::TensorProto>
    StringKeyedProtos;
-
-namespace triton { namespace perfanalyzer { namespace clientbackend {
-namespace tfserving {
-
-namespace {
-
-// Use map to keep track of GRPC channels. <key, value> : <url, Channel*>
-// If context is created on url that has established Channel, then reuse it.
-std::map<std::string, std::shared_ptr<grpc::Channel>> grpc_channel_map_;
-std::mutex grpc_channel_map_mtx_;
-
-void
-GetTensorFlowDataType(const std::string& datatype, tensorflow::DataType* dtype)
-{
-  if (datatype == "FP16") {
-    *dtype = tensorflow::DataType::DT_HALF;
-  } else if (datatype == "BF16") {
-    *dtype = tensorflow::DataType::DT_BFLOAT16;
-  } else if (datatype == "FP32") {
-    *dtype = tensorflow::DataType::DT_FLOAT;
-  } else if (datatype == "FP64") {
-    *dtype = tensorflow::DataType::DT_DOUBLE;
-  } else if (datatype == "INT32") {
-    *dtype = tensorflow::DataType::DT_INT32;
-  } else if (datatype == "INT16") {
-    *dtype = tensorflow::DataType::DT_INT16;
-  } else if (datatype == "UINT16") {
-    *dtype = tensorflow::DataType::DT_UINT16;
-  } else if (datatype == "INT8") {
-    *dtype = tensorflow::DataType::DT_INT8;
-  } else if (datatype == "UINT8") {
-    *dtype = tensorflow::DataType::DT_UINT8;
-  } else if (datatype == "BYTES") {
-    *dtype = tensorflow::DataType::DT_STRING;
-  } else if (datatype == "INT64") {
-    *dtype = tensorflow::DataType::DT_INT64;
-  } else if (datatype == "BOOL") {
-    *dtype = tensorflow::DataType::DT_BOOL;
-  } else if (datatype == "UINT32") {
-    *dtype = tensorflow::DataType::DT_UINT32;
-  } else if (datatype == "UINT64") {
-    *dtype = tensorflow::DataType::DT_UINT64;
-  } else {
-    *dtype = tensorflow::DT_INVALID;
-  }
-}
-
-void
-ReadFile(const std::string& filename, std::string& data)
-{
-  data.clear();
-  if (!filename.empty()) {
-    std::ifstream file(filename.c_str(), std::ios::in);
-    if (file.is_open()) {
-      std::stringstream ss;
-      ss << file.rdbuf();
-      file.close();
-      data = ss.str();
-    }
-  }
-}
-
-std::shared_ptr<grpc::Channel>
-GetChannel(const std::string& url, bool use_ssl, const SslOptions& ssl_options)
-{
-  std::lock_guard<std::mutex> lock(grpc_channel_map_mtx_);
-
-  const auto& channel_itr = grpc_channel_map_.find(url);
-  if (channel_itr != grpc_channel_map_.end()) {
-    return channel_itr->second;
-  } else {
-    grpc::ChannelArguments arguments;
-    arguments.SetMaxSendMessageSize(tc::MAX_GRPC_MESSAGE_SIZE);
-    arguments.SetMaxReceiveMessageSize(tc::MAX_GRPC_MESSAGE_SIZE);
-    std::shared_ptr<grpc::ChannelCredentials> credentials;
-    if (use_ssl) {
-      std::string root;
-      std::string key;
-      std::string cert;
-      ReadFile(ssl_options.root_certificates, root);
-      ReadFile(ssl_options.private_key, key);
-      ReadFile(ssl_options.certificate_chain, cert);
-      grpc::SslCredentialsOptions opts = {root, key, cert};
-      credentials = grpc::SslCredentials(opts);
-    } else {
-      credentials = grpc::InsecureChannelCredentials();
-    }
-    std::shared_ptr<grpc::Channel> channel =
-        grpc::CreateCustomChannel(url, credentials, arguments);
-    grpc_channel_map_.insert(std::make_pair(url, channel));
-    return channel;
-  }
-}
-
-}  // namespace
-
-//==============================================================================
-// A GrpcInferRequest represents an in-flight inference request on gRPC.
-//
-class GrpcInferRequest {
- public:
-  GrpcInferRequest(TFServeOnCompleteFn callback = nullptr)
-      : callback_(callback), grpc_status_(),
-        grpc_response_(
-            std::make_shared<tensorflow::serving::PredictResponse>())
-  {
-  }
-
-  tc::RequestTimers& Timer() { return timer_; }
-  friend GrpcClient;
-
- private:
-  TFServeOnCompleteFn callback_;
-  // Variables for GRPC call
-  grpc::ClientContext grpc_context_;
-  grpc::Status grpc_status_;
-  std::shared_ptr<tensorflow::serving::PredictResponse> grpc_response_;
-  // The timers for infer request.
-  tc::RequestTimers timer_;
-};
-
-//==============================================================================
-
-Error
-GrpcClient::Create(
-    std::unique_ptr<GrpcClient>* client, const std::string& server_url,
-    bool verbose, bool use_ssl, const SslOptions& ssl_options)
-{
-  client->reset(new GrpcClient(server_url, verbose, use_ssl, ssl_options));
-  return Error::Success;
-}
-
-Error
-GrpcClient::ModelMetadata(
-    tensorflow::serving::GetModelMetadataResponse* model_metadata,
-    const std::string& model_name, const std::string& model_version,
-    const Headers& headers)
-{
-  model_metadata->Clear();
-  Error err;
-
-  tensorflow::serving::GetModelMetadataRequest request;
-  grpc::ClientContext context;
-
-  for (const auto& it : headers) {
-    context.AddMetadata(it.first, it.second);
-  }
-
-  request.mutable_model_spec()->set_name(model_name);
-  if (!model_version.empty()) {
-    request.mutable_model_spec()->set_version_label(model_version);
-  }
-  request.add_metadata_field("signature_def");
-  grpc::Status grpc_status =
-      stub_->GetModelMetadata(&context, request, model_metadata);
-  if (grpc_status.ok()) {
-    if (verbose_) {
-      std::cout << model_metadata->DebugString() << std::endl;
-    }
-  } else {
-    err = Error(grpc_status.error_message());
-  }
-
-  return err;
-}
-
-Error
-GrpcClient::Infer(
-    InferResult** result, const InferOptions& options,
-    const std::vector<InferInput*>& inputs,
-    const std::vector<const InferRequestedOutput*>& outputs,
-    const Headers& headers,
-    const grpc_compression_algorithm compression_algorithm)
-{
-  Error err;
-
-  grpc::ClientContext context;
-
-  std::shared_ptr<GrpcInferRequest> sync_request(new GrpcInferRequest());
-
-  sync_request->Timer().Reset();
-  sync_request->Timer().CaptureTimestamp(
-      tc::RequestTimers::Kind::REQUEST_START);
-  // Use send timer to measure time for marshalling infer request
-  sync_request->Timer().CaptureTimestamp(tc::RequestTimers::Kind::SEND_START);
-  for (const auto& it : headers) {
-    context.AddMetadata(it.first, it.second);
-  }
-  context.set_compression_algorithm(compression_algorithm);
-
-  err = PreRunProcessing(options, inputs, outputs);
-  sync_request->Timer().CaptureTimestamp(tc::RequestTimers::Kind::SEND_END);
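  // SEND_START/SEND_END bracket only PreRunProcessing(), i.e. the cost of
  // marshalling infer_request_; the blocking Predict() round trip below is
  // covered by the surrounding REQUEST_START/REQUEST_END pair.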
-  if (!err.IsOk()) {
-    return err;
-  }
-  sync_request->grpc_response_->Clear();
-  sync_request->grpc_status_ = stub_->Predict(
-      &context, infer_request_, sync_request->grpc_response_.get());
-
-  if (!sync_request->grpc_status_.ok()) {
-    err = Error(sync_request->grpc_status_.error_message());
-  }
-
-  sync_request->Timer().CaptureTimestamp(tc::RequestTimers::Kind::RECV_START);
-  InferResult::Create(result, sync_request->grpc_response_, err);
-  sync_request->Timer().CaptureTimestamp(tc::RequestTimers::Kind::RECV_END);
-
-  sync_request->Timer().CaptureTimestamp(tc::RequestTimers::Kind::REQUEST_END);
-
-  tc::Error update_err = UpdateInferStat(sync_request->Timer());
-  if (!update_err.IsOk()) {
-    std::cerr << "Failed to update context stat: " << update_err << std::endl;
-  }
-
-  if (sync_request->grpc_status_.ok()) {
-    if (verbose_) {
-      std::cout << sync_request->grpc_response_->DebugString() << std::endl;
-    }
-  }
-
-  return (*result)->RequestStatus();
-}
-
-Error
-GrpcClient::AsyncInfer(
-    TFServeOnCompleteFn callback, const InferOptions& options,
-    const std::vector<InferInput*>& inputs,
-    const std::vector<const InferRequestedOutput*>& outputs,
-    const Headers& headers,
-    const grpc_compression_algorithm compression_algorithm)
-{
-  if (callback == nullptr) {
-    return Error(
-        "Callback function must be provided along with AsyncInfer() call.");
-  }
-  if (!worker_.joinable()) {
-    worker_ = std::thread(&GrpcClient::AsyncTransfer, this);
-  }
-
-  GrpcInferRequest* async_request;
-  async_request = new GrpcInferRequest(std::move(callback));
-
-  async_request->Timer().CaptureTimestamp(
-      tc::RequestTimers::Kind::REQUEST_START);
-  async_request->Timer().CaptureTimestamp(tc::RequestTimers::Kind::SEND_START);
-  for (const auto& it : headers) {
-    async_request->grpc_context_.AddMetadata(it.first, it.second);
-  }
-  async_request->grpc_context_.set_compression_algorithm(
-      compression_algorithm);
-
-  Error err = PreRunProcessing(options, inputs, outputs);
-  if (!err.IsOk()) {
-    delete async_request;
-    return err;
-  }
-
-  async_request->Timer().CaptureTimestamp(tc::RequestTimers::Kind::SEND_END);
-
-  std::unique_ptr<grpc::ClientAsyncResponseReader<
      tensorflow::serving::PredictResponse>>
      rpc(stub_->PrepareAsyncPredict(
          &async_request->grpc_context_, infer_request_,
          &async_request_completion_queue_));
-
-  rpc->StartCall();
-
-  rpc->Finish(
-      async_request->grpc_response_.get(), &async_request->grpc_status_,
-      (void*)async_request);
-
-  if (verbose_) {
-    std::cout << "Sent request";
-    if (options.request_id_.size() != 0) {
-      std::cout << " '" << options.request_id_ << "'";
-    }
-    std::cout << std::endl;
-  }
-
-  return Error::Success;
-}
-
-void
-GrpcClient::AsyncTransfer()
-{
-  while (!exiting_) {
-    // GRPC async APIs are thread-safe https://github.com/grpc/grpc/issues/4486
-    GrpcInferRequest* raw_async_request;
-    bool ok = true;
-    bool status =
-        async_request_completion_queue_.Next((void**)(&raw_async_request), &ok);
-    std::shared_ptr<GrpcInferRequest> async_request;
-    if (!ok) {
-      fprintf(stderr, "Unexpected not ok on client side.\n");
-    }
-    if (!status) {
-      if (!exiting_) {
-        fprintf(stderr, "Completion queue is closed.\n");
-      }
-    } else if (raw_async_request == nullptr) {
-      fprintf(stderr, "Unexpected null tag received at client.\n");
-    } else {
-      async_request.reset(raw_async_request);
-      InferResult* async_result;
-      Error err;
-      if (!async_request->grpc_status_.ok()) {
-        err = Error(async_request->grpc_status_.error_message());
-      }
-      async_request->Timer().CaptureTimestamp(
-          tc::RequestTimers::Kind::RECV_START);
-      InferResult::Create(&async_result, async_request->grpc_response_, err);
-      async_request->Timer().CaptureTimestamp(
-          tc::RequestTimers::Kind::RECV_END);
-      async_request->Timer().CaptureTimestamp(
-          tc::RequestTimers::Kind::REQUEST_END);
-      tc::Error update_err = UpdateInferStat(async_request->Timer());
-      if (!update_err.IsOk()) {
-        std::cerr << "Failed to update context stat: " << update_err
-                  << std::endl;
-      }
-      if (async_request->grpc_status_.ok()) {
-        if (verbose_) {
-          std::cout << async_request->grpc_response_->DebugString()
-                    << std::endl;
-        }
-      }
-      async_request->callback_(async_result);
-    }
-  }
-}
-
-Error
-GrpcClient::PreRunProcessing(
-    const InferOptions& options, const std::vector<InferInput*>& inputs,
-    const std::vector<const InferRequestedOutput*>& outputs)
-{
-  // Populate the request protobuf
-
-  // Describing model name and signature from remote server.
-  infer_request_.mutable_model_spec()->set_name(options.model_name_);
-  if (!options.model_version_.empty()) {
-    infer_request_.mutable_model_spec()->set_version_label(
-        options.model_version_);
-  }
-  if (!options.model_signature_name_.empty()) {
-    infer_request_.mutable_model_spec()->set_signature_name(
-        options.model_signature_name_);
-  }
-
-  // Describing remote model inputs shape.
-  StringKeyedProtos& keyed_proto_inputs = *infer_request_.mutable_inputs();
-  std::set<std::string> request_inputs;
-
-  for (const auto input : inputs) {
-    auto raw_input = dynamic_cast<TFServeInferInput*>(input);
-    request_inputs.insert(raw_input->Name());
-    // Add new TensorProto submessages only if required, otherwise
-    // reuse the submessages already available.
-    auto itr = keyed_proto_inputs.find(raw_input->Name());
-    if (itr == keyed_proto_inputs.end()) {
-      itr = keyed_proto_inputs
-                .insert(google::protobuf::MapPair<
                        std::string, tensorflow::TensorProto>(
                    raw_input->Name(), tensorflow::TensorProto()))
-                .first;
-    }
-
-    // Set datatype
-    tensorflow::DataType tf_dtype = tensorflow::DT_INVALID;
-    GetTensorFlowDataType(raw_input->Datatype(), &tf_dtype);
-    itr->second.set_dtype(tf_dtype);
-    if (tf_dtype == tensorflow::DT_INVALID) {
-      return Error(
-          "failed to retrieve the TF datatype for " + raw_input->Name());
-    }
-
-    // Populate the shape
-    itr->second.mutable_tensor_shape()->Clear();
-    for (const auto dim : raw_input->Shape()) {
-      itr->second.mutable_tensor_shape()->add_dim()->set_size(dim);
-    }
-
-    raw_input->PrepareForRequest();
-    // There is an extra copy into the buffer to collect all the input
-    // batches; this is room for improvement for later.
-    bool end_of_input = false;
-
-    // auto* raw_contents = itr->second.mutable_float_val()->mutable_data();
-    size_t content_size;
-    raw_input->ByteSize(&content_size);
-    temp_buffer_.clear();
-    temp_buffer_.reserve(content_size);
-    while (!end_of_input) {
-      const uint8_t* buf;
-      size_t buf_size;
-      raw_input->GetNext(&buf, &buf_size, &end_of_input);
-      if (buf != nullptr) {
-        temp_buffer_.append(reinterpret_cast<const char*>(buf), buf_size);
-      }
-    }
-    ClearAllInputFields(&itr->second);
-    PopulateInputData(raw_input, &itr->second);
-  }
-
-  // Remove extra tensor protos, if any.
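  // (infer_request_ is reused across calls, so tensors left over from an
  // earlier request that are not in this request's input set must be erased.)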
-  std::set<std::string> extra_inputs;
-  for (const auto& iter : keyed_proto_inputs) {
-    if (request_inputs.find(iter.first) == request_inputs.end()) {
-      extra_inputs.insert(iter.first);
-    }
-  }
-  for (const auto& extra_input : extra_inputs) {
-    keyed_proto_inputs.erase(extra_input);
-  }
-
-  if (infer_request_.ByteSizeLong() > INT_MAX) {
-    size_t request_size = infer_request_.ByteSizeLong();
-    infer_request_.Clear();
-    return Error(
-        "Request has byte size " + std::to_string(request_size) +
-        " which exceeds gRPC's byte size limit " + std::to_string(INT_MAX) +
-        ".");
-  }
-
-  return Error::Success;
-}
-
-Error
-GrpcClient::ClearAllInputFields(tensorflow::TensorProto* input_tensor_proto)
-{
-  input_tensor_proto->mutable_half_val()->Clear();
-  input_tensor_proto->mutable_float_val()->Clear();
-  input_tensor_proto->mutable_double_val()->Clear();
-  input_tensor_proto->mutable_int_val()->Clear();
-  input_tensor_proto->mutable_string_val()->Clear();
-  input_tensor_proto->mutable_int64_val()->Clear();
-  input_tensor_proto->mutable_bool_val()->Clear();
-  input_tensor_proto->mutable_uint32_val()->Clear();
-  input_tensor_proto->mutable_uint64_val()->Clear();
-
-  return Error::Success;
-}
-
-Error
-GrpcClient::PopulateInputData(
-    TFServeInferInput* input, tensorflow::TensorProto* input_tensor_proto)
-{
-  if (input->Datatype() == "FP16") {
-    RETURN_IF_CB_ERROR(PopulateHalfVal(input_tensor_proto));
-  } else if (input->Datatype() == "BF16") {
-    return Error(
-        "BF16 datatype not currently supported for populating input data.");
-  } else if (input->Datatype() == "FP32") {
-    RETURN_IF_CB_ERROR(PopulateFloatVal(input_tensor_proto));
-  } else if (input->Datatype() == "FP64") {
-    RETURN_IF_CB_ERROR(PopulateDoubleVal(input_tensor_proto));
-  } else if (input->Datatype() == "INT32") {
-    RETURN_IF_CB_ERROR(PopulateIntVal(input_tensor_proto));
-  } else if (input->Datatype() == "INT16") {
-    RETURN_IF_CB_ERROR(PopulateIntVal(input_tensor_proto, 2));
-  } else if (input->Datatype() == "UINT16") {
-    RETURN_IF_CB_ERROR(PopulateIntVal(input_tensor_proto, 2));
-  } else if (input->Datatype() == "INT8") {
-    RETURN_IF_CB_ERROR(PopulateIntVal(input_tensor_proto, 1));
-  } else if (input->Datatype() == "UINT8") {
-    RETURN_IF_CB_ERROR(PopulateIntVal(input_tensor_proto, 1));
-  } else if (input->Datatype() == "BYTES") {
-    RETURN_IF_CB_ERROR(PopulateStrVal(input_tensor_proto));
-  } else if (input->Datatype() == "INT64") {
-    RETURN_IF_CB_ERROR(PopulateInt64Val(input_tensor_proto));
-  } else if (input->Datatype() == "BOOL") {
-    RETURN_IF_CB_ERROR(PopulateBoolVal(input_tensor_proto));
-  } else if (input->Datatype() == "UINT32") {
-    RETURN_IF_CB_ERROR(PopulateUintVal(input_tensor_proto));
-  } else if (input->Datatype() == "UINT64") {
-    RETURN_IF_CB_ERROR(PopulateUint64Val(input_tensor_proto));
-  } else {
-    return Error("unsupported datatype for populating input data");
-  }
-
-  return Error::Success;
-}
-
-Error
-GrpcClient::PopulateHalfVal(tensorflow::TensorProto* input_tensor_proto)
-{
-  // Building FP16 one by one. Note that since protobuf has no int16 type, we'll
-  // have some pointless zero padding for each value here.
-  input_tensor_proto->mutable_half_val()->Reserve(2 * temp_buffer_.size());
-  uint64_t copied_byte_size = 0;
-  while (copied_byte_size < temp_buffer_.size()) {
-    int32_t elem = 0;  // zero-initialize: memcpy writes only the low 2 bytes
-    memcpy(&elem, (temp_buffer_.c_str() + copied_byte_size), 2);
-    input_tensor_proto->add_half_val(elem);
-    copied_byte_size += 2;
-  }
-
-  return Error::Success;
-}
-
-Error
-GrpcClient::PopulateFloatVal(tensorflow::TensorProto* input_tensor_proto)
-{
-  input_tensor_proto->mutable_float_val()->Reserve(temp_buffer_.size());
-  uint64_t copied_byte_size = 0;
-  while (copied_byte_size < temp_buffer_.size()) {
-    input_tensor_proto->add_float_val(
-        *(float*)(temp_buffer_.c_str() + copied_byte_size));
-    copied_byte_size += sizeof(float);
-  }
-
-  return Error::Success;
-}
-
-Error
-GrpcClient::PopulateDoubleVal(tensorflow::TensorProto* input_tensor_proto)
-{
-  input_tensor_proto->mutable_double_val()->Reserve(temp_buffer_.size());
-  uint64_t copied_byte_size = 0;
-  while (copied_byte_size < temp_buffer_.size()) {
-    input_tensor_proto->add_double_val(
-        *(double*)(temp_buffer_.c_str() + copied_byte_size));
-    copied_byte_size += sizeof(double);
-  }
-
-  return Error::Success;
-}
-
-Error
-GrpcClient::PopulateIntVal(
-    tensorflow::TensorProto* input_tensor_proto, size_t step_size)
-{
-  if (step_size == 4) {
-    input_tensor_proto->mutable_int_val()->Reserve(temp_buffer_.size());
-    uint64_t copied_byte_size = 0;
-    while (copied_byte_size < temp_buffer_.size()) {
-      input_tensor_proto->add_int_val(
-          *(int*)(temp_buffer_.c_str() + copied_byte_size));
-      copied_byte_size += sizeof(int);
-    }
-  } else {
-    // Note that since protobuf has no int16/int8 type, we'll
-    // have some pointless zero padding for each value here and
-    // need to build the tensor one element at a time
-    input_tensor_proto->mutable_int_val()->Reserve(
-        temp_buffer_.size() * (4 / step_size));
-    uint64_t copied_byte_size = 0;
-    while (copied_byte_size < temp_buffer_.size()) {
-      int32_t elem = 0;  // zero-initialize: memcpy writes only step_size bytes
-      memcpy(&elem, (temp_buffer_.c_str() + copied_byte_size), step_size);
-      input_tensor_proto->add_int_val(elem);
-      copied_byte_size += step_size;
-    }
-  }
-
-  return Error::Success;
-}
-
-Error
-GrpcClient::PopulateStrVal(tensorflow::TensorProto* input_tensor_proto)
-{
-  input_tensor_proto->mutable_string_val()->Reserve(temp_buffer_.size());
-  uint64_t copied_byte_size = 0;
-  while (copied_byte_size < temp_buffer_.size()) {
-    int32_t string_length = *((int*)(temp_buffer_.c_str() + copied_byte_size));
-    input_tensor_proto->add_string_val(std::string(
-        (temp_buffer_.c_str() + copied_byte_size + 4), string_length));
-    copied_byte_size += (string_length + 4);
-  }
-
-  return Error::Success;
-}
-
-Error
-GrpcClient::PopulateBoolVal(tensorflow::TensorProto* input_tensor_proto)
-{
-  input_tensor_proto->mutable_bool_val()->Reserve(temp_buffer_.size());
-  uint64_t copied_byte_size = 0;
-  while (copied_byte_size < temp_buffer_.size()) {
-    input_tensor_proto->add_bool_val(
-        *(bool*)(temp_buffer_.c_str() + copied_byte_size));
-    copied_byte_size += sizeof(bool);
-  }
-
-  return Error::Success;
-}
-
-Error
-GrpcClient::PopulateInt64Val(tensorflow::TensorProto* input_tensor_proto)
-{
-  input_tensor_proto->mutable_int64_val()->Reserve(temp_buffer_.size());
-  uint64_t copied_byte_size = 0;
-  while (copied_byte_size < temp_buffer_.size()) {
-    // was add_bool_val, which filled the wrong proto field for INT64 tensors
-    input_tensor_proto->add_int64_val(
-        *(int64_t*)(temp_buffer_.c_str() + copied_byte_size));
-    copied_byte_size += sizeof(int64_t);
-  }
-
-  return Error::Success;
-}
-
-Error
-GrpcClient::PopulateUintVal(tensorflow::TensorProto* input_tensor_proto)
-{
-  input_tensor_proto->mutable_uint32_val()->Reserve(temp_buffer_.size());
-  uint64_t copied_byte_size = 0;
-  while (copied_byte_size < temp_buffer_.size()) {
-    input_tensor_proto->add_uint32_val(
-        *(uint32_t*)(temp_buffer_.c_str() + copied_byte_size));
-    copied_byte_size += sizeof(uint32_t);
-  }
-
-  return Error::Success;
-}
-
-Error
-GrpcClient::PopulateUint64Val(tensorflow::TensorProto* input_tensor_proto)
-{
-  input_tensor_proto->mutable_uint64_val()->Reserve(temp_buffer_.size());
-  uint64_t copied_byte_size = 0;
-  while (copied_byte_size < temp_buffer_.size()) {
-    input_tensor_proto->add_uint64_val(
-        *(uint64_t*)(temp_buffer_.c_str() + copied_byte_size));
-    copied_byte_size += sizeof(uint64_t);
-  }
-
-  return Error::Success;
-}
-
-GrpcClient::GrpcClient(
-    const std::string& url, bool verbose, bool use_ssl,
-    const SslOptions& ssl_options)
-    : InferenceServerClient(verbose),
-      stub_(tensorflow::serving::PredictionService::NewStub(
-          GetChannel(url, use_ssl, ssl_options)))
-{
-}
-
-GrpcClient::~GrpcClient()
-{
-  exiting_ = true;
-  // Close complete queue and wait for the worker thread to return
-  async_request_completion_queue_.Shutdown();
-
-  // thread not joinable if AsyncInfer() is not called
-  // (it is default constructed thread before the first AsyncInfer() call)
-  if (worker_.joinable()) {
-    worker_.join();
-  }
-
-  bool has_next = true;
-  GrpcInferRequest* async_request;
-  bool ok;
-  do {
-    has_next =
-        async_request_completion_queue_.Next((void**)&async_request, &ok);
-    if (has_next && async_request != nullptr) {
-      delete async_request;
-    }
-  } while (has_next);
-}
-
-//======================================================================
-
-Error
-InferResult::Create(
-    InferResult** infer_result,
-    std::shared_ptr<tensorflow::serving::PredictResponse> response,
-    Error& request_status)
-{
-  *infer_result =
-      reinterpret_cast<InferResult*>(new InferResult(response, request_status));
-  return Error::Success;
-}
-
-Error
-InferResult::RequestStatus() const
-{
-  return request_status_;
-}
-
-InferResult::InferResult(
-    std::shared_ptr<tensorflow::serving::PredictResponse> response,
-    Error& request_status)
-    : response_(response), request_status_(request_status)
-{
-}
-
-//======================================================================
-
-}}}}  // namespace triton::perfanalyzer::clientbackend::tfserving
diff --git a/src/c++/perf_analyzer/client_backend/tensorflow_serving/tfserve_grpc_client.h b/src/c++/perf_analyzer/client_backend/tensorflow_serving/tfserve_grpc_client.h
deleted file mode 100644
index bfa475b8c..000000000
--- a/src/c++/perf_analyzer/client_backend/tensorflow_serving/tfserve_grpc_client.h
+++ /dev/null
@@ -1,220 +0,0 @@
-// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#pragma once
-
-#include <functional>
-
-#include "../client_backend.h"
-#include "common.h"
-#include "tensorflow_serving/apis/prediction_service.grpc.pb.h"
-#include "tfserve_infer_input.h"
-
-namespace tc = triton::client;
-
-namespace triton { namespace perfanalyzer { namespace clientbackend {
-namespace tfserving {
-
-struct SslOptions {
-  explicit SslOptions() {}
-  // File containing the PEM encoding of the server root certificates.
-  // If this parameter is empty, the default roots will be used. The
-  // default roots can be overridden using the
-  // GRPC_DEFAULT_SSL_ROOTS_FILE_PATH environment variable pointing
-  // to a file on the file system containing the roots.
-  std::string root_certificates;
-  // File containing the PEM encoding of the client's private key.
-  // This parameter can be empty if the client does not have a
-  // private key.
-  std::string private_key;
-  // File containing the PEM encoding of the client's certificate chain.
-  // This parameter can be empty if the client does not have a
-  // certificate chain.
-  std::string certificate_chain;
-};
-
-class InferResult;
-
-using TFServeOnCompleteFn = std::function<void(InferResult*)>;
-
-//==============================================================================
-/// A GrpcClient object is used to perform any kind of communication with the
-/// TF serving service using the gRPC protocol. None of the functions are
-/// thread safe.
-///
-/// \code
-///   std::unique_ptr<GrpcClient> client;
-///   GrpcClient::Create(&client, "localhost:8500");
-///   ...
-///   ...
-/// \endcode
-///
-class GrpcClient : public tc::InferenceServerClient {
- public:
-  ~GrpcClient();
-
-  /// Create a client that can be used to communicate with the server.
-  /// \param client Returns a new InferenceServerGrpcClient object.
-  /// \param server_url The inference server name and port.
-  /// \param verbose If true generate verbose output when contacting
-  /// the inference server.
-  /// \param use_ssl If true use encrypted channel to the server.
-  /// \param ssl_options Specifies the files required for
-  /// SSL encryption and authorization.
-  /// \return Error object indicating success or failure.
-  static Error Create(
-      std::unique_ptr<GrpcClient>* client, const std::string& server_url,
-      bool verbose = false, bool use_ssl = false,
-      const SslOptions& ssl_options = SslOptions());
-
-  /// Contact the inference server and get the metadata of specified model.
-  /// \param model_metadata Returns model metadata as ModelMetadataResponse
-  /// message.
-  /// \param model_name The name of the model to get metadata.
-  /// \param model_version The version of the model to get metadata.
-  /// The default value is an empty string, which means the server will
-  /// choose a version based on the model and internal policy.
-  /// \param headers Optional map specifying additional HTTP headers to include
-  /// in the metadata of gRPC request.
-  /// \return Error object indicating success or failure of the request.
-  Error ModelMetadata(
-      tensorflow::serving::GetModelMetadataResponse* model_metadata,
-      const std::string& model_name, const std::string& model_version = "",
-      const Headers& headers = Headers());
-
-  /// Run synchronous inference on server.
-  /// \param result Returns the result of inference.
-  /// \param options The options for inference request.
-  /// \param inputs The vector of InferInput describing the model inputs.
-  /// \param outputs Optional vector of InferRequestedOutput describing how the
-  /// output must be returned. If not provided then all the outputs in the model
-  /// config will be returned as default settings.
-  /// \param headers Optional map specifying additional HTTP headers to include
-  /// in the metadata of gRPC request.
-  /// \param compression_algorithm The compression algorithm to be used
-  /// on the grpc requests.
-  /// \return Error object indicating success or failure of the
-  /// request.
-  Error Infer(
-      InferResult** result, const InferOptions& options,
-      const std::vector<InferInput*>& inputs,
-      const std::vector<const InferRequestedOutput*>& outputs =
-          std::vector<const InferRequestedOutput*>(),
-      const Headers& headers = Headers(),
-      const grpc_compression_algorithm compression_algorithm =
-          GRPC_COMPRESS_NONE);
-
-  /// Run asynchronous inference on server.
-  /// Once the request is completed, the InferResult pointer will be passed to
-  /// the provided 'callback' function. Upon the invocation of callback
-  /// function, the ownership of InferResult object is transferred to the
-  /// function caller. It is then the caller's choice on either retrieving the
-  /// results inside the callback function or deferring it to a different thread
-  /// so that the client is unblocked. To prevent a memory leak, the user
-  /// must ensure this object gets deleted.
-  /// \param callback The callback function to be invoked on request completion.
-  /// \param options The options for inference request.
-  /// \param inputs The vector of InferInput describing the model inputs.
-  /// \param outputs Optional vector of InferRequestedOutput describing how the
-  /// output must be returned. If not provided then all the outputs in the model
-  /// config will be returned as default settings.
-  /// \param headers Optional map specifying additional HTTP headers to include
-  /// in the metadata of gRPC request.
-  /// \param compression_algorithm The compression algorithm to be used
-  /// on the grpc requests.
-  /// \return Error object indicating success or failure of the request.
-  Error AsyncInfer(
-      TFServeOnCompleteFn callback, const InferOptions& options,
-      const std::vector<InferInput*>& inputs,
-      const std::vector<const InferRequestedOutput*>& outputs =
-          std::vector<const InferRequestedOutput*>(),
-      const Headers& headers = Headers(),
-      const grpc_compression_algorithm compression_algorithm =
-          GRPC_COMPRESS_NONE);
-
- private:
-  GrpcClient(
-      const std::string& url, bool verbose, bool use_ssl,
-      const SslOptions& ssl_options);
-  Error PreRunProcessing(
-      const InferOptions& options, const std::vector<InferInput*>& inputs,
-      const std::vector<const InferRequestedOutput*>& outputs);
-  void AsyncTransfer();
-  Error ClearAllInputFields(tensorflow::TensorProto* input_tensor_proto);
-  Error PopulateInputData(
-      TFServeInferInput* input, tensorflow::TensorProto* input_tensor_proto);
-  Error PopulateHalfVal(tensorflow::TensorProto* input_tensor_proto);
-  Error PopulateFloatVal(tensorflow::TensorProto* input_tensor_proto);
-  Error PopulateDoubleVal(tensorflow::TensorProto* input_tensor_proto);
-  Error PopulateIntVal(
-      tensorflow::TensorProto* input_tensor_proto, size_t step_size = 4);
-  Error PopulateStrVal(tensorflow::TensorProto* input_tensor_proto);
-  Error PopulateBoolVal(tensorflow::TensorProto* input_tensor_proto);
-  Error PopulateInt64Val(tensorflow::TensorProto* input_tensor_proto);
-  Error PopulateUintVal(tensorflow::TensorProto* input_tensor_proto);
-  Error PopulateUint64Val(tensorflow::TensorProto* input_tensor_proto);
-
-  // The producer-consumer queue used to communicate asynchronously with
-  // the GRPC runtime.
-  grpc::CompletionQueue async_request_completion_queue_;
-
-  bool enable_stream_stats_;
-  std::mutex stream_mutex_;
-
-  // GRPC end point.
-  std::unique_ptr<tensorflow::serving::PredictionService::Stub> stub_;
-  // request for GRPC call, one request object can be used for multiple calls
-  // since it can be overwritten as soon as the GRPC send finishes.
-  tensorflow::serving::PredictRequest infer_request_;
-  // A temporary buffer to hold serialized data
-  std::string temp_buffer_;
-};
-
-//======================================================================
-
-class InferResult {
- public:
-  static Error Create(
-      InferResult** infer_result,
-      std::shared_ptr<tensorflow::serving::PredictResponse> response,
-      Error& request_status);
-
-
-  Error RequestStatus() const;
-  Error Id(std::string* id) const;
-  std::string DebugString() const { return response_->DebugString(); }
-
- private:
-  InferResult(
-      std::shared_ptr<tensorflow::serving::PredictResponse> response,
-      Error& request_status);
-
-  std::shared_ptr<tensorflow::serving::PredictResponse> response_;
-  Error request_status_;
-};
-
-//======================================================================
-
-}}}}  // namespace triton::perfanalyzer::clientbackend::tfserving
diff --git a/src/c++/perf_analyzer/client_backend/tensorflow_serving/tfserve_infer_input.cc b/src/c++/perf_analyzer/client_backend/tensorflow_serving/tfserve_infer_input.cc
deleted file mode 100644
index 60edf87e7..000000000
--- a/src/c++/perf_analyzer/client_backend/tensorflow_serving/tfserve_infer_input.cc
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "tfserve_infer_input.h"
-
-namespace triton { namespace perfanalyzer { namespace clientbackend {
-namespace tfserving {
-
-Error
-TFServeInferInput::Create(
-    InferInput** infer_input, const std::string& name,
-    const std::vector<int64_t>& dims, const std::string& datatype)
-{
-  TFServeInferInput* local_infer_input =
-      new TFServeInferInput(name, dims, datatype);
-
-  *infer_input = local_infer_input;
-  return Error::Success;
-}
-
-Error
-TFServeInferInput::SetShape(const std::vector<int64_t>& shape)
-{
-  shape_ = shape;
-  return Error::Success;
-}
-
-Error
-TFServeInferInput::Reset()
-{
-  bufs_.clear();
-  buf_byte_sizes_.clear();
-  bufs_idx_ = 0;
-  byte_size_ = 0;
-  return Error::Success;
-}
-
-Error
-TFServeInferInput::AppendRaw(const uint8_t* input, size_t input_byte_size)
-{
-  byte_size_ += input_byte_size;
-
-  bufs_.push_back(input);
-  buf_byte_sizes_.push_back(input_byte_size);
-
-  return Error::Success;
-}
-
-Error
-TFServeInferInput::ByteSize(size_t* byte_size) const
-{
-  *byte_size = byte_size_;
-  return Error::Success;
-}
-
-Error
-TFServeInferInput::PrepareForRequest()
-{
-  // Reset position so request sends entire input.
-  bufs_idx_ = 0;
-  buf_pos_ = 0;
-  return Error::Success;
-}
-
-Error
-TFServeInferInput::GetNext(
-    const uint8_t** buf, size_t* input_bytes, bool* end_of_input)
-{
-  if (bufs_idx_ < bufs_.size()) {
-    *buf = bufs_[bufs_idx_];
-    *input_bytes = buf_byte_sizes_[bufs_idx_];
-    bufs_idx_++;
-  } else {
-    *buf = nullptr;
-    *input_bytes = 0;
-  }
-  *end_of_input = (bufs_idx_ >= bufs_.size());
-
-  return Error::Success;
-}
-
-TFServeInferInput::TFServeInferInput(
-    const std::string& name, const std::vector<int64_t>& dims,
-    const std::string& datatype)
-    : InferInput(BackendKind::TENSORFLOW_SERVING, name, datatype), shape_(dims)
-{
-}
-
-}}}}  // namespace triton::perfanalyzer::clientbackend::tfserving
diff --git a/src/c++/perf_analyzer/client_backend/tensorflow_serving/tfserve_infer_input.h b/src/c++/perf_analyzer/client_backend/tensorflow_serving/tfserve_infer_input.h
deleted file mode 100644
index ec1a35dd9..000000000
--- a/src/c++/perf_analyzer/client_backend/tensorflow_serving/tfserve_infer_input.h
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#pragma once
-
-#include <string>
-
-#include "../../perf_utils.h"
-#include "../client_backend.h"
-
-
-namespace triton { namespace perfanalyzer { namespace clientbackend {
-namespace tfserving {
-
-//==============================================================
-/// TFServeInferInput instance holds the information regarding
-/// model input tensors and their corresponding generated data.
-///
-class TFServeInferInput : public InferInput {
- public:
-  static Error Create(
-      InferInput** infer_input, const std::string& name,
-      const std::vector<int64_t>& dims, const std::string& datatype);
-  /// See InferInput::Shape()
-  const std::vector<int64_t>& Shape() const override { return shape_; }
-  /// See InferInput::SetShape()
-  Error SetShape(const std::vector<int64_t>& shape) override;
-  /// See InferInput::Reset()
-  Error Reset() override;
-  /// See InferInput::AppendRaw()
-  Error AppendRaw(const uint8_t* input, size_t input_byte_size) override;
-  /// Gets the size of data added into this input in bytes.
-  /// \param byte_size The size of data added in bytes.
-  /// \return Error object indicating success or failure.
-  Error ByteSize(size_t* byte_size) const;
-  /// Resets the heads to start providing data from the beginning.
-  Error PrepareForRequest();
-  /// Get the next chunk of data if available.
-  Error GetNext(const uint8_t** buf, size_t* input_bytes, bool* end_of_input);
-
- private:
-  explicit TFServeInferInput(
-      const std::string& name, const std::vector<int64_t>& dims,
-      const std::string& datatype);
-
-  std::vector<int64_t> shape_;
-  size_t byte_size_{0};
-
-  size_t bufs_idx_, buf_pos_;
-  std::vector<const uint8_t*> bufs_;
-  std::vector<size_t> buf_byte_sizes_;
-};
-
-}}}}  // namespace triton::perfanalyzer::clientbackend::tfserving
diff --git a/src/c++/perf_analyzer/client_backend/torchserve/CMakeLists.txt b/src/c++/perf_analyzer/client_backend/torchserve/CMakeLists.txt
deleted file mode 100644
index 19e4c6245..000000000
--- a/src/c++/perf_analyzer/client_backend/torchserve/CMakeLists.txt
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#  * Redistributions of source code must retain the above copyright
-#    notice, this list of conditions and the following disclaimer.
-#  * Redistributions in binary form must reproduce the above copyright
-#    notice, this list of conditions and the following disclaimer in the
-#    documentation and/or other materials provided with the distribution.
-#  * Neither the name of NVIDIA CORPORATION nor the names of its
-#    contributors may be used to endorse or promote products derived
-#    from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-cmake_minimum_required (VERSION 3.18)
-
-set(
-  TS_CLIENT_BACKEND_SRCS
-  torchserve_client_backend.cc
-  torchserve_infer_input.cc
-  torchserve_http_client.cc
-)
-
-set(
-  TS_CLIENT_BACKEND_HDRS
-  torchserve_client_backend.h
-  torchserve_infer_input.h
-  torchserve_http_client.h
-)
-
-add_library(
-  ts-client-backend-library EXCLUDE_FROM_ALL OBJECT
-  ${TS_CLIENT_BACKEND_SRCS}
-  ${TS_CLIENT_BACKEND_HDRS}
-)
-
-target_link_libraries(
-  ts-client-backend-library
-  PUBLIC CURL::libcurl
-  PUBLIC httpclient_static
-)
-
-if(${TRITON_ENABLE_GPU})
-  target_include_directories(ts-client-backend-library PUBLIC ${CUDA_INCLUDE_DIRS})
-  target_link_libraries(ts-client-backend-library PRIVATE ${CUDA_LIBRARIES})
-endif() # TRITON_ENABLE_GPU
diff --git a/src/c++/perf_analyzer/client_backend/torchserve/torchserve_client_backend.cc b/src/c++/perf_analyzer/client_backend/torchserve/torchserve_client_backend.cc
deleted file mode 100644
index 76e62c6c0..000000000
--- a/src/c++/perf_analyzer/client_backend/torchserve/torchserve_client_backend.cc
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "torchserve_client_backend.h"
-
-#include "json_utils.h"
-
-namespace triton { namespace perfanalyzer { namespace clientbackend {
-namespace torchserve {
-
-
-//==============================================================================
-
-Error
-TorchServeClientBackend::Create(
-    const std::string& url, const ProtocolType protocol,
-    std::shared_ptr<Headers> http_headers, const bool verbose,
-    std::unique_ptr<ClientBackend>* client_backend)
-{
-  if (protocol == ProtocolType::GRPC) {
-    return Error(
-        "perf_analyzer does not support gRPC protocol with TorchServe");
-  }
-  std::unique_ptr<TorchServeClientBackend> torchserve_client_backend(
-      new TorchServeClientBackend(http_headers));
-  RETURN_IF_CB_ERROR(ts::HttpClient::Create(
-      &(torchserve_client_backend->http_client_), url, verbose));
-  *client_backend = std::move(torchserve_client_backend);
-  return Error::Success;
-}
-
-Error
-TorchServeClientBackend::Infer(
-    cb::InferResult** result, const InferOptions& options,
-    const std::vector<InferInput*>& inputs,
-    const std::vector<const InferRequestedOutput*>& outputs)
-{
-  ts::InferResult* torchserve_result;
-  RETURN_IF_CB_ERROR(http_client_->Infer(
-      &torchserve_result, options, inputs, outputs, *http_headers_));
-  *result = new TorchServeInferResult(torchserve_result);
-  return Error::Success;
-}
-
-Error
-TorchServeClientBackend::ClientInferStat(InferStat* infer_stat)
-{
-  // Reusing the common library utilities to collect and report the
-  // client side statistics.
-  tc::InferStat client_infer_stat;
-  RETURN_IF_TRITON_ERROR(http_client_->ClientInferStat(&client_infer_stat));
-  ParseInferStat(client_infer_stat, infer_stat);
-  return Error::Success;
-}
-
-void
-TorchServeClientBackend::ParseInferStat(
-    const tc::InferStat& torchserve_infer_stat, InferStat* infer_stat)
-{
-  infer_stat->completed_request_count =
-      torchserve_infer_stat.completed_request_count;
-  infer_stat->cumulative_total_request_time_ns =
-      torchserve_infer_stat.cumulative_total_request_time_ns;
-  infer_stat->cumulative_send_time_ns =
-      torchserve_infer_stat.cumulative_send_time_ns;
-  infer_stat->cumulative_receive_time_ns =
-      torchserve_infer_stat.cumulative_receive_time_ns;
-}
-
-//==============================================================================
-
-TorchServeInferResult::TorchServeInferResult(ts::InferResult* result)
-{
-  result_.reset(result);
-}
-
-Error
-TorchServeInferResult::Id(std::string* id) const
-{
-  id->clear();
-  return Error::Success;
-}
-
-Error
-TorchServeInferResult::RequestStatus() const
-{
-  RETURN_IF_CB_ERROR(result_->RequestStatus());
-  return Error::Success;
-}
-
-Error
-TorchServeInferResult::RawData(
-    const std::string& output_name, const uint8_t** buf,
-    size_t* byte_size) const
-{
-  return Error(
-      "Output retrieval is not currently supported for TorchServe client "
-      "backend");
-}
-
-//==============================================================================
-
-}}}}  // namespace triton::perfanalyzer::clientbackend::torchserve
diff --git a/src/c++/perf_analyzer/client_backend/torchserve/torchserve_client_backend.h b/src/c++/perf_analyzer/client_backend/torchserve/torchserve_client_backend.h
deleted file mode 100644
index 25566256e..000000000
--- a/src/c++/perf_analyzer/client_backend/torchserve/torchserve_client_backend.h
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
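For context, the backend removed above is only reachable through the generic ClientBackend interface. The following is a minimal, hypothetical caller sketch, not code from this tree: it assumes the client_backend.h types referenced throughout this diff, and the model name and file path are illustrative. Note that PreRunProcessing() in the deleted HTTP client strips the first 4 bytes of each input buffer, so the sketch serializes the file path with a 4-byte length prefix.

#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <vector>

#include "torchserve_client_backend.h"

namespace cb = triton::perfanalyzer::clientbackend;

int main()
{
  std::unique_ptr<cb::ClientBackend> backend;
  auto headers = std::make_shared<cb::Headers>();
  // gRPC is rejected by TorchServeClientBackend::Create(), so use HTTP.
  cb::Error err = cb::torchserve::TorchServeClientBackend::Create(
      "localhost:8080", cb::ProtocolType::HTTP, headers, /*verbose=*/false,
      &backend);
  if (!err.IsOk()) {
    return 1;
  }

  // The TorchServe input tensor holds a file path, serialized as a 4-byte
  // length prefix followed by the path bytes; the backend strips the prefix
  // before opening the file. The path itself is illustrative.
  const std::string path{"input/kitten.jpg"};
  std::vector<uint8_t> serialized(4 + path.size());
  const uint32_t len = static_cast<uint32_t>(path.size());
  std::memcpy(serialized.data(), &len, 4);
  std::memcpy(serialized.data() + 4, path.data(), path.size());

  cb::InferInput* input = nullptr;
  cb::torchserve::TorchServeInferInput::Create(&input, "data", {1}, "BYTES");
  input->AppendRaw(serialized.data(), serialized.size());

  cb::InferOptions options("resnet");  // hypothetical model name
  cb::InferResult* result = nullptr;
  err = backend->Infer(&result, options, {input}, {});

  delete result;
  delete input;
  return err.IsOk() ? 0 : 1;
}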
-#pragma once
-
-#include <memory>
-
-#include "../../perf_utils.h"
-#include "../client_backend.h"
-#include "torchserve_http_client.h"
-
-#define RETURN_IF_TRITON_ERROR(S)        \
-  do {                                   \
-    const tc::Error& status__ = (S);     \
-    if (!status__.IsOk()) {              \
-      return Error(status__.Message()); \
-    }                                    \
-  } while (false)
-
-namespace tc = triton::client;
-namespace cb = triton::perfanalyzer::clientbackend;
-namespace ts = triton::perfanalyzer::clientbackend::torchserve;
-
-namespace triton { namespace perfanalyzer { namespace clientbackend {
-namespace torchserve {
-
-
-//==============================================================================
-/// TorchServeClientBackend is used to generate load on the TorchServe instance
-///
-class TorchServeClientBackend : public ClientBackend {
- public:
-  /// Create a torchserve client backend which can be used to interact with the
-  /// server.
-  /// \param url The inference server url and port.
-  /// \param protocol The protocol type used.
-  /// \param http_headers Map of HTTP headers. The map key/value indicates
-  /// the header name/value.
-  /// \param verbose Enables the verbose mode.
-  /// \param client_backend Returns a new TorchServeClientBackend
-  /// object.
-  /// \return Error object indicating success or failure.
-  static Error Create(
-      const std::string& url, const ProtocolType protocol,
-      std::shared_ptr<Headers> http_headers, const bool verbose,
-      std::unique_ptr<ClientBackend>* client_backend);
-
-  /// See ClientBackend::Infer()
-  Error Infer(
-      cb::InferResult** result, const InferOptions& options,
-      const std::vector<InferInput*>& inputs,
-      const std::vector<const InferRequestedOutput*>& outputs) override;
-
-  /// See ClientBackend::ClientInferStat()
-  Error ClientInferStat(InferStat* infer_stat) override;
-
- private:
-  TorchServeClientBackend(std::shared_ptr<Headers> http_headers)
-      : ClientBackend(BackendKind::TORCHSERVE), http_headers_(http_headers)
-  {
-  }
-
-  void ParseInferStat(
-      const tc::InferStat& torchserve_infer_stat, InferStat* infer_stat);
-
-  std::unique_ptr<ts::HttpClient> http_client_;
-  std::shared_ptr<Headers> http_headers_;
-};
-
-//==============================================================
-/// TorchServeInferResult is a wrapper around the TorchServe
-/// InferResult object.
-///
-class TorchServeInferResult : public cb::InferResult {
- public:
-  explicit TorchServeInferResult(ts::InferResult* result);
-  /// See InferResult::Id()
-  Error Id(std::string* id) const override;
-  /// See InferResult::RequestStatus()
-  Error RequestStatus() const override;
-  /// See InferResult::RawData()
-  Error RawData(
-      const std::string& output_name, const uint8_t** buf,
-      size_t* byte_size) const override;
-
- private:
-  std::unique_ptr<ts::InferResult> result_;
-};
-
-}}}}  // namespace triton::perfanalyzer::clientbackend::torchserve
diff --git a/src/c++/perf_analyzer/client_backend/torchserve/torchserve_http_client.cc b/src/c++/perf_analyzer/client_backend/torchserve/torchserve_http_client.cc
deleted file mode 100644
index c835ab109..000000000
--- a/src/c++/perf_analyzer/client_backend/torchserve/torchserve_http_client.cc
+++ /dev/null
@@ -1,409 +0,0 @@
-// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "torchserve_http_client.h"
-
-#include <cstdio>
-#include <iostream>
-
-#include "torchserve_client_backend.h"
-
-namespace triton { namespace perfanalyzer { namespace clientbackend {
-namespace torchserve {
-
-namespace {
-
-constexpr char kContentLengthHTTPHeader[] = "Content-Length";
-
-//==============================================================================
-
-// Global initialization for libcurl. Libcurl requires global
-// initialization before any other threads are created and before any
-// curl methods are used. The curl_global static object is used to
-// perform this initialization.
-class CurlGlobal {
- public:
-  CurlGlobal();
-  ~CurlGlobal();
-
-  const Error& Status() const { return err_; }
-
- private:
-  Error err_;
-};
-
-CurlGlobal::CurlGlobal() : err_(Error::Success)
-{
-  if (curl_global_init(CURL_GLOBAL_ALL) != 0) {
-    err_ = Error("global initialization failed");
-  }
-}
-
-CurlGlobal::~CurlGlobal()
-{
-  curl_global_cleanup();
-}
-
-static CurlGlobal curl_global;
-
-
-}  // namespace
-
-//==============================================================================
-
-HttpInferRequest::HttpInferRequest()
-    : header_list_(nullptr),
-      file_ptr_(std::unique_ptr<FILE, Deleter>(nullptr, Deleter()))
-{
-}
-
-HttpInferRequest::~HttpInferRequest()
-{
-  if (header_list_ != nullptr) {
-    curl_slist_free_all(static_cast<struct curl_slist*>(header_list_));
-    header_list_ = nullptr;
-  }
-}
-
-Error
-HttpInferRequest::InitializeRequest()
-{
-  http_code_ = 400;
-  // Prepare buffer to record the response
-  infer_response_buffer_.reset(new std::string());
-  return Error::Success;
-}
-
-Error
-HttpInferRequest::OpenFileData(std::string& file_path)
-{
-  FILE* pFile = fopen(file_path.c_str(), "rb");
-  if (pFile == nullptr) {
-    return Error("Failed to open the specified file `" + file_path + "`");
-  }
-  file_ptr_.reset(pFile);
-  return Error::Success;
-}
-
-long
-HttpInferRequest::FileSize()
-{
-  long size;
-  fseek(file_ptr_.get(), 0, SEEK_END);
-  size = ftell(file_ptr_.get());
-  rewind(file_ptr_.get());
-  return size;
-}
-
-Error
-HttpInferRequest::CloseFileData()
-{
-  file_ptr_.reset(nullptr);
-  return Error::Success;
-}
-
-
-//==============================================================================
-
-Error
-HttpClient::Create(
-    std::unique_ptr<HttpClient>* client, const std::string& server_url,
-    bool verbose)
-{
-  client->reset(new HttpClient(server_url, verbose));
-  return Error::Success;
-}
-
-Error
-HttpClient::Infer(
-    InferResult** result, const InferOptions& options,
-    const std::vector<InferInput*>& inputs,
-    const std::vector<const InferRequestedOutput*>& outputs,
-    const Headers& headers)
-{
-  Error err;
-
-  std::string request_uri(url_ + "/predictions/" + options.model_name_);
-  if (!options.model_version_.empty()) {
-    request_uri += "/" + options.model_version_;
-  }
-
-  std::shared_ptr<HttpInferRequest> sync_request(new HttpInferRequest());
-
-  sync_request->Timer().Reset();
-  sync_request->Timer().CaptureTimestamp(
-      tc::RequestTimers::Kind::REQUEST_START);
-
-  if (!curl_global.Status().IsOk()) {
-    return curl_global.Status();
-  }
-
-  err = PreRunProcessing(
-      easy_handle_, request_uri, options, inputs, outputs, headers,
-      sync_request);
-  if (!err.IsOk()) {
-    return err;
-  }
-
-  sync_request->Timer().CaptureTimestamp(tc::RequestTimers::Kind::SEND_START);
-
-  // During this call SEND_END (except in above case), RECV_START, and
-  // RECV_END will be set.
-  auto curl_status = curl_easy_perform(easy_handle_);
-  if (curl_status != CURLE_OK) {
-    sync_request->http_code_ = 400;
-  } else {
-    curl_easy_getinfo(
-        easy_handle_, CURLINFO_RESPONSE_CODE, &sync_request->http_code_);
-  }
-
-  sync_request->CloseFileData();
-  curl_mime_free(mime_handle_);
-
-  InferResult::Create(result, sync_request);
-
-  sync_request->Timer().CaptureTimestamp(tc::RequestTimers::Kind::REQUEST_END);
-
-  tc::Error nic_err = UpdateInferStat(sync_request->Timer());
-  if (!nic_err.IsOk()) {
-    std::cerr << "Failed to update context stat: " << nic_err << std::endl;
-  }
-
-  err = (*result)->RequestStatus();
-
-  return err;
-}
-
-size_t
-HttpClient::ReadCallback(char* buffer, size_t size, size_t nitems, void* userp)
-{
-  size_t retcode =
-      fread(buffer, size, nitems, ((HttpInferRequest*)userp)->FilePtr());
-  if (retcode == 0) {
-    ((HttpInferRequest*)userp)
-        ->Timer()
-        .CaptureTimestamp(tc::RequestTimers::Kind::SEND_END);
-  }
-  return retcode;
-}
-
-int
-HttpClient::SeekCallback(void* userp, curl_off_t offset, int origin)
-{
-  if (fseek(((HttpInferRequest*)userp)->FilePtr(), offset, origin) == 0)
-    return CURL_SEEKFUNC_OK;
-  else
-    return CURL_SEEKFUNC_FAIL;
-}
-
-size_t
-HttpClient::InferResponseHeaderHandler(
-    void* contents, size_t size, size_t nmemb, void* userp)
-{
-  HttpInferRequest* request = reinterpret_cast<HttpInferRequest*>(userp);
-
-  char* buf = reinterpret_cast<char*>(contents);
-  size_t byte_size = size * nmemb;
-
-  size_t idx = strlen(kContentLengthHTTPHeader);
-  if ((idx < byte_size) && !strncasecmp(buf, kContentLengthHTTPHeader, idx)) {
-    while ((idx < byte_size) && (buf[idx] != ':')) {
-      ++idx;
-    }
-
-    if (idx < byte_size) {
-      std::string hdr(buf + idx + 1, byte_size - idx - 1);
-      request->infer_response_buffer_->reserve(std::stoi(hdr));
-    }
-  }
-
-  return byte_size;
-}
-
-size_t
-HttpClient::InferResponseHandler(
-    void* contents, size_t size, size_t nmemb, void* userp)
-{
-  HttpInferRequest* request = reinterpret_cast<HttpInferRequest*>(userp);
-
-  if (request->Timer().Timestamp(tc::RequestTimers::Kind::RECV_START) == 0) {
-    request->Timer().CaptureTimestamp(tc::RequestTimers::Kind::RECV_START);
-  }
-
-  char* buf = reinterpret_cast<char*>(contents);
-  size_t result_bytes = size * nmemb;
-  request->infer_response_buffer_->append(buf, result_bytes);
-
-  // InferResponseHandler may be called multiple times so we overwrite
-  // RECV_END so that we always have the time of the last.
-  request->Timer().CaptureTimestamp(tc::RequestTimers::Kind::RECV_END);
-
-  return result_bytes;
-}
-
-Error
-HttpClient::PreRunProcessing(
-    void* vcurl, std::string& request_uri, const InferOptions& options,
-    const std::vector<InferInput*>& inputs,
-    const std::vector<const InferRequestedOutput*>& outputs,
-    const Headers& headers, std::shared_ptr<HttpInferRequest>& http_request)
-{
-  CURL* curl = reinterpret_cast<CURL*>(vcurl);
-
-  // Prepare the request object to provide the data for inference.
-  Error err = http_request->InitializeRequest();
-  if (!err.IsOk()) {
-    return err;
-  }
-
-  std::vector<std::string> input_filepaths;
-
-  curl_easy_setopt(curl, CURLOPT_URL, request_uri.c_str());
-  curl_easy_setopt(curl, CURLOPT_USERAGENT, "libcurl-agent/1.0");
-  curl_easy_setopt(curl, CURLOPT_TCP_NODELAY, 1L);
-
-  if (verbose_) {
-    curl_easy_setopt(curl, CURLOPT_VERBOSE, 1L);
-  }
-
-  const long buffer_byte_size = 16 * 1024 * 1024;
-  curl_easy_setopt(curl, CURLOPT_UPLOAD_BUFFERSIZE, buffer_byte_size);
-  curl_easy_setopt(curl, CURLOPT_BUFFERSIZE, buffer_byte_size);
-
-  // request data provided by InferRequestProvider()
-  mime_handle_ = curl_mime_init(easy_handle_);
-  // Add the buffers holding input tensor data
-  for (const auto input : inputs) {
-    TorchServeInferInput* this_input =
-        dynamic_cast<TorchServeInferInput*>(input);
-    this_input->PrepareForRequest();
-    bool end_of_input = false;
-    while (!end_of_input) {
-      const uint8_t* buf;
-      size_t buf_size;
-      this_input->GetNext(&buf, &buf_size, &end_of_input);
-      if (buf != nullptr) {
-        // The first 4 bytes of the buffer are the serialized string length,
-        // so skip them to recover the file path itself.
-        std::string file_path(
-            reinterpret_cast<const char*>(buf) + 4, buf_size - 4);
-        Error err = http_request->OpenFileData(file_path);
-        if (!err.IsOk()) {
-          return err;
-        }
-        if (verbose_) {
-          input_filepaths.push_back(file_path);
-        }
-      }
-    }
-  }
-
-  long file_size = http_request->FileSize();
-  curl_mimepart* part = curl_mime_addpart((curl_mime*)mime_handle_);
-  curl_mime_data_cb(
-      part, file_size, ReadCallback, SeekCallback, NULL, http_request.get());
-  curl_mime_name(part, "data");
-
-  curl_easy_setopt(easy_handle_, CURLOPT_MIMEPOST, (curl_mime*)mime_handle_);
-
-  // response headers handled by InferResponseHeaderHandler()
-  curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, InferResponseHeaderHandler);
-  curl_easy_setopt(curl, CURLOPT_HEADERDATA, http_request.get());
-
-  // response data handled by InferResponseHandler()
-  curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, InferResponseHandler);
-  curl_easy_setopt(curl, CURLOPT_WRITEDATA, http_request.get());
-
-  struct curl_slist* list = nullptr;
-  for (const auto& pr : headers) {
-    std::string hdr = pr.first + ": " + pr.second;
-    list = curl_slist_append(list, hdr.c_str());
-  }
-  curl_easy_setopt(curl, CURLOPT_HTTPHEADER, list);
-
-  // The list will be freed when the request is destructed
-  http_request->header_list_ = list;
-
-  if (verbose_) {
-    std::cout << "inference request : [";
-    bool first = true;
-    for (const auto& fn : input_filepaths) {
-      if (first) {
-        first = false;
-      } else {
-        std::cout << ",";
-      }
-      std::cout << "\"" << fn << "\"";
-    }
-    std::cout << "]" << std::endl;
-  }
-
-  return Error::Success;
-}
-
-HttpClient::HttpClient(const std::string& url, bool verbose)
-    : InferenceServerClient(verbose), url_(url),
-      easy_handle_(reinterpret_cast<void*>(curl_easy_init()))
-{
-}
-
-HttpClient::~HttpClient()
-{
-  exiting_ = true;
-
-  if (easy_handle_ != nullptr) {
-    curl_easy_cleanup(reinterpret_cast<CURL*>(easy_handle_));
-  }
-}
-
-//======================================================================
-
-Error
-InferResult::Create(
-    InferResult** infer_result,
-    std::shared_ptr<HttpInferRequest> infer_request)
-{
-  *infer_result =
-      reinterpret_cast<InferResult*>(new InferResult(infer_request));
-  return Error::Success;
-}
-
-Error
-InferResult::RequestStatus() const
-{
-  return status_;
-}
-
-InferResult::InferResult(std::shared_ptr<HttpInferRequest> infer_request)
-    : infer_request_(infer_request)
-{
-  if (infer_request->http_code_ != 200) {
-    status_ = Error(
-        "inference failed with error code " +
-        std::to_string(infer_request->http_code_));
-  }
-}
-
-//======================================================================
-
-}}}}  // namespace triton::perfanalyzer::clientbackend::torchserve
diff --git a/src/c++/perf_analyzer/client_backend/torchserve/torchserve_http_client.h b/src/c++/perf_analyzer/client_backend/torchserve/torchserve_http_client.h
deleted file mode 100644
index ede9cdfd5..000000000
--- a/src/c++/perf_analyzer/client_backend/torchserve/torchserve_http_client.h
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#pragma once
-
-#include <curl/curl.h>
-#include <functional>
-#include <memory>
-
-#include "../client_backend.h"
-#include "common.h"
-#include "torchserve_infer_input.h"
-
-
-namespace tc = triton::client;
-
-namespace triton { namespace perfanalyzer { namespace clientbackend {
-namespace torchserve {
-
-class InferResult;
-class HttpInferRequest;
-
-using TorchServeOnCompleteFn = std::function<void(InferResult*)>;
-
-//==============================================================================
-/// An HttpClient object is used to perform any kind of communication with the
-/// TorchServe service using libcurl. None of the functions are thread
-/// safe.
-///
-/// \code
-///   std::unique_ptr<HttpClient> client;
-///   HttpClient::Create(&client, "localhost:8080");
-///   ...
-///   ...
-/// \endcode
-///
-class HttpClient : public tc::InferenceServerClient {
- public:
-  ~HttpClient();
-
-  /// Create a client that can be used to communicate with the server.
-  /// \param client Returns a new InferenceServerHttpClient object.
-  /// \param server_url The inference server name and port.
-  /// \param verbose If true generate verbose output when contacting
-  /// the inference server.
-  /// \return Error object indicating success or failure.
-  static Error Create(
-      std::unique_ptr<HttpClient>* client, const std::string& server_url,
-      const bool verbose);
-
-  /// Run synchronous inference on server.
-  /// \param result Returns the result of inference.
-  /// \param options The options for inference request.
-  /// \param inputs The vector of InferInput describing the model inputs.
-  /// \param outputs Optional vector of InferRequestedOutput describing how the
-  /// output must be returned. If not provided then all the outputs in the
-  /// model config will be returned as default settings.
-  /// \param headers Optional map specifying additional HTTP headers to include
-  /// in the HTTP request.
-  /// \return Error object indicating success or failure of the
-  /// request.
-  Error Infer(
-      InferResult** result, const InferOptions& options,
-      const std::vector<InferInput*>& inputs,
-      const std::vector<const InferRequestedOutput*>& outputs =
-          std::vector<const InferRequestedOutput*>(),
-      const Headers& headers = Headers());
-
- private:
-  HttpClient(const std::string& url, bool verbose);
-  Error PreRunProcessing(
-      void* curl, std::string& request_uri, const InferOptions& options,
-      const std::vector<InferInput*>& inputs,
-      const std::vector<const InferRequestedOutput*>& outputs,
-      const Headers& headers, std::shared_ptr<HttpInferRequest>& request);
-
-  static size_t ReadCallback(
-      char* buffer, size_t size, size_t nitems, void* userp);
-  static int SeekCallback(void* userp, curl_off_t offset, int origin);
-  static size_t InferResponseHeaderHandler(
-      void* contents, size_t size, size_t nmemb, void* userp);
-  static size_t InferResponseHandler(
-      void* contents, size_t size, size_t nmemb, void* userp);
-
-  // The server url
-  const std::string url_;
-  // curl easy handle shared for all synchronous requests.
-  void* easy_handle_;
-  // The handle to interact with the mime API.
-  curl_mime* mime_handle_;
-};
-
-//======================================================================
-
-class HttpInferRequest {
- public:
-  struct Deleter {
-    void operator()(FILE* file)
-    {
-      if (file != nullptr) {
-        fclose(file);
-      }
-    }
-  };
-
-  HttpInferRequest();
-  ~HttpInferRequest();
-  Error InitializeRequest();
-  Error OpenFileData(std::string& file_path);
-  long FileSize();
-  Error CloseFileData();
-  tc::RequestTimers& Timer() { return timer_; }
-  std::string& DebugString() { return *infer_response_buffer_; }
-  FILE* FilePtr() { return file_ptr_.get(); }
-  friend HttpClient;
-  friend InferResult;
-
- private:
-  // Pointer to the list of the HTTP request header, keep it such that it will
-  // be valid during the transfer and can be freed once transfer is completed.
-  struct curl_slist* header_list_;
-  std::unique_ptr<FILE, Deleter> file_ptr_;
-  // HTTP response code for the inference request
-  long http_code_;
-  // Buffer that accumulates the response body.
-  std::unique_ptr<std::string> infer_response_buffer_;
-  // The timers for infer request.
-  tc::RequestTimers timer_;
-};
-
-//======================================================================
-
-class InferResult {
- public:
-  static Error Create(
-      InferResult** infer_result,
-      std::shared_ptr<HttpInferRequest> infer_request);
-  Error RequestStatus() const;
-  Error Id(std::string* id) const;
-  std::string DebugString() const { return infer_request_->DebugString(); }
-
- private:
-  InferResult(std::shared_ptr<HttpInferRequest> infer_request);
-
-  // The status of the inference
-  Error status_;
-  // The pointer to the HttpInferRequest object
-  std::shared_ptr<HttpInferRequest> infer_request_;
-};
-
-//======================================================================
-
-}}}}  // namespace triton::perfanalyzer::clientbackend::torchserve
diff --git a/src/c++/perf_analyzer/client_backend/torchserve/torchserve_infer_input.cc b/src/c++/perf_analyzer/client_backend/torchserve/torchserve_infer_input.cc
deleted file mode 100644
index 22eb1ea97..000000000
--- a/src/c++/perf_analyzer/client_backend/torchserve/torchserve_infer_input.cc
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "torchserve_infer_input.h"
-
-namespace triton { namespace perfanalyzer { namespace clientbackend {
-namespace torchserve {
-
-
-Error
-TorchServeInferInput::Create(
-    InferInput** infer_input, const std::string& name,
-    const std::vector<int64_t>& dims, const std::string& datatype)
-{
-  TorchServeInferInput* local_infer_input =
-      new TorchServeInferInput(name, dims, datatype);
-  *infer_input = local_infer_input;
-  return Error::Success;
-}
-
-Error
-TorchServeInferInput::SetShape(const std::vector<int64_t>& shape)
-{
-  shape_ = shape;
-  return Error::Success;
-}
-
-Error
-TorchServeInferInput::Reset()
-{
-  bufs_.clear();
-  buf_byte_sizes_.clear();
-  bufs_idx_ = 0;
-  byte_size_ = 0;
-  return Error::Success;
-}
-
-Error
-TorchServeInferInput::AppendRaw(const uint8_t* input, size_t input_byte_size)
-{
-  byte_size_ += input_byte_size;
-  bufs_.push_back(input);
-  buf_byte_sizes_.push_back(input_byte_size);
-  return Error::Success;
-}
-
-Error
-TorchServeInferInput::ByteSize(size_t* byte_size) const
-{
-  *byte_size = byte_size_;
-  return Error::Success;
-}
-
-Error
-TorchServeInferInput::PrepareForRequest()
-{
-  // Reset position so request sends entire input.
-  bufs_idx_ = 0;
-  buf_pos_ = 0;
-  return Error::Success;
-}
-
-Error
-TorchServeInferInput::GetNext(
-    const uint8_t** buf, size_t* input_bytes, bool* end_of_input)
-{
-  if (bufs_idx_ < bufs_.size()) {
-    *buf = bufs_[bufs_idx_];
-    *input_bytes = buf_byte_sizes_[bufs_idx_];
-    bufs_idx_++;
-  } else {
-    *buf = nullptr;
-    *input_bytes = 0;
-  }
-  *end_of_input = (bufs_idx_ >= bufs_.size());
-  return Error::Success;
-}
-
-TorchServeInferInput::TorchServeInferInput(
-    const std::string& name, const std::vector<int64_t>& dims,
-    const std::string& datatype)
-    : InferInput(BackendKind::TORCHSERVE, name, datatype), shape_(dims)
-{
-}
-
-}}}}  // namespace triton::perfanalyzer::clientbackend::torchserve
diff --git a/src/c++/perf_analyzer/client_backend/torchserve/torchserve_infer_input.h b/src/c++/perf_analyzer/client_backend/torchserve/torchserve_infer_input.h
deleted file mode 100644
index cc629cd1d..000000000
--- a/src/c++/perf_analyzer/client_backend/torchserve/torchserve_infer_input.h
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#pragma once
-
-#include <string>
-
-#include "../../perf_utils.h"
-#include "../client_backend.h"
-
-namespace triton { namespace perfanalyzer { namespace clientbackend {
-namespace torchserve {
-
-
-//==============================================================
-/// TorchServeInferInput instance holds the information regarding
-/// model input tensor. In this case the content held will be
-/// the path to the file holding data.
-///
-class TorchServeInferInput : public InferInput {
- public:
-  static Error Create(
-      InferInput** infer_input, const std::string& name,
-      const std::vector<int64_t>& dims, const std::string& datatype);
-  /// See InferInput::Shape()
-  const std::vector<int64_t>& Shape() const override { return shape_; }
-  /// See InferInput::SetShape()
-  Error SetShape(const std::vector<int64_t>& shape) override;
-  /// See InferInput::Reset()
-  Error Reset() override;
-  /// See InferInput::AppendRaw()
-  Error AppendRaw(const uint8_t* input, size_t input_byte_size) override;
-  /// Gets the size of data added into this input in bytes.
-  /// \param byte_size The size of data added in bytes.
-  /// \return Error object indicating success or failure.
-  Error ByteSize(size_t* byte_size) const;
-  /// Resets the heads to start providing data from the beginning.
-  Error PrepareForRequest();
-  /// Get the next chunk of data if available.
-  Error GetNext(const uint8_t** buf, size_t* input_bytes, bool* end_of_input);
-
- private:
-  explicit TorchServeInferInput(
-      const std::string& name, const std::vector<int64_t>& dims,
-      const std::string& datatype);
-
-  std::vector<int64_t> shape_;
-  size_t byte_size_;
-  size_t bufs_idx_, buf_pos_;
-  std::vector<const uint8_t*> bufs_;
-  std::vector<size_t> buf_byte_sizes_;
-};
-
-}}}}  // namespace triton::perfanalyzer::clientbackend::torchserve
diff --git a/src/c++/perf_analyzer/client_backend/triton/CMakeLists.txt b/src/c++/perf_analyzer/client_backend/triton/CMakeLists.txt
deleted file mode 100644
index 203a8e350..000000000
--- a/src/c++/perf_analyzer/client_backend/triton/CMakeLists.txt
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#  * Redistributions of source code must retain the above copyright
-#    notice, this list of conditions and the following disclaimer.
-#  * Redistributions in binary form must reproduce the above copyright
-#    notice, this list of conditions and the following disclaimer in the
-#    documentation and/or other materials provided with the distribution.
-#  * Neither the name of NVIDIA CORPORATION nor the names of its
-#    contributors may be used to endorse or promote products derived
-#    from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-cmake_minimum_required (VERSION 3.18)
-
-set(
-  TRITON_CLIENT_BACKEND_SRCS
-  triton_client_backend.cc
-)
-
-set(
-  TRITON_CLIENT_BACKEND_HDRS
-  triton_client_backend.h
-)
-
-add_library(
-  triton-client-backend-library EXCLUDE_FROM_ALL OBJECT
-  ${TRITON_CLIENT_BACKEND_SRCS}
-  ${TRITON_CLIENT_BACKEND_HDRS}
-)
-
-target_link_libraries(
-  triton-client-backend-library
-  PUBLIC grpcclient_static
-  PUBLIC httpclient_static
-  PRIVATE CURL::libcurl
-)
-
-target_include_directories(
-  triton-client-backend-library
-  PRIVATE CURL::libcurl
-)
-
-if(${TRITON_ENABLE_GPU})
-  target_link_libraries(
-    triton-client-backend-library
-    PRIVATE CUDA::cudart
-  )
-endif() # TRITON_ENABLE_GPU
diff --git a/src/c++/perf_analyzer/client_backend/triton/test_triton_client_backend.cc b/src/c++/perf_analyzer/client_backend/triton/test_triton_client_backend.cc
deleted file mode 100644
index c32ad17be..000000000
--- a/src/c++/perf_analyzer/client_backend/triton/test_triton_client_backend.cc
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
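The test cases that follow verify TritonClientBackend::ParseAndStoreMetric, which scrapes Prometheus-format lines such as nv_gpu_utilization{gpu_uuid="..."} 0.41 from the server's metrics endpoint. As a standalone illustration of that kind of parse — an assumption-level sketch, not the deleted implementation itself — the logic reduces to: skip comment lines, match the metric name, extract the gpu_uuid label, and read the trailing value.

#include <iostream>
#include <map>
#include <sstream>
#include <string>

// Illustrative stand-in for the parsing the tests below verify: for each line
// of the form `metric_id{gpu_uuid="<uuid>"} <value>`, record the value by UUID.
template <typename T>
void
ParseAndStoreMetricSketch(
    const std::string& metrics_text, const std::string& metric_id,
    std::map<std::string, T>& metric_per_gpu)
{
  std::istringstream stream(metrics_text);
  std::string line;
  const std::string prefix = metric_id + "{gpu_uuid=\"";
  while (std::getline(stream, line)) {
    if (line.rfind(prefix, 0) != 0) {
      continue;  // skips "# HELP" / "# TYPE" comments and other metrics
    }
    const size_t uuid_begin = prefix.size();
    const size_t uuid_end = line.find('"', uuid_begin);
    if (uuid_end == std::string::npos) {
      continue;
    }
    const size_t value_begin = line.find('}', uuid_end);
    if (value_begin == std::string::npos) {
      continue;
    }
    std::istringstream value(line.substr(value_begin + 1));
    T parsed{};
    if (value >> parsed) {
      metric_per_gpu[line.substr(uuid_begin, uuid_end - uuid_begin)] = parsed;
    }
  }
}

int main()
{
  std::map<std::string, double> util;
  ParseAndStoreMetricSketch(
      "# TYPE nv_gpu_utilization gauge\n"
      "nv_gpu_utilization{gpu_uuid=\"GPU-0\"} 0.41\n"
      "nv_gpu_utilization{gpu_uuid=\"GPU-1\"} 0.77\n",
      "nv_gpu_utilization", util);
  std::cout << util["GPU-1"] << std::endl;  // prints 0.77
}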
-
-#include <cstdint>
-#include <map>
-#include <string>
-
-#include "../../doctest.h"
-#include "triton_client_backend.h"
-
-namespace triton { namespace perfanalyzer { namespace clientbackend {
-namespace tritonremote {
-
-class TestTritonClientBackend : public TritonClientBackend {
- public:
-  template <typename T>
-  void ParseAndStoreMetric(
-      const std::string& metrics_endpoint_text, const std::string metric_id,
-      std::map<std::string, T>& metric_per_gpu)
-  {
-    TritonClientBackend::ParseAndStoreMetric(
-        metrics_endpoint_text, metric_id, metric_per_gpu);
-  }
-};
-
-TEST_CASE("testing the ParseAndStoreMetric function")
-{
-  TestTritonClientBackend ttcb{};
-
-  SUBCASE("nv_gpu_utilization metric")
-  {
-    const std::string metrics_endpoint_text{R"(
-# HELP nv_gpu_utilization GPU utilization rate [0.0 - 1.0)
-# TYPE nv_gpu_utilization gauge
-nv_gpu_utilization{gpu_uuid="GPU-00000000-0000-0000-0000-000000000000"} 0.41
-nv_gpu_utilization{gpu_uuid="GPU-00000000-0000-0000-0000-000000000001"} 0.77
-    )"};
-    const std::string metric_id{"nv_gpu_utilization"};
-    std::map<std::string, double> gpu_utilization_per_gpu{};
-
-    ttcb.ParseAndStoreMetric(
-        metrics_endpoint_text, metric_id, gpu_utilization_per_gpu);
-    CHECK(gpu_utilization_per_gpu.size() == 2);
-    CHECK(
-        gpu_utilization_per_gpu["GPU-00000000-0000-0000-0000-000000000000"] ==
-        doctest::Approx(0.41));
-    CHECK(
-        gpu_utilization_per_gpu["GPU-00000000-0000-0000-0000-000000000001"] ==
-        doctest::Approx(0.77));
-  }
-
-  SUBCASE("nv_gpu_power_usage metric")
-  {
-    const std::string metrics_endpoint_text{R"(
-# HELP nv_gpu_power_usage GPU power usage in watts
-# TYPE nv_gpu_power_usage gauge
-nv_gpu_power_usage{gpu_uuid="GPU-00000000-0000-0000-0000-000000000000"} 81.619
-nv_gpu_power_usage{gpu_uuid="GPU-00000000-0000-0000-0000-000000000001"} 99.217
-    )"};
-    const std::string metric_id{"nv_gpu_power_usage"};
-    std::map<std::string, double> gpu_power_usage_per_gpu{};
-
-    ttcb.ParseAndStoreMetric(
-        metrics_endpoint_text, metric_id, gpu_power_usage_per_gpu);
-    CHECK(gpu_power_usage_per_gpu.size() == 2);
-    CHECK(
-        gpu_power_usage_per_gpu["GPU-00000000-0000-0000-0000-000000000000"] ==
-        doctest::Approx(81.619));
-    CHECK(
-        gpu_power_usage_per_gpu["GPU-00000000-0000-0000-0000-000000000001"] ==
-        doctest::Approx(99.217));
-  }
-
-  SUBCASE("nv_gpu_memory_used_bytes metric")
-  {
-    const std::string metrics_endpoint_text{R"(
-# HELP nv_gpu_memory_used_bytes GPU used memory, in bytes
-# TYPE nv_gpu_memory_used_bytes gauge
-nv_gpu_memory_used_bytes{gpu_uuid="GPU-00000000-0000-0000-0000-000000000000"} 50000000
-nv_gpu_memory_used_bytes{gpu_uuid="GPU-00000000-0000-0000-0000-000000000001"} 75000000
-    )"};
-    const std::string metric_id{"nv_gpu_memory_used_bytes"};
-    std::map<std::string, uint64_t> gpu_memory_used_bytes_per_gpu{};
-
-    ttcb.ParseAndStoreMetric(
-        metrics_endpoint_text, metric_id, gpu_memory_used_bytes_per_gpu);
-    CHECK(gpu_memory_used_bytes_per_gpu.size() == 2);
-    CHECK(
-        gpu_memory_used_bytes_per_gpu
-            ["GPU-00000000-0000-0000-0000-000000000000"] == 50000000);
-    CHECK(
-        gpu_memory_used_bytes_per_gpu
-            ["GPU-00000000-0000-0000-0000-000000000001"] == 75000000);
-  }
-
-  SUBCASE("nv_gpu_memory_total_bytes metric")
-  {
-    const std::string metrics_endpoint_text{R"(
-# HELP nv_gpu_memory_total_bytes GPU total memory, in bytes
-# TYPE nv_gpu_memory_total_bytes gauge
-nv_gpu_memory_total_bytes{gpu_uuid="GPU-00000000-0000-0000-0000-000000000000"} 1000000000
-nv_gpu_memory_total_bytes{gpu_uuid="GPU-00000000-0000-0000-0000-000000000001"} 2000000000
-    )"};
-    const std::string metric_id{"nv_gpu_memory_total_bytes"};
-    std::map<std::string, uint64_t> gpu_memory_total_bytes_per_gpu{};
-
-    ttcb.ParseAndStoreMetric(
-        metrics_endpoint_text, metric_id, gpu_memory_total_bytes_per_gpu);
-    CHECK(gpu_memory_total_bytes_per_gpu.size() == 2);
-    CHECK(
-        gpu_memory_total_bytes_per_gpu
-            ["GPU-00000000-0000-0000-0000-000000000000"] == 1000000000);
-    CHECK(
-        gpu_memory_total_bytes_per_gpu
-            ["GPU-00000000-0000-0000-0000-000000000001"] == 2000000000);
-  }
-}
-
-}}}}  // namespace triton::perfanalyzer::clientbackend::tritonremote
diff --git a/src/c++/perf_analyzer/client_backend/triton/triton_client_backend.cc b/src/c++/perf_analyzer/client_backend/triton/triton_client_backend.cc
deleted file mode 100644
index 419123e52..000000000
--- a/src/c++/perf_analyzer/client_backend/triton/triton_client_backend.cc
+++ /dev/null
@@ -1,855 +0,0 @@
-// Copyright 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
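In the file that follows, the gRPC paths of ModelMetadata() and ModelConfig() normalize protobuf responses into the same rapidjson documents the HTTP path already produces: print the proto as JSON, then parse it. A hedged sketch of that bridge — illustrating the pattern, not reproducing the deleted code; ProtoToJsonDoc and the ProtoMessage parameter are illustrative names:

#include <google/protobuf/util/json_util.h>
#include <rapidjson/document.h>

#include <string>

// Convert any protobuf message (e.g. a model-metadata response) into a
// rapidjson::Document, so HTTP and gRPC callers share one JSON representation.
template <typename ProtoMessage>
bool
ProtoToJsonDoc(const ProtoMessage& msg, rapidjson::Document* doc)
{
  std::string json;
  ::google::protobuf::util::JsonPrintOptions options;
  options.preserve_proto_field_names = true;      // keep snake_case names
  options.always_print_primitive_fields = true;   // emit default values too
  if (!::google::protobuf::util::MessageToJsonString(msg, &json, options)
           .ok()) {
    return false;
  }
  doc->Parse(json.c_str());
  return !doc->HasParseError();
}

Setting preserve_proto_field_names matters because the rapidjson consumers index fields by their on-the-wire names (for example full_config["config"]), which protobuf would otherwise camelCase.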
-
-#include "triton_client_backend.h"
-
-#include <curl/curl.h>
-
-#include <map>
-#include <string>
-
-#include "../../constants.h"
-#include "../../perf_analyzer_exception.h"
-#include "json_utils.h"
-
-namespace {
-
-triton::client::HttpSslOptions
-ParseHttpSslOptions(
-    const triton::perfanalyzer::clientbackend::SslOptionsBase& ssl_options)
-{
-  triton::client::HttpSslOptions http_ssl_options;
-
-  http_ssl_options.verify_peer = ssl_options.ssl_https_verify_peer;
-  http_ssl_options.verify_host = ssl_options.ssl_https_verify_host;
-  http_ssl_options.ca_info = ssl_options.ssl_https_ca_certificates_file;
-  if (ssl_options.ssl_https_client_certificate_type == "PEM") {
-    http_ssl_options.cert_type =
-        triton::client::HttpSslOptions::CERTTYPE::CERT_PEM;
-  } else if (ssl_options.ssl_https_client_certificate_type == "DER") {
-    http_ssl_options.cert_type =
-        triton::client::HttpSslOptions::CERTTYPE::CERT_DER;
-  }
-  http_ssl_options.cert = ssl_options.ssl_https_client_certificate_file;
-  if (ssl_options.ssl_https_private_key_type == "PEM") {
-    http_ssl_options.key_type =
-        triton::client::HttpSslOptions::KEYTYPE::KEY_PEM;
-  } else if (ssl_options.ssl_https_private_key_type == "DER") {
-    http_ssl_options.key_type =
-        triton::client::HttpSslOptions::KEYTYPE::KEY_DER;
-  }
-  http_ssl_options.key = ssl_options.ssl_https_private_key_file;
-
-  return http_ssl_options;
-}
-
-std::pair<bool, triton::client::SslOptions>
-ParseGrpcSslOptions(
-    const triton::perfanalyzer::clientbackend::SslOptionsBase& ssl_options)
-{
-  bool use_ssl = ssl_options.ssl_grpc_use_ssl;
-
-  triton::client::SslOptions grpc_ssl_options;
-  grpc_ssl_options.root_certificates =
-      ssl_options.ssl_grpc_root_certifications_file;
-  grpc_ssl_options.private_key = ssl_options.ssl_grpc_private_key_file;
-  grpc_ssl_options.certificate_chain =
-      ssl_options.ssl_grpc_certificate_chain_file;
-
-  return std::pair{use_ssl, grpc_ssl_options};
-}
-
-}  // namespace
-
-namespace triton { namespace perfanalyzer { namespace clientbackend {
-namespace tritonremote {
-//==============================================================================
-
-Error
-TritonClientBackend::Create(
-    const std::string& url, const ProtocolType protocol,
-    const SslOptionsBase& ssl_options,
-    const std::map<std::string, std::vector<std::string>> trace_options,
-    const grpc_compression_algorithm compression_algorithm,
-    std::shared_ptr<Headers> http_headers, const bool verbose,
-    const std::string& metrics_url, const TensorFormat input_tensor_format,
-    const TensorFormat output_tensor_format,
-    std::unique_ptr<ClientBackend>* client_backend)
-{
-  std::unique_ptr<TritonClientBackend> triton_client_backend(
-      new TritonClientBackend(
-          protocol, compression_algorithm, http_headers, metrics_url,
-          input_tensor_format, output_tensor_format));
-  if (protocol == ProtocolType::HTTP) {
-    triton::client::HttpSslOptions http_ssl_options =
-        ParseHttpSslOptions(ssl_options);
-    RETURN_IF_TRITON_ERROR(tc::InferenceServerHttpClient::Create(
-        &(triton_client_backend->client_.http_client_), url, verbose,
-        http_ssl_options));
-    if (!trace_options.empty()) {
-      std::string response;
-      RETURN_IF_TRITON_ERROR(
-          triton_client_backend->client_.http_client_->UpdateTraceSettings(
-              &response, "", trace_options));
-    }
-  } else {
-    std::pair<bool, triton::client::SslOptions> grpc_ssl_options_pair =
-        ParseGrpcSslOptions(ssl_options);
-    bool use_ssl = grpc_ssl_options_pair.first;
-    triton::client::SslOptions grpc_ssl_options = grpc_ssl_options_pair.second;
-    RETURN_IF_TRITON_ERROR(tc::InferenceServerGrpcClient::Create(
-        &(triton_client_backend->client_.grpc_client_), url, verbose, use_ssl,
-        grpc_ssl_options));
-    if (!trace_options.empty()) {
inference::TraceSettingResponse response; - RETURN_IF_TRITON_ERROR( - triton_client_backend->client_.grpc_client_->UpdateTraceSettings( - &response, "", trace_options)); - } - } - - *client_backend = std::move(triton_client_backend); - - return Error::Success; -} - -Error -TritonClientBackend::ServerExtensions(std::set<std::string>* extensions) -{ - extensions->clear(); - if (protocol_ == ProtocolType::HTTP) { - std::string server_metadata; - FAIL_IF_TRITON_ERR( - client_.http_client_->ServerMetadata(&server_metadata, *http_headers_), - "unable to get server metadata"); - - rapidjson::Document server_metadata_json; - FAIL_IF_TRITON_ERR( - tc::ParseJson(&server_metadata_json, server_metadata), - "failed to parse server metadata"); - for (const auto& extension : - server_metadata_json["extensions"].GetArray()) { - extensions->insert( - std::string(extension.GetString(), extension.GetStringLength())); - } - } else { - inference::ServerMetadataResponse server_metadata; - FAIL_IF_TRITON_ERR( - client_.grpc_client_->ServerMetadata(&server_metadata, *http_headers_), - "unable to get server metadata"); - for (const auto& extension : server_metadata.extensions()) { - extensions->insert(extension); - } - } - - return Error::Success; -} - -Error -TritonClientBackend::ModelMetadata( - rapidjson::Document* model_metadata, const std::string& model_name, - const std::string& model_version) -{ - if (protocol_ == ProtocolType::HTTP) { - std::string metadata; - RETURN_IF_TRITON_ERROR(client_.http_client_->ModelMetadata( - &metadata, model_name, model_version, *http_headers_)); - RETURN_IF_TRITON_ERROR(tc::ParseJson(model_metadata, metadata)); - } else { - inference::ModelMetadataResponse model_metadata_proto; - RETURN_IF_TRITON_ERROR(client_.grpc_client_->ModelMetadata( - &model_metadata_proto, model_name, model_version, *http_headers_)); - - std::string metadata; - ::google::protobuf::util::JsonPrintOptions options; - options.preserve_proto_field_names = true; - options.always_print_primitive_fields = true; - ::google::protobuf::util::MessageToJsonString( - model_metadata_proto, &metadata, options); - - RETURN_IF_TRITON_ERROR(tc::ParseJson(model_metadata, metadata)); - } - - return Error::Success; -} - -Error -TritonClientBackend::ModelConfig( - rapidjson::Document* model_config, const std::string& model_name, - const std::string& model_version) -{ - if (protocol_ == ProtocolType::HTTP) { - std::string config; - RETURN_IF_TRITON_ERROR(client_.http_client_->ModelConfig( - &config, model_name, model_version, *http_headers_)); - RETURN_IF_TRITON_ERROR(tc::ParseJson(model_config, config)); - } else { - inference::ModelConfigResponse model_config_proto; - RETURN_IF_TRITON_ERROR(client_.grpc_client_->ModelConfig( - &model_config_proto, model_name, model_version, *http_headers_)); - - std::string config; - ::google::protobuf::util::JsonPrintOptions options; - options.preserve_proto_field_names = true; - options.always_print_primitive_fields = true; - ::google::protobuf::util::MessageToJsonString( - model_config_proto, &config, options); - - rapidjson::Document full_config; - RETURN_IF_TRITON_ERROR(tc::ParseJson(&full_config, config)); - model_config->CopyFrom(full_config["config"], model_config->GetAllocator()); - } - return Error::Success; -} - -Error -TritonClientBackend::Infer( - InferResult** result, const InferOptions& options, - const std::vector<InferInput*>& inputs, - const std::vector<const InferRequestedOutput*>& outputs) -{ - std::vector<tc::InferInput*> triton_inputs; - ParseInferInputToTriton(inputs, &triton_inputs); - - std::vector<const tc::InferRequestedOutput*> triton_outputs; -
ParseInferRequestedOutputToTriton(outputs, &triton_outputs); - - tc::InferOptions triton_options(options.model_name_); - ParseInferOptionsToTriton(options, &triton_options); - - tc::InferResult* triton_result; - - if (protocol_ == ProtocolType::GRPC) { - RETURN_IF_TRITON_ERROR(client_.grpc_client_->Infer( - &triton_result, triton_options, triton_inputs, triton_outputs, - *http_headers_, compression_algorithm_)); - } else { - RETURN_IF_TRITON_ERROR(client_.http_client_->Infer( - &triton_result, triton_options, triton_inputs, triton_outputs, - *http_headers_)); - } - - *result = new TritonInferResult(triton_result); - - return Error::Success; -} - -Error -TritonClientBackend::AsyncInfer( - OnCompleteFn callback, const InferOptions& options, - const std::vector<InferInput*>& inputs, - const std::vector<const InferRequestedOutput*>& outputs) -{ - auto wrapped_callback = [callback](tc::InferResult* client_result) { - InferResult* result = new TritonInferResult(client_result); - callback(result); - }; - - std::vector<tc::InferInput*> triton_inputs; - ParseInferInputToTriton(inputs, &triton_inputs); - - std::vector<const tc::InferRequestedOutput*> triton_outputs; - ParseInferRequestedOutputToTriton(outputs, &triton_outputs); - - tc::InferOptions triton_options(options.model_name_); - ParseInferOptionsToTriton(options, &triton_options); - - if (protocol_ == ProtocolType::GRPC) { - RETURN_IF_TRITON_ERROR(client_.grpc_client_->AsyncInfer( - wrapped_callback, triton_options, triton_inputs, triton_outputs, - *http_headers_, compression_algorithm_)); - } else { - RETURN_IF_TRITON_ERROR(client_.http_client_->AsyncInfer( - wrapped_callback, triton_options, triton_inputs, triton_outputs, - *http_headers_)); - } - - return Error::Success; -} - -Error -TritonClientBackend::StartStream(OnCompleteFn callback, bool enable_stats) -{ - auto wrapped_callback = [callback](tc::InferResult* client_result) { - InferResult* result = new TritonInferResult(client_result); - callback(result); - }; - - if (protocol_ == ProtocolType::GRPC) { - RETURN_IF_TRITON_ERROR(client_.grpc_client_->StartStream( - wrapped_callback, enable_stats, 0 /* stream_timeout */, *http_headers_, - compression_algorithm_)); - } else { - return Error("HTTP does not support starting streams", pa::GENERIC_ERROR); - } - - return Error::Success; -} - -Error -TritonClientBackend::AsyncStreamInfer( - const InferOptions& options, const std::vector<InferInput*>& inputs, - const std::vector<const InferRequestedOutput*>& outputs) -{ - std::vector<tc::InferInput*> triton_inputs; - ParseInferInputToTriton(inputs, &triton_inputs); - - std::vector<const tc::InferRequestedOutput*> triton_outputs; - ParseInferRequestedOutputToTriton(outputs, &triton_outputs); - - tc::InferOptions triton_options(options.model_name_); - ParseInferOptionsToTriton(options, &triton_options); - - if (protocol_ == ProtocolType::GRPC) { - RETURN_IF_TRITON_ERROR(client_.grpc_client_->AsyncStreamInfer( - triton_options, triton_inputs, triton_outputs)); - } else { - return Error( - "HTTP does not support streaming inferences", pa::GENERIC_ERROR); - } - - return Error::Success; -} - -Error -TritonClientBackend::ClientInferStat(InferStat* infer_stat) -{ - tc::InferStat triton_infer_stat; - if (protocol_ == ProtocolType::GRPC) { - RETURN_IF_TRITON_ERROR( - client_.grpc_client_->ClientInferStat(&triton_infer_stat)); - } else { - RETURN_IF_TRITON_ERROR( - client_.http_client_->ClientInferStat(&triton_infer_stat)); - } - - ParseInferStat(triton_infer_stat, infer_stat); - - return Error::Success; -} - -Error -TritonClientBackend::ModelInferenceStatistics( - std::map<ModelIdentifier, ModelStatistics>* model_stats, - const std::string& model_name, const std::string& model_version) -{ - if (protocol_ == 
ProtocolType::GRPC) { - inference::ModelStatisticsResponse infer_stat; - RETURN_IF_TRITON_ERROR(client_.grpc_client_->ModelInferenceStatistics( - &infer_stat, model_name, model_version, *http_headers_)); - ParseStatistics(infer_stat, model_stats); - } else { - std::string infer_stat; - RETURN_IF_TRITON_ERROR(client_.http_client_->ModelInferenceStatistics( - &infer_stat, model_name, model_version, *http_headers_)); - rapidjson::Document infer_stat_json; - RETURN_IF_TRITON_ERROR(tc::ParseJson(&infer_stat_json, infer_stat)); - ParseStatistics(infer_stat_json, model_stats); - } - - return Error::Success; -} - -Error -TritonClientBackend::Metrics(triton::perfanalyzer::Metrics& metrics) -{ - try { - std::string metrics_endpoint_text{""}; - AccessMetricsEndpoint(metrics_endpoint_text); - ParseAndStoreMetrics(metrics_endpoint_text, metrics); - } - catch (const PerfAnalyzerException& e) { - return Error(e.what(), pa::GENERIC_ERROR); - } - return Error::Success; -} - -void -TritonClientBackend::AccessMetricsEndpoint(std::string& metrics_endpoint_text) -{ - CURL* curl{curl_easy_init()}; - if (curl == nullptr) { - throw triton::perfanalyzer::PerfAnalyzerException( - "Error calling curl_easy_init()", triton::perfanalyzer::GENERIC_ERROR); - } - - const auto metrics_response_handler{ - [](char* ptr, size_t size, size_t nmemb, std::string* userdata) { - userdata->append(ptr, size * nmemb); - return size * nmemb; - }}; - - curl_easy_setopt(curl, CURLOPT_URL, metrics_url_.c_str()); - curl_easy_setopt( - curl, CURLOPT_WRITEFUNCTION, - static_cast<size_t (*)(char*, size_t, size_t, std::string*)>( - metrics_response_handler)); - curl_easy_setopt(curl, CURLOPT_WRITEDATA, &metrics_endpoint_text); - - CURLcode res{curl_easy_perform(curl)}; - - if (res != CURLE_OK) { - curl_easy_cleanup(curl); - throw triton::perfanalyzer::PerfAnalyzerException( - "Unable to connect to Metrics endpoint " + metrics_url_, - triton::perfanalyzer::GENERIC_ERROR); - } - - long response_code{0}; - curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response_code); - curl_easy_cleanup(curl); - - if (response_code != 200) { - throw triton::perfanalyzer::PerfAnalyzerException( - "Metrics endpoint " + metrics_url_ + " did not return a 200 response code.", - triton::perfanalyzer::GENERIC_ERROR); - } -} - -void -TritonClientBackend::ParseAndStoreMetrics( - const std::string& metrics_endpoint_text, - triton::perfanalyzer::Metrics& metrics) -{ - ParseAndStoreMetric( - metrics_endpoint_text, "nv_gpu_utilization", - metrics.gpu_utilization_per_gpu); - ParseAndStoreMetric( - metrics_endpoint_text, "nv_gpu_power_usage", - metrics.gpu_power_usage_per_gpu); - ParseAndStoreMetric( - metrics_endpoint_text, "nv_gpu_memory_used_bytes", - metrics.gpu_memory_used_bytes_per_gpu); - ParseAndStoreMetric( - metrics_endpoint_text, "nv_gpu_memory_total_bytes", - metrics.gpu_memory_total_bytes_per_gpu); -} - -Error -TritonClientBackend::UnregisterAllSharedMemory() -{ - if (protocol_ == ProtocolType::GRPC) { - RETURN_IF_TRITON_ERROR( - client_.grpc_client_->UnregisterSystemSharedMemory("", *http_headers_)); - RETURN_IF_TRITON_ERROR( - client_.grpc_client_->UnregisterCudaSharedMemory("", *http_headers_)); - } else { - RETURN_IF_TRITON_ERROR( - client_.http_client_->UnregisterSystemSharedMemory("", *http_headers_)); - RETURN_IF_TRITON_ERROR( - client_.http_client_->UnregisterCudaSharedMemory("", *http_headers_)); - } - - return Error::Success; -} - -Error -TritonClientBackend::RegisterSystemSharedMemory( - const std::string& name, const std::string& key, const size_t byte_size) -{ - if (protocol_ == ProtocolType::GRPC) { - 
RETURN_IF_TRITON_ERROR(client_.grpc_client_->RegisterSystemSharedMemory( - name, key, byte_size, 0 /* offset */, *http_headers_)); - - } else { - RETURN_IF_TRITON_ERROR(client_.http_client_->RegisterSystemSharedMemory( - name, key, byte_size, 0 /* offset */, *http_headers_)); - } - - return Error::Success; -} - -Error -TritonClientBackend::RegisterCudaSharedMemory( - const std::string& name, const cudaIpcMemHandle_t& handle, - const size_t byte_size) -{ - if (protocol_ == ProtocolType::GRPC) { - RETURN_IF_TRITON_ERROR(client_.grpc_client_->RegisterCudaSharedMemory( - name, handle, 0 /*device id*/, byte_size, *http_headers_)); - - } else { - RETURN_IF_TRITON_ERROR(client_.http_client_->RegisterCudaSharedMemory( - name, handle, 0 /*device id*/, byte_size, *http_headers_)); - } - - return Error::Success; -} - -// -// Shared Memory Utilities -// -Error -TritonClientBackend::CreateSharedMemoryRegion( - std::string shm_key, size_t byte_size, int* shm_fd) -{ - RETURN_IF_TRITON_ERROR( - tc::CreateSharedMemoryRegion(shm_key, byte_size, shm_fd)); - - return Error::Success; -} - - -Error -TritonClientBackend::MapSharedMemory( - int shm_fd, size_t offset, size_t byte_size, void** shm_addr) -{ - RETURN_IF_TRITON_ERROR( - tc::MapSharedMemory(shm_fd, offset, byte_size, shm_addr)); - - return Error::Success; -} - - -Error -TritonClientBackend::CloseSharedMemory(int shm_fd) -{ - RETURN_IF_TRITON_ERROR(tc::CloseSharedMemory(shm_fd)); - - return Error::Success; -} - -Error -TritonClientBackend::UnlinkSharedMemoryRegion(std::string shm_key) -{ - RETURN_IF_TRITON_ERROR(tc::UnlinkSharedMemoryRegion(shm_key)); - - return Error::Success; -} - -Error -TritonClientBackend::UnmapSharedMemory(void* shm_addr, size_t byte_size) -{ - RETURN_IF_TRITON_ERROR(tc::UnmapSharedMemory(shm_addr, byte_size)); - - return Error::Success; -} - -void -TritonClientBackend::ParseInferInputToTriton( - const std::vector<InferInput*>& inputs, - std::vector<tc::InferInput*>* triton_inputs) -{ - for (const auto input : inputs) { - tc::InferInput* triton_input{dynamic_cast<TritonInferInput*>(input)->Get()}; - triton_input->SetBinaryData(input_tensor_format_ == TensorFormat::BINARY); - triton_inputs->push_back(triton_input); - } -} - -void -TritonClientBackend::ParseInferRequestedOutputToTriton( - const std::vector<const InferRequestedOutput*>& outputs, - std::vector<const tc::InferRequestedOutput*>* triton_outputs) -{ - for (const auto output : outputs) { - tc::InferRequestedOutput* triton_output{ - dynamic_cast<const TritonInferRequestedOutput*>(output)->Get()}; - triton_output->SetBinaryData(output_tensor_format_ == TensorFormat::BINARY); - triton_outputs->push_back(triton_output); - } -} - -void -TritonClientBackend::ParseInferOptionsToTriton( - const InferOptions& options, tc::InferOptions* triton_options) -{ - triton_options->model_version_ = options.model_version_; - triton_options->request_id_ = options.request_id_; - if ((options.sequence_id_ != 0) || (options.sequence_id_str_ != "")) { - if (options.sequence_id_ != 0) { - triton_options->sequence_id_ = options.sequence_id_; - } else { - triton_options->sequence_id_str_ = options.sequence_id_str_; - } - triton_options->sequence_start_ = options.sequence_start_; - triton_options->sequence_end_ = options.sequence_end_; - } - triton_options->triton_enable_empty_final_response_ = - options.triton_enable_empty_final_response_; - - for (auto& map_entry : options.request_parameters_) { - auto rp = tc::RequestParameter(); - rp.name = map_entry.second.name; - rp.value = map_entry.second.value; - rp.type = map_entry.second.type; - triton_options->request_parameters[map_entry.first] = rp; - } -} - - -void 
-TritonClientBackend::ParseStatistics( - const inference::ModelStatisticsResponse& infer_stat, - std::map<ModelIdentifier, ModelStatistics>* model_stats) -{ - model_stats->clear(); - for (const auto& this_stat : infer_stat.model_stats()) { - auto it = model_stats - ->emplace( - std::make_pair(this_stat.name(), this_stat.version()), - ModelStatistics()) - .first; - it->second.inference_count_ = this_stat.inference_count(); - it->second.execution_count_ = this_stat.execution_count(); - it->second.success_count_ = this_stat.inference_stats().success().count(); - it->second.queue_count_ = this_stat.inference_stats().queue().count(); - it->second.compute_input_count_ = - this_stat.inference_stats().compute_input().count(); - it->second.compute_infer_count_ = - this_stat.inference_stats().compute_infer().count(); - it->second.compute_output_count_ = - this_stat.inference_stats().compute_output().count(); - it->second.cumm_time_ns_ = this_stat.inference_stats().success().ns(); - it->second.queue_time_ns_ = this_stat.inference_stats().queue().ns(); - it->second.compute_input_time_ns_ = - this_stat.inference_stats().compute_input().ns(); - it->second.compute_infer_time_ns_ = - this_stat.inference_stats().compute_infer().ns(); - it->second.compute_output_time_ns_ = - this_stat.inference_stats().compute_output().ns(); - it->second.cache_hit_count_ = - this_stat.inference_stats().cache_hit().count(); - it->second.cache_hit_time_ns_ = - this_stat.inference_stats().cache_hit().ns(); - it->second.cache_miss_count_ = - this_stat.inference_stats().cache_miss().count(); - it->second.cache_miss_time_ns_ = - this_stat.inference_stats().cache_miss().ns(); - } -} - -void -TritonClientBackend::ParseStatistics( - const rapidjson::Document& infer_stat, - std::map<ModelIdentifier, ModelStatistics>* model_stats) -{ - model_stats->clear(); - for (const auto& this_stat : infer_stat["model_stats"].GetArray()) { - auto it = model_stats - ->emplace( - std::make_pair( - this_stat["name"].GetString(), - this_stat["version"].GetString()), - ModelStatistics()) - .first; - it->second.inference_count_ = this_stat["inference_count"].GetUint64(); - it->second.execution_count_ = this_stat["execution_count"].GetUint64(); - it->second.success_count_ = - this_stat["inference_stats"]["success"]["count"].GetUint64(); - it->second.queue_count_ = - this_stat["inference_stats"]["queue"]["count"].GetUint64(); - it->second.compute_input_count_ = - this_stat["inference_stats"]["compute_input"]["count"].GetUint64(); - it->second.compute_infer_count_ = - this_stat["inference_stats"]["compute_infer"]["count"].GetUint64(); - it->second.compute_output_count_ = - this_stat["inference_stats"]["compute_output"]["count"].GetUint64(); - it->second.cumm_time_ns_ = - this_stat["inference_stats"]["success"]["ns"].GetUint64(); - it->second.queue_time_ns_ = - this_stat["inference_stats"]["queue"]["ns"].GetUint64(); - it->second.compute_input_time_ns_ = - this_stat["inference_stats"]["compute_input"]["ns"].GetUint64(); - it->second.compute_infer_time_ns_ = - this_stat["inference_stats"]["compute_infer"]["ns"].GetUint64(); - it->second.compute_output_time_ns_ = - this_stat["inference_stats"]["compute_output"]["ns"].GetUint64(); - it->second.cache_hit_count_ = - this_stat["inference_stats"]["cache_hit"]["count"].GetUint64(); - it->second.cache_hit_time_ns_ = - this_stat["inference_stats"]["cache_hit"]["ns"].GetUint64(); - it->second.cache_miss_count_ = - this_stat["inference_stats"]["cache_miss"]["count"].GetUint64(); - it->second.cache_miss_time_ns_ = - this_stat["inference_stats"]["cache_miss"]["ns"].GetUint64(); - } -} - 
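// Illustrative sketch of the HTTP statistics JSON the parser above consumes,
// abbreviated to the fields it reads. Model name, version, and values are
// made up:
//
// {
//   "model_stats" : [ {
//     "name" : "resnet50", "version" : "1",
//     "inference_count" : 512, "execution_count" : 64,
//     "inference_stats" : {
//       "success"        : { "count" : 512, "ns" : 91000000 },
//       "queue"          : { "count" : 512, "ns" :  7000000 },
//       "compute_input"  : { "count" : 512, "ns" :  3000000 },
//       "compute_infer"  : { "count" : 512, "ns" : 78000000 },
//       "compute_output" : { "count" : 512, "ns" :  3000000 },
//       "cache_hit"      : { "count" :   0, "ns" :        0 },
//       "cache_miss"     : { "count" :   0, "ns" :        0 }
//     }
//   } ]
// }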
-void -TritonClientBackend::ParseInferStat( - const tc::InferStat& triton_infer_stat, InferStat* infer_stat) -{ - infer_stat->completed_request_count = - triton_infer_stat.completed_request_count; - infer_stat->cumulative_total_request_time_ns = - triton_infer_stat.cumulative_total_request_time_ns; - infer_stat->cumulative_send_time_ns = - triton_infer_stat.cumulative_send_time_ns; - infer_stat->cumulative_receive_time_ns = - triton_infer_stat.cumulative_receive_time_ns; -} - -//============================================================================== - -Error -TritonInferInput::Create( - InferInput** infer_input, const std::string& name, - const std::vector<int64_t>& dims, const std::string& datatype) -{ - TritonInferInput* local_infer_input = new TritonInferInput(name, datatype); - - tc::InferInput* triton_infer_input; - RETURN_IF_TRITON_ERROR( - tc::InferInput::Create(&triton_infer_input, name, dims, datatype)); - local_infer_input->input_.reset(triton_infer_input); - - *infer_input = local_infer_input; - return Error::Success; -} - -const std::vector<int64_t>& -TritonInferInput::Shape() const -{ - return input_->Shape(); -} - -Error -TritonInferInput::SetShape(const std::vector<int64_t>& shape) -{ - RETURN_IF_TRITON_ERROR(input_->SetShape(shape)); - return Error::Success; -} - -Error -TritonInferInput::Reset() -{ - RETURN_IF_TRITON_ERROR(input_->Reset()); - return Error::Success; -} - -Error -TritonInferInput::AppendRaw(const uint8_t* input, size_t input_byte_size) -{ - RETURN_IF_TRITON_ERROR(input_->AppendRaw(input, input_byte_size)); - return Error::Success; -} - -Error -TritonInferInput::SetSharedMemory( - const std::string& name, size_t byte_size, size_t offset) -{ - RETURN_IF_TRITON_ERROR(input_->SetSharedMemory(name, byte_size, offset)); - return Error::Success; -} - -Error -TritonInferInput::RawData(const uint8_t** buf, size_t* byte_size) -{ - RETURN_IF_TRITON_ERROR(input_->RawData(buf, byte_size)); - return Error::Success; -} - -TritonInferInput::TritonInferInput( - const std::string& name, const std::string& datatype) - : InferInput(BackendKind::TRITON, name, datatype) -{ -} - - -//============================================================================== - -Error -TritonInferRequestedOutput::Create( - InferRequestedOutput** infer_output, const std::string& name, - const size_t class_count, const std::string& datatype) -{ - TritonInferRequestedOutput* local_infer_output = - new TritonInferRequestedOutput(name, datatype); - - tc::InferRequestedOutput* triton_infer_output; - RETURN_IF_TRITON_ERROR(tc::InferRequestedOutput::Create( - &triton_infer_output, name, class_count, datatype)); - local_infer_output->output_.reset(triton_infer_output); - - *infer_output = local_infer_output; - - return Error::Success; -} - -Error -TritonInferRequestedOutput::SetSharedMemory( - const std::string& region_name, const size_t byte_size, const size_t offset) -{ - RETURN_IF_TRITON_ERROR( - output_->SetSharedMemory(region_name, byte_size, offset)); - return Error::Success; -} - - -TritonInferRequestedOutput::TritonInferRequestedOutput( - const std::string& name, const std::string& datatype) - : InferRequestedOutput(BackendKind::TRITON, name, datatype) -{ -} - -//============================================================================== - -TritonInferResult::TritonInferResult(tc::InferResult* result) -{ - result_.reset(result); -} - -Error -TritonInferResult::Id(std::string* id) const -{ - RETURN_IF_TRITON_ERROR(result_->Id(id)); - return Error::Success; -} - -Error -TritonInferResult::RequestStatus() const -{ - 
RETURN_IF_TRITON_ERROR(result_->RequestStatus()); - return Error::Success; -} - -Error -TritonInferResult::RawData( - const std::string& output_name, const uint8_t** buf, - size_t* byte_size) const -{ - RETURN_IF_TRITON_ERROR(result_->RawData(output_name, buf, byte_size)); - return Error::Success; -} - -Error -TritonInferResult::IsFinalResponse(bool* is_final_response) const -{ - RETURN_IF_TRITON_ERROR(result_->IsFinalResponse(is_final_response)); - return Error::Success; -} - -Error -TritonInferResult::IsNullResponse(bool* is_null_response) const -{ - RETURN_IF_TRITON_ERROR(result_->IsNullResponse(is_null_response)); - return Error::Success; -} - -//============================================================================== - -}}}} // namespace triton::perfanalyzer::clientbackend::tritonremote diff --git a/src/c++/perf_analyzer/client_backend/triton/triton_client_backend.h b/src/c++/perf_analyzer/client_backend/triton/triton_client_backend.h deleted file mode 100644 index fd48d32c2..000000000 --- a/src/c++/perf_analyzer/client_backend/triton/triton_client_backend.h +++ /dev/null @@ -1,344 +0,0 @@ -// Copyright 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
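// A hedged sketch of how the error-conversion macro defined in this header is
// used: RETURN_IF_TRITON_ERROR adapts a triton::client::Error into the
// client-backend Error type so call sites stay one line per client call. The
// helper below is hypothetical (not part of this header); IsServerLive() is a
// standard triton::client gRPC client method:
//
//   Error
//   CheckServerLive(tc::InferenceServerGrpcClient& client)
//   {
//     bool live = false;
//     RETURN_IF_TRITON_ERROR(client.IsServerLive(&live));
//     return live ? Error::Success
//                 : Error("server not live", pa::GENERIC_ERROR);
//   }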
-#pragma once - -#include <iostream> -#include <map> -#include <memory> -#include <regex> -#include <string> - -#include "../../constants.h" -#include "../../metrics.h" -#include "../../perf_utils.h" -#include "../client_backend.h" -#include "grpc_client.h" -#include "http_client.h" -#include "shm_utils.h" - -#define RETURN_IF_TRITON_ERROR(S) \ - do { \ - const tc::Error& status__ = (S); \ - if (!status__.IsOk()) { \ - return Error(status__.Message(), pa::GENERIC_ERROR); \ - } \ - } while (false) - -#define FAIL_IF_TRITON_ERR(X, MSG) \ - { \ - const tc::Error err = (X); \ - if (!err.IsOk()) { \ - std::cerr << "error: " << (MSG) << ": " << err << std::endl; \ - exit(pa::GENERIC_ERROR); \ - } \ - } - -namespace tc = triton::client; - -namespace triton { namespace perfanalyzer { namespace clientbackend { -namespace tritonremote { - -#ifndef DOCTEST_CONFIG_DISABLE -class TestTritonClientBackend; -#endif - -//============================================================================== -/// TritonClientBackend uses triton client C++ library to communicate with -/// triton inference service. -/// -class TritonClientBackend : public ClientBackend { - public: - /// Create a triton client backend which can be used to interact with the - /// server. - /// \param url The inference server url and port. - /// \param protocol The protocol type used. - /// \param ssl_options The SSL options used with client backend. - /// \param http_headers Map of HTTP headers. The map key/value indicates - /// the header name/value. - /// \param verbose Enables the verbose mode. - /// \param metrics_url The inference server metrics url and port. - /// \param input_tensor_format The Triton inference request input tensor - /// format. - /// \param output_tensor_format The Triton inference response output tensor - /// format. - /// \param client_backend Returns a new TritonClientBackend object. - /// \return Error object indicating success or failure. 
- static Error Create( - const std::string& url, const ProtocolType protocol, - const SslOptionsBase& ssl_options, - const std::map<std::string, std::vector<std::string>> trace_options, - const grpc_compression_algorithm compression_algorithm, - std::shared_ptr<Headers> http_headers, const bool verbose, - const std::string& metrics_url, - const cb::TensorFormat input_tensor_format, - const cb::TensorFormat output_tensor_format, - std::unique_ptr<ClientBackend>* client_backend); - - /// See ClientBackend::ServerExtensions() - Error ServerExtensions(std::set<std::string>* server_extensions) override; - - /// See ClientBackend::ModelMetadata() - Error ModelMetadata( - rapidjson::Document* model_metadata, const std::string& model_name, - const std::string& model_version) override; - - /// See ClientBackend::ModelConfig() - Error ModelConfig( - rapidjson::Document* model_config, const std::string& model_name, - const std::string& model_version) override; - - /// See ClientBackend::Infer() - Error Infer( - InferResult** result, const InferOptions& options, - const std::vector<InferInput*>& inputs, - const std::vector<const InferRequestedOutput*>& outputs) override; - - /// See ClientBackend::AsyncInfer() - Error AsyncInfer( - OnCompleteFn callback, const InferOptions& options, - const std::vector<InferInput*>& inputs, - const std::vector<const InferRequestedOutput*>& outputs) override; - - /// See ClientBackend::StartStream() - Error StartStream(OnCompleteFn callback, bool enable_stats) override; - - /// See ClientBackend::AsyncStreamInfer() - Error AsyncStreamInfer( - const InferOptions& options, const std::vector<InferInput*>& inputs, - const std::vector<const InferRequestedOutput*>& outputs) override; - - /// See ClientBackend::ClientInferStat() - Error ClientInferStat(InferStat* infer_stat) override; - - /// See ClientBackend::ModelInferenceStatistics() - Error ModelInferenceStatistics( - std::map<ModelIdentifier, ModelStatistics>* model_stats, - const std::string& model_name = "", - const std::string& model_version = "") override; - - /// See ClientBackend::Metrics() - Error Metrics(triton::perfanalyzer::Metrics& metrics) override; - - /// See ClientBackend::UnregisterAllSharedMemory() - Error UnregisterAllSharedMemory() override; - - /// See ClientBackend::RegisterSystemSharedMemory() - Error RegisterSystemSharedMemory( - const std::string& name, const std::string& key, - const size_t byte_size) override; - - /// See ClientBackend::RegisterCudaSharedMemory() - Error RegisterCudaSharedMemory( - const std::string& name, const cudaIpcMemHandle_t& handle, - const size_t byte_size) override; - - /// See ClientBackend::CreateSharedMemoryRegion() - Error CreateSharedMemoryRegion( - std::string shm_key, size_t byte_size, int* shm_fd) override; - - /// See ClientBackend::MapSharedMemory() - Error MapSharedMemory( - int shm_fd, size_t offset, size_t byte_size, void** shm_addr) override; - - /// See ClientBackend::CloseSharedMemory() - Error CloseSharedMemory(int shm_fd) override; - - /// See ClientBackend::UnlinkSharedMemoryRegion() - Error UnlinkSharedMemoryRegion(std::string shm_key) override; - - /// See ClientBackend::UnmapSharedMemory() - Error UnmapSharedMemory(void* shm_addr, size_t byte_size) override; - - private: - TritonClientBackend( - const ProtocolType protocol, - const grpc_compression_algorithm compression_algorithm, - std::shared_ptr<Headers> http_headers, const std::string& metrics_url, - const cb::TensorFormat input_tensor_format, - const cb::TensorFormat output_tensor_format) - : ClientBackend(BackendKind::TRITON), protocol_(protocol), - compression_algorithm_(compression_algorithm), - http_headers_(http_headers), metrics_url_(metrics_url), - input_tensor_format_(input_tensor_format), - 
output_tensor_format_(output_tensor_format) - { - } - - void ParseInferInputToTriton( - const std::vector<InferInput*>& inputs, - std::vector<tc::InferInput*>* triton_inputs); - void ParseInferRequestedOutputToTriton( - const std::vector<const InferRequestedOutput*>& outputs, - std::vector<const tc::InferRequestedOutput*>* triton_outputs); - void ParseInferOptionsToTriton( - const InferOptions& options, tc::InferOptions* triton_options); - void ParseStatistics( - const inference::ModelStatisticsResponse& infer_stat, - std::map<ModelIdentifier, ModelStatistics>* model_stats); - void ParseStatistics( - const rapidjson::Document& infer_stat, - std::map<ModelIdentifier, ModelStatistics>* model_stats); - void ParseInferStat( - const tc::InferStat& triton_infer_stat, InferStat* infer_stat); - void AccessMetricsEndpoint(std::string& metrics_endpoint_text); - void ParseAndStoreMetrics( - const std::string& metrics_endpoint_text, - triton::perfanalyzer::Metrics& metrics); - - template <typename T> - void ParseAndStoreMetric( - const std::string& metrics_endpoint_text, const std::string metric_id, - std::map<std::string, T>& metric_per_gpu) - { - std::regex metric_regex( - R"(\n)" + metric_id + R"(\{gpu_uuid\=\"([^"]+)\"\} (\d+\.?\d*))"); - std::sregex_iterator metric_regex_match_begin{std::sregex_iterator( - metrics_endpoint_text.begin(), metrics_endpoint_text.end(), - metric_regex)}; - - for (std::sregex_iterator i{metric_regex_match_begin}; - i != std::sregex_iterator(); i++) { - const std::smatch& match{*i}; - const std::string& gpu_uuid{match[1].str()}; - T metric{}; - if (std::is_same<T, double>::value) { - metric = std::stod(match[2].str()); - } else if (std::is_same<T, uint64_t>::value) { - metric = static_cast<T>(std::stod(match[2].str())); - } - metric_per_gpu[gpu_uuid] = metric; - } - } - - /// Union to represent the underlying triton client belonging to one of - /// the protocols - union TritonClient { - TritonClient() - { - new (&http_client_) std::unique_ptr<tc::InferenceServerHttpClient>{}; - } - ~TritonClient() {} - - std::unique_ptr<tc::InferenceServerHttpClient> http_client_; - std::unique_ptr<tc::InferenceServerGrpcClient> grpc_client_; - } client_; - - const ProtocolType protocol_{UNKNOWN}; - const grpc_compression_algorithm compression_algorithm_{GRPC_COMPRESS_NONE}; - std::shared_ptr<Headers> http_headers_; - const std::string metrics_url_{""}; - const cb::TensorFormat input_tensor_format_{cb::TensorFormat::UNKNOWN}; - const cb::TensorFormat output_tensor_format_{cb::TensorFormat::UNKNOWN}; - -#ifndef DOCTEST_CONFIG_DISABLE - friend TestTritonClientBackend; - - public: - TritonClientBackend() = default; -#endif -}; - -//============================================================== -/// TritonInferInput is a wrapper around InferInput object of -/// triton client library. -/// -class TritonInferInput : public InferInput { - public: - static Error Create( - InferInput** infer_input, const std::string& name, - const std::vector<int64_t>& dims, const std::string& datatype); - /// Returns the raw InferInput object required by triton client library. 
- tc::InferInput* Get() const { return input_.get(); } - /// See InferInput::Shape() - const std::vector<int64_t>& Shape() const override; - /// See InferInput::SetShape() - Error SetShape(const std::vector<int64_t>& shape) override; - /// See InferInput::Reset() - Error Reset() override; - /// See InferInput::AppendRaw() - Error AppendRaw(const uint8_t* input, size_t input_byte_size) override; - /// See InferInput::SetSharedMemory() - Error SetSharedMemory( - const std::string& name, size_t byte_size, size_t offset = 0) override; - /// See InferInput::RawData() - Error RawData(const uint8_t** buf, size_t* byte_size) override; - - private: - explicit TritonInferInput( - const std::string& name, const std::string& datatype); - - std::unique_ptr<tc::InferInput> input_; -}; - -//============================================================== -/// TritonInferRequestedOutput is a wrapper around -/// InferRequestedOutput object of triton client library. -/// -class TritonInferRequestedOutput : public InferRequestedOutput { - public: - static Error Create( - InferRequestedOutput** infer_output, const std::string& name, - const size_t class_count = 0, const std::string& datatype = ""); - /// Returns the raw InferRequestedOutput object required by triton client - /// library. - tc::InferRequestedOutput* Get() const { return output_.get(); } - // See InferRequestedOutput::SetSharedMemory() - Error SetSharedMemory( - const std::string& region_name, const size_t byte_size, - const size_t offset = 0) override; - - private: - explicit TritonInferRequestedOutput( - const std::string& name, const std::string& datatype); - - std::unique_ptr<tc::InferRequestedOutput> output_; -}; - -//============================================================== -/// TritonInferResult is a wrapper around InferResult object of -/// triton client library. -/// -class TritonInferResult : public InferResult { - public: - explicit TritonInferResult(tc::InferResult* result); - /// See InferResult::Id() - Error Id(std::string* id) const override; - /// See InferResult::RequestStatus() - Error RequestStatus() const override; - /// See InferResult::RawData() - Error RawData( - const std::string& output_name, const uint8_t** buf, - size_t* byte_size) const override; - /// See InferResult::IsFinalResponse() - Error IsFinalResponse(bool* is_final_response) const override; - /// See InferResult::IsNullResponse() - Error IsNullResponse(bool* is_null_response) const override; - - private: - std::unique_ptr<tc::InferResult> result_; -}; - -}}}} // namespace triton::perfanalyzer::clientbackend::tritonremote diff --git a/src/c++/perf_analyzer/client_backend/triton_c_api/CMakeLists.txt b/src/c++/perf_analyzer/client_backend/triton_c_api/CMakeLists.txt deleted file mode 100644 index 5e21b7449..000000000 --- a/src/c++/perf_analyzer/client_backend/triton_c_api/CMakeLists.txt +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. 
-# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -cmake_minimum_required (VERSION 3.18) - -set( - TRITON_C_API_CLIENT_BACKEND_SRCS - triton_c_api_backend.cc - shared_library.cc - triton_loader.cc - shared_memory_manager.cc - scoped_defer.cc -) - -set( - TRITON_C_API_CLIENT_BACKEND_HDRS - triton_c_api_backend.h - shared_library.h - shared_memory_manager.h - triton_loader.h - c_api_infer_results.h - scoped_defer.h -) - -add_library( - triton-c-api-backend-library EXCLUDE_FROM_ALL OBJECT - ${TRITON_C_API_CLIENT_BACKEND_SRCS} - ${TRITON_C_API_CLIENT_BACKEND_HDRS} -) - -target_link_libraries( - triton-c-api-backend-library - grpcclient_static - httpclient_static - triton-core-serverapi # from repo-core -) diff --git a/src/c++/perf_analyzer/client_backend/triton_c_api/c_api_infer_results.h b/src/c++/perf_analyzer/client_backend/triton_c_api/c_api_infer_results.h deleted file mode 100644 index 440a94c0b..000000000 --- a/src/c++/perf_analyzer/client_backend/triton_c_api/c_api_infer_results.h +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-#pragma once - -#include "common.h" - -namespace tc = triton::client; - -namespace triton { namespace perfanalyzer { namespace clientbackend { -namespace tritoncapi { - -/// This class is used to pass inference status and id to upstream backend. -/// Created so that the API is similar to `triton, torchserver, -/// tensorflow_serving` APIs -class InferResult { - public: - static void Create( - InferResult** infer_result, const tc::Error& err, const std::string& id) - { - *infer_result = reinterpret_cast(new InferResult(err, id)); - } - - tc::Error Id(std::string* id) const - { - *id = request_id_; - return tc::Error::Success; - } - tc::Error RequestStatus() const { return status_; } - - private: - InferResult(const tc::Error& err, const std::string& id) - : status_(err), request_id_(id) - { - } - - std::string request_id_; - tc::Error status_; -}; -}}}} // namespace triton::perfanalyzer::clientbackend::tritoncapi diff --git a/src/c++/perf_analyzer/client_backend/triton_c_api/scoped_defer.cc b/src/c++/perf_analyzer/client_backend/triton_c_api/scoped_defer.cc deleted file mode 100644 index ff25eb0f4..000000000 --- a/src/c++/perf_analyzer/client_backend/triton_c_api/scoped_defer.cc +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -#include "scoped_defer.h" - -namespace triton { namespace perfanalyzer { namespace clientbackend { -namespace tritoncapi { -ScopedDefer::ScopedDefer(std::function task) -{ - task_ = task; - done_ = false; -} - -void -ScopedDefer::Complete() -{ - if (!done_) { - task_(); - done_ = true; - } -} - -ScopedDefer::~ScopedDefer() -{ - if (!done_) { - task_(); - } -} - -}}}} // namespace triton::perfanalyzer::clientbackend::tritoncapi diff --git a/src/c++/perf_analyzer/client_backend/triton_c_api/scoped_defer.h b/src/c++/perf_analyzer/client_backend/triton_c_api/scoped_defer.h deleted file mode 100644 index c5fcc7ea0..000000000 --- a/src/c++/perf_analyzer/client_backend/triton_c_api/scoped_defer.h +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#pragma once -#include - -namespace triton { namespace perfanalyzer { namespace clientbackend { -namespace tritoncapi { - -class ScopedDefer { - public: - ScopedDefer(std::function task); - ~ScopedDefer(); - void Complete(); - - private: - std::function task_; - bool done_; -}; - -}}}} // namespace triton::perfanalyzer::clientbackend::tritoncapi diff --git a/src/c++/perf_analyzer/client_backend/triton_c_api/shared_library.cc b/src/c++/perf_analyzer/client_backend/triton_c_api/shared_library.cc deleted file mode 100644 index 8c06860e6..000000000 --- a/src/c++/perf_analyzer/client_backend/triton_c_api/shared_library.cc +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#include "shared_library.h" - -#include <dlfcn.h> - -#include <string> - -/// FIXME: Duplication of server/src/core/shared_library.cc -/// Separate shared_library to common library and delete this - -namespace triton { namespace perfanalyzer { namespace clientbackend { -namespace tritoncapi { - -Error -OpenLibraryHandle(const std::string& path, void** handle) -{ - *handle = dlopen(path.c_str(), RTLD_NOW | RTLD_LOCAL); - if (*handle == nullptr) { - return Error("unable to load backend library: " + std::string(dlerror())); - } - return Error::Success; -} - -Error -CloseLibraryHandle(void* handle) -{ - if (handle != nullptr) { - if (dlclose(handle) != 0) { - return Error( - "unable to unload backend library: " + std::string(dlerror())); - } - } - return Error::Success; -} - -Error -GetEntrypoint( - void* handle, const std::string& name, const bool optional, void** befn) -{ - *befn = nullptr; - dlerror(); - void* fn = dlsym(handle, name.c_str()); - const char* dlsym_error = dlerror(); - if (dlsym_error != nullptr) { - if (optional) { - return Error::Success; - } - - std::string errstr(dlsym_error); // need copy as dlclose overwrites - return Error( - "unable to find required entrypoint '" + name + - "' in backend library: " + errstr); - } - - if (fn == nullptr) { - if (optional) { - return Error::Success; - } - - return Error( - "unable to find required entrypoint '" + name + "' in backend library"); - } - - *befn = fn; - return Error::Success; -} -}}}} // namespace triton::perfanalyzer::clientbackend::tritoncapi diff --git a/src/c++/perf_analyzer/client_backend/triton_c_api/shared_library.h b/src/c++/perf_analyzer/client_backend/triton_c_api/shared_library.h deleted file mode 100644 index dbc49e4da..000000000 --- a/src/c++/perf_analyzer/client_backend/triton_c_api/shared_library.h +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#pragma once - -#include <string> - -#include "../client_backend.h" -/// FIXME: Duplication of server/src/core/shared_library.h -/// Separate shared_library to common library and delete this - -namespace triton { namespace perfanalyzer { namespace clientbackend { -namespace tritoncapi { - -Error OpenLibraryHandle(const std::string& path, void** handle); - -Error CloseLibraryHandle(void* handle); - -Error GetEntrypoint( - void* handle, const std::string& name, const bool optional, void** befn); - -}}}} // namespace triton::perfanalyzer::clientbackend::tritoncapi diff --git a/src/c++/perf_analyzer/client_backend/triton_c_api/shared_memory_manager.cc b/src/c++/perf_analyzer/client_backend/triton_c_api/shared_memory_manager.cc deleted file mode 100644 index 0658daedd..000000000 --- a/src/c++/perf_analyzer/client_backend/triton_c_api/shared_memory_manager.cc +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "shared_memory_manager.h" - -#include <memory> -#include <mutex> -#include <string> -#include <utility> - -#include "common.h" - -namespace triton { namespace perfanalyzer { namespace clientbackend { -namespace tritoncapi { - -SharedMemoryManager::~SharedMemoryManager() -{ - UnregisterAll(TRITONSERVER_MEMORY_CPU); - UnregisterAll(TRITONSERVER_MEMORY_GPU); -} - -#ifdef TRITON_ENABLE_GPU -Error -SharedMemoryManager::RegisterCUDAMemory( - const std::string& name, void* dev_ptr, const size_t byte_size, - const int device_id) -{ - // Serialize all operations that write/read current shared memory regions - std::lock_guard<std::mutex> lock(mu_); - - // If name is already in shared_memory_map_ then return error saying already - // registered - if (shared_memory_map_.find(name) != shared_memory_map_.end()) { - return Error( - std::string("shared memory region '" + name + "' already in manager")); - } - - shared_memory_map_.insert(std::make_pair( - name, std::unique_ptr<MemoryInfo>(new MemoryInfo( - name, 0 /* offset */, byte_size, dev_ptr, - TRITONSERVER_MEMORY_GPU, device_id)))); - return Error::Success; -} -#endif // TRITON_ENABLE_GPU - -Error -SharedMemoryManager::RegisterSystemMemory( - const std::string& name, void* ptr, const size_t byte_size) -{ - // Serialize all operations that write/read current shared memory regions - std::lock_guard<std::mutex> lock(mu_); - - // If name is already in shared_memory_map_ then return error saying already - // registered - if (shared_memory_map_.find(name) != shared_memory_map_.end()) { - return Error("shared memory region '" + name + "' already in manager"); - } - - shared_memory_map_.insert(std::make_pair( - name, std::make_unique<MemoryInfo>( - name, 0 /* offset */, byte_size, ptr, TRITONSERVER_MEMORY_CPU, - 0 /* device id */))); - - return Error::Success; -} - -Error -SharedMemoryManager::GetMemoryInfo( - const std::string& name, size_t offset, size_t byte_size, - void** shm_mapped_addr, TRITONSERVER_MemoryType* memory_type, - int64_t* device_id) -{ - // protect shared_memory_map_ from concurrent access - std::lock_guard<std::mutex> lock(mu_); - - auto it = shared_memory_map_.find(name); - if (it == shared_memory_map_.end()) { - return Error( - std::string("Unable to find shared memory region: '" + name + "'")); - } - - // validate offset - size_t shm_region_end = 0; - if (it->second->kind_ == TRITONSERVER_MEMORY_CPU) { - shm_region_end = it->second->offset_; - } - if (it->second->byte_size_ > 0) { - shm_region_end += it->second->byte_size_ - 1; - } - if (offset > shm_region_end) { - return Error( - std::string("Invalid offset for shared memory region: '" + name + "'") - .c_str()); - } - // validate byte_size + offset is within memory bounds - size_t total_req_shm = offset + byte_size - 1; - if (total_req_shm > shm_region_end) { - return Error(std::string( - "Invalid offset + byte size for shared memory region: '" + - name + "'") - .c_str()); - } - - if (it->second->kind_ == TRITONSERVER_MEMORY_CPU) { - *shm_mapped_addr = (void*)((uint8_t*)it->second->mapped_addr_ + - it->second->offset_ + offset); - } 
else { - *shm_mapped_addr = (void*)((uint8_t*)it->second->mapped_addr_ + offset); - } - - *memory_type = it->second->kind_; - *device_id = it->second->device_id_; - - return Error::Success; -} - - -Error -SharedMemoryManager::Unregister( - const std::string& name, TRITONSERVER_MemoryType memory_type) -{ - // Serialize all operations that write/read current shared memory regions - std::lock_guard<std::mutex> lock(mu_); - - return UnregisterHelper(name, memory_type); -} - -Error -SharedMemoryManager::UnregisterAll(TRITONSERVER_MemoryType memory_type) -{ - // Serialize all operations that write/read current shared memory regions - std::lock_guard<std::mutex> lock(mu_); - std::string error_message = "Failed to unregister the following "; - std::vector<std::string> unregister_fails; - - if (memory_type == TRITONSERVER_MEMORY_CPU) { - error_message += "system shared memory regions: "; - for (auto& it : shared_memory_map_) { - if (it.second->kind_ == TRITONSERVER_MEMORY_CPU) { - Error err = UnregisterHelper(it.first, memory_type); - if (!err.IsOk()) { - unregister_fails.push_back(it.first); - } - } - } - } else if (memory_type == TRITONSERVER_MEMORY_GPU) { - error_message += "cuda shared memory regions: "; - for (auto& it : shared_memory_map_) { - if (it.second->kind_ == TRITONSERVER_MEMORY_GPU) { - Error err = UnregisterHelper(it.first, memory_type); - if (!err.IsOk()) { - unregister_fails.push_back(it.first); - } - } - } - } - - if (!unregister_fails.empty()) { - for (auto unreg_fail : unregister_fails) { - error_message += unreg_fail + " ,"; - } - return Error(error_message); - } - - return Error::Success; -} - -Error -SharedMemoryManager::UnregisterHelper( - const std::string& name, TRITONSERVER_MemoryType memory_type) -{ - // Must hold the lock on mu_ while calling this function. - auto it = shared_memory_map_.find(name); - - if (it == shared_memory_map_.end()) { - return Error("Shared memory region " + name + " doesn't exist."); - } - - // Remove region information from shared_memory_map_ - shared_memory_map_.erase(it); - - return Error::Success; -} - -}}}} // namespace triton::perfanalyzer::clientbackend::tritoncapi diff --git a/src/c++/perf_analyzer/client_backend/triton_c_api/shared_memory_manager.h b/src/c++/perf_analyzer/client_backend/triton_c_api/shared_memory_manager.h deleted file mode 100644 index 6b2082c44..000000000 --- a/src/c++/perf_analyzer/client_backend/triton_c_api/shared_memory_manager.h +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#pragma once
-
-#include
-
-#include
-#include
-#include
-#include
-
-#include "../client_backend.h"
-
-#ifdef TRITON_ENABLE_GPU
-#include
-#endif  // TRITON_ENABLE_GPU
-
-namespace triton { namespace perfanalyzer { namespace clientbackend {
-namespace tritoncapi {
-
-class SharedMemoryManager {
- public:
-  SharedMemoryManager() = default;
-  ~SharedMemoryManager();
-
-#ifdef TRITON_ENABLE_GPU
-  /// Add a memory block representing memory in CUDA (GPU) memory
-  /// to the manager. Return an Error if a memory block of the same name
-  /// already exists in the manager.
-  /// \param name The name of the memory block.
-  /// \param dev_ptr The device pointer.
-  /// \param byte_size The size, in bytes, of the block.
-  /// \param device_id The GPU number the memory region is in.
-  /// \return an Error indicating success or failure.
-  Error RegisterCUDAMemory(
-      const std::string& name, void* dev_ptr, const size_t byte_size,
-      const int device_id);
-#endif  // TRITON_ENABLE_GPU
-
-  /// Add a system memory block to the manager.
-  /// Return an Error if a shared memory block of the same name
-  /// already exists in the manager.
-  /// \param name The name of the memory block.
-  /// \param ptr The pointer to the start of the memory block.
-  /// \param byte_size The size, in bytes, of the block.
-  /// \return an Error indicating success or failure.
-  Error RegisterSystemMemory(
-      const std::string& name, void* ptr, const size_t byte_size);
-
-  /// Get the access information for the shared memory block
-  /// with the specified name. Return an Error
-  /// if the named block doesn't exist.
-  /// \param name The name of the shared memory block to get.
-  /// \param offset The offset in the block.
-  /// \param byte_size The number of bytes to request from the region.
-  /// \param shm_mapped_addr Returns the pointer to the shared
-  /// memory block with the specified name and offset.
-  /// \param memory_type Returns the type of the memory.
-  /// \param device_id Returns the device id associated with the
-  /// memory block.
-  /// \return an Error indicating success or failure.
-  Error GetMemoryInfo(
-      const std::string& name, size_t offset, size_t byte_size,
-      void** shm_mapped_addr, TRITONSERVER_MemoryType* memory_type,
-      int64_t* device_id);
-
-  /// Removes the named shared memory block of the specified type from
-  /// the manager. Any future attempt to get the details of this block
-  /// will result in an error until another block with the same name is
-  /// added to the manager.
-  /// \param name The name of the shared memory block to remove.
-  /// \param memory_type The type of memory to unregister.
-  /// \return an Error indicating success or failure.
-  Error Unregister(
-      const std::string& name, TRITONSERVER_MemoryType memory_type);
-
-  /// Unregister all shared memory blocks of the specified type from the
-  /// manager.
-  /// \param memory_type The type of memory to unregister.
-  /// \return an Error indicating success or failure.
-  Error UnregisterAll(TRITONSERVER_MemoryType memory_type);
-
- private:
-  /// A helper function to remove the named shared memory blocks of
-  /// the specified type.
-  Error UnregisterHelper(
-      const std::string& name, TRITONSERVER_MemoryType memory_type);
-
-  /// A struct that records the shared memory regions registered by the shared
-  /// memory manager.
-  struct MemoryInfo {
-    MemoryInfo(
-        const std::string& name, const size_t offset, const size_t byte_size,
-        void* mapped_addr, const TRITONSERVER_MemoryType kind,
-        const int64_t device_id)
-        : name_(name), offset_(offset), byte_size_(byte_size),
-          mapped_addr_(mapped_addr), kind_(kind), device_id_(device_id)
-    {
-    }
-
-    std::string name_;
-    size_t offset_;
-    size_t byte_size_;
-    void* mapped_addr_;
-    TRITONSERVER_MemoryType kind_;
-    int64_t device_id_;
-  };
-
-  using SharedMemoryStateMap =
-      std::map<std::string, std::unique_ptr<MemoryInfo>>;
-
-  // A map between the name and the details of the associated
-  // shared memory block
-  SharedMemoryStateMap shared_memory_map_;
-
-  // A mutex to protect the concurrent access to shared_memory_map_
-  std::mutex mu_;
-};
-}}}}  // namespace triton::perfanalyzer::clientbackend::tritoncapi
diff --git a/src/c++/perf_analyzer/client_backend/triton_c_api/triton_c_api_backend.cc b/src/c++/perf_analyzer/client_backend/triton_c_api/triton_c_api_backend.cc
deleted file mode 100644
index e97f1ea80..000000000
--- a/src/c++/perf_analyzer/client_backend/triton_c_api/triton_c_api_backend.cc
+++ /dev/null
@@ -1,401 +0,0 @@
-// Copyright 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
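// Illustrative usage sketch of the SharedMemoryManager declared in the
// header above, assuming a caller that owns a plain heap buffer. Error
// handling is elided and all names are hypothetical (comment form, so the
// surrounding diff stays intact):
//
//   void ExampleRegisterAndQuery(SharedMemoryManager& mgr)
//   {
//     std::vector<uint8_t> buf(1024);
//     mgr.RegisterSystemMemory("example_region", buf.data(), buf.size());
//
//     void* mapped = nullptr;
//     TRITONSERVER_MemoryType type;
//     int64_t device_id = 0;
//     // Ask for bytes [128, 128 + 256) of the region registered above.
//     mgr.GetMemoryInfo("example_region", 128, 256, &mapped, &type, &device_id);
//
//     mgr.Unregister("example_region", TRITONSERVER_MEMORY_CPU);
//   }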
- -#include "triton_c_api_backend.h" - -#include "c_api_infer_results.h" -#include "json_utils.h" -#include "triton_loader.h" - -namespace triton { namespace perfanalyzer { namespace clientbackend { -namespace tritoncapi { - -//============================================================================== - -Error -TritonCApiClientBackend::Create( - const std::string& triton_server_path, - const std::string& model_repository_path, const bool verbose, - std::unique_ptr* client_backend) -{ - if (triton_server_path.empty()) { - return Error( - "--triton-server-path should not be empty when using " - "service-kind=triton_c_api."); - } - - if (model_repository_path.empty()) { - return Error( - "--model-repository should not be empty when using " - "service-kind=triton_c_api."); - } - - std::unique_ptr triton_client_backend( - new TritonCApiClientBackend()); - RETURN_IF_ERROR( - TritonLoader::Create(triton_server_path, model_repository_path, verbose)); - *client_backend = std::move(triton_client_backend); - return Error::Success; -} - -Error -TritonCApiClientBackend::ServerExtensions(std::set* extensions) -{ - rapidjson::Document server_metadata_json; - RETURN_IF_ERROR(triton_loader_->ServerMetaData(&server_metadata_json)); - for (const auto& extension : server_metadata_json["extensions"].GetArray()) { - extensions->insert( - std::string(extension.GetString(), extension.GetStringLength())); - } - return Error::Success; -} - -Error -TritonCApiClientBackend::ModelMetadata( - rapidjson::Document* model_metadata, const std::string& model_name, - const std::string& model_version) -{ - if (!triton_loader_->ModelIsLoaded()) { - triton_loader_->LoadModel(model_name, model_version); - } - RETURN_IF_ERROR(triton_loader_->ModelMetadata(model_metadata)); - return Error::Success; -} - -Error -TritonCApiClientBackend::ModelConfig( - rapidjson::Document* model_config, const std::string& model_name, - const std::string& model_version) -{ - if (!triton_loader_->ModelIsLoaded()) { - triton_loader_->LoadModel(model_name, model_version); - } - RETURN_IF_ERROR( - triton_loader_->ModelConfig(model_config, model_name, model_version)); - return Error::Success; -} - -Error -TritonCApiClientBackend::Infer( - cb::InferResult** result, const InferOptions& options, - const std::vector& inputs, - const std::vector& outputs) -{ - std::vector triton_inputs; - ParseInferInputToTriton(inputs, &triton_inputs); - - std::vector triton_outputs; - ParseInferRequestedOutputToTriton(outputs, &triton_outputs); - - tc::InferOptions triton_options(options.model_name_); - ParseInferOptionsToTriton(options, &triton_options); - - capi::InferResult* triton_result; - RETURN_IF_ERROR(triton_loader_->Infer( - triton_options, triton_inputs, triton_outputs, &triton_result)); - - *result = new TritonCApiInferResult(triton_result); - return Error::Success; -} - - -Error -TritonCApiClientBackend::ClientInferStat(InferStat* infer_stat) -{ - tc::InferStat triton_infer_stat; - - triton_loader_->ClientInferStat(&triton_infer_stat); - ParseInferStat(triton_infer_stat, infer_stat); - return Error::Success; -} - -Error -TritonCApiClientBackend::ModelInferenceStatistics( - std::map* model_stats, - const std::string& model_name, const std::string& model_version) -{ - rapidjson::Document infer_stat_json; - RETURN_IF_ERROR(triton_loader_->ModelInferenceStatistics( - model_name, model_version, &infer_stat_json)); - ParseStatistics(infer_stat_json, model_stats); - - return Error::Success; -} - -Error -TritonCApiClientBackend::UnregisterAllSharedMemory() -{ - 
RETURN_IF_ERROR(triton_loader_->UnregisterAllSharedMemory()); - return Error::Success; -} - -Error -TritonCApiClientBackend::RegisterSystemMemory( - const std::string& name, void* ptr, const size_t byte_size) -{ - RETURN_IF_ERROR(triton_loader_->RegisterSystemMemory(name, ptr, byte_size)); - return Error::Success; -} - -#ifdef TRITON_ENABLE_GPU -Error -TritonCApiClientBackend::RegisterCudaMemory( - const std::string& name, void* handle, const size_t byte_size) -{ - RETURN_IF_ERROR(triton_loader_->RegisterCudaMemory(name, handle, byte_size)); - return Error::Success; -} -#endif // TRITON_ENABLE_GPU - -void -TritonCApiClientBackend::ParseInferInputToTriton( - const std::vector& inputs, - std::vector* triton_inputs) -{ - for (const auto input : inputs) { - triton_inputs->push_back( - (dynamic_cast(input))->Get()); - } -} - -void -TritonCApiClientBackend::ParseInferRequestedOutputToTriton( - const std::vector& outputs, - std::vector* triton_outputs) -{ - for (const auto output : outputs) { - triton_outputs->push_back( - (dynamic_cast(output))->Get()); - } -} - -void -TritonCApiClientBackend::ParseInferOptionsToTriton( - const InferOptions& options, tc::InferOptions* triton_options) -{ - triton_options->model_version_ = options.model_version_; - triton_options->request_id_ = options.request_id_; - if ((options.sequence_id_ != 0) || (options.sequence_id_str_ != "")) { - if (options.sequence_id_ != 0) { - triton_options->sequence_id_ = options.sequence_id_; - } else { - triton_options->sequence_id_str_ = options.sequence_id_str_; - } - triton_options->sequence_start_ = options.sequence_start_; - triton_options->sequence_end_ = options.sequence_end_; - } -} - -void -TritonCApiClientBackend::ParseStatistics( - const rapidjson::Document& infer_stat, - std::map* model_stats) -{ - model_stats->clear(); - for (const auto& this_stat : infer_stat["model_stats"].GetArray()) { - auto it = model_stats - ->emplace( - std::make_pair( - this_stat["name"].GetString(), - this_stat["version"].GetString()), - ModelStatistics()) - .first; - it->second.inference_count_ = this_stat["inference_count"].GetUint64(); - it->second.execution_count_ = this_stat["execution_count"].GetUint64(); - it->second.success_count_ = - this_stat["inference_stats"]["success"]["count"].GetUint64(); - it->second.queue_count_ = - this_stat["inference_stats"]["queue"]["count"].GetUint64(); - it->second.compute_input_count_ = - this_stat["inference_stats"]["compute_input"]["count"].GetUint64(); - it->second.compute_infer_count_ = - this_stat["inference_stats"]["compute_infer"]["count"].GetUint64(); - it->second.compute_output_count_ = - this_stat["inference_stats"]["compute_output"]["count"].GetUint64(); - it->second.cumm_time_ns_ = - this_stat["inference_stats"]["success"]["ns"].GetUint64(); - it->second.queue_time_ns_ = - this_stat["inference_stats"]["queue"]["ns"].GetUint64(); - it->second.compute_input_time_ns_ = - this_stat["inference_stats"]["compute_input"]["ns"].GetUint64(); - it->second.compute_infer_time_ns_ = - this_stat["inference_stats"]["compute_infer"]["ns"].GetUint64(); - it->second.compute_output_time_ns_ = - this_stat["inference_stats"]["compute_output"]["ns"].GetUint64(); - it->second.cache_hit_count_ = - this_stat["inference_stats"]["cache_hit"]["count"].GetUint64(); - it->second.cache_hit_time_ns_ = - this_stat["inference_stats"]["cache_hit"]["ns"].GetUint64(); - it->second.cache_miss_count_ = - this_stat["inference_stats"]["cache_miss"]["count"].GetUint64(); - it->second.cache_miss_time_ns_ = - 
this_stat["inference_stats"]["cache_miss"]["ns"].GetUint64(); - } -} - -void -TritonCApiClientBackend::ParseInferStat( - const tc::InferStat& triton_infer_stat, InferStat* infer_stat) -{ - infer_stat->completed_request_count = - triton_infer_stat.completed_request_count; - infer_stat->cumulative_total_request_time_ns = - triton_infer_stat.cumulative_total_request_time_ns; - infer_stat->cumulative_send_time_ns = - triton_infer_stat.cumulative_send_time_ns; - infer_stat->cumulative_receive_time_ns = - triton_infer_stat.cumulative_receive_time_ns; -} - -//============================================================================== - -Error -TritonCApiInferInput::Create( - InferInput** infer_input, const std::string& name, - const std::vector& dims, const std::string& datatype) -{ - TritonCApiInferInput* local_infer_input = - new TritonCApiInferInput(name, datatype); - - tc::InferInput* triton_infer_input; - RETURN_IF_TRITON_ERROR( - tc::InferInput::Create(&triton_infer_input, name, dims, datatype)); - local_infer_input->input_.reset(triton_infer_input); - - *infer_input = local_infer_input; - return Error::Success; -} - -const std::vector& -TritonCApiInferInput::Shape() const -{ - return input_->Shape(); -} - -Error -TritonCApiInferInput::SetShape(const std::vector& shape) -{ - RETURN_IF_TRITON_ERROR(input_->SetShape(shape)); - return Error::Success; -} - -Error -TritonCApiInferInput::Reset() -{ - RETURN_IF_TRITON_ERROR(input_->Reset()); - return Error::Success; -} - -Error -TritonCApiInferInput::AppendRaw(const uint8_t* input, size_t input_byte_size) -{ - RETURN_IF_TRITON_ERROR(input_->AppendRaw(input, input_byte_size)); - return Error::Success; -} - -Error -TritonCApiInferInput::SetSharedMemory( - const std::string& name, size_t byte_size, size_t offset) -{ - RETURN_IF_TRITON_ERROR(input_->SetSharedMemory(name, byte_size, offset)); - return Error::Success; -} - -TritonCApiInferInput::TritonCApiInferInput( - const std::string& name, const std::string& datatype) - : InferInput(BackendKind::TRITON_C_API, name, datatype) -{ -} - - -//============================================================================== - -Error -TritonCApiInferRequestedOutput::Create( - InferRequestedOutput** infer_output, const std::string& name, - const size_t class_count, const std::string& datatype) -{ - TritonCApiInferRequestedOutput* local_infer_output = - new TritonCApiInferRequestedOutput(name); - - tc::InferRequestedOutput* triton_infer_output; - RETURN_IF_TRITON_ERROR(tc::InferRequestedOutput::Create( - &triton_infer_output, name, class_count, datatype)); - local_infer_output->output_.reset(triton_infer_output); - - *infer_output = local_infer_output; - - return Error::Success; -} - -Error -TritonCApiInferRequestedOutput::SetSharedMemory( - const std::string& name, size_t byte_size, size_t offset) -{ - RETURN_IF_TRITON_ERROR(output_->SetSharedMemory(name, byte_size, offset)); - return Error::Success; -} - -TritonCApiInferRequestedOutput::TritonCApiInferRequestedOutput( - const std::string& name) - : InferRequestedOutput(BackendKind::TRITON_C_API, name) -{ -} - -//============================================================================== - -TritonCApiInferResult::TritonCApiInferResult(capi::InferResult* result) -{ - result_.reset(result); -} - -Error -TritonCApiInferResult::Id(std::string* id) const -{ - RETURN_IF_TRITON_ERROR(result_->Id(id)); - return Error::Success; -} - -Error -TritonCApiInferResult::RequestStatus() const -{ - RETURN_IF_TRITON_ERROR(result_->RequestStatus()); - return Error::Success; 
-} - -Error -TritonCApiInferResult::RawData( - const std::string& output_name, const uint8_t** buf, - size_t* byte_size) const -{ - return Error( - "Output retrieval is not currently supported for Triton C API client " - "backend"); -} - -//============================================================================== - -}}}} // namespace triton::perfanalyzer::clientbackend::tritoncapi diff --git a/src/c++/perf_analyzer/client_backend/triton_c_api/triton_c_api_backend.h b/src/c++/perf_analyzer/client_backend/triton_c_api/triton_c_api_backend.h deleted file mode 100644 index 0f9f5defe..000000000 --- a/src/c++/perf_analyzer/client_backend/triton_c_api/triton_c_api_backend.h +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#pragma once - -#include - -#include "../client_backend.h" -#include "shared_memory_manager.h" -#include "triton_loader.h" - -#define RETURN_IF_TRITON_ERROR(S) \ - do { \ - const tc::Error& status__ = (S); \ - if (!status__.IsOk()) { \ - return Error(status__.Message()); \ - } \ - } while (false) - -#define FAIL_IF_TRITON_ERR(X, MSG) \ - { \ - const tc::Error err = (X); \ - if (!err.IsOk()) { \ - std::cerr << "error: " << (MSG) << ": " << err << std::endl; \ - exit(1); \ - } \ - } - -namespace tc = triton::client; -namespace cb = triton::perfanalyzer::clientbackend; -namespace capi = triton::perfanalyzer::clientbackend::tritoncapi; - -namespace triton { namespace perfanalyzer { namespace clientbackend { -namespace tritoncapi { - -class InferResult; - -//============================================================================== -/// TritonCApiClientBackend uses triton client C++ library to communicate with -/// triton inference service. This uses the local C++ library -/// -class TritonCApiClientBackend : public ClientBackend { - public: - /// Create a triton client backend which can be used to interact with the - /// server. - /// \param triton_server_path Tritonserver library that contains - /// lib/libtritonserver.so. 
- /// \param model_repository_path The model repository. - /// \param verbose Enables the verbose mode of TritonServer. - /// \param client_backend Returns a new TritonCApiClientBackend object. - /// \return Error object indicating success - /// or failure. - static Error Create( - const std::string& triton_server_path, - const std::string& model_repository_path, const bool verbose, - std::unique_ptr* client_backend); - - ~TritonCApiClientBackend() { triton_loader_->Delete(); } - - /// See ClientBackend::ServerExtensions() - Error ServerExtensions(std::set* server_extensions) override; - - /// See ClientBackend::ModelMetadata() - Error ModelMetadata( - rapidjson::Document* model_metadata, const std::string& model_name, - const std::string& model_version) override; - - /// See ClientBackend::ModelConfig() - Error ModelConfig( - rapidjson::Document* model_config, const std::string& model_name, - const std::string& model_version) override; - - /// See ClientBackend::Infer() - Error Infer( - cb::InferResult** result, const InferOptions& options, - const std::vector& inputs, - const std::vector& outputs) override; - - /// See ClientBackend::ClientInferStat() - Error ClientInferStat(InferStat* infer_stat) override; - - /// See ClientBackend::ModelInferenceStatistics() - Error ModelInferenceStatistics( - std::map* model_stats, - const std::string& model_name = "", - const std::string& model_version = "") override; - -#ifdef TRITON_ENABLE_GPU - /// See ClientBackend::RegisterCudaMemory - Error RegisterCudaMemory( - const std::string& name, void* handle, const size_t byte_size) override; -#endif // TRITON_ENABLE_GPU - - /// See ClientBackend::RegisterSystemMemory - Error RegisterSystemMemory( - const std::string& name, void* ptr, const size_t byte_size) override; - - /// See ClientBackend::UnregisterAllSharedMemory - Error UnregisterAllSharedMemory(); - - private: - TritonCApiClientBackend() - : ClientBackend(BackendKind::TRITON_C_API), - triton_loader_(TritonLoader::GetSingleton()) - { - } - void ParseInferInputToTriton( - const std::vector& inputs, - std::vector* triton_inputs); - void ParseInferRequestedOutputToTriton( - const std::vector& outputs, - std::vector* triton_outputs); - void ParseInferOptionsToTriton( - const InferOptions& options, tc::InferOptions* triton_options); - void ParseStatistics( - const rapidjson::Document& infer_stat, - std::map* model_stats); - void ParseInferStat( - const tc::InferStat& triton_infer_stat, InferStat* infer_stat); - TritonLoader* triton_loader_; -}; - -//============================================================== -/// TritonCApiInferInput is a wrapper around InferInput object of -/// triton client library. -/// -class TritonCApiInferInput : public InferInput { - public: - static Error Create( - InferInput** infer_input, const std::string& name, - const std::vector& dims, const std::string& datatype); - - /// Returns the raw InferInput object required by triton client library. 
- tc::InferInput* Get() const { return input_.get(); } - - /// See InferInput::Shape() - const std::vector& Shape() const override; - - /// See InferInput::SetShape() - Error SetShape(const std::vector& shape) override; - - /// See InferInput::Reset() - Error Reset() override; - - /// See InferInput::AppendRaw() - Error AppendRaw(const uint8_t* input, size_t input_byte_size) override; - - /// See InferInput::SetSharedMemory() - Error SetSharedMemory( - const std::string& name, size_t byte_size, size_t offset = 0) override; - - private: - explicit TritonCApiInferInput( - const std::string& name, const std::string& datatype); - - std::unique_ptr input_; -}; - -//============================================================== -/// TritonCApiInferRequestedOutput is a wrapper around -/// InferRequestedOutput object of triton client library. -/// -class TritonCApiInferRequestedOutput : public InferRequestedOutput { - public: - static Error Create( - InferRequestedOutput** infer_output, const std::string& name, - const size_t class_count = 0, const std::string& datatype = ""); - /// Returns the raw InferRequestedOutput object required by triton client - /// library. - tc::InferRequestedOutput* Get() const { return output_.get(); } - - /// See InferInput::SetSharedMemory() - Error SetSharedMemory( - const std::string& name, size_t byte_size, size_t offset = 0) override; - - private: - explicit TritonCApiInferRequestedOutput(const std::string& name); - - std::unique_ptr output_; -}; - -//============================================================== -/// TritonCApiInferResult is a wrapper around InferResult object of -/// the C API library. -/// -class TritonCApiInferResult : public cb::InferResult { - public: - explicit TritonCApiInferResult(capi::InferResult* result); - /// See InferResult::Id() - Error Id(std::string* id) const override; - /// See InferResult::RequestStatus() - Error RequestStatus() const override; - /// See InferResult::RawData() - Error RawData( - const std::string& output_name, const uint8_t** buf, - size_t* byte_size) const override; - - private: - std::unique_ptr result_; -}; - -}}}} // namespace triton::perfanalyzer::clientbackend::tritoncapi diff --git a/src/c++/perf_analyzer/client_backend/triton_c_api/triton_loader.cc b/src/c++/perf_analyzer/client_backend/triton_c_api/triton_loader.cc deleted file mode 100644 index 35f7657f3..000000000 --- a/src/c++/perf_analyzer/client_backend/triton_c_api/triton_loader.cc +++ /dev/null @@ -1,1274 +0,0 @@ -// Copyright 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#define TRITON_INFERENCE_SERVER_CLIENT_CLASS \ - triton::perfanalyzer::clientbackend::tritoncapi::TritonLoader - -#include "triton_loader.h" - -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "c_api_infer_results.h" -#include "scoped_defer.h" - -namespace triton { namespace perfanalyzer { namespace clientbackend { -namespace tritoncapi { -namespace { - -struct AllocPayload { - struct OutputInfo { - enum Kind { BINARY, SHM }; - - Kind kind_; - void* base_; - uint64_t byte_size_; - TRITONSERVER_MemoryType memory_type_; - int64_t device_id_; - - // For shared memory - OutputInfo( - void* base, uint64_t byte_size, TRITONSERVER_MemoryType memory_type, - int64_t device_id) - : kind_(SHM), base_(base), byte_size_(byte_size), - memory_type_(memory_type), device_id_(device_id) - { - } - }; - - ~AllocPayload() - { - for (auto it : output_map_) { - delete it.second; - } - } - - std::unordered_map output_map_; -}; - -bool helper_verbose = false; -/// Helper function for allocating memory -TRITONSERVER_Error* -ResponseAlloc( - TRITONSERVER_ResponseAllocator* allocator, const char* tensor_name, - size_t byte_size, TRITONSERVER_MemoryType preferred_memory_type, - int64_t preferred_memory_type_id, void* userp, void** buffer, - void** buffer_userp, TRITONSERVER_MemoryType* actual_memory_type, - int64_t* actual_memory_type_id) -{ - // Initially attempt to make the actual memory type and id that we - // allocate be the same as preferred memory type - *actual_memory_type = preferred_memory_type; - *actual_memory_type_id = preferred_memory_type_id; - - // This variable indicates whether the buffer should be freed or not. - bool* should_free = new bool; - *buffer_userp = should_free; - *should_free = false; - - // If 'byte_size' is zero just return 'buffer' == nullptr, we don't - // need to do any other book-keeping. 
- if (byte_size == 0) { - *buffer = nullptr; - *buffer_userp = nullptr; - if (helper_verbose) { - std::cout << "allocated " << byte_size << " bytes for result tensor " - << tensor_name << std::endl; - } - } else { - AllocPayload* alloc_payload = reinterpret_cast(userp); - auto output_map_it = alloc_payload->output_map_.find(tensor_name); - if (output_map_it == alloc_payload->output_map_.end()) { - void* allocated_ptr = nullptr; - *actual_memory_type = TRITONSERVER_MEMORY_CPU; - *actual_memory_type_id = 0; - allocated_ptr = malloc(byte_size); - *should_free = true; - - if (allocated_ptr != nullptr) { - *buffer = allocated_ptr; - } - } else { - // It is in shared memory - AllocPayload::OutputInfo* output_info = output_map_it->second; - if (byte_size > output_info->byte_size_) { - return TritonLoader::GetSingleton()->ErrorNew( - TRITONSERVER_ERROR_INTERNAL, - std::string( - "shared memory size specified with the request for output '" + - std::string(tensor_name) + "' (" + - std::to_string(output_info->byte_size_) + - " bytes) should be at least " + std::to_string(byte_size) + - " bytes to hold the results") - .c_str()); - } - *actual_memory_type = output_info->memory_type_; - *actual_memory_type_id = output_info->device_id_; - *buffer = output_info->base_; - } - } - - return nullptr; // Success -} - -/// Helper function for releasing memory -TRITONSERVER_Error* -ResponseRelease( - TRITONSERVER_ResponseAllocator* allocator, void* buffer, void* buffer_userp, - size_t byte_size, TRITONSERVER_MemoryType memory_type, - int64_t memory_type_id) -{ - bool* should_free = reinterpret_cast(buffer_userp); - switch (memory_type) { - case TRITONSERVER_MEMORY_CPU: - if (*should_free) { - free(buffer); - } - break; - } - - free(should_free); - return nullptr; // Success -} - -void -InferRequestComplete( - TRITONSERVER_InferenceRequest* request, const uint32_t flags, void* userp) -{ - TritonLoader::GetSingleton()->DeleteInferRequest(request); -} - - -void -InferResponseComplete( - TRITONSERVER_InferenceResponse* response, const uint32_t flags, void* userp) -{ - if (response != nullptr) { - // Send 'response' to the future. 
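// Illustrative reduction of the completion pattern used here: the issuing
// thread owns a heap-allocated std::promise and blocks on its future, and
// this callback fulfills it (comment form; this point is inside the
// callback body):
//
//   auto* p = new std::promise<TRITONSERVER_InferenceResponse*>();
//   std::future<TRITONSERVER_InferenceResponse*> f = p->get_future();
//   // ...pass p as userp to the async inference call; on completion the
//   // callback runs p->set_value(response) and deletes p...
//   TRITONSERVER_InferenceResponse* done = f.get();  // blocks until set_value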
- std::promise* p = - reinterpret_cast*>(userp); - p->set_value(response); - delete p; - } -} - -Error -GetModelVersionFromString(const std::string& version_string, int64_t* version) -{ - if (version_string.empty()) { - *version = 1; - return Error::Success; - } - - try { - *version = std::stol(version_string); - } - catch (std::exception& e) { - return Error( - std::string( - "Failed to get model version from specified version string '" + - version_string + "' (details: " + e.what() + - "), version should be an integral value > 0") - .c_str()); - } - - if (*version < 0) { - return Error(std::string( - "invalid model version specified '" + version_string + - "' , version should be an integral value > 0") - .c_str()); - } - - return Error::Success; -} - -Error -FolderExists(const std::string& path) -{ - struct stat buffer; - if (!stat(path.c_str(), &buffer)) { - return Error::Success; - } else { - return Error("Unable to find filepath: " + path); - } -} -} // namespace - -Error -TritonLoader::Create( - const std::string& triton_server_path, - const std::string& model_repository_path, bool verbose) -{ - if (!GetSingleton()->ServerIsReady()) { - GetSingleton()->ClearHandles(); - RETURN_IF_ERROR(GetSingleton()->PopulateInternals( - triton_server_path, model_repository_path, verbose)); - RETURN_IF_ERROR(GetSingleton()->LoadServerLibrary()); - RETURN_IF_ERROR(GetSingleton()->StartTriton()); - } - - return Error::Success; -} - -Error -TritonLoader::Delete() -{ - if (server_ != nullptr) { - server_is_ready_ = false; - model_is_loaded_ = false; - server_.reset(); - } - return Error::Success; -} - -Error -TritonLoader::PopulateInternals( - const std::string& triton_server_path, - const std::string& model_repository_path, bool verbose) -{ - RETURN_IF_ERROR(FolderExists(triton_server_path)); - RETURN_IF_ERROR(FolderExists(model_repository_path)); - - triton_server_path_ = triton_server_path; - model_repository_path_ = model_repository_path; - verbose_ = verbose; - verbose_level_ = verbose_ ? 1 : 0; - return Error::Success; -} - -Error -TritonLoader::StartTriton() -{ - // Check API version. - uint32_t api_version_major, api_version_minor; - REPORT_TRITONSERVER_ERROR( - api_version_fn_(&api_version_major, &api_version_minor)); - if ((TRITONSERVER_API_VERSION_MAJOR != api_version_major) || - (TRITONSERVER_API_VERSION_MINOR > api_version_minor)) { - std::stringstream sstream; - sstream << "triton server API version mismatch. \n" - << "Expected version major:" << TRITONSERVER_API_VERSION_MAJOR - << ", minor:" << TRITONSERVER_API_VERSION_MINOR << "\n" - << " Actual version major:" << api_version_major - << ", minor:" << api_version_minor; - return Error(sstream.str()); - } - // Create the server... 
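// Illustrative values for the API-version gate in StartTriton above: the
// major versions must match exactly, and the server's minor version must be
// at least the one this client was compiled against:
//
//   compiled 1.19 vs. server 1.22  ->  ok        (1 == 1, 19 <= 22)
//   compiled 1.19 vs. server 1.17  ->  mismatch  (19 > 17)
//   compiled 2.0  vs. server 1.22  ->  mismatch  (2 != 1)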
- TRITONSERVER_ServerOptions* server_options = nullptr; - RETURN_IF_TRITONSERVER_ERROR( - options_new_fn_(&server_options), "creating server options"); - RETURN_IF_TRITONSERVER_ERROR( - options_set_model_repo_path_fn_( - server_options, model_repository_path_.c_str()), - "setting model repository path"); - RETURN_IF_TRITONSERVER_ERROR( - set_cuda_memory_pool_byte_size_(server_options, 0, 1073741824), - "setting cuda memory pool byte size failed."); - RETURN_IF_TRITONSERVER_ERROR( - set_log_verbose_fn_(server_options, verbose_level_), - "setting verbose logging level"); - RETURN_IF_TRITONSERVER_ERROR( - set_log_info_fn_(server_options, verbose_), - "setting if log verbose level is true"); - RETURN_IF_TRITONSERVER_ERROR( - set_backend_directory_fn_( - server_options, (triton_server_path_ + "/backends").c_str()), - "setting backend directory"); - RETURN_IF_TRITONSERVER_ERROR( - set_repo_agent_directory_fn_( - server_options, (triton_server_path_ + "/repoagents").c_str()), - "setting repository agent directory"); - RETURN_IF_TRITONSERVER_ERROR( - set_strict_model_config_fn_(server_options, true), - "setting strict model configuration"); - double min_compute_capability = 0; - // FIXME: Do not have GPU support right now - RETURN_IF_TRITONSERVER_ERROR( - set_min_supported_compute_capability_fn_( - server_options, min_compute_capability), - "setting minimum supported CUDA compute capability"); - TRITONSERVER_Server* server_ptr = nullptr; - RETURN_IF_TRITONSERVER_ERROR( - server_new_fn_(&server_ptr, server_options), "creating server"); - RETURN_IF_TRITONSERVER_ERROR( - server_options_delete_fn_(server_options), "deleting server options"); - std::shared_ptr shared_server( - server_ptr, server_delete_fn_); - server_ = shared_server; - - // Wait until the server is both live and ready. - size_t health_iters = 0; - while (true) { - bool live, ready; - RETURN_IF_TRITONSERVER_ERROR( - server_is_live_fn_(server_.get(), &live), - "unable to get server liveness"); - RETURN_IF_TRITONSERVER_ERROR( - server_is_ready_fn_(server_.get(), &ready), - "unable to get server readiness"); - if (live && ready) { - server_is_ready_ = true; - break; - } - - if (++health_iters >= 10) { - return Error("failed to find healthy inference server"); - } - - std::this_thread::sleep_for(std::chrono::milliseconds(500)); - } - // Print status of the server. 
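// Illustrative reduction of the health-polling loop above: at most 10
// probes, 500 ms apart, so a server that never becomes both live and ready
// fails startup after roughly five seconds instead of hanging. IsHealthy is
// a hypothetical stand-in for the paired ServerIsLive/ServerIsReady calls:
//
//   for (size_t i = 0; i < 10; ++i) {
//     if (IsHealthy()) { return Error::Success; }
//     std::this_thread::sleep_for(std::chrono::milliseconds(500));
//   }
//   return Error("failed to find healthy inference server");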
- if (verbose_) { - TRITONSERVER_Message* server_metadata_message; - RETURN_IF_TRITONSERVER_ERROR( - server_metadata_fn_(server_.get(), &server_metadata_message), - "unable to get server metadata message"); - const char* buffer; - size_t byte_size; - RETURN_IF_TRITONSERVER_ERROR( - message_serialize_to_json_fn_( - server_metadata_message, &buffer, &byte_size), - "unable to serialize server metadata message"); - - RETURN_IF_TRITONSERVER_ERROR( - message_delete_fn_(server_metadata_message), - "deleting status metadata"); - } - - return Error::Success; -} - -Error -TritonLoader::ServerMetaData(rapidjson::Document* server_metadata) -{ - if (!ServerIsReady()) { - return Error("Model is not loaded and/or server is not ready"); - } - TRITONSERVER_Message* server_metadata_message; - RETURN_IF_TRITONSERVER_ERROR( - server_metadata_fn_(server_.get(), &server_metadata_message), - "unable to get server metadata message"); - const char* buffer; - size_t byte_size; - RETURN_IF_TRITONSERVER_ERROR( - message_serialize_to_json_fn_( - server_metadata_message, &buffer, &byte_size), - "unable to serialize server metadata message"); - server_metadata->Parse(buffer, byte_size); - if (server_metadata->HasParseError()) { - return Error( - "error: failed to parse server metadata from JSON: " + - std::string(GetParseError_En(server_metadata->GetParseError())) + - " at " + std::to_string(server_metadata->GetErrorOffset())); - } - RETURN_IF_TRITONSERVER_ERROR( - message_delete_fn_(server_metadata_message), "deleting status metadata"); - return Error::Success; -} - -Error -TritonLoader::LoadModel( - const std::string& model_name, const std::string& model_version) -{ - if (!ServerIsReady()) { - return Error("server is not ready, abort!"); - } - model_name_ = model_name; - - RETURN_IF_ERROR(GetModelVersionFromString(model_version, &model_version_)); - // Wait for the model to become available. 
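// Illustrative core of the serialize-then-parse round trip that
// ServerMetaData (above) performs: the C API hands back a JSON buffer, and
// rapidjson parses it in place (buffer and byte_size come from
// TRITONSERVER_MessageSerializeToJson):
//
//   rapidjson::Document doc;
//   doc.Parse(buffer, byte_size);
//   if (doc.HasParseError()) {
//     return Error(
//         std::string(GetParseError_En(doc.GetParseError())) + " at " +
//         std::to_string(doc.GetErrorOffset()));
//   }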
- bool is_ready = false; - size_t health_iters = 0; - - // some error handling - if (model_repository_path_.empty()) { - return Error("Need to specify model repository"); - } - while (!is_ready) { - RETURN_IF_TRITONSERVER_ERROR( - model_is_ready_fn_( - server_.get(), model_name_.c_str(), model_version_, &is_ready), - "unable to get model readiness"); - if (!is_ready) { - if (++health_iters >= 10) { - return Error("model failed to be ready in 10 iterations"); - } - std::this_thread::sleep_for(std::chrono::milliseconds(500)); - continue; - } - } - // flag to confirm model is correct and loaded - model_is_loaded_ = true; - return Error::Success; -} - -Error -TritonLoader::ModelMetadata(rapidjson::Document* model_metadata) -{ - if (!ModelIsLoaded() || !ServerIsReady()) { - return Error("Model is not loaded and/or server is not ready"); - } - TRITONSERVER_Message* model_metadata_message; - - // get model metadata - RETURN_IF_TRITONSERVER_ERROR( - model_metadata_fn_( - server_.get(), model_name_.c_str(), model_version_, - &model_metadata_message), - "unable to get model metadata message"); - const char* buffer; - size_t byte_size; - RETURN_IF_TRITONSERVER_ERROR( - message_serialize_to_json_fn_( - model_metadata_message, &buffer, &byte_size), - "unable to serialize model status protobuf"); - - model_metadata->Parse(buffer, byte_size); - if (model_metadata->HasParseError()) { - return Error( - "error: failed to parse model metadata from JSON: " + - std::string(GetParseError_En(model_metadata->GetParseError())) + - " at " + std::to_string(model_metadata->GetErrorOffset())); - } - - RETURN_IF_TRITONSERVER_ERROR( - message_delete_fn_(model_metadata_message), "deleting status protobuf"); - - if (strcmp((*model_metadata)["name"].GetString(), model_name_.c_str())) { - return Error("unable to find metadata for model"); - } - - bool found_version = false; - if (model_metadata->HasMember("versions")) { - for (const auto& version : (*model_metadata)["versions"].GetArray()) { - if (strcmp(version.GetString(), std::to_string(model_version_).c_str()) == - 0) { - found_version = true; - break; - } - } - } - if (!found_version) { - std::string msg = "unable to find version " + - std::to_string(model_version_) + " status for model"; - return Error(msg); - } - return Error::Success; -} - -Error -TritonLoader::ModelConfig( - rapidjson::Document* model_config, const std::string& model_name, - const std::string& model_version) -{ - if (!ModelIsLoaded() || !ServerIsReady()) { - return Error("Model is not loaded and/or server is not ready"); - } - TRITONSERVER_Message* model_config_message; - uint32_t config_version = 1; - RETURN_IF_TRITONSERVER_ERROR( - model_config_fn_( - (server_).get(), model_name.c_str(), model_version_, config_version, - &model_config_message), - "unable to get model config message"); - const char* buffer; - size_t byte_size; - RETURN_IF_TRITONSERVER_ERROR( - message_serialize_to_json_fn_(model_config_message, &buffer, &byte_size), - "unable to serialize model config status protobuf"); - - model_config->Parse(buffer, byte_size); - if (model_config->HasParseError()) { - return Error( - "error: failed to parse model config from JSON: " + - std::string(GetParseError_En(model_config->GetParseError())) + " at " + - std::to_string(model_config->GetErrorOffset())); - } - - RETURN_IF_TRITONSERVER_ERROR( - message_delete_fn_(model_config_message), - "deleting server config status protobuf"); - - return Error::Success; -} - -Error -TritonLoader::LoadServerLibrary() -{ - std::string full_path = 
triton_server_path_ + server_library_path_; - RETURN_IF_ERROR(FolderExists(full_path)); - RETURN_IF_ERROR(OpenLibraryHandle(full_path, &dlhandle_)); - - TritonServerApiVersionFn_t apifn; - TritonServerOptionsNewFn_t onfn; - TritonServerOptionSetModelRepoPathFn_t rpfn; - TritonServerSetLogVerboseFn_t slvfn; - - TritonServerSetBackendDirFn_t sbdfn; - TritonServerSetRepoAgentDirFn_t srdfn; - TritonServerSetStrictModelConfigFn_t ssmcfn; - TritonServerSetMinSupportedComputeCapabilityFn_t smsccfn; - - TritonServerNewFn_t snfn; - TritonServerOptionsDeleteFn_t odfn; - TritonServerDeleteFn_t sdfn; - TritonServerIsLiveFn_t ilfn; - - TritonServerIsReadyFn_t irfn; - TritonServerMetadataFn_t smfn; - TritonServerMessageSerializeToJsonFn_t stjfn; - TritonServerMessageDeleteFn_t mdfn; - - TritonServerModelIsReadyFn_t mirfn; - TritonServerModelMetadataFn_t mmfn; - TritonServerResponseAllocatorNewFn_t ranfn; - TritonServerInferenceRequestNewFn_t irnfn; - - TritonServerInferenceRequestSetIdFn_t irsifn; - TritonServerInferenceRequestSetReleaseCallbackFn_t irsrcfn; - TritonServerInferenceRequestAddInputFn_t iraifn; - TritonServerInferenceRequestAddRequestedOutputFn_t irarofn; - - TritonServerInferenceRequestAppendInputDataFn_t iraidfn; - TritonServerInferenceRequestSetResponseCallbackFn_t irsrescfn; - TritonServerInferAsyncFn_t iafn; - TritonServerInferenceResponseErrorFn_t irefn; - - TritonServerInferenceResponseDeleteFn_t irdfn; - TritonServerResponseAllocatorDeleteFn_t radfn; - TritonServerErrorNewFn_t enfn; - - TritonServerMemoryTypeStringFn_t mtsfn; - TritonServerInferenceResponseOutputCountFn_t irocfn; - TritonServerDataTypeStringFn_t dtsfn; - - TritonServerErrorDeleteFn_t edfn; - TritonServerErrorCodeToStringFn_t ectsfn; - TritonServerErrorMessageFn_t emfn; - TritonServerModelConfigFn_t mcfn; - TritonServerInferenceRequestSetCorrelationIdFn_t scidfn; - TritonServerInferenceRequestSetStringCorrelationIdFn_t sscidfn; - - TritonServerInferenceRequestSetFlagsFn_t sffn; - TritonServerInferenceRequestSetPriorityFn_t spfn; - TritonServerInferenceRequestSetTimeoutMicrosecondsFn_t stmsfn; - TritonServerStringToDatatypeFn_t stdtfn; - - TritonServerInferenceResponseOutputFn_t irofn; - TritonServerRequestIdFn_t ridfn; - TritonServerRequestDeleteFn_t rdfn; - TritonServerModelStatisticsFn_t msfn; - - TritonSeverUnloadModelFn_t umfn; - TritonSeverSetLogInfoFn_t slifn; - TritonServerSetCudaMemoryPoolByteSizeFn_t scmpbsfn; - - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_ApiVersion", false /* optional */, - reinterpret_cast(&apifn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_ServerOptionsNew", false /* optional */, - reinterpret_cast(&onfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_ServerOptionsSetModelRepositoryPath", - false /* optional */, reinterpret_cast(&rpfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_ServerOptionsSetLogVerbose", - false /* optional */, reinterpret_cast(&slvfn))); - - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_ServerOptionsSetBackendDirectory", - false /* optional */, reinterpret_cast(&sbdfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_ServerOptionsSetRepoAgentDirectory", - false /* optional */, reinterpret_cast(&srdfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_ServerOptionsSetStrictModelConfig", - false /* optional */, reinterpret_cast(&ssmcfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_ServerOptionsSetMinSupportedComputeCapability", - false /* 
optional */, reinterpret_cast(&smsccfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_ServerOptionsSetCudaMemoryPoolByteSize", - false /* optional */, reinterpret_cast(&scmpbsfn))); - - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_ServerNew", false /* optional */, - reinterpret_cast(&snfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_ServerOptionsDelete", false /* optional */, - reinterpret_cast(&odfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_ServerDelete", false /* optional */, - reinterpret_cast(&sdfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_ServerIsLive", false /* optional */, - reinterpret_cast(&ilfn))); - - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_ServerIsReady", false /* optional */, - reinterpret_cast(&irfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_ServerMetadata", false /* optional */, - reinterpret_cast(&smfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_MessageSerializeToJson", false /* optional */, - reinterpret_cast(&stjfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_MessageDelete", false /* optional */, - reinterpret_cast(&mdfn))); - - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_ServerModelIsReady", false /* optional */, - reinterpret_cast(&mirfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_ServerModelMetadata", false /* optional */, - reinterpret_cast(&mmfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_ResponseAllocatorNew", false /* optional */, - reinterpret_cast(&ranfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_InferenceRequestNew", false /* optional */, - reinterpret_cast(&irnfn))); - - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_InferenceRequestSetId", false /* optional */, - reinterpret_cast(&irsifn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_InferenceRequestSetReleaseCallback", - false /* optional */, reinterpret_cast(&irsrcfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_InferenceRequestAddInput", false /* optional */, - reinterpret_cast(&iraifn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_InferenceRequestAddRequestedOutput", - false /* optional */, reinterpret_cast(&irarofn))); - - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_InferenceRequestAppendInputData", - false /* optional */, reinterpret_cast(&iraidfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_InferenceRequestSetResponseCallback", - false /* optional */, reinterpret_cast(&irsrescfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_ServerInferAsync", false /* optional */, - reinterpret_cast(&iafn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_InferenceResponseError", false /* optional */, - reinterpret_cast(&irefn))); - - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_InferenceResponseDelete", false /* optional */, - reinterpret_cast(&irdfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_ResponseAllocatorDelete", false /* optional */, - reinterpret_cast(&radfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_ErrorNew", false /* optional */, - reinterpret_cast(&enfn))); - - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_MemoryTypeString", false /* optional */, - reinterpret_cast(&mtsfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_InferenceResponseOutputCount", - false /* 
optional */, reinterpret_cast(&irocfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_DataTypeString", false /* optional */, - reinterpret_cast(&dtsfn))); - - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_ErrorDelete", false /* optional */, - reinterpret_cast(&edfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_ErrorCodeString", false /* optional */, - reinterpret_cast(&ectsfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_ErrorMessage", false /* optional */, - reinterpret_cast(&emfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_ServerModelConfig", false /* optional */, - reinterpret_cast(&mcfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_InferenceRequestSetCorrelationId", - false /* optional */, reinterpret_cast(&scidfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_InferenceRequestSetCorrelationIdString", - false /* optional */, reinterpret_cast(&sscidfn))); - - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_InferenceRequestSetFlags", false /* optional */, - reinterpret_cast(&sffn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_InferenceRequestSetPriorityUInt64", - false /* optional */, reinterpret_cast(&spfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_InferenceRequestSetTimeoutMicroseconds", - false /* optional */, reinterpret_cast(&stmsfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_StringToDataType", false /* optional */, - reinterpret_cast(&stdtfn))); - - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_InferenceResponseOutput", false /* optional */, - reinterpret_cast(&irofn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_InferenceRequestId", false /* optional */, - reinterpret_cast(&ridfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_InferenceRequestDelete", false /* optional */, - reinterpret_cast(&rdfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_ServerModelStatistics", false /* optional */, - reinterpret_cast(&msfn))); - - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_ServerUnloadModel", false /* optional */, - reinterpret_cast(&umfn))); - RETURN_IF_ERROR(GetEntrypoint( - dlhandle_, "TRITONSERVER_ServerOptionsSetLogInfo", false /* optional */, - reinterpret_cast(&slifn))); - - - api_version_fn_ = apifn; - options_new_fn_ = onfn; - options_set_model_repo_path_fn_ = rpfn; - set_log_verbose_fn_ = slvfn; - - set_backend_directory_fn_ = sbdfn; - set_repo_agent_directory_fn_ = srdfn; - set_strict_model_config_fn_ = ssmcfn; - set_min_supported_compute_capability_fn_ = smsccfn; - - server_new_fn_ = snfn; - server_options_delete_fn_ = odfn; - server_delete_fn_ = sdfn; - server_is_live_fn_ = ilfn; - - server_is_ready_fn_ = irfn; - server_metadata_fn_ = smfn; - message_serialize_to_json_fn_ = stjfn; - message_delete_fn_ = mdfn; - - model_is_ready_fn_ = mirfn; - model_metadata_fn_ = mmfn; - response_allocator_new_fn_ = ranfn; - inference_request_new_fn_ = irnfn; - - inference_request_set_id_fn_ = irsifn; - inference_request_set_release_callback_fn_ = irsrcfn; - inference_request_add_input_fn_ = iraifn; - inference_request_add_requested_output_fn_ = irarofn; - - inference_request_append_input_data_fn_ = iraidfn; - inference_request_set_response_callback_fn_ = irsrescfn; - infer_async_fn_ = iafn; - inference_response_error_fn_ = irefn; - - inference_response_delete_fn_ = irdfn; - response_allocator_delete_fn_ = radfn; - error_new_fn_ = enfn; 
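// Illustrative shape of each GetEntrypoint call above: a dlsym lookup plus a
// cast to the expected function-pointer type. The exact implementation lives
// in shared_library.cc (outside this hunk), so this is an assumption about
// its behavior rather than a copy of it:
//
//   void* fn = dlsym(dlhandle, "TRITONSERVER_ApiVersion");
//   if (fn == nullptr && !optional) {
//     return Error(std::string("unable to find entrypoint: ") + dlerror());
//   }
//   api_version_fn_ = reinterpret_cast<TritonServerApiVersionFn_t>(fn);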
- - memory_type_string_fn_ = mtsfn; - inference_response_output_count_fn_ = irocfn; - data_type_string_fn_ = dtsfn; - - error_delete_fn_ = edfn; - error_code_to_string_fn_ = ectsfn; - error_message_fn_ = emfn; - model_config_fn_ = mcfn; - set_correlation_id_fn_ = scidfn; - set_string_correlation_id_fn_ = sscidfn; - - set_flags_fn_ = sffn; - set_priority_fn_ = spfn; - set_timeout_ms_fn_ = stmsfn; - string_to_datatype_fn_ = stdtfn; - - inference_response_output_fn_ = irofn; - request_id_fn_ = ridfn; - request_delete_fn_ = rdfn; - model_statistics_fn_ = msfn; - - unload_model_fn_ = umfn; - set_log_info_fn_ = slifn; - set_cuda_memory_pool_byte_size_ = scmpbsfn; - - return Error::Success; -} - -void -TritonLoader::ClearHandles() -{ - dlhandle_ = nullptr; - - api_version_fn_ = nullptr; - options_new_fn_ = nullptr; - options_set_model_repo_path_fn_ = nullptr; - set_log_verbose_fn_ = nullptr; - - set_backend_directory_fn_ = nullptr; - set_repo_agent_directory_fn_ = nullptr; - set_strict_model_config_fn_ = nullptr; - set_min_supported_compute_capability_fn_ = nullptr; - - server_new_fn_ = nullptr; - server_options_delete_fn_ = nullptr; - server_delete_fn_ = nullptr; - server_is_live_fn_ = nullptr; - - server_is_ready_fn_ = nullptr; - server_metadata_fn_ = nullptr; - message_serialize_to_json_fn_ = nullptr; - message_delete_fn_ = nullptr; - - model_is_ready_fn_ = nullptr; - model_metadata_fn_ = nullptr; - response_allocator_new_fn_ = nullptr; - inference_request_new_fn_ = nullptr; - - inference_request_set_id_fn_ = nullptr; - inference_request_set_release_callback_fn_ = nullptr; - inference_request_add_input_fn_ = nullptr; - inference_request_add_requested_output_fn_ = nullptr; - - inference_request_append_input_data_fn_ = nullptr; - inference_request_set_response_callback_fn_ = nullptr; - infer_async_fn_ = nullptr; - inference_response_error_fn_ = nullptr; - - inference_response_delete_fn_ = nullptr; - response_allocator_delete_fn_ = nullptr; - error_new_fn_ = nullptr; - - memory_type_string_fn_ = nullptr; - inference_response_output_count_fn_ = nullptr; - data_type_string_fn_ = nullptr; - error_message_fn_ = nullptr; - - error_delete_fn_ = nullptr; - error_code_to_string_fn_ = nullptr; - model_config_fn_ = nullptr; - set_correlation_id_fn_ = nullptr; - set_string_correlation_id_fn_ = nullptr; - - set_flags_fn_ = nullptr; - set_priority_fn_ = nullptr; - set_timeout_ms_fn_ = nullptr; - string_to_datatype_fn_ = nullptr; - - inference_response_output_fn_ = nullptr; - request_id_fn_ = nullptr; - request_delete_fn_ = nullptr; - model_statistics_fn_ = nullptr; - unload_model_fn_ = nullptr; - set_log_info_fn_ = nullptr; -} - -Error -TritonLoader::FileExists(std::string& filepath) -{ - std::ifstream ifile; - ifile.open(filepath); - if (!ifile) { - return Error("unable to find local Triton library: " + filepath); - } else { - return Error::Success; - } -} - -Error -TritonLoader::Infer( - const tc::InferOptions& options, const std::vector& inputs, - const std::vector& outputs, - InferResult** result) -{ - Error error = Error::Success; - if (!ServerIsReady() || !ModelIsLoaded()) { - return Error("Server is not ready and/or requested model is not loaded"); - } - - TRITONSERVER_ResponseAllocator* allocator = nullptr; - TRITONSERVER_InferenceRequest* irequest = nullptr; - TRITONSERVER_InferenceResponse* completed_response = nullptr; - tc::RequestTimers timer; - timer.Reset(); - timer.CaptureTimestamp(tc::RequestTimers::Kind::REQUEST_START); - - RETURN_IF_ERROR(InitializeRequest(options, outputs, &allocator, 
&irequest)); - ScopedDefer error_handler([&error, &completed_response, &allocator, this] { - error = CleanUp(completed_response, allocator); - }); - RETURN_IF_ERROR(AddInputs(inputs, irequest)); - RETURN_IF_ERROR(AddOutputs(outputs, irequest)); - - AllocPayload alloc_payload; - for (auto& output : outputs) { - if (output->IsSharedMemory()) { - std::string shm_name; - size_t shm_byte_size; - size_t offset; - // TODO: Error handling - output->SharedMemoryInfo(&shm_name, &shm_byte_size, &offset); - - void* buf; - TRITONSERVER_MemoryType memory_type; - int64_t memory_type_id; - RETURN_IF_ERROR(shm_manager_->GetMemoryInfo( - shm_name, offset, shm_byte_size, &buf, &memory_type, - &memory_type_id)); - - alloc_payload.output_map_.emplace( - std::piecewise_construct, std::forward_as_tuple(output->Name()), - std::forward_as_tuple(new AllocPayload::OutputInfo( - buf, shm_byte_size, memory_type, memory_type_id))); - } - } - - const char* cid = nullptr; - RETURN_IF_TRITONSERVER_ERROR( - request_id_fn_(irequest, &cid), "Failed to get request id"); - std::string id = cid; - - // Perform inference... - timer.CaptureTimestamp(tc::RequestTimers::Kind::SEND_START); - auto p = new std::promise(); - std::future completed = p->get_future(); - RETURN_IF_TRITONSERVER_ERROR( - inference_request_set_response_callback_fn_( - irequest, allocator, &alloc_payload /* response_allocator_userp */, - InferResponseComplete, reinterpret_cast(p)), - "setting response callback"); - RETURN_IF_TRITONSERVER_ERROR( - infer_async_fn_((server_).get(), irequest, nullptr /* trace */), - "running inference"); - timer.CaptureTimestamp(tc::RequestTimers::Kind::SEND_END); - - // Wait for the inference to complete. - completed_response = completed.get(); - - RETURN_IF_TRITONSERVER_ERROR( - inference_response_error_fn_(completed_response), - "inference response error"); - - timer.CaptureTimestamp(tc::RequestTimers::Kind::RECV_START); - timer.CaptureTimestamp(tc::RequestTimers::Kind::RECV_END); - timer.CaptureTimestamp(tc::RequestTimers::Kind::REQUEST_END); - - tc::Error err = UpdateInferStat(timer); - if (!err.IsOk()) { - std::cerr << "Failed to update context stat: " << err << std::endl; - } - - InferResult::Create(result, err, id); - - // CleanUp the response allocators - error_handler.Complete(); - - return error; -} - -Error -TritonLoader::CleanUp( - TRITONSERVER_InferenceResponse* completed_response, - TRITONSERVER_ResponseAllocator* allocator) -{ - TRITONSERVER_Error* response_err = nullptr; - if (completed_response != nullptr) { - response_err = inference_response_delete_fn_(completed_response); - } - TRITONSERVER_Error* allocator_err = response_allocator_delete_fn_(allocator); - RETURN_IF_TRITONSERVER_ERROR(response_err, "deleting inference response"); - RETURN_IF_TRITONSERVER_ERROR(allocator_err, "deleting response allocator"); - return Error::Success; -} - -Error -TritonLoader::InitializeRequest( - const tc::InferOptions& options, - const std::vector& outputs, - TRITONSERVER_ResponseAllocator** allocator, - TRITONSERVER_InferenceRequest** irequest) -{ - // Create the allocator that will be used to allocate buffers for - // the result tensors. 
-
-Error
-TritonLoader::InitializeRequest(
-    const tc::InferOptions& options,
-    const std::vector<const InferRequestedOutput*>& outputs,
-    TRITONSERVER_ResponseAllocator** allocator,
-    TRITONSERVER_InferenceRequest** irequest)
-{
-  // Create the allocator that will be used to allocate buffers for
-  // the result tensors.
-  RETURN_IF_TRITONSERVER_ERROR(
-      GetSingleton()->response_allocator_new_fn_(
-          allocator,
-          reinterpret_cast<
-              TRITONSERVER_Error* (*)(TRITONSERVER_ResponseAllocator* allocator,
-                                      const char* tensor_name, size_t byte_size,
-                                      TRITONSERVER_MemoryType memory_type,
-                                      int64_t memory_type_id, void* userp,
-                                      void** buffer, void** buffer_userp,
-                                      TRITONSERVER_MemoryType*
-                                          actual_memory_type,
-                                      int64_t* actual_memory_type_id)>(
-              ResponseAlloc),
-          reinterpret_cast<
-              TRITONSERVER_Error* (*)(TRITONSERVER_ResponseAllocator* allocator,
-                                      void* buffer, void* buffer_userp,
-                                      size_t byte_size,
-                                      TRITONSERVER_MemoryType memory_type,
-                                      int64_t memory_type_id)>(ResponseRelease),
-          nullptr /* start_fn */),
-      "creating response allocator");
-
-  // set up inference request
-  RETURN_IF_TRITONSERVER_ERROR(
-      inference_request_new_fn_(
-          irequest, (server_).get(), model_name_.c_str(), model_version_),
-      "creating inference request");
-  RETURN_IF_TRITONSERVER_ERROR(
-      inference_request_set_id_fn_(*irequest, options.request_id_.c_str()),
-      "setting ID for the request");
-  if ((options.sequence_id_ != 0) || (options.sequence_id_str_ != "") ||
-      (options.priority_ != 0) || (options.server_timeout_ != 0) ||
-      outputs.empty()) {
-    if (options.sequence_id_ != 0) {
-      RETURN_IF_TRITONSERVER_ERROR(
-          set_correlation_id_fn_(*irequest, options.sequence_id_),
-          "setting sequence ID for the request");
-    } else if (options.sequence_id_str_ != "") {
-      RETURN_IF_TRITONSERVER_ERROR(
-          set_string_correlation_id_fn_(
-              *irequest, options.sequence_id_str_.c_str()),
-          "setting sequence ID for the request");
-    }
-    uint32_t flags = 0;
-    if (options.sequence_start_) {
-      flags |= TRITONSERVER_REQUEST_FLAG_SEQUENCE_START;
-    }
-    if (options.sequence_end_) {
-      flags |= TRITONSERVER_REQUEST_FLAG_SEQUENCE_END;
-    }
-    RETURN_IF_TRITONSERVER_ERROR(
-        set_flags_fn_(*irequest, flags),
-        "setting inference flags for the request");
-  }
-  if (options.priority_ != 0) {
-    RETURN_IF_TRITONSERVER_ERROR(
-        set_priority_fn_(*irequest, options.priority_),
-        "setting priority for the request");
-  }
-  if (options.server_timeout_ != 0) {
-    RETURN_IF_TRITONSERVER_ERROR(
-        set_timeout_ms_fn_(*irequest, options.server_timeout_),
-        "setting timeout for the request");
-  }
-  RETURN_IF_TRITONSERVER_ERROR(
-      inference_request_set_release_callback_fn_(
-          *irequest, InferRequestComplete, nullptr /* request_release_userp */),
-      "setting request release callback");
-  return Error::Success;
-}
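ResponseAlloc and ResponseRelease, cast above to the allocator callback types, are defined elsewhere in this file. For orientation, a minimal CPU-only allocator consistent with that alloc signature might look like the following; the name ResponseAllocSketch is invented here, and the real implementation additionally consults the AllocPayload shared-memory map:

    #include <cstdlib>

    // Sketch only: always satisfy the request from CPU heap memory.
    TRITONSERVER_Error*
    ResponseAllocSketch(
        TRITONSERVER_ResponseAllocator* allocator, const char* tensor_name,
        size_t byte_size, TRITONSERVER_MemoryType preferred_memory_type,
        int64_t preferred_memory_type_id, void* userp, void** buffer,
        void** buffer_userp, TRITONSERVER_MemoryType* actual_memory_type,
        int64_t* actual_memory_type_id)
    {
      *buffer = (byte_size == 0) ? nullptr : malloc(byte_size);
      *buffer_userp = nullptr;
      *actual_memory_type = TRITONSERVER_MEMORY_CPU;
      *actual_memory_type_id = 0;
      return nullptr;  // nullptr TRITONSERVER_Error* means success
    }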
-
-Error
-TritonLoader::AddInputs(
-    const std::vector<InferInput*>& inputs,
-    TRITONSERVER_InferenceRequest* irequest)
-{
-  for (auto io : inputs) {
-    const char* input_name = io->Name().c_str();
-    const char* datatype = io->Datatype().c_str();
-    const TRITONSERVER_DataType dtype = string_to_datatype_fn_(datatype);
-    std::vector<int64_t> shape_vec;
-    for (const int64_t dim : io->Shape()) {
-      shape_vec.push_back(dim);
-    }
-
-    RETURN_IF_TRITONSERVER_ERROR(
-        inference_request_add_input_fn_(
-            irequest, input_name, dtype, &shape_vec[0], shape_vec.size()),
-        "setting input for the request");
-    size_t byte_size;
-    tc::Error err = io->ByteSize(&byte_size);
-    if (!err.IsOk()) {
-      return Error(err.Message());
-    }
-    if (byte_size == 0) {
-      RETURN_IF_TRITONSERVER_ERROR(
-          inference_request_append_input_data_fn_(
-              irequest, input_name, nullptr, 0 /* byte_size */,
-              TRITONSERVER_MEMORY_CPU /* memory type */,
-              0 /* memory_type_id */),
-          "appending input data with byte size zero");
-    } else {
-      if (!io->IsSharedMemory()) {
-        io->PrepareForRequest();
-        bool end_of_input = false;
-        while (!end_of_input) {
-          const uint8_t* buf;
-          size_t buf_size;
-          io->GetNext(&buf, &buf_size, &end_of_input);
-          if (buf != nullptr) {
-            RETURN_IF_TRITONSERVER_ERROR(
-                inference_request_append_input_data_fn_(
-                    irequest, input_name, const_cast<uint8_t*>(buf), buf_size,
-                    TRITONSERVER_MEMORY_CPU /* memory_type */,
-                    0 /* memory_type_id */),
-                "appending data to tritonserver");
-          }
-        }
-      } else {
-        std::string shm_name;
-        size_t shm_byte_size;
-        size_t offset;
-        // TODO: Error handling
-        io->SharedMemoryInfo(&shm_name, &shm_byte_size, &offset);
-        void* buf;
-        TRITONSERVER_MemoryType memory_type;
-        int64_t memory_type_id;
-        RETURN_IF_ERROR(shm_manager_->GetMemoryInfo(
-            shm_name, offset, shm_byte_size, &buf, &memory_type,
-            &memory_type_id));
-        RETURN_IF_TRITONSERVER_ERROR(
-            inference_request_append_input_data_fn_(
-                irequest, input_name, buf, byte_size,
-                memory_type /* memory_type */,
-                memory_type_id /* memory_type_id */),
-            "appending data to tritonserver");
-      }
-    }
-  }
-
-  return Error::Success;
-}
-
-Error
-TritonLoader::AddOutputs(
-    const std::vector<const InferRequestedOutput*>& outputs,
-    TRITONSERVER_InferenceRequest* irequest)
-{
-  for (auto io : outputs) {
-    const char* output_name = io->Name().c_str();
-    RETURN_IF_TRITONSERVER_ERROR(
-        inference_request_add_requested_output_fn_(irequest, output_name),
-        "setting output for the request");
-  }
-  return Error::Success;
-}
-
-Error
-TritonLoader::ModelInferenceStatistics(
-    const std::string& model_name, const std::string& model_version,
-    rapidjson::Document* infer_stat)
-{
-  if (ServerIsReady() && ModelIsLoaded()) {
-    TRITONSERVER_Message* model_stats_message = nullptr;
-    int64_t requested_model_version;
-    auto err =
-        GetModelVersionFromString(model_version, &requested_model_version);
-    if (err.IsOk()) {
-      RETURN_IF_TRITONSERVER_ERROR(
-          model_statistics_fn_(
-              (server_).get(), model_name.c_str(), requested_model_version,
-              &model_stats_message),
-          "getting model statistics from server");
-
-      const char* buffer;
-      size_t byte_size;
-      RETURN_IF_TRITONSERVER_ERROR(
-          message_serialize_to_json_fn_(
-              model_stats_message, &buffer, &byte_size),
-          "serializing message to json");
-
-      infer_stat->Parse(buffer, byte_size);
-      if (infer_stat->HasParseError()) {
-        return Error(
-            "error: failed to parse server metadata from JSON: " +
-            std::string(GetParseError_En(infer_stat->GetParseError())) +
-            " at " + std::to_string(infer_stat->GetErrorOffset()));
-      }
-      RETURN_IF_TRITONSERVER_ERROR(
-          message_delete_fn_(model_stats_message),
-          "deleting inference statistics message");
-    }
-    return err;
-  } else {
-    return Error(
-        "Trying to get model statistics while server is not started or model "
-        "is not ready");
-  }
-}
-
-TritonLoader*
-TritonLoader::GetSingleton()
-{
-  static TritonLoader loader;
-  return &loader;
-}
-
-TritonLoader::~TritonLoader()
-{
-  FAIL_IF_ERR(Delete(), "dereferencing server instance...");
-  FAIL_IF_ERR(CloseLibraryHandle(dlhandle_), "error on closing triton loader");
-  ClearHandles();
-}
-
-#ifdef TRITON_ENABLE_GPU
-Error
-TritonLoader::RegisterCudaMemory(
-    const std::string& name, void* handle, const size_t byte_size)
-{
-  RETURN_IF_ERROR(shm_manager_->RegisterCUDAMemory(
-      name, handle, byte_size, 0 /* device id */));
-  return Error::Success;
-}
-#endif  // TRITON_ENABLE_GPU
-
-Error
-TritonLoader::RegisterSystemMemory(
-    const std::string& name, void* ptr, const size_t byte_size)
-{
-  RETURN_IF_ERROR(shm_manager_->RegisterSystemMemory(name, ptr, byte_size));
-  return Error::Success;
-}
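The two Register* entry points above are how callers pre-register buffers so that AddInputs() and Infer() can take the shared-memory path instead of copying data. A hypothetical caller, with the buffer and region name invented for illustration:

    #include <vector>

    // Sketch: register an input buffer once, then reference it by name.
    std::vector<float> input(16, 0.f);  // hypothetical tensor storage
    TritonLoader* loader = TritonLoader::GetSingleton();
    loader->RegisterSystemMemory(
        "input0_shm", input.data(), input.size() * sizeof(float));
    // ... run inferences whose inputs reference "input0_shm" ...
    loader->UnregisterAllSharedMemory();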
-
-Error
-TritonLoader::UnregisterAllSharedMemory()
-{
-  RETURN_IF_ERROR(shm_manager_->UnregisterAll(TRITONSERVER_MEMORY_CPU));
-  RETURN_IF_ERROR(shm_manager_->UnregisterAll(TRITONSERVER_MEMORY_GPU));
-  return Error::Success;
-}
-
-TRITONSERVER_Error*
-TritonLoader::ErrorNew(TRITONSERVER_Error_Code code, const char* message)
-{
-  return error_new_fn_(code, message);
-}
-
-}}}}  // namespace triton::perfanalyzer::clientbackend::tritoncapi
diff --git a/src/c++/perf_analyzer/client_backend/triton_c_api/triton_loader.h b/src/c++/perf_analyzer/client_backend/triton_c_api/triton_loader.h
deleted file mode 100644
index 1a18176c8..000000000
--- a/src/c++/perf_analyzer/client_backend/triton_c_api/triton_loader.h
+++ /dev/null
@@ -1,519 +0,0 @@
-// Copyright 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#pragma once
-
-#include <rapidjson/document.h>
-#include <rapidjson/error/en.h>
-
-#include <functional>
-#include <iostream>
-#include <memory>
-#include <string>
-
-#include "../client_backend.h"
-#include "common.h"
-#include "shared_library.h"
-#include "shared_memory_manager.h"
-#include "triton/core/tritonserver.h"
-
-// If TRITONSERVER error is non-OK, return the corresponding status.
-#define RETURN_IF_TRITONSERVER_ERROR(E, MSG)                           \
-  do {                                                                 \
-    TRITONSERVER_Error* err__ = (E);                                   \
-    if (err__ != nullptr) {                                            \
-      std::cout << "error: " << (MSG) << ": "                          \
-                << GetSingleton()->error_code_to_string_fn_(err__)     \
-                << " - " << GetSingleton()->error_message_fn_(err__)   \
-                << std::endl;                                          \
-      Error newErr = Error(MSG);                                       \
-      GetSingleton()->error_delete_fn_(err__);                         \
-      return newErr;                                                   \
-    }                                                                  \
-  } while (false)
-
-#define FAIL_IF_TRITONSERVER_ERROR(E, MSG)                             \
-  do {                                                                 \
-    TRITONSERVER_Error* err__ = (E);                                   \
-    if (err__ != nullptr) {                                            \
-      std::cerr << "error: " << (MSG) << ": "                          \
-                << GetSingleton()->error_code_to_string_fn_(err__)     \
-                << " - " << GetSingleton()->error_message_fn_(err__)   \
-                << std::endl;                                          \
-      Error newErr = Error(MSG);                                       \
-      GetSingleton()->error_delete_fn_(err__);                         \
-      exit(1);                                                         \
-    }                                                                  \
-  } while (false)
-
-#define REPORT_TRITONSERVER_ERROR(E)                                   \
-  do {                                                                 \
-    TRITONSERVER_Error* err__ = (E);                                   \
-    if (err__ != nullptr) {                                            \
-      std::cout << GetSingleton()->error_message_fn_(err__)            \
-                << std::endl;                                          \
-      GetSingleton()->error_delete_fn_(err__);                         \
-    }                                                                  \
-  } while (false)
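These macros convert a TRITONSERVER_Error into the backend's Error type (or abort, for the FAIL variant) while ensuring the C error object is freed. A typical call shape, as used throughout triton_loader.cc, might be:

    // Sketch: only valid inside a member function that returns Error,
    // since the macro expands to an early return on failure.
    bool ready = false;
    RETURN_IF_TRITONSERVER_ERROR(
        server_is_ready_fn_((server_).get(), &ready), "checking readiness");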
-
-namespace tc = triton::client;
-
-namespace triton { namespace perfanalyzer { namespace clientbackend {
-namespace tritoncapi {
-
-class InferResult;
-
-class TritonLoader : public tc::InferenceServerClient {
- public:
-  ~TritonLoader();
-
-  static Error Create(
-      const std::string& triton_server_path,
-      const std::string& model_repository_path, bool verbose);
-
-  Error Delete();
-  Error StartTriton();
-
-  Error LoadModel(
-      const std::string& model_name, const std::string& model_version);
-
-  Error ModelMetadata(rapidjson::Document* model_metadata);
-
-  Error ModelConfig(
-      rapidjson::Document* model_config, const std::string& model_name,
-      const std::string& model_version);
-
-  Error ServerMetaData(rapidjson::Document* server_metadata);
-
-  Error Infer(
-      const tc::InferOptions& options,
-      const std::vector<InferInput*>& inputs,
-      const std::vector<const InferRequestedOutput*>& outputs,
-      InferResult** result);
-
-  Error CleanUp(
-      TRITONSERVER_InferenceResponse* completed_response,
-      TRITONSERVER_ResponseAllocator* allocator);
-
-  Error ModelInferenceStatistics(
-      const std::string& model_name, const std::string& model_version,
-      rapidjson::Document* infer_stat);
-
-  Error ClientInferStat(tc::InferStat* infer_stat)
-  {
-    *infer_stat = infer_stat_;
-    return Error::Success;
-  }
-
-#ifdef TRITON_ENABLE_GPU
-  Error RegisterCudaMemory(
-      const std::string& name, void* handle, const size_t byte_size);
-#endif  // TRITON_ENABLE_GPU
-
-  Error RegisterSystemMemory(
-      const std::string& name, void* ptr, const size_t byte_size);
-
-  Error UnregisterAllSharedMemory();
-
-  TRITONSERVER_Error* ErrorNew(
-      TRITONSERVER_Error_Code code, const char* message);
-
-  bool ModelIsLoaded() { return model_is_loaded_; }
-  bool ServerIsReady() { return server_is_ready_; }
-
-  TRITONSERVER_Error* DeleteInferRequest(
-      TRITONSERVER_InferenceRequest* irequest)
-  {
-    return request_delete_fn_(irequest);
-  }
-  static TritonLoader* GetSingleton();
-
-  // TRITONSERVER_ApiVersion
-  typedef TRITONSERVER_Error* (*TritonServerApiVersionFn_t)(
-      uint32_t* major, uint32_t* minor);
-  // TRITONSERVER_ServerOptionsNew
-  typedef TRITONSERVER_Error* (*TritonServerOptionsNewFn_t)(
-      TRITONSERVER_ServerOptions** options);
-  // TRITONSERVER_ServerOptionsSetModelRepositoryPath
-  typedef TRITONSERVER_Error* (*TritonServerOptionSetModelRepoPathFn_t)(
-      TRITONSERVER_ServerOptions* options, const char* model_repository_path);
-  // TRITONSERVER_ServerOptionsSetLogVerbose
-  typedef TRITONSERVER_Error*
(*TritonServerSetLogVerboseFn_t)( - TRITONSERVER_ServerOptions* options, int level); - - // TRITONSERVER_ServerOptionsSetBackendDirectory - typedef TRITONSERVER_Error* (*TritonServerSetBackendDirFn_t)( - TRITONSERVER_ServerOptions* options, const char* backend_dir); - - // TRITONSERVER_ServerOptionsSetRepoAgentDirectory - typedef TRITONSERVER_Error* (*TritonServerSetRepoAgentDirFn_t)( - TRITONSERVER_ServerOptions* options, const char* repoagent_dir); - - // TRITONSERVER_ServerOptionsSetStrictModelConfig - typedef TRITONSERVER_Error* (*TritonServerSetStrictModelConfigFn_t)( - TRITONSERVER_ServerOptions* options, bool strict); - - // TRITONSERVER_ServerOptionsSetMinSupportedComputeCapability - typedef TRITONSERVER_Error* ( - *TritonServerSetMinSupportedComputeCapabilityFn_t)( - TRITONSERVER_ServerOptions* options, double cc); - - // TRITONSERVER_ServerNew - typedef TRITONSERVER_Error* (*TritonServerNewFn_t)( - TRITONSERVER_Server** server, TRITONSERVER_ServerOptions* option); - - // TRITONSERVER_ServerOptionsDelete - typedef TRITONSERVER_Error* (*TritonServerOptionsDeleteFn_t)( - TRITONSERVER_ServerOptions* options); - - // TRITONSERVER_ServerDelete - typedef TRITONSERVER_Error* (*TritonServerDeleteFn_t)( - TRITONSERVER_Server* server); - - // TRITONSERVER_ServerIsLive - typedef TRITONSERVER_Error* (*TritonServerIsLiveFn_t)( - TRITONSERVER_Server* server, bool* live); - - // TRITONSERVER_ServerIsReady - typedef TRITONSERVER_Error* (*TritonServerIsReadyFn_t)( - TRITONSERVER_Server* server, bool* ready); - - // TRITONSERVER_ServerMetadata - typedef TRITONSERVER_Error* (*TritonServerMetadataFn_t)( - TRITONSERVER_Server* server, TRITONSERVER_Message** server_metadata); - - // TRITONSERVER_MessageSerializeToJson - typedef TRITONSERVER_Error* (*TritonServerMessageSerializeToJsonFn_t)( - TRITONSERVER_Message* message, const char** base, size_t* byte_size); - - // TRITONSERVER_MessageDelete - typedef TRITONSERVER_Error* (*TritonServerMessageDeleteFn_t)( - TRITONSERVER_Message* message); - - // TRITONSERVER_ServerModelIsReady - typedef TRITONSERVER_Error* (*TritonServerModelIsReadyFn_t)( - TRITONSERVER_Server* server, const char* model_name, - const int64_t model_version, bool* ready); - - // TRITONSERVER_ServerModelMetadata - typedef TRITONSERVER_Error* (*TritonServerModelMetadataFn_t)( - TRITONSERVER_Server* server, const char* model_name, - const int64_t model_version, TRITONSERVER_Message** model_metadata); - - // TRITONSERVER_ResponseAllocatorNew - typedef TRITONSERVER_Error* (*TritonServerResponseAllocatorNewFn_t)( - TRITONSERVER_ResponseAllocator** allocator, - TRITONSERVER_ResponseAllocatorAllocFn_t alloc_fn, - TRITONSERVER_ResponseAllocatorReleaseFn_t release_fn, - TRITONSERVER_ResponseAllocatorStartFn_t start_fn); - - // TRITONSERVER_InferenceRequestNew - typedef TRITONSERVER_Error* (*TritonServerInferenceRequestNewFn_t)( - TRITONSERVER_InferenceRequest** inference_request, - TRITONSERVER_Server* server, const char* model_name, - const int64_t model_version); - - // TRITONSERVER_InferenceRequestSetId - typedef TRITONSERVER_Error* (*TritonServerInferenceRequestSetIdFn_t)( - TRITONSERVER_InferenceRequest* inference_request, const char* id); - - // TRITONSERVER_InferenceRequestSetReleaseCallback - typedef TRITONSERVER_Error* ( - *TritonServerInferenceRequestSetReleaseCallbackFn_t)( - TRITONSERVER_InferenceRequest* inference_request, - TRITONSERVER_InferenceRequestReleaseFn_t request_release_fn, - void* request_release_userp); - - // TRITONSERVER_InferenceRequestAddInput - typedef 
TRITONSERVER_Error* (*TritonServerInferenceRequestAddInputFn_t)( - TRITONSERVER_InferenceRequest* inference_request, const char* name, - const TRITONSERVER_DataType datatype, const int64_t* shape, - uint64_t dim_count); - - // TRITONSERVER_InferenceRequestAddRequestedOutput - typedef TRITONSERVER_Error* ( - *TritonServerInferenceRequestAddRequestedOutputFn_t)( - TRITONSERVER_InferenceRequest* inference_request, const char* name); - - // TRITONSERVER_InferenceRequestAppendInputData - typedef TRITONSERVER_Error* ( - *TritonServerInferenceRequestAppendInputDataFn_t)( - TRITONSERVER_InferenceRequest* inference_request, const char* name, - const void* base, size_t byte_size, TRITONSERVER_MemoryType memory_type, - int64_t memory_type_i); - - // TRITONSERVER_InferenceRequestSetResponseCallback - typedef TRITONSERVER_Error* ( - *TritonServerInferenceRequestSetResponseCallbackFn_t)( - TRITONSERVER_InferenceRequest* inference_request, - TRITONSERVER_ResponseAllocator* response_allocator, - void* response_allocator_userp, - TRITONSERVER_InferenceResponseCompleteFn_t response_fn, - void* response_userp); - - // TRITONSERVER_ServerInferAsync - typedef TRITONSERVER_Error* (*TritonServerInferAsyncFn_t)( - TRITONSERVER_Server* server, - TRITONSERVER_InferenceRequest* inference_request, - TRITONSERVER_InferenceTrace* trace); - - // TRITONSERVER_InferenceResponseError - typedef TRITONSERVER_Error* (*TritonServerInferenceResponseErrorFn_t)( - TRITONSERVER_InferenceResponse* inference_response); - - // TRITONSERVER_InferenceResponseDelete - typedef TRITONSERVER_Error* (*TritonServerInferenceResponseDeleteFn_t)( - TRITONSERVER_InferenceResponse* inference_response); - - // TRITONSERVER_InferenceRequestRemoveAllInputData - typedef TRITONSERVER_Error* ( - *TritonServerInferenceRequestRemoveAllInputDataFn_t)( - TRITONSERVER_InferenceRequest* inference_request, const char* name); - - // TRITONSERVER_ResponseAllocatorDelete - typedef TRITONSERVER_Error* (*TritonServerResponseAllocatorDeleteFn_t)( - TRITONSERVER_ResponseAllocator* allocator); - - // TRITONSERVER_ErrorNew - typedef TRITONSERVER_Error* (*TritonServerErrorNewFn_t)( - TRITONSERVER_Error_Code code, const char* msg); - - // TRITONSERVER_MemoryTypeString - typedef const char* (*TritonServerMemoryTypeStringFn_t)( - TRITONSERVER_MemoryType memtype); - - // TRITONSERVER_InferenceResponseOutputCount - typedef TRITONSERVER_Error* (*TritonServerInferenceResponseOutputCountFn_t)( - TRITONSERVER_InferenceResponse* inference_response, uint32_t* count); - - // TRITONSERVER_DataTypeString - typedef const char* (*TritonServerDataTypeStringFn_t)( - TRITONSERVER_DataType datatype); - - // TRITONSERVER_ErrorMessage - typedef const char* (*TritonServerErrorMessageFn_t)( - TRITONSERVER_Error* error); - - // TRITONSERVER_ErrorDelete - typedef void (*TritonServerErrorDeleteFn_t)(TRITONSERVER_Error* error); - - // TRITONSERVER_ErrorCodeString - typedef const char* (*TritonServerErrorCodeToStringFn_t)( - TRITONSERVER_Error* error); - - // TRITONSERVER_ServerModelConfig - typedef TRITONSERVER_Error* (*TritonServerModelConfigFn_t)( - TRITONSERVER_Server* server, const char* model_name, - const int64_t model_version, const uint32_t config_version, - TRITONSERVER_Message** model_config); - - // TRITONSERVER_InferenceRequestSetCorrelationId - typedef TRITONSERVER_Error* ( - *TritonServerInferenceRequestSetCorrelationIdFn_t)( - TRITONSERVER_InferenceRequest* inference_request, - uint64_t correlation_id); - - // TRITONSERVER_InferenceRequestSetCorrelationId - typedef 
-      TRITONSERVER_Error* (
-          *TritonServerInferenceRequestSetStringCorrelationIdFn_t)(
-          TRITONSERVER_InferenceRequest* inference_request,
-          const char* correlation_id);
-
-  // TRITONSERVER_InferenceRequestSetFlags
-  typedef TRITONSERVER_Error* (*TritonServerInferenceRequestSetFlagsFn_t)(
-      TRITONSERVER_InferenceRequest* inference_request, uint32_t flags);
-
-  // TRITONSERVER_InferenceRequestSetPriorityUInt64
-  typedef TRITONSERVER_Error* (*TritonServerInferenceRequestSetPriorityFn_t)(
-      TRITONSERVER_InferenceRequest* inference_request, uint64_t priority);
-
-  // TRITONSERVER_InferenceRequestSetTimeoutMicroseconds
-  typedef TRITONSERVER_Error* (
-      *TritonServerInferenceRequestSetTimeoutMicrosecondsFn_t)(
-      TRITONSERVER_InferenceRequest* inference_request, uint64_t timeout_us);
-
-  // TRITONSERVER_StringToDataType
-  typedef TRITONSERVER_DataType (*TritonServerStringToDatatypeFn_t)(
-      const char* dtype);
-
-  // TRITONSERVER_InferenceResponseOutput
-  typedef TRITONSERVER_Error* (*TritonServerInferenceResponseOutputFn_t)(
-      TRITONSERVER_InferenceResponse* inference_response, const uint32_t index,
-      const char** name, TRITONSERVER_DataType* datatype, const int64_t** shape,
-      uint64_t* dim_count, const void** base, size_t* byte_size,
-      TRITONSERVER_MemoryType* memory_type, int64_t* memory_type_id,
-      void** userp);
-
-  // TRITONSERVER_InferenceRequestId
-  typedef TRITONSERVER_Error* (*TritonServerRequestIdFn_t)(
-      TRITONSERVER_InferenceRequest* inference_request, const char** id);
-
-  // TRITONSERVER_InferenceRequestDelete
-  typedef TRITONSERVER_Error* (*TritonServerRequestDeleteFn_t)(
-      TRITONSERVER_InferenceRequest* inference_request);
-
-  // TRITONSERVER_ServerModelStatistics
-  typedef TRITONSERVER_Error* (*TritonServerModelStatisticsFn_t)(
-      TRITONSERVER_Server* server, const char* model_name,
-      const int64_t model_version, TRITONSERVER_Message** model_stats);
-
-  // TRITONSERVER_ServerUnloadModel
-  typedef TRITONSERVER_Error* (*TritonSeverUnloadModelFn_t)(
-      TRITONSERVER_Server* server, const char* model_name);
-
-  // TRITONSERVER_ServerOptionsSetLogInfo
-  typedef TRITONSERVER_Error* (*TritonSeverSetLogInfoFn_t)(
-      TRITONSERVER_ServerOptions* options, bool log);
-
-  // TRITONSERVER_ServerOptionsSetCudaMemoryPoolByteSize
-  typedef TRITONSERVER_Error* (*TritonServerSetCudaMemoryPoolByteSizeFn_t)(
-      TRITONSERVER_ServerOptions* options, int gpu_device, uint64_t size);
-
- private:
-  TritonLoader()
-      : InferenceServerClient(
-            false /* verbose flag that is set later during ::Create*/)
-  {
-    verbose_level_ = 0;
-    enforce_memory_type_ = false;
-    requested_memory_type_ = TRITONSERVER_MEMORY_CPU;
-    model_is_loaded_ = false;
-    server_is_ready_ = false;
-    shm_manager_ = std::make_unique<SharedMemoryManager>();
-  }
-
-  Error PopulateInternals(
-      const std::string& triton_server_path,
-      const std::string& model_repository_path, bool verbose);
-
-  /// Load all tritonserver.h functions onto triton_loader
-  /// internal handles
-  Error LoadServerLibrary();
-
-  void ClearHandles();
-
-  /// Check if file exists in the current directory
-  /// \param filepath Path of library to check
-  /// \return perfanalyzer::clientbackend::Error
-  Error FileExists(std::string& filepath);
-
-  Error InitializeRequest(
-      const tc::InferOptions& options,
-      const std::vector<const InferRequestedOutput*>& outputs,
-      TRITONSERVER_ResponseAllocator** allocator,
-      TRITONSERVER_InferenceRequest** irequest);
-
-  Error AddInputs(
-      const std::vector<InferInput*>& inputs,
-      TRITONSERVER_InferenceRequest* irequest);
-
-  Error AddOutputs(
-      const std::vector<const InferRequestedOutput*>& outputs,
-      TRITONSERVER_InferenceRequest* irequest);
-
-  void* dlhandle_;
-  TritonServerApiVersionFn_t api_version_fn_;
-  TritonServerOptionsNewFn_t options_new_fn_;
-  TritonServerOptionSetModelRepoPathFn_t options_set_model_repo_path_fn_;
-  TritonServerSetLogVerboseFn_t set_log_verbose_fn_;
-
-  TritonServerSetBackendDirFn_t set_backend_directory_fn_;
-  TritonServerSetRepoAgentDirFn_t set_repo_agent_directory_fn_;
-  TritonServerSetStrictModelConfigFn_t set_strict_model_config_fn_;
-  TritonServerSetMinSupportedComputeCapabilityFn_t
-      set_min_supported_compute_capability_fn_;
-
-  TritonServerNewFn_t server_new_fn_;
-  TritonServerOptionsDeleteFn_t server_options_delete_fn_;
-  TritonServerDeleteFn_t server_delete_fn_;
-  TritonServerIsLiveFn_t server_is_live_fn_;
-
-  TritonServerIsReadyFn_t server_is_ready_fn_;
-  TritonServerMetadataFn_t server_metadata_fn_;
-  TritonServerMessageSerializeToJsonFn_t message_serialize_to_json_fn_;
-  TritonServerMessageDeleteFn_t message_delete_fn_;
-
-  TritonServerModelIsReadyFn_t model_is_ready_fn_;
-  TritonServerModelMetadataFn_t model_metadata_fn_;
-  TritonServerResponseAllocatorNewFn_t response_allocator_new_fn_;
-  TritonServerInferenceRequestNewFn_t inference_request_new_fn_;
-
-  TritonServerInferenceRequestSetIdFn_t inference_request_set_id_fn_;
-  TritonServerInferenceRequestSetReleaseCallbackFn_t
-      inference_request_set_release_callback_fn_;
-  TritonServerInferenceRequestAddInputFn_t inference_request_add_input_fn_;
-  TritonServerInferenceRequestAddRequestedOutputFn_t
-      inference_request_add_requested_output_fn_;
-
-  TritonServerInferenceRequestAppendInputDataFn_t
-      inference_request_append_input_data_fn_;
-  TritonServerInferenceRequestSetResponseCallbackFn_t
-      inference_request_set_response_callback_fn_;
-  TritonServerInferAsyncFn_t infer_async_fn_;
-  TritonServerInferenceResponseErrorFn_t inference_response_error_fn_;
-
-  TritonServerInferenceResponseDeleteFn_t inference_response_delete_fn_;
-  TritonServerResponseAllocatorDeleteFn_t response_allocator_delete_fn_;
-  TritonServerErrorNewFn_t error_new_fn_;
-
-  TritonServerMemoryTypeStringFn_t memory_type_string_fn_;
-  TritonServerInferenceResponseOutputCountFn_t
-      inference_response_output_count_fn_;
-  TritonServerDataTypeStringFn_t data_type_string_fn_;
-  TritonServerErrorMessageFn_t error_message_fn_;
-
-  TritonServerErrorDeleteFn_t error_delete_fn_;
-  TritonServerErrorCodeToStringFn_t error_code_to_string_fn_;
-  TritonServerModelConfigFn_t model_config_fn_;
-  TritonServerInferenceRequestSetCorrelationIdFn_t set_correlation_id_fn_;
-  TritonServerInferenceRequestSetStringCorrelationIdFn_t
-      set_string_correlation_id_fn_;
-
-  TritonServerInferenceRequestSetFlagsFn_t set_flags_fn_;
-  TritonServerInferenceRequestSetPriorityFn_t set_priority_fn_;
-  TritonServerInferenceRequestSetTimeoutMicrosecondsFn_t set_timeout_ms_fn_;
-  TritonServerStringToDatatypeFn_t string_to_datatype_fn_;
-
-  TritonServerInferenceResponseOutputFn_t inference_response_output_fn_;
-  TritonServerRequestIdFn_t request_id_fn_;
-  TritonServerRequestDeleteFn_t request_delete_fn_;
-  TritonServerModelStatisticsFn_t model_statistics_fn_;
-
-  TritonSeverUnloadModelFn_t unload_model_fn_;
-  TritonSeverSetLogInfoFn_t set_log_info_fn_;
-  TritonServerSetCudaMemoryPoolByteSizeFn_t set_cuda_memory_pool_byte_size_;
-
-  std::shared_ptr<TRITONSERVER_Server> server_{nullptr};
-  std::string triton_server_path_{};
-  const std::string server_library_path_{"/lib/libtritonserver.so"};
-  int verbose_level_{0};
-  TRITONSERVER_MemoryType requested_memory_type_{TRITONSERVER_MEMORY_CPU};
-  bool enforce_memory_type_{false};
-  std::string model_repository_path_{""};
-  std::string model_name_{""};
-  int64_t model_version_{-1};
-  bool model_is_loaded_{false};
-  bool server_is_ready_{false};
-  std::unique_ptr<SharedMemoryManager> shm_manager_{nullptr};
-};
-
-}}}}  // namespace triton::perfanalyzer::clientbackend::tritoncapi
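Taken together, this header defines a singleton that embeds tritonserver in-process for the triton_c_api service kind. The expected call sequence is roughly the following sketch; the paths and model name are hypothetical, the actual call sites live in triton_c_api_backend.cc, and whether Create() starts the server internally or StartTriton() must be called separately is not shown in this hunk:

    // Sketch: bring up the in-process server, then drive it.
    TritonLoader::Create(
        "/opt/tritonserver" /* hypothetical install path */,
        "/models" /* hypothetical repository */, false /* verbose */);
    TritonLoader* loader = TritonLoader::GetSingleton();
    loader->LoadModel("my_model" /* hypothetical */, "1");
    // ... loader->Infer(options, inputs, outputs, &result) per request ...
    loader->Delete();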
diff --git a/src/c++/perf_analyzer/command_line_parser.cc b/src/c++/perf_analyzer/command_line_parser.cc
deleted file mode 100644
index 8003be711..000000000
--- a/src/c++/perf_analyzer/command_line_parser.cc
+++ /dev/null
@@ -1,2017 +0,0 @@
-// Copyright 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-#include "command_line_parser.h"
-
-#include <getopt.h>
-
-#include <algorithm>
-#include <iomanip>
-#include <iostream>
-#include <string>
-
-#include "perf_analyzer_exception.h"
-
-namespace triton { namespace perfanalyzer {
-
-PAParamsPtr
-CLParser::Parse(int argc, char** argv)
-{
-  ParseCommandLine(argc, argv);
-  VerifyOptions();
-
-  return params_;
-}
-
-std::vector<std::string>
-SplitString(const std::string& str, const std::string& delimiter = ":")
-{
-  std::vector<std::string> substrs;
-  size_t pos = 0;
-  while (pos != std::string::npos) {
-    size_t colon_pos = str.find(":", pos);
-    substrs.push_back(str.substr(pos, colon_pos - pos));
-    if (colon_pos == std::string::npos) {
-      pos = colon_pos;
-    } else {
-      pos = colon_pos + 1;
-    }
-  }
-  return substrs;
-}
-
-void
-ToLowerCase(std::string& s)
-{
-  std::transform(s.begin(), s.end(), s.begin(), [](unsigned char c) {
-    return std::tolower(c);
-  });
-}
-
-// Used to format the usage message
-std::string
-CLParser::FormatMessage(std::string str, int offset) const
-{
-  int width = 60;
-  int current_pos = offset;
-  while (current_pos + width < int(str.length())) {
-    int n = str.rfind(' ', current_pos + width);
-    if (n != int(std::string::npos)) {
-      str.replace(n, 1, "\n\t ");
-      current_pos += (width + 10);
-    }
-  }
-  return str;
-}
-
-void
-CLParser::Usage(const std::string& msg)
-{
-  if (!msg.empty()) {
-    std::cerr << "Error: " << msg << std::endl;
-  }
-
-  std::cerr << "Usage: " << argv_[0] << " [options]" << std::endl;
-  std::cerr << "==== SYNOPSIS ====\n \n";
-  std::cerr << "\t--version " << std::endl;
-  std::cerr << "\t-m <model name>" << std::endl;
-  std::cerr << "\t-x <model version>" << std::endl;
-  std::cerr << "\t--bls-composing-models <string>" << std::endl;
-  std::cerr << "\t--model-signature-name <model signature name>" << std::endl;
-  std::cerr
-      << "\t--service-kind "
-         "<\"triton\"|\"openai\"|\"tfserving\"|\"torchserve\"|\"triton_c_api\">"
-      << std::endl;
-  std::cerr << "\t--endpoint <string>" << std::endl;
-  std::cerr << "\t-v" << std::endl;
-  std::cerr << std::endl;
-  std::cerr << "I. MEASUREMENT PARAMETERS: " << std::endl;
-  std::cerr << "\t--async (-a)" << std::endl;
-  std::cerr << "\t--sync" << std::endl;
-  std::cerr << "\t--measurement-interval (-p) <measurement window (in msec)>"
-            << std::endl;
-  std::cerr << "\t--concurrency-range <start:end:step>" << std::endl;
-  std::cerr << "\t--periodic-concurrency-range <start:end:step>" << std::endl;
-  std::cerr << "\t--request-period <number of responses>" << std::endl;
-  std::cerr << "\t--request-rate-range <start:end:step>" << std::endl;
-  std::cerr << "\t--request-distribution <\"poisson\"|\"constant\">"
-            << std::endl;
-  std::cerr << "\t--request-intervals <path to file containing time intervals>"
-            << std::endl;
-  std::cerr << "\t--serial-sequences" << std::endl;
-  std::cerr << "\t--binary-search" << std::endl;
-  std::cerr << "\t--num-of-sequences <number of concurrent sequences>"
-            << std::endl;
-  std::cerr << "\t--latency-threshold (-l) <latency threshold (in msec)>"
-            << std::endl;
-  std::cerr << "\t--max-threads <thread counts>" << std::endl;
-  std::cerr << "\t--stability-percentage (-s) <deviation threshold for "
-               "measurements (in percentage)>"
-            << std::endl;
-  std::cerr << "\t--max-trials (-r) <maximum number of measurements for each "
-               "profiling>"
-            << std::endl;
-  std::cerr << "\t--percentile <percentile>" << std::endl;
-  std::cerr << "\t--request-count <number of requests>" << std::endl;
-  std::cerr << "\tDEPRECATED OPTIONS" << std::endl;
-  std::cerr << "\t-t <number of concurrent requests>" << std::endl;
-  std::cerr << "\t-c <maximum concurrency>" << std::endl;
-  std::cerr << "\t-d" << std::endl;
-  std::cerr << std::endl;
-  std::cerr << "II.
INPUT DATA OPTIONS: " << std::endl; - std::cerr << "\t-b " << std::endl; - std::cerr << "\t--input-data <\"zero\"|\"random\"|>" << std::endl; - std::cerr << "\t--shared-memory <\"system\"|\"cuda\"|\"none\">" << std::endl; - std::cerr << "\t--output-shared-memory-size " << std::endl; - std::cerr << "\t--shape " << std::endl; - std::cerr << "\t--sequence-length " << std::endl; - std::cerr << "\t--sequence-length-variation " << std::endl; - std::cerr << "\t--sequence-id-range " << std::endl; - std::cerr << "\t--string-length " << std::endl; - std::cerr << "\t--string-data " << std::endl; - std::cerr << "\t--input-tensor-format [binary|json]" << std::endl; - std::cerr << "\t--output-tensor-format [binary|json]" << std::endl; - std::cerr << "\tDEPRECATED OPTIONS" << std::endl; - std::cerr << "\t-z" << std::endl; - std::cerr << "\t--data-directory " << std::endl; - std::cerr << std::endl; - std::cerr << "III. SERVER DETAILS: " << std::endl; - std::cerr << "\t-u " << std::endl; - std::cerr << "\t-i " - << std::endl; - std::cerr << "\t--ssl-grpc-use-ssl " << std::endl; - std::cerr << "\t--ssl-grpc-root-certifications-file " << std::endl; - std::cerr << "\t--ssl-grpc-private-key-file " << std::endl; - std::cerr << "\t--ssl-grpc-certificate-chain-file " << std::endl; - std::cerr << "\t--ssl-https-verify-peer " << std::endl; - std::cerr << "\t--ssl-https-verify-host " << std::endl; - std::cerr << "\t--ssl-https-ca-certificates-file " << std::endl; - std::cerr << "\t--ssl-https-client-certificate-file " << std::endl; - std::cerr << "\t--ssl-https-client-certificate-type " << std::endl; - std::cerr << "\t--ssl-https-private-key-file " << std::endl; - std::cerr << "\t--ssl-https-private-key-type " << std::endl; - std::cerr << std::endl; - std::cerr << "IV. OTHER OPTIONS: " << std::endl; - std::cerr << "\t-f " << std::endl; - std::cerr << "\t--profile-export-file " << std::endl; - std::cerr << "\t-H " << std::endl; - std::cerr << "\t--streaming" << std::endl; - std::cerr << "\t--grpc-compression-algorithm " - << std::endl; - std::cerr << "\t--trace-level" << std::endl; - std::cerr << "\t--trace-rate" << std::endl; - std::cerr << "\t--trace-count" << std::endl; - std::cerr << "\t--log-frequency" << std::endl; - std::cerr << "\t--collect-metrics" << std::endl; - std::cerr << "\t--metrics-url" << std::endl; - std::cerr << "\t--metrics-interval" << std::endl; - std::cerr << std::endl; - std::cerr << "==== OPTIONS ==== \n \n"; - - std::cerr << FormatMessage( - " --version: print the current version of Perf Analyzer.", - 18) - << std::endl; - - std::cerr - << std::setw(9) << std::left << " -m: " - << FormatMessage( - "This is a required argument and is used to specify the model" - " against which to run perf_analyzer.", - 9) - << std::endl; - std::cerr << std::setw(9) << std::left << " -x: " - << FormatMessage( - "The version of the above model to be used. If not specified" - " the most recent version (that is, the highest numbered" - " version) of the model will be used.", - 9) - << std::endl; - std::cerr << FormatMessage( - " --model-signature-name: The signature name of the saved " - "model to use. Default value is \"serving_default\". This " - "option will be ignored if --service-kind is not " - "\"tfserving\".", - 18) - << std::endl; - - std::cerr - << FormatMessage( - " --service-kind: Describes the kind of service perf_analyzer to " - "generate load for. The options are \"triton\", \"openai\", " - "\"triton_c_api\", \"tfserving\" and \"torchserve\". Default " - "value is \"triton\". 
Note in order to use \"openai\" you must " - "specify an endpoint via --endpoint. " - "Note in order to use \"torchserve\" backend --input-data option " - "must point to a json file holding data in the following format " - "{\"data\" : [{\"TORCHSERVE_INPUT\" : [\"\"]}, {...}...]}. The type of file here will depend " - "on the model. In order to use \"triton_c_api\" you must specify " - "the Triton server install path and the model repository path via " - "the --triton-server-directory and --model-repository flags", - 18) - << std::endl; - - std::cerr - << FormatMessage( - " --endpoint: Describes what endpoint to send requests to on the " - "server. This is required when using \"openai\" service-kind, and " - "is ignored for all other cases. Currently only " - "\"v1/chat/completions\" is confirmed to work.", - 18) - << std::endl; - - std::cerr << std::setw(9) << std::left - << " -v: " << FormatMessage("Enables verbose mode.", 9) - << std::endl; - std::cerr << std::setw(9) << std::left - << " -v -v: " << FormatMessage("Enables extra verbose mode.", 9) - << std::endl; - std::cerr << std::endl; - std::cerr << "I. MEASUREMENT PARAMETERS: " << std::endl; - std::cerr - << FormatMessage( - " --async (-a): Enables asynchronous mode in perf_analyzer. " - "By default, perf_analyzer will use synchronous API to " - "request inference. However, if the model is sequential " - "then default mode is asynchronous. Specify --sync to " - "operate sequential models in synchronous mode. In synchronous " - "mode, perf_analyzer will start threads equal to the concurrency " - "level. Use asynchronous mode to limit the number of threads, yet " - "maintain the concurrency.", - 18) - << std::endl; - std::cerr << FormatMessage( - " --sync: Force enables synchronous mode in perf_analyzer. " - "Can be used to operate perf_analyzer with sequential model " - "in synchronous mode.", - 18) - << std::endl; - std::cerr - << FormatMessage( - " --measurement-interval (-p): Indicates the time interval used " - "for each measurement in milliseconds. The perf analyzer will " - "sample a time interval specified by -p and take measurement over " - "the requests completed within that time interval. The default " - "value is 5000 msec.", - 18) - << std::endl; - std::cerr << FormatMessage( - " --measurement-mode <\"time_windows\"|\"count_windows\">: " - "Indicates the mode used for stabilizing measurements." - " \"time_windows\" will create windows such that the length " - "of each window is equal to --measurement-interval. " - "\"count_windows\" will create " - "windows such that there are at least " - "--measurement-request-count requests in each window.", - 18) - << std::endl; - std::cerr - << FormatMessage( - " --measurement-request-count: " - "Indicates the minimum number of requests to be collected in each " - "measurement window when \"count_windows\" mode is used. This " - "mode can " - "be enabled using the --measurement-mode flag.", - 18) - << std::endl; - std::cerr - << FormatMessage( - " --concurrency-range : Determines the range of " - "concurrency levels covered by the perf_analyzer. The " - "perf_analyzer " - "will start from the concurrency level of 'start' and go till " - "'end' with a stride of 'step'. The default value of 'end' and " - "'step' are 1. If 'end' is not specified then perf_analyzer will " - "run for a single concurrency level determined by 'start'. If " - "'end' is set as 0, then the concurrency limit will be " - "incremented by 'step' till latency threshold is met. 
'end' and " - "--latency-threshold can not be both 0 simultaneously. 'end' can " - "not be 0 for sequence models while using asynchronous mode.", - 18) - << std::endl; - std::cerr - << FormatMessage( - " --periodic-concurrency-range : Determines the " - "range of concurrency levels in the similar but slightly " - "different manner as the --concurrency-range. Perf Analyzer will " - "start from the concurrency level of 'start' and increase by " - "'step' each time. Unlike --concurrency-range, the 'end' " - "indicates the *total* number of concurrency since the 'start' " - "(including) and will stop increasing once the cumulative number " - "of concurrent requests has reached the 'end'. The user can " - "specify *when* to periodically increase the concurrency level " - "using the --request-period option. The concurrency level will " - "periodically increase for every n-th response specified by " - "--request-period. Since this disables stability check in Perf " - "Analyzer and reports response timestamps only, the user must " - "provide --profile-export-file to specify where to dump all the " - "measured timestamps. The default values of 'start', 'end', and " - "'step' are 1.", - 18) - << std::endl; - std::cerr - << FormatMessage( - " --request-period : Indicates the number of responses that " - "each request must receive before new, concurrent requests are " - "sent when --periodic-concurrency-range is specified. Default " - "value is 10.", - 18) - << std::endl; - std::cerr - << FormatMessage( - " --request-parameter : Specifies a custom " - "parameter that can be sent to a Triton backend as part of the " - "request. For example, providing '--request-parameter " - "max_tokens:256:int' to the command line will set an additional " - "parameter 'max_tokens' of type 'int' to 256 as part of the " - "request. The --request-parameter may be specified multiple times " - "for different custom parameters.", - 18) - << std::endl; - std::cerr - << FormatMessage( - " --request-rate-range : Determines the range of " - "request rates for load generated by analyzer. This option can " - "take floating-point values. The search along the request rate " - "range is enabled only when using this option. If not specified, " - "then analyzer will search along the concurrency-range. The " - "perf_analyzer will start from the request rate of 'start' and go " - "till 'end' with a stride of 'step'. The default values of " - "'start', 'end' and 'step' are all 1.0. If 'end' is not specified " - "then perf_analyzer will run for a single request rate as " - "determined by 'start'. If 'end' is set as 0.0, then the request " - "rate will be incremented by 'step' till latency threshold is " - "met. 'end' and --latency-threshold can not be both 0 " - "simultaneously.", - 18) - << std::endl; - std::cerr - << FormatMessage( - " --request-distribution <\"poisson\"|\"constant\">: Specifies " - "the time interval distribution between dispatching inference " - "requests to the server. Poisson distribution closely mimics the " - "real-world work load on a server. This option is ignored if not " - "using --request-rate-range. By default, this option is set to be " - "constant.", - 18) - << std::endl; - std::cerr - << FormatMessage( - " --request-intervals: Specifies a path to a file containing time " - "intervals in microseconds. Each time interval should be in a new " - "line. The analyzer will try to maintain time intervals between " - "successive generated requests to be as close as possible in this " - "file. 
This option can be used to apply custom load to server " - "with a certain pattern of interest. The analyzer will loop " - "around the file if the duration of execution exceeds to that " - "accounted for by the intervals. This option can not be used with " - "--request-rate-range or --concurrency-range.", - 18) - << std::endl; - std::cerr - << FormatMessage( - " --binary-search: Enables the binary search on the specified " - "search range. This option requires 'start' and 'end' to be " - "expilicitly specified in the --concurrency-range or " - "--request-rate-range. When using this option, 'step' is more " - "like the precision. Lower the 'step', more the number of " - "iterations along the search path to find suitable convergence. " - "By default, linear search is used.", - 18) - << std::endl; - - std::cerr << FormatMessage( - " --num-of-sequences: Sets the number of concurrent " - "sequences for sequence models. This option is ignored when " - "--request-rate-range is not specified. By default, its " - "value is 4.", - 18) - << std::endl; - - std::cerr - << FormatMessage( - " --latency-threshold (-l): Sets the limit on the observed " - "latency. Analyzer will terminate the concurrency search once " - "the measured latency exceeds this threshold. By default, " - "latency threshold is set 0 and the perf_analyzer will run " - "for entire --concurrency-range.", - 18) - << std::endl; - std::cerr - << FormatMessage( - " --max-threads: Sets the maximum number of threads that will be " - "created for providing desired concurrency or request rate. " - "However, when running" - "in synchronous mode with concurrency-range having explicit 'end' " - "specification," - "this value will be ignored. Default is 4 if --request-rate-range " - "is specified otherwise default is 16.", - 18) - << std::endl; - std::cerr - << FormatMessage( - " --stability-percentage (-s): Indicates the allowed variation in " - "latency measurements when determining if a result is stable. The " - "measurement is considered as stable if the ratio of max / min " - "from the recent 3 measurements is within (stability percentage)% " - "in terms of both infer per second and latency. Default is " - "10(%).", - 18) - << std::endl; - std::cerr << FormatMessage( - " --max-trials (-r): Indicates the maximum number of " - "measurements for each concurrency level visited during " - "search. The perf analyzer will take multiple measurements " - "and report the measurement until it is stable. The perf " - "analyzer will abort if the measurement is still unstable " - "after the maximum number of measurements. The default " - "value is 10.", - 18) - << std::endl; - std::cerr - << FormatMessage( - " --percentile: Indicates the confidence value as a percentile " - "that will be used to determine if a measurement is stable. For " - "example, a value of 85 indicates that the 85th percentile " - "latency will be used to determine stability. The percentile will " - "also be reported in the results. The default is -1 indicating " - "that the average latency is used to determine stability", - 18) - << std::endl; - std::cerr - << FormatMessage( - " --request-count: Specifies a total number of requests to " - "use for measurement. 
The default is 0, which means that there is " - "no request count and the measurement will proceed using windows " - "until stabilization is detected.", - 18) - << std::endl; - std::cerr << FormatMessage( - " --serial-sequences: Enables serial sequence mode " - "where a maximum of one request is outstanding at a time " - "for any given sequence. The default is false.", - 18) - << std::endl; - std::cerr << std::endl; - std::cerr << "II. INPUT DATA OPTIONS: " << std::endl; - std::cerr << std::setw(9) << std::left - << " -b: " << FormatMessage("Batch size for each request sent.", 9) - << std::endl; - std::cerr - << FormatMessage( - " --input-data: Select the type of data that will be used " - "for input in inference requests. The available options are " - "\"zero\", \"random\", path to a directory or a json file. If the " - "option is path to a directory then the directory must " - "contain a binary/text file for each non-string/string input " - "respectively, named the same as the input. Each " - "file must contain the data required for that input for a batch-1 " - "request. Each binary file should contain the raw binary " - "representation of the input in row-major order for non-string " - "inputs. The text file should contain all strings needed by " - "batch-1, each in a new line, listed in row-major order. When " - "pointing to a json file, user must adhere to the format " - "described in the Performance Analyzer documentation. By " - "specifying json data users can control data used with every " - "request. Multiple data streams can be specified for a sequence " - "model and the analyzer will select a data stream in a " - "round-robin fashion for every new sequence. Multiple json files " - "can also be provided (--input-data json_file1 --input-data " - "json-file2 and so on) and the analyzer will append data streams " - "from each file. When using --service-kind=torchserve make sure " - "this option points to a json file. Default is \"random\".", - 18) - << std::endl; - std::cerr << FormatMessage( - " --shared-memory <\"system\"|\"cuda\"|\"none\">: Specifies " - "the type of the shared memory to use for input and output " - "data. Default is none.", - 18) - << std::endl; - - std::cerr - << FormatMessage( - " --output-shared-memory-size: The size in bytes of the shared " - "memory region to allocate per output tensor. Only needed when " - "one or more of the outputs are of string type and/or variable " - "shape. The value should be larger than the size of the largest " - "output tensor the model is expected to return. The analyzer will " - "use the following formula to calculate the total shared memory " - "to allocate: output_shared_memory_size * number_of_outputs * " - "batch_size. Defaults to 100KB.", - 18) - << std::endl; - - std::cerr << FormatMessage( - " --shape: The shape used for the specified input. The " - "argument must be specified as 'name:shape' where the shape " - "is a comma-separated list for dimension sizes, for example " - "'--shape input_name:1,2,3' indicate tensor shape [ 1, 2, 3 " - "]. --shape may be specified multiple times to specify " - "shapes for different inputs.", - 18) - << std::endl; - std::cerr << FormatMessage( - " --sequence-length: Indicates the base length of a " - "sequence used for sequence models. A sequence with length " - "X will be composed of X requests to be sent as the " - "elements in the sequence. 
The actual length of the sequence" - "will be within +/- Y% of the base length, where Y defaults " - "to 20% and is customizable via " - "`--sequence-length-variation`. If sequence length is " - "unspecified and input data is provided, the sequence " - "length will be the number of inputs in the user-provided " - "input data. Default is 20.", - 18) - << std::endl; - std::cerr << FormatMessage( - " --sequence-length-variation: The percentage variation in " - "length of sequences. This flag is only valid when " - "not using user-provided input data or when " - "`--sequence-length` is specified while using user-provided " - "input data. Default is 20.", - 18) - << std::endl; - std::cerr - << FormatMessage( - " --sequence-id-range : Determines the range of " - "sequence id used by the perf_analyzer. The perf_analyzer " - "will start from the sequence id of 'start' and go till " - "'end' (excluded). If 'end' is not specified then perf_analyzer " - "will use new sequence id without bounds. If 'end' is specified " - "and the concurrency setting may result in maintaining a number " - "of sequences more than the range of available sequence id, " - "perf analyzer will exit with error due to possible sequence id " - "collision. The default setting is start from sequence id 1 and " - "without bounds", - 18) - << std::endl; - std::cerr << FormatMessage( - " --string-length: Specifies the length of the random " - "strings to be generated by the analyzer for string input. " - "This option is ignored if --input-data points to a " - "directory. Default is 128.", - 18) - << std::endl; - std::cerr << FormatMessage( - " --string-data: If provided, analyzer will use this string " - "to initialize string input buffers. The perf analyzer will " - "replicate the given string to build tensors of required " - "shape. --string-length will not have any effect. This " - "option is ignored if --input-data points to a directory.", - 18) - << std::endl; - std::cerr << FormatMessage( - " --input-tensor-format=[binary|json]: Specifies Triton " - "inference request input tensor format. Only valid when " - "HTTP protocol is used. Default is 'binary'.", - 18) - << std::endl; - std::cerr << FormatMessage( - " --output-tensor-format=[binary|json]: Specifies Triton " - "inference response output tensor format. Only valid when " - "HTTP protocol is used. Default is 'binary'.", - 18) - << std::endl; - std::cerr << std::endl; - std::cerr << "III. SERVER DETAILS: " << std::endl; - std::cerr << std::setw(38) << std::left << " -u: " - << FormatMessage( - "Specify URL to the server. When using triton default is " - "\"localhost:8000\" if using HTTP and \"localhost:8001\" " - "if using gRPC. When using tfserving default is " - "\"localhost:8500\". ", - 38) - << std::endl; - std::cerr << std::setw(38) << std::left << " -i: " - << FormatMessage( - "The communication protocol to use. The available protocols " - "are gRPC and HTTP. Default is HTTP.", - 38) - << std::endl; - std::cerr << std::setw(38) << std::left << " --ssl-grpc-use-ssl: " - << FormatMessage( - "Bool (true|false) for whether " - "to use encrypted channel to the server. 
Default false.", - 38) - << std::endl; - std::cerr << std::setw(38) << std::left - << " --ssl-grpc-root-certifications-file: " - << FormatMessage( - "Path to file containing the " - "PEM encoding of the server root certificates.", - 38) - << std::endl; - std::cerr << std::setw(38) << std::left << " --ssl-grpc-private-key-file: " - << FormatMessage( - "Path to file containing the " - "PEM encoding of the client's private key.", - 38) - << std::endl; - std::cerr << std::setw(38) << std::left - << " --ssl-grpc-certificate-chain-file: " - << FormatMessage( - "Path to file containing the " - "PEM encoding of the client's certificate chain.", - 38) - << std::endl; - std::cerr << std::setw(38) << std::left << " --ssl-https-verify-peer: " - << FormatMessage( - "Number (0|1) to verify the " - "peer's SSL certificate. See " - "https://curl.se/libcurl/c/CURLOPT_SSL_VERIFYPEER.html for " - "the meaning of each value. Default is 1.", - 38) - << std::endl; - std::cerr - << std::setw(38) << std::left << " --ssl-https-verify-host: " - << FormatMessage( - "Number (0|1|2) to verify the " - "certificate's name against host. " - "See https://curl.se/libcurl/c/CURLOPT_SSL_VERIFYHOST.html for " - "the meaning of each value. Default is 2.", - 38) - << std::endl; - std::cerr << std::setw(38) << std::left - << " --ssl-https-ca-certificates-file: " - << FormatMessage( - "Path to Certificate Authority " - "(CA) bundle.", - 38) - << std::endl; - std::cerr << std::setw(38) << std::left - << " --ssl-https-client-certificate-file: " - << FormatMessage("Path to the SSL client certificate.", 38) - << std::endl; - std::cerr << std::setw(38) << std::left - << " --ssl-https-client-certificate-type: " - << FormatMessage( - "Type (PEM|DER) of the client " - "SSL certificate. Default is PEM.", - 38) - << std::endl; - std::cerr << std::setw(38) << std::left << " --ssl-https-private-key-file: " - << FormatMessage( - "Path to the private keyfile " - "for TLS and SSL client cert.", - 38) - << std::endl; - std::cerr << std::setw(38) << std::left << " --ssl-https-private-key-type: " - << FormatMessage( - "Type (PEM|DER) of the private " - "key file. Default is PEM.", - 38) - << std::endl; - std::cerr << std::endl; - std::cerr << "IV. OTHER OPTIONS: " << std::endl; - std::cerr - << std::setw(9) << std::left << " -f: " - << FormatMessage( - "The latency report will be stored in the file named by " - "this option. By default, the result is not recorded in a file.", - 9) - << std::endl; - std::cerr << std::setw(9) << std::left << " --profile-export-file: " - << FormatMessage( - "Specifies the path that the profile export will be " - "generated at. By default, the profile export will not be " - "generated.", - 9) - << std::endl; - std::cerr - << std::setw(9) << std::left << " -H: " - << FormatMessage( - "The header will be added to HTTP requests (ignored for GRPC " - "requests). The header must be specified as 'Header:Value'. -H " - "may be specified multiple times to add multiple headers.", - 9) - << std::endl; - std::cerr - << FormatMessage( - " --streaming: Enables the use of streaming API. This flag is " - "only valid with gRPC protocol. By default, it is set false.", - 18) - << std::endl; - - std::cerr << FormatMessage( - " --grpc-compression-algorithm: The compression algorithm " - "to be used by gRPC when sending request. Only supported " - "when grpc protocol is being used. The supported values are " - "none, gzip, and deflate. 
Default value is none.", - 18) - << std::endl; - - std::cerr - << FormatMessage( - " --trace-level: Specify a trace level. OFF to disable tracing, " - "TIMESTAMPS to trace timestamps, TENSORS to trace tensors. It " - "may be specified multiple times to trace multiple " - "information. Default is OFF.", - 18) - << std::endl; - std::cerr - << FormatMessage( - " --trace-rate: Set the trace sampling rate. Default is 1000.", 18) - << std::endl; - std::cerr << FormatMessage( - " --trace-count: Set the number of traces to be sampled. " - "If the value is -1, the number of traces to be sampled " - "will not be limited. Default is -1.", - 18) - << std::endl; - std::cerr - << FormatMessage( - " --log-frequency: Set the trace log frequency. If the " - "value is 0, Triton will only log the trace output to " - "the trace file when shutting down. Otherwise, Triton will log " - "the trace output to . when it collects the " - "specified number of traces. For example, if the log frequency " - "is 100, when Triton collects the 100-th trace, it logs the " - "traces to file .0, and when it collects the 200-th " - "trace, it logs the 101-th to the 200-th traces to file " - ".1. Default is 0.", - 18) - << std::endl; - - std::cerr << FormatMessage( - " --triton-server-directory: The Triton server install " - "path. Required by and only used when C API " - "is used (--service-kind=triton_c_api). " - "eg:--triton-server-directory=/opt/tritonserver.", - 18) - << std::endl; - std::cerr - << FormatMessage( - " --model-repository: The model repository of which the model is " - "loaded. Required by and only used when C API is used " - "(--service-kind=triton_c_api). " - "eg:--model-repository=/tmp/host/docker-data/model_unit_test.", - 18) - << std::endl; - std::cerr << FormatMessage( - " --verbose-csv: The csv files generated by perf analyzer " - "will include additional information.", - 18) - << std::endl; - std::cerr << FormatMessage( - " --collect-metrics: Enables collection of server-side " - "inference server metrics. Outputs metrics in the csv file " - "generated with the -f option. Must enable `--verbose-csv` " - "option to use the `--collect-metrics`.", - 18) - << std::endl; - std::cerr << FormatMessage( - " --metrics-url: The URL to query for server-side inference " - "server metrics. Default is 'localhost:8002/metrics'.", - 18) - << std::endl; - std::cerr << FormatMessage( - " --metrics-interval: How often in milliseconds, within " - "each measurement window, to query for server-side " - "inference server metrics. Default is 1000.", - 18) - << std::endl; - std::cerr << FormatMessage( - " --bls-composing-models: A comma separated list of all " - "BLS composing models (with optional model version number " - "after a colon for each) that may be called by the input " - "BLS model. 
For example, 'modelA:3,modelB' would specify " - "that modelA and modelB are composing models that may be " - "called by the input BLS model, and that modelA will use " - "version 3, while modelB's version is unspecified", - 18) - << std::endl; - throw pa::PerfAnalyzerException(GENERIC_ERROR); -} - -void -CLParser::PrintVersion() -{ - std::cerr << "Perf Analyzer Version " << VERSION << " (commit " << SHA << ")" - << std::endl; - exit(SUCCESS); -} - -void -CLParser::ParseCommandLine(int argc, char** argv) -{ - argc_ = argc; - argv_ = argv; - - // {name, has_arg, *flag, val} - static struct option long_options[] = { - {"streaming", no_argument, 0, 0}, - {"max-threads", required_argument, 0, 1}, - {"sequence-length", required_argument, 0, 2}, - {"percentile", required_argument, 0, 3}, - {"data-directory", required_argument, 0, 4}, - {"shape", required_argument, 0, 5}, - {"measurement-interval", required_argument, 0, 6}, - {"concurrency-range", required_argument, 0, 7}, - {"latency-threshold", required_argument, 0, 8}, - {"stability-percentage", required_argument, 0, 9}, - {"max-trials", required_argument, 0, 10}, - {"input-data", required_argument, 0, 11}, - {"string-length", required_argument, 0, 12}, - {"string-data", required_argument, 0, 13}, - {"async", no_argument, 0, 14}, - {"sync", no_argument, 0, 15}, - {"request-rate-range", required_argument, 0, 16}, - {"num-of-sequences", required_argument, 0, 17}, - {"binary-search", no_argument, 0, 18}, - {"request-distribution", required_argument, 0, 19}, - {"request-intervals", required_argument, 0, 20}, - {"shared-memory", required_argument, 0, 21}, - {"output-shared-memory-size", required_argument, 0, 22}, - {"service-kind", required_argument, 0, 23}, - {"model-signature-name", required_argument, 0, 24}, - {"grpc-compression-algorithm", required_argument, 0, 25}, - {"measurement-mode", required_argument, 0, 26}, - {"measurement-request-count", required_argument, 0, 27}, - {"triton-server-directory", required_argument, 0, 28}, - {"model-repository", required_argument, 0, 29}, - {"sequence-id-range", required_argument, 0, 30}, - {"ssl-grpc-use-ssl", no_argument, 0, 31}, - {"ssl-grpc-root-certifications-file", required_argument, 0, 32}, - {"ssl-grpc-private-key-file", required_argument, 0, 33}, - {"ssl-grpc-certificate-chain-file", required_argument, 0, 34}, - {"ssl-https-verify-peer", required_argument, 0, 35}, - {"ssl-https-verify-host", required_argument, 0, 36}, - {"ssl-https-ca-certificates-file", required_argument, 0, 37}, - {"ssl-https-client-certificate-file", required_argument, 0, 38}, - {"ssl-https-client-certificate-type", required_argument, 0, 39}, - {"ssl-https-private-key-file", required_argument, 0, 40}, - {"ssl-https-private-key-type", required_argument, 0, 41}, - {"verbose-csv", no_argument, 0, 42}, - {"enable-mpi", no_argument, 0, 43}, - {"trace-level", required_argument, 0, 44}, - {"trace-rate", required_argument, 0, 45}, - {"trace-count", required_argument, 0, 46}, - {"log-frequency", required_argument, 0, 47}, - {"collect-metrics", no_argument, 0, 48}, - {"metrics-url", required_argument, 0, 49}, - {"metrics-interval", required_argument, 0, 50}, - {"sequence-length-variation", required_argument, 0, 51}, - {"bls-composing-models", required_argument, 0, 52}, - {"serial-sequences", no_argument, 0, 53}, - {"input-tensor-format", required_argument, 0, 54}, - {"output-tensor-format", required_argument, 0, 55}, - {"version", no_argument, 0, 56}, - {"profile-export-file", required_argument, 0, 57}, - {"periodic-concurrency-range", 
required_argument, 0, 58}, - {"request-period", required_argument, 0, 59}, - {"request-parameter", required_argument, 0, 60}, - {"endpoint", required_argument, 0, 61}, - {"request-count", required_argument, 0, 62}, - {0, 0, 0, 0}}; - - // Parse commandline... - int opt; - while ((opt = getopt_long( - argc, argv, "vdazc:u:m:x:b:t:p:i:H:l:r:s:f:", long_options, - NULL)) != -1) { - try { - switch (opt) { - case 0: - params_->streaming = true; - break; - case 1: { - std::string max_threads{optarg}; - if (std::stoi(max_threads) > 0) { - params_->max_threads = std::stoull(max_threads); - params_->max_threads_specified = true; - } else { - Usage("Failed to parse --max-threads. The value must be > 0."); - } - break; - } - case 2: { - std::string sequence_length{optarg}; - if (std::stoi(sequence_length) > 0) { - params_->sequence_length = std::stoull(sequence_length); - } else { - std::cerr << "WARNING: The sequence length must be > 0. Perf " - "Analyzer will use default value if it is measuring " - "on sequence model." - << std::endl; - } - params_->sequence_length_specified = true; - break; - } - case 3: - params_->percentile = std::atoi(optarg); - break; - case 4: - params_->user_data.push_back(optarg); - break; - case 5: { - std::string arg = optarg; - auto colon_pos = arg.rfind(":"); - if (colon_pos == std::string::npos) { - Usage( - "Failed to parse --shape. There must be a colon after input " - "name."); - } - std::string name = arg.substr(0, colon_pos); - std::string shape_str = arg.substr(name.size() + 1); - size_t pos = 0; - std::vector<int64_t> shape; - while (pos != std::string::npos) { - size_t comma_pos = shape_str.find(",", pos); - int64_t dim; - if (comma_pos == std::string::npos) { - dim = std::stoll(shape_str.substr(pos, comma_pos)); - pos = comma_pos; - } else { - dim = std::stoll(shape_str.substr(pos, comma_pos - pos)); - pos = comma_pos + 1; - } - if (dim <= 0) { - Usage( - "Failed to parse --shape. The dimensions of input tensor " - "must be > 0."); - } - shape.emplace_back(dim); - } - - params_->input_shapes[name] = shape; - break; - } - case 6: - case 'p': { - std::string measurement_window_ms{optarg}; - if (std::stoi(measurement_window_ms) > 0) { - params_->measurement_window_ms = std::stoull(measurement_window_ms); - } else { - Usage( - "Failed to parse --measurement-interval (-p). The value must " - "be > 0 msec."); - } - break; - } - case 7: { - params_->using_concurrency_range = true; - std::string arg = optarg; - std::vector<std::string> values{SplitString(arg)}; - if (values.size() > 3) { - Usage( - "Failed to parse --concurrency-range. The value does not match " - "<start:end:step>."); - } - - for (size_t i = 0; i < values.size(); ++i) { - uint64_t val = std::stoull(values[i]); - if (i == 0) { - params_->concurrency_range.start = val; - } else if (i == 1) { - params_->concurrency_range.end = val; - } else if (i == 2) { - params_->concurrency_range.step = val; - } - } - break; - } - case 8: - case 'l': { - std::string latency_threshold_ms{optarg}; - if (std::stoi(latency_threshold_ms) == 0) { - params_->latency_threshold_ms = NO_LIMIT; - } else if (std::stoi(latency_threshold_ms) > 0) { - params_->latency_threshold_ms = std::stoull(latency_threshold_ms); - } else { - Usage( - "Failed to parse --latency-threshold (-l). The value must be " - ">= 0 msecs."); - } - break; - }
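// The table and switch above follow the standard getopt_long() pattern:
// each long option carries a small integer `val` that the switch
// dispatches on, while short options come back as their char codes. A
// minimal standalone sketch of that pattern (option names and defaults
// here are illustrative only, not taken from the removed file):
#include <getopt.h>

#include <cstdlib>
#include <iostream>
#include <string>

int main(int argc, char** argv)
{
  static struct option long_options[] = {
      {"streaming", no_argument, 0, 0},
      {"max-threads", required_argument, 0, 1},
      {0, 0, 0, 0}};

  bool streaming = false;
  bool verbose = false;
  size_t max_threads = 4;

  int opt;
  while ((opt = getopt_long(argc, argv, "v", long_options, NULL)) != -1) {
    switch (opt) {
      case 0:  // --streaming
        streaming = true;
        break;
      case 1:  // --max-threads <n>
        max_threads = std::stoull(optarg);
        break;
      case 'v':
        verbose = true;
        break;
      default:  // '?' for unrecognized options
        return EXIT_FAILURE;
    }
  }
  std::cout << "streaming=" << streaming << " verbose=" << verbose
            << " max_threads=" << max_threads << std::endl;
  return EXIT_SUCCESS;
}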
- case 9: - case 's': { - std::string stability_threshold{optarg}; - if (std::stof(stability_threshold) >= 0.0) { - params_->stability_threshold = std::stof(optarg) / 100; - } else { - Usage( - "Failed to parse --stability-percentage (-s). The value must " - "be >= 0.0."); - } - break; - } - case 10: - case 'r': { - std::string max_trials{optarg}; - if (std::stoi(max_trials) > 0) { - params_->max_trials = std::stoull(max_trials); - } else { - Usage("Failed to parse --max-trials (-r). The value must be > 0."); - } - break; - } - case 11: { - std::string arg = optarg; - // Check whether the argument is a directory - if (IsDirectory(arg) || IsFile(arg)) { - params_->user_data.push_back(optarg); - } else if (arg.compare("zero") == 0) { - params_->zero_input = true; - } else if (arg.compare("random") == 0) { - break; - } else { - Usage( - "Failed to parse --input-data. Unsupported type provided: '" + - std::string(optarg) + - "'. The available options are 'zero', 'random', path to a " - "directory, or a json file."); - } - break; - } - case 12: { - std::string string_length{optarg}; - if (std::stoi(string_length) > 0) { - params_->string_length = std::stoull(string_length); - } else { - Usage("Failed to parse --string-length. The value must be > 0"); - } - break; - } - case 13: { - params_->string_data = optarg; - break; - } - case 14: - case 'a': { - params_->async = true; - break; - } - case 15: { - params_->forced_sync = true; - break; - } - case 16: { - params_->using_request_rate_range = true; - std::string arg = optarg; - size_t pos = 0; - int index = 0; - while (pos != std::string::npos) { - size_t colon_pos = arg.find(":", pos); - if (index > 2) { - Usage( - "Failed to parse --request-rate-range. The value does not " - "match <start:end:step>."); - } - if (colon_pos == std::string::npos) { - params_->request_rate_range[index] = - std::stod(arg.substr(pos, colon_pos)); - pos = colon_pos; - } else { - params_->request_rate_range[index] = - std::stod(arg.substr(pos, colon_pos - pos)); - pos = colon_pos + 1; - index++; - } - } - - break; - } - case 17: { - std::string num_of_sequences{optarg}; - if (std::stoi(num_of_sequences) > 0) { - params_->num_of_sequences = std::stoul(num_of_sequences); - } else { - Usage("Failed to parse --num-of-sequences. The value must be > 0."); - } - break; - } - case 18: { - params_->search_mode = SearchMode::BINARY; - break; - } - case 19: { - std::string arg = optarg; - if (arg.compare("poisson") == 0) { - params_->request_distribution = Distribution::POISSON; - } else if (arg.compare("constant") == 0) { - params_->request_distribution = Distribution::CONSTANT; - } else { - Usage( - "Failed to parse --request-distribution. Unsupported type " - "provided: '" + - std::string(optarg) + "'. Choices are 'poisson' or 'constant'."); - } - break; - } - case 20: { - std::string request_intervals_file{optarg}; - if (IsFile(request_intervals_file)) { - params_->request_intervals_file = request_intervals_file; - params_->using_custom_intervals = true; - } else { - Usage( - "Failed to parse --request-intervals. The value must be a " - "valid file path"); - } - break; - }
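// Several handlers above (--input-data, --request-intervals, and the SSL
// file options further down) gate on IsFile()/IsDirectory() from the
// removed perf_utils. A plausible stat()-based sketch of those helpers
// (the real implementations may have differed):
#include <sys/stat.h>

#include <string>

bool IsFile(const std::string& path)
{
  struct stat s;
  // A path counts as a file only if it exists and is a regular file.
  return stat(path.c_str(), &s) == 0 && S_ISREG(s.st_mode);
}

bool IsDirectory(const std::string& path)
{
  struct stat s;
  return stat(path.c_str(), &s) == 0 && S_ISDIR(s.st_mode);
}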
- case 21: { - std::string arg = optarg; - if (arg.compare("system") == 0) { - params_->shared_memory_type = - SharedMemoryType::SYSTEM_SHARED_MEMORY; - } else if (arg.compare("cuda") == 0) { -#ifdef TRITON_ENABLE_GPU - params_->shared_memory_type = SharedMemoryType::CUDA_SHARED_MEMORY; -#else - Usage( - "Cuda shared memory is not supported when " - "TRITON_ENABLE_GPU=0."); -#endif // TRITON_ENABLE_GPU - } else if (arg.compare("none") == 0) { - params_->shared_memory_type = SharedMemoryType::NO_SHARED_MEMORY; - } else { - Usage( - "Failed to parse --shared-memory. Unsupported type provided: " - "'" + - std::string(optarg) + - "'. The available options are 'system', 'cuda', or 'none'."); - } - break; - } - case 22: { - std::string output_shm_size{optarg}; - if (std::stoi(output_shm_size) >= 0) { - params_->output_shm_size = std::stoull(output_shm_size); - } else { - Usage( - "Failed to parse --output-shared-memory-size. The value must " - "be >= 0."); - } - break; - } - case 23: { - std::string arg = optarg; - if (arg.compare("triton") == 0) { - params_->kind = cb::TRITON; - } else if (arg.compare("tfserving") == 0) { - params_->kind = cb::TENSORFLOW_SERVING; - } else if (arg.compare("torchserve") == 0) { - params_->kind = cb::TORCHSERVE; - } else if (arg.compare("triton_c_api") == 0) { - params_->kind = cb::TRITON_C_API; - } else if (arg.compare("openai") == 0) { - params_->kind = cb::OPENAI; - } else { - Usage( - "Failed to parse --service-kind. Unsupported type provided: '" + - std::string{optarg} + - "'. The available options are 'triton', 'tfserving', " - "'torchserve', 'triton_c_api', or 'openai'."); - } - break; - } - case 24: - params_->model_signature_name = optarg; - break; - case 25: { - std::string arg = optarg; - if (arg.compare("none") == 0) { - params_->compression_algorithm = cb::COMPRESS_NONE; - } else if (arg.compare("deflate") == 0) { - params_->compression_algorithm = cb::COMPRESS_DEFLATE; - } else if (arg.compare("gzip") == 0) { - params_->compression_algorithm = cb::COMPRESS_GZIP; - } else { - Usage( - "Failed to parse --grpc-compression-algorithm. Unsupported " - "type provided: '" + - arg + - "'. The available options are 'gzip', 'deflate', or 'none'."); - } - params_->using_grpc_compression = true; - break; - } - case 26: { - std::string arg = optarg; - if (arg.compare("time_windows") == 0) { - params_->measurement_mode = MeasurementMode::TIME_WINDOWS; - } else if (arg.compare("count_windows") == 0) { - params_->measurement_mode = MeasurementMode::COUNT_WINDOWS; - } else { - Usage( - "Failed to parse --measurement-mode. Unsupported type " - "provided: '" + - arg + - "'. The available options are 'time_windows' or " - "'count_windows'."); - } - break; - } - case 27: { - std::string request_count{optarg}; - if (std::stoi(request_count) > 0) { - params_->measurement_request_count = std::stoull(request_count); - } else { - Usage( - "Failed to parse --measurement-request-count. The value must " - "be > 0."); - } - break; - } - case 28: { - params_->triton_server_path = optarg; - break; - } - case 29: { - params_->model_repository_path = optarg; - break; - } - case 30: { - std::string arg = optarg; - int64_t start_id; - int64_t end_id; - size_t pos = 0; - int index = 0; - while (pos != std::string::npos) { - size_t colon_pos = arg.find(":", pos); - if (index > 1) { - Usage( - "Failed to parse --sequence-id-range.
The value does not " - "match ."); - } - if (colon_pos == std::string::npos) { - std::string sequence_id{arg.substr(pos, colon_pos)}; - if (index == 0) { - start_id = std::stoi(sequence_id); - } else { - end_id = std::stoi(sequence_id); - } - pos = colon_pos; - } else { - std::string sequence_id{arg.substr(pos, colon_pos - pos)}; - start_id = std::stoi(sequence_id); - pos = colon_pos + 1; - index++; - } - } - - // Check for invalid inputs - if (start_id < 0 || end_id < 0) { - Usage( - "Failed to parse --sequence-id-range. The range values must be " - ">= 0."); - } else if (start_id > end_id) { - Usage( - "Failed to parse --sequence-id-range. The 'end' value must be " - "greater than 'start' value."); - } - - if (index == 0) { // Only start ID is given - params_->start_sequence_id = start_id; - } else { - params_->start_sequence_id = start_id; - params_->sequence_id_range = end_id - start_id; - } - break; - } - case 31: { - params_->ssl_options.ssl_grpc_use_ssl = true; - break; - } - case 32: { - if (IsFile(optarg)) { - params_->ssl_options.ssl_grpc_root_certifications_file = optarg; - } else { - Usage( - "Failed to parse --ssl-grpc-root-certifications-file. The " - "value must be a valid file path."); - } - break; - } - case 33: { - if (IsFile(optarg)) { - params_->ssl_options.ssl_grpc_private_key_file = optarg; - } else { - Usage( - "Failed to parse --ssl-grpc-private-key-file. The value must " - "be a valid file path."); - } - break; - } - case 34: { - if (IsFile(optarg)) { - params_->ssl_options.ssl_grpc_certificate_chain_file = optarg; - } else { - Usage( - "Failed to parse --ssl-grpc-certificate-chain-file. The value " - "must be a valid file path."); - } - break; - } - case 35: { - if (std::atol(optarg) == 0 || std::atol(optarg) == 1) { - params_->ssl_options.ssl_https_verify_peer = std::atol(optarg); - } else { - Usage( - "Failed to parse --ssl-https-verify-peer. The value must be " - "either 0 or 1."); - } - break; - } - case 36: { - if (std::atol(optarg) == 0 || std::atol(optarg) == 1 || - std::atol(optarg) == 2) { - params_->ssl_options.ssl_https_verify_host = std::atol(optarg); - } else { - Usage( - "Failed to parse --ssl-https-verify-host. The value must be " - "either 0, 1, or 2."); - } - break; - } - case 37: { - if (IsFile(optarg)) { - params_->ssl_options.ssl_https_ca_certificates_file = optarg; - } else { - Usage( - "Failed to parse --ssl-https-ca-certificates-file. The value " - "must be a valid file path."); - } - break; - } - case 38: { - if (IsFile(optarg)) { - params_->ssl_options.ssl_https_client_certificate_file = optarg; - } else { - Usage( - "Failed to parse --ssl-https-client-certificate-file. The " - "value must be a valid file path."); - } - break; - } - case 39: { - if (std::string(optarg) == "PEM" || std::string(optarg) == "DER") { - params_->ssl_options.ssl_https_client_certificate_type = optarg; - } else { - Usage( - "Failed to parse --ssl-https-client-certificate-type. " - "Unsupported type provided: '" + - std::string{optarg} + - "'. The available options are 'PEM' or 'DER'."); - } - break; - } - case 40: { - if (IsFile(optarg)) { - params_->ssl_options.ssl_https_private_key_file = optarg; - } else { - Usage( - "Failed to parse --ssl-https-private-key-file. The value must " - "be a valid file path."); - } - break; - } - case 41: { - if (std::string(optarg) == "PEM" || std::string(optarg) == "DER") { - params_->ssl_options.ssl_https_private_key_type = optarg; - } else { - Usage( - "Failed to parse --ssl-https-private-key-type. 
Unsupported " - "type provided: '" + - std::string{optarg} + - "'. The available options are 'PEM' or 'DER'."); - } - break; - } - case 42: { - params_->verbose_csv = true; - break; - } - case 43: { - params_->enable_mpi = true; - break; - } - case 44: { - std::string trace_level{optarg}; - if (trace_level == "OFF" || trace_level == "TIMESTAMPS" || - trace_level == "TENSORS") { - params_->trace_options["trace_level"] = {trace_level}; - } else { - Usage( - "Failed to parse --trace-level. Unsupported type provided: '" + - trace_level + - "'. The available options are 'OFF', 'TIMESTAMPS', or " - "'TENSORS'."); - } - break; - } - case 45: { - params_->trace_options["trace_rate"] = {optarg}; - break; - } - case 46: { - std::string trace_count{optarg}; - if (std::stoi(trace_count) >= -1) { - params_->trace_options["trace_count"] = {trace_count}; - } else { - Usage( - "Failed to parse --trace-count. The value must be >= 0 or set " - "to -1 (default)."); - } - break; - } - case 47: { - std::string log_frequency{optarg}; - if (std::stoi(log_frequency) >= 0) { - params_->trace_options["log_frequency"] = {log_frequency}; - } else { - Usage("Failed to parse --log-frequency. The value must be >= 0."); - } - break; - } - case 48: { - params_->should_collect_metrics = true; - break; - } - case 49: { - params_->metrics_url = optarg; - params_->metrics_url_specified = true; - break; - } - case 50: { - std::string metrics_interval_ms{optarg}; - if (std::stoi(metrics_interval_ms) > 0) { - params_->metrics_interval_ms = std::stoull(metrics_interval_ms); - params_->metrics_interval_ms_specified = true; - } else { - Usage( - "Failed to parse --metrics-interval. The value must be > 0 " - "msecs."); - } - break; - } - case 51: { - params_->sequence_length_variation = std::stod(optarg); - break; - } - case 52: { - std::string arg = optarg; - - // Remove all spaces in the string - arg.erase( - std::remove_if(arg.begin(), arg.end(), ::isspace), arg.end()); - - std::stringstream ss(arg); - while (ss.good()) { - std::string model_name; - std::string model_version{""}; - std::string tmp_model_name; - - getline(ss, tmp_model_name, ','); - - size_t colon_pos = tmp_model_name.find(":"); - - if (colon_pos == std::string::npos) { - model_name = tmp_model_name; - } else { - model_name = tmp_model_name.substr(0, colon_pos); - model_version = tmp_model_name.substr(colon_pos + 1); - } - - params_->bls_composing_models.push_back( - {model_name, model_version}); - } - break; - } - case 53: { - params_->serial_sequences = true; - break; - } - case 54: { - cb::TensorFormat input_tensor_format{ParseTensorFormat(optarg)}; - if (input_tensor_format == cb::TensorFormat::UNKNOWN) { - Usage( - "Failed to parse --input-tensor-format. Unsupported type " - "provided: '" + - std::string{optarg} + - "'. The available options are 'binary' or 'json'."); - } - params_->input_tensor_format = input_tensor_format; - break; - } - case 55: { - cb::TensorFormat output_tensor_format{ParseTensorFormat(optarg)}; - if (output_tensor_format == cb::TensorFormat::UNKNOWN) { - Usage( - "Failed to parse --output-tensor-format. Unsupported type " - "provided: '" + - std::string{optarg} + - "'. The available options are 'binary' or 'json'."); - } - params_->output_tensor_format = output_tensor_format; - break; - } - case 56: { - PrintVersion(); - break; - } - case 57: { - std::string profile_export_file{optarg}; - if (IsFile(profile_export_file) || IsDirectory(profile_export_file)) { - Usage( - "Failed to parse --profile-export-file. 
Path must not already " - "exist."); - } - params_->profile_export_file = profile_export_file; - break; - } - case 58: { - params_->is_using_periodic_concurrency_mode = true; - std::string arg = optarg; - std::vector<std::string> values{SplitString(arg)}; - if (values.size() < 2) { - Usage( - "Failed to parse --periodic-concurrency-range. Both <start> " - "and <end> values must be provided."); - } else if (values.size() > 3) { - Usage( - "Failed to parse --periodic-concurrency-range. The value does " - "not match <start:end:step>."); - } - - for (size_t i = 0; i < values.size(); ++i) { - uint64_t val = std::stoull(values[i]); - if (i == 0) { - params_->periodic_concurrency_range.start = val; - } else if (i == 1) { - params_->periodic_concurrency_range.end = val; - } else if (i == 2) { - params_->periodic_concurrency_range.step = val; - } - } - - Range<uint64_t> range{params_->periodic_concurrency_range}; - if (range.step == 0) { - Usage( - "Failed to parse --periodic-concurrency-range. The <step> " - "value must be > 0."); - } else if (range.start > range.end) { - Usage( - "Failed to parse --periodic-concurrency-range. The <start> " - "must be <= <end>."); - } else if ((range.end - range.start) % range.step != 0) { - Usage( - "Failed to parse --periodic-concurrency-range. The <step> " - "value must be a factor of the range size (<end> - <start>)."); - } - break; - } - case 59: { - std::string request_period{optarg}; - if (std::stoi(request_period) > 0) { - params_->request_period = std::stoull(request_period); - } else { - Usage("Failed to parse --request-period. The value must be > 0"); - } - break; - } - case 60: { - std::string arg = optarg; - std::vector<std::string> values{SplitString(arg)}; - if (values.size() != 3) { - Usage( - "Failed to parse --request-parameter. The value does not match " - "<name:value:type>."); - } - - std::for_each(values.begin(), values.end(), ToLowerCase); - std::string name{values[0]}; - std::string value{values[1]}; - std::string type{values[2]}; - - cb::RequestParameter param; - param.name = name; - param.value = value; - param.type = type; - params_->request_parameters[name] = param; - break; - } - case 61: { - params_->endpoint = optarg; - break; - } - case 62: { - if (std::stoi(optarg) < 0) { - Usage("Failed to parse --request-count. The value must be > 0."); - } - params_->request_count = std::stoi(optarg); - break; - } - case 'v': - params_->extra_verbose = params_->verbose; - params_->verbose = true; - break; - case 'z': - params_->zero_input = true; - break; - case 'd': - params_->using_old_options = true; - params_->dynamic_concurrency_mode = true; - break; - case 'u': - params_->url_specified = true; - params_->url = optarg; - break; - case 'm': - params_->model_name = optarg; - break; - case 'x': - params_->model_version = optarg; - break; - case 'b': { - std::string batch_size{optarg}; - if (std::stoi(batch_size) > 0) { - params_->batch_size = std::stoull(batch_size); - params_->using_batch_size = true; - } else { - Usage("Failed to parse -b (batch size). The value must be > 0."); - } - break; - }
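// The <start:end:step> convention shared by --concurrency-range,
// --request-rate-range, and --periodic-concurrency-range above can be
// restated compactly. A sketch assuming the same colon-separated syntax
// (helper name and error handling are illustrative; the removed code used
// its own SplitString() and Usage() helpers):
#include <array>
#include <cstddef>
#include <cstdint>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>

std::array<uint64_t, 3> ParseRange(const std::string& arg)
{
  std::vector<std::string> values;
  std::stringstream ss(arg);
  std::string token;
  while (std::getline(ss, token, ':')) {
    values.push_back(token);
  }
  if (values.size() > 3) {
    throw std::invalid_argument("value does not match <start:end:step>");
  }
  std::array<uint64_t, 3> range{1, 1, 1};  // start, end, step defaults
  for (size_t i = 0; i < values.size(); ++i) {
    range[i] = std::stoull(values[i]);
  }
  // The periodic-concurrency invariants above, restated: <step> must be
  // nonzero, <start> <= <end>, and <step> must divide (<end> - <start>)
  // so the sweep lands exactly on <end>.
  if (range[2] == 0 || range[0] > range[1] ||
      (range[1] - range[0]) % range[2] != 0) {
    throw std::invalid_argument("invalid <start:end:step> combination");
  }
  return range;
}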
- case 't': - params_->using_old_options = true; - params_->concurrent_request_count = std::atoi(optarg); - break; - case 'i': - params_->protocol = ParseProtocol(optarg); - break; - case 'H': { - std::string arg = optarg; - std::string header = arg.substr(0, arg.find(":")); - (*params_->http_headers)[header] = arg.substr(header.size() + 1); - break; - } - case 'c': - params_->using_old_options = true; - params_->max_concurrency = std::atoi(optarg); - break; - case 'f': - params_->filename = optarg; - break; - case '?': - Usage(); - break; - } - } - catch (const std::invalid_argument& ia) { - if (opt >= 'A') { // short options - Usage( - "Failed to parse -" + std::string{(char)opt} + - ". Invalid value provided: " + std::string{optarg}); - } else { - Usage( - "Failed to parse --" + std::string{long_options[opt].name} + - ". Invalid value provided: " + std::string{optarg}); - } - } - } - - params_->mpi_driver = std::shared_ptr<MPIDriver>{ - std::make_shared<MPIDriver>(params_->enable_mpi)}; - params_->mpi_driver->MPIInit(&argc, &argv); - - if (!params_->url_specified && - (params_->protocol == cb::ProtocolType::GRPC)) { - if (params_->kind == cb::BackendKind::TRITON) { - params_->url = "localhost:8001"; - } else if (params_->kind == cb::BackendKind::TENSORFLOW_SERVING) { - params_->url = "localhost:8500"; - } - } - - // Overriding the max_threads default for request_rate search - if (!params_->max_threads_specified && params_->targeting_concurrency()) { - params_->max_threads = - std::max(DEFAULT_MAX_THREADS, params_->concurrency_range.end); - } - - if (params_->using_custom_intervals) { - // Will be using user-provided time intervals, hence no control variable. - params_->search_mode = SearchMode::NONE; - } - - // When the request-count feature is enabled, override the measurement mode to - // be count windows with a window size of the requested count - if (params_->request_count) { - params_->measurement_mode = MeasurementMode::COUNT_WINDOWS; - params_->measurement_request_count = params_->request_count; - } -} - -void -CLParser::VerifyOptions() -{ - if (params_->model_name.empty()) { - Usage("Failed to parse -m (model name). The value must be specified."); - } - if (params_->concurrency_range.start <= 0 || - params_->concurrent_request_count < 0) { - Usage("The start of the search range must be > 0"); - } - if (params_->request_rate_range[SEARCH_RANGE::kSTART] <= 0) { - Usage( - "Failed to parse --request-rate-range. The start of the search range " - "must be > 0."); - } - if (params_->protocol == cb::ProtocolType::UNKNOWN) { - Usage( - "Failed to parse -i (protocol). The value should be either HTTP or " - "gRPC."); - } - if (params_->streaming && (params_->protocol != cb::ProtocolType::GRPC)) { - Usage("Streaming is only allowed with gRPC protocol."); - } - if (params_->using_grpc_compression && - (params_->protocol != cb::ProtocolType::GRPC)) { - Usage("Using compression algorithm is only allowed with gRPC protocol."); - } - if (params_->sequence_length_variation < 0.0) { - Usage( - "Failed to parse --sequence-length-variation. The value must be >= " - "0.0."); - } - if (params_->start_sequence_id == 0) { - params_->start_sequence_id = 1; - std::cerr << "WARNING: using an invalid start sequence id. Perf Analyzer" - << " will use default value if it is measuring on sequence model." - << std::endl; - } - if (params_->percentile != -1 && - (params_->percentile > 99 || params_->percentile < 1)) { - Usage( - "Failed to parse --percentile.
The value must be -1 for not reporting " - "or in range (0, 100)."); - } - if (params_->zero_input && !params_->user_data.empty()) { - Usage("The -z flag cannot be set when --data-directory is provided."); - } - if (params_->async && params_->forced_sync) { - Usage("Cannot specify --async and --sync simultaneously."); - } - - if (params_->using_concurrency_range && params_->using_old_options) { - Usage("Cannot use deprecated options with --concurrency-range."); - } else if (params_->using_old_options) { - if (params_->dynamic_concurrency_mode) { - params_->concurrency_range.end = params_->max_concurrency; - } - params_->concurrency_range.start = params_->concurrent_request_count; - } - - if (params_->using_request_rate_range && params_->using_old_options) { - Usage("Cannot use concurrency options with --request-rate-range."); - } - - std::vector load_modes{ - params_->is_using_periodic_concurrency_mode, - params_->using_concurrency_range, params_->using_request_rate_range, - params_->using_custom_intervals}; - if (std::count(load_modes.begin(), load_modes.end(), true) > 1) { - Usage( - "Cannot specify more then one inference load mode. Please choose only " - "one of the following modes: --concurrency-range, " - "--periodic-concurrency-range, --request-rate-range, or " - "--request-intervals."); - } - - if (params_->is_using_periodic_concurrency_mode && !params_->streaming) { - Usage( - "The --periodic-concurrency-range option requires bi-directional gRPC " - "streaming."); - } - - if (params_->is_using_periodic_concurrency_mode && - (params_->profile_export_file == "")) { - Usage( - "Must provide --profile-export-file when using the " - "--periodic-concurrency-range option."); - } - - if (params_->is_using_periodic_concurrency_mode) { - if (params_->periodic_concurrency_range.end == pa::NO_LIMIT) { - std::cerr - << "WARNING: The maximum attainable concurrency will be limited by " - "max_threads specification." - << std::endl; - params_->periodic_concurrency_range.end = params_->max_threads; - } else { - if (params_->max_threads_specified) { - std::cerr << "WARNING: Overriding max_threads specification to ensure " - "requested concurrency range." 
- << std::endl; - } - params_->max_threads = std::max( - params_->max_threads, params_->periodic_concurrency_range.end); - } - } - - if (params_->request_parameters.size() > 0 && - params_->protocol != cb::ProtocolType::GRPC) { - Usage( - "The --request-parameter option is currently only supported by gRPC " - "protocol."); - } - - if (params_->using_request_rate_range && params_->mpi_driver->IsMPIRun() && - (params_->request_rate_range[SEARCH_RANGE::kEND] != 1.0 || - params_->request_rate_range[SEARCH_RANGE::kSTEP] != 1.0)) { - Usage("Cannot specify --request-rate-range when in multi-model mode."); - } - - if (params_->using_custom_intervals && params_->using_old_options) { - Usage("Cannot use deprecated options with --request-intervals."); - } - - if ((params_->using_custom_intervals) && - (params_->using_request_rate_range || params_->using_concurrency_range)) { - Usage( - "Cannot use --concurrency-range or --request-rate-range " - "along with --request-intervals."); - } - - if (params_->using_concurrency_range && params_->mpi_driver->IsMPIRun() && - (params_->concurrency_range.end != 1 || - params_->concurrency_range.step != 1)) { - Usage("Cannot specify --concurrency-range when in multi-model mode."); - } - - if (((params_->concurrency_range.end == NO_LIMIT) || - (params_->request_rate_range[SEARCH_RANGE::kEND] == - static_cast(NO_LIMIT))) && - (params_->latency_threshold_ms == NO_LIMIT)) { - Usage( - "The end of the search range and the latency limit can not be both 0 " - "(or 0.0) simultaneously"); - } - - if (((params_->concurrency_range.end == NO_LIMIT) || - (params_->request_rate_range[SEARCH_RANGE::kEND] == - static_cast(NO_LIMIT))) && - (params_->search_mode == SearchMode::BINARY)) { - Usage("The end of the range can not be 0 (or 0.0) for binary search mode."); - } - - if ((params_->search_mode == SearchMode::BINARY) && - (params_->latency_threshold_ms == NO_LIMIT)) { - Usage("The --latency-threshold cannot be 0 for binary search mode."); - } - - if (((params_->concurrency_range.end < params_->concurrency_range.start) || - (params_->request_rate_range[SEARCH_RANGE::kEND] < - params_->request_rate_range[SEARCH_RANGE::kSTART])) && - (params_->search_mode == SearchMode::BINARY)) { - Usage( - "The end of the range can not be less than start of the range for " - "binary search mode."); - } - - if (params_->request_count != 0) { - if (params_->using_concurrency_range) { - if (params_->request_count < params_->concurrency_range.start) { - Usage("request-count can not be less than concurrency"); - } - if (params_->concurrency_range.start < params_->concurrency_range.end) { - Usage( - "request-count not supported with multiple concurrency values in " - "one run"); - } - } - if (params_->using_request_rate_range) { - if (params_->request_count < - static_cast(params_->request_rate_range[0])) { - Usage("request-count can not be less than request-rate"); - } - if (params_->request_rate_range[SEARCH_RANGE::kSTART] < - params_->request_rate_range[SEARCH_RANGE::kEND]) { - Usage( - "request-count not supported with multiple request-rate values in " - "one run"); - } - } - } - - if (params_->kind == cb::TENSORFLOW_SERVING) { - if (params_->protocol != cb::ProtocolType::GRPC) { - Usage( - "perf_analyzer supports only grpc protocol for TensorFlow Serving."); - } else if (params_->streaming) { - Usage("perf_analyzer does not support streaming for TensorFlow Serving."); - } else if (params_->async) { - Usage("perf_analyzer does not support async API for TensorFlow Serving."); - } else if 
(!params_->using_batch_size) { - params_->batch_size = 0; - } - } else if (params_->kind == cb::TORCHSERVE) { - if (params_->user_data.empty()) { - Usage( - "--input-data should be provided with a json file with " - "input data for torchserve."); - } - } - - if (params_->kind == cb::BackendKind::TRITON_C_API) { - if (params_->triton_server_path.empty()) { - Usage( - "--triton-server-path should not be empty when using " - "service-kind=triton_c_api."); - } - - if (params_->model_repository_path.empty()) { - Usage( - "--model-repository should not be empty when using " - "service-kind=triton_c_api."); - } - - if (params_->async) { - Usage( - "Async mode is not supported by triton_c_api service " - "kind."); - } - - params_->protocol = cb::ProtocolType::UNKNOWN; - } - - if (params_->kind == cb::BackendKind::OPENAI) { - if (params_->user_data.empty()) { - Usage("Must supply --input-data for OpenAI service kind."); - } - if (params_->endpoint.empty()) { - Usage( - "Must supply --endpoint for OpenAI service kind. For example, " - "\"v1/chat/completions\"."); - } - if (!params_->async) { - Usage("Only async mode is currently supported for OpenAI service-kind"); - } - if (params_->batch_size != 1) { - Usage("Batching is not currently supported with OpenAI service-kind"); - } - } - - if (params_->should_collect_metrics && - params_->kind != cb::BackendKind::TRITON) { - Usage( - "Server-side metric collection is only supported with Triton client " - "backend."); - } - - if (params_->metrics_url_specified && - params_->should_collect_metrics == false) { - Usage( - "Must specify --collect-metrics when using the --metrics-url option."); - } - - if (params_->metrics_interval_ms_specified && - params_->should_collect_metrics == false) { - Usage( - "Must specify --collect-metrics when using the --metrics-interval " - "option."); - } - - if (params_->should_collect_metrics && !params_->metrics_url_specified) { - // Update the default metrics URL to be associated with the input URL - // instead of localhost - // - size_t colon_pos = params_->url.find(':'); - if (colon_pos != std::string::npos) { - params_->metrics_url = - params_->url.substr(0, colon_pos) + ":8002/metrics"; - } - } -} - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/command_line_parser.h b/src/c++/perf_analyzer/command_line_parser.h deleted file mode 100644 index 461e24e2d..000000000 --- a/src/c++/perf_analyzer/command_line_parser.h +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -#pragma once - -#include -#include -#include -#include - -#include "constants.h" -#include "mpi_utils.h" -#include "perf_utils.h" - -namespace triton { namespace perfanalyzer { - -enum SEARCH_RANGE { kSTART = 0, kEND = 1, kSTEP = 2 }; - -// Perf Analyzer command line parameters. -// PAParams are used to initialize PerfAnalyzer and track configuration -// -struct PerfAnalyzerParameters { - bool verbose = false; - bool extra_verbose = false; - bool streaming = false; - size_t max_threads = 4; - bool max_threads_specified = false; - size_t sequence_length = 20; // average length of a sentence - bool sequence_length_specified = false; - double sequence_length_variation = 20.0; - int32_t percentile = -1; - std::vector<std::string> user_data; - std::unordered_map<std::string, std::vector<int64_t>> input_shapes; - std::vector<cb::ModelIdentifier> bls_composing_models; - uint64_t measurement_window_ms = 5000; - bool using_concurrency_range = false; - Range<uint64_t> concurrency_range{1, 1, 1}; - std::unordered_map<std::string, cb::RequestParameter> request_parameters; - uint64_t latency_threshold_ms = NO_LIMIT; - double stability_threshold = 0.1; - size_t max_trials = 10; - size_t request_count = 0; - bool zero_input = false; - size_t string_length = 128; - std::string string_data; - bool async = false; - bool forced_sync = false; - bool using_request_rate_range = false; - double request_rate_range[3] = {1.0, 1.0, 1.0}; - uint32_t num_of_sequences = 4; - bool serial_sequences = false; - SearchMode search_mode = SearchMode::LINEAR; - Distribution request_distribution = Distribution::CONSTANT; - bool using_custom_intervals = false; - std::string request_intervals_file{""}; - SharedMemoryType shared_memory_type = NO_SHARED_MEMORY; - size_t output_shm_size = 100 * 1024; - clientbackend::BackendKind kind = clientbackend::BackendKind::TRITON; - std::string model_signature_name{"serving_default"}; - bool using_grpc_compression = false; - clientbackend::GrpcCompressionAlgorithm compression_algorithm = - clientbackend::GrpcCompressionAlgorithm::COMPRESS_NONE; - MeasurementMode measurement_mode = MeasurementMode::TIME_WINDOWS; - uint64_t measurement_request_count = 50; - std::string triton_server_path = "/opt/tritonserver"; - std::string model_repository_path; - uint64_t start_sequence_id = 1; - uint64_t sequence_id_range = UINT32_MAX; - clientbackend::SslOptionsBase ssl_options; // gRPC and HTTP SSL options - - // Verbose csv option for including additional information - bool verbose_csv = false; - - // Enable MPI option for using MPI functionality with multi-model mode.
- bool enable_mpi = false; - std::map<std::string, std::vector<std::string>> trace_options; - bool using_old_options = false; - bool dynamic_concurrency_mode = false; - bool url_specified = false; - std::string url{"localhost:8000"}; - std::string endpoint{""}; - std::string model_name; - std::string model_version; - uint64_t batch_size = 1; - bool using_batch_size = false; - int32_t concurrent_request_count = 1; - clientbackend::ProtocolType protocol = clientbackend::ProtocolType::HTTP; - std::shared_ptr<clientbackend::Headers> http_headers{ - new clientbackend::Headers()}; - size_t max_concurrency = 0; - std::string filename{""}; - std::shared_ptr<MPIDriver> mpi_driver; - std::string memory_type{"system"}; // currently not used, to be removed - - // Enable collection of server-side metrics from inference server. - bool should_collect_metrics{false}; - - // The URL to query for server-side inference server metrics. - std::string metrics_url{"localhost:8002/metrics"}; - bool metrics_url_specified{false}; - - // How often, within each measurement window, to query for server-side - // inference server metrics. - uint64_t metrics_interval_ms{1000}; - bool metrics_interval_ms_specified{false}; - - // Return true if targeting concurrency - // - bool targeting_concurrency() const - { - return ( - using_concurrency_range || using_old_options || - !(using_request_rate_range || using_custom_intervals || - is_using_periodic_concurrency_mode)); - } - - // Sets the threshold for PA client overhead. - // Overhead is defined as the percentage of time when PA is doing work and - // requests are not outstanding to the triton server. If the overhead - // percentage exceeds the threshold, a warning is displayed. - // - double overhead_pct_threshold{50.0}; - - // Triton inference request input tensor format. - cb::TensorFormat input_tensor_format{cb::TensorFormat::BINARY}; - - // Triton inference response output tensor format. - cb::TensorFormat output_tensor_format{cb::TensorFormat::BINARY}; - - // The profile export file path. - std::string profile_export_file{""}; - - bool is_using_periodic_concurrency_mode{false}; - Range<uint64_t> periodic_concurrency_range{1, 1, 1}; - uint64_t request_period{10}; -}; - -using PAParamsPtr = std::shared_ptr<PerfAnalyzerParameters>; - -class CLParser { - public: - CLParser() : params_(new PerfAnalyzerParameters{}) {} - - // Parse command line arguments into a parameters struct - // - PAParamsPtr Parse(int argc, char** argv); - - private: - char** argv_; - int argc_; - PAParamsPtr params_; - - std::string FormatMessage(std::string str, int offset) const; - virtual void Usage(const std::string& msg = std::string()); - void PrintVersion(); - void ParseCommandLine(int argc, char** argv); - void VerifyOptions(); -}; -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/concurrency_ctx_id_tracker.h b/src/c++/perf_analyzer/concurrency_ctx_id_tracker.h deleted file mode 100644 index 9699fa30e..000000000 --- a/src/c++/perf_analyzer/concurrency_ctx_id_tracker.h +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution.
-// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#pragma once - -#include "base_queue_ctx_id_tracker.h" - -namespace triton { namespace perfanalyzer { - -// Context ID Tracker that always returns context 0, but ensures that only X -// requests are outstanding at a time -// -class ConcurrencyCtxIdTracker : public BaseQueueCtxIdTracker { - public: - ConcurrencyCtxIdTracker() = default; - void Reset(size_t count) override - { - Clear(); - - for (size_t i = 0; i < count; ++i) { - free_ctx_ids_.push(0); - } - } -}; - -}}; // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/concurrency_manager.cc b/src/c++/perf_analyzer/concurrency_manager.cc deleted file mode 100644 index 283861846..000000000 --- a/src/c++/perf_analyzer/concurrency_manager.cc +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2020-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
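// The ConcurrencyCtxIdTracker above pushes `count` copies of context id 0,
// so the queue length, not the id values, is what bounds the number of
// outstanding requests. A self-contained sketch of the base-queue behavior
// it inherits (the real base class lived in the removed
// base_queue_ctx_id_tracker.h; names here follow that file's style but are
// reconstructed, not copied):
#include <cstddef>
#include <cstdint>
#include <queue>
#include <stdexcept>

class QueueCtxIdTracker {
 public:
  // Reset() provisions `count` slots; Get()/Restore() check a slot out
  // and back in around each in-flight request.
  void Reset(std::size_t count)
  {
    free_ctx_ids_ = {};
    for (std::size_t i = 0; i < count; ++i) {
      free_ctx_ids_.push(0);  // concurrency flavor: always context 0
    }
  }
  bool IsAvailable() const { return !free_ctx_ids_.empty(); }
  uint32_t Get()
  {
    if (free_ctx_ids_.empty()) {
      throw std::runtime_error("no free context id");
    }
    uint32_t id = free_ctx_ids_.front();
    free_ctx_ids_.pop();
    return id;
  }
  void Restore(uint32_t id) { free_ctx_ids_.push(id); }

 private:
  std::queue<uint32_t> free_ctx_ids_;
};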
- -#include "concurrency_manager.h" - -#include - -namespace triton { namespace perfanalyzer { - -ConcurrencyManager::~ConcurrencyManager() -{ - // The destruction of derived class should wait for all the request generator - // threads to finish - StopWorkerThreads(); -} - -cb::Error -ConcurrencyManager::Create( - const bool async, const bool streaming, const int32_t batch_size, - const size_t max_threads, const size_t max_concurrency, - const SharedMemoryType shared_memory_type, const size_t output_shm_size, - const std::shared_ptr& parser, - const std::shared_ptr& factory, - std::unique_ptr* manager, - const std::unordered_map& - request_parameters) -{ - std::unique_ptr local_manager(new ConcurrencyManager( - async, streaming, batch_size, max_threads, max_concurrency, - shared_memory_type, output_shm_size, parser, factory, - request_parameters)); - - *manager = std::move(local_manager); - - return cb::Error::Success; -} - -ConcurrencyManager::ConcurrencyManager( - const bool async, const bool streaming, const int32_t batch_size, - const size_t max_threads, const size_t max_concurrency, - const SharedMemoryType shared_memory_type, const size_t output_shm_size, - const std::shared_ptr& parser, - const std::shared_ptr& factory, - const std::unordered_map& - request_parameters) - : LoadManager( - async, streaming, batch_size, max_threads, shared_memory_type, - output_shm_size, parser, factory, request_parameters), - execute_(true), max_concurrency_(max_concurrency) -{ - threads_config_.reserve(max_threads); -} - -void -ConcurrencyManager::InitManagerFinalize() -{ - if (on_sequence_model_) { - sequence_manager_->InitSequenceStatuses(max_concurrency_); - } -} - -cb::Error -ConcurrencyManager::ChangeConcurrencyLevel( - const size_t concurrent_request_count, const size_t request_count) -{ - PauseSequenceWorkers(); - ReconfigThreads(concurrent_request_count, request_count); - ResumeSequenceWorkers(); - - std::cout << "Request concurrency: " << concurrent_request_count << std::endl; - return cb::Error::Success; -} - -void -ConcurrencyManager::PauseSequenceWorkers() -{ - if (on_sequence_model_) { - execute_ = false; - // Wait to see all threads are paused. - for (auto& thread_config : threads_config_) { - while (!thread_config->is_paused_) { - std::this_thread::sleep_for(std::chrono::milliseconds(10)); - } - } - } -} - -void -ConcurrencyManager::ReconfigThreads( - size_t concurrent_request_count, size_t request_count) -{ - // Always prefer to create new threads if the maximum limit has not been met - // - // While operating in synchronous mode, each context can send only one - // request at a time, hence the number of worker threads should be equal to - // the requested concurrency levels. 
- // - while ((concurrent_request_count > threads_.size()) && - (threads_.size() < max_threads_)) { - // Launch new thread for inferencing - threads_stat_.emplace_back(new ThreadStat()); - threads_config_.emplace_back(new ThreadConfig(threads_config_.size())); - - workers_.push_back( - MakeWorker(threads_stat_.back(), threads_config_.back())); - - threads_.emplace_back(&IWorker::Infer, workers_.back()); - } - - { - // Make sure all threads are reconfigured before they are woken up - std::lock_guard lock(wake_mutex_); - - // Compute the new concurrency level for each thread (take floor) - // and spread the remaining value - size_t avg_concurrency = concurrent_request_count / threads_.size(); - size_t threads_add_one = concurrent_request_count % threads_.size(); - - size_t avg_req_count = request_count / threads_.size(); - size_t req_count_add_one = request_count % threads_.size(); - - size_t seq_stat_index_offset = 0; - active_threads_ = 0; - for (size_t i = 0; i < threads_stat_.size(); i++) { - size_t concurrency = avg_concurrency + (i < threads_add_one ? 1 : 0); - - threads_config_[i]->concurrency_ = concurrency; - threads_config_[i]->seq_stat_index_offset_ = seq_stat_index_offset; - - size_t thread_num_reqs = avg_req_count + (i < req_count_add_one ? 1 : 0); - threads_config_[i]->num_requests_ = thread_num_reqs; - - seq_stat_index_offset += concurrency; - - if (concurrency) { - active_threads_++; - } - } - - // TODO REFACTOR TMA-1043 the memory manager should have API to set - // num_active_threads in constructor, as well as overwrite it here - } -} - -void -ConcurrencyManager::ResumeSequenceWorkers() -{ - if (on_sequence_model_) { - execute_ = true; - } - - // Make sure all threads will check their updated concurrency level - wake_signal_.notify_all(); -} - -std::shared_ptr -ConcurrencyManager::MakeWorker( - std::shared_ptr thread_stat, - std::shared_ptr thread_config) -{ - uint32_t id = workers_.size(); - - return std::make_shared( - id, thread_stat, thread_config, parser_, data_loader_, factory_, - on_sequence_model_, async_, max_concurrency_, using_json_data_, - streaming_, batch_size_, wake_signal_, wake_mutex_, active_threads_, - execute_, infer_data_manager_, sequence_manager_); -} - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/concurrency_manager.h b/src/c++/perf_analyzer/concurrency_manager.h deleted file mode 100644 index c6c90f1d1..000000000 --- a/src/c++/perf_analyzer/concurrency_manager.h +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2020-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#pragma once - -#include "concurrency_worker.h" -#include "load_manager.h" - -namespace triton { namespace perfanalyzer { - -#ifndef DOCTEST_CONFIG_DISABLE -class TestConcurrencyManager; -#endif - -//============================================================================== -/// ConcurrencyManager is a helper class to send inference requests to inference -/// server consistently, based on the specified setting, so that the -/// perf_analyzer can measure performance under different concurrency. -/// -/// An instance of concurrency manager will be created at the beginning of the -/// perf_analyzer and it will be used to simulate different load level in -/// respect to number of concurrent infer requests and to collect per-request -/// statistic. -/// -/// Detail: -/// Concurrency Manager will maintain the number of concurrent requests by -/// spawning worker threads that keep sending randomly generated requests to the -/// server. The worker threads will record the start time and end -/// time of each request into a shared vector. -/// -class ConcurrencyManager : public LoadManager { - public: - ~ConcurrencyManager(); - - /// Create a concurrency manager that is responsible to maintain specified - /// load on inference server. - /// \param async Whether to use asynchronous or synchronous API for infer - /// request. - /// \param streaming Whether to use gRPC streaming API for infer request - /// \param batch_size The batch size used for each request. - /// \param max_threads The maximum number of working threads to be spawned. - /// \param max_concurrency The maximum concurrency which will be requested. - /// \param string_length The length of the string to create for input. - /// \param string_data The data to use for generating string input. - /// \param zero_input Whether to fill the input tensors with zero. - /// \param user_data The vector containing path/paths to user-provided data - /// that can be a directory or path to a json data file. - /// \param shared_memory_type The type of shared memory to use for inputs. - /// \param output_shm_size The size in bytes of the shared memory to - /// allocate for the output. - /// \param parser The ModelParser object to get the model details. - /// \param factory The ClientBackendFactory object used to create - /// client to the server. - /// \param manager Returns a new ConcurrencyManager object. - /// \param request_parameters Custom request parameters to send to the server - /// \return cb::Error object indicating success or failure. 
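// Restating the arithmetic ReconfigThreads() uses above: a requested
// concurrency of N spread over T threads gives each thread floor(N/T),
// with the first N % T threads taking one extra, so the per-thread counts
// always sum to N. A tiny sketch of that distribution (function name
// hypothetical):
#include <cstddef>
#include <vector>

std::vector<std::size_t> SpreadConcurrency(std::size_t n, std::size_t threads)
{
  if (threads == 0) {
    return {};
  }
  std::vector<std::size_t> per_thread(threads, n / threads);
  for (std::size_t i = 0; i < n % threads; ++i) {
    per_thread[i] += 1;  // spread the remainder across the first threads
  }
  return per_thread;
}
// e.g. SpreadConcurrency(10, 4) yields {3, 3, 2, 2}, matching the
// avg + add-one arithmetic in the removed manager.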
- static cb::Error Create( - const bool async, const bool streaming, const int32_t batch_size, - const size_t max_threads, const size_t max_concurrency, - const SharedMemoryType shared_memory_type, const size_t output_shm_size, - const std::shared_ptr& parser, - const std::shared_ptr& factory, - std::unique_ptr* manager, - const std::unordered_map& - request_parameters); - - /// Adjusts the number of concurrent requests to be the same as - /// 'concurrent_request_count' (by creating or pausing threads) - /// \param concurent_request_count The number of concurrent requests. - /// \param request_count The number of requests to generate. If 0, then - /// there is no limit, and it will generate until told to stop. - /// \return cb::Error object indicating success or failure. - cb::Error ChangeConcurrencyLevel( - const size_t concurrent_request_count, const size_t request_count = 0); - - protected: - // Makes a new worker - virtual std::shared_ptr MakeWorker( - std::shared_ptr, std::shared_ptr); - - ConcurrencyManager( - const bool async, const bool streaming, const int32_t batch_size, - const size_t max_threads, const size_t max_concurrency, - const SharedMemoryType shared_memory_type, const size_t output_shm_size, - const std::shared_ptr& parser, - const std::shared_ptr& factory, - const std::unordered_map& - request_parameters); - - // The number of worker threads with non-zero concurrencies - size_t active_threads_; - - bool execute_; - - size_t max_concurrency_; - - std::vector> threads_config_; - - private: - void InitManagerFinalize() override; - - // Pause all worker threads that are working on sequences - // - void PauseSequenceWorkers(); - - // Create new threads (if necessary), and then reconfigure all worker threads - // to handle the new concurrent request count - // - void ReconfigThreads(size_t concurrent_request_count, size_t request_count); - - // Restart all worker threads that were working on sequences - // - void ResumeSequenceWorkers(); - -#ifndef DOCTEST_CONFIG_DISABLE - friend TestConcurrencyManager; - - public: - ConcurrencyManager() = default; -#endif -}; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/concurrency_worker.cc b/src/c++/perf_analyzer/concurrency_worker.cc deleted file mode 100644 index 37a562f76..000000000 --- a/src/c++/perf_analyzer/concurrency_worker.cc +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
diff --git a/src/c++/perf_analyzer/concurrency_worker.cc b/src/c++/perf_analyzer/concurrency_worker.cc
deleted file mode 100644
index 37a562f76..000000000
--- a/src/c++/perf_analyzer/concurrency_worker.cc
+++ /dev/null
@@ -1,208 +0,0 @@
-// Copyright 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// * Neither the name of NVIDIA CORPORATION nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "concurrency_worker.h"
-
-#include
-
-#include "client_backend/client_backend.h"
-#include "perf_utils.h"
-
-namespace triton { namespace perfanalyzer {
-
-// Function for worker threads.
-// If the model is a non-sequence model, each worker uses only one context
-// to maintain the concurrency assigned to the worker.
-// If the model is a sequence model, each worker has to use multiple contexts
-// to maintain the (sequence) concurrency assigned to the worker.
-void
-ConcurrencyWorker::Infer()
-{
-  CreateCtxIdTracker();
-  ReserveContexts();
-
-  // Run inferencing until an exit signal is received, to maintain server load.
-  do {
-    if (RunInference()) {
-      break;
-    }
-  } while (true);
-}
-
-bool
-ConcurrencyWorker::RunInference()
-{
-  HandleExecuteOff();
-  if (HandleNoConcurrency()) {
-    return true;
-  }
-  CreateContextsAsNecessary();
-  if (HandleExitConditions()) {
-    return true;
-  }
-  SendInferRequests();
-  if (HandleExitConditions()) {
-    return true;
-  }
-  WaitForResponses();
-  if (HandleExitConditions()) {
-    return true;
-  }
-  return false;
-}
-
-void
-ConcurrencyWorker::CreateCtxIdTracker()
-{
-  bool is_concurrency = true;
-  bool serial_sequences = false;
-  ctx_id_tracker_ = CtxIdTrackerFactory::CreateTracker(
-      is_concurrency, on_sequence_model_, serial_sequences);
-}
-
-void
-ConcurrencyWorker::ReserveContexts()
-{
-  // Reserve the vectors in the case of sequence models. In non-sequence or
-  // synchronous mode only one context will be opened, so there is no need
-  // to reserve.
-  if (on_sequence_model_ && async_) {
-    thread_stat_->contexts_stat_.reserve(max_concurrency_);
-    ctxs_.reserve(max_concurrency_);
-  }
-}
-
-void
-ConcurrencyWorker::HandleExecuteOff()
-{
-  if (on_sequence_model_) {
-    if (!execute_) {
-      // Ensures a clean exit of the sequences
-      CompleteOngoingSequences();
-      WaitForOngoingRequests();
-
-      // Reset Ctx IDs because CompleteOngoingSequences()
-      // has destructive side effects
-      ResetFreeCtxIds();
-
-      // Wait if no request should be sent and it is not exiting
-      thread_config_->is_paused_ = true;
-      std::unique_lock<std::mutex> lock(wake_mutex_);
-      wake_signal_.wait(lock, [this]() { return early_exit || execute_; });
-
-      // TODO REFACTOR TMA-1043 - memory manager should be handling this instead
-      // of here
-      for (auto ctx : ctxs_) {
-        ctx->SetNumActiveThreads(active_threads_);
-      }
-    }
-  }
-  thread_config_->is_paused_ = false;
-}
-
-bool
-ConcurrencyWorker::HandleNoConcurrency()
-{
-  // Only interact with the synchronous mechanism if the worker should wait
-  if (thread_config_->concurrency_ == 0) {
-    // Wait if no request should be sent and it is not exiting
-    std::unique_lock<std::mutex> lock(wake_mutex_);
-    wake_signal_.wait(lock, [this]() {
-      return early_exit || (thread_config_->concurrency_ > 0);
-    });
-    // Stop executing if concurrency is 0 and early exit is requested
-    if (early_exit && thread_config_->concurrency_ == 0) {
-      return true;
-    }
-  }
-  return false;
-}
-
-void
-ConcurrencyWorker::CreateContextsAsNecessary()
-{
-  // If the model is a non-sequence model, use one InferContext to
-  // maintain concurrency for this thread.
-  size_t active_ctx_cnt = on_sequence_model_ ? thread_config_->concurrency_ : 1;
-
-  if (active_ctx_cnt > ctxs_.size()) {
-    while (active_ctx_cnt > ctxs_.size()) {
-      CreateContext();
-    }
-    ResetFreeCtxIds();
-  }
-
-  // TODO REFACTOR TMA-1043 -- this shouldn't be handled here
-  for (auto ctx : ctxs_) {
-    ctx->SetNumActiveThreads(active_threads_);
-  }
-}
-
-void
-ConcurrencyWorker::SendInferRequests()
-{
-  while (ctx_id_tracker_->IsAvailable() && execute_ && !ShouldExit()) {
-    uint32_t ctx_id = GetCtxId();
-    SendInferRequest(ctx_id);
-    RestoreFreeCtxId(ctx_id);
-  }
-}
-
-
-void
-ConcurrencyWorker::WaitForResponses()
-{
-  if (async_) {
-    {
-      // If async, then wait for a signal from the callback.
-      std::unique_lock<std::mutex> lk(cb_mtx_);
-      thread_stat_->idle_timer.Start();
-      cb_cv_.wait(lk, [this] {
-        if (notified_) {
-          notified_ = false;
-          return true;
-        }
-        return false;
-      });
-      thread_stat_->idle_timer.Stop();
-    }
-  }
-}
-
-void
-ConcurrencyWorker::ResetFreeCtxIds()
-{
-  std::lock_guard<std::mutex> lock(cb_mtx_);
-  ctx_id_tracker_->Reset(thread_config_->concurrency_);
-}
-
-uint32_t
-ConcurrencyWorker::GetSeqStatIndex(uint32_t ctx_id)
-{
-  return (thread_config_->seq_stat_index_offset_ + ctx_id);
-}
-
-}} // namespace triton::perfanalyzer
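HandleExecuteOff and HandleNoConcurrency above both park the worker on `wake_signal_` with a predicate wait. A self-contained sketch of that idiom, using stand-in flags rather than the removed perf_analyzer types:

```cpp
#include <condition_variable>
#include <mutex>

// Minimal stand-ins for the shared state the workers above consult.
std::mutex wake_mutex;
std::condition_variable wake_signal;
bool execute = false;     // set true by the manager to resume workers
bool early_exit = false;  // set true to shut everything down

void
WorkerPausePoint()
{
  std::unique_lock<std::mutex> lock(wake_mutex);
  // wait() releases the lock while sleeping and re-checks the predicate on
  // every notify, which makes spurious wakeups harmless.
  wake_signal.wait(lock, [] { return early_exit || execute; });
}

void
ManagerResume()
{
  {
    std::lock_guard<std::mutex> lock(wake_mutex);
    execute = true;
  }
  wake_signal.notify_all();  // wake every paused worker
}
```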
diff --git a/src/c++/perf_analyzer/concurrency_worker.h b/src/c++/perf_analyzer/concurrency_worker.h
deleted file mode 100644
index 4645f07af..000000000
--- a/src/c++/perf_analyzer/concurrency_worker.h
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// * Neither the name of NVIDIA CORPORATION nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#pragma once
-
-#include
-
-#include "load_worker.h"
-#include "sequence_manager.h"
-#include "thread_config.h"
-
-namespace triton { namespace perfanalyzer {
-
-
-#ifndef DOCTEST_CONFIG_DISABLE
-class NaggyMockConcurrencyWorker;
-#endif
-
-/// Worker thread for the ConcurrencyManager
-///
-/// The worker maintains concurrency in different ways:
-/// For sequence models, multiple contexts must be created for multiple
-/// concurrent sequences.
-///
-/// For non-sequence models, one context can send out multiple requests
-/// at the same time. Thus the worker uses a single context, as every infer
-/// context implicitly creates a worker thread.
-///
-class ConcurrencyWorker : public LoadWorker {
- public:
-  ConcurrencyWorker(
-      uint32_t id, std::shared_ptr<ThreadStat> thread_stat,
-      std::shared_ptr<ThreadConfig> thread_config,
-      const std::shared_ptr<ModelParser> parser,
-      std::shared_ptr<DataLoader> data_loader,
-      const std::shared_ptr<cb::ClientBackendFactory> factory,
-      const bool on_sequence_model, const bool async,
-      const size_t max_concurrency, const bool using_json_data,
-      const bool streaming, const int32_t batch_size,
-      std::condition_variable& wake_signal, std::mutex& wake_mutex,
-      size_t& active_threads, bool& execute,
-      const std::shared_ptr<IInferDataManager>& infer_data_manager,
-      std::shared_ptr<SequenceManager> sequence_manager)
-      : LoadWorker(
-            id, thread_stat, thread_config, parser, data_loader, factory,
-            on_sequence_model, async, streaming, batch_size, using_json_data,
-            wake_signal, wake_mutex, execute, infer_data_manager,
-            sequence_manager),
-        max_concurrency_(max_concurrency), active_threads_(active_threads)
-  {
-  }
-
-  virtual void Infer() override;
-
- protected:
-  bool RunInference();
-
-  void CreateCtxIdTracker();
-
-  // Reserve vector size for contexts
-  void ReserveContexts();
-
- private:
-  const size_t max_concurrency_;
-  // TODO REFACTOR TMA-1020 can we decouple this thread from the total count of
-  // threads?
- size_t& active_threads_; - - // Handle the case where execute_ is false - void HandleExecuteOff(); - - // Handle the case where this thread is configured to do nothing - // Returns true if an exit condition was met - bool HandleNoConcurrency(); - - // Create and populate contexts if needed - void CreateContextsAsNecessary(); - - // Send out the desired concurrency of requests - void SendInferRequests(); - - void WaitForResponses(); - - void ResetFreeCtxIds(); - - uint32_t GetSeqStatIndex(uint32_t ctx_id) override; - - void CreateContextFinalize(std::shared_ptr ctx) override - { - ctx->RegisterAsyncCallbackFinalize(std::bind( - &ConcurrencyWorker::AsyncCallbackFinalize, this, - std::placeholders::_1)); - } - -#ifndef DOCTEST_CONFIG_DISABLE - friend NaggyMockConcurrencyWorker; -#endif -}; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/constants.h b/src/c++/perf_analyzer/constants.h deleted file mode 100644 index fbcd911b8..000000000 --- a/src/c++/perf_analyzer/constants.h +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#pragma once - -#include -#include - -#define STRINGIFY_(x) #x -#define STRINGIFY(x) STRINGIFY_(x) -namespace triton { namespace perfanalyzer { - -const std::string SHA{STRINGIFY(GIT_SHA)}; -const std::string VERSION{STRINGIFY(PERF_ANALYZER_VERSION)}; - -constexpr static const uint32_t SUCCESS = 0; - -constexpr static const uint32_t STABILITY_ERROR = 2; -constexpr static const uint32_t OPTION_ERROR = 3; - -constexpr static const uint32_t GENERIC_ERROR = 99; -constexpr static const size_t DEFAULT_MAX_THREADS = 16; - -const double DELAY_PCT_THRESHOLD{1.0}; - -/// Different measurement modes possible. 
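The `STRINGIFY_`/`STRINGIFY` pair at the top of constants.h is the standard two-level stringize idiom: the extra level forces `GIT_SHA` and `PERF_ANALYZER_VERSION` (injected as `-D` definitions at build time) to expand before `#` stringizes them. A small hedged demonstration; the `abc123` value is illustrative. (The MeasurementMode enum that the comment above introduces follows in the next hunk.)

```cpp
#define STRINGIFY_(x) #x
#define STRINGIFY(x) STRINGIFY_(x)

#define GIT_SHA abc123  // stand-in; real builds would pass -DGIT_SHA=...

// One level stringizes the macro *name*:
static_assert(sizeof(STRINGIFY_(GIT_SHA)) == sizeof("GIT_SHA"), "token");
// Two levels expand first, then stringize the *value*:
static_assert(sizeof(STRINGIFY(GIT_SHA)) == sizeof("abc123"), "value");
```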
-enum MeasurementMode { TIME_WINDOWS = 0, COUNT_WINDOWS = 1 };
-
-}} // namespace triton::perfanalyzer
diff --git a/src/c++/perf_analyzer/ctx_id_tracker_factory.h b/src/c++/perf_analyzer/ctx_id_tracker_factory.h
deleted file mode 100644
index 0a455fc9c..000000000
--- a/src/c++/perf_analyzer/ctx_id_tracker_factory.h
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// * Neither the name of NVIDIA CORPORATION nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#pragma once
-
-#include <memory>
-
-#include "concurrency_ctx_id_tracker.h"
-#include "fifo_ctx_id_tracker.h"
-#include "rand_ctx_id_tracker.h"
-
-namespace triton { namespace perfanalyzer {
-
-// Factory for creating Context ID trackers
-//
-class CtxIdTrackerFactory {
- public:
-  CtxIdTrackerFactory() = delete;
-
-  /// Creates and returns a Context Id Tracker
-  ///
-  /// \param is_concurrency True if targeting Concurrency
-  /// \param is_sequence_model True if the model is a sequence model
-  /// \param serial_sequences True if in serial sequence mode
-  ///
-  static std::shared_ptr CreateTracker(
-      bool is_concurrency, bool is_sequence_model, bool serial_sequences)
-  {
-    if (is_concurrency) {
-      if (is_sequence_model) {
-        return std::make_shared();
-      } else {
-        return std::make_shared();
-      }
-    } else {
-      if (is_sequence_model && serial_sequences) {
-        return std::make_shared();
-      } else {
-        return std::make_shared();
-      }
-    }
-  }
-};
-
-}} // namespace triton::perfanalyzer
diff --git a/src/c++/perf_analyzer/custom_load_manager.cc b/src/c++/perf_analyzer/custom_load_manager.cc
deleted file mode 100644
index 55a20a690..000000000
--- a/src/c++/perf_analyzer/custom_load_manager.cc
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright 2020-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "custom_load_manager.h" - -#include - -#include "constants.h" - -namespace triton { namespace perfanalyzer { - -cb::Error -CustomLoadManager::Create( - const bool async, const bool streaming, - const uint64_t measurement_window_ms, const size_t max_trials, - const std::string& request_intervals_file, const int32_t batch_size, - const size_t max_threads, const uint32_t num_of_sequences, - const SharedMemoryType shared_memory_type, const size_t output_shm_size, - const bool serial_sequences, const std::shared_ptr& parser, - const std::shared_ptr& factory, - std::unique_ptr* manager, - const std::unordered_map& - request_parameters) -{ - std::unique_ptr local_manager(new CustomLoadManager( - async, streaming, request_intervals_file, batch_size, - measurement_window_ms, max_trials, max_threads, num_of_sequences, - shared_memory_type, output_shm_size, serial_sequences, parser, factory, - request_parameters)); - - *manager = std::move(local_manager); - - return cb::Error::Success; -} - -CustomLoadManager::CustomLoadManager( - const bool async, const bool streaming, - const std::string& request_intervals_file, int32_t batch_size, - const uint64_t measurement_window_ms, const size_t max_trials, - const size_t max_threads, const uint32_t num_of_sequences, - const SharedMemoryType shared_memory_type, const size_t output_shm_size, - const bool serial_sequences, const std::shared_ptr& parser, - const std::shared_ptr& factory, - const std::unordered_map& - request_parameters) - : RequestRateManager( - async, streaming, Distribution::CUSTOM, batch_size, - measurement_window_ms, max_trials, max_threads, num_of_sequences, - shared_memory_type, output_shm_size, serial_sequences, parser, - factory, request_parameters), - request_intervals_file_(request_intervals_file) -{ -} - -cb::Error -CustomLoadManager::InitCustomIntervals(const size_t request_count) -{ - PauseWorkers(); - ConfigureThreads(request_count); - auto status = GenerateSchedule(); - ResumeWorkers(); - return status; -} - -cb::Error -CustomLoadManager::GenerateSchedule() -{ - if (request_intervals_file_.empty()) { - return cb::Error::Success; - } - - RETURN_IF_ERROR( - ReadTimeIntervalsFile(request_intervals_file_, &custom_intervals_)); - - auto worker_schedules = CreateWorkerSchedules(); - GiveSchedulesToWorkers(worker_schedules); - return 
cb::Error::Success;
-}
-
-std::vector
-CustomLoadManager::CreateWorkerSchedules()
-{
-  std::vector worker_schedules =
-      CreateEmptyWorkerSchedules();
-  std::vector thread_ids{CalculateThreadIds()};
-
-  size_t thread_id_index = 0;
-  size_t worker_index = 0;
-  size_t intervals_index = 0;
-
-  std::chrono::nanoseconds next_timestamp(0);
-
-  bool started = false;
-
-  // Keep filling the schedule until both the thread_ids (which can differ if
-  // sequences are enabled) and the intervals are at the end of their lists.
-  // This effectively finds the least common multiple of the two sizes and
-  // makes sure that the schedule is complete and can be repeated
-  // indefinitely.
-  //
-  while (!started || thread_id_index != 0 || intervals_index != 0) {
-    started = true;
-    next_timestamp += custom_intervals_[intervals_index];
-    worker_index = thread_ids[thread_id_index];
-    worker_schedules[worker_index]->intervals.emplace_back(next_timestamp);
-
-    thread_id_index = (thread_id_index + 1) % thread_ids.size();
-    intervals_index = (intervals_index + 1) % custom_intervals_.size();
-  }
-
-  SetScheduleDurations(worker_schedules);
-
-  return worker_schedules;
-}
-
-cb::Error
-CustomLoadManager::GetCustomRequestRate(double* request_rate)
-{
-  if (custom_intervals_.empty()) {
-    return cb::Error("The custom intervals vector is empty", pa::GENERIC_ERROR);
-  }
-  uint64_t total_time_ns = 0;
-  for (auto interval : custom_intervals_) {
-    total_time_ns += interval.count();
-  }
-
-  *request_rate =
-      (custom_intervals_.size() * NANOS_PER_SECOND) / (total_time_ns);
-  return cb::Error::Success;
-}
-
-cb::Error
-CustomLoadManager::ReadTimeIntervalsFile(
-    const std::string& path, NanoIntervals* contents)
-{
-  std::ifstream in(path);
-  if (!in) {
-    return cb::Error("failed to open file '" + path + "'", pa::GENERIC_ERROR);
-  }
-
-  std::string current_string;
-  while (std::getline(in, current_string)) {
-    std::chrono::nanoseconds current_time_interval_ns(
-        std::stol(current_string) * 1000);
-    contents->push_back(current_time_interval_ns);
-  }
-  in.close();
-
-  if (contents->size() == 0) {
-    return cb::Error("file '" + path + "' is empty", pa::GENERIC_ERROR);
-  }
-  return cb::Error::Success;
-}
-
-}} // namespace triton::perfanalyzer
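To make ReadTimeIntervalsFile and GetCustomRequestRate above concrete: each line of the intervals file is read with `std::stol` and multiplied by 1000, so the file values are interpreted as microseconds and stored as nanoseconds. A worked sketch, assuming a three-line file containing 5000, 10000, and 5000:

```cpp
#include <chrono>
#include <cstdint>
#include <vector>

int main()
{
  using NanoIntervals = std::vector<std::chrono::nanoseconds>;
  // 5000 us, 10000 us, 5000 us, converted to ns exactly as the loader does.
  NanoIntervals intervals{
      std::chrono::nanoseconds(5000L * 1000),
      std::chrono::nanoseconds(10000L * 1000),
      std::chrono::nanoseconds(5000L * 1000)};

  uint64_t total_time_ns = 0;
  for (const auto& interval : intervals) {
    total_time_ns += interval.count();
  }

  // Mirrors the GetCustomRequestRate arithmetic; NANOS_PER_SECOND is
  // assumed to be 1e9 as in the removed perf_utils constants.
  const double NANOS_PER_SECOND = 1e9;
  double request_rate = (intervals.size() * NANOS_PER_SECOND) / total_time_ns;
  // request_rate == 150.0: three requests every 20 ms.
  return 0;
}
```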
diff --git a/src/c++/perf_analyzer/custom_load_manager.h b/src/c++/perf_analyzer/custom_load_manager.h
deleted file mode 100644
index 39c51d99f..000000000
--- a/src/c++/perf_analyzer/custom_load_manager.h
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2020-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// * Neither the name of NVIDIA CORPORATION nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#pragma once
-
-#include
-#include
-#include
-
-#include "client_backend/client_backend.h"
-#include "request_rate_manager.h"
-
-namespace triton { namespace perfanalyzer {
-
-#ifndef DOCTEST_CONFIG_DISABLE
-class TestCustomLoadManager;
-#endif
-
-//==============================================================================
-/// CustomLoadManager is a helper class to send inference requests to an
-/// inference server in accordance with user-provided time intervals. This
-/// load manager can be used to model certain patterns of interest.
-///
-class CustomLoadManager : public RequestRateManager {
- public:
-  ~CustomLoadManager() = default;
-
-  /// Create a custom load manager that is responsible for maintaining the
-  /// specified load on the inference server.
-  /// \param async Whether to use the asynchronous or synchronous API for infer
-  /// requests.
-  /// \param streaming Whether to use the gRPC streaming API for infer requests.
-  /// \param measurement_window_ms The time window for measurements.
-  /// \param max_trials The maximum number of windows that will be measured.
-  /// \param request_intervals_file The path to the file to use to pick up the
-  /// time intervals between successive requests.
-  /// \param batch_size The batch size used for each request.
-  /// \param max_threads The maximum number of worker threads to be spawned.
-  /// \param num_of_sequences The number of concurrent sequences that must be
-  /// maintained on the server.
-  /// \param zero_input Whether to fill the input tensors with zero.
-  /// \param input_shapes The shape of the input tensors.
-  /// \param user_data The vector containing path/paths to user-provided data
-  /// that can be a directory or a path to a json data file.
-  /// \param shared_memory_type The type of shared memory to use for inputs.
-  /// \param output_shm_size The size of the shared memory to allocate for the
-  /// output.
-  /// \param serial_sequences Enable serial sequence mode.
-  /// \param parser The ModelParser object to get the model details.
-  /// \param factory The ClientBackendFactory object used to create a
-  /// client to the server.
-  /// \param manager Returns a new CustomLoadManager object.
-  /// \param request_parameters Custom request parameters to send to the server.
-  /// \return cb::Error object indicating success or failure.
-  static cb::Error Create(
-      const bool async, const bool streaming,
-      const uint64_t measurement_window_ms, const size_t max_trials,
-      const std::string& request_intervals_file, const int32_t batch_size,
-      const size_t max_threads, const uint32_t num_of_sequences,
-      const SharedMemoryType shared_memory_type, const size_t output_shm_size,
-      const bool serial_sequences, const std::shared_ptr<ModelParser>& parser,
-      const std::shared_ptr<cb::ClientBackendFactory>& factory,
-      std::unique_ptr<CustomLoadManager>* manager,
-      const std::unordered_map<std::string, cb::RequestParameter>&
-          request_parameters);
-
-  /// Initializes the load manager with the provided file containing request
-  /// intervals
-  /// \param request_count The number of requests to generate.
If 0, then - /// there is no limit, and it will generate until told to stop. - /// \return cb::Error object indicating success or failure. - cb::Error InitCustomIntervals(const size_t request_count); - - /// Computes the request rate from the time interval file. Fails with an error - /// if the file is not present or is empty. - /// \param request_rate Returns request rate as computed from the time - /// interval file. - /// \return cb::Error object indicating success or failure. - cb::Error GetCustomRequestRate(double* request_rate); - - private: - CustomLoadManager( - const bool async, const bool streaming, - const std::string& request_intervals_file, const int32_t batch_size, - const uint64_t measurement_window_ms, const size_t max_trials, - const size_t max_threads, const uint32_t num_of_sequences, - const SharedMemoryType shared_memory_type, const size_t output_shm_size, - const bool serial_sequences, const std::shared_ptr& parser, - const std::shared_ptr& factory, - const std::unordered_map& - request_parameters); - - cb::Error GenerateSchedule(); - - std::vector CreateWorkerSchedules(); - - /// Reads the time intervals file and stores intervals in vector - /// \param path Filesystem path of the time intervals file. - /// \param contents Output intervals vector. - /// \return cb::Error object indicating success or failure. - virtual cb::Error ReadTimeIntervalsFile( - const std::string& path, NanoIntervals* contents); - - std::string request_intervals_file_; - NanoIntervals custom_intervals_; - -#ifndef DOCTEST_CONFIG_DISABLE - friend TestCustomLoadManager; - - public: - CustomLoadManager() = default; -#endif -}; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/data_loader.cc b/src/c++/perf_analyzer/data_loader.cc deleted file mode 100644 index 38bfe9403..000000000 --- a/src/c++/perf_analyzer/data_loader.cc +++ /dev/null @@ -1,744 +0,0 @@ -// Copyright 2020-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
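ReadDataFromJSON and ParseData below consume the `--input-data` JSON layout: a `data` array of streams (or of bare steps, which collapse into stream 0), an optional `validation_data` array of the same size, and per-tensor entries given either as a plain array or as an object with an optional `shape` plus `content` or `b64`. A hedged reference example; tensor names, values, and the base64 payload are illustrative:

```json
{
  "data": [
    { "INPUT0": { "shape": [2, 2], "content": [1, 2, 3, 4] } },
    { "INPUT0": { "b64": "AAAAAQAAAAIAAAADAAAABA==" } }
  ],
  "validation_data": [
    { "OUTPUT0": { "shape": [2, 2], "content": [2, 4, 6, 8] } },
    { "OUTPUT0": { "content": [2, 4, 6, 8] } }
  ]
}
```

Per the parser, `validation_data` must align one-to-one with `data`, a `b64` value carries the raw tensor bytes base64-encoded, and mixing bare-step objects with per-stream arrays inside `data` is rejected by ValidateParsingMode.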
- -#include "data_loader.h" - -#include -#include - -#include - -namespace triton { namespace perfanalyzer { - -DataLoader::DataLoader(const size_t batch_size) - : batch_size_(batch_size), data_stream_cnt_(0) -{ -} - -cb::Error -DataLoader::ValidateIOExistsInModel( - const std::shared_ptr& inputs, - const std::shared_ptr& outputs, - const std::string& data_directory) -{ - if (!std::filesystem::exists(data_directory) || - !std::filesystem::is_directory(data_directory)) { - return cb::Error( - "Error: Directory does not exist or is not a directory: " + - std::string(data_directory), - pa::GENERIC_ERROR); - } - - for (const auto& file : std::filesystem::directory_iterator(data_directory)) { - std::string io_name = file.path().filename().string(); - if (inputs->find(io_name) == inputs->end() && - outputs->find(io_name) == outputs->end()) { - return cb::Error( - "Provided data file '" + io_name + - "' does not correspond to a valid model input or output.", - pa::GENERIC_ERROR); - } - } - - return cb::Error::Success; -} - -cb::Error -DataLoader::ReadDataFromDir( - const std::shared_ptr& inputs, - const std::shared_ptr& outputs, - const std::string& data_directory) -{ - // Directory structure supports only a single data stream and step - data_stream_cnt_ = 1; - step_num_.push_back(1); - - for (const auto& input : *inputs) { - if (input.second.datatype_.compare("BYTES") != 0) { - const auto file_path = data_directory + "/" + input.second.name_; - std::string key_name( - input.second.name_ + "_" + std::to_string(0) + "_" + - std::to_string(0)); - auto it = input_data_.emplace(key_name, std::vector()).first; - RETURN_IF_ERROR(ReadFile(file_path, &it->second)); - int64_t byte_size = ByteSize(input.second.shape_, input.second.datatype_); - if (byte_size < 0) { - return cb::Error( - "input " + input.second.name_ + - " contains dynamic shape, provide shapes to send along with " - "the request", - pa::GENERIC_ERROR); - } - if (it->second.size() != byte_size) { - return cb::Error( - "provided data for input " + input.second.name_ + - " has byte size " + std::to_string(it->second.size()) + - ", expect " + std::to_string(byte_size), - pa::GENERIC_ERROR); - } - } else { - const auto file_path = data_directory + "/" + input.second.name_; - std::vector input_string_data; - RETURN_IF_ERROR(ReadTextFile(file_path, &input_string_data)); - std::string key_name( - input.second.name_ + "_" + std::to_string(0) + "_" + - std::to_string(0)); - auto it = input_data_.emplace(key_name, std::vector()).first; - SerializeStringTensor(input_string_data, &it->second); - int64_t batch1_num_strings = ElementCount(input.second.shape_); - if (batch1_num_strings == -1) { - return cb::Error( - "input " + input.second.name_ + - " contains dynamic shape, provide shapes to send along with " - "the request", - pa::GENERIC_ERROR); - } - if (input_string_data.size() != batch1_num_strings) { - return cb::Error( - "provided data for input " + input.second.name_ + " has " + - std::to_string(input_string_data.size()) + - " elements, expect " + std::to_string(batch1_num_strings), - pa::GENERIC_ERROR); - } - } - } - - for (const auto& output : *outputs) { - if (output.second.datatype_.compare("BYTES") != 0) { - const auto file_path = data_directory + "/" + output.second.name_; - std::string key_name( - output.second.name_ + "_" + std::to_string(0) + "_" + - std::to_string(0)); - auto it = output_data_.emplace(key_name, std::vector()).first; - if (!ReadFile(file_path, &it->second).IsOk()) { - output_data_.erase(it); - } - } else { - const auto 
file_path = data_directory + "/" + output.second.name_; - std::vector output_string_data; - if (!ReadTextFile(file_path, &output_string_data).IsOk()) { - continue; - } - std::string key_name( - output.second.name_ + "_" + std::to_string(0) + "_" + - std::to_string(0)); - auto it = output_data_.emplace(key_name, std::vector()).first; - SerializeStringTensor(output_string_data, &it->second); - } - } - return cb::Error::Success; -} - -cb::Error -DataLoader::ReadDataFromJSON( - const std::shared_ptr& inputs, - const std::shared_ptr& outputs, - const std::string& json_file) -{ - FILE* data_file = fopen(json_file.c_str(), "r"); - if (data_file == nullptr) { - return cb::Error( - "failed to open file for reading provided data", pa::GENERIC_ERROR); - } - - char readBuffer[65536]; - rapidjson::FileReadStream fs(data_file, readBuffer, sizeof(readBuffer)); - - rapidjson::Document d{}; - const unsigned int parseFlags = rapidjson::kParseNanAndInfFlag; - d.ParseStream(fs); - - fclose(data_file); - - return ParseData(d, inputs, outputs); -} - -cb::Error -DataLoader::ParseData( - const rapidjson::Document& json, - const std::shared_ptr& inputs, - const std::shared_ptr& outputs) -{ - if (json.HasParseError()) { - std::cerr << "cb::Error : " << json.GetParseError() << '\n' - << "Offset : " << json.GetErrorOffset() << '\n'; - return cb::Error( - "failed to parse the specified json file for reading provided data", - pa::GENERIC_ERROR); - } - - if (!json.HasMember("data")) { - return cb::Error( - "The json file doesn't contain data field", pa::GENERIC_ERROR); - } - - const rapidjson::Value& streams = json["data"]; - - // Validation data is optional, once provided, it must align with 'data' - const rapidjson::Value* out_streams = nullptr; - if (json.HasMember("validation_data")) { - out_streams = &json["validation_data"]; - if (out_streams->Size() != streams.Size()) { - return cb::Error( - "The 'validation_data' field doesn't align with 'data' field in the " - "json file", - pa::GENERIC_ERROR); - } - } - - int count = streams.Size(); - - data_stream_cnt_ += count; - int offset = step_num_.size(); - for (size_t i = offset; i < data_stream_cnt_; i++) { - const rapidjson::Value& steps = streams[i - offset]; - const rapidjson::Value* output_steps = - (out_streams == nullptr) ? nullptr : &(*out_streams)[i - offset]; - - RETURN_IF_ERROR(ValidateParsingMode(steps)); - - if (steps.IsArray()) { - step_num_.push_back(steps.Size()); - for (size_t k = 0; k < step_num_[i]; k++) { - RETURN_IF_ERROR(ReadTensorData(steps[k], inputs, i, k, true)); - } - - if (output_steps != nullptr) { - if (!output_steps->IsArray() || - (output_steps->Size() != steps.Size())) { - return cb::Error( - "The 'validation_data' field doesn't align with 'data' field in " - "the json file", - pa::GENERIC_ERROR); - } - for (size_t k = 0; k < step_num_[i]; k++) { - RETURN_IF_ERROR( - ReadTensorData((*output_steps)[k], outputs, i, k, false)); - } - } - } else { - // There is no nesting of tensors, hence, will interpret streams as steps - // and add the tensors to a single stream '0'. 
- int offset = 0; - if (step_num_.empty()) { - step_num_.push_back(count); - } else { - offset = step_num_[0]; - step_num_[0] += (count); - } - data_stream_cnt_ = 1; - for (size_t k = offset; k < step_num_[0]; k++) { - RETURN_IF_ERROR( - ReadTensorData(streams[k - offset], inputs, 0, k, true)); - } - - if (out_streams != nullptr) { - for (size_t k = offset; k < step_num_[0]; k++) { - RETURN_IF_ERROR( - ReadTensorData((*out_streams)[k - offset], outputs, 0, k, false)); - } - } - break; - } - } - - - return cb::Error::Success; -} - -cb::Error -DataLoader::GenerateData( - std::shared_ptr inputs, const bool zero_input, - const size_t string_length, const std::string& string_data) -{ - // Data generation supports only a single data stream and step - // Not supported for inputs with dynamic shapes - data_stream_cnt_ = 1; - step_num_.push_back(1); - - // Validate the absence of shape tensors - for (const auto& input : *inputs) { - if (input.second.is_shape_tensor_) { - return cb::Error( - "can not generate data for shape tensor '" + input.second.name_ + - "', user-provided data is needed.", - pa::GENERIC_ERROR); - } - } - - uint64_t max_input_byte_size = 0; - for (const auto& input : *inputs) { - if (input.second.datatype_.compare("BYTES") != 0) { - int64_t byte_size = ByteSize(input.second.shape_, input.second.datatype_); - if (byte_size < 0) { - return cb::Error( - "input " + input.second.name_ + - " contains dynamic shape, provide shapes to send along with " - "the request", - pa::GENERIC_ERROR); - } - max_input_byte_size = std::max(max_input_byte_size, (size_t)byte_size); - } else { - // Generate string input and store it into map - std::vector input_string_data; - int64_t batch1_num_strings = ElementCount(input.second.shape_); - if (batch1_num_strings == -1) { - return cb::Error( - "input " + input.second.name_ + - " contains dynamic shape, provide shapes to send along with " - "the request", - pa::GENERIC_ERROR); - } - input_string_data.resize(batch1_num_strings); - if (!string_data.empty()) { - for (size_t i = 0; i < batch1_num_strings; i++) { - input_string_data[i] = string_data; - } - } else { - for (size_t i = 0; i < batch1_num_strings; i++) { - input_string_data[i] = GetRandomString(string_length); - } - } - - std::string key_name( - input.second.name_ + "_" + std::to_string(0) + "_" + - std::to_string(0)); - auto it = input_data_.emplace(key_name, std::vector()).first; - SerializeStringTensor(input_string_data, &it->second); - } - } - - // Create a zero or randomly (as indicated by zero_input) - // initialized buffer that is large enough to provide the largest - // needed input. We (re)use this buffer for all non-string input values. 
- if (max_input_byte_size > 0) { - if (zero_input) { - input_buf_.resize(max_input_byte_size, 0); - } else { - input_buf_.resize(max_input_byte_size); - for (auto& byte : input_buf_) { - byte = rand(); - } - } - } - - return cb::Error::Success; -} - -cb::Error -DataLoader::GetInputData( - const ModelTensor& input, const int stream_id, const int step_id, - TensorData& data) -{ - data.data_ptr = nullptr; - data.batch1_size = 0; - data.is_valid = false; - - // If json data is available then try to retrieve the data from there - if (!input_data_.empty()) { - RETURN_IF_ERROR(ValidateIndexes(stream_id, step_id)); - - std::string key_name( - input.name_ + "_" + std::to_string(stream_id) + "_" + - std::to_string(step_id)); - - // Get the data and the corresponding byte-size - auto it = input_data_.find(key_name); - if (it != input_data_.end()) { - std::vector* data_vec = &it->second; - data.is_valid = true; - data.batch1_size = data_vec->size(); - data.data_ptr = (const uint8_t*)data_vec->data(); - } - } - - if (!data.is_valid) { - if ((input.datatype_.compare("BYTES") != 0) && (input_buf_.size() != 0)) { - int64_t byte_size = ByteSize(input.shape_, input.datatype_); - if (byte_size < 0) { - return cb::Error( - "failed to get correct byte size for '" + input.name_ + "'.", - pa::GENERIC_ERROR); - } - data.batch1_size = (size_t)byte_size; - data.data_ptr = &input_buf_[0]; - data.is_valid = true; - } - } - - if (input.is_optional_ == false && !data.is_valid) { - return cb::Error( - "unable to find data for input '" + input.name_ + "'.", - pa::GENERIC_ERROR); - } - - return cb::Error::Success; -} - -cb::Error -DataLoader::GetOutputData( - const std::string& output_name, const int stream_id, const int step_id, - TensorData& data) -{ - data.data_ptr = nullptr; - data.batch1_size = 0; - data.is_valid = false; - data.name = ""; - - // If json data is available then try to retrieve the data from there - if (!output_data_.empty()) { - RETURN_IF_ERROR(ValidateIndexes(stream_id, step_id)); - - std::string key_name( - output_name + "_" + std::to_string(stream_id) + "_" + - std::to_string(step_id)); - // Get the data and the corresponding byte-size - auto it = output_data_.find(key_name); - if (it != output_data_.end()) { - std::vector* data_vec = &it->second; - data.is_valid = true; - data.batch1_size = data_vec->size(); - data.data_ptr = (const uint8_t*)data_vec->data(); - data.name = output_name; - } - } - return cb::Error::Success; -} - -cb::Error -DataLoader::ValidateIndexes(int stream_id, int step_id) -{ - if (stream_id < 0 || stream_id >= (int)data_stream_cnt_) { - return cb::Error( - "stream_id for retrieving the data should be less than " + - std::to_string(data_stream_cnt_) + ", got " + - std::to_string(stream_id), - pa::GENERIC_ERROR); - } - if (step_id < 0 || step_id >= (int)step_num_[stream_id]) { - return cb::Error( - "step_id for retrieving the data should be less than " + - std::to_string(step_num_[stream_id]) + ", got " + - std::to_string(step_id), - pa::GENERIC_ERROR); - } - return cb::Error::Success; -} - - -cb::Error -DataLoader::GetInputShape( - const ModelTensor& input, const int stream_id, const int step_id, - std::vector* provided_shape) -{ - std::string key_name( - input.name_ + "_" + std::to_string(stream_id) + "_" + - std::to_string(step_id)); - - provided_shape->clear(); - - // Prefer the values read from file over the ones provided from - // CLI - auto it = input_shapes_.find(key_name); - if (it != input_shapes_.end()) { - *provided_shape = it->second; - } else { - *provided_shape = 
input.shape_; - } - return cb::Error::Success; -} - -cb::Error -DataLoader::ReadTensorData( - const rapidjson::Value& step, - const std::shared_ptr& tensors, const int stream_index, - const int step_index, const bool is_input) -{ - std::unordered_set model_io_names; - auto& tensor_data = is_input ? input_data_ : output_data_; - auto& tensor_shape = is_input ? input_shapes_ : output_shapes_; - for (const auto& io : *tensors) { - model_io_names.insert(io.first); - if (step.HasMember(io.first.c_str())) { - std::string key_name( - io.first + "_" + std::to_string(stream_index) + "_" + - std::to_string(step_index)); - - auto it = tensor_data.emplace(key_name, std::vector()).first; - - const rapidjson::Value& tensor = step[(io.first).c_str()]; - - const rapidjson::Value* content; - - // Check if the input data file is malformed - if (!(tensor.IsArray() || tensor.IsObject())) { - return cb::Error("Input data file is malformed.", pa::GENERIC_ERROR); - } - - if (tensor.IsArray()) { - content = &tensor; - } else { - // Populate the shape values first if available - if (tensor.HasMember("shape")) { - auto shape_it = - tensor_shape.emplace(key_name, std::vector()).first; - for (const auto& value : tensor["shape"].GetArray()) { - if (!value.IsInt()) { - return cb::Error( - "shape values must be integers.", pa::GENERIC_ERROR); - } - shape_it->second.push_back(value.GetInt()); - } - } - - if (tensor.HasMember("b64")) { - content = &tensor; - } else { - if (!tensor.HasMember("content")) { - return cb::Error( - "missing content field. ( Location stream id: " + - std::to_string(stream_index) + - ", step id: " + std::to_string(step_index) + ")", - pa::GENERIC_ERROR); - } - - content = &tensor["content"]; - } - } - - if (content->IsArray()) { - RETURN_IF_ERROR(SerializeExplicitTensor( - *content, io.second.datatype_, &it->second)); - } else { - if (content->IsObject() && content->HasMember("b64")) { - if ((*content)["b64"].IsString()) { - const std::string& encoded = (*content)["b64"].GetString(); - it->second.resize(encoded.length()); - base64::decoder D; - int size = - D.decode(encoded.c_str(), encoded.length(), &it->second[0]); - it->second.resize(size); - } else { - return cb::Error( - "the value of b64 field should be of type string ( " - "Location stream id: " + - std::to_string(stream_index) + - ", step id: " + std::to_string(step_index) + ")", - pa::GENERIC_ERROR); - } - } else { - return cb::Error( - "The tensor values are not supported. 
Expected an array or " - "b64 string ( Location stream id: " + - std::to_string(stream_index) + - ", step id: " + std::to_string(step_index) + ")", - pa::GENERIC_ERROR); - } - } - - RETURN_IF_ERROR(ValidateTensor(io.second, stream_index, step_index)); - - } else if (io.second.is_optional_ == false) { - return cb::Error( - "missing tensor " + io.first + - " ( Location stream id: " + std::to_string(stream_index) + - ", step id: " + std::to_string(step_index) + ")", - pa::GENERIC_ERROR); - } - } - - // Add allowed non-model inputs/outputs to the model_io_names set - model_io_names.insert("model"); - - for (auto itr = step.MemberBegin(); itr != step.MemberEnd(); ++itr) { - if (model_io_names.find(itr->name.GetString()) == model_io_names.end()) { - return cb::Error( - "The input or output '" + std::string(itr->name.GetString()) + - "' is not found in the model configuration", - pa::GENERIC_ERROR); - } - } - - - return cb::Error::Success; -} - - -cb::Error -DataLoader::ReadFile(const std::string& path, std::vector* contents) -{ - std::ifstream in(path, std::ios::in | std::ios::binary); - if (!in) { - return cb::Error("failed to open file '" + path + "'", pa::GENERIC_ERROR); - } - - in.seekg(0, std::ios::end); - - int file_size = in.tellg(); - if (file_size > 0) { - contents->resize(file_size); - in.seekg(0, std::ios::beg); - in.read(&(*contents)[0], contents->size()); - } - - in.close(); - - // If size is invalid, report after ifstream is closed - if (file_size < 0) { - return cb::Error( - "failed to get size for file '" + path + "'", pa::GENERIC_ERROR); - } else if (file_size == 0) { - return cb::Error("file '" + path + "' is empty", pa::GENERIC_ERROR); - } - - return cb::Error::Success; -} - -cb::Error -DataLoader::ReadTextFile( - const std::string& path, std::vector* contents) -{ - std::ifstream in(path); - if (!in) { - return cb::Error("failed to open file '" + path + "'", pa::GENERIC_ERROR); - } - - std::string current_string; - while (std::getline(in, current_string)) { - contents->push_back(current_string); - } - in.close(); - - if (contents->size() == 0) { - return cb::Error("file '" + path + "' is empty", pa::GENERIC_ERROR); - } - return cb::Error::Success; -} - -cb::Error -DataLoader::ValidateTensor( - const ModelTensor& model_tensor, const int stream_index, - const int step_index) -{ - std::string key_name( - model_tensor.name_ + "_" + std::to_string(stream_index) + "_" + - std::to_string(step_index)); - - auto data_it = input_data_.find(key_name); - if (data_it == input_data_.end()) { - data_it = output_data_.find(key_name); - } - if (data_it == output_data_.end()) { - return cb::Error("Can't validate a nonexistent tensor"); - } - - auto shape_it = input_shapes_.find(key_name); - - const std::vector& data = data_it->second; - const std::vector& shape = (shape_it == input_shapes_.end()) - ? model_tensor.shape_ - : shape_it->second; - - int64_t batch1_byte = ByteSize(shape, model_tensor.datatype_); - - RETURN_IF_ERROR(ValidateTensorShape(shape, model_tensor)); - RETURN_IF_ERROR(ValidateTensorDataSize(data, batch1_byte, model_tensor)); - - return cb::Error::Success; -} - -cb::Error -DataLoader::ValidateTensorShape( - const std::vector& shape, const ModelTensor& model_tensor) -{ - int element_count = ElementCount(shape); - if (element_count < 0) { - return cb::Error( - "The variable-sized tensor \"" + model_tensor.name_ + - "\" with model shape " + ShapeVecToString(model_tensor.shape_) + - " needs to have its shape fully defined. 
See the --shape option.", - pa::GENERIC_ERROR); - } - - bool is_error = false; - - if (shape.size() != model_tensor.shape_.size()) { - is_error = true; - } - - for (size_t i = 0; i < shape.size() && !is_error; i++) { - if (shape[i] != model_tensor.shape_[i] && model_tensor.shape_[i] != -1) { - is_error = true; - } - } - - if (is_error) { - return cb::Error( - "The supplied shape of " + ShapeVecToString(shape) + " for input \"" + - model_tensor.name_ + - "\" is incompatible with the model's input shape of " + - ShapeVecToString(model_tensor.shape_)); - } - - return cb::Error::Success; -} - -cb::Error -DataLoader::ValidateTensorDataSize( - const std::vector& data, int64_t batch1_byte, - const ModelTensor& model_tensor) -{ - // Validate that the supplied data matches the amount of data expected based - // on the shape - if (batch1_byte > 0 && (size_t)batch1_byte != data.size()) { - return cb::Error( - "mismatch in the data provided for " + model_tensor.name_ + - ". Expected: " + std::to_string(batch1_byte) + - " bytes, Got: " + std::to_string(data.size()) + " bytes", - pa::GENERIC_ERROR); - } - - return cb::Error::Success; -} - -cb::Error -DataLoader::ValidateParsingMode(const rapidjson::Value& steps) -{ - // If our first time parsing data, capture the mode - if (step_num_.size() == 0) { - multiple_stream_mode_ = steps.IsArray(); - } else { - if (steps.IsArray() != multiple_stream_mode_) { - return cb::Error( - "Inconsistency in input-data provided. Can not have a combination of " - "objects and arrays inside of the Data array", - pa::GENERIC_ERROR); - } - } - return cb::Error::Success; -} - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/data_loader.h b/src/c++/perf_analyzer/data_loader.h deleted file mode 100644 index 2f83f959f..000000000 --- a/src/c++/perf_analyzer/data_loader.h +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
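ValidateTensorShape above treats `-1` in the model shape as a wildcard: a supplied shape is accepted when the rank matches and every non-wildcard dimension agrees. A standalone restatement of that rule (an illustrative helper, not the removed API):

```cpp
#include <cstdint>
#include <vector>

// Returns true when `provided` is compatible with `model`: equal rank, and
// each model dimension either matches exactly or is the -1 wildcard.
bool
ShapeCompatible(
    const std::vector<int64_t>& provided, const std::vector<int64_t>& model)
{
  if (provided.size() != model.size()) {
    return false;
  }
  for (size_t i = 0; i < model.size(); i++) {
    if (model[i] != -1 && provided[i] != model[i]) {
      return false;
    }
  }
  return true;
}

// ShapeCompatible({8, 3}, {-1, 3}) -> true   (wildcard batch dimension)
// ShapeCompatible({8, 4}, {-1, 3}) -> false  (second dimension mismatch)
// ShapeCompatible({8},    {-1, 3}) -> false  (rank mismatch)
```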
-#pragma once - -#include -#include -#include - -#include "model_parser.h" -#include "perf_utils.h" -#include "tensor_data.h" - -namespace triton { namespace perfanalyzer { - -#ifndef DOCTEST_CONFIG_DISABLE -class NaggyMockDataLoader; -#endif - - -class DataLoader { - public: - DataLoader(size_t batch_size); - - /// Returns the total number of data streams available. - size_t GetDataStreamsCount() { return data_stream_cnt_; } - - /// Returns the total data steps supported for a requested data stream - /// id. - /// \param stream_id The target stream id - virtual size_t GetTotalSteps(size_t stream_id) - { - if (stream_id < data_stream_cnt_) { - return step_num_[stream_id]; - } - return 0; - } - - /// Validate user-supplied inputs and outputs exist in the model - /// \param inputs The pointer to the map holding the information about - /// input tensors of a model - /// \param outputs The pointer to the map holding the information about - /// output tensors of a model - /// \param data_directory The path to the directory containing the data - cb::Error ValidateIOExistsInModel( - const std::shared_ptr& inputs, - const std::shared_ptr& outputs, - const std::string& data_directory); - - /// Reads the input data from the specified data directory. - /// \param inputs The pointer to the map holding the information about - /// input tensors of a model - /// \param outputs The pointer to the map holding the information about - /// output tensors of a model - /// \param data_directory The path to the directory containing the data - cb::Error ReadDataFromDir( - const std::shared_ptr& inputs, - const std::shared_ptr& outputs, - const std::string& data_directory); - - /// Reads the input data from the specified json file. - /// \param inputs The pointer to the map holding the information about - /// input tensors of a model - /// \param json_file The json file containing the user-provided input - /// data. - /// Returns error object indicating status - virtual cb::Error ReadDataFromJSON( - const std::shared_ptr& inputs, - const std::shared_ptr& outputs, - const std::string& json_file); - - /// Generates the input data to use with the inference requests - /// \param inputs The pointer to the map holding the information about - /// input tensors of a model - /// \param zero_input Whether or not to use zero value for buffer - /// initialization. - /// \param string_length The length of the string to generate for - /// tensor inputs. - /// \param string_data The user provided string to use to populate - /// string tensors - /// Returns error object indicating status - cb::Error GenerateData( - std::shared_ptr inputs, const bool zero_input, - const size_t string_length, const std::string& string_data); - - /// Helper function to access data for the specified input - /// \param input The target model input tensor - /// \param stream_id The data stream_id to use for retrieving input data. - /// \param step_id The data step_id to use for retrieving input data. - /// \param data Returns the input TensorData - /// Returns error object indicating status - cb::Error GetInputData( - const ModelTensor& input, const int stream_id, const int step_id, - TensorData& data); - - /// Helper function to get the shape values to the input - /// \param input The target model input tensor - /// \param stream_id The data stream_id to use for retrieving input shape. - /// \param step_id The data step_id to use for retrieving input shape. - /// \param shape returns the pointer to the vector containing the shape - /// values. 
- /// Returns error object indicating status - cb::Error GetInputShape( - const ModelTensor& input, const int stream_id, const int step_id, - std::vector* shape); - - /// Helper function to access data for the specified output. nullptr will be - /// returned if there is no data specified. - /// \param output_name The name of the output tensor - /// \param stream_id The data stream_id to use for retrieving output data. - /// \param step_id The data step_id to use for retrieving output data. - /// \param data Returns the output TensorData - /// Returns error object indicating status - cb::Error GetOutputData( - const std::string& output_name, const int stream_id, const int step_id, - TensorData& data); - - /// Return an error if the stream index or step index are invalid - cb::Error ValidateIndexes(int stream_index, int step_index); - - protected: - /// Parses the input and output data from the json document - /// \param inputs The input tensors of a model - /// \param outputs The output tensors of a model - /// \param json The json document containing the raw json inputs/outputs - /// \return Returns error object indicating status - cb::Error ParseData( - const rapidjson::Document& json, - const std::shared_ptr& inputs, - const std::shared_ptr& outputs); - - private: - /// Reads the data from file specified by path into vector of characters - /// \param path The complete path to the file to be read - /// \param contents The character vector that will contain the data read - /// \return error status. Returns Non-Ok if an error is encountered during - /// read operation. - virtual cb::Error ReadFile( - const std::string& path, std::vector* contents); - - /// Reads the string from file specified by path into vector of strings - /// \param path The complete path to the file to be read - /// \param contents The string vector that will contain the data read - /// \return error status. Returns Non-Ok if an error is encountered during - /// read operation. - virtual cb::Error ReadTextFile( - const std::string& path, std::vector* contents); - - /// Helper function to read data for the specified input from json - /// \param step the DOM for current step - /// \param inputs The pointer to the map holding the information about - /// input tensors of a model - /// \param stream_index the stream index the data should be exported to. - /// \param step_index the step index the data should be exported to. - /// Returns error object indicating status - cb::Error ReadTensorData( - const rapidjson::Value& step, - const std::shared_ptr& tensors, const int stream_index, - const int step_index, const bool is_input); - - /// Helper function to validate the provided data and shape for the tensor - /// \param input The target model input or output tensor - /// \param stream_index the stream index the data should be exported to. - /// \param step_index the step index the data should be exported to. 
- /// Returns error object indicating status - cb::Error ValidateTensor( - const ModelTensor& model_tensor, const int stream_index, - const int step_index); - - /// Helper function to validate the provided shape for a tensor - /// \param shape Shape for the tensor - /// \param model_tensor The tensor to validate - /// Returns error object indicating status - cb::Error ValidateTensorShape( - const std::vector<int64_t>& shape, const ModelTensor& model_tensor); - - /// Helper function to validate the provided data's size - /// \param data The provided data for the tensor - /// \param batch1_byte The expected number of bytes of data - /// \param model_tensor The tensor to validate - /// Returns error object indicating status - cb::Error ValidateTensorDataSize( - const std::vector<char>& data, int64_t batch1_byte, - const ModelTensor& model_tensor); - - /// Helper function to validate consistency of the parsing mode for provided - /// input data. The code explicitly does not support a mixture of objects - /// (multiple entries of a single stream) and arrays (multiple streams) - /// - /// \param steps The json data provided for one or multiple streams - cb::Error ValidateParsingMode(const rapidjson::Value& steps); - - // The batch_size_ for the data - size_t batch_size_{1}; - // The total number of data streams available. - size_t data_stream_cnt_{0}; - // A vector containing the supported step number for respective stream - // ids. - std::vector<size_t> step_num_; - - // User provided input data; it will be preferred over synthetic data - std::unordered_map<std::string, std::vector<char>> input_data_; - std::unordered_map<std::string, std::vector<int64_t>> input_shapes_; - - // User provided output data for validation - std::unordered_map<std::string, std::vector<char>> output_data_; - std::unordered_map<std::string, std::vector<int64_t>> output_shapes_; - - // Placeholder for generated input data, which will be used for all inputs - // except string - std::vector<char> input_buf_; - - // Tracks what type of input data has been provided - bool multiple_stream_mode_ = false; - -#ifndef DOCTEST_CONFIG_DISABLE - friend NaggyMockDataLoader; - - public: - DataLoader() = default; -#endif -}; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/docs/README.md b/src/c++/perf_analyzer/docs/README.md deleted file mode 100644 index 34f33475a..000000000 --- a/src/c++/perf_analyzer/docs/README.md +++ /dev/null @@ -1,55 +0,0 @@ - - -# Perf Analyzer Documentation - -| [Installation](README.md#installation) | [Getting Started](README.md#getting-started) | [User Guide](README.md#user-guide) | -| -------------------------------------- | -------------------------------------------- | ---------------------------------- | - -## **Installation** - -See the [Installation Guide](install.md) for details on how to install Perf -Analyzer. - -## **Getting Started** - -The [Quick Start Guide](quick_start.md) will show you how to use Perf -Analyzer to profile a simple model. - -## **User Guide** - -The User Guide describes the Perf Analyzer command line options, how to specify -model input data, the performance measurement modes, the performance metrics and -outputs, how to benchmark different servers, and more.
- -- [Perf Analyzer CLI](cli.md) -- [Inference Load Modes](inference_load_modes.md) -- [Input Data](input_data.md) -- [Measurements & Metrics](measurements_metrics.md) -- [Benchmarking](benchmarking.md) -- [Large Language Models (LLMs)](../genai-perf/README.md) diff --git a/src/c++/perf_analyzer/docs/benchmarking.md b/src/c++/perf_analyzer/docs/benchmarking.md deleted file mode 100644 index 96f1ad3a8..000000000 --- a/src/c++/perf_analyzer/docs/benchmarking.md +++ /dev/null @@ -1,250 +0,0 @@ - - -# Benchmarking Triton via HTTP or gRPC endpoint - -This is the default mode for Perf Analyzer. - -# Benchmarking Triton directly via C API - -Besides using HTTP or gRPC server endpoints to communicate with Triton, Perf -Analyzer also allows users to benchmark Triton directly using the C API. HTTP -and gRPC endpoints introduce additional latency in the pipeline which may not -be of interest to users who are using Triton via the C API within their -application. Specifically, this feature is useful for benchmarking a -bare-minimum Triton without the additional overhead of HTTP/gRPC communication. - -## Prerequisite - -Pull the Triton SDK and the Triton Server container images on the target -machine. Since you will need access to the `tritonserver` install, it might be -easier if you copy the `perf_analyzer` binary to the Inference Server container. - -## Required parameters - -Use the [`--help`](cli.md#--help) option to see a complete list of supported -command line arguments. By default, Perf Analyzer expects the Triton instance to -already be running. You can configure C API mode using the -[`--service-kind`](cli.md#--service-kindtritontriton_c_apitfservingtorchserve) -option. In addition, you will need to point Perf Analyzer to the Triton server -library path using the -[`--triton-server-directory`](cli.md#--triton-server-directorypath) option and -the model repository path using the -[`--model-repository`](cli.md#--model-repositorypath) option. - -An example run would look like: - -``` -$ perf_analyzer -m my_model --service-kind=triton_c_api --triton-server-directory=/opt/tritonserver --model-repository=/my/model/repository -... -*** Measurement Settings *** - Service Kind: Triton C-API - Using "time_windows" mode for stabilization - Measurement window: 5000 msec - Using synchronous calls for inference - Stabilizing using average latency - -Request concurrency: 1 - Client: - Request count: 353 - Throughput: 19.6095 infer/sec - Avg latency: 50951 usec (standard deviation 2265 usec) - p50 latency: 50833 usec - p90 latency: 50923 usec - p95 latency: 50940 usec - p99 latency: 50985 usec - - Server: - Inference count: 353 - Execution count: 353 - Successful request count: 353 - Avg request latency: 50841 usec (overhead 20 usec + queue 63 usec + compute input 35 usec + compute infer 50663 usec + compute output 59 usec) - -Inferences/Second vs. Client Average Batch Latency -Concurrency: 1, throughput: 19.6095 infer/sec, latency 50951 usec -``` - -## Unsupported functionalities - -There are a few functionalities that are missing from C API mode. They are: - -1. Async mode ([`--async`](cli.md#--async)) -2. 
For additional known non-working cases, please refer to - [qa/L0_perf_analyzer_capi/test.sh](https://github.com/triton-inference-server/server/blob/main/qa/L0_perf_analyzer_capi/test.sh#L239-L277) - -# Benchmarking TensorFlow Serving - -Perf Analyzer can also be used to benchmark models deployed on -[TensorFlow Serving](https://github.com/tensorflow/serving) using the -[`--service-kind=tfserving`](cli.md#--service-kindtritontriton_c_apitfservingtorchserve) -option. Only the gRPC protocol is supported. - -The following invocation demonstrates how to configure Perf Analyzer to issue -requests to a running instance of `tensorflow_model_server`: - -``` -$ perf_analyzer -m resnet50 --service-kind tfserving -i grpc -b 1 -p 5000 -u localhost:8500 -*** Measurement Settings *** - Batch size: 1 - Using "time_windows" mode for stabilization - Measurement window: 5000 msec - Using synchronous calls for inference - Stabilizing using average latency -Request concurrency: 1 - Client: - Request count: 829 - Throughput: 165.8 infer/sec - Avg latency: 6032 usec (standard deviation 569 usec) - p50 latency: 5863 usec - p90 latency: 6655 usec - p95 latency: 6974 usec - p99 latency: 8093 usec - Avg gRPC time: 5984 usec ((un)marshal request/response 257 usec + response wait 5727 usec) -Inferences/Second vs. Client Average Batch Latency -Concurrency: 1, throughput: 165.8 infer/sec, latency 6032 usec -``` - -You might have to specify a different URL ([`-u`](cli.md#-u-url)) to access -wherever the server is running. The report of Perf Analyzer will only include -statistics measured at the client side. - -**NOTE:** The support is still in **beta**. Perf Analyzer does not guarantee -optimal tuning for TensorFlow Serving. However, a single benchmarking tool that -can be used to stress the inference servers in an identical manner is important -for performance analysis. - -The following points are important for interpreting the results: - -1. `Concurrent Request Execution`: - TensorFlow Serving (TFS), as of version 2.8.0, by default creates threads for - each request that individually submit requests to the TensorFlow Session. - There is a resource limit on the number of concurrent threads serving - requests. When benchmarking at a higher request concurrency, you can see - higher throughput because of this. Unlike TFS, by default Triton is - configured with only a single - [instance count](https://github.com/triton-inference-server/server/blob/main/docs/user_guide/model_configuration.md#instance-groups). - Hence, at a higher request concurrency, most of the requests are blocked on - instance availability. To configure Triton to behave like TFS, set the - instance count to a reasonably high value and then set the - [MAX_SESSION_SHARE_COUNT](https://github.com/triton-inference-server/tensorflow_backend#parameters) - parameter in the model `config.pbtxt` to the same value. For some context, - TFS sets its thread constraint to four times the number of schedulable CPUs. -2. `Different library versions`: - The version of TensorFlow might differ between Triton and the TensorFlow - Serving being benchmarked. Even the versions of the CUDA libraries might - differ between the two solutions. The performance of models can be - susceptible to the versions of these libraries.
 For a single request concurrency, if the - `compute_infer` time reported by Perf Analyzer when benchmarking Triton is as - large as the latency reported by Perf Analyzer when benchmarking TFS, then - the performance difference is likely due to differences in the software - stack and is outside the scope of Triton. -3. `CPU Optimization`: - TFS has separate builds for CPU and GPU targets. They have target-specific - optimizations. Unlike TFS, Triton has a single build which is optimized for - execution on GPUs. When collecting performance for CPU models on Triton, try - running Triton with the environment variable `TF_ENABLE_ONEDNN_OPTS=1`. - -# Benchmarking TorchServe - -Perf Analyzer can also be used to benchmark -[TorchServe](https://github.com/pytorch/serve) using the -[`--service-kind=torchserve`](cli.md#--service-kindtritontriton_c_apitfservingtorchserve) -option. Only the HTTP protocol is supported. It also requires input to be -provided via a JSON file. - -The following invocation demonstrates how to configure Perf Analyzer to issue -requests to a running instance of `torchserve`, assuming the current directory -holds `kitten_small.jpg`: - -``` -$ perf_analyzer -m resnet50 --service-kind torchserve -i http -u localhost:8080 -b 1 -p 5000 --input-data data.json - Successfully read data for 1 stream/streams with 1 step/steps. -*** Measurement Settings *** - Batch size: 1 - Using "time_windows" mode for stabilization - Measurement window: 5000 msec - Using synchronous calls for inference - Stabilizing using average latency -Request concurrency: 1 - Client: - Request count: 799 - Throughput: 159.8 infer/sec - Avg latency: 6259 usec (standard deviation 397 usec) - p50 latency: 6305 usec - p90 latency: 6448 usec - p95 latency: 6494 usec - p99 latency: 7158 usec - Avg HTTP time: 6272 usec (send/recv 77 usec + response wait 6195 usec) -Inferences/Second vs. Client Average Batch Latency -Concurrency: 1, throughput: 159.8 infer/sec, latency 6259 usec -``` - -The content of `data.json`: - -```json - { - "data" : - [ - { - "TORCHSERVE_INPUT" : ["kitten_small.jpg"] - } - ] - } -``` - -You might have to specify a different URL ([`-u`](cli.md#-u-url)) to access -wherever the server is running. The report of Perf Analyzer will only include -statistics measured at the client side. - -**NOTE:** The support is still in **beta**. Perf Analyzer does not guarantee -optimal tuning for TorchServe. However, a single benchmarking tool that can be -used to stress the inference servers in an identical manner is important for -performance analysis. - -# Advantages of using Perf Analyzer over third-party benchmark suites - -Triton Inference Server offers the entire serving solution which includes -[client libraries](https://github.com/triton-inference-server/client) that are -optimized for Triton. Using third-party benchmark suites like `jmeter` fails to -take advantage of the optimized libraries. Some of these optimizations include, -but are not limited to: - -1. Using the - [binary tensor data extension](https://github.com/triton-inference-server/server/blob/main/docs/protocol/extension_binary_data.md#binary-tensor-data-extension) - with HTTP requests. -2. Effective re-use of gRPC message allocation in subsequent requests. -3. Avoiding extra memory copies via the libcurl interface. - -These optimizations can have a tremendous impact on overall performance. Using -Perf Analyzer for benchmarking directly allows a user to access these -optimizations in their study.
- -Beyond that, Perf Analyzer is also highly customizable and supports many Triton -features as described in this document. This, along with a detailed report, -allows a user to identify performance bottlenecks and experiment with different -features before deciding upon what works best for them. diff --git a/src/c++/perf_analyzer/docs/cli.md b/src/c++/perf_analyzer/docs/cli.md deleted file mode 100644 index bd82415c8..000000000 --- a/src/c++/perf_analyzer/docs/cli.md +++ /dev/null @@ -1,663 +0,0 @@ - - -# Perf Analyzer CLI - -This document details the Perf Analyzer command line interface: - -- [General Options](#general-options) -- [Measurement Options](#measurement-options) -- [Sequence Model Options](#sequence-model-options) -- [Input Data Options](#input-data-options) -- [Request Options](#request-options) -- [Server Options](#server-options) -- [Prometheus Metrics Options](#prometheus-metrics-options) -- [Report Options](#report-options) -- [Trace Options](#trace-options) -- [Deprecated Options](#deprecated-options) - -## General Options - -#### `-?` -#### `-h` -#### `--help` - -Prints a description of the Perf Analyzer command line interface. - -#### `-m <model name>` - -Specifies the model name for Perf Analyzer to run. - -This is a required option. - -#### `-x <model version>` - -Specifies the version of the model to be used. If not specified, the most -recent version (the highest numbered version) of the model will be used. - -#### `--service-kind=[triton|triton_c_api|tfserving|torchserve]` - -Specifies the kind of service for Perf Analyzer to generate load for. Note: in -order to use the `torchserve` backend, the `--input-data` option must point to -a JSON file holding data in the following format: - -``` -{ - "data": [ - { - "TORCHSERVE_INPUT": [ - "<complete path to the content file>" - ] - }, - {...}, - ... - ] -} -``` - -The type of file here will depend on the model. In order to use `triton_c_api` -you must specify the Triton server install path and the model repository path -via the `--triton-server-directory` and `--model-repository` options. - -Default is `triton`. - -#### `--bls-composing-models=<string>` - -Specifies the list of all BLS composing models as a comma-separated list of -model names (with an optional model version number after a colon for each) that -may be called by the input BLS model. For example, -`--bls-composing-models=modelA:3,modelB` would specify that modelA and modelB -are composing models that may be called by the input BLS model, and that modelA -will use version 3, while modelB's version is unspecified. - -#### `--model-signature-name=<string>` - -Specifies the signature name of the saved model to use. - -Default is `serving_default`. This option will be ignored if `--service-kind` -is not `tfserving`. - -#### `-v` - -Enables verbose mode. May be specified an additional time (`-v -v`) to enable -extra verbose mode. - -## Measurement Options - -#### `--measurement-mode=[time_windows|count_windows]` - -Specifies the mode used for stabilizing measurements. 'time_windows' will -create windows such that the duration of each window is equal to -`--measurement-interval`. 'count_windows' will create windows such that there -are at least `--measurement-request-count` requests in each window and that -the window is at least one second in duration (adding more requests if -necessary). - -Default is `time_windows`. - -#### `-p <n>` -#### `--measurement-interval=<n>` - -Specifies the time interval used for each measurement in milliseconds when -`--measurement-mode=time_windows` is used.
 Perf Analyzer will sample a time -interval specified by this option and take measurements over the requests -completed within that time interval. - -Default is `5000`. - -#### `--measurement-request-count=<n>` - -Specifies the minimum number of requests to be collected in each measurement -window when `--measurement-mode=count_windows` is used. - -Default is `50`. - -#### `-s <n>` -#### `--stability-percentage=<n>` - -Specifies the allowed variation in latency measurements when determining if a -result is stable. The measurement is considered stable if the ratio of max / -min from the 3 most recent measurements is within (stability percentage)% in -terms of both inferences per second and latency. - -Default is `10`(%). - -#### `--percentile=<n>` - -Specifies the confidence value as a percentile that will be used to determine -if a measurement is stable. For example, a value of `85` indicates that the -85th percentile latency will be used to determine stability. The percentile -will also be reported in the results. - -Default is `-1`, indicating that the average latency is used to determine -stability. - -#### `--request-count=<n>` - -Specifies a total number of requests to use for measurement. - -Default is `0`, which means that there is no request count and the measurement -will proceed using windows until stabilization is detected. - -#### `-r <n>` -#### `--max-trials=<n>` - -Specifies the maximum number of measurements when attempting to reach stability -of inferences per second and latency for each concurrency or request rate -during the search. Perf Analyzer will terminate if the measurement is still -unstable after the maximum number of trials. - -Default is `10`. - -#### `--concurrency-range=<start:end:step>` - -Specifies the range of concurrency levels covered by Perf Analyzer. Perf -Analyzer will start from the concurrency level of 'start' and go until 'end' -with a stride of 'step'. - -The defaults for 'start', 'end', and 'step' are `1`. If 'end' is not specified, -then Perf Analyzer will run for a single concurrency level determined by -'start'. If 'end' is set as `0`, then the concurrency limit will be incremented -by 'step' until the latency threshold is met. 'end' and `--latency-threshold` -cannot both be `0`. 'end' cannot be `0` for sequence models while using -asynchronous mode. - -#### `--periodic-concurrency-range=<start:end:step>` - -Specifies the range of concurrency levels in a similar but slightly different -manner than `--concurrency-range`. Perf Analyzer will start from the -concurrency level of 'start' and increase by 'step' each time. Unlike -`--concurrency-range`, 'end' indicates the *total* concurrency, counting from -'start' (inclusive), and Perf Analyzer will stop increasing once the cumulative -number of concurrent requests has reached 'end'. The user can specify *when* to -periodically increase the concurrency level using the `--request-period` -option. The concurrency level will periodically increase for every `n`-th -response specified by `--request-period`. Since this mode disables the -stability check in Perf Analyzer and reports response timestamps only, the user -must provide `--profile-export-file` to specify where to dump all the measured -timestamps. - -The default values of 'start', 'end', and 'step' are `1`. - -#### `--request-period=<n>` - -Specifies the number of responses that each request must receive before new, -concurrent requests are sent when `--periodic-concurrency-range` is specified. - -Default value is `10`.
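
To tie the periodic concurrency options together, here is a minimal sketch of an invocation (the model name `my_model` and the use of a gRPC streaming endpoint are illustrative assumptions; it mirrors the example in the inference load modes documentation):

```bash
# Ramp from 10 to 100 concurrent requests, launching 30 new requests
# each time every request in the latest batch has received 10 responses.
# Periodic concurrency mode requires a profile export file.
perf_analyzer -m my_model -i grpc --async --streaming \
    --periodic-concurrency-range 10:100:30 \
    --request-period 10 \
    --profile-export-file profile.json
```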
- -#### `--request-parameter=<name:value:type>` - -Specifies a custom parameter that can be sent to a Triton backend as part of -the request. For example, providing '--request-parameter max_tokens:256:int' -to the command line will set an additional parameter 'max_tokens' of type -'int' to 256 as part of the request. The --request-parameter may be specified -multiple times for different custom parameters. - -Valid `type` values are: `bool`, `int`, and `string`. - -> **NOTE** -> -> The `--request-parameter` is currently only supported by the gRPC protocol. - -#### `--request-rate-range=<start:end:step>` - -Specifies the range of request rates for load generated by Perf Analyzer. This -option can take floating-point values. The search along the request rate range -is enabled only when using this option. - -If not specified, then Perf Analyzer will search along the concurrency range. -Perf Analyzer will start from the request rate of 'start' and go until 'end' -with a stride of 'step'. Default values of 'start', 'end' and 'step' are all -`1.0`. If 'end' is not specified, then Perf Analyzer will run for a single -request rate as determined by 'start'. If 'end' is set as `0.0`, then the -request rate will be incremented by 'step' until the latency threshold is met. -'end' and `--latency-threshold` cannot both be `0`. - -#### `--request-distribution=[constant|poisson]` - -Specifies the time interval distribution between dispatching inference requests -to the server. A Poisson distribution closely mimics the real-world workload on -a server. This option is ignored if not using `--request-rate-range`. - -Default is `constant`. - -#### `-l <n>` -#### `--latency-threshold=<n>` - -Specifies the limit on the observed latency, in milliseconds. Perf Analyzer -will terminate the concurrency or request rate search once the measured latency -exceeds this threshold. - -Default is `0`, indicating that Perf Analyzer will run for the entire -concurrency or request rate range. - -#### `--binary-search` - -Enables binary search on the specified search range (concurrency or request -rate). This option requires 'start' and 'end' to be explicitly specified in -the concurrency range or request rate range. When using this option, 'step' -acts more like a precision: the lower the 'step', the more iterations Perf -Analyzer takes along the search path to find suitable convergence. - -When `--binary-search` is not specified, linear search is used. - -#### `--request-intervals=<path>` - -Specifies a path to a file containing time intervals in microseconds. Each time -interval should be on a new line. Perf Analyzer will try to maintain time -intervals between successive generated requests to be as close as possible to -those in this file. This option can be used to apply a custom load to the -server with a certain pattern of interest. Perf Analyzer will loop around the -file if the duration of execution exceeds the amount of time specified by the -intervals. This option cannot be used with `--request-rate-range` or -`--concurrency-range`. - -#### `--max-threads=<n>` - -Specifies the maximum number of threads that will be created for providing the -desired concurrency or request rate. However, when running in synchronous mode -with `--concurrency-range` having an explicit 'end' specification, this value -will be ignored. - -Default is `4` if `--request-rate-range` is specified, otherwise default is -`16`. - -## Sequence Model Options - -#### `--num-of-sequences=<n>` - -Specifies the number of concurrent sequences for sequence models. This option -is ignored when `--request-rate-range` is not specified.
- -Default is `4`. - -#### `--sequence-length=<n>` - -Specifies the base length of a sequence used for sequence models. A sequence -with length X will be composed of X requests to be sent as the elements in the -sequence. The actual length of the sequence will be within +/- Y% of the base -length, where Y defaults to 20% and is customizable via -`--sequence-length-variation`. If the sequence length is unspecified and input -data is provided, the sequence length will be the number of inputs in the -user-provided input data. - -Default is `20`. - -#### `--sequence-length-variation=<n>` - -Specifies the percentage variation in length of sequences. This option is only -valid when not using user-provided input data or when `--sequence-length` is -specified while using user-provided input data. - -Default is `20`(%). - -#### `--sequence-id-range=<start:end>` - -Specifies the range of sequence IDs used by Perf Analyzer. Perf Analyzer will -start from the sequence ID of 'start' and go until 'end' (excluded). If 'end' -is not specified, then Perf Analyzer will generate new sequence IDs without -bounds. If 'end' is specified and the concurrency setting would result in -maintaining more sequences than there are available sequence IDs, Perf Analyzer -will exit with an error due to possible sequence ID collisions. - -The default for 'start' is `1`, and 'end' is not specified (no bounds). - -#### `--serial-sequences` - -Enables the serial sequence mode where a maximum of one request is live per sequence. -Note: It is possible that this mode can cause the request rate mode to not achieve the -desired rate, especially if `--num-of-sequences` is too small. - -## Input Data Options - -#### `--input-data=[zero|random|<path>]` - -Specifies the type of data that will be used for input in inference requests. -The available options are `zero`, `random`, and a path to a directory or a JSON -file. - -When pointing to a JSON file, the user must adhere to the format described in -the [input data documentation](input_data.md). By specifying JSON data, users -can control the data used with every request. Multiple data streams can be -specified for a sequence model, and Perf Analyzer will select a data stream in -a round-robin fashion for every new sequence. Multiple JSON files can also be -provided (`--input-data json_file1.json --input-data json_file2.json` and so on) -and Perf Analyzer will append data streams from each file. When using -`--service-kind=torchserve`, make sure this option points to a JSON file. - -If the option is a path to a directory, then the directory must contain a -binary/text file for each non-string/string input, respectively, named the same -as the input. Each file must contain the data required for that input for a -batch-1 request. Each binary file should contain the raw binary representation -of the input in row-major order for non-string inputs. The text file should -contain all strings needed by batch-1, each on a new line, listed in row-major -order. - -Default is `random`. - -#### `-b <n>` - -Specifies the batch size for each request sent. - -Default is `1`. - -#### `--shape=<string>` - -Specifies the shape used for the specified input. The argument must be -specified as 'name:shape' where the shape is a comma-separated list of -dimension sizes. For example, `--shape=input_name:1,2,3` indicates that the -input `input_name` has tensor shape [ 1, 2, 3 ]. `--shape` may be specified -multiple times to specify shapes for different inputs. - -#### `--string-data=<string>` - -Specifies the string to initialize string input buffers.
 Perf Analyzer will -replicate the given string to build tensors of the required shape. -`--string-length` will not have any effect. This option is ignored if -`--input-data` points to a JSON file or directory. - -#### `--string-length=<n>` - -Specifies the length of the random strings to be generated by Perf Analyzer -for string input. This option is ignored if `--input-data` points to a -JSON file or directory. - -Default is `128`. - -#### `--shared-memory=[none|system|cuda]` - -Specifies the type of the shared memory to use for input and output data. - -Default is `none`. - -#### `--output-shared-memory-size=<n>` - -Specifies the size, in bytes, of the shared memory region to allocate per -output tensor. Only needed when one or more of the outputs are of string type -and/or variable shape. The value should be larger than the size of the largest -output tensor that the model is expected to return. Perf Analyzer will use the -following formula to calculate the total shared memory to allocate: -output_shared_memory_size * number_of_outputs * batch_size. - -Default is `102400` (100 KB). - -#### `--input-tensor-format=[binary|json]` - -Specifies the Triton inference request input tensor format. Only valid when the -HTTP protocol is used. - -Default is `binary`. - -#### `--output-tensor-format=[binary|json]` - -Specifies the Triton inference response output tensor format. Only valid when -the HTTP protocol is used. - -Default is `binary`. - -## Request Options - -#### `-i [http|grpc]` - -Specifies the communication protocol to use. The available protocols are HTTP -and gRPC. - -Default is `http`. - -#### `-a` -#### `--async` - -Enables asynchronous mode in Perf Analyzer. - -By default, Perf Analyzer will use a synchronous request API for inference. -However, if the model is sequential, then the default mode is asynchronous. -Specify `--sync` to operate sequential models in synchronous mode. In -synchronous mode, Perf Analyzer will start threads equal to the concurrency -level. Use asynchronous mode to limit the number of threads, yet maintain the -concurrency. - -#### `--sync` - -Enables synchronous mode in Perf Analyzer. Can be used to operate Perf -Analyzer with a sequential model in synchronous mode. - -#### `--streaming` - -Enables the use of the streaming API. This option is only valid with the gRPC -protocol. - -#### `-H <string>` - -Specifies a header that will be added to HTTP requests (ignored for gRPC -requests). The header must be specified as 'Header:Value'. `-H` may be -specified multiple times to add multiple headers. - -#### `--grpc-compression-algorithm=[none|gzip|deflate]` - -Specifies the compression algorithm to be used by gRPC when sending requests. -Only supported when the gRPC protocol is being used. - -Default is `none`. - -## Server Options - -#### `-u <url>` - -Specifies the URL for the server. - -Default is `localhost:8000` when using `--service-kind=triton` with HTTP. -Default is `localhost:8001` when using `--service-kind=triton` with gRPC. -Default is `localhost:8500` when using `--service-kind=tfserving`. - -#### `--ssl-grpc-use-ssl` - -Enables usage of an encrypted channel to the server. - -#### `--ssl-grpc-root-certifications-file=<path>` - -Specifies the path to the file containing the PEM encoding of the server root -certificates. - -#### `--ssl-grpc-private-key-file=<path>` - -Specifies the path to the file containing the PEM encoding of the client's -private key. - -#### `--ssl-grpc-certificate-chain-file=<path>` - -Specifies the path to the file containing the PEM encoding of the client's -certificate chain.
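
As an illustrative sketch of how the gRPC SSL/TLS options combine (the endpoint `triton.example.com:8001` and all certificate/key paths below are placeholders, not defaults):

```bash
# Benchmark a model behind an SSL/TLS-enabled gRPC endpoint.
# The root certificates file authenticates the server; the private key
# and certificate chain are only needed for mutual TLS.
perf_analyzer -m my_model -i grpc -u triton.example.com:8001 \
    --ssl-grpc-use-ssl \
    --ssl-grpc-root-certifications-file=/certs/ca.pem \
    --ssl-grpc-private-key-file=/certs/client.key \
    --ssl-grpc-certificate-chain-file=/certs/client.pem
```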
- -#### `--ssl-https-verify-peer=[0|1]` - -Specifies whether to verify the peer's SSL certificate. See -https://curl.se/libcurl/c/CURLOPT_SSL_VERIFYPEER.html for the meaning of each -value. - -Default is `1`. - -#### `--ssl-https-verify-host=[0|1|2]` - -Specifies whether to verify the certificate's name against the host. See -https://curl.se/libcurl/c/CURLOPT_SSL_VERIFYHOST.html for the meaning of each -value. - -Default is `2`. - -#### `--ssl-https-ca-certificates-file=<path>` - -Specifies the path to the Certificate Authority (CA) bundle. - -#### `--ssl-https-client-certificate-file=<path>` - -Specifies the path to the SSL client certificate. - -#### `--ssl-https-client-certificate-type=[PEM|DER]` - -Specifies the type of the client SSL certificate. - -Default is `PEM`. - -#### `--ssl-https-private-key-file=<path>` - -Specifies the path to the private keyfile for TLS and SSL client cert. - -#### `--ssl-https-private-key-type=[PEM|DER]` - -Specifies the type of the private key file. - -Default is `PEM`. - -#### `--triton-server-directory=<path>` - -Specifies the Triton server install path. Required by and only used when C API -is used (`--service-kind=triton_c_api`). - -Default is `/opt/tritonserver`. - -#### `--model-repository=<path>` - -Specifies the model repository directory path for loading models. Required by -and only used when C API is used (`--service-kind=triton_c_api`). - -## Prometheus Metrics Options - -#### `--collect-metrics` - -Enables the collection of server-side inference server metrics. Perf Analyzer -will output metrics in the CSV file generated with the `-f` option. Only valid -when the `--verbose-csv` option is also used. - -#### `--metrics-url=<url>` - -Specifies the URL to query for server-side inference server metrics. - -Default is `localhost:8002/metrics`. - -#### `--metrics-interval=<n>` - -Specifies how often within each measurement window, in milliseconds, Perf -Analyzer should query for server-side inference server metrics. - -Default is `1000`. - -## Report Options - -#### `-f <path>` - -Specifies the path where the latency report file will be generated. - -When `-f` is not specified, a latency report will not be generated. - -#### `--profile-export-file <path>` - -Specifies the path where the profile export will be generated. - -When `--profile-export-file` is not specified, a profile export will not be -generated. - -#### `--verbose-csv` - -Enables additional information being output to the CSV file generated by Perf -Analyzer. - -## Trace Options - -#### `--trace-level=[OFF|TIMESTAMPS|TENSORS]` - -Specifies a trace level. `OFF` disables tracing. `TIMESTAMPS` traces -timestamps. `TENSORS` traces tensors. It may be specified multiple times to -trace multiple types of information. Only used for `--service-kind=triton`. - -Default is `OFF`. - -#### `--trace-rate=<n>` - -Specifies the trace sampling rate (traces per second). - -Default is `1000`. - -#### `--trace-count=<n>` - -Specifies the number of traces to be sampled. If the value is `-1`, the number -of traces to be sampled will not be limited. - -Default is `-1`. - -#### `--log-frequency=<n>` - -Specifies the trace log frequency. If the value is `0`, Triton will only log -the trace output to the trace file when shutting down. -Otherwise, Triton will log the trace output to `<trace_file>.<idx>` when it -collects the specified number of traces.
 -For example, if the trace file is `trace_file.log`, and if the log -frequency is `100`, when Triton collects the 100th trace, it logs the 1st to -the 100th traces to file `trace_file.log.0`, and when it collects the 200th -trace, it logs the 101st to the 200th traces to file `trace_file.log.1`. - -Default is `0`. - -## Deprecated Options - -#### `--data-directory=<path>` - -**DEPRECATED** - -Alias for `--input-data=<path>` where `<path>` is the path to a directory. See -the `--input-data` option documentation for details. - -#### `-c <n>` - -**DEPRECATED** - -Specifies the maximum concurrency that Perf Analyzer will search up to. Cannot -be used with `--concurrency-range`. - -#### `-d` - -**DEPRECATED** - -Enables dynamic concurrency mode. Perf Analyzer will search along -concurrencies up to the maximum concurrency specified via `-c <n>`. Cannot be -used with `--concurrency-range`. - -#### `-t <n>` - -**DEPRECATED** - -Specifies the number of concurrent requests. Cannot be used with -`--concurrency-range`. - -Default is `1`. - -#### `-z` - -**DEPRECATED** - -Alias for `--input-data=zero`. See the `--input-data` option documentation for -details. diff --git a/src/c++/perf_analyzer/docs/inference_load_modes.md b/src/c++/perf_analyzer/docs/inference_load_modes.md deleted file mode 100644 index 83fa83eb0..000000000 --- a/src/c++/perf_analyzer/docs/inference_load_modes.md +++ /dev/null @@ -1,100 +0,0 @@ - - -# Inference Load Modes - -Perf Analyzer has several modes for generating inference request load for a -model. - -## Concurrency Mode - -In concurrency mode, Perf Analyzer attempts to send inference requests to the -server such that N requests are always outstanding during profiling. For -example, when using -[`--concurrency-range=4`](cli.md#--concurrency-rangestartendstep), Perf Analyzer -will attempt to have 4 outgoing inference requests at all times during -profiling. - -## Periodic Concurrency Mode - -In periodic concurrency mode, Perf Analyzer will periodically launch a new set -of inference requests until the total number of inference requests that have -been launched since the beginning reaches N requests. - -For example, when using `--periodic-concurrency-range 10:100:30`, Perf Analyzer -will start with 10 concurrent requests and, for every step, it will launch 30 -new inference requests until the total number of requests launched since the -beginning reaches 100. Additionally, the user can also specify *when* to launch -the new requests by specifying `--request-period M`. This will set Perf Analyzer -to launch a new set of requests whenever *all* of the latest set of launched -concurrent requests have received M responses back from the server. - -The user can also specify custom parameters to the model using the -`--request-parameter <name:value:type>` option. -For instance, passing `--request-parameter max_tokens:256:int` will set an -additional parameter `max_tokens` of type `int` to 256 as part of the request. - -```bash -perf_analyzer -m <model_name> -i grpc --async --streaming \ - --profile-export-file profile.json \ - --periodic-concurrency-range 10:100:30 \ - --request-period 10 \ - --request-parameter max_tokens:256:int -``` - -> **Note** -> -> The periodic concurrency mode is currently supported only by the gRPC protocol -> and with [decoupled models](https://github.com/triton-inference-server/server/blob/main/docs/user_guide/decoupled_models.md). -> Additionally, the user must also specify a file where Perf Analyzer can dump all the -> profiled data using `--profile-export-file`.
- -## Request Rate Mode - -In request rate mode, Perf Analyzer attempts to send N inference requests per -second to the server during profiling. For example, when using -[`--request-rate-range=20`](cli.md#--request-rate-rangestartendstep), Perf -Analyzer will attempt to send 20 requests per second during profiling. - -## Custom Interval Mode - -In custom interval mode, Perf Analyzer attempts to send inference requests -according to intervals (between requests, looping if necessary) provided by the -user in the form of a text file with one time interval (in microseconds) per -line. For example, when using -[`--request-intervals=my_intervals.txt`](cli.md#--request-intervalspath), -where `my_intervals.txt` contains: - -``` -100000 -200000 -500000 -``` - -Perf Analyzer will attempt to send requests at the following times: 0.1s, 0.3s, -0.8s, 0.9s, 1.1s, 1.6s, and so on, during profiling. diff --git a/src/c++/perf_analyzer/docs/input_data.md b/src/c++/perf_analyzer/docs/input_data.md deleted file mode 100644 index af2328fcd..000000000 --- a/src/c++/perf_analyzer/docs/input_data.md +++ /dev/null @@ -1,306 +0,0 @@ - - -# Input Data - -Use the [`--help`](cli.md#--help) option to see complete documentation for all -input data options. By default, Perf Analyzer sends random data to all the -inputs of your model. You can select a different input data mode with the -[`--input-data`](cli.md#--input-datazerorandompath) option: - -- _random_: (default) Send random data for each input. Note: Perf Analyzer only - generates random data once per input and reuses it for all inferences -- _zero_: Send zeros for each input. -- directory path: A path to a directory containing a binary file for each input, - named the same as the input (and optionally a binary file for each output for - validation, named the same as the output). Each binary file must contain the - data required for that input/output for a batch-1 request. Each file should - contain the raw binary representation of the input/output in row-major order. -- file path: A path to a JSON file containing data to be used with every - inference request. See the "Real Input Data" section for further details. - [`--input-data`](cli.md#--input-datazerorandompath) can be provided multiple - times with different file paths to specify multiple JSON files. - -For tensors with `STRING`/`BYTES` datatype, the -[`--string-length`](cli.md#--string-lengthn) and -[`--string-data`](cli.md#--string-datastring) options may be used in some cases -(see [`--help`](cli.md#--help) for full documentation). - -For models that support batching, you can use the [`-b`](cli.md#-b-n) option to -indicate the batch size of the requests that Perf Analyzer should send. For -models with variable-sized inputs, you must provide the -[`--shape`](cli.md#--shapestring) argument so that Perf Analyzer knows what -shape tensors to use. For example, for a model that has an input called -`IMAGE` that has shape `[3, N, M]`, where `N` and `M` are variable-size -dimensions, to tell Perf Analyzer to send batch size 4 requests of shape -`[3, 224, 224]`: - -``` -$ perf_analyzer -m mymodel -b 4 --shape IMAGE:3,224,224 -``` - -## Real Input Data - -The performance of some models is highly dependent on the data used. For such -cases, you can provide data to be used with every inference request made by -Perf Analyzer in a JSON file. Perf Analyzer will use the provided data in a -round-robin order when sending inference requests.
 For sequence models, if a -sequence length is specified via -[`--sequence-length`](cli.md#--sequence-lengthn), Perf Analyzer will also loop -through the provided data in a round-robin order up to the specified sequence -length (with a percentage variation customizable via -[`--sequence-length-variation`](cli.md#--sequence-length-variationn)). -Otherwise, the sequence length will be the number of inputs specified in the -user-provided input data. - -Each entry in the `"data"` array must specify all input tensors with the exact -size expected by the model for a single batch. The following example describes -data for a model with inputs named `INPUT0` and `INPUT1`, each with shape -`[4, 4]` and data type `INT32`: - -```json -{ - "data": - [ - { - "INPUT0": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], - "INPUT1": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] - }, - { - "INPUT0": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], - "INPUT1": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] - }, - { - "INPUT0": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], - "INPUT1": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] - }, - { - "INPUT0": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], - "INPUT1": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] - } - ] -} -``` - -Note that the `[4, 4]` tensor has been flattened in a row-major format for the -inputs. In addition to specifying explicit tensors, you can also provide Base64 -encoded binary data for the tensors. Each data object must list its data in -row-major order. Binary data must be in little-endian byte order. The following -example highlights how this can be achieved: - -```json -{ - "data": - [ - { - "INPUT0": {"b64": "YmFzZTY0IGRlY29kZXI="}, - "INPUT1": {"b64": "YmFzZTY0IGRlY29kZXI="} - }, - { - "INPUT0": {"b64": "YmFzZTY0IGRlY29kZXI="}, - "INPUT1": {"b64": "YmFzZTY0IGRlY29kZXI="} - }, - { - "INPUT0": {"b64": "YmFzZTY0IGRlY29kZXI="}, - "INPUT1": {"b64": "YmFzZTY0IGRlY29kZXI="} - } - ] -} -``` - -In the case of sequence models, multiple data streams can be specified in the -JSON file. Each sequence will get a data stream of its own and Perf Analyzer -will ensure the data from each stream is played back to the same correlation -ID. The example below highlights how to specify data for multiple streams for a -sequence model with a single input named `INPUT`, shape `[1]` and data type -`STRING`: - -```json -{ - "data": - [ - [ - { - "INPUT": ["1"] - }, - { - "INPUT": ["2"] - }, - { - "INPUT": ["3"] - }, - { - "INPUT": ["4"] - } - ], - [ - { - "INPUT": ["1"] - }, - { - "INPUT": ["1"] - }, - { - "INPUT": ["1"] - } - ], - [ - { - "INPUT": ["1"] - }, - { - "INPUT": ["1"] - } - ] - ] -} -``` - -The above example describes three data streams with lengths 4, 3, and 2 -respectively. Perf Analyzer will hence produce sequences of length 4, 3, and 2 -in this case. - -You can also provide an optional `"shape"` field to the tensors. This is -especially useful while profiling models with variable-sized tensors as -input. Additionally, note that when providing the `"shape"` field, tensor -contents must be provided separately in a "content" field in row-major order. -The specified shape values will override default input shapes provided as a -command line option (see [`--shape`](cli.md#--shapestring)) for variable-sized -inputs. In the absence of a `"shape"` field, the provided defaults will be used. -There is no need to specify shape as a command line option if all the input data -provide shape values for variable tensors.
 Below is an example JSON file for a -model with a single input `INPUT`, shape `[-1, -1]` and data type `INT32`: - -```json -{ - "data": - [ - { - "INPUT": - { - "content": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], - "shape": [2,8] - } - }, - { - "INPUT": - { - "content": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], - "shape": [8,2] - } - }, - { - "INPUT": - { - "content": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] - } - }, - { - "INPUT": - { - "content": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], - "shape": [4,4] - } - } - ] -} -``` - -The following example provides contents as base64 strings with explicit -shapes: - -```json -{ - "data": - [ - { - "INPUT": - { - "content": {"b64": "/9j/4AAQSkZ(...)"}, - "shape": [7964] - } - }, - { - "INPUT": - { - "content": {"b64": "/9j/4AAQSkZ(...)"}, - "shape": [7964] - } - } - ] -} -``` - -Note that for the `STRING` type, an element is represented by a 4-byte unsigned -integer giving the length followed by the actual bytes. The byte array to be -encoded using base64 must include the 4-byte unsigned integers. - -### Output Validation - -When real input data is provided, you can optionally request that Perf Analyzer -validate the inference output for the input data. - -Validation output can be specified in the `"validation_data"` field and must -have the same format as the `"data"` field for real input. Note that the -entries in `"validation_data"` must align with `"data"` for proper mapping. The -following example describes validation data for a model with inputs named -`INPUT0` and `INPUT1`, outputs named `OUTPUT0` and `OUTPUT1`, where all tensors -have shape `[4, 4]` and data type `INT32`: - -```json -{ - "data": - [ - { - "INPUT0": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], - "INPUT1": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] - } - ], - "validation_data": - [ - { - "OUTPUT0": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - "OUTPUT1": [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2] - } - ] -} -``` - -Besides the above example, the validation outputs can be specified using the -same variations described in the real input data section. - -# Shared Memory - -By default, Perf Analyzer sends input tensor data and receives output tensor -data over the network. You can instead instruct Perf Analyzer to use system -shared memory or CUDA shared memory to communicate tensor data. By using these -options, you can model the performance that you can achieve by using shared -memory in your application. Use -[`--shared-memory=system`](cli.md#--shared-memorynonesystemcuda) to use system -(CPU) shared memory or -[`--shared-memory=cuda`](cli.md#--shared-memorynonesystemcuda) to use CUDA -shared memory. diff --git a/src/c++/perf_analyzer/docs/install.md b/src/c++/perf_analyzer/docs/install.md deleted file mode 100644 index 5390dc00a..000000000 --- a/src/c++/perf_analyzer/docs/install.md +++ /dev/null @@ -1,106 +0,0 @@ - - -# Recommended Installation Method - -## Triton SDK Container - -The recommended way to "install" Perf Analyzer is to run the pre-built -executable from within the Triton SDK docker container available on the -[NVIDIA GPU Cloud Catalog](https://ngc.nvidia.com/catalog/containers/nvidia:tritonserver). -As long as the SDK container has its network exposed to the address and port of -the inference server, Perf Analyzer will be able to run. - -```bash -export RELEASE=<yy.mm> # e.g. 
to use the release from the end of February of 2023, do `export RELEASE=23.02` - -docker pull nvcr.io/nvidia/tritonserver:${RELEASE}-py3-sdk - -docker run --gpus all --rm -it --net host nvcr.io/nvidia/tritonserver:${RELEASE}-py3-sdk - -# inside container -perf_analyzer -m <model> -``` - -# Alternative Installation Methods - -- [Pip](#pip) -- [Build from Source](#build-from-source) - -## Pip - -```bash -pip install tritonclient - -perf_analyzer -m <model> -``` - -**Warning**: If any runtime dependencies are missing, Perf Analyzer will produce -errors showing which ones are missing. You will need to manually install them. - -## Build from Source - -The Triton SDK container is used for building, so some build and runtime -dependencies are already installed. - -```bash -export RELEASE=<yy.mm> # e.g. to use the release from the end of February of 2023, do `export RELEASE=23.02` - -docker pull nvcr.io/nvidia/tritonserver:${RELEASE}-py3-sdk - -docker run --gpus all --rm -it --net host nvcr.io/nvidia/tritonserver:${RELEASE}-py3-sdk - -# inside container -# prep installing newer version of cmake -apt update && apt install -y gpg wget && wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | gpg --dearmor - | tee /usr/share/keyrings/kitware-archive-keyring.gpg >/dev/null && . /etc/os-release && echo "deb [signed-by=/usr/share/keyrings/kitware-archive-keyring.gpg] https://apt.kitware.com/ubuntu/ $UBUNTU_CODENAME main" | tee /etc/apt/sources.list.d/kitware.list >/dev/null - -# install build/runtime dependencies -apt update && apt install -y cmake-data=3.27.7* cmake=3.27.7* libcurl4-openssl-dev rapidjson-dev - -rm -rf client ; git clone --depth 1 https://github.com/triton-inference-server/client - -mkdir client/build ; cd client/build - -cmake -DTRITON_ENABLE_PERF_ANALYZER=ON .. - -make -j8 cc-clients - -cc-clients/perf_analyzer/perf_analyzer -m <model> -``` - -- To enable - [CUDA shared memory](input_data.md#shared-memory), add - `-DTRITON_ENABLE_GPU=ON` to the `cmake` command. - -- To enable - [C API mode](benchmarking.md#benchmarking-triton-directly-via-c-api), add - `-DTRITON_ENABLE_PERF_ANALYZER_C_API=ON` to the `cmake` command. -- To enable the [TorchServe backend](benchmarking.md#benchmarking-torchserve), add - `-DTRITON_ENABLE_PERF_ANALYZER_TS=ON` to the `cmake` command. -- To enable the - [TensorFlow Serving backend](benchmarking.md#benchmarking-tensorflow-serving), - add `-DTRITON_ENABLE_PERF_ANALYZER_TFS=ON` to the `cmake` command. diff --git a/src/c++/perf_analyzer/docs/measurements_metrics.md b/src/c++/perf_analyzer/docs/measurements_metrics.md deleted file mode 100644 index 3f5b64348..000000000 --- a/src/c++/perf_analyzer/docs/measurements_metrics.md +++ /dev/null @@ -1,225 +0,0 @@ - - -# Measurement Modes - -Currently, Perf Analyzer has two measurement modes. - -## Time Windows - -When using time windows measurement mode -([`--measurement-mode=time_windows`](cli.md#--measurement-modetime_windowscount_windows)), -Perf Analyzer will count how many requests have completed during a window of -duration `X` (in milliseconds, via -[`--measurement-interval=X`](cli.md#--measurement-intervaln), default is -`5000`). This is the default measurement mode.
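
As a brief, hypothetical illustration (`my_model` is a placeholder), the window duration can be widened for models whose latency is too noisy to stabilize within the default window:

```bash
# Use 10-second time windows instead of the default 5000 msec,
# so each measurement covers more completed requests.
perf_analyzer -m my_model --measurement-mode=time_windows \
    --measurement-interval=10000
```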
- -## Count Windows - -When using count windows measurement mode -([`--measurement-mode=count_windows`](cli.md#--measurement-modetime_windowscount_windows)), -Perf Analyzer will start the window duration at 1 second and potentially -dynamically increase it until `X` requests have completed (via -[`--measurement-request-count=X`](cli.md#--measurement-request-countn), default -is `50`). - -# Metrics - -## How Throughput is Calculated - -Perf Analyzer calculates throughput to be the total number of requests completed -during a measurement, divided by the duration of the measurement, in seconds. - -## How Latency is Calculated - -For each request concurrency level, Perf Analyzer reports latency and throughput -as seen from Perf Analyzer and also the average request latency on the server. - -The server latency measures the total time from when the request is received at -the server until when the response is sent from the server. Because of the HTTP -and gRPC libraries used to implement the server endpoints, total server latency -is typically more accurate for HTTP requests as it measures time from the first -byte received until the last byte sent. For both HTTP and gRPC, the total server -latency is broken down into the following components: - -- _queue_: The average time spent in the inference scheduling queue by a request - waiting for an instance of the model to become available. -- _compute_: The average time spent performing the actual inference, including - any time needed to copy data to/from the GPU. -- _overhead_: The average time spent in the endpoint that cannot be correctly - captured in the send/receive time with the way the gRPC and HTTP libraries are - structured. - -The client latency time is broken down further for HTTP and gRPC as follows: - -- HTTP: _send/recv_ indicates the time on the client spent sending the request - and receiving the response. _response wait_ indicates time waiting for the - response from the server. -- gRPC: _(un)marshal request/response_ indicates the time spent marshalling the - request data into the gRPC protobuf and unmarshalling the response data from - the gRPC protobuf. _response wait_ indicates time writing the gRPC request to - the network, waiting for the response, and reading the gRPC response from the - network. - -Use the verbose ([`-v`](cli.md#-v)) option to see more output, including the -stabilization passes run for each request concurrency level or request rate. - -# Reports - -## Visualizing Latency vs. Throughput - -Perf Analyzer provides the [`-f`](cli.md#-f-path) option to generate a file -containing CSV output of the results. - -``` -$ perf_analyzer -m inception_graphdef --concurrency-range 1:4 -f perf.csv -... -$ cat perf.csv -Concurrency,Inferences/Second,Client Send,Network+Server Send/Recv,Server Queue,Server Compute Input,Server Compute Infer,Server Compute Output,Client Recv,p50 latency,p90 latency,p95 latency,p99 latency -1,69.2,225,2148,64,206,11781,19,0,13891,18795,19753,21018 -3,84.2,237,1768,21673,209,11742,17,0,35398,43984,47085,51701 -4,84.2,279,1604,33669,233,11731,18,1,47045,56545,59225,64886 -2,87.2,235,1973,9151,190,11346,17,0,21874,28557,29768,34766 -``` - -NOTE: The rows in the CSV file are sorted in increasing order of throughput -(Inferences/Second). - -You can import the CSV file into a spreadsheet to help visualize the latency vs. -inferences/second tradeoff as well as see some components of the latency.
 Follow -these steps: - -- Open - [this spreadsheet](https://docs.google.com/spreadsheets/d/1S8h0bWBBElHUoLd2SOvQPzZzRiQ55xjyqodm_9ireiw) -- Make a copy from the File menu "Make a copy..." -- Open the copy -- Select the A1 cell on the "Raw Data" tab -- From the File menu select "Import..." -- Select "Upload" and upload the file -- Select "Replace data at selected cell" and then select the "Import data" - button - -## Server-side Prometheus metrics - -Perf Analyzer can collect -[server-side metrics](https://github.com/triton-inference-server/server/blob/main/docs/user_guide/metrics.md#gpu-metrics), -such as GPU utilization and GPU power usage. To enable the collection of these -metrics, use the [`--collect-metrics`](cli.md#--collect-metrics) option. - -By default, Perf Analyzer queries the metrics endpoint at the URL -`localhost:8002/metrics`. If the metrics are accessible at a different URL, use -the [`--metrics-url=<url>`](cli.md#--metrics-urlurl) option to specify that. - -By default, Perf Analyzer queries the metrics endpoint every 1000 milliseconds. -To use a different querying interval, use the -[`--metrics-interval=<n>`](cli.md#--metrics-intervaln) option (specify in -milliseconds). - -Because Perf Analyzer can collect the server-side metrics multiple times per -run, these metrics are aggregated in specific ways to produce one final number -per searched concurrency or request rate. Here is how the metrics are -aggregated: - -| Metric | Aggregation | -| - | - | -| GPU Utilization | Averaged from each collection taken during stable passes. We want a number representative of all stable passes. | -| GPU Power Usage | Averaged from each collection taken during stable passes. We want a number representative of all stable passes. | -| GPU Used Memory | Maximum from all collections taken during a stable pass. Users are typically curious what the peak memory usage is for determining model/hardware viability. | -| GPU Total Memory | First from any collection taken during a stable pass. All of the collections should produce the same value for total memory available on the GPU. | - -Note that all metrics are per-GPU in the case of multi-GPU systems. - -To output these server-side metrics to a CSV file, use the -[`-f <path>`](cli.md#-f-path) and [`--verbose-csv`](cli.md#--verbose-csv) -options. The output CSV will contain one column per metric. The value of each -column will be a `key:value` pair (`GPU UUID:metric value`). Each `key:value` -pair will be delimited by a semicolon (`;`) to indicate metric values for each -GPU accessible by the server. There is a trailing semicolon. See below: - -`<gpu-uuid-0>:<metric-value>;<gpu-uuid-1>:<metric-value>;...;` - -Here is a simplified CSV output: - -``` -$ perf_analyzer -m resnet50_libtorch --collect-metrics -f output.csv --verbose-csv -$ cat output.csv -Concurrency,...,Avg GPU Utilization,Avg GPU Power Usage,Max GPU Memory Usage,Total GPU Memory -1,...,gpu_uuid_0:0.33;gpu_uuid_1:0.5;,gpu_uuid_0:55.3;gpu_uuid_1:56.9;,gpu_uuid_0:10000;gpu_uuid_1:11000;,gpu_uuid_0:50000;gpu_uuid_1:75000;, -2,...,gpu_uuid_0:0.25;gpu_uuid_1:0.6;,gpu_uuid_0:25.6;gpu_uuid_1:77.2;,gpu_uuid_0:11000;gpu_uuid_1:17000;,gpu_uuid_0:50000;gpu_uuid_1:75000;, -3,...,gpu_uuid_0:0.87;gpu_uuid_1:0.9;,gpu_uuid_0:87.1;gpu_uuid_1:71.7;,gpu_uuid_0:15000;gpu_uuid_1:22000;,gpu_uuid_0:50000;gpu_uuid_1:75000;, -``` - -## Communication Protocol - -By default, Perf Analyzer uses HTTP to communicate with Triton. The gRPC -protocol can be specified with the [`-i [http|grpc]`](cli.md#-i-httpgrpc) -option.
If gRPC is selected the [`--streaming`](cli.md#--streaming) option can -also be specified for gRPC streaming. - -### SSL/TLS Support - -Perf Analyzer can be used to benchmark Triton service behind SSL/TLS-enabled -endpoints. These options can help in establishing secure connection with the -endpoint and profile the server. - -For gRPC, see the following options: - -- [`--ssl-grpc-use-ssl`](cli.md#--ssl-grpc-use-ssl) -- [`--ssl-grpc-root-certifications-file=`](cli.md#--ssl-grpc-root-certifications-filepath) -- [`--ssl-grpc-private-key-file=`](cli.md#--ssl-grpc-private-key-filepath) -- [`--ssl-grpc-certificate-chain-file=`](cli.md#--ssl-grpc-certificate-chain-filepath) - -More details here: -https://grpc.github.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html - -The -[inference protocol gRPC SSL/TLS section](https://github.com/triton-inference-server/server/blob/main/docs/customization_guide/inference_protocols.md#ssltls) -describes server-side options to configure SSL/TLS in Triton's gRPC endpoint. - -For HTTPS, the following options are exposed: - -- [`--ssl-https-verify-peer`](cli.md#--ssl-https-verify-peer01) -- [`--ssl-https-verify-host`](cli.md#--ssl-https-verify-host012) -- [`--ssl-https-ca-certificates-file`](cli.md#--ssl-https-ca-certificates-filepath) -- [`--ssl-https-client-certificate-file`](cli.md#--ssl-https-client-certificate-filepath) -- [`--ssl-https-client-certificate-type`](cli.md#--ssl-https-client-certificate-typepemder) -- [`--ssl-https-private-key-file`](cli.md#--ssl-https-private-key-filepath) -- [`--ssl-https-private-key-type`](cli.md#--ssl-https-private-key-typepemder) - -See [`--help`](cli.md#--help) for full documentation. - -Unlike gRPC, Triton's HTTP server endpoint can not be configured with SSL/TLS -support. - -Note: Just providing these `--ssl-http-*` options to Perf Analyzer does not -ensure that SSL/TLS is used in communication. If SSL/TLS is not enabled on the -service endpoint, these options have no effect. The intent of exposing these -options to a user of Perf Analyzer is to allow them to configure Perf Analyzer -to benchmark a Triton service behind SSL/TLS-enabled endpoints. In other words, -if Triton is running behind a HTTPS server proxy, then these options would allow -Perf Analyzer to profile Triton via exposed HTTPS proxy. diff --git a/src/c++/perf_analyzer/docs/quick_start.md b/src/c++/perf_analyzer/docs/quick_start.md deleted file mode 100644 index 17d63f560..000000000 --- a/src/c++/perf_analyzer/docs/quick_start.md +++ /dev/null @@ -1,114 +0,0 @@ - - -# Quick Start - -The steps below will guide you on how to start using Perf Analyzer. - -### Step 1: Start Triton Container - -```bash -export RELEASE= # e.g. 
to use the release from the end of February of 2023, do `export RELEASE=23.02` - -docker pull nvcr.io/nvidia/tritonserver:${RELEASE}-py3 - -docker run --gpus all --rm -it --net host nvcr.io/nvidia/tritonserver:${RELEASE}-py3 -``` - -### Step 2: Download `simple` Model - -```bash -# inside triton container -git clone --depth 1 https://github.com/triton-inference-server/server - -mkdir model_repository ; cp -r server/docs/examples/model_repository/simple model_repository -``` - -### Step 3: Start Triton Server - -```bash -# inside triton container -tritonserver --model-repository $(pwd)/model_repository &> server.log & - -# confirm server is ready, look for 'HTTP/1.1 200 OK' -curl -v localhost:8000/v2/health/ready - -# detach (CTRL-p CTRL-q) -``` - -### Step 4: Start Triton SDK Container - -```bash -docker pull nvcr.io/nvidia/tritonserver:${RELEASE}-py3-sdk - -docker run --gpus all --rm -it --net host nvcr.io/nvidia/tritonserver:${RELEASE}-py3-sdk -``` - -### Step 5: Run Perf Analyzer - -```bash -# inside sdk container -perf_analyzer -m simple -``` - -### Step 6: Observe and Analyze Output - -``` -$ perf_analyzer -m simple -*** Measurement Settings *** - Batch size: 1 - Service Kind: Triton - Using "time_windows" mode for stabilization - Measurement window: 5000 msec - Using synchronous calls for inference - Stabilizing using average latency - -Request concurrency: 1 - Client: - Request count: 25348 - Throughput: 1407.84 infer/sec - Avg latency: 708 usec (standard deviation 663 usec) - p50 latency: 690 usec - p90 latency: 881 usec - p95 latency: 926 usec - p99 latency: 1031 usec - Avg HTTP time: 700 usec (send/recv 102 usec + response wait 598 usec) - Server: - Inference count: 25348 - Execution count: 25348 - Successful request count: 25348 - Avg request latency: 382 usec (overhead 41 usec + queue 41 usec + compute input 26 usec + compute infer 257 usec + compute output 16 usec) - -Inferences/Second vs. Client Average Batch Latency -Concurrency: 1, throughput: 1407.84 infer/sec, latency 708 usec -``` - -We can see from the output that the model was able to complete approximately -1407.84 inferences per second, with an average latency of 708 microseconds per -inference request. Concurrency of 1 meant that Perf Analyzer attempted to always -have 1 outgoing request at all times. diff --git a/src/c++/perf_analyzer/doctest.h b/src/c++/perf_analyzer/doctest.h deleted file mode 100644 index adda4134c..000000000 --- a/src/c++/perf_analyzer/doctest.h +++ /dev/null @@ -1,7824 +0,0 @@ -// ====================================================================== lgtm -// [cpp/missing-header-guard] -// == DO NOT MODIFY THIS FILE BY HAND - IT IS AUTO GENERATED BY CMAKE! 
== -// ====================================================================== -// -// doctest.h - the lightest feature-rich C++ single-header testing framework for -// unit tests and TDD -// -// Copyright (c) 2016-2021 Viktor Kirilov -// -// Distributed under the MIT Software License -// See accompanying file LICENSE.txt or copy at -// https://opensource.org/licenses/MIT -// -// The documentation can be found at the library's page: -// https://github.com/doctest/doctest/blob/master/doc/markdown/readme.md -// -// ================================================================================================= -// ================================================================================================= -// ================================================================================================= -// -// The library is heavily influenced by Catch - -// https://github.com/catchorg/Catch2 which uses the Boost Software License - -// Version 1.0 see here - -// https://github.com/catchorg/Catch2/blob/master/LICENSE.txt -// -// The concept of subcases (sections in Catch) and expression decomposition are -// from there. Some parts of the code are taken directly: -// - stringification - the detection of "ostream& operator<<(ostream&, const -// T&)" and StringMaker<> -// - the Approx() helper class for floating point comparison -// - colors in the console -// - breaking into a debugger -// - signal / SEH handling -// - timer -// - XmlWriter class - thanks to Phil Nash for allowing the direct reuse (AKA -// copy/paste) -// -// The expression decomposing templates are taken from lest - -// https://github.com/martinmoene/lest which uses the Boost Software License - -// Version 1.0 see here - -// https://github.com/martinmoene/lest/blob/master/LICENSE.txt -// -// ================================================================================================= -// ================================================================================================= -// ================================================================================================= - -#ifndef DOCTEST_LIBRARY_INCLUDED -#define DOCTEST_LIBRARY_INCLUDED - -// ================================================================================================= -// == VERSION -// ====================================================================================== -// ================================================================================================= - -#define DOCTEST_VERSION_MAJOR 2 -#define DOCTEST_VERSION_MINOR 4 -#define DOCTEST_VERSION_PATCH 8 - -// util we need here -#define DOCTEST_TOSTR_IMPL(x) #x -#define DOCTEST_TOSTR(x) DOCTEST_TOSTR_IMPL(x) - -#define DOCTEST_VERSION_STR \ - DOCTEST_TOSTR(DOCTEST_VERSION_MAJOR) \ - "." DOCTEST_TOSTR(DOCTEST_VERSION_MINOR) "." 
DOCTEST_TOSTR( \ - DOCTEST_VERSION_PATCH) - -#define DOCTEST_VERSION \ - (DOCTEST_VERSION_MAJOR * 10000 + DOCTEST_VERSION_MINOR * 100 + \ - DOCTEST_VERSION_PATCH) - -// ================================================================================================= -// == COMPILER VERSION -// ============================================================================= -// ================================================================================================= - -// ideas for the version stuff are taken from here: -// https://github.com/cxxstuff/cxx_detect - -#define DOCTEST_COMPILER(MAJOR, MINOR, PATCH) \ - ((MAJOR)*10000000 + (MINOR)*100000 + (PATCH)) - -// GCC/Clang and GCC/MSVC are mutually exclusive, but Clang/MSVC are not because -// of clang-cl... -#if defined(_MSC_VER) && defined(_MSC_FULL_VER) -#if _MSC_VER == _MSC_FULL_VER / 10000 -#define DOCTEST_MSVC \ - DOCTEST_COMPILER(_MSC_VER / 100, _MSC_VER % 100, _MSC_FULL_VER % 10000) -#else // MSVC -#define DOCTEST_MSVC \ - DOCTEST_COMPILER( \ - _MSC_VER / 100, (_MSC_FULL_VER / 100000) % 100, _MSC_FULL_VER % 100000) -#endif // MSVC -#endif // MSVC -#if defined(__clang__) && defined(__clang_minor__) -#define DOCTEST_CLANG \ - DOCTEST_COMPILER(__clang_major__, __clang_minor__, __clang_patchlevel__) -#elif defined(__GNUC__) && defined(__GNUC_MINOR__) && \ - defined(__GNUC_PATCHLEVEL__) && !defined(__INTEL_COMPILER) -#define DOCTEST_GCC \ - DOCTEST_COMPILER(__GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__) -#endif // GCC - -#ifndef DOCTEST_MSVC -#define DOCTEST_MSVC 0 -#endif // DOCTEST_MSVC -#ifndef DOCTEST_CLANG -#define DOCTEST_CLANG 0 -#endif // DOCTEST_CLANG -#ifndef DOCTEST_GCC -#define DOCTEST_GCC 0 -#endif // DOCTEST_GCC - -// ================================================================================================= -// == COMPILER WARNINGS HELPERS -// ==================================================================== -// ================================================================================================= - -#if DOCTEST_CLANG -#define DOCTEST_PRAGMA_TO_STR(x) _Pragma(#x) -#define DOCTEST_CLANG_SUPPRESS_WARNING_PUSH _Pragma("clang diagnostic push") -#define DOCTEST_CLANG_SUPPRESS_WARNING(w) \ - DOCTEST_PRAGMA_TO_STR(clang diagnostic ignored w) -#define DOCTEST_CLANG_SUPPRESS_WARNING_POP _Pragma("clang diagnostic pop") -#define DOCTEST_CLANG_SUPPRESS_WARNING_WITH_PUSH(w) \ - DOCTEST_CLANG_SUPPRESS_WARNING_PUSH DOCTEST_CLANG_SUPPRESS_WARNING(w) -#else // DOCTEST_CLANG -#define DOCTEST_CLANG_SUPPRESS_WARNING_PUSH -#define DOCTEST_CLANG_SUPPRESS_WARNING(w) -#define DOCTEST_CLANG_SUPPRESS_WARNING_POP -#define DOCTEST_CLANG_SUPPRESS_WARNING_WITH_PUSH(w) -#endif // DOCTEST_CLANG - -#if DOCTEST_GCC -#define DOCTEST_PRAGMA_TO_STR(x) _Pragma(#x) -#define DOCTEST_GCC_SUPPRESS_WARNING_PUSH _Pragma("GCC diagnostic push") -#define DOCTEST_GCC_SUPPRESS_WARNING(w) \ - DOCTEST_PRAGMA_TO_STR(GCC diagnostic ignored w) -#define DOCTEST_GCC_SUPPRESS_WARNING_POP _Pragma("GCC diagnostic pop") -#define DOCTEST_GCC_SUPPRESS_WARNING_WITH_PUSH(w) \ - DOCTEST_GCC_SUPPRESS_WARNING_PUSH DOCTEST_GCC_SUPPRESS_WARNING(w) -#else // DOCTEST_GCC -#define DOCTEST_GCC_SUPPRESS_WARNING_PUSH -#define DOCTEST_GCC_SUPPRESS_WARNING(w) -#define DOCTEST_GCC_SUPPRESS_WARNING_POP -#define DOCTEST_GCC_SUPPRESS_WARNING_WITH_PUSH(w) -#endif // DOCTEST_GCC - -#if DOCTEST_MSVC -#define DOCTEST_MSVC_SUPPRESS_WARNING_PUSH __pragma(warning(push)) -#define DOCTEST_MSVC_SUPPRESS_WARNING(w) __pragma(warning(disable : w)) -#define 
DOCTEST_MSVC_SUPPRESS_WARNING_POP __pragma(warning(pop)) -#define DOCTEST_MSVC_SUPPRESS_WARNING_WITH_PUSH(w) \ - DOCTEST_MSVC_SUPPRESS_WARNING_PUSH DOCTEST_MSVC_SUPPRESS_WARNING(w) -#else // DOCTEST_MSVC -#define DOCTEST_MSVC_SUPPRESS_WARNING_PUSH -#define DOCTEST_MSVC_SUPPRESS_WARNING(w) -#define DOCTEST_MSVC_SUPPRESS_WARNING_POP -#define DOCTEST_MSVC_SUPPRESS_WARNING_WITH_PUSH(w) -#endif // DOCTEST_MSVC - -// ================================================================================================= -// == COMPILER WARNINGS -// ============================================================================ -// ================================================================================================= - -// both the header and the implementation suppress all of these, -// so it only makes sense to aggregate them like so -#define DOCTEST_SUPPRESS_COMMON_WARNINGS_PUSH \ - DOCTEST_CLANG_SUPPRESS_WARNING_PUSH \ - DOCTEST_CLANG_SUPPRESS_WARNING("-Wunknown-pragmas") \ - DOCTEST_CLANG_SUPPRESS_WARNING("-Wweak-vtables") \ - DOCTEST_CLANG_SUPPRESS_WARNING("-Wpadded") \ - DOCTEST_CLANG_SUPPRESS_WARNING("-Wmissing-prototypes") \ - DOCTEST_CLANG_SUPPRESS_WARNING("-Wunused-local-typedef") \ - DOCTEST_CLANG_SUPPRESS_WARNING("-Wc++98-compat") \ - DOCTEST_CLANG_SUPPRESS_WARNING("-Wc++98-compat-pedantic") \ - \ - DOCTEST_GCC_SUPPRESS_WARNING_PUSH \ - DOCTEST_GCC_SUPPRESS_WARNING("-Wunknown-pragmas") \ - DOCTEST_GCC_SUPPRESS_WARNING("-Wpragmas") \ - DOCTEST_GCC_SUPPRESS_WARNING("-Weffc++") \ - DOCTEST_GCC_SUPPRESS_WARNING("-Wstrict-overflow") \ - DOCTEST_GCC_SUPPRESS_WARNING("-Wstrict-aliasing") \ - DOCTEST_GCC_SUPPRESS_WARNING("-Wmissing-declarations") \ - DOCTEST_GCC_SUPPRESS_WARNING("-Wunused-local-typedefs") \ - DOCTEST_GCC_SUPPRESS_WARNING("-Wuseless-cast") \ - DOCTEST_GCC_SUPPRESS_WARNING("-Wnoexcept") \ - \ - DOCTEST_MSVC_SUPPRESS_WARNING_PUSH \ - /* these 4 also disabled globally via cmake: */ \ - DOCTEST_MSVC_SUPPRESS_WARNING( \ - 4514) /* unreferenced inline function has been removed */ \ - DOCTEST_MSVC_SUPPRESS_WARNING(4571) /* SEH related */ \ - DOCTEST_MSVC_SUPPRESS_WARNING(4710) /* function not inlined */ \ - DOCTEST_MSVC_SUPPRESS_WARNING( \ - 4711) /* function selected for inline expansion*/ \ - /* */ \ - DOCTEST_MSVC_SUPPRESS_WARNING(4616) /* invalid compiler warning */ \ - DOCTEST_MSVC_SUPPRESS_WARNING(4619) /* invalid compiler warning */ \ - DOCTEST_MSVC_SUPPRESS_WARNING( \ - 4996) /* The compiler encountered a deprecated declaration */ \ - DOCTEST_MSVC_SUPPRESS_WARNING( \ - 4706) /* assignment within conditional expression */ \ - DOCTEST_MSVC_SUPPRESS_WARNING( \ - 4512) /* 'class' : assignment operator could not be generated */ \ - DOCTEST_MSVC_SUPPRESS_WARNING(4127) /* conditional expression is constant */ \ - DOCTEST_MSVC_SUPPRESS_WARNING(4820) /* padding */ \ - DOCTEST_MSVC_SUPPRESS_WARNING( \ - 4625) /* copy constructor was implicitly deleted */ \ - DOCTEST_MSVC_SUPPRESS_WARNING( \ - 4626) /* assignment operator was implicitly deleted */ \ - DOCTEST_MSVC_SUPPRESS_WARNING( \ - 5027) /* move assignment operator implicitly deleted */ \ - DOCTEST_MSVC_SUPPRESS_WARNING( \ - 5026) /* move constructor was implicitly deleted */ \ - DOCTEST_MSVC_SUPPRESS_WARNING( \ - 4640) /* construction of local static object not thread-safe */ \ - DOCTEST_MSVC_SUPPRESS_WARNING(5045) /* Spectre mitigation for memory load */ \ - /* static analysis */ \ - DOCTEST_MSVC_SUPPRESS_WARNING( \ - 26439) /* Function may not throw. 
Declare it 'noexcept' */ \ - DOCTEST_MSVC_SUPPRESS_WARNING( \ - 26495) /* Always initialize a member variable */ \ - DOCTEST_MSVC_SUPPRESS_WARNING(26451) /* Arithmetic overflow ... */ \ - DOCTEST_MSVC_SUPPRESS_WARNING( \ - 26444) /* Avoid unnamed objects with custom ctor and dtor... */ \ - DOCTEST_MSVC_SUPPRESS_WARNING(26812) /* Prefer 'enum class' over 'enum' */ - -#define DOCTEST_SUPPRESS_COMMON_WARNINGS_POP \ - DOCTEST_CLANG_SUPPRESS_WARNING_POP \ - DOCTEST_GCC_SUPPRESS_WARNING_POP \ - DOCTEST_MSVC_SUPPRESS_WARNING_POP - -DOCTEST_SUPPRESS_COMMON_WARNINGS_PUSH - -DOCTEST_CLANG_SUPPRESS_WARNING_PUSH -DOCTEST_CLANG_SUPPRESS_WARNING("-Wnon-virtual-dtor") -DOCTEST_CLANG_SUPPRESS_WARNING("-Wdeprecated") - -DOCTEST_GCC_SUPPRESS_WARNING_PUSH -DOCTEST_GCC_SUPPRESS_WARNING("-Wctor-dtor-privacy") -DOCTEST_GCC_SUPPRESS_WARNING("-Wnon-virtual-dtor") -DOCTEST_GCC_SUPPRESS_WARNING("-Wsign-promo") - -DOCTEST_MSVC_SUPPRESS_WARNING_PUSH -DOCTEST_MSVC_SUPPRESS_WARNING( - 4623) // default constructor was implicitly defined as deleted - -#define DOCTEST_MAKE_STD_HEADERS_CLEAN_FROM_WARNINGS_ON_WALL_BEGIN \ - DOCTEST_MSVC_SUPPRESS_WARNING_PUSH \ - DOCTEST_MSVC_SUPPRESS_WARNING( \ - 4548) /* before comma no effect; expected side - effect */ \ - DOCTEST_MSVC_SUPPRESS_WARNING( \ - 4265) /* virtual functions, but destructor is not virtual */ \ - DOCTEST_MSVC_SUPPRESS_WARNING( \ - 4986) /* exception specification does not match previous */ \ - DOCTEST_MSVC_SUPPRESS_WARNING( \ - 4350) /* 'member1' called instead of 'member2' */ \ - DOCTEST_MSVC_SUPPRESS_WARNING( \ - 4668) /* not defined as a preprocessor macro */ \ - DOCTEST_MSVC_SUPPRESS_WARNING(4365) /* signed/unsigned mismatch */ \ - DOCTEST_MSVC_SUPPRESS_WARNING(4774) /* format string not a string literal */ \ - DOCTEST_MSVC_SUPPRESS_WARNING(4820) /* padding */ \ - DOCTEST_MSVC_SUPPRESS_WARNING( \ - 4625) /* copy constructor was implicitly deleted */ \ - DOCTEST_MSVC_SUPPRESS_WARNING( \ - 4626) /* assignment operator was implicitly deleted */ \ - DOCTEST_MSVC_SUPPRESS_WARNING( \ - 5027) /* move assignment operator implicitly deleted */ \ - DOCTEST_MSVC_SUPPRESS_WARNING( \ - 5026) /* move constructor was implicitly deleted */ \ - DOCTEST_MSVC_SUPPRESS_WARNING( \ - 4623) /* default constructor was implicitly deleted */ \ - DOCTEST_MSVC_SUPPRESS_WARNING( \ - 5039) /* pointer to pot. 
throwing function passed to extern C */ \ - DOCTEST_MSVC_SUPPRESS_WARNING(5045) /* Spectre mitigation for memory load */ \ - DOCTEST_MSVC_SUPPRESS_WARNING( \ - 5105) /* macro producing 'defined' has undefined behavior */ - -#define DOCTEST_MAKE_STD_HEADERS_CLEAN_FROM_WARNINGS_ON_WALL_END \ - DOCTEST_MSVC_SUPPRESS_WARNING_POP - -// ================================================================================================= -// == FEATURE DETECTION -// ============================================================================ -// ================================================================================================= - -// general compiler feature support table: -// https://en.cppreference.com/w/cpp/compiler_support MSVC C++11 feature support -// table: https://msdn.microsoft.com/en-us/library/hh567368.aspx GCC C++11 -// feature support table: https://gcc.gnu.org/projects/cxx-status.html MSVC -// version table: -// https://en.wikipedia.org/wiki/Microsoft_Visual_C%2B%2B#Internal_version_numbering -// MSVC++ 14.3 (17) _MSC_VER == 1930 (Visual Studio 2022) -// MSVC++ 14.2 (16) _MSC_VER == 1920 (Visual Studio 2019) -// MSVC++ 14.1 (15) _MSC_VER == 1910 (Visual Studio 2017) -// MSVC++ 14.0 _MSC_VER == 1900 (Visual Studio 2015) -// MSVC++ 12.0 _MSC_VER == 1800 (Visual Studio 2013) -// MSVC++ 11.0 _MSC_VER == 1700 (Visual Studio 2012) -// MSVC++ 10.0 _MSC_VER == 1600 (Visual Studio 2010) -// MSVC++ 9.0 _MSC_VER == 1500 (Visual Studio 2008) -// MSVC++ 8.0 _MSC_VER == 1400 (Visual Studio 2005) - -// Universal Windows Platform support -#if defined(WINAPI_FAMILY) && (WINAPI_FAMILY == WINAPI_FAMILY_APP) -#define DOCTEST_CONFIG_NO_WINDOWS_SEH -#endif // WINAPI_FAMILY -#if DOCTEST_MSVC && !defined(DOCTEST_CONFIG_WINDOWS_SEH) -#define DOCTEST_CONFIG_WINDOWS_SEH -#endif // MSVC -#if defined(DOCTEST_CONFIG_NO_WINDOWS_SEH) && \ - defined(DOCTEST_CONFIG_WINDOWS_SEH) -#undef DOCTEST_CONFIG_WINDOWS_SEH -#endif // DOCTEST_CONFIG_NO_WINDOWS_SEH - -#if !defined(_WIN32) && !defined(__QNX__) && \ - !defined(DOCTEST_CONFIG_POSIX_SIGNALS) && !defined(__EMSCRIPTEN__) -#define DOCTEST_CONFIG_POSIX_SIGNALS -#endif // _WIN32 -#if defined(DOCTEST_CONFIG_NO_POSIX_SIGNALS) && \ - defined(DOCTEST_CONFIG_POSIX_SIGNALS) -#undef DOCTEST_CONFIG_POSIX_SIGNALS -#endif // DOCTEST_CONFIG_NO_POSIX_SIGNALS - -#ifndef DOCTEST_CONFIG_NO_EXCEPTIONS -#if !defined(__cpp_exceptions) && !defined(__EXCEPTIONS) && !defined(_CPPUNWIND) -#define DOCTEST_CONFIG_NO_EXCEPTIONS -#endif // no exceptions -#endif // DOCTEST_CONFIG_NO_EXCEPTIONS - -#ifdef DOCTEST_CONFIG_NO_EXCEPTIONS_BUT_WITH_ALL_ASSERTS -#ifndef DOCTEST_CONFIG_NO_EXCEPTIONS -#define DOCTEST_CONFIG_NO_EXCEPTIONS -#endif // DOCTEST_CONFIG_NO_EXCEPTIONS -#endif // DOCTEST_CONFIG_NO_EXCEPTIONS_BUT_WITH_ALL_ASSERTS - -#if defined(DOCTEST_CONFIG_NO_EXCEPTIONS) && \ - !defined(DOCTEST_CONFIG_NO_TRY_CATCH_IN_ASSERTS) -#define DOCTEST_CONFIG_NO_TRY_CATCH_IN_ASSERTS -#endif // DOCTEST_CONFIG_NO_EXCEPTIONS && - // !DOCTEST_CONFIG_NO_TRY_CATCH_IN_ASSERTS - -#if defined(DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN) && \ - !defined(DOCTEST_CONFIG_IMPLEMENT) -#define DOCTEST_CONFIG_IMPLEMENT -#endif // DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN - -#if defined(_WIN32) || defined(__CYGWIN__) -#if DOCTEST_MSVC -#define DOCTEST_SYMBOL_EXPORT __declspec(dllexport) -#define DOCTEST_SYMBOL_IMPORT __declspec(dllimport) -#else // MSVC -#define DOCTEST_SYMBOL_EXPORT __attribute__((dllexport)) -#define DOCTEST_SYMBOL_IMPORT __attribute__((dllimport)) -#endif // MSVC -#else // _WIN32 -#define DOCTEST_SYMBOL_EXPORT 
__attribute__((visibility("default"))) -#define DOCTEST_SYMBOL_IMPORT -#endif // _WIN32 - -#ifdef DOCTEST_CONFIG_IMPLEMENTATION_IN_DLL -#ifdef DOCTEST_CONFIG_IMPLEMENT -#define DOCTEST_INTERFACE DOCTEST_SYMBOL_EXPORT -#else // DOCTEST_CONFIG_IMPLEMENT -#define DOCTEST_INTERFACE DOCTEST_SYMBOL_IMPORT -#endif // DOCTEST_CONFIG_IMPLEMENT -#else // DOCTEST_CONFIG_IMPLEMENTATION_IN_DLL -#define DOCTEST_INTERFACE -#endif // DOCTEST_CONFIG_IMPLEMENTATION_IN_DLL - -#define DOCTEST_EMPTY - -#if DOCTEST_MSVC -#define DOCTEST_NOINLINE __declspec(noinline) -#define DOCTEST_UNUSED -#define DOCTEST_ALIGNMENT(x) -#elif DOCTEST_CLANG && DOCTEST_CLANG < DOCTEST_COMPILER(3, 5, 0) -#define DOCTEST_NOINLINE -#define DOCTEST_UNUSED -#define DOCTEST_ALIGNMENT(x) -#else -#define DOCTEST_NOINLINE __attribute__((noinline)) -#define DOCTEST_UNUSED __attribute__((unused)) -#define DOCTEST_ALIGNMENT(x) __attribute__((aligned(x))) -#endif - -#ifndef DOCTEST_NORETURN -#if DOCTEST_MSVC && (DOCTEST_MSVC < DOCTEST_COMPILER(19, 0, 0)) -#define DOCTEST_NORETURN -#else // DOCTEST_MSVC -#define DOCTEST_NORETURN [[noreturn]] -#endif // DOCTEST_MSVC -#endif // DOCTEST_NORETURN - -#ifndef DOCTEST_NOEXCEPT -#if DOCTEST_MSVC && (DOCTEST_MSVC < DOCTEST_COMPILER(19, 0, 0)) -#define DOCTEST_NOEXCEPT -#else // DOCTEST_MSVC -#define DOCTEST_NOEXCEPT noexcept -#endif // DOCTEST_MSVC -#endif // DOCTEST_NOEXCEPT - -#ifndef DOCTEST_CONSTEXPR -#if DOCTEST_MSVC && (DOCTEST_MSVC < DOCTEST_COMPILER(19, 0, 0)) -#define DOCTEST_CONSTEXPR const -#else // DOCTEST_MSVC -#define DOCTEST_CONSTEXPR constexpr -#endif // DOCTEST_MSVC -#endif // DOCTEST_CONSTEXPR - -// ================================================================================================= -// == FEATURE DETECTION END -// ======================================================================== -// ================================================================================================= - -// internal macros for string concatenation and anonymous variable name -// generation -#define DOCTEST_CAT_IMPL(s1, s2) s1##s2 -#define DOCTEST_CAT(s1, s2) DOCTEST_CAT_IMPL(s1, s2) -#ifdef __COUNTER__ // not standard and may be missing for some compilers -#define DOCTEST_ANONYMOUS(x) DOCTEST_CAT(x, __COUNTER__) -#else // __COUNTER__ -#define DOCTEST_ANONYMOUS(x) DOCTEST_CAT(x, __LINE__) -#endif // __COUNTER__ - -#ifndef DOCTEST_CONFIG_ASSERTION_PARAMETERS_BY_VALUE -#define DOCTEST_REF_WRAP(x) x& -#else // DOCTEST_CONFIG_ASSERTION_PARAMETERS_BY_VALUE -#define DOCTEST_REF_WRAP(x) x -#endif // DOCTEST_CONFIG_ASSERTION_PARAMETERS_BY_VALUE - -// not using __APPLE__ because... this is how Catch does it -#ifdef __MAC_OS_X_VERSION_MIN_REQUIRED -#define DOCTEST_PLATFORM_MAC -#elif defined(__IPHONE_OS_VERSION_MIN_REQUIRED) -#define DOCTEST_PLATFORM_IPHONE -#elif defined(_WIN32) -#define DOCTEST_PLATFORM_WINDOWS -#else // DOCTEST_PLATFORM -#define DOCTEST_PLATFORM_LINUX -#endif // DOCTEST_PLATFORM - -namespace doctest { namespace detail { -static DOCTEST_CONSTEXPR int -consume(const int*, int) -{ - return 0; -} -}} // namespace doctest::detail - -#define DOCTEST_GLOBAL_NO_WARNINGS(var, ...) 
\ - DOCTEST_CLANG_SUPPRESS_WARNING_WITH_PUSH("-Wglobal-constructors") \ - static const int var = doctest::detail::consume(&var, __VA_ARGS__); \ - DOCTEST_CLANG_SUPPRESS_WARNING_POP - -#ifndef DOCTEST_BREAK_INTO_DEBUGGER -// should probably take a look at https://github.com/scottt/debugbreak -#ifdef DOCTEST_PLATFORM_LINUX -#if defined(__GNUC__) && (defined(__i386) || defined(__x86_64)) -// Break at the location of the failing check if possible -#define DOCTEST_BREAK_INTO_DEBUGGER() \ - __asm__("int $3\n" : :) // NOLINT (hicpp-no-assembler) -#else -#include -#define DOCTEST_BREAK_INTO_DEBUGGER() raise(SIGTRAP) -#endif -#elif defined(DOCTEST_PLATFORM_MAC) -#if defined(__x86_64) || defined(__x86_64__) || defined(__amd64__) || \ - defined(__i386) -#define DOCTEST_BREAK_INTO_DEBUGGER() \ - __asm__("int $3\n" : :) // NOLINT (hicpp-no-assembler) -#else -#define DOCTEST_BREAK_INTO_DEBUGGER() \ - __asm__("brk #0"); // NOLINT (hicpp-no-assembler) -#endif -#elif DOCTEST_MSVC -#define DOCTEST_BREAK_INTO_DEBUGGER() __debugbreak() -#elif defined(__MINGW32__) -DOCTEST_GCC_SUPPRESS_WARNING_WITH_PUSH("-Wredundant-decls") -extern "C" __declspec(dllimport) void __stdcall DebugBreak(); -DOCTEST_GCC_SUPPRESS_WARNING_POP -#define DOCTEST_BREAK_INTO_DEBUGGER() ::DebugBreak() -#else // linux -#define DOCTEST_BREAK_INTO_DEBUGGER() (static_cast(0)) -#endif // linux -#endif // DOCTEST_BREAK_INTO_DEBUGGER - -// this is kept here for backwards compatibility since the config option was -// changed -#ifdef DOCTEST_CONFIG_USE_IOSFWD -#define DOCTEST_CONFIG_USE_STD_HEADERS -#endif // DOCTEST_CONFIG_USE_IOSFWD - -// for clang - always include ciso646 (which drags some std stuff) because -// we want to check if we are using libc++ with the _LIBCPP_VERSION macro in -// which case we don't want to forward declare stuff from std - for reference: -// https://github.com/doctest/doctest/issues/126 -// https://github.com/doctest/doctest/issues/356 -#if DOCTEST_CLANG -#include -#ifdef _LIBCPP_VERSION -#define DOCTEST_CONFIG_USE_STD_HEADERS -#endif // _LIBCPP_VERSION -#endif // clang - -#ifdef DOCTEST_CONFIG_USE_STD_HEADERS -#ifndef DOCTEST_CONFIG_INCLUDE_TYPE_TRAITS -#define DOCTEST_CONFIG_INCLUDE_TYPE_TRAITS -#endif // DOCTEST_CONFIG_INCLUDE_TYPE_TRAITS -#include -#include -#include -#else // DOCTEST_CONFIG_USE_STD_HEADERS - -// Forward declaring 'X' in namespace std is not permitted by the C++ Standard. 
-DOCTEST_MSVC_SUPPRESS_WARNING_WITH_PUSH(4643) - -namespace std { // NOLINT (cert-dcl58-cpp) -typedef decltype(nullptr) nullptr_t; -template -struct char_traits; -template <> -struct char_traits; -template -class basic_ostream; -typedef basic_ostream> ostream; -template -class basic_istream; -typedef basic_istream> istream; -template -class tuple; -#if DOCTEST_MSVC >= DOCTEST_COMPILER(19, 20, 0) -// see this issue on why this is needed: -// https://github.com/doctest/doctest/issues/183 -template -class allocator; -template -class basic_string; -using string = basic_string, allocator>; -#endif // VS 2019 -} // namespace std - -DOCTEST_MSVC_SUPPRESS_WARNING_POP - -#endif // DOCTEST_CONFIG_USE_STD_HEADERS - -#ifdef DOCTEST_CONFIG_INCLUDE_TYPE_TRAITS -#include -#endif // DOCTEST_CONFIG_INCLUDE_TYPE_TRAITS - -namespace doctest { - -DOCTEST_INTERFACE extern bool is_running_in_test; - -// A 24 byte string class (can be as small as 17 for x64 and 13 for x86) that -// can hold strings with length of up to 23 chars on the stack before going on -// the heap - the last byte of the buffer is used for: -// - "is small" bit - the highest bit - if "0" then it is small - otherwise its -// "1" (128) -// - if small - capacity left before going on the heap - using the lowest 5 bits -// - if small - 2 bits are left unused - the second and third highest ones -// - if small - acts as a null terminator if strlen() is 23 (24 including the -// null terminator) -// and the "is small" bit remains "0" ("as well as the capacity -// left") so its OK -// Idea taken from this lecture about the string implementation of -// facebook/folly - fbstring https://www.youtube.com/watch?v=kPR8h4-qZdk -// TODO: -// - optimizations - like not deleting memory unnecessarily in operator= and -// etc. -// - resize/reserve/clear -// - substr -// - replace -// - back/front -// - iterator stuff -// - find & friends -// - push_back/pop_back -// - assign/insert/erase -// - relational operators as free functions - taking const char* as one of the -// params -class DOCTEST_INTERFACE String { - static const unsigned len = 24; //! OCLINT avoid private static members - static const unsigned last = len - 1; //! 
OCLINT avoid private static members - - struct view // len should be more than sizeof(view) - because of the final - // byte for flags - { - char* ptr; - unsigned size; - unsigned capacity; - }; - - union { - char buf[len]; - view data; - }; - - char* allocate(unsigned sz); - - bool isOnStack() const { return (buf[last] & 128) == 0; } - void setOnHeap(); - void setLast(unsigned in = last); - - void copy(const String& other); - - public: - String(); - ~String(); - - // cppcheck-suppress noExplicitConstructor - String(const char* in); - String(const char* in, unsigned in_size); - - String(std::istream& in, unsigned in_size); - - String(const String& other); - String& operator=(const String& other); - - String& operator+=(const String& other); - - String(String&& other); - String& operator=(String&& other); - - char operator[](unsigned i) const; - char& operator[](unsigned i); - - // the only functions I'm willing to leave in the interface - available for - // inlining - const char* c_str() const - { - return const_cast(this)->c_str(); - } // NOLINT - char* c_str() - { - if (isOnStack()) - return reinterpret_cast(buf); - return data.ptr; - } - - unsigned size() const; - unsigned capacity() const; - - int compare(const char* other, bool no_case = false) const; - int compare(const String& other, bool no_case = false) const; -}; - -DOCTEST_INTERFACE String operator+(const String& lhs, const String& rhs); - -DOCTEST_INTERFACE bool operator==(const String& lhs, const String& rhs); -DOCTEST_INTERFACE bool operator!=(const String& lhs, const String& rhs); -DOCTEST_INTERFACE bool operator<(const String& lhs, const String& rhs); -DOCTEST_INTERFACE bool operator>(const String& lhs, const String& rhs); -DOCTEST_INTERFACE bool operator<=(const String& lhs, const String& rhs); -DOCTEST_INTERFACE bool operator>=(const String& lhs, const String& rhs); - -DOCTEST_INTERFACE std::ostream& operator<<(std::ostream& s, const String& in); - -namespace Color { -enum Enum { - None = 0, - White, - Red, - Green, - Blue, - Cyan, - Yellow, - Grey, - - Bright = 0x10, - - BrightRed = Bright | Red, - BrightGreen = Bright | Green, - LightGrey = Bright | Grey, - BrightWhite = Bright | White -}; - -DOCTEST_INTERFACE std::ostream& operator<<(std::ostream& s, Color::Enum code); -} // namespace Color - -namespace assertType { -enum Enum { - // macro traits - - is_warn = 1, - is_check = 2 * is_warn, - is_require = 2 * is_check, - - is_normal = 2 * is_require, - is_throws = 2 * is_normal, - is_throws_as = 2 * is_throws, - is_throws_with = 2 * is_throws_as, - is_nothrow = 2 * is_throws_with, - - is_false = 2 * is_nothrow, - is_unary = - 2 * - is_false, // not checked anywhere - used just to distinguish the types - - is_eq = 2 * is_unary, - is_ne = 2 * is_eq, - - is_lt = 2 * is_ne, - is_gt = 2 * is_lt, - - is_ge = 2 * is_gt, - is_le = 2 * is_ge, - - // macro types - - DT_WARN = is_normal | is_warn, - DT_CHECK = is_normal | is_check, - DT_REQUIRE = is_normal | is_require, - - DT_WARN_FALSE = is_normal | is_false | is_warn, - DT_CHECK_FALSE = is_normal | is_false | is_check, - DT_REQUIRE_FALSE = is_normal | is_false | is_require, - - DT_WARN_THROWS = is_throws | is_warn, - DT_CHECK_THROWS = is_throws | is_check, - DT_REQUIRE_THROWS = is_throws | is_require, - - DT_WARN_THROWS_AS = is_throws_as | is_warn, - DT_CHECK_THROWS_AS = is_throws_as | is_check, - DT_REQUIRE_THROWS_AS = is_throws_as | is_require, - - DT_WARN_THROWS_WITH = is_throws_with | is_warn, - DT_CHECK_THROWS_WITH = is_throws_with | is_check, - DT_REQUIRE_THROWS_WITH = 
is_throws_with | is_require, - - DT_WARN_THROWS_WITH_AS = is_throws_with | is_throws_as | is_warn, - DT_CHECK_THROWS_WITH_AS = is_throws_with | is_throws_as | is_check, - DT_REQUIRE_THROWS_WITH_AS = is_throws_with | is_throws_as | is_require, - - DT_WARN_NOTHROW = is_nothrow | is_warn, - DT_CHECK_NOTHROW = is_nothrow | is_check, - DT_REQUIRE_NOTHROW = is_nothrow | is_require, - - DT_WARN_EQ = is_normal | is_eq | is_warn, - DT_CHECK_EQ = is_normal | is_eq | is_check, - DT_REQUIRE_EQ = is_normal | is_eq | is_require, - - DT_WARN_NE = is_normal | is_ne | is_warn, - DT_CHECK_NE = is_normal | is_ne | is_check, - DT_REQUIRE_NE = is_normal | is_ne | is_require, - - DT_WARN_GT = is_normal | is_gt | is_warn, - DT_CHECK_GT = is_normal | is_gt | is_check, - DT_REQUIRE_GT = is_normal | is_gt | is_require, - - DT_WARN_LT = is_normal | is_lt | is_warn, - DT_CHECK_LT = is_normal | is_lt | is_check, - DT_REQUIRE_LT = is_normal | is_lt | is_require, - - DT_WARN_GE = is_normal | is_ge | is_warn, - DT_CHECK_GE = is_normal | is_ge | is_check, - DT_REQUIRE_GE = is_normal | is_ge | is_require, - - DT_WARN_LE = is_normal | is_le | is_warn, - DT_CHECK_LE = is_normal | is_le | is_check, - DT_REQUIRE_LE = is_normal | is_le | is_require, - - DT_WARN_UNARY = is_normal | is_unary | is_warn, - DT_CHECK_UNARY = is_normal | is_unary | is_check, - DT_REQUIRE_UNARY = is_normal | is_unary | is_require, - - DT_WARN_UNARY_FALSE = is_normal | is_false | is_unary | is_warn, - DT_CHECK_UNARY_FALSE = is_normal | is_false | is_unary | is_check, - DT_REQUIRE_UNARY_FALSE = is_normal | is_false | is_unary | is_require, -}; -} // namespace assertType - -DOCTEST_INTERFACE const char* assertString(assertType::Enum at); -DOCTEST_INTERFACE const char* failureString(assertType::Enum at); -DOCTEST_INTERFACE const char* skipPathFromFilename(const char* file); - -struct DOCTEST_INTERFACE TestCaseData { - String m_file; // the file in which the test was registered (using String - - // see #350) - unsigned m_line; // the line where the test was registered - const char* m_name; // name of the test case - const char* m_test_suite; // the test suite in which the test was added - const char* m_description; - bool m_skip; - bool m_no_breaks; - bool m_no_output; - bool m_may_fail; - bool m_should_fail; - int m_expected_failures; - double m_timeout; -}; - -struct DOCTEST_INTERFACE AssertData { - // common - for all asserts - const TestCaseData* m_test_case; - assertType::Enum m_at; - const char* m_file; - int m_line; - const char* m_expr; - bool m_failed; - - // exception-related - for all asserts - bool m_threw; - String m_exception; - - // for normal asserts - String m_decomp; - - // for specific exception-related asserts - bool m_threw_as; - const char* m_exception_type; - const char* m_exception_string; -}; - -struct DOCTEST_INTERFACE MessageData { - String m_string; - const char* m_file; - int m_line; - assertType::Enum m_severity; -}; - -struct DOCTEST_INTERFACE SubcaseSignature { - String m_name; - const char* m_file; - int m_line; - - bool operator<(const SubcaseSignature& other) const; -}; - -struct DOCTEST_INTERFACE IContextScope { - IContextScope(); - virtual ~IContextScope(); - virtual void stringify(std::ostream*) const = 0; -}; - -namespace detail { -struct DOCTEST_INTERFACE TestCase; -} // namespace detail - -struct ContextOptions //! 
OCLINT too many fields -{ - std::ostream* cout = nullptr; // stdout stream - String binary_name; // the test binary name - - const detail::TestCase* currentTest = nullptr; - - // == parameters from the command line - String out; // output filename - String order_by; // how tests should be ordered - unsigned rand_seed; // the seed for rand ordering - - unsigned first; // the first (matching) test to be executed - unsigned last; // the last (matching) test to be executed - - int abort_after; // stop tests after this many failed assertions - int subcase_filter_levels; // apply the subcase filters for the first N - // levels - - bool success; // include successful assertions in output - bool case_sensitive; // if filtering should be case sensitive - bool - exit; // if the program should be exited after the tests are ran/whatever - bool duration; // print the time duration of each test case - bool minimal; // minimal console output (only test failures) - bool quiet; // no console output - bool no_throw; // to skip exceptions-related assertion macros - bool no_exitcode; // if the framework should return 0 as the exitcode - bool no_run; // to not run the tests at all (can be done with an "*" exclude) - bool no_intro; // to not print the intro of the framework - bool no_version; // to not print the version of the framework - bool no_colors; // if output to the console should be colorized - bool force_colors; // forces the use of colors even when a tty cannot be - // detected - bool no_breaks; // to not break into the debugger - bool no_skip; // don't skip test cases which are marked to be skipped - bool gnu_file_line; // if line numbers should be surrounded with :x: and not - // (x): - bool no_path_in_filenames; // if the path to files should be removed from the - // output - bool no_line_numbers; // if source code line numbers should be omitted from - // the output - bool no_debug_output; // no output in the debug console when a debugger is - // attached - bool no_skipped_summary; // don't print "skipped" in the summary !!! - // UNDOCUMENTED !!! - bool no_time_in_output; // omit any time/timestamps from output !!! - // UNDOCUMENTED !!! 
- - bool help; // to print the help - bool version; // to print the version - bool count; // if only the count of matching tests is to be retrieved - bool list_test_cases; // to list all tests matching the filters - bool list_test_suites; // to list all suites matching the filters - bool list_reporters; // lists all registered reporters -}; - -namespace detail { -template -struct enable_if {}; - -template -struct enable_if { - typedef TYPE type; -}; - -// clang-format off - template struct remove_reference { typedef T type; }; - template struct remove_reference { typedef T type; }; - template struct remove_reference { typedef T type; }; - - template U declval(int); - - template T declval(long); - - template auto declval() DOCTEST_NOEXCEPT -> decltype(declval(0)) ; - - template struct is_lvalue_reference { const static bool value=false; }; - template struct is_lvalue_reference { const static bool value=true; }; - - template struct is_rvalue_reference { const static bool value=false; }; - template struct is_rvalue_reference { const static bool value=true; }; - - template - inline T&& forward(typename remove_reference::type& t) DOCTEST_NOEXCEPT - { - return static_cast(t); - } - - template - inline T&& forward(typename remove_reference::type&& t) DOCTEST_NOEXCEPT - { - static_assert(!is_lvalue_reference::value, - "Can not forward an rvalue as an lvalue."); - return static_cast(t); - } - - template struct remove_const { typedef T type; }; - template struct remove_const { typedef T type; }; -#ifdef DOCTEST_CONFIG_INCLUDE_TYPE_TRAITS - template struct is_enum : public std::is_enum {}; - template struct underlying_type : public std::underlying_type {}; -#else - // Use compiler intrinsics - template struct is_enum { DOCTEST_CONSTEXPR static bool value = __is_enum(T); }; - template struct underlying_type { typedef __underlying_type(T) type; }; -#endif -// clang-format on - -template -struct deferred_false -// cppcheck-suppress unusedStructMember -{ - static const bool value = false; -}; - -namespace has_insertion_operator_impl { -std::ostream& os(); -template -DOCTEST_REF_WRAP(T) -val(); - -template -struct check { - static DOCTEST_CONSTEXPR bool value = false; -}; - -template -struct check(), void())> { - static DOCTEST_CONSTEXPR bool value = true; -}; -} // namespace has_insertion_operator_impl - -template -using has_insertion_operator = has_insertion_operator_impl::check; - -DOCTEST_INTERFACE std::ostream* tlssPush(); -DOCTEST_INTERFACE String tlssPop(); - - -template -struct StringMakerBase { - template - static String convert(const DOCTEST_REF_WRAP(T)) - { - return "{?}"; - } -}; - -// Vector and various type other than pointer or array. -template -struct filldata { - static void fill(std::ostream* stream, const T& in) { *stream << in; } -}; - -template -struct filldata { - static void fill(std::ostream* stream, const T (&in)[N]) - { - for (unsigned long i = 0; i < N; i++) { - *stream << in[i]; - } - } -}; - -// Specialized since we don't want the terminating null byte! -template -struct filldata { - static void fill(std::ostream* stream, const char (&in)[N]) { *stream << in; } -}; - -template -void -filloss(std::ostream* stream, const T& in) -{ - filldata::fill(stream, in); -} - -template -void -filloss(std::ostream* stream, const T (&in)[N]) -{ - // T[N], T(&)[N], T(&&)[N] have same behaviour. - // Hence remove reference. 
- filldata::type>::fill(stream, in); -} - -template <> -struct StringMakerBase { - template - static String convert(const DOCTEST_REF_WRAP(T) in) - { - /* When parameter "in" is a null terminated const char* it works. - * When parameter "in" is a T arr[N] without '\0' we can fill the - * stringstream with N objects (T=char).If in is char pointer * - * without '\0' , it would cause segfault - * stepping over unaccessible memory. - */ - - std::ostream* stream = tlssPush(); - filloss(stream, in); - return tlssPop(); - } -}; - -DOCTEST_INTERFACE String rawMemoryToString(const void* object, unsigned size); - -template -String -rawMemoryToString(const DOCTEST_REF_WRAP(T) object) -{ - return rawMemoryToString(&object, sizeof(object)); -} - -template -const char* -type_to_string() -{ - return "<>"; -} -} // namespace detail - -template -struct StringMaker - : public detail::StringMakerBase::value> { -}; - -template -struct StringMaker { - template - static String convert(U* p) - { - if (p) - return detail::rawMemoryToString(p); - return "NULL"; - } -}; - -template -struct StringMaker { - static String convert(R C::*p) - { - if (p) - return detail::rawMemoryToString(p); - return "NULL"; - } -}; - -template < - typename T, - typename detail::enable_if::value, bool>::type = true> -String -toString(const DOCTEST_REF_WRAP(T) value) -{ - return StringMaker::convert(value); -} - -#ifdef DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING -DOCTEST_INTERFACE String toString(char* in); -DOCTEST_INTERFACE String toString(const char* in); -#endif // DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING -DOCTEST_INTERFACE String toString(bool in); -DOCTEST_INTERFACE String toString(float in); -DOCTEST_INTERFACE String toString(double in); -DOCTEST_INTERFACE String toString(double long in); - -DOCTEST_INTERFACE String toString(char in); -DOCTEST_INTERFACE String toString(char signed in); -DOCTEST_INTERFACE String toString(char unsigned in); -DOCTEST_INTERFACE String toString(int short in); -DOCTEST_INTERFACE String toString(int short unsigned in); -DOCTEST_INTERFACE String toString(int in); -DOCTEST_INTERFACE String toString(int unsigned in); -DOCTEST_INTERFACE String toString(int long in); -DOCTEST_INTERFACE String toString(int long unsigned in); -DOCTEST_INTERFACE String toString(int long long in); -DOCTEST_INTERFACE String toString(int long long unsigned in); -DOCTEST_INTERFACE String toString(std::nullptr_t in); - -template < - typename T, - typename detail::enable_if::value, bool>::type = true> -String -toString(const DOCTEST_REF_WRAP(T) value) -{ - typedef typename detail::underlying_type::type UT; - return toString(static_cast(value)); -} - -#if DOCTEST_MSVC >= DOCTEST_COMPILER(19, 20, 0) -// see this issue on why this is needed: -// https://github.com/doctest/doctest/issues/183 -DOCTEST_INTERFACE String toString(const std::string& in); -#endif // VS 2019 - -class DOCTEST_INTERFACE Approx { - public: - explicit Approx(double value); - - Approx operator()(double value) const; - -#ifdef DOCTEST_CONFIG_INCLUDE_TYPE_TRAITS - template - explicit Approx( - const T& value, typename detail::enable_if< - std::is_constructible::value>::type* = - static_cast(nullptr)) - { - *this = Approx(static_cast(value)); - } -#endif // DOCTEST_CONFIG_INCLUDE_TYPE_TRAITS - - Approx& epsilon(double newEpsilon); - -#ifdef DOCTEST_CONFIG_INCLUDE_TYPE_TRAITS - template - typename detail::enable_if< - std::is_constructible::value, Approx&>::type - epsilon(const T& newEpsilon) - { - m_epsilon = static_cast(newEpsilon); - return *this; - } -#endif // 
DOCTEST_CONFIG_INCLUDE_TYPE_TRAITS - - Approx& scale(double newScale); - -#ifdef DOCTEST_CONFIG_INCLUDE_TYPE_TRAITS - template - typename detail::enable_if< - std::is_constructible::value, Approx&>::type - scale(const T& newScale) - { - m_scale = static_cast(newScale); - return *this; - } -#endif // DOCTEST_CONFIG_INCLUDE_TYPE_TRAITS - - // clang-format off - DOCTEST_INTERFACE friend bool operator==(double lhs, const Approx & rhs); - DOCTEST_INTERFACE friend bool operator==(const Approx & lhs, double rhs); - DOCTEST_INTERFACE friend bool operator!=(double lhs, const Approx & rhs); - DOCTEST_INTERFACE friend bool operator!=(const Approx & lhs, double rhs); - DOCTEST_INTERFACE friend bool operator<=(double lhs, const Approx & rhs); - DOCTEST_INTERFACE friend bool operator<=(const Approx & lhs, double rhs); - DOCTEST_INTERFACE friend bool operator>=(double lhs, const Approx & rhs); - DOCTEST_INTERFACE friend bool operator>=(const Approx & lhs, double rhs); - DOCTEST_INTERFACE friend bool operator< (double lhs, const Approx & rhs); - DOCTEST_INTERFACE friend bool operator< (const Approx & lhs, double rhs); - DOCTEST_INTERFACE friend bool operator> (double lhs, const Approx & rhs); - DOCTEST_INTERFACE friend bool operator> (const Approx & lhs, double rhs); - - DOCTEST_INTERFACE friend String toString(const Approx& in); - -#ifdef DOCTEST_CONFIG_INCLUDE_TYPE_TRAITS -#define DOCTEST_APPROX_PREFIX \ - template friend typename detail::enable_if::value, bool>::type - - DOCTEST_APPROX_PREFIX operator==(const T& lhs, const Approx& rhs) { return operator==(double(lhs), rhs); } - DOCTEST_APPROX_PREFIX operator==(const Approx& lhs, const T& rhs) { return operator==(rhs, lhs); } - DOCTEST_APPROX_PREFIX operator!=(const T& lhs, const Approx& rhs) { return !operator==(lhs, rhs); } - DOCTEST_APPROX_PREFIX operator!=(const Approx& lhs, const T& rhs) { return !operator==(rhs, lhs); } - DOCTEST_APPROX_PREFIX operator<=(const T& lhs, const Approx& rhs) { return double(lhs) < rhs.m_value || lhs == rhs; } - DOCTEST_APPROX_PREFIX operator<=(const Approx& lhs, const T& rhs) { return lhs.m_value < double(rhs) || lhs == rhs; } - DOCTEST_APPROX_PREFIX operator>=(const T& lhs, const Approx& rhs) { return double(lhs) > rhs.m_value || lhs == rhs; } - DOCTEST_APPROX_PREFIX operator>=(const Approx& lhs, const T& rhs) { return lhs.m_value > double(rhs) || lhs == rhs; } - DOCTEST_APPROX_PREFIX operator< (const T& lhs, const Approx& rhs) { return double(lhs) < rhs.m_value && lhs != rhs; } - DOCTEST_APPROX_PREFIX operator< (const Approx& lhs, const T& rhs) { return lhs.m_value < double(rhs) && lhs != rhs; } - DOCTEST_APPROX_PREFIX operator> (const T& lhs, const Approx& rhs) { return double(lhs) > rhs.m_value && lhs != rhs; } - DOCTEST_APPROX_PREFIX operator> (const Approx& lhs, const T& rhs) { return lhs.m_value > double(rhs) && lhs != rhs; } -#undef DOCTEST_APPROX_PREFIX -#endif // DOCTEST_CONFIG_INCLUDE_TYPE_TRAITS - - // clang-format on - - private: - double m_epsilon; - double m_scale; - double m_value; -}; - -DOCTEST_INTERFACE String toString(const Approx& in); - -DOCTEST_INTERFACE const ContextOptions* getContextOptions(); - -#if !defined(DOCTEST_CONFIG_DISABLE) - -namespace detail { -// clang-format off -#ifdef DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING - template struct decay_array { typedef T type; }; - template struct decay_array { typedef T* type; }; - template struct decay_array { typedef T* type; }; - - template struct not_char_pointer { enum { value = 1 }; }; - template<> struct not_char_pointer { enum { value = 0 }; 
}; - template<> struct not_char_pointer { enum { value = 0 }; }; - - template struct can_use_op : public not_char_pointer::type> {}; -#endif // DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING -// clang-format on - -struct DOCTEST_INTERFACE TestFailureException {}; - -DOCTEST_INTERFACE bool checkIfShouldThrow(assertType::Enum at); - -#ifndef DOCTEST_CONFIG_NO_EXCEPTIONS -DOCTEST_NORETURN -#endif // DOCTEST_CONFIG_NO_EXCEPTIONS -DOCTEST_INTERFACE void throwException(); - -struct DOCTEST_INTERFACE Subcase { - SubcaseSignature m_signature; - bool m_entered = false; - - Subcase(const String& name, const char* file, int line); - ~Subcase(); - - operator bool() const; -}; - -template -String -stringifyBinaryExpr( - const DOCTEST_REF_WRAP(L) lhs, const char* op, - const DOCTEST_REF_WRAP(R) rhs) -{ - // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) - return toString(lhs) + op + toString(rhs); -} - -#if DOCTEST_CLANG && DOCTEST_CLANG < DOCTEST_COMPILER(3, 6, 0) -DOCTEST_CLANG_SUPPRESS_WARNING_WITH_PUSH("-Wunused-comparison") -#endif - -// This will check if there is any way it could find a operator like member or -// friend and uses it. If not it doesn't find the operator or if the operator at -// global scope is defined after this template, the template won't be -// instantiated due to SFINAE. Once the template is not instantiated it can look -// for global operator using normal conversions. -#define SFINAE_OP(ret, op) \ - decltype((void)(doctest::detail::declval() op doctest::detail::declval()), ret{}) - -#define DOCTEST_DO_BINARY_EXPRESSION_COMPARISON(op, op_str, op_macro) \ - template \ - DOCTEST_NOINLINE SFINAE_OP(Result, op) operator op(const R&& rhs) \ - { \ - bool res = op_macro( \ - doctest::detail::forward(lhs), \ - doctest::detail::forward(rhs)); \ - if (m_at & assertType::is_false) \ - res = !res; \ - if (!res || doctest::getContextOptions()->success) \ - return Result(res, stringifyBinaryExpr(lhs, op_str, rhs)); \ - return Result(res); \ - } \ - template < \ - typename R, typename enable_if< \ - !doctest::detail::is_rvalue_reference::value, \ - void>::type* = nullptr> \ - DOCTEST_NOINLINE SFINAE_OP(Result, op) operator op(const R& rhs) \ - { \ - bool res = op_macro(doctest::detail::forward(lhs), rhs); \ - if (m_at & assertType::is_false) \ - res = !res; \ - if (!res || doctest::getContextOptions()->success) \ - return Result(res, stringifyBinaryExpr(lhs, op_str, rhs)); \ - return Result(res); \ - } - -// more checks could be added - like in Catch: -// https://github.com/catchorg/Catch2/pull/1480/files -// https://github.com/catchorg/Catch2/pull/1481/files -#define DOCTEST_FORBIT_EXPRESSION(rt, op) \ - template \ - rt& operator op(const R&) \ - { \ - static_assert( \ - deferred_false::value, \ - "Expression Too Complex Please Rewrite As Binary Comparison!"); \ - return *this; \ - } - -struct DOCTEST_INTERFACE Result { - bool m_passed; - String m_decomp; - - Result() = default; - Result(bool passed, const String& decomposition = String()); - - // forbidding some expressions based on this table: - // https://en.cppreference.com/w/cpp/language/operator_precedence - DOCTEST_FORBIT_EXPRESSION(Result, &) - DOCTEST_FORBIT_EXPRESSION(Result, ^) - DOCTEST_FORBIT_EXPRESSION(Result, |) - DOCTEST_FORBIT_EXPRESSION(Result, &&) - DOCTEST_FORBIT_EXPRESSION(Result, ||) - DOCTEST_FORBIT_EXPRESSION(Result, ==) - DOCTEST_FORBIT_EXPRESSION(Result, !=) - DOCTEST_FORBIT_EXPRESSION(Result, <) - DOCTEST_FORBIT_EXPRESSION(Result, >) - DOCTEST_FORBIT_EXPRESSION(Result, <=) - DOCTEST_FORBIT_EXPRESSION(Result, 
>=) - DOCTEST_FORBIT_EXPRESSION(Result, =) - DOCTEST_FORBIT_EXPRESSION(Result, +=) - DOCTEST_FORBIT_EXPRESSION(Result, -=) - DOCTEST_FORBIT_EXPRESSION(Result, *=) - DOCTEST_FORBIT_EXPRESSION(Result, /=) - DOCTEST_FORBIT_EXPRESSION(Result, %=) - DOCTEST_FORBIT_EXPRESSION(Result, <<=) - DOCTEST_FORBIT_EXPRESSION(Result, >>=) - DOCTEST_FORBIT_EXPRESSION(Result, &=) - DOCTEST_FORBIT_EXPRESSION(Result, ^=) - DOCTEST_FORBIT_EXPRESSION(Result, |=) -}; - -#ifndef DOCTEST_CONFIG_NO_COMPARISON_WARNING_SUPPRESSION - -DOCTEST_CLANG_SUPPRESS_WARNING_PUSH -DOCTEST_CLANG_SUPPRESS_WARNING("-Wsign-conversion") -DOCTEST_CLANG_SUPPRESS_WARNING("-Wsign-compare") -// DOCTEST_CLANG_SUPPRESS_WARNING("-Wdouble-promotion") -// DOCTEST_CLANG_SUPPRESS_WARNING("-Wconversion") -// DOCTEST_CLANG_SUPPRESS_WARNING("-Wfloat-equal") - -DOCTEST_GCC_SUPPRESS_WARNING_PUSH -DOCTEST_GCC_SUPPRESS_WARNING("-Wsign-conversion") -DOCTEST_GCC_SUPPRESS_WARNING("-Wsign-compare") -// DOCTEST_GCC_SUPPRESS_WARNING("-Wdouble-promotion") -// DOCTEST_GCC_SUPPRESS_WARNING("-Wconversion") -// DOCTEST_GCC_SUPPRESS_WARNING("-Wfloat-equal") - -DOCTEST_MSVC_SUPPRESS_WARNING_PUSH -// https://stackoverflow.com/questions/39479163 what's the difference between -// 4018 and 4389 -DOCTEST_MSVC_SUPPRESS_WARNING(4388) // signed/unsigned mismatch -DOCTEST_MSVC_SUPPRESS_WARNING(4389) // 'operator' : signed/unsigned mismatch -DOCTEST_MSVC_SUPPRESS_WARNING(4018) // 'expression' : signed/unsigned mismatch -// DOCTEST_MSVC_SUPPRESS_WARNING(4805) // 'operation' : unsafe mix of type -// 'type' and type 'type' in operation - -#endif // DOCTEST_CONFIG_NO_COMPARISON_WARNING_SUPPRESSION - -// clang-format off -#ifndef DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING -#define DOCTEST_COMPARISON_RETURN_TYPE bool -#else // DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING -#define DOCTEST_COMPARISON_RETURN_TYPE typename enable_if::value || can_use_op::value, bool>::type - // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) - inline bool eq(const char* lhs, const char* rhs) { return String(lhs) == String(rhs); } - inline bool ne(const char* lhs, const char* rhs) { return String(lhs) != String(rhs); } - inline bool lt(const char* lhs, const char* rhs) { return String(lhs) < String(rhs); } - inline bool gt(const char* lhs, const char* rhs) { return String(lhs) > String(rhs); } - inline bool le(const char* lhs, const char* rhs) { return String(lhs) <= String(rhs); } - inline bool ge(const char* lhs, const char* rhs) { return String(lhs) >= String(rhs); } -#endif // DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING -// clang-format on - -#define DOCTEST_RELATIONAL_OP(name, op) \ - template \ - DOCTEST_COMPARISON_RETURN_TYPE name( \ - const DOCTEST_REF_WRAP(L) lhs, const DOCTEST_REF_WRAP(R) rhs) \ - { \ - return lhs op rhs; \ - } - -DOCTEST_RELATIONAL_OP(eq, ==) -DOCTEST_RELATIONAL_OP(ne, !=) -DOCTEST_RELATIONAL_OP(lt, <) -DOCTEST_RELATIONAL_OP(gt, >) -DOCTEST_RELATIONAL_OP(le, <=) -DOCTEST_RELATIONAL_OP(ge, >=) - -#ifndef DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING -#define DOCTEST_CMP_EQ(l, r) l == r -#define DOCTEST_CMP_NE(l, r) l != r -#define DOCTEST_CMP_GT(l, r) l > r -#define DOCTEST_CMP_LT(l, r) l < r -#define DOCTEST_CMP_GE(l, r) l >= r -#define DOCTEST_CMP_LE(l, r) l <= r -#else // DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING -#define DOCTEST_CMP_EQ(l, r) eq(l, r) -#define DOCTEST_CMP_NE(l, r) ne(l, r) -#define DOCTEST_CMP_GT(l, r) gt(l, r) -#define DOCTEST_CMP_LT(l, r) lt(l, r) -#define DOCTEST_CMP_GE(l, r) ge(l, r) -#define DOCTEST_CMP_LE(l, r) le(l, r) -#endif // 
DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING - -template -// cppcheck-suppress copyCtorAndEqOperator -struct Expression_lhs { - L lhs; - assertType::Enum m_at; - - explicit Expression_lhs(L&& in, assertType::Enum at) - : lhs(doctest::detail::forward(in)), m_at(at) - { - } - - DOCTEST_NOINLINE operator Result() - { - // this is needed only for MSVC 2015 - DOCTEST_MSVC_SUPPRESS_WARNING_WITH_PUSH( - 4800) // 'int': forcing value to bool - bool res = static_cast(lhs); - DOCTEST_MSVC_SUPPRESS_WARNING_POP - if (m_at & assertType::is_false) //! OCLINT bitwise operator in conditional - res = !res; - - if (!res || getContextOptions()->success) - return Result(res, toString(lhs)); - return Result(res); - } - - /* This is required for user-defined conversions from Expression_lhs to L */ - operator L() const { return lhs; } - - // clang-format off - DOCTEST_DO_BINARY_EXPRESSION_COMPARISON(==, " == ", DOCTEST_CMP_EQ) //!OCLINT bitwise operator in conditional - DOCTEST_DO_BINARY_EXPRESSION_COMPARISON(!=, " != ", DOCTEST_CMP_NE) //!OCLINT bitwise operator in conditional - DOCTEST_DO_BINARY_EXPRESSION_COMPARISON(>, " > ", DOCTEST_CMP_GT) //!OCLINT bitwise operator in conditional - DOCTEST_DO_BINARY_EXPRESSION_COMPARISON(<, " < ", DOCTEST_CMP_LT) //!OCLINT bitwise operator in conditional - DOCTEST_DO_BINARY_EXPRESSION_COMPARISON(>=, " >= ", DOCTEST_CMP_GE) //!OCLINT bitwise operator in conditional - DOCTEST_DO_BINARY_EXPRESSION_COMPARISON(<=, " <= ", DOCTEST_CMP_LE) //!OCLINT bitwise operator in conditional - // clang-format on - - // forbidding some expressions based on this table: - // https://en.cppreference.com/w/cpp/language/operator_precedence - DOCTEST_FORBIT_EXPRESSION(Expression_lhs, &) - DOCTEST_FORBIT_EXPRESSION(Expression_lhs, ^) - DOCTEST_FORBIT_EXPRESSION(Expression_lhs, |) - DOCTEST_FORBIT_EXPRESSION(Expression_lhs, &&) - DOCTEST_FORBIT_EXPRESSION(Expression_lhs, ||) - DOCTEST_FORBIT_EXPRESSION(Expression_lhs, =) - DOCTEST_FORBIT_EXPRESSION(Expression_lhs, +=) - DOCTEST_FORBIT_EXPRESSION(Expression_lhs, -=) - DOCTEST_FORBIT_EXPRESSION(Expression_lhs, *=) - DOCTEST_FORBIT_EXPRESSION(Expression_lhs, /=) - DOCTEST_FORBIT_EXPRESSION(Expression_lhs, %=) - DOCTEST_FORBIT_EXPRESSION(Expression_lhs, <<=) - DOCTEST_FORBIT_EXPRESSION(Expression_lhs, >>=) - DOCTEST_FORBIT_EXPRESSION(Expression_lhs, &=) - DOCTEST_FORBIT_EXPRESSION(Expression_lhs, ^=) - DOCTEST_FORBIT_EXPRESSION(Expression_lhs, |=) - // these 2 are unfortunate because they should be allowed - they have higher - // precedence over the comparisons, but the ExpressionDecomposer class uses - // the left shift operator to capture the left operand of the binary - // expression... - DOCTEST_FORBIT_EXPRESSION(Expression_lhs, <<) - DOCTEST_FORBIT_EXPRESSION(Expression_lhs, >>) -}; - -#ifndef DOCTEST_CONFIG_NO_COMPARISON_WARNING_SUPPRESSION - -DOCTEST_CLANG_SUPPRESS_WARNING_POP -DOCTEST_MSVC_SUPPRESS_WARNING_POP -DOCTEST_GCC_SUPPRESS_WARNING_POP - -#endif // DOCTEST_CONFIG_NO_COMPARISON_WARNING_SUPPRESSION - -#if DOCTEST_CLANG && DOCTEST_CLANG < DOCTEST_COMPILER(3, 6, 0) -DOCTEST_CLANG_SUPPRESS_WARNING_POP -#endif - -struct DOCTEST_INTERFACE ExpressionDecomposer { - assertType::Enum m_at; - - ExpressionDecomposer(assertType::Enum at); - - // The right operator for capturing expressions is "<=" instead of "<<" (based - // on the operator precedence table) but then there will be warnings from GCC - // about "-Wparentheses" and since "_Pragma()" is problematic this will stay - // for now... 
-struct DOCTEST_INTERFACE ExpressionDecomposer { - assertType::Enum m_at; - - ExpressionDecomposer(assertType::Enum at); - - // The right operator for capturing expressions is "<=" instead of "<<" (based - // on the operator precedence table) but then there will be warnings from GCC - // about "-Wparentheses" and since "_Pragma()" is problematic this will stay - // for now... - // https://github.com/catchorg/Catch2/issues/870 - // https://github.com/catchorg/Catch2/issues/565 - template <typename L> - Expression_lhs<const L> operator<<(const L&& operand) - { - return Expression_lhs<const L>( - doctest::detail::forward<const L>(operand), m_at); - } - - template < - typename L, typename enable_if< - !doctest::detail::is_rvalue_reference<L>::value, - void>::type* = nullptr> - Expression_lhs<const L&> operator<<(const L& operand) - { - return Expression_lhs<const L&>(operand, m_at); - } -}; - -struct DOCTEST_INTERFACE TestSuite { - const char* m_test_suite = nullptr; - const char* m_description = nullptr; - bool m_skip = false; - bool m_no_breaks = false; - bool m_no_output = false; - bool m_may_fail = false; - bool m_should_fail = false; - int m_expected_failures = 0; - double m_timeout = 0; - - TestSuite& operator*(const char* in); - - template <typename T> - TestSuite& operator*(const T& in) - { - in.fill(*this); - return *this; - } -}; - -typedef void (*funcType)(); - -struct DOCTEST_INTERFACE TestCase : public TestCaseData { - funcType m_test; // a function pointer to the test case - - const char* - m_type; // for templated test cases - gets appended to the real name - int m_template_id; // an ID used to distinguish between the different - // versions of a templated test case - String m_full_name; // contains the name (only for templated test cases!) + - // the template type - - TestCase( - funcType test, const char* file, unsigned line, - const TestSuite& test_suite, const char* type = "", int template_id = -1); - - TestCase(const TestCase& other); - - DOCTEST_MSVC_SUPPRESS_WARNING_WITH_PUSH( - 26434) // hides a non-virtual function - TestCase& operator=(const TestCase& other); - DOCTEST_MSVC_SUPPRESS_WARNING_POP - - TestCase& operator*(const char* in); - - template <typename T> - TestCase& operator*(const T& in) - { - in.fill(*this); - return *this; - } - - bool operator<(const TestCase& other) const; -}; - -// forward declarations of functions used by the macros -DOCTEST_INTERFACE int regTest(const TestCase& tc); -DOCTEST_INTERFACE int setTestSuite(const TestSuite& ts); -DOCTEST_INTERFACE bool isDebuggerActive(); - -template <typename T> -int -instantiationHelper(const T&) -{ - return 0; -} - -namespace binaryAssertComparison { -enum Enum { eq = 0, ne, gt, lt, ge, le }; -} // namespace binaryAssertComparison - -// clang-format off - template <int, class L, class R> struct RelationalComparator { bool operator()(const DOCTEST_REF_WRAP(L), const DOCTEST_REF_WRAP(R) ) const { return false; } }; - -#define DOCTEST_BINARY_RELATIONAL_OP(n, op) \ - template <class L, class R> struct RelationalComparator<n, L, R> { bool operator()(const DOCTEST_REF_WRAP(L) lhs, const DOCTEST_REF_WRAP(R) rhs) const { return op(lhs, rhs); } }; -// clang-format on - -DOCTEST_BINARY_RELATIONAL_OP(0, doctest::detail::eq) -DOCTEST_BINARY_RELATIONAL_OP(1, doctest::detail::ne) -DOCTEST_BINARY_RELATIONAL_OP(2, doctest::detail::gt) -DOCTEST_BINARY_RELATIONAL_OP(3, doctest::detail::lt) -DOCTEST_BINARY_RELATIONAL_OP(4, doctest::detail::ge) -DOCTEST_BINARY_RELATIONAL_OP(5, doctest::detail::le)
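The six instantiations above give each binaryAssertComparison enumerator its own RelationalComparator specialization, so the binary assert macros can pick a comparator from a compile-time integer. Reduced to its shape (illustrative stand-alone code, not the header's own):

    // Tag-dispatch on a compile-time integer, as RelationalComparator does.
    enum Op { op_eq = 0, op_ne = 1 };

    template <int, class L, class R>
    struct Cmp { bool operator()(const L&, const R&) const { return false; } };

    template <class L, class R>
    struct Cmp<op_eq, L, R> { bool operator()(const L& l, const R& r) const { return l == r; } };

    template <class L, class R>
    struct Cmp<op_ne, L, R> { bool operator()(const L& l, const R& r) const { return l != r; } };

    int main() {
        // 1 == 1 and 1 != 2, so this returns 0
        return Cmp<op_eq, int, int>()(1, 1) && Cmp<op_ne, int, int>()(1, 2) ? 0 : 1;
    }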
-struct DOCTEST_INTERFACE ResultBuilder : public AssertData { - ResultBuilder( - assertType::Enum at, const char* file, int line, const char* expr, - const char* exception_type = "", const char* exception_string = ""); - - void setResult(const Result& res); - - template <int comparison, typename L, typename R> - DOCTEST_NOINLINE bool binary_assert( - const DOCTEST_REF_WRAP(L) lhs, const DOCTEST_REF_WRAP(R) rhs) - { - m_failed = !RelationalComparator<comparison, L, R>()(lhs, rhs); - if (m_failed || getContextOptions()->success) - m_decomp = stringifyBinaryExpr(lhs, ", ", rhs); - return !m_failed; - } - - template <typename L> - DOCTEST_NOINLINE bool unary_assert(const DOCTEST_REF_WRAP(L) val) - { - m_failed = !val; - - if (m_at & assertType::is_false) //! OCLINT bitwise operator in conditional - m_failed = !m_failed; - - if (m_failed || getContextOptions()->success) - m_decomp = toString(val); - - return !m_failed; - } - - void translateException(); - - bool log(); - void react() const; -}; - -namespace assertAction { -enum Enum { nothing = 0, dbgbreak = 1, shouldthrow = 2 }; -} // namespace assertAction - -DOCTEST_INTERFACE void failed_out_of_a_testing_context(const AssertData& ad); - -DOCTEST_INTERFACE bool decomp_assert( - assertType::Enum at, const char* file, int line, const char* expr, - Result result); - -#define DOCTEST_ASSERT_OUT_OF_TESTS(decomp) \ - do { \ - if (!is_running_in_test) { \ - if (failed) { \ - ResultBuilder rb(at, file, line, expr); \ - rb.m_failed = failed; \ - rb.m_decomp = decomp; \ - failed_out_of_a_testing_context(rb); \ - if (isDebuggerActive() && !getContextOptions()->no_breaks) \ - DOCTEST_BREAK_INTO_DEBUGGER(); \ - if (checkIfShouldThrow(at)) \ - throwException(); \ - } \ - return !failed; \ - } \ - } while (false) - -#define DOCTEST_ASSERT_IN_TESTS(decomp) \ - ResultBuilder rb(at, file, line, expr); \ - rb.m_failed = failed; \ - if (rb.m_failed || getContextOptions()->success) \ - rb.m_decomp = decomp; \ - if (rb.log()) \ - DOCTEST_BREAK_INTO_DEBUGGER(); \ - if (rb.m_failed && checkIfShouldThrow(at)) \ - throwException() - -template <int comparison, typename L, typename R> -DOCTEST_NOINLINE bool -binary_assert( - assertType::Enum at, const char* file, int line, const char* expr, - const DOCTEST_REF_WRAP(L) lhs, const DOCTEST_REF_WRAP(R) rhs) -{ - bool failed = !RelationalComparator<comparison, L, R>()(lhs, rhs); - - // ################################################################################### - // IF THE DEBUGGER BREAKS HERE - GO 1 LEVEL UP IN THE CALLSTACK FOR THE - // FAILING ASSERT THIS IS THE EFFECT OF HAVING - // 'DOCTEST_CONFIG_SUPER_FAST_ASSERTS' DEFINED - // ################################################################################### - DOCTEST_ASSERT_OUT_OF_TESTS(stringifyBinaryExpr(lhs, ", ", rhs)); - DOCTEST_ASSERT_IN_TESTS(stringifyBinaryExpr(lhs, ", ", rhs)); - return !failed; -} - -template <typename L> -DOCTEST_NOINLINE bool -unary_assert( - assertType::Enum at, const char* file, int line, const char* expr, - const DOCTEST_REF_WRAP(L) val) -{ - bool failed = !val; - - if (at & assertType::is_false) //! OCLINT bitwise operator in conditional - failed = !failed; - - // ################################################################################### - // IF THE DEBUGGER BREAKS HERE - GO 1 LEVEL UP IN THE CALLSTACK FOR THE - // FAILING ASSERT THIS IS THE EFFECT OF HAVING - // 'DOCTEST_CONFIG_SUPER_FAST_ASSERTS' DEFINED - // ################################################################################### - DOCTEST_ASSERT_OUT_OF_TESTS(toString(val)); - DOCTEST_ASSERT_IN_TESTS(toString(val)); - return !failed; -} - -struct DOCTEST_INTERFACE IExceptionTranslator { - IExceptionTranslator(); - virtual ~IExceptionTranslator(); - virtual bool translate(String&) const = 0; -};
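IExceptionTranslator is the hook behind REGISTER_EXCEPTION_TRANSLATOR (the macro is defined further down in this header): a translator catches one specific exception type via rethrow and stringifies it for the test report. Typical client-side usage, assuming doctest.h is included and `MyError` is the user's own type:

    #include <string>

    struct MyError { std::string msg; };

    REGISTER_EXCEPTION_TRANSLATOR(MyError& e) {
        // The returned string is shown when a MyError escapes a test case.
        return doctest::String(e.msg.c_str());
    }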
-template <typename T> -class ExceptionTranslator : public IExceptionTranslator //! OCLINT destructor of virtual class -{ - public: - explicit ExceptionTranslator(String (*translateFunction)(T)) - : m_translateFunction(translateFunction) - { - } - - bool translate(String& res) const override - { -#ifndef DOCTEST_CONFIG_NO_EXCEPTIONS - try { - throw; // lgtm [cpp/rethrow-no-exception] - // cppcheck-suppress catchExceptionByValue - } - catch (T ex) { // NOLINT - res = m_translateFunction(ex); //! OCLINT parameter reassignment - return true; - } - catch (...) { - } //! OCLINT - empty catch statement -#endif // DOCTEST_CONFIG_NO_EXCEPTIONS - static_cast<void>(res); // to silence -Wunused-parameter - return false; - } - - private: - String (*m_translateFunction)(T); -}; - -DOCTEST_INTERFACE void registerExceptionTranslatorImpl( - const IExceptionTranslator* et); - -template <bool C> -struct StringStreamBase { - template <typename T> - static void convert(std::ostream* s, const T& in) - { - *s << toString(in); - } - - // always treat char* as a string in this context - no matter - // if DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING is defined - static void convert(std::ostream* s, const char* in) { *s << String(in); } -}; - -template <> -struct StringStreamBase<true> { - template <typename T> - static void convert(std::ostream* s, const T& in) - { - *s << in; - } -}; - -template <typename T> -struct StringStream - : public StringStreamBase<has_insertion_operator<T>::value> {}; - -template <typename T> -void -toStream(std::ostream* s, const T& value) -{ - StringStream<T>::convert(s, value); -} - -#ifdef DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING -DOCTEST_INTERFACE void toStream(std::ostream* s, char* in); -DOCTEST_INTERFACE void toStream(std::ostream* s, const char* in); -#endif // DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING -DOCTEST_INTERFACE void toStream(std::ostream* s, bool in); -DOCTEST_INTERFACE void toStream(std::ostream* s, float in); -DOCTEST_INTERFACE void toStream(std::ostream* s, double in); -DOCTEST_INTERFACE void toStream(std::ostream* s, double long in); - -DOCTEST_INTERFACE void toStream(std::ostream* s, char in); -DOCTEST_INTERFACE void toStream(std::ostream* s, char signed in); -DOCTEST_INTERFACE void toStream(std::ostream* s, char unsigned in); -DOCTEST_INTERFACE void toStream(std::ostream* s, int short in); -DOCTEST_INTERFACE void toStream(std::ostream* s, int short unsigned in); -DOCTEST_INTERFACE void toStream(std::ostream* s, int in); -DOCTEST_INTERFACE void toStream(std::ostream* s, int unsigned in); -DOCTEST_INTERFACE void toStream(std::ostream* s, int long in); -DOCTEST_INTERFACE void toStream(std::ostream* s, int long unsigned in); -DOCTEST_INTERFACE void toStream(std::ostream* s, int long long in); -DOCTEST_INTERFACE void toStream(std::ostream* s, int long long unsigned in);
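StringStream selects between the two StringStreamBase variants with has_insertion_operator, which is why giving a type an ostream inserter is all it takes for failed assertions to print it. A usage sketch with an invented `Point` type:

    #include <ostream>

    struct Point { int x, y; };

    // Picked up by the StringStreamBase<true> branch above.
    std::ostream& operator<<(std::ostream& os, const Point& p) {
        return os << "Point(" << p.x << ", " << p.y << ")";
    }

    bool operator==(const Point& a, const Point& b) { return a.x == b.x && a.y == b.y; }

    // With these in place, a failing CHECK(Point{1, 2} == Point{1, 3}) can
    // report the stringified values rather than an opaque expression.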
-// ContextScope base class used to allow implementing methods of ContextScope -// that don't depend on the template parameter in doctest.cpp. -class DOCTEST_INTERFACE ContextScopeBase : public IContextScope { - protected: - ContextScopeBase(); - ContextScopeBase(ContextScopeBase&& other); - - void destroy(); - bool need_to_destroy{true}; -}; - -template <typename L> -class ContextScope : public ContextScopeBase { - const L lambda_; - - public: - explicit ContextScope(const L& lambda) : lambda_(lambda) {} - - ContextScope(ContextScope&& other) - : ContextScopeBase(static_cast<ContextScopeBase&&>(other)), - lambda_(other.lambda_) - { - } - - void stringify(std::ostream* s) const override { lambda_(s); } - - ~ContextScope() override - { - if (need_to_destroy) { - destroy(); - } - } -}; - -struct DOCTEST_INTERFACE MessageBuilder : public MessageData { - std::ostream* m_stream; - bool logged = false; - - MessageBuilder(const char* file, int line, assertType::Enum severity); - MessageBuilder() = delete; - ~MessageBuilder(); - - // the preferred way of chaining parameters for stringification - template <typename T> - MessageBuilder& operator,(const T& in) - { - toStream(m_stream, in); - return *this; - } - - // kept here just for backwards-compatibility - the comma operator should be - // preferred now - template <typename T> - MessageBuilder& operator<<(const T& in) - { - return this->operator,(in); - } - - // the `,` operator has the lowest operator precedence - if `<<` is used by - // the user then the `,` operator will be called last which is not what we - // want and thus the `*` operator is used first (has higher operator - // precedence compared to `<<`) so that we guarantee that an operator of the - // MessageBuilder class is called first before the rest of the parameters - template <typename T> - MessageBuilder& operator*(const T& in) - { - return this->operator,(in); - } - - bool log(); - void react(); -}; - -template <typename L> -ContextScope<L> -MakeContextScope(const L& lambda) -{ - return ContextScope<L>(lambda); -} -} // namespace detail - -#define DOCTEST_DEFINE_DECORATOR(name, type, def) \ - struct name { \ - type data; \ - name(type in = def) : data(in) {} \ - void fill(detail::TestCase& state) const \ - { \ - state.DOCTEST_CAT(m_, name) = data; \ - } \ - void fill(detail::TestSuite& state) const \ - { \ - state.DOCTEST_CAT(m_, name) = data; \ - } \ - } - -DOCTEST_DEFINE_DECORATOR(test_suite, const char*, ""); -DOCTEST_DEFINE_DECORATOR(description, const char*, ""); -DOCTEST_DEFINE_DECORATOR(skip, bool, true); -DOCTEST_DEFINE_DECORATOR(no_breaks, bool, true); -DOCTEST_DEFINE_DECORATOR(no_output, bool, true); -DOCTEST_DEFINE_DECORATOR(timeout, double, 0); -DOCTEST_DEFINE_DECORATOR(may_fail, bool, true); -DOCTEST_DEFINE_DECORATOR(should_fail, bool, true); -DOCTEST_DEFINE_DECORATOR(expected_failures, int, 0); - -template <typename T> -int -registerExceptionTranslator(String (*translateFunction)(T)) -{ - DOCTEST_CLANG_SUPPRESS_WARNING_WITH_PUSH("-Wexit-time-destructors") - static detail::ExceptionTranslator<T> exceptionTranslator(translateFunction); - DOCTEST_CLANG_SUPPRESS_WARNING_POP - detail::registerExceptionTranslatorImpl(&exceptionTranslator); - return 0; -} - -} // namespace doctest - -// in a separate namespace outside of doctest because the DOCTEST_TEST_SUITE -// macro introduces an anonymous namespace in which getCurrentTestSuite gets -// overridden -namespace doctest_detail_test_suite_ns { -DOCTEST_INTERFACE doctest::detail::TestSuite& getCurrentTestSuite(); -} // namespace doctest_detail_test_suite_ns - -namespace doctest { -#else // DOCTEST_CONFIG_DISABLE -template <typename T> -int -registerExceptionTranslator(String (*)(T)) -{ - return 0; -} -#endif // DOCTEST_CONFIG_DISABLE - -namespace detail { -typedef void
(*assert_handler)(const AssertData&); -struct ContextState; -} // namespace detail - -class DOCTEST_INTERFACE Context { - detail::ContextState* p; - - void parseArgs(int argc, const char* const* argv, bool withDefaults = false); - - public: - explicit Context(int argc = 0, const char* const* argv = nullptr); - - ~Context(); - - void applyCommandLine(int argc, const char* const* argv); - - void addFilter(const char* filter, const char* value); - void clearFilters(); - void setOption(const char* option, bool value); - void setOption(const char* option, int value); - void setOption(const char* option, const char* value); - - bool shouldExit(); - - void setAsDefaultForAssertsOutOfTestCases(); - - void setAssertHandler(detail::assert_handler ah); - - void setCout(std::ostream* out); - - int run(); -}; - -namespace TestCaseFailureReason { -enum Enum { - None = 0, - AssertFailure = 1, // an assertion has failed in the test case - Exception = 2, // test case threw an exception - Crash = 4, // a crash... - TooManyFailedAsserts = 8, // the abort-after option - Timeout = 16, // see the timeout decorator - ShouldHaveFailedButDidnt = 32, // see the should_fail decorator - ShouldHaveFailedAndDid = 64, // see the should_fail decorator - DidntFailExactlyNumTimes = 128, // see the expected_failures decorator - FailedExactlyNumTimes = 256, // see the expected_failures decorator - CouldHaveFailedAndDid = 512 // see the may_fail decorator -}; -} // namespace TestCaseFailureReason - -struct DOCTEST_INTERFACE CurrentTestCaseStats { - int numAssertsCurrentTest; - int numAssertsFailedCurrentTest; - double seconds; - int failure_flags; // use TestCaseFailureReason::Enum - bool testCaseSuccess; -}; - -struct DOCTEST_INTERFACE TestCaseException { - String error_string; - bool is_crash; -}; - -struct DOCTEST_INTERFACE TestRunStats { - unsigned numTestCases; - unsigned numTestCasesPassingFilters; - unsigned numTestSuitesPassingFilters; - unsigned numTestCasesFailed; - int numAsserts; - int numAssertsFailed; -}; - -struct QueryData { - const TestRunStats* run_stats = nullptr; - const TestCaseData** data = nullptr; - unsigned num_data = 0; -}; - -struct DOCTEST_INTERFACE IReporter { - // The constructor has to accept "const ContextOptions&" as a single argument - // which has most of the options for the run + a pointer to the stdout stream - // Reporter(const ContextOptions& in) - - // called when a query should be reported (listing test cases, printing the - // version, etc.) 
- virtual void report_query(const QueryData&) = 0; - - // called when the whole test run starts - virtual void test_run_start() = 0; - // called when the whole test run ends (caching a pointer to the input doesn't - // make sense here) - virtual void test_run_end(const TestRunStats&) = 0; - - // called when a test case is started (safe to cache a pointer to the input) - virtual void test_case_start(const TestCaseData&) = 0; - // called when a test case is reentered because of unfinished subcases (safe - // to cache a pointer to the input) - virtual void test_case_reenter(const TestCaseData&) = 0; - // called when a test case has ended - virtual void test_case_end(const CurrentTestCaseStats&) = 0; - - // called when an exception is thrown from the test case (or it crashes) - virtual void test_case_exception(const TestCaseException&) = 0; - - // called whenever a subcase is entered (don't cache pointers to the input) - virtual void subcase_start(const SubcaseSignature&) = 0; - // called whenever a subcase is exited (don't cache pointers to the input) - virtual void subcase_end() = 0; - - // called for each assert (don't cache pointers to the input) - virtual void log_assert(const AssertData&) = 0; - // called for each message (don't cache pointers to the input) - virtual void log_message(const MessageData&) = 0; - - // called when a test case is skipped either because it doesn't pass the - // filters, has a skip decorator or isn't in the execution range (between - // first and last) (safe to cache a pointer to the input) - virtual void test_case_skipped(const TestCaseData&) = 0; - - // doctest will not be managing the lifetimes of reporters given to it but - // this would still be nice to have - virtual ~IReporter(); - - // can obtain all currently active contexts and stringify them if one wishes - // to do so - static int get_num_active_contexts(); - static const IContextScope* const* get_active_contexts(); - - // can iterate through contexts which have been stringified automatically in - // their destructors when an exception has been thrown - static int get_num_stringified_contexts(); - static const String* get_stringified_contexts(); -}; - -namespace detail { -typedef IReporter* (*reporterCreatorFunc)(const ContextOptions&); - -DOCTEST_INTERFACE void registerReporterImpl( - const char* name, int prio, reporterCreatorFunc c, bool isReporter); - -template -IReporter* -reporterCreator(const ContextOptions& o) -{ - return new Reporter(o); -} -} // namespace detail - -template -int -registerReporter(const char* name, int priority, bool isReporter) -{ - detail::registerReporterImpl( - name, priority, detail::reporterCreator, isReporter); - return 0; -} -} // namespace doctest - -// if registering is not disabled -#if !defined(DOCTEST_CONFIG_DISABLE) - -// common code in asserts - for convenience -#define DOCTEST_ASSERT_LOG_REACT_RETURN(b) \ - if (b.log()) \ - DOCTEST_BREAK_INTO_DEBUGGER(); \ - b.react(); \ - return !b.m_failed - -#ifdef DOCTEST_CONFIG_NO_TRY_CATCH_IN_ASSERTS -#define DOCTEST_WRAP_IN_TRY(x) x; -#else // DOCTEST_CONFIG_NO_TRY_CATCH_IN_ASSERTS -#define DOCTEST_WRAP_IN_TRY(x) \ - try { \ - x; \ - } \ - catch (...) { \ - DOCTEST_RB.translateException(); \ - } -#endif // DOCTEST_CONFIG_NO_TRY_CATCH_IN_ASSERTS - -#ifdef DOCTEST_CONFIG_VOID_CAST_EXPRESSIONS -#define DOCTEST_CAST_TO_VOID(...) 
\ - DOCTEST_GCC_SUPPRESS_WARNING_WITH_PUSH("-Wuseless-cast") \ - static_cast(__VA_ARGS__); \ - DOCTEST_GCC_SUPPRESS_WARNING_POP -#else // DOCTEST_CONFIG_VOID_CAST_EXPRESSIONS -#define DOCTEST_CAST_TO_VOID(...) __VA_ARGS__; -#endif // DOCTEST_CONFIG_VOID_CAST_EXPRESSIONS - -// registers the test by initializing a dummy var with a function -#define DOCTEST_REGISTER_FUNCTION(global_prefix, f, decorators) \ - global_prefix DOCTEST_GLOBAL_NO_WARNINGS( \ - DOCTEST_ANONYMOUS(DOCTEST_ANON_VAR_), \ - doctest::detail::regTest( \ - doctest::detail::TestCase( \ - f, __FILE__, __LINE__, \ - doctest_detail_test_suite_ns::getCurrentTestSuite()) * \ - decorators)) - -#define DOCTEST_IMPLEMENT_FIXTURE(der, base, func, decorators) \ - namespace { \ - struct der : public base { \ - void f(); \ - }; \ - static void func() \ - { \ - der v; \ - v.f(); \ - } \ - DOCTEST_REGISTER_FUNCTION(DOCTEST_EMPTY, func, decorators) \ - } \ - inline DOCTEST_NOINLINE void der::f() - -#define DOCTEST_CREATE_AND_REGISTER_FUNCTION(f, decorators) \ - static void f(); \ - DOCTEST_REGISTER_FUNCTION(DOCTEST_EMPTY, f, decorators) \ - static void f() - -#define DOCTEST_CREATE_AND_REGISTER_FUNCTION_IN_CLASS(f, proxy, decorators) \ - static doctest::detail::funcType proxy() \ - { \ - return f; \ - } \ - DOCTEST_REGISTER_FUNCTION(inline, proxy(), decorators) \ - static void f() - -// for registering tests -#define DOCTEST_TEST_CASE(decorators) \ - DOCTEST_CREATE_AND_REGISTER_FUNCTION( \ - DOCTEST_ANONYMOUS(DOCTEST_ANON_FUNC_), decorators) - -// for registering tests in classes - requires C++17 for inline variables! -#if __cplusplus >= 201703L || \ - (DOCTEST_MSVC >= DOCTEST_COMPILER(19, 12, 0) && _MSVC_LANG >= 201703L) -#define DOCTEST_TEST_CASE_CLASS(decorators) \ - DOCTEST_CREATE_AND_REGISTER_FUNCTION_IN_CLASS( \ - DOCTEST_ANONYMOUS(DOCTEST_ANON_FUNC_), \ - DOCTEST_ANONYMOUS(DOCTEST_ANON_PROXY_), decorators) -#else // DOCTEST_TEST_CASE_CLASS -#define DOCTEST_TEST_CASE_CLASS(...) \ - TEST_CASES_CAN_BE_REGISTERED_IN_CLASSES_ONLY_IN_CPP17_MODE_OR_WITH_VS_2017_OR_NEWER -#endif // DOCTEST_TEST_CASE_CLASS - -// for registering tests with a fixture -#define DOCTEST_TEST_CASE_FIXTURE(c, decorators) \ - DOCTEST_IMPLEMENT_FIXTURE( \ - DOCTEST_ANONYMOUS(DOCTEST_ANON_CLASS_), c, \ - DOCTEST_ANONYMOUS(DOCTEST_ANON_FUNC_), decorators) - -// for converting types to strings without the header and demangling -#define DOCTEST_TYPE_TO_STRING_IMPL(...) \ - template <> \ - inline const char* type_to_string<__VA_ARGS__>() \ - { \ - return "<" #__VA_ARGS__ ">"; \ - } -#define DOCTEST_TYPE_TO_STRING(...) 
\ - namespace doctest { namespace detail { \ - DOCTEST_TYPE_TO_STRING_IMPL(__VA_ARGS__) \ - } \ - } \ - static_assert(true, "") - -#define DOCTEST_TEST_CASE_TEMPLATE_DEFINE_IMPL(dec, T, iter, func) \ - template <typename T> \ - static void func(); \ - namespace { \ - template <typename Tuple> \ - struct iter; \ - template <typename Type, typename... Rest> \ - struct iter<std::tuple<Type, Rest...>> { \ - iter(const char* file, unsigned line, int index) \ - { \ - doctest::detail::regTest( \ - doctest::detail::TestCase( \ - func<Type>, file, line, \ - doctest_detail_test_suite_ns::getCurrentTestSuite(), \ - doctest::detail::type_to_string<Type>(), \ - int(line) * 1000 + index) * \ - dec); \ - iter<std::tuple<Rest...>>(file, line, index + 1); \ - } \ - }; \ - template <> \ - struct iter<std::tuple<>> { \ - iter(const char*, unsigned, int) {} \ - }; \ - } \ - template <typename T> \ - static void func() - -#define DOCTEST_TEST_CASE_TEMPLATE_DEFINE(dec, T, id) \ - DOCTEST_TEST_CASE_TEMPLATE_DEFINE_IMPL( \ - dec, T, DOCTEST_CAT(id, ITERATOR), DOCTEST_ANONYMOUS(DOCTEST_ANON_TMP_)) - -#define DOCTEST_TEST_CASE_TEMPLATE_INSTANTIATE_IMPL(id, anon, ...) \ - DOCTEST_GLOBAL_NO_WARNINGS( \ - DOCTEST_CAT(anon, DUMMY), \ - doctest::detail::instantiationHelper( \ - DOCTEST_CAT(id, ITERATOR) < __VA_ARGS__ > (__FILE__, __LINE__, 0))) - -#define DOCTEST_TEST_CASE_TEMPLATE_INVOKE(id, ...) \ - DOCTEST_TEST_CASE_TEMPLATE_INSTANTIATE_IMPL( \ - id, DOCTEST_ANONYMOUS(DOCTEST_ANON_TMP_), std::tuple<__VA_ARGS__>) \ - static_assert(true, "") - -#define DOCTEST_TEST_CASE_TEMPLATE_APPLY(id, ...) \ - DOCTEST_TEST_CASE_TEMPLATE_INSTANTIATE_IMPL( \ - id, DOCTEST_ANONYMOUS(DOCTEST_ANON_TMP_), __VA_ARGS__) \ - static_assert(true, "") - -#define DOCTEST_TEST_CASE_TEMPLATE_IMPL(dec, T, anon, ...) \ - DOCTEST_TEST_CASE_TEMPLATE_DEFINE_IMPL( \ - dec, T, DOCTEST_CAT(anon, ITERATOR), anon); \ - DOCTEST_TEST_CASE_TEMPLATE_INSTANTIATE_IMPL( \ - anon, anon, std::tuple<__VA_ARGS__>) \ - template <typename T> \ - static void anon() - -#define DOCTEST_TEST_CASE_TEMPLATE(dec, T, ...)
\ - DOCTEST_TEST_CASE_TEMPLATE_IMPL( \ - dec, T, DOCTEST_ANONYMOUS(DOCTEST_ANON_TMP_), __VA_ARGS__) - -// for subcases -#define DOCTEST_SUBCASE(name) \ - if (const doctest::detail::Subcase & \ - DOCTEST_ANONYMOUS(DOCTEST_ANON_SUBCASE_) DOCTEST_UNUSED = \ - doctest::detail::Subcase(name, __FILE__, __LINE__)) - -// for grouping tests in test suites by using code blocks -#define DOCTEST_TEST_SUITE_IMPL(decorators, ns_name) \ - namespace ns_name { namespace doctest_detail_test_suite_ns { \ - static DOCTEST_NOINLINE doctest::detail::TestSuite& getCurrentTestSuite() \ - { \ - DOCTEST_MSVC_SUPPRESS_WARNING_WITH_PUSH(4640) \ - DOCTEST_CLANG_SUPPRESS_WARNING_WITH_PUSH("-Wexit-time-destructors") \ - DOCTEST_GCC_SUPPRESS_WARNING_WITH_PUSH("-Wmissing-field-initializers") \ - static doctest::detail::TestSuite data{}; \ - static bool inited = false; \ - DOCTEST_MSVC_SUPPRESS_WARNING_POP \ - DOCTEST_CLANG_SUPPRESS_WARNING_POP \ - DOCTEST_GCC_SUPPRESS_WARNING_POP \ - if (!inited) { \ - data* decorators; \ - inited = true; \ - } \ - return data; \ - } \ - } \ - } \ - namespace ns_name - -#define DOCTEST_TEST_SUITE(decorators) \ - DOCTEST_TEST_SUITE_IMPL(decorators, DOCTEST_ANONYMOUS(DOCTEST_ANON_SUITE_)) - -// for starting a testsuite block -#define DOCTEST_TEST_SUITE_BEGIN(decorators) \ - DOCTEST_GLOBAL_NO_WARNINGS( \ - DOCTEST_ANONYMOUS(DOCTEST_ANON_VAR_), \ - doctest::detail::setTestSuite( \ - doctest::detail::TestSuite() * decorators)) \ - static_assert(true, "") - -// for ending a testsuite block -#define DOCTEST_TEST_SUITE_END \ - DOCTEST_GLOBAL_NO_WARNINGS( \ - DOCTEST_ANONYMOUS(DOCTEST_ANON_VAR_), \ - doctest::detail::setTestSuite(doctest::detail::TestSuite() * "")) \ - typedef int DOCTEST_ANONYMOUS(DOCTEST_ANON_FOR_SEMICOLON_) - -// for registering exception translators -#define DOCTEST_REGISTER_EXCEPTION_TRANSLATOR_IMPL(translatorName, signature) \ - inline doctest::String translatorName(signature); \ - DOCTEST_GLOBAL_NO_WARNINGS( \ - DOCTEST_ANONYMOUS(DOCTEST_ANON_TRANSLATOR_), \ - doctest::registerExceptionTranslator(translatorName)) \ - doctest::String translatorName(signature) - -#define DOCTEST_REGISTER_EXCEPTION_TRANSLATOR(signature) \ - DOCTEST_REGISTER_EXCEPTION_TRANSLATOR_IMPL( \ - DOCTEST_ANONYMOUS(DOCTEST_ANON_TRANSLATOR_), signature) - -// for registering reporters -#define DOCTEST_REGISTER_REPORTER(name, priority, reporter) \ - DOCTEST_GLOBAL_NO_WARNINGS( \ - DOCTEST_ANONYMOUS(DOCTEST_ANON_REPORTER_), \ - doctest::registerReporter(name, priority, true)) \ - static_assert(true, "") - -// for registering listeners -#define DOCTEST_REGISTER_LISTENER(name, priority, reporter) \ - DOCTEST_GLOBAL_NO_WARNINGS( \ - DOCTEST_ANONYMOUS(DOCTEST_ANON_REPORTER_), \ - doctest::registerReporter(name, priority, false)) \ - static_assert(true, "") - -// clang-format off -// for logging - disabling formatting because it's important to have these on 2 separate lines - see PR #557 -#define DOCTEST_INFO(...) \ - DOCTEST_INFO_IMPL(DOCTEST_ANONYMOUS(DOCTEST_CAPTURE_), \ - DOCTEST_ANONYMOUS(DOCTEST_CAPTURE_OTHER_), \ - __VA_ARGS__) -// clang-format on - -#define DOCTEST_INFO_IMPL(mb_name, s_name, ...) \ - auto DOCTEST_ANONYMOUS(DOCTEST_CAPTURE_) = \ - doctest::detail::MakeContextScope([&](std::ostream* s_name) { \ - doctest::detail::MessageBuilder mb_name( \ - __FILE__, __LINE__, doctest::assertType::is_warn); \ - mb_name.m_stream = s_name; \ - mb_name* __VA_ARGS__; \ - }) - -#define DOCTEST_CAPTURE(x) DOCTEST_INFO(#x " := ", x) - -#define DOCTEST_ADD_AT_IMPL(type, file, line, mb, ...) 
\ - [&] { \ - doctest::detail::MessageBuilder mb(file, line, doctest::assertType::type); \ - mb* __VA_ARGS__; \ - if (mb.log()) \ - DOCTEST_BREAK_INTO_DEBUGGER(); \ - mb.react(); \ - }() - -// clang-format off -#define DOCTEST_ADD_MESSAGE_AT(file, line, ...) DOCTEST_ADD_AT_IMPL(is_warn, file, line, DOCTEST_ANONYMOUS(DOCTEST_MESSAGE_), __VA_ARGS__) -#define DOCTEST_ADD_FAIL_CHECK_AT(file, line, ...) DOCTEST_ADD_AT_IMPL(is_check, file, line, DOCTEST_ANONYMOUS(DOCTEST_MESSAGE_), __VA_ARGS__) -#define DOCTEST_ADD_FAIL_AT(file, line, ...) DOCTEST_ADD_AT_IMPL(is_require, file, line, DOCTEST_ANONYMOUS(DOCTEST_MESSAGE_), __VA_ARGS__) -// clang-format on - -#define DOCTEST_MESSAGE(...) \ - DOCTEST_ADD_MESSAGE_AT(__FILE__, __LINE__, __VA_ARGS__) -#define DOCTEST_FAIL_CHECK(...) \ - DOCTEST_ADD_FAIL_CHECK_AT(__FILE__, __LINE__, __VA_ARGS__) -#define DOCTEST_FAIL(...) DOCTEST_ADD_FAIL_AT(__FILE__, __LINE__, __VA_ARGS__) - -#define DOCTEST_TO_LVALUE(...) \ - __VA_ARGS__ // Not removed to keep backwards compatibility. - -#ifndef DOCTEST_CONFIG_SUPER_FAST_ASSERTS - -#define DOCTEST_ASSERT_IMPLEMENT_2(assert_type, ...) \ - DOCTEST_CLANG_SUPPRESS_WARNING_WITH_PUSH( \ - "-Woverloaded-shift-op-parentheses") \ - doctest::detail::ResultBuilder DOCTEST_RB( \ - doctest::assertType::assert_type, __FILE__, __LINE__, #__VA_ARGS__); \ - DOCTEST_WRAP_IN_TRY(DOCTEST_RB.setResult( \ - doctest::detail::ExpressionDecomposer(doctest::assertType::assert_type) \ - << __VA_ARGS__)) \ - DOCTEST_ASSERT_LOG_REACT_RETURN(DOCTEST_RB) \ - DOCTEST_CLANG_SUPPRESS_WARNING_POP - -#define DOCTEST_ASSERT_IMPLEMENT_1(assert_type, ...) \ - [&] { DOCTEST_ASSERT_IMPLEMENT_2(assert_type, __VA_ARGS__); }() - -#else // DOCTEST_CONFIG_SUPER_FAST_ASSERTS - -// necessary for _MESSAGE -#define DOCTEST_ASSERT_IMPLEMENT_2 DOCTEST_ASSERT_IMPLEMENT_1 - -#define DOCTEST_ASSERT_IMPLEMENT_1(assert_type, ...) \ - DOCTEST_CLANG_SUPPRESS_WARNING_WITH_PUSH( \ - "-Woverloaded-shift-op-parentheses") \ - doctest::detail::decomp_assert( \ - doctest::assertType::assert_type, __FILE__, __LINE__, #__VA_ARGS__, \ - doctest::detail::ExpressionDecomposer(doctest::assertType::assert_type) \ - << __VA_ARGS__) DOCTEST_CLANG_SUPPRESS_WARNING_POP - -#endif // DOCTEST_CONFIG_SUPER_FAST_ASSERTS - -#define DOCTEST_WARN(...) DOCTEST_ASSERT_IMPLEMENT_1(DT_WARN, __VA_ARGS__) -#define DOCTEST_CHECK(...) DOCTEST_ASSERT_IMPLEMENT_1(DT_CHECK, __VA_ARGS__) -#define DOCTEST_REQUIRE(...) DOCTEST_ASSERT_IMPLEMENT_1(DT_REQUIRE, __VA_ARGS__) -#define DOCTEST_WARN_FALSE(...) \ - DOCTEST_ASSERT_IMPLEMENT_1(DT_WARN_FALSE, __VA_ARGS__) -#define DOCTEST_CHECK_FALSE(...) \ - DOCTEST_ASSERT_IMPLEMENT_1(DT_CHECK_FALSE, __VA_ARGS__) -#define DOCTEST_REQUIRE_FALSE(...) \ - DOCTEST_ASSERT_IMPLEMENT_1(DT_REQUIRE_FALSE, __VA_ARGS__) - -// clang-format off -#define DOCTEST_WARN_MESSAGE(cond, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_ASSERT_IMPLEMENT_2(DT_WARN, cond); }() -#define DOCTEST_CHECK_MESSAGE(cond, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_ASSERT_IMPLEMENT_2(DT_CHECK, cond); }() -#define DOCTEST_REQUIRE_MESSAGE(cond, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_ASSERT_IMPLEMENT_2(DT_REQUIRE, cond); }() -#define DOCTEST_WARN_FALSE_MESSAGE(cond, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_ASSERT_IMPLEMENT_2(DT_WARN_FALSE, cond); }() -#define DOCTEST_CHECK_FALSE_MESSAGE(cond, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_ASSERT_IMPLEMENT_2(DT_CHECK_FALSE, cond); }() -#define DOCTEST_REQUIRE_FALSE_MESSAGE(cond, ...) 
[&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_ASSERT_IMPLEMENT_2(DT_REQUIRE_FALSE, cond); }() -// clang-format on - -#define DOCTEST_ASSERT_THROWS_AS(expr, assert_type, message, ...) \ - [&] { \ - if (!doctest::getContextOptions()->no_throw) { \ - doctest::detail::ResultBuilder DOCTEST_RB( \ - doctest::assertType::assert_type, __FILE__, __LINE__, #expr, \ - #__VA_ARGS__, message); \ - try { \ - DOCTEST_CAST_TO_VOID(expr) \ - } \ - catch (const typename doctest::detail::remove_const< \ - typename doctest::detail::remove_reference<__VA_ARGS__>::type>:: \ - type&) { \ - DOCTEST_RB.translateException(); \ - DOCTEST_RB.m_threw_as = true; \ - } \ - catch (...) { \ - DOCTEST_RB.translateException(); \ - } \ - DOCTEST_ASSERT_LOG_REACT_RETURN(DOCTEST_RB); \ - } else { \ - return false; \ - } \ - }() - -#define DOCTEST_ASSERT_THROWS_WITH(expr, expr_str, assert_type, ...) \ - [&] { \ - if (!doctest::getContextOptions()->no_throw) { \ - doctest::detail::ResultBuilder DOCTEST_RB( \ - doctest::assertType::assert_type, __FILE__, __LINE__, expr_str, "", \ - __VA_ARGS__); \ - try { \ - DOCTEST_CAST_TO_VOID(expr) \ - } \ - catch (...) { \ - DOCTEST_RB.translateException(); \ - } \ - DOCTEST_ASSERT_LOG_REACT_RETURN(DOCTEST_RB); \ - } else { \ - return false; \ - } \ - }() - -#define DOCTEST_ASSERT_NOTHROW(assert_type, ...) \ - [&] { \ - doctest::detail::ResultBuilder DOCTEST_RB( \ - doctest::assertType::assert_type, __FILE__, __LINE__, #__VA_ARGS__); \ - try { \ - DOCTEST_CAST_TO_VOID(__VA_ARGS__) \ - } \ - catch (...) { \ - DOCTEST_RB.translateException(); \ - } \ - DOCTEST_ASSERT_LOG_REACT_RETURN(DOCTEST_RB); \ - }() - -// clang-format off -#define DOCTEST_WARN_THROWS(...) DOCTEST_ASSERT_THROWS_WITH((__VA_ARGS__), #__VA_ARGS__, DT_WARN_THROWS, "") -#define DOCTEST_CHECK_THROWS(...) DOCTEST_ASSERT_THROWS_WITH((__VA_ARGS__), #__VA_ARGS__, DT_CHECK_THROWS, "") -#define DOCTEST_REQUIRE_THROWS(...) DOCTEST_ASSERT_THROWS_WITH((__VA_ARGS__), #__VA_ARGS__, DT_REQUIRE_THROWS, "") - -#define DOCTEST_WARN_THROWS_AS(expr, ...) DOCTEST_ASSERT_THROWS_AS(expr, DT_WARN_THROWS_AS, "", __VA_ARGS__) -#define DOCTEST_CHECK_THROWS_AS(expr, ...) DOCTEST_ASSERT_THROWS_AS(expr, DT_CHECK_THROWS_AS, "", __VA_ARGS__) -#define DOCTEST_REQUIRE_THROWS_AS(expr, ...) DOCTEST_ASSERT_THROWS_AS(expr, DT_REQUIRE_THROWS_AS, "", __VA_ARGS__) - -#define DOCTEST_WARN_THROWS_WITH(expr, ...) DOCTEST_ASSERT_THROWS_WITH(expr, #expr, DT_WARN_THROWS_WITH, __VA_ARGS__) -#define DOCTEST_CHECK_THROWS_WITH(expr, ...) DOCTEST_ASSERT_THROWS_WITH(expr, #expr, DT_CHECK_THROWS_WITH, __VA_ARGS__) -#define DOCTEST_REQUIRE_THROWS_WITH(expr, ...) DOCTEST_ASSERT_THROWS_WITH(expr, #expr, DT_REQUIRE_THROWS_WITH, __VA_ARGS__) - -#define DOCTEST_WARN_THROWS_WITH_AS(expr, message, ...) DOCTEST_ASSERT_THROWS_AS(expr, DT_WARN_THROWS_WITH_AS, message, __VA_ARGS__) -#define DOCTEST_CHECK_THROWS_WITH_AS(expr, message, ...) DOCTEST_ASSERT_THROWS_AS(expr, DT_CHECK_THROWS_WITH_AS, message, __VA_ARGS__) -#define DOCTEST_REQUIRE_THROWS_WITH_AS(expr, message, ...) DOCTEST_ASSERT_THROWS_AS(expr, DT_REQUIRE_THROWS_WITH_AS, message, __VA_ARGS__) - -#define DOCTEST_WARN_NOTHROW(...) DOCTEST_ASSERT_NOTHROW(DT_WARN_NOTHROW, __VA_ARGS__) -#define DOCTEST_CHECK_NOTHROW(...) DOCTEST_ASSERT_NOTHROW(DT_CHECK_NOTHROW, __VA_ARGS__) -#define DOCTEST_REQUIRE_NOTHROW(...) DOCTEST_ASSERT_NOTHROW(DT_REQUIRE_NOTHROW, __VA_ARGS__) - -#define DOCTEST_WARN_THROWS_MESSAGE(expr, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_WARN_THROWS(expr); }() -#define DOCTEST_CHECK_THROWS_MESSAGE(expr, ...) 
[&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_CHECK_THROWS(expr); }() -#define DOCTEST_REQUIRE_THROWS_MESSAGE(expr, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_REQUIRE_THROWS(expr); }() -#define DOCTEST_WARN_THROWS_AS_MESSAGE(expr, ex, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_WARN_THROWS_AS(expr, ex); }() -#define DOCTEST_CHECK_THROWS_AS_MESSAGE(expr, ex, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_CHECK_THROWS_AS(expr, ex); }() -#define DOCTEST_REQUIRE_THROWS_AS_MESSAGE(expr, ex, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_REQUIRE_THROWS_AS(expr, ex); }() -#define DOCTEST_WARN_THROWS_WITH_MESSAGE(expr, with, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_WARN_THROWS_WITH(expr, with); }() -#define DOCTEST_CHECK_THROWS_WITH_MESSAGE(expr, with, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_CHECK_THROWS_WITH(expr, with); }() -#define DOCTEST_REQUIRE_THROWS_WITH_MESSAGE(expr, with, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_REQUIRE_THROWS_WITH(expr, with); }() -#define DOCTEST_WARN_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_WARN_THROWS_WITH_AS(expr, with, ex); }() -#define DOCTEST_CHECK_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_CHECK_THROWS_WITH_AS(expr, with, ex); }() -#define DOCTEST_REQUIRE_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_REQUIRE_THROWS_WITH_AS(expr, with, ex); }() -#define DOCTEST_WARN_NOTHROW_MESSAGE(expr, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_WARN_NOTHROW(expr); }() -#define DOCTEST_CHECK_NOTHROW_MESSAGE(expr, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_CHECK_NOTHROW(expr); }() -#define DOCTEST_REQUIRE_NOTHROW_MESSAGE(expr, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_REQUIRE_NOTHROW(expr); }() -// clang-format on - -#ifndef DOCTEST_CONFIG_SUPER_FAST_ASSERTS - -#define DOCTEST_BINARY_ASSERT(assert_type, comp, ...) \ - [&] { \ - doctest::detail::ResultBuilder DOCTEST_RB( \ - doctest::assertType::assert_type, __FILE__, __LINE__, #__VA_ARGS__); \ - DOCTEST_WRAP_IN_TRY( \ - DOCTEST_RB \ - .binary_assert<doctest::detail::binaryAssertComparison::comp>( \ - __VA_ARGS__)) \ - DOCTEST_ASSERT_LOG_REACT_RETURN(DOCTEST_RB); \ - }() - -#define DOCTEST_UNARY_ASSERT(assert_type, ...) \ - [&] { \ - doctest::detail::ResultBuilder DOCTEST_RB( \ - doctest::assertType::assert_type, __FILE__, __LINE__, #__VA_ARGS__); \ - DOCTEST_WRAP_IN_TRY(DOCTEST_RB.unary_assert(__VA_ARGS__)) \ - DOCTEST_ASSERT_LOG_REACT_RETURN(DOCTEST_RB); \ - }() - -#else // DOCTEST_CONFIG_SUPER_FAST_ASSERTS - -#define DOCTEST_BINARY_ASSERT(assert_type, comparison, ...) \ - doctest::detail::binary_assert< \ - doctest::detail::binaryAssertComparison::comparison>( \ - doctest::assertType::assert_type, __FILE__, __LINE__, #__VA_ARGS__, \ - __VA_ARGS__) - -#define DOCTEST_UNARY_ASSERT(assert_type, ...) \ - doctest::detail::unary_assert( \ - doctest::assertType::assert_type, __FILE__, __LINE__, #__VA_ARGS__, \ - __VA_ARGS__) - -#endif // DOCTEST_CONFIG_SUPER_FAST_ASSERTS - -#define DOCTEST_WARN_EQ(...) DOCTEST_BINARY_ASSERT(DT_WARN_EQ, eq, __VA_ARGS__) -#define DOCTEST_CHECK_EQ(...) \ - DOCTEST_BINARY_ASSERT(DT_CHECK_EQ, eq, __VA_ARGS__) -#define DOCTEST_REQUIRE_EQ(...) \ - DOCTEST_BINARY_ASSERT(DT_REQUIRE_EQ, eq, __VA_ARGS__) -#define DOCTEST_WARN_NE(...) DOCTEST_BINARY_ASSERT(DT_WARN_NE, ne, __VA_ARGS__) -#define DOCTEST_CHECK_NE(...) \ - DOCTEST_BINARY_ASSERT(DT_CHECK_NE, ne, __VA_ARGS__) -#define DOCTEST_REQUIRE_NE(...) \ - DOCTEST_BINARY_ASSERT(DT_REQUIRE_NE, ne, __VA_ARGS__) -#define DOCTEST_WARN_GT(...)
DOCTEST_BINARY_ASSERT(DT_WARN_GT, gt, __VA_ARGS__) -#define DOCTEST_CHECK_GT(...) \ - DOCTEST_BINARY_ASSERT(DT_CHECK_GT, gt, __VA_ARGS__) -#define DOCTEST_REQUIRE_GT(...) \ - DOCTEST_BINARY_ASSERT(DT_REQUIRE_GT, gt, __VA_ARGS__) -#define DOCTEST_WARN_LT(...) DOCTEST_BINARY_ASSERT(DT_WARN_LT, lt, __VA_ARGS__) -#define DOCTEST_CHECK_LT(...) \ - DOCTEST_BINARY_ASSERT(DT_CHECK_LT, lt, __VA_ARGS__) -#define DOCTEST_REQUIRE_LT(...) \ - DOCTEST_BINARY_ASSERT(DT_REQUIRE_LT, lt, __VA_ARGS__) -#define DOCTEST_WARN_GE(...) DOCTEST_BINARY_ASSERT(DT_WARN_GE, ge, __VA_ARGS__) -#define DOCTEST_CHECK_GE(...) \ - DOCTEST_BINARY_ASSERT(DT_CHECK_GE, ge, __VA_ARGS__) -#define DOCTEST_REQUIRE_GE(...) \ - DOCTEST_BINARY_ASSERT(DT_REQUIRE_GE, ge, __VA_ARGS__) -#define DOCTEST_WARN_LE(...) DOCTEST_BINARY_ASSERT(DT_WARN_LE, le, __VA_ARGS__) -#define DOCTEST_CHECK_LE(...) \ - DOCTEST_BINARY_ASSERT(DT_CHECK_LE, le, __VA_ARGS__) -#define DOCTEST_REQUIRE_LE(...) \ - DOCTEST_BINARY_ASSERT(DT_REQUIRE_LE, le, __VA_ARGS__) - -#define DOCTEST_WARN_UNARY(...) DOCTEST_UNARY_ASSERT(DT_WARN_UNARY, __VA_ARGS__) -#define DOCTEST_CHECK_UNARY(...) \ - DOCTEST_UNARY_ASSERT(DT_CHECK_UNARY, __VA_ARGS__) -#define DOCTEST_REQUIRE_UNARY(...) \ - DOCTEST_UNARY_ASSERT(DT_REQUIRE_UNARY, __VA_ARGS__) -#define DOCTEST_WARN_UNARY_FALSE(...) \ - DOCTEST_UNARY_ASSERT(DT_WARN_UNARY_FALSE, __VA_ARGS__) -#define DOCTEST_CHECK_UNARY_FALSE(...) \ - DOCTEST_UNARY_ASSERT(DT_CHECK_UNARY_FALSE, __VA_ARGS__) -#define DOCTEST_REQUIRE_UNARY_FALSE(...) \ - DOCTEST_UNARY_ASSERT(DT_REQUIRE_UNARY_FALSE, __VA_ARGS__) - -#ifdef DOCTEST_CONFIG_NO_EXCEPTIONS - -#undef DOCTEST_WARN_THROWS -#undef DOCTEST_CHECK_THROWS -#undef DOCTEST_REQUIRE_THROWS -#undef DOCTEST_WARN_THROWS_AS -#undef DOCTEST_CHECK_THROWS_AS -#undef DOCTEST_REQUIRE_THROWS_AS -#undef DOCTEST_WARN_THROWS_WITH -#undef DOCTEST_CHECK_THROWS_WITH -#undef DOCTEST_REQUIRE_THROWS_WITH -#undef DOCTEST_WARN_THROWS_WITH_AS -#undef DOCTEST_CHECK_THROWS_WITH_AS -#undef DOCTEST_REQUIRE_THROWS_WITH_AS -#undef DOCTEST_WARN_NOTHROW -#undef DOCTEST_CHECK_NOTHROW -#undef DOCTEST_REQUIRE_NOTHROW - -#undef DOCTEST_WARN_THROWS_MESSAGE -#undef DOCTEST_CHECK_THROWS_MESSAGE -#undef DOCTEST_REQUIRE_THROWS_MESSAGE -#undef DOCTEST_WARN_THROWS_AS_MESSAGE -#undef DOCTEST_CHECK_THROWS_AS_MESSAGE -#undef DOCTEST_REQUIRE_THROWS_AS_MESSAGE -#undef DOCTEST_WARN_THROWS_WITH_MESSAGE -#undef DOCTEST_CHECK_THROWS_WITH_MESSAGE -#undef DOCTEST_REQUIRE_THROWS_WITH_MESSAGE -#undef DOCTEST_WARN_THROWS_WITH_AS_MESSAGE -#undef DOCTEST_CHECK_THROWS_WITH_AS_MESSAGE -#undef DOCTEST_REQUIRE_THROWS_WITH_AS_MESSAGE -#undef DOCTEST_WARN_NOTHROW_MESSAGE -#undef DOCTEST_CHECK_NOTHROW_MESSAGE -#undef DOCTEST_REQUIRE_NOTHROW_MESSAGE - -#ifdef DOCTEST_CONFIG_NO_EXCEPTIONS_BUT_WITH_ALL_ASSERTS - -#define DOCTEST_WARN_THROWS(...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS(...) ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS(...) ([] { return false; }) -#define DOCTEST_WARN_THROWS_AS(expr, ...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS_AS(expr, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS_AS(expr, ...) ([] { return false; }) -#define DOCTEST_WARN_THROWS_WITH(expr, ...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS_WITH(expr, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS_WITH(expr, ...) ([] { return false; }) -#define DOCTEST_WARN_THROWS_WITH_AS(expr, with, ...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS_WITH_AS(expr, with, ...) 
([] { return false; }) -#define DOCTEST_REQUIRE_THROWS_WITH_AS(expr, with, ...) ([] { return false; }) -#define DOCTEST_WARN_NOTHROW(...) ([] { return false; }) -#define DOCTEST_CHECK_NOTHROW(...) ([] { return false; }) -#define DOCTEST_REQUIRE_NOTHROW(...) ([] { return false; }) - -#define DOCTEST_WARN_THROWS_MESSAGE(expr, ...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS_MESSAGE(expr, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS_MESSAGE(expr, ...) ([] { return false; }) -#define DOCTEST_WARN_THROWS_AS_MESSAGE(expr, ex, ...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS_AS_MESSAGE(expr, ex, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS_AS_MESSAGE(expr, ex, ...) ([] { return false; }) -#define DOCTEST_WARN_THROWS_WITH_MESSAGE(expr, with, ...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS_WITH_MESSAGE(expr, with, ...) \ - ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS_WITH_MESSAGE(expr, with, ...) \ - ([] { return false; }) -#define DOCTEST_WARN_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) \ - ([] { return false; }) -#define DOCTEST_CHECK_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) \ - ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) \ - ([] { return false; }) -#define DOCTEST_WARN_NOTHROW_MESSAGE(expr, ...) ([] { return false; }) -#define DOCTEST_CHECK_NOTHROW_MESSAGE(expr, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_NOTHROW_MESSAGE(expr, ...) ([] { return false; }) - -#else // DOCTEST_CONFIG_NO_EXCEPTIONS_BUT_WITH_ALL_ASSERTS - -#undef DOCTEST_REQUIRE -#undef DOCTEST_REQUIRE_FALSE -#undef DOCTEST_REQUIRE_MESSAGE -#undef DOCTEST_REQUIRE_FALSE_MESSAGE -#undef DOCTEST_REQUIRE_EQ -#undef DOCTEST_REQUIRE_NE -#undef DOCTEST_REQUIRE_GT -#undef DOCTEST_REQUIRE_LT -#undef DOCTEST_REQUIRE_GE -#undef DOCTEST_REQUIRE_LE -#undef DOCTEST_REQUIRE_UNARY -#undef DOCTEST_REQUIRE_UNARY_FALSE - -#endif // DOCTEST_CONFIG_NO_EXCEPTIONS_BUT_WITH_ALL_ASSERTS - -#endif // DOCTEST_CONFIG_NO_EXCEPTIONS - -// ================================================================================================= -// == WHAT FOLLOWS IS VERSIONS OF THE MACROS THAT DO NOT DO ANY REGISTERING! == -// == THIS CAN BE ENABLED BY DEFINING DOCTEST_CONFIG_DISABLE GLOBALLY! == -// ================================================================================================= -#else // DOCTEST_CONFIG_DISABLE - -#define DOCTEST_IMPLEMENT_FIXTURE(der, base, func, name) \ - namespace { \ - template \ - struct der : public base { \ - void f(); \ - }; \ - } \ - template \ - inline void der::f() - -#define DOCTEST_CREATE_AND_REGISTER_FUNCTION(f, name) \ - template \ - static inline void f() - -// for registering tests -#define DOCTEST_TEST_CASE(name) \ - DOCTEST_CREATE_AND_REGISTER_FUNCTION( \ - DOCTEST_ANONYMOUS(DOCTEST_ANON_FUNC_), name) - -// for registering tests in classes -#define DOCTEST_TEST_CASE_CLASS(name) \ - DOCTEST_CREATE_AND_REGISTER_FUNCTION( \ - DOCTEST_ANONYMOUS(DOCTEST_ANON_FUNC_), name) - -// for registering tests with a fixture -#define DOCTEST_TEST_CASE_FIXTURE(x, name) \ - DOCTEST_IMPLEMENT_FIXTURE( \ - DOCTEST_ANONYMOUS(DOCTEST_ANON_CLASS_), x, \ - DOCTEST_ANONYMOUS(DOCTEST_ANON_FUNC_), name) - -// for converting types to strings without the header and demangling -#define DOCTEST_TYPE_TO_STRING(...) static_assert(true, "") -#define DOCTEST_TYPE_TO_STRING_IMPL(...) - -// for typed tests -#define DOCTEST_TEST_CASE_TEMPLATE(name, type, ...) 
\ - template \ - inline void DOCTEST_ANONYMOUS(DOCTEST_ANON_TMP_)() - -#define DOCTEST_TEST_CASE_TEMPLATE_DEFINE(name, type, id) \ - template \ - inline void DOCTEST_ANONYMOUS(DOCTEST_ANON_TMP_)() - -#define DOCTEST_TEST_CASE_TEMPLATE_INVOKE(id, ...) static_assert(true, "") -#define DOCTEST_TEST_CASE_TEMPLATE_APPLY(id, ...) static_assert(true, "") - -// for subcases -#define DOCTEST_SUBCASE(name) - -// for a testsuite block -#define DOCTEST_TEST_SUITE(name) namespace - -// for starting a testsuite block -#define DOCTEST_TEST_SUITE_BEGIN(name) static_assert(true, "") - -// for ending a testsuite block -#define DOCTEST_TEST_SUITE_END \ - typedef int DOCTEST_ANONYMOUS(DOCTEST_ANON_FOR_SEMICOLON_) - -#define DOCTEST_REGISTER_EXCEPTION_TRANSLATOR(signature) \ - template \ - static inline doctest::String DOCTEST_ANONYMOUS(DOCTEST_ANON_TRANSLATOR_)( \ - signature) - -#define DOCTEST_REGISTER_REPORTER(name, priority, reporter) -#define DOCTEST_REGISTER_LISTENER(name, priority, reporter) - -#define DOCTEST_INFO(...) (static_cast(0)) -#define DOCTEST_CAPTURE(x) (static_cast(0)) -#define DOCTEST_ADD_MESSAGE_AT(file, line, ...) (static_cast(0)) -#define DOCTEST_ADD_FAIL_CHECK_AT(file, line, ...) (static_cast(0)) -#define DOCTEST_ADD_FAIL_AT(file, line, ...) (static_cast(0)) -#define DOCTEST_MESSAGE(...) (static_cast(0)) -#define DOCTEST_FAIL_CHECK(...) (static_cast(0)) -#define DOCTEST_FAIL(...) (static_cast(0)) - -#ifdef DOCTEST_CONFIG_EVALUATE_ASSERTS_EVEN_WHEN_DISABLED - -#define DOCTEST_WARN(...) [&] { return __VA_ARGS__; }() -#define DOCTEST_CHECK(...) [&] { return __VA_ARGS__; }() -#define DOCTEST_REQUIRE(...) [&] { return __VA_ARGS__; }() -#define DOCTEST_WARN_FALSE(...) [&] { return !(__VA_ARGS__); }() -#define DOCTEST_CHECK_FALSE(...) [&] { return !(__VA_ARGS__); }() -#define DOCTEST_REQUIRE_FALSE(...) [&] { return !(__VA_ARGS__); }() - -#define DOCTEST_WARN_MESSAGE(cond, ...) [&] { return cond; }() -#define DOCTEST_CHECK_MESSAGE(cond, ...) [&] { return cond; }() -#define DOCTEST_REQUIRE_MESSAGE(cond, ...) [&] { return cond; }() -#define DOCTEST_WARN_FALSE_MESSAGE(cond, ...) [&] { return !(cond); }() -#define DOCTEST_CHECK_FALSE_MESSAGE(cond, ...) [&] { return !(cond); }() -#define DOCTEST_REQUIRE_FALSE_MESSAGE(cond, ...) [&] { return !(cond); }() - -namespace doctest { namespace detail { -#define DOCTEST_RELATIONAL_OP(name, op) \ - template \ - bool name(const DOCTEST_REF_WRAP(L) lhs, const DOCTEST_REF_WRAP(R) rhs) \ - { \ - return lhs op rhs; \ - } - -DOCTEST_RELATIONAL_OP(eq, ==) -DOCTEST_RELATIONAL_OP(ne, !=) -DOCTEST_RELATIONAL_OP(lt, <) -DOCTEST_RELATIONAL_OP(gt, >) -DOCTEST_RELATIONAL_OP(le, <=) -DOCTEST_RELATIONAL_OP(ge, >=) -}} // namespace doctest::detail - -#define DOCTEST_WARN_EQ(...) [&] { return doctest::detail::eq(__VA_ARGS__); }() -#define DOCTEST_CHECK_EQ(...) [&] { return doctest::detail::eq(__VA_ARGS__); }() -#define DOCTEST_REQUIRE_EQ(...) \ - [&] { return doctest::detail::eq(__VA_ARGS__); }() -#define DOCTEST_WARN_NE(...) [&] { return doctest::detail::ne(__VA_ARGS__); }() -#define DOCTEST_CHECK_NE(...) [&] { return doctest::detail::ne(__VA_ARGS__); }() -#define DOCTEST_REQUIRE_NE(...) \ - [&] { return doctest::detail::ne(__VA_ARGS__); }() -#define DOCTEST_WARN_LT(...) [&] { return doctest::detail::lt(__VA_ARGS__); }() -#define DOCTEST_CHECK_LT(...) [&] { return doctest::detail::lt(__VA_ARGS__); }() -#define DOCTEST_REQUIRE_LT(...) \ - [&] { return doctest::detail::lt(__VA_ARGS__); }() -#define DOCTEST_WARN_GT(...) 
[&] { return doctest::detail::gt(__VA_ARGS__); }() -#define DOCTEST_CHECK_GT(...) [&] { return doctest::detail::gt(__VA_ARGS__); }() -#define DOCTEST_REQUIRE_GT(...) \ - [&] { return doctest::detail::gt(__VA_ARGS__); }() -#define DOCTEST_WARN_LE(...) [&] { return doctest::detail::le(__VA_ARGS__); }() -#define DOCTEST_CHECK_LE(...) [&] { return doctest::detail::le(__VA_ARGS__); }() -#define DOCTEST_REQUIRE_LE(...) \ - [&] { return doctest::detail::le(__VA_ARGS__); }() -#define DOCTEST_WARN_GE(...) [&] { return doctest::detail::ge(__VA_ARGS__); }() -#define DOCTEST_CHECK_GE(...) [&] { return doctest::detail::ge(__VA_ARGS__); }() -#define DOCTEST_REQUIRE_GE(...) \ - [&] { return doctest::detail::ge(__VA_ARGS__); }() -#define DOCTEST_WARN_UNARY(...) [&] { return __VA_ARGS__; }() -#define DOCTEST_CHECK_UNARY(...) [&] { return __VA_ARGS__; }() -#define DOCTEST_REQUIRE_UNARY(...) [&] { return __VA_ARGS__; }() -#define DOCTEST_WARN_UNARY_FALSE(...) [&] { return !(__VA_ARGS__); }() -#define DOCTEST_CHECK_UNARY_FALSE(...) [&] { return !(__VA_ARGS__); }() -#define DOCTEST_REQUIRE_UNARY_FALSE(...) [&] { return !(__VA_ARGS__); }() - -#else // DOCTEST_CONFIG_EVALUATE_ASSERTS_EVEN_WHEN_DISABLED - -#define DOCTEST_WARN(...) ([] { return false; }) -#define DOCTEST_CHECK(...) ([] { return false; }) -#define DOCTEST_REQUIRE(...) ([] { return false; }) -#define DOCTEST_WARN_FALSE(...) ([] { return false; }) -#define DOCTEST_CHECK_FALSE(...) ([] { return false; }) -#define DOCTEST_REQUIRE_FALSE(...) ([] { return false; }) - -#define DOCTEST_WARN_MESSAGE(cond, ...) ([] { return false; }) -#define DOCTEST_CHECK_MESSAGE(cond, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_MESSAGE(cond, ...) ([] { return false; }) -#define DOCTEST_WARN_FALSE_MESSAGE(cond, ...) ([] { return false; }) -#define DOCTEST_CHECK_FALSE_MESSAGE(cond, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_FALSE_MESSAGE(cond, ...) ([] { return false; }) - -#define DOCTEST_WARN_EQ(...) ([] { return false; }) -#define DOCTEST_CHECK_EQ(...) ([] { return false; }) -#define DOCTEST_REQUIRE_EQ(...) ([] { return false; }) -#define DOCTEST_WARN_NE(...) ([] { return false; }) -#define DOCTEST_CHECK_NE(...) ([] { return false; }) -#define DOCTEST_REQUIRE_NE(...) ([] { return false; }) -#define DOCTEST_WARN_GT(...) ([] { return false; }) -#define DOCTEST_CHECK_GT(...) ([] { return false; }) -#define DOCTEST_REQUIRE_GT(...) ([] { return false; }) -#define DOCTEST_WARN_LT(...) ([] { return false; }) -#define DOCTEST_CHECK_LT(...) ([] { return false; }) -#define DOCTEST_REQUIRE_LT(...) ([] { return false; }) -#define DOCTEST_WARN_GE(...) ([] { return false; }) -#define DOCTEST_CHECK_GE(...) ([] { return false; }) -#define DOCTEST_REQUIRE_GE(...) ([] { return false; }) -#define DOCTEST_WARN_LE(...) ([] { return false; }) -#define DOCTEST_CHECK_LE(...) ([] { return false; }) -#define DOCTEST_REQUIRE_LE(...) ([] { return false; }) - -#define DOCTEST_WARN_UNARY(...) ([] { return false; }) -#define DOCTEST_CHECK_UNARY(...) ([] { return false; }) -#define DOCTEST_REQUIRE_UNARY(...) ([] { return false; }) -#define DOCTEST_WARN_UNARY_FALSE(...) ([] { return false; }) -#define DOCTEST_CHECK_UNARY_FALSE(...) ([] { return false; }) -#define DOCTEST_REQUIRE_UNARY_FALSE(...) ([] { return false; }) - -#endif // DOCTEST_CONFIG_EVALUATE_ASSERTS_EVEN_WHEN_DISABLED - -// TODO: think about if these also need to work properly even when doctest is -// disabled -#define DOCTEST_WARN_THROWS(...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS(...) 
([] { return false; }) -#define DOCTEST_REQUIRE_THROWS(...) ([] { return false; }) -#define DOCTEST_WARN_THROWS_AS(expr, ...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS_AS(expr, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS_AS(expr, ...) ([] { return false; }) -#define DOCTEST_WARN_THROWS_WITH(expr, ...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS_WITH(expr, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS_WITH(expr, ...) ([] { return false; }) -#define DOCTEST_WARN_THROWS_WITH_AS(expr, with, ...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS_WITH_AS(expr, with, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS_WITH_AS(expr, with, ...) ([] { return false; }) -#define DOCTEST_WARN_NOTHROW(...) ([] { return false; }) -#define DOCTEST_CHECK_NOTHROW(...) ([] { return false; }) -#define DOCTEST_REQUIRE_NOTHROW(...) ([] { return false; }) - -#define DOCTEST_WARN_THROWS_MESSAGE(expr, ...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS_MESSAGE(expr, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS_MESSAGE(expr, ...) ([] { return false; }) -#define DOCTEST_WARN_THROWS_AS_MESSAGE(expr, ex, ...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS_AS_MESSAGE(expr, ex, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS_AS_MESSAGE(expr, ex, ...) ([] { return false; }) -#define DOCTEST_WARN_THROWS_WITH_MESSAGE(expr, with, ...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS_WITH_MESSAGE(expr, with, ...) \ - ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS_WITH_MESSAGE(expr, with, ...) \ - ([] { return false; }) -#define DOCTEST_WARN_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) \ - ([] { return false; }) -#define DOCTEST_CHECK_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) \ - ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) \ - ([] { return false; }) -#define DOCTEST_WARN_NOTHROW_MESSAGE(expr, ...) ([] { return false; }) -#define DOCTEST_CHECK_NOTHROW_MESSAGE(expr, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_NOTHROW_MESSAGE(expr, ...) ([] { return false; }) - -#endif // DOCTEST_CONFIG_DISABLE - -// clang-format off -// KEPT FOR BACKWARDS COMPATIBILITY - FORWARDING TO THE RIGHT MACROS -#define DOCTEST_FAST_WARN_EQ DOCTEST_WARN_EQ -#define DOCTEST_FAST_CHECK_EQ DOCTEST_CHECK_EQ -#define DOCTEST_FAST_REQUIRE_EQ DOCTEST_REQUIRE_EQ -#define DOCTEST_FAST_WARN_NE DOCTEST_WARN_NE -#define DOCTEST_FAST_CHECK_NE DOCTEST_CHECK_NE -#define DOCTEST_FAST_REQUIRE_NE DOCTEST_REQUIRE_NE -#define DOCTEST_FAST_WARN_GT DOCTEST_WARN_GT -#define DOCTEST_FAST_CHECK_GT DOCTEST_CHECK_GT -#define DOCTEST_FAST_REQUIRE_GT DOCTEST_REQUIRE_GT -#define DOCTEST_FAST_WARN_LT DOCTEST_WARN_LT -#define DOCTEST_FAST_CHECK_LT DOCTEST_CHECK_LT -#define DOCTEST_FAST_REQUIRE_LT DOCTEST_REQUIRE_LT -#define DOCTEST_FAST_WARN_GE DOCTEST_WARN_GE -#define DOCTEST_FAST_CHECK_GE DOCTEST_CHECK_GE -#define DOCTEST_FAST_REQUIRE_GE DOCTEST_REQUIRE_GE -#define DOCTEST_FAST_WARN_LE DOCTEST_WARN_LE -#define DOCTEST_FAST_CHECK_LE DOCTEST_CHECK_LE -#define DOCTEST_FAST_REQUIRE_LE DOCTEST_REQUIRE_LE - -#define DOCTEST_FAST_WARN_UNARY DOCTEST_WARN_UNARY -#define DOCTEST_FAST_CHECK_UNARY DOCTEST_CHECK_UNARY -#define DOCTEST_FAST_REQUIRE_UNARY DOCTEST_REQUIRE_UNARY -#define DOCTEST_FAST_WARN_UNARY_FALSE DOCTEST_WARN_UNARY_FALSE -#define DOCTEST_FAST_CHECK_UNARY_FALSE DOCTEST_CHECK_UNARY_FALSE -#define DOCTEST_FAST_REQUIRE_UNARY_FALSE DOCTEST_REQUIRE_UNARY_FALSE - -#define DOCTEST_TEST_CASE_TEMPLATE_INSTANTIATE(id, ...) 
DOCTEST_TEST_CASE_TEMPLATE_INVOKE(id,__VA_ARGS__) -// clang-format on - -// BDD style macros -// clang-format off -#define DOCTEST_SCENARIO(name) DOCTEST_TEST_CASE(" Scenario: " name) -#define DOCTEST_SCENARIO_CLASS(name) DOCTEST_TEST_CASE_CLASS(" Scenario: " name) -#define DOCTEST_SCENARIO_TEMPLATE(name, T, ...) DOCTEST_TEST_CASE_TEMPLATE(" Scenario: " name, T, __VA_ARGS__) -#define DOCTEST_SCENARIO_TEMPLATE_DEFINE(name, T, id) DOCTEST_TEST_CASE_TEMPLATE_DEFINE(" Scenario: " name, T, id) - -#define DOCTEST_GIVEN(name) DOCTEST_SUBCASE(" Given: " name) -#define DOCTEST_WHEN(name) DOCTEST_SUBCASE(" When: " name) -#define DOCTEST_AND_WHEN(name) DOCTEST_SUBCASE("And when: " name) -#define DOCTEST_THEN(name) DOCTEST_SUBCASE(" Then: " name) -#define DOCTEST_AND_THEN(name) DOCTEST_SUBCASE(" And: " name) -// clang-format on - -// == SHORT VERSIONS OF THE MACROS -#if !defined(DOCTEST_CONFIG_NO_SHORT_MACRO_NAMES) - -#define TEST_CASE(name) DOCTEST_TEST_CASE(name) -#define TEST_CASE_CLASS(name) DOCTEST_TEST_CASE_CLASS(name) -#define TEST_CASE_FIXTURE(x, name) DOCTEST_TEST_CASE_FIXTURE(x, name) -#define TYPE_TO_STRING(...) DOCTEST_TYPE_TO_STRING(__VA_ARGS__) -#define TEST_CASE_TEMPLATE(name, T, ...) \ - DOCTEST_TEST_CASE_TEMPLATE(name, T, __VA_ARGS__) -#define TEST_CASE_TEMPLATE_DEFINE(name, T, id) \ - DOCTEST_TEST_CASE_TEMPLATE_DEFINE(name, T, id) -#define TEST_CASE_TEMPLATE_INVOKE(id, ...) \ - DOCTEST_TEST_CASE_TEMPLATE_INVOKE(id, __VA_ARGS__) -#define TEST_CASE_TEMPLATE_APPLY(id, ...) \ - DOCTEST_TEST_CASE_TEMPLATE_APPLY(id, __VA_ARGS__) -#define SUBCASE(name) DOCTEST_SUBCASE(name) -#define TEST_SUITE(decorators) DOCTEST_TEST_SUITE(decorators) -#define TEST_SUITE_BEGIN(name) DOCTEST_TEST_SUITE_BEGIN(name) -#define TEST_SUITE_END DOCTEST_TEST_SUITE_END -#define REGISTER_EXCEPTION_TRANSLATOR(signature) \ - DOCTEST_REGISTER_EXCEPTION_TRANSLATOR(signature) -#define REGISTER_REPORTER(name, priority, reporter) \ - DOCTEST_REGISTER_REPORTER(name, priority, reporter) -#define REGISTER_LISTENER(name, priority, reporter) \ - DOCTEST_REGISTER_LISTENER(name, priority, reporter) -#define INFO(...) DOCTEST_INFO(__VA_ARGS__) -#define CAPTURE(x) DOCTEST_CAPTURE(x) -#define ADD_MESSAGE_AT(file, line, ...) \ - DOCTEST_ADD_MESSAGE_AT(file, line, __VA_ARGS__) -#define ADD_FAIL_CHECK_AT(file, line, ...) \ - DOCTEST_ADD_FAIL_CHECK_AT(file, line, __VA_ARGS__) -#define ADD_FAIL_AT(file, line, ...) \ - DOCTEST_ADD_FAIL_AT(file, line, __VA_ARGS__) -#define MESSAGE(...) DOCTEST_MESSAGE(__VA_ARGS__) -#define FAIL_CHECK(...) DOCTEST_FAIL_CHECK(__VA_ARGS__) -#define FAIL(...) DOCTEST_FAIL(__VA_ARGS__) -#define TO_LVALUE(...) DOCTEST_TO_LVALUE(__VA_ARGS__) - -#define WARN(...) DOCTEST_WARN(__VA_ARGS__) -#define WARN_FALSE(...) DOCTEST_WARN_FALSE(__VA_ARGS__) -#define WARN_THROWS(...) DOCTEST_WARN_THROWS(__VA_ARGS__) -#define WARN_THROWS_AS(expr, ...) DOCTEST_WARN_THROWS_AS(expr, __VA_ARGS__) -#define WARN_THROWS_WITH(expr, ...) DOCTEST_WARN_THROWS_WITH(expr, __VA_ARGS__) -#define WARN_THROWS_WITH_AS(expr, with, ...) \ - DOCTEST_WARN_THROWS_WITH_AS(expr, with, __VA_ARGS__) -#define WARN_NOTHROW(...) DOCTEST_WARN_NOTHROW(__VA_ARGS__) -#define CHECK(...) DOCTEST_CHECK(__VA_ARGS__) -#define CHECK_FALSE(...) DOCTEST_CHECK_FALSE(__VA_ARGS__) -#define CHECK_THROWS(...) DOCTEST_CHECK_THROWS(__VA_ARGS__) -#define CHECK_THROWS_AS(expr, ...) DOCTEST_CHECK_THROWS_AS(expr, __VA_ARGS__) -#define CHECK_THROWS_WITH(expr, ...) \ - DOCTEST_CHECK_THROWS_WITH(expr, __VA_ARGS__) -#define CHECK_THROWS_WITH_AS(expr, with, ...) 
\ - DOCTEST_CHECK_THROWS_WITH_AS(expr, with, __VA_ARGS__) -#define CHECK_NOTHROW(...) DOCTEST_CHECK_NOTHROW(__VA_ARGS__) -#define REQUIRE(...) DOCTEST_REQUIRE(__VA_ARGS__) -#define REQUIRE_FALSE(...) DOCTEST_REQUIRE_FALSE(__VA_ARGS__) -#define REQUIRE_THROWS(...) DOCTEST_REQUIRE_THROWS(__VA_ARGS__) -#define REQUIRE_THROWS_AS(expr, ...) \ - DOCTEST_REQUIRE_THROWS_AS(expr, __VA_ARGS__) -#define REQUIRE_THROWS_WITH(expr, ...) \ - DOCTEST_REQUIRE_THROWS_WITH(expr, __VA_ARGS__) -#define REQUIRE_THROWS_WITH_AS(expr, with, ...) \ - DOCTEST_REQUIRE_THROWS_WITH_AS(expr, with, __VA_ARGS__) -#define REQUIRE_NOTHROW(...) DOCTEST_REQUIRE_NOTHROW(__VA_ARGS__) - -#define WARN_MESSAGE(cond, ...) DOCTEST_WARN_MESSAGE(cond, __VA_ARGS__) -#define WARN_FALSE_MESSAGE(cond, ...) \ - DOCTEST_WARN_FALSE_MESSAGE(cond, __VA_ARGS__) -#define WARN_THROWS_MESSAGE(expr, ...) \ - DOCTEST_WARN_THROWS_MESSAGE(expr, __VA_ARGS__) -#define WARN_THROWS_AS_MESSAGE(expr, ex, ...) \ - DOCTEST_WARN_THROWS_AS_MESSAGE(expr, ex, __VA_ARGS__) -#define WARN_THROWS_WITH_MESSAGE(expr, with, ...) \ - DOCTEST_WARN_THROWS_WITH_MESSAGE(expr, with, __VA_ARGS__) -#define WARN_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) \ - DOCTEST_WARN_THROWS_WITH_AS_MESSAGE(expr, with, ex, __VA_ARGS__) -#define WARN_NOTHROW_MESSAGE(expr, ...) \ - DOCTEST_WARN_NOTHROW_MESSAGE(expr, __VA_ARGS__) -#define CHECK_MESSAGE(cond, ...) DOCTEST_CHECK_MESSAGE(cond, __VA_ARGS__) -#define CHECK_FALSE_MESSAGE(cond, ...) \ - DOCTEST_CHECK_FALSE_MESSAGE(cond, __VA_ARGS__) -#define CHECK_THROWS_MESSAGE(expr, ...) \ - DOCTEST_CHECK_THROWS_MESSAGE(expr, __VA_ARGS__) -#define CHECK_THROWS_AS_MESSAGE(expr, ex, ...) \ - DOCTEST_CHECK_THROWS_AS_MESSAGE(expr, ex, __VA_ARGS__) -#define CHECK_THROWS_WITH_MESSAGE(expr, with, ...) \ - DOCTEST_CHECK_THROWS_WITH_MESSAGE(expr, with, __VA_ARGS__) -#define CHECK_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) \ - DOCTEST_CHECK_THROWS_WITH_AS_MESSAGE(expr, with, ex, __VA_ARGS__) -#define CHECK_NOTHROW_MESSAGE(expr, ...) \ - DOCTEST_CHECK_NOTHROW_MESSAGE(expr, __VA_ARGS__) -#define REQUIRE_MESSAGE(cond, ...) DOCTEST_REQUIRE_MESSAGE(cond, __VA_ARGS__) -#define REQUIRE_FALSE_MESSAGE(cond, ...) \ - DOCTEST_REQUIRE_FALSE_MESSAGE(cond, __VA_ARGS__) -#define REQUIRE_THROWS_MESSAGE(expr, ...) \ - DOCTEST_REQUIRE_THROWS_MESSAGE(expr, __VA_ARGS__) -#define REQUIRE_THROWS_AS_MESSAGE(expr, ex, ...) \ - DOCTEST_REQUIRE_THROWS_AS_MESSAGE(expr, ex, __VA_ARGS__) -#define REQUIRE_THROWS_WITH_MESSAGE(expr, with, ...) \ - DOCTEST_REQUIRE_THROWS_WITH_MESSAGE(expr, with, __VA_ARGS__) -#define REQUIRE_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) \ - DOCTEST_REQUIRE_THROWS_WITH_AS_MESSAGE(expr, with, ex, __VA_ARGS__) -#define REQUIRE_NOTHROW_MESSAGE(expr, ...) \ - DOCTEST_REQUIRE_NOTHROW_MESSAGE(expr, __VA_ARGS__) - -#define SCENARIO(name) DOCTEST_SCENARIO(name) -#define SCENARIO_CLASS(name) DOCTEST_SCENARIO_CLASS(name) -#define SCENARIO_TEMPLATE(name, T, ...) \ - DOCTEST_SCENARIO_TEMPLATE(name, T, __VA_ARGS__) -#define SCENARIO_TEMPLATE_DEFINE(name, T, id) \ - DOCTEST_SCENARIO_TEMPLATE_DEFINE(name, T, id) -#define GIVEN(name) DOCTEST_GIVEN(name) -#define WHEN(name) DOCTEST_WHEN(name) -#define AND_WHEN(name) DOCTEST_AND_WHEN(name) -#define THEN(name) DOCTEST_THEN(name) -#define AND_THEN(name) DOCTEST_AND_THEN(name) - -#define WARN_EQ(...) DOCTEST_WARN_EQ(__VA_ARGS__) -#define CHECK_EQ(...) DOCTEST_CHECK_EQ(__VA_ARGS__) -#define REQUIRE_EQ(...) DOCTEST_REQUIRE_EQ(__VA_ARGS__) -#define WARN_NE(...) DOCTEST_WARN_NE(__VA_ARGS__) -#define CHECK_NE(...) 
DOCTEST_CHECK_NE(__VA_ARGS__) -#define REQUIRE_NE(...) DOCTEST_REQUIRE_NE(__VA_ARGS__) -#define WARN_GT(...) DOCTEST_WARN_GT(__VA_ARGS__) -#define CHECK_GT(...) DOCTEST_CHECK_GT(__VA_ARGS__) -#define REQUIRE_GT(...) DOCTEST_REQUIRE_GT(__VA_ARGS__) -#define WARN_LT(...) DOCTEST_WARN_LT(__VA_ARGS__) -#define CHECK_LT(...) DOCTEST_CHECK_LT(__VA_ARGS__) -#define REQUIRE_LT(...) DOCTEST_REQUIRE_LT(__VA_ARGS__) -#define WARN_GE(...) DOCTEST_WARN_GE(__VA_ARGS__) -#define CHECK_GE(...) DOCTEST_CHECK_GE(__VA_ARGS__) -#define REQUIRE_GE(...) DOCTEST_REQUIRE_GE(__VA_ARGS__) -#define WARN_LE(...) DOCTEST_WARN_LE(__VA_ARGS__) -#define CHECK_LE(...) DOCTEST_CHECK_LE(__VA_ARGS__) -#define REQUIRE_LE(...) DOCTEST_REQUIRE_LE(__VA_ARGS__) -#define WARN_UNARY(...) DOCTEST_WARN_UNARY(__VA_ARGS__) -#define CHECK_UNARY(...) DOCTEST_CHECK_UNARY(__VA_ARGS__) -#define REQUIRE_UNARY(...) DOCTEST_REQUIRE_UNARY(__VA_ARGS__) -#define WARN_UNARY_FALSE(...) DOCTEST_WARN_UNARY_FALSE(__VA_ARGS__) -#define CHECK_UNARY_FALSE(...) DOCTEST_CHECK_UNARY_FALSE(__VA_ARGS__) -#define REQUIRE_UNARY_FALSE(...) DOCTEST_REQUIRE_UNARY_FALSE(__VA_ARGS__) - -// KEPT FOR BACKWARDS COMPATIBILITY -#define FAST_WARN_EQ(...) DOCTEST_FAST_WARN_EQ(__VA_ARGS__) -#define FAST_CHECK_EQ(...) DOCTEST_FAST_CHECK_EQ(__VA_ARGS__) -#define FAST_REQUIRE_EQ(...) DOCTEST_FAST_REQUIRE_EQ(__VA_ARGS__) -#define FAST_WARN_NE(...) DOCTEST_FAST_WARN_NE(__VA_ARGS__) -#define FAST_CHECK_NE(...) DOCTEST_FAST_CHECK_NE(__VA_ARGS__) -#define FAST_REQUIRE_NE(...) DOCTEST_FAST_REQUIRE_NE(__VA_ARGS__) -#define FAST_WARN_GT(...) DOCTEST_FAST_WARN_GT(__VA_ARGS__) -#define FAST_CHECK_GT(...) DOCTEST_FAST_CHECK_GT(__VA_ARGS__) -#define FAST_REQUIRE_GT(...) DOCTEST_FAST_REQUIRE_GT(__VA_ARGS__) -#define FAST_WARN_LT(...) DOCTEST_FAST_WARN_LT(__VA_ARGS__) -#define FAST_CHECK_LT(...) DOCTEST_FAST_CHECK_LT(__VA_ARGS__) -#define FAST_REQUIRE_LT(...) DOCTEST_FAST_REQUIRE_LT(__VA_ARGS__) -#define FAST_WARN_GE(...) DOCTEST_FAST_WARN_GE(__VA_ARGS__) -#define FAST_CHECK_GE(...) DOCTEST_FAST_CHECK_GE(__VA_ARGS__) -#define FAST_REQUIRE_GE(...) DOCTEST_FAST_REQUIRE_GE(__VA_ARGS__) -#define FAST_WARN_LE(...) DOCTEST_FAST_WARN_LE(__VA_ARGS__) -#define FAST_CHECK_LE(...) DOCTEST_FAST_CHECK_LE(__VA_ARGS__) -#define FAST_REQUIRE_LE(...) DOCTEST_FAST_REQUIRE_LE(__VA_ARGS__) - -#define FAST_WARN_UNARY(...) DOCTEST_FAST_WARN_UNARY(__VA_ARGS__) -#define FAST_CHECK_UNARY(...) DOCTEST_FAST_CHECK_UNARY(__VA_ARGS__) -#define FAST_REQUIRE_UNARY(...) DOCTEST_FAST_REQUIRE_UNARY(__VA_ARGS__) -#define FAST_WARN_UNARY_FALSE(...) DOCTEST_FAST_WARN_UNARY_FALSE(__VA_ARGS__) -#define FAST_CHECK_UNARY_FALSE(...) DOCTEST_FAST_CHECK_UNARY_FALSE(__VA_ARGS__) -#define FAST_REQUIRE_UNARY_FALSE(...) \ - DOCTEST_FAST_REQUIRE_UNARY_FALSE(__VA_ARGS__) - -#define TEST_CASE_TEMPLATE_INSTANTIATE(id, ...) 
\ - DOCTEST_TEST_CASE_TEMPLATE_INSTANTIATE(id, __VA_ARGS__) - -#endif // DOCTEST_CONFIG_NO_SHORT_MACRO_NAMES - -#if !defined(DOCTEST_CONFIG_DISABLE) - -// this is here to clear the 'current test suite' for the current translation -// unit - at the top -DOCTEST_TEST_SUITE_END(); - -// add stringification for primitive/fundamental types -namespace doctest { namespace detail { -DOCTEST_TYPE_TO_STRING_IMPL(bool) -DOCTEST_TYPE_TO_STRING_IMPL(float) -DOCTEST_TYPE_TO_STRING_IMPL(double) -DOCTEST_TYPE_TO_STRING_IMPL(long double) -DOCTEST_TYPE_TO_STRING_IMPL(char) -DOCTEST_TYPE_TO_STRING_IMPL(signed char) -DOCTEST_TYPE_TO_STRING_IMPL(unsigned char) -#if !DOCTEST_MSVC || defined(_NATIVE_WCHAR_T_DEFINED) -DOCTEST_TYPE_TO_STRING_IMPL(wchar_t) -#endif // not MSVC or wchar_t support enabled -DOCTEST_TYPE_TO_STRING_IMPL(short int) -DOCTEST_TYPE_TO_STRING_IMPL(unsigned short int) -DOCTEST_TYPE_TO_STRING_IMPL(int) -DOCTEST_TYPE_TO_STRING_IMPL(unsigned int) -DOCTEST_TYPE_TO_STRING_IMPL(long int) -DOCTEST_TYPE_TO_STRING_IMPL(unsigned long int) -DOCTEST_TYPE_TO_STRING_IMPL(long long int) -DOCTEST_TYPE_TO_STRING_IMPL(unsigned long long int) -}} // namespace doctest::detail - -#endif // DOCTEST_CONFIG_DISABLE - -DOCTEST_CLANG_SUPPRESS_WARNING_POP -DOCTEST_MSVC_SUPPRESS_WARNING_POP -DOCTEST_GCC_SUPPRESS_WARNING_POP - -DOCTEST_SUPPRESS_COMMON_WARNINGS_POP - -#endif // DOCTEST_LIBRARY_INCLUDED - -#ifndef DOCTEST_SINGLE_HEADER -#define DOCTEST_SINGLE_HEADER -#endif // DOCTEST_SINGLE_HEADER - -#if defined(DOCTEST_CONFIG_IMPLEMENT) || !defined(DOCTEST_SINGLE_HEADER) - -#ifndef DOCTEST_SINGLE_HEADER -#include "doctest_fwd.h" -#endif // DOCTEST_SINGLE_HEADER - -DOCTEST_CLANG_SUPPRESS_WARNING_WITH_PUSH("-Wunused-macros") - -#ifndef DOCTEST_LIBRARY_IMPLEMENTATION -#define DOCTEST_LIBRARY_IMPLEMENTATION - -DOCTEST_CLANG_SUPPRESS_WARNING_POP - -DOCTEST_SUPPRESS_COMMON_WARNINGS_PUSH - -DOCTEST_CLANG_SUPPRESS_WARNING_PUSH -DOCTEST_CLANG_SUPPRESS_WARNING("-Wglobal-constructors") -DOCTEST_CLANG_SUPPRESS_WARNING("-Wexit-time-destructors") -DOCTEST_CLANG_SUPPRESS_WARNING("-Wsign-conversion") -DOCTEST_CLANG_SUPPRESS_WARNING("-Wshorten-64-to-32") -DOCTEST_CLANG_SUPPRESS_WARNING("-Wmissing-variable-declarations") -DOCTEST_CLANG_SUPPRESS_WARNING("-Wswitch") -DOCTEST_CLANG_SUPPRESS_WARNING("-Wswitch-enum") -DOCTEST_CLANG_SUPPRESS_WARNING("-Wcovered-switch-default") -DOCTEST_CLANG_SUPPRESS_WARNING("-Wmissing-noreturn") -DOCTEST_CLANG_SUPPRESS_WARNING("-Wdisabled-macro-expansion") -DOCTEST_CLANG_SUPPRESS_WARNING("-Wmissing-braces") -DOCTEST_CLANG_SUPPRESS_WARNING("-Wmissing-field-initializers") -DOCTEST_CLANG_SUPPRESS_WARNING("-Wunused-member-function") -DOCTEST_CLANG_SUPPRESS_WARNING("-Wnonportable-system-include-path") - -DOCTEST_GCC_SUPPRESS_WARNING_PUSH -DOCTEST_GCC_SUPPRESS_WARNING("-Wconversion") -DOCTEST_GCC_SUPPRESS_WARNING("-Wsign-conversion") -DOCTEST_GCC_SUPPRESS_WARNING("-Wmissing-field-initializers") -DOCTEST_GCC_SUPPRESS_WARNING("-Wmissing-braces") -DOCTEST_GCC_SUPPRESS_WARNING("-Wswitch") -DOCTEST_GCC_SUPPRESS_WARNING("-Wswitch-enum") -DOCTEST_GCC_SUPPRESS_WARNING("-Wswitch-default") -DOCTEST_GCC_SUPPRESS_WARNING("-Wunsafe-loop-optimizations") -DOCTEST_GCC_SUPPRESS_WARNING("-Wold-style-cast") -DOCTEST_GCC_SUPPRESS_WARNING("-Wunused-function") -DOCTEST_GCC_SUPPRESS_WARNING("-Wmultiple-inheritance") -DOCTEST_GCC_SUPPRESS_WARNING("-Wsuggest-attribute") - -DOCTEST_MSVC_SUPPRESS_WARNING_PUSH -DOCTEST_MSVC_SUPPRESS_WARNING( - 4267) // 'var' : conversion from 'x' to 'y', possible loss of data -DOCTEST_MSVC_SUPPRESS_WARNING( - 
4530) // C++ exception handler used, but unwind semantics not enabled
-DOCTEST_MSVC_SUPPRESS_WARNING(
-    4577) // 'noexcept' used with no exception handling mode specified
-DOCTEST_MSVC_SUPPRESS_WARNING(
-    4774) // format string expected in argument is not a string literal
-DOCTEST_MSVC_SUPPRESS_WARNING(
-    4365) // conversion from 'int' to 'unsigned', signed/unsigned mismatch
-DOCTEST_MSVC_SUPPRESS_WARNING(
-    5039) // pointer to potentially throwing function passed to extern C
-DOCTEST_MSVC_SUPPRESS_WARNING(
-    4800) // forcing value to bool 'true' or 'false' (performance warning)
-DOCTEST_MSVC_SUPPRESS_WARNING(
-    5245) // unreferenced function with internal linkage has been removed
-
-DOCTEST_MAKE_STD_HEADERS_CLEAN_FROM_WARNINGS_ON_WALL_BEGIN
-
-// required includes - will go only in one translation unit!
-#include <ctype.h>
-#include <cstdlib>
-#include <cstdarg>
-// borland (Embarcadero) compiler requires math.h and not cmath -
-// https://github.com/doctest/doctest/pull/37
-#ifdef __BORLANDC__
-#include <math.h>
-#endif // __BORLANDC__
-#include <cmath>
-#include <new>
-#include <cstdio>
-#include <cstring>
-#include <climits>
-#include <cctype>
-#include <cstdint>
-#include <csignal>
-#include <cfloat>
-#include <sstream>
-#include <iomanip>
-#include <iostream>
-#include <algorithm>
-#include <limits>
-#include <utility>
-#include <fstream>
-#include <vector>
-#include <atomic>
-#include <mutex>
-#include <set>
-#include <map>
-#include <exception>
-#include <stdexcept>
-
-#ifdef DOCTEST_PLATFORM_MAC
-#include <sys/types.h>
-#include <sys/sysctl.h>
-#include <unistd.h>
-#endif // DOCTEST_PLATFORM_MAC
-
-#ifdef DOCTEST_PLATFORM_WINDOWS
-
-// defines for a leaner windows.h
-#ifndef WIN32_LEAN_AND_MEAN
-#define WIN32_LEAN_AND_MEAN
-#endif // WIN32_LEAN_AND_MEAN
-#ifndef NOMINMAX
-#define NOMINMAX
-#endif // NOMINMAX
-
-// not sure what AfxWin.h is for - here I do what Catch does
-#ifdef __AFXDLL
-#include <AfxWin.h>
-#else
-#include <windows.h>
-#endif
-#include <io.h>
-
-#else // DOCTEST_PLATFORM_WINDOWS
-
-#include <sys/time.h>
-#include <unistd.h>
-
-#endif // DOCTEST_PLATFORM_WINDOWS
-
-// this is a fix for https://github.com/doctest/doctest/issues/348
-// https://mail.gnome.org/archives/xml/2012-January/msg00000.html
-#if !defined(HAVE_UNISTD_H) && !defined(STDOUT_FILENO)
-#define STDOUT_FILENO fileno(stdout)
-#endif // HAVE_UNISTD_H
-
-DOCTEST_MAKE_STD_HEADERS_CLEAN_FROM_WARNINGS_ON_WALL_END
-
-// counts the number of elements in a C array
-#define DOCTEST_COUNTOF(x) (sizeof(x) / sizeof(x[0]))
-
-#ifdef DOCTEST_CONFIG_DISABLE
-#define DOCTEST_BRANCH_ON_DISABLED(if_disabled, if_not_disabled) if_disabled
-#else // DOCTEST_CONFIG_DISABLE
-#define DOCTEST_BRANCH_ON_DISABLED(if_disabled, if_not_disabled) if_not_disabled
-#endif // DOCTEST_CONFIG_DISABLE
-
-#ifndef DOCTEST_CONFIG_OPTIONS_PREFIX
-#define DOCTEST_CONFIG_OPTIONS_PREFIX "dt-"
-#endif
-
-#ifndef DOCTEST_THREAD_LOCAL
-#if DOCTEST_MSVC && (DOCTEST_MSVC < DOCTEST_COMPILER(19, 0, 0))
-#define DOCTEST_THREAD_LOCAL
-#else // DOCTEST_MSVC
-#define DOCTEST_THREAD_LOCAL thread_local
-#endif // DOCTEST_MSVC
-#endif // DOCTEST_THREAD_LOCAL
-
-#ifndef DOCTEST_MULTI_LANE_ATOMICS_THREAD_LANES
-#define DOCTEST_MULTI_LANE_ATOMICS_THREAD_LANES 32
-#endif
-
-#ifndef DOCTEST_MULTI_LANE_ATOMICS_CACHE_LINE_SIZE
-#define DOCTEST_MULTI_LANE_ATOMICS_CACHE_LINE_SIZE 64
-#endif
-
-#ifdef DOCTEST_CONFIG_NO_UNPREFIXED_OPTIONS
-#define DOCTEST_OPTIONS_PREFIX_DISPLAY DOCTEST_CONFIG_OPTIONS_PREFIX
-#else
-#define DOCTEST_OPTIONS_PREFIX_DISPLAY ""
-#endif
-
-#if defined(WINAPI_FAMILY) && (WINAPI_FAMILY == WINAPI_FAMILY_APP)
-#define DOCTEST_CONFIG_NO_MULTI_LANE_ATOMICS
-#endif
-
-#ifndef DOCTEST_CDECL
-#define DOCTEST_CDECL __cdecl
-#endif
-
-namespace doctest {
-
-bool is_running_in_test = false;
-
-namespace {
-using namespace detail;
-
-template <typename Ex>
-DOCTEST_NORETURN void
-throw_exception(Ex const& e)
-{
-#ifndef DOCTEST_CONFIG_NO_EXCEPTIONS
-  throw e;
-#else // DOCTEST_CONFIG_NO_EXCEPTIONS
-  std::cerr
-      << "doctest will terminate because it needed to throw an exception.\n"
-      << "The message was: " << e.what() << '\n';
-  std::terminate();
-#endif // DOCTEST_CONFIG_NO_EXCEPTIONS
-}
-
-#ifndef DOCTEST_INTERNAL_ERROR
-#define DOCTEST_INTERNAL_ERROR(msg) \
-  throw_exception(std::logic_error( \
-      __FILE__ ":" DOCTEST_TOSTR(__LINE__) ": Internal doctest error: " msg))
-#endif // DOCTEST_INTERNAL_ERROR
-
-// case insensitive strcmp
-int
-stricmp(const char* a, const char* b)
-{
-  for (;; a++, b++) {
-    const int d = tolower(*a) - tolower(*b);
-    if (d != 0 || !*a)
-      return d;
-  }
-}
-
-template <typename T>
-String
-fpToString(T value, int precision)
-{
-  std::ostringstream oss;
-  oss << std::setprecision(precision) << std::fixed << value;
-  std::string d = oss.str();
-  size_t i = d.find_last_not_of('0');
-  if (i != std::string::npos && i != d.size() - 1) {
-    if (d[i] == '.')
-      i++;
-    d = d.substr(0, i + 1);
-  }
-  return d.c_str();
-}
-
-struct Endianness {
-  enum Arch { Big, Little };
-
-  static Arch which()
-  {
-    int x = 1;
-    // casting any data pointer to char* is allowed
-    auto ptr = reinterpret_cast<char*>(&x);
-    if (*ptr)
-      return Little;
-    return Big;
-  }
-};
-} // namespace
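The Endianness::which() probe above uses a common idiom: store 1 in an int and inspect its first byte through a char pointer (char access is exempt from strict aliasing). A standalone sketch of the same check, outside doctest:

    #include <cstdio>

    int main()
    {
      int probe = 1;
      // On a little-endian machine the least significant byte is stored
      // first, so the first byte of the value 1 is non-zero.
      const char* first_byte = reinterpret_cast<const char*>(&probe);
      std::printf("%s-endian\n", *first_byte ? "little" : "big");
      return 0;
    }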
-
-namespace detail {
-String
-rawMemoryToString(const void* object, unsigned size)
-{
-  // Reverse order for little endian architectures
-  int i = 0, end = static_cast<int>(size), inc = 1;
-  if (Endianness::which() == Endianness::Little) {
-    i = end - 1;
-    end = inc = -1;
-  }
-
-  const unsigned char* bytes = static_cast<const unsigned char*>(object);
-  std::ostream* oss = tlssPush();
-  *oss << "0x" << std::setfill('0') << std::hex;
-  for (; i != end; i += inc)
-    *oss << std::setw(2) << static_cast<unsigned>(bytes[i]);
-  return tlssPop();
-}
-
-DOCTEST_THREAD_LOCAL class {
-  std::vector<std::streampos> stack;
-  std::stringstream ss;
-
- public:
-  std::ostream* push()
-  {
-    stack.push_back(ss.tellp());
-    return &ss;
-  }
-
-  String pop()
-  {
-    if (stack.empty())
-      DOCTEST_INTERNAL_ERROR("TLSS was empty when trying to pop!");
-
-    std::streampos pos = stack.back();
-    stack.pop_back();
-    unsigned sz = static_cast<unsigned>(ss.tellp() - pos);
-    ss.rdbuf()->pubseekpos(pos, std::ios::in | std::ios::out);
-    return String(ss, sz);
-  }
-} g_oss;
-
-std::ostream*
-tlssPush()
-{
-  return g_oss.push();
-}
-
-String
-tlssPop()
-{
-  return g_oss.pop();
-}
-
-#ifndef DOCTEST_CONFIG_DISABLE
-
-namespace timer_large_integer {
-
-#if defined(DOCTEST_PLATFORM_WINDOWS)
-typedef ULONGLONG type;
-#else // DOCTEST_PLATFORM_WINDOWS
-typedef std::uint64_t type;
-#endif // DOCTEST_PLATFORM_WINDOWS
-} // namespace timer_large_integer
-
-typedef timer_large_integer::type ticks_t;
-
-#ifdef DOCTEST_CONFIG_GETCURRENTTICKS
-ticks_t
-getCurrentTicks()
-{
-  return DOCTEST_CONFIG_GETCURRENTTICKS();
-}
-#elif defined(DOCTEST_PLATFORM_WINDOWS)
-ticks_t
-getCurrentTicks()
-{
-  static LARGE_INTEGER hz = {0}, hzo = {0};
-  if (!hz.QuadPart) {
-    QueryPerformanceFrequency(&hz);
-    QueryPerformanceCounter(&hzo);
-  }
-  LARGE_INTEGER t;
-  QueryPerformanceCounter(&t);
-  return ((t.QuadPart - hzo.QuadPart) * LONGLONG(1000000)) / hz.QuadPart;
-}
-#else // DOCTEST_PLATFORM_WINDOWS
-ticks_t
-getCurrentTicks()
-{
-  timeval t;
-  gettimeofday(&t, nullptr);
-  return static_cast<ticks_t>(t.tv_sec) * 1000000 +
-         static_cast<ticks_t>(t.tv_usec);
-}
-#endif // DOCTEST_PLATFORM_WINDOWS
-
-struct Timer {
-  void start() { m_ticks = getCurrentTicks(); }
-  unsigned int getElapsedMicroseconds() const
-  {
-    return static_cast<unsigned int>(getCurrentTicks() - m_ticks);
-  }
-  // unsigned int getElapsedMilliseconds() const {
-  //   return static_cast<unsigned int>(getElapsedMicroseconds() / 1000);
-  // }
-  double getElapsedSeconds() const
-  {
-    return static_cast<double>(getCurrentTicks() - m_ticks) / 1000000.0;
-  }
-
- private:
-  ticks_t m_ticks = 0;
-};
-
-#ifdef DOCTEST_CONFIG_NO_MULTI_LANE_ATOMICS
-template <typename T>
-using AtomicOrMultiLaneAtomic = std::atomic<T>;
-#else // DOCTEST_CONFIG_NO_MULTI_LANE_ATOMICS
-// Provides a multilane implementation of an atomic variable that supports add,
-// sub, load, store. Instead of using a single atomic variable, this splits up
-// into multiple ones, each sitting on a separate cache line. The goal is to
-// provide a speedup when most operations are modifying. It achieves this with
-// two properties:
-//
-// * Multiple atomics are used, so chance of congestion from the same atomic is
-//   reduced.
-// * Each atomic sits on a separate cache line, so false sharing is reduced.
-//
-// The disadvantage is that there is a small overhead due to the use of TLS, and
-// load/store is slower because all atomics have to be accessed.
-template <typename T>
-class MultiLaneAtomic {
-  struct CacheLineAlignedAtomic {
-    std::atomic<T> atomic{};
-    char padding
-        [DOCTEST_MULTI_LANE_ATOMICS_CACHE_LINE_SIZE - sizeof(std::atomic<T>)];
-  };
-  CacheLineAlignedAtomic m_atomics[DOCTEST_MULTI_LANE_ATOMICS_THREAD_LANES];
-
-  static_assert(
-      sizeof(CacheLineAlignedAtomic) ==
-          DOCTEST_MULTI_LANE_ATOMICS_CACHE_LINE_SIZE,
-      "guarantee one atomic takes exactly one cache line");
-
- public:
-  T operator++() DOCTEST_NOEXCEPT { return fetch_add(1) + 1; }
-
-  T operator++(int) DOCTEST_NOEXCEPT { return fetch_add(1); }
-
-  T fetch_add(T arg, std::memory_order order = std::memory_order_seq_cst)
-      DOCTEST_NOEXCEPT
-  {
-    return myAtomic().fetch_add(arg, order);
-  }
-
-  T fetch_sub(T arg, std::memory_order order = std::memory_order_seq_cst)
-      DOCTEST_NOEXCEPT
-  {
-    return myAtomic().fetch_sub(arg, order);
-  }
-
-  operator T() const DOCTEST_NOEXCEPT { return load(); }
-
-  T load(std::memory_order order = std::memory_order_seq_cst) const
-      DOCTEST_NOEXCEPT
-  {
-    auto result = T();
-    for (auto const& c : m_atomics) {
-      result += c.atomic.load(order);
-    }
-    return result;
-  }
-
-  T operator=(T desired) DOCTEST_NOEXCEPT
-  { // lgtm [cpp/assignment-does-not-return-this]
-    store(desired);
-    return desired;
-  }
-
-  void store(T desired, std::memory_order order = std::memory_order_seq_cst)
-      DOCTEST_NOEXCEPT
-  {
-    // first value becomes desired, all others become 0.
-    for (auto& c : m_atomics) {
-      c.atomic.store(desired, order);
-      desired = {};
-    }
-  }
-
- private:
-  // Each thread has a different atomic that it operates on. If more than
-  // NumLanes threads use this, some will use the same atomic. So performance
-  // will degrade a bit, but still everything will work.
-  //
-  // The logic here is a bit tricky. The call should be as fast as possible, so
-  // that there is minimal to no overhead in determining the correct atomic for
-  // the current thread.
-  //
-  // 1. A global static counter laneCounter counts continuously up.
-  // 2. Each successive thread will use modulo operation of that counter so it
-  //    gets an atomic assigned in a round-robin fashion.
-  // 3. This tlsLaneIdx is stored in the thread local data, so it is directly
-  //    available with little overhead.
-  std::atomic<T>& myAtomic() DOCTEST_NOEXCEPT
-  {
-    static std::atomic<size_t> laneCounter;
-    DOCTEST_THREAD_LOCAL size_t tlsLaneIdx =
-        laneCounter++ % DOCTEST_MULTI_LANE_ATOMICS_THREAD_LANES;
-
-    return m_atomics[tlsLaneIdx].atomic;
-  }
-};
-
-template <typename T>
-using AtomicOrMultiLaneAtomic = MultiLaneAtomic<T>;
-#endif // DOCTEST_CONFIG_NO_MULTI_LANE_ATOMICS
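The lane-selection trick in myAtomic() can be seen in isolation in the sketch below: each thread picks a lane once via a round-robin counter and caches the index in TLS, increments touch only that lane, and a load sums every lane. The names (MultiLaneCounter, kLanes) are illustrative, not doctest's, and the cache-line padding is omitted for brevity:

    #include <atomic>
    #include <cstddef>
    #include <iostream>
    #include <thread>
    #include <vector>

    constexpr std::size_t kLanes = 32;

    struct MultiLaneCounter {
      std::atomic<long> lanes[kLanes]{};

      // Round-robin lane assignment: each new thread grabs the next lane
      // index once and caches it in TLS, so the hot path is one array lookup.
      std::atomic<long>& myLane() {
        static std::atomic<std::size_t> laneCounter{0};
        thread_local std::size_t idx = laneCounter++ % kLanes;
        return lanes[idx];
      }

      void add(long v) { myLane().fetch_add(v, std::memory_order_relaxed); }

      // A load has to visit every lane and sum them - the price paid for
      // cheap, mostly-uncontended increments.
      long load() const {
        long total = 0;
        for (auto const& l : lanes) total += l.load(std::memory_order_relaxed);
        return total;
      }
    };

    int main() {
      MultiLaneCounter c;
      std::vector<std::thread> ts;
      for (int t = 0; t < 8; ++t)
        ts.emplace_back([&] { for (int i = 0; i < 100000; ++i) c.add(1); });
      for (auto& t : ts) t.join();
      std::cout << c.load() << "\n"; // prints 800000
      return 0;
    }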
-
-// this holds both parameters from the command line and runtime data for tests
-struct ContextState : ContextOptions, TestRunStats, CurrentTestCaseStats {
-  AtomicOrMultiLaneAtomic<int> numAssertsCurrentTest_atomic;
-  AtomicOrMultiLaneAtomic<int> numAssertsFailedCurrentTest_atomic;
-
-  std::vector<std::vector<String>> filters =
-      decltype(filters)(9); // 9 different filters
-
-  std::vector<IReporter*> reporters_currently_used;
-
-  assert_handler ah = nullptr;
-
-  Timer timer;
-
-  std::vector<String>
-      stringifiedContexts; // logging from INFO() due to an exception
-
-  // stuff for subcases
-  std::vector<SubcaseSignature> subcasesStack;
-  std::set<std::vector<SubcaseSignature>> subcasesPassed;
-  int subcasesCurrentMaxLevel;
-  bool should_reenter;
-  std::atomic<bool> shouldLogCurrentException;
-
-  void resetRunData()
-  {
-    numTestCases = 0;
-    numTestCasesPassingFilters = 0;
-    numTestSuitesPassingFilters = 0;
-    numTestCasesFailed = 0;
-    numAsserts = 0;
-    numAssertsFailed = 0;
-    numAssertsCurrentTest = 0;
-    numAssertsFailedCurrentTest = 0;
-  }
-
-  void finalizeTestCaseData()
-  {
-    seconds = timer.getElapsedSeconds();
-
-    // update the non-atomic counters
-    numAsserts += numAssertsCurrentTest_atomic;
-    numAssertsFailed += numAssertsFailedCurrentTest_atomic;
-    numAssertsCurrentTest = numAssertsCurrentTest_atomic;
-    numAssertsFailedCurrentTest = numAssertsFailedCurrentTest_atomic;
-
-    if (numAssertsFailedCurrentTest)
-      failure_flags |= TestCaseFailureReason::AssertFailure;
-
-    if (Approx(currentTest->m_timeout).epsilon(DBL_EPSILON) != 0 &&
-        Approx(seconds).epsilon(DBL_EPSILON) > currentTest->m_timeout)
-      failure_flags |= TestCaseFailureReason::Timeout;
-
-    if (currentTest->m_should_fail) {
-      if (failure_flags) {
-        failure_flags |= TestCaseFailureReason::ShouldHaveFailedAndDid;
-      } else {
-        failure_flags |= TestCaseFailureReason::ShouldHaveFailedButDidnt;
-      }
-    } else if (failure_flags && currentTest->m_may_fail) {
-      failure_flags |= TestCaseFailureReason::CouldHaveFailedAndDid;
-    } else if (currentTest->m_expected_failures > 0) {
-      if (numAssertsFailedCurrentTest == currentTest->m_expected_failures) {
-        failure_flags |= TestCaseFailureReason::FailedExactlyNumTimes;
-      } else {
-        failure_flags |= TestCaseFailureReason::DidntFailExactlyNumTimes;
-      }
-    }
-
-    bool ok_to_fail =
-        (TestCaseFailureReason::ShouldHaveFailedAndDid & failure_flags) ||
-        (TestCaseFailureReason::CouldHaveFailedAndDid & failure_flags) ||
-        (TestCaseFailureReason::FailedExactlyNumTimes & failure_flags);
-
-    // if any subcase has failed - the whole test case has failed
-    testCaseSuccess = !(failure_flags && !ok_to_fail);
-    if (!testCaseSuccess)
-      numTestCasesFailed++;
-  }
-};
-
-ContextState* g_cs = nullptr;
-
-// used to avoid locks for the debug output
-// TODO: figure out if this is indeed necessary/correct - seems like either
-// there still could be a race or that there wouldn't be a race even if using
-// the context directly
-DOCTEST_THREAD_LOCAL bool g_no_colors;
-
-#endif // DOCTEST_CONFIG_DISABLE
-} // namespace detail
-
-char*
-String::allocate(unsigned sz)
-{
-  if (sz <= last) {
-    buf[sz] = '\0';
-    setLast(last - sz);
-    return buf;
-  } else {
-    setOnHeap();
-    data.size = sz;
-    data.capacity = data.size + 1;
-    data.ptr = new char[data.capacity];
-    data.ptr[sz] = '\0';
-    return data.ptr;
-  }
-}
-
-void
-String::setOnHeap()
-{
-  *reinterpret_cast<unsigned char*>(&buf[last]) = 128;
-}
-void
-String::setLast(unsigned in)
-{
-  buf[last] = char(in);
-}
-
-void
-String::copy(const String& other)
-{
-  if (other.isOnStack()) {
-    memcpy(buf, other.buf, len);
-  } else {
-    memcpy(allocate(other.data.size), other.data.ptr, other.data.size);
-  }
-}
-
-String::String()
-{
-  buf[0] = '\0';
-  setLast();
-}
-
-String::~String()
-{
-  if (!isOnStack())
-    delete[] data.ptr;
-  // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks)
-}
-
-String::String(const char* in) : String(in, strlen(in)) {}
-
-String::String(const char* in, unsigned in_size)
-{
-  memcpy(allocate(in_size), in, in_size);
-}
-
-String::String(std::istream& in, unsigned in_size)
-{
-  in.read(allocate(in_size), in_size);
-}
-
-String::String(const String& other)
-{
-  copy(other);
-}
-
-String&
-String::operator=(const String& other)
-{
-  if (this != &other) {
-    if (!isOnStack())
-      delete[] data.ptr;
-
-    copy(other);
-  }
-
-  return *this;
-}
-
-String&
-String::operator+=(const String& other)
-{
-  const unsigned my_old_size = size();
-  const unsigned other_size = other.size();
-  const unsigned total_size = my_old_size + other_size;
-  if (isOnStack()) {
-    if (total_size < len) {
-      // append to the current stack space
-      memcpy(buf + my_old_size, other.c_str(), other_size + 1);
-      // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks)
-      setLast(last - total_size);
-    } else {
-      // alloc new chunk
-      char* temp = new char[total_size + 1];
-      // copy current data to new location before writing in the union
-      memcpy(temp, buf, my_old_size); // skip the +1 ('\0') for speed
-      // update data in union
-      setOnHeap();
-      data.size = total_size;
-      data.capacity = data.size + 1;
-      data.ptr = temp;
-      // transfer the rest of the data
-      memcpy(data.ptr + my_old_size, other.c_str(), other_size + 1);
-    }
-  } else {
-    if (data.capacity > total_size) {
-      // append to the current heap block
-      data.size = total_size;
-      memcpy(data.ptr + my_old_size, other.c_str(), other_size + 1);
-    } else {
-      // resize
-      data.capacity *= 2;
-      if (data.capacity <= total_size)
-        data.capacity = total_size + 1;
-      // alloc new chunk
-      char* temp = new char[data.capacity];
-      // copy current data to new location before releasing it
-      memcpy(temp, data.ptr, my_old_size); // skip the +1 ('\0') for speed
-      // release old chunk
-      delete[] data.ptr;
-      // update the rest of the union members
-      data.size = total_size;
-      data.ptr = temp;
-      // transfer the rest of the data
-      memcpy(data.ptr + my_old_size, other.c_str(), other_size + 1);
-    }
-  }
-
-  return *this;
-}
-
-String::String(String&& other)
-{
-  memcpy(buf, other.buf, len);
-  other.buf[0] = '\0';
-  other.setLast();
-}
-
-String&
-String::operator=(String&& other)
-{
-  if (this != &other) {
-    if (!isOnStack())
-      delete[] data.ptr;
-    memcpy(buf, other.buf, len);
-    other.buf[0] = '\0';
-    other.setLast();
-  }
-  return *this;
-}
-
-char
-String::operator[](unsigned i) const
-{
-  return const_cast<String*>(this)->operator[](i); // NOLINT
-}
-
-char&
-String::operator[](unsigned i)
-{
-  if (isOnStack())
-    return reinterpret_cast<char*>(buf)[i];
-  return data.ptr[i];
-}
-
-DOCTEST_GCC_SUPPRESS_WARNING_WITH_PUSH("-Wmaybe-uninitialized")
-unsigned
-String::size() const
-{
-  if (isOnStack())
-    return last - (unsigned(buf[last]) &
-                   31); // using "last" would work only if "len" is 32
-  return data.size;
-}
-DOCTEST_GCC_SUPPRESS_WARNING_POP
-
-unsigned
-String::capacity() const
-{
-  if (isOnStack())
-    return len;
-  return data.capacity;
-}
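String above is a small-buffer-optimized string: short strings live in the on-stack buf, whose final byte does double duty as the remaining-capacity counter (the & 31 mask in size()) and, via its top bit (the 128 written by setOnHeap()), as the on-heap flag. When the buffer is exactly full the counter is 0, which also serves as the terminating '\0'. A minimal sketch of that byte's encoding, with illustrative constants:

    #include <cstdio>

    int main()
    {
      const unsigned len = 24; // on-stack buffer size, as in doctest's String
      unsigned char last_byte = 0;

      last_byte = len - 1; // empty small string: 23 bytes still free
      std::printf("free capacity: %u\n", last_byte & 31u);

      last_byte = 0; // buffer full: the counter doubles as the '\0'
      std::printf("full, terminator byte: %d\n", last_byte);

      last_byte = 128; // setOnHeap(): the top bit marks heap storage
      std::printf("on heap: %s\n", (last_byte & 128u) ? "yes" : "no");
      return 0;
    }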
-int
-String::compare(const char* other, bool no_case) const
-{
-  if (no_case)
-    return doctest::stricmp(c_str(), other);
-  return std::strcmp(c_str(), other);
-}
-
-int
-String::compare(const String& other, bool no_case) const
-{
-  return compare(other.c_str(), no_case);
-}
-
-// NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks)
-String
-operator+(const String& lhs, const String& rhs)
-{
-  return String(lhs) += rhs;
-}
-
-// clang-format off
-bool operator==(const String& lhs, const String& rhs) { return lhs.compare(rhs) == 0; }
-bool operator!=(const String& lhs, const String& rhs) { return lhs.compare(rhs) != 0; }
-bool operator< (const String& lhs, const String& rhs) { return lhs.compare(rhs) < 0; }
-bool operator> (const String& lhs, const String& rhs) { return lhs.compare(rhs) > 0; }
-bool operator<=(const String& lhs, const String& rhs) { return (lhs != rhs) ? lhs.compare(rhs) < 0 : true; }
-bool operator>=(const String& lhs, const String& rhs) { return (lhs != rhs) ? lhs.compare(rhs) > 0 : true; }
-// clang-format on
-
-std::ostream&
-operator<<(std::ostream& s, const String& in)
-{
-  return s << in.c_str();
-}
-
-namespace {
-void
-color_to_stream(std::ostream&, Color::Enum) DOCTEST_BRANCH_ON_DISABLED({}, ;)
-} // namespace
-
-namespace Color {
-std::ostream&
-operator<<(std::ostream& s, Color::Enum code)
-{
-  color_to_stream(s, code);
-  return s;
-}
-} // namespace Color
-
-// clang-format off
-const char* assertString(assertType::Enum at) {
-  DOCTEST_MSVC_SUPPRESS_WARNING_WITH_PUSH(4062) // enum 'x' in switch of enum 'y' is not handled
-  switch(at) { //!OCLINT missing default in switch statements
-    case assertType::DT_WARN : return "WARN";
-    case assertType::DT_CHECK : return "CHECK";
-    case assertType::DT_REQUIRE : return "REQUIRE";
-
-    case assertType::DT_WARN_FALSE : return "WARN_FALSE";
-    case assertType::DT_CHECK_FALSE : return "CHECK_FALSE";
-    case assertType::DT_REQUIRE_FALSE : return "REQUIRE_FALSE";
-
-    case assertType::DT_WARN_THROWS : return "WARN_THROWS";
-    case assertType::DT_CHECK_THROWS : return "CHECK_THROWS";
-    case assertType::DT_REQUIRE_THROWS : return "REQUIRE_THROWS";
-
-    case assertType::DT_WARN_THROWS_AS : return "WARN_THROWS_AS";
-    case assertType::DT_CHECK_THROWS_AS : return "CHECK_THROWS_AS";
-    case assertType::DT_REQUIRE_THROWS_AS : return "REQUIRE_THROWS_AS";
-
-    case assertType::DT_WARN_THROWS_WITH : return "WARN_THROWS_WITH";
-    case assertType::DT_CHECK_THROWS_WITH : return "CHECK_THROWS_WITH";
-    case assertType::DT_REQUIRE_THROWS_WITH : return "REQUIRE_THROWS_WITH";
-
-    case assertType::DT_WARN_THROWS_WITH_AS : return "WARN_THROWS_WITH_AS";
-    case assertType::DT_CHECK_THROWS_WITH_AS : return "CHECK_THROWS_WITH_AS";
-    case assertType::DT_REQUIRE_THROWS_WITH_AS : return "REQUIRE_THROWS_WITH_AS";
-
-    case assertType::DT_WARN_NOTHROW : return "WARN_NOTHROW";
-    case assertType::DT_CHECK_NOTHROW : return "CHECK_NOTHROW";
-    case assertType::DT_REQUIRE_NOTHROW : return "REQUIRE_NOTHROW";
-
-    case assertType::DT_WARN_EQ : return "WARN_EQ";
-    case assertType::DT_CHECK_EQ : return "CHECK_EQ";
-    case assertType::DT_REQUIRE_EQ : return "REQUIRE_EQ";
-    case assertType::DT_WARN_NE : return "WARN_NE";
-    case assertType::DT_CHECK_NE : return "CHECK_NE";
-    case assertType::DT_REQUIRE_NE : return "REQUIRE_NE";
-    case assertType::DT_WARN_GT : return "WARN_GT";
-    case assertType::DT_CHECK_GT : return "CHECK_GT";
-    case assertType::DT_REQUIRE_GT : return "REQUIRE_GT";
-    case assertType::DT_WARN_LT : return "WARN_LT";
-    case assertType::DT_CHECK_LT : return "CHECK_LT";
-    case assertType::DT_REQUIRE_LT : return "REQUIRE_LT";
-    case assertType::DT_WARN_GE : return "WARN_GE";
-    case assertType::DT_CHECK_GE : return "CHECK_GE";
-    case assertType::DT_REQUIRE_GE : return "REQUIRE_GE";
-    case assertType::DT_WARN_LE : return "WARN_LE";
-    case assertType::DT_CHECK_LE : return "CHECK_LE";
-    case assertType::DT_REQUIRE_LE : return "REQUIRE_LE";
-
-    case assertType::DT_WARN_UNARY : return "WARN_UNARY";
-    case assertType::DT_CHECK_UNARY : return "CHECK_UNARY";
-    case assertType::DT_REQUIRE_UNARY : return "REQUIRE_UNARY";
-    case assertType::DT_WARN_UNARY_FALSE : return "WARN_UNARY_FALSE";
-    case assertType::DT_CHECK_UNARY_FALSE : return "CHECK_UNARY_FALSE";
-    case assertType::DT_REQUIRE_UNARY_FALSE : return "REQUIRE_UNARY_FALSE";
-  }
-  DOCTEST_MSVC_SUPPRESS_WARNING_POP
-  return "";
-}
-// clang-format on
-
-const char*
-failureString(assertType::Enum at)
-{
-  if (at & assertType::is_warn) //! OCLINT bitwise operator in conditional
-    return "WARNING";
-  if (at & assertType::is_check) //! OCLINT bitwise operator in conditional
-    return "ERROR";
-  if (at & assertType::is_require) //! OCLINT bitwise operator in conditional
-    return "FATAL ERROR";
-  return "";
-}
-
-DOCTEST_CLANG_SUPPRESS_WARNING_WITH_PUSH("-Wnull-dereference")
-DOCTEST_GCC_SUPPRESS_WARNING_WITH_PUSH("-Wnull-dereference")
-// depending on the current options this will remove the path of filenames
-const char*
-skipPathFromFilename(const char* file)
-{
-#ifndef DOCTEST_CONFIG_DISABLE
-  if (getContextOptions()->no_path_in_filenames) {
-    auto back = std::strrchr(file, '\\');
-    auto forward = std::strrchr(file, '/');
-    if (back || forward) {
-      if (back > forward)
-        forward = back;
-      return forward + 1;
-    }
-  }
-#endif // DOCTEST_CONFIG_DISABLE
-  return file;
-}
-DOCTEST_CLANG_SUPPRESS_WARNING_POP
-DOCTEST_GCC_SUPPRESS_WARNING_POP
-
-bool
-SubcaseSignature::operator<(const SubcaseSignature& other) const
-{
-  if (m_line != other.m_line)
-    return m_line < other.m_line;
-  if (std::strcmp(m_file, other.m_file) != 0)
-    return std::strcmp(m_file, other.m_file) < 0;
-  return m_name.compare(other.m_name) < 0;
-}
-
-IContextScope::IContextScope() = default;
-IContextScope::~IContextScope() = default;
-
-#ifdef DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING
-String
-toString(char* in)
-{
-  return toString(static_cast<const char*>(in));
-}
-// NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks)
-String
-toString(const char* in)
-{
-  return String("\"") + (in ? in : "{null string}") + "\"";
-}
-#endif // DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING
-String
-toString(bool in)
-{
-  return in ? "true" : "false";
-}
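These overloads are the end of doctest's stringification chain; a user type can join it by providing its own toString overload found by argument-dependent lookup, which is doctest's documented extension point. An illustrative example (Point is a hypothetical type, not part of this file):

    #include "doctest.h" // assumed available; provides doctest::String

    #include <string>

    struct Point {
      int x, y;
    };

    // Found by ADL when doctest stringifies a failed assertion on a Point.
    doctest::String toString(const Point& p)
    {
      const std::string s =
          "Point(" + std::to_string(p.x) + ", " + std::to_string(p.y) + ")";
      return s.c_str();
    }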
"true" : "false"; -} -String -toString(float in) -{ - return fpToString(in, 5) + "f"; -} -String -toString(double in) -{ - return fpToString(in, 10); -} -String -toString(double long in) -{ - return fpToString(in, 15); -} - -#define DOCTEST_TO_STRING_OVERLOAD(type, fmt) \ - String toString(type in) \ - { \ - char buf[64]; \ - std::sprintf(buf, fmt, in); \ - return buf; \ - } - -DOCTEST_TO_STRING_OVERLOAD(char, "%d") -DOCTEST_TO_STRING_OVERLOAD(char signed, "%d") -DOCTEST_TO_STRING_OVERLOAD(char unsigned, "%u") -DOCTEST_TO_STRING_OVERLOAD(int short, "%d") -DOCTEST_TO_STRING_OVERLOAD(int short unsigned, "%u") -DOCTEST_TO_STRING_OVERLOAD(int, "%d") -DOCTEST_TO_STRING_OVERLOAD(unsigned, "%u") -DOCTEST_TO_STRING_OVERLOAD(int long, "%ld") -DOCTEST_TO_STRING_OVERLOAD(int long unsigned, "%lu") -DOCTEST_TO_STRING_OVERLOAD(int long long, "%lld") -DOCTEST_TO_STRING_OVERLOAD(int long long unsigned, "%llu") - -String -toString(std::nullptr_t) -{ - return "NULL"; -} - -#if DOCTEST_MSVC >= DOCTEST_COMPILER(19, 20, 0) -// see this issue on why this is needed: -// https://github.com/doctest/doctest/issues/183 -String -toString(const std::string& in) -{ - return in.c_str(); -} -#endif // VS 2019 - -Approx::Approx(double value) - : m_epsilon( - static_cast(std::numeric_limits::epsilon()) * 100), - m_scale(1.0), m_value(value) -{ -} - -Approx -Approx::operator()(double value) const -{ - Approx approx(value); - approx.epsilon(m_epsilon); - approx.scale(m_scale); - return approx; -} - -Approx& -Approx::epsilon(double newEpsilon) -{ - m_epsilon = newEpsilon; - return *this; -} -Approx& -Approx::scale(double newScale) -{ - m_scale = newScale; - return *this; -} - -bool -operator==(double lhs, const Approx& rhs) -{ - // Thanks to Richard Harris for his help refining this formula - return std::fabs(lhs - rhs.m_value) < - rhs.m_epsilon * - (rhs.m_scale + - std::max(std::fabs(lhs), std::fabs(rhs.m_value))); -} -bool -operator==(const Approx& lhs, double rhs) -{ - return operator==(rhs, lhs); -} -bool -operator!=(double lhs, const Approx& rhs) -{ - return !operator==(lhs, rhs); -} -bool -operator!=(const Approx& lhs, double rhs) -{ - return !operator==(rhs, lhs); -} -bool -operator<=(double lhs, const Approx& rhs) -{ - return lhs < rhs.m_value || lhs == rhs; -} -bool -operator<=(const Approx& lhs, double rhs) -{ - return lhs.m_value < rhs || lhs == rhs; -} -bool -operator>=(double lhs, const Approx& rhs) -{ - return lhs > rhs.m_value || lhs == rhs; -} -bool -operator>=(const Approx& lhs, double rhs) -{ - return lhs.m_value > rhs || lhs == rhs; -} -bool -operator<(double lhs, const Approx& rhs) -{ - return lhs < rhs.m_value && lhs != rhs; -} -bool -operator<(const Approx& lhs, double rhs) -{ - return lhs.m_value < rhs && lhs != rhs; -} -bool -operator>(double lhs, const Approx& rhs) -{ - return lhs > rhs.m_value && lhs != rhs; -} -bool -operator>(const Approx& lhs, double rhs) -{ - return lhs.m_value > rhs && lhs != rhs; -} - -String -toString(const Approx& in) -{ - // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) - return "Approx( " + doctest::toString(in.m_value) + " )"; -} -const ContextOptions* -getContextOptions() -{ - return DOCTEST_BRANCH_ON_DISABLED(nullptr, g_cs); -} - -} // namespace doctest - -#ifdef DOCTEST_CONFIG_DISABLE -namespace doctest { -Context::Context(int, const char* const*) {} -Context::~Context() = default; -void -Context::applyCommandLine(int, const char* const*) -{ -} -void -Context::addFilter(const char*, const char*) -{ -} -void -Context::clearFilters() -{ -} -void 
-
-#ifdef DOCTEST_CONFIG_DISABLE
-namespace doctest {
-Context::Context(int, const char* const*) {}
-Context::~Context() = default;
-void
-Context::applyCommandLine(int, const char* const*)
-{
-}
-void
-Context::addFilter(const char*, const char*)
-{
-}
-void
-Context::clearFilters()
-{
-}
-void
-Context::setOption(const char*, bool)
-{
-}
-void
-Context::setOption(const char*, int)
-{
-}
-void
-Context::setOption(const char*, const char*)
-{
-}
-bool
-Context::shouldExit()
-{
-  return false;
-}
-void
-Context::setAsDefaultForAssertsOutOfTestCases()
-{
-}
-void
-Context::setAssertHandler(detail::assert_handler)
-{
-}
-void
-Context::setCout(std::ostream* out)
-{
-}
-int
-Context::run()
-{
-  return 0;
-}
-
-IReporter::~IReporter() = default;
-
-int
-IReporter::get_num_active_contexts()
-{
-  return 0;
-}
-const IContextScope* const*
-IReporter::get_active_contexts()
-{
-  return nullptr;
-}
-int
-IReporter::get_num_stringified_contexts()
-{
-  return 0;
-}
-const String*
-IReporter::get_stringified_contexts()
-{
-  return nullptr;
-}
-
-int
-registerReporter(const char*, int, IReporter*)
-{
-  return 0;
-}
-
-} // namespace doctest
-#else // DOCTEST_CONFIG_DISABLE
-
-#if !defined(DOCTEST_CONFIG_COLORS_NONE)
-#if !defined(DOCTEST_CONFIG_COLORS_WINDOWS) && \
-    !defined(DOCTEST_CONFIG_COLORS_ANSI)
-#ifdef DOCTEST_PLATFORM_WINDOWS
-#define DOCTEST_CONFIG_COLORS_WINDOWS
-#else // linux
-#define DOCTEST_CONFIG_COLORS_ANSI
-#endif // platform
-#endif // DOCTEST_CONFIG_COLORS_WINDOWS && DOCTEST_CONFIG_COLORS_ANSI
-#endif // DOCTEST_CONFIG_COLORS_NONE
-
-namespace doctest_detail_test_suite_ns {
-// holds the current test suite
-doctest::detail::TestSuite&
-getCurrentTestSuite()
-{
-  static doctest::detail::TestSuite data{};
-  return data;
-}
-} // namespace doctest_detail_test_suite_ns
-
-namespace doctest {
-namespace {
-// the int (priority) is part of the key for automatic sorting - sadly one can
-// register a reporter with a duplicate name and a different priority but
-// hopefully that won't happen often :|
-typedef std::map<std::pair<int, String>, reporterCreatorFunc> reporterMap;
-
-reporterMap&
-getReporters()
-{
-  static reporterMap data;
-  return data;
-}
-reporterMap&
-getListeners()
-{
-  static reporterMap data;
-  return data;
-}
-} // namespace
-namespace detail {
-#define DOCTEST_ITERATE_THROUGH_REPORTERS(function, ...) \
-  for (auto& curr_rep : g_cs->reporters_currently_used)  \
-  curr_rep->function(__VA_ARGS__)
-
-bool
-checkIfShouldThrow(assertType::Enum at)
-{
-  if (at & assertType::is_require) //! OCLINT bitwise operator in conditional
-    return true;
-
-  if ((at & assertType::is_check) //! OCLINT bitwise operator in conditional
-      && getContextOptions()->abort_after > 0 &&
-      (g_cs->numAssertsFailed + g_cs->numAssertsFailedCurrentTest_atomic) >=
-          getContextOptions()->abort_after)
-    return true;
-
-  return false;
-}
-
-#ifndef DOCTEST_CONFIG_NO_EXCEPTIONS
-DOCTEST_NORETURN void
-throwException()
-{
-  g_cs->shouldLogCurrentException = false;
-  throw TestFailureException();
-} // NOLINT(cert-err60-cpp)
-#else // DOCTEST_CONFIG_NO_EXCEPTIONS
-void
-throwException()
-{
-}
-#endif // DOCTEST_CONFIG_NO_EXCEPTIONS
-} // namespace detail
-
-namespace {
-using namespace detail;
-// matching of a string against a wildcard mask (case sensitivity configurable)
-// taken from
-// https://www.codeproject.com/Articles/1088/Wildcard-string-compare-globbing
-int
-wildcmp(const char* str, const char* wild, bool caseSensitive)
-{
-  const char* cp = str;
-  const char* mp = wild;
-
-  while ((*str) && (*wild != '*')) {
-    if ((caseSensitive ? (*wild != *str) : (tolower(*wild) != tolower(*str))) &&
-        (*wild != '?')) {
-      return 0;
-    }
-    wild++;
-    str++;
-  }
-
-  while (*str) {
-    if (*wild == '*') {
-      if (!*++wild) {
-        return 1;
-      }
-      mp = wild;
-      cp = str + 1;
-    } else if (
-        (caseSensitive ? (*wild == *str) : (tolower(*wild) == tolower(*str))) ||
-        (*wild == '?')) {
-      wild++;
-      str++;
-    } else {
-      wild = mp; //! OCLINT parameter reassignment
-      str = cp++; //! OCLINT parameter reassignment
-    }
-  }
-
-  while (*wild == '*') {
-    wild++;
-  }
-  return !*wild;
-}
-
-//// C string hash function (djb2) - taken from
-/// http://www.cse.yorku.ca/~oz/hash.html
-// unsigned hashStr(unsigned const char* str) {
-//   unsigned long hash = 5381;
-//   char c;
-//   while((c = *str++))
-//     hash = ((hash << 5) + hash) + c; // hash * 33 + c
-//   return hash;
-//}
-
-// checks if the name matches any of the filters (and can be configured what to
-// do when empty)
-bool
-matchesAny(
-    const char* name, const std::vector<String>& filters, bool matchEmpty,
-    bool caseSensitive)
-{
-  if (filters.empty() && matchEmpty)
-    return true;
-  for (auto& curr : filters)
-    if (wildcmp(name, curr.c_str(), caseSensitive))
-      return true;
-  return false;
-}
-} // namespace
-namespace detail {
-
-Subcase::Subcase(const String& name, const char* file, int line)
-    : m_signature({name, file, line})
-{
-  auto* s = g_cs;
-
-  // check subcase filters
-  if (s->subcasesStack.size() < size_t(s->subcase_filter_levels)) {
-    if (!matchesAny(
-            m_signature.m_name.c_str(), s->filters[6], true, s->case_sensitive))
-      return;
-    if (matchesAny(
-            m_signature.m_name.c_str(), s->filters[7], false,
-            s->case_sensitive))
-      return;
-  }
-
-  // if a Subcase on the same level has already been entered
-  if (s->subcasesStack.size() < size_t(s->subcasesCurrentMaxLevel)) {
-    s->should_reenter = true;
-    return;
-  }
-
-  // push the current signature to the stack so we can check if the
-  // current stack + the current new subcase have been traversed
-  s->subcasesStack.push_back(m_signature);
-  if (s->subcasesPassed.count(s->subcasesStack) != 0) {
-    // pop - revert to previous stack since we've already passed this
-    s->subcasesStack.pop_back();
-    return;
-  }
-
-  s->subcasesCurrentMaxLevel = s->subcasesStack.size();
-  m_entered = true;
-
-  DOCTEST_ITERATE_THROUGH_REPORTERS(subcase_start, m_signature);
-}
-
-DOCTEST_MSVC_SUPPRESS_WARNING_WITH_PUSH(
-    4996) // std::uncaught_exception is deprecated in C++17
-DOCTEST_GCC_SUPPRESS_WARNING_WITH_PUSH("-Wdeprecated-declarations")
-DOCTEST_CLANG_SUPPRESS_WARNING_WITH_PUSH("-Wdeprecated-declarations")
-
-Subcase::~Subcase()
-{
-  if (m_entered) {
-    // only mark the subcase stack as passed if no subcases have been skipped
-    if (g_cs->should_reenter == false)
-      g_cs->subcasesPassed.insert(g_cs->subcasesStack);
-    g_cs->subcasesStack.pop_back();
-
-#if defined(__cpp_lib_uncaught_exceptions) && \
-    __cpp_lib_uncaught_exceptions >= 201411L && \
-    (!defined(__MAC_OS_X_VERSION_MIN_REQUIRED) || \
-     __MAC_OS_X_VERSION_MIN_REQUIRED >= 101200)
-    if (std::uncaught_exceptions() > 0
-#else
-    if (std::uncaught_exception()
-#endif
-        && g_cs->shouldLogCurrentException) {
-      DOCTEST_ITERATE_THROUGH_REPORTERS(
-          test_case_exception,
-          {"exception thrown in subcase - will translate later "
-           "when the whole test case has been exited (cannot "
-           "translate while there is an active exception)",
-           false});
-      g_cs->shouldLogCurrentException = false;
-    }
-    DOCTEST_ITERATE_THROUGH_REPORTERS(subcase_end, DOCTEST_EMPTY);
-  }
-}
-
-DOCTEST_CLANG_SUPPRESS_WARNING_POP
-DOCTEST_GCC_SUPPRESS_WARNING_POP
-DOCTEST_MSVC_SUPPRESS_WARNING_POP
-
-Subcase::operator bool() const
-{
-  return m_entered;
-}
-
-Result::Result(bool passed, const String& decomposition)
-    : m_passed(passed), m_decomp(decomposition)
-{
-}
-
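The Subcase constructor and destructor above drive doctest's re-entry model: a test case body is executed repeatedly, each pass entering exactly one new leaf subcase, with subcasesStack and subcasesPassed recording what has already run. In user code this yields the familiar pattern, using doctest's documented macros:

    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
    #include "doctest.h"

    #include <vector>

    TEST_CASE("setup is re-run for every subcase") {
      std::vector<int> v(5); // executes once per pass through the test case

      SUBCASE("push_back increases size") {
        v.push_back(42);
        CHECK(v.size() == 6);
      }
      SUBCASE("clear empties the vector") {
        v.clear();
        CHECK(v.empty());
      }
      // The body runs twice, once per leaf SUBCASE, each time with a
      // freshly constructed vector.
    }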
-ExpressionDecomposer::ExpressionDecomposer(assertType::Enum at) : m_at(at) {}
-
-TestSuite&
-TestSuite::operator*(const char* in)
-{
-  m_test_suite = in;
-  return *this;
-}
-
-TestCase::TestCase(
-    funcType test, const char* file, unsigned line, const TestSuite& test_suite,
-    const char* type, int template_id)
-{
-  m_file = file;
-  m_line = line;
-  m_name = nullptr; // will be later overridden in operator*
-  m_test_suite = test_suite.m_test_suite;
-  m_description = test_suite.m_description;
-  m_skip = test_suite.m_skip;
-  m_no_breaks = test_suite.m_no_breaks;
-  m_no_output = test_suite.m_no_output;
-  m_may_fail = test_suite.m_may_fail;
-  m_should_fail = test_suite.m_should_fail;
-  m_expected_failures = test_suite.m_expected_failures;
-  m_timeout = test_suite.m_timeout;
-
-  m_test = test;
-  m_type = type;
-  m_template_id = template_id;
-}
-
-TestCase::TestCase(const TestCase& other) : TestCaseData()
-{
-  *this = other;
-}
-
-DOCTEST_MSVC_SUPPRESS_WARNING_WITH_PUSH(26434) // hides a non-virtual function
-DOCTEST_MSVC_SUPPRESS_WARNING(26437) // Do not slice
-TestCase&
-TestCase::operator=(const TestCase& other)
-{
-  static_cast<TestCaseData&>(*this) = static_cast<const TestCaseData&>(other);
-
-  m_test = other.m_test;
-  m_type = other.m_type;
-  m_template_id = other.m_template_id;
-  m_full_name = other.m_full_name;
-
-  if (m_template_id != -1)
-    m_name = m_full_name.c_str();
-  return *this;
-}
-DOCTEST_MSVC_SUPPRESS_WARNING_POP
-
-TestCase&
-TestCase::operator*(const char* in)
-{
-  m_name = in;
-  // make a new name with an appended type for templated test case
-  if (m_template_id != -1) {
-    m_full_name = String(m_name) + m_type;
-    // redirect the name to point to the newly constructed full name
-    m_name = m_full_name.c_str();
-  }
-  return *this;
-}
-
-bool
-TestCase::operator<(const TestCase& other) const
-{
-  // this will be used only to differentiate between test cases - not relevant
-  // for sorting
-  if (m_line != other.m_line)
-    return m_line < other.m_line;
-  const int name_cmp = strcmp(m_name, other.m_name);
-  if (name_cmp != 0)
-    return name_cmp < 0;
-  const int file_cmp = m_file.compare(other.m_file);
-  if (file_cmp != 0)
-    return file_cmp < 0;
-  return m_template_id < other.m_template_id;
-}
-
-// all the registered tests
-std::set<TestCase>&
-getRegisteredTests()
-{
-  static std::set<TestCase> data;
-  return data;
-}
-} // namespace detail
-namespace {
-using namespace detail;
-// for sorting tests by file/line
-bool
-fileOrderComparator(const TestCase* lhs, const TestCase* rhs)
-{
-  // this is needed because MSVC gives different case for drive letters
-  // for __FILE__ when evaluated in a header and a source file
-  const int res = lhs->m_file.compare(rhs->m_file, bool(DOCTEST_MSVC));
-  if (res != 0)
-    return res < 0;
-  if (lhs->m_line != rhs->m_line)
-    return lhs->m_line < rhs->m_line;
-  return lhs->m_template_id < rhs->m_template_id;
-}
-
-// for sorting tests by suite/file/line
-bool
-suiteOrderComparator(const TestCase* lhs, const TestCase* rhs)
-{
-  const int res = std::strcmp(lhs->m_test_suite, rhs->m_test_suite);
-  if (res != 0)
-    return res < 0;
-  return fileOrderComparator(lhs, rhs);
-}
-
-// for sorting tests by name/suite/file/line
-bool
-nameOrderComparator(const TestCase* lhs, const TestCase* rhs)
-{
-  const int res = std::strcmp(lhs->m_name, rhs->m_name);
-  if (res != 0)
-    return res < 0;
-  return suiteOrderComparator(lhs, rhs);
-}
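getRegisteredTests() and the regTest() function further below implement the classic static-initialization self-registration idiom: the TEST_CASE macro expands to a throwaway global whose initializer registers the test function before main() runs. A reduced standalone sketch of the idiom (all names here are hypothetical, not doctest's):

    #include <cstdio>
    #include <string>
    #include <utility>
    #include <vector>

    // A global registry, filled before main() by file-scope dummy variables.
    using TestFn = void (*)();
    static std::vector<std::pair<std::string, TestFn>>& registry() {
      static std::vector<std::pair<std::string, TestFn>> r;
      return r;
    }
    static int regTest(const char* name, TestFn fn) {
      registry().push_back({name, fn});
      return 0; // the value only exists so a global int can force the call
    }

    static void my_test() { std::puts("running my_test"); }
    // What a TEST_CASE macro boils down to: a throwaway global whose
    // initializer has the side effect of registering the function.
    static int dummy = regTest("my_test", &my_test);

    int main() {
      for (auto& t : registry()) t.second(); // the test runner loop
      return 0;
    }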
-
-DOCTEST_CLANG_SUPPRESS_WARNING_WITH_PUSH("-Wdeprecated-declarations")
-void
-color_to_stream(std::ostream& s, Color::Enum code)
-{
-  static_cast<void>(s);    // for DOCTEST_CONFIG_COLORS_NONE or
-                           // DOCTEST_CONFIG_COLORS_WINDOWS
-  static_cast<void>(code); // for DOCTEST_CONFIG_COLORS_NONE
-#ifdef DOCTEST_CONFIG_COLORS_ANSI
-  if (g_no_colors || (isatty(STDOUT_FILENO) == false &&
-                      getContextOptions()->force_colors == false))
-    return;
-
-  auto col = "";
-  // clang-format off
-  switch(code) { //!OCLINT missing break in switch statement / unnecessary default statement in covered switch statement
-    case Color::Red:         col = "[0;31m"; break;
-    case Color::Green:       col = "[0;32m"; break;
-    case Color::Blue:        col = "[0;34m"; break;
-    case Color::Cyan:        col = "[0;36m"; break;
-    case Color::Yellow:      col = "[0;33m"; break;
-    case Color::Grey:        col = "[1;30m"; break;
-    case Color::LightGrey:   col = "[0;37m"; break;
-    case Color::BrightRed:   col = "[1;31m"; break;
-    case Color::BrightGreen: col = "[1;32m"; break;
-    case Color::BrightWhite: col = "[1;37m"; break;
-    case Color::Bright: // invalid
-    case Color::None:
-    case Color::White:
-    default:                 col = "[0m";
-  }
-  // clang-format on
-  s << "\033" << col;
-#endif // DOCTEST_CONFIG_COLORS_ANSI
-
-#ifdef DOCTEST_CONFIG_COLORS_WINDOWS
-  if (g_no_colors || (_isatty(_fileno(stdout)) == false &&
-                      getContextOptions()->force_colors == false))
-    return;
-
-  static struct ConsoleHelper {
-    HANDLE stdoutHandle;
-    WORD origFgAttrs;
-    WORD origBgAttrs;
-
-    ConsoleHelper()
-    {
-      stdoutHandle = GetStdHandle(STD_OUTPUT_HANDLE);
-      CONSOLE_SCREEN_BUFFER_INFO csbiInfo;
-      GetConsoleScreenBufferInfo(stdoutHandle, &csbiInfo);
-      origFgAttrs =
-          csbiInfo.wAttributes & ~(BACKGROUND_GREEN | BACKGROUND_RED |
-                                   BACKGROUND_BLUE | BACKGROUND_INTENSITY);
-      origBgAttrs =
-          csbiInfo.wAttributes & ~(FOREGROUND_GREEN | FOREGROUND_RED |
-                                   FOREGROUND_BLUE | FOREGROUND_INTENSITY);
-    }
-  } ch;
-
-#define DOCTEST_SET_ATTR(x) \
-  SetConsoleTextAttribute(ch.stdoutHandle, x | ch.origBgAttrs)
-
-  // clang-format off
-  switch (code) {
-    case Color::White: DOCTEST_SET_ATTR(FOREGROUND_GREEN | FOREGROUND_RED | FOREGROUND_BLUE); break;
-    case Color::Red: DOCTEST_SET_ATTR(FOREGROUND_RED); break;
-    case Color::Green: DOCTEST_SET_ATTR(FOREGROUND_GREEN); break;
-    case Color::Blue: DOCTEST_SET_ATTR(FOREGROUND_BLUE); break;
-    case Color::Cyan: DOCTEST_SET_ATTR(FOREGROUND_BLUE | FOREGROUND_GREEN); break;
-    case Color::Yellow: DOCTEST_SET_ATTR(FOREGROUND_RED | FOREGROUND_GREEN); break;
-    case Color::Grey: DOCTEST_SET_ATTR(0); break;
-    case Color::LightGrey: DOCTEST_SET_ATTR(FOREGROUND_INTENSITY); break;
-    case Color::BrightRed: DOCTEST_SET_ATTR(FOREGROUND_INTENSITY | FOREGROUND_RED); break;
-    case Color::BrightGreen: DOCTEST_SET_ATTR(FOREGROUND_INTENSITY | FOREGROUND_GREEN); break;
-    case Color::BrightWhite: DOCTEST_SET_ATTR(FOREGROUND_INTENSITY | FOREGROUND_GREEN | FOREGROUND_RED | FOREGROUND_BLUE); break;
-    case Color::None:
-    case Color::Bright: // invalid
-    default: DOCTEST_SET_ATTR(ch.origFgAttrs);
-  }
-  // clang-format on
-#endif // DOCTEST_CONFIG_COLORS_WINDOWS
-}
-DOCTEST_CLANG_SUPPRESS_WARNING_POP
-
-std::vector<const IExceptionTranslator*>&
-getExceptionTranslators()
-{
-  static std::vector<const IExceptionTranslator*> data;
-  return data;
-}
-
-String
-translateActiveException()
-{
-#ifndef DOCTEST_CONFIG_NO_EXCEPTIONS
-  String res;
-  auto& translators = getExceptionTranslators();
-  for (auto& curr : translators)
-    if (curr->translate(res))
-      return res;
-  // clang-format off
-  DOCTEST_GCC_SUPPRESS_WARNING_WITH_PUSH("-Wcatch-value")
-  try {
-    throw;
-  } catch(std::exception& ex) {
-    return ex.what();
-  } catch(std::string& msg) {
-    return msg.c_str();
-  } catch(const char* msg) {
-    return msg;
-  } catch(...) {
-    return "unknown exception";
-  }
-  DOCTEST_GCC_SUPPRESS_WARNING_POP
-// clang-format on
-#else // DOCTEST_CONFIG_NO_EXCEPTIONS
-  return "";
-#endif // DOCTEST_CONFIG_NO_EXCEPTIONS
-}
-} // namespace
-
-namespace detail {
-// used by the macros for registering tests
-int
-regTest(const TestCase& tc)
-{
-  getRegisteredTests().insert(tc);
-  return 0;
-}
-
-// sets the current test suite
-int
-setTestSuite(const TestSuite& ts)
-{
-  doctest_detail_test_suite_ns::getCurrentTestSuite() = ts;
-  return 0;
-}
-
-#ifdef DOCTEST_IS_DEBUGGER_ACTIVE
-bool
-isDebuggerActive()
-{
-  return DOCTEST_IS_DEBUGGER_ACTIVE();
-}
-#else // DOCTEST_IS_DEBUGGER_ACTIVE
-#ifdef DOCTEST_PLATFORM_LINUX
-class ErrnoGuard {
- public:
-  ErrnoGuard() : m_oldErrno(errno) {}
-  ~ErrnoGuard() { errno = m_oldErrno; }
-
- private:
-  int m_oldErrno;
-};
-// See the comments in Catch2 for the reasoning behind this implementation:
-// https://github.com/catchorg/Catch2/blob/v2.13.1/include/internal/catch_debugger.cpp#L79-L102
-bool
-isDebuggerActive()
-{
-  ErrnoGuard guard;
-  std::ifstream in("/proc/self/status");
-  for (std::string line; std::getline(in, line);) {
-    static const int PREFIX_LEN = 11;
-    if (line.compare(0, PREFIX_LEN, "TracerPid:\t") == 0) {
-      return line.length() > PREFIX_LEN && line[PREFIX_LEN] != '0';
-    }
-  }
-  return false;
-}
-#elif defined(DOCTEST_PLATFORM_MAC)
-// The following function is taken directly from the following technical note:
-// https://developer.apple.com/library/archive/qa/qa1361/_index.html
-// Returns true if the current process is being debugged (either
-// running under the debugger or has a debugger attached post facto).
-bool
-isDebuggerActive()
-{
-  int mib[4];
-  kinfo_proc info;
-  size_t size;
-  // Initialize the flags so that, if sysctl fails for some bizarre
-  // reason, we get a predictable result.
-  info.kp_proc.p_flag = 0;
-  // Initialize mib, which tells sysctl the info we want, in this case
-  // we're looking for information about a specific process ID.
-  mib[0] = CTL_KERN;
-  mib[1] = KERN_PROC;
-  mib[2] = KERN_PROC_PID;
-  mib[3] = getpid();
-  // Call sysctl.
-  size = sizeof(info);
-  if (sysctl(mib, DOCTEST_COUNTOF(mib), &info, &size, 0, 0) != 0) {
-    std::cerr << "\nCall to sysctl failed - unable to determine if debugger is "
-                 "active **\n";
-    return false;
-  }
-  // We're being debugged if the P_TRACED flag is set.
-  return ((info.kp_proc.p_flag & P_TRACED) != 0);
-}
-#elif DOCTEST_MSVC || defined(__MINGW32__) || defined(__MINGW64__)
-bool
-isDebuggerActive()
-{
-  return ::IsDebuggerPresent() != 0;
-}
-#else
-bool
-isDebuggerActive()
-{
-  return false;
-}
-#endif // Platform
-#endif // DOCTEST_IS_DEBUGGER_ACTIVE
-
-void
-registerExceptionTranslatorImpl(const IExceptionTranslator* et)
-{
-  if (std::find(
-          getExceptionTranslators().begin(), getExceptionTranslators().end(),
-          et) == getExceptionTranslators().end())
-    getExceptionTranslators().push_back(et);
-}
-
-#ifdef DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING
-void
-toStream(std::ostream* s, char* in)
-{
-  *s << in;
-}
-void
-toStream(std::ostream* s, const char* in)
-{
-  *s << in;
-}
-#endif // DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING
-void
-toStream(std::ostream* s, bool in)
-{
-  *s << std::boolalpha << in << std::noboolalpha;
-}
-void
-toStream(std::ostream* s, float in)
-{
-  *s << in;
-}
-void
-toStream(std::ostream* s, double in)
-{
-  *s << in;
-}
-void
-toStream(std::ostream* s, double long in)
-{
-  *s << in;
-}
-
-void
-toStream(std::ostream* s, char in)
-{
-  *s << in;
-}
-void
-toStream(std::ostream* s, char signed in)
-{
-  *s << in;
-}
-void
-toStream(std::ostream* s, char unsigned in)
-{
-  *s << in;
-}
-void
-toStream(std::ostream* s, int short in)
-{
-  *s << in;
-}
-void
-toStream(std::ostream* s, int short unsigned in)
-{
-  *s << in;
-}
-void
-toStream(std::ostream* s, int in)
-{
-  *s << in;
-}
-void
-toStream(std::ostream* s, int unsigned in)
-{
-  *s << in;
-}
-void
-toStream(std::ostream* s, int long in)
-{
-  *s << in;
-}
-void
-toStream(std::ostream* s, int long unsigned in)
-{
-  *s << in;
-}
-void
-toStream(std::ostream* s, int long long in)
-{
-  *s << in;
-}
-void
-toStream(std::ostream* s, int long long unsigned in)
-{
-  *s << in;
-}
-
-DOCTEST_THREAD_LOCAL std::vector<IContextScope*>
-    g_infoContexts; // for logging with INFO()
-
-ContextScopeBase::ContextScopeBase()
-{
-  g_infoContexts.push_back(this);
-}
-
-ContextScopeBase::ContextScopeBase(ContextScopeBase&& other)
-{
-  if (other.need_to_destroy) {
-    other.destroy();
-  }
-  other.need_to_destroy = false;
-  g_infoContexts.push_back(this);
-}
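g_infoContexts and ContextScopeBase above are the machinery behind INFO(): each INFO() pushes a context scope whose captured expressions are only stringified if a failure or exception occurs while the scope is alive (the destroy() path below). Typical usage with doctest's documented macros:

    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
    #include "doctest.h"

    TEST_CASE("INFO is captured lazily") {
      const int i = 42;
      INFO("the value of i is ", i); // registered in g_infoContexts; it is
                                     // stringified only if an assertion
                                     // below fails while the scope is alive
      CHECK(i == 42);
    }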
-
-DOCTEST_MSVC_SUPPRESS_WARNING_WITH_PUSH(
-    4996) // std::uncaught_exception is deprecated in C++17
-DOCTEST_GCC_SUPPRESS_WARNING_WITH_PUSH("-Wdeprecated-declarations")
-DOCTEST_CLANG_SUPPRESS_WARNING_WITH_PUSH("-Wdeprecated-declarations")
-
-// destroy cannot be inlined into the destructor because that would mean calling
-// stringify after ContextScope has been destroyed (base class destructors run
-// after derived class destructors). Instead, ContextScope calls this method
-// directly from its destructor.
-void
-ContextScopeBase::destroy()
-{
-#if defined(__cpp_lib_uncaught_exceptions) && \
-    __cpp_lib_uncaught_exceptions >= 201411L && \
-    (!defined(__MAC_OS_X_VERSION_MIN_REQUIRED) || \
-     __MAC_OS_X_VERSION_MIN_REQUIRED >= 101200)
-  if (std::uncaught_exceptions() > 0) {
-#else
-  if (std::uncaught_exception()) {
-#endif
-    std::ostringstream s;
-    this->stringify(&s);
-    g_cs->stringifiedContexts.push_back(s.str().c_str());
-  }
-  g_infoContexts.pop_back();
-}
-
-DOCTEST_CLANG_SUPPRESS_WARNING_POP
-DOCTEST_GCC_SUPPRESS_WARNING_POP
-DOCTEST_MSVC_SUPPRESS_WARNING_POP
-} // namespace detail
-namespace {
-using namespace detail;
-
-#if !defined(DOCTEST_CONFIG_POSIX_SIGNALS) && \
-    !defined(DOCTEST_CONFIG_WINDOWS_SEH)
-struct FatalConditionHandler {
-  static void reset() {}
-  static void allocateAltStackMem() {}
-  static void freeAltStackMem() {}
-};
-#else // DOCTEST_CONFIG_POSIX_SIGNALS || DOCTEST_CONFIG_WINDOWS_SEH
-
-void reportFatal(const std::string&);
-
-#ifdef DOCTEST_PLATFORM_WINDOWS
-
-struct SignalDefs {
-  DWORD id;
-  const char* name;
-};
-// There is no 1-1 mapping between signals and windows exceptions.
-// Windows can easily distinguish between SO and SigSegV,
-// but SigInt, SigTerm, etc are handled differently.
-SignalDefs signalDefs[] = {
-    {static_cast<DWORD>(EXCEPTION_ILLEGAL_INSTRUCTION),
-     "SIGILL - Illegal instruction signal"},
-    {static_cast<DWORD>(EXCEPTION_STACK_OVERFLOW), "SIGSEGV - Stack overflow"},
-    {static_cast<DWORD>(EXCEPTION_ACCESS_VIOLATION),
-     "SIGSEGV - Segmentation violation signal"},
-    {static_cast<DWORD>(EXCEPTION_INT_DIVIDE_BY_ZERO), "Divide by zero error"},
-};
-
-struct FatalConditionHandler {
-  static LONG CALLBACK handleException(PEXCEPTION_POINTERS ExceptionInfo)
-  {
-    // Multiple threads may enter this filter/handler at once. We want the error
-    // message to be printed on the console just once no matter how many threads
-    // have crashed.
- static std::mutex mutex; - static bool execute = true; - { - std::lock_guard lock(mutex); - if (execute) { - bool reported = false; - for (size_t i = 0; i < DOCTEST_COUNTOF(signalDefs); ++i) { - if (ExceptionInfo->ExceptionRecord->ExceptionCode == - signalDefs[i].id) { - reportFatal(signalDefs[i].name); - reported = true; - break; - } - } - if (reported == false) - reportFatal("Unhandled SEH exception caught"); - if (isDebuggerActive() && !g_cs->no_breaks) - DOCTEST_BREAK_INTO_DEBUGGER(); - } - execute = false; - } - std::exit(EXIT_FAILURE); - } - - static void allocateAltStackMem() {} - static void freeAltStackMem() {} - - FatalConditionHandler() - { - isSet = true; - // 32k seems enough for doctest to handle stack overflow, - // but the value was found experimentally, so there is no strong guarantee - guaranteeSize = 32 * 1024; - // Register an unhandled exception filter - previousTop = SetUnhandledExceptionFilter(handleException); - // Pass in guarantee size to be filled - SetThreadStackGuarantee(&guaranteeSize); - - // On Windows uncaught exceptions from another thread, exceptions from - // destructors, or calls to std::terminate are not a SEH exception - - // The terminal handler gets called when: - // - std::terminate is called FROM THE TEST RUNNER THREAD - // - an exception is thrown from a destructor FROM THE TEST RUNNER THREAD - original_terminate_handler = std::get_terminate(); - std::set_terminate([]() DOCTEST_NOEXCEPT { - reportFatal("Terminate handler called"); - if (isDebuggerActive() && !g_cs->no_breaks) - DOCTEST_BREAK_INTO_DEBUGGER(); - std::exit(EXIT_FAILURE); // explicitly exit - otherwise the SIGABRT - // handler may be called as well - }); - - // SIGABRT is raised when: - // - std::terminate is called FROM A DIFFERENT THREAD - // - an exception is thrown from a destructor FROM A DIFFERENT THREAD - // - an uncaught exception is thrown FROM A DIFFERENT THREAD - prev_sigabrt_handler = - std::signal(SIGABRT, [](int signal) DOCTEST_NOEXCEPT { - if (signal == SIGABRT) { - reportFatal("SIGABRT - Abort (abnormal termination) signal"); - if (isDebuggerActive() && !g_cs->no_breaks) - DOCTEST_BREAK_INTO_DEBUGGER(); - std::exit(EXIT_FAILURE); - } - }); - - // The following settings are taken from google test, and more - // specifically from UnitTest::Run() inside of gtest.cc - - // the user does not want to see pop-up dialogs about crashes - prev_error_mode_1 = SetErrorMode( - SEM_FAILCRITICALERRORS | SEM_NOALIGNMENTFAULTEXCEPT | - SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX); - // This forces the abort message to go to stderr in all circumstances. - prev_error_mode_2 = _set_error_mode(_OUT_TO_STDERR); - // In the debug version, Visual Studio pops up a separate dialog - // offering a choice to debug the aborted program - we want to disable that. - prev_abort_behavior = - _set_abort_behavior(0x0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT); - // In debug mode, the Windows CRT can crash with an assertion over invalid - // input (e.g. passing an invalid file descriptor). The default handling - // for these assertions is to pop up a dialog and wait for user input. - // Instead ask the CRT to dump such assertions to stderr non-interactively. 
- prev_report_mode = - _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_FILE | _CRTDBG_MODE_DEBUG); - prev_report_file = _CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR); - } - - static void reset() - { - if (isSet) { - // Unregister handler and restore the old guarantee - SetUnhandledExceptionFilter(previousTop); - SetThreadStackGuarantee(&guaranteeSize); - std::set_terminate(original_terminate_handler); - std::signal(SIGABRT, prev_sigabrt_handler); - SetErrorMode(prev_error_mode_1); - _set_error_mode(prev_error_mode_2); - _set_abort_behavior( - prev_abort_behavior, _WRITE_ABORT_MSG | _CALL_REPORTFAULT); - static_cast(_CrtSetReportMode(_CRT_ASSERT, prev_report_mode)); - static_cast(_CrtSetReportFile(_CRT_ASSERT, prev_report_file)); - isSet = false; - } - } - - ~FatalConditionHandler() { reset(); } - - private: - static UINT prev_error_mode_1; - static int prev_error_mode_2; - static unsigned int prev_abort_behavior; - static int prev_report_mode; - static _HFILE prev_report_file; - static void(DOCTEST_CDECL* prev_sigabrt_handler)(int); - static std::terminate_handler original_terminate_handler; - static bool isSet; - static ULONG guaranteeSize; - static LPTOP_LEVEL_EXCEPTION_FILTER previousTop; -}; - -UINT FatalConditionHandler::prev_error_mode_1; -int FatalConditionHandler::prev_error_mode_2; -unsigned int FatalConditionHandler::prev_abort_behavior; -int FatalConditionHandler::prev_report_mode; -_HFILE FatalConditionHandler::prev_report_file; -void(DOCTEST_CDECL* FatalConditionHandler::prev_sigabrt_handler)(int); -std::terminate_handler FatalConditionHandler::original_terminate_handler; -bool FatalConditionHandler::isSet = false; -ULONG FatalConditionHandler::guaranteeSize = 0; -LPTOP_LEVEL_EXCEPTION_FILTER FatalConditionHandler::previousTop = nullptr; - -#else // DOCTEST_PLATFORM_WINDOWS - -struct SignalDefs { - int id; - const char* name; -}; -SignalDefs signalDefs[] = { - {SIGINT, "SIGINT - Terminal interrupt signal"}, - {SIGILL, "SIGILL - Illegal instruction signal"}, - {SIGFPE, "SIGFPE - Floating point error signal"}, - {SIGSEGV, "SIGSEGV - Segmentation violation signal"}, - {SIGTERM, "SIGTERM - Termination request signal"}, - {SIGABRT, "SIGABRT - Abort (abnormal termination) signal"}}; - -struct FatalConditionHandler { - static bool isSet; - static struct sigaction oldSigActions[DOCTEST_COUNTOF(signalDefs)]; - static stack_t oldSigStack; - static size_t altStackSize; - static char* altStackMem; - - static void handleSignal(int sig) - { - const char* name = ""; - for (std::size_t i = 0; i < DOCTEST_COUNTOF(signalDefs); ++i) { - SignalDefs& def = signalDefs[i]; - if (sig == def.id) { - name = def.name; - break; - } - } - reset(); - reportFatal(name); - raise(sig); - } - - static void allocateAltStackMem() { altStackMem = new char[altStackSize]; } - - static void freeAltStackMem() { delete[] altStackMem; } - - FatalConditionHandler() - { - isSet = true; - stack_t sigStack; - sigStack.ss_sp = altStackMem; - sigStack.ss_size = altStackSize; - sigStack.ss_flags = 0; - sigaltstack(&sigStack, &oldSigStack); - struct sigaction sa = {}; - sa.sa_handler = handleSignal; // NOLINT - sa.sa_flags = SA_ONSTACK; - for (std::size_t i = 0; i < DOCTEST_COUNTOF(signalDefs); ++i) { - sigaction(signalDefs[i].id, &sa, &oldSigActions[i]); - } - } - - ~FatalConditionHandler() { reset(); } - static void reset() - { - if (isSet) { - // Set signals back to previous values -- hopefully nobody overwrote them - // in the meantime - for (std::size_t i = 0; i < DOCTEST_COUNTOF(signalDefs); ++i) { - 
sigaction(signalDefs[i].id, &oldSigActions[i], nullptr); - } - // Return the old stack - sigaltstack(&oldSigStack, nullptr); - isSet = false; - } - } -}; - -bool FatalConditionHandler::isSet = false; -struct sigaction - FatalConditionHandler::oldSigActions[DOCTEST_COUNTOF(signalDefs)] = {}; -stack_t FatalConditionHandler::oldSigStack = {}; -size_t FatalConditionHandler::altStackSize = 4 * SIGSTKSZ; -char* FatalConditionHandler::altStackMem = nullptr; - -#endif // DOCTEST_PLATFORM_WINDOWS -#endif // DOCTEST_CONFIG_POSIX_SIGNALS || DOCTEST_CONFIG_WINDOWS_SEH - -} // namespace - -namespace { -using namespace detail; - -#ifdef DOCTEST_PLATFORM_WINDOWS -#define DOCTEST_OUTPUT_DEBUG_STRING(text) ::OutputDebugStringA(text) -#else -// TODO: integration with XCode and other IDEs -#define DOCTEST_OUTPUT_DEBUG_STRING( \ - text) // NOLINT(clang-diagnostic-unused-macros) -#endif // Platform - -void -addAssert(assertType::Enum at) -{ - if ((at & assertType::is_warn) == - 0) //! OCLINT bitwise operator in conditional - g_cs->numAssertsCurrentTest_atomic++; -} - -void -addFailedAssert(assertType::Enum at) -{ - if ((at & assertType::is_warn) == - 0) //! OCLINT bitwise operator in conditional - g_cs->numAssertsFailedCurrentTest_atomic++; -} - -#if defined(DOCTEST_CONFIG_POSIX_SIGNALS) || defined(DOCTEST_CONFIG_WINDOWS_SEH) -void -reportFatal(const std::string& message) -{ - g_cs->failure_flags |= TestCaseFailureReason::Crash; - - DOCTEST_ITERATE_THROUGH_REPORTERS( - test_case_exception, {message.c_str(), true}); - - while (g_cs->subcasesStack.size()) { - g_cs->subcasesStack.pop_back(); - DOCTEST_ITERATE_THROUGH_REPORTERS(subcase_end, DOCTEST_EMPTY); - } - - g_cs->finalizeTestCaseData(); - - DOCTEST_ITERATE_THROUGH_REPORTERS(test_case_end, *g_cs); - - DOCTEST_ITERATE_THROUGH_REPORTERS(test_run_end, *g_cs); -} -#endif // DOCTEST_CONFIG_POSIX_SIGNALS || DOCTEST_CONFIG_WINDOWS_SEH -} // namespace -namespace detail { - -ResultBuilder::ResultBuilder( - assertType::Enum at, const char* file, int line, const char* expr, - const char* exception_type, const char* exception_string) -{ - m_test_case = g_cs->currentTest; - m_at = at; - m_file = file; - m_line = line; - m_expr = expr; - m_failed = true; - m_threw = false; - m_threw_as = false; - m_exception_type = exception_type; - m_exception_string = exception_string; -#if DOCTEST_MSVC - if (m_expr[0] == - ' ') // this happens when variadic macros are disabled under MSVC - ++m_expr; -#endif // MSVC -} - -void -ResultBuilder::setResult(const Result& res) -{ - m_decomp = res.m_decomp; - m_failed = !res.m_passed; -} - -void -ResultBuilder::translateException() -{ - m_threw = true; - m_exception = translateActiveException(); -} - -bool -ResultBuilder::log() -{ - if (m_at & assertType::is_throws) { //! OCLINT bitwise operator in - //! conditional - m_failed = !m_threw; - } else if ( - (m_at & assertType::is_throws_as) && - (m_at & assertType::is_throws_with)) { //! OCLINT - m_failed = !m_threw_as || (m_exception != m_exception_string); - } else if (m_at & assertType::is_throws_as) { //! OCLINT bitwise operator in - //! conditional - m_failed = !m_threw_as; - } else if (m_at & assertType::is_throws_with) { //! OCLINT bitwise operator - //! in conditional - m_failed = m_exception != m_exception_string; - } else if (m_at & assertType::is_nothrow) { //! OCLINT bitwise operator in - //! 
conditional - m_failed = m_threw; - } - - if (m_exception.size()) - m_exception = "\"" + m_exception + "\""; - - if (is_running_in_test) { - addAssert(m_at); - DOCTEST_ITERATE_THROUGH_REPORTERS(log_assert, *this); - - if (m_failed) - addFailedAssert(m_at); - } else if (m_failed) { - failed_out_of_a_testing_context(*this); - } - - return m_failed && isDebuggerActive() && !getContextOptions()->no_breaks && - (g_cs->currentTest == nullptr || - !g_cs->currentTest->m_no_breaks); // break into debugger -} - -void -ResultBuilder::react() const -{ - if (m_failed && checkIfShouldThrow(m_at)) - throwException(); -} - -void -failed_out_of_a_testing_context(const AssertData& ad) -{ - if (g_cs->ah) - g_cs->ah(ad); - else - std::abort(); -} - -bool -decomp_assert( - assertType::Enum at, const char* file, int line, const char* expr, - Result result) -{ - bool failed = !result.m_passed; - - // ################################################################################### - // IF THE DEBUGGER BREAKS HERE - GO 1 LEVEL UP IN THE CALLSTACK FOR THE - // FAILING ASSERT THIS IS THE EFFECT OF HAVING - // 'DOCTEST_CONFIG_SUPER_FAST_ASSERTS' DEFINED - // ################################################################################### - DOCTEST_ASSERT_OUT_OF_TESTS(result.m_decomp); - DOCTEST_ASSERT_IN_TESTS(result.m_decomp); - // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) - return !failed; -} - -MessageBuilder::MessageBuilder( - const char* file, int line, assertType::Enum severity) -{ - m_stream = tlssPush(); - m_file = file; - m_line = line; - m_severity = severity; -} - -MessageBuilder::~MessageBuilder() -{ - if (!logged) - tlssPop(); -} - -IExceptionTranslator::IExceptionTranslator() = default; -IExceptionTranslator::~IExceptionTranslator() = default; - -bool -MessageBuilder::log() -{ - if (!logged) { - m_string = tlssPop(); - logged = true; - } - - DOCTEST_ITERATE_THROUGH_REPORTERS(log_message, *this); - - const bool isWarn = m_severity & assertType::is_warn; - - // warn is just a message in this context so we don't treat it as an assert - if (!isWarn) { - addAssert(m_severity); - addFailedAssert(m_severity); - } - - return isDebuggerActive() && !getContextOptions()->no_breaks && !isWarn && - (g_cs->currentTest == nullptr || - !g_cs->currentTest->m_no_breaks); // break into debugger -} - -void -MessageBuilder::react() -{ - if (m_severity & - assertType::is_require) //! OCLINT bitwise operator in conditional - throwException(); -} -} // namespace detail -namespace { -using namespace detail; - -// clang-format off - -// ================================================================================================= -// The following code has been taken verbatim from Catch2/include/internal/catch_xmlwriter.h/cpp -// This is done so cherry-picking bug fixes is trivial - even the style/formatting is untouched. 
-// =================================================================================================
-
-    class XmlEncode {
-    public:
-        enum ForWhat { ForTextNodes, ForAttributes };
-
-        XmlEncode( std::string const& str, ForWhat forWhat = ForTextNodes );
-
-        void encodeTo( std::ostream& os ) const;
-
-        friend std::ostream& operator << ( std::ostream& os, XmlEncode const& xmlEncode );
-
-    private:
-        std::string m_str;
-        ForWhat m_forWhat;
-    };
-
-    class XmlWriter {
-    public:
-
-        class ScopedElement {
-        public:
-            ScopedElement( XmlWriter* writer );
-
-            ScopedElement( ScopedElement&& other ) DOCTEST_NOEXCEPT;
-            ScopedElement& operator=( ScopedElement&& other ) DOCTEST_NOEXCEPT;
-
-            ~ScopedElement();
-
-            ScopedElement& writeText( std::string const& text, bool indent = true );
-
-            template<typename T>
-            ScopedElement& writeAttribute( std::string const& name, T const& attribute ) {
-                m_writer->writeAttribute( name, attribute );
-                return *this;
-            }
-
-        private:
-            mutable XmlWriter* m_writer = nullptr;
-        };
-
-        XmlWriter( std::ostream& os = std::cout );
-        ~XmlWriter();
-
-        XmlWriter( XmlWriter const& ) = delete;
-        XmlWriter& operator=( XmlWriter const& ) = delete;
-
-        XmlWriter& startElement( std::string const& name );
-
-        ScopedElement scopedElement( std::string const& name );
-
-        XmlWriter& endElement();
-
-        XmlWriter& writeAttribute( std::string const& name, std::string const& attribute );
-
-        XmlWriter& writeAttribute( std::string const& name, const char* attribute );
-
-        XmlWriter& writeAttribute( std::string const& name, bool attribute );
-
-        template<typename T>
-        XmlWriter& writeAttribute( std::string const& name, T const& attribute ) {
-            std::stringstream rss;
-            rss << attribute;
-            return writeAttribute( name, rss.str() );
-        }
-
-        XmlWriter& writeText( std::string const& text, bool indent = true );
-
-        //XmlWriter& writeComment( std::string const& text );
-
-        //void writeStylesheetRef( std::string const& url );
-
-        //XmlWriter& writeBlankLine();
-
-        void ensureTagClosed();
-
-    private:
-
-        void writeDeclaration();
-
-        void newlineIfNecessary();
-
-        bool m_tagIsOpen = false;
-        bool m_needsNewline = false;
-        std::vector<std::string> m_tags;
-        std::string m_indent;
-        std::ostream& m_os;
-    };
-
-// =================================================================================================
-// The following code has been taken verbatim from Catch2/include/internal/catch_xmlwriter.h/cpp
-// This is done so cherry-picking bug fixes is trivial - even the style/formatting is untouched.
-// =================================================================================================
-
-using uchar = unsigned char;
-
-namespace {
-
-    size_t trailingBytes(unsigned char c) {
-        if ((c & 0xE0) == 0xC0) {
-            return 2;
-        }
-        if ((c & 0xF0) == 0xE0) {
-            return 3;
-        }
-        if ((c & 0xF8) == 0xF0) {
-            return 4;
-        }
-        DOCTEST_INTERNAL_ERROR("Invalid multibyte utf-8 start byte encountered");
-    }
-
-    uint32_t headerValue(unsigned char c) {
-        if ((c & 0xE0) == 0xC0) {
-            return c & 0x1F;
-        }
-        if ((c & 0xF0) == 0xE0) {
-            return c & 0x0F;
-        }
-        if ((c & 0xF8) == 0xF0) {
-            return c & 0x07;
-        }
-        DOCTEST_INTERNAL_ERROR("Invalid multibyte utf-8 start byte encountered");
-    }
-
-    void hexEscapeChar(std::ostream& os, unsigned char c) {
-        std::ios_base::fmtflags f(os.flags());
-        os << "\\x"
-           << std::uppercase << std::hex << std::setfill('0') << std::setw(2)
-           << static_cast<int>(c);
-        os.flags(f);
-    }
-
-} // anonymous namespace
-
-    XmlEncode::XmlEncode( std::string const& str, ForWhat forWhat )
-    :   m_str( str ),
-        m_forWhat( forWhat )
-    {}
-
-    void XmlEncode::encodeTo( std::ostream& os ) const {
-        // Apostrophe escaping not necessary if we always use " to write attributes
-        // (see: https://www.w3.org/TR/xml/#syntax)
-
-        for( std::size_t idx = 0; idx < m_str.size(); ++ idx ) {
-            uchar c = m_str[idx];
-            switch (c) {
-            case '<':   os << "&lt;"; break;
-            case '&':   os << "&amp;"; break;
-
-            case '>':
-                // See: https://www.w3.org/TR/xml/#syntax
-                if (idx > 2 && m_str[idx - 1] == ']' && m_str[idx - 2] == ']')
-                    os << "&gt;";
-                else
-                    os << c;
-                break;
-
-            case '\"':
-                if (m_forWhat == ForAttributes)
-                    os << "&quot;";
-                else
-                    os << c;
-                break;
-
-            default:
-                // Check for control characters and invalid utf-8
-
-                // Escape control characters in standard ascii
-                // see https://stackoverflow.com/questions/404107/why-are-control-characters-illegal-in-xml-1-0
-                if (c < 0x09 || (c > 0x0D && c < 0x20) || c == 0x7F) {
-                    hexEscapeChar(os, c);
-                    break;
-                }
-
-                // Plain ASCII: Write it to stream
-                if (c < 0x7F) {
-                    os << c;
-                    break;
-                }
-
-                // UTF-8 territory
-                // Check if the encoding is valid and if it is not, hex escape bytes.
-                // Important: We do not check the exact decoded values for validity, only the encoding format
-                // First check that this bytes is a valid lead byte:
-                // This means that it is not encoded as 1111 1XXX
-                // Or as 10XX XXXX
-                if (c < 0xC0 ||
-                    c >= 0xF8) {
-                    hexEscapeChar(os, c);
-                    break;
-                }
-
-                auto encBytes = trailingBytes(c);
-                // Are there enough bytes left to avoid accessing out-of-bounds memory?
-                if (idx + encBytes - 1 >= m_str.size()) {
-                    hexEscapeChar(os, c);
-                    break;
-                }
-                // The header is valid, check data
-                // The next encBytes bytes must together be a valid utf-8
-                // This means: bitpattern 10XX XXXX and the extracted value is sane (ish)
-                bool valid = true;
-                uint32_t value = headerValue(c);
-                for (std::size_t n = 1; n < encBytes; ++n) {
-                    uchar nc = m_str[idx + n];
-                    valid &= ((nc & 0xC0) == 0x80);
-                    value = (value << 6) | (nc & 0x3F);
-                }
-
-                if (
-                    // Wrong bit pattern of following bytes
-                    (!valid) ||
-                    // Overlong encodings
-                    (value < 0x80) ||
-                    (value < 0x800 && encBytes > 2) || // removed "0x80 <= value &&" because redundant
-                    (0x800 < value && value < 0x10000 && encBytes > 3) ||
-                    // Encoded value out of range
-                    (value >= 0x110000)
-                ) {
-                    hexEscapeChar(os, c);
-                    break;
-                }
-
-                // If we got here, this is in fact a valid(ish) utf-8 sequence
-                for (std::size_t n = 0; n < encBytes; ++n) {
-                    os << m_str[idx + n];
-                }
-                idx += encBytes - 1;
-                break;
-            }
-        }
-    }
-
-    std::ostream& operator << ( std::ostream& os, XmlEncode const& xmlEncode ) {
-        xmlEncode.encodeTo( os );
-        return os;
-    }
-
-    XmlWriter::ScopedElement::ScopedElement( XmlWriter* writer )
-    :   m_writer( writer )
-    {}
-
-    XmlWriter::ScopedElement::ScopedElement( ScopedElement&& other ) DOCTEST_NOEXCEPT
-    :   m_writer( other.m_writer ){
-        other.m_writer = nullptr;
-    }
-    XmlWriter::ScopedElement& XmlWriter::ScopedElement::operator=( ScopedElement&& other ) DOCTEST_NOEXCEPT {
-        if ( m_writer ) {
-            m_writer->endElement();
-        }
-        m_writer = other.m_writer;
-        other.m_writer = nullptr;
-        return *this;
-    }
-
-
-    XmlWriter::ScopedElement::~ScopedElement() {
-        if( m_writer )
-            m_writer->endElement();
-    }
-
-    XmlWriter::ScopedElement& XmlWriter::ScopedElement::writeText( std::string const& text, bool indent ) {
-        m_writer->writeText( text, indent );
-        return *this;
-    }
-
-    XmlWriter::XmlWriter( std::ostream& os ) : m_os( os )
-    {
-        writeDeclaration();
-    }
-
-    XmlWriter::~XmlWriter() {
-        while( !m_tags.empty() )
-            endElement();
-    }
-
-    XmlWriter& XmlWriter::startElement( std::string const& name ) {
-        ensureTagClosed();
-        newlineIfNecessary();
-        m_os << m_indent << '<' << name;
-        m_tags.push_back( name );
-        m_indent += "  ";
-        m_tagIsOpen = true;
-        return *this;
-    }
-
-    XmlWriter::ScopedElement XmlWriter::scopedElement( std::string const& name ) {
-        ScopedElement scoped( this );
-        startElement( name );
-        return scoped;
-    }
-
-    XmlWriter& XmlWriter::endElement() {
-        newlineIfNecessary();
-        m_indent = m_indent.substr( 0, m_indent.size()-2 );
-        if( m_tagIsOpen ) {
-            m_os << "/>";
-            m_tagIsOpen = false;
-        }
-        else {
-            m_os << m_indent << "</" << m_tags.back() << ">";
-        }
-        m_os << std::endl;
-        m_tags.pop_back();
-        return *this;
-    }
-
-    XmlWriter& XmlWriter::writeAttribute( std::string const& name, std::string const& attribute ) {
-        if( !name.empty() && !attribute.empty() )
-            m_os << ' ' << name << "=\"" << XmlEncode( attribute, XmlEncode::ForAttributes ) << '"';
-        return *this;
-    }
-
-    XmlWriter& XmlWriter::writeAttribute( std::string const& name, const char* attribute ) {
-        if( !name.empty() && attribute && attribute[0] != '\0' )
-            m_os << ' ' << name << "=\"" << XmlEncode( attribute, XmlEncode::ForAttributes ) << '"';
-        return *this;
-    }
-
-    XmlWriter& XmlWriter::writeAttribute( std::string const& name, bool attribute ) {
-        m_os << ' ' << name << "=\"" << ( attribute ? "true" : "false" ) << '"';
-        return *this;
-    }
-
-    XmlWriter& XmlWriter::writeText( std::string const& text, bool indent ) {
-        if( !text.empty() ){
-            bool tagWasOpen = m_tagIsOpen;
-            ensureTagClosed();
-            if( tagWasOpen && indent )
-                m_os << m_indent;
-            m_os << XmlEncode( text );
-            m_needsNewline = true;
-        }
-        return *this;
-    }
-
-    //XmlWriter& XmlWriter::writeComment( std::string const& text ) {
-    //    ensureTagClosed();
-    //    m_os << m_indent << "<!--" << text << "-->";
-    //    m_needsNewline = true;
-    //    return *this;
-    //}
-
-    //void XmlWriter::writeStylesheetRef( std::string const& url ) {
-    //    m_os << "<?xml-stylesheet type=\"text/xsl\" href=\"" << url << "\"?>\n";
-    //}
-
-    //XmlWriter& XmlWriter::writeBlankLine() {
-    //    ensureTagClosed();
-    //    m_os << '\n';
-    //    return *this;
-    //}
-
-    void XmlWriter::ensureTagClosed() {
-        if( m_tagIsOpen ) {
-            m_os << ">" << std::endl;
-            m_tagIsOpen = false;
-        }
-    }
-
-    void XmlWriter::writeDeclaration() {
-        m_os << "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n";
-    }
-
-    void XmlWriter::newlineIfNecessary() {
-        if( m_needsNewline ) {
-            m_os << std::endl;
-            m_needsNewline = false;
-        }
-    }
-
-// =================================================================================================
-// End of copy-pasted code from Catch
-// =================================================================================================
-
-// clang-format on
-
-struct XmlReporter : public IReporter {
-  XmlWriter xml;
-  std::mutex mutex;
-
-  // caching pointers/references to objects of these types - safe to do
-  const ContextOptions& opt;
-  const TestCaseData* tc = nullptr;
-
-  XmlReporter(const ContextOptions& co) : xml(*co.cout), opt(co) {}
-
-  void log_contexts()
-  {
-    int num_contexts = get_num_active_contexts();
-    if (num_contexts) {
-      auto contexts = get_active_contexts();
-      std::stringstream ss;
-      for (int i = 0; i < num_contexts; ++i) {
-        contexts[i]->stringify(&ss);
-        xml.scopedElement("Info").writeText(ss.str());
-        ss.str("");
-      }
-    }
-  }
-
-  unsigned line(unsigned l) const { return opt.no_line_numbers ? 0 : l; }
-
-  void test_case_start_impl(const TestCaseData& in)
-  {
-    bool open_ts_tag = false;
-    if (tc != nullptr) {  // we have already opened a test suite
-      if (std::strcmp(tc->m_test_suite, in.m_test_suite) != 0) {
-        xml.endElement();
-        open_ts_tag = true;
-      }
-    } else {
-      open_ts_tag = true;  // first test case ==> first test suite
-    }
-
-    if (open_ts_tag) {
-      xml.startElement("TestSuite");
-      xml.writeAttribute("name", in.m_test_suite);
-    }
-
-    tc = &in;
-    xml.startElement("TestCase")
-        .writeAttribute("name", in.m_name)
-        .writeAttribute("filename", skipPathFromFilename(in.m_file.c_str()))
-        .writeAttribute("line", line(in.m_line))
-        .writeAttribute("description", in.m_description);
-
-    if (Approx(in.m_timeout) != 0)
-      xml.writeAttribute("timeout", in.m_timeout);
-    if (in.m_may_fail)
-      xml.writeAttribute("may_fail", true);
-    if (in.m_should_fail)
-      xml.writeAttribute("should_fail", true);
-  }
-
-  // =========================================================================================
-  // WHAT FOLLOWS ARE OVERRIDES OF THE VIRTUAL METHODS OF THE REPORTER INTERFACE
-  // =========================================================================================
-
-  void report_query(const QueryData& in) override
-  {
-    test_run_start();
-    if (opt.list_reporters) {
-      for (auto& curr : getListeners())
-        xml.scopedElement("Listener")
-            .writeAttribute("priority", curr.first.first)
-            .writeAttribute("name", curr.first.second);
-      for (auto& curr : getReporters())
-        xml.scopedElement("Reporter")
-            .writeAttribute("priority", curr.first.first)
-            .writeAttribute("name", curr.first.second);
-    } else if (opt.count || opt.list_test_cases) {
-      for (unsigned i = 0; i < in.num_data; ++i) {
-        xml.scopedElement("TestCase")
-            .writeAttribute("name", in.data[i]->m_name)
-            .writeAttribute("testsuite", in.data[i]->m_test_suite)
-            .writeAttribute(
-                "filename", skipPathFromFilename(in.data[i]->m_file.c_str()))
-            .writeAttribute("line", line(in.data[i]->m_line))
-            .writeAttribute("skipped", in.data[i]->m_skip);
-      }
-      xml.scopedElement("OverallResultsTestCases")
-          .writeAttribute(
-              "unskipped", in.run_stats->numTestCasesPassingFilters);
-    } else if (opt.list_test_suites) {
-      for (unsigned i = 0; i < in.num_data; ++i)
-        xml.scopedElement("TestSuite")
-            .writeAttribute("name", in.data[i]->m_test_suite);
-      xml.scopedElement("OverallResultsTestCases")
-          .writeAttribute(
-              "unskipped", in.run_stats->numTestCasesPassingFilters);
-      xml.scopedElement("OverallResultsTestSuites")
-          .writeAttribute(
-              "unskipped", in.run_stats->numTestSuitesPassingFilters);
-    }
-    xml.endElement();
-  }
-
-  void test_run_start() override
-  {
-    // remove .exe extension - mainly to have the same output on UNIX and
-    // Windows
-    std::string binary_name = skipPathFromFilename(opt.binary_name.c_str());
-#ifdef DOCTEST_PLATFORM_WINDOWS
-    if (binary_name.rfind(".exe") != std::string::npos)
-      binary_name = binary_name.substr(0, binary_name.length() - 4);
-#endif  // DOCTEST_PLATFORM_WINDOWS
-
-    xml.startElement("doctest").writeAttribute("binary", binary_name);
-    if (opt.no_version == false)
-      xml.writeAttribute("version", DOCTEST_VERSION_STR);
-
-    // only the consequential ones (TODO: filters)
-    xml.scopedElement("Options")
-        .writeAttribute("order_by", opt.order_by.c_str())
-        .writeAttribute("rand_seed", opt.rand_seed)
-        .writeAttribute("first", opt.first)
-        .writeAttribute("last", opt.last)
-        .writeAttribute("abort_after", opt.abort_after)
-        .writeAttribute("subcase_filter_levels", opt.subcase_filter_levels)
-        .writeAttribute("case_sensitive", opt.case_sensitive)
-        .writeAttribute("no_throw", opt.no_throw)
-        .writeAttribute("no_skip", opt.no_skip);
-  }
-
-  void test_run_end(const TestRunStats& p) override
-  {
-    if (tc)  // the TestSuite tag - only if there has been at least 1 test case
-      xml.endElement();
-
-    xml.scopedElement("OverallResultsAsserts")
-        .writeAttribute("successes", p.numAsserts - p.numAssertsFailed)
-        .writeAttribute("failures", p.numAssertsFailed);
-
-    xml.startElement("OverallResultsTestCases")
-        .writeAttribute(
-            "successes", p.numTestCasesPassingFilters - p.numTestCasesFailed)
-        .writeAttribute("failures", p.numTestCasesFailed);
-    if (opt.no_skipped_summary == false)
-      xml.writeAttribute(
-          "skipped", p.numTestCases - p.numTestCasesPassingFilters);
-    xml.endElement();
-
-    xml.endElement();
-  }
-
-  void test_case_start(const TestCaseData& in) override
-  {
-    test_case_start_impl(in);
-    xml.ensureTagClosed();
-  }
-
-  void test_case_reenter(const TestCaseData&) override {}
-
-  void test_case_end(const CurrentTestCaseStats& st) override
-  {
-    xml.startElement("OverallResultsAsserts")
-        .writeAttribute(
-            "successes",
-            st.numAssertsCurrentTest - st.numAssertsFailedCurrentTest)
-        .writeAttribute("failures", st.numAssertsFailedCurrentTest)
-        .writeAttribute("test_case_success", st.testCaseSuccess);
-    if (opt.duration)
-      xml.writeAttribute("duration", st.seconds);
-    if (tc->m_expected_failures)
-      xml.writeAttribute("expected_failures", tc->m_expected_failures);
-    xml.endElement();
-
-    xml.endElement();
-  }
-
-  void test_case_exception(const TestCaseException& e) override
-  {
-    std::lock_guard<std::mutex> lock(mutex);
-
-    xml.scopedElement("Exception")
-        .writeAttribute("crash", e.is_crash)
-        .writeText(e.error_string.c_str());
-  }
-
-  void subcase_start(const SubcaseSignature& in) override
-  {
-    xml.startElement("SubCase")
-        .writeAttribute("name", in.m_name)
-        .writeAttribute("filename", skipPathFromFilename(in.m_file))
-        .writeAttribute("line", line(in.m_line));
-    xml.ensureTagClosed();
-  }
-
-  void subcase_end() override { xml.endElement(); }
-
-  void log_assert(const AssertData& rb) override
-  {
-    if (!rb.m_failed && !opt.success)
-      return;
-
-    std::lock_guard<std::mutex> lock(mutex);
-
-    xml.startElement("Expression")
-        .writeAttribute("success", !rb.m_failed)
-        .writeAttribute("type", assertString(rb.m_at))
-        .writeAttribute("filename", skipPathFromFilename(rb.m_file))
-        .writeAttribute("line", line(rb.m_line));
-
-    xml.scopedElement("Original").writeText(rb.m_expr);
-
-    if (rb.m_threw)
-      xml.scopedElement("Exception").writeText(rb.m_exception.c_str());
-
-    if (rb.m_at & assertType::is_throws_as)
-      xml.scopedElement("ExpectedException").writeText(rb.m_exception_type);
-    if (rb.m_at & assertType::is_throws_with)
-      xml.scopedElement("ExpectedExceptionString")
-          .writeText(rb.m_exception_string);
-    if ((rb.m_at & assertType::is_normal) && !rb.m_threw)
-      xml.scopedElement("Expanded").writeText(rb.m_decomp.c_str());
-
-    log_contexts();
-
-    xml.endElement();
-  }
-
-  void log_message(const MessageData& mb) override
-  {
-    std::lock_guard<std::mutex> lock(mutex);
-
-    xml.startElement("Message")
-        .writeAttribute("type", failureString(mb.m_severity))
-        .writeAttribute("filename", skipPathFromFilename(mb.m_file))
-        .writeAttribute("line", line(mb.m_line));
-
-    xml.scopedElement("Text").writeText(mb.m_string.c_str());
-
-    log_contexts();
-
-    xml.endElement();
-  }
-
-  void test_case_skipped(const TestCaseData& in) override
-  {
-    if (opt.no_skipped_summary == false) {
-      test_case_start_impl(in);
-      xml.writeAttribute("skipped", "true");
-      xml.endElement();
-    }
- } -}; - -DOCTEST_REGISTER_REPORTER("xml", 0, XmlReporter); - -void -fulltext_log_assert_to_stream(std::ostream& s, const AssertData& rb) -{ - if ((rb.m_at & (assertType::is_throws_as | assertType::is_throws_with)) == - 0) //! OCLINT bitwise operator in conditional - s << Color::Cyan << assertString(rb.m_at) << "( " << rb.m_expr << " ) " - << Color::None; - - if (rb.m_at & - assertType::is_throws) { //! OCLINT bitwise operator in conditional - s << (rb.m_threw ? "threw as expected!" : "did NOT throw at all!") << "\n"; - } else if ( - (rb.m_at & assertType::is_throws_as) && - (rb.m_at & assertType::is_throws_with)) { //! OCLINT - s << Color::Cyan << assertString(rb.m_at) << "( " << rb.m_expr << ", \"" - << rb.m_exception_string << "\", " << rb.m_exception_type << " ) " - << Color::None; - if (rb.m_threw) { - if (!rb.m_failed) { - s << "threw as expected!\n"; - } else { - s << "threw a DIFFERENT exception! (contents: " << rb.m_exception - << ")\n"; - } - } else { - s << "did NOT throw at all!\n"; - } - } else if (rb.m_at & assertType::is_throws_as) { //! OCLINT bitwise operator - //! in conditional - s << Color::Cyan << assertString(rb.m_at) << "( " << rb.m_expr << ", " - << rb.m_exception_type << " ) " << Color::None - << (rb.m_threw ? (rb.m_threw_as ? "threw as expected!" - : "threw a DIFFERENT exception: ") - : "did NOT throw at all!") - << Color::Cyan << rb.m_exception << "\n"; - } else if (rb.m_at & assertType::is_throws_with) { //! OCLINT bitwise - //! operator in - //! conditional - s << Color::Cyan << assertString(rb.m_at) << "( " << rb.m_expr << ", \"" - << rb.m_exception_string << "\" ) " << Color::None - << (rb.m_threw ? (!rb.m_failed ? "threw as expected!" - : "threw a DIFFERENT exception: ") - : "did NOT throw at all!") - << Color::Cyan << rb.m_exception << "\n"; - } else if (rb.m_at & assertType::is_nothrow) { //! OCLINT bitwise operator in - //! conditional - s << (rb.m_threw ? "THREW exception: " : "didn't throw!") << Color::Cyan - << rb.m_exception << "\n"; - } else { - s - << (rb.m_threw - ? "THREW exception: " - : (!rb.m_failed ? 
"is correct!\n" : "is NOT correct!\n")); - if (rb.m_threw) - s << rb.m_exception << "\n"; - else - s << " values: " << assertString(rb.m_at) << "( " << rb.m_decomp - << " )\n"; - } -} - -// TODO: -// - log_message() -// - respond to queries -// - honor remaining options -// - more attributes in tags -struct JUnitReporter : public IReporter { - XmlWriter xml; - std::mutex mutex; - Timer timer; - std::vector deepestSubcaseStackNames; - - struct JUnitTestCaseData { - static std::string getCurrentTimestamp() - { - // Beware, this is not reentrant because of backward compatibility issues - // Also, UTC only, again because of backward compatibility (%z is C++11) - time_t rawtime; - std::time(&rawtime); - auto const timeStampSize = sizeof("2017-01-16T17:06:45Z"); - - std::tm timeInfo; -#ifdef DOCTEST_PLATFORM_WINDOWS - gmtime_s(&timeInfo, &rawtime); -#else // DOCTEST_PLATFORM_WINDOWS - gmtime_r(&rawtime, &timeInfo); -#endif // DOCTEST_PLATFORM_WINDOWS - - char timeStamp[timeStampSize]; - const char* const fmt = "%Y-%m-%dT%H:%M:%SZ"; - - std::strftime(timeStamp, timeStampSize, fmt, &timeInfo); - return std::string(timeStamp); - } - - struct JUnitTestMessage { - JUnitTestMessage( - const std::string& _message, const std::string& _type, - const std::string& _details) - : message(_message), type(_type), details(_details) - { - } - - JUnitTestMessage(const std::string& _message, const std::string& _details) - : message(_message), type(), details(_details) - { - } - - std::string message, type, details; - }; - - struct JUnitTestCase { - JUnitTestCase(const std::string& _classname, const std::string& _name) - : classname(_classname), name(_name), time(0), failures() - { - } - - std::string classname, name; - double time; - std::vector failures, errors; - }; - - void add(const std::string& classname, const std::string& name) - { - testcases.emplace_back(classname, name); - } - - void appendSubcaseNamesToLastTestcase(std::vector nameStack) - { - for (auto& curr : nameStack) - if (curr.size()) - testcases.back().name += std::string("/") + curr.c_str(); - } - - void addTime(double time) - { - if (time < 1e-4) - time = 0; - testcases.back().time = time; - totalSeconds += time; - } - - void addFailure( - const std::string& message, const std::string& type, - const std::string& details) - { - testcases.back().failures.emplace_back(message, type, details); - ++totalFailures; - } - - void addError(const std::string& message, const std::string& details) - { - testcases.back().errors.emplace_back(message, details); - ++totalErrors; - } - - std::vector testcases; - double totalSeconds = 0; - int totalErrors = 0, totalFailures = 0; - }; - - JUnitTestCaseData testCaseData; - - // caching pointers/references to objects of these types - safe to do - const ContextOptions& opt; - const TestCaseData* tc = nullptr; - - JUnitReporter(const ContextOptions& co) : xml(*co.cout), opt(co) {} - - unsigned line(unsigned l) const { return opt.no_line_numbers ? 
0 : l; } - - // ========================================================================================= - // WHAT FOLLOWS ARE OVERRIDES OF THE VIRTUAL METHODS OF THE REPORTER INTERFACE - // ========================================================================================= - - void report_query(const QueryData&) override {} - - void test_run_start() override {} - - void test_run_end(const TestRunStats& p) override - { - // remove .exe extension - mainly to have the same output on UNIX and - // Windows - std::string binary_name = skipPathFromFilename(opt.binary_name.c_str()); -#ifdef DOCTEST_PLATFORM_WINDOWS - if (binary_name.rfind(".exe") != std::string::npos) - binary_name = binary_name.substr(0, binary_name.length() - 4); -#endif // DOCTEST_PLATFORM_WINDOWS - xml.startElement("testsuites"); - xml.startElement("testsuite") - .writeAttribute("name", binary_name) - .writeAttribute("errors", testCaseData.totalErrors) - .writeAttribute("failures", testCaseData.totalFailures) - .writeAttribute("tests", p.numAsserts); - if (opt.no_time_in_output == false) { - xml.writeAttribute("time", testCaseData.totalSeconds); - xml.writeAttribute("timestamp", JUnitTestCaseData::getCurrentTimestamp()); - } - if (opt.no_version == false) - xml.writeAttribute("doctest_version", DOCTEST_VERSION_STR); - - for (const auto& testCase : testCaseData.testcases) { - xml.startElement("testcase") - .writeAttribute("classname", testCase.classname) - .writeAttribute("name", testCase.name); - if (opt.no_time_in_output == false) - xml.writeAttribute("time", testCase.time); - // This is not ideal, but it should be enough to mimic gtest's junit - // output. - xml.writeAttribute("status", "run"); - - for (const auto& failure : testCase.failures) { - xml.scopedElement("failure") - .writeAttribute("message", failure.message) - .writeAttribute("type", failure.type) - .writeText(failure.details, false); - } - - for (const auto& error : testCase.errors) { - xml.scopedElement("error") - .writeAttribute("message", error.message) - .writeText(error.details); - } - - xml.endElement(); - } - xml.endElement(); - xml.endElement(); - } - - void test_case_start(const TestCaseData& in) override - { - testCaseData.add(skipPathFromFilename(in.m_file.c_str()), in.m_name); - timer.start(); - } - - void test_case_reenter(const TestCaseData& in) override - { - testCaseData.addTime(timer.getElapsedSeconds()); - testCaseData.appendSubcaseNamesToLastTestcase(deepestSubcaseStackNames); - deepestSubcaseStackNames.clear(); - - timer.start(); - testCaseData.add(skipPathFromFilename(in.m_file.c_str()), in.m_name); - } - - void test_case_end(const CurrentTestCaseStats&) override - { - testCaseData.addTime(timer.getElapsedSeconds()); - testCaseData.appendSubcaseNamesToLastTestcase(deepestSubcaseStackNames); - deepestSubcaseStackNames.clear(); - } - - void test_case_exception(const TestCaseException& e) override - { - std::lock_guard lock(mutex); - testCaseData.addError("exception", e.error_string.c_str()); - } - - void subcase_start(const SubcaseSignature& in) override - { - deepestSubcaseStackNames.push_back(in.m_name); - } - - void subcase_end() override {} - - void log_assert(const AssertData& rb) override - { - if (!rb.m_failed) // report only failures & ignore the `success` option - return; - - std::lock_guard lock(mutex); - - std::ostringstream os; - os << skipPathFromFilename(rb.m_file) << (opt.gnu_file_line ? ":" : "(") - << line(rb.m_line) << (opt.gnu_file_line ? 
":" : "):") << std::endl; - - fulltext_log_assert_to_stream(os, rb); - log_contexts(os); - testCaseData.addFailure( - rb.m_decomp.c_str(), assertString(rb.m_at), os.str()); - } - - void log_message(const MessageData&) override {} - - void test_case_skipped(const TestCaseData&) override {} - - void log_contexts(std::ostringstream& s) - { - int num_contexts = get_num_active_contexts(); - if (num_contexts) { - auto contexts = get_active_contexts(); - - s << " logged: "; - for (int i = 0; i < num_contexts; ++i) { - s << (i == 0 ? "" : " "); - contexts[i]->stringify(&s); - s << std::endl; - } - } - } -}; - -DOCTEST_REGISTER_REPORTER("junit", 0, JUnitReporter); - -struct Whitespace { - int nrSpaces; - explicit Whitespace(int nr) : nrSpaces(nr) {} -}; - -std::ostream& -operator<<(std::ostream& out, const Whitespace& ws) -{ - if (ws.nrSpaces != 0) - out << std::setw(ws.nrSpaces) << ' '; - return out; -} - -struct ConsoleReporter : public IReporter { - std::ostream& s; - bool hasLoggedCurrentTestStart; - std::vector subcasesStack; - size_t currentSubcaseLevel; - std::mutex mutex; - - // caching pointers/references to objects of these types - safe to do - const ContextOptions& opt; - const TestCaseData* tc; - - ConsoleReporter(const ContextOptions& co) : s(*co.cout), opt(co) {} - - ConsoleReporter(const ContextOptions& co, std::ostream& ostr) - : s(ostr), opt(co) - { - } - - // ========================================================================================= - // WHAT FOLLOWS ARE HELPERS USED BY THE OVERRIDES OF THE VIRTUAL METHODS OF - // THE INTERFACE - // ========================================================================================= - - void separator_to_stream() - { - s << Color::Yellow - << "=====================================================================" - "==========" - "\n"; - } - - const char* getSuccessOrFailString( - bool success, assertType::Enum at, const char* success_str) - { - if (success) - return success_str; - return failureString(at); - } - - Color::Enum getSuccessOrFailColor(bool success, assertType::Enum at) - { - return success ? Color::BrightGreen - : (at & assertType::is_warn) ? Color::Yellow - : Color::Red; - } - - void successOrFailColoredStringToStream( - bool success, assertType::Enum at, const char* success_str = "SUCCESS") - { - s << getSuccessOrFailColor(success, at) - << getSuccessOrFailString(success, at, success_str) << ": "; - } - - void log_contexts() - { - int num_contexts = get_num_active_contexts(); - if (num_contexts) { - auto contexts = get_active_contexts(); - - s << Color::None << " logged: "; - for (int i = 0; i < num_contexts; ++i) { - s << (i == 0 ? "" : " "); - contexts[i]->stringify(&s); - s << "\n"; - } - } - - s << "\n"; - } - - // this was requested to be made virtual so users could override it - virtual void file_line_to_stream( - const char* file, int line, const char* tail = "") - { - s << Color::LightGrey << skipPathFromFilename(file) - << (opt.gnu_file_line ? ":" : "(") - << (opt.no_line_numbers - ? 0 - : line) // 0 or the real num depending on the option - << (opt.gnu_file_line ? 
":" : "):") << tail; - } - - void logTestStart() - { - if (hasLoggedCurrentTestStart) - return; - - separator_to_stream(); - file_line_to_stream(tc->m_file.c_str(), tc->m_line, "\n"); - if (tc->m_description) - s << Color::Yellow << "DESCRIPTION: " << Color::None << tc->m_description - << "\n"; - if (tc->m_test_suite && tc->m_test_suite[0] != '\0') - s << Color::Yellow << "TEST SUITE: " << Color::None << tc->m_test_suite - << "\n"; - if (strncmp(tc->m_name, " Scenario:", 11) != 0) - s << Color::Yellow << "TEST CASE: "; - s << Color::None << tc->m_name << "\n"; - - for (size_t i = 0; i < currentSubcaseLevel; ++i) { - if (subcasesStack[i].m_name[0] != '\0') - s << " " << subcasesStack[i].m_name << "\n"; - } - - if (currentSubcaseLevel != subcasesStack.size()) { - s << Color::Yellow - << "\nDEEPEST SUBCASE STACK REACHED (DIFFERENT FROM THE CURRENT ONE):\n" - << Color::None; - for (size_t i = 0; i < subcasesStack.size(); ++i) { - if (subcasesStack[i].m_name[0] != '\0') - s << " " << subcasesStack[i].m_name << "\n"; - } - } - - s << "\n"; - - hasLoggedCurrentTestStart = true; - } - - void printVersion() - { - if (opt.no_version == false) - s << Color::Cyan << "[doctest] " << Color::None << "doctest version is \"" - << DOCTEST_VERSION_STR << "\"\n"; - } - - void printIntro() - { - if (opt.no_intro == false) { - printVersion(); - s << Color::Cyan << "[doctest] " << Color::None - << "run with \"--" DOCTEST_OPTIONS_PREFIX_DISPLAY - "help\" for options\n"; - } - } - - void printHelp() - { - int sizePrefixDisplay = - static_cast(strlen(DOCTEST_OPTIONS_PREFIX_DISPLAY)); - printVersion(); - // clang-format off - s << Color::Cyan << "[doctest]\n" << Color::None; - s << Color::Cyan << "[doctest] " << Color::None; - s << "boolean values: \"1/on/yes/true\" or \"0/off/no/false\"\n"; - s << Color::Cyan << "[doctest] " << Color::None; - s << "filter values: \"str1,str2,str3\" (comma separated strings)\n"; - s << Color::Cyan << "[doctest]\n" << Color::None; - s << Color::Cyan << "[doctest] " << Color::None; - s << "filters use wildcards for matching strings\n"; - s << Color::Cyan << "[doctest] " << Color::None; - s << "something passes a filter if any of the strings in a filter matches\n"; -#ifndef DOCTEST_CONFIG_NO_UNPREFIXED_OPTIONS - s << Color::Cyan << "[doctest]\n" << Color::None; - s << Color::Cyan << "[doctest] " << Color::None; - s << "ALL FLAGS, OPTIONS AND FILTERS ALSO AVAILABLE WITH A \"" DOCTEST_CONFIG_OPTIONS_PREFIX "\" PREFIX!!!\n"; -#endif - s << Color::Cyan << "[doctest]\n" << Color::None; - s << Color::Cyan << "[doctest] " << Color::None; - s << "Query flags - the program quits after them. 
Available:\n\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "?, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "help, -" DOCTEST_OPTIONS_PREFIX_DISPLAY "h " - << Whitespace(sizePrefixDisplay*0) << "prints this message\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "v, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "version " - << Whitespace(sizePrefixDisplay*1) << "prints the version\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "c, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "count " - << Whitespace(sizePrefixDisplay*1) << "prints the number of matching tests\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "ltc, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "list-test-cases " - << Whitespace(sizePrefixDisplay*1) << "lists all matching tests by name\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "lts, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "list-test-suites " - << Whitespace(sizePrefixDisplay*1) << "lists all matching test suites\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "lr, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "list-reporters " - << Whitespace(sizePrefixDisplay*1) << "lists all registered reporters\n\n"; - // ================================================================================== << 79 - s << Color::Cyan << "[doctest] " << Color::None; - s << "The available / options/filters are:\n\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "tc, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "test-case= " - << Whitespace(sizePrefixDisplay*1) << "filters tests by their name\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "tce, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "test-case-exclude= " - << Whitespace(sizePrefixDisplay*1) << "filters OUT tests by their name\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "sf, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "source-file= " - << Whitespace(sizePrefixDisplay*1) << "filters tests by their file\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "sfe, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "source-file-exclude= " - << Whitespace(sizePrefixDisplay*1) << "filters OUT tests by their file\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "ts, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "test-suite= " - << Whitespace(sizePrefixDisplay*1) << "filters tests by their test suite\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "tse, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "test-suite-exclude= " - << Whitespace(sizePrefixDisplay*1) << "filters OUT tests by their test suite\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "sc, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "subcase= " - << Whitespace(sizePrefixDisplay*1) << "filters subcases by their name\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "sce, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "subcase-exclude= " - << Whitespace(sizePrefixDisplay*1) << "filters OUT subcases by their name\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "r, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "reporters= " - << Whitespace(sizePrefixDisplay*1) << "reporters to use (console is default)\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "o, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "out= " - << Whitespace(sizePrefixDisplay*1) << "output filename\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "ob, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "order-by= " - << Whitespace(sizePrefixDisplay*1) << "how the tests should be ordered\n"; - s << Whitespace(sizePrefixDisplay*3) << " - [file/suite/name/rand/none]\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "rs, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "rand-seed= " - << Whitespace(sizePrefixDisplay*1) << "seed for random ordering\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "f, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "first= " - << 
Whitespace(sizePrefixDisplay*1) << "the first test passing the filters to\n"; - s << Whitespace(sizePrefixDisplay*3) << " execute - for range-based execution\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "l, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "last= " - << Whitespace(sizePrefixDisplay*1) << "the last test passing the filters to\n"; - s << Whitespace(sizePrefixDisplay*3) << " execute - for range-based execution\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "aa, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "abort-after= " - << Whitespace(sizePrefixDisplay*1) << "stop after failed assertions\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "scfl,--" DOCTEST_OPTIONS_PREFIX_DISPLAY "subcase-filter-levels= " - << Whitespace(sizePrefixDisplay*1) << "apply filters for the first levels\n"; - s << Color::Cyan << "\n[doctest] " << Color::None; - s << "Bool options - can be used like flags and true is assumed. Available:\n\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "s, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "success= " - << Whitespace(sizePrefixDisplay*1) << "include successful assertions in output\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "cs, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "case-sensitive= " - << Whitespace(sizePrefixDisplay*1) << "filters being treated as case sensitive\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "e, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "exit= " - << Whitespace(sizePrefixDisplay*1) << "exits after the tests finish\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "d, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "duration= " - << Whitespace(sizePrefixDisplay*1) << "prints the time duration of each test\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "m, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "minimal= " - << Whitespace(sizePrefixDisplay*1) << "minimal console output (only failures)\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "q, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "quiet= " - << Whitespace(sizePrefixDisplay*1) << "no console output\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "nt, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "no-throw= " - << Whitespace(sizePrefixDisplay*1) << "skips exceptions-related assert checks\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "ne, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "no-exitcode= " - << Whitespace(sizePrefixDisplay*1) << "returns (or exits) always with success\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "nr, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "no-run= " - << Whitespace(sizePrefixDisplay*1) << "skips all runtime doctest operations\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "ni, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "no-intro= " - << Whitespace(sizePrefixDisplay*1) << "omit the framework intro in the output\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "nv, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "no-version= " - << Whitespace(sizePrefixDisplay*1) << "omit the framework version in the output\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "nc, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "no-colors= " - << Whitespace(sizePrefixDisplay*1) << "disables colors in output\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "fc, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "force-colors= " - << Whitespace(sizePrefixDisplay*1) << "use colors even when not in a tty\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "nb, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "no-breaks= " - << Whitespace(sizePrefixDisplay*1) << "disables breakpoints in debuggers\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "ns, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "no-skip= " - << Whitespace(sizePrefixDisplay*1) << "don't skip test cases marked as 
skip\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "gfl, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "gnu-file-line= " - << Whitespace(sizePrefixDisplay*1) << ":n: vs (n): for line numbers in output\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "npf, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "no-path-filenames= " - << Whitespace(sizePrefixDisplay*1) << "only filenames and no paths in output\n"; - s << " -" DOCTEST_OPTIONS_PREFIX_DISPLAY "nln, --" DOCTEST_OPTIONS_PREFIX_DISPLAY "no-line-numbers= " - << Whitespace(sizePrefixDisplay*1) << "0 instead of real line numbers in output\n"; - // ================================================================================== << 79 - // clang-format on - - s << Color::Cyan << "\n[doctest] " << Color::None; - s << "for more information visit the project documentation\n\n"; - } - - void printRegisteredReporters() - { - printVersion(); - auto printReporters = [this]( - const reporterMap& reporters, const char* type) { - if (reporters.size()) { - s << Color::Cyan << "[doctest] " << Color::None - << "listing all registered " << type << "\n"; - for (auto& curr : reporters) - s << "priority: " << std::setw(5) << curr.first.first - << " name: " << curr.first.second << "\n"; - } - }; - printReporters(getListeners(), "listeners"); - printReporters(getReporters(), "reporters"); - } - - // ========================================================================================= - // WHAT FOLLOWS ARE OVERRIDES OF THE VIRTUAL METHODS OF THE REPORTER INTERFACE - // ========================================================================================= - - void report_query(const QueryData& in) override - { - if (opt.version) { - printVersion(); - } else if (opt.help) { - printHelp(); - } else if (opt.list_reporters) { - printRegisteredReporters(); - } else if (opt.count || opt.list_test_cases) { - if (opt.list_test_cases) { - s << Color::Cyan << "[doctest] " << Color::None - << "listing all test case names\n"; - separator_to_stream(); - } - - for (unsigned i = 0; i < in.num_data; ++i) - s << Color::None << in.data[i]->m_name << "\n"; - - separator_to_stream(); - - s << Color::Cyan << "[doctest] " << Color::None - << "unskipped test cases passing the current filters: " - << g_cs->numTestCasesPassingFilters << "\n"; - - } else if (opt.list_test_suites) { - s << Color::Cyan << "[doctest] " << Color::None - << "listing all test suites\n"; - separator_to_stream(); - - for (unsigned i = 0; i < in.num_data; ++i) - s << Color::None << in.data[i]->m_test_suite << "\n"; - - separator_to_stream(); - - s << Color::Cyan << "[doctest] " << Color::None - << "unskipped test cases passing the current filters: " - << g_cs->numTestCasesPassingFilters << "\n"; - s << Color::Cyan << "[doctest] " << Color::None - << "test suites with unskipped test cases passing the current filters: " - << g_cs->numTestSuitesPassingFilters << "\n"; - } - } - - void test_run_start() override - { - if (!opt.minimal) - printIntro(); - } - - void test_run_end(const TestRunStats& p) override - { - if (opt.minimal && p.numTestCasesFailed == 0) - return; - - separator_to_stream(); - s << std::dec; - - auto totwidth = int(std::ceil(log10( - (std::max( - p.numTestCasesPassingFilters, - static_cast(p.numAsserts))) + - 1))); - auto passwidth = int(std::ceil(log10( - (std::max( - p.numTestCasesPassingFilters - p.numTestCasesFailed, - static_cast(p.numAsserts - p.numAssertsFailed))) + - 1))); - auto failwidth = int(std::ceil(log10( - (std::max( - p.numTestCasesFailed, static_cast(p.numAssertsFailed))) + - 1))); - const bool 
anythingFailed = - p.numTestCasesFailed > 0 || p.numAssertsFailed > 0; - s << Color::Cyan << "[doctest] " << Color::None - << "test cases: " << std::setw(totwidth) << p.numTestCasesPassingFilters - << " | " - << ((p.numTestCasesPassingFilters == 0 || anythingFailed) ? Color::None - : Color::Green) - << std::setw(passwidth) - << p.numTestCasesPassingFilters - p.numTestCasesFailed << " passed" - << Color::None << " | " - << (p.numTestCasesFailed > 0 ? Color::Red : Color::None) - << std::setw(failwidth) << p.numTestCasesFailed << " failed" - << Color::None << " |"; - if (opt.no_skipped_summary == false) { - const int numSkipped = p.numTestCases - p.numTestCasesPassingFilters; - s << " " << (numSkipped == 0 ? Color::None : Color::Yellow) << numSkipped - << " skipped" << Color::None; - } - s << "\n"; - s << Color::Cyan << "[doctest] " << Color::None - << "assertions: " << std::setw(totwidth) << p.numAsserts << " | " - << ((p.numAsserts == 0 || anythingFailed) ? Color::None : Color::Green) - << std::setw(passwidth) << (p.numAsserts - p.numAssertsFailed) - << " passed" << Color::None << " | " - << (p.numAssertsFailed > 0 ? Color::Red : Color::None) - << std::setw(failwidth) << p.numAssertsFailed << " failed" << Color::None - << " |\n"; - s << Color::Cyan << "[doctest] " << Color::None - << "Status: " << (p.numTestCasesFailed > 0 ? Color::Red : Color::Green) - << ((p.numTestCasesFailed > 0) ? "FAILURE!" : "SUCCESS!") << Color::None - << std::endl; - } - - void test_case_start(const TestCaseData& in) override - { - hasLoggedCurrentTestStart = false; - tc = ∈ - subcasesStack.clear(); - currentSubcaseLevel = 0; - } - - void test_case_reenter(const TestCaseData&) override - { - subcasesStack.clear(); - } - - void test_case_end(const CurrentTestCaseStats& st) override - { - if (tc->m_no_output) - return; - - // log the preamble of the test case only if there is something - // else to print - something other than that an assert has failed - if (opt.duration || - (st.failure_flags && - st.failure_flags != TestCaseFailureReason::AssertFailure)) - logTestStart(); - - if (opt.duration) - s << Color::None << std::setprecision(6) << std::fixed << st.seconds - << " s: " << tc->m_name << "\n"; - - if (st.failure_flags & TestCaseFailureReason::Timeout) - s << Color::Red << "Test case exceeded time limit of " - << std::setprecision(6) << std::fixed << tc->m_timeout << "!\n"; - - if (st.failure_flags & TestCaseFailureReason::ShouldHaveFailedButDidnt) { - s << Color::Red - << "Should have failed but didn't! 
Marking it as failed!\n"; - } else if ( - st.failure_flags & TestCaseFailureReason::ShouldHaveFailedAndDid) { - s << Color::Yellow << "Failed as expected so marking it as not failed\n"; - } else if ( - st.failure_flags & TestCaseFailureReason::CouldHaveFailedAndDid) { - s << Color::Yellow << "Allowed to fail so marking it as not failed\n"; - } else if ( - st.failure_flags & TestCaseFailureReason::DidntFailExactlyNumTimes) { - s << Color::Red << "Didn't fail exactly " << tc->m_expected_failures - << " times so marking it as failed!\n"; - } else if ( - st.failure_flags & TestCaseFailureReason::FailedExactlyNumTimes) { - s << Color::Yellow << "Failed exactly " << tc->m_expected_failures - << " times as expected so marking it as not failed!\n"; - } - if (st.failure_flags & TestCaseFailureReason::TooManyFailedAsserts) { - s << Color::Red << "Aborting - too many failed asserts!\n"; - } - s << Color::None; // lgtm [cpp/useless-expression] - } - - void test_case_exception(const TestCaseException& e) override - { - std::lock_guard lock(mutex); - if (tc->m_no_output) - return; - - logTestStart(); - - file_line_to_stream(tc->m_file.c_str(), tc->m_line, " "); - successOrFailColoredStringToStream( - false, e.is_crash ? assertType::is_require : assertType::is_check); - s << Color::Red - << (e.is_crash ? "test case CRASHED: " : "test case THREW exception: ") - << Color::Cyan << e.error_string << "\n"; - - int num_stringified_contexts = get_num_stringified_contexts(); - if (num_stringified_contexts) { - auto stringified_contexts = get_stringified_contexts(); - s << Color::None << " logged: "; - for (int i = num_stringified_contexts; i > 0; --i) { - s << (i == num_stringified_contexts ? "" : " ") - << stringified_contexts[i - 1] << "\n"; - } - } - s << "\n" << Color::None; - } - - void subcase_start(const SubcaseSignature& subc) override - { - subcasesStack.push_back(subc); - ++currentSubcaseLevel; - hasLoggedCurrentTestStart = false; - } - - void subcase_end() override - { - --currentSubcaseLevel; - hasLoggedCurrentTestStart = false; - } - - void log_assert(const AssertData& rb) override - { - if ((!rb.m_failed && !opt.success) || tc->m_no_output) - return; - - std::lock_guard lock(mutex); - - logTestStart(); - - file_line_to_stream(rb.m_file, rb.m_line, " "); - successOrFailColoredStringToStream(!rb.m_failed, rb.m_at); - - fulltext_log_assert_to_stream(s, rb); - - log_contexts(); - } - - void log_message(const MessageData& mb) override - { - if (tc->m_no_output) - return; - - std::lock_guard lock(mutex); - - logTestStart(); - - file_line_to_stream(mb.m_file, mb.m_line, " "); - s << getSuccessOrFailColor(false, mb.m_severity) - << getSuccessOrFailString( - mb.m_severity & assertType::is_warn, mb.m_severity, "MESSAGE") - << ": "; - s << Color::None << mb.m_string << "\n"; - log_contexts(); - } - - void test_case_skipped(const TestCaseData&) override {} -}; - -DOCTEST_REGISTER_REPORTER("console", 0, ConsoleReporter); - -#ifdef DOCTEST_PLATFORM_WINDOWS -struct DebugOutputWindowReporter : public ConsoleReporter { - DOCTEST_THREAD_LOCAL static std::ostringstream oss; - - DebugOutputWindowReporter(const ContextOptions& co) : ConsoleReporter(co, oss) - { - } - -#define DOCTEST_DEBUG_OUTPUT_REPORTER_OVERRIDE(func, type, arg) \ - void func(type arg) override \ - { \ - bool with_col = g_no_colors; \ - g_no_colors = false; \ - ConsoleReporter::func(arg); \ - if (oss.tellp() != std::streampos{}) { \ - DOCTEST_OUTPUT_DEBUG_STRING(oss.str().c_str()); \ - oss.str(""); \ - } \ - g_no_colors = with_col; \ - } - - 
DOCTEST_DEBUG_OUTPUT_REPORTER_OVERRIDE( - test_run_start, DOCTEST_EMPTY, DOCTEST_EMPTY) - DOCTEST_DEBUG_OUTPUT_REPORTER_OVERRIDE(test_run_end, const TestRunStats&, in) - DOCTEST_DEBUG_OUTPUT_REPORTER_OVERRIDE( - test_case_start, const TestCaseData&, in) - DOCTEST_DEBUG_OUTPUT_REPORTER_OVERRIDE( - test_case_reenter, const TestCaseData&, in) - DOCTEST_DEBUG_OUTPUT_REPORTER_OVERRIDE( - test_case_end, const CurrentTestCaseStats&, in) - DOCTEST_DEBUG_OUTPUT_REPORTER_OVERRIDE( - test_case_exception, const TestCaseException&, in) - DOCTEST_DEBUG_OUTPUT_REPORTER_OVERRIDE( - subcase_start, const SubcaseSignature&, in) - DOCTEST_DEBUG_OUTPUT_REPORTER_OVERRIDE( - subcase_end, DOCTEST_EMPTY, DOCTEST_EMPTY) - DOCTEST_DEBUG_OUTPUT_REPORTER_OVERRIDE(log_assert, const AssertData&, in) - DOCTEST_DEBUG_OUTPUT_REPORTER_OVERRIDE(log_message, const MessageData&, in) - DOCTEST_DEBUG_OUTPUT_REPORTER_OVERRIDE( - test_case_skipped, const TestCaseData&, in) -}; - -DOCTEST_THREAD_LOCAL std::ostringstream DebugOutputWindowReporter::oss; -#endif // DOCTEST_PLATFORM_WINDOWS - -// the implementation of parseOption() -bool -parseOptionImpl( - int argc, const char* const* argv, const char* pattern, String* value) -{ - // going from the end to the beginning and stopping on the first occurrence - // from the end - for (int i = argc; i > 0; --i) { - auto index = i - 1; - auto temp = std::strstr(argv[index], pattern); - if (temp && - (value || - strlen(temp) == - strlen(pattern))) { //! OCLINT prefer early exits and continue - // eliminate matches in which the chars before the option are not '-' - bool noBadCharsFound = true; - auto curr = argv[index]; - while (curr != temp) { - if (*curr++ != '-') { - noBadCharsFound = false; - break; - } - } - if (noBadCharsFound && argv[index][0] == '-') { - if (value) { - // parsing the value of an option - temp += strlen(pattern); - const unsigned len = strlen(temp); - if (len) { - *value = temp; - return true; - } - } else { - // just a flag - no value - return true; - } - } - } - } - return false; -} - -// parses an option and returns the string after the '=' character -bool -parseOption( - int argc, const char* const* argv, const char* pattern, - String* value = nullptr, const String& defaultVal = String()) -{ - if (value) - *value = defaultVal; -#ifndef DOCTEST_CONFIG_NO_UNPREFIXED_OPTIONS - // offset (normally 3 for "dt-") to skip prefix - if (parseOptionImpl( - argc, argv, pattern + strlen(DOCTEST_CONFIG_OPTIONS_PREFIX), value)) - return true; -#endif // DOCTEST_CONFIG_NO_UNPREFIXED_OPTIONS - return parseOptionImpl(argc, argv, pattern, value); -} - -// locates a flag on the command line -bool -parseFlag(int argc, const char* const* argv, const char* pattern) -{ - return parseOption(argc, argv, pattern); -} - -// parses a comma separated list of words after a pattern in one of the -// arguments in argv -bool -parseCommaSepArgs( - int argc, const char* const* argv, const char* pattern, - std::vector<String>& res) -{ - String filtersString; - if (parseOption(argc, argv, pattern, &filtersString)) { - // tokenize with "," as a separator, unless escaped with backslash - std::ostringstream s; - auto flush = [&s, &res]() { - auto string = s.str(); - if (string.size() > 0) { - res.push_back(string.c_str()); - } - s.str(""); - }; - - bool seenBackslash = false; - const char* current = filtersString.c_str(); - const char* end = current + strlen(current); - while (current != end) { - char character = *current++; - if (seenBackslash) { - seenBackslash = false; - if (character == ',') { - s.put(','); - 
continue; - } - s.put('\\'); - } - if (character == '\\') { - seenBackslash = true; - } else if (character == ',') { - flush(); - } else { - s.put(character); - } - } - - if (seenBackslash) { - s.put('\\'); - } - flush(); - return true; - } - return false; -} - -enum optionType { option_bool, option_int }; - -// parses an int/bool option from the command line -bool -parseIntOption( - int argc, const char* const* argv, const char* pattern, optionType type, - int& res) -{ - String parsedValue; - if (!parseOption(argc, argv, pattern, &parsedValue)) - return false; - - if (type == 0) { - // boolean - const char positive[][5] = { - "1", "true", "on", "yes"}; // 5 - strlen("true") + 1 - const char negative[][6] = { - "0", "false", "off", "no"}; // 6 - strlen("false") + 1 - - // if the value matches any of the positive/negative possibilities - for (unsigned i = 0; i < 4; i++) { - if (parsedValue.compare(positive[i], true) == 0) { - res = 1; //! OCLINT parameter reassignment - return true; - } - if (parsedValue.compare(negative[i], true) == 0) { - res = 0; //! OCLINT parameter reassignment - return true; - } - } - } else { - // integer - // TODO: change this to use std::stoi or something else! currently it uses - // undefined behavior - assumes '0' on failed parse... - int theInt = std::atoi(parsedValue.c_str()); // NOLINT - if (theInt != 0) { - res = theInt; //! OCLINT parameter reassignment - return true; - } - } - return false; -} -} // namespace - -Context::Context(int argc, const char* const* argv) - : p(new detail::ContextState) -{ - parseArgs(argc, argv, true); - if (argc) - p->binary_name = argv[0]; -} - -Context::~Context() -{ - if (g_cs == p) - g_cs = nullptr; - delete p; -} - -void -Context::applyCommandLine(int argc, const char* const* argv) -{ - parseArgs(argc, argv); - if (argc) - p->binary_name = argv[0]; -} - -// parses args -void -Context::parseArgs(int argc, const char* const* argv, bool withDefaults) -{ - using namespace detail; - - // clang-format off - parseCommaSepArgs(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "source-file=", p->filters[0]); - parseCommaSepArgs(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "sf=", p->filters[0]); - parseCommaSepArgs(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "source-file-exclude=",p->filters[1]); - parseCommaSepArgs(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "sfe=", p->filters[1]); - parseCommaSepArgs(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "test-suite=", p->filters[2]); - parseCommaSepArgs(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "ts=", p->filters[2]); - parseCommaSepArgs(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "test-suite-exclude=", p->filters[3]); - parseCommaSepArgs(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "tse=", p->filters[3]); - parseCommaSepArgs(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "test-case=", p->filters[4]); - parseCommaSepArgs(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "tc=", p->filters[4]); - parseCommaSepArgs(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "test-case-exclude=", p->filters[5]); - parseCommaSepArgs(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "tce=", p->filters[5]); - parseCommaSepArgs(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "subcase=", p->filters[6]); - parseCommaSepArgs(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "sc=", p->filters[6]); - parseCommaSepArgs(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "subcase-exclude=", p->filters[7]); - parseCommaSepArgs(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "sce=", p->filters[7]); - parseCommaSepArgs(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "reporters=", p->filters[8]); - 
parseCommaSepArgs(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "r=",                   p->filters[8]); - // clang-format on - - int intRes = 0; - String strRes; - -#define DOCTEST_PARSE_AS_BOOL_OR_FLAG(name, sname, var, default) \ - if (parseIntOption( \ - argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX name "=", option_bool, \ - intRes) || \ - parseIntOption( \ - argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX sname "=", option_bool, \ - intRes)) \ - p->var = static_cast<bool>(intRes); \ - else if ( \ - parseFlag(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX name) || \ - parseFlag(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX sname)) \ - p->var = true; \ - else if (withDefaults) \ - p->var = default - -#define DOCTEST_PARSE_INT_OPTION(name, sname, var, default) \ - if (parseIntOption( \ - argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX name "=", option_int, \ - intRes) || \ - parseIntOption( \ - argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX sname "=", option_int, \ - intRes)) \ - p->var = intRes; \ - else if (withDefaults) \ - p->var = default - -#define DOCTEST_PARSE_STR_OPTION(name, sname, var, default) \ - if (parseOption( \ - argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX name "=", &strRes, \ - default) || \ - parseOption( \ - argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX sname "=", &strRes, \ - default) || \ - withDefaults) \ - p->var = strRes - - // clang-format off - DOCTEST_PARSE_STR_OPTION("out", "o", out, ""); - DOCTEST_PARSE_STR_OPTION("order-by", "ob", order_by, "file"); - DOCTEST_PARSE_INT_OPTION("rand-seed", "rs", rand_seed, 0); - - DOCTEST_PARSE_INT_OPTION("first", "f", first, 0); - DOCTEST_PARSE_INT_OPTION("last", "l", last, UINT_MAX); - - DOCTEST_PARSE_INT_OPTION("abort-after", "aa", abort_after, 0); - DOCTEST_PARSE_INT_OPTION("subcase-filter-levels", "scfl", subcase_filter_levels, INT_MAX); - - DOCTEST_PARSE_AS_BOOL_OR_FLAG("success", "s", success, false); - DOCTEST_PARSE_AS_BOOL_OR_FLAG("case-sensitive", "cs", case_sensitive, false); - DOCTEST_PARSE_AS_BOOL_OR_FLAG("exit", "e", exit, false); - DOCTEST_PARSE_AS_BOOL_OR_FLAG("duration", "d", duration, false); - DOCTEST_PARSE_AS_BOOL_OR_FLAG("minimal", "m", minimal, false); - DOCTEST_PARSE_AS_BOOL_OR_FLAG("quiet", "q", quiet, false); - DOCTEST_PARSE_AS_BOOL_OR_FLAG("no-throw", "nt", no_throw, false); - DOCTEST_PARSE_AS_BOOL_OR_FLAG("no-exitcode", "ne", no_exitcode, false); - DOCTEST_PARSE_AS_BOOL_OR_FLAG("no-run", "nr", no_run, false); - DOCTEST_PARSE_AS_BOOL_OR_FLAG("no-intro", "ni", no_intro, false); - DOCTEST_PARSE_AS_BOOL_OR_FLAG("no-version", "nv", no_version, false); - DOCTEST_PARSE_AS_BOOL_OR_FLAG("no-colors", "nc", no_colors, false); - DOCTEST_PARSE_AS_BOOL_OR_FLAG("force-colors", "fc", force_colors, false); - DOCTEST_PARSE_AS_BOOL_OR_FLAG("no-breaks", "nb", no_breaks, false); - DOCTEST_PARSE_AS_BOOL_OR_FLAG("no-skip", "ns", no_skip, false); - DOCTEST_PARSE_AS_BOOL_OR_FLAG("gnu-file-line", "gfl", gnu_file_line, !bool(DOCTEST_MSVC)); - DOCTEST_PARSE_AS_BOOL_OR_FLAG("no-path-filenames", "npf", no_path_in_filenames, false); - DOCTEST_PARSE_AS_BOOL_OR_FLAG("no-line-numbers", "nln", no_line_numbers, false); - DOCTEST_PARSE_AS_BOOL_OR_FLAG("no-debug-output", "ndo", no_debug_output, false); - DOCTEST_PARSE_AS_BOOL_OR_FLAG("no-skipped-summary", "nss", no_skipped_summary, false); - DOCTEST_PARSE_AS_BOOL_OR_FLAG("no-time-in-output", "ntio", no_time_in_output, false); - // clang-format on - - if (withDefaults) { - p->help = false; - p->version = false; - p->count = false; - p->list_test_cases = false; - p->list_test_suites = false; - p->list_reporters = false; - } - if (parseFlag(argc, 
argv, DOCTEST_CONFIG_OPTIONS_PREFIX "help") || - parseFlag(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "h") || - parseFlag(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "?")) { - p->help = true; - p->exit = true; - } - if (parseFlag(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "version") || - parseFlag(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "v")) { - p->version = true; - p->exit = true; - } - if (parseFlag(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "count") || - parseFlag(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "c")) { - p->count = true; - p->exit = true; - } - if (parseFlag(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "list-test-cases") || - parseFlag(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "ltc")) { - p->list_test_cases = true; - p->exit = true; - } - if (parseFlag(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "list-test-suites") || - parseFlag(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "lts")) { - p->list_test_suites = true; - p->exit = true; - } - if (parseFlag(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "list-reporters") || - parseFlag(argc, argv, DOCTEST_CONFIG_OPTIONS_PREFIX "lr")) { - p->list_reporters = true; - p->exit = true; - } -} - -// allows the user to add procedurally to the filters from the command line -void -Context::addFilter(const char* filter, const char* value) -{ - setOption(filter, value); -} - -// allows the user to clear all filters from the command line -void -Context::clearFilters() -{ - for (auto& curr : p->filters) curr.clear(); -} - -// allows the user to override procedurally the bool options from the command -// line -void -Context::setOption(const char* option, bool value) -{ - setOption(option, value ? "true" : "false"); -} - -// allows the user to override procedurally the int options from the command -// line -void -Context::setOption(const char* option, int value) -{ - setOption(option, toString(value).c_str()); - // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) -} - -// allows the user to override procedurally the string options from the command -// line -void -Context::setOption(const char* option, const char* value) -{ - auto argv = String("-") + option + "=" + value; - auto lvalue = argv.c_str(); - parseArgs(1, &lvalue); -} - -// users should query this in their main() and exit the program if true -bool -Context::shouldExit() -{ - return p->exit; -} - -void -Context::setAsDefaultForAssertsOutOfTestCases() -{ - g_cs = p; -} - -void -Context::setAssertHandler(detail::assert_handler ah) -{ - p->ah = ah; -} - -void -Context::setCout(std::ostream* out) -{ - p->cout = out; -} - -static class DiscardOStream : public std::ostream { - private: - class : public std::streambuf { - private: - // allowing some buffering decreases the amount of calls to overflow - char buf[1024]; - - protected: - std::streamsize xsputn(const char_type*, std::streamsize count) override - { - return count; - } - - int_type overflow(int_type ch) override - { - setp(std::begin(buf), std::end(buf)); - return traits_type::not_eof(ch); - } - } discardBuf; - - public: - DiscardOStream() : std::ostream(&discardBuf) {} -} discardOut; - -// the main function that does all the filtering and test running -int -Context::run() -{ - using namespace detail; - - // save the old context state in case such was setup - for using asserts out - // of a testing context - auto old_cs = g_cs; - // this is the current contest - g_cs = p; - is_running_in_test = true; - - g_no_colors = p->no_colors; - p->resetRunData(); - - std::fstream fstr; - if (p->cout == nullptr) { - if (p->quiet) { - p->cout = 
&discardOut; - } else if (p->out.size()) { - // to a file if specified - fstr.open(p->out.c_str(), std::fstream::out); - p->cout = &fstr; - } else { - // stdout by default - p->cout = &std::cout; - } - } - - FatalConditionHandler::allocateAltStackMem(); - - auto cleanup_and_return = [&]() { - FatalConditionHandler::freeAltStackMem(); - - if (fstr.is_open()) - fstr.close(); - - // restore context - g_cs = old_cs; - is_running_in_test = false; - - // we have to free the reporters which were allocated when the run started - for (auto& curr : p->reporters_currently_used) delete curr; - p->reporters_currently_used.clear(); - - if (p->numTestCasesFailed && !p->no_exitcode) - return EXIT_FAILURE; - return EXIT_SUCCESS; - }; - - // setup default reporter if none is given through the command line - if (p->filters[8].empty()) - p->filters[8].push_back("console"); - - // check to see if any of the registered reporters has been selected - for (auto& curr : getReporters()) { - if (matchesAny( - curr.first.second.c_str(), p->filters[8], false, p->case_sensitive)) - p->reporters_currently_used.push_back(curr.second(*g_cs)); - } - - // TODO: check if there is nothing in reporters_currently_used - - // prepend all listeners - for (auto& curr : getListeners()) - p->reporters_currently_used.insert( - p->reporters_currently_used.begin(), curr.second(*g_cs)); - -#ifdef DOCTEST_PLATFORM_WINDOWS - if (isDebuggerActive() && p->no_debug_output == false) - p->reporters_currently_used.push_back(new DebugOutputWindowReporter(*g_cs)); -#endif // DOCTEST_PLATFORM_WINDOWS - - // handle version, help and no_run - if (p->no_run || p->version || p->help || p->list_reporters) { - DOCTEST_ITERATE_THROUGH_REPORTERS(report_query, QueryData()); - - return cleanup_and_return(); - } - - std::vector testArray; - for (auto& curr : getRegisteredTests()) testArray.push_back(&curr); - p->numTestCases = testArray.size(); - - // sort the collected records - if (!testArray.empty()) { - if (p->order_by.compare("file", true) == 0) { - std::sort(testArray.begin(), testArray.end(), fileOrderComparator); - } else if (p->order_by.compare("suite", true) == 0) { - std::sort(testArray.begin(), testArray.end(), suiteOrderComparator); - } else if (p->order_by.compare("name", true) == 0) { - std::sort(testArray.begin(), testArray.end(), nameOrderComparator); - } else if (p->order_by.compare("rand", true) == 0) { - std::srand(p->rand_seed); - - // random_shuffle implementation - const auto first = &testArray[0]; - for (size_t i = testArray.size() - 1; i > 0; --i) { - int idxToSwap = std::rand() % (i + 1); // NOLINT - - const auto temp = first[i]; - - first[i] = first[idxToSwap]; - first[idxToSwap] = temp; - } - } else if (p->order_by.compare("none", true) == 0) { - // means no sorting - beneficial for death tests which call into the - // executable with a specific test case in mind - we don't want to slow - // down the startup times - } - } - - std::set testSuitesPassingFilt; - - bool query_mode = p->count || p->list_test_cases || p->list_test_suites; - std::vector queryResults; - - if (!query_mode) - DOCTEST_ITERATE_THROUGH_REPORTERS(test_run_start, DOCTEST_EMPTY); - - // invoke the registered functions if they match the filter criteria (or just - // count them) - for (auto& curr : testArray) { - const auto& tc = *curr; - - bool skip_me = false; - if (tc.m_skip && !p->no_skip) - skip_me = true; - - if (!matchesAny(tc.m_file.c_str(), p->filters[0], true, p->case_sensitive)) - skip_me = true; - if (matchesAny(tc.m_file.c_str(), p->filters[1], false, 
p->case_sensitive)) - skip_me = true; - if (!matchesAny(tc.m_test_suite, p->filters[2], true, p->case_sensitive)) - skip_me = true; - if (matchesAny(tc.m_test_suite, p->filters[3], false, p->case_sensitive)) - skip_me = true; - if (!matchesAny(tc.m_name, p->filters[4], true, p->case_sensitive)) - skip_me = true; - if (matchesAny(tc.m_name, p->filters[5], false, p->case_sensitive)) - skip_me = true; - - if (!skip_me) - p->numTestCasesPassingFilters++; - - // skip the test if it is not in the execution range - if ((p->last < p->numTestCasesPassingFilters && p->first <= p->last) || - (p->first > p->numTestCasesPassingFilters)) - skip_me = true; - - if (skip_me) { - if (!query_mode) - DOCTEST_ITERATE_THROUGH_REPORTERS(test_case_skipped, tc); - continue; - } - - // do not execute the test if we are to only count the number of filter - // passing tests - if (p->count) - continue; - - // print the name of the test and don't execute it - if (p->list_test_cases) { - queryResults.push_back(&tc); - continue; - } - - // print the name of the test suite if not done already and don't execute it - if (p->list_test_suites) { - if ((testSuitesPassingFilt.count(tc.m_test_suite) == 0) && - tc.m_test_suite[0] != '\0') { - queryResults.push_back(&tc); - testSuitesPassingFilt.insert(tc.m_test_suite); - p->numTestSuitesPassingFilters++; - } - continue; - } - - // execute the test if it passes all the filtering - { - p->currentTest = &tc; - - p->failure_flags = TestCaseFailureReason::None; - p->seconds = 0; - - // reset atomic counters - p->numAssertsFailedCurrentTest_atomic = 0; - p->numAssertsCurrentTest_atomic = 0; - - p->subcasesPassed.clear(); - - DOCTEST_ITERATE_THROUGH_REPORTERS(test_case_start, tc); - - p->timer.start(); - - bool run_test = true; - - do { - // reset some of the fields for subcases (except for the set of fully - // passed ones) - p->should_reenter = false; - p->subcasesCurrentMaxLevel = 0; - p->subcasesStack.clear(); - - p->shouldLogCurrentException = true; - - // reset stuff for logging with INFO() - p->stringifiedContexts.clear(); - -#ifndef DOCTEST_CONFIG_NO_EXCEPTIONS - try { -#endif // DOCTEST_CONFIG_NO_EXCEPTIONS - // MSVC 2015 diagnoses fatalConditionHandler as unused (because - // reset() is a static method) - DOCTEST_MSVC_SUPPRESS_WARNING_WITH_PUSH( - 4101) // unreferenced local variable - FatalConditionHandler fatalConditionHandler; // Handle signals - // execute the test - tc.m_test(); - fatalConditionHandler.reset(); - DOCTEST_MSVC_SUPPRESS_WARNING_POP -#ifndef DOCTEST_CONFIG_NO_EXCEPTIONS - } - catch (const TestFailureException&) { - p->failure_flags |= TestCaseFailureReason::AssertFailure; - } - catch (...) 
{ - DOCTEST_ITERATE_THROUGH_REPORTERS( - test_case_exception, {translateActiveException(), false}); - p->failure_flags |= TestCaseFailureReason::Exception; - } -#endif // DOCTEST_CONFIG_NO_EXCEPTIONS - - // exit this loop if enough assertions have failed - even if there are - // more subcases - if (p->abort_after > 0 && - p->numAssertsFailed + p->numAssertsFailedCurrentTest_atomic >= - p->abort_after) { - run_test = false; - p->failure_flags |= TestCaseFailureReason::TooManyFailedAsserts; - } - - if (p->should_reenter && run_test) - DOCTEST_ITERATE_THROUGH_REPORTERS(test_case_reenter, tc); - if (!p->should_reenter) - run_test = false; - } while (run_test); - - p->finalizeTestCaseData(); - - DOCTEST_ITERATE_THROUGH_REPORTERS(test_case_end, *g_cs); - - p->currentTest = nullptr; - - // stop executing tests if enough assertions have failed - if (p->abort_after > 0 && p->numAssertsFailed >= p->abort_after) - break; - } - } - - if (!query_mode) { - DOCTEST_ITERATE_THROUGH_REPORTERS(test_run_end, *g_cs); - } else { - QueryData qdata; - qdata.run_stats = g_cs; - qdata.data = queryResults.data(); - qdata.num_data = unsigned(queryResults.size()); - DOCTEST_ITERATE_THROUGH_REPORTERS(report_query, qdata); - } - - return cleanup_and_return(); -} - -IReporter::~IReporter() = default; - -int -IReporter::get_num_active_contexts() -{ - return detail::g_infoContexts.size(); -} -const IContextScope* const* -IReporter::get_active_contexts() -{ - return get_num_active_contexts() ? &detail::g_infoContexts[0] : nullptr; -} - -int -IReporter::get_num_stringified_contexts() -{ - return detail::g_cs->stringifiedContexts.size(); -} -const String* -IReporter::get_stringified_contexts() -{ - return get_num_stringified_contexts() ? &detail::g_cs->stringifiedContexts[0] - : nullptr; -} - -namespace detail { -void -registerReporterImpl( - const char* name, int priority, reporterCreatorFunc c, bool isReporter) -{ - if (isReporter) - getReporters().insert( - reporterMap::value_type(reporterMap::key_type(priority, name), c)); - else - getListeners().insert( - reporterMap::value_type(reporterMap::key_type(priority, name), c)); -} -} // namespace detail - -} // namespace doctest - -#endif // DOCTEST_CONFIG_DISABLE - -#ifdef DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN -DOCTEST_MSVC_SUPPRESS_WARNING_WITH_PUSH( - 4007) // 'function' : must be 'attribute' - see issue #182 -int -main(int argc, char** argv) -{ - return doctest::Context(argc, argv).run(); -} -DOCTEST_MSVC_SUPPRESS_WARNING_POP -#endif // DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN - -DOCTEST_CLANG_SUPPRESS_WARNING_POP -DOCTEST_MSVC_SUPPRESS_WARNING_POP -DOCTEST_GCC_SUPPRESS_WARNING_POP - -DOCTEST_SUPPRESS_COMMON_WARNINGS_POP - -#endif // DOCTEST_LIBRARY_IMPLEMENTATION -#endif // DOCTEST_CONFIG_IMPLEMENT diff --git a/src/c++/perf_analyzer/fifo_ctx_id_tracker.h b/src/c++/perf_analyzer/fifo_ctx_id_tracker.h deleted file mode 100644 index 750fc63b8..000000000 --- a/src/c++/perf_analyzer/fifo_ctx_id_tracker.h +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#pragma once - -#include "base_queue_ctx_id_tracker.h" - -namespace triton { namespace perfanalyzer { - -// Context ID Tracker that reuses IDs in a roughly round-robin manner using a -// FIFO -// -class FifoCtxIdTracker : public BaseQueueCtxIdTracker { - public: - FifoCtxIdTracker() = default; - void Reset(size_t count) override - { - Clear(); - - for (size_t i = 0; i < count; ++i) { - free_ctx_ids_.push(i); - } - } -}; - -}}; // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/genai-perf/.gitignore b/src/c++/perf_analyzer/genai-perf/.gitignore deleted file mode 100644 index d4f588edf..000000000 --- a/src/c++/perf_analyzer/genai-perf/.gitignore +++ /dev/null @@ -1 +0,0 @@ -artifacts/ diff --git a/src/c++/perf_analyzer/genai-perf/README.md b/src/c++/perf_analyzer/genai-perf/README.md deleted file mode 100644 index 53e510541..000000000 --- a/src/c++/perf_analyzer/genai-perf/README.md +++ /dev/null @@ -1,558 +0,0 @@ - - -# GenAI-Perf - -GenAI-Perf is a command line tool for measuring the throughput and latency of -generative AI models as served through an inference server. -For large language models (LLMs), GenAI-Perf provides metrics such as -[output token throughput](#output_token_throughput_metric), -[time to first token](#time_to_first_token_metric), -[inter token latency](#inter_token_latency_metric), and -[request throughput](#request_throughput_metric). -For a full list of metrics please see the [Metrics section](#metrics). - -Users specify a model name, an inference server URL, the type of inputs to use -(synthetic or from a dataset), and the type of load to generate (number of -concurrent requests, request rate). - -GenAI-Perf generates the specified load, measures the performance of the -inference server, and reports the metrics in a simple table as console output. -The tool also logs all results in CSV and JSON files that can be used to derive -additional metrics and visualizations. The inference server must already be -running when GenAI-Perf is run. 
- -You can use GenAI-Perf to run performance benchmarks on -- [Large Language Models](docs/tutorial.md) -- [Vision Language Models](docs/multi_modal.md) -- [Embedding Models](docs/embeddings.md) -- [Ranking Models](docs/rankings.md) -- [Multiple LoRA Adapters](docs/lora.md) - -> [!Note] -> GenAI-Perf is currently in early release and under rapid development. While we -> will try to remain consistent, command line options and functionality are -> subject to change as the tool matures. - -
- - - ## Installation - -The easiest way to install GenAI-Perf is through the -[Triton Server SDK container](https://ngc.nvidia.com/catalog/containers/nvidia:tritonserver). -Install the latest release using the following command: - -```bash -export RELEASE="yy.mm" # e.g. export RELEASE="24.06" - -docker run -it --net=host --gpus=all nvcr.io/nvidia/tritonserver:${RELEASE}-py3-sdk - -# Check out the genai-perf command inside the container: -genai-perf --help -``` - -
- - Alternatively, to install from source: - -Since GenAI-Perf depends on Perf Analyzer, -you'll need to install the Perf Analyzer binary: - -### Install Perf Analyzer (Ubuntu, Python 3.8+) - -**NOTE**: you must already have CUDA 12 installed -(check out the [CUDA installation guide](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html)). - -```bash -pip install tritonclient - -apt update && apt install -y --no-install-recommends libb64-0d libcurl4 -``` - -You can also build Perf Analyzer [from source](../docs/install.md#build-from-source). - -### Install GenAI-Perf from source - -```bash -git clone https://github.com/triton-inference-server/client.git && cd client - -pip install -e . -``` - -
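After either installation path, a quick sanity check is to run both tools' help output; this sketch assumes the steps above completed without errors and that the `perf_analyzer` binary from the `tritonclient` package is on your `PATH`:

```bash
# Verify the Perf Analyzer binary is installed and runnable
perf_analyzer --help

# Verify the GenAI-Perf CLI entry point is installed
genai-perf --help
```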
- -
- - - ## Quick Start - -In this quick start, we will use GenAI-Perf to run performance benchmarking on -the GPT-2 model running on Triton Inference Server with a TensorRT-LLM engine. - -### Serve GPT-2 TensorRT-LLM model using Triton CLI - -You can follow the [quickstart guide](https://github.com/triton-inference-server/triton_cli?tab=readme-ov-file#serving-a-trt-llm-model) -on the Triton CLI GitHub repo to run the GPT-2 model locally. -The full instructions are copied below for convenience: - -```bash -# This container comes with all of the dependencies for building TRT-LLM engines -# and serving the engine with Triton Inference Server. -docker run -ti \ - --gpus all \ - --network=host \ - --shm-size=1g --ulimit memlock=-1 \ - -v /tmp:/tmp \ - -v ${HOME}/models:/root/models \ - -v ${HOME}/.cache/huggingface:/root/.cache/huggingface \ - nvcr.io/nvidia/tritonserver:24.05-trtllm-python-py3 - -# Install the Triton CLI -pip install git+https://github.com/triton-inference-server/triton_cli.git@0.0.8 - -# Build TRT LLM engine and generate a Triton model repository pointing at it -triton remove -m all -triton import -m gpt2 --backend tensorrtllm - -# Start Triton pointing at the default model repository -triton start -``` - -### Running GenAI-Perf - -Now we can run GenAI-Perf from the Triton Inference Server SDK container: - -```bash -export RELEASE="yy.mm" # e.g. export RELEASE="24.06" - -docker run -it --net=host --rm --gpus=all nvcr.io/nvidia/tritonserver:${RELEASE}-py3-sdk - -# Run GenAI-Perf in the container: -genai-perf profile \ -  -m gpt2 \ -  --service-kind triton \ -  --backend tensorrtllm \ -  --num-prompts 100 \ -  --random-seed 123 \ -  --synthetic-input-tokens-mean 200 \ -  --synthetic-input-tokens-stddev 0 \ -  --streaming \ -  --output-tokens-mean 100 \ -  --output-tokens-stddev 0 \ -  --output-tokens-mean-deterministic \ -  --tokenizer hf-internal-testing/llama-tokenizer \ -  --concurrency 1 \ -  --measurement-interval 4000 \ -  --profile-export-file my_profile_export.json \ -  --url localhost:8001 -``` - -Example output: - -``` -                              LLM Metrics -┏━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━┳━━━━━━━━┳━━━━━━━━┳━━━━━━━━┳━━━━━━━━┳━━━━━━━━┓ -┃                Statistic ┃    avg ┃    min ┃    max ┃    p99 ┃    p90 ┃    p75 ┃ -┡━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━╇━━━━━━━━╇━━━━━━━━╇━━━━━━━━╇━━━━━━━━╇━━━━━━━━┩ -│ Time to first token (ms) │  11.70 │   9.88 │  17.21 │  14.35 │  12.01 │  11.87 │ -│ Inter token latency (ms) │   1.46 │   1.08 │   1.89 │   1.87 │   1.62 │   1.52 │ -│     Request latency (ms) │ 161.24 │ 153.45 │ 200.74 │ 200.66 │ 179.43 │ 162.23 │ -│   Output sequence length │ 103.39 │  95.00 │ 134.00 │ 120.08 │ 107.30 │ 105.00 │ -│    Input sequence length │ 200.01 │ 200.00 │ 201.00 │ 200.13 │ 200.00 │ 200.00 │ -└──────────────────────────┴────────┴────────┴────────┴────────┴────────┴────────┘ -Output token throughput (per sec): 635.61 -Request throughput (per sec): 6.15 -``` - -See [Tutorial](docs/tutorial.md) for additional examples. - -
- - - ## Visualization - -GenAI-Perf can also generate various plots that visualize the performance of the -current profile run. This is disabled by default but users can easily enable it -by passing the `--generate-plots` option when running the benchmark: - -```bash -genai-perf profile \ -  -m gpt2 \ -  --service-kind triton \ -  --backend tensorrtllm \ -  --streaming \ -  --concurrency 1 \ -  --generate-plots -``` - -This will generate a [set of default plots](docs/compare.md#example-plots) such as: -- Time to first token (TTFT) analysis -- Request latency analysis -- TTFT vs Input sequence lengths -- Inter token latencies vs Token positions -- Input sequence lengths vs Output sequence lengths - - -### Using `compare` Subcommand to Visualize Multiple Runs - -The `compare` subcommand in GenAI-Perf lets users compare multiple -profile runs and visualize the differences through plots. - -#### Usage -Assuming the user has two profile export JSON files, -`profile1.json` and `profile2.json`, -they can run the `compare` subcommand using the `--files` option: - -```bash -genai-perf compare --files profile1.json profile2.json -``` - -Executing the above command will perform the following actions under the -`compare` directory: -1. Generate a YAML configuration file (e.g. `config.yaml`) containing the -metadata for each plot generated during the comparison process. -2. Automatically generate the [default set of plots](docs/compare.md#example-plots) -(e.g. TTFT vs. Input Sequence Lengths) that compare the two profile runs. - -``` -compare -├── config.yaml -├── distribution_of_input_sequence_lengths_to_output_sequence_lengths.jpeg -├── request_latency.jpeg -├── time_to_first_token.jpeg -├── time_to_first_token_vs_input_sequence_lengths.jpeg -├── token-to-token_latency_vs_output_token_position.jpeg -└── ... -``` - -#### Customization -Users can iteratively edit the generated YAML configuration file to suit -their needs, adjusting the plots as they prefer, and then rerun the command -with the `--config` option followed by the path to the modified -configuration file: - -```bash -genai-perf compare --config compare/config.yaml -``` - -This regenerates the plots based on the updated configuration settings, -letting users refine the visual representation of the comparison results. - -See [Compare documentation](docs/compare.md) for more details. - -
- - - ## Model Inputs - -GenAI-Perf supports model input prompts from either synthetically generated -inputs, or from the HuggingFace -[OpenOrca](https://huggingface.co/datasets/Open-Orca/OpenOrca) or -[CNN_DailyMail](https://huggingface.co/datasets/cnn_dailymail) datasets. This is -specified using the `--input-dataset` CLI option. - -When the dataset is synthetic, you can specify the following options: -* `--num-prompts <int>`: The number of unique prompts to generate as stimulus, >= 1. -* `--synthetic-input-tokens-mean <int>`: The mean of number of tokens in the -  generated prompts when using synthetic data, >= 1. -* `--synthetic-input-tokens-stddev <int>`: The standard deviation of number of -  tokens in the generated prompts when using synthetic data, >= 0. -* `--random-seed <int>`: The seed used to generate random values, >= 0. - -When the dataset comes from HuggingFace, you can specify the following -options: -* `--input-dataset {openorca,cnn_dailymail}`: HuggingFace dataset to use for -  benchmarking. -* `--num-prompts <int>`: The number of unique prompts to generate as stimulus, >= 1. - -When the dataset comes from a file, you can specify the following -options: -* `--input-file <path>`: The input file containing the prompts to -  use for benchmarking as JSON objects. - -For any dataset, you can specify the following options: -* `--output-tokens-mean <int>`: The mean number of tokens in each output. Ensure -  the `--tokenizer` value is set correctly, >= 1. -* `--output-tokens-stddev <int>`: The standard deviation of the number of tokens -  in each output. This is only used when output-tokens-mean is provided, >= 1. -* `--output-tokens-mean-deterministic`: When using `--output-tokens-mean`, this -  flag can be set to improve precision by setting the minimum number of tokens -  equal to the requested number of tokens. This is currently supported with the -  Triton service-kind. Note that there is still some variability in the -  requested number of output tokens, but GenAI-Perf makes a best-effort attempt -  with your model to get the right number of output tokens. - -You can optionally set additional model inputs with the following option: -* `--extra-inputs <input_name>:<value>`: An additional input for use with the -  model with a singular value, such as `stream:true` or `max_tokens:5`. This -  flag can be repeated to supply multiple extra inputs; a combined example -  follows below. - -
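As a sketch of how these input options combine, the hypothetical invocation below benchmarks with 100 synthetic prompts of roughly 200 tokens each and repeats `--extra-inputs` for two extra request fields (the specific values are illustrative, taken from the examples above, not a recommended configuration):

```bash
genai-perf profile \
  -m gpt2 \
  --num-prompts 100 \
  --random-seed 123 \
  --synthetic-input-tokens-mean 200 \
  --synthetic-input-tokens-stddev 0 \
  --extra-inputs max_tokens:5 \
  --extra-inputs stream:true
```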
- - - ## Metrics - -GenAI-Perf collects a diverse set of metrics that capture the performance of -the inference server. - -| Metric | Description | Aggregations | -| - | - | - | -| Time to First Token | Time between when a request is sent and when its first response is received, one value per request in benchmark | Avg, min, max, p99, p90, p75 | -| Inter Token Latency | Time between intermediate responses for a single request divided by the number of generated tokens of the latter response, one value per response per request in benchmark | Avg, min, max, p99, p90, p75 | -| Request Latency | Time between when a request is sent and when its final response is received, one value per request in benchmark | Avg, min, max, p99, p90, p75 | -| Output Sequence Length | Total number of output tokens of a request, one value per request in benchmark | Avg, min, max, p99, p90, p75 | -| Input Sequence Length | Total number of input tokens of a request, one value per request in benchmark | Avg, min, max, p99, p90, p75 | -| Output Token Throughput | Total number of output tokens from benchmark divided by benchmark duration | None–one value per benchmark | -| Request Throughput | Number of final responses from benchmark divided by benchmark duration | None–one value per benchmark | - -
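To make the two throughput definitions concrete, here is a rough consistency check against the sample output in the Quick Start above (the exact benchmark duration is not printed, so the numbers are approximate):

```
total output tokens ≈ 100 requests × 103.39 avg output sequence length ≈ 10,339 tokens
benchmark duration  ≈ 10,339 tokens / 635.61 output tokens per sec     ≈ 16.3 sec
request throughput  ≈ 100 requests / 16.3 sec                          ≈ 6.15 requests per sec
```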
- - - ## Command Line Options - -##### `-h` -##### `--help` - -Show the help message and exit. - -### Endpoint Options: - -##### `-m <list>` -##### `--model <list>` - -The names of the models to benchmark. -A single model is recommended, unless you are -[profiling multiple LoRA adapters](docs/lora.md). (default: `None`) - -##### `--model-selection-strategy {round_robin, random}` - -When multiple models are specified, this is how a specific model -is assigned to a prompt. Round robin means that each model receives -a request in order. Random means that assignment is uniformly random. -(default: `round_robin`) - -##### `--backend {tensorrtllm,vllm}` - -When using the "triton" service-kind, this is the backend of the model. For the -TRT-LLM backend, you currently must set `exclude_input_in_output` to true in the -model config to not echo the input tokens in the output. (default: `tensorrtllm`) - -##### `--endpoint <str>` - -Set a custom endpoint that differs from the OpenAI defaults. (default: `None`) - -##### `--endpoint-type {chat,completions,embeddings,rankings}` - -The endpoint-type to send requests to on the server. This is only used with the -`openai` service-kind. (default: `None`) - -##### `--service-kind {triton,openai}` - -The kind of service perf_analyzer will generate load for. In order to use -`openai`, you must specify an api via `--endpoint-type`. (default: `triton`) - -##### `--streaming` - -An option to enable the use of the streaming API. (default: `False`) - -##### `-u <url>` -##### `--url <url>` - -URL of the endpoint to target for benchmarking. (default: `None`) - -### Input Options - -##### `-b <int>` -##### `--batch-size <int>` - -The batch size of the requests GenAI-Perf should send. -This is currently only supported with the -[embeddings](docs/embeddings.md) and -[rankings](docs/rankings.md) endpoint types. -(default: `1`) - -##### `--extra-inputs <input_name>:<value>` - -Provide additional inputs to include with every request. You can repeat this -flag for multiple inputs. Inputs should be in an input_name:value format. -Alternatively, a string representing a json formatted dict can be provided. -(default: `None`) - -##### `--input-dataset {openorca,cnn_dailymail}` - -The HuggingFace dataset to use for prompts. -(default: `openorca`) - -##### `--input-file <path>` - -The input file containing the prompts to use for profiling. -Each line should be a JSON object with a 'text_input' field in JSONL format. -Example: `{"text_input": "Your prompt here"}` - -##### `--num-prompts <int>` - -The number of unique prompts to generate as stimulus. (default: `100`) - -##### `--output-tokens-mean <int>` - -The mean number of tokens in each output. Ensure the `--tokenizer` value is set -correctly. (default: `-1`) - -##### `--output-tokens-mean-deterministic` - -When using `--output-tokens-mean`, this flag can be set to improve precision by -setting the minimum number of tokens equal to the requested number of tokens. -This is currently supported with the Triton service-kind. Note that there is -still some variability in the requested number of output tokens, but GenAI-Perf -makes a best-effort attempt with your model to get the right number of output -tokens. (default: `False`) - -##### `--output-tokens-stddev <int>` - -The standard deviation of the number of tokens in each output. This is only used -when `--output-tokens-mean` is provided. (default: `0`) - -##### `--random-seed <int>` - -The seed used to generate random values. 
(default: `0`) - -##### `--synthetic-input-tokens-mean <int>` - -The mean of number of tokens in the generated prompts when using synthetic -data. (default: `550`) - -##### `--synthetic-input-tokens-stddev <int>` - -The standard deviation of number of tokens in the generated prompts when -using synthetic data. (default: `0`) - -### Profiling Options - -##### `--concurrency <int>` - -The concurrency value to benchmark. (default: `None`) - -##### `--measurement-interval <int>` -##### `-p <int>` - -The time interval used for each measurement in milliseconds. Perf Analyzer -will sample the specified time interval and take measurements over the requests -completed within that time interval. (default: `10000`) - -##### `--request-rate <float>` - -Sets the request rate for the load generated by Perf Analyzer. (default: `None`) - -##### `-s <float>` -##### `--stability-percentage <float>` - -The allowed variation in latency measurements when determining if a result is -stable. The measurement is considered stable if the ratio of max / min from -the 3 most recent measurements is within (stability percentage) in terms of both -inferences per second and latency. (default: `999`) - -### Output Options - -##### `--artifact-dir` - -The directory to store all the (output) artifacts generated by GenAI-Perf and -Perf Analyzer. (default: `artifacts`) - -##### `--generate-plots` - -An option to enable the generation of plots. (default: `False`) - -##### `--profile-export-file <path>` - -The path where the perf_analyzer profile export will be generated. By default, -the profile export will be to `profile_export.json`. The genai-perf file will be -exported to `<profile_export_file>_genai_perf.csv`. For example, if the profile -export file is `profile_export.json`, the genai-perf file will be exported to -`profile_export_genai_perf.csv`. (default: `profile_export.json`) - -### Other Options - -##### `--tokenizer <str>` - -The HuggingFace tokenizer to use to interpret token metrics from prompts and -responses. (default: `hf-internal-testing/llama-tokenizer`) - -##### `-v` -##### `--verbose` - -An option to enable verbose mode. (default: `False`) - -##### `--version` - -An option to print the version and exit. - -
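Putting the endpoint options together, a hypothetical run against an OpenAI-compatible chat endpoint might look like the sketch below; per the `--service-kind` description above, `openai` requires an `--endpoint-type`, and the model name, port, and concurrency here are placeholders:

```bash
genai-perf profile \
  -m gpt2 \
  --service-kind openai \
  --endpoint-type chat \
  --streaming \
  --concurrency 1 \
  --url localhost:8000
```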
- - - ## Known Issues - -* GenAI-Perf can be slow to finish if a high request-rate is provided -* Token counts may not be exact diff --git a/src/c++/perf_analyzer/genai-perf/docs/assets/distribution_of_input_sequence_lengths_to_output_sequence_lengths.jpeg b/src/c++/perf_analyzer/genai-perf/docs/assets/distribution_of_input_sequence_lengths_to_output_sequence_lengths.jpeg deleted file mode 100644 index 1f9b2cba6d5a1b31e2ee56628f4d0760c5dcf1d3..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 [base85-encoded binary image data omitted]
z|HxjlLnG}+o*Lg@qLT1)$rLJk3lftVy~VpKB2{&3m_jCdSvh`V`nV(^%W(bWMrd&I zyi;yrPN}wQPE1)5Tvc1+zzE*n{L{BdIFeAo(AdeLxHRp2($DiV$UJ7goJ3-izQFWTc)`CwYY0Uv12MAcDsS!eJ6tofXOej zk?v9paHJd~*31nZ2}g>vE3+AwN&@f`8#Njquc8gN-zV^;(jEm$gK#7^fzHhIjrc8a zC5$%NkB{9we~cIx;iJsz7~$6gqXgGUz32w9@lGY&+3D4M20Q_7twzn#1=*#JZ663X z^Ch51VN=88ddYP_hI>&=&^)t&mW~RXv~mG>ldZBxVkpkDuMVn74VA?t@O&J;x3|wo z)a(Ht>b%}qVdb9}QL5=hGkv-!vhhE;*K;p{S*?=c=53^g5sBjx3I1Xt=L=eD%>#k%KtTN}+e^rk_eI@t0(zxG-S}l!M)@hXrZt2Fx74v=u}C*8KQj^r z|Ncd#KLY(;uRy)-`y86phl1R@;gb<4dTc?k?7fYRG*MCo57zX|Pmc^WMNsVQ;M5lG zLzH=0i=w9{^DXZKV%^zczyvHE&0T5Q?EYPZ_?H=Y5mA3ZSI&tJ^H7ctMJ`856RLIuogt-7=x=hJCN?sj8>CT_?@w;=p~|RYaMD^B^JzA3@z=Ix^2X z`$4F%Nt}k^!pgFyj=1ZSjMVyYpFb>G=ry!It^4Dw#}%dE;WRbCwnkM-FWzkgUQDd~HG( zLtS2?Ypoux;Pl4sTZ{WIIzu6jg%RPX0SNAfL2D>duo<2i=Zu{Pnc7?nZwM~2+Aw4d zoqCQ}x5LZR%0FDpV>cjfN^qi;@y(W_-fQ9GaaPcY8nRLMtQ_4ow`SyQxwB+q%Zo(T zvK_pEAk56EtxJ_f*pSL>Ky{um)mTC~mmF`uJ`jatzPHnNBeE-gh|X%{tynrb=YUo< z6z)+NC|O*uu(+WB5pVF(e-)G%erMcb=0Y8{62wDRI77IyR+PFF6Sl; ztjCm(hi@@MTl{HigqWaZucR`+)5$Khl{l@H1+C)s)Ym`@M{%++i^S`-JDUY{Cmgm$ zN-bm$H{*aH@?lYvS^uA@vEyUv4K<`S@&vvC19ece<_t^mW6z|7^99pZ)}W4c%p3J~ z7YxcjpFM;g);eDhxdWrdxHl7(`QoCXrUPa)N4K!UR$+{xbex|;Z@-5PXmfxwm`C}n z0y;fr$7RsFQ1i?Roh#^l8Tv#vknsH|i`jYq}h%dyHr4pmzcC`X+;irtfISOy`Vqb^Fil2 z-Cy+>8o6tT&QrhDz}iSyh)FanpwY0;y&IDrM(hl&- z5+We&KhQuwc36NVcY%Gutf8J9eP4jzylYkWOjml%s`D1-4A>Qm!otfEBTPFwmm-*P z$GBXoeV^MMlOYsTnbh5@!gYc>K5e%ZWb9XqYWVz!V@u0ypDahe&-E$G7FtX51*|tG zPfDZHI+4d$gkoE>9VPF-@cpnj8P6hmhi~!-SIWfqY3Dpl4_pisib|cCR|wuda&y{| z*GFxVlBu%|tG6;)TihUdSWM(YhXme{$d~yuu_QX^U0n(3gi>i3heO{l5f6F=V~!Go zk~Ly)&}e}(r9D2HvQ?Y$do-9c@!9I-jJl)osvM$F=$GatLpDQ8%|ROl@0?|vSDwdE zfu5s9mvXG-K&yXX*(AB$qMhB)`xx$#6gH!9|8GDK1bhA~v^a%LxJ8AO@WS%)8tpwN zsVnSwwc#~!O=O$%PLV1|l(^N>(Eu3`H<{->=vU+&rU|hN1-rpleZCMqDAPsd#wR2NHY~G8)2>#}9)- z#_a907F0!ZR8kkS{0Mf;q-pQ9QLA&&pj^IUO1*Bv3o3AMyZ>CzRxe>-G_y=KT9xYe ziUG6V)01bQ+R!W3;IYF9R#c2q2Ma?gp|ntGpUUZck9^4aM&u8g#nq45g`XRsI!l#_ zEwTtINaw7gAxE;lw5oZrQL45(wnK4^m3qUd&^-&GmI-=*r11*+VAs*dT5Tvf(vqKsbFyjj#@@y}^q`B@J)SxBA(^^ZA2eXi-U#aYiuYv9;r za`jWEmX_A&0C$xuC{@bI@!3slKo{b1|-(OS|WducaTlYhTaI8g;zLHwp^{6+-U!P5VI+cw#!2GC;x6^pH#o?Elv6%K)4Vb;JXH=UGJI0M z{Xr!f#vsEmu83g4?e6=vhC1aAIcFMMl*}dE6NK#aN#vxN%qt7Xw)De+eAI=}3z5G* zk*wqom^>;Q&6P?tR8<4Ug)MZZKEvA;{ycdDt?atkTQlWa4jJQxQ=1NOc0k!<9jHOn zMnpx^IL{^71k;PXjzR0Hz9rMxh`psSJ)QIBP=Q-_mup*o&kTstCXb8o)>9&C9+rt* z=L-me@;tA&DU@H8_tyt*#das5L@5G@AJNScS;8tNPL- zQ*w=HO>N#k+h26To5T>s68&UMB=VNHu~0|^RI9Vb!YWjWL(xQy^aH^-DO+u7Z~h6h zyQP?74A;W6Fj_b*--m%-u zOw0Eg4&_W`?__6t?h?f{GDD8snlxIn{CS25tb7(qNF=LMQQ(_|aUKmr)lMaO+yYIU z<-VVKM=rIeHXb643q|ac<=NWinx3j&VVE^~X_Bgr!%3iBh()gR2F!eU1|3=~$;Rg9 zcKL8R>ktGOTw#7UIzf{CLq(x|<`8ko-G}_u6@P?H{yRf^_ACQ~ny|%*`>ybL+pOv~ z{{Ai97N#cl3X|Uhv^7%)pMqs!*hOL`K4?8)hQ2$EzuviDKc88Wk5?cdNLd6)(uZoh zOxu@^)8EMUgq(UoYuOz95a4nG?FMfVb2$+hnzQ*rHA~t@;+A<%ujS}WqFTffdZE(f z48WdXAA*97&9gjAX+^KXnj^+E<4{dT*+@erP^zhWfIq``^r#QMPU&EI%}4qM{ldsB zFh4g}zi;eg3bGVlCU^e%46-cxg?FgXzSElN7FPA%AYHcxb4&COnka-EFtAQ*!6K{7 z1c^?EoAwlCMb3t-PFux9pcgXg8-^)BYVCecbKRbmNINjgl}S0FLeU9P*l2H_sf*1UtlV=7PUoFm*n+~>cl2MUWhZ$aEXJ~+{K|UK`-%k z+D%^DYVK|Cya~6}#eqOgSt3Mb+0;s-97y#t%ZqK2Qliji_c+9i;FYsrN$Pu_zHQpd z+M*5wYeEx@7BJp~Xhp$DY5c+UA&Pbw_=PyGq8Uvzqn_4UoWjc=p~Oc-V~k6-IlpMX z83el5^qZ`T^z?P($UXbpH1BpPS}RMjV-)A0t5eQPsE~b>`%&$UQ()P+0-ajShpmvR zCE(n|a&#biP}#Q`n?Ud?bF^Q)0@>LqKS4W`UXkmwK)+KLOt84#cQoXM zDcSgl&aVcdA{IEHx|qu zM;99-1~3H$O;Sg1m$;(B(gMXV%YYkLn;OISgh6IiDv<=*49?}{{GBlBCUntQOqXlp zibR{Wr(}u%wA%OQqCrzuz!5`dDPw~cr*m3o4enYIMHG8|d5aNApM8r?5nu^_^hM-Sb)6sB9TZlN{+uHTnJEu_Z`(}r2;J4Qy<$fQ>< 
z3giqvuj;qPBqws`oNphNctWXNuvUEZDlpu=F_OQY9%~bxrOnHC&?y!dfMt|Kq*+fF z(^s4FBx-R_1tdf{mTrF~RS@|=&qJ74OUHO4=So4;3=KHJMdwAO4C}r3Dau&=cC{&` zA*&8-U;Uw%hm$}#1ZQt+QVx=f7pq$9FvZC};F zbDyb9r$ZaapRUsXy)=*S{&0S$+{2IPfwjEG@jlEAIx>1emO9?&Kl2>YceNR9EW!-z zFox;bIyR+cZF*8~fp*E#^O?yf=+7RLo#&?++8U!BC@?_N9tn3ebF+9%OT1;pEt@0| z(aP{j41&`s6q#;pY66!-f5}YJYaVN!KoL|hQxJ!;aj+%1{aosN5;Hh&Q05kLXW&~v zoe-oeE88*)8ye*U;o#LGhWfH;%f5G)LeGh#qiWjpzYLhV9bxq`uD$jXPTs}S6H2;| zeH)*ZVLZ|lq+4z6rm{nk%B72y%|#W_jQ$59wtI6)_wPd`}G#9(Vy6l<^)DTC-?sLwDp zcp7i!Su^QeYJzt0T1qVI61c*zr?%+86-m3H%Bzc`PGbd*&PLW-AWK0|YHC-r%$W+o ztUB~TTE>;;nbL{ja3gLzJ}0U^;+S5{EWE#?$I4y}RusqTCKXXByga7ao|Mqy{Iv&FDg5lTNi6z6U~ms9MH=#Hwo)IvqSXOD=GOOs=&1rwr!vE z`x9+ygzZGj=;KfMPSWRX#{8HpKt8cFXaAlq z^}sD^22ni8@jKwV$fN(78UO$T=FYL_E4SEs<-ee&_1jt}PUH_6?RDnBQ%>xd@st9Ql(?aw65WbcI& zr}TTrihsia0|r%8`R%@=?`Hrio?@+W7EEy+OL)v*FuCoLojC1Ho+)7BDpqnW#9 zFD*AI{(@A7&;A-2`LRY$hm2=XS=Y@V*1w?x|E;WklWTS-Q^67O!?}OWM@>m`*t==- zp2Lr(3PDemH6QwvQX^mQfK-yHVibF?@1gi;L~H){6mj`Ctn|}Yyb&Oas$Zj@C;hMb z&-usHi85lwXMl}AnHM%Ndd@^mYkax8L>$PKDR)1rmyh&*IK%nR8s)#Brp<+tWi{2zWj(^A5F8Va7?xZ#qxc5v%Hya(Dg z8bX(+SYpWkx=os&vb^Z z(e;L!`Zvc?mh!7YA=mOpB>@*uaeqkn@44I)EoXqW;DLZRm4LeA^sSmh8{hk+MCA7I zsu=imw}+(;FH^VGR(t!zcqOi&N88GwFC&k|yX-VNTar8yjY3q*ZP>jp>9;7z$qVfR z`+|8tH2D=3+i?^AlV~x?{3VzT|3^r@R;zRSx(X`t&e(Wer(D5gTj-H9!Q~X`z_?>v zW2<{IoDmjU32T%4i_Wj9BLL*brMd z`P;EyQ{#xS20YpwIjv?<}b?aq+!b${~W*uI$D`gFCJ{NAynqlp2 zp(&OIWyjou#EsbLEcPoBly8>;^i@WjRDYQV&dI-OGRwfnZ9kPabtx}=iDv;%S1R?{ zYn_{yO^G9Gh4$Ka!PLTys>6|6wE_9XLGxy?9`kRVzRo|Umw=Pb?+8Brvkq)Ana4-t zH7gnLuwq>kqP|AVoKlFGV)|lBP14=owdt%+kR5I}l+H?pImdU>HxAQXlhFFVt;XNb zfuBMCPvzchTi+B(5nJs!96i^m@V{V0!I!@jJLLb>DmuNmvVuSG#O7t5bdBH1tcP#< zI-H+(>5&-yoHO3L^DovB*Fug*%a14&1a5ZPLrXh}@kY9JA!Mp=_tWB{wW{4E<@5#+ zU>>n@?{xHvsJn$pIO^T#zp%YqgOh@`zbuWeV{fw&_PL9@(nT z%WMD>~{8LYNT>>7%#VGm6S0s9`Fnx*w**$oiYkl+9CaM*+Yg#*6wUzYNJ z`F>9KZmnkJlu#+xX4&SdkOk`TJ4su?S)0evh11gu?W3f!QR_7yP5!vJ589s_pYE>) z3sZFL>Y=&y#XUfLWSGQq)Ep~6afrItkKff;JLrWb~=9s*m__f4;@AL)a@CLOAqudnO!?K8fpKoW~uZ)pg03~oBnS-*e0?td)&=;4I|v6|ljF`qy{$6AI~+e{Q28~-t@}}uW#q>_XMo95 zehUGkgMk;gnd8KJuFLv#>e0r~ANOUt z7iTlxwq3hJdkH$~(v#@4x3;2)j7vCPGOO9|KXt1A!u=)V{9EQTs78f~og|a)(>MEN zSMLH%`?fsZH~i-iY31n82&<{f=f5F?DYtr$`#QKZg#{G1B%9X-BzVaOpUzjTMfchb zONw{8;$y>N!=ge6t2HfS3X)9=F7PuzcBZNvawu5&yB)2I4sjRAOtql1l8*qnR``?? 
zRevaY5S@799$kAV@<^UDS^A~9Z0A&rME)4T1;^@));%t>JOh|0EGq0Wo5~aNd2`u# zc0RfyT#<6C_GmXFNRt2a>ZAB1{VQy3HVV6Sr*UThovq>66W239))}DxnN#C@@X~Jz zN6+`83WepM3%oxzbS4FSq!P$@vF=LvXz=S6VkPH^-Cp;8dIMR8V!>zg1&}etM~ry$ zY_b~?O;Ld+#=#!}tmFSftbA!(5Fn3|{q!Fm{wiEb*uSmCFeY_5A{=l*=hu^Kx1}Ds za<3=@07}1#=RaZb%KS5o$vkb8%;mei$1zyXCHcxt6BNkX<((n=eneu*_Yt7`{i|O% zS0nw|%u-KOj!N-6PCV6?Ksi{UR!~;p$mpR&`~rQdpDq`{JSj(x%l(}bfa%lsUpRlI z3k&#w?(Cyt1)e)n|GFhNuh3OdFqnXF=m1Dz#8Jq7pyU_%bAFnR z#|10$ORkGeX6VvUN&eIH7n#f>e{2bQ$)vR&GGAkz*8#{CKfCf7Z7w6Q2UcilnE1D) zwgLrJRON&GVUw80f3`^dpF-H*Q0$i&LdkFxL^Xb)iC9Ra7A2V%a6lKN)EQ=1iovOco=o77V za_dww=&FW5Gk{^3n@7r}GkbYGPpce*Dz&H(C4aDU*J)7W=&m01S`x83JNY=`<=K3)2d6c)KhFw7sU zRu`J(iI7$v73q9LFmP6%$V%C|`#tq=zbjeXE6lkF>A|$kK&^iD=vG@Jq*8g5iETB0 zBG7*(!|dR@%e7Jr`Mh@aOjUcz8DPrvz)^1R1l)wB69m1_uG!QU2F@L=eDWPtQi-uvhwi!Xv}~}L00%SHin}EeNpXV>IwEj*y9CzuR1+y&Q_}@ zdCb+88@(nRH5R^da9m7sC^miERGo~d-(;wuoIS^Ix6p+#&WYu6jOEBY7ri*G`{MU) zKiOV}C}Z0S!!;VenzwFcV=0sn-PR(UUS@&PH1)A*+}6D@^9;*fdrSD9v!k zgX~3`mWiC@;1+d9R65;@+?+QF+(x)c+8TLDIh>4*TywOZ*Lo$Fq5r*bTvy9MI@p8+ zNeX&()BO*l@R!&fx4pW9zQg^u5dIsIs_WuH_isODwEzGhzaC$xi_u~vlc6~ODfQK# z&D3K4X*bSYMp5))Yh87O*XE6mAB0G&RcYi%ii( z-*d#7IQR7T7Y*i*3P5!F`U*5*TM#L@BPn1*xIjsWDr*hNind~7v8SPinYgd`DAe2X z>yu~4M9w&PQP27Q{zlEO^SQ}h8>4PTp+nxs6jxrUSQr{J>HgE(DUcsnnEK_eW0Z-I zge{hO$KWDV$5Kz)cu#A8d}!GAeo`!~2V5qh(wb)(!&TkVsnnpwOe0gkF^`7mwP(vfu7FFNFfhT(^e_Y$dC;p z{SLHMZR0tM#Bt$rVD5%)P`59h6l}bz@Fs3AG^+H_S#eROW^&2Ja2zao>H5LH>fUzL z@*Pgv9pB9;VKC2`)q>#9bdo8I~|x09BKc^I{CEi(t3&?3BJO$dUbHIf-1}D zG-o%BDC)-O#WFv!x^HzHFL>wQ9oIPesmIte<)nyJ>y3TJR2HWvOB=E|YP$2{J2O9$ z6o>Q1pNT5!M*_ue z@4?TP!B+pcwRUREZ(Vz9u;pWVh*_p+rxe|ndd^}hlo1xT_w*mP&f;eJo_DysCeaf` zjNkq@#3W{*B%eorlDc>FZ|@;Hl>TAHAkbvjyY>8Rak{)yi)>K^h2&&@NVv)Aqw`Sf zhU77Lmp1!$K4zcX)b5wijYarf3E}4W$KvIs z?CZ=^N|&FxOM+>A+@9J{3kcAC}5qLu2onEY~}V$>0lk%WokcTT){h#5}jwr9@^$m6MrSb|*J|0~fZR z)}0bgg@mdfluIy0haiIJZ)R{43$NYzJiaaF`+4dNV0s3iO`J%W2u!ft&u6e*MI7(6 z$gtFP$kljJ_?V7`X8><{lTUTuuXC59c754w0md#B6>xVU8!RabRrz6kM>X(Q7n1Fs z{X;nbfE&w0drjE=Xwl|*ysk&P^{x?Vo_;(rv%VIn^IECpi1zedl6jfhEJ&Z#vtUA5M4s`_q8%^}OZwnbU| zZz3PJo%`fGaP|7zLAFEd!Z;hx#a|J|YoBwvOf(zWtIbK#bcM}g}%BL#~ z7!7%V_rHTS=>YMmf#H{?>f*kfzajXK-L|-PE@)6gEpyJf>9|2OFLG(htF={sWgn!o ztaY2^DPfK{+lZQC^~9Ge20uuH9?=~8rR=@^7t&`zz*d-%tLq`tB(sKn3?-Mk|2yS> z6LLw63%p`l7*~@p$~QfSw%s7GQ;9|})v<9ucZl;Gr?mzy1BV7+C7LWd?BC_E#sNm6 z-wLtGiDq?H(&|Rx#AHLD#!?Q(OP}jW#n6N49+UG8Y2*mRba7pryb9pJ2?x6aN0};x z{33%p@?KZ$t|v|Mm=;AZlV=158 zk4O-7$~$)H^k~bnUQ5lZe+BTY`}&8FTLf$!p%djabNg1PxSNWM6mcTUvmw2`NAF7e zq{m%{lv*Yv&Tj6q3eFkaKUr2PkiY<|4KT;yMgFC4l4Fw9$WUvZC1cy; z;_BwCHBdz}P7xLutD6$xeBHCk^=bf(E}Z&_rT{_}Rf7~>v%8WDW{*e*Cvyhcw)jps zeBL1^8}*YP3z(nwxdShnR;!V==0Gyu8~}x&Arv(b5B69mH_^$jn;8Bwt!|!bXhoF)#Zbrd*mOBZ2=UCAL}Fs?Az7O06w3x8}4KA zmae0@gnm``IbKjHyz=28KVu)y{YXB3&eJCa7>Pr-k)ATQ{J)y@FJF6c$-nM(^$#I?uV2k)R#|OY<^#K-_ocL6`AMNp z6UQ0*gVopG;KP_*|51MZ(rx})W^(LeU{8ngPR#=Cf2!`Dm}*(e&3U~7^^i0@TJrXk z4*qdM6jVYdxh_yJ_Cp|>`**t`l}rj7K}#`V+Q%%F$kxOaPm{dOZt!E>123>QH0it}=HLD?^n{Q9?YVIOhb3v2StFQ!!r-Sn(&_Y%cyj#T@gyfXb@aJc_)PVO zzKDT`>38Orl*ci(nRc4X^gJukejdAQYXg%#CAp9KXL@0BBgY-Wi!$+Swt&^Ezx{#q zcXodVaQV}(!VSx;(=cxZ%R*9lM14_j@KXXy(cspYoxaDM&cPYrO&SYWK@~U`oRmER zkn$b<1D{mgcOD^Ysf@n|T(CWlk3ANBkTz`ldv6=>8DHtjo{0DuJ2`Ua-g9LOFn!Wo zk5vB0k)ME#uB1nQU<1S-55smKed`Xp%py6}(eL)G0;$E0MoyI9=M`EmsDIeuV1Wf$ z%Px$T0xnsx{fYC>{OUg(ou(rL*NXWerFGEIV8dtm;FFx%b9XlJwYp%w`ZiAC6B%+5ocB=aPEi;e%=djwnLcPZ3e$NhkdO#~2_jLPXz-*`H*9+UiyUzfC zTgdZ;z-La1Rrk^9o@9bVMCR$ckIJQOE>TL#zs((H`Tklt3T|AWYmU@1B3LA5uNyDv zpQeO&^vaJ=nv0kgkx%bqO8L$Js`Yw{WUd&!ACq<^)~D2+o(|z*&at!zQEEZhUbkm# zS%_p7_ad3Qrmge-TVC7uRlcT1!8-@Er&%^m4wtKV 
zu0=+44=^OhLBu`X1uqLexbWQdd{1Pu@SYxpX@SIX6I4uVHDzXVrl-3bYUJ2eUXxH% zDk`tn-*BhJq``99fCy^0FBos|p8ddVZ(F^Fsl`eWlicw5fr z;_&1Mec@V}j%`y((#xvzQ1F^hNR?@0lB+H=)-%yxQ1}LFpaTGSb-uHuAo~CC_MTx; zEKlF?pd!bBNY0>S2_jjt3d@p_u;ifRjFMTRN0B7pk~5NFNs=WS$T=-JNM@HLIpZ^c z|8w+x@B4bL`~L9shuxW;uIiqd-m31ZU)B0t#=QQ|uhTi}rT7Pouu!zH=A$-Yd~9Y+bko5i zfS)9NL4PH4#T5SX7R_Ulsm&HKB*QKP$s9;&P!&c(#5E;*O@^yiKw9SO7 zT*Vajr%=kAa1v*)GpZD``P&j#$A*lF5#pI;qEHtda>SD}pBDxkxAQafHrB|^ z^V`3;NAOc(3Apy??2MpU=H%9jSF=I;OTvJE1zcajVn@o>PXJp;EYmA2sN*NVBUcL8 zZXdp77@NkB_R z_KrGTC56$7b7nPb8#^58STi4mv~sMxvDOO-A83DyLBcAklKxtgU%;5Wj{B6C)0DmQ zU8>&EG9__S*R(ZyB=e&&uO4fli_{($qlc2!@V1AY@jEpSJFC$lzEw2`T~PO_aa5KX zD4G?k46Ao*QSmxCXh2kPM~hk45Ui|n^tt4$YNFi`HFyO=$O{6ZQw{cy4(og%uaV^es_KG zSc}!bRta3wF#pO8>v=IJuOeGaqZ?8qF`7*}sqj+NEi zwP9l8nQ6A*Hh!?gbDN)S9Y1z%Z&fsO;CYWJ-2@2M7Fb&aEqkm8recJz=D5_y)Q0q_ zMH6N=?&K*8c`ty0_W}5R^8Gz_wY>KGy`0j=`Q*~7JbeOiqjQ}R8cz4*)>IuTtq^1E z4-uS0)zZ)bM)<;UsJ6<{BBeBlG`A$E9Tmx zrJ+RpJ}&6UU?sx&X*osbhH}XeVPr;>XWHw-wn!RmT!(DGw=grR$dLz|>EGEqtqprlW+SizjbLg}u+B z@o>nz<45+!b*l-8UfIBg9Ki&!B{h|kVj>+}UE)ir<;mWx5xxM+as2#j7hAaZai&4{ z6Pe4^7EU7PZtUFQj-bd}o^q7irmRlQ4eJPokolJYm*{=~0Df{Lznfc$##EjyN_(6j zZtOK|jzrOqowsl5e(%;&X{$&+7%@Sg1qI=c(>H* z>XS!|B&UL6f^syB$8v3kJU*zf$_zg&xe6lV&jNvuy8t}o%}1d41q;?_V#Dd{>{Z1X z??;^Gz$NhdS;2#Gu3McLeL`oPta|6U5HQgXzuGYeuw@>dU8RB7?}|^t@k9-dXxCX* zB#Dxd29%2lfRx*vv&*0&i7$#pH~q5gj#EH?SED#YDAK;XJ&Ub}wV!-|o**9p&>{E2 zXM(9h3?Q+_n)4<^4o|{InSBL^Gp1*a5#3Zp!ui2!-Z+%-ecefBJBPosk*GK#wIpmV zPdny!OqLHhNR}fvQRYs_sAl`}-ht*0y(J4&I~9zL!43gWM^NogHrE!~3w9|W_@>$= zD4VS+%f0x*T49DLl56d&h<(P15@%g~t@l+;}i|A;JB z)-??w%CaxY(~vwvetKnte*{L4%_?EG?gDR?WjJM*-Q&K zd8h4N30a3mQYoz%_MfzI!eKt=tUyuzLIIQccldwPBbIm2GN)|tbII@VCG!8;rI$_pI4^dOP`wOX|vm70IZQ~qsE{FJlT z(*#ZNHRyck8vpiYc(b0Vl|7cj6WRG*H=8AAPU@S2oCbrk)C9uWiXz)T9=UhB>y6uh zc(PpKMEK^@T}w4tNi%-VJ{h~|DfvuaWC*y`ZPppO@9KW0^J+rwLdm@;!k3x#2ZZa8gJsBb9-%k&x5V1J-xuq9qhF5 zqSN(2yul=ui3RnR4gR7 zri+xX@;%CMZ72e``StU^OAwU}_LZR%FFepwdUQ{XFQ{-^r&1@O;t_T;xUs%wHr%$| ztahZGf4p*a5Lb1VyOs+jHkj)zj8*n}OJXTRqeT?=r?;u0RwCma+qt0dnJ#R#`ryxtyg z9%}Q3k8=2Jdoc5%m^+_2M015pHDxLQRBt+1D154)Y0>TF6DP;IT?0L(Oy#Ksqn_b~ z@J{XeZJNhkh-0G!y_y@VbOams9r7%lM|zDM!nst?wASFhSQQT4@RL5}N?R7APaWp* zW)gJW8r=ppIZO0bPndNORTSeQ-_`G+n{}?Q@Wl0+qLcf3KX@*Fjpa+>DjD^)g;D_b zLq90T0e}oy5$?wnWFw4R6B2#Z_qjVN$_mCqmfVM&c!z{^sqZH$j27>`s73qk2~@zQ z`J6j!I7)AQ;_vlzk)Cwv7jWS-wn8IH()7M`RL{GN$6aErY7>%t3BxZ@i%t3g2B-%M z$u{;Mv$lq2G{>X8q?DU+#J@CtY5pA0_^oNRV(lfTN7YS2%o?+wOfrWPlWldGziuIaNNR)M(bd2g>7MUr>tjjO$L-=ADymEM-BFjlc@&k!e< zUJnqYxCPKE-#&YMV7WE0#=K1pwR9mo#BzTGkouh00`Q#k9c;70gsI8aUXd&GSG4`!CvL>k zAMEE&uslN;|LI|7c3q@dwNh;3Y6EcfjqW)K%8%VnKFVxQTb$5m)Ghax=432;_gbkJ zGuB7l#YABcT~TZ4iGG;0>SY@946rqO{;Q+&Fa7T~#{UA{#BJwie{E%Zi$xop!8Gpr z|9iX+1}Y{9hi^zESVk``fhU`vDjr=OefjJ+PNY8p=5A;w-|m#@#&8f&e;@`IvnVV- z5(sGk!55<-_<~s8Y$+Y^_zd^CAfp&w$v&i?7I$MhK78*JIS~_gh*41aOev!!!m@k? 
zlS36a07J7)^6kx`W|zy%rl$_OKo$NJWvO|ip>klHb-DPJdaoA5gArzX+|S;TcT@GB zhRxv`p#`X9KksFJ;l9|qr!3<)s-CtrRwsHi*iDR`%4&Hj=!;dWdr^a_ua=W$Um(r+ zV0y|t0#k2gbmyk(i*wZ8W|us$5-xkCl+uvKPmo&4D44Pc5G1Ss0PC<{w@LJRu)w0W z!q*$4=^h%gf`;xfyJ}JkmAS>KY2hXi9=xb`@hgFgCu>TB7DGx7%2rG7r*N0XSaN(x zJ(l6|Dq~VI)#2O2o~)wXUVWdvfl?s9j277vgZb$GpsE#~hJFUVGiKGr|m+ z?svs?vp3um0Y3WL*2Ct1xr-#JxSDRWj7Y(e&I{tA8U`k<2eQgtXvU~asG6EI+5`dX zc1MBP15SuxYdKrRF7MF;rdF;u-+Z!8Wqu;PFvu+kTa?U8mwywd@Rh{5 zhdQ3vDjO(e)s8s~B#ggOS$y)9A^nRan1gyAYa-A+z=?iuLxA`u zkRo6Ghp@h@VNe+7ecj-NFv?lL-XSc}ii4OlEz!_LC$>v6-{pF)HZp#tn%VV-*HY3` zv4S{XQ0VQ{mx*bi+;=_m$`q?a6H`^muT_*6x5v$EmAjh8Jm}cS>52LflsDta7_pFG zx`mY{K0(-qgX(`;f1FgYo3^o?K|Cu*mB3Ls;tDEw4jQrNxFA3Lx0>nflgEmh)ywYpUnuv_a=6~JwTCvTGxHp^p=^9@5#LPr&faM zKN&eK?LEax&YtvUR_7K|sxqRowj%4`ZyPMjDW+p2(xJQC#Rv67vV#^}k+!adnE`8S z=}@TlmsdnB&n_d91Z^-Xt6737dwG#lF>Y9(d+aEEcVK5s!M<@_bNN_rp%$!)^X&Ul z*Z8(ZE@3$?fnllWpfx7PVR>o~DpvawaLn9Vo}xP3>on7*rjzoRi|3?C(^dTmq$(Wj z-%J)@@y{=e+VE-|k$IQ5H)_uI{F4L9e_4)|Rt1fAD^HhK%ZT<>(I{A^5Tqy^;o@Yt z=eol>cc$kYTpI=9YMYj|lSr%V`yI{Z@%B!;Yz4{?_JSIHG~4$a-v|^}o`NN}m=`J2 zmb8BxMF)8RJa=y_FN%U2d`z9pE_)?uj>V}bVLPvvHQ5@0llb>Nl^6{%QU ztA+lKQQV$`@(%^dPs%Hj4jxCY`fIg?CDhOML*SLKOFSW-Awr`%?9M{cHpGOAt(09` z;RB+5%@jp_f<-n2^a%gPt@Wu~x}*jfx)VZ0$-SHSwa;uzb;_MwP3D ztID0APbWT9rBgRTfK-85It;F<5pXaR)sq>HQ!KA5zNL)Wu4F7AI4uHl$j<8Ak)w;jk6Z2|B}qez~o zmEUaJ&~VJH)z5xbt=iwGpTDgt*=ALGyS6JhJZ;vY-&UYZow;d|%T*WY?;qqGVzfJ+ zNfjuJ!e|T0MZz$Ft?a!Vbz##gVGKK5W%M$zF0DmKN& zqP#r+KH*Ta42LK?{@Pl|VkNO9gL_&auU(R&KuhAwmCEeR364^YcpJT>c|1`Wy(Jf$t zy*V59GEWkC0CQqd{onbt3^IiAoRrSNE&d@e6~@iF$SwQ8_RKK8;65SWF2gUNJZS6)GOpdfnMh3e^mR4aZYS5aGml_@lpEuq?};D-Ei3am2{Fu1Uy^ z@3@Jl2F;tK1>DWpKdyq>oOAZz#t^yD-vjepTNUQJ4)y%$t^(Roc_V?87S1c3Fvfc` zT1IiEf~!3fb)Wkup48}s(RB?JrEh5I&J|x#z{xEOUD);JY1mBJRl<1vt#yg%)p-rq zyu?cKz{!>38{>55Z2wf%gH_pAw60X^otk~yy1yB}t3S2G_<*j3^1OmZ)vJB%-X9_w{@s>Vbf>S8?|5qD8M3W*S+J`ds}0$PEju?uO=z|Z+w&Ds z`SUWis`mzO^}w5A;j==oMuFOwiH}wR{ZCyVOlcGiL^KSZGcVrmUP#fI&~H?U3UVuB zkmt_`=GId6%C@~>#=??+uNl$ zj6FoJNogbgIY8yH2u{_rLh<2uv{2JmcW$x3IL}qDv?jHoqHV?&I6-JjJ$I6$&8QDn z@qb`~aE>Y7dtNKuZ4xYGbrV}pqSrc()45+kgls;aNZP7(d;ql1ps*`;Z7HqaykNJa zL3a?a_WDzOpJQT=3+bVpfq&(vt{$vwAmUXgcCkb1WTJj9Q-qtw?brS?^yVkw@b`KZ z8RB1){Q-hFe^LIaINo{}v-!IV{~X>?feQ?kMDoPy0*15_U=Nd=%%P8Pd3F;`H&}wH zDI4cDmasCt`tv3k@w>_(Tbfk!9UxwO;YXzg*ZW5}%z~{}n7GwULHJ&t03NAZftUAw zQ=`*!=e~|s_u-R9Z<63vqh(FpSO<4v8EM=8;S)zw8DU-j;mR0TM=$}@ z@y)X26KS$S&C(LCfi|S2Hf7WG@`raV^XnPD#k=W;H>};$UQ}$bWP=4%+*-O%UD4>+zF5^X0#>=USa5F+ET4_~L2^}g0 zN0mbAV5AkD@hqOX0+VvURB=96%Np6cqTy#1>OK9l{8!5QA7iTwFoA*l+cH!b7u(Ko z-+ZG_iCxy?Uv-G_X&D+{b7@*4$L|1A`T#&1_Xul8^XzWHLX5H7F|W^_+GJsb$De3( zVg!zZM&9ir4f0&>jslGQzjj%892)BZC#vad$de6mq+?XheA8hgaOusnGak1pyd21hPdKcLy-4(3blBEvjR3xEY(a8cgR5df&X>hHi zRWUO%$KW1-;@UaIWgU-PPuk6?s=obnrdC{=&HQcecCTf*nfKwnd$M+;i5DRFw~PR} zpV=A3ZG-A_TM%&0P~|BVEU%i0`W!#7l4Os|B|vv-J}LTBafaV6>Cr zjOO+sII^#@^m zq&K|-_2&25Yv*WpT;p*c0MI?uS&H{RIsLHlmgI{A+Ah_We(w;(Cupz8{@QI(J_jf^ zw$AatH1NYiHa%R0u6Yl6pbq} zSD}nm$M2?;d{ejrYa6|nj4NNvZxIWw8Do+;=!ECqDbg@b%kE}PS9D;$C>R&d5%HJW zco4&E3d-xsueZ>{T%ck63O7DcZ3pX>>^f-Xsi{{!ZHx)Q&yAtlW&gSw`Xziw$7&O! 
zm0$km??BrChst6k>)Y+XEXma=`Joqi`405iQb@s#NVP3%4)pnaqpMyJ+;&n4q!^1!fD?+t+K z|MBcaf$#Yrz%KctCX$B9dFjN1Sy0GzLqhA;_Yt*A<7SGVt|a_*oD-|WBC1Csc2qwj z#O}H0-B5hk|6xS64Nv2|Wk?XM=hO!2U=XWg1?W6z(4QzeZa_Kb8-RoKQJ=PS)_foyK^05mmwY6}pZS%P+WN z)BXc-^rb>Om#r#IQ|bZJKN9;IbB?xNXunZ@m{9xQg))XtEnQpn{)SUL%bX}!{g_!T zzc#WwOc^TVlm0FrAZf z_x`^6kAu6B+H-cA&CQu(TT;<$E63@aq5p6dWB!`2zGVDKLRlSsFRp}(YBv|wsnG8k zKaL??xaV+Rti)$7aN9cG_r*FB!3?(V{Nct9AMGyfx?l8i^T2@juy@@(_m`hmM|0ZN za=iZjx_RPy7#DhkxA(1mXBJ~Az9K$#W9;n&GNAwyjjqvGqVb_HTNUb@QKn6v;v5Ri z$^Hp^{0Vs2Er&xSFb5COFg5=Hk70!nD80LX?&IqP%@0xJ$WiudhwU*#_e1JZhHl5S zap#R9b9tjS!XDu;^3nC7P-C(@kaeRW;$v+IEii;4r$q0$UTLU+{;r_CL8CFBT_0hc zFDL0Zv);f|r_ok8mnxdp1}c4IsVelurN{Bj`iDU?8HVP&2d+E%E6J_sp^Z30yZ0LJ z)3Z->t22KBhxRGn6qyVVW){#?++RMYdBr>hlU+sxs1FGYR*I#DWB-Qs-IhB!|Uaql`SP* zj#KZQ{Ro3;%#jKOYMG~aDo7Xxt31>XF-jPL)d&}?h6hk2u%hos6{x|SF>Di%E%N5A zdz&?8WN{l#pJrnzty>jLjOHD*L_)2`1c(rdMNJ4%7o#EP6Y{10ir3AJf2G;JNQSd^ z$~M@?Cne5S(P!*?a+DXmMeb48ZK;&V$viEPLP5*!_p_Fhj}9PK^;sfGeTj*U5dn@W z?LmeI_n(J8Euqh8(HygJYFqWLD8%JCh?|(a5PsrJkmFw|t2FkMiR*?R z-S6}r$1~Px42&0&mB)@)aj`|V;YA@GEWOSB)%2$A$d1x+$ce;%8sb_=f1l_{?K2Vm{Nq5t z`Y`Jh>XkA#QG!W6pgw$Cn*PFeN!fxsCigeR-F=+4Mid#dnh~+Nk!z)!y9g7iEL%_U zxz+kI&rv8N7%pSF;*r%2|B$yloDh8Q6u?dI^ErEGhnF2GKQP1r0*|Y(cmdnjj+e~w z(${E(Q`!WpT^>8Ym^?3s@b5KKx___hGZDn0vRMaDKbOWTJ_hS%gKK|qFsa_(v_F`s zI=UwQ@U1t!RN(yn8YBD@Y);@-0MT|1mFey2q_zqTI3R$hI2xY}m`6djN8Hm}5Qu!fF=@^^9_F&)dM?HU=f-NHK2 zFsFQm6h+eNRmpv?+^*;qz&|>9JbRN1=O5nOt9{hZ`s3f`O{)>bk_w6{LTuhLIm+}0 ztmOpb3#eNsS|;`lp?G)8V9&&Khs?RqupPA}FlzZJ3P!p&79KGk1N4U`eLj1x!2b?O zxjiOkq2f6y-X_5r^Y_S>Z1~#7sS3GAQ)uw5l$^Z*=)8Oza0TZH0Nh*;@K_l=38Fn9 zHVpsg&}b3qo!Gc2EELbq@zox{@&}i$X9dd)6W!JB8PUD@GT>&#e*yuU7=%t54mpqI z_G$Qpm%Y9N{IfS{bSgTkx{X>7SmB`@V+(p_MRUiItT8S344$4sI-dW)LeoC<0f0XM zAxhnM4=LyB(dK0qT|MNV5=5N$$iV6ya)QMzYwY#))Muc)Z0qgz<~ zd-)pp7grFmzKwzgtHci0hbQjt+3ATFMm0hM`UdA!k|u@Hxn!A22%AIG)TS-`YzvA@}bQXAFVF?iF(X3PJ;ug zgE}6=w(@eYZ^WpmPr^Jq&KYzZB_yS+MLk4?AUcE05mfJ_IFr3jw1_TacWJN>^dlGm z?2JcXEUTwgw89Sa)jCXK+Ee=)c;pYI*QkNf{QN|276yc&tfs}H;dbzRk~)N476ld3 zODtwhkD-4{oqkxzA(e<8)C(Oo|G3IoxcNjlkMurVXBIshCWI~h5dXRQRschSWp!=9 z*W3y2JFI**tZkCYru9;g?$*jd*=&2^H$(DuB@eXF7Ix^!o<@klW%>a1DE|Jh0Yooe zNbm8y4j?w5%Z+py<0YF_ToE)XBNgo~c4uSX&}T?39YqUCFokK$(s;kk!8EFunK((H z@}~mG247m&${<>1*{67CI`nWEMgS~wH_FN<^99MAr9Bg?l?r^h1g%_1q)A3|0Z;#U zE<>eCl?;LFb4MlXr-Hr3y!MrwZM$i%X`x>448!`V$M8EXQha%G%hSMMV{I@JSvM(6 z@IDJh%X?Il?r#>VCeB2=D~bw{%IeY@I{AAKwUq2rvT_3jQ?rej1B&^I8+Kc%`~!nZ zVs^_q^)Z6kImM|&haTei87*1u=$BTK0+{SR@-;+H!)T?ELb*|<)JA2k%s_613i&m1 z{3zomA5nTdTMpfj?aA+psfTtiHeX}N-&;j*&=kNqGxu;rN&DUXBCZA7pVFg#YxIh% zz@PzMxl|WFPPJ{IutxKgiLZS>RO76p;-eHQI-ktH&dGhMN5`)~Ji>1Ku7hs2 zSr6M^AE}T7O0;gen^K|sb_W_ecnqNL;O$Q!(pwpV4cejkO~oYl(QNwCU?>;vPk{9_ zWiFWZ$JAyahUJ5fatYTabF+;0e?@$0G#CsR6cdsi3gh+u=G|7G?M4qh7C6p53}hi0 zuz@5_UWNPN>^SY35y#Av=*GHup zRi{jn8+%D>=M_To;!11xrZ4pcdlS;R9(B9O_@Pf4shMcA_K1GGvXH6|K!|uQUa&~C zANYn^JZTik0&S3~mBv~GiTK)|2%9J2RcBmS&<{?;KV`ikk#8JAaNk`!KTZ2JASS*! zd4Kmm+Fc8FBc!AGa~MYYk}YqFR?HO$51vo2T2<%=IUUj-zgbubzProqDM;p0bieMd zVD+~EOwh%Gfis8{%>qz>yKyXN>x&gAPe!2MgGOu6M9;8j_;yi(Se5K_kx+*ohVM1% z6zKyaR=tibGs_Zvlm14vy3Tz8-6!ojFk>|x;~kRy^e{i0@&(-mWMg^jX0^-hL4*I! 
zZMi;ZU;hGQ^@!EM_2;j7ma{tKT#AP_ ziN0MuczO`-rXl#b;HV!VNEr(Ny9&QnM=U1Pt4J6rD{CiFi4BaG zcK78yTqqN^d|*t+r<>5D%#Y&v0zZMz5=Y9v9joBx4$*W?mt#Ulb!OMNzjM*9#eF_4 z1)&RAc&m!45lw&9wksU{xP3ZcqyK5wg;*{>hm@GHPHdLrHNh#`gK+ucqSwEN7wyLM z5)qQ8ihPSu0lp#L69p_W6pR$wUcl0c1zysht%>( zayt1DfhaOWH614$vL3}5hZ~L2o?79VqUN8&9Kx;_)~b-@J7s8bj=QO~XCi3{Y|?ov zzgW9{81c%;Km8{9W9xD8iJZ>KTlZPhTJgI<<+8bc77j`OtN@mlP4yOX#QU5@nKRd zC+&3XWNGuY>tSZ-k=?{bVSMKqwM?iRC{5I7bD` z=!DJZhvlJv2KV%Qxmrn5*(3H5X(9$re?~>+D5exzNAL+mhb1Z z*{{LdV~Te4xC;-}Vk`%ARk@{sIf!p~!Hx33#pTJywcP#mw<&Pw82@@f`?pkzH?=G3 zEA@f07X9fb^^W7Vrr5iLL&3KSb7tCtD8Xe&*2OX;y3Y%Qq$-W=Da30f67+IZ(-3Sd z!_eSt>Dyu3pSb&RRyQ$3(5=(14h*b4RL}6HOy}n}cX@nkM?rn#ZsYxVGo9+V9zaAw z>HNq1yuF?!*1F0M{*}28_fh^^l#GHf8^@pq%wa9eG6vNWtQW>8O_AqyR2{ICZ-=9A z^uV-oyi`AwBRxpDOo_j#K+ZO}S4E3cKwFv?N0{?whzWBG3Nr_1i|)1_Ox>X)_#)|$ zo+I|;VPJMR1mS`1O4TnpN<^b5+}_VR2mXcDfrxG*ELho?yoE|qpCFO%4Ed=O>e-70 z#YZ^Pus7|(TJJxOlkuI8%~T4ijT%xO-;~811|z-u1P?8}qRUr|RB+fv6RiYpM2_Q* z2@$;;utMF=RW3x96TuHdO7uSy;by56PLKwbsAz zQiL7c9ZLFuBo#ckj~gFIrrSbw)LL^78Z)!fDhAXCWQTi6py|9T9tk(0e7dJJBpXba z_!KpWed~4B)aNR*e9zk{`x#?T216UZ^K;n`CUTdFjgRIf7KWgqN-zITjSGVZx}up~ zZt@W-5K>c%D5V+>4Nr7SLU4x<;La3t*8aaK zp!Ys}F>N94$gae71>$3HoGePqJ2d7Q-)zX0lVn2}R~eYev#kP%JrjL&!2Ow`bE4Ova|W?WLj0?bfRVpGUl6hGyL3B+uSbH25k&&L=ToAC zN_A>ei53qP<&a7V$_Lbmw>op|;#CAYL6?bx1QGzQa15+ViUh4k`~;#higia5m;4Z( zo5`C4iL^F`WsTZ^iJ=Wa1uoD86(2XWGu`7Ut-nj}YS1vx zo7H!FX%GMU0W`42A_gM>9Pyfu&vmF(UHD8C(0>U%p{y$xI6^--qS#E^={rnr70Nia ze-}^AUp>N=u&@}UHfk0(X4qUHFqS_OmURU$-I$;*GHH2KZ zh+E%5pR@HCfi!4lX?^K7*J$vfV2R7}?PJlmVzL~7rPtXdcewfdB{6DjEjf8ykfQssV8L3OHYo@m0 zh!Un|;13MQG7esJYsJu@je<4NRKw)~_$>{F2Yc%%gn~8$!YcsDZT}U(_5~-)1N8#q z;2e~Y2kG4*KXe9y{pq-cdI&NwssOEa7!b?mc9V{fR%x=&H z7C%|+^?X|VC2G!<(W^$=LC1tWYJWw8b@Y%SKPl4~!JIbZ_o|Gw(fWF*mB6Hl;adI( z&zDz@oHWJ8F3Xb;*AKmM&C2_v~h3ZDNl9dSdw<>ou(;Lu#dZ=m>-(icI&^1d% z%!PhdCt8^XE)hHeA?<%8C4tW#Nc{w!&`1o?wh0{GMw0VdTY3UF-hofzkAekSyoArZ zJrH)Uxn+PP=KIBEg5;ubbjvUF`Xq(Kit(ZSgI_^le*d7?lSt0Oz8-;?$8IeL%1ogn zNK(61ZKE<=*59mt>}6YfNj`6<6no&H$~U^LKGq_7Xey7h;M&YWZ)mGeWni!tlE_|x z;f-;<(dnaE!E(a4-wzmJ6%W6fvuhTfHswwSbYvtU?&N9FLsc9+UbZc&(F;JUO*H6I zumx=~zOPBvgE;|itni>DQ|$$lN)r~p)7nud6PNu$Kzb$ z`FnO>AV-h?WwJ=MRdZWX@oIg7x)UE#w#9rd zF2^V-yBKlv57#%hJ6)s`Y#Yv}s;bH-r$L%$!roV_e_%eQM{ki-s9*8&38Ii-Ew}Ed zV2*#py}3t#VT_OW>Kx=C;JiiNUgIfn{#h#mIK>K9X zX;Ahn**OC_yFC@o`kTp6R7-x!SICY|EM9c3c!t8eoWQY??8_$WEF zXHC}c?RMqwVX%Mn$$DZ{xiq1tu6)PsuM<$*$GNpe%iJHy4`yY^^1!bqyX?3g++TP7 za=*<%n%tfF)hhvTOXYwZ+8Cv`fD%<_YUZQFeY5UWEo$dQy1oZ+9}b?+Lb5E_Vu9b? 
z3|TM52WBgt^!WGfVWX*<6HTC5&{4Lc95R+OA@glhv7ojiPipl}ufl`DgzHnt(ZbMg zG2kCpE5LL_`!qU6N!be^X@85|CnLwh6Zkp+nFvH8(|gjx-HVvtWlJ3t_9~ILPf2cA z&1cTZa{;jvlO-6&XEHHGcRh!?9EWfhknyi3^b`YeNns5>^()T$h^IHf6)J z2d+To(-4;BIDrP_AyalLkF|3yXM9i1j-ybE#k8oWnh!7K^sYms@(K(ERdXFML9hpM zA)3dmAz)U4;3P|_)Lu3IByIn=f~dbRuz)M!T-o!E*6`ZwA=`kj)tcH_@B1c782iTt z#lux*6-*dyMyn(dZ4uR4U>Sn;E+DnxpgXu+5ir^f381Z^n3Zjahff7eH4xXF#vh zy9x(XI3fuj+e^SSsJ}0v#T)jZGN;T{>ZS^(G^#b!)Xe)y=e3fJ|1Zv$qnBvUxQdABAs<-e^IPwQLNVjw>bGsORh1gd>TBqenUxyLcfEtG66h zv24APr=k?8;=QNOEmpIEmO*IsGZ&ZfV>R{fz~v{5=M&vLtC$bnJrG*0 z<1N(}Kln%Vw&JUt<_tRbvV%CIJ{K0L9<0%ucg~6aFCV|}64=35t>40Jg5|f&6Hpy6 zuH@ABSX)?_Gfr5ttuIA%c*+lOV~o^uSL+k3YYFC|)M_F&Q@-N_bQO*h&UzKg_t^Vv z*n((uWkq=#$&H+pPh!t6Pz!BJlH6%jBI9n*^Ap-@2Mgj7B4AvMklaOFOdKYB z;iF)COwv_8ZgXIcNhah#s$e{+?6Ef@x*k8Km-FG^Eu`5!`qWzPKFB1HfAySX6C;m+g+ z?uGS6BVty5?JkKZ4Kl90zv0&{Dh|hA5(@7tjzSb{WZ@$J|e&Se_SyIek^7ZTMkT4uNUuK!mG^ zAw(QmCZfiK)d|{h{uMJ;RKee8mX$=m#^3Ycg^J?8B&mc^ApI(M28C}CI_H(6hS!~b zj4Y*A>Mt)n57-_Il%d+T2J<*k>?oun+J@uldlB+%t+p^0qsJXlC6bb8eca~JM$G8+T z-ms`N!cg-N!-~X)8(?cl#^Ok!F5jG4?a1)?vvds$XG-z5M&;GDazq2%pms}_?%z1YfyBCA^ z%Pw?TO)usr01*}cWq}x0G8OMtQaULdaBYj*6?;>O_eA-CbV2nx)0sWu#@X+e`PSN{ zAph^cnQ;RAxnmU3qt{ZvRFKlYMk1y+Ar>0#@)&CP5V-UJ0H!Cty^eOdg+&S45m+v{ zJW^cx%M-u?$Boz1eU>{^G3@9(cj)ofpE97+N#-1CsdV|mfyvNJl^$)YSe04dlDp{N zVXl;TWO{fYTQHumqwI|k=)N7(qal>IRfI8FK!BkGBdiaHKG`Yqpt??qx=u>kyRtkS za;AhFLml+)3b+$?&&(4em^WDqo3mwR`C}{7~P0Tc@*N%xJ8lKNb zF|WeM{=0HX&6%rKQ1ikg zQ*#-5^3G0C{URvL4w#8cJ%pWM7|Ba*zHWjUlgQ86;1cah)e0UW_N?rdMpfZWe3ZPn9o{KwOZAyi{{9 zLQS-0bI(ttcDAuA#1S3!F5kULF{I&5f^OQTj_r*tg@)T--dm7E8zj2*6jAx zi4c6`rLHO$zN}=K08h^k0B_i@!C!nC-B2&qy)TG;!?c0^HA(f9)ysCL>H8uMty=OW zWnE$M@sc5=by22VXto68JD8F3SVpI1H9o|iiD#<^5X%IApyvgrZBHp}-u*+_XW+)K zs{iWin(}bd_WUo)?+;ws-QVZfKWiw{-ijM-V0fYNIg39QO$s#_rmRI0;i%`ZK$Q_% zi~cbYfuI_`9yvyZ=76wH2BLN2VC(C{h7f3NF?}K(^9^O<_H411^xgsc)&ZFr7ea=x z;oU+{k`dW#M-Ii9{Ja@fFjC+_W4a)5F})A4(sbvKVgHlH3!Dha(6+tDRz3j9?=5~0 zGt|TegR!=Hw-h`pMrxetf2HO^8i_;hTwb_5a88D0x=3D&`Q}zU6O+WJ;9jpklbaiO zy*jX`zfn%1lcBFpy^*bA23)7)P^=X*VFn;7`IeQ&zQoD|mR52l^h-HelS3F069sp| zlH#H$l|?PFD3b`saFycecLRdkTdZv8c^lPu21PXt%xH80nWeHi@R(;w4grykx^y43 z3T(~fO(J~&YvX4ZcX8$Hwp^4x==T8Fp8-%d^pI9v!Uv`6EuHa)f_5*6ZF$}U48QIe zob0uq@!kV;;HvTuv`BK2#I``IcVBOMQm?gm+O%Lnj@X&G_FWwR#e(bkyGo^k@i-?7 z_hG)HZI9-~ti&TT^gBx5PCfmh6fxq3i44eutfnARa{)P{x+Qoabhn#u!!JT0%D)xI zy;>2k4~i(4O>VL5?jyR|T$BjwwAJvi-E+z+W@3w-%w35Zb-fc=HV|}!HT|tyrek2M zih6IoGe18Yi4-&8E{q539oH&FL5aaCH_W6Xx2Y&M(P^gDLe511{Np=SGt2Zcs+shn zrcvbErh(u<|Ah@f%z&zFA=}%bhzgbQc0x%RMOTE&T!Ky`j4F)V75H(1-Tw7(phvY) zC5pD1J87KNIC3mQ zAijNOpF(sT{W!zEK`d)@Sck}Do7Ex8PY(s}*k{nT9+WK=)8)buY%CuEBlCWUyKKCG^eSy4;wfWcScim(5ty(@P zEor3NASI)^LGoD!mEN~AlkJv3K_2)n0GaHHJKOMEIk3_qxVXSE^(uV zjf!SH-!bRVg?MJzWT>rb^>YLKqIN{&HVn^@ugq6u5sa(}(6qw|x2_0WW}$XpZExa; zMfg=>Wh*)+OH-ivVvjcr{-s-N3>myuQMkrLMlj^7ZQ8-V;T!}<|DZh9t`?D zcXe_|YtaVM;)MxtO(*HcImg$i+!avv@Az5A9~W20X!}A3=ab4zb4`=P<~VVfxd^@e zgcF~9^yN?ofDKRnVrgQR{$t}NF8kI`^Fk6q z-@*;QA0Cm61W?8*al)SbFMwzkMy0wuk~&i-b40(IyLVxaG71?@5K-W<9p}Zz9!4e(pJ2eLlAA zTfB~)qZN$_bdW#ZAF0qnv-W38JM-6Zi+{Vf_?G1ehu7MeKzh2+`RNxge65j{QU+nr z?t`lmzpTnZW|2PMkU4Q5A{{%FxIyNu+Xwb=GQ=Q~>eVrp1aon1BalupDQ|(OT^Z!p zsy!?v=*Df;o&jliSmB!kh=uj_kNm;&LL+nRwlhY?M$Y>#ef!I%pY1pOkbAWAll6<% z?x#B8Hod2_`3C3>>sanuVQF0WZeGyeEY^N^p64QA8MwOpRo3DwI_xjMIoqYJ37~9p zP^K`ifUHom`g??n^}tx`+%D=xMFQN!p8{>RCa#9_1kq2s7@kZOsr!fh23@Xp)-7 zC=7Nd6yXLl@hwq%-F6>>9L6!Iur(y?e$v&@c4*+&O9qbFA^AP_56TCdiwDm*jN_TE zA$oyHrf};#Tz4fhPGNj75G{_60&+CYP#%QIAd-(5<0K>acsYJ8wbhKZZ7Ar$#@Bh+ zFzZ~A5m(0dH+ehvqf$x@YO^RQ>UptF`q}u7m}e96`$wA=blCmgEXiR<)b~?Qh#P^A 
zOia1H)8N>mtJT#NM0F*ToU_=UTv6U{p!&YgeU3Z=X=X(;Ra|%m6hE%ASMI-JVLid$ zmM+II_9htqo@gpz$`QYT;faJ0@Sor#y^>Yiu>f7Rc$T8hJu|Ln#Afd z$B7qvOk-Bhxl4hEv`k6V*&nT5$|sfB+KmV!1rgP!x7?}hpBmVgSm}UU@Q!BIH^SU# z$}Qh-=>mSFt{wVf>Y)_ocQtyokDhiNY)yHfvjd|&sQtdG;*!3z-A*s36%{+<6RIWk z1u;*b;cacO{*1)yd__F>`9_`H(@!zA zJfP64%}3DMT?S3VjTylY=`)m@y~pF~FN*|ow&)SVV+z;R?dML?TF2~crQQ)aCUGa6 zE<>~BXU>H0&?{Ux0g0%;d9o<@=P&l@$5eVpm36IuBkg>o{$8ga!mPNq?36`$x1u_% zAL#?tn#w6>Qx_Akl7kPNL2x)+jk%dS@cdi&bXl*D<2g3Z2U8(xw3~0xF$nrH<;-m9 zZPA#%2RMe5Y}I(0uiX6*V5=Ml7HxaPHgV#^jX%aFZV2Rs|MPss{C9xYaWmfjK&Jnr z@bHI_&WIflg>qgdiS`$^)xd7l@&lvpne|)CY_fKRh-T4aGIZH?+_#1V8ViZ%z(wh9 zcmIN878noy;?iZh=3YeG&`^bCd7>U=O2c22o=(Ah+zSFt+_?*6=WY&F|M{yakI!x6 z({v@siWxh39yBiJ^BW>6Qv-@m=#x69$48@Ey=Ld`IfqvS-=U+04IlFMxvV0B!Nyh( z{3fHh@J2|d#9Sml%1 z#`&Ali@}=&P`=6B;G(UegcIQ(X|>>=C5%5rM7G%gM*S6+=T#rP1Akp8LTCr%cCuUz>iXVK1%qUPPZpN1M?M^`R_zTF_FR4QW%^8V4xJf{G6JbvhWL&Dx`ozu zMmc@4aC$Td`5m{FmY_9gO(4F5S(o84b@&tRBbXFN?;}d~EkJ*LW!Kq&h~q1brc}Js zQq3ziJm=;wNBIFo2B9^)r+jbrV}CIIoC@Q3_92}ldd1qD+nxN>+ChdGg5Q5u;dU40EE>Qx z0Lqmz6i!7HmH(NDI}iIUx?IQqor-hZyAZ#8^j_rJefp1;{u(o(^(pP?$wo`r&uh|N zyq&l+mq*}{;jz&`^RY07%MG}AI!u<+AK+HD2w1!S{g$)xr^Vz;LDiwtqPsCO`S6d! zp$viVthvVUw@X~F{vRF8(6`sw4%*RtcF@%;wyNjh-f1?Z7qv-OOL!7g;=TPot~>iR zi!ZfT=kHOEeVXGU9TFy&H!_XM8m{neE5ITtbNM@Qgt_D}dtf7sd1sNJUtl9xj7W(^ zBHCUS(KrGtX1eQTzFth?(p&@H7U`Ybdi=`qZ!$+FK)pKs##LEG<%4+#5xD(V^VC-4 z-L))V!_u72rQHWd9T-drC?P#P7E|xnIBa_J$%3DZCnaV5cv?h^V=%GomOgB4P{Ox+ zY)X?Al@Y>}0@;^7M>FTWL9-YW(zU(hV!=REGbUQ06gWz6>2q_2Tb+hGs?Nlya2uVn z60n2LTo!`5o0#|2DHA)2rcSEWWv;4=*?4ABMh*2zJF5gtFNZ*tDxfTvdb2;6xd;`2&v5yz*!F-*wM*;oRY>I@{B0oZHkJ{IEYo(--+A zXPu;hHMes{m2%!kvl`ep5*4|=b>`*~rzPHQ7H#45Wa|<|l#yn0#^sQuT2A0hW-HE^ z3+xKNXW?24a=LJdw~MU{tjJPNvJA)NnJ0!n9+Gzj5Z8;5&Ku)Kq!h5-WK%St!O+NG zicM_WM#@EfrW@!eYT`^BMz?^`0jq*4M@HXw`5gw9Ow2yJxmdC3&v&a``mh{5@rzVu z!<^T`;-(ik5WFby%8da^=YANwVlZ7=MksDruKJwTh(e$&5X@)KOQwVF*F2~70_>AU zs2+f08erDEbq|6euX@Y_^f+?l`^S`dUgNiRgTdOry6$(M+HYQ;d-Rj*O#jQ=3lGQG ze!XVPw#IyLCf*J=sNYJOwb?YTYBS5d)*o30z zwv6XYAP`WXZfRJb^PskWv@xM=axC8)7adz|M5g(yc6lmHdlX|q> zF>aG6P%g%*@?#%@psd{jw^MZ6)GB``cE9*k=jwl~VgDdW(v&i$eB9-2@>6fcyB%-& zGQt=0-;tu0$ySd$pG<~oiLr}?L)A-2DIaGkHGiBdm3lG7=tAJ zEn5{^K62Ur&E-cCMZ%_aCvu2xKQK2qh_{hXx=S=JGa-;FPat5?dE(uR^GiFNoTNvi1B<; ziTOMjH@TkrJ`3HzOEQ&J!6V93icr`Fj~E(9a~fT&j+vSF=s3L8948U@9=f!?t7SRn zC#xw<7gf(WOFVi6sPVTpkYVsjNsf@;VRf}W-gE*;_1KO;_*6Ub@RMb>p?6;aI)H(% zGoY5!y{`|MHJjYufY9xeiu}IeBiniUDd{e2GkYw>(t~_qjfySYJ4RYQyqbKRJ(6*z zznA3fXzmzPbAe931}sM%DVaG`*jXW7iV4>=O}kTKY?^c~foU=4tTtrbiE zL7&wzPB10{J7&pVkF@s(y8$*r9e)(&gFV!ogcMlj-5>nTtlPbdGD2FAj^`FU&sR9$pW9@WN{J^SgSY zMonQ*^EC{T?3QEz(lUs^Mivy)dS+Z?U_xMA6cVXQobwH##0f_uRc8t*PFkpd(tx^I zidlZT3;z}k_ziQoA;)}o1d|~q6v?6i<=K?+8c1kwgPJB+He`l!``f!>>rgj8kl+w* z=2fq2&l6mFw>1IB=_%Qj1%ZioM;yzQ%=q~fJl7C9p~w$TYwhsPMtDxJttDbRo^<${ zL1HvdKTSON%9g`_Na|Tzp-$p-`s*)9xCh*_UyhHz|AB6T70P%wLl1eNh>X_B;Y&Fy zk(DR|h=2PtCGdedZ;EqUmQQogyuYO7EK#$xy7>*E-tKayl(taj)L<}DMl!rj^^=zD z_wTo&rOOArhf;-$0%c2^W=W{;cujCibyDVZE;Q9^l!Z9s;o$8C^WJKL0lt1Hr&80! 
z8dt^!kF99|hQN5{b@CZOI};?O#jB8I3Y^Ib!fzpnR6y>jJ8FYS+TFlLK5`8PUWsA$GD=>HQ1E@YB5-ei|pa z1Dw()ZTG!(q+Q-?n zGMT0Ls@TQkfe4g0iruKR{)!SL}Nl5WKx6?wOnCl{;j`<<^S_ZKAMS1&wO38A@e{Dl6DB48uPS6Zo z%`=8V90F{px@rBdUN&oVw~t==b&Gy!R4HO`pXvI7rRpsikMNx}!xDS0x*cCk2-DJ< zH6HJ2>BDvNmkwb%SIM-ogmS>G@m^$B81=$k)Y)1gG_vbdjq6B}t#QaT>Urg?M2oX; z$L{hL!$91sv0L#NNF;kEW-QzEZWm~NNPDy*?b3>SI`u44 zW9)<4haX0y@3+Oew_wPaU9xYB8>-Ab-weW$h{Ig8)w_&KbxLH<~zTZa{1iPsX7AbMfYT0(t@WH3aytfO&a)Oh_<+ z=>sNo4^lYt28_ExBZn)jA&1p+F~1lAo`Su4*ZZKS49$G1XWNuIO&mx!rVy(u;=?IG zKhk51&_OO-N5Pjp>8FfTMyCmYHSpQiP+t#wO65)29%|Mwne72|1@>M%@W`{68L1r^ zZ96O2AGNy$vXK9Xu(EC1mW=*FgN4msoAIcJoO^Y5D}7~b;?SaWe3{9lF!StxncC49 z8b3-Le0wry`YM|E@aDkau`Y-HG+t*@Q3b3yp%{@1P}Eto&efhFGvveBfZGd1)y4n2{@*IhTkv8kGpC<|L1DR^S6y69fY zoJ00rMos&0c;vd!KxYoK)tm;PxH@h}LCTUscl^={{n)Vuf3vMHW@ z*00MINX_lZc6p!#uOru}sM`M1q$RcaQHsf#2N!w+2h3gL6d=s}943ZZd*VAdsL{Ui z33KA`5_s&)Sfj9=Q93zuG(f-k*DaD`;PmMe+TeyQUppQB-zGfF9yt6+)Fh;OK8ppW zcfTOcTii3ijLf1+@|J~Z=%Jck7e+CFLL6Hv_}To`WnHu#l8 zz}Q`97KRc?4x!~Y{c88PMe@8M&d{wk&(gmh$qZCYzCi^aU*CIMAu8{Go1Opv_iS3= H*WrHw?G+jp diff --git a/src/c++/perf_analyzer/genai-perf/docs/assets/request_latency.jpeg b/src/c++/perf_analyzer/genai-perf/docs/assets/request_latency.jpeg deleted file mode 100644 index d681195ff73d0294611025cceb808f2e108eab84..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 39579 zcmeFY2UwHK);1gq78C^$L7GaHzA2%Dg%T+NLlQzqIwTYcU7DR5*nk8H0!k-=Py;4Z z=|wW_eo|##*?zNs-Yv!5qW8lXZ0He01 zmL}lXF#zD$(I4Q)FhC7(;`lG&m-xg{IC<*iFX7CoQ*@`!o}s5dd*CazczQo9UlxF;;kz>D5o;Z2th>-Ez*>gv^|Hbg57Ql4oSl!9O z6UTS}$C-|uU^@1r0l;+x^7x5kM*;AApgVm8@yxN~XOD`l7y-wQaGyBCaDwslQ32hN zSjSJCq&vlQ`qB;N%U2{Ju(<9sEIfR7;D)v_@oDMftgm|5c%@|I5LPzs9^Ox4-xZa} z-i3M=m-b3KJjYG|)eO*HPge!htu>#KKGYqt;5=e|jP5V|0e}<7jvui&edeh29uwgB z(bMrGRh>O^g!2gG5dfx>mu^7lBu_Elv5L7Y1$FOUWx2v*0DBS}SA1IfuC>Rzw4Pp8 zUVb3D=)<^7$y%M-k0HSM6GxIVonQjo0({SX{JXIK`u#sn1BM9}EmfC|ziyjJLp-Zt zW}@+DbspNCVx<^1>&vEF=FC6H4)9wb6N*N(jU5tJRrz|VxtoU`=5}vkoOiNtoQ5SW zLvQZx(TC^`dr7T8nqC!?7IHAH@Z}-0bGsDsKD26IX?gfF$UslFXioWR)WejYmoN>! 
zQoH(|0oo5!mC!v;!M$E6^T4x&^qRiEe;`ns?V#Z0`Sm7aLH2m2|V1H8Q$%aSgyh^m+ zS5LgoTJ5X6V;Pu&SHf+TImwwL$dH(HDyxS*WLp>R>VnJ&43cYnb@);~{c;Vf`D7Jz zYz5YBW>^iwW0)CLuV@Z>k-9G_r>koWjNp=Fuk5NUJ9K335B&fwYT10K{LZg| zZJT@kqE=soa}Vk6ctxCZKVKc>Px5Oq{Zk7pnq>>C^Ld%*5&W zK8-tNS>%E}=)0`neiZWXIU}iCcOMWW(-c{7L z&GM;e%`T6!p8lR;r0eBRR2u555np_MY@m9_9tPeql#@fi`Uzg;ijc%|)NbwhXWb;b z37b$BP|i}WQd;>C2oLC-lnoqeyrSL#0&&+(u6G-aJpdR}duvgh{l2jIIVQ(vZhTOqf(50uB2z6UeAhG z#G7q?Olqd(OML0znm0h1bN^hp!WnX*2VuNWB(}m%h^K26Ch^-!*Ta?DnI{bufMZ1+ zG0c2sScez8hWce=)s5lR38=y?wwvd*_h-Un%AY$ey-vMkk=7vZ&)DUeK`|F3gyZqy z?0)aC@+s4HO7Wfk@WS+~amAWh_a1Z$Tq88FodfN`s-0>ssd+cP`UEq-fv;3>Buc?+ zXBER!k}U@2KQ}8ax<29Han;!}y|{j)#p4MSpN}4jS zZDyx%cbDYLN7W0kJk_xmC3>|jgZPT8&*De%#Kb&=i1i~f#@js|VazmUS9&mBbzyFi zzIqZ^R?n}yZ2}%QR_UqEnwIIS@W?h6&WM@6G(&C*$5^P9(v8~gnw5)s3$cH1GLvv< zls~?oUg0UbQ<(WJsIQDNYiuwBanlZsrB{#d;RDmKz>#5ciNI`A|GS>i3N9IyK+`1g z*M?5H#7+GILYfW#OC{GAxP zjZ%l^_38nn+@Ysvc0M+I9&tCmNRoxU4cpt+)CXjOGx33(2OA6b13#riuAT4S!{w=n zUEZQxF%=RhH51N9gEYGev->k*fu7*uI8JtbK~Md=bq@UQ+%eRh=w8+dXHr+9qgLU0sj14cNK94q<6$vY}6nR4YJBk%kwn=wA6CR8XfVpn zg>8^l1(aml7Qdf9vh<~Ih@AS;@uG&%gK8`C>d2<&YwXQ06*|VAdAS5aIIvSz6>n^= zNMmeCI+F&ld@3=JG= zJj3=9l~#d(;@9}`ljC@#_#ZULOre*Ax)>HZ7R-)rFR!?=%Cv$em)&rdnP86pGL z1$MM%EWUk_|24jQjH+gpqCR$3x&3p%>0jeBw-s}orCXB51rzFid*I+e2YZ#3MZ#vueUl;XWuVY&oeuaIe#r^&P&*+G5k<+|Ru z=ZhLLJx;CWa;>V{0~P)kdjH<4r2l1*{}W&Fgx8fgwyNmT?$Nq1LMmAmG;U44!nLpU z1CVU_<++zolO#p_d6Kv7TR^h_fJHCKvpjSI?$C_>IovQ9>r9{J*1Dusjs|-o?+JLG8H@qO+;e$ z`PEHw$Y>1?oy7iU3|!_aby4(tT5|VGd;`rdd{E587Bi=nZKZ^9gSl@{UY-pkc$K#Ky@%(wU?Xn+T+PAj5hp zvqGs%s}4njPqA10OOrI8x1Z0whN0XNRetgG%fMIhk`&~l96L;S&aC9zl!l%%m40MO zvg-5~wk6#)d7NCbmZsaZmU+{bw|)(*G%)La(DfAh*Re>abgZ1aN)!kfY;vtQ#0(cp z0RUjX$a<{9y7HJ^6Oa)$D!c+wOb&12=eHTx&@kId_wAd%Em`EN$hM&OQh**37>#}5 zzkcm`fJ0~?>iX`*I#uZgRnpIePfT&C;gzg&g{vZCKOgW;DyuCTpKb|aT1&aHnfKy3 zU2~(wxeX)V%P;IBmkQ_aj=i|*RsEGdNu3m}LDtHr$bw<59uw`gUNtv%Kux|O{l(W@ zZ~5rln%e|@`?*lfKH=Gh$MgQ&aQIJae#lZHS+vmGe5knU5B>om5%R?1;P*^jt3Zx{!uUp-j*DHtu{rZQL#ko5JrbXG7{1hH?V|YBV5(11sqP_( z*CdKt@|71@>^|TG`rmefxx%kQ`UeX?04EiC)9P!SZha#hFou`owDddq0jF-rD}NnS zleuq$VQ*K?!(f&>UHxO2R7)K!d`usutk{m64Swc9Xk90$ls}=AtZhP+-~)` z$V)K4tCK8hoCs*hNY3Abyz)0O`#jBksZ4G^QrRqHmoLe6Xx(I8gKpSgKXP8teltPN zx_pH7+z&w0kdi@W8`DcHcO8X&!wCOSYz1Q+q zePa2mJ7FOX@i{Nz5OEiSiu3T2c3NQb=!Qf!Aq z@d%dAyi^Re%}%6rw^40+l}j{q;~8{VVh3O5VGk5+vp)AP!ue!iKQEH^-$+WbH7zQ@ z+v7~aaCrPQ2{w}pU%)B znL^2To6Yiiz@gMHWNW@~MWGaU(HC8N%I&BAebU|Lm-Zd=$`K`}s8qPrAd8B4m6EH| z{kBI&ELS0y3#)L06a+8#EXlC_gM4gMy53G>;eezDP^EJinVx~qt=nctY6)ZpdSrvw z_!z>N3K1M|Jl4dJO#BW>R-C`elg9<46!EYF+K%}*c!B+8hu_&IM`=Mnv2X{j7S-Ev? zFB{^|go%1O&H{HGuqffG_U6VEtuMM;$>4(G6W2@&4+G0|O5=gwIyiC(0+?ve^DE`* ztx*21_w6!f6-1b@yHWM4ht;vrw>zWAq6R2bFH=7A>W9&3NTu^sis_)33tvCJsT-`3 zt#;PgyJg`V!nL56I5mG|0bd0&=ijn zL_vDU7;7?<6d0qf$JZWF>qW@U7saQol75exvl{Q|d*+ z-9UA&Tsy5U5tr(sHYZECGsVL!Rwa_2w`h^w=^V=#F|ei}w}2#@XjY=#pxF!|jvA66 zOtK^kN~JN6yL0EZm`Zl~n@7=F51A&|e6=OxkUfTvlq@xSZvz;<{N4|bS(*i{V1v6C zqGxYBh~I2pnRhtk&iRhMa@NpMo^@6-wJhPwW58v(KL~y&1OWaYJ^KEj+#^WiT)iHdz=g;)H51g@8hfxlvdVfredTthKsnv z_An0T>J78Q7X?qV#OJ$oykFW+?Db3!`?97ik&|u?i_1N2mlTUp|D5aH%B4~FZsQdT z5RwjGt=nafn&@E1C#Lu%=~M2Pe9;jXs<=On9Qp`(k{aRi$)Fde!6n*V{o>k5qGy*e z+f)HEeK;4N2ZMP#X1^m|{z`b8n~5=r#<4b0eJ%-;Akre7CkD2k7OosGv#Qi^8l#;W zU(n%io%bMMxr~N6GVP>P=HIK&+kgl*&W_WX_^gKZ+{~LA0VIzOfsCfzQ6@4SbJ zH!sn`fM{@7(@26-0J2Ucv^Z#3)uyufWqy10E%)U=hAmI`>mPWeR8d12F&5CiJUkSzm!J0&Z?8irt23 z=X(LUl<6xX(TyptBPOGUz)MHVix)aon7`a}yFd1&w67j|{%0_`8I8A-DXwd3zFgd! 
zkrY`BjKo|56?K9&T~?r?@=$d)k&`2)=z7-%&xKZpM4`+^R_$<=9tVXUvXfnoCq2>Q z^Vh3-b=Cy+sMih0%H;R_i5koVgYoUeW}mE$E#(Rm+WChQIClaGdhG9u1-F2_d& z!etf7PMIU7+;cfm3Ch&a-=l|jZfD)PNjUQ+mB++D~F6Y+GG$iavxD* zdOlQWxAJ)A?C^p=khP~kNW5)!+8t5hn;H3EzkqJauK00phc1fys&PVX76Yzy+?X6u zcz~;Si3K`;GgQQcw&cEEnKz_g9$Z1%7oku}eCBp@4?FCNb*e9Ykm-8i-a)vx-JU;` zm^A(2mFMf871%)5Vr$oS9}LA6QdLcrA-@onObN;U|Wrick41? z7GctFBD;_zX|oy&kj~SGW!w#mrd;3Hd&akWNl{ObRy5n)*}TL>0qsPAj|s|M{jzmC zKJCW?#{5RSBio|!N(r{JwuJH+=yl6gJ)tk(nMxlOY}Oqt&)9c$RbVSg zKuMRbi=)V!cJ&*ijqVQ`ETTo!mO)k%nO4pyq<~{?EFPJqlX~qb9lFbbNyMvk*fGC< zZ;#^;=o}x30BbiX6!(y&RxEM0ET9`)&c}vGhU&Myp~p#lXNSu(&C!- zua>U7I8YG6#8)}dYB93Oap#;VFB}aAwdQ@6*=Jjax2rBjZd^r~q#6e42vIY;0~GQ3 zKC)GMdR>R#30L>Kux@uRe49@C^b%mu{O*6^XTA_kho?Vx#kLbZ3`HMzNLhLj_N#-+ zU-|QcS-aFxp85N?tNTttXZ|hkrtdRBzrc7_xof|1)A)wt@6(l2n? z0EOh#U#eaIHSfXky8kih^W_lqXSWej@hI!k`){omod2AI@#Ix&1|vdm+H+)$DLXVHg`y3)ClJbA7v;i&;JH>Av}rwM8J)%U20|a zaqRAq5htZaYLzb&TGkJn(X2BWBj1O8-)AgkfyB#Oa@u#EliS4j> zEw}idKA@F)2>g{UQZj-^BxSmTiRqlv-Ajlhm2h;dcbFH4!=v+!<$f#cy=E>9Ixp&(;Ee?`xp5-PxnO#wn>3_~w72op zXl`D`xk+}F>hA{;whP`|%QZSX+{NQ+wkflGtin~x%mc4Vs&Vv*6V64ws=2%)rqZ}A zf{jP_3P?_1TPx*7w$?zbnY&Bf>x5%vUEm-W|7C;BZb5>f0p5NO=ZU#=s*0$AEYp%7 zq8%*?ae`Bew$@M$Ntw2u2_j(d%vc~+OH!r(WL8S@t3~{9fcC3iJXn`jj3ATkN9~m$ zK+3r*m^E74iwg=h2IGoCjs|$){i@`i;tIyv8BxzQcwG@N&si(yu()GXbJScNem6=c zerca>W*0l~%R23!;Q}AP`TXrapa1zO#`nX)tBQNr_ZiqC7n0BkUfq#mS63=kebQ_v zt`2-HF`4&<|Ac+T5Y@0e9zF2*8H-NAJ4Hm9EtLlJLZ$ESoOM#H11tL*7LRVH6BSg{y!o)X6OsL4I{Czq53P=E_ zqeZ|ioo6(nLD67CpQ^`Y<5>dhe4P6n^-(sgz zOwWFlh!9>S5Dc-SZh0;NLi#dZg|6Kc6GsNr`Hw?z6oxFsrvaRjoy%jlcLrGDEy z^}`=`#q?khXh&TRdFXW$AuCyOL+S9eC0y&rxW$q{*k%^YQ{_L_=#Ju68zV5=k zsbvqTdvBKQz!u`S&hr1xKYW0?`Z@8_hb|`J*T|aBFtj0TSH2N-IID7X4&e6#@KVui zeJGmhe&Kh?44QiPZ`TE~7reN&!2S4i>dx6%3u)e9$UJ+ zEeZw0b=)g@K6P-otQEmyZwb1ncgXT&bsGEtFc^(JRQ-8;XFX@G?*2n3AU`+N%egof zaQx-Y-|W!S$Y)vbyI1g2&iQVuvC`CC>i07mrX76QvzH#9V0`nBto1|~fa3(?KPvra zBY2#ks!HA39CGD4cv+@&_ccIF7$2jA}IWb zkLc#+wjgq0x?Fj^UF_|LiKvW+z+@r$cD_8C%+eC=(ULM86jtDUpjL+$p4ITw8Z(GX zH<=#g#1qZpkJi!b26C}I`YcY;9FrzD^9NG4Yn;9$cw>@=*w%xz6K9%u)`XLAyv1e` zJD*bqZ}0MWz9PBAYL?5`7^Y}$TRt(%cUx48#$9!q31DuTo5r*?7q!OWrjD9cK9}ge z=8@k}cp#UDal!+$WhTAT1zAoFBT7@-%xDgBgN6I~Hp`H~H$`qn*=hCKi$C!#f=$)cig{d#6Jil1< z_iYkqDZJ7lHkfv3|~*=e^V+dede!G6udhMASdkr!mozmr*({a)mc`(7y`ok@19! 
zlXN8u_z^)s@MAjBs5Id$e`e(b{&uG>^&u~W>v~Ir`id7@RENjvOfL50hxEFQgwWO3 z(E5pZ#kEYqSQatSSPV%|Ov#%+u^E&VO4+E$at4OFyuyxfsOj+33zuo3T->z??1JbR zzR)RkGg@s#V`J&q2ll`^9kGlfFCY4ysx@uuTr`PLDp}-|-Eed^J z>L1WBp!#wU>Q4I9WfJD@-*ZapW-{-ad7uaTDcih_YK)+Npn9=q zWbvnOYjiH)eyr0M^?ppu2M%7W9MEFBMuDZXPctxr>TSuUAc7_HzjHXzwY2oKar;2z zVUIm~nQ`vJ&uqoR60A7kflO?1dH_@kpLWyU7Go=N`I4NV;?Xf*oNa?H?QMw@g5A(U zjCEnipT~;xPS|2$Bfrdj?F4$C+k3Cm@d52TM`s}UdE?OchVq^B%zcD~St*yy+Tljk zAlUigwh|n;JLfa}O^+)^Hz|!n`i!)jaHo@;X1brDhZY6MdiP`9zpbV5spO|;vg>&> zah^!Icn6uJbcm4gjvXjDf4Pc$ap!|0Y3Jc~hC%?y2R!|=lI_Zu{Wk(t8Oe~ZiE_#A?DoKKagfZ*RGlF{nGcty*n7&&2gS3`=v&LYav9HDBcsC#QFJ)(<=-D~zt zy)-yGG1I=U_#i2330q!4HH8kpUHg1?DXwWpvu_v(yIdab$ZH5y%r%Q`EFo+7BPd9; zumiTk_O7^Ui@A8Ca-1})(qaM4gG=iKF7*kM5hZu`bn|)o=eCm~)lU4`5s1BTiB>s| zc;gRxxbUgrV37qa(pWJ5pv(DqNNwP&SBU-m>JOn0M@mU7B^ugoIHy=m(P$V_xCp|M zLxfEr>=J{2vw^IAx`u}%cQ5$*nTrQv*W`mFYkflic{!8w({6CQ6LBWVa^32b%Wqx& zZzOizIscJ;#Oyq64<*Vz3KwCiLuq)1_Ps3k%ha66nSz^ljJB7k!-m@vowKoGnL*3I zwPJ~EIs>E>QmEQ>%Y3D^LVW6TYtrq4YQ562?S-W8@T|paf+lYJo8TV+;a;!T7N&?# z`c0|dl2V^OOx4TWLj3@!7v8Z9NOr3EN|iexfcsVdq~!iv<>@)Yy(^eEm$liMKt&)}Ec1u7$v;+_59z1Xz5|8_$++)EbI zKWt|Y-;6EJy(u{I)=*>{gp}~WCz+=ThJ?=j=0y#pFROlas+aT6q&r`LPk)<}^Mn`% zzoMx*3uIANg3xt`MQe=<=gMM@kt7rd|8-zhF)eK>H}>6LtVRq3&YlS5h=cMCBnE$5 zP5t>^z_p;-UxqI6l}3X9SQybq*8H7~b=pX(M&5D{*ntGuwTmSI;nEs#4Ya%cBeQP^ zYf|jIstCm*?QKpVr{OGAv;))D(cRNDWjEz#Vk?eJ5VlGD;?CC!O=qqJ3(awc~1^czAGVQ1;rjKe#oYapqPoKbTH- z`1*9qQqZy5vGd&re~GwKOBsjzlxq{$au5E~ZHMfH9IaCBj59oI#{K}jDam{I^220% zaHny9(9ZA3Ia!N$l{CI-dYyas{l;EuLy)}MgSF|JS2>W8bh|snzEO1CrBr z(-lMLma6r-1GuNruR42X58Z#Jkc8-3>>S-8A$v%lIAT~+z{+uuz= z#3(9K|J_N***_K%?)=t$Gd5oe@ud6PVLrlR5P||a*n1krd6^pr6ZZhtagTqon7<3B z#WFrhhDv6gBabU91(zF7SM-9_pm0-{Ko5fyqm<@!Ufb2F@AE92M3`=ZEYV-VSjISd zs2%wE*}eX7P90}bmrY7Fr{ZR2&O85f52@@x6LOqp5l8T9=?>Ue@hj-f^c|Solji=7vUix+n7Pt`*8A#+)-T9byvc zjV#{EXD}RIV>mI>_%ZdI@mJFO*ii=^?~JwnzPuH*7L+2ab?zEmX`mqtAD-8-qM3HZ zSKKRW!#Nvq+1&*|`kWo-Bia=-q1PwYcf z-J%v9G+WyQGitUmGBA7S9^Th3TjZNyJ(c9!o;Tnw&KfZegmc=WTQinDH-ZRD?LvJR z4c}A`3;5Ln;nZUV5~?$mcxel`D&XQjAecPfRY4Mt@V&z7lU33_Eph&5(anIZHUGC6CUKrizVV{MfHxG#y3d zjp2ObNcM&PTu`eFCTw0>Q#2LY!cxb(TfVbUTDgdnpE~$J{0E zNF1XP zgc6MyZX!qt*viDjEE*@y%e6jh=}iX1(Y8!oDQV3NO4n_q2Jw;Byrv0LKN)nxV0WPK za|n-g41Y}PAq+vvT~=VEwUgofL*YpZ!Mi^ITl@;|2iJqx9SxJ&At{cTUS1Ul>f%^> z5r4=GF5!}?unT1!%wl`-1q)duB^sG#o4BJtXOC*jep(8xWY&^L>Li;Ay$zo}N#N~9 zgMcl0;83#t6w;xw9GT{#GUrouE_xei7v$o!89cV4EVo{7uu~}}f{skyt}qESJe>W# z7kM{hemB$quxqh&*=iTG%v>L;In3V8iMR%wHYi^T+mEuoxTF6Kaz){Z{ttj^_TG&h zJjX}SuE4b|QVrc7E=9WI>VK`X{-=8MFO|X@> zT}d|u|BkKL)dkIi=Sg$!fU4k=;sGm#p+%5 z$V9wAuLcv`Tin=XvZd=@{7S(KM=q&;*L+XCu=WCq15N}kJDd8vat9rk>1!XER9zO( z-wwW};(BO;8ftu7CDdwI>Q>jIo|Bjt3sa9t3!Vkp*2G+JV|j3(@s!9kv<9s4qw3JZ zB$G2Sd4&+)NB*W0i>q+_#x`)Yb0kC=#GW&^n0?<_(y$!pO-LF^k8i~Z_}}j<92fa6 z(C-q3i8tVl&%FdwALP!(KFafm>73N#gKGPe`iZd|f#6O&(nwbAWE{D>`DkX3CkrkB z`vuEf+Kq87OC_KzV(@NB`{oR+pNc(3i_GawMc8>xb9S1S#lDCgv~#Q66ttOPY<{gB zJAXwa9BHC}6`R-LTMi<`nVPrs?Ep{$+Iv&TUkqWV#g}ymimM@@Uuu4hxbvH^oTh1Fw7CmrxQN9= z%_n4TmbuZ&LL4J_GuuVsFeR{pNxMqEUE~qAvE=xJVX>J2HDvwK*S~N)x*f`SGa8Pb zSxWej8X{UEs4c?0W%6a;bgq}dctOG?nevUiE+#%+E@t+o8Kq?JFY5x%q&kp$KQGH5 za59hXSb|w#_cmIGqLB*HdS>xwc2dtz_};r)!}vTF8`ip*ZCBtm>hHsnZSW@PzWo58 z@6ul0_qzAsX*N-1Q)jhSyKFjBheoMP=GyijF2S<_O*`;ibXYZ1%Ny&A6j&r9#K= z=b6)&T%55+B0ifMS3v0C=B#4x5%(~^9Ru5lO+mJ|C)-;nl z8`dz9@wVE`X;406Wn#E$w70{xi6#*7X!iLA;zc3DlYtMYtQvEyNj$y?R{X9aTm`~D z?O)E{$|^qTT7m!$mLF|dVXo@)l~J5^L@9fD;c33S>egx7$gQG5ys)4L{!M`Yg+MA* zsu`>g5y^U*pwh;w;AIlNG!m9@1=SqWH=*-Aicq+$a)qOIQ+7CgWDto=bq~n1iGc&z zXgrIOCgkN-2pXJ|NR0Ij_v(EZc|9#YPH&}cq^HE^bF&@jMCgZ3NrEEDY*t_>78344 
zN{>YfIlgjhYfmXZ6(S?|yvPD04qvM3qhvGIy^2God%QEsLTv&e{+WoTX8WTP1!ghQ z9}^3=1gzNULj`Lr7gzEzZSyy5E~O|yYa?O_F$!q3#JF0Ax0!L+>WCMU)q$FOS#d72 zx3+ddV?n|tB0j5cdU7atTrs1!`jxzbEPEU?SkxfhBwladRhDNaCr&`m>_C`Y!Pm;d zP{tbm@P#&0!}!;m1WTM-s>oK=SJZG!`!^FiK+t^X z0?K9O9oD6(_?<1x-)sa6MB`^k9_oWw6t=#1)~Ob6CCTPvc~+k%j_Omeqa2*|K1k?s zGw4L4aye*G>wH};r_C5GHgK%4ep*)XH{;Gh^fi5z=`;%9nOn?R6a4is2*ej z^<)k_-u1lD0(Z;FU+cU0T67wGpX`6(e_!+1D)wA2Xj#H>r=m$U46!o3r1a(CEeR9e z^`4}+Nplwy@|kv>z@NYyr|XO)z}HHv(kH&M?eSYyBouyqQhQM47~QVz`qb^MszoVi zI#ceB+^1`2A5_T%^=w5)RNBnN2d!Atn-&YMD1_b`zty2`G|ikj@=I6f4b4VuQR0j|B>RnWlcLu7>DM+$|B49y5T^}^Z6R;kL+_(v_mT$ zGfLfk1Jv3z)8bZ~egKLh!pD7=&coJV4ktG%(JS9*yI?TuR^2B}y6_-u{Kz026-M z5RbCY?_i<~&SI(h$3B(c)laXg2L(32wthLR>;&PmAu=uM(5Dg1T1IjRDIMhLW|AJq zgG4a6f*Sp$S*@s)xdtn}wzb zp}&{FA=3Kw4b|s+oungHNzoT!!w*O{B7x)aukvty4>a16SKu8)YX5|8Szv+))ZQH^ zW7v|{2mfU5V=_M-AJ@I2o%D=@kTV_R?^R-+=u9!nQOskCc#j>vpHEqT4lRspUbNY zbOyeWXe-f?!PRO98B$m1w(snC9|jT|E?J9$G$0RS_!3`vIY%jX=Y++&A}h7V;WE|P zA62HaBsU{&^@@}0U~}ahFOcM$joAvV%JWV_Z94i;`C9K_=nVIIFWX*Y>U?zWw~KyG zk%87-8;$KjqHAO61!YwsY!|u~0_s1T*jzIzIh5Q~wfZV18J%bR{q_5ulIEbO>Y**p z2THg8ByH~74XG?`QY~`Wv!0yj;@VT(M-8|?Ui$tgnrnaZjEIg<_L*B;F0g7kfh;I+o8Ci~}Yi~?***#fX;v#lY zxTBlLC7^$ZmmIg|5Ov5ENLu>GVjtaeQrhs^q7_aa8$8s1?cc6zvv?&qOZ89etkPEoM9$;76<(FUBq`|uA>@^^{f&hxqN88!BEay>;Y6w#UMGSZs% z19ZUL^TO?XuIf|`m~J;y=TJ_Pd}nQrKmV$QaMNqmr>P83smz-wAF8g12(LK|(*a7W zf=Ia*&z9GxjFh?fa3#-PxC1_wS!Px&mcuz-zc`#lBsFN87-)qCq zh*OWe9}o@|eZRFdAxg2Sn0t20_@~Y*b}uP-uavTBo;H%0ZBz`T$bqAq$%}o2$^u2& zwUuELJrgoGG@}cv1wNGa+;4ppuCQq9BBi(B;)~mA9zD7rgiw%%b&*%OUql52C0vEC zQeieG29eUh_?*w3+QVX8N%of&6Ldrqc&AL*KG{c!_VvBqD#PN97tHzKa_ldjba{g{ zP$rFf<5C(hvz@I1CZ&jt{q$w-(VT~UpWuC(dyaO_Wjm7rjI|qkLurcwPK4O)n^kRD zW+ACER7jLN%QHF~WlP-vX7}rPGXuZRbwERC5So1GiTCwO90L*q$gFfD;YmT z84O797ptJh$5y;x8&1gEcu_rh$16R2+&S+h9_zgbl92!v&9{XoF+6+usXuy!9ryt< z-kw>Sy`;ZnGb+~Ni_#=v)H}@Z6--RB_@76VYrh*0RM}kY;mN+64}awD)<=Yw zlH(pf8=pD7-r2aw7WDHUshHrH2^G#twhpe=+B>V?PV72ZZ${p__wbLDA2)Az`-jNx zkvAu=UHs`5)?31;ek0`1)PI^>1bqAl0CaTkIfHg>?H%n;Uc?RFh=)g8o&n)C%qF?1 z0zB`vn>tK6SU!663i1dqYpe>9>a9>ju2AD^^4_=ld0%0@h|{%aRJk7ni!NCdh0I_0 zBgJ08+we%iS4#JP4)?zx?P!_xIfnTjsSf{g8s+G=sSwgwxQI0j0Y~r*S`qHpzhbw@ zx*WGGS9_BG_Ma}|L}OKq90xKHuA3N5CxSygDw3kPyf!(#A+CQ1wOAnI=ZFxHP- zvN8VNBz`lzOL}@ywRWyFNB^l+Kyvx{80~`%L#Msu{~I-B4Fa}R2GR9TtQ1dbNYUX+?zKphITmdKD4-T z+R^8MBK~wW4nzTBWkoTlG6kY$71LnwvROhHG@c@|M7vFe+$l z%Fs_Q2!Y9#FGd?ydtxA+VWFwEvpnKXjfA|KCxO+zA-6r}p_wB}mv#iO`WH8E#C*Qk z;_thW@-lM@>J*f4AmQ|_dD`1x$0CxWmjD()S7r_mw`BR>z|ls3f?D*U#a#3pb5jq8 z^+1n$J$!4dE@T(05^*ofkM4ti9C#O1oN2M`&g1GL@|}I$lB;(Qk<&B*7pmuigER57 z-(M$Ph`wKdcxq*Svn*d#tLCVmZ}D&E%gIG-F}j(!y0I?ZV5&D0nWO}3Z(jJlf%?Uq zgXsxOdCcUI4PKR~aw1n+7C!rp`bj5zFi=RQ#xONMSJ~o>Bwh^fxEmf-y@nmzsE0d0_&=kvNsCu z|F8DGJFLlc?UT__XHaw$0i`)orK&)vf{k7c5CVjN(g{Us=rE$R&?FL&&?M9(B$1Lp zf`CdhAR;B9gY@1Mq$s#KGo#}<=liZbyZi06yVst7^4v-8rwkEsJaE@8@A_KzRP?w_6H)rBk%O-Gd>fN^u&ZzCCLtz9= zG*G0p1SuxDE=0w}=M@gJQ3|JgGNMpI!#xbRw4{kK7Lu5SI{c$Y3IpXp z!$a}K?eHUo%_j&is?$gTFRgG=5dIG21t+jG!5-ryi6imuAIu?JODh>F`KEJ^2!vL@ zL_xAnCKzuAYLnZ`0jbt0T6TwfG>?1hw#tm z7Nr?FO6i#?5dBU%dps-voEz81sSI{}mO}3Tc$p?)*`m9h%hW@cw`H=tc{4=UmPKS9 zhpfhNQy?5T3`X)N0W%=7p!xzDft!_mqqXK-&*pxi3v0D$K8`j)>Q&dr2S!X>8jj)9 z?R(_m}}Ssk62yNQGIwJ}4T3rvc>YbJ$#>E*|Y0 z!QTV#ipAoej(qrq2`*BFC_OV@$Ay<)A=*4n*giuS*nJeodFN>*ZvTcU?Ga*cV}f z=1E(0ELXYVoCJ2pZc!>=MA+=T%h`zZ%Ei0%4t-R}{>0Gz|=QOklw7dSz)-|?U z%g?{gHT&-5mVf1big~2U(H}{~?;^1(uRm{4N!+Vy_>qk*zu_zYU+MpvSMCo@Zn0c< z43(nXv6r2~jTCrF#j17cFjk@>QcOo3%?SvEI0@?g1B#w}J)QQ9wgpuoex5uM)W zL0k3e@9A{{@@3Tcqj7xzEA%Wr9Tt}fvi5uC7v8u>~ zi>a3B?286CwZ~#aRsdJdJC>~!3bH#K$_4kqaS^mt-jWwJGdTDm3*}rD>2H~g7gsjA 
zIsiAf96Mh1h5$fNF($6kekP&Zx43&OYK8`pxUgCsP?TfYHZWVQ-KseqpBgUdkj0PYFK6J*;m7pm3|N-}olv6(By!K**#uC^$LvfE3EJe}lo zQg^CH379Vr@GOQ9Y(^1TKp=JbCZbC)qbfK7$MREh$|W)cXh#f8jy;r99y_ISvTXz` zYmilG^N|?5Y}0W?XMhJ&tH7hiH(N$+ztL(+JA92smetZ$iQA;}B>CA&r-I}M^3w12 zg{s%@aC~9YZuhU~QKu>@F7mrlwc`ok2}}LQVIHuXl&rv8 z#(j5Xm*GRGO)9`c^N#2Rt5xdW!5;+=HxMOWFgdi#B0=}?Kj|MLuW3_Yn>hSG-i18|hA#M`e>+KmvZr>9V6cq<7T=v#Ex*Zdn zfBgmj#R*=)=%2m)1wnSR4cymJ9-9C3-xXX#XN0s9B5AM>G#0lmGUkgnd*m)T^M%b;bfX(2 zyxMR3?qFL-++Q1?E$9ndMKY}(ueVZ!zvmE@9P<+vWG0e|Ky0a~gjTCGeA15vw98O= zNKOKiXB!dSouDgg{Qq|m-v~_WUwqGiJq)8Lu(`UKg$$@T+ukCJq>loQ-+32?uLQSY(e*KkwBi> zURp`OXGW}-Sl@OH4S92sEWDl)=@f_pB`-W<4FxYM_)jq zgP&hE9w{W`2{MzKlfo4154aXPsTEpgNfnse*C`JsG^2xVrM*z1Ndpo*&%{h>@8UuO zYyym40R%UoF6)RJ_^&2j1~6m~yB zx-ptR*nT%*p2oe&_p4)aBO&iigjn@geziY&3LKB6_6Ph87WO|V^Bz*|I3-eL8$D7+ z*{;vZzkBOzy-5${bnY-aXRmTX$mwG|J^nHa0)O%VD-P=}sSbReop6W$*{hphc@(eF z1!5srvp*YoF(F}gMsKT2zw&6mIxGBi(d9#4JmG7zkALMEI4A-a_*>T$q-4^23v*1z z3=yfd`}5E89#R#!w!|9qM)FveRnfbHe*11?FAVN}C%cuu!%D;RF0%}!#-T;;(`w{i zukI~H{`h8zEuc01+rB(_DAaB2x@jBLLRsKwH@Kj}q7vk1^ZD%gY(z6y%_)n+W+m|P zm?($3baJkum{%wO#D#4p`?aUM{K*F(tp8xas1WHnhC(i{41cgl@-9ABMzt608O?97 zk?=$rz%Y1q6ODx%5JJn;skQy^@n%?ME$CN(!QahFfqm z1i!~|EZtDX!-VA7!P@t>mllhQ^sNc&S~L{_5!XKFCg15tC{F2*j?bc(>QHYfx@R8C z>9pcT1{ST8;1nnmAHXH2)hXR~_noYU(YW6`e&Yvj>+Jh_l5*)_V|iq*OGj|Gu_A>8 z@-m(V3bY$!V3{$JOHjKj7l@WwqJ|`UA6Yl@tu8%;0JovN=!AA@O?S=-7=*PQbvnMPQB*J9!c^HQC;AgZsMvR%RwpCh?_M62WmLB( z_8B~QZv<4dUQJz?mDP>Q0^dD#-ps&)$bPzr_v{C?2qXnRZQEs&gQ^Xlcj($nI9^lx z(+JW9;0p0(+>>8H8g<~s7w8V{n;F1a!DdMBFHq9s2rID>*gFJ{`wQC$MjO|+9YGJ& z4pK%_*U!&QFvCGR@=cALU5Sd|K&}gQcbyBruuXKxHaoH%I==L6RsNkw`Ms=(SKvST z;c@4X!P+%8cD{cO5@Y)oL-Mc3EFO_bs6s`1YqdmmhC-Sceahn4;$m_?p6E+E1Lbr| zeO(x%6cmqL-QQD!~~jae7k_I zCAF>B4*|k|)W_=YoUFy^&nyyMY4g?Kt3M2E2+F7Yk}h`nHTI<`>|+By(yXW2frrNf zp;iIDxhvA;)N_p19`9w2vcxRv*DfHX?puUdZfT~Lwumw7PL{KL#>@v3Dit8?o&6&i z_bXK>netAsx=0q@5sOb9T+!+Ppb;!*A`c{MQB;Tte<6>6+-2Xvr!4a5~X( zW4W}?s}d!rX;JWAhXrgRt7}H)vW#Z#h87uh7wZR0iaJUzko}Mr!u*>ICiCt8u-q~y#seW#|4%t4cy9iZc!N|!#*Wci7RpJU*h}8qM~9G2LFx{nZDA{q-YNZ&1gkyrS{ksN+Q`{bY|h&ih};RxwJPtwe41$x_SQi%P8gXkE+tn1x=SZVIW?dv7M2bQz|Y0}R!&L#|{)SH7@44<0+Fs=;CHmjnt-JjeVDF}Xfh zj;U<<(P<+g+;iWvwSxy{(h!p#%D7}yUK2Ww9Mh!P-!=) zdQp~8@#*r%Gdrxe1-tt}dF|-D%vPqdQv7Esb|*XB1xVoe=>F-Z^~I&WkFgmR1L2Q9 z9oAa)%lKAY^4A)ydfm15(ED$h37-Ia`O^%FowLS^)X?GS?XQJ7d@DbIw=2@Qp$dT6 z?}&5_4)DwvNO|gLyQLEGwXpxBxq?uL(?^G;v;XflHZVNdpX_hB%5BzXBLI;D0&%Zr zgt^R=95cl_9ytF#GXmWA&aty18Svv^v2_d`P|?1WC@U3@1;^Msh&*G^wIZ_Ny7k$n z*mDstKm5aiBnXU{biDl!KOWsW$R@w?$Iz>z))M(qN*TAp_9q73`SlSZWSm>=^8-EyB%%wNaPL%nCVG z6;KJ~)=L>Q*i0?YovF_u^@0tju%g_^u3cuvUTbgokrzJ4&gqoiFiY_2PU@+6KyKSn zTNv3KO5MTg>lH`wMNlAV9)86PXJ4H2=e{ecrPJnGpb5=Rp*L?qm51L#CD-ApBGrwa zlxpjwO~6mM-C`<_m?quqY`Ya3AO@riGmzMD+a+v3xj6&UfNNK#DPahd1zw=w>nx|?QHZIzP{9JSa=oZGD& zX!4q}@a!O>EHEUvx}d6m;trib%$@Pz%2HZ%@VT8hXNYu1#JZ&c-`F^2qe zVox1SOe;6>Mw|r-PI)ASEa15~Qy}WldM@LeBej1l7Ime9#|xIjS4xbL_f2dlmGIfs ziLmjVYwxymIZ{gW>%~Br$sj6a7&J)Uxr!UbJX(72g^lcOvNf1DtuTG`oKcPZsfqbZ z%eE)0dkkcXF>X#yTE-YjmALroi_c4bejizURm>x|+_vCg9FQ=L62a0-+!eC+PSzs7SD~x4;r)PMS4xg-pzVUs zxfOGCKH9syBk#D0*gWx#I*x0=xaqKPHx)?d^gW&Mv9 z)!PjB6u;em$U?MkyxqXHbW;hGOtoEEUbc74^c)1kjq@D_foXn8-)?FUp2Jibl|bwL@9QN3oCw*L*** z;TfhEY)iJeJ@wXrcL=B;!7p1R-GXccZ;b?Y`d4yA=dqi+Sv;6#Y7NSn!hz`e+$#Uy zI%%dK!2WqZ{E-~QHu%h>UplBUZ_okl4mv{g{D&W zD&0pXVFod(j}zf1UuFlF(Vbo|v)<3@5nB!M0wImPlmsZPLT&GmC&+fb-p#F0 zhahGJE67HaE<1ug3B;#cp;z>+WUIUa)a7mpv7iA&=?Uefdrn#_@-`m^GhN;pCa}Ng zUra0)NJs2l4=OJLgmL7GAl*{uk`3ScidYGioSMSvY4ukob7?e$mzPI=7RelIQQ(m; zc>6{j(GKRghkTG`J=Ie&3;)7qkhN6XWli@9czj^YAe2BhNPfEEk*w5iAO(X8iyKxt 
z1fUU2$znUlTKTo?Chs1gjRRt#)x@~9cxg~S#$Bae0E|(g?ib1O#oE_PCp(LhPFjn{ zJHYA~d}YQuU)Zcp^7aXp2+ia@+=Hlbt!tuGtFv`rCHFxiBeL`In-v4Y)H1`$c6Aog zvhb7a2!azkzsif5$U=0rz2`H6%!ZL46*gDEK1UllwzVht8s(K)I|z$4o_*ESKW&eh z#-j^BaIxU?clfpAsvL|8jyeTM_wm@^q^3PmLgalNPL`H33;aN~fDXT@fy%q#f2`>P z%+BJF%Q5)qadT{6W6Oi3;rydc#;YhId1f=z<{zKm5)qRT|Jus_S7d=hcYQw5tXLL) z+|}&pC~vU%i}$N8&=&zgYQRdn##-@Mp{75CgnJfmBJV}`{fSRMESAcH1TniC@ zxoZX)0qoNzO=J#lg&FA1C{cau1kh9gmU`wj#b@T3R=(#FB!I+evx<$M#6`hEn5JfQ z<5Jwt1X4i{ipCTK8d1dqGEVt$jUF#+u1nlTSTDKUPF)+7aEy|)bayaESqXEAfnZcl z5wMUD<|6!=PLU4cj?ZJ>8jX|=Z31SZz_nUx)WWzAZe0)gSa}4W=3HduYPrD`p2BS~ zViEnM+?*DgmI{%Q%x(5_&LJ9Es6svc;dAjHWwaN4e;JhOj*dMs$-aE{UE;AcZ)UG~ zs|iTdp$Kgq*T1Kun`}r1135ulC#ksQK?`Ly&cRhx_><v%yEeVx760P)vuO7WvUev-LEHLkxHL!y6Qdmh! z%(J3l1wAz|GXYaTlx zm;Y-(cN8W`)M8P@UBt_oV#`mid{s2BCRjerC9UAUYk={%rQKhkRfU;i7VBm%`koJu zS~|kp;yvh}Vl%UQ?A z-#u3w`#wEb{VKdoeCmX*UD)77bMfwTzlPlSG}9H5uaO%1!&j~p@sgDR&CmDdkFI8E zEj-x0`4|7i+Zq5LnaM5hec;r_-q3}-zp4rQ#;>(>q=hK@EMnzc+m+nBTBq-t#Xobn zi>Thh++El%-xL+PHH?#K>x1y_1;oEX@62Sn(zs*r`5&tPepG(@%BAzDXUhW< zacv`ds{^xnBmDed#qvBo$6J%W{;4MN!uybj9#~ zcj?WzPtlr-QfKQjV6XawaFQ4w48}*+#??HX|8hiAx4D)>wFcJBVWb3JY56KM*ZPeg zinsSO&Pw3nnfn4m8__|PePhIvLoiE5NuiTLb?tn1ujcNcERg}5MZ2I~UKROxGy&3U zwMIJ)UooX#``lE^$*qL&#|y!xsLn)~&upcJP0y}8 z?`)XaJ5@QXva#`{wr)7Edxs460Z+S=9g53cuj-vaqFL^Nm~`A>HE;l8+VG>waFU;+ zxt(4V^n^pHBzIIXB8D%$79v0~$dLoJAreO9WWrseEql zT{4Knzir!17_7q0I#ThLc`SXRSg;H7e2VpHVN>rCIO)Azzy_@vwi%MRBx>VisLorH zSe|aaAf+zw2p(Kkw$%|Z7^uv1V^_18gFGR9)##|JXbvg4PIA)gU0HP=S)wF-Uw;TjNCKI*lk1C^(1D$m&>C^7RxLGO2Ga_h^rO+}KE>}0W=S{Owjh=5? z(X)f#3H7yk@*6{ikBJW*e8EwP64VP}8E2h5t=g6s9jR6xY=_q-o_}4q?33R|J@^c8 zZt*vrTg#ilNc$bg+YdL{xVdhw^f=|c;nijL2zE9$7xbmCS^qwjs*`tZnJ&aT)#oz& zqS8Zt&fT+4@_eJ|DmpIhDB=rQ1wbPnrk6|Bq= z9?Z%boHk^fJPA@p!6oJ#ET&=PQMYu&s%H_g1f(MkxJ0p#EIGA66M%SIEm*R&siNSg{0_aN>u zr_;TL{;*9WCq^o5*&(%*xsD+YXDGBw=H6+ZYza}Ae(A$+CcUWqMm{*r;U@_fFqj_b zQI3Fh=?%_$7A>Y{7E&!2F69W!uv;y68A!wDvNw{kAkgw75KjVClR413ZBWrmUUV;j z-~dtM^J-ibK&L1C@<0(nyEkgRYe&1lgg*@B{}8OlYppb|AfbRaNHq}*+B zm@`^CMu@~&g2=H11KpH@ioAR_$DXtO(EZ)a9t!_n$^`eFt1Zgg2r1ZkWhrX)905f^~lv%Sg@$+*h2+^nEWF)&jN(E`EK-k*4(k zzaY(Z>PFfxcLSxR?Wt!~Rp9_0nXVKaHHH3OXy*BOflK2w{_FQ-kw!7&&zNt`41hO} z4o>?yYp+*>fS@o{>_;m{a9u)B9^c5EtcClYWKnVdt1QjEnOCQjBx|h|81BGfd<%B> z?S|mNxW=^=P7R5e0v_8epGRqV623kV0*`ZM*~msC;jA&HM7%(0T{~vueC%cQ>eFK+ z2?uv>k*FAbrk06(zP2z6Yl4OmQi6zk+Mq!4HY={qF~!{GW^HtPRO!dR-5J$k-oeGC zC@C4FUtmN#f$Js&RLtIO(7FydW)h`s#FyJz4z}WwgCY`gLltMTtvt)05@N2%#1Mjf zB{O>E2K2Zj^=B6Fo4oOajQdw=o*hwdzI=K;h+o}m!pRgz^9kIMs_1Mm?u#r%XWO`l z6gcX$9S?i_-HaS^dtx^?{e^7*Lk=kh8W) zFXgBX5jb_!ttou9sPXC*iqHPdq1!W;1x~-1fVEYXNYSf1;CniSYb;>cSnlzDSdhLs zw&tazj z%cazXWi1^g*pj(TrA>vL>lZeyAOju6t!^vVEm%CdthB6Dn-+}5=i*qn0!pln+HUWH zUREH7oVPBPvp`|%?sn|fh0of>e8!Jo@Gf;O9z8G0kF{2Xmk=@n)#lw{K|?x0r=|-% z47hnI<~qi++B?%|BSKP=@s*lB7{+7zb5WAxQb;!_V?SxSb zGs}ot7AhT#_M~9N+?2);)fdV)XqefUr^_;jjLbktJ;4ehjtJ}R0E6T;`!mIwrc~t} z?#-ci{U_Gtp7qpCY*gDbBFBRBC$FLmorRwDorp4bI!4Jh3k;hs*hPBc{j}8ba}L>5 zImNLcx1=`JotO0&lpUg!loY8;@Y>1=+i^}AY;|JIZ>k=G?8yf*%&uE&h-dpmTuzvL zT$0$*t}tkTw3eM>I}9xQwoIgwU**-z{Q8BBrGf;UbU?K_nhg8>68SO`9asLEJ#uIB zYS)S8Tch?@>6chKO2YO9wjXj`pMT5#_k^oGduWnV%(R|M>9Evn0b126fE=iTLL|Z? 
zj%AlpquMI{=iA(%g(DeC!*wpg#T=sMb4;8~$!$6K!S_PHBirjEWC0eLhQ$4llijo&sUQbu46&!OgvI>aRv!Hak zyP;N(EccBoyV^o!sLnL{HRT=g+)`T#kJ$jjZqN%qas+hUUT>R5L)AkTL7pP52!+ z=C>b+7ys{edpnU|D4=SsUZ@>JupKPm%F+uq`nAI{1q zYIz0YJO{`e>F4fG7ThBl^#poxTf2MoEw2irEM1I0WxSIz`BvA>>CuiUF_lf>`c%U9 z(YRglPj!Vk{z}qME%LrKI<~zo>y@G3f=>APHJ+0jCR?F8P>y!#lV`xEdM4Tpyf&Ca zsG|bH1;bva+kbpTSX;mm-)M<;I9Zz&C>diat>rX(z}MI;znlL5l>9G~0$DXE^X4et z#@WCgbAyOuc+){Z&Uvpbk2mI;uv!RY3B{>yQ@J)>)lsZpn`p^PLDEB_#{96o9=w~0 z#bE@UYG|1sN)hijVfSMEa572PTSG6d`HnMg^K7N|MH_l>J!Jzr>&@PCjTz?S(#YjK zsRIHyV*3ga#h%^TI=D;{?*bVdFTW-^T@9#sfqJq2Vd61wP+MA&UWc7Dm1K9(y5}L= zLC5b#f@|};Tde->dMoGqU)5%8KirJ?F3X?f@o!Si%^X!}wU3X!u&Fk@_z53enD%+% z7MnfiZ~vUP(>iOKm<_R@%_OeyB_to@G2v8&VG>NFsTwohyeDcM4Bi}! zsTPpSmttbHn|9*@B}DGYG5oTSK>?{eGNo~~L2o|)5QCWmCQ0bgi`?ryN`7h95lFzs zM*|Ltjat43FI{6ZzVmytgx7!EY714@?Ds-cXr?^X~h6(E`@=hjLpsuI&Va;8p*TP?vwD+Ch(|atC_}Cqz&PIiZSc8|5 zy;8}e^OLV^lopi&j(?-Ll?An~8eJXV2d;IT%kN&x+hlhUg!qr8Tq=T}#=qodx5QV= zxH(WIsWhabCY?)ekcCa}$htPte|<`pZ!?RQz|Sqq+dHCV9q1f|{lFuyI>2LKv4_*(X7X zL&;7hxUr5>PwS>fPDhKq^iwQYu3i{ft0{a-ta@?M3A?J4fNGniYU>N9(hrp&B}>#x z)Dhxu-ehe-LV%6Gq7JDI(|WgvBY0>4jK zB{_BN-fWd6Bm<&RQgB#XKf}+WVCzbkJS$X-@0{=#wp9aNjoL44Po7?*dW1;47!RxR z3=eJ}1do09Usww3HeQ)ucUSkLUa-~vhixzzPdxY{A%S=W&O~a4yPhKsRg^cL z%e7^rhX$sphIj{^KflA%2RiDi1G1jGq)$rrcP-0QW`%k!3jC4DdF||UO&f)EWunK- z=n$>d%`{`XM)dsX~xk_9bPxt zvFi@$gS)a_V7q~vS_g*m!w^$XcAH=%knc*S3mUg1T3SUdEIWI@D)hl>d7qeAlQ$Ia zNF+6q`)KG;Pw8d-@X_Rvw3M8+Uekh+GA8-eo)b%VJz}vZIKF$7DO0CjX(6lNf1KI_ zf@vJ`Fz+bQ2GB}r#H9GLWUw>tncmZxp(nL6fRtl{%a)C&A&cH2=2}j*%|H`TPk{7* zFc#z#y>7sMxxD<8?(h;O3Dh>TcsATq5g9(Vygd76>Z}N{ufD!9y>ak%cZ3qD@v(N- z?I(?ayMt2XwrTaHiS3wnuB! zdvF~5Z#gp`k%K#dLd3Cq`J zxyL$PcT5K*?Z$tqxRTpEwVZr*zXxOnBSVpe>YX=wOcF2lcb;&A@ZP~CYVQWS&l7S8 z;c9uujzuTK%)8n)R?e_)C~?UYbXEMMV7CT!6RC^nN$KX!Emu6z94PW6m+oC@MdDtB z8ga;^PlGiGH{UrXqbGSqMMcW281$oE3sdfXexSMdlfFH&sov1ea~(dc7k$gmkp|Ho z3SE{`k^xLG}bETt(!bzT^ zn5bM_&clkSkq%QrgC!WhIpMISM18whOqtEK^0M*&Io2y6kT$TcA?#RPn5dpLVy4QBGMctTmYX8W7LTNB`D*WV`sq;`XnmUf@O9Ui5n&mm?Rt!O9TReK-Cm3d zW@wU-#jIEz%koyvJIOLSB%jkhGb{68R^(`C%l)SvgOvH8n9Z^B? zmkHF-%I00CqLZTl?wP`lI$kNA&bn!x`kxzicE$O?%_;Bo#1c^^WebxOCU|O-c(oL~k_IC9vx_SH z8F}%(roE+Q6_dkUelJydpOd+{&6(q>OV@pCZ6(BT$k6+e5=bkX{+R@*b2@ef-b)87 z*kqN?EnP10lz>Vrn{b-MNC$GYDr--NldW9ODg>Og=(&En{M!7OaF&$w2$!OEv;9-*?16HoI0a*_wlE*;LVzy3+6=9O4 zSP5+FQ!*18>7qzH;&)v<>R^TOFtuB%QqpM|6;lm5D};&A@@ce};j-sb&;1nG_&DYh zVm)c5_pujZJOqtY9CCAt3=k_jd96~^Knys~ZcUSGIt}azohuynRzno40ZmKG& zC;`r$0|3sQ{Q#%K0C~WL^S@rdrWekxi}LE0mO1uUx%KNqvKwlIl9u z)vMQNu3f)z^CrzrN@`j<+M9G|`J2BoIrl5&g^O3t2yaqdr8k`O3NTSI^MTZvxJp$$jzCHHvE#6cm>! z&YnL1>jN~GY3c9LJ%h%^^;`imunRwiXj{afvL=~yOyheQIUb6N!*pM`dK8uPJ#l-P zQ!FBukeLlOb9qzB$^A(F)$0v&OZT#mjb}Bmo>hJR!aoRbR^xXO&afIZzgBqh!UYOS zs!J3XFPzDE?mW$fi?sJBgdvyc9s?O3n)Y0#XMd&*jfsuh5P9;#^<`#J@yFgt4wwrn ztAvr$Y;B)MfphtIwsi&r z%B2*Gx9OLkFL#x$M*_Q_Zsi=#X(+lZ%@ag6oJ6F#vhDdI$G&VFRfx;YE5$1omRj8otZ z%h4y-Ulca%VPr%mq}i*Uw!|6=d$14&#wQJTR2^XSovOH4Mvk6-??G&)d`nQr*safHM7wMyYg~b{Odh_I zQj(LH{8K&uoPj-uJo#^^n4wq{wJ}m5b5Yp2)nPBG6uDb zauhx?w0#>_+>lG#2%bY=9V0P<8_yIyu@cktFA2sY$cHc5edu(07X{Y@o+1pyjuIuU7^cI)~+hPrYk*;NsuGL7fr`XupZ(G{;KSa9qiIfs!#{6z2FM4PNZy#j& zX?bVAH{fc*>KpQ0ri4zi)bbk)S&yrMMa<0{HwbNSt%Ely3h50@YlpJaw&SXlg!_6s zHIIfkl2@h^3Vb|?9;*4Z51~XL(D_Irq;yu_HwWD;DbO%b4UK~4W3!u!Nklq#T`qe* zMP|7GmBeaKP+>UN{0g0Gi`QD}<8rFR7-1M#_Fl%V@=Y+;m>0&59gT5mXjP3GP;|r^ z3|iv`>^NYAxww&Lqw-^6SvuJ}F>Eufa@JaQ@55}?Jcb-ahNddDW$O}|Fg!W-1^TEk zMPKunPu=9Q5{qL)CeTZk@*R(A>JU?VBJfOc6Q`gRCGm! 
z7Bh0doN(Q4m^0cpRg!=o>+fXWLv?~tvqLi3>JVk$ngQ^G6BcZaFP>y5P0WufgTc?& zzjk#|aAhhWMHYcIYi@E?nQl|7t;%(!7> z$2;{}+-i@gbz$1kAPT1F0%Ety3(3R6ekvur+)m=}F{KQz8M!Es=eo=f-lnM%51=gC zdC)RAKZC%`)_*Xl=B(fM?KN3T8$1Q5-FpdN7GgY7&P)UDDSH&_73x{x7mW|$_VMoE z-e-M%oNlXmks^3#vK zLj&k6L}5%x9Wryk0Y?|z}g?!bU47|Hn%j)fJe-jdS&yy&;?%(Hp zBBV3)elD)yiw^esNbJgrrGHML8@Q-tP#fAOlMwf*6|@oIYlvwYFw25yyT7J_nqjYp zZPR48i@pw96CY9YY}`F8!8(EIv|+tEN;p3!v}+=pg(703>K!|ws>=t1!OQL~o&}JI z>Tzw{hf7tT{~JR2zg-1I;`(dn(Y>2`FEj>58%G~}Dx)*Voy>OZI+%2Oh{qa529!Fw z`py`Y5Ff&43uzZJl=th<2l^p67Nak*-E42?Rxw_?VV7i;%lVVHl`Ix5;8rthhmC}X zoiF2cp)Tlw{K{}dF@JP^7&P!IHL?Q}&XS5nX1OS~5dmRK zQO|5cH_RUiDeX%sAG#$Q5Awqp#f5ju5@00*Bq){!63V40KhD>ahsA`=DiB>1h+0m} zk`0U_a71eoi$!f+iNcbPY8#*R%XX@F(R$hTPIgw&`gyA=G(@6ETl)`#uyuc;u$_LP zgTc8TuGl@bfbvN}N(5zvZjZKPUCy>>f*=frq!fQzx=}AK0r?b(2_vADAQA5!S$Z$A zVjA?ShAYTeI9=|UTEpSyF~ucykwh1ZU9hL%Q89cd)91V$#_FpSiZ>)&;S!rg&-JK{ znPd+`vD-uAK@7D6LFD;>Pi1&r=n37R3A_V4zKl2pSaS$pRMck85uuSLe>(;Iq5(9s zD`;5vY;>(MtnO8dr{7cUK&b9wD;{melX@lRGd?2k{q`e+RBCYfzT<{T0#;Wl zCvlR#AW<^ZN*7lmbGrqEi}JAPS0su9J=uUZ-`3shjRn2T($QJ;KPo%alq=Vo7QT|+ zmt|mcyw;5Xp`I2DDzwuKvENHP?@l5y+b(?E7z=W93PLOiGGMu&3GVb-6#^Rm1I2YV zizbPs!m=pcs{4bS#3HP=i}y{OW7r0Z^8jY`1niXrL1%@rXw? zK^;2SDFg3B;tjVpW`1?`qoTMF(s+-ZNpEhvYr0<+R(dV&bM5>G&+qD&hM2&PiukwJ!`VHLiFh_s!avM3iX2vC>* zJ}_k&y#w*zbDktBEb23IY2gl>sv=#o$2Nz*XJ- zd&b;sqJ3VLirON!aKNRii3`xh3N^22r3z^g+eQycmNcci!>+3NY0M%&**lKp&nj2< zznzokh-@mMN$hN1O8uG~Lgu#i9*$R|R-L|{26J(E?i$#^qIL?%T9G*g+zfu(2>oD*lD=xA!v390p@F+K{EFw>ku7EM`su->!YnHg=;8aFO%3ZWlb4 zj1{Ipe@?0rxJL|f=y%q@#p~>Yuy)JuaHnGSWi$F_+>8qan?167_RMJ}~z`>=t+a|L+=*d(C_ zK@WcSD0HRi(F^@B6PXNJbCxJ(!}C3T<&*|`Ik>l1%@b}l;>i6GzB^-cpr_kP6`a}+ z>J}X1Xfchxt;xmcuwo<k&?ET4aRq&2 zNLH`3u+^MOz>`tx^{0zaU@*ReaML-y&YgA(qbM^bKk{{^2{p$Q%ePtf` zfX&A7dG%|67#ux+BvG zn0V1Da$&f@QnZh_II3`BCN;(d22+r%C<({ALr$JUhoUDQ+=PjKdd)vXO)82fV#4z#&Hvlw<lJsxbN}$AX_s!O+>}eo7amU@qVd(Q2C^Z~Go1M) zIyQNZG$DKE8{ZYu1~$!#ycd@yO=N(x7TzY{dSK<(c!%G==38G>gxMg=RtzJM%N!OY zMa3=4sfVY4zp>}N*1EN5eLR+T;=*!KV?^`DP!~;PC^AC5RwS3fOl+3fCRG{h{r^#& z$I}&oljgZ+0aDX9jR%J}K$ zvx{hqXK2V6-9JW9kMjSP5NcvCtO8^i<^y7iK{NT=_aajm*p5_A0c9o=L3)F|-8Ya6 z0I+Vb)b5W&dZuZm;%+_;b6?lgv!Sq7P2;pS>Y9JfTN}$`%ppD}-(~X9q+&Vno1Y-w znCE4KNa@^H**A(1v5IMJn5+m*mk-J%d1!p);WiPbbw4B}rTi!>;K$GK ziqaF;?8VdpN5^KTHbKTyK-Vc?*;?56^84=aAeqTZXUgX-lc0+|OV7CP9Imw%&wmUo z^Qgo7o&v~r^V@Uo`}MY!PQZ5GZY~ip0-ax%4sC`^LXT`LbuV?#@YE4aVT#g)q9m5C z6Ja(+wZY?PlVh^UHgC&-ItO@9!Grrb>s5c0zUcD~b$Nb|{0aB2Iu(Jg99K?~241OL zaxl9HyTih*k1l5Sc=x##LLL8A+(5zr=VP5{lOLO@VpeSUfD}6(%pX4tt|F;i19D2P z1zx6BR}!x3g1O8LpsX+$f)h-7fI-Ne1yp#$h%oq+ENe+I%s}QR%VOOJ5KOL)s7&yV!pK7)vT%Gi7GZt%)*tQM}%cyua(P0w~PcU32U)Yy>pT% z2?0v7JIq{aw@SBdzzbqD&*2Kw#(s>q0=?0sR!gw=qPvCv`EL3S^|r1;9hpynwDPgc z9j0EcsHIk@IgErpOi!dxf-3S0S(Q5`w6zjeVMAU#w2*$=ai(&XOZU;agkcA$TLSEn z@wA2tXMf>Y5G_@?V5Xo6Op!2(^r-~{I~+C?^~h6ww;9w;7iR#J41Ztyc<}SN&xpSl z|0{Fgoj>{U;NYFkoQyr6tb;atTFVa;CU3d|d7RtS^v7sB>d*E+7WPR_ z#SOq5-k9ObJsIZbl;xEh8Y?_nNi0NwgvLxac|R-R`jowAh8mN+phTDQoNtpr*}p+p7nl(2V|VnFQkVnaTpDp|EZ3Tv{P`9_F20a6I&XIo(VmoEh&+!T~_IFOG)TeWxDn`FOwIq zqnQ#kx#(<29lTAUX;adOw-Dz%e=Z6Qs13>QL7h@o&8;k$wn(~F-&#EXK5b{1WB((A z@)G*z)>Ff7^ra*kWi?T!Oy!S!X(9tk5`5&m-6aGT2xLR@ejcRsBz=bVwiGB6siqOt zAHoWt>O}LOd~a{)uzk?^Bpy;A<^HkKC$`% zlTUcK^9Tk}_j^=5m(Z*&M`6=MhW2VQM>7mcs)Jz~Syif%;hnUj5Y{AR)h=cz8lL$q zjfJv|xlpBS94Uxl{vxr1JfLaZ?rqw;r>s5MlS-a`kqb86#tf?@?JW%56x_rR`C zd)#A8kOTwVg49X4vlve;*U6rBETTf}QddMlUKn$E!;T*>gxbj-cBK`BeWp{*zjp^n z=IJ?Z-+8F><&ml@>{Bkqeljrg^>|7HKs6SN_N{mJU$K$18rLUash7h zhxc5#3ShhR%OA!6#LPMKmVHdd#Yw~w1g82e@y8nEINomk<<6P%Nx)t&4rz%|9m9N&Aw*Fk0D 
z?;WzPO&N4b38od1?`R}CO$^bcq}xsr$I8SZnblm0s8reswJd?1ga^+&`raDYuz@NJ z9vO`Ej)D0F3a5Yzk3Hd92T_NofNM#4WByU1Wl)1# zjs2wf-j5<;wKlV6WyCJLrFzfV8j~V_bM&iCwl|-;gjjom!(q@p*r%pJ7{uR8XB%y{ zn^Li8?>!flehb9yfqMZq-szN%p##HP&MD`v(&_5YHPM)bvRBC4J1pDgmynJtSw=?2l3bPYwe@26 zByOjuJSOgGQNvQMB&Vu^FEVe8O+Q^9y*kIb)_x^%Hf%8^$P45r1#U@CLCoNC(VT9L0H>2(rwv-REDW;ZIJ;}|=B|u62svTHQiaQrT zjV$J_Rs*3dm?%G=XyVyh$8blE&sGA}-JXfaX&Z!*AX6A#z&l4h#l%6?1~Q~n-Lsfl z+|9kNVL4*0@kmxKu$~*N$@5yKVlnW-fYE*XxRIGxv$`b;yIlP1=vQd3RDQQ^%+q~_ zPjj`HFC*K&F~s)TE$hd&y&jxNtcK2Lt`80+eI}{KhtPaCl_gR2_XZagJ8PR?GrV5h&dpQxA-=D#IL+PADfAiAT_qp6tGXx7 zKa6WdaF;(e>#37G%=>XVbw`j6l^6s<6;=<8^6?c>UpH&j0vhqr8MHJZqsRG2w;f3R zicogjhH$bU)u8mSJ-`nCo2yW3v)Guby&~$QCER_uj>g(g&I=Q7PRqPvfsc(fiesP&(?F`5-B7K(EDMDi&#)QMP&$kRg zx(5@&@OFlnQC&HB%48D^mJ_Kh%p!{wJq5h}Wx~!zz_U6h?Y@m5Y%fgJYgf$yY{rp4 zE_ao~Vys!2z+p~`fV%eN<>k0nt0z(=-VeUfYF^u{tm}#??lGQy%UMAh;f>W~P6w+n zKwWjN4&xTct&%)oJglA3c8(Xf{7N>fEm5=@V7=)N_fNWC^q6~$%vZQzFm#G++lvCX|GPM=S4 zcBG=hL390hC#Y*#4AoqOl*#_L+G-F9KJ|z@cMb1e~HDY=BW>Z z`!(WUjQ&x#|1ar?`}d~p{cae9Vu04~#_>DG(f%j8)!&Rt?03URy?gQScjNfo!sgn} z$^LF-|6>?Y8s^}~eJZ*URXwdRm1jg1vltP(xKjY}U_arSNiJMy`r4fl0jQo&?8_Kf z(?NE+k5WCnZKaIvLujl8c6vST<21<~9%rDP2i3zAK!*gq6&*rdze#Ps5KHQ++^f<$ zH!*}WW_gFM!sH@QyrH2r3d26AP22JPiUu2P#r*tJ0HHs_m#DoS>(t#j?sN*EExEo> z0+>|GaI|M~VK!(&lojKOpW+nNlVG=JmFB|os|7k-N0lwc;^*Ez%E~Y8BZOfVIm(;x zaXZWGx%oF^HQSHx%P>W{BV~+AbcR#L18TmW0utpXrRx~07){>`b1+izBJxX{TCTII zYhj$cl8AhN>4gepVX-=SVQ;bzq^00tH2f za{eB_>K@N3FAB%?{J>nJR-+3D5R$0y<>i@j{~L3dxv94Sdidwcy{Up;i@E6JY)9g8M_6+hons@=k0X6a6f(3S#o<}GnQyJY3^K2&O@i5 z&75wtG>gzx@TqTac);8fTj3laM;#<{aPvN`)`i=+W7V0_iBmv~eSSlgEGBYK@{@RQ zju}rNM1b|Wg2HHRN4n7EH`A$+m$)nWq+Ync_3T?#8i0*X>NY1L3P^q_6#>4*8fOD# zEeeV;#KoMjSyi$w5nsAaXlU6;v53}@Se!B{D`v2@k|8ZL#lM{m#k) zv0;Jo81|wcA29x^JZK-Eg;AzYty zm>yx51be9L)J`*8+$}JAUVa%KKTXqGsl(v&z&8)GnVW+ic3^QM0{}jEf48(tb(JS@ z{!>6T=}wpYbL5|=SL&6lR2wPX-LS)WzTm(yMl1vAG0egOOJl=qTo*#AYT9c z{P*&I)eQC-e09;Ibby>Df>Q19<{eN%@7k3aA_H(8#j%@UcQC37HFUuW7lzdBYgxZA ztAr`TejDVZEMb%5nJ`w_vOq#`E1@(aEXl=$E)xa^9d_;jndPKj zUS9^`@UfYKCNZ0o*d?4Ae;9U3QPJE*D_2hct(=CW_9W+E^G>Ae2xewtT0N;P6gzg3 z-FA?w{_V%K&l3ABWejJoqF_$rI?Z8#JOv!d` zG~tsDvWt{MZdq#OcMXO;J~!uEDp0%N5Ho}AZKHl$-TX;MSA{*&i*_te=7ulv=m|)v zM`k^>&2n_kN?=+UA^MRBpt(BW2G2{D3g;5z`D8tscM;UK7*I$LMyO+NdwODDeQa+7 zZn>i0dR1zeVz+4@(h=c1n}T;eYjS@igmHlwsOMzX^+>1I#J`9mZi9w11rlhm;afjv z+KG<5kC9+x8t`0K#%n~1=f>5Jq#j)*XbJjQ^jh|2meEapTQC~Pf|TN)-kR9J>c@f# zgS|#GbpqduA@e)T$I*EeeHd?XbWo{?Zx3m#YR9h%qQRvo)=3YI@=e9y?Gn0l!PSTT z3JO}xsc!vjQOSKkvt`T4C0-HTE)?$YTtEbIU*cZp8mp%MKI2^Z)zpoVRZqceiILgdJ!)7(C2^}E==pA@l(2JaEo^>DTqRDCsW5HU}L;nG371KoW zVI=fH*Ut}kocs+ZCxMKxq9QhqR6>FJXzZ=YK73wIq`NKJ^8Gqevj=~xw()Hd_s{Q% z==C2X^c;2u5ndn*Wjq#GB(pF(0EMc@`Jkt&GH#ht)+ox}$Z6K`F6=Mb=p#?#LrE`` z3GQi>QtM&9?FsEh{IQ6zq+orLoS6f3PBpKOG^*rX2TLN{= z?U_NeuNT;(B;m)LMsQ-g;J(_$K3QO^SOLelUt>{XY$Jl2>Rx`bjQ!#2*I#pw(-;{74X8rS{R#W{ z1%Rh3JY25e(sv5MyB807URdw^@*PIe1jvvS2y?=Z%j=lvEc zOTK-!{uTCW3ZV6!J@t2((O*^mKdV?uDH6eP7jdYnxxi3iXFSXg1!QsPdbg=eu4h+}2ZhIB@Q-2q}!1WdjmPbd4P=sZl zA#I_&LhYQyeIuXg>W(a7KrAd~Di#S{KEeegrTNKDfsgMtH9J0;wC^VkJDviZj81Nx z0%Xwx2LjWl0O0hEW3#};+nrx3qV;6iZ3KPNIWYA(k1#_6q za@Nya`)3k)yIC>ATP7f%1nm}WqM_ll0)sF@j*d8_YKyPTu-?UbLSwml@oGBn5S+&G zPAInZFu-tMGjeywr`9p=Gde`b=Mj!zqj0v?!k=ZUZ0`1{l13*M6-0^F5*zf26? 
zxi5YB;5CE)<9eGvRa!6Kef`2$jBH}-rOJcH6LO!JN|vr2%KwSowQsk*obRMTw$cAE z)DtXGOE&Z88QygBkX9jovr{j9#@@q2)lj8j(Js=-Ul%9vcXm&VjBk+Bue}aMzEdX( zV;QW(=b3H7PxAkbrj^YHpF1GkQ8RyS^e1VzUVjKo7PjtZ*?oKbpWFDlipBYJBZ6*X zXz~kA+7S>cJE*UxEMk(=1x&zKBxuBZ58pJN+QBdEqZ!bcV!a|G?b6|o|JXk){POSU zAa4Er-F@8mK0~Nnkjgype01jZ5?hj%zo9cNPtRu)zMw(e!c`GP=6**wexOJ5N-h(4H{W$opdk0hn z9d?D&&wgMmr3lD0awNeXP=Ec9qL5`*v~8#$_W5AXN0I8KFwAY5z;DKbbxOHQZST$f z6f+Ar;D{ncSR<_g_7=5+pA^3q+1&+!V3&cgh9-Iw=s1t9ZUF^D_w_67#`yF`ND~l& z_%@sgL-EEkF5?M%K~HB(K3D zbh|P=RL=}KM~*sp#^P8LGgD7?BV$tgK7>zv15!OS6KwvL#2GtD6#>F)bi}dq^u<|r za+nB+h3H&?RQMSUA+e31;l+g8=>pKn>X906%=|VTQ8DvPc9e}%Wkry_i<_GKdBUxv z;A52dk$ck4WjV%D!bsB?jx{Pdq)bGUwaL9~?&wJ9K+OKCaFV%q z_ef6YXsok~b^T(ybZ%rzZ6tpz@9;|mi3lee*!RdLv3JJi;u`aDRn+**{Cr2Dro@Bi z>O2&*%3wughMKoB}J|$pi+7D$k^z0*oC!s!kxA(R*!y@o(OEnsmqtpB&uS`uXRM zJ8aPn(*drG#~nYN%+!3f!;Q91G%b9ox{5q z`nm$cqDKK2Wg~tg{JW#Qm9)_F+~heS1-=5B`|v$D=K2gk2>lcL-;4i6%PdsaWlQQ# z0k53OL-IXRFg85)G}eiwZOwY8fL1uONw}D36>R81(9-4)Bm<=6)wy5~Zy6j+v9mRH z{S*M`oq7?ZqsAoWeH1`%=)_caLUmtA^h-Ar1gjGV9DCl6D63&}D`*G0 zLe{^(ePm?mc5yskVn*&svAQd*4*M*lB91jDexz57JJpc`>>Zlry=Z> z+Kk4a0)U3a&fV_OnxCG(OXfW~@|)pGtn$ge=&LA~5&iD9P43stuRk*{ zF8xfiN`Dd{>v9d1(cRkhgIzA>2j$Bq-H|wlPtX5HK9~N}*yQucjj?hZc0N8S zll<{){n{*!#+2S7!evk82Le0l6kxyTddoN3^&qrR!YNXD?Wgm1lz(#%>OWqC%d0f#kbElR@jkf7B+ge}fT6t|Sz3G1TtG59ZWe#~Y zkRNtG&1iUvypGku(zFdahvTB!A9jSf!{|}@eT?>>UFYH*2Ul}dy&5v;jTh`hT~!pp zQDL!@{Pe{#ys6MxiOML6&dB^CjmETBTDbK-9`R)@5!yeQoKP9-RHyOE)}->5IE5sl&|N$?&ID4}OC+!fC4EG_++Cw#lMRN@??H(~d7}lkR@*X*YRB)=O`ZAiBoD)8WgoF;3W~ z_Z4%qb`Enz1znIhc!S$&YsI+%2K<7bHwbw&qx$&RQF-h{Goi~#@GEv@jXBo zR2>FXSPTuRaj5ya$Xc=3^nBHOn=Oku+2msKvoDdc3Nbno?^@Y&V~8#w?o+^dL6gU? z7gOcG7!SU*CYbWy(_9X^L&2hxuqCOaSU4`D7qvbpMD;ETstDUW+eppjNxOG%npK=P z{2i9@dR`+MQEFE+>>Uy*z1lh0mwIyrR)(vL-SbiDp-l{m$KJ-wtuZKglm}ewdvHU6-FI5Ak z9#qVQ9IP)3dD_21B~FPIF!D){Z~ zB%NIKTA9E8YeJQ-*6tU1M@J_7^53vgf5>x0yE)Xw`fI`{ofLb(EQ|AHxc)aRTa&31 zk>>08fXP1BbjiC%zb1m-LR?#-{(8Ioe_*9T8pe?v^Yys;4YgM5N6x<{Mq9>eGdACf zR`mYBdeQ{diz;^q>IY6FMRhrUPY5$LNo_ql=3n0b4Xd}tK^0&Voqke$3iv3*_HOcf z;@a*l6qB{oTJ-#H=%T-=#`0{0WGp4o_-oQIAG>A3p-hT4``^@Di#P*YF}qbJSM!dge#&w-EgoY#9f$JR#fQr=rMSxo_4rzqkcWVPwIC&xbcf`+JxzZreg%i zESaR#!w{|9{}bH`{_E%M5HVfv1s?_B8rdY#m^;zREAm-~fml}E zYs2|CEn;d{bC9AjpJMNiH3uMC#l>a_DNnh%ZBrY#+0A+zmbay{hI8C0I_7O~*!+q{ zHO*+`vfgoi{Mz7aJH;eWvpLC#l&1n@#SrE&If@M754frs$-W~s>l1qu4I`R^Cq9amw~=mqIsh5+TVXmqP3jzr~Te8C-(Axth zg6p?`{NlVuSLaM0Yi>NCqat@1Yxrb4)BqDgvYXbmsAJtJ(_VZy$}{Bb?{9JK{oQ)bunwU9KR?Rn8Ln30Oymm)@kE#~isA7oWt9RzBzFc+wXjQ=o`3WhJ! 
[base85-encoded GIT binary patch data omitted; no human-readable content]
zKVlV$zp^eEvl~R9-Ar-VJb($ z>O-RLjuM&;$rG7T0-5&vvoj)>8jE^rHTkd9xMzm#j#(~%)3h&7gKIV-CFEufTi{uS=i8+`PwynO!t2$Wz55vh#LhX?RR1X8otbIDQ(IAg z{0)iWdXcdH=o-4mxTk`d*S(wlsifi{tBD(D5lh?>kGfv_638visKC;FbNNy?19cZ5 zYt*7daAOp+@5A<`s(2jUvN(~ytA>%%J=If?Qz+K6`^qLgyMyrEvOQJULcG!RGKXHu z)Z~yN6&rmbd836D1RvxlB+k(XAJr6ft6N0e9IJsc>*6M-tqrSN>@3d_yjNzcxSv8){$y_=wt>=1| z{6#?abe`=Tmw!9U!x(#?ainXn2u6ylvo&JcW0PS4!V&kRE)ox=L%M^?l_^UeViB&o zZa_Lf$G?wSofTU>cDqHQGIg%?v7aen*cj=};W^=khq|G049U}RF14Fme@iR1 z;}3S1j?-UcG({VB)Mu202%l1Q(1=kV!;B!L?5jWqSt&sR^CK1%C=AA3sKpTG~T-wlRb}4 z7Lt8zZG0dbbXF$QC|X;@CIK7K&DsNgcH9nh%l3_$;eG90sqwkWi=yDKC?0T$$~dfW zOnHB}x*|4AhKn}cc(LZ*S80#$5>iwI%qWHDw03IrEpVd1Xx3xM-YESD5CjZg!uR_7 z8O3Hnua*ZUFQ54Jw*CAX4`(2j_+E0O|Kwsh{Zd)!42z+-uu1tTsEr+6{Fk^?W3)tp z)ILx4Q1EKQR$Gef=+SPUw$q~J?wbNY$1yj4%-lKMt0zNm)CtVrbS3c;&vG zY9bA03YEJ>11V<-?h;LB_t*3|CHi|$t^WX$P#T(ct42O+j<+;GE%eel?R=G!XwQ{T z?dGq)9v)KJ-U)i!@j(b6pz}~05u|3_US;Ck4z9v-%DdbpDg*S^#7&qMh)XVkCqJb9 z;N2x#Tf@%`aFA0arj#<$6N=1bJ08x>%p`qdZQbN>IBJ^OVK4(VbxmE9>^RljwJxQK znDNB(dfs#41REU;1&nbV<8JRxq9Bn`@$_Ed=YcM1fl$0sdN2I1OF^AeqcLF60z!*h zh)!z=nu!$VuDy{_SuFcxs?a+w_X>J*Hss}jWLDsh zJwGblsHoH_h`_eA{^G{j>8QkMp{hI7d559~x%mUQTWI^8G5ZH_nyc5KRlg7}qUq=O z#r^VkQVg<|#Y^$ZeQmuM{$z1RV2JlMhK@dqMeVl%=rB2lpXA2TAsTIJ$$Ek4k@Ka- z2{x4O9l}kz;LKr#a*}%IV$R6HKnye5|KZ>mFOimdWD9$+sYtxZLgRehM#b!&{7tQS z7nQUbb$Xm55|?!4Usn3qQ`;jmgF<6a=OpXG2jEp#V6{h^!hbh#Fm;#O_I#Dfwv^wQ zvr&_mGN9v8BV&Q4YQck=7@r`D&}i|pjfzW5$&;%wuf-ckH`O>%nT8e*z{S#c_ zJ|{V2R9&vboK-5+H0t7g4YZR^Q=Wz2^ZbH#Ji+0{S~hX;^yAV&sQp-dPLINA@eu1vnU_mZx5E}z;B^{097{$dxnn1Su{H8 zo(3~sVuWacatT|vZU6})BEaT8cFXuGdLQMUA)cmGg}4rL&@Tz$Q7$pRqei?H-T2LD zLSD%9RdGt?bfSmP#V)CS_%IVqIetQyy*P!-lJ5a}$DWkJ!w=oPMOKceyN+2fjmuE% zI;;{iq{5}OL~p&SVvKRbIg^{9JhX7^6uS&m=CPJafKTlxdWD2r?Fy_CxP?od1RZi% z_m4a7hoA%#FMWlg3^Ah_n74+`>o||rE;aLa+X_;KvdR>ui&*MWpT4#;0dXIgVNsHM zMER}VX{A+{aYW`#*%lNR$K?;aXSRtA{#oALjAO5&AbNg*ehU<^8qex>;{b)W##3qRF{y()6`Kd6)#n; z-z&mhHJN1z2UYqXMvPq>c*?nz!Mngk6noE?CYJc8fKa+vc*i9x3Hdtxf$8v)33jHSl6sn7?ZP)J&?fd56FIxfv8BiHD?&sKI z1C>+_OqkxHC}75=^i7T}>{mg8N+s{q_2)i*X~u4^kz0yUcTdP;y*GB+j1x%`Et}Dy z2FK&Iu5WPwMiO{9PJ?i?Eg}iOovq$eC$K?L6caELu?f2&K;#SOA82Q`&D2F2ccsDv zFp{vHDXCujVSbRO!s-AVZiPNcn6@%CakXH5LOcUo%=%JkR^>CRG#mda$@VGcNm_k? 
ztn4^sTHEmZi=3iq?Lv`KQ>}Y9p1vWJ;1`UpLg~Lv6anNXFI_}F6)Y$2Q03Wj%wm2~ zGDXc*hhJR4&(`f968j&=LA|nGkv#V|qy_ql*39(-lmaDGb!Us8wXo{g|L{cYO$&&YKGC z2tiGpH6Vzc=J*LKeyr7Nf~?nAE*uo1$y9NYiv??V`sD`nnU^kAyK1lzZJimOo}(gv zKie+^{!0UN{;)C3?u7dF1}dB1vaOmr@Iw;*XI(g@47~lbSUS`wC%rVgXi=?vv$>mY zGPr#;J4H_ULp0K^g70SB4vyCIaieKQT&sHQ9}-x+e#?~;yD!?D6GcSO+AspevygTp zEgWoj71#psanaOtqCP}h___q6DTzqX2+hsPMVH*B?ONLJl2Sl^oB}w@EO1h4AAs5kg{Q6Fyd=mr+LB zf02=LTpk-Z$mAfU()Wn9;x1mliS}^TUGk?`d%0QdIW6CnrG{TuUgYoWUA{Zh^g$i(n1OZ})-x=S-p?XjgW2b`HF ze#!!9&z||zekE$6%B-nDe~vS%L5hnF$5|~=?&w?)YVs*tbX@f#sRddA-kMfvae?Q` zqj_fsABVktel&p2SaNn-v&h1F)Q%KP)H@*3r`{4cjxY7wM``n79VGFw&U9|LFKhcU zh36+ul~m^n!}Mao>Z;XP$;TCC3+TlNISNcI7`G#to`Pr~2yUp^lR=cn#XVOw%gTU+ z9e4)5a9n(5V2{&(a*?Ibw`b5`<&G35M3WGpf+A4TF!WhcuT9c#OrB-7zP*zXUU)S| zN5Y9(Qf_LD&Qo+UG(TuLb{G=XR$%24oViZ+^x&{O_|tX2rZ4Ok(dl4KBLVZ zjeeJgFTk;$4aVfR-XMB~V(!p=|A3$9hz~1tSrfuhKZRyTIBNtyt-N+Dx zX5&j_1=?fpvsomnFZ32On+@bg-0Rk0g0gmWtkZd0UfKi|s@w&-e5<{Q>f zd$NxnQ)=NVT!&SNFB)M0IEyn=D*3a*Dv`mh~Lv(?eK7aSGw=Yfb{B6Sg+P z5J2x0tVs~ca(Z(epon*4gTd4_XNy2(DsRM$)?(qb4I7& zhtaFU3r+4>DfQ6_CJ8s4XCo|41dd)g2V>Z zF+6D!0fI{2kBL1v27Mtygxuo}Qb-m#U{CW)9Wdg&uW4z5-7Q*m;=-D=_9?!XCThyr zkM_@p!GLN5gU{}n_mxxMMQqD7=d<+fD}!4;1P}IGEwYQY^W4i1Er!_cc%1vEbIyiU z)c4GvJ*^P1Jz@x3SdbExmP=kE46z24PSS*)T8bNN{TjJ0>yqkwgpzn&demZ&g5g*?)t32st-Uu zuTRD6z-5+#9ZfSU@j*miWtGK*u$&^h;}20Rn^~R>O{4j|<(IQF%8J^*^Sxyk4-ZeJ zTrV@sg;&}+_f)r~e<6Fi%}Q6`xDbSmW1Xd{sFK7}rjY;wN{<9=G6)n+GYJaoQb^u3IEJ zxCNM}Kb{A)js_LvCnLzwAo%vYE2s5?H=91t2}*~`d7G&_1`fFoH>+acI7vePuZR6U zlPdBzZM1gA?-oE{IXt;1Y?t`Syo))h%UA_ACbE0vyFASpE0G;9TP5CYh$_PmCEcoi zYUuGl$k?dK@X-ly@a4NfI9^n8JlRB}r_=ulH!N7dlLM~_<>yv&FMqC)#Mz7F_E`$9 zSL*d3!yuz3M6P&i;q4#K9xE#<^fheM!F23tDl|f1e1LGDBLI4Q_G`15ec4ts1|cFc zco(|vH`(@T%QJmOcxk!YqDxfP+f6?h^!S{XBTatttx+>P_bkUh`B-;;J^iU&;Qa8E?^713 zN5(=o12sa+nQi5ly&grSWxuZU3U-6}SN&fLwKP6x4RixnUvEG0AVT_PYr0zZ*Zw|* zUuiW}4Jz}IH;TGA;EycU!27BG72e?FI=f_MAIvh%gzkTCaA%!5ZCs;htcvNC`U%6( zCWP!EtET6#PL|t#U~)-tt|JTuO0$=nxy03#-@Vaiqo_N? 
zizXbtI^KWu;LH7@Q!a=e(^R*bXCGNl#f!X)Z;bMm_TTd>7S|MY zt#lHu@w&*2`ZK%s(pTzN7uevouYyN2)%d3%!~Gqd4-Dj55^WI1@=)<%w%!GBTK*i zR#jlNN4HYqGKTvi*iBKdjleNUGMbvcsY3SlYj76PrzfXNli~^0Y%u$NA-QR68shGHO9I17aOG|FN|G6 z-Zu6sEdl^n0b#Nfn)%cwYFTAF8+28xjJ3mOvj1+8fdsP5}?Ljg^aj1!Ib3}E$_ zo)$|pM;)P+F8cnnwqi&5W*j{MiBUrx*xxDyJv>{u=k~XwN+On@f34S!t?TL#N<6e5 zOAEmKkgc6>ggp5R-ja_IjcM!At8Z4>X>^WHvnE#-S&X@hJwrjb&rzJ+2c>n{^2`gx z8|#8|>Zd0(K1#E6_f!%@+gs5`qhisBYZ`+x*(~=}NtB$I#Iq~o=GC6_85pgPEscv^ zHITndfoHZ^B36b}?rBAj5p4)AiOhn{e0ZV$CD5Ds+MQ*0A^e zf*uIn>4yR6Q=A|#3u$XK-pbm^Q#8?PZ*t)iEmW*|uka72%#GgbqQJKiWg;zfT~7#k zdr_V^0Af%wn#ao6*sS0TPM7L^NW+4$*v<+%1@^P|O_n9b2Dfvp^Arh_M?+G80T*u_ zjdttzirXrgHEqF(R^V}s{<}WeNol)<2_hFUOkE5w-nSPK+xDYW5AA+}k}d_@jkVfX zuXI1~B;%?$D*B&TLEI@$Z3+Cbeh_OPlxR?ZV&k&kpUj!xYWs_y+((x+6+kM5hH+Ky)j{X4lNrn9iwoSc`zlInwuNObdS;=Sd)i`TjGS zZ#(Z+P8q+|w^8ZlHZ8H{m&O&^Nj)!qajW~yuy&MFlVYeclmRwpH(!L-il;K$qA?0FU5w>`ejDO9bZze*?jyI-hjdas z5i`d2i=Um=RR?cS3Vxer<%Q>)?yd66A|_x<(i3`>+nScJyWrI7%6PH5wY@nrGD3l@QRSqWUP~b+Z4h-j+%4Ws_cZYU+C{X@TeGiFT&- zA!cBf*TVEd{uz~f++~#$yeg0Qx}{|d^B>suwmb=xtGMN-3CiE4yzLu%pzlwSr9GzUMm))u+AF$ zR#*K)Cue_-Q#y2j9yoVlMjOad6mYI;ab2&%F&BuxfvzTWfq%#qU1FRrw-%K4Eosp6 z;@Tq>D=+ki(p(o3LtzWmx1ZKWz7$Q^fHhV6jd6#gQ)i^FaR;d$O(-1^`|;x( z5*81ja)I9XQYhl;+$^}bM^$^k#^4J405Esj5>$KZov2r_wW`xn1IuR6zW526om0`=EIo~{ z-|3-ynpRO7>8IO9d=#HCz?e!in^NnB>FjdY>g5PGeE2uQqhy(*t|Q(6*L*As{ZRd|#~96o?HFizx4hvxOO8&T^G zLm3jMYvGDZ4I3we{!L;mMwS=(#A5jNj__AdTIT!JquGZ(I-*)72!kot>ptI0L!Y$v zaqBb^(ww@aRO`L3M)k9`9R&{dm$R?5;kr$EYc9ls)L}c?f{~@!im#fs;UsZFHY+69BG~q85{=YRt ze|?AlBGkU^3b~rlD~`E+mg|xcaC=sc)Nmo^fs=#na#kK zgx@20Rozgmw$^(b_U;xZ1^-;wr#CZl0FdsM@X>w;$VEraK?0f3a_71BU&?V@Fs5)k zRWaUQ*!X4djoAnL;v$m)Yu2KHS{{-A0i0uJu1{-l&fXb>I6LorMS<^&l7{bFr3SJt zY0^gyDJ%w{gkGb#l;}JZ+@Pt#YB@wQv^4tbw`)6!0g%Boqn#}Pyzw#nsc3{t^}Ti% zxOjTTg(tqgHSu8ZOz!nBbDs{(sI_%w)nFWLXP-1B)%k>|fYd@yo%%^>Mzh72&)!%r zIGimJ&j+if1=eonAkQt;)Ser>Yp9kpxg~wJ{~>F-lfYMOvaW0a%-1L z+S+0QA252Q4@6Jd+a4SbIi`>HD4l)q1F>p=L^Pk7WAfJqN;jmk71ttM*0x-f&Fk5K z3N#%22PFjgeD7}qQ?Ftd~j=iz|@A5-~7^?+AYBgL60UqHk=_iAAw zm{)4psU;K(bHskdKvqsIDl^E?r7-gigg8L3+3M^y=l=T5?ZZ&cZr>k(w2_O-(x%WK zz-=K>`Huplf)0AIz{JPM&{$EF+Jjb_(V8p&VngG-D06iSmH5aX&)1xdBnU8rLx5vR zi4hr4dK5y2g9^_@$5!}c{{pjrLOE_w#ZA=Q0TiIRN#*B(5B;@k>%4#qd6>oGzwFGV z+BE%sy*_lToGbu4>o)Ic?sOSh_E9=ichSuhJSASIqJiF$n!@(i3`m=yGIHG&G8!zn zPGk!WyCy!93nA2pK$;8MAQ{sl%|g77lQSvQZyA&CJgNi>5Ls4~v_WRe!bzw*b?^ED z`h<@QH@84FPy9AtR6LxiXZ9)$~d=( zx4~QaI^MG{mgJwA)8PK@XQQ5_FxzRZ`~nG}W2HB96SA0uj?TpKguD-d3Igvb{s8>i zaeqejFD-A{RUv&b)-7Pwd4|_i*s(i&El;;xzC9$Bm2D7EEmiAr5XphL zaq5SI^P1gS@QLuiN$0@itT(IIn&zOQX6#j&(wo6M_r2^i&ka<;q{G;bBM7F;E~>*( zqk#oTSC^_b&*;Dw>FD){YVVP+Fn?|!>X|;*QaEvjzexeV8=QmGAevu0G7vl?;`aV5 z_(0~|R2A8NMtQ}rt1-u$>C_TZH04LR)AV9gfY%JwEP&CJ`r2H?+D+haWIkpXOpI|! 
z<#L|89J`r**az3~vU5qWpg@$`YK7f?#m{$kG%W{7-vsl%3FvAVUYO*Y#q46|s%|+| z>}C(tImlvs?SyX+6xO%Fg!!u&s%^=D&(-(=0c=~KseVKhCLJb$Ayf_9cAv3bxY%PZ% zK1QE-(@{a}GprN%@&mjRp6n?ON~+!B8+7j*kfPj6O9dp7VNr*!hHk=}MhjF)ntL>h zKjx{gRF5LX;bJS$%f%fiA3)S{Wgc@NS^$y`C=OjRgdKvq!kcG9 z9%eKSQPgmR8ht@4D0a^M64yG3B|aIG0lHuBrMn-P1@E|c%#9D3VZS!&YHS|`D;^f2 z$|k;v5*ml-4|SVwUuvydE+&wif4x#{6?-{#jeVz=EM3u+Itv?Ik0FS{2a3P+zOr)0 zlfXG9D31(CND>KA!BO#6W+l3n>sAd`C#r^1C@NC0Q!cm+>R`FkL(%MF= z-`4_Ob$oLiZsyM+0&?28XVrlfJn;K%8h&-` z8^lIj!4ZAkL}R{O!*swrueiZ(B}Q`u-_O=8e*UOEx~*Z0iBt+~m-i`g5?TTwqB0n& zc~x%O0*M&wb+SMm|F+X-eUrW~5&C5}m#K#woIlF0Aw>ylA?v$@3SKrxl^hX`jNt3= z;NMy>SMgW14R7Zwd~^~{dJJ(bT7DiqX@CF%j1Kq@@el`bZ}$ri4Had?8)_S?QtIB# zOmNzpZlCARH^7?3hIb3!{4Q->&(P$9h7_X2M9zA=<)Pio1le;#;1_pd9-G)5hsv*Q zHb`2l+^o*{SxWjITK2IcVRpr-(zVr++$O4G0fI&zUERjkMs(w4)^%31MP3WZKiVSN zi+$cMj<%1&fueqQC6BK%zx|9rqj?sZiD9S_2wfqXYIBa2@w z3WBV5VK&*^;eCD(&+m|-#VEYE6Gazvn-^^LiM?EeBL%9ta5E#k`J-4hb|rUB6{mv z;KJnY|3lbY2esjCUBjVJD8))?aV=2V;>8_Gad)@kPLSYU+zJ%;;_fbm1_&A?I0SbH z?$$5Ad++({6dt^(t+WDd+|6SC87E)=%N{Sjq`b{4tYpWky&r3wrIuaL6&rxgN z_sYxGs<1TVA9p7j`(hun8_MA!R(j)BVn6$$#sAxx_(yY-vSK$z8Xzl% z>7a>|lYl;keoS#L@u&ks^p(Vp83M}j7tqPp)lM%*9;Ok>OYTm-@x;Git|!G87}AQr zwQhWM8IBPE45H&se?E{LDa_gk2;E=Z*Z`j}htS^}!DAR+JUF(dgKFA(|7 zU%+k@^q?78v|SVtiOd`R`p6sRWacn<#smRBcD&Sgy-V7g20E&u3ml_=!zkHuXLjhC zV6vUFZYZ54e4i*WyOz+Sk$?JJD>>6}&Z4gyx|krg-ALQ8B|@=(zY0YSWn8g^SlU#+Lc7NO2L=e-((~#g2 zLyCJm!Qs>KQq`B@>3;!2Dd~rJieHblSZ#Ik!}mn5P2K$=OH9o3MapmaC2EY@JzvW_ zGwWUkJtk)zbzbF{?72^~rWsXA^d#3x5v`N+r=r&og~EDMdf1!?dSkaKRE#@MNXs6L zEF$M^vuD*5t*i@hYymE7wzME*Goai4+=~d0{L}Bc3XnFJ8$7zG>@2vlw~w*VI;I9w z2RX-%yS_m(RziiRco(UECGT3RF7s-0I7M<&hEf?MiIE!}eD=E4Zv`nTw(i>K?zg)hhI)^s8ezvkl;Opm(HDaPfesi!3s*7IQ*C~uHVxC( zQ3k8}g~_5U*e>&UA2?r+PtIacTK3}-17ceZUDlcPX^r+1c8r@Y^NX~b>eAka8Rb$ zgl@3P5;GwVibU}zi-0Li>m`WG!Yn}7sXsZsof*Zkz3l_mQXFBKhFODltdF0k>D1Xdt zs!E3_S7=|2uB@eEB-j$4y@F-FhH=yiFj7ScacJjNf4DAcRuUuisc9GHerr9BYfij#(e#E9nSR*jvzE(+NkH zku^O3%CruD;9DiU){lE&k_97dRpQT>b>X8e?%Ea;PG#wbE z=PY2Qm9>TN!(~~X25-#BQ8RZ)4OKE#`|izaZJP%a0(*R=iX8c$*$>uDj+<_$fJp1(?==?M*@Vs-j_=%9-_%sNFJElvhm6f+Xzi~D`py8TP ztWv6R$eiMHKZee;jJ`8(?zxWF5F;7#*l6=soH>y2yDD{B!g5+fi2@r1?K|#uQB8(t zjTFCCcU`#J6;*Uh^D7dPrY=nJ`Fbk+P6VCByZPugJ<2!&>?aS`FX?{HEL@RDXeEA6 zQt${k%T|0~b9|s&%g&PC^(9O(_atG!tD+C1HZ(_G)&iduwTn#sd1BEfg_yN>xJfGi>W;*LJQHR-A^A)ma;G!yXS2^MNTeu`2^_P0t-;mriAkw{0I};KK}{|aK_+E6UjAu`c5>XUfO`f2RWkg(YdMJSH@X1iwk^n53na0 zXB(>Si_}@F*gTk18em+x*r=sJGJe#UbTQL?3%c#n5@pP9X6>2oLtH*Aa_rcf=8p2k zuhqI@2s?R>g^ww{wJ_wm%n)~~n=jj~g& z=EE(=27#{0AMc7<+_bP-0t-(zT4&Soqun07-|{HJs&IIg_=+%@SElgUyG~5aOHIpt zi{`CC7z+8?>Sb~cqWN98?rWFml#*@PibJHdqqN^Ty(>MMkzl2Yl9GRNk#2g+vW}7* zu}_T2^A5-}Q5>jLtRgN-4`jJu{cX``F2g!bwf#&)4TR|#1LAc`X9XbP^Ag{(6D=*8 zDecQ3gp$e|AMxheqnT+ z+k<3vnEU*%L@zFvgd}V;7G6f5zS)(sj`(h!NqOaYKx5Spp3RSPIwpg*Wv0wzDt8-&fi^F;1Djo&(Hgg}a6GJ_y zf7||b!l0V9-q3+7MM|x~t~}qI`{Q*?UtZl{J9Iulx?d7QRkQ!Np8`dyk4IJ0=_y$@ zoqu_Q9x-Z?&C_t`5)2}GHTeR1h@im80~#dl)24*Gqf5#lMVl<>HXb{$0@k1FeOrHD z=;NF$Lvd7ZO-0PBE`sKlWKauZt^|MN>tvI6d?&e$nMUJ$dHl+^)KGiTXh~0#=+Y=g zGLnwB=2n|RiHh1s3MUnfVsq)S(l{@BrRaOrPmPGOR&82KRy*(Wvz?T<$HKXj>*g~ zgRol6a8Y=OBL|1_{w!kN~_}JFP_0KKBL+5=Auc~lN>yQ=V$v-VNB-feI zWSy|YEZDYh8L6lA!vaWE@ycJewmI2#j(oO($0LZ=!8awmtMV#Q?MrjTfmi>&WTBerhyuW={7;>0F>Fm2VSZnQMpA~uc z2=iL+>4lOnRcpPV68mCYR@f@JlX9j@JAAT=zBcRx61{kctVh^0zPtHg#*6XWOE}N( zBuFxQjDJQmL9c0B>Wkoqw6&y+qslrrB0)1gf{w4JBVR5#Au(j@x>fo1%5x^1Ik0zW zr8ohfx}U>k9QeE_nvx>shemThay9x&q$ukj*e*(5!TR(>hb+Q}7h6Y}lkS%QRtL@b zaY`D&mAse5u8NiXnoYAhe8DNp`?Ssh5YkhC`dpXY2$cRGb`3;e=fGNY_oO{TW~)*5O4nkiX|3c1Yik=4)uCD0 
zfyFmj+MkVJ7@ouf`(GZ`w!&nbj>QP)jul#Ta|vb@4C(DEe?*^?V+k$+MvVQGsu@EN zdUU7Oj2t6J_8(gb2fF$B71f$}Z(Da!A^lWjJ50MfA9~ zOG(Y<-Z!^_2KHqPl6C;u5M;&%+$rK6V^ot;9v70kzRkL7BlSv=VrGg# zL8REq00lA)en^rlQ!i2$5(!I`EkO`$E#Xt7)GF)2eD4hu-$|Ljz+lX>CR zD-|>oic`zEN$$V^J;=^N7gRKFtfbHvb5xGA%g?Fs8(vUrJ(PxaE(TCW?VBzO-C3eVZ6@h4)!F+EfALJo%h z8LDvLcCysA@wIa!Om3FadG5*{89hcW`H`604bN$S8E2nk)yoUf?AyM`y-D~N5cB9)$F8)yY!165C>78CXZLXVww&e>ml`$R&5HFa+3F%JPdyp z-gEWSP$wZ7n0;VoGE${J23}$kx^E|mkrSuFu^nB|17D5C2~1u3ZR9vJMdK(h8hPz4 zQbB@0Gt=oY7y0jk!OLD`*O(AC=!o>OXN@+m@^P(e$nFGg=R721n5C^}tZ7FlHn;2) zE8V-HA1!IzIG?6nT&{Z{K@`y;N{-U%@hbae&ZErEwcsKOv{yD_VQyHW4r*k*GOAWj zle$QIgUZS)+oXgQoVqRTkJ{9xa6un%6I16H4v1W@1wleWjT@-TzY$HU6x#Unf@rRM zC%cJAsG=aEn#sb6909fJ7dsi;_4?#3dO~Y3#~UT?)FOq=y=bl_(p3kdMDjXjY6Lvs ztL`WPN^yb9rw1XrJ9_(NjQz0F#&c=U6u`tVzt{W;|~ zEZ-1d#a&vOEx2Ba%;n_!-cC_RlkdDdPMaW9F=|p(dU1@m&$ZQ-srBXKYDuvPeURkH zHrMw%(eoZgxt^{M8mBC9r{p!e(1fBDM$#j>2-AfVXadJ^@3c;P4vUsgY-WoM;nCyu0G$D`4_nagZv!_j}7B zeJ-UjYLyS{2fz7lr-mHdcbTEChw3ok4SPDX+aP`XR9s=GBy*uAG^eZ$hy z4<9Rdi_OtsOrC7%L`;3-rG?rT%OM4p-6u2S<^YzIheHvtv-Ez4?w*Ci1o2Mo?$evSnfI%JLD{OUo!L9!&-YfjRbG)jG*>2^_8nO)tVIUkxKp{gk z1c+_AQ%F`#^TdevcvWiUr)kOQ=}3@IG*Q-nT{iu?sL1gHYMIIAWoEnv%u09EUA|TG zF6%^c3<;a02*~JN&t*?l62!l;(6=lYba-s`?HU+tgx$sZq5C`w`t_;2!&SM#A<!96S(#F>d2n1C)le&x5;)HLz-+6%6LnHy9_ylzgN@X>P&mrhzALeVZMN||kP z`R-RI#kc0jhW5yY{j~{-FWknSc$r~C*zxb&Uty3fcInjL$_*@Q#7LC}AABigUf=Y!#n*i<8qRQ2gq ze=HueUD{yhU%+wjE|NLWg=9HI{!8_s?75Iuw2}HWWtsYm@Is^*=l^}M{15Yih`)_l z;0g0DK-WJ&L-SVS=aV^7uejv@+t1|xE${#TuTu)6`NlJj=3a>e@{KlbUQnx(Fi*(C?X1PLI$#y;Y*eJeG$MWy8Tg76%{^S)OtttnJ zdToVgN4Z7RGMqAJ;Z}6Xz}K2uW_KNnjTDVFUuVRGIM)!yxIk~i7YvgaOvYZHeQlc7 zMy@bD)+C%_r|r!tBxXz%@U89eiHMw|?S)WqndjKY5#4Cm+Cqua&@z&J0px&_ff_k~ zS@;2hNj%iRGmrSisuHbOe1JMjip>Ik{JrVoJ%)a#uvY<|LKV!}6MT&D1&e zK+!@0#hf-3$t@6LLdUnvEY!f-y2y=5V7)N{yB+-+Lyj7UVG?~(^nyrWrrlmjO~Gj1 z>myb!e%-A2i}{@fe!gGE6hZ`YWMbC4!o4%j&(%!(dxh&-^=u;vz3& z+#v8_#ztA#Bl3z?RSHMZLVJ*axk#zr>d~J?H=Y3~AEr2-gBj57B$Xz8G`aTX6K0U= z1JX$4w0C~x@kKUIZ24=W@2VnHTkyQoG^U0(WAm_0SC1ygcWndhc*b97irA#(XXxl! ztw<=O+6NhW#GO>HF!nFt<+&CeLWPc`8p@2Vl-{R@+@}osO|$Lh!YGfy?)De2-QzU!4;$Op5WVQwp6}$_tp)^lojBHogoJo+iRitv z`3?D|;Z~LG!ZrrV=H>$LUC212%Pv|s5v)#wB5#Pu0RYsJ!$w*>e8xF)Ok|_Hc(%I* zms(Rwp;tjpZJC1T{k1mY^Z^u)>;~kQKos-$G_<%i&^2ml+K-f{D=;LV`e_YeF~hs3x*a6t*Dp22G;AQDL1j+V6*$p< zt)~7J(bK}-)p+>BZ7%`Bi>Iih*rM%Qu{@LYiF_6Fcy8?B`Pp8+qnU#>p93%+IeEOC zsws3eHJp8h0!Zxb#mq6Jy)rDlT7K7Ys#)r2#YiZ;LP~8xYC~AV#`cic_b6}bQy+7t zOF3-scopIVLidUJFg7@bG;@=P--)=s%YTlUF3@`~7DS7`_z`D$$pW@Q`^hq~TUf=vxKo2xrl!sd(6-d{(0TpOM>T@;Pw(H<$ed3tCM+%jT z`5><@v11EcEAd-Q8&4Y~S{*PP`co=orAJkFF-76$H{qyfne9u(L;!try4YkU`gEI? 
zm7G#H@yntWDQee5yp=FZyU1Zg<3jvCiV4sX{aS30$~ z@Hj(14$>-#G^EidBm19w-T3KkH_+xuhSJe(Y!LaDQrVag8{SD<9KH)cldo@~$~uK9 zBwZ`SYIf=XKJl9?SjM}ouGR#X87 zei)N^yL0L8@8uM-)|ulr+yTPM_P-$>ng{-(hsLx%ifmjN;8xYDA)c)-M|fK2ZCbs5 z(N{~>{h(@$>}0}$2ASMqeWa2Q3hHguDYi6So(&6sjGXfeN(#{Nh55Iu0B}pa@AK&0 zk78iav1_XiUq9g_Jj=V1oW4a=;=>fuYN=6kRp2G~UhntiP3No#vTN+=&VM_~WtYRtpd2EHzy?G(Trm1BCnDelAWeZTtB&$YY@F&lNLJOTc4EY z)|XOl$O=zn@SQH~FD)rp6;5T!51df6UkV@<4ZE*j!otWOecmD*q;+hLX)TeVtSTX+ zIGCn^U7b|q+0Xgn zPi??Vz^@~N$^hs%YVMC$B4WEjLPRnP>AWkwWZqe1GH0CPbe6@!Zrx5pb@7?vFx-S9 zNeqKhU2FwhATTK}H%RJ>G};{s14j3CXW6HBQQ?|b1BXVpAUzMk=OHH}f?6tGlggMd3R(Dkh}0T497;Z-seA{DoBZmhb}M^4)pUyG<7lqBl_bFuIFcEq z2*qU;RML87=@Tro{IY_RY@OVo9=afByy9&^JTMPmY}5C9;el;A0tv){1Q^8GsN!}t z^Vm8won|6P_|hHHP9aKo#WUqq6tduI*LH9yg=ZdQFDnv^kVZO|SZQ8un(5?;`123t zfSdS%d%b=}%~(6t)zxqh9%Z-NvUSD)7glyO3+yWppMQ7++-;ULQnqWz-ZL;ZqM=5- zu>wElu)>xjbfao}M>uh6hu-)~E|%Np(>$535^#_*2U6WHJ$og+{Gwla?CIjq{b2@D z*+T0H16eJ^6ib|H(^Jr4B-#wq#z*%MF|qj!$D<>6_=Q!7CYx~0DaoIe8L3tS&rh%gAzVxkgA6(r?+g%D7-&^wyP)ihXm^CIsO1X`zl3%pz2`0(`vhSgTD?o&@0-(rlLQnf+Xx|G_ZP}J;)ADDogMB2TV8~>;2312w@ zUSb){dim-);JRSxkpQ15!ghI%dvx}mC{B;kifrImdS&ihHVUtwi>Vd}oGMTQT*Bm| zGY9dk1LWV1Tl0snANFv7Kdw3^u&}N5HWq)DA4`FP{N>{E8G{e2bST?fdk4OeAy|Q+ zo|bP`ZK&%QhWW6-eJp?Mi<}E-4)rd{zQ%4sfa3ATS1Ygi9r}HF$0BxHmxoo^PG7Fh zv=D4s$B92JKR2bYIs`%jV`rJQ&Y#n6F|e~$NSB9zimV?OCqO^`amsMVsAh9@?Hv4q9r$FzqcL;r><%a zb@x_YWtQ+}QpYaJ%d0+qqq21>S0mUW$eZGc=ogQG`hNX%Q1>W-$~8SiY&Ufx$JHdo zZME{zg_J&y31y7dde_=Y^^tMOZz2Smx|{`lo(qo?<^Z%T zN%r(~S(UbY7PdC>@kwUyCo4EADDo^iO2rMwe@`8=j6BbIV)^d?lqkAL>CV6f)lu5b zqV2kMapLXZkLWyR4L70DINh#v^};CXow*DVnR>s&1fMM*#Qz0^L?8>%&6ujmXqTED z_jRGKxRUcuH?VDOU zo^RdbJ_XFPmRh=29txfnZNeDik^80Py1T>abG`VEwhNLJ+dRA^Xs-mF34?=(vI*pR zF<^TLJT;pZb*T@zB_&mEnxteYT$QvZUuz3?SVV{&dU_Ir&p4jq+O`%$Nwx%$RD1R^ zB-vKU@v7OFEG%oe_UtdfEr~*Qrab)`Jz*JlQ6{FV`1)EeS@^@LMTpebNrZx=YOAfD zMu7ly{A-xv$&e-UFH$6st9lid@TrQ?z|@Tihx1hJtNsAkfMHb|F-BXSwPVUqMLMzi ztiI(@2ge<~0b!;-H4mBr_ z46`hxz%VI~6>8m{05gEs*9m~~7oab<%KjH%`m)$tg!{uyv)Lk=r&AdKvA=S3MwwlW z?Tc0(T-Onu5;pr9{Dbv#TuY!@2TFWc6bPG_0)9Z@0dy8q3(e??+=8T-NCh~mjX`@_mx;US_XwkRm;MR{g=^b z&95IaBAje9(8bUdc(I zh!l5xau16$p1m`H3gK*jemiQPc}0rf&h60#)-ha^dwBl^p(4&@ zUOs@-lBPi_DTC%(J*aP6{J?t@{F|;^S~gmoK=i=EZhVXHH~-9fyLRZ8HiPF^r1>kS z$u?tW792FW(*(*RRE9M#?@5 z;nU#$(Y9C0dyX@jOlDnJ)-f>TTNOWIx_td&%Xmqz9cWF#9pp2<^h`i_kVER@^f!*I zP6z{)Vd*mY7+K>{vWL@oH7760McQ2B+$i7F94WCjU2_l`#lXY8S7nxEFO}8B2kXWv zuZyE{_5M}y)E$`zZgq=(Dn&&)6x4vq?5XMGV86QYVY6S^wLfFiE?HUaI`V07;+R~V zml}7foMaZGGcRZF4nz;9mrD;hne%JQ@v3}GI9?T@aNhRMxTpFk#%{X|B_5q)F z*2MAt07!{ksfG6Zkh!eUF8G0nHGWM>#tsc%qWYUM^47yUMy& z2(;?BsaHEi4rO>1WlIt0-37Db-(+h)1BEPHRaO_}C~Mmb#(`BQhLv}cV=I%^axEcw zAuH8ejHNZpA1ZxQ?jIGZvaO!KmEdt_dLUc6OX^ezf+c? 
zfjr7#)&B)h`aAEo9ty>Kww}90=llh{`O~4V!*}eUFo2pu@DD)Eccs+sV{cit@BS+W zC^n@8Zli*^Nn00u%=!6yJ79&44;B3j_^{aNSJ1V0-RDIthU}z9g%Uu@p($7nlkQ?9 zh8A_)B|ECFD(~=|f#CbJQW1at?|N4|F7ia3mRAPNCwkYew7l>vFi68~-bRv=W{DUH z6h^V{`Y(2uFVjlT9Ke;&ytq#dawY^|d1m`2sm+8(XfMzlq!B~gI>=<|UqBDE_EnxP zcL6HHSW0Y&I=}yz*`n!f2_pL$?$G_Gvt32sUcKqyIfY+Feyf6D<7)}7uL;dRX3#Ov z!=1g_>V}Zt=q$%muy!wsOi~NHBU?F$j?*WWDpq8T7YC&!8O-zbTM;- zr(8R>)<`;|7*eapuLm;4681%Q7GzxZ)6%Nm5O3f2i@Eu%|L{~C4~gW`9RzDprEPY78DP!VhFA6Y^GEpAQ@Q#D|PrNTHf;5 zL-`-{@7{Pd8IFyv)0$5^P5Qix)%Hx3X4q_8#mQWvROwz!SQRCPkGob1FiuIZzBIKZG_%ITlqkW}*csmq|fIF8i&%hvCEmqb9gJbw~Y+J9@Mt#+?k9@v}wI>_U zo^aH}Z|C4<@her6&0ic?$W^6%aYOS-ctn&iUpU_`{qD(5eXzU*=J32k8k1;9BROFk zt_hAVHmxR=11v|At{~~|{-b;$(aTMrtH82IBqTkz?!0^eqZEOE(}f!_=DmidvOn@} z5i}S4*_xd7o7O38dYpL+FBLk`)zK=ka^PS>6RI3B*B8t(PJ_HwsOX{KQREgTZ}heTIE*tR&f{rQ;4 z1Gc%Nz}noo#>9`g9PNnM2Z!KZLpuKmgIv`D16$ZWn`a($mERJdHQj|Wv#LJqO=r%> zM$-)NsWfX4pZ4fjDG&)KihLLW5d?VESHV}dhGv0=`2i}nAHL0k_!u2u*Kuno$nNQW zoQIhf<9D{l!gg`}#`#UHxG<0{Bcp#>M(c9zv-#lriqeH=Wgl+Z{Y?jvJi&<#WE(9% zJ8?Xscrxc?!<2xWY9_(x9&R}#<_r# zMA{!JkQC|uXF=Oq^2j#)JqK3$cHnl{EAfEE>`o_t4*`Q*+lIs}a62;BUEP%I8qQHi z2<)xtdt3W@8|tezV5k#;+NVKzF^89V=Ki9mcDF#}u)`ckDLp~&?E_dvb1y_+V8iJh zdb7#Gwrl^HJBCk8+$MDR(|lJAGxI)vGs#z_84>e|o8M#{TJ|xvHo*#DhBk6*?rMxN z`jE9{?QNb7HzF*SOpKEC=Zu^d@$JgX?+M&>%E;wa)_I5!!R%KGh$xN1o={9MqFIR&HHTM_djt@KQOBy z#dl_z%1Gj^=^MUdhK1Qr@d5{9%?-&Nra^IsMd^E7k?9CIpM+E~9JTBj9&hYQPxjWT z0oxbn`nXE5pK*)pna}2d4J*t|kE~)=oQYr#xb&_5``29E7%qm_TOIXJcAjv+zG76g zOP%moI;j*D*Y-TJiH%BQ)R|xzYiUzU0-X?>-8+8SRhd|Yj@a*CGRGJ2>h+VGt6dhj zzY5kL+K^)!D&T?x9;Eq*dj-{d#rS{?EHDL7AdECaheK9Puk2gwFD-F7-RowvYLb4v z_3=a3Q$AX1FaAgIK4#3kBVoB|qU9|Op^xdj_YITgu`E`6rY|UT*EV(yCD-iUaAhjM z8mii05Hbg#^kDt=z%IPA60o|pm6tE%yap2@vR$YuO3pFT^K8VybITUtu#EsHaAWR8 zQ+8!uAVrX_bczk8ODD`3h#YIIvsxDz95vP(s)=uks>6=BP({_NhVm+jseblG>x%JS zKU%!(x@>BTQ*t^gH<&kxZOvN1+(XMqosG!sukAGjVX*Nw=kfMSh40)j^vPAf-m9wU zShW6F#vJ?o{9{xwon6KWcT2*Ht(zmhu>gpHW}6%DAp|Y1EG8a{zVy$RS(7#eN|)%3=Tc8F&5!r~s?8wW$*Y5-%3rXNNjH4e z*_oyU(yA+RB$?DxMV6-Jg!#yI`B9z}$xcI&AqLeAJQ$ z!+HmP+`^RYEoHMla-TSPP8NCp0yLTX$pwgAgQVZn8FAy%%pKjm#HF77JqWt~vuC-} zE?CU96>CMi;fbJWApz;Z7_ew%4iNNJ5-O9k4);q<$caf5CU2gj#nT0jtg4gCPn8!< zAdngk&y2g;mrw28-{YZN`cTEC9YNR-{NTJcHY(>ZMaj0RZ3T(AzH3U)=GRms2p)=G z%sgYCg~kLJUmV0w7izi=_eflhas=bDu-6ERT`#ekiff$`uwLBDCa+DEsajW@*V@Y~ zii-7$q1wrhJmVvwTIB9}n(xB#RK%lupguElJ*q`)p5OApWA1BrEI~Y8?Az6ntTY5> z6a{@4$ziN(JUlrAAijskhiH$Tv>Tyis_m2APKsXPk9&&5u!*r!vr?97mqj%7Q%CP$ zz>?Pw09nS~MTlvG_o-$Us?2>+R3VJKF(~CLMGNXZhdf?-W^TxVk*tS5^)124dqY) z(+CtWTF_@9&)c^m?(lRsMpr232rp#FBY58XVUb#iu~FiWKJPsE-lEYWsJwM#F{8+w zBa<2!k|;u^ZFC?Ius2TinoScDE+{|wz>!;2JykdLtxEh2zFJ?G)Q`^_L89FQP^AK< z{uD18wZch|23mvIIqpVVY(90`nw#vu;P~>)`U3+2pGl@!PK^Rx3I%$#qWF*^ZmIUP z7Ow@}RlnFYU3-lFthaqH_oEy#d~H8j8QaChmI3};JtNg$di`P$wF*yV!sOa*-d;9~ zY#-j@}UXN$!UTgNeA-%qQ zU;#n*EXn<_&z@gjffa~->O{~k&;kQc2I=tdv~^-wF4{8q71J}CO}8nLWzvQlg`^nA zpM89nJ3RwyxKUw7g}nx=eM_Cxz+PG-(!#KV*R6;60x4~|bK?)edE1*Ysu9`ExL#Hi z0YlNi(w@)91+;$WeNx_a^1AUTAvIsK*K!?Ey}i2iEAxt3^z+a}8iHw^f<>)jvQC%& z>zWI!V!POsQ`s!%lyvbfl&^d(7F{*PIp0U2AYxmxK`>dwB^-#w%|R{O2kp}-sc%oh z+mZB5Q`{22oyf7!TozM_(EjkhUR9$`O$z!SAqRnfe?muXaYpVsS`pNaCf36%*~P++V!_$LnojB9 zvKvn+@ZSFZugF<6?B?kdhh!3yxD@hOR2-xqwu{#@a$v)16j7`4ldI+Y&m(n_ERrj8 zvkjS1NYRQj@wkD=GELoIz+b>O&DAN#fTHF4^|a*Z2po|$0-T3DjR)%0M~0Ul9cWVK zkCL>cB%tx*;rX_vD>YncK#Y^vK^DHf03pH9imc z{}B%8c@3CDts(pR$t9`-y=XmYJNA*XD0v!L@O@Z@$uh*8hP94TJDAwkU7KwNv5yfQbPxX52$Q)C{@`tzD1`~tCCwv6ZGM@ayKqfBualsUwOcS)&* zYxhf1G@u^=p=N%HM_pFkJq+oEhl3Ws);V=&;#3u$cV(|H{^V{cnS<&rtkIg<1*}R^ z@^f;~WIMR*Fs}S}SnS+V^$lPaPg&feLJLbK#qeB`Wp@p=uFhZj)-xU~ucW;Lsb2un 
zmL$i`B!_+>rvslMmeebLwM%*X{@Rcs0{N_?3RncwkD8cxi!!@piUxqJ_zPeZMTNriOOf%YT<2_?saKU#aV%+R+u^FxUeeF<(roK$|BVSByk+nLyjR zMUU!b6_ko?pat`vq16Z=JEX%($ zTpgpvsXzE}wLgsN+k-Q0Oa!e-(@;)q$8OU~#UZJA<-j^@iQAY0x~jO-R&*oLt?Nf< zc))sQ0T>gHj{*HU1Q%Z7cBE*^d6tF3b@<-Ypv#yss?F3vG^H`A8-^+q%@$7?$#W zt)9u&NB2R1p+rDpXQ{-WpW|@oKO49L8a*bN>F-{sL{(xz9DIa^+${Hts&8WkT1%(i zVYS5$#2U9*(4i8LJ=X);esAjfw6JGg=J0y)h;XO%!Y$h52b+i^du*H&E-sE^!7JSt zyi2p33@;F*tYS-?bj_q0(L{cpRcYOlhUO8qf^67_WV-AL8=04yO3rx?yJPhq|FfK` zkAom2SQeg&U}s0Ll@rZ=3utF8YSzaa@ZO&Ae9o8_m3n7_aVm^ct_4QnW-gHO3*jCdUnxMRp-;F}8Tjx8xnQ z!F|=>IgemtlUrGp(0XUdz6k2 z4rk|h{eoQp;yZ2MtN41M5gPhy;*HyxE_LG|>aJnGO9pX7D0l5B@Ht{`2QM1e$=OLP zF;t(q+38qn%KGC$M?Hn>*_ifzB&g}5XS6LAnpr#cTz?QINO5D;iOKNa38VJ!^DQa9 zd+LBN8aY)RKKvK3|M_vI83KP`=yt=Cv?Mtj^H5I?Je+r2d#Wv(VV<6DJcYT8;?V9b zl0*wt$GsK-Zkxo4ysbD0UYp>}nuo%Ag;cpTg!8gtLgLyBS11n<4?|Si1Y(l008;Tdm43 z^$U*=Jx91pgUgmKzXgxV?d|`_3D=CjA;oMaE&E`P6WpR6K<7e%Ey+z3o6$c@9vyx6 z$0_A|6ZEa_{@!ZKuyc#PneF}oC79S-r#-nJ(%O@k2fav;i(nwesr&D|Ri43hps(Dv zMBlm?`W$nUdZq4WD~42*ekV`Svu^qWZ|eD1E5nf{dy-1(>CgLoRX?X#eSWH|P1F#I z{Na&#FYRf}_q@VnnRG4M_^_q3TbsAoC;vC3%%V|UO*N8_*!t^XeBhC{G+DatVdpI$ zADI_6`BLY9Z4X9t{wUn3QV8!ws-Dm=7LZ{nVh)7mdDma8+S29*z6Eqp_}>Rv9c~d>xT$X}&VVhsX{eyA zB6)Gh#q-F6zy-VC_#ps823NG74ZyeG&_T0m&>rHJ)a=Qc06)@Dakm&T-u(Ol(>6*R zH7h}XCSlzd0N~$V{_ky~Hf53+Y}thqe>iK(OgTy|Iq|aA-|J&nc*P;5IhYn#Xz4w< z??}>@rF&Lwmb<%r%~>1SQv1fFNv)|x3ILpTdHtFrlbr3(##+6AGtw5GLW2E;K{rb?V# zVoZ3W`s|aekn%?8b&C$lF${~?Mc}Hfn;P9=+cHW*lOR%FwNO{# zZ2;E6!%UY%=PZvxcIa8K{`x0B2zy8ZzCWO$%JV5BLREt95lg%u;-BWnm)O{i@T5dwmiR zLXmyr50WS(J{14(M7mj9JD#;}jw0LtvOHhQj_hkG;yC>>e&FlDlWg&@i7%t&wGjsv zn1lDM+nxB;DKe`ikw_RN?*yRAXxb_)@}N7@oyafqd!3VW=8C(FjN@T4OfS=eEDJDn z7bvFO)gZpOdENC7l1CCG%iW?X7ej4ff7UG2Xk7Y)ct53S{g36iR|XoGtwL1F1f+H3 z#A_k2{yWJji8yTo4ZX`uQplvmsV{O}q;qdnw#*057|8^{)>E7Fz&tCPs8ZeHkr^~% zVNEHezLdr)#q*<(+Wu^zBcNd`mVvUe{|Dc$&j;Ao;Acc`t*lQMbFqQ@DR?TvzDkBW zMN4jxE2rYD?wXuFhV8Um1TPZc?L(Y2_&`R(k`@^?3i@0&h}B|Wb1kxHfj}LAqN4LX z3rlr5f|;eDzA(@f3Ea0nTWk6|ItOdQU9omfs*+(LPB!? 
zK0T~m>lXA_2UeVy*U;x$-XK#}T~k`iCf zYdeMb0Tt#!?z};+&q+=!acWYxcs_8tJjoZYZ*`F6HCx$Q(ESQ4%+GTIL=oD(@pNX( zk<Pbhv4PW9g?5V}z$rPuOGQeV^7Qniml<%>_^Rwdi-t}8`9 zXNvwc1)S8N{m15U` zp`%04J~=u*6F~n!LsDfK6E!fkTyll;DyC-1Aph?N*Fo6%weO3I3%R)0$4 z*Xi~BJk`y;!Ief>;ARaIgopl^%4vR}D~>Sr<1iwMOCoUit;9|*nb#+9wVTW4pty@H z`FFMt(~#{H&4CV%cGRDhQipRx_JUyTO%>@Bch z_79TFplROS5aY6?E!T4DNDL~W;%%n0X(W?d=rRC3*fu#B4A)Bk8HIJ;e0ZiHiEK$& ze8_hspeH=z^J&`c^?_eYU%mJ$dbsT8MO4q7E98~av(GB}0`FN5FnVR3!Gs^R5s{JC ze`WFSn{)Lr+6AFG7KPYr`SV-kAQMPj_}rpBNK8_c@v=HQov4u-u5yzrXEqM17%C>k z{@v19yZx2m9cpbV?zp7Dm);0XTya@NyrETAsJQ6EPe<)pYOC=S z-~BGF^tQT?a+0<}BBm-e-zyoKcYxr^%sz?l-dOd&xmF~u*E)5B3I-F{HU*Y>F&E%8 zcoMQiJ4wAgTU%X-*9x1c9cPm^vadWeP1+yWGu19CemI(kGuA(y?6Yb6Xzdq~O5PfT zy~CCv<|#xL_S-}FMSFmXy6%ERRWeqqY5i&&I<(R9>~S_vD%POEMX1KIAJgn;F)Ni9 zHmVCREDWSgX3tw9Zoj84Xwsi@C{3N&;&iQ3?TI1Y7YMi$z;8*z*t=tFe3!E>e~XD#Qu@YMQ{(8AnTdf3VtcuITXwGId-Gy zxUX8N?%dFQ(n^futxSgv=`?|yQ23M1g$>s}HDG;`j3`=$=#i~*z%m_ms_w`j)`aGl z7lQ|T7{4ahP9{||#9)#Kot~c6z7hes6N<_*qRG|y5GK#=t+g&&HJJaftf?`l>b{5w ztVeBGMJ>-~QUoc9Y_XqN`^8BfryUtm>pi7Udns@i;}W!ii)C>%6;keaVQF5h1=>c8 zb^2Lz$!LCod3ZE#d4Tko`eBNPd(11lWP#(vJR3O1M9HDzlC5n|O$N!ZNY2VbE&>J` z^d+XAOD#sKaJ*S#T6#llKg^bp;O@BSx<=tq0Kw|>4T|T#t@K21$Fo%w5^W-H>|r`W zq4-if_}_{)C-3x$Y%^!l_CJ{&c-B149N@Jy;mSIq-bO=Dx>M<5<5oq3a0_-F)1n7P z&_773L!!;r)jXUGavOB&s%>8pt25gd4DJEB?_aHh){;hH_!@G?d z>XZVAqC-sHL%?w#R~_cxMf<}+AP{?GA|MD#_0njj=}uS3oJ{cJmGA)?2cUp^l9qkB z`?9TgADv|kSom@j$d1G+cHQ9N#RTl}*gwl}UI=0N;0urUhqO6ZL~*RjT!@PMl55X` z%+7~&?*w(1h3eAaFjVDfVf|#^`xs{@6t58nVQ!OHrGUP^wpj)mWR{Gt7r$?I&(m{w zT7HyAA7e@Y(_unI8{aRM(S?Eqb8}mFaWzDhPj=o*&EAV(t&;oQR8G%NA`$DkOE=Te zyR;>PuQQWL&*uZU^L04{va1ji8r?PRWe50c{E~Pjos~cx9mkwH#x(gA8iHClj|!uU zr}`xG{FNRaY;%KQ`r>DoRSVPDIgQe!7nQqqYAahld( z!A)TEs6TMTw_0J??ph--GrO*v>_NWe9=||H3L~M3b!jo5qri1r{5~7KOLAX(v6HC? 
zsxAvvHhU;rROBcY&`PBGq=~YdURafI?Uf;^JiAazgy3&&P>G(-b8}lhh?&6CzVLbn zHT69gRuK*qBJMWlRX_0H6Vz7A`X@OwVB#LuT(8>g3YO@>?m`l|v8qDK&!YyZIT;XP zASQwJw6C|ksq@53z4M!JWl7AyB1s)BP2rDqs>#dJmjENdN$B#vSxEjbJ0v3GqVhEB zxupaf?zSO=u_&z_w!HOFT=L)=_{EKSxSZL#zHyacR1&4{+$iFtW65Q*D62NHXO*{L zit{B{i;2|2Vj60k+BJRB4K z2di;^DaRO8jFxqxHzmXJ_Tt0b>VEj$_{NU{^+<|c`&jGg@QoqLo}ntQpz)60=+Vl9 z$P!wm2cAm{)HIW0=Ap|!vN{Pbve*Jb_e(IQ54kT*p3 zad1k5xLgXF5#P`9Loi)glAq?zwzH;7!C0S7QAtTbgQH_SgyD`z)F3mil#x6&-HfvE z*cDq#Wa|X z_gbozJU6t}P}H{ge4|%Iq2jTlA%{vPc$48BKrS4q0+q^MN$plupYA4WND9#5OpWxA zc~mnkI!lP3gRR?+?2ciy&6p>+N)pQ5jHY9-n=pAMiS4gnN`7r^{sgMpGfAeB?|l(n znMu??x*3iHg%ziX& zDoPcPUp7*)1&^R!D~ZVuwecq+H;J?p2`By~NnVc{J6KK3wn!+=-dxU`Bc4oxZK(w4 z?`X<^y0VH;sgGCrxGno#;{Z@xiSutzJb?JPDye^eoGwOJ2c3la-Zd{8cr1+Z9ewVL zThL9{8G70m`qQ#ML56$75<#y}h?cX9f|O2`!G#U$GKZBwO5m4ZMX79(OA7WbJ$B@m zgrgODP4ChkyXK33v_%XQgs3x-w|m><#RwVUiA4M4=0OVNH&5kFTP`Ms{&lfj4AKE* zf5rA#rG78!y}NPq^ll?7UnZCkGh8x+EwV*TGjVXz$+<1z(-yy5axM==$5SbE{Jdg^ zWHAGlCz6_SK{RY=*9tp63&vfAk=X5kShcjZ@6y{`MD``J2k5S1UqpC|oUC&ZGTw6v zL9bofK$>~5RBTiFN!=BZ*)FJaolc}upW-_8guTlOnbxr64TZIm+_b-h(FP1$V09$d zbaB8uGLM_4uwpR!d#%vbcSd}FyBwhS7kq#_4k^*|rBKU% z%=^uYA~ra1$Pm(6YtwewHtYVV;Xgc2{txB%m4;`TpvlV9$+$0vrl#OpP3|f$y-poI z&pv7Q&Lzf0KQ)@z$Qv$hRay(k%G+T#+k;3KiM;X?8~EleU!PxcP)P!fdLhMs_x_Yj z$`Ni$`3s@^PeWqFxe$X3HoLi&uyB1fsHz8HX9Xk+Ike`zU_Fr^ma{Ru)wNSd6|~c zYm?(zmro!ku^AXf=_pTX)~r8?jEvfiuEU{x4bbT?2;` z{Fm#KcFzl?&KWXgJ~$W1s+&nU7@mEbPubIzT?p zKop4$Y7nr?wK_L@hr%g6Ez=rptyL%%qlnHIy?tao?iZC}|EBNA_L2gEf!;~0_@DC_eSV{xx3 zp|6&FBZ{Pyroad`4H`1t7l_{RcEt_vj??i(A40Ttsv3SoI37CVy_%$dN!$C`VqOOG z|Mvy|&t8Q8svhcO=rxoK#L<7%ku`czP+$#`c2SVgIG}5|HJ%kpyUOK5?K;^NAKGwy z$xy2S{?OqJ5jBEIz?w=JyEj4%h2jz&{6ruFiSvhY9wrsTmAi?8feX}RZmC$fovTZ4 z$e_zD0|An&M)W^SXAkI+Q{KoUa81IF{s5&{Jyx4jN3XXkODzi_rbc!@ejs+ZHW0Bg ze?w`i&8Zrqz^WfKOCm$=C<9{<~p1zGVBZ8-ab7OK?LFTXE)nM zl(f2}mq(Vt2L}3#SMB*20EjvGp9}G)w7s8CT#)|<5B#qh{y+P?NmTjUX;8zO1bE=` z1nvY;6jC0Sm?!xxT{~6>Ao9>W$hJGo+0oe*K4wLo&pN*simF|M-(N%dnZ|V#*{yNp zHBWKZn`|m0N9~rrjViLXCsxK*#;fxq-erF3t@!eeX8i5Tx^(1AVExI9OSSv(y1BX* zq^}dmSmbeq0;|EqDzH!kvwMq4T6#%1jVeO*R(E3@P^)ve2&25}%ZSblQCj}MJefThp8oc*zc=?eo5?0ZaCq?KTogj1OGUJO`h6ZQ(b)k+9pC`A5PElEKBv&g|Tm> zCb?tbKknB~mehx@WAeiH9Gzwms&+8$31=5rr+&M5Z+_Tzu#g7hQ0lEm@@MliX=Bis z@zwp#vTnDUp;1}Xs^UwOrs7P_TMao#9E zs!1R6yg|=)ME||PBy;!(wFi4K>rDQ(Cb%p%!6k`F(b6(IHczoaVr&Gi8RvCcgfwrZ z8t$)w=2%)GNR}Edja>yOP!3C$UEXq(Fib%;})?$B3ui5qd^$=!e- zIrJ#~Byra83b3bv^WNm@buY~&!Vnqf#!3a z7ma$x9TsjM1?l5|oAe@q&c)svq!A8@&lZWMsXZ&=7(USZ%EhmvFAGeTERtX)M>pyT zR#zh8cs4vIn5kH6W>bNZt62~0jWxf@Z?{i<<}<11lg+7r&RcsuK~~FFHraE8@$SPT z-90@{Em)!Ue1B?3g3XHLmrcvhUF*PCAipYOr%5QEMn7uGI?HdJv*4B_w~#?i&$uz$ z^(YD<>pcLWVZu+IJm~fFWnf~Q25m{pFKhpY!`4=Nd3bVEwe5ncCt6<3?EjJnsqa$I zdH;Ouv8xkt9wRFv*;>1Bk)g=w1>9Ff3`KkXJh3Qj3ZW3UDvE!a!WyHqJWjP>ENMAEc2l#vUcav7ca-+KtkMa<42P4 zM)-}K4ShQ3hUdDq#`J0-@4X;3IZ(g3z1J!SpJX=84ML?wU(BD#;!jx}tCxZ<{?{Xk zzaf}1n)HZN;2p}TB3wYtlLtk`sb^iOs3{8qW|Ib~<)nYJyEbrulgNAh=pe)zC}nET zs#EsByVBh{vFORlY-nfxYF=dbOd)$XbD@vYvHdbeol~h}!0Y}a?Udm!wj4A(2S(DC ztp*Iyn8_Qa-3I3pTQ4Y@Lh+`s zc+^B2^FtxOa>gmqH-a^5fHJ{p|Oo+6^y3(L5b zn{o3W1>g!|05li7vl=tmG6oIoWq+g;-S5etua=L`G9d+T9K9NKdCpXUZzAEkPY;C|Jfm zyDz?cJgVpwP&-`j>(HcUlbKV8n~gik0|_)5DiRxVt@$!(2g!x5Oz-a9>{wkO-B}(i zDRd`P5Zkmc$@kHrM@9tbvb-_BXR^b!t3^ZyOHq;GD_rAZ>3kH~(`&Mt9w~F|H$7Zu z)VdU;cK(VuYOA|Ut4it{_Sbx+3Dyoueqa^TvoeltT<-jQ-FCmwpbkzb`}~ihudtW< zzd&DQziOUifk;bX)|wa6em+J%sc}~J=osu|VjkU0ANs`n2ZU)SP{3+>m%o#>wl<-UJ{j;=XGREy} zch_MxUT2%g+T{+c09<*K-kY+~I?&;G)L zO;O)O?9@U=K{5gg4McAb4Ny(-SxNVbNlUraw{Q|9Cf7yT4;SjDL$ozV?&*%Ps+HDy 
z-3A{tPM)qi=hW}$uG=DjR|=WObUV%CJeAM4(E)b{oK5xc)G;L(`aW99vYn_g;#C5lKJ{F`4<~s&8(lm2XBDeLyr~q#k z>XkKab^5{5_=tC5acc-S>sxnZrE3>F?>2U}bq?~R&)v1%l*)86fDR}q(3@q|U<@Ea zMsRvv9VPH^-?3fc%p8@CpnB!-mEb`Wv74CQV%7TSalaB5Z@Hs=6GHsFL1XjEPYR5| znqd{%7^^>GW{XHpmGu-IGkpZIkE{);>Y)0B4NQFV@+xK4#)OKo_pD}t!_*=?cef9S z(}hO{o%^-h!g_sF7whGP)s*6hQ%rjMd$||y z$2ATcEOG6aB-(S8X%#8=UY|UxMNpXjnDYtNwG+6#-EFj!Z!~_al4F(r*xrw8sgG>) z(c3~k(J^%LH)c_voyW@zv$2fp-{`A0NxjCH)m7I9?>cBY%a&v}Z|`n;3uB&+D1cZs z`D=z6lzA7-zpDaKlwqG3yOA3SD`P88yj+84-pIb&qms!8s#oLv%WABqU{FG3*UGdy z$t53!R+eDYG8q6TN9t{;ZI7B@&36FWI~(lS7mFl8zZCktmAF0B$6pr57$|W)YQS!F zB;#2`=y$;8yxh0fGFXb(Am*a=d=~M*=h<-I_AhIv4(rWj*~7P)J>KgNlvM`HcB?%o z7)=Ih?4Eih*0x@?bnUaY zyCp_VI&Sxw;U~@Z?vyrwcEW+%-|LCrP+>_U1OibvCb`69RgkEnF3?Is?Uza%QM`5; zYVO}sg-lsaD4nz3(;X;0IE=tRL$gl7O_Pbtz|#Wu@XT#(#mmaQ?==!HvY=v*OoS*kt#@}UtfwZ{Tb_Q!Q+ z5xM>}j5dz^I!~t*d&^n|sFk|RV7RYw4~(g3hSiPVj$`LC$Rbly6Hd&Bv7krmDJAYQ z<1+%caD1fP~V6rGGHmP3^mww%U3#?x2vl-FZFR~vyt&~|RW2zMHVi(h-p8g^+nvie z4ky2iMnOyVW1kM`?cL=8UYTU>BKQb+GL!VZ;~9o{uyy-qF*w3fdX2%4^^1v9Jpc3e OKNk4^+yZz0nD}46?!;jL diff --git a/src/c++/perf_analyzer/genai-perf/docs/compare.md b/src/c++/perf_analyzer/genai-perf/docs/compare.md deleted file mode 100644 index 5d1a36413..000000000 --- a/src/c++/perf_analyzer/genai-perf/docs/compare.md +++ /dev/null @@ -1,251 +0,0 @@ - - -# GenAI-Perf Compare Subcommand - -There are two approaches for the users to use the `compare` subcommand to create -plots across multiple runs. First is to directly pass the profile export files -with `--files` option - -## Running initially with `--files` option - -If the user does not have a YAML configuration file, -they can run the `compare` subcommand with the `--files` option to generate a -set of default plots as well as a pre-filled YAML config file for the plots. - -```bash -genai-perf compare --files profile1.json profile2.json profile3.json -``` - -This will generate the default plots and compare across the three runs. 
-GenAI-Perf will also generate an initial YAML configuration file `config.yaml`
-that is pre-filled with plot configurations as follows:
-
-```yaml
-plot1:
-  title: Time to First Token
-  x_metric: ''
-  y_metric: time_to_first_tokens
-  x_label: Time to First Token (ms)
-  y_label: ''
-  width: 1200
-  height: 700
-  type: box
-  paths:
-  - profile1.json
-  - profile2.json
-  - profile3.json
-  output: compare
-plot2:
-  title: Request Latency
-  x_metric: ''
-  y_metric: request_latencies
-  x_label: Request Latency (ms)
-  y_label: ''
-  width: 1200
-  height: 700
-  type: box
-  paths:
-  - profile1.json
-  - profile2.json
-  - profile3.json
-  output: compare
-plot3:
-  title: Distribution of Input Sequence Lengths to Output Sequence Lengths
-  x_metric: input_sequence_lengths
-  y_metric: output_sequence_lengths
-  x_label: Input Sequence Length
-  y_label: Output Sequence Length
-  width: 1200
-  height: 450
-  type: heatmap
-  paths:
-  - profile1.json
-  - profile2.json
-  - profile3.json
-  output: compare
-plot4:
-  title: Time to First Token vs Input Sequence Lengths
-  x_metric: input_sequence_lengths
-  y_metric: time_to_first_tokens
-  x_label: Input Sequence Length
-  y_label: Time to First Token (ms)
-  width: 1200
-  height: 700
-  type: scatter
-  paths:
-  - profile1.json
-  - profile2.json
-  - profile3.json
-  output: compare
-plot5:
-  title: Token-to-Token Latency vs Output Token Position
-  x_metric: token_positions
-  y_metric: inter_token_latencies
-  x_label: Output Token Position
-  y_label: Token-to-Token Latency (ms)
-  width: 1200
-  height: 700
-  type: scatter
-  paths:
-  - profile1.json
-  - profile2.json
-  - profile3.json
-  output: compare
-```
-
-Once the user has the YAML configuration file,
-they can repeat the process of editing the config file and running with the
-`--config` option to re-generate the plots iteratively.
-
-```bash
-# edit
-vi config.yaml
-
-# re-generate the plots
-genai-perf compare --config config.yaml
-```
-
-## Running directly with `--config` option
-
-If the user would like to create a custom plot (other than the default ones provided),
-they can build their own YAML configuration file that contains the information
-about the plots they would like to generate.
-For instance, if the user would like to see how the inter token latencies change
-with the number of output tokens, which is not part of the default plots,
-they could add the following YAML block to the file:
-
-```yaml
-plot1:
-  title: Inter Token Latency vs Output Tokens
-  x_metric: num_output_tokens
-  y_metric: inter_token_latencies
-  x_label: Num Output Tokens
-  y_label: Avg ITL (ms)
-  width: 1200
-  height: 450
-  type: scatter
-  paths:
-  - <path-to-profile-export-file-1>
-  - <path-to-profile-export-file-2>
-  output: compare
-```
-
-After adding the lines, the user can run the following command to generate the
-plots specified in the configuration file (in this case, `config.yaml`):
-
-```bash
-genai-perf compare --config config.yaml
-```
-
-The user can check the generated plots under the output directory:
-```
-compare/
-├── inter_token_latency_vs_output_tokens.jpeg
-└── ...
-```
-
-## YAML Schema
-
-Here are more details about the YAML configuration file and its structure.
-The general YAML schema for the plot configuration looks as follows:
-
-```yaml
-plot1:
-  title: [str]
-  x_metric: [str]
-  y_metric: [str]
-  x_label: [str]
-  y_label: [str]
-  width: [int]
-  height: [int]
-  type: [scatter,box,heatmap]
-  paths:
-  - [str]
-  - ...
-  output: [str]
-
-plot2:
-  title: [str]
-  x_metric: [str]
-  y_metric: [str]
-  x_label: [str]
-  y_label: [str]
-  width: [int]
-  height: [int]
-  type: [scatter,box,heatmap]
-  paths:
-  - [str]
-  - ...
-  output: [str]
-
-# add more plots
-```
-
-The user can add as many plots as they would like by adding plot blocks to
-the configuration file (the keys follow a `plot<#>` pattern, but that is not
-required; any arbitrary string can be used as a key).
-For each plot block, the user can specify the following configurations:
-- `title`: The title of the plot.
-- `x_metric`: The name of the metric to be used on the x-axis.
-- `y_metric`: The name of the metric to be used on the y-axis.
-- `x_label`: The x-axis label (or description).
-- `y_label`: The y-axis label (or description).
-- `width`: The width of the entire plot.
-- `height`: The height of the entire plot.
-- `type`: The type of the plot. It must be one of the three: `scatter`, `box`,
-or `heatmap`.
-- `paths`: List of paths to the profile export files to compare.
-- `output`: The path to the output directory to store all the plots and the YAML
-configuration file.
-
-> [!Note]
-> The user *MUST* provide at least one valid path to a profile export file.
-
-## Example Plots
-
-Here is the list of sample plots that get created by default when running the
-`compare` subcommand:
-
-### Distribution of Input Sequence Lengths to Output Sequence Lengths
-
-### Request Latency Analysis
-
-### Time to First Token Analysis
-
-### Time to First Token vs. Input Sequence Lengths
-
-### Token-to-Token Latency vs. Output Token Position
-
diff --git a/src/c++/perf_analyzer/genai-perf/docs/embeddings.md b/src/c++/perf_analyzer/genai-perf/docs/embeddings.md
deleted file mode 100644
index e508f9eff..000000000
--- a/src/c++/perf_analyzer/genai-perf/docs/embeddings.md
+++ /dev/null
@@ -1,93 +0,0 @@
-
-
-# Profile Embeddings Models with GenAI-Perf
-
-GenAI-Perf allows you to profile embedding models running on an
-[OpenAI Embeddings API](https://platform.openai.com/docs/api-reference/embeddings)-compatible server.
-
-## Create a Sample Embeddings Input File
-
-To create a sample embeddings input file, use the following command:
-
-```bash
-echo '{"text": "What was the first car ever driven?"}
-{"text": "Who served as the 5th President of the United States of America?"}
-{"text": "Is the Sydney Opera House located in Australia?"}
-{"text": "In what state did they film Shrek 2?"}' > embeddings.jsonl
-```
-
-This will generate a file named `embeddings.jsonl` with the following content:
-```jsonl
-{"text": "What was the first car ever driven?"}
-{"text": "Who served as the 5th President of the United States of America?"}
-{"text": "Is the Sydney Opera House located in Australia?"}
-{"text": "In what state did they film Shrek 2?"}
-```
-
-## Start an OpenAI Embeddings-Compatible Server
-To start an OpenAI embeddings-compatible server, run the following command:
-```bash
-docker run -it --net=host --rm --gpus=all vllm/vllm-openai:latest --model intfloat/e5-mistral-7b-instruct --dtype float16 --max-model-len 1024
-```
-
-## Run GenAI-Perf
-To profile embeddings models using GenAI-Perf, use the following command:
-
-```bash
-genai-perf profile \
-  -m intfloat/e5-mistral-7b-instruct \
-  --service-kind openai \
-  --endpoint-type embeddings \
-  --batch-size 2 \
-  --input-file embeddings.jsonl
-```
-
-This will use default values for optional arguments.
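-
-For reference, each request GenAI-Perf sends in this setup is an OpenAI-style
-embeddings call. A minimal sketch of the request shape (illustrative only;
-with `--batch-size 2`, two texts from the input file are batched per request):
-
-```bash
-# Illustrative request shape; assumes the server from above on its default port 8000.
-curl http://localhost:8000/v1/embeddings \
-  -H "Content-Type: application/json" \
-  -d '{"model": "intfloat/e5-mistral-7b-instruct",
-       "input": ["What was the first car ever driven?",
-                 "Who served as the 5th President of the United States of America?"]}'
-```
-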
-You can also pass in
-additional arguments with the `--extra-inputs` [flag](../README.md#input-options).
-For example, you could use this command:
-
-```bash
-genai-perf profile \
-  -m intfloat/e5-mistral-7b-instruct \
-  --service-kind openai \
-  --endpoint-type embeddings \
-  --extra-inputs user:sample_user
-```
-
-Example output:
-
-```
-                      Embeddings Metrics
-┏━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━┳━━━━━━━┳━━━━━━━━┳━━━━━━━┳━━━━━━━┳━━━━━━━┓
-┃ Statistic            ┃   avg ┃   min ┃    max ┃   p99 ┃   p90 ┃   p75 ┃
-┡━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━╇━━━━━━━╇━━━━━━━━╇━━━━━━━╇━━━━━━━╇━━━━━━━┩
-│ Request latency (ms) │ 42.21 │ 28.18 │ 318.61 │ 56.50 │ 49.21 │ 43.07 │
-└──────────────────────┴───────┴───────┴────────┴───────┴───────┴───────┘
-Request throughput (per sec): 23.63
-```
diff --git a/src/c++/perf_analyzer/genai-perf/docs/files.md b/src/c++/perf_analyzer/genai-perf/docs/files.md
deleted file mode 100644
index 6ebdf69fa..000000000
--- a/src/c++/perf_analyzer/genai-perf/docs/files.md
+++ /dev/null
@@ -1,129 +0,0 @@
-
-
-# Generated File Structures
-
-## Overview
-
-This document serves as a guide to understanding the structure and contents of
-the files generated by GenAI-Perf.
-
-## Directory Structure
-
-After running GenAI-Perf, your file tree should contain the following:
-
-```
-genai-perf/
-├── artifacts/
-│   ├── data/
-│   └── images/
-```
-
-## File Types
-Within the artifacts and docs directories, several file types are generated,
-including .gzip, .csv, .json, .html, and .jpeg. Below is a detailed
-explanation of each file and its purpose.
-
-### Artifacts Directory
-
-#### Data Subdirectory
-
-The data subdirectory contains the raw and processed performance data files.
-
-##### GZIP Files
-
-- all_data.gzip: Aggregated performance data from all collected metrics.
-- input_sequence_lengths_vs_output_sequence_lengths.gzip: This contains data on
-the input sequence lengths versus the output sequence lengths for each request.
-- request_latency.gzip: This contains the latency for each request.
-- time_to_first_token.gzip: This contains the time to first token for each request.
-- token_to_token_vs_output_position.gzip: This contains the time from one token
-generation to the next versus the position of the output token for each token.
-- ttft_vs_input_sequence_lengths.gzip: This contains the time to first token
-versus the input sequence length for each request.
-
-##### JSON Files
-
-- llm_inputs.json: This contains the input prompts provided to the LLM during testing.
-- profile_export.json: This is provided by Perf Analyzer and contains the timestamps
-for each event in the lifecycle of each request. This is low-level data used to calculate
-metrics by GenAI-Perf.
-
-##### CSV File
-
-- profile_export_genai_perf.csv: A CSV of the output tables printed
-in the GenAI-Perf output. These may have more detail than the printed tables.
-
-#### Images Subdirectory
-
-The images subdirectory contains visual representations of the performance
-data. All images are in both HTML and JPEG formats.
-
-##### HTML and JPEG Files
-- input_sequence_lengths_vs_output_sequence_lengths: A heat map showing the
-relationship between input and generated tokens.
-- request_latency: A box plot showing request latency.
-- time_to_first_token: A box plot showing time to first token.
-- token_to_token_vs_output_position: A scatterplot showing token-to-token
-time versus output token position.
-- ttft_vs_input_sequence_lengths: A scatterplot showing time to first token
-versus the input sequence lengths.
-
-## Usage Instructions
-
-To use the generated files, navigate to the artifacts/data directory. Then,
-the next steps depend on the file format you wish to work with.
-
-### GZIP Files
-
-The GZIP files contain Parquet files with calculated data, which can be read
-with Pandas in Python. For example, you can create a dataframe with these files:
-
-```
-import pandas
-
-# Read one of the extracted Parquet files into a DataFrame.
-df = pandas.read_parquet(path_to_file)
-```
-
-You can then use Pandas to work with the data.
-
-```
-print(df.head())      # See the first few rows of the data.
-print(df.describe())  # Get summary statistics for the data.
-```
-
-### CSV and JSON Files
-Open .csv and .json files with spreadsheet or JSON parsing tools for structured
-data analysis. These can also be read via a text editor, like Vim.
-
-### HTML Files
-
-View .html visualizations in a web browser for interactive data exploration.
-
-### JPEG Files
-
-Use image viewing software to open .jpeg images for static visual representations.
diff --git a/src/c++/perf_analyzer/genai-perf/docs/lora.md b/src/c++/perf_analyzer/genai-perf/docs/lora.md
deleted file mode 100644
index d30867eda..000000000
--- a/src/c++/perf_analyzer/genai-perf/docs/lora.md
+++ /dev/null
@@ -1,53 +0,0 @@
-
-
-# Profile Multiple LoRA Adapters
-GenAI-Perf allows you to profile multiple LoRA adapters on top of a base model.
-
-## Select LoRA Adapters
-To do this, list multiple adapters after the model name option `-m`:
-
-```bash
-genai-perf -m lora_adapter1 lora_adapter2 lora_adapter3
-```
-
-## Choose a Strategy for Selecting Models
-When profiling with multiple models, you can specify how the models should be
-assigned to prompts using the `--model-selection-strategy` option:
-
-```bash
-genai-perf profile \
-    -m lora_adapter1 lora_adapter2 lora_adapter3 \
-    --model-selection-strategy round_robin
-```
-
-This setup will cycle through the lora_adapter1, lora_adapter2, and
-lora_adapter3 models in a round-robin manner for each prompt.
-
-For more details on additional options and configurations, refer to the
-[Command Line Options section](../README.md#command-line-options) in the README.
\ No newline at end of file
diff --git a/src/c++/perf_analyzer/genai-perf/docs/multi_modal.md b/src/c++/perf_analyzer/genai-perf/docs/multi_modal.md
deleted file mode 100644
index bb9f33c60..000000000
--- a/src/c++/perf_analyzer/genai-perf/docs/multi_modal.md
+++ /dev/null
@@ -1,122 +0,0 @@
-
-
-# Profile Vision-Language Models with GenAI-Perf
-
-GenAI-Perf allows you to profile Vision-Language Models (VLM) running on an
-[OpenAI Chat Completions API](https://platform.openai.com/docs/guides/chat-completions)-compatible server
-by sending [multi-modal content](https://platform.openai.com/docs/guides/vision) to the server.
-Currently, you can send multi-modal content with GenAI-Perf using the following two approaches:
-1. The synthetic data generation approach, where GenAI-Perf generates the multi-modal data for you.
-2. The Bring Your Own Data (BYOD) approach, where you provide GenAI-Perf with the data to send.
-
-Before we dive into the two approaches,
-you can start an OpenAI-API-compatible server with a VLM model using the following command:
-
-```bash
-docker run --runtime nvidia --gpus all \
-    -p 8000:8000 --ipc=host \
-    vllm/vllm-openai:latest \
-    --model llava-hf/llava-v1.6-mistral-7b-hf --dtype float16
-```
-
-
-## Approach 1: Synthetic Multi-Modal Data Generation
-
-GenAI-Perf can generate synthetic multi-modal data such as texts or images using
-the parameters provided by the user through the CLI.
-
-```bash
-genai-perf profile \
-    -m llava-hf/llava-v1.6-mistral-7b-hf \
-    --service-kind openai \
-    --endpoint-type vision \
-    --image-width-mean 512 \
-    --image-width-stddev 30 \
-    --image-height-mean 512 \
-    --image-height-stddev 30 \
-    --image-format png \
-    --synthetic-input-tokens-mean 100 \
-    --synthetic-input-tokens-stddev 0 \
-    --streaming
-```
-
-> [!Note]
-> Under the hood, GenAI-Perf generates synthetic images using a few source images
-> under the `llm_inputs/source_images` directory.
-> If you would like to add/remove/edit the source images,
-> you can do so by directly editing the source images under the directory.
-> GenAI-Perf will pick up the images under the directory automatically when
-> generating the synthetic images.
-
-
-## Approach 2: Bring Your Own Data (BYOD)
-
-Instead of letting GenAI-Perf create the synthetic data,
-you can also provide GenAI-Perf with your own data using the
-[`--input-file`](../README.md#--input-file-path) CLI option.
-The file needs to be in JSONL format and should contain both the prompt and
-the filepath to the image to send.
-
-For instance, an example input file would look like the following:
-```bash
-// input.jsonl
-{"text_input": "What is in this image?", "image": "path/to/image1.png"}
-{"text_input": "What is the color of the dog?", "image": "path/to/image2.jpeg"}
-{"text_input": "Describe the scene in the picture.", "image": "path/to/image3.png"}
-...
-```
-
-After you create the file, you can run GenAI-Perf using the following command:
-
-```bash
-genai-perf profile \
-    -m llava-hf/llava-v1.6-mistral-7b-hf \
-    --service-kind openai \
-    --endpoint-type vision \
-    --input-file input.jsonl \
-    --streaming
-```
-
-Running GenAI-Perf using either approach will give you output that looks like
-the following:
-
-```bash
-                               LLM Metrics
-┏━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━┓
-┃ Statistic                ┃      avg ┃      min ┃      max ┃      p99 ┃      p90 ┃      p75 ┃
-┡━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━┩
-│ Time to first token (ms) │   321.05 │   291.30 │   537.07 │   497.88 │   318.46 │   317.35 │
-│ Inter token latency (ms) │    12.28 │    11.44 │    12.88 │    12.87 │    12.81 │    12.53 │
-│ Request latency (ms)     │ 1,866.23 │ 1,044.70 │ 2,832.22 │ 2,779.63 │ 2,534.64 │ 2,054.03 │
-│ Output sequence length   │   126.68 │    59.00 │   204.00 │   200.58 │   177.80 │   147.50 │
-│ Input sequence length    │   100.00 │   100.00 │   100.00 │   100.00 │   100.00 │   100.00 │
-└──────────────────────────┴──────────┴──────────┴──────────┴──────────┴──────────┴──────────┘
-Output token throughput (per sec): 67.40
-Request throughput (per sec): 0.53
-```
diff --git a/src/c++/perf_analyzer/genai-perf/docs/rankings.md b/src/c++/perf_analyzer/genai-perf/docs/rankings.md
deleted file mode 100644
index a316ef857..000000000
--- a/src/c++/perf_analyzer/genai-perf/docs/rankings.md
+++ /dev/null
@@ -1,100 +0,0 @@
-
-
-# Profile Ranking Models with GenAI-Perf
-
-
-GenAI-Perf allows you to profile ranking models compatible with Hugging Face's
-[Text Embeddings Inference's re-ranker API](https://huggingface.co/docs/text-embeddings-inference/en/quick_tour#re-rankers).
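-
-At a high level, each re-rank request pairs a single query with a list of
-candidate passages, which is why the input below is split into a queries file
-and a passages file. A sketch of what one request to a TEI-style `rerank`
-endpoint looks like (illustrative only):
-
-```bash
-# Illustrative request shape; assumes a TEI-compatible server on localhost:8080.
-curl http://localhost:8080/rerank \
-  -H "Content-Type: application/json" \
-  -d '{"query": "What was the first car ever driven?",
-       "texts": ["Eric Anderson (born January 18, 1968) is an American sociologist and sexologist.",
-                 "Kevin Loader is a British film and television producer."]}'
-```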
- -## Create a Sample Rankings Input Directory - -To create a sample rankings input directory, follow these steps: - -Create a directory called rankings_jsonl: -```bash -mkdir rankings_jsonl -``` - -Inside this directory, create a JSONL file named queries.jsonl with queries data: - -```bash -echo '{"text": "What was the first car ever driven?"} -{"text": "Who served as the 5th President of the United States of America?"} -{"text": "Is the Sydney Opera House located in Australia?"} -{"text": "In what state did they film Shrek 2?"}' > rankings_jsonl/queries.jsonl -``` - -Create another JSONL file named passages.jsonl with passages data: - -```bash -echo '{"text": "Eric Anderson (born January 18, 1968) is an American sociologist and sexologist."} -{"text": "Kevin Loader is a British film and television producer."} -{"text": "Francisco Antonio Zea Juan Francisco Antonio Hilari was a Colombian journalist, botanist, diplomat, politician, and statesman who served as the 1st Vice President of Colombia."} -{"text": "Daddys Home 2 Principal photography on the film began in Massachusetts in March 2017 and it was released in the United States by Paramount Pictures on November 10, 2017. Although the film received unfavorable reviews, it has grossed over $180 million worldwide on a $69 million budget."}' > rankings_jsonl/passages.jsonl -``` - -## Start a Hugging Face Re-Ranker-Compatible Server -To start a Hugging Face re-ranker-compatible server, run the following commands: - -```bash -model=BAAI/bge-reranker-large -revision=refs/pr/4 -volume=$PWD/data - -docker run --gpus all -p 8080:80 -v $volume:/data --pull always ghcr.io/huggingface/text-embeddings-inference:1.3 --model-id $model --revision $revision -``` - -## Run GenAI-Perf -To profile ranking models using GenAI-Perf, use the following command: - -```bash -genai-perf profile \ - -m BAAI/bge-reranker-large \ - --service-kind openai \ - --endpoint-type rankings \ - --endpoint rerank \ - --input-file rankings_jsonl/ \ - -u localhost:8080 \ - --extra-inputs rankings:tei \ - --batch-size 2 -``` - -This command specifies the use of Hugging Face's ranking API with `--endpoint rerank` and `--extra-inputs rankings:tei`. - -Example output: - -``` - Rankings Metrics -┏━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━┳━━━━━━┳━━━━━━━┳━━━━━━━┳━━━━━━┳━━━━━━┓ -┃ Statistic ┃ avg ┃ min ┃ max ┃ p99 ┃ p90 ┃ p75 ┃ -┡━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━╇━━━━━━╇━━━━━━━╇━━━━━━━╇━━━━━━╇━━━━━━┩ -│ Request latency (ms) │ 5.48 │ 2.50 │ 23.91 │ 10.27 │ 8.34 │ 6.07 │ -└──────────────────────┴──────┴──────┴───────┴───────┴──────┴──────┘ -Request throughput (per sec): 180.11 -``` diff --git a/src/c++/perf_analyzer/genai-perf/docs/tutorial.md b/src/c++/perf_analyzer/genai-perf/docs/tutorial.md deleted file mode 100644 index 15cc53efe..000000000 --- a/src/c++/perf_analyzer/genai-perf/docs/tutorial.md +++ /dev/null @@ -1,301 +0,0 @@ - - -# Tutorials - -- [Profile GPT2 running on Triton + TensorRT-LLM](#tensorrt-llm) -- [Profile GPT2 running on Triton + vLLM](#triton-vllm) -- [Profile GPT2 running on OpenAI Chat Completions API-Compatible Server](#openai-chat) -- [Profile GPT2 running on OpenAI Completions API-Compatible Server](#openai-completions) - ---- - -## Profile GPT2 running on Triton + TensorRT-LLM - -### Run GPT2 on Triton Inference Server using TensorRT-LLM - -

-<details>
-<summary>See instructions</summary>
-
-Run Triton Inference Server with TensorRT-LLM backend container:
-
-```bash
-export RELEASE="yy.mm" # e.g. export RELEASE="24.06"
-
-docker run -it --net=host --gpus=all --shm-size=2g --ulimit memlock=-1 --ulimit stack=67108864 nvcr.io/nvidia/tritonserver:${RELEASE}-trtllm-python-py3
-
-# Install Triton CLI (~5 min):
-pip install "git+https://github.com/triton-inference-server/triton_cli@0.0.8"
-
-# Download model:
-triton import -m gpt2 --backend tensorrtllm
-
-# Run server:
-triton start
-```
-
-</details>
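-
-Before profiling, you can optionally sanity-check that the server is up. A
-quick check, assuming Triton's default HTTP port (8000):
-
-```bash
-# Returns HTTP 200 once the server and model are ready to receive requests.
-curl -sf localhost:8000/v2/health/ready && echo "server ready"
-```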
- -### Run GenAI-Perf - -Run GenAI-Perf from Triton Inference Server SDK container: - -```bash -export RELEASE="yy.mm" # e.g. export RELEASE="24.06" - -docker run -it --net=host --gpus=all nvcr.io/nvidia/tritonserver:${RELEASE}-py3-sdk - -# Run GenAI-Perf in the container: -genai-perf profile \ - -m gpt2 \ - --service-kind triton \ - --backend tensorrtllm \ - --num-prompts 100 \ - --random-seed 123 \ - --synthetic-input-tokens-mean 200 \ - --synthetic-input-tokens-stddev 0 \ - --streaming \ - --output-tokens-mean 100 \ - --output-tokens-stddev 0 \ - --output-tokens-mean-deterministic \ - --tokenizer hf-internal-testing/llama-tokenizer \ - --concurrency 1 \ - --measurement-interval 4000 \ - --profile-export-file my_profile_export.json \ - --url localhost:8001 -``` - -Example output: - -``` - LLM Metrics -┏━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━┓ -┃ Statistic ┃ avg ┃ min ┃ max ┃ p99 ┃ p90 ┃ p75 ┃ -┡━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━┩ -│ Time to first token (ns) │ 13,266,974 │ 11,818,732 │ 18,351,779 │ 16,513,479 │ 13,741,986 │ 13,544,376 │ -│ Inter token latency (ns) │ 2,069,766 │ 42,023 │ 15,307,799 │ 3,256,375 │ 3,020,580 │ 2,090,930 │ -│ Request latency (ns) │ 223,532,625 │ 219,123,330 │ 241,004,192 │ 238,198,306 │ 229,676,183 │ 224,715,918 │ -│ Output sequence length │ 104 │ 100 │ 129 │ 128 │ 109 │ 105 │ -│ Input sequence length │ 199 │ 199 │ 199 │ 199 │ 199 │ 199 │ -└──────────────────────────┴─────────────┴─────────────┴─────────────┴─────────────┴─────────────┴─────────────┘ -Output token throughput (per sec): 460.42 -Request throughput (per sec): 4.44 -``` - -## Profile GPT2 running on Triton + vLLM - -### Run GPT2 on Triton Inference Server using vLLM - -
-See instructions - -Run Triton Inference Server with vLLM backend container: - -```bash -export RELEASE="yy.mm" # e.g. export RELEASE="24.06" - - -docker run -it --net=host --gpus=1 --shm-size=2g --ulimit memlock=-1 --ulimit stack=67108864 nvcr.io/nvidia/tritonserver:${RELEASE}-vllm-python-py3 - -# Install Triton CLI (~5 min): -pip install "git+https://github.com/triton-inference-server/triton_cli@0.0.8" - -# Download model: -triton import -m gpt2 --backend vllm - -# Run server: -triton start -``` - -
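As with the TensorRT-LLM setup, it is worth sending one request by hand before benchmarking. A small smoke test, assuming Triton's default HTTP port (8000), the `gpt2` model imported above, and the `text_input`/`text_output` field names used by the Triton vLLM backend's generate endpoint:

```python
# One-off generate request against the vLLM-backed model started above.
# The endpoint path and JSON field names follow the Triton vLLM backend
# quickstart; treat them as assumptions and adjust for your deployment.
import requests

resp = requests.post(
    "http://localhost:8000/v2/models/gpt2/generate",
    json={
        "text_input": "What is Triton Inference Server?",
        "parameters": {"stream": False, "temperature": 0},
    },
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["text_output"])
```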
- -### Run GenAI-Perf - -Run GenAI-Perf from Triton Inference Server SDK container: - -```bash -export RELEASE="yy.mm" # e.g. export RELEASE="24.06" - -docker run -it --net=host --gpus=1 nvcr.io/nvidia/tritonserver:${RELEASE}-py3-sdk - -# Run GenAI-Perf in the container: -genai-perf profile \ - -m gpt2 \ - --service-kind triton \ - --backend vllm \ - --num-prompts 100 \ - --random-seed 123 \ - --synthetic-input-tokens-mean 200 \ - --synthetic-input-tokens-stddev 0 \ - --streaming \ - --output-tokens-mean 100 \ - --output-tokens-stddev 0 \ - --output-tokens-mean-deterministic \ - --tokenizer hf-internal-testing/llama-tokenizer \ - --concurrency 1 \ - --measurement-interval 4000 \ - --profile-export-file my_profile_export.json \ - --url localhost:8001 -``` - -Example output: - -``` - LLM Metrics -┏━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━┓ -┃ Statistic ┃ avg ┃ min ┃ max ┃ p99 ┃ p90 ┃ p75 ┃ -┡━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━┩ -│ Time to first token (ns) │ 15,786,560 │ 11,437,189 │ 49,550,549 │ 40,129,652 │ 21,248,091 │ 17,824,695 │ -│ Inter token latency (ns) │ 3,543,380 │ 591,898 │ 10,013,690 │ 6,152,260 │ 5,039,278 │ 4,060,982 │ -│ Request latency (ns) │ 388,415,721 │ 312,552,612 │ 528,229,817 │ 518,189,390 │ 484,281,365 │ 459,417,637 │ -│ Output sequence length │ 113 │ 105 │ 123 │ 122 │ 119 │ 115 │ -│ Input sequence length │ 199 │ 199 │ 199 │ 199 │ 199 │ 199 │ -└──────────────────────────┴─────────────┴─────────────┴─────────────┴─────────────┴─────────────┴─────────────┘ -Output token throughput (per sec): 290.24 -Request throughput (per sec): 2.57 -``` - -## Profile GPT2 running on OpenAI Chat API-Compatible Server - -### Run GPT2 on [OpenAI Chat Completions API](https://platform.openai.com/docs/api-reference/chat)-compatible server - -
-See instructions - -Run the vLLM inference server: - -```bash -docker run -it --net=host --gpus=all vllm/vllm-openai:latest --model gpt2 --dtype float16 --max-model-len 1024 -``` - -
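Before profiling, a quick request against the chat endpoint confirms the server is reachable and the model name matches. A short sketch, assuming the vLLM server above is listening on port 8000 and serving `gpt2`:

```python
# Smoke test for the OpenAI-compatible chat endpoint started above.
import requests

resp = requests.post(
    "http://localhost:8000/v1/chat/completions",
    json={
        "model": "gpt2",
        "messages": [{"role": "user", "content": "Say hello in five words."}],
        "max_tokens": 16,
    },
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])
```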
- -### Run GenAI-Perf - -Run GenAI-Perf from Triton Inference Server SDK container: - -```bash -export RELEASE="yy.mm" # e.g. export RELEASE="24.06" - -docker run -it --net=host --gpus=all nvcr.io/nvidia/tritonserver:${RELEASE}-py3-sdk - -# Run GenAI-Perf in the container: -genai-perf profile \ - -m gpt2 \ - --service-kind openai \ - --endpoint v1/chat/completions \ - --endpoint-type chat \ - --num-prompts 100 \ - --random-seed 123 \ - --synthetic-input-tokens-mean 200 \ - --synthetic-input-tokens-stddev 0 \ - --streaming \ - --output-tokens-mean 100 \ - --output-tokens-stddev 0 \ - --tokenizer hf-internal-testing/llama-tokenizer \ - --concurrency 1 \ - --measurement-interval 4000 \ - --profile-export-file my_profile_export.json \ - --url localhost:8000 -``` - -Example output: - -``` - LLM Metrics -┏━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━┓ -┃ Statistic ┃ avg ┃ min ┃ max ┃ p99 ┃ p90 ┃ p75 ┃ -┡━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━┩ -│ Time to first token (ns) │ 13,546,815 │ 9,821,658 │ 48,317,756 │ 34,361,913 │ 16,541,625 │ 14,612,026 │ -│ Inter token latency (ns) │ 2,560,813 │ 457,703 │ 6,507,334 │ 3,754,617 │ 3,059,158 │ 2,953,540 │ -│ Request latency (ns) │ 283,597,027 │ 240,098,890 │ 361,730,568 │ 349,164,037 │ 323,279,761 │ 306,507,562 │ -│ Output sequence length │ 114 │ 103 │ 142 │ 136 │ 122 │ 119 │ -│ Input sequence length │ 199 │ 199 │ 199 │ 199 │ 199 │ 199 │ -└──────────────────────────┴─────────────┴─────────────┴─────────────┴─────────────┴─────────────┴─────────────┘ -Output token throughput (per sec): 401.62 -Request throughput (per sec): 3.52 -``` - -## Profile GPT2 running on OpenAI Completions API-Compatible Server - -### Running GPT2 on [OpenAI Completions API](https://platform.openai.com/docs/api-reference/completions)-compatible server - -
-See instructions - -Run the vLLM inference server: - -```bash -docker run -it --net=host --gpus=all vllm/vllm-openai:latest --model gpt2 --dtype float16 --max-model-len 1024 -``` - -
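The same sanity check works here, with one difference worth noting: the Completions API takes a flat `prompt` string instead of a `messages` list, which is why the GenAI-Perf command below uses `--endpoint-type completions`. A sketch under the same assumptions (port 8000, model `gpt2`):

```python
# Smoke test for the OpenAI-compatible completions endpoint started above.
import requests

resp = requests.post(
    "http://localhost:8000/v1/completions",
    json={"model": "gpt2", "prompt": "The capital of France is", "max_tokens": 8},
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["choices"][0]["text"])
```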
- -### Run GenAI-Perf - -Run GenAI-Perf from Triton Inference Server SDK container: - -```bash -export RELEASE="yy.mm" # e.g. export RELEASE="24.06" - -docker run -it --net=host --gpus=all nvcr.io/nvidia/tritonserver:${RELEASE}-py3-sdk - - -# Run GenAI-Perf in the container: -genai-perf profile \ - -m gpt2 \ - --service-kind openai \ - --endpoint v1/completions \ - --endpoint-type completions \ - --num-prompts 100 \ - --random-seed 123 \ - --synthetic-input-tokens-mean 200 \ - --synthetic-input-tokens-stddev 0 \ - --output-tokens-mean 100 \ - --output-tokens-stddev 0 \ - --tokenizer hf-internal-testing/llama-tokenizer \ - --concurrency 1 \ - --measurement-interval 4000 \ - --profile-export-file my_profile_export.json \ - --url localhost:8000 -``` - -Example output: - -``` - LLM Metrics -┏━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━┓ -┃ Statistic ┃ avg ┃ min ┃ max ┃ p99 ┃ p90 ┃ p75 ┃ -┡━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━┩ -│ Request latency (ns) │ 296,990,497 │ 43,312,449 │ 332,788,242 │ 327,475,292 │ 317,392,767 │ 310,343,333 │ -│ Output sequence length │ 109 │ 11 │ 158 │ 142 │ 118 │ 113 │ -│ Input sequence length │ 1 │ 1 │ 1 │ 1 │ 1 │ 1 │ -└────────────────────────┴─────────────┴────────────┴─────────────┴─────────────┴─────────────┴─────────────┘ -Output token throughput (per sec): 366.78 -Request throughput (per sec): 3.37 -``` diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/.gitignore b/src/c++/perf_analyzer/genai-perf/genai_perf/.gitignore deleted file mode 100644 index 973a71df2..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.json -*.cache diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/__init__.py b/src/c++/perf_analyzer/genai-perf/genai_perf/__init__.py deleted file mode 100644 index d656fe629..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -__version__ = "0.0.5dev" diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/constants.py b/src/c++/perf_analyzer/genai-perf/genai_perf/constants.py deleted file mode 100644 index b951524bf..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/constants.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -DEFAULT_HTTP_URL = "localhost:8000" -DEFAULT_GRPC_URL = "localhost:8001" - - -OPEN_ORCA = "openorca" -CNN_DAILY_MAIL = "cnn_dailymail" -DEFAULT_INPUT_DATA_JSON = "llm_inputs.json" - - -DEFAULT_ARTIFACT_DIR = "artifacts" -DEFAULT_COMPARE_DIR = "compare" -DEFAULT_PARQUET_FILE = "all_data" diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/exceptions.py b/src/c++/perf_analyzer/genai-perf/genai_perf/exceptions.py deleted file mode 100644 index ff4170af0..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/exceptions.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-class GenAIPerfException(Exception):
-    """
-    A custom exception type specific to genai-perf.
-    """
-
-    pass
diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/console_exporter.py b/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/console_exporter.py
deleted file mode 100644
index 460fe5976..000000000
--- a/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/console_exporter.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-# * Neither the name of NVIDIA CORPORATION nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-from genai_perf.export_data.exporter_config import ExporterConfig
-from rich.console import Console
-from rich.table import Table
-
-
-class ConsoleExporter:
-    """
-    A class to export the statistics and arg values to the console.
- """ - - STAT_COLUMN_KEYS = ["avg", "min", "max", "p99", "p90", "p75"] - - def __init__(self, config: ExporterConfig): - self._stats = config.stats - self._metrics = config.metrics - self._args = config.args - - def _get_title(self): - if self._args.endpoint_type == "embeddings": - return "Embeddings Metrics" - elif self._args.endpoint_type == "rankings": - return "Rankings Metrics" - else: - return "LLM Metrics" - - def export(self) -> None: - table = Table(title=self._get_title()) - - table.add_column("Statistic", justify="right", style="cyan", no_wrap=True) - for stat in self.STAT_COLUMN_KEYS: - table.add_column(stat, justify="right", style="green") - - # Request metrics table - self._construct_table(table) - - console = Console() - console.print(table) - - # System metrics are printed after the table - for metric in self._metrics.system_metrics: - line = metric.name.replace("_", " ").capitalize() - value = self._stats[metric.name]["avg"] - line += f" ({metric.unit}): {value:.2f}" - print(line) - - def _construct_table(self, table: Table) -> None: - for metric in self._metrics.request_metrics: - if self._should_skip(metric.name): - continue - - metric_str = metric.name.replace("_", " ").capitalize() - metric_str += f" ({metric.unit})" if metric.unit != "tokens" else "" - row_values = [metric_str] - for stat in self.STAT_COLUMN_KEYS: - value = self._stats[metric.name][stat] - row_values.append(f"{value:,.2f}") - - table.add_row(*row_values) - - # (TMA-1976) Refactor this method as the csv exporter shares identical method. - def _should_skip(self, metric_name: str) -> bool: - if self._args.endpoint_type == "embeddings": - return False # skip nothing - - # TODO (TMA-1712): need to decide if we need this metric. Remove - # from statistics display for now. - # TODO (TMA-1678): output_token_throughput_per_request is treated - # separately since the current code treats all throughput metrics to - # be displayed outside of the statistics table. - if metric_name == "output_token_throughput_per_request": - return True - - # When non-streaming, skip ITL and TTFT - streaming_metrics = [ - "inter_token_latency", - "time_to_first_token", - ] - if not self._args.streaming and metric_name in streaming_metrics: - return True - return False diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/csv_exporter.py b/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/csv_exporter.py deleted file mode 100644 index efbb9b754..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/csv_exporter.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -import csv - -import genai_perf.logging as logging -from genai_perf.export_data.exporter_config import ExporterConfig - -DEFAULT_OUTPUT_DATA_CSV = "profile_export_genai_perf.csv" - -logger = logging.getLogger(__name__) - - -class CsvExporter: - """ - A class to export the statistics and arg values in a csv format. - """ - - REQUEST_METRICS_HEADER = [ - "Metric", - "avg", - "min", - "max", - "p99", - "p95", - "p90", - "p75", - "p50", - "p25", - ] - - SYSTEM_METRICS_HEADER = [ - "Metric", - "Value", - ] - - def __init__(self, config: ExporterConfig): - self._stats = config.stats - self._metrics = config.metrics - self._output_dir = config.artifact_dir - self._args = config.args - - def export(self) -> None: - csv_filename = self._output_dir / DEFAULT_OUTPUT_DATA_CSV - logger.info(f"Generating {csv_filename}") - - with open(csv_filename, mode="w", newline="") as csvfile: - csv_writer = csv.writer(csvfile) - self._write_request_metrics(csv_writer) - csv_writer.writerow([]) - self._write_system_metrics(csv_writer) - - def _write_request_metrics(self, csv_writer) -> None: - csv_writer.writerow(self.REQUEST_METRICS_HEADER) - for metric in self._metrics.request_metrics: - if self._should_skip(metric.name): - continue - - metric_str = metric.name.replace("_", " ").title() - metric_str += f" ({metric.unit})" if metric.unit != "tokens" else "" - row_values = [metric_str] - for stat in self.REQUEST_METRICS_HEADER[1:]: - value = self._stats[metric.name][stat] - row_values.append(f"{value:,.2f}") - - csv_writer.writerow(row_values) - - def _write_system_metrics(self, csv_writer) -> None: - csv_writer.writerow(self.SYSTEM_METRICS_HEADER) - for metric in self._metrics.system_metrics: - metric_str = metric.name.replace("_", " ").title() - metric_str += f" ({metric.unit})" - value = self._stats[metric.name]["avg"] - csv_writer.writerow([metric_str, f"{value:.2f}"]) - - def _should_skip(self, metric_name: str) -> bool: - if self._args.endpoint_type == "embeddings": - return False # skip nothing - - # TODO (TMA-1712): need to decide if we need this metric. Remove - # from statistics display for now. - # TODO (TMA-1678): output_token_throughput_per_request is treated - # separately since the current code treats all throughput metrics to - # be displayed outside of the statistics table. 
- if metric_name == "output_token_throughput_per_request": - return True - - # When non-streaming, skip ITL and TTFT - streaming_metrics = [ - "inter_token_latency", - "time_to_first_token", - ] - if not self._args.streaming and metric_name in streaming_metrics: - return True - return False diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/data_exporter_factory.py b/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/data_exporter_factory.py deleted file mode 100644 index ac226bdf5..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/data_exporter_factory.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from typing import List - -from genai_perf.export_data.console_exporter import ConsoleExporter -from genai_perf.export_data.csv_exporter import CsvExporter -from genai_perf.export_data.exporter_config import ExporterConfig -from genai_perf.export_data.json_exporter import JsonExporter - -DataExporterList = [ConsoleExporter, JsonExporter, CsvExporter] - - -class DataExporterFactory: - def create_data_exporters(self, config: ExporterConfig) -> List: - data_exporters = [] - for exporter in DataExporterList: - data_exporters.append(exporter(config)) - return data_exporters diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/data_exporter_interface.py b/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/data_exporter_interface.py deleted file mode 100644 index 56bde9a53..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/data_exporter_interface.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. 
-# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -from typing import Protocol - - -class DataExporterInterface(Protocol): - def export(self): - pass diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/exporter_config.py b/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/exporter_config.py deleted file mode 100644 index 0d9c7cd0b..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/exporter_config.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
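The `ExporterConfig` defined below is a passive settings container with property accessors only. For orientation, here is a condensed sketch of how it is populated and consumed, mirroring `OutputReporter._create_exporter_config` later in this patch; the `stats` object is assumed to be a parsed `Statistics` instance and `args` a parsed CLI namespace.

```python
# Usage sketch only: mirrors OutputReporter._create_exporter_config from
# later in this patch. The stats/args/extra_inputs values are assumed to
# come from a parsed profile run and the genai-perf argument parser.
from genai_perf.export_data.data_exporter_factory import DataExporterFactory
from genai_perf.export_data.exporter_config import ExporterConfig


def export_all(stats, args, extra_inputs) -> None:
    config = ExporterConfig()
    config.stats = stats.stats_dict      # per-metric statistics dictionary
    config.metrics = stats.metrics       # Metrics object backing those stats
    config.args = args                   # parsed CLI namespace
    config.artifact_dir = args.artifact_dir
    config.extra_inputs = extra_inputs   # dict built from --extra-inputs
    for exporter in DataExporterFactory().create_data_exporters(config):
        exporter.export()                # console, JSON, and CSV exporters
```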
- - -from genai_perf.metrics import Metrics - - -class ExporterConfig: - def __init__(self): - self._stats = None - self._metrics = None - self._args = None - self._extra_inputs = None - self._artifact_dir = None - - @property - def stats(self): - return self._stats - - @stats.setter - def stats(self, stats_value): - self._stats = stats_value - - @property - def metrics(self): - return self._metrics - - @metrics.setter - def metrics(self, metrics: Metrics): - self._metrics = metrics - - @property - def args(self): - return self._args - - @args.setter - def args(self, args_value): - self._args = args_value - - @property - def extra_inputs(self): - return self._extra_inputs - - @extra_inputs.setter - def extra_inputs(self, extra_inputs_value): - self._extra_inputs = extra_inputs_value - - @property - def artifact_dir(self): - return self._artifact_dir - - @artifact_dir.setter - def artifact_dir(self, artifact_dir_value): - self._artifact_dir = artifact_dir_value diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/json_exporter.py b/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/json_exporter.py deleted file mode 100644 index 2ec24fae1..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/json_exporter.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -import json -from enum import Enum -from typing import Dict - -import genai_perf.logging as logging -from genai_perf.export_data.exporter_config import ExporterConfig - -DEFAULT_OUTPUT_DATA_JSON = "profile_export_genai_perf.json" - -logger = logging.getLogger(__name__) - - -class JsonExporter: - """ - A class to export the statistics and arg values in a json format. 
- """ - - def __init__(self, config: ExporterConfig): - self._stats: Dict = config.stats - self._args = dict(vars(config.args)) - self._extra_inputs = config.extra_inputs - self._output_dir = config.artifact_dir - self._stats_and_args: Dict = {} - self._prepare_args_for_export() - self._merge_stats_and_args() - - def export(self) -> None: - filename = self._output_dir / DEFAULT_OUTPUT_DATA_JSON - logger.info(f"Generating {filename}") - with open(str(filename), "w") as f: - f.write(json.dumps(self._stats_and_args, indent=2)) - - def _prepare_args_for_export(self) -> None: - self._args.pop("func", None) - self._args.pop("output_format", None) - self._args.pop("input_file", None) - self._args["profile_export_file"] = str(self._args["profile_export_file"]) - self._args["artifact_dir"] = str(self._args["artifact_dir"]) - for k, v in self._args.items(): - if isinstance(v, Enum): - self._args[k] = v.name.lower() - self._add_extra_inputs_to_args() - - def _add_extra_inputs_to_args(self) -> None: - del self._args["extra_inputs"] - self._args.update({"extra_inputs": self._extra_inputs}) - - def _merge_stats_and_args(self) -> None: - self._stats_and_args = dict(self._stats) - self._stats_and_args.update({"input_config": self._args}) diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/output_reporter.py b/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/output_reporter.py deleted file mode 100644 index ec8123b95..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/output_reporter.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -from argparse import Namespace - -from genai_perf.export_data.data_exporter_factory import DataExporterFactory -from genai_perf.export_data.exporter_config import ExporterConfig -from genai_perf.metrics import Statistics -from genai_perf.parser import get_extra_inputs_as_dict - - -class OutputReporter: - """ - A class to orchestrate output generation. 
- """ - - def __init__(self, stats: Statistics, args: Namespace): - self.args = args - self.stats = stats - self.stats.scale_data() - - def report_output(self) -> None: - factory = DataExporterFactory() - exporter_config = self._create_exporter_config() - data_exporters = factory.create_data_exporters(exporter_config) - - for exporter in data_exporters: - exporter.export() - - def _create_exporter_config(self) -> ExporterConfig: - config = ExporterConfig() - config.stats = self.stats.stats_dict - config.metrics = self.stats.metrics - config.args = self.args - config.artifact_dir = self.args.artifact_dir - config.extra_inputs = get_extra_inputs_as_dict(self.args) - return config diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/__init__.py b/src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/__init__.py deleted file mode 100644 index c6959fce1..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/farewell.txt b/src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/farewell.txt deleted file mode 100644 index cfbe41a7c..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/farewell.txt +++ /dev/null @@ -1,104 +0,0 @@ -The period for a new election of a citizen to -administer the executive government of the United -States being not far distant, and the time actually -arrived when your thoughts must be employed in -designating the person who is to be clothed with that -important trust, it appears to me proper, especially as -it may conduce to a more distinct expression of the -public voice, that I should now apprise you of the -resolution I have formed, to decline being considered -among the number of those out of whom a choice is to be made. -I beg you, at the same time, to do me the justice to -be assured that this resolution has not been taken -without a strict regard to all the considerations -appertaining to the relation which binds a dutiful -citizen to his country—and that, in withdrawing the -tender of service which silence in my situation might -imply, I am influenced by no diminution of zeal for -your future interest, no deficiency of grateful respect -for your past kindness; but am supported by a full -conviction that the step is compatible with both. -The acceptance of, and continuance hitherto in, the -office to which your suffrages have twice called me, -have been a uniform sacrifice of inclination to the -opinion of duty and to a deference for what appeared -to be your desire. I constantly hoped that it would -have been much earlier in my power, consistently with -motives which I was not at liberty to disregard, to -return to that retirement from which I had been -reluctantly drawn. 
The strength of my inclination to -do this, previous to the last election, had even led to -the preparation of an address to declare it to you; but -mature reflection on the then perplexed and critical -posture of our affairs with foreign nations, and the -unanimous advice of persons entitled to my -confidence, impelled me to abandon the idea. -I rejoice that the state of your concerns, external as -well as internal, no longer renders the pursuit of -inclination incompatible with the sentiment of duty or -propriety, and am persuaded whatever partiality may -be retained for my services, that in the present -circumstances of our country, you will not disapprove -my determination to retire. -The impressions with which I first undertook the -arduous trust were explained on the proper occasion. -In the discharge of this trust, I will only say that I -have, with good intentions, contributed towards the -organization and administration of the government, -the best exertions of which a very fallible judgment -was capable. Not unconscious in the outset of the -inferiority of my qualifications, experience in my -own eyes, perhaps still more in the eyes of others, -has strengthened the motives to diffidence of myself; -and every day the increasing weight of years -admonishes me more and more that the shade of -retirement is as necessary to me as it will be -welcome. Satisfied that if any circumstances have -given peculiar value to my services, they were -temporary, I have the consolation to believe, that -while choice and prudence invite me to quit the -political scene, patriotism does not forbid it. -In looking forward to the moment which is -intended to terminate the career of my public life, my -feelings do not permit me to suspend the deep -acknowledgment of that debt of gratitude which I -owe to my beloved country for the many honors it has -conferred upon me; still more for the steadfast -confidence with which it has supported me; and for -the opportunities I have thence enjoyed of manifesting -my inviolable attachment, by services faithful and -persevering, though in usefulness unequal to my zeal. -If benefits have resulted to our country from these -services, let it always be remembered to your praise, -and as an instructive example in our annals that -under circumstances in which the passions agitated in -every direction were liable to mislead, amidst -appearances sometimes dubious, vicissitudes of -fortune often discouraging, in situations in which not -unfrequently want of success has countenanced the -spirit of criticism, the constancy of your support was -the essential prop of the efforts, and a guarantee of -the plans by which they were effected. Profoundly -penetrated with this idea, I shall carry it with me to -my grave, as a strong incitement to unceasing vows -that Heaven may continue to you the choicest tokens -of its beneficence; that your Union and brotherly -affection may be perpetual; that the free constitution, -which is the work of your hands, may be sacredly -maintained; that its administration in every -department may be stamped with wisdom and virtue; -that, in fine, the happiness of the people of these -states, under the auspices of liberty, may be made -complete by so careful a preservation and so prudent -a use of this blessing as will acquire to them the glory -of recommending it to the applause, the affection, -and adoption of every nation which is yet a stranger to it. -Here, perhaps, I ought to stop. 
But a solicitude for -your welfare, which cannot end but with my life, and -the apprehension of danger, natural to that -solicitude, urge me on an occasion like the present, -to offer to your solemn contemplation, and to -recommend to your frequent review, some sentiments -which are the result of much reflection, of no -inconsiderable observation, and which appear to me -all important to the permanency of your felicity as a \ No newline at end of file diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/llm_inputs.py b/src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/llm_inputs.py deleted file mode 100644 index 057c33562..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/llm_inputs.py +++ /dev/null @@ -1,1585 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import json -import random -from copy import deepcopy -from enum import Enum, auto -from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple, cast - -import requests -from genai_perf import utils -from genai_perf.constants import CNN_DAILY_MAIL, DEFAULT_INPUT_DATA_JSON, OPEN_ORCA -from genai_perf.exceptions import GenAIPerfException -from genai_perf.llm_inputs.synthetic_image_generator import ( - ImageFormat, - SyntheticImageGenerator, -) -from genai_perf.llm_inputs.synthetic_prompt_generator import SyntheticPromptGenerator -from genai_perf.tokenizer import DEFAULT_TOKENIZER, Tokenizer, get_tokenizer -from genai_perf.utils import load_json_str -from PIL import Image -from requests import Response - - -class ModelSelectionStrategy(Enum): - ROUND_ROBIN = auto() - RANDOM = auto() - - -class PromptSource(Enum): - SYNTHETIC = auto() - DATASET = auto() - FILE = auto() - - -class OutputFormat(Enum): - OPENAI_CHAT_COMPLETIONS = auto() - OPENAI_COMPLETIONS = auto() - OPENAI_EMBEDDINGS = auto() - OPENAI_VISION = auto() - RANKINGS = auto() - TENSORRTLLM = auto() - VLLM = auto() - - def to_lowercase(self): - return self.name.lower() - - -class LlmInputs: - """ - A library of methods that control the generation of LLM Inputs - """ - - OPEN_ORCA_URL = "https://datasets-server.huggingface.co/rows?dataset=Open-Orca%2FOpenOrca&config=default&split=train" - CNN_DAILYMAIL_URL = "https://datasets-server.huggingface.co/rows?dataset=cnn_dailymail&config=1.0.0&split=train" - - DEFAULT_STARTING_INDEX = 0 - MINIMUM_STARTING_INDEX = 0 - - DEFAULT_LENGTH = 100 - MINIMUM_LENGTH = 1 - - DEFAULT_TENSORRTLLM_MAX_TOKENS = 256 - - DEFAULT_BATCH_SIZE = 1 - DEFAULT_RANDOM_SEED = 0 - DEFAULT_PROMPT_TOKENS_MEAN = 550 - DEFAULT_PROMPT_TOKENS_STDDEV = 0 - DEFAULT_OUTPUT_TOKENS_MEAN = -1 - DEFAULT_OUTPUT_TOKENS_STDDEV = 0 - DEFAULT_NUM_PROMPTS = 100 - - DEFAULT_IMAGE_WIDTH_MEAN = 100 - DEFAULT_IMAGE_WIDTH_STDDEV = 0 - DEFAULT_IMAGE_HEIGHT_MEAN = 100 - DEFAULT_IMAGE_HEIGHT_STDDEV = 0 - - EMPTY_JSON_IN_VLLM_PA_FORMAT: Dict = {"data": []} - EMPTY_JSON_IN_TENSORRTLLM_PA_FORMAT: Dict = {"data": []} - EMPTY_JSON_IN_OPENAI_PA_FORMAT: Dict = 
{"data": []} - - dataset_url_map = {OPEN_ORCA: OPEN_ORCA_URL, CNN_DAILY_MAIL: CNN_DAILYMAIL_URL} - - @classmethod - def create_llm_inputs( - cls, - input_type: PromptSource, - output_format: OutputFormat, - dataset_name: str = "", - model_name: list = [], - model_selection_strategy: ModelSelectionStrategy = ModelSelectionStrategy.ROUND_ROBIN, - input_filename: Optional[Path] = Path(""), - starting_index: int = DEFAULT_STARTING_INDEX, - length: int = DEFAULT_LENGTH, - output_tokens_mean: int = DEFAULT_OUTPUT_TOKENS_MEAN, - output_tokens_stddev: int = DEFAULT_OUTPUT_TOKENS_STDDEV, - output_tokens_deterministic: bool = False, - prompt_tokens_mean: int = DEFAULT_PROMPT_TOKENS_MEAN, - prompt_tokens_stddev: int = DEFAULT_PROMPT_TOKENS_STDDEV, - image_width_mean: int = DEFAULT_IMAGE_WIDTH_MEAN, - image_width_stddev: int = DEFAULT_IMAGE_WIDTH_STDDEV, - image_height_mean: int = DEFAULT_IMAGE_HEIGHT_MEAN, - image_height_stddev: int = DEFAULT_IMAGE_HEIGHT_STDDEV, - image_format: ImageFormat = ImageFormat.PNG, - random_seed: int = DEFAULT_RANDOM_SEED, - num_of_output_prompts: int = DEFAULT_NUM_PROMPTS, - add_model_name: bool = False, - add_stream: bool = False, - tokenizer: Tokenizer = get_tokenizer(DEFAULT_TOKENIZER), - extra_inputs: Optional[Dict] = None, - batch_size: int = 1, - output_dir: Path = Path(""), - ) -> Dict: - """ - Given an input type, input format, and output type. Output a string of LLM Inputs - (in a JSON dictionary) to a file - - Required Parameters - ------------------- - input_type: - Specify how the input is received - output_format: - Specify the output format - - Optional Parameters - ------------------- - dataset_name: - The name of the dataset - model_name: - The model name - starting_index: - Offset from within the list to start gathering inputs - length: - Number of entries to gather - add_model_name: - If true, adds a model name field to each payload - add_stream: - If true, adds a steam field to each payload - extra_inputs: - If provided, append these inputs to every request - output_tokens_mean: - The mean length of the output to generate. If not using fixed output lengths, this should be set to -1. - output_tokens_stddev: - The standard deviation of the length of the output to generate. This is only used if output_tokens_mean is provided. - output_tokens_deterministic: - If true, the output tokens will set the minimum and maximum tokens to be equivalent. - image_width_mean: - The mean width of images when generating synthetic image data. - image_width_stddev: - The standard deviation of width of images when generating synthetic image data. - image_height_mean: - The mean height of images when generating synthetic image data. - image_height_stddev: - The standard deviation of height of images when generating synthetic image data. - image_format: - The compression format of the images. 
- batch_size: - The number of inputs per request (currently only used for the embeddings and rankings endpoints) - - Required Synthetic Prompt Generation Parameters - ----------------------------------------------- - tokenizer: - The tokenizer to use when generating synthetic prompts - - Optional Synthetic Prompt Generation Parameters - ----------------------------------------------- - prompt_tokens_mean: - The mean length of the prompt to generate - prompt_tokens_stddev: - The standard deviation of the length of the prompt to generate - num_of_output_prompts: - The number of synthetic output prompts to generate - random_seed: - Seed used to generate random values - """ - - cls._check_for_valid_args( - input_type, dataset_name, starting_index, length, tokenizer - ) - - random.seed(random_seed) - - generic_dataset_json = cls.get_generic_dataset_json( - input_type, - output_format, - dataset_name, - starting_index, - length, - tokenizer, - prompt_tokens_mean, - prompt_tokens_stddev, - num_of_output_prompts, - image_width_mean, - image_width_stddev, - image_height_mean, - image_height_stddev, - image_format, - batch_size, - input_filename, - ) - - if extra_inputs is None: - extra_inputs = {} - - json_in_pa_format = cls._convert_generic_json_to_output_format( - output_format, - generic_dataset_json, - add_model_name, - add_stream, - extra_inputs, - output_tokens_mean, - output_tokens_stddev, - output_tokens_deterministic, - model_name, - model_selection_strategy, - ) - cls._write_json_to_file(json_in_pa_format, output_dir) - - return json_in_pa_format - - @classmethod - def get_generic_dataset_json( - cls, - input_type: PromptSource, - output_format: OutputFormat, - dataset_name: str, - starting_index: int, - length: int, - tokenizer: Tokenizer, - prompt_tokens_mean: int, - prompt_tokens_stddev: int, - num_of_output_prompts: int, - image_width_mean: int, - image_width_stddev: int, - image_height_mean: int, - image_height_stddev: int, - image_format: ImageFormat, - batch_size: int, - input_filename: Optional[Path], - ) -> Dict: - """ - Retrieve and convert the dataset based on the input type. - - Parameters - ---------- - input_type: - Specify how the input is received - output_format: - Specify the output format - dataset_name: - The name of the dataset - starting_index: - Offset from within the list to start gathering inputs - length: - Number of entries to gather - tokenizer: - The tokenizer to use when generating synthetic prompts - prompt_tokens_mean: - The mean length of the prompt to generate - prompt_tokens_stddev: - The standard deviation of the length of the prompt to generate - num_of_output_prompts: - The number of synthetic output prompts to generate - image_width_mean: - The mean width of images when generating synthetic image data. - image_width_stddev: - The standard deviation of width of images when generating synthetic image data. - image_height_mean: - The mean height of images when generating synthetic image data. - image_height_stddev: - The standard deviation of height of images when generating synthetic image data. - image_format: - The compression format of the images. - batch_size: - The number of inputs per request (currently only used for the embeddings and rankings endpoints) - input_filename: - The path to the input file containing the prompts in JSONL format. 
- Returns - ------- - Dict: - The generic dataset JSON - """ - - if output_format == OutputFormat.OPENAI_EMBEDDINGS: - if input_type != PromptSource.FILE: - raise GenAIPerfException( - f"{OutputFormat.OPENAI_EMBEDDINGS.to_lowercase()} only supports a file as input." - ) - input_filename = cast(Path, input_filename) - input_file_dataset = cls._get_input_dataset_from_embeddings_file( - input_filename, - batch_size, - num_of_output_prompts, - ) - generic_dataset_json = ( - cls._convert_input_synthetic_or_file_dataset_to_generic_json( - input_file_dataset - ) - ) - elif output_format == OutputFormat.RANKINGS: - if input_type != PromptSource.FILE: - raise GenAIPerfException( - f"{OutputFormat.RANKINGS.to_lowercase()} only supports a directory as input." - ) - queries_filename = cast(Path, input_filename) / "queries.jsonl" - passages_filename = cast(Path, input_filename) / "passages.jsonl" - input_file_dataset = cls._get_input_dataset_from_rankings_files( - queries_filename, passages_filename, batch_size, num_of_output_prompts - ) - - generic_dataset_json = ( - cls._convert_input_synthetic_or_file_dataset_to_generic_json( - input_file_dataset - ) - ) - else: - if input_type == PromptSource.DATASET: - # (TMA-1990) support VLM input from public dataset - if output_format == OutputFormat.OPENAI_VISION: - raise GenAIPerfException( - f"{OutputFormat.OPENAI_VISION.to_lowercase()} currently " - "does not support dataset as input." - ) - dataset = cls._get_input_dataset_from_url( - dataset_name, starting_index, length - ) - generic_dataset_json = cls._convert_input_url_dataset_to_generic_json( - dataset - ) - elif input_type == PromptSource.SYNTHETIC: - synthetic_dataset = cls._get_input_dataset_from_synthetic( - tokenizer, - prompt_tokens_mean, - prompt_tokens_stddev, - num_of_output_prompts, - image_width_mean, - image_width_stddev, - image_height_mean, - image_height_stddev, - image_format, - output_format, - ) - generic_dataset_json = ( - cls._convert_input_synthetic_or_file_dataset_to_generic_json( - synthetic_dataset - ) - ) - elif input_type == PromptSource.FILE: - input_filename = cast(Path, input_filename) - input_file_dataset = cls._get_input_dataset_from_file(input_filename) - input_file_dataset = cls._encode_images_in_input_dataset( - input_file_dataset - ) - generic_dataset_json = ( - cls._convert_input_synthetic_or_file_dataset_to_generic_json( - input_file_dataset - ) - ) - else: - raise GenAIPerfException("Input source is not recognized.") - - # When the generic_dataset_json contains multi-modal data (e.g. 
images), - # convert the format of the content to OpenAI multi-modal format: - # see https://platform.openai.com/docs/guides/vision - if output_format == OutputFormat.OPENAI_VISION: - generic_dataset_json = cls._convert_to_openai_multi_modal_content( - generic_dataset_json - ) - - return generic_dataset_json - - @classmethod - def _get_input_dataset_from_embeddings_file( - cls, input_filename: Path, batch_size: int, num_prompts: int - ) -> Dict[str, Any]: - with open(input_filename, "r") as file: - file_content = [load_json_str(line) for line in file] - - texts = [item["text"] for item in file_content] - - if batch_size > len(texts): - raise ValueError( - "Batch size cannot be larger than the number of available texts" - ) - - dataset_json: Dict[str, Any] = {} - dataset_json["features"] = [{"name": "input"}] - dataset_json["rows"] = [] - - for _ in range(num_prompts): - sampled_texts = random.sample(texts, batch_size) - dataset_json["rows"].append({"row": {"payload": {"input": sampled_texts}}}) - - return dataset_json - - @classmethod - def _get_input_dataset_from_rankings_files( - cls, - queries_filename: Path, - passages_filename: Path, - batch_size: int, - num_prompts: int, - ) -> Dict[str, Any]: - - with open(queries_filename, "r") as file: - queries_content = [load_json_str(line) for line in file] - queries_texts = [item for item in queries_content] - - with open(passages_filename, "r") as file: - passages_content = [load_json_str(line) for line in file] - passages_texts = [item for item in passages_content] - - if batch_size > len(passages_texts): - raise ValueError( - "Batch size cannot be larger than the number of available passages" - ) - - dataset_json: Dict[str, Any] = {} - dataset_json["features"] = [{"name": "input"}] - dataset_json["rows"] = [] - - for _ in range(num_prompts): - sampled_texts = random.sample(passages_texts, batch_size) - query_sample = random.choice(queries_texts) - entry_dict: Dict = {} - entry_dict["query"] = query_sample - entry_dict["passages"] = sampled_texts - dataset_json["rows"].append({"row": {"payload": entry_dict}}) - return dataset_json - - @classmethod - def _check_for_valid_args( - cls, - input_type: PromptSource, - dataset_name: str, - starting_index: int, - length: int, - tokenizer: Tokenizer, - ) -> None: - try: - cls._check_for_dataset_name_if_input_type_is_url(input_type, dataset_name) - cls._check_for_tokenzier_if_input_type_is_synthetic(input_type, tokenizer) - cls._check_for_valid_starting_index(starting_index) - cls._check_for_valid_length(length) - - except Exception as e: - raise GenAIPerfException(e) - - @classmethod - def _get_input_dataset_from_url( - cls, dataset_name: str, starting_index: int, length: int - ) -> Response: - url = cls._resolve_url(dataset_name) - configured_url = cls._create_configured_url(url, starting_index, length) - dataset = cls._download_dataset(configured_url) - - return dataset - - @classmethod - def _get_input_dataset_from_synthetic( - cls, - tokenizer: Tokenizer, - prompt_tokens_mean: int, - prompt_tokens_stddev: int, - num_of_output_prompts: int, - image_width_mean: int, - image_width_stddev: int, - image_height_mean: int, - image_height_stddev: int, - image_format: ImageFormat, - output_format: OutputFormat, - ) -> Dict[str, Any]: - dataset_json: Dict[str, Any] = {} - dataset_json["features"] = [{"name": "text_input"}] - dataset_json["rows"] = [] - for _ in range(num_of_output_prompts): - row: Dict["str", Any] = {"row": {}} - synthetic_prompt = cls._create_synthetic_prompt( - tokenizer, - 
prompt_tokens_mean, - prompt_tokens_stddev, - ) - row["row"]["text_input"] = synthetic_prompt - - if output_format == OutputFormat.OPENAI_VISION: - synthetic_image = cls._create_synthetic_image( - image_width_mean=image_width_mean, - image_width_stddev=image_width_stddev, - image_height_mean=image_height_mean, - image_height_stddev=image_height_stddev, - image_format=image_format, - ) - row["row"]["image"] = synthetic_image - - dataset_json["rows"].append(row) - - return dataset_json - - @classmethod - def _resolve_url(cls, dataset_name: str) -> str: - if dataset_name in cls.dataset_url_map: - return cls.dataset_url_map[dataset_name] - else: - raise GenAIPerfException( - f"{dataset_name} does not have a corresponding URL in the dataset_url_map." - ) - - @classmethod - def _create_configured_url(cls, url: str, starting_index: int, length: int) -> str: - starting_index_str = str(starting_index) - length_str = str(length) - configured_url = url + f"&offset={starting_index_str}&length={length_str}" - - return configured_url - - @classmethod - def _download_dataset(cls, configured_url: str) -> Response: - dataset = cls._query_server(configured_url) - - return dataset - - @classmethod - def _convert_input_url_dataset_to_generic_json(cls, dataset: Response) -> Dict: - dataset_json = dataset.json() - try: - cls._check_for_error_in_json_of_dataset(dataset_json) - except Exception as e: - raise GenAIPerfException(e) - - generic_dataset_json = cls._convert_dataset_to_generic_input_json(dataset_json) - - return generic_dataset_json - - @classmethod - def _convert_input_synthetic_or_file_dataset_to_generic_json( - cls, dataset: Dict - ) -> Dict[str, List[Dict]]: - generic_dataset_json = cls._convert_dataset_to_generic_input_json(dataset) - - return generic_dataset_json - - @classmethod - def _convert_dataset_to_generic_input_json( - cls, dataset_json: Dict - ) -> Dict[str, List[Dict]]: - generic_input_json = cls._add_features_to_generic_json({}, dataset_json) - generic_input_json = cls._add_rows_to_generic_json( - generic_input_json, dataset_json - ) - - return generic_input_json - - @classmethod - def _add_features_to_generic_json( - cls, generic_input_json: Dict, dataset_json: Dict - ) -> Dict: - if "features" in dataset_json.keys(): - generic_input_json["features"] = [] - for feature in dataset_json["features"]: - generic_input_json["features"].append(feature["name"]) - - return generic_input_json - - @classmethod - def _add_rows_to_generic_json( - cls, generic_input_json: Dict, dataset_json: Dict - ) -> Dict[str, List[Dict]]: - generic_input_json["rows"] = [] - for row in dataset_json["rows"]: - generic_input_json["rows"].append(row["row"]) - - return generic_input_json - - @classmethod - def _get_input_dataset_from_file(cls, input_filename: Path) -> Dict: - """ - Reads the input prompts and images from a JSONL file and converts them - into the required dataset format. - - Parameters - ---------- - input_filename : Path - The path to the input file containing the prompts and/or images in - JSONL format. - - Returns - ------- - Dict - The dataset in the required format with the prompts and/or images - read from the file. 
- """ - cls.verify_file(input_filename) - prompts, images = cls._get_prompts_from_input_file(input_filename) - dataset_json: Dict[str, Any] = {} - dataset_json["features"] = [{"name": "text_input"}] - dataset_json["rows"] = [] - for prompt, image in zip(prompts, images): - content = {"text_input": prompt} - content.update({"image": image} if image else {}) - dataset_json["rows"].append({"row": content}) - - return dataset_json - - @classmethod - def _get_prompts_from_input_file( - cls, input_filename: Path - ) -> Tuple[List[str], List[str]]: - """ - Reads the input prompts from a JSONL file and returns a list of prompts. - - Parameters - ---------- - input_filename : Path - The path to the input file containing the prompts in JSONL format. - - Returns - ------- - Tuple[List[str], List[str]] - A list of prompts and images read from the file. - """ - prompts = [] - images = [] - with open(input_filename, mode="r", newline=None) as file: - for line in file: - if line.strip(): - prompts.append(load_json_str(line).get("text_input", "").strip()) - images.append(load_json_str(line).get("image", "").strip()) - return prompts, images - - @classmethod - def verify_file(cls, input_filename: Path) -> None: - if not input_filename.exists(): - raise FileNotFoundError(f"The file '{input_filename}' does not exist.") - - @classmethod - def _convert_to_openai_multi_modal_content( - cls, generic_dataset_json: Dict[str, List[Dict]] - ) -> Dict[str, List[Dict]]: - """ - Converts to multi-modal content format of OpenAI Chat Completions API. - """ - for row in generic_dataset_json["rows"]: - if row["image"]: - row["text_input"] = [ - { - "type": "text", - "text": row["text_input"], - }, - { - "type": "image_url", - "image_url": {"url": row["image"]}, - }, - ] - - return generic_dataset_json - - @classmethod - def _encode_images_in_input_dataset(cls, input_file_dataset: Dict) -> Dict: - for row in input_file_dataset["rows"]: - filename = row["row"].get("image") - if filename: - img = Image.open(filename) - if img.format.lower() not in utils.get_enum_names(ImageFormat): - raise GenAIPerfException( - f"Unsupported image format '{img.format}' of " - f"the image '{filename}'." 
- ) - - img_base64 = utils.encode_image(img, img.format) - payload = f"data:image/{img.format.lower()};base64,{img_base64}" - row["row"]["image"] = payload - - return input_file_dataset - - @classmethod - def _convert_generic_json_to_output_format( - cls, - output_format: OutputFormat, - generic_dataset: Dict, - add_model_name: bool, - add_stream: bool, - extra_inputs: Dict, - output_tokens_mean: int, - output_tokens_stddev: int, - output_tokens_deterministic: bool, - model_name: list = [], - model_selection_strategy: ModelSelectionStrategy = ModelSelectionStrategy.ROUND_ROBIN, - ) -> Dict: - if ( - output_format == OutputFormat.OPENAI_CHAT_COMPLETIONS - or output_format == OutputFormat.OPENAI_VISION - ): - output_json = cls._convert_generic_json_to_openai_chat_completions_format( - generic_dataset, - add_model_name, - add_stream, - extra_inputs, - output_tokens_mean, - output_tokens_stddev, - output_tokens_deterministic, - model_name, - model_selection_strategy, - ) - elif output_format == OutputFormat.OPENAI_COMPLETIONS: - output_json = cls._convert_generic_json_to_openai_completions_format( - generic_dataset, - add_model_name, - add_stream, - extra_inputs, - output_tokens_mean, - output_tokens_stddev, - output_tokens_deterministic, - model_name, - model_selection_strategy, - ) - elif output_format == OutputFormat.OPENAI_EMBEDDINGS: - output_json = cls._convert_generic_json_to_openai_embeddings_format( - generic_dataset, - extra_inputs, - model_name, - model_selection_strategy, - ) - elif output_format == OutputFormat.RANKINGS: - output_json = cls._convert_generic_json_to_rankings_format( - generic_dataset, - extra_inputs, - model_name, - model_selection_strategy, - ) - elif output_format == OutputFormat.VLLM: - output_json = cls._convert_generic_json_to_vllm_format( - generic_dataset, - add_model_name, - add_stream, - extra_inputs, - output_tokens_mean, - output_tokens_stddev, - output_tokens_deterministic, - model_name, - model_selection_strategy, - ) - elif output_format == OutputFormat.TENSORRTLLM: - output_json = cls._convert_generic_json_to_trtllm_format( - generic_dataset, - add_model_name, - add_stream, - extra_inputs, - output_tokens_mean, - output_tokens_stddev, - output_tokens_deterministic, - model_name, - model_selection_strategy, - ) - else: - raise GenAIPerfException( - f"Output format {output_format} is not currently supported" - ) - - return output_json - - @classmethod - def _convert_generic_json_to_openai_chat_completions_format( - cls, - dataset_json: Dict, - add_model_name: bool, - add_stream: bool, - extra_inputs: Dict, - output_tokens_mean: int, - output_tokens_stddev: int, - output_tokens_deterministic: bool, - model_name: list = [], - model_selection_strategy: ModelSelectionStrategy = ModelSelectionStrategy.ROUND_ROBIN, - ) -> Dict: - # TODO (TMA-1757): Implement a way to select a role for `text_input` - ( - system_role_headers, - user_role_headers, - _, - ) = cls._determine_json_feature_roles(dataset_json) - pa_json = cls._populate_openai_chat_completions_output_json( - dataset_json, - system_role_headers, - user_role_headers, - add_model_name, - add_stream, - extra_inputs, - output_tokens_mean, - output_tokens_stddev, - output_tokens_deterministic, - model_name, - model_selection_strategy, - ) - - return pa_json - - @classmethod - def _convert_generic_json_to_openai_completions_format( - cls, - dataset_json: Dict, - add_model_name: bool, - add_stream: bool, - extra_inputs: Dict, - output_tokens_mean: int, - output_tokens_stddev: int, - 
output_tokens_deterministic: bool, - model_name: list = [], - model_selection_strategy: ModelSelectionStrategy = ModelSelectionStrategy.ROUND_ROBIN, - ) -> Dict: - ( - system_role_headers, - user_role_headers, - text_input_headers, - ) = cls._determine_json_feature_roles(dataset_json) - pa_json = cls._populate_openai_completions_output_json( - dataset_json, - system_role_headers, - user_role_headers, - text_input_headers, - add_model_name, - add_stream, - extra_inputs, - output_tokens_mean, - output_tokens_stddev, - output_tokens_deterministic, - model_name, - model_selection_strategy, - ) - - return pa_json - - @classmethod - def _convert_generic_json_to_openai_embeddings_format( - cls, - generic_dataset: Dict, - extra_inputs: Dict, - model_name: list = [], - model_selection_strategy: ModelSelectionStrategy = ModelSelectionStrategy.ROUND_ROBIN, - ) -> Dict[str, Any]: - pa_json: Dict[str, Any] = {"data": []} - - for index, entry in enumerate(generic_dataset["rows"]): - iter_model_name = cls._select_model_name( - model_name, index, model_selection_strategy - ) - payload = entry.get("payload", {}) - input_values = payload.get("input") - - if input_values is None: - raise ValueError("Missing required fields 'input' in dataset entry") - if not isinstance(input_values, list): - raise ValueError( - f"Required field 'input' must be a list (actual: {type(input_values)})" - ) - - payload = { - "input": input_values, - "model": iter_model_name, - } - - for key, value in extra_inputs.items(): - payload[key] = value - - pa_json["data"].append({"payload": [payload]}) - - return pa_json - - @classmethod - def contains_rankings_tei(cls, extra_inputs: Optional[Dict]) -> bool: - """ - Check if user specified that they are using the Hugging Face - Text Embeddings Interface for ranking models - """ - if extra_inputs and extra_inputs.get("rankings") == "tei": - return True - return False - - @classmethod - def _convert_generic_json_to_rankings_format( - cls, - generic_dataset: Dict, - extra_inputs: Dict, - model_name: list = [], - model_selection_strategy: ModelSelectionStrategy = ModelSelectionStrategy.ROUND_ROBIN, - ) -> Dict[str, Any]: - pa_json: Dict[str, Any] = {"data": []} - use_tei_format = cls.contains_rankings_tei(extra_inputs) - - for index, entry in enumerate(generic_dataset["rows"]): - iter_model_name = cls._select_model_name( - model_name, index, model_selection_strategy - ) - payload = entry.get("payload", {}) - query_values = payload.get("query") - - if use_tei_format: - passage_values = payload.get("passages", []) - passage_values = [item.get("text", "") for item in passage_values] - else: - passage_values = payload.get("passages") - - if query_values is None: - raise ValueError("Missing required fields 'query' in dataset entry") - if passage_values is None: - raise ValueError( - f"Missing required fields '{'texts' if use_tei_format else 'passages'}' in dataset entry" - ) - if not isinstance(passage_values, list): - raise ValueError( - f"Required field '{'texts' if use_tei_format else 'passages'}' must be a list (actual: {type(passage_values)})" - ) - - if use_tei_format: - payload = {"query": query_values["text"], "texts": passage_values} - else: - payload = { - "query": query_values, - "passages": passage_values, - "model": iter_model_name, - } - - for key, value in extra_inputs.items(): - if not (key == "rankings" and value == "tei"): - payload[key] = value - - pa_json["data"].append({"payload": [payload]}) - - return pa_json - - @classmethod - def _convert_generic_json_to_vllm_format( - 
cls, - dataset_json: Dict, - add_model_name: bool, - add_stream: bool, - extra_inputs: Dict, - output_tokens_mean: int, - output_tokens_stddev: int, - output_tokens_deterministic: bool, - model_name: list = [], - model_selection_strategy: ModelSelectionStrategy = ModelSelectionStrategy.ROUND_ROBIN, - ) -> Dict: - ( - system_role_headers, - user_role_headers, - text_input_headers, - ) = cls._determine_json_feature_roles(dataset_json) - - pa_json = cls._populate_vllm_output_json( - dataset_json, - system_role_headers, - user_role_headers, - text_input_headers, - add_model_name, - add_stream, - extra_inputs, - output_tokens_mean, - output_tokens_stddev, - output_tokens_deterministic, - model_name, - model_selection_strategy, - ) - - return pa_json - - @classmethod - def _convert_generic_json_to_trtllm_format( - cls, - dataset_json: Dict, - add_model_name: bool, - add_stream: bool, - extra_inputs: Dict, - output_tokens_mean: int, - output_tokens_stddev: int, - output_tokens_deterministic: bool, - model_name: list = [], - model_selection_strategy: ModelSelectionStrategy = ModelSelectionStrategy.ROUND_ROBIN, - ) -> Dict: - ( - system_role_headers, - user_role_headers, - text_input_headers, - ) = cls._determine_json_feature_roles(dataset_json) - - pa_json = cls._populate_trtllm_output_json( - dataset_json, - system_role_headers, - user_role_headers, - text_input_headers, - add_model_name, - add_stream, - extra_inputs, - output_tokens_mean, - output_tokens_stddev, - output_tokens_deterministic, - model_name, - model_selection_strategy, - ) - - return pa_json - - @classmethod - def _write_json_to_file(cls, json_in_pa_format: Dict, output_dir: Path) -> None: - filename = output_dir / DEFAULT_INPUT_DATA_JSON - with open(str(filename), "w") as f: - f.write(json.dumps(json_in_pa_format, indent=2)) - - @classmethod - def _determine_json_feature_roles( - cls, dataset_json: Dict - ) -> Tuple[List[str], List[str], List[str]]: - SYSTEM_ROLE_LIST = ["system_prompt"] - USER_ROLE_LIST = ["question", "article"] - TEXT_INPUT_LIST = ["text_input"] - - system_role_headers: List[str] = [] - user_role_headers: List[str] = [] - text_input_headers: List[str] = [] - - if "features" in dataset_json.keys(): - # TODO (TPA-53) remove enumerate if index isnt useful - for index, feature in enumerate(dataset_json["features"]): - if feature in SYSTEM_ROLE_LIST: - system_role_headers.append(feature) - if feature in USER_ROLE_LIST: - user_role_headers.append(feature) - if feature in TEXT_INPUT_LIST: - user_role_headers.append(feature) - - assert ( - system_role_headers is not None - or user_role_headers is not None - or text_input_headers is not None - ) - - return system_role_headers, user_role_headers, text_input_headers - - @classmethod - def _select_model_name(cls, model_name, index, model_selection_strategy): - if model_selection_strategy == ModelSelectionStrategy.ROUND_ROBIN: - return model_name[index % len(model_name)] - elif model_selection_strategy == ModelSelectionStrategy.RANDOM: - return random.choice(model_name) - else: - raise GenAIPerfException( - f"Model selection strategy '{model_selection_strategy}' is unsupported" - ) - - @classmethod - def _populate_openai_chat_completions_output_json( - cls, - dataset_json: Dict, - system_role_headers: List[str], - user_role_headers: List[str], - add_model_name: bool, - add_stream: bool, - extra_inputs: Dict, - output_tokens_mean: int, - output_tokens_stddev: int, - output_tokens_deterministic: bool, - model_name: list = [], - model_selection_strategy: ModelSelectionStrategy 
= ModelSelectionStrategy.ROUND_ROBIN, - ) -> Dict: - pa_json = cls._create_empty_openai_pa_json() - - for index, entry in enumerate(dataset_json["rows"]): - iter_model_name = cls._select_model_name( - model_name, index, model_selection_strategy - ) - pa_json["data"].append({"payload": []}) - pa_json["data"][index]["payload"].append({"messages": []}) - - for header, content in entry.items(): - new_message = cls._create_new_openai_chat_completions_message( - header, system_role_headers, user_role_headers, content - ) - - pa_json = cls._add_new_message_to_json(pa_json, index, new_message) - - pa_json = cls._add_optional_tags_to_openai_json( - pa_json, - index, - add_model_name, - add_stream, - extra_inputs, - output_tokens_mean, - output_tokens_stddev, - output_tokens_deterministic, - iter_model_name, - ) - - return pa_json - - @classmethod - def _populate_openai_completions_output_json( - cls, - dataset_json: Dict, - system_role_headers: List[str], - user_role_headers: List[str], - text_input_headers: List[str], - add_model_name: bool, - add_stream: bool, - extra_inputs: Dict, - output_tokens_mean: int, - output_tokens_stddev: int, - output_tokens_deterministic: bool, - model_name: list = [], - model_selection_strategy: ModelSelectionStrategy = ModelSelectionStrategy.ROUND_ROBIN, - ) -> Dict: - pa_json = cls._create_empty_openai_pa_json() - - for index, entry in enumerate(dataset_json["rows"]): - iter_model_name = cls._select_model_name( - model_name, index, model_selection_strategy - ) - pa_json["data"].append({"payload": []}) - pa_json["data"][index]["payload"].append({"prompt": ""}) - - for header, content in entry.items(): - new_prompt = cls._create_new_prompt( - header, - system_role_headers, - user_role_headers, - text_input_headers, - content, - ) - - pa_json = cls._add_new_prompt_to_json(pa_json, index, new_prompt) - - pa_json = cls._add_optional_tags_to_openai_json( - pa_json, - index, - add_model_name, - add_stream, - extra_inputs, - output_tokens_mean, - output_tokens_stddev, - output_tokens_deterministic, - iter_model_name, - ) - - return pa_json - - @classmethod - def _populate_vllm_output_json( - cls, - dataset_json: Dict, - system_role_headers: List[str], - user_role_headers: List[str], - text_input_headers: List[str], - add_model_name: bool, - add_stream: bool, - extra_inputs: Dict, - output_tokens_mean: int, - output_tokens_stddev: int, - output_tokens_deterministic: bool, - model_name: list = [], - model_selection_strategy: ModelSelectionStrategy = ModelSelectionStrategy.ROUND_ROBIN, - ) -> Dict: - pa_json = cls._create_empty_vllm_pa_json() - - for index, entry in enumerate(dataset_json["rows"]): - iter_model_name = cls._select_model_name( - model_name, index, model_selection_strategy - ) - pa_json["data"].append({"text_input": [""]}) - - for header, content in entry.items(): - new_text_input = cls._create_new_text_input( - header, - system_role_headers, - user_role_headers, - text_input_headers, - content, - ) - - pa_json = cls._add_new_text_input_to_json( - pa_json, index, new_text_input - ) - - pa_json = cls._add_optional_tags_to_vllm_json( - pa_json, - index, - add_model_name, - add_stream, - extra_inputs, - output_tokens_mean, - output_tokens_stddev, - output_tokens_deterministic, - iter_model_name, - ) - - return pa_json - - @classmethod - def _populate_trtllm_output_json( - cls, - dataset_json: Dict, - system_role_headers: List[str], - user_role_headers: List[str], - text_input_headers: List[str], - add_model_name: bool, - add_stream: bool, - extra_inputs: Dict, - 
output_tokens_mean: int, - output_tokens_stddev: int, - output_tokens_deterministic: bool, - model_name: list = [], - model_selection_strategy: ModelSelectionStrategy = ModelSelectionStrategy.ROUND_ROBIN, - ) -> Dict: - pa_json = cls._create_empty_trtllm_pa_json() - default_max_tokens = ( - "max_tokens" not in extra_inputs - or output_tokens_mean != cls.DEFAULT_OUTPUT_TOKENS_MEAN - ) - - for index, entry in enumerate(dataset_json["rows"]): - iter_model_name = cls._select_model_name( - model_name, index, model_selection_strategy - ) - pa_json["data"].append({"text_input": [""]}) - - for header, content in entry.items(): - new_text_input = cls._create_new_text_input( - header, - system_role_headers, - user_role_headers, - text_input_headers, - content, - ) - - pa_json = cls._add_new_text_input_to_json( - pa_json, index, new_text_input - ) - - pa_json = cls._add_required_tags_to_trtllm_json( - pa_json, index, default_max_tokens - ) - pa_json = cls._add_optional_tags_to_trtllm_json( - pa_json, - index, - add_model_name, - add_stream, - extra_inputs, - output_tokens_mean, - output_tokens_stddev, - output_tokens_deterministic, - iter_model_name, - ) - - return pa_json - - @classmethod - def _create_empty_openai_pa_json(cls) -> Dict: - empty_pa_json = deepcopy(cls.EMPTY_JSON_IN_OPENAI_PA_FORMAT) - - return empty_pa_json - - @classmethod - def _create_empty_vllm_pa_json(cls) -> Dict: - empty_pa_json = deepcopy(cls.EMPTY_JSON_IN_VLLM_PA_FORMAT) - - return empty_pa_json - - @classmethod - def _create_empty_trtllm_pa_json(cls) -> Dict: - empty_pa_json = deepcopy(cls.EMPTY_JSON_IN_TENSORRTLLM_PA_FORMAT) - - return empty_pa_json - - @classmethod - def _create_new_openai_chat_completions_message( - cls, - header: str, - system_role_headers: List[str], - user_role_headers: List[str], - content: str, - ) -> Optional[Dict]: - # Do not add messages with blank content - if not content: - return {} - - if header in system_role_headers: - new_message = { - "role": "system", - "content": content, - } - elif header in user_role_headers: - new_message = { - "role": "user", - "content": content, - } - else: - new_message = {} - - return new_message - - @classmethod - def _create_new_prompt( - cls, - header: str, - system_role_headers: List[str], - user_role_headers: List[str], - text_input_headers: List[str], - content: str, - ) -> str: - new_prompt = "" - - if ( - header in system_role_headers - or header in user_role_headers - or header in text_input_headers - ): - new_prompt = content - - return new_prompt - - @classmethod - def _create_new_text_input( - cls, - header: str, - system_role_headers: List[str], - user_role_headers: List[str], - text_input_headers: List[str], - content: str, - ) -> str: - new_text_input = "" - - if ( - header in system_role_headers - or header in user_role_headers - or header in text_input_headers - ): - new_text_input = content - - return new_text_input - - @classmethod - def _add_new_message_to_json( - cls, pa_json: Dict, index: int, new_message: Optional[Dict] - ) -> Dict: - if new_message: - pa_json["data"][index]["payload"][0]["messages"].append(new_message) - - return pa_json - - @classmethod - def _add_new_text_input_to_json( - cls, pa_json: Dict, index: int, new_text_input: str - ) -> Dict: - if new_text_input: - if pa_json["data"][index]["text_input"][0]: - pa_json["data"][index]["text_input"][0] = ( - pa_json["data"][index]["text_input"][0] + f" {new_text_input}" - ) - else: - pa_json["data"][index]["text_input"][0] = new_text_input - - return pa_json - - @classmethod - 
def _add_new_prompt_to_json( - cls, - pa_json: Dict, - index: int, - new_prompt: str, - ) -> Dict: - if new_prompt: - if pa_json["data"][index]["payload"][0]["prompt"]: - pa_json["data"][index]["payload"][0]["prompt"] += f" {new_prompt}" - else: - pa_json["data"][index]["payload"][0]["prompt"] = new_prompt - - return pa_json - - @classmethod - def _add_optional_tags_to_openai_json( - cls, - pa_json: Dict, - index: int, - add_model_name: bool, - add_stream: bool, - extra_inputs: Dict, - output_tokens_mean: int, - output_tokens_stddev: int, - output_tokens_deterministic: bool, - model_name: str = "", - ) -> Dict: - row = pa_json["data"][index]["payload"][0] - if add_model_name: - row["model"] = model_name - if add_stream: - row["stream"] = True - if output_tokens_mean != cls.DEFAULT_OUTPUT_TOKENS_MEAN: - row["max_tokens"] = int( - random.gauss(output_tokens_mean, output_tokens_stddev) - ) - for key, value in extra_inputs.items(): - row[key] = value - - return pa_json - - @classmethod - def _add_optional_tags_to_vllm_json( - cls, - pa_json: Dict, - index: int, - add_model_name: bool, - add_stream: bool, - extra_inputs: Dict, - output_tokens_mean: int, - output_tokens_stddev: int, - output_tokens_deterministic: bool, - model_name: str = "", - ) -> Dict: - row = pa_json["data"][index] - if add_model_name: - row["model"] = model_name - if add_stream: - row["stream"] = [True] - if output_tokens_mean != cls.DEFAULT_OUTPUT_TOKENS_MEAN: - number_of_tokens = str( - int(max(0, random.gauss(output_tokens_mean, output_tokens_stddev))) - ) - sampling_parameters = { - "max_tokens": number_of_tokens, - } - if output_tokens_deterministic: - sampling_parameters["min_tokens"] = number_of_tokens - sampling_parameters_str = json.dumps(sampling_parameters) - row["sampling_parameters"] = [sampling_parameters_str] - for key, value in extra_inputs.items(): - row[key] = [value] - if "exclude_input_in_output" not in row: - row["exclude_input_in_output"] = [True] - - return pa_json - - @classmethod - def _add_optional_tags_to_trtllm_json( - cls, - pa_json: Dict, - index: int, - add_model_name: bool, - add_stream: bool, - extra_inputs: Dict, - output_tokens_mean: int, - output_tokens_stddev: int, - output_tokens_deterministic: bool, - model_name: str = "", - ) -> Dict: - row = pa_json["data"][index] - if add_model_name: - row["model"] = model_name - if add_stream: - row["stream"] = [True] - if output_tokens_mean != cls.DEFAULT_OUTPUT_TOKENS_MEAN: - number_of_tokens = int( - random.gauss(output_tokens_mean, output_tokens_stddev) - ) - if output_tokens_deterministic: - row["min_length"] = [number_of_tokens] - row["max_tokens"] = [number_of_tokens] - for key, value in extra_inputs.items(): - row[key] = [value] - - return pa_json - - @classmethod - def _add_required_tags_to_trtllm_json( - cls, - pa_json: Dict, - index: int, - default_max_tokens: bool, - ) -> Dict: - row = pa_json["data"][index] - if default_max_tokens: - row["max_tokens"] = [cls.DEFAULT_TENSORRTLLM_MAX_TOKENS] - - return pa_json - - @classmethod - def _check_for_dataset_name_if_input_type_is_url( - cls, input_type: PromptSource, dataset_name: str - ) -> None: - if input_type == PromptSource.DATASET and not dataset_name: - raise GenAIPerfException( - "Input type is dataset, but dataset_name is not specified." 
-            )
-
-    @classmethod
-    def _check_for_tokenzier_if_input_type_is_synthetic(
-        cls,
-        input_type: PromptSource,
-        tokenizer: Tokenizer,
-    ) -> None:
-        if input_type == PromptSource.SYNTHETIC and not tokenizer:
-            raise GenAIPerfException(
-                "Input type is SYNTHETIC, but a tokenizer was not specified."
-            )
-
-    @classmethod
-    def _check_for_valid_starting_index(cls, starting_index: int) -> None:
-        if not isinstance(starting_index, int):
-            raise GenAIPerfException(
-                f"starting_index: {starting_index} must be an integer."
-            )
-
-        if starting_index < cls.MINIMUM_STARTING_INDEX:
-            raise GenAIPerfException(
-                f"starting_index: {starting_index} must be larger than {cls.MINIMUM_STARTING_INDEX}."
-            )
-
-    @classmethod
-    def _check_for_valid_length(cls, length: int) -> None:
-        if not isinstance(length, int):
-            raise GenAIPerfException(f"length: {length} must be an integer.")
-
-        if length < cls.MINIMUM_LENGTH:
-            raise GenAIPerfException(
-                f"length: {length} must be larger than {cls.MINIMUM_LENGTH}."
-            )
-
-    @classmethod
-    def _query_server(cls, configured_url: str) -> Response:
-        try:
-            response = requests.get(configured_url)
-        except Exception as e:
-            error_message = cls._create_error_message(e)
-            raise GenAIPerfException(error_message)
-
-        return response
-
-    @classmethod
-    def _create_error_message(cls, exception: Exception) -> str:
-        url_str = exception.args[0].args[0]
-        url_start = url_str.find("'")
-        url_end = url_str.find("'", url_start + 1) + 1
-        error_message = f"Invalid URL: {url_str[url_start:url_end]}"
-
-        return error_message
-
-    @classmethod
-    def _check_for_error_in_json_of_dataset(cls, dataset_json: Dict) -> None:
-        if "error" in dataset_json:
-            raise GenAIPerfException(dataset_json["error"])
-
-    @classmethod
-    def _create_synthetic_prompt(
-        cls,
-        tokenizer: Tokenizer,
-        prompt_tokens_mean: int,
-        prompt_tokens_stddev: int,
-    ) -> str:
-        return SyntheticPromptGenerator.create_synthetic_prompt(
-            tokenizer, prompt_tokens_mean, prompt_tokens_stddev
-        )
-
-    @classmethod
-    def _create_synthetic_image(
-        cls,
-        image_width_mean: int,
-        image_width_stddev: int,
-        image_height_mean: int,
-        image_height_stddev: int,
-        image_format: ImageFormat,
-    ) -> str:
-        return SyntheticImageGenerator.create_synthetic_image(
-            image_width_mean=image_width_mean,
-            image_width_stddev=image_width_stddev,
-            image_height_mean=image_height_mean,
-            image_height_stddev=image_height_stddev,
-            image_format=image_format,
-        )
diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/source_images/dlss.png b/src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/source_images/dlss.png
deleted file mode 100644
index cdba23dd3771bd3633b3961113b21e3f9b170634..0000000000000000000000000000000000000000
Binary files a/src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/source_images/dlss.png and /dev/null differ
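
For context on what this purge removes: the deleted _select_model_name and _populate_openai_chat_completions_output_json methods above are the core of how genai-perf generated perf_analyzer's OpenAI chat-completions input JSON. A minimal, self-contained Python sketch of that flow follows. It is illustrative only, not code from this repository; all function and variable names below are local stand-ins.

# Illustrative sketch only, not code from this repository. It approximates
# how the deleted LlmInputs helpers build the perf_analyzer "input data" JSON
# for the OpenAI chat-completions endpoint, including the round-robin vs.
# random model selection shown in the diff above.
import json
import random
from typing import Any, Dict, List


def select_model_name(model_names: List[str], index: int, strategy: str) -> str:
    # Mirrors _select_model_name: round-robin cycles through the list,
    # random picks uniformly.
    if strategy == "round_robin":
        return model_names[index % len(model_names)]
    if strategy == "random":
        return random.choice(model_names)
    raise ValueError(f"Model selection strategy '{strategy}' is unsupported")


def to_openai_chat_payloads(
    rows: List[Dict[str, Any]],
    model_names: List[str],
    add_stream: bool = True,
    strategy: str = "round_robin",
) -> Dict[str, Any]:
    # One {"payload": [...]} entry per dataset row, matching the
    # {"data": [...]} scaffold built by _create_empty_openai_pa_json.
    pa_json: Dict[str, Any] = {"data": []}
    for index, row in enumerate(rows):
        payload: Dict[str, Any] = {
            "messages": [{"role": "user", "content": row["text_input"]}],
            "model": select_model_name(model_names, index, strategy),
        }
        if add_stream:
            payload["stream"] = True
        pa_json["data"].append({"payload": [payload]})
    return pa_json


if __name__ == "__main__":
    rows = [{"text_input": "What is Triton?"}, {"text_input": "Summarize DLSS."}]
    print(json.dumps(to_openai_chat_payloads(rows, ["model_a", "model_b"]), indent=2))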
zdq71+QU_Bi5DA8U^c|?&z3GL2@+<(fwkOqdUBTN)k-a+6b7Db?Mbn48&;pa||AR${uPgOc-umWMt5l;7`9{^iNs)P>Jx&z+kd%&l!kP5G5# zaMp}`aU$V6ad)YWFXGFkx`%xW&L>YEl-A_>5)D3aWo_i<&Ec-H!RKqw2fsWwJG-)W zvHy-=F;%}W;p?u|1mtO9A~D9-mDZH&!NR4}iCB88FJ7yyBhF}RYJowo9Y&>n`Qlu0 zQBkf;X3+P2wX`(Zejy(3b$idW%nSHdQx!pIesA}#_RZh2t-2rz)i}{xofqtqN#itm zTUTw8>x)84LyvDy!%OvGZ(fY)pL~4v)Cpt*r)YL>B=A zM%r|w(bq~xNgP9xJ+w918CZbU=1@-^I;JeE+Y8vdq2Y#?HCiqWw7 zOit#n8l_E{2I+X}`%((~Q~q9u^x4-RnsZAWAiG0~Z%iQeC`O)q`Sg!xpFVX~7NMec zbQ=W*b9K7iHn3WbOcBZ=@!#H!Y+%letpBog z^)YFrX}teva+~xd+iQE*OSYH@)($;0oZ1oYpiV=(xp8fT*r^x4KrAz4$OeJn)z-hfR*SoKDACxEC-x;f+84vgaSyfW%)&e)Imd z%i~?J*baC?F07{qDgH=*({SYW7Mx`OazKs00<5~m6lw|By8yZV+`~IEVon;SKJHyk zrpGo`)*r8LY^D0&>oY7HF6>BH`fI7KXA>(aHDJvSH%X%MOlK;S$qJ$%%WYA9lxyj) z+Iak_u4XSlD13uLIhWf6%pW-soHaXjCnuarqskR1#A$z(wwzEoVw>*GiOpw@Coz{j z<#0>f?tt66TAvEw^s!BsT}9AZ>lH~ez^B$v+6fG!g%njz>v3Erqp01OAOCfuLWE-q z+=45LJb*s=o>q*Zi82N50sMKe5!@;jQ3Ro4WHe=`YfjMo6F5-$V#QC33x$A2kAL*E@9TLs`0(WcNZJ=#oS1M9SV#Ke@uBp+o{yfhkM1uI z^?ccL(Xo(%eev*Xcdq2F{lgZ?IW3t&<$9^sI-LX#*=9srO3!H&jMy{mycZkWAgvLvuf2ZJ&Jb+tb#?~sQx9Vx#^fKpvm%`^AE zYC8f4rM1IzPVOY5*OqlmB-0P?`iy~Iqe{Y5DvS*TrK)eH0t3%tv1iV)*i(nY0U6Zp zaM@d|oz^C<_?XT}l`{fu^Qy$|)>2K^V=fZMP@D?L-=}H-03ZNKL_t&#idG&`iQ5#o znC1ZnwNSW6EC7sJS`6#%iWC$Oa%f9=3Z6m)^&W&Qqfy8h84%`$LOu>WXt8i_u|`8^ zZbZEEX7(+!x1g@i8-Kx@xev~(%PTH@jn{qOrqq}GxsOdQ&zcPbz+Xx`!X2>It0E8X zHH$>V;&fm7K|BumWGGh6z)D4}CS;1U&%x(VI_b3PteeakA{%Qv>vDCN`hv3#*H(%g z4r#!m^%E$9C9s6Y;{lFEp()NkEZmcym!Dr~gY7~2eA(UaJ!Lp#@X7W7y4~|P0MK&g z{P}3Z*LM$GZ|gsC$1O!}8d`BWfbv2}}_BRk^yjCpbBBN1P zM5#ifYEqt+`_ZUC;4i1@Hy(clx0&D^X*eUayvnVykv`{a{O+Pjd)#%LG}1M-Mv%(u zQ|@zBj?LIJofE*+;gD3v+CGDM7iki$Ukh`(efL&5mtFo&YIxyhYP!^O%jR#si zQlhMw5W`Y=g@r^ZDNpRdQ9oVUst}Ym@=%cYK{x^uuPFA=0F!~87C}CkNwUfV?OkhZj?~AwRlcWt;V?F}bj?H&tG?`|9_(ED( z5I@;_V^20(?kwG$xp@1+=b!)PaknAZ)3&m@($$`uECNz>?kv8Y<_mpJ$7;rMTImZI zwO4)J!;8-PTHlCn^2m*~xw++J9m|5n&iU;|SATzh`+wj=*d3D@Ny@j7o|@}!5+~4b zHXxS^2!#LzVN)KKt5HmVszOTT!uqeCzWd9)2jCzoIDfHgz@|}3VrynYM8i-u?KqU zD=fv~dzkRbqlJaIW;inCo%hCf_T7OB4BQfDJArPe(QS8MVfam;a?;ZSWJk#4j=8XC zHmrtKH8KJLE>tAa6PgGxWZuzR0CyLyE7fvTuJcVB49imwc7oqgcW=<-X4FwBlXV!D zLgQPP?nDfiJ2MGO!crZUTc>oLy^_iP+QQ-nQ?GSg&{)(cLW+tSU$mE3@NJ6^zl+ac zNL0b+i^hJpzVNR<-LHZ*mpGDN_D;OlRwx|a_4>%m;7!8+@V=R*a&E2!t-VIA4u>(3 zP>;w4a$}Y=u+1-=l1d4hbYEVdnRBNSiPqKsyP35y9-7zICO1Q>%hV9I2Eyox6VTX4o4ou$q7!23^d)tZ#&I8LF4#6)*}B5-D1`6|!4f>PAQZISQh@1MsKS;b4-b z=~Ee#Tv0xmEmS}yC|1CRnBo%jKBQ0x zBky9Ug2aF>g~U+yBnbTwYDEYMv%juN)szJR--}95&nC7I<0E1l~>S;dwd;Vl-Os&zB_kX;=DLYZj{A`R^=pHG|4$O z#UY!9ik1Xqq*#UvrjF(o?;%v|3Q|9;w5IUwK%brvis8?`eD>^fpg~E&q)@EBG?MBY zG@HfhV9F~cN~P`x;(Z2XI8;^+J~3_Cia1y3c4~JUlJTA7TykeK9&Z$|kr(apwzBfv zobxrD$Ygb=sgm>V)#Z`HfW)SK=G>fgAV^pAyIWd^-M>y{?#We16 zCa=DA_KlRu-pf|CBD)A6=HJN_hpv7jmIm_sZs6ZMi@I z$+N1FirY)~#_*Hx9b=v3FewZ6M$9w_(}dOMH-gvah6x;NB5*=g<#3q`(T0$_#brhETIO7}xU1_?JuXG7^)eP)Ka16_I&FFpuN;PP^M{LzH8l`RK~B}w zOh)=hREBXxSwL%^x-bQzItP=A1o}J~#59R0xI9dVAtt1dg)vS(uMp#k&4?5#6f2IQ zg=&O0dURp_dT^RbQ3imIXL6zceWm^2;uVp4Cfsgkquvy&n>QL&wL2^2t+?N#G2+am zVf7`%JFf{pew^5I^U;r*Yhho^L<5bH*fvCUo{(E*Zk4V`)tcevRm+X}X`Sv`LvkaI zHlZa$&B=m@EW#08IRE3f-~L$>U49YmGF{AW8^F$>suviRhCkR8zl*1)h(Ap zi|1<=IcQrw04O^v!Y2EmsSDUCD36m3Y>`5NF=f#zz@``v7$*hr@F;wS3-F0aB@#@U z2dU|+TrQ+w6%w6DEEx29s6f<1fybbv4paO$Ao#y__|X zUPnwMEwgJYU3SNkZ|Rt&i6hl9A#9L8EqMvNXA~(*OV}s5Dt$+-oP}k@Gongg2EXkt|0` zeih)ShBZZlJu_pQIe;W!=9C((Q%ln{&L#YHGlW>&HS^&9#QXrDL*MP%sq|m?hrhg$ zG5~N5lw?(Y{@&p?%(i$Ds~)lU>k{BGbly6&e{&S|OG?R481+X_E)gL28{UVwS*XfwF8Q(ABO*uoP`l5u3~q$!j=dpCn^SF5$8YL?=XJOpz 
zDH2$C5hksW6ry65SSE(_5<-rvxn@ZoHv>q3Jo*o`CfE~;d6eBcet;jC1}LRc`DyofGzgY!Y8^2E=*OvNS;2DhLL2g1SiurBBqXQ}=EsMqWA>vZzQ_LsmURjDg0FX;mO zOixgQn}s+`cjA}2-+7(a&Ai5JUSqaNve@+KLbGG7d4WlKM=q@J*eH=qwr^^avsJyG zOzK{dJz4O6@Cm{GNu}#P|C9TH#Ehw;C+Zou4Q-;R8Q#@uceUkG3A`f2OwKb%Uk3M& zy}!NXNy((Bhc>g68OPdxv#VP-4qPI&lwZ%3Ihjez3`CFI2TExBN<+N&-;AAMOd4q# z$J5-W`*2q;U1E;Q<>&{y_Ey`~J@;zqvO+4oNRO?uO1QHkS_BdlYs8c zOFo=16xE>i6XGW)te&pfINvva_;I6}_g*1a^W~)P!z>bHvJ^!}m8YXh{{YPLdUh4x8iO`|00Nrvpi&R2Zcyt_p!&m?!+I&a<(!R2JxaS(549-jv4jqN z(X6mb3=;;Ayjfl#=xMl+KWQj0d?0{OLRVA({XHaeLMWMWNO+~P?sBsra_%EY zTcVZ-1Zqi=j8+auHMDMYB@xk8erfaSmAPlPPKwLq1#h?fYbU>$0D#tY$T~VoD}Vck zU9YDdW}yZ(LRR2u#PaxQd*><)nGB6 ze43nhxd*2Z7O-hL8xM|61+i#2nA%*}C^`6|qq6&a|CidLQ_*T&(z$OZ$lhloQ!af= zR5yL8cg44LKjE7;ZSU&j8qo-$EEN9cIRH>!URsU)g#X4eh=ni&SsNbTl!X4&gBoI_0};Q6ZyTDS%R%g4L$W{HpjfVTkt$r(+s)cDk}NOUfK{Y;}+ zEZ~_%W{KTiuWvZgHP3X3x(qLq6plRsy* zLv^?QebU;Xs_#w)^7sH!-w#J^rK}WUvg>u}>@#)QwgeR#$K8vmRgCD~e^_m7XxM$a?X#_o3AbZ%)~Ejl^Gg!ghO)AmY$B!E`+HrN8i zZ@5q&8^b)uKY6rs3$d6koWHrlN7=x$dv{l#l$~;>WP6VP0MK0sMW;fj$TPE6WS1G% zzGk;B-milK1KdyFy);+Pb!~uvmO|$vixOp!Ewce z5|4gR0Q3Bz=7MIS#9nWNbi&N7*xo!K6bcm*!Hix|X^QmH>l(@|Gc*5Rx2qboP*j*PDOSnB$JZXLE(%)77AHxqyi00Mq|((N4{jW~GBoQQHLom0~$5|=dNkJ`)hua&cMuR1a z_xJCHk(*m=x7}e`C^SAV{&DS@8$nwvk}CS-d0rd~%l7X+^(a0vf<3N+fcAX5V;aN! z+kpOg8_&d+FUx`zTGb@=td*>}Dwb#e3U8k8pU3zF<9=9mQpNEJ4U!ScnN8mo^bh@W zgF(zOv*7JlW;S(gq6G#|h0S%-)6~UmYJF2XkV}n?;eL;&)h|7I)Zd_(>?xC2t#*kO zW_Gl3V8m}Wmm6l(9(9wZ90p4@Z?bt(jK{k4ki;*VdHKySp(`dQ<%T^_f(q<{9y@eP z0&{nTTqc)SNQ?rsx6z>2al3m%Sm<~1a)Z2EqCu{SXoiKc4;0r1Y%qc@Mga6ldEX)U zLn2>Z;lDb+dJ`Idb}Gui!AY5w_tq7sQb`WflaV~S)MtRI!txPk=8 zrmxP5i~t<$G1jAr_pOPi%oPf3HZRxtUEB zsXgXJpO4Lwpw448p`w1T4Gzz@y z%ve*0Q`b$kTiZHS8oRRxix9mk^UlZf`#sO|{@$4vMmI+R zF7k$J<+1(vuw=xG4SeIJC=RvqdXw)9_;mXj-{G5|nh?dt+HKq3%)WhJM|dd3Iv<(9r|8mX^x?^tvlU8lGj)KUb~G0uIzs>3^J6 zsnts!>2l`tasx7&UG?|Z1{Mw+-IrTxj`v6+^Y_5d(Gyq*nk2RQx=A(l#DPqaPjKki zb@oy1;x(6cWt1;=j)rjehi;FjOO?{ri+8m&+Lr!JHpn!RL3@f!X(f|wp3cdXL4Pp+ zA^*_RxOgmOdRwegm%ZOKQvmr~Ry!jT^}kgs%ZDT-w%Llu3-$QE9r=|F2P-S9@#)~V zSMjh48Nzkpp=QNfEs9b-9yK8W9m4|Xj-bQXn)XsC`#b(3!Z55_%kWh%rPAl=^?4rk z6ODNGJduW460lA(cy3T#6R+v0cb+`d-O^dp*5)h{0?ZRO#7hvGb9jg=kgMb6%ZDqv zG`n}oEh>e&Vv2zIdmuwolpo4jCU9;#!gm0Y!5{`M4fUM9AcZ!fQhMnQfe!_f7yXns zl1F(_ep0Cyjq(dWVG+L<@_gDIjz$?Qi3IS{;teM-C_qkqJ!?DIdzL`xG>T$a5-)AW zU<(IgzyHbdUFIpCuQ|51YL*fQmcPoLCwqg~R@SvROPqEpwNfIe?ePj&K%qD>>i& z9UF^l#ifd}+-n>7HE8%5_4)IHm;ESD@YzwMIb1US@jRqs+F^>0{2Xp{9XJYIlI_^9 z;Li_N0V&bSSoH2ulsVzqs7snYh1-V2rOMc>iwB#qD$xmT1>N-y?JjIQt%dAmJf{LntNckf=kh9s*CWm0Kl>aSPJG{l9} zFe&kPI@=#7ZW=3Rx}`UXw{MKkjCChJ-!*QQL2!!2V+G~5LYb^k)TXc0Zx_qN=7wG4 zwbg+=dXetqLcKy47(mfj&=kf8bYa944p+8Rmv%Z#odjY)LNMOgA^;+o-%E2m@Ty)= z`pch}iT=f3z(r>$WF%>n0+7N&K*=v1RkKrvn~*Spgfq@Gh_IQkMc$zfVm{9mRY`k8 z$(H!jVE50E%aFhDk`F>7+7!FYUj_rf<7GJEi<|_|j?V<;!>K zo0;4Mp*&!Pnj~gLy#nCHC=9017&RATtlkJN@9&p4N9s_@uPAOdG5$H%sjnBh51QJZ zwjmfVmW6C*vlXNb+g{uL!%u^`J^a(Zt+^>h!v|((C4X?8Pr17)a5R1WFp0W8J96Yp zt=32cQ6CANWx_t-z#U01;H+sn6QgOX6|>gmJ=+(^6F%Tvi#;KZ;=Vt5k_KyLIS4sr zje@UaykqDnF0MU@G?e8W(s*GGpJG0P{%>PL4$B_{8u2^xkg0r7H2i|kBH)W#H(>@n z_BuF+l>BU!;9>MICM6YhjOgV2EFpzl(bBP7qs$7Q>C?a6JbHQ$Ur6Frg-C4caRDrJ zjqAfbgi1FURzKD{!X7e_uqQ@Cz5#o}sa+f$eUxygj87mhYEmAx%sgnU7nvJdz8kLI zQEBTQtZtM_C4>EzYW)IUHhe*BlfG z!d9)qv+6|>U4zj72iF_{T}xbv8*r2Ze6WLm8Qq??j?Eocsmu^Fki;-QOwtHcn;nxu zCyv|ISMEsco=Htt*Tc;-2!j(a*y9SQJ!U0X3T`jnk@j*S zNpC5=4$Q3+7Gapz3nRi`l*i6d-b^5qVVDt=@dAp;AYny-R_UU&UVegsXLjvBcNZEP z80b&&jJ1fx=0F^8#$9O1wSjr}(Ad`yVPC=dYG#cxA>eSNo! 
zUK*JLAK?R|iLS}XeF4?Rje4X#H%mnyNLPQs6?TNrhsIc>TNp^hnQ8R`v~k z^WAr6mzG7xtf@`=zT+!NLcrg)IePJjZxq^*ep%^;4aJ*xmdnlVzWb{RRglFN{{C)5 zK>@$!4WB`~U3q|;aDlXsBvYhXrE-(}tBNkK*b_=0p-m8pm6gTBCPbWX!$z}h;h(Nxy+|!38r%2Sx@Aks_FIRR?lj88g*TeUX9}Cf`GL^)p-mT=%{H@0 zr14nH>VEl7y{JiCwh2PE+7$e1y|_}EM5lw(XMzkqWa`yH?|Zi4Y=29sWS|#k5U5H` zTv03y=Aesbi~Ou`Ej*upt->n(c06?z}q~{{mIRT*P7z(}mRwy1K01K<4u^62hiFpAX#TYsh_+ayE^1yTkf)>L< zpWWp%sKe-KiyNZ2e1A*;*7@YQDya_UjT3V`3QjgQ@CfB=@$J|rUkr9VI=-ywTHX@( ztQ_{JM(QFFz;Pi8%Vdl=yoc^7z8NeQgU$diBGmTL7|SCOa6UZE3ixAx@Z=K5f>(z= zpy&@>|1VqDAJfKphSNV9XU{W;r?Ml1X*NMBNu0mputR- zdDsYUXf5R2fbnp!!fI?&%4iO zP>?!Ct)ay;L%;c=ITt=Z{rMWdo>KDZ?Z&TxN&2(C{ABR{j#eteO*z{>ZGL%;q86=} zN8H`@HG;C1`aOFpAAzs2l>*EzefII8p7P@StjD=I>Co}D=!ZE#%Wd=NY`@d%#Jwbr zdtEawm)qm)@|%)2d?ShDwhR0+rxzFsjU%Fq1o}GfMvi@Zg*zEAU$`c7bj+E}rb!9qTv7o0&+EQHC&`>JvRN5B8 z7&bl3z*^g+ff?2UD}U0W8VI$AskmusVZ}sKKqRS=6v021`H93o0p(MOZU<`fR;F4~ z@`8_vfdfHA%!Dl*1Psch{7_7-R>)F0E_!PAdX6K4sK$a+GPK)jC2=`^(P%=$st;GYziiuFoCQSJFZHLhllt*@`E2St^A8!jAYDL+l4ms7MKciI5S zcyW?1p;;`Okby6X#q4%12$GSPflY_B)Q?ux#X7srLq8iB+M%Z24qeqHrp8|rq>fXM5b@%LxLI1f-6 z_`O*d?(E-~Pfa|s9m~4BUg$!W-`rU%*VNT^ibUq7(z&5`lS$8Cg$`G0;+M)|v++Un z-eIG-PAKJtJrEG+i-o|hOMA+N4(p`w?paTV9O|*x?Kx;x0#wcw?Rl$6So&*o`y0yP zVbk!iZBjNd+8|qChqWr}zl;qH?eIT>izb>iu?sX!E#AP;;2J<^-Xc9Nr#(gL4;b~C z&vCe_I+2E0Ydc zn315*NAc50xZrXjR{)4NigC+CFzO~_h+-vvhh6ajx&jR9=(ypBRZh9h)?NP^1SKk7 z{K<=ZckkYdY&P|lz5i1H?Nc2c`BCwJLeGw$xq9K>o15$F3B0KuPiC{EKk8-}D>!ce zXu5C`he!w?w2)8?;M34+hwKI||E!F7oDDH6ERFFVkH~R9X2X0=dEY;O`+>-!iHIz6 zhh8A&OXh+iK>(<({y6|GB#z%4UFxdsI1MruF4y|F>+CCj+wqY8^v|0KVn=hqVmIl* z(kMMyR#j!UIc;!#B+?^-Uu_8t7!2^ld++cfKYOV0l)n6N(LgF4`dcuX_JrN6E9rJ- zt)$nB+aTbFfJuVznUDPL@tH?}K=FYwIV8U|pwmPggJT+<8dpjkSO4QyNu_4TN0Z_n zp{%|3(#;OD(E;3Ez$^_2#6<`4bH;yHTwMNosnlUS?{z2xuS+F!W`S9GK={hrX68*{ zXM2&#Xj_`4(P`K^YHFAm4WESVO!t6Yr-GN_qf688n=o{AYJpApKw3v(7?JPiYTmxG z`xunl7__;haLq@JFl9Tdk=PNnM4}dH8)VD^8chKPQzt4b;S@()pA8a#1PQ-7aj@bb z6XZPMRHkd(sIsQ%)WtALyu@L$9)k!;h>)$B?MDak!5AV@ClW}NqAI>G-wwnTTsait zP=g^+p-_aPSUNATWGFqwvNW2eF&}^>w_FJHetCHrL)~Pb1@;9~H$aSVneYETF7B9U z7Z?G;U~Ro3mOzlbyLRo`y}LKY58eDNmYxh5#X#WmZLFMZ#*4mBNHmq;|Y7w4PI zHar_{7lXUqytD)Mv_KH)@`AHP(F+7#t7V}7;y4*v0e)?TFFUoCY%j?@UGiw=8F3x} zR1-rqP2{awhhDrjgGQR4mGs>Y>dw4;aF_cRJYBbBiv`b?`o8+)liOF<0j&}_|Bl?x z7X2v8c0>OEy-~drA=9Z$Xdq%!8D85HFeKeF19XPJh1afrEGy#MLGx9n!0KgcJ~fY; zfFyu}$0tsXxh~H1XOqsc>`Z@ua$~07g=b|NjX{ogYHIr;w_HYzZdE0C*!YK%5Fp_> zXZuiRPp5I{tPzrqsR9Ck{6Ik5Y36&g02syKBrq50^i3*p%Uc26U9(txK-%*5oS;KF z6rMDiJeiR@BMZ!V)})#k9c4ps10c~is|*0KE2c?!G)|ucD8LDt(I1=4a@&#K#y&Q`!ESibSlbNpe@x6EZQXvBMLWGns4su451!#xgl@0E{fV zMIwm=(PGr>2f;%0W`{*XevrC!iXf=ef|b-9iCGfBR>k;d1#HFumL;2;5@dhXt4th( zO~&m)KfI)Z74`eoWeEfZl$peB3}oQCkeSMqiM2#IEtcTmhKx+AB4S~Su{%xyd6!j%HvOwIDSu*L)<}*)Vj=X?4NdrV0>@h>kt^hABqlFCQCXjg_mNv2JIAG9c z0J#mUJhgs#|IFWcJ}~&sw}3v6(doi%7lW%o(wfUqnd9GF_UHvYMYsKN0Z^Y>cv10&&sC74mHKI{#g6Y z#i~%`#cRt;Ne^;NT8Pkld%W0XnCpAs!TeDxS3A+ZOrR_;uRq#t?x+JRp`_@RE=@+& z+m@>_3)wzjye+-kef53gOM-dfeLm0UdA`3sZ?g>q^;SV?m5k09%osset7pYkvy;mK z#`f7C;JM9xdQy(&ihu-#&nq&$8=7)YMl!&iG}^<~2OmxY5_EDqK?hxKohOleu)PKP z6VoUam;x<0b{dV^+9yV*$@GtZUhm+Er>$B^m(#g+qkGg&4Af6MtwrGXtfnrfRto{* z6!S&ePOyMK+^BBi@vXJMxlQ7x@^b*&uG?eUA?{InCABOc8aFryvm?y(%a7q5vie@R z9CySRrhm?CZg0?=lqL~Iu=6;MF&xJ(FIkof zLreLQ8lcp%aCq76_L(iA^>ly>Ed2Pc3AANka?qy>mIGyu{)6$A~YpVdn zi6&lI(&+>m6QeQ5qIxpB*{$1lHLd}xWa0UEwssjCu}lujVNu!cuI)q+on?^TaUzPN zfMJ$HZiZvH@U!Aqy6T$0#EEeFV6!KC!;%Gh%5EpSM56p^QZbrY&*R zd;LMSo)XG*$jYz0ch7zl8XpZKfs3D9jGUQr)2toTCx-caeQ*wX5^k5L=eEw{+M+Kd z&_rd-r1U9FCJfb@Yg>D)o(7niB)8s1D)Zc^WK{R~?qg-o#6eT%bjxuo@4&RP%QSGU 
zeJ~*3&>raR#~~ihS>aBsZDJr$ky>@o@8oGeoM`Z+3=sne&z{tbWzNX1gh$vBS$+MX zrep0Yd0@<8j)i~Uzc6nwLV*H8XI+ zx%rGGWC(%P>+$6XmJD0MhWi^GA#a*K<_ND313*REenSt~(B9ID0$E3|y5?*JU*4v| zfUNbcQE>E4oxopwF zNZCk|;Ea-WecP^}N#pYwXO#-ILgBZ%s(I5^=ZoPt{Z>Chxn%TS&*)&g>zy`}cKYl& zKN`!FnC^$2pG-(3-FzPVk+{h}-6XH&`$c?zQ&G{Wjv}W>Qq6v3y&87JMzr=vhIDE< zrlr%%ju_r#A1SE?KhMM*X2*yMXSwjaB{jbgwxsTaE$KTzpm~UTcVU`G>v#5jiB92< zLJNt>^lT`U38H>^pYGS`^%Y8X#f9kx-AgmWcdLIhWG6|I)!Ng=%`?V&qf8T|>6O*u z=5%{!^=%SU#07F@CKMp;Hid#C?VemTTGtxX(HkIWK^vCh3?pY)ESgiO=-u&+1358zMPC;Rk5j_D=#JKRr*-N*oxt|qd-#C z$uM2ntFV<-RY}#IcFB_t`JoeR3jh>VS}m>Ub&5bxAy(makXZmIz2&lDkla9(ZNq_~ z)Q-s0c3!HNqKPjQa!5i-#ZoGqqRvz2h3DT?aWS|dT(7!b*j(KFwSyf`FMqI@o!XpC zWU|@pHZ+l_X?ML|9-q8-mUiABM=k)+`Lu8cZSzowa9a3$JCXj=5}@@dLTq#K)8`K# zp8WKs=<~&`y4F3-#oxUdqogucb)ul6w3Hy-9NVCrT;Q~|Z6#GD<>h*rk$#lAcJddu z4<1IvWDoLDx%kLpdMd)vTT_uK8iKygm1uW)23$HMy+1qC1NcmOuDI@wLUQk_Q`Z?a zmy8OYbyO?%&nmC}9KJEX*d#GYBrTFNb6Ty_-g0323DTSR(Q^f34bn$E3Tf}Os z_;&}+@%>f6oZ`d$B9T+u9edrMu`;!yj*jZmk(A>%Y zAkMtJIGIRwII=xX(DA7i;#aYF=!~)~XGW zV(bJ@sojt{uo2@4kaon1iR=@SV+{U--B{pysc&MX;TE%;%b!d+%e!(mB(@Wq*bX+s z_Ccu5P+QvOh1dqeFxrJlmOQCRSoWfprJ?OPy#84Q68CEaYd%i)|(aGs!ot6+ilsHLc0);++T;BAD_afgFE*l zSoSN~gva;3#VEgXZS4n*Kiq-eiO<#%y%~jYQ%w4oYL+YkpATd08oT=zWpi;4W=*a1 z&0vloTsWkao6(1<5r2C>NbW`s(>B}-_kkbR^dr>9{^%1d`jL9GSX65?>h{T)nb=aw zztmJ3_ZXC*jo|dQG>FD+aO=S-O|D~+bTFKbMiLzV&yqk6(l6``g(7BSXo6$Z8k0x} z{Ge`~3yY*uv3LakjGr3utEn6@X6r)c>~2u(y| z>pv=u=6t0&$Mg9K&u78&qCkT%(|{<*dyAM)NhLr9+Q8~vis;y*qpQ2V(R?tSe!a9< zj_()Vy=Z^?M+!qo~1?WFQkIIm$~nfxcus zkD$zZKN{~Py9}9v#RP=*S{nr8OQfMoO#VKrpA*c@4YbSa2PTQ3djmtm$ECMy5WaK* z`ReeQdHG1Hxox{OUfVX`ReOpba;~%XI&S|`N{U-MrQ);Fp?o$tn8NS{JgZ%ayKdWV zt71EgNfnFda=`^KjwukyO*ZJ;z-7$~4+8{i*aSxh zxIKtQ{U(jlqlhvy?=rFrw4b6WgUP-r67`#k7bFs)f(5h3h4Deo8ABB) z=Y7dA#j7GkXgVB=#U=nWA`dFhDS{fRgc5#bb_D#V z-E2;D|Bs>|WP)HWw=oQdYrZ(Q^3{`19e`SErt zN?loZXv4Qd7!`={4F7kxBFIUHUpprf3Xfj7d`zf}{!4h~-S1QpiC;gv^Ygy9D=OaO zhs8jO*L}F3$C_57oP%)ypl*LgucZ8iJ(|cAnwmO1em4XprJq@95RjJ4#~zE4ac6X0 zWV@w%NlcP=H*8Yrb8Q*a_!e(p-(9U+C7A2e3P@d-bf64%@Wn#fFyiwawKnTCK{#j_F+> zq3dxh7K`H$l-BQO*S^-v##eN7?d#pImk?fC3kJhjpvmJ`E@`@qHdfosKJv_R9FE)M zM1CfjKeAG%jW(uJ*a#eeF=*^7={JMnt(M`~!vM2cvuQam8+nq;Mgtj*Mq_$vn57&B zgWW+5k6So`rTk_N3eTqKRR6L`BRcik;nNCainDN~IPHir!Lh8F_oz-l^mo(Y zKmrXVCW32k9@)q7cV6kv{nfX+TB`7c0#)!bFO7`1;I;Ax<{S0Wl7u=vg`}5Z0pgCm zz)Uny8mH&mZ@jix@B}_t49x5$`JCw6{bKL4$B!RBBQGXB>Z(I}I1QuT2}tsbS)BXs zDbP{LnKOs4)Ky%T2v489T>ZB2r*+NG?%Z`8=RGL+T{y1w@6!>wBaq7nynbqy0rP3G zE5U!#lYVapx5qDeli6YnOICT875%{bf!pnID|@@GR(bb87b$i(O$np}`m%v>nY@+i zTR09|16!ZglUDKk-1u+Go?p*BoT@lxm3_ai*QNTyJY=WI_bbZCC4r=D&MF(e9>p;% zn;mr7R`T!bZuH7l!V}tBvX!mx=I5owT?zIEi^W`V42yw;V(alpc6tN&+i7gv_&-i` z=f8ORzDZ99Y8%YrDSFrzAuQ4KDpX$^zA)S5+sOMaZE#UvkjvpIY{5n_G7CVHPsv*J z#0o`aG-|5SKriMu!nq^>TJ>DhZAMKd9ZO%IwcJ>6n82j!y%bs_T2?M^@+*}f#^o*IY8K0Nbk&qY z%)KYm%lyowd#>ClGpo)!S!Pq;CofIvQ!{Jq6_QRP%V9!J6@}s`p8PBc9 z{~_!8W12|Quzy@JCVSC84%kS%t5K>_Jr>qhZLw?J?Xs~14-NL3xGfBaQvyf3S&88=cU<=)rmfB5Qgo{ZdwzRatCY6g#Hjz7O{6pd;JrW&;eLqpBK=uj8Gqm-&X?8I zD4Q15{z>6vdvPN69@AA?8g;#%!FKa;Zepc1?G)XR;Rsc{P^QA;*8G(K)yyB*X? 
z6>1A<0u36_Wo_APgKar$(E&qhG*+wHc;9ewYFe^qYT30c8DXfC5W~fdY=m!{vNEOr zT)le}FE0lxC1Hr(HnCMI#eXAKU#$C5JYXC`1;FY2lbqLUm`guYOZ!`g4T-N+YBjD@ z-t>pSAw`t9*`K&OXP6*{2?M+)R<=TWviZaRL+6KuLcG9YcPjz)KFu~4yz!M>{K!r( zF4Hsj?vK9JU*OsqipUu#hVrDw(?AApiE5spg8tN3mL7>Bfc3mV6^&?Kz+ry1wy&n6 zbv}=suN?}a&hL8p5G6l+dL~@b@@GC%YF3FZ4#`ENI(D3j!7$t24$Ae5H5(&}~Ut)G4 z@4#_i?BT>jrcgNIJn|DlQP${BKU`khGVeg34pf<8+mKoXR(7isC7Pzh8XM}&sn9YG zl#ZlgL;-cMngyY_0Y5ou2Lc@>G7Aeclg#&zue|t|xSp}~^lhuyzFovCDFZ|b_aEAU zDq-}Zy(g;}))Xl^YC8tY^@Z>Jd0+zxbG$NH<3 zt;dnI08u)=a@Zzsw*)^oYA`iPS}}6KWU)xq&n($&cEqJqlL1oz)9CWjLn&q0xCA_>%YadE_mr!LSYM@zuF4=@p$~_;`FDlb0Pnz`?lNp&+m%=mE;nMZ99$7}eaU_fhmIbylqe*3hrY-x$amOyvnk)hAg04pJ<)6vka4a#Y> zlP=Qp_eQl$aTAs~MED@!0-C|U)Z-oaqA&0B4L0v|X3&k!08oJ{w`-Sk#*h~YcO3K4 z8gu%>O~0g3XTj)txn4+iM7(gNu<%l;wG>s`iIZQ#ZE_8m zTH9#?aC001BWNkl|+B&>Kc-uynDB0v|sUewQ# zJw483W;N%r=OYxV8U5Yvd&;JMDwpx{IwA>1tGw zazI{Cfu>!^6%g4Km;zxF5hwukv@CF#clM|cG$$@6@km5Lv)q{rXEI(;F!@#*_bo0C z&W0XN{BqYj`MDE(xMyu0<>uM+5_;OWytYx4vN!8-QI-o;+;u2|B!~(;s%1B?e4)ID z@=OA4+8t4R!?T{6ob=3iJjG(n{@}asK6}mWlAW)VZPV7uc(9IfINOE7{ha+J2%%La zoL_J_oP8BlXFsYCVfMw|*N<;VK0a7ieh?*HfkBlgu6pvLo*2lAnm^jss3O?6q7CTu z8YQ{2wt3_i1A=+`ned?*yANjhRYjR0-pnG-YQ|%$M|IUzmB{c626T?b0@tF(o z?C4NQ=M4ZYLt@x$rshF+hUV@X5_e;BX#zab*h*n+Ecr7^OtP@tTJACTikon|*-v&Q z-z|XY|Au?kL9xHZ$=*Za%&g8BjQEfnb!Z`!O+DV(+03_++{&oq>baU&CYQ^L4srO$ zKRMmx>wj=zbu<8;eY5XbjW$ARZJW_3c1|yskqUczRHVm6sBfOIL==gj9i?bQ;RBR( zqS@tpLH!vbtu)G5o8!*7cXq;=yeRpPa^>KdTovM*v@cb*9daH>%w(dgwkuw{3* z004SQz=n7XLO0#1Z6Niss2tl2Vsea(!hs9|d@UIOfCkR;Xdg*yaZsw@-xV~zI18$C zmowwd7$Uf=7XX?FFNS{ouA*W05p<>g`7**MOQ7rP=m`%0?h**PScsyc<mMYp;XToUjVCcqYk6*aBr3cT;$!)}9t-cxzJfhGK z4(&j#$weYy&wZTjzyDxg#lELlHAI;6ug~02R+gg%p<tNF z#v0n&OclSk4?X|)%;d`+snMj7{Fkt^k7*)L<2IW+0H3OH5xVPMA&^sQYS2bj4J+6KP-a&@F%=a`rn zSnb3n2EMAQlHn^|wr-QMKp`odl36AvpIx5~nQLp8EA6ivreszE1oU)Un=@*C^~7El;>kn4!jfxrgg;f$oU*z_=FxVh-oXrK(8zGZvp0Al}xG)g5Eh%c7q zC_qBb#Wp6p)pb}uy>Jz-Z`eRUH6GI!H*Uouf4@7iSLb{;nuy*2A^ZoQ z-$6WI@{9CHOJD9#Of}jmjkW@ys7A_(<1N`B;5Zo$NB!;;7xfcRZdBr?*yvX~f!U$a z6br9v|KHOGkH-W84FEd25%HkbzK1|D4-_0gfaXi3QlUb4`0yn-6+($5=gOn4^ZHLk zBK}rVD@c~S!*G=iqG=uwuH7&yJ5p>ovyDp8tEto|)(|Aiot57jsILP>l-)bk&{5TB za6xiUyUe2cF^bBNpf} zLnEpboZW8Z=&+Fp1On4hl3WIWPQ@aSU_H`oaMT;on9Y$V{3fkqQ1TX(NfZ~1^GaoY z_BRI1-`(!r66^FEAj06l4HkG2>e~Qj8Uzys33$ROQ(B2OqY-|#tU)XEE?io1w*1`Y zkS7`iW?MWS5780<4UgDn*Vjj5G?WY#r%pvcL^ry6f5X=}U=a;?o(3WL9A)=CJYVi{ zx`hO^RgM~mAr;jSoSmCTR|7aSQ;>*)X);9#m&U`ntVNO8*_1z!a__M0b}*gZ+M>j* zXt2>IMN+g$BrEH{fx`u;K{{ITke(3c=jRI#9sH~yFFRW)lqdnve#^45LIY9s6K^x=}hL&w7FsUDP}e|KbrCeIknf@==OTU59k`R zSang%cwKUEcL>nxOXE(JlaW($<|KL)eoQ4QE(J*y-B2lnC`e zB+;1!ME7IlcT4aoj@VpFzE{w-pCWW^@7P@0d=Kc6cWeBB?yr_s)>iMW9_<|TyXg*& zPOLXp2E!?rqRjgWYnW+gU$JxSl(kG!`@&wkyiB&W?KWf{)WNT5aLP^>lC-f+h9=DJ zzO{puXd~vyHZEp@94z_8p*f}@-SB)>&?AvuOR+%!pQ~g!yt!&<&TsU#KOail*=wxc&|1zY$ErA^H{%bx z*`cS;=k#z@Q^u)gf#|qcN2%rGlt9H$a8!>o;`&Eol?Y($>H1?mXttbf)9yXJCKjXA>O%8HYPbYNg$cH-z8s2fNB^iPB+qH;SJ+)woEpjOMj zcQwBgKwOTu5Oy3b6yVVyz0Glfm!t>EQ4J0j=$l2`!}Z4PFb6`n%>tdg;=^_HiSPvE zaG~;G!2v$KXCXEf`AXqukl-&}xo}cgUM!RduWX(F19t3lzQwWtber+|OGt;I19F8G z&yv`;cR)!|bFd&@`1JYpPQ4l~5WhEgZHTVHw2YCy+7cQX{o9W}{(H@#<~npt zd9HxMC`$ae@3@LlTNtx^(5)4Xi^b!rF{j#52XLxHqA@0Yy-##9w^i*SDCXdnznTqwaa&X$E!n3t`gp+;cvRT>! 
z7h+L?BWbi|PCU2$P!Yih%M=43abf^YtyY%h#2K6bfKmj(s#Rzt-nt(gD2|E4@vKz< zCkvpo9xMRjjNBDaaAMj0mu@uvVZ7s`e+6qAXDT5eBmNZUi1-&*{K$eLm7fqlS=^}} zO$?O-doP0t#&)qll#9M_V8y}-@V7WIOugS7i+D&>h~nb$iPiF{4LZ&T4g2A`m3S7I zP{{Ck$v~kpK%`7snv#~5mLe^cmMUag`TVIXp1)*l`XicCe@G*5Aqk0*pjr6m~I8P9;?6;1h_+vW(AUDA{JVWsT2~abd4kzO9CPd ze}CHG`i)gFP zLj-9T;7#oWkpqS`JzbTqg#>KV0zLtzxJreAod7CD_{0IZEK67|keJr9BxG+0GBF`+ z<(6@}|6Wbsjq5cRW0jWvNFbchbg=*`notzuA&t+=HR3MdiIWg@M#3(ToSju;tiTJ= zxDXe7ixdzjjvc^R+5^{(3o9JwU!2gO(Wlf@=q2O_ti|$8K3qO3Z=j3>G>Y&k`D{3) zG!1^z(lWQKHx|wB-0DHc#b=0Brc@Vcu@+Db+i^@LQBu5c%)jCa&;>wbYX~uD7h=!WR&%Hcq4O7gN6>5%#a&GAy&{8-&Go+Deixm;K=fyDCLYh9 z7hyUpYW~`{wf?mU3sCc*{i=hX9RJkQ5UiQb@j9O0o_2Z6)cnN!vk?tZWxX1J@7(93 zZgH^)9k!4@tCm_`MrA{YFuN4el;mANp!w>8bJy=hY76@D%+kCTRD);i*-FLdS(}a1 z(=MZ@FkOa4?Sfx;5O4>g++m(PU@}##=J+BK?U1!hE>pmuDwQ@}rc$M=&=e6+dBO?V z7z3Az;VN`Ex)azGN3)e#l%T}v%V8nR2{cWU)^nG}X9nEl{(umBQ9nrheGGG{Z1mk=)ItlCz{3kGL|CVm zAb@_Ag|VBZ(oC^XpP3{AR3`h=n$L#Q;FhvsLuraaTjHAk>sCDiXyVw0RE;Ij_?5~@ z*AmX`-2y!bo_7Uo=M?oA+S)+O;;p3&1Cw36q0n1+Ciru-_xR|0 z5|8rH+@rDH(MNA9p$SBu`AwAe&y8& zKSb*t%_BX}P4vm$SmT|gAjjV#Jr)|L!z67X@(6^b`vgO46)7o6D9Yxmcf?}X!|ELo zLLtr606-7fcPkb96q~VqTS~Tb0;g5UfSdv|Gr_3aM+y{EQD#ERI#C_e#sGB*97hts zoHiq{D4yd~X)&iL@eskXvYZ4MfGiB4is7tUjrdf3kUa<(g?ls`3ca4TMr!WO9z7c; z+5X_bVsJ6A_#%$7(dt+d7e^Gj*eNC?oj6qlEsUo*J6sQ1Q{v15t|VwOPSSS5s4^Nc zx|Zx0x9SFsFQhc$=FB=AdC=S}NwRoQQzpts%4EPNP`@l$RCs6`Ql!H&+2fI&&1D&= z&LkctE0>qp$XwMHyTI5<=zZF4N1$&W=HmV%ue#$=M>O=)5aW-(@PIG%P+pQ^+zo=c zj4^!K*AEgub7pDJqtPydq0<1Y=#m?CXfV@ddRF=D$#|$jr)&Aoci)wlFKw5Dnb2f> zt^p~(&{kEX6M<|C1tbX&PyuAE}<8MxX{q-EcbZV-%zN@v%3^H#PoA~@f(>o2( z=UnyJ+&lP8n9#YgYUD#7i4x!V|1jvP4gD2L&;JUhsG)KG1VR(jPx|hBv@|*1@oD>w znZDqS$@*qPv#;&sKVw&~*90Nv3Kr^PBXQTqu+oAF^1zSF(&4ryWfh=lX{GCiGao;{ z(rU{11g!`D5bgCSPghbb?qn#MWFQ+3QpfmXs7~*XcSmn?JXz>4PsI5vLtfIy^JbnL z^bL^$&rC;S!p?>Ar886iQ+=2~g4m)JT3c)$x7^cf z>%8mJtBdHf+CtKbl@(e~N1KUmjzbONaab^$(;FRHyR#LmLf!R;$F1F=Q%?&9=_SRo zijm~Z$X#}y?}OF0*Sjw~qL`RReLe5b`~7{sKZ5o^s6*aUD{oN9t}is2!AD~tQ=%^FQa zqtWQ~mS(8v5SXrZb^x87prW0lFVUGapaaPSzX7Np5o+;S=%h41@c&Z$CZ`)<{!bC@ zd;Z)ab?XK{y$a9Ky92s;)RP!$>mUDPX!h&bk%2{>o3uHsUK6Axt6R-Eo~M0xN}wIO zGo(i1!Qe13jDKpvIPtezLm9c8)a^(?C7+wbli8CO9$v!#;3a9%({tty;|4YS!w?jPYRUkBPGMSb2)G!-(5}2j{9vwJNTBL3!f+`3IBq1+4x%DH z9|RR3CDHgiP(ad3dat2r|0R>lcNj#BVFnNPiM>qfGUzp<8RD6<{BulwX$Gy1^nI13#&A~vS84niRe*C ziiCaoj+(-X-X?IL6)48{(tzp_ncwN3r=k}dz!*Ro$=~jez(YeH7P|Q@uN3+Dts^O( zFDa=1^_2FZxh9)2Dp~Hk>E*C|$yHl?Z9Nx9(Vgx`*U-x8j)_}~i+kFzE(98@Dp7Z*v845QUqsq6mn zrf%#?u?bW*x%wic2W0+ktD)vWEd}kwv?}p1Md4scQBSDx?c9R7#tdA<=ZJ7E8rcN8 z3D*Q4YCd0t05y_o@SyM$9O|y!+< zL4+yka5_$cQe#lkRcc$zan}7V;XYJUN|s zHe6Q1dYkQA*J|`Lu%fFgbC$Jm`LJ?=7jzWdp zRs$58K}XrpN0~h`-XT|%D@MV5 z`s|S<&`^uzPV4wmZ^z8cOvg-3#V8c{r2o{zu>^!RMIwpFMu8uQ3`8Q$%^FKMoCLBR zJ=@#E@Mxz$L+_wwx4+WWx(8QbM_VF?BYKR-I%E4@CmiE>JjMllSf@$HdBqle1=}$gUu}hPe%|_1~g6tva_^Q zsx=T)H9>(|YKej&QnYVw_y~rHIFRF|7=Z(ph)aYhAL*XaP0$NAsY&lj^qhe_N0Nkk zXr0E<4VfIdP$Z$Kmd;Fn3zv@>(TSci8AitH-Suut{AK~AQW5U6;E3y;l(GmfImyMa z+Xgr)9+S@w{_H#+ogu3VfPyNz&kqBZF zd;m?|a(fs!4Ew_LftXBwL~Op=l90C80aN=+-SzXEudjgmf**Z>(hV_9V%qoBKhR*4 z?%CN9UqaE_ac33?JysQsqWde0IxjJ_=$*`F?O^eQ(%cLlg9Bx4JCl*0von1+iM9`$ z_@*+Om0k{jN=zp?EY^kmoCCYx`EumW!#W0!B2U-q&^|=nu1ZI-67@3Cw^zp zY}0dwBN)iBgEpaPl6@CFmWduW1Xc9YdkwS);c`*8J5Wl-9L2U6MP=ktsydUB9EL)V zx|xbFMdNj>8rQ;!Ije`UQqeAMa{q9Y;xB(ObdY&`2cXCh@OYe5CK@=xOyTgEtV~)y zQHlLiC{R4 z&To@Sc>2OV_?D06GW^1${bf=un__B7*fn;IzuDhB-|T-uHD3u$TLSu8I_dTaJo@@B zfAZHz=lA-tQ0V5(YgMjkC+VJcjoH+v)yByknR|rVJpv(5z)WRG_n*jMGIth;&{n|* zhbFv&>^#n4IRMbOCv^ww9sJa6=*HW-|1x1@akQk`yEqp5VD|1!Tb;9~T=9J3g~}jC z_mSwkE0`3QVp78tAv8d-C&WeIH*-mYN~y-vaW4UUYP&Jfq 
zHwkVsAKU%V5nFe}puESd)6sRHbj4P+vU1PSzdHT2yg^cK9^^{QjS_fald*eF(m_@$ zmCk2H27{0XfRe*1m08s@EX=JZ&^Q5iF|I8YvFY(8Y~(Vwh3q5$+6nn6Gc`2@fTZtr z7ZG{&001BWNklvNyGgrd(q`M`jB6d*Ky&Ahx?7`v7)hGD=b1r^?cU7m3nGICKYYK>_nmiU zo_)Jf0_gOyKgYe0$-Nud{CDG1 z|G2WWgz8v;dNfy{kt#s5sFGaIFf5bbwQk=*N*N#$abEfrY;)M^NoGFJBmn}sqVC=O^|G=$AvB?A zZ@H*OBx?D}WAtk(q<4dVY4mGUW*n(MWVDNB8vgjF>$m!Qg!cB%8hQ9t`r`Rd?%ulf zc_P)3o^v|3uR~)3({nWv-x~?P)?!hqv|2ez=>A66Bta6^wDA+rwM%5XT53VP#PKGA zfO~$W%8$`TE{)!ahH)mID@>?CTBT~a`u68sh{r1kko#hsmm=lL_z_*=8fU?D)>4JwuNBzH;@EquEt z+;dnd!Tp?NqH>vl+smmD;O?ZZin2QRl-hw+t?oxq(2pyR*H%`Fb$<^vO{{XIV3y6Y z`66yWD7&#SeR_FuJufBN+4Sf)1>Mk7_sNF=s8p+tmNm=%eHB4I@Zp_OR_@)P~ngPBtb z!i!_@j`qX$nVKX0=hf}XmWBr9UV$+X>$%b2f8*A*MB*&epg7|VomOnJ0-%#dBD|kO zg9nSpbo70rG5i7eYt&;3^te5fohCWNb`Vgx0qiDU^EF#JlaO~$;7QVR0PTxwTV{>V* z%Nv-SjK!Sp)%6dj^2c`E`WHQMhi<^x6xTbOo)=&ApE&si-qDa(Kk~e9nOUS*#BmXL zTimV*BQ&n`cD~_54K{u$g0@RBSx!TDxa8K?M3!z%m%I?2$DiQSB20l|d<+Qn5<_2L zN*AORHrnFq)P?!^g*R)@qJ+Mtw20;-PD2NaLOS?w=&uKZH}0PQ>bB>gQX-Me*o5Uu zMSYtF{zv8YGL2SS4+s-}%#6Lg9c0$t3^8hNjnossvyt9?dfOU^ z1p=JIb7vPZSx*HZq!_`{{eFYNq5wciKWPlR!XyFd-xT$b1PP9AQ9)FK5&&vXNM=f| z0#(YJo12wtJe>s2bwWTP<^#DP`kOD)crbkCY%DRiwzfHTHZk?UoXF_)`Z$MZ2uDJz z%~n_XziFJs?NArR&3%gW1TVD=SN3QR}}LD##J{`%1UqZjdzq+0Ikixb>15Kkz`5>%f6C5zz) z93MkEh%5I~=0fYp@Y?d_D{D8qV*&kK(t9>BJC=Eve_c*)eH5>>O_<|OZ_`j@@$w>* zWzIF}V@)Pq)4<5=(2J!n$NCB!j*${1OP5lfcJM6mah^?<`{GDvx6#Z~O!3)x7dy~> z+?1tRn&LW!W(yQmU}=V0XZk2$bIHf#82T#5JGM7CMZPa{=D)DR>FCc$Xpk4sqJ(mU z?#L=E*9U_)zB&KJ;X?;3>NYI9WHy;hD6E1!P%qePS4unW63qaYL1siV_Ld$Tb`pwe zYJ_%yJnD)1jUKhy6Ly1lJMsJ@IPiVaM5;a34m{oi$Cvyn1)>W12=^lytuBKUHZmS` zoAG+RS{!6jDWTXC2)K;Y%ykgq@zi%#$lo^&#?nNFpK*G7^6XmW12cWQ0C zdwi8AG>%yw0ChuZNp`qA!_VBL)T|HH%0Wiok?T}P)>fXVOlDWLQaVXmR0L_lBJY6C z0tsqQTXR)~pb8>9WJa5QwvxMCe0B9h zezXU~lPf+Wg}wrn$7&R8KhGk!8ykO}|FkQ)? zE;GNuy_s3&UkB39rE?B8IA>~ow z4IKH*7YXGVg&rSfDKvhT&q^$aB+V4E03|fGkcXQB>stmkeH86uBfutmg`#o@=sDJR zb(>Chn@zq^Tii-*@#9kvsOAyW;5-mnL^Q~?BAtVe_!}O}T|NKgNB#XzZ=WDLEgD(5 zO;lb68d_#={*_P!|E~iapvM7AdxH>4R7p)kO+$lJAO{ontBvy8sW7B*JOMa=oez^k z?e?Ty)(`vyxu4W3{OMQ5Fliu-F2B*54qFadhpk~J$QVHw2%NG)XhA*&N&)mL#Iu?3 zr0?K-6A(~-mwmHRi}O1n`~*C$y}f_vdAT+`H5V5o$emz+>o3Xj!C|?wUk)y9q<51+6KTS=Y zyXO14Z~fcl$i1Zq=c>t|EX$8x!KA1)DJ!51l%U|OIf^xKkJYVvui=4B1v~G{_R(}7P)g<498Kk@B_B}g`);>z zV%wsjZGq9#*e=tLu{uRai?M(H>OwGxsBY^dv`3?(|0#y0oyv4@^z9}1=$lVLPo*kF zg+M5h2<^%SD9jFU^FW@6YV39qci!5T!_sKy|JXYJkT%jZj`xqNOTmBDKiYCB)%c@A zY9!ifbeXKN$Bq~A0-3s0W8-Bvp*a(rjt4n%v)(!xI+a+_h$k9uPONL11f@M2HVpS; zw}(iU)NO;;JEY8&rPI~k75`XJIJoDTiMF_+CNycIv=Or<#Ee(Dz>A(6Y5-hnGv>7?89ylh+ zAp?Z+MTK6z2EzEijN(!xemqG;kAQ-_T5Iqu5`XZ5!P~tVnSM2s{BJBmWfE=qojhu& zlK?`ObCVce_3|M>9LUaeHQRZ_39O9E2=u1 zD$9?Zh4AR?KiWUp^9oUs=A%)nt~P-5BLTY zqr2k*0Qnr6ab_I&-`!^fW*?qbUhw--CJNi)zr}q)0nc+zUT33o?0B4y=EmvVkx)uw zQoYUL9Ms$fHYlL5ryDenWrOE86h8H@6x89=?crgh6Yx|aJ(Sef(bIb{y)pdotH0j+ z^vXw#pWW+jC_YuyYl1Mj9NER*1_#;U^+XUoRq&!X0tm#swz66j?~-In**+3q`l5!4FquQ?O&d8 zg09<7>$->uZ;9PrVW{`n?RKlhhPhYuCCxo&k01LDc*K<_`ypgM-qUa#oKXnfi@@Ef zF0MXT^hpCaqLoeO08lA;sih1r^MO|tdHIjmd;+0cc+9U~2N#m&g$LWS?!n~bf;*Fu zWH#{#6qAT7Z7xv|Pd=SlT$EV%WZoGH^r~xGfg;1ZuJt!JgBMCjiV{w~n#`L}KeHg8 zGQ%qh!!ru4sE7G2N(-RwzuEam3#b;UNG>?+QRBvMNT!D8mT1 zXmMWzMx!#`iL@*-vLG{z41b=H1c|1@bF4DQMwOg4$#hURAiPH;X`CRXv4Cu<`v-R@ zpnxY2|H>0+o?to5KE*|O9=?yxcL_as2EzT}uRj0uit(p7)v3F%rsni$i4(>8h9V4M z08kP-U*i6SW>rD8HI|J=)!B5BUI>ahxH=Ac_Vi37n4WQYJbEhN-;9udj@i7N1(zvH zr(@JigaoB5ZPXOhicG>gqoktyX;rlqRP;M*Q)!K!8@R8#I+hL9T7n&2xm&N=o@IB? 
zo=%_>y6mOWn_YF(fPSd)Ql#~_KVGn3dRk>EIsI9lm|w26jV#yHci}7T_WIK$cvaZu zp7P7bFvBdDd;B>34W7Dl^J@2{K-Xxm(QYxU7|!=#p!Ab+0QB!|M7`NuRbNqt>)}s) z3~C86D}3CkI8Gco*l>X>5EJgiEbC@j*}=-9BL@QNc4yp!o00r*DFfUeVVfXoVbUR9mvJ9nPkxcw)S$!T&@-cJq_dx}rh7y%$ZCLH4k`-dcF>7Bq)P`svV ziI+5z7LJO!AULu{2Jj^LCCa}Pq+*oUV)N>+Zg1Lx6IQ>s5h5w7of^R05O78@SVunL zJp2Q@{FL*#dc<3qowvkbIO<_ElTrA23WV>+4s#*) zHueg(p$?Zk*mwY+ZX5udWy8`Rq(F<=Y%Wt1nsmBQ{)@h;$*F~@g~f%%gN&hBw>u1E z&H@~>ELI1bV;wn195&!73GN(@xMq$8!s#RezLkmiOX>SA1xe~!cykY#;`*3NXkHwq{LkWRRm+eO+XVtFX@8Pio1*mfZi2A zI(GrkyFwzmdW+s3SIHCwX&-QwG(a<=Lt+xEa)udH=G@Q2aXLe@-_Sf8hi|70`=jof zwvRmSwAvgsKBXL%6yCIrBc|o1;)3Njj(~b(-1K|cxC7<(ld0~nbEb*V#Lpq#|MK%c zUa2>)VK-k~q$jx`_=VPUHKpV^y+05LsdGuHf}*y9x(-=MkHre%QhEuzPJhHk#v=Y1 zmu_@(#2avRam_~H5*0KSNmHD9&gYP!(Rd9o)t`aD2>jcc4k7K40h{i=&7J+(PESw&=zNLKvs_+% zzBT)Dt<%Dd*zM-Evo)3G79RjwRk3pU6adO^0E-9n%yYnL6bOx~Wp8~C`tGgut*xy+ zwzVEyP@oe}zcT$GJOGJEv6v_n4g;j_#RUL%x^KF#uWw;;VRC9v1=p>z6pw?r!Rr;7 zam2$53z;yGywEpK1VXxy(>X!R<0@DoqLs)SuWk#8RY?$`2NxJ2 zAxJM)w|U@nKLO-MAJGYsMnG2)P}xnhq9eltpyBl#zsIigxvf3=`gII~Kq=?odno7R zcn%vDvrxcNijq274ytySdKXa@Q#Ix4aq8E@4-2Fg2>t#I4MjvB6V54LuT%FsRZs8S zzVYyHUrv2-<)iZ__11u;RIiQ#;i@4JrJxrG9iI&5v)E9uo+0Aw!cr-H4U1X<#5 zH&PL&%UN}1sW)i5$fbk0?8s7VCc*_lMgPZvSL-KnLjD>iIO!WoivUqImsnqFXsT%{ zI`Mz9&OfG!G>zl`XpIT~0?A==-2eqDNnJfSEn2oc(2d-QNHVRm0)lf3k+a>F`lCtH z<)odx20^Zc5bU61x6U$Ku!P&%q}yb}We(G(p1qL`v6@cK0Git;X4fwa+WjjlQ{PW%OxjjC{2#U2A%7DvYu9?m%wDs%9gYl`c{9&PX`VJm zX^>zLVNM8BQ7#My0t+4J2~vP(wk3mH7!b`7DnV-5Qc$oYm)rmb|7!tnzG<<#A-;4& zz?%fs1Twib+U>BqX$#G9F$)iXT4^rHTfKtFvo419rG;-GA&e((C2&b3ENnX7OJHRk z0asLdl9bd@!6ocKZlUEmNTsk1&(wP&x;zaXH@Rn~E?<^VmE`p2tbRhjJFSJ-5&`}C z=@zzsyW0nO)adC$Sk0+cE6Nmyw3w%~;j?S^fmL*qlMRq9Mf4YNY)M&<(E<2)h&+qQ2-NOfvg$G{ z(Ui#yd3lP7*MGVAp}JnT|InvblQ(9sUahMGK<#n>^wY;P6(kP1v)*|`P54ZrM5aJU=Q9pkePN#>{B;g` z&kzTnFJJ%hJvJpHcOlsV${GYNPx777%9~?OM`9IDL3_#2Y z3hK9mg!agLPSi)HY&x|{(Wq$n^?tX_=CnB*oRP^cW27tp+_1fKcgRp*_$woraebE$5o@B@=gtM!B`n{}0| z1`09RdDSW`}1ycFN3vWsOT7xDj)freGPD5b;ck>rr+KslyX`BjjRts{3Q6U6(! z-zAp@o@w>ICfi3LzlUQ$qL31&9UO<4K~m%1q*5_-?dR1CsZAH^?GG=NFd zkappQ62e@VA{t#TzA8xFe@nU){`nxIiUmn>$^7q!q<%53#I1fJn3h8G^1*aoAT!APLWRhn~Mt}cV? 
zN9RxCRAh0~(`K*wtE8ttlMddV%*GFr+XpWH(e=;o{CUNX!{wfQdH$+lU}&(ryHTZ7 zsZX8nFglG9Ti1Zz))9ek8^&ID6n-$SJz=xqlyHN`mNv%6@LgXMdK@Cv|ElkB?!R!U zUZHMK$RPeaqih5Re`#;E4ER*5s|yOw7Sz^OH2&vbf9$AiE>sp4{egRO`^NF(04Vr) zPe?vfQr4_KYmi-Ox3>>HU#-}aXTZBd6%~cx;w!Rn5`#1C-;FE2~G z%gdJoc2BG4&i3R!yY%%f-US(*d-Ua(*Z;oa*Mfv%5SoAFgQg~(LZwn+;T;I4aZ=yW z1(=R^I0uarOOKnz#&t&9(o%5a?PSFKmP~YjZ=XW}op3t8tN*vEUR|V+6%~Pas#Lr3 zWEXHg)9Nzy-h#5)lH%fuipGtH&nwG{iV9SPA9M3h=AYbt_;CIEm#^dygRWNAst%r2 zequ72`a2C^h^V31uut0-GHl~>&*8(R_`1PPbn)aKbbRFhDjx}{meeMM`jzFOx$ zP!76VO5Qa0(iQbWs!YPID70)I@4!NJ9AW`_N7C%bNN+CD!bmNV-b}D@*I0nyDRpQu z*KDlagtOOrv|72L)8jV>DPpdeNFrJ|PL^$2nUkIOA?<{7-Uw#v2miV^D^GyJ#MrYEs**t?WdN_=L1`}BYzJD(EeFgSt#c%h6g2IXW_^Y|=*Uudr_-*6fi|3ur6Gn9MkPYEv^`B{$#@|j% zng)Q+x6lS7lbYA#;N8_LRF+x?Nb&hvCKU$Z-;De^`(PYz*17uSz^d5 z-l5uj^1QYH%LMbTSm)>QopBG>K|(|FknXIyaj~fHLq?Ae;duDGAWb= z(gM=POEiIuJpsLtAGu;K{69ND8-&zCFnlissS^=UTEY|3r&zS^s`teI7(4%%HqJAS z|5J#N{BMG^Bq|m^O(Kz+5|Th1+?K7H+b}liut{S(Ay<)%)b7~^g2JrL{5aV(0AoO$ zzg>!lJbWg-rk3G!sX-ApG~;LkE*BPRbET!n!89Cb zZx9ufO(CE>=a;MqXqc125*>EPJgPbjL69YiFd2GA^$NYhB)}>N9F_(@`p z!f+hLVNOi|4MxHNHWI`W1_hS!v0-+=>cb8P!fLRmXV8tfETANL4 z8)ERz}x%7NTbp0$<8w77Zd=c`A4Kj>tmYP98kyh-h21(xv;-#PGTXR=3cy|Sw zu@8m;M%|#Ohv745gvt zw@s(hDR`Y=OVVG%i_Kmh&zzg23&>+ge$^lqZ~>n%5-*;>2c6^=Cn%_Md#Xr+%20fW zqdsN(i#zJOx&cp6Q2RxFoOW=o5btEtoD_;ch^OXUR-TSY5uSq(FH(Lf>=qbsdc87* znFoFevWt?Q!T%Vp3D^N!08|vcOQdDsi0LpbO6b*OFQcOgs0_<-JzOvt22O*K0MXY0 zHo}GjIvv2vGT}hLM;ejW8R(J73~q;pJEhF@_;E9fzxK?KwtXP&lK~I(Plktq)ZvBa zoy+h>0QB`sZNt+KajwevpB@G}3~H@bdwgi{TC>5>$NZtCq`PBqbq;qa92y+FW*-S` zT((U#oG1PWC#bd>PL!6OgxH>QK$U}hsg9Pl;>O%6V^&r{>*3!OXQ}d!eE#TplVa8@ zx0aSL$OE9yJF7;k^2SuE#`ZV63SNbCg;15Mq-E5&OF^-oS)wTDSn`cyqrW|jlVjIc z-@BY4H(MNFd9iRvg>S{h6y9?D^A-QW>fGF>LbAtl|9yv>cX(x&0~1f;^noBn+8h|{ zdP&V%Dak{u! zKTj#I6JL@cr_$u=CK&mU;3C(RPXVZsE0p$z&=@6NkRpOJ0hEG*I(J%>r+Ar5F|4In zZ@35wT7(MPrHj!HTJlm(<}Uy!Et4EH01C>}DKQ{f=>lnKs9=c5mzk;MnxVh;;{uMqcx zG)SsXH^8tg!(w3>_{1fuCfzx*;naf8y3-ac5V~=}i;i@U)dO5wJ#YaIXv^S4cW!}{ zAxH5^Fzd4ndID0FOEQP8FXu=H%}|1#L1C_S)s8 zZ3X?|`SMuaXjS#>k+*A3zcH#AJ-fZ4U5W9tmWt$VK+7S-=PH0MFRx#?@X>|bBhBQ1UiW}nGig^2-TP?XwNNC~MSaNhl;~d=fh2ZgkJN%EvjMAeB8Elns>1srSX>*Uk$<>sTjOjEHy^I7}xuKKq+TI_H2 z+b?#2g3>N|TR~|rDS<)`8gm7C&Wa|dy!gOUU#zcO`8GNs!#mQ)-ZC-*3j309)9R*Z68V~ zP7$ds%Pz})4K!4>Cx0J={OqHLj5!6TPMm@aG{>k>&GPrwF~fw`z6J`qorEsKzdtrR z+g@|JsdjyI{bvy!a)HKA|Gm5m&}vZ7rd-7&C6mY2uYUC2<&}-g8!Ia-E_o5x8Min#AxsA<;e>in`w9-N@;or zQ;BEU01GXcK>fUdcoGFolQ&&Yr=uA+QP2TPvAwyg{+J0uyxE?oi^m*69@Lrf(o!hs z-BHk(#H2*4%NM7tQka$iP`@O(1w<0~q$D@3B+XGuf;^LY04I3IcS{NJ5-MmWnxR2L zfy@vnCA`6(Xk8zr*MG=RB6U<4KRo*^10In z7I00H#pCywP+*~(lmiH;&I)u6_&h$3?k3sH5Z9;gHNR`Bsd@K<_dT8&UyVV1P+MDB zSEp7N)s@$k8?;5`C6!urQBke7R;_OCu$?xXHq-&^{ak%{cg^w7m#Y&mH$?V)!_}dopctI! zuKmx(>dNiGw{vkeQ}x&wg!sEW7vn(<#GV1%XlB44R_c4ih!PtLwg_Y0Bysh!{8R=X zF7WbXC=!mjcbZ7(#O{cgYN{&s7Q#>b8;4?x($44(pd>bLNyXkabOLCK@u!Y{Nr$tWd%OWHf~R{?08b zGkY$%{c~?0xP1BizTb1sJ#{^XK&8wEKxwkoGCRu7Kx7Ht$P-jMbgt2k5V9)20S?dpdm|VNr4Mjsta?qT&j5{UaF9GA)XG zi_QRn8g$w}Y7GXIe06g(-d2}Gx7h2|;9(W)04yl=F)hrN zZm80z`mI1|Zi`alD^JZXoS$9jsrz|wF_%j^`u?>>tL(_)`s&1qD>3eItsW>{U2HNH zAtAAQI#mxwUL!MvQUZKE3Dj$YK)saZH#CYCrv%EgjRhrI#w!-aiLPO$&0z(d98G$H zK+~p^7xs77b=XyJ9L!!K3%w^OgJwUMnfX|t9~#%!zv)C-jwTY5^pzJG0_acu9|)Rn zyc9^wVmjvO*luq|ivPUGe*B!h@pe4i7nVU#zk3`O;9C$h?Nb4=*s&5#i^1G@=17^g3>CgbV}ve&QJq#>t}`Gu(OOJqbvfQO zwiH*Z4b8o0nr~jmQSHt1>e>Qz!IlzD$@Z=7C7Ob2y8O)#-4?J#oQ0` zvz?Z92(dl#3n{dz>DlXfB52W$9oCHikB7=x>Ir2w)OcaGb5o4Jj0m4k@v|>}KDPTo z&))jki=#a~>&uDg=xS~vmv!~7^_o}2>_hYUD`ZETPSY((teZ)t0zv0uVHzQvW><>! 
z<`G2|iPAV`*f|{Lhd*pkYaCY|Cx&<^GLKIDRY{^;+L88l?CAV-#NJx-(vc@<=J}=) z|Ev7biyI%mKziELs53MnM^WbK7N?n?SdGtsp!w!@gP_^TFei9gPcXF~ec?-nV=$MPjU6%>QKSN1raAZR=iB7p{(>trE`$QdPul0Z2*C@sa8R;8s_ zZZ(%ZHw5ZsBdGA8BUY;qW|N9KVbr8Z)p`J?)kCZJww*S0&>C%Q0Y=r+Jv*$8 znw^(U96E9a*3$rhR<~5^kdk0W4{ATt8S%snfSPf{pwr=~L2WkQH20o>KDD$;Q@dto zJ9hZlUa%Eeej9~&B&IEv%2QqSomJIo@}QQjPm$PVCeT++Up;#Rf~r)P4-Y8;-ctH8 zFfFCU6~7}WQhY@nmH2%N7n8}wZw@st968cGI=JkPu1@@8Vo*MNzP74rMd1E&X>D|U zeKq@ckFKJENt6US4}v=5_=lhN+L%OXK+jJarO6$6f>P%an2Z9y*5*B5KE!iBb3hZ>ZC-v3(<+8@dEzei}|V}V9C1UmCwUZ5@yT0?_dLtno1Rb^o<$x06xd;~w1QZ8~=+4U!XmW6HX>pN%y)>Ac&&{XqB$+@3R~WHJ zOeh5X5+$Ap3T+0S58NM!hB=Np?;%f@p9z#CirgeB1Pz6Syg*rxGYy)8pp@c6w|q>Z zib5&Hw_4fq(#ZYfPXBg?W@OSg_H^z)sD*{Rer4^@T9>XEg}tTP2p0;LA}^I2fK+-Y zH-o80ow3?bu7)2q80dyh9B9$((igOsK%1KC?K?l&T3EVGUsYQLKUr8=iHuaQhX;+K z#Ftt$vrh|+QF7oES}Y(ncki=T@}U_s8ge}h-O7wTt1 zjBk2P`c#!2IC^Zz@`r_dhPPNwRVqb(f6?Y{rqYTcge0ZJFRZLwT<^Z}@?PUY_h1eL zog1B4n;6WVRd1`*uf*hs=WBZ=Cf;Sg5X%k|Jt0!`2qqkdF;xI#!XDv7Dptdx~fakNxW@|nhLU5{EHnHo*Cx9ttK zbYwX$CxRKLJ0fSeuhWhjQXdp2If6ks?%>d4h^z2(&-t2@pA_{Ew%;|Lfe7Dc5-5*3`7fagr$d!&K%$P_$*o za#SIxD8yVbMo^E@#4>~>mWLS~tH6PQb@1&J=nF617JirLB*>*)2 z#`b8n$J;e*0mMhmmvj|I8XekWEYhiol9$U!lI7+WJU1GVpNtyd6#}J2MgB+C8OAh` zrg8ksx|d7%TKUj6K(4Z-n1J-8tk9~gYz&wqN>VwZ&{ezV<&Z$@2+6!$}cp@sBA$&wK7dEP0?&I}Eu^Pw4j^M9WI z`#kTwRp9SS_u)cg3R%`~VatN#QBhc0SXc-?sT2XNR~$WBu!IUK`}@F9d2u@l@sNc^ zB8dk>4|1F3IbWs=6or{MU;1e5r}eQ%vHEbax_lSVu&AKw1Io-eXhGs>B7N%Y!E3Sk zvaabyM=4iE*!7@rYP54*yzP%cQPAi|gDo;G6f2o!Ym zqxm5Yok#vMxb7(1`W#!A06jdWwFGZ5(O>l*3m8tEJ93pAZ1 z5gynC*L2*1%f;gZHzjo{aOu+DsZh+;m1sPGw9kN&&;cm(;uTZKXz*i6jeUnJ< z97?o2I&-KnmN?TkM|&UF9J&zV|1@7yRk_Z2|GB^Pe2a5jm>L!pRCxdwp;_aN!~Pkg z%^eUuC0h7lqM$C5=#oG|-R@oKG@_sH_QRcz3hEH+O8QQZsGuvLLhQ=O(Ba0`R=ClJ zOkxgtb=3PSK>wFfP^9qr+dU!v7Yy+j$)E30&|Vy90tNk!gm~P{q~5gp?p<`fZBC#! zQ@|plKSY0S|H|pO++7(pY`WP&VH-Q{>G&WbibY>DP2qexq?GdDEIn+I?jZ_FTBkf7 zZztxU9(3>E-C33uk|3dx0lzSiT!bSDm>P{yu;*m(y(f+e>T7=;u-nNg3w;8h5Qq?Vx$QQXG&&mO*^kX4K|UzfJxAkWEeQ-f$|nOk=+);R zvH~-WXF$}k-~BQr(44^1G*2-!xO7g>=poBwJE=ixJBVc7yZUPdy@xaIKtWSIG!-=W zP$qPCytxOyr3Z!6g919~aX1+G@&INvDCgE9jshk_{>|iO6r!{%6XN4;gT8(8_2>$7 z>stUc@X{SHe*M>P89nb{+`WLO8x<5HJP^t`IL5(X?>71?qM#(vce}6)!qQ}EY;*-? 
z0nqzED4>aW_67cUPFyI8l%1Mz&M`jC;DDq~DnC~8nQFhZ=r;{ubmSV%Md`nfJUUpZ z1`buV9obqiM}*I8tx}~eQhf><3Lk-&FS&4}uoPrZab6K7Lz%m!N|~$_`{oL*+)$7e_39^QOo@%>tqvgV>jXPz9WpCO-+ri#Di6pfe=LKOU*|{BO*{S8^vXEMy zI4&W8N^+aa6A@~1#`t6Y8{N2d!DhGN0EpFQGucdbQTG^}G`g|jyy0{@cdA;P zcI&I?9s9JtH0NS@^N`LM^aXuB#@OxfdY70b zZ)P)+S9rSwKRo$$iG0Yx4<8T4DSl%M7bw+pTU#5QA3Kc&OY_(l!hwQLdM44v zLk=pi0*EFYegOytE$!dj41oj!q1Y3`@Mx0SeLbT+oa@;W$4kI^`oGgJo&lgN!{9j# z0y@P4pMn>VcMt{b1qEeHR%>P?FMtOwzl#crgd(D33j_kKIkyaVb$1D&W5>m%3~)M` zvNq>foh++Css$IG)sP4O&rj;ZpXJx7H5V$2zK~0czmV6ev}$!W#P|m3C*t!Oa8`U# z-US8fqavIU&pm(=pMzmuRJgSHp>rly1~-7AAlH*T^swkV|UQ5)=# z$opq{b*4@)GwvLJ?|i>=?dg2kj`K&0{kGjP;9K@^l$ZB=Hv!Ozi7~r#;+1{EIbnL$ zZ)k1IkyM0p8(W>;xAFMfw;3Wo@ae#}KqUNuS>i|e;1X;M>`U-L5XJvsqM7dg1J;lt7sT0h3LDGadw0Bn;~5xR4?t9^j0N3TocXK`Aq(H{;tL zvyU1E1@-YXQBZGZCs9z3XA%`u&jX&QoKd*?0O%Uv$D8DSy!?DeBGbrEiKI& zxk{7sdkqA7rB*FhN~PGe35H#hUz@M`+t1hn5-#{ZRp%GdM!JUaUYzYx*xNK0RU)iz z%~sfz;5M9$n$+!5stBeK?L}hRbU3G+xHXf`D#gZgXg3K-F^X0~62lH6Hf3~L#H^4E zU2>c>7rPXkip4$Hrsra`v$39B7zjwt`%bd`V`NCiD5$@Ap6C6(_xt7(M_n#=(rN`< z8@ILZL9NaXHMTg~>+4$!*E3H|1QC0^SDWm-c&LmrX}UBT%@yD?>&Zf8K9Dw$`- zWdL-Azd)#oCbdJ^`eucGX7Ae!+o{Nvt!PA1&LAB90BD#8?_V6Z_-~BijqTUE|Jwb7 zIsWA5M8n%vCcOfHT70C1XT+JM@chz}eJd1M3Wql%o5N1E!{LBHpWp-uynM$30-8>9 zA1dfhO+j~(Lh{uP7p(%zDu4o9awN*ZRhd+7dlsz%T0>M)0Ie$M#8$aPml#V~8F&&T z3=j&S$-oqYO;FO_hYBi73@w4h(>^GYgtAXS1!W$Bg8F>eFM>^;&mJy6%xCdq}>D7gwC zviTJ`UzD+PBnqDp>Z=NRM+u;d>k6TGPfOcu=GpSUfzb7;&p%jyuzFc@XR~8-GXWxM z0*lAJMO02e)bZJ9uL1kmKR@;PK#piWMW}V^=1cl(*PN%DiFTdiYrqp?C|Gg*7pHP3 zJ_E7EW$&LJKYq{S7#PqtIk?S+f-yKl*nI1jfR;Gq>b=qnI)plA{^*Tr6rP4MhPQ@XuEWE_F;^v9sbqVuh2^K<`TCYMPvO#)N${UH(}w2S zWc;y8cIbPv`4$#c`fY8Y99WzN1w}xaqRf{inwiYYD{S%GnJ$7Lu$jqVXeh|ZRa}}m zBhG-igM#`IQczGH+zA22*^o%Qf^8>j-M{?U5`Qv%tA!$qBY6uh&)`{}p?FFRha%L> z0hPj;yy1ohb&gQkoYP@WdL_L|bC!=4l#5ke9ghH?qM#fq=sQ5A0G;IMfRKd0izMKC zq<0Zinxm_L28!WC8~_#B4-_;h1mpl(JSZqtVj!;G3`p)Nu=|?30p_$8?pnVf#sHtPZgxfGHck5KC9Ch`mpaVEuUZ*9Rt=3$7drs{%xu%=dxpv)2 zEwBl6f)zWPaya5ceEHS!z%1f$9vq zcA!yLue;Q8b;|{^SHuhoF7ou^8(i|y)~gx4VC*s)8%=!18UO$w07*naR5o6YxrT-+ zo(gDbW$61J8Fcie`Pz}I^_zQeaeEF`DqNKG7y;dKRk|A+r}Zrb*OA!@3c7HsuwN_# zp!6r^!TzJS z`)@06|JVJ?`Od9>&c_EuuUu15NTNxq$TKMs;FNipOtF5Q19FGT*4AcLP~%N_pZGJNJKzE9R*zVi7>j8fjt0LQPAqr zI!Q(q?q5rScuobTY6@DWB^oZ&`%on9;zm2lraLLxz3ZM6+;eV0z`0IHpv#st_l#V$ zh(Yo7-hQblM_I5qnJK%c{_~o|Kad|3bQP-jsNDj9E|0JnK;bd|{5jAL3d(_(uYz8B zsac+pC-V$F9#b0gz~XBPic3TM3rHs_= zxAj_qyz9SiYXMH}fYy=wx^K|t$Qg$RZ8lqj0x15-69Xq2t(`4xPTkdP7BI{17pp}) zHr)L&{mAfW8?tSeQKM0fWfhzEREAt*hcC@XrR-SGHFK-Epg+@H$_|YcRLaH;O%7}0 zF^|VH)>=sDw+lUH>%d`oj+#e{zqu>ZMLE9*Mbb|+T+B>pUV}S}sMQ82lCX*oS$GS@ zlI-kS;!1yTiwcJm!L@MWULm{$)%>t&Z6dOEboIuK^XI!y=U##ch$O|&9*`oNioy4Z zOT0)fQ6VzK^Gr0w2UW!5292$+t*=8=6!drQADsa(g zDO%#9;W(S50}wTF4V{ufs}}F00}`Bog8&wem5^IeP_T4hR!HI*;WfDVlwk0Z+x|K}a)~jdwslHz!a+J4|?^B!dC>C3Mt{wyCh$ zlE?vmknv78eKl%qJEwxOJ?CiFIR=OefahrQsIj+Mr!#_%5yrl0wDG%tAcQy?pUV*% zn<3Y3>vJmoCvfI3xPSaNr#pXWF@HC&()DMdG(@wljhA-koS8MxD8K)zRUR zxv696V8C`AGS8=nxGz6>-C+-vL2Tw>p5`!Sm@EWYy+SR zJujj9mv$N2ZQ;vv3~s_PY58e(a2hucQ^SSW=Bz0wmy*fYZ1>f(g~3BAY)=HCavYtW zNCZ3V_IOGZ|91V$GbgR*{(k+7twrFIv`^d)QBX#u;*{U?-TaoRho?x0V0;)<*GN1z z+H9KMVbhcM3Yz9VQqY=WDz+}9qibjXQQ}G>o8(H#B&sKecjY5lmMfLuC15ArR0iiQ zvV|iVwBT8Z1Fy0dIQ_|UPgr)iB{jtMPYJD`PKn_vsW&T;A~(nN~{ zDmH-<3Y0BN2xLNm<>u`34xZhdvom7AK{CeQ z{Gb2x&ig!X(K7Y{eLyx2O1jM{Xd;@}@FNdu$3SKs1*_jqOkpFVag_Ffk7!h%S*-DiR91<^)75K?_=m zE&volJbz9?U!m!%Lg7^cGoTRwvl%RcYqow)u{XO8F&FX>skDjDzl&h&0esI~y!-f!`Aflz}Fw?7Rt9u#O?5yPGvKi!gMJ8%!xp?kNZAT{r`eCG` zD9{@pbs8T%g24a7@L{J5RdtH^b1f|=j01hH(L>*p*-vJdS$t?8OHLe=O*m{@7Q_ks-dsXYW zGZ)m{x3=v|ldmpb%fyWY}S9*`7bJf8;!<3G4tQNzX&n 
zw$k)J1*Nu4T%qyU%rsiT0VPahkj1-6@E6>ABAd$RT+!U7>G{s)^PQba%xwWa6Z0Qa zo^7tHSTS2ITvo2dX9lwy@emw`EI#2++Z?gN`np8ng+vjJgIc3eTL9gMazG9r@pz(D z1r3|cX0->^$h|{oawaqlPc@2>oFfM|RFl(aaxT=`4)qSJzqlq!&66lPHxR-e2~S zKA#Vs4{)S%2ZommXh4(Ce_}MASf8Dpn>&2{_1*Q+(YjBo&zE%UYpMbQ5-OH0&+JP~ zF_cQK7GEWEziEO?M^um2uXU}?r`we}2<)U^E?-dLxg6LxS9MxR9cN=${>v4=`AUn)aoDCd^xM+H3|W}?tA)0o|~1SZr?ZEr(5 zzXV{S&QsHi(~C3Hi}8TfqbZlmRK-*_Q7Lp*6{=DM#^Kaba!oV}ghGh-gcfIJvQ|(~ z*id{P9E@iZprHPQEsgl}dKO%_Zp|s&$06R2CO5#LsAI&FPJ28^KhxF-s@GI|(wuxw zPNu6nMEPQPq&g`t;@03ef$qbDr4vb{(iqMYwW)7c;ch(My`A^P;L>t;|;r(x*VcyJ;7pu0`|z~%E>nK%R1GYTFwkc5Yq z12oUVFI5$k`m3IhLKa`I#^vl%RY94bX7T>`f5(~pprDISfBVZj>)z{TAA{g5mEgLn z%#$gQe!GdHAZ#;WJDE%@pAQ%CLdR>LluMNRGXNT8n&*jw2NTJD(p6%< zE274XnhA1`GE(?{g;P)y8o;Me|4V|Sg-T9Zw?E7b4h{y^;%lIw@HvTu?QP<% zEn#!yMo-_mtMH=YPly+sf@*pJ&Jh)Se_ZvT>985Hcu>%9-%TD93UsbbuI0qNfTug+ zwrX$#cu)yi)&;G>M&oM1`L^bZJYM&m+U{;l&ue{g7J(0AM+1l{q=W+l#$fDG_)t4K3Nl;U)W~nRQ~UTB&}+NE zr=+eMw3zA71v!#UhNj-*w^Anw)uS^w#B^v{x6|&s8rtk!@n)=gNM4@L^;(f;-j1K^{7X^Yjr8T zs`L{^P{T-y4WM2&ULE9_3T>CU9|sTWCAp0SYwAN29drS3hHfUpCQ~{;07~NdaL&1x zWTWU!nh7S<+k=5~7uDgK9E&{Y;{PdV;FwZ0{u%drZBNg3pKZI>_B(^lJC%1#If7oF zmyCJIgAh~QN(?g}&BrDt*jU~P3c3_yQ)Dv5rVc7hnMJ*tNah};U{wcxHF@uuqeN0W zczfu_anP!Qjv?8cP=acLkNg11PBsw8TH#DVMB`SEoSR&aCO1XV zbQm-j&w}b&Em81~o-_*aevj_D{!IUnBRvSYe0DIgfl8ASN@X0xGm+SM3xIm@F7YLK z)q`>h8VT5Ja+N1NE?13=dsM2Ckr4y@&&=xaG$curYgu4)d~$94U3PM9q-O**8Pr4~ zVFOySn^$$A5`lowi<-{#w_P}Mp|SCNldQ4f%%$!|jBb(vlSn+#gTF#wxgvy6e)WZr zV^b)EFFg-(qAo&Vkmo$^-S+l}4_Di_GBI!&Z^RIM>m*~!%7N0w{zjZDLJpVn3*-Tp!8@xDWp46 z;-q$5D?;bS4P+y3%BGu!1Y@hyadcuah7cPx*-~PKNswJq%pNWI5rbuE6}LY&TkWl6 ze;n(td*5&3_P%eD850w6^5l8m=lkC89a#aI_4AcWzy32jv9!AmL>jBP*MIzV&b_l6 z^Xc=bo$hu+NGnzxw z${92Y8Js|xIfsKbNu0P&__A;xOppwbAg=}mZSCsW3l}bI3i*{}sx7&tm>U^%(EahgcVGBgzqQNmsx@HHiT$Ow?oGFS){uBNVbBnXK}!Ho9H`HY z$PPy-2Npb)t0IH?q9t(S=^U6O)^XUd7N761u#Lbc0)ebT7*tmT7w1~LqZY`E9NB0t zlXhe?vFNv2VQXtEyOmTh1$xPcoT`zf?ny=ATwKo;C+i1r;wnzRGC8qlA9E6k2l^Zb z@G9fVB<9=Q@%zIh#oq7#it8g6@`BIpq}5}iX%N|&(Sgy9j;_(mqj)qhH z`6JAKHZk2)cMD!~p~1DjPE6ALyS=@!(`~0s#Z$#z99}(frpSO@>g}cJV(;jG-k%*D z6PNe40ql`~c{eK&`&gUF)^D=i{_@KsZ+?6I+r#gkR!1Jr`D)J(UcY(%^MluKu;l!| zf0`Q!k;;Y8@N)40PzuvXo7=W)&&OY`L|reUl8^vC4C+#X&uhS-cG261F(|(lsf4dZ zRx6J``%7e2RTF|BgyY_Auq?XajW$hPL)q?73Q2XqLum$8o7U@QPA9lVBmX9lRx7mB{N~q>Lk|e0Q zE-mlGs`-4`JDV5s<%2Sq_3FcrzH<&3RNPNKlWxtsfI$ypHEBYMxFfMc1`X990L3wU z0H_b5P8_ID0fSak)heJXmm{nHeLm25!l1502K8lqj+9(U21YDTtwEcAI` z%fA9DJ3L(=?HwJ~E~@CU0f(Bq%pGR1^q~JHc!jNzW=tl!v5_`GBw%jpW*}<)=zMp} zv0hAo>M$Zzj0eovkV~lrL*mz888b=WG#7g@-?NCl7vR|oXe}0*;o{L_$NqNqlP3*l zKk1%w#^dqr*5>AZphN2w^O3o9E$zb!uz_SHDNpQXSv?fJb>*CC?Nm-4zkt4kALTZE-DJDdNnP_E@`(vBMd z8hhq$Edzr-({I%XgT`v?CjZ{;T@lL%4@QWKUjg!*ivdbO;z@z;kyK1~8>N%e_i};d zzz$wZ*^x%I4~9$iBGRcqx(at}B|&P|loEYh1x0@2o)CQQ(82bsDO=I4Y#|FE%pNu{ z@d4a`Zli%Hyb_L+&c)t;q$VrX&Zh)pN%yi^Dp1+kvilQd4Kq0z~iLT1f+jDdg8<% z8*QY)J2ucX@$gSfEmX^qg5u{y)-RGJ|D2tLjn{>pdbMsWx{Z~$WB290Zma+fUtrUd z=J#Ku3Kui^4c(UvBob=E1)sti%c@G4PhhoxupiGqU^GEiZn;D+8xG5BaTcfV6*E@m8i>s@vVO3Cr0L(`k?l5gBOLjOq4+k^$LwshCHcPNW zY&u|bM3=Oy7ydp4K{Xqxl$Y{lDPCFTm#QN|S&n!kk;vnkP?MgzAB2S1d-q^j%!9!u~L>+Up?mN+u&qJZP=W}(5$809wh?@Yk6nk48F%SxNJXw)J!>grKI-T1n zD5?5+OJyBonn+=JgxN*_M7po1r;{5{3J|KgK!O*ui-12NOOP1&{T_OB1Ms<#t@rOX zvN%_ig^nAJDv|aS0Xn%zI1|8#sKjzNMDhrxgJcLt*khM_DCIBEsB>_7Hr9QFScSd}6R<4qK0IpSXsox`n8XU}zK z566j}onS9Th%z9Q!KkL*eD^2f9XQk3k5iqe$7GT=pWqW}m<`)qA}5MHqR4RwMqIhk z=c=b8!V}yeJm=*w$NO645l`x3ur6tM$s==VD%0Rnh zDAdRaz2`70;H-ehpG&W?nM~T~B|@5HwQngy^feW=+avMF*iAdTIUzlg__!)h;8+m^ zhN|R?E*-=|(k{f9dhDiqaNo5r+|UPKfk_HIbi}+Bc^esY_TVRjK~dd9J0URUAn^u+ z-dmQR0-KV$`KcUuR)18nNtm}`0%=K|17@dWM 
zB+_LTc@%7U@o*@WDd|DVw14OLN zaNOKrLyTe`%m3)Qx|lYyE6hVe6e+KcA6RR62(6VTx5h%q1F}U_tY|T^R-!SB>>x@v zK_kV*wc-Z3j&OFUNW27N15Sv2gnw+5r|5g zr=IWJ!6E&*^FJOFW&F)K-#K&d`S`IQ@YeNrO5HzPk*tjvUmfKtRCwCUDsE0bi!NMr zJJlC!c7CK#f04YfPb(G#tLt{TT)um*dmccO_FbOqo@=hpef_s?wcqj)G=07s1W)qn zTNK^TPG2R(GC*9PN(;lLU`ClbCv9Fzinpj5c{ z)v=9l#wNn4wW?n$M*leM_WJB}C#F&6Gj$c=dMbl@iN3}a9sM++sTGTzWy~#*{`JtW zX~e5WikfZg-QngldExEzc^FpCX;O=(!7F36HZ&u!_}g?Yot>cIgDfbVI=nh9*vuRq zWsiW=qZ~?wsdkK|2%+lx0KxOj>C;!vo;}@bYUnkgXMIC`uZiHNM+O}jMW&KA6mU13 zIeqFBaZa|eOa=_cj@%#)BKznJd;8f*lYsIHbYCW zo6vdDNNZvp26Hz*005?}0aQP5`fQ!c<~01k(tN#jE5p7ai=r$gt)72a)KFE`>WjAy zSw!)~6Ll*-gpp(dd~U*@>&Gsi%j3Cr!~IhR=ym7yYpxqUAI2Q?3n&xxr}_OH$d&M8 zB`${DgUXC31)7v3S#FZA3{aG`))QV%4bN^7S=DOFKr^JoQyGzvS`HIDRc2`K*WwQ| zUR6{kil@+7e;B389Bv!dc{n$ZCSkOREV0ccQwogF5X)mw`K3XkR_qdj)x`h-ZnaU{np}5y$ z@l9F2_?hzyyP`BI6iZum3K@}PlI~w}aWN9*+KFhQ!-wXb8I+@=F%j_y7d~^6Yt8_b z@qG&8W5-7G2s2GN=AvCABO_5!1J9~MZziPb`DYhokA6wi+R_j3fj%e$mcS=y02X4G z?Ay(Nj#+0k8#GW#3{0}0-7W0$>70#mCy*x&u-{olmj3>$SFiT}wPGcJ8Vo^0P#9b7 zYJgd1ZZQWn=qqfU&HU1eVv_R>b)Q(5U?j8RE@f*i&8Y+wNJD9=@9s~u@_&pxe3R8` z3Ay0Y36QX%pSgVP{p8*;IEgrAigi9{9bbm(0jj-a{x}qi#3G}&V>EqzJ9?YP&WNgk z88+DYPLCuv84Di`SUle0Cv(g3<#;N*yjF$43rh{H(ljQHg!a%_t>OGPdx9_%?E~Vu zq+S@&br{JW407Jv^AkhB$faEtIo@RHSeB!!>{i~e!tCL0C6U-fZ)NVXEF|p$y8-{B zu|!}DW0IJAE`|45gp#N&T6|)H=Cz4wq<@kMOZzHPkbo#EeXHxk%TH8#I#YSv)oJv4 z2Odk=V^j)E!a^2Di?JdZN-~wv4-iQg%-}qKMh$1;FQ10ILq|EXm^pzoH_opT-R_FUNyk-Uf8hM7tcb8j<%5Ws#dGvK?o^T!{MrCb*&q1dkx31$zYf_$ zUSO2AjMb3W8&VV{s3;K7qdmI+eGygdDTl*z>krgwIef6km68P}^W?nQeG}x0#iGDn zOqj{6HVMS>C7LZAVrNbbmhvbahl}S6g`F*qjsoAs5l67CP>>5q+{rq{Vws{CX@H^* zj!oLQA>G;5(iUtXG%sw3s2OT&t-E&}eIGP>rTaqSilk7_-`}&f7jy^}L!`7!w zo>q@XSWu@V`PN^|6Fx6r{%(Jt3z%`e5N;TAgjsC#Mjr1*XS>Ou>Wl^*TTq$kZ(X@u zx>?Gi!gPtdy=u27|6!|ECtBOsg6apXjZIS%HJ7SY01m6b_)WZ3K2}sYE?-?LWwg@O zH_IhW%i>vC<%-rQ6|7PepvG*TpuWjakexcKm*O-xFZ76@n4oCK968TX1^=5Yn9=ii z&yR|t<23PV9KAGw)<^zFj}nuR=>!swTZNYjub^;`YWM^xq> zYE~^n2H`Pe`2O+wLA57PeO4BLgYzC`EX9Dhs0h7t*&IU`fSVDx&re4lR z7PvDfL0M0Lm4Rsh8(v;EnPd}jR1?}oj6$ZRyRAjEj+PGKy2GKFV#cw+at+a+O zPxkZGO9V-ccNG2*c3Yxv(IN#q3_#K`=g3yRBk|A15j> zS}fXm%!j1YKbHrn6)v1QbKz8dJu-=HNHzkY4Q7jXe`0^B(W)~yov%l0Fa?7qWXu_! 
z9~I7F*c%aoHd&NSz-etX+?6ZnF3R- z_hwqLDnS8itvSThtgTx+KkIjQChtNY%MKGN>*?yc#R&003fCEtSYbw=1Hem3SNiG)(Xu2`MUl}6_$O+ShX>V70#R9^L$@x^SuR1z`3v*REjiyG?W z>+$k~4|3_KyOTV;tTWwBxsx{Cb(ilJO%!aoBko1KdyX6Vd)=tD<}b&`P1@1fZPt-d{U zR05fS&yI%aA^!5?pZwyV5A5F_q_Q7209NzynwuX=YgwYS2FqMJm&?Iq(m7b(t}ZN# zKOadZ9g=;^ZueRVnDrML2zK2@6#67q$0UzTl+=u2YF_)(r+-^&v_RmxyQQ0!jv(M= zx_JKLd5Yxg|IOGLhqRGraoj&NOQHWJ`6C;W(2U7yAZBAUVv;&(*UGIX7~#%HoTQCs zqok6qbeUG8SI4GsW$RVb>}EN-y~=X-sG`W?f*6XHB^(~PZa0)ID~IhrcAK7J*gd)+ z+5zw)jlM~-xX1x+K^mt-#Z`z_5Yu#=JSXWdHGObh~w6U|>9A z+1^gw%OREh^nn2n963``my)skiFolf(%@*|cMPd)^A8QBIS}jWM!dZ+9Cie!!GzAs z6gX88&I@BUc2s)hX0unVQ`0_2T(Q&nLSZ@%KA!eO9`gqPAHIdqiy-UN=xI@f);ozS zC%|+$vin*mF0dj7l-wvscolExM4dp<>=f@q#&;c9h~Yd~(0_qe#tOEt7GQd{b<$#t z)JkWK3?Vd|Eo*NFp(o^1LS7&=2Dvm466B%`qm?Ti3tEoj0)hNul6TbL9UoyF&t&ua zPo5M`0>oEHS#%8dU;20IxNFM|sPMiUIAbf6#@w|hl$#oEPZr2U4@A!Pkvm71p(E9t>2HlBpwj&ES!;U&|6#bh$L2gZfT5{~4&Q~c^swx%5laOhpU}3mqw5p@=sW{H^0pWv{=F>s7 z)^q`FW+hl%Sd3n?0(Gg=*;P@Fr&f_O!MT)X>4q zM3GuoFQtm0>8yBUy(XStFFYv#poQmT9tWvwU+O}o8QJWsmKveWI#Gf;!kwLE?WFIA z<7}Y)NKisXEn>m6RqRUoO4Oj6>oF%@A7RIWk&(wG831_V+^`tW;`F1waarBXUs&6UM(c$4Z zIb%?C8}O64fFMvfg-OaNh}ls|gJxc3GxMvUy~#>uUg!JwiPslE;VP;XY~SO9tmf#= zL1MfEKi*`>_0f%;Gd_FIqiVa|Zn_i#LIq?Kwakc#qA_AH5lbWaYlX9T6MKbT z{|-Py3?sxpK&XraI|?ORKo_qd4K1fig$E#|3WWg$CN9<~E$Gkr1x(Z<~6=YyzQ8_Xv|$;CmGSh(EB zbj{%9Uq)wfzId>pI3koOY;MgZa4u}jeg1tp`PuC9%HK9uRyH@q$^1rhIh-pkQF?$( z!?XY>wbV@F`WG_|M}x7d@mRY}EAQ5KS674lRB9aWW*tg7XPoadRabZ0`=%mG!N!H7 z?hb|B{jtSovR8-g@l$LJ3tbD7xPaA$>mYZHAS5ZyOv!e>S%GHW#%Sg(e!oax;zXrn zfz=BPi&jZo1VPWee&Vxn+#6<)KKp2UXT)c3w(GmQ6^2u{xm*6wswjA9ItjSMketX& z2z}BR{qV}JcPb7Yswh|0Aa%rRE2|iYfaKUcNU@RC%>D?3YU>aiIE0h~!+swtiI%)* z`(RUT(>69PFk!~Q7_lQ=O)$}Dz!X-II7vZJ1kKPzQKis|YXq@;-%Evb2t7+9=uLhdN)1*cVN{Jzm8bWk)eX6t#LbR;>PYIT{xU@+I{ z^-2SSMWyuJ?x=j+l}t#p@NEHHIQ97^8zl2 zCA7Y|J9wSUrTEMB>w~-J2G8x{VA9>KtzD8i$96uK^0PNHUnD~L+h0Enh5m41Y3b6X zrM?DUsUaTTDCew9+y|}aYV*~r&8K`mi?62{!i>A$errGK8`vB0SuB>NIE$rSgglO5 zo49%I+08#cy><Qv%vU@Ad$9f7G_YW@cUK#}{>Jj*(q92zk_ts@*+xXB09bM@>2&X{u`!`H9Z;h+ z($%JWd)s<@)9Kn;vq}Nx=~!(8gf^9Cv{+!*szz@fFR-Ksp>np+C zM&-a~7^@kg1r?nzAwp{PV9UHTG*XETEh#iIpaWA@*lQ0om!PGvN{|AcaM{%JybdiR ze@r8G(A$FSbYT_&Wyf)*ChQl*8%K!s)5Qx2RV})46_JP}Nc+}&H zxZOSZs@fh<(8?Oq0%>FznM(V{ML@+S{N_HF| zvbgU>v1Cb!F`X03dd$#-@{E{lrwaVy?{^-2V=?(mzV5mKi*L{Jar131oyOtnZNy}1 ztVX7BJRH_Lm_BVZ>S?kvVGx*RU2PzJdgyezL05^x({+IF>7ibD34#$|ICiWh-Fxoo z%~|$DpuvmM(`+W?kU>-uZuByQSf;3}lFJz!<_w4$C1dqcF8-gcGyX|yJ>z(P=8aD_9qc ziQ;KIg0mu0;e>vj{A3?S5eS$hx5P((HUU`GObJz~E~wRvni*td3|0o!_yBdu;L6}i zqfXQ((hn_rZe=H4ch=#}dz`pI)Ld^&-CgcwG>@(_7Y70DFRmB{B^R%#hptFgmX||G zjX|zddNmq@Mw!n6oOj~!VyQ4Okxulyw2PYcjfZWGS7xA1kG|wiaUdb2)3yWgeS~jUkhxP{x2q99TUHkL9wk>H=_KU*%SX z&CN}vl3V4hWozJQ?dT}*e5&+e9!CQ2R@UPt{84jYXL}o@_s#A$<#6Z8{LY`A{dWG% z$!DGY$!ENt{JIM0->QD$->F}SYi57sCtBCZm;KR!ObKfZ|KJ2!re(5CL+PmCTNm9|P- zQ;2H-2(^N&a&n+@lT*^UDmIk>>S{TFz{2aR0*|TPF6wCfFxv777#7=@h1Z zOcx3uy8D3bB3KLQ6c`iJOHf@}NRRPYs*;;lT zmR)66K7R^lK7V+aFCJ_hY?L+@Xq|q@K<-%tQw8d zCFeetN8gvr2E4Gr@3{B{_~t|!&~hZL$dXoNwOS3?jesJw4A`CxO$}EGtY9&=TP(xF!+j#rh)xH9*7zoG4gD1zM~?N{`7^n)%c2ssMqxtkS~P#RY~^NTK>QOn?qOvT$jgSS#4`Ns&dVgy&V zgzggn#e7hV*jDXV`fX~APP8yB2yI74zlu6|?J61v)q{c#@FuO+pz&(8&@u+x)iPF% z0*QY!6-OBWilaKoz4+!8oWs^+vnim3U`L8=u(5+IYp}76Hl3-y-W>^gJVwk`_WS*g zdzj7ZxjW#w=I-n5<4LNXF`G>kGw9)E65S+;o6M$GyS`qd65B^Y@kBgZ#9_6tWgp^V zc2E)+V!-j74QRC+uO_Vxa=Hx)?tAKr@E+u9si@Up`H}PKA%*|*o+K$~| zEm$U@ppAeALez4tyB)nSbnOu|hJF=uNlK_>m~%xv!?hZ$V$NewSZM(T4dQKrQ|RHB zS9Ch&()s%?o$+5KR(+1|W$fVB*Yz#Z3t+_pNDhaCZ)=P2N#1S35^}rS+-*GH#^da# z_DFFfY$VxC#MoYrIQ~yDf>rD>l7FN*{eI8Afg8NX@4t8ZYHwe!sXr7PxZ2my+h^x- 
[GIT binary patch data omitted: base85-encoded literal payloads for the deleted binary image assets; no textual content is recoverable from this span.]
zkiatrSOOFp-;xQXU~8KH5lo*O*zBqZVK4PQCI>&?PJ|xgJq>VO5I7((YOxY{ok+@2 z$U!JJpWnZO*^!5CKui=1?qtm>P z{f?pdMrI3b_whjg_<>*^XJ8blr-GqQ-Y_=6ehQre*bdK8D|D;daS(3B+$=^GZSWuy zxeB1qE9QIyeMX*a1^+M5t$8ynfw+tKa+`YU+rdJx=b+BcJ&9fD%4Gr3yDUaeI1A#q z;EsQs(601MZdg~bs~I^&c1k#8m@Q<#C<4 zgE&2p;fae*T(kl5yDIsA!tzji@qWIvgDo&XZU>DNEQCKkp2kZS6-?Frw|*6B-E-u> zNdXfJ)H57jT=in`Duuh`6wMQpW&2$>Bpa&!C@5<-v))P`_q>*#xS8N41b_kn!y&1b z(=hFu(A%n@RE{YRnY-KYxQ6TEDi76{@!IZ)C@}4=hyQUR*56O>?<&6WAdfYU*ugw< z0f&Fc_fW&nvHS$6@PyuyA=xEn5(xtY^g@#9`yRx%yyO?-(YljbTHXdYMOZEQzY{w3 z$Mr2fuwHjkefrPe>OwG~_}v(<0|2|BFX0dq>br+vo?EX-LB&v*0h5l%d(K)cC!N!R z{`XU!S5X1^e1E0-TdN!Q1BIn4;?f8@63cVtcgCQjD zSmV`4*aeQem%}}Vd~iA0iqr#$NleDRhOvO!F7e4nEzciDNHO_dy6>G9Jo9jm@!X3a zh2x@Xa&O8_undY)vn3C*0fQ7S#x`?sj=FXUuDZXSBm^*-4}~t0FS4eu9>GsRNG)#y zH<_!esqJ{^(ltc=!tC)v=$&)eX1|Qfetwb8s?7A2A^c*3nu}8oOo%%LG8Uzf+vgC= zsUX8UcZf@f(JI8bxNx0D9oN4iJpXN>1)QC(jw|2-uDO37EcXMbsa#zK$uGLg^TAJ1 z$f^B?JvsgpHGtG6@Sx+DhBHx4v2FRC$5aojHG4hnniCYBhIu-_5BO|6N7>f<)6DI* zCkozSy?fZ6R!|CAY8}Dg7EJBluQZ60JRge~&^JC?6$v}SS%0dGU&967mF>WC8+|DZ zw0~Pv_9nD(f2$xZbAE~Lj}2`UH~?`^I(o@7Mek!>a+Idw_gPKtvwaEe#}7BGC`R)h z-*uC^%3Wn#Q}&XMXp#lxTcA}&9I+=Gp}v*QHqDyFU-ki@lpWEp31zlKB^ix`Zx8oL zeFY;@XB^XN9zP_ABMt~lD&O@bu0=1zT_O@t^mPhr*DAa0feOfuh(5CdLy1Ydb8$mY z+BLpT@Y}BoWQDOFwIf9;uh4B5;}!I}jj8F?u44{eN~cjCb%t6iT7VxMp}gf!4i(OB zGdK2&FuYq;5oAE*IY3M|08ptpo8bFGA=4YygjF}Up;p2|@J8S9oT0Mz)sO~c&JA{j z$1w@y>ZeFJQ{|`VqkxUV(dm^s|eIfo>b==D19PxXcTWNiQXTc_c3h! zxKL)1w$oq&?TOr5SSOy8XGFyF3r$?v_O zcC(EFa8YrpXnYlB2yJAB^NZ;xn9gR+jTI=)9DutI4;6r@~!a z#utuurr?fk#yj|rwk4ySbHItqv$U#GE-j3vczsWC9=~m_b!hi7Y4?73yfH#7RZq4F z`d)oqP}E~wf7XfHa2i@i29^M11R7bAk~u8L`b>bxdO3t1oU^rlx%~=1e|XRn`^Xdv zTPLHmzyJ15b^wvB!!Vtmh=brl?yr*Kb;uo~UHKvUA)k8cLHrrEC6m6;S}@vBbppL<3k3tdAvzBBb4>1{|;Ntn+-v zn}Py;R$Zaa|LH8j^0%g=4`s__#oe4&-(+|8Wz&e*w{1yNpG_AN+qa6iCu__%RhQMb z;;LTS(oTOWXxNHNN2oL%G+TZvUe{!y1E(9!G%gv-s&f=pI2g`1<$241r!NqfvA>$r zlPX+;R(+dA)So+Fy~AnIhsT9u7S{Pv{~-vGmGLf7FS75tj{`!O z(3i@MuxxktyOg^=hYp4&`t8e3NjiLmy}t?e_%=$zkewjR%E0u@$yL`E5Vh?4^~{Am zn%M)j8CRTjCE9HXMK!dw@+Unb9F~1%_rWm%bLan&_1=MOz5n~amZDX&wP%Z}O^w>B ztzBZ3+HFx{)*eAxirTfJR@980)~sE%XAs0*QA%t=th~SH^&X$!AHRPyPEO9a@B3WO z>-o4QdL>dxI?_cq!5Ey(XFKHTq*BKWu{nb zrn%sHWk?T^qyf0Hf8;wg90KqM4>s^gJzK<7dWeUouiZ+>gN?4tvE*U9Oo&P5X)m0) zPGW>XBZS>WgN-yXX;+2I`8}^T=a3n{U(#Ja%-F!0E2FK#tjulZ^$uR1gOu(Nmp1}k zOE&}V{9Y8PC#)25-wuF{xOr{-K6mWa$h96_Yiw?>X|bgb&{x*^eb{Nz)G+1it7H$3fOL~O7|QCEu>nTrmEJg~?B-6|-K_Pnt^J;) zS)kWj1a~pL&qq)3a7O}**tgUM%6;s^LLotO1{l0*7?(hEd|j-lGY(Xfcp(Xp zOPO2<-<5WtZLa$9@6uys@h1l=c`H!nOZ|f> z1Y%KUst7qlNbIH$Q2c$a`5FIYyuXx+9U3)TRmb*F7R>#5RpObXv9I^sGy!cvaJyh6 zPAmeyQOQ54B0&gW!2sq98bS`3uoXG6rKP50w9fo>tO}bLqlR}+QYu<)h z&9W7n9T>a5Wi_3-kJO~K_!Q`T^)@+=y@BnEZ-IVA&atz}d{*rE!1=4qJZ*HPMb7gg zJ%j9d3u~St=Flzu)3H#>Q|{?3>{2w+Cy~({B(w8psVAcal>gV1xoJHs;5A`*1w=iA zTGk2{%i;I?ExR34*l}$HkoN#xj*ea1t+mXH$hX61_P$`^<)QPP%a`%w-)hetvAyK1DH zvVSPIGzE|L2c#s*tU$msza&QruV@UvV)u_qgIM6#rNk2FmwbPhKBCnOxwmg;K8N$n1TF-oXd% z>^(@-!_|ep%D7QHe65Qe8%K0ihc{IdJ0Fz6RyF6<(hmA`staq{@!W(ux4+! 
zRTtq`)Gb(jt_L--1~rc?J5>M5YLP@wvy5 zeDFgcF8BT9^2FLzj=a90`ZI{e6v%q<3f{YTP5~$Zn7GRuQqxxTANkvo$cv-F-pYwl zm%c^L+Ws5tC3KdWx3=h?6%Od$_=(0pM4EgAyYU7fF7IM{LU~j;2oMDLlw43rxMCQx z8ZW609)UNzN>iKu;Zl{X*g=wR&L)mT-%eL>71M46I{->6K_GqSjyEts^-2ZkoE&zV zqJ05p?yZ4zvud$aUsV+e`hnv{;?a5G^sKrv8*x_U&lNf8js z)c)N@%IjIs0zHUbS+H`$r0LPKgQcilF7R%pXd1t7?PPI-8pMsytH9Ek>Bo`=gA2P$ zRv9vLYUokWNZ2P&xdy$>2W8&IG2Zd3$41(GNVgoMw~m~GT(Gq&czAnLxVc9uyC%H} z?H8oI(7aG7J4w_bq+rEru&QAa0W_le(Cv#q-?>=K@zgr&A?#fS3Y)XOF(?_-w3{?x zv^{EuX?$q}lJnTAJ*R+a(Bn`g1yTtP<(+J~SE0>WcwW?U=e}BLue9Wn^xJh^OIU5D zQ^(IV)NI9CseZ%MX63PL*}LS$&KC_wdsVrLY6*+{A{}1a z8eJMmX`mj3wC=9xqF}YLr%)&JyY-lz4~BpXjHmJSP!Y^&GB1@jyD=j2d2oope%5ML z&HgMUxi=8M1QwiQaQ7X#D7ZkIT~TeJn4M)Vh;i zM=s_F5taD3F4^BXJK1L)R<-K^Go4iTmTu~H2BUNN>`;Tu)S?3mDRsv)*1i^&0D=y! zWGcqND*@m&gMT^v;{C2nDaS*fO-CuSIAc@U!NTh#-z(4kBGC|ME3|o=V3e+0+9Flq2dP=HZpQxO241!NE>5$D zmoE^{l4f_L)6f`S_U7}3c35v!w|lh!&+BtLmh&)g^c)2b#$g}2ii&P`M8?@r5C&Hs ze|6@fp1LuNk7MX&z_oF-MAYul6TEOc5;m5t@P=>mHk(J4-9Gmf98`H&pQzmoc$F;>>}=}HwA2RUjZ>gK1u%uI+bUffudKQS{F?HOTeWvli^ zNLQq!Wg*tFkjm3XeY};NHOE)pL`GRT9u6)=xID4txQOK1UXKf|3XX_0>mAN@4XrJ- zLl*}S+whVXo#AMt`;{hiB#F6uS)Bp&RqDWi|D3|`qit>EI_^T&m1pFf4Rscu%uMG@mS+_)( z=R}xV?>AETrK9x!+_B+KBOw3^fz3#cgES=((T+1FV_EYihEQK4VNt?gJsJMLc%^%{ zXh~@MjIMpS*3oSac#1DK!$ti4D2hP_g4PzNqm-T#@2>&WT$Kgz(**Ksa>pF2#gy_W z0Zi_}w})gLk5@I1i~dg)diL>Z?CjQ)0+!x3TBXVO*ksRsTtDDWeo{tm$!)bvJC*Q1 zuj%i`Cm>J25@>Xnv)v{aA>#tzOaMP|Oyfoz$3Y7G*t-|COKdN^tfF&WK0hHjIR7Wy zDNVviL*rquq9>{ioBJJ0R+6j453dx;>7GIoE z@zgny`y1m7K$8IEFjrqA3iT#%9 zZMP{dR;JM=o$%ec3OF~KIQkF4vx^i{54EXfsmBWVb78rWI$ulr1q75M1j(>uiAVT? zOn~#ibMKafC%WC{(%4TNu?6ix+>ATQ1~R#J7Qcvrc!;98f5Nr3tC|5r`lD3K*{&N; zIK-fLbPQf&6GPjliiEu0%bh2Js=($7ZrnD|F0@yQ>CdzOi1k*8yd5-GIBqe5Bq6+* z62FN8bVn|sr<8KiLh+pm2zcwTmX`_+nBniM{-@=8%*YUcwEX)OgAa5XQ0}~wA}S1z zmo{t1D&nDJFU1!0{P_2$+u5&!py%acfq@}_?RY7zj#n5)cH9dZ$<|OKEnNf4+UR*&ZR{>W}uGd34dMAKJif>g~XIH>>fwP!-P2hUe=junH9X9@=6(>0*S|}Ae&afu6P8zr&#AsT%NGpuv|a5!oC7CtQg$uKpUibH{_51B=95= zDn2lu`tbG7WF`U0ejX!D!=xGtj=0dTJ4SU8e=wgfWlDK}i5$z}OIaTEI9|L%fL$Ee zHP~bwKF?;=reR^Se;6nG$SZB1_w#u_Tz(jjlE?1C0|bzv;T%b3+Zj|3{$hrn-H%p# z-6MaaQD}a-#3`Mn<3?6CsqM(6CQMgs=*Lf82z+KT|6ItI`B9iVChlcwtqr~vG+EtZ zJN4Z}7!m08RI0T$)vjuU7^1LXwI^b4*U(NH_Jox}fZIk6%h`;vv*UHp-(9AMuZagQ z2hmLanrSB?^4T9RhX$LOJE+KB4_FLD*>u8#=$@RCd8r|DO0}KvO}ct}XteLCpTDw> zuYFESliwjwi>VwT4m^Fb7T%EHJqmPg=Em?Te@tu_Nm##H0G7HE6p|o!oQS@J z#!fY#CfN9=pmFv`8!-5NCrj@FE2>=}8;>$1I4TEa>8A5Ias+XeNstF}p8K*~1#B6` zW#^;+B>(Z7+MMylg{Xu^j+K_xC+eLndEmScvWQf^`zRf5#?`C*{HZ!x+D)R=zGg&l zj@Y0<`U^AwQl(O?6zRCpJ0pHC5t!F&aXHtzF8DK~FXXd^%+}7>-)jyia1Zc zs%yJB9XhUBotqHM?RmZZv(ilu2$q{xC+TKYigxby7XpkW1(xPh>gMBdM@Vckp%-*O z@%6bG1N~)fFx@_u@Ul)=*xO(JM^+cxOYByr*Wc5r^P$JGV|woOxWF=zFPiQ(to)82 zfBX>=e9ICXqEtf_F|=t|uHEG8tgr8`ry-dtU$y7r;ihnA1>7ORx>N!;)n#r_J?ySW zTUb{$i}jsny|_vVR(51+fUF?nOcw<*Q5x;mG6@K9p(VA&>YM?3t^PC{jK;OJ+RkW z_4PZtuRl(4CrQo2)5e3O0TJ_bNGzEML3tdb5$>yLC<)QlD66ksGto{AjSE4Vc zbhSkzhAYOcMSiWxXqNRaFEL-rVPrPlxI3HnqGW8Te_>{qqKl@)f~~zd6N)(E5p(_E z*FG82ILlY|jnQnE&J5PGx-uQPFWz__BR>XHuUyd5f!R=>tL{%Oqcda__o$s`?K4Bu z%C#NXt%vg#ZPXy8^E1@ic|=s0xvNs+yJXIeH!$5#hoIOcaw5GGb0{~uHkZ@7a^rLG z8}c;&kdh@Q*og|~{_#oaeV3r6NG|pes7_5Yvmd5DUKF?k9e*VF<~(R>s@D#dA3twp zQ_!bh)9!AbgJD26?J2e@BOYKQx01Tcsrq_rhWc|+--hAfvz04VwaJRQzk8v>Zf@n~ z(-wMVIwvfafZOPS<>&3HIz3JOn(E04*PS}{3*nODBQg7WYqs5cZ%R>~6fP^Nv3dvo zv31<^bC+yW{B8S_@l}a8w&64wCGK*UlOStY$_+fe7Jk1z{YkFj56BhBaB|xp$UdU4 zlm5Hx@qqU3Br*aSxpT?Epem&^DJ~`^&hs;7b!y8{X)99x&3uXm?`ylCnumYl3V41! 
zQl*wD(2Pia#PNxrMS|blU+`XnPMT{u{lMb^zaPj)L8weN+xBDP)kw))k^p3DvcBf; zNJK@QvP-w04Ts4-G5zYtDzz+s-Iy<@(%+1>NmKUMK$hM%!{XqMx3m%H-l3cSo*=s- zt?7PLq)Z=CDnQ}%x9G}d-IaLP_@H~Qaec!+e(}f3#b#A^YwPU1g}Rij^~^w~6+Mcq z>D$lom3m?2ZNpreu{aC&N|(|WZYN7A=}HT;pCjq_5SWaS0Q|nv0 z46^S$UTFw#Iomy9~z6$wmg2k8f?Qt zXvT^ssxUNGf3F@@;&%CV$xQHtBCQi~T$&xd{zAmNevpa%LwIEj;u{0t1 zNYMoaLX4b+r>UNyyJXo^v$=4fz{H6MK_D_^)q^s0uuZmk0=mLvE#AXTcd5KWElItY z?i=Wz@4_$_khm%e`C%u+{SJ_OUT zN_#L7Af1@^-DR%(BcEnrNyYP<%$dPBH)$znW=zei{!-`6;TW5}n)ar#of}szF=5hc zN>bhh!BPZ(%^NIh{uv@pz7Pz;j9SjB`3GV_(c(_Nlz1mMhcB%o@r417&&RgK#MT}I zl9yjYvZeV=Y+Y1F+kto_evi$mv}|GU4MP85dT|g!I!K=Dp}sW9yrZhON{4G zX_=4_BCzC5&3bFmZF^@Q%WHPeKP|aYI{qD)At}Dkb1^(`YhN1v5D)O3f z*LT|PJ3jK_Na?KzXwzL5JNl-(-#^+j;qKYWq!9yOy?a^=W`CfRP{o4W=Fj*uJ)%?l@F`&Xavvyk zhE&(2XxFjpOzB+t${)#Tw}=KY`wFRm)qSdCb!_*~rH;hA*dl?N%_>Lao?Z>wqDQ@^ zF-q@y1KRXL3ue^sN!MV=;V;$w$Ewpe+GaN3Tk0Ofm+?Y<|M5w9{&{MHq;832KbV1c?ghU$SyM%*N!TiVJ$puBm_0RVDWTvbmhc77Q+{hkWYu7x5zV zQgxV%8XJY{3tIX%JFFIn*wE`+O>7}_XfpM8$K$6mw||c^lg{Av4&tUS85wu zOuu-ruqZsd(89VczGTs6i>|f%(lq}R1&J-Mo7geMZSUhGmo#~j55#sI*ISga)W*j9 zew9TQ)N=ivZ^UvdXRX10X0{&H_bleMJw@ARu?g%EF}ss)8_|`%>`oqeUv$X=@a>Gj z0)*&Q7wPJWjq%JS&6tDZ&$}M~2q+kB(aDO(H>o?nCEJO8gJJIQ!}scy%PSWqibMT7 zY{@P?i(82)UW?q-DK;Xa9!QyzojjPXFnA-TM-Wbzgepj1{ zu8pXx3NP)cXN!-+hz1ReKS=^fq~(8XbBRcPH3~GegM__VV47RU|CG04kx}W$d(|z? z3b|20?AF_>O01c>HtoPct)n}|dE?>=Ion#v7{>T_5JN!Zz+UH6=)Ly#$X{Uiy%^%j z<54_20M%%^5MeSKowl`D?82CWM7O=P%HCwWtF_^aX~u^%_^ z+agMlu*KCW#fSKtb$`%1m5yP;RFUyMud;1=>BWZ7TJthO6W=E6bd0rY*X!S>zRmo* zi*%8OyT+g`M>+ihIUUW-siS?G3V(zo- zM``Zyna9KAf%!giieBUjTsM2U0V7B6C)cVKjITBCOBegL7&#=BnlE)1GgEpg=T6RW zUDu-)scxCdt{beBvy!YNC8FC34!J``CJCWNirLxwz|)?v1tUVmYJ~Q40u4?WO$5R_9VU#( zH9U=02<7D+e(JFz-raRo7D_&QsnwG4 zfPmi5q!#~e%f6#_eXq;n8FD(9`9MK#re#L&s;=6p?Lm%7nR9*8g8tit)d$edC;95H zHT>+_`5{~dn3xmWnRaY(!-edn?P%!z%@c&_{&v#*UoO@C!YcLL*cPhFygH1J<=2@{ zdY)_Tb3To>*U=^owT-6MRqR_oZB;A3y!+{D#Ye1~e-n);D$>yt1d@i${95ZL3;IT? 
z%go`oidgw~%vv?fY70zfWs2}%+v*waij3PKAC7n8J4-jh6!!zRsFg|;wdBs89IF=j zwJGfHCKfD$GnYR3iUY2%s8E@4w_pG&)Rh!=e@aQR>>F%aXZp9Zg=^fod!_3yZ4ao2 zh}Zq(Ypc)Pgc?WGRKF-W3S53%Y%l$X64RnA^73YnkIU0LpHiBFPfmT>TbXvmVp|it zac{N^$`xdI`;D@*9hes^_No$2&4BI#^G9BiC}ib>c|b1LSAZWkM^n!gOJXXOZbzV$ zX|dMKfNF)@M7E%kPHhv<=EdJx3{QK$R3VUht+CE02{fY~V=x)uWhb)9Tc;%aPc03GFjL z$YF~^TM}p+?xH8IK>E26TEBK#Q2M=`sLuNHv zvwKh15_6XTnBv*1Vg{BEQhm$VIP90uj7#uw0ZWH+k0Lxui9wkh1n?VidxGkspTBDk ze(-n9CI+4x|M)`cop`h}o9f@eXOAb^Mit40DXU6Alk)n-O3!6Hy~>x6<*fe>0+N>C zdmnhdnv;avBLs~Zk2I@xUJkGz{>b;^vhkbaUv>P93CXIZrA=trqYj=Rdt&N;!mA|i z%b`fQ7MwM#Zhyohu(-3_gTmLKXJN_*ng0g$ex)1#p#7`#1Bu<``j;@zFwL{`@eTjO z*#TUk{#9e+rrg8KKR6DTSn$d&7p*p&><>igBZ(T`s{AbRTG z))EksoWtOOgWHb5mzbYGF_#J#{SQlG!zZD(fDzXT=#p-WBE0;cW)DkHR$PPlmb8ok zw`~X%UplZ7=|v&81zZa4|F9-L9PM=9{XN|~gb?6VfLv|4?*_g(P4(^NxHv8SukDdx zOiN3yhnyxrAlDrc^Q4n3A+T3~w1H;>aKBoV|L;=&H{v47P&)}2Of~Q}UNrK!O~U`m z4%fPMuyX5PLZmkFg*3E$8~=#5%ga7}eEe{v2CJXns~-qQyoK!m;GXl2-YHB~EMPpEd6u?b6&P%hORb|ARy zLy^|$xjmqAy?E`o#Ih?mhx&E}?Md+qB=g3wp8KwaYJA|3-@F>~-8;r<(Io^ZapeSY zJ5zn|DoDYoE>rV((SUe>V|k0j%Kli0WG`haq!ld7@QAV($$M^8 z4|M_H5#*MW;3O#dMM)&nfF2 z0LRUunKPPP5Z$zRCv>fngJGOq*PrVag-oGYO zD^6;uL9`!Rc=)Q)GD3biLGr`hFe*uu!ClGTZ}Td@wyJcDj9?^d(KT{3@53B3KEavo-d*5BSAdwnL zC;N4C`df3yo9FZRX5A0(6-Wx<0UdI-y#W8rRzH8-^WzbTZjW&OiLs#kR^ zV)-?-@fCmSn&Wk}Dxi6DfuVnuI|a!c!^qY8c{vZlv)wxQNYAbgP$%O15yZ%!h0WzLeLs*0o3B%c)RQ^R-L|H$_@B5z^5jU$i#X zkLG2Z#?B|@j)V+6OC9zsE@zzHY~2d0cHEM;+qo7{-8jpiEYIC< ziFL?*;qyy@IlA@jtfR6PK7AP1UCVOaHIz7W{*yCVD>eK4wrjDPmy44jid|rHN@q$+P zJ(+NE_q*3Lm$}q!A`)nPUnpxpf+G8sj*|}|S1;32d$-b!Yte-&nqj(^mBD5Xb!%iy zq`LDi<-3eu+~hSn(M24xLT=8Ph|O7{P4m#G27G{esY0q5C=@KbnO|46Q+Kkh>EZ26 zlb!D>9GjTtQ2Xh!Vn$NqqMqsZ-i6H&(~{c}9sBy;tnkwqYw_m?q7~mm@>NxJC-0L< zr)Y9iOqoS$7rVqpf>d10jb4qL)Fmvu5qDNv!IN=QtB6{L!1)P|tC~zVun}{hRHzeG zGF9uD?1w(rhK3oXU$WnCA;?Hi%%WA7Gi(8bdfV*)(k}Q7g8Jhw6n#MXlPtpnv#2VW zT-v{Es0bf%I`Pk|`w3O3sx4VqX2k9fJIU7XO?oMR&l@6kD&J{Dp>$?p0<7`LMY`8w zO4jDmYd_^NGLjvqi`IeN{7!0ul*)d`Ka?M6^fw%koAyGBUF* zW&bxdyG85%dXj(0N+KP#YWE#I#>KDaH%>B;R`Y+0WD4urCyqGkF_~6u5qXiiv6GRY z5m7s!73otj@5+^c3FvbuSJT0IZn?{>f7~T?*`@_5t5__@+ZiV8@K5ZReul5;m~O1g zoF(q3V+?U#)7yhO!`v}uI@pHY%9N?@hOpOwvsKlm;)6|Ny}ikk-ERFlwz7$`-vu#N z17as*E`KjhRe~Qqb$5UMsEd)6g#WYo-GLY*(E%!^8`;xb2 zWQdQWyBJ&=9-kz=C>V-Z+s|oFab`)Tu0Us0Vq@-c_>Ei$KRD{J2uvADS*87ue-z%& zVW@{Ce>K|d7B96T$Tv-L&tIzlPszJ?@^&NZMF^>8?T5`WO<=x@RKEJ@l>Oc>%Y6*kvugmwMWO+LAsU>ExPeMbHjF( zi*M8wHi>*P5CUBm_H73^yi2OOiT!)2w=}INfUq3iMu#o>$(COX53vQA=WzOWv zIZ+&B*~P~T>@e$kA_fb!3(b?9PN+AWE?EyE5ha9VQ5Brg_|Dsj02aaDxCQp67PI!RQHwen4O+)$@E~vNp;@sz$QQz9%-#5td3_`)$-dTd{#5j^lA+nd(WNyA&b|>`<{6W$qcoy}dtQad=mC#c)bMv7m#J)N zZOjR)BUD*y<1*3G;!J-}lj{~Ln#dck^D#Uf>)x>FD4kU=12Z(jG|_uIuHvqH_u(f! 
ziZ%sZY!sb)RL`KCu$(GnH??Y4y~=&S5K0QR>#*S`$d@?{uQ@+{C{4gCLh0ICy=k(_ z&Fc$K-ghe1V`M#T9y;PwSP9|3#l;#uR}<;x{!TS4vgJN+scx*9=ydk?UYzb#&}e!b z;eP$u8#i{Gf=V8LUCSLDn|^kK2Z!;{e#o``r4y2?B4E|+M)jPKv`>!t1()783zKOE z=ulu4iOFEcZv=PYuP5EUSXwgYMfM4qmj^28rB0_9!2BDmls;?1<~AQxhrd|V^aw%J z5>4$M>qOp{6Y?&#DEv16lGP;HGfP zPqe#{VysJP1?OW+kT?5g2fe#Ee*?_r-ktvUNIuUDwm5)rv!i}LS&1t2Ntl+FOU`3E zH{lIEruj2By6cMe99><@rRr2iw5;**%ZEdD$=Z)^i|W4=e!453Q`gq!KW1rbJTnS; zD{46}wEA1Gn=p9zeBmFLyJ0!-lO7iRs+YH>MSV)jE7n6~ifCw7##WW< zhy*t@i52lT%`85Xso3?fqyKHGV%@1SbDiBg!}L`r)%&tht6rhkr)VMysNpBVt8T)e z1Gk*wsFk_c9}j+%YF0TZB&J$`7J)?wTelK@1?Ub6 z539Sl7VL##JH)=&Z;$d|z`mLi156fl)0a>8D-3X4+)WWhqnyW;AXD@0!uW`+Wh!ud z{6d%1TuAYBEm0VoJ5#XQyM43*YatXd<$NQ!m}*yTi-}ySuUuL9ALvEqmgu_KdC)}o zdb-VgO6 zvd6mLl!0K3e+R7kg@xtKiyZvl%{A%SLg0$|GjmRlgPIrSEoteTRP82~fl5a)TIT9n z<>{8`7oDN!IQ7?bQb2>_NmswhAi18-ey}&I)BZH~1ON;~%XafJU7Ihn^>&bU| z@IBK9M*V1acMdckX6Ghzb=6|wHLnvP%J#?oL6=iks29Ml{*m%x!t&XV_DK&PL1a%u z<#b5Z>5HZs&o?v~zUv~7)IIFL{+!!P>52SJ3!UEYZ1{aQS_!vj6EE)v6>SAH=vTLo zdR(gV7stvBjXIy#RTVraxQnrPcAfX99+Q(>ib2B-cY3k>4BcL`A?aVH2oST5eln89 zYc=Qb;N8tpog%FUVHcs`FOpe5sH9iPmNRFmx22&x9w{);AovauyiyY0bpHwAXw z^zjMUm0>Gj8tYFDb*7Gq?lU}|?#SzhW?E_24B_%5hifjJXR;hD!>IwzR zh*9d>v{b9y$3pd)wg|o8{upN=adQnp?Ncz{pTDziLDQMceH-i_HmYb1i9seuPpVpO zXG5~NzAe)yq5b8yHJwN6N0A(?WbR&ar0}&nXZ9Q1@y^|a`qv)F3M9qRLE^Zs?Vb6Q z#yRf{`fO*`%z^ZQyp!*{O)_IZLezLGpm!rWa zIbb2r>+|$qBn@{0+B;M^FR#5h=v}F$H>ZL$d)&S>+3#W+GjE8vw3q?owv;VNXiGWExwkPz{`QVEADjOUZ+a_K3&LXlvS$ zpACX3Wj~JZalcDBnF>0RMv70K`)>Ybq$nZ!(7nHYkof0(?2C68jl!Ya`B>7H@Cok< z8GB86oo$H7|wTh(6@_fP)EdzkTzFs(BX zDOW}h2-SHhcca;y(AUQKAP}Rjs8Z)lORjv0Wyg;yEqHvl;xEC@O?!hBz^KD5>nZrq?hYoX?f*& z(a58Mb3l`AeRVTsVtE-);tl_>oPCVIlG(8kPv!P{7rS|Q7#hj!?TH2Lprf59P9w|> z?wakbqN>H0EmuIxzXSM$I>xUBU)UGxfZWel9v3m zSVtrY2b)icb?qL9mlq%Mw2vu9_krH@IC5>{5ekZg#yvIH>=HlUw{_j zECFg0^7nt@-quqZ4O;!SV|T7^AzFLdKOI>)rLyDvHqZV~n42)xC3%8_pKTH$qgD@9 zCHk>QTMVM||D3#>$Avwf@TGKB+wrfJ9g0z*EgPGC#e1$Byl|3@I6HV7Ug?0?j@(*I zzxjkzZr5Aa&_m@P1PPn>V~TaFWh@Z=81#Pz-NFBgn*rxSZMGLnz!V>o|Nnb4ki`?O zp?s9P2_mD@Qo4m$r~c&~=g4^zMF@g^X?wk)x&}G2S{(CpRCAILAR$Jkeo;tz8Qcwo zaP_1NSz)u!J##lSYprwtq1*mtY9Rfk99rA*!Fxn1@f-dWU)J`TBQE?5YN7N?lb0_6 z1T>86sfrs^Lv}WylT3{LZ+`VqcB}Gq$JNdzkMdswjLJ(K#U;d0|e9(>RE`P9eX1bop0LIp;iz4-zU; z%@UrH(j%`5+W+=LKv`M2tnKhU6!t!}w;Q7Uclb2=#;O!qu zO-Y)#*g&9h`it*LF15FysIzV}M*zi~@TmcPRK&n>`}GB$SUah4;(sPCE{>a|0DemR zvcK931PoS)Kl+PawBI)2v=)`?ua`fL!WHcrH;Q<^eCPA*{S!Ws7#wPop9{czDdl*0`+pWdK1vPYxEsJPCZ}Z#L=+8gHxLS zoT&tDf%m)=A0V$!ila?l2fYqe!ZrYTVwAx&XFV$IwnrlhV=Z=hBdWKFA%dkc`OvWc ze(z-ihaHC_641X(7w*}c-hTGHmxZ-Yi4J5W6?g5j?AZfyP1`?bULoy$B`+dAD%Zr_ z4%6#}1x5AjLMTGE|iUn$-v#K>&6nHy(@_{NUw zhRinE_=(PHEeVSDk!FXF7SMTlPSa0>UF(p)u}HQ52NB_e+Md&{60-|=W;Edd-z;#+ zimfy{cY7MhFVXe!tDg7$WICAoi$5P_G-i4ZZ<3IX!n;1g7v6gle0r5FwTts=iGys2 zVV9c|?X~afVX8Z-UqM${Cr^1m65%;MZaqbqX}Lw_swURJgBC{1Qtg#6^~*-8dh z-|5MMl=S^;{oh~EM}O9HwI>}hw05Z2Nh5*xro=H&7%04khe&A1A(^X3ciUIwe;(L3 zW%WBRvexfk6~p51i#F%zSnfPMRBP$xwZ2jPDedK(6N8EX17)4rWrm-8_8*|P88mCW z!!NBNG1yP`XzwuX{z7yJXM^9-MNsCaw4FJM6(FlgiAJhy_5yM34%LRnHkO{Tdf9!%!87ON~C6(ixaQM;9nkhF=~ z!}wqs@ZQ@WDf*3Ku+ z?u2Eh?M)RHaD$ESyqRDK@p_wi5^GA!eJ3P=lpKbMUu`_&wNqTI2juB?B?K4inhA;snK^T5#a1y+9ZFX_ zG>^5|Qs=pBYI?Ob2`0Bs2EWmnVej#s;N2tf(C%=7(5ut*CcFU_`|G42rGtgyi`=Fy zwWZ=v^;VjM1R~M|3|wqVN*s+Arl&+fa}NmLc?~)+l$+l9FvmK+Nx#9qw(IZ$C=O$J<@Z2#8Wc=RtB*ruGc8p{(`e z0iuP*0+B5*J-MH8*a&`)=v$@+k{u~O{kjuppU1kHtQQ&?+C!8WZBef1Ul|sEF;OeZ z+kM#7>saZ0GTS;mRZFn0Tz9z$5mrxh+X{s3KNWeIHrym#{gEq>zpJs(PTY}=x?6Tf zTNvM9-7J16lK}cu%0IGIw-5*jafE-9~y+5`wG zt3VFCwPr`qUL0G`hShi*+rlityVKYiwy%C;1WA()v8$A6D=4R&3Ngnv9*XVGy0qVy 
z-$ucEf=T9tRtZGg7d%Ydp92IRlWxpmuly@qJsL0``;U;FkMU1Aq#%~rp zb9+3dz(2X3;jJ7oHxp7kvJNpXM0@Pul@-91@0{$jKodJLbBi-NeGc`F5vKCXf#Pn4 z$zX2*7mGP5loBU!zkIyCvt%Aaop27yN4+Mvm2uc@HL${%S! zPFRcI@9VoMdZ!F7`;#Q|T!c}ittHSgDm+~j)>7Fsm6mB1TkY92=~29q6zyAzW2-iF zAFnUZ+!VI8J7Xs-0skoGs*1=eS{>lh$)?%E`A8n&IHh!>R3!7$+(5+x+%t@^_9X9@ z=cIQVn5V|1YCRYFuGt+ZFLiow1k+;nYLU^QbeK!#)eGw%#j&eY%s+n8S!wWDPQOT! zXpBJLWSq?T)$qQ4@ATeLWCK)GJA`MfbL z=3)e!b-WcoMXksEVb{}nDXit_E>Xauu8^@j0$c|zT>06UA6n>*DHq@du85pnK2a5mdd z3J_bRw{g{~yiAYLdnElTOzi6(ADUhA(;bvZ*)um%G-!tzI0H*z=De{d=7bO1aCg3} zRodv_QZk3{Zv$F9nf@PD=NZ-1w!M8ka6qIT6{UkxRhsk~VxtQPD!r&kC-hztKn0~2 z0i`4;B_K_DZ_-7G5FkM4K|%;UlmH?AxA)vT-tm6o%O-oTwbzi(2G2(rIyVeZM0al<^--*O;&jI1(9@%ZcWfwM zP2??-kKN>#(Xr{4(#FBwG38BQfKx=D)C|YSL;tTz&S$~zcSh#oxvXw&js#ae z0-`Duz`@>A*FR+n3&(_6Rk~PKCQv!apRu-Ml;=Cg{xns3C$F=@4o_B7{vIhToVFsT z88(l~ztxy+P)rKWKWj0}GK*fiBs~MZ0_}zE=(mXLMgCY^JbAAFlUhK^6bNl zSsc0URMGrn`lom0@{2EC8r4{hS6bxgfxWY ztc0>{`#^=t8Av6hUCP>O-fA}34OG7JcV1)Uat4QUE_c(qw}QKY<rFmGcxENIY_dX>-R=cxz7OY+Z&N7pjvI zuD9qHrYJDEQ2utbxP<+Fah3^u(P~|eR`l4qzO4M3FYl@~qxgXO#osgx>|y72j1D)W zpg*7z6Y_z({B0#`7j5RqwifQSfTTE|>vO7~#f120j!^PH_p!$mV@LBZqd(>>}JKDz>ih zMvLui?UmHysX@TrOGrmJ@IA(HkQTOV|!MOA<@1 zs;lPnP`-kIthTy|zKx~$W+sxRz9Xv3DlF`&7SOBYq;Dk3ot>X{=g5J2 zZhR)#M-W4C)Ky(Y_@tE7)%ELv%h@e9qk1XFj&3f=YT~yqAi)4aY=g9WJ_z?s)t{2q zGt-`BkFN7)4&-SG8t)Ho@YZ-r`Aayp#ha9d0RVtSsQe1B)x!Nu-jQ*g!h zRqC&Tj#frV%+$sP_hYVX7wzcp|GaoM%J_Qh?WAs@*o^;gnrXJfP;%Q+xc)Z{(2g%r zm9@cHV%t3$8SI|@bke*6(p&Q_15=+S{CL{#LDcf3d8*EWPkGqH&S$j_#`UAa$3%iu zCam}?T7J{*QAeg_!0o}3x%nvrDFlCZDDtf6c!Qg^7*%lxvgr2C&M0wmo~HhCD!jafdv(ToMXD?G_jV7OY6J5M+O|@b1{)Oh zS~X|LG%X^GKNT`fSKSxyPrr%etj}$mFkvy=4Ve?J6s^Qj1%LoxPVG~5>PB3msP3R5 zD#Iip%X9Rn-7}T3^25d+VJ{9dD}{@UE8I+*%hC*7gb-iPip1YS1dZv8C0OKrC^djDw)V=oi;02jZJC4!b-D$+ep8@eks}1EOmsbh@Q*iuWql0 z-fA6NqTi{TRj+nK?@mC9eo`gL+V^96HNPTrYoDZr{d^RUbhufMV@IcFAou#eKt~XqFh>-d zix+eh#wbORQF@!)TjX{XO=uoTQ_Od-Xi(HGbK%I$3JYY8J1lnp)tI=dqtE#~T`e8Z(w!PT<9neGzUFyX^2&iqY1`_nE92f(Jys&f+5vxrf|T9<=by_GSm!fA_)+QEyke+oFQP7mBnw#%@}EJK6I3w4ws% z;CNodfBnArn+lr zkUO8=hmVE{R>qYvB_^C%>WMj7u}R{O0&7er0ILHfvwCg*`slAGiJp>Lx9NYG1goc7 z`x2&p(?}I*kBJ%!OJAhprD2ET(|+QJBOy7f9Uu>lYr_z=;K#{N5|wT*MxyCLvNpoN zyN-b$uLn1JM=&WeiUbYmj>ra6jP#a1<&1eJsMape)aBN8w9HYk4cVl1_!VSOliPxU zJTzp%>NibB$MfMUIiI4E`*Wf=(onu|WvXrZ7Pz{h&l$=P&7arblOK$E@d! 
ztH^w_a8yCkDkI+D(}xvM{)XL#k5#WD=*3E-lyRv% zpM;cGo9DD`T2J{hl8U?BICU;RiOjchY|g@3RadvYsgQb3;(vN7JzY>A<+R``obs2p zki0+f+3xNe^X1#5C#8?Q(^a(f^?(Yo_5?AHHV>Z{x97)l)+Ulbs#&^fwN0r86}h=B z9Uo8Ka7L^G4CGRqovCL1HTlxxXVv5Oov%#L=E2PYNRO^4UHVTB!W>@1Jn|+5-*@G- zo+ink?>7b%)&mp{vmSy$P>1GM_- zHhrbay@MCE-eZ~i>J&|a9B>IhInz6}TZe-8k0OPWp(f|~)R;+Ia-BfwcVSSV3NrUs zP?*ilzQ-%^w|bRHH^1GI{%F(ghz_wOmX99uJ7uD16M^-?|D@oTkv{{2V~$P6(d60F zG8^gCAXL*eCsNUN|4yRnx&A+GvABw_kdGt(&~+=5kkqhZb>xcS83(lrrsTB>FB1sdyUw@q2UW})Nc#ru=SNP;8P+(1V~ivJ*Idkf75() z_&}SGce>k;0iQu{whVS&xWh2+!{BjC@c7y(DK)%}jZ{=ai@t`(0Au{Ooz|N{0oeg) z(-HXLxkxgJe7KsdLBqKw16)x4h>_;B`+!L4$W{8$rM|-daRgNL)0Z%r2HEN#I4Va_TWxlQjC3P(f~c>*p2fNB6pfaBk-e8EQDRGMQzi*!is$%(qNk211P zj2SmhBd)_V>a3M``Hu$x2C32QwD_E}^VF!gQoFqW>4>}$+5ic7ETV^Z1>rvNTvU00 zVgw#}B?V$bA0H6(|EnfH|KD`E=fuY!tY%q6tQ*xEDVz%50MW`*>s} zM&bJ(9tStTNch*5Tc_hgTT_0S;E99uk!*3g)C7OyN+!D zf-nGZoQK$hUx9luUk5@dY*b@nYmFY`YBV7g77)JJ%RFcl4lTai7FU)XzCD8KS27V1xYx ziy~_uMvYkjnOH!oW)zMOSb^P*1$|!cu-jvE%pg>DJbpaF6VZ1U#h;Vtn;3HHt)1P1 z{H_&5OgV<;OqypVYn#Xs7Zw04mkRP-N2M`k_YBMp9SwkC0G|T#)4%IfPSXLO2G{tY z9@q~elwVsp80hTcefDbD>7zHA_n-?nV*yx|;0-V^@(aLVfmlNLO=EQbPq=dV7=E7D zDba|yfK3WiC?fQzPcF%Bw?LNpSp)Q#cntkt>A*_T{;+`jiVvGL zj`x|<*sGhgDkC9YDrdK1vV7xlv}wejp`_uzJnu2`?yK;f+7p2(`Zsk%;2%D+`_F?H zC1o)%q#gTHWb{|<^ug49^+X5Mst6-SoQJ)<#2SGb66Ft$ z-!T}#$E+$BlJ0bZycZJjSb=iK<0)D+@_r;5_%O&Z2}W#&AczEQ)s61yW2(T8fwat-sPExUMjz5rbyvJGMQZj3|METJrrU`p zRK))NxNJ%y4MT|-&<69&`I5j^l4rI$)Y{ZDORj5k;MxIin(pcz)ZGa#Iu_Z7P>Jc znv;Njr2MUSYSV--*u5zg?m#0uqmB~4*?x~A)nOB!?>LZ|p(HbB*_xv5&5b%Y2b6@Z z?dZi$&GCNuK9oMwnPPbr?Qv@b?;HGaO`wwKQ{G%LR8N6SFIXMa z9ZYzVUC?&^<0gTjGc)2)yM&eyyNW2!X4nj9B)C6JfPkiN{;1&CQZ=?w(C{_EFFUj) zr~ULHU?T`jEutr$T zM{?!qnA0kr*jKtSOYeD>0YFWW*h$?q11KQ#vgrB1J2;#3vmF?uWKx`q1;1*bK_lzn~5gtaW%HAX^gNp|KVkPZYu)BdzGK|31g979==KNT?o6tVHheXuEN*L^(94oa4o%KYhXQDDUe*CF;FB{pwcQf|;9z7}7u$ zQd6}RI(%dP`=ODW;!#KGLJ?eh-*{)J_YkVO1B+?3dii2cAy?&PW8zh2i1boQ`GL3? 
z&vY_&zFSpL=I>Ac7$RCSWWznXPS{UCnF!C4HPo}CQI`Nqg^{*p9zbWsY9a}TOCDIG z_A$dW)^~!pdRclaIutfA-yEB&eEJ+Hac4kf0w5aEH@mubCd7%5DUY#wUmBJ_UY3 z8>w}j)qms{9w)6 zEC}+mS?JniQt8p{pjhblT5vzt}D71RQ>&WmV>dcr?01=&X7XuTRo+oU5&u>ZP>Dn6TtxC%F_0hL91;;+5S|t>J7LHc9 znDh-u+;LMuVa8)p3aHn1HByB*wQxm_=C;T%*b+ozZ)WE9G~6m&4+5%s?XHy?9OLri zWqR4L6ZHZD*Ad;^i+WSH5OH|ba;{XK!_vP$9^x+^=M<5=Y`5XiXhR&mpHcQ8&Tzd6sa* zBPHok{?hoys$aO>!(DugG-TTk8%`}=13t?U#!89E z3@gU1)u0TsC>OrZ0iHui(F;3|E8`HHVS!8AF}954i%TUG{pC2a-`p)0{k}^s^(b$z zz8gQ#*4^9^dR9_*121q2dy3Bb3UGrL^TAhPjvenpj=H7aF4W*-{OqncUlz>`nw-DY zIKHt}wz(x$@hr_~q71SUeK-!<9DGxhQt>SQt(bYL$Qw2BRZerWXsI~pH?Jq&)g@O8 ze*Jyg)O9y>g5B~}dj=-F7RhDWiDI!;;In+%R>Y9rXKXw9=;@M%lk-glF2!=`02#wk zT_^)}ZlOjKwb6B|6@>1*XsM$vQZlQ%Tp80Kl@NzuAaW@AnqwK&3uATn-QI4=t7SfX z;PjPQvEW_DN*h->r1HfjY_Eh5*!0oL%H|E)D5TJPlk*FS{a60j9{%P&D+2U9%DJ7) z9&Sum3xaO_>w0rJ=5?zr3e>~B@Wxg=4&?Z4a@Bmo^I4olO^xiWB8U{k!9pV#;k5F+ z4&kvM{cN4-=H80%ZIN_R{4d?PoR!E(JCLp4mu7euYWfMJt4*oCT&!+>J`-bHtZZQ* z*n2FpRaa-z=#cl#XR>N0wQZ!C?r>Nuka?@q4 z)(LsQ5jJK6bCN8i9D3cFozf3P+TQhcP(K`_L@wug7+oYM+3eDNc@geuK7QaBt;onM zO5Jn&L}x50?A>)f-9Gxps8FK(6V0bIY$Z^hX~Ilpw=`s#eja8QMvc?E9;n5 zLn+w=wU*Lek-&;1+b=|vy}Rb|O%W#m6ciXP9lG8Q+hOis#IN?l5QrbI$d3a{_CG-ZD#uV>i6MJiQaMPnmSp*Wvb*5BtqZ$3@!06=(Xvm@mXd(;{Z!KOVhun0_yJQ2 zf70@>YQHvS#R^hiSqCIcL37cRvVs8pFzE2=T~K*z zYWih%i8!wsm7A%|35j0x73B}*OH7Lnig5DinPH`)e-&bgtViV0?nq8~-JaHhP^hTHl%RlK# z37w4-52U;8u1*n6_1qQal&pg+&4t_054?n<#7j#x;Wio|eKUyqph7KCGV?Iw6T8ws zb5`2nKF?F~sew*_;t@fy9t{&Ooexs#rqZ>IQfLE`CfdVvfz~3_G{r;GiIrq{3Oq&nstIbJWu|3`j9%IAv}?oy=!(TdzR5Ez|!MO z&g?^QD)4c9!<^K{jyz+*78{J-M2h&W!qtYe~-BYdn^GZXIhwF=9%)0a$Z~6UF^$)MO)P|{j?J3}< zRaUUA7iE?M%FWeFGTjG%mX3j{?Y8IctP8w8iRZrpW4in0H;qe0m6MyV+p>74)JkiF zZuLv_=cY@I9GEzl_T6){RW$>9A@PD}lPZKkdo)j}IMJePThimo%c#}KWyR=aF8dc< zxGWC8XV~{8Y7J~{6g(7zsEygYek$rJJGCazz_y{byR1|A&wXUrRqD*HKrCeME&kNX zsP<=_C!5Q|yhuTCoX1D2>!#YmW&`t_z`Peh=h}tqTAN41{1))S>u4|2%lnnhSe{9Bg zCDpXXMffA`#a?T;ww}M+&$yyxAgV9^a$6+wZH=`!Qw!C>l|&eu#x*;#Tn}V1Ty5#F zeRP(h+KlGtYkPyr()ldNTw{)EQ?mJ4W`^Lz|VfGC*5nG6dvKB7TtGn-s%;G|dIa^txHNG16%!l*! z9?DogWazi!0xy1s)p@>FSJ}b!W|?hX*@;>9j8_PC8yZ5e1_gG`^KoxSNPWrzJ{KJq z>PrjHzNj63yxyED_O&Ly_9{jZbu&?g*y9OXM8X=BdOhhVnmhgR>IO?zC|08-D`L3Q zyNs=-do3*d%p^iKLuTF&vrgVSOV6Zv}J*4Ta}&d9x$WrS*GpI z!8Ck9SrPNKRkl|nxA@!BjBUM+C>M8s{hO>qyY4G?gTCC3cQY&-of6`p_U+buT`sA^$oy{<}^$&9vkNi8O^rITPFTQ-azksugq zaBnOqzS4cHu*Xq2-O64f5o-SZ3QSHILMuk20L`cNcU%Evni!}Q26`pv$T-UG4dU{O z0fQY}{7viE9ZalbQK|q-(CRl6g9`bfhTE=6kBY4l_*b9KzKCY|O_S$kcEw2aM;=n? zfcrp*_aSepakk6N0xF-C$E2Aa{)hKf!y0DcT=}*(&IZN;toLZ`95!HVa2_+d2}3?u z!Sf}m{i8#Cl^C%*z;JC>NBCNl4lQPZ7o2_#k*F-}i{Vq*0JDvi<_5rGjsfunxaa^zAQX#Ud&c&Q%D$j>jkO66 zI5N#s7G438ns-hhKF9Z`HyY5Evgp~ol+};;g80_{KMCYxW`v4HJ_v01enFleUf|!4 zZh*OtTz=K(6B-Zi$b^Nc@Ajz$;L|qy$)h7Jw-2GP$o$pFoJ7Q#r)S7x!9@XUf82p! 
zP3vMX4)HjNpq(X?SIZv0OZWpl!lVO{c+ABQNMfh>$s{RCJXMA$9VWn6sPLVVK7jmtMT zPY^r*6=s|n&oiXN+k^3yxcSTIK^f7LGf zn4F-lM=Ap#BP7xYT_>Fd>e#NN);V!`V|e$8RC#z{BPD_Vf7;>aLr*}u2Bcbfi(}r+ z!(n>k#Pb4)u!pA4GZA*)Ijl1M;9T$sUq=Zw3ZUF30csk3#C!WxHe3>#y~z3*oZ+GW z!Qpa^`qId4oy}KN&Fk4~JVmpoq;oO>$ErS4lGDoDJ*GokV zln4p|jKlvjN{+9z0!o=F^<$nR4bCIP7h%^8zxgnx_!?XH@pAQ#oCwvIP8Bl!?J&7M zWhB?_+cKN+2#lBjo_EZjL-UA0eU_KLT?xUg+mU1$B~#XdctfFW;BS%Ah9N)%QuD)s z3mxCRqw`;9H6q8_-KNFl&?fq5bM7y{F%CG*{Y^6vmO}klxcZx>4iN529^^NnmXZh- zjt>(V!PYon1yg<3gFitF?01E|!2Mr~)}>H*AP;c@NVjJ!((*FO%Dnb;z;)$1)z4D8 zW?|Q-0w^#*&cp_-1>-b2@hOS0!W$pXmKHjW`N!LknZTtqf07K2F&|M6eK8;q;bEiD zs`_aPu!zbQ)V3Axs0W}J_!0Png3G)W+qS1|9VZKbxV_dPK<58XMngVq8%7CX`X|lQ*)hsKca(K49Bim#|sPx=ZZ;XxKrC!Mr zTk#1#q>cAQQ_ez6xSF1|^5VyRf9}bpQNwTet4X)P(&$pB#z{4OIPxBN|Dra6iiX0@ z5*$DFb1uTC2Di!fu`VAH0RTR*n{Oqa6zQdJOBif^BCBWrR3KP>*iXP0_93>qG6)T)=eUE0M>qbhliC+hXWK_`NY` zBoGdgzX$UqEq5PqmZ6jnX77T6j`n$BDcZRxF%2X?@U@2|_Ws#(K0rl>qLld|@P}yX z@-Booo;Q(^LEWEsml`e$5AZp+a)Njm>N{U!>7|&gntBdj$&mP7IL{{pn5CbGKy}F` zhtpYDH=x;bP#rTrzIuhCTLFCf4D}42(D8O>zViZFWw;v*KND33k2WK9@yr7KoT$7F zZPLGj_Mh4UY0B_@6^2SXpm%3!q;^hEvUhxdvPwTx6^01;K_W%@Xt7xz;6NRj{AV|r z^Nu)Al3^c2vr;_`zy&P_|4EG$JBTHElVLzEJ)zZ$u#vMwuw{`rTP6O&0{>-eE!KZPdgauN>k9bboUe28aG)UF75Y0s%l7y)zt zP~yTN3|pb<%%k{oC@%h(V`KkcXI7-<(?7!nQWQo$U|H~?r8;g3zbkfJ?!Yy;<#iWj zov6HxQHT1KD$<}HZ~0=TV2E#p!(wg$n4)?NY<+vcI)wy;he|?+xa;xoI@pdX46TJb z`uNZcsy7H#p%@zY0l@{8L&AWp1U^vTFQ~u&tqO89A zQ(*jYEBeoGh*ncJSlRBLZ4>VO&>5sa?RzuZ!Xf&LIQ^uo-1j^~?pJ}3C7S{gk-x}=^ zNspOcA)3Qg89=Uv?H-|K()n+9IXsB8UfCwRk`d_k(U zqZArw7xX7RI(#3zG_bj@-9!Ofj-v4Zxn|&nHb^c$07r-a84<#pwQBjSaWOvS=V_8_ zm0N&)qZa=8waZg0i)jeMrp_(#fsvg!u889I!AM1JSqD4-9I%(j2|6dPTx zWeEzMmL4jtTFV&2F{nc*$0skH6dF8lF5;t}P#HKu8g+eLz6ni$=s7j0+_~bE_dKOj z_|okl!Ub1u{~wb;t#e?(x+0oWYM(g%ps7AXt~2d>hI9XiHzEe9$}-Juk*y-Q=NA=j3MLJtIdCcj3e( z$N2kx+q^IQqRqFulrKi8{yoT0EXJp!e8`C#`*e43WYjy}+QievMtqMLE`@HZvwab& z5ffZAKWm9y=U`b1s+*Hq{%#<+{dioyrq~6}*EVjVTZ2cRS=!9=bqS4K& z|B3Xalr;?&AO{#jy%prvZ%3#8Qa)g(p+}m@IeFmMRL$)d!hf8vP0up>%v^D)o`E-@198Ak9xT0 z^vlKumR|-UUmJ7o?1{_Y3x4hJpqf6$xM|Yr-YzKmRk75^FGT94U8gsj2R9*;yYBY8 zo&eZ^WzPDTb*PQ>PnxekBe0>B=NcHDrr{%V>?I zZ=I#Io0{U`soJqKj8EVym3wGM?l5Y%|J$75f?->n)_CVmdkxYOD_PO81Tivt^>}Bb z&feSYQkSpiG8>DXg)L8IbhF|aBCvs=z{D0!9vc9DJ?aH4q>r+yjor!`bjVUf+cQXph;Q@U@a zGMdV1L#`RAs>>R(*jfGbTq(NJlqc?k$G(l#w@jCysHM+#!+f{N3HN5#jf?ABWQ_#- zL?YIeK0bAks(c`*U%dIijjye`F_xyqOtgbz_;xj5N9NMJ8Z|l$-$F?6h{m0G-*g|uN^npK-l@K)o*=RLN+R{rkG@}i$i=rf^98au=x3PUuPx!ubcZ}l?o9NPl-leU4Z0Vf9M;=s7<4U_1L&}O z)Ek_NGz_b;j)e$svu__2U?2x*d*|=ohDsqCFDaXz1Unn}zm2k_%7ioAwQu(`#Uy0~S+3Hw~dX@)4#RltFmhrg6#M{_6Z347k?WP~r z!GD6#08Hrr_`8AsTBE7~3jyX?UZz-|(VSa53<=F!H+h(NfN^OLo=z{utpdfVNB;|40db{*4XeHCExu~RK4)w*ABeO;()v2)&u=1z04r6C5 zJ`PpApbv;u6?>fp`^WV}G-7vsMl4Q2GaUTQZrayXsbfaOOdgi?P z+%LN4x#w(s@OO6wA}GeiK>=!hJHHO*CN|-v`Sa5@9$y=)8y!;B$`yAs{a;9z70^2i zViColA)E7uT<&Ur1eJ^Creo1#gaqFqeLJUeT#bzh5qlEemKjH1DVE$E zVm&oen|$$W^|$+B1X^7oZSHLei$^}c28ZM>S4(bYv+>GoI9?MM*3xNm)J!h!x_+-? 
zZstj;56cTXGLQAn!%@v;vd8|M7Rji70#bgEAc}jKbSiiv+*s9AU4{7m#_#m4jbqf( z)7RhcM}yX9$Z?vn3nuqVU8>(s%6^_-Jug_DqjT|Bqo&g=sg%iIx>joTlJ1pmrY89V z0}Wd%%d;!BdfU4gs~Xcy7t2ZS<`$3dn2>eMVhACvNjy1yW?N}zPYdTi*BLi@Rc&st$UKojqR~pTkFd=DwM~B8k_D2H!D)D zeIUrEzFHo#CUpf(RvVSWFPlsE%^=4^$R7n2Wi#G;Sd5Xu#>c@hiPU8`CTENGIrXCZ z0iG`te%eNI&(h0P3wx`^cb45b8@-m$*TN(K;Bb9K=QIXpdL@2-WWMUHtzzmtw^_kD zDseZ{EA_IlJL~rCmCK~cXlVr&vEIOWMy1S)cU4kn#3SFQy19j;AlKRWcWNxkyk{f% z#oj4hmHN&@V6pFPusxS#)Hb02y5zYD5ye_WucLZ21a>cDr{-P>U&RnV+b$$rAUukc zN`f{+^F7+_W+alEYrY)tc*nC>2Kv>fqW36&>sQRL*BqcvCVN#1Vl7skd)$CXI5GJs zw&&R1CxdI{5)EBW)yfzIfD*=^&7V6YB&(i{wfWq>Q(+0_SW}Ag4`i)l8RzyG*vjIu z>WdQn@LqF{}B)q9>E`l6L%Cch0&1Tnq(lp_!TF`A+Rr|HRy1FS! zEOvQ!z&?a~*@CW+UxoRSu;JHp90R8E9z23gJZGk>Lp|J@y|Yxugi;F@`taAs+|VLZ zvnaE!1Pg`G7m%XBS?-{#a&Hi9pWp|76&UgCjA+VuDO7-7z7;dkx+ zCZJO5-M=?K6O7nV7hCI`nt!xgPJft@PpIIVpb38UPbpXWuRZ-+$y4R^JM6YM$S%b} z&FG%1W*Pka6Z>8Y)6X)XQIG?$R)~{FYcIzX!&SUCwCo#v&~w8}TLO&5QL7J0{Ts&TE#<WO6<$I5-d_-Q%SnTx27X{kB4>U#2lpDRt%i)jOuMeWT6vmt_0}!oIxf4VtgV zOwK?vi>DM0CiC;=D)Iwa#@U#;bABUuB9KD9eN%z12L3TRf8xfgdRW0G#f&i6AqJu-?3*r z(r5p%01nEw*=cwq1u-m?ikSJaS7Vg#0vC+4Yelb0+>Lv}tu{}nSK^6eaS$O{W0xk^ z&kJ8jSI*JocnUC;h0DtezaNH6jbSPwQS2+9kJBdR#LE>IwmiRJt*j5&=-lSHle>d` zB{7E@rJnx&Jmv;&bofUZeVShr;9l3CBt6pitbd@BvUpigKu)kNZAqzTzYVjcE*$(xxhtaOz0JgXp>QW;D^}krROAa(XU^uvTCFQ# zQ)I%W4ZYv->C7catluZ>IhiTnX5BQ^mi(J$>B?|sw1SPlr=Z)g+tTczM;O>Q{N;np z1~Xg!wrB&z@CQ{FEv+rKWZgf0TD|0@h`YvnG~#t$Oc|xj7j6)C^YjvQlq1W4ny@VX zL9n?dl?fu&mnHH;G*nVsx9yH_Frb8tkl{n12DkqWP#JjFO2U<$uH}pQ^Sn6Q!zCA?v z#rb{Ekuy+4OMyGS^!XAek>jQmBrDd;V{#t9n)CSm5ZgPr;v4ZL*Se9#cJ53wlWMd+ zFtKHMAVb^Pki2RH!J{!L@``Q&s`+>n(=B<5Zdpu<}G^ZMp^XC)ib(C`YB zP&(L3Dknfs5l+pUn2L_c1)+f!HLK8rF(U9`${LR$VXHb1#RQC65Twa0N1>b~l)&(v zvV4sowGP^T4BX{2G&`HdkL~~mU|$vm{zXpzoFJhdX@w660Z@>GN22O`vbT-ID(khC zsql&SPJ?Q}eC@4H4U@^JpO>`ha0ejpVBUW;#LoBe=ZJ~7!`YCwA>klrIi=?W0s-Y| z*-t|8H7c*{d1H`|`*E>!EBl0asK&7XzT(Dz6rf7{7Rx{s;&iEFeV~@cm zey)c8)K;byu=AwWiOsMlYx#t-Y8x3Dh5S)B0L7{UuI-HyN^b0=*R$TtN$H5Q(a1i?^8;ibH& zL^gZ4t~&KeIFf-6mWhi*e2@6ApT6es6~IcS0>5^RwqI;Jl+HD@b_xy=t8Bt)5*mXv?VSP*&Ym1+wDKz)ch~9=3g7R`|+Y5 z;7li*Fb=%X#pSJs{-$X;bK~<+9)aec)2B?Sr1oZAc|9hKYnCWjCg`<;yefz`q z;7=@D6V-v!o}v9m4y)1Xy#*@-^s9-E z;c#l?f6c_s1`&7F5%;_Y0L5)9C;1LdN}qS+S~&RRAQotL>coW?15DGb9fi_4;8uo3 zm<9|b-;|1hH63oRzXD#i+%ptPIA!YP%aa4!@!-~Q+_CpDgjyGO7#Sr82gxK2oF;{6 zj0Mrar&bO+2MCGCosxg`*rW_lO3f<5I`I`PftX~c$2^7l5pW3TKovo>6*5V#{TzQs zf`Y*a0i$2{tz`rh>D3b3w2xQ+ZNbM|wb72VHdvq97iNXF`XhroHlc_!*Kcw;IhXE$ zs%7A+Cn0PhcD1Ke#MyHoKLNO8Nv!6FTH9G5BOo==Giq+&%aG z`t?_m!x^j_X67Ib=I?lmi!k!^I4hxBid3PcdzBc>zXE&n{CJTSVds%|YEP19`rP=G zfh8^FCtytW{Y|r8rsZ*#=Z!K#c66{uLCvFSg(49ip$MbG=h;A0L=1+AE&`+YBN|n> zP+WhO4{A^TT(cBCYX~X+B4IEpqvJ5%YuS~R2*vrT`=Ou6GvkOMG z$vlz`%1nJh0x`BBKk1Jf>7M2AlF|?)%2`R+S^hRmhWgi6=ubw#U`jxpf-Rk!X>wAv zIL~3@;m1^jp)+kBsSBSNyajDjCX!8p@riA|{l96-sNrsGn;;e905BQ-F3c1!9pRPT zGw(Y*MV>yvVt`j4NaMJuZ!MJ2f?C)BMT+<$9Jv65uLb$=f) z^8l91qC;Z13a*l1E~%o-x#Hv7r|KKed>VMNnpA5anKlbo#lc&D0;q@?T@Az|HgL=_ zh46PL1J@C!Ff<2&p8Tpg@VKiBZ0n6hvKJ9SK?itn#SPjZ)X3egZOq9z?HM%>JQ~RL zaDO}1^-vk$4HOOpK;tmIzfr|qM-rGK0a%?=C!?hSuMIUJTpr3QhTa1GEo&F5zY0b)7(i>H19h~sX9 zzR;W_;(9oP9c(N@sg>d~5%1D{^r_&ArvkRnAKj2h^eL|Ae z4*Okc=1=bm^ne9nxyrNgI1n`AH;n}KDJnaI=3LtN&J<+_jI0QGBvq}l?wdBCPGvvW zahQ_x63=zm2LzW3|L&0lU!qS)l$P=e4vfc?i_Pe&fIg(e?{6adcd5^4^G@$>!LDiV ztK@HrCC=wpQip);1MGSf-Iy#9H&Hf(mo$DrIn#)2BBJ()$1kGyYbI6aLFhS+BTF~5!JGOoN0y~Z zp%ruWR{Uv2wsCKTo;{IGc%7D(t}Glm!s+`e`K~&~`0pVV);;y=t3zrna{O_bjLlbf*1OC}n+3({Jd$@$t+JhG z+>K%j10eWx2Yd`3c)pr`yb7Fcy>WfN+`0bCJx?d@jMVr5Q-f)&W9rWn3X78+6JtX_ zYJ3an1{RVLSJ=i1cl%XS+t}&{e_2!C+?*=zEZo$TsumPKk}>&H+`wDy%az#kqXi!a 
z5=DZ!F5-8#I*{bddt>UF1=dXELN;hg1q0bNiIm_&vnq+N1EYE+5=PNj3E9v3%%7Ii zFFo+Lbuy7%a8?~jcd!5I%3T-_9F%M1<@5tT&#HU=(^Msc`inkABnZjUSgW%4!=t5F z`T|ySv4dgldh^QTSCGxwYONL->bTE=#(G*Jv0XVqWw9X7&q zXOC-Feb$?;8XzIL8A*a32D;;Q4M)Ld`hhJS>8+UZUJbdyC9TjC)*xkYuFDK|e%(N! z2vNLt0wL73Hr6*|3Nse;r0rc4c7W#TS5|Zn*se`kD2_#+1YI(@GRW%}2m~=@S{0Do z2D?Ak<2P#F@QHGdnFsX^eP{karT(D`1+0*;7B#}N-&U6h5H)+{ip_1DlfGDWdz`-w z_@L%5#{pH!myM5XD~1)?6WsR~+$hbliXJGZ4x|20bCo?EOyv9DY6w6L_VI2<^tM8F zXgF;bhpIVXkSZyt!@HOJ%Xg0YvNi_ufuh_Bsz3*D=$8I__J@22%X*e!VW?lb zpsGk=df8Bk%Zz`Itc`PKNCJMFKc%!jt=QakG(29vV19ZXz$!RTX%-VYT&tV)vt@#v z6xzn6AtE#P*$X#UusoC&oh!#`fJc9zUJX`(${wbwerPYC&cdaU= z_KH=lSroPRsM#R)OsZOKHTEhZw07-1iclj|iJeyLs2DY3izN5&+~42h^QV6#=j5F8 zKJV*wUC*m1W&Vrqr=p3rXCVn4mVZo*0^rXPW?rI?q^385g@k;dUCoZq^9oz!KWdY{ z1uuGhIowzLt;V@|!p?BW(WF;}zrNA#`{nlJ(}u@lfB55PeKIrUOpRz`+=3Tn^jhDp zKj=^=3X=xC9E6F&Ri&p+hRj(=-o1g^_9*sY9|1kKPtLbVx*43GaPy8f-U{+Rds2S* zCUnXU=1YtBgwZJDIaHv*Dw;UJp8?8;nDIF~pRPnwv z6YPk7P=Wc%<>}@F3HK^pw|T1arr%*>n=5LNOAyS`)k|czom9I|HpGIQmyKCZJ(jcH z)%aFIG0SB~(LR0u?WBjDORP3oxk=F1zOM0`hu(KmyJx;He81AyEM2L2Y3V`JNAQil z5M}sgR-ut?T>OOB_UD#t31`o^1A*|bx9pp@9ju(9+NxEapj!4$J{*DBtoyq@uAmh9 z?%bq0rQ13_?E$(Zb-$V*7tBq+UHS4pz0CDhV1jbM+g;&hvFD{#+w%r1i-xmU!yg$s z>^noYg(vpM*ufLjELI;y)akp_Ay)J&%%D30{VwY2k}6k!JL}sWvQ6fscQcfIJcrPi zW*ZjUOER3vW`~RKHUg@=jkQhR3;4Ck4(7F+A&TArq674Zo99ah?H`UIEH@qf!) zS(&~4!?q^-f++u0oX|gap3%R#Fya8LC2(HZcYO--S+j3Z35~ft?0v>O4fA?_6=4Ab z7(D~b{ZY2sSqH0GuM?7|Juoyv|0#lK@9D-5Z*%pt-zhUty<*?qlqs9aQ5j^fy=HCx z?vqF|+IJkMz5Y0t=Vy1Vbqd4mkD~kcX7iK+=JoUf<*TrxnC9d9CzA-bx#D{*?oD1^ zrH1U#IQfdbU9EcL10C%tx$-9KDv1aA&)Z+4b$vKW%;Py44OGl3C<)7NnSG*#$PnzvGZ}p!F}! z1%odKz+4C^xx7nfwXxq^sFoQOys{#c*W`G6E9ZP;{My^%R%M5z;1b6LkT(F4%?mDc zZJpDSzMTLFHu-R}-d-&AwnKf6PH&!p5z#L;c$ZEK; z+PASP@X5DYWgtG^8z%I6wZ49^{(g;z$6hX8S}Du;)Rw3wdPlTC(r0P$h5$%GN>=9a zfU2Tjyk}lvru+|GxYmWM#y)$tc%V|NrC?;wU?q4oW94q0_h&NginY)^#z)}&Pn(qO;Pg~oCN@eiV@8k!gooh(X#sL#KD}_Vxq$kx z{}%g>LEn{RPr-lxf_F+A8yofo^DVq~@&|JSt==|XdjGsEY`i&| z>D<763&jRoHac~*!^%o-8}PkWV}#G28uaFv#}60ahQ4xff~?gtD(6*qKith>3i|l5 z%7{mev^g{T-LFoyvJPWya4t-pjKIFD>0l2Aj^`|27xh+_@+9iwO8A3dL;0C7_sL=1 z_Q3Lx*z5Wg9lhcPt%&64Se^eeJ>Fsfg5EPC;iTL6qia8uISeEPTRRo&;{rUr6&uw% zK?W7_IsFzZgkJVkyI*#;T@o&nl7|P}jxXvnmS;GWcM6Zww@NwmYUVHt&=WTCQr3u#rhjiiXaLApv z-(TDMlqoL9Yz6z#hJ_C2*sB<}H`zNdynEi!J}o>YadZj!I2q`knKZODaBUzoCCamM zS{0>?UGyuG^ZPAxT-Eul`Hth?Ga{slVgI%t)1q}-sKDKD5nWf^G)AZAAxer$cUu}U zzwpN$A(vntyYH*$V~10C2Nu|+QdxJ9$NN&(4{mY%82@9Uzik>ev@8{v)+ug_dVmnH z4z$<#;;_AtwJ|%AbARgj%dcj7B83P)-`Q_KyxjxM4!w}LHlQkIG!uOFimvV=W^5h12mB~-=o15Q)=IUU7l3{5WyFuL~w0~JuMSEPRsWH>1)5TC2ZATQE z=zF&v%38j)2sU_D*FeSV*4LZS+Ovd3=>p=PMMPv?TJ8+c$_+X!o77SyS^L z=GiGNMy?p4F#bd?WHp+~G=1*v@cQE(yUi7T0hzF*9x2z*03o9EwQvmcC4OEFh8Dm= z_TpZ-QyuupOT1iXONgaULx|dY>N8*#~ zKO4YAyHWaaa`l3>3yV>Jp z;wg-p72U58MC3x+ItRui-yQJl>PTk&u_^-Tcu)-(Whxwh1c1gAWb)hd#G#y!X?%@ z`u>n%qNzcKVdTZ834p5HzJT*=I;63)q$+=q;F}|#Evg+OeZsbnqUxvL8 z5;+SScXRMzI%1$PO|GA%exvo0yIkUW^qKCtOU73{??nGeOe@9ptFEo)nx!sGY+3FX zEf>Z4~Qn*r{WmqZ#>Ige$ZH+Y~K+W81T3C%z&c< z-al7tKS$oe%z{5^qh#|X zcjOky*CLNAr#x&kPILDhb+XN4)2hSzDuQ|}8~4onaz6~ZJoWxj02z(y2U4#e{-xmq ztBHFdnOaY?mc{J7PoAdKbZB`kv3`sw?cEwt+Ft8yF~D&3FDo93fgMC=!UPP4bG<_K zcq&t3j$H!co@AHH?#a1w{Y!Jb)nQhSqj>AvPLtm=>$p23U-$HEln-Q^r-iXFqgg)y z>8YNO1-*Hi{a~$1GOiLNL!{m5aFj?`KCOJvXvnZSzbU_GW3yplJCJa(p+6uqM`AD8 zYkxLf>&9cuIoIC1{+R_y)Hqq`V*f|0upPY7M3sZ>dobAfTUWA9vA37_Xv!10jJ}i$ z6Cf8W!S;rbe`)kWd2c9?xrT|$yACUh$FZlDF1h35mO8|YUSSP)bK_c#ThI3Z*Y`yl zc||@jSpc|B45D23ZpW0aFWf!dUFx(jyzg1k&^`%zutFbnDfI2tz%tvqur*pdcvF6r z`|?K53y@CiPyeA4Xb+RlIY&w~xlm0YNfK?xmdf;ih(_ 
zVAb#gk$Yw3!iUD=xC4jqnvID3xTY%0-r13FzrEY#D5JS$wg%s#6hi7{1L<0?_7s^C zQN+G=z)mz^Im^VK+{?Miz`71&X}d*II@hB9?D}UWO27}_DrfM(7Oi%B4{$al@+cU6 zYyQ}8aYu`yz<7595fxS6u(04uW<1GH426vP|5&RAM3lArvrj}_dTguNL! zj9PlNZrCI6T61VKf_RCVt5CLUIo@paOV&DWmJN#TSOj%34@cL;Qf;M|Q3TQqXX_R`W@r$z5|Q-B@J7Nfkrp1K-N2rnUF6}b zMIRuO6VL&SRE#=c`Cp@pxPv$kx3P&~RP#3;JwnEy3#y%?jNEHUs_h7P5M4gCl}u4x zr}Lj+u(N;g0cDY&}W zh$Qbv&`!3PV6nxLv#NkKna>V_o2g?adzA9@VE9TWCvqfcMG*Or-EHjIGX}z}QjfZZ zG%Fr0i}F$hq-rM*Y;rgtl0TJEP#j<^8;$;#24mxut27Ws6Us@bF(#4@0HK(m$_hH! z*wJu=N~@*!1h5L#YVrs8>+tBnx>xGyd%5Af0NuiY@f3O!=|8yYf$)djq4{;Ne~Cc9 z8lXtlG1=ZANzR6Kzs!FI`wSeQg+l(uSqxP;ZB*T{TJeN~+DO}SoIz-5-qGtO5KjuX z6&Y(*U^R-B#tJ-4N=FzzfaveIkW07@PgSd;s(R7|ZgX?H(ZvotRE+@m_1EM5@~LC$ zJBbUWs#JuiWJWVjQ2rcy){?Ne3&-iHeFn^VmdyLVkAfT*t_WkuD9lcTxcGI1*pYy$T;B zREJ?eu#_ZMo!I~g5(y~m>jM9gLGKW#6AVT$b!zKM7G~54M*Iua;`{OHe_0L%>iAgf zTm%B>4~bUKF5G6so>4i@wAEQh+YbDV9{%dV7(YFaoS+m|v)Un$GRV@>qyPKN*lnKh zT{NGgD+xj}jV`TGC+y?*Dp?y-Hp7P~5vwd6|I$1)jV_!!z?e}O`Sj`jheuh5PDu3t zbx+ZdEnNwz>3U=1Jx&JTZp6?7eWc&hNA*?@s7JJa7lVOzTvCKzbkXX;fD|02rbIFg zrxx)_rLsh<)l$b4SUwRz$N0HP=$NKR8K#Yg5$F{ljTz?E5L^q+bx@~JtKxI*$fTdF zYhsMsoU5ruigxlU0jiz<(omk~d+}dSW%a#;HtGn>x#$7Vs~z{Yb`?g(8M!bms!)U} zR|@cyP}j1$yLx@9j>M)dMGC}j=}Uwwy<`qj;wv>d&ADs>KBi`^@Ay*6Aec!YlCeK= z<+aRcK;UHQ#zYqTpxuEZ!dZj&OQ)dTBo0&Qez#y{~_hgSDt z;`u*e`9EqCx;g%-JH)7}0#8~SwF$^KC5*`^gCgTrH?_0uR=x)?RI2HiKiB;mh%1E| z#UVrnHzC-9?fl8TN;>rt<7fUH!Rz5Xoskg-`a(x9Nvpvp7!+3aVv|89>PD1Z%76~# zh$1aof-mO;9lTOJ*+rwafP+Kq8?m14s8@;RUY^5?h=|Z5=SWc4kJrWQx1xEQsVKhQ zu3VHSU7M0>nBozhaUXv+K`A=OQrtkckzUDIJ;KwtA(oC^G|9Bvr^vP<1%c0gQ1i}n zd=68?HqgQg%s05|xsseT$)n0i1F`-}JtWr5+Wzl}*NRwLPyHB?lAo1GX+M}ua_BhPwfeFOkb4rNSs9Y&fRfhm)x%vJ@Vwb* z1;k*Tmjt+ns?-@veg*y6&>(>isKW!sOso@iZ@U=swx7oz;_f(J| z<@Yx<*~V|bs~^Rydepz$+v8laIFIqckBFH>4yz2~tQv6!Dv|vtuqN2eN>cWGi?dpG z>!a9u$!lkSLHLtk<{SOnHfn8=MGNS1uYEywJ)R#Dr!H;pbwa@pGWrZJP6dA7JZ>1e zEvVgdAJ6=ym81CC&Fe^*R-$sG>qO&nD1+y zDPRyciTu(g*t!vmh>01(Xl`KCV&wu zg%Ig|uG@7LFB^15T2B%(dz^BOr-Q@>m+FC#xwS!hqDIz9``fkLi~Y@+GdUTJrqVjn zPsHcKzCNp_B&il^KeEoU; zWAgYuCHZqus`~HJrTBW?12KhO*cBzVZz|u)(hQv{?SFsJXcd9eTnx6zuUG%u1RiPm zyRoN#x0egGYJTQ-;Y3^y9I(f^)M}5b+Hn%>4fp*a{CWo$Q7G3r z@~t1lY{K(7D0jnJejC)sr_aY{b6fRDD4tcv9B4u5`@C=tISpq|`*Euip&gT(p<;%^UC01P-sM&Z7A=a>+K- zWWR%#ZI3M874tMAV4Dtcjdmv>!n-ssN`cm~;WSh;)>$PgPrx|Id5I?%JTM0*cs|lQ zCPaznncEf&p-cS3dDGE)wx3pd;YWUlz1UplC zUbD5k-7e zY~fJkMR+WtPDvM$iNSQrd1iXG`;Vlk;~i3*Ph%QZ-+#$oy~NVQ2;L-plWDI~KQTwY zR!F(raS>{pAU3=B^Kd%oUmBSa_eSo7QVxz)Jm1p@Leq=6-|!o?bu<^C&OO~r9z05` zu6s2aX-1>KxJJjZV>=B~B$%jj`Hk)GkD_3=mWIr5-@=sO+i51YxshQ@tylg0F1I$k zK(2PbNL`!JyL}HgB-K!q#FlE(=*(H|`CVUjIXLv@>K~tCuy4)DuL%iL-y^}wnyQHq zpv$enaS~LIH@09DOIzgkK$e!!uCgHT6ZMV;Lr%|@Bso{p&^)@Ukud$klSu*BBsGEV zA)GQi>FJY1sVOJkX*w^+9y5`kNFxYrm>k9TqR+mr!l4`wz_4C&m&Lcu__eH1LIqiY z!YXag)8h6xM0WaLLDvq z!L{DmcrYIK5QtD0(=4{i^~K`@;hb>uTW(0*CGEWKi)%qKE^qHV zGPo7T@r8anQE}{|B@-sk?Agi7MTNQ9T@qBVY$LJ?iOf7lEoYg|d21|5L=x2gr6HU_ zEXI(UO2BI%YcG)5tS)@aNfi&>I1Q;%iLAPN#lIyYdV6Fr^eRX>U4Cp^JgWF%bGH8= z?sC=x8DFQ6i;7h*B+Q|fl7iJI21hcGH7^ITQKMnoyPp+Tr`V*Zx4ZF5>k$A!_0%Ii z)9atmlqEg>m_;d#1MzJ(C+9nXY@AlSmF7l9QZeSu2dRG{ui5>0m&l!vMeP39j~SEc zLOfG~M|}rrL9Cd?jLz4!EJ~x`*es!V7IV zvtWH|hJmv*_Lh^eLqcY$Ieu=+jOzCVXeW8(`V4 zlPwoSzqKyEbnSi3ff?q?%eDMFrYx6}H96_8`2Y)R%c=Y#bCmxA|JZhXn9M*ZJA3nM zIcJuP%C=M=iikR3;Ws-TzS-?~*UKd#&`*@Fq28bAzzV+fI^+N=SemJ+#Ifoh7?a{) zNwi(=N+D|XL=a+|UVh!Qz#VR(z1*q8GtP-x&SkxwxeQ|O7=l=k;06rKl2A>cC>lfe zvE=N<-@HOoM03XW@PnYJmXN8H{_(iG>0ffvzqA#0HZ{QHd~r2tjbu@ZC$Gw_j3xx>c1WANJcxI**0 z5$lqvEHO(9sAJaArVmryB2ewubF?v;7zwka6e2v%Tn`4sgkJDOBHI{&jmp!54+EO` 
zF+QrG8QZsw6Cd2s7*>H%U~NDdkDxwE8)2mJH^x&GD;p%FhLrH?3fxKFezU9xCmUG6 z^<5p~gChbrMeIpvUz+Sg8V3Mr21c4Krt@`oEDsh-#{$vUBfhMs?)C73x=qEU<=U`gK0@&o(Ln;-={0MY z6nYZ1Jn^ala1_>@2A||Cc+x52OYH#h!0>=ZNhC>g7vZm7gXWTlYQ?=D0RgOz&`-P{jc*)@B$X+W zftYsHg@(>b?+*dW9tV7*X`|e$u8fRG>Rh0;w!_1Y;8ix|#7^VGf|15^B&{kb@(x46 z0tuicI8IJ>HvS}YvvBQR&<;aGA!iX2ob}zAj5T&A+^#bXpb2Rj0kw|ASR8N+MiPu# zNPO9UJLb3leVXSPVGOd1MMOZhjxaZORijzY$YL@-@#PznfY4&8hRPH96s;ObqVlXu zNG+^S-GHazCN~-V)fzcty164sP$!t<_xipf?id_TBouIj_8lLgRb>uuGK%EbQ2DS& zI9>gSy7frr3+j@D38XM%0sj0qUOnqfE@KU;p?D<7W(1jh&58n`%dR`Ax*OR4OL`!*pb?=fUngHh$rz$C_gw?T_2AsVw)tdB^uqQxXNgnmra2<1l4wN6kK|bZq2ZB7R>& zB1nf6(WpJmO{OD}mKqMVDcVMn6T;jo)XkNgb9I20AQ(i)O;to6R62r=#Ep64)RTGB z!+|DH^_u0Fej0FeVaniovs zhKy3CPqn?XvbwrT{IwLM+RD>OG*aN$MK9+M-+C{hM1$ZdNQA^tL6h*`Xnx|L$UZN^ zb#lMJeKBwI7b!+pgy$FQaA^&&N&iMDiHuE*9Rnmc;B@l4o!Vi~1&?10?cSf(317n0wG+s{IdU^wC6u_Vz;SXc!1fC!MS5y2u6?j!r zX7UJ5Y+Q-}U3e=65xP557X?_SF$6R>lo1D1elD%D=sZm-1q4((JAY|u<)Z=BNY}H- zb5Vpx>;Q{DWNVSPbX{de5s{Qt%L6=QB$0UmGGqHXQFy+08yaoqyI^SaRX-4dnE*fD zSZ7dXb&Ozc;}8rc#3DR!iO7sJ0_#(XwA2yiu-G>Cz>SDQ&K&|*vn(_A0L$-M4KwZ~ zpM3UZRaf{C!5pA`g06wg)Pt5_q%&$5M}cwdNg_iS-wGq>q~I##;Vx!Py;x<`^wRo6 z9Y;IjB!a5y!?#U0MfD)xQ=nr^RoxGx+`FN-;}hPEK>Euk7A6f&z8zp-#FpzaVzg0`+LaeFjJ z!+J^2ggG`9R|6Lle*w>d1UeIg7iT#uPLYY94iC|ePhki%>Ua$Hj1LRgZA<)B^3Q>B z$lH5uXq4&!d5{de%6i_2_uYnyV-JR}(XI!cL2pIsq0WWKrfKi9JKFTP!LEs|RJK55 zDaR64ju0~%U=&H*ao?G=I5avhLROD> zRv$k7>i7G3si}fpy&(=e@-L0OarEoeCEi!Br8yqb;{oi8Q=&3KasBGLWU57`>;YX5 zf1F#Y(p|BC5|y$pJb$*iH*|A+h3-IqV`hrY(U)N4S4D zffKD7$(7)H{>;a(dEUT(#zLMHT&N#-9LZ(=8w}=h{q$KIcjutVUzvuHAh)6a)o~UJM%L9c3>p0u%ZKXrA33qIt`I z?UF-mdg33w9;aUKrR!)$fo2v~&>k0K8AnH!0G}x5SwzSx_;@v$!H9{UH>~pI&d!O= z6YJmWyV1T?P->p;xYxHX3dy#)#+SlEhezQgK+OBU>mSxmfqkpN!}OW9kggd$0hiR& z%rAs(h5O*0YdEw3$fxN=(sdA|J>=T)v9A8)8{IbI6;r~}@9F-D1}rSvH;!rse&We?TEdT$N3Jvm=-f@dQ>CwLHx0Z9aC&<4US zuyL}m>tQwKS&#Rc-&KZXJ#YTt(Ox@txHWGY>1pd9Z~JN6IlC%jTL1e*`a&hW$1shh zd_DZN5*;VH9K)%2xQ(`pXUX?BB_39po*U=v8${-QycC>gsWZ7-U6qk4_Azr(Vmn8# zBbom|aBeTo>@vb>Z>6htL+)+DMe{9;y+MXx=3Hi4bb9V~jnP-Np%LZLucaPP?aE`l z;;s+H;6{{L<#c6}BkQIaP@tSu=GorgYNnQ5VGUqr#~C=8XRy!E^1b<8*FF=N{ec&o z-Vkl$y=1BN03BX(=t=^YR##hlsvpm3t^;Zu+&DZW{=hdlci!Z0@ZfKj+~C5wE%w=g z1`qG`>MR=rM|*1wbH%M5nLgk97oD_eoUx^f1f&L!>br@$UeNP$M|PuY6}Jl`EQ3m_ zh}G_^xzX}dUw^>ek-X;IHs{`IHeOX&+v4`IG){Up1}JJQ@<@O*<>vZIldeF}4RN%} z3W+y~K^9^|p`9wEiHF9Wuk_Z9B4O_is`_uw8vaKD(nUDEV%l9qB>rA2a9eeXg$J+e zyrlAYkK6Zh43eYKie=g10$zt(-8`y__{F>RIW}TC6%k^G^Rs_$@+h#CGR7o5r8@*R zacbEI|L^&YJ>;wf2Fx{+x1lebE?L!%{GTw@>@Ng2v#%*T1a;3EflQtC)U?L z6uk92{(ga%*<%%#_vf{AIw$0~62weD z4t6#s>nMvA)gks%`(=f7-COefj}LkUQ}o~c^G-k2_BuqU$Dn-Mwf&;6INr-kB2=t))jPe z(oTNAzjf!W2uN1cwS3yQS(qc6po|zbotAI1DgHVghP6PU!T~h}Rtm zP#mBcT7sp`MVM-TY-Ns9q$}WN>Ab&xw~YP}pKtthi5Xv8q9^ODd0ATzNQiX%d$0Um zVkVT4fw5NcqnPJ$^D#XHpih8I@t(Nl5HJ-p;7;hVsVE>iGDv0MJ(ZRjin>n{9%n&Fh_ofms#jHQK9reOQ*?3>=?B%Rznf zEufT1T>kL>VQF|j?W~qqLN#2WD4yvbdyZR9mC^-C$*(NEdcIUfcPg2#l^bW<=H!}q zZB*ebw%pr)wFf-Q$r5*;`Jt!hF4Qpuz@} z)}vIhH#a-mK4)ufi(6o16W>_dvAmY+|7iCfV>;Vy5OhPIwk00pWnt+q^r^uY@a>5R z;&}ow*qUOaD@?XmjP!#yEsd9g*}zY5IpWjycgKQs^VHcdy!vVC<0y=*D&|MJ4!asv>$` zqz-hbgohI`jC+UGgUvYyUg{lx8ny}hJ0=-cX(*;EtkeP%=x(I8XGz^mfa&jKvayMIY=zw)ym^PF{eZ3`}; z_fhDME}qeQ^-v%UfRUy2o3Tr4FeTKDT5~XHFR^9|T4)bme)v3-R>%2$pR=f|Q~Ai0 zp8m!}g>pdGiP4S33PVYsDl_>)hvM6m9C%H~i%SN%VAb`Os*K7*X_v^(M>7Z3jpKfe z_P1)>8)FTpCAW{R8W^-aRjN-tUbuI6T6`6rRXc65@VlDaR+zKE^-0*KSoWQ>DTD52 zI5(gKN5S`{5(_Rbc1`d2IVUcJ8$5a!@c3)Wqr_X-yNc7-l6hVUa{IZo3dXNhGtoQi z(RD@O1(C5a`Nq(J_J?D~&oe}girVhzn+qeVJb&A(lpra#bAQUsW=E!;J^MBz_B=0R zVWy!gsr>Bh92|99O<)Y&pTvq|lFPqp^8giJ@Rnt3mql}}F`|x+YbVGf?d8a@e4eO> 
z^DVeD?B379KRp9VM}Kunk3@2)DUzlt?@OAI^f?PFqWrC|pIgR-30Wk1x+SDn2&Oa$ zFwiT@h|=F?-1~N%b*ib*v-sZ9qS<6HN?=7Mu=3PqDRsQZOYpCRthJy}cIo)4Gwp$H zrmbZ|>x+4r913IHfuxs_)#cQag0_touuGE(Dw=e_d)qlq-=ylI+iXBX#CDBOa5HWL z*Zby_$9Pjcxd4Nd)`14VK2^r}KZFc+qm}R!Adc7S*a++$KBKWonDUx3#jBbMigQeZ zg1m}+?@(tW_p*4nkkOG;4t!ZctO7khI-E;LG933raym{;+zr=$F7vsWX@|gHN{J)z`{t$O0(c*-`)9pNxX5$S_47 z5;Np**IFCertSdi4m^d*BEuMKj00ewZ_-w~pKNL#tt)!~-jBEcddl=q3#R?c+fDCoLAzBHeRw{DXU93QtsMmuL z;7hvjN}hfR;dZhEqc~OxJe5K0MI<>6t1V2!Jq9#DY%Jl21Qbf$=#c5`&xga&2-4w~ zEz}W3?j)v;;IxE@8H@1XVPWMc0OTS1LsbYoGMH+f?5m1dGA^CG8C}5b0SY`+BL~3L zjwJMg&$}E6kQV5oU`n+IzI;?_adMKQ=*DesSAM70^R0wZ&BFm75x^;d!t#xo9LJ;$ zGIB5B1q8+dF*9)#Txm73Q~jqlIzLec5pg5wkFGEpg5icGDOS2FZFR()5>Hps?U*S` zkRxha<=LM_z~ppwv1Kl6)rYA~UBT#-6zKub=0fv#F@YF4xW}e>`!O%Dr&pUVJJ?rq zq2(<)XHl=&|2)*mMZR`t?ImJ|!g|DzZU`sOcV41CsBnPd*vz2h&hl(giP7)!g1RUwnaB6i*)st2IPkhl2f zn3s^z3ODj6bF`!Z_9F1}%g+9~S|}wFhu_*dR3w=h_AsJ&G(b0!0?*C~a;d%wuH1Z2 zlVvc=lU&S(E#E{v%dAaN)Ds zpk?@=5kb5Zor&b9N=JjEdh0USDKoQ4dopUd8+UGr+5(C_O}UV*zG2*ltGt0oNMiPm z5BI3b^?YZ{mcGZ89pEycau$`4wkVRP>gxRRpo7gYj(=$s)hXUukl0kC2q!>&q)=e` zLo&#Rk-({&BvC}W!h0|xa|EENiF|)5ZU2bwrE%J5*lKiioBH6G1k?IL$0`*#S?WLP zpc7$RlT%d0zUun9_CqDkQRQRO9f$u>TnoV2&E)>LkR#B(E}_P#m6YSC4%)#$SyUnd zsgV{B^MQr84ZSfUf{+KZPjeBu+ZQKe`>-irKIlST;6Q z#6(5V;pW`ajXrBI!aZsPHs%O?8SFDN&`ve`?q3J>5k&Z2_%`AI4$HT3ROjKU))8qD zz+(3lE1e-dbVR`vG!M12{P7%(F30F4)o>9`Qa-WZYCZ@CoG{Izg8|L?QwUX&BF1mW z=X#oiiP3KeR>G$Z;ggI$krgTT=$z&x0YTe#3&DrkD-9?6`(HZQJxkiKLV5BomYz<8 zpKWRk7}EzP!UlIWxE#XG^5z25CV8YLZ!+!s`!j8%VXI-IKHPJ$@MJ(`R<~pI_LvQ) zPo-IMRXfGB@Tj9@X&bTh$uhAxRdrLyHlR}~0%Teq%yP<1o!WRB`~n1Hw#YkQw~%)n zy9+Gq#OJiN5~^qDat=By68WjXjtwwz5y)X=h}A;)XgH<@M!B5S7~vm@ZyVaeENgH< zAv?WP68cCs-*Yu#UTP792*sR{x45<$r;;$F8=CptgfpPOPp63n%-Pd@b#Qf+T9^aE zQtklKIA|Cq|IQ5H-=|oXEuOEX*)5Rg|)^Z;Gw_2SU21DmlQ#> zOl=nBeX|0<@})C|?~Le7*q_s6-V959IETmp`Ep{OX`U<{_eZ(I-aLogtb6E}+I}2h zIU%FxP0s7G3Flq2wH`$PAbj>6l<;K4)>=FC={CL%s3vBR8D#*zGc(kxTBX-!z8{?k z{pa`4SMNJ<*&gh%i-xtxnF&SN2l@tF=h5D?qsL6p@#q)jl@{zPSGKIyYNu|PFogzs z$eQ5n&&|8_)P~t-eILIZ)4*P{Wm>!Z)SptasasXmSJNz)xni}HFmK~-)$^isInQFz zYU~hoQ>WAbNHM4?Pnu*-5CxJ zJN1O3UGfEtf9xBNoY@_*#lv%A)7{d;Gk@6PEF-I66`xMj?tD;_*;wEg{bS7cuDXfy zi<1`94nR1U#yp!7ykIdEG@=KdzU3Ba{C92cmLILLZ}YUo12-@JtRJpC{dN3;`K@iQ zeY{p)WLR^$#d%$C5&dg<@i~^3J7ml|b<>^GV(WQJtYPY=9?$2nM$7ir*S~KLjx&Bd zxA1U@uN)TCJ-dc(u~1nJ%@l;?2(W*;kW3Ri2bX^MV{72=lHyB$ri#)_{l*!BU)8*X zrYj^Tj}~8U>YGBErd+DNT22c#<95#kYT}OYDGmykU82{YyIwTXrWNGoc5>xrS-L~1 zIaMOkstHBO?V&#M2IpGX1}-n&bZU@3{rD(;mM&a)?KjILH&yezP&CFky zfFD;Nn|98Yv*&1tlYsmLI>+Mp>OA@~#ybMb$jV&^;auY7ORkeS+A9ALLy}u=oQ4&7 z>*SDfFtCEVdlmBaS*&eU*PpJqju`5RJuk^UFwSov-H}sG9;w zQoEF0KvxIhk*eOCKlESNGIB6-A7SjMAUIZ?d+Am08jq$DJzWa$NFzpDB`sIo{PY&1 zbsC~p3RqJ?mdHh0+sCu^vW90YGWGf+oiH_Ir-~qLiZr}RfRz@V@LP~@p;#|8 zs(Y0evVk$d{FyCZpUo>tD*KlP?6YtU#_d^W+vx8}2XJKB>J^AXHqEyai;7xGaHh7wxN27o z!!RSQQ$-71ctxZ--dl4#6jWi}t9q5&l^B%+FURz?yJdVCXmRX|wLHeoapV$0S-rz`?u!JX>G>r#o#vGi`iXRsO@_mM*1Xw|xIJW4;}l}uZU2gedTm>OmJ(nX7% z{{qXyh!B8$(d16=BIIDH#Fqq%Vx7qcg9AhMA5;~O;J&wIfSob}@2p1ci);SW*Xwq_ ztbRGocSj+=Mv40vbBYaJ#=OtJPUW-dXYa+_K8<}`m$SOyXcZPh(4VLg?#eb7&fFm# z=uEA4ys7`bo|WQY^Mp0~rbV{dTA<5|_%}Ce)5~YR=J#3{Ni|I$&v)fXf?MPZ%G!!; z3QKu2_2%boqpSYZXuY{4ZJrG0sjN!=@dLUQAeB@g(8@iXo}lrJK_;=3pI3DNro2@X zT*DUi7#!Nyr8FgO)K+RcJK|H(|Hax9+x5@EB6uXO&Zk)GMe$I$_qP}COq%LiQnlH1 z{JF0S1Z;7tKw5(aSDjuLma76jcM+7**WRgeQ@*aE^{K?P*wm~}Y(~jWQ~Um5?bq)Q z^TqGqPx0RLGnB3OaU)4SdIG0QW`HF2o*_|2s$VCVW8mCQygDtPF@K)D5qZs`ID}Co z5%PP9<>NFw-Dr_yR-;v9S02SD@NV#B-5vr52O(*+AO=I;R6<;n2CU)-s zqJAm){tyt<(x?Kc2%0nj&OtO5c+bL?ykk~Q%2NFX1rjxmQ)Zl+OScA;Z=8|F=slHV 
zkSHlaV&vFG6t6ZU28yKw%qz1=t}f0&;IQK(3lZv;~GBrI*=eY5;>N$z(@Ix~|1X4N2+ zStwETvy+8xvEksR7Q@xWGFh7LODvE*G!3Uhad|_nC8?GslV^tO!>qT2Y|=|` zr-fp2&E={RRR^1%*(ovqeCToNeqkk@T3N|*IpbwH{e>c@n2CzU_~*$+tbu?gs9ND@ zp7k`e$w26z&-Kl(_g8mV%N8CXZ5Mx3YUO@>W0msMbdAJr6D2PB;s=kLA3q6KV(x{ zy?5`hVnWu5o0*sjbs*r5j=oAt^J;EmoBhMk4%6d?yQrbxn^h=7_>0>umMDW+%Jhkj zs{H4Y-pbk8tx!^8QlXw}U?`;A3(5P9sCwAY1ClDq^(e;Kdo{0k-A%Q9*D=+^Wve?Q zl<>Iz=QZ$Y3X~70#k_FoG*%KWxNi|{pfwZ4wZck!iq?(E3Y_nXsib^ew4NB(m+^-3 zOv_#R{^RX(!s9sQi8<}tR)UXnM5iiie_RnLAGHOgznIl<>wN`5u-)fjaAZ04dsaUU zZhYh?JCw811D*LYvtaEcKT)gYen6i(2UJ{C#*Q4UrE0kGV;^{Pw7EFHFHS7izdm8* z_`+}u5j2hZn5$FO?%J}gI0-} zrl;8is;EB@uX^znh5yZ!@5xwrB7!oK%HJYAlVA;i79e0|q|3eM8uZRdB|2)qMGzk9 zE6S$Xr!)^iiL|SDlxhT2?$>HY`=Spg7tHhQ)zE+bZ#$Ue)|&&Iz=IUH(*<>I(u@L9 zN1$d!>XdNip5sf)^KJ}J%HPoP^8L%O+(xG^g>g0?K&bxk?O!2@ zQc;nuLQ@H4UxuNw$4Eu?3Q3H8-^Nlzmh5GjQT9FiZtO#1WNeMy$TTMF7{-{He&_Rj zp6~Pf&mWF6&b;5}ocp@3`*r2cC3z=(0g^Wk&3JP3y1IoAF~vci$de}mfDUODMASef zbsberg|BzHaV5TO68S|7!qZN>#KhVoQ`=LpS^$H2`^UNK{H zahiTs@gdL;N{xyq6DeYckM^tK08^>9!o8ly9hf|bD-t+lxwx*6FFJ;w2vrMuVxV7y z63Jzm``=2GvrBAE9b>y{KV2+beiqv6jR*k6vtp6GBzkZA4vB(R(a7_Tpz>_MynrC?IW(5NOsy^c8J^WUATEUwRrjEr9-^g;CyP$Qcrj;u*l)Mfe-dUrOw{Z1D#w7&m1M-#?c6 z)G-23xBo3aIp<5r3Ci)==P%^lTcsfagtk?{G$DW{b-LG29FCx8X?ZiJ#l5`bW*3X6 zYS_E4kV${eKyLqIktvkhU<+clA5nlACoqc+9)kjhLGXJNcVte;e61%M@`(|+U-3di zYlzPIkL5ftji$m;vX!Xcg&$+6nVk65($%G|YmFqLf#8ftjU7xFJVOr<`(I{9&~>MU zk}0Gt0D}h(T8*H3YYJZXM(|jBFNUiwF-1_LIc};fb0Qt;x`66YAqR0KAUvSOVIBb+ znn;eMAFNCWcKg377Wl&dvE2JwqcNop1%g;iIPQR6blX!y7qNhi7qS7UC9aTx8t^{G zG=EG8(5`^`+2}~eShnvdg2)W3uWGCAxLhJ#1-!wo!x+U?oXX1ULEc-_#X^tiOx1nn z!5)d~3siwHv1Br9>LGrD0Y`#QEPd1kGvL%RY2awqcMFLNX23M1>)|T0Bgx|~8XsZI z`%!;TJsbq2C&Pb`7iumpF~$Rg)%6G{Dup)A$RkfTssf+oH}$8_zB7GjHOy{$aMmSJ zc>q52H?)Q#kAMpYMNGDEP|$#91<+BPP;0-6VyWs{bhw5bofsLlzCq3Ypm;(bV+=KB zsL=nhq=RrO;^z*Nevd*_2b9FQJ&c3Ts|Ny_7q$b0J297{C9rkK{VGDa;b0a@`n~QJ z2sA=TOFZKd9v;DGs4Aj~#*(5N+yN>Sv^9@40yrsDJ?*SqystPdDA+_&1GA~fedrpk z3}(5bE3!%pz4-vhp%3-jQ|T6{(DHZ^k)p+_0urK;8ESw{;@DxL+MXDaNHe(@ikxTo z+XdFTsj#J@HKC%T5nd-ef^vYp{I{InxP>%bBc3=P0j7m17YE`wP|bfJ5CcUn;B*Qj z-P|hR)v#xOylK3;I8K7PS|pDv1p}x*6q=cwb?96w;BE;9M*v&p)tqkz(q?UJp%^v#R=l} z#X@im2+zZp*@P4o4)D=|@+xv5G0>lW>5%>Z4kFDdu`Rg?41}sNben90e(pyTlhGw4 zph>O*YDA-ue)_e^N8~DAV@FkKcyXsvV3x|PsIqd4$bcJeov!}B0+dxe^2Y+5S@Pm%e9Nu9rK88p-$ciS55x&nj9_H zmXt@K5NT5&TEopOsL4)8m)#A&@rKO##bOSNvA%o$2Ud?f{Tf!s zfo^vb62Ql?V6qHkAw|NZqwXZx;z_CG%y$G(aEFquvNruky<)-wUHT#>V-ZK`A5 z;@tW<=c!iLsaNEON2-fb<2Y5EjZVt2a0^#yATNeh`Si(@E4^Xs1D75?4XIv`2iR)R~azgKc_ z0D8QRT!T3kd@3NriRSVkld!9t4`tQIZi9{i~7_JrLU;j5{4 z3TpSd)|z%0!Rj;;zl{myJGqOwr47g@Ly~W`O*kb!f4afKy{n^&ZC9#r?vx%|-jRu> zgbJFC$=I$5!d?Qe%;P=IQlMa~5_}6{Q+ZfR6>J)*gEr+ch@B|Bn6H|)f^*L>jknI2 zHh2D|_NKn${gdK?3F|alqEhwKhDPGFVLnZG8D)2ZI5b)4F!`{7Az<1^e{ z=ZfCjQva-SY}^s5+Yx3THe@H@ zGs4=`iC<#)Z?6t!5`)y9;GMfHiFT+d8h zVyg`93yoJ#UWUfOT~FH?)lODG?1i0-Y9?U$ZS|S`c0w9FR8JhvSBh<1%ePV=Y$zY> znDCIL-Mfbvxh^?W`M&qX?9(^>WETM~13ybkdXr7CI##s5b$j$&bcbgOygNoo~Og8Wt4fGOWMx)4HE8S}*rR zpU-)NKf-6(s9peksymI(Pt$J40@s-^#!qQr1H@*+jfbO+YH2x$jjVxF%;LZ8S z>eIyme#pCzQMLec9EraE-t~eaI?JWk?mOz;sV2i_6|M`4`Rma~!3X?R%qUd>!AeFJ zwST<<9(Uua8s$&vwj;2wDWTP&!6>4Fd*SKX5&4uKBjTxQ`r=h#|HXeKz5LPt>Ej9i zb3dQlMg;Jmk|RaXhc>1;5+&CJk~SX0QcDBS`=_>7{DD^z9EZtd$`#eEi++BujPr!Q z_}i1wsW#NHchjbfGRRV{h;Vl|8TcM>eQNEMN`pC73N(S;?y8*X4qDx;-P(_v z+UOj+YQAm$7hbG;>W<0ywc#?VDsk=5QZ_~V{Z7uVmnRJiD~#@IYAtdFGX%-UDNE6__TM}W*%LP211F0paR9c zM3ggxdL!mN{e@yP_)gY^oXbk@s;z7g>MfqJ(L?RROMx4|{c2c-=5D;Z`0eG}&fw%RK7JM!u z&lZT}g%__&o;q4V{*@@QGW3`Ei#}5Lp7B%vF8pHHv}NbF;^#}lp#8?8X3(*mO_MVo zPoA{S6gCRTC@Hw*HZCkCPM|H~k 
[GIT binary patch data omitted]

diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/source_images/h200.jpeg b/src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/source_images/h200.jpeg
deleted file mode 100644
index eb0633b270acdb5b42885de4a025e3bf652ebb2d..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 101670

[GIT binary patch data omitted]
zz=T|M(6}^XV4lMRQ{ZoC8v6Mj%+591WX*-aLm?qzSm1?W0hxLZ?E;lN;*^Cn|P@2l!NK*+~2vlV~te3wLXi$=tc8b^T6x zX>EhPy*|r|MTgH5xW7>UNd#t!joZkTvKTA~fh=4FK+&v>!N z@UR~=XFCr#2Qgcx>vX4R6Xp8lpUAesvP8rTlT_~ua*<@DBRc*gc=czB`9^#`m=4!$ zj0Z-MO~|bcu(;s`v31MHztUO3e2DfAEDr|Ys_C?zcvn?X5Sw+i&f8=$jIUhNQshzDw-O(-Mx;HF_!xTJfwJHLY77v}eS}|7;FLN_+v*T(Sh3Bv~m}6ns zboMcG^*K=!B*N}#J*+^}{#bG6UD_`s0=abtPi3+BjIU6~vdOg=Q=I11t5GNT>mde!~%5TKUdSx8{aw`#ldDtd48U( z>c*CkkW(e+Z()!29$jJ89z1xg_wxVm7iU=^=1pPZA*SjgceaB+1O~W~#s&}HQCGoR z@*GOcaQSW>BCyc`wTrB-B#WoSPuL)|Pn4B87kC(F$2@3JpcTu>iZE7rrh?7A-$uOL zVGFBbrz`JX#xZ68&eAZn{MP-chS7LVg2D>8D3O=lFfRdB9eY1pyTxmd=$bvUZAY{u zggt^eMaeRzj$%~m4s|YkaQ!ye;tk?YO8nqiReP*R6g)ov9TVktGWUS6%<;aEVEh+2jZXdytQ*T%uF78!4)c-5LT-k*`i44 zYHq86@k6!G3I&edEX`z-uK=xHumOLvTZKo$?$E(vy{ ztZ4#@b5T4^p%hQ%A9b=HqZd309E(;Xe?|9$WgJr*S^2juJ7N27C22x;?KA+&z@Qw+}TRm)50S`$llSHbwu~^31v&rS|mf6;C2vZ@f41 zS4{S2&7smk|3zik_k$>Z5;dA|p_Z7DYXq@;p_ir?x7dj$Q=L$Kr!qK|Mf))G$)|dg z3PKpZ4=padlaTHFFH=m0)Yud7x;Ec?aM{fCNd)^!g%?llL)i653*nINA8v_zN6JJu zdQj+=2;KpiiRv$h7+t9s3VF2wI=`qBT)7rOTe*P{2vo%}AabQAKH#rkMow1esou0D zvqN)B#GnMN7J2;$!ctn}WSLTf*Mw93#<#AWK&lpRAH3SqWOz3!J)jiNxO0u)43I6&1=(H+|G?;YeAxfKTancj z)YFv3@7N8Glf4=FS2M8d=U)96?Lgo^f0X*E=Rkvi&7I0RL}aLS7yhkh=Y{x}Pd}Mt zRzpD^++_sY4qev7r53Q@akf;??d|)ESD$(~)|`)Vf9RuclOI&~ITw39Gv&5|0~L{< z1i9Rww)=06G5v~u#$;_>tNE2mx@D$s`w@zmKU8x{wnsqq=v;=4KvptU`14oyQg_$` zd9Pu?zZzes_UCZz19-aoYKO+)6+Zb=kaY`s61B2#+-8}4F_M{4?VG5OHRbWl_a%3@ zSK8?PH23!~7qT%R1ZANFi3k22$B)ml*3*xFXrC_1Fv7Eadj6G?5<<;%L_kg#8jx@?q|D)UwP21Brrb(k zBX3y8ywhoBOYdPzd#LT(EJyhzzOa5-W`s4dnqd7;t=;Zfb$Ia-AMrAjiu=xDQ}ELl za{s`O_T+Y72MIj+W+%|LiGN6OcC^Pxj~2p4ccLmm>lAktf+(Wtw&*d3)A#)?qI$wl zg}AMmB7;i9>iemsKFxnx|5uMf#A&f-G??+!{T^*fLo_Hv(khD%s7e3Smry;<7Whu) z5jWTs`fl)uxz@?hhSgj)KU7GCbjCh(veo(#rMd~-OE&w);bs!aMsn)@XL}DRr6Q;J zTX}r176Fx3d0J<OH}G8D<%- zR|GFwh8PK$STZ}L{`oPK9U))2Dg0or5iZU}JI1LemW6GKF_@Hidy$nHy}md9V~K5N z{KZCS0bA5%{0BhN;O8bSp7lRnTC#fugxKupI{@rXCko%X5k5RWrh7TdJ1Gob<{5(h4=Ws%wV`-CVu}72X{xNWmWHkNn zqkq*7v-WdNqA#mFHo8~3#b|X{bc-wyE-*x z-n$*yKTVoXI9-bToF4XhQVTw?{jO`1r@&r>@eS?ZAQ&cme6Qan7jTjHDEDY4Aiyz& zKS!}S=Cn!XL*LtAoV^4mKThp+)RrTcM_@CigY0hZTdorlvkD zukL7^O0!0te7#R%*h`Q}JGzdfB%e5bL_r9NOg7 z^y0VCUPTYyLszUtV7gr}YPeQUi!!Kh|8U4VIcEAtfPhGLh(3mJ&KV-z%n)NV*+-Ta z0cs^(T|@3>RY9D}E%uDlS1i#)Nsp`>>+{DUzgH~Ya3LDQHjgx+j?Sf51;ej6(If@+=Vr(<~EA=re&!3{Hw0s4cbh5yZD(&w0 zIwe^^O!Km1Vk7LDX63x+@a*rsF>_GW=yJTZO{jF296llv*>4rBY}=LULJVy_=a5oT z{^mL22zR<mC=42NhNc%YNkD_q9Ffk>4 zjkoqrU-6(83&6cxp0xK7nm1!lvPWax2S&J(p0la9T*jzBS+^_j=6nl}lFC(ddeh>5 z59n;5dkXS;aQPM)sE5rTXslx<>*ABx;3(wzU|AR0!L+SoV9G9C+=ehdO0BT3A zqJmIch0E;*nBy>nTWkR_mOXnwIqHg|(Wzw5>@X?2!ug*sRq%<5gXCmkVD;09aYvNi z?8M?lZ7K{~a~|3G<-)I-Q#xA!_@sx2m^GBYR&$A1;DuM13IX=zLZZ?H=cwOMas-DM zjulG=!fz!9h*Fl8j{Grkk-D?bHI6xxkLA4^!wN@w33*7OldJlo0!*1{ovkJsI^)oH z3wvlzfx6vO{BlaJg7h`;MmdCP%FCe?ciR8U!s#!34Xcoa>YT6-Ev+~)}4fhIx z%JK4ysg_VQ-CC@1uc=146iz57C~$8o--8ufVw~dP@e{-YXy$q0GpL(pcg&!VO~vvX z+mYGu-ZMIrV8cGLi^AP;a23&5=F0PmL=yhHEb?Ziy#%bscaoyJthwk{s8!Fr( z=HDiD=GLtDp$jV&&FcmUu-8VX-5qb+oacJz7=39!TD`%6=;B8sRfaX4R*m6d1uKaeB>5#(DC`3=8hfPx)fUx6#{`~s|nWeGC2{Ah0A z*1~(hif3NegDm$*Py5SQVYRjT4spLtEPu-Y28JHpASuiQt6;*=qqN`X%^~J3Z}(jV z#y6&>%<3%Cs@0EpURbYev#emoz(~bv7T+AOn~h7lJa0{#(2M7va~w|Q=~apSoCFA% z6$yI2_WXN2m&xZyYt6SJFPiRV8)vpW9kVX3$f`c4v6^U)H|@GP@pz5-1h9$NmG2Zc0k7lzMR?<; z`D}dgMW&OjCdaiMq{*=GV-lvuy!ZyG6q*=gmGDrM27xsJ#Tto@1i!H~vsO2P*O-^u zzJh$%_gJPbk0(N?$fRAd=agbhTLg*sToNbRHe(rz)&q8p6qSdbWnJt{1}1)-)8@+- zqcv1r{iz-9x^>xs4sBkpPEYv!e)h9g~q zegeWY8O7cy*VSvRKJg$YLx{sM@r^cac)c(l{nS))xr64L<=-DL1O2wvkJT5+H-l^ej5tNK<=PV0U-mGH;ZVX}E67c;h+BKNHWE#GD>)Tqh 
zJ65I!<&MbKO!NU(9Jp0)o0Q`L2f5ESmoO6$xq)=^!?F7}1AnrF9v!A8wa7dXsrZwA z_=iIji}%=51In;|v1cNbiK==OVf%84HD8)%?YZJ+(2n>mq5RS;^m9<=7ONV8Pq4_m z#OnVe?!DsSdf&g_iJs`hXwgS+(S>0UZ6XNKd+$R;kKTJt5Jd0M>*%5fLG8_wVMJM9lm$Jh-a8u5r4) zNt`eJqPJ+&T8KgOR$KSX?Z=+E7B#}7me_L>+*$0V9*fJm%5CO$r3A*Z<>lVoEE^j+eA&_H|ERdy|@&8G%-%`HRUn(7mChg#$vbeqXci6Ba)pu zcAMn^7HC46j!l@3Cz0PM8+|(iv1ca1(l9z*^;CF?_8@fe;AEqgVXWEalxv9yy7DpM z3x=ozzbGJa+BwyusKD8Y(s{D<`w84+zxOqhHbH+jS-~Tg)g6FHq~-6fkCU=)@@amE zj;!L<>hG1E4Y1w!9`o)aL4%~bp)$=iY~0*Gdq>Yh)T{yfY_3uk6fe%c;m14n5{V`g zPJNx)3cHc1t*LG8t}y!57TL4Ll6*pF@3r&e>Z*~(T4;%8w;68t$#~!yQ9b9+by9TA zSG3;n!Kr92uTig**h6C3nv;-3x;0}}SuUGSZdiGa2kvDPt!s1p4xP<)CGNKT{)!Qe z%8%HZHgDEG}zT`Ve(nlwy#rL-X~xS;)EC)TjBud zIg`|$WskPC>%NOjk$Bbt=z0|14(-F21nvBhQ4eH7kS^_y+<51gI;moet+=f%!o~;Y z$T;*yzZuBCbBS3*AdBfrw#tutj-9+MTr*Ys6d`Bb8n-9EQ$A^wo{%v!4g;IpIoSBx zCE28cq8wIiDn(s7i+q1kdAe$azWqJ~urTfV$csxJ#ffDw_ngP~#b1NuZ=Qb+3@dFx zd_FEI-YngXBx~hvGWG8Rabfgt$eLa1CKp!;u*()qYPZwm04e1O^BhT{-&5t@_ewQP z;cNzU=Hv;jhR>#OVe@R6s1|gL)MNF@;Z)$3wHT{KZZl#gXgGk;OlFV>_22qDD9%GK>Y~ zZ=U{a3-@cZ6K|@}6kaRiIe9AV&L`C2sSj*WeWN@p4LejFqrz?ft3$N&!2IykMhL`5 z zuwp(|Tll2G#0z-%RJLB4Ay7;mA*Q53>{5cOn~t}NWA_GAN%EZ9;m>|j#4|+48BQ`z za6inpEWj8A+lap7_9%ADj$A0-B*vN@T)a_QZ*j?7_#j@?!@3COw)Lu5F;a(dlz29{ zJKKw58mY|h&ex2qT#9$!iX42n%CB=a#}06JN3m%juOdAV(kp5hD2%zUzHz*IV>4ai zj!#CKsgrkhP$jHfG&=%#EiqwVZKQZoyuNNS!d@-M1QZpB=_`gl`*-X^FaST}d{VC3itq;$-=5loBJPkN9co|-5DH2kx z7t)E^og>I9to_3|hVc}6c{{&A1u~rw29i6}eLI_EP(8LJKf5Uf=dzjOE>4dvQYz={ zo=OBQSgRCTFtp`{{~MS=CeTiGxi)i@T~((yurg8D|LV5NM!$Z2(FDIY|KtSE@nAr;o1Du2s9=SJeI6KiAj~Rs0c~ciFm?(l) zm>N~@3j>~mowo>$9UdfnHKD35HO1sAkildUN8~7<8AXK3M>`7c+iZ1+p3&TJ_8t;lZ=V-@ud3rzVpK~x29{%-!2SAI&ZlB8&XnN(Oz(U=aNFwZ*cJ_Ij* ze|T{UD{pGcn>nO~oz|455$5=9JCud_ei`8t9^M4MS7L2cO~U4_)B}XS(Las8dn$M( z%*Z|4lD{myWtR0Uk7d>}^DQQAk}HLqq#mpZhq8B}GfS37;1hENx)i>r3&gW)yiCBN z9|MDL`_LBh3`wM~6YTH8AX3C1)q|YtKWQmp|DUE3LCWfW@osaY%Wq}T+A<2qCcepi z6r@;3@}INv_~~dg61_L0_yn5moApvsY}&)Na`x)lEEixFJuQj*iWvw)+-ldux_1Yp zIXnvGc-pCQ$9*q!Nd|FE^7lm^9yg(EyQok}7vJ<3VI-+2PW4;%B56_%LCOOxwKd^4 zG+T_PQ@1ZQB}XvypDLRYtKI?z!a=xR>rHyKh;qiwmv_cy;Y9p=^+u}$USx*ZA|Q6# zC}BE$qS3wnlilxQFKO%i{vP@heTd21qIO*CQP7opf*p~>XMrVbLYek~@kNcP)v?UZ z&*58hpQSkW3~?0bNt_AIbV(X(C#sz`Jc^!oLR4)xQ#;b2!tmL-+NKH_36w3R*eOkg z4V%-|V-hVfl#$2%Q_T5xt8%q#{W(hX%HxpQJMDNzKw#lWy%Ar%zCnWQKzIryWI{8u zw>L~<-LCcS|DMWc}92cHBTYA2@U82P3-K8Xy8QBDWqxW?w*e-RDpj)ei^P+ z8xJk%2uV^e=d@{5347=LNiG4v`Gw+#{EI0ZlpIqfQ^Up}*V&2*T0ig|jk6H3@qjWE z;+;MD<>QZqX~f za@pvY?}S+(7msScGup*&u0`4xwi)H91JavaGalM0NpyC%j1XQMzz*@a?kuL&YD@j3 zwR+?E4f}=62df?#bmr}Iv^fHQSo2AM?c%l@nJL;?zHJ(^1)LY)d>9eoA74A@=nhjp zdaA$RCbO!&ysCVaneW`%nY!8505v}}&IkJXnO>Wk0xwQ$lGX%uq(RMr3eu4PgLH#! 
zm@t~Jy8neD*YwzdOhXRCom$x|g#oT8eT`N zUH~wVh;D?#owUd12Jq;^$$pdo-8`hpbIsgvhQE3%3aSe9Zq4qhbn|gKa1JapUxGR< zyl?E=`M!X_xM!1EObHDZ*fQms=U7_cGtD(p&J!5FaH6qMcSDn!^Mt>SGnZv!{F}Mm z7!y8fvYYjlc7FzH~JdJl@K+3GF_?+ndPK0~rqrkLl70Hkkv7 z7HWqJ!UrX6aeKqSULvoomq{bNbeKmOVpK9c3G~EZa(*@K5q4v@-m;_5<%r@dl0hlP z?X=hl+eyK6gKa7nB*)%bDTVF7<@&5-9$Mxw{1hKn*XXU}!PvX+YyEXmZfwir6k`pY zs(n;D>ud_N2t%Aamr~|7>%0dNbS0~Z(cLq2{`=P=V_|~C?R?*}bCjMuwo?Fo`6b8p z{k(7P`0jv3wN5DopIlUfHk3=5U=q(CN%9zf7C(QX(LR2chu}?cWQi)FEvym#Igd6U+XfyN!rQvZ9!FpsTcQUrGZ>N{;_g+$)eK{5R@$3o zI{pWMlEgE}ig!59AyjvM7PHf2p`1xA92M`LV(_X$cR%d;qn!!BFZtyNdv+*ya)5On z{{COYTB9q$VVdXPZ|B=@@Z?_5f_!AVee0~4nl?_9+7Nj_Rt)ADx> zGda&8=yl850O=_WOG+W=O~)o z+xh}Uzv?^CFJ8Ap7#rfPJpU;xdzoN$*q|TO_N%czF}D040FmIWtxZmHgp^E-a76lP z56D*@KO<bpHC@gRy4??&9~7?xEc`1OwT z=hzx_;jPsAv$X7FLuqV@gcevocEV04csuheYJ;tqmRpHuoEV>?-AmH--Yh2%1rx~A zNu)ezk4**d5jQ~rUcB9i*k3^5&mZ$LuSC$hFkbO;gy7QIYo%<}&rarackDQ+V<+#d z68kr#{w7iyN>`n|nK6N}vqet<&NEw^~RWALTe1;q9K|fIX++0)ZHxNv5-gS z41Hy~H*isj#>j_4JG#9b4B|p8E9-d;EgqjFEq8SjHny&Y|s;eu#&$?s(a6%k? zGJ5-X!tYZWuO52To{io!)hJk~8h3R!g5H|1)i=q`YV0L8sYu1clsQR&RJ_+V1kDwE z%u-5TaFgLsyz;ShgGpprAAMqjJ5XIitZ9jv$A!c{mJG$#&w}}!?^frnVQ_m zLGF{D`2h5|ev0TgE%d^EZ#YVm^=R}P@B~ytXY(y2bX;KS6r8#no2oGcPASygwh~J7 z!Ch9}#-iYn*cFGs)+21=k8|90QDHBQA%j69Tbu^Y19%oBRV9p1l+C2Y;=v zME(Of?n6cEpYJ6+&ITFFh-=%m8CShw`RLL7jw@1#zmHB0V^Z%EtF?=yet5!;V$-n} zAwc}>wdze?go(%*mPQGKSp-zopxTPMM3d(o9b-Q&NDRG$-7j!{8Urb0N*qD28%t_( zfB*Jj!zz?pKCJB(*E!{(>Vm81(hse3ASaS)oG~p#4zrc=x-Eg3)kF5~e6Zl+{;!o~ z6H5v@7= zlQm8mb{DEqi@M0}oSt$!u^ItG&w5C6p@g_9l4g(SA&Yci;}_7t6!w5Id-ln9Da2DK zpOgd*La0uDR0^$74lV|fCMmn`2#W~jfFF&vhphzE$-N(Mx=oHYa@bC2^$~#T>9WyZ zoQLSfRe*_hwl}heGrY8aY<E3qW`7aRvKSL`0)%tj|f9}7TiNtY^A#O8bZnb5rU zaX8YoRxPs-`xDmbQ(9I<0}BeYw@{C63@#L;wfE}uhpe^S`<|&&h+u6UPDo|~Z*l6E z22KYsKy^exZuqez?Tbf2(_C+eoVLA%v5B6gW{mP^{A6-|ClB7FyBBS}{}P1rI?Uf3 z&AE67l-8qh)IWiccHtK`S4oKtR0$MT8#ZgyoD*b`4STb0;d%mto09utQa9bu9MpGI z99LB2E24RE=7qAJjKCD>!sx`u#BByn!g&P_t-==*gN!7 z8hy|cuklBb%vS}(=J}0 z>`cvhDXbZmkL|+AhcR%IuJG@PL|P+~m(Q+X$e&Qf1pbmNV?@0#TSo>L5pHU!OMydy zf|C6jnrTxxom>!OzAQ%}Quouths=?0>z0xSlHACld1G%jfMo`CQ3qXfjIb{H+-xOV zo&wCBh6rnv^B>Qwn!Uc(*QWr(sfD{{rbH7aZSoBFtd7s%(w{S|pqvn)Ugv=6S1I&? zAufuxDJOa^_V2B&pQWCMwd*HbbcNnLTiC#IN$u)Iv)A=^N`lLDa|SVQ1B2kuH-*hg z1m{odQ5;3O6r938Ds)tuK>}M%hSr#(!{bs*(mP7{72Wg|^y$amxZcfrGu$5WNsyi4 zE*-3NyTk1<9n7F)tz|4EgzSa6{Wa5L_MHW4Hsn6&Z8BC8tF)wb}!5+L48u7)24J0qO%szEES(<~q$ z#P>C>_EN^xsNcUY3|3xeEa=sb-jm+FyRVcbxqFJ_TNhxePrjp!AM!~(TxLwP#`smS zG0&31Sr9jR>_Qtw{O1EKO{u_vk*PWLIBjotZ%)iEdwpzq=0#cCdQK_RJ9Yd4bG(CI zvf4Y3TcL;12eRod+*=v+1|p~(aTa;~Add1*!Kn9PCB~+~TjIqYC`y%P4BXWU<684r zn?fP$7w#{ogh8e-b;Orp(scJp zNXCx{(qxq(bFvoVmje%^fhxmNtMv!#6lnj=dp5;5-p*urj;SDYw)tKb)7xt{?T?Y+ z(y}s39Klv>?mAWKpDv$U$=}qBOp9K1)abHn-KH5&8HKA&&0DJ2ZTb)S@pEr?mi8bS;$_<{&{c(<6P!HsaNxykfi2*vmeT*RkE^e< z`CI^R-G6#}a5%%>D$E^w!DPMIU+ZTF{_xQ|N&RjvEJm92DorN&&tK#$f-!4*>WQz< zP`2q0JO;OcY}b}6n=BiDySbVjM}7vmRmSy0!cLjVu5o8BW-_Hkj7x-6`9E zJTpVTtTZud@@0EF2iDr!70);dx(SZUm|W3`Jj=lC>}YQ2nPo{^pYhLPY0T8gQ}az( z8{flGcMOJnyLXup^M#$X@V_2w5XiCg=bL%)q~Ytgx06;LjWj!6X(}rXWJ2!&cwY|| zy@4IJ5gqdITaH5VjxaTasF_3QZ)pQwlpNN!X&i>ys`Y7QG->UY``Ckv9d@x8K#Gkf zdryn(mj*{tq9Sg+lTv}S5%E`r2C}dUcF4d`6j67k zBQ+{f^jjmt`kvPM#b|hf>=#f|GrTZPH5KZ3c*f$MZBK(>#|R>t~vSmFVbiGdWdOh|H>b| zh*;^Cf)G56)5$h$_F^p`ee_k(WW1AGtPapcefoXX!v*i1Esr*8!m#|rs|tPn#Q15@ zbvn+(GM6W;OEzg1ixyxyZJ zv*12!L0GXN2j)$+?kvDY0Zu_j;FoOfl?}HO1V$ksyRJfU+;H+tY+us zYi+(adxe^TbwO?|0$hH^ z-;hEK*))IoIucl^Ff{#0`AUD|;qm?Z3CzV8w*F1KejsGIee2{#|7x;_5pw}f4-*~u z%T$reG1gKeaXbDmYQ}(+YXlbULcvo7LXe5u+h+a zoW2$6@idK74Iwx1*i$*#<9&(? 
zg)&Ik;E)Kx)B;S6=ja4%X(fGM7JXL2N;9Vb(gU_0?R{?dcD%z_y$X4$m%R)kEI%6X zj$uP8ay1_a**7zF(=^dskUJ_5T_t!F;-w{W`$`wtv1Vk_@(H;{ZqJR;2%`Qfm?-eq zl&F?S=o2-3PG+dUj8NvXloQX&F0Mu~7d&v7%+N6|k-n|DbUVPyZD6+3S(|Hv{C1c@ zP5yVHw<>IZ(sAX%?6&7k97?_*8wcXm{+jj&#h3#9M^F8%8Y6w#2 zY^m||c!dVDPN-pI^N&qi)4P6n!tYi{Ti%{^l2AWT62RJ({n+@V0%Xl!ZA-`E#y^rl z`wt+$Bslp}Wqk8HGQKV_S z5pA^6^EKovG}uirB;M_* z^;Z5#``0e5A=*HEh`@bI2@m!<7_<>|@V%&3Vx zefJ}i`nm|A9?D-5KbbA#ZR-eoSh)(BwC|};#l!MrTR{cy1@*3DekIDmcE(wzn3A2a z!1aw{=?SJg5Co0^V+2<>bgHyBc&EJp8y62>H4Zqt4iCHAI!wJ&|8iZ|VEgytDiWn1 zDbPb5K#ST7-%LjaP7P_s_C5<@gGX4P@{_u>@M4ikvxAI}k?@GFCl72W{(k^>>~zV| zp7+`tBcslhhjizM+BI{t6GVwvqk_Zv#Jx?%BMIn@VPdJ?R<~hBaO*pfH+JeCJ%>nbgRmy%b_sU=oh*6Q)@Jwl)u zAnQt5-13Y4I8dA-(UCwp50tp6nq+dagjGvpWn^c?$1uxN{j#^l(Mx!wj%NVp6|l0N2bs`oF_7y4q(|l%I5no zo;f-2xmtk;NTVPo1EC5l2s5^(J7jt+m}~6mL*wk3cbN`N;x3L&U?K>bFmh}_);4^A zLR=PPN9oB~*@jB`Gro<`k34xv^Lf0Lz#4dzh8%s@7+9pqe_t&2!WHah?*txYo)z_f z>rS@ZXPxGI`E2}lj(h|pye5>sPVh9vUhUWSgpfcV&O55xtMiN2<$#xHxUnZ#@(2NETefHdd+Y+PiSG=tQ#9yDbgx+$crSq29&^r=7Zlo%^6p$zb|t2Z0dp4 z|IU9b`{Y1if}z<*WO=wlW#HBil)=s4B6bUYYq~mIYa6fK{9dnDD5x)Bqk{)$>`C2c z{%wd8FMA55*l3dEf?7PksGPm8=TUPO<7EG-KG_USiQH^pRzNPmv4c+%Bk^0S7? z;5`3~0StcCBJlb7^f&TUORn>&&7S`}@=L=zgaEx8iTB7DwAW z)`=I9$NZ()hA*M}rNg=QSKe)Ld%M#a60@*h+~&$?STJU;a~)d?mS@R+%sLI$$II5H zU0Zpx@Q1#Jn>?Mf&0!C?yjN0#I|#pKDNv4du@;zn^k?Km9mO=#aTXP9&znz;?j6J! z`IB2}IEViMJPUwsGuRt+b)pz65xp@Rdgsd?%l4&Y~qF= ziSNmo%^3HuPVQS>i90-R7cC2k(HsYH#zZYzWJP@nS2D$!Bmk^>&& z(XtB@oTjePE!0Mww1g2_g z$tFs?v&p#xX{)SKS3FucL&6iA6W+uj(u4UVSURzsO1bcfcf*fSCx!4F{6+t*Ep60ATW=zTHY z(ep%hmV($djhUy|sVMmIZn0<(?}8Y|Xt>&RWcsI_b~yoip}M8`uUo{`zIb9-T*aQy zIs?4u*87sPQ;wNCiQ*OOz=_M<+e-RSDKB`Yt3iXYMWqK1>yRg6-5xuzW!wtu(M9g{ zU~f#vXA!3!FlxQ{S&}jAI?iil9uFdbRAkM0^97vj3SVeqos!7eMX2j*q(qo$tG~Lk zNHuZtslieaXEe@PdV3sXZHTK+?ZUWJ*ac5RYzHW;SrS$_!D-%2(77vLynMNv+8yjg zkCmWw95)U!sZKFjSK!}Gm~#1+!_PMUm|3|V=d=3Z@S!eF9oKtBV*c>Sv%M;H=FpRk zDQ+Jho*VtJP60EU>1vjk~wH`t-6-4bleR>f> z4?DdiG-OXQsIE1MarL6Ly&ngN1hff>o+tCiTu3yD!pVb+Xjjwioik@$VI^8TtE*GE z1w-AT6czH^!)xPR`Aw>wk6@X!t?fzTiP`R)D_gDYcAH_aPZ7c4Kc|IZWerBLvAy zI&8nxJ6>UkKfPCG`N3b+919(;Z+1@-@@QsTy6sfEcpWPb7Zz{UcGqgU2ib0H@2h3$ zoPz6_?kG90LeYk{C^n-7S;jl%`2d!!{OBHnIj`R>D`$@}(sahYCi-)BL-hB|=&#+Jp zw3MFy@1IAUnP)e_m%q>N{{d(|bVHQ@D!aE2`S%~dy}`2>Y#ydh68Ybte*l;amRYjk z21Z3Vn)~zyC&;|_;2*$Z@%Z&U?~4-dai`(?mijgeP{9BN9tdL8DCuB`gP^}L@4 zar%Bb$S3IX3ueR7L6-FtUi76ILklCHzkhF+#dZzyJTSA~Bv8)T3?^8zsBA@Er6fe( z?hOM2LZCRE6dw7n_p^&pr+fav;Xxd;CApw@b1DABDn&ZXTK^7OZukmLz_6F5I?=uJ zBkt=I{a^ukg75tv9M!4Vq~(d39*A~IOKLM4T@UL=vW8Mo%PN1?z~Z_sKz;F#N?E#j zh`CX|`J>`2T8uMXyt|)!%(jwr8;?6I45h(q$ILMK#}CaJ&+eoh9X%>IBdWo)n@iU4 zruJR~j9N4>6_xkolln6)6OiUr9r0QuF{MdtYH2GspBc>etdEJo7mnZ@a*GZkp1)*z z;v^Lu54PW$c*Nf&Q{ZTV{CxQMRgoL<*uw&(nXweH zv5b#c@7!dfI3z4^dXPbN4@zYCysNMz)QL9O^>@7X_*vWB*t zaV|lQQ1(CSef5o2T4nGDlh0QMd-UR~+q|0N79?FkLcdYN=EMK|4T3fSqM)^D2 zMejr6{QNE2U#l(C1-LxeV*-4uj=x-chWmruaD{fS)n|Ys-^TFUmjK!6_qTs~W8qJ1 z2S=F(98c05b~5~4oz8(3J+Q=fzxmebd73tf=fa#6LihE8K&V!@GfAO+g>y%44MN@% zt~XIxO|74iXuZ2SNHNB)YMtG8{gTK#{&||U`;#Ws*CHF}YA0Z}ViP+4)?S;ME0K0opVchjEDQQcyCKXWA3m9r9;?h>*{sQP6J59!|W6X`*Vo(f5Mg%D3?-*no$M-%mmezdcU5Qt; zcVv=M#Z8YdkB2~dNPV5Omf#Q z!*?fR`Xu7V@wL$U=j)&NwMxig`sOZk^vk=KWj{o7U#U4KstrL%OIoE8#3c}uFJLuw zr}k;me+Ah>tM`Ue?VmMttgKY4u^XZ-H`hGmQNtf=eWNvJnsX)8n&w(4-v_+ia?Xq+ z$6fB9B95x@a2G35uhe+(uWxKHjRP0-u_4g`GKi%BP3j2iSuY4$wP2RgG^uZU1tg`_ z){vyxbk-BG^7q0JKfNdpTln=^l;0zX7EBrLg9vgmy3+aPG}h0#`0XL&^a^BGirJo#4SC*M+svt8IOc8jBc9hf!d<{F*G2#apIQdqU|Be1UDJ@uam)8 z;76Y5PhwfBr7ufOx^S&#Eh+1xITbOzU8X1;v%R}5i$Duf-6bCqn`hP?u3WDc$33Kg 
zF^6x|T@D5J=!=XUYYBx-!6+gAx0*B|oawwd7X=jzR~AHcP(sIsd+<=b1H{be^>b+)Q#=^hknclc z*Ri0|TG0x@R??)6)eK0GgWCBROQB%r>U{tm>MW+rjqH$aI0?QE60DlMSdVe#=$wDO zoI6{hb&y2icq({CcVId4`CJv`6Z?&sCK#FMIHaf@07W{9eG?L2A%1UfoJwy{-Nc71 z;ydRbRKuZeXsHLj+B`69JQ+f8#{;{bKX&f)# zvr)9>Zobcq>loP=lg)CVT4&Q zdvy2~DwxQJWI?_T)Wu3OwEBDDq;iB;s>IGt4`(=W??gvBnH{;mRl+ER{36RScLxO~ zQ=RNJlp8Ipl&r0F?`4*mSsnqFu5;!^1e!P*xu$wmAN<@v2OpC=X@dxNG;L+>8{l(PUEd8$Tm7RQXl=|_ z(^CNN%g(+z5voWGLU+UPJd^0_i2L`>mGThIs(K7Kg&XWy5^ipLD>>n@8OnBTCC(es zF^xQZ+wGRD1gu$?-6S}^nrUg2d`6N>IPVStBcdcoH3Fg4Rpakyney-latAtic(aU$ zQ7?*rxL7v6{7|aPNId?&nBh|g;8&L`Bn@)PkhY8cx%z`Q!03wK zMy-iuhuOJhSzn!IeLJU)W;&9j=#;hr}Io)T!SxG;-oPIY4J-6;lZCBT@95)00wW;LmnHP-1@vAFJw& zsBb9>5~f4TjJ7YO_hZCQJjKu>)$)Z`tf0KIUSQG2u9-eMQ3Zb=b72_IuF05mU3eZI z@+=-q=M{fKa%M8`CfE{a$oz58;~>_tXuR6Y@IYwQWc5%yW_EI65o?AE=$u7mo1VsH zMg81n<$w{d7d^ok4~+G3D{(NR&fCyK-VYc1@c&rnofos2srj=Qe8OGinD&zXrC8~O zL9?=SG33(>6+n|DK-cX__qnfi-!e%;<~4f@wp)d>exhf_jLFJU&ETTG#7EMmL7-zD zl`C6@h^U13LRiEsKzo!AtiufA?qYg5J~VWNs!#>u9A!UP-0jhDvCFkG2m z;*}s(QHJ!6l9$=X?YgKg_AO09wbGYcGSV1v{kB_LeOip3-|SWo?nqK-%~*^rGaAw7 zMch2f3gvrmtV#dsYB6z_ny~>hZO#vK0Sol7!IAb+m+BnDV}7UX83>zsLD4k+s@Ri)9IeLo9)llMPqj>zn*af87=&`ptQ1L z>Ip0>=Su$FUyTpEY-ld>L92*$c6h z6FAeI#;>Q92vu(5bGerkCrRVXcm|V+AM&5Mz?P}PReo++#eyhkirqL27Sv*hQper7 zQ@w-Esap8H%0zEJY!La5B5IB?`WLnCO|Q6JVsyO5e>sXrbq9q;`)- zk)(s(viIiO(wX@^|G*e}rDhJm3|iX1uaLezuiBeh!){-ywanFk`Ejzn2I0#&PI`@w zQ-OY#K5lLlhHGM93GA|=J=0e-A+jdPi2Wp!vDQ46Hexs51Fpt!s4}BP>J>$*>|ES} zubH$xr8+3JZi=pHLTdu0kDza92}jqNO87X6zB4MgzaR8oSh34sbEA0f_v|HDnKtqy zMTEwS@bj~NndD85D#jMl_|w;oj_@9K6khi>eQXgt4sl4Y*4a?(Ko?jd@~KrP5+<O&h5Cv z9(n(j-trgsLm=uqTeiQZz|zTfXG_|15|sg|xj65U~EZH5F?nWF8Np?{!Fy)563Jsc$%;~5)(T{OuOH+fm>a{0Jaj%@{J*<{wFiYM~B z-Z+^r?xW4ci?waodreB;k--Oy=`z>_v%b2?1T_xGtWLRRfxf6bihC}NyRsZMY5cEt z^XCo=07hiQ0bGav+-stz8v-G3A4Cti9(7XBUP88&|J^!UQGj=Bt~vJ)Ad&vx%^F1^ zwDmR*;du}v)A$2SVtqJQ2_Rv06AVEWTipbe-#N~`!S?=Fqe+ZExXDMAYsnG+Kct)g z?*E@XpJ2}>FW$`lF2Hl4IhwLQK-+zhlTqS-3RwI1nF^a77UaJY)ig%7qVU7WR;2=chryc2^644O34h3+z2o{Nqj~V~6!~6Q z>TMW>$m~%*vVh=9a2BJ@Kbvu3EXN33Prei!Yi_@K*8^Qu@354WoP-!v;m1X`L z6bG!Hf5SQBV*a5SbB$akB#%ZA4p_DSYb!kL1JTUW(b2azKXFa}xBpwk4md0c^!U%+ zPzUG=B@I4+F@5I$ClFQA_?7pN{=|Rxm!lGwC_jV@Cng@;B%`X#{u`^ZW67z5$zZ-W zxK;#*+&q({g?EW84DvU0fI1bUPX#iP_rW#)BLreG{2TVN|6BAc|dc(`l3-G4Z!hZ*|LH`$7jmiCC(3~9f zGr+nUO}DpMS{_#eyiiXFQ81K}vl8sZReJjS=U?VGW#h3%Zc~mIk>xesOk?%)!Z?!dQ?Up%ao*}}PuCFKL z7V?jU@uD<_7(#I_BY?=VdRE1w3#$2j|pAO-|#KW~)qUvYYFHHN6Wcnshf7T$Uq;GHC;$?hE~^#>s}=qYR7L7(RQ8 zoxAmAE?k;F@vR&DU?0bh>`qU-s(Pd*BWfyMX$ZSLPy*c@|BL)AH;r89HcsClC7H^ELICPf5g4~k?2VDlU%Iy zcUx*r{)BOf=Q#-d_SM`Oh96Y?Nc7!0y3-7a8TRxD>=XH!1~BzxCB{E$(?oq_St+Bu zM)Md}28ajddn697Z(LessXh+gOvPBuiyl!4*2KGsk2zx$1*5=by(ktoE`ReAQi&fa z4d>gBVqVHI-XpQLYI_(A9Nsul-(U#bQMqn{S04=SJJi*5;?&Lj_8#V&=Pc>Gn%`FI z9Z}^St%OS7*AmnaiTEiOZ_~HSf@KRmWLgUX*o)M+;$d2BO7&^mItv)+MDhHYMU);{ z9}FfkRiT0t#jwyMN})tp9h;2Io_e5Q!uG8i%Eb6vV?=*92dWJ^mX4H~9JHc(vh3*( zQ-;7z2NFKQ!yi{i8)Sd7DB-AJQKgeptlErVW$Y`I&$BSuV%#@VF~YkEC(h> z{rNOss9nj5o1yM>Dr;>~N6RNE3Ai=i-m7un>cu)N)>ssyZ*S@JufbCnd@JQLd>~kT z`Ud0R8CP$}9z(2e@%LjyNaGIE``qx(KCU@5G`QTvmqD~@ytDg7zzge=-&d^*9mNNv z_EfDOEj>8kq8Vd!Cv~T;u6k?sexmbHqgplgoqj?WkYQv zcZWt1i~B#|i%i}1Pq zI~#S88`12gt-Ma;t2rxd@{BtE8EU05RCEUc6k- z5;Ap?SXi(#`(zy-ixDsM^tfc^?x=~Zb@tQ?w*#|e^)k4rTh`QX&?Fuz^3}VHLw~(m zRGatQSrjw%9#oOJ&oMAzpRB1kREJf7rze1ze3EfId#8U)XdFI7=I%u^W|9?+7>TYH zqZAuT-+ADlZ{fn77-Vv2rBt`1Gl9a`n};Fj6eFTRowTfP+93x4w#n)?fx&O{xhzL?eXG;gPImdtdM-AHrmYJ(IoHUf;FM3lhmd$q#RC z@UHKiFeXj*M{|Mqwzwxm9`rQ0Y3!qo`3pQLEEl{`HIFQ^+B7lRuFprCm}H>(YXgbG z`*fjNzxvv+^Hp$iW3occXn*mx 
z-)PrJr(z#n0>-q;1)*ujMZe`q0pKe<*Mudslg*SFevu+SB7Tt@1Qk?oEBbT2Lgrx|gu%ZyA^m9m}I#IQMVz`vIDY^v7JYSmphPh@_j85B&CqmQx2}K;i)bkp9*(vw zubD5Z8L9@Moct7iShmtHe_4(I@0K5N2lhwk`8J2Ip@5;R{`Yon|4*4ks8VX|oojM&5Yq1nJ0J&a(%?%fR73-R68ZbX zl@f)NH6mc}Vd7c28> zmB)_-c!Ut*VMfHNQewD?+WhTCGCBI?c1>+l=pM*u!J_U*pksKMW2Dgm%KN;XihC7O zQqmEHup--teTk7CIMlVib_`al=xg;rNujMP2&L}Fb};kogwQK=!;6Pmu9m!MLofDG zDC@(|IJ-OgR77j{%Q}K-Yc#8PJ~>Rlmif+NnK&u~!mDh*wFE~fZNy$0)=`N=8=T(7n?yRekPx==nNm+9MmAL~Lm^z9OHCHPe*L1olvm*f zWr&vO&YYLrgXWS;T9)ZX)M6_^g=T|79s*zH*v)W4S5IS15-)6*MRi{o*%P#552rF# ztE#{G?^QSCiFr~)16ekQn6lo#DORJ<45lTtts71%>o3Jo{_|r@4*Fzd1TqsW^0Yej z>}=21GTVjE7w|AXWw4S%@;sf zehues2xsa-L{rq`hOs?!f{%{Mu^gtcTdMq{cZ8Ql#TE<@;{fiZ_XXpEv9T8=33OeN z0V3NH z5^z20khv7!%iEThs0b*rNmvXGk~vq>6&txOOkhp(>c4&d$Nl?)roBwXd1vOc8vkpc z#rMCWms^}mWc&&2C#H$I^c-!8qCML8iy)QSf%3DL{ZWCzOguMVzF#z;THra-GwC*} z9WGbafR|-6x8cYG!$1dDfBteVKm1E|^rOP;r`i%t&_{`L1^$bYf-;s7ye%{dEW5Znxek1D%JSpR^m^)gJu?U^y(=F^VeT z4Oi;5QpEA^_)~=3(fUqSeoVG58LxR9qR2+pTU1wC+IXe)j9Z)K2-fozC~_KDRLS<{ z(uV9zrmkAeCkxGBF@A6$ZOkZD@$o}{CaX?b5R+Jfd@sv;A{+{gR=unJYBCLE?_*Cs zahuc;d$$u4@y9&)~B)|7q>e}d%`_F#UrS5QvBknqPrwYPkN8pD@tuZpHmY0%0 zD#uTw&2k&Z9sK;!H-HY#HdK{=N|wYOG3<3*&gOc27NT#GU_SP0REV!u_tsiU9e;z} zv~@$m6zM??a*6ta#fa0==0^j8e^;MSIn!Z`1uHyxt4yEgQiBND6BG~H#CPkP5E#SIZLIX{*gqEaUA|%V4jESeFnnlQ%U#RmQ`Zs{x%*zI z47=UjGNRH)HUR^dxX5!prCJA3;pQ;7yYs-6*@ZhU$C_FJrD^J)d9sVA1?bxeNIS#| z35XLt1RggKBvY0G#hLFjTpF9d=T+d5t4Vuv;uTBTmf5>I=F1IRYf=42%|1f#;@m*G z=6~`@uHE@~xftA=g!YH;ASd2E*s%?emu#}!Z<)S?pB*C1NSl{rkz|}V@_^(Wu1?lc zj<~8;6ZVi3Sq6zcgQv$iG~q9fFIoofUjcXxE+2N9PpJU2xj8n8e*y6>SLjPNfDfMJ zEWdkvxV>w<^PiYwD>~gWxTMBb!)Q!`E6Ot(pTWb;rOqGz1*qaO*q*=D=Y7){`HGN%zi@!*f7*_}eFbpv;sN|Wg=yrCYA$)C(*VNy_zM0XW7N!ZnXtp| z(F+o51LOBgc-uRH>DHCxnQ8_50JtQf=vU%QVTgy`&mPrmAQvD8!nZJbI!w+KHzdi) zu~JYNJ?ggYJ~j65rSdpizaR&GetRc6Y;rG$=I~|oJqnKnPRjkld-Pec!7_ig^ki9v zoLhLvAVxsqHJR-s7b{$S;y#u3NDe@ZRuW#}`5u z*OLMh2cH7oy$gue`=3-)0=VgK2OpfsR%84BOMd@Kf+#~Z*8daebPVFNNNj;%vYD#@ zYOBAl#zw<5iX1%~eTq=09{pM}Zv*^5BJ0kIPr^ny2N;2*vU`-uIlM0t%E0-GiwiL5hH;tcmIqnCvXMHwq5`{{VJE~_7&cSBoBsFoWJ}Ou|Cfqy3rBO;?tSyQXRp(VcFVs` zfBLWD83^AwAjb$iA>DJ4p6y%RR2=Kp7~5BSNnTlEpMeCg&=TB!cZB{(R|87*JWH7@ zXqoJ}WF*9$yS8w7pE@w*ZiX<7{dxNBbNv14I77R;kNTLn(62w!U5*2iK=v!-!d%IA zHZAwhJc&r7rh`$QV3lq3n6vbS4dLkL?PR@SWzoD2uc28>`62>rZx*jCDep&Vpv0nmviZ4~KwGyLJT6ss%Gxyjp+^$luQ+UnAaA_sj4LpLNPd zy+P=z zNh4661ciR)TsA*WmQK^n;zqtE~d2mchoIT){3s_H?rk7VWDb|1@GC+&0Jz)M1)L4 zeg-PV404xY9{$=Ov0gL3W~S{|KHK~Kfm`b}(Cg+pu@=}>DtuSnpXd_>PvT^!>(&Oz zt(X8c@lBLKFEF^UD*;xjq6!?IgAQPWvJ|_jbjjzm$KZM?`ZTWUh_Z@zS~k|kFO#(~ zA_%`OgZsUw&;9%T6WSUa>xtD%M%=%3%`VJQ71R%sKt9Fh>bKWp^RrV&{FMye0tSnT zWh3kDe*xDH^?2|67{0y;q24jZ&P~F%WXSf79N#$) z0a3)z&A7|;OSZlH?(~8iDr;Fc)sP3F=@m=hQP1N`#CsVJH$G}6ix$~|dLzP3af6j@0MFThxca%`J#p#ylYH&6Y7%FuZC|Y^APM1O zfoEj2-l*6A_{K*s0H+&F+a9n|c49DNWz`?-lZ&0+4A4V+v~!9cmt;$y?Q6Y?{1x#+ zMB-N<|Ht_UqZeL*-?m6xkeTqL$Gf+6XE;$oec2DhpY97qIoLVIL!bNwJdfAQ*9Eap zVBe}ymj^wxbf>wHC-{AQxtMI+M4FlK;m)+&a9>LDwrn{6C+@2_kNDN<)kYSz(p5?J z;BGHj$XW~SAw;x=)5X+Ozi^IT@x(Y#^v2z^b1m*{ak0{_+D}42XL-ZEZ%tXAdWy24Rpu5N-Z%h%?B z!znMc0uT7E5^mrf7YV|X5l5E+e+)+NRE^vjDZQ8~Me%laUUqhTwdCO77*W2Xpb1-! 
zw@+N-0vrNAnp&uq6AX>5$HaEL#qSJHkSsfzGqA7oUG#xdzXLoV$H@kn9Z8!*!6~`P zyMFiD%znFWPQdz%!k!R;->~LWWoc~FqmFt~bnuPD*5d@`KDVHcyCYAc9NLUmuO-HB zswb`_>0cwTdcu&MWXpblO&-D860C?K#V|$d8O+!+V&<^w)p>?q9`y0q07_{QY=i+5 z+DBEH5MPcbWnUF)UebZ>u#aM3bXf7p62fb z_{#BtmZN{fgA~46xF`yjz%*KWeCPvcxnW0h+NP6omgfy$d}wnH1_E^6g~v^ZADeM&=;(rXMvw#w0H+Aqru;ng>rU<{8-`rUw1NuWn~ZO0Ug;YDUJH zU#3z;uQ=Ul&_T9wwZtpmrdrm-ON~!jG$t6eOz!zmP~8CfyK=(aTio{ueJQiBy zuU6ZeA;PZ5$0qdy%}(*Ry!+g%%!OMby+!ruT@o$2__SPh@1Wyn`co~V%0zTy()0a= zByHrDmCD&+1?TMn@Ti@`IBwGn=u>t*d9cV$wNLq|*6dF zE#i|M?hHgvOYUjQ3-P8zPx{H(8E(O)gQ#|^r=E?_QgA= z3(5MH@gVS09KFL|Kr}>atP!;Q0`nqD*H#(2)F;}z`cg*W>~3bhaGoddbBs`gFkjG~ zkFwvgxITy&RSpTDUm88yl^#CKzP3T`AQ$%Gp0QTYi@vwJw^SGIoGL8En^;s zqs?^2ARetJh_v$Y61vh~Wgx?=RT%XYW7&tdFp62yAkQD0s?e0C{D#YnHDOHBBa79> z>i2P-KP6VK%C_gZk^z>QYPfwc6kI$ti|u!W`#T6#MH*tNKE^~rjYJqG`n-zrq@kZc z{$Q%4d-rD4R48#n)`CCD*_&BG4?5k!t-=;&rBL)slgGED`bw+2d_(6oblz$_8X`KE zZqqO6^TK8fTN=h48gygyXuy4#3o;8+U}fP_llr#e3b!tF5K3oIti^TX8h3=MHLhQw zi$OM+zoYrCJM*kHM1p+#Ot(THuW8J_3`NF|+z2{#m%xcO{Ze|8X)n>ZtH1OYz_g(V z$L0|BzTORYES$>((qh}`F@^D;5BF9>id{@D|WYw1xVWz|OVF zNV~smFoT|Phl0lq9`)nT=4fl_wHLkNg%0S7__;4KF%H>jQVpWogp9p)Bf>zrGmmG} zPK5>7m+3)6pUjqqUNt6KmQ#2oTE$FIio#?uNk=FX!Ht(a$q;TFf;E`q(Usb?wwn2s zxwcJTAUa$omYHCH*;jufmHhNJVSWcB8(r!x&bJSt7xQomm*Z{@taXja;Xp?_m zt2~)BoB9a2(McFEkqk322G6q+(xR?Sl?CaEUrxj)CDJ80J73-S@hVo`Y^@_T!8a%)engkB35>@?50g85N9VVBx90uo<~7`XnXO|(>glJU(a&2@RYr`o@A z46f<7b6>A!F=!DeGdyZYe+PD+9rCh zOPz<-bfKyf6H<3r?4b~snCr0l`K1YR)=;<_4gTOyUk^EWb@ zGrw=v|1&iGJ0p0vdgTY)*}Sc1r)FQZtzHk2f0GVy;AU@?^fOD6VatL)Qk83~{@&za z9^q~iFR94io_E%3PVMA4v2beDEJhOhdi3k>ii)<`o)<-L)@hAmTIDm!;`qXN$D#hyj>Yn{r&wr>!z6+g3tFVx1G3DC?YFv3c@)``%j_(e{ z!Fga_&&F1Cxxdfr$sd?kLS6kG)0HdS`4DMo#@S2C-^Z5hOpIZUyU>Y6 zQa->QhpBKlv9_yAN3XMHUX&z6*3e0w$8pA+IA7 zOIn9k|3MO zJ$l5yYyoBP+F)E!Td$QJWfHrt?A0wYiNT-%&a8IVyKOy4vJp1{-KtIS=kVu{f4V>d zg!soo4#CtD8FoK6PaT%$-^qOVXMOcgw~z-hgNCA&|6j98^PM;RX{x8Kn^VTmpa`$9 z*@QPk*ZkEH3@R1K?f>%-oj#H20S&u z>U8}{SqkHS!L(2Ra&^@Xf{ez4p1`g!P8IEKG8vacGY6KBwda8&T!a6olKkh3BAb0$ z;~Uq@lmhH!|Me&Ri`A8~axN~(u0ycG@zZ|qdu9IzIH#TJSuj{NQwB^xIt6yyXWckT=NWt+R{da$ZdTbqN`?RtI zz5aNa7kGAvFV^pWPjo=;-@EcP{M?@KlT`5e3H&4FZWqP+0s@$AyWhH*ObYpo|KLr% zFH51(%eRnTa4TTNfiKIM*Z&B(E{FOLM>J#-;auPR_rlWIy9HBkkcFf#H1S&cA@DM_ zOHr>Lkuu;-V8;lzC+zTyam9ra`OpDD4SYuO|BhWK?;G8|^aKuCY|@tjd-;Dyf5b|O z>>VJVkp(BfS_VTQ|8KYikn8oEW7k9mTp;u2ic84K7C)>%ge}%_aLsU zy?a~`GsOSBcW}G&iG6F=LJv8cV%(Z4HFAgqIQJerx_D4B!*hUq4g>y%+fKZblIKYr zf+M7|{sPX58n{PK;c?(bzQ*^X<@{Fm{3)iekJOwOz>dQUmn-R1KCCJBRByeAM+HgO z2ZaKCwuuH*vR|>%m4d9^j*hQ%932HBj2_cnr()`PB>NZee=O#k>(udC>(o3vL(~gM zI%QBGPEWy_pp^$0lZ!z;BwKb)lgz;K6Ak-nkCQL$eQ18a@b2v#bAXcj^LTTVV$Zs= zt`BN*NXfA5uz3;fN-m^53W?0a(v*5~ze}aU_E~HA-?N`yz&f>;dNmiwzN{pj=)i{D zu`+S=d7Yr0V%9T3tW^64!B2VQ2z{oF7Cx-F(PWziOApIAd6D&^iNR3#rQ*x{rde?z z^8OZQZKBaXGimx#6f2W)u4f#vJKiW8J=?Am(!EeUQ3I(>_T$(JmEU(o!Y`UCWv=ry z_Am>*>Y=IeFZ;91I^pGtRmNbjDbZjH$3D(o;Zf)af6eb&QGQcE)jX~&)$u5h*aKS|FnbO0qea+dL0;npd^~l-{pr5@X zm0`EQrxF?IV)i%d5+5Zl%-QniE_Y`QHjd??C05p_^-+!0=#1TRA-J@;9n4)btbMKi z`#u|M%@E|!1Gt#Vk8WxXD}kjM;Jd=VZOrqc5$6W=lR%g9$O*n@CIppLtc96yy{l=Z z#mqM=YjSP>|a^e-^R zyGhY|B zyBAUM)x`je^ZRl=ZY5Fuj(A5=y(lC#VdD=oIU7by*?1}LX}4}<7o6$>>gAso=j0#4 zw@H#5+d+58RDVuA@oQEkD6}7@E|rDmc|ml{-YcG`uLdC7w z^l19f6fXXz8pux*Bvw}$Uah3Uw`>|MY+Ie4Z_L(a*~*xGpS()?Xi{TpMDW-8=WSt7 ztZg=)JOzkEm0xJ6*3_J^XAICTHvjpCLp^I_s6(FqkkGzUcbgSEX5Us2dd^wjC4YMO2CTQhl8vY zDs9u&F5QMKvpI=hAu?hmrwZ=NN~i8ZQ42566*5`nE2pirs|-H*}rh{qD*rS8m=yVvcw7y4^wIqBk>=h^7kS`#YxZsUKKa8~1I`if{jr1(6# zkWC11h}f9bY9s9Gb-2awxKt`2=vZdLm;k!fN%XatR%Vb;li2mxF3vR(;m|Q|nj2g#;!<{)$V2>YdkKpVf 
zzyAU#P$73RC8?$@kl`7-1IhgDEw_m0%kU0;j&0kUi;b1hmL{HGph?B9J~@3tUyg= zK@HR9sV)flqzT;F+3eHIXI~q8K3{1o*_y!a<)bzO>S2PE58Q*%NB^zG>EuvtQg;|J-`-vy5EsrDRSJ>t(c03zg)%Bim2}Grpy( zpLtoYkr?UaTuzMaR_#;Ng)<~jIBSMp z?{huAU)SF`NbBODG=p>wh?fG~1A=-8-$__JEY=GtBd(ns>k{b+j2feFLbzbm zU7ob&5p_!mA8l?9A5BTh_MKZU{8(^_$ssz?ey{E$yG8D^>-L3@K2MX|VaDouspL&G zIVS9epG$9ycHYfCzkR_78!B?p51nWB{UqUA+*j8%vw~4(8r?Y6esjPGzQy;P6&(-jB@Ytb_sq=Pq=wf%2ljQmxw`t7DgI~AcTB$(3 z5H{lau^zFN;MGRXO(n4r>6Pyn6BF8v;GKEI=O_A-Q?he5*WW}FjX1@jeC=kKQQ^9M z`u@Bk8FTAisp`6f)6xj2{!7H#!MT$tBvX_-^ZQA|&-jSN`=JZZu(-g8Xic-&q*QK% z(U;gCUGaD0Z@)a2F-qYY2si62@_hMJ;>9YYb}TyS^H)*O-7=IDAvZerqiJ?#NsrByB?!Hi$6@#nsaXzab3>!<7 z#G-E94*k)Iq`!DfOmXLuj69rszUgEabSe20&g8@C#Ng;9kR(Ze)!AQ&TI2@$5%q8A z@-gyQojAw8b9ByYO^kfT&W}&&WQuA@MmaZvZ6QUbmiL z7bK0p2oWQc#~x+hH-Z8e7ed9op%Bn;-l$x{O=2dfGNz;6B(1J8DbVUd0!U6e>sE2X z(fbSrfba$}Z)R(cK>hc>;w{M-WWcWytc!Fb@? zm*XbGy%Sfr>z9b8@Pds1o$QDnvJ!rFHZZ8iEULyO+7RPMgHI33AIU@JlSD?Vw=exV zKsZMF_CbDDj+B5nl3_k8HPs|Op^ZK+&`#F!M06@e7=aU#Ea?yJKGKG zCVf!4T0?uX+AZVWyVS9Ur*c4o`oE0m1f@xCEP?^rHb9eAs?DH~H&ajLu6G7VY&6C(!! zG7xRm2qviFeu$q6A_^?h-m|wSfUaS-0r4ioI@gnKc}_Cd-+m~%hxiBk?)+{F`P=c= zWx0EP;x37kSo}*j-2$X{(qa1nD^w+wzQTP1Lu|cXmC?yb%DLfqAbsn{9uT}RlWRWS zEI{;og5TXu0tcwf#TVI^=$ss%D5_$Fxbm|wH}k_U)Ay13wUK&Bj~jbb2LoYuBVO2r z#Uvk-2~lQs@s`*|+t)(Qo+>Hvb#Gpj-I7+oF1Q=KjDfbhDLchuXI{c(tCP-?1KuPb z8DuTU&6vdafxR(4;|-GXF(G7=Waw+s7#}}=549}r*B$_EPG71eD@>B*EiRJb!&X1f zV6Z>p;-TqzkY$hS(W11@GJ5`W?9KXE->(;^sX%(6im5&iACYXT^-IZ41wZ-3u5c5x zy}NpR*O_cI9U=ZsMmwzjuiur=!>_|Kmu)-si6W8RO}^F|FUSN-OrvNS@%_=GYNZsU z&G3a}pnd)pOQyJ@L$XOj(!}HHY*!IbZv=G{dp!GqsM%it;!>qFJnlD#V(>fjq#IxB zoHK5gI{Fu==QrYJsXs-TuSJywZQvdZw{?jeNMFocO@G-v)NUHPy;@uNp=)Wxqa?C8 zm+RAURKxyQY)Upt99refNBb~04j~bDU&NHsc8mC3>A~k>&n^$oh`1+tj(fdbgWhNS zdC02r3l=2KeV2FfyXqa7ttnA=shr*hVyeiF^dK;HBP2!;`+F0cVtp0pQRh+APTp9) zy!?T+?USoLuZrBc|LH~(!^y*Ac%qp-NOMG$6;{$k&f(WxM!D znkD$A6@p13gsW>QW0vp{TrY9FE( zzTsIm-*I<`0$2Fzj%xZepKmo(sc8gzMsBPO4YDjFbvED=d&^8;cgc6+#DUESIW3#KpNJW)?v*o}*xPU&vNXFy z=*B6w|BfDg{)5BwHxxamc<*qV!ZRZT35D82oZW9)Hf1Af7W7B~eB}Jr9eq4UD@iP~ zou4&CX6x)3x!1S3YT|+(zx}iRIPthib#I4WSv}Pu!fpNm)a`LJkLQIXYBo}B*vIpu&9Cej*nhhA zYR2;GMLdDJqlKN=hvPv=5`=5Mol!^o+z4txt1H)}>iP*it-@kw71epiOW6~}hHl*U zr!!XLwJPFOAum89%(-Ng&$(|mHZgQsf*1GCO5}l!YUErC&@1Weqe|$0L*Q_60S-<+%!CHeaL)S!gjDBl+up(U|ZlA87 ztHKfts|kf48!a#Mg)@ji^5q;CcOxx)*Ma6X)g-h%vBr2kuUxRSgq#F?mM#`EY)U=k z#sNrWAw%S&?*LsGr%aDc27C)VbOjuwJ5>UMfbGE|&+CC0g!<4{^#ib8k4qK3P zY2z5%K&atkpwyrK)!p-I(*BVZ@Nz05heD~CX z4f8SSj+$yLLGMD95C2Ragprpw`81P6;Q-0~q znU0V8^+fiu2bsMi$x=j2EDIj>aPDrahY2vIB@|sMnGDygHXC_T8^HDT37!1TKX1_^ zfN4cNIdtl76ob?U{OkCFU}Tm(+3^++w3vOE*h+sE(&#}P<>K=vJE6`N7l*DXOkXCf zz`MS@-U1wP4jv9}THa;U=srCL4t(1Wd5x%DJ~}xYx~7ilq2yIXE_X`h^a>^%3<|sr z?tgPDElK}?6p>*EaOO`0B4sHOR(+PsYnM0sA9i^H2A6v;q$aAB%UZlwlF64~o9lH< z(lyv_?9nDk58x!L(PFq8cDVga%DG1M%S8*<5SedNw*S74EIhr4lg&A7-OU=h22Mca z1#1IGGKQ184JSvD17%|+t7R4P+ugdl z^;b!9_F&Gz*OOysaKz_Xze7?j`FstodMbtyM_zgCJXA%lyhNbe!Z7?3@9um{`%mRX zX|ftnXtF{@5|KX>&z#=TSg{PT5RJzlt%UG>#sfChm>y)sh9!JGEK;M)C!@3fhP)Y% zKUd#vJv%9+;vInNZ|iUV56Pry(&k)&D}JG44)!=RX7iNxWPt5%>QnOeo*k&s8Wn0B zkwU)}85b^JUInP;QNBGCSQab-`27W_6e|Y&pn5;Z`*{2P%)#0_;iAc$Gse}Itvv%$ zJFX6YCU04VSS=HBXs=(BnI$KB{sma%Q3}y5%UMYoDQ2y20Bs(;`gy3Lil(q9cgpak zJjmG!Jm}^}qSQ(mttd}Jtj2zk3I!+2*VS0^?8}s1c?O3xkKg?t89VlLHY_=zdvm0a z*&A^A=pry0V0e8`inx9@+W7w80WT-uIB0)FemGhbnoyTX11T03l>KsOkdAy`wzKTB z&Q>x$PKR|d$n3$HW`JK%J`enPcmk5UCkykR0)U-in+e-$JcLRc&&Ma-`#q!-6J3e{ z4&}roY0%)U^_GCG2{yN^%SR+96QR}!Hn&vhwk!N zNoFgn#yGj=+*=%$V$T%h4J;RaHNV6!YYah4-OJ<{xnMFjBXY={n%WULRFDfcmj88w zZWY7}Tr#8hmF3&#R2J`KW!I&YalI?^+p9#$1E5Kx^|=+SNL9-9*w>;fvxq#gyHF|I 
z%t-s-E_T=lC+Z*F2UdFr4l?%RvMtC?eUV4 zBefTF@1mA0m29cWc4oEFi^qblh8m0TTf`U~SstP-wCwg5>??$6EUoWseuepUScJ-s zgf|fL2^yVTYlX|@cNUDSr6-MJOov}xm=VHyaj1ZU4%G4jEiCYPL&n*zrFMb^LGyQN z)!bHE+QZz16x9MRX*=s`QIMJuUsp0cg}qU&lv#%|-Klyg*jKLuo$A{ACLF8SmBSFR`~sa~?Le@} zY%G!3lpAETQmml5ZxW+dEBpz;Ng)*XOC~gk<{XE-5D6XEj`W_pwNVkAHETZI2^Jqt z5i|ugVw|DPAU{)qT&3?~Ua=End7CY3Zy3~!JUx=0dn_cUbPIcQ-c2dPS~ZTKGeG$_ z4qOk1t;^M1PSEDbD1k(!>cFMrEN<`#KD3CO-#v;a$9zHhA*9yre^wjlfHeZ9w&CXSj_Iqj z+LpX=W*6doV!z*>TI=ZPY209Y`Yl1)@3Xy0lbOv#Ow4#*5I=<-0XEC-VbtVk5iWMZ zt3hw5)IIeC^j%BueB$%2H`WBK;OveG5x5AMZ1lh*x}OtNkvWz`qnR!bO~*p4ev!rO zmId{Pa$HrYXhURnc;RuQFBX+O*Oe6kRb9H>F#4j;EvfquxpRK5>w@9q89VGdzvLos zBs3^CO-AiX1gZNk{b6ipOIb_E+YF$?riPUgAx8LPCra*+m{9ONHVI*?&Fl^lsU77y|J|6OXEJi~V#pgv zW1&~1#}><>xQBwW4b*uKxqZm*9J6)~3t~jsa4W^c{Gr&eus)Q@Q*;h>BtjSeu!3c# z^r&RY3O$y+%04NaCW5z*YmLrsr5RQ?CeeZylaGx5+$lvJsI=e~b}53FJaBL_iW4Bl zWletW1u1wG%)v^QtOsL!=oz|xt<9f7P$K^f=S@_r(EdXZzML9KvgHBaRk0c29mZZp zZ|OEY-(F1s5Vx0)6H8_^c>X{NAbk93dKH-FF*e~!E}J?WEfL*!x{_smnX9W1kpKDV z&wq-bjC(1$AG;4V$(u{TqE?aZKggG$+m2-fTS9#7ti#0%Qnn#Z-2BP##n3VU`@SgloLATZJ8Q1 zgBd&ey&79xPn?tle8 zT^Bq_%ebhcexOcmVYObspwcDU-DtW94z*-+<@wr00w(|M`MOCOwVyMT=7*uFx+I+JF+WgOF zlS&9Oqt8+^@4FiTuk)r!A}wRH^R^dEe~Guwz?r+@71DV#)8kpxaEq(JX@D*qqBK<#a@U!4Mi|S$3JLhW-&2DJ^1n5!t0)} z(=>UacddTBN>%dr18pWARguRvd~8L(Qk1kLMK68rh z^yCI;FGD>>?HK)rk!qhGm{D1J%5#N4@x%M}?$^GZOb)x6tjU|_MLZ@)uH}Udg|Gy< z5Z2olM~KBxB@jk;=F()|@((xIBw*QUE8w40#dO;mYwN9mpbS)pV(`G%V)M3)ywq!f zk4?FjqwppNwa1CAWT0kzVtG7YgdlhV3I950HqZ7gQ zRvGi>2I#7I^W{;ey7b&Mo4z(Am29ts+cIIY9ZnB<2#fw>G^(tzv~UwP+Frlg{NA4h z-VqN%Cq&2xc=kelz1V)s59OuCPgFAAYZGrVRV6%CLGAwq2vXt(JLrvZGGNmm)?0kM z`1mSiF5ZHKMR!IRrsK>#)+bIS12@lERxolXPHQnC8ztFzsr)33!@{akY@#7((x3Ef zL{gO-tQemuRZnMoZ=3)7dyPdxuR=0DP_%mh5VN%T|Xkl}|T~Jkr z+E&DDys}G%R<>lP^^NMxdql{sA2fIlNi<`2pYL)+zxXp|ifj_MAb}=M_p?XPW zL_;E>WRoQQP9oj=!~&5mpXl-0!xhVVET2S+deZ5`L9@1#X-(PM!(Sgw9#?LO1x*kz;ly3MQG2uCXc{7R=41x99}8C zU-e_1*LE(=GOiFr$9mSXi0s{ z4i;i{Tj_Ub*U%!9)r{kV;?sK2} zecjjl{l2b&of(sG%ebql0nJE}P-r$yEhv7k;?+u^uUe!!%;Vb2F}BI|gLyUeSnt=g zrRuT@4{?WNM^il#v?AjRG+hG!4=CZ?dx_24Y&8?MY%X_yU!x?QcckZTRUz|>$6@?J z_ozMG^RDUoVZZid&Bc>@I+adLbR@r4JA(~syqnLx!akIJWQBcY?Z!J3^*xsu+%5MH zFwOqFs^ncC`>Pqz^VoeQhpYNrvd$8*JD==tl^TE+t^hU_S>75(30;+X>fM#P3N2GX zF?zP%OSWMp1raS@c|4^=RB6*zuXG%-Ei$xz zkB8lJ-EDgam8y9DRLyF=z3JC#Ec_;*xjHw%J9_Z?{E8#u%eSZZbdUI!%wv|LZii^H zptsx6EyWj(zt`T`Q1e{6_L#eutfQxDPUF+ai$)ox6YL_EIBRJ~2OxQxWT zH!CCN&dXfAR957bYc>JdOXpOVu73BZNxbFZvShq+$;>@O=DB~>px@#M3?XY1MO+1N;+SuEp>Bpac*x%*l7 znXW%<7T;j|n@AkmJBru+MujC7SNlPI;3PtIcGZa2o3{E95A9caf3B&p#>80|~tsKv4thh!TWi5OMd(w5X3obqq6y5AIbj1Uv zS=(Ds*|%V4*7^Rp&)`V=JtdJ8_uw0)PNQ#bWt>O}Nb_}~MSXo)8TZJh|2_K~`Mgl67Om1k84ZZvlJUL3cXJ;>#J5V9@sYRjqgG8#s8!w(psO{TW z{kNxEub$Fn_&K$;ytT1bA1&T^`i~iM*QoJ2q`uZs?UBj#3o-vh0@93%n}biioZAM* z^=L>@u64P-Uzp*fNa{7Ebq|<>N%-LLzLd1F4-y7PzO~&GdtoztC{VrfZXh}s?d2}$ z5K=T*b6@q+Gk3K3HT&7l?_JGS`klLtTRtyqjQ70m@GpgZ(;4^hU!Om=NU5p8ZMbn$ zkOAR}b9#{8#=W63>^+pt#YclM=-a;c22&_T`F$Szv28;yt7&i0zjPEkY`8C%y#g5dM^_6D9IsbaN1-=WWAmc_pmZz478ZVz86UOB$ zKBtvgmKvZ^FmF*IG_BRa~2$WEDZmSV3VcflpDdaR=+k-{TvXY&R#W5#0j zRqfWPS+gry|6Zy+!eUg}?8;IPp1y8oUO!?n+9 zJvf1bYTCC`6h9R|xpv%D;xF^#I<-G?Zr=%A7oKLR>h5UiFESEys*#m={Bi17yQQs) zWX7w~iM&O*WZc#7Wm~M323gx=5+nR@Z+7KPY0FH zC^_kLJ=V-Pq>(nDb>X7FXI!t@D@VGZ&bxQFHOu4#O^!S?S$p>Ql|m`G3U|-&;j2dn zWd!#5m@CHL{bvpFWrxF2>%@lydEJ%gPkh@ARW_zx{>5piZzJxBTuDfb|Mu{@KKv~0 zd*f@4b@fmKQ4h!;$LV|`Ep6BRQU7qUW9ae_ zPns81xpzscMRSd|;d|Ce&GH$Esnkdj~?g27-jt@lI(>#pb!Zh;&gJ(Z>w@q$m~bqBQr9oJia z?V&hn2zi5m0hziUmDg$y!KRoRw`|0q9B}yPGVRmIE-o`$KqkJbmv8=<56U{@jQ(_w 
zz#Y&8-(qJ6ZSg`ug}^kd0kdg+luxKbb|B{te}r}d*15de?80HA?{Zr{*i_JY9+pHs zf2`eHJAh7G{H@amRC!vO{jReGEI2Me3^uHKw>z49@5ztSleUiXRav)6uNWfU0y7+B zq2o@E))yt$R4}%XS!RIJp@IYG@imtC(kmSZF~dSIh+nTQBVb!%5OhTRCrD{s7C^)I zb-JLPJZFl&7z428D7Z%qC)O4cOHB-cdSZPcK@cojyh>ay%78LN?iUGWa0pARYyry@ z(m;N0GZX~Bf70m*$O0^n5YZOFgumYkD%dGsqgOA^aum_m-NI&p^kR>;Aox!4Dfn>? zhNHPfpyrRqLB^#+JaE1G2~-hxzRn7DMlZXs*~KLwd)L{g!^b8Hp#kYXMYNG(+3MnCl+9y z9YH)KhHG(~IQ=F;AnElVVOY~7z79=IDxtYYFzORJ!k1J;@><1XO<{fSz=-0gEb&;?2zel=zh5k*x@$SmI)P}R=q_c~=&BBC_z%iiA@HR$o%k7)SP_)RP6#yA=f zS@%*+IG$$A{aADSk!Y1C|K7PDI7tx!Ds1j<-7EH`_DF29(rh9nZ70^)ns(-y#{*A}AiiVz3rN|Gp+Mf3j zjcH#!iuab-YfQ=}PoK*5-(5Ws2)$T#ZGGu)Vxl z6Z>-cr7ynTYjppRBVVdp-Zi9!6xEvjW7M7W?dGil)o(bl#AJbP-)>i1eo5&Fg|A_M zB{ec%k4AYN(b`Lv(eNcf(ZmZD2e6-cGd!clptiM#Y<0?UtDm5&1EJ7m!pDOTUM=sv z=)b2_MDft5V#uSi(o;dDRprqm?rY)qZ}=>Y4bN%Ye!(Z)R!_^gR$*B%@~|#vQ=e^p zp~sITJckw0Q|!!Uhu<}&o+JCm-FD0%o7WGNCX+6u4J%D8eMbLNaeJS^t5)ld2HdC0 zcAoFx({GL*mV!I99yz*)eZc9g_)vAj8^UhAU)MhC@a+7X*KXJO->HRWkqWgQ`vsZLZYHx+T(ASby37e}) zMgNj?yvoL^3aKV-H{?A-*)aVd$wdYX_xpzT(x{r z7rB&Ro_g8xyDI%yVvZ_!i1hCBKUG)voVjQ&CVd}w_V~WnSLY;Q&pNunx_(gl^T+MJ z?>~a#4CGVejE;x|NzOHAE)B1D>KU5d|Fooe`oSmVD(`etf9lB_hPp-lMRN1zsLQ72 z=8Mm2E8mQ*s(z9Rlh!Sq={tPpo?^FE!Jdx_sR!5IRI_|;t%Z4VDu;*+$flvz$!o>z zLk44FXn@ajrU}a$+Mh!XIfOqbDf(MB!qDMj_pnl)nrFB=uXpe~fJFBD+pb=|N~!ib z*tK75CGqe2WjC=@S)AlU8UJ>b+2x>&zayi-HkZv5|)PbGP zcN$q3VldHtVLRPEmZ9WF#L{2$;asSZnE}Yp)TW~B+|rktC{Yi+!wrFdIsHAj@a>V3 zX93wd(_e9K0)LH&p@^YSnSri+apmkqpAEigKCJedWXGew<%Bsa+8GSb1!@|j_5i=8 z7Re_J<;<%-SGEBX!>bBOZ*@23KfP4;e)(26$tYypsCEu&7fZ{3ZiX^kTlFZn96?5& zoH>_#)g7qfm%wGzlofvC5dFn81x_BX5&?lT=5vTOrKAo|y>0STYsAOLWEvBORaWR!tMj=ZAsM?>Kf3itzndzX!gTL|P;Gwe>X>Fznc^R2;XV zQPre#l!o7$N4{wi5S}#jC6mecrfJI!jTGixXR`Ub(uD&T^Ene| zrDxFofL(WCWTsG`Xx?j8alUpHI$55KP%l+7mo+=&@*=8Gxk#6js@Op)LZ)IM4}Qdr zM^@WjFP3u&bdsMCB7c^*@fmpk9uY8?*SkSJ+@vPS6d5Oz=}u#eTiu(&ASN|KN9 zb!~offkV~X`JHX&MLT8UVl^VnA+lOjU006X)%?U2hdp`DDKoPC{PGAvr1Uz;5p9)q z?DHKezm^;fJy_uwkL9B~y_m~Q>!?JRq=H`Pzt|(u3|F1b3<==UVN^#ohmNk=0C+y+ z+k|@>(a8{E8aw;tlQLMxHj<9_38pmt1YMCTs><5AP@1b_c62YDBt?=f!^}oz{xwQX z`*+Nw9plEqX9gmvRZFOo1hMPJ`-q9=;a1J?7tO!{2W?lzY_EBDvnDC#2L!1?(j;jz zGHuhV)>g${dza+mTxAg|xbV>4*X4_ z-i4+;!&LBz_n1jFf7*zbNU30CvIJm87dB2mx{>-a;_e~Wvj_c}Ek;+scWRw530>cH ztq+c}4;epUd~1~NlyZIekjP&@u@c6nP1Jt&dhrCbE3R_%@R`glE#6hd&!4Uu#T35! 
zWFLn~dVFi${=IcT@A}v8jKvVgRi9eq4J)-2PxV`JKSA&OyGpCS%`b3=+m+%E7%ykF zpVnNg)pZ|&g<$#*^^Nem>O7j)oQ7eWc@ia$Yb**jn|-ubRFOn(Y;jSmMoJG~J3=MX zuN@0%Z#dhhRXgOHm@C3&5G9GFc5YBX@Yzg%0K3xrNIhZL7lkJLDf*(CF2(|`&=AgA zHxgcY&MZQW*yPKa?)wtWt_*jy_D1Xk8NJ`M+9;<3971OcDBjF>W~C>ua{ml3+MwaIuCMCP1FLabGTtedsG&% zA_~>we0mc;zM1|=(>+C6^5qo(fQpI*J*gu~lLH9`8~~TA#)5)<45WRxN5D~W(wOBz z?lF&dPXf0fYzzJ(RfN&3f`&{=G*^@Bn{iEx$&CQ+M^Qrs2W#Zv2@bcF@Aylu!6_I1 zPnzqGn$$a+061x*ukm-UY2FIa2m6%WBVZGaSY~5c^DYrEDm`JTphtku1R=zUHXjNz zqVps)whvgpY!3NE4e;h+6$y0INX}~>wma{SPX5U>ofK{pV3OF(gr}Z>yeL{ifTxe0 zS#G8bq4h;yrH~BF|6F# z$3ZI>k!W!~!2v|V3eOK{XapK{tzGcZrzY-ghWQ}#K<9sH>>oQ5NoEzT{gPTc`scWq zXk=kDfEOhg0Mo~^sUzK?jB4fy`2@)#OxCi6`U5h7jbXbPHVe1UC^S4O|M%~2%jg5Q znnou=(d}W!c)zEp=nO2vb*Y(WvlLygwFZo5A>hX-rEy<50w>qq4j>PKq-NR1kg#LT zFyL9O6Wz8HERuRuBf>txPZb_S19|jn$ViK10|sh65b8+o~8^YG)kf8|i?8WRu_mY~JWO9P#2}vQ63FUC<~c`= zqBq}fA&?Lhu_t~)eCInYDM_N^v^V+(wi&Z(hkeu4&WH#%M34-|W%1fP+uq9_`BuvH zB+sm{s8xWgD5s5Hxd|G*FsivSM1?A=(YkN)Q`>8mE5#xLw3GQLJ`(`*A-#r-zx(FB zui=(PjW5ZU32UO324GSNPfvA4DSyDkH?TqBPCr48#-(q#6V!5OFx;6K3!*gcDQROw7bBfxxZB_;qz+g>FI?CP!EiK%bIfW0H>z-2JS5*FpKoP=8D9z;NVy#kYgWXTiVRio_-%d5Uzogq8 z;T`Ha#vd?rNp#m}jky{TFJ*iq?-@(yx!AARv!(K1-lB3jkY`}jn@%E=rb8p3$#Yfl`A}<4K;s?q7K#$hb)ZSFd zDe+{-{!Av#FSP+>$7>5>B)dGb3vVlslA7RWyK~aYo{a8NFqmK>mN!CJbf?UH->>c? zf&NmILO_nF8aKn^L*luET7Iq%(>Y9{JXx|GNzzoO!-`r-E&-=M4O^h2Fw`OUT8HWv zb0>XzvkLC7Gik{MgWOezeP7_8>lR1#8rXDJ8L2Hn*g_|(-i5DQ+eM^~F$i=<>%$PG zbBx=;mF`C#nD3eonGWFaY%To;P-hbp}G4dww*2(a7`oO@@>TAC&ZFW6!BiQm2&%uMu z6CpOtjhqW~5PG}&v`|td&MgA$2&*F@=8-@89){A_BN30<5q-dRdevbmKR1aTq(ANM z!Y{v7W`qFo(r2^#7F*xmy%rY}VW_fs;zR^z$xT+G2Gt-5+${{StbCY6ijUnD0$OqGirJT7-Terd42d$PW& z32Ls3&~vDssnspVCXbU)t9ofmoEGE*S*5=|x2{P|z>G&pgIwI^vaoE|Awz3qx|Xi% z_$sqqiv*c(heBnP!X1U($6?>zuBqEw^>tkw)Rbf~a3U!FrbflrKaK?IU-ho@9O7`I zX-wAayD)((9}7$wRVj&J(AM`L;^g?oVA7YM(MMn@R}Nl#O_|2G8@Y{p(#BbhYz(a639# zjm#oB-%9c_cEPxEk$LjBb&891hET;Jm%jz_nyh<5eZvp6z72aL_V7b+yP$Uo*j$@} zvwuWGzE5^To=$H}*V5v+59;OG18z^N==_gjcM==xX_)}vAmrap`6_baiCC?sG|qiI zlFpN6mfUPrp0#v1{dp^Noka`tdsiQ~8=aq@Gholv*tlJO`^1BbV}i$qgCu>zG{Hc6 zU6WzEczt{qttXR`Nny!ywIq7!8k_F_QUsz&h1Ik?F929^R#TS!wZ!wcom%bAXrw$q)U^j8z&}B+Dz9n@>-h1HVZz1vY@$FNM0t*t z%|||YEiiY}>W_sE=pHT#VEBKV(D3bA7=e{rR-8yYy_j zMI;e<|MGB03#F-i^5RinoErwvGW`g$+qQnrjuJtV21+*EL{yk?-YXBM=@7SsUO*Ss zdIAIKu>6e=EUi-|b@|x4-G8f4?9e{;(O9!ZkDdE#1cBd0)HwLrje{VMX|egeMz7ap zS;$GNulk~@smP9A(QRx};sVP#)%&6%b6H>XtJNRzSPckv6_8=Dv5pm6WOjSxiuH-H zeyM8g!T*LLe+;nQf7O>;-RG|fuRFyj2+ro-=Yy;p)%;cK+3$LjH^(g++BT%-bIe)e zUq()NoYB*2MkWAor}RG&kc49)E8##1dbz#?pqbj6DT>+83wza^0M3CI0>Ik32GCms ze7Xw(X^OpLkqEq!eu7K`dN3ydzwPSF+f$q;gsL5s zI0M4FyFuj@VSs?|M@^X@^;|Vh-=`ei9YG5aw#{?YI%-B=Vy)r__UUTD9j{wYJvuaX z-D}Gyt+wYT4`ja$QcaT!02G^;!w+~ZMf|aSA;1RH` zIy&2SXhJ?SgxSdrLOz7&Im8N6aD;*)*f(Fc2&>5JZVDMTq{(2f^RN$h-VV7y@n-U* zSIMJGoUpxK!?cg_=yv_-kOaVd{ZeIRHy@o~K8A@8$MbU3M`#(IQ!!q;)KLVogL{+j zRMw%8!@HGr_#(pUovDO@2EEbqJKC??OU1Qo(t4&THNG6n9FMvYG)|O*XVXK)8JS23z9g-=8mO-p&&FN)92Fn{j*3XO`eF^#6Q9J#=^JhBD zN1aht=?-E^qZ#as*#*~W_d@!HZ~!CI?m^t)s7d3FPFWgB_66{Yv_b28tu5m1Q&+ot zz%}49NGEp$5Z2tc0`;` z{t*lavJ$0@E8mW|u|@;)Yu#%u)4`j1-&H@Pe^ph*+~okXvRZQc+qw6j&+gNXGU?U{fPEiU z@BZj#Ct%sPlXpqBfK}+0SU)yS8RXdI=WU!+{lE*ub~33@#c-mH@~G;9>|A(OgC-|F zy-eI}RrEbxHN45vKH2J(%7$jH5|2P zZQ3QaJ+u0s-~g&j-r?KkwC!W`jYrFs7Fxg{)}!N4!*IDtFW&sD=m$@G=J{;lic2FU~urV zPS(MVsW)PRGOpwN;FT@J+`9%3$afkGszS&nIfY^_@^=88f+VPufwCI7-=!W2k8Lf9p)*%e{bg} zTaCV5ZDuk_fDHA{wgWOX`WztB?HL1zQ-I}|oxG~AxE1u`z=QJaL#p54Z{E61w#cWn z>+qVKZ%t*LH8p{9HnArf=r5inq(%*gVw-9TOC} zBsOcd{H%9qRPkt&?t~c0yss{5u>;!%?% z_(X_RRS85OWSxIo^n+z{d_AGBw{D{{0ZYu}Wnr~X?=V{rh!iwBJ6h(u_|dkvuk_Pt 
zZ+jdj5m|bv?$8V8GOWXm*z304YIX3<>F5M9mzA4*<*-~@$Jl_2@T+Ejq9#cP!;?l6 zofMznOh~g?!bo$_tZ5*UT?{;J5nR(TffYhtIJ$;tK%QWIu5wI76Q$#^i62!m1#<-V zz(d`~m-yvJ)k&RF*3Dd%gf7nJAiAh+Fmh0ON6;dk@Tkn;(i>Fy%trVnt+|jM!OLgy z_Q(fV2iJdsC1L$R?FJ0?^uaSR(h*^AftUcwnZZ-H_x!_Wz|uU4JJ}>OgUqoOFcvYy z0*(v6eCVn~iWAy^wPE*3ddUz0T&$q`1_MMeV6V4Q5Tfue{min!i!06Z5DL&YCK2-6OtJTW zx0d_cKCt({AXRn;h{4S5NFMrHdjU?R>5G4oUM4m1bN(p6dv!@tkWS;DQhtxbXHf)= zJ90$@?`;HdQFQA7&H2Evogmw7F6UIXW;=M&UO6Os1K`oX(?@p{{6;>1=$y+(cP;1v zB@VlPtKEaFN5UX8hTD9R!%^QB$#pun@E3<9&Sw?-#)jFB-Esc(Hb^Plv0EW@n?gCt zfA$w>3BXnY?SQ~KdiA|+9_hT4w`(4Il2@0#`-O>S3yl502wZXxw2pMd-q-Oo+QVji z0om^ASY*Uk0lELA)`SmycIUkCm*96?Io90Dv26xEd-pXt(qZ(EV4mMnOg)jlb!*@k z&L|K7`H||Scu)B3e*r4%C}D54MI7*|-|Uxj;ag0g#|9-Nn@DrqfgxT%ZV)G9z0Z!v zzP4D7#)lyt5|2V-rjG6)q=h5_oMJ0poUa=)X;<^#Llm70{sHcD1#rq;0)U7pOuC~M%=e>|C1FbKS(-Jn0BV}B&)?SK`fkId5j1zq$d&VaUs@EPEjR3ME{ zw@v(mpa_2!BgkerVX{Rpo%cjMaeqW^xE75>x8qg-4~|;S8~Wuw$!Bv|7LSDR8Z>2~ z=0VJ@Tg0?o6gi$0amIXC!leAX7Qh@{k|BWzo=v@3m0JWZ(0^BIr;+p(4G@Awld5W& zApjc(K*hD+m}eWT+5RyhAh)7{lghHbL^Cx)hssX?n7ADW*8FA{%a<-@4>hndVI5y8zRyKn1P)!7f}r-n8(1=!mBI1ywap7vON>N zOkOal8f3Jy#*|wSUq`3K6R0$tm&1sltc=eP(VAR<*x14zyIZnaJ0AaxsYi&RUscb- zqlvV6qgt;)plR{`gQ?u|0?YWia{b;a?%l(_nOJc69J%p~ zPYA?V1DDqzNxc#3%r134LlKOhEU?Ge+bl6^&{2BUxD|svq*WT-V)#mgrsQTr1y3HR zy8+#{yR^DTq)E}29iA1m4(pTZ-28eqgL^0RF{Mktb|n*LB0ICbNak+ZC%wPoChay2 zbfOaq$ScsDK)FvsX;W{mOrU&$xluN->YiejO5-fZXt5 zm~dwY|5vFl-&6>Flv*q*@22CK-O=%NOlrI)pl%58C6?`xzU?aW2%j)*j)lNxr`&$7 ze63U6PY{y#HcwXcGeEe>bJo|2WBINn;hQ@8gI(JK>D@sT`GoQU^D0n}4-nV&gf(!zcuQ`tqwt(1lJU_n6={>xBgFm|jdvc#kxo-#j>8ym_cZZrm=g}PT@3}9lX%kHy`fv_X+bP3(~_wS){?iCgAc?GdZK1T!6;p zHXxB7b|eWrAnIcLxM7?BK0lbb+8#t>&Xx)BtciDAUm%^dfC50nhk;_Wad`P8mz^y% zD*%Au4h|K#k-rN8flnB|IZTa+C2tJ8%Z6fLF-_%;fjrw?k7diS5c){O;cBZKfi)V9 z%*{x|?L#PB%4u_rH)bK7^MeZBxG=#8fUmOHsU zOxdv5d0F&{T&6z-)?WCqox&wCIa}eWZ@z{SP#rFCr}qKOFLq+MBwR=mjW!aY*q#@# zyqFn=EkLzZD-|W{_-#L(mL%|mr4QpLnF~TRd_xehYg^|&enBZ*`w`;0MDLAI!@1uC z7nEsJ5tSwfB>>)}Zyng{G>d1I4WvkLDtAqnyiZr!Wp$jZ*AxeGKjj|oZ9uxx6}j~iUVERG92TG1hqq{+aBpHV2xT{SL=#i_rTAqF%JqPcqWXBr zc{gd1oU))!el1;|Qx?Z|RkU4`TgOEl8(0>dY!I5)apxkDcPzkW`@E_~42>me_y`Dn zP@x4Y3h7_T+#1`Y zAbV5(_10j~+L5ghYHR-mm%?EliKp`uG*FC-kiOO=a{jmRaQ5zh^qP*r*t%HL-|Ph4 z$mIv{y&Y@3jPS++8`f__ z>2HF8?_G8g$e3BOvUi6-05E|x^-9COMVh|7=JZ@;Tn0jo{`k+Y@SD9`Q187M6DUFC zkpFN94lsb{Fm^>e3jz;4_9OQ!kYjCAOWwW#ON{qBHiB@r1-aM@{^;`N)|vyjsEid$rN>s zB!s$iY_nC-s1=g$wug^`P&bBek;nwlPHy;R03B4$ME6aHA-DV*9pazCp8}-GHAyI& z$q#Iz3cpR8?OtzZ+CDv#b^*fQnFz%@KoM;ORy#2DN6rc_qMz{paV&E?YX1P1r@x>d z%WIqDz-{9&P>HB}1gh4VX8ym`wYy#yq{+c5g#Q9$*}Gdz7zt(Pb-EBF0PpXv{4f7D zigY{(D*j#4`Dcdgex?7s!3MFQ(nuh$W>&jJ*giGc0iY~_$`k*Sf6U(fQf&@Avvuq{ z@aFA=Z(*jl(7=B>Yyu|O`V#Md>`%XTCz>TS@rDnItg#Z{*BE-cFCb52m4%^ z|M^ZAC>h_XO`y+oRxpwGBIrmq8^0}7y#$Yd_3KN6-r0r7JFlxrgm5)4BDpm=Y* zF5U>9&06EwWvaNvYj0xBc|cGd&`jJv3f11OwAAje!5x6AB(-b%VB1DBEp~R@)LVpQ zHRA7KIUCUPeh1QqCnA$Uw0Vpl@&V#jL+W|y_Le=~sUgg0%u z&I9VXfvC#%@u!3*uCPyFOelgqQ6qatFO|^fp*q zKpg!`p@C7p0oM(h9Xs_#RK!Ocx4uZ+fYRGeOn%PNPE-b8BS#YvU&8*P7#oa})={%T z3>GJ3@V?VI-Os{-bT-D9u*wqjebZKNBuSjb$_{R<#zvgKuj@xb(FubHL2WzA2Myq+ z;nXIixkCLz0R&QsrS~4I0O~Ma3W9DXko%alFB+HPe<&qOtmAMO;ofipmc6n|bKT7>oy_ z!jJN+Pj!bkyjB^+^|i{rUh*ZNmgfwC{e|97edJ3j$c8mJVA?g}F`S5y^=#ppdlS>{ zO91iQZ&?LbXpURYaQyYuLh` z*jO}9j3g{k^x6tM%`e{0mHii}o^l4N2bk2RJfoSDwbMuWqw&%3c;9wl|K=1e zLORKXbR5Qakwt7OJM~#Vb8Qjd)CG^*XSjp)15)n1N&@J=kC*CXIMhTH9(wV@O1-3j z@QO0f|2eRA+v1!S32Q!{p0^THyS0LBk5F@aj3an{Ym6!UVpxIh;Vd_EHGu-DqwK}Tgnb=(KdwPeJ$KNi+>SYPU;G(MdoN#}?Z%zHS% zhPFV6tq{9c%|aa$@B+3~m11;rGdzKxoTM-L-DI8n@zmJ|@~|x}WCGj26e}l+l6I{v z-=Ke#e}ZUQB=8zQuIcrb9T-%l7SAM3^GD=x*O)U~XUW%@*i29IVjSr>}E?9t- 
zmM4Rd}abqjjZ2 zT#*Ci!NKw`sC8>_2g3^@-bMu*;s>%v%<0Z_4zJYgH$m{NiE)x@zkU2arg4(I{1YVI zgLJ2|#Z6a&z+=L^x*YdSBpX0%4rO=Xr{&9;u)gQNJn5niR^G0xxIi47ksSvo&@TMp z44hX;wRE0%YxX{6%{6hMm#!RmA+SmbrSeY+lkjYwVY$opUN0elj$~U120zBfv3AxKooS#lVHl812^$sh=V1j#TY$&w`t3=A2`k|o0s zXAqD)0|JAj<+uB4U)BD#_4d_%Z`Y@)tNQk>{&e5I=iGBX=Vtz94M3r#rl|(FLjVBW zxjg_kivVT7o&Ur3UxWM~4sqws%?|+O!#l$SK6meM00=1W+@-v8(+gm`ea?ITV{E|x z)9w)5CA@e4!9yZqlG_dq6aa!dckdDq-o1B^knpzmhuiA_Ldtto>|!tOKQ?&tfWw_y zJR~XqA?M55E*irLESH3xM<@|7Egd}rBR3B(AHM)lQc7CpnXIyks+zinrk0VhiK&^n zg{8fNqm#1>*wxd^+sD_>|NW=1@QBFIQPJ?^l+?8JFBzEyg+;|BrQgcR>*^5=jmW0v zmhPV3zW#xqgF};3(=)Sk^9v|6X8rfZ=GLF>9o*6J-;>ib{Q1BC;&lgb_kX^!|3U13 z;YE4Ni-3^uF5!d!;&q3>_ZD|43GcCs-KTnC@ZgR6V-E3EcHZ2#AUFmI@%3D0mr7x)H0B^m$qpBL*L ztip7-?Le+{qRV@F6h@)ZZg0T-o5o~|O38|7eMR2Nl4^XgMF;r}po_taoDGZ|&cD=} z@z%9jA=>xSryDHV{m0f*72Hdy4JDcaCQwvoNQR9d>BL?z4_L%M<9RnJ^z<5gs84QM z-uC)DIAi}Bc63}jqy50LuFb9>w`>%3o#`5^AZYJ#{n~sGu8@`gB&yT@>}Br{IcA5P zuj@3%p+tlf>V%Y`4`rUjFX2vV_JxAGI)hof6C#uUytF0SQG7a$XcyS(36jCKH75Ga z_0X*_w>3B9ULULAw4}N0BasE55oMx?v)k?@wmd^A@YPytg`5$G-Jq?J-lA*$E@)tXP)Y335 zd*fP_`nPr4?drYv2g&y6vXC0WJi)YdS3&fmdR5Er{3TUE>*{AkqVyj@PdczO1E)pE zOs{>izP{^JsRZWv=f0o?>w234hrG-UyFgN)#*dJJ1@rL_vY_s&O|6#$M-u!#GA4Fk zQ^#d~*tvgv1bIN4m0+?Nua@ZpEHs5xF2wBa&S#|m31wG?*>ELl-Bk^rcEjq zlwKELSV1-eC4sU+wlDEf$8p^~)1(Cpn!r8*h4|r9(yNl7u7L8L&inGgh1*iJWWFrZ znZlSI#;N)`RcPV1p$hDfJ28TH2+=E6z3l^+&`MQDa82!`<@vi8n!4gjd0H3Mx0mqn z=+i@%Kr-BF2n?h(7bsTc@-METNZQQOt?-TaqhdD^O_zeEko|RjF`YcVYur0ujyuWg z(o#rTPkFkh+Jv=m)@Ox&%NpyOW+tyce7;}YZ?Fsk1zu3C^Q~rzoBGU1Uik=4sM2tS zQyVd*TJL_AO z&|9Gbuys*t_k+}rf~0)r4BUxfs)M|>*Y!7mwM;gwvhAlT3=3T^TSIQyH@}!(C7j62 z8|+wWnQoz9fS#E?f(guNEd|=wdb-P9yi#1Tg7+C7Z-g^3fu>Weif!>6k(ZA-^=I1n zI_WyN$dpxZYORG}R!;0EGQ|l-KFMb_g@`m^BcqJHx24}3kf13pA8F)MZOF`vCWYn2s{CNuiWFhyRz+$0&IB%@S8l>z&XcUPy<+@y;F#^iVcSAaygfQN z6k#qeSAv&&ITa`=915O%3Cw!t<@a|^RTG*YzoLLsmaV%V&rcbdX0FWnnp_E|6t$1Y zVKLL1*{j;DeX2)V@hFR*wl*fNA7XwA>e#e^tAy`I`$xP$dL)D?iUrLw#wQ7{Bzu07 z#}1W{woNdHY&yaUKZ6)JeIF|)6IR!qJ25O2Yl7Z?$uv&y*t-bsx&egz4)_vS;#bDY zhDMFb^{MT9o4p4?L`@2U3v;O_t{>pFH5F;F3E>FXqaTK7VGb+;zKzos`L#>@0=TE1 zM;ODdNq<$a%p%`x4~_H?=0rF2#a(`u)`zuw3+mqxQYp)hs3tATW^~f393@E@`PFfiA9ly1)F~?6XthQXyZ+ zIvhQPrig;$1SP$e8%)oy8ExxYw*%$mo*AZ~G+S4;7b>``_AXYo=q@~Nmw)0L!0Wul zEZApnVV1zhIBE;4AJX&{m`dEF|0jL&OX?TCm}AL6Z0rw@Fr&s?XynNhJO z`bi1sZJqo0Aj}9o2TCbGcaifAYT}d8227= zZ-%`c?ta3VpfWyzYnHA|a{B4gFf+v48O9Yo1m~lO@!ObxGQTVEk}#wDDqA8|O)(sM z*oW%;Y)8>;H@)eFY;Mdon|HG@_WV5Q0Xc<2(IUBiPx|8R1*rmppaDuo-g8>MEa z+xnbo?ysKMSnEW1{l?Ke$>Yv0T03-ft5J7`wjO$J9e!lHa|3YFe_*+QxBa1Xr%eH^ ziLhWA4g2Fm(dL;ha>lk@;;+pV?iA1GMnN6VF4$7TW^Wnz7+YQcp=^nq%nur4KQeVf zCZll#(%}?wNdB4jgzJuu9n-r7CjwQIxCfOY3l*fLK3sm zL=W8#)k61n=vv4y;KQH_?ye9{60V&N)AIt-icLaeBX+|Z0N*f;b>ZL%o2WtNanCjP zucFpa87?ZIMr9q*b7gO0weJYdfYuBOy;`VK6m;1k#7(l$P(-4XNa0`|EL^i{dInW6 zE6b6MuXC@^dmJg`U-OV(63?v~w+(lJY~tyzStz2-!=H=Fq15nEmy8K++T;e1JLd;M?% z2Rl`aG{C2?@fYLMu_jJQ2MxTppA7rX6%{ zT?s<#KxED)2x7gLZcQ%D@-dBd2F$M&mU@o-eUyCEafnl*{8RiS^aVXghVu8V;x zHQ+x5Gt}yL(I4Po7h9A*`s^cbJtAGDX?lI8VRv_~En;OrZ9;k{Gi8*!l_LeD{p@n% z%@eUu3I?gWI1L6F7JY)wuWWeEgb=$a9^72~YvU#yZCUwzpd5DnVK019ovSgSdYGiO zh~wF2TZ6NB3Y$|cEaR>W6MDNGp1PdEKV}PTpEm} zqs+mE$bWy9A#Yu1>Do9<7sXT8q+P~3yW)r1nf$F4u3|>#08)Ie$tlBa?oq)5P-5~^ zwwY*CU}?-c^=s3{kboq=PrGpv1gOkFX?mw(B68mJPM(+W=x%DzjhdQAbNgCCZve51 zz@32~*N?HwDS6=LqnZ;jPb!qlAJAJQ-P4r3z^AlPzqos;nhlc*vy-+k?q2V_%1z5m za~QqWwhWD+d_~Cj7LBLYu#%Ch$a|6A*0Nq;t;)@5b}b4Im_z#v!ZaPG+S(D39(Tp@(8gID0i>7=EK8xaPZtjA5+07pHFMV@lZH>C{?lw<@uTZ{h(eY zia^NyWK3R^Y$zCctsLdmWaSTE_zv>pZAyxY7i`)GxHE$V)Zcyovc%*?b6T@^1GtdK 
ztD|}1U@R>;p1oxBAJlv4hUTQnrBg!9_a@B*9i2jay1*pV;d(6Dv2zosoMUjPCXs}A z|43pplYO+L)2sVWv%VxF);R%dqzNPSdV&?Pu`7&5`7cE@P2EH)C+j>6>VL=RFE^}O zizFbrHlFqvm+BdB&-JwiZLiL&wu)%0Y3I5(R_muK2%RYsPuu|BBt)7@o0+AX*6Q6( zNYKJzQ5vqgq&7tp=E{#lb8R8@k2N0XA_){ zU@VSm&)Mwgj7~reV-Bz7KrR!W*oflRhYPl+=dt9yx&uI%S?cHoew8dHMPW|eJ5W* z7aI3x_U(7bBd^8VqXo?aRXO^~e^gNW8d8%10~lp8Hx}mmhd07_UVN{sbS<9cVN(Hj z@Fv;C`~Qx|EPNSoPi|7%`&zjFXe5Tu4q!Nu45xRd!BXN#XW!doR6uY&-uB>3mXmmO{0a(m+?$ZV` zjYXO*3ks(;&h6}eFR=gir^A>^^fK7;p713dbuyD zA#{`<0aC|6PcCP|L&K7p^j;Zn(+lg$9lN}d#+E3iK#eg20-Z1kE)R!>`c^>|jJBC5 z{pSQr3PbSnUS8!}#LDb$@SM>=9RH+yvPWQ*VEdAhfqUj|r&;xdg5;%Uq00?`5-8{= z`lKP5wNhw$VL01-%3 zrLGu`*;OvJTqd zb-Jl^_^W)0MbUpc9=$}lT<~XWHQy5d&P-Y;P<;?cWF_dR??4M~ZV}WO2H#bw1 z`(JHs?vm2;x4~UDov%MgoJ!&M11bZ>V_s#g1^1F^9UOLt9!#t$GEO_Fb|m*)jGP6z zWq3M^yRx#jq-~U>jqjUE)3;3?ga;{`{gV&w(zyZHUD2IpUuCp;7VLHb6CUIBHCv{I z4i7kz2cxY8v`4ijcV^&p=A5oaKmGD+`Z*`CO_!!H`m_R64?0qHCw8kYudSe znnk(u{(twJo3ne`5%0H8dO*|cWd~ai z6D5`G7=Koid+CooG@d?m%w+LF z&Gf>*H;8|WO%v$#ucBRN7pU~m+Ir_G7a`wgoSY67lK#T}iGg`7OajR7$(g;MXn7^;Z@KbEC_aX{$A&XZOgaXO~P5YR&br|Umt z$6_}117MHxg$x~*RU=!Vg`Pkof#ardFf|pyPhYaOS*>2AxH|VQbC$@&!6#38!(YDj zkm}E3W6E+9Hx`+E6hud6&|es*VH~as5s04CEjroDglu@8pz!4;;NW^anld&101aIr zO8b2c z@UHsibp5uvM8#nrIF77iaJJ((ET7SD>vQ%Ez@OE|c(td}wk4}o(-tQ8HVD!O&lS8U z@X9nS%cBEn0ItfaOGj56d@#U3hBM@qoJ8_tdS$&B1CMD|NRg($=&ZH)Q~C=W(^WlB zH8kVR-Qe@|6Q^H~t$8sld11WRxkxt=CZh>cX(=-!IG3)(t4z#8`{_4!rV1~A^>{-( z>JE{KAS<29onP8KY9Sbs)=?5$?BB{<);W8SuC1oMJ0GIauj*_3oI!-tv8lDlwVVsI zdW}GtJ_$ql1b)POo`>!yACwH2Sl~?Dwi2c;i_jYK-xooTGX}uS1{L z30l^y59-{b$a(Q-_)AR;;(OI#?#iOY_YwF=T*{H7cf{`~&&TH;1ijt3ptB6DZC`~x zJ)TwAOrs6?eFoUdaBl^EG_ty2*{k~-TvlgJOA;pw=E)ruVJGhAXOB~f=W*CfKpM^sgK?|)ba~J6gr>nG_)BF1e#u|g(MA51r z27RJGE2FIW>>|t^EHL6~AUcglzv{x{@2<<#0Gpi~a(FsxAHQ&94!_+AAu0`W-68UC z&*L$Y_aXXXlvM|0y;>M08MEHQbzM&tA7pM9>)c?+%nQ3($-XR0RfViYzPkYfWUY%$ z&*QeVn=y;E{VZF3%K`x^IK3#K^iz;FgyvM_cl0KMhy*n1_1(9o>7>5~Y5q`2He6K> z^*`D2E&AFFifgI*2W+_k*w_Smp*~sD+yI!Wyq%}skH=mr%2%}Ju%EylKxorx88?bn zR7dnnb!qyFPjY(2RqA?Y4F|m606lcMzV}IhdM)h3O3(k^hRob=U4T*bQ=6+2*vSpx z(VsaoY*zo~(0bQ3J2KiIh|fT@+O05kw7lD;Ei3JDOPBg3AHFf=X2)k;RZS~;z!2zO z0F<7ZXJ{Z}vV4u&N@LZia&zmeYVT-}Q9CO1Q$t|reU&lC@G8;#>36-0 zT9-51YF-Yj1A5|cq;*bP&I+ouprdG$`i(7-q1F=CYGSIcy;Hm5TK)W=fj;~hhaH@= z|3{+9bQz&H{S#LH)Zc8Z)CQ`6{{#eL>*AYViw!(eEaf~VG~2P=o5Jz-&awir{i%=- zA82M%*JiMlQDh_V()k*BgPI-HCrBZJy4&6Rf|gW@NZ7MriMz*0bY8>;4wfJ<{8b%x zTGTl!G`Y!Ad}dKJbY{!9;KJ=ImR5ouI^tuwqB6YxP`|5F6Jr91uSwRbX=A3y>! 
zI5=O5*H!plNKs+fPG7&a=we1*BwuTL`!R2@`)!Qe>bJsON*d``YLg_9YAauNPhXgK zsDQ0X_qe!b1|@w}SaR)nih%$NLL1;kwR3MnsA1yN9KD)?eHOa?!V!xN4gZr{d6oNv z8%0X=7%K@LxY9J8juvA)m979{lKctk_&$~hLK#YxbYJFTogRC52dMg%=G{;iSO)U7 zbZJ1>a^{vxXineyY(yqE<~-9|KL7q9jetRU?}w@T*N?q1@)E~^zjirj^_IMy=>B}zH8tK)uVYLUUt0y&TIk7y8XsWxeEonvto_3` zqmB{LVLp*KWkTQPC$@!^31h$yL{N)I=F_{6lF;U6y;G#KSdWj6$P3z978 zA-GMv-}yrhB@?wgP!7RM#C&pO+NjnhUnn8I4D|BiS`26tGRV=SF_GFe(O&2@inU5- zqo(xYDz&NCPwE7n_Mx0WVqJCyL}7XawE&9Q+@G8Z>k>!B$CsDve??!iGB$kMj4g zVf2~G!)aVe8?GPmE>dQZ?n&Q@lOrdJs$)!{ZGKFFccO(!f^p`pt6f9ELT@Gt&@EGb ztz=clRSHL6e2F2>MfZWMXyhQ1uonx3)ZZgF#VdLwH(aUTg3Ml(W(#HlbbSF%x(xmo zu+;7qq>tSw-iZ5*-yycPY|yY=npqco$15y~rcmsku%cueOsfd3D6xv5V|~sta%Y8f zlYT}~M$Ff`$F^X`PG<04+PJbezx6q`Qo>&}cz?YO9vSrGr|{_F!(R3$-}DkvS!J$S zU{QYS$^J8!v{7ymoA~;Sza-pMv=C?h$kOy^J2`i>aL?}mApL5~k4RZ0mu=jt!C2mJ zU8=B&>?vi88lPVCBI{$N5*+@_oM%yL=)glYJHqLz{u}s>smJ(lwsM4f&vF$oevG?FKmfeVIC;&2avk zgnsAyE9NCq<&yQ~p;8|mWk^;`RtT8sUx7u;Olfj5h)BCk1voaotS)T*rb53Y?V4wdH({ zQcuwLNv4<(f6?gk=%!!uHcI)G1)Kv3l{4Pjrr=T zh?UDV=;!8L@lpwV97V)Xcxz2wjYh3zfTX8w{M!{HM7>{|G+v35D6RS5zTx8gwx3KM zn|pQId3ryZir}J==Z^K=Q4sWHw11%2^H_deeY(EO%iC^I_X&`<+1iaIPLP-kT)LYQ zIm(7fiY?eI!M=3_dx=5Kc-cR?^#3snR#}`MJW73C=IrL(jmJPL7T-NUda~mU0WTa$ zIWyq%NYHTgZGKgd*Nv_FL_E8(31Bp-ul1E zs{aqaqg42hR6Xi+@-HwmmQ9&>e?D)1m~fJ@?C9;GFFc52)>B%_mY(OwqlD82JU_-K zywy-Hipv8quFsPoRd%zp&yK@tpy_8S!sL_>AMyLxt)%%Cjy22>hh$L$hd%%PTKWqi z#NF>5`jpOEQ0nfBz64Yl9YJ|tMhpcqhqH#zjl-^0YK|D{4?@drPVx^9E`h3a0dOx? z7o_x?vi0XBQ%3wZU@tjOSs%jedai9~zwKFBEw<3&NGgl$dJ7&0V|ViUeBa_UY-c=lkC5?R+gASnv;lqI-A^=Y%VS9 z?WzMq1m?i&>gUfO=+_O{HOR&LfjMVn<5o<~+(fMC^kGqhX>Y#IIbpP_oq@&+UvO=V z9N}I0lZn?5{hz%Su$h#>Y)J~~O7pEmva`t3a(M?45y#ik)kJUq2NzY?`Gw8m9e$)T z*IDC#2xB(aO6;&$slT}o@Bc%{O`O`(WgjS`_ZfzJmi@f+i10v3<^Yz*q(Jk_i-FoB zGt%V2Lr4=OWBeNiUJhq@>*ggE#W4h_YfBD3f34)OCu5C|Y>(yp(C)*o8XLYoQZ5cMPot~ksulS_bnFcZ_G z!!5f8upMphNNqn?vw$-RX&*A3>~Li+=JOYz4eJ(2`yLZsL_z3v;V%w4KlRvRIXzbH z&~cHU9s`O2O7iZe`mvsv7My`8qrwDhpX{gUqZ&3Es`ZSPH0TQ~hi;k0*!6N1N;u ztu`%6|G#vv7x3~(!3l5GgP;MA_m3{G~Tvd-XmTF8^3f{WFw* zV#fRTmFAM=K01AY!}bboK-DF@^L^}*-lJNH0qhZ~d6isa$%OsLanO@#ju)(^5B6UVg1S_j4=lC|A+aSH4kMP!WP89_}u3okQz7 zlV3Npw~aF*s_hbX(|wDQ0aQ^E;L*L^gb`6&TTA8L#F72{D3ny|=vc|83;0T**orY> z5>;J!WGAN@cNSVDMb~ODgyi<{K=yn!YcBiuPz39%^hkcP^!j8 zj7$Y&Ud`>^tg{){1}K z4BU#2Go2Bj0MZYxh^`flkN(V$=0z`HmcsjtKfXLP5%SR^Q$OidtSua!Mz{U@WUNv@ zt0~kMNXmPmDI{8rWG}R z*~VuNR>NI!8PN5*uD50X?jN;Wpm>n!j-xXy+O{e{>56>f!m+$K*vQ`HU@bGBqifGEr(=S`xLDXa;tYjFIv zt$P)BO61W{b^0Cvvcq))I2T1F;Q$GY z2nvRP@N;-cH`9L@#oX2hr_264KdmWQtEm~&Ntg*>eqevlAO9=clCJp8ARcX8D5UbK zXP2rl`hEY+KjgQ;Y>7w5!r&f>bHV=*h>unVp=cqUYysNS?Y#FuB(;<;bysF>TWWm! 
zz5WwHw-HYLLJ#^W?-uB} z)+C@k%+>K;@eQMGYwmI21v9|*(VXgM$Hs5Ee+3c*Wd>?)JIOQar0w!sSJo#qKUwO< zK*csHGX5LQ~tZOG16q8%!>CvT05(+D7digqoAaS^hgN|-JQ}f45f4q zAPvI|B`KkZ5|RT$J0LK0NOvhICEX02(w)ycf57*hyz4#KC;MQpwfDiju6?iT`rX+z z4dz7*+7p+?G`{j~&F(D8b7HaDD%wu<+^Bcy!b3IIKb~~(0{xX#)_-1`@H|z*rJYRK zb~5^{i4T733D4xcDd5Mp-0soyZpyQ7aCo!DBh6e!O_k z?08kmsFet(w0;{P;V-gc3NHW^?TH|eQesbsHtXQ%GW70IRwNuF?Rp`JHr%bZj{MgW0Fc`i@)+_ zq%CqbO!t5Y0ztmDg-T3U^$A!vpRR-Ad5JDLM?<68$@(vRZ9X&GEfDqLd77tX1!{&% zM3H98^>b0kay9>f`a1Ko&(oyl0k9;-mGHnwUfl(qdCL`q7;w5)iJNjBKDIw?4iVTp zmn|MoDhAe#I-P4^eJ79$N5dlLIpcUSVlf(Fv*l*>L*6l-l-Sox=G`o`Yfof*k`=aZ z!H$pgqbBcOe_&7XXKTgG9xx0>M;JL1rAmMwVi@XY2NnOiA>Nw+>}!19oLBaufA|%z zX(F8LV;%!kw8@f9HU_hMS@TNZ`8MT-;CZ(?r=FuSIyKRQnr)6I;TJOm3pcFv6945?X`^l`u3!2WnggWUG&bEgvewQBXCagle?{}`s`NpDsNgC zI5JeD*^WX6p5j5Sv8!<%>~OgN>7S*y=De4=ia*)@R`{C`aT=hc8kP7e<~JeZa<4l- z+Mg|ks7l#`%pGNSG1%;N#L>+Ig?X+omDT*L96lrcVLL|&)Ff2;WNV*54;=nCtXY4E z+a8{(s=?2|+CG5Xxp#qq`{&Fn_!x`)&pzyI{YX0szT0`3pKPo0Ld#efOc|D2MLV0W zo&;0)NAIcQQ~K?d1=C*;w(^jMdj}N^6wE8tiNHjLS$x>3pBR16j3nI;18=DkWv)sv zgP&dREzd{%TX9GScDHpBb4CJ<5~{*&BhzJnG9MX)7I@+O$=$gZC%*R?rW**UUNr3| z@o249{UEESN@-6`nJ%Gy!sQ!Lnmo?^1*bR+*uv)YE3tsC-6IdtcF}qsuy=aSZh#ig zr~(%p`fAI!RtQ`%(fV2SN>7Ie^Enl$H&m;i$1(arbtiKoEJGYiZ{!LJ%|!JrRfQpz zfSP^#rJL0y-S@0%2=>#^!@WTJ6=F=dJazCZKM{jC8ac+}tcaR%(!`*P3~+|=Pw64V z%x=NuKJ|}~l|sIp){!)NUpf>kYl7v65+`FHCh`IVd(_zGZ=<%!vVAdN%ra8+if!sy zS!`y>288E3uX(kNI10R2^|UepzzsFTF3j_D>-Q0Vp-tsK{{Bj*Y< z@>tn`$z@V%#zvSC{fQ+1^!8sns`-np+YQ|%GLXW7v~yicr@N-B@ciy;`@06bG|4wE7};=a^ z3wV9iFNF_SFRJ){QS$s-SK)TC_`RWeXF=J%TYJ+qL@Ed^?e(D-IeX?jdv|eLD|ntd zGx(1Nt|NI#PulVxuxz_Pp9#fsY?-CxT-}%kJz}>1h@hnFqz?QTpl(J_GB}sltrJjf zxxn+j!ybQ*Xonp(>IZ6`24-G;R(t@HU#QF((;Ktn;`#Z1wdroP-;wS31! z_&;55Tt#GUClW?<3~s#M8h(3ob^Uos)#`S8|Dk5B>D(%nF#*cW|4o{jIqF&JxfGs9 z=K1-4`sU+(8v}d*)+y?7))OSPR)Ob*rXOr_4kuEy_R1uCE^Ez{L{AdECTrvueNH6R7203Zf8MmBlQk{48d(kbCKf|wB=B6h zkb&+uS^08;pQFOp*K~@wp1CwBT9tJuJze4v-dJ7Dcxv`)bZCJRjb8Dh1}ed0W6`R9 zZIn()qGhAl@}Dd!2GbU&GskE_6Z*WUlfh5&0m;@zGP>*|>%H}Cdb+*fe)5><^DKRK>4{Tjt(O z#4%l$oJ}9WLitB)GLK9DIT5o5vE%0a@&vl091d(&O2c z*gqUFg>+b<1gQ#tzWZRZS&tlfn=Ehp8F8ZsB^IrQF5mW z``33OJ?sr%r%qH&@&sbSkEFR?THu{ay~?_blg-Lt09l4d-REE}r>{*QSLufreOCee z6|j-0c6;nse90{1wd;-mBRU&n263K#EG)iDZoUWsR?Z{PIuH}a=MGxA`V?kK)51kz zT^QpU>-?$j6EFA6yGIja%sgXv-D}UQH?5`zZ|gyA({jv#;(xt)zMO83o|SPJAcr&m z^8MOlWE?FoNX5dBFZZ_qHtE-)$~@)~EA_&i?@ys-W_e8p#x{yCP;5R6GcO`8@!t+g zu(Gmxeuh5&!?Kk9T;x8pmI1nQ5m{t`0p*Xpb4Hu^*_^M%Zk{c;i2I~aN7zkx^Au9S zlb^{dlYV#2Um{o$5$3^gr=DSG*ngB;Df4=w^J2K?RLQ)EMA78xmjJsM*)7}#7KND@ zutgA@hB(|r!!y^ZZXHcG#{;#}-mllWC#5%Z835>NL)gn+>m|Y@pf`)o_LYl6MUt@w zk-yEJ*d%JOT30bA0PF<90X#`I9f>C*Py9OnG}b`B`7cT7aiO)bzo$)$wv|nmz%CpG zdk`O^1ed8WPL{qC%!pE$1?0iY18g(88fOuWRyRl^L)@k-M!!?$pR$KETRmsc7#RMf z;z7+C+=VnvmO?4mNO0A=lt^DYX*{N{DISU~efBc3WPr9FxU}T#y$@TO=PwZFF7IPxwx)_EKV#x!HqBzI zM27;7`PX9JB}--vswxnWB7QFXu!SMu7+lQ=rQRm>4x?`iwfj^x^GD2!>w+M^6%|nA zAKot({k-LY-KGFD$xdT=IpQ!Yx#<@90+-tv_G!Nk9+5+Dg;GQ0l$f&$vq-$6A^G>= zgi0&5GU@m?nqtM%poaidpgrLm5E@mqw}VkqZvk81IL`HbF>7Z1@S@CRq(9YuQ`uXip_!HY-$t-Y-3biuye~@P9!?&J^4*K)c42iHCjaj@X`A# z=_ebf4+UcF%PCE0*J7zSm4xaY2%HOJzAHf@+KZO?W3L8Xf*}}zX=q?^~ z{ATm&j#c(HpUd$SJ<@AdWS*V)sQ5}^P{Eq6!T~2ZC3vW*Y@&_kCvOiFjo^Ke#Xck0 z_gFky?TYSAGSC#~*+wF>VPfB(V%1d_ecx)>Ykik~tER)Hdsmp*jv9DA%}d@O#Sy-E zQ#~RN4SDKfN!Cz(d>=+;?lKp&f!=McB1;jP?c<+tVRS+;RaMWmXW*fqj3?DcXb^wh z4ulk$`(<4C9C>#SCowl)F&}$kG_b`YMoD4k)cq`NQK)2Hi>iJRpk#!iYGAgTWbP9} z1nt}J(4lfx{9~NN?YZ}%aHxk|+h0xJch5~T*dq{J{eHBp?eFlkSwLHgbS1CZsnYi| zo_EQhs5|(3dToa+4NzTd_EMDB*xukG{Q5v9pjU*Z~%>2=)+tcTk?A zt^Ng));>P}@S=iD!I1Eo%!u<3tJGhFjOG}@is2zE`58Z7$OO`8T%#HJJBioEDbeQx 
zm{SFZo?P=;tNLmi1rrU?s52&=upocVc^b-Ep4CcSdX|)^?UG&><8roW)hsLkfe;ID zxM@YNdxa7J;JFpKj2;FxwJhs`ojpQ`Y)7dMK>usCr&5v((21NY(^d12J6=>}36SOM z>L2zqxYSQT$%=Fz<*e~Ohq`;bc_4$9$x4^~(poV|_{k#AW>Au1&lc;mR&2d)XPhND zkft#bl3v{&BHytS#^%syHg!|NpJ6#~a^m(v#dgwx1KZpu^la$lBYMh;7uQmjNipnS zD*Z3e{+$?dG9@!A_M#ix@C)&|ZTcu3sd(NA*s<{TuSFIQ`D3s{JirgR<~$cfoHK&io0F=7PzrkwsZ73gV0;hLmuk(EHqlCi2lXj*k>OoQ$&k;uQORMYufcQZzwd6p?73lUn>il-rZl1YyyafO&0YTjrAO0lNMDR`Tlb$kt z>9aAH(p13A5Zw^vfSthbisk zsQ7QhX^-#omW`=7!K`DZrX&M*2HRhSgzUdRj{F7O<$(;J!G3Li9e znIZ!c*~efZX6Z&Hw;eh`vBBB*Qk^^VQ4?%hQG~cSB&5rB54KX5XM3i}m>>|WE10&- zf@G`V5K=R>)I6bs zBXepPD-IKCO%oJohn21EwxTy04sd#)sYjTbZwd3S)-VHm=hvWh9=#om^L0l2@@YRE zt~II^EViRK$91VA?_IlhO`}N=Xirddx&!$m8w!G^u5I zRJc5&0COkWal~lkPcfq(eZ^nIBg>|UKCmDUm%N)80#i)5O zavwixNp_40%jha<>|Ju$@)3{y=#Dc#`o$j{3PzLs_Bk;i|D6z<@xifP!Rx#nt3Q!? z3hV2~vFa8_l7}YLx@woqCgg%T5`tS$#_Uk3tFw^D}3m z!-rXm1g?Afz61XYkp6tw>(jQ!4HoK#XMEh2wTzh?>6Vz=Xv->ch8@YTs-(Qs$+hLX zuPaU0l6GhiT_bVk;eqg{9us~KY`7GW6E)3awdqH`ps;`s{PRo|m*AM4JJ$!$gcYtVSRh)z+t{op3(spepdzBy zJdzi&s(>){Tk947=7!^|Ejt6EpAvA$Qw{9y#+_%TGb85Z({Qj*oR7>3BM{A7k@KOHDbxOH#0v_ zk_C3Ru-PwLV9TeCX+NCi)Hs57Ij7%FIuhg<)XkJWeOv+}Dg6<={8ZIgUSJ7u0Nr!1 z)b03#b$_II>9!mDb>`pw8w(C671t|@E#i_nl|kFv4{;5+@m$s?Oy;8<>QV#;hz7%W zJ_~0yHEW3o;rYkWM`*c92y*6KyFPW>kz)>G89m!xT&wgC>}$#X28`}Z)9~-XYGl?0 zjT+u+&GnB}A_s&>QL@@xjko?Ng~VWWnY#usoB+TUdmuR5+4s0*&7c7#V*wm1zm1rU zYQ}B0kyFIy_T6aH69m5=_USY#3Qv0Zp{C}+*xpSJ^8@MfHcJ|6GB?Se*jjv4eegCrf4*6xJxl zlIC7L220pY3S6r6ET=f-`NZd`8OgcY;V1n_n0V2^Dx`oBe7i7RVbql9zTtAff4Ti{ zY(6LC(|WOn3>f?3`RzwK=BxDS?*|I5E4B2VqzYGo1qD8Tb9nAlJOdLdCQd^2HJR3*|8Sb00CdqYCoip#$^mkvhwh2mA^--& z3N5|AYa~@Qca8E3K_g#xBBt+YZO7~_5^AW`)!kjwa(-Zf>QJ8p%JuqK>!@dIt8<=S zh*^(FP{9=3FY8_Qv=lj~y@M{T+iT2eEeoy)GD`OkI@F(b6$8NdEQvGH$faI#@$+pp zrV~ogfT_CRh*t(Dt53|H>V~Nos3c2qzt#y6(U~;0w4N4pd#cxI|CHm~Gj~29d1TKl z^E)`B>icUw5$11nZ))ovx+dUlBLtb6aEXY&q`dqb`ahow1PqaQ^6W3=F_hvqxI(N9 z+v}@pPvArVtoo$~sww@n#PBMp_t`-{J{A}L{vS?@l&4vlA)N5F0WxcmX6imZt3tc%rH}h@U)R?Ls zFT0%1vHCR2qa)N*eLn(a6p_$IZS3Z4!U zy7nk~Dt>*rOqK4(x+d}IoETzrt3B}^6Z`a?T!O!NZV0-!+&N*v96z=*Cl;a;q_3>j z3?;m)1P;-YHAHtTNtC!qG^yn}G`-)Pv6w4)6Betqk*%k;gHSV+naAdjB*-XnEp$GY z?_!RsMd^Soetaq2mLy1)+E_XTM1DO?dWg;(MA0js5#+OJXOE0N0QAj0oyqjNNOeJk zi|$NCvn;j&=I-SlGIBLuT)1J%of(ixv=mtBb!q|ovB+8W^ZiNfP;N3@ zWKDhx$nyj{)20yu53p1Ruu+Dm)iaF*GYm*yds_2uBuHZ%&ZQ=1s{`kPaS$}h?4BoL z`@c<$S*1HrXE!fj`r_^W4Gj^x)J!!EoK=;1T&mk;R^hJ}*xA%Q!0`@iM=qiwkUhyb zT2N{hQIacdI>wbdJJjFs-QbAG+qK_f0=qn589ZJ0qHx#79v$?MW zvPXjHZ0+qH8ERBZl5`Z}d*I_Cz!v=OI2Ga{6xYG(d+A2aJ2y3cAzd@92`tUwgYn8i zycAmf^?WSilQ<;Y++9>^mr}Qb;0mS+x8tm#Y|HiM=}zmCIQCQ9+?5^lUx3eTg0%g< k_+Ry*b;WX$SoUX2D#mXXkHs9VbE0^X{(rCr{QqYE2f^dev;Y7A diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/synthetic_image_generator.py b/src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/synthetic_image_generator.py deleted file mode 100644 index a2df14d87..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/synthetic_image_generator.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. 
-# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import glob -import random -from enum import Enum, auto -from pathlib import Path -from typing import Optional - -from genai_perf import utils -from PIL import Image - - -class ImageFormat(Enum): - PNG = auto() - JPEG = auto() - - -class SyntheticImageGenerator: - """A simple synthetic image generator that generates multiple synthetic - images from the source images. - """ - - @classmethod - def create_synthetic_image( - cls, - image_width_mean: int, - image_width_stddev: int, - image_height_mean: int, - image_height_stddev: int, - image_format: Optional[ImageFormat] = None, - ) -> str: - """Generate base64 encoded synthetic image using the source images.""" - if image_format is None: - image_format = random.choice(list(ImageFormat)) - width = cls._sample_random_positive_integer( - image_width_mean, image_width_stddev - ) - height = cls._sample_random_positive_integer( - image_height_mean, image_height_stddev - ) - - image = cls._sample_source_image() - image = image.resize(size=(width, height)) - - img_base64 = utils.encode_image(image, image_format.name) - return f"data:image/{image_format.name.lower()};base64,{img_base64}" - - @classmethod - def _sample_source_image(cls): - """Sample one image among the source images.""" - filepath = Path(__file__).parent.resolve() / "source_images" / "*" - filenames = glob.glob(str(filepath)) - return Image.open(random.choice(filenames)) - - @classmethod - def _sample_random_positive_integer(cls, mean: int, stddev: int) -> int: - n = int(abs(random.gauss(mean, stddev))) - return n if n != 0 else 1 # avoid zero diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/synthetic_prompt_generator.py b/src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/synthetic_prompt_generator.py deleted file mode 100644 index 68b77fdc4..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/llm_inputs/synthetic_prompt_generator.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
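For reference, a minimal sketch of how the `SyntheticImageGenerator` deleted above was driven. This is illustrative only: it assumes a pre-removal genai-perf installation (with its bundled `source_images` directory), and the dimension values are arbitrary examples, not defaults taken from the source.

```python
# Illustrative sketch, not part of the patch: exercises the deleted
# SyntheticImageGenerator API under a pre-removal genai-perf install.
from genai_perf.llm_inputs.synthetic_image_generator import (
    ImageFormat,
    SyntheticImageGenerator,
)

# Width and height are sampled from Gaussians and clamped to positive
# integers by _sample_random_positive_integer(); 512/64 are arbitrary.
data_uri = SyntheticImageGenerator.create_synthetic_image(
    image_width_mean=512,
    image_width_stddev=64,
    image_height_mean=512,
    image_height_stddev=64,
    image_format=ImageFormat.PNG,  # omit to choose PNG or JPEG at random
)

# The result is a base64 data URI ("data:image/png;base64,...") ready to
# embed in a multi-modal request payload.
print(data_uri[:40])
```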
- -import itertools -import math -import pathlib -import random -import re -from typing import List - -from genai_perf.tokenizer import Tokenizer - - -class SyntheticPromptGenerator: - @classmethod - def create_synthetic_prompt( - cls, - tokenizer: Tokenizer, - prompt_tokens_mean: int = 550, - prompt_tokens_stddev: int = 250, - ) -> str: - """ - Generate a prompt that randomly samples lines from - Washington's farewell address at farewell.txt. - - Args: - prompt_tokens_mean: - The mean length of the prompt to generate - prompt_tokens_stddev: - The standard deviation of the length of the prompt to generate - - Returns: - The prompt. - """ - - num_prompt_tokens = SyntheticPromptGenerator._sample_random_positive_int( - prompt_tokens_mean, prompt_tokens_stddev - ) - - farewell_lines = SyntheticPromptGenerator._create_farewell_lines() - prompt = SyntheticPromptGenerator._create_prompt_from_lines( - num_prompt_tokens, farewell_lines, tokenizer - ) - - return prompt - - @classmethod - def _create_farewell_lines(cls) -> List[str]: - farewell_path = pathlib.Path(__file__).parent.resolve() / "farewell.txt" - with open(farewell_path, "r") as f: - farewell_lines = f.readlines() - random.shuffle(farewell_lines) - - return farewell_lines - - @classmethod - def _create_prompt_from_lines( - cls, - requested_prompt_tokens: int, - source_lines: List[str], - tokenizer: Tokenizer, - ) -> str: - get_token_length = lambda text: len(tokenizer.encode(text)) - - line_iterator = itertools.cycle(source_lines) - - def word_generator(): - while True: - next_line = next(line_iterator) - words = re.split("[ \n]+", next_line) - for word in words: - yield word - - word_iterator = word_generator() - - # Fast add lines - remaining_tokens = requested_prompt_tokens - prompt = "" - num_tokens_in_avg_line = get_token_length(source_lines[0] + source_lines[1]) / 2 - num_lines_to_add_fast = math.floor( - 0.5 * requested_prompt_tokens / num_tokens_in_avg_line - ) - while num_lines_to_add_fast: - for _ in range(num_lines_to_add_fast): - next_line = next(line_iterator) - prompt = prompt + next_line - - curr_tokens = get_token_length(prompt) - remaining_tokens = requested_prompt_tokens - curr_tokens - num_lines_to_add_fast = math.floor( - 0.5 * remaining_tokens / num_tokens_in_avg_line - ) - - # Fast add words - final_line = "" - while get_token_length(final_line) < remaining_tokens - 3: - next_word = next(word_iterator) - final_line += next_word + " " - prompt += final_line - - # Final tweaks - diff = requested_prompt_tokens - get_token_length(prompt) - for _ in range(diff): - prompt = "hi " + prompt - - return prompt - - @classmethod - def _sample_random_positive_int(cls, mean: int, stddev: int) -> int: - random_pos_int = -1 - while random_pos_int <= 0: - random_pos_int = int(random.gauss(mean, stddev)) - - return random_pos_int diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/logging.py b/src/c++/perf_analyzer/genai-perf/genai_perf/logging.py deleted file mode 100644 index f5cab490a..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/logging.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. 
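For reference, a minimal sketch of the deleted `SyntheticPromptGenerator` in use, again assuming a pre-removal genai-perf install; `DEFAULT_TOKENIZER` is the package's own default tokenizer name.

```python
# Illustrative sketch, not part of the patch: generates one synthetic
# prompt by sampling lines from the bundled farewell.txt.
from genai_perf.llm_inputs.synthetic_prompt_generator import SyntheticPromptGenerator
from genai_perf.tokenizer import DEFAULT_TOKENIZER, get_tokenizer

tokenizer = get_tokenizer(DEFAULT_TOKENIZER)
prompt = SyntheticPromptGenerator.create_synthetic_prompt(
    tokenizer,
    prompt_tokens_mean=550,   # defaults from the signature above
    prompt_tokens_stddev=250,
)

# The generator pads and trims so the encoded length lands near the
# sampled target token count.
print(len(tokenizer.encode(prompt)))
```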
-# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import logging -import logging.config - -DEFAULT_LOG_FORMAT = "%(asctime)s [%(levelname)s] %(name)s:%(lineno)s - %(message)s" -DEFAULT_DATE_FORMAT = "%Y-%m-%d %H:%M" - - -def init_logging() -> None: - LOGGING_CONFIG = { - "version": 1, - "disable_existing_loggers": False, - "formatters": { - "standard": { - "format": DEFAULT_LOG_FORMAT, - "datefmt": DEFAULT_DATE_FORMAT, - }, - }, - "handlers": { - "console": { - "level": "INFO", - "formatter": "standard", - "class": "logging.StreamHandler", - "stream": "ext://sys.stdout", # Default is stderr - }, - }, - "loggers": { - "": { # root logger - avoid using - "handlers": ["console"], - "level": "WARNING", - "propagate": False, - }, - "__main__": { # if __name__ == '__main__' - "handlers": ["console"], - "level": "DEBUG", - "propagate": False, - }, - "genai_perf.parser": { # must use module name for loggers - "handlers": ["console"], - "level": "DEBUG", - "propagate": False, - }, - "genai_perf.wrapper": { - "handlers": ["console"], - "level": "DEBUG", - "propagate": False, - }, - "genai_perf.plots.plot_config_parser": { - "handlers": ["console"], - "level": "DEBUG", - "propagate": False, - }, - "genai_perf.plots.plot_manager": { - "handlers": ["console"], - "level": "DEBUG", - "propagate": False, - }, - "genai_perf.export_data.json_exporter": { - "handlers": ["console"], - "level": "DEBUG", - "propagate": False, - }, - "genai_perf.export_data.csv_exporter": { - "handlers": ["console"], - "level": "DEBUG", - "propagate": False, - }, - }, - } - logging.config.dictConfig(LOGGING_CONFIG) - - -def getLogger(name): - return logging.getLogger(name) diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/main.py b/src/c++/perf_analyzer/genai-perf/genai_perf/main.py deleted file mode 100755 index 9ff7b5b9a..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/main.py +++ /dev/null @@ -1,170 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. 
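The deleted logging module routes everything through `dictConfig`, so named loggers behave as configured only when their module name is pre-registered in `LOGGING_CONFIG`; unknown names fall back to the root logger at WARNING. A minimal sketch of the intended call pattern, assuming a pre-removal install:

```python
# Illustrative sketch, not part of the patch.
import genai_perf.logging as logging

logging.init_logging()  # installs the dictConfig handlers exactly once
logger = logging.getLogger("genai_perf.parser")  # must match a configured name
logger.info("tokenizer loaded")  # reaches stdout via the INFO console handler
```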
-# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import os -import sys -import traceback -from argparse import Namespace -from pathlib import Path - -import genai_perf.logging as logging -from genai_perf import parser -from genai_perf.exceptions import GenAIPerfException -from genai_perf.export_data.output_reporter import OutputReporter -from genai_perf.llm_inputs.llm_inputs import LlmInputs -from genai_perf.plots.plot_config_parser import PlotConfigParser -from genai_perf.plots.plot_manager import PlotManager -from genai_perf.profile_data_parser import LLMProfileDataParser, ProfileDataParser -from genai_perf.tokenizer import Tokenizer, get_tokenizer - - -def create_artifacts_dirs(args: Namespace) -> None: - plot_dir = args.artifact_dir / "plots" - os.makedirs(args.artifact_dir, exist_ok=True) - if hasattr(args, "generate_plots") and args.generate_plots: - os.makedirs(plot_dir, exist_ok=True) - - -def generate_inputs(args: Namespace, tokenizer: Tokenizer) -> None: - # TODO (TMA-1759): review if add_model_name is always true - if args.input_file: - filepath, _ = args.input_file - input_filename = Path(filepath) - else: - input_filename = None - add_model_name = True - try: - extra_input_dict = parser.get_extra_inputs_as_dict(args) - except ValueError as e: - raise GenAIPerfException(e) - - LlmInputs.create_llm_inputs( - input_type=args.prompt_source, - output_format=args.output_format, - dataset_name=args.input_dataset, - model_name=args.model, - model_selection_strategy=args.model_selection_strategy, - input_filename=input_filename, - starting_index=LlmInputs.DEFAULT_STARTING_INDEX, - length=args.num_prompts, - prompt_tokens_mean=args.synthetic_input_tokens_mean, - prompt_tokens_stddev=args.synthetic_input_tokens_stddev, - output_tokens_mean=args.output_tokens_mean, - output_tokens_stddev=args.output_tokens_stddev, - output_tokens_deterministic=args.output_tokens_mean_deterministic, - image_width_mean=args.image_width_mean, - image_width_stddev=args.image_width_stddev, - image_height_mean=args.image_height_mean, - image_height_stddev=args.image_height_stddev, - image_format=args.image_format, - random_seed=args.random_seed, - num_of_output_prompts=args.num_prompts, - add_model_name=add_model_name, - add_stream=args.streaming, - tokenizer=tokenizer, - extra_inputs=extra_input_dict, - batch_size=args.batch_size, - output_dir=args.artifact_dir, - ) - - -def 
calculate_metrics(args: Namespace, tokenizer: Tokenizer) -> ProfileDataParser: - if args.endpoint_type in ["embeddings", "rankings"]: - return ProfileDataParser(args.profile_export_file) - else: - return LLMProfileDataParser( - filename=args.profile_export_file, - tokenizer=tokenizer, - ) - - -def report_output(data_parser: ProfileDataParser, args: Namespace) -> None: - if args.concurrency: - infer_mode = "concurrency" - load_level = f"{args.concurrency}" - elif args.request_rate: - infer_mode = "request_rate" - load_level = f"{args.request_rate}" - else: - raise GenAIPerfException("No valid infer mode specified") - - stats = data_parser.get_statistics(infer_mode, load_level) - reporter = OutputReporter(stats, args) - reporter.report_output() - if args.generate_plots: - create_plots(args) - - -def create_plots(args: Namespace) -> None: - # TMA-1911: support plots CLI option - plot_dir = args.artifact_dir / "plots" - PlotConfigParser.create_init_yaml_config( - filenames=[args.profile_export_file], # single run - output_dir=plot_dir, - ) - config_parser = PlotConfigParser(plot_dir / "config.yaml") - plot_configs = config_parser.generate_configs() - plot_manager = PlotManager(plot_configs) - plot_manager.generate_plots() - - -# Separate function that can raise exceptions used for testing -# to assert correct errors and messages. -def run(): - try: - # TMA-1900: refactor CLI handler - logging.init_logging() - args, extra_args = parser.parse_args() - if args.subcommand == "compare": - args.func(args) - else: - create_artifacts_dirs(args) - tokenizer = get_tokenizer(args.tokenizer) - generate_inputs(args, tokenizer) - args.func(args, extra_args) - data_parser = calculate_metrics(args, tokenizer) - report_output(data_parser, args) - except Exception as e: - raise GenAIPerfException(e) - - -def main(): - # Interactive use will catch exceptions and log formatted errors rather than - # tracebacks. - try: - run() - except Exception as e: - traceback.print_exc() - logger = logging.getLogger(__name__) - logger.error(e) - return 1 - - return 0 - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/metrics/__init__.py b/src/c++/perf_analyzer/genai-perf/genai_perf/metrics/__init__.py deleted file mode 100644 index 01ca53c59..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/metrics/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. 
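The `calculate_metrics`/`report_output` pair above can also be mirrored directly: pick `LLMProfileDataParser` for LLM endpoints (or the plain `ProfileDataParser` for embeddings/rankings) and query statistics for one load level. A hedged sketch, assuming a pre-removal install; the export path here is hypothetical:

```python
# Illustrative sketch, not part of the patch; the export path is hypothetical.
from pathlib import Path

from genai_perf.profile_data_parser import LLMProfileDataParser
from genai_perf.tokenizer import DEFAULT_TOKENIZER, get_tokenizer

data_parser = LLMProfileDataParser(
    filename=Path("artifacts/profile_export.json"),  # output of a prior run
    tokenizer=get_tokenizer(DEFAULT_TOKENIZER),
)

# Same (infer_mode, load_level) pair that report_output() derives from the
# CLI args: "concurrency" or "request_rate", plus the load value as a string.
stats = data_parser.get_statistics("concurrency", "1")
```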
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from genai_perf.metrics.llm_metrics import LLMMetrics -from genai_perf.metrics.metrics import MetricMetadata, Metrics -from genai_perf.metrics.statistics import Statistics diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/metrics/llm_metrics.py b/src/c++/perf_analyzer/genai-perf/genai_perf/metrics/llm_metrics.py deleted file mode 100755 index 13dff8a63..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/metrics/llm_metrics.py +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from typing import List - -from genai_perf.metrics.metrics import MetricMetadata, Metrics - - -class LLMMetrics(Metrics): - """A simple dataclass that holds core LLM performance metrics.""" - - LLM_REQUEST_METRICS = [ - MetricMetadata("time_to_first_token", "ms"), - MetricMetadata("inter_token_latency", "ms"), - MetricMetadata("output_token_throughput_per_request", "tokens/sec"), - MetricMetadata("output_sequence_length", "tokens"), - MetricMetadata("input_sequence_length", "tokens"), - ] - - LLM_SYSTEM_METRICS = [ - # (TMA-1977) Make the unit consistent with statistics dict (e.g. 
tokens/sec) - MetricMetadata("output_token_throughput", "per sec"), - ] - - def __init__( - self, - request_throughputs: List[float] = [], - request_latencies: List[int] = [], - time_to_first_tokens: List[int] = [], - inter_token_latencies: List[int] = [], - output_token_throughputs: List[float] = [], - output_token_throughputs_per_request: List[int] = [], - output_sequence_lengths: List[int] = [], - input_sequence_lengths: List[int] = [], - chunked_inter_token_latencies: List[List[int]] = [[]], - ) -> None: - super().__init__(request_throughputs, request_latencies) - self.time_to_first_tokens = time_to_first_tokens - self.inter_token_latencies = inter_token_latencies - self.output_token_throughputs = output_token_throughputs - self.output_token_throughputs_per_request = output_token_throughputs_per_request - self.output_sequence_lengths = output_sequence_lengths - self.input_sequence_lengths = input_sequence_lengths - - # Keeping chunked ITL (old) as a WAR to preserve visualization. - # Excluded from data. - self._chunked_inter_token_latencies = chunked_inter_token_latencies - - # add base name mapping - self._base_names["time_to_first_tokens"] = "time_to_first_token" - self._base_names["inter_token_latencies"] = "inter_token_latency" - self._base_names["output_token_throughputs"] = "output_token_throughput" - self._base_names["output_token_throughputs_per_request"] = ( - "output_token_throughput_per_request" - ) - self._base_names["output_sequence_lengths"] = "output_sequence_length" - self._base_names["input_sequence_lengths"] = "input_sequence_length" - - @property - def request_metrics(self) -> List[MetricMetadata]: - base_metrics = super().request_metrics # base metrics - - # (TMA-1975) The order is hardcoded as below to avoid introducing any - # breaking changes to the users who might be parsing the outputs. However, - # we would eventually want to impose some consistent order such as a - # base metrics first and then task specific metrics. Uncomment the below - # line to enable this order: - # return base_metrics + self.LLM_REQUEST_METRICS - return ( - self.LLM_REQUEST_METRICS[:2] + base_metrics + self.LLM_REQUEST_METRICS[2:] - ) - - @property - def system_metrics(self) -> List[MetricMetadata]: - base_metrics = super().system_metrics # base metrics - - # (TMA-1975) The order is hardcoded as below to avoid introducing any - # breaking changes to the users who might be parsing the outputs. However, - # we would eventually want to impose some consistent order such as a - # base metrics first and then task specific metrics. Uncomment the below - # line to enable this order: - # return base_metrics + self.LLM_SYSTEM_METRICS - return self.LLM_SYSTEM_METRICS + base_metrics diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/metrics/metrics.py b/src/c++/perf_analyzer/genai-perf/genai_perf/metrics/metrics.py deleted file mode 100755 index 7e047094d..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/metrics/metrics.py +++ /dev/null @@ -1,88 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. 
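`LLMMetrics.request_metrics` deliberately interleaves the LLM metrics around the base metrics to preserve the legacy column order flagged in the TMA-1975 comments. A sketch with fabricated values, assuming a pre-removal install:

```python
# Illustrative sketch, not part of the patch; all values are fabricated.
from genai_perf.metrics import LLMMetrics

metrics = LLMMetrics(
    request_throughputs=[10.0],
    request_latencies=[1_200, 1_100],
    time_to_first_tokens=[80, 95],
    inter_token_latencies=[12, 14],
    output_token_throughputs=[450.0],
    output_token_throughputs_per_request=[40, 42],
    output_sequence_lengths=[128, 130],
    input_sequence_lengths=[512, 512],
)

# Legacy ordering: first two LLM metrics, then base metrics, then the rest.
print([m.name for m in metrics.request_metrics])
# ['time_to_first_token', 'inter_token_latency', 'request_latency',
#  'output_token_throughput_per_request', 'output_sequence_length',
#  'input_sequence_length']
```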
-# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from dataclasses import dataclass -from typing import List - - -@dataclass -class MetricMetadata: - name: str - unit: str - - -class Metrics: - """A base class that contains common request level metrics.""" - - REQUEST_METRICS = [ - MetricMetadata("request_latency", "ms"), - ] - - SYSTEM_METRICS = [ - # (TMA-1977) Make the unit consistent with statistics dict (e.g. tokens/sec) - MetricMetadata("request_throughput", "per sec"), - ] - - def __init__( - self, - request_throughputs: List[float] = [], - request_latencies: List[int] = [], - ) -> None: - self.request_throughputs = request_throughputs - self.request_latencies = request_latencies - self._base_names = { - "request_throughputs": "request_throughput", - "request_latencies": "request_latency", - } - - def __repr__(self): - attr_strs = [] - for k, v in self.__dict__.items(): - if not k.startswith("_"): - attr_strs.append(f"{k}={v}") - return f"Metrics({','.join(attr_strs)})" - - @property - def request_metrics(self) -> List[MetricMetadata]: - return self.REQUEST_METRICS - - @property - def system_metrics(self) -> List[MetricMetadata]: - return self.SYSTEM_METRICS - - @property - def data(self) -> dict: - """Returns all the metrics.""" - return {k: v for k, v in self.__dict__.items() if not k.startswith("_")} - - def get_base_name(self, metric_name: str) -> str: - """Returns singular name of a given metric.""" - if metric_name in self._base_names: - return self._base_names[metric_name] - else: - raise KeyError(f"No metric named '{metric_name}' exists.") diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/metrics/statistics.py b/src/c++/perf_analyzer/genai-perf/genai_perf/metrics/statistics.py deleted file mode 100755 index f0d12cef6..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/metrics/statistics.py +++ /dev/null @@ -1,196 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. 
-# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from collections import defaultdict -from pathlib import Path -from typing import Dict, List, Union - -import numpy as np -import pandas as pd -from genai_perf.metrics.metrics import Metrics - - -class Statistics: - """A class that aggregates various statistics from given metrics class. - - The Statistics class goes through each metric in the metrics class and - calculates several statistics such as: - - average (arithmetic mean) - - percentiles (p25, p50, p75, p90, p95, p99) - - minimum & maximum - - standard deviation - The class will store each calculated statistics as part of its attribute. 
- - Example: - - >>> metrics = LLMMetrics(request_throughputs=[2, 4]) - >>> stats = Statistics(metrics) - >>> print(stats.avg_request_throughput) # output: 3 - """ - - def __init__(self, metrics: Metrics): - # iterate through Metrics to calculate statistics and set attributes - self._metrics = metrics - self._stats_dict: Dict = defaultdict(dict) - for attr, data in metrics.data.items(): - if self._should_skip(data, attr): - continue - - attr = metrics.get_base_name(attr) - self._add_units(attr) - self._calculate_mean(data, attr) - if not self._is_system_metric(metrics, attr): - self._calculate_percentiles(data, attr) - self._calculate_minmax(data, attr) - self._calculate_std(data, attr) - - def _should_skip(self, data: List[Union[int, float]], attr: str) -> bool: - """Checks if some metrics should be skipped.""" - # No data points - if len(data) == 0: - return True - # Skip ITL when non-streaming (all zero) - elif attr == "inter_token_latencies" and sum(data) == 0: - return True - return False - - def _calculate_mean(self, data: List[Union[int, float]], attr: str) -> None: - avg = np.mean(data) - setattr(self, "avg_" + attr, avg) - self._stats_dict[attr]["avg"] = float(avg) - - def _calculate_percentiles(self, data: List[Union[int, float]], attr: str) -> None: - p25, p50, p75 = np.percentile(data, [25, 50, 75]) - p90, p95, p99 = np.percentile(data, [90, 95, 99]) - setattr(self, "p25_" + attr, p25) - setattr(self, "p50_" + attr, p50) - setattr(self, "p75_" + attr, p75) - setattr(self, "p90_" + attr, p90) - setattr(self, "p95_" + attr, p95) - setattr(self, "p99_" + attr, p99) - self._stats_dict[attr]["p99"] = float(p99) - self._stats_dict[attr]["p95"] = float(p95) - self._stats_dict[attr]["p90"] = float(p90) - self._stats_dict[attr]["p75"] = float(p75) - self._stats_dict[attr]["p50"] = float(p50) - self._stats_dict[attr]["p25"] = float(p25) - - def _calculate_minmax(self, data: List[Union[int, float]], attr: str) -> None: - min, max = np.min(data), np.max(data) - setattr(self, "min_" + attr, min) - setattr(self, "max_" + attr, max) - self._stats_dict[attr]["max"] = float(max) - self._stats_dict[attr]["min"] = float(min) - - def _calculate_std(self, data: List[Union[int, float]], attr: str) -> None: - std = np.std(data) - setattr(self, "std_" + attr, std) - self._stats_dict[attr]["std"] = float(std) - - def scale_data(self, factor: float = 1 / 1e6) -> None: - for k1, v1 in self.stats_dict.items(): - if self._is_time_metric(k1): - for k2, v2 in v1.items(): - if k2 != "unit": - self.stats_dict[k1][k2] = self._scale(v2, factor) - - def _scale(self, metric: float, factor: float = 1 / 1e6) -> float: - """ - Scale metrics from nanoseconds by factor. - Default is nanoseconds to milliseconds. 
- """ - return metric * factor - - def _add_units(self, key) -> None: - if self._is_time_metric(key): - self._stats_dict[key]["unit"] = "ms" - elif key == "request_throughput": - self._stats_dict[key]["unit"] = "requests/sec" - elif key.startswith("output_token_throughput"): - self._stats_dict[key]["unit"] = "tokens/sec" - elif "sequence_length" in key: - self._stats_dict[key]["unit"] = "tokens" - else: - self._stats_dict[key]["unit"] = "" - - def __repr__(self) -> str: - attr_strs = [] - for k, v in self.__dict__.items(): - if not k.startswith("_"): - attr_strs.append(f"{k}={v}") - return f"Statistics({','.join(attr_strs)})" - - @property - def data(self) -> dict: - """Return all the aggregated statistics.""" - return {k: v for k, v in self.__dict__.items() if not k.startswith("_")} - - @property - def metrics(self) -> Metrics: - """Return the underlying metrics used to calculate the statistics.""" - return self._metrics - - @property - def stats_dict(self) -> Dict: - return self._stats_dict - - def _is_system_metric(self, metrics: Metrics, attr: str) -> bool: - return attr in [m.name for m in metrics.system_metrics] - - def _is_time_metric(self, field: str) -> bool: - # TPA-188: Remove the hardcoded time metrics list - time_metrics = [ - "inter_token_latency", - "time_to_first_token", - "request_latency", - ] - return field in time_metrics - - def export_parquet(self, artifact_dir: Path, filename: str) -> None: - max_length = -1 - col_index = 0 - filler_list = [] - df = pd.DataFrame() - - # Data frames require all columns of the same length - # find the max length column - for key, value in self._metrics.data.items(): - max_length = max(max_length, len(value)) - - # Insert None for shorter columns to match longest column - for key, value in self._metrics.data.items(): - if len(value) < max_length: - diff = max_length - len(value) - filler_list = [None] * diff - df.insert(col_index, key, value + filler_list) - diff = 0 - filler_list = [] - col_index = col_index + 1 - - filepath = artifact_dir / f"{filename}.gzip" - df.to_parquet(filepath, compression="gzip") diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/parser.py b/src/c++/perf_analyzer/genai-perf/genai_perf/parser.py deleted file mode 100644 index 776535d15..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/parser.py +++ /dev/null @@ -1,834 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. 
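Tying the Statistics pieces together, a sketch that mirrors the class docstring's example and then rescales the time metrics; it assumes a pre-removal install, and the latencies are fabricated nanosecond values:

```python
# Illustrative sketch, not part of the patch; values are fabricated.
from genai_perf.metrics import LLMMetrics, Statistics

metrics = LLMMetrics(
    request_throughputs=[2.0, 4.0],
    request_latencies=[3_000_000, 5_000_000],  # nanoseconds
)
stats = Statistics(metrics)

print(stats.avg_request_throughput)  # 3.0; system metrics get the mean only
print(stats.p99_request_latency)     # request metrics also get percentiles

stats.scale_data()  # default factor 1/1e6: ns -> ms, applied to stats_dict only
print(stats.stats_dict["request_latency"]["avg"])  # 4.0 (ms)
```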
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import argparse -import json -import os -import sys -from enum import Enum, auto -from pathlib import Path -from typing import Tuple - -import genai_perf.logging as logging -import genai_perf.utils as utils -from genai_perf.constants import ( - CNN_DAILY_MAIL, - DEFAULT_ARTIFACT_DIR, - DEFAULT_COMPARE_DIR, - OPEN_ORCA, -) -from genai_perf.llm_inputs.llm_inputs import ( - LlmInputs, - ModelSelectionStrategy, - OutputFormat, - PromptSource, -) -from genai_perf.llm_inputs.synthetic_image_generator import ImageFormat -from genai_perf.plots.plot_config_parser import PlotConfigParser -from genai_perf.plots.plot_manager import PlotManager -from genai_perf.tokenizer import DEFAULT_TOKENIZER - -from . import __version__ - - -class PathType(Enum): - FILE = auto() - DIRECTORY = auto() - - def to_lowercase(self): - return self.name.lower() - - -class Subcommand(Enum): - PROFILE = auto() - COMPARE = auto() - - def to_lowercase(self): - return self.name.lower() - - -logger = logging.getLogger(__name__) - -_endpoint_type_map = { - "chat": "v1/chat/completions", - "completions": "v1/completions", - "embeddings": "v1/embeddings", - "rankings": "v1/ranking", - "vision": "v1/chat/completions", -} - - -def _check_model_args( - parser: argparse.ArgumentParser, args: argparse.Namespace -) -> argparse.Namespace: - """ - Check if model name is provided. - """ - if not args.model: - parser.error("The -m/--model option is required and cannot be empty.") - args = _convert_str_to_enum_entry( - args, "model_selection_strategy", ModelSelectionStrategy - ) - _generate_formatted_model_name(args) - return args - - -def _generate_formatted_model_name(args: argparse.Namespace) -> None: - if len(args.model) == 1: - args.formatted_model_name = args.model[0] - elif len(args.model) == 0: - args.model = None - args.formatted_model_name = None - else: - args.formatted_model_name = args.model[0] + "_multi" - - -def _check_compare_args( - parser: argparse.ArgumentParser, args: argparse.Namespace -) -> argparse.Namespace: - """ - Check compare subcommand args - """ - if not args.config and not args.files: - parser.error("Either the --config or --files option must be specified.") - return args - - -def _check_image_input_args( - parser: argparse.ArgumentParser, args: argparse.Namespace -) -> argparse.Namespace: - """ - Sanity check the image input args - """ - if args.image_width_mean <= 0 or args.image_height_mean <= 0: - parser.error( - "Both --image-width-mean and --image-height-mean values must be positive." - ) - if args.image_width_stddev < 0 or args.image_height_stddev < 0: - parser.error( - "Both --image-width-stddev and --image-height-stddev values must be non-negative." - ) - - args = _convert_str_to_enum_entry(args, "image_format", ImageFormat) - return args - - -def _check_conditional_args( - parser: argparse.ArgumentParser, args: argparse.Namespace -) -> argparse.Namespace: - """ - Check for conditional args and raise an error if they are not set. 
- """ - - # Endpoint and output format checks - if args.service_kind == "openai": - if args.endpoint_type is None: - parser.error( - "The --endpoint-type option is required when using the 'openai' service-kind." - ) - else: - if args.endpoint_type == "chat": - args.output_format = OutputFormat.OPENAI_CHAT_COMPLETIONS - elif args.endpoint_type == "completions": - args.output_format = OutputFormat.OPENAI_COMPLETIONS - elif args.endpoint_type == "embeddings": - args.output_format = OutputFormat.OPENAI_EMBEDDINGS - elif args.endpoint_type == "rankings": - args.output_format = OutputFormat.RANKINGS - - # (TMA-1986) deduce vision format from chat completions + image CLI - # because there's no openai vision endpoint. - elif args.endpoint_type == "vision": - args.output_format = OutputFormat.OPENAI_VISION - - if args.endpoint is not None: - args.endpoint = args.endpoint.lstrip(" /") - else: - args.endpoint = _endpoint_type_map[args.endpoint_type] - elif args.endpoint_type is not None: - parser.error( - "The --endpoint-type option should only be used when using the 'openai' service-kind." - ) - - if args.service_kind == "triton": - args = _convert_str_to_enum_entry(args, "backend", OutputFormat) - args.output_format = args.backend - - # Output token distribution checks - if args.output_tokens_mean == LlmInputs.DEFAULT_OUTPUT_TOKENS_MEAN: - if args.output_tokens_stddev != LlmInputs.DEFAULT_OUTPUT_TOKENS_STDDEV: - parser.error( - "The --output-tokens-mean option is required when using --output-tokens-stddev." - ) - if args.output_tokens_mean_deterministic: - parser.error( - "The --output-tokens-mean option is required when using --output-tokens-mean-deterministic." - ) - - if args.service_kind != "triton": - if args.output_tokens_mean_deterministic: - parser.error( - "The --output-tokens-mean-deterministic option is only supported with the Triton service-kind." - ) - - _check_conditional_args_embeddings_rankings(parser, args) - - return args - - -def _check_conditional_args_embeddings_rankings( - parser: argparse.ArgumentParser, args: argparse.Namespace -): - - if args.output_format in [ - OutputFormat.OPENAI_EMBEDDINGS, - OutputFormat.RANKINGS, - ]: - if args.streaming: - parser.error( - f"The --streaming option is not supported with the {args.endpoint_type} endpoint type." - ) - - if args.generate_plots: - parser.error( - f"The --generate-plots option is not currently supported with the {args.endpoint_type} endpoint type." - ) - else: - if args.batch_size != LlmInputs.DEFAULT_BATCH_SIZE: - parser.error( - "The --batch-size option is currently only supported with the embeddings and rankings endpoint types." - ) - - if args.input_file: - _, path_type = args.input_file - if args.output_format != OutputFormat.RANKINGS: - if path_type == "directory": - parser.error( - "A directory is only currently supported for the rankings endpoint type." - ) - else: - if path_type == PathType.FILE: - parser.error( - "The rankings endpoint-type requires a directory value for the --input-file flag." - ) - - -def _check_load_manager_args(args: argparse.Namespace) -> argparse.Namespace: - """ - Check inference load args - """ - # If no concurrency or request rate is set, default to 1 - if not args.concurrency and not args.request_rate: - args.concurrency = 1 - return args - - -def _set_artifact_paths(args: argparse.Namespace) -> argparse.Namespace: - """ - Set paths for all the artifacts. 
- """ - if args.artifact_dir == Path(DEFAULT_ARTIFACT_DIR): - # Preprocess Huggingface model names that include '/' in their model name. - if (args.formatted_model_name is not None) and ( - "/" in args.formatted_model_name - ): - filtered_name = "_".join(args.formatted_model_name.split("/")) - logger.info( - f"Model name '{args.formatted_model_name}' cannot be used to create artifact " - f"directory. Instead, '{filtered_name}' will be used." - ) - name = [f"{filtered_name}"] - else: - name = [f"{args.formatted_model_name}"] - - if args.service_kind == "openai": - name += [f"{args.service_kind}-{args.endpoint_type}"] - elif args.service_kind == "triton": - name += [f"{args.service_kind}-{args.backend.to_lowercase()}"] - else: - raise ValueError(f"Unknown service kind '{args.service_kind}'.") - - if args.concurrency: - name += [f"concurrency{args.concurrency}"] - elif args.request_rate: - name += [f"request_rate{args.request_rate}"] - args.artifact_dir = args.artifact_dir / Path("-".join(name)) - - if args.profile_export_file.parent != Path(""): - raise ValueError( - "Please use --artifact-dir option to define intermediary paths to " - "the profile export file." - ) - - args.profile_export_file = args.artifact_dir / args.profile_export_file - return args - - -def _infer_prompt_source(args: argparse.Namespace) -> argparse.Namespace: - if args.input_dataset: - args.prompt_source = PromptSource.DATASET - logger.debug(f"Input source is the following dataset: {args.input_dataset}") - elif args.input_file: - args.prompt_source = PromptSource.FILE - if args.endpoint_type == "rankings": - logger.debug( - f"Input source is the following directory: {args.input_file[0]}" - ) - else: - logger.debug(f"Input source is the following file: {args.input_file[0]}") - else: - args.prompt_source = PromptSource.SYNTHETIC - logger.debug("Input source is synthetic data") - return args - - -def _convert_str_to_enum_entry(args, option, enum): - """ - Convert string option to corresponding enum entry - """ - attr_val = getattr(args, option) - if attr_val is not None: - setattr(args, f"{option}", utils.get_enum_entry(attr_val, enum)) - return args - - -### Types ### - - -def file_or_directory(path: str) -> Tuple[Path, PathType]: - if os.path.isfile(path): - return (Path(path), PathType.FILE) - elif os.path.isdir(path): - return (Path(path), PathType.DIRECTORY) - else: - raise ValueError(f"'{path}' is not a valid file or directory") - - -### Parsers ### - - -def _add_input_args(parser): - input_group = parser.add_argument_group("Input") - - input_group.add_argument( - "--batch-size", - "-b", - type=int, - default=LlmInputs.DEFAULT_BATCH_SIZE, - required=False, - help=f"The batch size of the requests GenAI-Perf should send. " - "This is currently only supported with the embeddings and rankings endpoint types.", - ) - - input_group.add_argument( - "--extra-inputs", - action="append", - help="Provide additional inputs to include with every request. " - "You can repeat this flag for multiple inputs. Inputs should be in an input_name:value format." 
- "Alternatively, a string representing a json formatted dict can be provided.", - ) - - prompt_source_group = input_group.add_mutually_exclusive_group(required=False) - prompt_source_group.add_argument( - "--input-dataset", - type=str.lower, - default=None, - choices=[OPEN_ORCA, CNN_DAILY_MAIL], - required=False, - help="The HuggingFace dataset to use for prompts.", - ) - - prompt_source_group.add_argument( - "--input-file", - type=file_or_directory, - default=None, - required=False, - help="The input file containing the prompts to use for profiling. " - "Each line should be a JSON object with a 'text_input' field in JSONL format. " - 'Example: {"text_input": "Your prompt here"}' - "For the rankings endpoint-type, a directory should be passed in instead with " - 'a "queries.jsonl" file and a "passages.jsonl" file with the same format.', - ) - - input_group.add_argument( - "--num-prompts", - type=int, - default=LlmInputs.DEFAULT_NUM_PROMPTS, - required=False, - help=f"The number of unique prompts to generate as stimulus.", - ) - - input_group.add_argument( - "--output-tokens-mean", - type=int, - default=LlmInputs.DEFAULT_OUTPUT_TOKENS_MEAN, - required=False, - help=f"The mean number of tokens in each output. " - "Ensure the --tokenizer value is set correctly. ", - ) - - input_group.add_argument( - "--output-tokens-mean-deterministic", - action="store_true", - required=False, - help=f"When using --output-tokens-mean, this flag can be set to " - "improve precision by setting the minimum number of tokens " - "equal to the requested number of tokens. This is currently " - "supported with the Triton service-kind. " - "Note that there is still some variability in the requested number " - "of output tokens, but GenAi-Perf attempts its best effort with your " - "model to get the right number of output tokens. ", - ) - - input_group.add_argument( - "--output-tokens-stddev", - type=int, - default=LlmInputs.DEFAULT_OUTPUT_TOKENS_STDDEV, - required=False, - help=f"The standard deviation of the number of tokens in each output. 
" - "This is only used when --output-tokens-mean is provided.", - ) - - input_group.add_argument( - "--random-seed", - type=int, - default=LlmInputs.DEFAULT_RANDOM_SEED, - required=False, - help="The seed used to generate random values.", - ) - - input_group.add_argument( - "--synthetic-input-tokens-mean", - type=int, - default=LlmInputs.DEFAULT_PROMPT_TOKENS_MEAN, - required=False, - help=f"The mean of number of tokens in the generated prompts when using synthetic data.", - ) - - input_group.add_argument( - "--synthetic-input-tokens-stddev", - type=int, - default=LlmInputs.DEFAULT_PROMPT_TOKENS_STDDEV, - required=False, - help=f"The standard deviation of number of tokens in the generated prompts when using synthetic data.", - ) - - -def _add_image_input_args(parser): - input_group = parser.add_argument_group("Image Input") - - input_group.add_argument( - "--image-width-mean", - type=int, - default=LlmInputs.DEFAULT_IMAGE_WIDTH_MEAN, - required=False, - help=f"The mean width of images when generating synthetic image data.", - ) - - input_group.add_argument( - "--image-width-stddev", - type=int, - default=LlmInputs.DEFAULT_IMAGE_WIDTH_STDDEV, - required=False, - help=f"The standard deviation of width of images when generating synthetic image data.", - ) - - input_group.add_argument( - "--image-height-mean", - type=int, - default=LlmInputs.DEFAULT_IMAGE_HEIGHT_MEAN, - required=False, - help=f"The mean height of images when generating synthetic image data.", - ) - - input_group.add_argument( - "--image-height-stddev", - type=int, - default=LlmInputs.DEFAULT_IMAGE_HEIGHT_STDDEV, - required=False, - help=f"The standard deviation of height of images when generating synthetic image data.", - ) - - input_group.add_argument( - "--image-format", - type=str, - choices=utils.get_enum_names(ImageFormat), - required=False, - help=f"The compression format of the images. " - "If format is not selected, format of generated image is selected at random", - ) - - -def _add_profile_args(parser): - profile_group = parser.add_argument_group("Profiling") - load_management_group = profile_group.add_mutually_exclusive_group(required=False) - - load_management_group.add_argument( - "--concurrency", - type=int, - required=False, - help="The concurrency value to benchmark.", - ) - - profile_group.add_argument( - "--measurement-interval", - "-p", - type=int, - default="10000", - required=False, - help="The time interval used for each measurement in milliseconds. " - "Perf Analyzer will sample a time interval specified and take " - "measurement over the requests completed within that time interval.", - ) - - load_management_group.add_argument( - "--request-rate", - type=float, - required=False, - help="Sets the request rate for the load generated by PA.", - ) - - profile_group.add_argument( - "-s", - "--stability-percentage", - type=float, - default=999, - required=False, - help="The allowed variation in " - "latency measurements when determining if a result is stable. 
The " - "measurement is considered as stable if the ratio of max / min " - "from the recent 3 measurements is within (stability percentage) " - "in terms of both infer per second and latency.", - ) - - -def _add_endpoint_args(parser): - endpoint_group = parser.add_argument_group("Endpoint") - - endpoint_group.add_argument( - "-m", - "--model", - nargs="+", - default=[], - help=f"The name of the model(s) to benchmark.", - ) - endpoint_group.add_argument( - "--model-selection-strategy", - type=str, - choices=utils.get_enum_names(ModelSelectionStrategy), - default="round_robin", - required=False, - help=f"When multiple model are specified, this is how a specific model " - "should be assigned to a prompt. round_robin means that ith prompt in the " - "list gets assigned to i mod len(models). random means that assignment is " - "uniformly random", - ) - - endpoint_group.add_argument( - "--backend", - type=str, - choices=utils.get_enum_names(OutputFormat)[2:], - default="tensorrtllm", - required=False, - help=f'When using the "triton" service-kind, ' - "this is the backend of the model. " - "For the TENSORRT-LLM backend, you currently must set " - "'exclude_input_in_output' to true in the model config to " - "not echo the input tokens in the output.", - ) - - endpoint_group.add_argument( - "--endpoint", - type=str, - required=False, - help=f"Set a custom endpoint that differs from the OpenAI defaults.", - ) - - endpoint_group.add_argument( - "--endpoint-type", - type=str, - choices=["chat", "completions", "embeddings", "rankings", "vision"], - required=False, - help=f"The endpoint-type to send requests to on the " - 'server. This is only used with the "openai" service-kind.', - ) - - endpoint_group.add_argument( - "--service-kind", - type=str, - choices=["triton", "openai"], - default="triton", - required=False, - help="The kind of service perf_analyzer will " - 'generate load for. In order to use "openai", ' - "you must specify an api via --endpoint-type.", - ) - - endpoint_group.add_argument( - "--streaming", - action="store_true", - required=False, - help=f"An option to enable the use of the streaming API.", - ) - - endpoint_group.add_argument( - "-u", - "--url", - type=str, - required=False, - dest="u", - metavar="URL", - help="URL of the endpoint to target for benchmarking.", - ) - - -def _add_output_args(parser): - output_group = parser.add_argument_group("Output") - output_group.add_argument( - "--artifact-dir", - type=Path, - default=Path(DEFAULT_ARTIFACT_DIR), - help="The directory to store all the (output) artifacts generated by " - "GenAI-Perf and Perf Analyzer.", - ) - output_group.add_argument( - "--generate-plots", - action="store_true", - required=False, - help="An option to enable the generation of plots.", - ) - output_group.add_argument( - "--profile-export-file", - type=Path, - default=Path("profile_export.json"), - help="The path where the perf_analyzer profile export will be " - "generated. By default, the profile export will be to profile_export.json. " - "The genai-perf file will be exported to _genai_perf.csv. 
" - "For example, if the profile export file is profile_export.json, the genai-perf file will be " - "exported to profile_export_genai_perf.csv.", - ) - - -def _add_other_args(parser): - other_group = parser.add_argument_group("Other") - - other_group.add_argument( - "--tokenizer", - type=str, - default=DEFAULT_TOKENIZER, - required=False, - help="The HuggingFace tokenizer to use to interpret token metrics from prompts and responses.", - ) - - other_group.add_argument( - "-v", - "--verbose", - action="store_true", - required=False, - help="An option to enable verbose mode.", - ) - - -def get_extra_inputs_as_dict(args: argparse.Namespace) -> dict: - request_inputs = {} - if args.extra_inputs: - for input_str in args.extra_inputs: - if input_str.startswith("{") and input_str.endswith("}"): - request_inputs.update(utils.load_json_str(input_str)) - else: - semicolon_count = input_str.count(":") - if semicolon_count != 1: - raise ValueError( - f"Invalid input format for --extra-inputs: {input_str}\n" - "Expected input format: 'input_name:value'" - ) - input_name, value = input_str.split(":", 1) - - if not input_name or not value: - raise ValueError( - f"Input name or value is empty in --extra-inputs: {input_str}\n" - "Expected input format: 'input_name:value'" - ) - - is_bool = value.lower() in ["true", "false"] - is_int = value.isdigit() - is_float = value.count(".") == 1 and ( - value[0] == "." or value.replace(".", "").isdigit() - ) - - if is_bool: - value = value.lower() == "true" - elif is_int: - value = int(value) - elif is_float: - value = float(value) - - if input_name in request_inputs: - raise ValueError( - f"Input name already exists in request_inputs dictionary: {input_name}" - ) - request_inputs[input_name] = value - - return request_inputs - - -def _parse_compare_args(subparsers) -> argparse.ArgumentParser: - compare = subparsers.add_parser( - Subcommand.COMPARE.to_lowercase(), - description="Subcommand to generate plots that compare multiple profile runs.", - ) - compare_group = compare.add_argument_group("Input") - mx_group = compare_group.add_mutually_exclusive_group(required=False) - mx_group.add_argument( - "--config", - type=Path, - default=None, - help="The path to the YAML file that specifies plot configurations for " - "comparing multiple runs.", - ) - mx_group.add_argument( - "-f", - "--files", - nargs="+", - default=[], - help="List of paths to the profile export JSON files. 
Users can specify " - "this option instead of the `--config` option if they would like " - "GenAI-Perf to generate default plots as well as initial YAML config file.", - ) - compare.set_defaults(func=compare_handler) - return compare - - -def _parse_profile_args(subparsers) -> argparse.ArgumentParser: - profile = subparsers.add_parser( - Subcommand.PROFILE.to_lowercase(), - description="Subcommand to profile LLMs and Generative AI models.", - ) - _add_endpoint_args(profile) - _add_input_args(profile) - _add_image_input_args(profile) - _add_profile_args(profile) - _add_output_args(profile) - _add_other_args(profile) - profile.set_defaults(func=profile_handler) - return profile - - -### Handlers ### - - -def create_compare_dir() -> None: - if not os.path.exists(DEFAULT_COMPARE_DIR): - os.mkdir(DEFAULT_COMPARE_DIR) - - -def compare_handler(args: argparse.Namespace): - """Handles `compare` subcommand workflow.""" - if args.files: - create_compare_dir() - output_dir = Path(f"{DEFAULT_COMPARE_DIR}") - PlotConfigParser.create_init_yaml_config(args.files, output_dir) - args.config = output_dir / "config.yaml" - - config_parser = PlotConfigParser(args.config) - plot_configs = config_parser.generate_configs() - plot_manager = PlotManager(plot_configs) - plot_manager.generate_plots() - - -def profile_handler(args, extra_args): - from genai_perf.wrapper import Profiler - - Profiler.run(args=args, extra_args=extra_args) - - -### Parser Initialization ### - - -def init_parsers(): - parser = argparse.ArgumentParser( - prog="genai-perf", - description="CLI to profile LLMs and Generative AI models with Perf Analyzer", - formatter_class=argparse.ArgumentDefaultsHelpFormatter, - ) - parser.add_argument( - "--version", - action="version", - version="%(prog)s " + __version__, - help=f"An option to print the version and exit.", - ) - - # Add subcommands - subparsers = parser.add_subparsers( - help="List of subparser commands.", dest="subcommand" - ) - _ = _parse_compare_args(subparsers) - _ = _parse_profile_args(subparsers) - subparsers.required = True - - return parser - - -def get_passthrough_args_index(argv: list) -> int: - if "--" in argv: - passthrough_index = argv.index("--") - logger.info(f"Detected passthrough args: {argv[passthrough_index + 1:]}") - else: - passthrough_index = len(argv) - - return passthrough_index - - -def refine_args( - parser: argparse.ArgumentParser, args: argparse.Namespace -) -> argparse.Namespace: - if args.subcommand == Subcommand.PROFILE.to_lowercase(): - args = _infer_prompt_source(args) - args = _check_model_args(parser, args) - args = _check_conditional_args(parser, args) - args = _check_image_input_args(parser, args) - args = _check_load_manager_args(args) - args = _set_artifact_paths(args) - elif args.subcommand == Subcommand.COMPARE.to_lowercase(): - args = _check_compare_args(parser, args) - else: - raise ValueError(f"Unknown subcommand: {args.subcommand}") - - return args - - -### Entrypoint ### - - -def parse_args(): - argv = sys.argv - - parser = init_parsers() - passthrough_index = get_passthrough_args_index(argv) - args = parser.parse_args(argv[1:passthrough_index]) - args = refine_args(parser, args) - - return args, argv[passthrough_index + 1 :] diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/plots/__init__.py b/src/c++/perf_analyzer/genai-perf/genai_perf/plots/__init__.py deleted file mode 100755 index 086616e41..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/plots/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env python3 -# 
Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/plots/base_plot.py b/src/c++/perf_analyzer/genai-perf/genai_perf/plots/base_plot.py deleted file mode 100755 index 470e0b942..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/plots/base_plot.py +++ /dev/null @@ -1,82 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
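Back in parser.py, get_extra_inputs_as_dict coerces each plain "name:value" pair to bool, int, or float before falling back to a string (JSON-dict inputs are instead passed through load_json_str). A condensed, self-contained sketch of that coercion, with illustrative input names and values:

    def coerce(value: str):
        # Same precedence as get_extra_inputs_as_dict: bool, then int, then float.
        if value.lower() in ("true", "false"):
            return value.lower() == "true"
        if value.isdigit():
            return int(value)
        if value.count(".") == 1 and (value[0] == "." or value.replace(".", "").isdigit()):
            return float(value)
        return value

    pairs = (s.split(":", 1) for s in ["max_tokens:256", "ignore_eos:true", "temperature:0.7"])
    print({name: coerce(value) for name, value in pairs})
    # {'max_tokens': 256, 'ignore_eos': True, 'temperature': 0.7}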
- -from pathlib import Path -from typing import List - -import pandas as pd -from genai_perf.exceptions import GenAIPerfException -from genai_perf.plots.plot_config import ProfileRunData -from plotly.graph_objects import Figure - - -class BasePlot: - """ - Base class for plots - """ - - def __init__(self, data: List[ProfileRunData]) -> None: - self._profile_data = data - - def create_plot( - self, - graph_title: str, - x_label: str, - y_label: str, - width: int, - height: int, - filename_root: str, - output_dir: Path, - ) -> None: - """ - Create plot for specific graph type - """ - raise NotImplementedError - - def _create_dataframe(self, x_label: str, y_label: str) -> pd.DataFrame: - return pd.DataFrame( - { - x_label: [prd.x_metric for prd in self._profile_data], - y_label: [prd.y_metric for prd in self._profile_data], - "Run Name": [prd.name for prd in self._profile_data], - } - ) - - def _generate_parquet(self, df: pd.DataFrame, output_dir: Path, file: str) -> None: - filepath = output_dir / f"{file}.gzip" - df.to_parquet(filepath, compression="gzip") - - def _generate_graph_file(self, fig: Figure, output_dir: Path, file: str) -> None: - if file.endswith("jpeg"): - filepath = output_dir / f"{file}" - fig.write_image(filepath) - elif file.endswith("html"): - filepath = output_dir / f"{file}" - fig.write_html(filepath) - else: - extension = file.split(".")[-1] - raise GenAIPerfException(f"image file type {extension} is not supported") diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/plots/box_plot.py b/src/c++/perf_analyzer/genai-perf/genai_perf/plots/box_plot.py deleted file mode 100755 index 38aad36dc..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/plots/box_plot.py +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
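BasePlot above dispatches on file extension when exporting figures and writes the backing data as gzip-compressed parquet. A minimal usage sketch (assumes pandas, plotly, pyarrow, and kaleido are installed; filenames and values are placeholders):

    from pathlib import Path

    import pandas as pd
    import plotly.graph_objects as go

    out = Path(".")
    df = pd.DataFrame({"TTFT (ms)": [12.5, 14.1], "Run Name": ["run1", "run1"]})
    df.to_parquet(out / "demo.gzip", compression="gzip")  # as in _generate_parquet

    fig = go.Figure(go.Scatter(y=df["TTFT (ms)"], mode="markers"))
    fig.write_html(out / "demo.html")    # the ".html" branch of _generate_graph_file
    fig.write_image(out / "demo.jpeg")   # the ".jpeg" branch (needs kaleido)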
- -from pathlib import Path -from typing import List - -import plotly.graph_objects as go -from genai_perf.plots.base_plot import BasePlot -from genai_perf.plots.plot_config import ProfileRunData - - -class BoxPlot(BasePlot): - """ - Generate a box plot in jpeg and html format. - """ - - def __init__(self, data: List[ProfileRunData]) -> None: - super().__init__(data) - - def create_plot( - self, - graph_title: str = "", - x_label: str = "", - y_label: str = "", - width: int = 700, - height: int = 450, - filename_root: str = "", - output_dir: Path = Path(""), - ) -> None: - fig = go.Figure() - for pd in self._profile_data: - fig.add_trace(go.Box(y=pd.y_metric, name=pd.name)) - - # Update layout and axis labels - fig.update_layout( - title={ - "text": f"{graph_title}", - "xanchor": "center", - "x": 0.5, - }, - width=width, - height=height, - ) - fig.update_traces(boxpoints="all") - fig.update_xaxes(title_text=x_label, showticklabels=False) - fig.update_yaxes(title_text=y_label) - - # Save dataframe as parquet file - df = self._create_dataframe(x_label, y_label) - self._generate_parquet(df, output_dir, filename_root) - - self._generate_graph_file(fig, output_dir, filename_root + ".html") - self._generate_graph_file(fig, output_dir, filename_root + ".jpeg") diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/plots/heat_map.py b/src/c++/perf_analyzer/genai-perf/genai_perf/plots/heat_map.py deleted file mode 100755 index 7f4dbe166..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/plots/heat_map.py +++ /dev/null @@ -1,100 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from pathlib import Path -from typing import List - -import plotly.graph_objects as go -from genai_perf.plots.base_plot import BasePlot -from genai_perf.plots.plot_config import ProfileRunData -from plotly.subplots import make_subplots - - -class HeatMap(BasePlot): - """ - Generate a heat map in jpeg and html format. 
- """ - - def __init__(self, data: List[ProfileRunData]) -> None: - super().__init__(data) - - def create_plot( - self, - graph_title: str = "", - x_label: str = "", - y_label: str = "", - width: int = 700, - height: int = 450, - filename_root: str = "", - output_dir: Path = Path(""), - ) -> None: - N = len(self._profile_data) - - if N <= 3: - n_rows, n_cols = 1, N - else: - n_rows = (N + 2) // 3 - n_cols = 3 - - fig = make_subplots( - rows=n_rows, - cols=n_cols, - x_title=x_label, - y_title=y_label, - subplot_titles=[prd.name for prd in self._profile_data], - ) - - for index, prd in enumerate(self._profile_data): - hm = go.Histogram2d( - x=prd.x_metric, - y=prd.y_metric, - coloraxis="coloraxis", - name=prd.name, - ) - - # Calculate the location where the figure should be added in the subplot - c_row = int(index / n_cols) + 1 - c_col = index % n_cols + 1 - fig.add_trace(hm, c_row, c_col) - - fig.update_layout( - title={ - "text": graph_title, - "xanchor": "center", - "x": 0.5, - }, - width=width, - height=height, - ) - - # Save dataframe as parquet file - df = self._create_dataframe(x_label, y_label) - self._generate_parquet(df, output_dir, filename_root) - - # self._generate_parquet(df, filename_root) - self._generate_graph_file(fig, output_dir, filename_root + ".html") - self._generate_graph_file(fig, output_dir, filename_root + ".jpeg") diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/plots/plot_config.py b/src/c++/perf_analyzer/genai-perf/genai_perf/plots/plot_config.py deleted file mode 100755 index 2408d0591..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/plots/plot_config.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -from collections.abc import Sequence -from dataclasses import dataclass -from enum import Enum, auto -from pathlib import Path -from typing import List, Sequence, Union - - -class PlotType(Enum): - SCATTER = auto() - BOX = auto() - HEATMAP = auto() - - -@dataclass -class ProfileRunData: - name: str - x_metric: Sequence[Union[int, float]] - y_metric: Sequence[Union[int, float]] - - -@dataclass -class PlotConfig: - title: str - data: List[ProfileRunData] - x_label: str - y_label: str - width: int - height: int - type: PlotType - output: Path diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/plots/plot_config_parser.py b/src/c++/perf_analyzer/genai-perf/genai_perf/plots/plot_config_parser.py deleted file mode 100755 index 00588f6bb..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/plots/plot_config_parser.py +++ /dev/null @@ -1,211 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from pathlib import Path -from typing import List, Union - -import genai_perf.logging as logging - -# Skip type checking to avoid mypy error -# Issue: https://github.com/python/mypy/issues/10632 -import yaml # type: ignore -from genai_perf.metrics import Statistics -from genai_perf.plots.plot_config import PlotConfig, PlotType, ProfileRunData -from genai_perf.profile_data_parser import LLMProfileDataParser -from genai_perf.tokenizer import DEFAULT_TOKENIZER, get_tokenizer -from genai_perf.utils import load_yaml, scale - -logger = logging.getLogger(__name__) - - -class PlotConfigParser: - """Parses YAML configuration file to generate PlotConfigs.""" - - def __init__(self, filename: Path) -> None: - self._filename = filename - - def generate_configs(self) -> List[PlotConfig]: - """Load YAML configuration file and convert to PlotConfigs.""" - logger.info( - f"Generating plot configurations by parsing {self._filename}. 
" - "This may take a few seconds.", - ) - configs = load_yaml(self._filename) - - plot_configs = [] - for _, config in configs.items(): - # Collect profile run data - profile_data: List[ProfileRunData] = [] - for filepath in config["paths"]: - stats = self._get_statistics(filepath) - profile_data.append( - ProfileRunData( - name=self._get_run_name(Path(filepath)), - x_metric=self._get_metric(stats, config["x_metric"]), - y_metric=self._get_metric(stats, config["y_metric"]), - ) - ) - - plot_configs.append( - PlotConfig( - title=config["title"], - data=profile_data, - x_label=config["x_label"], - y_label=config["y_label"], - width=config["width"], - height=config["height"], - type=self._get_plot_type(config["type"]), - output=Path(config["output"]), - ) - ) - - return plot_configs - - def _get_statistics(self, filepath: str) -> Statistics: - """Extract a single profile run data.""" - data_parser = LLMProfileDataParser( - filename=Path(filepath), - tokenizer=get_tokenizer(DEFAULT_TOKENIZER), - ) - load_info = data_parser.get_profile_load_info() - - # TMA-1904: Remove single experiment assumption - assert len(load_info) == 1 - infer_mode, load_level = load_info[0] - stats = data_parser.get_statistics(infer_mode, load_level) - return stats - - def _get_run_name(self, filepath: Path) -> str: - """Construct a profile run name.""" - if filepath.parent.name: - return filepath.parent.name + "/" + filepath.stem - return filepath.stem - - def _get_metric(self, stats: Statistics, name: str) -> List[Union[int, float]]: - if not name: # no metric - return [] - elif name == "inter_token_latencies": - itls = stats.metrics.data[name] - return [scale(x, (1 / 1e6)) for x in itls] # ns to ms - elif name == "token_positions": - chunked_itls = getattr(stats.metrics, "_chunked_inter_token_latencies") - token_positions: List[Union[int, float]] = [] - for request_itls in chunked_itls: - token_positions += list(range(1, len(request_itls) + 1)) - return token_positions - elif name == "time_to_first_tokens": - ttfts = stats.metrics.data[name] - return [scale(x, (1 / 1e6)) for x in ttfts] # ns to ms - elif name == "request_latencies": - req_latencies = stats.metrics.data[name] - return [scale(x, (1 / 1e6)) for x in req_latencies] # ns to ms - - return stats.metrics.data[name] - - def _get_plot_type(self, plot_type: str) -> PlotType: - """Returns the plot type as PlotType object.""" - if plot_type == "scatter": - return PlotType.SCATTER - elif plot_type == "box": - return PlotType.BOX - elif plot_type == "heatmap": - return PlotType.HEATMAP - else: - raise ValueError( - "Unknown plot type encountered while parsing YAML configuration. " - "Plot type must be either 'scatter', 'box', or 'heatmap'." 
- ) - - @staticmethod - def create_init_yaml_config(filenames: List[Path], output_dir: Path) -> None: - config_str = f""" - plot1: - title: Time to First Token - x_metric: "" - y_metric: time_to_first_tokens - x_label: Time to First Token (ms) - y_label: "" - width: {1200 if len(filenames) > 1 else 700} - height: 450 - type: box - paths: {[str(f) for f in filenames]} - output: {output_dir} - - plot2: - title: Request Latency - x_metric: "" - y_metric: request_latencies - x_label: Request Latency (ms) - y_label: "" - width: {1200 if len(filenames) > 1 else 700} - height: 450 - type: box - paths: {[str(f) for f in filenames]} - output: {output_dir} - - plot3: - title: Distribution of Input Sequence Lengths to Output Sequence Lengths - x_metric: input_sequence_lengths - y_metric: output_sequence_lengths - x_label: Input Sequence Length - y_label: Output Sequence Length - width: {1200 if len(filenames) > 1 else 700} - height: 450 - type: heatmap - paths: {[str(f) for f in filenames]} - output: {output_dir} - - plot4: - title: Time to First Token vs Input Sequence Lengths - x_metric: input_sequence_lengths - y_metric: time_to_first_tokens - x_label: Input Sequence Length - y_label: Time to First Token (ms) - width: {1200 if len(filenames) > 1 else 700} - height: 450 - type: scatter - paths: {[str(f) for f in filenames]} - output: {output_dir} - - plot5: - title: Token-to-Token Latency vs Output Token Position - x_metric: token_positions - y_metric: inter_token_latencies - x_label: Output Token Position - y_label: Token-to-Token Latency (ms) - width: {1200 if len(filenames) > 1 else 700} - height: 450 - type: scatter - paths: {[str(f) for f in filenames]} - output: {output_dir} - """ - - filepath = output_dir / "config.yaml" - logger.info(f"Creating initial YAML configuration file to {filepath}") - config = yaml.safe_load(config_str) - with open(str(filepath), "w") as f: - yaml.dump(config, f, sort_keys=False) diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/plots/plot_manager.py b/src/c++/perf_analyzer/genai-perf/genai_perf/plots/plot_manager.py deleted file mode 100755 index e548a7de7..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/plots/plot_manager.py +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from typing import List - -import genai_perf.logging as logging -from genai_perf.plots.box_plot import BoxPlot -from genai_perf.plots.heat_map import HeatMap -from genai_perf.plots.plot_config import PlotConfig, PlotType -from genai_perf.plots.scatter_plot import ScatterPlot - -logger = logging.getLogger(__name__) - - -class PlotManager: - """ - Manage details around plots generated - """ - - def __init__(self, plot_configs: List[PlotConfig]) -> None: - self._plot_configs = plot_configs - - def _generate_filename(self, title: str) -> str: - filename = "_".join(title.lower().split()) - return filename - - def generate_plots(self) -> None: - for plot_config in self._plot_configs: - logger.info(f"Generating '{plot_config.title}' plot") - if plot_config.type == PlotType.BOX: - bp = BoxPlot(plot_config.data) - bp.create_plot( - graph_title=plot_config.title, - x_label=plot_config.x_label, - width=plot_config.width, - height=plot_config.height, - filename_root=self._generate_filename(plot_config.title), - output_dir=plot_config.output, - ) - - elif plot_config.type == PlotType.HEATMAP: - hm = HeatMap(plot_config.data) - hm.create_plot( - graph_title=plot_config.title, - x_label=plot_config.x_label, - y_label=plot_config.y_label, - width=plot_config.width, - height=plot_config.height, - filename_root=self._generate_filename(plot_config.title), - output_dir=plot_config.output, - ) - - elif plot_config.type == PlotType.SCATTER: - sp = ScatterPlot(plot_config.data) - sp.create_plot( - graph_title=plot_config.title, - x_label=plot_config.x_label, - y_label=plot_config.y_label, - width=plot_config.width, - height=plot_config.height, - filename_root=self._generate_filename(plot_config.title), - output_dir=plot_config.output, - ) diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/plots/scatter_plot.py b/src/c++/perf_analyzer/genai-perf/genai_perf/plots/scatter_plot.py deleted file mode 100755 index 35dca8fc3..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/plots/scatter_plot.py +++ /dev/null @@ -1,82 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from pathlib import Path -from typing import List - -import plotly.graph_objects as go -from genai_perf.plots.base_plot import BasePlot -from genai_perf.plots.plot_config import ProfileRunData - - -class ScatterPlot(BasePlot): - """ - Generate a scatter plot in jpeg and html format. - """ - - def __init__(self, data: List[ProfileRunData]) -> None: - super().__init__(data) - - def create_plot( - self, - graph_title: str = "", - x_label: str = "", - y_label: str = "", - width: int = 700, - height: int = 450, - filename_root: str = "", - output_dir: Path = Path(""), - ) -> None: - fig = go.Figure() - for pd in self._profile_data: - fig.add_trace( - go.Scatter( - x=pd.x_metric, - y=pd.y_metric, - mode="markers", - name=pd.name, - ) - ) - - fig.update_layout( - title={ - "text": f"{graph_title}", - "xanchor": "center", - "x": 0.5, - }, - width=width, - height=height, - ) - fig.update_xaxes(title_text=f"{x_label}") - fig.update_yaxes(title_text=f"{y_label}") - - # Save dataframe as parquet file - df = self._create_dataframe(x_label, y_label) - self._generate_parquet(df, output_dir, filename_root) - - self._generate_graph_file(fig, output_dir, filename_root + ".html") - self._generate_graph_file(fig, output_dir, filename_root + ".jpeg") diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/profile_data_parser/__init__.py b/src/c++/perf_analyzer/genai-perf/genai_perf/profile_data_parser/__init__.py deleted file mode 100644 index 2e7798c40..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/profile_data_parser/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from genai_perf.profile_data_parser.llm_profile_data_parser import LLMProfileDataParser -from genai_perf.profile_data_parser.profile_data_parser import ( - ProfileDataParser, - ResponseFormat, -) diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/profile_data_parser/llm_profile_data_parser.py b/src/c++/perf_analyzer/genai-perf/genai_perf/profile_data_parser/llm_profile_data_parser.py deleted file mode 100755 index 183f21fd2..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/profile_data_parser/llm_profile_data_parser.py +++ /dev/null @@ -1,299 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import json -from itertools import tee -from pathlib import Path -from typing import Dict, List, Tuple - -from genai_perf.metrics import LLMMetrics, Metrics -from genai_perf.profile_data_parser.profile_data_parser import ( - ProfileDataParser, - ResponseFormat, -) -from genai_perf.tokenizer import Tokenizer -from genai_perf.utils import load_json_str, remove_sse_prefix - - -class LLMProfileDataParser(ProfileDataParser): - """A class that calculates and aggregates all the LLM performance statistics - across the Perf Analyzer profile results. - - The LLMProfileDataParser class parses profile export JSON file, collects the - core LLM performance metrics, and calculates summary statistics for each - different Perf Analyzer runs/experiments. - - Example: - - >>> ... 
# run Perf Analyzer with concurrency level 10 - >>> - >>> from transformers import AutoTokenizer - >>> - >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") - >>> pd = LLMProfileDataParser( - >>> filename="profile_export.json", - >>> tokenizer=tokenizer, - >>> ) - >>> stats = pd.get_statistics(infer_mode="concurrency", level=10) - >>> - >>> print(stats) # output: Statistics(avg_time_to_first_token=...) - >>> stats.pretty_print() # Output: time_to_first_token_s: ... - """ - - def __init__( - self, - filename: Path, - tokenizer: Tokenizer, - ) -> None: - self._tokenizer = tokenizer - super().__init__(filename) - - def _parse_requests(self, requests: dict) -> Metrics: - """Parse each requests in profile export data to extract key metrics.""" - min_req_timestamp, max_res_timestamp = float("inf"), 0 - request_latencies = [] - time_to_first_tokens = [] - inter_token_latencies = [] - output_token_throughputs_per_request = [] - input_sequence_lengths = [] - output_sequence_lengths = [] - chunked_inter_token_latencies = [] - - for request in requests: - req_timestamp = request["timestamp"] - req_inputs = request["request_inputs"] - res_timestamps = request["response_timestamps"] - res_outputs = request["response_outputs"] - - self._preprocess_response(res_timestamps, res_outputs) - - # Skip requests with empty response. This happens sometimes when the - # model returns a single response with empty string. - if not res_timestamps: - continue - - # track entire benchmark duration - min_req_timestamp = min(min_req_timestamp, req_timestamp) - max_res_timestamp = max(max_res_timestamp, res_timestamps[-1]) - - # request latencies - req_latency_ns = res_timestamps[-1] - req_timestamp - request_latencies.append(req_latency_ns) # nanosec - req_latency_s = req_latency_ns / 1e9 # sec - - # time to first token - ttft = res_timestamps[0] - req_timestamp - time_to_first_tokens.append(ttft) - - # number of input tokens - input_seq_len = self._get_input_token_count(req_inputs) - input_sequence_lengths.append(input_seq_len) - - # output token throughput per request - output_token_counts, total_output_token = self._get_output_token_counts( - res_outputs - ) - output_token_throughputs_per_request.append( - total_output_token / req_latency_s - ) - output_sequence_lengths.append(total_output_token) - - # inter token latencies - if total_output_token > 1: - inter_token_latency = (req_latency_ns - ttft) / (total_output_token - 1) - inter_token_latencies.append(round(inter_token_latency)) - - # The new ITL calculation above loses all token-level ITL information - # and as a result breaks ITL vs token position visualization. Keep - # the old version of inter token latency as a WAR to preserve the - # visualization. - chunked_inter_token_latency = [] - for (t1, _), (t2, n2) in self._pairwise( - zip(res_timestamps, output_token_counts) - ): - # TMA-1676: handle empty first/last responses - # if the latter response has zero token (e.g. empty string), - # then set it default to one for the sake of inter token latency - # calculation and to avoid divide by zero. 
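    # (Illustration with assumed values: for response timestamps [t0, t1, t2]
    #  paired with per-response token counts [4, 0, 2], the loop below yields
    #  (t1 - t0) / 1 for the empty middle response, since the zero count falls
    #  back to one, and (t2 - t1) / 2 for the final chunk.)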
- num_token = 1 if n2 == 0 else n2 - chunked_inter_token_latency.append(round((t2 - t1) / num_token)) - chunked_inter_token_latencies.append(chunked_inter_token_latency) - - # request & output token throughput - benchmark_duration = (max_res_timestamp - min_req_timestamp) / 1e9 # nanosec - request_throughputs = [len(requests) / benchmark_duration] - output_token_throughputs = [sum(output_sequence_lengths) / benchmark_duration] - - return LLMMetrics( - request_throughputs, - request_latencies, - time_to_first_tokens, - inter_token_latencies, - output_token_throughputs, - output_token_throughputs_per_request, - output_sequence_lengths, - input_sequence_lengths, - chunked_inter_token_latencies, - ) - - def _pairwise(self, iterable): - """Generate pairs of consecutive elements from the given iterable.""" - a, b = tee(iterable) - next(b, None) - return zip(a, b) - - def _preprocess_response( - self, res_timestamps: List[int], res_outputs: List[Dict[str, str]] - ) -> None: - """Helper function to preprocess responses of a request.""" - if self._service_kind == "openai": - # PA sometimes receives multiple SSE responses at once (as a single - # response). Handle these responses by merging into a single response. - for i in range(len(res_outputs)): - response = res_outputs[i]["response"] - responses = response.strip().split("\n\n") - if len(responses) > 1: - merged_response = load_json_str(remove_sse_prefix(responses[0])) - if ( - merged_response["choices"][0]["delta"].get("content", None) - is None - ): - merged_response["choices"][0]["delta"]["content"] = "" - for r in responses[1:]: - text = self._extract_openai_text_output(r) - merged_response["choices"][0]["delta"]["content"] += text - - res_outputs[i] = {"response": json.dumps(merged_response)} - - # Remove responses without any content - indices_to_remove = [] - for idx, out in enumerate(res_outputs): - if self._is_openai_empty_response(out["response"]): - indices_to_remove.append(idx) - indices_to_remove.sort(reverse=True) - for index in indices_to_remove: - res_timestamps.pop(index) - res_outputs.pop(index) - - def _get_input_token_count(self, req_inputs: dict) -> int: - """Deserialize the request input and return tokenized inputs.""" - if self._service_kind == "triton": - input_text = req_inputs["text_input"] - elif self._service_kind == "openai": - input_text = self._get_openai_input_text(req_inputs) - else: - raise ValueError(f"Unknown service kind: '{self._service_kind}'.") - - return len(self._tokenizer.encode(input_text)) - - def _get_openai_input_text(self, req_inputs: dict) -> str: - """Tokenize the OpenAI request input texts.""" - payload = load_json_str(req_inputs["payload"]) - if self._response_format == ResponseFormat.OPENAI_CHAT_COMPLETIONS: - return payload["messages"][0]["content"] - elif self._response_format == ResponseFormat.OPENAI_COMPLETIONS: - return payload["prompt"] - elif self._response_format == ResponseFormat.OPENAI_VISION: - content = payload["messages"][0]["content"] - return " ".join(c["text"] for c in content if c["type"] == "text") - else: - raise ValueError( - "Failed to parse OpenAI request input in profile export file." 
- ) - - def _get_output_token_counts( - self, res_outputs: List[Dict] - ) -> Tuple[List[int], int]: - """Return response-level token counts and total token count.""" - if self._service_kind == "triton": - output_texts = self._get_triton_output_tokens(res_outputs) - elif self._service_kind == "openai": - output_texts = self._get_openai_output_tokens(res_outputs) - else: - raise ValueError(f"Unknown service kind: '{self._service_kind}'.") - - full_text_token_count = len(self._tokenizer.encode("".join(output_texts))) - - output_tokens = self._get_response_output_tokens(output_texts) - output_token_counts = list(map(len, output_tokens)) - return output_token_counts, full_text_token_count - - def _get_triton_output_tokens(self, res_outputs: List[Dict]) -> List[str]: - """Return a list of Triton response texts.""" - return [r["text_output"] for r in res_outputs] - - def _get_openai_output_tokens(self, res_outputs: List[Dict]) -> List[str]: - """Return a list of OpenAI response texts.""" - output_texts = [] - for output in res_outputs: - text = self._extract_openai_text_output(output["response"]) - output_texts.append(text) - return output_texts - - def _get_response_output_tokens(self, output_texts: List[str]) -> List[List[int]]: - """Return a list of response output tokens.""" - # Exclamation mark trick forces the llama tokenization to consistently - # start each output with a specific token which allows us to safely skip - # the first token of every tokenized output and get only the ones that - # are returned by the model - encodings = self._tokenizer(["!" + txt for txt in output_texts]) - return [out[1:] for out in encodings.data["input_ids"]] - - def _extract_openai_text_output(self, response: str) -> str: - """Extracts text/content of the OpenAI response object.""" - response = remove_sse_prefix(response) - - if response == "[DONE]": - return "" - - data = load_json_str(response) - completions = data["choices"][0] - - text_output = "" - if "object" not in data: - # FIXME: TPA-47 workaround for vLLM not following OpenAI Completions - # API specification when streaming, missing 'object' field: - # https://platform.openai.com/docs/api-reference/completions - text_output = completions.get("text", "") - elif data["object"] == "text_completion": # legacy - text_output = completions.get("text", "") - elif data["object"] == "chat.completion": # non-streaming - text_output = completions["message"].get("content", "") - elif data["object"] == "chat.completion.chunk": # streaming - text_output = completions["delta"].get("content", "") - else: - obj_type = data["object"] - raise ValueError(f"Unknown OpenAI response object type '{obj_type}'.") - return text_output - - def _is_openai_empty_response(self, response: str) -> bool: - """Returns true if the response is an openai response with no content (or empty content)""" - text = self._extract_openai_text_output(response) - if text: - return False - return True diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/profile_data_parser/profile_data_parser.py b/src/c++/perf_analyzer/genai-perf/genai_perf/profile_data_parser/profile_data_parser.py deleted file mode 100755 index 74eb48a23..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/profile_data_parser/profile_data_parser.py +++ /dev/null @@ -1,152 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
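Before profile_data_parser.py continues, a concrete look at the exclamation-mark trick in _get_response_output_tokens above. This sketch uses gpt2 purely to be runnable; the trick targets llama-style tokenizers that would otherwise merge or vary the leading token.

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    texts = ["Hello world", ", again"]
    encodings = tokenizer(["!" + t for t in texts])
    # Every encoding now starts with the token for "!", so dropping index 0
    # leaves exactly the tokens the model itself produced.
    output_tokens = [ids[1:] for ids in encodings["input_ids"]]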
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from enum import Enum, auto -from pathlib import Path -from typing import List, Tuple - -from genai_perf.metrics import Metrics, Statistics -from genai_perf.utils import load_json - - -class ResponseFormat(Enum): - HUGGINGFACE_RANKINGS = auto() - OPENAI_CHAT_COMPLETIONS = auto() - OPENAI_COMPLETIONS = auto() - OPENAI_EMBEDDINGS = auto() - OPENAI_VISION = auto() - RANKINGS = auto() - TRITON = auto() - - -class ProfileDataParser: - """Base profile data parser class that reads the profile data JSON file to - extract core metrics and calculate various performance statistics. - """ - - def __init__(self, filename: Path) -> None: - data = load_json(filename) - self._get_profile_metadata(data) - self._parse_profile_data(data) - - def _get_profile_metadata(self, data: dict) -> None: - self._service_kind = data["service_kind"] - if self._service_kind == "openai": - if data["endpoint"] == "rerank": - self._response_format = ResponseFormat.HUGGINGFACE_RANKINGS - elif data["endpoint"] == "v1/chat/completions": - # (TPA-66) add PA metadata to deduce the response format instead - # of parsing the request input payload in profile export json - # file. - request = data["experiments"][0]["requests"][0] - request_input = request["request_inputs"]["payload"] - if "image_url" in request_input: - self._response_format = ResponseFormat.OPENAI_VISION - else: - self._response_format = ResponseFormat.OPENAI_CHAT_COMPLETIONS - elif data["endpoint"] == "v1/completions": - self._response_format = ResponseFormat.OPENAI_COMPLETIONS - elif data["endpoint"] == "v1/embeddings": - self._response_format = ResponseFormat.OPENAI_EMBEDDINGS - elif data["endpoint"] == "v1/ranking": - self._response_format = ResponseFormat.RANKINGS - else: - # (TPA-66) add PA metadata to handle this case - # When endpoint field is either empty or custom endpoint, fall - # back to parsing the response to extract the response format. 
- request = data["experiments"][0]["requests"][0] - request_input = request["request_inputs"]["payload"] - response = request["response_outputs"][0]["response"] - if "chat.completion" in response: - if "image_url" in request_input: - self._response_format = ResponseFormat.OPENAI_VISION - else: - self._response_format = ResponseFormat.OPENAI_CHAT_COMPLETIONS - elif "text_completion" in response: - self._response_format = ResponseFormat.OPENAI_COMPLETIONS - elif "embedding" in response: - self._response_format = ResponseFormat.OPENAI_EMBEDDINGS - elif "ranking" in response: - self._response_format = ResponseFormat.RANKINGS - else: - raise RuntimeError("Unknown OpenAI response format.") - - elif self._service_kind == "triton": - self._response_format = ResponseFormat.TRITON - else: - raise ValueError(f"Unknown service kind: {self._service_kind}") - - def _parse_profile_data(self, data: dict) -> None: - """Parse through the entire profile data to collect statistics.""" - self._profile_results = {} - for experiment in data["experiments"]: - infer_mode = experiment["experiment"]["mode"] - load_level = experiment["experiment"]["value"] - requests = experiment["requests"] - - metrics = self._parse_requests(requests) - - # aggregate and calculate statistics - statistics = Statistics(metrics) - self._profile_results[(infer_mode, str(load_level))] = statistics - - def _parse_requests(self, requests: dict) -> Metrics: - """Parse each request in profile data to extract core metrics.""" - min_req_timestamp, max_res_timestamp = float("inf"), 0 - request_latencies = [] - - for request in requests: - req_timestamp = request["timestamp"] - res_timestamps = request["response_timestamps"] - - # track entire benchmark duration - min_req_timestamp = min(min_req_timestamp, req_timestamp) - max_res_timestamp = max(max_res_timestamp, res_timestamps[-1]) - - # request latencies - req_latency = res_timestamps[-1] - req_timestamp - request_latencies.append(req_latency) - - # request throughput - benchmark_duration = (max_res_timestamp - min_req_timestamp) / 1e9 # to seconds - request_throughputs = [len(requests) / benchmark_duration] - - return Metrics( - request_throughputs, - request_latencies, - ) - - def get_statistics(self, infer_mode: str, load_level: str) -> Statistics: - """Return profile statistics if it exists.""" - if (infer_mode, load_level) not in self._profile_results: - raise KeyError(f"Profile with {infer_mode}={load_level} does not exist.") - return self._profile_results[(infer_mode, load_level)] - - def get_profile_load_info(self) -> List[Tuple[str, str]]: - """Return available (infer_mode, load_level) tuple keys.""" - return [k for k, _ in self._profile_results.items()] diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/tokenizer.py b/src/c++/perf_analyzer/genai-perf/genai_perf/tokenizer.py deleted file mode 100644 index 052a478e5..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/tokenizer.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
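The arithmetic in the _parse_requests method deleted above is easy to sanity-check by hand. A self-contained sketch with two hypothetical requests (timestamps in nanoseconds, matching the profile export format):

    requests = [
        {"timestamp": 0, "response_timestamps": [2_000_000_000]},
        {"timestamp": 1_000_000_000, "response_timestamps": [3_000_000_000]},
    ]
    # Request latency: last response timestamp minus request timestamp.
    latencies = [r["response_timestamps"][-1] - r["timestamp"] for r in requests]
    # Benchmark duration spans the earliest request to the latest response.
    duration_s = (
        max(r["response_timestamps"][-1] for r in requests)
        - min(r["timestamp"] for r in requests)
    ) / 1e9
    throughput = len(requests) / duration_s
    assert latencies == [2_000_000_000, 2_000_000_000]
    assert throughput == 2 / 3.0  # two requests over a three-second window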
-# See the License for the specific language governing permissions and -# limitations under the License. - -import contextlib -import io -from typing import List - -from genai_perf.exceptions import GenAIPerfException - -# Silence tokenizer warning on import -with contextlib.redirect_stdout(io.StringIO()) as stdout, contextlib.redirect_stderr( - io.StringIO() -) as stderr: - from transformers import AutoTokenizer, BatchEncoding - from transformers import logging as token_logger - - token_logger.set_verbosity_error() - -DEFAULT_TOKENIZER = "hf-internal-testing/llama-tokenizer" - - -class Tokenizer: - """ - A small wrapper class around Huggingface Tokenizer - """ - - def __init__(self, name: str) -> None: - """ - Initialize by downloading the tokenizer from Huggingface.co - """ - try: - # Silence tokenizer warning on first use - with contextlib.redirect_stdout( - io.StringIO() - ) as stdout, contextlib.redirect_stderr(io.StringIO()) as stderr: - tokenizer = AutoTokenizer.from_pretrained(name) - except Exception as e: - raise GenAIPerfException(e) - - self._tokenizer = tokenizer - - # default tokenizer parameters for __call__, encode, decode methods - self._call_args = {"add_special_tokens": False} - self._encode_args = {"add_special_tokens": False} - self._decode_args = {"skip_special_tokens": True} - - def __call__(self, text, **kwargs) -> BatchEncoding: - self._call_args.update(kwargs) - return self._tokenizer(text, **self._call_args) - - def encode(self, text, **kwargs) -> List[int]: - self._encode_args.update(kwargs) - return self._tokenizer.encode(text, **self._encode_args) - - def decode(self, token_ids, **kwargs) -> str: - self._decode_args.update(kwargs) - return self._tokenizer.decode(token_ids, **self._decode_args) - - def __repr__(self) -> str: - return self._tokenizer.__repr__() - - -def get_tokenizer(tokenizer_model: str) -> Tokenizer: - """ - Return tokenizer for the given model name - """ - return Tokenizer(tokenizer_model) diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/utils.py b/src/c++/perf_analyzer/genai-perf/genai_perf/utils.py deleted file mode 100644 index 4b625352a..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/utils.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. 
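Illustrative use of the Tokenizer wrapper deleted above (the first call downloads the tokenizer from huggingface.co, so network access is assumed). Note that per-call kwargs persist across calls, because the wrapper update()s its stored defaults rather than copying them:

    from genai_perf.tokenizer import DEFAULT_TOKENIZER, get_tokenizer

    tokenizer = get_tokenizer(DEFAULT_TOKENIZER)
    ids = tokenizer.encode("Hello world")  # add_special_tokens=False by default
    text = tokenizer.decode(ids)           # skip_special_tokens=True by default
    assert isinstance(ids, list) and isinstance(text, str)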
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import json -from enum import Enum -from pathlib import Path -from typing import Any, Dict, List, Optional, Type - -import genai_perf.logging as logging - -# Skip type checking to avoid mypy error -# Issue: https://github.com/python/mypy/issues/10632 -import yaml # type: ignore -from PIL import Image - -logger = logging.getLogger(__name__) - - -def encode_image(img: Image, format: str): - """Encodes an image into base64 encoding.""" - # Lazy import for vision related endpoints - import base64 - from io import BytesIO - - # JPEG does not support P or RGBA mode (commonly used for PNG) so it needs - # to be converted to RGB before an image can be saved as JPEG format. - if format == "JPEG" and img.mode != "RGB": - img = img.convert("RGB") - - buffered = BytesIO() - img.save(buffered, format=format) - return base64.b64encode(buffered.getvalue()).decode("utf-8") - - -def remove_sse_prefix(msg: str) -> str: - prefix = "data: " - if msg.startswith(prefix): - return msg[len(prefix) :].strip() - return msg.strip() - - -def load_yaml(filepath: Path) -> Dict[str, Any]: - with open(str(filepath)) as f: - configs = yaml.safe_load(f) - return configs - - -def load_json(filepath: Path) -> Dict[str, Any]: - with open(str(filepath), encoding="utf-8", errors="ignore") as f: - content = f.read() - return load_json_str(content) - - -def load_json_str(json_str: str) -> Dict[str, Any]: - try: - return json.loads(json_str) - except json.JSONDecodeError: - snippet = json_str[:200] + ("..." if len(json_str) > 200 else "") - logger.error("Failed to parse JSON string: '%s'", snippet) - raise - - -def remove_file(file: Path) -> None: - if file.is_file(): - file.unlink() - - -def convert_option_name(name: str) -> str: - return name.replace("_", "-") - - -def get_enum_names(enum: Type[Enum]) -> List: - names = [] - for e in enum: - names.append(e.name.lower()) - return names - - -def get_enum_entry(name: str, enum: Type[Enum]) -> Optional[Enum]: - for e in enum: - if e.name.lower() == name.lower(): - return e - return None - - -def scale(value, factor): - return value * factor diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/wrapper.py b/src/c++/perf_analyzer/genai-perf/genai_perf/wrapper.py deleted file mode 100644 index 76ef3e321..000000000 --- a/src/c++/perf_analyzer/genai-perf/genai_perf/wrapper.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. 
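The JPEG conversion in the encode_image helper deleted above guards a real PIL gotcha: JPEG cannot store P- or RGBA-mode images, so PNG-style images must be converted to RGB before saving. A standalone sketch with a synthetic image:

    import base64
    from io import BytesIO

    from PIL import Image

    img = Image.new("RGBA", (4, 4))   # RGBA, as commonly produced for PNG
    if img.mode != "RGB":
        img = img.convert("RGB")      # JPEG cannot save P or RGBA modes
    buffered = BytesIO()
    img.save(buffered, format="JPEG")
    b64 = base64.b64encode(buffered.getvalue()).decode("utf-8")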
-# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import subprocess -from argparse import Namespace -from typing import List, Optional - -import genai_perf.logging as logging -import genai_perf.utils as utils -from genai_perf.constants import DEFAULT_GRPC_URL, DEFAULT_INPUT_DATA_JSON -from genai_perf.llm_inputs.llm_inputs import OutputFormat - -logger = logging.getLogger(__name__) - - -class Profiler: - @staticmethod - def add_protocol_args(args: Namespace) -> List[str]: - cmd = [] - if args.service_kind == "triton": - cmd += ["-i", "grpc", "--streaming"] - if args.u is None: # url - cmd += ["-u", f"{DEFAULT_GRPC_URL}"] - if args.output_format == OutputFormat.TENSORRTLLM: - cmd += ["--shape", "max_tokens:1", "--shape", "text_input:1"] - elif args.service_kind == "openai": - cmd += ["-i", "http"] - return cmd - - @staticmethod - def add_inference_load_args(args: Namespace) -> List[str]: - cmd = [] - if args.concurrency: - cmd += ["--concurrency-range", f"{args.concurrency}"] - elif args.request_rate: - cmd += ["--request-rate-range", f"{args.request_rate}"] - return cmd - - @staticmethod - def build_cmd(args: Namespace, extra_args: Optional[List[str]] = None) -> List[str]: - skip_args = [ - "artifact_dir", - "backend", - "batch_size", - "concurrency", - "endpoint_type", - "extra_inputs", - "formatted_model_name", - "func", - "generate_plots", - "input_dataset", - "input_file", - "input_format", - "model", - "model_selection_strategy", - "num_prompts", - "output_format", - "output_tokens_mean_deterministic", - "output_tokens_mean", - "output_tokens_stddev", - "prompt_source", - "random_seed", - "request_rate", - # The 'streaming' passed in to this script is to determine if the - # LLM response should be streaming. That is different than the - # 'streaming' that PA takes, which means something else (and is - # required for decoupled models into triton). 
- "streaming", - "synthetic_input_tokens_mean", - "synthetic_input_tokens_stddev", - "subcommand", - "tokenizer", - "image_width_mean", - "image_width_stddev", - "image_height_mean", - "image_height_stddev", - "image_format", - ] - - utils.remove_file(args.profile_export_file) - - cmd = [ - f"perf_analyzer", - f"-m", - f"{args.formatted_model_name}", - f"--async", - f"--input-data", - f"{args.artifact_dir / DEFAULT_INPUT_DATA_JSON}", - ] - for arg, value in vars(args).items(): - if arg in skip_args: - pass - elif value is None: - pass - elif value is False: - pass - elif value is True: - if len(arg) == 1: - cmd += [f"-{arg}"] - else: - cmd += [f"--{arg}"] - else: - if len(arg) == 1: - cmd += [f"-{arg}", f"{value}"] - else: - arg = utils.convert_option_name(arg) - cmd += [f"--{arg}", f"{value}"] - - cmd += Profiler.add_protocol_args(args) - cmd += Profiler.add_inference_load_args(args) - - if extra_args is not None: - for arg in extra_args: - cmd += [f"{arg}"] - return cmd - - @staticmethod - def run(args: Namespace, extra_args: Optional[List[str]]) -> None: - cmd = Profiler.build_cmd(args, extra_args) - logger.info(f"Running Perf Analyzer : '{' '.join(cmd)}'") - if args and args.verbose: - subprocess.run(cmd, check=True, stdout=None) - else: - subprocess.run(cmd, check=True, stdout=subprocess.DEVNULL) diff --git a/src/c++/perf_analyzer/genai-perf/pyproject.toml b/src/c++/perf_analyzer/genai-perf/pyproject.toml deleted file mode 100644 index f1f78a7e2..000000000 --- a/src/c++/perf_analyzer/genai-perf/pyproject.toml +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -[project] -name = "genai-perf" -readme = "README.md" -description = "GenAI Perf Analyzer CLI - CLI tool to simplify profiling LLMs and Generative AI models with Perf Analyzer" -dynamic = ["version"] -classifiers = [ - "Development Status :: 3 - Alpha", - "Intended Audience :: Science/Research", - "Intended Audience :: Developers", - "Topic :: Software Development", - "Topic :: Scientific/Engineering", - "Programming Language :: Python", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.10", - "Operating System :: Unix", -] -authors = [] -maintainers = [] -keywords = [] -requires-python = ">=3.8,<4" -dependencies = [ - "numpy<2", - "pytest", - "rich", - "transformers", - "plotly", - "pandas", - "kaleido", - "statsmodels", - "pyarrow", - "fastparquet", - "pytest-mock", - "pyyaml", - "responses", - "pillow", -] - -# CLI Entrypoint -[project.scripts] -genai-perf = "genai_perf.main:main" - -[project.urls] -"Homepage" = "https://github.com/triton-inference-server/client" -"Bug Tracker" = "https://github.com/triton-inference-server/client/issues" - -# Build -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[tool.hatch.version] -path = "genai_perf/__init__.py" - -# Pre-commit hook tool configs -[tool.codespell] -# note: pre-commit passes explicit lists of files here, which this skip file list doesn't override - -# this is only to allow you to run codespell interactively -skip = "./.git,./.github" -# ignore short words, and typename parameters like OffsetT -ignore-regex = "\\b(.{1,4}|[A-Z]\\w*T)\\b" -# use the 'clear' dictionary for unambiguous spelling mistakes -builtin = "clear" -# disable warnings about binary files and wrong encoding -quiet-level = 3 - -# Linting/formatting -[tool.ruff] -# Same as Black. -line-length = 88 -indent-width = 4 diff --git a/src/c++/perf_analyzer/genai-perf/tests/__init__.py b/src/c++/perf_analyzer/genai-perf/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/c++/perf_analyzer/genai-perf/tests/test_artifacts.py b/src/c++/perf_analyzer/genai-perf/tests/test_artifacts.py deleted file mode 100644 index cdcc4afc9..000000000 --- a/src/c++/perf_analyzer/genai-perf/tests/test_artifacts.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from argparse import Namespace -from pathlib import Path - -import pytest -from genai_perf.main import create_artifacts_dirs - - -@pytest.fixture -def mock_makedirs(mocker): - return mocker.patch("os.makedirs") - - -def test_create_artifacts_dirs_custom_path(mock_makedirs): - artifacts_dir_path = "/genai_perf_artifacts" - mock_args = Namespace(artifact_dir=Path(artifacts_dir_path), generate_plots=True) - create_artifacts_dirs(mock_args) - mock_makedirs.assert_any_call( - Path(artifacts_dir_path), exist_ok=True - ), f"Expected os.makedirs to create artifacts directory inside {artifacts_dir_path} path." - mock_makedirs.assert_any_call( - Path(artifacts_dir_path) / "plots", exist_ok=True - ), f"Expected os.makedirs to create plots directory inside {artifacts_dir_path}/plots path." - assert mock_makedirs.call_count == 2 - - -def test_create_artifacts_disable_generate_plots(mock_makedirs): - artifacts_dir_path = "/genai_perf_artifacts" - mock_args = Namespace(artifact_dir=Path(artifacts_dir_path)) - create_artifacts_dirs(mock_args) - mock_makedirs.assert_any_call( - Path(artifacts_dir_path), exist_ok=True - ), f"Expected os.makedirs to create artifacts directory inside {artifacts_dir_path} path." - assert mock_makedirs.call_count == 1 diff --git a/src/c++/perf_analyzer/genai-perf/tests/test_cli.py b/src/c++/perf_analyzer/genai-perf/tests/test_cli.py deleted file mode 100644 index 2ef5d52ba..000000000 --- a/src/c++/perf_analyzer/genai-perf/tests/test_cli.py +++ /dev/null @@ -1,855 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import argparse -from pathlib import Path - -import genai_perf.logging as logging -import pytest -from genai_perf import __version__, parser -from genai_perf.llm_inputs.llm_inputs import ( - ImageFormat, - ModelSelectionStrategy, - OutputFormat, - PromptSource, -) -from genai_perf.llm_inputs.synthetic_image_generator import ImageFormat -from genai_perf.parser import PathType - - -class TestCLIArguments: - # ================================================ - # PROFILE COMMAND - # ================================================ - expected_help_output = ( - "CLI to profile LLMs and Generative AI models with Perf Analyzer" - ) - expected_version_output = f"genai-perf {__version__}" - - @pytest.mark.parametrize( - "args, expected_output", - [ - (["-h"], expected_help_output), - (["--help"], expected_help_output), - (["--version"], expected_version_output), - ], - ) - def test_help_version_arguments_output_and_exit( - self, monkeypatch, args, expected_output, capsys - ): - monkeypatch.setattr("sys.argv", ["genai-perf"] + args) - - with pytest.raises(SystemExit) as excinfo: - _ = parser.parse_args() - - # Check that the exit was successful - assert excinfo.value.code == 0 - - # Capture that the correct message was displayed - captured = capsys.readouterr() - assert expected_output in captured.out - - @pytest.mark.parametrize( - "arg, expected_attributes", - [ - ( - ["--artifact-dir", "test_artifact_dir"], - {"artifact_dir": Path("test_artifact_dir")}, - ), - ( - [ - "--batch-size", - "5", - "--endpoint-type", - "embeddings", - "--service-kind", - "openai", - ], - {"batch_size": 5}, - ), - ( - [ - "-b", - "5", - "--endpoint-type", - "embeddings", - "--service-kind", - "openai", - ], - {"batch_size": 5}, - ), - (["--concurrency", "3"], {"concurrency": 3}), - ( - ["--endpoint-type", "completions", "--service-kind", "openai"], - {"endpoint": "v1/completions"}, - ), - ( - ["--endpoint-type", "chat", "--service-kind", "openai"], - {"endpoint": "v1/chat/completions"}, - ), - ( - ["--endpoint-type", "rankings", "--service-kind", "openai"], - {"endpoint": "v1/ranking"}, - ), - ( - [ - "--endpoint-type", - "chat", - "--service-kind", - "openai", - "--endpoint", - "custom/address", - ], - {"endpoint": "custom/address"}, - ), - ( - [ - "--endpoint-type", - "chat", - "--service-kind", - "openai", - "--endpoint", - " /custom/address", - ], - {"endpoint": "custom/address"}, - ), - ( - [ - "--endpoint-type", - "completions", - "--service-kind", - "openai", - "--endpoint", - "custom/address", - ], - {"endpoint": "custom/address"}, - ), - ( - ["--extra-inputs", "test_key:test_value"], - {"extra_inputs": ["test_key:test_value"]}, - ), - ( - [ - "--extra-inputs", - "test_key:5", - "--extra-inputs", - "another_test_key:6", - ], - {"extra_inputs": ["test_key:5", "another_test_key:6"]}, - ), - ( - [ - "--extra-inputs", - '{"name": "Wolverine","hobbies": ["hacking", "slashing"],"address": {"street": "1407 Graymalkin Lane, Salem Center","city": "NY"}}', - ], - { - "extra_inputs": [ - 
'{"name": "Wolverine","hobbies": ["hacking", "slashing"],"address": {"street": "1407 Graymalkin Lane, Salem Center","city": "NY"}}' - ] - }, - ), - (["--input-dataset", "openorca"], {"input_dataset": "openorca"}), - (["--measurement-interval", "100"], {"measurement_interval": 100}), - ( - ["--model-selection-strategy", "random"], - {"model_selection_strategy": ModelSelectionStrategy.RANDOM}, - ), - (["--num-prompts", "101"], {"num_prompts": 101}), - ( - ["--output-tokens-mean", "6"], - {"output_tokens_mean": 6}, - ), - ( - ["--output-tokens-mean", "6", "--output-tokens-stddev", "7"], - {"output_tokens_stddev": 7}, - ), - ( - ["--output-tokens-mean", "6", "--output-tokens-mean-deterministic"], - {"output_tokens_mean_deterministic": True}, - ), - (["-p", "100"], {"measurement_interval": 100}), - ( - ["--profile-export-file", "test.json"], - { - "profile_export_file": Path( - "artifacts/test_model-triton-tensorrtllm-concurrency1/test.json" - ) - }, - ), - (["--random-seed", "8"], {"random_seed": 8}), - (["--request-rate", "9.0"], {"request_rate": 9.0}), - (["-s", "99.5"], {"stability_percentage": 99.5}), - (["--service-kind", "triton"], {"service_kind": "triton"}), - ( - ["--service-kind", "openai", "--endpoint-type", "chat"], - {"service_kind": "openai", "endpoint": "v1/chat/completions"}, - ), - (["--stability-percentage", "99.5"], {"stability_percentage": 99.5}), - (["--streaming"], {"streaming": True}), - ( - ["--synthetic-input-tokens-mean", "6"], - {"synthetic_input_tokens_mean": 6}, - ), - ( - ["--synthetic-input-tokens-stddev", "7"], - {"synthetic_input_tokens_stddev": 7}, - ), - ( - ["--image-width-mean", "123"], - {"image_width_mean": 123}, - ), - ( - ["--image-width-stddev", "123"], - {"image_width_stddev": 123}, - ), - ( - ["--image-height-mean", "456"], - {"image_height_mean": 456}, - ), - ( - ["--image-height-stddev", "456"], - {"image_height_stddev": 456}, - ), - (["--image-format", "png"], {"image_format": ImageFormat.PNG}), - (["-v"], {"verbose": True}), - (["--verbose"], {"verbose": True}), - (["-u", "test_url"], {"u": "test_url"}), - (["--url", "test_url"], {"u": "test_url"}), - ], - ) - def test_non_file_flags_parsed(self, monkeypatch, arg, expected_attributes, capsys): - logging.init_logging() - combined_args = ["genai-perf", "profile", "--model", "test_model"] + arg - monkeypatch.setattr("sys.argv", combined_args) - args, _ = parser.parse_args() - - # Check that the attributes are set correctly - for key, value in expected_attributes.items(): - assert getattr(args, key) == value - - # Check that nothing was printed as a byproduct of parsing the arguments - captured = capsys.readouterr() - assert captured.out == "" - - @pytest.mark.parametrize( - "models, expected_model_list, formatted_name", - [ - ( - ["--model", "test_model_A"], - {"model": ["test_model_A"]}, - {"formatted_model_name": "test_model_A"}, - ), - ( - ["--model", "test_model_A", "test_model_B"], - {"model": ["test_model_A", "test_model_B"]}, - {"formatted_model_name": "test_model_A_multi"}, - ), - ( - ["--model", "test_model_A", "test_model_B", "test_model_C"], - {"model": ["test_model_A", "test_model_B", "test_model_C"]}, - {"formatted_model_name": "test_model_A_multi"}, - ), - ( - ["--model", "test_model_A:math", "test_model_B:embedding"], - {"model": ["test_model_A:math", "test_model_B:embedding"]}, - {"formatted_model_name": "test_model_A:math_multi"}, - ), - ], - ) - def test_multiple_model_args( - self, monkeypatch, models, expected_model_list, formatted_name, capsys - ): - logging.init_logging() - 
combined_args = ["genai-perf", "profile"] + models - monkeypatch.setattr("sys.argv", combined_args) - args, _ = parser.parse_args() - - # Check that models are handled correctly - for key, value in expected_model_list.items(): - assert getattr(args, key) == value - - # Check that the formatted_model_name is correctly generated - for key, value in formatted_name.items(): - assert getattr(args, key) == value - - # Check that nothing was printed as a byproduct of parsing the arguments - captured = capsys.readouterr() - assert captured.out == "" - - def test_file_flags_parsed(self, monkeypatch, mocker): - _ = mocker.patch("os.path.isfile", return_value=True) - combined_args = [ - "genai-perf", - "profile", - "--model", - "test_model", - "--input-file", - "fakefile.txt", - ] - monkeypatch.setattr("sys.argv", combined_args) - args, _ = parser.parse_args() - filepath, pathtype = args.input_file - assert filepath == Path( - "fakefile.txt" - ), "The file argument should be the path to the file" - assert pathtype == PathType.FILE - - @pytest.mark.parametrize( - "arg, expected_path", - [ - ( - ["--service-kind", "openai", "--endpoint-type", "chat"], - "artifacts/test_model-openai-chat-concurrency1", - ), - ( - ["--service-kind", "openai", "--endpoint-type", "completions"], - "artifacts/test_model-openai-completions-concurrency1", - ), - ( - ["--service-kind", "openai", "--endpoint-type", "rankings"], - "artifacts/test_model-openai-rankings-concurrency1", - ), - ( - ["--service-kind", "triton", "--backend", "tensorrtllm"], - "artifacts/test_model-triton-tensorrtllm-concurrency1", - ), - ( - ["--service-kind", "triton", "--backend", "vllm"], - "artifacts/test_model-triton-vllm-concurrency1", - ), - ( - [ - "--service-kind", - "triton", - "--backend", - "vllm", - "--concurrency", - "32", - ], - "artifacts/test_model-triton-vllm-concurrency32", - ), - ], - ) - def test_default_profile_export_filepath( - self, monkeypatch, arg, expected_path, capsys - ): - logging.init_logging() - combined_args = ["genai-perf", "profile", "--model", "test_model"] + arg - monkeypatch.setattr("sys.argv", combined_args) - args, _ = parser.parse_args() - - assert args.artifact_dir == Path(expected_path) - captured = capsys.readouterr() - assert captured.out == "" - - @pytest.mark.parametrize( - "arg, expected_path, expected_output", - [ - ( - ["--model", "strange/test_model"], - "artifacts/strange_test_model-triton-tensorrtllm-concurrency1", - ( - "Model name 'strange/test_model' cannot be used to create " - "artifact directory. Instead, 'strange_test_model' will be used" - ), - ), - ( - [ - "--model", - "hello/world/test_model", - "--service-kind", - "openai", - "--endpoint-type", - "chat", - ], - "artifacts/hello_world_test_model-openai-chat-concurrency1", - ( - "Model name 'hello/world/test_model' cannot be used to create " - "artifact directory. 
Instead, 'hello_world_test_model' will be used" - ), - ), - ], - ) - def test_model_name_artifact_path( - self, monkeypatch, arg, expected_path, expected_output, capsys - ): - logging.init_logging() - combined_args = ["genai-perf", "profile"] + arg - monkeypatch.setattr("sys.argv", combined_args) - args, _ = parser.parse_args() - - assert args.artifact_dir == Path(expected_path) - captured = capsys.readouterr() - assert expected_output in captured.out - - def test_default_load_level(self, monkeypatch, capsys): - logging.init_logging() - monkeypatch.setattr( - "sys.argv", ["genai-perf", "profile", "--model", "test_model"] - ) - args, _ = parser.parse_args() - assert args.concurrency == 1 - captured = capsys.readouterr() - assert captured.out == "" - - def test_load_level_mutually_exclusive(self, monkeypatch, capsys): - monkeypatch.setattr( - "sys.argv", - ["genai-perf", "profile", "--concurrency", "3", "--request-rate", "9.0"], - ) - expected_output = ( - "argument --request-rate: not allowed with argument --concurrency" - ) - - with pytest.raises(SystemExit) as excinfo: - parser.parse_args() - - assert excinfo.value.code != 0 - captured = capsys.readouterr() - assert expected_output in captured.err - - def test_model_not_provided(self, monkeypatch, capsys): - monkeypatch.setattr("sys.argv", ["genai-perf", "profile"]) - expected_output = "The -m/--model option is required and cannot be empty." - - with pytest.raises(SystemExit) as excinfo: - parser.parse_args() - - assert excinfo.value.code != 0 - captured = capsys.readouterr() - assert expected_output in captured.err - - def test_pass_through_args(self, monkeypatch): - args = ["genai-perf", "profile", "-m", "test_model"] - other_args = ["--", "With", "great", "power"] - monkeypatch.setattr("sys.argv", args + other_args) - _, pass_through_args = parser.parse_args() - - assert pass_through_args == other_args[1:] - - def test_unrecognized_arg(self, monkeypatch, capsys): - monkeypatch.setattr( - "sys.argv", - [ - "genai-perf", - "profile", - "-m", - "nonexistent_model", - "--wrong-arg", - ], - ) - expected_output = "unrecognized arguments: --wrong-arg" - - with pytest.raises(SystemExit) as excinfo: - parser.parse_args() - - assert excinfo.value.code != 0 - captured = capsys.readouterr() - assert expected_output in captured.err - - @pytest.mark.parametrize( - "args, expected_output", - [ - ( - [ - "genai-perf", - "profile", - "-m", - "test_model", - "--service-kind", - "openai", - ], - "The --endpoint-type option is required when using the 'openai' service-kind.", - ), - ( - [ - "genai-perf", - "profile", - "-m", - "test_model", - "--service-kind", - "openai", - "--endpoint", - "custom/address", - ], - "The --endpoint-type option is required when using the 'openai' service-kind.", - ), - ( - [ - "genai-perf", - "profile", - "-m", - "test_model", - "--output-tokens-stddev", - "5", - ], - "The --output-tokens-mean option is required when using --output-tokens-stddev.", - ), - ( - [ - "genai-perf", - "profile", - "-m", - "test_model", - "--output-tokens-mean-deterministic", - ], - "The --output-tokens-mean option is required when using --output-tokens-mean-deterministic.", - ), - ( - [ - "genai-perf", - "profile", - "-m", - "test_model", - "--output-tokens-mean-deterministic", - ], - "The --output-tokens-mean option is required when using --output-tokens-mean-deterministic.", - ), - ( - [ - "genai-perf", - "profile", - "-m", - "test_model", - "--service-kind", - "openai", - "--endpoint-type", - "chat", - "--output-tokens-mean", - "100", - 
"--output-tokens-mean-deterministic", - ], - "The --output-tokens-mean-deterministic option is only supported with the Triton service-kind", - ), - ( - [ - "genai-perf", - "profile", - "-m", - "test_model", - "--batch-size", - "10", - ], - "The --batch-size option is currently only supported with the embeddings and rankings endpoint types", - ), - ( - [ - "genai-perf", - "profile", - "-m", - "test_model", - "--service-kind", - "openai", - "--endpoint-type", - "embeddings", - "--streaming", - ], - "The --streaming option is not supported with the embeddings endpoint type", - ), - ( - [ - "genai-perf", - "profile", - "-m", - "test_model", - "--service-kind", - "openai", - "--endpoint-type", - "rankings", - "--streaming", - ], - "The --streaming option is not supported with the rankings endpoint type", - ), - ( - [ - "genai-perf", - "profile", - "-m", - "test_model", - "--service-kind", - "openai", - "--endpoint-type", - "embeddings", - "--generate-plots", - ], - "The --generate-plots option is not currently supported with the embeddings endpoint type", - ), - ( - [ - "genai-perf", - "profile", - "-m", - "test_model", - "--service-kind", - "openai", - "--endpoint-type", - "rankings", - "--generate-plots", - ], - "The --generate-plots option is not currently supported with the rankings endpoint type", - ), - ], - ) - def test_conditional_errors(self, args, expected_output, monkeypatch, capsys): - monkeypatch.setattr("sys.argv", args) - - with pytest.raises(SystemExit) as excinfo: - parser.parse_args() - - assert excinfo.value.code != 0 - captured = capsys.readouterr() - assert expected_output in captured.err - - @pytest.mark.parametrize( - "args, expected_format", - [ - ( - ["--service-kind", "openai", "--endpoint-type", "chat"], - OutputFormat.OPENAI_CHAT_COMPLETIONS, - ), - ( - ["--service-kind", "openai", "--endpoint-type", "completions"], - OutputFormat.OPENAI_COMPLETIONS, - ), - ( - [ - "--service-kind", - "openai", - "--endpoint-type", - "completions", - "--endpoint", - "custom/address", - ], - OutputFormat.OPENAI_COMPLETIONS, - ), - ( - ["--service-kind", "openai", "--endpoint-type", "rankings"], - OutputFormat.RANKINGS, - ), - ( - ["--service-kind", "triton", "--backend", "tensorrtllm"], - OutputFormat.TENSORRTLLM, - ), - (["--service-kind", "triton", "--backend", "vllm"], OutputFormat.VLLM), - ], - ) - def test_inferred_output_format(self, monkeypatch, args, expected_format): - monkeypatch.setattr( - "sys.argv", ["genai-perf", "profile", "-m", "test_model"] + args - ) - - parsed_args, _ = parser.parse_args() - assert parsed_args.output_format == expected_format - - @pytest.mark.parametrize( - "args, expected_error", - [ - ( - ["--extra-inputs", "hi:"], - "Input name or value is empty in --extra-inputs: hi:\nExpected input format: 'input_name:value'", - ), - ( - ["--extra-inputs", ":a"], - "Input name or value is empty in --extra-inputs: :a\nExpected input format: 'input_name:value'", - ), - ( - ["--extra-inputs", ":a:"], - "Invalid input format for --extra-inputs: :a:\nExpected input format: 'input_name:value'", - ), - ( - ["--extra-inputs", "unknown"], - "Invalid input format for --extra-inputs: unknown\nExpected input format: 'input_name:value'", - ), - ( - ["--extra-inputs", "test_key:5", "--extra-inputs", "test_key:6"], - "Input name already exists in request_inputs dictionary: test_key", - ), - ], - ) - def test_repeated_extra_arg_warning(self, monkeypatch, args, expected_error): - combined_args = ["genai-perf", "profile", "-m", "test_model"] + args - 
monkeypatch.setattr("sys.argv", combined_args) - - parsed_args, _ = parser.parse_args() - - with pytest.raises(ValueError) as exc_info: - _ = parser.get_extra_inputs_as_dict(parsed_args) - - assert str(exc_info.value) == expected_error - - @pytest.mark.parametrize( - "args, expected_prompt_source", - [ - ([], PromptSource.SYNTHETIC), - (["--input-dataset", "openorca"], PromptSource.DATASET), - (["--input-file", "prompt.txt"], PromptSource.FILE), - ( - ["--input-file", "prompt.txt", "--synthetic-input-tokens-mean", "10"], - PromptSource.FILE, - ), - ], - ) - def test_inferred_prompt_source( - self, monkeypatch, mocker, args, expected_prompt_source - ): - _ = mocker.patch("builtins.open", mocker.mock_open(read_data="data")) - _ = mocker.patch("os.path.isfile", return_value=True) - _ = mocker.patch("os.path.isdir", return_value=True) - combined_args = ["genai-perf", "profile", "--model", "test_model"] + args - monkeypatch.setattr("sys.argv", combined_args) - args, _ = parser.parse_args() - - assert args.prompt_source == expected_prompt_source - - def test_prompt_source_assertions(self, monkeypatch, mocker, capsys): - _ = mocker.patch("builtins.open", mocker.mock_open(read_data="data")) - _ = mocker.patch("os.path.isfile", return_value=True) - _ = mocker.patch("os.path.isdir", return_value=True) - args = [ - "genai-perf", - "profile", - "--model", - "test_model", - "--input-dataset", - "openorca", - "--input-file", - "prompt.txt", - ] - monkeypatch.setattr("sys.argv", args) - - expected_output = ( - "argument --input-file: not allowed with argument --input-dataset" - ) - - with pytest.raises(SystemExit) as excinfo: - parser.parse_args() - - assert excinfo.value.code != 0 - captured = capsys.readouterr() - assert expected_output in captured.err - - @pytest.mark.parametrize( - "args", - [ - # negative numbers - ["--image-width-mean", "-123"], - ["--image-width-stddev", "-34"], - ["--image-height-mean", "-123"], - ["--image-height-stddev", "-34"], - # zeros - ["--image-width-mean", "0"], - ["--image-height-mean", "0"], - ], - ) - def test_positive_image_input_args(self, monkeypatch, args): - combined_args = ["genai-perf", "profile", "-m", "test_model"] + args - monkeypatch.setattr("sys.argv", combined_args) - - with pytest.raises(SystemExit) as excinfo: - parser.parse_args() - - # ================================================ - # COMPARE SUBCOMMAND - # ================================================ - expected_compare_help_output = ( - "Subcommand to generate plots that compare multiple profile runs." 
- ) - - @pytest.mark.parametrize( - "args, expected_output", - [ - (["-h"], expected_compare_help_output), - (["--help"], expected_compare_help_output), - ], - ) - def test_compare_help_arguments_output_and_exit( - self, monkeypatch, args, expected_output, capsys - ): - logging.init_logging() - monkeypatch.setattr("sys.argv", ["genai-perf", "compare"] + args) - - with pytest.raises(SystemExit) as excinfo: - _ = parser.parse_args() - - # Check that the exit was successful - assert excinfo.value.code == 0 - - # Capture that the correct message was displayed - captured = capsys.readouterr() - assert expected_output in captured.out - - def test_compare_mutually_exclusive(self, monkeypatch, capsys): - args = ["genai-perf", "compare", "--config", "hello", "--files", "a", "b", "c"] - monkeypatch.setattr("sys.argv", args) - expected_output = "argument -f/--files: not allowed with argument --config" - - with pytest.raises(SystemExit) as excinfo: - parser.parse_args() - - assert excinfo.value.code != 0 - captured = capsys.readouterr() - assert expected_output in captured.err - - def test_compare_not_provided(self, monkeypatch, capsys): - args = ["genai-perf", "compare"] - monkeypatch.setattr("sys.argv", args) - expected_output = "Either the --config or --files option must be specified." - - with pytest.raises(SystemExit) as excinfo: - parser.parse_args() - - assert excinfo.value.code != 0 - captured = capsys.readouterr() - assert expected_output in captured.err - - @pytest.mark.parametrize( - "extra_inputs_list, expected_dict", - [ - (["test_key:test_value"], {"test_key": "test_value"}), - ( - ["test_key:1", "another_test_key:2"], - {"test_key": 1, "another_test_key": 2}, - ), - ( - [ - '{"name": "Wolverine","hobbies": ["hacking", "slashing"],"address": {"street": "1407 Graymalkin Lane, Salem Center","city": "NY"}}' - ], - { - "name": "Wolverine", - "hobbies": ["hacking", "slashing"], - "address": { - "street": "1407 Graymalkin Lane, Salem Center", - "city": "NY", - }, - }, - ), - ], - ) - def test_get_extra_inputs_as_dict(self, extra_inputs_list, expected_dict): - namespace = argparse.Namespace() - namespace.extra_inputs = extra_inputs_list - actual_dict = parser.get_extra_inputs_as_dict(namespace) - assert actual_dict == expected_dict diff --git a/src/c++/perf_analyzer/genai-perf/tests/test_console_exporter.py b/src/c++/perf_analyzer/genai-perf/tests/test_console_exporter.py deleted file mode 100644 index dda62e04a..000000000 --- a/src/c++/perf_analyzer/genai-perf/tests/test_console_exporter.py +++ /dev/null @@ -1,176 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. 
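The extra-inputs parsing exercised by the tests above accepts either "input_name:value" pairs (numeric-looking values are deserialized; the tests show integers) or a single JSON object. A short usage sketch, assuming the genai_perf package is importable:

    from argparse import Namespace

    from genai_perf.parser import get_extra_inputs_as_dict

    ns = Namespace(extra_inputs=["max_tokens:200", "model:gpt2_vllm"])
    assert get_extra_inputs_as_dict(ns) == {"max_tokens": 200, "model": "gpt2_vllm"}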
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from genai_perf import parser -from genai_perf.export_data.console_exporter import ConsoleExporter -from genai_perf.export_data.exporter_config import ExporterConfig -from genai_perf.metrics import LLMMetrics, Metrics, Statistics - - -class TestConsoleExporter: - - def test_streaming_llm_output(self, monkeypatch, capsys) -> None: - argv = [ - "genai-perf", - "profile", - "-m", - "model_name", - "--service-kind", - "openai", - "--endpoint-type", - "chat", - "--streaming", - ] - monkeypatch.setattr("sys.argv", argv) - args, _ = parser.parse_args() - - metrics = LLMMetrics( - request_throughputs=[123], - request_latencies=[4, 5, 6], - time_to_first_tokens=[7, 8, 9], - inter_token_latencies=[10, 11, 12], - output_token_throughputs=[456], - output_sequence_lengths=[1, 2, 3], - input_sequence_lengths=[5, 6, 7], - ) - stats = Statistics(metrics=metrics) - - config = ExporterConfig() - config.stats = stats.stats_dict - config.metrics = stats.metrics - config.args = args - - exporter = ConsoleExporter(config) - exporter.export() - - expected_content = ( - " LLM Metrics \n" - "┏━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━┳━━━━━━━┳━━━━━━━┳━━━━━━━┳━━━━━━━┳━━━━━━━┓\n" - "┃ Statistic ┃ avg ┃ min ┃ max ┃ p99 ┃ p90 ┃ p75 ┃\n" - "┡━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━╇━━━━━━━╇━━━━━━━╇━━━━━━━╇━━━━━━━╇━━━━━━━┩\n" - "│ Time to first token (ms) │ 8.00 │ 7.00 │ 9.00 │ 8.98 │ 8.80 │ 8.50 │\n" - "│ Inter token latency (ms) │ 11.00 │ 10.00 │ 12.00 │ 11.98 │ 11.80 │ 11.50 │\n" - "│ Request latency (ms) │ 5.00 │ 4.00 │ 6.00 │ 5.98 │ 5.80 │ 5.50 │\n" - "│ Output sequence length │ 2.00 │ 1.00 │ 3.00 │ 2.98 │ 2.80 │ 2.50 │\n" - "│ Input sequence length │ 6.00 │ 5.00 │ 7.00 │ 6.98 │ 6.80 │ 6.50 │\n" - "└──────────────────────────┴───────┴───────┴───────┴───────┴───────┴───────┘\n" - "Output token throughput (per sec): 456.00\n" - "Request throughput (per sec): 123.00\n" - ) - - returned_data = capsys.readouterr().out - assert returned_data == expected_content - - def test_nonstreaming_llm_output(self, monkeypatch, capsys) -> None: - argv = [ - "genai-perf", - "profile", - "-m", - "model_name", - "--service-kind", - "openai", - "--endpoint-type", - "chat", - ] - monkeypatch.setattr("sys.argv", argv) - args, _ = parser.parse_args() - - metrics = LLMMetrics( - request_throughputs=[123], - request_latencies=[4, 5, 6], - time_to_first_tokens=[4, 5, 6], # same as request_latency - inter_token_latencies=[], # no ITL - output_token_throughputs=[456], - output_sequence_lengths=[1, 2, 3], - input_sequence_lengths=[5, 6, 7], - ) - stats = Statistics(metrics=metrics) - - config = ExporterConfig() - config.stats = stats.stats_dict - config.metrics = stats.metrics - config.args = args - - exporter = ConsoleExporter(config) - exporter.export() - - # No TTFT and ITL in the output - 
expected_content = ( - " LLM Metrics \n" - "┏━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━┳━━━━━━┳━━━━━━┳━━━━━━┳━━━━━━┳━━━━━━┓\n" - "┃ Statistic ┃ avg ┃ min ┃ max ┃ p99 ┃ p90 ┃ p75 ┃\n" - "┡━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━╇━━━━━━╇━━━━━━╇━━━━━━╇━━━━━━╇━━━━━━┩\n" - "│ Request latency (ms) │ 5.00 │ 4.00 │ 6.00 │ 5.98 │ 5.80 │ 5.50 │\n" - "│ Output sequence length │ 2.00 │ 1.00 │ 3.00 │ 2.98 │ 2.80 │ 2.50 │\n" - "│ Input sequence length │ 6.00 │ 5.00 │ 7.00 │ 6.98 │ 6.80 │ 6.50 │\n" - "└────────────────────────┴──────┴──────┴──────┴──────┴──────┴──────┘\n" - "Output token throughput (per sec): 456.00\n" - "Request throughput (per sec): 123.00\n" - ) - - returned_data = capsys.readouterr().out - assert returned_data == expected_content - - def test_embedding_output(self, monkeypatch, capsys) -> None: - argv = [ - "genai-perf", - "profile", - "-m", - "model_name", - "--service-kind", - "openai", - "--endpoint-type", - "embeddings", - ] - monkeypatch.setattr("sys.argv", argv) - args, _ = parser.parse_args() - - metrics = Metrics( - request_throughputs=[123], - request_latencies=[4, 5, 6], - ) - stats = Statistics(metrics=metrics) - - config = ExporterConfig() - config.stats = stats.stats_dict - config.metrics = stats.metrics - config.args = args - - exporter = ConsoleExporter(config) - exporter.export() - - expected_content = ( - " Embeddings Metrics \n" - "┏━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━┳━━━━━━┳━━━━━━┳━━━━━━┳━━━━━━┳━━━━━━┓\n" - "┃ Statistic ┃ avg ┃ min ┃ max ┃ p99 ┃ p90 ┃ p75 ┃\n" - "┡━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━╇━━━━━━╇━━━━━━╇━━━━━━╇━━━━━━╇━━━━━━┩\n" - "│ Request latency (ms) │ 5.00 │ 4.00 │ 6.00 │ 5.98 │ 5.80 │ 5.50 │\n" - "└──────────────────────┴──────┴──────┴──────┴──────┴──────┴──────┘\n" - "Request throughput (per sec): 123.00\n" - ) - - returned_data = capsys.readouterr().out - assert returned_data == expected_content diff --git a/src/c++/perf_analyzer/genai-perf/tests/test_csv_exporter.py b/src/c++/perf_analyzer/genai-perf/tests/test_csv_exporter.py deleted file mode 100644 index 6a60bc2dc..000000000 --- a/src/c++/perf_analyzer/genai-perf/tests/test_csv_exporter.py +++ /dev/null @@ -1,213 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from io import StringIO -from pathlib import Path -from typing import Any, List - -import pytest -from genai_perf import parser -from genai_perf.export_data.csv_exporter import CsvExporter -from genai_perf.export_data.exporter_config import ExporterConfig -from genai_perf.metrics import LLMMetrics, Metrics, Statistics - - -class TestCsvExporter: - @pytest.fixture - def mock_read_write(self, monkeypatch: pytest.MonkeyPatch) -> List[str]: - """ - This function will mock the open function for specific files. - """ - - written_data = [] - - original_open = open - - def custom_open(filename, *args, **kwargs): - def write(self: Any, content: str) -> int: - written_data.append(content) - return len(content) - - if str(filename) == "profile_export_genai_perf.csv": - tmp_file = StringIO() - tmp_file.write = write.__get__(tmp_file) - return tmp_file - else: - return original_open(filename, *args, **kwargs) - - monkeypatch.setattr("builtins.open", custom_open) - - return written_data - - def test_streaming_llm_csv_output( - self, monkeypatch, mock_read_write: pytest.MonkeyPatch - ) -> None: - """ - Collect LLM metrics from profile export data and confirm correct values are - printed in csv. - """ - argv = [ - "genai-perf", - "profile", - "-m", - "model_name", - "--service-kind", - "openai", - "--endpoint-type", - "chat", - "--streaming", - ] - monkeypatch.setattr("sys.argv", argv) - args, _ = parser.parse_args() - - metrics = LLMMetrics( - request_throughputs=[123], - request_latencies=[4, 5, 6], - time_to_first_tokens=[7, 8, 9], - inter_token_latencies=[10, 11, 12], - output_token_throughputs=[456], - output_sequence_lengths=[1, 2, 3], - input_sequence_lengths=[5, 6, 7], - ) - stats = Statistics(metrics=metrics) - - config = ExporterConfig() - config.stats = stats.stats_dict - config.metrics = stats.metrics - config.artifact_dir = Path(".") - config.args = args - - exporter = CsvExporter(config) - exporter.export() - - expected_content = [ - "Metric,avg,min,max,p99,p95,p90,p75,p50,p25\r\n", - "Time To First Token (ms),8.00,7.00,9.00,8.98,8.90,8.80,8.50,8.00,7.50\r\n", - "Inter Token Latency (ms),11.00,10.00,12.00,11.98,11.90,11.80,11.50,11.00,10.50\r\n", - "Request Latency (ms),5.00,4.00,6.00,5.98,5.90,5.80,5.50,5.00,4.50\r\n", - "Output Sequence Length,2.00,1.00,3.00,2.98,2.90,2.80,2.50,2.00,1.50\r\n", - "Input Sequence Length,6.00,5.00,7.00,6.98,6.90,6.80,6.50,6.00,5.50\r\n", - "\r\n", - "Metric,Value\r\n", - "Output Token Throughput (per sec),456.00\r\n", - "Request Throughput (per sec),123.00\r\n", - ] - returned_data = mock_read_write - assert returned_data == expected_content - - def test_nonstreaming_llm_csv_output( - self, monkeypatch, mock_read_write: pytest.MonkeyPatch - ) -> None: - """ - Collect LLM metrics from profile export data and confirm correct values are - printed in csv. 
- """ - argv = [ - "genai-perf", - "profile", - "-m", - "model_name", - "--service-kind", - "openai", - "--endpoint-type", - "chat", - ] - monkeypatch.setattr("sys.argv", argv) - args, _ = parser.parse_args() - - metrics = LLMMetrics( - request_throughputs=[123], - request_latencies=[4, 5, 6], - time_to_first_tokens=[4, 5, 6], # same as request_latency - inter_token_latencies=[], # no ITL - output_token_throughputs=[456], - output_sequence_lengths=[1, 2, 3], - input_sequence_lengths=[5, 6, 7], - ) - stats = Statistics(metrics=metrics) - - config = ExporterConfig() - config.stats = stats.stats_dict - config.metrics = stats.metrics - config.artifact_dir = Path(".") - config.args = args - - exporter = CsvExporter(config) - exporter.export() - - expected_content = [ - "Metric,avg,min,max,p99,p95,p90,p75,p50,p25\r\n", - "Request Latency (ms),5.00,4.00,6.00,5.98,5.90,5.80,5.50,5.00,4.50\r\n", - "Output Sequence Length,2.00,1.00,3.00,2.98,2.90,2.80,2.50,2.00,1.50\r\n", - "Input Sequence Length,6.00,5.00,7.00,6.98,6.90,6.80,6.50,6.00,5.50\r\n", - "\r\n", - "Metric,Value\r\n", - "Output Token Throughput (per sec),456.00\r\n", - "Request Throughput (per sec),123.00\r\n", - ] - returned_data = mock_read_write - assert returned_data == expected_content - - def test_embedding_csv_output( - self, monkeypatch, mock_read_write: pytest.MonkeyPatch - ) -> None: - argv = [ - "genai-perf", - "profile", - "-m", - "model_name", - "--service-kind", - "openai", - "--endpoint-type", - "embeddings", - ] - monkeypatch.setattr("sys.argv", argv) - args, _ = parser.parse_args() - - metrics = Metrics( - request_throughputs=[123], - request_latencies=[4, 5, 6], - ) - stats = Statistics(metrics=metrics) - - config = ExporterConfig() - config.stats = stats.stats_dict - config.metrics = stats.metrics - config.artifact_dir = Path(".") - config.args = args - - exporter = CsvExporter(config) - exporter.export() - - expected_content = [ - "Metric,avg,min,max,p99,p95,p90,p75,p50,p25\r\n", - "Request Latency (ms),5.00,4.00,6.00,5.98,5.90,5.80,5.50,5.00,4.50\r\n", - "\r\n", - "Metric,Value\r\n", - "Request Throughput (per sec),123.00\r\n", - ] - returned_data = mock_read_write - assert returned_data == expected_content diff --git a/src/c++/perf_analyzer/genai-perf/tests/test_data_exporter_factory.py b/src/c++/perf_analyzer/genai-perf/tests/test_data_exporter_factory.py deleted file mode 100644 index 1a1628ac7..000000000 --- a/src/c++/perf_analyzer/genai-perf/tests/test_data_exporter_factory.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -from argparse import Namespace - -import genai_perf.export_data.data_exporter_factory as factory -from genai_perf.export_data.console_exporter import ConsoleExporter -from genai_perf.export_data.csv_exporter import CsvExporter -from genai_perf.export_data.exporter_config import ExporterConfig -from genai_perf.export_data.json_exporter import JsonExporter -from genai_perf.parser import get_extra_inputs_as_dict - - -class TestOutputReporter: - stats = { - "request_latency": { - "unit": "ms", - "avg": 1, - "p99": 2, - "p95": 3, - "p90": 4, - "p75": 5, - "p50": 6, - "p25": 7, - "max": 8, - "min": 9, - "std": 0, - }, - } - args = { - "model": ["gpt2_vllm"], - "formatted_model_name": "gpt2_vllm", - "model_selection_strategy": "round_robin", - "func": "Should_be_removed", - "output_format": "Should_be_removed", - "profile_export_file": ".", - "artifact_dir": ".", - "extra_inputs": ["max_tokens:200"], - } - args_namespace = Namespace(**args) - - config = ExporterConfig() - config.stats = stats - config.args = args_namespace - config.artifact_dir = args_namespace.artifact_dir - config.extra_inputs = get_extra_inputs_as_dict(args_namespace) - f = factory.DataExporterFactory() - - def test_return_json_exporter(self) -> None: - exporter_list = self.f.create_data_exporters(self.config) - assert any(isinstance(exporter, JsonExporter) for exporter in exporter_list) - - def test_return_csv_exporter(self) -> None: - exporter_list = self.f.create_data_exporters(self.config) - assert any(isinstance(exporter, CsvExporter) for exporter in exporter_list) - - def test_return_console_exporter(self) -> None: - exporter_list = self.f.create_data_exporters(self.config) - assert any(isinstance(exporter, ConsoleExporter) for exporter in exporter_list) diff --git a/src/c++/perf_analyzer/genai-perf/tests/test_json_exporter.py b/src/c++/perf_analyzer/genai-perf/tests/test_json_exporter.py deleted file mode 100644 index f82e59312..000000000 --- a/src/c++/perf_analyzer/genai-perf/tests/test_json_exporter.py +++ /dev/null @@ -1,274 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. 
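The factory tests above only assert that each exporter type appears in the returned list; the point of the pattern is that callers never branch on concrete types. A sketch of how such a list is typically consumed (the real OutputReporter may differ):

from genai_perf.export_data.data_exporter_factory import DataExporterFactory


def report_all(config) -> None:
    # Every exporter shares the same export() entry point, so the caller
    # stays agnostic of whether it is writing JSON, CSV, or console output.
    for exporter in DataExporterFactory().create_data_exporters(config):
        exporter.export()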
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import json - -import genai_perf.parser as parser -from genai_perf.export_data.exporter_config import ExporterConfig -from genai_perf.export_data.json_exporter import JsonExporter - - -class TestJsonExporter: - def test_generate_json(self, monkeypatch) -> None: - cli_cmd = [ - "genai-perf", - "profile", - "-m", - "gpt2_vllm", - "--backend", - "vllm", - "--streaming", - "--extra-inputs", - "max_tokens:256", - "--extra-inputs", - "ignore_eos:true", - ] - monkeypatch.setattr("sys.argv", cli_cmd) - args, _ = parser.parse_args() - config = ExporterConfig() - config.stats = self.stats - config.args = args - config.extra_inputs = parser.get_extra_inputs_as_dict(args) - config.artifact_dir = args.artifact_dir - json_exporter = JsonExporter(config) - assert json_exporter._stats_and_args == json.loads(self.expected_json_output) - - stats = { - "request_throughput": {"unit": "requests/sec", "avg": "7"}, - "request_latency": { - "unit": "ms", - "avg": 1, - "p99": 2, - "p95": 3, - "p90": 4, - "p75": 5, - "p50": 6, - "p25": 7, - "max": 8, - "min": 9, - "std": 0, - }, - "time_to_first_token": { - "unit": "ms", - "avg": 11, - "p99": 12, - "p95": 13, - "p90": 14, - "p75": 15, - "p50": 16, - "p25": 17, - "max": 18, - "min": 19, - "std": 10, - }, - "inter_token_latency": { - "unit": "ms", - "avg": 21, - "p99": 22, - "p95": 23, - "p90": 24, - "p75": 25, - "p50": 26, - "p25": 27, - "max": 28, - "min": 29, - "std": 20, - }, - "output_token_throughput": { - "unit": "tokens/sec", - "avg": 31, - }, - "output_token_throughput_per_request": { - "unit": "tokens/sec", - "avg": 41, - "p99": 42, - "p95": 43, - "p90": 44, - "p75": 45, - "p50": 46, - "p25": 47, - "max": 48, - "min": 49, - "std": 40, - }, - "output_sequence_length": { - "unit": "tokens", - "avg": 51, - "p99": 52, - "p95": 53, - "p90": 54, - "p75": 55, - "p50": 56, - "p25": 57, - "max": 58, - "min": 59, - "std": 50, - }, - "input_sequence_length": { - "unit": "tokens", - "avg": 61, - "p99": 62, - "p95": 63, - "p90": 64, - "p75": 65, - "p50": 66, - "p25": 67, - "max": 68, - "min": 69, - "std": 60, - }, - } - - expected_json_output = """ - { - "request_throughput": { - "unit": "requests/sec", - "avg": "7" - }, - "request_latency": { - "unit": "ms", - "avg": 1, - "p99": 2, - "p95": 3, - "p90": 4, - "p75": 5, - "p50": 6, - "p25": 7, - "max": 8, - "min": 9, - "std": 0 - }, - "time_to_first_token": { - "unit": "ms", - "avg": 11, - "p99": 12, - "p95": 13, - "p90": 14, - "p75": 15, - "p50": 16, - "p25": 17, - "max": 18, - "min": 19, - "std": 10 - }, - "inter_token_latency": { - "unit": "ms", - "avg": 21, - "p99": 22, - "p95": 23, - "p90": 24, - "p75": 25, - "p50": 26, - "p25": 27, - "max": 28, - "min": 29, - "std": 20 - }, - "output_token_throughput": { - "unit": "tokens/sec", - "avg": 31 - }, - 
"output_token_throughput_per_request": { - "unit": "tokens/sec", - "avg": 41, - "p99": 42, - "p95": 43, - "p90": 44, - "p75": 45, - "p50": 46, - "p25": 47, - "max": 48, - "min": 49, - "std": 40 - }, - "output_sequence_length": { - "unit": "tokens", - "avg": 51, - "p99": 52, - "p95": 53, - "p90": 54, - "p75": 55, - "p50": 56, - "p25": 57, - "max": 58, - "min": 59, - "std": 50 - }, - "input_sequence_length": { - "unit": "tokens", - "avg": 61, - "p99": 62, - "p95": 63, - "p90": 64, - "p75": 65, - "p50": 66, - "p25": 67, - "max": 68, - "min": 69, - "std": 60 - }, - "input_config": { - "model": ["gpt2_vllm"], - "formatted_model_name": "gpt2_vllm", - "model_selection_strategy": "round_robin", - "backend": "vllm", - "batch_size": 1, - "endpoint": null, - "endpoint_type": null, - "service_kind": "triton", - "streaming": true, - "u": null, - "input_dataset": null, - "num_prompts": 100, - "output_tokens_mean": -1, - "output_tokens_mean_deterministic": false, - "output_tokens_stddev": 0, - "random_seed": 0, - "synthetic_input_tokens_mean": 550, - "synthetic_input_tokens_stddev": 0, - "image_width_mean": 100, - "image_width_stddev": 0, - "image_height_mean": 100, - "image_height_stddev": 0, - "image_format": null, - "concurrency": 1, - "measurement_interval": 10000, - "request_rate": null, - "stability_percentage": 999, - "generate_plots": false, - "profile_export_file": "artifacts/gpt2_vllm-triton-vllm-concurrency1/profile_export.json", - "artifact_dir": "artifacts/gpt2_vllm-triton-vllm-concurrency1", - "tokenizer": "hf-internal-testing/llama-tokenizer", - "verbose": false, - "subcommand": "profile", - "prompt_source": "synthetic", - "extra_inputs": { - "max_tokens": 256, - "ignore_eos": true - } - } - } - """ diff --git a/src/c++/perf_analyzer/genai-perf/tests/test_library.py b/src/c++/perf_analyzer/genai-perf/tests/test_library.py deleted file mode 100644 index 09cd13d45..000000000 --- a/src/c++/perf_analyzer/genai-perf/tests/test_library.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -import genai_perf - - -# Placeholder to add real tests in the future -def test_version(): - print(genai_perf.__version__) diff --git a/src/c++/perf_analyzer/genai-perf/tests/test_llm_inputs.py b/src/c++/perf_analyzer/genai-perf/tests/test_llm_inputs.py deleted file mode 100644 index 028e72849..000000000 --- a/src/c++/perf_analyzer/genai-perf/tests/test_llm_inputs.py +++ /dev/null @@ -1,882 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import json -import os -import random -import statistics -from collections import namedtuple -from pathlib import Path -from unittest.mock import mock_open, patch - -import pytest -import responses -from genai_perf import tokenizer -from genai_perf.constants import CNN_DAILY_MAIL, DEFAULT_INPUT_DATA_JSON, OPEN_ORCA -from genai_perf.exceptions import GenAIPerfException -from genai_perf.llm_inputs.llm_inputs import ( - LlmInputs, - ModelSelectionStrategy, - OutputFormat, - PromptSource, -) -from genai_perf.llm_inputs.synthetic_image_generator import ImageFormat -from genai_perf.tokenizer import DEFAULT_TOKENIZER, get_tokenizer -from PIL import Image - -mocked_openorca_data = { - "features": [ - {"feature_idx": 0, "name": "id", "type": {"dtype": "string", "_type": "Value"}}, - { - "feature_idx": 1, - "name": "system_prompt", - "type": {"dtype": "string", "_type": "Value"}, - }, - { - "feature_idx": 2, - "name": "question", - "type": {"dtype": "string", "_type": "Value"}, - }, - { - "feature_idx": 3, - "name": "response", - "type": {"dtype": "string", "_type": "Value"}, - }, - ], - "rows": [ - { - "row_idx": 0, - "row": { - "id": "niv.242684", - "system_prompt": "", - "question": "You will be given a definition of a task first, then some input of the task.\\nThis task is about using the specified sentence and converting the sentence to Resource Description Framework (RDF) triplets of the form (subject, predicate object). The RDF triplets generated must be such that the triplets accurately capture the structure and semantics of the input sentence. The input is a sentence and the output is a list of triplets of the form [subject, predicate, object] that capture the relationships present in the sentence. 
When a sentence has more than 1 RDF triplet possible, the output must contain all of them.\\n\\nAFC Ajax (amateurs)'s ground is Sportpark De Toekomst where Ajax Youth Academy also play.\\nOutput:", - "response": '[\\n ["AFC Ajax (amateurs)", "has ground", "Sportpark De Toekomst"],\\n ["Ajax Youth Academy", "plays at", "Sportpark De Toekomst"]\\n]', - }, - "truncated_cells": [], - } - ], - "num_rows_total": 2914896, - "num_rows_per_page": 100, - "partial": True, -} - -TEST_LENGTH = 1 - - -class TestLlmInputs: - # Define service kind, backend or api, and output format combinations - SERVICE_KIND_BACKEND_ENDPOINT_TYPE_FORMATS = [ - ("triton", "vllm", OutputFormat.VLLM), - ("triton", "tensorrtllm", OutputFormat.TENSORRTLLM), - ("openai", "v1/completions", OutputFormat.OPENAI_COMPLETIONS), - ("openai", "v1/chat/completions", OutputFormat.OPENAI_CHAT_COMPLETIONS), - ("openai", "v1/chat/completions", OutputFormat.OPENAI_VISION), - ] - - @pytest.fixture - def default_configured_url(self): - default_configured_url = LlmInputs._create_configured_url( - LlmInputs.OPEN_ORCA_URL, - LlmInputs.DEFAULT_STARTING_INDEX, - LlmInputs.DEFAULT_LENGTH, - ) - - yield default_configured_url - - # TODO (TMA-1754): Add tests that verify json schemas - @pytest.fixture(scope="class") - def default_tokenizer(self): - yield tokenizer.get_tokenizer(tokenizer.DEFAULT_TOKENIZER) - - def test_input_type_url_no_dataset_name(self): - """ - Test for exception when input type is URL and no dataset name - """ - with pytest.raises(GenAIPerfException): - _ = LlmInputs._check_for_dataset_name_if_input_type_is_url( - input_type=PromptSource.DATASET, dataset_name="" - ) - - def test_input_type_synthetic_no_tokenizer(self): - """ - Test for exception when input type is SYNTHETIC and no tokenizer - """ - with pytest.raises(GenAIPerfException): - _ = LlmInputs._check_for_tokenzier_if_input_type_is_synthetic( - input_type=PromptSource.SYNTHETIC, tokenizer=None # type: ignore - ) - - def test_illegal_starting_index(self): - """ - Test for exceptions when illegal values are given for starting index - """ - with pytest.raises(GenAIPerfException): - _ = LlmInputs._check_for_valid_starting_index(starting_index="foo") # type: ignore - - with pytest.raises(GenAIPerfException): - _ = LlmInputs._check_for_valid_starting_index(starting_index=-1) - - def test_illegal_length(self): - """ - Test for exceptions when illegal values are given for length - """ - with pytest.raises(GenAIPerfException): - _ = LlmInputs._check_for_valid_length(length="foo") # type: ignore - - with pytest.raises(GenAIPerfException): - _ = LlmInputs._check_for_valid_length(length=0) - - def test_create_configured_url(self): - """ - Test that we are appending and configuring the URL correctly - """ - expected_configured_url = ( - "http://test-url.com" - + f"&offset={LlmInputs.DEFAULT_STARTING_INDEX}" - + f"&length={LlmInputs.DEFAULT_LENGTH}" - ) - configured_url = LlmInputs._create_configured_url( - "http://test-url.com", - LlmInputs.DEFAULT_STARTING_INDEX, - LlmInputs.DEFAULT_LENGTH, - ) - - assert configured_url == expected_configured_url - - def test_download_dataset_illegal_url(self): - """ - Test for exception when URL is bad - """ - with pytest.raises(GenAIPerfException): - _ = LlmInputs._download_dataset( - "https://bad-url.zzz", - ) - - def test_llm_inputs_error_in_server_response(self): - """ - Test for exception when length is out of range - """ - with pytest.raises(GenAIPerfException): - _ = LlmInputs.create_llm_inputs( - input_type=PromptSource.DATASET, - 
dataset_name=OPEN_ORCA, - output_format=OutputFormat.OPENAI_CHAT_COMPLETIONS, - starting_index=LlmInputs.DEFAULT_STARTING_INDEX, - length=int(LlmInputs.DEFAULT_LENGTH * 100), - ) - - @responses.activate - def test_llm_inputs_with_defaults(self, default_configured_url): - """ - Test that default options work - """ - responses.add( - responses.GET, - f"{default_configured_url}", - json=mocked_openorca_data, - status=200, - ) - - dataset = LlmInputs._download_dataset( - default_configured_url, - ) - dataset_json = LlmInputs._convert_input_url_dataset_to_generic_json( - dataset=dataset - ) - - assert dataset_json is not None - assert len(dataset_json["rows"]) == TEST_LENGTH - - # TODO (TPA-114) Refactor LLM inputs and testing - # def test_llm_inputs_with_non_default_length(self): - # """ - # Test that non-default length works - # """ - # configured_url = LlmInputs._create_configured_url( - # LlmInputs.OPEN_ORCA_URL, - # LlmInputs.DEFAULT_STARTING_INDEX, - # (int(LlmInputs.DEFAULT_LENGTH / 2)), - # ) - # dataset = LlmInputs._download_dataset( - # configured_url, - # ) - # dataset_json = LlmInputs._convert_input_url_dataset_to_generic_json( - # dataset=dataset - # ) - - # assert dataset_json is not None - # assert len(dataset_json["rows"]) == LlmInputs.DEFAULT_LENGTH / 2 - - # def test_convert_default_json_to_pa_format(self, default_configured_url): - # """ - # Test that conversion to PA JSON format is correct - # """ - # dataset = LlmInputs._download_dataset( - # default_configured_url, - # ) - # dataset_json = LlmInputs._convert_input_url_dataset_to_generic_json( - # dataset=dataset - # ) - # pa_json = LlmInputs._convert_generic_json_to_output_format( - # output_format=OutputFormat.OPENAI_CHAT_COMPLETIONS, - # generic_dataset=dataset_json, - # add_model_name=False, - # add_stream=False, - # extra_inputs={}, - # output_tokens_mean=LlmInputs.DEFAULT_OUTPUT_TOKENS_MEAN, - # output_tokens_stddev=LlmInputs.DEFAULT_OUTPUT_TOKENS_STDDEV, - # output_tokens_deterministic=False, - # model_name=["test_model_A"], - # ) - - # assert pa_json is not None - # assert len(pa_json["data"]) == LlmInputs.DEFAULT_LENGTH - - # def test_create_openai_llm_inputs_cnn_dailymail(self): - # """ - # Test CNN_DAILYMAIL can be accessed - # """ - # pa_json = LlmInputs.create_llm_inputs( - # input_type=PromptSource.DATASET, - # dataset_name=CNN_DAILY_MAIL, - # output_format=OutputFormat.OPENAI_CHAT_COMPLETIONS, - # model_name=["test_model_A"], - # ) - - # os.remove(DEFAULT_INPUT_DATA_JSON) - - # assert pa_json is not None - # assert len(pa_json["data"]) == LlmInputs.DEFAULT_LENGTH - - # def test_write_to_file(self): - # """ - # Test that write to file is working correctly - # """ - # pa_json = LlmInputs.create_llm_inputs( - # input_type=PromptSource.DATASET, - # dataset_name=OPEN_ORCA, - # output_format=OutputFormat.OPENAI_CHAT_COMPLETIONS, - # model_name="open_orca", - # add_model_name=True, - # add_stream=True, - # ) - # try: - # with open(DEFAULT_INPUT_DATA_JSON, "r") as f: - # json_str = f.read() - # finally: - # os.remove(DEFAULT_INPUT_DATA_JSON) - - # assert pa_json == json.loads(json_str) - - # def test_create_openai_to_vllm(self): - # """ - # Test conversion of openai to vllm - # """ - # pa_json = LlmInputs.create_llm_inputs( - # input_type=PromptSource.DATASET, - # output_format=OutputFormat.VLLM, - # dataset_name=OPEN_ORCA, - # add_model_name=False, - # add_stream=True, - # model_name=["test_model_A"], - # ) - - # os.remove(DEFAULT_INPUT_DATA_JSON) - - # assert pa_json is not None - # assert len(pa_json["data"]) == 
LlmInputs.DEFAULT_LENGTH - - # def test_create_openai_to_completions(self): - # """ - # Test conversion of openai to completions - # """ - # pa_json = LlmInputs.create_llm_inputs( - # input_type=PromptSource.DATASET, - # output_format=OutputFormat.OPENAI_COMPLETIONS, - # dataset_name=OPEN_ORCA, - # add_model_name=False, - # add_stream=True, - # model_name=["test_model_A"], - # ) - - # os.remove(DEFAULT_INPUT_DATA_JSON) - - # assert pa_json is not None - # assert len(pa_json["data"]) == LlmInputs.DEFAULT_LENGTH - # # NIM legacy completion endpoint only supports string and not - # # array of strings. Verify that the prompt is of type string - # # not list - # assert isinstance(pa_json["data"][0]["payload"][0]["prompt"], str) - - # def test_create_openai_to_trtllm(self): - # """ - # Test conversion of openai to trtllm - # """ - # pa_json = LlmInputs.create_llm_inputs( - # input_type=PromptSource.DATASET, - # output_format=OutputFormat.TENSORRTLLM, - # dataset_name=OPEN_ORCA, - # add_model_name=False, - # add_stream=True, - # model_name=["test_model_A"], - # ) - - # os.remove(DEFAULT_INPUT_DATA_JSON) - - # assert pa_json is not None - # assert len(pa_json["data"]) == LlmInputs.DEFAULT_LENGTH - - # def test_random_synthetic_no_stddev(self, default_tokenizer): - # """ - # Test that we can produce an exact number of random synthetic tokens - # """ - # random.seed(1) - - # def _subtest(token_length): - # synthetic_prompt = LlmInputs._create_synthetic_prompt( - # tokenizer=default_tokenizer, - # prompt_tokens_mean=token_length, - # prompt_tokens_stddev=0, - # ) - - # actual_token_length = len(default_tokenizer.encode(synthetic_prompt)) - # assert token_length == actual_token_length - - # # Test all of 500-600 to make sure exact - # for i in range(500, 600): - # _subtest(i) - - # # Test some larger values - # _subtest(1500) - # _subtest(10000) - - # def test_random_synthetic_stddev(self, default_tokenizer): - # """ - # Test that we can produce random synthetic tokens within a requested stddev - # """ - # random.seed(1) - - # def _subtest(num_samples, mean, stddev): - # prompt_tokens = [] - # for _ in range(num_samples): - # prompt = LlmInputs._create_synthetic_prompt( - # tokenizer=default_tokenizer, - # prompt_tokens_mean=mean, - # prompt_tokens_stddev=stddev, - # ) - # prompt_tokens.append(len(default_tokenizer.encode(prompt))) - - # assert statistics.mean(prompt_tokens) == pytest.approx(mean, rel=0.1) - # assert statistics.stdev(prompt_tokens) == pytest.approx(stddev, rel=0.2) - - # _subtest(50, 200, 20) - # _subtest(50, 400, 10) - # _subtest(200, 50, 10) - - # def test_random_seed(self, default_tokenizer): - # """ - # Test that when given the same seed, create_llm_inputs will return the same result, - # and that when given a different seed, it will produce a different result - # """ - - # inputs_seed5_a = LlmInputs.create_llm_inputs( - # tokenizer=default_tokenizer, - # input_type=PromptSource.SYNTHETIC, - # output_format=OutputFormat.TENSORRTLLM, - # prompt_tokens_mean=300, - # prompt_tokens_stddev=20, - # num_of_output_prompts=5, - # random_seed=5, - # model_name=["test_model_A"], - # ) - - # inputs_seed5_b = LlmInputs.create_llm_inputs( - # tokenizer=default_tokenizer, - # input_type=PromptSource.SYNTHETIC, - # output_format=OutputFormat.TENSORRTLLM, - # prompt_tokens_mean=300, - # prompt_tokens_stddev=20, - # num_of_output_prompts=5, - # random_seed=5, - # model_name=["test_model_A"], - # ) - - # inputs_seed10 = LlmInputs.create_llm_inputs( - # tokenizer=default_tokenizer, - # 
input_type=PromptSource.SYNTHETIC, - # output_format=OutputFormat.TENSORRTLLM, - # prompt_tokens_mean=300, - # prompt_tokens_stddev=20, - # num_of_output_prompts=5, - # random_seed=10, - # model_name=["test_model_A"], - # ) - - # assert inputs_seed5_a == inputs_seed5_b - # assert inputs_seed5_a != inputs_seed10 - - # def test_synthetic_to_vllm(self, default_tokenizer): - # """ - # Test generating synthetic prompts and converting to vllm - # """ - # pa_json = LlmInputs.create_llm_inputs( - # input_type=PromptSource.SYNTHETIC, - # output_format=OutputFormat.VLLM, - # num_of_output_prompts=5, - # add_model_name=False, - # add_stream=True, - # tokenizer=default_tokenizer, - # model_name=["test_model_A"], - # ) - - # os.remove(DEFAULT_INPUT_DATA_JSON) - - # assert pa_json is not None - # assert len(pa_json["data"]) == 5 - - # def test_synthetic_to_trtllm(self, default_tokenizer): - # """ - # Test generating synthetic prompts and converting to trtllm - # """ - # pa_json = LlmInputs.create_llm_inputs( - # input_type=PromptSource.SYNTHETIC, - # output_format=OutputFormat.TENSORRTLLM, - # num_of_output_prompts=5, - # add_model_name=False, - # add_stream=True, - # tokenizer=default_tokenizer, - # model_name=["test_model_A"], - # ) - - # os.remove(DEFAULT_INPUT_DATA_JSON) - - # assert pa_json is not None - # assert len(pa_json["data"]) == 5 - - # def test_synthetic_to_openai_chat_completions(self, default_tokenizer): - # """ - # Test generating synthetic prompts and converting to OpenAI chat completions - # """ - # pa_json = LlmInputs.create_llm_inputs( - # input_type=PromptSource.SYNTHETIC, - # output_format=OutputFormat.OPENAI_CHAT_COMPLETIONS, - # num_of_output_prompts=5, - # add_model_name=False, - # add_stream=True, - # tokenizer=default_tokenizer, - # model_name=["test_model_A"], - # ) - - # os.remove(DEFAULT_INPUT_DATA_JSON) - - # assert pa_json is not None - # assert len(pa_json["data"]) == 5 - - # def test_synthetic_to_openai_completions(self, default_tokenizer): - # """ - # Test generating synthetic prompts and converting to OpenAI completions - # """ - # pa_json = LlmInputs.create_llm_inputs( - # input_type=PromptSource.SYNTHETIC, - # output_format=OutputFormat.OPENAI_COMPLETIONS, - # num_of_output_prompts=5, - # add_model_name=False, - # add_stream=True, - # tokenizer=default_tokenizer, - # model_name=["test_model_A"], - # ) - - # os.remove(DEFAULT_INPUT_DATA_JSON) - - # assert pa_json is not None - # assert len(pa_json["data"]) == 5 - - # @pytest.mark.parametrize( - # "output_format", - # [format[2] for format in SERVICE_KIND_BACKEND_ENDPOINT_TYPE_FORMATS], - # ) - # def test_extra_inputs( - # self, default_tokenizer: Tokenizer, output_format: OutputFormat - # ) -> None: - # input_name = "max_tokens" - # input_value = 5 - # request_inputs = {input_name: input_value} - - # pa_json = LlmInputs.create_llm_inputs( - # input_type=PromptSource.SYNTHETIC, - # output_format=output_format, - # num_of_output_prompts=5, - # add_model_name=False, - # add_stream=True, - # tokenizer=default_tokenizer, - # extra_inputs=request_inputs, - # model_name=["test_model_A"], - # ) - - # assert len(pa_json["data"]) == 5 - - # if ( - # output_format == OutputFormat.OPENAI_CHAT_COMPLETIONS - # or output_format == OutputFormat.OPENAI_COMPLETIONS - # ): - # for entry in pa_json["data"]: - # assert "payload" in entry, "Payload is missing in the request" - # payload = entry["payload"] - # for item in payload: - # assert ( - # input_name in item - # ), f"The input name {input_name} is not present in the request" - # 
assert ( - # item[input_name] == input_value - # ), f"The value of {input_name} is incorrect" - # elif ( - # output_format == OutputFormat.TENSORRTLLM - # or output_format == OutputFormat.VLLM - # ): - # for entry in pa_json["data"]: - # assert ( - # input_name in entry - # ), f"The {input_name} is not present in the request" - # assert entry[input_name] == [ - # input_value - # ], f"The value of {input_name} is incorrect" - # else: - # assert False, f"Unsupported output format: {output_format}" - - def test_add_image_inputs_openai_vision(self) -> None: - generic_json = { - "rows": [ - {"text_input": "test input one", "image": "test_image1"}, - {"text_input": "test input two", "image": "test_image2"}, - ] - } - - generic_json = LlmInputs._convert_to_openai_multi_modal_content(generic_json) - - row1 = generic_json["rows"][0]["text_input"] - assert row1 == [ - { - "type": "text", - "text": "test input one", - }, - { - "type": "image_url", - "image_url": {"url": "test_image1"}, - }, - ] - - row2 = generic_json["rows"][1]["text_input"] - assert row2 == [ - { - "type": "text", - "text": "test input two", - }, - { - "type": "image_url", - "image_url": {"url": "test_image2"}, - }, - ] - - @patch( - "genai_perf.llm_inputs.llm_inputs.LlmInputs._create_synthetic_prompt", - return_value="This is test prompt", - ) - @patch( - "genai_perf.llm_inputs.llm_inputs.LlmInputs._create_synthetic_image", - return_value="test_image_base64", - ) - @pytest.mark.parametrize( - "output_format", - [ - OutputFormat.OPENAI_CHAT_COMPLETIONS, - OutputFormat.OPENAI_COMPLETIONS, - OutputFormat.OPENAI_EMBEDDINGS, - OutputFormat.RANKINGS, - OutputFormat.OPENAI_VISION, - OutputFormat.VLLM, - OutputFormat.TENSORRTLLM, - ], - ) - def test_get_input_dataset_from_synthetic( - self, mock_prompt, mock_image, output_format - ) -> None: - _placeholder = 123 # dummy value - num_prompts = 3 - - dataset_json = LlmInputs._get_input_dataset_from_synthetic( - tokenizer=get_tokenizer(DEFAULT_TOKENIZER), - prompt_tokens_mean=_placeholder, - prompt_tokens_stddev=_placeholder, - num_of_output_prompts=num_prompts, - image_width_mean=_placeholder, - image_width_stddev=_placeholder, - image_height_mean=_placeholder, - image_height_stddev=_placeholder, - image_format=ImageFormat.PNG, - output_format=output_format, - ) - - assert len(dataset_json["rows"]) == num_prompts - - for i in range(num_prompts): - row = dataset_json["rows"][i]["row"] - - if output_format == OutputFormat.OPENAI_VISION: - assert row == { - "text_input": "This is test prompt", - "image": "test_image_base64", - } - else: - assert row == { - "text_input": "This is test prompt", - } - - # def test_trtllm_default_max_tokens(self, default_tokenizer: Tokenizer) -> None: - # input_name = "max_tokens" - # input_value = 256 - - # pa_json = LlmInputs.create_llm_inputs( - # input_type=PromptSource.SYNTHETIC, - # output_format=OutputFormat.TENSORRTLLM, - # num_of_output_prompts=5, - # add_model_name=False, - # add_stream=True, - # tokenizer=default_tokenizer, - # model_name=["test_model_A"], - # ) - - # assert len(pa_json["data"]) == 5 - # for entry in pa_json["data"]: - # assert ( - # input_name in entry - # ), f"The {input_name} is not present in the request" - # assert entry[input_name] == [ - # input_value - # ], f"The value of {input_name} is incorrect" - - # @pytest.mark.parametrize( - # "output_format", - # [format[2] for format in SERVICE_KIND_BACKEND_ENDPOINT_TYPE_FORMATS], - # ) - # def test_output_tokens_mean(self, output_format, default_tokenizer): - # if ( - # output_format != 
OutputFormat.VLLM - # and output_format != OutputFormat.TENSORRTLLM - # ): - # return - - # output_tokens_mean = 100 - # output_tokens_stddev = 0 - # for deterministic in [True, False]: - # _ = LlmInputs.create_llm_inputs( - # input_type=PromptSource.SYNTHETIC, - # output_format=output_format, - # num_of_output_prompts=5, - # add_model_name=False, - # add_stream=True, - # tokenizer=default_tokenizer, - # output_tokens_mean=output_tokens_mean, - # output_tokens_stddev=output_tokens_stddev, - # output_tokens_deterministic=deterministic, - # model_name=["test_model_A"], - # ) - - # assert os.path.exists( - # DEFAULT_INPUT_DATA_JSON - # ), "llm_inputs.json file is not created" - - # with open(DEFAULT_INPUT_DATA_JSON, "r") as f: - # llm_inputs_data = json.load(f) - - # for entry in llm_inputs_data["data"]: - # if output_format == OutputFormat.VLLM: - # assert ( - # "sampling_parameters" in entry - # ), "sampling_parameters is missing in llm_inputs.json" - # sampling_parameters = json.loads(entry["sampling_parameters"][0]) - # assert ( - # "max_tokens" in sampling_parameters - # ), "max_tokens parameter is missing in sampling_parameters" - # assert sampling_parameters["max_tokens"] == str( - # output_tokens_mean - # ), "max_tokens parameter is not properly set" - # if deterministic: - # assert ( - # "min_tokens" in sampling_parameters - # ), "min_tokens parameter is missing in sampling_parameters" - # assert sampling_parameters["min_tokens"] == str( - # output_tokens_mean - # ), "min_tokens parameter is not properly set" - # else: - # assert ( - # "min_tokens" not in sampling_parameters - # ), "min_tokens parameter is present in sampling_parameters" - # elif output_format == OutputFormat.TENSORRTLLM: - # assert ( - # "max_tokens" in entry - # ), "max_tokens parameter is missing in llm_inputs.json" - # assert ( - # entry["max_tokens"][0] == output_tokens_mean - # ), "max_tokens parameter is not properly set" - # if deterministic: - # assert ( - # "min_length" in entry - # ), "min_length parameter is missing in llm_inputs.json" - # assert ( - # entry["min_length"][0] == output_tokens_mean - # ), "min_length parameter is not properly set" - # else: - # assert ( - # "min_length" not in entry - # ), "min_length parameter is present in llm_inputs.json" - # else: - # assert False, f"Unsupported output format: {output_format}" - - # os.remove(DEFAULT_INPUT_DATA_JSON) - - def test_get_input_file_without_file_existing(self): - with pytest.raises(FileNotFoundError): - LlmInputs._get_input_dataset_from_file(Path("prompt.txt")) - - @patch("pathlib.Path.exists", return_value=True) - @patch( - "builtins.open", - new_callable=mock_open, - read_data='{"text_input": "single prompt"}\n', - ) - def test_get_input_file_with_single_prompt(self, mock_file, mock_exists): - expected_prompts = ["single prompt"] - dataset = LlmInputs._get_input_dataset_from_file(Path("prompt.txt")) - - assert dataset is not None - assert len(dataset["rows"]) == len(expected_prompts) - for i, prompt in enumerate(expected_prompts): - assert dataset["rows"][i]["row"]["text_input"] == prompt - - @patch("pathlib.Path.exists", return_value=True) - @patch( - "builtins.open", - new_callable=mock_open, - read_data='{"text_input": "prompt1"}\n{"text_input": "prompt2"}\n{"text_input": "prompt3"}\n', - ) - def test_get_input_file_with_multiple_prompts(self, mock_file, mock_exists): - expected_prompts = ["prompt1", "prompt2", "prompt3"] - dataset = LlmInputs._get_input_dataset_from_file(Path("prompt.txt")) - - assert dataset is not None - assert 
len(dataset["rows"]) == len(expected_prompts) - for i, prompt in enumerate(expected_prompts): - assert dataset["rows"][i]["row"]["text_input"] == prompt - - @patch("pathlib.Path.exists", return_value=True) - @patch("PIL.Image.open", return_value=Image.new("RGB", (10, 10))) - @patch( - "builtins.open", - new_callable=mock_open, - read_data=( - '{"text_input": "prompt1", "image": "image1.png"}\n' - '{"text_input": "prompt2", "image": "image2.png"}\n' - '{"text_input": "prompt3", "image": "image3.png"}\n' - ), - ) - def test_get_input_file_with_multi_modal_data( - self, mock_exists, mock_image, mock_file - ): - Data = namedtuple("Data", ["text_input", "image"]) - expected_data = [ - Data(text_input="prompt1", image="image1.png"), - Data(text_input="prompt2", image="image2.png"), - Data(text_input="prompt3", image="image3.png"), - ] - dataset = LlmInputs._get_input_dataset_from_file(Path("somefile.txt")) - - assert dataset is not None - assert len(dataset["rows"]) == len(expected_data) - for i, data in enumerate(expected_data): - assert dataset["rows"][i]["row"]["text_input"] == data.text_input - assert dataset["rows"][i]["row"]["image"] == data.image - - @pytest.mark.parametrize( - "seed, model_name_list, index,model_selection_strategy,expected_model", - [ - ( - 1, - ["test_model_A", "test_model_B", "test_model_C"], - 0, - ModelSelectionStrategy.ROUND_ROBIN, - "test_model_A", - ), - ( - 1, - ["test_model_A", "test_model_B", "test_model_C"], - 1, - ModelSelectionStrategy.ROUND_ROBIN, - "test_model_B", - ), - ( - 1, - ["test_model_A", "test_model_B", "test_model_C"], - 2, - ModelSelectionStrategy.ROUND_ROBIN, - "test_model_C", - ), - ( - 1, - ["test_model_A", "test_model_B", "test_model_C"], - 3, - ModelSelectionStrategy.ROUND_ROBIN, - "test_model_A", - ), - ( - 100, - ["test_model_A", "test_model_B", "test_model_C"], - 0, - ModelSelectionStrategy.RANDOM, - "test_model_A", - ), - ( - 100, - ["test_model_A", "test_model_B", "test_model_C"], - 1, - ModelSelectionStrategy.RANDOM, - "test_model_A", - ), - ( - 1652, - ["test_model_A", "test_model_B", "test_model_C"], - 0, - ModelSelectionStrategy.RANDOM, - "test_model_B", - ), - ( - 95, - ["test_model_A", "test_model_B", "test_model_C"], - 0, - ModelSelectionStrategy.RANDOM, - "test_model_C", - ), - ], - ) - def test_select_model_name( - self, seed, model_name_list, index, model_selection_strategy, expected_model - ): - """ - Test that model selection strategy controls the model selected - """ - random.seed(seed) - - actual_model = LlmInputs._select_model_name( - model_name_list, index, model_selection_strategy - ) - assert actual_model == expected_model diff --git a/src/c++/perf_analyzer/genai-perf/tests/test_llm_inputs_embeddings.py b/src/c++/perf_analyzer/genai-perf/tests/test_llm_inputs_embeddings.py deleted file mode 100644 index 0cefa38a7..000000000 --- a/src/c++/perf_analyzer/genai-perf/tests/test_llm_inputs_embeddings.py +++ /dev/null @@ -1,172 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. 
-# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from pathlib import Path -from unittest.mock import mock_open, patch - -import pytest -from genai_perf.llm_inputs.llm_inputs import LlmInputs, ModelSelectionStrategy - - -class TestLlmInputsEmbeddings: - @patch("pathlib.Path.exists", return_value=True) - @patch( - "builtins.open", - new_callable=mock_open, - read_data="\n".join( - [ - '{"text": "What production company co-owned by Kevin Loader and Rodger Michell produced My Cousin Rachel?"}', - '{"text": "Who served as the 1st Vice President of Colombia under El Libertador?"}', - '{"text": "Are the Barton Mine and Hermiston-McCauley Mine located in The United States of America?"}', - '{"text": "what state did they film daddy\'s home 2"}', - ] - ), - ) - def test_get_input_dataset_from_embeddings_file(self, mock_file, mock_exists): - input_filename = Path("embeddings.jsonl") - batch_size = 3 - dataset = LlmInputs._get_input_dataset_from_embeddings_file( - input_filename, batch_size, num_prompts=100 - ) - - assert dataset is not None - assert len(dataset["rows"]) == 100 - for row in dataset["rows"]: - assert "row" in row - assert "payload" in row["row"] - payload = row["row"]["payload"] - assert "input" in payload - assert isinstance(payload["input"], list) - assert len(payload["input"]) == batch_size - - # Try error case where batch size is larger than the number of available texts - with pytest.raises( - ValueError, - match="Batch size cannot be larger than the number of available texts", - ): - LlmInputs._get_input_dataset_from_embeddings_file( - input_filename, 5, num_prompts=10 - ) - - def test_convert_generic_json_to_openai_embeddings_format(self): - generic_dataset = { - "rows": [ - {"payload": {"input": ["text 1", "text 2"]}}, - {"payload": {"input": ["text 3", "text 4"]}}, - ] - } - - expected_result = { - "data": [ - { - "payload": [ - { - "input": ["text 1", "text 2"], - "model": "test_model", - } - ] - }, - { - "payload": [ - { - "input": ["text 3", "text 4"], - "model": "test_model", - } - ] - }, - ] - } - - result = LlmInputs._convert_generic_json_to_openai_embeddings_format( - generic_dataset, - extra_inputs={}, - model_name=["test_model"], - model_selection_strategy=ModelSelectionStrategy.ROUND_ROBIN, - ) - - assert result is not None - assert "data" in result - assert len(result["data"]) == len(expected_result["data"]) - - for i, item in enumerate(expected_result["data"]): - assert "payload" in result["data"][i] - assert result["data"][i]["payload"] == item["payload"] - - def test_convert_generic_json_to_openai_embeddings_format_with_extra_inputs(self): - 
generic_dataset = { - "rows": [ - {"payload": {"input": ["text 1", "text 2"]}}, - {"payload": {"input": ["text 3", "text 4"]}}, - ] - } - - extra_inputs = { - "encoding_format": "base64", - "truncate": "END", - "additional_key": "additional_value", - } - - expected_result = { - "data": [ - { - "payload": [ - { - "input": ["text 1", "text 2"], - "model": "test_model", - "encoding_format": "base64", - "truncate": "END", - "additional_key": "additional_value", - } - ] - }, - { - "payload": [ - { - "input": ["text 3", "text 4"], - "model": "test_model", - "encoding_format": "base64", - "truncate": "END", - "additional_key": "additional_value", - } - ] - }, - ] - } - - result = LlmInputs._convert_generic_json_to_openai_embeddings_format( - generic_dataset, - extra_inputs=extra_inputs, - model_name=["test_model"], - model_selection_strategy=ModelSelectionStrategy.ROUND_ROBIN, - ) - - assert result is not None - assert "data" in result - assert len(result["data"]) == len(expected_result["data"]) - - for i, item in enumerate(expected_result["data"]): - assert "payload" in result["data"][i] - assert result["data"][i]["payload"] == item["payload"] diff --git a/src/c++/perf_analyzer/genai-perf/tests/test_llm_inputs_rankings.py b/src/c++/perf_analyzer/genai-perf/tests/test_llm_inputs_rankings.py deleted file mode 100644 index bfe2be482..000000000 --- a/src/c++/perf_analyzer/genai-perf/tests/test_llm_inputs_rankings.py +++ /dev/null @@ -1,182 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
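The embeddings conversion tests above verify one payload shape: each generic row becomes a single request carrying the batched "input" texts, the selected model, and any extra inputs merged in verbatim. A sketch of that construction (build_embeddings_payload is an illustrative helper, not LlmInputs code):

from typing import Any, Dict, List


def build_embeddings_payload(
    texts: List[str], model: str, extra_inputs: Dict[str, Any]
) -> Dict[str, Any]:
    payload = {"input": texts, "model": model}
    payload.update(extra_inputs)  # e.g. encoding_format, truncate
    return {"payload": [payload]}


row = build_embeddings_payload(
    ["text 1", "text 2"], "test_model", {"encoding_format": "base64"}
)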
- -from pathlib import Path -from unittest.mock import mock_open, patch - -import pytest -from genai_perf.llm_inputs.llm_inputs import LlmInputs, ModelSelectionStrategy - - -class TestLlmInputsRankings: - - def open_side_effects(filepath, *args, **kwargs): - queries_content = "\n".join( - [ - '{"text": "What production company co-owned by Kevin Loader and Rodger Michell produced My Cousin Rachel?"}', - '{"text": "Who served as the 1st Vice President of Colombia under El Libertador?"}', - '{"text": "Are the Barton Mine and Hermiston-McCauley Mine located in The United States of America?"}', - ] - ) - passages_content = "\n".join( - [ - '{"text": "Eric Anderson (sociologist) Eric Anderson (born January 18, 1968) is an American sociologist"}', - '{"text": "Kevin Loader is a British film and television producer. "}', - '{"text": "Barton Mine, also known as Net Lake Mine, is an abandoned surface and underground mine in Northeastern Ontario"}', - ] - ) - - file_contents = { - "queries.jsonl": queries_content, - "passages.jsonl": passages_content, - } - return mock_open( - read_data=file_contents.get(filepath, file_contents["queries.jsonl"]) - )() - - mock_open_obj = mock_open() - mock_open_obj.side_effect = open_side_effects - - @patch("pathlib.Path.exists", return_value=True) - @patch("builtins.open", mock_open_obj) - def test_get_input_dataset_from_rankings_file(self, mock_file): - queries_filename = Path("queries.jsonl") - passages_filename = Path("passages.jsonl") - batch_size = 2 - dataset = LlmInputs._get_input_dataset_from_rankings_files( - queries_filename, passages_filename, batch_size, num_prompts=100 - ) - - assert dataset is not None - assert len(dataset["rows"]) == 100 - for row in dataset["rows"]: - assert "row" in row - assert "payload" in row["row"] - payload = row["row"]["payload"] - assert "query" in payload - assert "passages" in payload - assert isinstance(payload["passages"], list) - assert len(payload["passages"]) == batch_size - - # Try error case where batch size is larger than the number of available texts - with pytest.raises( - ValueError, - match="Batch size cannot be larger than the number of available passages", - ): - LlmInputs._get_input_dataset_from_rankings_files( - queries_filename, passages_filename, 5, num_prompts=10 - ) - - def test_convert_generic_json_to_openai_rankings_format(self): - generic_dataset = { - "rows": [ - { - "payload": { - "query": {"text": "1"}, - "passages": [{"text": "2"}, {"text": "3"}, {"text": "4"}], - } - } - ] - } - - expected_result = { - "data": [ - { - "payload": [ - { - "query": {"text": "1"}, - "passages": [{"text": "2"}, {"text": "3"}, {"text": "4"}], - "model": "test_model", - } - ] - } - ] - } - - result = LlmInputs._convert_generic_json_to_rankings_format( - generic_dataset, - extra_inputs={}, - model_name=["test_model"], - model_selection_strategy=ModelSelectionStrategy.ROUND_ROBIN, - ) - - assert result is not None - assert "data" in result - assert len(result["data"]) == len(expected_result["data"]) - - for i, item in enumerate(expected_result["data"]): - assert "payload" in result["data"][i] - assert result["data"][i]["payload"] == item["payload"] - - def test_convert_generic_json_to_openai_rankings_format_with_extra_inputs(self): - generic_dataset = { - "rows": [ - { - "payload": { - "query": {"text": "1"}, - "passages": [{"text": "2"}, {"text": "3"}, {"text": "4"}], - } - } - ] - } - - extra_inputs = { - "encoding_format": "base64", - "truncate": "END", - "additional_key": "additional_value", - } - - expected_result = 
{ - "data": [ - { - "payload": [ - { - "query": {"text": "1"}, - "passages": [{"text": "2"}, {"text": "3"}, {"text": "4"}], - "model": "test_model", - "encoding_format": "base64", - "truncate": "END", - "additional_key": "additional_value", - } - ] - } - ] - } - - result = LlmInputs._convert_generic_json_to_rankings_format( - generic_dataset, - extra_inputs=extra_inputs, - model_name=["test_model"], - model_selection_strategy=ModelSelectionStrategy.ROUND_ROBIN, - ) - - assert result is not None - assert "data" in result - assert len(result["data"]) == len(expected_result["data"]) - - for i, item in enumerate(expected_result["data"]): - assert "payload" in result["data"][i] - assert result["data"][i]["payload"] == item["payload"] diff --git a/src/c++/perf_analyzer/genai-perf/tests/test_llm_metrics.py b/src/c++/perf_analyzer/genai-perf/tests/test_llm_metrics.py deleted file mode 100644 index 689e366cd..000000000 --- a/src/c++/perf_analyzer/genai-perf/tests/test_llm_metrics.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -import pytest -from genai_perf.metrics import LLMMetrics - - -class TestLLMMetrics: - - def test_llm_metric_request_metrics(self) -> None: - """Test request_metrics property.""" - m = LLMMetrics( - request_throughputs=[10.12, 11.33], - request_latencies=[3, 44], - time_to_first_tokens=[1, 2, 3], - inter_token_latencies=[4, 5], - output_token_throughputs=[22.13, 9423.02], - output_token_throughputs_per_request=[7, 8, 9], - output_sequence_lengths=[3, 4], - input_sequence_lengths=[12, 34], - ) - req_metrics = m.request_metrics - assert len(req_metrics) == 6 - assert req_metrics[0].name == "time_to_first_token" - assert req_metrics[0].unit == "ms" - assert req_metrics[1].name == "inter_token_latency" - assert req_metrics[1].unit == "ms" - assert req_metrics[2].name == "request_latency" - assert req_metrics[2].unit == "ms" - assert req_metrics[3].name == "output_token_throughput_per_request" - assert req_metrics[3].unit == "tokens/sec" - assert req_metrics[4].name == "output_sequence_length" - assert req_metrics[4].unit == "tokens" - assert req_metrics[5].name == "input_sequence_length" - assert req_metrics[5].unit == "tokens" - - def test_llm_metric_system_metrics(self) -> None: - """Test system_metrics property.""" - m = LLMMetrics( - request_throughputs=[10.12, 11.33], - request_latencies=[3, 44], - time_to_first_tokens=[1, 2, 3], - inter_token_latencies=[4, 5], - output_token_throughputs=[22.13, 9423.02], - output_token_throughputs_per_request=[7, 8, 9], - output_sequence_lengths=[3, 4], - input_sequence_lengths=[12, 34], - ) - - sys_metrics = m.system_metrics - assert len(sys_metrics) == 2 - assert sys_metrics[0].name == "output_token_throughput" - assert sys_metrics[0].unit == "per sec" - assert sys_metrics[1].name == "request_throughput" - assert sys_metrics[1].unit == "per sec" - - def test_llm_metrics_get_base_name(self) -> None: - """Test get_base_name method in LLMMetrics class.""" - # initialize with dummy values - metrics = LLMMetrics( - request_throughputs=[10.12, 11.33], - request_latencies=[3, 44], - time_to_first_tokens=[1, 2, 3], - inter_token_latencies=[4, 5], - output_token_throughputs=[22.13, 9423.02], - output_token_throughputs_per_request=[7, 8, 9], - output_sequence_lengths=[3, 4], - input_sequence_lengths=[12, 34], - ) - assert metrics.get_base_name("time_to_first_tokens") == "time_to_first_token" - assert metrics.get_base_name("inter_token_latencies") == "inter_token_latency" - assert ( - metrics.get_base_name("output_token_throughputs_per_request") - == "output_token_throughput_per_request" - ) - assert ( - metrics.get_base_name("output_sequence_lengths") == "output_sequence_length" - ) - assert ( - metrics.get_base_name("input_sequence_lengths") == "input_sequence_length" - ) - with pytest.raises(KeyError): - metrics.get_base_name("hello1234") diff --git a/src/c++/perf_analyzer/genai-perf/tests/test_llm_profile_data_parser.py b/src/c++/perf_analyzer/genai-perf/tests/test_llm_profile_data_parser.py deleted file mode 100644 index d776a6a85..000000000 --- a/src/c++/perf_analyzer/genai-perf/tests/test_llm_profile_data_parser.py +++ /dev/null @@ -1,742 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. 
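test_llm_metrics_get_base_name above fixes the contract that plural attribute names resolve to the singular metric names used in reports, with KeyError for anything unknown. A sketch of that mapping (the dict literal is an assumption about how the real LLMMetrics stores it):

def get_base_name(attr_name: str) -> str:
    # Plural attribute name -> singular report name; unknown names such as
    # "hello1234" raise KeyError, as the test above expects.
    base_names = {
        "time_to_first_tokens": "time_to_first_token",
        "inter_token_latencies": "inter_token_latency",
        "output_token_throughputs_per_request": "output_token_throughput_per_request",
        "output_sequence_lengths": "output_sequence_length",
        "input_sequence_lengths": "input_sequence_length",
    }
    return base_names[attr_name]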
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-# * Neither the name of NVIDIA CORPORATION nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import json
-from io import StringIO
-from pathlib import Path
-from typing import Any, List
-
-import numpy as np
-import pytest
-from genai_perf.metrics import LLMMetrics
-from genai_perf.profile_data_parser import LLMProfileDataParser
-from genai_perf.tokenizer import DEFAULT_TOKENIZER, get_tokenizer
-
-
-def ns_to_sec(ns: int) -> float:
-    """Convert from nanoseconds to seconds."""
-    return ns / 1e9
-
-
-class TestLLMProfileDataParser:
-    @pytest.fixture
-    def mock_read_write(self, monkeypatch: pytest.MonkeyPatch) -> List[str]:
-        """
-        This fixture mocks the built-in open function for specific files:
-
-        - For "triton_profile_export.json", it will read and return the
-          contents of self.triton_profile_data
-        - For "openai_profile_export.json", it will read and return the
-          contents of self.openai_profile_data
-        - For "openai_vlm_profile_export.json", it will read and return
-          the contents of self.openai_vlm_profile_data
-        - For "empty_profile_export.json", it will read and return the
-          contents of self.empty_profile_data
-        - For "profile_export.csv", it will capture all data written to
-          the file, and return it as the return value of this function
-        - For all other files, it will behave like the normal open function
-        """
-
-        written_data = []
-
-        original_open = open
-
-        def custom_open(filename, *args, **kwargs):
-            def write(self: Any, content: str) -> int:
-                written_data.append(content)
-                return len(content)
-
-            if filename == "triton_profile_export.json":
-                tmp_file = StringIO(json.dumps(self.triton_profile_data))
-                return tmp_file
-            elif filename == "openai_profile_export.json":
-                tmp_file = StringIO(json.dumps(self.openai_profile_data))
-                return tmp_file
-            elif filename == "openai_vlm_profile_export.json":
-                tmp_file = StringIO(json.dumps(self.openai_vlm_profile_data))
-                return tmp_file
-            elif filename == "empty_profile_export.json":
-                tmp_file = StringIO(json.dumps(self.empty_profile_data))
-                return tmp_file
-            elif filename == "profile_export.csv":
-                tmp_file = StringIO()
-                tmp_file.write = write.__get__(tmp_file)
-                return tmp_file
-            else:
-                return original_open(filename, *args, **kwargs)
-
-        monkeypatch.setattr("builtins.open", custom_open)
-
-        return written_data
-
-    def test_triton_llm_profile_data(self, mock_read_write: pytest.MonkeyPatch) -> None:
-        """Collect LLM metrics from profile export data and check values.
- - Metrics - * time to first tokens - - experiment 1: [3 - 1, 4 - 2] = [2, 2] - - experiment 2: [7 - 5, 6 - 3] = [2, 3] - * inter token latencies - - experiment 1: [((8 - 1) - 2)/(3 - 1), ((11 - 2) - 2)/(6 - 1)] - : [2.5, 1.4] - : [2, 1] # rounded - - experiment 2: [((18 - 5) - 2)/(4 - 1), ((11 - 3) - 3)/(6 - 1)] - : [11/3, 1] - : [4, 1] # rounded - * output token throughputs per request - - experiment 1: [3/(8 - 1), 6/(11 - 2)] = [3/7, 6/9] - - experiment 2: [4/(18 - 5), 6/(11 - 3)] = [4/13, 6/8] - * output token throughputs - - experiment 1: [(3 + 6)/(11 - 1)] = [9/10] - - experiment 2: [(4 + 6)/(18 - 3)] = [2/3] - * output sequence lengths - - experiment 1: [3, 6] - - experiment 2: [4, 6] - * input sequence lengths - - experiment 1: [3, 4] - - experiment 2: [3, 4] - """ - tokenizer = get_tokenizer(DEFAULT_TOKENIZER) - pd = LLMProfileDataParser( - filename=Path("triton_profile_export.json"), - tokenizer=tokenizer, - ) - - # experiment 1 metrics & statistics - stat_obj = pd.get_statistics(infer_mode="concurrency", load_level="10") - metrics = stat_obj.metrics - stat = stat_obj.stats_dict - - assert isinstance(metrics, LLMMetrics) - - assert metrics.time_to_first_tokens == [2, 2] - assert metrics.inter_token_latencies == [2, 1] - ottpr = [3 / ns_to_sec(7), 6 / ns_to_sec(9)] - assert metrics.output_token_throughputs_per_request == pytest.approx(ottpr) - ott = [9 / ns_to_sec(10)] - assert metrics.output_token_throughputs == pytest.approx(ott) - assert metrics.output_sequence_lengths == [3, 6] - assert metrics.input_sequence_lengths == [3, 4] - - # Disable Pylance warnings for dynamically set attributes due to Statistics - # not having strict attributes listed. - assert stat["time_to_first_token"]["avg"] == 2 # type: ignore - assert stat["inter_token_latency"]["avg"] == 1.5 # type: ignore - assert stat["output_token_throughput_per_request"]["avg"] == pytest.approx( # type: ignore - np.mean(ottpr) - ) - assert stat["output_sequence_length"]["avg"] == 4.5 # type: ignore - assert stat["input_sequence_length"]["avg"] == 3.5 # type: ignore - - assert stat["time_to_first_token"]["p50"] == 2 # type: ignore - assert stat["inter_token_latency"]["p50"] == 1.5 # type: ignore - assert stat["output_token_throughput_per_request"]["p50"] == pytest.approx( # type: ignore - np.percentile(ottpr, 50) - ) - assert stat["output_sequence_length"]["p50"] == 4.5 # type: ignore - assert stat["input_sequence_length"]["p50"] == 3.5 # type: ignore - - assert stat["time_to_first_token"]["min"] == 2 # type: ignore - assert stat["inter_token_latency"]["min"] == 1 # type: ignore - min_ottpr = 3 / ns_to_sec(7) - assert stat["output_token_throughput_per_request"]["min"] == pytest.approx(min_ottpr) # type: ignore - assert stat["output_sequence_length"]["min"] == 3 # type: ignore - assert stat["input_sequence_length"]["min"] == 3 # type: ignore - - assert stat["time_to_first_token"]["max"] == 2 # type: ignore - assert stat["inter_token_latency"]["max"] == 2 # type: ignore - max_ottpr = 6 / ns_to_sec(9) - assert stat["output_token_throughput_per_request"]["max"] == pytest.approx(max_ottpr) # type: ignore - assert stat["output_sequence_length"]["max"] == 6 # type: ignore - assert stat["input_sequence_length"]["max"] == 4 # type: ignore - - assert stat["time_to_first_token"]["std"] == np.std([2, 2]) # type: ignore - assert stat["inter_token_latency"]["std"] == np.std([2, 1]) # type: ignore - assert stat["output_token_throughput_per_request"]["std"] == pytest.approx( # type: ignore - np.std(ottpr) - ) - assert 
stat["output_sequence_length"]["std"] == np.std([3, 6]) # type: ignore - assert stat["input_sequence_length"]["std"] == np.std([3, 4]) # type: ignore - - oott = 9 / ns_to_sec(10) - assert stat["output_token_throughput"]["avg"] == pytest.approx(oott) # type: ignore - - # experiment 2 statistics - stat_obj = pd.get_statistics(infer_mode="request_rate", load_level="2.0") - metrics = stat_obj.metrics - stat = stat_obj.stats_dict - assert isinstance(metrics, LLMMetrics) - - assert metrics.time_to_first_tokens == [2, 3] - assert metrics.inter_token_latencies == [4, 1] - ottpr = [4 / ns_to_sec(13), 6 / ns_to_sec(8)] - assert metrics.output_token_throughputs_per_request == pytest.approx(ottpr) - ott = [2 / ns_to_sec(3)] - assert metrics.output_token_throughputs == pytest.approx(ott) - assert metrics.output_sequence_lengths == [4, 6] - assert metrics.input_sequence_lengths == [3, 4] - - assert stat["time_to_first_token"]["avg"] == pytest.approx(2.5) # type: ignore - assert stat["inter_token_latency"]["avg"] == pytest.approx(2.5) # type: ignore - assert stat["output_token_throughput_per_request"]["avg"] == pytest.approx( # type: ignore - np.mean(ottpr) - ) - assert stat["output_sequence_length"]["avg"] == 5 # type: ignore - assert stat["input_sequence_length"]["avg"] == 3.5 # type: ignore - - assert stat["time_to_first_token"]["p50"] == pytest.approx(2.5) # type: ignore - assert stat["inter_token_latency"]["p50"] == pytest.approx(2.5) # type: ignore - assert stat["output_token_throughput_per_request"]["p50"] == pytest.approx( # type: ignore - np.percentile(ottpr, 50) - ) - assert stat["output_sequence_length"]["p50"] == 5 # type: ignore - assert stat["input_sequence_length"]["p50"] == 3.5 # type: ignore - - assert stat["time_to_first_token"]["min"] == pytest.approx(2) # type: ignore - assert stat["inter_token_latency"]["min"] == pytest.approx(1) # type: ignore - min_ottpr = 4 / ns_to_sec(13) - assert stat["output_token_throughput_per_request"]["min"] == pytest.approx(min_ottpr) # type: ignore - assert stat["output_sequence_length"]["min"] == 4 # type: ignore - assert stat["input_sequence_length"]["min"] == 3 # type: ignore - - assert stat["time_to_first_token"]["max"] == pytest.approx(3) # type: ignore - assert stat["inter_token_latency"]["max"] == pytest.approx(4) # type: ignore - max_ottpr = 6 / ns_to_sec(8) - assert stat["output_token_throughput_per_request"]["max"] == pytest.approx(max_ottpr) # type: ignore - assert stat["output_sequence_length"]["max"] == 6 # type: ignore - assert stat["input_sequence_length"]["max"] == 4 # type: ignore - - assert stat["time_to_first_token"]["std"] == np.std([2, 3]) * (1) # type: ignore - assert stat["inter_token_latency"]["std"] == np.std([4, 1]) * (1) # type: ignore - assert stat["output_token_throughput_per_request"]["std"] == pytest.approx( # type: ignore - np.std(ottpr) - ) - assert stat["output_sequence_length"]["std"] == np.std([4, 6]) # type: ignore - assert stat["input_sequence_length"]["std"] == np.std([3, 4]) # type: ignore - - oott = 2 / ns_to_sec(3) - assert stat["output_token_throughput"]["avg"] == pytest.approx(oott) # type: ignore - - # check non-existing profile data - with pytest.raises(KeyError): - pd.get_statistics(infer_mode="concurrency", load_level="30") - - def test_openai_llm_profile_data(self, mock_read_write: pytest.MonkeyPatch) -> None: - """Collect LLM metrics from profile export data and check values. 
- - Metrics - * time to first tokens - - experiment 1: [5 - 1, 7 - 2] = [4, 5] - * inter token latencies - - experiment 1: [((12 - 1) - 4)/(3 - 1), ((15 - 2) - 5)/(6 - 1)] - : [3.5, 1.6] - : [4, 2] # rounded - * output token throughputs per request - - experiment 1: [3/(12 - 1), 6/(15 - 2)] = [3/11, 6/13] - * output token throughputs - - experiment 1: [(3 + 6)/(15 - 1)] = [9/14] - * output sequence lengths - - experiment 1: [3, 6] - * input sequence lengths - - experiment 1: [3, 4] - """ - tokenizer = get_tokenizer(DEFAULT_TOKENIZER) - pd = LLMProfileDataParser( - filename=Path("openai_profile_export.json"), - tokenizer=tokenizer, - ) - - # experiment 1 statistics - stat_obj = pd.get_statistics(infer_mode="concurrency", load_level="10") - metrics = stat_obj.metrics - stat = stat_obj.stats_dict - assert isinstance(metrics, LLMMetrics) - - assert metrics.time_to_first_tokens == [4, 5] - assert metrics.inter_token_latencies == [4, 2] - ottpr = [3 / ns_to_sec(11), 6 / ns_to_sec(13)] - assert metrics.output_token_throughputs_per_request == pytest.approx(ottpr) - ott = [9 / ns_to_sec(14)] - assert metrics.output_token_throughputs == pytest.approx(ott) - assert metrics.output_sequence_lengths == [3, 6] - assert metrics.input_sequence_lengths == [3, 4] - - assert stat["time_to_first_token"]["avg"] == pytest.approx(4.5) # type: ignore - assert stat["inter_token_latency"]["avg"] == pytest.approx(3) # type: ignore - assert stat["output_token_throughput_per_request"]["avg"] == pytest.approx( # type: ignore - np.mean(ottpr) - ) - assert stat["output_sequence_length"]["avg"] == 4.5 # type: ignore - assert stat["input_sequence_length"]["avg"] == 3.5 # type: ignore - - assert stat["time_to_first_token"]["p50"] == pytest.approx(4.5) # type: ignore - assert stat["inter_token_latency"]["p50"] == pytest.approx(3) # type: ignore - assert stat["output_token_throughput_per_request"]["p50"] == pytest.approx( # type: ignore - np.percentile(ottpr, 50) - ) - assert stat["output_sequence_length"]["p50"] == 4.5 # type: ignore - assert stat["input_sequence_length"]["p50"] == 3.5 # type: ignore - - assert stat["time_to_first_token"]["min"] == pytest.approx(4) # type: ignore - assert stat["inter_token_latency"]["min"] == pytest.approx(2) # type: ignore - min_ottpr = 3 / ns_to_sec(11) - assert stat["output_token_throughput_per_request"]["min"] == pytest.approx(min_ottpr) # type: ignore - assert stat["output_sequence_length"]["min"] == 3 # type: ignore - assert stat["input_sequence_length"]["min"] == 3 # type: ignore - - assert stat["time_to_first_token"]["max"] == pytest.approx(5) # type: ignore - assert stat["inter_token_latency"]["max"] == pytest.approx(4) # type: ignore - max_ottpr = 6 / ns_to_sec(13) - assert stat["output_token_throughput_per_request"]["max"] == pytest.approx(max_ottpr) # type: ignore - assert stat["output_sequence_length"]["max"] == 6 # type: ignore - assert stat["input_sequence_length"]["max"] == 4 # type: ignore - - assert stat["time_to_first_token"]["std"] == np.std([4, 5]) * (1) # type: ignore - assert stat["inter_token_latency"]["std"] == np.std([4, 2]) * (1) # type: ignore - assert stat["output_token_throughput_per_request"]["std"] == pytest.approx( # type: ignore - np.std(ottpr) - ) - assert stat["output_sequence_length"]["std"] == np.std([3, 6]) # type: ignore - assert stat["input_sequence_length"]["std"] == np.std([3, 4]) # type: ignore - - oott = 9 / ns_to_sec(14) - assert stat["output_token_throughput"]["avg"] == pytest.approx(oott) # type: ignore - - # check non-existing profile data - 
with pytest.raises(KeyError): - pd.get_statistics(infer_mode="concurrency", load_level="40") - - def test_openai_vlm_profile_data(self, mock_read_write: pytest.MonkeyPatch) -> None: - """Collect LLM metrics from profile export data and check values. - - Metrics - * time to first tokens - - experiment 1: [5 - 1, 7 - 2] = [4, 5] - * inter token latencies - - experiment 1: [((12 - 1) - 4)/(3 - 1), ((15 - 2) - 5)/(6 - 1)] - : [3.5, 1.6] - : [4, 2] # rounded - * output token throughputs per request - - experiment 1: [3/(12 - 1), 6/(15 - 2)] = [3/11, 6/13] - * output token throughputs - - experiment 1: [(3 + 6)/(15 - 1)] = [9/14] - * output sequence lengths - - experiment 1: [3, 6] - * input sequence lengths - - experiment 1: [3, 4] - """ - tokenizer = get_tokenizer(DEFAULT_TOKENIZER) - pd = LLMProfileDataParser( - filename=Path("openai_vlm_profile_export.json"), - tokenizer=tokenizer, - ) - - # experiment 1 statistics - stat_obj = pd.get_statistics(infer_mode="concurrency", load_level="10") - metrics = stat_obj.metrics - stat = stat_obj.stats_dict - assert isinstance(metrics, LLMMetrics) - - assert metrics.time_to_first_tokens == [4, 5] - assert metrics.inter_token_latencies == [4, 2] - ottpr = [3 / ns_to_sec(11), 6 / ns_to_sec(13)] - assert metrics.output_token_throughputs_per_request == pytest.approx(ottpr) - ott = [9 / ns_to_sec(14)] - assert metrics.output_token_throughputs == pytest.approx(ott) - assert metrics.output_sequence_lengths == [3, 6] - assert metrics.input_sequence_lengths == [3, 4] - - assert stat["time_to_first_token"]["avg"] == pytest.approx(4.5) # type: ignore - assert stat["inter_token_latency"]["avg"] == pytest.approx(3) # type: ignore - assert stat["output_token_throughput_per_request"]["avg"] == pytest.approx( # type: ignore - np.mean(ottpr) - ) - assert stat["output_sequence_length"]["avg"] == 4.5 # type: ignore - assert stat["input_sequence_length"]["avg"] == 3.5 # type: ignore - - assert stat["time_to_first_token"]["p50"] == pytest.approx(4.5) # type: ignore - assert stat["inter_token_latency"]["p50"] == pytest.approx(3) # type: ignore - assert stat["output_token_throughput_per_request"]["p50"] == pytest.approx( # type: ignore - np.percentile(ottpr, 50) - ) - assert stat["output_sequence_length"]["p50"] == 4.5 # type: ignore - assert stat["input_sequence_length"]["p50"] == 3.5 # type: ignore - - assert stat["time_to_first_token"]["min"] == pytest.approx(4) # type: ignore - assert stat["inter_token_latency"]["min"] == pytest.approx(2) # type: ignore - min_ottpr = 3 / ns_to_sec(11) - assert stat["output_token_throughput_per_request"]["min"] == pytest.approx(min_ottpr) # type: ignore - assert stat["output_sequence_length"]["min"] == 3 # type: ignore - assert stat["input_sequence_length"]["min"] == 3 # type: ignore - - assert stat["time_to_first_token"]["max"] == pytest.approx(5) # type: ignore - assert stat["inter_token_latency"]["max"] == pytest.approx(4) # type: ignore - max_ottpr = 6 / ns_to_sec(13) - assert stat["output_token_throughput_per_request"]["max"] == pytest.approx(max_ottpr) # type: ignore - assert stat["output_sequence_length"]["max"] == 6 # type: ignore - assert stat["input_sequence_length"]["max"] == 4 # type: ignore - - assert stat["time_to_first_token"]["std"] == np.std([4, 5]) * (1) # type: ignore - assert stat["inter_token_latency"]["std"] == np.std([4, 2]) * (1) # type: ignore - assert stat["output_token_throughput_per_request"]["std"] == pytest.approx( # type: ignore - np.std(ottpr) - ) - assert stat["output_sequence_length"]["std"] == np.std([3, 
6]) # type: ignore - assert stat["input_sequence_length"]["std"] == np.std([3, 4]) # type: ignore - - oott = 9 / ns_to_sec(14) - assert stat["output_token_throughput"]["avg"] == pytest.approx(oott) # type: ignore - - # check non-existing profile data - with pytest.raises(KeyError): - pd.get_statistics(infer_mode="concurrency", load_level="40") - - def test_merged_sse_response(self, mock_read_write: pytest.MonkeyPatch) -> None: - """Test merging the multiple sse response.""" - res_timestamps = [0, 1, 2, 3] - res_outputs = [ - { - "response": 'data: {"choices":[{"delta":{"content":"aaa"}}],"object":"chat.completion.chunk"}\n\n' - }, - { - "response": ( - 'data: {"choices":[{"delta":{"content":"abc"}}],"object":"chat.completion.chunk"}\n\n' - 'data: {"choices":[{"delta":{"content":"1234"}}],"object":"chat.completion.chunk"}\n\n' - 'data: {"choices":[{"delta":{"content":"helloworld"}}],"object":"chat.completion.chunk"}\n\n' - ) - }, - {"response": "data: [DONE]\n\n"}, - ] - expected_response = '{"choices": [{"delta": {"content": "abc1234helloworld"}}], "object": "chat.completion.chunk"}' - - tokenizer = get_tokenizer(DEFAULT_TOKENIZER) - pd = LLMProfileDataParser( - filename=Path("openai_profile_export.json"), - tokenizer=tokenizer, - ) - - pd._preprocess_response(res_timestamps, res_outputs) - assert res_outputs[1]["response"] == expected_response - - def test_openai_output_token_counts( - self, mock_read_write: pytest.MonkeyPatch - ) -> None: - output_texts = [ - "Ad", - "idas", - " Orig", - "inals", - " are", - " now", - " available", - " in", - " more", - " than", - ] - res_outputs = [] - for text in output_texts: - response = f'data: {{"choices":[{{"delta":{{"content":"{text}"}}}}],"object":"chat.completion.chunk"}}\n\n' - res_outputs.append({"response": response}) - - tokenizer = get_tokenizer(DEFAULT_TOKENIZER) - pd = LLMProfileDataParser( - filename=Path("openai_profile_export.json"), - tokenizer=tokenizer, - ) - - output_token_counts, total_output_token = pd._get_output_token_counts( - res_outputs - ) - assert output_token_counts == [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] # total 10 - assert total_output_token == 9 - assert total_output_token != sum(output_token_counts) - - def test_triton_output_token_counts( - self, mock_read_write: pytest.MonkeyPatch - ) -> None: - output_texts = [ - "Ad", - "idas", - " Orig", - "inals", - " are", - " now", - " available", - " in", - " more", - " than", - ] - res_outputs = [] - for text in output_texts: - res_outputs.append({"text_output": text}) - - tokenizer = get_tokenizer(DEFAULT_TOKENIZER) - pd = LLMProfileDataParser( - filename=Path("triton_profile_export.json"), - tokenizer=tokenizer, - ) - - output_token_counts, total_output_token = pd._get_output_token_counts( - res_outputs - ) - assert output_token_counts == [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] # total 10 - assert total_output_token == 9 - assert total_output_token != sum(output_token_counts) - - def test_empty_response(self, mock_read_write: pytest.MonkeyPatch) -> None: - """Check if it handles all empty responses.""" - tokenizer = get_tokenizer(DEFAULT_TOKENIZER) - - # Should not throw error - _ = LLMProfileDataParser( - filename=Path("empty_profile_export.json"), - tokenizer=tokenizer, - ) - - empty_profile_data = { - "service_kind": "openai", - "endpoint": "v1/chat/completions", - "experiments": [ - { - "experiment": { - "mode": "concurrency", - "value": 10, - }, - "requests": [ - { - "timestamp": 1, - "request_inputs": { - "payload": '{"messages":[{"role":"user","content":"This is 
test"}],"model":"llama-2-7b","stream":true}', - }, - "response_timestamps": [3, 5, 8], - "response_outputs": [ - { - "response": 'data: {"id":"abc","object":"chat.completion.chunk","created":123,"model":"llama-2-7b","choices":[{"index":0,"delta":{"role":"assistant"},"finish_reason":null}]}\n\n' - }, - { - "response": 'data: {"id":"abc","object":"chat.completion.chunk","created":123,"model":"llama-2-7b","choices":[{"index":0,"delta":{"content":""},"finish_reason":null}]}\n\n' - }, - {"response": "data: [DONE]\n\n"}, - ], - }, - ], - }, - ], - } - - openai_profile_data = { - "service_kind": "openai", - "endpoint": "v1/chat/completions", - "experiments": [ - { - "experiment": { - "mode": "concurrency", - "value": 10, - }, - "requests": [ - { - "timestamp": 1, - "request_inputs": { - "payload": '{"messages":[{"role":"user","content":"This is test"}],"model":"llama-2-7b","stream":true}', - }, - # the first, and the last two responses will be ignored because they have no "content" - "response_timestamps": [3, 5, 8, 12, 13, 14], - "response_outputs": [ - { - "response": 'data: {"id":"abc","object":"chat.completion.chunk","created":123,"model":"llama-2-7b","choices":[{"index":0,"delta":{"role":"assistant"},"finish_reason":null}]}\n\n' - }, - { - "response": 'data: {"id":"abc","object":"chat.completion.chunk","created":123,"model":"llama-2-7b","choices":[{"index":0,"delta":{"content":"I"},"finish_reason":null}]}\n\n' - }, - { - "response": 'data: {"id":"abc","object":"chat.completion.chunk","created":123,"model":"llama-2-7b","choices":[{"index":0,"delta":{"content":" like"},"finish_reason":null}]}\n\n' - }, - { - "response": 'data: {"id":"abc","object":"chat.completion.chunk","created":123,"model":"llama-2-7b","choices":[{"index":0,"delta":{"content":" dogs"},"finish_reason":null}]}\n\n' - }, - { - "response": 'data: {"id":"abc","object":"chat.completion.chunk","created":123,"model":"llama-2-7b","choices":[{"index":0,"delta":{},"finish_reason":null}]}\n\n' - }, - {"response": "data: [DONE]\n\n"}, - ], - }, - { - "timestamp": 2, - "request_inputs": { - "payload": '{"messages":[{"role":"user","content":"This is test too"}],"model":"llama-2-7b","stream":true}', - }, - # the first, and the last two responses will be ignored because they have no "content" - "response_timestamps": [4, 7, 11, 15, 18, 19], - "response_outputs": [ - { - "response": 'data: {"id":"abc","object":"chat.completion.chunk","created":123,"model":"llama-2-7b","choices":[{"index":0,"delta":{"role":"assistant"},"finish_reason":null}]}\n\n' - }, - { - "response": 'data: {"id":"abc","object":"chat.completion.chunk","created":123,"model":"llama-2-7b","choices":[{"index":0,"delta":{"content":"I"},"finish_reason":null}]}\n\n' - }, - { - "response": 'data: {"id":"abc","object":"chat.completion.chunk","created":123,"model":"llama-2-7b","choices":[{"index":0,"delta":{"content":"don\'t"},"finish_reason":null}]}\n\n' - }, - { - "response": 'data: {"id":"abc","object":"chat.completion.chunk","created":123,"model":"llama-2-7b","choices":[{"index":0,"delta":{"content":"cook food"},"finish_reason":null}]}\n\n' - }, - { - "response": 'data: {"id":"abc","object":"chat.completion.chunk","created":123,"model":"llama-2-7b","choices":[{"index":0,"delta":{},"finish_reason":null}]}\n\n' - }, - {"response": "data: [DONE]\n\n"}, - ], - }, - ], - }, - ], - } - - openai_vlm_profile_data = { - "service_kind": "openai", - "endpoint": "v1/chat/completions", - "experiments": [ - { - "experiment": { - "mode": "concurrency", - "value": 10, - }, - "requests": [ - { - 
"timestamp": 1, - "request_inputs": { - "payload": '{"messages":[{"role":"user","content":[{"type":"text","text":"This is test"},{"type":"image_url","image_url":{"url":"data:image/png;base64,abcdef"}}]}],"model":"llava-1.6","stream":true}', - }, - # the first, and the last two responses will be ignored because they have no "content" - "response_timestamps": [3, 5, 8, 12, 13, 14], - "response_outputs": [ - { - "response": 'data: {"id":"abc","object":"chat.completion.chunk","choices":[{"index":0,"delta":{"role":"assistant"},"finish_reason":null}]}\n\n' - }, - { - "response": 'data: {"id":"abc","object":"chat.completion.chunk","choices":[{"index":0,"delta":{"content":"I"},"finish_reason":null}]}\n\n' - }, - { - "response": 'data: {"id":"abc","object":"chat.completion.chunk","choices":[{"index":0,"delta":{"content":" like"},"finish_reason":null}]}\n\n' - }, - { - "response": 'data: {"id":"abc","object":"chat.completion.chunk","choices":[{"index":0,"delta":{"content":" dogs"},"finish_reason":null}]}\n\n' - }, - { - "response": 'data: {"id":"abc","object":"chat.completion.chunk","choices":[{"index":0,"delta":{},"finish_reason":null}]}\n\n' - }, - {"response": "data: [DONE]\n\n"}, - ], - }, - { - "timestamp": 2, - "request_inputs": { - "payload": '{"messages":[{"role":"user","content":[{"type":"text","text":"This is test too"},{"type":"image_url","image_url":{"url":"data:image/png;base64,abcdef"}}]}],"model":"llava-1.6","stream":true}', - }, - # the first, and the last two responses will be ignored because they have no "content" - "response_timestamps": [4, 7, 11, 15, 18, 19], - "response_outputs": [ - { - "response": 'data: {"id":"abc","object":"chat.completion.chunk","choices":[{"index":0,"delta":{"role":"assistant"},"finish_reason":null}]}\n\n' - }, - { - "response": 'data: {"id":"abc","object":"chat.completion.chunk","choices":[{"index":0,"delta":{"content":"I"},"finish_reason":null}]}\n\n' - }, - { - "response": 'data: {"id":"abc","object":"chat.completion.chunk","choices":[{"index":0,"delta":{"content":"don\'t"},"finish_reason":null}]}\n\n' - }, - { - "response": 'data: {"id":"abc","object":"chat.completion.chunk","choices":[{"index":0,"delta":{"content":"cook food"},"finish_reason":null}]}\n\n' - }, - { - "response": 'data: {"id":"abc","object":"chat.completion.chunk","choices":[{"index":0,"delta":{},"finish_reason":null}]}\n\n' - }, - {"response": "data: [DONE]\n\n"}, - ], - }, - ], - }, - ], - } - - triton_profile_data = { - "service_kind": "triton", - "endpoint": "", - "experiments": [ - { - "experiment": { - "mode": "concurrency", - "value": 10, - }, - "requests": [ - { - "timestamp": 1, - "request_inputs": {"text_input": "This is test"}, - "response_timestamps": [3, 5, 8], - "response_outputs": [ - {"text_output": "I"}, - {"text_output": " like"}, - {"text_output": " dogs"}, - ], - }, - { - "timestamp": 2, - "request_inputs": {"text_input": "This is test too"}, - "response_timestamps": [4, 7, 11], - "response_outputs": [ - {"text_output": "I"}, - {"text_output": " don't"}, - {"text_output": " cook food"}, - ], - }, - ], - }, - { - "experiment": { - "mode": "request_rate", - "value": 2.0, - }, - "requests": [ - { - "timestamp": 5, - "request_inputs": {"text_input": "This is test"}, - "response_timestamps": [7, 8, 13, 18], - "response_outputs": [ - {"text_output": "cat"}, - {"text_output": " is"}, - {"text_output": " cool"}, - {"text_output": " too"}, - ], - }, - { - "timestamp": 3, - "request_inputs": {"text_input": "This is test too"}, - "response_timestamps": [6, 8, 11], - 
"response_outputs": [ - {"text_output": "it's"}, - {"text_output": " very"}, - {"text_output": " simple work"}, - ], - }, - ], - }, - ], - } diff --git a/src/c++/perf_analyzer/genai-perf/tests/test_metrics.py b/src/c++/perf_analyzer/genai-perf/tests/test_metrics.py deleted file mode 100644 index 2af489fc4..000000000 --- a/src/c++/perf_analyzer/genai-perf/tests/test_metrics.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import pytest -from genai_perf.metrics import Metrics - - -class TestMetrics: - - def test_metric_request_metrics(self) -> None: - """Test request_metrics property.""" - m = Metrics( - request_throughputs=[10.12, 11.33], - request_latencies=[3, 44], - ) - req_metrics = m.request_metrics - assert len(req_metrics) == 1 - assert req_metrics[0].name == "request_latency" - assert req_metrics[0].unit == "ms" - - def test_metric_system_metrics(self) -> None: - """Test system_metrics property.""" - m = Metrics( - request_throughputs=[10.12, 11.33], - request_latencies=[3, 44], - ) - sys_metrics = m.system_metrics - assert len(sys_metrics) == 1 - assert sys_metrics[0].name == "request_throughput" - assert sys_metrics[0].unit == "per sec" - - def test_metrics_get_base_name(self) -> None: - """Test get_base_name method in Metrics class.""" - metrics = Metrics( - request_throughputs=[10.12, 11.33], - request_latencies=[3, 44], - ) - assert metrics.get_base_name("request_throughputs") == "request_throughput" - assert metrics.get_base_name("request_latencies") == "request_latency" - with pytest.raises(KeyError): - metrics.get_base_name("hello1234") diff --git a/src/c++/perf_analyzer/genai-perf/tests/test_plot_configs.py b/src/c++/perf_analyzer/genai-perf/tests/test_plot_configs.py deleted file mode 100644 index 8a1dfee7a..000000000 --- a/src/c++/perf_analyzer/genai-perf/tests/test_plot_configs.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from pathlib import Path - -# Skip type checking to avoid mypy error -# Issue: https://github.com/python/mypy/issues/10632 -import yaml # type: ignore -from genai_perf.plots.plot_config import PlotType -from genai_perf.plots.plot_config_parser import PlotConfigParser - - -class TestPlotConfigParser: - yaml_config = """ - plot1: - title: TTFT vs ITL - x_metric: time_to_first_tokens - y_metric: inter_token_latencies - x_label: TTFT (ms) - y_label: ITL (ms) - width: 1000 - height: 3000 - type: box - paths: - - run1/concurrency32.json - - run2/concurrency32.json - - run3/concurrency32.json - output: test_output_1 - - plot2: - title: Input Sequence Length vs Output Sequence Length - x_metric: input_sequence_lengths - y_metric: output_sequence_lengths - x_label: Input Sequence Length - y_label: Output Sequence Length - width: 1234 - height: 5678 - type: scatter - paths: - - run4/concurrency1.json - output: test_output_2 - """ - - def test_generate_configs(self, monkeypatch) -> None: - monkeypatch.setattr( - "genai_perf.plots.plot_config_parser.load_yaml", - lambda _: yaml.safe_load(self.yaml_config), - ) - monkeypatch.setattr(PlotConfigParser, "_get_statistics", lambda *_: {}) - monkeypatch.setattr(PlotConfigParser, "_get_metric", lambda *_: [1, 2, 3]) - - config_parser = PlotConfigParser(Path("test_config.yaml")) - plot_configs = config_parser.generate_configs() - - assert len(plot_configs) == 2 - pc1, pc2 = plot_configs - - # plot config 1 - assert pc1.title == "TTFT vs ITL" - assert pc1.x_label == "TTFT (ms)" - assert pc1.y_label == "ITL (ms)" - assert pc1.width == 1000 - assert pc1.height == 3000 - assert pc1.type == PlotType.BOX - assert pc1.output == Path("test_output_1") - - assert len(pc1.data) == 3 # profile run data - prd1, prd2, prd3 = pc1.data - assert prd1.name == "run1/concurrency32" - assert prd2.name == "run2/concurrency32" - assert prd3.name == "run3/concurrency32" - for prd in pc1.data: - assert prd.x_metric == [1, 2, 3] - assert prd.y_metric == [1, 2, 3] - - # plot config 2 - assert pc2.title == "Input Sequence Length vs Output 
Sequence Length" - assert pc2.x_label == "Input Sequence Length" - assert pc2.y_label == "Output Sequence Length" - assert pc2.width == 1234 - assert pc2.height == 5678 - assert pc2.type == PlotType.SCATTER - assert pc2.output == Path("test_output_2") - - assert len(pc2.data) == 1 # profile run data - prd = pc2.data[0] - assert prd.name == "run4/concurrency1" - assert prd.x_metric == [1, 2, 3] - assert prd.y_metric == [1, 2, 3] diff --git a/src/c++/perf_analyzer/genai-perf/tests/test_profile_data_parser.py b/src/c++/perf_analyzer/genai-perf/tests/test_profile_data_parser.py deleted file mode 100644 index fe303c514..000000000 --- a/src/c++/perf_analyzer/genai-perf/tests/test_profile_data_parser.py +++ /dev/null @@ -1,297 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-
-import json
-from io import StringIO
-from pathlib import Path
-from typing import Any, List
-
-import numpy as np
-import pytest
-from genai_perf.metrics import Metrics
-from genai_perf.profile_data_parser import ProfileDataParser
-
-
-def ns_to_sec(ns: int) -> float:
-    """Convert from nanoseconds to seconds."""
-    return ns / 1e9
-
-
-class TestProfileDataParser:
-    @pytest.fixture
-    def mock_read_write(self, monkeypatch: pytest.MonkeyPatch) -> List[str]:
-        """
-        This fixture mocks the built-in open function for specific files:
-
-        - For "embedding_profile_export.json", it will read and return the
-          contents of self.embedding_profile_data
-        - For "ranking_profile_export.json", it will read and return the
-          contents of self.ranking_profile_data
-        - For "huggingface_ranking_profile_export.json", it will read and
-          return the contents of self.huggingface_ranking_profile_data
-        - For "profile_export.csv", it will capture all data written to
-          the file, and return it as the return value of this function
-        - For all other files, it will behave like the normal open function
-        """
-
-        written_data = []
-
-        original_open = open
-
-        def custom_open(filename, *args, **kwargs):
-            def write(self: Any, content: str) -> int:
-                written_data.append(content)
-                return len(content)
-
-            if filename == "embedding_profile_export.json":
-                tmp_file = StringIO(json.dumps(self.embedding_profile_data))
-                return tmp_file
-            elif filename == "ranking_profile_export.json":
-                tmp_file = StringIO(json.dumps(self.ranking_profile_data))
-                return tmp_file
-            elif filename == "huggingface_ranking_profile_export.json":
-                tmp_file = StringIO(json.dumps(self.huggingface_ranking_profile_data))
-                return tmp_file
-            elif filename == "profile_export.csv":
-                tmp_file = StringIO()
-                tmp_file.write = write.__get__(tmp_file)
-                return tmp_file
-            else:
-                return original_open(filename, *args, **kwargs)
-
-        monkeypatch.setattr("builtins.open", custom_open)
-
-        return written_data
-
-    # ================================================
-    # EMBEDDINGS API
-    # ================================================
-    embedding_profile_data = {
-        "service_kind": "openai",
-        "endpoint": "v1/embeddings",
-        "experiments": [
-            {
-                "experiment": {
-                    "mode": "concurrency",
-                    "value": 10,
-                },
-                "requests": [
-                    {
-                        "timestamp": 1,
-                        "request_inputs": {
-                            "payload": '{"input":"This is test","model":"NV-Embed-QA","input_type":"passage","encoding_format":"float","truncate":"NONE"}',
-                        },
-                        "response_timestamps": [3],
-                        "response_outputs": [
-                            {
-                                "response": '{"object":"list","data":[{"index":0,"embedding":[1, 2, 3],"object":"embedding"}],"model":"NV-Embed-QA","usage":{"prompt_tokens":7,"total_tokens":7}}'
-                            },
-                        ],
-                    },
-                    {
-                        "timestamp": 2,
-                        "request_inputs": {
-                            "payload": '{"input":"This is test too","model":"NV-Embed-QA","input_type":"passage","encoding_format":"float","truncate":"NONE"}',
-                        },
-                        "response_timestamps": [5],
-                        "response_outputs": [
-                            {
-                                "response": '{"object":"list","data":[{"index":0,"embedding":[1, 2, 3, 4],"object":"embedding"}],"model":"NV-Embed-QA","usage":{"prompt_tokens":8,"total_tokens":8}}'
-                            },
-                        ],
-                    },
-                ],
-            },
-        ],
-    }
-
-    def test_embedding_profile_data(self, mock_read_write: pytest.MonkeyPatch) -> None:
-        """Collect base metrics from profile export data and check values.
- - Metrics - * request latencies - - [3 - 1, 5 - 2] = [2, 3] - * request throughputs - - [2 / (5e-9 - 1e-9)] = [5e8] - """ - pd = ProfileDataParser(filename=Path("embedding_profile_export.json")) - - # experiment 1 statistics - stats = pd.get_statistics(infer_mode="concurrency", load_level="10") - metrics = stats.metrics - stats_dict = stats.stats_dict - assert isinstance(metrics, Metrics) - - assert metrics.request_latencies == [2, 3] - assert metrics.request_throughputs == [pytest.approx(5e8)] - - assert stats_dict["request_latency"]["avg"] == pytest.approx(2.5) # type: ignore - assert stats_dict["request_latency"]["p50"] == pytest.approx(2.5) # type: ignore - assert stats_dict["request_latency"]["min"] == pytest.approx(2) # type: ignore - assert stats_dict["request_latency"]["max"] == pytest.approx(3) # type: ignore - assert stats_dict["request_latency"]["std"] == np.std([2, 3]) # type: ignore - - assert stats_dict["request_throughput"]["avg"] == pytest.approx(5e8) # type: ignore - - # ================================================ - # RANKINGS API - # ================================================ - ranking_profile_data = { - "service_kind": "openai", - "endpoint": "v1/ranking", - "experiments": [ - { - "experiment": { - "mode": "concurrency", - "value": 10, - }, - "requests": [ - { - "timestamp": 1, - "request_inputs": { - "payload": '{"query":{"text":"This is a test."},"passages":[{"text":"test output one"},{"text":"test output two"},{"text":"test output three"}],"model":"nv-rerank-qa-mistral-4b:1","truncate":"END"}', - }, - "response_timestamps": [3], - "response_outputs": [ - { - "response": '{"rankings":[{"index":0,"logit":-5.98828125},{"index":1,"logit":-6.828125},{"index":2,"logit":-7.60546875}]}' - }, - ], - }, - { - "timestamp": 2, - "request_inputs": { - "payload": '{"query":{"text":"This is a test."},"passages":[{"text":"test output one"},{"text":"test output two"},{"text":"test output three"}],"model":"nv-rerank-qa-mistral-4b:1","truncate":"END"}', - }, - "response_timestamps": [5], - "response_outputs": [ - { - "response": '{"rankings":[{"index":2,"logit":-6.15625},{"index":1,"logit":-7.83984375},{"index":0,"logit":-7.84765625}]}' - }, - ], - }, - ], - }, - ], - } - - def test_ranking_profile_data(self, mock_read_write: pytest.MonkeyPatch) -> None: - """Collect base metrics from profile export data and check values. 
- - Metrics - * request latencies - - [3 - 1, 5 - 2] = [2, 3] - * request throughputs - - [2 / (5e-9 - 1e-9)] = [5e8] - """ - pd = ProfileDataParser(filename=Path("ranking_profile_export.json")) - - # experiment 1 statistics - stats = pd.get_statistics(infer_mode="concurrency", load_level="10") - metrics = stats.metrics - stats_dict = stats.stats_dict - assert isinstance(metrics, Metrics) - - assert metrics.request_latencies == [2, 3] - assert metrics.request_throughputs == [pytest.approx(5e8)] - - assert stats_dict["request_latency"]["avg"] == pytest.approx(2.5) # type: ignore - assert stats_dict["request_latency"]["p50"] == pytest.approx(2.5) # type: ignore - assert stats_dict["request_latency"]["min"] == pytest.approx(2) # type: ignore - assert stats_dict["request_latency"]["max"] == pytest.approx(3) # type: ignore - assert stats_dict["request_latency"]["std"] == np.std([2, 3]) # type: ignore - - assert stats_dict["request_throughput"]["avg"] == pytest.approx(5e8) # type: ignore - - # ================================================ - # HUGGINGFACE RANKINGS API - # ================================================ - huggingface_ranking_profile_data = { - "service_kind": "openai", - "endpoint": "rerank", - "experiments": [ - { - "experiment": { - "mode": "concurrency", - "value": 10, - }, - "requests": [ - { - "timestamp": 1, - "request_inputs": { - "payload": '{"query":"What was the first car ever driven?","texts":["Daddys Home 2 Principal photography on the film began in Massachusetts in March 2017 and it was released in the United States by Paramount Pictures on November 10, 2017. Although the film received unfavorable reviews, it has grossed over $180 million worldwide on a $69 million budget.","Kevin Loader is a British film and television producer."]}' - }, - "response_timestamps": [3], - "response_outputs": [ - { - "response": '[{"index":0,"score":0.0032476764},{"index":1,"score":0.00036117696}]' - }, - ], - }, - { - "timestamp": 2, - "request_inputs": { - "payload": '{"query":"In what state did they film Shrek 2?","texts":["Francisco Antonio Zea Juan Francisco Antonio Hilari was a Colombian journalist, botanist, diplomat, politician, and statesman who served as the 1st Vice President of Colombia.","Daddys Home 2 Principal photography on the film began in Massachusetts in March 2017 and it was released in the United States by Paramount Pictures on November 10, 2017. Although the film received unfavorable reviews, it has grossed over $180 million worldwide on a $69 million budget."]}' - }, - "response_timestamps": [5], - "response_outputs": [ - { - "response": '[{"index":0,"score":0.020177318},{"index":1,"score":0.01461567}]' - }, - ], - }, - ], - }, - ], - } - - def test_huggingface_ranking_profile_data( - self, mock_read_write: pytest.MonkeyPatch - ) -> None: - """Collect base metrics from HuggingFace ranking profile export data and check values. 
-
-        Metrics
-        * request latencies
-            - [3 - 1, 5 - 2] = [2, 3]
-        * request throughputs
-            - [2 / (5e-9 - 1e-9)] = [5e8]
-        """
-        pd = ProfileDataParser(filename=Path("huggingface_ranking_profile_export.json"))
-
-        # experiment 1 statistics
-        stats = pd.get_statistics(infer_mode="concurrency", load_level="10")
-        metrics = stats.metrics
-        stats_dict = stats.stats_dict
-        assert isinstance(metrics, Metrics)
-
-        assert metrics.request_latencies == [2, 3]
-        assert metrics.request_throughputs == [pytest.approx(5e8)]
-
-        assert stats_dict["request_latency"]["avg"] == pytest.approx(2.5)  # type: ignore
-        assert stats_dict["request_latency"]["p50"] == pytest.approx(2.5)  # type: ignore
-        assert stats_dict["request_latency"]["min"] == pytest.approx(2)  # type: ignore
-        assert stats_dict["request_latency"]["max"] == pytest.approx(3)  # type: ignore
-        assert stats_dict["request_latency"]["std"] == np.std([2, 3])  # type: ignore
-
-        assert stats_dict["request_throughput"]["avg"] == pytest.approx(5e8)  # type: ignore
diff --git a/src/c++/perf_analyzer/genai-perf/tests/test_synthetic_image_generator.py b/src/c++/perf_analyzer/genai-perf/tests/test_synthetic_image_generator.py
deleted file mode 100644
index 5a79794bb..000000000
--- a/src/c++/perf_analyzer/genai-perf/tests/test_synthetic_image_generator.py
+++ /dev/null
@@ -1,123 +0,0 @@
-import base64
-import random
-from io import BytesIO
-
-import pytest
-from genai_perf.llm_inputs.synthetic_image_generator import (
-    ImageFormat,
-    SyntheticImageGenerator,
-)
-from PIL import Image
-
-
-def decode_image(base64_string):
-    _, data = base64_string.split(",")
-    decoded_data = base64.b64decode(data)
-    return Image.open(BytesIO(decoded_data))
-
-
-@pytest.mark.parametrize(
-    "expected_image_size",
-    [
-        (100, 100),
-        (200, 200),
-    ],
-)
-def test_different_image_size(expected_image_size):
-    expected_width, expected_height = expected_image_size
-    base64_string = SyntheticImageGenerator.create_synthetic_image(
-        image_width_mean=expected_width,
-        image_width_stddev=0,
-        image_height_mean=expected_height,
-        image_height_stddev=0,
-        image_format=ImageFormat.PNG,
-    )
-
-    image = decode_image(base64_string)
-    assert image.size == expected_image_size, "image not resized to the target size"
-
-
-def test_negative_size_is_not_selected():
-    # an exception is raised when PIL.Image.resize is called with negative
-    # values, so this call passes only if a negative size is never selected
-    _ = SyntheticImageGenerator.create_synthetic_image(
-        image_width_mean=-1,
-        image_width_stddev=10,
-        image_height_mean=-1,
-        image_height_stddev=10,
-        image_format=ImageFormat.PNG,
-    )
-
-
-@pytest.mark.parametrize(
-    "width_mean, width_stddev, height_mean, height_stddev",
-    [
-        (100, 15, 100, 15),
-        (123, 10, 456, 7),
-    ],
-)
-def test_generator_deterministic(width_mean, width_stddev, height_mean, height_stddev):
-    random.seed(123)
-    img1 = SyntheticImageGenerator.create_synthetic_image(
-        image_width_mean=width_mean,
-        image_width_stddev=width_stddev,
-        image_height_mean=height_mean,
-        image_height_stddev=height_stddev,
-        image_format=ImageFormat.PNG,
-    )
-
-    random.seed(123)
-    img2 = SyntheticImageGenerator.create_synthetic_image(
-        image_width_mean=width_mean,
-        image_width_stddev=width_stddev,
-        image_height_mean=height_mean,
-        image_height_stddev=height_stddev,
-        image_format=ImageFormat.PNG,
-    )
-
-    assert img1 == img2, "generator is nondeterministic"
-
-
-@pytest.mark.parametrize("image_format", [ImageFormat.PNG, ImageFormat.JPEG])
-def test_base64_encoding_with_different_formats(image_format):
-    img_base64 = SyntheticImageGenerator.create_synthetic_image(
- image_width_mean=100, - image_width_stddev=100, - image_height_mean=100, - image_height_stddev=100, - image_format=image_format, - ) - - # check prefix - expected_prefix = f"data:image/{image_format.name.lower()};base64," - assert img_base64.startswith(expected_prefix), "unexpected prefix" - - # check image format - data = img_base64[len(expected_prefix) :] - img_data = base64.b64decode(data) - img_bytes = BytesIO(img_data) - image = Image.open(img_bytes) - assert image.format == image_format.name - - -def test_random_image_format(): - random.seed(123) - img1 = SyntheticImageGenerator.create_synthetic_image( - image_width_mean=100, - image_width_stddev=100, - image_height_mean=100, - image_height_stddev=100, - image_format=None, - ) - - random.seed(456) - img2 = SyntheticImageGenerator.create_synthetic_image( - image_width_mean=100, - image_width_stddev=100, - image_height_mean=100, - image_height_stddev=100, - image_format=None, - ) - - # check prefix - assert img1.startswith("data:image/png") - assert img2.startswith("data:image/jpeg") diff --git a/src/c++/perf_analyzer/genai-perf/tests/test_tokenizer.py b/src/c++/perf_analyzer/genai-perf/tests/test_tokenizer.py deleted file mode 100644 index 259389dcf..000000000 --- a/src/c++/perf_analyzer/genai-perf/tests/test_tokenizer.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
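# The tokenizer tests below exercise the genai_perf tokenizer wrapper. A
# minimal usage sketch, assuming DEFAULT_TOKENIZER can be downloaded or
# resolved from a local cache (kept in a comment so nothing runs at import
# time; variable names are illustrative):
#
#   from genai_perf.tokenizer import DEFAULT_TOKENIZER, get_tokenizer
#
#   tokenizer = get_tokenizer(DEFAULT_TOKENIZER)
#   ids = tokenizer.encode("This is test.")  # special tokens omitted by default
#   ids_tagged = tokenizer.encode("This is test.", add_special_tokens=True)
#   text = tokenizer.decode(ids)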
-
-import pytest
-from genai_perf.exceptions import GenAIPerfException
-from genai_perf.tokenizer import DEFAULT_TOKENIZER, get_tokenizer
-
-
-class TestTokenizer:
-    def test_default_tokenizer(self):
-        tokenizer_model = DEFAULT_TOKENIZER
-        get_tokenizer(tokenizer_model)
-
-    def test_non_default_tokenizer(self):
-        tokenizer_model = "gpt2"
-        get_tokenizer(tokenizer_model)
-
-    def test_bad_tokenizer(self):
-        with pytest.raises(GenAIPerfException):
-            get_tokenizer("bad_tokenizer")
-
-    def test_default_args(self):
-        tokenizer_model = DEFAULT_TOKENIZER
-        tokenizer = get_tokenizer(tokenizer_model)
-
-        # There are 3 special tokens in the default tokenizer
-        #  - <unk>: 0 (unknown)
-        #  - <s>: 1 (beginning of sentence)
-        #  - </s>: 2 (end of sentence)
-        special_tokens = list(tokenizer._tokenizer.added_tokens_encoder.keys())
-        special_token_ids = list(tokenizer._tokenizer.added_tokens_encoder.values())
-
-        # special tokens are disabled by default
-        text = "This is test."
-        tokens = tokenizer(text)["input_ids"]
-        assert all([s not in tokens for s in special_token_ids])
-
-        tokens = tokenizer.encode(text)
-        assert all([s not in tokens for s in special_token_ids])
-
-        output = tokenizer.decode(tokens)
-        assert all([s not in output for s in special_tokens])
-
-        # check that special tokens appear when explicitly enabled
-        text = "This is test."
-        tokens = tokenizer(text, add_special_tokens=True)["input_ids"]
-        assert any([s in tokens for s in special_token_ids])
-
-        tokens = tokenizer.encode(text, add_special_tokens=True)
-        assert any([s in tokens for s in special_token_ids])
-
-        output = tokenizer.decode(tokens, skip_special_tokens=False)
-        assert any([s in output for s in special_tokens])
diff --git a/src/c++/perf_analyzer/genai-perf/tests/test_wrapper.py b/src/c++/perf_analyzer/genai-perf/tests/test_wrapper.py
deleted file mode 100644
index fd4c34b51..000000000
--- a/src/c++/perf_analyzer/genai-perf/tests/test_wrapper.py
+++ /dev/null
@@ -1,175 +0,0 @@
-# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-# * Neither the name of NVIDIA CORPORATION nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
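# The wrapper tests below share one pattern: monkeypatch sys.argv, run the
# real CLI parser, then inspect the perf_analyzer command assembled by
# Profiler.build_cmd as a single string. A minimal sketch of that pattern
# (the argument values are illustrative):
#
#   monkeypatch.setattr(
#       "sys.argv",
#       ["genai-perf", "profile", "-m", "test_model", "--service-kind", "triton"],
#   )
#   args, extra_args = parser.parse_args()
#   cmd_string = " ".join(Profiler.build_cmd(args, extra_args))
#   assert cmd_string.count(" -u ") + cmd_string.count(" --url ") == 1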
- -import subprocess -from unittest.mock import MagicMock, patch - -import pytest -from genai_perf import parser -from genai_perf.constants import DEFAULT_GRPC_URL -from genai_perf.wrapper import Profiler - - -class TestWrapper: - @pytest.mark.parametrize( - "arg", - [ - ([]), - (["-u", "testurl:1000"]), - (["--url", "testurl:1000"]), - ], - ) - def test_url_exactly_once_triton(self, monkeypatch, arg): - args = [ - "genai-perf", - "profile", - "-m", - "test_model", - "--service-kind", - "triton", - ] + arg - monkeypatch.setattr("sys.argv", args) - args, extra_args = parser.parse_args() - cmd = Profiler.build_cmd(args, extra_args) - cmd_string = " ".join(cmd) - - number_of_url_args = cmd_string.count(" -u ") + cmd_string.count(" --url ") - assert number_of_url_args == 1 - - @pytest.mark.parametrize( - "arg, expected_filepath", - [ - ( - [], - "artifacts/test_model-triton-tensorrtllm-concurrency1/profile_export.json", - ), - ( - ["--artifact-dir", "test_dir"], - "test_dir/profile_export.json", - ), - ( - ["--artifact-dir", "test_dir", "--profile-export-file", "test.json"], - "test_dir/test.json", - ), - ], - ) - def test_profile_export_filepath(self, monkeypatch, arg, expected_filepath): - args = [ - "genai-perf", - "profile", - "-m", - "test_model", - "--service-kind", - "triton", - ] + arg - monkeypatch.setattr("sys.argv", args) - args, extra_args = parser.parse_args() - cmd = Profiler.build_cmd(args, extra_args) - cmd_string = " ".join(cmd) - - expected_pattern = f"--profile-export-file {expected_filepath}" - assert expected_pattern in cmd_string - - @pytest.mark.parametrize( - "arg", - [ - (["--backend", "tensorrtllm"]), - (["--backend", "vllm"]), - ], - ) - def test_service_triton(self, monkeypatch, arg): - args = [ - "genai-perf", - "profile", - "-m", - "test_model", - "--service-kind", - "triton", - ] + arg - monkeypatch.setattr("sys.argv", args) - args, extra_args = parser.parse_args() - cmd = Profiler.build_cmd(args, extra_args) - cmd_string = " ".join(cmd) - - # Ensure the correct arguments are appended. - assert cmd_string.count(" -i grpc") == 1 - assert cmd_string.count(" --streaming") == 1 - assert cmd_string.count(f"-u {DEFAULT_GRPC_URL}") == 1 - if arg[1] == "tensorrtllm": - assert cmd_string.count("--shape max_tokens:1") == 1 - assert cmd_string.count("--shape text_input:1") == 1 - - @pytest.mark.parametrize( - "arg", - [ - (["--endpoint-type", "completions"]), - (["--endpoint-type", "chat"]), - ], - ) - def test_service_openai(self, monkeypatch, arg): - args = [ - "genai-perf", - "profile", - "-m", - "test_model", - "--service-kind", - "openai", - ] + arg - monkeypatch.setattr("sys.argv", args) - args, extra_args = parser.parse_args() - cmd = Profiler.build_cmd(args, extra_args) - cmd_string = " ".join(cmd) - - # Ensure the correct arguments are appended. - assert cmd_string.count(" -i http") == 1 - - @patch("genai_perf.wrapper.subprocess.run") - def test_stdout_verbose(self, mock_subprocess_run): - args = MagicMock() - args.model = "test_model" - args.verbose = True - Profiler.run(args=args, extra_args=None) - - # Check that standard output was not redirected. - for call_args in mock_subprocess_run.call_args_list: - _, kwargs = call_args - assert ( - "stdout" not in kwargs or kwargs["stdout"] is None - ), "With the verbose flag, stdout should not be redirected." 
- - @patch("genai_perf.wrapper.subprocess.run") - def test_stdout_not_verbose(self, mock_subprocess_run): - args = MagicMock() - args.model = "test_model" - args.verbose = False - Profiler.run(args=args, extra_args=None) - - # Check that standard output was redirected. - for call_args in mock_subprocess_run.call_args_list: - _, kwargs = call_args - assert ( - kwargs["stdout"] is subprocess.DEVNULL - ), "When the verbose flag is not passed, stdout should be redirected to /dev/null." diff --git a/src/c++/perf_analyzer/ictx_id_tracker.h b/src/c++/perf_analyzer/ictx_id_tracker.h deleted file mode 100644 index 8d85067eb..000000000 --- a/src/c++/perf_analyzer/ictx_id_tracker.h +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#pragma once - - -namespace triton { namespace perfanalyzer { - -/// Interface for object that tracks context IDs -/// -class ICtxIdTracker { - public: - // Reset the tracker using the provided input count - // - virtual void Reset(size_t count) = 0; - - // Restore the given ID into the tracker - // - virtual void Restore(size_t id) = 0; - - // Pick and return a Ctx ID - // - virtual size_t Get() = 0; - - // Returns true if there are Ctx IDs available to Get. - virtual bool IsAvailable() = 0; -}; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/idle_timer.h b/src/c++/perf_analyzer/idle_timer.h deleted file mode 100644 index 419789ec9..000000000 --- a/src/c++/perf_analyzer/idle_timer.h +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#pragma once
-#include <chrono>
-#include <mutex>
-#include <stdexcept>
-
-namespace triton { namespace perfanalyzer {
-
-#ifndef DOCTEST_CONFIG_DISABLE
-class TestLoadManager;
-#endif
-
-
-/// Class to track idle periods of time
-///
-class IdleTimer {
- public:
-  void Start()
-  {
-    std::lock_guard<std::mutex> lk(mtx_);
-    StartImpl();
-  }
-
-  void Stop()
-  {
-    std::lock_guard<std::mutex> lk(mtx_);
-    StopImpl();
-  }
-
-  /// Reset the time counter, and restart the timer if it is active
-  ///
-  void Reset()
-  {
-    Restart();
-    idle_ns_ = 0;
-  }
-
-  /// Returns the number of nanoseconds this timer has counted as being idle.
-  /// If the timer was already active, then it will first stop (and count the
-  /// pending time), and then start back up
-  ///
-  uint64_t GetIdleTime()
-  {
-    Restart();
-    return idle_ns_;
-  }
-
- private:
-  std::mutex mtx_;
-  uint64_t idle_ns_{0};
-  bool is_idle_{false};
-  std::chrono::steady_clock::time_point start_time_;
-
-  void Restart()
-  {
-    std::lock_guard<std::mutex> lk(mtx_);
-    if (is_idle_) {
-      StopImpl();
-      StartImpl();
-    }
-  }
-
-  void StartImpl()
-  {
-    if (is_idle_) {
-      throw std::runtime_error("Can't start a timer that is already active\n");
-    }
-
-    is_idle_ = true;
-    start_time_ = std::chrono::steady_clock::now();
-  }
-
-  void StopImpl()
-  {
-    if (!is_idle_) {
-      throw std::runtime_error("Can't stop a timer that isn't active\n");
-    }
-
-    is_idle_ = false;
-    auto end = std::chrono::steady_clock::now();
-    auto duration = end - start_time_;
-    idle_ns_ += duration.count();
-  }
-
-
-#ifndef DOCTEST_CONFIG_DISABLE
-  friend TestLoadManager;
-#endif
-};
-
-}}  // namespace triton::perfanalyzer
diff --git a/src/c++/perf_analyzer/iinfer_data_manager.h b/src/c++/perf_analyzer/iinfer_data_manager.h
deleted file mode 100644
index 33dd8ac8c..000000000
--- a/src/c++/perf_analyzer/iinfer_data_manager.h
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#pragma once - -#include "client_backend/client_backend.h" -#include "constants.h" -#include "data_loader.h" -#include "infer_data.h" -#include "model_parser.h" -#include "perf_utils.h" - -namespace triton { namespace perfanalyzer { - -/// Interface for classes that manage infer data preparation for inference -/// -class IInferDataManager { - public: - /// Initialize this object. Must be called before any other functions - /// \return cb::Error object indicating success or failure. - virtual cb::Error Init() = 0; - - /// Populate the target InferData object with input and output objects - /// according to the model's shape - /// \param infer_data The target InferData object. - /// \return cb::Error object indicating success or failure. - virtual cb::Error InitInferData(InferData& infer_data) = 0; - - /// Updates the input and expected output data in the target infer_data for an - /// inference request - /// \param thread_id The ID of the calling thread - /// \param stream_index The data stream to use for next data - /// \param step_index The step index to use for next data - /// \param infer_data The target InferData object - /// \return cb::Error object indicating success or failure. - virtual cb::Error UpdateInferData( - size_t thread_id, int stream_index, int step_index, - InferData& infer_data) = 0; -}; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/infer_context.cc b/src/c++/perf_analyzer/infer_context.cc deleted file mode 100644 index aa868eba7..000000000 --- a/src/c++/perf_analyzer/infer_context.cc +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright 2023-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. 
-// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "infer_context.h" - -namespace triton { namespace perfanalyzer { - -void -InferContext::Init() -{ - thread_stat_->status_ = infer_data_manager_->InitInferData(infer_data_); - if (!thread_stat_->status_.IsOk()) { - return; - } - - if (streaming_) { - // Decoupled models should not collect client side statistics - thread_stat_->status_ = infer_backend_->StartStream( - async_callback_func_, (!parser_->IsDecoupled())); - if (!thread_stat_->status_.IsOk()) { - return; - } - } -} - -void -InferContext::SendInferRequest(bool delayed) -{ - // Update the inputs if required - if (using_json_data_) { - UpdateJsonData(); - } - SendRequest(request_id_++, delayed); -} - -void -InferContext::SendSequenceInferRequest(uint32_t seq_stat_index, bool delayed) -{ - // Need lock to protect the order of dispatch across worker threads. - // This also helps in reporting the realistic latencies. 
- std::lock_guard guard( - sequence_manager_->GetMutex(seq_stat_index)); - if (!early_exit && execute_) { - sequence_manager_->SetInferSequenceOptions( - seq_stat_index, infer_data_.options_); - - // Update the inputs if required - if (using_json_data_) { - UpdateSeqJsonData(seq_stat_index); - } - - sequence_manager_->DecrementRemainingQueries(seq_stat_index); - - SendRequest( - request_id_++, delayed, - sequence_manager_->GetSequenceID(seq_stat_index)); - } -} - -void -InferContext::CompleteOngoingSequence(uint32_t seq_stat_index) -{ - std::lock_guard guard( - sequence_manager_->GetMutex(seq_stat_index)); - - if (sequence_manager_->GetRemainingQueries(seq_stat_index) != 0) { - sequence_manager_->SetRemainingQueries(seq_stat_index, 1); - sequence_manager_->SetInferSequenceOptions( - seq_stat_index, infer_data_.options_); - - if (using_json_data_) { - UpdateSeqJsonData(seq_stat_index); - } - sequence_manager_->DecrementRemainingQueries(seq_stat_index); - - bool is_delayed = false; - SendRequest( - request_id_++, is_delayed, - sequence_manager_->GetSequenceID(seq_stat_index)); - } -} - -void -InferContext::SendRequest( - const uint64_t request_id, const bool delayed, const uint64_t sequence_id) -{ - if (!thread_stat_->status_.IsOk()) { - return; - } - - thread_stat_->num_sent_requests_++; - - // Parse the request inputs to save in the profile export file - RequestRecord::RequestInput request_inputs{GetInputs()}; - - if (async_) { - uint64_t unique_request_id{(thread_id_ << 48) | ((request_id << 16) >> 16)}; - infer_data_.options_->request_id_ = std::to_string(unique_request_id); - { - std::lock_guard lock(thread_stat_->mu_); - auto it = async_req_map_ - .emplace(infer_data_.options_->request_id_, RequestRecord()) - .first; - it->second.request_inputs_ = {request_inputs}; - it->second.start_time_ = std::chrono::system_clock::now(); - it->second.sequence_end_ = infer_data_.options_->sequence_end_; - it->second.delayed_ = delayed; - it->second.sequence_id_ = sequence_id; - } - - thread_stat_->idle_timer.Start(); - if (streaming_) { - thread_stat_->status_ = infer_backend_->AsyncStreamInfer( - *(infer_data_.options_), infer_data_.valid_inputs_, - infer_data_.outputs_); - } else { - thread_stat_->status_ = infer_backend_->AsyncInfer( - async_callback_func_, *(infer_data_.options_), - infer_data_.valid_inputs_, infer_data_.outputs_); - } - thread_stat_->idle_timer.Stop(); - - total_ongoing_requests_++; - } else { - std::chrono::time_point start_time_sync, - end_time_sync; - thread_stat_->idle_timer.Start(); - start_time_sync = std::chrono::system_clock::now(); - cb::InferResult* results = nullptr; - thread_stat_->status_ = infer_backend_->Infer( - &results, *(infer_data_.options_), infer_data_.valid_inputs_, - infer_data_.outputs_); - thread_stat_->idle_timer.Stop(); - RequestRecord::ResponseOutput response_outputs{}; - if (results != nullptr) { - if (thread_stat_->status_.IsOk()) { - response_outputs = GetOutputs(*results); - thread_stat_->status_ = ValidateOutputs(results); - } - delete results; - } - if (!thread_stat_->status_.IsOk()) { - return; - } - end_time_sync = std::chrono::system_clock::now(); - std::vector> - end_time_syncs{end_time_sync}; - { - // Add the request record to thread request records vector with proper - // locking - std::lock_guard lock(thread_stat_->mu_); - auto total = end_time_sync - start_time_sync; - thread_stat_->request_records_.emplace_back(RequestRecord( - start_time_sync, std::move(end_time_syncs), {request_inputs}, - {response_outputs}, 
infer_data_.options_->sequence_end_, delayed, - sequence_id, false)); - thread_stat_->status_ = - infer_backend_->ClientInferStat(&(thread_stat_->contexts_stat_[id_])); - if (!thread_stat_->status_.IsOk()) { - return; - } - } - } -} - -const RequestRecord::RequestInput -InferContext::GetInputs() -{ - RequestRecord::RequestInput input{}; - for (const auto& request_input : infer_data_.valid_inputs_) { - std::string data_type{request_input->Datatype()}; - const uint8_t* buf{nullptr}; - size_t byte_size{0}; - request_input->RawData(&buf, &byte_size); - - // The first 4 bytes of BYTES data is a 32-bit integer to indicate the size - // of the rest of the data (which we already know based on byte_size). It - // should be ignored here, as it isn't part of the actual request - if (data_type == "BYTES" && byte_size >= 4) { - buf += 4; - byte_size -= 4; - } - input.emplace(request_input->Name(), RecordData(buf, byte_size, data_type)); - } - return input; -} - -const RequestRecord::ResponseOutput -InferContext::GetOutputs(const cb::InferResult& infer_result) -{ - RequestRecord::ResponseOutput output{}; - for (const auto& requested_output : infer_data_.outputs_) { - std::string data_type{requested_output->Datatype()}; - const uint8_t* buf{nullptr}; - size_t byte_size{0}; - infer_result.RawData(requested_output->Name(), &buf, &byte_size); - - // The first 4 bytes of BYTES data is a 32-bit integer to indicate the size - // of the rest of the data (which we already know based on byte_size). It - // should be ignored here, as it isn't part of the actual response - if (data_type == "BYTES" && byte_size >= 4) { - buf += 4; - byte_size -= 4; - } - output.emplace( - requested_output->Name(), RecordData(buf, byte_size, data_type)); - } - return output; -} - -void -InferContext::UpdateJsonData() -{ - int step_id = (data_step_id_ * batch_size_) % data_loader_->GetTotalSteps(0); - data_step_id_ += GetNumActiveThreads(); - thread_stat_->status_ = - infer_data_manager_->UpdateInferData(thread_id_, 0, step_id, infer_data_); -} - -void -InferContext::UpdateSeqJsonData(size_t seq_stat_index) -{ - const size_t sequence_length{ - sequence_manager_->GetSequenceLength(seq_stat_index)}; - const size_t remaining_queries{ - sequence_manager_->GetRemainingQueries(seq_stat_index)}; - const uint64_t data_stream_id{ - sequence_manager_->GetDataStreamID(seq_stat_index)}; - const size_t total_steps{data_loader_->GetTotalSteps(data_stream_id)}; - int step_id = (sequence_length - remaining_queries) % total_steps; - thread_stat_->status_ = infer_data_manager_->UpdateInferData( - thread_id_, data_stream_id, step_id, infer_data_); -} - -cb::Error -InferContext::ValidateOutputs(const cb::InferResult* result_ptr) -{ - // Validate output if set - if (!infer_data_.expected_outputs_.empty()) { - for (size_t i = 0; i < infer_data_.expected_outputs_.size(); ++i) { - const uint8_t* buf = nullptr; - size_t byte_size = 0; - for (const auto& expected : infer_data_.expected_outputs_[i]) { - // Request output by validation output's name explicitly, rather than - // relying on the array indices being sorted equally in both arrays. 
- result_ptr->RawData(expected.name, &buf, &byte_size); - if (!expected.is_valid) { - return cb::Error( - "Expected output can't be invalid", pa::GENERIC_ERROR); - } - if (byte_size < expected.batch1_size) { - return cb::Error( - "Output size doesn't match expected size", pa::GENERIC_ERROR); - } else if (memcmp(buf, expected.data_ptr, expected.batch1_size) != 0) { - return cb::Error( - "Output doesn't match expected output", pa::GENERIC_ERROR); - } else { - buf += expected.batch1_size; - byte_size -= expected.batch1_size; - } - } - if (byte_size != 0) { - return cb::Error( - "Output size doesn't match expected size", pa::GENERIC_ERROR); - } - } - } - return cb::Error::Success; -} - -void -InferContext::AsyncCallbackFuncImpl(cb::InferResult* result) -{ - std::shared_ptr result_ptr(result); - bool is_final_response{true}; - if (thread_stat_->cb_status_.IsOk()) { - // Add the request record to thread request records vector with - // proper locking - std::lock_guard lock(thread_stat_->mu_); - thread_stat_->cb_status_ = result_ptr->RequestStatus(); - if (thread_stat_->cb_status_.IsOk()) { - std::string request_id; - thread_stat_->cb_status_ = result_ptr->Id(&request_id); - const auto& it = async_req_map_.find(request_id); - if (it != async_req_map_.end()) { - bool is_null_response{false}; - thread_stat_->cb_status_ = - result_ptr->IsNullResponse(&is_null_response); - if (thread_stat_->cb_status_.IsOk() == false) { - return; - } - it->second.response_timestamps_.push_back( - std::chrono::system_clock::now()); - it->second.response_outputs_.push_back(GetOutputs(*result)); - num_responses_++; - if (is_null_response == true) { - it->second.has_null_last_response_ = true; - } - thread_stat_->cb_status_ = - result_ptr->IsFinalResponse(&is_final_response); - if (thread_stat_->cb_status_.IsOk() == false) { - return; - } - if (is_final_response) { - has_received_final_response_ = is_final_response; - thread_stat_->request_records_.emplace_back( - it->second.start_time_, it->second.response_timestamps_, - it->second.request_inputs_, it->second.response_outputs_, - it->second.sequence_end_, it->second.delayed_, - it->second.sequence_id_, it->second.has_null_last_response_); - infer_backend_->ClientInferStat(&(thread_stat_->contexts_stat_[id_])); - thread_stat_->cb_status_ = ValidateOutputs(result); - async_req_map_.erase(request_id); - } - } - } - } - - if (worker_callback_) { - worker_callback_(id_); - } - - if (is_final_response) { - total_ongoing_requests_--; - num_responses_ = 0; - - if (async_callback_finalize_func_ != nullptr) { - async_callback_finalize_func_(id_); - } - } -} - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/infer_context.h b/src/c++/perf_analyzer/infer_context.h deleted file mode 100644 index 7bacb16d5..000000000 --- a/src/c++/perf_analyzer/infer_context.h +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright 2023-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. 
-// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#pragma once - -#include -#include -#include -#include -#include - -#include "client_backend/client_backend.h" -#include "data_loader.h" -#include "idle_timer.h" -#include "iinfer_data_manager.h" -#include "infer_data.h" -#include "perf_utils.h" -#include "request_record.h" -#include "sequence_manager.h" - -namespace triton { namespace perfanalyzer { - -// Holds the running status of the thread. -struct ThreadStat { - ThreadStat() {} - - // The status of the worker thread - cb::Error status_; - // The status of the callback thread for async requests - cb::Error cb_status_; - // TODO REFACTOR TMA-1046 -- This should be in the InferContext class - // The statistics of the InferContext - std::vector contexts_stat_; - - // Tracks the amount of time this thread spent sleeping or waiting - IdleTimer idle_timer; - - // A vector of request records - std::vector request_records_; - // A lock to protect thread data - std::mutex mu_; - // The number of sent requests by this thread. - std::atomic num_sent_requests_{0}; -}; - -#ifndef DOCTEST_CONFIG_DISABLE -class NaggyMockInferContext; -#endif - -/// Sends inference requests to the server -class InferContext { - public: - InferContext( - const size_t thread_id, const uint32_t id, const bool async, - const bool streaming, const bool on_sequence_model, - const bool using_json_data, const int32_t batch_size, - std::shared_ptr thread_stat, - std::shared_ptr data_loader, - std::shared_ptr parser, - std::shared_ptr factory, const bool& execute, - const std::shared_ptr& infer_data_manager, - std::shared_ptr sequence_manager) - : thread_id_(thread_id), id_(id), async_(async), streaming_(streaming), - on_sequence_model_(on_sequence_model), - using_json_data_(using_json_data), batch_size_(batch_size), - thread_stat_(thread_stat), data_loader_(data_loader), parser_(parser), - factory_(factory), data_step_id_(thread_id), execute_(execute), - infer_data_manager_(infer_data_manager), - sequence_manager_(sequence_manager) - { - thread_stat_->status_ = factory_->CreateClientBackend(&infer_backend_); - infer_data_.options_.reset(new cb::InferOptions(parser_->ModelName())); - infer_data_.options_->model_version_ = parser_->ModelVersion(); - infer_data_.options_->model_signature_name_ = parser_->ModelSignatureName(); - - thread_stat_->contexts_stat_.emplace_back(); - } - - InferContext(InferContext&&) = delete; - InferContext(const InferContext&) = delete; - - // Initialize the context. 
Must be done before any inferences are sent - void Init(); - - // Send a single inference request to the server - void SendInferRequest(bool delayed = false); - - // Send a single sequence inference request to the server - void SendSequenceInferRequest(uint32_t seq_index, bool delayed = false); - - // Finish the active sequence at the given seq_stat_index - void CompleteOngoingSequence(uint32_t seq_stat_index); - - // Returns the total number of async requests that have been sent by this - // object and have not returned - uint GetNumOngoingRequests() { return total_ongoing_requests_; } - - // Returns the number of responses for the current request - uint64_t GetNumResponsesForCurrentRequest() { return num_responses_; } - - // Register a function that will get called after every async request returns - void RegisterAsyncCallbackFinalize(std::function callback) - { - async_callback_finalize_func_ = callback; - } - - void RegisterWorkerCallback(std::function worker_callback) - { - worker_callback_ = worker_callback; - } - - // TODO REFACTOR TMA-1043 this should be in memory class - void SetNumActiveThreads(size_t num_threads) - { - num_active_threads_ = num_threads; - } - - bool HasReceivedFinalResponse() { return has_received_final_response_; } - - protected: - /// A helper function to issue inference request to the server. - /// \param request_id The unique id to be associated with the request. - /// \param delayed Whether the request fell behind its scheduled time. - /// \param sequence_id Sequence ID of the request. Note that the default of - /// `0` means the request is not a sequence. - virtual void SendRequest( - const uint64_t request_id, const bool delayed, - const uint64_t sequence_id = 0); - - /// Update inputs based on custom json data - void UpdateJsonData(); - - /// Update inputs based on custom json data for the given sequence - void UpdateSeqJsonData(size_t seq_stat_index); - - cb::Error ValidateOutputs(const cb::InferResult* result_ptr); - - // Callback function for handling asynchronous requests - void AsyncCallbackFuncImpl(cb::InferResult* result); - - bool async_{false}; - bool streaming_{false}; - const bool on_sequence_model_{false}; - bool using_json_data_{false}; - const int32_t batch_size_{0}; - - std::shared_ptr thread_stat_; - std::shared_ptr data_loader_; - std::shared_ptr parser_; - std::shared_ptr factory_; - std::shared_ptr infer_data_manager_; - - uint64_t request_id_ = 0; - std::map async_req_map_; - std::atomic total_ongoing_requests_{0}; - size_t data_step_id_; - - // Function pointer to the async callback function implementation - std::function async_callback_func_ = std::bind( - &InferContext::AsyncCallbackFuncImpl, this, std::placeholders::_1); - - // Function pointer to registered async callbacks - std::function async_callback_finalize_func_ = nullptr; - - private: - const RequestRecord::RequestInput GetInputs(); - - const RequestRecord::ResponseOutput GetOutputs( - const cb::InferResult& infer_result); - - const uint32_t id_{0}; - const size_t thread_id_{0}; - - size_t GetNumActiveThreads() { return num_active_threads_; } - - size_t num_active_threads_{0}; - - // The backend to communicate with the server - std::unique_ptr infer_backend_; - InferData infer_data_; - - // FIXME: update build to use C++17 instead of C++14. This is a workaround - // since C++14 doesn't have std::optional, but C++17 does. 
- const bool execute_placeholder_{false}; - std::reference_wrapper execute_{execute_placeholder_}; - - std::shared_ptr sequence_manager_{nullptr}; - uint64_t num_responses_{0}; - std::function worker_callback_{nullptr}; - bool has_received_final_response_{false}; - -#ifndef DOCTEST_CONFIG_DISABLE - friend NaggyMockInferContext; - - public: - InferContext() = default; -#endif -}; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/infer_data.h b/src/c++/perf_analyzer/infer_data.h deleted file mode 100644 index abc52bb82..000000000 --- a/src/c++/perf_analyzer/infer_data.h +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#pragma once - -#include "client_backend/client_backend.h" -#include "tensor_data.h" - -namespace triton { namespace perfanalyzer { - -/// Holds all the data needed to send an inference request -struct InferData { - ~InferData() - { - for (const auto input : inputs_) { - delete input; - } - for (const auto output : outputs_) { - delete output; - } - } - - // The vector of pointers to InferInput objects for all possible inputs, - // potentially including optional inputs with no provided data. - std::vector inputs_; - // The vector of pointers to InferInput objects to be - // used for inference request. - std::vector valid_inputs_; - // The vector of pointers to InferRequestedOutput objects - // to be used with the inference request. - std::vector outputs_; - // If not empty, the expected output data in the same order as 'outputs_' - // The outer vector is per-output. The inner vector is for batching of each - // output - std::vector> expected_outputs_; - // The InferOptions object holding the details of the - // inference. 
-  std::unique_ptr<cb::InferOptions> options_;
-};
-
-
-}}  // namespace triton::perfanalyzer
diff --git a/src/c++/perf_analyzer/infer_data_manager.cc b/src/c++/perf_analyzer/infer_data_manager.cc
deleted file mode 100644
index fe5e9fcd8..000000000
--- a/src/c++/perf_analyzer/infer_data_manager.cc
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
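Note: the InferDataManager implementation that follows pre-populates one InferInput per (thread_id, input name, stream_id, step_id) combination and later looks them up in UpdateInputs, returning nothing for optional inputs with no data. A Python sketch of that keyed cache; all names and data values are illustrative:

    # Sketch of the (thread_id, input_name, stream_id, step_id) keyed cache
    # built by InferDataManager::CreateAndPopulateInputs and consumed by
    # GetInput/UpdateInputs. Names and data are illustrative.
    inputs = {}

    def populate(max_threads, input_names, streams):
        # streams maps stream_id -> number of steps in that stream
        for thread_id in range(max_threads):
            for name in input_names:
                for stream_id, total_steps in streams.items():
                    for step_id in range(total_steps):
                        inputs[(thread_id, name, stream_id, step_id)] = (
                            f"data[{name}@{stream_id}:{step_id}]"
                        )

    def get_input(thread_id, name, stream_id, step_id):
        # Returns None when no data was provided, as GetInput does for
        # optional inputs.
        return inputs.get((thread_id, name, stream_id, step_id))

    populate(max_threads=2, input_names=["INPUT0"], streams={0: 3})
    assert get_input(0, "INPUT0", 0, 2) == "data[INPUT0@0:2]"
    assert get_input(0, "INPUT0", 1, 0) is None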
- -#include "infer_data_manager.h" - -#include - -namespace triton { namespace perfanalyzer { - -cb::Error -InferDataManager::Init() -{ - RETURN_IF_ERROR(CreateAndPopulateInputs()); - return cb::Error::Success; -} - -cb::Error -InferDataManager::CreateAndPopulateInputs() -{ - // All combinations of thread + input + stream + step - // - for (size_t thread_id = 0; thread_id < max_threads_; thread_id++) { - for (const auto& input : *(parser_->Inputs())) { - const std::string& name = input.first; - const ModelTensor& tensor = input.second; - for (int stream_id = 0; - stream_id < (int)data_loader_->GetDataStreamsCount(); stream_id++) { - for (int step_id = 0; - step_id < (int)data_loader_->GetTotalSteps(stream_id); - step_id += 1) { - RETURN_IF_ERROR(CreateAndPopulateInput( - thread_id, name, tensor, stream_id, step_id)); - } - } - } - } - return cb::Error::Success; -} - -cb::Error -InferDataManager::CreateAndPopulateInput( - const size_t thread_id, const std::string& name, const ModelTensor& tensor, - int stream_id, int step_id) -{ - std::vector input_datas; - size_t count = 0; - - RETURN_IF_ERROR(GetInputData(name, tensor, stream_id, step_id, input_datas)); - - if (tensor.is_shape_tensor_) { - RETURN_IF_ERROR( - ValidateShapeTensor(tensor, stream_id, step_id, input_datas)); - } - - std::vector shape; - RETURN_IF_ERROR( - data_loader_->GetInputShape(tensor, stream_id, step_id, &shape)); - if (!shape.empty()) { - if ((parser_->MaxBatchSize() != 0) && (!tensor.is_shape_tensor_)) { - shape.insert(shape.begin(), (int64_t)batch_size_); - } - } - - cb::InferInput* input; - RETURN_IF_ERROR( - CreateInferInput(&input, backend_kind_, name, shape, tensor.datatype_)); - - - // Number of missing pieces of data for optional inputs - int missing_data_cnt = 0; - int total_cnt = input_datas.size(); - - for (size_t i = 0; i < total_cnt; i++) { - if (!input_datas[i].is_valid) { - missing_data_cnt++; - } else { - RETURN_IF_ERROR(input->AppendRaw( - input_datas[i].data_ptr, input_datas[i].batch1_size)); - } - } - - // If all optional inputs had data provided, this is a valid input. But if - // some inferences in the batch provided data for an optional input and - // some inferences did not, this is an invalid case and an error is - // thrown. - if (missing_data_cnt == 0) { - inputs_.insert({{thread_id, name, stream_id, step_id}, input}); - } else if (missing_data_cnt > 0 && missing_data_cnt < total_cnt) { - return cb::Error( - "For batch sizes larger than 1, the same set of inputs must be " - "specified for each batch. 
You cannot use different set of " - "optional inputs for each individual batch."); - } - - return cb::Error::Success; -} - -cb::InferInput* -InferDataManager::GetInput( - const size_t thread_id, const std::string& name, int stream_id, int step_id) -{ - auto input = inputs_.find({thread_id, name, stream_id, step_id}); - if (input == inputs_.end()) { - return nullptr; - } else { - return input->second; - } -} - - -cb::Error -InferDataManager::InitInferDataInput( - const std::string& name, const ModelTensor& model_tensor, - InferData& infer_data) -{ - std::vector shape; - RETURN_IF_ERROR(data_loader_->GetInputShape(model_tensor, 0, 0, &shape)); - if (shape.empty() && (backend_kind_ == cb::BackendKind::TRITON)) { - return cb::Error("unable to set shape for the input", pa::GENERIC_ERROR); - } - - if ((parser_->MaxBatchSize() != 0) && (!model_tensor.is_shape_tensor_)) { - shape.insert(shape.begin(), (int64_t)batch_size_); - } - - cb::InferInput* infer_input; - RETURN_IF_ERROR(CreateInferInput( - &infer_input, backend_kind_, name, shape, model_tensor.datatype_)); - infer_data.inputs_.push_back(infer_input); - - - TensorData input_data; - RETURN_IF_ERROR(data_loader_->GetInputData(model_tensor, 0, 0, input_data)); - - // Add optional input to request if data was found - if (input_data.is_valid) { - infer_data.valid_inputs_.push_back(infer_input); - } - - if (!shape.empty()) { - size_t max_count = (parser_->MaxBatchSize() == 0) ? 1 : batch_size_; - for (size_t i = 0; i < max_count; ++i) { - RETURN_IF_ERROR( - infer_input->AppendRaw(input_data.data_ptr, input_data.batch1_size)); - } - } - - AddInferDataParameters(infer_data); - - return cb::Error::Success; -} - -cb::Error -InferDataManager::InitInferDataOutput( - const std::string& name, const ModelTensor& model_tensor, - InferData& infer_data) -{ - cb::InferRequestedOutput* requested_output; - RETURN_IF_ERROR(cb::InferRequestedOutput::Create( - &requested_output, backend_kind_, name, model_tensor.datatype_)); - infer_data.outputs_.push_back(requested_output); - - return cb::Error::Success; -} - -cb::Error -InferDataManager::UpdateInputs( - const size_t thread_id, const int stream_index, const int step_index, - InferData& infer_data) -{ - // Reset inputs for this inference request - infer_data.valid_inputs_.clear(); - - for (const auto& input : infer_data.inputs_) { - const auto& name = input->Name(); - - cb::InferInput* tmp_input = - GetInput(thread_id, name, stream_index, step_index); - if (tmp_input != nullptr) { - infer_data.valid_inputs_.push_back(tmp_input); - } - } - return cb::Error::Success; -} - - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/infer_data_manager.h b/src/c++/perf_analyzer/infer_data_manager.h deleted file mode 100644 index ccde8d2f8..000000000 --- a/src/c++/perf_analyzer/infer_data_manager.h +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. 
-// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#pragma once - -#include "client_backend/client_backend.h" -#include "constants.h" -#include "data_loader.h" -#include "infer_data.h" -#include "infer_data_manager_base.h" -#include "model_parser.h" -#include "perf_utils.h" - -namespace triton { namespace perfanalyzer { - -/// Manages infer data to prepare an inference request and the resulting -/// inference output from triton server -class InferDataManager : public InferDataManagerBase { - public: - InferDataManager( - const size_t max_threads, const int32_t batch_size, - const std::unordered_map& - request_parameters, - const std::shared_ptr& parser, - const std::shared_ptr& factory, - const std::shared_ptr& data_loader) - : max_threads_(max_threads), - InferDataManagerBase( - batch_size, request_parameters, parser, factory, data_loader) - { - } - - /// Initialize this object. Must be called before any other functions - /// \return cb::Error object indicating success or failure. - cb::Error Init() override; - - protected: - const size_t max_threads_{1}; - std::map, cb::InferInput*> inputs_; - - cb::Error CreateAndPopulateInputs(); - cb::Error CreateAndPopulateInput( - const size_t thread_id, const std::string& name, - const ModelTensor& model_tensor, int stream_id, int step_id); - - cb::InferInput* GetInput( - const size_t thread_id, const std::string& name, int stream_id, - int step_id); - - cb::Error InitInferDataInput( - const std::string& name, const ModelTensor& model_tensor, - InferData& infer_data) override; - - cb::Error InitInferDataOutput( - const std::string& name, const ModelTensor& model_tensor, - InferData& infer_data) override; - - /// Helper function to update the inputs - /// \param thread_id The ID of the calling thread - /// \param stream_index The data stream to use for next data - /// \param step_index The step index to use for next data - /// \param infer_data The target InferData object - /// \return cb::Error object indicating success or failure. - cb::Error UpdateInputs( - const size_t thread_id, const int stream_index, const int step_index, - InferData& infer_data); - -#ifndef DOCTEST_CONFIG_DISABLE - public: - InferDataManager() = default; -#endif -}; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/infer_data_manager_base.cc b/src/c++/perf_analyzer/infer_data_manager_base.cc deleted file mode 100644 index 9a06f86b0..000000000 --- a/src/c++/perf_analyzer/infer_data_manager_base.cc +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "infer_data_manager_base.h" - -#include - -namespace triton { namespace perfanalyzer { - -cb::Error -InferDataManagerBase::GetInputData( - const std::string& name, const ModelTensor& tensor, int stream_id, - int step_id, std::vector& input_datas) -{ - size_t max_count = tensor.is_shape_tensor_ ? 
1 : batch_size_; - std::vector shape; - std::vector prev_shape; - - for (size_t count = 0; count < max_count; count++) { - int local_step_id = - (step_id + count) % data_loader_->GetTotalSteps(stream_id); - - TensorData input_data; - - RETURN_IF_ERROR( - data_loader_->GetInputShape(tensor, stream_id, local_step_id, &shape)); - if (!shape.empty()) { - if (count == 0) { - prev_shape = shape; - } else { - if (!std::equal(shape.begin(), shape.end(), prev_shape.begin())) { - return cb::Error( - "can not batch tensors with different shapes together " - "(input '" + - name + "' expected shape " + ShapeVecToString(prev_shape) + - " and received " + ShapeVecToString(shape), - pa::GENERIC_ERROR); - } - } - } - - RETURN_IF_ERROR(data_loader_->GetInputData( - tensor, stream_id, local_step_id, input_data)); - - input_datas.push_back(input_data); - } - - return cb::Error::Success; -} - -cb::Error -InferDataManagerBase::ValidateShapeTensor( - const ModelTensor& tensor, int stream_id, int step_id, - const std::vector& input_datas) -{ - // Validate that steps 1 through N are exactly the same as step 0, since step - // 0 is the only one we send for shape tensors - for (size_t count = 1; count < batch_size_; count++) { - int local_step_id = - (step_id + count) % data_loader_->GetTotalSteps(stream_id); - - TensorData input_data; - RETURN_IF_ERROR(data_loader_->GetInputData( - tensor, stream_id, local_step_id, input_data)); - - if (input_data.batch1_size != input_datas.back().batch1_size) { - return cb::Error( - "The shape tensors should be identical in a batch (mismatch " - "in size)", - pa::GENERIC_ERROR); - } - - for (size_t data_idx = 0; data_idx < input_data.batch1_size; data_idx++) { - if (*(input_data.data_ptr + data_idx) != - *(input_datas.back().data_ptr + data_idx)) { - return cb::Error( - "The shape tensors should be identical in a batch " - "(mismatch in content)", - pa::GENERIC_ERROR); - } - } - } - return cb::Error::Success; -} - -cb::Error -InferDataManagerBase::InitInferData(InferData& infer_data) -{ - // Initialize inputs - for (const auto& input : *(parser_->Inputs())) { - RETURN_IF_ERROR(InitInferDataInput(input.first, input.second, infer_data)); - } - - for (const auto& output : *(parser_->Outputs())) { - RETURN_IF_ERROR( - InitInferDataOutput(output.first, output.second, infer_data)); - } - - return cb::Error::Success; -} - -cb::Error -InferDataManagerBase::UpdateInferData( - size_t thread_id, int stream_index, int step_index, InferData& infer_data) -{ - RETURN_IF_ERROR(data_loader_->ValidateIndexes(stream_index, step_index)); - RETURN_IF_ERROR( - UpdateInputs(thread_id, stream_index, step_index, infer_data)); - RETURN_IF_ERROR( - UpdateValidationOutputs(stream_index, step_index, infer_data)); - return cb::Error::Success; -} - -cb::Error -InferDataManagerBase::UpdateValidationOutputs( - int stream_index, int step_index, InferData& infer_data) -{ - RETURN_IF_ERROR(data_loader_->ValidateIndexes(stream_index, step_index)); - - infer_data.expected_outputs_.clear(); - - for (const auto& output : infer_data.outputs_) { - const auto& model_output = (*(parser_->Outputs()))[output->Name()]; - - TensorData output_data; - const int* set_shape_values = nullptr; - int set_shape_value_cnt = 0; - - std::vector outputs; - for (size_t i = 0; i < batch_size_; ++i) { - RETURN_IF_ERROR(data_loader_->GetOutputData( - output->Name(), stream_index, - (step_index + i) % data_loader_->GetTotalSteps(0), output_data)); - if (!output_data.is_valid) { - break; - } - - outputs.emplace_back(output_data); - // Shape 
tensor only need the first batch element - if (model_output.is_shape_tensor_) { - break; - } - } - if (!outputs.empty()) { - infer_data.expected_outputs_.emplace_back(std::move(outputs)); - } - } - return cb::Error::Success; -} - -cb::Error -InferDataManagerBase::CreateInferInput( - cb::InferInput** infer_input, const cb::BackendKind kind, - const std::string& name, const std::vector& dims, - const std::string& datatype) -{ - return cb::InferInput::Create(infer_input, kind, name, dims, datatype); -} - -void -InferDataManagerBase::AddInferDataParameters(InferData& infer_data) -{ - infer_data.options_->request_parameters_ = request_parameters_; -} - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/infer_data_manager_base.h b/src/c++/perf_analyzer/infer_data_manager_base.h deleted file mode 100644 index d92499067..000000000 --- a/src/c++/perf_analyzer/infer_data_manager_base.h +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#pragma once - -#include "client_backend/client_backend.h" -#include "constants.h" -#include "data_loader.h" -#include "iinfer_data_manager.h" -#include "infer_data.h" -#include "model_parser.h" -#include "perf_utils.h" -#include "tensor_data.h" - -namespace triton { namespace perfanalyzer { - -/// Base class for Infer Data managers -/// -class InferDataManagerBase : public IInferDataManager { - public: - InferDataManagerBase( - const int32_t batch_size, - const std::unordered_map& - request_parameters, - const std::shared_ptr& parser, - const std::shared_ptr& factory, - const std::shared_ptr& data_loader) - : batch_size_(batch_size), request_parameters_(request_parameters), - parser_(parser), factory_(factory), data_loader_(data_loader), - backend_kind_(factory->Kind()) - { - } - - /// Populate the target InferData object with input and output objects - /// according to the model's shape - /// \param infer_data The target InferData object. - /// \return cb::Error object indicating success or failure. 
- cb::Error InitInferData(InferData& infer_data) override; - - /// Updates the input data to use for inference request - /// \param thread_id The ID of the calling thread - /// \param stream_index The data stream to use for next data - /// \param step_index The step index to use for next data - /// \param infer_data The target InferData object - /// \return cb::Error object indicating success or failure. - cb::Error UpdateInferData( - size_t thread_id, int stream_index, int step_index, - InferData& infer_data) override; - - protected: - size_t batch_size_; - std::shared_ptr parser_; - std::shared_ptr factory_; - std::shared_ptr data_loader_; - std::unique_ptr backend_; - cb::BackendKind backend_kind_; - std::unordered_map request_parameters_; - - /// Gets the input data for the specified input for the specified batch size - /// - /// \param name The name of the input to get data for - /// \param tensor The ModelTensor of the input to get data for - /// \param stream_id The ID of the stream to get data for - /// \param step_id The ID of the step within the stream - /// \param input_datas The returned vector of TensorDatas - /// \return cb::Error object indicating success or failure. - cb::Error GetInputData( - const std::string& name, const ModelTensor& tensor, int stream_id, - int step_id, std::vector& input_datas); - - /// For the case of an input with is_shape_tensor true, validate that - /// it follows all rules, and throw an error if it does not - /// \param tensor The ModelTensor of the input to validate - /// \param stream_id The ID of the stream to validate - /// \param step_id The ID of the step within the stream - /// \param input_datas vector of TensorDatas to validate - /// \return cb::Error object indicating success or failure. - cb::Error ValidateShapeTensor( - const ModelTensor& tensor, int stream_id, int step_id, - const std::vector& input_datas); - - /// Helper function to update the inputs - /// \param thread_id The ID of the calling thread - /// \param stream_index The data stream to use for next data - /// \param step_index The step index to use for next data - /// \param infer_data The target InferData object - /// \return cb::Error object indicating success or failure. - virtual cb::Error UpdateInputs( - const size_t thread_id, const int stream_index, const int step_index, - InferData& infer_data) = 0; - - /// Updates the expected output data to use for inference request. Empty - /// vector will be returned if there is no expected output associated to the - /// step. - /// \param stream_index The data stream to use for next data - /// \param step_index The step index to use for next data - /// \param infer_data The target InferData object - /// \return cb::Error object indicating success or failure. - cb::Error UpdateValidationOutputs( - int stream_index, int step_index, InferData& infer_data); - - /// Creates inference input object - /// \param infer_input Output parameter storing newly created inference input - /// \param kind Backend kind - /// \param name Name of inference input - /// \param dims Shape of inference input - /// \param datatype Data type of inference input - /// \return cb::Error object indicating success or failure. 
- virtual cb::Error CreateInferInput( - cb::InferInput** infer_input, const cb::BackendKind kind, - const std::string& name, const std::vector& dims, - const std::string& datatype); - - virtual cb::Error InitInferDataInput( - const std::string& name, const ModelTensor& model_tensor, - InferData& infer_data) = 0; - - virtual cb::Error InitInferDataOutput( - const std::string& name, const ModelTensor& model_tensor, - InferData& infer_data) = 0; - - void AddInferDataParameters(InferData& infer_data); - -#ifndef DOCTEST_CONFIG_DISABLE - public: - InferDataManagerBase() = default; -#endif -}; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/infer_data_manager_factory.h b/src/c++/perf_analyzer/infer_data_manager_factory.h deleted file mode 100644 index 6bf24bef8..000000000 --- a/src/c++/perf_analyzer/infer_data_manager_factory.h +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
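Note: InferDataManagerBase::ValidateShapeTensor above requires every batch element of a shape-tensor input to match element 0 in both size and content, since only step 0 is actually sent. A simplified Python sketch of that rule, modeling each tensor's raw data as a bytes object:

    # Sketch of the shape-tensor batching rule from
    # InferDataManagerBase::ValidateShapeTensor: all batch elements must
    # match the first in size and content. Data is modeled as bytes.
    def validate_shape_tensor(batch_elements: list[bytes]) -> None:
        first = batch_elements[0]
        for elem in batch_elements[1:]:
            if len(elem) != len(first):
                raise ValueError(
                    "shape tensors in a batch must have identical sizes")
            if elem != first:
                raise ValueError(
                    "shape tensors in a batch must have identical contents")

    validate_shape_tensor([b"\x02\x00", b"\x02\x00"])  # accepted
    try:
        validate_shape_tensor([b"\x02\x00", b"\x03\x00"])
    except ValueError as e:
        print("rejected:", e)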
-#pragma once
-
-#include "data_loader.h"
-#include "iinfer_data_manager.h"
-#include "infer_data_manager.h"
-#include "infer_data_manager_shm.h"
-#include "model_parser.h"
-#include "perf_utils.h"
-
-namespace triton { namespace perfanalyzer {
-
-class InferDataManagerFactory {
- public:
-  static std::shared_ptr<IInferDataManager> CreateInferDataManager(
-      const size_t max_threads, const int32_t batch_size,
-      const SharedMemoryType shared_memory_type, const size_t output_shm_size,
-      const std::unordered_map<std::string, cb::RequestParameter>&
-          request_parameters,
-      const std::shared_ptr<ModelParser>& parser,
-      const std::shared_ptr<cb::ClientBackendFactory>& factory,
-      const std::shared_ptr<DataLoader>& data_loader)
-  {
-    if (shared_memory_type == SharedMemoryType::NO_SHARED_MEMORY) {
-      return CreateInferDataManagerNoShm(
-          max_threads, batch_size, request_parameters, parser, factory,
-          data_loader);
-    } else {
-      return CreateInferDataManagerShm(
-          batch_size, shared_memory_type, output_shm_size, request_parameters,
-          parser, factory, data_loader);
-    }
-  }
-
- private:
-  static std::shared_ptr<IInferDataManager> CreateInferDataManagerNoShm(
-      const size_t max_threads, const int32_t batch_size,
-      const std::unordered_map<std::string, cb::RequestParameter>&
-          request_parameters,
-      const std::shared_ptr<ModelParser>& parser,
-      const std::shared_ptr<cb::ClientBackendFactory>& factory,
-      const std::shared_ptr<DataLoader>& data_loader)
-  {
-    return std::make_shared<InferDataManager>(
-        max_threads, batch_size, request_parameters, parser, factory,
-        data_loader);
-  }
-
-  static std::shared_ptr<IInferDataManager> CreateInferDataManagerShm(
-      const int32_t batch_size, const SharedMemoryType shared_memory_type,
-      const size_t output_shm_size,
-      const std::unordered_map<std::string, cb::RequestParameter>&
-          request_parameters,
-      const std::shared_ptr<ModelParser>& parser,
-      const std::shared_ptr<cb::ClientBackendFactory>& factory,
-      const std::shared_ptr<DataLoader>& data_loader)
-  {
-    return std::make_shared<InferDataManagerShm>(
-        batch_size, shared_memory_type, output_shm_size, request_parameters,
-        parser, factory, data_loader);
-  }
-};
-
-}}  // namespace triton::perfanalyzer
diff --git a/src/c++/perf_analyzer/infer_data_manager_shm.cc b/src/c++/perf_analyzer/infer_data_manager_shm.cc
deleted file mode 100644
index 8df7041eb..000000000
--- a/src/c++/perf_analyzer/infer_data_manager_shm.cc
+++ /dev/null
@@ -1,384 +0,0 @@
-// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "infer_data_manager_shm.h" - -#include - -namespace triton { namespace perfanalyzer { - -InferDataManagerShm::~InferDataManagerShm() -{ - cb::Error err; - if (backend_.get() != nullptr) { - err = backend_->UnregisterAllSharedMemory(); - if (!err.IsOk()) { - std::cerr << "Unable to unregister all shared memory regions" - << std::endl; - } - if (shared_memory_type_ == SharedMemoryType::SYSTEM_SHARED_MEMORY) { - for (auto& region : shared_memory_regions_) { - if (factory_->Kind() != - triton::perfanalyzer::clientbackend::BackendKind::TRITON_C_API) { - err = backend_->UnmapSharedMemory( - shared_memory_regions_[region.first].data_.get(), - shared_memory_regions_[region.first].byte_size_); - if (!err.IsOk()) { - std::cerr << "Unable to unmap shared memory with key (" - << region.first << "): Starting: " - << static_cast( - shared_memory_regions_[region.first].data_.get()) - << ", size: " - << shared_memory_regions_[region.first].byte_size_ - << std::endl; - } - err = backend_->UnlinkSharedMemoryRegion(region.first); - if (!err.IsOk()) { - std::cerr << "Unable to unlink shared memory with key: " - << region.first << std::endl; - } - } - } - } - } -} - - -cb::Error -InferDataManagerShm::Init() -{ - // TMA-1062 remove the factory from this class and use only the backend - RETURN_IF_ERROR(factory_->CreateClientBackend(&backend_)); - // Calling this function for the clean start - backend_->UnregisterAllSharedMemory(); - - RETURN_IF_ERROR(CreateOutputMemoryRegions()); - RETURN_IF_ERROR(CreateAndPopulateInputMemoryRegions()); - - return cb::Error::Success; -} - -cb::Error -InferDataManagerShm::CreateOutputMemoryRegions() -{ - // Allocate the shared memory for outputs - for (const auto& output : *(parser_->Outputs())) { - const std::string& name = output.first; - const ModelTensor& tensor = output.second; - int64_t batch1_bytesize = ByteSize(tensor.shape_, tensor.datatype_); - if (batch1_bytesize < 0) { - batch1_bytesize = output_shm_size_; - } - uint8_t* output_shm_ptr; - size_t alloc_size = batch1_bytesize * batch_size_; - std::string region_name(TensorToRegionName(name)); - RETURN_IF_ERROR(CreateMemoryRegion( - region_name, shared_memory_type_, alloc_size, - reinterpret_cast(&output_shm_ptr))); - } - return cb::Error::Success; -} - -cb::Error -InferDataManagerShm::CreateAndPopulateInputMemoryRegions() -{ - // All combinations of input + stream + step - // - for (const auto& input : *(parser_->Inputs())) { - const std::string& name = input.first; - const ModelTensor& tensor = input.second; - for (int stream_id = 0; - stream_id < (int)data_loader_->GetDataStreamsCount(); stream_id++) { - for (int step_id = 0; - step_id < (int)data_loader_->GetTotalSteps(stream_id); - step_id += 1) { - RETURN_IF_ERROR(CreateAndPopulateInputMemoryRegion( - name, tensor, stream_id, step_id)); - } - } - } - return cb::Error::Success; -} - -cb::Error -InferDataManagerShm::CreateAndPopulateInputMemoryRegion( - const std::string& name, const ModelTensor& tensor, int stream_id, - 
int step_id) -{ - std::vector input_datas; - size_t count = 0; - - RETURN_IF_ERROR(GetInputData(name, tensor, stream_id, step_id, input_datas)); - - if (tensor.is_shape_tensor_) { - RETURN_IF_ERROR( - ValidateShapeTensor(tensor, stream_id, step_id, input_datas)); - } - - size_t alloc_size = 0; - for (size_t i = 0; i < input_datas.size(); i++) { - if (!input_datas[i].is_valid) { - return cb::Error( - "Shared memory support in Perf Analyzer does not support " - "optional inputs at this time"); - } - alloc_size += input_datas[i].batch1_size; - } - - // Generate the shared memory region name - std::string region_name( - TensorToRegionName(name) + "_" + std::to_string(stream_id) + "_" + - std::to_string(step_id)); - uint8_t* input_shm_ptr; - RETURN_IF_ERROR(CreateMemoryRegion( - region_name, shared_memory_type_, alloc_size, - reinterpret_cast(&input_shm_ptr))); - RETURN_IF_ERROR(CopySharedMemory( - input_shm_ptr, input_datas, tensor.is_shape_tensor_, region_name)); - - return cb::Error::Success; -} - -cb::Error -InferDataManagerShm::CreateMemoryRegion( - const std::string& shm_region_name, const SharedMemoryType& memory_type, - const size_t byte_size, void** ptr) -{ - if (memory_type == SharedMemoryType::SYSTEM_SHARED_MEMORY) { - if (factory_->Kind() == - triton::perfanalyzer::clientbackend::BackendKind::TRITON_C_API) { - *ptr = new uint8_t[byte_size]; - RETURN_IF_ERROR( - backend_->RegisterSystemMemory(shm_region_name, *ptr, byte_size)); - - // Set free as the destructor. - shared_memory_regions_.emplace( - std::piecewise_construct, std::forward_as_tuple(shm_region_name), - std::forward_as_tuple(SharedMemoryData( - byte_size, - std::unique_ptr>( - reinterpret_cast(*ptr), - [](uint8_t* memory) { free(memory); })))); - } else { - std::string shm_key("/" + shm_region_name); - int shm_fd_op; - RETURN_IF_ERROR( - backend_->CreateSharedMemoryRegion(shm_key, byte_size, &shm_fd_op)); - RETURN_IF_ERROR(backend_->MapSharedMemory(shm_fd_op, 0, byte_size, ptr)); - - RETURN_IF_ERROR(backend_->RegisterSystemSharedMemory( - shm_region_name, shm_key, byte_size)); - - // No-op destruction - shared_memory_regions_.emplace( - std::piecewise_construct, std::forward_as_tuple(shm_region_name), - std::forward_as_tuple(SharedMemoryData( - byte_size, - std::unique_ptr>( - reinterpret_cast(*ptr), [](uint8_t* memory) {})))); - } - } else if (memory_type == SharedMemoryType::CUDA_SHARED_MEMORY) { -#ifdef TRITON_ENABLE_GPU - cudaError_t cuda_err = cudaMalloc((void**)ptr, byte_size); - if (cuda_err != cudaSuccess) { - return cb::Error( - "unable to allocate memory of " + std::to_string(byte_size) + - " bytes on gpu for output: " + - std::string(cudaGetErrorString(cuda_err)), - pa::GENERIC_ERROR); - } - - if (factory_->Kind() == - triton::perfanalyzer::clientbackend::BackendKind::TRITON_C_API) { - RETURN_IF_ERROR( - backend_->RegisterCudaMemory(shm_region_name, *ptr, byte_size)); - - // Set cudaFree as the destructor - shared_memory_regions_.emplace( - std::piecewise_construct, std::forward_as_tuple(shm_region_name), - std::forward_as_tuple(SharedMemoryData( - byte_size, - std::unique_ptr>( - reinterpret_cast(*ptr), - [shm_region_name, byte_size](uint8_t* memory) { - cudaError_t cuda_err = cudaFree(memory); - if (cuda_err != cudaSuccess) { - std::cerr - << "Unable to free cuda shared memory for " - << shm_region_name - << ": Starting: " << static_cast(memory) - << ", size: " << byte_size - << " bytes, Details: " << cudaGetErrorString(cuda_err) - << std::endl; - } - })))); - } else { - cudaIpcMemHandle_t cuda_handle; - 
RETURN_IF_ERROR( - CreateCUDAIPCHandle(&cuda_handle, reinterpret_cast(*ptr))); - RETURN_IF_ERROR(backend_->RegisterCudaSharedMemory( - shm_region_name, cuda_handle, byte_size)); - - // No operation required for deleting the memory - shared_memory_regions_.emplace( - std::piecewise_construct, std::forward_as_tuple(shm_region_name), - std::forward_as_tuple(SharedMemoryData( - byte_size, - std::unique_ptr>( - reinterpret_cast(*ptr), [](uint8_t* memory) {})))); - } -#endif // TRITON_ENABLE_GPU - } else { - return cb::Error( - "CreateMemoryRegion called with invalid memory region type.", - pa::GENERIC_ERROR); - } - - return cb::Error::Success; -} - -cb::Error -InferDataManagerShm::CopySharedMemory( - uint8_t* input_shm_ptr, const std::vector& tensor_datas, - bool is_shape_tensor, std::string& region_name) -{ - if (shared_memory_type_ == SharedMemoryType::SYSTEM_SHARED_MEMORY) { - // Populate the region with data - size_t count = 0; - size_t offset = 0; - size_t max_count = is_shape_tensor ? 1 : batch_size_; - while (count < max_count) { - memcpy( - input_shm_ptr + offset, tensor_datas[count].data_ptr, - tensor_datas[count].batch1_size); - offset += tensor_datas[count].batch1_size; - count++; - } - } else { -#ifdef TRITON_ENABLE_GPU - // Populate the region with data - size_t count = 0; - size_t offset = 0; - size_t max_count = is_shape_tensor ? 1 : batch_size_; - while (count < max_count) { - cudaError_t cuda_err = cudaMemcpy( - (void*)(input_shm_ptr + offset), (void*)tensor_datas[count].data_ptr, - tensor_datas[count].batch1_size, cudaMemcpyHostToDevice); - if (cuda_err != cudaSuccess) { - return cb::Error( - "Failed to copy data to cuda shared memory for " + region_name + - " : " + std::string(cudaGetErrorString(cuda_err)), - pa::GENERIC_ERROR); - } - offset += tensor_datas[count].batch1_size; - count++; - } -#endif // TRITON_ENABLE_GPU - } - return cb::Error::Success; -} - -cb::Error -InferDataManagerShm::InitInferDataInput( - const std::string& name, const ModelTensor& model_tensor, - InferData& infer_data) -{ - std::vector shape; - RETURN_IF_ERROR(data_loader_->GetInputShape(model_tensor, 0, 0, &shape)); - if (!shape.empty()) { - if ((parser_->MaxBatchSize() != 0) && (!model_tensor.is_shape_tensor_)) { - shape.insert(shape.begin(), (int64_t)batch_size_); - } - } else { - return cb::Error("unable to set shape for the input", pa::GENERIC_ERROR); - } - - cb::InferInput* infer_input; - RETURN_IF_ERROR(CreateInferInput( - &infer_input, backend_kind_, name, shape, model_tensor.datatype_)); - infer_data.inputs_.push_back(infer_input); - - // FIXME: TMA-765 - Shared memory mode does not support optional inputs, - // currently, and will be implemented in the associated story. 
- infer_data.valid_inputs_.push_back(infer_input); - - std::string region_name( - TensorToRegionName(name) + "_" + std::to_string(0) + "_" + - std::to_string(0)); - RETURN_IF_ERROR(infer_input->SetSharedMemory( - region_name, shared_memory_regions_[region_name].byte_size_)); - - AddInferDataParameters(infer_data); - - return cb::Error::Success; -} - -cb::Error -InferDataManagerShm::InitInferDataOutput( - const std::string& name, const ModelTensor& model_tensor, - InferData& infer_data) -{ - cb::InferRequestedOutput* requested_output; - RETURN_IF_ERROR(cb::InferRequestedOutput::Create( - &requested_output, backend_kind_, name, model_tensor.datatype_)); - infer_data.outputs_.push_back(requested_output); - - std::string region_name(TensorToRegionName(name)); - RETURN_IF_ERROR(requested_output->SetSharedMemory( - region_name, shared_memory_regions_[region_name].byte_size_)); - - return cb::Error::Success; -} - -cb::Error -InferDataManagerShm::UpdateInputs( - const size_t thread_id, const int stream_index, const int step_index, - InferData& infer_data) -{ - for (const auto& input : infer_data.inputs_) { - RETURN_IF_ERROR(input->Reset()); - const auto& model_input = (*(parser_->Inputs()))[input->Name()]; - - std::string region_name( - TensorToRegionName(input->Name()) + '_' + std::to_string(stream_index) + - "_" + std::to_string(step_index)); - - std::vector shape; - RETURN_IF_ERROR(data_loader_->GetInputShape( - model_input, stream_index, step_index, &shape)); - if (!shape.empty()) { - if ((parser_->MaxBatchSize() != 0) && (!model_input.is_shape_tensor_)) { - shape.insert(shape.begin(), (int64_t)batch_size_); - } - input->SetShape(shape); - } - RETURN_IF_ERROR(input->SetSharedMemory( - region_name, shared_memory_regions_[region_name].byte_size_)); - } - return cb::Error::Success; -} - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/infer_data_manager_shm.h b/src/c++/perf_analyzer/infer_data_manager_shm.h deleted file mode 100644 index 6a5ac9db6..000000000 --- a/src/c++/perf_analyzer/infer_data_manager_shm.h +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#pragma once
-
-#include "client_backend/client_backend.h"
-#include "constants.h"
-#include "data_loader.h"
-#include "infer_data.h"
-#include "infer_data_manager_base.h"
-#include "model_parser.h"
-#include "perf_utils.h"
-
-namespace triton { namespace perfanalyzer {
-
-namespace {
-
-#ifdef TRITON_ENABLE_GPU
-
-#include <cuda_runtime_api.h>
-
-#define RETURN_IF_CUDA_ERR(FUNC)                               \
-  {                                                            \
-    const cudaError_t result = FUNC;                           \
-    if (result != cudaSuccess) {                               \
-      return cb::Error(                                        \
-          "CUDA exception (line " + std::to_string(__LINE__) + \
-          "): " + cudaGetErrorName(result) + " (" +            \
-          cudaGetErrorString(result) + ")",                    \
-          pa::GENERIC_ERROR);                                  \
-    }                                                          \
-  }
-
-cb::Error
-CreateCUDAIPCHandle(
-    cudaIpcMemHandle_t* cuda_handle, void* input_d_ptr, int device_id = 0)
-{
-  // Set the GPU device to the desired GPU
-  RETURN_IF_CUDA_ERR(cudaSetDevice(device_id));
-
-  // Create IPC handle for data on the gpu
-  RETURN_IF_CUDA_ERR(cudaIpcGetMemHandle(cuda_handle, input_d_ptr));
-
-  return cb::Error::Success;
-}
-
-#endif  // TRITON_ENABLE_GPU
-
-}  // namespace
-
-/// Holds information about the shared memory locations
-struct SharedMemoryData {
-  SharedMemoryData(
-      size_t byte_size,
-      std::unique_ptr<uint8_t, std::function<void(uint8_t*)>> data)
-      : byte_size_(byte_size), data_(std::move(data))
-  {
-  }
-
-  SharedMemoryData() {}
-
-  // Byte size
-  size_t byte_size_;
-
-  // Unique pointer holding the shared memory data
-  std::unique_ptr<uint8_t, std::function<void(uint8_t*)>> data_;
-};
-
-/// Manages infer data to prepare an inference request and the resulting
-/// inference output from triton server
-class InferDataManagerShm : public InferDataManagerBase {
- public:
-  InferDataManagerShm(
-      const int32_t batch_size, const SharedMemoryType shared_memory_type,
-      const size_t output_shm_size,
-      const std::unordered_map<std::string, cb::RequestParameter>&
-          request_parameters,
-      const std::shared_ptr<ModelParser>& parser,
-      const std::shared_ptr<cb::ClientBackendFactory>& factory,
-      const std::shared_ptr<DataLoader>& data_loader)
-      : shared_memory_type_(shared_memory_type),
-        output_shm_size_(output_shm_size),
-        InferDataManagerBase(
-            batch_size, request_parameters, parser, factory, data_loader)
-  {
-  }
-
-  ~InferDataManagerShm();
-
-  /// Initialize this object. Must be called before any other functions
-  /// \return cb::Error object indicating success or failure.
-  cb::Error Init() override;
-
- protected:
-  cb::Error CreateOutputMemoryRegions();
-  cb::Error CreateAndPopulateInputMemoryRegions();
-  cb::Error CreateAndPopulateInputMemoryRegion(
-      const std::string& name, const ModelTensor& tensor, int stream_id,
-      int step_id);
-
-  /// Create a memory region.
-  /// \return cb::Error object indicating success or failure.
-  cb::Error CreateMemoryRegion(
-      const std::string& shm_region_name, const SharedMemoryType& memory_type,
-      const size_t byte_size, void** ptr);
-
-  /// \brief Helper function to handle copying shared memory to the correct
-  /// memory region
-  /// \param input_shm_ptr Pointer to the shared memory for a specific input
-  /// \param input_datas The TensorDatas to be copied
-  /// \param is_shape_tensor Is the input a shape tensor
-  /// \param region_name Name of the shared memory region
-  /// \return cb::Error object indicating success or failure
-  virtual cb::Error CopySharedMemory(
-      uint8_t* input_shm_ptr, const std::vector<TensorData>& input_datas,
-      bool is_shape_tensor, std::string& region_name);
-
-  cb::Error InitInferDataInput(
-      const std::string& name, const ModelTensor& model_tensor,
-      InferData& infer_data) override;
-
-  cb::Error InitInferDataOutput(
-      const std::string& name, const ModelTensor& model_tensor,
-      InferData& infer_data) override;
-
-  /// Helper function to update the inputs
-  /// \param thread_id The ID of the calling thread
-  /// \param stream_index The data stream to use for next data
-  /// \param step_index The step index to use for next data
-  /// \param infer_data The target InferData object
-  /// \return cb::Error object indicating success or failure.
-  virtual cb::Error UpdateInputs(
-      size_t thread_id, const int stream_index, const int step_index,
-      InferData& infer_data) override;
-
-  SharedMemoryType shared_memory_type_;
-  size_t output_shm_size_;
-  // Map from shared memory key to its starting address and size
-  std::unordered_map<std::string, SharedMemoryData> shared_memory_regions_;
-};
-
-}}  // namespace triton::perfanalyzer
diff --git a/src/c++/perf_analyzer/inference_profiler.cc b/src/c++/perf_analyzer/inference_profiler.cc
deleted file mode 100644
index a36f51c10..000000000
--- a/src/c++/perf_analyzer/inference_profiler.cc
+++ /dev/null
@@ -1,1867 +0,0 @@
-// Copyright 2020-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
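Before the profiler implementation, note that its stability logic reduces to a ratio test: a trailing window of throughput (and optionally latency) measurements counts as stable when max/min stays within 1 + stability_threshold. A self-contained sketch of that criterion, with a hypothetical function name (the real checks are IsInferWindowStable and IsLatencyWindowStable further down in this file):

#include <algorithm>
#include <vector>

// Sketch only: a window is "stable" when max/min <= 1 + threshold.
static bool WindowIsStable(const std::vector<double>& window, double threshold)
{
  if (window.empty()) {
    return false;
  }
  auto minmax = std::minmax_element(window.begin(), window.end());
  if (*minmax.first == 0.0) {
    return false;  // a zero measurement can never stabilize the ratio
  }
  return (*minmax.second / *minmax.first) <= 1.0 + threshold;
}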
- -#include "inference_profiler.h" - -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "client_backend/client_backend.h" -#include "constants.h" -#include "doctest.h" - -namespace triton { namespace perfanalyzer { -cb::Error -ReportPrometheusMetrics(const Metrics& metrics) -{ - const size_t max_num_gpus_in_stdout{16}; - if (metrics.gpu_utilization_per_gpu.size() > max_num_gpus_in_stdout || - metrics.gpu_power_usage_per_gpu.size() > max_num_gpus_in_stdout || - metrics.gpu_memory_used_bytes_per_gpu.size() > max_num_gpus_in_stdout || - metrics.gpu_memory_total_bytes_per_gpu.size() > max_num_gpus_in_stdout) { - std::cout << "Too many GPUs on system to print out individual Prometheus " - "metrics, use the CSV output feature to see metrics." - << std::endl; - return cb::Error::Success; - } - - std::cout << " Avg GPU Utilization:" << std::endl; - for (const auto& gpu_uuid_metric_pair : metrics.gpu_utilization_per_gpu) { - const auto gpu_uuid{gpu_uuid_metric_pair.first}; - const auto metric{gpu_uuid_metric_pair.second}; - std::cout << " " << gpu_uuid << " : " << (metric * 100.0) << "%" - << std::endl; - } - - std::cout << " Avg GPU Power Usage:" << std::endl; - for (const auto& gpu_uuid_metric_pair : metrics.gpu_power_usage_per_gpu) { - const auto gpu_uuid{gpu_uuid_metric_pair.first}; - const auto metric{gpu_uuid_metric_pair.second}; - std::cout << " " << gpu_uuid << " : " << metric << " watts" - << std::endl; - } - - std::cout << " Max GPU Memory Usage:" << std::endl; - for (const auto& gpu_uuid_metric_pair : - metrics.gpu_memory_used_bytes_per_gpu) { - const auto gpu_uuid{gpu_uuid_metric_pair.first}; - const auto metric{gpu_uuid_metric_pair.second}; - std::cout << " " << gpu_uuid << " : " << metric << " bytes" - << std::endl; - } - - std::cout << " Total GPU Memory:" << std::endl; - for (const auto& gpu_uuid_metric_pair : - metrics.gpu_memory_total_bytes_per_gpu) { - const auto gpu_uuid{gpu_uuid_metric_pair.first}; - const auto metric{gpu_uuid_metric_pair.second}; - std::cout << " " << gpu_uuid << " : " << metric << " bytes" - << std::endl; - } - - return cb::Error::Success; -} - -namespace { - -inline uint64_t -AverageDurationInUs(const uint64_t total_time_in_ns, const uint64_t cnt) -{ - if (cnt == 0) { - return 0; - } - return total_time_in_ns / (cnt * 1000); -} - -EnsembleDurations -GetTotalEnsembleDurations(const ServerSideStats& stats) -{ - EnsembleDurations result; - // Calculate avg cache hit latency and cache miss latency for ensemble model - // in case top level response caching is enabled. 
- const uint64_t ensemble_cache_hit_cnt = stats.cache_hit_count; - const uint64_t ensemble_cache_miss_cnt = stats.cache_miss_count; - result.total_cache_hit_time_avg_us += - AverageDurationInUs(stats.cache_hit_time_ns, ensemble_cache_hit_cnt); - result.total_cache_miss_time_avg_us += - AverageDurationInUs(stats.cache_miss_time_ns, ensemble_cache_miss_cnt); - for (const auto& model_stats : stats.composing_models_stat) { - if (model_stats.second.composing_models_stat.empty()) { - // Cache hit count covers cache hits, not related to compute times - const uint64_t cache_hit_cnt = model_stats.second.cache_hit_count; - // cache_miss_cnt should either equal infer_cnt or be zero if - // cache is disabled or not supported for the model/scheduler type - const uint64_t cache_miss_cnt = model_stats.second.cache_miss_count; - - result.total_queue_time_avg_us += AverageDurationInUs( - model_stats.second.queue_time_ns, model_stats.second.queue_count); - const uint64_t compute_time = model_stats.second.compute_input_time_ns + - model_stats.second.compute_infer_time_ns + - model_stats.second.compute_output_time_ns; - if (model_stats.second.compute_input_count != - model_stats.second.compute_infer_count || - model_stats.second.compute_infer_count != - model_stats.second.compute_output_count) { - throw std::runtime_error( - "Server side statistics compute counts must be the same."); - } - const uint64_t compute_cnt = model_stats.second.compute_input_count; - result.total_compute_time_avg_us += - AverageDurationInUs(compute_time, compute_cnt); - result.total_cache_hit_time_avg_us += AverageDurationInUs( - model_stats.second.cache_hit_time_ns, cache_hit_cnt); - result.total_cache_miss_time_avg_us += AverageDurationInUs( - model_stats.second.cache_miss_time_ns, cache_miss_cnt); - // Track combined cache/compute total avg for reporting latency with cache - // enabled - result.total_combined_cache_compute_time_avg_us += AverageDurationInUs( - compute_time + model_stats.second.cache_hit_time_ns + - model_stats.second.cache_miss_time_ns, - compute_cnt + cache_hit_cnt); - } else { - const auto this_ensemble_duration = - GetTotalEnsembleDurations(model_stats.second); - result.total_queue_time_avg_us += - this_ensemble_duration.total_queue_time_avg_us; - result.total_compute_time_avg_us += - this_ensemble_duration.total_compute_time_avg_us; - result.total_cache_hit_time_avg_us += - this_ensemble_duration.total_cache_hit_time_avg_us; - result.total_cache_miss_time_avg_us += - this_ensemble_duration.total_cache_miss_time_avg_us; - result.total_combined_cache_compute_time_avg_us += - this_ensemble_duration.total_combined_cache_compute_time_avg_us; - } - } - return result; -} - - -size_t -GetOverheadDuration(size_t total_time, size_t queue_time, size_t compute_time) -{ - return (total_time > queue_time + compute_time) - ? 
(total_time - queue_time - compute_time) - : 0; -} - -cb::Error -ReportServerSideStats( - const ServerSideStats& stats, const int iteration, - const std::shared_ptr& parser) -{ - const std::string ident = std::string(2 * iteration, ' '); - - // Infer/exec counts cover compute time done in inference backends, - // not related to cache hit times - const uint64_t exec_cnt = stats.execution_count; - const uint64_t infer_cnt = stats.inference_count; - // Cache hit count covers cache hits, not related to compute times - const uint64_t cache_hit_cnt = stats.cache_hit_count; - const uint64_t cache_miss_cnt = stats.cache_miss_count; - - // Success count covers all successful requests, cumulative time, queue - // time, compute, and cache - const uint64_t cnt = stats.success_count; - if (cnt == 0) { - std::cout << ident << " Request count: " << cnt << std::endl; - return cb::Error::Success; - } - - const uint64_t cumm_avg_us = AverageDurationInUs(stats.cumm_time_ns, cnt); - - std::cout << ident << " Inference count: " << infer_cnt << std::endl - << ident << " Execution count: " << exec_cnt << std::endl; - if (parser->ResponseCacheEnabled()) { - std::cout << ident << " Cache hit count: " << cache_hit_cnt << std::endl; - std::cout << ident << " Cache miss count: " << cache_miss_cnt << std::endl; - } - std::cout << ident << " Successful request count: " << cnt << std::endl - << ident << " Avg request latency: " << cumm_avg_us << " usec"; - - // Non-ensemble model - if (stats.composing_models_stat.empty()) { - const uint64_t queue_avg_us = - AverageDurationInUs(stats.queue_time_ns, stats.queue_count); - const uint64_t compute_input_avg_us = AverageDurationInUs( - stats.compute_input_time_ns, stats.compute_input_count); - const uint64_t compute_infer_avg_us = AverageDurationInUs( - stats.compute_infer_time_ns, stats.compute_infer_count); - const uint64_t compute_output_avg_us = AverageDurationInUs( - stats.compute_output_time_ns, stats.compute_output_count); - const uint64_t compute_time = stats.compute_input_time_ns + - stats.compute_infer_time_ns + - stats.compute_output_time_ns; - if (stats.compute_input_count != stats.compute_infer_count || - stats.compute_infer_count != stats.compute_output_count) { - throw std::runtime_error( - "Server side statistics compute counts must be the same."); - } - const uint64_t compute_cnt = stats.compute_input_count; - const uint64_t compute_avg_us = - AverageDurationInUs(compute_time, compute_cnt); - const uint64_t cache_hit_avg_us = - AverageDurationInUs(stats.cache_hit_time_ns, cache_hit_cnt); - const uint64_t cache_miss_avg_us = - AverageDurationInUs(stats.cache_miss_time_ns, cache_miss_cnt); - const uint64_t total_compute_time_ns = stats.compute_input_time_ns + - stats.compute_infer_time_ns + - stats.compute_output_time_ns; - // Get the average of cache hits and misses across successful requests - const uint64_t combined_cache_compute_avg_us = AverageDurationInUs( - stats.cache_hit_time_ns + stats.cache_miss_time_ns + - total_compute_time_ns, - compute_cnt + cache_hit_cnt); - - if (parser->ResponseCacheEnabled()) { - const uint64_t overhead_avg_us = GetOverheadDuration( - cumm_avg_us, queue_avg_us, combined_cache_compute_avg_us); - std::cout << " (overhead " << overhead_avg_us << " usec + " - << "queue " << queue_avg_us << " usec + " - << "cache hit/miss " << combined_cache_compute_avg_us - << " usec)" << std::endl; - std::cout << ident << ident - << " Average Cache Hit Latency: " << cache_hit_avg_us - << " usec" << std::endl; - std::cout << ident << ident << " 
Average Cache Miss Latency: " - << cache_miss_avg_us + compute_avg_us << " usec " - << "(cache lookup/insertion " << cache_miss_avg_us << " usec + " - << "compute input " << compute_input_avg_us << " usec + " - << "compute infer " << compute_infer_avg_us << " usec + " - << "compute output " << compute_output_avg_us << " usec)" - << std::endl - << std::endl; - } - // Response Cache Disabled - else { - std::cout << " (overhead " - << GetOverheadDuration( - cumm_avg_us, queue_avg_us, compute_avg_us) - << " usec + " - << "queue " << queue_avg_us << " usec + " - << "compute input " << compute_input_avg_us << " usec + " - << "compute infer " << compute_infer_avg_us << " usec + " - << "compute output " << compute_output_avg_us << " usec)" - << std::endl - << std::endl; - - if (cache_hit_avg_us > 0 || cache_miss_avg_us > 0) { - std::cerr << "Response Cache is disabled for model [" - << parser->ModelName() - << "] but cache hit/miss latency is non-zero." << std::endl; - } - } - } - // Ensemble Model - else { - const auto ensemble_times = GetTotalEnsembleDurations(stats); - // Response Cache Enabled - if (parser->ResponseCacheEnabled()) { - const uint64_t overhead_avg_us = GetOverheadDuration( - cumm_avg_us, ensemble_times.total_queue_time_avg_us, - ensemble_times.total_combined_cache_compute_time_avg_us); - // FIXME - Refactor these calculations in case of ensemble top level - // response cache is enabled - if (!parser->TopLevelResponseCachingEnabled()) { - std::cout << " (overhead " << overhead_avg_us << " usec + " - << "queue " << ensemble_times.total_queue_time_avg_us - << " usec + " - << "cache hit/miss " - << ensemble_times.total_combined_cache_compute_time_avg_us - << " usec)" << std::endl; - } else { - std::cout << std::endl; - } - std::cout << ident << ident << " Average Cache Hit Latency: " - << ensemble_times.total_cache_hit_time_avg_us << " usec" - << std::endl; - std::cout << ident << ident << " Average Cache Miss Latency: " - << ensemble_times.total_cache_miss_time_avg_us + - ensemble_times.total_compute_time_avg_us - << " usec " << std::endl - << std::endl; - } - // Response Cache Disabled - else { - std::cout << " (overhead " - << GetOverheadDuration( - cumm_avg_us, ensemble_times.total_queue_time_avg_us, - ensemble_times.total_compute_time_avg_us) - << " usec + " - << "queue " << ensemble_times.total_queue_time_avg_us - << " usec + " - << "compute " << ensemble_times.total_compute_time_avg_us - << " usec)" << std::endl - << std::endl; - } - - // List out composing models of ensemble model - std::cout << ident << "Composing models: " << std::endl; - for (const auto& model_stats : stats.composing_models_stat) { - const auto& model_identifier = model_stats.first; - std::cout << ident << model_identifier.first - << ", version: " << model_identifier.second << std::endl; - ReportServerSideStats(model_stats.second, iteration + 1, parser); - } - } - - return cb::Error::Success; -} - -cb::Error -ReportClientSideStats( - const ClientSideStats& stats, const int64_t percentile, - const cb::ProtocolType protocol, const bool verbose, - const bool on_sequence_model, const bool include_lib_stats, - const double overhead_pct, const double send_request_rate, - const bool is_decoupled_model) -{ - const uint64_t avg_latency_us = stats.avg_latency_ns / 1000; - const uint64_t std_us = stats.std_us; - - const uint64_t avg_request_time_us = stats.avg_request_time_ns / 1000; - const uint64_t avg_send_time_us = stats.avg_send_time_ns / 1000; - const uint64_t avg_receive_time_us = stats.avg_receive_time_ns / 
1000; - const uint64_t avg_response_wait_time_us = - avg_request_time_us - avg_send_time_us - avg_receive_time_us; - - std::string client_library_detail = " "; - if (include_lib_stats) { - if (protocol == cb::ProtocolType::GRPC) { - client_library_detail += - "Avg gRPC time: " + std::to_string(avg_request_time_us) + " usec ("; - if (!verbose) { - client_library_detail += - "(un)marshal request/response " + - std::to_string(avg_send_time_us + avg_receive_time_us) + - " usec + response wait " + - std::to_string(avg_response_wait_time_us) + " usec)"; - } else { - client_library_detail += "marshal " + std::to_string(avg_send_time_us) + - " usec + response wait " + - std::to_string(avg_response_wait_time_us) + - " usec + unmarshal " + - std::to_string(avg_receive_time_us) + " usec)"; - } - } else if (protocol == cb::ProtocolType::HTTP) { - client_library_detail += - "Avg HTTP time: " + std::to_string(avg_request_time_us) + " usec ("; - if (!verbose) { - client_library_detail += - "send/recv " + - std::to_string(avg_send_time_us + avg_receive_time_us) + - " usec + response wait " + - std::to_string(avg_response_wait_time_us) + " usec)"; - } else { - client_library_detail += "send " + std::to_string(avg_send_time_us) + - " usec + response wait " + - std::to_string(avg_response_wait_time_us) + - " usec + receive " + - std::to_string(avg_receive_time_us) + " usec)"; - } - } - } - - std::cout << " Request count: " << stats.request_count << std::endl; - double delay_pct = - ((double)stats.delayed_request_count / stats.request_count) * 100; - if (delay_pct > DELAY_PCT_THRESHOLD) { - std::cout << " " - << "Avg send request rate: " << std::fixed << std::setprecision(2) - << send_request_rate << " infer/sec" << std::endl; - std::cout << " " - << "[WARNING] Perf Analyzer was not able to keep up with the " - "desired request rate. "; - std::cout << delay_pct << "% of the requests were delayed. 
" << std::endl; - } - if (on_sequence_model) { - std::cout << " Sequence count: " << stats.sequence_count << " (" - << stats.sequence_per_sec << " seq/sec)" << std::endl; - } - std::cout << " Throughput: " << stats.infer_per_sec << " infer/sec" - << std::endl; - if (is_decoupled_model) { - std::cout << " Response Throughput: " << stats.responses_per_sec - << " infer/sec" << std::endl; - } - - if (verbose) { - std::stringstream client_overhead{""}; - client_overhead << " " - << "Avg client overhead: " << std::fixed - << std::setprecision(2) << overhead_pct << "%"; - std::cout << client_overhead.str() << std::endl; - } - - if (percentile == -1) { - std::cout << " Avg latency: " << avg_latency_us << " usec" - << " (standard deviation " << std_us << " usec)" << std::endl; - } - for (const auto& percentile : stats.percentile_latency_ns) { - std::cout << " p" << percentile.first - << " latency: " << (percentile.second / 1000) << " usec" - << std::endl; - } - - std::cout << client_library_detail << std::endl; - - return cb::Error::Success; -} - -cb::Error -Report( - const PerfStatus& summary, const int64_t percentile, - const cb::ProtocolType protocol, const bool verbose, - const bool include_lib_stats, const bool include_server_stats, - const std::shared_ptr& parser, - const bool should_collect_metrics, const double overhead_pct_threshold) -{ - std::cout << " Client: " << std::endl; - ReportClientSideStats( - summary.client_stats, percentile, protocol, verbose, - summary.on_sequence_model, include_lib_stats, summary.overhead_pct, - summary.send_request_rate, parser->IsDecoupled()); - - if (include_server_stats) { - std::cout << " Server: " << std::endl; - ReportServerSideStats(summary.server_stats, 1, parser); - } - - if (should_collect_metrics) { - std::cout << " Server Prometheus Metrics: " << std::endl; - ReportPrometheusMetrics(summary.metrics.front()); - } - - if (summary.overhead_pct > overhead_pct_threshold) { - std::cout << "[WARNING] Perf Analyzer is not able to keep up with the " - "desired load. The results may not be accurate." 
-              << std::endl;
-  }
-  return cb::Error::Success;
-}
-
-}  // namespace
-
-cb::Error
-InferenceProfiler::Create(
-    const bool verbose, const double stability_threshold,
-    const uint64_t measurement_window_ms, const size_t max_trials,
-    const int64_t percentile, const uint64_t latency_threshold_ms_,
-    const cb::ProtocolType protocol, std::shared_ptr<ModelParser>& parser,
-    std::shared_ptr<cb::ClientBackend> profile_backend,
-    std::unique_ptr<LoadManager> manager,
-    std::unique_ptr<InferenceProfiler>* profiler,
-    uint64_t measurement_request_count, MeasurementMode measurement_mode,
-    std::shared_ptr<MPIDriver> mpi_driver, const uint64_t metrics_interval_ms,
-    const bool should_collect_metrics, const double overhead_pct_threshold,
-    const bool async_mode,
-    const std::shared_ptr<ProfileDataCollector> collector,
-    const bool should_collect_profile_data)
-{
-  std::unique_ptr<InferenceProfiler> local_profiler(new InferenceProfiler(
-      verbose, stability_threshold, measurement_window_ms, max_trials,
-      (percentile != -1), percentile, latency_threshold_ms_, protocol, parser,
-      profile_backend, std::move(manager), measurement_request_count,
-      measurement_mode, mpi_driver, metrics_interval_ms, should_collect_metrics,
-      overhead_pct_threshold, async_mode, collector,
-      should_collect_profile_data));
-
-  *profiler = std::move(local_profiler);
-  return cb::Error::Success;
-}
-
-InferenceProfiler::InferenceProfiler(
-    const bool verbose, const double stability_threshold,
-    const int32_t measurement_window_ms, const size_t max_trials,
-    const bool extra_percentile, const size_t percentile,
-    const uint64_t latency_threshold_ms_, const cb::ProtocolType protocol,
-    std::shared_ptr<ModelParser>& parser,
-    std::shared_ptr<cb::ClientBackend> profile_backend,
-    std::unique_ptr<LoadManager> manager, uint64_t measurement_request_count,
-    MeasurementMode measurement_mode, std::shared_ptr<MPIDriver> mpi_driver,
-    const uint64_t metrics_interval_ms, const bool should_collect_metrics,
-    const double overhead_pct_threshold, const bool async_mode,
-    const std::shared_ptr<ProfileDataCollector> collector,
-    const bool should_collect_profile_data)
-    : verbose_(verbose), measurement_window_ms_(measurement_window_ms),
-      max_trials_(max_trials), extra_percentile_(extra_percentile),
-      percentile_(percentile), latency_threshold_ms_(latency_threshold_ms_),
-      protocol_(protocol), parser_(parser), profile_backend_(profile_backend),
-      manager_(std::move(manager)),
-      measurement_request_count_(measurement_request_count),
-      measurement_mode_(measurement_mode), mpi_driver_(mpi_driver),
-      should_collect_metrics_(should_collect_metrics),
-      overhead_pct_threshold_(overhead_pct_threshold), async_mode_(async_mode),
-      collector_(collector),
-      should_collect_profile_data_(should_collect_profile_data)
-{
-  load_parameters_.stability_threshold = stability_threshold;
-  load_parameters_.stability_window = 3;
-  if (profile_backend_->Kind() == cb::BackendKind::TRITON ||
-      profile_backend_->Kind() == cb::BackendKind::TRITON_C_API) {
-    // Measure and report client library stats only when the model
-    // is not decoupled.
-    include_lib_stats_ = (!parser_->IsDecoupled());
-    // Measure and report server statistics only when the server
-    // supports the statistics extension.
- std::set extensions; - profile_backend_->ServerExtensions(&extensions); - include_server_stats_ = (extensions.find("statistics") != extensions.end()); - } else { - include_lib_stats_ = true; - include_server_stats_ = false; - } - if (should_collect_metrics_) { - metrics_manager_ = - std::make_shared(profile_backend, metrics_interval_ms); - } -} - -cb::Error -InferenceProfiler::Profile( - const size_t concurrent_request_count, const size_t request_count, - std::vector& perf_statuses, bool& meets_threshold, - bool& is_stable) -{ - cb::Error err; - PerfStatus perf_status{}; - - perf_status.concurrency = concurrent_request_count; - - is_stable = false; - meets_threshold = true; - - RETURN_IF_ERROR( - dynamic_cast(manager_.get()) - ->ChangeConcurrencyLevel(concurrent_request_count, request_count)); - - err = ProfileHelper(perf_status, request_count, &is_stable); - if (err.IsOk()) { - uint64_t stabilizing_latency_ms = - perf_status.stabilizing_latency_ns / NANOS_PER_MILLIS; - if ((stabilizing_latency_ms >= latency_threshold_ms_) && - (latency_threshold_ms_ != NO_LIMIT)) { - std::cerr << "Measured latency went over the set limit of " - << latency_threshold_ms_ << " msec. " << std::endl; - meets_threshold = false; - } else if (!is_stable) { - if (measurement_mode_ == MeasurementMode::TIME_WINDOWS) { - std::cerr << "Failed to obtain stable measurement within " - << max_trials_ << " measurement windows for concurrency " - << concurrent_request_count << ". Please try to " - << "increase the --measurement-interval." << std::endl; - } else if (measurement_mode_ == MeasurementMode::COUNT_WINDOWS) { - std::cerr << "Failed to obtain stable measurement within " - << max_trials_ << " measurement windows for concurrency " - << concurrent_request_count << ". Please try to " - << "increase the --measurement-request-count." << std::endl; - } - meets_threshold = false; - } else { - perf_statuses.push_back(perf_status); - err = Report( - perf_status, percentile_, protocol_, verbose_, include_lib_stats_, - include_server_stats_, parser_, should_collect_metrics_, - overhead_pct_threshold_); - if (!err.IsOk()) { - std::cerr << err; - meets_threshold = false; - } - } - } else { - return err; - } - - return cb::Error::Success; -} - -cb::Error -InferenceProfiler::Profile( - const double request_rate, const size_t request_count, - std::vector& perf_statuses, bool& meets_threshold, - bool& is_stable) -{ - cb::Error err; - PerfStatus perf_status{}; - - perf_status.request_rate = request_rate; - - is_stable = false; - meets_threshold = true; - - RETURN_IF_ERROR(dynamic_cast(manager_.get()) - ->ChangeRequestRate(request_rate, request_count)); - std::cout << "Request Rate: " << request_rate - << " inference requests per seconds" << std::endl; - - err = ProfileHelper(perf_status, request_count, &is_stable); - if (err.IsOk()) { - uint64_t stabilizing_latency_ms = - perf_status.stabilizing_latency_ns / NANOS_PER_MILLIS; - if ((stabilizing_latency_ms >= latency_threshold_ms_) && - (latency_threshold_ms_ != NO_LIMIT)) { - std::cerr << "Measured latency went over the set limit of " - << latency_threshold_ms_ << " msec. " << std::endl; - meets_threshold = false; - } else if (!is_stable) { - std::cerr << "Failed to obtain stable measurement." 
<< std::endl; - meets_threshold = false; - } else { - perf_statuses.push_back(perf_status); - err = Report( - perf_status, percentile_, protocol_, verbose_, include_lib_stats_, - include_server_stats_, parser_, should_collect_metrics_, - overhead_pct_threshold_); - if (!err.IsOk()) { - std::cerr << err; - meets_threshold = false; - } - } - } else { - return err; - } - - return cb::Error::Success; -} - -cb::Error -InferenceProfiler::Profile( - const size_t request_count, std::vector& perf_statuses, - bool& meets_threshold, bool& is_stable) -{ - cb::Error err; - PerfStatus perf_status{}; - - RETURN_IF_ERROR(dynamic_cast(manager_.get()) - ->InitCustomIntervals(request_count)); - RETURN_IF_ERROR(dynamic_cast(manager_.get()) - ->GetCustomRequestRate(&perf_status.request_rate)); - - is_stable = false; - meets_threshold = true; - - err = ProfileHelper(perf_status, request_count, &is_stable); - if (err.IsOk()) { - uint64_t stabilizing_latency_ms = - perf_status.stabilizing_latency_ns / NANOS_PER_MILLIS; - if ((stabilizing_latency_ms >= latency_threshold_ms_) && - (latency_threshold_ms_ != NO_LIMIT)) { - std::cerr << "Measured latency went over the set limit of " - << latency_threshold_ms_ << " msec. " << std::endl; - meets_threshold = false; - } else if (!is_stable) { - std::cerr << "Failed to obtain stable measurement." << std::endl; - meets_threshold = false; - } else { - perf_statuses.push_back(perf_status); - err = Report( - perf_status, percentile_, protocol_, verbose_, include_lib_stats_, - include_server_stats_, parser_, should_collect_metrics_, - overhead_pct_threshold_); - if (!err.IsOk()) { - std::cerr << err; - meets_threshold = false; - } - } - } else { - return err; - } - - return cb::Error::Success; -} - -cb::Error -InferenceProfiler::ProfileHelper( - PerfStatus& experiment_perf_status, size_t request_count, bool* is_stable) -{ - // Start measurement - LoadStatus load_status; - size_t completed_trials = 0; - std::queue error; - std::deque measurement_perf_statuses; - all_request_records_.clear(); - previous_window_end_ns_ = 0; - - // Start with a fresh empty request records vector in the manager - // - std::vector empty_request_records; - RETURN_IF_ERROR(manager_->SwapRequestRecords(empty_request_records)); - - do { - PerfStatus measurement_perf_status; - measurement_perf_status.concurrency = experiment_perf_status.concurrency; - measurement_perf_status.request_rate = experiment_perf_status.request_rate; - RETURN_IF_ERROR(manager_->CheckHealth()); - - MeasureConfig measure_config; - if (measurement_mode_ == MeasurementMode::TIME_WINDOWS) { - measure_config.measurement_window = measurement_window_ms_; - measure_config.is_count_based = false; - } else { - measure_config.measurement_window = measurement_request_count_; - measure_config.is_count_based = true; - } - - // When request_count is not 0, the experiment will run for exactly X - // requests. 
In that case, we are not measuring based on window stability, - // and instead need to clamp the windows to be from the start of the - // first request to the end of the last request of the request count - // - measure_config.clamp_window = (request_count != 0); - error.push(Measure(measurement_perf_status, measure_config)); - measurement_perf_statuses.push_back(measurement_perf_status); - - if (error.size() > load_parameters_.stability_window) { - error.pop(); - measurement_perf_statuses.pop_front(); - } - - if (error.back().IsOk()) { - load_status.infer_per_sec.push_back( - measurement_perf_status.client_stats.infer_per_sec); - load_status.latencies.push_back( - measurement_perf_status.stabilizing_latency_ns); - } else { - load_status.infer_per_sec.push_back(0); - load_status.latencies.push_back(std::numeric_limits::max()); - } - - load_status.avg_ips += - load_status.infer_per_sec.back() / load_parameters_.stability_window; - load_status.avg_latency += - load_status.latencies.back() / load_parameters_.stability_window; - if (verbose_) { - if (error.back().IsOk()) { - std::cout << " Pass [" << (completed_trials + 1) - << "] throughput: " << load_status.infer_per_sec.back() - << " infer/sec. "; - if (extra_percentile_) { - std::cout << "p" << percentile_ << " latency: " - << (measurement_perf_status.client_stats - .percentile_latency_ns.find(percentile_) - ->second / - 1000) - << " usec" << std::endl; - } else { - std::cout << "Avg latency: " - << (measurement_perf_status.client_stats.avg_latency_ns / - 1000) - << " usec (std " - << measurement_perf_status.client_stats.std_us << " usec). " - << std::endl; - } - } else { - std::cout << " Pass [" << (completed_trials + 1) - << "] cb::Error: " << error.back().Message() << std::endl; - } - } - - // If request-count is specified, then only measure one window and exit - if (request_count != 0) { - *is_stable = true; - break; - } - - *is_stable = DetermineStability(load_status); - - if (IsDoneProfiling(load_status, is_stable)) { - break; - } - - completed_trials++; - } while ((!early_exit) && (completed_trials < max_trials_)); - - // For async requests, print a warning if the latency threshold is not met. - if (async_mode_ && !*is_stable && DetermineStability(load_status, false)) { - std::cerr << "Warning: Request latency is not stabilizing. " - "Please try lowering the request rate." - << std::endl; - *is_stable = true; - } - - if (should_collect_metrics_) { - metrics_manager_->StopQueryingMetrics(); - } - - // return the appropriate error which might have occurred in the - // stability_window for its proper handling. - while (!error.empty()) { - if (!error.front().IsOk()) { - return error.front(); - } else { - error.pop(); - } - } - - // Only merge the results if the results have stabilized. 
-  if (*is_stable) {
-    RETURN_IF_ERROR(MergePerfStatusReports(
-        measurement_perf_statuses, experiment_perf_status));
-  }
-
-  if (early_exit) {
-    return cb::Error("Received exit signal.", pa::GENERIC_ERROR);
-  }
-  return cb::Error::Success;
-}
-
-bool
-InferenceProfiler::DetermineStability(
-    LoadStatus& load_status, bool check_latency)
-{
-  bool stable = false;
-  if (load_status.infer_per_sec.size() >= load_parameters_.stability_window) {
-    stable = true;
-    size_t idx =
-        load_status.infer_per_sec.size() - load_parameters_.stability_window;
-
-    for (size_t i = idx; i < load_status.infer_per_sec.size(); i++) {
-      if (load_status.infer_per_sec[i] == 0) {
-        stable = false;
-      }
-    }
-
-    stable = stable && CheckWindowForStability(idx, load_status, check_latency);
-  }
-  return stable;
-}
-
-bool
-InferenceProfiler::CheckWindowForStability(
-    size_t idx, LoadStatus& load_status, bool check_latency)
-{
-  return IsInferWindowStable(idx, load_status) &&
-         (!check_latency || IsLatencyWindowStable(idx, load_status));
-}
-
-bool
-InferenceProfiler::IsInferWindowStable(size_t idx, LoadStatus& load_status)
-{
-  auto infer_start = std::begin(load_status.infer_per_sec) + idx;
-  auto infer_per_sec_measurements = std::minmax_element(
-      infer_start, infer_start + load_parameters_.stability_window);
-
-  auto max_infer_per_sec = *infer_per_sec_measurements.second;
-  auto min_infer_per_sec = *infer_per_sec_measurements.first;
-
-  return max_infer_per_sec / min_infer_per_sec <=
-         1 + load_parameters_.stability_threshold;
-}
-
-bool
-InferenceProfiler::IsLatencyWindowStable(size_t idx, LoadStatus& load_status)
-{
-  auto latency_start = std::begin(load_status.latencies) + idx;
-  auto latencies_per_sec_measurements = std::minmax_element(
-      latency_start, latency_start + load_parameters_.stability_window);
-
-  double max_latency = *latencies_per_sec_measurements.second;
-  double min_latency = *latencies_per_sec_measurements.first;
-
-  return max_latency / min_latency <= 1 + load_parameters_.stability_threshold;
-}
-
-bool
-InferenceProfiler::IsDoneProfiling(LoadStatus& load_status, bool* is_stable)
-{
-  bool done = false;
-  bool within_threshold = true;
-  if (load_status.infer_per_sec.size() >= load_parameters_.stability_window) {
-    size_t idx =
-        load_status.infer_per_sec.size() - load_parameters_.stability_window;
-
-    for (; idx < load_status.infer_per_sec.size(); idx++) {
-      within_threshold &= CheckWithinThreshold(idx, load_status);
-    }
-  }
-
-  if (mpi_driver_->IsMPIRun()) {
-    if (AllMPIRanksAreStable(*is_stable)) {
-      done = true;
-    }
-  } else if (*is_stable) {
-    done = true;
-  }
-  if ((!within_threshold) && (latency_threshold_ms_ != NO_LIMIT)) {
-    done = true;
-  }
-  return done;
-}
-
-bool
-InferenceProfiler::CheckWithinThreshold(size_t idx, LoadStatus& load_status)
-{
-  return load_status.latencies[idx] <
-         (latency_threshold_ms_ * NANOS_PER_MILLIS);
-}
-
-cb::Error
-InferenceProfiler::MergeServerSideStats(
-    std::vector<ServerSideStats>& server_side_stats,
-    ServerSideStats& server_side_summary)
-{
-  auto& server_side_stat = server_side_stats[0];
-
-  // Make sure that the perf status reports profiling settings match with each
-  // other.
- for (size_t i = 1; i < server_side_stats.size(); i++) { - if (server_side_stats[i].composing_models_stat.size() != - server_side_stat.composing_models_stat.size()) { - return cb::Error( - "Inconsistent ensemble setting detected between the trials.", - pa::GENERIC_ERROR); - } - } - - // Initialize the server stats for the merged report. - server_side_summary.inference_count = 0; - server_side_summary.execution_count = 0; - server_side_summary.cache_hit_count = 0; - server_side_summary.cache_miss_count = 0; - server_side_summary.success_count = 0; - server_side_summary.queue_count = 0; - server_side_summary.compute_input_count = 0; - server_side_summary.compute_output_count = 0; - server_side_summary.compute_infer_count = 0; - server_side_summary.cumm_time_ns = 0; - server_side_summary.queue_time_ns = 0; - server_side_summary.compute_input_time_ns = 0; - server_side_summary.compute_infer_time_ns = 0; - server_side_summary.compute_output_time_ns = 0; - server_side_summary.cache_hit_time_ns = 0; - server_side_summary.cache_miss_time_ns = 0; - server_side_summary.composing_models_stat.clear(); - for (auto& composing_model_stat : server_side_stat.composing_models_stat) { - std::vector composing_model_stats; - for (auto& server_side_stat : server_side_stats) { - composing_model_stats.push_back( - server_side_stat.composing_models_stat[composing_model_stat.first]); - } - - ServerSideStats merged_composing_model_stats; - RETURN_IF_ERROR(MergeServerSideStats( - composing_model_stats, merged_composing_model_stats)); - server_side_summary.composing_models_stat.insert( - {composing_model_stat.first, merged_composing_model_stats}); - } - - for (auto& server_side_stat : server_side_stats) { - // Aggregated Server Stats - server_side_summary.inference_count += server_side_stat.inference_count; - server_side_summary.execution_count += server_side_stat.execution_count; - server_side_summary.cache_hit_count += server_side_stat.cache_hit_count; - server_side_summary.cache_miss_count += server_side_stat.cache_miss_count; - server_side_summary.success_count += server_side_stat.success_count; - server_side_summary.queue_count += server_side_stat.queue_count; - server_side_summary.compute_input_count += - server_side_stat.compute_input_count; - server_side_summary.compute_infer_count += - server_side_stat.compute_infer_count; - server_side_summary.compute_output_count += - server_side_stat.compute_output_count; - server_side_summary.cumm_time_ns += server_side_stat.cumm_time_ns; - server_side_summary.queue_time_ns += server_side_stat.queue_time_ns; - server_side_summary.compute_input_time_ns += - server_side_stat.compute_input_time_ns; - server_side_summary.compute_infer_time_ns += - server_side_stat.compute_infer_time_ns; - server_side_summary.compute_output_time_ns += - server_side_stat.compute_output_time_ns; - server_side_summary.cache_hit_time_ns += server_side_stat.cache_hit_time_ns; - server_side_summary.cache_miss_time_ns += - server_side_stat.cache_miss_time_ns; - } - - return cb::Error::Success; -} - -cb::Error -InferenceProfiler::MergePerfStatusReports( - std::deque& perf_status_reports, - PerfStatus& experiment_perf_status) -{ - auto& perf_status = perf_status_reports[0]; - - // Make sure that the perf status reports profiling settings match with each - // other. 
- for (size_t i = 1; i < perf_status_reports.size(); i++) { - perf_status.concurrency = experiment_perf_status.concurrency; - perf_status.request_rate = experiment_perf_status.request_rate; - - if (perf_status_reports[i].on_sequence_model != - perf_status.on_sequence_model) { - return cb::Error( - "Inconsistent sequence setting detected.", pa::GENERIC_ERROR); - } - - if (perf_status_reports[i].batch_size != perf_status.batch_size) { - return cb::Error("Inconsistent batch size detected.", pa::GENERIC_ERROR); - } - - if (perf_status_reports[i].server_stats.composing_models_stat.size() != - perf_status.server_stats.composing_models_stat.size()) { - return cb::Error( - "Inconsistent ensemble setting detected between the trials.", - pa::GENERIC_ERROR); - } - } - - experiment_perf_status.batch_size = perf_status.batch_size; - experiment_perf_status.on_sequence_model = perf_status.on_sequence_model; - - // Initialize the client stats for the merged report. - experiment_perf_status.client_stats.request_count = 0; - experiment_perf_status.client_stats.sequence_count = 0; - experiment_perf_status.client_stats.delayed_request_count = 0; - experiment_perf_status.client_stats.duration_ns = 0; - experiment_perf_status.client_stats.avg_latency_ns = 0; - experiment_perf_status.client_stats.percentile_latency_ns.clear(); - experiment_perf_status.client_stats.latencies.clear(); - experiment_perf_status.client_stats.std_us = 0; - experiment_perf_status.client_stats.avg_request_time_ns = 0; - experiment_perf_status.client_stats.avg_send_time_ns = 0; - experiment_perf_status.client_stats.avg_receive_time_ns = 0; - experiment_perf_status.client_stats.infer_per_sec = 0; - experiment_perf_status.client_stats.sequence_per_sec = 0; - experiment_perf_status.client_stats.completed_count = 0; - experiment_perf_status.stabilizing_latency_ns = 0; - experiment_perf_status.overhead_pct = 0; - experiment_perf_status.send_request_rate = 0.0; - - std::vector server_side_stats; - for (auto& perf_status : perf_status_reports) { - // Aggregated Client Stats - experiment_perf_status.client_stats.request_count += - perf_status.client_stats.request_count; - experiment_perf_status.client_stats.sequence_count += - perf_status.client_stats.sequence_count; - experiment_perf_status.client_stats.delayed_request_count += - perf_status.client_stats.delayed_request_count; - experiment_perf_status.client_stats.response_count += - perf_status.client_stats.response_count; - experiment_perf_status.client_stats.duration_ns += - perf_status.client_stats.duration_ns; - - server_side_stats.push_back(perf_status.server_stats); - - experiment_perf_status.client_stats.latencies.insert( - experiment_perf_status.client_stats.latencies.end(), - perf_status.client_stats.latencies.begin(), - perf_status.client_stats.latencies.end()); - // Accumulate the overhead percentage and send rate here to remove extra - // traversals over the perf_status_reports - experiment_perf_status.overhead_pct += perf_status.overhead_pct; - experiment_perf_status.send_request_rate += perf_status.send_request_rate; - } - - // Calculate the average overhead_pct for the experiment. 
- experiment_perf_status.overhead_pct /= perf_status_reports.size(); - experiment_perf_status.send_request_rate /= perf_status_reports.size(); - - if (include_lib_stats_) { - for (auto& perf_status : perf_status_reports) { - experiment_perf_status.client_stats.completed_count += - perf_status.client_stats.completed_count; - - experiment_perf_status.client_stats.avg_request_time_ns += - perf_status.client_stats.avg_request_time_ns * - perf_status.client_stats.completed_count; - - experiment_perf_status.client_stats.avg_send_time_ns += - perf_status.client_stats.avg_send_time_ns * - perf_status.client_stats.completed_count; - - experiment_perf_status.client_stats.avg_receive_time_ns += - perf_status.client_stats.avg_receive_time_ns * - perf_status.client_stats.completed_count; - } - - if (experiment_perf_status.client_stats.completed_count != 0) { - experiment_perf_status.client_stats.avg_request_time_ns = - experiment_perf_status.client_stats.avg_request_time_ns / - experiment_perf_status.client_stats.completed_count; - - experiment_perf_status.client_stats.avg_send_time_ns = - experiment_perf_status.client_stats.avg_send_time_ns / - experiment_perf_status.client_stats.completed_count; - - experiment_perf_status.client_stats.avg_receive_time_ns = - experiment_perf_status.client_stats.avg_receive_time_ns / - experiment_perf_status.client_stats.completed_count; - } - } - - RETURN_IF_ERROR(MergeServerSideStats( - server_side_stats, experiment_perf_status.server_stats)); - - std::sort( - experiment_perf_status.client_stats.latencies.begin(), - experiment_perf_status.client_stats.latencies.end()); - - float client_duration_sec = - (float)experiment_perf_status.client_stats.duration_ns / NANOS_PER_SECOND; - experiment_perf_status.client_stats.sequence_per_sec = - experiment_perf_status.client_stats.sequence_count / client_duration_sec; - experiment_perf_status.client_stats.infer_per_sec = - (experiment_perf_status.client_stats.request_count * - experiment_perf_status.batch_size) / - client_duration_sec; - experiment_perf_status.client_stats.responses_per_sec = - experiment_perf_status.client_stats.response_count / client_duration_sec; - RETURN_IF_ERROR(SummarizeLatency( - experiment_perf_status.client_stats.latencies, experiment_perf_status)); - - if (should_collect_metrics_) { - // Put all Metric objects in a flat vector so they're easier to merge - std::vector> all_metrics{}; - std::for_each( - perf_status_reports.begin(), perf_status_reports.end(), - [&all_metrics](const PerfStatus& p) { - std::for_each( - p.metrics.begin(), p.metrics.end(), - [&all_metrics](const Metrics& m) { all_metrics.push_back(m); }); - }); - - Metrics merged_metrics{}; - RETURN_IF_ERROR(MergeMetrics(all_metrics, merged_metrics)); - experiment_perf_status.metrics.push_back(std::move(merged_metrics)); - } - - return cb::Error::Success; -} - -cb::Error -InferenceProfiler::GetServerSideStatus( - std::map* model_stats) -{ - if ((parser_->SchedulerType() == ModelParser::ENSEMBLE) || - (parser_->SchedulerType() == ModelParser::ENSEMBLE_SEQUENCE)) { - RETURN_IF_ERROR(profile_backend_->ModelInferenceStatistics(model_stats)); - } else { - RETURN_IF_ERROR(profile_backend_->ModelInferenceStatistics( - model_stats, parser_->ModelName(), parser_->ModelVersion())); - } - return cb::Error::Success; -} - -// Used for measurement -cb::Error -InferenceProfiler::Measure(PerfStatus& perf_status, MeasureConfig config) -{ - std::map start_status; - std::map end_status; - cb::InferStat start_stat; - cb::InferStat end_stat; - - 
manager_->ResetIdleTime(); - - // Set current window start time to end of previous window. For first - // measurement window, capture start time, server side stats, and client side - // stats. - uint64_t window_start_ns = previous_window_end_ns_; - start_stat = prev_client_side_stats_; - start_status = prev_server_side_stats_; - if (window_start_ns == 0) { - window_start_ns = std::chrono::duration_cast( - std::chrono::system_clock::now().time_since_epoch()) - .count(); - if (should_collect_metrics_) { - metrics_manager_->StartQueryingMetrics(); - } - if (include_server_stats_) { - RETURN_IF_ERROR(GetServerSideStatus(&start_status)); - } - RETURN_IF_ERROR(manager_->GetAccumulatedClientStat(&start_stat)); - } - - if (should_collect_metrics_) { - try { - metrics_manager_->CheckQueryingStatus(); - } - catch (const std::exception& e) { - return cb::Error(e.what(), pa::GENERIC_ERROR); - } - } - - if (!config.is_count_based) { - // Wait for specified time interval in msec - std::this_thread::sleep_for( - std::chrono::milliseconds((uint64_t)(config.measurement_window * 1.2))); - } else { - do { - // Check the health of the worker threads. - RETURN_IF_ERROR(manager_->CheckHealth()); - - // Wait for 1s until enough samples have been collected. - std::this_thread::sleep_for(std::chrono::milliseconds((uint64_t)1000)); - } while (manager_->CountCollectedRequests() < config.measurement_window); - } - - uint64_t window_end_ns = - std::chrono::duration_cast( - std::chrono::system_clock::now().time_since_epoch()) - .count(); - previous_window_end_ns_ = window_end_ns; - - if (should_collect_metrics_) { - metrics_manager_->GetLatestMetrics(perf_status.metrics); - } - - // Get server status and then print report on difference between - // before and after status. - if (include_server_stats_) { - RETURN_IF_ERROR(GetServerSideStatus(&end_status)); - prev_server_side_stats_ = end_status; - } - - RETURN_IF_ERROR(manager_->GetAccumulatedClientStat(&end_stat)); - prev_client_side_stats_ = end_stat; - - std::vector current_request_records; - RETURN_IF_ERROR(manager_->SwapRequestRecords(current_request_records)); - all_request_records_.insert( - all_request_records_.end(), current_request_records.begin(), - current_request_records.end()); - - RETURN_IF_ERROR(Summarize( - start_status, end_status, start_stat, end_stat, perf_status, - window_start_ns, window_end_ns, config.clamp_window)); - - return cb::Error::Success; -} - -cb::Error -InferenceProfiler::Summarize( - const std::map& start_status, - const std::map& end_status, - const cb::InferStat& start_stat, const cb::InferStat& end_stat, - PerfStatus& summary, uint64_t window_start_ns, uint64_t window_end_ns, - bool clamp_window) -{ - size_t valid_sequence_count = 0; - size_t delayed_request_count = 0; - size_t response_count = 0; - - // Get measurement from requests that fall within the time interval - std::pair valid_range{window_start_ns, window_end_ns}; - std::vector latencies; - std::vector valid_requests{}; - ValidLatencyMeasurement( - valid_range, valid_sequence_count, delayed_request_count, &latencies, - response_count, valid_requests); - - - if (clamp_window) { - auto [start, end] = ClampWindow(valid_requests); - } - - uint64_t window_duration_ns = window_end_ns - window_start_ns; - - if (should_collect_profile_data_) { - CollectData( - summary, window_start_ns, window_end_ns, std::move(valid_requests)); - } - - RETURN_IF_ERROR(SummarizeLatency(latencies, summary)); - RETURN_IF_ERROR(SummarizeClientStat( - start_stat, end_stat, window_duration_ns, 
latencies.size(), - valid_sequence_count, delayed_request_count, response_count, summary)); - summary.client_stats.latencies = std::move(latencies); - - SummarizeOverhead(window_duration_ns, manager_->GetIdleTime(), summary); - - double window_duration_s{ - window_duration_ns / static_cast(NANOS_PER_SECOND)}; - - SummarizeSendRequestRate( - window_duration_s, manager_->GetAndResetNumSentRequests(), summary); - - if (include_server_stats_) { - RETURN_IF_ERROR(SummarizeServerStats( - start_status, end_status, &(summary.server_stats))); - } - - return cb::Error::Success; -} - -void -InferenceProfiler::ValidLatencyMeasurement( - const std::pair& valid_range, - size_t& valid_sequence_count, size_t& delayed_request_count, - std::vector* valid_latencies, size_t& response_count, - std::vector& valid_requests) -{ - valid_latencies->clear(); - valid_sequence_count = 0; - response_count = 0; - std::vector erase_indices{}; - for (size_t i = 0; i < all_request_records_.size(); i++) { - const auto& request_record = all_request_records_[i]; - uint64_t request_start_ns = CHRONO_TO_NANOS(request_record.start_time_); - uint64_t request_end_ns; - - if (request_record.has_null_last_response_ == false) { - request_end_ns = - CHRONO_TO_NANOS(request_record.response_timestamps_.back()); - } else if (request_record.response_timestamps_.size() > 1) { - size_t last_response_idx{request_record.response_timestamps_.size() - 2}; - request_end_ns = CHRONO_TO_NANOS( - request_record.response_timestamps_[last_response_idx]); - } else { - erase_indices.push_back(i); - continue; - } - - if (request_start_ns <= request_end_ns) { - // Only counting requests that end within the time interval - if ((request_end_ns >= valid_range.first) && - (request_end_ns <= valid_range.second)) { - valid_latencies->push_back(request_end_ns - request_start_ns); - response_count += request_record.response_timestamps_.size(); - if (request_record.has_null_last_response_) { - response_count--; - } - erase_indices.push_back(i); - if (request_record.sequence_end_) { - valid_sequence_count++; - } - if (request_record.delayed_) { - delayed_request_count++; - } - } - } - } - - std::for_each( - erase_indices.begin(), erase_indices.end(), - [this, &valid_requests](size_t i) { - valid_requests.push_back(std::move(this->all_request_records_[i])); - }); - - // Iterate through erase indices backwards so that erases from - // `all_request_records_` happen from the back to the front to avoid using - // wrong indices after subsequent erases - std::for_each(erase_indices.rbegin(), erase_indices.rend(), [this](size_t i) { - this->all_request_records_.erase(this->all_request_records_.begin() + i); - }); - - // Always sort measured latencies as percentile will be reported as default - std::sort(valid_latencies->begin(), valid_latencies->end()); -} - -std::pair -InferenceProfiler::ClampWindow(std::vector& requests) -{ - auto earliest_start = - std::chrono::time_point::max(); - auto latest_end = std::chrono::time_point::min(); - - for (auto x : requests) { - earliest_start = std::min(earliest_start, x.start_time_); - latest_end = std::max(latest_end, x.response_timestamps_.back()); - } - - return std::make_pair( - earliest_start.time_since_epoch().count(), - latest_end.time_since_epoch().count()); -} - - -void -InferenceProfiler::CollectData( - PerfStatus& summary, uint64_t window_start_ns, uint64_t window_end_ns, - std::vector&& request_records) -{ - InferenceLoadMode id{summary.concurrency, summary.request_rate}; - collector_->AddWindow(id, window_start_ns, 
window_end_ns); - collector_->AddData(id, std::move(request_records)); -} - -cb::Error -InferenceProfiler::SummarizeLatency( - const std::vector& latencies, PerfStatus& summary) -{ - if (latencies.size() == 0) { - return cb::Error( - "No valid requests recorded within time interval." - " Please use a larger time window.", - pa::OPTION_ERROR); - } - - std::tie(summary.client_stats.avg_latency_ns, summary.client_stats.std_us) = - GetMeanAndStdDev(latencies); - - // retrieve other interesting percentile - summary.client_stats.percentile_latency_ns.clear(); - std::set percentiles{50, 90, 95, 99}; - if (extra_percentile_) { - percentiles.emplace(percentile_); - } - - for (const auto percentile : percentiles) { - size_t index = (percentile / 100.0) * (latencies.size() - 1) + 0.5; - summary.client_stats.percentile_latency_ns.emplace( - percentile, latencies[index]); - } - - if (extra_percentile_) { - summary.stabilizing_latency_ns = - summary.client_stats.percentile_latency_ns.find(percentile_)->second; - } else { - summary.stabilizing_latency_ns = summary.client_stats.avg_latency_ns; - } - - return cb::Error::Success; -} - -std::tuple -InferenceProfiler::GetMeanAndStdDev(const std::vector& latencies) -{ - uint64_t avg_latency_ns{0}; - uint64_t std_dev_latency_us{0}; - - // calculate mean of latencies - uint64_t tol_latency_ns{ - std::accumulate(latencies.begin(), latencies.end(), 0ULL)}; - avg_latency_ns = tol_latency_ns / latencies.size(); - - // calculate sample standard deviation of latencies - uint64_t sq_sum_latency_avg_diff_ns{0}; - std::for_each( - latencies.begin(), latencies.end(), - [avg_latency_ns, &sq_sum_latency_avg_diff_ns](uint64_t l) { - sq_sum_latency_avg_diff_ns += static_cast(l - avg_latency_ns) * - static_cast(l - avg_latency_ns); - }); - if (latencies.size() > 1) { - std_dev_latency_us = - std::sqrt(sq_sum_latency_avg_diff_ns / (latencies.size() - 1)) / 1000; - } else { - std_dev_latency_us = UINT64_MAX; - std::cerr << "WARNING: Pass contained only one request, so sample latency " - "standard deviation will be infinity (UINT64_MAX)." 
- << std::endl; - } - - - return std::make_tuple(avg_latency_ns, std_dev_latency_us); -} - -cb::Error -InferenceProfiler::SummarizeClientStat( - const cb::InferStat& start_stat, const cb::InferStat& end_stat, - const uint64_t duration_ns, const size_t valid_request_count, - const size_t valid_sequence_count, const size_t delayed_request_count, - const size_t response_count, PerfStatus& summary) -{ - summary.on_sequence_model = - ((parser_->SchedulerType() == ModelParser::SEQUENCE) || - (parser_->SchedulerType() == ModelParser::ENSEMBLE_SEQUENCE)); - summary.batch_size = std::max(manager_->BatchSize(), (size_t)1); - summary.client_stats.request_count = valid_request_count; - summary.client_stats.sequence_count = valid_sequence_count; - summary.client_stats.delayed_request_count = delayed_request_count; - summary.client_stats.response_count = response_count; - summary.client_stats.duration_ns = duration_ns; - float client_duration_sec = - (float)summary.client_stats.duration_ns / NANOS_PER_SECOND; - summary.client_stats.sequence_per_sec = - valid_sequence_count / client_duration_sec; - summary.client_stats.infer_per_sec = - (valid_request_count * summary.batch_size) / client_duration_sec; - summary.client_stats.responses_per_sec = response_count / client_duration_sec; - - if (include_lib_stats_) { - size_t completed_count = - end_stat.completed_request_count - start_stat.completed_request_count; - uint64_t request_time_ns = end_stat.cumulative_total_request_time_ns - - start_stat.cumulative_total_request_time_ns; - summary.client_stats.completed_count = completed_count; - uint64_t send_time_ns = - end_stat.cumulative_send_time_ns - start_stat.cumulative_send_time_ns; - uint64_t receive_time_ns = end_stat.cumulative_receive_time_ns - - start_stat.cumulative_receive_time_ns; - if (completed_count != 0) { - summary.client_stats.avg_request_time_ns = - request_time_ns / completed_count; - summary.client_stats.avg_send_time_ns = send_time_ns / completed_count; - summary.client_stats.avg_receive_time_ns = - receive_time_ns / completed_count; - } - } - - return cb::Error::Success; -} - -void -InferenceProfiler::SummarizeSendRequestRate( - const double window_duration_s, const size_t num_sent_requests, - PerfStatus& summary) -{ - if (window_duration_s <= 0.0) { - throw std::runtime_error("window_duration_s must be positive"); - } - - summary.send_request_rate = num_sent_requests / window_duration_s; -} - -cb::Error -InferenceProfiler::DetermineStatsModelVersion( - const cb::ModelIdentifier& model_identifier, - const std::map& start_stats, - const std::map& end_stats, - int64_t* status_model_version) -{ - // If model_version is unspecified then look in the stats to find the - // version with stats that incremented during the measurement. 
- // - // If multiple versions had incremented stats, use the highest numbered one - // and print a warning - *status_model_version = -1; - bool multiple_found = false; - bool version_unspecified = model_identifier.second.empty(); - - if (version_unspecified) { - for (const auto& x : end_stats) { - const auto& end_id = x.first; - const auto& end_stat = x.second; - - bool is_correct_model_name = - model_identifier.first.compare(end_id.first) == 0; - - if (is_correct_model_name) { - uint64_t end_queue_count = end_stat.queue_count_; - uint64_t start_queue_count = 0; - - const auto& itr = start_stats.find(end_id); - if (itr != start_stats.end()) { - start_queue_count = itr->second.queue_count_; - } - - if (end_queue_count > start_queue_count) { - int64_t this_version = std::stoll(end_id.second); - if (*status_model_version != -1) { - multiple_found = true; - } - *status_model_version = std::max(*status_model_version, this_version); - } - } - } - } else { - const auto& itr = end_stats.find(model_identifier); - if (itr != end_stats.end()) { - *status_model_version = std::stoll(model_identifier.second); - } - } - // FIXME - Investigate why composing model version is -1 in case of ensemble - // cache hit. - // - // In case of ensemble models, if top level response caching is - // enabled, the composing models versions are unavailable in case of a cache - // hit. This is due to the scheduler sends cache response and composing models - // do not get executed. It's a valid scenario and shouldn't throw error. - bool model_version_unspecified_and_invalid = - *status_model_version == -1 && - (parser_ == nullptr || !parser_->TopLevelResponseCachingEnabled()); - if (model_version_unspecified_and_invalid) { - return cb::Error( - "failed to find the requested model version", pa::GENERIC_ERROR); - } - - if (multiple_found) { - std::cerr << "WARNING: Multiple versions of model " - << model_identifier.first - << " are loaded in the triton server, and the version to use was " - "unspecified. The stats for that model may be inaccurate." - << std::endl; - } - - return cb::Error::Success; -} - -// Only for unit-testing -#ifndef DOCTEST_CONFIG_DISABLE -cb::Error -InferenceProfiler::SetTopLevelResponseCaching( - bool enable_top_level_response_caching) -{ - parser_ = std::make_shared(cb::BackendKind::TRITON); - if (parser_ == nullptr) { - return cb::Error("Failed to initialize ModelParser"); - } - parser_->SetTopLevelResponseCaching(enable_top_level_response_caching); - return cb::Error::Success; -} -#endif - -cb::Error -InferenceProfiler::SummarizeServerStats( - const std::map& start_status, - const std::map& end_status, - ServerSideStats* server_stats) -{ - RETURN_IF_ERROR(SummarizeServerStats( - std::make_pair(parser_->ModelName(), parser_->ModelVersion()), - start_status, end_status, server_stats)); - return cb::Error::Success; -} - -cb::Error -InferenceProfiler::SummarizeServerStats( - const cb::ModelIdentifier& model_identifier, - const std::map& start_status, - const std::map& end_status, - ServerSideStats* server_stats) -{ - RETURN_IF_ERROR(SummarizeServerStatsHelper( - model_identifier, start_status, end_status, server_stats)); - - // Summarize the composing models, if any. 
- for (auto composing_model_identifier : - (*parser_->GetComposingModelMap())[model_identifier.first]) { - int64_t model_version; - RETURN_IF_ERROR(DetermineStatsModelVersion( - composing_model_identifier, start_status, end_status, &model_version)); - composing_model_identifier.second = std::to_string(model_version); - auto it = server_stats->composing_models_stat - .emplace(composing_model_identifier, ServerSideStats()) - .first; - RETURN_IF_ERROR(SummarizeServerStats( - composing_model_identifier, start_status, end_status, &(it->second))); - } - - return cb::Error::Success; -} - -cb::Error -InferenceProfiler::SummarizeServerStatsHelper( - const cb::ModelIdentifier& model_identifier, - const std::map& start_status, - const std::map& end_status, - ServerSideStats* server_stats) -{ - int64_t model_version; - RETURN_IF_ERROR(DetermineStatsModelVersion( - model_identifier, start_status, end_status, &model_version)); - - const std::pair this_id( - model_identifier.first, std::to_string(model_version)); - - const auto& end_itr = end_status.find(this_id); - if (end_itr == end_status.end()) { - // In case of ensemble models, if top level response caching is enabled, - // the composing models statistics are unavailable in case of a cache hit. - // This is due to the scheduler sends cache response and composing models do - // not get executed. It's a valid scenario and shouldn't throw error. - bool stats_not_found_and_invalid = - model_version == -1 && !parser_->TopLevelResponseCachingEnabled(); - if (stats_not_found_and_invalid) { - return cb::Error( - "missing statistics for requested model", pa::GENERIC_ERROR); - } else { - // Setting server stats 0 for composing model in case of ensemble request - // cache hit since the composing model will not be executed - server_stats->Reset(); - } - } else { - uint64_t start_infer_cnt = 0; - uint64_t start_exec_cnt = 0; - uint64_t start_cnt = 0; - uint64_t start_queue_cnt = 0; - uint64_t start_compute_input_cnt = 0; - uint64_t start_compute_infer_cnt = 0; - uint64_t start_compute_output_cnt = 0; - uint64_t start_cumm_time_ns = 0; - uint64_t start_queue_time_ns = 0; - uint64_t start_compute_input_time_ns = 0; - uint64_t start_compute_infer_time_ns = 0; - uint64_t start_compute_output_time_ns = 0; - uint64_t start_cache_hit_cnt = 0; - uint64_t start_cache_hit_time_ns = 0; - uint64_t start_cache_miss_cnt = 0; - uint64_t start_cache_miss_time_ns = 0; - - const auto& start_itr = start_status.find(this_id); - if (start_itr != start_status.end()) { - start_infer_cnt = start_itr->second.inference_count_; - start_exec_cnt = start_itr->second.execution_count_; - start_cnt = start_itr->second.success_count_; - start_queue_cnt = start_itr->second.queue_count_; - start_compute_input_cnt = start_itr->second.compute_input_count_; - start_compute_infer_cnt = start_itr->second.compute_infer_count_; - start_compute_output_cnt = start_itr->second.compute_output_count_; - start_cumm_time_ns = start_itr->second.cumm_time_ns_; - start_queue_time_ns = start_itr->second.queue_time_ns_; - start_compute_input_time_ns = start_itr->second.compute_input_time_ns_; - start_compute_infer_time_ns = start_itr->second.compute_infer_time_ns_; - start_compute_output_time_ns = start_itr->second.compute_output_time_ns_; - start_cache_hit_cnt = start_itr->second.cache_hit_count_; - start_cache_hit_time_ns = start_itr->second.cache_hit_time_ns_; - start_cache_miss_cnt = start_itr->second.cache_miss_count_; - start_cache_miss_time_ns = start_itr->second.cache_miss_time_ns_; - } - - 
server_stats->inference_count = - end_itr->second.inference_count_ - start_infer_cnt; - server_stats->execution_count = - end_itr->second.execution_count_ - start_exec_cnt; - server_stats->success_count = end_itr->second.success_count_ - start_cnt; - server_stats->queue_count = end_itr->second.queue_count_ - start_queue_cnt; - server_stats->compute_input_count = - end_itr->second.compute_input_count_ - start_compute_input_cnt; - server_stats->compute_infer_count = - end_itr->second.compute_infer_count_ - start_compute_infer_cnt; - server_stats->compute_output_count = - end_itr->second.compute_output_count_ - start_compute_output_cnt; - server_stats->cumm_time_ns = - end_itr->second.cumm_time_ns_ - start_cumm_time_ns; - server_stats->queue_time_ns = - end_itr->second.queue_time_ns_ - start_queue_time_ns; - server_stats->compute_input_time_ns = - end_itr->second.compute_input_time_ns_ - start_compute_input_time_ns; - server_stats->compute_infer_time_ns = - end_itr->second.compute_infer_time_ns_ - start_compute_infer_time_ns; - server_stats->compute_output_time_ns = - end_itr->second.compute_output_time_ns_ - start_compute_output_time_ns; - server_stats->cache_hit_count = - end_itr->second.cache_hit_count_ - start_cache_hit_cnt; - server_stats->cache_hit_time_ns = - end_itr->second.cache_hit_time_ns_ - start_cache_hit_time_ns; - server_stats->cache_miss_count = - end_itr->second.cache_miss_count_ - start_cache_miss_cnt; - server_stats->cache_miss_time_ns = - end_itr->second.cache_miss_time_ns_ - start_cache_miss_time_ns; - } - - return cb::Error::Success; -} - -void -InferenceProfiler::SummarizeOverhead( - const uint64_t window_duration_ns, const uint64_t idle_ns, - PerfStatus& summary) -{ - // The window start/stop is not instantaneous. It is possible that the PA - // overhead is smaller than the delay in the window start/stop process. 
Treat - // it as 0% overhead (100% idle) in that case - // - if (idle_ns > window_duration_ns) { - summary.overhead_pct = 0; - } else { - uint64_t overhead_ns = window_duration_ns - idle_ns; - double overhead_pct = double(overhead_ns) / window_duration_ns * 100; - summary.overhead_pct = overhead_pct; - } -} - -bool -InferenceProfiler::AllMPIRanksAreStable(bool current_rank_stability) -{ - int world_size{mpi_driver_->MPICommSizeWorld()}; - std::vector stabilities_per_rank{}; - stabilities_per_rank.resize(world_size, 0); - int my_rank{mpi_driver_->MPICommRankWorld()}; - stabilities_per_rank[my_rank] = static_cast(current_rank_stability); - - for (int rank{0}; rank < world_size; rank++) { - mpi_driver_->MPIBcastIntWorld(stabilities_per_rank.data() + rank, 1, rank); - } - - bool all_stable{true}; - for (int rank{0}; rank < world_size; rank++) { - if (stabilities_per_rank[rank] == 0) { - all_stable = false; - break; - } - } - - if (verbose_ && all_stable) { - std::cout << "All models on all MPI ranks are stable" << std::endl; - } - - return all_stable; -} - -cb::Error -InferenceProfiler::MergeMetrics( - const std::vector>& all_metrics, - Metrics& merged_metrics) -{ - // Maps from each metric collection mapping gpu uuid to gpu utilization - std::vector>> - gpu_utilization_per_gpu_maps{}; - - // Maps from each metric collection mapping gpu uuid to gpu power usage - std::vector>> - gpu_power_usage_per_gpu_maps{}; - - // Maps from each metric collection mapping gpu uuid to gpu memory used bytes - std::vector>> - gpu_memory_used_bytes_per_gpu_maps{}; - - // Maps from each metric collection mapping gpu uuid to gpu memory total bytes - std::vector>> - gpu_memory_total_bytes_per_gpu_maps{}; - - // Put all metric maps in vector so they're easier to aggregate - std::for_each( - all_metrics.begin(), all_metrics.end(), - [&gpu_utilization_per_gpu_maps, &gpu_power_usage_per_gpu_maps, - &gpu_memory_used_bytes_per_gpu_maps, - &gpu_memory_total_bytes_per_gpu_maps]( - const std::reference_wrapper m) { - gpu_utilization_per_gpu_maps.push_back(m.get().gpu_utilization_per_gpu); - gpu_power_usage_per_gpu_maps.push_back(m.get().gpu_power_usage_per_gpu); - gpu_memory_used_bytes_per_gpu_maps.push_back( - m.get().gpu_memory_used_bytes_per_gpu); - gpu_memory_total_bytes_per_gpu_maps.push_back( - m.get().gpu_memory_total_bytes_per_gpu); - }); - - GetMetricAveragePerGPU( - gpu_utilization_per_gpu_maps, merged_metrics.gpu_utilization_per_gpu); - GetMetricAveragePerGPU( - gpu_power_usage_per_gpu_maps, merged_metrics.gpu_power_usage_per_gpu); - GetMetricMaxPerGPU( - gpu_memory_used_bytes_per_gpu_maps, - merged_metrics.gpu_memory_used_bytes_per_gpu); - GetMetricFirstPerGPU( - gpu_memory_total_bytes_per_gpu_maps, - merged_metrics.gpu_memory_total_bytes_per_gpu); - - return cb::Error::Success; -} - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/inference_profiler.h b/src/c++/perf_analyzer/inference_profiler.h deleted file mode 100644 index a73651319..000000000 --- a/src/c++/perf_analyzer/inference_profiler.h +++ /dev/null @@ -1,818 +0,0 @@ -// Copyright 2020-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "concurrency_manager.h" -#include "constants.h" -#include "custom_load_manager.h" -#include "metrics.h" -#include "metrics_manager.h" -#include "model_parser.h" -#include "mpi_utils.h" -#include "periodic_concurrency_manager.h" -#include "profile_data_collector.h" -#include "request_rate_manager.h" - -namespace triton { namespace perfanalyzer { - -#ifndef DOCTEST_CONFIG_DISABLE -class NaggyMockInferenceProfiler; -class TestInferenceProfiler; -class ModelParser; -#endif - -/// Constant parameters that determine the whether stopping criteria has met -/// for the current phase of testing -struct LoadParams { - // The number of measurements to account for during calculation of load - // status - uint32_t stability_window; - // The +/- range to account for while assessing load status - double stability_threshold; -}; - -/// Data structure to keep track of real-time load status and determine whether -/// stopping criteria has met for the current phase of testing. -struct LoadStatus { - // Stores the observations of infer_per_sec and latencies in a vector - std::vector infer_per_sec; - std::vector latencies; - // Records the average inference per second within the stability window - double avg_ips = 0; - // Stores the average latency within the stability window - uint64_t avg_latency = 0; -}; - -/// Configuration for the Measure function -struct MeasureConfig { - uint64_t measurement_window{0}; - bool is_count_based{false}; - bool clamp_window{false}; -}; - -// Holds the total of the timiming components of composing models of an -// ensemble. 
-struct EnsembleDurations { - EnsembleDurations() - : total_queue_time_avg_us(0), total_compute_time_avg_us(0), - total_cache_hit_time_avg_us(0), total_cache_miss_time_avg_us(0), - total_combined_cache_compute_time_avg_us(0) - { - } - uint64_t total_queue_time_avg_us; - uint64_t total_compute_time_avg_us; - // Time spent on cache lookups/copies for cache hits - uint64_t total_cache_hit_time_avg_us; - // Time spent on cache lookups/copies/insertions for cache misses - uint64_t total_cache_miss_time_avg_us; - - // Combined average of cache and compute times - uint64_t total_combined_cache_compute_time_avg_us; -}; - -/// Holds the server-side inference statisitcs of the target model and its -/// composing models -struct ServerSideStats { - uint64_t inference_count; - uint64_t execution_count; - uint64_t cache_hit_count; - uint64_t cache_miss_count; - uint64_t success_count; - uint64_t queue_count; - uint64_t compute_input_count; - uint64_t compute_infer_count; - uint64_t compute_output_count; - uint64_t cumm_time_ns; - uint64_t queue_time_ns; - uint64_t compute_input_time_ns; - uint64_t compute_infer_time_ns; - uint64_t compute_output_time_ns; - // Time spent on cache lookups/copies for cache hits - uint64_t cache_hit_time_ns; - // Time spent on cache lookups/copies/insertions for cache misses - uint64_t cache_miss_time_ns; - - std::map composing_models_stat; - // This function sets composing model server stats to 0 in case of a cache hit - // when top level response cache is enabled, since composing models are not - // executed and do not have any stats - void Reset() - { - inference_count = 0; - execution_count = 0; - success_count = 0; - queue_count = 0; - compute_input_count = 0; - compute_infer_count = 0; - compute_output_count = 0; - cumm_time_ns = 0; - queue_time_ns = 0; - compute_input_time_ns = 0; - compute_infer_time_ns = 0; - compute_output_time_ns = 0; - cache_hit_count = 0; - cache_hit_time_ns = 0; - cache_miss_count = 0; - cache_miss_time_ns = 0; - } -}; - -/// Holds the statistics recorded at the client side. -struct ClientSideStats { - // Request count and elapsed time measured by client - uint64_t request_count; - // Only record sequences that finish within the measurement window - uint64_t sequence_count; - // The number of requests that missed their schedule - uint64_t delayed_request_count; - // The number of responses - uint64_t response_count; - uint64_t duration_ns; - uint64_t avg_latency_ns; - // a ordered map of percentiles to be reported ( pair) - std::map percentile_latency_ns; - // List of all the valid latencies. - std::vector latencies; - // Using usec to avoid square of large number (large in nsec) - uint64_t std_us; - uint64_t avg_request_time_ns; - uint64_t avg_send_time_ns; - uint64_t avg_receive_time_ns; - // Per sec stat - double infer_per_sec; - double responses_per_sec; - double sequence_per_sec; - - // Completed request count reported by the client library - uint64_t completed_count; -}; - -/// The entire statistics record. 
-struct PerfStatus { - uint32_t concurrency; - double request_rate; - size_t batch_size; - ServerSideStats server_stats; - ClientSideStats client_stats; - std::vector metrics{}; - double overhead_pct; - bool on_sequence_model; - - // placeholder for the latency value that is used for conditional checking - uint64_t stabilizing_latency_ns; - // Metric for requests sent per second - double send_request_rate{0.0}; -}; - -cb::Error ReportPrometheusMetrics(const Metrics& metrics); - -//============================================================================== -/// A InferenceProfiler is a helper class that measures and summarizes the -/// inference statistic under different concurrency level. -/// -/// The profiler can adjust the number of concurrent requests by informing the -/// concurrency manager. And after the adjustment, the profiler will actively -/// collecting the statistic from both the concurrency manager and the inference -/// server directly until it is stable. Once stable, the profiler updates the -/// 'status_summary' based on the most recent measurement. -/// -/// The measurement procedure: -/// 1. The profiler gets start status from the server and records the start -/// time. -/// 2. After given time interval, the profiler gets end status from the server -/// and records the end time. -/// 3. The profiler obtains the request records recorded by concurrency manager, -/// and uses the request records that are recorded between start time and end -/// time to measure client side status and update status_summary. -/// -class InferenceProfiler { - public: - /// Create a profiler that collects and summarizes inference statistic. - /// \param verbose Whether to print verbose logging. - /// \param stability_threshold The range that the measurement is considered as - /// stable. i.e. within (1 +/- stability_threshold) * average value of the - /// last 3 measurements. The criteria are "infer per second" and "average - /// latency", or "infer per second" and "percentile latency" if valid - /// percentile is set (see 'percentile' below). - /// \param measurement_window_ms The duration of each measurement in msec. - /// \param max_trials The maximum number of attempts to obtain - /// stable measurement. - /// \param percentile The percentile in terms of latency to be reported. - /// if it is a valid percentile value, the percentile latency will reported - /// and used as stable criteria instead of average latency. If it is -1, - /// average latency will be reported and used as stable criteria. - /// \param latency_threshold_ms The threshold on the latency measurements in - /// microseconds. - /// \param parser The ModelParse object which holds all the details about the - /// model. - /// \param profile_backend The ClientBackend object used to communicate - /// with the server by profiler. - /// \param manager The LoadManager object that will produce load on the - /// server. - /// \param profiler Returns a new InferenceProfiler object. - /// \param measurement_request_count The number of requests to capture when - /// using "count_windows" mode. - /// \param measurement_mode The measurement mode to use for windows. - /// \param mpi_driver The driver class for MPI operations. - /// \param metrics_interval_ms The interval at which the server-side metrics - /// \param should_collect_metrics Whether server-side inference server metrics - /// should be collected. 
- /// \param overhead_pct_threshold User set threshold above which the PA - /// overhead is too significant to provide usable results. - /// \param collector Collector for the profile data from experiments - /// \param should_collect_profile_data Whether to collect profile data. - /// \return cb::Error object indicating success or failure. - static cb::Error Create( - const bool verbose, const double stability_threshold, - const uint64_t measurement_window_ms, const size_t max_trials, - const int64_t percentile, const uint64_t latency_threshold_ms, - const cb::ProtocolType protocol, std::shared_ptr& parser, - std::shared_ptr profile_backend, - std::unique_ptr manager, - std::unique_ptr* profiler, - uint64_t measurement_request_count, MeasurementMode measurement_mode, - std::shared_ptr mpi_driver, const uint64_t metrics_interval_ms, - const bool should_collect_metrics, const double overhead_pct_threshold, - const bool async_mode, - const std::shared_ptr collector, - const bool should_collect_profile_data); - - /// Performs the profiling on the given range with the given search algorithm. - /// For profiling using request rate invoke template with double, otherwise - /// invoke with size_t for concurrency search. - /// \param start The starting point of the search range. - /// \param end The ending point of the search range. - /// \param step The step size to move along the search range in linear search - /// or the precision in binary search. - /// \param search_mode The search algorithm to be applied. - /// \param request_count The number of requests to generate in each - /// experiment. If 0, then there is no limit, and it will generate until - /// stable. - /// \param summary Returns the trace of the measurement along the search path. - /// \return cb::Error object indicating success or failure. - template - cb::Error Profile( - const T start, const T end, const T step, const SearchMode search_mode, - const size_t request_count, std::vector& perf_statuses) - { - cb::Error err; - bool meets_threshold, is_stable; - if (search_mode == SearchMode::NONE) { - err = Profile(request_count, perf_statuses, meets_threshold, is_stable); - if (!err.IsOk()) { - return err; - } - } else if (search_mode == SearchMode::LINEAR) { - T current_value = start; - do { - err = Profile( - current_value, request_count, perf_statuses, meets_threshold, - is_stable); - if (!err.IsOk()) { - return err; - } - current_value += step; - } while (((current_value <= end) || (end == static_cast(NO_LIMIT))) && - (meets_threshold)); - // If there was only one concurrency we swept over and it did not meet the - // stability threshold, we should return an error. 
- if (current_value == (start + step) && is_stable == false) { - return cb::Error( - "Failed to obtain stable measurement.", pa::STABILITY_ERROR); - } - } else { - err = Profile( - start, request_count, perf_statuses, meets_threshold, is_stable); - if (!err.IsOk() || (!meets_threshold)) { - return err; - } - err = Profile( - end, request_count, perf_statuses, meets_threshold, is_stable); - if (!err.IsOk() || (meets_threshold)) { - return err; - } - - T this_start = start; - T this_end = end; - while ((this_end - this_start) > step) { - T current_value = (this_end + this_start) / 2; - err = Profile( - current_value, request_count, perf_statuses, meets_threshold, - is_stable); - if (!err.IsOk()) { - return err; - } - if (meets_threshold) { - this_start = current_value; - } else { - this_end = current_value; - } - } - } - return cb::Error::Success; - } - - cb::Error ProfilePeriodicConcurrencyMode() - { - auto& manager{dynamic_cast(*manager_)}; - std::vector request_records{manager.RunExperiment()}; - // FIXME - Refactor collector class to not need ID or window in the case of - // periodic concurrency mode - InferenceLoadMode id{1, 0.0}; - collector_->AddWindow(id, 0, UINT64_MAX); - collector_->AddData(id, std::move(request_records)); - return cb::Error::Success; - } - - bool IncludeServerStats() { return include_server_stats_; } - - private: - InferenceProfiler( - const bool verbose, const double stability_threshold, - const int32_t measurement_window_ms, const size_t max_trials, - const bool extra_percentile, const size_t percentile, - const uint64_t latency_threshold_ms, const cb::ProtocolType protocol, - std::shared_ptr& parser, - std::shared_ptr profile_backend, - std::unique_ptr manager, uint64_t measurement_request_count, - MeasurementMode measurement_mode, std::shared_ptr mpi_driver, - const uint64_t metrics_interval_ms, const bool should_collect_metrics, - const double overhead_pct_threshold, const bool async_mode, - const std::shared_ptr collector, - const bool should_collect_profile_data); - - /// Actively measure throughput in every 'measurement_window' msec until the - /// throughput is stable. Once the throughput is stable, it adds the - /// observations on summary trace and returns whether the setting met the - /// threshold. NOTE: the requests are being sent regardless of the - /// measurement, so the data returned by the server (see struct - /// PerforamnceStatusStruct) will include more requests than what the client - /// measures (we can't get the exact server status right before the first - /// request and right after the last request in the measurement window). - /// \param concurrent_request_count The concurrency level for the measurement. - /// \param perf_statuses Appends the measurements summary at the end of this - /// list. - /// \param request_count The number of requests to generate when profiling. If - /// 0, then there is no limit, and it will generate until stable. - /// \param meets_threshold Returns whether the setting meets the - /// threshold. - /// \param is_stable Returns whether the measurement is stable. - /// \return cb::Error object indicating success or failure. - cb::Error Profile( - const size_t concurrent_request_count, const size_t request_count, - std::vector& perf_statuses, bool& meets_threshold, - bool& is_stable); - - /// Similar to above function, but instead of setting the concurrency, it - /// sets the specified request rate for measurements. - /// \param request_rate The request rate for inferences. 
- /// \param request_count The number of requests to generate when profiling. If - /// 0, then there is no limit, and it will generate until stable. - /// \param perf_statuses Appends the measurements summary at the end of this - /// list. - /// \param meets_threshold Returns whether the setting meets the - /// threshold. - /// \param is_stable Returns whether the measurement is stable. - /// \return cb::Error object indicating success or failure. - cb::Error Profile( - const double request_rate, const size_t request_count, - std::vector& perf_statuses, bool& meets_threshold, - bool& is_stable); - - /// Measures throughput and latencies for custom load without controlling - /// request rate nor concurrency. Requires load manager to be loaded with - /// a file specifying the time intervals. - /// \param request_count The number of requests to generate when profiling. If - /// 0, then there is no limit, and it will generate until stable. - /// \param perf_statuses Appends the measurements summary at the end of this - /// list. - /// \param meets_threshold Returns whether the measurement met the - /// threshold. - /// \param is_stable Returns whether the measurement is stable. - /// \return cb::Error object indicating success - /// or failure. - cb::Error Profile( - const size_t request_count, std::vector& perf_statuses, - bool& meets_threshold, bool& is_stable); - - /// A helper function for profiling functions. - /// \param status_summary Returns the summary of the measurement. - /// \param request_count The number of requests to generate when profiling. If - /// 0, then there is no limit, and it will generate until stable. - /// \param is_stable Returns whether the measurement stabilized or not. - /// \return cb::Error object indicating success or failure. - cb::Error ProfileHelper( - PerfStatus& status_summary, size_t request_count, bool* is_stable); - - /// A helper function to determine if profiling is stable - /// \param load_status Stores the observations of infer_per_sec and latencies - /// \param check_latency Whether to check latency for stability - /// \return Returns if the threshold and latencies are stable. - bool DetermineStability(LoadStatus& load_status, bool check_latency = true); - - /// Check if latency at index idx is within the latency threshold - /// \param idx index in latency vector - /// \param load_status Stores the observations of infer_per_sec and latencies - /// \return Returns whether the latencies are below the max threshold - bool CheckWithinThreshold(size_t idx, LoadStatus& load_status); - - /// A helper function to determine if profiling is done - /// \param load_status Stores the observations of infer_per_sec and latencies - /// \param is_stable Returns whether the measurement stabilized or not. - /// \return Returns if we should break out of the infinite stability check - /// loop. 
- bool IsDoneProfiling(LoadStatus& load_status, bool* is_stable); - - /// Check if observed inferences and latencies are within threshold - /// for a single window starting at idx - /// \param idx index in latency vector - /// \param load_status Stores the observations of infer_per_sec and latencies - /// \param check_latency Whether to check latency for stability - /// \return Returns whether inference and latency are stable - bool CheckWindowForStability( - size_t idx, LoadStatus& load_status, bool check_latency); - - /// Check if observed inferences are within threshold - /// for a single window starting at idx - /// \param idx index in latency vector - /// \param load_status Stores the observations of infer_per_sec and latencies - /// \return Returns whether inference is stable - bool IsInferWindowStable(size_t idx, LoadStatus& load_status); - - /// Check if observed latencies are within threshold - /// for a single window starting at idx - /// \param idx index in latency vector - /// \param load_status Stores the observations of infer_per_sec and latencies - /// \return Returns whether latency is stable - bool IsLatencyWindowStable(size_t idx, LoadStatus& load_status); - - /// Helper function to perform measurement. - /// \param status_summary The summary of this measurement. - /// \param config The configuration for measurement. - /// \return cb::Error object indicating success or failure. - cb::Error Measure(PerfStatus& status_summary, MeasureConfig config); - - /// Gets the server side statistics - /// \param model_status Returns the status of the models provided by - /// the server. If the model being profiled is non-ensemble model, - /// only its status will be returned. Otherwise, the status of the composing - /// models will also be returned. - /// \return cb::Error object indicating success or failure. - cb::Error GetServerSideStatus( - std::map* model_status); - - /// Summarize the measurement with the provided statistics. - /// \param start_status The model status at the start of the measurement. - /// \param end_status The model status at the end of the measurement. - /// \param start_stat The accumulated context status at the start. - /// \param end_stat The accumulated context status at the end. - /// \param summary Returns the summary of the measurement. - /// \param window_start_ns The window start timestamp in nanoseconds. - /// \param window_end_ns The window end timestamp in nanoseconds. - /// \param clamp_window If true, the actual window range is reduced to the - /// start of the first request to the final response. - /// \return cb::Error object indicating success or failure. - cb::Error Summarize( - const std::map& start_status, - const std::map& end_status, - const cb::InferStat& start_stat, const cb::InferStat& end_stat, - PerfStatus& summary, uint64_t window_start_ns, uint64_t window_end_ns, - bool clamp_window); - - /// \param valid_range The start and end timestamp of the measurement window. - /// \param valid_sequence_count Returns the number of completed sequences - /// during the measurement. A sequence is a set of correlated requests sent to - /// sequence model. - /// \param latencies Returns the vector of request latencies where the - /// requests are completed within the measurement window. 
- /// \param response_count Returns the number of responses - /// \param valid_requests Returns a vector of valid request records - virtual void ValidLatencyMeasurement( - const std::pair& valid_range, - size_t& valid_sequence_count, size_t& delayed_request_count, - std::vector* latencies, size_t& response_count, - std::vector& valid_requests); - - /// Clamp a window around a set of requests, from the earliest start time to - /// the latest response - /// \param requests A vector of requests to clamp the window around. - /// \return std::pair object containing of the window. - std::pair ClampWindow( - std::vector& requests); - - /// Add the data from the request records to the Raw Data Collector - /// \param perf_status PerfStatus of the current measurement - /// \param window_start_ns The window start timestamp in nanoseconds. - /// \param window_end_ns The window end timestamp in nanoseconds. - /// \param request_records The request records to collect. - void CollectData( - PerfStatus& perf_status, uint64_t window_start_ns, uint64_t window_end_ns, - std::vector&& request_records); - - /// \param latencies The vector of request latencies collected. - /// \param summary Returns the summary that the latency related fields are - /// set. - /// \return cb::Error object indicating success or failure. - virtual cb::Error SummarizeLatency( - const std::vector& latencies, PerfStatus& summary); - - /// \param latencies The vector of request latencies collected. - /// \return std::tuple object containing: - /// * mean of latencies in nanoseconds - /// * sample standard deviation of latencies in microseconds - std::tuple GetMeanAndStdDev( - const std::vector& latencies); - - /// \param start_stat The accumulated client statistics at the start. - /// \param end_stat The accumulated client statistics at the end. - /// \param duration_ns The duration of the measurement in nsec. - /// \param valid_request_count The number of completed requests recorded. - /// \param valid_sequence_count The number of completed sequences recorded. - /// \param delayed_request_count The number of requests that missed their - /// schedule. - /// \param response_count The number of responses. - /// \param summary Returns the summary that the fields recorded by - /// client are set. - /// \return cb::Error object indicating success or failure. - virtual cb::Error SummarizeClientStat( - const cb::InferStat& start_stat, const cb::InferStat& end_stat, - const uint64_t duration_ns, const size_t valid_request_count, - const size_t delayed_request_count, const size_t valid_sequence_count, - const size_t response_count, PerfStatus& summary); - - /// Adds the send request rate metric to the summary object. - /// \param window_duration_s The duration of the window in seconds. - /// \param num_sent_requests The number of requests sent during the last - /// window. - /// \param summary The summary object to be updated with the send request rate - /// metric. 
- void SummarizeSendRequestRate( - const double window_duration_s, const size_t num_sent_requests, - PerfStatus& summary); - - /// Given a model_identifier to gather stats for, and a map of ALL stats, - /// determine which version of the model should be gathered - /// \param model_identifier A pair of model_name and model_version to identify - /// a specific model - /// \param start_stats The stats for all models at the start of the - /// measurement - /// \param end_stats The stats for all models at the end of the measurement - /// \param model_version The determined model version - - cb::Error DetermineStatsModelVersion( - const cb::ModelIdentifier& model_identifier, - const std::map& start_stats, - const std::map& end_stats, - int64_t* model_version); - -#ifndef DOCTEST_CONFIG_DISABLE - cb::Error SetTopLevelResponseCaching(bool enable_top_level_request_caching); -#endif - - /// \param start_status The model status at the start of the measurement. - /// \param end_status The model status at the end of the measurement. - /// \param server_stats Returns the summary that the fields recorded by server - /// are set. - /// \return cb::Error object indicating success or failure. - cb::Error SummarizeServerStats( - const std::map& start_status, - const std::map& end_status, - ServerSideStats* server_stats); - - /// \param model_identifier A pair of model_name and model_version to identify - /// a specific model. - /// \param start_status The model status at the start of the measurement. - /// \param end_status The model status at the end of the measurement. - /// \param server_stats Returns the summary that the fields recorded by server - /// are set. - /// \return cb::Error object indicating success or failure. - cb::Error SummarizeServerStats( - const cb::ModelIdentifier& model_identifier, - const std::map& start_status, - const std::map& end_status, - ServerSideStats* server_stats); - - /// \param model_identifier A pair of model_name and model_version to identify - /// a specific model. - /// \param start_status The model status at the start of the measurement. - /// \param end_status The model status at the end of the measurement. - /// \param server_stats Returns the summary that the fields recorded by server - /// are set. - /// \return cb::Error object indicating success or failure. - cb::Error SummarizeServerStatsHelper( - const cb::ModelIdentifier& model_identifier, - const std::map& start_status, - const std::map& end_status, - ServerSideStats* server_stats); - - /// Calculate the overhead and put the results into the summary - /// - /// \param window_duration_ns The duration of the window - /// \param idle_ns The average worker idle time during the window - /// \param summary The summary object to be updated with overhead stats - /// - void SummarizeOverhead( - const uint64_t window_duration_ns, const uint64_t idle_ns, - PerfStatus& summary); - - /// Returns true if all MPI ranks (models) are stable. Should only be run if - /// and only if IsMPIRun() returns true. - /// \param current_rank_stability The stability of the current rank. - /// \return True if all MPI ranks are stable. - bool AllMPIRanksAreStable(bool current_rank_stability); - - /// Merge individual perf status reports into a single perf status. This - /// function is used to merge the results from multiple Measure runs into a - /// single report. - /// \param perf_status List of perf status reports to be merged. - /// \param summary_status Final merged summary status. 
-  /// \return cb::Error object indicating success or failure.
-  virtual cb::Error MergePerfStatusReports(
-      std::deque<PerfStatus>& perf_status, PerfStatus& summary_status);
-
-  /// Merge individual server side statistics into a single server side report.
-  /// \param server_side_stats List of server side statistics reports to be
-  /// merged.
-  /// \param server_side_summary Final merged summary status.
-  /// \return cb::Error object indicating success or failure.
-  virtual cb::Error MergeServerSideStats(
-      std::vector<ServerSideStats>& server_side_stats,
-      ServerSideStats& server_side_summary);
-
-  /// \param all_metrics Individual metrics from all intervals from stable
-  /// passes.
-  /// \param merged_metrics Output merged metrics from all intervals from
-  /// stable passes.
-  /// \return cb::Error object indicating success or failure.
-  cb::Error MergeMetrics(
-      const std::vector<std::reference_wrapper<const Metrics>>& all_metrics,
-      Metrics& merged_metrics);
-
-  template <typename T>
-  void GetMetricAveragePerGPU(
-      const std::vector<
-          std::reference_wrapper<const std::map<std::string, T>>>&
-          input_metric_maps,
-      std::map<std::string, T>& output_metric_map)
-  {
-    std::map<std::string, size_t> metric_count_per_gpu{};
-
-    for (const auto& input_metric_map : input_metric_maps) {
-      for (const auto& input_metric : input_metric_map.get()) {
-        const auto& gpu_uuid{input_metric.first};
-        const auto& metric{input_metric.second};
-
-        if (output_metric_map.find(gpu_uuid) == output_metric_map.end()) {
-          output_metric_map[gpu_uuid] = 0;
-          metric_count_per_gpu[gpu_uuid] = 0;
-        }
-
-        output_metric_map[gpu_uuid] += metric;
-        metric_count_per_gpu[gpu_uuid]++;
-      }
-    }
-
-    for (auto& output_metric : output_metric_map) {
-      const auto& gpu_uuid{output_metric.first};
-      auto& metric{output_metric.second};
-      const auto& metric_count{metric_count_per_gpu[gpu_uuid]};
-      if (metric_count > 0) {
-        metric /= metric_count;
-      }
-    }
-  }
-
-  template <typename T>
-  void GetMetricMaxPerGPU(
-      const std::vector<
-          std::reference_wrapper<const std::map<std::string, T>>>&
-          input_metric_maps,
-      std::map<std::string, T>& output_metric_map)
-  {
-    for (const auto& input_metric_map : input_metric_maps) {
-      for (const auto& input_metric : input_metric_map.get()) {
-        const auto& gpu_uuid{input_metric.first};
-        const auto& metric{input_metric.second};
-
-        if (output_metric_map.find(gpu_uuid) == output_metric_map.end()) {
-          output_metric_map[gpu_uuid] = 0;
-        }
-
-        output_metric_map[gpu_uuid] =
-            std::max(output_metric_map[gpu_uuid], metric);
-      }
-    }
-  }
-
-  template <typename T>
-  void GetMetricFirstPerGPU(
-      const std::vector<
-          std::reference_wrapper<const std::map<std::string, T>>>&
-          input_metric_maps,
-      std::map<std::string, T>& output_metric_map)
-  {
-    for (const auto& input_metric_map : input_metric_maps) {
-      for (const auto& input_metric : input_metric_map.get()) {
-        const auto& gpu_uuid{input_metric.first};
-        const auto& metric{input_metric.second};
-
-        if (output_metric_map.find(gpu_uuid) == output_metric_map.end()) {
-          output_metric_map[gpu_uuid] = metric;
-        }
-      }
-    }
-  }
-
-  bool verbose_;
-  uint64_t measurement_window_ms_;
-  uint64_t measurement_request_count_;
-  MeasurementMode measurement_mode_;
-  size_t max_trials_;
-  bool extra_percentile_;
-  size_t percentile_;
-  uint64_t latency_threshold_ms_;
-
-  cb::ProtocolType protocol_;
-  std::string model_name_;
-  int64_t model_version_;
-
-  std::shared_ptr<ModelParser> parser_;
-  std::shared_ptr<cb::ClientBackend> profile_backend_;
-  std::unique_ptr<LoadManager> manager_;
-  std::shared_ptr<ProfileDataCollector> collector_;
-  LoadParams load_parameters_;
-
-  bool include_lib_stats_;
-  bool include_server_stats_;
-  std::shared_ptr<MPIDriver> mpi_driver_;
-
-  /// The request records of the requests completed during all measurements
-  std::vector<RequestRecord> all_request_records_;
-
-  /// The end time of the previous measurement window
-  uint64_t previous_window_end_ns_;
-
-  /// Server side statistics from the previous measurement window
-  std::map<cb::ModelIdentifier, cb::ModelStatistics> prev_server_side_stats_;
-
-  /// Client side statistics from the previous measurement window
-  cb::InferStat prev_client_side_stats_;
-
-  /// Metrics manager that collects server-side metrics periodically
-  std::shared_ptr<MetricsManager> metrics_manager_{nullptr};
-
-  /// Whether server-side inference server metrics should be collected.
-  bool should_collect_metrics_{false};
-
-  /// User-set threshold above which the PA overhead is too significant to
-  /// provide usable results.
-  const double overhead_pct_threshold_{0.0};
-
-  // Whether to collect profile data.
-  bool should_collect_profile_data_{false};
-
-  // Whether the client is operating in async mode.
-  const bool async_mode_{false};
-
-#ifndef DOCTEST_CONFIG_DISABLE
-  friend NaggyMockInferenceProfiler;
-  friend TestInferenceProfiler;
-  friend ModelParser;
-
- public:
-  InferenceProfiler() = default;
-#endif
-};
-
-}}  // namespace triton::perfanalyzer
diff --git a/src/c++/perf_analyzer/ischeduler.h b/src/c++/perf_analyzer/ischeduler.h
deleted file mode 100644
index a854b64b4..000000000
--- a/src/c++/perf_analyzer/ischeduler.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#pragma once
-
-#include "rate_schedule.h"
-
-namespace triton { namespace perfanalyzer {
-
-/// Interface for worker threads that use a schedule
-///
-class IScheduler {
- public:
-  /// Provides the schedule that should be followed
-  ///
-  virtual void SetSchedule(RateSchedulePtr_t schedule) = 0;
-};
-
-}}  // namespace triton::perfanalyzer
diff --git a/src/c++/perf_analyzer/iworker.h b/src/c++/perf_analyzer/iworker.h
deleted file mode 100644
index 3a72f4c10..000000000
--- a/src/c++/perf_analyzer/iworker.h
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#pragma once - -namespace triton { namespace perfanalyzer { - -/// Interface for worker threads that generate inference requests -/// -class IWorker { - public: - virtual void Infer() = 0; -}; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/load_manager.cc b/src/c++/perf_analyzer/load_manager.cc deleted file mode 100644 index 1f648a7f4..000000000 --- a/src/c++/perf_analyzer/load_manager.cc +++ /dev/null @@ -1,288 +0,0 @@ -// Copyright 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
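The GetMetricAveragePerGPU helper shown earlier in inference_profiler.h merges per-GPU metric snapshots by summing each GPU's samples across measurement intervals and dividing by the sample count. A minimal standalone sketch of the same technique, using plain std::map keyed by GPU UUID (names here are illustrative, not PA's API):

#include <iostream>
#include <map>
#include <string>
#include <vector>

// Average a per-GPU metric (keyed by GPU UUID) across several
// measurement intervals. Each element of `intervals` is one snapshot.
std::map<std::string, double>
AveragePerGpu(const std::vector<std::map<std::string, double>>& intervals)
{
  std::map<std::string, double> sums;
  std::map<std::string, size_t> counts;
  for (const auto& snapshot : intervals) {
    for (const auto& [uuid, value] : snapshot) {
      sums[uuid] += value;  // operator[] zero-initializes on first use
      counts[uuid]++;
    }
  }
  for (auto& [uuid, sum] : sums) {
    sum /= counts[uuid];  // counts[uuid] >= 1 whenever uuid is in sums
  }
  return sums;
}

int main()
{
  // Two intervals: GPU utilization snapshots for a single GPU UUID.
  std::vector<std::map<std::string, double>> intervals{
      {{"GPU-0001", 0.50}}, {{"GPU-0001", 0.70}}};
  for (const auto& [uuid, avg] : AveragePerGpu(intervals)) {
    std::cout << uuid << ": " << avg << "\n";  // prints GPU-0001: 0.6
  }
  return 0;
}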
- -#include "load_manager.h" - -#include - -#include "client_backend/client_backend.h" -#include "infer_data_manager_factory.h" - -namespace triton { namespace perfanalyzer { - - -cb::Error -LoadManager::CheckHealth() -{ - // Check thread status to make sure that the load setting is - // consistent to the one being reported - // If some thread return early, main thread will return and - // the worker thread's error message will be reported - // when derived class destructor gets called. - for (auto& thread_stat : threads_stat_) { - if (!thread_stat->status_.IsOk()) { - return cb::Error( - "Failed to maintain requested inference load." - " Worker thread(s) failed to generate concurrent requests.", - pa::GENERIC_ERROR); - } - if (!thread_stat->cb_status_.IsOk()) { - return cb::Error( - "Failed to retrieve results from inference request.", - pa::GENERIC_ERROR); - } - } - return cb::Error::Success; -} - -cb::Error -LoadManager::SwapRequestRecords(std::vector& new_request_records) -{ - std::vector total_request_records; - // Gather request records with proper locking from all the worker threads - for (auto& thread_stat : threads_stat_) { - std::lock_guard lock(thread_stat->mu_); - total_request_records.insert( - total_request_records.end(), thread_stat->request_records_.begin(), - thread_stat->request_records_.end()); - thread_stat->request_records_.clear(); - } - // Swap the results - total_request_records.swap(new_request_records); - return cb::Error::Success; -} - -uint64_t -LoadManager::CountCollectedRequests() -{ - uint64_t num_of_requests = 0; - for (auto& thread_stat : threads_stat_) { - std::lock_guard lock(thread_stat->mu_); - num_of_requests += thread_stat->request_records_.size(); - } - return num_of_requests; -} - -cb::Error -LoadManager::GetAccumulatedClientStat(cb::InferStat* contexts_stat) -{ - contexts_stat->completed_request_count = 0; - contexts_stat->cumulative_receive_time_ns = 0; - contexts_stat->cumulative_send_time_ns = 0; - contexts_stat->cumulative_total_request_time_ns = 0; - - for (auto& thread_stat : threads_stat_) { - std::lock_guard lock(thread_stat->mu_); - for (auto& context_stat : thread_stat->contexts_stat_) { - contexts_stat->completed_request_count += - context_stat.completed_request_count; - contexts_stat->cumulative_total_request_time_ns += - context_stat.cumulative_total_request_time_ns; - contexts_stat->cumulative_send_time_ns += - context_stat.cumulative_send_time_ns; - contexts_stat->cumulative_receive_time_ns += - context_stat.cumulative_receive_time_ns; - } - } - return cb::Error::Success; -} - -uint64_t -LoadManager::GetIdleTime() -{ - uint64_t total{0}; - size_t num_active_threads = 0; - for (auto& thread_stat : threads_stat_) { - std::lock_guard lock(thread_stat->mu_); - uint64_t idle_time = thread_stat->idle_timer.GetIdleTime(); - if (idle_time) { - total += idle_time; - num_active_threads++; - } - } - - // TODO REFACTOR TMA-1043 InferDataManager should have an API to get - // num_active_threads. 
This method of determining active threads isn't fully - // accurate - if (num_active_threads) { - total /= num_active_threads; - } - - return total; -} - -void -LoadManager::ResetIdleTime() -{ - for (auto& thread_stat : threads_stat_) { - std::lock_guard lock(thread_stat->mu_); - thread_stat->idle_timer.Reset(); - } -} - -const size_t -LoadManager::GetAndResetNumSentRequests() -{ - size_t num_sent_requests{0}; - - for (auto& thread_stat : threads_stat_) { - num_sent_requests += thread_stat->num_sent_requests_; - thread_stat->num_sent_requests_ = 0; - } - - return num_sent_requests; -} - -LoadManager::LoadManager( - const bool async, const bool streaming, const int32_t batch_size, - const size_t max_threads, const SharedMemoryType shared_memory_type, - const size_t output_shm_size, const std::shared_ptr& parser, - const std::shared_ptr& factory, - const std::unordered_map& - request_parameters) - : async_(async), streaming_(streaming), batch_size_(batch_size), - max_threads_(max_threads), parser_(parser), factory_(factory), - using_json_data_(false) -{ - on_sequence_model_ = - ((parser_->SchedulerType() == ModelParser::SEQUENCE) || - (parser_->SchedulerType() == ModelParser::ENSEMBLE_SEQUENCE)); - - data_loader_.reset(new DataLoader(batch_size_)); - - infer_data_manager_ = InferDataManagerFactory::CreateInferDataManager( - max_threads, batch_size, shared_memory_type, output_shm_size, - request_parameters, parser, factory, data_loader_); -} - -void -LoadManager::InitManager( - const size_t string_length, const std::string& string_data, - const bool zero_input, std::vector& user_data, - const uint64_t start_sequence_id, const uint64_t sequence_id_range, - const size_t sequence_length, const bool sequence_length_specified, - const double sequence_length_variation) -{ - // Note, this is already caught by the CLI, but adding it here for extra - // protection - if (on_sequence_model_ && batch_size_ > 1) { - throw PerfAnalyzerException( - "error: sequence models do not support batching", GENERIC_ERROR); - } - - auto status = - InitManagerInputs(string_length, string_data, zero_input, user_data); - THROW_IF_ERROR(status, "Failed to init manager inputs"); - - THROW_IF_ERROR( - infer_data_manager_->Init(), "Unable to init infer data manager"); - - sequence_manager_ = MakeSequenceManager( - start_sequence_id, sequence_id_range, sequence_length, - sequence_length_specified, sequence_length_variation, using_json_data_, - data_loader_); - - InitManagerFinalize(); -} - -cb::Error -LoadManager::InitManagerInputs( - const size_t string_length, const std::string& string_data, - const bool zero_input, std::vector& user_data) -{ - RETURN_IF_ERROR(factory_->CreateClientBackend(&backend_)); - - // Read provided data - if (!user_data.empty()) { - if (IsDirectory(user_data[0])) { - RETURN_IF_ERROR(data_loader_->ValidateIOExistsInModel( - parser_->Inputs(), parser_->Outputs(), user_data[0])); - RETURN_IF_ERROR(data_loader_->ReadDataFromDir( - parser_->Inputs(), parser_->Outputs(), user_data[0])); - } else { - using_json_data_ = true; - for (const auto& json_file : user_data) { - RETURN_IF_ERROR(data_loader_->ReadDataFromJSON( - parser_->Inputs(), parser_->Outputs(), json_file)); - } - std::cout << " Successfully read data for " - << data_loader_->GetDataStreamsCount() << " stream/streams"; - if (data_loader_->GetDataStreamsCount() == 1) { - std::cout << " with " << data_loader_->GetTotalSteps(0) - << " step/steps"; - } - std::cout << "." 
<< std::endl; - } - } else { - RETURN_IF_ERROR(data_loader_->GenerateData( - parser_->Inputs(), zero_input, string_length, string_data)); - } - - // Reserve the required vector space - threads_stat_.reserve(max_threads_); - - return cb::Error::Success; -} - -void -LoadManager::StopWorkerThreads() -{ - early_exit = true; - // wake up all threads - wake_signal_.notify_all(); - - size_t cnt = 0; - for (auto& thread : threads_) { - thread.join(); - if (!threads_stat_[cnt]->status_.IsOk()) { - std::cerr << "Thread [" << cnt - << "] had error: " << (threads_stat_[cnt]->status_) - << std::endl; - } - if (!threads_stat_[cnt]->cb_status_.IsOk()) { - std::cerr << "Thread [" << cnt - << "] had error: " << (threads_stat_[cnt]->cb_status_) - << std::endl; - } - cnt++; - } - threads_.clear(); -} - -std::shared_ptr -LoadManager::MakeSequenceManager( - const uint64_t start_sequence_id, const uint64_t sequence_id_range, - const size_t sequence_length, const bool sequence_length_specified, - const double sequence_length_variation, const bool using_json_data, - std::shared_ptr data_loader) -{ - return std::make_shared( - start_sequence_id, sequence_id_range, sequence_length, - sequence_length_specified, sequence_length_variation, using_json_data, - data_loader); -} - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/load_manager.h b/src/c++/perf_analyzer/load_manager.h deleted file mode 100644 index 799bfa75f..000000000 --- a/src/c++/perf_analyzer/load_manager.h +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
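LoadManager::SwapRequestRecords above drains each worker's records under that worker's own lock and then hands the combined vector to the caller with a single O(1) swap, so the measurement thread never needs to hold every lock at once. A simplified sketch of the pattern (types are stand-ins, not PA's):

#include <iostream>
#include <mutex>
#include <vector>

struct WorkerStat {
  std::mutex mu;
  std::vector<int> records;  // stand-in for per-thread request records
};

// Drain every worker's records under its own lock, then swap the
// combined result into the caller's vector.
void SwapRecords(std::vector<WorkerStat>& workers, std::vector<int>& out)
{
  std::vector<int> total;
  for (auto& w : workers) {
    std::lock_guard<std::mutex> lock(w.mu);
    total.insert(total.end(), w.records.begin(), w.records.end());
    w.records.clear();  // worker starts the next window empty
  }
  total.swap(out);
}

int main()
{
  std::vector<WorkerStat> workers(2);
  workers[0].records = {1, 2};
  workers[1].records = {3};
  std::vector<int> drained;
  SwapRecords(workers, drained);
  std::cout << drained.size() << " records drained\n";  // 3 records drained
  return 0;
}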
-#pragma once - -#include -#include -#include -#include -#include - -#include "client_backend/client_backend.h" -#include "data_loader.h" -#include "iinfer_data_manager.h" -#include "load_worker.h" -#include "perf_utils.h" -#include "sequence_manager.h" - -namespace triton { namespace perfanalyzer { - - -#ifndef DOCTEST_CONFIG_DISABLE -class NaggyMockLoadManager; -#endif - -class LoadManager { - public: - virtual ~LoadManager() = default; - - /// Initialize the Manager class to set up shared memory and inputs - /// \param string_length The length of the random strings to be generated - /// for string inputs. - /// \param string_data The string to be used as string inputs for model. - /// \param zero_input Whether to use zero for model inputs. - /// \param user_data The vector containing path/paths to user-provided data - /// that can be a directory or path to a json data file. - /// \param start_sequence_id The starting sequence ID to be used for iterating - /// through valid sequence IDs. - /// \param sequence_id_range The maximum sequence ID to be used for iterating - /// through valid sequence IDs. - /// \param sequence_length The base length of new sequences. - /// \param sequence_length_specified Whether the user specified the sequence - /// length. - /// \param sequence_length_variation The percentage variation in length of - /// sequences using autogenerated data as input. - void InitManager( - const size_t string_length, const std::string& string_data, - const bool zero_input, std::vector& user_data, - const uint64_t start_sequence_id, const uint64_t sequence_id_range, - const size_t sequence_length, const bool sequence_length_specified, - const double sequence_length_variation); - - /// Check if the load manager is working as expected. - /// \return cb::Error object indicating success or failure. - cb::Error CheckHealth(); - - /// Swap the content of the request records vector recorded by the load - /// manager with a new request records vector - /// \param new_request_records The request records vector to be swapped. - /// \return cb::Error object indicating success or failure. - cb::Error SwapRequestRecords(std::vector& new_request_records); - - /// Get the sum of all contexts' stat - /// \param contexts_stat Returned the accumulated stat from all contexts - /// in load manager - cb::Error GetAccumulatedClientStat(cb::InferStat* contexts_stat); - - /// Returns the amount of valid time each worker thread has averaged in - /// nanoseconds - /// - uint64_t GetIdleTime(); - - /// Resets the counter for tracking valid time - /// - void ResetIdleTime(); - - /// Calculates and returns the total number of sent requests across all - /// threads. Resets individual number of sent requests per thread. - /// \return The total number of sent requests across all threads. - const size_t GetAndResetNumSentRequests(); - - /// \return the batch size used for the inference requests - virtual size_t BatchSize() const { return batch_size_; } - - /// Count the number of requests collected until now. - uint64_t CountCollectedRequests(); - - protected: - LoadManager( - const bool async, const bool streaming, const int32_t batch_size, - const size_t max_threads, const SharedMemoryType shared_memory_type, - const size_t output_shm_size, const std::shared_ptr& parser, - const std::shared_ptr& factory, - const std::unordered_map& - request_parameters); - - /// Complete any subclass-specific manager initialization tasks. 
- virtual void InitManagerFinalize() {} - - /// Helper function to retrieve the input data for the inferences - /// \param string_length The length of the random strings to be generated - /// for string inputs. - /// \param string_data The string to be used as string inputs for model. - /// \param zero_input Whether to use zero for model inputs. - /// \param user_data The vector containing path/paths to user-provided data - /// that can be a directory or path to a json data file. - /// \return cb::Error object indicating success or failure. - cb::Error InitManagerInputs( - const size_t string_length, const std::string& string_data, - const bool zero_input, std::vector& user_data); - - /// Stops all the worker threads generating the request load. - void StopWorkerThreads(); - - protected: - bool async_; - bool streaming_; - size_t batch_size_; - size_t max_threads_; - bool on_sequence_model_; - - std::shared_ptr parser_; - std::shared_ptr factory_; - - bool using_json_data_; - - std::shared_ptr data_loader_; - std::unique_ptr backend_; - std::shared_ptr infer_data_manager_; - - // Track the workers so they all go out of scope at the - // same time - std::vector> workers_; - - // Worker threads that loads the server with inferences - std::vector threads_; - // Contains the statistics on the current working threads - std::vector> threads_stat_; - - // Use condition variable to pause/continue worker threads - std::condition_variable wake_signal_; - std::mutex wake_mutex_; - - std::shared_ptr sequence_manager_{nullptr}; - - virtual std::shared_ptr MakeSequenceManager( - const uint64_t start_sequence_id, const uint64_t sequence_id_range, - const size_t sequence_length, const bool sequence_length_specified, - const double sequence_length_variation, const bool using_json_data, - std::shared_ptr data_loader); - -#ifndef DOCTEST_CONFIG_DISABLE - friend NaggyMockLoadManager; - - public: - LoadManager() = default; -#endif -}; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/load_worker.cc b/src/c++/perf_analyzer/load_worker.cc deleted file mode 100644 index a32976c6a..000000000 --- a/src/c++/perf_analyzer/load_worker.cc +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "load_worker.h" - -#include -#include - -#include "client_backend/client_backend.h" -#include "perf_utils.h" - -namespace triton { namespace perfanalyzer { - -bool -LoadWorker::ShouldExit() -{ - bool bad_status = - !thread_stat_->cb_status_.IsOk() || !thread_stat_->status_.IsOk(); - - bool done_with_request_count = - thread_config_->num_requests_ != 0 && - thread_stat_->num_sent_requests_ >= thread_config_->num_requests_; - - return early_exit || bad_status || done_with_request_count; -} - -bool -LoadWorker::HandleExitConditions() -{ - if (ShouldExit()) { - CompleteOngoingSequences(); - thread_stat_->idle_timer.Start(); - WaitForOngoingRequests(); - return true; - } - return false; -} - -void -LoadWorker::CompleteOngoingSequences() -{ - if (on_sequence_model_) { - for (size_t ctx_id = 0; ctx_id < ctxs_.size(); ++ctx_id) { - size_t seq_stat_index = GetSeqStatIndex(ctx_id); - ctxs_[ctx_id]->CompleteOngoingSequence(seq_stat_index); - } - } -} - -void -LoadWorker::WaitForOngoingRequests() -{ - while (GetNumOngoingRequests() != 0) { - std::this_thread::sleep_for(std::chrono::milliseconds(50)); - } -} - -uint -LoadWorker::GetNumOngoingRequests() -{ - uint num = 0; - for (auto ctx : ctxs_) { - num += ctx->GetNumOngoingRequests(); - } - return num; -} - -void -LoadWorker::CreateContext() -{ - auto ctx = CreateInferContext(); - ctx->Init(); - CreateContextFinalize(ctx); - ctxs_.push_back(ctx); -} - -uint32_t -LoadWorker::GetCtxId() -{ - std::lock_guard lk(cb_mtx_); - return ctx_id_tracker_->Get(); -} - - -void -LoadWorker::RestoreFreeCtxId(uint32_t ctx_id) -{ - if (!async_) { - { - std::lock_guard lock(cb_mtx_); - ctx_id_tracker_->Restore(ctx_id); - } - } -} - -void -LoadWorker::AsyncCallbackFinalize(uint32_t ctx_id) -{ - // avoid competition over 'cb_mtx_' - { - std::lock_guard lk(cb_mtx_); - ctx_id_tracker_->Restore(ctx_id); - notified_ = true; - } - - cb_cv_.notify_all(); -} - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/load_worker.h b/src/c++/perf_analyzer/load_worker.h deleted file mode 100644 index dd7e0297f..000000000 --- a/src/c++/perf_analyzer/load_worker.h +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
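GetCtxId, RestoreFreeCtxId, and AsyncCallbackFinalize in load_worker.cc above all guard a shared pool of context ids with one mutex; the async completion path additionally flips `notified_` and signals a condition variable so the issuing thread can wake and reuse the freed context. A minimal FIFO tracker in the same spirit (hypothetical class, not PA's CtxIdTracker):

#include <condition_variable>
#include <cstdint>
#include <iostream>
#include <mutex>
#include <queue>

// FIFO pool of reusable context ids, signalled on release much like
// AsyncCallbackFinalize notifies the issuing thread.
class FifoCtxIdPool {
 public:
  explicit FifoCtxIdPool(uint32_t count)
  {
    for (uint32_t id = 0; id < count; id++) {
      free_ids_.push(id);
    }
  }

  // Block until an id is free, then hand it out.
  uint32_t Acquire()
  {
    std::unique_lock<std::mutex> lk(mu_);
    cv_.wait(lk, [this] { return !free_ids_.empty(); });
    uint32_t id = free_ids_.front();
    free_ids_.pop();
    return id;
  }

  // Return an id to the pool and wake one waiter.
  void Release(uint32_t id)
  {
    {
      std::lock_guard<std::mutex> lk(mu_);
      free_ids_.push(id);
    }
    cv_.notify_one();
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  std::queue<uint32_t> free_ids_;
};

int main()
{
  FifoCtxIdPool pool(2);
  uint32_t a = pool.Acquire();
  uint32_t b = pool.Acquire();
  pool.Release(a);  // a is reusable again; a blocked Acquire() would wake here
  std::cout << "acquired " << a << " and " << b << "\n";
  return 0;
}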
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#pragma once - -#include -#include -#include -#include - -#include "ctx_id_tracker_factory.h" -#include "data_loader.h" -#include "infer_context.h" -#include "iworker.h" -#include "model_parser.h" -#include "sequence_manager.h" -#include "thread_config.h" - -namespace triton { namespace perfanalyzer { - -/// Abstract base class for worker threads -/// -class LoadWorker : public IWorker { - protected: - LoadWorker( - uint32_t id, std::shared_ptr thread_stat, - std::shared_ptr thread_config, - const std::shared_ptr parser, - std::shared_ptr data_loader, - const std::shared_ptr factory, - const bool on_sequence_model, const bool async, const bool streaming, - const int32_t batch_size, const bool using_json_data, - std::condition_variable& wake_signal, std::mutex& wake_mutex, - bool& execute, - const std::shared_ptr& infer_data_manager, - std::shared_ptr sequence_manager) - : id_(id), thread_stat_(thread_stat), thread_config_(thread_config), - parser_(parser), data_loader_(data_loader), factory_(factory), - on_sequence_model_(on_sequence_model), async_(async), - streaming_(streaming), batch_size_(batch_size), - using_json_data_(using_json_data), wake_signal_(wake_signal), - wake_mutex_(wake_mutex), execute_(execute), - infer_data_manager_(infer_data_manager), - sequence_manager_(sequence_manager) - { - } - - virtual ~LoadWorker() = default; - - protected: - // Return the total number of async requests that have started and not - // finished - uint GetNumOngoingRequests(); - - void SendInferRequest(uint32_t ctx_id, bool delayed = false) - { - if (ShouldExit()) { - return; - } - - if (on_sequence_model_) { - uint32_t seq_stat_index = GetSeqStatIndex(ctx_id); - ctxs_[ctx_id]->SendSequenceInferRequest(seq_stat_index, delayed); - } else { - ctxs_[ctx_id]->SendInferRequest(delayed); - } - } - - virtual std::shared_ptr CreateInferContext() - { - return std::make_shared( - id_, ctxs_.size(), async_, streaming_, on_sequence_model_, - using_json_data_, batch_size_, thread_stat_, data_loader_, parser_, - factory_, execute_, infer_data_manager_, sequence_manager_); - } - - // Create an inference context and add it to ctxs_ - virtual void CreateContext(); - - // Any code that needs to execute after the Context has been created - virtual void CreateContextFinalize(std::shared_ptr ctx) = 0; - - // Detect the cases where this thread needs to exit - bool ShouldExit(); - - // Detect and handle the case where this thread needs to exit - // Returns true if an exit condition was met - bool HandleExitConditions(); - void CompleteOngoingSequences(); - void WaitForOngoingRequests(); - - virtual uint32_t GetSeqStatIndex(uint32_t ctx_id) = 0; - uint32_t GetCtxId(); - void RestoreFreeCtxId(uint32_t ctx_id); - - void AsyncCallbackFinalize(uint32_t 
ctx_id); - - uint32_t id_; - - std::vector> ctxs_; - std::shared_ptr ctx_id_tracker_; - - // Variables used to signal async request completion - bool notified_ = false; - std::mutex cb_mtx_; - std::condition_variable cb_cv_; - - // TODO REFACTOR TMA-1017 is there a better way to do threading than to pass - // the same cv/mutex into every thread by reference? Used to wake up this - // thread if it has been put to sleep - std::condition_variable& wake_signal_; - std::mutex& wake_mutex_; - - // TODO REFACTOR TMA-1017 is there a better way to communicate this than a - // shared bool reference? Used to pause execution of this thread - bool& execute_; - - // Stats for this thread - std::shared_ptr thread_stat_; - // Configuration for this thread - std::shared_ptr thread_config_; - - std::shared_ptr data_loader_; - const std::shared_ptr parser_; - const std::shared_ptr factory_; - const std::shared_ptr infer_data_manager_; - - const bool on_sequence_model_; - const bool async_; - const bool streaming_; - const int32_t batch_size_; - const bool using_json_data_; - - std::shared_ptr sequence_manager_{nullptr}; -}; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/main.cc b/src/c++/perf_analyzer/main.cc deleted file mode 100644 index bf5176294..000000000 --- a/src/c++/perf_analyzer/main.cc +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
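The wake_signal_/wake_mutex_/execute_ trio that every LoadWorker receives by reference (see the TODO notes above) implements a simple pause/resume gate: workers sleep on the shared condition variable until the manager flips `execute`. A compact sketch of that gate under the same assumptions:

#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

std::condition_variable wake_signal;
std::mutex wake_mutex;
bool execute = false;  // flipped by the "manager" thread

void WorkerLoop()
{
  // Sleep until the manager sets execute, mirroring how a paused
  // worker waits before issuing inferences.
  std::unique_lock<std::mutex> lk(wake_mutex);
  wake_signal.wait(lk, [] { return execute; });
  std::cout << "worker resumed\n";
}

int main()
{
  std::thread worker(WorkerLoop);
  std::this_thread::sleep_for(std::chrono::milliseconds(10));
  {
    std::lock_guard<std::mutex> lk(wake_mutex);
    execute = true;  // the state change must happen under the mutex
  }
  wake_signal.notify_all();
  worker.join();
  return 0;
}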
- -#include "perf_analyzer.h" -#include "perf_analyzer_exception.h" - -namespace pa = triton::perfanalyzer; - -int -main(int argc, char* argv[]) -{ - try { - triton::perfanalyzer::CLParser clp; - pa::PAParamsPtr params = clp.Parse(argc, argv); - - PerfAnalyzer analyzer(params); - analyzer.Run(); - } - catch (pa::PerfAnalyzerException& e) { - std::cerr << e.what() << std::endl; - return e.GetError(); - } - - return 0; -} diff --git a/src/c++/perf_analyzer/metrics.h b/src/c++/perf_analyzer/metrics.h deleted file mode 100644 index 8fbb7584c..000000000 --- a/src/c++/perf_analyzer/metrics.h +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#pragma once - -#include -#include -#include - -namespace triton { namespace perfanalyzer { - -/// Struct that holds server-side metrics for the inference server. -/// The keys for each map are GPU UUIDs and the values are described in the -/// variable names. -struct Metrics { - std::map gpu_utilization_per_gpu{}; - std::map gpu_power_usage_per_gpu{}; - std::map gpu_memory_used_bytes_per_gpu{}; - std::map gpu_memory_total_bytes_per_gpu{}; -}; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/metrics_manager.cc b/src/c++/perf_analyzer/metrics_manager.cc deleted file mode 100644 index 0e1262ce3..000000000 --- a/src/c++/perf_analyzer/metrics_manager.cc +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. 
-// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "metrics_manager.h" - -#include -#include -#include - -#include "constants.h" -#include "perf_analyzer_exception.h" - -namespace triton { namespace perfanalyzer { - -MetricsManager::MetricsManager( - std::shared_ptr client_backend, - uint64_t metrics_interval_ms) - : client_backend_(client_backend), metrics_interval_ms_(metrics_interval_ms) -{ -} - -MetricsManager::~MetricsManager() -{ - if (query_loop_future_.valid()) { - StopQueryingMetrics(); - } -} - -void -MetricsManager::StartQueryingMetrics() -{ - should_keep_querying_ = true; - query_loop_future_ = - std::async(&MetricsManager::QueryMetricsEveryNMilliseconds, this); -} - -void -MetricsManager::QueryMetricsEveryNMilliseconds() -{ - while (should_keep_querying_) { - const auto& start{std::chrono::system_clock::now()}; - - Metrics metrics{}; - clientbackend::Error err{client_backend_->Metrics(metrics)}; - if (err.IsOk() == false) { - throw PerfAnalyzerException(err.Message(), err.Err()); - } - - CheckForMissingMetrics(metrics); - - { - std::lock_guard metrics_lock{metrics_mutex_}; - metrics_.push_back(std::move(metrics)); - } - - const auto& end{std::chrono::system_clock::now()}; - const auto& duration{end - start}; - const auto& remainder{ - std::chrono::milliseconds(metrics_interval_ms_) - duration}; - - CheckForMetricIntervalTooShort(remainder, duration); - - { - std::unique_lock query_loop_lock{query_loop_mutex_}; - query_loop_cv_.wait_for(query_loop_lock, remainder); - } - } -} - -void -MetricsManager::CheckForMissingMetrics(const Metrics& metrics) -{ - if (has_given_missing_metrics_warning_) { - return; - } - if (metrics.gpu_utilization_per_gpu.empty()) { - std::cerr << "WARNING: Unable to parse 'nv_gpu_utilization' metric." - << std::endl; - has_given_missing_metrics_warning_ = true; - } - if (metrics.gpu_power_usage_per_gpu.empty()) { - std::cerr << "WARNING: Unable to parse 'nv_gpu_power_usage' metric." - << std::endl; - has_given_missing_metrics_warning_ = true; - } - if (metrics.gpu_memory_used_bytes_per_gpu.empty()) { - std::cerr << "WARNING: Unable to parse 'nv_gpu_memory_used_bytes' metric." - << std::endl; - has_given_missing_metrics_warning_ = true; - } - if (metrics.gpu_memory_total_bytes_per_gpu.empty()) { - std::cerr << "WARNING: Unable to parse 'nv_gpu_memory_total_bytes' metric." 
- << std::endl; - has_given_missing_metrics_warning_ = true; - } -} - -void -MetricsManager::CheckForMetricIntervalTooShort( - const std::chrono::nanoseconds& remainder, - const std::chrono::nanoseconds& duration) -{ - if (has_given_metric_interval_warning_) { - return; - } - if (remainder < std::chrono::nanoseconds::zero()) { - std::cerr << "WARNING: Triton metrics endpoint latency (" - << std::chrono::duration_cast(duration) - .count() - << "ms) is larger than the querying interval (" - << metrics_interval_ms_ - << "ms). Please try a larger querying interval " - "via `--triton-metrics-interval`." - << std::endl; - has_given_metric_interval_warning_ = true; - } -} - -void -MetricsManager::CheckQueryingStatus() -{ - if (query_loop_future_.valid() && - query_loop_future_.wait_for(std::chrono::seconds(0)) == - std::future_status::ready) { - query_loop_future_.get(); - } -} - -void -MetricsManager::GetLatestMetrics(std::vector& metrics) -{ - if (metrics.empty() == false) { - throw PerfAnalyzerException( - "MetricsManager::GetLatestMetrics() must be passed an empty vector.", - GENERIC_ERROR); - } - std::lock_guard metrics_lock{metrics_mutex_}; - metrics_.swap(metrics); -} - -void -MetricsManager::StopQueryingMetrics() -{ - should_keep_querying_ = false; - query_loop_cv_.notify_one(); - if (query_loop_future_.valid()) { - query_loop_future_.get(); - } -} - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/metrics_manager.h b/src/c++/perf_analyzer/metrics_manager.h deleted file mode 100644 index ae6b6135f..000000000 --- a/src/c++/perf_analyzer/metrics_manager.h +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
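MetricsManager::QueryMetricsEveryNMilliseconds above subtracts the time spent scraping from the configured interval and then sleeps on a condition variable, so StopQueryingMetrics can interrupt the wait immediately instead of waiting out the remainder. A trimmed-down sketch of that interruptible polling loop (the poll body is a stand-in):

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <future>
#include <iostream>
#include <mutex>
#include <thread>

class Poller {
 public:
  void Start(std::chrono::milliseconds interval)
  {
    keep_running_ = true;
    loop_future_ = std::async(std::launch::async, [this, interval] {
      while (keep_running_) {
        const auto start = std::chrono::steady_clock::now();
        std::cout << "polling...\n";  // stand-in for the metrics scrape
        const auto remainder =
            interval - (std::chrono::steady_clock::now() - start);
        // Sleep out the remainder, but wake immediately if Stop() fires.
        std::unique_lock<std::mutex> lk(mu_);
        cv_.wait_for(lk, remainder, [this] { return !keep_running_; });
      }
    });
  }

  void Stop()
  {
    keep_running_ = false;
    cv_.notify_one();
    if (loop_future_.valid()) {
      loop_future_.get();  // propagate any exception from the loop
    }
  }

 private:
  std::atomic<bool> keep_running_{false};
  std::mutex mu_;
  std::condition_variable cv_;
  std::future<void> loop_future_{};
};

int main()
{
  Poller p;
  p.Start(std::chrono::milliseconds(50));
  std::this_thread::sleep_for(std::chrono::milliseconds(120));
  p.Stop();
  return 0;
}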
-#pragma once - -#include -#include -#include -#include -#include -#include -#include - -#include "client_backend/client_backend.h" -#include "metrics.h" - -namespace triton { namespace perfanalyzer { - -#ifndef DOCTEST_CONFIG_DISABLE -class TestMetricsManager; -#endif - -class MetricsManager { - public: - MetricsManager( - std::shared_ptr client_backend, - uint64_t metrics_interval_ms); - - /// Ends the background thread, redundant in case StopQueryingMetrics() isn't - /// called - ~MetricsManager(); - - /// Starts background thread that queries metrics on an interval - void StartQueryingMetrics(); - - /// Checks if background thread threw exception and propagates it if so - void CheckQueryingStatus(); - - /// Puts the latest-collected metrics from background thread into vector - /// output parameter to be used by main thread - void GetLatestMetrics(std::vector& metrics_per_timestamp); - - /// Ends the background thread - void StopQueryingMetrics(); - - private: - void QueryMetricsEveryNMilliseconds(); - void CheckForMissingMetrics(const Metrics& metrics); - void CheckForMetricIntervalTooShort( - const std::chrono::nanoseconds& remainder, - const std::chrono::nanoseconds& duration); - - std::shared_ptr client_backend_{nullptr}; - uint64_t metrics_interval_ms_{0}; - std::mutex metrics_mutex_{}; - std::vector metrics_{}; - bool should_keep_querying_{false}; - std::future query_loop_future_{}; - std::mutex query_loop_mutex_{}; - std::condition_variable query_loop_cv_{}; - bool has_given_missing_metrics_warning_{false}; - bool has_given_metric_interval_warning_{false}; - -#ifndef DOCTEST_CONFIG_DISABLE - friend TestMetricsManager; - - public: - MetricsManager() = default; -#endif -}; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/mock_concurrency_worker.h b/src/c++/perf_analyzer/mock_concurrency_worker.h deleted file mode 100644 index 636b92743..000000000 --- a/src/c++/perf_analyzer/mock_concurrency_worker.h +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
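GetLatestMetrics above enforces a small but important contract: the caller passes an empty vector and receives everything gathered so far, while the shared buffer is left empty for the next window, all via one swap under the lock. A self-contained sketch of that drain contract (the Metrics struct is trimmed for illustration):

#include <iostream>
#include <mutex>
#include <stdexcept>
#include <vector>

struct Metrics {  // trimmed stand-in for the struct in metrics.h
  double gpu_utilization{0.0};
};

std::mutex metrics_mutex;
std::vector<Metrics> collected;  // filled by a background thread

// Caller must pass an empty vector; the shared buffer is handed over
// and reset in O(1) under the lock.
void DrainLatest(std::vector<Metrics>& out)
{
  if (!out.empty()) {
    throw std::invalid_argument(
        "DrainLatest() must be passed an empty vector");
  }
  std::lock_guard<std::mutex> lock(metrics_mutex);
  collected.swap(out);
}

int main()
{
  collected.push_back({0.55});
  collected.push_back({0.60});
  std::vector<Metrics> latest;
  DrainLatest(latest);
  std::cout << latest.size() << " snapshots drained\n";  // 2 snapshots drained
  return 0;
}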
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#pragma once - -#include "concurrency_worker.h" -#include "gmock/gmock.h" - -namespace triton { namespace perfanalyzer { - -class NaggyMockConcurrencyWorker : public ConcurrencyWorker { - public: - NaggyMockConcurrencyWorker( - uint32_t id, std::shared_ptr thread_stat, - std::shared_ptr thread_config, - const std::shared_ptr parser, - std::shared_ptr data_loader, - const std::shared_ptr factory, - const bool on_sequence_model, const bool async, - const size_t max_concurrency, const bool using_json_data, - const bool streaming, const int32_t batch_size, - std::condition_variable& wake_signal, std::mutex& wake_mutex, - size_t& active_threads, bool& execute, - const std::shared_ptr& infer_data_manager, - std::shared_ptr sequence_manager) - : ConcurrencyWorker( - id, thread_stat, thread_config, parser, data_loader, factory, - on_sequence_model, async, max_concurrency, using_json_data, - streaming, batch_size, wake_signal, wake_mutex, active_threads, - execute, infer_data_manager, sequence_manager) - { - ON_CALL(*this, Infer()).WillByDefault([this]() -> void { - ConcurrencyWorker::Infer(); - }); - } - - MOCK_METHOD(void, Infer, (), (override)); - - void EmptyInfer() { thread_config_->is_paused_ = true; } -}; - -// Non-naggy version of Mock (won't warn when using default gmock -// mocked function) -using MockConcurrencyWorker = testing::NiceMock; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/mock_data_loader.h b/src/c++/perf_analyzer/mock_data_loader.h deleted file mode 100644 index 0eccdabff..000000000 --- a/src/c++/perf_analyzer/mock_data_loader.h +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
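NaggyMockConcurrencyWorker above uses a common gmock idiom: ON_CALL delegates the mocked method to the real base implementation by default, and a NiceMock alias silences "uninteresting call" warnings. A minimal self-contained version of the idiom (toy Worker class, not PA's):

#include <iostream>

#include "gmock/gmock.h"
#include "gtest/gtest.h"

class Worker {
 public:
  virtual ~Worker() = default;
  virtual void Infer() { std::cout << "real Infer ran\n"; }
};

class NaggyMockWorker : public Worker {
 public:
  NaggyMockWorker()
  {
    // By default, forward to the real implementation so existing
    // behavior is kept unless a test overrides it.
    ON_CALL(*this, Infer()).WillByDefault([this]() { Worker::Infer(); });
  }
  MOCK_METHOD(void, Infer, (), (override));
};

// Suppresses warnings about calls the test did not EXPECT.
using MockWorker = testing::NiceMock<NaggyMockWorker>;

TEST(MockWorkerTest, DelegatesToReal)
{
  MockWorker mock;
  mock.Infer();  // runs Worker::Infer via the ON_CALL default action
}

int main(int argc, char** argv)
{
  testing::InitGoogleMock(&argc, argv);
  return RUN_ALL_TESTS();
}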
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#pragma once - -#include "data_loader.h" -#include "gmock/gmock.h" - -namespace triton { namespace perfanalyzer { - -/// Mock DataLoader class used for testing to allow JSON data to be read -/// from string, rather than file. -/// -class NaggyMockDataLoader : public DataLoader { - public: - NaggyMockDataLoader() { SetupMocks(); } - NaggyMockDataLoader(size_t batch_size) : DataLoader(batch_size) - { - SetupMocks(); - } - - void SetupMocks() - { - ON_CALL(*this, GetTotalSteps(testing::_)) - .WillByDefault([this](size_t stream_id) -> size_t { - return this->DataLoader::GetTotalSteps(stream_id); - }); - ON_CALL(*this, ReadFile(testing::_, testing::_)) - .WillByDefault( - [this]( - const std::string& path, - std::vector* contents) -> cb::Error { - return this->DataLoader::ReadFile(path, contents); - }); - ON_CALL(*this, ReadTextFile(testing::_, testing::_)) - .WillByDefault( - [this]( - const std::string& path, - std::vector* contents) -> cb::Error { - return this->DataLoader::ReadTextFile(path, contents); - }); - } - - MOCK_METHOD(size_t, GetTotalSteps, (size_t), (override)); - MOCK_METHOD(cb::Error, ReadFile, (const std::string&, std::vector*)); - MOCK_METHOD( - cb::Error, ReadTextFile, (const std::string&, std::vector*)); - - cb::Error ReadDataFromJSON( - const std::shared_ptr& inputs, - const std::shared_ptr& outputs, - const std::string& json_file) override - { - return ReadDataFromStr(json_file, inputs, outputs); - } - - cb::Error ReadDataFromStr( - const std::string& str, const std::shared_ptr& inputs, - const std::shared_ptr& outputs) - { - rapidjson::Document d{}; - const unsigned int parseFlags = rapidjson::kParseNanAndInfFlag; - d.Parse(str.c_str()); - - return ParseData(d, inputs, outputs); - }; - - std::vector& step_num_{DataLoader::step_num_}; - size_t& data_stream_cnt_{DataLoader::data_stream_cnt_}; -}; - -// Non-naggy version of Mock Data Loader (won't warn when using default gmock -// mocked function) -using MockDataLoader = testing::NiceMock; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/mock_infer_context.h b/src/c++/perf_analyzer/mock_infer_context.h deleted file mode 100644 index e1c15d03c..000000000 --- a/src/c++/perf_analyzer/mock_infer_context.h +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. 
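ReadDataFromStr above parses JSON with rapidjson's kParseNanAndInfFlag so that NaN/Inf literals in test data are accepted; the flag is supplied to Parse as a template argument. A standalone sketch of that parsing step, assuming rapidjson is available:

#include <iostream>

#include "rapidjson/document.h"

int main()
{
  // NaN would be rejected by a default Parse(); the flag allows it.
  const char* json = R"({"value": NaN, "count": 3})";

  rapidjson::Document d;
  constexpr unsigned int kParseFlags = rapidjson::kParseNanAndInfFlag;
  d.Parse<kParseFlags>(json);

  if (d.HasParseError()) {
    std::cerr << "parse failed\n";
    return 1;
  }
  std::cout << "count = " << d["count"].GetInt() << "\n";  // count = 3
  return 0;
}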
-// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#pragma once - -#include "gmock/gmock.h" -#include "infer_context.h" - -namespace triton { namespace perfanalyzer { - -class NaggyMockInferContext : public InferContext { - public: - NaggyMockInferContext() - { - ON_CALL(*this, SendRequest(testing::_, testing::_, testing::_)) - .WillByDefault( - [this]( - const uint64_t request_id, const bool delayed, - const uint64_t sequence_id) -> void { - this->InferContext::SendRequest(request_id, delayed, sequence_id); - }); - } - - MOCK_METHOD( - void, SendRequest, (const uint64_t, const bool, const uint64_t), - (override)); - - std::shared_ptr& sequence_manager_{ - InferContext::sequence_manager_}; - std::shared_ptr& data_loader_{InferContext::data_loader_}; - std::shared_ptr& infer_data_manager_{ - InferContext::infer_data_manager_}; - std::shared_ptr& thread_stat_{InferContext::thread_stat_}; - std::reference_wrapper& execute_{InferContext::execute_}; - bool& using_json_data_{InferContext::using_json_data_}; - bool& async_{InferContext::async_}; - bool& streaming_{InferContext::streaming_}; - InferData& infer_data_{InferContext::infer_data_}; - std::unique_ptr& infer_backend_{ - InferContext::infer_backend_}; - std::function& async_callback_func_{ - InferContext::async_callback_func_}; -}; - -using MockInferContext = testing::NiceMock; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/mock_infer_data_manager.h b/src/c++/perf_analyzer/mock_infer_data_manager.h deleted file mode 100644 index 8f9cd7ec0..000000000 --- a/src/c++/perf_analyzer/mock_infer_data_manager.h +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
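NaggyMockInferContext above also shows how a test double can re-publish its base's protected state: each member reference (e.g. thread_stat_{InferContext::thread_stat_}) is a public alias bound to the protected member, so tests can inspect or rewire internals without friend declarations. A toy version of the trick:

#include <cassert>

class Widget {
 protected:
  int counter_{0};

 public:
  virtual ~Widget() = default;
  void Bump() { counter_++; }
};

class MockWidget : public Widget {
 public:
  // Public reference bound to the protected member: tests can read and
  // write counter_ directly through the mock.
  int& counter_{Widget::counter_};
};

int main()
{
  MockWidget w;
  w.Bump();
  assert(w.counter_ == 1);
  w.counter_ = 41;
  w.Bump();
  assert(w.counter_ == 42);
  return 0;
}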
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#pragma once
-
-#include "gmock/gmock.h"
-#include "infer_data_manager.h"
-#include "infer_data_manager_shm.h"
-#include "mock_client_backend.h"
-
-namespace triton { namespace perfanalyzer {
-
-
-class MockInferDataManagerShm : public InferDataManagerShm {
- public:
-  MockInferDataManagerShm(
-      const int32_t batch_size, const SharedMemoryType shared_memory_type,
-      const size_t output_shm_size,
-      std::unordered_map<std::string, cb::RequestParameter>
-          request_parameters,
-      const std::shared_ptr<ModelParser>& parser,
-      const std::shared_ptr<cb::ClientBackendFactory>& factory,
-      const std::shared_ptr<DataLoader>& data_loader)
-      : InferDataManagerShm(
-            batch_size, shared_memory_type, output_shm_size,
-            request_parameters, parser, factory, data_loader)
-  {
-  }
-
-  // Mocked version of the CopySharedMemory method in the load manager.
-  // Tracks the mapping of shared memory label to data
-  //
-  cb::Error CopySharedMemory(
-      uint8_t* input_shm_ptr, const std::vector<TensorData>& input_datas,
-      bool is_shape_tensor, std::string& region_name) override
-  {
-    std::vector<int32_t> vals;
-
-    for (size_t i = 0; i < input_datas.size(); i++) {
-      int32_t val = *reinterpret_cast<const int32_t*>(input_datas[i].data_ptr);
-      vals.push_back(val);
-    }
-    mocked_shared_memory_regions.insert(std::make_pair(region_name, vals));
-    return cb::Error::Success;
-  }
-
-  cb::Error CreateInferInput(
-      cb::InferInput** infer_input, const cb::BackendKind kind,
-      const std::string& name, const std::vector<int64_t>& dims,
-      const std::string& datatype) override
-  {
-    *infer_input = new cb::MockInferInput(kind, name, dims, datatype);
-    return cb::Error::Success;
-  }
-
-  // Tracks the mapping of shared memory label to data
-  std::map<std::string, std::vector<int32_t>> mocked_shared_memory_regions;
-};
-
-
-class MockInferDataManager : public InferDataManager {
- public:
-  MockInferDataManager() { SetupMocks(); }
-
-  MockInferDataManager(
-      const size_t max_threads, const int32_t batch_size,
-      std::unordered_map<std::string, cb::RequestParameter>
-          request_parameters,
-      const std::shared_ptr<ModelParser>& parser,
-      const std::shared_ptr<cb::ClientBackendFactory>& factory,
-      const std::shared_ptr<DataLoader>& data_loader)
-      : InferDataManager(
-            max_threads, batch_size, request_parameters, parser, factory,
-            data_loader)
-  {
-    SetupMocks();
-  }
-
-  void SetupMocks()
-  {
-    ON_CALL(
-        *this, UpdateInferData(testing::_, testing::_, testing::_, testing::_))
-        .WillByDefault(
-            [this](
-                size_t thread_id, int stream_index, int step_index,
-                InferData& infer_data) -> cb::Error {
-              return this->InferDataManager::UpdateInferData(
-                  thread_id, stream_index, step_index, infer_data);
-            });
-  }
-
-  MOCK_METHOD(
-      cb::Error, UpdateInferData, (size_t, int, int, InferData&), (override));
-
-  cb::Error CreateInferInput(
-      cb::InferInput** infer_input, const cb::BackendKind kind,
-      const std::string& name, const std::vector<int64_t>& dims,
-      const std::string& datatype) override
-  {
-    *infer_input = new cb::MockInferInput(kind, name,
dims, datatype); - return cb::Error::Success; - } -}; - -class MockInferDataManagerFactory { - public: - static std::shared_ptr CreateMockInferDataManager( - const size_t max_threads, const int32_t batch_size, - const SharedMemoryType shared_memory_type, const size_t output_shm_size, - std::unordered_map - request_parameters, - const std::shared_ptr& parser, - const std::shared_ptr& factory, - const std::shared_ptr& data_loader) - { - if (shared_memory_type == SharedMemoryType::NO_SHARED_MEMORY) { - return std::make_shared>( - max_threads, batch_size, request_parameters, parser, factory, - data_loader); - } else { - return std::make_shared>( - batch_size, shared_memory_type, output_shm_size, request_parameters, - parser, factory, data_loader); - } - } -}; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/mock_inference_profiler.h b/src/c++/perf_analyzer/mock_inference_profiler.h deleted file mode 100644 index 7e08e489b..000000000 --- a/src/c++/perf_analyzer/mock_inference_profiler.h +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
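
The mock headers removed above all share one gmock idiom: a "naggy" mock whose constructor wires every ON_CALL default to the real base-class implementation, plus a testing::NiceMock alias that silences "uninteresting call" warnings, and reference members that expose protected base state to tests. A minimal self-contained sketch of that idiom, using a hypothetical Counter class rather than any Perf Analyzer type:

    #include "gmock/gmock.h"
    #include "gtest/gtest.h"

    // Real class; the method is virtual so gmock can intercept it.
    class Counter {
     public:
      virtual ~Counter() = default;
      virtual int Increment() { return ++count_; }

     protected:
      int count_{0};
    };

    // "Naggy" mock: by default every call forwards to the real code, so a
    // test gets real behavior plus the ability to EXPECT_CALL or override.
    class NaggyMockCounter : public Counter {
     public:
      NaggyMockCounter()
      {
        ON_CALL(*this, Increment()).WillByDefault([this]() {
          return this->Counter::Increment();
        });
      }
      MOCK_METHOD(int, Increment, (), (override));

      // Reference alias exposing protected base state to tests, mirroring
      // the step_num_ / sequence_statuses_ aliases in the headers above.
      int& count_{Counter::count_};
    };

    // NiceMock swallows warnings for calls without a matching EXPECT_CALL.
    using MockCounter = testing::NiceMock<NaggyMockCounter>;

    TEST(MockCounterTest, DelegatesToRealImplementation)
    {
      MockCounter counter;
      EXPECT_EQ(1, counter.Increment());  // runs the real Counter::Increment
      counter.count_ = 41;
      EXPECT_EQ(42, counter.Increment());
    }
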
-#pragma once - -#include "gmock/gmock.h" -#include "inference_profiler.h" - -namespace triton { namespace perfanalyzer { - -class NaggyMockInferenceProfiler : public InferenceProfiler { - public: - NaggyMockInferenceProfiler() - { - ON_CALL( - *this, ValidLatencyMeasurement( - testing::_, testing::_, testing::_, testing::_, testing::_, - testing::_)) - .WillByDefault( - [this]( - const std::pair& valid_range, - size_t& valid_sequence_count, size_t& delayed_request_count, - std::vector* latencies, size_t& response_count, - std::vector& valid_requests) -> void { - this->InferenceProfiler::ValidLatencyMeasurement( - valid_range, valid_sequence_count, delayed_request_count, - latencies, response_count, valid_requests); - }); - ON_CALL(*this, SummarizeLatency(testing::_, testing::_)) - .WillByDefault( - [this]( - const std::vector& latencies, - PerfStatus& summary) -> cb::Error { - return this->InferenceProfiler::SummarizeLatency( - latencies, summary); - }); - ON_CALL(*this, MergePerfStatusReports(testing::_, testing::_)) - .WillByDefault( - [this]( - std::deque& perf_status, - PerfStatus& summary_status) -> cb::Error { - return this->InferenceProfiler::MergePerfStatusReports( - perf_status, summary_status); - }); - ON_CALL(*this, MergeServerSideStats(testing::_, testing::_)) - .WillByDefault( - [this]( - std::vector& server_side_stats, - ServerSideStats& server_side_summary) -> cb::Error { - return this->InferenceProfiler::MergeServerSideStats( - server_side_stats, server_side_summary); - }); - ON_CALL( - *this, SummarizeClientStat( - testing::_, testing::_, testing::_, testing::_, testing::_, - testing::_, testing::_, testing::_)) - .WillByDefault( - [this]( - const cb::InferStat& start_stat, const cb::InferStat& end_stat, - const uint64_t duration_ns, const size_t valid_request_count, - const size_t delayed_request_count, - const size_t valid_sequence_count, const size_t response_count, - PerfStatus& summary) -> cb::Error { - return this->InferenceProfiler::SummarizeClientStat( - start_stat, end_stat, duration_ns, valid_request_count, - delayed_request_count, valid_sequence_count, response_count, - summary); - }); - }; - - MOCK_METHOD0(IncludeServerStats, bool()); - MOCK_METHOD( - void, ValidLatencyMeasurement, - ((const std::pair&), size_t&, size_t&, - std::vector*, size_t&, std::vector&), - (override)); - MOCK_METHOD( - cb::Error, SummarizeLatency, (const std::vector&, PerfStatus&), - (override)); - MOCK_METHOD( - cb::Error, MergePerfStatusReports, (std::deque&, PerfStatus&), - (override)); - MOCK_METHOD( - cb::Error, MergeServerSideStats, - (std::vector&, ServerSideStats&), (override)); - MOCK_METHOD( - cb::Error, SummarizeClientStat, - (const cb::InferStat&, const cb::InferStat&, const uint64_t, const size_t, - const size_t, const size_t, const size_t, PerfStatus&), - (override)); - - std::shared_ptr& parser_{InferenceProfiler::parser_}; - std::unique_ptr& manager_{InferenceProfiler::manager_}; - bool& include_lib_stats_{InferenceProfiler::include_lib_stats_}; - std::vector& all_request_records_{ - InferenceProfiler::all_request_records_}; -}; - -using MockInferenceProfiler = testing::NiceMock; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/mock_load_manager.h b/src/c++/perf_analyzer/mock_load_manager.h deleted file mode 100644 index 2088a4053..000000000 --- a/src/c++/perf_analyzer/mock_load_manager.h +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2023 (c), NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#pragma once - -#include "gmock/gmock.h" -#include "load_manager.h" - -namespace triton { namespace perfanalyzer { - -class NaggyMockLoadManager : public LoadManager {}; - -using MockLoadManager = testing::NiceMock; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/mock_model_parser.h b/src/c++/perf_analyzer/mock_model_parser.h deleted file mode 100644 index 72222a826..000000000 --- a/src/c++/perf_analyzer/mock_model_parser.h +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#pragma once - -#include "model_parser.h" - -namespace triton { namespace perfanalyzer { - -class MockModelParser : public ModelParser { - public: - MockModelParser() : ModelParser(clientbackend::BackendKind::TRITON) {} - - MockModelParser( - bool is_sequence_model, bool is_decoupled_model, - size_t max_batch_size = 64) - : ModelParser(clientbackend::BackendKind::TRITON) - { - if (is_sequence_model) { - scheduler_type_ = ModelParser::SEQUENCE; - } - is_decoupled_ = is_decoupled_model; - max_batch_size_ = max_batch_size; - } - - // Expose private function - cb::Error GetInt(const rapidjson::Value& value, int64_t* integer_value) - { - return ModelParser::GetInt(value, integer_value); - } - - // Expose private function - cb::Error DetermineComposingModelMap( - const std::vector& bls_composing_models, - const rapidjson::Document& config, - std::unique_ptr& backend) - { - return ModelParser::DetermineComposingModelMap( - bls_composing_models, config, backend); - } - - // Expose private function - cb::Error DetermineSchedulerType( - const rapidjson::Document& config, - std::unique_ptr& backend) - { - return ModelParser::DetermineSchedulerType(config, backend); - } - - std::shared_ptr& composing_models_map_{ - ModelParser::composing_models_map_}; - std::shared_ptr& inputs_{ModelParser::inputs_}; -}; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/mock_profile_data_collector.h b/src/c++/perf_analyzer/mock_profile_data_collector.h deleted file mode 100644 index 94467892d..000000000 --- a/src/c++/perf_analyzer/mock_profile_data_collector.h +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#pragma once
-
-#include "gmock/gmock.h"
-#include "profile_data_collector.h"
-
-namespace triton { namespace perfanalyzer {
-
-class NaggyMockProfileDataCollector : public ProfileDataCollector {
- public:
-  NaggyMockProfileDataCollector()
-  {
-    ON_CALL(*this, FindExperiment(testing::_))
-        .WillByDefault(
-            [this](InferenceLoadMode& id)
-                -> std::vector<Experiment>::iterator {
-              return this->ProfileDataCollector::FindExperiment(id);
-            });
-  }
-
-  MOCK_METHOD(
-      std::vector<Experiment>::iterator, FindExperiment, (InferenceLoadMode&),
-      (override));
-
-  std::vector<Experiment>& experiments_{ProfileDataCollector::experiments_};
-};
-
-using MockProfileDataCollector =
-    testing::NiceMock<NaggyMockProfileDataCollector>;
-
-}}  // namespace triton::perfanalyzer
diff --git a/src/c++/perf_analyzer/mock_profile_data_exporter.h b/src/c++/perf_analyzer/mock_profile_data_exporter.h
deleted file mode 100644
index 90e96d736..000000000
--- a/src/c++/perf_analyzer/mock_profile_data_exporter.h
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// * Neither the name of NVIDIA CORPORATION nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
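
The exporter mocked just below assembles experiment records into a rapidjson document and writes it to a file. For orientation, a minimal sketch of that rapidjson build-and-serialize flow; the field names here are invented for illustration and are not the exporter's actual schema:

    #include <cstdio>

    #include "rapidjson/document.h"
    #include "rapidjson/stringbuffer.h"
    #include "rapidjson/writer.h"

    int main()
    {
      rapidjson::Document doc;
      doc.SetObject();
      auto& alloc = doc.GetAllocator();

      // Build a nested object; every AddMember needs the allocator.
      rapidjson::Value experiment(rapidjson::kObjectType);
      experiment.AddMember("concurrency", 4, alloc);
      experiment.AddMember("latency_ns", 12345, alloc);
      doc.AddMember("experiment", experiment, alloc);

      // Serialize the DOM to a string (the exporter writes this to a file).
      rapidjson::StringBuffer buffer;
      rapidjson::Writer<rapidjson::StringBuffer> writer(buffer);
      doc.Accept(writer);

      std::printf("%s\n", buffer.GetString());
      return 0;
    }
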
-#pragma once
-
-#include "gmock/gmock.h"
-#include "profile_data_exporter.h"
-
-namespace triton { namespace perfanalyzer {
-
-class NaggyMockProfileDataExporter : public ProfileDataExporter {
- public:
-  NaggyMockProfileDataExporter()
-  {
-    ON_CALL(
-        *this, ConvertToJson(testing::_, testing::_, testing::_, testing::_))
-        .WillByDefault(
-            [this](
-                const std::vector<Experiment>& raw_experiments,
-                std::string& raw_version, cb::BackendKind& service_kind,
-                std::string& endpoint) -> void {
-              return this->ProfileDataExporter::ConvertToJson(
-                  raw_experiments, raw_version, service_kind, endpoint);
-            });
-
-    ON_CALL(*this, OutputToFile(testing::_))
-        .WillByDefault([this](std::string& file_path) -> void {
-          this->ProfileDataExporter::OutputToFile(file_path);
-        });
-
-    ON_CALL(*this, AddExperiment(testing::_, testing::_, testing::_))
-        .WillByDefault(
-            [this](
-                rapidjson::Value& entry, rapidjson::Value& experiment,
-                const Experiment& raw_experiment) -> void {
-              this->ProfileDataExporter::AddExperiment(
-                  entry, experiment, raw_experiment);
-            });
-
-    ON_CALL(*this, AddServiceKind(testing::_))
-        .WillByDefault([this](cb::BackendKind& service_kind) -> void {
-          this->ProfileDataExporter::AddServiceKind(service_kind);
-        });
-
-    ON_CALL(*this, AddEndpoint(testing::_))
-        .WillByDefault([this](std::string& endpoint) -> void {
-          this->ProfileDataExporter::AddEndpoint(endpoint);
-        });
-
-    ON_CALL(*this, ClearDocument()).WillByDefault([this]() -> void {
-      this->ProfileDataExporter::ClearDocument();
-    });
-  }
-
-  MOCK_METHOD(
-      void, ConvertToJson,
-      (const std::vector<Experiment>&, std::string&, cb::BackendKind&,
-       std::string&),
-      (override));
-  MOCK_METHOD(
-      void, AddExperiment,
-      (rapidjson::Value&, rapidjson::Value&, const Experiment&), (override));
-  MOCK_METHOD(void, OutputToFile, (std::string&), (override));
-  MOCK_METHOD(void, AddServiceKind, (cb::BackendKind&));
-  MOCK_METHOD(void, AddEndpoint, (std::string&));
-  MOCK_METHOD(void, ClearDocument, ());
-
-  rapidjson::Document& document_{ProfileDataExporter::document_};
-};
-
-using MockProfileDataExporter =
-    testing::NiceMock<NaggyMockProfileDataExporter>;
-
-}}  // namespace triton::perfanalyzer
diff --git a/src/c++/perf_analyzer/mock_request_rate_worker.h b/src/c++/perf_analyzer/mock_request_rate_worker.h
deleted file mode 100644
index 0132a9a0b..000000000
--- a/src/c++/perf_analyzer/mock_request_rate_worker.h
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// * Neither the name of NVIDIA CORPORATION nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#pragma once - -#include "gmock/gmock.h" -#include "request_rate_worker.h" - -namespace triton { namespace perfanalyzer { - -class NaggyMockRequestRateWorker : public RequestRateWorker { - public: - NaggyMockRequestRateWorker( - uint32_t id, std::shared_ptr thread_stat, - std::shared_ptr thread_config, - const std::shared_ptr parser, - std::shared_ptr data_loader, - const std::shared_ptr factory, - const bool on_sequence_model, const bool async, const size_t max_threads, - const bool using_json_data, const bool streaming, - const int32_t batch_size, std::condition_variable& wake_signal, - std::mutex& wake_mutex, bool& execute, - std::chrono::steady_clock::time_point& start_time, - const bool serial_sequences, - const std::shared_ptr& infer_data_manager, - std::shared_ptr sequence_manager) - : RequestRateWorker( - id, thread_stat, thread_config, parser, data_loader, factory, - on_sequence_model, async, max_threads, using_json_data, streaming, - batch_size, wake_signal, wake_mutex, execute, start_time, - serial_sequences, infer_data_manager, sequence_manager) - { - ON_CALL(*this, Infer()).WillByDefault([this]() -> void { - RequestRateWorker::Infer(); - }); - } - - MOCK_METHOD(void, Infer, (), (override)); - - void CreateContext() override { RequestRateWorker::CreateContext(); } - - void SendInferRequest() - { - if (thread_stat_->status_.IsOk()) { - LoadWorker::SendInferRequest(0, false); - } - } - - void EmptyInfer() { thread_config_->is_paused_ = true; } -}; - -// Non-naggy version of Mock (won't warn when using default gmock -// mocked function) -using MockRequestRateWorker = testing::NiceMock; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/mock_sequence_manager.h b/src/c++/perf_analyzer/mock_sequence_manager.h deleted file mode 100644 index 522079c13..000000000 --- a/src/c++/perf_analyzer/mock_sequence_manager.h +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#pragma once - -#include "gmock/gmock.h" -#include "sequence_manager.h" - -namespace triton { namespace perfanalyzer { - -class NaggyMockSequenceManager : public SequenceManager { - public: - NaggyMockSequenceManager() { SetupMocks(); } - - NaggyMockSequenceManager( - const uint64_t start_sequence_id, const uint64_t sequence_id_range, - const size_t sequence_length, const bool sequence_length_specified, - const double sequence_length_variation, const bool using_json_data, - std::shared_ptr data_loader) - : SequenceManager( - start_sequence_id, sequence_id_range, sequence_length, - sequence_length_specified, sequence_length_variation, - using_json_data, data_loader) - { - SetupMocks(); - } - - void SetupMocks() - { - ON_CALL(*this, SetInferSequenceOptions(testing::_, testing::_)) - .WillByDefault([this]( - const uint32_t seq_stat_index, - std::unique_ptr& options) { - this->SequenceManager::SetInferSequenceOptions( - seq_stat_index, options); - }); - ON_CALL(*this, InitNewSequence(testing::_)) - .WillByDefault([this](int seq_stat_index) { - this->SequenceManager::InitNewSequence(seq_stat_index); - }); - ON_CALL(*this, GetNextSeqId(testing::_)) - .WillByDefault([this](int seq_stat_index) -> uint64_t { - return this->SequenceManager::GetNextSeqId(seq_stat_index); - }); - ON_CALL(*this, GetRandomSequenceLength(testing::_)) - .WillByDefault([this](double offset_ratio) -> size_t { - return this->SequenceManager::GetRandomSequenceLength(offset_ratio); - }); - ON_CALL(*this, GetNewDataStreamId()).WillByDefault([this]() -> size_t { - return this->SequenceManager::GetNewDataStreamId(); - }); - } - - MOCK_METHOD( - void, SetInferSequenceOptions, - (const uint32_t, std::unique_ptr&), (override)); - MOCK_METHOD(void, InitNewSequence, (int), (override)); - MOCK_METHOD(uint64_t, GetNextSeqId, (int), (override)); - MOCK_METHOD(size_t, GetRandomSequenceLength, (double), (override)); - MOCK_METHOD(uint64_t, GetNewDataStreamId, (), (override)); - - std::vector>& sequence_statuses_{ - SequenceManager::sequence_statuses_}; - std::atomic& curr_seq_id_{SequenceManager::curr_seq_id_}; -}; - -using MockSequenceManager = testing::NiceMock; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/model_parser.cc b/src/c++/perf_analyzer/model_parser.cc deleted file mode 100644 index 8ffea56da..000000000 --- a/src/c++/perf_analyzer/model_parser.cc +++ /dev/null @@ -1,467 +0,0 @@ -// Copyright 2020-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. 
-// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "model_parser.h" - -#include "rapidjson/writer.h" - -namespace triton { namespace perfanalyzer { - -cb::Error -ModelParser::InitTriton( - const rapidjson::Document& metadata, const rapidjson::Document& config, - const std::string& model_version, - const std::vector& bls_composing_models, - const std::unordered_map>& input_shapes, - std::unique_ptr& backend) -{ - model_name_ = metadata["name"].GetString(); - model_version_ = model_version; - - RETURN_IF_ERROR( - DetermineComposingModelMap(bls_composing_models, config, backend)); - - RETURN_IF_ERROR(DetermineSchedulerType(config, backend)); - - max_batch_size_ = 0; - const auto bs_itr = config.FindMember("max_batch_size"); - if (bs_itr != config.MemberEnd()) { - int64_t mbs; - RETURN_IF_ERROR(GetInt(bs_itr->value, &mbs)); - max_batch_size_ = mbs; - } - - const auto txn_itr = config.FindMember("model_transaction_policy"); - if (txn_itr != config.MemberEnd()) { - is_decoupled_ = txn_itr->value["decoupled"].GetBool(); - } - - // Get the information about inputs from metadata - const auto inputs_itr = metadata.FindMember("inputs"); - if (inputs_itr != metadata.MemberEnd()) { - for (const auto& input : inputs_itr->value.GetArray()) { - auto it = - inputs_->emplace(input["name"].GetString(), ModelTensor()).first; - it->second.name_ = input["name"].GetString(); - it->second.datatype_ = input["datatype"].GetString(); - bool is_dynamic = false; - bool skip = (max_batch_size_ > 0); - for (const auto& dim : input["shape"].GetArray()) { - if (skip) { - skip = false; - continue; - } - int64_t dim_int; - RETURN_IF_ERROR(GetInt(dim, &dim_int)); - if (dim_int == -1) { - is_dynamic = true; - } - it->second.shape_.push_back(dim_int); - } - - if (is_dynamic) { - const auto user_shape_it = input_shapes.find(it->second.name_); - if (user_shape_it != input_shapes.end()) { - // Update the default shape to be used. - it->second.shape_.clear(); - for (const auto dim : user_shape_it->second) { - it->second.shape_.push_back(dim); - } - } - } - } - } - - // Check whether the tensor is shape tensor or not from config. 
- const auto inputs_config_itr = config.FindMember("input"); - if (inputs_config_itr != config.MemberEnd()) { - for (const auto& input_config : inputs_config_itr->value.GetArray()) { - const auto name = std::string( - input_config["name"].GetString(), - input_config["name"].GetStringLength()); - auto it = inputs_->find(name); - if (it == inputs_->end()) { - return cb::Error( - "no metadata found for input tensor " + name, pa::GENERIC_ERROR); - } - const auto& shape_tensor_itr = input_config.FindMember("is_shape_tensor"); - if (shape_tensor_itr != input_config.MemberEnd()) { - it->second.is_shape_tensor_ = shape_tensor_itr->value.GetBool(); - } - - if (input_config.HasMember("optional")) { - it->second.is_optional_ = input_config["optional"].GetBool(); - } else { - it->second.is_optional_ = false; - } - } - } - - // Get the information about outputs from metadata - const auto outputs_itr = metadata.FindMember("outputs"); - if (outputs_itr != metadata.MemberEnd()) { - for (const auto& output : outputs_itr->value.GetArray()) { - auto it = - outputs_->emplace(output["name"].GetString(), ModelTensor()).first; - it->second.name_ = output["name"].GetString(); - it->second.datatype_ = output["datatype"].GetString(); - bool skip = (max_batch_size_ > 0); - for (const auto& dim : output["shape"].GetArray()) { - if (skip) { - skip = false; - continue; - } - int64_t dim_int; - RETURN_IF_ERROR(GetInt(dim, &dim_int)); - it->second.shape_.push_back(dim_int); - } - } - } - - // Check whether the tensor is shape tensor or not from config. - const auto output_config_itr = config.FindMember("output"); - if (output_config_itr != config.MemberEnd()) { - for (const auto& output_config : output_config_itr->value.GetArray()) { - const auto name = std::string( - output_config["name"].GetString(), - output_config["name"].GetStringLength()); - auto itr = outputs_->find(name); - if (itr == outputs_->end()) { - return cb::Error( - "no metadata found for output tensor " + name, pa::GENERIC_ERROR); - } - const auto& shape_tensor_itr = - output_config.FindMember("is_shape_tensor"); - if (shape_tensor_itr != output_config.MemberEnd()) { - itr->second.is_shape_tensor_ = shape_tensor_itr->value.GetBool(); - } - } - } - - // Check if model has response caching enabled - const auto cache_itr = config.FindMember("response_cache"); - // response_cache_enabled_ set globally for reporting purposes if any - // composing model has it enabled, so don't overwrite it if already set - if (cache_itr != config.MemberEnd() && !response_cache_enabled_) { - response_cache_enabled_ = cache_itr->value["enable"].GetBool(); - } - - if (cache_itr != config.MemberEnd()) { - top_level_response_caching_enabled_ = cache_itr->value["enable"].GetBool(); - } - - return cb::Error::Success; -} - -cb::Error -ModelParser::InitTFServe( - const rapidjson::Document& metadata, const std::string& model_name, - const std::string& model_version, const std::string& model_signature_name, - const int32_t batch_size, - const std::unordered_map>& input_shapes, - std::unique_ptr& backend) -{ - model_name_ = model_name; - model_version_ = model_version; - model_signature_name_ = model_signature_name; - // Get the scheduler type for the model - scheduler_type_ = NONE; - - // Will use the user provided batch size as max. Relies on the service - // to throw an error if not supported. 
-  max_batch_size_ = batch_size;
-
-  const rapidjson::Value& signature_config =
-      metadata["metadata"]["signature_def"]["signature_def"];
-  if (!signature_config.HasMember(model_signature_name.c_str())) {
-    return cb::Error(
-        "Failed to find signature_name \"" + model_signature_name +
-            "\" in the metadata",
-        pa::GENERIC_ERROR);
-  }
-
-  // Get the information about inputs from metadata
-  if (signature_config[model_signature_name.c_str()].HasMember("inputs")) {
-    const rapidjson::Value& inputs =
-        signature_config[model_signature_name.c_str()]["inputs"];
-    for (rapidjson::Value::ConstMemberIterator json_itr = inputs.MemberBegin();
-         json_itr != inputs.MemberEnd(); ++json_itr) {
-      auto it =
-          inputs_->emplace(json_itr->name.GetString(), ModelTensor()).first;
-      it->second.name_ = json_itr->name.GetString();
-      RETURN_IF_ERROR(ConvertDTypeFromTFS(
-          json_itr->value["dtype"].GetString(), &it->second.datatype_));
-
-      bool is_dynamic = false;
-      if (json_itr->value["tensor_shape"]["unknown_rank"].GetBool()) {
-        if (max_batch_size_ != 0) {
-          return cb::Error(
-              "Can not specify -b flag for saved model with unknown ranked "
-              "inputs",
-              pa::GENERIC_ERROR);
-        }
-        is_dynamic = true;
-      } else {
-        bool first_dim = true;
-        for (const auto& dim :
-             json_itr->value["tensor_shape"]["dim"].GetArray()) {
-          int64_t dim_int;
-          RETURN_IF_ERROR(GetInt(dim["size"], &dim_int));
-          if (first_dim && (max_batch_size_ != 0)) {
-            if (dim_int != -1) {
-              return cb::Error(
-                  "Can not specify -b flag for saved model with input not "
-                  "having their first dim as -1",
-                  pa::GENERIC_ERROR);
-            }
-            first_dim = false;
-          } else {
-            if (dim_int == -1) {
-              is_dynamic = true;
-            }
-            it->second.shape_.push_back(dim_int);
-          }
-        }
-      }
-
-      if (is_dynamic) {
-        const auto user_shape_it = input_shapes.find(it->second.name_);
-        if (user_shape_it != input_shapes.end()) {
-          // Update the default shape to be used.
-          it->second.shape_.clear();
-          for (const auto dim : user_shape_it->second) {
-            it->second.shape_.push_back(dim);
-          }
-        }
-      }
-    }
-  }
-
-  // Do not extract information about the outputs: by default, TensorFlow
-  // Serving returns all output tensors if none are requested.
-  // See here
-  // https://github.com/tensorflow/serving/blob/2.3.0/tensorflow_serving/apis/predict.proto#L27
-
-  return cb::Error::Success;
-}
-
-cb::Error
-ModelParser::InitOpenAI(
-    const std::string& model_name, const std::string& model_version,
-    const int32_t batch_size)
-{
-  // OpenAI does not return model metadata, hence we cannot obtain any
-  // parameters.
-  model_name_ = model_name;
-  model_version_ = model_version;
-  max_batch_size_ = batch_size;
-
-  // OpenAI will take a single json input with a fully formed payload
-  auto in_it = inputs_->emplace("payload", ModelTensor()).first;
-  in_it->second.name_ = "payload";
-  in_it->second.datatype_ = "JSON";
-  in_it->second.shape_.push_back(1);
-
-  // OpenAI will reply with a single json output
-  auto out_it = outputs_->emplace("response", ModelTensor()).first;
-  out_it->second.name_ = "response";
-  out_it->second.datatype_ = "JSON";
-  out_it->second.shape_.push_back(1);
-
-  return cb::Error::Success;
-}
-
-cb::Error
-ModelParser::InitTorchServe(
-    const std::string& model_name, const std::string& model_version,
-    const int32_t batch_size)
-{
-  // TorchServe does not return model metadata, hence we cannot obtain any
-  // parameters.
- model_name_ = model_name; - model_version_ = model_version; - max_batch_size_ = batch_size; - - // TorchServe needs to upload a file to the server. The input will hold the - // path to the file which should be provided as json to --input-data - auto it = inputs_->emplace("TORCHSERVE_INPUT", ModelTensor()).first; - it->second.name_ = "TORCHSERVE_INPUT"; - it->second.datatype_ = "BYTES"; - // Supports only a single input file - it->second.shape_.push_back(1); - - return cb::Error::Success; -} - -cb::Error -ModelParser::DetermineComposingModelMap( - const std::vector& bls_composing_models, - const rapidjson::Document& config, - std::unique_ptr& backend) -{ - RETURN_IF_ERROR(AddBLSComposingModels(bls_composing_models, config, backend)); - RETURN_IF_ERROR(AddEnsembleComposingModels(config, backend)); - - return cb::Error::Success; -} - -cb::Error -ModelParser::AddBLSComposingModels( - const std::vector& bls_composing_models, - const rapidjson::Document& config, - std::unique_ptr& backend) -{ - for (auto model : bls_composing_models) { - (*composing_models_map_)[config["name"].GetString()].insert(model); - - rapidjson::Document composing_model_config; - RETURN_IF_ERROR(backend->ModelConfig( - &composing_model_config, model.first, model.second)); - RETURN_IF_ERROR( - AddEnsembleComposingModels(composing_model_config, backend)); - } - - return cb::Error::Success; -} - -cb::Error -ModelParser::AddEnsembleComposingModels( - const rapidjson::Document& config, - std::unique_ptr& backend) -{ - if (config.HasMember("platform") && - std::string(config["platform"].GetString()).compare("ensemble") == 0) { - const auto step_itr = config["ensemble_scheduling"].FindMember("step"); - for (const auto& step : step_itr->value.GetArray()) { - std::string step_model_version; - int64_t model_version_int; - RETURN_IF_ERROR(GetInt(step["model_version"], &model_version_int)); - if (model_version_int == -1) { - step_model_version = ""; - } else { - step_model_version = std::to_string(model_version_int); - } - - (*composing_models_map_)[config["name"].GetString()].emplace( - std::string(step["model_name"].GetString()), step_model_version); - - rapidjson::Document composing_model_config; - RETURN_IF_ERROR(backend->ModelConfig( - &composing_model_config, step["model_name"].GetString(), - step_model_version)); - RETURN_IF_ERROR( - AddEnsembleComposingModels(composing_model_config, backend)); - } - } - - return cb::Error::Success; -} - - -cb::Error -ModelParser::DetermineSchedulerType( - const rapidjson::Document& config, - std::unique_ptr& backend) -{ - scheduler_type_ = NONE; - - if (composing_models_map_->size() != 0) { - bool is_sequential = false; - RETURN_IF_ERROR(GetComposingSchedulerType(backend, &is_sequential)); - if (is_sequential) { - scheduler_type_ = ENSEMBLE_SEQUENCE; - } else { - scheduler_type_ = ENSEMBLE; - } - } else { - const auto& sequence_itr = config.FindMember("sequence_batching"); - if (sequence_itr != config.MemberEnd()) { - scheduler_type_ = SEQUENCE; - } else { - const auto& dynamic_itr = config.FindMember("dynamic_batching"); - if (dynamic_itr != config.MemberEnd()) { - scheduler_type_ = DYNAMIC; - } - } - } - return cb::Error::Success; -} - -cb::Error -ModelParser::GetComposingSchedulerType( - std::unique_ptr& backend, bool* is_sequential) -{ - for (auto parent_composing_models : *composing_models_map_.get()) { - auto& composing_models = parent_composing_models.second; - for (auto composing_model : composing_models) { - rapidjson::Document config; - RETURN_IF_ERROR(backend->ModelConfig( - 
&config, composing_model.first, composing_model.second)); - - const auto& sequence_itr = config.FindMember("sequence_batching"); - if (sequence_itr != config.MemberEnd()) { - *is_sequential = true; - } - - const auto cache_itr = config.FindMember("response_cache"); - // response_cache_enabled_ set globally for reporting purposes if any - // composing model has it enabled, so don't overwrite it if already set - if (cache_itr != config.MemberEnd() && !response_cache_enabled_) { - response_cache_enabled_ = cache_itr->value["enable"].GetBool(); - } - } - } - return cb::Error::Success; -} - -cb::Error -ModelParser::GetInt(const rapidjson::Value& value, int64_t* integer_value) -{ - if (value.IsString()) { - std::string str(value.GetString(), value.GetStringLength()); - - try { - *integer_value = std::stoll(str.c_str()); - } - catch (...) { - return cb::Error( - std::string("unable to convert '") + str + "' to integer", - pa::GENERIC_ERROR); - } - - } else if (value.IsInt64()) { - *integer_value = value.GetInt64(); - } else if (value.IsInt()) { - *integer_value = value.GetInt(); - } else { - return cb::Error("failed to parse the integer value", pa::GENERIC_ERROR); - } - - return cb::Error::Success; -} - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/model_parser.h b/src/c++/perf_analyzer/model_parser.h deleted file mode 100644 index ac76b3e22..000000000 --- a/src/c++/perf_analyzer/model_parser.h +++ /dev/null @@ -1,254 +0,0 @@ -// Copyright 2020-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
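
ModelParser::GetInt above exists because protobuf's JSON mapping serializes 64-bit integers as strings, so "max_batch_size": "64" and "max_batch_size": 64 must both parse. A standalone sketch of that string-or-number handling, assuming only rapidjson (GetInt64 here is an illustrative helper, not a library API):

    #include <cstdint>
    #include <string>

    #include "rapidjson/document.h"

    // Accepts 42 or "42", mirroring protobuf's JSON encoding of (u)int64.
    bool GetInt64(const rapidjson::Value& value, int64_t* out)
    {
      if (value.IsString()) {
        try {
          *out =
              std::stoll(std::string(value.GetString(), value.GetStringLength()));
        }
        catch (...) {
          return false;  // not a number, or out of int64 range
        }
        return true;
      }
      if (value.IsInt64()) {  // also covers values that fit in plain int
        *out = value.GetInt64();
        return true;
      }
      return false;
    }

    int main()
    {
      rapidjson::Document d;
      d.Parse(R"({"max_batch_size": "64"})");  // string-encoded by protobuf
      int64_t mbs = 0;
      return (GetInt64(d["max_batch_size"], &mbs) && mbs == 64) ? 0 : 1;
    }
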
-#pragma once - -#include - -#include "client_backend/client_backend.h" -#include "perf_utils.h" - -namespace triton { namespace perfanalyzer { - -#ifndef DOCTEST_CONFIG_DISABLE -class TestModelParser; -class MockModelParser; -class InferenceProfiler; -#endif - -struct ModelTensor { - ModelTensor() : is_shape_tensor_(false) {} - std::string name_; - std::string datatype_; - std::vector shape_; - // Indicates if this tensor holds shape information for other tensors - bool is_shape_tensor_; - bool is_optional_; -}; - -using ModelTensorMap = std::map; -using ComposingModelMap = std::map>; - -//============================================================================== -/// ModelParser is a helper class to parse the information about the target -/// model from the metadata and configuration returned by the server. -/// -/// Perf Analyzer depends upon the various properties of the model to correctly -/// generate and issue inference request for the model. The object of this -/// class will provide these necessary details. -class ModelParser { - public: - enum ModelSchedulerType { - NONE, - DYNAMIC, - SEQUENCE, - ENSEMBLE, - ENSEMBLE_SEQUENCE - }; - - explicit ModelParser(cb::BackendKind backend_kind) - : backend_kind_(backend_kind), - inputs_(std::make_shared()), - outputs_(std::make_shared()), - composing_models_map_(std::make_shared()), - scheduler_type_(NONE), max_batch_size_(0), is_decoupled_(false), - response_cache_enabled_(false), - top_level_response_caching_enabled_(false) - { - } - - /// Initializes the ModelParser with the metadata and config rapidjson DOM - /// for the target model obtained from Triton service - /// \param metadata The metadata of the target model. - /// \param config The config of the target model. - /// \param model_version The version of target model. - /// \param bls_composing_models A list of BLS composing model identifiers - /// \param input_shapes The user provided default shapes which will be use - /// if a certain input has wildcard in its dimension. - /// \param backend The backend object. - /// \return cb::Error object indicating success or failure. - cb::Error InitTriton( - const rapidjson::Document& metadata, const rapidjson::Document& config, - const std::string& model_version, - const std::vector& bls_composing_models, - const std::unordered_map>& input_shapes, - std::unique_ptr& backend); - - /// Initializes the ModelParser with the metadata and config rapidjson DOM - /// for the target model obtained from TF serving service. - /// \param metadata The metadata of the target model. - /// \param model_name The name of target model. - /// \param model_version The version of target model. - /// \param model_signature_name The signature name of target model. - /// \param input_shapes The user provided default shapes which will be use - /// if a certain input has wildcard in its dimension. - /// \param backend The backend object. - /// \return cb::Error object indicating success or failure. 
- cb::Error InitTFServe( - const rapidjson::Document& metadata, const std::string& model_name, - const std::string& model_version, const std::string& model_signature_name, - const int32_t batch_size, - const std::unordered_map>& input_shapes, - std::unique_ptr& backend); - - cb::Error InitOpenAI( - const std::string& model_name, const std::string& model_version, - const int32_t batch_size); - - cb::Error InitTorchServe( - const std::string& model_name, const std::string& model_version, - const int32_t batch_size); - - /// Get the name of the target model - /// \return Model name as string - const std::string& ModelName() const { return model_name_; } - - /// Get the version of target model - /// \return Model version as string - const std::string& ModelVersion() const { return model_version_; } - - /// Get the signature name of target model - /// \return Model signature name as string - const std::string& ModelSignatureName() const - { - return model_signature_name_; - } - - /// Get the scheduler type for the model - ModelSchedulerType SchedulerType() const { return scheduler_type_; } - - /// Get the max batch size supported by the model. Returns 0 if the model - /// does not support batching. - /// \return The maximum supported batch size. - size_t MaxBatchSize() const { return max_batch_size_; } - - /// Returns whether or not the model is decoupled - /// \return the truth value of whether the model is decoupled - bool IsDecoupled() const { return is_decoupled_; } - - /// Returns whether or not response cache is enabled for this model - /// \return the truth value of whether response cache is enabled for this - /// model - bool ResponseCacheEnabled() const { return response_cache_enabled_; } - - /// Returns whether or not top level request caching is enabled for this model - /// \return the truth value of whether top level request caching is enabled - /// for this model - bool TopLevelResponseCachingEnabled() const - { - return top_level_response_caching_enabled_; - } - -/// Only for testing -#ifndef DOCTEST_CONFIG_DISABLE - void SetTopLevelResponseCaching(bool enable_top_level_response_caching) - { - top_level_response_caching_enabled_ = enable_top_level_response_caching; - } -#endif - - /// Get the details about the model inputs. - /// \return The map with tensor_name and the tensor details - /// stored as key-value pair. - const std::shared_ptr& Inputs() { return inputs_; } - - /// Get the details about the model outputs. - /// \return The map with tensor_name and the tensor details - /// stored as key-value pair. - const std::shared_ptr& Outputs() { return outputs_; } - - /// Get the composing maps for the target model. - /// \return The pointer to the nested map describing the - /// nested flow in the target model. 
- const std::shared_ptr& GetComposingModelMap() - { - return composing_models_map_; - } - - - protected: - ModelSchedulerType scheduler_type_; - bool is_decoupled_; - - private: - /// Populate composing_models_map_ based on any bls composing models passed in - /// via the CLI as well as any ensemble or nested ensemble models - cb::Error DetermineComposingModelMap( - const std::vector& bls_composing_models, - const rapidjson::Document& config, - std::unique_ptr& backend); - - cb::Error AddBLSComposingModels( - const std::vector& bls_composing_models, - const rapidjson::Document& config, - std::unique_ptr& backend); - - cb::Error AddEnsembleComposingModels( - const rapidjson::Document& config, - std::unique_ptr& backend); - - /// Populate scheduler_type_ based on the scheduler type of the parent model - /// as well as any composing models - cb::Error DetermineSchedulerType( - const rapidjson::Document& config, - std::unique_ptr& backend); - - /// Sets is_sequential to true if any of the composing models are sequential - cb::Error GetComposingSchedulerType( - std::unique_ptr& backend, bool* is_sequential); - - /// In the json produced by protobuf, int64 and uint64 values are - /// represented as strings. Protobuf doesn't provide an option to - /// disable this (sigh) so we need to correctly parse these fields - /// for ModelParser to receive appropriate requests. - /// \param value The rapidjson value object with the int value. - /// \param integer_value The output integer pointer. - /// \return cb::Error object indicating success or failure. - cb::Error GetInt(const rapidjson::Value& value, int64_t* integer_value); - - cb::BackendKind backend_kind_; - - std::shared_ptr inputs_; - std::shared_ptr outputs_; - std::shared_ptr composing_models_map_; - - std::string model_name_; - std::string model_version_; - std::string model_signature_name_; - size_t max_batch_size_; - bool response_cache_enabled_; - bool top_level_response_caching_enabled_; - -#ifndef DOCTEST_CONFIG_DISABLE - friend TestModelParser; - friend MockModelParser; - friend InferenceProfiler; - - public: - ModelParser() = default; -#endif -}; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/mpi_utils.cc b/src/c++/perf_analyzer/mpi_utils.cc deleted file mode 100644 index 2923f6552..000000000 --- a/src/c++/perf_analyzer/mpi_utils.cc +++ /dev/null @@ -1,251 +0,0 @@ -// Copyright 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "mpi_utils.h" - -#include - -#include -#include - -namespace triton { namespace perfanalyzer { - -MPIDriver::MPIDriver(bool is_enabled) : is_enabled_(is_enabled) -{ - if (is_enabled_ == false) { - return; - } - - handle_ = dlopen("libmpi.so", RTLD_LAZY | RTLD_GLOBAL); - - if (handle_ == nullptr) { - throw std::runtime_error( - "Unable to load MPI library. If you are trying to run with " - "MPI / multiple models, check that 'libmpi.so' is on " - "`LD_LIBRARY_PATH` environment variable path."); - } - - CheckMPIImpl(); -} - -bool -MPIDriver::IsMPIRun() -{ - if (is_enabled_ == false) { - return false; - } - - if (MPIInitialized() == false) { - throw std::runtime_error("Must call MPI_Init() before calling IsMPIRun()."); - } - - return MPICommSizeWorld() > 1; -} - -void -MPIDriver::MPIInit(int* argc, char*** argv) -{ - if (is_enabled_ == false) { - return; - } - - int (*MPI_Init)( - int*, char***){(int (*)(int*, char***))dlsym(handle_, "MPI_Init")}; - if (MPI_Init == nullptr) { - throw std::runtime_error("Unable to obtain address of `MPI_Init` symbol."); - } - - MPI_Init(argc, argv); -} - -int -MPIDriver::MPICommSizeWorld() -{ - if (is_enabled_ == false) { - return -1; - } - - int world_size{1}; - - int (*MPI_Comm_size)( - void*, int*){(int (*)(void*, int*))dlsym(handle_, "MPI_Comm_size")}; - if (MPI_Comm_size == nullptr) { - throw std::runtime_error( - "Unable to obtain address of `MPI_Comm_size` symbol."); - } - - MPI_Comm_size(MPICommWorld(), &world_size); - - return world_size; -} - -void -MPIDriver::MPIBarrierWorld() -{ - if (is_enabled_ == false) { - return; - } - - int (*MPI_Barrier)(void*){(int (*)(void*))dlsym(handle_, "MPI_Barrier")}; - if (MPI_Barrier == nullptr) { - throw std::runtime_error( - "Unable to obtain address of `MPI_Barrier` symbol."); - } - - MPI_Barrier(MPICommWorld()); -} - -int -MPIDriver::MPICommRankWorld() -{ - if (is_enabled_ == false) { - return -1; - } - - int rank{0}; - - int (*MPI_Comm_rank)( - void*, int*){(int (*)(void*, int*))dlsym(handle_, "MPI_Comm_rank")}; - if (MPI_Comm_rank == nullptr) { - throw std::runtime_error( - "Unable to obtain address of `MPI_Comm_rank` symbol."); - } - - MPI_Comm_rank(MPICommWorld(), &rank); - - return rank; -} - -void -MPIDriver::MPIBcastIntWorld(void* buffer, int count, int root) -{ - if (is_enabled_ == false) { - return; - } - - int (*MPI_Bcast)(void*, int, void*, int, void*){ - (int (*)(void*, int, void*, int, void*))dlsym(handle_, "MPI_Bcast")}; - if (MPI_Bcast == nullptr) { - throw std::runtime_error("Unable to obtain address of `MPI_Bcast` symbol."); - } - - MPI_Bcast(buffer, count, MPIInt(), root, MPICommWorld()); -} - -void -MPIDriver::MPIFinalize() -{ - if (is_enabled_ == false) { - return; - } - - int (*MPI_Finalize)(){(int (*)())dlsym(handle_, "MPI_Finalize")}; - if (MPI_Finalize == nullptr) { - throw std::runtime_error( - "Unable to obtain address of `MPI_Finalize` symbol."); - } - - MPI_Finalize(); -} - -bool -MPIDriver::MPIInitialized() -{ - if (is_enabled_ == false) { - 
return false; - } - - int (*MPI_Initialized)(int*){ - (int (*)(int*))dlsym(handle_, "MPI_Initialized")}; - if (MPI_Initialized == nullptr) { - throw std::runtime_error( - "Unable to obtain address of `MPI_Initialized` symbol."); - } - - int initialized{0}; - MPI_Initialized(&initialized); - return initialized != 0; -} - -void* -MPIDriver::MPICommWorld() -{ - if (is_enabled_ == false) { - return nullptr; - } - - void* MPI_COMM_WORLD{dlsym(handle_, "ompi_mpi_comm_world")}; - if (MPI_COMM_WORLD == nullptr) { - throw std::runtime_error( - "Unable to obtain address of `ompi_mpi_comm_world` symbol."); - } - - return MPI_COMM_WORLD; -} - -void* -MPIDriver::MPIInt() -{ - if (is_enabled_ == false) { - return nullptr; - } - - void* MPI_INT{dlsym(handle_, "ompi_mpi_int")}; - if (MPI_INT == nullptr) { - throw std::runtime_error( - "Unable to obtain address of `ompi_mpi_int` symbol."); - } - - return MPI_INT; -} - -void -MPIDriver::CheckMPIImpl() -{ - if (is_enabled_ == false) { - return; - } - - int (*MPI_Get_library_version)(char*, int*){ - (int (*)(char*, int*))dlsym(handle_, "MPI_Get_library_version")}; - if (MPI_Get_library_version == nullptr) { - throw std::runtime_error( - "Unable to obtain address of `MPI_Get_library_version` symbol."); - } - - std::string version; - version.resize(MPIVersionStringMaximumLength); - int resultlen{0}; - MPI_Get_library_version(&version[0], &resultlen); - - if (version.find("Open MPI") != 0) { - throw std::runtime_error( - "Perf Analyzer only supports Open MPI. Please uninstall your current " - "implementation of MPI and install Open MPI."); - } -} - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/mpi_utils.h b/src/c++/perf_analyzer/mpi_utils.h deleted file mode 100644 index 862c8a3c3..000000000 --- a/src/c++/perf_analyzer/mpi_utils.h +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#pragma once - -#include - -namespace triton { namespace perfanalyzer { - -class MPIDriver { - public: - // Initializes class. 
Saves handle to MPI library if MPI library is available. - MPIDriver(bool is_enabled = false); - - // Returns true if the current process is an MPI process with world size - // greater than 1. - bool IsMPIRun(); - - // Attempts to call MPI_Init API. - void MPIInit(int* argc, char*** argv); - - // Attempts to call MPI_Comm_size API with MPI_COMM_WORLD communicator. - int MPICommSizeWorld(); - - // Attempts to call MPI_Barrier API with MPI_COMM_WORLD communicator. - void MPIBarrierWorld(); - - // Attempts to call MPI_Comm_rank API with MPI_COMM_WORLD communicator. - int MPICommRankWorld(); - - // Attempts to call MPI_Bcast API with MPI_INT data type and MPI_COMM_WORLD - // communicator. - void MPIBcastIntWorld(void* buffer, int count, int root); - - // Attempts to call MPI_Finalize API. - void MPIFinalize(); - - private: - // Attempts to call MPI_Initialized API. - bool MPIInitialized(); - - // Returns MPI_COMM_WORLD symbol address if MPI library is available, - // otherwise `nullptr`. - void* MPICommWorld(); - - // Returns MPI_INT symbol address if MPI library is available, otherwise - // `nullptr`. - void* MPIInt(); - - // Attempts to check that Open MPI is installed. - void CheckMPIImpl(); - - // Bool for whether user has opted to attempt to use MPI functionality. - bool is_enabled_{false}; - - // Loaded object for MPI library. - void* handle_{nullptr}; - - // Maximum string length for MPI version string. - const uint64_t MPIVersionStringMaximumLength{32768}; -}; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/perf_analyzer.cc b/src/c++/perf_analyzer/perf_analyzer.cc deleted file mode 100644 index c10101e1c..000000000 --- a/src/c++/perf_analyzer/perf_analyzer.cc +++ /dev/null @@ -1,473 +0,0 @@ -// Copyright 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
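The MPIDriver deleted above deliberately avoids a link-time MPI dependency: every MPI entry point is resolved at runtime with dlopen/dlsym, so the binary still starts when no MPI is installed. A minimal sketch of that pattern, assuming only the libmpi.so name and the MPI_Init signature used above (build with -ldl; no MPI headers required):

    // Runtime binding of an optional dependency, as MPIDriver does.
    #include <dlfcn.h>

    #include <iostream>
    #include <stdexcept>

    int
    main(int argc, char** argv)
    {
      // RTLD_LAZY defers symbol resolution; RTLD_GLOBAL exposes the library's
      // symbols to later loads, which Open MPI's internal plugins require.
      void* handle = dlopen("libmpi.so", RTLD_LAZY | RTLD_GLOBAL);
      if (handle == nullptr) {
        std::cout << "MPI not available, continuing without it" << std::endl;
        return 0;
      }

      using MPIInitFn = int (*)(int*, char***);
      auto mpi_init = reinterpret_cast<MPIInitFn>(dlsym(handle, "MPI_Init"));
      if (mpi_init == nullptr) {
        throw std::runtime_error("Unable to obtain address of `MPI_Init` symbol.");
      }
      mpi_init(&argc, &argv);
      // ... resolve MPI_Comm_size, MPI_Barrier, etc. the same way ...
      return 0;
    }

Gating every call behind is_enabled_, as the class above does, is what lets a non-MPI run skip the load entirely.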
- -#include "perf_analyzer.h" - -#include "perf_analyzer_exception.h" -#include "periodic_concurrency_manager.h" -#include "report_writer.h" -#include "request_rate_manager.h" - -namespace pa = triton::perfanalyzer; - -namespace triton { namespace perfanalyzer { - -volatile bool early_exit = false; - -void -SignalHandler(int signum) -{ - std::cout << "Interrupt signal (" << signum << ") received." << std::endl; - // Upon invoking the SignalHandler for the first time early_exit flag is - // invoked and analyzer waits for in-flight inferences to complete before - // exiting. On the second invocation, the program exits immediately. - if (!early_exit) { - std::cout << "Waiting for in-flight inferences to complete." << std::endl; - early_exit = true; - } else { - std::cout << "Exiting immediately..." << std::endl; - exit(0); - } -} -}} // namespace triton::perfanalyzer - -PerfAnalyzer::PerfAnalyzer(pa::PAParamsPtr params) : params_(params) -{ - CreateAnalyzerObjects(); -} - -void -PerfAnalyzer::Run() -{ - PrerunReport(); - Profile(); - WriteReport(); - GenerateProfileExport(); - Finalize(); -} - -void -PerfAnalyzer::CreateAnalyzerObjects() -{ - // trap SIGINT to allow threads to exit gracefully - signal(SIGINT, pa::SignalHandler); - std::shared_ptr factory; - FAIL_IF_ERR( - cb::ClientBackendFactory::Create( - params_->kind, params_->url, params_->endpoint, params_->protocol, - params_->ssl_options, params_->trace_options, - params_->compression_algorithm, params_->http_headers, - params_->triton_server_path, params_->model_repository_path, - params_->extra_verbose, params_->metrics_url, - params_->input_tensor_format, params_->output_tensor_format, - &factory), - "failed to create client factory"); - - FAIL_IF_ERR( - factory->CreateClientBackend(&backend_), - "failed to create triton client backend"); - - parser_ = std::make_shared(params_->kind); - if (params_->kind == cb::BackendKind::TRITON || - params_->kind == cb::BackendKind::TRITON_C_API) { - rapidjson::Document model_metadata; - FAIL_IF_ERR( - backend_->ModelMetadata( - &model_metadata, params_->model_name, params_->model_version), - "failed to get model metadata"); - rapidjson::Document model_config; - FAIL_IF_ERR( - backend_->ModelConfig( - &model_config, params_->model_name, params_->model_version), - "failed to get model config"); - - FAIL_IF_ERR( - parser_->InitTriton( - model_metadata, model_config, params_->model_version, - params_->bls_composing_models, params_->input_shapes, backend_), - "failed to create model parser"); - } else if (params_->kind == cb::BackendKind::OPENAI) { - FAIL_IF_ERR( - parser_->InitOpenAI( - params_->model_name, params_->model_version, params_->batch_size), - "failed to create model parser"); - } else if (params_->kind == cb::BackendKind::TENSORFLOW_SERVING) { - rapidjson::Document model_metadata; - FAIL_IF_ERR( - backend_->ModelMetadata( - &model_metadata, params_->model_name, params_->model_version), - "failed to get model metadata"); - FAIL_IF_ERR( - parser_->InitTFServe( - model_metadata, params_->model_name, params_->model_version, - params_->model_signature_name, params_->batch_size, - params_->input_shapes, backend_), - "failed to create model parser"); - } else if (params_->kind == cb::BackendKind::TORCHSERVE) { - FAIL_IF_ERR( - parser_->InitTorchServe( - params_->model_name, params_->model_version, params_->batch_size), - "failed to create model parser"); - } else { - std::cerr << "unsupported client backend kind" << std::endl; - throw pa::PerfAnalyzerException(pa::GENERIC_ERROR); - } - - if 
((parser_->MaxBatchSize() == 0) && params_->batch_size > 1) { - std::cerr << "can not specify batch size > 1 as the model does not support " - "batching" - << std::endl; - throw pa::PerfAnalyzerException(pa::GENERIC_ERROR); - } - - // Change the default value for the --async option for sequential models - if ((parser_->SchedulerType() == pa::ModelParser::SEQUENCE) || - (parser_->SchedulerType() == pa::ModelParser::ENSEMBLE_SEQUENCE)) { - if (!params_->async) { - params_->async = params_->forced_sync ? false : true; - } - // Validate the batch_size specification - if (params_->batch_size > 1) { - std::cerr << "can not specify batch size > 1 when using a sequence model" - << std::endl; - throw pa::PerfAnalyzerException(pa::GENERIC_ERROR); - } - } - - if (params_->streaming) { - if (params_->forced_sync) { - std::cerr << "can not use streaming with synchronous API" << std::endl; - throw pa::PerfAnalyzerException(pa::GENERIC_ERROR); - } - params_->async = true; - } - - std::unique_ptr manager; - if (params_->targeting_concurrency()) { - if ((parser_->SchedulerType() == pa::ModelParser::SEQUENCE) || - (parser_->SchedulerType() == pa::ModelParser::ENSEMBLE_SEQUENCE)) { - if (params_->concurrency_range.end == pa::NO_LIMIT && params_->async) { - std::cerr << "The 'end' concurrency can not be 0 for sequence " - "models when using asynchronous API." - << std::endl; - throw pa::PerfAnalyzerException(pa::GENERIC_ERROR); - } - } - params_->max_concurrency = std::max( - params_->concurrency_range.start, params_->concurrency_range.end); - - if (!params_->async) { - if (params_->concurrency_range.end == pa::NO_LIMIT) { - std::cerr - << "WARNING: The maximum attainable concurrency will be limited by " - "max_threads specification." - << std::endl; - params_->concurrency_range.end = params_->max_threads; - } else { - // As only one synchronous request can be generated from a thread at a - // time, to maintain the requested concurrency, that many threads need - // to be generated. - if (params_->max_threads_specified) { - std::cerr - << "WARNING: Overriding max_threads specification to ensure " - "requested concurrency range." - << std::endl; - } - params_->max_threads = std::max( - params_->concurrency_range.start, params_->concurrency_range.end); - } - } - if ((params_->sequence_id_range != 0) && - (params_->sequence_id_range < params_->max_concurrency)) { - std::cerr << "sequence id range specified is smaller than the " - << "maximum possible concurrency, sequence id collision may " - << "occur." 
<< std::endl; - throw pa::PerfAnalyzerException(pa::GENERIC_ERROR); - } - FAIL_IF_ERR( - pa::ConcurrencyManager::Create( - params_->async, params_->streaming, params_->batch_size, - params_->max_threads, params_->max_concurrency, - params_->shared_memory_type, params_->output_shm_size, parser_, - factory, &manager, params_->request_parameters), - "failed to create concurrency manager"); - - } else if (params_->is_using_periodic_concurrency_mode) { - manager = std::make_unique( - params_->async, params_->streaming, params_->batch_size, - params_->max_threads, params_->max_concurrency, - params_->shared_memory_type, params_->output_shm_size, parser_, factory, - params_->periodic_concurrency_range, params_->request_period, - params_->request_parameters); - } else if (params_->using_request_rate_range) { - if ((params_->sequence_id_range != 0) && - (params_->sequence_id_range < params_->num_of_sequences)) { - std::cerr - << "sequence id range specified is smaller than the " - << "maximum possible number of sequences, sequence id collision " - << "may occur." << std::endl; - throw pa::PerfAnalyzerException(pa::GENERIC_ERROR); - } - FAIL_IF_ERR( - pa::RequestRateManager::Create( - params_->async, params_->streaming, params_->measurement_window_ms, - params_->max_trials, params_->request_distribution, - params_->batch_size, params_->max_threads, - params_->num_of_sequences, params_->shared_memory_type, - params_->output_shm_size, params_->serial_sequences, parser_, - factory, &manager, params_->request_parameters), - "failed to create request rate manager"); - - } else { - if ((params_->sequence_id_range != 0) && - (params_->sequence_id_range < params_->num_of_sequences)) { - std::cerr - << "sequence id range specified is smaller than the " - << "maximum possible number of sequences, sequence id collision " - << "may occur." 
<< std::endl; - throw pa::PerfAnalyzerException(pa::GENERIC_ERROR); - } - FAIL_IF_ERR( - pa::CustomLoadManager::Create( - params_->async, params_->streaming, params_->measurement_window_ms, - params_->max_trials, params_->request_intervals_file, - params_->batch_size, params_->max_threads, - params_->num_of_sequences, params_->shared_memory_type, - params_->output_shm_size, params_->serial_sequences, parser_, - factory, &manager, params_->request_parameters), - "failed to create custom load manager"); - } - - manager->InitManager( - params_->string_length, params_->string_data, params_->zero_input, - params_->user_data, params_->start_sequence_id, - params_->sequence_id_range, params_->sequence_length, - params_->sequence_length_specified, params_->sequence_length_variation); - - FAIL_IF_ERR( - pa::ProfileDataCollector::Create(&collector_), - "failed to create profile data collector"); - - FAIL_IF_ERR( - pa::ProfileDataExporter::Create(&exporter_), - "failed to create profile data exporter"); - - FAIL_IF_ERR( - pa::InferenceProfiler::Create( - params_->verbose, params_->stability_threshold, - params_->measurement_window_ms, params_->max_trials, - params_->percentile, params_->latency_threshold_ms, params_->protocol, - parser_, std::move(backend_), std::move(manager), &profiler_, - params_->measurement_request_count, params_->measurement_mode, - params_->mpi_driver, params_->metrics_interval_ms, - params_->should_collect_metrics, params_->overhead_pct_threshold, - params_->async, collector_, !params_->profile_export_file.empty()), - "failed to create profiler"); -} - -void -PerfAnalyzer::PrerunReport() -{ - std::cout << "*** Measurement Settings ***" << std::endl; - if (params_->kind == cb::BackendKind::TRITON || params_->using_batch_size) { - std::cout << " Batch size: " << params_->batch_size << std::endl; - } - - std::cout << " Service Kind: " << BackendKindToString(params_->kind) - << std::endl; - - if (params_->request_count != 0) { - std::cout << " Sending a total of " << params_->request_count - << " requests" << std::endl; - } else { - if (params_->measurement_mode == pa::MeasurementMode::COUNT_WINDOWS) { - std::cout << " Using \"count_windows\" mode for stabilization" - << std::endl; - } else { - std::cout << " Using \"time_windows\" mode for stabilization" - << std::endl; - } - - std::string stabilization_metric = "latency and throughput"; - if (params_->async) { - stabilization_metric = "throughput"; - } - if (params_->percentile == -1) { - std::cout << " Stabilizing using average " << stabilization_metric - << std::endl; - } else { - std::cout << " Stabilizing using p" << params_->percentile - << stabilization_metric << std::endl; - } - - if (params_->measurement_mode == pa::MeasurementMode::TIME_WINDOWS) { - std::cout << " Measurement window: " << params_->measurement_window_ms - << " msec" << std::endl; - } else if ( - params_->measurement_mode == pa::MeasurementMode::COUNT_WINDOWS) { - std::cout << " Minimum number of samples in each window: " - << params_->measurement_request_count << std::endl; - } - } - - if (params_->concurrency_range.end != 1) { - std::cout << " Latency limit: " << params_->latency_threshold_ms << " msec" - << std::endl; - if (params_->concurrency_range.end != pa::NO_LIMIT) { - std::cout << " Concurrency limit: " - << std::max( - params_->concurrency_range.start, - params_->concurrency_range.end) - << " concurrent requests" << std::endl; - } - } - if (params_->request_rate_range[pa::SEARCH_RANGE::kEND] != 1.0) { - std::cout << " Latency limit: " << 
params_->latency_threshold_ms << " msec" - << std::endl; - if (params_->request_rate_range[pa::SEARCH_RANGE::kEND] != - static_cast<double>(pa::NO_LIMIT)) { - std::cout << " Request Rate limit: " - << std::max( - params_->request_rate_range[pa::SEARCH_RANGE::kSTART], - params_->request_rate_range[pa::SEARCH_RANGE::kEND]) - << " requests per second" << std::endl; - } - } - if (params_->using_request_rate_range) { - if (params_->request_distribution == pa::Distribution::POISSON) { - std::cout << " Using Poisson distribution on request generation" - << std::endl; - } else { - std::cout << " Using uniform distribution on request generation" - << std::endl; - } - } - if (params_->search_mode == pa::SearchMode::BINARY) { - std::cout << " Using Binary Search algorithm" << std::endl; - } - if (params_->async) { - std::cout << " Using asynchronous calls for inference" << std::endl; - } else { - std::cout << " Using synchronous calls for inference" << std::endl; - } - if (parser_->IsDecoupled()) { - std::cout << " Detected decoupled model, using the first response for " - "measuring latency" - << std::endl; - } - - std::cout << std::endl; -} - -void -PerfAnalyzer::Profile() -{ - params_->mpi_driver->MPIBarrierWorld(); - - cb::Error err; - if (params_->targeting_concurrency()) { - err = profiler_->Profile( - params_->concurrency_range.start, params_->concurrency_range.end, - params_->concurrency_range.step, params_->search_mode, - params_->request_count, perf_statuses_); - } else if (params_->is_using_periodic_concurrency_mode) { - err = profiler_->ProfilePeriodicConcurrencyMode(); - } else { - err = profiler_->Profile( - params_->request_rate_range[pa::SEARCH_RANGE::kSTART], - params_->request_rate_range[pa::SEARCH_RANGE::kEND], - params_->request_rate_range[pa::SEARCH_RANGE::kSTEP], - params_->search_mode, params_->request_count, perf_statuses_); - } - - params_->mpi_driver->MPIBarrierWorld(); - - if (!err.IsOk()) { - std::cerr << err; - // In the case of early_exit, do not throw; the thread continues on to - // report the summary - if (!pa::early_exit) { - throw pa::PerfAnalyzerException(err.Err()); - } - } -} - -void -PerfAnalyzer::WriteReport() -{ - if (!perf_statuses_.size() || params_->is_using_periodic_concurrency_mode) { - return; - } - - // Could print more depending on verbosity, but it seems like too much - // information - std::cout << "Inferences/Second vs. 
Client "; - if (params_->percentile == -1) { - std::cout << "Average Batch Latency" << std::endl; - } else { - std::cout << "p" << params_->percentile << " Batch Latency" << std::endl; - } - - for (pa::PerfStatus& status : perf_statuses_) { - if (params_->targeting_concurrency()) { - std::cout << "Concurrency: " << status.concurrency << ", "; - } else { - std::cout << "Request Rate: " << status.request_rate << ", "; - } - std::cout << "throughput: " << status.client_stats.infer_per_sec - << " infer/sec, latency " - << (status.stabilizing_latency_ns / 1000) << " usec" << std::endl; - } - - bool should_output_metrics{ - params_->should_collect_metrics && params_->verbose_csv}; - - std::unique_ptr writer; - - FAIL_IF_ERR( - pa::ReportWriter::Create( - params_->filename, params_->targeting_concurrency(), perf_statuses_, - params_->verbose_csv, profiler_->IncludeServerStats(), - params_->percentile, parser_, &writer, should_output_metrics), - "failed to create report writer"); - - writer->GenerateReport(); -} - -void -PerfAnalyzer::GenerateProfileExport() -{ - if (!params_->profile_export_file.empty()) { - exporter_->Export( - collector_->GetData(), collector_->GetVersion(), - params_->profile_export_file, params_->kind, params_->endpoint); - } -} - -void -PerfAnalyzer::Finalize() -{ - params_->mpi_driver->MPIFinalize(); -} diff --git a/src/c++/perf_analyzer/perf_analyzer.h b/src/c++/perf_analyzer/perf_analyzer.h deleted file mode 100644 index b75fe35f0..000000000 --- a/src/c++/perf_analyzer/perf_analyzer.h +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-#pragma once - -#include -#include - -#include - -#include "command_line_parser.h" -#include "concurrency_manager.h" -#include "custom_load_manager.h" -#include "inference_profiler.h" -#include "model_parser.h" -#include "mpi_utils.h" -#include "perf_utils.h" -#include "profile_data_collector.h" -#include "profile_data_exporter.h" - -// Perf Analyzer provides various metrics to measure the performance of -// the inference server. It can either be used to measure throughput, -// latency, and time distribution under a specific setting (i.e. a fixed batch -// size and a fixed number of concurrent requests), or to generate -// throughput-latency data points under dynamic settings (i.e. collecting -// throughput-latency data under different load levels). -// -// The following data is collected and used as part of the metrics: -// - Throughput (infer/sec): -// The number of inferences processed per second as seen by the analyzer. -// The number of inferences is the number of requests multiplied by their -// batch size. The total time is the time elapsed from when the analyzer -// starts sending requests to when it has received -// all responses. -// - Latency (usec): -// The average elapsed time between when a request is sent and -// when the response for the request is received. If the 'percentile' flag is -// specified, the selected percentile value will be reported instead of the -// average value. -// -// Perf Analyzer determines the stability of throughput and latency by observing -// measurements in different trials. If the latency and throughput are within -// the stability percentage (see --stability-percentage option), Perf Analyzer -// will report the average of the throughput and latency numbers observed in the -// last three trials. All the measurements gathered during the last three trials -// are aggregated to generate a single report. The number of total requests is -// the sum of all the requests in the individual measurement windows. -// -// There are broadly three ways to load the server for data collection using -// perf_analyzer: -// - Maintaining Target Concurrency: -// In this setting, the analyzer will maintain a target number of concurrent -// requests sent to the server (see --concurrency-range option) while -// taking measurements. -// The number of requests will be the total number of requests sent within -// the time interval for measurement (see --measurement-interval option) and -// the latency will be the average latency across all requests. -// -// Besides throughput and latency, which are measured on the client side, -// the following data measured by the server will also be reported -// in this setting: -// - Concurrent request: the number of concurrent requests as specified -// in the --concurrency-range option. Note that to run perf analyzer at -// a single concurrency, the user must specify --concurrency-range -// <'start'>, omitting the 'end' and 'step' values. -// - Batch size: the batch size of each request as specified in the -b option -// - Inference count: batch size * number of inference requests -// - Cumulative time: the total time between request received and -// response sent on the requests sent by perf analyzer. -// - Average Cumulative time: cumulative time / number of inference requests -// - Compute time: the total time it takes to run inferencing, including the -// time to copy input tensors to GPU memory, execute the model, -// and copy output tensors from GPU memory, for the requests -// sent by perf analyzer. -// - Average compute time: compute time / number of inference requests -// - Queue time: the total time spent waiting for an available model -// instance for the requests sent by perf analyzer. -// - Average queue time: queue time / number of inference requests -// If all fields of --concurrency-range are specified, the analyzer will -// perform the following procedure: -// 1. Follows the procedure in fixed concurrent request mode using -// k concurrent requests (k starts at 'start'). -// 2. Gathers data reported from step 1. -// 3. Increases k by 'step' and repeats steps 1 and 2 until the latency from -// the current iteration exceeds the latency threshold (see -// --latency-threshold option) or the concurrency level reaches 'end'. -// Note that by setting --latency-threshold or 'end' to 0 the effect of -// each threshold can be removed. However, both cannot be 0 simultaneously. -// At each iteration, the data mentioned in fixed concurrent request mode -// will be reported. Besides that, after the procedure above, a collection -// of "throughput, latency, concurrent request count" tuples will be -// reported in increasing load level order. -// -// - Maintaining Target Request Rate: -// This mode is enabled only when the --request-rate-range option is -// specified. Unlike above, here the analyzer will try to maintain a target -// rate of requests issued to the server while taking measurements. The rest -// of the analyzer's behaviour is identical to the above. It is important to -// note that even though over a sufficiently large interval the rate of -// requests will tend to the target request rate, the actual request rate -// over a small time interval will depend upon the selected request -// distribution (--request-distribution). For the 'constant' request -// distribution the time interval between successive requests is held -// constant, hence the request rate is constant over time. However, the -// 'poisson' request distribution varies the time interval between successive -// requests such that there are periods of bursts and lulls in request -// generation. Additionally, the 'poisson' distribution mimics real-world -// traffic and can be used to obtain measurements under a realistic load. -// With each request rate, the analyzer also reports the 'Delayed Request -// Count', which indicates how many requests missed the schedule specified -// by the distribution. Users can use --max-threads to increase the number -// of threads, which might help in dispatching requests on schedule. Also -// note that a very large number of threads might be counter-productive, -// with most of the time spent on context-switching between threads. -// -// - Following a User-Provided Request Delivery Schedule: -// This mode is enabled only when the --request-intervals option is -// specified. In this case, the analyzer will try to dispatch requests to -// the server with the time intervals between successive requests specified -// in a user-provided file. This file should contain one time interval in -// microseconds per line. The analyzer will loop over the values to produce -// a consistent load for measurements. Once the readings have stabilized, -// the final statistics will be reported. The statistics will include the -// 'Delayed Request Count' for the requests that missed their schedule. As -// described before, users can tune --max-threads to help the analyzer keep -// up with the schedule. This mode helps the user analyze the performance of -// the server under custom settings which may be of interest. -// -// By default, perf_analyzer will maintain target concurrency while measuring -// the performance. -// -// Options: -// -b: batch size for each request sent. -// --concurrency-range: The range of concurrency levels perf_analyzer will use. -// A concurrency level indicates the number of concurrent requests in queue. -// --request-rate-range: The range of request rates perf_analyzer will use to -// load the server. -// --request-intervals: File containing time intervals (in microseconds) to use -// between successive requests. -// --latency-threshold: latency threshold in msec. -// --measurement-interval: time interval for each measurement window in msec. -// --async: Enables asynchronous inference calls. -// --binary-search: Enables binary search within the specified range. -// --request-distribution: Allows the user to specify the distribution used to -// select the time intervals between request dispatches. -// -// For details on the options not listed, please refer to the usage output. -// -class PerfAnalyzer { - public: - PerfAnalyzer(pa::PAParamsPtr params); - virtual ~PerfAnalyzer(){}; - - // Main runner function for Perf Analyzer. - void Run(); - - private: - pa::PAParamsPtr params_; - std::unique_ptr<pa::InferenceProfiler> profiler_; - std::unique_ptr<cb::ClientBackend> backend_; - std::shared_ptr<pa::ModelParser> parser_; - std::vector<pa::PerfStatus> perf_statuses_; - std::shared_ptr<pa::ProfileDataCollector> collector_; - std::shared_ptr<pa::ProfileDataExporter> exporter_; - - // - // Helper methods - // - - // Create the objects (managers, profiler, and exporters) used by the - // analyzer. - void CreateAnalyzerObjects(); - void PrerunReport(); - void Profile(); - void WriteReport(); - void GenerateProfileExport(); - void Finalize(); -}; diff --git a/src/c++/perf_analyzer/perf_analyzer_exception.h b/src/c++/perf_analyzer/perf_analyzer_exception.h deleted file mode 100644 index a0b8ae708..000000000 --- a/src/c++/perf_analyzer/perf_analyzer_exception.h +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -#pragma once - -#include -#include - -namespace triton { namespace perfanalyzer { - -// Perf Exception error class -// -class PerfAnalyzerException : public std::exception { - public: - PerfAnalyzerException(uint32_t error) : error_(error) {} - - PerfAnalyzerException(const std::string& message, uint32_t error) - : message_(message), error_(error) - { - } - - virtual const char* what() const throw() { return message_.c_str(); } - - inline int GetError() const { return error_; } - - private: - const std::string message_{""}; - uint32_t error_; -}; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/perf_analyzer_unit_tests.cc b/src/c++/perf_analyzer/perf_analyzer_unit_tests.cc deleted file mode 100644 index bcc78fdd5..000000000 --- a/src/c++/perf_analyzer/perf_analyzer_unit_tests.cc +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// This file exists to hold a macro-expanded main function for the unit test -// runner executable. -// -// The old contents of main.cc are needed for the unit test runner to compile, -// but since two main functions cannot be compiled in the same executable, the -// contents of the old main.cc were moved to a new file/class, which are now -// included in the compilation of the unit test runner executable. 
-// -// The new contents of main.cc just include the new file/class mentioned above -// and run the primary function from there in a simplified main function, which -// runs Perf Analyzer. -#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN -#include "doctest.h" diff --git a/src/c++/perf_analyzer/perf_utils.cc b/src/c++/perf_analyzer/perf_utils.cc deleted file mode 100644 index 6088c1b6b..000000000 --- a/src/c++/perf_analyzer/perf_utils.cc +++ /dev/null @@ -1,416 +0,0 @@ -// Copyright 2020-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
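The unit test runner above relies on doctest's DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN, which makes doctest.h expand to a main() of its own, so the production objects can be linked into the test binary without their own entry point. The mechanism in isolation (Square and the test body are illustrative, not from this repository):

    // The macro below makes doctest.h emit main() for this binary.
    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
    #include "doctest.h"

    static int
    Square(int x)
    {
      return x * x;
    }

    TEST_CASE("square of small integers")
    {
      CHECK(Square(0) == 0);
      CHECK(Square(3) == 9);
    }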
- -#include "perf_utils.h" - -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "client_backend/client_backend.h" -#include "doctest.h" - -namespace triton { namespace perfanalyzer { - -cb::ProtocolType -ParseProtocol(const std::string& str) -{ - std::string protocol(str); - std::transform(protocol.begin(), protocol.end(), protocol.begin(), ::tolower); - if (protocol == "http") { - return cb::ProtocolType::HTTP; - } else if (protocol == "grpc") { - return cb::ProtocolType::GRPC; - } - return cb::ProtocolType::UNKNOWN; -} - -cb::Error -ConvertDTypeFromTFS(const std::string& tf_dtype, std::string* datatype) -{ - if (tf_dtype == "DT_HALF") { - *datatype = "FP16"; - } else if (tf_dtype == "DT_BFLOAT16") { - *datatype = "BF16"; - } else if (tf_dtype == "DT_FLOAT") { - *datatype = "FP32"; - } else if (tf_dtype == "DT_DOUBLE") { - *datatype = "FP64"; - } else if (tf_dtype == "DT_INT32") { - *datatype = "INT32"; - } else if (tf_dtype == "DT_INT16") { - *datatype = "INT16"; - } else if (tf_dtype == "DT_UINT16") { - *datatype = "UINT16"; - } else if (tf_dtype == "DT_INT8") { - *datatype = "INT8"; - } else if (tf_dtype == "DT_UINT8") { - *datatype = "UINT8"; - } else if (tf_dtype == "DT_STRING") { - *datatype = "BYTES"; - } else if (tf_dtype == "DT_INT64") { - *datatype = "INT64"; - } else if (tf_dtype == "DT_BOOL") { - *datatype = "BOOL"; - } else if (tf_dtype == "DT_UINT32") { - *datatype = "UINT32"; - } else if (tf_dtype == "DT_UINT64") { - *datatype = "UINT64"; - } else { - return cb::Error( - "unsupported datatype encountered " + tf_dtype, pa::GENERIC_ERROR); - } - - return cb::Error::Success; -} - -bool -IsDirectory(const std::string& path) -{ - struct stat s; - if (stat(path.c_str(), &s) == 0 && (s.st_mode & S_IFDIR)) { - return true; - } else { - return false; - } -} - -bool -IsFile(const std::string& complete_path) -{ - struct stat s; - if (stat(complete_path.c_str(), &s) == 0 && (s.st_mode & S_IFREG)) { - return true; - } else { - return false; - } -} - -int64_t -ByteSize(const std::vector& shape, const std::string& datatype) -{ - int one_element_size; - if ((datatype.compare("BOOL") == 0) || (datatype.compare("INT8") == 0) || - (datatype.compare("UINT8") == 0)) { - one_element_size = 1; - } else if ( - (datatype.compare("INT16") == 0) || (datatype.compare("UINT16") == 0) || - (datatype.compare("FP16") == 0) || (datatype.compare("BF16") == 0)) { - one_element_size = 2; - } else if ( - (datatype.compare("INT32") == 0) || (datatype.compare("UINT32") == 0) || - (datatype.compare("FP32") == 0)) { - one_element_size = 4; - } else if ( - (datatype.compare("INT64") == 0) || (datatype.compare("UINT64") == 0) || - (datatype.compare("FP64") == 0)) { - one_element_size = 8; - } else { - return -1; - } - - int64_t count = ElementCount(shape); - if (count < 0) { - return count; - } - - return (one_element_size * count); -} - -int64_t -ElementCount(const std::vector& shape) -{ - int64_t count = 1; - bool is_dynamic = false; - for (const auto dim : shape) { - if (dim == -1) { - is_dynamic = true; - } else { - count *= dim; - } - } - - if (is_dynamic) { - count = -1; - } - return count; -} - -void -SerializeStringTensor( - std::vector string_tensor, std::vector* serialized_data) -{ - std::string serialized = ""; - for (auto s : string_tensor) { - uint32_t len = s.size(); - serialized.append(reinterpret_cast(&len), sizeof(uint32_t)); - serialized.append(s); - } - - std::copy( - serialized.begin(), serialized.end(), - std::back_inserter(*serialized_data)); 
-} - -cb::Error -SerializeExplicitTensor( - const rapidjson::Value& tensor, const std::string& dt, - std::vector* decoded_data) -{ - if (dt.compare("BYTES") == 0) { - std::string serialized = ""; - for (const auto& value : tensor.GetArray()) { - if (!value.IsString()) { - return cb::Error( - "unable to find string data in json", pa::GENERIC_ERROR); - } - std::string element(value.GetString()); - uint32_t len = element.size(); - serialized.append(reinterpret_cast(&len), sizeof(uint32_t)); - serialized.append(element); - } - std::copy( - serialized.begin(), serialized.end(), - std::back_inserter(*decoded_data)); - } else if (dt.compare("JSON") == 0) { - std::string serialized = ""; - - auto values = tensor.GetArray(); - if (values.Size() != 1) { - return cb::Error( - "JSON format does not yet support multiple json objects in the " - "input"); - } - for (const auto& value : values) { - rapidjson::StringBuffer buffer; - rapidjson::Writer writer(buffer); - value.Accept(writer); - - std::string element = buffer.GetString(); - uint32_t len = element.size(); - serialized.append(element); - } - std::copy( - serialized.begin(), serialized.end(), - std::back_inserter(*decoded_data)); - } else { - for (const auto& value : tensor.GetArray()) { - if (dt.compare("BOOL") == 0) { - if (!value.IsBool()) { - return cb::Error( - "unable to find bool data in json", pa::GENERIC_ERROR); - } - bool element(value.GetBool()); - const char* src = reinterpret_cast(&element); - decoded_data->insert(decoded_data->end(), src, src + sizeof(bool)); - } else if (dt.compare("UINT8") == 0) { - if (!value.IsUint()) { - return cb::Error( - "unable to find uint8_t data in json", pa::GENERIC_ERROR); - } - uint8_t element(static_cast(value.GetUint())); - const char* src = reinterpret_cast(&element); - decoded_data->insert(decoded_data->end(), src, src + sizeof(uint8_t)); - } else if (dt.compare("INT8") == 0) { - if (!value.IsInt()) { - return cb::Error( - "unable to find int8_t data in json", pa::GENERIC_ERROR); - } - int8_t element(static_cast(value.GetInt())); - const char* src = reinterpret_cast(&element); - decoded_data->insert(decoded_data->end(), src, src + sizeof(int8_t)); - } else if (dt.compare("UINT16") == 0) { - if (!value.IsUint()) { - return cb::Error( - "unable to find uint16_t data in json", pa::GENERIC_ERROR); - } - uint16_t element(static_cast(value.GetUint())); - const char* src = reinterpret_cast(&element); - decoded_data->insert(decoded_data->end(), src, src + sizeof(uint16_t)); - } else if (dt.compare("INT16") == 0) { - if (!value.IsInt()) { - return cb::Error( - "unable to find int16_t data in json", pa::GENERIC_ERROR); - } - int16_t element(static_cast(value.GetInt())); - const char* src = reinterpret_cast(&element); - decoded_data->insert(decoded_data->end(), src, src + sizeof(int16_t)); - } else if (dt.compare("FP16") == 0) { - return cb::Error( - "Can not use explicit tensor description for fp16 datatype", - pa::GENERIC_ERROR); - } else if (dt.compare("BF16") == 0) { - return cb::Error( - "Can not use explicit tensor description for bf16 datatype", - pa::GENERIC_ERROR); - } else if (dt.compare("UINT32") == 0) { - if (!value.IsUint()) { - return cb::Error( - "unable to find uint32_t data in json", pa::GENERIC_ERROR); - } - uint32_t element(value.GetUint()); - const char* src = reinterpret_cast(&element); - decoded_data->insert(decoded_data->end(), src, src + sizeof(uint32_t)); - } else if (dt.compare("INT32") == 0) { - if (!value.IsInt()) { - return cb::Error( - "unable to find int32_t data in json", 
pa::GENERIC_ERROR); - } - int32_t element(value.GetInt()); - const char* src = reinterpret_cast(&element); - decoded_data->insert(decoded_data->end(), src, src + sizeof(int32_t)); - } else if (dt.compare("FP32") == 0) { - if (!value.IsDouble()) { - return cb::Error( - "unable to find float data in json", pa::GENERIC_ERROR); - } - float element(value.GetFloat()); - const char* src = reinterpret_cast(&element); - decoded_data->insert(decoded_data->end(), src, src + sizeof(float)); - } else if (dt.compare("UINT64") == 0) { - if (!value.IsUint64()) { - return cb::Error( - "unable to find uint64_t data in json", pa::GENERIC_ERROR); - } - uint64_t element(value.GetUint64()); - const char* src = reinterpret_cast(&element); - decoded_data->insert(decoded_data->end(), src, src + sizeof(uint64_t)); - } else if (dt.compare("INT64") == 0) { - if (!value.IsInt64()) { - return cb::Error( - "unable to find int64_t data in json", pa::GENERIC_ERROR); - } - int64_t element(value.GetInt64()); - const char* src = reinterpret_cast(&element); - decoded_data->insert(decoded_data->end(), src, src + sizeof(int64_t)); - } else if (dt.compare("FP64") == 0) { - if (!value.IsDouble()) { - return cb::Error( - "unable to find fp64 data in json", pa::GENERIC_ERROR); - } - double element(value.GetDouble()); - const char* src = reinterpret_cast(&element); - decoded_data->insert(decoded_data->end(), src, src + sizeof(double)); - } else { - return cb::Error("Unexpected type " + dt); - } - } - } - return cb::Error::Success; -} - -std::string -GetRandomString(const int string_length) -{ - std::mt19937_64 gen{std::random_device()()}; - std::uniform_int_distribution dist{0, character_set.length() - 1}; - std::string random_string; - std::generate_n(std::back_inserter(random_string), string_length, [&] { - return character_set[dist(gen)]; - }); - return random_string; -} - -std::string -ShapeVecToString(const std::vector shape_vec, bool skip_first) -{ - bool first = true; - std::string str("["); - for (const auto& value : shape_vec) { - if (skip_first) { - skip_first = false; - continue; - } - if (!first) { - str += ","; - } - str += std::to_string(value); - first = false; - } - - str += "]"; - return str; -} - -std::string -TensorToRegionName(std::string name) -{ - // Remove slashes from the name, if any. 
- name.erase( - std::remove_if( - name.begin(), name.end(), - [](const char& c) { return ((c == '/') || (c == '\\')); }), - name.end()); - return name; -} - -template <> -std::function -ScheduleDistribution(const double request_rate) -{ - std::exponential_distribution<> dist = - std::exponential_distribution<>(request_rate); - return [dist](std::mt19937& gen) mutable { - return std::chrono::duration_cast( - std::chrono::duration(dist(gen))); - }; -} - -template <> -std::function -ScheduleDistribution(const double request_rate) -{ - std::chrono::nanoseconds period = - std::chrono::duration_cast( - std::chrono::duration(1.0 / request_rate)); - return [period](std::mt19937& /*gen*/) { return period; }; -} - -cb::TensorFormat -ParseTensorFormat(const std::string& content_type_str) -{ - std::string content_type_str_lowercase{content_type_str}; - std::transform( - content_type_str.cbegin(), content_type_str.cend(), - content_type_str_lowercase.begin(), - [](unsigned char c) { return std::tolower(c); }); - if (content_type_str_lowercase == "binary") { - return cb::TensorFormat::BINARY; - } else if (content_type_str_lowercase == "json") { - return cb::TensorFormat::JSON; - } else { - return cb::TensorFormat::UNKNOWN; - } -} - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/perf_utils.h b/src/c++/perf_analyzer/perf_utils.h deleted file mode 100644 index 6975d694b..000000000 --- a/src/c++/perf_analyzer/perf_utils.h +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
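The two ScheduleDistribution specializations above are the heart of request-rate mode: CONSTANT returns a fixed period of 1/rate, while POISSON draws exponentially distributed gaps whose mean is 1/rate, which yields a Poisson arrival process. A self-contained sketch of generating such a schedule (the five-request loop and variable names are illustrative):

    #include <chrono>
    #include <iostream>
    #include <random>

    int
    main()
    {
      const double request_rate = 100.0;  // target requests per second
      std::mt19937 gen{std::random_device{}()};
      // Exponential inter-arrival gaps with mean 1/rate => Poisson arrivals.
      std::exponential_distribution<> gap_seconds{request_rate};

      std::chrono::nanoseconds next{0};
      for (int i = 0; i < 5; i++) {
        next += std::chrono::duration_cast<std::chrono::nanoseconds>(
            std::chrono::duration<double>(gap_seconds(gen)));
        std::cout << "request " << i << " scheduled at " << next.count() << " ns"
                  << std::endl;
      }
      return 0;
    }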
-#pragma once - -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "client_backend/client_backend.h" - -namespace pa = triton::perfanalyzer; -namespace cb = triton::perfanalyzer::clientbackend; - -namespace triton { namespace perfanalyzer { - -constexpr uint64_t NANOS_PER_SECOND = 1000000000; -constexpr uint64_t NANOS_PER_MILLIS = 1000000; -#define CHRONO_TO_NANOS(TS) \ - (std::chrono::duration_cast(TS.time_since_epoch()) \ - .count()) -#define CHRONO_TO_MILLIS(TS) (CHRONO_TO_NANOS(TS) / pa::NANOS_PER_MILLIS) - -//============================================================================== - -// Will use the characters specified here to construct random strings -std::string const character_set = - "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890 .?!"; - -// A boolean flag to mark an interrupt and commencement of early exit -extern volatile bool early_exit; - -enum Distribution { POISSON = 0, CONSTANT = 1, CUSTOM = 2 }; -enum SearchMode { LINEAR = 0, BINARY = 1, NONE = 2 }; -enum SharedMemoryType { - SYSTEM_SHARED_MEMORY = 0, - CUDA_SHARED_MEMORY = 1, - NO_SHARED_MEMORY = 2 -}; - -constexpr uint64_t NO_LIMIT = 0; - -// Templated range class that tracks the start, stop, and step for a range. -// -template -class Range { - public: - Range(T start, T end, T step) : start(start), end(end), step(step) {} - - T start; - T end; - T step; -}; - -// Converts the datatype from tensorflow to perf analyzer space -// \param tf_dtype The data type string returned from the model metadata. -// \param datatype Returns the datatype in perf_analyzer space. -// \return error status. Returns Non-Ok if an error is encountered during -// read operation. -cb::Error ConvertDTypeFromTFS( - const std::string& tf_dtype, std::string* datatype); - -// Parse the communication protocol type -cb::ProtocolType ParseProtocol(const std::string& str); - -// To check whether the path points to a valid system directory -bool IsDirectory(const std::string& path); - -// To check whether the path points to a valid system file -bool IsFile(const std::string& complete_path); - -// Calculates the byte size tensor for given shape and datatype. -int64_t ByteSize( - const std::vector& shape, const std::string& datatype); - -// Get the number of elements in the tensor for given shape. -int64_t ElementCount(const std::vector& shape); - -// Serializes the string tensor to length prepended bytes. -void SerializeStringTensor( - std::vector string_tensor, std::vector* serialized_data); - -// Serializes an explicit tensor read from the data file to the -// raw bytes. -cb::Error SerializeExplicitTensor( - const rapidjson::Value& tensor, const std::string& dt, - std::vector* decoded_data); - -// Generates a random string of specified length using characters specified in -// character_set. -std::string GetRandomString(const int string_length); - -// Returns the shape string containing the values provided in the vector -std::string ShapeVecToString( - const std::vector shape_vec, bool skip_first = false); - -// Remove slashes from tensor name, if any -std::string TensorToRegionName(std::string name); - -// Returns the request schedule distribution generator with the specified -// request rate. 
-template -std::function ScheduleDistribution( - const double request_rate); - -// Parse the HTTP tensor format -cb::TensorFormat ParseTensorFormat(const std::string& tensor_format_str); - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/periodic_concurrency_manager.cc b/src/c++/perf_analyzer/periodic_concurrency_manager.cc deleted file mode 100644 index a8375ed65..000000000 --- a/src/c++/perf_analyzer/periodic_concurrency_manager.cc +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2023-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
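SerializeStringTensor, declared above, produces the length-prepended BYTES layout: each element is written as a 4-byte element length in host byte order followed by the raw characters. Restated in standalone form (the sample strings are arbitrary):

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <vector>

    int
    main()
    {
      const std::vector<std::string> tensor{"cat", "tiger"};
      std::vector<char> serialized;
      for (const auto& s : tensor) {
        const uint32_t len = s.size();  // 4-byte length prefix, host byte order
        const char* len_bytes = reinterpret_cast<const char*>(&len);
        serialized.insert(serialized.end(), len_bytes, len_bytes + sizeof(len));
        serialized.insert(serialized.end(), s.begin(), s.end());
      }
      // "cat" and "tiger" serialize to 4 + 3 + 4 + 5 = 16 bytes.
      std::cout << serialized.size() << " bytes" << std::endl;
      return 0;
    }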
- -#include "periodic_concurrency_manager.h" - -namespace triton { namespace perfanalyzer { - -std::vector -PeriodicConcurrencyManager::RunExperiment() -{ - AddConcurrentRequests(concurrency_range_.start); - WaitForRequestsToFinish(); - return GetRequestRecords(); -} - -std::shared_ptr -PeriodicConcurrencyManager::MakeWorker( - std::shared_ptr thread_stat, - std::shared_ptr thread_config) -{ - uint32_t id = workers_.size(); - auto worker = std::make_shared( - id, thread_stat, thread_config, parser_, data_loader_, factory_, - on_sequence_model_, async_, max_concurrency_, using_json_data_, - streaming_, batch_size_, wake_signal_, wake_mutex_, active_threads_, - execute_, infer_data_manager_, sequence_manager_, request_period_, - period_completed_callback_, request_completed_callback_); - return worker; -}; - -void -PeriodicConcurrencyManager::AddConcurrentRequests( - uint64_t num_concurrent_requests) -{ - for (size_t i = 0; i < num_concurrent_requests; i++) { - AddConcurrentRequest(i); - } - num_incomplete_periods_ = num_concurrent_requests; -} - -void -PeriodicConcurrencyManager::AddConcurrentRequest(size_t seq_stat_index_offset) -{ - threads_stat_.emplace_back(std::make_shared()); - threads_config_.emplace_back( - std::make_shared(threads_config_.size())); - threads_config_.back()->concurrency_ = 1; - threads_config_.back()->seq_stat_index_offset_ = seq_stat_index_offset; - workers_.emplace_back( - MakeWorker(threads_stat_.back(), threads_config_.back())); - threads_.emplace_back(&IWorker::Infer, workers_.back()); - active_threads_++; -} - -void -PeriodicConcurrencyManager::PeriodCompletedCallback() -{ - std::lock_guard lock(period_completed_callback_mutex_); - num_incomplete_periods_--; - if (num_incomplete_periods_ == 0) { - steps_completed_++; - uint64_t num_requests_sent{steps_completed_ * concurrency_range_.step}; - if (num_requests_sent < concurrency_range_.end) { - AddConcurrentRequests(concurrency_range_.step); - } - } -} - -void -PeriodicConcurrencyManager::RequestCompletedCallback() -{ - std::lock_guard lock(request_completed_callback_mutex_); - num_completed_requests_++; - if (num_completed_requests_ == concurrency_range_.end) { - all_requests_completed_promise_.set_value(true); - } -} - -void -PeriodicConcurrencyManager::WaitForRequestsToFinish() -{ - std::future all_requests_completed_future{ - all_requests_completed_promise_.get_future()}; - all_requests_completed_future.get(); -} - -std::vector -PeriodicConcurrencyManager::GetRequestRecords() -{ - std::vector request_records{}; - for (const auto& thread_stat : threads_stat_) { - request_records.insert( - request_records.end(), thread_stat->request_records_.cbegin(), - thread_stat->request_records_.cend()); - } - return request_records; -} - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/periodic_concurrency_manager.h b/src/c++/perf_analyzer/periodic_concurrency_manager.h deleted file mode 100644 index 40a0634b4..000000000 --- a/src/c++/perf_analyzer/periodic_concurrency_manager.h +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2023-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#pragma once - -#include - -#include "concurrency_manager.h" -#include "periodic_concurrency_worker.h" - -namespace triton { namespace perfanalyzer { - -/// @brief Concurrency manager for periodically increasing concurrency by a step -/// amount based on the number of responses received (request period) by the -/// latest N (step or start concurrency for first-issued concurrent requests) -/// concurrent requests/workers. -class PeriodicConcurrencyManager : public ConcurrencyManager { - public: - PeriodicConcurrencyManager( - const bool async, const bool streaming, const int32_t batch_size, - const size_t max_threads, const size_t max_concurrency, - const SharedMemoryType shared_memory_type, const size_t output_shm_size, - - const std::shared_ptr& parser, - const std::shared_ptr& factory, - const Range concurrency_range, const uint64_t request_period, - const std::unordered_map& - request_parameters) - : ConcurrencyManager( - async, streaming, batch_size, max_threads, max_concurrency, - shared_memory_type, output_shm_size, parser, factory, - request_parameters), - concurrency_range_(concurrency_range), request_period_(request_period) - { - } - - std::vector RunExperiment(); - - private: - std::shared_ptr MakeWorker( - std::shared_ptr thread_stat, - std::shared_ptr thread_config) override; - - void AddConcurrentRequests(uint64_t num_concurrent_requests); - - void AddConcurrentRequest(size_t seq_stat_index_offset); - - void PeriodCompletedCallback(); - - void RequestCompletedCallback(); - - void WaitForRequestsToFinish(); - - std::vector GetRequestRecords(); - - Range concurrency_range_{1, 1, 1}; - uint64_t request_period_{0}; - uint64_t steps_completed_{0}; - uint64_t num_incomplete_periods_{0}; - uint64_t num_completed_requests_{0}; - std::mutex period_completed_callback_mutex_{}; - std::mutex request_completed_callback_mutex_{}; - std::promise all_requests_completed_promise_{}; - std::function period_completed_callback_{ - std::bind(&PeriodicConcurrencyManager::PeriodCompletedCallback, this)}; - std::function request_completed_callback_{ - std::bind(&PeriodicConcurrencyManager::RequestCompletedCallback, this)}; -}; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/periodic_concurrency_worker.cc b/src/c++/perf_analyzer/periodic_concurrency_worker.cc deleted file mode 100644 index 
9af3a9d87..000000000 --- a/src/c++/perf_analyzer/periodic_concurrency_worker.cc +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "periodic_concurrency_worker.h" - -namespace triton { namespace perfanalyzer { - -void -PeriodicConcurrencyWorker::Infer() -{ - CreateCtxIdTracker(); - ReserveContexts(); - RunInference(); -} - -std::shared_ptr -PeriodicConcurrencyWorker::CreateInferContext() -{ - std::shared_ptr infer_context{std::make_shared( - id_, ctxs_.size(), async_, streaming_, on_sequence_model_, - using_json_data_, batch_size_, thread_stat_, data_loader_, parser_, - factory_, execute_, infer_data_manager_, sequence_manager_)}; - infer_context->RegisterWorkerCallback(worker_callback_); - return infer_context; -} - -void -PeriodicConcurrencyWorker::WorkerCallback(uint32_t infer_context_id) -{ - if (ctxs_.at(infer_context_id)->GetNumResponsesForCurrentRequest() == - request_period_) { - period_completed_callback_(); - } - if (ctxs_.at(infer_context_id)->HasReceivedFinalResponse()) { - bool has_not_completed_period{ - ctxs_.at(infer_context_id)->GetNumResponsesForCurrentRequest() < - request_period_}; - if (has_not_completed_period) { - throw std::runtime_error( - "Request received final response before request period was reached. " - "Request period must be at most the total number of responses " - "received by any request."); - } - request_completed_callback_(); - } -} - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/periodic_concurrency_worker.h b/src/c++/perf_analyzer/periodic_concurrency_worker.h deleted file mode 100644 index 7242219b9..000000000 --- a/src/c++/perf_analyzer/periodic_concurrency_worker.h +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// * Neither the name of NVIDIA CORPORATION nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#pragma once
-
-#include
-
-#include "concurrency_worker.h"
-
-namespace triton { namespace perfanalyzer {
-
-/// @brief Worker class for periodic concurrency mode. Issues one request only
-/// and waits for all responses to come in. Notifies manager when N responses
-/// (request period) have been received. Notifies manager when final response
-/// has been received.
-class PeriodicConcurrencyWorker : public ConcurrencyWorker { - public: - PeriodicConcurrencyWorker( - uint32_t id, std::shared_ptr thread_stat, - std::shared_ptr thread_config, - const std::shared_ptr parser, - std::shared_ptr data_loader, - const std::shared_ptr factory, - const bool on_sequence_model, const bool async, - const size_t max_concurrency, const bool using_json_data, - const bool streaming, const int32_t batch_size, - std::condition_variable& wake_signal, std::mutex& wake_mutex, - size_t& active_threads, bool& execute, - const std::shared_ptr& infer_data_manager, - std::shared_ptr sequence_manager, - uint64_t request_period, std::function period_completed_callback, - std::function request_completed_callback) - : ConcurrencyWorker( - id, thread_stat, thread_config, parser, data_loader, factory, - on_sequence_model, async, max_concurrency, using_json_data, - streaming, batch_size, wake_signal, wake_mutex, active_threads, - execute, infer_data_manager, sequence_manager), - request_period_(request_period), - period_completed_callback_(period_completed_callback), - request_completed_callback_(request_completed_callback) - { - } - - void Infer() override; - - std::shared_ptr CreateInferContext() override; - - void WorkerCallback(uint32_t infer_context_id); - - private: - uint64_t request_period_{0}; - std::function period_completed_callback_{nullptr}; - std::function request_completed_callback_{nullptr}; - std::function worker_callback_{std::bind( - &PeriodicConcurrencyWorker::WorkerCallback, this, std::placeholders::_1)}; -}; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/profile_data_collector.cc b/src/c++/perf_analyzer/profile_data_collector.cc deleted file mode 100644 index 8cca26a70..000000000 --- a/src/c++/perf_analyzer/profile_data_collector.cc +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
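One wiring detail in the worker above: worker_callback_ is a std::function built with std::bind over a member function, so each InferContext can call back into the worker with its context id. The same pattern in isolation (hypothetical class, not the deleted one):

#include <cstdint>
#include <functional>
#include <iostream>

class CallbackOwner {
 public:
  // Bound in a default member initializer, as worker_callback_ is above;
  // std::placeholders::_1 forwards the context id argument at call time.
  std::function<void(uint32_t)> callback_{
      std::bind(&CallbackOwner::OnResponse, this, std::placeholders::_1)};

  void OnResponse(uint32_t ctx_id)
  {
    std::cout << "response arrived on context " << ctx_id << "\n";
  }
};

int main()
{
  CallbackOwner owner;
  owner.callback_(3);  // invokes owner.OnResponse(3)
}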
- -#include "profile_data_collector.h" - -#include - -#include "perf_utils.h" - -namespace triton { namespace perfanalyzer { - -cb::Error -ProfileDataCollector::Create(std::shared_ptr* collector) -{ - std::shared_ptr local_collector{ - new ProfileDataCollector()}; - *collector = std::move(local_collector); - return cb::Error::Success; -} - -void -ProfileDataCollector::AddWindow( - InferenceLoadMode& id, uint64_t window_start_ns, uint64_t window_end_ns) -{ - auto it = FindExperiment(id); - - if (it == experiments_.end()) { - Experiment new_experiment{}; - new_experiment.mode = id; - new_experiment.window_boundaries.push_back(window_start_ns); - new_experiment.window_boundaries.push_back(window_end_ns); - - experiments_.push_back(new_experiment); - } else { - // Window timestamps are always increasing so it is safe to check only the - // last element - if (it->window_boundaries.back() != window_start_ns) { - it->window_boundaries.push_back(window_start_ns); - } - it->window_boundaries.push_back(window_end_ns); - } -} - -void -ProfileDataCollector::AddData( - InferenceLoadMode& id, std::vector&& request_records) -{ - auto it = FindExperiment(id); - - if (it == experiments_.end()) { - Experiment new_experiment{}; - new_experiment.mode = id; - new_experiment.requests = std::move(request_records); - experiments_.push_back(new_experiment); - } else { - it->requests.insert( - it->requests.end(), std::make_move_iterator(request_records.begin()), - std::make_move_iterator(request_records.end())); - } -} - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/profile_data_collector.h b/src/c++/perf_analyzer/profile_data_collector.h deleted file mode 100644 index 3a726bbf4..000000000 --- a/src/c++/perf_analyzer/profile_data_collector.h +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -#pragma once - -#include -#include -#include - -#include "client_backend/client_backend.h" -#include "constants.h" -#include "perf_utils.h" -#include "request_record.h" - -namespace triton { namespace perfanalyzer { - -/// Data structure to hold which inference load mode was used for an experiment. -/// Only one data member will be nonzero, indicating the inference load mode for -/// a particular experiment. -struct InferenceLoadMode { - uint32_t concurrency; - double request_rate; - - InferenceLoadMode() - { - concurrency = 0; - request_rate = 0.0; - } - - InferenceLoadMode(uint64_t c, double rr) - { - concurrency = c; - request_rate = rr; - } - - bool operator==(const InferenceLoadMode& rhs) const - { - return (concurrency == rhs.concurrency) && - (request_rate == rhs.request_rate); - } -}; - -/// Data structure to hold profile export data for an experiment (e.g. -/// concurrency 4 or request rate 50) -struct Experiment { - InferenceLoadMode mode; - std::vector requests; - std::vector window_boundaries; -}; - -#ifndef DOCTEST_CONFIG_DISABLE -class NaggyMockProfileDataCollector; -#endif - -/// Data structure and methods for storing profile export data. -class ProfileDataCollector { - public: - static cb::Error Create(std::shared_ptr* collector); - ~ProfileDataCollector() = default; - - - /// Add a measurement window to the collector - /// @param id Identifier for the experiment - /// @param window_start_ns The window start timestamp in nanoseconds. - /// @param window_end_ns The window end timestamp in nanoseconds. - void AddWindow( - InferenceLoadMode& id, uint64_t window_start_ns, uint64_t window_end_ns); - - /// Add request records to an experiment - /// @param id Identifier for the experiment - /// @param request_records The request information for the current experiment. - void AddData( - InferenceLoadMode& id, std::vector&& request_records); - - /// Get the experiment data for the profile - /// @return Experiment data - std::vector& GetData() { return experiments_; } - - std::string& GetVersion() { return version_; } - - private: - ProfileDataCollector() = default; - - virtual std::vector::iterator FindExperiment( - InferenceLoadMode& id) - { - return std::find_if( - experiments_.begin(), experiments_.end(), - [&id](const Experiment& e) { return e.mode == id; }); - }; - - std::vector experiments_{}; - std::string version_{VERSION}; - -#ifndef DOCTEST_CONFIG_DISABLE - friend NaggyMockProfileDataCollector; -#endif -}; -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/profile_data_exporter.cc b/src/c++/perf_analyzer/profile_data_exporter.cc deleted file mode 100644 index ea79d6856..000000000 --- a/src/c++/perf_analyzer/profile_data_exporter.cc +++ /dev/null @@ -1,302 +0,0 @@ -// Copyright 2023-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
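A detail of AddData above that is easy to gloss over: incoming records are spliced into the existing experiment through std::make_move_iterator, so each RequestRecord is moved rather than copied. The idiom on its own, with std::string standing in for RequestRecord:

#include <iterator>
#include <string>
#include <vector>

int main()
{
  std::vector<std::string> experiment_requests{"r1"};
  std::vector<std::string> incoming{"r2", "r3"};
  experiment_requests.insert(
      experiment_requests.end(),
      std::make_move_iterator(incoming.begin()),
      std::make_move_iterator(incoming.end()));
  // incoming's elements are now moved-from; experiment_requests
  // holds r1, r2, r3 without copying the payloads.
}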
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -#include "profile_data_exporter.h" - -#include -#include -#include - -#include "client_backend/client_backend.h" - -namespace triton { namespace perfanalyzer { - -cb::Error -ProfileDataExporter::Create(std::shared_ptr* exporter) -{ - std::shared_ptr local_exporter{ - new ProfileDataExporter()}; - *exporter = std::move(local_exporter); - return cb::Error::Success; -} - -void -ProfileDataExporter::Export( - const std::vector& raw_experiments, std::string& raw_version, - std::string& file_path, cb::BackendKind& service_kind, - std::string& endpoint) -{ - ConvertToJson(raw_experiments, raw_version, service_kind, endpoint); - OutputToFile(file_path); -} - -void -ProfileDataExporter::ConvertToJson( - const std::vector& raw_experiments, std::string& raw_version, - cb::BackendKind& service_kind, std::string& endpoint) -{ - ClearDocument(); - rapidjson::Value experiments(rapidjson::kArrayType); - - for (const auto& raw_experiment : raw_experiments) { - rapidjson::Value entry(rapidjson::kObjectType); - rapidjson::Value experiment(rapidjson::kObjectType); - rapidjson::Value requests(rapidjson::kArrayType); - rapidjson::Value window_boundaries(rapidjson::kArrayType); - - AddExperiment(entry, experiment, raw_experiment); - AddRequests(entry, requests, raw_experiment); - AddWindowBoundaries(entry, window_boundaries, raw_experiment); - - experiments.PushBack(entry, document_.GetAllocator()); - } - - document_.AddMember("experiments", experiments, document_.GetAllocator()); - AddVersion(raw_version); - AddServiceKind(service_kind); - AddEndpoint(endpoint); -} - -void -ProfileDataExporter::ClearDocument() -{ - rapidjson::Document d{}; - document_.Swap(d); - document_.SetObject(); -} - -void -ProfileDataExporter::AddExperiment( - rapidjson::Value& entry, rapidjson::Value& experiment, - const Experiment& raw_experiment) -{ - rapidjson::Value mode; - rapidjson::Value value; - if (raw_experiment.mode.concurrency != 0) { - mode = rapidjson::StringRef("concurrency"); - value.SetUint64(raw_experiment.mode.concurrency); - } else { - mode = rapidjson::StringRef("request_rate"); - value.SetDouble(raw_experiment.mode.request_rate); - } - experiment.AddMember("mode", mode, document_.GetAllocator()); - experiment.AddMember("value", value, document_.GetAllocator()); - entry.AddMember("experiment", experiment, document_.GetAllocator()); -} - -void -ProfileDataExporter::AddRequests( - rapidjson::Value& entry, rapidjson::Value& requests, - const Experiment& raw_experiment) -{ - for (auto& raw_request : raw_experiment.requests) { - rapidjson::Value request(rapidjson::kObjectType); - rapidjson::Value timestamp; - - timestamp.SetUint64(raw_request.start_time_.time_since_epoch().count()); - request.AddMember("timestamp", timestamp, 
document_.GetAllocator()); - - if (raw_request.sequence_id_ != 0) { - rapidjson::Value sequence_id; - sequence_id.SetUint64(raw_request.sequence_id_); - request.AddMember("sequence_id", sequence_id, document_.GetAllocator()); - } - - rapidjson::Value request_inputs(rapidjson::kObjectType); - AddRequestInputs(request_inputs, raw_request.request_inputs_); - request.AddMember( - "request_inputs", request_inputs, document_.GetAllocator()); - - rapidjson::Value response_timestamps(rapidjson::kArrayType); - AddResponseTimestamps( - response_timestamps, raw_request.response_timestamps_); - request.AddMember( - "response_timestamps", response_timestamps, document_.GetAllocator()); - - rapidjson::Value response_outputs(rapidjson::kArrayType); - AddResponseOutputs(response_outputs, raw_request.response_outputs_); - request.AddMember( - "response_outputs", response_outputs, document_.GetAllocator()); - - requests.PushBack(request, document_.GetAllocator()); - } - entry.AddMember("requests", requests, document_.GetAllocator()); -} - -void -ProfileDataExporter::AddResponseTimestamps( - rapidjson::Value& timestamps_json, - const std::vector>& - timestamps) -{ - for (auto& timestamp : timestamps) { - rapidjson::Value timestamp_json; - timestamp_json.SetUint64(timestamp.time_since_epoch().count()); - timestamps_json.PushBack(timestamp_json, document_.GetAllocator()); - } -} - -void -ProfileDataExporter::AddRequestInputs( - rapidjson::Value& request_inputs_json, - const std::vector& request_inputs) -{ - for (const auto& request_input : request_inputs) { - for (const auto& input : request_input) { - const auto& name{input.first}; - const auto& buf{input.second.data_.get()}; - const auto& byte_size{input.second.size_}; - const auto& data_type{input.second.data_type_}; - rapidjson::Value name_json(name.c_str(), document_.GetAllocator()); - rapidjson::Value input_json{}; - // TMA-1777: support other data types - if (buf != nullptr) { - if (data_type == "BYTES" || data_type == "JSON") { - input_json.SetString( - reinterpret_cast(buf), byte_size, - document_.GetAllocator()); - } else if (data_type == "INT32") { - auto* val = reinterpret_cast(buf); - input_json.SetInt(*val); - } else if (data_type == "BOOL") { - bool is_true = (*buf > 0); - input_json.SetBool(is_true); - } else { - std::cerr << "WARNING: data type '" + data_type + - "' is not supported with JSON." 
- << std::endl; - } - } else { - input_json.SetString("", 0, document_.GetAllocator()); - } - request_inputs_json.AddMember( - name_json, input_json, document_.GetAllocator()); - } - } -} - -void -ProfileDataExporter::AddResponseOutputs( - rapidjson::Value& outputs_json, - const std::vector& response_outputs) -{ - for (const auto& response_output : response_outputs) { - rapidjson::Value response_output_json(rapidjson::kObjectType); - for (const auto& output : response_output) { - const auto& name{output.first}; - const auto& buf{output.second.data_.get()}; - const auto& byte_size{output.second.size_}; - rapidjson::Value name_json(name.c_str(), document_.GetAllocator()); - rapidjson::Value output_json{}; - // TMA-1777: support other data types - if (buf != nullptr) { - output_json.SetString( - reinterpret_cast(buf), byte_size, - document_.GetAllocator()); - } else { - output_json.SetString("", 0, document_.GetAllocator()); - } - response_output_json.AddMember( - name_json, output_json, document_.GetAllocator()); - } - outputs_json.PushBack(response_output_json, document_.GetAllocator()); - } -} - -void -ProfileDataExporter::AddWindowBoundaries( - rapidjson::Value& entry, rapidjson::Value& window_boundaries, - const Experiment& raw_experiment) -{ - for (auto& window : raw_experiment.window_boundaries) { - rapidjson::Value w; - w.SetUint64(window); - window_boundaries.PushBack(w, document_.GetAllocator()); - } - entry.AddMember( - "window_boundaries", window_boundaries, document_.GetAllocator()); -} - -void -ProfileDataExporter::AddVersion(std::string& raw_version) -{ - rapidjson::Value version; - version = rapidjson::StringRef(raw_version.c_str()); - document_.AddMember("version", version, document_.GetAllocator()); -} - -void -ProfileDataExporter::AddServiceKind(cb::BackendKind& kind) -{ - std::string raw_service_kind{""}; - if (kind == cb::BackendKind::TRITON) { - raw_service_kind = "triton"; - } else if (kind == cb::BackendKind::TENSORFLOW_SERVING) { - raw_service_kind = "tfserving"; - } else if (kind == cb::BackendKind::TORCHSERVE) { - raw_service_kind = "torchserve"; - } else if (kind == cb::BackendKind::TRITON_C_API) { - raw_service_kind = "triton_c_api"; - } else if (kind == cb::BackendKind::OPENAI) { - raw_service_kind = "openai"; - } else { - std::cerr << "Unknown service kind detected. The 'service_kind' will not " - "be specified." 
- << std::endl; - } - - rapidjson::Value service_kind; - service_kind.SetString(raw_service_kind.c_str(), document_.GetAllocator()); - document_.AddMember("service_kind", service_kind, document_.GetAllocator()); -} - -void -ProfileDataExporter::AddEndpoint(std::string& raw_endpoint) -{ - rapidjson::Value endpoint; - endpoint = rapidjson::StringRef(raw_endpoint.c_str()); - document_.AddMember("endpoint", endpoint, document_.GetAllocator()); -} - -void -ProfileDataExporter::OutputToFile(std::string& file_path) -{ - FILE* fp = fopen(file_path.c_str(), "w"); - if (fp == nullptr) { - throw PerfAnalyzerException( - "failed to open file for outputting raw profile data", GENERIC_ERROR); - } - char writeBuffer[65536]; - rapidjson::FileWriteStream os(fp, writeBuffer, sizeof(writeBuffer)); - - rapidjson::Writer writer(os); - document_.Accept(writer); - - fclose(fp); -} - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/profile_data_exporter.h b/src/c++/perf_analyzer/profile_data_exporter.h deleted file mode 100644 index 820148d7a..000000000 --- a/src/c++/perf_analyzer/profile_data_exporter.h +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2023-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#pragma once - -#include - -#include "client_backend/client_backend.h" -#include "profile_data_collector.h" - -namespace triton { namespace perfanalyzer { - -#ifndef DOCTEST_CONFIG_DISABLE -class NaggyMockProfileDataExporter; -#endif - -/// Exports profile data. -class ProfileDataExporter { - public: - static cb::Error Create(std::shared_ptr* exporter); - ~ProfileDataExporter() = default; - - /// Export profile data to json file - /// @param raw_experiments All of the raw data for the experiments run by perf - /// analyzer - /// @param raw_version String containing the version number for the json - /// output - /// @param file_path File path to export profile data to. - /// @param service_kind Service that Perf Analyzer generates load for. - /// @param endpoint Endpoint to send the requests. 
- void Export( - const std::vector& raw_experiments, std::string& raw_version, - std::string& file_path, cb::BackendKind& service_kind, - std::string& endpoint); - - private: - ProfileDataExporter() = default; - /// Convert the raw data collected to json output - /// @param raw_experiments All of the raw data for the experiments run by perf - /// analyzer - /// @param raw_version String containing the version number for the json - /// output - /// @param service_kind Service that Perf Analyzer generates load for. - /// @param endpoint Endpoint to send the requests. - virtual void ConvertToJson( - const std::vector& raw_experiments, std::string& raw_version, - cb::BackendKind& service_kind, std::string& endpoint); - virtual void OutputToFile(std::string& file_path); - virtual void AddExperiment( - rapidjson::Value& entry, rapidjson::Value& experiment, - const Experiment& raw_experiment); - void AddRequests( - rapidjson::Value& entry, rapidjson::Value& requests, - const Experiment& raw_experiment); - void AddRequestInputs( - rapidjson::Value& inputs_json, - const std::vector& inputs); - void AddResponseTimestamps( - rapidjson::Value& timestamps_json, - const std::vector>& - timestamps); - void AddResponseOutputs( - rapidjson::Value& outputs_json, - const std::vector& outputs); - void AddWindowBoundaries( - rapidjson::Value& entry, rapidjson::Value& window_boundaries, - const Experiment& raw_experiment); - void AddVersion(std::string& raw_version); - void AddServiceKind(cb::BackendKind& service_kind); - void AddEndpoint(std::string& endpoint); - void ClearDocument(); - - rapidjson::Document document_{}; - -#ifndef DOCTEST_CONFIG_DISABLE - friend NaggyMockProfileDataExporter; -#endif -}; -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/rand_ctx_id_tracker.h b/src/c++/perf_analyzer/rand_ctx_id_tracker.h deleted file mode 100644 index e850909a1..000000000 --- a/src/c++/perf_analyzer/rand_ctx_id_tracker.h +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
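For readers unfamiliar with rapidjson, the exporter above follows a standard recipe: build a Document, attach an "experiments" array, then stream the document through a FileWriteStream. A minimal sketch of that recipe with made-up values and file name, not the deleted code:

#include <cstdio>

#include <rapidjson/document.h>
#include <rapidjson/filewritestream.h>
#include <rapidjson/writer.h>

int main()
{
  rapidjson::Document d;
  d.SetObject();
  auto& alloc = d.GetAllocator();

  // One experiment entry, mirroring the mode/value pair used above.
  rapidjson::Value experiments(rapidjson::kArrayType);
  rapidjson::Value entry(rapidjson::kObjectType);
  entry.AddMember("mode", "concurrency", alloc);
  entry.AddMember("value", 4, alloc);
  experiments.PushBack(entry, alloc);
  d.AddMember("experiments", experiments, alloc);

  FILE* fp = fopen("profile_export.json", "w");  // hypothetical path
  if (fp == nullptr) return 1;
  char buffer[65536];
  rapidjson::FileWriteStream os(fp, buffer, sizeof(buffer));
  rapidjson::Writer<rapidjson::FileWriteStream> writer(os);
  d.Accept(writer);
  fclose(fp);
}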
IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#pragma once
-
-#include <random>
-
-#include "ictx_id_tracker.h"
-
-namespace triton { namespace perfanalyzer {
-
-// Context ID tracker that is always available and returns random Context IDs
-//
-class RandCtxIdTracker : public ICtxIdTracker {
- public:
-  RandCtxIdTracker() = default;
-
-  void Reset(size_t count) override
-  {
-    distribution_ = std::uniform_int_distribution<size_t>(0, count - 1);
-  }
-
-  void Restore(size_t id) override{};
-
-  size_t Get() override { return distribution_(rng_generator_); };
-
-  bool IsAvailable() override { return true; };
-
- private:
-  std::uniform_int_distribution<size_t> distribution_;
-  std::default_random_engine rng_generator_{};
-
-  size_t max = 0;
-};
-
-}};  // namespace triton::perfanalyzer
diff --git a/src/c++/perf_analyzer/rate_schedule.h b/src/c++/perf_analyzer/rate_schedule.h
deleted file mode 100644
index d45ecd31b..000000000
--- a/src/c++/perf_analyzer/rate_schedule.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// * Neither the name of NVIDIA CORPORATION nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
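The random tracker above boils down to two lines of <random> machinery: Reset(count) re-arms a uniform distribution over [0, count - 1] and Get() samples it. Equivalent standalone usage:

#include <cstddef>
#include <iostream>
#include <random>

int main()
{
  std::default_random_engine rng{};
  // Reset(4): ids 0..3 become equally likely.
  std::uniform_int_distribution<std::size_t> distribution(0, 4 - 1);
  for (int i = 0; i < 8; i++) {
    std::cout << distribution(rng) << " ";  // Get(): one random id per call
  }
  std::cout << "\n";
}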
-
-#pragma once
-
-#include <chrono>
-#include <memory>
-#include <vector>
-
-namespace triton { namespace perfanalyzer {
-
-using NanoIntervals = std::vector<std::chrono::nanoseconds>;
-
-/// Defines a schedule, where the consumer should
-/// loop through the provided intervals, and then every time it loops back to
-/// the start add an additional amount equal to the duration
-///
-struct RateSchedule {
-  NanoIntervals intervals;
-  std::chrono::nanoseconds duration;
-
-  /// Returns the next timestamp in the schedule
-  ///
-  std::chrono::nanoseconds Next()
-  {
-    auto next = intervals[index_] + duration * rounds_;
-
-    index_++;
-    if (index_ >= intervals.size()) {
-      rounds_++;
-      index_ = 0;
-    }
-    return next;
-  }
-
- private:
-  size_t rounds_ = 0;
-  size_t index_ = 0;
-};
-
-using RateSchedulePtr_t = std::shared_ptr<RateSchedule>;
-
-}}  // namespace triton::perfanalyzer
diff --git a/src/c++/perf_analyzer/report_writer.cc b/src/c++/perf_analyzer/report_writer.cc
deleted file mode 100644
index 3d9cac6a2..000000000
--- a/src/c++/perf_analyzer/report_writer.cc
+++ /dev/null
@@ -1,391 +0,0 @@
-// Copyright 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// * Neither the name of NVIDIA CORPORATION nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
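To make the wrap-around rule in RateSchedule::Next() concrete: with intervals {100, 200, 300} and duration 300, every full pass over the intervals shifts the pattern by one duration, so the emitted timestamps are 100, 200, 300, 400, 500, 600, and so on. A runnable demo, restating the struct from the header above:

#include <chrono>
#include <cstddef>
#include <iostream>
#include <vector>

struct RateSchedule {  // restated from the deleted header for the demo
  std::vector<std::chrono::nanoseconds> intervals;
  std::chrono::nanoseconds duration;

  std::chrono::nanoseconds Next()
  {
    auto next = intervals[index_] + duration * rounds_;
    index_++;
    if (index_ >= intervals.size()) {
      rounds_++;  // each completed loop shifts the schedule by one duration
      index_ = 0;
    }
    return next;
  }

 private:
  std::size_t rounds_ = 0;
  std::size_t index_ = 0;
};

int main()
{
  using std::chrono::nanoseconds;
  RateSchedule schedule;
  schedule.intervals = {nanoseconds(100), nanoseconds(200), nanoseconds(300)};
  schedule.duration = nanoseconds(300);
  for (int i = 0; i < 6; i++) {
    std::cout << schedule.Next().count() << " ";  // 100 200 300 400 500 600
  }
  std::cout << "\n";
}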
- -#include "report_writer.h" - -#include -#include - -#include "constants.h" -#include "perf_analyzer_exception.h" - -namespace triton { namespace perfanalyzer { - - -cb::Error -ReportWriter::Create( - const std::string& filename, const bool target_concurrency, - const std::vector& summary, const bool verbose_csv, - const bool include_server_stats, const int32_t percentile, - const std::shared_ptr& parser, - std::unique_ptr* writer, const bool should_output_metrics) -{ - std::unique_ptr local_writer(new ReportWriter( - filename, target_concurrency, summary, verbose_csv, include_server_stats, - percentile, parser, should_output_metrics)); - - *writer = std::move(local_writer); - - return cb::Error::Success; -} - -ReportWriter::ReportWriter( - const std::string& filename, const bool target_concurrency, - const std::vector& summary, const bool verbose_csv, - const bool include_server_stats, const int32_t percentile, - const std::shared_ptr& parser, - const bool should_output_metrics) - : filename_(filename), target_concurrency_(target_concurrency), - summary_(summary), verbose_csv_(verbose_csv), - include_server_stats_(include_server_stats), percentile_(percentile), - parser_(parser), should_output_metrics_(should_output_metrics) -{ -} - - -void -ReportWriter::GenerateReport() -{ - if (!filename_.empty()) { - std::ofstream ofs(filename_, std::ofstream::out); - if (target_concurrency_) { - ofs << "Concurrency,"; - } else { - ofs << "Request Rate,"; - } - ofs << "Inferences/Second,"; - if (parser_->IsDecoupled()) { - ofs << "Response Throughput,"; - } - ofs << "Client Send,"; - if (include_server_stats_) { - ofs << "Network+Server Send/Recv,Server Queue," - << "Server Compute Input,Server Compute Infer," - << "Server Compute Output,"; - // Only include cache hit if enabled, keep out for backwards - // compatibility if disabled - if (parser_->ResponseCacheEnabled()) { - ofs << "Server Cache Hit,"; - ofs << "Server Cache Miss,"; - } - } - ofs << "Client Recv"; - for (const auto& percentile : - summary_[0].client_stats.percentile_latency_ns) { - ofs << ",p" << percentile.first << " latency"; - } - if (verbose_csv_) { - if (percentile_ == -1) { - ofs << ",Avg latency"; - } - ofs << ",request/response"; - ofs << ",response wait"; - if (should_output_metrics_) { - ofs << ",Avg GPU Utilization"; - ofs << ",Avg GPU Power Usage"; - ofs << ",Max GPU Memory Usage"; - ofs << ",Total GPU Memory"; - } - } - ofs << std::endl; - - // Sort summary results in order of increasing infer/sec. - std::sort( - summary_.begin(), summary_.end(), - [](const pa::PerfStatus& a, const pa::PerfStatus& b) -> bool { - return a.client_stats.infer_per_sec < b.client_stats.infer_per_sec; - }); - - for (pa::PerfStatus& status : summary_) { - if (target_concurrency_) { - ofs << status.concurrency << ","; - } else { - ofs << status.request_rate << ","; - } - - ofs << status.client_stats.infer_per_sec << ","; - if (parser_->IsDecoupled()) { - ofs << status.client_stats.responses_per_sec << ","; - } - ofs << (status.client_stats.avg_send_time_ns / 1000) << ","; - if (include_server_stats_) { - uint64_t avg_queue_ns = status.server_stats.queue_count > 0 - ? (status.server_stats.queue_time_ns / - status.server_stats.queue_count) - : 0; - uint64_t avg_compute_input_ns = - status.server_stats.compute_input_count > 0 - ? (status.server_stats.compute_input_time_ns / - status.server_stats.compute_input_count) - : 0; - uint64_t avg_compute_infer_ns = - status.server_stats.compute_infer_count > 0 - ? 
(status.server_stats.compute_infer_time_ns / - status.server_stats.compute_infer_count) - : 0; - uint64_t avg_compute_output_ns = - status.server_stats.compute_output_count > 0 - ? (status.server_stats.compute_output_time_ns / - status.server_stats.compute_output_count) - : 0; - uint64_t compute_time_ns = status.server_stats.compute_input_time_ns + - status.server_stats.compute_infer_time_ns + - status.server_stats.compute_output_time_ns; - if (status.server_stats.compute_input_count != - status.server_stats.compute_infer_count || - status.server_stats.compute_infer_count != - status.server_stats.compute_output_count) { - throw std::runtime_error( - "Server side statistics compute counts must be the same."); - } - uint64_t compute_cnt = status.server_stats.compute_input_count; - uint64_t avg_compute_ns = - compute_cnt > 0 ? compute_time_ns / compute_cnt : 0; - uint64_t avg_cache_hit_ns = - status.server_stats.cache_hit_count > 0 - ? (status.server_stats.cache_hit_time_ns / - status.server_stats.cache_hit_count) - : 0; - uint64_t avg_cache_miss_ns = - status.server_stats.cache_miss_count > 0 - ? (status.server_stats.cache_miss_time_ns / - status.server_stats.cache_miss_count) - : 0; - - uint64_t avg_client_wait_ns = status.client_stats.avg_latency_ns - - status.client_stats.avg_send_time_ns - - status.client_stats.avg_receive_time_ns; - // Network misc is calculated by subtracting data from different - // measurements (server v.s. client), so the result needs to be capped - // at 0 - uint64_t avg_accounted_time = avg_queue_ns + avg_compute_ns + - avg_cache_hit_ns + avg_cache_miss_ns; - uint64_t avg_network_misc_ns = - avg_client_wait_ns > avg_accounted_time - ? (avg_client_wait_ns - avg_accounted_time) - : 0; - - if (avg_network_misc_ns == 0) { - std::cerr << "Server average accounted time was larger than client " - "average wait time due to small sample size. Increase " - "the measurement interval with `--measurement-interval`." - << std::endl; - } - - ofs << (avg_network_misc_ns / 1000) << "," << (avg_queue_ns / 1000) - << "," << (avg_compute_input_ns / 1000) << "," - << (avg_compute_infer_ns / 1000) << "," - << (avg_compute_output_ns / 1000) << ","; - - if (parser_->ResponseCacheEnabled()) { - ofs << (avg_cache_hit_ns / 1000) << ","; - ofs << (avg_cache_miss_ns / 1000) << ","; - } - } - ofs << (status.client_stats.avg_receive_time_ns / 1000); - for (const auto& percentile : status.client_stats.percentile_latency_ns) { - ofs << "," << (percentile.second / 1000); - } - if (verbose_csv_) { - const uint64_t avg_latency_us = - status.client_stats.avg_latency_ns / 1000; - const uint64_t avg_send_time_us = - status.client_stats.avg_send_time_ns / 1000; - const uint64_t avg_receive_time_us = - status.client_stats.avg_receive_time_ns / 1000; - const uint64_t avg_request_time_us = - status.client_stats.avg_request_time_ns / 1000; - const uint64_t avg_response_wait_time_us = - avg_request_time_us - avg_send_time_us - avg_receive_time_us; - if (percentile_ == -1) { - ofs << "," << avg_latency_us; - } - ofs << "," << std::to_string(avg_send_time_us + avg_receive_time_us); - ofs << "," << std::to_string(avg_response_wait_time_us); - if (should_output_metrics_) { - if (status.metrics.size() == 1) { - WriteGpuMetrics(ofs, status.metrics[0]); - } else { - throw PerfAnalyzerException( - "There should only be one entry in the metrics vector.", - GENERIC_ERROR); - } - } - } - ofs << std::endl; - } - ofs.close(); - - if (include_server_stats_) { - // Record composing model stat in a separate file. 
- if (!summary_.front().server_stats.composing_models_stat.empty()) { - // For each of the composing model, generate CSV file in the same - // format as the one for ensemble. - for (const auto& model_identifier : - summary_[0].server_stats.composing_models_stat) { - const auto& name = model_identifier.first.first; - const auto& version = model_identifier.first.second; - const auto name_ver = name + "_v" + version; - - std::ofstream ofs(name_ver + "." + filename_, std::ofstream::out); - if (target_concurrency_) { - ofs << "Concurrency,"; - } else { - ofs << "Request Rate,"; - } - ofs << "Inferences/Second,Client Send," - << "Network+Server Send/Recv,Server Queue," - << "Server Compute Input,Server Compute Infer," - << "Server Compute Output,"; - - // Only include cache hit if enabled, keep out for backwards - // compatibility if disabled - if (parser_->ResponseCacheEnabled()) { - ofs << "Server Cache Hit,"; - ofs << "Server Cache Miss,"; - } - ofs << "Client Recv" << std::endl; - - for (pa::PerfStatus& status : summary_) { - auto it = status.server_stats.composing_models_stat.find( - model_identifier.first); - const auto& stats = it->second; - uint64_t avg_queue_ns = - stats.queue_count > 0 ? stats.queue_time_ns / stats.queue_count - : 0; - uint64_t avg_compute_input_ns = - stats.compute_input_count > 0 - ? stats.compute_input_time_ns / stats.compute_input_count - : 0; - uint64_t avg_compute_infer_ns = - stats.compute_infer_count > 0 - ? stats.compute_infer_time_ns / stats.compute_infer_count - : 0; - uint64_t avg_compute_output_ns = - stats.compute_output_count > 0 - ? stats.compute_output_time_ns / stats.compute_output_count - : 0; - uint64_t compute_time_ns = stats.compute_input_time_ns + - stats.compute_infer_time_ns + - stats.compute_output_time_ns; - if (stats.compute_input_count != stats.compute_infer_count || - stats.compute_infer_count != stats.compute_output_count) { - throw std::runtime_error( - "Server side statistics compute counts must be the same."); - } - uint64_t compute_cnt = stats.compute_input_count; - uint64_t avg_compute_ns = - compute_cnt > 0 ? compute_time_ns / compute_cnt : 0; - uint64_t avg_cache_hit_ns = - stats.cache_hit_count > 0 - ? stats.cache_hit_time_ns / stats.cache_hit_count - : 0; - uint64_t avg_cache_miss_ns = - stats.cache_miss_count > 0 - ? stats.cache_miss_time_ns / stats.cache_miss_count - : 0; - - uint64_t avg_overhead_ns = - stats.success_count > 0 - ? stats.cumm_time_ns / stats.success_count - : 0; - const uint64_t avg_accounted_time = avg_queue_ns + avg_compute_ns + - avg_cache_hit_ns + - avg_cache_miss_ns; - avg_overhead_ns = (avg_overhead_ns > avg_accounted_time) - ? (avg_overhead_ns - avg_accounted_time) - : 0; - - if (avg_overhead_ns == 0) { - std::cerr - << "Server average accounted time was larger than client " - "average wait time due to small sample size. Increase " - "the measurement interval with `--measurement-interval`." - << std::endl; - } - - // infer / sec of the composing model is calculated using the - // request count ratio between the composing model and the - // ensemble - double infer_ratio = status.server_stats.success_count > 0 - ? 
(1.0 * stats.success_count / - status.server_stats.success_count) - : 0.0; - double infer_per_sec = - infer_ratio * status.client_stats.infer_per_sec; - if (target_concurrency_) { - ofs << status.concurrency << ","; - } else { - ofs << status.request_rate << ","; - } - ofs << infer_per_sec << ",0," << (avg_overhead_ns / 1000) << "," - << (avg_queue_ns / 1000) << "," << (avg_compute_input_ns / 1000) - << "," << (avg_compute_infer_ns / 1000) << "," - << (avg_compute_output_ns / 1000) << ","; - - // Only include cache hit if enabled, keep out for backwards - // compatibility if disabled - if (parser_->ResponseCacheEnabled()) { - ofs << (avg_cache_hit_ns / 1000) << ","; - ofs << (avg_cache_miss_ns / 1000) << ","; - } - // Client recv - ofs << "0" << std::endl; - } - } - ofs.close(); - } - } - } -} - -void -ReportWriter::WriteGpuMetrics(std::ostream& ofs, const Metrics& metric) -{ - auto& gpu_util_map = metric.gpu_utilization_per_gpu; - auto& gpu_power_usage_map = metric.gpu_power_usage_per_gpu; - auto& gpu_mem_usage_map = metric.gpu_memory_used_bytes_per_gpu; - auto& gpu_total_mem_map = metric.gpu_memory_total_bytes_per_gpu; - // Currently assume GPU metrics will be appended to existing line - ofs << ","; - for (auto& entry : gpu_util_map) { - ofs << entry.first << ":" << entry.second << ";"; - } - ofs << ","; - for (auto& entry : gpu_power_usage_map) { - ofs << entry.first << ":" << entry.second << ";"; - } - ofs << ","; - for (auto& entry : gpu_mem_usage_map) { - ofs << entry.first << ":" << entry.second << ";"; - } - ofs << ","; - for (auto& entry : gpu_total_mem_map) { - ofs << entry.first << ":" << entry.second << ";"; - } -} - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/report_writer.h b/src/c++/perf_analyzer/report_writer.h deleted file mode 100644 index eeb09c9a4..000000000 --- a/src/c++/perf_analyzer/report_writer.h +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
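The GPU columns written by WriteGpuMetrics above use a compact cell encoding: each cell starts with a comma and holds semicolon-separated gpu_id:value pairs. A small sketch of one such cell with hypothetical ids and values:

#include <iostream>
#include <map>
#include <string>

void WriteMetricCell(
    std::ostream& os, const std::map<std::string, double>& per_gpu)
{
  os << ",";  // cell separator, matching the comma-first layout above
  for (const auto& entry : per_gpu) {
    os << entry.first << ":" << entry.second << ";";
  }
}

int main()
{
  // hypothetical utilization values keyed by GPU id
  std::map<std::string, double> util{{"gpu0", 0.83}, {"gpu1", 0.79}};
  WriteMetricCell(std::cout, util);  // prints: ,gpu0:0.83;gpu1:0.79;
  std::cout << "\n";
}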
-#pragma once - -#include - -#include "client_backend/client_backend.h" -#include "inference_profiler.h" -#include "metrics.h" -#include "model_parser.h" -#include "perf_utils.h" - -namespace triton { namespace perfanalyzer { - -#ifndef DOCTEST_CONFIG_DISABLE -class TestReportWriter; -#endif - -//============================================================================== -/// ReportWriter is a helper class to generate csv files from the profiled data. -/// -class ReportWriter { - public: - ~ReportWriter() = default; - - /// Create a ReportWriter that is responsible for generating csv output files. - /// \param filename Name of csv file. - /// \param target_concurrency Is there a concurrency range or request rate - /// range? - /// \param summary Returns the trace of the measurement along the - /// search path. - /// \param verbose_csv Print extra information for Model Analyzer - /// \param include_server_stats Are server stats included in output - /// \param percentile The percentile in terms of latency to be reported. - /// if it is a valid percentile value, the percentile latency will reported - /// and used as stable criteria instead of average latency. If it is -1, - /// average latency will be reported and used as stable criteria. - /// \param parser The ModelParse object which holds all the details about the - /// model. - /// \param writer Returns a new ReportWriter object. - /// \param should_output_metrics Whether server-side inference server metrics - /// should be output. - /// \return cb::Error object indicating success or failure. - static cb::Error Create( - const std::string& filename, const bool target_concurrency, - const std::vector& summary, const bool verbose_csv, - const bool include_server_stats, const int32_t percentile, - const std::shared_ptr& parser, - std::unique_ptr* writer, const bool should_output_metrics); - - void GenerateReport(); - - /// Output gpu metrics to a stream - /// \param ofs A stream to output the csv data - /// \param metric The metric container for a particular concurrency or request - /// rate - void WriteGpuMetrics(std::ostream& ofs, const Metrics& metric); - - private: - ReportWriter( - const std::string& filename, const bool target_concurrency, - const std::vector& summary, const bool verbose_csv, - const bool include_server_stats, const int32_t percentile, - const std::shared_ptr& parser, - const bool should_output_metrics); - - - const std::string& filename_{""}; - const bool target_concurrency_{true}; - const bool include_server_stats_{true}; - const bool verbose_csv_{true}; - const int32_t percentile_{90}; - std::vector summary_{}; - const std::shared_ptr& parser_{nullptr}; - const bool should_output_metrics_{false}; - -#ifndef DOCTEST_CONFIG_DISABLE - friend TestReportWriter; - - public: - ReportWriter() = default; -#endif -}; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/request_rate_manager.cc b/src/c++/perf_analyzer/request_rate_manager.cc deleted file mode 100644 index be12282ab..000000000 --- a/src/c++/perf_analyzer/request_rate_manager.cc +++ /dev/null @@ -1,305 +0,0 @@ -// Copyright 2020-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "request_rate_manager.h" - -namespace triton { namespace perfanalyzer { - -RequestRateManager::~RequestRateManager() -{ - // The destruction of derived class should wait for all the request generator - // threads to finish - StopWorkerThreads(); -} - -cb::Error -RequestRateManager::Create( - const bool async, const bool streaming, - const uint64_t measurement_window_ms, const size_t max_trials, - Distribution request_distribution, const int32_t batch_size, - const size_t max_threads, const uint32_t num_of_sequences, - const SharedMemoryType shared_memory_type, const size_t output_shm_size, - const bool serial_sequences, const std::shared_ptr& parser, - const std::shared_ptr& factory, - std::unique_ptr* manager, - const std::unordered_map& - request_parameters) -{ - std::unique_ptr local_manager(new RequestRateManager( - async, streaming, request_distribution, batch_size, measurement_window_ms, - max_trials, max_threads, num_of_sequences, shared_memory_type, - output_shm_size, serial_sequences, parser, factory, request_parameters)); - - *manager = std::move(local_manager); - - return cb::Error::Success; -} - -RequestRateManager::RequestRateManager( - const bool async, const bool streaming, Distribution request_distribution, - int32_t batch_size, const uint64_t measurement_window_ms, - const size_t max_trials, const size_t max_threads, - const uint32_t num_of_sequences, const SharedMemoryType shared_memory_type, - const size_t output_shm_size, const bool serial_sequences, - const std::shared_ptr& parser, - const std::shared_ptr& factory, - const std::unordered_map& - request_parameters) - : LoadManager( - async, streaming, batch_size, max_threads, shared_memory_type, - output_shm_size, parser, factory, request_parameters), - request_distribution_(request_distribution), execute_(false), - num_of_sequences_(num_of_sequences), serial_sequences_(serial_sequences) -{ - gen_duration_.reset(new std::chrono::nanoseconds( - max_trials * measurement_window_ms * NANOS_PER_MILLIS)); - - threads_config_.reserve(max_threads); -} - -void -RequestRateManager::InitManagerFinalize() -{ - if (on_sequence_model_) { - sequence_manager_->InitSequenceStatuses(num_of_sequences_); - } -} - -cb::Error -RequestRateManager::ChangeRequestRate( - const double request_rate, const size_t request_count) -{ - PauseWorkers(); - 
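-  // PauseWorkers() parks every worker before the thread configs and the
-  // schedule are rewritten below; ResumeWorkers() then resets start_time_,
-  // so the regenerated intervals are measured from the moment of resumption.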
ConfigureThreads(request_count); - // Can safely update the schedule - GenerateSchedule(request_rate); - ResumeWorkers(); - - return cb::Error::Success; -} - -void -RequestRateManager::GenerateSchedule(const double request_rate) -{ - std::chrono::nanoseconds max_duration; - std::function distribution; - - if (request_distribution_ == Distribution::POISSON) { - distribution = ScheduleDistribution(request_rate); - // Poisson distribution needs to generate a schedule for the maximum - // possible duration to make sure that it is as random and as close to the - // desired rate as possible - max_duration = *gen_duration_; - } else if (request_distribution_ == Distribution::CONSTANT) { - distribution = ScheduleDistribution(request_rate); - // Constant distribution only needs one entry per worker -- that one value - // can be repeated over and over to emulate a full schedule of any length - max_duration = std::chrono::nanoseconds(1); - } else { - return; - } - - auto worker_schedules = CreateWorkerSchedules(max_duration, distribution); - GiveSchedulesToWorkers(worker_schedules); -} - -std::vector -RequestRateManager::CreateWorkerSchedules( - std::chrono::nanoseconds max_duration, - std::function distribution) -{ - std::mt19937 schedule_rng; - - std::vector worker_schedules = - CreateEmptyWorkerSchedules(); - std::vector thread_ids{CalculateThreadIds()}; - - std::chrono::nanoseconds next_timestamp(0); - size_t thread_id_index = 0; - size_t worker_index = 0; - - - // Generate schedule until we hit max_duration, but also make sure that all - // worker schedules follow the thread id distribution - // - while (next_timestamp < max_duration || - thread_id_index % thread_ids.size() != 0) { - next_timestamp = next_timestamp + distribution(schedule_rng); - worker_index = thread_ids[thread_id_index]; - thread_id_index = ++thread_id_index % thread_ids.size(); - worker_schedules[worker_index]->intervals.emplace_back(next_timestamp); - } - - SetScheduleDurations(worker_schedules); - - return worker_schedules; -} - -std::vector -RequestRateManager::CreateEmptyWorkerSchedules() -{ - std::vector worker_schedules; - for (size_t i = 0; i < workers_.size(); i++) { - worker_schedules.push_back(std::make_shared()); - } - return worker_schedules; -} - -std::vector -RequestRateManager::CalculateThreadIds() -{ - std::vector thread_ids{}; - // Determine number of ids to loop over for time stamps - size_t num_ids = 0; - if (on_sequence_model_) { - num_ids = num_of_sequences_; - } else { - num_ids = max_threads_; - } - - for (size_t i = 0; i < num_ids; i++) { - size_t t = i % DetermineNumThreads(); - thread_ids.push_back(t); - } - return thread_ids; -} - -void -RequestRateManager::SetScheduleDurations( - std::vector& schedules) -{ - RateSchedulePtr_t last_schedule = schedules.back(); - - std::chrono::nanoseconds duration = last_schedule->intervals.back(); - - for (auto schedule : schedules) { - duration = std::max(schedule->intervals.back(), duration); - } - - for (auto schedule : schedules) { - schedule->duration = duration; - } -} - - -void -RequestRateManager::GiveSchedulesToWorkers( - const std::vector& worker_schedules) -{ - for (size_t i = 0; i < workers_.size(); i++) { - auto w = std::dynamic_pointer_cast(workers_[i]); - w->SetSchedule(worker_schedules[i]); - } -} - -void -RequestRateManager::PauseWorkers() -{ - // Pause all the threads - execute_ = false; - - // Wait to see all threads are paused. 
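-  // Each worker sets thread_config_->is_paused_ in HandleExecuteOff() once
-  // it observes execute_ == false and then blocks on wake_signal_, so a
-  // short sleep-poll below is sufficient; pausing is infrequent (once per
-  // rate change).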
- for (auto& thread_config : threads_config_) { - while (!thread_config->is_paused_) { - std::this_thread::sleep_for(std::chrono::milliseconds(10)); - } - } -} - -void -RequestRateManager::ConfigureThreads(const size_t request_count) -{ - if (threads_.empty()) { - size_t num_of_threads = DetermineNumThreads(); - while (workers_.size() < num_of_threads) { - // Launch new thread for inferencing - threads_stat_.emplace_back(new ThreadStat()); - threads_config_.emplace_back(new ThreadConfig(workers_.size())); - - workers_.push_back( - MakeWorker(threads_stat_.back(), threads_config_.back())); - } - // Compute the number of sequences for each thread (take floor) - // and spread the remaining value - size_t avg_num_seqs = num_of_sequences_ / workers_.size(); - size_t num_seqs_add_one = num_of_sequences_ % workers_.size(); - size_t seq_offset = 0; - - size_t avg_req_count = request_count / workers_.size(); - size_t req_count_add_one = request_count % workers_.size(); - - - for (size_t i = 0; i < workers_.size(); i++) { - size_t num_of_seq = avg_num_seqs + (i < num_seqs_add_one ? 1 : 0); - threads_config_[i]->num_sequences_ = num_of_seq; - threads_config_[i]->seq_stat_index_offset_ = seq_offset; - seq_offset += num_of_seq; - - size_t thread_num_reqs = avg_req_count + (i < req_count_add_one ? 1 : 0); - threads_config_[i]->num_requests_ = thread_num_reqs; - - threads_.emplace_back(&IWorker::Infer, workers_[i]); - } - } -} - -void -RequestRateManager::ResumeWorkers() -{ - // Update the start_time_ to point to current time - start_time_ = std::chrono::steady_clock::now(); - - // Wake up all the threads to begin execution - execute_ = true; - wake_signal_.notify_all(); -} - -std::shared_ptr -RequestRateManager::MakeWorker( - std::shared_ptr thread_stat, - std::shared_ptr thread_config) -{ - size_t id = workers_.size(); - size_t num_of_threads = DetermineNumThreads(); - return std::make_shared( - id, thread_stat, thread_config, parser_, data_loader_, factory_, - on_sequence_model_, async_, num_of_threads, using_json_data_, streaming_, - batch_size_, wake_signal_, wake_mutex_, execute_, start_time_, - serial_sequences_, infer_data_manager_, sequence_manager_); -} - -size_t -RequestRateManager::DetermineNumThreads() -{ - size_t num_of_threads = max_threads_; - if (on_sequence_model_) { - num_of_threads = std::min(max_threads_, num_of_sequences_); - } - return num_of_threads; -} - - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/request_rate_manager.h b/src/c++/perf_analyzer/request_rate_manager.h deleted file mode 100644 index 8c9131bb4..000000000 --- a/src/c++/perf_analyzer/request_rate_manager.h +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2020-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#pragma once
-
-#include
-
-#include "load_manager.h"
-#include "request_rate_worker.h"
-
-namespace triton { namespace perfanalyzer {
-
-#ifndef DOCTEST_CONFIG_DISABLE
-class TestRequestRateManager;
-#endif
-
-//==============================================================================
-/// RequestRateManager is a helper class to send inference requests to an
-/// inference server in accordance with a Poisson or constant request
-/// distribution. The Poisson distribution models real-world traffic patterns.
-///
-/// An instance of this load manager will be created at the beginning of the
-/// perf analyzer and it will be used to simulate load with different target
-/// requests per second values and to collect per-request statistics.
-///
-/// Detail:
-/// Request Rate Manager will try to follow a pre-computed schedule while
-/// issuing requests to the server and maintain a constant request rate. The
-/// manager will spawn up to max_threads worker threads to meet the timeline
-/// imposed by the schedule. The worker threads will record the start time and
-/// end time of each request into a shared vector which will be used to report
-/// the observed latencies in serving requests. Additionally, they will report
-/// the number of requests that missed their schedule.
-///
-class RequestRateManager : public LoadManager {
- public:
-  ~RequestRateManager();
-
-  /// Create a load manager that is responsible for maintaining the specified
-  /// load on the inference server.
-  /// \param async Whether to use asynchronous or synchronous API for infer
-  /// request.
-  /// \param streaming Whether to use gRPC streaming API for infer request
-  /// \param measurement_window_ms The time window for measurements.
-  /// \param max_trials The maximum number of windows that will be measured
-  /// \param request_distribution The kind of distribution to use for drawing
-  /// out intervals between successive requests.
-  /// \param batch_size The batch size used for each request.
-  /// \param max_threads The maximum number of working threads to be spawned.
-  /// \param num_of_sequences The number of concurrent sequences that must be
-  /// maintained on the server.
-  /// \param shared_memory_type The type of shared memory to use for inputs.
-  /// \param output_shm_size The size of the shared memory to allocate for the
-  /// output.
-  /// \param serial_sequences Enable serial sequence mode.
-  /// \param parser The ModelParser object to get the model details.
-  /// \param factory The ClientBackendFactory object used to create a
-  /// client to the server.
-  /// \param manager Returns a new RequestRateManager object.
-  /// \param request_parameters Custom request parameters to send to the server
-  /// \return cb::Error object indicating success or failure.
-  static cb::Error Create(
-      const bool async, const bool streaming,
-      const uint64_t measurement_window_ms, const size_t max_trials,
-      Distribution request_distribution, const int32_t batch_size,
-      const size_t max_threads, const uint32_t num_of_sequences,
-      const SharedMemoryType shared_memory_type, const size_t output_shm_size,
-      const bool serial_sequences, const std::shared_ptr<ModelParser>& parser,
-      const std::shared_ptr<cb::ClientBackendFactory>& factory,
-      std::unique_ptr<RequestRateManager>* manager,
-      const std::unordered_map&
-          request_parameters);
-
-  /// Adjusts the rate of issuing requests to be the same as
-  /// 'target_request_rate'
-  /// \param target_request_rate The rate at which requests must be issued to
-  /// the server.
-  /// \param request_count The number of requests to generate when profiling.
-  /// If 0, then there is no limit, and it will generate until told to stop.
-  /// \return cb::Error object indicating success or failure.
-  cb::Error ChangeRequestRate(
-      const double target_request_rate, const size_t request_count = 0);
-
- protected:
-  RequestRateManager(
-      const bool async, const bool streaming, Distribution request_distribution,
-      const int32_t batch_size, const uint64_t measurement_window_ms,
-      const size_t max_trials, const size_t max_threads,
-      const uint32_t num_of_sequences,
-      const SharedMemoryType shared_memory_type, const size_t output_shm_size,
-      const bool serial_sequences, const std::shared_ptr<ModelParser>& parser,
-      const std::shared_ptr<cb::ClientBackendFactory>& factory,
-      const std::unordered_map&
-          request_parameters);
-
-  void InitManagerFinalize() override;
-
-  /// Generates and updates the request schedule as per the given request rate.
-  /// \param request_rate The request rate to use for the new schedule.
-  void GenerateSchedule(const double request_rate);
-
-  std::vector<RateSchedulePtr_t> CreateWorkerSchedules(
-      std::chrono::nanoseconds duration,
-      std::function<std::chrono::nanoseconds(std::mt19937&)> distribution);
-
-  std::vector<RateSchedulePtr_t> CreateEmptyWorkerSchedules();
-
-  std::vector<size_t> CalculateThreadIds();
-
-  void SetScheduleDurations(std::vector<RateSchedulePtr_t>& schedules);
-
-  void GiveSchedulesToWorkers(
-      const std::vector<RateSchedulePtr_t>& worker_schedules);
-
-  // Pauses the worker threads
-  void PauseWorkers();
-
-  void ConfigureThreads(const size_t request_count = 0);
-
-  // Resets the counters and resumes the worker threads
-  void ResumeWorkers();
-
-  // Makes a new worker
-  virtual std::shared_ptr<IWorker> MakeWorker(
-      std::shared_ptr<ThreadStat>, std::shared_ptr<ThreadConfig>);
-
-  size_t DetermineNumThreads();
-
-  std::vector<std::shared_ptr<ThreadConfig>> threads_config_;
-
-  std::shared_ptr<std::chrono::nanoseconds> gen_duration_;
-  Distribution request_distribution_;
-  std::chrono::steady_clock::time_point start_time_;
-  bool execute_;
-  const size_t num_of_sequences_{0};
-  const bool serial_sequences_{false};
-
-#ifndef DOCTEST_CONFIG_DISABLE
-  friend TestRequestRateManager;
-
- public:
-  RequestRateManager() = default;
-#endif
-};
-
-}} // namespace triton::perfanalyzer
diff --git a/src/c++/perf_analyzer/request_rate_worker.cc b/src/c++/perf_analyzer/request_rate_worker.cc
deleted file mode 100644
index 48ccb361b..000000000
--- a/src/c++/perf_analyzer/request_rate_worker.cc
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "request_rate_worker.h" - -#include -#include - -#include "client_backend/client_backend.h" -#include "data_loader.h" -#include "perf_utils.h" - -namespace triton { namespace perfanalyzer { - -void -RequestRateWorker::Infer() -{ - CreateCtxIdTracker(); - CreateContexts(); - - // run inferencing until receiving exit signal to maintain server load. - do { - HandleExecuteOff(); - - bool is_delayed = SleepIfNecessary(); - uint32_t ctx_id = GetCtxId(); - SendInferRequest(ctx_id, is_delayed); - RestoreFreeCtxId(ctx_id); - - if (HandleExitConditions()) { - return; - } - - } while (true); -} - -void -RequestRateWorker::CreateCtxIdTracker() -{ - bool is_concurrency = false; - - ctx_id_tracker_ = CtxIdTrackerFactory::CreateTracker( - is_concurrency, on_sequence_model_, serial_sequences_); -} - -void -RequestRateWorker::CreateContexts() -{ - size_t active_ctx_cnt = - on_sequence_model_ ? 
thread_config_->num_sequences_ : 1;
-  while (ctxs_.size() < active_ctx_cnt) {
-    CreateContext();
-  }
-
-  ResetFreeCtxIds();
-}
-
-void
-RequestRateWorker::ResetFreeCtxIds()
-{
-  std::lock_guard<std::mutex> lock(cb_mtx_);
-  ctx_id_tracker_->Reset(ctxs_.size());
-}
-
-void
-RequestRateWorker::SetSchedule(RateSchedulePtr_t schedule)
-{
-  schedule_ = schedule;
-}
-
-std::chrono::nanoseconds
-RequestRateWorker::GetNextTimestamp()
-{
-  return schedule_->Next();
-}
-
-
-uint32_t
-RequestRateWorker::GetSeqStatIndex(uint32_t ctx_id)
-{
-  return (thread_config_->seq_stat_index_offset_ + ctx_id);
-}
-
-void
-RequestRateWorker::HandleExecuteOff()
-{
-  // Should wait till main thread signals execution start
-  if (!execute_) {
-    CompleteOngoingSequences();
-    WaitForOngoingRequests();
-
-    // Reset Ctx IDs because CompleteOngoingSequences()
-    // has destructive side effects
-    ResetFreeCtxIds();
-
-    // Wait if no request should be sent and it is not exiting
-    thread_config_->is_paused_ = true;
-    std::unique_lock<std::mutex> lock(wake_mutex_);
-    wake_signal_.wait(lock, [this]() { return early_exit || execute_; });
-  }
-
-  thread_config_->is_paused_ = false;
-}
-
-bool
-RequestRateWorker::SleepIfNecessary()
-{
-  WaitForFreeCtx();
-
-  std::chrono::steady_clock::time_point now = std::chrono::steady_clock::now();
-  std::chrono::nanoseconds next_timestamp = GetNextTimestamp();
-  std::chrono::nanoseconds current_timestamp = now - start_time_;
-  std::chrono::nanoseconds wait_time = next_timestamp - current_timestamp;
-
-  bool delayed = false;
-  if (wait_time.count() < 0) {
-    delayed = true;
-  } else {
-    thread_stat_->idle_timer.Start();
-    std::this_thread::sleep_for(wait_time);
-    thread_stat_->idle_timer.Stop();
-  }
-  return delayed;
-}
-
-void
-RequestRateWorker::WaitForFreeCtx()
-{
-  if (!ctx_id_tracker_->IsAvailable()) {
-    notified_ = false;
-    // wait for signal from callback.
-    std::unique_lock<std::mutex> lk(cb_mtx_);
-    thread_stat_->idle_timer.Start();
-    cb_cv_.wait(lk, [this] {
-      if (notified_) {
-        notified_ = false;
-        return true;
-      }
-      return false;
-    });
-    thread_stat_->idle_timer.Stop();
-  }
-}
-
-}} // namespace triton::perfanalyzer
diff --git a/src/c++/perf_analyzer/request_rate_worker.h b/src/c++/perf_analyzer/request_rate_worker.h
deleted file mode 100644
index e6d1804c6..000000000
--- a/src/c++/perf_analyzer/request_rate_worker.h
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#pragma once - -#include - -#include "ischeduler.h" -#include "load_worker.h" -#include "model_parser.h" -#include "sequence_manager.h" -#include "thread_config.h" - -namespace triton { namespace perfanalyzer { - - -#ifndef DOCTEST_CONFIG_DISABLE -class NaggyMockRequestRateWorker; -class TestRequestRateManager; -class TestCustomLoadManager; -#endif - -/// Worker thread for RequestRateManager -/// -/// If the model is non-sequence model, each worker uses only one context -/// to maintain concurrency assigned to worker. -/// If the model is sequence model, each worker has to use multiples contexts -/// to maintain (sequence) concurrency assigned to worker. -/// -class RequestRateWorker : public LoadWorker, public IScheduler { - public: - RequestRateWorker( - uint32_t id, std::shared_ptr thread_stat, - std::shared_ptr thread_config, - const std::shared_ptr parser, - std::shared_ptr data_loader, - const std::shared_ptr factory, - const bool on_sequence_model, const bool async, const size_t num_threads, - const bool using_json_data, const bool streaming, - const int32_t batch_size, std::condition_variable& wake_signal, - std::mutex& wake_mutex, bool& execute, - std::chrono::steady_clock::time_point& start_time, - const bool serial_sequences, - const std::shared_ptr& infer_data_manager, - std::shared_ptr sequence_manager) - : LoadWorker( - id, thread_stat, thread_config, parser, data_loader, factory, - on_sequence_model, async, streaming, batch_size, using_json_data, - wake_signal, wake_mutex, execute, infer_data_manager, - sequence_manager), - num_threads_(num_threads), start_time_(start_time), - serial_sequences_(serial_sequences) - { - } - - void Infer() override; - - /// Provides the schedule that should be followed - /// - void SetSchedule(RateSchedulePtr_t schedule) override; - - private: - RateSchedulePtr_t schedule_; - - const size_t num_threads_; - const bool serial_sequences_; - std::chrono::steady_clock::time_point& start_time_; - - void CreateCtxIdTracker(); - - std::chrono::nanoseconds GetNextTimestamp(); - - uint32_t GetSeqStatIndex(uint32_t ctx_id) override; - - void CreateContexts(); - - void HandleExecuteOff(); - void ResetFreeCtxIds(); - - // Sleep until it is time for the next part of the schedule - // Returns true if the request was delayed - bool SleepIfNecessary(); - - void WaitForFreeCtx(); - - void CreateContextFinalize(std::shared_ptr ctx) override - { - ctx->RegisterAsyncCallbackFinalize(std::bind( - &RequestRateWorker::AsyncCallbackFinalize, this, - std::placeholders::_1)); - - ctx->SetNumActiveThreads(num_threads_); - } - -#ifndef DOCTEST_CONFIG_DISABLE - friend NaggyMockRequestRateWorker; - friend TestCustomLoadManager; - friend TestRequestRateManager; - -#endif -}; - - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/request_record.h b/src/c++/perf_analyzer/request_record.h deleted file mode 100644 index 91b5ca19e..000000000 --- a/src/c++/perf_analyzer/request_record.h +++ /dev/null @@ -1,101 +0,0 @@ -// 
Copyright 2023-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#pragma once - -#include -#include -#include -#include -#include - -namespace triton { namespace perfanalyzer { - -/// A record containing the data of a single request input or response output -struct RecordData { - RecordData(const uint8_t* buf, size_t size, std::string data_type = "") - { - uint8_t* array = new uint8_t[size]; - std::memcpy(array, buf, size); - data_ = std::shared_ptr(array, [](uint8_t* p) { delete[] p; }); - size_ = size; - data_type_ = data_type; - } - - // Define equality comparison operator so it can be inserted into maps - bool operator==(const RecordData& other) const - { - if (size_ != other.size_) - return false; - // Compare the contents of the arrays - return std::memcmp(data_.get(), other.data_.get(), size_) == 0; - } - - std::shared_ptr data_; - size_t size_; - std::string data_type_; -}; - - -/// A record of an individual request -struct RequestRecord { - using RequestInput = std::unordered_map; - using ResponseOutput = std::unordered_map; - - RequestRecord( - std::chrono::time_point start_time = - std::chrono::time_point(), - std::vector> - response_timestamps = {}, - std::vector request_inputs = {}, - std::vector response_outputs = {}, - bool sequence_end = true, bool delayed = false, uint64_t sequence_id = 0, - bool has_null_last_response = false) - : start_time_(start_time), response_timestamps_(response_timestamps), - request_inputs_(request_inputs), response_outputs_(response_outputs), - sequence_end_(sequence_end), delayed_(delayed), - sequence_id_(sequence_id), - has_null_last_response_(has_null_last_response) - { - } - // The timestamp of when the request was started. - std::chrono::time_point start_time_; - // Collection of response timestamps - std::vector> - response_timestamps_; - - std::vector request_inputs_; - std::vector response_outputs_; - // Whether or not the request is at the end of a sequence. - bool sequence_end_; - // Whether or not the request is delayed as per schedule. 
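-  // (A request is marked delayed when its scheduled send time had already
-  // passed by the time the worker could issue it; see
-  // RequestRateWorker::SleepIfNecessary().)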
- bool delayed_; - // Sequence ID of the request - uint64_t sequence_id_; - // Whether the last response is null - bool has_null_last_response_; -}; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/sequence_manager.cc b/src/c++/perf_analyzer/sequence_manager.cc deleted file mode 100644 index eaf5d6e00..000000000 --- a/src/c++/perf_analyzer/sequence_manager.cc +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
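A note on the length math before the implementation: GetRandomSequenceLength() below perturbs the configured base length by a uniform offset of up to plus or minus offset_ratio percent, clamping the result to at least 1. A standalone sketch of the same arithmetic; the base length and variation are chosen purely for illustration, this is not PA code:

#include <cstdlib>
#include <iostream>

// Mirrors the offset computation in SequenceManager::GetRandomSequenceLength:
// draw a value in [-1, 1], scale it by offset_ratio percent of the base
// length, and clamp the varied length to a minimum of 1.
static size_t
VariedLength(size_t base, double offset_ratio)
{
  int offset = static_cast<int>(
      ((2.0 * rand() / double(RAND_MAX)) - 1.0) * offset_ratio / 100.0 * base);
  return (int(base) + offset <= 0) ? 1 : base + offset;
}

int
main()
{
  // With base 20 and 20% variation, results fall in roughly [16, 24].
  for (int i = 0; i < 5; i++) {
    std::cout << VariedLength(20, 20.0) << "\n";
  }
  return 0;
}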
- -#include "sequence_manager.h" - -namespace triton { namespace perfanalyzer { - -SequenceManager::SequenceManager( - const uint64_t start_sequence_id, const uint64_t sequence_id_range, - const size_t sequence_length, const bool sequence_length_specified, - const double sequence_length_variation, const bool using_json_data, - std::shared_ptr data_loader) - : start_sequence_id_(start_sequence_id), - sequence_id_range_(sequence_id_range), sequence_length_(sequence_length), - sequence_length_specified_(sequence_length_specified), - sequence_length_variation_(sequence_length_variation), - using_json_data_(using_json_data), data_loader_(data_loader) -{ - distribution_ = std::uniform_int_distribution( - 0, data_loader_->GetDataStreamsCount() - 1); -} - -void -SequenceManager::InitSequenceStatuses(size_t num_sequence_statuses) -{ - sequence_statuses_.clear(); - for (size_t sequence_status_index{0}; - sequence_status_index < num_sequence_statuses; sequence_status_index++) { - sequence_statuses_.push_back(std::make_shared()); - } -} - -const uint64_t -SequenceManager::GetSequenceID(size_t sequence_status_index) const -{ - return sequence_statuses_.at(sequence_status_index)->seq_id_; -} - -std::mutex& -SequenceManager::GetMutex(size_t sequence_status_index) -{ - return sequence_statuses_.at(sequence_status_index)->mtx_; -} - -const uint64_t -SequenceManager::GetDataStreamID(size_t sequence_status_index) const -{ - return sequence_statuses_.at(sequence_status_index)->data_stream_id_; -} - -const size_t -SequenceManager::GetRemainingQueries(size_t sequence_status_index) const -{ - return sequence_statuses_.at(sequence_status_index)->remaining_queries_; -} - -void -SequenceManager::SetRemainingQueries( - size_t sequence_status_index, size_t remaining_queries) -{ - sequence_statuses_.at(sequence_status_index)->remaining_queries_ = - remaining_queries; -} - -void -SequenceManager::DecrementRemainingQueries(size_t sequence_status_index) -{ - sequence_statuses_.at(sequence_status_index)->remaining_queries_--; -} - -const size_t -SequenceManager::GetNumSequenceStatuses() const -{ - return sequence_statuses_.size(); -} - -void -SequenceManager::SetInferSequenceOptions( - const uint32_t seq_stat_index, std::unique_ptr& options) -{ - options->sequence_start_ = - (sequence_statuses_[seq_stat_index]->remaining_queries_ == 0); - - // New sequence must be initialized before setting the id. - if (options->sequence_start_) { - InitNewSequence(seq_stat_index); - } - options->sequence_id_ = sequence_statuses_[seq_stat_index]->seq_id_; - options->sequence_end_ = - (sequence_statuses_[seq_stat_index]->remaining_queries_ == 1); -} - -const size_t -SequenceManager::GetSequenceLength(size_t sequence_status_index) const -{ - return sequence_statuses_.at(sequence_status_index)->sequence_length_; -} - -void -SequenceManager::InitNewSequence(int seq_stat_index) -{ - sequence_statuses_[seq_stat_index]->seq_id_ = GetNextSeqId(seq_stat_index); - if (!using_json_data_) { - size_t new_length = GetRandomSequenceLength(sequence_length_variation_); - sequence_statuses_[seq_stat_index]->remaining_queries_ = - new_length == 0 ? 1 : new_length; - } else { - // Selecting next available data stream based on uniform distribution. 
- const uint64_t data_stream_id{GetNewDataStreamId()}; - sequence_statuses_[seq_stat_index]->data_stream_id_ = data_stream_id; - const size_t total_steps{data_loader_->GetTotalSteps(data_stream_id)}; - if (sequence_length_specified_) { - const size_t varied_sequence_length{ - GetRandomSequenceLength(sequence_length_variation_)}; - sequence_statuses_[seq_stat_index]->sequence_length_ = - varied_sequence_length; - } else { - sequence_statuses_[seq_stat_index]->sequence_length_ = total_steps; - } - sequence_statuses_[seq_stat_index]->remaining_queries_ = - sequence_statuses_[seq_stat_index]->sequence_length_; - } -} - -uint64_t -SequenceManager::GetNextSeqId(int seq_stat_index) -{ - uint64_t old_seq_id = sequence_statuses_[seq_stat_index]->seq_id_; - uint64_t next_seq_id = - curr_seq_id_++ % sequence_id_range_ + start_sequence_id_; - - // If the next sequence ID is still in use, reuse the same sequence ID - // that this sequence_status used last time - // - for (uint i = 0; i < sequence_statuses_.size(); i++) { - if (next_seq_id == sequence_statuses_[i]->seq_id_) { - next_seq_id = old_seq_id; - break; - } - } - return next_seq_id; -} - -size_t -SequenceManager::GetRandomSequenceLength(double offset_ratio) -{ - int random_offset = ((2.0 * rand() / double(RAND_MAX)) - 1.0) * offset_ratio / - 100.0 * sequence_length_; - if (int(sequence_length_) + random_offset <= 0) { - return 1; - } - return sequence_length_ + random_offset; -} - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/sequence_manager.h b/src/c++/perf_analyzer/sequence_manager.h deleted file mode 100644 index c419a87f0..000000000 --- a/src/c++/perf_analyzer/sequence_manager.h +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
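For orientation before the class declaration: a self-contained sketch of the ID-rotation policy implemented by GetNextSeqId() above. IDs cycle through [start_sequence_id, start_sequence_id + sequence_id_range), and an ID still held by a live sequence is not handed out again; the slot keeps its previous ID instead. The tiny range and slot count here are hypothetical, chosen so the behavior is visible:

#include <cstdint>
#include <iostream>
#include <vector>

int
main()
{
  const uint64_t start_sequence_id = 1, sequence_id_range = 4;
  uint64_t curr_seq_id = 0;
  std::vector<uint64_t> live_ids{0, 0, 0};  // one slot per sequence status

  for (size_t slot = 0; slot < live_ids.size(); slot++) {
    uint64_t next = curr_seq_id++ % sequence_id_range + start_sequence_id;
    for (uint64_t id : live_ids) {
      if (id == next) {
        next = live_ids[slot];  // collision: reuse this slot's previous ID
        break;
      }
    }
    live_ids[slot] = next;
    std::cout << "slot " << slot << " -> seq id " << next << "\n";
  }
  return 0;
}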
-#pragma once - -#include -#include -#include -#include -#include - -#include "client_backend/client_backend.h" -#include "data_loader.h" -#include "sequence_status.h" - -namespace triton { namespace perfanalyzer { - -#ifndef DOCTEST_CONFIG_DISABLE -class NaggyMockSequenceManager; -#endif - -/// Manages operations related to preparing requests to sequence models. -/// -class SequenceManager { - public: - /// Constructs the sequence manager object. Involves initializing the - /// distribution for randomly assigning input data streams to new sequences. - /// \param start_sequence_id See associated data member description. - /// \param sequence_id_range See associated data member description. - /// \param sequence_length See associated data member description. - /// \param sequence_length_specified See associated data member description. - /// \param sequence_length_variation See associated data member description. - /// \param using_json_data See associated data member description. - /// \param data_loader See associated data member description. - /// \return The constructed sequence manager object. - /// - SequenceManager( - const uint64_t start_sequence_id, const uint64_t sequence_id_range, - const size_t sequence_length, const bool sequence_length_specified, - const double sequence_length_variation, const bool using_json_data, - std::shared_ptr data_loader); - - /// Initializes the sequence statuses data structure. - /// \param num_sequence_statuses The number of sequence status objects to - /// create. - /// - void InitSequenceStatuses(size_t num_sequence_statuses); - - /// Gets the sequence ID for the specified sequence status object. - /// \param sequence_status_index The index of the sequence status object. - /// \return The sequence ID for the specified sequence status object. - /// - const uint64_t GetSequenceID(size_t sequence_status_index) const; - - /// Gets a non-const reference to the mutex for the specified sequence status - /// object. - /// \param sequence_status_index The index of the sequence status object. - /// \return A non-const reference to the mutex for the specified sequence - /// status object. - /// - std::mutex& GetMutex(size_t sequence_status_index); - - /// Gets the data stream ID for the specified sequence status object. - /// \param sequence_status_index The index of the sequence status object. - /// \return The data stream ID for the specified sequence status object. - /// - const uint64_t GetDataStreamID(size_t sequence_status_index) const; - - /// Gets the remaining queries for the specified sequence status object. - /// \param sequence_status_index The index of the sequence status object. - /// \return The remaining queries for the specified sequence status object. - /// - const size_t GetRemainingQueries(size_t sequence_status_index) const; - - /// Sets the remaining queries for the specified sequence status object. - /// \param sequence_status_index The index of the sequence status object. - /// \param remaining_queries The new value of the remaining queries for the - /// specified sequence status object. - /// - void SetRemainingQueries( - size_t sequence_status_index, size_t remaining_queries); - - /// Decrements the remaining queries for the specified sequence status object. - /// \param sequence_status_index The index of the sequence status object. - /// - void DecrementRemainingQueries(size_t sequence_status_index); - - /// Gets the number of sequence status objects in the sequence statuses data - /// structure. 
-  /// \return The number of sequence status objects in the sequence statuses
-  /// data structure.
-  ///
-  const size_t GetNumSequenceStatuses() const;
-
-  /// Sets options related to a single request to a sequence model.
-  /// \param seq_stat_index The index for the sequence status object that is
-  /// having its options set.
-  /// \param options The options object for the request that is being prepared.
-  ///
-  virtual void SetInferSequenceOptions(
-      const uint32_t seq_stat_index,
-      std::unique_ptr<cb::InferOptions>& options);
-
-  /// Gets the sequence length for the specified sequence status object.
-  /// \param sequence_status_index The index of the sequence status object.
-  /// \return The sequence length for the specified sequence status object.
-  ///
-  const size_t GetSequenceLength(size_t sequence_status_index) const;
-
- private:
-  /// Initializes values for a sequence status object.
-  /// \param seq_stat_index The index for the sequence status object that is
-  /// being initialized.
-  ///
-  virtual void InitNewSequence(int seq_stat_index);
-
-  /// Determines an appropriate next sequence ID for a renewed sequence status
-  /// object.
-  /// \param seq_stat_index The index for the sequence for which a request is
-  /// being prepared.
-  /// \return The potentially new sequence ID to be used by a renewed sequence
-  /// status object.
-  ///
-  virtual uint64_t GetNextSeqId(int seq_stat_index);
-
-  virtual uint64_t GetNewDataStreamId()
-  {
-    return distribution_(rng_generator_);
-  }
-
-  /// Generates a random sequence length based on a threshold.
-  /// \param offset_ratio The offset ratio/threshold of the generated length.
-  /// \return A random sequence length.
-  ///
-  virtual size_t GetRandomSequenceLength(double offset_ratio);
-
-  /// Data structure holding sequence status objects
-  ///
-  std::vector<std::shared_ptr<SequenceStatus>> sequence_statuses_{};
-
-  /// Current sequence id (for issuing new sequences)
-  ///
-  std::atomic<uint64_t> curr_seq_id_{0};
-
-  /// Data loader to be used for various sequence operations.
-  ///
-  std::shared_ptr<DataLoader> data_loader_{nullptr};
-
-  /// The starting sequence ID to be used for iterating through valid sequence
-  /// IDs.
-  ///
-  const uint64_t start_sequence_id_{0};
-
-  /// The length of the range of valid sequence IDs to iterate through.
-  ///
-  const uint64_t sequence_id_range_{0};
-
-  /// The base length of new sequences.
-  ///
-  const size_t sequence_length_{0};
-
-  /// Whether the user specified the sequence length.
-  ///
-  const bool sequence_length_specified_{false};
-
-  /// The percentage variation in length of sequences using autogenerated data
-  /// as input.
-  ///
-  const double sequence_length_variation_{0.0};
-
-  /// Indicates whether to generate sequence request input data or read it from
-  /// a JSON file.
-  ///
-  const bool using_json_data_{false};
-
-  /// The distribution for randomly assigning new sequences a data stream in
-  /// the input data JSON.
-  ///
-  std::uniform_int_distribution<uint64_t> distribution_;
-
-  /// The random number generator for randomly assigning new sequences a data
-  /// stream in the input data JSON.
- /// - std::default_random_engine rng_generator_{}; - -#ifndef DOCTEST_CONFIG_DISABLE - friend NaggyMockSequenceManager; - - public: - SequenceManager() = default; -#endif -}; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/sequence_status.h b/src/c++/perf_analyzer/sequence_status.h deleted file mode 100644 index 16ec3bf40..000000000 --- a/src/c++/perf_analyzer/sequence_status.h +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#pragma once - -#include -#include - -namespace triton { namespace perfanalyzer { - -// Holds the status of the inflight sequence -struct SequenceStatus { - SequenceStatus(uint64_t seq_id = 0) - : seq_id_(seq_id), data_stream_id_(0), remaining_queries_(0) - { - } - // The unique correlation id allocated to the sequence - uint64_t seq_id_; - // The data stream id providing data for the sequence - uint64_t data_stream_id_; - // The number of queries remaining to complete the sequence - size_t remaining_queries_; - // The length of the sequence - size_t sequence_length_{0}; - // A lock to protect sequence data - std::mutex mtx_; -}; - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/tensor_data.h b/src/c++/perf_analyzer/tensor_data.h deleted file mode 100644 index 6f5cf7191..000000000 --- a/src/c++/perf_analyzer/tensor_data.h +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2023-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. 
-// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#pragma once - -namespace triton { namespace perfanalyzer { - -/// Data for one input or output tensor -/// -struct TensorData { - const uint8_t* data_ptr{nullptr}; - size_t batch1_size{0}; - bool is_valid{false}; - std::string name; -}; - - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/test_command_line_parser.cc b/src/c++/perf_analyzer/test_command_line_parser.cc deleted file mode 100644 index 2d17bbc24..000000000 --- a/src/c++/perf_analyzer/test_command_line_parser.cc +++ /dev/null @@ -1,1904 +0,0 @@ -// Copyright 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
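The test file below exercises CLParser through doctest TEST_CASEs and SUBCASEs, where each SUBCASE re-runs the enclosing TEST_CASE body with one variation. A minimal self-contained example of that pattern; the parser here is a hypothetical stand-in, not CLParser, and the standalone main is only for this sketch:

// Standalone doctest example of the SUBCASE pattern used throughout this
// file. ParsePositive is an invented stand-in for the real option parsing.
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include "doctest.h"

#include <stdexcept>
#include <string>

static int
ParsePositive(const std::string& s)
{
  int v = std::stoi(s);
  if (v < 0) {
    throw std::invalid_argument("value must be non-negative");
  }
  return v;
}

TEST_CASE("parsing an integer option")
{
  // Each SUBCASE runs with a fresh execution of the TEST_CASE body.
  SUBCASE("valid value") { CHECK(ParsePositive("2000") == 2000); }
  SUBCASE("negative value")
  {
    CHECK_THROWS_AS(ParsePositive("-2000"), std::invalid_argument);
  }
}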
-//
-#include
-
-#include
-
-#include "command_line_parser.h"
-#include "doctest.h"
-#include "perf_analyzer_exception.h"
-
-namespace triton { namespace perfanalyzer {
-
-inline void
-CHECK_STRING(const char* name, const std::string& str, const std::string& val)
-{
-  CHECK_MESSAGE(
-      !str.compare(val), name, " expecting '", val, "', found '", str, "'");
-}
-
-inline void
-CHECK_STRING(std::string act, std::string exp)
-{
-  CHECK_MESSAGE(
-      !act.compare(exp), "Expecting: '", exp, "', Found: '", act, "'");
-}
-
-std::string
-CreateUsageMessage(const std::string& option_name, const std::string& msg)
-{
-  return "Failed to parse " + option_name + ". " + msg;
-}
-
-// Performs a doctest check against all the individual parameters
-// in a PAParams object.
-//
-// /param act actual object under test
-// /param exp expected value for object
-//
-inline void
-CHECK_PARAMS(PAParamsPtr act, PAParamsPtr exp)
-{
-  CHECK(act->verbose == exp->verbose);
-  CHECK(act->streaming == exp->streaming);
-  CHECK(act->extra_verbose == exp->extra_verbose);
-  CHECK(act->max_threads == exp->max_threads);
-  CHECK(act->max_threads_specified == exp->max_threads_specified);
-  CHECK(act->sequence_length == exp->sequence_length);
-  CHECK(act->percentile == exp->percentile);
-  REQUIRE(act->user_data.size() == exp->user_data.size());
-  for (size_t i = 0; i < act->user_data.size(); i++) {
-    CHECK_STRING(act->user_data[i], exp->user_data[i]);
-  }
-  CHECK(act->input_shapes.size() == exp->input_shapes.size());
-  for (auto act_shape : act->input_shapes) {
-    auto exp_shape = exp->input_shapes.find(act_shape.first);
-    REQUIRE_MESSAGE(
-        exp_shape != exp->input_shapes.end(),
-        "Unexpected input_shape: ", act_shape.first);
-    REQUIRE(act_shape.second.size() == exp_shape->second.size());
-    for (size_t i = 0; i < act_shape.second.size(); i++) {
-      CHECK_MESSAGE(
-          act_shape.second[i] == exp_shape->second[i],
-          "Unexpected shape value for: ", act_shape.first, "[", i, "]");
-    }
-  }
-  CHECK(act->measurement_window_ms == exp->measurement_window_ms);
-  CHECK(act->using_concurrency_range == exp->using_concurrency_range);
-  CHECK(act->concurrency_range.start == exp->concurrency_range.start);
-  CHECK(act->concurrency_range.end == exp->concurrency_range.end);
-  CHECK(act->concurrency_range.step == exp->concurrency_range.step);
-  CHECK(act->latency_threshold_ms == exp->latency_threshold_ms);
-  CHECK(act->stability_threshold == doctest::Approx(exp->stability_threshold));
-  CHECK(act->max_trials == exp->max_trials);
-  CHECK(act->zero_input == exp->zero_input);
-  CHECK(act->string_length == exp->string_length);
-  CHECK_STRING(act->string_data, exp->string_data);
-  CHECK(act->async == exp->async);
-  CHECK(act->forced_sync == exp->forced_sync);
-  CHECK(act->using_request_rate_range == exp->using_request_rate_range);
-  CHECK(
-      act->request_rate_range[0] ==
-      doctest::Approx(exp->request_rate_range[0]));
-  CHECK(
-      act->request_rate_range[1] ==
-      doctest::Approx(exp->request_rate_range[1]));
-  CHECK(
-      act->request_rate_range[2] ==
-      doctest::Approx(exp->request_rate_range[2]));
-  CHECK(act->num_of_sequences == exp->num_of_sequences);
-  CHECK(act->search_mode == exp->search_mode);
-  CHECK(act->request_distribution == exp->request_distribution);
-  CHECK(act->using_custom_intervals == exp->using_custom_intervals);
-  CHECK_STRING(act->request_intervals_file, exp->request_intervals_file);
-  CHECK(act->shared_memory_type == exp->shared_memory_type);
-  CHECK(act->output_shm_size == exp->output_shm_size);
-  CHECK(act->kind == exp->kind);
-
-  CHECK_STRING(act->model_signature_name, exp->model_signature_name);
-  CHECK(act->using_grpc_compression == exp->using_grpc_compression);
-  CHECK(act->compression_algorithm == exp->compression_algorithm);
-  CHECK(act->measurement_mode == exp->measurement_mode);
-  CHECK(act->measurement_request_count == exp->measurement_request_count);
-  CHECK_STRING(act->triton_server_path, exp->triton_server_path);
-  CHECK_STRING(act->model_repository_path, exp->model_repository_path);
-  CHECK(act->start_sequence_id == exp->start_sequence_id);
-  CHECK(act->sequence_id_range == exp->sequence_id_range);
-  CHECK_STRING(
-      act->ssl_options.ssl_grpc_certificate_chain_file,
-      exp->ssl_options.ssl_grpc_certificate_chain_file);
-  CHECK_STRING(
-      act->ssl_options.ssl_grpc_private_key_file,
-      exp->ssl_options.ssl_grpc_private_key_file);
-  CHECK_STRING(
-      act->ssl_options.ssl_grpc_root_certifications_file,
-      exp->ssl_options.ssl_grpc_root_certifications_file);
-  CHECK(act->ssl_options.ssl_grpc_use_ssl == exp->ssl_options.ssl_grpc_use_ssl);
-  CHECK_STRING(
-      act->ssl_options.ssl_https_ca_certificates_file,
-      exp->ssl_options.ssl_https_ca_certificates_file);
-  CHECK_STRING(
-      act->ssl_options.ssl_https_client_certificate_file,
-      exp->ssl_options.ssl_https_client_certificate_file);
-  CHECK_STRING(
-      act->ssl_options.ssl_https_client_certificate_type,
-      exp->ssl_options.ssl_https_client_certificate_type);
-  CHECK_STRING(
-      act->ssl_options.ssl_https_private_key_file,
-      exp->ssl_options.ssl_https_private_key_file);
-  CHECK_STRING(
-      act->ssl_options.ssl_https_private_key_type,
-      exp->ssl_options.ssl_https_private_key_type);
-  CHECK(
-      act->ssl_options.ssl_https_verify_host ==
-      exp->ssl_options.ssl_https_verify_host);
-  CHECK(
-      act->ssl_options.ssl_https_verify_peer ==
-      exp->ssl_options.ssl_https_verify_peer);
-  CHECK(act->verbose_csv == exp->verbose_csv);
-  CHECK(act->enable_mpi == exp->enable_mpi);
-  CHECK(act->trace_options.size() == exp->trace_options.size());
-  CHECK(act->using_old_options == exp->using_old_options);
-  CHECK(act->dynamic_concurrency_mode == exp->dynamic_concurrency_mode);
-  CHECK(act->url_specified == exp->url_specified);
-  CHECK_STRING(act->url, exp->url);
-  CHECK_STRING(act->model_name, exp->model_name);
-  CHECK_STRING(act->model_version, exp->model_version);
-  CHECK(act->batch_size == exp->batch_size);
-  CHECK(act->using_batch_size == exp->using_batch_size);
-  CHECK(act->concurrent_request_count == exp->concurrent_request_count);
-  CHECK(act->protocol == exp->protocol);
-  CHECK(act->http_headers->size() == exp->http_headers->size());
-  CHECK(act->max_concurrency == exp->max_concurrency);
-  CHECK_STRING(act->filename, exp->filename);
-  CHECK(act->mpi_driver != nullptr);
-  CHECK_STRING(act->memory_type, exp->memory_type);
-  CHECK(
-      act->is_using_periodic_concurrency_mode ==
-      exp->is_using_periodic_concurrency_mode);
-  CHECK(
-      act->periodic_concurrency_range.start ==
-      exp->periodic_concurrency_range.start);
-  CHECK(
-      act->periodic_concurrency_range.end ==
-      exp->periodic_concurrency_range.end);
-  CHECK(
-      act->periodic_concurrency_range.step ==
-      exp->periodic_concurrency_range.step);
-  CHECK(act->request_period == exp->request_period);
-  CHECK(act->request_parameters.size() == exp->request_parameters.size());
-  for (auto act_param : act->request_parameters) {
-    auto exp_param = exp->request_parameters.find(act_param.first);
-    REQUIRE_MESSAGE(
-        exp_param != exp->request_parameters.end(),
-        "Unexpected parameter: ", act_param.first);
-
-    CHECK(act_param.second.value ==
exp_param->second.value); - CHECK(act_param.second.type == exp_param->second.type); - } -} - - -#define CHECK_INT_OPTION(option_name, exp_val, msg) \ - SUBCASE("valid value") \ - { \ - int argc = 5; \ - char* argv[argc] = {app_name, "-m", model_name, option_name, "2000"}; \ - CAPTURE(argv[3]); \ - CAPTURE(argv[4]); \ - \ - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); \ - CHECK(!parser.UsageCalled()); \ - CAPTURE(parser.GetUsageMessage()); \ - \ - exp_val = 2000; \ - CAPTURE(exp_val); \ - } \ - \ - SUBCASE("negative value") \ - { \ - int argc = 5; \ - char* argv[argc] = {app_name, "-m", model_name, option_name, "-2000"}; \ - CHECK_THROWS_WITH_AS( \ - act = parser.Parse(argc, argv), msg.c_str(), PerfAnalyzerException); \ - \ - check_params = false; \ - } \ - \ - SUBCASE("floating point value") \ - { \ - int argc = 5; \ - char* argv[argc] = {app_name, "-m", model_name, option_name, "29.5"}; \ - \ - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); \ - CHECK(!parser.UsageCalled()); \ - \ - exp_val = 29; \ - } \ - \ - SUBCASE("missing value") \ - { \ - int argc = 4; \ - char* argv[argc] = {app_name, "-m", model_name, option_name}; \ - \ - CHECK_THROWS_WITH_AS( \ - act = parser.Parse(argc, argv), "", PerfAnalyzerException); \ - \ - check_params = false; \ - } - - -TEST_CASE("Testing PerfAnalyzerParameters") -{ - PAParamsPtr params(new PerfAnalyzerParameters{}); - - CHECK(params->verbose == false); - CHECK(params->streaming == false); - CHECK(params->extra_verbose == false); - CHECK(params->max_threads == 4); - CHECK(params->max_threads_specified == false); - CHECK(params->sequence_length == 20); - CHECK(params->percentile == -1); - CHECK(params->request_count == 0); - CHECK(params->user_data.size() == 0); - CHECK_STRING("endpoint", params->endpoint, ""); - CHECK(params->input_shapes.size() == 0); - CHECK(params->measurement_window_ms == 5000); - CHECK(params->using_concurrency_range == false); - CHECK(params->concurrency_range.start == 1); - CHECK(params->concurrency_range.end == 1); - CHECK(params->concurrency_range.step == 1); - CHECK(params->latency_threshold_ms == NO_LIMIT); - CHECK(params->stability_threshold == doctest::Approx(0.1)); - CHECK(params->max_trials == 10); - CHECK(params->zero_input == false); - CHECK(params->string_length == 128); - CHECK_STRING("string_data", params->string_data, ""); - CHECK(params->async == false); - CHECK(params->forced_sync == false); - CHECK(params->using_request_rate_range == false); - CHECK(params->request_rate_range[0] == doctest::Approx(1.0)); - CHECK(params->request_rate_range[1] == doctest::Approx(1.0)); - CHECK(params->request_rate_range[2] == doctest::Approx(1.0)); - CHECK(params->num_of_sequences == 4); - CHECK(params->search_mode == SearchMode::LINEAR); - CHECK(params->request_distribution == Distribution::CONSTANT); - CHECK(params->using_custom_intervals == false); - CHECK_STRING("request_intervals_file", params->request_intervals_file, ""); - CHECK(params->shared_memory_type == NO_SHARED_MEMORY); - CHECK(params->output_shm_size == 102400); - CHECK(params->kind == clientbackend::BackendKind::TRITON); - CHECK_STRING( - "model_signature_name", params->model_signature_name, "serving_default"); - CHECK(params->using_grpc_compression == false); - CHECK( - params->compression_algorithm == - clientbackend::GrpcCompressionAlgorithm::COMPRESS_NONE); - CHECK(params->measurement_mode == MeasurementMode::TIME_WINDOWS); - CHECK(params->measurement_request_count == 50); - CHECK_STRING( - "triton_server_path", params->triton_server_path, 
"/opt/tritonserver"); - CHECK_STRING("model_repository_path", params->model_repository_path, ""); - CHECK(params->start_sequence_id == 1); - CHECK(params->sequence_id_range == UINT32_MAX); - CHECK_STRING( - "ssl_grpc_certificate_chain_file", - params->ssl_options.ssl_grpc_certificate_chain_file, ""); - CHECK_STRING( - "ssl_grpc_private_key_file", - params->ssl_options.ssl_grpc_private_key_file, ""); - CHECK_STRING( - "ssl_grpc_root_certifications_file", - params->ssl_options.ssl_grpc_root_certifications_file, ""); - CHECK(params->ssl_options.ssl_grpc_use_ssl == false); - CHECK_STRING( - "ssl_https_ca_certificates_file", - params->ssl_options.ssl_https_ca_certificates_file, ""); - CHECK_STRING( - "ssl_https_client_certificate_file", - params->ssl_options.ssl_https_client_certificate_file, ""); - CHECK_STRING( - "ssl_https_client_certificate_type", - params->ssl_options.ssl_https_client_certificate_type, ""); - CHECK_STRING( - "ssl_https_private_key_file", - params->ssl_options.ssl_https_private_key_file, ""); - CHECK_STRING( - "ssl_https_private_key_type", - params->ssl_options.ssl_https_private_key_type, ""); - CHECK(params->ssl_options.ssl_https_verify_host == 2); - CHECK(params->ssl_options.ssl_https_verify_peer == 1); - CHECK(params->verbose_csv == false); - CHECK(params->enable_mpi == false); - CHECK(params->trace_options.size() == 0); - CHECK(params->using_old_options == false); - CHECK(params->dynamic_concurrency_mode == false); - CHECK(params->url_specified == false); - CHECK_STRING("url", params->url, "localhost:8000"); - CHECK_STRING("model_name", params->model_name, ""); - CHECK_STRING("model_version", params->model_version, ""); - CHECK(params->batch_size == 1); - CHECK(params->using_batch_size == false); - CHECK(params->concurrent_request_count == 1); - CHECK(params->protocol == clientbackend::ProtocolType::HTTP); - CHECK(params->http_headers->size() == 0); - CHECK(params->max_concurrency == 0); - CHECK_STRING("filename", params->filename, ""); - CHECK(params->mpi_driver == nullptr); - CHECK_STRING("memory_type", params->memory_type, "system"); -} - -// Test CLParser Class that captures the usage string but suppresses the output -// -class TestCLParser : public CLParser { - public: - std::string GetUsageMessage() const { return usage_message_; } - bool UsageCalled() const { return usage_called_; } - - private: - std::string usage_message_; - bool usage_called_ = false; - - virtual void Usage(const std::string& msg = std::string()) - { - throw PerfAnalyzerException(msg, GENERIC_ERROR); - } -}; - -void -CheckValidRange( - std::vector& args, char* option_name, TestCLParser& parser, - PAParamsPtr& act, bool& using_range, Range& range, - size_t* max_threads) -{ - SUBCASE("start:end provided") - { - *max_threads = 400; - args.push_back(option_name); - args.push_back("100:400"); // start:end - - int argc = args.size(); - char* argv[argc]; - std::copy(args.begin(), args.end(), argv); - - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - CHECK(!parser.UsageCalled()); - - using_range = true; - range.start = 100; - range.end = 400; - } - - SUBCASE("start:end:step provided") - { - *max_threads = 400; - args.push_back(option_name); - args.push_back("100:400:10"); // start:end:step - - int argc = args.size(); - char* argv[argc]; - std::copy(args.begin(), args.end(), argv); - - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - CHECK(!parser.UsageCalled()); - - using_range = true; - range.start = 100; - range.end = 400; - range.step = 10; - } -} - -void -CheckInvalidRange( - std::vector& 
args, char* option_name, TestCLParser& parser, - PAParamsPtr& act, bool& check_params) -{ - std::string expected_msg; - - SUBCASE("too many input values") - { - args.push_back(option_name); - args.push_back("200:100:25:10"); - - int argc = args.size(); - char* argv[argc]; - std::copy(args.begin(), args.end(), argv); - - expected_msg = CreateUsageMessage( - option_name, "The value does not match ."); - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), expected_msg.c_str(), - PerfAnalyzerException); - - check_params = false; - } - - SUBCASE("invalid start value") - { - args.push_back(option_name); - args.push_back("bad:400:10"); - - int argc = args.size(); - char* argv[argc]; - std::copy(args.begin(), args.end(), argv); - - expected_msg = - CreateUsageMessage(option_name, "Invalid value provided: bad:400:10"); - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), expected_msg.c_str(), - PerfAnalyzerException); - - check_params = false; - } - - SUBCASE("invalid end value") - { - args.push_back(option_name); - args.push_back("100:bad:10"); - - int argc = args.size(); - char* argv[argc]; - std::copy(args.begin(), args.end(), argv); - - expected_msg = - CreateUsageMessage(option_name, "Invalid value provided: 100:bad:10"); - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), expected_msg.c_str(), - PerfAnalyzerException); - - check_params = false; - } - - SUBCASE("invalid step value") - { - args.push_back(option_name); - args.push_back("100:400:bad"); - - int argc = args.size(); - char* argv[argc]; - std::copy(args.begin(), args.end(), argv); - - expected_msg = - CreateUsageMessage(option_name, "Invalid value provided: 100:400:bad"); - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), expected_msg.c_str(), - PerfAnalyzerException); - - check_params = false; - } - - SUBCASE("no input values") - { - args.push_back(option_name); - - int argc = args.size(); - char* argv[argc]; - std::copy(args.begin(), args.end(), argv); - - // BUG (TMA-1307): Usage message does not contain error. 
-    // "option '--concurrency-range' requires an argument" is written
-    // directly to std::out.
-    //
-    CHECK_THROWS_WITH_AS(
-        act = parser.Parse(argc, argv), "", PerfAnalyzerException);
-
-    check_params = false;
-  }
-}
-
-
-TEST_CASE("Testing Command Line Parser")
-{
-  char* model_name = "my_model";
-  char* app_name = "test_perf_analyzer";
-
-  std::string expected_msg;
-  std::vector<char*> args{app_name, "-m", model_name};
-
-  opterr = 1;  // Enable error output for the getopt library
-  bool check_params = true;
-
-  TestCLParser parser;  // Command line parser under test
-  PAParamsPtr act;      // Actual options parsed from parser
-  PAParamsPtr exp{new PerfAnalyzerParameters()};  // Expected results
-
-  // Most common defaults
-  exp->model_name = model_name;
-  exp->max_threads = DEFAULT_MAX_THREADS;
-
-  SUBCASE("with no parameters")
-  {
-    int argc = 1;
-    char* argv[argc] = {app_name};
-
-    expected_msg =
-        CreateUsageMessage("-m (model name)", "The value must be specified.");
-    CHECK_THROWS_WITH_AS(
-        act = parser.Parse(argc, argv), expected_msg.c_str(),
-        PerfAnalyzerException);
-
-    check_params = false;
-  }
-
-  SUBCASE("with min parameters")
-  {
-    int argc = 3;
-    char* argv[argc] = {app_name, "-m", model_name};
-
-    REQUIRE_NOTHROW(act = parser.Parse(argc, argv));
-    REQUIRE(!parser.UsageCalled());
-  }
-
-  SUBCASE("Option : --streaming")
-  {
-    SUBCASE("streaming option - without model")
-    {
-      int argc = 2;
-      char* argv[argc] = {app_name, "--streaming"};
-
-      expected_msg =
-          CreateUsageMessage("-m (model name)", "The value must be specified.");
-      CHECK_THROWS_WITH_AS(
-          act = parser.Parse(argc, argv), expected_msg.c_str(),
-          PerfAnalyzerException);
-
-      check_params = false;
-    }
-
-    SUBCASE("with model")
-    {
-      int argc = 4;
-      char* argv[argc] = {app_name, "-m", model_name, "--streaming"};
-
-      // NOTE: This is not an informative error message: how does one specify
-      // the gRPC protocol? The error output should list the missing params.
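-      // For reference, a hedged aside drawn from the -i subcases later in
-      // this file (not from the error text itself): the combination the
-      // parser does accept is, e.g.,
-      //   test_perf_analyzer -m my_model -i grpc --streaming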
- // - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), - "Streaming is only allowed with gRPC protocol.", - PerfAnalyzerException); - - check_params = false; - } - - SUBCASE("with model last") - { - int argc = 4; - char* argv[argc] = {app_name, "--streaming", "-m", model_name}; - - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), - "Streaming is only allowed with gRPC protocol.", - PerfAnalyzerException); - - check_params = false; - } - } - - SUBCASE("Option : --max-threads") - { - SUBCASE("set to 1") - { - int argc = 5; - char* argv[argc] = {app_name, "-m", model_name, "--max-threads", "1"}; - - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - REQUIRE(!parser.UsageCalled()); - - exp->max_threads = 1; - exp->max_threads_specified = true; - } - - SUBCASE("set to max") - { - int argc = 5; - char* argv[argc] = {app_name, "-m", model_name, "--max-threads", "65535"}; - - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - REQUIRE(!parser.UsageCalled()); - - exp->max_threads = 65535; - exp->max_threads_specified = true; - } - - SUBCASE("missing value") - { - int argc = 4; - char* argv[argc] = {app_name, "-m", model_name, "--max-threads"}; - - // NOTE: Empty message is not helpful - // - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), "", PerfAnalyzerException); - - // BUG: Dumping string "option '--max-threads' requires an argument" - // directly to std::out, instead of through usage() - // - check_params = false; - } - - SUBCASE("bad value") - { - int argc = 4; - char* argv[argc] = {app_name, "-m", model_name, "--max-threads", "bad"}; - - // NOTE: Empty message is not helpful - // - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), "", PerfAnalyzerException); - - // BUG: Dumping string "option '--max-threads' requires an argument" - // directly to std::out, instead of through usage() - // - check_params = false; - } - } - - SUBCASE("Option : --sequence-length") - { - SUBCASE("set to 2000") - { - int argc = 5; - char* argv[argc] = { - app_name, "-m", model_name, "--sequence-length", "2000"}; - - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - CHECK(!parser.UsageCalled()); - - exp->sequence_length = 2000; - } - - SUBCASE("set to 0") - { - int argc = 5; - char* argv[argc] = {app_name, "-m", model_name, "--sequence-length", "0"}; - - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - CHECK(!parser.UsageCalled()); - - exp->sequence_length = 20; - } - } - - SUBCASE("Option : --sequence-length-variation") - { - SUBCASE("non-negative") - { - int argc = 5; - char* argv[argc] = { - app_name, "-m", model_name, "--sequence-length-variation", "33.3"}; - - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - CHECK(!parser.UsageCalled()); - - exp->sequence_length_variation = 33.3; - } - - SUBCASE("negative") - { - int argc = 5; - char* argv[argc] = { - app_name, "-m", model_name, "--sequence-length-variation", "-10"}; - - expected_msg = CreateUsageMessage( - "--sequence-length-variation", "The value must be >= 0.0."); - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), expected_msg.c_str(), - PerfAnalyzerException); - - check_params = false; - } - } - - SUBCASE("Option : --percentile") - { - SUBCASE("set to 25") - { - int argc = 5; - char* argv[argc] = {app_name, "-m", model_name, "--percentile", "25"}; - - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - CHECK(!parser.UsageCalled()); - - exp->percentile = 25; - } - - SUBCASE("set to 225 - overflow check") - { - int argc = 5; - char* argv[argc] = {app_name, "-m", model_name, "--percentile", "225"}; - - expected_msg = 
CreateUsageMessage(
-        "--percentile",
-        "The value must be -1 for not reporting or in range (0, 100).");
-    CHECK_THROWS_WITH_AS(
-        act = parser.Parse(argc, argv), expected_msg.c_str(),
-        PerfAnalyzerException);
-
-    check_params = false;
-  }
-
-  SUBCASE("set to -1 - use average latency")
-  {
-    int argc = 5;
-    char* argv[argc] = {app_name, "-m", model_name, "--percentile", "-1"};
-
-    REQUIRE_NOTHROW(act = parser.Parse(argc, argv));
-    CHECK(!parser.UsageCalled());
-
-    exp->percentile = -1;
-  }
-  }
-
-  SUBCASE("Option : --data-directory")
-  {
-    SUBCASE("set to `/usr/data`")
-    {
-      int argc = 5;
-      char* argv[argc] = {
-          app_name, "-m", model_name, "--data-directory", "/usr/data"};
-
-      REQUIRE_NOTHROW(act = parser.Parse(argc, argv));
-      CHECK(!parser.UsageCalled());
-
-      exp->user_data.push_back("/usr/data");
-    }
-
-    SUBCASE("call twice")
-    {
-      // QUESTION: Is this the expected behavior? There are not enough details
-      // in the output. It is marked as deprecated; what does that mean? Is it
-      // used?
-      //
-      int argc = 7;
-      char* argv[argc] = {app_name,           "-m",        model_name,
-                          "--data-directory", "/usr/data", "--data-directory",
-                          "/another/dir"};
-
-      REQUIRE_NOTHROW(act = parser.Parse(argc, argv));
-      CHECK(!parser.UsageCalled());
-
-      exp->user_data.push_back("/usr/data");
-      exp->user_data.push_back("/another/dir");
-    }
-  }
-
-  SUBCASE("Option : --sequence-id-range")
-  {
-    SUBCASE("One arg")
-    {
-      int argc = 5;
-      char* argv[argc] = {
-          app_name, "-m", model_name, "--sequence-id-range", "53"};
-
-      REQUIRE_NOTHROW(act = parser.Parse(argc, argv));
-      CHECK(!parser.UsageCalled());
-
-      exp->start_sequence_id = 53;
-      exp->sequence_id_range = UINT32_MAX;
-    }
-    SUBCASE("Two args")
-    {
-      int argc = 5;
-      char* argv[argc] = {
-          app_name, "-m", model_name, "--sequence-id-range", "53:67"};
-
-      REQUIRE_NOTHROW(act = parser.Parse(argc, argv));
-      CHECK(!parser.UsageCalled());
-
-      exp->start_sequence_id = 53;
-      exp->sequence_id_range = 14;
-    }
-    SUBCASE("Three args")
-    {
-      int argc = 5;
-      char* argv[argc] = {
-          app_name, "-m", model_name, "--sequence-id-range", "53:67:92"};
-
-      expected_msg = CreateUsageMessage(
-          "--sequence-id-range", "The value does not match <start:end>.");
-      CHECK_THROWS_WITH_AS(
-          act = parser.Parse(argc, argv), expected_msg.c_str(),
-          PerfAnalyzerException);
-
-      check_params = false;
-    }
-    SUBCASE("Not a number")
-    {
-      int argc = 5;
-      char* argv[argc] = {
-          app_name, "-m", model_name, "--sequence-id-range", "BAD"};
-
-      expected_msg = CreateUsageMessage(
-          "--sequence-id-range", "Invalid value provided: BAD");
-      CHECK_THROWS_WITH_AS(
-          act = parser.Parse(argc, argv), expected_msg.c_str(),
-          PerfAnalyzerException);
-
-      check_params = false;  // Usage message called
-    }
-    SUBCASE("Not a number 2")
-    {
-      int argc = 5;
-      char* argv[argc] = {
-          app_name, "-m", model_name, "--sequence-id-range", "53:BAD"};
-
-      expected_msg = CreateUsageMessage(
-          "--sequence-id-range", "Invalid value provided: 53:BAD");
-      CHECK_THROWS_WITH_AS(
-          act = parser.Parse(argc, argv), expected_msg.c_str(),
-          PerfAnalyzerException);
-
-      check_params = false;  // Usage message called
-    }
-  }
-
-
-  SUBCASE("Option : --input-tensor-format")
-  {
-    SUBCASE("binary")
-    {
-      int argc = 5;
-      char* argv[argc] = {
-          app_name, "-m", model_name, "--input-tensor-format", "binary"};
-
-      REQUIRE_NOTHROW(act = parser.Parse(argc, argv));
-      CHECK(!parser.UsageCalled());
-
-      exp->input_tensor_format = cb::TensorFormat::BINARY;
-    }
-    SUBCASE("json")
-    {
-      int argc = 5;
-      char* argv[argc] = {
-          app_name, "-m", model_name, "--input-tensor-format", "json"};
-
REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - CHECK(!parser.UsageCalled()); - - exp->input_tensor_format = cb::TensorFormat::JSON; - } - SUBCASE("invalid") - { - int argc = 5; - char* argv[argc] = { - app_name, "-m", model_name, "--input-tensor-format", "invalid"}; - - expected_msg = CreateUsageMessage( - "--input-tensor-format", - "Unsupported type provided: 'invalid'. The available options are " - "'binary' or 'json'."); - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), expected_msg.c_str(), - PerfAnalyzerException); - - check_params = false; - } - } - - - SUBCASE("Option : --shape") - { - SUBCASE("expected input, single shape") - { - int argc = 5; - char* argv[argc] = { - app_name, "-m", model_name, "--shape", "input_name:1,2,3"}; - - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - CHECK(!parser.UsageCalled()); - - exp->input_shapes.emplace( - std::string("input_name"), std::vector{1, 2, 3}); - } - - SUBCASE("expected input, multiple shapes") - { - int argc = 9; - char* argv[argc] = { - app_name, - "-m", - model_name, - "--shape", - "input_name:1,2,3", - "--shape", - "alpha:10,24", - "--shape", - "beta:10,200,34,15,9000"}; - - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - CHECK(!parser.UsageCalled()); - - exp->input_shapes.emplace( - std::string("input_name"), std::vector{1, 2, 3}); - exp->input_shapes.emplace( - std::string("alpha"), std::vector{10, 24}); - exp->input_shapes.emplace( - std::string("beta"), std::vector{10, 200, 34, 15, 9000}); - } - - SUBCASE("using negative dims") - { - int argc = 5; - char* argv[argc] = { - app_name, "-m", model_name, "--shape", "input_name:-1,2,3"}; - - expected_msg = CreateUsageMessage( - "--shape", "The dimensions of input tensor must be > 0."); - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), expected_msg.c_str(), - PerfAnalyzerException); - - check_params = false; - } - - SUBCASE("equals sign, not colon") - { - int argc = 5; - char* argv[argc] = { - app_name, "-m", model_name, "--shape", "input_name=-1,2,3"}; - - expected_msg = CreateUsageMessage( - "--shape", "There must be a colon after input name."); - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), expected_msg.c_str(), - PerfAnalyzerException); - - check_params = false; - } - - SUBCASE("missing shape") - { - int argc = 5; - char* argv[argc] = {app_name, "-m", model_name, "--shape", "input_name"}; - - expected_msg = CreateUsageMessage( - "--shape", "There must be a colon after input name."); - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), expected_msg.c_str(), - PerfAnalyzerException); - - check_params = false; - } - - SUBCASE("missing colon") - { - int argc = 5; - char* argv[argc] = { - app_name, "-m", model_name, "--shape", "input_name1,2,3"}; - - expected_msg = CreateUsageMessage( - "--shape", "There must be a colon after input name."); - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), expected_msg.c_str(), - PerfAnalyzerException); - - check_params = false; - } - - SUBCASE("bad shapes - a,b,c") - { - int argc = 5; - char* argv[argc] = { - app_name, "-m", model_name, "--shape", "input_name:a,b,c"}; - - expected_msg = CreateUsageMessage( - "--shape", "Invalid value provided: input_name:a,b,c"); - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), expected_msg.c_str(), - PerfAnalyzerException); - - check_params = false; // Usage message called - } - - SUBCASE("bad shapes - [1,2,3]") - { - int argc = 5; - char* argv[argc] = { - app_name, "-m", model_name, "--shape", "input_name:[1,2,3]"}; - - expected_msg = CreateUsageMessage( - 
"--shape", "Invalid value provided: input_name:[1,2,3]"); - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), expected_msg.c_str(), - PerfAnalyzerException); - - check_params = false; // Usage message called - } - } - - SUBCASE("Option : --measurement-interval") - { - SUBCASE("set to 500") - { - int argc = 5; - char* argv[argc] = {app_name, "-m", model_name, "", "500"}; - - SUBCASE("Long form") - { - argv[3] = "--measurement-interval"; - } - - SUBCASE("Short form") - { - argv[3] = "-p"; - } - - CAPTURE(argv[3]); - - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - CHECK(!parser.UsageCalled()); - - exp->measurement_window_ms = 500; - } - - SUBCASE("set to -200") - { - int argc = 5; - char* argv[argc] = {app_name, "-m", model_name, "", "-200"}; - - SUBCASE("Long form") - { - argv[3] = "--measurement-interval"; - } - - SUBCASE("Short form") - { - argv[3] = "-p"; - } - - CAPTURE(argv[3]); - - expected_msg = CreateUsageMessage( - "--measurement-interval (-p)", "The value must be > 0 msec."); - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), expected_msg.c_str(), - PerfAnalyzerException); - - check_params = false; - } - - SUBCASE("set to non-numeric value") - { - int argc = 5; - char* argv[argc] = {app_name, "-m", model_name, "", "foobar"}; - - SUBCASE("Long form") - { - argv[3] = "--measurement-interval"; - expected_msg = CreateUsageMessage( - "--measurement-interval", "Invalid value provided: foobar"); - } - - SUBCASE("Short form") - { - argv[3] = "-p"; - expected_msg = - CreateUsageMessage("-p", "Invalid value provided: foobar"); - } - - CAPTURE(argv[3]); - - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), expected_msg.c_str(), - PerfAnalyzerException); - - check_params = false; // Usage message called - } - } - - SUBCASE("Option : --concurrency-range") - { - char* option_name = "--concurrency-range"; - uint64_t concurrency_range_start; - uint64_t concurrency_range_end; - - SUBCASE("start provided") - { - concurrency_range_start = 100; - std::string concurrency_range_str = - std::to_string(concurrency_range_start); - args.push_back(option_name); - args.push_back(concurrency_range_str.data()); // start - - int argc = args.size(); - char* argv[argc]; - std::copy(args.begin(), args.end(), argv); - - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - CHECK(!parser.UsageCalled()); - - exp->using_concurrency_range = true; - exp->concurrency_range.start = concurrency_range_start; - exp->max_threads = DEFAULT_MAX_THREADS; - } - - CheckValidRange( - args, option_name, parser, act, exp->using_concurrency_range, - exp->concurrency_range, &(exp->max_threads)); - CheckInvalidRange(args, option_name, parser, act, check_params); - - SUBCASE("wrong separator") - { - args.push_back(option_name); - args.push_back("100,400,10"); - - int argc = args.size(); - char* argv[argc]; - std::copy(args.begin(), args.end(), argv); - - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - CHECK(!parser.UsageCalled()); - - // BUG (TMA-1307): Should detect this and through an error. User will - // enter this and have no clue why the end and step sizes are not used - // correctly. 
- // - - check_params = false; - } - - SUBCASE("invalid condition - end and latency threshold are 0") - { - args.push_back(option_name); - args.push_back("100:0:25"); - args.push_back("--latency-threshold"); - args.push_back("0"); - - int argc = args.size(); - char* argv[argc]; - std::copy(args.begin(), args.end(), argv); - - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), - "The end of the search range and the latency limit can not be both 0 " - "(or 0.0) simultaneously", - PerfAnalyzerException); - - check_params = false; - } - - concurrency_range_start = 10; - SUBCASE("Max threads set to default when concurrency-range.end < 16") - { - concurrency_range_end = 10; - std::string concurrency_range_str = - std::to_string(concurrency_range_start) + ":" + - std::to_string(concurrency_range_end); - args.push_back(option_name); - args.push_back(concurrency_range_str.data()); - - int argc = args.size(); - char* argv[argc]; - std::copy(args.begin(), args.end(), argv); - - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - CHECK(!parser.UsageCalled()); - - exp->using_concurrency_range = true; - exp->concurrency_range.start = concurrency_range_start; - exp->concurrency_range.end = concurrency_range_end; - exp->max_threads = DEFAULT_MAX_THREADS; - } - - SUBCASE("Max_threads set to default when concurrency-range.end = 16") - { - concurrency_range_end = 16; - std::string concurrency_range_str = - std::to_string(concurrency_range_start) + ":" + - std::to_string(concurrency_range_end); - args.push_back(option_name); - args.push_back(concurrency_range_str.data()); - - int argc = args.size(); - char* argv[argc]; - std::copy(args.begin(), args.end(), argv); - - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - CHECK(!parser.UsageCalled()); - - exp->using_concurrency_range = true; - exp->concurrency_range.start = concurrency_range_start; - exp->concurrency_range.end = concurrency_range_end; - exp->max_threads = DEFAULT_MAX_THREADS; - } - - SUBCASE( - "Max_threads set to concurrency-range.end when concurrency-range.end > " - "16") - { - concurrency_range_end = 40; - std::string concurrency_range_str = - std::to_string(concurrency_range_start) + ":" + - std::to_string(concurrency_range_end); - args.push_back(option_name); - args.push_back(concurrency_range_str.data()); - - int argc = args.size(); - char* argv[argc]; - std::copy(args.begin(), args.end(), argv); - - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - CHECK(!parser.UsageCalled()); - - exp->using_concurrency_range = true; - exp->concurrency_range.start = concurrency_range_start; - exp->concurrency_range.end = concurrency_range_end; - exp->max_threads = exp->concurrency_range.end; - } - } - - SUBCASE("Option : --periodic-concurrency-range") - { - char* option_name = "--periodic-concurrency-range"; - - // Add required args that specifies where to dump profiled data - args.insert( - args.end(), {"-i", "grpc", "--async", "--streaming", - "--profile-export-file", "profile.json"}); - exp->protocol = cb::ProtocolType::GRPC; - exp->async = true; - exp->streaming = true; - exp->url = "localhost:8001"; // gRPC url - - SUBCASE("start provided") - { - args.push_back(option_name); - args.push_back("100"); // start - - int argc = args.size(); - char* argv[argc]; - std::copy(args.begin(), args.end(), argv); - - expected_msg = CreateUsageMessage( - option_name, "Both and values must be provided."); - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), expected_msg.c_str(), - PerfAnalyzerException); - - check_params = false; - } - - exp->max_threads = 
400;
-
-    CheckValidRange(
-        args, option_name, parser, act,
-        exp->is_using_periodic_concurrency_mode,
-        exp->periodic_concurrency_range, &(exp->max_threads));
-
-    CheckInvalidRange(args, option_name, parser, act, check_params);
-
-    SUBCASE("more than one load mode")
-    {
-      args.push_back(option_name);
-      args.push_back("100:400");
-      args.push_back("--concurrency-range");
-      args.push_back("10:40");
-
-      int argc = args.size();
-      char* argv[argc];
-      std::copy(args.begin(), args.end(), argv);
-
-      expected_msg =
-          "Cannot specify more then one inference load mode. Please choose "
-          "only one of the following modes: --concurrency-range, "
-          "--periodic-concurrency-range, --request-rate-range, or "
-          "--request-intervals.";
-      CHECK_THROWS_WITH_AS(
-          act = parser.Parse(argc, argv), expected_msg.c_str(),
-          PerfAnalyzerException);
-
-      check_params = false;
-    }
-
-    SUBCASE("no export file specified")
-    {
-      // Remove the export file args
-      args.pop_back();
-      args.pop_back();
-
-      args.push_back(option_name);
-      args.push_back("100:400");
-
-      int argc = args.size();
-      char* argv[argc];
-      std::copy(args.begin(), args.end(), argv);
-
-      expected_msg =
-          "Must provide --profile-export-file when using the "
-          "--periodic-concurrency-range option.";
-      CHECK_THROWS_WITH_AS(
-          act = parser.Parse(argc, argv), expected_msg.c_str(),
-          PerfAnalyzerException);
-
-      check_params = false;
-    }
-
-    SUBCASE("step is not factor of range size")
-    {
-      args.push_back(option_name);
-      args.push_back("100:400:7");
-
-      int argc = args.size();
-      char* argv[argc];
-      std::copy(args.begin(), args.end(), argv);
-
-      expected_msg = CreateUsageMessage(
-          option_name,
-          "The <step> value must be a factor of the range size (<end> - "
-          "<start>).");
-      CHECK_THROWS_WITH_AS(
-          act = parser.Parse(argc, argv), expected_msg.c_str(),
-          PerfAnalyzerException);
-
-      check_params = false;
-    }
-
-    SUBCASE("step is zero")
-    {
-      args.push_back(option_name);
-      args.push_back("10:400:0");
-
-      int argc = args.size();
-      char* argv[argc];
-      std::copy(args.begin(), args.end(), argv);
-
-      expected_msg =
-          CreateUsageMessage(option_name, "The <step> value must be > 0.");
-      CHECK_THROWS_WITH_AS(
-          act = parser.Parse(argc, argv), expected_msg.c_str(),
-          PerfAnalyzerException);
-
-      check_params = false;
-    }
-  }
-
-  SUBCASE("Option : --request-period")
-  {
-    expected_msg =
-        CreateUsageMessage("--request-period", "The value must be > 0");
-    CHECK_INT_OPTION("--request-period", exp->request_period, expected_msg);
-
-    SUBCASE("set to 0")
-    {
-      args.push_back("--request-period");
-      args.push_back("0");
-
-      int argc = args.size();
-      char* argv[argc];
-      std::copy(args.begin(), args.end(), argv);
-
-      CHECK_THROWS_WITH_AS(
-          act = parser.Parse(argc, argv), expected_msg.c_str(),
-          PerfAnalyzerException);
-
-      check_params = false;
-    }
-  }
-
-  SUBCASE("Option : --request-parameter")
-  {
-    char* option_name = "--request-parameter";
-
-    // Add required args that select the gRPC protocol with async streaming
-    args.insert(args.end(), {"-i", "grpc", "--async", "--streaming"});
-    exp->protocol = cb::ProtocolType::GRPC;
-    exp->async = true;
-    exp->streaming = true;
-    exp->url = "localhost:8001";  // gRPC url
-
-    SUBCASE("valid parameter")
-    {
-      args.push_back(option_name);
-      args.push_back("max_tokens:256:int");
-
-      int argc = args.size();
-      char* argv[argc];
-      std::copy(args.begin(), args.end(), argv);
-
-      REQUIRE_NOTHROW(act = parser.Parse(argc, argv));
-      CHECK(!parser.UsageCalled());
-
-      cb::RequestParameter param;
-      param.value = "256";
-      param.type = "int";
-      exp->request_parameters["max_tokens"] = param;
-    }
-
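-    // Illustrative aside (not part of the original suite): one plausible way
-    // to split the "<name>:<value>:<type>" triple accepted above. The real
-    // parsing lives in command_line_parser.cc; this lambda is only a sketch
-    // and assumes the <optional> and <array> headers are available.
-    auto parse_request_parameter = [](const std::string& arg)
-        -> std::optional<std::array<std::string, 3>> {
-      size_t first = arg.find(':');
-      size_t second = (first == std::string::npos)
-                          ? std::string::npos
-                          : arg.find(':', first + 1);
-      if (first == std::string::npos || second == std::string::npos ||
-          arg.find(':', second + 1) != std::string::npos) {
-        return std::nullopt;  // must match <name:value:type> exactly
-      }
-      return std::array<std::string, 3>{
-          arg.substr(0, first), arg.substr(first + 1, second - first - 1),
-          arg.substr(second + 1)};
-    };
-    CHECK(parse_request_parameter("max_tokens:256:int").has_value());
-    CHECK(!parse_request_parameter("max_tokens:256").has_value());
-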
SUBCASE("missing type") - { - args.push_back(option_name); - args.push_back("max_tokens:256"); - - int argc = args.size(); - char* argv[argc]; - std::copy(args.begin(), args.end(), argv); - - expected_msg = CreateUsageMessage( - option_name, "The value does not match ."); - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), expected_msg.c_str(), - PerfAnalyzerException); - - check_params = false; - } - } - - SUBCASE("Option : --latency-threshold") - { - expected_msg = CreateUsageMessage( - "--latency-threshold (-l)", "The value must be >= 0 msecs."); - CHECK_INT_OPTION( - "--latency-threshold", exp->latency_threshold_ms, expected_msg); - - SUBCASE("set to 0") - { - int argc = 5; - char* argv[argc] = { - app_name, "-m", model_name, "--latency-threshold", "0"}; - - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - CHECK(!parser.UsageCalled()); - } - } - - SUBCASE("Option : --stability-percentage") - { - SUBCASE("valid value") - { - int argc = 5; - char* argv[argc] = { - app_name, "-m", model_name, "--stability-percentage", "80"}; - - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - CHECK(!parser.UsageCalled()); - - exp->stability_threshold = .8f; - } - - SUBCASE("set to 0") - { - int argc = 5; - char* argv[argc] = { - app_name, "-m", model_name, "--stability-percentage", "0"}; - - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - CHECK(!parser.UsageCalled()); - } - - SUBCASE("negative value") - { - int argc = 5; - char* argv[argc] = { - app_name, "-m", model_name, "--stability-percentage", "-20"}; - - expected_msg = CreateUsageMessage( - "--stability-percentage (-s)", "The value must be >= 0.0."); - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), expected_msg.c_str(), - PerfAnalyzerException); - - check_params = false; - } - - SUBCASE("floating point value") - { - int argc = 5; - char* argv[argc] = { - app_name, "-m", model_name, "--stability-percentage", "29.5"}; - - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - CHECK(!parser.UsageCalled()); - - exp->stability_threshold = .295f; - } - - SUBCASE("missing value") - { - int argc = 4; - char* argv[argc] = {app_name, "-m", model_name, "--stability-percentage"}; - - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), "", PerfAnalyzerException); - - check_params = false; - } - } - - SUBCASE("Option : --max-trials") - { - expected_msg = - CreateUsageMessage("--max-trials (-r)", "The value must be > 0."); - CHECK_INT_OPTION("--max-trials", exp->max_trials, expected_msg); - - SUBCASE("set to 0") - { - int argc = 5; - char* argv[argc] = {app_name, "-m", model_name, "--max-trials", "0"}; - - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), expected_msg.c_str(), - PerfAnalyzerException); - - check_params = false; - } - } - - SUBCASE("Option : --request-count") - { - SUBCASE("valid value") - { - int argc = 5; - char* argv[argc] = {app_name, "-m", model_name, "--request-count", "500"}; - - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - CHECK(!parser.UsageCalled()); - - exp->request_count = 500; - exp->measurement_mode = MeasurementMode::COUNT_WINDOWS; - exp->measurement_request_count = 500; - } - SUBCASE("negative value") - { - int argc = 5; - char* argv[argc] = {app_name, "-m", model_name, "--request-count", "-2"}; - - expected_msg = - CreateUsageMessage("--request-count", "The value must be > 0."); - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), expected_msg.c_str(), - PerfAnalyzerException); - check_params = false; - } - SUBCASE("less than request rate") - { - int argc = 7; - char* argv[argc] = {app_name, 
"-m", - model_name, "--request-count", - "2", "--request-rate-range", - "5"}; - - expected_msg = "request-count can not be less than request-rate"; - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), expected_msg.c_str(), - PerfAnalyzerException); - check_params = false; - } - SUBCASE("less than concurrency") - { - int argc = 7; - char* argv[argc] = {app_name, "-m", - model_name, "--request-count", - "2", "--concurrency-range", - "5"}; - - expected_msg = "request-count can not be less than concurrency"; - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), expected_msg.c_str(), - PerfAnalyzerException); - check_params = false; - } - SUBCASE("multiple request rate") - { - int argc = 7; - char* argv[argc] = {app_name, "-m", - model_name, "--request-count", - "20", "--request-rate-range", - "5:6:1"}; - - expected_msg = - "request-count not supported with multiple request-rate values in " - "one run"; - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), expected_msg.c_str(), - PerfAnalyzerException); - check_params = false; - } - SUBCASE("multiple concurrency") - { - int argc = 7; - char* argv[argc] = {app_name, "-m", - model_name, "--request-count", - "20", "--concurrency-range", - "5:6:1"}; - - expected_msg = - "request-count not supported with multiple concurrency values in " - "one run"; - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), expected_msg.c_str(), - PerfAnalyzerException); - check_params = false; - } - - SUBCASE("mode and count are overwritten with non-zero request-count") - { - int argc = 9; - char* argv[argc] = { - app_name, - "-m", - model_name, - "--request-count", - "2000", - "--measurement-mode", - "time_windows", - "measurement-request-count", - "30"}; - - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - CHECK(!parser.UsageCalled()); - - exp->request_count = 2000; - exp->measurement_mode = MeasurementMode::COUNT_WINDOWS; - exp->measurement_request_count = 2000; - } - SUBCASE("zero value (no override to measurement mode)") - { - int argc = 7; - char* argv[argc] = {app_name, "-m", model_name, - "--request-count", "0", "--measurement-mode", - "time_windows"}; - - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - CHECK(!parser.UsageCalled()); - - exp->request_count = 0; - exp->measurement_mode = MeasurementMode::TIME_WINDOWS; - } - SUBCASE("zero value (no override to measurement request count)") - { - int argc = 9; - char* argv[argc] = { - app_name, - "-m", - model_name, - "--request-count", - "0", - "--measurement-mode", - "count_windows", - "--measurement-request-count", - "50"}; - - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - CHECK(!parser.UsageCalled()); - - exp->request_count = 0; - exp->measurement_mode = MeasurementMode::COUNT_WINDOWS; - exp->measurement_request_count = 50; - } - } - - SUBCASE("Option : --collect-metrics") - { - SUBCASE("with --service-kind != triton") - { - int argc = 8; - char* argv[argc] = { - app_name, "-m", model_name, "--collect-metrics", - "--service-kind", "tfserving", "-i", "grpc"}; - - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), - "Server-side metric collection is only supported with Triton client " - "backend.", - PerfAnalyzerException); - - check_params = false; - } - } - - SUBCASE("Option : --metrics-url") - { - // missing --collect-metrics - int argc = 5; - char* argv[argc] = { - app_name, "-m", model_name, "--metrics-url", "localhost:8002/metrics"}; - - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), - "Must specify --collect-metrics when using the --metrics-url option.", - 
PerfAnalyzerException); - - check_params = false; - } - - SUBCASE("Option : --metrics-interval") - { - SUBCASE("missing --collect-metrics") - { - int argc = 5; - char* argv[argc] = { - app_name, "-m", model_name, "--metrics-interval", "1000"}; - - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), - "Must specify --collect-metrics when using the --metrics-interval " - "option.", - PerfAnalyzerException); - - check_params = false; - } - - SUBCASE("metrics interval 0") - { - int argc = 6; - char* argv[argc] = { - app_name, "-m", model_name, "--collect-metrics", "--metrics-interval", - "0"}; - - expected_msg = CreateUsageMessage( - "--metrics-interval", "The value must be > 0 msecs."); - CHECK_THROWS_WITH_AS( - act = parser.Parse(argc, argv), expected_msg.c_str(), - PerfAnalyzerException); - - check_params = false; - } - } - - SUBCASE("Option : --bls-composing-models") - { - int argc = 5; - - SUBCASE("one model") - { - char* argv[argc] = { - app_name, "-m", model_name, "--bls-composing-models", "a"}; - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - CHECK(act->bls_composing_models.size() == 1); - CHECK_STRING(act->bls_composing_models[0].first, "a"); - CHECK_STRING(act->bls_composing_models[0].second, ""); - } - SUBCASE("lists with no version") - { - SUBCASE("a,b,c") - { - char* argv[argc] = { - app_name, "-m", model_name, "--bls-composing-models", "a,b,c"}; - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - } - SUBCASE("a, b, c") - { - char* argv[argc] = { - app_name, "-m", model_name, "--bls-composing-models", "a, b, c"}; - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - } - SUBCASE("a,b, c") - { - char* argv[argc] = { - app_name, "-m", model_name, "--bls-composing-models", "a,b, c"}; - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - } - SUBCASE("a, b,c") - { - char* argv[argc] = { - app_name, "-m", model_name, "--bls-composing-models", "a, b,c"}; - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - } - SUBCASE("a, b, c") - { - char* argv[argc] = { - app_name, "-m", model_name, "--bls-composing-models", "a, b, c"}; - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - } - - CHECK(!parser.UsageCalled()); - REQUIRE(act->bls_composing_models.size() == 3); - CHECK_STRING(act->bls_composing_models[0].first, "a"); - CHECK_STRING(act->bls_composing_models[1].first, "b"); - CHECK_STRING(act->bls_composing_models[2].first, "c"); - CHECK_STRING(act->bls_composing_models[0].second, ""); - CHECK_STRING(act->bls_composing_models[1].second, ""); - CHECK_STRING(act->bls_composing_models[2].second, ""); - } - SUBCASE("list with version") - { - SUBCASE("a:1,b:2,c:1") - { - char* argv[argc] = { - app_name, "-m", model_name, "--bls-composing-models", - "a:1,b:2,c:1"}; - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - } - SUBCASE("a:1, b:2, c:1") - { - char* argv[argc] = { - app_name, "-m", model_name, "--bls-composing-models", - "a:1, b:2, c:1"}; - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - } - SUBCASE("a:1, b:2, c:1") - { - char* argv[argc] = { - app_name, "-m", model_name, "--bls-composing-models", - "a:1, b:2, c:1"}; - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - } - SUBCASE("a:1 , b:2, c:1") - { - char* argv[argc] = { - app_name, "-m", model_name, "--bls-composing-models", - "a:1 , b:2, c:1"}; - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - } - CHECK(!parser.UsageCalled()); - REQUIRE(act->bls_composing_models.size() == 3); - CHECK_STRING(act->bls_composing_models[0].first, "a"); - CHECK_STRING(act->bls_composing_models[1].first, "b"); - 
CHECK_STRING(act->bls_composing_models[2].first, "c"); - CHECK_STRING(act->bls_composing_models[0].second, "1"); - CHECK_STRING(act->bls_composing_models[1].second, "2"); - CHECK_STRING(act->bls_composing_models[2].second, "1"); - } - SUBCASE("list with some versions") - { - SUBCASE("a,b:3,c") - { - char* argv[argc] = { - app_name, "-m", model_name, "--bls-composing-models", "a,b:3,c"}; - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - } - CHECK(!parser.UsageCalled()); - REQUIRE(act->bls_composing_models.size() == 3); - CHECK_STRING(act->bls_composing_models[0].first, "a"); - CHECK_STRING(act->bls_composing_models[1].first, "b"); - CHECK_STRING(act->bls_composing_models[2].first, "c"); - CHECK_STRING(act->bls_composing_models[0].second, ""); - CHECK_STRING(act->bls_composing_models[1].second, "3"); - CHECK_STRING(act->bls_composing_models[2].second, ""); - } - SUBCASE("multiple versions of the same model") - { - SUBCASE("a:1,b:2,a:2") - { - char* argv[argc] = { - app_name, "-m", model_name, "--bls-composing-models", "a:1,b,a:2"}; - REQUIRE_NOTHROW(act = parser.Parse(argc, argv)); - } - CHECK(!parser.UsageCalled()); - REQUIRE(act->bls_composing_models.size() == 3); - CHECK_STRING(act->bls_composing_models[0].first, "a"); - CHECK_STRING(act->bls_composing_models[1].first, "b"); - CHECK_STRING(act->bls_composing_models[2].first, "a"); - CHECK_STRING(act->bls_composing_models[0].second, "1"); - CHECK_STRING(act->bls_composing_models[1].second, ""); - CHECK_STRING(act->bls_composing_models[2].second, "2"); - } - } - - if (check_params) { - if (act == nullptr) { - std::cerr - << "Error: Attempting to access `act` but was not initialized. Check " - "if the test cases are missing `check_params = false` statement." - << std::endl; - exit(1); - } - CHECK_PARAMS(act, exp); - } - optind = 1; // Reset GotOpt index, needed to parse the next command line -} -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/test_concurrency_manager.cc b/src/c++/perf_analyzer/test_concurrency_manager.cc deleted file mode 100644 index 1941a018e..000000000 --- a/src/c++/perf_analyzer/test_concurrency_manager.cc +++ /dev/null @@ -1,941 +0,0 @@ -// Copyright 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include -#include - -#include "command_line_parser.h" -#include "concurrency_manager.h" -#include "doctest.h" -#include "mock_client_backend.h" -#include "mock_concurrency_worker.h" -#include "mock_data_loader.h" -#include "mock_infer_data_manager.h" -#include "mock_model_parser.h" -#include "mock_sequence_manager.h" -#include "sequence_manager.h" -#include "test_load_manager_base.h" -#include "test_utils.h" - -namespace triton { namespace perfanalyzer { - -class TestConcurrencyManager : public TestLoadManagerBase, - public ConcurrencyManager { - public: - TestConcurrencyManager( - PerfAnalyzerParameters params, bool is_sequence_model = false, - bool is_decoupled_model = false, bool use_mock_infer = false) - : use_mock_infer_(use_mock_infer), - TestLoadManagerBase(params, is_sequence_model, is_decoupled_model), - ConcurrencyManager( - params.async, params.streaming, params.batch_size, - params.max_threads, params.max_concurrency, - params.shared_memory_type, params.output_shm_size, GetParser(), - GetFactory(), params.request_parameters) - { - } - - std::shared_ptr MakeWorker( - std::shared_ptr thread_stat, - std::shared_ptr thread_config) override - { - size_t id = workers_.size(); - - auto worker = std::make_shared( - id, thread_stat, thread_config, parser_, data_loader_, factory_, - on_sequence_model_, async_, max_concurrency_, using_json_data_, - streaming_, batch_size_, wake_signal_, wake_mutex_, active_threads_, - execute_, infer_data_manager_, sequence_manager_); - - if (use_mock_infer_) { - EXPECT_CALL(*worker, Infer()) - .WillRepeatedly(testing::Invoke( - worker.get(), &MockConcurrencyWorker::EmptyInfer)); - } - return worker; - } - - - void TestReconfigThreads( - const size_t concurrent_request_count, const size_t num_requests, - std::vector& expected_configs) - { - ConcurrencyManager::ReconfigThreads(concurrent_request_count, num_requests); - - auto expected_size = expected_configs.size(); - - // Check that the correct number of threads are created - // - CHECK(threads_.size() == expected_size); - - // Check that threads_config has correct concurrency and seq stat index - // offset - for (auto i = 0; i < expected_configs.size(); i++) { - CHECK( - threads_config_[i]->concurrency_ == expected_configs[i].concurrency_); - CHECK( - threads_config_[i]->seq_stat_index_offset_ == - expected_configs[i].seq_stat_index_offset_); - CHECK( - threads_config_[i]->num_requests_ == - expected_configs[i].num_requests_); - } - } - - void StopWorkerThreads() { LoadManager::StopWorkerThreads(); } - - /// Test that the correct Infer function is called in the backend - /// - void TestInferType() - { - // FIXME TMA-982: This delay is to avoid deadlock. Investigate why delay is - // needed. 
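-    // The 50 ms mock delay below makes each simulated inference slow enough
-    // for the workers to overlap before CheckInferType() inspects which
-    // client API (sync, async, or streaming) was exercised; per the FIXME
-    // above, why the delay prevents the deadlock is still an open question.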
- stats_->SetDelays({50}); - - ChangeConcurrencyLevel(params_.max_concurrency); - - std::this_thread::sleep_for(std::chrono::milliseconds(500)); - - CheckInferType(); - } - - /// Test that the correct concurrency is maintained in the load manager - /// - void TestConcurrency( - size_t response_delay, std::chrono::milliseconds sleep_time) - { - stats_->SetDelays({response_delay}); - - ChangeConcurrencyLevel(params_.max_concurrency); - std::this_thread::sleep_for(sleep_time); - - CheckConcurrency(); - } - - /// Test sequence handling - /// - void TestSequences() - { - size_t delay_ms = 10; - stats_->SetDelays({delay_ms}); - - auto stats = cb::InferStat(); - double concurrency1 = params_.max_concurrency / 2; - double concurrency2 = params_.max_concurrency; - int sleep_ms = 500; - - auto sleep_time = std::chrono::milliseconds(sleep_ms); - size_t expected_count1 = sleep_ms * concurrency1 / delay_ms; - size_t expected_count2 = - sleep_ms * concurrency2 / delay_ms + expected_count1; - - // Run and check request rate 1 - // - ChangeConcurrencyLevel(concurrency1); - std::this_thread::sleep_for(sleep_time); - - stats = cb::InferStat(); - GetAccumulatedClientStat(&stats); - CHECK( - stats.completed_request_count == - doctest::Approx(expected_count1).epsilon(0.10)); - - PauseSequenceWorkers(); - CheckSequences(concurrency1); - - // Make sure that the client and the manager are in agreement on the request - // count in between rates - // - stats = cb::InferStat(); - GetAccumulatedClientStat(&stats); - int client_total_requests = stats_->num_async_infer_calls + - stats_->num_async_stream_infer_calls + - stats_->num_infer_calls; - CHECK(stats.completed_request_count == client_total_requests); - - ResetStats(); - - // Run and check request rate 2 - // - ChangeConcurrencyLevel(concurrency2); - std::this_thread::sleep_for(sleep_time); - - stats = cb::InferStat(); - GetAccumulatedClientStat(&stats); - CHECK( - stats.completed_request_count == - doctest::Approx(expected_count2).epsilon(0.10)); - - // Stop all threads and make sure everything is as expected - // - StopWorkerThreads(); - - CheckSequences(concurrency2); - } - - /// Test that tries to find deadlocks and livelocks - /// - void TestTimeouts() - { - TestWatchDog watchdog(1000); - ChangeConcurrencyLevel(params_.max_concurrency); - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - StopWorkerThreads(); - watchdog.stop(); - } - - /// Test that idle time is tracked correctly - void TestOverhead() - { - stats_->SetDelays({1}); - ChangeConcurrencyLevel(params_.max_concurrency); - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - // During a run of 100 ms (100,000,000 ns), make sure that the idle time is - // at least 95% of that - // - auto idle_time_ns = GetIdleTime(); - CHECK(idle_time_ns > 95000000); - StopWorkerThreads(); - } - - std::shared_ptr& parser_{LoadManager::parser_}; - std::shared_ptr& data_loader_{LoadManager::data_loader_}; - std::shared_ptr& sequence_manager_{ - LoadManager::sequence_manager_}; - bool& using_json_data_{LoadManager::using_json_data_}; - bool& execute_{ConcurrencyManager::execute_}; - size_t& batch_size_{LoadManager::batch_size_}; - size_t& max_threads_{LoadManager::max_threads_}; - std::shared_ptr factory_{ - TestLoadManagerBase::factory_}; - std::shared_ptr& infer_data_manager_{ - LoadManager::infer_data_manager_}; - - private: - bool use_mock_infer_{false}; - - void CheckConcurrency() - { - if (params_.max_concurrency < 4) { - CHECK(stats_->num_active_infer_calls == params_.max_concurrency); - } 
else { - CHECK( - stats_->num_active_infer_calls == - doctest::Approx(params_.max_concurrency).epsilon(0.25)); - } - } - - - std::shared_ptr MakeSequenceManager( - const uint64_t start_sequence_id, const uint64_t sequence_id_range, - const size_t sequence_length, const bool sequence_length_specified, - const double sequence_length_variation, const bool using_json_data, - std::shared_ptr data_loader) override - { - return std::make_shared( - start_sequence_id, sequence_id_range, sequence_length, - sequence_length_specified, sequence_length_variation, using_json_data, - data_loader); - } -}; - -/// Test that the correct Infer function is called in the backend -/// -TEST_CASE("concurrency_infer_type") -{ - PerfAnalyzerParameters params{}; - - params.max_concurrency = 1; - - SUBCASE("async_streaming") - { - params.async = true; - params.streaming = true; - } - SUBCASE("async_no_streaming") - { - params.async = true; - params.streaming = false; - } - SUBCASE("no_async_streaming") - { - params.async = false; - params.streaming = true; - } - SUBCASE("no_async_no_streaming") - { - params.async = false; - params.streaming = false; - } - - - TestConcurrencyManager tcm(params); - - tcm.InitManager( - params.string_length, params.string_data, params.zero_input, - params.user_data, params.start_sequence_id, params.sequence_id_range, - params.sequence_length, params.sequence_length_specified, - params.sequence_length_variation); - - tcm.TestInferType(); -} - -/// Test that the correct concurrency is maintained in the load manager -/// -TEST_CASE("concurrency_concurrency") -{ - PerfAnalyzerParameters params{}; - size_t response_delay{50}; - std::chrono::milliseconds sleep_time{225}; - - SUBCASE("sync, no-streaming, 1 concurrency, 1 thread") - { - params.forced_sync = true; - params.async = false; - params.streaming = false; - params.max_concurrency = 1; - params.max_threads = 1; - } - - SUBCASE("sync, no-streaming, 4 concurrency, 4 threads") - { - params.forced_sync = true; - params.async = false; - params.streaming = false; - params.max_concurrency = 4; - params.max_threads = 4; - } - - SUBCASE("async, no-streaming, 1 concurrency, 1 thread") - { - params.forced_sync = false; - params.async = true; - params.streaming = false; - params.max_concurrency = 1; - params.max_threads = 1; - } - - SUBCASE("async, no-streaming, 4 concurrency, 1 thread") - { - params.forced_sync = false; - params.async = true; - params.streaming = false; - params.max_concurrency = 4; - params.max_threads = 1; - } - - SUBCASE("async, no-streaming, 4 concurrency, 2 threads") - { - params.forced_sync = false; - params.async = true; - params.streaming = false; - params.max_concurrency = 4; - params.max_threads = 2; - } - - SUBCASE("async, no-streaming, 4 concurrency, 4 threads") - { - params.forced_sync = false; - params.async = true; - params.streaming = false; - params.max_concurrency = 4; - params.max_threads = 4; - } - - SUBCASE("async, streaming, 1 concurrency, 1 thread") - { - params.forced_sync = false; - params.async = true; - params.streaming = true; - params.max_concurrency = 1; - params.max_threads = 1; - } - - SUBCASE("async, streaming, 4 concurrency, 1 thread") - { - params.forced_sync = false; - params.async = true; - params.streaming = true; - params.max_concurrency = 4; - params.max_threads = 1; - } - - SUBCASE("async, streaming, 4 concurrency, 2 threads") - { - params.forced_sync = false; - params.async = true; - params.streaming = true; - params.max_concurrency = 4; - params.max_threads = 2; - } - - SUBCASE("async, 
streaming, 4 concurrency, 4 threads") - { - params.forced_sync = false; - params.async = true; - params.streaming = true; - params.max_concurrency = 4; - params.max_threads = 4; - } - - - TestConcurrencyManager tcm(params); - - tcm.InitManager( - params.string_length, params.string_data, params.zero_input, - params.user_data, params.start_sequence_id, params.sequence_id_range, - params.sequence_length, params.sequence_length_specified, - params.sequence_length_variation); - - tcm.TestConcurrency(response_delay, sleep_time); -} - -/// Check that the inference requests for sequences follow all rules and -/// parameters -/// -TEST_CASE("concurrency_sequence") -{ - PerfAnalyzerParameters params = TestLoadManagerBase::GetSequenceTestParams(); - const bool is_sequence_model{true}; - - TestConcurrencyManager tcm(params, is_sequence_model); - - tcm.InitManager( - params.string_length, params.string_data, params.zero_input, - params.user_data, params.start_sequence_id, params.sequence_id_range, - params.sequence_length, params.sequence_length_specified, - params.sequence_length_variation); - tcm.TestSequences(); -} - -/// Create the case where the sequences do NOT go round robin due to -/// the first request taking longer than the rest. -/// -/// This exposed a bug where we were constantly resetting ctx IDs -/// and issuing over and over again to the first sequence even though -/// it was the only sequence that should NOT be issued because it was -/// still outstanding -/// -TEST_CASE("concurrency_free_ctx_ids") -{ - PerfAnalyzerParameters params{}; - params.async = true; - params.streaming = true; - params.max_concurrency = 6; - - bool is_sequence_model{true}; - - - TestConcurrencyManager tcm(params, is_sequence_model); - - - tcm.InitManager( - params.string_length, params.string_data, params.zero_input, - params.user_data, params.start_sequence_id, params.sequence_id_range, - params.sequence_length, params.sequence_length_specified, - params.sequence_length_variation); - - // Have the first request (sequence ID 1) take very long, and all the other - // requests are fast - // - tcm.stats_->SetDelays({50, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}); - - std::shared_ptr thread_stat{std::make_shared()}; - std::shared_ptr thread_config{ - std::make_shared(0)}; - thread_config->concurrency_ = 4; - - std::shared_ptr worker{tcm.MakeWorker(thread_stat, thread_config)}; - - std::future infer_future{std::async(&IWorker::Infer, worker)}; - - std::this_thread::sleep_for(std::chrono::milliseconds(15)); - - early_exit = true; - infer_future.get(); - - // The first sequence should only be called two times, once at the very start, - // and once during shutdown - // - CHECK(tcm.stats_->sequence_status.seq_ids_to_count.at(1) == 2); -} - -TEST_CASE("Concurrency - shared memory infer input calls") -{ - PerfAnalyzerParameters params{}; - params.max_concurrency = 4; - bool is_sequence_model{false}; - - const auto& ParameterizeAsyncAndStreaming{[&]() { - SUBCASE("sync non-streaming") - { - params.async = false; - params.streaming = false; - } - SUBCASE("async non-streaming") - { - params.async = true; - params.streaming = false; - } - SUBCASE("async streaming") - { - params.async = true; - params.streaming = true; - } - }}; - - const auto& ParameterizeSequence{[&]() { - SUBCASE("non-sequence") - { - is_sequence_model = false; - ParameterizeAsyncAndStreaming(); - } - SUBCASE("sequence") - { - is_sequence_model = true; - params.num_of_sequences = 1; - ParameterizeAsyncAndStreaming(); - } - }}; - - const auto& 
ParameterizeMemory{[&]() {
-   SUBCASE("No shared memory")
-   {
-     params.shared_memory_type = NO_SHARED_MEMORY;
-     ParameterizeSequence();
-   }
-   SUBCASE("system shared memory")
-   {
-     params.shared_memory_type = SYSTEM_SHARED_MEMORY;
-     ParameterizeSequence();
-   }
-   SUBCASE("cuda shared memory")
-   {
-     params.shared_memory_type = CUDA_SHARED_MEMORY;
-     ParameterizeSequence();
-   }
- }};
-
- ParameterizeMemory();
-
- const std::string json_str{R"(
- {
-   "data": [
-     {
-       "INPUT0": [2000000000]
-     },
-     {
-       "INPUT0": [2000000001]
-     }
-   ]
- }
- )"};
-
- MockInputPipeline mip =
-     TestLoadManagerBase::ProcessCustomJsonData(json_str, is_sequence_model);
-
- TestConcurrencyManager tcm(params, is_sequence_model);
-
- tcm.infer_data_manager_ =
-     MockInferDataManagerFactory::CreateMockInferDataManager(
-         params.max_threads, params.batch_size, params.shared_memory_type,
-         params.output_shm_size, params.request_parameters,
-         mip.mock_model_parser_, tcm.factory_, mip.mock_data_loader_);
-
- std::shared_ptr<ThreadStat> thread_stat{std::make_shared<ThreadStat>()};
- std::shared_ptr<ThreadConfig> thread_config{
-     std::make_shared<ThreadConfig>(0)};
- thread_config->concurrency_ = 1;
-
- tcm.parser_ = mip.mock_model_parser_;
- tcm.data_loader_ = mip.mock_data_loader_;
- tcm.using_json_data_ = true;
- tcm.execute_ = true;
- tcm.batch_size_ = 1;
- tcm.max_threads_ = 1;
-
- tcm.InitManager(
-     params.string_length, params.string_data, params.zero_input,
-     params.user_data, params.start_sequence_id, params.sequence_id_range,
-     params.sequence_length, params.sequence_length_specified,
-     params.sequence_length_variation);
-
- std::shared_ptr<IWorker> worker{tcm.MakeWorker(thread_stat, thread_config)};
- std::future<void> infer_future{std::async(&IWorker::Infer, worker)};
-
- std::this_thread::sleep_for(std::chrono::milliseconds(18));
-
- early_exit = true;
- infer_future.get();
-
- const auto& actual_append_raw_calls{tcm.stats_->num_append_raw_calls};
- const auto& actual_set_shared_memory_calls{
-     tcm.stats_->num_set_shared_memory_calls};
-
- if (params.shared_memory_type == NO_SHARED_MEMORY) {
-   CHECK(actual_append_raw_calls > 0);
-   CHECK(actual_set_shared_memory_calls == 0);
- } else {
-   CHECK(actual_append_raw_calls == 0);
-   CHECK(actual_set_shared_memory_calls > 0);
- }
-}
-
-/// Verify Shared Memory API calls
-///
-TEST_CASE("Concurrency - Shared memory methods")
-{
- PerfAnalyzerParameters params;
- bool is_sequence = false;
- bool is_decoupled = false;
- bool use_mock_infer = true;
-
- const std::string json_str{R"(
- {
-   "data": [
-     {
-       "INPUT0": [2123456789]
-     }
-   ]
- }
- )"};
-
- MockInputPipeline mip = TestLoadManagerBase::ProcessCustomJsonData(json_str);
-
- cb::MockClientStats::SharedMemoryStats expected_stats;
-
- SUBCASE("System shared memory usage")
- {
-   params.shared_memory_type = SYSTEM_SHARED_MEMORY;
-   TestConcurrencyManager tcm(
-       params, is_sequence, is_decoupled, use_mock_infer);
-
-   tcm.infer_data_manager_ =
-       MockInferDataManagerFactory::CreateMockInferDataManager(
-           params.max_threads, params.batch_size, params.shared_memory_type,
-           params.output_shm_size, params.request_parameters,
-           mip.mock_model_parser_, tcm.factory_, mip.mock_data_loader_);
-
-   tcm.InitManager(
-       params.string_length, params.string_data, params.zero_input,
-       params.user_data, params.start_sequence_id, params.sequence_id_range,
-       params.sequence_length, params.sequence_length_specified,
-       params.sequence_length_variation);
-
-   expected_stats.num_unregister_all_shared_memory_calls = 1;
-   expected_stats.num_register_system_shared_memory_calls = 1;
-   expected_stats.num_create_shared_memory_region_calls = 1;
-   expected_stats.num_map_shared_memory_calls = 1;
-   tcm.CheckSharedMemory(expected_stats);
- }
-
- SUBCASE("Cuda shared memory usage")
- {
-   params.shared_memory_type = CUDA_SHARED_MEMORY;
-   TestConcurrencyManager tcm(
-       params, is_sequence, is_decoupled, use_mock_infer);
-
-   tcm.infer_data_manager_ =
-       MockInferDataManagerFactory::CreateMockInferDataManager(
-           params.max_threads, params.batch_size, params.shared_memory_type,
-           params.output_shm_size, params.request_parameters,
-           mip.mock_model_parser_, tcm.factory_, mip.mock_data_loader_);
-
-   tcm.InitManager(
-       params.string_length, params.string_data, params.zero_input,
-       params.user_data, params.start_sequence_id, params.sequence_id_range,
-       params.sequence_length, params.sequence_length_specified,
-       params.sequence_length_variation);
-
-   expected_stats.num_unregister_all_shared_memory_calls = 1;
-   expected_stats.num_register_cuda_shared_memory_calls = 1;
-   tcm.CheckSharedMemory(expected_stats);
- }
-
- SUBCASE("No shared memory usage")
- {
-   params.shared_memory_type = NO_SHARED_MEMORY;
-   TestConcurrencyManager tcm(
-       params, is_sequence, is_decoupled, use_mock_infer);
-   tcm.infer_data_manager_ =
-       MockInferDataManagerFactory::CreateMockInferDataManager(
-           params.max_threads, params.batch_size, params.shared_memory_type,
-           params.output_shm_size, params.request_parameters,
-           mip.mock_model_parser_, tcm.factory_, mip.mock_data_loader_);
-   tcm.InitManager(
-       params.string_length, params.string_data, params.zero_input,
-       params.user_data, params.start_sequence_id, params.sequence_id_range,
-       params.sequence_length, params.sequence_length_specified,
-       params.sequence_length_variation);
-
-   tcm.CheckSharedMemory(expected_stats);
- }
-}
-
-TEST_CASE("concurrency_deadlock")
-{
- PerfAnalyzerParameters params{};
- params.max_concurrency = 6;
- bool is_sequence_model{true};
- bool some_infer_failures{false};
-
- const auto& ParameterizeSyncStreaming{[&]() {
-   SUBCASE("sync")
-   {
-     params.async = false;
-     params.streaming = false;
-   }
-   SUBCASE("async no streaming")
-   {
-     params.async = true;
-     params.streaming = false;
-   }
-   SUBCASE("async streaming")
-   {
-     params.async = true;
-     params.streaming = true;
-   }
- }};
-
- const auto& ParameterizeConcurrency{[&]() {
-   SUBCASE("10 concurrency, 10 thread")
-   {
-     ParameterizeSyncStreaming();
-     params.max_concurrency = 10;
-     params.max_threads = 10;
-   }
-   SUBCASE("10 concurrency, 4 thread")
-   {
-     ParameterizeSyncStreaming();
-     params.max_concurrency = 10;
-     params.max_threads = 4;
-   }
- }};
-
- const auto& ParameterizeSequence{[&]() {
-   SUBCASE("non-sequence")
-   {
-     ParameterizeConcurrency();
-     is_sequence_model = false;
-   }
-   SUBCASE("sequence")
-   {
-     ParameterizeConcurrency();
-     is_sequence_model = true;
-   }
- }};
-
- const auto& ParameterizeFailures{[&]() {
-   SUBCASE("yes_failures")
-   {
-     some_infer_failures = true;
-     ParameterizeSequence();
-   }
-   SUBCASE("no_failures")
-   {
-     some_infer_failures = false;
-     ParameterizeSequence();
-   }
- }};
-
- std::vector<size_t> delays;
-
- const auto& ParameterizeDelays{[&]() {
-   SUBCASE("no_delay")
-   {
-     delays = {0};
-     ParameterizeFailures();
-   }
-   SUBCASE("random_delay")
-   {
-     delays = {1, 5, 20, 4, 3};
-     ParameterizeFailures();
-   }
- }};
-
- ParameterizeDelays();
-
- TestConcurrencyManager tcm(params, is_sequence_model);
-
- tcm.InitManager(
-     params.string_length, params.string_data, params.zero_input,
-     params.user_data, params.start_sequence_id, params.sequence_id_range,
-     params.sequence_length, params.sequence_length_specified,
-     params.sequence_length_variation);
-
- tcm.stats_->SetDelays(delays);
-
- // Sometimes have a request fail
- if (some_infer_failures) {
-   tcm.stats_->SetReturnStatuses({true, true, true, false});
- }
-
- tcm.TestTimeouts();
-}
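The chained Parameterize* lambdas above are how these tests expand doctest SUBCASEs into a full cross-product: each level declares its SUBCASEs and then re-enters the next level, so the TEST_CASE body is re-executed once per combination. A minimal sketch of the same pattern, assuming only doctest.h and compiled into the existing doctest harness (the test name and variables are illustrative):

#include "doctest.h"

TEST_CASE("cross_product_subcases")
{
  bool async = false;
  int concurrency = 0;

  // Inner level: picks the async mode.
  const auto& ParameterizeAsync{[&]() {
    SUBCASE("sync") { async = false; }
    SUBCASE("async") { async = true; }
  }};

  // Outer level: picks the concurrency, then re-enters the inner level.
  // doctest re-runs the test case once per subcase path, yielding
  // 2 x 2 = 4 distinct executions of the assertion below.
  SUBCASE("conc 1") { concurrency = 1; ParameterizeAsync(); }
  SUBCASE("conc 4") { concurrency = 4; ParameterizeAsync(); }

  CHECK(concurrency > 0);  // runs once per combination
}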
-TEST_CASE("concurrency_overhead")
-{
- PerfAnalyzerParameters params{};
- SUBCASE("sync, conc 1")
- {
-   params.async = false;
-   params.max_concurrency = 1;
- }
- SUBCASE("sync, conc 4")
- {
-   params.async = false;
-   params.max_concurrency = 4;
- }
- SUBCASE("async, conc 1")
- {
-   params.async = true;
-   params.max_concurrency = 1;
- }
- SUBCASE("async, conc 4")
- {
-   params.async = true;
-   params.max_concurrency = 4;
- }
- TestConcurrencyManager tcm(params, false);
- tcm.InitManager(
-     params.string_length, params.string_data, params.zero_input,
-     params.user_data, params.start_sequence_id, params.sequence_id_range,
-     params.sequence_length, params.sequence_length_specified,
-     params.sequence_length_variation);
-
- tcm.TestOverhead();
-}
-
-TEST_CASE(
-    "send_request_rate_concurrency_manager: testing logic around detecting "
-    "send request count")
-{
- PerfAnalyzerParameters params{};
-
- SUBCASE("sync")
- {
-   params.async = false;
- }
- SUBCASE("async")
- {
-   params.async = true;
- }
-
- TestConcurrencyManager tcm(params);
-
- tcm.stats_->SetDelays({10});
-
- tcm.InitManager(
-     params.string_length, params.string_data, params.zero_input,
-     params.user_data, params.start_sequence_id, params.sequence_id_range,
-     params.sequence_length, params.sequence_length_specified,
-     params.sequence_length_variation);
-
- tcm.ChangeConcurrencyLevel(4);
- std::this_thread::sleep_for(std::chrono::milliseconds(100));
- tcm.StopWorkerThreads();
-
- const size_t num_sent_requests{tcm.GetAndResetNumSentRequests()};
-
- CHECK(num_sent_requests == doctest::Approx(40).epsilon(0.1));
-}
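The Approx(40) check above follows directly from the configured load: four concurrent slots, a 10 ms mocked delay per request, and a 100 ms run before the workers are stopped. A back-of-the-envelope sketch of that arithmetic, assuming each in-flight slot issues a new request as soon as the previous one completes (names here are illustrative):

#include <cstddef>
#include <iostream>

int main()
{
  // Mirrors the test setup: 4 slots, 10 ms per request, 100 ms window.
  const std::size_t concurrency = 4;
  const std::size_t delay_ms = 10;
  const std::size_t window_ms = 100;

  // Each slot completes one request per delay period, so the expected
  // total is concurrency * (window / delay) = 4 * 10 = 40.
  const std::size_t expected = concurrency * (window_ms / delay_ms);
  std::cout << expected << "\n";  // 40, matching Approx(40).epsilon(0.1)
}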
-TEST_CASE(
-    "reconfigure_threads" *
-    doctest::description(
-        "This test confirms the side-effects of ReconfigThreads(). Namely, "
-        "that the correct number of threads are created and that they are "
-        "configured properly"))
-{
- PerfAnalyzerParameters params{};
- std::vector<ThreadConfig> expected_config_values;
- std::vector<size_t> expected_concurrencies;
- std::vector<size_t> expected_seq_stat_index_offsets;
- std::vector<size_t> expected_num_requests;
-
- size_t target_concurrency = 0;
- size_t target_num_requests = 0;
-
- SUBCASE("normal")
- {
-   params.max_threads = 10;
-   target_concurrency = 5;
-   target_num_requests = 15;
-
-   expected_concurrencies = {1, 1, 1, 1, 1};
-   expected_seq_stat_index_offsets = {0, 1, 2, 3, 4};
-   expected_num_requests = {3, 3, 3, 3, 3};
- }
- SUBCASE("thread_limited")
- {
-   params.max_threads = 5;
-   target_concurrency = 10;
-   target_num_requests = 20;
-
-   expected_concurrencies = {2, 2, 2, 2, 2};
-   expected_seq_stat_index_offsets = {0, 2, 4, 6, 8};
-   expected_num_requests = {4, 4, 4, 4, 4};
- }
- SUBCASE("unbalanced")
- {
-   params.max_threads = 6;
-   target_concurrency = 14;
-   target_num_requests = 15;
-
-   expected_concurrencies = {3, 3, 2, 2, 2, 2};
-   expected_seq_stat_index_offsets = {0, 3, 6, 8, 10, 12};
-   expected_num_requests = {3, 3, 3, 2, 2, 2};
- }
- SUBCASE("no requests specified")
- {
-   params.max_threads = 2;
-   target_concurrency = 14;
-   target_num_requests = 0;
-
-   expected_concurrencies = {7, 7};
-   expected_seq_stat_index_offsets = {0, 7};
-   expected_num_requests = {0, 0};
- }
-
- for (size_t i = 0; i < expected_concurrencies.size(); i++) {
-   ThreadConfig tc(i);
-   tc.concurrency_ = expected_concurrencies[i];
-   tc.seq_stat_index_offset_ = expected_seq_stat_index_offsets[i];
-   tc.num_requests_ = expected_num_requests[i];
-   expected_config_values.push_back(tc);
- }
-
- TestConcurrencyManager tcm(params);
- tcm.TestReconfigThreads(
-     target_concurrency, target_num_requests, expected_config_values);
-}
-
-}} // namespace triton::perfanalyzer
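The expected values in the "unbalanced" subcase above follow a largest-remainder split: counts differ by at most one, with earlier threads absorbing the remainder, and the seq_stat_index_offsets are the running prefix sums of the concurrencies ({0, 3, 6, 8, 10, 12} for {3, 3, 2, 2, 2, 2}). A sketch of distribution logic consistent with those expectations (the function name is illustrative, not the manager's actual implementation):

#include <cstddef>
#include <vector>

// Spread `total` units across `threads` workers so counts differ by at
// most one, earlier workers first. For total=14, threads=6 this yields
// {3, 3, 2, 2, 2, 2}; for total=15 requests it yields {3, 3, 3, 2, 2, 2},
// matching the "unbalanced" subcase above.
std::vector<std::size_t> SplitEvenly(std::size_t total, std::size_t threads)
{
  std::vector<std::size_t> counts(threads, total / threads);
  for (std::size_t i = 0; i < total % threads; i++) {
    counts[i]++;
  }
  return counts;
}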
diff --git a/src/c++/perf_analyzer/test_ctx_id_tracker.cc b/src/c++/perf_analyzer/test_ctx_id_tracker.cc
deleted file mode 100644
index 8625fbd6d..000000000
--- a/src/c++/perf_analyzer/test_ctx_id_tracker.cc
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <cmath>
-#include <memory>
-#include <numeric>
-#include <vector>
-
-#include "concurrency_ctx_id_tracker.h"
-#include "doctest.h"
-#include "fifo_ctx_id_tracker.h"
-#include "rand_ctx_id_tracker.h"
-
-namespace triton { namespace perfanalyzer {
-
-TEST_CASE("CtxIdTrackers: FIFO")
-{
- std::shared_ptr<ICtxIdTracker> tracker = std::make_shared<FifoCtxIdTracker>();
-
- // Reset will load up context IDs 0-9 into the queue and return them in
- // order on consecutive Get calls
- size_t count = 10;
- CHECK_FALSE(tracker->IsAvailable());
- tracker->Reset(count);
- CHECK(tracker->IsAvailable());
- for (size_t i = 0; i < count; i++) {
-   CHECK(tracker->Get() == i);
- }
-
- // Manually restored values should be returned in order
- CHECK_FALSE(tracker->IsAvailable());
- tracker->Restore(7);
- CHECK(tracker->IsAvailable());
- tracker->Restore(13);
- CHECK(tracker->Get() == 7);
- CHECK(tracker->Get() == 13);
-
- // A reset should throw away any values on the old list
- tracker->Reset(10);
- tracker->Reset(1);
- tracker->Get();
- CHECK(!tracker->IsAvailable());
-
- // Calling Get when not available should throw
- CHECK_THROWS_AS(tracker->Get(), const std::exception&);
-}
-
-TEST_CASE("CtxIdTrackers: Conc")
-{
- std::shared_ptr<ICtxIdTracker> tracker =
-     std::make_shared<ConcurrencyCtxIdTracker>();
-
- // Reset will load up 10 instances of context ID 0 into the queue and
- // return them on consecutive Get calls
- size_t count = 10;
- tracker->Reset(count);
- for (size_t i = 0; i < count; i++) {
-   CHECK(tracker->Get() == 0);
- }
-
- // Manually restored values should be returned in order
- CHECK_FALSE(tracker->IsAvailable());
- tracker->Restore(7);
- tracker->Restore(13);
- CHECK(tracker->IsAvailable());
- CHECK(tracker->Get() == 7);
- CHECK(tracker->Get() == 13);
-
- // A reset should throw away any values on the old list
- tracker->Reset(10);
- tracker->Reset(1);
- tracker->Get();
- CHECK(!tracker->IsAvailable());
-
- // Calling Get when not available should throw
- CHECK_THROWS_AS(tracker->Get(), const std::exception&);
-}
-
-TEST_CASE("CtxIdTrackers: Rand")
-{
- std::shared_ptr<ICtxIdTracker> tracker = std::make_shared<RandCtxIdTracker>();
- size_t max;
-
- auto check_range_and_variance = [&]() {
-   size_t num_trials = 1000;
-
-   std::vector<size_t> results(max, 0);
-   for (size_t i = 0; i < num_trials; i++) {
-     auto x = tracker->Get();
-     REQUIRE((x < max && x >= 0));
-     results[x]++;
-   }
-
-   // Confirm that the distribution of the picked CTX IDs is random
-   double mean =
-       std::accumulate(results.begin(), results.end(), 0.0) / results.size();
-   double variance = 0;
-   for (size_t i = 0; i < results.size(); i++) {
-     variance += std::pow(results[i] - mean, 2);
-   }
-   variance /= results.size();
-   CHECK((variance > 10 && variance < 100));
- };
-
- // IsAvailable is always true for this class
- CHECK(tracker->IsAvailable());
-
- // Reset should define the bounds of random CTX ID picking
- max = 10;
- tracker->Reset(max);
- // Restore should have no impact on this class.
- tracker->Restore(9999); - check_range_and_variance(); - - - // Reset should RE-define the bounds of random CTX id picking - max = 5; - tracker->Reset(max); - check_range_and_variance(); -} - - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/test_custom_load_manager.cc b/src/c++/perf_analyzer/test_custom_load_manager.cc deleted file mode 100644 index ced79af7d..000000000 --- a/src/c++/perf_analyzer/test_custom_load_manager.cc +++ /dev/null @@ -1,431 +0,0 @@ -// Copyright 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
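The TestSchedule helper in the file below verifies that the timestamps handed to the workers match a schedule built by cycling through the user-provided intervals and accumulating them. A minimal sketch of that expected-schedule construction, assuming std::chrono::nanoseconds intervals (an illustration of the idea, not the manager's code):

#include <chrono>
#include <cstddef>
#include <vector>

using nanoseconds = std::chrono::nanoseconds;

// Build the first `count` request timestamps by round-robining through
// the custom intervals and accumulating them, mirroring the
// expected-schedule loop inside TestSchedule below.
std::vector<nanoseconds> BuildSchedule(
    const std::vector<nanoseconds>& intervals, std::size_t count)
{
  std::vector<nanoseconds> timestamps;
  nanoseconds current{0};
  for (std::size_t i = 0; i < count; i++) {
    current += intervals[i % intervals.size()];
    timestamps.push_back(current);
  }
  return timestamps;
}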
- -#include -#include -#include -#include - -#include "client_backend/client_backend.h" -#include "constants.h" -#include "custom_load_manager.h" -#include "doctest.h" -#include "mock_request_rate_worker.h" -#include "request_rate_manager.h" -#include "test_load_manager_base.h" - -using nanoseconds = std::chrono::nanoseconds; -using milliseconds = std::chrono::milliseconds; - -namespace triton { namespace perfanalyzer { - -/// Class to test the CustomLoadManager -/// -class TestCustomLoadManager : public TestLoadManagerBase, - public CustomLoadManager { - public: - TestCustomLoadManager() = default; - - TestCustomLoadManager( - PerfAnalyzerParameters params, bool is_sequence_model = false, - bool is_decoupled_model = false, bool use_mock_infer = false) - : use_mock_infer_(use_mock_infer), - TestLoadManagerBase(params, is_sequence_model, is_decoupled_model), - CustomLoadManager( - params.async, params.streaming, "INTERVALS_FILE", params.batch_size, - params.measurement_window_ms, params.max_trials, params.max_threads, - params.num_of_sequences, params.shared_memory_type, - params.output_shm_size, params.serial_sequences, GetParser(), - GetFactory(), params.request_parameters) - { - InitManager( - params.string_length, params.string_data, params.zero_input, - params.user_data, params.start_sequence_id, params.sequence_id_range, - params.sequence_length, params.sequence_length_specified, - params.sequence_length_variation); - } - - std::shared_ptr MakeWorker( - std::shared_ptr thread_stat, - std::shared_ptr thread_config) override - { - size_t id = workers_.size(); - auto worker = std::make_shared( - id, thread_stat, thread_config, parser_, data_loader_, factory_, - on_sequence_model_, async_, max_threads_, using_json_data_, streaming_, - batch_size_, wake_signal_, wake_mutex_, execute_, start_time_, - serial_sequences_, infer_data_manager_, sequence_manager_); - - if (use_mock_infer_) { - EXPECT_CALL(*worker, Infer()) - .WillRepeatedly(testing::Invoke( - worker.get(), &MockRequestRateWorker::EmptyInfer)); - } - return worker; - } - - void TestSchedule( - std::vector intervals, PerfAnalyzerParameters params) - { - for (auto i : intervals) { - custom_intervals_.push_back(nanoseconds{i}); - } - nanoseconds measurement_window_nanoseconds{ - params.measurement_window_ms * NANOS_PER_MILLIS}; - nanoseconds max_test_duration{ - measurement_window_nanoseconds * params.max_trials}; - nanoseconds expected_current_timestamp{0}; - size_t intervals_index = 0; - - PauseWorkers(); - ConfigureThreads(); - GenerateSchedule(); - - std::vector expected_timestamps; - std::vector observed_timestamps; - - // Determine what the observed schedule was by getting each worker's - // schedule and then sorting them together - // - for (auto worker : workers_) { - nanoseconds observed_timestamp = - std::dynamic_pointer_cast(worker) - ->GetNextTimestamp(); - while (observed_timestamp <= max_test_duration) { - observed_timestamps.push_back(observed_timestamp); - observed_timestamp = - std::dynamic_pointer_cast(worker) - ->GetNextTimestamp(); - } - } - sort(observed_timestamps.begin(), observed_timestamps.end()); - - // Determine what the schedule "should" be - // - while (expected_current_timestamp < observed_timestamps.back()) { - expected_current_timestamp += custom_intervals_[intervals_index]; - expected_timestamps.push_back(expected_current_timestamp); - intervals_index = (intervals_index + 1) % custom_intervals_.size(); - } - - // Confirm that the expected and observed schedules were the same - // - REQUIRE_MESSAGE( - 
observed_timestamps.size() == expected_timestamps.size(), - "Mismatch in size of schedules"); - - for (size_t i = 0; i < observed_timestamps.size(); i++) { - CHECK(observed_timestamps[i] == expected_timestamps[i]); - } - } - - void TestSequences( - std::vector intervals, bool check_sequences_balanced) - { - auto sleep_time = milliseconds(20); - for (auto i : intervals) { - custom_intervals_.push_back(nanoseconds{i}); - } - - PauseWorkers(); - ConfigureThreads(); - GenerateSchedule(); - ResumeWorkers(); - std::this_thread::sleep_for(sleep_time); - if (check_sequences_balanced) { - CheckSequenceBalance(); - } - StopWorkerThreads(); - } - - std::shared_ptr& parser_{LoadManager::parser_}; - std::shared_ptr& factory_{ - TestLoadManagerBase::factory_}; - - std::string& request_intervals_file_{ - CustomLoadManager::request_intervals_file_}; - NanoIntervals& custom_intervals_{CustomLoadManager::custom_intervals_}; - - cb::Error ReadTimeIntervalsFile( - const std::string& path, NanoIntervals* contents) override - { - return cb::Error::Success; - } - - private: - bool use_mock_infer_; -}; - -TEST_CASE("custom_load_schedule") -{ - PerfAnalyzerParameters params; - params.measurement_window_ms = 1000; - params.max_trials = 10; - bool is_sequence = false; - bool is_decoupled = false; - bool use_mock_infer = true; - std::vector intervals; - - const auto& ParameterizeIntervals{[&]() { - SUBCASE("intervals A") - { - intervals = {100000000, 110000000, 130000000}; - } - SUBCASE("intervals B") - { - intervals = {150000000}; - } - SUBCASE("intervals C") - { - intervals = {100000000, 110000000, 120000000, 130000000, 140000000}; - } - }}; - - const auto& ParameterizeThreads{[&]() { - SUBCASE("threads 1") - { - ParameterizeIntervals(); - params.max_threads = 1; - } - SUBCASE("threads 2") - { - ParameterizeIntervals(); - params.max_threads = 2; - } - SUBCASE("threads 4") - { - ParameterizeIntervals(); - params.max_threads = 4; - } - SUBCASE("threads 7") - { - ParameterizeIntervals(); - params.max_threads = 7; - } - }}; - - const auto& ParameterizeTrials{[&]() { - SUBCASE("trials 3") - { - ParameterizeThreads(); - params.max_trials = 3; - } - SUBCASE("trials 10") - { - ParameterizeThreads(); - params.max_trials = 10; - } - SUBCASE("trials 20") - { - ParameterizeThreads(); - params.max_trials = 20; - } - }}; - - const auto& ParameterizeMeasurementWindow{[&]() { - SUBCASE("window 1000") - { - ParameterizeTrials(); - params.measurement_window_ms = 1000; - } - SUBCASE("window 10000") - { - ParameterizeTrials(); - params.measurement_window_ms = 10000; - } - SUBCASE("window 500") - { - ParameterizeTrials(); - params.measurement_window_ms = 500; - } - }}; - - const auto& ParameterizeSequences{[&]() { - SUBCASE("sequences off") - { - ParameterizeMeasurementWindow(); - is_sequence = false; - } - SUBCASE("3 sequences") - { - ParameterizeMeasurementWindow(); - is_sequence = true; - params.num_of_sequences = 3; - } - SUBCASE("6 sequences") - { - ParameterizeMeasurementWindow(); - is_sequence = true; - params.num_of_sequences = 6; - } - SUBCASE("9 sequences") - { - ParameterizeMeasurementWindow(); - is_sequence = true; - params.num_of_sequences = 9; - } - }}; - - ParameterizeSequences(); - TestCustomLoadManager tclm(params, is_sequence, is_decoupled, use_mock_infer); - tclm.TestSchedule(intervals, params); -} - -TEST_CASE("custom_load_sequences") -{ - PerfAnalyzerParameters params; - - // This is needed so we can confirm that all sequences are being requested - // equally when serial_sequences is on. 
Otherwise we would keep creating new - // sequences and wouldn't be able to track it properly. - // - params.sequence_length = 1000; - bool is_sequence_model = true; - bool check_sequences_balanced = false; - std::vector intervals; - - const auto& ParameterizeIntervals{[&]() { - SUBCASE("intervals A") - { - intervals = {100000, 110000, 130000}; - } - SUBCASE("intervals B") - { - intervals = {150000}; - } - SUBCASE("intervals C") - { - intervals = {100000, 110000, 120000, 130000, 140000}; - } - }}; - - const auto& ParameterizeSerialSequences{[&]() { - SUBCASE("serial_sequences") - { - ParameterizeIntervals(); - params.serial_sequences = true; - check_sequences_balanced = true; - } - SUBCASE("not serial_sequences") - { - ParameterizeIntervals(); - params.serial_sequences = false; - check_sequences_balanced = false; - } - }}; - - const auto& ParameterizeNumSequences{[&]() { - SUBCASE("2 sequences") - { - ParameterizeSerialSequences(); - params.num_of_sequences = 2; - } - SUBCASE("3 sequences") - { - ParameterizeSerialSequences(); - params.num_of_sequences = 3; - } - SUBCASE("5 sequences") - { - ParameterizeSerialSequences(); - params.num_of_sequences = 5; - } - SUBCASE("6 sequences") - { - ParameterizeSerialSequences(); - params.num_of_sequences = 6; - } - SUBCASE("9 sequences") - { - ParameterizeSerialSequences(); - params.num_of_sequences = 9; - } - }}; - - - const auto& ParameterizeThreads{[&]() { - SUBCASE("threads 1") - { - ParameterizeNumSequences(); - params.max_threads = 1; - } - SUBCASE("threads 2") - { - ParameterizeNumSequences(); - params.max_threads = 2; - } - SUBCASE("threads 4") - { - ParameterizeNumSequences(); - params.max_threads = 4; - } - SUBCASE("threads 7") - { - ParameterizeNumSequences(); - params.max_threads = 7; - } - }}; - - ParameterizeThreads(); - - TestCustomLoadManager tclm(params, is_sequence_model); - tclm.InitManager( - params.string_length, params.string_data, params.zero_input, - params.user_data, params.start_sequence_id, params.sequence_id_range, - params.sequence_length, params.sequence_length_specified, - params.sequence_length_variation); - - tclm.TestSequences(intervals, check_sequences_balanced); -} - - -TEST_CASE("testing the GetCustomRequestRate function") -{ - TestCustomLoadManager tclm{}; - double request_rate{0.0}; - - SUBCASE("custom_intervals_ empty") - { - cb::Error result{tclm.GetCustomRequestRate(&request_rate)}; - - CHECK(result.Err() == GENERIC_ERROR); - CHECK(result.Message() == "The custom intervals vector is empty"); - } - - SUBCASE("custom_intervals_ populated") - { - tclm.custom_intervals_.push_back(nanoseconds(100000000)); - tclm.custom_intervals_.push_back(nanoseconds(110000000)); - tclm.custom_intervals_.push_back(nanoseconds(130000000)); - - cb::Error result{tclm.GetCustomRequestRate(&request_rate)}; - - CHECK(result.Err() == SUCCESS); - CHECK(request_rate == doctest::Approx(8.0)); - } -} - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/test_dataloader.cc b/src/c++/perf_analyzer/test_dataloader.cc deleted file mode 100644 index c8db7df66..000000000 --- a/src/c++/perf_analyzer/test_dataloader.cc +++ /dev/null @@ -1,1639 +0,0 @@ -// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "data_loader.h" -#include "doctest.h" -#include "mock_data_loader.h" - - -namespace triton { namespace perfanalyzer { - -/// Helper class for testing the DataLoader -/// -class TestDataLoader { - public: - // Static function to create a generic ModelTensor - // - static ModelTensor CreateTensor(std::string name) - { - ModelTensor t; - t.name_ = name; - t.datatype_ = "INT32"; - t.shape_ = {1}; - t.is_shape_tensor_ = false; - t.is_optional_ = false; - return t; - } -}; - -TEST_CASE("dataloader: no data") -{ - MockDataLoader dataloader; - CHECK(dataloader.GetDataStreamsCount() == 0); - cb::Error status = dataloader.ValidateIndexes(0, 0); - CHECK(status.IsOk() == false); -} - -TEST_CASE("dataloader: ValidateIndexes") -{ - MockDataLoader dataloader; - - // Pretend we loaded 2 streams, one with 1 step, one with 3 steps - dataloader.data_stream_cnt_ = 2; - dataloader.step_num_.push_back(1); - dataloader.step_num_.push_back(3); - - CHECK_EQ(dataloader.GetDataStreamsCount(), 2); - - // Step in range for stream 0 - cb::Error status = dataloader.ValidateIndexes(0, 0); - CHECK(status.IsOk() == true); - - // Step out of range for stream 0 - status = dataloader.ValidateIndexes(0, 1); - CHECK(status.IsOk() == false); - - // Step in range for stream 1 - status = dataloader.ValidateIndexes(1, 2); - CHECK(status.IsOk() == true); - - // Step out of range for stream 1 - status = dataloader.ValidateIndexes(1, 3); - CHECK(status.IsOk() == false); - - // Stream out of range - status = dataloader.ValidateIndexes(2, 0); - CHECK(status.IsOk() == false); -} - -TEST_CASE("dataloader: GetTotalSteps") -{ - MockDataLoader dataloader; - - // Pretend we loaded 2 streams, one with 1 step, one with 3 steps - dataloader.data_stream_cnt_ = 2; - dataloader.step_num_.push_back(1); - dataloader.step_num_.push_back(3); - - CHECK_EQ(dataloader.GetTotalSteps(0), 1); - CHECK_EQ(dataloader.GetTotalSteps(1), 3); - - // It will return 0 if out of range - CHECK_EQ(dataloader.GetTotalSteps(2), 0); -} - -TEST_CASE("dataloader: ValidateIOExistsInModel") -{ - MockDataLoader dataloader; - std::shared_ptr inputs = std::make_shared(); - std::shared_ptr outputs = std::make_shared(); - ModelTensor input1 = TestDataLoader::CreateTensor("INPUT1"); - ModelTensor output1 = TestDataLoader::CreateTensor("OUTPUT1"); - 
inputs->insert(std::make_pair(input1.name_, input1)); - outputs->insert(std::make_pair(output1.name_, output1)); - - SUBCASE("Directory does not exist") - { - std::string data_directory = "non_existent_directory"; - cb::Error status = - dataloader.ValidateIOExistsInModel(inputs, outputs, data_directory); - CHECK( - status.Message() == - "Error: Directory does not exist or is not a directory: " - "non_existent_directory"); - CHECK(status.Err() == pa::GENERIC_ERROR); - } - - SUBCASE("Directory is not a directory") - { - std::string data_directory = "tmp/test.txt"; - std::ofstream file(data_directory); - cb::Error status = - dataloader.ValidateIOExistsInModel(inputs, outputs, data_directory); - CHECK( - status.Message() == - "Error: Directory does not exist or is not a directory: tmp/test.txt"); - CHECK(status.Err() == pa::GENERIC_ERROR); - std::remove(data_directory.c_str()); - } - - SUBCASE("Valid directory but no corresponding files") - { - std::string data_directory = "valid_directory"; - std::filesystem::create_directory(data_directory); - std::ofstream(data_directory + "/invalid_file").close(); - cb::Error status = - dataloader.ValidateIOExistsInModel(inputs, outputs, data_directory); - std::filesystem::remove_all(data_directory); - CHECK( - status.Message() == - "Provided data file 'invalid_file' does not correspond to a valid " - "model input or output."); - CHECK(status.Err() == pa::GENERIC_ERROR); - } - - SUBCASE("Valid directory with corresponding files") - { - std::string data_directory = "valid_directory"; - std::filesystem::create_directory(data_directory); - std::ofstream(data_directory + "/INPUT1").close(); - std::ofstream(data_directory + "/OUTPUT1").close(); - cb::Error status = - dataloader.ValidateIOExistsInModel(inputs, outputs, data_directory); - std::filesystem::remove_all(data_directory); - CHECK(status.Message().empty()); - CHECK(status.IsOk()); - } - - SUBCASE("Valid directory with multiple input and output tensors") - { - ModelTensor input2 = TestDataLoader::CreateTensor("INPUT2"); - ModelTensor output2 = TestDataLoader::CreateTensor("OUTPUT2"); - - inputs->insert(std::make_pair(input2.name_, input2)); - outputs->insert(std::make_pair(output2.name_, output2)); - - std::string data_directory = "valid_directory_multiple"; - std::filesystem::create_directory(data_directory); - std::ofstream(data_directory + "/INPUT1").close(); - std::ofstream(data_directory + "/INPUT2").close(); - std::ofstream(data_directory + "/OUTPUT1").close(); - std::ofstream(data_directory + "/OUTPUT2").close(); - - cb::Error status = - dataloader.ValidateIOExistsInModel(inputs, outputs, data_directory); - std::filesystem::remove_all(data_directory); - CHECK(status.Message().empty()); - CHECK(status.IsOk()); - } -} - -TEST_CASE("dataloader: ReadDataFromJSON") -{ - DataLoader dataloader; - std::shared_ptr inputs = std::make_shared(); - std::shared_ptr outputs = std::make_shared(); - ModelTensor input1 = TestDataLoader::CreateTensor("INPUT1"); - ModelTensor output1 = TestDataLoader::CreateTensor("OUTPUT1"); - - inputs->insert(std::make_pair(input1.name_, input1)); - outputs->insert(std::make_pair(output1.name_, output1)); - - SUBCASE("File does not exist") - { - std::string json_file = "non_existent_file.json"; - cb::Error status = dataloader.ReadDataFromJSON(inputs, outputs, json_file); - CHECK(status.Message() == "failed to open file for reading provided data"); - CHECK(status.Err() == pa::GENERIC_ERROR); - } - - SUBCASE("Valid JSON file") - { - std::string json_file = "valid_file.json"; - 
std::ofstream out(json_file); - out << R"({ - "data": [ - { "INPUT1": [1] }, - { "INPUT1": [2] }, - { "INPUT1": [3] } - ], - "validation_data": [ - { "OUTPUT1": [4] }, - { "OUTPUT1": [5] }, - { "OUTPUT1": [6] } - ]})"; - out.close(); - - cb::Error status = dataloader.ReadDataFromJSON(inputs, outputs, json_file); - std::filesystem::remove(json_file); - CHECK(status.Message().empty()); - CHECK(status.IsOk()); - } - - SUBCASE("Invalid JSON file") - { - std::string json_file = "invalid_file.json"; - std::ofstream out(json_file); - out << R"({invalid_json: 1,)"; - out.close(); - - cb::Error status = dataloader.ReadDataFromJSON(inputs, outputs, json_file); - std::filesystem::remove(json_file); - - CHECK( - status.Message() == - "failed to parse the specified json file for reading provided data"); - CHECK(status.Err() == pa::GENERIC_ERROR); - } - - SUBCASE("Multiple input and output tensors") - { - ModelTensor input2 = TestDataLoader::CreateTensor("INPUT2"); - ModelTensor output2 = TestDataLoader::CreateTensor("OUTPUT2"); - - inputs->insert(std::make_pair(input2.name_, input2)); - outputs->insert(std::make_pair(output2.name_, output2)); - - std::string json_file = "valid_file_multiple_input_output.json"; - std::ofstream out(json_file); - out << R"({ - "data": [ - { - "INPUT1": [1], - "INPUT2": [4] - }, - { - "INPUT1": [2], - "INPUT2": [5] - }, - { - "INPUT1": [3], - "INPUT2": [6] - } - ], - "validation_data": [ - { - "OUTPUT1": [4], - "OUTPUT2": [7] - }, - { - "OUTPUT1": [5], - "OUTPUT2": [8] - }, - { - "OUTPUT1": [6], - "OUTPUT2": [9] - } - ] - })"; - out.close(); - - cb::Error status = dataloader.ReadDataFromJSON(inputs, outputs, json_file); - std::filesystem::remove(json_file); - CHECK(status.Message().empty()); - CHECK(status.IsOk()); - } -} - -TEST_CASE("dataloader: GetInputData missing data") -{ - MockDataLoader dataloader; - ModelTensor input1 = TestDataLoader::CreateTensor("INPUT1"); - - TensorData data; - - cb::Error status = dataloader.GetInputData(input1, 0, 0, data); - REQUIRE(status.IsOk() == false); - CHECK_EQ(status.Message(), "unable to find data for input 'INPUT1'."); -} - -TEST_CASE("dataloader: ParseData: Bad Json") -{ - std::string json_str{"bad json text"}; - - MockDataLoader dataloader; - std::shared_ptr inputs = std::make_shared(); - std::shared_ptr outputs = std::make_shared(); - - cb::Error status = dataloader.ReadDataFromStr(json_str, inputs, outputs); - CHECK(status.IsOk() == false); - CHECK_EQ( - status.Message(), - "failed to parse the specified json file for reading provided data"); -} - -TEST_CASE("dataloader: ParseData: Misc error cases") -{ - std::string expected_message; - std::string json_str; - - SUBCASE("No data") - { - json_str = R"({ "notdata" : 5})"; - expected_message = "The json file doesn't contain data field"; - } - SUBCASE("Not string b64") - { - json_str = R"({"data": [{ "INPUT1": {"b64": 5} }]})"; - expected_message = - "the value of b64 field should be of type string ( Location stream id: " - "0, step id: 0)"; - } - SUBCASE("Not b64 or array") - { - json_str = R"({"data": [{ "INPUT1": {"not_b64": "AAAAAQ=="} }]})"; - expected_message = - "missing content field. 
( Location stream id: 0, step id: 0)"; - } - SUBCASE("Malformed input (boolean type)") - { - json_str = R"({"data": [{ "INPUT1": null }]})"; - expected_message = "Input data file is malformed."; - } - SUBCASE("Inconsistent elements in data array") - { - json_str = R"({"data": [ - [{ "INPUT1": [2] },{ "INPUT1": [3] }], - { "INPUT1": [1] } - ]})"; - expected_message = - "Inconsistency in input-data provided. Can not have a combination of " - "objects and arrays inside of the Data array"; - } - SUBCASE("Not integer shape") - { - json_str = R"({"data": [{ - "INPUT1": { "shape": ["a"], "content": [1,2,3,4,5,6] } - }]})"; - expected_message = "shape values must be integers."; - } - SUBCASE("Content not array") - { - json_str = R"({"data": [{ - "INPUT1": { "content": 6 } - }]})"; - expected_message = - "The tensor values are not supported. Expected an array or b64 string " - "( Location stream id: 0, step id: 0)"; - } - SUBCASE("Missing non-optional input") - { - json_str = R"({"data": [{ - "NOT_INPUT1": { "content": 6 } - }]})"; - expected_message = - "missing tensor INPUT1 ( Location stream id: 0, step id: 0)"; - } - SUBCASE("Invalid input") - { - json_str = R"({"data": - [{ - "INPUT1": [2], - "INVALID_INPUT": [2] - }] - })"; - expected_message = - "The input or output 'INVALID_INPUT' is not found in the model " - "configuration"; - } - - MockDataLoader dataloader; - std::shared_ptr inputs = std::make_shared(); - std::shared_ptr outputs = std::make_shared(); - ModelTensor input1 = TestDataLoader::CreateTensor("INPUT1"); - inputs->insert(std::make_pair(input1.name_, input1)); - - cb::Error status = dataloader.ReadDataFromStr(json_str, inputs, outputs); - CHECK(status.IsOk() == false); - CHECK_EQ(status.Message(), expected_message); -} - -TEST_CASE( - "dataloader: ParseData: Mismatching Shapes" * - doctest::description( - "When the shape is provided and it is incompatible with the actual " - "model shape, then an error should be thrown")) -{ - ModelTensor input1 = TestDataLoader::CreateTensor("INPUT1"); - - std::string expected_message; - std::string json_str; - - SUBCASE("Mismatching fixed shape") - { - input1.shape_ = {3}; - expected_message = - "The supplied shape of [1] for input \"INPUT1\" is incompatible with " - "the " - "model's input shape of [3]"; - - SUBCASE("content json") - { - json_str = - R"({"data": [{ "INPUT1": { "shape": [1], "content": [1] } }]})"; - } - SUBCASE("b64 json") - { - json_str = - R"({"data": [{ "INPUT1": { "shape": [1], "b64": "AAAAAQ=="} }]})"; - } - } - SUBCASE("Mismatching dynamic dimensions") - { - input1.shape_ = {-1}; - expected_message = - "The supplied shape of [1,1] for input \"INPUT1\" is incompatible with " - "the model's input shape of [-1]"; - - SUBCASE("content json") - { - json_str = - R"({"data": [{ "INPUT1": { "shape": [1,1], "content": [1] } }]})"; - } - SUBCASE("b64 json") - { - json_str = - R"({"data": [{ "INPUT1": { "shape": [1,1], "b64": "AAAAAQ=="} }]})"; - } - } - SUBCASE("Mismatching multiple dimensions") - { - input1.shape_ = {-1, 2}; - expected_message = - "The supplied shape of [1,1] for input \"INPUT1\" is incompatible with " - "the model's input shape of [-1,2]"; - - SUBCASE("content json") - { - json_str = - R"({"data": [{ "INPUT1": { "shape": [1,1], "content": [1] } }]})"; - } - SUBCASE("b64 json") - { - json_str = - R"({"data": [{ "INPUT1": { "shape": [1,1], "b64": "AAAAAQ=="} }]})"; - } - } - - MockDataLoader dataloader; - std::shared_ptr inputs = std::make_shared(); - inputs->insert(std::make_pair(input1.name_, input1)); - - 
std::shared_ptr outputs = std::make_shared(); - - cb::Error status = dataloader.ReadDataFromStr(json_str, inputs, outputs); - REQUIRE(status.IsOk() == false); - CHECK_EQ(status.Message(), expected_message); -} - - -TEST_CASE( - "dataloader: ParseData: Mismatch Input Data and Fixed Shape" * - doctest::description( - "When the size of the provided Input is not in line with the Tensor's " - "shape, then an error should be thrown")) -{ - ModelTensor input1 = TestDataLoader::CreateTensor("INPUT1"); - input1.shape_ = {3}; - - std::string expected_message; - std::string json_str; - - SUBCASE("Normal json") - { - json_str = R"({"data": [{ "INPUT1": [1,2] }]})"; - expected_message = - "mismatch in the data provided for INPUT1. Expected: 12 bytes, Got: 8 " - "bytes"; - } - SUBCASE("content json") - { - json_str = R"({"data": [{ "INPUT1": { "content": [1,2] } }]})"; - expected_message = - "mismatch in the data provided for INPUT1. Expected: 12 bytes, Got: 8 " - "bytes"; - } - SUBCASE("b64 json") - { - json_str = R"({"data": [{ "INPUT1": {"b64": "AAAAAQ=="} }]})"; - expected_message = - "mismatch in the data provided for INPUT1. Expected: 12 bytes, Got: 4 " - "bytes"; - } - - MockDataLoader dataloader; - std::shared_ptr inputs = std::make_shared(); - inputs->insert(std::make_pair(input1.name_, input1)); - - std::shared_ptr outputs = std::make_shared(); - - cb::Error status = dataloader.ReadDataFromStr(json_str, inputs, outputs); - REQUIRE(status.IsOk() == false); - CHECK_EQ(status.Message(), expected_message); -} - -TEST_CASE( - "dataloader: ParseData: Mismatch Input Data and Dynamic Shape" * - doctest::description( - "When the size of the provided Input is not in line with the Tensor's " - "shape, then an error should be thrown")) -{ - ModelTensor input1 = TestDataLoader::CreateTensor("INPUT1"); - input1.shape_ = {-1}; - - std::string expected_message; - std::string json_str; - - SUBCASE("content json") - { - json_str = - R"({"data": [{ "INPUT1": { "shape": [3], "content": [1,2] } }]})"; - expected_message = - "mismatch in the data provided for INPUT1. Expected: 12 bytes, Got: 8 " - "bytes"; - } - SUBCASE("b64 json") - { - json_str = R"({"data": [{ "INPUT1": {"shape": [3], "b64": "AAAAAQ=="} }]})"; - expected_message = - "mismatch in the data provided for INPUT1. 
Expected: 12 bytes, Got: 4 " - "bytes"; - } - - MockDataLoader dataloader; - std::shared_ptr inputs = std::make_shared(); - inputs->insert(std::make_pair(input1.name_, input1)); - - std::shared_ptr outputs = std::make_shared(); - - cb::Error status = dataloader.ReadDataFromStr(json_str, inputs, outputs); - REQUIRE(status.IsOk() == false); - CHECK_EQ(status.Message(), expected_message); -} - -TEST_CASE( - "dataloader: ParseData: Mismatch Input and Output" * - doctest::description( - "When the size of the provided Input and validation Output data are " - "different, then an error should be thrown")) -{ - std::string json_str; - - SUBCASE("Normal json") - { - json_str = R"({ - "data": [ - { "INPUT1": [1] }, - { "INPUT1": [2] }, - { "INPUT1": [3] } - ], - "validation_data": [ - { "OUTPUT1": [7] } - ]})"; - } - SUBCASE("content json") - { - json_str = R"({ - "data": [ - { "INPUT1": { "content": [1] } }, - { "INPUT1": { "content": [2] } }, - { "INPUT1": { "content": [3] } } - ], - "validation_data": [ - { "OUTPUT1": { "content": [7] } } - ]})"; - } - SUBCASE("b64 json") - { - json_str = R"({ - "data": [ - { "INPUT1": {"b64": "AAAAAQ=="} }, - { "INPUT1": {"b64": "AgAAAA=="} }, - { "INPUT1": {"b64": "AwAAAA=="} } - ], - "validation_data": [ - { "OUTPUT1": {"b64": "BAAAAA=="} } - ]})"; - } - - MockDataLoader dataloader; - std::shared_ptr inputs = std::make_shared(); - std::shared_ptr outputs = std::make_shared(); - - cb::Error status = dataloader.ReadDataFromStr(json_str, inputs, outputs); - CHECK(status.IsOk() == false); - CHECK_EQ( - status.Message(), - "The 'validation_data' field doesn't align with 'data' field in the json " - "file"); -} - -TEST_CASE("dataloader: ParseData: Valid Data") -{ - std::string json_str; - - SUBCASE("Normal json") - { - json_str = R"({ - "data": [ - { "INPUT1": [1] }, - { "INPUT1": [2] }, - { "INPUT1": [3] } - ], - "validation_data": [ - { "OUTPUT1": [4] }, - { "OUTPUT1": [5] }, - { "OUTPUT1": [6] } - ]})"; - } - SUBCASE("Content json") - { - json_str = R"({ - "data": [ - { "INPUT1": { "content": [1] } }, - { "INPUT1": { "content": [2] } }, - { "INPUT1": { "content": [3] } } - ], - "validation_data": [ - { "OUTPUT1": { "content": [4] } }, - { "OUTPUT1": { "content": [5] } }, - { "OUTPUT1": { "content": [6] } } - ]})"; - } - SUBCASE("b64 json") - { - // Note that these encoded values decode to the numbers 1,2,3,4,5,6, which - // is the same data as the normal json case above - json_str = R"({ - "data": [ - { "INPUT1": {"b64": "AAAAAQ=="} }, - { "INPUT1": {"b64": "AgAAAA=="} }, - { "INPUT1": {"b64": "AwAAAA=="} } - ], - "validation_data": [ - { "OUTPUT1": {"b64": "BAAAAA=="} }, - { "OUTPUT1": {"b64": "BQAAAA=="} }, - { "OUTPUT1": {"b64": "BgAAAA=="} } - ]})"; - } - - MockDataLoader dataloader; - std::shared_ptr inputs = std::make_shared(); - std::shared_ptr outputs = std::make_shared(); - - ModelTensor input1 = TestDataLoader::CreateTensor("INPUT1"); - ModelTensor output1 = TestDataLoader::CreateTensor("OUTPUT1"); - - inputs->insert(std::make_pair(input1.name_, input1)); - outputs->insert(std::make_pair(output1.name_, output1)); - - cb::Error status = dataloader.ReadDataFromStr(json_str, inputs, outputs); - REQUIRE(status.IsOk()); - CHECK_EQ(dataloader.GetDataStreamsCount(), 1); - CHECK_EQ(dataloader.GetTotalSteps(0), 3); - - // Confirm the correct data is in the dataloader - // - TensorData data; - std::vector shape; - - dataloader.GetInputShape(input1, 0, 1, &shape); - CHECK_EQ(shape.size(), 1); - CHECK_EQ(shape[0], 1); - - status = dataloader.GetInputData(input1, 0, 
1, data); - REQUIRE(status.IsOk()); - CHECK(data.is_valid); - auto input_data = *reinterpret_cast(data.data_ptr); - CHECK_EQ(input_data, 2); - CHECK_EQ(data.batch1_size, 4); - - status = dataloader.GetOutputData("OUTPUT1", 0, 2, data); - REQUIRE(status.IsOk()); - CHECK(data.is_valid); - auto output_data = *reinterpret_cast(data.data_ptr); - CHECK_EQ(output_data, 6); - CHECK_EQ(data.batch1_size, 4); -} - -TEST_CASE("dataloader: ParseData: Multiple Streams Invalid Cases") -{ - // Mismatch because one stream with wrong number of steps - std::string mismatch_case1a{R"({ - "data": [ { "INPUT1": [1,2] } ], - "validation_data": [ { "OUTPUT1": [4] }, { "OUTPUT1": [5] } ] - })"}; - std::string mismatch_case1b{R"({ - "data": [ { "INPUT1": [1,2] }, { "INPUT1": [2,3] } ], - "validation_data": [ { "OUTPUT1": [4] } ] - })"}; - - // Mismatch because wrong number of streams (3 output streams for 2 input - // streams) - std::string mismatch_case2{R"({ - "data": [ - [ { "INPUT1": [1,2] }, { "INPUT1": [2,3] } ], - [ { "INPUT1": [10,11] } ] - ], - "validation_data": [ - [ { "OUTPUT1": [4] }, { "OUTPUT1": [5] } ], - [ { "OUTPUT1": [40] } ], - [ { "OUTPUT1": [60] } ] - ]})"}; - - // Mismatch because same number of streams but wrong number of steps - std::string mismatch_case3a{R"({ - "data": [ - [ { "INPUT1": [1,2] } ], - [ { "INPUT1": [10,11] } ] - ], - "validation_data": [ - [ { "OUTPUT1": [4] }, { "OUTPUT1": [5] } ], - [ { "OUTPUT1": [40] } ] - ]})"}; - std::string mismatch_case3b{R"({ - "data": [ - [ { "INPUT1": [1,2] } ], - [ { "INPUT1": [10,11] } ] - ], - "validation_data": [ - [ { "OUTPUT1": [4] } ], - [ { "OUTPUT1": [40] }, { "OUTPUT1": [50] } ] - ]})"}; - - auto test_lambda = [&](std::string json_data) { - std::shared_ptr inputs = std::make_shared(); - std::shared_ptr outputs = - std::make_shared(); - - ModelTensor input1 = TestDataLoader::CreateTensor("INPUT1"); - ModelTensor output1 = TestDataLoader::CreateTensor("OUTPUT1"); - input1.shape_ = {2}; - inputs->insert(std::make_pair(input1.name_, input1)); - outputs->insert(std::make_pair(output1.name_, output1)); - - MockDataLoader dataloader; - cb::Error status = dataloader.ReadDataFromStr(json_data, inputs, outputs); - CHECK(status.IsOk() == false); - CHECK_EQ( - status.Message(), - "The 'validation_data' field doesn't align with 'data' field in the " - "json file"); - }; - - test_lambda(mismatch_case1a); - test_lambda(mismatch_case1b); - test_lambda(mismatch_case2); - test_lambda(mismatch_case3a); - test_lambda(mismatch_case3b); -} - -TEST_CASE("dataloader: ParseData: Multiple Streams Valid") -{ - std::string json_str{R"({ - "data": [ - [ { "INPUT1": [1,2] }, { "INPUT1": [2,3] }], - [ { "INPUT1": [10,11] } ] - ], - "validation_data": [ - [ { "OUTPUT1": [4] }, { "OUTPUT1": [5] } ], - [ { "OUTPUT1": [40] } ] - ] - })"}; - - MockDataLoader dataloader; - std::shared_ptr inputs = std::make_shared(); - std::shared_ptr outputs = std::make_shared(); - - ModelTensor input1 = TestDataLoader::CreateTensor("INPUT1"); - ModelTensor output1 = TestDataLoader::CreateTensor("OUTPUT1"); - input1.shape_ = {2}; - inputs->insert(std::make_pair(input1.name_, input1)); - outputs->insert(std::make_pair(output1.name_, output1)); - - cb::Error status = dataloader.ReadDataFromStr(json_str, inputs, outputs); - REQUIRE(status.IsOk()); - CHECK_EQ(dataloader.GetDataStreamsCount(), 2); - CHECK_EQ(dataloader.GetTotalSteps(0), 2); - CHECK_EQ(dataloader.GetTotalSteps(1), 1); - - // Confirm the correct data is in the dataloader - // - TensorData data; - - status = 
dataloader.GetInputData(input1, 0, 1, data); - REQUIRE(status.IsOk()); - CHECK(data.is_valid); - - const int32_t* input_data = reinterpret_cast(data.data_ptr); - CHECK(data.is_valid); - CHECK_EQ(input_data[0], 2); - CHECK_EQ(input_data[1], 3); - // 2 elements of int32 data is 8 bytes - CHECK_EQ(data.batch1_size, 8); - - status = dataloader.GetOutputData("OUTPUT1", 1, 0, data); - REQUIRE(status.IsOk()); - CHECK(data.is_valid); - const int32_t* output_data = reinterpret_cast(data.data_ptr); - CHECK_EQ(output_data[0], 40); - CHECK_EQ(data.batch1_size, 4); -} - -TEST_CASE( - "dataloader: ParseData: Missing Shape" * - doctest::description( - "When a tensor's shape is dynamic (-1), then it needs to be provided " - "via --shape option (which is not visible to this testing), or via a " - "shape option in the json. If not, an error is thrown")) -{ - std::string json_str{R"({"data": [{ "INPUT1": [1,2,3] } ]})"}; - - MockDataLoader dataloader; - std::shared_ptr inputs = std::make_shared(); - std::shared_ptr outputs = std::make_shared(); - - ModelTensor input1 = TestDataLoader::CreateTensor("INPUT1"); - input1.shape_ = {-1}; - - inputs->insert(std::make_pair(input1.name_, input1)); - - cb::Error status = dataloader.ReadDataFromStr(json_str, inputs, outputs); - CHECK_EQ(status.IsOk(), false); - CHECK_EQ( - status.Message(), - "The variable-sized tensor \"INPUT1\" with model shape [-1] needs to " - "have its shape fully defined. See the --shape option."); -} - - -TEST_CASE( - "dataloader: ParseData: Supplied Shape is valid" * - doctest::description("Supply the dynamic shape for an input")) -{ - std::string json_str; - - SUBCASE("Normal json") - { - json_str = R"({"data": [{ - "INPUT1": { "shape": [3,2], "content": [1,2,3,4,5,6] } - }]})"; - } - SUBCASE("b64 json") - { - // This b64 encoding is the same as the unencoded case of [1,2,3,4,5,6] - json_str = R"({"data": [{ - "INPUT1": { "shape": [3,2], "b64": "AAAAAQAAAAIAAAADAAAABAAAAAUAAAAG" } - }]})"; - } - - MockDataLoader dataloader; - std::shared_ptr inputs = std::make_shared(); - std::shared_ptr outputs = std::make_shared(); - - ModelTensor input1 = TestDataLoader::CreateTensor("INPUT1"); - input1.shape_ = {-1, -1}; - - inputs->insert(std::make_pair(input1.name_, input1)); - - cb::Error status = dataloader.ReadDataFromStr(json_str, inputs, outputs); - REQUIRE(status.IsOk()); - - std::vector shape; - dataloader.GetInputShape(input1, 0, 0, &shape); - CHECK_EQ(shape.size(), 2); - CHECK_EQ(shape[0], 3); - CHECK_EQ(shape[1], 2); -} - - -TEST_CASE( - "dataloader: ParseData: Supplied Shape is zero" * - doctest::description( - "Zero is a legal shape value and should be handled correctly. 
" - "GetInputData differentiates between an empty valid result and an " - "invalid result via the is_valid bit in the returned struct")) -{ - std::string json_str{R"({"data": [{ - "INPUT1": { "shape": [0,2], "content": [] } - }]})"}; - - MockDataLoader dataloader; - std::shared_ptr inputs = std::make_shared(); - std::shared_ptr outputs = std::make_shared(); - - ModelTensor input1 = TestDataLoader::CreateTensor("INPUT1"); - input1.shape_ = {-1, 2}; - - ModelTensor input2 = TestDataLoader::CreateTensor("INPUT2"); - input2.is_optional_ = true; - - inputs->insert(std::make_pair(input1.name_, input1)); - inputs->insert(std::make_pair(input2.name_, input2)); - - cb::Error status = dataloader.ReadDataFromStr(json_str, inputs, outputs); - REQUIRE(status.IsOk()); - - std::vector shape; - dataloader.GetInputShape(input1, 0, 0, &shape); - CHECK_EQ(shape.size(), 2); - CHECK_EQ(shape[0], 0); - CHECK_EQ(shape[1], 2); - - // Confirm that the zero-shape input IS valid, but with size=0 and ptr=null - TensorData data; - status = dataloader.GetInputData(input1, 0, 0, data); - REQUIRE(status.IsOk()); - CHECK(data.is_valid); - CHECK(data.data_ptr == nullptr); - CHECK(data.batch1_size == 0); - - // Confirm that the unspecified input is NOT valid - status = dataloader.GetInputData(input2, 0, 0, data); - REQUIRE(status.IsOk()); - CHECK(!data.is_valid); - CHECK(data.data_ptr == nullptr); - CHECK(data.batch1_size == 0); -} - - -TEST_CASE( - "dataloader: ParseData: Multiple Calls simple" * - doctest::description( - "ParseData can be called multiple times (due to " - "multiple input-data files). The data should " - "accumulate in stream 0 when input data has no nested arrays")) -{ - std::string json_str1{R"({"data": [{ "INPUT1": [1] }]})"}; - std::string json_str2{R"({"data": [{ "INPUT1": [2] },{ "INPUT1": [22]}]})"}; - std::string json_str3{ - R"({"data": [{ "INPUT1": [3] }], "validation_data": [{ "OUTPUT1": [30] }]})"}; - - MockDataLoader dataloader; - std::shared_ptr inputs = std::make_shared(); - std::shared_ptr outputs = std::make_shared(); - - ModelTensor input1 = TestDataLoader::CreateTensor("INPUT1"); - ModelTensor output1 = TestDataLoader::CreateTensor("OUTPUT1"); - - inputs->insert(std::make_pair(input1.name_, input1)); - outputs->insert(std::make_pair(output1.name_, output1)); - - cb::Error status = dataloader.ReadDataFromStr(json_str1, inputs, outputs); - REQUIRE(status.IsOk()); - CHECK_EQ(dataloader.GetDataStreamsCount(), 1); - CHECK_EQ(dataloader.GetTotalSteps(0), 1); - - status = dataloader.ReadDataFromStr(json_str2, inputs, outputs); - REQUIRE(status.IsOk()); - CHECK_EQ(dataloader.GetDataStreamsCount(), 1); - CHECK_EQ(dataloader.GetTotalSteps(0), 3); - - status = dataloader.ReadDataFromStr(json_str3, inputs, outputs); - REQUIRE(status.IsOk()); - CHECK_EQ(dataloader.GetDataStreamsCount(), 1); - CHECK_EQ(dataloader.GetTotalSteps(0), 4); - - // Confirm the correct data is in the dataloader - // - TensorData data; - - status = dataloader.GetInputData(input1, 0, 3, data); - REQUIRE(status.IsOk()); - CHECK(data.is_valid); - - const int32_t* input_data = reinterpret_cast(data.data_ptr); - CHECK_EQ(input_data[0], 3); - CHECK_EQ(data.batch1_size, 4); - - // Confirm that only one of the 4 steps has output data - // - status = dataloader.GetOutputData("OUTPUT1", 0, 0, data); - REQUIRE(status.IsOk()); - CHECK(!data.is_valid); - status = dataloader.GetOutputData("OUTPUT1", 0, 1, data); - REQUIRE(status.IsOk()); - CHECK(!data.is_valid); - status = dataloader.GetOutputData("OUTPUT1", 0, 2, data); - 
-
-TEST_CASE(
-    "dataloader: ParseData: Multiple Calls array" *
-    doctest::description(
-        "ParseData can be called multiple times (due to "
-        "multiple input-data files). The data should "
-        "accumulate as multiple streams when input data has nested arrays"))
-{
-  std::string json_str1{R"({"data": [[{ "INPUT1": [1] }]]})"};
-  std::string json_str2{
-      R"({"data": [[{ "INPUT1": [2] },{ "INPUT1": [20] }]]})"};
-  std::string json_str3{
-      R"({"data": [[{ "INPUT1": [3] }]], "validation_data": [[{ "OUTPUT1": [30] }]]})"};
-
-  MockDataLoader dataloader;
-  std::shared_ptr<ModelTensorMap> inputs = std::make_shared<ModelTensorMap>();
-  std::shared_ptr<ModelTensorMap> outputs = std::make_shared<ModelTensorMap>();
-
-  ModelTensor input1 = TestDataLoader::CreateTensor("INPUT1");
-  ModelTensor output1 = TestDataLoader::CreateTensor("OUTPUT1");
-
-  inputs->insert(std::make_pair(input1.name_, input1));
-  outputs->insert(std::make_pair(output1.name_, output1));
-
-  cb::Error status = dataloader.ReadDataFromStr(json_str1, inputs, outputs);
-  REQUIRE(status.IsOk());
-  status = dataloader.ReadDataFromStr(json_str2, inputs, outputs);
-  REQUIRE(status.IsOk());
-  status = dataloader.ReadDataFromStr(json_str3, inputs, outputs);
-  REQUIRE(status.IsOk());
-  CHECK_EQ(dataloader.GetDataStreamsCount(), 3);
-  CHECK_EQ(dataloader.GetTotalSteps(0), 1);
-  CHECK_EQ(dataloader.GetTotalSteps(1), 2);
-  CHECK_EQ(dataloader.GetTotalSteps(2), 1);
-
-  // Confirm the correct data is in the dataloader
-  //
-  TensorData data;
-
-  status = dataloader.GetInputData(input1, 1, 1, data);
-  REQUIRE(status.IsOk());
-  CHECK(data.is_valid);
-
-  const int32_t* input_data = reinterpret_cast<const int32_t*>(data.data_ptr);
-  CHECK_EQ(input_data[0], 20);
-  CHECK_EQ(data.batch1_size, 4);
-
-  // Confirm that only one of the 3 streams has output data
-  //
-  status = dataloader.GetOutputData("OUTPUT1", 0, 0, data);
-  REQUIRE(status.IsOk());
-  CHECK(!data.is_valid);
-  status = dataloader.GetOutputData("OUTPUT1", 1, 0, data);
-  REQUIRE(status.IsOk());
-  CHECK(!data.is_valid);
-  status = dataloader.GetOutputData("OUTPUT1", 2, 0, data);
-  REQUIRE(status.IsOk());
-  CHECK(data.is_valid);
-  CHECK(data.data_ptr != nullptr);
-  CHECK(data.batch1_size == 4);
-}
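The "simple" and "array" tests above together define the accumulation rule: a flat array of objects appends steps to stream 0, while an array of arrays appends one new stream per inner array. A standalone sketch of that bookkeeping, with Step standing in for one JSON data object:

    #include <cstddef>
    #include <vector>

    using Step = int;  // stand-in for one parsed data object
    using Streams = std::vector<std::vector<Step>>;

    // {"data": [{...},{...}]} -- accumulate everything in stream 0.
    void AppendFlat(Streams& s, const std::vector<Step>& steps)
    {
      if (s.empty()) {
        s.emplace_back();
      }
      s[0].insert(s[0].end(), steps.begin(), steps.end());
    }

    // {"data": [[{...}],[{...},{...}]]} -- each inner array becomes its
    // own stream, so repeated calls grow the stream count.
    void AppendNested(Streams& s, const std::vector<std::vector<Step>>& groups)
    {
      for (const auto& g : groups) {
        s.push_back(g);
      }
    }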
-
-TEST_CASE(
-    "dataloader: ParseData: Multiple Calls mixed" *
-    doctest::description(
-        "ParseData can be called multiple times (due to "
-        "multiple input-data files). An error should be thrown if there is a "
-        "mixture of nested vs no-nested arrays in the input data"))
-{
-  std::string json_str_not_nested{R"({"data": [{ "INPUT1": [2] }]})"};
-  std::string json_str_nested{R"({"data": [[{ "INPUT1": [1] }]]})"};
-  std::string json_str1, json_str2;
-
-  SUBCASE("Nested then not-nested")
-  {
-    json_str1 = json_str_nested;
-    json_str2 = json_str_not_nested;
-  }
-  SUBCASE("Not-nested then nested")
-  {
-    json_str1 = json_str_not_nested;
-    json_str2 = json_str_nested;
-  }
-
-  MockDataLoader dataloader;
-  std::shared_ptr<ModelTensorMap> inputs = std::make_shared<ModelTensorMap>();
-  std::shared_ptr<ModelTensorMap> outputs = std::make_shared<ModelTensorMap>();
-
-  ModelTensor input1 = TestDataLoader::CreateTensor("INPUT1");
-
-  inputs->insert(std::make_pair(input1.name_, input1));
-
-  cb::Error status = dataloader.ReadDataFromStr(json_str1, inputs, outputs);
-  REQUIRE(status.IsOk());
-  status = dataloader.ReadDataFromStr(json_str2, inputs, outputs);
-  REQUIRE(!status.IsOk());
-  CHECK(
-      status.Message() ==
-      "Inconsistency in input-data provided. Can not have a combination of "
-      "objects and arrays inside of the Data array");
-}
-
-TEST_CASE(
-    "dataloader: GenerateData: Is Shape Tensor" *
-    doctest::description("It is illegal to generate data for any Tensor with "
-                         "is_shape_tensor=True"))
-{
-  MockDataLoader dataloader;
-  std::shared_ptr<ModelTensorMap> inputs = std::make_shared<ModelTensorMap>();
-  std::shared_ptr<ModelTensorMap> outputs = std::make_shared<ModelTensorMap>();
-
-  ModelTensor input1 = TestDataLoader::CreateTensor("INPUT1");
-  input1.is_shape_tensor_ = true;
-  inputs->insert(std::make_pair(input1.name_, input1));
-
-  bool zero_input = true;
-  size_t string_length = 5;
-  std::string string_data = "FOOBAR";
-  cb::Error status =
-      dataloader.GenerateData(inputs, zero_input, string_length, string_data);
-  CHECK(status.IsOk() == false);
-  CHECK_EQ(
-      status.Message(),
-      "can not generate data for shape tensor 'INPUT1', user-provided data is "
-      "needed.");
-}
-
-
-TEST_CASE(
-    "dataloader: GenerateData: Non-BYTES" *
-    doctest::description(
-        "Calling GenerateData for non-BYTES datatype should result in a single "
-        "stream with one step. If the zero input flag is set, all of that data "
-        "will be 0. Else it will be random"))
-{
-  bool zero_input;
-  size_t string_length = 5;
-  std::string string_data = "FOOBAR";
-
-  SUBCASE("zero_input true")
-  {
-    zero_input = true;
-  }
-  SUBCASE("zero_input false")
-  {
-    zero_input = false;
-  }
-  MockDataLoader dataloader;
-  std::shared_ptr<ModelTensorMap> inputs = std::make_shared<ModelTensorMap>();
-  std::shared_ptr<ModelTensorMap> outputs = std::make_shared<ModelTensorMap>();
-
-  ModelTensor input1 = TestDataLoader::CreateTensor("INPUT1");
-  input1.shape_ = {3};
-  inputs->insert(std::make_pair(input1.name_, input1));
-
-  cb::Error status =
-      dataloader.GenerateData(inputs, zero_input, string_length, string_data);
-  REQUIRE(status.IsOk());
-  CHECK_EQ(dataloader.GetDataStreamsCount(), 1);
-  CHECK_EQ(dataloader.GetTotalSteps(0), 1);
-
-  TensorData data;
-
-  status = dataloader.GetInputData(input1, 0, 0, data);
-  REQUIRE(status.IsOk());
-  CHECK(data.is_valid);
-  const int32_t* input_data = reinterpret_cast<const int32_t*>(data.data_ptr);
-  if (zero_input) {
-    CHECK_EQ(input_data[0], 0);
-    CHECK_EQ(input_data[1], 0);
-    CHECK_EQ(input_data[2], 0);
-  } else {
-    CHECK_NE(input_data[0], 0);
-    CHECK_NE(input_data[1], 0);
-    CHECK_NE(input_data[2], 0);
-  }
-  // 3 elements of int32 data is 12 bytes
-  CHECK_EQ(data.batch1_size, 12);
-}
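The "GenerateData: BYTES" test that follows pins down the length-prefixed string layout: each element is a 4-byte length followed by the raw characters, so "FOOBAR" serializes to 4 + 6 = 10 bytes. For reference, a standalone sketch of one element's encoding in host byte order (SerializeBytesElement is a hypothetical helper, not perf_analyzer API):

    #include <cstdint>
    #include <cstring>
    #include <string>
    #include <vector>

    std::vector<uint8_t> SerializeBytesElement(const std::string& s)
    {
      const uint32_t len = static_cast<uint32_t>(s.size());
      std::vector<uint8_t> out(sizeof(len) + s.size());
      // 4-byte length prefix, then one byte per character; the test
      // below reads the prefix back through an int32 pointer cast, so
      // host byte order is what matters here.
      std::memcpy(out.data(), &len, sizeof(len));
      std::memcpy(out.data() + sizeof(len), s.data(), s.size());
      return out;
    }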
-
-TEST_CASE(
-    "dataloader: GenerateData: BYTES" *
-    doctest::description(
-        "Calling GenerateData for BYTES datatype should result in a single "
-        "stream with one step. The zero-input flag is ignored. If string_data "
-        "is not null, it will be used. Else it will be a random string of "
-        "length string_length"))
-{
-  bool zero_input = false;
-  size_t string_length = 5;
-  std::string string_data;
-
-  SUBCASE("valid string_data")
-  {
-    string_data = "FOOBAR";
-  }
-  SUBCASE("empty string_data")
-  {
-    string_data = "";
-  }
-
-  MockDataLoader dataloader;
-  std::shared_ptr<ModelTensorMap> inputs = std::make_shared<ModelTensorMap>();
-  std::shared_ptr<ModelTensorMap> outputs = std::make_shared<ModelTensorMap>();
-
-  ModelTensor input1 = TestDataLoader::CreateTensor("INPUT1");
-  input1.datatype_ = "BYTES";
-  input1.shape_ = {3};
-  inputs->insert(std::make_pair(input1.name_, input1));
-
-  cb::Error status =
-      dataloader.GenerateData(inputs, zero_input, string_length, string_data);
-  REQUIRE(status.IsOk());
-  CHECK_EQ(dataloader.GetDataStreamsCount(), 1);
-  CHECK_EQ(dataloader.GetTotalSteps(0), 1);
-
-  TensorData data;
-
-  status = dataloader.GetInputData(input1, 0, 0, data);
-  REQUIRE(status.IsOk());
-  CHECK(data.is_valid);
-
-  // For string data, the result should be a 32-bit number indicating the data
-  // length, and then 1 byte per letter
-  //
-  // For "FOOBAR", the length would be 10 bytes:
-  //   4 bytes to indicate the string length (the number 6)
-  //   1 byte for each letter
-  //
-  // For empty string, the string length would instead be the value in
-  // string_length (5 in this case), and the characters would be random for
-  // each entry in the batch. Thus, the data length would be 9 bytes
-  //
-  // For a shape of [3], this data would be repeated 3 times
-
-  if (string_data.empty()) {
-    // 3 elements of 9 bytes is 27
-    CHECK_EQ(data.batch1_size, 27);
-
-    const char* char_data = reinterpret_cast<const char*>(data.data_ptr);
-
-    // Check all 3 entries in the "batch" of shape [3]
-    for (size_t i = 0; i < 3; i++) {
-      size_t start_index = 9 * i;
-
-      // The first 4 bytes are an int32 indicating the number of characters
-      const int32_t* int32_data =
-          reinterpret_cast<const int32_t*>(&char_data[start_index]);
-      CHECK_EQ(int32_data[0], 5);
-
-      // All of the characters should be in the specified character_set
-      for (size_t j = start_index + 4; j < start_index + 9; j++) {
-        CHECK_NE(character_set.find(char_data[j]), std::string::npos);
-      }
-    }
-
-  } else {
-    // 3 elements of 10 bytes is 30
-    CHECK_EQ(data.batch1_size, 30);
-
-    const int32_t* int32_data = reinterpret_cast<const int32_t*>(data.data_ptr);
-    const char* char_data = reinterpret_cast<const char*>(data.data_ptr);
-    CHECK_EQ(int32_data[0], 6);
-    CHECK_EQ(char_data[4], 'F');
-    CHECK_EQ(char_data[5], 'O');
-    CHECK_EQ(char_data[6], 'O');
-    CHECK_EQ(char_data[7], 'B');
-    CHECK_EQ(char_data[8], 'A');
-    CHECK_EQ(char_data[9], 'R');
-
-    // The data would repeat two more times for shape of [3]
-    for (size_t i = 10; i < 30; i++) {
-      CHECK_EQ(char_data[i - 10], char_data[i]);
-    }
-  }
-}
-
-TEST_CASE("dataloader: GenerateData: Dynamic shape")
-{
-  bool zero_input = false;
-  size_t string_length = 5;
-  std::string string_data;
-
-  ModelTensor input1 = TestDataLoader::CreateTensor("INPUT1");
-  input1.shape_ = {-1};
-
-  std::string expected_message =
-      "input INPUT1 contains dynamic shape, provide shapes to send along with "
-      "the request";
-
-  SUBCASE("BYTES")
-  {
-    input1.datatype_ = "BYTES";
-  }
-  SUBCASE("non-BYTES")
-  {
-    input1.datatype_ = "INT32";
-  }
-
-  MockDataLoader dataloader;
-  std::shared_ptr<ModelTensorMap> inputs = std::make_shared<ModelTensorMap>();
-  std::shared_ptr<ModelTensorMap> outputs = std::make_shared<ModelTensorMap>();
-  inputs->insert(std::make_pair(input1.name_, input1));
-
-  cb::Error status =
-      dataloader.GenerateData(inputs, zero_input, string_length, string_data);
-
REQUIRE(status.IsOk() == false); - CHECK_EQ(status.Message(), expected_message); -} - -TEST_CASE( - "dataloader: ReadDataFromDir: Error reading input file" * - doctest::description( - "When there is an error reading an input data file, the error should " - "bubble up to the return value of ReadDataFromDir")) -{ - MockDataLoader dataloader; - - std::shared_ptr inputs = std::make_shared(); - std::shared_ptr outputs = std::make_shared(); - - ModelTensor input1 = TestDataLoader::CreateTensor("INPUT1"); - - std::string dir{"fake/path"}; - - SUBCASE("BYTES (string) data") - { - input1.datatype_ = "BYTES"; - } - SUBCASE("Raw Binary data") - { - input1.datatype_ = "INT32"; - } - - inputs->insert(std::make_pair(input1.name_, input1)); - cb::Error status = dataloader.ReadDataFromDir(inputs, outputs, dir); - CHECK(status.IsOk() == false); -} - -TEST_CASE( - "dataloader: ReadDataFromDir: Error reading output file" * - doctest::description( - "When there is an error reading an output data file, an error is NOT " - "raised from ReadDataFromDir, and instead GetOutputData will return " - "nullptr with a batch1_size of 0")) -{ - MockDataLoader dataloader; - - std::shared_ptr inputs = std::make_shared(); - std::shared_ptr outputs = std::make_shared(); - - ModelTensor output1 = TestDataLoader::CreateTensor("OUTPUT1"); - - std::string dir{"fake/path"}; - - SUBCASE("BYTES (string) data") - { - output1.datatype_ = "BYTES"; - } - SUBCASE("Raw Binary data") - { - output1.datatype_ = "INT32"; - } - - outputs->insert(std::make_pair(output1.name_, output1)); - cb::Error status = dataloader.ReadDataFromDir(inputs, outputs, dir); - CHECK(status.IsOk() == true); - - TensorData data; - - dataloader.GetOutputData("OUTPUT1", 0, 0, data); - CHECK(!data.is_valid); - CHECK(data.data_ptr == nullptr); - CHECK(data.batch1_size == 0); -} - -TEST_CASE( - "dataloader: ReadDataFromDir: Mismatching Input Data" * - doctest::description("Successfully reading input files but having a " - "mismatch will result in an error being thrown")) -{ - MockDataLoader dataloader; - - std::string datatype; - std::string expected_error_message; - - std::shared_ptr inputs = std::make_shared(); - std::shared_ptr outputs = std::make_shared(); - - ModelTensor input1 = TestDataLoader::CreateTensor("INPUT1"); - ModelTensor output1 = TestDataLoader::CreateTensor("OUTPUT1"); - - std::string dir{"mocked_out"}; - - SUBCASE("BYTES (string) data") - { - datatype = "BYTES"; - std::vector string_data; - - SUBCASE("Dynamic shape") - { - input1.shape_ = {-1}; - expected_error_message = - "input INPUT1 contains dynamic shape, provide shapes to send along " - "with the request"; - } - SUBCASE("Supplied shape") - { - input1.shape_ = {1}; - string_data = {"InStr", "ExtraStr"}; - - expected_error_message = - "provided data for input INPUT1 has 2 elements, expect 1"; - } - - EXPECT_CALL(dataloader, ReadTextFile(testing::_, testing::_)) - .WillOnce(testing::DoAll( - testing::SetArgPointee<1>(string_data), - testing::Return(cb::Error::Success))); - } - SUBCASE("Raw Binary data") - { - datatype = "INT32"; - std::vector char_data; - - SUBCASE("Dynamic shape") - { - input1.shape_ = {-1}; - expected_error_message = - "input INPUT1 contains dynamic shape, provide shapes to send along " - "with the request"; - } - SUBCASE("Supplied shape") - { - // An INT32 of shape {1} will be 4 bytes. However, we are supplying 5 - // bytes via char_data. 
- input1.shape_ = {1}; - char_data = {'0', '0', '0', '7', '5'}; - expected_error_message = - "provided data for input INPUT1 has byte size 5, expect 4"; - } - - EXPECT_CALL(dataloader, ReadFile(testing::_, testing::_)) - .WillOnce(testing::DoAll( - testing::SetArgPointee<1>(char_data), - testing::Return(cb::Error::Success))); - } - - input1.datatype_ = datatype; - inputs->insert(std::make_pair(input1.name_, input1)); - - cb::Error status = dataloader.ReadDataFromDir(inputs, outputs, dir); - REQUIRE(status.IsOk() == false); - CHECK(status.Message() == expected_error_message); -} - -// FIXME TMA-1210 -- the output data is not being ignored here and no error is -// thrown, despite the mismatch -// TEST_CASE( -// "dataloader: ReadDataFromDir: Mismatching Output Data" * -// doctest::description("Successfully reading output files but having a " -// "mismatch will result in the data being ignored")) -//{ -// MockDataLoader dataloader; -// -// std::string datatype; -// -// std::shared_ptr inputs = std::make_shared(); -// std::shared_ptr outputs = -// std::make_shared(); -// -// ModelTensor input1 = TestDataLoader::CreateTensor("INPUT1"); -// ModelTensor output1 = TestDataLoader::CreateTensor("OUTPUT1"); -// -// std::string dir{"mocked_out"}; -// -// std::vector char_data{'0', '0', '0', '7', '5'}; -// -// std::vector string_data{"InStr", "ExtraStr"}; -// -// SUBCASE("BYTES (string) data") -// { -// datatype = "BYTES"; -// EXPECT_CALL(dataloader, ReadTextFile(testing::_, testing::_)) -// .WillOnce(testing::DoAll( -// testing::SetArgPointee<1>(string_data), -// testing::Return(cb::Error::Success))); -// -// SUBCASE("Dynamic shape") { output1.shape_ = {-1}; } -// SUBCASE("Supplied shape") { output1.shape_ = {1}; } -// } -// SUBCASE("Raw Binary data") -// { -// datatype = "INT32"; -// EXPECT_CALL(dataloader, ReadFile(testing::_, testing::_)) -// .WillOnce(testing::DoAll( -// testing::SetArgPointee<1>(char_data), -// testing::Return(cb::Error::Success))); -// -// SUBCASE("Dynamic shape") { input1.shape_ = {-1}; } -// SUBCASE("Supplied shape") { input1.shape_ = {1}; } -// } -// -// output1.datatype_ = datatype; -// outputs->insert(std::make_pair(output1.name_, output1)); -// -// cb::Error status = dataloader.ReadDataFromDir(inputs, outputs, dir); -// REQUIRE(status.IsOk() == true); -// -// // Confirm that the data is not in the dataloader -// const uint8_t* data_ptr{nullptr}; -// size_t batch1_size; -// -// dataloader.GetOutputData("OUTPUT1", 0, 0, &data_ptr, &batch1_size); -// CHECK(data_ptr == nullptr); -// CHECK(batch1_size == 0); -//} - -TEST_CASE( - "dataloader: ReadDataFromDir: Valid Data" * - doctest::description("Successfully reading files will always result in a " - "single stream with a single step")) -{ - MockDataLoader dataloader; - - std::string datatype; - - std::shared_ptr inputs = std::make_shared(); - std::shared_ptr outputs = std::make_shared(); - - ModelTensor input1 = TestDataLoader::CreateTensor("INPUT1"); - ModelTensor output1 = TestDataLoader::CreateTensor("OUTPUT1"); - - std::string dir{"mocked_out"}; - - std::vector input_char_data{'0', '0', '0', '7'}; - std::vector output_char_data{'0', '0', '0', '3'}; - - std::vector input_string_data{"InStr"}; - std::vector output_string_data{"OutStr"}; - - std::vector expected_input; - std::vector expected_output; - - SUBCASE("BYTES (string) data") - { - datatype = "BYTES"; - - expected_input = {'\5', '\0', '\0', '\0', 'I', 'n', 'S', 't', 'r'}; - expected_output = {'\6', '\0', '\0', '\0', 'O', 'u', 't', 'S', 't', 'r'}; - - 
EXPECT_CALL(dataloader, ReadTextFile(testing::_, testing::_)) - .WillOnce(testing::DoAll( - testing::SetArgPointee<1>(input_string_data), - testing::Return(cb::Error::Success))) - .WillOnce(testing::DoAll( - testing::SetArgPointee<1>(output_string_data), - testing::Return(cb::Error::Success))); - } - SUBCASE("Raw Binary data") - { - datatype = "INT32"; - - expected_input = input_char_data; - expected_output = output_char_data; - - EXPECT_CALL(dataloader, ReadFile(testing::_, testing::_)) - .WillOnce(testing::DoAll( - testing::SetArgPointee<1>(input_char_data), - testing::Return(cb::Error::Success))) - .WillOnce(testing::DoAll( - testing::SetArgPointee<1>(output_char_data), - testing::Return(cb::Error::Success))); - } - - input1.datatype_ = datatype; - output1.datatype_ = datatype; - - inputs->insert(std::make_pair(input1.name_, input1)); - outputs->insert(std::make_pair(output1.name_, output1)); - - cb::Error status = dataloader.ReadDataFromDir(inputs, outputs, dir); - REQUIRE(status.IsOk()); - CHECK_EQ(dataloader.GetDataStreamsCount(), 1); - CHECK_EQ(dataloader.GetTotalSteps(0), 1); - - // Validate input and output data - TensorData data; - - status = dataloader.GetInputData(input1, 0, 0, data); - REQUIRE(status.IsOk()); - CHECK(data.is_valid); - - const char* input_data = reinterpret_cast(data.data_ptr); - REQUIRE(data.batch1_size == expected_input.size()); - for (size_t i = 0; i < data.batch1_size; i++) { - CHECK(input_data[i] == expected_input[i]); - } - - status = dataloader.GetOutputData("OUTPUT1", 0, 0, data); - REQUIRE(status.IsOk()); - CHECK(data.is_valid); - - const char* output_data = reinterpret_cast(data.data_ptr); - REQUIRE(data.batch1_size == expected_output.size()); - for (size_t i = 0; i < data.batch1_size; i++) { - CHECK(output_data[i] == expected_output[i]); - } -} - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/test_idle_timer.cc b/src/c++/perf_analyzer/test_idle_timer.cc deleted file mode 100644 index 18f9d7518..000000000 --- a/src/c++/perf_analyzer/test_idle_timer.cc +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <thread>
-
-#include "doctest.h"
-#include "idle_timer.h"
-
-namespace triton { namespace perfanalyzer {
-
-TEST_CASE("idle_timer: basic usage")
-{
-  IdleTimer timer;
-  CHECK(timer.GetIdleTime() == 0);
-  timer.Start();
-  std::this_thread::sleep_for(std::chrono::milliseconds(1));
-  timer.Stop();
-  CHECK(timer.GetIdleTime() > 0);
-  timer.Reset();
-  CHECK(timer.GetIdleTime() == 0);
-}
-
-TEST_CASE("idle_timer: GetIdleTime when inactive")
-{
-  IdleTimer timer;
-  CHECK(timer.GetIdleTime() == 0);
-  std::this_thread::sleep_for(std::chrono::milliseconds(1));
-  CHECK(timer.GetIdleTime() == 0);
-  CHECK_NOTHROW(timer.Start());
-}
-
-TEST_CASE("idle_timer: GetIdleTime when active")
-{
-  IdleTimer timer;
-  timer.Start();
-  std::this_thread::sleep_for(std::chrono::milliseconds(1));
-  CHECK(timer.GetIdleTime() > 0);
-  std::this_thread::sleep_for(std::chrono::milliseconds(1));
-  CHECK(timer.GetIdleTime() > 0);
-  CHECK_NOTHROW(timer.Stop());
-}
-
-TEST_CASE("idle_timer: reset when active")
-{
-  IdleTimer timer;
-  timer.Start();
-  std::this_thread::sleep_for(std::chrono::milliseconds(1));
-  timer.Stop();
-  std::this_thread::sleep_for(std::chrono::milliseconds(1));
-  timer.Start();
-  std::this_thread::sleep_for(std::chrono::milliseconds(1));
-  timer.Reset();
-  std::this_thread::sleep_for(std::chrono::milliseconds(1));
-  CHECK(timer.GetIdleTime() > 0);
-}
-
-TEST_CASE("idle_timer: double start")
-{
-  IdleTimer timer;
-  timer.Start();
-  CHECK_THROWS_AS(timer.Start(), const std::exception&);
-}
-
-TEST_CASE("idle_timer: stop without start")
-{
-  IdleTimer timer;
-  CHECK_THROWS_AS(timer.Stop(), const std::exception&);
-}
-
-
-}} // namespace triton::perfanalyzer
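Taken together, the cases above pin down a small state machine: idle time accrues between Start() and Stop(), a double Start() or a Stop() without Start() throws, and Reset() zeroes the total while leaving an active interval running. A standalone sketch consistent with those cases (IdleTimerSketch is illustrative, not the shipped idle_timer.h):

    #include <chrono>
    #include <stdexcept>

    class IdleTimerSketch {
      using clock = std::chrono::steady_clock;

     public:
      void Start()
      {
        if (active_) throw std::runtime_error("already started");
        active_ = true;
        start_ = clock::now();
      }
      void Stop()
      {
        if (!active_) throw std::runtime_error("not started");
        total_ += clock::now() - start_;
        active_ = false;
      }
      void Reset()
      {
        // Zero the accumulated total; an in-flight interval restarts now,
        // matching the "reset when active" case above.
        total_ = {};
        if (active_) start_ = clock::now();
      }
      std::chrono::nanoseconds GetIdleTime() const
      {
        auto t = total_;
        if (active_) t += clock::now() - start_;
        return std::chrono::duration_cast<std::chrono::nanoseconds>(t);
      }

     private:
      bool active_{false};
      clock::time_point start_{};
      clock::duration total_{};
    };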
diff --git a/src/c++/perf_analyzer/test_infer_context.cc b/src/c++/perf_analyzer/test_infer_context.cc
deleted file mode 100644
index 951fb2b10..000000000
--- a/src/c++/perf_analyzer/test_infer_context.cc
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// * Neither the name of NVIDIA CORPORATION nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "client_backend/mock_client_backend.h"
-#include "doctest.h"
-#include "gmock/gmock.h"
-#include "infer_context.h"
-#include "mock_data_loader.h"
-#include "mock_infer_context.h"
-#include "mock_infer_data_manager.h"
-#include "mock_sequence_manager.h"
-
-namespace triton { namespace perfanalyzer {
-
-/// Tests the round robin ordering of json input data
-///
-TEST_CASE("update_seq_json_data: testing the UpdateSeqJsonData function")
-{
-  std::shared_ptr<MockSequenceManager> mock_sequence_manager{
-      std::make_shared<MockSequenceManager>()};
-
-  EXPECT_CALL(
-      *mock_sequence_manager, SetInferSequenceOptions(testing::_, testing::_))
-      .Times(6)
-      .WillRepeatedly(testing::Return());
-
-  mock_sequence_manager->InitSequenceStatuses(1);
-
-  std::shared_ptr<MockDataLoader> mock_data_loader{
-      std::make_shared<MockDataLoader>()};
-
-  EXPECT_CALL(*mock_data_loader, GetTotalSteps(testing::_))
-      .Times(6)
-      .WillRepeatedly(testing::Return(3));
-
-  std::shared_ptr<MockInferDataManager> mock_infer_data_manager{
-      std::make_shared<MockInferDataManager>()};
-
-  testing::Sequence seq;
-  EXPECT_CALL(
-      *mock_infer_data_manager,
-      UpdateInferData(testing::_, testing::_, 0, testing::_))
-      .InSequence(seq)
-      .WillOnce(testing::Return(cb::Error::Success));
-  EXPECT_CALL(
-      *mock_infer_data_manager,
-      UpdateInferData(testing::_, testing::_, 1, testing::_))
-      .InSequence(seq)
-      .WillOnce(testing::Return(cb::Error::Success));
-  EXPECT_CALL(
-      *mock_infer_data_manager,
-      UpdateInferData(testing::_, testing::_, 2, testing::_))
-      .InSequence(seq)
-      .WillOnce(testing::Return(cb::Error::Success));
-  EXPECT_CALL(
-      *mock_infer_data_manager,
-      UpdateInferData(testing::_, testing::_, 0, testing::_))
-      .InSequence(seq)
-      .WillOnce(testing::Return(cb::Error::Success));
-  EXPECT_CALL(
-      *mock_infer_data_manager,
-      UpdateInferData(testing::_, testing::_, 1, testing::_))
-      .InSequence(seq)
-      .WillOnce(testing::Return(cb::Error::Success));
-  EXPECT_CALL(
-      *mock_infer_data_manager,
-      UpdateInferData(testing::_, testing::_, 2, testing::_))
-      .InSequence(seq)
-      .WillOnce(testing::Return(cb::Error::Success));
-
-  std::shared_ptr<MockInferContext> mic{std::make_shared<MockInferContext>()};
-
-  EXPECT_CALL(*mic, SendRequest(testing::_, testing::_, testing::_))
-      .Times(6)
-      .WillRepeatedly(testing::Return());
-
-  mic->sequence_manager_ = mock_sequence_manager;
-  mic->data_loader_ = mock_data_loader;
-  mic->infer_data_manager_ = mock_infer_data_manager;
-  mic->thread_stat_ = std::make_shared<ThreadStat>();
-  bool execute{true};
-  mic->execute_ = execute;
-  mic->using_json_data_ = true;
-
-  size_t seq_stat_index{0};
-  bool delayed{false};
-
-  mic->SendSequenceInferRequest(seq_stat_index, delayed);
-  mic->SendSequenceInferRequest(seq_stat_index, delayed);
-  mic->SendSequenceInferRequest(seq_stat_index, delayed);
-  mic->SendSequenceInferRequest(seq_stat_index, delayed);
-  mic->SendSequenceInferRequest(seq_stat_index, delayed);
-  mic->SendSequenceInferRequest(seq_stat_index, delayed);
-
-  // Destruct gmock objects to determine gmock-related test failure
-  mock_sequence_manager.reset();
-  mock_data_loader.reset();
-  mock_infer_data_manager.reset();
-  mic.reset();
-  REQUIRE(testing::Test::HasFailure() == false);
-}
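The test above asserts round-robin ordering: with GetTotalSteps() reporting 3, six sequence requests should consume step ids 0,1,2,0,1,2. A one-line sketch of that wrap-around rule (NextStepId is a hypothetical name for the selection the expectations encode):

    #include <cstddef>

    // With total_steps == 3, successive calls yield 0,1,2,0,1,2,...
    size_t NextStepId(size_t sent_so_far, size_t total_steps)
    {
      return total_steps == 0 ? 0 : sent_so_far % total_steps;
    }

Resetting the mocks at the end of the test forces gmock to verify all expectations before the doctest assertion inspects the failure state.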
-
-TEST_CASE("send_request: testing the SendRequest function")
-{
-  MockInferContext mock_infer_context{};
-
-  SUBCASE("testing logic relevant to request record sequence ID")
-  {
-    mock_infer_context.thread_stat_ = std::make_shared<ThreadStat>();
-    mock_infer_context.thread_stat_->contexts_stat_.emplace_back();
-    mock_infer_context.async_ = true;
-    mock_infer_context.streaming_ = true;
-    mock_infer_context.infer_data_.options_ =
-        std::make_unique<cb::InferOptions>("my_model");
-    std::shared_ptr<cb::MockClientStats> mock_client_stats{
-        std::make_shared<cb::MockClientStats>()};
-    mock_infer_context.infer_backend_ =
-        std::make_unique<cb::MockClientBackend>(mock_client_stats);
-
-    const uint64_t request_id{5};
-    const bool delayed{false};
-    const uint64_t sequence_id{2};
-
-    mock_infer_context.infer_data_.options_->request_id_ =
-        std::to_string(request_id);
-
-    cb::MockInferResult* mock_infer_result{
-        new cb::MockInferResult(*mock_infer_context.infer_data_.options_)};
-
-    cb::OnCompleteFn& stream_callback{mock_infer_context.async_callback_func_};
-
-    EXPECT_CALL(
-        dynamic_cast<cb::MockClientBackend&>(
-            *mock_infer_context.infer_backend_),
-        AsyncStreamInfer(testing::_, testing::_, testing::_))
-        .WillOnce(
-            [&mock_infer_result, &stream_callback](
-                const cb::InferOptions& options,
-                const std::vector<cb::InferInput*>& inputs,
-                const std::vector<const cb::InferRequestedOutput*>& outputs)
-                -> cb::Error {
-              stream_callback(mock_infer_result);
-              return cb::Error::Success;
-            });
-
-    mock_infer_context.SendRequest(request_id, delayed, sequence_id);
-
-    CHECK(mock_infer_context.thread_stat_->request_records_.size() == 1);
-    CHECK(
-        mock_infer_context.thread_stat_->request_records_[0].sequence_id_ ==
-        sequence_id);
-  }
-}
-
-}} // namespace triton::perfanalyzer
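The SendRequest test above checks one piece of bookkeeping: firing the stream callback is what appends a request record, so one completed callback yields exactly one record carrying the caller's sequence id. A standalone sketch of that wiring (the names here are illustrative stand-ins, not the real perf_analyzer types):

    #include <cstdint>
    #include <functional>
    #include <vector>

    struct RequestRecordSketch {
      uint64_t sequence_id{0};
    };

    struct InferContextSketch {
      std::vector<RequestRecordSketch> request_records;

      // The mocked AsyncStreamInfer invokes this callback immediately;
      // the callback, not the send, is what records the request.
      std::function<void()> MakeCallback(uint64_t sequence_id)
      {
        return [this, sequence_id]() {
          request_records.push_back(RequestRecordSketch{sequence_id});
        };
      }
    };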
- -#include "doctest.h" -#include "inference_profiler.h" -#include "mock_inference_profiler.h" -#include "mock_load_manager.h" -#include "mock_model_parser.h" - -namespace triton { namespace perfanalyzer { - -class TestInferenceProfiler : public InferenceProfiler { - public: - static void ValidLatencyMeasurement( - const std::pair& valid_range, - size_t& valid_sequence_count, size_t& delayed_request_count, - std::vector* latencies, size_t& response_count, - std::vector& valid_requests, - std::vector& all_request_records) - { - InferenceProfiler inference_profiler{}; - inference_profiler.all_request_records_ = all_request_records; - inference_profiler.ValidLatencyMeasurement( - valid_range, valid_sequence_count, delayed_request_count, latencies, - response_count, valid_requests); - } - - static std::tuple GetMeanAndStdDev( - const std::vector& latencies) - { - InferenceProfiler inference_profiler{}; - return inference_profiler.GetMeanAndStdDev(latencies); - } - - void SummarizeSendRequestRate( - const double window_duration_s, const size_t num_sent_requests, - PerfStatus& summary) - { - InferenceProfiler::SummarizeSendRequestRate( - window_duration_s, num_sent_requests, summary); - } - - static bool TestCheckWithinThreshold( - LoadStatus& ls, LoadParams& lp, uint64_t latency_threshold_ms) - { - InferenceProfiler ip; - size_t idx = ls.infer_per_sec.size() - lp.stability_window; - ip.latency_threshold_ms_ = latency_threshold_ms; - - return ip.CheckWithinThreshold(idx, ls); - } - - static bool TestCheckWindowForStability(LoadStatus& ls, LoadParams& lp) - { - size_t idx = ls.infer_per_sec.size() - lp.stability_window; - - InferenceProfiler ip; - ip.load_parameters_.stability_threshold = lp.stability_threshold; - ip.load_parameters_.stability_window = lp.stability_window; - - return ip.CheckWindowForStability(idx, ls, true); - }; - - static bool TestDetermineStability( - LoadStatus& ls, LoadParams& lp, bool check_latency = true) - { - InferenceProfiler ip; - ip.load_parameters_.stability_threshold = lp.stability_threshold; - ip.load_parameters_.stability_window = lp.stability_window; - - return ip.DetermineStability(ls, check_latency); - } - - static bool TestIsDoneProfiling( - LoadStatus& ls, LoadParams& lp, uint64_t latency_threshold_ms) - { - InferenceProfiler ip; - ip.load_parameters_.stability_threshold = lp.stability_threshold; - ip.load_parameters_.stability_window = lp.stability_window; - ip.latency_threshold_ms_ = latency_threshold_ms; - ip.mpi_driver_ = std::make_shared(false); - - bool is_stable = ip.DetermineStability(ls); - return ip.IsDoneProfiling(ls, &is_stable); - }; - - std::pair ClampWindow(std::vector& reqs) - { - return InferenceProfiler::ClampWindow(reqs); - } - - cb::Error MergeMetrics( - const std::vector>& all_metrics, - Metrics& merged_metrics) - { - return InferenceProfiler::MergeMetrics(all_metrics, merged_metrics); - } - - template - void GetMetricAveragePerGPU( - const std::vector>>& - input_metric_maps, - std::map& output_metric_map) - { - InferenceProfiler::GetMetricAveragePerGPU( - input_metric_maps, output_metric_map); - } - - template - void GetMetricMaxPerGPU( - const std::vector>>& - input_metric_maps, - std::map& output_metric_map) - { - InferenceProfiler::GetMetricMaxPerGPU( - input_metric_maps, output_metric_map); - } - - template - void GetMetricFirstPerGPU( - const std::vector>>& - input_metric_maps, - std::map& output_metric_map) - { - InferenceProfiler::GetMetricFirstPerGPU( - input_metric_maps, output_metric_map); - } - - void SummarizeOverhead( - 
const uint64_t window_duration_ns, const uint64_t idle_ns, - PerfStatus& summary) - { - InferenceProfiler::SummarizeOverhead(window_duration_ns, idle_ns, summary); - } - - - cb::Error DetermineStatsModelVersion( - const cb::ModelIdentifier& model_identifier, - const std::map& start_stats, - const std::map& end_stats, - int64_t* model_version) - { - return InferenceProfiler::DetermineStatsModelVersion( - model_identifier, start_stats, end_stats, model_version); - } - - cb::Error SetTopLevelResponseCaching(bool enable_top_level_response_caching) - { - return InferenceProfiler::SetTopLevelResponseCaching( - enable_top_level_response_caching); - } -}; - - -TEST_CASE("testing the ValidLatencyMeasurement function") -{ - size_t valid_sequence_count{}; - size_t delayed_request_count{}; - std::vector latencies{}; - size_t response_count{}; - std::vector valid_requests{}; - - const std::pair window{4, 17}; - using time_point = std::chrono::time_point; - using ns = std::chrono::nanoseconds; - std::vector all_request_records{ - // request ends before window starts, this should not be possible to exist - // in the vector of requests, but if it is, we exclude it: not included in - // current window - RequestRecord( - time_point(ns(1)), std::vector{time_point(ns(2))}, {}, {}, - 0, false, 0, false), - - // request starts before window starts and ends inside window: included in - // current window - RequestRecord( - time_point(ns(3)), std::vector{time_point(ns(5))}, {}, {}, - 0, false, 0, false), - - // requests start and end inside window: included in current window - RequestRecord( - time_point(ns(6)), std::vector{time_point(ns(9))}, {}, {}, - 0, false, 0, false), - RequestRecord( - time_point(ns(10)), std::vector{time_point(ns(14))}, {}, - {}, 0, false, 0, false), - - // request starts before window ends and ends after window ends: not - // included in current window - RequestRecord( - time_point(ns(15)), std::vector{time_point(ns(20))}, {}, - {}, 0, false, 0, false), - - // request starts after window ends: not included in current window - RequestRecord( - time_point(ns(21)), std::vector{time_point(ns(27))}, {}, - {}, 0, false, 0, false)}; - - TestInferenceProfiler::ValidLatencyMeasurement( - window, valid_sequence_count, delayed_request_count, &latencies, - response_count, valid_requests, all_request_records); - - const auto& convert_request_record_to_latency{[](RequestRecord t) { - return CHRONO_TO_NANOS(t.response_timestamps_.back()) - - CHRONO_TO_NANOS(t.start_time_); - }}; - - CHECK(latencies.size() == 3); - CHECK( - latencies[0] == - convert_request_record_to_latency(all_request_records[1])); - CHECK( - latencies[1] == - convert_request_record_to_latency(all_request_records[2])); - CHECK( - latencies[2] == - convert_request_record_to_latency(all_request_records[3])); -} - -TEST_CASE("test_check_window_for_stability") -{ - LoadStatus ls; - LoadParams lp; - - SUBCASE("test throughput not stable") - { - ls.infer_per_sec = {1.0, 1000.0, 500.0}; - ls.latencies = {1, 1, 1}; - lp.stability_window = 3; - lp.stability_threshold = 0.1; - CHECK(TestInferenceProfiler::TestCheckWindowForStability(ls, lp) == false); - } - SUBCASE("test throughput stable") - { - ls.infer_per_sec = {500.0, 520.0, 510.0}; - ls.latencies = {1, 1, 1}; - lp.stability_window = 3; - lp.stability_threshold = 0.1; - CHECK(TestInferenceProfiler::TestCheckWindowForStability(ls, lp) == true); - } - SUBCASE("test latency not stable") - { - ls.infer_per_sec = {500.0, 520.0, 510.0}; - ls.latencies = {100, 106, 112}; - lp.stability_window = 3; 
- lp.stability_threshold = 0.1; - CHECK(TestInferenceProfiler::TestCheckWindowForStability(ls, lp) == false); - } - SUBCASE("test latency stable") - { - ls.infer_per_sec = {500.0, 520.0, 510.0}; - ls.latencies = {100, 104, 108}; - lp.stability_window = 3; - lp.stability_threshold = 0.1; - CHECK(TestInferenceProfiler::TestCheckWindowForStability(ls, lp) == true); - } - SUBCASE("test throughput stable after many measurements") - { - ls.infer_per_sec = {1.0, 1000.0, 500.0, 1500.0, 500.0, 520.0, 510.0}; - ls.latencies = {1, 1, 1, 1, 1, 1, 1}; - lp.stability_window = 3; - lp.stability_threshold = 0.1; - CHECK(TestInferenceProfiler::TestCheckWindowForStability(ls, lp) == true); - } - SUBCASE("test stability window of 5") - { - ls.infer_per_sec = {500.0, 520.0, 510.0, 505.0, 515.0}; - ls.latencies = {100, 104, 108, 102, 106}; - lp.stability_window = 5; - lp.stability_threshold = 0.1; - CHECK(TestInferenceProfiler::TestCheckWindowForStability(ls, lp) == true); - } - SUBCASE("test not stable in 5 but stable in 3") - { - ls.infer_per_sec = {1.0, 1000.0, 510.0, 505.0, 515.0}; - ls.latencies = {100, 104, 108, 102, 106}; - lp.stability_window = 5; - lp.stability_threshold = 0.1; - CHECK(TestInferenceProfiler::TestCheckWindowForStability(ls, lp) == false); - } - SUBCASE("test stability window of 2") - { - ls.infer_per_sec = {500.0, 1000.0, 1.0, 505.0, 515.0}; - ls.latencies = {100, 104, 108, 102, 106}; - lp.stability_window = 2; - lp.stability_threshold = 0.1; - CHECK(TestInferenceProfiler::TestCheckWindowForStability(ls, lp) == true); - } -} - -TEST_CASE("test check within threshold") -{ - LoadStatus ls; - LoadParams lp; - - ls.infer_per_sec = {500.0, 520.0, 510.0}; - lp.stability_window = 3; - lp.stability_threshold = 0.1; - uint64_t latency_threshold_ms = 1; - - SUBCASE("test not within threshold") - { - ls.latencies = {2000000, 2000000, 2000000}; - CHECK( - TestInferenceProfiler::TestCheckWithinThreshold( - ls, lp, latency_threshold_ms) == false); - } - - SUBCASE("test within threshold") - { - ls.latencies = {100000, 100000, 100000}; - CHECK( - TestInferenceProfiler::TestCheckWithinThreshold( - ls, lp, latency_threshold_ms) == true); - } -} - -TEST_CASE("test_determine_stability") -{ - LoadStatus ls; - LoadParams lp; - - SUBCASE("test inference equals zero") - { - ls.infer_per_sec = {500.0, 0.0, 510.0}; - ls.latencies = {1, 1, 1}; - lp.stability_window = 3; - lp.stability_threshold = 0.1; - uint64_t latency_threshold_ms = 1; - CHECK(TestInferenceProfiler::TestDetermineStability(ls, lp) == false); - - ls.infer_per_sec = {500.0, 520.0, 510.0}; - CHECK(TestInferenceProfiler::TestDetermineStability(ls, lp) == true); - } - - SUBCASE("test determine stability without latency check") - { - ls.infer_per_sec = {500.0, 520.0, 510.0}; - ls.latencies = {100, 106, 112}; - lp.stability_window = 3; - lp.stability_threshold = 0.1; - uint64_t latency_threshold_ms = 1; - CHECK(TestInferenceProfiler::TestDetermineStability(ls, lp, false) == true); - } -} - -TEST_CASE("test_is_done_profiling") -{ - LoadStatus ls; - LoadParams lp; - - - SUBCASE("test latency_threshold is NO_LIMIT") - { - ls.infer_per_sec = {1.0, 1000.0, 500.0}; - ls.latencies = {1, 1, 1}; - lp.stability_window = 3; - lp.stability_threshold = 0.1; - uint64_t latency_threshold_ms = NO_LIMIT; - - CHECK( - TestInferenceProfiler::TestIsDoneProfiling( - ls, lp, latency_threshold_ms) == false); - } - - SUBCASE("test not within threshold from done profiling") - { - ls.infer_per_sec = {1.0, 1000.0, 500.0}; - ls.latencies = {2000000, 2000000, 2000000}; - 
lp.stability_window = 3; - lp.stability_threshold = 0.1; - uint64_t latency_threshold_ms = 1; - CHECK( - TestInferenceProfiler::TestIsDoneProfiling( - ls, lp, latency_threshold_ms) == true); - } - - SUBCASE("test stability from is done profiling") - { - ls.infer_per_sec = {1.0, 1000.0, 500.0}; - ls.latencies = {1, 1, 1}; - lp.stability_window = 3; - lp.stability_threshold = 0.1; - uint64_t latency_threshold_ms = 1; - - CHECK( - TestInferenceProfiler::TestIsDoneProfiling( - ls, lp, latency_threshold_ms) == false); - ls.infer_per_sec = {500.0, 520.0, 510.0}; - - CHECK( - TestInferenceProfiler::TestIsDoneProfiling( - ls, lp, latency_threshold_ms) == true); - } - - SUBCASE("test underflow") - { - ls.infer_per_sec = {500.0, 510.0}; - ls.latencies = {1, 1}; - lp.stability_window = 3; - lp.stability_threshold = 0.1; - uint64_t latency_threshold_ms = 1; - CHECK( - TestInferenceProfiler::TestIsDoneProfiling( - ls, lp, latency_threshold_ms) == false); - } -} - -TEST_CASE("test mocking") -{ - using testing::AtLeast; - using testing::Return; - MockInferenceProfiler mip; - - EXPECT_CALL(mip, IncludeServerStats()) - .Times(AtLeast(1)) - .WillOnce(Return(false)); - - CHECK(mip.IncludeServerStats() == false); -} - -TEST_CASE("testing the GetMeanAndStdDev function") -{ - uint64_t avg_latency_ns{0}; - uint64_t std_dev_latency_us{0}; - - SUBCASE("calculation using small latencies") - { - std::vector latencies{100000, 200000, 50000}; - std::tie(avg_latency_ns, std_dev_latency_us) = - TestInferenceProfiler::GetMeanAndStdDev(latencies); - CHECK(avg_latency_ns == 116666); - CHECK(std_dev_latency_us == 76); - } - - SUBCASE("calculation using big latencies") - { - // Squaring these would exceed UINT64_MAX. - std::vector latencies{4300000000, 4400000000, 5000000000}; - std::tie(avg_latency_ns, std_dev_latency_us) = - TestInferenceProfiler::GetMeanAndStdDev(latencies); - CHECK(avg_latency_ns == 4566666666); - CHECK(std_dev_latency_us == 378593); - } - - SUBCASE("calculation using one latency") - { - // Edge case should set standard deviation to near infinity - std::vector latencies{100}; - std::tie(avg_latency_ns, std_dev_latency_us) = - TestInferenceProfiler::GetMeanAndStdDev(latencies); - CHECK(avg_latency_ns == 100); - CHECK(std_dev_latency_us == UINT64_MAX); - } -} - -TEST_CASE("testing the MergeMetrics function") -{ - TestInferenceProfiler tip{}; - Metrics metrics_1{}, metrics_2{}, merged_metrics{}; - - SUBCASE("all metrics present") - { - metrics_1.gpu_utilization_per_gpu["gpu0"] = 0.45; - metrics_2.gpu_utilization_per_gpu["gpu0"] = 0.52; - - metrics_1.gpu_power_usage_per_gpu["gpu0"] = 70.0; - metrics_2.gpu_power_usage_per_gpu["gpu0"] = 84.5; - - metrics_1.gpu_memory_used_bytes_per_gpu["gpu0"] = 10000; - metrics_2.gpu_memory_used_bytes_per_gpu["gpu0"] = 12000; - - metrics_1.gpu_memory_total_bytes_per_gpu["gpu0"] = 100000; - metrics_2.gpu_memory_total_bytes_per_gpu["gpu0"] = 100000; - - const std::vector> all_metrics{ - metrics_1, metrics_2}; - - tip.MergeMetrics(all_metrics, merged_metrics); - CHECK(merged_metrics.gpu_utilization_per_gpu.size() == 1); - CHECK(merged_metrics.gpu_power_usage_per_gpu.size() == 1); - CHECK(merged_metrics.gpu_memory_used_bytes_per_gpu.size() == 1); - CHECK(merged_metrics.gpu_memory_total_bytes_per_gpu.size() == 1); - CHECK( - merged_metrics.gpu_utilization_per_gpu["gpu0"] == - doctest::Approx(0.485)); - CHECK( - merged_metrics.gpu_power_usage_per_gpu["gpu0"] == - doctest::Approx(77.25)); - CHECK(merged_metrics.gpu_memory_used_bytes_per_gpu["gpu0"] == 12000); - 
CHECK(merged_metrics.gpu_memory_total_bytes_per_gpu["gpu0"] == 100000); - } - - SUBCASE("missing multiple metrics") - { - metrics_1.gpu_utilization_per_gpu["gpu0"] = 0.45; - metrics_2.gpu_utilization_per_gpu["gpu0"] = 0.52; - - metrics_1.gpu_memory_used_bytes_per_gpu["gpu0"] = 10000; - metrics_2.gpu_memory_used_bytes_per_gpu["gpu0"] = 12000; - - const std::vector> all_metrics{ - metrics_1, metrics_2}; - - tip.MergeMetrics(all_metrics, merged_metrics); - CHECK(merged_metrics.gpu_utilization_per_gpu.size() == 1); - CHECK(merged_metrics.gpu_power_usage_per_gpu.size() == 0); - CHECK(merged_metrics.gpu_memory_used_bytes_per_gpu.size() == 1); - CHECK(merged_metrics.gpu_memory_total_bytes_per_gpu.size() == 0); - CHECK( - merged_metrics.gpu_utilization_per_gpu["gpu0"] == - doctest::Approx(0.485)); - CHECK(merged_metrics.gpu_memory_used_bytes_per_gpu["gpu0"] == 12000); - } -} - -TEST_CASE("testing the GetMetricAveragePerGPU function") -{ - TestInferenceProfiler tip{}; - std::map metric_averages{}; - - SUBCASE("all GPUs present") - { - const std::map metric_1{ - {"gpu0", 0.45}, {"gpu1", 0.23}}, - metric_2{{"gpu0", 0.52}, {"gpu1", 0.27}}, - metric_3{{"gpu0", 0.56}, {"gpu1", 0.30}}; - - const std::vector< - std::reference_wrapper>> - all_metrics{metric_1, metric_2, metric_3}; - - tip.GetMetricAveragePerGPU(all_metrics, metric_averages); - - CHECK(metric_averages.size() == 2); - CHECK(metric_averages["gpu0"] == doctest::Approx(0.51)); - CHECK(metric_averages["gpu1"] == doctest::Approx(0.26666)); - } - - SUBCASE("missing one GPU from one metric") - { - const std::map metric_1{ - {"gpu0", 0.45}, {"gpu1", 0.23}}, - metric_2{{"gpu0", 0.52}}, metric_3{{"gpu0", 0.56}, {"gpu1", 0.30}}; - - const std::vector< - std::reference_wrapper>> - all_metrics{metric_1, metric_2, metric_3}; - - tip.GetMetricAveragePerGPU(all_metrics, metric_averages); - - CHECK(metric_averages.size() == 2); - CHECK(metric_averages["gpu0"] == doctest::Approx(0.51)); - CHECK(metric_averages["gpu1"] == doctest::Approx(0.265)); - } -} - -TEST_CASE("testing the GetMetricMaxPerGPU function") -{ - TestInferenceProfiler tip{}; - std::map metric_maxes{}; - - SUBCASE("all GPUs present") - { - const std::map metric_1{{"gpu0", 10}, {"gpu1", 55}}, - metric_2{{"gpu0", 12}, {"gpu1", 84}}, - metric_3{{"gpu0", 15}, {"gpu1", 47}}; - - const std::vector< - std::reference_wrapper>> - all_metrics{metric_1, metric_2, metric_3}; - - tip.GetMetricMaxPerGPU(all_metrics, metric_maxes); - - CHECK(metric_maxes.size() == 2); - CHECK(metric_maxes["gpu0"] == 15); - CHECK(metric_maxes["gpu1"] == 84); - } - - SUBCASE("missing one GPU from one metric") - { - const std::map metric_1{{"gpu0", 10}, {"gpu1", 55}}, - metric_2{{"gpu0", 12}}, metric_3{{"gpu0", 15}, {"gpu1", 47}}; - - const std::vector< - std::reference_wrapper>> - all_metrics{metric_1, metric_2, metric_3}; - - tip.GetMetricMaxPerGPU(all_metrics, metric_maxes); - - CHECK(metric_maxes.size() == 2); - CHECK(metric_maxes["gpu0"] == 15); - CHECK(metric_maxes["gpu1"] == 55); - } -} - -TEST_CASE("testing the GetMetricFirstPerGPU function") -{ - TestInferenceProfiler tip{}; - std::map metric_firsts{}; - - SUBCASE("all GPUs present") - { - const std::map metric_1{{"gpu0", 10}, {"gpu1", 55}}, - metric_2{{"gpu0", 12}, {"gpu1", 84}}, - metric_3{{"gpu0", 15}, {"gpu1", 47}}; - - const std::vector< - std::reference_wrapper>> - all_metrics{metric_1, metric_2, metric_3}; - - tip.GetMetricFirstPerGPU(all_metrics, metric_firsts); - - CHECK(metric_firsts.size() == 2); - CHECK(metric_firsts["gpu0"] == 10); - 
CHECK(metric_firsts["gpu1"] == 55); - } - - SUBCASE("missing one GPU from one metric") - { - const std::map metric_1{{"gpu0", 10}}, - metric_2{{"gpu0", 12}, {"gpu1", 84}}, - metric_3{{"gpu0", 15}, {"gpu1", 47}}; - - const std::vector< - std::reference_wrapper>> - all_metrics{metric_1, metric_2, metric_3}; - - tip.GetMetricFirstPerGPU(all_metrics, metric_firsts); - - CHECK(metric_firsts.size() == 2); - CHECK(metric_firsts["gpu0"] == 10); - CHECK(metric_firsts["gpu1"] == 84); - } -} - -TEST_CASE("test the ReportPrometheusMetrics function") -{ - Metrics metrics{}; - std::stringstream captured_cout; - std::streambuf* old_cout{std::cout.rdbuf(captured_cout.rdbuf())}; - - SUBCASE("regular output") - { - metrics.gpu_utilization_per_gpu["gpu0"] = 0.45; - metrics.gpu_utilization_per_gpu["gpu1"] = 0.52; - - metrics.gpu_power_usage_per_gpu["gpu0"] = 70.0; - metrics.gpu_power_usage_per_gpu["gpu1"] = 84.5; - - metrics.gpu_memory_used_bytes_per_gpu["gpu0"] = 10000; - metrics.gpu_memory_used_bytes_per_gpu["gpu1"] = 12000; - - metrics.gpu_memory_total_bytes_per_gpu["gpu0"] = 100000; - metrics.gpu_memory_total_bytes_per_gpu["gpu1"] = 100000; - - cb::Error result{ReportPrometheusMetrics(metrics)}; - - std::cout.rdbuf(old_cout); - - CHECK(result.Err() == SUCCESS); - CHECK( - captured_cout.str() == - " Avg GPU Utilization:\n" - " gpu0 : 45%\n" - " gpu1 : 52%\n" - " Avg GPU Power Usage:\n" - " gpu0 : 70 watts\n" - " gpu1 : 84.5 watts\n" - " Max GPU Memory Usage:\n" - " gpu0 : 10000 bytes\n" - " gpu1 : 12000 bytes\n" - " Total GPU Memory:\n" - " gpu0 : 100000 bytes\n" - " gpu1 : 100000 bytes\n"); - } - - SUBCASE("too many GPUs") - { - const size_t num_gpus{17}; - for (size_t gpu_idx{0}; gpu_idx < num_gpus; gpu_idx++) { - const auto& gpu_key{"gpu" + std::to_string(gpu_idx)}; - metrics.gpu_utilization_per_gpu[gpu_key] = 0.5; - metrics.gpu_power_usage_per_gpu[gpu_key] = 75.5; - metrics.gpu_memory_used_bytes_per_gpu[gpu_key] = 12500; - metrics.gpu_memory_total_bytes_per_gpu[gpu_key] = 150000; - } - - cb::Error result{ReportPrometheusMetrics(metrics)}; - - std::cout.rdbuf(old_cout); - - CHECK(result.Err() == SUCCESS); - CHECK( - captured_cout.str() == - "Too many GPUs on system to print out individual Prometheus metrics, " - "use the CSV output feature to see metrics.\n"); - } -} - -TEST_CASE("InferenceProfiler: Test SummarizeOverhead") -{ - TestInferenceProfiler tip{}; - PerfStatus status; - SUBCASE("normal") - { - tip.SummarizeOverhead(100, 63, status); - CHECK(status.overhead_pct == doctest::Approx(37)); - } - SUBCASE("normal 2") - { - tip.SummarizeOverhead(234, 56, status); - CHECK(status.overhead_pct == doctest::Approx(76.068)); - } - SUBCASE("overflow") - { - tip.SummarizeOverhead(100, 101, status); - CHECK(status.overhead_pct == doctest::Approx(0)); - } -} - -TEST_CASE( - "summarize_send_request_rate: testing the SummarizeSendRequestRate " - "function") -{ - TestInferenceProfiler tip{}; - PerfStatus perf_status; - - SUBCASE("invalid zero window duration") - { - double window_duration_s{0.0}; - size_t num_sent_requests{0}; - CHECK_THROWS_WITH_AS( - tip.SummarizeSendRequestRate( - window_duration_s, num_sent_requests, perf_status), - "window_duration_s must be positive", std::runtime_error); - } - - SUBCASE("invalid negative window duration") - { - double window_duration_s{-1.0}; - size_t num_sent_requests{0}; - CHECK_THROWS_WITH_AS( - tip.SummarizeSendRequestRate( - window_duration_s, num_sent_requests, perf_status), - "window_duration_s must be positive", std::runtime_error); - } - - SUBCASE("regular case") - 
{ - double window_duration_s{2.0}; - size_t num_sent_requests{100}; - tip.SummarizeSendRequestRate( - window_duration_s, num_sent_requests, perf_status); - CHECK(perf_status.send_request_rate == doctest::Approx(50)); - } -} - -TEST_CASE("determine_stats_model_version: testing DetermineStatsModelVersion()") -{ - TestInferenceProfiler tip{}; - cb::ModelIdentifier model_identifier; - cb::ModelStatistics old_stats; - cb::ModelStatistics new_stats; - old_stats.queue_count_ = 1; - new_stats.queue_count_ = 2; - - int64_t expected_model_version; - bool expect_warning = false; - bool expect_exception = false; - - std::map start_stats_map; - std::map end_stats_map; - - SUBCASE("One entry - unspecified - valid and in start") - { - model_identifier = {"ModelA", ""}; - start_stats_map.insert({{"ModelA", "3"}, old_stats}); - end_stats_map.insert({{"ModelA", "3"}, new_stats}); - expected_model_version = 3; - } - SUBCASE("One entry - unspecified - valid and not in start") - { - model_identifier = {"ModelA", ""}; - end_stats_map.insert({{"ModelA", "3"}, new_stats}); - expected_model_version = 3; - } - SUBCASE("One entry - unspecified - invalid") - { - model_identifier = {"ModelA", ""}; - start_stats_map.insert({{"ModelA", "3"}, old_stats}); - end_stats_map.insert({{"ModelA", "3"}, old_stats}); - expect_exception = true; - expected_model_version = -1; - } - SUBCASE("One entry - match") - { - model_identifier = {"ModelA", "3"}; - end_stats_map.insert({{"ModelA", "3"}, new_stats}); - expected_model_version = 3; - } - SUBCASE("One entry - miss") - { - model_identifier = {"ModelA", "2"}; - end_stats_map.insert({{"ModelA", "3"}, new_stats}); - expect_exception = true; - expected_model_version = -1; - } - SUBCASE("Two entries - unspecified case 1") - { - model_identifier = {"ModelA", ""}; - start_stats_map.insert({{"ModelA", "3"}, old_stats}); - start_stats_map.insert({{"ModelA", "4"}, old_stats}); - end_stats_map.insert({{"ModelA", "3"}, new_stats}); - end_stats_map.insert({{"ModelA", "4"}, old_stats}); - expected_model_version = 3; - } - SUBCASE("Two entries - unspecified case 2") - { - model_identifier = {"ModelA", ""}; - start_stats_map.insert({{"ModelA", "3"}, old_stats}); - start_stats_map.insert({{"ModelA", "4"}, old_stats}); - end_stats_map.insert({{"ModelA", "3"}, old_stats}); - end_stats_map.insert({{"ModelA", "4"}, new_stats}); - expected_model_version = 4; - } - SUBCASE("Two entries - unspecified case 3") - { - model_identifier = {"ModelA", ""}; - start_stats_map.insert({{"ModelA", "3"}, old_stats}); - start_stats_map.insert({{"ModelA", "4"}, old_stats}); - end_stats_map.insert({{"ModelA", "3"}, new_stats}); - end_stats_map.insert({{"ModelA", "4"}, new_stats}); - expected_model_version = 4; - expect_warning = 1; - } - SUBCASE("Two entries - specified hit") - { - model_identifier = {"ModelA", "3"}; - end_stats_map.insert({{"ModelA", "3"}, old_stats}); - end_stats_map.insert({{"ModelA", "4"}, old_stats}); - expected_model_version = 3; - } - SUBCASE("Two entries - specified miss") - { - model_identifier = {"ModelA", "2"}; - end_stats_map.insert({{"ModelA", "3"}, old_stats}); - end_stats_map.insert({{"ModelA", "4"}, old_stats}); - expected_model_version = -1; - expect_exception = true; - } - - SUBCASE("One entry - version -1 - valid and in start") - { - model_identifier = {"ModelA", "-1"}; - start_stats_map.insert({{"ModelA", "3"}, old_stats}); - end_stats_map.insert({{"ModelA", "3"}, new_stats}); - cb::Error status = tip.SetTopLevelResponseCaching(true); - CHECK(status.IsOk()); - expected_model_version = 
-1; - } - - SUBCASE("One entry - version -1 - not valid") - { - model_identifier = {"ModelA", "-1"}; - end_stats_map.insert({{"ModelA", "3"}, old_stats}); - cb::Error status = tip.SetTopLevelResponseCaching(false); - CHECK(status.IsOk()); - expected_model_version = -1; - expect_exception = true; - } - - std::stringstream captured_cerr; - std::streambuf* old = std::cerr.rdbuf(captured_cerr.rdbuf()); - - int64_t result_model_version; - cb::Error result; - result = tip.DetermineStatsModelVersion( - model_identifier, start_stats_map, end_stats_map, &result_model_version); - - CHECK(result_model_version == expected_model_version); - CHECK(result.IsOk() != expect_exception); - CHECK(captured_cerr.str().empty() != expect_warning); - - std::cerr.rdbuf(old); -} - -TEST_CASE( - "valid_latency_measurement: testing the ValidLatencyMeasurement function") -{ - MockInferenceProfiler mock_inference_profiler{}; - - SUBCASE("testing logic relevant to response throughput metric") - { - auto clock_epoch{std::chrono::time_point()}; - - auto request1_timestamp{clock_epoch + std::chrono::nanoseconds(1)}; - auto response1_timestamp{clock_epoch + std::chrono::nanoseconds(2)}; - auto response2_timestamp{clock_epoch + std::chrono::nanoseconds(3)}; - auto request_record1{RequestRecord( - request1_timestamp, - std::vector>{ - response1_timestamp, response2_timestamp}, - {}, {}, 0, false, 0, false)}; - - auto request2_timestamp{clock_epoch + std::chrono::nanoseconds(4)}; - RequestRecord request_record2{}; - size_t expected_response_count{0}; - - SUBCASE("second request has three data responses") - { - auto response3_timestamp{clock_epoch + std::chrono::nanoseconds(5)}; - auto response4_timestamp{clock_epoch + std::chrono::nanoseconds(6)}; - auto response5_timestamp{clock_epoch + std::chrono::nanoseconds(7)}; - request_record2 = RequestRecord( - request2_timestamp, - std::vector>{ - response3_timestamp, response4_timestamp, response5_timestamp}, - {}, {}, 0, false, 0, false); - expected_response_count = 5; - } - SUBCASE("second request has two data responses and one null response") - { - auto response3_timestamp{clock_epoch + std::chrono::nanoseconds(5)}; - auto response4_timestamp{clock_epoch + std::chrono::nanoseconds(6)}; - auto response5_timestamp{clock_epoch + std::chrono::nanoseconds(7)}; - request_record2 = RequestRecord( - request2_timestamp, - std::vector>{ - response3_timestamp, response4_timestamp, response5_timestamp}, - {}, {}, 0, false, 0, true); - expected_response_count = 4; - } - SUBCASE("second request has one null response") - { - request_record2 = RequestRecord( - request2_timestamp, - std::vector>{}, {}, - {}, 0, false, 0, true); - expected_response_count = 2; - } - - mock_inference_profiler.all_request_records_ = { - request_record1, request_record2}; - - const std::pair valid_range{ - std::make_pair(0, UINT64_MAX)}; - size_t valid_sequence_count{0}; - size_t delayed_request_count{0}; - std::vector valid_latencies{}; - size_t response_count{0}; - std::vector valid_requests{}; - - mock_inference_profiler.ValidLatencyMeasurement( - valid_range, valid_sequence_count, delayed_request_count, - &valid_latencies, response_count, valid_requests); - - CHECK(response_count == expected_response_count); - } - SUBCASE("testing logic relevant to valid request output") - { - auto clock_epoch{std::chrono::time_point()}; - - auto request1_timestamp{clock_epoch + std::chrono::nanoseconds(1)}; - auto response1_timestamp{clock_epoch + std::chrono::nanoseconds(2)}; - auto request_record1{RequestRecord( - 
request1_timestamp, - std::vector>{ - response1_timestamp}, - {}, {}, 0, false, 0, false)}; - - auto request2_timestamp{clock_epoch + std::chrono::nanoseconds(3)}; - auto response2_timestamp{clock_epoch + std::chrono::nanoseconds(4)}; - auto request_record2{RequestRecord( - request2_timestamp, - std::vector>{ - response2_timestamp}, - {}, {}, 0, false, 0, false)}; - - auto request3_timestamp{clock_epoch + std::chrono::nanoseconds(5)}; - auto response3_timestamp{clock_epoch + std::chrono::nanoseconds(6)}; - auto request_record3{RequestRecord( - request3_timestamp, - std::vector>{ - response3_timestamp}, - {}, {}, 0, false, 0, false)}; - - mock_inference_profiler.all_request_records_ = { - request_record1, request_record2, request_record3}; - - const std::pair valid_range{std::make_pair(0, 4)}; - size_t valid_sequence_count{0}; - size_t delayed_request_count{0}; - std::vector valid_latencies{}; - size_t response_count{0}; - std::vector valid_requests{}; - - mock_inference_profiler.ValidLatencyMeasurement( - valid_range, valid_sequence_count, delayed_request_count, - &valid_latencies, response_count, valid_requests); - - CHECK(valid_requests.size() == 2); - CHECK(valid_requests[0].start_time_ == request1_timestamp); - CHECK(valid_requests[1].start_time_ == request2_timestamp); - } -} - -TEST_CASE( - "merge_perf_status_reports: testing the MergePerfStatusReports function") -{ - MockInferenceProfiler mock_inference_profiler{}; - - SUBCASE("testing logic relevant to response throughput metric") - { - PerfStatus perf_status1{}; - perf_status1.client_stats.response_count = 8; - perf_status1.client_stats.duration_ns = 2000000000; - - PerfStatus perf_status2{}; - perf_status2.client_stats.response_count = 10; - perf_status2.client_stats.duration_ns = 4000000000; - - std::deque perf_status{perf_status1, perf_status2}; - PerfStatus summary_status{}; - - cb::Error error{}; - - EXPECT_CALL( - mock_inference_profiler, MergeServerSideStats(testing::_, testing::_)) - .WillOnce(testing::Return(cb::Error::Success)); - EXPECT_CALL( - mock_inference_profiler, SummarizeLatency(testing::_, testing::_)) - .WillOnce(testing::Return(cb::Error::Success)); - - error = mock_inference_profiler.MergePerfStatusReports( - perf_status, summary_status); - - REQUIRE(error.IsOk() == true); - CHECK(summary_status.client_stats.response_count == 18); - CHECK( - summary_status.client_stats.responses_per_sec == doctest::Approx(3.0)); - } -} - -TEST_CASE("clamp window") -{ - TestInferenceProfiler tip{}; - std::vector reqs{}; - - auto clock_epoch{std::chrono::time_point()}; - - auto request1_timestamp{clock_epoch + std::chrono::nanoseconds(5)}; - auto response1_timestamp{clock_epoch + std::chrono::nanoseconds(20)}; - - reqs.emplace_back( - request1_timestamp, - std::vector>{ - response1_timestamp}); - - auto request2_timestamp{clock_epoch + std::chrono::nanoseconds(3)}; - auto response2_timestamp{clock_epoch + std::chrono::nanoseconds(15)}; - reqs.emplace_back( - request2_timestamp, - std::vector>{ - response2_timestamp}); - - auto request3_timestamp{clock_epoch + std::chrono::nanoseconds(7)}; - auto response3_timestamp{clock_epoch + std::chrono::nanoseconds(17)}; - reqs.emplace_back( - request3_timestamp, - std::vector>{ - response3_timestamp}); - - auto window = tip.ClampWindow(reqs); - - CHECK(window.first == 3); - CHECK(window.second == 20); -} - -TEST_CASE("summarize_client_stat: testing the SummarizeClientStat function") -{ - MockInferenceProfiler mock_inference_profiler{}; - - SUBCASE("testing logic relevant to response 
throughput metric") - { - mock_inference_profiler.parser_ = std::make_shared(); - mock_inference_profiler.manager_ = std::make_unique(); - - const cb::InferStat start_stat{}; - const cb::InferStat end_stat{}; - const uint64_t duration_ns{2000000000}; - const size_t valid_request_count{0}; - const size_t delayed_request_count{0}; - const size_t valid_sequence_count{0}; - const size_t response_count{8}; - PerfStatus summary{}; - - cb::Error error{}; - - error = mock_inference_profiler.SummarizeClientStat( - start_stat, end_stat, duration_ns, valid_request_count, - delayed_request_count, valid_sequence_count, response_count, summary); - - REQUIRE(error.IsOk() == true); - CHECK(summary.client_stats.response_count == 8); - CHECK(summary.client_stats.responses_per_sec == doctest::Approx(4.0)); - } -} -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/test_load_manager.cc b/src/c++/perf_analyzer/test_load_manager.cc deleted file mode 100644 index 3908374ed..000000000 --- a/src/c++/perf_analyzer/test_load_manager.cc +++ /dev/null @@ -1,460 +0,0 @@ -// Copyright 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
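Several of these tests assert on warning behavior by swapping the stream buffer of std::cerr and inspecting what was written, as in DetermineStatsModelVersion above and the MetricsManager tests further below. A minimal RAII sketch of that capture pattern (the CerrCapture helper name is illustrative, not part of the deleted sources):

#include <iostream>
#include <sstream>
#include <string>

// Illustrative RAII helper: redirect std::cerr into an internal buffer for
// the lifetime of the object, then restore the original stream buffer.
class CerrCapture {
 public:
  CerrCapture() : old_buf_(std::cerr.rdbuf(captured_.rdbuf())) {}
  ~CerrCapture() { std::cerr.rdbuf(old_buf_); }
  std::string str() const { return captured_.str(); }

 private:
  std::stringstream captured_;  // constructed before old_buf_ is initialized
  std::streambuf* old_buf_;
};

int main()
{
  CerrCapture capture;
  std::cerr << "WARNING: example warning\n";
  return capture.str().empty() ? 1 : 0;  // 0 if the warning was captured
}

The RAII form restores the buffer even on early returns or exceptions, which the manual rdbuf swap in the tests has to undo by hand at the end of each test.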
- -#include "command_line_parser.h" -#include "doctest.h" -#include "load_manager.h" -#include "test_load_manager_base.h" - -namespace cb = triton::perfanalyzer::clientbackend; - -namespace triton { namespace perfanalyzer { - -namespace { - -bool -operator==(const RequestRecord& lhs, const RequestRecord& rhs) -{ - return std::tie( - lhs.start_time_, lhs.response_timestamps_, lhs.request_inputs_, - lhs.response_outputs_, lhs.sequence_end_, lhs.delayed_, - lhs.sequence_id_, lhs.has_null_last_response_) == - std::tie( - rhs.start_time_, rhs.response_timestamps_, rhs.request_inputs_, - rhs.response_outputs_, rhs.sequence_end_, rhs.delayed_, - rhs.sequence_id_, rhs.has_null_last_response_); -} - -} // namespace - -class TestLoadManager : public TestLoadManagerBase, public LoadManager { - public: - ~TestLoadManager() = default; - TestLoadManager( - PerfAnalyzerParameters params, bool is_sequence_model = false, - bool is_decoupled_model = false) - : TestLoadManagerBase(params, is_sequence_model, is_decoupled_model), - LoadManager( - params.async, params.streaming, params.batch_size, - params.max_threads, params.shared_memory_type, - params.output_shm_size, GetParser(), GetFactory(), - params.request_parameters) - { - } - - std::vector<std::shared_ptr<ThreadStat>>& threads_stat_{ - LoadManager::threads_stat_}; - - /// Test the public function CheckHealth - /// - /// It will return a bad result if any of the thread stats - /// have a bad status or cb_status - /// - void TestCheckHealth() - { - auto good = std::make_shared<ThreadStat>(); - good->status_ = cb::Error::Success; - good->cb_status_ = cb::Error::Success; - - auto bad_status = std::make_shared<ThreadStat>(); - bad_status->status_ = cb::Error::Failure; - bad_status->cb_status_ = cb::Error::Success; - - auto bad_cb_status = std::make_shared<ThreadStat>(); - bad_cb_status->status_ = cb::Error::Success; - bad_cb_status->cb_status_ = cb::Error::Failure; - - threads_stat_.clear(); - bool expect_ok = true; - - SUBCASE("Empty") - { - expect_ok = true; - } - SUBCASE("Good") - { - // Good entries: expect OK - threads_stat_.push_back(good); - threads_stat_.push_back(good); - expect_ok = true; - } - SUBCASE("BadStatus") - { - // Bad Status: expect not OK - threads_stat_.push_back(good); - threads_stat_.push_back(bad_status); - expect_ok = false; - } - SUBCASE("BadCbStatus") - { - // Bad cb_Status: expect not OK - threads_stat_.push_back(bad_cb_status); - threads_stat_.push_back(good); - expect_ok = false; - } - SUBCASE("BadBothStatus") - { - threads_stat_.push_back(bad_status); - threads_stat_.push_back(good); - threads_stat_.push_back(bad_cb_status); - expect_ok = false; - } - - CHECK(CheckHealth().IsOk() == expect_ok); - } - - /// Test the public function SwapRequestRecords - /// - /// It will gather all request records from the thread_stats - /// and return them, and clear the thread_stats request records - /// - void TestSwapRequestRecords() - { - using time_point = std::chrono::time_point<std::chrono::system_clock>; - using ns = std::chrono::nanoseconds; - auto request_record1 = RequestRecord( - time_point(ns(1)), std::vector<time_point>{time_point(ns(2))}, {}, {}, - 0, false, 0, false); - auto request_record2 = RequestRecord( - time_point(ns(3)), std::vector<time_point>{time_point(ns(4))}, {}, {}, - 0, false, 0, false); - auto request_record3 = RequestRecord( - time_point(ns(5)), std::vector<time_point>{time_point(ns(6))}, {}, {}, - 0, false, 0, false); - - std::vector<RequestRecord> source_request_records; - - SUBCASE("No threads") - { - auto ret = SwapRequestRecords(source_request_records); - CHECK(source_request_records.size() == 0); - CHECK(ret.IsOk() == true); - } - 
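The operator== above compares RequestRecord member-by-member through std::tie, which packs references into tuples and reuses the tuple's element-wise equality. The same idiom on a hypothetical Record struct, for illustration:

#include <cassert>
#include <string>
#include <tuple>

// Hypothetical aggregate, used only to illustrate the std::tie idiom.
struct Record {
  int id;
  std::string name;
  bool delayed;
};

// std::tie builds a tuple of references; the tuple's operator== then
// compares the members pairwise, in the order they are listed.
bool operator==(const Record& lhs, const Record& rhs)
{
  return std::tie(lhs.id, lhs.name, lhs.delayed) ==
         std::tie(rhs.id, rhs.name, rhs.delayed);
}

int main()
{
  assert((Record{1, "x", false} == Record{1, "x", false}));
  return 0;
}

Keeping the member list in one expression makes it harder for the comparison to drift out of sync when a field is added, though it still has to be updated by hand.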
SUBCASE("Source has request records") - { - // Any request records in the vector passed in to SwapRequestRecords will - // be dropped on the floor - // - source_request_records.push_back(request_record1); - auto ret = SwapRequestRecords(source_request_records); - CHECK(source_request_records.size() == 0); - CHECK(ret.IsOk() == true); - } - SUBCASE("One thread") - { - auto stat1 = std::make_shared(); - stat1->request_records_.push_back(request_record1); - stat1->request_records_.push_back(request_record2); - stat1->request_records_.push_back(request_record3); - threads_stat_.push_back(stat1); - - CHECK(stat1->request_records_.size() == 3); - auto ret = SwapRequestRecords(source_request_records); - CHECK(stat1->request_records_.size() == 0); - - REQUIRE(source_request_records.size() == 3); - CHECK(source_request_records[0] == request_record1); - CHECK(source_request_records[1] == request_record2); - CHECK(source_request_records[2] == request_record3); - CHECK(ret.IsOk() == true); - } - SUBCASE("Multiple threads") - { - auto stat1 = std::make_shared(); - stat1->request_records_.push_back(request_record2); - - auto stat2 = std::make_shared(); - stat2->request_records_.push_back(request_record1); - stat2->request_records_.push_back(request_record3); - - threads_stat_.push_back(stat1); - threads_stat_.push_back(stat2); - - CHECK(stat1->request_records_.size() == 1); - CHECK(stat2->request_records_.size() == 2); - auto ret = SwapRequestRecords(source_request_records); - CHECK(stat1->request_records_.size() == 0); - CHECK(stat2->request_records_.size() == 0); - - REQUIRE(source_request_records.size() == 3); - CHECK(source_request_records[0] == request_record2); - CHECK(source_request_records[1] == request_record1); - CHECK(source_request_records[2] == request_record3); - CHECK(ret.IsOk() == true); - } - } - - /// Test the public function GetAccumulatedClientStat - /// - /// It will accumulate all contexts_stat data from all threads_stat - /// - void TestGetAccumulatedClientStat() - { - cb::InferStat result_stat; - - SUBCASE("No threads") - { - auto ret = GetAccumulatedClientStat(&result_stat); - CHECK(result_stat.completed_request_count == 0); - CHECK(result_stat.cumulative_total_request_time_ns == 0); - CHECK(result_stat.cumulative_send_time_ns == 0); - CHECK(result_stat.cumulative_receive_time_ns == 0); - CHECK(ret.IsOk() == true); - } - SUBCASE("One thread one context stat") - { - auto stat1 = std::make_shared(); - stat1->contexts_stat_.push_back(cb::InferStat()); - stat1->contexts_stat_[0].completed_request_count = 2; - stat1->contexts_stat_[0].cumulative_total_request_time_ns = 3; - stat1->contexts_stat_[0].cumulative_send_time_ns = 4; - stat1->contexts_stat_[0].cumulative_receive_time_ns = 5; - threads_stat_.push_back(stat1); - - auto ret = GetAccumulatedClientStat(&result_stat); - CHECK(result_stat.completed_request_count == 2); - CHECK(result_stat.cumulative_total_request_time_ns == 3); - CHECK(result_stat.cumulative_send_time_ns == 4); - CHECK(result_stat.cumulative_receive_time_ns == 5); - CHECK(ret.IsOk() == true); - } - SUBCASE("Multiple thread multiple contexts") - { - auto stat1 = std::make_shared(); - stat1->contexts_stat_.push_back(cb::InferStat()); - stat1->contexts_stat_.push_back(cb::InferStat()); - stat1->contexts_stat_[0].completed_request_count = 2; - stat1->contexts_stat_[0].cumulative_total_request_time_ns = 3; - stat1->contexts_stat_[0].cumulative_send_time_ns = 4; - stat1->contexts_stat_[0].cumulative_receive_time_ns = 5; - stat1->contexts_stat_[1].completed_request_count 
= 3; - stat1->contexts_stat_[1].cumulative_total_request_time_ns = 4; - stat1->contexts_stat_[1].cumulative_send_time_ns = 5; - stat1->contexts_stat_[1].cumulative_receive_time_ns = 6; - threads_stat_.push_back(stat1); - - auto stat2 = std::make_shared(); - stat2->contexts_stat_.push_back(cb::InferStat()); - stat2->contexts_stat_.push_back(cb::InferStat()); - stat2->contexts_stat_[0].completed_request_count = 7; - stat2->contexts_stat_[0].cumulative_total_request_time_ns = 8; - stat2->contexts_stat_[0].cumulative_send_time_ns = 9; - stat2->contexts_stat_[0].cumulative_receive_time_ns = 10; - stat2->contexts_stat_[1].completed_request_count = 11; - stat2->contexts_stat_[1].cumulative_total_request_time_ns = 12; - stat2->contexts_stat_[1].cumulative_send_time_ns = 13; - stat2->contexts_stat_[1].cumulative_receive_time_ns = 14; - threads_stat_.push_back(stat2); - - auto ret = GetAccumulatedClientStat(&result_stat); - // 2 + 3 + 7 + 11 - // - CHECK(result_stat.completed_request_count == 23); - // 3 + 4 + 8 + 12 - // - CHECK(result_stat.cumulative_total_request_time_ns == 27); - // 4 + 5 + 9 + 13 - // - CHECK(result_stat.cumulative_send_time_ns == 31); - // 5 + 6 + 10 + 14 - // - CHECK(result_stat.cumulative_receive_time_ns == 35); - - CHECK(ret.IsOk() == true); - } - } - - /// Test the public function CountCollectedRequests - /// - /// It will count all request records in the thread_stats (and not modify - /// the thread_stats in any way) - /// - void TestCountCollectedRequests() - { - using time_point = std::chrono::time_point; - using ns = std::chrono::nanoseconds; - auto request_record1 = RequestRecord( - time_point(ns(1)), std::vector{time_point(ns(2))}, {}, {}, - 0, false, 0, false); - auto request_record2 = RequestRecord( - time_point(ns(3)), std::vector{time_point(ns(4))}, {}, {}, - 0, false, 0, false); - auto request_record3 = RequestRecord( - time_point(ns(5)), std::vector{time_point(ns(6))}, {}, {}, - 0, false, 0, false); - - SUBCASE("No threads") - { - CHECK(CountCollectedRequests() == 0); - } - SUBCASE("One thread") - { - auto stat1 = std::make_shared(); - stat1->request_records_.push_back(request_record1); - stat1->request_records_.push_back(request_record2); - stat1->request_records_.push_back(request_record3); - threads_stat_.push_back(stat1); - - CHECK(stat1->request_records_.size() == 3); - CHECK(CountCollectedRequests() == 3); - CHECK(stat1->request_records_.size() == 3); - } - SUBCASE("Multiple threads") - { - auto stat1 = std::make_shared(); - stat1->request_records_.push_back(request_record2); - - auto stat2 = std::make_shared(); - stat2->request_records_.push_back(request_record1); - stat2->request_records_.push_back(request_record3); - - threads_stat_.push_back(stat1); - threads_stat_.push_back(stat2); - - CHECK(stat1->request_records_.size() == 1); - CHECK(stat2->request_records_.size() == 2); - CHECK(CountCollectedRequests() == 3); - CHECK(stat1->request_records_.size() == 1); - CHECK(stat2->request_records_.size() == 2); - } - } - - void TestIdle() - { - auto stat1 = std::make_shared(); - auto stat2 = std::make_shared(); - threads_stat_.push_back(stat1); - threads_stat_.push_back(stat2); - - SUBCASE("All active") - { - // If multiple threads are active, their idle times are averaged - stat1->idle_timer.idle_ns_ = 5; - stat2->idle_timer.idle_ns_ = 7; - CHECK(GetIdleTime() == 6); - ResetIdleTime(); - CHECK(GetIdleTime() == 0); - } - - SUBCASE("One inactive") - { - // If a thread has no idle time, it is considered inactive and not - // factored in to the average - 
stat1->idle_timer.idle_ns_ = 0; - stat2->idle_timer.idle_ns_ = 7; - CHECK(GetIdleTime() == 7); - ResetIdleTime(); - CHECK(GetIdleTime() == 0); - } - } -}; - -TEST_CASE("load_manager_check_health: Test the public function CheckHealth()") -{ - TestLoadManager tlm(PerfAnalyzerParameters{}); - tlm.TestCheckHealth(); -} - -TEST_CASE( - "load_manager_swap_request_records: Test the public function " - "SwapRequestRecords()") -{ - TestLoadManager tlm(PerfAnalyzerParameters{}); - tlm.TestSwapRequestRecords(); -} - -TEST_CASE( - "load_manager_get_accumulated_client_stat: Test the public function " - "GetAccumulatedClientStat()") -{ - TestLoadManager tlm(PerfAnalyzerParameters{}); - tlm.TestGetAccumulatedClientStat(); -} - -TEST_CASE( - "load_manager_count_collected_requests: Test the public function " - "CountCollectedRequests()") -{ - TestLoadManager tlm(PerfAnalyzerParameters{}); - tlm.TestCountCollectedRequests(); -} - -TEST_CASE("load_manager_batch_size: Test the public function BatchSize()") -{ - PerfAnalyzerParameters params; - - SUBCASE("batch size 0") - { - params.batch_size = 0; - } - SUBCASE("batch size 1") - { - params.batch_size = 1; - } - SUBCASE("batch size 4") - { - params.batch_size = 4; - } - - TestLoadManager tlm(params); - CHECK(tlm.BatchSize() == params.batch_size); -} - -TEST_CASE("load_manager: Test public idle time functions") -{ - PerfAnalyzerParameters params; - TestLoadManager tlm(params); - tlm.TestIdle(); -} - -TEST_CASE( - "send_request_rate_load_manager: testing the GetAndResetNumSentRequests " - "function") -{ - PerfAnalyzerParameters params{}; - - TestLoadManager tlm(params); - - std::shared_ptr thread_stat_1{std::make_shared()}; - std::shared_ptr thread_stat_2{std::make_shared()}; - - std::chrono::steady_clock::time_point start_time{ - std::chrono::steady_clock::time_point::min()}; - - thread_stat_1->num_sent_requests_ = 6; - thread_stat_2->num_sent_requests_ = 5; - - tlm.threads_stat_ = {thread_stat_1, thread_stat_2}; - - const size_t result{tlm.GetAndResetNumSentRequests()}; - - CHECK(result == 11); - CHECK(tlm.threads_stat_.size() == 2); - CHECK(tlm.threads_stat_[0]->num_sent_requests_ == 0); - CHECK(tlm.threads_stat_[1]->num_sent_requests_ == 0); -} - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/test_load_manager_base.h b/src/c++/perf_analyzer/test_load_manager_base.h deleted file mode 100644 index 6bbdf6d23..000000000 --- a/src/c++/perf_analyzer/test_load_manager_base.h +++ /dev/null @@ -1,305 +0,0 @@ -// Copyright 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#pragma once - -#include -#include - -#include "command_line_parser.h" -#include "doctest.h" -#include "mock_client_backend.h" -#include "mock_data_loader.h" -#include "mock_model_parser.h" -#include "sequence_manager.h" - -namespace cb = triton::perfanalyzer::clientbackend; - -namespace triton { namespace perfanalyzer { - -// Struct to hold the mock pieces to ingest custom json data -struct MockInputPipeline { - MockInputPipeline( - std::shared_ptr mmp, std::shared_ptr mdl) - : mock_model_parser_(mmp), mock_data_loader_(mdl) - { - } - std::shared_ptr mock_model_parser_; - std::shared_ptr mock_data_loader_; -}; - -/// Helper base class to be inherited when testing any Load Manager class -/// -class TestLoadManagerBase { - public: - TestLoadManagerBase() = default; - TestLoadManagerBase( - PerfAnalyzerParameters params, bool is_sequence_model, - bool is_decoupled_model) - : params_(params) - { - stats_ = std::make_shared(); - factory_ = std::make_shared(stats_); - parser_ = std::make_shared( - is_sequence_model, is_decoupled_model); - } - - ~TestLoadManagerBase() - { - // Reset early_exit in case any test sets it to true during execution. - early_exit = false; - } - - // Helper function to process custom json data in testing - // Creates a model tensor to pass to a mock parser which is consumed by the - // mock data loader - static MockInputPipeline ProcessCustomJsonData( - const std::string& json_str, const bool is_sequence_model = false) - { - std::shared_ptr mmp{ - std::make_shared(is_sequence_model, false)}; - ModelTensor model_tensor{}; - model_tensor.datatype_ = "INT32"; - model_tensor.is_optional_ = false; - model_tensor.is_shape_tensor_ = false; - model_tensor.name_ = "INPUT0"; - model_tensor.shape_ = {1}; - mmp->inputs_ = std::make_shared(); - (*mmp->inputs_)[model_tensor.name_] = model_tensor; - - std::shared_ptr mdl{std::make_shared()}; - mdl->ReadDataFromStr(json_str, mmp->Inputs(), mmp->Outputs()); - return MockInputPipeline{mmp, mdl}; - } - - // Set up all combinations of parameters for sequence testing - // - static PerfAnalyzerParameters GetSequenceTestParams() - { - PerfAnalyzerParameters params; - bool is_async; - - SUBCASE("Async sequence") - { - is_async = true; - params = GetSequenceTestParamsHelper(is_async); - } - SUBCASE("Sync sequence") - { - is_async = false; - params = GetSequenceTestParamsHelper(is_async); - } - return params; - } - - void CheckInferType() - { - auto stats = GetStats(); - - if (params_.async) { - if (params_.streaming) { - CHECK(stats->num_infer_calls == 0); - CHECK(stats->num_async_infer_calls == 0); - CHECK(stats->num_async_stream_infer_calls > 0); - CHECK(stats->num_start_stream_calls > 0); - } else { - CHECK(stats->num_infer_calls == 0); - CHECK(stats->num_async_infer_calls > 0); - CHECK(stats->num_async_stream_infer_calls == 0); - CHECK(stats->num_start_stream_calls == 0); - } - } else { - if (params_.streaming) { - CHECK(stats->num_infer_calls > 0); - CHECK(stats->num_async_infer_calls == 0); - 
CHECK(stats->num_async_stream_infer_calls == 0); - CHECK(stats->num_start_stream_calls > 0); - } else { - CHECK(stats->num_infer_calls > 0); - CHECK(stats->num_async_infer_calls == 0); - CHECK(stats->num_async_stream_infer_calls == 0); - CHECK(stats->num_start_stream_calls == 0); - } - } - } - - - void CheckSharedMemory( - const cb::MockClientStats::SharedMemoryStats& expected_stats) - { - auto actual_stats = GetStats(); - CHECK(expected_stats == actual_stats->memory_stats); - } - - void CheckSequences(uint64_t expected_num_seq) - { - auto stats = GetStats(); - - // Make sure no live sequences remain - CHECK(stats->sequence_status.live_seq_ids_to_length.size() == 0); - - // Make sure all seq IDs are within range - // - for (auto seq_id : stats->sequence_status.used_seq_ids) { - CHECK(seq_id >= params_.start_sequence_id); - CHECK(seq_id <= params_.start_sequence_id + params_.sequence_id_range); - } - - // Make sure that we had the correct number of concurrently live sequences - // - // If the sequence length is only 1 then there is nothing to check because - // there are never any overlapping requests -- they always immediately exit - // - if (params_.sequence_length != 1) { - expected_num_seq = std::min(expected_num_seq, params_.sequence_id_range); - CHECK(expected_num_seq == stats->sequence_status.max_live_seq_count); - } - - // Make sure that the length of each sequence is as expected - // - // All but X of them should be within 20% (The code explicitly has a 20% - // slop) of the requested sequence length, where X is the number of - // sequences (This is due to the shutdown of sequences at the end that will - // create shorter than expected sequences) - // - auto num_values = stats->sequence_status.seq_lengths.size(); - auto max_len = params_.sequence_length * 1.2; - auto min_len = params_.sequence_length * 0.8; - auto num_allowed_to_be_below_min_len = expected_num_seq; - auto num_below_min_len = 0; - - for (size_t i = 0; i < num_values; i++) { - auto len = stats->sequence_status.seq_lengths[i]; - - CHECK(len <= max_len); - if (len < min_len) { - num_below_min_len++; - } - } - CHECK(num_below_min_len <= num_allowed_to_be_below_min_len); - } - - std::shared_ptr stats_; - - protected: - PerfAnalyzerParameters params_; - std::shared_ptr factory_; - std::shared_ptr parser_; - - const std::shared_ptr& GetParser() { return parser_; } - const std::shared_ptr& GetFactory() - { - return factory_; - } - std::shared_ptr GetStats() { return stats_; } - void ResetStats() { stats_->Reset(); } - - // Verifies that the number of inferences for each sequence is n or n+1. 
- // - void CheckSequenceBalance() - { - auto first_value = -1; - auto second_value = -1; - - for (auto seq : stats_->sequence_status.seq_ids_to_count) { - auto count = seq.second; - // set first possible value for seqs - if (first_value == -1) { - first_value = count; - continue; - } - // set second possible value for seqs count - if (second_value == -1) { - if (count == first_value + 1 || count == first_value - 1) { - second_value = count; - continue; - } else if (first_value == count) { - continue; - } - } - - if (count != first_value || count != second_value) { - std::stringstream os; - os << "Sequence request counts were not balanced: "; - for (auto x : stats_->sequence_status.seq_ids_to_count) { - os << x.second << ","; - } - CHECK_MESSAGE( - (count == first_value || count == second_value), os.str()); - break; - } - } - } - - static PerfAnalyzerParameters GetSequenceTestParamsHelper(bool is_async) - { - PerfAnalyzerParameters params; - - params.async = is_async; - - // Generally we want short sequences for testing - // so we can hit the corner cases more often - // - params.sequence_length = 4; - params.max_concurrency = 8; - params.max_threads = 8; - - SUBCASE("Normal") {} - SUBCASE("sequence IDs test 1") - { - params.start_sequence_id = 1; - params.sequence_id_range = 3; - } - SUBCASE("sequence IDs test 2") - { - params.start_sequence_id = 17; - params.sequence_id_range = 8; - } - SUBCASE("num_of_sequences 1") - { - params.num_of_sequences = 1; - } - SUBCASE("less threads than seq") - { - params.num_of_sequences = 12; - } - SUBCASE("num_of_sequences 8") - { - params.num_of_sequences = 8; - // Make sequences long so we actually get 8 in flight at a time - params.sequence_length = 20; - } - SUBCASE("sequence_length 1") - { - params.sequence_length = 1; - } - SUBCASE("sequence_length 10") - { - params.sequence_length = 10; - } - return params; - } -}; -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/test_metrics_manager.cc b/src/c++/perf_analyzer/test_metrics_manager.cc deleted file mode 100644 index b6fb1eb7b..000000000 --- a/src/c++/perf_analyzer/test_metrics_manager.cc +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include -#include -#include - -#include "doctest.h" -#include "metrics_manager.h" - -namespace triton { namespace perfanalyzer { - -class TestMetricsManager : public MetricsManager { - public: - void CheckForMissingMetrics(const Metrics& metrics) - { - MetricsManager::CheckForMissingMetrics(metrics); - } - - void CheckForMetricIntervalTooShort( - const std::chrono::nanoseconds& remainder, - const std::chrono::nanoseconds& duration) - { - MetricsManager::CheckForMetricIntervalTooShort(remainder, duration); - } - - uint64_t& metrics_interval_ms_{MetricsManager::metrics_interval_ms_}; -}; - -TEST_CASE("testing the CheckForMissingMetrics function") -{ - TestMetricsManager tmm{}; - Metrics metrics{}; - std::stringstream captured_cerr; - std::streambuf* old_cerr{std::cerr.rdbuf(captured_cerr.rdbuf())}; - - // check that no warning gets printed when all metrics are present - metrics.gpu_utilization_per_gpu["gpu0"] = 0.5; - metrics.gpu_power_usage_per_gpu["gpu0"] = 50.0; - metrics.gpu_memory_used_bytes_per_gpu["gpu0"] = 1000; - metrics.gpu_memory_total_bytes_per_gpu["gpu0"] = 10000; - tmm.CheckForMissingMetrics(metrics); - CHECK(captured_cerr.str() == ""); - - // check that still no warning gets printed on a subsequent call - tmm.CheckForMissingMetrics(metrics); - CHECK(captured_cerr.str() == ""); - - // check that warning gets printed when missing metrics - metrics.gpu_utilization_per_gpu.clear(); - metrics.gpu_power_usage_per_gpu.clear(); - metrics.gpu_memory_used_bytes_per_gpu.clear(); - metrics.gpu_memory_total_bytes_per_gpu.clear(); - tmm.CheckForMissingMetrics(metrics); - CHECK( - captured_cerr.str() == - "WARNING: Unable to parse 'nv_gpu_utilization' metric.\n" - "WARNING: Unable to parse 'nv_gpu_power_usage' metric.\n" - "WARNING: Unable to parse 'nv_gpu_memory_used_bytes' metric.\n" - "WARNING: Unable to parse 'nv_gpu_memory_total_bytes' metric.\n"); - - // check that no additional warning gets printed on a subsequent call - tmm.CheckForMissingMetrics(metrics); - CHECK( - captured_cerr.str() == - "WARNING: Unable to parse 'nv_gpu_utilization' metric.\n" - "WARNING: Unable to parse 'nv_gpu_power_usage' metric.\n" - "WARNING: Unable to parse 'nv_gpu_memory_used_bytes' metric.\n" - "WARNING: Unable to parse 'nv_gpu_memory_total_bytes' metric.\n"); - - std::cerr.rdbuf(old_cerr); -} - -TEST_CASE("testing the CheckForMetricIntervalTooShort function") -{ - TestMetricsManager tmm{}; - tmm.metrics_interval_ms_ = 5; - std::chrono::nanoseconds remainder{}; - std::chrono::nanoseconds duration{}; - std::stringstream captured_cerr; - std::streambuf* old_cerr{std::cerr.rdbuf(captured_cerr.rdbuf())}; - - // check that no warning gets printed when interval is long enough - remainder = std::chrono::nanoseconds(2000000); - duration = std::chrono::nanoseconds(3000000); - tmm.CheckForMetricIntervalTooShort(remainder, duration); - CHECK(captured_cerr.str() == ""); - - // check that still no warning gets printed on a subsequent call - 
tmm.CheckForMetricIntervalTooShort(remainder, duration); - CHECK(captured_cerr.str() == ""); - - // check that warning gets printed when interval is too short - remainder = std::chrono::nanoseconds(-2000000); - duration = std::chrono::nanoseconds(7000000); - tmm.CheckForMetricIntervalTooShort(remainder, duration); - CHECK( - captured_cerr.str() == - "WARNING: Triton metrics endpoint latency (7ms) is larger than the " - "querying interval (5ms). Please try a larger querying interval via " - "`--triton-metrics-interval`.\n"); - - // check that no additional warning gets printed on a subsequent call - tmm.CheckForMetricIntervalTooShort(remainder, duration); - CHECK( - captured_cerr.str() == - "WARNING: Triton metrics endpoint latency (7ms) is larger than the " - "querying interval (5ms). Please try a larger querying interval via " - "`--triton-metrics-interval`.\n"); - - std::cerr.rdbuf(old_cerr); -} - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/test_model_parser.cc b/src/c++/perf_analyzer/test_model_parser.cc deleted file mode 100644 index dabf8c9e2..000000000 --- a/src/c++/perf_analyzer/test_model_parser.cc +++ /dev/null @@ -1,365 +0,0 @@ -// Copyright 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
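Like the files above, the tests in this file branch through doctest SUBCASEs: the enclosing TEST_CASE body is re-executed once per leaf SUBCASE, so the shared setup at the top runs fresh for every branch and per-branch state never leaks between subcases. A minimal standalone sketch of that behavior (not from the deleted sources):

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include "doctest.h"

#include <vector>

// The TEST_CASE body runs twice, once per SUBCASE; the vector is rebuilt
// from scratch on each run, so the two branches cannot interfere.
TEST_CASE("subcase re-execution")
{
  std::vector<int> v{1, 2, 3};

  SUBCASE("push") { v.push_back(4); CHECK(v.size() == 4); }
  SUBCASE("pop") { v.pop_back(); CHECK(v.size() == 2); }
}

This re-execution model is why tests here can assign expected_* variables inside each SUBCASE and then run a single shared block of CHECKs after the branches.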
- -#include <rapidjson/document.h> - -#include <memory> - -#include "client_backend/client_backend.h" -#include "constants.h" -#include "doctest.h" -#include "mock_client_backend.h" -#include "mock_model_parser.h" - -namespace cb = triton::perfanalyzer::clientbackend; - -namespace triton { namespace perfanalyzer { - -class TestModelParser { - public: - constexpr static const char* no_batching = - R"({ "name": "NoBatchingModel", "platform":"not_ensemble" })"; - - constexpr static const char* seq_batching = - R"({ "name": "SeqBatchingModel", "platform":"not_ensemble", "sequence_batching":{} })"; - - constexpr static const char* dyn_batching = - R"({ "name": "DynBatchingModel", "platform":"not_ensemble", "dynamic_batching":{} })"; - - constexpr static const char* ensemble = R"({ - "name": "EnsembleModel", - "platform": "ensemble", - "ensemble_scheduling": { - "step": [{ - "model_name": "ModelA", - "model_version": 2 - }, - { - "model_name": "ModelB", - "model_version": -1 - } - ] - } - })"; - - constexpr static const char* nested_ensemble = R"({ - "name": "ModelA", - "platform": "ensemble", - "ensemble_scheduling": { - "step": [{ - "model_name": "ModelC", - "model_version": -1 - }, - { - "model_name": "ModelD", - "model_version": -1 - } - ] - } - })"; - - static cb::Error SetJsonPtrNoSeq(rapidjson::Document* model_config) - { - model_config->Parse(no_batching); - return cb::Error::Success; - }; - - static cb::Error SetJsonPtrYesSeq(rapidjson::Document* model_config) - { - model_config->Parse(seq_batching); - return cb::Error::Success; - }; - - static cb::Error SetJsonPtrNestedEnsemble(rapidjson::Document* model_config) - { - model_config->Parse(nested_ensemble); - return cb::Error::Success; - }; -}; - -TEST_CASE("ModelParser: testing the GetInt function") -{ - int64_t integer_value{0}; - MockModelParser mmp; - - SUBCASE("valid string") - { - rapidjson::Value value("100"); - cb::Error result{mmp.GetInt(value, &integer_value)}; - CHECK(result.Err() == SUCCESS); - CHECK(integer_value == 100); - } - - SUBCASE("invalid string, alphabet") - { - rapidjson::Value value("abc"); - cb::Error result{mmp.GetInt(value, &integer_value)}; - CHECK(result.Err() == GENERIC_ERROR); - CHECK(result.Message() == "unable to convert 'abc' to integer"); - CHECK(integer_value == 0); - } - - SUBCASE("invalid string, number out of range") - { - rapidjson::Value value("9223372036854775808"); - cb::Error result{mmp.GetInt(value, &integer_value)}; - CHECK(result.Err() == GENERIC_ERROR); - CHECK( - result.Message() == - "unable to convert '9223372036854775808' to integer"); - CHECK(integer_value == 0); - } - - SUBCASE("valid int, lowest Int64") - { - rapidjson::Value value(2147483648); - cb::Error result{mmp.GetInt(value, &integer_value)}; - CHECK(result.Err() == SUCCESS); - CHECK(integer_value == 2147483648); - } - - SUBCASE("valid int, highest Int32") - { - rapidjson::Value value(2147483647); - cb::Error result{mmp.GetInt(value, &integer_value)}; - CHECK(result.Err() == SUCCESS); - CHECK(integer_value == 2147483647); - } - - SUBCASE("invalid floating point") - { - rapidjson::Value value(100.1); - cb::Error result{mmp.GetInt(value, &integer_value)}; - CHECK(result.Err() == GENERIC_ERROR); - CHECK(result.Message() == "failed to parse the integer value"); - CHECK(integer_value == 0); - } -} - -TEST_CASE( - "ModelParser: DetermineComposingModelMap" * - doctest::description( - "This test confirms that the composing model map will be correctly " - "populated by DetermineComposingModelMap()")) -{ - std::shared_ptr<cb::MockClientStats> stats = - std::make_shared<cb::MockClientStats>(); - std::unique_ptr<cb::MockClientBackend> 
mock_backend = - std::make_unique(stats); - - rapidjson::Document config; - std::vector input_bls_composing_models; - ComposingModelMap expected_composing_model_map; - - std::string parent_model_name; - - - const auto& ParameterizeListedComposingModels{[&]() { - SUBCASE("No listed composing models") {} - SUBCASE("Yes listed composing models") - { - input_bls_composing_models.push_back({"ListedModelA", ""}); - input_bls_composing_models.push_back({"ListedModelB", ""}); - expected_composing_model_map[parent_model_name].emplace( - "ListedModelA", ""); - expected_composing_model_map[parent_model_name].emplace( - "ListedModelB", ""); - } - EXPECT_CALL(*mock_backend, ModelConfig(testing::_, testing::_, testing::_)) - .WillRepeatedly(testing::WithArg<0>(TestModelParser::SetJsonPtrNoSeq)); - }}; - - SUBCASE("No Ensemble") - { - config.Parse(TestModelParser::no_batching); - parent_model_name = "NoBatchingModel"; - ParameterizeListedComposingModels(); - } - SUBCASE("Ensemble") - { - config.Parse(TestModelParser::ensemble); - parent_model_name = "EnsembleModel"; - ParameterizeListedComposingModels(); - - expected_composing_model_map["EnsembleModel"].emplace("ModelA", "2"); - expected_composing_model_map["EnsembleModel"].emplace("ModelB", ""); - EXPECT_CALL(*mock_backend, ModelConfig(testing::_, testing::_, testing::_)) - .WillRepeatedly(testing::WithArg<0>(TestModelParser::SetJsonPtrNoSeq)); - } - SUBCASE("Nested Ensemble") - { - config.Parse(TestModelParser::ensemble); - parent_model_name = "EnsembleModel"; - ParameterizeListedComposingModels(); - - expected_composing_model_map["EnsembleModel"].emplace("ModelA", "2"); - expected_composing_model_map["EnsembleModel"].emplace("ModelB", ""); - expected_composing_model_map["ModelA"].emplace("ModelC", ""); - expected_composing_model_map["ModelA"].emplace("ModelD", ""); - EXPECT_CALL(*mock_backend, ModelConfig(testing::_, testing::_, testing::_)) - .WillOnce( - testing::WithArg<0>(TestModelParser::SetJsonPtrNestedEnsemble)) - .WillRepeatedly(testing::WithArg<0>(TestModelParser::SetJsonPtrNoSeq)); - } - SUBCASE("BLS with an Ensemble") - { - config.Parse(TestModelParser::no_batching); - parent_model_name = "NoBatchingModel"; - - input_bls_composing_models.push_back({"ModelA", ""}); - input_bls_composing_models.push_back({"ModelB", ""}); - - expected_composing_model_map[parent_model_name].emplace("ModelA", ""); - expected_composing_model_map[parent_model_name].emplace("ModelB", ""); - expected_composing_model_map["ModelA"].emplace("ModelC", ""); - expected_composing_model_map["ModelA"].emplace("ModelD", ""); - EXPECT_CALL(*mock_backend, ModelConfig(testing::_, testing::_, testing::_)) - .WillOnce( - testing::WithArg<0>(TestModelParser::SetJsonPtrNestedEnsemble)) - .WillRepeatedly(testing::WithArg<0>(TestModelParser::SetJsonPtrNoSeq)); - } - - std::unique_ptr backend = std::move(mock_backend); - - MockModelParser mmp; - - mmp.DetermineComposingModelMap(input_bls_composing_models, config, backend); - - auto actual_composing_model_map = *mmp.GetComposingModelMap().get(); - CHECK(actual_composing_model_map == expected_composing_model_map); - - // Destruct gmock objects to determine gmock-related test failure - backend.reset(); -} - -TEST_CASE( - "ModelParser: determining scheduler type" * - doctest::description("This test confirms that scheduler_type_ will be set " - "correctly by DetermineSchedulerType()")) -{ - std::shared_ptr stats = - std::make_shared(); - std::unique_ptr mock_backend = - std::make_unique(stats); - - - rapidjson::Document config; - 
ModelParser::ModelSchedulerType expected_type; - - ComposingModelMap input_composing_model_map; - - - SUBCASE("No batching") - { - config.Parse(TestModelParser::no_batching); - expected_type = ModelParser::ModelSchedulerType::NONE; - } - SUBCASE("Sequence batching") - { - config.Parse(TestModelParser::seq_batching); - expected_type = ModelParser::ModelSchedulerType::SEQUENCE; - } - SUBCASE("Dynamic batching") - { - config.Parse(TestModelParser::dyn_batching); - expected_type = ModelParser::ModelSchedulerType::DYNAMIC; - } - SUBCASE("Ensemble") - { - config.Parse(TestModelParser::ensemble); - - input_composing_model_map["EnsembleModel"].emplace("ModelA", "2"); - input_composing_model_map["EnsembleModel"].emplace("ModelB", ""); - - SUBCASE("no sequences") - { - EXPECT_CALL( - *mock_backend, ModelConfig(testing::_, testing::_, testing::_)) - .WillOnce(testing::WithArg<0>(TestModelParser::SetJsonPtrNoSeq)) - .WillOnce(testing::WithArg<0>(TestModelParser::SetJsonPtrNoSeq)); - - expected_type = ModelParser::ModelSchedulerType::ENSEMBLE; - } - SUBCASE("yes sequences") - { - EXPECT_CALL( - *mock_backend, ModelConfig(testing::_, testing::_, testing::_)) - .WillOnce(testing::WithArg<0>(TestModelParser::SetJsonPtrNoSeq)) - .WillOnce(testing::WithArg<0>(TestModelParser::SetJsonPtrYesSeq)); - - expected_type = ModelParser::ModelSchedulerType::ENSEMBLE_SEQUENCE; - } - } - SUBCASE("Nested Ensemble") - { - config.Parse(TestModelParser::ensemble); - - input_composing_model_map["EnsembleModel"].emplace("ModelA", "2"); - input_composing_model_map["EnsembleModel"].emplace("ModelB", ""); - input_composing_model_map["ModelA"].emplace("ModelC", ""); - input_composing_model_map["ModelA"].emplace("ModelD", ""); - - SUBCASE("no sequences") - { - EXPECT_CALL( - *mock_backend, ModelConfig(testing::_, testing::_, testing::_)) - .WillOnce( - testing::WithArg<0>(TestModelParser::SetJsonPtrNestedEnsemble)) - .WillOnce(testing::WithArg<0>(TestModelParser::SetJsonPtrNoSeq)) - .WillOnce(testing::WithArg<0>(TestModelParser::SetJsonPtrNoSeq)) - .WillOnce(testing::WithArg<0>(TestModelParser::SetJsonPtrNoSeq)); - - expected_type = ModelParser::ModelSchedulerType::ENSEMBLE; - } - SUBCASE("yes sequences") - { - EXPECT_CALL( - *mock_backend, ModelConfig(testing::_, testing::_, testing::_)) - .WillOnce( - testing::WithArg<0>(TestModelParser::SetJsonPtrNestedEnsemble)) - .WillOnce(testing::WithArg<0>(TestModelParser::SetJsonPtrNoSeq)) - .WillOnce(testing::WithArg<0>(TestModelParser::SetJsonPtrYesSeq)) - .WillOnce(testing::WithArg<0>(TestModelParser::SetJsonPtrNoSeq)); - - expected_type = ModelParser::ModelSchedulerType::ENSEMBLE_SEQUENCE; - } - } - - std::unique_ptr backend = std::move(mock_backend); - - MockModelParser mmp; - mmp.composing_models_map_ = - std::make_shared(input_composing_model_map); - mmp.DetermineSchedulerType(config, backend); - - auto actual_type = mmp.SchedulerType(); - CHECK(actual_type == expected_type); - - // Destruct gmock objects to determine gmock-related test failure - backend.reset(); -} - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/test_perf_utils.cc b/src/c++/perf_analyzer/test_perf_utils.cc deleted file mode 100644 index 74bf6afb4..000000000 --- a/src/c++/perf_analyzer/test_perf_utils.cc +++ /dev/null @@ -1,375 +0,0 @@ -// Copyright 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include - -#include -#include - -#include "doctest.h" -#include "perf_utils.h" -#include "test_utils.h" - -namespace triton { namespace perfanalyzer { - -/// Helper class to test perf_utils.cc -/// -class TestPerfUtils { - public: - /// Given a distributionType and request rate, confirm that request pattern - /// matches what is expected. 
- /// - static void TestDistribution( - Distribution distribution_type, uint32_t request_rate) - { - std::mt19937 schedule_rng; - std::vector<int64_t> delays; - - double avg, variance; - double expected_avg, expected_variance; - - auto dist_func = GetDistributionFunction(distribution_type, request_rate); - - for (int i = 0; i < 100000; i++) { - auto delay = dist_func(schedule_rng); - delays.push_back(delay.count()); - } - - avg = CalculateAverage(delays); - variance = CalculateVariance(delays, avg); - - std::chrono::nanoseconds ns_in_one_second = - std::chrono::duration_cast<std::chrono::nanoseconds>( - std::chrono::seconds(1)); - expected_avg = ns_in_one_second.count() / request_rate; - - if (distribution_type == CONSTANT) { - expected_variance = 0; - } else { - // By definition, variance = mean for poisson - expected_variance = expected_avg; - } - - CHECK(avg == doctest::Approx(expected_avg).epsilon(0.005)); - CHECK(variance == doctest::Approx(expected_variance).epsilon(0.005)); - } - - - private: - static std::function<std::chrono::nanoseconds(std::mt19937&)> - GetDistributionFunction(Distribution type, uint32_t request_rate) - { - std::function<std::chrono::nanoseconds(std::mt19937&)> distributionFunction; - - if (type == CONSTANT) { - distributionFunction = ScheduleDistribution<Distribution::CONSTANT>(request_rate); - } else if (type == POISSON) { - distributionFunction = ScheduleDistribution<Distribution::POISSON>(request_rate); - } else { - throw std::invalid_argument("Unexpected distribution type"); - } - return distributionFunction; - } -}; - -/// Test all distributions across various request rates -/// -TEST_CASE("perf_utils: TestDistribution") -{ - std::vector<Distribution> distTypes{CONSTANT, POISSON}; - std::vector<uint32_t> requestRates{10, 100, 1000, 10000}; - - for (auto dist : distTypes) { - for (auto rate : requestRates) { - TestPerfUtils::TestDistribution(dist, rate); - } - } -} - -TEST_CASE("perf_utils: ParseTensorFormat") -{ - CHECK(ParseTensorFormat("binary") == cb::TensorFormat::BINARY); - CHECK(ParseTensorFormat("BINARY") == cb::TensorFormat::BINARY); - CHECK(ParseTensorFormat("json") == cb::TensorFormat::JSON); - CHECK(ParseTensorFormat("JSON") == cb::TensorFormat::JSON); - CHECK(ParseTensorFormat("abc") == cb::TensorFormat::UNKNOWN); - CHECK(ParseTensorFormat("") == cb::TensorFormat::UNKNOWN); -} - -TEST_CASE("perf_utils: ParseProtocol") -{ - CHECK(ParseProtocol("HTTP") == cb::ProtocolType::HTTP); - CHECK(ParseProtocol("http") == cb::ProtocolType::HTTP); - CHECK(ParseProtocol("GRPC") == cb::ProtocolType::GRPC); - CHECK(ParseProtocol("grpc") == cb::ProtocolType::GRPC); - CHECK(ParseProtocol("hhtp") == cb::ProtocolType::UNKNOWN); - CHECK(ParseProtocol("") == cb::ProtocolType::UNKNOWN); - CHECK(ParseProtocol("http2") == cb::ProtocolType::UNKNOWN); -} - -TEST_CASE("perf_utils: ConvertDTypeFromTFS") -{ - std::string datatype; - cb::Error status; - - SUBCASE("Check for correct conversion") - { - std::vector<std::pair<std::string, std::string>> tf_to_datatype{ - std::make_pair("DT_HALF", "FP16"), - std::make_pair("DT_BFLOAT16", "BF16"), - std::make_pair("DT_FLOAT", "FP32"), - std::make_pair("DT_DOUBLE", "FP64"), - std::make_pair("DT_INT32", "INT32"), - std::make_pair("DT_INT16", "INT16"), - std::make_pair("DT_UINT16", "UINT16"), - std::make_pair("DT_INT8", "INT8"), - std::make_pair("DT_UINT8", "UINT8"), - std::make_pair("DT_STRING", "BYTES"), - std::make_pair("DT_INT64", "INT64"), - std::make_pair("DT_BOOL", "BOOL"), - std::make_pair("DT_UINT32", "UINT32"), - std::make_pair("DT_UINT64", "UINT64")}; - - for (const auto& type_pair : tf_to_datatype) { - status = ConvertDTypeFromTFS(type_pair.first, &datatype); - CHECK(status.IsOk()); - CHECK(datatype == type_pair.second); - } - } - - 
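TestDistribution above checks the schedule generators against distribution moments: a constant schedule has zero spread, while a Poisson arrival process yields exponentially distributed inter-arrival delays whose mean is 1/rate and whose standard deviation equals that mean. A self-contained sketch of those properties using <random> (rate, seed, and sample count are arbitrary):

#include <cmath>
#include <cstdio>
#include <random>
#include <vector>

int main()
{
  const double request_rate = 100.0;  // requests per second
  std::mt19937 rng{12345};
  // Exponential inter-arrival delays model a Poisson arrival process;
  // the mean delay is 1 / request_rate seconds.
  std::exponential_distribution<double> delay_s{request_rate};

  const int n = 100000;
  std::vector<double> delays(n);
  double sum = 0.0;
  for (int i = 0; i < n; i++) {
    delays[i] = delay_s(rng);
    sum += delays[i];
  }
  const double mean = sum / n;

  double sq_dev = 0.0;
  for (double d : delays) {
    sq_dev += (d - mean) * (d - mean);
  }
  const double stddev = std::sqrt(sq_dev / n);

  // Both values should land near 1 / request_rate = 0.01 s.
  std::printf("mean = %.5f s, stddev = %.5f s\n", mean, stddev);
  return 0;
}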
SUBCASE("Invalid tensorflow datatype") - { - status = ConvertDTypeFromTFS("dt_bool", &datatype); - CHECK(!status.IsOk()); - CHECK(datatype == ""); - - status = ConvertDTypeFromTFS("dt_uint8", &datatype); - CHECK(!status.IsOk()); - CHECK(datatype == ""); - - status = ConvertDTypeFromTFS("abcdef", &datatype); - CHECK(!status.IsOk()); - CHECK(datatype == ""); - - status = ConvertDTypeFromTFS("", &datatype); - CHECK(!status.IsOk()); - CHECK(datatype == ""); - } -} - -TEST_CASE("perf_utils: IsDirectory") -{ - // Create a temporary directory /tmp/abcdef1234 - int status; - std::string temp_path{"/tmp/abcdef1234"}; - - CHECK(!IsDirectory(temp_path)); - - status = mkdir(temp_path.c_str(), S_IRWXU | S_IROTH | S_IXOTH); - REQUIRE(status == 0); - CHECK(IsDirectory(temp_path)); - - status = rmdir(temp_path.c_str()); - REQUIRE(status == 0); - CHECK(!IsDirectory(temp_path)); -} - -TEST_CASE("perf_utils: IsFile") -{ - // Create a temporary file /tmp/test.txt - int status; - std::string temp_path{"/tmp/test.txt"}; - - CHECK(!IsFile(temp_path)); - - std::ofstream file(temp_path); - CHECK(IsFile(temp_path)); - - std::remove(temp_path.c_str()); - CHECK(!IsFile(temp_path)); -} - -TEST_CASE("perf_utils: ByteSize") -{ - std::vector shape{3, 4, 5}; - constexpr int num_elements = 3 * 4 * 5; - - SUBCASE("Single byte elements") - { - CHECK(ByteSize(shape, "BOOL") == 1 * num_elements); - CHECK(ByteSize(shape, "INT8") == 1 * num_elements); - CHECK(ByteSize(shape, "UINT8") == 1 * num_elements); - } - - SUBCASE("2 byte elements") - { - CHECK(ByteSize(shape, "INT16") == 2 * num_elements); - CHECK(ByteSize(shape, "UINT16") == 2 * num_elements); - CHECK(ByteSize(shape, "FP16") == 2 * num_elements); - CHECK(ByteSize(shape, "BF16") == 2 * num_elements); - } - - SUBCASE("4 byte elements") - { - CHECK(ByteSize(shape, "INT32") == 4 * num_elements); - CHECK(ByteSize(shape, "UINT32") == 4 * num_elements); - CHECK(ByteSize(shape, "FP32") == 4 * num_elements); - } - - SUBCASE("8 byte elements") - { - CHECK(ByteSize(shape, "INT64") == 8 * num_elements); - CHECK(ByteSize(shape, "UINT64") == 8 * num_elements); - CHECK(ByteSize(shape, "FP64") == 8 * num_elements); - } - - SUBCASE("Dynamic shape tensor") - { - shape.insert(shape.begin(), -1); - - CHECK(ByteSize(shape, "BOOL") == -1); - CHECK(ByteSize(shape, "INT8") == -1); - CHECK(ByteSize(shape, "UINT8") == -1); - - CHECK(ByteSize(shape, "INT16") == -1); - CHECK(ByteSize(shape, "UINT16") == -1); - CHECK(ByteSize(shape, "FP16") == -1); - CHECK(ByteSize(shape, "BF16") == -1); - - CHECK(ByteSize(shape, "INT32") == -1); - CHECK(ByteSize(shape, "UINT32") == -1); - CHECK(ByteSize(shape, "FP32") == -1); - - CHECK(ByteSize(shape, "INT64") == -1); - CHECK(ByteSize(shape, "UINT64") == -1); - CHECK(ByteSize(shape, "FP64") == -1); - } - - SUBCASE("Unknown data types") - { - CHECK(ByteSize(shape, "bool") == -1); - CHECK(ByteSize(shape, "int8") == -1); - CHECK(ByteSize(shape, "uint8") == -1); - - CHECK(ByteSize(shape, "int16") == -1); - CHECK(ByteSize(shape, "uint16") == -1); - CHECK(ByteSize(shape, "fp16") == -1); - CHECK(ByteSize(shape, "bf16") == -1); - - CHECK(ByteSize(shape, "int32") == -1); - CHECK(ByteSize(shape, "uint32") == -1); - CHECK(ByteSize(shape, "fp32") == -1); - - CHECK(ByteSize(shape, "int64") == -1); - CHECK(ByteSize(shape, "uint64") == -1); - CHECK(ByteSize(shape, "fp64") == -1); - - CHECK(ByteSize(shape, "abc") == -1); - CHECK(ByteSize(shape, "1234") == -1); - CHECK(ByteSize(shape, "") == -1); - } -} - -TEST_CASE("perf_utils: ElementCount") -{ - std::vector shape{3, 4, 5}; - 
constexpr int num_elements = 3 * 4 * 5; - - SUBCASE("Static tensor shape") - { - CHECK(ElementCount(shape) == num_elements); - - shape.push_back(1); - CHECK(ElementCount(shape) == num_elements * 1); - - shape.push_back(300); - CHECK(ElementCount(shape) == num_elements * 1 * 300); - } - - SUBCASE("Dynamic tensor shape") - { - CHECK(ElementCount(shape) == num_elements); - - shape.push_back(-1); - CHECK(ElementCount(shape) == -1); - - shape.pop_back(); - shape.insert(shape.begin(), -1); - CHECK(ElementCount(shape) == -1); - } -} - -TEST_CASE("perf_utils: ShapeVecToString") -{ - std::vector shape{3, 4, 5}; - - SUBCASE("No skipping first dim") - { - CHECK(ShapeVecToString(shape, false) == "[3,4,5]"); - - shape.push_back(10); - CHECK(ShapeVecToString(shape, false) == "[3,4,5,10]"); - - shape.push_back(-1); - CHECK(ShapeVecToString(shape, false) == "[3,4,5,10,-1]"); - - shape.pop_back(); - shape.insert(shape.begin(), -1); - CHECK(ShapeVecToString(shape, false) == "[-1,3,4,5,10]"); - - shape.clear(); - CHECK(ShapeVecToString(shape, false) == "[]"); - } - - SUBCASE("Skipping first dim") - { - CHECK(ShapeVecToString(shape, true) == "[4,5]"); - - shape.push_back(-1); - CHECK(ShapeVecToString(shape, true) == "[4,5,-1]"); - - shape.pop_back(); - shape.insert(shape.begin(), -1); - CHECK(ShapeVecToString(shape, true) == "[3,4,5]"); - - shape.clear(); - CHECK(ShapeVecToString(shape, true) == "[]"); - } -} - -TEST_CASE("perf_utils: TensorToRegionName") -{ - CHECK(TensorToRegionName("name/with/slash") == "namewithslash"); - CHECK(TensorToRegionName("name//with//slash") == "namewithslash"); - CHECK(TensorToRegionName("name\\with\\backslash") == "namewithbackslash"); - CHECK(TensorToRegionName("name\\\\with\\\\backslash") == "namewithbackslash"); - CHECK(TensorToRegionName("name_without_slash") == "name_without_slash"); - CHECK(TensorToRegionName("abc123!@#") == "abc123!@#"); - CHECK(TensorToRegionName("") == ""); -} - - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/test_profile_data_collector.cc b/src/c++/perf_analyzer/test_profile_data_collector.cc deleted file mode 100644 index 926a90151..000000000 --- a/src/c++/perf_analyzer/test_profile_data_collector.cc +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2023-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS"" AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "doctest.h"
-#include "mock_profile_data_collector.h"
-#include "profile_data_collector.h"
-
-namespace triton { namespace perfanalyzer {
-
-TEST_CASE("profile_data_collector: FindExperiment")
-{
-  MockProfileDataCollector collector{};
-  InferenceLoadMode infer_mode1{10, 20.0};
-
-  std::vector<Experiment>::iterator it;
-  it = collector.FindExperiment(infer_mode1);
-  CHECK(it == collector.experiments_.end());
-
-  std::vector<RequestRecord> request_records{RequestRecord{}};
-  collector.AddData(infer_mode1, std::move(request_records));
-
-  it = collector.FindExperiment(infer_mode1);
-  CHECK(it != collector.experiments_.end());
-  CHECK((*it).mode == infer_mode1);
-
-  InferenceLoadMode infer_mode2{123, 0.0};
-  it = collector.FindExperiment(infer_mode2);
-  CHECK(it == collector.experiments_.end());
-}
-
-TEST_CASE("profile_data_collector: AddData")
-{
-  using std::chrono::nanoseconds;
-  using std::chrono::system_clock;
-  using std::chrono::time_point;
-
-  MockProfileDataCollector collector{};
-  InferenceLoadMode infer_mode{10, 20.0};
-
-  // Add RequestRecords
-  auto clock_epoch{time_point<system_clock>()};
-
-  uint64_t sequence_id1{123};
-  auto request1_timestamp{clock_epoch + nanoseconds(1)};
-  auto request1_response1_timestamp{clock_epoch + nanoseconds(2)};
-  auto request1_response2_timestamp{clock_epoch + nanoseconds(3)};
-  uint8_t fake_data_in[] = {0x01, 0x02, 0x03, 0x04};
-  uint8_t fake_data_out[] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08};
-  RequestRecord::RequestInput request1_request_input{
-      {"key1", RecordData(fake_data_in, 1)},
-      {"key2", RecordData(fake_data_in, 2)}};
-  RequestRecord::ResponseOutput request1_response1_output{
-      {"key1", RecordData(fake_data_out, 1)},
-      {"key2", RecordData(fake_data_out, 2)}};
-  RequestRecord::ResponseOutput request1_response2_output{
-      {"key3", RecordData(fake_data_out, 3)},
-      {"key4", RecordData(fake_data_out, 4)}};
-
-  RequestRecord request_record1{
-      request1_timestamp,
-      std::vector<time_point<system_clock>>{
-          request1_response1_timestamp, request1_response2_timestamp},
-      {request1_request_input},
-      {request1_response1_output, request1_response2_output},
-      0,
-      false,
-      sequence_id1,
-      false};
-
-  uint64_t sequence_id2{456};
-  auto request2_timestamp{clock_epoch + nanoseconds(4)};
-  auto request2_response1_timestamp{clock_epoch + nanoseconds(5)};
-  auto request2_response2_timestamp{clock_epoch + nanoseconds(6)};
-  RequestRecord::RequestInput request2_request_input{
-      {"key3", RecordData(fake_data_in, 3)},
-      {"key4", RecordData(fake_data_in, 4)}};
-  RequestRecord::ResponseOutput request2_response1_output{
-      {"key5", RecordData(fake_data_out, 5)},
-      {"key6", RecordData(fake_data_out, 6)}};
-  RequestRecord::ResponseOutput request2_response2_output{
-      {"key7", RecordData(fake_data_out, 7)},
-      {"key8", RecordData(fake_data_out, 8)}};
-
-  RequestRecord request_record2{
-      request2_timestamp,
-      std::vector<time_point<system_clock>>{
-          request2_response1_timestamp, request2_response2_timestamp},
-      {request2_request_input},
-      {request2_response1_output, request2_response2_output},
-      0,
-      false,
-      sequence_id2,
-      false};
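// Aside (editor's sketch): FindExperiment above behaves like a linear
// find_if keyed on the load mode, which is why an AddData call with a new
// mode makes the subsequent lookup succeed. The pattern, with invented
// stand-in types (LoadMode/Exp are not the real InferenceLoadMode/
// Experiment):
#include <algorithm>
#include <cstddef>
#include <vector>

struct LoadMode {
  std::size_t concurrency;
  double request_rate;
  bool operator==(const LoadMode& o) const
  {
    return concurrency == o.concurrency && request_rate == o.request_rate;
  }
};
struct Exp {
  LoadMode mode;
};

std::vector<Exp>::iterator
FindByMode(std::vector<Exp>& exps, const LoadMode& m)
{
  return std::find_if(
      exps.begin(), exps.end(), [&](const Exp& e) { return e.mode == m; });
}
// end of sketch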
-
-  std::vector<RequestRecord> request_records{request_record1, request_record2};
-  collector.AddData(infer_mode, std::move(request_records));
-
-  CHECK(!collector.experiments_.empty());
-
-  std::vector<RequestRecord> rr{collector.experiments_[0].requests};
-  CHECK(rr[0].sequence_id_ == sequence_id1);
-  CHECK(rr[0].start_time_ == request1_timestamp);
-  CHECK(rr[0].request_inputs_[0] == request1_request_input);
-  CHECK(rr[0].response_timestamps_[0] == request1_response1_timestamp);
-  CHECK(rr[0].response_timestamps_[1] == request1_response2_timestamp);
-  CHECK(rr[0].response_outputs_[0] == request1_response1_output);
-  CHECK(rr[0].response_outputs_[1] == request1_response2_output);
-  CHECK(rr[1].sequence_id_ == sequence_id2);
-  CHECK(rr[1].start_time_ == request2_timestamp);
-  CHECK(rr[1].request_inputs_[0] == request2_request_input);
-  CHECK(rr[1].response_timestamps_[0] == request2_response1_timestamp);
-  CHECK(rr[1].response_timestamps_[1] == request2_response2_timestamp);
-  CHECK(rr[1].response_outputs_[0] == request2_response1_output);
-  CHECK(rr[1].response_outputs_[1] == request2_response2_output);
-}
-
-TEST_CASE("profile_data_collector: AddWindow")
-{
-  MockProfileDataCollector collector{};
-  InferenceLoadMode infer_mode{10, 20.0};
-
-  uint64_t window_start1{123};
-  uint64_t window_end1{456};
-  collector.AddWindow(infer_mode, window_start1, window_end1);
-
-  CHECK(!collector.experiments_.empty());
-  CHECK(collector.experiments_[0].window_boundaries[0] == window_start1);
-  CHECK(collector.experiments_[0].window_boundaries[1] == window_end1);
-
-  uint64_t window_start2{678};
-  uint64_t window_end2{912};
-  collector.AddWindow(infer_mode, window_start2, window_end2);
-
-  CHECK(collector.experiments_[0].window_boundaries[2] == window_start2);
-  CHECK(collector.experiments_[0].window_boundaries[3] == window_end2);
-}
-
-}}  // namespace triton::perfanalyzer
diff --git a/src/c++/perf_analyzer/test_profile_data_exporter.cc b/src/c++/perf_analyzer/test_profile_data_exporter.cc
deleted file mode 100644
index ffd958c5c..000000000
--- a/src/c++/perf_analyzer/test_profile_data_exporter.cc
+++ /dev/null
@@ -1,327 +0,0 @@
-// Copyright 2023-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "doctest.h" -#include "mock_profile_data_exporter.h" -#include "profile_data_exporter.h" - -namespace triton { namespace perfanalyzer { - -TEST_CASE("profile_data_exporter: ConvertToJson") -{ - using std::chrono::nanoseconds; - using std::chrono::system_clock; - using std::chrono::time_point; - - MockProfileDataExporter exporter{}; - - InferenceLoadMode infer_mode{4, 0.0}; - uint64_t sequence_id{1}; - - auto clock_epoch{time_point()}; - auto request_timestamp{clock_epoch + nanoseconds(1)}; - auto response_timestamp1{clock_epoch + nanoseconds(2)}; - auto response_timestamp2{clock_epoch + nanoseconds(3)}; - - // Request inputs - const std::string in_buf1{"abc123"}; - const int32_t in_buf2{456}; - const bool in_buf3{true}; - const std::string in_buf4{"{\"abc\":\"def\"}"}; - - RequestRecord::RequestInput request_input{ - {"in_key1", - {reinterpret_cast(in_buf1.data()), in_buf1.size(), - "BYTES"}}, - {"in_key2", - {reinterpret_cast(&in_buf2), sizeof(in_buf2), "INT32"}}, - {"in_key3", - {reinterpret_cast(&in_buf3), sizeof(in_buf3), "BOOL"}}, - {"in_key4", - {reinterpret_cast(in_buf4.data()), sizeof(in_buf4), - "JSON"}}, - }; - - // Response outputs - std::vector out_bufs{"abc", "def", "ghi", "jkl"}; - RequestRecord::ResponseOutput response_output1{ - {"out_key1", - {reinterpret_cast(out_bufs[0].data()), - out_bufs[0].size()}}, - {"out_key2", - {reinterpret_cast(out_bufs[1].data()), - out_bufs[1].size()}}}; - RequestRecord::ResponseOutput response_output2{ - {"out_key3", - {reinterpret_cast(out_bufs[2].data()), - out_bufs[2].size()}}, - {"out_key4", - {reinterpret_cast(out_bufs[3].data()), - out_bufs[3].size()}}}; - - RequestRecord request_record{ - request_timestamp, - std::vector>{ - response_timestamp1, response_timestamp2}, - {request_input}, - {response_output1, response_output2}, - 0, - false, - sequence_id, - false}; - std::vector requests{request_record}; - std::vector window_boundaries{1, 5, 6}; - - Experiment experiment; - experiment.mode = infer_mode; - experiment.requests = requests; - experiment.window_boundaries = window_boundaries; - std::vector experiments{experiment}; - - std::string version{"1.2.3"}; - cb::BackendKind service_kind = cb::BackendKind::TRITON; - std::string endpoint{""}; - - exporter.ConvertToJson(experiments, version, service_kind, endpoint); - - std::string json{R"( - { - "experiments" : [ - { - "experiment" : { - "mode" : "concurrency", - "value" : 4 - }, - "requests" : [ - { - "timestamp" : 1, - "sequence_id" : 1, - "request_inputs" : {"in_key1":"abc123","in_key2":456,"in_key3":true,"in_key4":"{\"abc\":\"def\"}"}, - "response_timestamps" : [ 2, 3 ], - "response_outputs" : [ {"out_key1":"abc","out_key2":"def"}, {"out_key3":"ghi","out_key4":"jkl"} ] - } - ], - "window_boundaries" : [ 1, 5, 6 ] - } - ], - "version" : "1.2.3", - "service_kind": "triton", - "endpoint": "" - } - )"}; - - rapidjson::Document expected_document; - expected_document.Parse(json.c_str()); - - // FIXME (TMA-1339): Look into the testing the order 
of things in the json - const rapidjson::Value& expected_experiment{ - expected_document["experiments"][0]["experiment"]}; - const rapidjson::Value& expected_request{ - expected_document["experiments"][0]["requests"][0]}; - const rapidjson::Value& expected_windows{ - expected_document["experiments"][0]["window_boundaries"]}; - const rapidjson::Value& expected_version{expected_document["version"]}; - - const rapidjson::Value& actual_experiment{ - exporter.document_["experiments"][0]["experiment"]}; - const rapidjson::Value& actual_request{ - exporter.document_["experiments"][0]["requests"][0]}; - const rapidjson::Value& actual_windows{ - exporter.document_["experiments"][0]["window_boundaries"]}; - const rapidjson::Value& actual_version{exporter.document_["version"]}; - - CHECK(actual_experiment["mode"] == expected_experiment["mode"]); - CHECK(actual_experiment["value"] == expected_experiment["value"]); - - CHECK(actual_request["timestamp"] == expected_request["timestamp"]); - CHECK(actual_request["sequence_id"] == expected_request["sequence_id"]); - - CHECK( - actual_request["request_inputs"]["in_key1"] == - expected_request["request_inputs"]["in_key1"]); - CHECK( - actual_request["request_inputs"]["in_key2"] == - expected_request["request_inputs"]["in_key2"]); - CHECK( - actual_request["request_inputs"]["in_key3"] == - expected_request["request_inputs"]["in_key3"]); - auto act_inkey_4 = actual_request["request_inputs"]["in_key4"].GetString(); - auto exp_inkey_4 = expected_request["request_inputs"]["in_key4"].GetString(); - CHECK(std::string{act_inkey_4} == std::string{exp_inkey_4}); - - CHECK( - actual_request["response_timestamps"][0] == - expected_request["response_timestamps"][0]); - CHECK( - actual_request["response_timestamps"][1] == - expected_request["response_timestamps"][1]); - CHECK( - actual_request["response_outputs"][0] == - expected_request["response_outputs"][0]); - CHECK( - actual_request["response_outputs"][1] == - expected_request["response_outputs"][1]); - - CHECK(actual_windows[0] == expected_windows[0]); - CHECK(actual_windows[1] == expected_windows[1]); - CHECK(actual_windows[2] == expected_windows[2]); - - CHECK(actual_version == expected_version); -} - -TEST_CASE("profile_data_exporter: AddExperiment") -{ - MockProfileDataExporter exporter{}; - - Experiment raw_experiment; - rapidjson::Value entry(rapidjson::kObjectType); - rapidjson::Value experiment(rapidjson::kObjectType); - - SUBCASE("Concurrency mode") - { - InferenceLoadMode infer_mode{15, 0.0}; - raw_experiment.mode = infer_mode; - - exporter.AddExperiment(entry, experiment, raw_experiment); - CHECK(entry.HasMember("experiment")); - CHECK(entry["experiment"]["mode"] == "concurrency"); - CHECK(entry["experiment"]["value"] == 15); - } - - SUBCASE("Request rate mode") - { - InferenceLoadMode infer_mode{0, 23.5}; - raw_experiment.mode = infer_mode; - - exporter.AddExperiment(entry, experiment, raw_experiment); - CHECK(entry.HasMember("experiment")); - CHECK(entry["experiment"]["mode"] == "request_rate"); - CHECK(entry["experiment"]["value"] == 23.5); - } -} - -TEST_CASE("profile_data_exporter: OutputToFile") -{ - MockProfileDataExporter exporter{}; - std::string file_path; - - SUBCASE("Empty file path") - { - file_path = ""; - CHECK_THROWS_WITH_AS( - exporter.OutputToFile(file_path), - "failed to open file for outputting raw profile data", - PerfAnalyzerException); - } - - SUBCASE("With file path") - { - file_path = "/tmp/test-" + GetRandomString(4) + ".json"; - CHECK_NOTHROW(exporter.OutputToFile(file_path)); - 
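// Aside (editor's sketch): every assertion in these exporter tests follows
// the same pattern -- parse an expected JSON literal with rapidjson, then
// compare subtrees with rapidjson::Value::operator==, whose equality is
// deep: order-sensitive for arrays, order-insensitive for object members.
// A self-contained illustration (assuming rapidjson is on the include path):
#include <rapidjson/document.h>

#include <cassert>

void
SketchJsonCompare()
{
  rapidjson::Document expected;
  expected.Parse(R"({ "service_kind": "triton" })");
  rapidjson::Document actual;
  actual.Parse(R"({ "service_kind": "triton" })");
  assert(expected["service_kind"] == actual["service_kind"]);
}
// end of sketch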
-    CHECK(IsFile(file_path));
-
-    std::remove(file_path.c_str());
-    CHECK(!IsFile(file_path));
-  }
-}
-
-TEST_CASE("profile_data_exporter: AddServiceKind")
-{
-  MockProfileDataExporter exporter{};
-  exporter.ClearDocument();
-
-  cb::BackendKind service_kind;
-  std::string json{""};
-
-  SUBCASE("Backend kind: TRITON")
-  {
-    service_kind = cb::BackendKind::TRITON;
-    json = R"({ "service_kind": "triton" })";
-  }
-
-  SUBCASE("Backend kind: TENSORFLOW_SERVING")
-  {
-    service_kind = cb::BackendKind::TENSORFLOW_SERVING;
-    json = R"({ "service_kind": "tfserving" })";
-  }
-
-  SUBCASE("Backend kind: TORCHSERVE")
-  {
-    service_kind = cb::BackendKind::TORCHSERVE;
-    json = R"({ "service_kind": "torchserve" })";
-  }
-
-  SUBCASE("Backend kind: TRITON_C_API")
-  {
-    service_kind = cb::BackendKind::TRITON_C_API;
-    json = R"({ "service_kind": "triton_c_api" })";
-  }
-
-  SUBCASE("Backend kind: OPENAI")
-  {
-    service_kind = cb::BackendKind::OPENAI;
-    json = R"({ "service_kind": "openai" })";
-  }
-
-  exporter.AddServiceKind(service_kind);
-  rapidjson::Document expected_document;
-  expected_document.Parse(json.c_str());
-
-  const rapidjson::Value& expected_kind{expected_document["service_kind"]};
-  const rapidjson::Value& actual_kind{exporter.document_["service_kind"]};
-  CHECK(actual_kind == expected_kind);
-}
-
-TEST_CASE("profile_data_exporter: AddEndpoint")
-{
-  MockProfileDataExporter exporter{};
-  exporter.ClearDocument();
-
-  std::string endpoint{""};
-  std::string json{""};
-
-  SUBCASE("Endpoint: OpenAI Chat Completions")
-  {
-    endpoint = "v1/chat/completions";
-    json = R"({ "endpoint": "v1/chat/completions" })";
-  }
-
-  SUBCASE("Endpoint: OpenAI Completions")
-  {
-    endpoint = "v1/completions";
-    json = R"({ "endpoint": "v1/completions" })";
-  }
-
-  exporter.AddEndpoint(endpoint);
-  rapidjson::Document expected_document;
-  expected_document.Parse(json.c_str());
-
-  const rapidjson::Value& expected_endpoint{expected_document["endpoint"]};
-  const rapidjson::Value& actual_endpoint{exporter.document_["endpoint"]};
-  CHECK(actual_endpoint == expected_endpoint);
-}
-
-}}  // namespace triton::perfanalyzer
diff --git a/src/c++/perf_analyzer/test_report_writer.cc b/src/c++/perf_analyzer/test_report_writer.cc
deleted file mode 100644
index 5d341c30a..000000000
--- a/src/c++/perf_analyzer/test_report_writer.cc
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include - -#include "doctest.h" -#include "report_writer.h" - -namespace triton { namespace perfanalyzer { - -class TestReportWriter : ReportWriter { - public: - void WriteGpuMetrics(std::ostream& ofs, const Metrics& metrics) - { - ReportWriter::WriteGpuMetrics(ofs, metrics); - } -}; - -TEST_CASE("testing WriteGpuMetrics") -{ - TestReportWriter trw{}; - Metrics m{}; - m.gpu_utilization_per_gpu["a"] = 1.0; - m.gpu_power_usage_per_gpu["a"] = 2.2; - m.gpu_memory_used_bytes_per_gpu["a"] = 3; - m.gpu_memory_total_bytes_per_gpu["a"] = 4; - std::ostringstream actual_output{}; - - SUBCASE("single gpu complete output") - { - trw.WriteGpuMetrics(actual_output, m); - const std::string expected_output{",a:1;,a:2.2;,a:3;,a:4;"}; - CHECK(actual_output.str() == expected_output); - } - - SUBCASE("single gpu missing data") - { - m.gpu_power_usage_per_gpu.erase("a"); - trw.WriteGpuMetrics(actual_output, m); - const std::string expected_output{",a:1;,,a:3;,a:4;"}; - CHECK(actual_output.str() == expected_output); - } - - SUBCASE("multi-gpu") - { - m.gpu_utilization_per_gpu["z"] = 100.0; - m.gpu_power_usage_per_gpu["z"] = 222.2; - m.gpu_memory_used_bytes_per_gpu["z"] = 45; - m.gpu_memory_total_bytes_per_gpu["z"] = 89; - - SUBCASE("multi gpu complete output") - { - trw.WriteGpuMetrics(actual_output, m); - const std::string expected_output{ - ",a:1;z:100;,a:2.2;z:222.2;,a:3;z:45;,a:4;z:89;"}; - CHECK(actual_output.str() == expected_output); - } - - SUBCASE("multi gpu missing data") - { - m.gpu_utilization_per_gpu.erase("z"); - m.gpu_power_usage_per_gpu.erase("a"); - trw.WriteGpuMetrics(actual_output, m); - const std::string expected_output{",a:1;,z:222.2;,a:3;z:45;,a:4;z:89;"}; - CHECK(actual_output.str() == expected_output); - } - } -} - -}} // namespace triton::perfanalyzer diff --git a/src/c++/perf_analyzer/test_request_rate_manager.cc b/src/c++/perf_analyzer/test_request_rate_manager.cc deleted file mode 100644 index 07b9016dd..000000000 --- a/src/c++/perf_analyzer/test_request_rate_manager.cc +++ /dev/null @@ -1,2242 +0,0 @@ -// Copyright 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of NVIDIA CORPORATION nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include -#include - -#include "command_line_parser.h" -#include "common.h" -#include "doctest.h" -#include "mock_client_backend.h" -#include "mock_data_loader.h" -#include "mock_infer_data_manager.h" -#include "mock_model_parser.h" -#include "mock_request_rate_worker.h" -#include "mock_sequence_manager.h" -#include "request_rate_manager.h" -#include "test_load_manager_base.h" -#include "test_utils.h" - -namespace cb = triton::perfanalyzer::clientbackend; -using milliseconds = std::chrono::milliseconds; -using nanoseconds = std::chrono::nanoseconds; - -namespace triton { namespace perfanalyzer { - -/// Class to test the RequestRateManager -/// -class TestRequestRateManager : public TestLoadManagerBase, - public RequestRateManager { - public: - TestRequestRateManager( - PerfAnalyzerParameters params, bool is_sequence_model = false, - bool is_decoupled_model = false, bool use_mock_infer = false) - : use_mock_infer_(use_mock_infer), - TestLoadManagerBase(params, is_sequence_model, is_decoupled_model), - RequestRateManager( - params.async, params.streaming, params.request_distribution, - params.batch_size, params.measurement_window_ms, params.max_trials, - params.max_threads, params.num_of_sequences, - params.shared_memory_type, params.output_shm_size, - params.serial_sequences, GetParser(), GetFactory(), - params.request_parameters) - { - } - - std::shared_ptr MakeWorker( - std::shared_ptr thread_stat, - std::shared_ptr thread_config) override - { - size_t id = workers_.size(); - auto worker = std::make_shared( - id, thread_stat, thread_config, parser_, data_loader_, factory_, - on_sequence_model_, async_, max_threads_, using_json_data_, streaming_, - batch_size_, wake_signal_, wake_mutex_, execute_, start_time_, - serial_sequences_, infer_data_manager_, sequence_manager_); - - if (use_mock_infer_) { - EXPECT_CALL(*worker, Infer()) - .WillRepeatedly(testing::Invoke( - worker.get(), &MockRequestRateWorker::EmptyInfer)); - } - return worker; - } - - void TestConfigureThreads( - std::vector& expected_configs, size_t request_count) - { - RequestRateManager::ConfigureThreads(request_count); - - auto expected_size = expected_configs.size(); - - // Check that the correct number of threads are created - // - CHECK(threads_.size() == expected_size); - - // Check that threads_config has correct number of sequences and - // seq stat index offset - for (auto i = 0; i < expected_configs.size(); i++) { - CHECK( - threads_config_[i]->num_sequences_ == - expected_configs[i].num_sequences_); - CHECK( - threads_config_[i]->seq_stat_index_offset_ == - expected_configs[i].seq_stat_index_offset_); - CHECK( - threads_config_[i]->num_requests_ == - expected_configs[i].num_requests_); - } - } - - void TestCalculateThreadIds(std::vector& 
expected_thread_ids) - { - std::vector actual_thread_ids = - RequestRateManager::CalculateThreadIds(); - CHECK(actual_thread_ids.size() == expected_thread_ids.size()); - - for (auto i = 0; i < actual_thread_ids.size(); i++) { - CHECK(actual_thread_ids[i] == expected_thread_ids[i]); - } - } - - void StopWorkerThreads() { LoadManager::StopWorkerThreads(); } - - void TestSchedule(double rate, PerfAnalyzerParameters params) - { - PauseWorkers(); - ConfigureThreads(); - GenerateSchedule(rate); - - nanoseconds measurement_window_nanoseconds{ - params.measurement_window_ms * NANOS_PER_MILLIS}; - nanoseconds max_test_duration{ - measurement_window_nanoseconds * params.max_trials}; - - nanoseconds expected_time_between_requests{int(NANOS_PER_SECOND / rate)}; - nanoseconds expected_current_timestamp{0}; - - // Keep calling GetNextTimestamp for the entire test_duration to make sure - // the schedule is exactly as expected - // - while (expected_current_timestamp < max_test_duration) { - for (auto worker : workers_) { - expected_current_timestamp += expected_time_between_requests; - auto timestamp = std::dynamic_pointer_cast(worker) - ->GetNextTimestamp(); - REQUIRE(timestamp.count() == expected_current_timestamp.count()); - } - } - early_exit = true; - } - - void TestCreateSchedule( - double rate, PerfAnalyzerParameters params, - std::vector& expected_worker_ratio) - { - PauseWorkers(); - ConfigureThreads(); - GenerateSchedule(rate); - - std::vector worker_schedule_sizes; - uint32_t total_num_seqs{0}; - - for (auto worker : workers_) { - auto w = std::dynamic_pointer_cast(worker); - total_num_seqs += w->thread_config_->num_sequences_; - worker_schedule_sizes.push_back(w->schedule_->intervals.size()); - } - early_exit = true; - - CHECK(num_of_sequences_ == total_num_seqs); - for (int i = 0; i < worker_schedule_sizes.size() - 1; i++) { - CHECK( - worker_schedule_sizes[i] / expected_worker_ratio[i] == - worker_schedule_sizes[i + 1] / expected_worker_ratio[i + 1]); - } - } - - /// Test that the correct Infer function is called in the backend - /// - void TestInferType() - { - double request_rate = 50; - auto sleep_time = milliseconds(100); - - ChangeRequestRate(request_rate); - std::this_thread::sleep_for(sleep_time); - StopWorkerThreads(); - - CheckInferType(); - } - - /// Test that the inference distribution is as expected - /// - void TestDistribution(uint request_rate, uint duration_ms) - { - ChangeRequestRate(request_rate); - std::this_thread::sleep_for(milliseconds(duration_ms)); - StopWorkerThreads(); - - CheckCallDistribution(request_rate); - } - - /// Test that the schedule is properly update after calling ChangeRequestRate - /// - void TestMultipleRequestRate() - { - std::vector request_rates = {50, 200}; - auto sleep_time = milliseconds(500); - - for (auto request_rate : request_rates) { - ChangeRequestRate(request_rate); - ResetStats(); - std::this_thread::sleep_for(sleep_time); - CheckCallDistribution(request_rate); - } - } - - /// Test sequence handling - /// - void TestSequences(bool verify_seq_balance, bool check_expected_count) - { - stats_->SetDelays({10}); - double request_rate1 = 100; - double request_rate2 = 200; - - // A single sequence can't maintain the above rates - // - if (params_.num_of_sequences == 1) { - request_rate1 = 50; - request_rate2 = 100; - } - - auto stats = cb::InferStat(); - int sleep_ms = 500; - double num_seconds = double(sleep_ms) / 1000; - - auto sleep_time = milliseconds(sleep_ms); - size_t expected_count1 = num_seconds * request_rate1; - size_t 
expected_count2 = num_seconds * request_rate2 + expected_count1; - - // Run and check request rate 1 - // - ChangeRequestRate(request_rate1); - std::this_thread::sleep_for(sleep_time); - - stats = cb::InferStat(); - GetAccumulatedClientStat(&stats); - if (check_expected_count) { - CHECK( - stats.completed_request_count == - doctest::Approx(expected_count1).epsilon(0.10)); - } - - PauseWorkers(); - CheckSequences(params_.num_of_sequences); - - // Make sure that the client and the manager are in agreement on the request - // count in between rates - // - stats = cb::InferStat(); - GetAccumulatedClientStat(&stats); - int client_total_requests = stats_->num_async_infer_calls + - stats_->num_async_stream_infer_calls + - stats_->num_infer_calls; - CHECK(stats.completed_request_count == client_total_requests); - - if (verify_seq_balance) { - CheckSequenceBalance(); - } - - ResetStats(); - - // Run and check request rate 2 - // - ChangeRequestRate(request_rate2); - std::this_thread::sleep_for(sleep_time); - - stats = cb::InferStat(); - GetAccumulatedClientStat(&stats); - if (check_expected_count) { - CHECK( - stats.completed_request_count == - doctest::Approx(expected_count2).epsilon(0.10)); - } - - // Stop all threads and make sure everything is as expected - // - StopWorkerThreads(); - - CheckSequences(params_.num_of_sequences); - } - - /// Test that the shared memory methods are called correctly - /// - void TestSharedMemory(uint request_rate, uint duration_ms) - { - ChangeRequestRate(request_rate); - std::this_thread::sleep_for(milliseconds(duration_ms)); - StopWorkerThreads(); - } - - /// Test that tries to find deadlocks and livelocks - /// - void TestTimeouts() - { - TestWatchDog watchdog(1000); - ChangeRequestRate(100); - std::this_thread::sleep_for(milliseconds(100)); - StopWorkerThreads(); - watchdog.stop(); - } - - /// Test that idle time is tracked correctly - void TestOverhead(uint request_rate) - { - stats_->SetDelays({1}); - ChangeRequestRate(request_rate); - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - // During a run of 100 ms (100,000,000 ns), make sure that the idle time is - // at least 95% of that - // - auto idle_time_ns = GetIdleTime(); - CHECK(idle_time_ns > 95000000); - StopWorkerThreads(); - } - - /// Helper function that will setup and run a case to verify custom data - /// behavior - /// \param num_requests Integer number of requests to send during the test - /// \param num_threads Number of worker threads to create - /// \param tensors Vector of input ModelTensors - /// \param json_str The custom data json text - /// \param expected_values Vector of expected input values for each inference - /// \param expect_init_failure True if InitManager is expected to throw an - /// error - /// \param expect_thread_failure True if the thread is expected to have - /// an error - void TestCustomData( - size_t num_requests, size_t num_threads, - std::vector& tensors, const std::string json_str, - std::vector>& expected_values, - bool expect_init_failure, bool expect_thread_failure) - { - CustomDataTestSetup(tensors, json_str, expect_init_failure, num_threads); - if (expect_init_failure) { - // The rest of the test is invalid if init failed - return; - } - auto thread_status = CustomDataTestSendRequests(num_requests, num_threads); - CustomDataTestCheckResults( - thread_status, expect_thread_failure, expected_values); - } - - void CustomDataTestSetup( - std::vector& tensors, const std::string json_str, - bool expect_init_failure, size_t num_threads) - { - 
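// Aside (editor's sketch): the setup below works by swapping the manager's
// collaborators (parser_, data_loader_, infer_data_manager_) for mocks via
// their shared_ptr members; mock-only state is then reached by downcasting
// the shared_ptr, as these tests do with std::dynamic_pointer_cast. The
// injection idiom, with invented stand-in types:
#include <memory>

struct Dep {
  virtual ~Dep() = default;
};
struct MockDep : Dep {
  int canned_value{42};  // mock-only state a test might inspect
};

void
SketchInjection()
{
  std::shared_ptr<Dep> dep = std::make_shared<MockDep>();
  auto mock = std::dynamic_pointer_cast<MockDep>(dep);
  (void)mock->canned_value;
}
// end of sketch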
params_.user_data = {json_str}; - - std::shared_ptr mdl{ - std::make_shared(params_.batch_size)}; - - std::shared_ptr mmp{ - std::make_shared(on_sequence_model_, false)}; - mmp->inputs_ = std::make_shared(); - for (auto t : tensors) { - (*mmp->inputs_)[t.name_] = t; - } - - infer_data_manager_ = - MockInferDataManagerFactory::CreateMockInferDataManager( - params_.max_threads, params_.batch_size, params_.shared_memory_type, - params_.output_shm_size, params_.request_parameters, mmp, factory_, - mdl); - - parser_ = mmp; - data_loader_ = mdl; - using_json_data_ = true; - execute_ = true; - max_threads_ = num_threads; - - if (expect_init_failure) { - REQUIRE_THROWS_AS( - InitManager( - params_.string_length, params_.string_data, params_.zero_input, - params_.user_data, params_.start_sequence_id, - params_.sequence_id_range, params_.sequence_length, - params_.sequence_length_specified, - params_.sequence_length_variation), - PerfAnalyzerException); - return; - } else { - REQUIRE_NOTHROW(InitManager( - params_.string_length, params_.string_data, params_.zero_input, - params_.user_data, params_.start_sequence_id, - params_.sequence_id_range, params_.sequence_length, - params_.sequence_length_specified, - params_.sequence_length_variation)); - } - } - - cb::Error CustomDataTestSendRequests(size_t num_requests, size_t num_threads) - { - std::vector> workers; - std::vector> thread_stats; - - for (auto i = 0; i < num_threads; i++) { - std::shared_ptr ts{std::make_shared()}; - thread_stats.push_back(ts); - std::shared_ptr tc{std::make_shared(i)}; - std::shared_ptr worker{MakeWorker(ts, tc)}; - workers_.push_back(worker); - - workers.push_back( - std::dynamic_pointer_cast(worker)); - - workers[i]->CreateContext(); - } - - size_t sent_requests = 0; - while (sent_requests < num_requests) { - for (auto i = 0; i < workers.size(); i++) { - workers[i]->SendInferRequest(); - sent_requests++; - } - } - - return thread_stats[0]->status_; - } - - void CustomDataTestCheckResults( - cb::Error& thread_status, bool expect_thread_failure, - std::vector>& expected_values) - { - if (expect_thread_failure) { - REQUIRE(!thread_status.IsOk()); - } else { - REQUIRE_MESSAGE(thread_status.IsOk(), thread_status.Message()); - } - - auto recorded_values = GetRecordedInputValues(); - - // Check that results are exactly as expected - REQUIRE(recorded_values.size() == expected_values.size()); - for (size_t i = 0; i < expected_values.size(); i++) { - REQUIRE(recorded_values[i].size() == expected_values[i].size()); - for (size_t j = 0; j < expected_values[i].size(); j++) { - CHECK(recorded_values[i][j] == expected_values[i][j]); - } - } - } - - std::shared_ptr& parser_{LoadManager::parser_}; - std::shared_ptr& data_loader_{LoadManager::data_loader_}; - std::shared_ptr& sequence_manager_{ - LoadManager::sequence_manager_}; - bool& using_json_data_{LoadManager::using_json_data_}; - bool& execute_{RequestRateManager::execute_}; - size_t& batch_size_{LoadManager::batch_size_}; - std::chrono::steady_clock::time_point& start_time_{ - RequestRateManager::start_time_}; - size_t& max_threads_{LoadManager::max_threads_}; - bool& async_{LoadManager::async_}; - bool& streaming_{LoadManager::streaming_}; - std::shared_ptr& factory_{ - TestLoadManagerBase::factory_}; - std::shared_ptr& infer_data_manager_{ - LoadManager::infer_data_manager_}; - - private: - bool use_mock_infer_; - - void CheckCallDistribution(int request_rate) - { - auto request_distribution = params_.request_distribution; - - auto timestamps = GetStats()->request_timestamps; - 
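// Aside (editor's note): for a Poisson arrival process the gaps between
// requests are exponentially distributed, and an Exponential(lambda)
// variable has mean and standard deviation both equal to 1/lambda. That is
// the property the Poisson branch below relies on when it compares the
// spread of the gaps against their average (assuming the CalculateVariance
// helper from test_utils reports spread on the same scale as the mean,
// i.e. a standard deviation). The two moments, as a sketch:
#include <cmath>
#include <cstdint>
#include <vector>

inline double
SketchMean(const std::vector<int64_t>& v)
{
  double sum = 0;
  for (const auto x : v) {
    sum += x;
  }
  return sum / v.size();
}

inline double
SketchStdDev(const std::vector<int64_t>& v, double mean)
{
  double sq = 0;
  for (const auto x : v) {
    sq += (x - mean) * (x - mean);
  }
  return std::sqrt(sq / v.size());
}
// end of sketch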
std::vector time_delays = GatherTimeBetweenRequests(timestamps); - - double delay_average = CalculateAverage(time_delays); - double delay_variance = CalculateVariance(time_delays, delay_average); - - double expected_delay_average = - NANOS_PER_SECOND / static_cast(request_rate); - - if (request_distribution == POISSON) { - // By definition, variance == average for Poisson. - // - // With such a small sample size for a poisson distribution, there will be - // noise. Allow 5% slop - // - CHECK( - delay_average == - doctest::Approx(expected_delay_average).epsilon(0.05)); - CHECK(delay_variance == doctest::Approx(delay_average).epsilon(0.05)); - } else if (request_distribution == CONSTANT) { - // constant should in theory have 0 variance, but with thread timing - // there is obviously some noise. - // - // Allow it to be at most 5% of average - // - auto max_allowed_delay_variance = 0.05 * delay_average; - - // Constant should be pretty tight. Allowing 1% slop there is noise in the - // thread scheduling - // - CHECK( - delay_average == - doctest::Approx(expected_delay_average).epsilon(0.1)); - CHECK_LT(delay_variance, max_allowed_delay_variance); - } else { - throw std::invalid_argument("Unexpected distribution type"); - } - } - - std::vector GatherTimeBetweenRequests( - const std::vector>& - timestamps) - { - std::vector time_between_requests; - - for (size_t i = 1; i < timestamps.size(); i++) { - auto diff = timestamps[i] - timestamps[i - 1]; - nanoseconds diff_ns = std::chrono::duration_cast(diff); - time_between_requests.push_back(diff_ns.count()); - } - return time_between_requests; - } - - // Gets the inputs recorded in the mock backend - // Returns a vector of vector of int32_t. Each entry in the parent vector is a - // list of all input values for a single inference request - // - std::vector> GetRecordedInputValues() - { - auto recorded_inputs{stats_->recorded_inputs}; - std::vector> recorded_values; - // Convert the recorded inputs into values, for both shared memory and non - // shared memory cases - // - if (params_.shared_memory_type != SharedMemoryType::NO_SHARED_MEMORY) { - auto recorded_memory_regions = - std::dynamic_pointer_cast( - infer_data_manager_) - ->mocked_shared_memory_regions; - for (auto recorded_input : recorded_inputs) { - std::vector recorded_value; - for (auto memory_label : recorded_input) { - auto itr = - recorded_memory_regions.find(memory_label.shared_memory_label); - if (itr == recorded_memory_regions.end()) { - std::string err_str = "Test error: Could not find label " + - memory_label.shared_memory_label + - " in recorded shared memory"; - REQUIRE_MESSAGE(false, err_str); - } else { - for (auto val : itr->second) { - recorded_value.push_back(val); - } - } - } - recorded_values.push_back(recorded_value); - } - } else { - for (auto recorded_input : recorded_inputs) { - std::vector recorded_value; - for (auto val : recorded_input) { - recorded_value.push_back(val.data); - } - recorded_values.push_back(recorded_value); - } - } - return recorded_values; - } - - std::shared_ptr MakeSequenceManager( - const uint64_t start_sequence_id, const uint64_t sequence_id_range, - const size_t sequence_length, const bool sequence_length_specified, - const double sequence_length_variation, const bool using_json_data, - std::shared_ptr data_loader) override - { - return std::make_shared( - start_sequence_id, sequence_id_range, sequence_length, - sequence_length_specified, sequence_length_variation, using_json_data, - data_loader); - } -}; - 
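Editor's aside: doctest re-enters a TEST_CASE once for every leaf SUBCASE, so the chained Parameterize* lambdas used throughout the cases below expand into the full cross product of rates, thread counts, trial counts, and window sizes. A stripped-down sketch of the pattern (assuming a doctest test binary; the values are illustrative):

#include <cstddef>

#include "doctest.h"

TEST_CASE("sketch: subcase cross product")
{
  std::size_t rate{0};
  std::size_t threads{0};

  const auto& ParameterizeRate{[&]() {
    SUBCASE("rate 10") { rate = 10; }
    SUBCASE("rate 30") { rate = 30; }
  }};

  const auto& ParameterizeThreads{[&]() {
    SUBCASE("threads 1")
    {
      ParameterizeRate();
      threads = 1;
    }
    SUBCASE("threads 2")
    {
      ParameterizeRate();
      threads = 2;
    }
  }};

  ParameterizeThreads();  // expands to 2 x 2 = 4 distinct runs

  CHECK(rate > 0);
  CHECK(threads > 0);
}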
-TEST_CASE("request_rate_schedule") -{ - PerfAnalyzerParameters params; - params.measurement_window_ms = 1000; - params.max_trials = 10; - bool is_sequence = false; - bool is_decoupled = false; - bool use_mock_infer = true; - double rate; - - - const auto& ParameterizeRate{[&]() { - SUBCASE("rate 10") - { - rate = 10; - } - SUBCASE("rate 30") - { - rate = 30; - } - SUBCASE("rate 100") - { - rate = 100; - } - }}; - - const auto& ParameterizeThreads{[&]() { - SUBCASE("threads 1") - { - ParameterizeRate(); - params.max_threads = 1; - } - SUBCASE("threads 2") - { - ParameterizeRate(); - params.max_threads = 2; - } - SUBCASE("threads 4") - { - ParameterizeRate(); - params.max_threads = 4; - } - SUBCASE("threads 7") - { - ParameterizeRate(); - params.max_threads = 7; - } - }}; - - const auto& ParameterizeTrials{[&]() { - SUBCASE("trials 3") - { - ParameterizeThreads(); - params.max_trials = 3; - } - SUBCASE("trials 10") - { - ParameterizeThreads(); - params.max_trials = 10; - } - SUBCASE("trials 20") - { - ParameterizeThreads(); - params.max_trials = 20; - } - }}; - - const auto& ParameterizeMeasurementWindow{[&]() { - SUBCASE("window 1000") - { - ParameterizeTrials(); - params.measurement_window_ms = 1000; - } - SUBCASE("window 10000") - { - ParameterizeTrials(); - params.measurement_window_ms = 10000; - } - SUBCASE("window 500") - { - ParameterizeTrials(); - params.measurement_window_ms = 500; - } - }}; - - ParameterizeMeasurementWindow(); - - TestRequestRateManager trrm( - params, is_sequence, is_decoupled, use_mock_infer); - - trrm.InitManager( - params.string_length, params.string_data, params.zero_input, - params.user_data, params.start_sequence_id, params.sequence_id_range, - params.sequence_length, params.sequence_length_specified, - params.sequence_length_variation); - trrm.TestSchedule(rate, params); -} - -/// Check that the correct inference function calls -/// are used given different param values for async and stream -/// -TEST_CASE("request_rate_infer_type") -{ - bool async; - bool stream; - - SUBCASE("async_stream") - { - async = true; - stream = true; - } - SUBCASE("async_no_stream") - { - async = true; - stream = false; - } - SUBCASE("no_async_stream") - { - async = false; - stream = true; - } - SUBCASE("no_async_no_stream") - { - async = false; - stream = false; - } - - PerfAnalyzerParameters params; - params.async = async; - params.streaming = stream; - - TestRequestRateManager trrm(params, false); - - trrm.InitManager( - params.string_length, params.string_data, params.zero_input, - params.user_data, params.start_sequence_id, params.sequence_id_range, - params.sequence_length, params.sequence_length_specified, - params.sequence_length_variation); - trrm.TestInferType(); -} - -/// Check that the request distribution is correct for -/// different Distribution types -/// -TEST_CASE("request_rate_distribution") -{ - PerfAnalyzerParameters params; - uint request_rate = 500; - uint duration_ms = 1000; - - SUBCASE("constant") - { - params.request_distribution = CONSTANT; - } - SUBCASE("poisson") - { - params.request_distribution = POISSON; - } - - TestRequestRateManager trrm(params); - - trrm.InitManager( - params.string_length, params.string_data, params.zero_input, - params.user_data, params.start_sequence_id, params.sequence_id_range, - params.sequence_length, params.sequence_length_specified, - params.sequence_length_variation); - trrm.TestDistribution(request_rate, duration_ms); -} - -/// Check that the request distribution is correct -/// for the case where the measurement 
window is tiny. -/// -TEST_CASE("request_rate_tiny_window") -{ - PerfAnalyzerParameters params; - params.request_distribution = CONSTANT; - params.measurement_window_ms = 10; - params.max_trials = 100; - uint request_rate = 500; - uint duration_ms = 1000; - - - SUBCASE("one_thread") - { - params.max_threads = 1; - } - SUBCASE("odd_threads") - { - params.max_threads = 9; - } - - - TestRequestRateManager trrm(params); - trrm.InitManager( - params.string_length, params.string_data, params.zero_input, - params.user_data, params.start_sequence_id, params.sequence_id_range, - params.sequence_length, params.sequence_length_specified, - params.sequence_length_variation); - trrm.TestDistribution(request_rate, duration_ms); -} - -/// Check that the schedule properly handles mid-test -/// update to the request rate -/// -TEST_CASE("request_rate_multiple") -{ - PerfAnalyzerParameters params{}; - TestRequestRateManager trrm(PerfAnalyzerParameters{}); - - trrm.InitManager( - params.string_length, params.string_data, params.zero_input, - params.user_data, params.start_sequence_id, params.sequence_id_range, - params.sequence_length, params.sequence_length_specified, - params.sequence_length_variation); - trrm.TestMultipleRequestRate(); -} - -/// Check that the inference requests for sequences -/// follow all rules and parameters -/// -TEST_CASE("request_rate_sequence") -{ - PerfAnalyzerParameters params = TestLoadManagerBase::GetSequenceTestParams(); - bool verify_seq_balance = false; - bool check_expected_count = true; - bool is_sequence_model = true; - - TestRequestRateManager trrm(params, is_sequence_model); - trrm.InitManager( - params.string_length, params.string_data, params.zero_input, - params.user_data, params.start_sequence_id, params.sequence_id_range, - params.sequence_length, params.sequence_length_specified, - params.sequence_length_variation); - trrm.TestSequences(verify_seq_balance, check_expected_count); -} - -TEST_CASE("request_rate_serial_sequences") -{ - PerfAnalyzerParameters params; - params.serial_sequences = true; - bool verify_seq_balance = false; - bool check_expected_count = true; - bool is_sequence_model = true; - - const auto& ParameterizeDistribution{[&]() { - SUBCASE("Constant") - { - params.request_distribution = CONSTANT; - } - SUBCASE("Poisson") - { - params.request_distribution = POISSON; - check_expected_count = false; - } - }}; - - SUBCASE("num seqs 7, threads 4") - { - verify_seq_balance = true; - params.sequence_length = 100; - params.num_of_sequences = 7; - params.max_threads = 4; - ParameterizeDistribution(); - } - SUBCASE("num seqs 13, threads 5") - { - verify_seq_balance = true; - params.sequence_length = 100; - params.num_of_sequences = 13; - params.max_threads = 5; - ParameterizeDistribution(); - } - - TestRequestRateManager trrm(params, is_sequence_model); - trrm.InitManager( - params.string_length, params.string_data, params.zero_input, - params.user_data, params.start_sequence_id, params.sequence_id_range, - params.sequence_length, params.sequence_length_specified, - params.sequence_length_variation); - trrm.TestSequences(verify_seq_balance, check_expected_count); -} - -TEST_CASE("request_rate max inflight per seq") -{ - // Confirm that we can have multiple inferences in-flight for a given sequence - // unless in serial-sequence mode - PerfAnalyzerParameters params; - bool is_sequence_model = true; - params.num_of_sequences = 2; - size_t rate = 1000; - size_t time_ms = 10; - - bool expect_multiple_in_flight_sequences = false; - - SUBCASE("sync will never 
have multiple in flight") - { - params.async = false; - expect_multiple_in_flight_sequences = false; - - SUBCASE("serial_sequences on") - { - params.serial_sequences = true; - } - SUBCASE("serial_sequences off") - { - params.serial_sequences = false; - } - } - SUBCASE("async may have multiple in flight depending on serial sequences") - { - params.async = true; - - SUBCASE("serial_sequences on") - { - params.serial_sequences = true; - expect_multiple_in_flight_sequences = false; - } - SUBCASE("serial_sequences off") - { - params.serial_sequences = false; - expect_multiple_in_flight_sequences = true; - } - } - - TestRequestRateManager trrm(params, is_sequence_model); - trrm.InitManager( - params.string_length, params.string_data, params.zero_input, - params.user_data, params.start_sequence_id, params.sequence_id_range, - params.sequence_length, params.sequence_length_specified, - params.sequence_length_variation); - - trrm.stats_->SetDelays({100}); - - trrm.ChangeRequestRate(rate); - std::this_thread::sleep_for(std::chrono::milliseconds(time_ms)); - - auto max_observed_inflight = - trrm.stats_->sequence_status.max_inflight_seq_count; - - if (expect_multiple_in_flight_sequences) { - CHECK(max_observed_inflight > 1); - } else { - CHECK(max_observed_inflight == 1); - } - - trrm.StopWorkerThreads(); -} - - -TEST_CASE("request_rate_streaming: test that streaming-specific logic works") -{ - bool is_sequence = false; - bool is_decoupled; - bool expected_enable_stats_value; - - SUBCASE("enable_stats true") - { - is_decoupled = false; - expected_enable_stats_value = true; - } - SUBCASE("enable_stats false") - { - is_decoupled = true; - expected_enable_stats_value = false; - } - - PerfAnalyzerParameters params{}; - params.streaming = true; - - RateSchedulePtr_t schedule = std::make_shared(); - schedule->intervals = NanoIntervals{nanoseconds(1)}; - schedule->duration = nanoseconds{1}; - - std::shared_ptr thread_stat{std::make_shared()}; - std::shared_ptr thread_config{ - std::make_shared(0)}; - - TestRequestRateManager trrm(params, is_sequence, is_decoupled); - trrm.InitManager( - params.string_length, params.string_data, params.zero_input, - params.user_data, params.start_sequence_id, params.sequence_id_range, - params.sequence_length, params.sequence_length_specified, - params.sequence_length_variation); - - auto worker = trrm.MakeWorker(thread_stat, thread_config); - std::dynamic_pointer_cast(worker)->SetSchedule(schedule); - std::future infer_future{std::async(&IWorker::Infer, worker)}; - - early_exit = true; - infer_future.get(); - - CHECK( - trrm.stats_->start_stream_enable_stats_value == - expected_enable_stats_value); -} - -TEST_CASE( - "custom_json_data: Check custom json data to ensure that it is processed " - "correctly") -{ - PerfAnalyzerParameters params{}; - params.user_data = {"fake_file.json"}; - bool is_sequence_model{false}; - - std::vector> expected_results; - std::vector tensors; - bool expect_init_failure = false; - bool expect_thread_failure = false; - - ModelTensor model_tensor1{}; - model_tensor1.datatype_ = "INT32"; - model_tensor1.is_optional_ = false; - model_tensor1.is_shape_tensor_ = false; - model_tensor1.name_ = "INPUT1"; - model_tensor1.shape_ = {1}; - - ModelTensor model_tensor2 = model_tensor1; - model_tensor2.name_ = "INPUT2"; - - size_t num_requests = 4; - size_t num_threads = 1; - std::string json_str; - - const auto& ParameterizeTensors{[&]() { - SUBCASE("one tensor") - { - tensors.push_back(model_tensor1); - - json_str = R"({ - "data": [ - { "INPUT1": [1] }, - { 
"INPUT1": [2] }, - { "INPUT1": [3] } - ]})"; - - switch (params.batch_size) { - case 1: - expected_results = {{1}, {2}, {3}, {1}}; - break; - case 2: - expected_results = {{1, 2}, {3, 1}, {2, 3}, {1, 2}}; - break; - case 4: - expected_results = { - {1, 2, 3, 1}, {2, 3, 1, 2}, {3, 1, 2, 3}, {1, 2, 3, 1}}; - break; - default: - REQUIRE(false); - } - } - SUBCASE("two tensors") - { - tensors.push_back(model_tensor1); - tensors.push_back(model_tensor2); - - json_str = R"({ - "data": [ - { "INPUT1": [1], "INPUT2": [21] }, - { "INPUT1": [2], "INPUT2": [22] }, - { "INPUT1": [3], "INPUT2": [23] } - ]})"; - - switch (params.batch_size) { - case 1: - expected_results = {{1, 21}, {2, 22}, {3, 23}, {1, 21}}; - break; - case 2: - expected_results = { - {1, 2, 21, 22}, {3, 1, 23, 21}, {2, 3, 22, 23}, {1, 2, 21, 22}}; - break; - case 4: - expected_results = { - {1, 2, 3, 1, 21, 22, 23, 21}, - {2, 3, 1, 2, 22, 23, 21, 22}, - {3, 1, 2, 3, 23, 21, 22, 23}, - {1, 2, 3, 1, 21, 22, 23, 21}}; - break; - default: - REQUIRE(false); - } - } - }}; - - const auto& ParameterizeBatchSize{[&]() { - SUBCASE("batchsize = 1") - { - params.batch_size = 1; - ParameterizeTensors(); - } - SUBCASE("batchsize = 2") - { - params.batch_size = 2; - ParameterizeTensors(); - } - SUBCASE("batchsize = 4") - { - params.batch_size = 4; - ParameterizeTensors(); - } - }}; - - const auto& ParameterizeSharedMemory{[&]() { - SUBCASE("no_shared_memory") - { - params.shared_memory_type = SharedMemoryType::NO_SHARED_MEMORY; - ParameterizeBatchSize(); - } - SUBCASE("system_shared_memory") - { - params.shared_memory_type = SharedMemoryType::SYSTEM_SHARED_MEMORY; - ParameterizeBatchSize(); - } - SUBCASE("cuda_shared_memory") - { - params.shared_memory_type = SharedMemoryType::CUDA_SHARED_MEMORY; - ParameterizeBatchSize(); - } - }}; - - const auto& ParameterizeNumThreads{[&]() { - SUBCASE("1 thread") - { - num_threads = 1; - ParameterizeSharedMemory(); - } - SUBCASE("2 threads") - { - num_threads = 2; - ParameterizeSharedMemory(); - } - }}; - - ParameterizeNumThreads(); - - TestRequestRateManager trrm(params, is_sequence_model); - - trrm.TestCustomData( - num_requests, num_threads, tensors, json_str, expected_results, - expect_init_failure, expect_thread_failure); -} - -TEST_CASE("custom_json_data: handling is_shape_tensor") -{ - // Test the case where is_shape_tensor is true and is the same - // across a batch: it only ends up in each batch once - PerfAnalyzerParameters params{}; - params.user_data = {"fake_file.json"}; - bool is_sequence_model{false}; - - std::vector> expected_results; - std::vector tensors; - bool expect_init_failure = false; - bool expect_thread_failure = false; - - ModelTensor model_tensor1{}; - model_tensor1.datatype_ = "INT32"; - model_tensor1.is_optional_ = false; - model_tensor1.is_shape_tensor_ = false; - model_tensor1.name_ = "INPUT1"; - model_tensor1.shape_ = {1}; - - ModelTensor model_tensor2 = model_tensor1; - model_tensor2.name_ = "INPUT2"; - - std::string json_str{R"({ - "data": [ - { "INPUT1": [1], "INPUT2": [21] }, - { "INPUT1": [1], "INPUT2": [22] }, - { "INPUT1": [1], "INPUT2": [23] } - ]})"}; - - model_tensor1.is_shape_tensor_ = true; - model_tensor2.is_optional_ = true; - - size_t num_requests = 4; - size_t num_threads = 1; - - const auto& ParameterizeBatch{[&]() { - SUBCASE("batch 1") - { - params.batch_size = 1; - expected_results = {{1, 21}, {1, 22}, {1, 23}, {1, 21}}; - } - SUBCASE("batch 2") - { - params.batch_size = 2; - expected_results = {{1, 21, 22}, {1, 23, 21}, {1, 22, 23}, {1, 21, 22}}; - } - 
SUBCASE("batch 4") - { - params.batch_size = 4; - expected_results = { - {1, 21, 22, 23, 21}, - {1, 22, 23, 21, 22}, - {1, 23, 21, 22, 23}, - {1, 21, 22, 23, 21}}; - } - }}; - - const auto& ParameterizeNumThreads{[&]() { - SUBCASE("1 thread") - { - num_threads = 1; - ParameterizeBatch(); - } - SUBCASE("2 threads") - { - num_threads = 2; - ParameterizeBatch(); - } - }}; - - // Being optional should have no impact - SUBCASE("optional = 0,0") - { - model_tensor1.is_optional_ = false; - model_tensor2.is_optional_ = false; - ParameterizeNumThreads(); - } - SUBCASE("optional = 0,1") - { - model_tensor1.is_optional_ = false; - model_tensor2.is_optional_ = true; - ParameterizeNumThreads(); - } - SUBCASE("optional = 1,0") - { - model_tensor1.is_optional_ = true; - model_tensor2.is_optional_ = false; - ParameterizeNumThreads(); - } - SUBCASE("optional = 1,1") - { - model_tensor1.is_optional_ = true; - model_tensor2.is_optional_ = true; - ParameterizeNumThreads(); - } - - - TestRequestRateManager trrm(params, is_sequence_model); - - tensors.push_back(model_tensor1); - tensors.push_back(model_tensor2); - - trrm.TestCustomData( - num_requests, num_threads, tensors, json_str, expected_results, - expect_init_failure, expect_thread_failure); -} - -TEST_CASE("custom_json_data: handling missing optional is_shape_tensor") -{ - // Test the case where is_shape_tensor is true and is_optional_ is true - // and data for that input is completely omitted - PerfAnalyzerParameters params{}; - params.user_data = {"fake_file.json"}; - bool is_sequence_model{false}; - - std::vector> expected_results; - std::vector tensors; - bool expect_init_failure = false; - bool expect_thread_failure = false; - - ModelTensor model_tensor1{}; - model_tensor1.datatype_ = "INT32"; - model_tensor1.is_optional_ = true; - model_tensor1.is_shape_tensor_ = true; - model_tensor1.name_ = "INPUT1"; - model_tensor1.shape_ = {1}; - - ModelTensor model_tensor2 = model_tensor1; - model_tensor2.is_shape_tensor_ = false; - model_tensor2.is_optional_ = false; - model_tensor2.name_ = "INPUT2"; - - std::string json_str{R"({ - "data": [ - { "INPUT2": [21] }, - { "INPUT2": [22] }, - { "INPUT2": [23] } - ]})"}; - - - size_t num_requests = 4; - size_t num_threads = 1; - - const auto& ParameterizeBatch{[&]() { - SUBCASE("batch 1") - { - params.batch_size = 1; - expected_results = {{21}, {22}, {23}, {21}}; - } - SUBCASE("batch 2") - { - params.batch_size = 2; - expected_results = {{21, 22}, {23, 21}, {22, 23}, {21, 22}}; - } - SUBCASE("batch 4") - { - params.batch_size = 4; - expected_results = { - {21, 22, 23, 21}, - {22, 23, 21, 22}, - {23, 21, 22, 23}, - {21, 22, 23, 21}}; - } - }}; - - const auto& ParameterizeNumThreads{[&]() { - SUBCASE("1 thread") - { - num_threads = 1; - ParameterizeBatch(); - } - SUBCASE("2 threads") - { - num_threads = 2; - ParameterizeBatch(); - } - }}; - - SUBCASE("no shm") - { - params.shared_memory_type = SharedMemoryType::NO_SHARED_MEMORY; - ParameterizeNumThreads(); - } - SUBCASE("system shm") - { - params.shared_memory_type = SharedMemoryType::SYSTEM_SHARED_MEMORY; - ParameterizeNumThreads(); - expect_init_failure = true; - } - SUBCASE("cuda shm") - { - params.shared_memory_type = SharedMemoryType::CUDA_SHARED_MEMORY; - ParameterizeNumThreads(); - expect_init_failure = true; - } - - TestRequestRateManager trrm(params, is_sequence_model); - - tensors.push_back(model_tensor1); - tensors.push_back(model_tensor2); - - trrm.TestCustomData( - num_requests, num_threads, tensors, json_str, expected_results, - expect_init_failure, 
expect_thread_failure); -} - -TEST_CASE("custom_json_data: handling invalid is_shape_tensor") -{ - PerfAnalyzerParameters params{}; - params.user_data = {"fake_file.json"}; - bool is_sequence_model{false}; - - std::vector> expected_results; - std::vector tensors; - bool expect_init_failure = false; - bool expect_thread_failure = false; - - ModelTensor model_tensor1{}; - model_tensor1.datatype_ = "INT32"; - model_tensor1.is_optional_ = true; - model_tensor1.is_shape_tensor_ = true; - model_tensor1.name_ = "INPUT1"; - model_tensor1.shape_ = {1}; - - ModelTensor model_tensor2 = model_tensor1; - model_tensor2.name_ = "INPUT2"; - - size_t num_requests = 4; - size_t num_threads = 1; - - std::string json_str; - - - const auto& ParameterizeJson{[&]() { - SUBCASE("different data") - { - json_str = R"({ - "data": [ - { "INPUT1": [1], "INPUT2": [21] }, - { "INPUT1": [2], "INPUT2": [22] }, - { "INPUT1": [3], "INPUT2": [23] } - ]})"; - expected_results = {{1, 21}, {2, 22}, {3, 23}, {1, 21}}; - } - SUBCASE("missing data") - { - json_str = R"({ - "data": [ - { "INPUT2": [21] }, - { "INPUT2": [22] } - ]})"; - expected_results = {{21}, {22}, {21}, {22}}; - } - }}; - - const auto& ParameterizeNumThreads{[&]() { - SUBCASE("1 thread") - { - num_threads = 1; - ParameterizeJson(); - } - SUBCASE("2 threads") - { - num_threads = 2; - ParameterizeJson(); - } - }}; - - SUBCASE("no batching is ok") - { - params.batch_size = 1; - ParameterizeNumThreads(); - } - SUBCASE("batching - no shm") - { - params.batch_size = 2; - params.shared_memory_type = SharedMemoryType::NO_SHARED_MEMORY; - expect_init_failure = true; - ParameterizeNumThreads(); - } - SUBCASE("batching - shm") - { - params.batch_size = 2; - params.shared_memory_type = SharedMemoryType::SYSTEM_SHARED_MEMORY; - expect_init_failure = true; - ParameterizeNumThreads(); - } - - TestRequestRateManager trrm(params, is_sequence_model); - - tensors.push_back(model_tensor1); - tensors.push_back(model_tensor2); - - trrm.TestCustomData( - num_requests, num_threads, tensors, json_str, expected_results, - expect_init_failure, expect_thread_failure); -} - - -TEST_CASE("custom_json_data: handling of optional tensors") -{ - PerfAnalyzerParameters params{}; - params.user_data = {"fake_file.json"}; - bool is_sequence_model{false}; - - std::vector> expected_results; - std::vector tensors; - bool expect_init_failure = false; - bool expect_thread_failure = false; - - ModelTensor model_tensor1{}; - model_tensor1.datatype_ = "INT32"; - model_tensor1.is_optional_ = false; - model_tensor1.is_shape_tensor_ = false; - model_tensor1.name_ = "INPUT1"; - model_tensor1.shape_ = {1}; - - ModelTensor model_tensor2 = model_tensor1; - model_tensor2.name_ = "INPUT2"; - - std::string json_str{R"({ - "data": [ - { "INPUT1": [1] }, - { "INPUT1": [2], "INPUT2": [22] }, - { "INPUT1": [3] } - ]})"}; - - size_t num_requests = 4; - size_t num_threads = 1; - - const auto& ParameterizeNumThreads{[&]() { - SUBCASE("1 thread") - { - num_threads = 1; - } - SUBCASE("2 threads") - { - num_threads = 2; - } - }}; - - SUBCASE("normal") - { - model_tensor2.is_optional_ = true; - params.batch_size = 1; - expected_results = {{1}, {2, 22}, {3}, {1}}; - ParameterizeNumThreads(); - } - SUBCASE("tensor not optional -- expect parsing fail") - { - model_tensor2.is_optional_ = false; - expect_init_failure = true; - ParameterizeNumThreads(); - } - SUBCASE("shared memory not supported") - { - model_tensor2.is_optional_ = true; - params.shared_memory_type = SharedMemoryType::SYSTEM_SHARED_MEMORY; - // FIXME: TMA-765 - 
-
-
-TEST_CASE("custom_json_data: handling of optional tensors")
-{
-  PerfAnalyzerParameters params{};
-  params.user_data = {"fake_file.json"};
-  bool is_sequence_model{false};
-
-  std::vector<std::vector<int32_t>> expected_results;
-  std::vector<ModelTensor> tensors;
-  bool expect_init_failure = false;
-  bool expect_thread_failure = false;
-
-  ModelTensor model_tensor1{};
-  model_tensor1.datatype_ = "INT32";
-  model_tensor1.is_optional_ = false;
-  model_tensor1.is_shape_tensor_ = false;
-  model_tensor1.name_ = "INPUT1";
-  model_tensor1.shape_ = {1};
-
-  ModelTensor model_tensor2 = model_tensor1;
-  model_tensor2.name_ = "INPUT2";
-
-  std::string json_str{R"({
-  "data": [
-    { "INPUT1": [1] },
-    { "INPUT1": [2], "INPUT2": [22] },
-    { "INPUT1": [3] }
-  ]})"};
-
-  size_t num_requests = 4;
-  size_t num_threads = 1;
-
-  const auto& ParameterizeNumThreads{[&]() {
-    SUBCASE("1 thread")
-    {
-      num_threads = 1;
-    }
-    SUBCASE("2 threads")
-    {
-      num_threads = 2;
-    }
-  }};
-
-  SUBCASE("normal")
-  {
-    model_tensor2.is_optional_ = true;
-    params.batch_size = 1;
-    expected_results = {{1}, {2, 22}, {3}, {1}};
-    ParameterizeNumThreads();
-  }
-  SUBCASE("tensor not optional -- expect parsing fail")
-  {
-    model_tensor2.is_optional_ = false;
-    expect_init_failure = true;
-    ParameterizeNumThreads();
-  }
-  SUBCASE("shared memory not supported")
-  {
-    model_tensor2.is_optional_ = true;
-    params.shared_memory_type = SharedMemoryType::SYSTEM_SHARED_MEMORY;
-    // FIXME: TMA-765 - Shared memory mode does not support optional inputs
-    // currently, and will be implemented in the associated story.
-    expect_init_failure = true;
-    ParameterizeNumThreads();
-  }
-  SUBCASE("batching with mismatching data")
-  {
-    model_tensor2.is_optional_ = true;
-    params.batch_size = 2;
-    // For batch sizes larger than 1, the same set of inputs
-    // must be specified for each batch. You cannot use a different
-    // set of optional inputs for each individual batch.
-    expect_init_failure = true;
-    ParameterizeNumThreads();
-  }
-
-  TestRequestRateManager trrm(params, is_sequence_model);
-
-  tensors.push_back(model_tensor1);
-  tensors.push_back(model_tensor2);
-
-  trrm.TestCustomData(
-      num_requests, num_threads, tensors, json_str, expected_results,
-      expect_init_failure, expect_thread_failure);
-}
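The two failing subcases above encode a constraint of the custom JSON input format: with a batch size larger than one, consecutive data entries are concatenated into a single batch, so every entry must supply the same set of inputs. An illustrative contrast (hypothetical data, INPUT2 assumed optional in the model config):

// Accepted with batch_size 2: both entries provide the same inputs.
static constexpr const char* kConsistentJson = R"({
  "data": [
    { "INPUT1": [1], "INPUT2": [21] },
    { "INPUT1": [2], "INPUT2": [22] }
  ]})";

// Rejected with batch_size 2: the entries disagree on whether INPUT2
// is present, so they cannot be folded into one batch.
static constexpr const char* kMismatchedJson = R"({
  "data": [
    { "INPUT1": [1] },
    { "INPUT1": [2], "INPUT2": [22] }
  ]})";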
-
-TEST_CASE("custom_json_data: multiple streams")
-{
-  PerfAnalyzerParameters params{};
-  params.user_data = {"fake_file.json"};
-  params.num_of_sequences = 1;
-  bool is_sequence_model{false};
-
-  std::vector<std::vector<int32_t>> expected_results;
-  std::vector<ModelTensor> tensors;
-  bool expect_init_failure = false;
-  bool expect_thread_failure = false;
-
-  ModelTensor model_tensor1{};
-  model_tensor1.datatype_ = "INT32";
-  model_tensor1.is_optional_ = false;
-  model_tensor1.is_shape_tensor_ = false;
-  model_tensor1.name_ = "INPUT1";
-  model_tensor1.shape_ = {1};
-
-  ModelTensor model_tensor2 = model_tensor1;
-  model_tensor2.name_ = "INPUT2";
-
-  std::string json_str{R"({
-  "data": [[
-    { "INPUT1": [1], "INPUT2": [21] },
-    { "INPUT1": [2], "INPUT2": [22] },
-    { "INPUT1": [3], "INPUT2": [23] }
-  ],[
-    { "INPUT1": [201], "INPUT2": [221] },
-    { "INPUT1": [202], "INPUT2": [222] }
-  ]]})"};
-
-  size_t num_requests = 10;
-  size_t num_threads = 1;
-
-  const auto& ParameterizeMemory{[&]() {
-    SUBCASE("No shared memory")
-    {
-      params.shared_memory_type = NO_SHARED_MEMORY;
-    }
-    SUBCASE("system shared memory")
-    {
-      params.shared_memory_type = SYSTEM_SHARED_MEMORY;
-    }
-    SUBCASE("cuda shared memory")
-    {
-      params.shared_memory_type = CUDA_SHARED_MEMORY;
-    }
-  }};
-
-  const auto& ParameterizeNumThreads{[&]() {
-    SUBCASE("1 thread")
-    {
-      num_threads = 1;
-      ParameterizeMemory();
-    }
-    SUBCASE("2 threads")
-    {
-      num_threads = 2;
-      ParameterizeMemory();
-    }
-  }};
-
-  SUBCASE("yes sequence")
-  {
-    // Sequences will randomly pick among all streams
-    // (Although this test is hardcoded to pick ID 1 twice, and then ID 0
-    // forever after)
-    is_sequence_model = true;
-    expected_results = {{201, 221}, {202, 222}, {201, 221}, {202, 222},
-                        {1, 21},    {2, 22},    {3, 23},    {1, 21},
-                        {2, 22},    {3, 23}};
-    ParameterizeNumThreads();
-  }
-  SUBCASE("no sequence")
-  {
-    // For the case of no sequences, only a single data stream is supported.
-    // The rest will be ignored
-    is_sequence_model = false;
-    expected_results = {{1, 21}, {2, 22}, {3, 23}, {1, 21}, {2, 22},
-                        {3, 23}, {1, 21}, {2, 22}, {3, 23}, {1, 21}};
-    ParameterizeNumThreads();
-  }
-
-  TestRequestRateManager trrm(params, is_sequence_model);
-
-  tensors.push_back(model_tensor1);
-  tensors.push_back(model_tensor2);
-
-  trrm.CustomDataTestSetup(tensors, json_str, expect_init_failure, num_threads);
-
-  if (is_sequence_model) {
-    // Force GetNewDataStreamId to return 1 twice and 0 every time after
-    EXPECT_CALL(
-        *std::dynamic_pointer_cast<MockSequenceManager>(trrm.sequence_manager_),
-        GetNewDataStreamId())
-        .WillOnce(testing::Return(1))
-        .WillOnce(testing::Return(1))
-        .WillRepeatedly(testing::Return(0));
-  } else {
-    // Expect that GetNewDataStreamId will never be called
-    EXPECT_CALL(
-        *std::dynamic_pointer_cast<MockSequenceManager>(trrm.sequence_manager_),
-        GetNewDataStreamId())
-        .Times(0);
-  }
-  auto thread_status =
-      trrm.CustomDataTestSendRequests(num_requests, num_threads);
-  trrm.CustomDataTestCheckResults(
-      thread_status, expect_thread_failure, expected_results);
-}
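The stream selection in the test above leans on gMock's action ordering: WillOnce actions are consumed one per call, in the order written, and WillRepeatedly then answers every subsequent call. A self-contained sketch of that behavior (hypothetical mock class, gtest/gmock runner assumed):

#include <cstdint>

#include "gmock/gmock.h"
#include "gtest/gtest.h"

// Hypothetical stand-in for the mocked sequence manager above.
class MockStreamPicker {
 public:
  MOCK_METHOD(uint64_t, GetNewDataStreamId, ());
};

TEST(WillOnceOrdering, ActionsAreConsumedInOrder)
{
  MockStreamPicker picker;
  EXPECT_CALL(picker, GetNewDataStreamId())
      .WillOnce(testing::Return(1))         // first call
      .WillOnce(testing::Return(1))         // second call
      .WillRepeatedly(testing::Return(0));  // third call onward

  EXPECT_EQ(picker.GetNewDataStreamId(), 1);
  EXPECT_EQ(picker.GetNewDataStreamId(), 1);
  EXPECT_EQ(picker.GetNewDataStreamId(), 0);
  EXPECT_EQ(picker.GetNewDataStreamId(), 0);
}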
-
-/// Verify Shared Memory api calls
-///
-TEST_CASE("Request rate - Shared memory methods")
-{
-  PerfAnalyzerParameters params;
-  bool is_sequence = false;
-  bool is_decoupled = false;
-  bool use_mock_infer = true;
-
-  const std::string json_str{R"(
-  {
-    "data": [
-      {
-        "INPUT0": [2123456789]
-      }
-    ]
-  }
-  )"};
-
-
-  MockInputPipeline mip = TestLoadManagerBase::ProcessCustomJsonData(json_str);
-
-  cb::MockClientStats::SharedMemoryStats expected_stats;
-  SUBCASE("System shared memory usage")
-  {
-    params.shared_memory_type = SYSTEM_SHARED_MEMORY;
-    TestRequestRateManager trrm(
-        params, is_sequence, is_decoupled, use_mock_infer);
-
-    trrm.infer_data_manager_ =
-        MockInferDataManagerFactory::CreateMockInferDataManager(
-            params.max_threads, params.batch_size, params.shared_memory_type,
-            params.output_shm_size, params.request_parameters,
-            mip.mock_model_parser_, trrm.factory_, mip.mock_data_loader_);
-
-    trrm.parser_ = mip.mock_model_parser_;
-    trrm.data_loader_ = mip.mock_data_loader_;
-    trrm.InitManager(
-        params.string_length, params.string_data, params.zero_input,
-        params.user_data, params.start_sequence_id, params.sequence_id_range,
-        params.sequence_length, params.sequence_length_specified,
-        params.sequence_length_variation);
-
-    expected_stats.num_unregister_all_shared_memory_calls = 1;
-    expected_stats.num_register_system_shared_memory_calls = 1;
-    expected_stats.num_create_shared_memory_region_calls = 1;
-    expected_stats.num_map_shared_memory_calls = 1;
-    trrm.CheckSharedMemory(expected_stats);
-  }
-
-  SUBCASE("Cuda shared memory usage")
-  {
-    params.shared_memory_type = CUDA_SHARED_MEMORY;
-    TestRequestRateManager trrm(
-        params, is_sequence, is_decoupled, use_mock_infer);
-
-    trrm.infer_data_manager_ =
-        MockInferDataManagerFactory::CreateMockInferDataManager(
-            params.max_threads, params.batch_size, params.shared_memory_type,
-            params.output_shm_size, params.request_parameters,
-            mip.mock_model_parser_, trrm.factory_, mip.mock_data_loader_);
-
-    trrm.parser_ = mip.mock_model_parser_;
-    trrm.data_loader_ = mip.mock_data_loader_;
-    trrm.InitManager(
-        params.string_length, params.string_data, params.zero_input,
-        params.user_data, params.start_sequence_id, params.sequence_id_range,
-        params.sequence_length, params.sequence_length_specified,
-        params.sequence_length_variation);
-
-    expected_stats.num_unregister_all_shared_memory_calls = 1;
-    expected_stats.num_register_cuda_shared_memory_calls = 1;
-    trrm.CheckSharedMemory(expected_stats);
-  }
-
-  SUBCASE("No shared memory usage")
-  {
-    params.shared_memory_type = NO_SHARED_MEMORY;
-    TestRequestRateManager trrm(
-        params, is_sequence, is_decoupled, use_mock_infer);
-
-    trrm.infer_data_manager_ =
-        MockInferDataManagerFactory::CreateMockInferDataManager(
-            params.max_threads, params.batch_size, params.shared_memory_type,
-            params.output_shm_size, params.request_parameters,
-            mip.mock_model_parser_, trrm.factory_, mip.mock_data_loader_);
-
-    trrm.parser_ = mip.mock_model_parser_;
-    trrm.data_loader_ = mip.mock_data_loader_;
-    trrm.InitManager(
-        params.string_length, params.string_data, params.zero_input,
-        params.user_data, params.start_sequence_id, params.sequence_id_range,
-        params.sequence_length, params.sequence_length_specified,
-        params.sequence_length_variation);
-
-    trrm.CheckSharedMemory(expected_stats);
-  }
-}
-
-TEST_CASE("Request rate - Shared memory infer input calls")
-{
-  PerfAnalyzerParameters params{};
-  bool is_sequence_model{false};
-
-  const auto& ParameterizeAsyncAndStreaming{[&]() {
-    SUBCASE("sync non-streaming")
-    {
-      params.async = false;
-      params.streaming = false;
-    }
-    SUBCASE("async non-streaming")
-    {
-      params.async = true;
-      params.streaming = false;
-    }
-    SUBCASE("async streaming")
-    {
-      params.async = true;
-      params.streaming = true;
-    }
-  }};
-
-  const auto& ParameterizeSequence{[&]() {
-    SUBCASE("non-sequence")
-    {
-      is_sequence_model = false;
-      ParameterizeAsyncAndStreaming();
-    }
-    SUBCASE("sequence")
-    {
-      is_sequence_model = true;
-      params.num_of_sequences = 1;
-      ParameterizeAsyncAndStreaming();
-    }
-  }};
-
-  const auto& ParameterizeMemory{[&]() {
-    SUBCASE("No shared memory")
-    {
-      params.shared_memory_type = NO_SHARED_MEMORY;
-      ParameterizeSequence();
-    }
-    SUBCASE("system shared memory")
-    {
-      params.shared_memory_type = SYSTEM_SHARED_MEMORY;
-      ParameterizeSequence();
-    }
-    SUBCASE("cuda shared memory")
-    {
-      params.shared_memory_type = CUDA_SHARED_MEMORY;
-      ParameterizeSequence();
-    }
-  }};
-
-  ParameterizeMemory();
-  TestRequestRateManager trrm(params, is_sequence_model);
-
-  const std::string json_str{R"(
-  {
-    "data": [
-      {
-        "INPUT0": [2000000000]
-      },
-      {
-        "INPUT0": [2000000001]
-      }
-    ]
-  }
-  )"};
-  MockInputPipeline mip =
-      TestLoadManagerBase::ProcessCustomJsonData(json_str, is_sequence_model);
-
-  trrm.infer_data_manager_ =
-      MockInferDataManagerFactory::CreateMockInferDataManager(
-          params.max_threads, params.batch_size, params.shared_memory_type,
-          params.output_shm_size, params.request_parameters,
-          mip.mock_model_parser_, trrm.factory_, mip.mock_data_loader_);
-
-  std::shared_ptr<ThreadStat> thread_stat{std::make_shared<ThreadStat>()};
-  std::shared_ptr<ThreadConfig> thread_config{
-      std::make_shared<ThreadConfig>(0)};
-
-  trrm.parser_ = mip.mock_model_parser_;
-  trrm.data_loader_ = mip.mock_data_loader_;
-  trrm.using_json_data_ = true;
-  trrm.execute_ = true;
-  trrm.batch_size_ = 1;
-  trrm.max_threads_ = 1;
-
-  RateSchedulePtr_t schedule = std::make_shared<RateSchedule>();
-  schedule->intervals = NanoIntervals{
-      milliseconds(4), milliseconds(8), milliseconds(12), milliseconds(16)};
-  schedule->duration = nanoseconds{16000000};
-
-  trrm.InitManager(
-      params.string_length, params.string_data, params.zero_input,
-      params.user_data, params.start_sequence_id, params.sequence_id_range,
-      params.sequence_length, params.sequence_length_specified,
-      params.sequence_length_variation);
-
-  trrm.start_time_ = std::chrono::steady_clock::now();
-
-  std::shared_ptr<IWorker> worker{trrm.MakeWorker(thread_stat, thread_config)};
-  std::dynamic_pointer_cast<RequestRateWorker>(worker)->SetSchedule(schedule);
-  std::future<void> infer_future{std::async(&IWorker::Infer, worker)};
-
-  std::this_thread::sleep_for(milliseconds(18));
-
-  early_exit = true;
-  infer_future.get();
-
-  const auto& actual_append_raw_calls{trrm.stats_->num_append_raw_calls};
-  const auto& actual_set_shared_memory_calls{
-      trrm.stats_->num_set_shared_memory_calls};
-
-  if (params.shared_memory_type == NO_SHARED_MEMORY) {
-    CHECK(actual_append_raw_calls > 0);
-    CHECK(actual_set_shared_memory_calls == 0);
-  } else {
-    CHECK(actual_append_raw_calls == 0);
-    CHECK(actual_set_shared_memory_calls > 0);
-  }
-}
-
-TEST_CASE("request_rate_deadlock")
-{
-  PerfAnalyzerParameters params{};
-  params.max_concurrency = 6;
-  bool is_sequence_model{true};
-  bool some_infer_failures{false};
-
-  const auto& ParameterizeSync{[&]() {
-    SUBCASE("sync")
-    {
-      params.async = false;
-      params.streaming = false;
-    }
-    SUBCASE("async no streaming")
-    {
-      params.async = true;
-      params.streaming = false;
-    }
-    SUBCASE("async streaming")
-    {
-      params.async = true;
-      params.streaming = true;
-    }
-  }};
-
-  const auto& ParameterizeThreads{[&]() {
-    SUBCASE("2 thread")
-    {
-      ParameterizeSync();
-      params.max_threads = 2;
-    }
-    SUBCASE("10 thread")
-    {
-      ParameterizeSync();
-      params.max_threads = 10;
-    }
-  }};
-
-  const auto& ParameterizeSequence{[&]() {
-    SUBCASE("non-sequence")
-    {
-      ParameterizeThreads();
-      is_sequence_model = false;
-    }
-    SUBCASE("sequence")
-    {
-      ParameterizeThreads();
-      is_sequence_model = true;
-      params.num_of_sequences = 3;
-    }
-  }};
-
-  const auto& ParameterizeFailures{[&]() {
-    SUBCASE("yes_failures")
-    {
-      some_infer_failures = true;
-      ParameterizeSequence();
-    }
-    SUBCASE("no_failures")
-    {
-      some_infer_failures = false;
-      ParameterizeSequence();
-    }
-  }};
-
-  std::vector<uint64_t> delays;
-
-  const auto& ParameterizeDelays{[&]() {
-    SUBCASE("no_delay")
-    {
-      delays = {0};
-      ParameterizeFailures();
-    }
-    SUBCASE("random_delay")
-    {
-      delays = {1, 5, 20, 4, 3};
-      ParameterizeFailures();
-    }
-  }};
-
-  ParameterizeDelays();
-
-  TestRequestRateManager trrm(params, is_sequence_model);
-  trrm.stats_->SetDelays(delays);
-  trrm.InitManager(
-      params.string_length, params.string_data, params.zero_input,
-      params.user_data, params.start_sequence_id, params.sequence_id_range,
-      params.sequence_length, params.sequence_length_specified,
-      params.sequence_length_variation);
-
-  // Sometimes have a request fail
-  if (some_infer_failures) {
-    trrm.stats_->SetReturnStatuses({true, true, true, false});
-  }
-
-  trrm.TestTimeouts();
-}
-
-TEST_CASE("request_rate_overhead")
-{
-  uint rate;
-  PerfAnalyzerParameters params{};
-  SUBCASE("sync, rate 10")
-  {
-    params.async = false;
-    rate = 10;
-  }
-  SUBCASE("sync, rate 100")
-  {
-    params.async = false;
-    rate = 100;
-  }
-  SUBCASE("async, rate 10")
-  {
-    params.async = true;
-    rate = 10;
-  }
-  SUBCASE("async, rate 100")
-  {
-    params.async = true;
-    rate = 100;
-  }
-  TestRequestRateManager trrm(params, false);
-  trrm.InitManager(
-      params.string_length, params.string_data, params.zero_input,
-      params.user_data, params.start_sequence_id, params.sequence_id_range,
-      params.sequence_length, params.sequence_length_specified,
-      params.sequence_length_variation);
-
-  trrm.TestOverhead(rate);
-}
-
-std::chrono::steady_clock::time_point mk_start{};
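The expected counts in the send-rate test that follows reduce to simple arithmetic: at 1000 requests/second a 50 ms window yields about 50 requests (hence expected_count == time_ms), while serial sequences combined with responses slower than the window cap the count at one outstanding request per sequence. A sketch of that reasoning, not the manager's actual logic:

#include <algorithm>
#include <cstddef>

// Expected sends in a measurement window: rate * window, unless serial
// sequences and slow responses cap each sequence at one in-flight request.
size_t
ExpectedSendCount(
    size_t rate_per_sec, size_t window_ms, bool serial_sequences,
    size_t num_sequences, size_t response_delay_ms)
{
  const size_t uncapped = rate_per_sec * window_ms / 1000;
  if (serial_sequences && response_delay_ms >= window_ms) {
    return std::min(uncapped, num_sequences);  // 5 in the subcase below
  }
  return uncapped;  // 50 for (1000 req/s, 50 ms)
}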
-
-TEST_CASE(
-    "send_request_rate_request_rate_manager: testing logic around detecting "
-    "send request count")
-{
-  PerfAnalyzerParameters params{};
-
-  std::vector<uint64_t> delays;
-  bool is_sequence_model = false;
-  size_t rate = 1000;
-  size_t time_ms = 50;
-  size_t expected_count = time_ms;
-
-  SUBCASE("sync")
-  {
-    params.async = false;
-    delays = {0};
-  }
-  SUBCASE("async - fast response")
-  {
-    params.async = true;
-    delays = {0};
-  }
-  SUBCASE(
-      "async - slow response with sequences off should not slow down our send "
-      "rate")
-  {
-    params.async = true;
-    delays = {100};
-  }
-  SUBCASE("async - slow response with sequences on")
-  {
-    is_sequence_model = true;
-    params.async = true;
-    params.num_of_sequences = 5;
-    delays = {100};
-
-    SUBCASE("send rate can be limited if serial sequences is on")
-    {
-      params.serial_sequences = true;
-      expected_count = params.num_of_sequences;
-    }
-    SUBCASE(
-        "send rate will not be affected by response time if serial sequences "
-        "is off")
-    {
-      params.serial_sequences = false;
-    }
-  }
-
-  TestRequestRateManager trrm(params, is_sequence_model);
-
-  trrm.stats_->SetDelays(delays);
-
-  trrm.InitManager(
-      params.string_length, params.string_data, params.zero_input,
-      params.user_data, params.start_sequence_id, params.sequence_id_range,
-      params.sequence_length, params.sequence_length_specified,
-      params.sequence_length_variation);
-
-  trrm.ChangeRequestRate(rate);
-  std::this_thread::sleep_for(std::chrono::milliseconds(time_ms));
-  const size_t num_sent_requests{trrm.GetAndResetNumSentRequests()};
-  CHECK(num_sent_requests == doctest::Approx(expected_count).epsilon(0.1));
-
-  trrm.StopWorkerThreads();
-}
-
-TEST_CASE("request rate manager - Configure threads")
-{
-  PerfAnalyzerParameters params{};
-  std::vector<ThreadConfig> expected_config_values;
-  std::vector<uint32_t> expected_number_of_sequences_owned_by_thread;
-  std::vector<size_t> expected_seq_stat_index_offsets;
-  std::vector<size_t> expected_num_requests;
-  bool is_sequence_model = true;
-  bool is_decoupled_model = false;
-  bool use_mock_infer = true;
-  size_t target_num_requests = 0;
-
-  SUBCASE("normal")
-  {
-    params.max_threads = 4;
-    params.num_of_sequences = 4;
-    target_num_requests = 0;
-
-    expected_number_of_sequences_owned_by_thread = {1, 1, 1, 1};
-    expected_seq_stat_index_offsets = {0, 1, 2, 3};
-    expected_num_requests = {0, 0, 0, 0};
-  }
-
-  SUBCASE("max_threads > num_seqs")
-  {
-    params.max_threads = 10;
-    params.num_of_sequences = 4;
-    target_num_requests = 8;
-
-    expected_number_of_sequences_owned_by_thread = {1, 1, 1, 1};
-    expected_seq_stat_index_offsets = {0, 1, 2, 3};
-    expected_num_requests = {2, 2, 2, 2};
-  }
-
-  SUBCASE("num_seqs > max_threads")
-  {
-    params.max_threads = 4;
-    params.num_of_sequences = 10;
-    target_num_requests = 20;
-
-    expected_number_of_sequences_owned_by_thread = {3, 3, 2, 2};
-    expected_seq_stat_index_offsets = {0, 3, 6, 8};
-    expected_num_requests = {5, 5, 5, 5};
-  }
-
-  SUBCASE("not divisible")
-  {
-    params.max_threads = 4;
-    params.num_of_sequences = 7;
-    target_num_requests = 13;
-
-    expected_number_of_sequences_owned_by_thread = {2, 2, 2, 1};
-    expected_seq_stat_index_offsets = {0, 2, 4, 6};
-    expected_num_requests = {4, 3, 3, 3};
-  }
-
-  for (size_t i = 0; i < expected_number_of_sequences_owned_by_thread.size();
-       i++) {
-    ThreadConfig tc(i);
-    tc.num_sequences_ = expected_number_of_sequences_owned_by_thread[i];
-    tc.seq_stat_index_offset_ = expected_seq_stat_index_offsets[i];
-    tc.num_requests_ = expected_num_requests[i];
-
-    expected_config_values.push_back(tc);
-  }
-  TestRequestRateManager trrm(
-      params, is_sequence_model, is_decoupled_model, use_mock_infer);
-  trrm.TestConfigureThreads(expected_config_values, target_num_requests);
-}
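The expectations above all follow one even-split rule: sequences and requests are divided across threads as evenly as possible, earlier threads absorb any remainder, and each thread's sequence-stat offset is the running total of sequences handed out so far. A sketch of that arithmetic (not the manager's implementation; for sequence models the thread count is effectively min(max_threads, num_of_sequences)):

#include <cstddef>
#include <vector>

struct ExpectedThreadShare {
  size_t num_sequences;
  size_t seq_stat_index_offset;
  size_t num_requests;
};

std::vector<ExpectedThreadShare>
SplitWork(size_t num_threads, size_t num_sequences, size_t total_requests)
{
  std::vector<ExpectedThreadShare> shares;
  size_t offset = 0;
  for (size_t i = 0; i < num_threads; i++) {
    // Earlier threads take one extra unit until the remainder is used up.
    const size_t seqs =
        num_sequences / num_threads + (i < num_sequences % num_threads);
    const size_t reqs =
        total_requests / num_threads + (i < total_requests % num_threads);
    shares.push_back({seqs, offset, reqs});
    offset += seqs;
  }
  return shares;
}

// SplitWork(4, 7, 13) reproduces the "not divisible" subcase:
// sequences {2, 2, 2, 1}, offsets {0, 2, 4, 6}, requests {4, 3, 3, 3}.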
-
-TEST_CASE("request rate manager - Calculate thread ids")
-{
-  PerfAnalyzerParameters params{};
-  bool is_sequence_model;
-  bool is_decoupled_model = false;
-  bool use_mock_infer = true;
-  std::vector<size_t> expected_thread_ids;
-
-  SUBCASE("normal, on sequence model")
-  {
-    is_sequence_model = true;
-    params.max_threads = 4;
-    params.num_of_sequences = 4;
-    expected_thread_ids = {0, 1, 2, 3};
-  }
-  SUBCASE("normal, not sequence model")
-  {
-    is_sequence_model = false;
-    params.max_threads = 4;
-    params.num_of_sequences = 4;
-    expected_thread_ids = {0, 1, 2, 3};
-  }
-  SUBCASE("num_seq > max_threads, on sequence model")
-  {
-    is_sequence_model = true;
-    params.max_threads = 4;
-    params.num_of_sequences = 5;
-    expected_thread_ids = {0, 1, 2, 3, 0};
-  }
-  SUBCASE("num_seq > max_threads, not sequence model")
-  {
-    is_sequence_model = false;
-    params.max_threads = 4;
-    params.num_of_sequences = 5;
-    expected_thread_ids = {0, 1, 2, 3};
-  }
-  SUBCASE("max_threads > num_seq, on sequence model")
-  {
-    is_sequence_model = true;
-    params.max_threads = 5;
-    params.num_of_sequences = 4;
-    expected_thread_ids = {0, 1, 2, 3};
-  }
-  SUBCASE("max_threads > num_seq, not sequence model")
-  {
-    is_sequence_model = false;
-    params.max_threads = 5;
-    params.num_of_sequences = 4;
-    expected_thread_ids = {0, 1, 2, 3, 4};
-  }
-  SUBCASE("large example")
-  {
-    is_sequence_model = true;
-    params.max_threads = 4;
-    params.num_of_sequences = 7;
-    expected_thread_ids = {0, 1, 2, 3, 0, 1, 2};
-  }
-
-  TestRequestRateManager trrm(
-      params, is_sequence_model, is_decoupled_model, use_mock_infer);
-  trrm.TestCalculateThreadIds(expected_thread_ids);
-}
-
-TEST_CASE("request rate create schedule")
-{
-  PerfAnalyzerParameters params;
-  params.measurement_window_ms = 1000;
-  params.max_trials = 10;
-  bool is_sequence_model = false;
-  bool is_decoupled = false;
-  bool use_mock_infer = false;
-  double rate = 10;
-  std::vector<size_t> expected_worker_ratio;
-
-  SUBCASE("num_seq > max_threads, on sequence model, CONSTANT")
-  {
-    is_sequence_model = true;
-    params.max_threads = 4;
-    params.num_of_sequences = 5;
-    expected_worker_ratio = {2, 1, 1, 1};
-  }
-
-  SUBCASE("num_seq = 7, max_threads = 4, on sequence model, CONSTANT")
-  {
-    is_sequence_model = true;
-    params.max_threads = 4;
-    params.num_of_sequences = 7;
-    expected_worker_ratio = {2, 2, 2, 1};
-  }
-
-  SUBCASE("num_seq = 4, max_threads = 2, on sequence model, CONSTANT")
-  {
-    is_sequence_model = true;
-    params.max_threads = 2;
-    params.num_of_sequences = 4;
-    expected_worker_ratio = {1, 1};
-  }
-
-  SUBCASE("num_seq > max_threads, on sequence model, POISSON")
-  {
-    is_sequence_model = true;
-    params.max_threads = 4;
-    params.num_of_sequences = 5;
-    expected_worker_ratio = {2, 1, 1, 1};
-    params.request_distribution = POISSON;
-  }
-
-  TestRequestRateManager trrm(
-      params, is_sequence_model, is_decoupled, use_mock_infer);
-
-  trrm.InitManager(
-      params.string_length, params.string_data, params.zero_input,
-      params.user_data, params.start_sequence_id, params.sequence_id_range,
-      params.sequence_length, params.sequence_length_specified,
-      params.sequence_length_variation);
-  trrm.TestCreateSchedule(rate, params, expected_worker_ratio);
-}
-}}  // namespace triton::perfanalyzer
diff --git a/src/c++/perf_analyzer/test_sequence_manager.cc b/src/c++/perf_analyzer/test_sequence_manager.cc
deleted file mode 100644
index 243500b85..000000000
--- a/src/c++/perf_analyzer/test_sequence_manager.cc
+++ /dev/null
@@ -1,298 +0,0 @@
-// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "doctest.h"
-#include "mock_data_loader.h"
-#include "mock_sequence_manager.h"
-#include "sequence_manager.h"
-
-namespace triton { namespace perfanalyzer {
-
-TEST_CASE("get_sequence_id: testing the GetSequenceID function")
-{
-  MockSequenceManager msm{};
-
-  std::shared_ptr<SequenceStatus> sequence_status{
-      std::make_shared<SequenceStatus>(5)};
-
-  msm.sequence_statuses_.push_back(sequence_status);
-
-  CHECK(msm.GetSequenceID(0) == 5);
-}
-
-TEST_CASE(
-    "test_set_infer_sequence_options: testing the SetInferSequenceOptions "
-    "function")
-{
-  const uint64_t seq_id{5};
-  std::vector<std::shared_ptr<SequenceStatus>> sequence_statuses{
-      std::make_shared<SequenceStatus>(seq_id)};
-  std::uniform_int_distribution<uint64_t> distribution(0, 0);
-  const uint64_t start_sequence_id{1};
-  const uint64_t sequence_id_range{UINT32_MAX};
-  const size_t sequence_length{20};
-  const bool sequence_length_specified{false};
-  const double sequence_length_variation{0.0};
-  bool using_json_data{false};
-  std::shared_ptr<MockDataLoader> data_loader{
-      std::make_shared<MockDataLoader>()};
-  const uint32_t seq_stat_index{0};
-  const std::string model_name{"model"};
-  std::unique_ptr<cb::InferOptions> options{
-      std::make_unique<cb::InferOptions>(model_name)};
-
-  SUBCASE("start false, end false")
-  {
-    sequence_statuses[seq_stat_index]->remaining_queries_ = 2;
-
-    MockSequenceManager msm(
-        start_sequence_id, sequence_id_range, sequence_length,
-        sequence_length_specified, sequence_length_variation, using_json_data,
-        data_loader);
-    msm.sequence_statuses_ = sequence_statuses;
-    msm.curr_seq_id_ = 5;
-
-    msm.SetInferSequenceOptions(seq_stat_index, options);
-
-    CHECK(options->sequence_start_ == false);
-    CHECK(options->sequence_id_ == 5);
-    CHECK(options->sequence_end_ == false);
-  }
-  SUBCASE("start true, end false")
-  {
-    sequence_statuses[seq_stat_index]->remaining_queries_ = 0;
-
-    MockSequenceManager msm(
-        start_sequence_id, sequence_id_range, sequence_length,
-        sequence_length_specified, sequence_length_variation, using_json_data,
-        data_loader);
-    msm.sequence_statuses_ = sequence_statuses;
-    msm.curr_seq_id_ = 5;
-
-    msm.SetInferSequenceOptions(seq_stat_index, options);
-
-    CHECK(options->sequence_start_ == true);
-    CHECK(options->sequence_id_ == 6);
-    CHECK(options->sequence_end_ == false);
-  }
-  SUBCASE("start false, end true")
-  {
-    sequence_statuses[seq_stat_index]->remaining_queries_ = 1;
-
-    MockSequenceManager msm(
-        start_sequence_id, sequence_id_range, sequence_length,
-        sequence_length_specified, sequence_length_variation, using_json_data,
-        data_loader);
-    msm.sequence_statuses_ = sequence_statuses;
-    msm.curr_seq_id_ = 5;
-
-    msm.SetInferSequenceOptions(seq_stat_index, options);
-
-    CHECK(options->sequence_start_ == false);
-    CHECK(options->sequence_id_ == 5);
-    CHECK(options->sequence_end_ == true);
-  }
-  SUBCASE("start true, end true")
-  {
-    sequence_statuses[seq_stat_index]->remaining_queries_ = 0;
-    using_json_data = true;
-    data_loader->step_num_.push_back(1);
-    data_loader->data_stream_cnt_ = 1;
-
-    MockSequenceManager msm(
-        start_sequence_id, sequence_id_range, sequence_length,
-        sequence_length_specified, sequence_length_variation, using_json_data,
-        data_loader);
-    msm.sequence_statuses_ = sequence_statuses;
-    msm.curr_seq_id_ = 5;
-
-    msm.SetInferSequenceOptions(seq_stat_index, options);
-
-    CHECK(options->sequence_start_ == true);
-    CHECK(options->sequence_id_ == 6);
-    CHECK(options->sequence_end_ == true);
-  }
-}
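The four subcases above pin down how remaining_queries_ drives the sequence flags: a count of zero means the previous sequence is exhausted, so the next request starts a fresh sequence under a new ID; a count of one means the current request is the sequence's last. A condensed restatement of that bookkeeping (names follow the test, not the production class):

#include <cstdint>

struct SeqFlags {
  bool sequence_start_;
  bool sequence_end_;
  uint64_t sequence_id_;
};

// One step of the start/end logic the subcases above verify.
SeqFlags
NextSequenceFlags(uint64_t& curr_seq_id, uint64_t& remaining, uint64_t seq_len)
{
  SeqFlags flags{};
  if (remaining == 0) {
    flags.sequence_start_ = true;  // roll over to a new sequence
    curr_seq_id += 1;              // 5 -> 6 in the "start true" subcases
    remaining = seq_len;
  }
  flags.sequence_id_ = curr_seq_id;
  remaining -= 1;
  flags.sequence_end_ = (remaining == 0);  // last query of the sequence
  return flags;
}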
-
-TEST_CASE("init_new_sequence: testing the InitNewSequence function")
-{
-  const uint64_t seq_id{5};
-  std::vector<std::shared_ptr<SequenceStatus>> sequence_statuses{
-      std::make_shared<SequenceStatus>(seq_id)};
-  std::uniform_int_distribution<uint64_t> distribution(0, 0);
-  const uint64_t start_sequence_id{1};
-  const uint64_t sequence_id_range{UINT32_MAX};
-  size_t sequence_length{20};
-  bool sequence_length_specified{false};
-  const double sequence_length_variation{0.0};
-  bool using_json_data{false};
-  std::shared_ptr<MockDataLoader> data_loader{
-      std::make_shared<MockDataLoader>()};
-  int seq_stat_index{0};
-  size_t expected_sequence_length{0};
-
-  SUBCASE("not using json data")
-  {
-    MockSequenceManager msm(
-        start_sequence_id, sequence_id_range, sequence_length,
-        sequence_length_specified, sequence_length_variation, using_json_data,
-        data_loader);
-    msm.sequence_statuses_ = sequence_statuses;
-    msm.curr_seq_id_ = 5;
-
-    msm.InitNewSequence(seq_stat_index);
-
-    CHECK(msm.sequence_statuses_[seq_stat_index]->seq_id_ == 6);
-    CHECK(msm.sequence_statuses_[seq_stat_index]->remaining_queries_ > 0);
-  }
-
-  SUBCASE("using json data")
-  {
-    using_json_data = true;
-    data_loader->step_num_.push_back(5);
-    data_loader->data_stream_cnt_ = 1;
-
-    SUBCASE("sequence length not specified")
-    {
-      sequence_length_specified = false;
-      expected_sequence_length = 5;
-    }
-
-    SUBCASE("sequence length specified, smaller than input data")
-    {
-      sequence_length_specified = true;
-      sequence_length = 4;
-      expected_sequence_length = 4;
-    }
-
-    SUBCASE("sequence length specified, larger than input data")
-    {
-      sequence_length_specified = true;
-      sequence_length = 6;
-      expected_sequence_length = 6;
-    }
-
-    MockSequenceManager msm(
-        start_sequence_id, sequence_id_range, sequence_length,
-        sequence_length_specified, sequence_length_variation, using_json_data,
-        data_loader);
-    msm.sequence_statuses_ = sequence_statuses;
-    msm.curr_seq_id_ = 5;
-
-    msm.InitNewSequence(seq_stat_index);
-
-    CHECK(msm.sequence_statuses_[seq_stat_index]->seq_id_ == 6);
-    CHECK(
-        msm.sequence_statuses_[seq_stat_index]->remaining_queries_ ==
-        expected_sequence_length);
-    CHECK(
-        msm.sequence_statuses_[seq_stat_index]->sequence_length_ ==
-        expected_sequence_length);
-  }
-}
-
-TEST_CASE("get_next_seq_id: testing the GetNextSeqId function")
-{
-  std::vector<std::shared_ptr<SequenceStatus>> sequence_statuses{};
-  std::uniform_int_distribution<uint64_t> distribution(0, 0);
-  uint64_t start_sequence_id{0};
-  uint64_t sequence_id_range{0};
-  const size_t sequence_length{20};
-  const bool sequence_length_specified{false};
-  const double sequence_length_variation{0.0};
-  const bool using_json_data{false};
-  std::shared_ptr<MockDataLoader> data_loader{
-      std::make_shared<MockDataLoader>()};
-  int seq_stat_index{0};
-
-  SUBCASE("next sequence id not in use")
-  {
-    sequence_statuses.push_back(std::make_shared<SequenceStatus>(1));
-    start_sequence_id = 1;
-    sequence_id_range = 2;
-
-    MockSequenceManager msm(
-        start_sequence_id, sequence_id_range, sequence_length,
-        sequence_length_specified, sequence_length_variation, using_json_data,
-        data_loader);
-    msm.sequence_statuses_ = sequence_statuses;
-    msm.curr_seq_id_ = 3;
-
-    uint64_t result{msm.GetNextSeqId(seq_stat_index)};
-
-    CHECK(result == 2);
-  }
-
-  SUBCASE("next sequence id in use")
-  {
-    sequence_statuses.push_back(std::make_shared<SequenceStatus>(1));
-    sequence_statuses.push_back(std::make_shared<SequenceStatus>(2));
-    start_sequence_id = 1;
-    sequence_id_range = 2;
-
-    MockSequenceManager msm(
-        start_sequence_id, sequence_id_range, sequence_length,
-        sequence_length_specified, sequence_length_variation, using_json_data,
-        data_loader);
-    msm.sequence_statuses_ = sequence_statuses;
-    msm.curr_seq_id_ = 3;
-
-    uint64_t result{msm.GetNextSeqId(seq_stat_index)};
-
-    CHECK(result == 1);
-  }
-}
-
-TEST_CASE(
-    "get_random_sequence_length: testing the GetRandomSequenceLength function")
-{
-  std::vector<std::shared_ptr<SequenceStatus>> sequence_statuses{};
-  std::uniform_int_distribution<uint64_t> distribution(0, 0);
-  const uint64_t start_sequence_id{0};
-  const uint64_t sequence_id_range{0};
-  size_t sequence_length{20};
-  const bool sequence_length_specified{false};
-  const double sequence_length_variation{0.0};
-  const bool using_json_data{false};
-  std::shared_ptr<MockDataLoader> data_loader{
-      std::make_shared<MockDataLoader>()};
-  int seq_stat_index{0};
-  double offset_ratio{0.2};
-
-  MockSequenceManager msm(
-      start_sequence_id, sequence_id_range, sequence_length,
-      sequence_length_specified, sequence_length_variation, using_json_data,
-      data_loader);
-  msm.sequence_statuses_ = sequence_statuses;
-  msm.curr_seq_id_ = 3;
-
-  uint64_t result{msm.GetRandomSequenceLength(offset_ratio)};
-
-  CHECK(result >= 16);
-  CHECK(result <= 24);
-}
-
-}}  // namespace triton::perfanalyzer
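The [16, 24] bounds checked above are the base sequence length of 20 varied by the 0.2 offset ratio in both directions. A sketch of that arithmetic, assuming a uniform draw (the generator's exact distribution is not shown in this test):

#include <cstdint>
#include <random>

uint64_t
RandomLengthInRange(size_t base_length, double offset_ratio, std::mt19937& rng)
{
  const double lo = base_length * (1.0 - offset_ratio);  // 16 for (20, 0.2)
  const double hi = base_length * (1.0 + offset_ratio);  // 24 for (20, 0.2)
  std::uniform_real_distribution<double> dist(lo, hi);
  return static_cast<uint64_t>(dist(rng));
}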
diff --git a/src/c++/perf_analyzer/test_utils.h b/src/c++/perf_analyzer/test_utils.h
deleted file mode 100644
index 168aba71a..000000000
--- a/src/c++/perf_analyzer/test_utils.h
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#pragma once
-
-#include <atomic>
-#include <chrono>
-#include <cmath>
-#include <numeric>
-#include <thread>
-#include <vector>
-
-namespace triton { namespace perfanalyzer {
-
-/// This class will create a thread that will raise an error after a fixed
-/// amount of time, unless the stop function is called.
-///
-/// It can be used to detect livelock/deadlock cases in tests so that the test
-/// will be guaranteed to finish instead of hang
-///
-class TestWatchDog {
- public:
-  /// Create the watchdog
-  ///
-  /// @param max_time_ms How long (in milliseconds) until this watchdog will
-  /// raise an error
-  TestWatchDog(unsigned int max_time_ms) { start(max_time_ms); }
-
-  /// Stop the watchdog so that it will not raise any errors
-  ///
-  void stop()
-  {
-    running_ = false;
-    thread_.join();
-  }
-
- private:
-  uint sleep_interval_ms{40};
-  uint max_time_ms_;
-  std::atomic<uint> timer_;
-  std::atomic<bool> running_;
-  std::thread thread_;
-
-  void start(unsigned int max_time_ms)
-  {
-    max_time_ms_ = max_time_ms;
-    timer_ = 0;
-    running_ = true;
-    thread_ = std::thread(&TestWatchDog::loop, this);
-  }
-
-  void loop()
-  {
-    while (running_) {
-      if (timer_ >= max_time_ms_) {
-        running_ = false;
-        REQUIRE_MESSAGE(false, "WATCHDOG TIMEOUT!");
-      }
-
-      std::this_thread::sleep_for(std::chrono::milliseconds(sleep_interval_ms));
-      timer_ += sleep_interval_ms;
-    }
-  }
-};
-
-/// Calculate the average of a vector of integers
-///
-static double
-CalculateAverage(const std::vector<int64_t>& values)
-{
-  double avg =
-      std::accumulate(values.begin(), values.end(), 0.0) / values.size();
-  return avg;
-}
-
-/// Calculate the standard deviation (square root of the population variance)
-/// of a vector of integers
-///
-static double
-CalculateVariance(const std::vector<int64_t>& values, double average)
-{
-  double tmp = 0;
-  for (auto value : values) {
-    tmp += (value - average) * (value - average) / values.size();
-  }
-  double variance = std::sqrt(tmp);
-  return variance;
-}
-
-}}  // namespace triton::perfanalyzer
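Typical use of the TestWatchDog above: arm it at the start of a test that could hang, run the risky operation, then disarm it so the background thread joins cleanly. An illustrative doctest body (DoWorkThatMightHang is a hypothetical stand-in):

#include "doctest.h"
#include "test_utils.h"

static void DoWorkThatMightHang() { /* e.g. join worker threads */ }

TEST_CASE("guarded against livelock")
{
  // REQUIRE-fails the whole test if 2000 ms elapse before stop().
  triton::perfanalyzer::TestWatchDog watchdog(2000);
  DoWorkThatMightHang();
  watchdog.stop();  // disarm and join the watchdog thread
}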
diff --git a/src/c++/perf_analyzer/thread_config.h b/src/c++/perf_analyzer/thread_config.h
deleted file mode 100644
index 4c4845a6e..000000000
--- a/src/c++/perf_analyzer/thread_config.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//  * Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-//  * Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-//  * Neither the name of NVIDIA CORPORATION nor the names of its
-//    contributors may be used to endorse or promote products derived
-//    from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#pragma once
-
-namespace triton { namespace perfanalyzer {
-
-// Holds the configuration for a worker thread
-struct ThreadConfig {
-  ThreadConfig(size_t thread_id) : thread_id_(thread_id) {}
-
-  // ID of corresponding worker thread
-  size_t thread_id_{0};
-
-  // The concurrency level that the worker should produce
-  // TPA-69: This is only used in concurrency mode and shouldn't be visible in
-  // other modes
-  size_t concurrency_{0};
-
-  // The number of sequences owned by this worker
-  // TPA-69: This is only used in request-rate mode and shouldn't be visible in
-  // other modes
-  uint32_t num_sequences_{1};
-
-  // How many requests to generate before stopping. If 0, generate indefinitely
-  size_t num_requests_{0};
-
-  // The starting sequence stat index for this worker
-  size_t seq_stat_index_offset_{0};
-
-  // Whether or not the thread is issuing new inference requests
-  bool is_paused_{false};
-};
-
-
-}}  // namespace triton::perfanalyzer
diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt
index ecc33b84d..67975dd68 100644
--- a/src/python/CMakeLists.txt
+++ b/src/python/CMakeLists.txt
@@ -34,7 +34,6 @@ project(python-clients LANGUAGES C CXX)
 set(TRITON_VERSION "0.0.0" CACHE STRING "Version for the clients")
 option(TRITON_ENABLE_PYTHON_HTTP "Enable Python HTTP client libraries" OFF)
 option(TRITON_ENABLE_PYTHON_GRPC "Enable Python GRPC client libraries" OFF)
-option(TRITON_ENABLE_PERF_ANALYZER "Enable Performance Analyzer" OFF)
 option(TRITON_ENABLE_EXAMPLES "Include examples in build" OFF)
 option(TRITON_ENABLE_TESTS "Include tests in build" OFF)
 option(TRITON_ENABLE_GPU "Enable GPU support in libraries" OFF)