diff --git a/.github/workflows/jupyter-test.yml b/.github/workflows/jupyter-test.yml new file mode 100755 index 0000000000..f12cf6ad9b --- /dev/null +++ b/.github/workflows/jupyter-test.yml @@ -0,0 +1,31 @@ +# on: +# push: +# tags: +# - '*' + +# jobs: +# test: +# name: test-notebook-${{matrix.config.name}} +# runs-on: ${{matrix.config.os}} +# strategy: +# matrix: +# config: +# - {name: x86_64-macos, os: macos-latest} +# - {name: x86_64-linux, os: ubuntu-latest} +# - {name: x86_64-windows, os: windows-latest} + +# steps: +# - uses: actions/checkout@v2 + +# - name: Setup Python +# uses: actions/setup-python@v2 +# with: +# python-version: 3.7 + +# - name: Install dependencies +# run: pip install --upgrade pip && pip install jupyterlab pytest nbmake + +# - name: Run tests +# run: pytest --nbmake examples/user_guide +# env: +# PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION: python \ No newline at end of file diff --git a/README.md b/README.md index 1467befbfe..89f05f8ef4 100644 --- a/README.md +++ b/README.md @@ -2,8 +2,9 @@ nncase -[![License](https://img.shields.io/badge/license-Apache%202-blue)](https://raw.githubusercontent.com/kendryte/nncase/master/LICENSE) -[![compiler-build](https://github.com/kendryte/nncase/actions/workflows/compiler-build.yml/badge.svg)](https://github.com/kendryte/nncase/actions/workflows/compiler-build.yml) +[![GitHub repository](https://img.shields.io/badge/github-repository-blue?logo=github&style=plastic)](https://github.com/kendryte/nncase) +[![Gitee repository](https://img.shields.io/badge/gitee-repository-blue?logo=gitee&style=plastic)](https://gitee.com/kendryte/nncase) +[![GitHub release](https://img.shields.io/github/v/release/kendryte/nncase?color=brightgreen&display_name=tag&logo=github&style=plastic)](https://github.com/kendryte/nncase/releases) `nncase` is a neural network compiler for AI accelerators. @@ -14,6 +15,7 @@ Telegram: [nncase community](https://t.me/joinchat/PPcEPZMLaTViNDI1) ## Install from binaries + ## 从二进制安装 Download prebuilt binaries from [Release](https://github.com/kendryte/nncase/releases). @@ -21,40 +23,47 @@ Download prebuilt binaries from [Release](https://github.com/kendryte/nncase/rel 下载预编译的二进制文件 [Release](https://github.com/kendryte/nncase/releases)。 ## Build from source + ## 从源码编译 [Build from source](./docs/build.md) ## Supported operators + ## 支持的算子 - [TFLite ops](./docs/tflite_ops.md) - [Caffe ops](./docs/caffe_ops.md) - [ONNX ops](./docs/onnx_ops.md) - ## K210/K510 + - [Usage](https://github.com/kendryte/nncase/blob/release/1.0/docs/USAGE_EN.md) - [FAQ](https://github.com/kendryte/nncase/blob/release/1.0/docs/FAQ_EN.md) - [使用说明](https://github.com/kendryte/nncase/blob/release/1.0/docs/USAGE_ZH.md) - [常见问题](https://github.com/kendryte/nncase/blob/release/1.0/docs/FAQ_ZH.md) - [Example](https://github.com/kendryte/nncase/blob/release/1.0/examples/user_guide/) + ## K230 + - [Usage](./docs/USAGE_v2_EN.md) - [FAQ](./docs/FAQ_EN.md) +- [Example](./examples/user_guide/k230_simulate-EN.ipynb) - [使用说明](./docs/USAGE_v2.md) - [常见问题](./docs/FAQ_ZH.md) -- [Example](./examples/user_guide/) +- [示例](./examples/user_guide/k230_simulate-ZH.ipynb) ## Resources -## 资源 +## 资源 +### K210 - [K210_Yolo_framework](https://github.com/zhen8838/K210_Yolo_framework) -- [Shts! 's Blog (Japanese)](https://www.shtsno24.tokyo/2020/03/nncase-v020.html) +- [Shts!'s Blog (Japanese)](https://www.shtsno24.tokyo/2020/03/nncase-v020.html) --- ## Architecture + ## 架构
diff --git a/conanfile.py b/conanfile.py index d4440757b9..8a3a0c72b7 100644 --- a/conanfile.py +++ b/conanfile.py @@ -51,6 +51,7 @@ def requirements(self): if self.options.tests: self.requires('gtest/1.10.0') self.requires('ortki/0.0.2') + self.requires('rapidjson/1.1.x') if self.options.python: self.requires('pybind11/2.6.1') diff --git a/docs/FAQ_EN.md b/docs/FAQ_EN.md index b34319f186..3360df5da0 100644 --- a/docs/FAQ_EN.md +++ b/docs/FAQ_EN.md @@ -1,6 +1,6 @@ # FAQ -[TOC] +[toc] ## 1. Error installing `whl` package @@ -8,9 +8,7 @@ A: Upgrade pip >= 20.3 using `pip install --upgrade pip` - - ----- +--- ## 2. Compile-time errors @@ -18,7 +16,7 @@ A: Upgrade pip >= 20.3 using `pip install --upgrade pip` #### 2.1.1 Q: Compile model reported error "System.NotSupportedException: Not Supported *** op: XXX" -A: This exception indicates that there are operators, `XXX`, that are not yet supported. You can create a issue in [nncase Github Issue](https://github.com/kendryte/nncase/issues). In the current directory `***_ops.md`, you can view the operators already supported in each inference framework. +A: This exception indicates that there are operators, `XXX`, that are not yet supported. You can create a issue in [nncase Github Issue](https://github.com/kendryte/nncase/issues). In the current directory `***_ops.md`, you can view the operators already supported in each inference framework. If 'XXX' belongs to quantization-related operators such as `FAKE_QUANT`, `DEQUANTIZE`, `QUANTIZE`, it indicates that the current model is a quantized model, and 'nncase' does not currently support such models, please compile `kmodel` using a floating point model. @@ -28,15 +26,19 @@ If 'XXX' belongs to quantization-related operators such as `FAKE_QUANT`, `DEQUAN A: Use `sudo gedit /proc/sys/fs/inotify/max_user_instances` to change 128 to a larger value. +### 2.3 `initialize` error + +#### 2.3.1 Q:"RuntimeError: Failed to initialize hostfxr" appears when compiling the kmodel. +A1:Need to install dotnet-7.0. ----- +--- ## 3. Runtime errors ### 3.1 Q: Compiling `kmodel` is fine, but when inferring, the error `nncase.simulator.k230.sc: not found`occurs. -A: You need to check whether the versions of `nncase` and `nncase-kpu` are the same. +A: First, make sure that the path of the nncase installation is added to the PATH environment variable. You need to check whether the versions of `nncase` and `nncase-kpu` are the same. ```shell root@a52f1cacf581:/mnt# pip list | grep nncase @@ -46,9 +48,7 @@ nncase-kpu 2.1.1.20230721 If inconsistent, install the same version of the Python package `pip install nncase==x.x.x.x nncase-kpu==x.x.x.x` - - ----- +--- ## 4. 
Runtime error on k230 development board diff --git a/docs/FAQ_ZH.md b/docs/FAQ_ZH.md index b469d00017..04714cc500 100644 --- a/docs/FAQ_ZH.md +++ b/docs/FAQ_ZH.md @@ -6,9 +6,7 @@ A:升级 pip >= 20.3 `pip install --upgrade pip` - - ----- +--- ## 2.编译模型时报错 @@ -16,25 +14,27 @@ A:升级 pip >= 20.3 `pip install --upgrade pip` #### 2.1.1 Q:编译模型报错“System.NotSupportedException: Not Supported *** op: XXX”。 -A:该异常表明`XXX`算子尚未支持,可以在[nncase Github Issue](https://github.com/kendryte/nncase/issues)中提需求。当前目录下 `***_ops.md`文档,可以查看各个推理框架中已经支持的算子。 +A:该异常表明 `XXX`算子尚未支持,可以在[nncase Github Issue](https://github.com/kendryte/nncase/issues)中提需求。当前目录下 `***_ops.md`文档,可以查看各个推理框架中已经支持的算子。 -如果`XXX`属于 `FAKE_QUANT`、`DEQUANTIZE`、`QUANTIZE`等量化相关的算子,表明当前模型属于量化模型,`nncase`目前不支持这类模型,请使用浮点模型来编译`kmodel`。 +如果 `XXX`属于 `FAKE_QUANT`、`DEQUANTIZE`、`QUANTIZE`等量化相关的算子,表明当前模型属于量化模型,`nncase`目前不支持这类模型,请使用浮点模型来编译 `kmodel`。 ### 2.2 `System.IO.IOException` -#### 2.2.1 Q:下载`nncase`仓库自己编译后,运行test出现这个错误"The configured user limit (128) on the number of inotify instances has been reached, or the per-process limit on the number of open file descriptors has been reached"。 +#### 2.2.1 Q:下载 `nncase`仓库自己编译后,运行test出现这个错误"The configured user limit (128) on the number of inotify instances has been reached, or the per-process limit on the number of open file descriptors has been reached"。 A1:使用 `sudo gedit /proc/sys/fs/inotify/max_user_instances`修改128为更大的值即可。 +### 2.3 `initialize`相关 +#### 2.3.1 Q:编译模型出现如下错误`RuntimeError: Failed to initialize hostfxr` +A1:需要安装dotnet-7.0 - ----- +--- ## 3. 推理时报错 -### 3.1 Q:在编译kmodel正常, 但是推理的时候出现`nncase.simulator.k230.sc: not found`的错误。 +### 3.1 Q:在编译kmodel正常, 但是推理的时候出现 `nncase.simulator.k230.sc: not found`的错误。 -A:需要检查`nncase`和`nncase-kpu`的版本是否一致。 +A:将nncase的安装路径加入到 `PATH`环境变量中,同时检查一下nncase和nncase-kpu版本是否一致。 ```shell root@a52f1cacf581:/mnt# pip list | grep nncase @@ -44,9 +44,7 @@ nncase-kpu 2.1.1.20230721 如果不一致,请安装相同版本的Python包 `pip install nncase==x.x.x.x nncase-kpu==x.x.x.x`。 - - ----- +--- ## 4. k230开发板推理时报错 diff --git a/docs/USAGE_v2.md b/docs/USAGE_v2.md index 224ebd33c5..68944d7bbf 100644 --- a/docs/USAGE_v2.md +++ b/docs/USAGE_v2.md @@ -46,7 +46,15 @@ Type "help", "copyright", "credits" or "license" for more information. k230模型编译推理参考Jupyter脚本:[User_guide](../examples/user_guide/k230_simulate.ipynb),脚本中包含了单输入和多输入的示例。 -如果在Docker中运行Jupyter脚本,可以参考[配置Jupyter lab](https://github.com/kunjing96/docker-jupyterlab#32-%E9%85%8D%E7%BD%AEjupyter-lab)进行配置。 +如果在Docker中运行Jupyter脚本,可以参考以下命令,之后在浏览器窗口打开即可。 + +```shell +docker run -it --rm --privileged=true -p 8889:8889 --name Kendryte -v `pwd`:/mnt -w /mnt ghcr.io/kendryte/k230_sdk /bin/bash -c "/bin/bash + +pip install jupyterlab + +jupyter-lab --ip 0.0.0.0 --allow-root +``` 在执行脚本之前需要根据自身需求修改以下内容: @@ -153,6 +161,8 @@ subgraph A end ``` +##### 动态shape参数 +详见[动态shape参数说明](./shape_bucket.md) #### 代码示例 diff --git a/docs/USAGE_v2_EN.md b/docs/USAGE_v2_EN.md index 4ff2a12942..5800ccfc7e 100644 --- a/docs/USAGE_v2_EN.md +++ b/docs/USAGE_v2_EN.md @@ -46,7 +46,16 @@ Type "help", "copyright", "credits" or "license" for more information. Model compilation, inference for k230 can be found in the Jupyter script [User_guide](../examples/user_guide/k230_simulate.ipynb), this script contains single and multiple input examples. -If you run Jupyter scripts in Docker, you can refer to [Configure Jupyter lab](https://github.com/kunjing96/docker-jupyterlab#32-%E9%85%8D%E7%BD%AEjupyter-lab) to configure them. +If you run the Jupyter script in Docker, you can refer to the command and then open it in your browser. 
+ +```shell +docker run -it --rm --privileged=true -p 8889:8889 --name Kendryte -v `pwd`:/mnt -w /mnt ghcr.io/kendryte/k230_sdk /bin/bash -c "/bin/bash + +pip install jupyterlab + +jupyter-lab --ip 0.0.0.0 --allow-root +``` + You need to modify the following to suit your needs before executing the script: @@ -154,6 +163,9 @@ subgraph A ``` +##### Dynamice shape args +Refer to[Dynamic shape args description](./shape_bucket.md) + #### Example ```python diff --git a/docs/shape_bucket.md b/docs/shape_bucket.md new file mode 100644 index 0000000000..7af9f2a459 --- /dev/null +++ b/docs/shape_bucket.md @@ -0,0 +1,48 @@ +# ShapeBucket使用说明 + +ShapeBucket是针对动态shape的一种解决方案,会根据输入长度的范围以及指定的段的数量来对动态shape进行优化。该功能默认为false,需要打开对应的option才能生效,除了指定对应的字段信息,其他流程与编译静态模型没有区别。 + +对应的不同CompileOptions中的字段 + +| 字段名称 | 类型 | 是否必须 | 描述 | +| --------------------------- | --------------------- | -------- | --------------------------------------------------------------- | +| shape_bucket_enable | bool | 是 | 是否开启ShapeBucket功能,默认为False。在 `dump_ir=True`时生效 | +| shape_bucket_range_info | Dict[str, [int, int]] | 是 | 每个输入shape维度信息中的变量的范围,最小值必须大于等于1 | +| shape_bucket_segments_count | int | 是 | 输入变量的范围划分为几段 | +| shape_bucket_fix_var_map | Dict[str, int] | 否 | 固定shape维度信息中的变量为特定的值 | + +## onnx + +在模型的shape中会有些维度为变量名字,这里以一个onnx模型的输入为例 + +> tokens: int64[batch_size, tgt_seq_len] +> +> step: float32[seq_len, batch_size] + +对应这个输入有如下的配置 + +```python +shape_bucket_options = nncase.ShapeBucketOptions() +shape_bucket_options.shape_bucket_enable = True +shape_bucket_options.shape_bucket_range_info = {"seq_len":[1, 100], "tgt_seq_len":[1, 100]} +shape_bucket_options.shape_bucket_segments_count = 2 +shape_bucket_options.shape_bucket_fix_var_map = {"batch_size" : 3} +``` + +shape的维度信息中存在seq_len,tgt_seq_len,batch_size这三个变量。首先是batch_size,虽然是变量的但实际应用的时候固定为3,因此在**fix_var_map**中添加batch_size = 3,在运行的时候会将这个维度固定为3。 + +seq_len,tgt_seq_len两个是实际会发生改变的,因此需要配置这两个变量的实际范围,也就是**range_info**的信息。**segments_count**是实际分段的数量,会根据范围等分为几份,对应的编译时间也会相应增加几倍。 + +## tflite + +tflite的模型与onnx不同,shape上暂未标注维度的名称,目前只支持输入中具有一个维度是动态的,并且名称统一配置为-1,配置方式如下 + +```cpp +shape_bucket_options = nncase.ShapeBucketOptions() +shape_bucket_options.shape_bucket_enable = True +shape_bucket_options.shape_bucket_range_info = {"-1":[1, 100]} +shape_bucket_options.shape_bucket_segments_count = 2 +shape_bucket_options.shape_bucket_fix_var_map = {"batch_size" : 3} +``` + +配置完这些选项后整个编译的流程和静态shape一致。 diff --git a/examples/user_guide/k230_simulate-EN.ipynb b/examples/user_guide/k230_simulate-EN.ipynb new file mode 100644 index 0000000000..a8630394df --- /dev/null +++ b/examples/user_guide/k230_simulate-EN.ipynb @@ -0,0 +1,437 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "id": "38c8a2f0-423b-4c62-8cf0-235e881e1d35", + "metadata": {}, + "source": [ + "If you have any questions, you can join the `QQ group:790699378`, or `create an issue` in the nncase repo:[click here](https://github.com/kendryte/nncase/issues)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "4beb4c04-ab79-4e1a-a7ad-ba53d9a9b123", + "metadata": {}, + "source": [ + "# 1. 
Install libs and set python env" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "82a8f9c1-c2bf-4270-9f1f-ac25c9fdd898", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install --upgrade pip\n", + "!pip install nncase --timeout=1000\n", + "!pip install nncase-kpu --timeout=1000\n", + "!pip install onnx onnxsim scikit-learn\n", + "\n", + "# # nncase-2.x need dotnet-7\n", + "# # Ubuntu use apt to install dotnet-7.0 (The docker has installed dotnet7.0)\n", + "!sudo apt-get install -y dotnet-sdk-7.0" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "a17fa3cf", + "metadata": {}, + "source": [ + "## auto set enviroment" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7cdadfc6", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import sys\n", + "import subprocess\n", + "\n", + "result = subprocess.run([\"pip\", \"show\", \"nncase\"], capture_output=True)\n", + "\n", + "split_flag = \"\\n\"\n", + "if sys.platform == \"win32\":\n", + " split_flag = \"\\r\\n\"\n", + "\n", + "location_s = [i for i in result.stdout.decode().split(split_flag) if i.startswith(\"Location:\")]\n", + "location = location_s[0].split(\": \")[1]\n", + "\n", + "if \"PATH\" in os.environ:\n", + " os.environ[\"PATH\"] += os.pathsep + location\n", + "else:\n", + " os.environ[\"PATH\"] = location\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "b89f3faf-bab6-4f74-a658-1f27a0e49912", + "metadata": {}, + "source": [ + "# 2. Set compile options and PTQ options (quantize model)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "09bb9ca5-f086-45d8-9ba5-c2415f24e167", + "metadata": {}, + "source": [ + "You can find more details about [preprocess args](https://github.com/kendryte/nncase/blob/master/docs/USAGE_v2_EN.md#CompileOptions), [quantize options](https://github.com/kendryte/nncase/blob/master/docs/USAGE_v2_EN.md#PTQTensorOptions) and [Mix quantize](https://github.com/kendryte/nncase/blob/master/docs/MixQuant.md)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a7eff82e-295c-4cce-afbc-ce64c84dc40a", + "metadata": {}, + "outputs": [], + "source": [ + "import nncase\n", + "import numpy as np\n", + "from nncase_base_func import *\n", + "\n", + "def compile_kmodel(model_path, dump_path, calib_data):\n", + " \"\"\"\n", + " Set compile options and ptq options.\n", + " Compile kmodel.\n", + " Dump the compile-time result to 'compile_options.dump_dir'\n", + " \"\"\"\n", + " print(\"\\n---------- compile ----------\")\n", + " print(\"Simplify...\")\n", + " model_file = model_simplify(model_path)\n", + "\n", + " print(\"Set options...\")\n", + " # import_options\n", + " import_options = nncase.ImportOptions()\n", + "\n", + " ############################################\n", + " # The code below, you need to modify to fit your model.\n", + " # You can find more details about these options in docs/USAGE_v2.md.\n", + " ############################################\n", + " # compile_options\n", + " compile_options = nncase.CompileOptions()\n", + " compile_options.target = \"k230\" #\"cpu\"\n", + " compile_options.dump_ir = False # if False, will not dump the compile-time result.\n", + " compile_options.dump_asm = True\n", + " compile_options.dump_dir = dump_path\n", + " compile_options.input_file = \"\"\n", + " \n", + " # preprocess args\n", + " compile_options.preprocess = False\n", + " if compile_options.preprocess:\n", + " compile_options.input_type = \"uint8\" # \"uint8\" \"float32\"\n", + " compile_options.input_shape = [1,224,320,3]\n", + " compile_options.input_range = [0,1]\n", + " compile_options.input_layout = \"NHWC\" # \"NHWC\"\n", + " compile_options.swapRB = False\n", + " compile_options.mean = [0,0,0]\n", + " compile_options.std = [1,1,1]\n", + " compile_options.letterbox_value = 0\n", + " compile_options.output_layout = \"NHWC\" # \"NHWC\"\n", + "\n", + " # quantize options\n", + " ptq_options = nncase.PTQTensorOptions()\n", + " ptq_options.quant_type = \"uint8\" # datatype : \"float32\", \"int8\", \"int16\"\n", + " ptq_options.w_quant_type = \"uint8\" # datatype : \"float32\", \"int8\", \"int16\"\n", + " ptq_options.calibrate_method = \"NoClip\" # \"Kld\"\n", + " ptq_options.finetune_weights_method = \"NoFineTuneWeights\"\n", + " ptq_options.dump_quant_error = False\n", + " ptq_options.dump_quant_error_symmetric_for_signed = False\n", + "\n", + " # mix quantize options\n", + " # more details in docs/MixQuant.md\n", + " ptq_options.quant_scheme = \"\"\n", + " ptq_options.export_quant_scheme = False\n", + " ptq_options.export_weight_range_by_channel = False\n", + " ############################################\n", + " \n", + " ptq_options.samples_count = len(calib_data[0])\n", + " ptq_options.set_tensor_data(calib_data)\n", + " \n", + " print(\"Compiling...\")\n", + " compiler = nncase.Compiler(compile_options)\n", + " # import\n", + " model_content = read_model_file(model_file)\n", + " if model_path.split(\".\")[-1] == \"onnx\":\n", + " compiler.import_onnx(model_content, import_options)\n", + " elif model_path.split(\".\")[-1] == \"tflite\":\n", + " compiler.import_tflite(model_content, import_options)\n", + " \n", + " compiler.use_ptq(ptq_options)\n", + " \n", + " # compile\n", + " compiler.compile()\n", + " kmodel = compiler.gencode_tobytes()\n", + " \n", + " kmodel_path = os.path.join(dump_path, \"test.kmodel\")\n", + " with open(kmodel_path, 'wb') as f:\n", + " f.write(kmodel)\n", + " print(\"----------------end-----------------\")\n", + " return kmodel_path\n" + ] + }, + 
{ + "attachments": { + "1855346b-3785-4867-9c92-0e811d00b9ab.png": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA5AAAAGOCAIAAABubXuxAAAACXBIWXMAAA4mAAAN/wHwU+XzAAAgAElEQVR4nOzdf0BN9/8H8Nd2c0/KvdXq9kndrWjimkuRoaEyYUu2uCNlm9g0mzCu+Ez4Un4lm5+jGZmtmF3tox+Te/ehmIToko9rWT9s19XcmtxLOleX7x8VocjPe+n5+EvnvM/7/TrX2NP7vs/7vHDjxg0CAIC7XL16taqqytRVAAAAvWjqAgAAAAAA7gWBFQAAAADMGgIrAAAAAJg1BFYAAAAAMGsIrAAAAABg1ixMXQAAwFOl0+ma2bJVq1ZPtBIAAGgmzLACAAAAgFlDYAUAAAAAs4bACgAAAABmDYEVAAAAAMwaAisAAAAAmDUEVgAASk1NNXUJAADQJARWAGjpatMqMisAgNlCYAWAFq1hTkVmBQAwTwisANBy3Z1QkVkBAMzQCzdu3DB1DQAAT88DvemqqqrqiRYDAABNuKIprf6Xmz2HiDDDCgAAAABm5kphZnLST8myIxeMRITACgAAAABmxtLeyc76xeo/s36U5V0wYkkAALQ0T3FJgL708NFSg7O4r4d9/aEKVU7B31yPXt7OresPGSsK9x/IK65g27iIe/XxduXduvxoQemFcp2BLNs4tBWJxc43Txk0ypzCygZD8dx8erhx647be/QTO3Mar6jgeOn5Sl21kfiCdh63utSXHj5aeEFXbSRLG2c3cVcPe26DywwaZVbOyXN64rl08fHzdL55ruJMXkGpRldFxOU7e4i7utddpi/KO/rX5QY91JZkqDiTV1BarqsisuK38+ghrrvZisLfCjQ1t5dq5dbjdTcqyjv6l8FZ7ONhT1RRmFOgMdxqwa07TqQvytl7+PeKqwzPVezjI7712QLAM8xYofz5x/+WXLlu+Yr/KARWAGhZnl5gNebMHTJNruf5xaQsCeAREemz5obMklc4S9anSD2JiOhs6tzp8XJ1fQzj8MRjFn850ZtHRPrUaUMW5Rhvdsd17hs+f164mEdEmuTw4atUDcbqELHt+3C32uOF3tLUNRJ7asRvcwOkcv3NH1u7Dfviyy8CnO8ci2PvPX7J4nFiHhFdLUye8/mq3ypuXmTvM/mrhaEerRspw75HxJL4cHFrKlg5/OOtmlsnOLUl3dGe69x38pJFEg+Szxo4N6tBFCUiEoZu+HEyrRn+8dZyv4X7lrx5V/HErT2uSZ/18eKsivrixVNSNox2buzmAeCZczOzkoWpSwEAMAGlUnnvBp6eno86RqXmnyoi0mcly0oDwt2INDuTsiqIqLxcayDikrEwMTpermY8Rkonv9OV/9fe9V8m5Hw3b5nHtgVv1k+mCoctiJG0rTi9d2tC8m8J05a4bFs4qC6LcjxCF0z2sSUiYqycm5/ReG9+8eWYdvqCn+JXylNXbB404AvvBmO9pN67eXli3sZlaYO2hAoNeetmrfqtwr5H+IyJg92odPe6hYk5q2atE2+bJubeLGNZtL9F6d5Ny5KPJiakDVszsq5A7wlrxnarbcNvZ1s/fF3707u/iZf9tmq9fPCXQ3uMi/9yWA1pdsXHKzQeo5d80pNLVs5uHCq9u3i/yYvf8yAiIsbBnchYmLY1q4LcQr/8MuxVg+ZUKYmRVgGeGxx7z+AhZQk7Cq4gsAIAPBn/VJQTcXk8g0omOxwm9TyRvKOAeDzS6ysu6onsqSAz7YyBunyyZNowZyJyD59PJcNnybN2HdS/OagusbZu6yHycCMPcQ+Hy5JpqVmpey8MkjjWnmN4zu3a2RMRcW1v+wr/3rg2zmKRmDxYnx/ksspyzc1J5NqxRG01uxLzcio0fxM55qTu0lBrn4gFEX72ROQWsUBXIFmUJ0/LmyT2qRuS79zRQ2zv4axOTVbmaf7WENUGVi7X3rndywwRUWue/a0lCnXteSqZ7GRh+YUKIjeP1308iEpPconIwb2Hjw+PmsBt7ez8cjuGiDhce1si4nJfJDLqC0+X6sU+Yj+3Zn8MAPAMqDy967//u0KEGVYAgCdEp9cbyfktiZs8cXdKlqRiz+4L9kPGvHHgu1R9RQWRvV5dqiFyFt+aEuSJu3bkyPPOl5w30p2RrbW4S0dKzdGUaohqA6uxICE8MIGIiLyjMtYMb3QRQCMMZ3NSd5ZWKFN3XyBy7djJiqg2s2pzklfq6ULe3kNE9j283YkuaM5XEXmIu97s275rDyHlFZdoKuvLoPIT8lQylubsyCPiebzqdnOcnMXDA4mIiPvmkn0L/eoOGzVZG1eVGkrzsguJ49y1+4NNiFbsmjV8FxEROUrW/Cz15rgNGz9s9/zUvG+mhWy2F78VPnmiRGx7n04A4JlQeTr9pwxV5XVL5zfeQWAFAHgiDJXleiLntoMlg3Zn7Vg/6w+Nvn24xI9O/EB6vY6IGIbLJWKr2FvXsAaWiF7kcht5ZMpgqCLiEJepP8Bx9psQ6t2aiMihW5NTknfTH01edJSIiOc+SBod7nFzrMqC1O0FZCSy95GuivazJari1o5sMBLVNjMa9FeJiLlVobFUvnKRnIi49t6jZ0wedKsSj6GTh3lwiYiEHg3G1+TtTM4zEnGcB/3fGqln86eGiYh4npLwAW5cIrLq2I5DRGTv98WWzkGZ//kpLT2rYGf8NC1325fDmhveAcBcVf4v/afM2rQ6IsTHGYEVAOCJYC8bDEQ8K3vvEUEeOxIK1VzvKIkHbzePSKO/TERcUVc3TlbhYXmO3rv2O/BSxd7fjWTfsePds44GlSz1FBGvS1e3m8ecvYdKGn+46p54b0q/HNPV3tHN+Y51BB0itn3tvfvTzxLP/J5XrJe429O/OorbUkHp3r2F4R4iLhEZCjJzzhMJxZ0arkldGj2kbZu2LzvzbuuP69wnVPLmXcNzvKU/L3be+OG0nZqT+Rp9gPMDZG0ibnu/0JHedx50FA+bIB42Uj4reG5Wfl6BcZhfo5skAMAz48r54vOV1y2dfUaE+DhzsCQAAOAJ0VfpibhcK4Zch0n8ZKtPvREaYE9XuQyR4YpOT8QTBo0blDRrV+qsj3TD3mzHPZ+3W15gaC0OG+1zK/hpjyavM7S5UJCVlacx8LwnjvO7tR9WSda6+NLaphbOfuNCvW3rju9esaiwdiLWbVDkGO87EiHXxk0s8qBG8cThMyV7I5KzVq/M6r3AjyeWvO+Ttjgn8fOPzr31hguVHEjPKjXyfN4P8mi4JlXk4dFIbjYUZiyKP1p3Kx5B0mGi+jMcns8k6bCD01J3xicO2jL5QSZZ9cdl8XFZdTcilkx+y16++N97GHHXtjz2fE7BVSJXFzx1BfDssxYFvsd0Mjh3cKz9ywaBFQDgibis1xMRtxWXyH7YwoxhtUeNXC4RVRn0RDzi+f177QLb+PU7smSbsoiI18Fv8tTZoe61TS25VkSVeanf5RFx7Tv4hIZO/uQttwYjVOSly/
Jqf8kRO4fUB1aqKFCkFtT+0tMjYsyd85H3xu0SPvmt3dPS5eu3jvGb4OH8zvy1tHrZN5nyrYVExLUXD4uaETm0WZlQk5Mqq+/VRzz5VmAlIp5PxASfvbE5sg2pkrWS5kdMQ1GWrKi+D4PP5EGGcnVJztG8rNph2vpERIV6YHoV4Dnwom37Drd+wj6sANCy1O7D2pxtrR75xQHNZtRXnNezrR3u/I6eyFBZoTdyuTwe78GWej52Br2mXE88B2dTF9IofYXmHz21tnd2fKD1BQDwzMAMKwCAqXF49sLGkxbX1t48nh/i8pwfbLHpU8Wzd+aZx+cEAE/Gi6YuAAAAAADgXhBYAQAAAMCsIbACAAAAgFlDYAUAAAAAs4bACgAAAABmDYEVAAAAAMwaAisAAAAAmDUEVgAAAAAwawisAAAAAGDWEFgBAAAAwKwhsAIAAACAWUNgBQAAAACzhsAKAAAAAGYNgRUAAAAAzBoCKwAAAACYNQRWAAAAADBrCKwAAAAAYNYQWAEAAADArCGwAgAAAIBZQ2AFAAAAALOGwAoAAAAAZg2BFQAAAADMGgIrAAAAAJg1BFYAAAAAMGsIrAAAAABg1hBYAQAAAMCsIbACAAAAgFlDYAUAAAAAs4bACgAAAABmDYEVAAAAAMwaAisAAAAAmDULUxcAAABwm1atWllbW5u6CgAwI5hhBQAA84K0CgB3QGAFAAAAALOGwAoAAAAAZg2BFQAAAADMGgIrAADcolcmTpMEBgQGTvqh1NS1mAVtTpLiDPvYujM+tp4AWhTsEgAAADdV7N6QTO9vU7xj/5g7NpbKYlMdJk32e7iOdbmblshONUh7lq5BUZ/68h9TdTepM+LW77tQXfsDp/OYeeO0O1bK3ggK6MA8THenZTHZwikRvevr1MkmDZT12LrtI74yQ8n4B4isHqw/tlixPkF2sPSSTcdB4ZHjegsepiiAZxECKwAA1DNWaMrbduzyuNMqEZGm8Gjhw88vWrt3DwhyJ8Oh9dKDXRdO9eHRS+4PFSHvo7xAnsuGznzHnUtEHEcXC9I+Snfnc9P2eX0S0bv+Z37QghRfayEZs2XLkkQ9HjiwMhyu+5CpI9qTMkE6Y8nLvy4PeBIfAoAZwpIAAAC4iSVjqWxyYOCw4YuyDKQvSJ79QWBA/wDJpFVZGiKiiqz48OEBAQEBgbPk+lLZrJDAwICAgIDh0xbFz/pgeGBAQPCkhMN6IjJkLQqRDA8MDAyZLSs0EBGR4UTCR4GBwR8knHzwujgCT39fX//+nduSY4f+vv6+vt2EDKtWLIsMeTcweEJMejFLREXboyPCggODAkMmxMjOsETEFqfHTAgOfDsw5AuZmoiILUqJiZAEBo6KiNtZxBIRaRVfRY6VBAa+O3bdUSIixrVPgL+vr7+vb3+RgNOgBqNa8VVkSFBg8LjopOM6Mqo2TY9RlBERaTPjorcWERFV5a6cvk7Z9AqC4p0rkg/piIiMp5Knjx07QZp0mohY1faYiFHBgaMiV+5V32yszYiRblbV/rpoe3RMhppcfYf6i4SuooD+7Vn9Jd2Df5AAzygEVgAAaIDjFroqIyM15Qs/ylkzL7X1uC2Z+zJiehQuWyTTEF3V/F7pszhToUhdMshKrynhSlYpFJnfhbKZJzwWpGQoto3nyjalaYi4fl9sk6VkZGwJvZq4ab+BiIjbNeLbjIyft0R0eSyFsspVEUvLBi/csjXeX73030lqIvVR+aX+i7Zu2TzPR70yelMRsftWLSh6PT4lLWX15EFCIt2+mIiES0GLN2+eH3Dh64i4HJaM5/IVxZ1nbN6auCysCxGRTiVP2pqUtDVJdkjdcDzlqoilhb1nbdi8ejQjmzI7/R8XR4M89aiOSHcoI0mWmaslogJF2kW+S9PTnhfOHMovY4mIOO0HTYuNnT01qB3p9sZEfk+hy7/b+kXng3NjZPWTugIho9ohVxmJSJ29I5echEREZ7M3LZN+uFYf/lkQVgRAy4HACgAAjTH+nneU6z/Kz55DXJFkmOj3nGP6W2cbTj1ynLt2cTawBiKyF4vdyjUVRqr4LWFWeEjIqM82n9JfrtDf2fljKK9YvlcrFFzKzUjLZXn8M/nKKiJiLPkOfDuB6J0A95KiYiMj9HApykyUHdDynfhElJ+Z7fDe1KEdBIJOkqnDHeSZ+URExOW9JODbCfi1QZPV6S7qdBd1uoYTmEaVXEGDPgvzdBIIA6aEeRzafZjp5eOuOpDPsvm550We/+Qe0pEqJ9fBx7d5OdLSwUUodBXyGcpXyMmNr96flnaKBFanlKr6Jl0G+Rqz5GeI1HJ5pW+QJxERWdk4Ct1fe0kt35WLGVZoObCGFQAAHhWXw6Xa78FbMWQk1piTsPiA88INSzx5eXHDNxMRcYmIah7jmJdYlmEYohoijlfoDKGIQ7dmRK0secQSkejTbZtFSYkJoweuDV29ZZyhirW0rpv/ZKws2St6Iss7+uV7SiZ+KmpsOK5D/aWWVmS4wgr69XfYvC/3EP0hnjiVWSHbryw6wPVdLHzAG2ENVSxxuFRDRPzeY6a4t68/w/EMGkLSXcqhVnL9wHmetf9IEHgOHe05NPDlyMGJ8vG+EsyyQsuAGVYAAGgMp6N3D8PeH7MqjGQ4I0tVdfTpzmvutUaqNjI8u4bt+fY8TWGpgchgMDyW8jp7daZLVr0l74eFvR8WNtq3iYewGHf/cbFbVgdVpstPM6IenYsUcrWRyKiW7y3q3MvzvuNwLYhlWeJ09hKXy3epWCIqU+xWunh68UkYFOSSveLLY6/69/YK6K7aMF/OGTSoQ3Oqt+Ez2nPldRWKPEVslcA3NCzs/bCw9yW9GyRe0XCJIHPJ7HQaMVJE1GBXrPK/tMSzxCNX0GJghhUAABrF9Zk0v3ThwpCBc8m26+BpX0icidT3v4yIiOs9bkLm3MhAGTEMh9vFi0scj6DRXaYtCAwgnv+s777wa3b2bRJ/6MyZuVPHDtzp4MI1kO/CbZ/fnT516dNHr1MLeKy63GlEfCcSdpoz5ejk0W8m80lHPaasGiEgOnfvYTr3769eMiPJZ3OYdG7upAkDM/mMnnUfv3pcByISBL0jWvmV48xeDGMR5HtVdmqUxP2O65Vxga+vJCLi8IOW/9q/9iBHFDSc9+HHAw86MO6fbI0fvfAT5eTRbyY6OHANLwWt+mbcrU6EQRLRygXGuRJXIiLV2pBIBetgZTinIc/PVwU89m29AMzVCzdu3DB1DQAAT0/tykSlUnnvZp6enq1ataqqqnoqRcFtbG1tm9+YvajVEV9g1+Rko06rZS1ua3DfS+4ao271ARHpyrR0c7XrI9JptcYGZVRptVcY/v36Zi+qtTriOwkfTw0AzwgEVgBoWRBYzd8DBVYAaAmwhhUAAAAAzBoCKwAAAACYNQRWAAAAADBrCKwAAAAAYNYQWAEAAADArCGwAgCAebly5YqpSwAA84IXBwAAgHm5du1aZWWlqasAADOCGVYAAAAAMGsIrAAAAABg1hBYAQAAAMCsIbACAAAAgFlDYAUAAAAAs
4ZdAgAAwLy0atXK2tra1FUAgBnBDCsAAJgXpFUAuAMCKwAAAACYNQRWAAAAADBrCKwAAAAAYNYQWAEAnkul8i8Tch71/aZ6+Zzhk7Zr7t/QmBc/6oME1SMOB3fQKlPSVTpTVwFgBrBLAADA88hYfmJ/gduYR+yF1+eDL9raODd6Tv9bwrK//BeM9iAi4nSVRM3guj/icE3T5W5aIjtlvHXA0jUo6lNfPqvO/j4x7WhRudHBvcfg0PcD3K3qrzidnpiYdrD0kuVLr3oN/2RKgJCablyPLVJsSkw59Mc/1TbC7kGfTBnagXkC96JS5LC9hnjy79vSWJS2VtbeZ6jo/k0BnnMIrAAA0CReB29xE6f0f+UdLe1R/xPXrUdTDR8Ha/fuAUHuZDi0Xnqw68KpPjx6yZ0xFiVNHJtoHTrzk9D2HHX2t3Gjw//4bstEEUO6Q3GjpQe9Pp06L0JIZar/1RA13bgeq0r4cMJOYfjnU8PdGa3qmJ7zBNIqEf0pX7Ge4gM8+Zwn0j3AcwmBFQDgOVKRl7BwkSxfz3W0p0qHcCIi0uyKX7R5b8llsu8evmCOxI2jkS+etf5gBXF5PjO2SH24hqLU+MUJe88auE5BSzZLSv5vVsKR8wYjb/DCbT67gpPFW9YM18tmzUpUVhiMxPMY/EnU5EGuXCLSy/8deJDLdJuy4f/aJoUu4y3aEu5Omv/GL1qz+8RlrlvvsOioUA8eFWyaNG/H73oDSzZdJVGLI17nPfB9cQSe/r5ErH4nFXXo7+vPJyJWIV1fFrT654meDBG5uy931I/4cEVGWMJwbdIymSAyI3akgIiovbvoXo3rZy+1aSs2sqFJ8eM6EBG5txcRERnVilVLN+4pZgVekilRYd34dDF75fzEg6VaPQlEgz6Z+WlvyohZqpXEjxURUdH26GTrT+YMJMWqpRsPFLOOvcfPihranmFPJsWsTfufWk/iid+EEmnS5n+cb8n0mrpmoqc2e92yRHnhJRuxZOasMBGfiC1Kj4/bmKtm2gr0emr/qP9NADwPsIYVAOC5oc9aM2+vk3Tbr4qfvwoV137frU6e94Mh9OuMjNQNkquJq3ZVUHHapgLv+T9npPy8TdqLS8bCxLmbdUEbMhSKjI2fiKmitJAkaxUKRYr0dW5dx0a9poRbe3BJn9KV0YmFRiIi3qDFGakZKTGD7G+WoE5e9GWpT1zGvswt4xjZrHV5BiL9XyXO47cpFPu2RbRJXZlcaLyr8IeiOqqkXv09b06DckSD3hCcysuni/nK4s4B/QXNanzTydz8f/kNuj0eKldFLC3sPWvD5tWjGdmU2elaIl3RQaXD+PVbt64Ms9k1Y+leViBkVDvkKiMRqbN35JKTQLkqYmnZ4IVbtsb7q5f+O0lNxBbukxuDVidt/ebz/gIicvCdGB0bO1si4hRtmh6j9Jz5XdJqiTFRmqAkYpXxESu0AYs2bI6P6CXALCwAESGwAgA8P4yleQU8/2Afew5x/+XswCUi0h/N+11zID5i+PBRkzYXUvnfFSTs4cPNWrY4UX5GTxwi9YEDem/JUGcuEXG59xmCeB4jQ3v8k5d3vvHT+qN5v4uHSTpwiWPvN2Iw92jO7w3iqX1Pn44XSprxDFezVLMs38qy4RGuNZdlDcSyLDGMVfMa12NZlrXiMQ0DolElV9Cgz8I8nQTCgClhHod2H2aJiDg2PAGf7xoQ1JspKlRTl0G+xiz5GSK1XF7pGyQulu/VCgWXcjPSclke/0y+soqIiKwchXy+QMAnIuLyHV2FQqGAOZstV/GEdCwtI/uSlYNWeUxtVMkPMIM+koicBEJPr7tW2QK0UFgSAADw/GgsbxrIK+K7L4c1/Bp+8uYNBXJZ4r9DUkeuXdPrAcfgUF3ue4glnlaW903Ezefe3kW7/w8tedZPpepUqnMuovZkxxVaFxWfIerRjMb1GOHLQs0pVRUJb2XESyzLdai7S8bSigxX2IYFMFYMsUQcz6AhJN2lHGol1w+c58m5lMYyDENUQ8TxCp0hFN0xS2pBdDPEG1iWYep+bB80pVd3PrENBgWAOphhBQB4XnA6evcw7N4i1xiIiAy139p7+nQsSEtW6omI9Ho9ERn1eqO9+K2I2e+3O3G4UC/09m59QJauMRCRwdB075cr/tYTkUaeeqCNWNyWeLw2BnWhxnjbVbwe3m4FqbIzBjJWZO3Ybejh0/GJfaktGDKmV+H6pRnq2hSpVcSty/MKG+ZOTG9JICP7ct3NDaHYKrbJxjeJRgx13vf1qlxt/UVsTWcvcbl8l4olojLFbqWLp1fjj+uLhksEmUtmp9OIkSLidPbqTJesekveDwt7PyxstK/7HemzNY+n116oISKiVzq/xugZsSSstvEQEZ/Tvre4XP4f5W2bWbFq5VE1SwAtF2ZYAQCeG1yfSfMlccs+DlxmIOI6+vtwiRwls6dpFs4fLrtMXJs+UxIWDPon7d9RSSUGojYdw6L68Di8T/4vPDbu48A1BmrTZ8bG9xrv26jPWflhwDw917FH2JxwMYeob5gkfdaHQxK57uFfrarfIkAYOn+qZl5U4PpKrrNP2Pwp3o9xSvVOgqELV16YHT164FoHByo/Z+wcvmKZREhEjOfnq6LmSycMTua7ONDFcrZXbMaiphrX47hPXL5QP3NGoB/fxYGr/7tc9HnKaunc3EkTBmbyGT3rPn71uA5EZxurRBgkEa1cYJwrcSUi/tCZM3Onjh2408GFayDfhds+97ytsesgiWj0jLeDXRih5JvVUxYciowaOJDnwjPqX5uWEuvPD5g1N3dKZOBgB4fW+nNV7lEcYvetmDCTnZu1eij2t4KW6oUbN26YugYAgKdHp9MRkVKpvHczT0/PVq1aVVVVPZWizJ6xYFX9JgBPga2t7QO1Zy9qdcQX2N31PXqVVq1liS8QNjjVZOObDbRqbRUxAqHg5pauZVqyE/Af8Gv6ew/EarWstYBfNwSr0+pYa4HA6vbLaxh+3bCsTkf8B60A4DmCGVYAAHi2MXYCQaMnrARC12Y3vtlAIBTefoTvdO8rHrCqulEEDeInw7+r7e2XM3zMrULLhsD6AIqLixMTEw0Gg7v7U5lkuCeWZRmG8fDw8PPzM3UtAPC844gn/7jF1EUAQMuFwNpcUVFRpaWltra2bdq0uXr1qqnLIYPB8Oeff+7du3fFihVSqbRv376mrggAAADgiUBgbZbFixefP39++fLlbdq0MXUtd0pJSfnuu++uX7/ev39/U9cCAAAA8PghsN7ff/7znyNHjmzYsOHFF81xF7Dhw4dfu3bt66+/RmAFAACA55I5JjBzc+jQoW7duplnWq01atSo69evHzx40NSFAAAAADx+5hvCzMcff/zx1ltvNXKi5qmX0rRXXnnlzz//NHUVAACPwZUrV0xdAgCYFywJaBbuXe/XPrvu3cEHJYe2jLF52D4v7Jcdd5EEtL/96JUja+d9/ztLRGTZ86O4D7rWHS9OW7jT8uPPAxyb6M3Ozg5b6gLA8+HatWuVlZWmrgIAzAhmWB+S6wff7I5/76HT
KhG7b9OMnwruetPepeOKI/T6SIlk5Pvv9XElIqq5cGRT1ODhE9bsOoG/vwEAAKAFQmB9SOyx7xf+eIKICpOiPhw+2M/Xb/AHc2QqlojYvG8nh7472Ndv8PAJy9PPskRUc+LbyIWKCiIiyl3+4Vf7WCKi6n2rPhgVOipqR4M3/V26dPVfXQf169+/X8+eHWyIiK6cPvx319mxH7liNhwAAABaJISgh1StOaE45UFEZ4+kXvLfvjPU6ez2yJHTv+35y2e2RfvTLCRZKYPo1E9zPx81xyY7rk9l4f4jxBIRsX//vu9E7atXLLuNiokbyLS2b/AmlvK/LxTKo2f9/brfe+8P8WCIyKb/Z/8myptjgpsEAAAAMAOYYX0MGBsnG3vHriODuhUfL6x9EsvaxtXe0bXfZ7PDLBU7DzV1oc2/XMVmMkwAACAASURBVF3buzo2XFjQXfrj97FjetoeXjZs2FcnnnTlAAAAAOYPM6yPTxsbSyNV337M1tr26hWWiCELYu/aVeCuFaxE1o4eYkcPcc/+LmW9Z6adiOzaFb9FAAAA0LJhhvXJqGZZIqo5K//1dKeeHmTh5Op4+nj+pQYtGJs2zCVN2Z0X1ofaC2f/rLS2tUFaBQAAgBYPgeiJqM6K9n1jjWXNperO0m/CXIlozLT3fpraq/daV8sLhYzfe0TUf+R7Cz8e3FvmZNN3we75tS+pYtOkvgtPOTlR2el/XD9eM8b1nqMAAAAAtAQvYPPO+3rvvfdmz57t6trc9Hjpxw89sySn4/tfqrF0tGFunahhL1y4xNg42ljXH2EvXbhENrc1IlZztqymtdMrjg0P3tfXX3/t7u4eEhLyIBcBtEQ6nY6IlErlvZt5enq2atWqqqrqqRQFAAD3ghnWJ4Wxtrlzk38LxtH59mOMjeNdbwJgnJsdjQEAAABaAATWZjEYDM1vbDPqu5IRT66WxlVWVr7wwgtPe1QAAACAJw8PXd1fhw4ddu3a9WDXPPV/CPz555+vvPLK0x4VAAAA4MlDYL2/119//fjx49evXzd1IU3atm0bEfXp08fUhQAAAAA8fgis9/fuu+/27Nlz6tSper3e1LU0YseOHUePHp00aZKpCwEAAAB4IrBLQHPNmDHj7NmzdnZ21tbWzd8x4MkxGAwXLlz466+/qqurpVJp3759TV0RwLMBuwQAADxzEFgfQHFxcWJiosFgcHd3f5R+NBpNenr6hAkTHqUTlmUZhvHw8PDz83uUfgBaGgRWAIBnDnYJeADt27ePiYl59H6+//57Lpf7iIEVAAAAoIXAGlYTEIlE165dM3UVAAAAAM8GBFYAAAAAMGsIrCZQ+6SUqasAAAAAeDZgDasJvPzyy5aWlqauAgCeLIMyOX5nqcfILyQijXz15jxrv4hxPvZEZCyQLf6p0OM96UixRrEqOUtTftXAbe3gLPIZ9o6fG48qfktIyGK9x04eJCQylqYu33TSeVhkH03CD3m6hgNwOr0XFdrxL3niD5l5f+kZ27Yeb4755C0PronuFwDgycEMKwDAE8Gq81J3ZeapDWSsOJ2dmvrNrM+/KTAQkfF8nlyeeew8EWly0lL/m1N6Ua/544BszawPPk0sNJK+8EBq+u4T5bXdaE4q5KkHSw3687+fOnmy4GjWLrn8v0dPFpw8qdJUXMqJnzw3MauUcbDnVp7MKWSRVgHguYQZVgCAp8NQ+N28+M7fffH67Yc5XUOXrpE46uWzh8/N2nugNNy/0as9Izb8GEHGrLkDZ8nFERtWDbMnIlXC+kriDZi8OMaP9+RvAADAVDDDCgDwVLj6DRPrUxcvlJ+/4wRLNQbDhd9LNCwRw7R+kD7dxN6OpFfMDflobsKuggrjYywXAMCMILACADwV3I6SBTP8KGvZ4lRNw2RpLIgf3r//sEmJKnIbGjbY+UH6bO0jTVgjHd6VOStPnP9xyOeyUmRWAHgeYUkAAMBTwnUcNHvGbyGz5IVEtxabcpz9xki8nR2cO/fw6WBPRGxrhsigu2wg4pLhso4lYrhNLk519JZEeUsiC5OlH606KttdLIno8DTuBQDgaUJgNQGVSlVTU2PqKgDABHh+M2YEHJ2lqGhwzNl7RKjEscHPYrEzFWQlxCdXdmEPJecYyKNL18aXqBryklcc5Xbu6MCUn64kIh7vgVYUAAA8IxBYTUAkEllY4JMHaJl4fpERPrmL8u7RpEv4/Iml8zamropNJeK6vSmNHu3WeMu/CnOyk/JSDEREXHvv8ZODhI+/YgAAk3vhxo0bpq6hxcnLy5s4ceKRI0dMXQhAS6TT6YhIqVTeu5mnp2erVq2qqqqeSlGNMegryvXEc7a/3/P/+gsavYHhOdrzsKkVADynMM8HAGCWuDx752btVcVzbF47AIBnFnYJAAAAAACzhsAKAAAAAGYNgRUAAAAAzBrWsAJAS+Tp6WnqEgAAoLkww2oChYWFpnz0GAAAAOCZgsBqAh4eHlZWVqauAgAAAODZgMAKAAAAAGYNgRUAAAAAzBoCKwAAAACYNQRWAAAAADBrCKwAAAAAYNYQWAEAAADArCGwPmuqq6ur79fk8n1aVF+urK65vdM7L7n7yO1qqivvNwoAAADAY4HAal4q9y4M7CPdU9P4yV+i3uzeZ8AAr/bdP00pu3W87JdPu7ef8AsR0eX85cO6+wT4dO4n/aWssT5q8pcP8xkw7O3uou6Tfi4jopKtH/mIfQb4d27fL3pPJRERnd4Y1rP7AP/u3T9ILmmsksqfJ/n0GzDCv3vnt5fnI7UCAADAE4bAaj6q98x98+2vcs6XNZUBbbtP+DYnPycn/1ufvWuT/qg7qlr92Vq1HVVXE1HJ99FJr8bnHDyWMTI/Ou5gI31YeE1PycnZk3Pq+wE5q5NKiNr1m5105FjOkWPxLyevTaskqkxZsJyicnKO5EirYxemNVKMbdCanIM5/z2SM91y7dq9j+fmAQAAAJqCwGoCKpWqTZs2dx229InKyNk+XmRZf+DyL9KR0XsqqVIhHfHFnmoip1fbWRLR5cqLFk7tHIiISLlQevDtNZ/XXlSdv1/V3d/Hkqid/wDLIzkqopLVYR8lllRXqzZ+ELaxlIiILKi6smTPXpWlt1dbIhK2a2dJRNXnKy3bCi2p5tieI+0G+NsS2Q4IaJezP//uMsiCqKa67OQvB7VefURP+tMCAACAls7C1AW0RCKR6PLly3cft2xjSQ2/gm/ztjR8T9hbPldb+8RuH1CXY6tVayfEVk/bMdyWiErWLjg2/Msd7UomERFR5cXLlpZtLImI2ti2vlxSWUN9Po72+iBswDfUdtzGJDciIqrJ3/ipdOORi90XtavPxpUHY8I2CmN/8bekmsqLl23t2hAR2baxvFh5sdEyynZFj4/7pYQ3frztY/94AAAAAG6DGVazZmlBRNVkYVn3L4uako0fhP3Sb2NSeDsior3Ll5eQ6pvo6MRj5wt2rP3tBbs29Y9kXa682sbO1qKuk+oasrS0rIunFl6fJf/32MHoq3Ojk8uJqDr/q7DPVKE7vh7uRERka9em+mI1EVH
l5Wo7W7tGyiByCorP2H9qR78dn32Z/5Q+CwAAAGipEFjN2OVfYtdZzk47lvJ59dpFe6qJVKs/2iiMT/rcqy56ij9LWi0d8XbggJ5t7dp293nVxqtnu2P7D1YTlR3MqfTq046oZEPsnoCNOfs3+uyKXlvaoHNLy9ZUebWGqnNjP0vz2bhpfLvaMGrR3cdLdXB/NVFlzv4Srz6iu8to2Md9dyQAAAAAeEQv3Lhxw9Q1tDh5eXkTJ048cuRII+dqUsLEe8YXrBnQyGKNkrUDusdqRe3aEBF1j/plTXD99/F7J3VOHHBqy3Cq3BM9fNIey7bVWifpT0mhbnf1od4Y9l5yZVvL6jMllqEbk/7dp2Ru9wHfk8jJkohaB8T+d8GAauXyEe8nUVs632Z80vbPRHdVcnDum9PzbNvWlKgudo/+6dtGRgEwYzqdrpktW7VqVVVV9USLAQCA5kBgNYF7BdbHoLpSXWnp5GTZ1Prk6sqysovk0M7p7ue+GrYpJ1uhrWVT58tKztfYtW26AYDZQmAFAHjm4KGr54+lrdDp3ued3O73qJSlrZPwnued2rV74MIAAAAAHgbWsAIAAACAWcMMKwAAmJdWrVpZW1ubugoAMCOYYQUAAPOCtAoAd0BgBQAAAACzhsBqAoWFhXj0GAAAAKCZEFhNwMPDw8rKytRVAAAAADwbEFgBAFq2M4kfSOJzDKYuwxxolekZSq3R1GUAwF0QWAEAWjbXwTP+LRFzH6EHjTx+carm5o9ZcwP8AwIDAwMDh89V6OsOGjXy+SHDvyx4pFKfsNyvI6XTI1ceepQ+WO1xhSxDqX1cNQEAEWFbKwCAlo7rLO7xaD1cKsk7RcOMRBwiIv1Vg/27X26bIr553qCSzY1NLq1hqdejDfSEeb0/N8qVfB/p09CmLYqM00/8eYingPO46gIAzLACALRw6uSPg+PzjGTIWfVBcGBAQED/ISGzfigwEBGVymaFBA4JCAgIGP5ZvPysgcggnxUwS2EgIrogmzRsUU7tF+hnkz4PDgyUxOcYSV95mcfjNRyBa99VsuC7JW87P0qZ7PFN0gkhwUGBwWGRcZlqIiIqkk0PCX47MPhDadJx3W1tPpSu3LwuekJI8Lshkauy6+Y7y7LXTR8bHBQ89osklY6IdNmrpHGKW5OhWuVu+a7d+Vqis7LoD4MD3+zXz29gyNR1uReJjKpNkSHBg/v16zcwcFyM7KSOiOj4urGjQuL2sWRUrZsQErIkm63t6JxMOio4ZKZM/Sg3DAANILACAAAREftP6fnOMzIUin3fSi7/kJB6gcio15RwJWsVCkXKkj6lK6MTC5ta3+ka9tXPGRkyqQ+HDEbS7PgscNTHczflVNSedfTwduc1cWWzyys6tI8jWb1l6+rJrx6bHyPTEpFw0Ixvtv2cEh/Iro9JKrrVZnN8ICtLUIqmrf5m5RhmZ8z6Q0RUtGl6jNJz5ndJqyXGRGmCkogllqUGd8SWFSlPFmmvEpWrsg+pWPcgSX+H4syV0V8ridSnDihVHE/JsN48VVL0x7PTLxJdOnfquKroIhFdOFegVJ3V1gVWxqG9uPtr7QXMI94zABAZdWWlJaUIrAAAcAuXiITePYSaUk3DwzyPkaE9/snLO3//HtzGrMnIyNi2KIy/d968FM39L2g+xkZoxxf2GurrUqT6k4gYtvxQ2vak7IsMv7y87mEpxkZoJ3AfMrizkWvTTiBwDRjsqVOf1dHZbLmKJ6RjaRnZl6wctMpjahL4zlgdNUTQ1GhC//Ap86YGCUhd+L/aaVjGY9iUGbHfzPBlLu7bncM2WaeD36fRc+ZE+DbZNQA02/VrV6qu6BFYTUClUtXU1Ji6CgCApvB4DNEdk6kcsqz/pYG9PatZEF2/qwt3v9DBbr8XlD6B8vh8a6Ia0u2NHj1dds6Cz797JtOa4ZKBWCJiuAzDGlkysCzDkJGohqh90JT3+/CbNZYlY0V0+1/YDI9Htz4Elox3fRrGprMsADygVvbunbuI8dCVCYhEIgsLfPIA8Ey4XPG3ntx5GnnqgTbioLZcpr397wcPVrw1yP5mE2ser+JEqZ48rAwGLqvXkL0zj66W5hzTtO32SOtW701b8D/qP2/KaE/SsunfF9+n9SudX2PSGbEkrMeteKs7k3vOtreo2ROhrDo3PfPSqa37WE57TzGfOC4uHMrPTFLYUf1mWHxHPkMnD6ZlZl/gOXr1FzUvFgPAfSA2AQBA04z6nJUfBszTcx17hM0JF3OIRkqHzV0UMnAZMVyeu8SBiJwHhw3KXPZewLI2PSJXj6tYPEtWqDdweG6+EQtGuz250tzfkrh/EjnwhLvQqD5Fvpb3bs34TllwKDJq4ECeC8+of21aSqy/OmlGRLrvtozPRc0dUiWTTmWJI+j92bywDkQkmRoqn5G8MnIfEZGAb8MQv//YcaKj6zZNj9hkJ0nIivXFOlaAx+GFGzdumLqGFicvL2/ixIlHjhwxdSEALZFOp2tmy1atWrX0tygbC1aFLuMt2hLu/lSHtbW1bXZbVqfVsRZ8gV0zgyGr0+pYa4Gg9m2DVTodw+c3Z/8po0LaM1LRf/X+Bb2IuX0RQpVOe4UYfoODRlar1TF2gkbWKgDAQ8EMKwAAPLsYvuCBHm26vb0V/0G/sufffYUVX3DHy7Y5jMAJD1wBPE4IrAAA0ASOePKPW0xdhHngBMQfOW3qIgBaLuwS8FSVlpaaugQAAHgonLpXeQHA04fA+rTNnz//r7/+qq6uLi0tDQ8PN3U5AAAAAOYOgfWpcnNzy8rKyszMfOGFF/z9/X19fU1dEQAAAIC5wy4BT1tWVpa/vz8Rubm5lZSUmLocgBYHuwSYvwfZJQAAWgQ8dPW0+fn5iUQilUo1b948U9cCAGCmLl+5YuoSAMCMtNDA+p///EetVnO5XJOM3rdv38uXLxsMhm+++ebpj15UVGRnZ9e3b9++ffs+/dEBAJqjjbW1qUsAADPS4pYEpKSkrFq99kYr3pVrrciitanLMQWD7oaRtbe1frmtw4L/m+Pi4mLqggCeKiwJMH9YEgAAd2hZgTU1NfXrDVvKr7cla6GpazED2jwX2+tLFsWKRM1+LSHAsw+B1fwhsALAHVrQLgE1NTXLv1pZft0ZabWOwPtc+dUdKT+bug4AAACAe2lBgTUtLe06pw1Z4xvwBuw679q1q6amxtR1AACYL51Wq9Oxpq4CoEVrQYG1qKj46vUWuWj1HiwFnFatz507Z+o6AADugy3OTj+kfdqjqtOlg7u9PnDgwIH9gr9SEak3hXXr9FqnTt36xeQ8RHda1V7lHffAqpXZmQpFjkp7KxLr1IcUisxsZXGD5SuHYgZ269TptU7dXpcqjA2uPpl7W7PGsGWq7Mx0xb6GQ9RerMo9pFTfvJrVqvYpFHuV6nsvhNGplXsVCkVu0cV7D8Gqj2crMrNVZffO+qz2dLYiU5F9sv5SozLu7W6dXuvU6bVukZn4dwLUaUG7BFy/cYNaNfrYKdPR3+f9N1za8S3YinOZKfu2n3mYPyFcJ48xkq59XdpQxd
ntW/dnnqshS9dxH3XvZstwjaxGc+7AnuN7/jK/P3svvPDCCy+YuggAgPvQ7lu3tHjq0F6CpzmoOnNjuloUtXvbuPqlZOM2HZbkxAROyn6Yv81PJkun/zH1yOqA+le8qndGjv5CoXtJwPyjZV3DViXN8eWrZZNGR+9jBS+RVku9Z2/d/L57bWO2xn3clq1TuvAZDhERe7Fo3/cxC77O1Q1ZfXxFQFNj6g7FjZ6wSW0lYKq0rEvYqqQ5vnZERHQxO+bDCNk5gWTT/lgfoiplXOiHm/7kCyy0Wp5k9bbYgEY/6TObxn4Ql2sUCGq0WhJNTNw2pRvT2BBaxRejpTt1/JdYrU44blNKVA+m0eqy54+evFXNODFsmY4/JD5lxVABxzPq58NTypIi3l5JxsYughapBc2wNsGi55jR64PtNfv2L92g+CHvEvuwfz4cXnN1OXdixbd7d1Z3nBvR3ZlDZPmvnn1szv33yA9ZRSUvvjL1/96f0wcbtQAAPCTdvqVjx42NWJatI9Id3RQ9ITjw3RBpQq6OiEitWBI5VhIcGBQ89oskpY6ISLt3ZeSowMCg4MjvVURERrXiq8iQoMDgcdFJx3VERLrcTTMjQt4NDBwVl/23ctMXMeln6wczFm2aGDz2u2IyFidPDAwMCo5R6IiIGIZvdXv2MmqzV0UGD+7Xzy9w7FyZqoqISHcySRo2sF+/foEfxqQXExHpFDHBkzYVVR1a+m5goCRacZGItKpC6hwa/+v+/b+uGcovlqXlEZ3NVpzlD12UsX9/SlQPXe4OeVGDoRiGz9QOzqZL/YYvPeng7nTvz4zN//Ug22Xid1n796+W1A1BRKRVxEZn24mE9dFZl7ku6Uz7KbL9+zNifS/Kvk4parQ79alzjPfEhF/3798d60uqtHRV40OcTft6p9ZrXsb+rJQpHYqSEtIanxuvOnXqojBoQcrhrP3fjXXXKmTZF+vuk7FqNOBCy9WCZlgb59R93JsWOxf95+szNUREZ84REVn+653R/UZ6OXArz+2S7dl04grZe0yd8HpPxzY8qi45mrN0ayHj99ZUh1MzfjxrIOo6LPid6n0xckUMERGdPnj20w9tXF4kDREZq0tPFB24QgcOnthzTpI0pn9m3q4j10x3vwAAzyx+j7A5k3szVgJ+mSxiurzz8m+2upxa+cmMOI9fY/uX5+8r7jxv8yftdfIlEdKvX/t1RvX62KyXv9y6uhNpdQwRKVdFLC0Mjd8w16FgfeSU2TY/rR5adUp+iDd+29ZeHGKsi0+xbPXNKQuOMGjaIsfvI6U7hCNiZ/pxiOfCb7Qq9fYZkxPUQXHfhDvIZ0+MjrR69ddZDvJvk1X80G9+eDlpXOSCVb0DVgTwvUM/9c+N3C4YETvTj6ntTBAwY3WAUatSpB/MVOmc+vuJiZzCEn4JIyL25Dr5GRIGejX+mDATsHB/AN+6eGVQev69PjPGd/bPvkRk1CoycrVWXl4eRETajAULlL3mznt5xURZbbti1Sn2Jd9XDbnZfws6u1Cuqpgl97szo/CdOQnvsOqjCtkBxSmjcFC/9o0Owaryi0k4zqk8+xC92lnA7lMVGUnAuas7q94TV/Smi0XZGTJFrprvHerV+GcM0OJnWLkeLp0qi/aUNnzqyKLnmOBPXzm3YtHW6F9r3pr01kgBEc+pZ7sr2+O//2TlYf3rb099gzmnpXZveHRrRcT5V9++LlRxqe5qjs1bA911h08dvyuVag4UnuC59Hzl6dwZAMBzx1ro7ioUChjdIUWulZBXKE/bq2ZeonxlMRERcXkvCfgCd8kQT13hH1qOsL1refYWmeJPEggYMqrkChr0WZink0AYMCXM49DuwywREceGJ+DzBXzGynPi8lhJ+5uDMYIOIpEDQxa8V7uIRF1EQrtGa9Ip9+WzwkGSISL3XiMGiUidd7CIhEHSORN7sVkpuWoj6bQXdERk595ewK3rrZOQfzO9scqNC2bH/arzChzmWf8tvHZvXMi4lee6RK2e0buJmUaGz2/2HGSVKmnq6EgFI1kUG+ZKpJZFLykeFDM3wPFWk0tXWOIyf/0nZkZiETFE7KWm1zxosxNmxyTkMj4jAkT1AfOOIVgDEZcKN86YL/uLw1AVe68VFKrkmDlxsr/bBw3vLbw71AIQEQIrteIw14zs9YZHXPy70n9/PnKk7NLv+/btPOfq78UQERmryy9c0ZQW7lJVt3vFwaA69Ru5+rtbkHPHN6yL9vyvhoiIYzNgwnvjWx2O/vGc4e6xDDXsNYs2rVr8rDYAwKNhq1jiWlINUQ25+H8S3t/httPWlgyxRMKwdVvneJfLJgUGLszW0SWW5fLqMh5jaUWGK4/noYJqI0scYoiIGMaCqIZIp5g9euyCfQaBSCTkNWx71/8ZjCzLCYjff/x4kuTSZqn0ezURaTOkoyclUeDqrRvGiaweub4q5bqPP4wpcI/asjV2iJCIcjeuzK4h9dbZkXFytVGXu1aadJoc7fikv+T+ecbhuO6snhi+oKm5TrZKEPbN4eO/rfItXhmxIF3X2BCMnYAhPdsz/vDuKPcqHdnxbZoqj9Wxveb8euzwr5/ayL6YvP7kI98vPKdafGDVXiq3t293298IFkyrGsPV2jnXGr2BmNsjpqG6hjjEvXYu8yj17ePStW97Xv6pI9VExPQcEzyz7f+i4w//3tiX/ly3f7V7sby0DHtIAQA8MD7fhtXWPdEu8PIS6qtfHhoW9n5Y2Pthkh5NPInFCHuPjkpYF8pNT8unzl7icvkuFUtEZYrdShfPO79+1hUdVWmb8xSDDZ9POq1ayxqJiO/VxZ3OHcsqZkl7KFdFgq6dhX//UfQPufiMCBKRtpyopm5+keFyqUZ/QUtkZFlj3ePwr4fFZRer1We1emLZGqKzSTPmpGs9x306VFCuVCqPq5vYAoDVlanV6gs6IxF7Qa1W62of7WeVSfOj43beXIHK5i6TrlTyh34a3p0tVh5VFmlZof8nUeNH9Orm5eUh4BM5uHsJ21D7Xr0FVw6lZRapD6Rlq/m9fLxqr1dnroyeuym3rgg2+4vXu/lFrDuqVp9TX6giYqsbHYLEvXtZq7PTs9XFirRDOkGvXiJOY+Wd3RTS/fXAOelFanXRuUsssSyesoImtPTAalCdyqxwHR/q4Vz7Z4nD8LhlJ/5s84aPK4+InF4b0O7SicIrjV1ac2LP/8pf95/ZhzJ/PWsg4r7WP6pHxdIvD56obqQ1z63zzI+7MQfyd1U+wdsBAHhe8ftJ+hfHBQYFB364TtUhfOFI/dJhA4MlwYFvR8oafaKHzY4JCgwJCwmeuIPeCfLi8IdK5762Z8LAtwMHvrfCMHbRuA63t9fKY8ZNWHmoGaV0GjHxnfanFvbr1jV43Rly/2hhlE/5+ndf7+Y3W9lhXGykL9PeL8ibr4of+PrE3F5jA/gnV0auUhGRMCAswCk/5s1Onbr2izlAxPEMnz1O9HdSxNsDA6ensb2mzBwpZE/m5lcRe3RdZFhISFhIyMfr8xvNcMbcuGEDBw6OSDpL7N6YgQMHzkjXE
RGdTFu/PZfc3OubFR/KU5NRnT53bEhYSEhYyOydWmH/sHEfjRv30bhx73rxOfxXh4T5ConpP2VuqMO+LwIHTpDR0Lkzh9ameW3u9k1pZXz3umzP+H46V/LKqZVhAwdKYvZZBURNC+I3NgTZDZ05ayjtiBj4tnSfQ1hspG/d1PYd5blKZn3uSxnSwIGBEd+Xe4bOCe/WjM8fWqQW9GrWuGXx239VkY3HHce5L3ed+Wn/AfY15foaxtbynOz7iSdcY6UDenKqydqiWJ4W9eNZvVv/pJnWX0/edeCaRc/wsVHXd4V9d85A1u/8++OZnH2S2GMaoo6jxiYNs624UkNEZCz/cem2TZXdV67y71rNGjgW3OrKI/tyVqQUaszsiSur8j0/bN7wyitYWgstBV7Nav6afDWrkdX9o6OXBHULQI06rZZlBAJ+UwsfjaxOqyO+gN/gazRdmZbsBI2u/2Qv6siu+StDb1el09Xctq70Vm8sS0yD41UsMQxzq2ZWp9WxjKDJ7+BrHYrpNy5X8mPGlC73alX0dfDoPEnKprCHfKNjlU5H/FsfV5VC6rfUZnXGnF63fSrsRa2O+AK7+31URp1Ox/AbNGu8vNrfxzt+U7RJY/3ieHGHVwdiuwAgQmC9iSewd7YmfcUljb72K3sLeycbRl+haXR29QFYcHkc5hqrb2za1RwgsEJLg8BqLcWs5wAAIABJREFU/poMrC3ZoZh+H8qok8jd0XfquomejWd0rWziWNXolDn9H1PIOxoXvNZl9UPH3zs1rzxjUdL0mLTz6uLj2l7LEVihDgJrS4fACi0NAqv5Q2BthK4oV6lmichC6OXj/lzv/qQrOpqvvkJEjKOot+ipvikCzBeeWAcAADB7fPfe/d3v3+x5wHfv4dtCbhWar6U/dAUAAAAAZg6BFQAAAADMGgIrAAAAAJi1FrSGtebaNbpaRtfNbFspU7vG4pkSAAAAMGstKLBaWHD6dGsvFotNXYh5+fHHc6YuAQDgTpevPOqeggDwPGlBgbVVq1a9e/cOCwszdSHmJTMz09QlAADcqY21talLAAAz0rLWsL7wwgumLgEAAAAAHkzLCqwAAAAA8MxBYH0ySn9JyTPly1hVaSkqc30ZLAAAAMADafGBtVq18YPubXmt20/7Zc/iN30mpJSpk8PEPtG5RERUU11d8xCdlmxctIfcLOt+KleVVDY4WVNddlJV1liarDy5J2Vryp7TlY2cI6ouy//lp5RffitpNIjecbad3bHlq1UPUToAQItjfIpXAcBDaemBtVqxXPqT5fiUnIwon7ZuXt1FbS2rz5eUVlYT0cmFPnY+y08/cJ9l3y/PD5AOdyCi6pJdC0f08YneW3eqcu/CEW+HxW7PKbk7lFYfTErOv1hdsvH9txcq7+61MmfrDtXli/lfjhixruS+Zy37Rodqlz9E8QAAzz31IVl2cf0Px+MC+0qz2ftfxRZnpx/S1v1gzI72Gxh3/OFr0J2UxUSODfkwMm5nUTMGfwSsOvvb6MgPQ0LGRcZsV+mIiEi9My56plQ6UyqdGR23s4iI6HRS/RFp9CqF9p5dAjx9LT6wVl6srin5ZVl09FZVdfn5Eu3F287WlGyc+GbgtJQyKtuz7KM3+3T3GRn9i5qocs/yDwJ9+nT3Cd145zRmWUrsQZ/o95yIiC7n71G2GxEkqjtVmSKNOf/Z9h1rFozv40RElfm/qaqJqDz/4Olqsuzz2aLp48OnT/Knkr+q7zxLtgM+j50ePn56ePeLJWVEVJZ7sKSa/p+9e49r4kr/B/50SZlRIQYlbdC0ElBrqK6Atkq+9UIqVkF0BWqVVItgvSFYJWIr0a4SXUXwAlJFDVrdoKuAW1G0oFzbAFUJijW2gsE2Sn4GSwxUZ9jQ/v4AFbmJVg2B5/36/uHMnDnzzPT7cj+eOTkDRm3Bd+qWRwFI4Sqfyi3x6mcZIUYIoS6MLvl3VLLqQUocvjDh8JpxxJNP0+Xt3HSyvHHDYlz4v/cvHP6sJehORHwqSb4CbKokcVVAZJ6hZRPD9Vz5xpDIb54uOrY8S3cyckVcro7J7q3Nk6+ZH5VHA0B5UXJyZkm5RqPRaH6trgMAw095ad/klVRoNBqNRnv3xWZohJ5eN1rWqk2kk3CWyHUQp/pIVlaZ8PFjNi6e/r7O/FeOhomitEGxkh4Hg4NWuarWqFOOFtutifF1duE9foLyK7nd/BROw4aVW9AXbso1O04CAACVnXJSp70/3yveSiiOCnOzheoMsShdaFei4G2QuwGANl2yWJqq95KvIwGo5kdBnRS6JDoTgtPcAAB08iWBWUJSUey+w+29FkcBgOUpdhFJM4N2TCYBIYTMHl1+ete2QznXfwNrO0fv0GgRX5UYnvaGJNzDBuDCzpAil+jFo0GxfcGWDB1tyR27ePVyD66FJnPjJvl5jY4G9jC/zz4XOTMBgC7atSAghXCctnq1QJX01W3fDX66+NBdJQ05jXCavTncXZccFiFX1QCb77d8jWg4EwAMeZsCAnsT/LmbV3AzZEnk/NVTuGC4KI/anqzUEQ4eQSuDPbgWUH5EEvXNjxpDnbXdaL8V4X6DCM3JqG2asWsWjGYCAIDuTHJeNds7JkE6NDNkvDgtNS987BRm413qSs4kJx9JSSvSQH9n78EGAPaDJ2AoORS1LamwXE8Tffvzfb+Mm/1gQKTts9g+m1PfJbhcAhSSMYHJmhsGAGZNDQ18/zh5IPdh19V3aWLUZ/vipvR84f8hEXoG3X2EFQCA5Aln+/u/x2vlEIPl6h3kP5l/vaBAS11J2iCRK/V6laoSABg2PHd//8n8ZmHQxV+oTspqdRaqXqt3CtwrP3Iy/p2ssJ1KAJZwcRB5VKoYEhzkTAIAcDylifJo5yzJHnUrR4HnHyWXr7KTrUnSAnAmh3nW7pBWCMQf8loeBQCglLJzTiJ3TKsIoa6AzotcsPmKy4Lo/Ylx3pBZeIMGuH39wpXbNAAArb1SpKoCAGKo/+bE1NTElY7fr9uWRUN9lTLvutOK3YcSo6fQ+8RfNc64cpqxWvrl6oXuXLj3a/H56zogXGatln4pXTmNq7lu6TCICcCduGL34WOp0V70rkh5w8gqc4Ro9ZfS1QGjmfU1188X/3ofQHciYmkyMStu/57PXVSbQuJVAKC5kHF37IZDB/Z/KdBslySWA4CRppsMWpb/XE5bOPIHEdDLcWB/oMvLdQ9mxGoOhswM2572m3P412d+OHtY6uP48CzdyYj5a5PLmaO9fZyJayVFl28/PNTOWQBMLpeA+nL5/gwd4Tz2/9gAutu3aVDtCwkURx0qabh0ja4K6KJtcxeItyarWhnwRcjEMLC2wCB7GKsrf9UDSZJGvfqaVq+nOHYc0so1eGfKyfzi4qNh/HZOHxIcxk6Kzm7ll1Esrl31r2oAYHFYUEMBqOPDUwRHVdFW8ZKjDyIuiyf04FWq1K0fJVl8b6HdtSuVRlBuFitnKYqXVUrXZFEtjgKAMi76fmCYG+ZVhFCXUHg613bW6kCBI5vN7t2z7Vf4DLo8O1l+8gr0
pquqGpKXpXUfNpPt6DfJ2fBzmQ4AgLDuw+UO4LKZj84j2Fwus1yeoBy3ab0fFwAIuqoo7Yg8t5pgVlU1BspeXMcBXC770dUNirQiftBSDy6b4xy4cGJNZoaqHgAIkmnLtGHzp3k4qsuv1wN32uq40NEPr9aQXgkGgIWlpQVAPdQ9OMQe7ScScOHaiaiV4oityYU3HuZcuuh0noEYt3SHNHzxBw6Pvx9t+6yGU8vlSwIivyc81kUHOgAA0/Wj8KWzJ3J/L0pc+0lArAoArP9v7tJQv9F9buftlcxclKjp4H8VhF4WnBLQAlfgOQYk66KV50Qij2ixj51sSIQiP0aS6SMZ4yQG0i2qWDG9vQ74IcE95ksL3KTNwiLpESzaE+w13wlUELTPDQCCE+XAAHBOEQJAmWzeqiywotQqEO0UAJCPHTUqY+ZIi61ISqUmF8tdGABfpOwFANiR4t3a0asx8bqg6PcwriKEuoi6ezTZq1lOJQAo+rFsVp44NyDNca5oFLPKCNBsEn8vkoB2JmfqTqyNLJsW9/UoJgAYsiWzNuq8Azz6tzu9lb5HA0E0NiEI67rHy+lJWrd2xdc4bADdzSoApq7qDoCDra3Fg1sa6rc60S9cU5h2VC5PjYyEt08uaxgkoSmaBsKazWxxX+2dBQC6EysDIvPAI/JQ3LSGKQBMZ59AZwCo947ynJmYm1O+jO8oEC0SAADtsWrMgm/yCqsD/Wzau3GEXrLuHlhZs1Puz278szC28j4AAIRlVAZTFEmSLscrg6r0lBWLRYJbRmVYlVYPLI4tCcBX1IS12SnpErZYISug3NxJAHBZp5A/3H88K0irB1sOq+HBN338A4P2Jvpq9cDishpjZtOjDJewA3KtVk/aclgtU2iLo8oC0n+VkPUsjwQhhDojvrNDeV6uboYf24JuTKkW7P62N5UqAwx4MHZJX1eqHUU7Av3YUFiy60rrPRHWPUFXqQPgNt2r+2bdNp1/Qgy/IX3qSn+EsV8uneUMOvrEwesAwGT2pnUaA0CTYVlgu7j0TzieqfOYwqZVGTlVQxc7WICy5TW1qpKa/s6DGk/ljxvHTUhM2yV34udl6AjnT0Y/nKZKXy/M+7kGAHrzpy7+fNxda+sHR5hv8x0hL29f7Im6AYWaxzNr22cBFO3adFrHHBE4GgqTU4HsP3oKv+rEaZ31AGu4mVmgBULwBht0ham5NRyuNV2edsEAfbhsnMmKOpnuHljbQpKNqZC0fZQPSVsOp4Onjw4ObuMIq50+rFgcq7Y7ZZAcbtvnPn7UZW4b10cIIfPEnfXl3KIQn4lyW0bNTR2MmgIAjn7BE+ev8vLaZQv6mzBqKhCj/Hy+knw4PXkAUXPdwA1pvSsXH7+az2Z5nWBajl59bFbDvvLkvZm6KtWCiUkA4Dg7IWGyn+PCkAmXHLn1miswjgRgjvEbmxDh5S1n9pkYvefBz1uHLPzy41Cxz4Sd1rTBcuKaXR7M1q5YIgv55Mrck3JRY0YevlC6oDg0ITLkJLDHhn85+9GUU03mppCtj5afIdylE8f6NfTJD1i96HzozgRxoQUB9Y/l5nbO0l0v1wHAhcTICwAAhEe0x7zynet3NiymxRzkt37FFCade2KrJLlhaQEbZ9G6pR1ZNgGhl+mVP//809Q1vCRbt259/fXX/f39TV1I5+Lj47Nt27Y333zT1IUg9JIYDB39Rcmrr7567969F1oMahWL1eb7IbpaZyCYRavGfOuRH+dFAADUG3Q6mmCymQ8HBe8ZdAaasGEz20ldtEFnAGa7TRoa0Qwm2+ZBq3ra8JsB+rCZFi0aVgOb02pYbTjRYKCZzGbDlgadhmY2nRHbEXS1gYa8iPHiokkJP2wa91TnNunFoNPqaILNfVQzbdBodPVMLpdNWLR3KkImgSOsCCGEzAZhw2Y3mxVqwWQ3e/nUk/nkN9oEk81+UpuWjSzaOI1oUUMzFi3SKgAw2dxWmj6pJhsm8ddXSSWY7AHM5ru4jm0nboRMDFcJeE4oVWraEz+Fqs06VtDqh1URQgh1GDEl5ofG4dXuiZgSV3z1hw3POryKkBnq7oGVOrXEddiS9Iez143KGE+BaF/Lb58+gSouptjmwUqulFZV8VgupapUyjI9AMeuTCZtbcUrhBBCTwHfWVvgQ0DdS7cPrFWVKm2TBMlg8ZxdnbhPuZjH1ZgYnb/kPRIAqKupkqkCr1hF4yGjKmmxl+9yWXppJTSseHUwBkdZEUIIIYQ6rrsHVgCA2izJRIHAc178eQqAqtaqK/UUlCXNE7q6ugi8FstUFIAmXeIjcHURvP9wlf6HjOr4qEqfVUISAECvyFS7fih8OJdJtXmJfNiOkweiI6bzAQBIl7D5INuqxMiKEEIIIdRBGFgBGBzh3CVC40nxohiVUa8uyCquuA8cF/8QafQqQfVBsTSTUh2UxpyzC4qShk11arYKKpUZo3hH7Nn4k1aWMCTMx/5hE1XqcXV1pth3qq/4UOM0A3J0sOe1+NSql3V3CCGEEEJmDlcJACCdhLP8BZASs7hYRQkb9ulLUuOjZErt/WojZVNF8Ty8BLFS8WJ98LodwpGcppmVdBc5LZYp50a4tFzP31hZSQklSXs9jVlL3CVJk+X+LIAyeTrLJ9r25dwbQgiZpdrffzd1CQihTgQDKwDcp/TqrGwlcHzsGp8HnbVbmm6zQ53vFM0XqgBI+6AUlb9ija/v8hjhh3s9mz420i0s8GRQnFK+wqV5xww7nk1lpR6AY2dHUveNAKCWRak8NwTjN6gQQqgdVr16mboEhFAn0t2nBJAkSeqTfNkOolOc4C1it8YkSgi8Rbz8JTy2MF5Dskgo2CK0Yzt47VTZeQhdW4R88j1xkC4+5mrL7vmiEF7SHNG8GUuyxgT72oL24Hqlh8QHh1cRQgghhDoMv3QFQOn1eiBtWWSzJGqk9HqKZDXup6q0egaLw2r54h8AAPRZ8cdsgue2GGQFgFq9liI5tiSAOmmrUrjMp4Pfd3058EtXqLvBL111fu186Qoh1D11rykBv/76ayt7SRar1QjJIFm2j+IpactpL2iyhMFz2zhkxeJYNfyJ57+M10Yjk/njjz+6zz9aEEIIIWSOutGUAB6PV1FRYeoqOpfi4uLq6ur+/fubuhCEEHqS67nybM3z6ar++XSDEHppulFgnTJlilqtzs3NNXUhnUhCQoKXlxeD0b0G2hFC5shwPmnX6fJnPVmVebrkwVwQQ/KSd2fufdauAOCqXLJSLF4pFq8US2IzdY176fJvtkvWJ5djGkboBehGgZXBYCxevFgul585c8bUtXQKUqm0pqbG29vb1IUghNAL9kvGtl25NxujJNN7XWqcv+Mzd2b4KS/tm7ySCo1Go9Fo79IA9I3cnZ/5+KzcmXyi5DkNAiOEHtO9htamTp1qNBo3b9589OjR/v37c9qdldpVlZWV1dTU/PHHHwRBbN68GecDIITMDV2eGhWVVKix4I7zD186zZEAAF1hYvSutFIdDPDdsCMQ9i1Y9Y2mzoI9es7qcB9HAgBupa39VEkSoz7
bsYj4ZluG45ql7kzQFu7cvCtDdbf3MO+F4sDRbKAvJkbEZ5RX1gDTwW32yvBJXLiWLNlHi74U8YnGyxuq79LEqM/2xU3p2bhHlRq164KlAwdUtGmeCEJdXjcaYW3g4+Pz7bffTpo0qU+fPg93/vLLLwkJCf/973//+OMPE9b2IpSVlcXFxZ06darpzgEDBsyZMyc2NhbTKkLI7BjyIhck3PX+1/79az1uf7UgSkEDaBLFK/IGLIw7fGh3hLeDBThMk36dnHpo46jyzVFpDe/sbcctkkilEX58C7h9rUippQHKE8NW5A5YGPd13ML+GSvEcg0AXV6UZ+EXd+BQXOjA4rWRyTqAepqiH8uhNboqoIu2zV0g3pqsMgAA8Bccys/5+lGkRQg9b91rhPWh6dOnP/zzhg0bYmNjY2NjAwICTFfRC7R27drQ0NCvvvoqNjZ27Nixpi4HIYT+EuXpXNsPD00ZxAbw+8wnedZp5eo3r2RcH7do92guAcAEAKDrbxZ9k/HrvTqi5+2begAAsGS+NoDLtWjS0Y3cjF/GLUoczSWAu2DRqAk7c7UibwAgenNtmDBqyrj+aapfAEaIomMeK8D6/+Yu7XXzZmlBxl5J5nnDSXkgtyeTCR1dLg0h9Ay63QhrUzk5OR988EFBQcHFixe7aloFgB49euzZsycsLGzatGmRkZGmLgchhP4Kuu4eTfZqHMskepL07zVwn6YtCeLhCIw2OXRmxPEq4jUmQD1APQCjtZUB6pqcxSB6W9CGx1bdZTJ7ARhbqYArEC1aHC7deXjzNCZdkldY/RzvDiHUuu4bWPfs2fP5559PmDAhLS2Nx+t0y6M+d7Nnz7548eLZs2cnTJigUqlMXQ5CCD0lBtB0HQ0Ef4RTeWaGph6gXpORXe40yhnedHq7vjDz/IMxzhuqctspny32m+Izxbnhy4I9rK1rdLebpc83XVyhMLPIAACGosxCwtVtQGvXpTUlFzRN5gToClOTMxWFhdnJaRcM0IfL7tnaWQih56o7Tgmorq4OCQkpKyuLjY199913TV3Oy/Pmm2/m5ORs3br173//e1xc3MKFC01dEUIIdRTTeazT1nURJ0dFz1i99ELorPeTmGCAEUtjfdlgMW7puqKQVV4TrG2t6xyD9n7sS4QEeOY62tDlv4C/BcCAiX78WSs8p/cnuH6747gNPRKjl0aOm7/Ka4I1QdfY+m3Y7WzRynt9Om/b/JX0mpy4KcyG7SsntkqSG+bF2jiL1i0dhzNXEXrxutGnWRusWrXq4MGD/v7+mzZtMnUtJlNcXBwaGmpjY5OYmMhms01dDkIvFX6atfNr89Os9TRtJAgCAICu1hmAybZpmhZpg84AvdjMng8aGAlmHybxYN4qrdPRD4427VOnMzDZbMIC2kAbDMBkPn4hjUZXz+Ry2zkLIfQ8da/AGhsbu27dutDQ0DVr1pi6FtP76KOPlErl3r178ZdYqFvBwNr5tRlYEULdVTeaErBs2bL8/PzvvvtuyJAhpq6lU/jPf/6TlJQ0adKkTZs2hYSEmLochBBCCKHWdYsR1tra2tmzZzMYjIMHD5IkaepyOpcff/xx3rx5Q4cO3bNnj6lrQehlwBHWzg9HWBFCzXT9VQIuX748ZsyYgQMHHj16FNNqS2+//XZBQYGFhcW777576dIlU5eDEEIIIdRcFw+saWlp77333qeffrp582ZT19Kp7dq1KyAgICQk5MCBA6auBSGEEELoMV15DmtCQsLnn3+elJTk6elp6lrMwOLFi/v27btmzZqffvpp/fr1pi4HIYQQQqhRl53DGhMTs2vXruTk5OHDh5u6FnNSXV398ccf29jY/Pvf/zZ1LQi9EDiHtfN79dVXe/XqZeoqEEKdSNcMrFu2bJHJZGlpaQ4ODqauxSzNnz+/oqLi6NGjvXv3NnUtCD1nGFgRQsjsdME5rDKZLD4+/vjx45hWn9nu3btdXV0nTZp0+/ZtU9eCEEIIoe6uqwXWb7/99rPPPjty5Iijo6OpazFvGzdu/OCDD7y9ve/cuWPqWhBCCCHUrXWpwPrrr78GBAQcPHhwxIgRpq6lK/jnP/85ceJEHx+furo6U9eCEEIIoe6rS81h9fLycnNzk0gkpi6kS1m6dKlGo0lJSXluPVIUBU9aEpeiKAZJtr2IBVWrB5LVpAFF1QJp1bTTlnsQAsA5rAghZIa6zgjrpk2bXnnlFUyrz9327dtfeeWVsLCwDrbXZ6/3chNnGVs/mB7+vqubUOji4Lo4VftovzZ9savD/PSGNlmr3ndyEwpc3l//nb6VPozKmKkC4VRPV77rkmNaAFAfmicYJhC6OzmMkWQ1nHFVJnrHVeju6jonSd16JQCUKilQ4LvnURXU1VTJnPcFwnlJFR28V4QQQgi9DF0ksF6+fFkikWzdutXUhXRNBw4cyMjI2L9//5MaUllr3vfcqqjUUm00YLnO36tQKhTKvYLseHlZ415VXHC8xgYoCgCgJF6cL0w5p1Dsczv5RbyqZR8Ml7BUhSJLceWgUBEnVwPwxkTIzxUrzhVHv5EUn6YH0Keui4FwheKcQkxJ16e1VkyZTOQeJL+m1VP3H+7xnS23C01RZO31t3/SjSKEEELoJeoigTUiImLjxo2DBg0ydSFdU8+ePXfv3r148eKrV6+225AUhJ9UHAniP3wPX5suniHJ0oM+U+y7KosC4AzkkQBQq69mcHi2AABQsl5c4LljWeNJ2gIF5SbkM4B0FrpWKRRVoI4TzdunpiiVbI5IVgEAAAyg9OqsbBU50sUOALg8HgkAVKWetOOSYCzOOscTurMAWEIPniJf2bIMsBfJshXRnpyHpWfFxsAMsQDU6tZGdRFCCCFkQl3hS1cpKSkajabj76zRM3Bzc1u7dq1YLD5x4kQ7zUgrEpq+grfyFM/NEk0W3O8hkB4RNuZYShU/X0otT/FhAYA6fl2xz5YUnnpJw8FqfTXLhgUAADYsK0qvB96nEpc5IuFusAuUye0BAMColC0Wy85Vu27gPcjG+oJIkYwrTXcnwaivrmXZWAEAsKzIan11K2U0nyCrKjhXXamVyyspZaZauO9kxEic/IoQQgh1Fl1hhHXjxo04dfUlWLFiRU1NjUwme6qzSAYAUMAgG/9xZFTL5ojSx8jkc3kAANkxMWpQ7ZZI9hVXlqbEf6e3YfXQVzcMclJULcmyauyEMgL58JdaDJfgpLPFBZL7ayRJVQBAKbeKglX+KV/5cAAAWDZWVDUFAKCvpWxYNq2U0YyRum90EkXtiN6yVx5Kyg8onvLZIIQQQugFMvvAmpSUZGVlNX36dFMX0i1ERkZGRkb+8ccfHT2hNl26k4xIK05dRsVvyKIAVHHzZNxo+TKXxug5LFgeJ/b19BK+Y2dj5yoYSHLecSPPK9RGgKtZCiuBqy2o90izPGSKfJnglCS+oknnJNkD9PeNQBVKg9MEssQgXkMYZbgKXFQF+RSAXpGvdnHjtyyjOQbPiVepKqUA4H4tRbJsnv0ZIYQQQuh5M/tlrdzc3FauXPmPf/zD1IV0F7NmzR
o5cmR7EzCMqaJhWUGlO4StjGWq44WuUh2fZwUA4BqevmM6q/FI9hKnfcIrB3wAtOmLfcSlNix9tSAqPXoyq3kfGpnowyS9HUldU5P+MvkXbuo1rsKDwOeQANDDQ3p2nZAqifGdLQc7qLQKkh8J5rcx80X1L8ESK/nZEB4AUCUxvnNTSH4P9TWe5NheH+7TPhhkNnBZK4QQMjvmHVjPnj0bFhZWUlJi6kK6kcLCwo8//risrOzJTf8CSqvVW3E4Vm0d1mu11WDLa7NBQ5sqYHFZTzEX1ajXaigWl9PO+q+oC8DAihBCZse8pwR8/fXXgYGBpq6iexk9enS/fv2OHz/+Qq9CctpOqwBAsjj27abVhjZPlVYBgMHi2GNaRQghhDodMx5hrampsbGxuX37dp8+fUxdS/eyY8eO4uLixMREUxeC0LPAEVaEEDI7ZjzCmpqaOm3aNEyrL9+kSZPOnDlj6ioQQggh1F2YcWBNS0ubNm2aqavojgYOHGg0Gn/55RdTF4IQQgihbsGMA+vp06cnT55s6iq6KaFQeO3aNVNXgRBCCKFuwVwDa25u7tChQ9lstqkL6aYsLS3VarWpq0AIIYRQt2CugfX7778fM2aMqavovng8XkVFhamrQAghhFC3YK5L+BQVFc2ePdvUVXRfPB7v1KlTpq4CoU6v7pYiJfn0hZ8ray3t3hZ86O83rC8A1J3/d/TxXwd/HO432ALgVk5sooJ4b4F3vXz/d1VNv8RGvjEpxMcybffxS7dr68DS6jXeyIl+U537giYjdv95YvyCBe/1BagrPRR9tHzwx2FvnY85erW+6eWZIz8WT33t5+OJiRmqO7RFX97wSQFB4/tZvNyHgBBCf5m5jrAqlcoRI0aYuorui8fj4ZQAhJ6gviJp2SfLtycVVNKEsSJHHv3p3DUZtwGg5tYPxzMyLt1qaHb3Us6p49k/19TdrrhUevnyhYKMUxnSLMG1AAAgAElEQVQ5BZcvl16+pK6qq/05+5uMHNWtGv2ty6f2bVg4Z83ZO1B16dsTx7//uQYAAGj1hdMZGedv1d+5pbp8ufTyhbMZGadyLpRevnzlp8qaO8fXBm84dKHKsm9fi1vf/6Cuw7SKEDJDZjnC+ttvvxkMBh6PZ+pCuqOcnJzx48fX19dXV1ebuhaEOrU7p2J3Xaixn7Fjz/KR1gC3vln+yb8ytv976sTlrf/dZT9ry+FZACXRXguT+87YciDQHgBAkwQAfd8T7/liJFxLmBmwLyertO7D1s7vMV4sHw9w53io14bS8Sv+s268BUC9YsPlGuD6RUSLh1m+qDtFCKEXzSxHWH/++efBgwebuopuavz48WvXrn3ttdcAoKKiYu7cuaauCKFO6qcLpXUWgz+YNtIaAAD6TfZzY8EdZWnFs3RWV1dXU/GTuqYeiB6WT5E8Ld4aOcIaNMnBHy3Z8HVORc2zXBshhEzOLAOrWq12cHAwdRXdV05OTkhICJPJdHd3HzdunKnLQahzqqu9TwMQRI8HOyysmZYAdTV19e2d1qo73ywfO9ZjpjTnDmvk3I9GPs2pfSd++fWWRRPfgkvHd34+U9QwJwEhhMyMWU4J+PXXX9944w1TV9F97du3r2E+hr29fUBAgKnLQahzsrQf0A9yKi5drPHvZw0AUHH+0h2wdLO3s7CssLSE+lpDDQALoKaWrgcror1hU8uhUxe+P9jK7q2Ro4b16wGgsiQsoLamYby0rq62Doi2h10t+wk+WSf4pKY0LvhTecbxnKUTZ/R9vreKEEIvmlkGVq1Wy+VyTV1F92Vvb//uu+/+8MMPX375palrQajzGjzNb2RqdM6W4A23/Yb2qMg5kvwz9Jvq424N1oOd34LvFPs37wM34nJK9h3o5z68XztdWTtO9J/VZGDVfuRQVlLGqdjoN7wHV+UklYLliJFvtf5rqpqc3Qlq27/zeoP6Zi2ApWVPnMqKEDI/ZhlY79y5M3z4cFNX0YkUFRV9//33v/3225tvvvlyruju7n7r1q26urrdu3e/nCuqVCpra+uZM2c6OTm9nCsi9Ff18/vX5rr1m/cd37nhOAD0HTb1iwixwBoA7GdEiFWfx55N2HAWwLKfYP7aBc5P03MPwdII/8r1SclRpQBgPWiqJHxq66OmdRU/FXy7T5UMAACW/caHLpxo/RdvCyGEXr5X/vzzT1PX8NSmTZsWGBg4bdo0UxdiehqNZuXKlTdu3Lh9+7bRaKQo6snnmKeePXv+7W9/69+/v4uLy5YtW0xdDjJjBoOhgy1fffXVe/fuPYdL1ty5dd/S9jXr5mObNXdu1YC1bd8WBzqoruZWVQ1h26/vE86v09+pqqWJ3v36YlhFCJknsxxhvXv3LovFMnUVpnf58uUvvvji0qVLv/zyi6lreUnKy8vv3r27fPlyzKzInFj37ddqUmxrf0dZWvfrUAeWrL798K9MhJA5M8tVAgwGA5PJNHUVpnfw4MGrV692n7QKAH/88YdSqVQoFEeOHDF1LQghhBB6Scw1sFpbd/c3W3fv3s3NzdVoNKYuxARu3bp1+vRpU1eBEEIIoZfELANrbW0tBtabN2++8sorf3XSKoPnGRIs5Dynml4WrVZbXl5u6ioQQggh9JKYa2C1srIydRWdFG929N4vhOSjHaTLoh17F7m03prB91keZHaBFSGEEELdilkG1t9//71Xr16mrqJzIvnuoqA10WEPl8ixD5JuCBZN5pPtnYUQQggh1HmZX2C9d+9ez549TV1F52XHYamukkHhPiwAAFK4PJivUelt7OwAeHP3pqx2IwGAwQ8+IA9+qnUfEUIIIYRMxPwC6/3793v06PHkdt2UHcdGr/wqRukmDh4CwBEFe1fGb8iqZtlxGEByXQV8OwAABov/jhvfBkddEUIIIWQGzC+wUhRFkpi02sDg2Fjdry6TRx9liUKFLouDXbJjZKV6yoplY5ZL7iKEEEIImeGHAyiKIgjC1FV0VgwblpVeW0MVfBVfmb9DbqSSpqfr9fxqK37DquEkA7M+QgghhMyM+Y2w1tXVYWBtG2lDUvdrASrk8ZmkjTJ+RwlArZ4iWTYkVFaoYZirS/PIigPWCCGEEOrUzDKwWlo+44e3uz6GDWkFVC0A6FMDHex8ZFoAMN6nqB4sW9Afi46uFKWrr1xRpoq4ev19Cqji9HwyaF+0EDMrQgghhDor85sS8L///e/VV181dRWdVa3My1rWYmeSLzsJAAAK1gvt4rk8O/J+ZYVWbwQAbeoch3RbFvy17w8ghBBCCL045hdYjUYjg2F+ZXceeo1a//geqkrfelOEEEIIoU7A/KYE1NfXW1hYmLoKhBBCCCH0kphfYP3jjz/+9rdnLZuiqCe9+6Zqn9CC0jdrQDU/xfjkqyCEEEIIoQ7qUoFVn73ey02cZWz9YHr4+65uQqGLg+viVO2j/dr0xa4O89MBAGqVMVNdBR4CpzHidG1rfVTIRG4CLx+Bk4tIdhUAQH1onmCYQOju5DBGkqUHANAeWyJwEQjfcfWNU7aZWilVUqDAd8+ja6hPxczzFAhmx
KtaLx4hhBBCqPsyv8D6559/vvLKKy12U1lr3vfcqqjUtpUSWa7z9yqUCoVyryA7Xl7WuFcVFxyvsQGKAgD1QYl8YLSioPjkDKUkqqCVPuyD5PmKs1nFJ+do4w8oAYA3JkJ+rlhxrjj6jaT4ND1QWdI16qBTxYqCaLtEibzV1FsmE7kHya9p9dT9hh3aY/N84yj/RIXiSDAfZ+cihBBCCD2uy+QjUhB+UsFIF7lkNe6oTRcHKjx3S13PiYOyPeUbhJyBPACAWn01gyO0BQCAkvXiAs8dy5Re+wCAUuarXGcJSACeu5A8olCBGxknWm8l3TGLks+Xwjp5kD0Ag9JrlOkF911n8QAAuDweAABVqSftuCSUFRSzhMFcABAIh81LOUcFuWc1K4O0F8myg9RbBUsa6jSqZFFXPNcE22jUelse62n+g9TX19vZ2T2Pp2dm/va3vxmNOBaNEEIIdRfmF1j/9re//fHHHy33k1YkNM0wVp7iuVmiyYL7PQTSIw+WGaVU8fOl1PIUHxYAqOPXFftsSeGpG6KjvrqWJK1IAAArVo9atd4Ibp9KXOaIhLvBLlAmtwcAgLJUSeiOrEpe2KCHK5fqCyJFMq403Z2E76r11nwbAACSZQWUnmqlDAZJNn3qeoWiTA9pcsqoSi/l7zgVLWR19FH07dt33rx5HW3dhfz+++/Hjx83dRUIIYQQekm6TmBtiWQAAAUMsvEujWrZHFH6GFnKXB4AQHZMjBp8dkskmuLK0ur47/7PzurBj6Vq9fetbFgMACOQDKCMQD78GtRA/x3p/lS2WLBcJswI5gGl3CoKVvmnHPDhAIAVSVL6+wAAlL72fkP8bV5GM7UUxfWJjpW6MCjBHCdZtlQ4vaOL+LPZ7Pnz53ewcVdy+/btzMxMU1eBEEIIoZfE/OawMhiMDr0Ork2X7iQj0opTl1HxG7IoAFXcPBk3Wr7swadJhwXL48S+nl7Cd+xs7FwFA3u7vMMrzi+gALQFCr2LGw9AvUea5SFT5MsEpyTxFY/6Jq1JqNFTAFShNDhNIEsM4jWE0SEC10pFlhaAUipKXQXvkC3LaI7D51Oq4ioAoKha0sYKPzmFEEIIIfQY8xth7WhgtfKMTvUEAJgcnTIZANRZacVqncTTDQDANTx9x3S+23sAAPA/O/Icz4VDwtwIT58goacdpeNIjgpJAF6IXA4AAMFJcgCg0pYIt6rtrCj1tfvCLal8AGV6ukoNwe4nAaCHh/TsOs+IcLnv5PdTyUrKRyblAkCzMloghWHhMl9vryy7ShUZJnf/q88HIYQQQqiLeeXPP/80dQ1P59KlS7Nnz7548eKL6Z7Sa/Qkh0O2leT1WrUe7LhtNwAAvVZrZHFsn2astFar1pM8boenrwJcuXJl48aNBw4ceIqrdBW3b98OCAhIT083dSHILBkMhg62fPXVV+/du/dCi0EIIdQR5jfCSpLki1yXn2RxOe0dZ3F4T0yVLE67XbTGisOzetpz2larVmpsXIZ0JP5S6ux0pZEv9OA3a60vU1P2PA4DAEB/OSvrGrh4CBuL1CrT8ytt3IRuXPLJm5S2IFNR/YbQ05n15E2EEAJ49dVXe/XqZeoqEEKdiPnNYX3BgbUrUG0VCSeL02s70HKz17xD6spTS3xXFTx6pkZtQZxI6LbkZBUAAPWd2Hd5emVZ0rwZMSojQFXqvA8lCq1yxyyRrOJJm6CWzfHZcblSscZzXpr+SZsIIQQAgGkVIdSM+Y2w9uzZE1/StYfKis8XRAcqZYe0np+2O9RrLJAd4QRnh/mQLio3WdYaN8+GWQyXs7KsfD1dZI3dJWa5fFEc7E7ZlQpk58MkKvkVz+i9IXwtw0t0VO1r295mkHdqEhUk/yKIc1kvWJOiH6Rvb9M7CEdZEUIIIdSS+Y2w9urV6/fffzd1FZ2X/phM5R4UNNefOiRTPdxbEj/Px9e34f9Ck9QNOym1Gng8EoDB49uo1Q+/y+XsHzFXYNf4b5lKtcaONwgASJ49VKoptVrNs+cBAGcgr1qtbn8TflVXc3kcALDn8X5Vqyva3cRPASCEEEKoNeY3wtqjR4+6ujqj0chgmF/xL5425ZCS46FKPwe8mhT5+QjpSAAAGBoUvduXMgIAkCSrcSCTQT78XRgFJNn6j8R6AINqOJEyAjAAXiUpIwVAgpF68iYDwEgBABiBYvQgGfr2Nl/UM0EIIYSQeTO/EVYAYDKZNTU1pq6iUyqTJ+kEArK6sgr4Y2zSDzxY+bUsVbpKIlklkaySiLekN46lknwnUllcBVCrLK7mu9oCtDI5mMMfUl2spAC0Vy7bOA0j+UN5aqWKAlCVXLEb5tT+JgxysStTqgAoZbGaz+cNaXeTAWBsDMet/AEhhBBC3ZVZDlL27t377t27NjY2pi6k01EdTScD5cENU1f1Ngr3pKxaoacVwBD/6N3+LZrzg8J5Ph/6pr9aSYbK3SBrybD1vFNnwwY+1ki4OFg228v34P1Ke0n6ECDtxaJ9wV7+NverhDGpHJLR3iZY+YaN9g2aeqWHTi/aKSW5VHubAAVrXMOsUxRf8Fv+4eU8QITQ06m/lbM7eteJ0lt1lm95LF0bNrGfBdxRJHy5/fit+0TfdwMivphqbwFwKyN6za7sO2Ddb/zCL0PHv2bqsv8KWpN7cFdyfpnOgv32pMVLZ/CZpq4Ioe7A/NZhBQAXF5d9+/Y5OzubuhBTem7rsNZqtUYOp2GWAEVBqzMDjHptFcnhPJpBoNdSJIdFdmgTqCotZdXkcDubRooCkmS09ocmcB1W9FfgOqzPU/2tnG9+tvcYb1+Xs2HeZgg7tmrE+Q0f7bLasDeUX3V85acZ7+7ZMYM4Lp6T5hwf/3G/W/8ODlZ9eHj9ROt2e2WxOu8vMHWpC7zWXnEY69y7PC/3OtNv9xnpWMLURSHU9ZnllAAbG5vffvvN1FWY3v/+97/n0IvVg7QK0HpaBQAGq0laBQCS1SSPPmkTSNvHD7ezyXiQTVv+ASH00t068qnH6pw6AIC6nNVey1PObJg6Z185AADcTl4ydYOivt94n/H21gB9Bw9+vc5AA9wqvWo50p1vCRb93Me/9dMPl2vq1T+X9xOMsbcES/vx7v1KzpfWP0sx9MVE8fyZ0729potCok5rAACgPDls5nRPr+mfiOUXDY+1+US8ff9OyfyZ0/8xMyQ2V9fQhTZ3Z1jAdO/pAavkKgMAGHJjxVGZjQfhXm7UJzNnbsylAUB7QvLRzJBD5zPXzpw5f2dJPQCUyz+bOXPVCfDZnHryzOG4uASJNxt0mhsd/fcPQuivMMvA2rdvXwys/fv312g0N27cMHUhJnDw4MG33nrL1FUg1PX1GyWwv6g4XwdQ/9N5Vb+Ro8YKht+5cOEOANRcUKiHjRxm2diyRrE/ufoDP4ElvMbrd7/0/PU6AKirB7hvqLPoZ9/vVumFWwBQV0/DPUPdswXW8qI8C7+4A4fiQgcWr41M1gEAd+KK3YePpUZ70bsi5eWP2uyP9qKTE0r4y+N2b/+Y+CZyVxEAlCeGRZY4r/xaHudXv0+c
UAJAA03Dw2J6OjlYqEq+SSukwZCXlnax6rXBI/n9oeT7lIzLADdy0zJLajiObGByuQTUl8v3Z+gI57H/x/6rTxkh1K56g7ZCXWGWgdXW1raqqsrUVZhY7969J0yYsG3bNlMXYgKFhYWjR482dRUIdQMDPnB/7XxOSR1ozl/qMXKknaXg/RHqwu/vQE3BdxXDxrs1vNmvKUlYvln9wT9DR/YAsJ4YumJY6bqZPh/NXHawFFi21tBvaviCfmeX+3w0c96q5J+sba0snrUeojfXhskdNWVc/3LVLwBA0FVFaUfkudUEs6pKV/+wDdtx0gdO9Za9eWz2AI8PnA2aGwa4kZuhsuZCcdrJ3Ls9bXUlxRpgj1sRFz7pYeJkj5symqguyizS5SkK6f7jvJ2B6zHRGTS5WSpNdkYJ8KdM5gMA0OXyJQGR3xMe66IDHf7SA0YIPdEf//v93u81ZhlY2Wz27du3TV2F6fn6+t65c2fTpk2mLuTloShq0aJFb7311kcffWTqWhDqDvp94Nnv/ClFheJCncB9sAVYjpr0f+qc78sLsn4aOmm0NQCA5viXa78f9uWWufzG4dZ+40O3HExN/U+8n73lsHeHWQJYDpgqjj+c+p/DGyf3sx4+8q1nDqyNmMxeAEYwZEtmhSXfZDCZLSeR9iIsoQ5oACAsCYKup6GOpgkC6gGMAA7eS2e7tfyxFHuCn4eNLuPopjQFzfXwdrYAGDDR25koPy3bdqoE+BMnDgIA3YmVAZF54BF5KG4a9y/eCULoiV7t6+g0dJhZTg98/fXXr1y5YuoqTG/o0KEbNmxYv379ggULGAyGjY3Nm2++aeqiXhSVSkVRVG1trZ2dnVQqNXU5CHUXfT38hh5MXFMGH/xzMABAj5ETx0RHS/9dNyJ0nTUA3MnYsR9m7wh1fvxnVHW3FPs37L8zde3Evg921VScTViTQoi2jW//F1cdpyv9EcZ+uXSWM+joEwevP6H1m05vEyeIYX6iEY/ireFa4U3WaP7DMVamx8c+jidkJ3ItHBf9o+F3vVzvGWO3rzxx4gYxep2fIwAU7dp0WsccETgaCpNTgew/esoojK0IvXBmGVg5HE5WVpapq+gUuFzuzp07i4qKvv/++5c5r7eqqurw4cNLlix5aVcEADabHRIS4uTk9DIvilB3Zz3+48m75hV84O7YsG05cpq35X+/dV810hIA6n/67sKt8yWfeskAAKzfjzi8fFjG2k92lVrZC/w2xk4d3AMA4NaR5UsO3err5P7xti0TBzy30hwn+zkuDJlwyZFbr7kC457w8RFi3NJ1RSHhEyZY97eur3l7earUXSNfseDEuMMnlz1aOM95hq/z/ijVSH/fIY17mJOC/L7KTKS9505hA4DuerkOAC4kRl4AACA8oj1GcXGZAIReNLNc1qqoqCgkJOSHH34wdSHd19WrV6dPn65SqZ7cFKFOBpe1ekp3cqTLTjtv3Til75PbPidPs6wVbdAZaAaTbdPB0EgbdAa6F5vdEwAA7hkMBJPZOEVBU5haWF4qjzpS5b3zpHQsrq+KUCdiliOsDg4OZWVlpq4CIYS6tjrFljnrc+p474vXvsS0+pQIJvupfqf/ePuezEexVFu4TyrJrWePnrc5HNMqQp2MWY6wAkC/fv2KioreeOMNUxfSTV29enX8+PFardbUhSD01HCEtfPrzB8OQAiZhFmuEgAALi4uFy5cMHUV3ReXy+VwOKauAiGEEELdgrkGVjc3N4VCYeoquq+Kioq+fTvtK0KEEEIIdSnmGljHjRuXnZ1t6iq6r59//rlfv36mrgIhhBBC3YK5BtYxY8ZUVFRUVFSYupBu6scffxww4PktToMQQggh1DazXCWgwbRp044dO7Zs2TJTF9IdlZaWTp482dRVIIS6rNrffzd1CQihTsSMA+uMGTNWr16NgdUkLl26FBoaauoqEEJdllWvXqYuASHUiZjrlAAAmDhxYk1NTX5+vqkL6XZqa2t/+eWX9957z9SFIIQQQqhbMOPACgCffvrprl27TF1Ft5Ofnz9q1ChTV4EQQgih7sK8A+uSJUsyMzNLSkpMXUj3kpKSMmXKFFNXgRBCCKHuwly/dPXQ5s2bL1y4cPjwYVMX0l1UVlZyudxbt269/vrrpq4FoWeBX7rq/Drbl64MOh0QTCaTMHUhCHVfZh9YAWDIkCExMTFeXl6mLqRbCA4O7tmz5+bNm01dCELPCANr59dqYKWv52bqnKaMYr/UUjQnxEERJ7TAJIj+s74+tsw6UeQVVUIDgy3amb9a8LTd6VTZN23dnZveA60pKbysq2NynUfw2Y2R2KApKlJVW7IHuzg7MBvbFUVOmC/XGIHoNSW6INrD4sHZl5VVPZ0eNWuO1l1Vld+lHmyS7MHOjjbtFFhe8kud7VA+90E4N9woLFLVWNo7jxvyhCdvuF5ypcbWaTj3QSkt7uKeRlWquft4JYbUBWNW59IAxKClh1MW8S3avwjqvsx4lYCHpFLpF198gYH1JThz5szx48cvX75s6kIQQt2OLm/npuufveTAqjktO6Hhh397OJDbuCcw8Qc/RaTXklz6Gbq7nCQOK/vsXNyDuAmab0Jmrco09GETv+noAaJY+epxTE3yklmSPJrdB3Q6GB1xaP9sx4bGtNEx8MChpUOZhAUAAF1dnncwct1XhYZJcRe3ebRxSV2aZGbUo7+ziXGbfkiY1tpQcT2tuZy2SxKZfN1h6X+PLRoEAKBJDZm1Oo9mM2kdzV/29eF5/NYvQutUZ3atXSsv6Snaf3b1aAuA+tbuonTf/E/kNJtNWAAAMe7Lk1J3gjkt9odJdOFGrwXKumd4oqj7MO85rA38/PxcXV3FYrGpC+niampqFi1aFBMT07t3b1PXghDqjgx5mwICAxZszjUAGC4kSuZP9/rHTHFCoQEAQJO5MSTAb7qX9/SAVfISAwCALnt7yEdeXt7TQw6qAADqNZlbQ2Z6e00PlMgvGgAADIWJKxfM/IeX10dRuf+vJHFV5IkbDy5WX564aHrA19eh/nrSIi8v7+mRmQYAAIJg9nw88NXrcmNDpn8wZsx4r4A1yap7AACGy3KxaMKYMWO8Pok8cR0AwJAZOX1JYvm9ok3/8PLyk2RWA4BO9TM4+Uefyc8/s2MK83py2nmAG7mZN5hTNpzMz08NH2EoTMkob3IpgmASDRenT4jH+2y6bOvIaf+ZcQOTrzY4GTGayfHwE7Q+sUEV6+O1UE45OjS5r5KkhEx6UvSZnDNfz2aWyGSZrb9v0MkDJ3yy6zZ3UJNR3lbvwnDXYOG88FB+fk5+fs4ZqTsBAGBBED0bIzhC7egKI6wAsH37dhcXFzc3N19fX1PX0mV9/PHHvr6+M2bM+KsdURQFJEk+qQ2DJNv+f0+qVg8kq0kDiqoF0qpppy33IITMG3OEaHXoaKInm6lNXhCW4RSz+1D/K9sXrogafEY6tkqZd93py/0LHQwZGxeIv3r7zApqlzTnjS2H4oaAzkAAQEnsgk0/+0fvWWNbuitkaUTvo3FT7l3JKLIOOnxolAU
Qva5foWmq/sHFLLjeyze8djBEnML1la4cbwHW/Vt/7a45siI0QeMdtXuubUbEIklIz4FnPrfN2JukYvrv/vcb8sCQdbGjPbZ5MEf6L3YvDDnC9pWuHE80dMb2WBHnUa9TZZ4oOK0ycMaOHwbAESWkiwCAvrwz4xpwvVy4rV6V8Fif78HsdX279wllR57dtZ0RW8tHRW72aGOEmr/g0A+hTDixIDPzduOu38quaMBhFvt6tooYzCcM5WU3wWNQy1PZfjvzRUxm7qp3T9x8sM+hlbsw1NTQUJUXHZBWfre3i2jl5378nh0pHSGArjHCCgC9e/dOSEiYN2/e1atXTV1L1/Tpp5/26tVr48aNT2ypz17v5SbOMrZ+MD38fVc3odDFwXVxqvbRfm36YleH+ekNbbJWve/kJhS4vL/+O30rfRiVMVMFwqmernzXJce0AKA+NE8wTCB0d3IYI8lqOOOqTPSOq9Dd1XVOkrr1SgAoVVKgwHfPoyqoq6mSOe8LhPOSKp54lwghU+jFdRzA5bIJQ1FmYU+u9c8Zadkaog8oS64DAICldR82k+3oN8nZ8HOZzoLrMKAq90By5i/AZhNQr8rIhInBImcOm+uxVDS46NsfaAAAi97WbCaTzSR6Oi+Kkfo9GmEk2IP4fFsCGNYDh/L5Q/nc1qd+GkrylDR3ot8kvuMo34l80JwvKAeut3j1olF0Tmqhph4MutsGALBxdGBbNvY2hMt8OKZIl8jWRUSdMbh4TX04uVWXHTUzcPvNoeFxK0a38VMv4ml+BKZL3rxPNWjuSq+2J1S0HOakDRQAaSzeGbYx5zcAqKlpY0Y3wWw9yje7CxosHbnWRP+xfh6vaVIkIbG4wg96Cl0ksAKAh4fHP//5z9mzZ+OPJJ674OBgrVablJT0pIZU1pr3PbcqKrVUGw1YrvP3KpQKhXKvIDteXta4VxUXHK+xAYoCACiJF+cLU84pFPvcTn4Rr2rZB8MlLFWhyFJcOShUxMnVALwxEfJzxYpzxdFvJMWn6QH0qetiIFyhOKcQU9L1aa0VUyYTuQfJr2n11P2He3xny+1CUxRZe/3tn/hIEEKmRN+jwZIEI4AR+rsvnDvW9rHDvUgCaACuaOeh1SOrkpd4ea3PNcBdmra0bsx4BNkT6n5/lmmoLVH1NFgAAQBAEAwAI4AhM2JWwLq8Ojafz7Vu2rbFNM16mrbwiM6/eFHud3e/WHxQAwC6k+JZS+TgFXdoT+DzGRtYIWgAABdGSURBVIO8mrTvexg7x6/1wdq2MF/rzQAKJiYUHxb1qaPB1vZpFm9oeRdsn7iT3x5LWBEoCg335YOm9EfdU9WDureuE1gBYOnSpePGjXsO76xRE4GBgRqN5tixYx1oSwrCTyqOBPEfvoevTRfPkGTpQZ8p9l2VRQFwBvJIAKjVVzM4vIb/iSlZLy7w3LGs8SRtgYJyE/IZQDoLXasUiipQx4nm7VNTlEo2RySrAAAABlB6dVa2ihzpYgcAXB6PBACqUk/acUkwFmed4wndWQAsoQdPka9sWQbYi2TZimjPR5O/smJjYIZYAGp1a6O6CCGTYzJ70zpNwxIPbBcXbg31xhSRaLZINFvkN6KNgUOCO3pWeMJOf8sTaUpwchlWlXFKRQOANvPbkv7OLs3GBQ3lF1S6+lY7elxvJhMMOo2OrgcApstQR7hZnHOdBl1RoQrYf3fi/r+y8t+gv8DXmw+6KgAj3RCNCUtLMNbc1gHU03Q9QH1JlOfwd0VRudc1mhu6GqBpI8AN+YrVJ3TOgYunsKtKSkouatpY1YI2aDUazW1DPQB9W6PRGBrGaugS+VpJ1DdNJ75CeVZOOcNlnKDp/RoKEySS2MxHkfGeTqPRaKppGupqKjUarQGYzqOGEqrMtBKNKi1TCQPc3LitX4Ku1mg0Gt09gPq7VRqNrppu9S5UW72Gvxuw84JGU5RRdB3Yjo5tLW2AUEtdKrACQHR09Ouvv/7RRx+ZupCuoKamZsqUKUaj8ZtvvmEwOjTdmbQiH5sXbeUpnkutnyzw/BcELxc25lhKFT9fSi2X+rAAQB2/rthnQxDvwRnV+mqWTcO/4m1YVpReD7xPJS6nREL3oPQxEpE9AAAYlbLF88SJlU7v8B5kY31BpEjGlUrcSQB9dS3LxgoAgGVFVuurWymD0WwSrargXHXlObk8KX7emPfXn29rhBghZDLMMX5jr0d5eU/3+mSnatDc9TNqNk2dMN1vupdnSHKrI3V0bqS310zRzOmLUmCat4sFc4p4zdtZ8yd4ek34cFtdwIbAZtMxdRmRgfO3F3WglCG+i6Y5XFk/Zvjfp++8Bo7z1ocLqnb9493h4yNKBgVKQ8YRDuO9RzJV0RPeXVQ4KsCDeXl7SKwKALgeIg+OMvL9IUP+PibyewAL57kRgfz/J1/gOcErLI0etXTlDC59uVB5D+gLO0NEM2eKZs78dJey1QxdXxg1dcKEDxbIbwCdHTlhwoQVJwwAAJfTdh0pBHvHpg/ievl1sOFym8bDe0XJsrQfezo8TPqapJAJEyZ4bSyE+vLE+RMmiHaV1HNFq5eOrto1c8L0yMtOiyLnNq451fwShrRVXhMmTJCcNoDuhPiDCROkea3eBf/jcNGg8u2iCRM+2VXuvGjzsrZmOyDUii7yo6umZDLZJ5988uGHHx49etTUtZix8+fPf/LJJ97e3h2Zt9oOkgEAFDAeBFmjWjZHlD5GljKXBwCQHROjBp/dEommuLK0Ov47oS+rh75xkJOiakmWVWMnlBHIhyGT4RKcdDa4KlU0RpLkLve3pZRbRcEq/5QDPhwAAJaNFVVNAZCgr6VsWDatlNGMkbpvdBJF7QizB+0eL+EBRcRI4V+5a4TQ88f2iE4fa/jNAH3YTAuABQln5hl0Oppgs5kWAOAcnv7gRZBAmi8AAFj939EGnQGYbGbDi3Wuh/S/HgatDmzYjfM/BwQePvuwf7+EnInw+ERVx9CTV0NblsKdsuHYlA0PN50Dd54JvGcwGB/OK+UHfv2DqNoANkwCIDCAhobf9g/wizvrR9+jgSAaJoyyx4Yfzl9q0Blogs1uCJRecRc7skijxTjpD1elLXaXFxXTo+b6D2+6j/CIudj85x2lhSXE2JW+j3Itd97hq/NadDckMOGsyFBNEzaPJsy2uATTb+dFv+ZnerRyF+xx4fL8pQYDzWAy8edW6Cl1tRHWBl9//XWfPn0++OADvR7f7z6L3bt3v/vuu8uXL/+LaRVq06U7yYi04tRlVPyGLApAFTdPxo2WL3NpjJ7DguVxYl9PL+E7djZ2roKBJOcdN/K8Qm0EuJqlsBK42oJ6jzTLQ6bIlwlOSeIrmnROkj1Af98IVKE0OE0gSwziNYRRhqvARVWQTwHoFflqFzd+yzKaY/CceJWqUgoA7tdSJKudZbURQqZjQTAb42nDJpPNabLZansOu1k2YnLYbf1aqWkse2o9m38K61FvxOP7exKP/7yJYLIfpNX21WuSJTMD5u8saXPegk5ZWucd8OS5quWlP1p/uNijQ3/VEczHHktHL9
F2d4+lVUN2VIBoZuQZnM6KnqArfOmqLatWrTpx4sSBAwecnZ1NXYvZqK2tXbJkyZUrV7766quRI0c+SxfGVNGwrKDSHcJWxjLV8UJXqY7PswIAcA1P3zH9wRz+7CVO+4RXDvgAaNMX+4hLbVj6akFUevTkFpP8NTLRh0l6O5K6pib9ZfIv3NRrXIUHgc8hAaCHh/TsOiFVEuM7Ww52UGkVJD8SzG/jRYLqX4IlVvKzITwAoEpifOemkPwe6ms8ybG9Ps/8lzHq9PBLV51fZ/s0a6dgKC8s0dAAwOC6CLrQ7E+tKvfn2wAAvRxHj+DiJAHUlq4cWAEgISHhs88+27t3r0gkMnUtZiA5OXn58uUffvhhTEyMaSuhtFq9FYdj1dZhvVZbDba8Nhs0tKkCFpf1FAuxGvVaDcXictpZ/xV1ARhYOz8MrAihZrp4YAWAvLy8BQsWTJw4cfv27aaupfO6efPmF198oVQqo6KiJk+ebOpyEHqBMLB2fhhYEULNdM05rE2NHTv2woULtbW1w4cPP3PmjKnL6Yyio6MHDhw4YMCA0tJSTKsIIfT/27v7oKbOPQ/gPySYI8WYdEhLxmhBZMewqwOovPW6SFq9LWhdFF2RKwXjBRHQtkBfBGwroTO1odoiarkGeukgW1dwHJE6OvJSdwMCa6B1ErsKUUFJxWo2xXKCod0/UGsltOKNngDfz1/wnJznfMk/fj3nOecAgKMZ+4WViFxdXdVq9dtvv52QkJCamoo7se45cODA7NmzGxoaTp8+nZuby3UcAAAAABvGRWEdFBMTo9freTyet7d3UVER13E4Vl5eLpfLP/7447y8vIqKijlz5nCdCAAAAMC2sb+GdaiWlpZt27ZdunTpww8/fOmll7iO86TV1dUNrgHw8/OLj4/nOg7Ak4Y1rI5PKBT23rrFdQoAcCDjsbAOKi8v37Fjh0gkUqlUs2fP5jrOk9DQ0KBSqdra2jIzM5OSkriOA8ANFFbHh5uuAOAB42hJwANiYmKampqWLFmybNmytWvXXr9+netEj5FWq12zZs2qVavCwsIuXLiAtgoAAACjyPgtrIPS0tIaGxt9fHx8fHw2b97MdRz70+l0CQkJixcv9vf3v3z58qZNNl4yCAAAAODIxnthJaJnnnlm69atTU1NLi4uPB4vJyeH60T2ce7cudWrVwcHB8+YMaOzszMzM9PJyYnrUAAAAAAjhsJ6h4+Pj0qlam5u/uGHH/h8vlKp5DrRo2tqaoqLiwsJCent7e3s7MzJyWGYEbzvCQDAEXXUl9V22WeqAftMAwBPDArrb/j7++/evbu+vv7y5csMwzzwaNK6ujqOcg2rrq7u4sWL9349ceLEsmXLVq5cOWfOnO7u7qqqqilTpnCXDgDAbswt+/cea3/UnfUnjrXevdvOfDA1cPW+R52KiIz1e96KXx29OmnLnkbjMCMAYFcorDYEBwcXFRXV1tZeuXJlwoQJaWlp33333eCm999/n9tsD0hISPD09CSiioqKwMDA9PT0yMjIS5cuZWRk4KwqAMAdl4/v3Ft/5c6JVcHSbZUFa7wfcaoB/Scbkj6p/T/xP4m7jn2SlLqnvX/ICM7gAtgbCuuwQkJC9u7d293dPWXKlKCgoFdffdVsNr/33nuff/4519HuCA8PX7hwYUlJydy5c3fs2PHGG2988803iYmJXOcCAHisLO2VuUnRkZH/nrT9cLtlcKynsfit+KiIyKjkYv0A6fclRS2NjPy3+NzKux+4euT9v8bHJ+9pHaCOwzv3nzYTERkb96THR0VExb9V3NhDRGRpK85IXB21NDIqNm37sS4iovMHs7eU6S13D36pru48ef9FVfBBgWqtt0VfVfXfQ0Y6nuz3ATAO8LgO4OieffZZpVKZlZW1a9eu1NRUV1fXd955x9PTc+HChdwGe/fdd+vq6p5++unvv/9eqVS+/PLL3OYBAHgyzF/nJn3Gvrbr86CB+g83J20XH80J7SnOyPw66KOCLF++2SJwJlqm/PtaARmKNyVsP7Lgs2gicg9Lzk7wnsgXO1Pj+dPapyxE7cXpmfVBHxW8Le0qz8jM4Jf/PVbQfvpr5+jK0sX0vyUZr+UenPtZ9ICFtVh+PbyrYAqRTt+o7+Jb+JNp4ErPT0NGeoh8OPt+AMYkFNY/YDabS0tLzWbz2bNn3dzcrFYrEcXFxcXFxU2fPp2rVIcPH9ZqtRKJZPDXqqqqjo6O+fPnBwYGchUJAODJ0B6rd19ZvsRHTBT92vKDMce0OdN1xzvCkouCpXwiARGRZeDK6cPHO3/q57teu2IiIqKJgmeek0qd75voUv3xy2HJxcFSPkmTkoNe3FNvjF1KRPwpUpGAgpaETT2iv0w0N1aVf99eHks3rD+yaV9uVG0uEZGzVPDckBHBk/kmAMYRFNbfo1KpDh061NfXd+PGjf7+/nvjVqu1uLiYw2D3a21t1el0x48fF4vFEokkLy9v1qxZXIcCAHhMLP0/WZin+IO/8F0Zy60fqc9imcjn3/sHzXhw0+qSiasS/uxBNEA0QMSz9WSA/vv24vGnOFvMv3mvmUDwFF23Dg0gCH79P5oUXe1X2yuyNxVf8QuaMXTEnn8wABAK6+9Qq9WVlZVtbW2j5d2MFy5cmDZtWnp6em5ubkBAANdxAADsikcWS7+F+LK5vu0njnetipVS1/Hadt+X/Gj6xH8e2H6ixRwcJCAiuqRvd1/y2cZob2rv3F/XT0STJk/+seOalWT3n2Gd7h9AuSdOm4NDBebTJxr5AXnPEbUOOa6lq/UsyeZK+XcHetqq6rXtjV8drDrLD85KDnO1MQIA9oXCaltTU1NJSUlLS8vt27e5zjICnZ2dEyZM2Lt3b1FREddZAADsSeD3r747tmUdDVKtytn8P5tiXtgvIDPN3fzpCjE5h23edjptS+SLk90n93sr9v1lBT8tPqLeW2Rpv0xrnImeWxwti8mMiJrKl0YXFUgHZ+QHb84NS9wS+eJkvuVH9+gPivycyTzkuJavdya+ZdlaV7Dk7oX+K1U7cyss0tlhm/dsWBcutTkCAPbl9Msvv3CdwREVFhbm5+cbDAaugzyK4ODgL7/8ksMltgCOzGwe2klsc3FxGS0XWMYYoVBoe8OAxWLl8/lERJabPWYSiEX8+zZbzD1mekoscL37AStf8LSAf/esqqWnx3J36/1z9vSYBWIx35mGYTGbSSDgD7cZAJ4AnGG1rbW1tbe3196zMgzDsqy9Zx3CarV2dXWhsALAWOPMv1cr+SKx+MHNfMF9Y0M/wBeLbbROZ77YY8hMD0yLm6gAuIbnsNr2888/21gMwMizSlVrZt434hGhLFUud3+oOf23abqr073sFfF34cQ5AAAAjBkorCPh4R8Zk56/bfndi1WMfIsqK2Z5qOdD7a39OFaeWDgqFxkAAAAAcAeFdSSkEqFRf3NBdoofERF5KtKjSHtRJJQwRCRcuUvzbUfHed3JIoWMISY4veJAVogbEeOfUlqR9SdGuChFuU7OEBPyesXJZp3uvO5M9S7lB/tONut02pP7/
urPEJFfSllpioxHRExITsW+BC/iyRRFJzXf6jrO604WK5VFRzXf6nSnytL/NMwaLwAAAICxBYV1BBixRNRVmf0FKTZFMEQhG1NkR7LV5xmJOxGRqTZfsSggYFG2YYFKuVLINlZW8xTqbfKQzMJ0YfX+/2IZD5n8XyRE5BUiF9UqIsNjC00R6YtMeWsjV3xgCN2ujHUnRiQLmS8T8oiIJLLQAClDPEnIIi/d1sjQpdn6+elreGrF0hWpDbLsjxReWIEMAAAA4wAK60gIhcyP3TWfFmrD0xV+a1JXmgo/re7uZUXuEiIiq1AWlZKyTkYmkkhFRAb1pmxt1NGadd3ZG9UPrARge4yGLm3ZES1rvdl9waA/UqkxeXl5Dndg1tRtMJ6rrjzVzfZ2Gy7qaw7UdHvIZCisAAAAMA6gsI6AUMhQ703WWFb4lSS9XBnSsEt9jlhTHzNZSLwQZW21MnwS28MSj4iYwV0YK8vyhJLhrt6zLMubdOcnKzE8IiKWR5OG+XgfyzKDn2dZFm0VAMaoW7ducR0BABwLWs8IiEQilmWJ2JpP1YaoWO3uShPRTZYVCkUM4+U/06COys6/yCyfn+FLROSl2K0UFsnlVlX137JqwvO6H+IQrMFgcl8R4kk15x7zHwMA4Khu375tMpm4TgEADgSFdQSEbkxfL0tEdC7/BUn+4CDby06SCqn3aNl/phc26GIvssKZQuO3rMdaVZZ4/4odWi2lqpbXqNIqYx/mCawX1cqS2LJT3bHGmyIpo2t+/E9tBQAAAHBseNOVbQqForKycqT/xWeEHhL3SX1Gg/Efe+cA4+4lEdIjzzNv3rydO3c+//zz/1AIgDEKb7oCABh1cIbVnliT0WCPq1jsdYPhuh3mAQAAABgDcNOVbRMmTHBxceE6xaNzcnLiOgIAAACAfaCw2ubv7+/m5sZ1ikfk7OwslUq5TgEAAABgHyists2bN2/q1Kmj8SSrp6fnnDlzpk+fznUQAAAAAPvAGlbbAgMD4+PjBwYG2traRtFdF9OmTfP19d2wYQPXQQAAAADsBoV1WAqF4ubNm05OTn19fTdu3Ojv7+c60bAmTpzo4uIiFoslEkleXt6sWbO4TgQAAABgN3is1R8wm82lpaVms9nd3d1ec169erWqqioxMdFeE167dk0kEs2fPz8wMNBecwKMVXisFQDAqIPCyoGWlpbk5OTm5maugwCMRyisAACjDm66AgAAAACHhsLKAb1e7+rqynUKAAAAgNEBhZUDMpkM1xkBAAAAHhIKKwesViuWDgMAAAA8JBRWDvB4PLw6FQAAAOAhobACAAAAgENDYQUAAAAAh4bCCgAAAAAODYWVA3q9nsfDS3EBAAAAHgoKKwdkMpnVauU6BQAAAMDogMIKAAAAAA4NhRUAAAAAHBoKKwAAAAA4NBRWDuj1eoZhuE4BAAAAMDqgsHJAJpOxLMt1CgAAAIDRAYV1tGH/uOuyvX/wCbbXxP7mKQXskF2GjgAAAABwA4XVsZhq8yJDMmpsP/PKVP3mCwEhcrn/jICNlcZfx43VGwNmJFYTEfVq818JCF0U6rsgo9poaw6rNv+VUPkrEQGygNRDRiIylK8PnR0qD/edsSC7xkREROfUsfMD5OEBAXH7DcM9fYvV718XuuJvvx7D8FX++ojQ0FWFejywCwAAAOwKhdVxsDVbX4jYoek2DndqUxiQuE+j1Wi0+0JrC8su3BnVF6QUdomIZYnI8EV22UyVpuHM0VXa7O0NNubg+adXajQ1Gt0Xck1BmYHIa0FWWfMZTfMZ1bT9hUdMRKbKbfn0pkbTrMlglXlHbIW5oI4NV5SdN5rYvsEB46H1KwrYNcUazYEUGV6JAAAAAHaFwsoBvV7v5uY2ZJgJffOo5oBCdu92rN7qjFXZNSYynchYsaWGJfKY6cUQUa/pJs/Dy52IiFrzMhoidr0+uBOrPaUPCA9liLzC5UyzRk9kKIhdX2JgWb06LlZ9kYiIeMSaDDW1emaev4SIpF5eDBGx3SZGImXIeqam2UseLiQSyhd5aU5ph8Ygz1h1rUYV4XEnp1Wv3q6LSIsQdRlMOL0KAAAA9vb/t/LURK+7o2gAAAAASUVORK5CYII=" + } + }, + "cell_type": "markdown", + "id": "e248f46c-fb41-4efc-88b1-bd7f8adbccbb", + "metadata": {}, + "source": [ + "# 3. Compile model with a single input.\n", + "Before all, we need to get the input and output infos of model.
\n", + "Open [netron](https://netron.app), and click 'open model' to select your model.
\n", + "Click the input node, you will see the details of model's input and output.
\n", + "![image.png](attachment:1855346b-3785-4867-9c92-0e811d00b9ab.png)
\n", + "\n", + "Before compiling kmodel, we should set the `CompileOptions` and `PTQTensorOptions` to specify the attribute of kmodel.\n", + "\n", + "Then, we can set the `model_path`, `dump_path`, and `calib_data`.
\n", + "The calib_data format is `[[x1, x2,...]]`.
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c957fe20-99c9-4a54-bae8-38361a8f8830", + "metadata": {}, + "outputs": [], + "source": [ + "# compile kmodel single input\n", + "model_path = \"./test.tflite\"\n", + "dump_path = \"./tmp_tflite\"\n", + "\n", + "# sample_count is 2\n", + "calib_data = [[np.random.rand(1, 240, 320, 3).astype(np.float32), np.random.rand(1, 240, 320, 3).astype(np.float32)]]\n", + "\n", + "kmodel_path = compile_kmodel(model_path, dump_path, calib_data)\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "015b6422-3bf1-4f28-93c9-dc6ef6e27856", + "metadata": {}, + "source": [ + "# 4. Simulate kmodel with a single input.\n", + "Set `kmodel_path` and `input_data`. After running, it will print shape of result. And result will be stored in `dump_path`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7f617edc-781c-4b8b-b45d-fef2f0b36a46", + "metadata": {}, + "outputs": [], + "source": [ + "# run kmodel(simulate)\n", + "import os\n", + "\n", + "kmodel_path = \"./tmp_tflite/test.kmodel\"\n", + "input_data = [np.random.rand(1, 240, 320, 3).astype(np.float32)]\n", + "\n", + "result = run_kmodel(kmodel_path, input_data)\n", + "\n", + "for idx, i in enumerate(result):\n", + " print(i.shape)\n", + " i.tofile(os.path.join(dump_path,\"nncase_result_{}.bin\".format(idx)))" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "80216eab-2738-4167-ba03-7a0e218c8d5c", + "metadata": {}, + "source": [ + "# 5. Compare kmodel result and tflite result.\n", + "\n", + "Here, we will use the TensorFlow framework to infer model(`.tflite`, not kmodel). And calculate the cosine between the tflite result and kmodel result." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4003a7ba-a1b4-4488-b3ca-d9a00a55e964", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install tensorflow\n", + "import tensorflow as tf\n", + "interp = tf.lite.Interpreter(model_path=model_path)\n", + "\n", + "inputs = []\n", + "for idx, item in enumerate(interp.get_input_details()):\n", + " input_dict = {}\n", + " input_dict['index'] = item['index']\n", + " input_dict['value'] = input_data[idx]\n", + " inputs.append(input_dict)\n", + " \n", + "# print(input_dict)\n", + "\n", + "interp.allocate_tensors()\n", + "for input in inputs:\n", + " interp.set_tensor(input['index'], input['value'])\n", + "interp.invoke()\n", + "\n", + "tflite_result = []\n", + "for item in interp.get_output_details():\n", + " tflite_result.append(interp.get_tensor(item['index']))\n", + "\n", + "for index, (i, j) in enumerate(zip(tflite_result, result)):\n", + " print(\"result {} cosine = \".format(index), get_cosine(i, j))\n" + ] + }, + { + "attachments": { + "562afee7-c078-4323-bc19-49e03c80d0e9.png": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAA/UAAAGgCAIAAABkBdDDAAAACXBIWXMAAA4mAAAN/wHwU+XzAAAgAElEQVR4nOzdeVzM+R8H8DejmRwzYppUBh1EVhTtSuuoXWXp2JVByq5yxUossVapda6jdYR1rWPbX7E27SpHW4sKI0TR0hpSyJSOVTOkGc32+2OSVKxSTU2v5x89vn3ne7xn+vad13zm8/18W5WVlREAAAC8NYlE8pZLamhoFBcXN2gxAABVtFF1AQAAAPBaBWnC1EdkYG5toPViVm6q8GYB28TGTL9iKbk4JU7410Mpsbv2s7Yx12dWrH47KTVTLCkmYnL0e5r178WteEiannTlwZNKu+KaDDPTZ8jFKUJRoXK6xoqkmVeuiLIlJQrS7KBtYGZpoqPcpLzgdlJqZr6kmKgdx9BkkFkP9iur3Uu6cDH14RMW18jyw2Em3Bcbl4tTk9Ky85+UEENTm29iZmbAVj5UIBKmiuUvN8DUN7M24b66vEE/M1N9NkP5CghFhVVK5ZpYm+lLRcJUMXUbZG3MJkVB6oXUgtJKG1XOJ6ICUVx8UkahjMU1tBxi/eJJATRLrdB+DwAAUCuN2H4vjVxgt0ZI+i7bDi22ZBIRFUQuGLtGKDeZfShkigER0TNR2LKvgs8VVKzDtfbZtNrNpC0RicM8XYLTXm6OaWzv862/oBeTiFK3uMw4KH75GMPSN3KbgCsO83QJFimnX1PRJ2uEioq12CYTlm+aZ82tui+m/lCftWsEJkwikibtXvDNT6nSF2sx+fa+369w7kHVy2Dq2fgErRAYM+lcgJ1vjLRS7TarE9Z+XG35Hvb+36+w5xeEz3IISnm1VIaZzy973DID7HxjmC7bji+2rFo8EVc5/3aY95fBSdKKPa04vtaeTQDNFdrvAQAA6iIlJeXNC5ibm7/zTgoK8oiIxCfDoj0snXWI/goNuygnooJH+UQGRPKkHUuCzxVwB3kumj3KgDL/2LF6vzB4yQ6zQwvMypugGSZuG/w/YYmvnwrdFRETtFhTP2ypddvyHVjO3OYxQLkYx1CregGvwXde8a1T50dJYRt3CQ8Hh4+x9ur1cl+2bf7+Y3dQ+LngnTGjNjqypac2BO5LlerZ+Cycat1Fcv234KCImKBvDfv/6GlQ3orPtPTe7mMu/TtqZ9DRuOB9caNWl8drto3Pd+NNiIiIpW1csfvy5a//Hhx8LGbD3k9sAi1t523Uf0x0Ozxwh1D2gdda195EbAMdoswail/6jb3yyw9Wl95ElPRbaJKUaTZz+3JHrlz8VybLBuEemjXkewAAgKZKkV/wD1FbNvuZMOx3kfNMvbiDUZlMNvOZVPrPP3IiplwYeVJMba29VnjZcInIwGuFJFWwJikmKsnbzLo84HP0e5uYcE1MBpkxc8euORcdeW6utZ0ywTKZXH3DbiwiorZsbs0dcmrSVs+kn5lBv95PTu8XniooeETU6+W+zLgm7LTw8L9E+bkFRPIzx+MKiOu8cIXbUCYRGSxcUZDmuksUFSny9DEt314HPTOTfmTS9lb4MZHoUb6UqLy+tvr63QxZRMRgcit9/ChfvjQp7FimuEAsJSbX1NqaiFhCJhFxe1tbW7+2eEYHfT1DwzZExGRzK5K8vCD9lrjU2dLc3uCtXwaApgn5HgAAoKlSPMmXEvV1EjDC9x8LF35sEn5WajDe0+DX/XGFEikRN1ecXUxkYta/oi8Nt/8gPiXdzRAXEulU2RzX0tyAzonE4oIX+Vku/M7FgYiImB+vTVht87aFFd06czSSJRZGCuXE7G1iVFGwOG5vcKY8MyleRAz9/gP1SXEr876cGL379XvRo51hYGbKpbR8cZacTMv77ouvREYWFYhiwkUK4hobaL/YXsHJJS4niYhIR7DtN1/L8k8g8szY4OBr+SJhnJiYJoMsa+xJ9Fr3wrxdwoiIGCZeISGexmTp5mVzISjuVJD3qWB9a8GsObPsjdH/Hpox5HsAAICmSpr/REHMDn2cRtuE+0UHByRlkqXvWDNRNJG0QKogbmsmEZFcLlcQKbOvQi59RkQsZk2N8U+kciJiMV6GVxNHH2cTJhER36QWheXG7foujoiIa+I828/55ZW+4qSjYUkKIoa+/bfbfM2ZpGCxGEQklVe6VFau/KX1yzmiiDVriIiY+kO9ls62rqiPbS7w/MiASUTtehtWekaZZ8MyFUTENJmwdtNkg1pUTkQ61p6TrblE1IbTX/kRiO+8NswyKSY8PDwqThgWcCufeWgF+uhA84V8DwAA0FQ9kUqIiM3UHzZ+lF5MeLqYbeczSo8tbksklUqJqEtvMz1KzTxzRuRpYsokInlqtDCbiG/Wp3pnemlS5KlMIv3e/SryOFN/iJvg49oXxnde8a3ARIurz+e+0tDNsPT97Tv9vVMWHBX/lSyW2umzGQZmfdl079Yfp8SCSfpERIVx0RekxDDrb1qxKtNy9nYfazZX14D7aqpmGtm4TbCstnumzYo/F8kDvlgel5maWqCwrkXPIiLqaDZqgsCgysy2+paf+lh+Kgib5RKc8tf1dLJ596snAFQE+R4AAKCpKpY+IWIzOxDTTDDB8o99+U4TrdkkZrGIiiRSImKYCT63jvpOuP+r6Q9Hf9iVMs4fi8tUsK0/dzKpiLwKsfB/uwoUD5Pi4lJziWszy/NlcpWLjq8JulKes02cfJ2VHeIVGX9sXiNiERGRgf3cyZZV27Lb6pn0MzGosWYG29rb1/nCgsijQfvtQ3zMmdaT3czidqVumzEjfZS1jjz1VKSwgLiO7k4vW/2pA9/MpFcNG5NeCw9aH6ecZpoJfEZX7JPJHb3I69SVNedCNxwetadWTfh5Z/atL+Aop7WtPadayyIC1iRrD+qtzZJnRqcTMbja2m/eBECThnwPAADQVBXLZQpisVhEZDBpW+wk5Vwmm0kkkz+RE7Ul/U+Xb6etG3ZHxxwUERGTa+a8eNFcx/LszGzHJBILD+4XEjH1zOxnus+abFO5t7pYGBlePsm0NvMpz/dUkBobmaqcNDfxmly9Bf2N2NZeM63PrBKG74kUbBfoG3tu3MTa8P3+mGNhqUTE1LecMMvX+636v8jT48LTX2xVbl0p3xMR13muZ9Sl4NT/7YxxXGv/9oP/FIpiIkTl0z3YzlMsJQ8zM+JjkmKJiKitgc28RQL+W28NoOnB+PcAAAC1oxz//m3Gx2zE+9fKpeJ8KbG19dnMqg9IpXI5sx2X3bbmNRuNvECc/4zF1uOya9WdpnHIpQW5BVJic5tmeQC1gfZ7AAAANcBk6+vX2CLOZLNrN7xMg2Fy9fX/eykVYbK5/KbyQgG8o9b/vQgAAAAAADQTyPcAAAAAAOoD+R4AAAAAQH0g3wMAAAAAqA/kewAAAAAA9YF8DwAAAACgPpDvAQAAAADUB/I9AAAAAID6QL4HAAAAAFAfyPcAAAAAAOoD+R4AAAAAQH0g3wMAAAAAqA/kewAAAAAA9YF8DwAAAACgPpDvAQAAAADUB/I9AAAAAID6QL4HAAAAAFAfyPcAAAAAAO
oD+R4AAAAAQH0g3wMAAAAAqA/kewAAAAAA9YF8DwAAAACgPpDvAQAAAADUB/I9AAAAAID6QL4HAAAAAFAfyPcAAAAAAOoD+R4AAAAAQH0g3wMAAAAAqA/kewAAAAAA9YF8DwAAAACgPpDvAQAAAADURxtVFwAAAABQzzQ0NNq3b6/qKgBUA+33AAAAoG4Q7qElQ74HAAAAAFAfyPcAAAAAAOoD+R4AAAAAQH0g3wMAAEC9KhAGz3FxcHBwWBJZoOpaAFog5HsAAACoT5lHdv6h43Po+PHja5259blheepPS3Zdkr/bRmTpx3esXOjlMcXLd+2++CyZcm7W8fVbjmdVLJR3Zsf6o1mvna9ID1+7Pvx2+bqSK/tWBsfm/deO8/46tm/TyvUR6e9WP8B/Q74HAACA+iTOLTA0M2M3wJaz065kFL3LBmRpwa6TdqYbOc5f5j97VLuLKyf5hN8jIspPjdmx2GtlgkS5nCQtJuav/NfOZxhbGaVv+WZfmoyoOHHLsjCZ2WDem/YrSdzkOmlloqzHcPtBb1wQoD5g/HsAAACoVwr59R1fOBxgce2Wh3hQ2NoNoZcy5R37O3kv9bHRp4K4IN/gP7KkxBy06NAsyeol+1ML5HJim9laM/8WpmZLmb0F33zn9QGbciOXzD0geiaTcT+ct3KpPZ+IZMLvxzpsZfWetHHjJINaF5YXtfl/rFmHgtyNiIiMewWzHzss2hXvtGYEERl/ZHpj9aJwo10C/isr1TifP2HVvLMuS/cOmS9ZGW++LMKW8+IRSfwmv5TBQfOsWUR5x1ZvKZqwSpC33j9+yNZf5pmy6vR6AtQS2u8BAACgfjH7zw45HhkRMre3cFtgZNupIdEJx1cOEm1YEy4meia+VWj9XXRsbORa+3ZScQZTEBwbG/2Tmyz6usmKiOOxh6Yxw/dFiYlIx3ntLxERkce322TsDEsiIiKW9cLfjkdG1CXcE9Fficn6I0YYVfzOsrK1opSUuwoiYjFNZwd5MXd+vS9dUXmd183nCfy+5h+e4nPGYtmSEZyX8znG7LzwY4kyInocHxVbotNVlng0RtY5LzzA13/tvvgsAmhoyPcAAADQMBS3kq4wbSfacBnENBU4m94SXpW+fJRRaUmGfv9++nKZnIi4ZmYG+eICBVFWTNCCL1wnui75PTO/6Mk79rsnIplMJmOxXmlDZ7FYMonsxW98l9Vf9zjiG5xSZV81zy8tkSuIioukxa8ubGff9WJCsowkCfHpg0YNbpeXnkVdzeycPKc56l5cOXt92iufEwDqH/I9AAAAqB6TwaRSIiLSYJGCZCSN2bbh1sAVIb8cCvEexKJ3j/fEMujJF99MqxTH80Q38noYdX35SYNjt2TxwFi/zalVdld9flboii1FUw79ND5r3fLwVy6u7WHv1CP+2MWsmOg0q0+HK5v2+f1GmPcxtfrczUp69UL2uz8VgDdBvgcAAICGwehtOUh+5pe4AgXJb4dHpvW2HliLy25L5MRms5lEzPIZzA4dmeI7mXIied3Sfp9xbkYXf9gUn6cgIpLdC19x4KHdJKdXrnjljJi3xCI9odooN6/Ozzu8cmeR22pPU3OvNYLsLSuOVk74fKdxponb/UOz7AUfsoj4A804aSkpMiLKSr+j4HfrXKfiAd4a8j0AAAA0EKa193LnJztdRw53WHzFYMFSgf7br8u2nepGYV84OLu4bBNxuR2YRJYCT+1TcxzsHL7YlVSnevjuQRvs7610+Nhh7GcjR07ez/LYvdqOU2UhzvDFXzvWMMrNy/mS2HU/ZDktnWrMIGKZzlpsnxa8JV5SacmR7iMeJbPGuZkziIjMp31tdd7XZYrH2JlHeHNn27WrU+0Ab61VWVmZqmsAAABoTiQSCRGlpKS8eTFzc3MNDY3i4uI3LwYNQUtL600PyyRZj4mny2nE8WxkeffyqBOfV/XTBED9w/iYAAAA0MKwOHzdxt4lrwf/v5cCqA/onwMAAAAAoD6Q7wEAAAAA1AfyPQAAAACA+kC+BwAAAABQH8j3AAAAAADqA/keAAAA1M3Tp09VXQKAymB8TAAAAFA3z58/LywsVHUVAKqB9nsAAAAAAPWBfA8AAAAAoD6Q7wEAAAAA1AfyPQAAAACA+kC+BwAAAABQHxg/BwAAANSNhoZG+/btVV0FgGqg/R4AAADUDcI9tGTI9wAAAAAA6gP5HgAAAABAfSDfAwAAAACoD+R7AAAAaDyiiDVhKaouooEUp8ceTsxSdRUAyPcAAADQaOSZV85cz5OruozXkt2NP3Yxr44r58Xv3RWbpXj9AveObfkhvtrW8+J/8Pf92tf3a1//tcfw8QDeHfI9AAAAQLm8hB3rjqfX/3YVeYkH/V0n++07fVNS9aH8lJgbOrYCgYvAcbQ5r/73DS0O8j0AAADUgugnbxcHOzs7O4eJC3adKyAiKogL8nSxs7Ozc1gSIyV5euSa6Q52tnYO09dEpsuJiBTiuI0zXOyG2zl8sS9FRkSkEK5x9g7PJSKSxy6xWxIjJyJpapjfFw52dnbO3mHpRIWp+5d84eLs4CDwDhYWEJH8yq4ZAgc7Ozs7n/C6lP44ccd8D9fPHBwEHv67EvMUlHd8pe+BNOWDaQd815+REJEkYZ3HVA+vDfESIsm1UP+pYx2cXOcGlzfMpx9dOXfKWAensXN/TieSpR9d7zXRwcHJdWV0HhGRNH7dF2MdPvPw/SHxlXb6p+k3c8zn+wlqiu9Fkmf8vrZWVoOtrAbwWXV5YgCvwP2tAAAAoBbyMzP0px3a5sItuBL8lV8gd982wb/iW4XW30X7WhIRiXYF7JJM2HP8U23x0YA53+7vc8CLeyxow03LtRF7zNpkhn31xfWaNyxP2hEY2dYnJNqGS3I5Qy78bvWVfmsPrTWglOAvVu23HexrkH0rs/ei46ttmG/oA/MGkpvxKdqT/7dhsPxm2LJFM2W7fxvBStsek/a5qSmlxfyeRqs4dI84g9yX+Vix2vE4ecfmzgvX8dt6wCw/arnv3O3833xMs4RRedY/HZzUVUYcWYK/1y6av+XgcG2JhHgkIWpnPm3N14MpZaf3onWmfwbZvojrHKupXxFdW19DVaXS3Mdp4QEr083txrlYIeDDu0P7PQAAANQFd5CnoO914VXpy1kMoqzz56UfChz1mcQ0cHT+8J+kpGxKTUo1tBeYsYna6mtzXxNgFbeEl5i2E224DCIGk6m4lXRJfOvIAleBi+uqOOmz/Ozi8gWZyh3VDaOjth6PZzRino+9NDYmzdRpBMXEpBHlXU1+ZjXclIiI2vONe/D5PJZEGHXRdNo8Oz5P13zqLHtpbEyagohYmhxtDofD41BidLz2hFmOvTicTnx+pxfb5/N4PeycrFjporfrS88aHBC6df4nppTgP2l2KPrfw7tD+z0AAADU1Vu3E8qqzmAySSZ/9TpbOVHl7C9naDt9G+HT7+UcKdWfjhyWXCZjmI5zZPocS3EzSciy9rRgUOVONbJiGbFY5SWxWGy57NVnIZMXyzRZNX9cYbVjVX/Or8Hh9eLwe
pmaD+545+O9MVnuU/m1fzoAlaD9HgAAAGpFLskvkBPJ06Ni0npbmrNfeZD/4Yfs8+HHxHKSi49Fnu9saalHg6wHiY/uF+YSEZGyaw1D36BbpvCs+GXCZ/S2NpOeCY8rUBCRXE69rc1kfxyMEcuJSC6V1tOQOwqZrJSIZGmn4/JM+xoxyHi8Oz9284qjWfaOViwiDqejLC9LeQksz8Kia0pkbB4RydJi4vL7WRi98r0By9TcKD025k0D5ryBLCvlSpaMXrwgRJT3IE/RkdO+zs8NoBza7wEAAKB2smMCxv5aIG+rZzt7hYBPVLlPCcPE81uvVWtmOGyWMw1svb71NGEQjV60PHt1kKddoJyorZ7TeCaRvrO3Z9K3U0buJhaLrecgIGJaz1t+a23QF5+sljO5owJDfOctF6wNmuGwQc5gGkzYtGeqST2U/jjGf0yydjtJPg0O2ObIISKe0+yPdromOv1mTkTEGSYYvsvPwSmU09k+aN+swMk+vi4jd7BlEqZ9wE47zqsb409aPS/FZ5J9mDabWCNWH3KpRSGyhM0zv5YFxG0dHO016UCuNpseiqXmc7c6daqHZwktXKuysjJV1wAAANCcSCQSIkpJ+Y+7NJmbm2toaBQXF795sWZHuNwhzCxkmwtX1YW8iZaWVg1z7+1znfpwftQ846fE41VkdVnKWtfNXbce+PxFtxiFTPKPhDrzOMrWepkk7zHxdDk1bFCpOC/vKavSBt+STCIhDodFRCTJy3os4+jyObi4FuoD2u8BAACgJWFxeO2UU7LEtVPWJeST2aytiyr1eWewODzeK8vrvnGD7XgvNljLOio+EXB4/Np+OgB4PZW13xcWFoaEhEilUh6voe7kkJGR0blz56FDhw4ZMqSBdgEA0Kw9efIkJCSkqKiIy63/ttirV69qa2s7OjpaWVnV+8ZVq4W33zcLNbffE5Gi6tg7Mhm95hJZgOZKNfl+w4YNsbGxxsbGnTp16tKlSwPt5f79+1Kp9N9//9XQ0PDz8+PzcTk6AMBLmzdvjoyMNDEx4XK5Ojo69b791NRUhUJBRFwuNygoqN63r0LI903fa/M9QAuggny/d+/es2fPent7GxsbN84eg4ODb9++/c0337z33nuNs0cAgCbuwIEDcXFxXl5effr0adAdlZWVrVy5Ui6Xb9q0iclkNui+Gg3yfdOHfA8tWWOPj3nhwoXff/89ICCg0cI9Efn4+HTo0OH3339vtD0CADRlV69ejYiI+Prrrxs63BNRq1atAgICHj9+HBYW1tD7AgAAavx8n5SU1KdPn06dGnvwp+nTp588eRKNKAAARHTx4sVevXrp6r75msH6ZG1tffr06UbbHQBAS9bY+f7u3bv9+/ev4YHSht2vsbGxrq5uVhbu+gwAQOnp6W/XX7Ho8pHwywXVZtf+jO3q6ioSiWq9GkBdPX36VNUlAKhMY4+PWVZWpq2tXWXmvR2fjboguBgyuWNdNyu7mxD1sI9g2JuuD2vTBoOBAgAQEZWVlb3VBbWlopiNezT7Ct6vNLhOtTO2THR027Zfzt4v6Wg2bqGve//XnclxuxVoTM+fPy8sLFR1FQCq0djt9zXq8cXuP4LG1zncE1HOnxtXH/+73goCAIDXqHrGlt1KuER2C9av/+oD0XczgxJVWBoAABA1kXwvu/rz6l+uE5EodPEUl1E2I2xGfbEsPE1GRFSQ8P3siaM+trEZM9Fna0JuKVHp9R/nro5Vfl+c+P2UTQkyIiIqjF490W3ilJWxRSp7HgAAzZpM9MuyKWNsbMZMWXZEJCufmRuzcuKoETafzfg+9j5RpTN2OVb/6asXOlmamAwb79T30f2Hsho2DAAAjahJ5PsS8fXYmzlEdO9yZJHthqMRhzcMEfkv/PEeERWlnr3cxfvno4e3TO9yZubnm64TFYrOXr4nIyKSPbqVcD1XuRGtITNWrlq/wmv4u3wPAADQYsn+XPZFcJHT94cPf+9UtPGLZWfLk3oPB7+fI35a0Pfs4lnbRZXO2FVXT/3553t2AhvcKAgAQMWaRL6vjNVRtyNXp/8EpwF3r4mUl3AxOurqdNTpZef31fjcY1HXX3ddV0cdE6MePXTw1gIAUBcJxyJ1Jy8UmOromAoWunaMPXqRiIh0eg/sr8PtMfzLOe/fjU24X/O6stv/mzkrZnjQBqf6vw0uAADUTpPL9+U6dNRUUEmVmVqaHUtKZETUhmSvpvy21OAj8AAAqDVZiUzGat9W+Uvb9m2fyV7tacNgaWk+k9V4phVHzfX4scvyw37D8AUqAIDqNYchZRSyZwqiNjLRnwk5/b1NNHV76Px9LbmIur98I2F17PgsN7eICO8tAACVyeXyR48e5eTk5OTkKCcePXokEolsbW1fXZA1+P0PAqIj77lN70H3IqPvDXAcQHSv4uGiP8NjWR/s7k50+cWctMv3uO/31ymKClxdNO2X3SNxAoYmRENDo3379qquAkA1mkG+f/bo15nDL/XoUJRbOmzVfqeORJMXjP91/mCr7T00c0Usm/FE1NFWYL9xkc2IbR25TsGHF/av6WmVlpZOnjzZ0NDQwMBA+VM5wWKhSw8ANGOPHj2qkuArTxQVFenq6urq6nbp0kU5YWJiUuO4gTruKxde/mKU1f90qYjeX7jbTYcoR7O9aM/0UTHti3KLekzesvL9NvRiDAPRj/Oco2z/iFv0KPbsvUuXP7P4gYio46gtcauH11hnWVkZh8Pp0aOH8vRbeYLH4zXUqwMtFcI9tGStGnlA4vnz548aNcrKyuptV7i7/bNJOQtO+/Z5Sjo6lRqHSmW5uUWsjjod27+cU/RPkWZnHdZrPrPMmjVrwoQJz58/z8zMzMjIyMzMVE7o6OhUZH2DFwwNDVu3bqqdlwCgJSksLKwxuFdMaGtrV2T3yjleOVH9liNEtHDhQltb2w8//LD6Q7KC3KI2HXU6sl6Zo2BVzBJtGjXxkV/y2uH0tEjG6vi6U24VCoVizJgx48aN69KlS7t27R49epT5wr179+RyeZXEr5yosfimQCKREFFKSsqbFzM3N9fQ0MCt01VCS0tL1SUAqEyjtt9nZWXl5eXVYcW2rI46VT6Ht2Hp6OtUmdPxjbdradWqlZmZmYmJSfWqlHE/IyPj/PnzoaGhyl9rDP3dunWrQ/0AAK9TXFxcPbLfvHnz+fPnyjlMJrNKcO/Vq1flEM9gMOqxHha36pn05ZzSy987z9yTazJ3/2AiovYda/vt5/3793///ferV6/27t174MCBI0aM+OqrrwYOHKhsebl3754y8QuFQuXE8+fPqyT+bt26GRkZNdncDwDQFDR4vs/KykpISEhISDh79mxubq6FhUV+fn4t1jea8/v5+imztLTmK3D5fD6fzx82bFjlmf/++29FA39mZmZsbKxywtjYODs7u3ru19XVrYcSAUAdKSN7dna28md2dnZubm5FmpfL5VVa3M3NzbW0tD777DPlr23btq33klq1apWbm1vr1dq8v/C3xIV17dPYunXr7du3K6eTk5OvXr165cqVsLCw5ORk
ZdxXmj59ert27ZSLFRUVVbTxZ2Zmnj9/XiQSZWdnl5aWVm/vZ7PZ1VtwAABaoAbJ9w8ePFAG+oSEhPz8/GHDhg0fPtzLy8vCwmLbtm3Xrl1zdHRs5BrT09NzcnL4fP5bLt+6dWsjIyMjI6Mq8yUSSXZ2dkX3nqNHjyqn5XJ5165djYyMDA0NDQ0NKyY6dOhQD9UDQBP27NmziuxeOcpXTPB4PF1dXT09PeVPHo83dOjQijSvkl4ExsbGN27cGDt2bK3XrGu4P3ToUOXwbWFhYWFhMW3aNOWvV18ICwu7e/eurq7uwEoGDBhQZWuFhYUVjf337t07d+5cZmZmu3btbgqkdcUAACAASURBVNy4oWxzqbjUSqniAwM0FEVSkFsw+9sQL1NVV1IHCqL6/A4MQPXqLd/fv39f2U5/7ty5/Pz84cOHV2T6yosNGjQoJibm8ePHnTp1qq9dv429e/caGxvr6emNHz9+woQJ9vb2ddsOh8PhcDi9e/euMv/Ro0e5ubkZGRl3797NyMg4depURkZGRkYGh8OpHPc7deo0cODAHj16vPMTAoDG8/jx44qm98oT//zzz7179549e1Y5vuvq6lpZWSnb45W/1m//mXoxePDgkydPPnr0qEuXLo2zR6FQ+IYTrzLHV/xaOe7fvHnTyMho0KBBli+0atVKS0tLS0ureu7/559/lN+7ZmRk/P3339HR0crpTp06VcT9tm3bDhkyRDndUM+2BWL0FyxexDSu/w1Lz+3a8MB2xaSG+mYm60dXl0uC47sFjX2Jd3Fa+PYL/DlTrZSfPSVpoZs2R4mop93sxR7mnEYuBtTOO11fm5ubG/fCP//8o2ynHzZsmLm5+RvW2rt379mzZ+fMmdOzZ88677pWtmzZcufOnW+++YbD4fz666+//vqrWCweP368g4NDtRHi6plYLFa+u9y9e1ckEj148ODu3bu5ubnK0K/M/RUTHA7+owFUIy8v79SpUx07dqwe4rOzs1ksljKsV4R45URJScmQIUM6d+6s6vLr4sCBA/Hx8TNnzuzTp0+D7qisrGzFihWlpaUbN25kMpl12EJSJTdu3FCmfB0dHRcXl/fee+9ttvDw4cOK66zS09Pv3buXkZHx4MEDw5rovPFSLiVcX9toxAdnzMj0Ov6NZW1XfNtvxorzsiQcvm5jjqQnyzoTunnLzmMZfVfFHRB0IiJJ7EKXHzp/vWYCM+ob/wefR2z9FCNKwTupdb5/8uRJXFzcjz/++ODBgzt37ti8UKWd/s02bNgQGxvbs2fPTp06vc2ZtG7u378vlUr//fdfDQ0NPz+/yp1zUlNTf/3117i4uKKiImWLfmP22iwpKVEmfmVj/90X2rdv37179z59+phUghG+AOpLTk5OVlbWw4cPs7KyKk9kZWW1bdu2e/fulZvhK0+oZe+OXbt2PXv2LCoqysTEpHPnzg1xKk5NTVVe+KStrR0UFFQv2ywuLk5KShIKhampqZcvX87NzX3//feVif/999/v3r3722+qtLQ0oyYymczMzKxr1669evXq1auXiYlJr169qozgiXxflSI12G0De02Ip1bMGp+d5wulcjnTYKit3gPhlSwp6Y1atM7XRodEP3kvOXxLKidmZzPnuX5eQ7lEJD4VtGbbH9efMA2s3P0Xu5mwSfS/BQEHUwvkpDd++1r2BtfdmWw2kzVg3p75HUL9gv4Qy1jMQbN2rbB/482SX5Pv08MX+oWmSYlnKlgQ4D6AIxNu8U0dsdWrUrtkTuKODTtj0oo6mjnN8p1qxSPZtX1+22PSs6XEMRry+deLP+HT4/gty/dfyMyTEs/UftbXX1rxbof67yf3QHfTNlnHVu0smrjMvQ+LiOhuuP+P5LlGYExE1/b5njYO+KrvxR/CZB92jZt1zOrEAUEnIsmxuaOiRpzYJehEkuNzRx4d8eduARr84F28bf+cTZs2FRYWnjlzRigU2traDhkyZPHixdbW1nXb66JFi2bMmBESEiKVShvi0jElJpPZq1evoUOHDhkypMpDZmZmZmZmRHTu3LnDhw+PGDHCxMSkT58+W7Zs0dTUbKB6KmhqapqampqaVu2lmJOTExUVxWQyRSJReHi4SCQSiURdunQxeVX1qwIAoIJYLL5x44ZEIqkc35XTWlpayuvpu3btyufzR44cyX9BLRP86xw5cmTNmjU6OjpbtmyZPn16SEhIUVFRQ5yKNTQ0dHV1HR0dazEm8n9p166dsv+n8te8vDxlu/5PP/00d+7c1q1bjxgx4sMPPxw6dGj//v3fvKk2bdooE3yV+UVFRSdPnmzVqpVIJDp9+vSuXbtEIlHr1q0rx/1PPvmkvp6RunmWn1FsvTzS17IwZon7Ts7qQ8cHyYSrpgQfEdnMNsnPzNCfdmibC7fgSvBXfoHcfdsE/4at2Zhpvfn4NiNp3HczluwwObTYMj/9lva0Q4dcuKQg8WFi23+nbL8XH57xh57PbzttmHXvMc+3X7TbqRMr66ivx8pQq/DZvJybF9P6Vlogfd/CRfGDN2xdws866LvIl3XwJ3dO+sUEhiAixJ5E+33nrwwftEtQnH4hRXvaoeDBsotbvBetM/0zyNZphMJl6a731nA275C4H+jz4guBrrySszviswTGfEo5eSSdE8whnt2X80gRH1exz0fpWe34fA4REYffjZN196GCOE2uWx80J2/K9xKJ5OTJkydOnDhx4kSvXr1sbGyWLVtmY2OjoaHx7jvW0tLy8fF59+28o6FDhw4dOjQ4OPjAgQPR0dHa2tqenp5Tp06t1dcR9UVXV3fGjBlVZmZkZCiD/q1bt44dOyYSiXJycgwMDPr162diYtK7d29l6Ody39iOAaBG/v333+oN8BVzdHR0OnfubGJiogzu5ubmyjTP5/NxM7uEhIQlS5Y8f/7c39+/4uLaL7/8UrVVvQsejzd69OjRo0crf719+/a5c+fOnTsXHBz8zz//KIP+gAEDRo4c+fbb7Nixo6ura5WZubm5t2/fFolEt2/fPnz4MPL9f+P2628gy1QwiZhmZgYFSWKil9+Tcwd5Cvo6xF2VjlIk3TJz3tiLScS1GTdq57fCW4pKXXFezbj6Frb6B3cG7JC4j/vETKcufb2IWLL8hJjYB5JiFic/P09BVfvB3IuPuT9i9j4rPov4XrMHj9wRn+PuRESsjvxOHBrsOKJrVNp9Im0iRkc2j8Nh2DlZrVsuyiJbY7sly+JdZ04h+9WH7F5ulmXlNMx//5m8qW75MeeZ9htrukZBRvI2VN64yGCxFDJZnZ4bQIUa8v2dO3c2b958586dP//8c8yYMaNHj165cmWtvvRsjjw8PDw8PO7fv79///7PPvusV69enp6e7u7uqq6LlP1BR40aVTGnuLg4KipK2bZ06tSpHTt23Lp1i8lkVm7jV+b+Nm2awf2JAV4nISGhVatW1aO8WCyuaINX+uCDDyrm4LCv0YMHD6ZOnXr79u2lS5fOnDlT1eU0FGUTu6enJxFlZWWdP3/+3Llzv/zyy2effTZ06FBl3B86dGgdWql0dHR
0dHQq7gim7J8Db8RiMohKlVNE/1Z7/C3vIcmo9LG8l9ue/ZZxEftWe4bbrvvRq1+tI77kjP+ktXlOHnZdX/dhXy6TMVnld21rw+rIkEle6V3F4bSn/FdH22a1Y1FFHmeyWMV5RU+JXo4hwrL61H5lcFR63/x4jtOuql8XERGRtjZP8iCvlIhB9E+eRMtIG4338G5evhEKhUJlU31OTs6YMWNcXV1/++23hus80zR17949MDAwMDDw8OHD+/btW7x4saenp4eHR6NdCvw22rVrN3HixCozs7OzRS8IhcJbt26JRKKePXv269dvwAsYLAKaJolEcrcmyp51yuBuYGAwdOjQihDfqlUrVVfdnOzevdvX11cgEMTGxqq6lsbD5/MnTpyoPFv+888/ynZ9f3//xMREZdDX0NAIDAzErcobl1ySXyAnLqVHxaT1tp7PZpOlwe7I8Ns2bkbSuCN/yAct7c2gpEorsNkd5FkiscJSXyEnhUyuZWIzdQVlOISm5nv106/t7vNSb9DwwHmTzClPduznu5UfktxOfKhlZdrdYiCtjL0osbLmSC7GJrIGru5B9B/XWZRvO3bV+jy3gwcZKz0CQq32uPNfZHSWpcDpqa/fRhl/3MGaR+nWtRrcZX+8UGJny0o8k8ixcn/bwbwBXqPNlStXQkNDo6Ki2rdvP2bMmODg4KFDh6q6KtWbMGHChAkTUlNT58+fHxwc7OTk9OWXX9Z4L/cmQnkV4IgRIyrmlJWVxcTESCSSa9eu7d2799q1a0+fPlUG/f79+ysnmuCwfaDesrKyquf4p0+fGr3w3nvvOTk5KafrpStgC3f37t2FCxfm5+fHxsYOHjxY1eWoTOfOnZ2dnZ2dnYno2bNnynb9kydPbtiwYfTo0cpvqvX1ax0WoQ6yYwLG/logb6tnO3uFgE9EbsvniwMXO+wsZOpbuy+fZ1mlTZ491F1wbMmUT/YzjT2Xjrr+/Y+pMiJWt1E+H9fl72U8WmA8a+7I68Z8RdZNGlHperv00EVex0YcOv6V1byVI2YudRjJZsmk2oI1u80Z9DZf1kjObNmcP27XJD6flnlGT1p5xH7XhBeddBim48bx9u3lB3/yulFxjN2/cZr5tcvYLZTfxn7DnuZ4EwFoWloZGRm5u7sPHTq0zkPCqz25XP7DDz8EBwcPGDBg4cKFzffzT25u7rVK/vrrL2XKNzMz69+/v5mZGe7CC/Xl6dOnlUeIysjIuHTpkkQi6dy5s1E1enp6qq5XPe3evdvb2zswMNDPz0/VtTRRBQUFJ06cUF5p9t577ymDfuWR+F8H4+fUjXC5Q5hZyDaXxrhg7PXjY8okeRJZGw6v06t9dIolEhan/KpWhSwvT8Lh8ViN2Qgmk2TlyHg9eC39OiGoD+80/n1Ls3379rVr13788ceBgYFq0NdFLpcrg35qampKSkpycrKOjo7FCwMHDmy0G99As6ZQKG7fvn3nzh3lT6XMzMyePXsaGxv37NlTOaFQKD7++GOM99pofHx8hELhzp07LS1rPXB4yxQbG3vy5MmDBw8qv812dHR8Q7MX8n3dNI18D6D+kO9rp6ys7Ntvv127du133323YMECVZdTz9LT0yvfR6Zbt27vv/DBBx+gxzMQkUQiuVWNnp5exVBOygncpFm13Nzcnj9/fvDgQVxtXAc3b948evTo0aNH79+/P3bs2LFjx1Yffgf5vulDvoeWDPm+Lm7cuOHr60tEW7dubVKX3tavmzdvJiYmJiYmXrhw4d69e0OGDLF+Aa2wLYRCobhx48aNGzdu3ryp/Hnnzp2+ffv27dvX1NS04ieuUGxSPDw8FArFzz//rOpCmr2MjIzDhw8fPnxYKpW6urpOmjSp4r4lyPdNH/I9tGTI93W3bt26devW7d27t2IYaTUmlUrPnj179uzZhISES5cujRgxYvjw4TY2NhV3mQH1UFhYmJqaeu3atesv8Pn8iis0zMzMqt8MCJqUwMDAy5cvnzhxQtWFqJXk5OSff/45JCRkyJAhnp6eLi4uyPdNH/I9tGTI9+8kJibm888/X7p06bx581RdS+MpLS09derUqVOn/vzzz/v379vZ2Y0cOdLOzk7tb5Kglp4/f3716tUrV64of16/fn3gwIEDBw5UXoNhYWGBQWyakejo6GnTpl29ehUXzzSQkJCQ3bt3SySSc+fOEfJ904Z8Dy0Z8v27unXrlkAgmDRp0tKlS1Vdiwrk5+cff2Hw4MGOjo6Ojo4mJib/vSaoTkFBQeILFy9e7N69++AXBgwYoOrqoO769++/bNmy8ePHq7oQNRcXF6ccYwf5vilDvoeWDPm+HojF4lGjRk2fPr1FteJXFx0dHR4eHh4e/sEHHwgEgvHjx3fq1Om/V4NGIZPJEhISEhIS4uPjz507N3z48GHDhil/ampq/vf60ORt2rTp/Pnz4eHhqi6kRUD/nKZPQ0MDl4pBi4V8Xz9EItGHH3544MABBwcHVdeiekePHg0JCYmJiZkyZcqUKVPef/99VVfUcj18+PCPP/6IiYmJiYnp2bOnvb29nZ1d5fuggdowMDD45ZdfWvJNrBoT8j0ANGUY9aJ+mJiY7N2719vb+8mTJ6quRfU+/fTTI0eOZGRk6OjoODo6TpgwIT4+XtVFtSxPnjzZv3//p59+2r179+PHjzs4OGRmZl66dGnVqlUI92rpl19+6d27N8I9AAAQ8n09cnZ2dnBwCAgIUHUhTYW2tnZAQMCjR4+GDx8+adIkDw+Pv//+W9VFqb/z589/+eWXfD4/IiLC3d1doVAcOXLk888/53A4qi4NGlB4eLirq6uqqwAAgCYB+b4+BQYGbt++/d69e6oupGnx9vYWi8VGRkb9+vVbu3atqstRW4cPH7azs5s8eTKfzxeJRFFRURMmTFB1UdBI/vjjD0dHR1VXAQAATQLyfX3i8XizZ8/et2+fqgtpigICAkQi0fnz5z/55JPbt2+ruhy1Eh4ebm1tvWnTphkzZmRkZCxdulRHR0fVRUHjuXTpkrGxMY/HU3UhAADQJCDf1zNXV9eIiAhVV9FEGRkZRUVF2djY2NranjlzRtXlqIObN28KBIKVK1cuWrTowoULaLBvmVJTU83NzVVdBQAANBVtVF2AurGysnr06NHDhw+7du2q6lqaqCVLlhgaGn766acnTpwYOnSoqstpxn7++Wdvb+8lS5ZgSMQWLj09vWfPnqquAqBpwfiY0JIh39e//v3737hxA/n+DSZOnPj8+fNp06ZdvHgRtyCpm82bNwcFBUVHRw8ZMkTVtYCKPXv27NGjR6quAqBpQbiHlgz9c+pfz54909PTVV1FUzd58mRHR0dfX19VF9Is7dy5c9u2bfHx8Qj3QETp6emffvqpqqsAAICmAu339c/AwCAzM1PVVTQDQUFBPXr0OH/+/IcffqjqWpqT8+fPf/XVV8pLKlVdCzQJ+fn52traqq4CAACaCrTf179u3bplZWWpuopmoFWrVl999dWePXtUXUgz4+/vv23bNjMzM1UXAk0F8j0AAFSGfF//dHV1c3JyVF
1F8+Dm5vbbb7+puorm5PTp04WFhdOmTVN1IdCE5Ofnc7lcVVcB0LDkf4Wt+V+qKisoTo89nPim1jtFpZ8AKoV8X/94PF5+fr6qq2geunTpMmnSpL/++kvVhTQbp06d6tSpk6qrgCZEoVC89957yPeg9mSZSWfSCt51K5K02OgUSd3WzYvfuys26zXxPetH1w9mh+eRJNz7A9cf04nyUo7HphXXvdJayboYHn+3kfYFzQLyff3T0tJ6/PixqqtoNh4+fIjLkd/ejRs3vL29VV0FNCEMBuPcuXOqrgKgmbgfs3ln/MMGaGLnu22NWOHEI47TioitbsakuBm+IfRqHT9J1JYs5X/rw9NkjbMzaBZqk+9LS0pKKv9eUvKk5HXLtmRsNlsqlTbmHqv+IZ4UlpQ25v7fCZ/PR/v922vQkc5LCqv8R1f7H696EgD1h6MC6oU4Lthb4GD3iZ2ds2vAMTGROHLZFw6f2NnZuQRdksvj1rgKXBwcHFz9wkVyIsoMX+Lq8ImdnZ2dy5ygmHty5UZklzY4ONjZObgu+V+qvO6lRC2f4eExe0eKgignccdCj7Fjxnp8vS8xj4hI9leo/2zXsU4OY5ceyyOS/BW6cuZYhzFjPTYlSohIGr/ui7EOn3n4/qBc/CVZSti6o2lEdPfo5rCLEiIixc2whR4eM31D/668XFbshrmunzmMnbny2F0ZkSR+k+/6M3lEJLmyw3dDfB5lxa6d6yFwcHBy9VoemlLeWihLj1jpJXBwmOi1/mi6jIgoL3bTXA+Bg8NnHjuuEJHs4k4vj6leK4/i8j8gelO+zzrh+9HHq1PKf8v5zdvawvqj9weO25pcQpRxcLq1mfVHtn2NhvmfLny5UsbP7n0t/C80n3DZENq1a/fs2bOaH3v1Va1BSVrYVOtxe97Qfb8keeu4gVMjyt9PnyR/7zzQ2s667zDfEzlETy6sdh440G7MQNO+7vvTXq5UeMF/mNG4/YU1b1KldHR0Sktb9hFTGzk5OXp6ejU88I6HVuZe9yHWDi7WfS3c9/5NVNP/eJWTADQDOCqgiRCHr9mQOmjlodjo2I0fyzNz5aQoyBSRYHtsbGyE7wdMps3SQ+ERx4+HuD3bv++snBRScQZT+ejaIZlb/PeLFERErEHzfjseG7tHIP/frsjcuhajPWK2/6pVfgJTRvq+hYvie8za+tPWWV1jFvmGZhHJRAkxCqetoQd3fzWcl3fMzzucNWnrwUO7V03qyyGidubT1uw+sMW948lF68680l4uy7l5MS2PiHJvX0zOkRERMYzsF6xa5TffyfDlUinBXutyRq0OORhkm7Xum9As4oxwNL66dsWx2/Fbvo03drLiKfKTE+72XXDgwJ5AJwqfuzA0i0iSsNJrV5HTdwcOLLfL/cFrvVBGiofJsXf7LjpwcP8G935ERH0nLFsVuGyWLb+urwuolZrzfWGUt/X47cl5j8tTasnpVQEZ005eFV4I0tvnH5pDhsP8Qi9fFV6+GtQtbHtUeWosSfT3/q1Qq8W35GhoaDx//rz6/KqvanV39rrbTgu9nVNY8ppFSpO/d7b2PZmd/aL9LONn/9CeQcILV49PSPZff4E6mLptPH31gvDmb+4ZG0NffNDKCfPxT26rWVLaFP80rVujk1gtFBUVVb8jWD0cWgbTQs8KT52+evyLnO0hyUTV/sernQSgicNRAU2H9Krwltl4d1M2ERExqy9QcG7XEk9X14lzDtyUPimo/AU422SC26B/kpKyiYiIwWQSkb6lGV+cKa5rNUyOTg8+n89j3YuPuT9itpcVn8e38po9+G5UvPIYbqfD53B4PI5EGHXRdNosWz6Hw+PrcoiIGB21+TxeDzsnK1a6KItuh851d3Wd6Op7uMZWc03trnx+D74s2t91oqur+9x9N+/GnMnj84oSj0clytic28kpxUS9Zq+eJF3n7n/j09Wz+7CUJbJ5PJ6uqeOiL83TouJzKDk6Xnv8fMdePF4fwXwX7Zjo5PLFOvM4nXgcFhGx2J35/B58HqeuLwuol9ZERE9O+E7wP11IhbG+45aeLiHSsgs6fTbUreIT550LV7U+suYTdbD+yCzt9OUS4hsaahJRSXahph5fk4joyWn/FYVzgqcZtvgh9Vu3bh0QEFB9ftVXlXLCZrp/n1JCWRHebquTS4kM3PeeEQaN0a1YImOr+/T9GSUlaXu/cN+bSdTGYk7Y1VNLrTuVv8glyWfTBtpaaxIZ2n6keVmYRlqGPbWIqCTvcYmenm4bIqKMPXOOWAStGtZEbxMbFxcXFxen6iqaDT8/Pw0NjSoz6+HQIqI2JYVZF05ceDbwfUMiqvo/Xv0kAE2GjY1N9Zk4KqDpkCvkxKgh1pdTCHd9d15/3p5Dvxxa+lG1kV4ZpFl1FpvNquswNW0qrSiXyZgslvL9tA2rI0MmefVyWFmxjFgsVk2bYbVjkYyou/38ZYGBywNn2/LesE/e8FmBywMDl8136lYkk7FYLKJSIoaF2yKBKYOIiEqJxSJpvrRqD3oWs2MbmaRYJi+WabZnvdi1puxpo/YBhuaoNRFRhzG+niWrR1uP+Y7mLPhIk4g0NTUrx/TCx4VsrU5ERJpaHSq6YxZeWOm+l7/K31aTqCR5/feFM1ZVeqdo0b799tsa5lZ5VUnXbalD2tyPrMfvNVyw0KINURtNzVdPY4Yz/C1Oun9kO+3EMH93AyIizQ6VHy98/ERTs4MmEVEHrbZPsguVDfaZYdMWJ7uvn2ZIRDlh/lEDV821qHZ+bCpsbGxqTCdQo4Y7tOhOhP/MhdtvGw7pVbFopf/xmk8C0CTU/AkZRwU0Gdx+g/RSIiPvyYmkUmm1nvMKKlGw2J3Yr859UvBISkTimMjzHczMauqWWBdt2WxpXq7yvbK7xUBKjL0oISLJxdhE1sAhPV5ZlmdmwUv5Izavhs2UY/GM+5ia9jE15lX/FNCRw8p7qBxLrxPftI+paR9jHruvRV8qamcl+Nzd/XN390kjjFkk+3uH3zHjVb9uHZHot16ovCBXLpcREUmSEpIZ71l0ZZkO6pseG5OlIFJkxZxJ7zvYvEod7HaUl/2GQqHFKT/9a7YhohJqo1nzDW07aGqWFD4jIiopfPJMs4MmUUnyJvc5aW5HQlx0iajwRNDPj6kkyP9CWnJO9t6fky08m26gbELaaBJRCb3mZSciIs02VFJKmlXfiJW0OnV4cWXbk8JnHTpptSHKiZg+aa/h+iMLzTWJKHlPUHKpdWiAf05iRkaHvSdG+41B37yWoA6HVk+3bSfcSs74Wi/Y+1HMHMMq/+M1nASgucFRAapi7O4/JWDV3LH7FSw2Q8oc9+qjTMupM6MD5jqEE4vFYPazYBIRKaTCLVPsAqVMnUHuyzzNGFQ/TdY97AWmkxaNGduVxRfs3jpv5YiZSx1GslkyqbZgzW5zBr0y4E0/z9WTFvm6jNzbhS3v7BTsV5sdMUydXNhTZoy8oM0ynnUwyEHZb4bj+PXXifM9Rh7V7sqU04jVh3zYoSuP9Vxw0EqXY/GNlcvynSnh9qTICvUeG
aPFkhQy7QN3W7GIJiybd8Vn0sdhHJLQoHnB43hEDyvvzcJFIJ0/yeEYh2m17LclVu/8MkGz14aI6MmJVTs0/aKu9r3sO2fNaev1H1U9SfexHpi993TOwmlaycLUgdZrNEsS/edEWe898aI3Tgdr/4PfF5YSPW+b/OtNi/cNcZp/CzlhK0JNN53+UTti+uLvk8P8LKq96WbsWXXabq9wH+2d6r99WOgcgyqPa1q8b7jq7IWS0UMKLwgLLRYaUk6YzypafGKVbXlvHNPP9+61LSGijPyIm9pDTJtoJx2oX3U/tDTZmiQtLCEqSVz1yv94tZNAIz4dqBc4KkCFmAafrQ2ZQCQXh/vOuM7XJwbT55eQl4+6rAhxqbS4goih57QmxNP45Ty248ZYR+UkV7A9oq6V8AU7zjrl5cna8zjtiPiLD52al5cn4fB4LAYREcdl16WXlXDMvXb96SHJeyxTLnDoVPkDpl8dr3JrxrycXFYnNhGNWHd2hHIZr0OXJuXlKTi8TpVa9/mOq8IdZY/zJFQ+f2roceUjrMHLjp8gUqTEMIw89xxy15KwOpdXRSxjwebjTpXWIjJffOJlCZzBi4/HzcqTEAcd8IGIyvN9rPnr6AAAIABJREFUhzFBEWOIiEYHHRld01KaY/wWh44b/XGEZnaJy95VfErbfSItg+bYHieitnarTq34yNRKl4ioNFurw+OBfZAi34au2+4jRETk9uPhmpcwnBsaSkREc8JCa17A02+My7SPxuiV5On6//qR5pOwI7HZaQ9crNcTtTGc9nPoNAOLIQZERFpntDrp9jXsUONmQM3U+tAqifL+aFOGXoeSjNvPPtoYYUqUfKLK/3jVkwA0NzgqQHWeCYOnrz5TSERsw48XLbV5fV/8RsHiVepPw2DxdN/Ue55YHN6b+x4rUra4zQ19ZDxrp0XVhzi8GjfN6lTz/Ff3WnWR/1irhjWg5WpVVlb2tssW5uSUaulqo4Xmv7VqVZsX9l2VFGYVaurqajbPK5uXL19ORIGBgaoupHlowEOrMCejkPT4bzyQcBJoknBUND6JREJEKSlvGHyUiMjc3FxDQ6O4uLHuYgqVVB9qrAHJZFTzhbh1oiBi1NvGoGWqTSTU0sXVs02SphYffxl4Z1q6hv/5boiTQEuDowLgbdRjuCeEe6gHGHocAAAAAEB9IN8DAAAAAKgP5HsAAAAAAPXRPC/JBDVy//59VZcAAADq5unTp+3bt1d1FQCqgXzfIHr37q3qEpqN7t27q7qE5gSHFlSHowKguufPnxcWFqq6CgDVQP+cBnHr1i1VlwDqCYcWVIejAgAAKkO+BwAAAABQH8j3AAAAAADqA/keAAAAAEB9IN8DAAAAAKgP5HsAAAAAAPWBfA8AAAAAoD6Q7xuEubm5qksA9YRDC6rDUQEAAJXh/lYNIiUlRdUlNBtPnz5VdQnNCQ4tqA5HBQAAVIZ8DyqG+4cDAAAA1CP0zwEAAAAAUB/I9wAAAAAA6gP5HgAAAABAfSDf16fMzMz/nANQL3BoQXU4KgAAgJDv693y5cuJyMDAgIg8PT1VXA2oERxaUB2OCgAAqA75vj4ZGBhkZmYaGhpmZmba2trSi/ddgHeHQwuqw1EBAADVtSorK1N1DWpF+XarnM7IyMDb7X9SNkAGBgaqupCmDocWVIejQlUkEgm9xZ0HzM3NNTQ0iouLG6UoAIByaL+vZwYGBh4eHkTk4eGB91qoRzi0oDocFQAAUB3ub/UmSUlJQqHw8ePHenp6b7+Wqampnp6eqanp7t27336t7Ozszp07DxkyxNLSsvaVQjODQwuqw1HRND169Oinn356+vRp165dK2a6urqqsCQAgDdD/5yapaenf/PNNw8fPszNzX3+/LlcLm/oPTKZTA0NDR0dHT6fv2bNGmNj44beYxMREBBARCtWrFB1IY3k7t27S5YsycrKysvLa+RDq2vXrt99913LObSaERwVTda333575swZiURSWFgok8kq5v/999/UOP1zCoT7d8QVmLv5OhqIjgaF39K3n+lmqUVE0qSftkYW9Js6z1lfFBl8WJgvlcvbdNDu0d/Gydmaz6SsmOADSSwbL6+hXCJ56sGgX9NNJi/snfT9r38rKu+AYznZ11lHFLlvX0xagYzBNRzwicc0G31G3UsGAJVD+30Nrl275ufnd/369QcPHjTyru/cudOtW7d58+atXr16wIABjbx3lWAwWtDbyPXr15cuXYpDCyrDUdFk/fDDD9HR0deuXSspKVFZEc8yhScjxQx7X0eD/JQ/Ik9Kz2Qx92wSGDDkmRciYzLpk3nO3PS48JNCdg8zg3aZqWdjIiPifPdvE+Rf/+NYJFffzWsol0iWcSU65pLko/na4rS//pKRLFdcIGdy+dosBrertCByx5w158jgg0EGJD5/KcNtpo3Kni8A1IfWJ0+eVHUNTc6ePXtu3rzZ+O+1Sg8ePLh58+aPP/6okr1Dg9q9ezcOLagCR0XTdPLkydDQ0KSkJFWG+5pILwUv+SG12lc8zEHTt+/ZH3FoniVJk+IuFdS8clsb39CIiPA9XuZEbW0W/RIR8cseL7Nbf/0lJf4ov6C1a4NDjv/oadCwzwAAGlzrK1euqLqGpiU7O/vy5cuqeq9VevDgweXLl7Ozs1VYA9S7nJwclR9a9+/fx6HVpOCoaLKuXLny8OFDhULx34s2JoaJ/WgD8aHAoHMFNfTikopTM/OJmMy2zNpss7flIDZlhc+Z6L3mp7hMab0VCwCq0jo1NVXVNTQtDx8+LCsrKy0t/Y/ltIdM+8rFtGH6N5WWlpaVlT18+LBBtg4qkpWVpfJDS6FQ4NBqUnBUNFmpqanKQTCbGE7/GSt8BkkjvwsWPq48Xx63bLiVncuSiEy2qbvnUHZttsm1D/xp42z73nQ9cscSV/eAmNz6rRkAGltrXF9bRVlZWatWrarPN3TyC40RXk2+Koz8cY6VJvEd5iwYY9iQ1y/gT6NmcGhBdTgqmqyysrLnz59XnatlMS34iPDyVVVU9ALLQLBkrrUiKeneK7MN7Lx8Fixdu/1QxI9eZmwiFpPFoCdSZWu8XP5ETqz/s3f3ATHffwDA31zd91Cn1Pn1cKMHTxemloc8FVaYq5Gyxc2kkIwyko1IxCaZVUi2wrZilozKQ5EerGLRGXOG68KVm+vJXdp9r47fH5HoQc931fv1l773uc/3fe/euvd97vtAbXBVn2owYdHWH04m/sAZAk+STqc2cHgPQqiTwPNrm8boix9/chdtXuKe8Z/+SDMokwFN2SGhrgFLC9WFVaGiaNN2nAgeGef+ZeChxFRlBmLwsY9n6qJvMmsdSkM1suYssKvVwBuNHqEVk3Q2NPg9hyFFqTE3gWo5emj9lzOQph6MEOi+b9wXBAXlAFRq7+Yc3oMQUj14f6smoRmzjKtyU46m5HKzzvwceeYOAABosYMzbt/mXU8Mmlu9rqY1b2/mzby8e7cvHnRn0QCYc4NPZ17n5eXdzDyxYy6r+h1aw8J9f+J1Xt7tlB+/GI1v2t0dlhaqC6tC
VdFYg/QfZ0XHXc5VdiRgYO+1eESjXXivCd4bF4yk3IwN2rEjKlNq8rGf78c69Y6U5/+TdT4iaPNXGzdHpBYZTPFaPr1Zh/cghFQO9vdNIvszOiZ/WnBGYrDnNOOat8jy6/uWstnzg8scI4PnawFA2aXd7nYffGDnJ5gcHDhPC3Q/mDbm8T7HDz9cuvexXeSJHdNoQJsVdGIj84z71AnuycZ+P/hZ4Dco3RuWFqoLq0JVlZ35+QzN80xm1MaO2ydzwQ9/ZCd+PRoAJvgnZ/+x17m6SacYLfgxPfvchgkU0LT/Ljs7/Vu7t9t9nQlePySmJ8fFxSWmJ/+8YTrz9SMfh2ZnX9o6pXo5nzrS41By+rnEuNi4xOT0uG+dh+DyPUKdHPb3TVOe5Tf5A6dQnvGy6Nv3Ejda0QAAqh4L7ggE3LjojFLWSBYAQJUWy/GLL9xYUAb6TG0AgKrSx/kCQXaMz44z2g5zJ2hNmPWRtkikNWux+7ReZf8NGz9BS5kvCykflhaqC6tCVQl+dvpg6tpEmKbsQJqOqmlgYKDz7oadqqVjwDTQwYV7hLoE7O+bTCZICfdxsmBxkll+mzm13yVl5TJQA5ra+MBLZwKn9pKJZaAG8NYBs2Wl/6nRaECj0WT/VQGoAVRej9waeV21LqyMlAFLC9WFVaGqynJitrt9qOwoEEKoMfhlbdNo6OnRRKIiAJA9LpKBEa2e41hpZhaDBJGOfrvzaXPH+JhVb1TrRVMDAJqFwzRt3r7rZdeNb9I+KE/ZvS0L32cRAJYWqg9WharSYurJhCJMJkJIxWF/3yS08T4pMe40kUBG09fXEOz+PFoEPm8PKk+M/m3tvqzbnHyZ1iAt0U0ZAIDW3EheXqCslz5k+s2LFIEsco3fhKNxAsfSxzIaXFo5wesMvlV0Z1haqC6sClVlzNmfGTjmv8fY4SOEVBv2900iS/Yxey/QeJCxFpQJ7gjKqgDA74P3Xj6Y4mVWvXgW42YWp6Wnr9vrP5FAVA5gDlAU7W7hl6sBj/NfviHIbu3jjIzUYuprQ+ljYRm+S3RzWFqoLqwKVSXY97F+JJPF0qOlXkxVdjAIIdQg7O+bTFYmuPXua6LJykSCsje3lIsERXVGCd8ahboxLC1UF1aFqpIJeblCZQeBEEKNUqv31ondWVsmhOs3YTBN1qIVs4buaok6LywtVBdWhcrq0aOHurq6sqNACKGW6Dly5Ehlx6BaDA0Ne/TooabWNt9syFr0ZqumptajRw9DQ8M2iQGpCCaTqfTSolAoPXv2xNJSHVgVKmvkyJF9+/ZVdhQIIdQSPS0tLZUdg2rR19cfM2bMe++9p8QY3nvvvTFjxujr6ysxBtTm9PT0lF5aAwYMsLS0xNJSHVgVKmv06NEGBgYUCkXZgSCEULOpffTRR8qOQeUsWbJEIBBUVVU9evSo4/f+3nvvmZmZLV26tON3jdrb0qVL8/LylFtay5Yt6/hdo0ZgVaimmTNn5uXlKRSKGzdutOyLEYQQUpYeL168UHYMqojP52/YsEEoFD558qSyslIul7f3HqlUqrq6ev/+/Q0NDb/55htTU9P23qOKcHR0BICTJ08qO5AOkpeX99VXXxUUFGBpoRpYFSpry5YtKSkpUqm0rKyMJMma7Xfu3AEALpfb+NPNzc3V1dUrKiraN0qEEHoT9veNycnJycrKKikpae4311u2bNmyZUuznvL48WNtbe0JEyaMHj26WU/s7AICAgDA399f2YF0qD///DM7O7vDSqtfv37jx4/vbqXV6WBVqKZ///338OHDFRUVtU9RcHFxAezvEUKqCvv7djFhwoTMzExlR9E5dM/+vsWwtFBdWBUdTyKRAPb3CCFV1VPZAXRNWVlZyg4BdU1YWqgurAqEEEK1YX+PEEIIIYRQ14H9fbuwtrZWdgioa8LSQnVhVSCEEKoN+/t2kZ6eruwQUNeEpYXqwqpACCFUG/b3CCGEEEIIdR3Y3yOEEEIIIdR1YH+PEEIIIYRQ14H9PUIIIYQQQl0H9vcIIYQQQgh1HdjfIyVLTU1NTU1VdhQIIYQQQl2EmrIDQN3dlClTlB0CQgghhFDXgf09QgghhLoadXX1Pn36KDsKhJQDj89pS/n5+e/cglCbwNJCdWFVIFQDm3vUnWF/38YCAgLg1e3iFy9erOxwUNeBpYXqwqpACCFUF/b3bcnIyCg/P9/Y2Dg9PX3q1KnVW5QdFOoisLRQXVgVCCGE6urx4sULZcfQpVS/3Vb/WyAQ4NvtO1UvQPr7+ys7EFWHpYXqwqpQFolEAgBcLrfxYebm5urq6hUVFR0SFHqDlpaWskNASGlw/b6NGRkZubq6AoCrqyu+16I2hKWF6sKqQJ1LYdyauRuTpMoOo32RwuzEZF6pssNA3ZvawYMHG3k4JyeHwWDY29uPHz++w2JqV0lJSe19ChqLxdLX12exWI3ntvWMjIymT5/errtATYelherCqlBNWVlZCQkJRUVFlpaWLZvBxcWlbUPqJgxslm8Yqa/Z8gnkN49szmRt9RhLbbug2hh5YafH2mTmipOJXqyWT/IgO/2alGlrx6K3YWioG+lhaTuxscf/q4IXL3T76Qw2GRQWEtpRUbWL8+fP7wwKqurxvEJBglrn/+Ki6nkfNYLyouf6db4zZsxQdjQt1wWOz0lKSvp2504sLVQbVoXK8vL2+od/v7i0GHr0gF4tvEh0yokzgMfnKIE86St2yoeJ39q9u79X2vE5El5C3HVNW44Ns+Vz8HbZOh7W9T1/zK0Vk6DuTA20ezX2uDYAQBFUSv6+sWix64H94b16NTpeVZ0/fz50/14JUQUa1C5z1f9nACAlQ/fvBYDO/o7beSUlJYXsD5NQK0GT6GKlFYKl1VJYFapJJpN5eC6/LxSQ2lTojeuiSiBNWDM36+Pk7VNuRq30P/GPVE5C3/edfb/xGKspzwxdsiv+cTlAL31LJ6+Ni0ZrKjJ3OMYM+XGvc3+QJ3/Fvjgt8dspAGTmbkd2GDF0/nffzTdqdgSlaSEBh7LyxVJgsKYvX7/CigHA+9FjwymhnMKw+nyT71xTomaMGsPCzpqRl576z9O+Qx1Wb3QzpwOQwuTQnZF/5JH9rdy/8rU3IYSJQd8LrTd7WL0sqcKs82eT+hs62CiS/DZH5wqLJArC0Nxptb+nlTaZHer1fTK/oIwkdMymL/X1ZpsSZFqQWzjf+qsID1barkXhPJuvfvAkAEDBO7TSMb7PByv2brLTbsvfAuoOmrqqJO9H5T8QHD16tF2jaT87dwX9K38KGqr7jV4LaRL/Vkp2BgUpO47u69ugnf+ST0GTUHYgbU2TeIKl1VJYFaopJiaG/0BAane5N4JOSPpIYOB+LDk5/ZiHxumQmLsKIEvyH5ttTExOTj7gRj/jvz25oaP0iQlrTyaejmtJcw8AEn4WV9f9wNGjIZy+Z9ftvEQCgMnswCOxcUe/HcffFRQvfj3m8BZrYWTMI+tNB38ItpMcCviFD0ByQz12imZs/+lo8FThzq+jhQBQRZJkrV08K+De4PKLSCjipV3hkaY
Ozta6eedC/PZzASRCbhq3QHPcXAcW5Eb5egRdIYGU8m9xcx88BQDpQx73VsHTlxPRDYd+8IGZCaPL/SFBHaAZ3xrL1F+cTohvv1Daz/nz5xU9nnfB99pqGtSqHs/Pnz+v7Di6o6SkpCro0qXVE0ur2bAqVFZ8YoJMHS8Zp1p0xkwY+kRQ+OpHKgAYTHF1MLh55WZ77ZLSV5NBpw+0c7Ai+HeFAACKgiunYmOz5UTvJwVlr8cwRky3HkzStE0ZDFOHqWYF/DxQ5CVdEjMZT7MT47NJTfq9XG4FMGdvCvOyauj7IObUxd7+qx0YILz7t7h6E/0DZy/fsJDl5iBMOpfbcJyG01dt2rSRY967TV8+6h6ac1RoX6LgkbDdImlHDx48eFZFvntcExiPdZ49XKNNpmpDFQrywYMHyo6ihcrKysrKypQdRQvl5+dXKLp0aVV14tJSFqwKlSV8+Aj6tsXnLk2zNpgEVetNq/f7FLkCAKhUIOXy9toz0ZsAEkAU6+Wy8XQR0Z8OoABQvDGEUAOoqv5X9UNPSZIgCIAqAIrFgnXOLEpTdkUjer+c5zU6vS8AWUGCGlDhrf0CqHXRBQLUUZp3YGgnvlh+/ee3aQydylk40cyYTiVLH15OOhR1TfT2EIremA/NyKyUv6QAQB06kTObf/nU3+XtHnCzdOaz9zr9JYqxtFBdWBUqqaG3MCpj7CdzZ08z6kdAecHdlMijZ/+RvT1G03TaRzr3j199CACgM66dI+2mSGlRoRwMyJvxFwtHOg0FCmn0Xn5qRqHzfINXQ6gafamF9/PldkNADtQ2OdLqAY+vax+xwtkU+I9iUt/xaYJiZmEGf/e2cl5o+roBF/G4UkPzwU0+o6OCf+VcsjAnOltBWJibAVHQXxskObHRl+QFT14uDWhqa4IiL/10silLkznOyhSX8FEzdZETv1qEOuazXUFjS36LPXT8MegyB+k+r+9NtOeg2c6z8/6ufrtFqCmwtFBdWBWqSu+jQP9lhtxDkUfuF/XSG/YeQGU9o3TN57kzo3+/+rDdVpMRAD9mjWNEsULTaPq6gI90AODjlYtztiyyPQgEoanPdgaA0c6Ldb/+gh1H1fk44Niq0W2w05EOTsQq11lpptok/yEseMd6PN1+/frs1a62p3QNqXKw2X7sS3Nu5KpFtxcnRnOaeqkbSXa4bzYoCKad76a5DKAwFn9pnx2QsM0zGQCIgfS+AEy2u/Nxn9jQVdkUU89fE71HtPZVou6mG/f3eh+7fUg9tSNg/z05AMC9l5c50xy/KsR5rK66vODv2KCoswIAoAyYszJkUmX55ZhvCgGMp/pHT6VC2d9nYw7+cq8cQGMMe5mbzXBDKLmcEPZ9+kO51qQVS+ZNNOynWZm7f8d35zrrsSeopbC0UF1YFSqKOnH2gjH5B50jzhYDANz+q/pwaMqgz3zWzjbUgGf3zx4Ni/qrBAA0hy8+sHUe+fCU3wVlRtypadp/l2wPADDBP3FC9Sbq9G+TpwOAFIAw9zq2fUrt8VTWgm9/XfDGFIOdv4t1bnkEA92OXXz5T9aXiScBAMAzOsOtVCypIuj96AQFAGrGMDhHMqr/RcyOuDobAACY9oGx9mSpWAJ0hjYBAOZfxWWQ9Ner95abMu5sAgCAmn8wfM/f8QUAEAMA6HEOn/Y2A4JOf/kdAHN2cKL9ZrGYJOgMevVSPcMu8MwNX7GYJBgMvNQTar7u299ThwwfVnY1PP/ttRjprd+28Q4VqZuv3rDWc3yGbxaAQnQ5NuxUsbxIJB8DUPjXIb8Eke60tUFL5/359aEiq7WBttI93637S3ferpWr5vDWHe8zeOKQkuObAy4DVY7vtd0PlhaqC6tCRVEGjDLt99f5jOK3tisenj0ScKqs3GC6f+ii2X/4HpIDkPxTO3+9DTJRMW2gUoJF7YfQZjBaPJ5Cpzf3+Jk+dPpbXxRQ6Ay9t0fRmxcUQq913/4e1KlEpZx8/vInHes1gaZXvQ9dlj/XMB4/bao2QAXo6lSf2SYvEt3/pwCqz+wnix8KxCJBSoZgpsUwdSo51hyKzxmMnmkAUCQf9P4g6vHHAAppUXFJcX1f8qKuD0sL1YVVoZp6UqnqclL26nOXupnbmnnyXwN+yQdCx+yjiQPpveSkpp5hTxAAyGUlgvz7cgAwUmLEXVbN0n6XxnD+4Y4zADTprFyEWq6znibVBkSiIp0Bxq8+cxNaA0fpaQLFbIX/ds/hhFwih55Q/f5avwo5SQEAIKhUshKgJ0BP6Y2k6BQhHpzZ7WFpobqwKlRTpaiwmGo8YMCrnzUMjQYY0Kg61l8fWvmRwfPiIvzUhNoWBZt71BG67/q9/M7Zc8W73BdM+ivqcmHNX3D1gUP1RKd2HfpFTJ1mOs8YAKCcrOynq0OFgnrfR+V/3X+oOaL40ulYQc0kRu0ePFJlWFqoLqwKVVVyMf2q+2cebjkBUfden/FsaDyA/Cvi+5NX5XoaMz8cCAASWTlVq58mwNtH8iCEkOrpvv09KO7/GBKms2JV9D6PojI5oaNH3iJBxj2b5bx+e+RHYrmmnkbRQzlU3j53uTzoy8gxUhCc8kmpM43gXNjPg76O2je7SConnl32Czj0lxJeDFIlWFqoLqwKVVWcvttP52vfr37+VCKSQD/DPiW/VMrvZF4sWPN1bJBISulnoLhCAhRfS/nTfllM6GxSenXbIfzaBCGk0npYzrNtxvC80pycnHYLpr0cPHjw4G8/gXaveh+l6gww1gRpsahQ+vJPNrVPP11NKlkmKn51CWSqlp4upbywuMGrUFM19XT7yIvEJXJFQ0PaU+l/y+Z9vmzZMmXsu7UCAgIAwN/fX9mBtASWFqoLq0JljR49Gky0639MXcNAT494XlIkKpFWZ5VC1enXj3heXlRcLn81RkdHQ14sklZCyo8nAIDL5Ta+R3Nzc3V19YqKirZ7EaipOv3NVRBqhW68fv+KvPjhP29+4Sp/VlL47M0tZaJCaIxcKirE61WjN2FpobqwKlRUZXnho/tvbFHIi8Wit8YUi1TsXmOoUeXPnr17EEJdEfb3CCGEEOqCNPr0UXYICClHN75+DkIIIYQQQl0O9vcIIYQQQgh1HdjfI4QQQggh1HWoQel/yo6hQ/xXBdB1X+l/VcqOoBvD0kJ1YVWorG7ylodUDykRSxR0hjah7EBQt6C2bN7nTR998ODB9gulXVkOH2VpaansKNrLtWvXlB1C94WlherCqlBZzXrLQ12QhJecSY6baU7vyJ0q+NErXYPSJQSdgCHL4464Fe1iLzrMJ4FgeRw76cVq5nSk8Fq21MSGVftarxIh9xpPXKVpMtrK9NV2yb3sK3elVCbLahTz5acKRbLPmFUJJIAa0y3qgu+rv1KSPO5tqa7ZKOY70iLmcx/KdUewmI1/SCHFvFsF1AHmpoxXW0r52Tl5UrrJuHGmr3YhEV65wiulMoZYmJu8sVvxHS7/maaZpSkdQBLnMXlTGglADPY+dsKThbf+bTK1Zl3DuBP395aWnfFqzU3UeX8vXQCWFqoLq0JltdXvRS
KRtMk8qKM9TPr+AATbmdM7slO8deJQusR6R0bY7JeNLHNN3NUlyT5TfPJacAsLMvvAaj8iOGPTuFdb7kW5fh6UrWAwqsRiYHkeOuY9CngRixbt4QGDTpaI6XbBcd/bVzfbpALMv0w8spBJVPfopJh34UBAQDS3N+fwxU1WDaVFQQpvxR/w2xabZ+L9+0nPwQ1GJ3mQHRvsF5QstPLPODyfUR2eCyeIRzAIiZiYEHg03JmpEMaunO+XTjL6gVgMVhuPHl5o+vLF3Qhx/TycD3bBV8PsCaDPDr06k8z+lu2RizeVax48/h4hhBBC3UZhfMBSV1fPcK4CQJQWvtbV0cHRdUM0TwIAwD/u58FxZDuwXZZti71HAgCZl7BtmSN7FttlQ6wQAIDkx23zcGazP/UIOsUnAQDEyXtWuTqz2XNcw69J0kJ9gpLFNXvjhrqw18QKFeSV0PlsB0eXPdkAABSCoL+5Bq4Qp4WucpwxefIUtuvmWF71LdFKs8NXO9pOnjzZwSPokhgAQBjr4+wXLxbHb2Cz5zgGZQIACG8XEKM9Iy5kZJwPtAFefAIPFLzUdKnh/NALGRnHlpiKk2PTSmvtS40gXnb34mg320UHnjAHv2Phnhc6l708WmZq8o7cZvqxP16X1NvU9PXnBDLtxwNcbc6RCxkXgu0h/UDMDYAHackP6PY7EjMy4nwtJdknkvjVYyuyg76Opg42fT0hhSB60wlctm8+7O8RQggh1G3o2nj6BQZudGZR+FFrt3HN1x+JDnNWHPKJ4AKA8FrSU+sdR3867D9BGOIXxQcyPXQrf2xwXHxcmNd0JoAkfZtHxFOHbw4fDrB7st8jKJMERUFucp7ZusNHD+3ijCCBJKHWqrzJXP/tS8bRKfT/+VElAAAgAElEQVRxS7YHf7PDf55ZvUEJj6/ziuAN9zp4eOd0WYLfqlAuAPBOHEjgmy6POuxtlBsVcCBbAaBv47nQ/OVsgTsWjAAAYM7eFBG23PRecuzR5NsKps1kE6CYe0YnnvS3oZdmx6cJwcRieP0NPMM5PONqfJjDwHfkjOVx9Orlk9un9n/H2QOWvolXM46tGqdZ65Xd/kdCDGERt9Ly+puZgvD2bTGYcCLOJAbPZpC34pPuAdPCggkAIEnb5ZfE9PWfzWhoetR02N8jhBBCqNug0vsPZDKZDOJBWhJPkwnX4xPTnvbWFXOvCwEACBpdl67NYM22MxXw8xQEc4gh/9yh2D/EdD06AOSeS9Odt9p+MIMxzHn1XN2kc7nVk2r2Y9C1GXSCYbMuzHfm6w6VzmSZm/QngOhvYs4awWLVf4i7hJueSzKnO89kmY5zms4CYU4WH4C1YJPvQjPx2QRuCUDpk6IqAArDdGB/eDVbrcnEaREbt0VkExOc7Fgvt5IPEvw4HtFS68CQ5Q0duU7Qm3YmQhMX0Qn6W19LgEIulQNQJKlBXuF/AgDInpEvI74U5OIWUjDCN2ydFQEgSd667RJr/XZnwyYFhN6hG/f3t2J2/yZo+OGymM85kSIQnNwdw5W1zR7LeCkn4+IuCdpoOqSqOr60QCa4FBeXzCtro+lQ21NCVQCATHRHhH9wGiVKCduX1cj/nGw/9oYsKMuK3HNG1HFRoXajBq8X1+UkSRCgAKgCMHHwXjj+jVa3N00TSABgrTh2+AsTXsR820/DuSQpryBpfV72sERvGvlM2iZxyRQkUIAAACAINYAqABDHrp7rEZELA81MGO9YNycrGJyDV29cDrXJC/HYmiABgLxor898krTdIo6FOZso76I9FE1dTSAr6JyYGxHT4CkQffvRAUCc6DN/ZTSww47+4MbqDaDgHQpNEPeWnt+6yu/EbSC5kevDuaTSou4CunF/r6FnrN+r4YdlpWVlAKCtb6ynRWuL/QkiNwSmiEp54U7TvuG1xYRIVXV0aQFvF3vJUcHjsyudNmRhM6eiOrwqoCg30muaxcLIRj5VIACatpFxYyn/T1Yq+w9oWvpG+m30i0FK1UtTUyp+Un2J1wFmwwkpMdKZs5DDWcjhzGQ1sJRNmE51C/wpzKEsIekOwbI04ycnCRUACmHSJb7ZOPO3RkvuZfPE9c7zFs2+dJCIhGISAOgWI0yh4HpqHgniK9k8YLxvxlTweXdJwtSGY8ckxRKoksuq+10aQQD5pEgCACQJAGTahrGjpniEXxMKC4RPKgBIGSh4IWuC0sBmuYcN7SGXe40nrKg/CLJUKBQKxRUAiqdFQqG4tHofkuwIP7/QWqcRVIiFQqGwlCRBLn0sFIpenl8uPBfitzkqu+Zsc4VELBQK/5WSADJJkVAoJhXM8VamcDM5/p6Qey4pT401zpIOD6LXbUoQm7utsGcUcbncG0IJRddmoa/3XGuLURZm79GBQjcdMUhTrSlpRPXrxv19GS/zXinIcve5ObFnsdkL/OLyAQBkd2J8FrDZH38Rdw8AoPReJq8IAMpyw1c6zfqQ7RUjABAlb1/yCZv9ud8ZIYCMF+nlxP7YackPjXftxu77owM93Td6TZPxeNiEdWUdXFpVWZHH9b74bu0XQRtZyZEpWFuqqaP/4IAgI0Vm5zQBe9J3efxn7uMqEJ3043zMZs9yWhmeWwYAUJayZ4nTLLbTnur/UmW8PwUyAMg/4/c5+8NZTkoNGbXCwOnOrLR1sxwdZ62KFtt4b7W57mtrO8fF0YHtd6ne5WJJwlo2+1NXFyefJD376cOA+ckmb3rM/A/Z7A/nx2h7b3J662BxfvQ6D59fmrCKR7HgrLCjX/KZPGqU61Gx6ZLtvhOKDswZO2rKRu5gt8BVNgTFwm42C9L9xk7ZUDTTzZxIC1odLQSAYQ4LxhHpa8cOGz5qbigXgLBZsdl5wO0Qjq2t87b03na+axzo4uu590gQpwUtc3HhuLh8viomr94gJPEb2La2tn7nJCBO8JlhaxuYDgBQcSU2Mv7v3iY1r00Ys8rW1pb9bTYo+FHLbG05B7gKABBnH4+KF9FrrnkJvAPzZ9jacsJ5CuDucbSdsSzqAbDcN3kOub1tjq1LRJG1jz9nIJC3snMrgLwWvorj4sJxcVl6IFfBMP/EzW2Jm9sStwVjmaBmMmWhnSmeVtsK3fjDkZiXyfsAqgQp9z4IvrRRP37JtB1nZh003r0iUjsoMXEYb/vHfgBQxsvM1QLZWb8vMiZEn95rXCUDUaTT1lKf+ERW1pJZW89Mm3dmX5V75ulZtJpbvnD3Ldma8vJUdaZTcOgC45qdVgki9+dOWxqI77ldWQeXlkwgAGMODUDNmKUtEIgAjJTyslGjOvwPjrHj2i9kcZlB+G1h40p52de1ZKD9Z0qvxSnRHwm2z1oZaXfR/aaPT+6sM6fn0n7jTPsToEqQmS2YBYLoFbth84mLVlp4fcxOi+kcnuEgFpN9GPTeAEzfYxe9JWIJ2YfB6A0AYLMzw6Z6IGEfdtUeAGB3orVYTKrV3JrK1Pn7RIdSsQRqtpj7njn5an5Tz5gMDvHmNwHjNmX8valOJARrflji/JofGW7hF9wqJJKqmmPYCasvT151l5B96HQKwILlJ
BAEABAst6gMN5Ik4dVVcJj2gbH2m94IiXP4b04TskF3Dr/hXHfzzWwuYb3e6fV1bJhLjt1ZUmdYBTf7FsM5zOH1R5wRvhf+9q0zzsr7SMZyiYSseWXssBvsBmNiLDx8Z2ETYkeN6sb9fQ11ADXQsrDQP/S4rLzstnS8jzkN1PS1NF4P4WVlGjsEG6sBqNHgZm6u6HrwMieQlcloj8vGcziH/NgLMt03+C0YQQMAGOEefNBJVgUAQKNpadXMUiWKW7bkzNS90VOxve8eOqa01Gg19SQDGg2LS8V12B8c1FxqNKCxxg/778wjEOReZ320V08NQK/WYTnlWddLx/uMxhx3egSj9vHsBJ3xjgu21B1AaDf8nN5NPGW1Sc99fcIqhXjjIHqCeOuY+sZCehPvqI/LJYaDXxingcvY82/+rTnP3067/kdrTZTLH7E4bFyTDu6ve+ZtU0guBXn9eF38UAz4366ZsL9/RQ0AZEAz04dEQRlY6L7xoL6+viBfAMACAGDo6w3m7Dv+hd6rR9fGXHTnbndaupuVsdFCDeB+XOB3KWXVq2vMWYFb51aP5IW5Rw4OPrGUhQ1Y99LepUVjmdEirxeBhUbu9VLWyjfnRyqq/f/goBZSAwDQf09fcP/Vr6AGTU9bligoBwtsNVAnRTFfER7hXAUABPN/DY4yXXLsZIMP1mLpezKqrSKrH53lsNhjHABAH1MTPFynObC/f5PaeJ8N0RwH9onBNAGPtvJVevQW+rE/4XyYy9LTmOBz0N1vGGfurExjXTCety9Qex9nv6BXleA/iy+Mq8cPWxB8cEGdqXlxMdcfa/s5ZQHozg0+6M7C3Hcr7VhaLHdf47nznM6oP6Z5RY/HuupE2rEqUKvozQ90+tx9wlxj4/Jc2ehXR9urTVvrG8P5iH1isFb43nClBohQyzBYE2xY7x6mMvRYNnqdKV7V0ePFixdNHz169OicnJz2i6adVN9NvRm3Ja+SlZXJaFpatDdbJVlRmUxD6+WVFsrLyqpo1T/IyspkQNNqq6teNF+zX6AqCQgIAAB/f39lB9ISKlRa5SJRlZ5eWy8rdurSUhYVqor20XmrorlvYfWnukpWVgY9qXIA4HK5jc9gbm6urq5eUdHAtUtQe9LCo9VQN9a8tb4ePXq0UxztTSRqzpWL1WhauvW8d9J0a/2Z13j9l0OZ77QAACASifT0Out38mVlnfui7apSWhrtUgGdurSUSFWqon103qpo7ltY/alWo2npgkQib6OgEEKo7TXj+pg//fTT0KFD2y+U9jNw4MD8/HxlR9GOHjx4MHDgu+4uraq0tLQ67yqLkZFRly8tIyMjZUfRyWBVqKyhQ4f+9NNPyo4CIYTaXTP6+6SkpKlTp7ZfKO1nxowZIpHo9OnTyg6kXZw+ffrx48czZsxQdiDd0fTp0//9999Tp04pO5B2UV1a06dPV3YgnQxWhcqaNm1aUlKSsqNACKF219T+fv369ZqamgsWdNazuLy9vRMTE7tei3/69OnExERvb29lB9J9eXt7nzlzpus1c1harYFVoZoWLFhAp9PXr1+v7EBQR3j27JmyQ0BIaXpEREQ08vC1a9eqqqp69OihpaUVHBzcYWG1h/Pnz4eEhOjr6w8cOLCTHjxam0gkys/PF4lE3t7enXrxvlOfX1stKSkpJCTkf//7n5GRUdcorQcPHjx+/Lizl5ZyYVWoLB8fn7KyshcvXqipqVlaWrZsEhcXF8DzaxFCqori4ODw1qZHjx6dOHHi7t27BgYGZWVl/fv3d3FxcXd3V0p8bWjQoEEcDqdXr15VVVXvHt06W7ZsmTJlSrvuQkNDY9KkSZs3bx40aFC77qi9paWlAUB7p6tdmZqa1i6tP//888iRIwqFohMdo5yamhoTE0Oj0QwNDbtMaSkXVoXKmj59OoPBKCwsrKioMDAwaNkkI0aMgCacSK2np0ehUCorK1u2F4QQapkGr4+5Y8eOY8eOLVmyxMvLq4Nj6gImTJiQmZmp7Cg6hy6wfl/jzp077u7u6urqu3btGjNmjLLDaZ6cnJx169bJ5fLIyMhhw4YpO5yuA6uiS5JIJIDr9wghVdXg8fcbNmzYtGlTfHz85MmT79y505ExdQFZWVnKDgF1NCcnp+HDh8+ZMyc1NbXTtXEAMHr06EuXLs2ZM8fMzMzJyendT0BNgFWBEEKo4zV2fu28efOSk5OHDRtmYWERFdXO9yBGqNMSiUTOzs5lZWWnTp1at26dssNplXXr1sXHxz99+tTJyal5F3FHb8KqQAghpCzvvn7ODz/8cOnSpf3797u7uxcVFXVATF2AtbW1skNAHSQuLs7CwsLMzOzixYv29vbKDqcNsNnsCxcuDB8+3MLCIi4uTtnhdEpduypOnDih7HAQQgg1pkn3r7WyssrJyfHy8rK2tj548OCkSZPaO6zOLj09XdkhoI6wfv36mJiYQ4cOzZw5U9mxtLGtW7dOnDhxyZIl2dnZQUFByg6nM+kOVXHlyhWsCqTi1NXV+/Tpo+woEFKOZtzfKjQ0dMOGDdbW1j///HP7BYRQp/Dff/85Ojrev3//9u3bXa+NqzZjxgwej5eXl+fo6IgnCDYFVgVCqgObe9SdNaO/B4DPPvvsjz/++Oqrr/bv399OASGk+kpKSmbMmGFkZHTixAlNTU1lh9OONDQ0YmNjjY2NZ86cWVJSouxwVBpWBUIIIRXRvP4eAMaPH5+cnPztt9/iGbeoTaSmpqampio7imaoqqpycnKaNGnSnj17lB1LB/nuu+8mT548d+5cvIx3Q7AqEEIIqY5m9/cAYGZmdvLkyZUrV6akpLR5QKi7mTJlSue6udWiRYvMzMx27Nih7EA61Pbt20eMGLFo0SJlB6KisCoQQgipjpb09wBgaWl55MiRFStW4PGXqFv5/vvvHz9+vG/fPmUHogR79+79999/u8/6dNNhVWBVoFaS34rZcfTmG5sUOcGffh7BU1JAbU18LSH6eJrwjW0k/1JCWh6ppIhQF9fC/h4A5s2bZ2NjU33zUYS6A6FQuGHDhr179yo7EKUJCwvbsGHDo0ePlB2ICsGqwKpArUfm51y6VfzGJsr7zr7rHExfb5DyYncsmWs31dru069ibkk7OMLWEaft89m2P12oqLVNFLttpY9HQKz49SYJPzHEz9PFhePqsyeBj8unqBVa3t8DwJYtW3bv3v3kyZO2igYhVbZ3714PDw8zMzNlB6I0ZmZmnp6e3bmXrQurAqsCtQ+qkeVIA2qtDSR1xMofki9d+MGh+ND38YVKC6yN6Dms9vcN9HJg1GzJi924KSpbwmCoCZMjfLz2dZUvL5AytKq/19fXnz9/Pt7rBHUTR48eXbZsmbKjULJly5YdPXpU2VGoEKwKwKpAzSTnn96xhG031Y69ZMdpvrx6I8k94MK2s7abuzIktVABoLgZ+unnh/ivn6Vp/vHH5joAVIMhxlRSKm/ZvkvTQla7usxhs+e4+uzPrl475/3o4ejAZs9x3RbHJ2uPcXb1i4gKWe/qOMfRdX0UV1IdqDB51yqXOWzH
ZdsS8kgAECYG+URkS2rtRJwZ7sNhT55s67gqJK3mjs+lSdvm2E6e4bgqIlsMAIrbaYlJCem3Xz/NxC3s9IUL0WFh4autCRAW8PHYHdRiLe/v8/PzAaD2HS6rt3RndTOAOekCqn+JycnJCoWCxWJ1599pfn7+sGHDXrx4cf78eeje5Y1VUQOrAjWP4u6hzREShx8SLyXuc5BEbDl0t/qoFaOPvzuZnB4XMJK7K/hsccNPz489kmPs6GDUsr1L+FlcXfcDR4+GcPqeXbfzEgkAJrMDj8TGHf12HH9XULz49ZjDW6yFkTGPrDcd/CHYTnIo4Bc+AMkN9dgpmrH9p6PBU4U7v44WAkAVSdbuxB9Er1sZklxq4sBmyf8I9/KOevUhhW4ycbqNblHyHq+d5yQA0kc3ubkP3jjQiMFkAJC8w7+kk3TrqeOIlr1GhFq5fh8QEGBlZVVeXg4AixcvbqOQOrfqExKsra0Bc9JVHDlyJD8/v6qqavjw4ampqd35nJPqVAwfPvz58+eYCqyKalgVqHmEf/whnehsb0AFqpH9xxNLcnIeAwAQWgYGVADNkQ4fGty89k/9z1UUJm1Zc7q/V8Bcg5YHQOmryaDTB9o5WBH8u0IAAEXBlVOxsdlyoveTgrLXYxgjplsPJmnapgyGqcNUswJ+Hijyki6JmYyn2Ynx2aQm/V4utwKYszeFeVnRa15fenx2BcPBL9j3q7BNsxnkraS0BwAAoG3FWecbuHu5OUVyJfN2fZEBgIQb4bEolMf8ZNfm2YwGxiD0bmotfqaRkVF+fr6Li4tUKp06daqRkZGRkVHbBdYpVefE2Ng4Pz8fc9JlLFq0aOrUqWZmZpWVlVOnThUIBMqOSGmqU8FgMKKjo6OjozEVWBWAVYE6jjRn71cHKl33bpmi0xbTEb0JIAFEsV4uh6ifLJ6hB6AAULwxhFADqKr+V/VDT0mSIAiAKgCKxYJ1TBalzrwkCUDQCQIAaL0JAJKsPSeFoAGQVfUfesP/2WvZnlzGJ8GHt9pgd49ao+X9PQD4+/sfPnwYAFJTU/FvejXMSddjZGQ0ZcqU6l+rq6trd/7MVpOKP//8E1OBVVENqwI1D3PiRM0vYxMK35+tW5Rw+o9+o531AbgAAHIA6pPMmPhCyyUjAfJfji+7eTqNnDh7tA4vJjjz/Q2HPzao21K3xgMeX9c+YoWzKfAfxaS+47B+ipmFGfzd28p5oenrg2dEPK7U0HzwyxV85pjxTEpU/I/hFmLidKIQ9DjjB8J9ACCFt9OT+ecO5Sro1uMsAK68fHpFdojnztwR60M9pOFh2RKGDWfE07S4WCAG2bDNsctHLdOq43OMjIxcXV2h27+91YY56ZL8/f3f+ke3hamogamogalAzUAZsniLB/X3peyp7KW/0z22LB5CAaKfgf7N7Ww7O7vFoUUzAjbaadYMl9+OPxx5+qYUCq/l5D8+7+/MZrPZbMc1scJG9tEcIx2ciBjXWS6uHJ+Yh0B9x4cHuv369YOSXW1nObrMYbvs4QIAN3LVoi3xr8MZtTzYywYyQ1atDkoGK+8gb3MKQaPToTQtyHPVtgSxyfzt6+3pAPByXf+Z8PZNXi6PL/n3Pv8ZgDgtfLOf3wY/v8DY24r6g0DonXpERES05vllZWXff//96tWrtbS02iqm2oyMjKZPn94eMyclJbXTeWCdNydKUX20bpu0BVlZWQkJCUVFRZaWlq2f7S0HDhwAgOXLl7f5zDk5OQwGw97efvz48W01J6aiBqaiBqairhbnxMXFBQC4XG7jw8zNzdXV1fFGkC1RfHoNJ2lC9F7nlh6L06y3YLJULKki6P3oRNO+HCBLxRKgM7QJAACFRELS6b3fHFEhFpYAg8l4vcwvEQslQO/HoPcGeJAdmxx9YE8yueBYxkZzkEhIOh3PpkVtqIel7URlx9Cwqud91AjKi57r1/nOmDGjrWY9f/78zqCgqh7PKxQkqLXqGwwlaJ+cKJGjoyMAnDx5spXzeHl7/cO/X1xaDD16QK9WHXjW0f6rghcvdLV1BpuahoWEtX4+TEUNTEUNTEVdrclJyokzgP19uyk+9dXn4TcNZgd85zla893D69dOS2xtQvijCzuYSwy2990b7DxQ2dGgrqiH5TxbZcfwLlLyf9S+XitWtkk7e/78+dD9e/+tlIAG9d2jVVab5kS5Wr9+L5PJPDyX3xcKSO3O/DsFoJbIB79nfGB/eK9evVo2A6aiBqaiBqairtbnJOXHE4D9vWpT5f4eofbWGVavNYl/KyU7g4LaZLKdu4L+lT/t3M09tHFOOruYmBj+g07fuwCAvB+V/0DQmvsEYSpqYCpqYCrq6jI5QQihenWG/h4ANKhVPZ5X3zylNZKSkhQ9noNmlzjIrY1y0gXEJybI1F8oO4q2IVN/cTohvsVPx1TUwFTUwFTU1ZVyghBCdXWS/h6gQkE+ePCglZPk5+c/a+Cis51Rm+SkCxA+fAR9u8RnNgDoSxQ8avlVITAVNTAVNTAVdXWpnCCEUB09Ax3NO8c3lG11ImzdeXQmrfZ0fl/95U+alou7XU46uRcv6q7DUd9nr1k9Ue/lT5RBnyxZNduwY8NqqfpeTmuei6mogamo0U1T0egknTgnCCH0FrVpw/S3wjvOEOrieg8eM3rAnR9j/wIAAKqe+bRhou6ek85Pd8jkMYWnX/7Qs99QC3PyMhXgHbcu6ZIwFTUwFTUwFXVhTrqe8mfPlB0CQspRfUUw6vvsrz0nDdDtA/Liv8/GHPzlXjlomn/mtni2aT94lhsZ/N25Yo2hHy5eMc3cWBOkZQ8v/vpN1F0zN+/JhYfCzhUD1dIjZMTf3kcuy2mDZn+2+JPhA6hlub/9svc4Xw6gMYa9zM1muCGUXE4I+z79oe7UNZumDqz+ZpS8G+39y+1Rbw5Qjb+mmJOuSGfS6mXzxvTvpwnlgmvRO49eLlSAsfUqX/uxhjR5Qdpu7xO35YyxbgvmfWikR31eUsg7tfNQClivWd3vnN+J23LKgE88OZDwzfF80LFYvN5x0jBNueBq9M6jlwsBqIbTVsyfPXGAhlRwMfzHmD9l5it8Fo+uvs6HQnQu6pvfn785QIqpwFRgKlQ1FTUayImyw0JNotGnj7JDQEg5Xh7gYTDEnP73bu+AgO/vDHRfOm8oBQxsF7trZvj6urvtjfmzDDTHrwr9dMA/Cd8s3/bNRRg7So8KoGEyZJABDQCA0Bow6r1+BFAnfubvrnNla4D3tr/03T04QymgM3FtoC2cClu3/Oj9UYtWzWFAUU7M91FhO2OvkHr9pA8fatYZoCIwJ12Q5uAxxiXHg72Xh/wmHbt29UQNUJ/k/pm54Ki3s++6oPSHcsogtzVff6TI+P67db6xD4yHD9ftCUS/waOY/QAAemoYmZoZ9aGCnnOg59jCUxs5wdFFY1etnqgBlEFLVnoMFRxavvmbU5SPNn1qTq28//svYTujIo4XaBhTRXdK6gzAVGAqMBUqm4oaDeRE2WEhhFBjXh/ATUpKCosf/pmSIWAMGqYO0oIH0gGTP/vQXLd
MVKygTrQaW3QpYn/W/ULxw+L/GphM3WyihUZRmcbEqTNHU8tJQ7NRvaljxppD8TOD0TNnGkGRfND7g6hyqeifR/JRH39ET9+9Lb2k7oCOeeVNgTnpghTSoiclhfmXz/LKjQcMgOeiPJHGpOmzp70HBeJyMJj8YX9u5KHTfz4SFZRWNnQuto7FuFEykXzAtHljB5BS6rAhg6jvTZ5oWF4MZrOnjtWRleuamhlSygsf3S/U/tD9/Yffhx36i1F3QIe+7npgKmpgKmpgKuqqLyfKjgkhhBrTk3z+5p/oCjlJAQCQXv1u8XfnpCyPqD273AZTCSqVlJXXeTqVpv7Gj4S6nFQA9ASouvd77Nk7lUBQqWQlQE+AntIbSdEpQjkAdeicte6UU1t/5Uqh3gEdq5Ik1anEqxaaTqNiTrqCSjmVVvO5iEqog7zyjTTKZXKgAFVxP2rHuu/van+yOjLac5oOjUqVy+V18k3t+cZHLEKdCtW/055QdOO3X7giuTqVqpCTANAT4HHGjxduSxQAWpPWLzb/68juUyKAegd0DExFDUxFDUxFXc3JSYcHhxBCzaBWeP9+Q81j8Z2z39/JuOPz88IP9LbmP1z9/iTjhFhBpcbLVvi5qFCqN9S0H+SXvHxC5f1/HlKHybi/nLhdM6f0/kPNEcWXTscKKl9uoZpyNtvKIwNj/6kEAPlfdQZ0tOLbd8pmT7MecPbsQznN7KPReoU53T4nnZ78Bv+hru2HE7Vu/1EGBuM/HAP3dxYA6NU3VvYw5eR3KX89+23D5DExF/+heEwcoZGSWw5UqD4jokgsgkmDh6mn/PXqGUX8+0Uf9StIjzklfrWpz9//VE6C/NNR12o+8vWbuchj6F+73a6WAwAU1h3QMTAVNTAVNTAVdTUvJx0dHer8SIlYoqAztPHCrKgjqO089xCgnsM/hjrt2jVJQ1qpodvn/q+nHv5TGPHbsK+jQj8qeq6hqwl/XQZQ3P/9VEbIosjTs0pAU4/kXQQo+f3IofdX+8ePLS+qpMKtsMWHrgrOhf086OuofbOLpHLi2WW/gGjdj2YOpVPd/X51ByDzo5fvf2vAob86eNmmkht5JGWXR/h5+xIpTQPu/+aHOen8ii9EhA/zD9zzq1QKmn1KUqICUmT1jaNNCgzyMH5WToz+9bwAACAASURBVKX3g6thfz69/Ncvk0NWRp6WyIk+/TRl5wBAevW3n6f6h+wbW/RMQ1en5De5XH4vemf615u//fWzsnJCXfTLto3HxZf3H7HY5fHzaamIVNf4J9bT7+7MT6z0dKVfx1oCPC+5+KP32wPSO6iPwVRgKjAVbZ6TDo4NtRkJLzmTHDfTnN6RO1Xwo1e6BqVLCDoBQ5bHHXEr2sVedJhPAsHyOHbSi9XM6UjhtWypiQ1Lu9Y2iZB7jSeu0jQZbWX6arvkXvaVu1Iqk2U1ivnyU4Ui2WfMqgQSQI3pFnXB1/LVyDzubamu2SjmO9Ii5nMfynVHsJiNfEhRSPg5V/JKqQyWhfnAl/ORIl42ly/vbWo+jsUgXr2KG9m8x8A0t2LpEQAACm6Qw6KoByQAYbf7athM/CDUKj0s59k29BhVS8+QJi8Ql8hfNZeaOnqa8vIxK3+ddvVT74vlAADqGgb9NUBSUiitWZ6maur004TyouLyWpv0dPvIi2pN9fa+3jUASv9bNu/zZcuWNfcV1nbw4MGDv/0E2r3qeYyiYfCeHiF9KChu7GCYrpcTpQsICAAAf3//Fs8wevRoMNGu9yGqzgBjTSgseCht5JsQdQ0DnX7wTPT696WuYaBDlepwopbANt+wvyoBADR19HTV5UXiEmnNr4PWz0CHShaLimuaAApVh9GPeFb7V/+mdw4AgLzSnJychsNtDKaiBqaiBqairtbnJCX8CABwue+4krK5ubm6unpFRUXLY0UtpaWlVc/WWyFsPwg+4c3qyBM8bgTZLohm7cgIm/2qf1aQpCTZZ4pPnuvJxC+b2d+TaX62fkRwxqZxr7bci3L9PChbwWBUicXA8jx0zHsU8CIWLdrDAwadLBHT7YLjvrdnAIAiedUHq8SrEo8sZBIEAQBAinkXDgQERHN7cw5f3GTVUFoUpPBW/AG/bbF5Jt6/n/Qc3MCwCm7I54vC7xEMOikuIay+Onp4oankStD8ZVHC3gyiQkwackKjN9loi5M3zPc5JaH3I8USpltUnK9ldTAkKYr2mBWiGXQ1jI39fas0doMkeZlIIHqjuZQWi97+s1tZXljw1ka5tFhUWKuRBQC5VFQoarhPbcKAdqcoL8y/33hzD90tJ52fvPjhP/mNvk8DQGV5oejhG7+vyvJCUQn5ZualxSKBqFbvAgCyksKCWr0LACjkxaI69QDNGdBuMBU1MBU1MBV1NTEnHRQNag+F8QFLXV09w7kKAFFa+FpXRwdH1w3RPAkAAP+4nwfHke3Adlm2LfYeCQBkXsK2ZY7sWWyXDbFCAACSH7fNw5nN/tQj6BSfBAAQJ+9Z5erMZs9xDb8mSQv1CUquOTQNuKEu7DWxQgV5JXQ+28HRZU82AACFIOhv9q8KcVroKscZkydPYbtujuVVfyQszQ5f7Wg7efJkB4+gS2IAAGGsj7NfvFgcv4HNnuMYlAkAILxdQIz2jLiQkXE+0AZ48Qk8UPBS06WG80MvZGQcW2IqTo5NK621LzXiZXMP4mg320UHnjAHv2Phnhc6l708WmZq8o7cPvz7EcPO96eMjNRE3xGS7FNJfCBzL2SRIzyPpGZkhDnT82LjcwAexO8/JbbwT8xIjfMezI+OiH+ZL4IgemNb3zbUmv8U+algx7PPO/zvb9Vz1ZrnDZ08J51cjx492uCGlnXI70RwfEHe4adA9OjRozXPxVTUPBdTUfNcTEXdSdojJ6hz0LXx9FtsSiUYFH7U2m3cmWFH/DXTt7v6RAxPXGcuvJb01PrI0U90C075LfOLsvjVLS90K3/s0bhwpkRMMgAk6ds8ImSr9x4ep0jb6e0RxEjcNK4gNznPzP/w8iEAvcncFBJqfdA1meu/nbF/WeCVcUu2rxhJgFb9d0UWHl/nFSF0CDq4WDdpo6ffqt6DLnxlzjtxIIFvujwqGELn+wUcsLbeZKVv47kw7UrAFfMl21eMJDQHAAAwZ2+KmE0KryXH/pF8W8GcPtkEKHTP6ERPACjNPpAmBJPpw+tv4BnO4RkcOj1tw9iEgsZyxvI4etWLDgkeyclPGhs3jBMczpHkZSefSE96SLDmj2cCYbrxpA0AKMTJidni3hYWQ4Dk5eYB002vKO0KDDJjkOk8vgIYqnPJrC6hsfX7BlXKO35RuY8aMXDgwFZOYmRk1JvSPh8NO21OugDD95jwtKEr9bWG/K2rZ3SEp6The8wWPxtTUQNTUQNTUVe75QR1BlR6/4FMJpNBPEhL4mky4Xp8YtrT3rpi7nUhAABBo+vStRms2XamAn6egmAOMeSfOxT7h5iuRweA3HNpuvNW2w9mMIY5r56rm3Qut3pSzX4MujaDTjBs1oX5znx92xo6k2Vu0p8Aor+JOWsEi1X/Ie
4SbnouyZzuPJNlOs5pOguEOVl8ANaCTb4LzcRnE7glAKVPiqoAKAzTgf3h1Wy1JhOnRWzcFpFNTHCyY7066v1Bgh/HI1pqHRiyvKHjkQh6085E6E0nmtx/F5zbuXF7FE/XznmaycuWq4IXvXr+qmTCeUcgZyAAKQegwt3IdQGxjygEVJD4v7HNtai/73hSkvKi54wZM1o5zfTp09Ve9IRyZX7522baKCddwMf2DjR5GyzpqQKavMfH9g4tfjqmogamogamoq6ulBPUPGrwenFdTpIEAQqAKgATB++F499odXvTNIEEANaKY4e/MOFFzLf9NJxLkvIKktbnZddK9KaRz9rmTssyBQmU6mtSEYQaQBUAiGNXz/WIyIWBZiaMdyxNkhUMzsGrNy6H2uSFeGxNkABAXrTXZz5J2m4Rx8KcTTruoBdSQpqsOHn1esZ20yvbPIPSSIAKbvjSRdtumvr+dDRwJhMACG0GAVJyTPDV876mFRLQpvftsPi6jc7Q35fL/0ftu36db5tMtt7X93/qdJB28s+KbZqTzm7BggWDjIyJ0k7/sU29RG4y0Gj+/PktngFTUQNTUQNTUVeXyQlqtl6amlLxkyoAABhgNpyQEiOdOQs5nIUczkxWA0vZhOlUt8CfwhzKEpLuECxLM35yklABoBAmXeKbjXv7ZsaSe9k8cb3zvEWzLx0kIqGYBAC6xQhTKLiemkeC+Eo2DxjvmzEVfN5dkjC14dgxSbEEquSy6s6FRhBAPimSAABJAgCZtmHsqCke4deEwgLhkwoAUgYKXsiaoDSwWe5hQ3vI5V7jCRs4x5ssFQqFQnEFgOJpkVAoLq3ehyQ7ws8vtNZpBBVioVAoLCVJkEsfC4UiSfVm4bkQv81R2ZJXrz3OY+xYW5/jPKGw4FGpBEgZAJm9yyeES7dfsfgDMo97jcsXkzDSalwfYVpCmjAvOf6KhDFuXIee7tw9qEFpQ3deVQFVz3tTCLUXal7eK9tqobp6np27ghSlVc+qSFDrDJ9wamuHnHR2NBrt8KHDq7y97vLvF5cWQ48e0KsFJ5Yoz39V8OKFjrbOEDNWWGhoa2bCVNTAVNTAVNTV6XOCWmzgdGfW/HWzHA0JpvPBMO+tV1b52tpqGmoqpMPXxAVOrbvOLUlYOz9cyNAkhUV6TsHDgDlsk/c1r/kfxtBBApbeoU4MgNqHrvOj13kk2Bx791VxKBacFXa5e3wmjwIr/wuHl2z35fmEzBl7AIBu7ha4yoagkHazWbERfmOnsDir3Mx54UGro62iOMxhDgvGxYesHTvMlzB1PZK4ztxmxWbnu1tDOLYhAMRAO981DnRxbO49EhRpQcvSAAAoTLdfL/iOqBuEJH4D2+9SdU+f4DMjgWCH3dhtBxVXYiPj+csW1xxmJIxZZRv88mpRUctsowzdjiX5mlPE2cej4tU2eb/6YERn+276wytos6MtANBZzhtX26jlheQIQQEJm10TAADA3OfCsSX2679K9Qj0sP0Z6CM4u1bZ4Em1ba5HREREK6fYsmXLli1b2iKYehgZGU2fPr09Zk5KSsrPz2+PmaHT5kQpWn99zBpZWVkJCQlFRUWWlpbvHt1MqampADBlypQ2nzknJ4fBYNjb248fP76t5sRU1MBU1MBU1NXinLi4uABeH1O11X99TAAAIMVisg+D3vvlTxKxhOzDYPRucCqJWEyqvXFrKrJULIEGblZVIZEQdHrLFqQrJJIqgl7r0jqkREL2odMpAAqSBOL1QfAkScKrq+C8M6Qa1dfH/PLCMddGz2O5ss3W58n602F29V9FtibaZJ8pO/uGJW4a9+ZOKyTiZ0Bn0N/RtSskEglBfytgcbTrlCC8Pmbr9XjxorVXEfjkk0+OHz/eJtF0GRMmTMjMzFR2FJ1DG/b37erTTz/t2bPn0aNHlR2I8nWWX1kHwFTU6G6pkEgkgP29amukv+++FMmrPliVrmfO0mM4+IVxGriMPf9HF58K/3ffeOtakOM+w7AoThuc8w4ACn702m3xj4V5N8TjdmN/31qt/UbywYMHT58+/e6772xsbNpjTaiTunv3rrJDQG3syZMnxsbGyo5CJdDp9OvXrys7CpVw7Nix1atXKzsKlfD06VMdHR1lR4EQahTFfEV4hHMVABDM/zU4ynTJsZNNmc3S92RUW0UGQGFYLVzMfAYARH8WNvet1dqjzwcOHOjn5/f777+7u7vHxsa2SUyd3Z07d/r2xXPBu5Sff/753r17P/74o7IDUQm2tranT5+WyWTvHtp1RURE6OjosNlsDw8PZceiEs6ePWtu/vaJhgghFcNgTbCxsbaxsbYybdqFMTsQ3dTyZWwsxrtHo8a1wdmlkydPTk9P/+KLL86dO2dmZva///1v/vz5R44cKSkpaf3knVFmZmZ7HCeKlOLq1asTJ0786aefDh482LNnZzsbu32MHDnyyy+//Oyzz5QdSEfLzMxcs2bNoEGDhgwZwuVy9+7dGxwcrOygVEJ+fr6amhqbzVZ2IAghhABaf3xOjaVLly5duhQALl68ePv27aysrB9++OHGjRt6enoWFhajR48eO3aslZUVjUZrqz2qrLNnz9KbeMMIpMJCQ0PDw8N1dHQWLly4fPlyZYejWrZs2WJjY7Nr165169YpO5Z2lJubm56efvny5ezs7BcvXowZM+aDDz7Ys2fP+++/j7eWq23Dhg3d8PMeQgiprDY4v7ZxZ8+effDgQX5+vkAgEAgEeXl5CoVi0KBBgwcPHjJkyNChQ1ksFovFeuMk8M7s/v37I0eOzMvL09fXV3YsnYNKnZZ348aN2NjY33//vby8fObMmba2tk5OTsoOSkVdu3bNx8dHR0cnMjKyCxyQJhAIbty4cfPmzVu3bt2+fZvH45mYmAwdOnTIkCGDBw8eNGiQra2tsmNUUVu3br148WJiYqKGhoayY+k4eH6t6sPza1F31u5X/P3oo49q//j48ePHjx8XFhYWFhYW/J+9+45r6mrjAP5AyGBFVpQlUxFwBcWFimjFtqK2ClqVtqK2buvCUUURxbZa1NZZbR30FbEWsVWwCoqgMlSQWBQERQIGCIQRE0YmvH9cSSPLBSTg8/0jn5vkjpMoJ7977rnnFBZeu3YtNDSUw+E8e/ZMQ0PDysrKysrK2traxsbGxsbGycmpV69enavJ/+uvv962bRuG+06ExWJFRUVdu3YtISHB3d195MiRP/zww7Bhw0xMTFRdNLU2ePDgyMjIffv2WVlZff7551u2bOnRo+XbtdQGj8fLysrKyMh48uTJkydPcnNzc3NzGQyGra0tUe2MHz9+/vz5PXv27Nmz53sVWN+On59fQUHBb7/9ht8VQgipj3Zvv399Dx48KC0tLSkp4TYobiAUCs3Nzc3NzS0sLMzNzS0tLS0sLCwtLS0tLXv27KmlpS7zkvj4+BgaGv7666+qLkhn0vHt9zk5OYmJiUlJSSkpKXl5eUOHDh06dOjgwYMHDRpkb2/fYcXoMrKzs//3v/+dOHGCyWQuXrx40qRJqi4REBcM2Wx2foOCgoKCg
gJjY+OePXsS9QZRjRBVSu/eLYwSh1p25syZ1atXz5gxIzg4+L0K91lZWZcuXSL6o2L7vTojk8m6urqqLgVCqqFG+b4Vjx8/Li8vL2vA4/GIR0JpaalMJuveoIcS0wbtfZ2Ow+F88cUXDg4O7z5f2PumvfN9WVlZWlravXv30tPT79+//+zZswEDBvTv379fv359+/bFThdtpby8PDIyMiIi4u7du7Nmzfriiy+GDx/eTsficrkcDqewQVEDoi3AzMzM1NTUzMyMqAGIR6Jm6NWrVzsV6b1y7dq1oKAgDQ2N9evXT5w4UdXFaXdisTg+Pp64EyMxMdHV1XX48OHbtm0DzPcIIXXVOfJ96/h8fkVFRWVlZUUT5eXl5eXlxEJZWZmhoaGJiYmxsbFJAwaDQTwSunfv/hYNUX/88ceSJUv8/f2//fbb9viAXVsb5vvKysqcnJzCwsIHDx48fPgwMzMzMzOzd+/ejo6Ojo6ORF9qBwcHBgNH3mpHLBYrOjo6KioqLy/PxsZm48aNkydP1tDQeOWGdXV1mpqaJSUlxcXFxBU84mpeSYPS0tLy8vLu3bsTf6qKR+U/ZOLvGkc6aicRERH79u2rrq5etmzZ3LlzVV2c9pKXl5eenp6ZmXn37t27d+9qaGgMHjzYxcXFxcVl4MCBxDwY2P8eIaTOukK+f02lpaV8Pp/P51dWViovVCohzhPMzMxKS0sNDQ2NjIyMjIyIhaZPpVKprq7utm3b/v3335CQEE9PT1V/xE7p7fJ9fX19Tk5OdnZ2Tk6OYqGmpmbcuHE0Gq1Xr172DczNzdun4OgV0tPTExISLl68mJKS8tFHH40cObJPnz4AoHwVTnmZmCCJCOjEgvHLjIyMjI2Nu3fvrupP9t4RCoWbNm1KTk6mUCgrVqyYMWOGqkvUxjIzM9PT01kNtLW1XVxcnJ2d+/Xr169fv4EDBzbdBPM9QkidvUf5/vXV1dUpsj5xHUD5qeLFsrKy0tJSTU1NY2Pjbg0MDAyaLhMLfD5/4MCB3bp1I5PJqv6IauSV+b6ysrKgoODZs2cFBQVJSUlCoZDI9Pb29kR7vKJh3sLCogMLjqC+vp7f4Pr165aWls1mdx6Pp6mpqampSSKRSCSSubm5vb19//79bW1tlS+j4eynaigiIuLPP//8888/vb29P/jggy4wUGxdXd3jx48fP35M3IdTUlLCYrEsLCyYTCaTyXRxcWEymaampq/cD+Z7hJA6U5c7U9UKEdlbShs1NTV79+7dvXv3tGnT1q5da2tr+7wBn89XXsjPz1d+EQAKCgr4fD6ZTG6a/psulJSUuLq60ul0Op2uPvcQt5O6urqCBkSUVywDgFUDAPDz8yMCfZf/Tjqecl5XIP4PN0sgEBg06Natm6J7jIODg5ubm3J2J0bBqqmpSUtLS0xMTExMPHz4sJmZ2ciRI+l0+qJFizDcq5WrV6+ePXs2IyNDX19/xowZR48e7aRDoD579oyI8orHx48f29nZEcOeenh4DBgwgMlkdtJPhxBCLcH2+zeQmZn5+++///TTT59//vmyZcveejL26urqRucDTc8Nnj9/LhQKiWWhUEihUOivp6ioaOjQocSyGs4qwOfzC5RkZ2ffu3eP+LyKEN+zZ08rJTiG8btQZPH4+HgbG5uWknqjvE6cZLZE+d13KVt6enpiYuK5c+dyc3M1NTVHjhw5cuTIUaNGDRgwoK0+Pnojt27dCgkJuX//PoPBmDFjxowZM4iT6k6hoqJCkeNzcnK4XO69e/fodDox1woR6InHNmkawPZ7hJA6w3z/Wq5cuXL06NGbN28uWLBgzpw5HT+aXk1NjUAgEAgEz58/FzQhFAoVy1VVVURQI35+XvOsoLCwcOjQofoN3r3Aym3wyurq6pSzu0Qiyc/P79at2549e17nFkwkEAga3UbSOkUQ19fXNzY2bjajK1PV53r8+DExOMmtW7cqKirs7e0nT57s6uo6ZMgQQ0NDVZXqfSASia5cuRIdHR0dHd2jRw9ra+utW7c22+NcfdTU1Dx9+lQR5YkFkUikCPEODg4ymeyTTz5pv6nEMd8jhNQZ5vvG4uPj2Wy2n58fAMjl8pCQkIiIiLq6ugULFixYsKBzZVCRSNTSaUAj1dXVfD5fKBQKhcKqqir9FtDpdMWyjo6ORCLJyMjo0aOHQCCorKwkZg56/vx5QUGBubl505Z4KysrIyOjRoVUq/lrO5hMJlM+Z8vKytLW1m49r+vp6bXSst5U5/ofSygsLPzxxx/JZHJqampqamrPnj1dXV2JrD9w4EAdHR1VF7ArKCkpITJ9YmJi//79vby8vLy81HAegNLS0qdPnxJznysW9PX15XJ5o4b5Dr6THvM9QkidYb5/CZvNtrW1zcvLKysr+/3330NDQwcPHuzv7/8+jPGsjDgZEAqFhYWFbDb72bNnhYWFXC6Xx+NVVFQQXYZEIhGVSqVQKABQX18vl8vFYrGGhgadTifuImj9JEEgEPTp00dfX//o0aMUCuW7775T9Yd+V0RMV4T111kQi8XE9ZNu3boRDY3W1tat53USiaTqD9rR/v3339TU1Lt376amporFYiqVOmTIECLu9+/fX9Wl62RSU1NjY2PPnj2bm5s7ceJEItY3PevueGKx+OrVqxoaGooQTzxSqVQ7Ozs7OztbW1tbW1vFgspPXDHfI4TUGeb7/7DZ7Dlz5ty4cWPUqFHl5eWfffbZl19+SQx13IXJZDLFJEHKC8+ePeNyuWQyWTHHp0UDYtnMzKzp3kQiEXFioDhDaIp4q7i4GACIBYlEIhaL9fT09PX1W3pUfvr48WN3d3fld9t2SKLq6mrlFE7c4/vKvE5kdEVYf50FnFvxTVVVVSnH/cLCQldX18GDBxPTDzs4OKi6gOqIx+P9/fffiYmJsbGx3bp1mzBhgoGBgQqvmBUXFzcK8Xl5eVwu18rKqnfv3kSCVzyq7Z2vmO8RQupM4++//3Z0dFTV72JGRsbZs2eFQqGzs3MHH7q2tlZbW9vR0dHd3R0A7ty5M23atMLCQh0dnSlTpoSHh3dwedpVXl5edXW1IsErp/nS0tJGwV2xYGBg0KNHj/Yu28cffwwA//zzT1VVFdE7qNlH5ad8Pr+2tlb5XU1NzUbnAMSjjo4OmUzW0tIikUgaGhr19fXEzAZSqVQqlYpEIpFIVFNT0yipU6lU5RSup6dnaGjYNKMrlhWt76iDVVRU3L1796+//iotLWWxWOXl5cQkRMRAh+3Run/jxo3Y2NiKioqO76FeUlJiZGQ0dOjQIUOGNHrr5MmTRJdCZYcOHSopKYmNjc3IyPD09LS2tl66dGlHzuBbWVmZn5+fn58fGhpqZWWl3LtGOcETj9bW1h1WsDaB+R4hpM40vLy8Hj16lJ+fT0zw2atXr759+/bp06dPnz7t3XCyevXq7KxMQUW5poYGjdTR803WAcjq6jWp2jRt7YAtW0Qi0fbt242NjfX09NhsNgB4eHh0ok7hZWVlxU0UFRURCy4uLnw+v9kcr/Lpn16z/31NTY2i7V/xmJiY
aG1tTfT+Ly8vr6ysVAw9VF1dXV1dDQBEJyIymUwMwS6XyzU1Nevq6mQymVQqlUgkIpFIW1tbV1dXV1eXSOrEuUEjLBbro48+Upw5KFN5VwFEKCsrI2YpSk9Pv3PnTlFRkSLrE4/v0rtJJpOtWrmS/TSXX16uoQHaHV5lyerqZfX1Ovp0AyOjoO3BikaZuXPn2tjYEH9BT5482bVrV2Vl5dWrV62srCZOnOjp6Tlu3Lh2LVh5eTmR49lstvJCXV2dtbW1tbV1bW3txIkTFWm+Te7gVznM9wghdfaif45YLH706FF2dnZ8fPzz58+zs7MfPXpkaGjYpwGR/tuwicV/9er791JNSHUqT0aVYrm2Sfc1a9eNHDlS1WVpTUlJiSK4JyQk6OnpKUd5fX19s5eZm5srltXnlkSiA49yTD9x4oREInF3d1fc/ts0xxMjhCr67isehUKho6NjK91giGHXW1dfX1/VAuL6ACE9Pd3MzKzZ1XR1dZueDyjk5+cPGzaMWFZeU7GM8521E6FQmJ6erkj8GRkZTCazT58+w4cPZzKZxGRzr7krPp+/bOnSkgI2g6LyGgt4Ihm9u1nAli0uLi5jx46Nj48/f/78tWvXrl69KhQK3d3dP/744/Hjxzfbg+5dlJaW5jdQRHkdHZ3s7Gxra2sbGxsizRMLNjY26tCtv/1gvkcIqbPW+t/n5+dnNyDSv6mpaV1dnWLGUOLxdRpj2Gy2jY2N4umZM2eO/nLIlFTXJp/h3VWIZXo9zCPORaqwDHK5XBHWuVxu08Z4Y2NjRV7X1NR0cXFRTvMdPNS9WCxulL+bDeVCobCsrEw50xMdaZRjekFBAYVCGTt2bNP4rvyozrNZtX5ikJycbGtrW1VVVV1drXhRsVxdXa2pqdk0/fP5/D59+jQ9GWj2qaq/gM5BIpFcvnyZy+WyWCwWi3X//n1LS0vluN9o/mPldvEjR46cCTtlRlaXu5W4tVILu96JSUl5eXkaGhqjR4+eNGnS+PHjXVxc3n3nRUVF+fn5ycnJMplMkePz8/NpNJp1A0Wa79atW5e/SalZmO8RQursze6vLSwsLC4uzs7OzsnJIXJ/UVERhUIh2viJxN+nT59mq3vlH8uv5s/Pf/SAQVOjZkuunLTzx5CmHVvbUHV1tSK7N3rU1dVNTk42a1V7DJwiFovz8/PJZHLrMb3pW/X19cr5u5VonpeXN2rUKMUrxHg7yt7n8TEJIpGoafSPiYkZMGBAs6cETZd1dHSIoG9kZEQmkxUnAIrTAOWnyo+1tbW2trbv7W2+N2/efPbsGZH1WSyWlpbWwIEDmUwmEfepVKqtre3WrVsDAwMnT/KS8ysMqOoyeFFdHeTXyrglJbW1tSUlJTY2NitWrFi5cuXr70Eul+fn5xcUFDx8+LC8vLygoIAI8QUFBYaGhtbW1paW5pMgNQAAIABJREFUlkS3eEWg7xr9atoK5nuEkDprg/Fz2Gw2EfcVjyUlJYqsr1iorKxU/FiO9fDoriklqVOvZZ4UFq9cPW3aNABgs9lsNtvDw+NNd1JeXq5oeldO8BUVFWw2WyaTmZmZmZqaNnok2uPfZc5Oom9604FrWlpWPK2vrx8xYkRBQUHrGb3pi214uQDz/btTJP579+5ZWFgonwMoXyho+pTBYDx48KC6urr1kwHFAtGjvdmzhS5wE0JBQYEi67NYrLKyMm1t7ZKSEgaDYWtra6+nRu0RAFBWpxX03fcjRoxgs9lTp0718PD45JNPmtZaNTU1iuCuaIkvKCh49uwZEdz19PRcXFysra2trKyIV16nVxvCfI8QUmftMj5mdXW1cuInFvT19evq6rhcrpGRkWOfPpY0jab5nmw6YsLsaUN7GZPlwkJW5NmTCTx5WxTIqJ/HCOrD6DRey6vw6zTnL1vh7e0dHx8/duzYmzdvjho1qtE6crlcObU32wzfbIKvqakZOXLkKycHraqqemUiV17m8/lEstfS0mo6/1RLy8pP1eGHHPO9OmjpTKDRWcGTJ0/09fWbfVc57gNAjx49Wrpo0NKjpmZH37HauoMHDy5btgwA6HS6g4NDb3oz57TtVGXp9PEcxXgcc4vdyjqVGtTN27bHxsZu3bp169aty5cvb5TgiQWhUKgI7ooFYrkLnJKpEOZ7hJA6a5c+zbq6usQodcovnj179rPPPgMAmUwmlUkBGvfTAItJy3YvY9w5ev5QDp9q5mAlkbZJuAcgW02Y6gOFsWk8SWurlZeXu7u737x5c/To0VevXk1PT28U5Xk8XqPs3q9fP09PT8VTGo1WV1fXKJdXVlYKhcJTp069snFdV1e3lVxub2/f6HUKhWJiYtLmo7+j9xDRSP8uw6EqZ/3bt2/b2Ng0OmHg8XjEOK0tnULQaLQ3PSVo9NiGN2msWrXqr7/+Wrly5YoVK8zMzMa4uzezUrtVWYZDZ31qFXr9Flva8joSqeTzzz/Py8sDgL179/7000/KCX748OHEsqmpaduUCSGEUOfRQfcsxsfHr1+/fuvWrXPmzLGxsRnrMQZA9vIqFOZnc/o+OeC/J4oPAPDgcRoAAFiM8V0wh2lLqclLiDr6691C0Bn69TLfEQxDfagqenjhYNjlHCmAweA5vl96OhgC/86vOw8k287/0W+MOUVSnnP5wNGIBwAADI+5PzpIZQ/Of3uA1VzKl8vlAQEBPB4PAFgsFoPBMDIyMjAwsLGx6devHzHGIolEUs7leXl59+/fb5TRa2trW29Ht7S0bKl9HZvTUOdFDDBKLL9dZ7OamppW0n9VVVVlZeWzZ89auc7Q9N6DN31UnCrv3bvXwMBg69atBgYGs2bNaq68zVdZBkPn+Pp6OhhKCu+Ehx2NLZRQevsEzhhnbaAHUt6DxGMHojOroFGVdbh4bNAGDyt9qMpJPLb7r7QKANBlfh3w8yzJ03MHf47lN3d4TQ1NW1vb6urq0tLSRYsW/fDDD2/xnSOEEOqSOijfe3h4EO1MLSLZOPQxzrkQ/9IvGclhxqY1fTN2/3Q433zqlrkbJLyVoXyLfn1lMRvXxEAfv6Vrl05grYiGaQs2ePKPbgtMExroiWtAkhe9d8cfRdBrzqotX0+4tSIGACrSog+FF0qEpa024ROqqqouXLjQrVu3ljI6g8Gws7Nr9q339lZFhN6Rjo6Ojo4Og8F46z2IRKJWTg+qq6ufP39eVFTUys0JmpqaynGfiPi7d+/u08cB4OWrZM1WWRbTlq715B3dFpypP3J14MoFFRsPZHR3HqzL8t91vqq714ZVK2Y9XPIrm/FylSWvSjy86XqFsPuUHesWe7MW/ioFkGZHnjj5AGqLmg33AKClpbV523Zzc3M2mx0aGjp37twTJ0689VeHEEKoK1GbMQdJFDJFIq19OX3bjmGaZkSvSSishcKT4aPCZjEtQuMBoLaqlFchrYh6yAuxsKUYmI5xKIj0j83hA/ArAACkFLOhXmO6a+tLJMYGJhQAALmwNC+H3XK41ySRPv/884KCgnPnzvXv33/atGnEpYZ2+8AIoTZGo9FoNJqJiclb70EsFiu
yfkJCwuLFiwFg0KBBNdXVjVdtrsoyGDrGoSDyYGwOHyD6j6Sxy8Y4UDIAQPq8nMcr5l2OL5w62IYC/BGNqiwKmdFv5DgbXW2JtJs5gwRFAJLa8sInOa30zyHY2NjY2Ni8xWAAqFORsK+FRcRnsHkSimX/jz6bPaG3PgCU3zpyJF7s6vfNBEsAOfvC7uMPzKcsH1F05FSqQHlrkuP0dR8KTx25nFNWJQFKN/M+Q6f4THDQh5yIXadyek7x/9yVAiBMOrE/Tuj6xVy9a/svPxMpbU8zGeX3jQc19eyJiFvZ5XKqsZ2bj99s1+4d+x0ghN6E2tzQJini8SjmtjYvvUihkiXCaqJLq1wikVMo2i9tUy0BMgnIFCrUSv77+bWauW736qFGYn6VBEj/tbk16e//Eg2AkSNHRkRE5OXlTZs2rdGA/Qih9wGVSjUyMrKystLW1l68ePHWrVvz8vKuXLnSzL2/zVVZZApVqqiLaiQSEuWlNv8qiYT0YjXlKsvAMzBowySGvJz/GlcX0ftHknH4qy83HblwvwxIguyYE1vmfx2SJAQAYU7ihagr/5YRqxU9iI25kMyWCIuzMx88yEiL/ycm5lrag4wHD7KKyiVFqZcuxCRllwmE7LQLR4K+nPNzqgSK/o2JuXCnSAgAAMKnSReirvxbJinL+/dBxoMHyfEx/8Qkpz14kPEvu1TCDt+w+ueIDIG+sR6wk1OLVPd1IIReh9rke6i4E5us+9FSL+cXk/WQ9fTIeRlsGDxyqBEAGIzwcJA/yCloZkt+dk51nzEjG6amIVv1sXiecPbomZjz8eznAADSKmG1nrFB6wG/ATFIP17p7jDEaKSqLgVCjdXX1wcGBrZ8nt9MlVX98EHli7pIz8FjkGHeg2avGTaqshi9rKVpZ8JOR8bEPiB641TXCilGxtjVDwEAcCL2heeA08LfIn4/cPD38ycW9iexIw5HsFtan7nw1z8iI/9Y66ENwFz4a0RkZJi/B9E01nNKwJFfz0Ts8ekORfHx/zZ/O7jxlO1nIiMi98ywATD+cFtkZMSZPbPMc1jZEpKDT/CeH0IOnDm/Zwo23iOk3tSmfw4AP3b3QdNvF3wf4VVRVk2iG0L8ga/2nD08eNWK0z8vEOpqC2/97M+qhaZjTEpZJ09cDpy7/4JXhYDyPGHvtqhESWDAiUGFVRSGKSTWSSAvIbFg8tzfTn0qKU8MXPFXqzcCoA6G10mQGnqd/5bNVVlHT14OWbr/wtQq0JWkndoWxW/uwmGjKmv/idjSgMCQI0XVJJPu8iQpAD859qHPsu9+nyZ9nnJ0zYGH2K7/HhOmpWZIwHXyFAcKAADF3mfy4BMZyWmp5XNd32J3ckl5bg67FqAbhf4Gm1H6DR1AuZV65KuZGR9P8Znh42b5eg1mCCEVUaN8D1Dx8H9rV/xhZGFlQpaUFRZUSAEgYc/G5KMMU/1qbnGNBACAf37FnPMAACBn/7VkCrEh69iKFaeNGEbUal5xjQTYS6acZ5jpksTVFRU1EgDIiV7jc93UWLe2nPdcVZ8OIdTFNFdlHVu05A8zi27i0sIKKQCAJHHbR4nE6lWROyZFEhs2qrJ2+IYbMIzJciGfVyUFgMKovV/GGzD0JbwX9R56b0nEEgCKnrZi8mAqXRsAxBLxm+8r98SX7icAAEjmHit8HEg5r7+p+Yw9vxue+OX0haSz+5L+uuDzw6/+bjifMULqS63yPQAASCoKn1S8/EoVr6Dq1RvWVvAK/3tWwyt+eUIRSQ23GKcYQQi1saZVVlVx4WvUWC9VWXIJn1v88m6r+K+1F9TFGduYG0NqButfyceuFACQ/Jv6QAL6ZjYmQNGmAkgEVRIACkiqBGIAKqW1dnVj19mzPMyNzQcMdnXoTgE5m0YFEFYRZwpVQiEAldLi1OQUG8+FP3guLI//7utNFy5EJ3/jNgHb8BFSW6rJ9xoa6tPv/wWpvA6Hn0cINU/9Kgd5nRyrrPfCYB8fp8tHogJXU30n2ENOTFhEMcVmznRXClD69zeHjPgjIaf5/cS3TydJwKHfgNYa1Q36fzTLx4HU8JTUp58z5UJSWMhhPY8e7JgoNhhMGGDd/KZF/+w7XWozwJIuyikSy4FK0Wvjj4kQalOqyfd2dnb5WQ8Y2mo066pck9SzZ09VlwIhpHaoVCrDxJjPrzCgkl69doeoq4MaicTS0lLVBUHtj+Qwd+cPkh9Cws7tS5UDUMxdfdduXNCfAgD95gYtZgceu7Av+AIAxeYD/4BZNm+ya+MpqwMe8H68EPpdEgAY958dsMKj+fMDITsj6cLfpyPkAAD6vaesne+GjfcIqTON+vr6jj/qmTNnjh4+ZKpV1/GHblaFWK7Xwzzi3DlVF+R9FBQUBACBgYGqLghCLTp69OiZU/8zJaugtmwWt0bad5Dr/oMHVV2Q95dAIAAAFovV+mpMJpNMJtfUtEXnULmwvERC6WGs3+g0UyIsLxOCvrnx2/aHl5QXlYn1Tcz1XxHZJcKiUiFom5gbY7ZHSN2ppp/MzJkzXQa7lsk11eHXslIs0zHuvmr1alUXBCGkpmbMmGHe06pUog41FvBEMiNT83lffaXqgqCORdI3Nm8S7gGAom9s/vbhHgAoxubmrwz3AEDRN7c0x3CPUKegsvtrd+/Zs3r16uzMTAG/QhOARuroM406qJfV1WtStWl0/bUbNowYMaKDC4AQ6iwMDAxOhoauXLkiP/cpv7JcA0C7w6ssaX29vK5eW0/f2NJi2/btDg4OHVwAhBBCnYUqx8/Zs2dPRkbG2bNnhUKhs7PzW+yBzWbHx8f7+fm9xbY1NTU6OjqOjo7u7u5vsTlC6L2ipaV14MDBGzduxMbGVlRUDBw48C12wmKx2Gz2p59++hbblpSUGBkZDR06dMiQIW+xOUIIofeHisfH7N+/f//+/d968/j4+OTk5AULFrRhkRBCqCXu7u7v0iJw8uTJ2tparLIQQgi1K7UbpxIhhBBCCCH01jp3vmexWHV16jIID0IIte7WrVtPnz5VdSkQQgh1ceo3f+2bYDKZf//9t6pLgd5JSkqKqouAUAcZNWqUXC5XdSkQQgh1cZ0738tkMpWM34/a0PDhw1VdBIQ6iFwux0uOCCGE2lvn7p+jpaWFM7QjhDoLEomkqdm5a12EEELqD39pEEIIIYQQ6jow3yOEEEIIIdR1YL5HCCGEEEKo6+jc+Z7FYolEIlWXAiGEXktqamp+fr6qS4EQQqiL69z5nslk0mg0VZcCIYRei6urq7W1tapLgRBCqIvr3PkeIYQQQgghpAzzPUIIIYQQQl0H5nuEEEIIIYS6js6d71kslkwmU3UpEELotdy6devJkyeqLgVCCKEurnPneyaTqaWlpepSIITQaxk1alSvXr1UXQqEEEJdXOfO9wi9MVmjIVVFoiocYhUhhBBCXQfme6S2ROn7vQfNi2wlffOv7/Aa4R/XShctziX/cR/sYL14xj2/zM3FbdyQQd7700UAeeFfufV3GzfW2W50QBz/v43y/ufr7BKQjD2/EEIIIdQJYb5HakmWvn
uKm/8/xcUtNq6L4rZ8MHFvUjG3xfzPv7jMbfrBdF5lbcMWwVvy5v9zLyk5xOx4QBgXbEdvCrt7L+nuvZCepw9efBHwRSkBy87zDXDeNIQQQgh1Tp0737NYrFevhDojLZelp+9d2+hmqLi9InWH7zeXuDJR8o++y/7kA9Dc1kUnnZ3vpJjfrOqS/4yAOD7wY/29N8aJAAw8Q+Juhs22bVjhSfI9g3FulgB6buP6Z8XdFYGlrS0NAETFfJqZJQ0AoCouYBt/6b75tnhbB2oHt27dYrPZqi4FQgihLq5z53smk6nqIqD2QtN7+bnr0mWMg9NGj1vz2DtgqgEA0PRooJzC9Sb6zxXt+Nht4vewdPU4GgDQaDTlFfiVfH0DQwAAmoEeiPhECz0/ebvvMcvggLE0AFH6rt38r4MnmrbrJ0Pvr1GjRtnY2Ki6FAghhLo4bKVEnQWNRgOQAWjRWvpvS9MCAFGLK+jRaCJ+LQCAiF9VS9OjAYjS9/ouzZp97vdppgDAvxTyv0oQhQQkZ6Vzi4/9L91lrgutuT0hhBBSc2QyWVdXV9WlQEg1Onf7PXqPpO4OzpsfmXxpd89jwX/ym1mh6lLwYdqmi/ciV4kOfhfXTPd5R7dBxUlxXABRelLGILchNFFK8NKLbseON/TG0XMLCN+9bIqX18eDbPVsXYbYYrhHCKFOCsM9ep9h+z3qJFw3nXMFADD99tyIZlfQmxgSOREA4OOQcx83twJt4qZ1Yd4ffxBJKxZNOxZsCVlHL2XlwdKx0QCg7Rl8bds4p+GmAACyYgO9ykGOBu3zSRBCCCGE2lHnzvc4f20XNyokc1SrK2hNC8ua1uoaBvMv3FM8sZ0bdm8qlyszMDWhAYDLtnuV21rYbUbru0XobeD8tQghhDpA5+6fg/PXojdmYEqEe4Q6Hs5fixBCqAN07nyPEEIIIYQQUob5HiGEEEIdTs6OCNoXX/5G20gyQjccuSNppxJ1KEFuQnREwtMO2Qq9fzDfI4QQQqjjFeWk5ZTJ32yb4qy0vOftU5wOxjqxds320xnijtgKvX86d75nsVgiUTMDIaJOhM1m44ye6D2Rmpqan5+v6lIg1P6EGac3fenl6e7ps2xffBEAQNaRmV8cyZEDAKR+7/V1eBEAgOTfI195eU398sgDkCTt+3Kqp6enp+eULzeEpgoBQJ703ZRlEaUAAJLYDZ4bYiQAAOKk3VO9pkxbHc5+m4JVJvy80m/mp15en/r5H0rhAQBA1m8Lp0728vrUb3tkrlh5HR+/gCPHf17vN/XTqX7rj7MEAAAg5sT+uHzmp15TF2yPeioGAE70Lv8jKQKlg/CSDvv7eo0ePX7q8p8TuADAi9oyc+bKsFwAXnTAzM+Wh71ogBenH5o59bOZP98GyI8ImDPVy2P0aA+vmeuPp/BebyuEWtC58z2TyaTR8F7Jzs3GxgZn9ETvCVdXV2tra1WXAqH2Jkk6EHhBe97vl29Ebx+c8+N3EUUtrEgZsPC36Ojzvy/sB+IKdrHzpujY2Nhf5tEvBe6IFbawDdVtzfnoC5F7Ztm8TdEEucksk/m/hIf/7Nvtn7U7r4sBwO6T4NCIyPAfhuX+uOsi7791Tm515xw7/cx989FfQzwFJ4JO5QKIWfsW7uR+uOP38JCxnJ3fhnEAQCYWK7en54etXfZzbKXdZC8nSeLhb1YczwVxaTaLlVMmkYOY94T1IKuw5sW6dIu+g5wHWXQDKMtKuJ0ldpjs484oi9q1cH0Y53W2QqgFOPgMQgghhNqOPDs1jTL2ew9jEoCTzxSn05fvCX3sX2tTCgCYe/hNDvv6dgaMa5/ikbrpM+h0kufk4TuDcjgw1h7khbf/jnlWI6HqlBbyASgv1mGYTnDvfeKpoT2DAZPHOv98+ynIJTHXeZbuz1OiL4JYn/44nVUDkz7ZvF9p95wbF1NqGD4BIevcqO7i0X5nYxLyJ7RQFKr9J5s3f0IFAEgDALAcO3fFLDpTNnrh37EJ3DGv3gqhFnTu9nuEEEIIdQJaQKkVv9xtnAIA0MIcNhI5AFAoIJa02820VB0qiAG4Ed/M3HShjNqdDiAHeOl+ACpV60UJqVTirediMZVKBZABkFxmr/VxIjXZr1gMQKVTqQBA06ECiMVyAC0Aubi5XvNNX6N2o9MBRGLxG22F0Esw3yOEEEKo7ZD6uA6WXP8jvlwOkscRF7L6uA3SB1Nb86rUpBzltE431i/KYUsAJESIFwvLiiQAwoyL14r6D+4DJHObnuykm0VK21D0ulGKnhDbtFFp87NyTSatXOIzadokpskrP5qzizM81xnu84Wv7xe+vrPG2FMBuFmsx/91v7ccMsKSxLn42+HYy8ePRXPAdNAIa0ZPUzpwEiL+Tkh52tDvyIjOIIkzr0ck3IhlNdyVw2HFxEYfPnyZA6Z9B1m+7lYINdW5++fIZLL6+npVlwIhhF6LXC6vq6tTdSkQam8Ut2VB7B07Zo7fAgYDPly90cccACYsWnxri//4MAmVqm08digVSOaTZ/Vbvc3LE/THbghdDgC5p1dPPVIu17eZsDboY2MAmLJsburWOeOPApWqb+blAwCuPnNNvl3qFUkxnhJ0ZrlrGxS2/2Rv6nK/iQn2huLcApjdtD3+JfRJ69enrPQb/7eJBUUCY3acWcVkHVs+J3NudJivJbHKwEUh3+QuP/Tz8usApsNX7FrBJFHFi1aMYe2K2LgwAgB0mPpUALvZcz1jtl/etfAylbkh+kx/AADO5e3L/wYwZM77btFrbeVn2QbfAOqKNDppPmaz2aGhoWPGjAkKCjpx4kRoaGhgYKCqC4XeRlBQEADgPx/q2uLj4xMSEqytrRMSEgIDA+Pj4/38/FRdKPT2BAIBALBYrNZXYzKZZDK5pqam9dUQAAijVk9LnhK7w6OtdmhgYPD6K4sreQIZlW5Ep74i3yutD3SGIRUAQC4QiOl0nZfXqOFxKoBhyVDqJi8W8ARirYatGu0nbfto3zD7wKv7PqJT6crFaHUrhFrQWdvvbWxs4uPjU1JSKioqbG1tr1+/ruoSIYRQi2xsbObOnWtkZCSTybDKQkjdUA0ZjLden9Qk3AOADsOy8YtUepODNDkulW5If/OtEGqss+Z7AAgMDBw7diwAeHh4eHh4qLo4CCHUIhsbm8DAwLlz5wKAn58fVlkINaI/aU/sJFUXQoUGb775cDO83tUDhF6pE99f6+HhwWQyAbt2IIQ6Aw8PDxMTEwCYM2eOqsuCEFI/GO5R22mv9vvS0tLQ0NDq6mpzc/N2OgQAuLi4PH/+PCcnJycnp50OUVxcbGBgMHLkSFfXtriPBzXB4XBUXQSEIDMz88yZM9XV1X369Gm/owwbNozNZrdrlZWdnW1kZDR69Gh3d/d2OgRCCCE11y731wYFBd26kUAGqJeKqVqd+4RULK+Tyuu09fQNjE2Ctm+3s7NTdYm6Gry/FqncWn//rIcPaoUCTQBdSifutQgA1VKZvK5e38DQzMJyc2CghYWFqkvUNeH9tervje6vRaiLaftfskOHDiXfvGGtTdLWI
r2YvaJLyOPkr/Nfszlw68CBA1VdFoRQm9n07bcP0u/Z6pJJJvRXr91p1D3Nzly2ZMn3O3c6OjqqujAIIYQ6VBvn+ytXrkRG/Nlbj6KlqdG2e1Y5W0O9HF7J7ydP7t67V9VlQQi1jYiIiDspKY7duuAwc3aGeg94pRcvXMB8j95bVdXVqi4CQqrRxvk+/d49skyipdkFfywBwM5Q7/adOyUlJT169FB1WRBCbeB63DVavQyga1ZZlnSd6OiotevWqbogCKmGnq6uqouAkGq08fg5WVmZhtpt0yeHbDv2Qw/7NtlVW9HS1NTXphYWFqq6IAihtvHkSa6pHq1NdmU63HtEf7022VVbMaCSNUCjqKhI1QVBCCHUodq4/b6+ro6s2eScodfk2T5DjWlkqaiy/Mmtm1GJXNGrd6Xt/OGHzOtx8bnSti3iu9HU1OykM/4ihJrTzJ+zqcfSSSN7apNBWv2Mc/fytfjc2lfvh2I5etaIJ4nJGVVtX8Z3oKHR1bpKIoQQeqWOGP+ebMlkWlakRp9LTnkCg5etXvOhcQccFSGE3gbF2HmIrfzujeiLqRkSy9k7F/uo14VEhBBCqHUdNRKciJt37y5Xfvd+sZFD0CgH2pVkkZ7DlK8/HOtkDBUP/z70V3wB9Jo8fc5khx5Q+/hi6J6LNQDQy3v1Pm8tSUXOP7/+dS1XSrIft2H1CEtdqM5NPXXgCquSzJw157NRPQx0QfAsJ+bkX9dypQBky7GTPp/ibK1b+/R61JHwHEEHfUSEUJchLc/NvMuSQmKetvOm4UzjiNxymv0IPz/3fj21Ku/fDD1444mE/FKVdQUATD/YvP4DMlRm3T31a1x2FTSusqptp6ya5G5vrAu1xVnJf/56I6sKAOiD5syYPtJSX8pNOf3nqcRyVX92hFB7EAt4AjmdYdg17/ZB6qaD56+lWDrbd6vkltcBffSqORMgee/6/f/LtZu3ZER3PdfP5vTM2rlz2erfwm+VywEAgHs97PstxyKe2c37enR3APmz1OPb96xaFpqo6z5viiUJtC0GOstuHQvcEHr+iennwbOH6wHJ/sOVCyyfhu7fuDOZ9OHs2Uxyx35C9KYePXr06NEjVZcCoeZQrBz6mNaWcmuA0tdv84fGd8I2rzp9v8eHS2dZkpqpssrvnwzdFnw+y+qjRd6WpGaqLOM+Q3QyDu0PDD6fYTRu3foR3QHMvOcsH1oeuWXnD6cFw5dNHa1ePfgR6moEWbGXWR3d7ifPDVs8euiI8V4TRw+dc5wDwPrRa2BfR8e+A6fuy3rz3Yk5aQlZlS+/JuCwrsfGxqbkKr0ueJwSGx2bcJ8j/q8ksf6DHB37OjoOHL8rTWnNp6yU+5xXfy28XFZaltLuWiogLyuNlctrfSUB53Zs7OUE1lOB8mus67Gx11m8hkMIIhcO7Ovo2Ndx4KeHs+SvLB/6Twe132tZeS8/OgVoutqCu1GHQnMkFNfhA6HsivHQj4wBnkt72faG5OIK+nAf9/yIGym5UgBtAKiqKCnmSksv55QHm1pToBTIJk6uo610tKVSfVNjEggAQFRRXsqVloZGOY/8auRAckmvvpZC7h2nER8AiKrpvZyMSSwu/p9QYzh4H1I/VPoH67cOB7KurvTxudM/JdZSmM6D9AQsvb4ffASUaqmFk52uhPtSlUUBAGnZM24pl3v1FteL2ZMMHHnjKgsApAImRr7SAAAd5ElEQVRueSm3/PzB5OFHXQfpPSQNsRVx86zHuluDVEi2dOxFvslSq5uOEOpSCmJ++gVCPJn0jpx788G5EzcE7t/d3P/Ji0k2LFdH3vkq1t/D/+lb5BNxyi8rA6ghNzcPa3jl8XG/L3elyBkMGY8HTotPnFkxELKOzJmzNwsYdHEFj+4ZEvnTJAaxtRyYq6JDv7CkEhcSxLysq78EBYWxdHxPXts8vKWvRS7mPLj4S8D2iKd2K/46v7h3i6UT5KdEhATsiuUMD7x5chajhb1xIpbNCrghZhgBjwfDN4Wf/MIenoYtnLM9QcxgyHgCU999YZvHGAL9k313PhKn/OC1MF3y5t/Ue62D2u9lBRf3L56z5/jdWpJEwKkCADKFIpUCaAJoVuX9E856VpVzfPXhSK7p1OCgkK8dXhrSSlojATIJ6B7rV6/80LiuQtDcr19tlZRMowCFoiWXSgFAE6Do1pWYrBoM9wihNyMWXNu5dfGSsMQKkFaU8wGATCZLpXUAmgCyJ8lnrzyTSFqssqolUhIAtF5lVdXWAplCJlOoICdqQhCkn4tL42K4R6h9FV0M+trPb/FhlhyAm3B4jd/UyVP9NoZlCQAAcs8GLPSd6jXZa+aC7RGPxQAgfhq1fcFUr4leMzdGcAAAxLmR2xf6eHl9tnDX37liAABe7N7lfj5eXp/6HU4TJOzz3xX7X9s1a99Mr9URHLn49r5ZXpOnztybAgBAolLpL3fUkfMS9i2f+uHo0R5eflsisogpjytTDq+cOn706NGTF+66zgMA4ET4+wRc5PEubvTy+nTqriQAAE5mIdV18ZGrN29eCR4DWRejskCeFX9DaDFr39WbN898Zc+LjUhQbu/XolJfpHte2Lzxc34ptez9itn9svZN81oUJrK3e8V3mxTgNWVtjI69feunT/kJsfn0Sd9F37wZuW6wIOVcTC4AK/xEQvWY4OibN8+vcyoMO/w358UXpUOnduTJWFfRYTOxS0AuKb924Cxz74yvxub8eD3v8TOd/hX3/zrH/e+UrIoTHxqalDn7wFLX3qFRTXZhbNdTyvr1r4i7UjM9V0+rF6+SKVoAUorVQJce3PRcKUfCkYyEvItX7qrXKBYIoU5GUnn/158cQgKmf3R/f/QTTj65Zy0r7myWUv5WrrLCc5rsoNkqi0ymAAB0H+lsXfHsXLUAnggmGZXHhyeXdsiHQgiZjFkcMNeeQmWQco+v2c76aH9ooP6NHX7+R/pGr2Vy0mKeu4eGzzAp/DtgQcBxlz/mPd23LXdoeORhSwFPzAAQ3Ni+8Iho5YGTw+QJO1cs3MWI3jysMD32qXPgyUUOADri9DgxKDUs2k0L3ME4tCD49rCvdizpTwUDi2YLxTm79psjnMm7js41idm0OGC5Tq+rG5hZ536JyrVfdDwE9s0KCPrF3X3zcLMxi79IuB10m/nVjiX9qfpWAACWn2w+8omYkxYbkRibKbecMNoOSPTFYdGLAaAy5ZcEDthN6Nt8gGf4HL7pS6cnbBwa1erQ304Lw+98Q4eohbGxrVZVg9dF3wmm847PjEpobTU73yOXfAFA/OBwzGOw9HKxBEgXi0FLv5suAL2nJRViM7IALFvbCWpVh+V7QuX9k+FDQuZ8yEz8M/rAtd7rVx+Z/FwoIVcnhgZc6Llh7ziT6lqyEb0yLvSxBJr8CXASr5evW79pb0ktyci47o6MeNVxTsDRyTUkI/Kzc6HRBVBdEHWc+dWSk1uFJVKyLuePZaHxGPQRQm9FxIoKTVy/ZI5r0vbk46G2/lu2DhcKpBTIOLjn1ycjNr1UZdE9G2/dtMoiA/T4ZOdWdyHQ
9QXxe05nSKRw+s/r62eHhI/jC8nkkrjAzTcw6CPUrij07taWliSA/IsxWfp9P7p3MRpAx4THuscBJgCVRjehGzLon3jaH0p5KqdaOljkXj4R4bDIx90SANIvJ5hMD5/UmwHgs3JaxKzL6ZuH0QAo+kYMuiEAwJi1+8coHY1u6cS0604Fanc7plO/lsokYN1IF1v6+nzkZE/Sn+B0eFdqci4wnWZvXkdPyPwnqrACoLK0TAZAZdhbd4dm9sZLOLJpV6KY4b7I0+lFlhfnR21fvOmi2D3410VOLTSBU+mvaLl/QYdOBXhlx/vX3h0AAO/6rgXrj5f1W3d07XAqgPP4MYxzEdu+Fl+ArNtiALlY3FWnHuwQHZHvpfHb18U3PCm/8tvcKwAAkHVlh18c3ZSuKxaUVkrlkLdjAau7KVleUV5eBQAQvWF1NAAAyAuu+M8CAIDw/QvO0Y2NyPIqQXmVFIAOABm/Bu+/r0OuKC9/cSGg/ObBnUm/0rsbkasrygXYYQsh9EYkDw/5PWx4Unt379a5AABQeTH0myvaxkY68KL+udGoyvrRJ5XYpvri/pkXAQCyG1VZFFcATsT6w4kScnWFoJqonSofntqwKcLQ2IQqLeMKXmNuEITQ29OC/xrXJWIxlfriqd3kFcMGvZRNdWj6IAYApyVnTjqFnTgya/zB2ft/nyepEdN0X8ROqg5NXC0EaIM58kRyMZCIOEulagFUAwAvYuW0gFz3Fd9MsXv6iqArrmH4Hr3jW5mwfebChdt63tw/if407Js529OtFh/5acXwFrrBqxAv2n/Wulj9GfvDAzwtiQ6N7sHhx5kXb/Po/T/s/oN/DMMEw/276ODxcxqTCrjlxZXSF39sEkFpwYtfypbIJYJSbnl5lXIXVZmAqwj3/61WzMVwjxBqU5LacuX65+2qLEltKbch3DcQVZZzMNwj1P609fWFvFKiA4CVc1+qkNrfx/cLX98vfH0/cmqh7ZlqP3Ze8O/7J/OjYh5RnQY758bGcOQAck7M9VznYcxGawsep2S9YugYgn43Ogi4HJ4YAOgu/eyh8F78UzHwbqdkAWOAs6U8NytHTLUf4+tpKeYJQCYREe3nNCoVxKVlAgAQiwFAnLBx6ECPhYfTOJxCTmkNgFgE8qyfV+9KgDGLFo6hFbBYaVmcmuYLIa7kcDgcXg2A/HkZh8OrJI4hSDkSELBP6TaCGh6Hw+FUisUgERZzONwXg95wLv8csOV4imIIHLmAx+FwSoRiAJGgjMPhiYmA9zhi+5btYQ8aLgDkh63dHMVjzlsyiVHGYrHucwQA4qSfA35MoI6ZPKgyPoHDGDPa5XW+RNSSDu6f08YEl7esj5Xg7WgIoc5Akvrz7PtybHdASHWsJ/g4zVo7caoF1dLn6P4V224vXzd+vL6FvlzYd3Vk8NimTcaCqDWzDnMY+mJOmal3iCNYOm5ekfbNrA9O00EAg1fs82YAKHddzw1buzBqzJnoVU6vKAnJxXeJZ/pe/9EDYXjg1ZNf7ViX5f/zp0N/AaAz5wUvH0MliT0/cYo4EjDUw8l3+Txm1uFdK8OGH/e1dJw8e9jFn9cMdVxHtfcLjV7LHLNki0/Otp99x/8MQLX2XLd6Mp0Xkf5YDPKEXQsSAABIlvP+uLqumd5BgosbvQKuE7E7yv/DKKrX/vu7PaHmdsSxi7kL5ira/Tmnl48PYRHLxxeMP24x70zMOiaJl3L2+EWtzSsUJ0ZZv8z67DiHyPR7p47f57Ti4vnFdpAbGxZ23f6I/4uvV/wgJb0GxGmHl/seBgCg+xxJDh7DHDOMGrbrs/FAojO/+HGFOzbfvxON+vpmpmd/a19+7qtRzqVTu+yg809rZNu+3zl48GBVF6TrCAoKAoDAwEBVFwS9jz6c4GlFAZpWlx2d4SFfdOr0aXNzc1UXpKsRCAQAwGKxWl+NyWSSyeSamhbaTlF7MjAwaOktMY8n1mXQdV48E/AEYl0GQ6fFXQl4PLHWS1NTiSt5AmhhsqoagYBKf8vxN2sEAhmVrjS0jlggEOvS6SSiPzr1v5FkxGIxNIyC88oiKchjlw9azlt19Yxfq7eu3t4+3r90/YX9noatlzbW32Nnt/3Rm4e1nsV5YfPGR7hGnl/y6rnAxZUCsQ690dhCKUGj/VJ9zv+1oqW7CFBTnbv9HnUBbDZb1UVACCH0HqEyGEoBkkpnvKJ/etMVqIYtb6PzBveYvnLb/8I+ifpS6KVSG2Xq1or0sqxw/5nXGZMD9vu2MIx9bsZD/emBrwj3AJCVnttv7v5XhHuAGlYW133+9FeHewCgGr6U7QXXd33z2z1eAQ9aPFlDzcN8j1TMxsZG1UVACCGE3gMk5pLDR3xkAEC17NHiWvZfnTn/OnsbvO788ddYTccz+FKT8cVeD91p8tyFwwAAdO3tsPH+TbRxvq+uqa2pET8Xd9ku8dXSOlUXASHUZsRiSbFErqWp4pEG2o9YIlN1ERBC6oPh5DbmVXcGqBNTpzGmnam86qON8z2VRhswztPMzKxtd6s+Ll68qOoiIITajBaZPPbjKfr6+qouSHsJDw9XdREQQgh1tDbO9/r6+pMmTerCt5+mpaWpuggIoTajo6Mza9asLnz7aVRU06nAEUIIdXFd9qo0QgghhBBC7yHM9wghhBBCCHUdXSLfsy9FpjaZ+LEsLvI6VxWlQQihVjVXZYlSIy+x2/5Qcrk8OztbJsO7bNF7p7q6WtVFQEhlOnp8TNE/y9zWQXD6gYltduS8Y9/FGX438cWzsqw8LSdbAwAT28rw4Mj+B6aZtNWBEEJdSnV1tVT6isG+OqzKotlA3MZjTkfn27bVcQAAoK6ubvny5ZmZmY6Ojs7Ozk5OTopHCoXSpodCSL1IpVI+n6/qUiCkGh3dfi8qK87iNmlrfwfc/+1O9/SfZgIAorx/dniPcAu4TrxjO3+d06XvLuEfN0KIUF5efvXq1ZCQEF9f3759+xobG4tEr6iOOq7KMpnm75m++39tfNWRTCbHxMRIpdKzZ8/OmDFDS0vrr7/++vLLL3V0dBwdHadOnbpp06ZTp07du3evtra2bQ+NEEJIVToi3/Ov7/AdPWjQkA+WnecCAPCj/UcMcpvmH8kGAFHcdi83F+dB43x3xHIB8iJXe38wYtCg0V7LDifzAYCffnCJl9sQN6/Vp7Oa/shyI4OT3QKmmwIAVKXHsWy9JyuNk9pr6RrbyOBYTPgIvac4HE5UVFRwcLC3t7e9vb29vX1wcHBhYeGECRPCw8NFIlFzI2OK0g9/9cGQQYNG+x57AgAvV1my9IMz3Ab1b3hadilgxgduQwa5TfxqN9Eh8B2qLNPpAW7JwZHt0K+QRCL169dvxowZgYGBf/zxx7///iuTySIjIz///HMajRYdHT1v3rxu3br17t37k08+2bBhw++//3737l3s3oAQQp1UB/TP4Z/7MfiSfkDYNhdDW1O4CUBznrZ6Gneb/9LvJk48Os7Zc37AcFreiWX+W45NGzvu3j+XikeEBDjFBayeJrLJmv2Pt//dicc2jju3eqm/07j
or02Vd51+KMxswbkXL+mNmP/tiPQtB6KVVnD6ejbMO5bnuaatLnnX1dWlp6d34QFAEerUnjx5wmKx0tPTWSwWi8WSy+VMJtPFxWXGjBnff/+9g4PDq3chuhSyMaz267CQsQZmROXyUpU1yG1ugBMUn964zH+f97QlWXEXM8227R6XFeI/Y6lZRpjpd+9SZZnOXmDmfSh92jaXNvtGWubs7Ozs7Ozt7a145dGjR1lZWZmZmTExMT/99FNWVpa5ubmzs7OOjs7HH3/s5OTk5OREp9M7oGwIIYTeRQfkewO3qRMNvwmeL/QN3j/OCQBotm7Tl0JsQBg3TyTLi/v9YPA/eSJRscigki8DADBwmjh71aB7x8fFpcbZ3i0G9qXgLbRKPl87IxPgpR9Ll9njDh6P4zPHGbRwbO75SNHHAW3Yn1Umkx07dmzFihXjGgwcOLDtdo8QejMPHjxgsVixsbEcDic9PZ1OpxOBfsGCBS4uLlZWVm+8R5qb93Rb3/1L/Tn+B4eMM4OXqyyu6Nz+gNNZ/Fq+SMSvBADQMrQdPXvp1OKw8JCkrDyzd6qy+HGn88bN64hw3yxHR0ei047ilcePH2dmZh4/fjwuLu7AgQNZWVndu3dX7sRvbm5ubW2tqgIjhBBqVkfcX2s2+eC9jzNDZnkt2zZuvOKHgzjyo9MhJ/LGXcoMZnubfffiHVEll/voXhYXzEz7mZrRaFqzQw7PdwK+yLTJz57j0jWMr0KuuwWPpTVz4LLI4Osua46aNvPW26JQKAcOHLC1tb127VpcXNwvv/xSU1OjyPo9e/Zsw2MhhBqRy+WnT58WiURE83x6erqVlZWLiwuTyfzss89cXV27d+/+zgcxcNuWlLfg2PyPA9b8b9olxQ36WgAApRcP7r7rFPb4GG21me+LN0R8Hjcv714emHkxzN6lyhJdDznNWPOb4zt/grbTu3dvotOO4pXc3Fyijf/WrVtHjx6tq6srLS3t27evs7Nz3759iQU9PT0VlhkhhFBb5vukpKTCwsImL+cdm+7snyICLdOJe1y6wbmX3uw10XvEgeCJhmFaIujlRgMAEKXvdTP7HkxHbQqb7uA2JPjSdH+v/jtAy3bN9achoxrv3Wn5Uu0Fwckjgkc0/rnkX/ruktO639p2MAqCkZHR9OnTp0+fDgCPHz+Oi4u7dOmSv7+/ubn5uHHj9PT0NmzYoKur2w5HRuj9IhKJ0l/m5OQ0ZMgQItAzmcy27y5SFrnUyTeyCsBgRPAIM3j80pvdR0+bqLfMt+clkInMvqYBAMjyjk0zOwYGLovD5vczMNjytlWWKDn4hPbSo06NN1AzxG0MkyZNUrzy9OnThw8fZmZmXr9+/eDBgw8fPjQ3N1dO/HZ2dgYGLV1kRQgh1PY06uvr33EXaWlp586dO3fuHJVKtbW13bJlS+Pu6TIRv4wPeqYGLbXpVPH5MpqBAQ1kyQH9x8XNu3fpa1sDA8Wvn4jPbW1zUcrBY7XzlzZqD+Ne2h1ru+aLNv6xXLhw4YIFC1rqf3/79u24uLjLly/funXL3d2daNQfOXJk25ahiwkKCgKAwMBAVRcEqYWqqqpTp06JxWIizT98+NDlZVQqtQ0PN2XKlF9++cXc3PylV0V8bpnIwNSU1kIDiIjPF2kZGOgBPNnt5nLM7UJSwBADpQrqbaos0fWDx7TnLx3e3KXId9D8B2xnOTk5mZmZDx8+JHK/oaFhcXGxcuLv27evpmbnnn1FIBAAAIvFan01JpNJJpNramo6pFAIIfTC27ffP3z4kIj1tbW13t7eYWFhrq6uixYtqqysbHIQmoFpq51k9BradrRMnT+eBo6GSuEeAF6xOW340qVNXzWduOaL1/kcb0YsFmtoaLT07rBhw4YNG/btt9+KxeK4uLi4uLhFixYVFhaOb2BnZ9f2ZUKoM+Pz+ffu3SPS/L179/Ly8gYOHDh48OBRo0YtX77cxcVFBUGQZmBq2fr7DTWUntO4qeOcexq8HOXfpsqijW2uHntndXV17bDXV3BwcHBwcPj0008Vryji/rlz57Zt25adnd2oS4+jozp1S0IIoU7ujdvvnzx5cu7cufDw8MrKSm9vb29vb+X26R9//LGqqopoke16+Hz+1KlTz5w506NHj9ffqrCw8Nq1a1evXr169aqRkdH48eMNDAw2btyIk8sQZs2aBQDh4eGqLgjqIJWVlXfu3CF6z6enpxcVFSna5gcNGjRgwICOLIy/v3+PHj3Wrl3bkQftMMnJyYGBgTExMaouSGNSqfThy8zNzcVi8UAlzQ1dqkaw/R4hpM5et/2+uLj4zz///OOPP548eeLt7b179+4PPvig6WoDBgw4fPiwUChU86r57ezcuXPEiBFvFO4BwMLC4ssvv/zyyy8B4O7du9euXfvnn3+Cg4PHjx/v6enp6enZwYFG3WC7XZcnFotTU1NTU1PT0tJSU1MLCgpcXV0tLS0nTZq0efPmvn37qrBsw4cPP3PmjAoL0K5+++23Dz/8UNWlaAaZTGYymUwmU/FKaWnpkydP7t+/f//+/VOnTt2/f9/Kyko57ltatnpJBSGEkJJXtN8LhcJ169YVFBQkJCT4+Pj4+Pgo31bVrEOHDt25cycoKKiLDZq2c+fOrKysVatWtcmAmFVVVbGxsVevXo2NjZVKpTY2NqtWrZo0aVJn75P6FrD/fZd0+vTpqqoqItCzWCxXV9fBgwe7urq6urqq2wltYGBgcXHxTz/9pKOjo+qytKVt27ZlZmZu3bq1k55CP3z48L6S+vr6gQMHmpube3p6Dhw4ULWnhYDt9wgh9dZivo+IiPjzzz8jIiI+/fTTKVOm+Pr6amm9bmN/UFBQWlqatbW1sbFxB9/X1eaKiorKy8uJSey//fbb9uhAn5WVFRYWlpKSkpSUNHny5EmTJk2aNMnQ0LDND6SeMN93DY8ePUpVMnz4cFtbW9cGJBJJ1QVszbp16woLC7t166atrd2nTx9VF+edZGdnV1dX19fX02i0devWWVhYqLpEbYPD4dy/f//kyZOampr379/Pz89nMpnKDfwdPGQZ5nuEkDprnO8TExNPnz6dlJTUvXt3Hx+f6dOnv924ZqWlpSdPnqypqWmrfC+Tyb7//vs1a9Z0cBtbUVGRoaHhyJEjXV1d2/tYPB4vKirq4sWLUVFR48aNGzJkyFdffdXFLoM09f/27iUktS2MA7jFrSaBpJPboCgyItIiCNwalT20pKCoiamDHkSPQUWjmjQJmkQERQNtkJBKBRaUA02zssQtVhBJLyuQMCQq6GkR2B3se7reyznV6Zpr7/p+A1Fwy38kf7699lrQ7ynK4/EEr7phMBhElSfm9JRbobezszM5OXl3d0eSfq/T6eh0eklJye9euL+/z2Aw8vLy8vPzPyMYSdzc3Gz9W3JyMlH0id7/2aMl6PcAADL7u997vV6FQjE/P//09CSVSmtra5OTP2Pj+I+TSqW3t7dzc3Oog4TD4+Pj6Ojo9va2Xq9PTU0lJvpkW9UQKtDvqeL5+RnHcbvdbrfbj4+PfT5f8KqbUB
wsBf5xcXEhEAgaGxs7OztRZ6EG4iRjouu73e5AIMDlcjEMI15DfgcJ+j0AgMz+mJ6e1mq1ZrM5JydnaGhIIBCgjvRzPp9PqVSiThEmMTExXV1dxHuz2azX66urq2NiYgoKChoaGsJwJwEAwtnZmd1uf6n1HA4Hw7CqqqqMjIzghyNByDGZzL6+vs7OTolE8ufr+wsDGo1Go7HZbDabLZfLiY+Hh4cOh8PhcExOTm5sbLwUfS6X++VvigIAQIRQKJRKpTKZLCoqCnWYX3I4HAKBwO/3ow6CktPp7Onp8Xg8TCZTJpPJZDIGg4E6VAjA/J5sXC4XjuMmk8nlcnm9Xh6Px+PxMAzj8XiUW3VDdXV1dUwmc3BwEHUQant4eMBxnKj7OI5HRUURRZ/FYpWUlHxszSfM7wEAZBaC82vDQKFQGI3GmZkZ1EFIwWAwaLVatVotkUhkMll5eTnqRP8L9Hvk/H7/y5Aex/G4uDiizSclJYnFYtTpvrXz83MWi7W4uPirM7PBBxCjfaLxb25uvoz2MQxLSEh4549AvwcAkBk1+n1bW1t2dnZTUxPqICRydXWl0Wg0Gs3p6Skxzk9PT0cd6iOg3yNxfX09MTHhdrvtdvv6+nrwkJ7qe159McPDw1NTUzabDXWQrykQCNiCxMfH5+bm8vn83Nzc1NTUVy6Efg8AIDNq9Hs+nz89PQ3nm/zU9va2SqVSqVR5eXn19fWVlZWoE/0e6PfhtL6+bjKZTCbT6uqqSCTicDgVFRUYhr1/91sQfhwOZ2BgoKysDHWQr8/lcq2srFitVqvVSqfT839ITEz8zzeh3wMAyIwC/f7k5CQtLQ3+H980Pj6uVCr9fn9zc3NLS0tERATqRO8C/f6zHR4eqtVqj8djMpnodLpQKBSJREKhkMyP3IBgY2NjBoNBp9OhDvK97O7uWiyWxcVFi8XCYrGKfoiOjqZBvwcAkBsF+v3S0lJra+ve3h7qINRgsVhGRkY2Nzfb29s7OjrIP5ctLCyk0WhLS0uog3w1NpvNYrGYzWan0ykWi4uLi4VC4etLDgA5BQKB2NjYg4MDuIeJisPhMBqNCwsLW1tbIpGotLRUIpHQoN8DAMgqEnWAtx0dHWVlZaFOQRlFRUWzs7MzMzNOpzMlJWV0dBR1ojcIBALS7spKRcvLy93d3ZmZmXK5/PLysr+///7+XqfTtbW1QbmnqMjIyJqamvn5edRBvi8ul9vb27u2tub1emtqalZWVlAnAgCA1/wFRz579bPKhBwAAAAASUVORK5CYII=" + } + }, + "cell_type": "markdown", + "id": "905aedaf-591e-4d64-8e2c-f6cb3f1491b5", + "metadata": {}, + "source": [ + "# 6. Compile model with multiple inputs.\n", + "\n", + "After reading [Example of compiling model with a single input](#Example_of_compiling_model_with_a_single_input), you know how to set calib_data for a single input.
\n", + "We will show you how to create a calib_data when your model has more inputs.
\n", + "If model has multiple inputs, calib_data format is `[[x1, x2,...], [y1, y2,...], ...]`.
\n", + "```\n", + "e.g. Model has three inputs (x, y, z), and these inputs info are like this.\n", + "x:{shape: [3,100], range: [1,5], dtype: int64}\n", + "y:{shape: [100, 3, 192], range: [0,1), dtype: float32}\n", + "z:{shape: [3,100], dtype: bool}\n", + "\n", + "The calib_data will be like the one below.\n", + "\n", + "calib_data = [\n", + "[ np.random.randint(1, 5, size=[3,100], dtype='int64'), np.random.randint(1, 5, size=[3,100], dtype='int64')],\n", + "[ np.random.rand(100, 3, 192).astype(np.float32), np.random.rand(100, 3, 192).astype(np.float32)],\n", + "[ np.random.rand(3,100).astype(np.float32)>0.5, np.random.rand(3,100).astype(np.float32)>0.5],] # bool\n", + "```\n", + "\n", + "Here, we will use an easier model to show you how to do it.
\n", + "The model is shown below.
\n", + "![image.png](attachment:562afee7-c078-4323-bc19-49e03c80d0e9.png)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "73f758da-bd81-44f9-9eff-734a23c427c2", + "metadata": {}, + "outputs": [], + "source": [ + "# compile kmodel multiple inputs\n", + "model_path = \"./test.onnx\"\n", + "dump_path = \"./tmp_onnx\"\n", + "\n", + "# sample_count is 2\n", + "calib_data = [[np.random.rand(1, 1, 1024).astype(np.float32), np.random.rand(1, 1, 1024).astype(np.float32)],\n", + " [np.random.rand(1, 1, 320).astype(np.float32), np.random.rand(1, 1, 320).astype(np.float32)]]\n", + "\n", + "kmodel_path = compile_kmodel(model_path, dump_path, calib_data)\n", + "\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "351e2e49-7869-4c86-a5be-e8b7412b4d06", + "metadata": {}, + "source": [ + "# 7. Simulate kmodel with multiple inputs.\n", + "\n", + "Simulate kmodel on PC." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8fc1f43a-99fa-463a-b1b1-c6a9174ecd7a", + "metadata": {}, + "outputs": [], + "source": [ + "# run kmodel(simulate)\n", + "import os\n", + "\n", + "kmodel_path = \"./tmp_onnx/test.kmodel\"\n", + "input_data = [np.random.rand(1, 1, 1024).astype(np.float32), np.random.rand(1, 1, 320).astype(np.float32)]\n", + "\n", + "results = run_kmodel(kmodel_path, input_data)\n", + "\n", + "for idx, i in enumerate(results):\n", + " print(i.shape)\n", + " i.tofile(os.path.join(dump_path,\"nncase_result_{}.bin\".format(idx)))\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "dd5a79f4-0094-476f-ac79-6ea6b100b5fc", + "metadata": {}, + "source": [ + "# 8. Compare kmodel results and onnx results.\n", + "\n", + "Here, we will use the ONNX framework to infer model(`.onnx`, not kmodel). And calculate the cosine between the ONNX result and the kmodel result." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ee2b661d-f6eb-4dee-b014-0fd3f95d589d", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install onnxruntime\n", + "import onnxruntime as rt\n", + "\n", + "# onnx_model = model_simplify(model_path)\n", + "onnx_model = model_path\n", + "_, input_info = parse_model_input_output(model_path)\n", + "onnx_sess = rt.InferenceSession(onnx_model)\n", + "\n", + "input_dict = {}\n", + "for i, info in enumerate(input_info):\n", + " print(info['shape'])\n", + " input_dict[info[\"name\"]] = input_data[i]\n", + "\n", + "onnx_results = onnx_sess.run(None, input_dict)\n", + "for index, (i, j) in enumerate(zip(onnx_results, results)):\n", + " print(\"result {} cosine = \".format(index), get_cosine(i, j))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/user_guide/k230_simulate-ZH.ipynb b/examples/user_guide/k230_simulate-ZH.ipynb new file mode 100644 index 0000000000..e9b9dc5329 --- /dev/null +++ b/examples/user_guide/k230_simulate-ZH.ipynb @@ -0,0 +1,438 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "id": "809dd383-637c-4e45-a859-9c4c492f7e1a", + "metadata": {}, + "source": [ + "如果在阅读相关文档后仍然存在疑惑,可以加入nncase讨论群:`790699378`,或者在nncase的github仓库提问题[click here](https://github.com/kendryte/nncase/issues)。" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "4beb4c04-ab79-4e1a-a7ad-ba53d9a9b123", + "metadata": {}, + "source": [ + "# 1. nncase安装和环境设置" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "82a8f9c1-c2bf-4270-9f1f-ac25c9fdd898", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install --upgrade pip\n", + "!pip install nncase --timeout=1000\n", + "!pip install nncase-kpu --timeout=1000\n", + "!pip install onnx onnxsim scikit-learn\n", + "\n", + "# nncase-2.x need dotnet-7\n", + "# Ubuntu use apt to install dotnet-7.0 (The docker has installed dotnet7.0)\n", + "!sudo apt-get install -y dotnet-sdk-7.0" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "d8d2a6f0", + "metadata": {}, + "source": [ + "## 自动设置环境变量" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8b32a0b5-90b0-4bc7-8448-2a544b7d06e9", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import sys\n", + "import subprocess\n", + "\n", + "result = subprocess.run([\"pip\", \"show\", \"nncase\"], capture_output=True)\n", + "\n", + "split_flag = \"\\n\"\n", + "if sys.platform == \"win32\":\n", + " split_flag = \"\\r\\n\"\n", + " \n", + "location_s = [i for i in result.stdout.decode().split(split_flag) if i.startswith(\"Location:\")]\n", + "location = location_s[0].split(\": \")[1]\n", + "\n", + "if \"PATH\" in os.environ:\n", + " os.environ[\"PATH\"] += os.pathsep + location\n", + "else:\n", + " os.environ[\"PATH\"] = location\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "b89f3faf-bab6-4f74-a658-1f27a0e49912", + "metadata": {}, + "source": [ + "# 2. 
设置编译选项和量化选项" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "09bb9ca5-f086-45d8-9ba5-c2415f24e167", + "metadata": {}, + "source": [ + "你可以在下面的链接中找到参数的详细描述 [preprocess args](https://github.com/kendryte/nncase/blob/master/docs/USAGE_v2_EN.md#CompileOptions),[quantize options](https://github.com/kendryte/nncase/blob/master/docs/USAGE_v2_EN.md#PTQTensorOptions),[Mix quantize](https://github.com/kendryte/nncase/blob/master/docs/MixQuant.md)。" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a7eff82e-295c-4cce-afbc-ce64c84dc40a", + "metadata": {}, + "outputs": [], + "source": [ + "import nncase\n", + "import numpy as np\n", + "from nncase_base_func import *\n", + "\n", + "def compile_kmodel(model_path, dump_path, calib_data):\n", + " \"\"\"\n", + " Set compile options and ptq options.\n", + " Compile kmodel.\n", + " Dump the compile-time result to 'compile_options.dump_dir'\n", + " \"\"\"\n", + " print(\"\\n---------- compile ----------\")\n", + " print(\"Simplify...\")\n", + " model_file = model_simplify(model_path)\n", + "\n", + " print(\"Set options...\")\n", + " # import_options\n", + " import_options = nncase.ImportOptions()\n", + "\n", + " ############################################\n", + " # 你需要修改下面这段代码中的参数来适配你的模型。\n", + " # 详细的说明可以参考docs/USAGE_v2.md.\n", + " ############################################\n", + " # compile_options\n", + " compile_options = nncase.CompileOptions()\n", + " compile_options.target = \"k230\" #\"cpu\"\n", + " compile_options.dump_ir = True # if False, will not dump the compile-time result.\n", + " compile_options.dump_asm = True\n", + " compile_options.dump_dir = dump_path\n", + " compile_options.input_file = \"\"\n", + " \n", + " # preprocess args\n", + " compile_options.preprocess = False\n", + " if compile_options.preprocess:\n", + " compile_options.input_type = \"uint8\" # \"uint8\" \"float32\"\n", + " compile_options.input_shape = [1,224,320,3]\n", + " compile_options.input_range = [0,1]\n", + " compile_options.input_layout = \"NHWC\" # \"NHWC\"\n", + " compile_options.swapRB = False\n", + " compile_options.mean = [0,0,0]\n", + " compile_options.std = [1,1,1]\n", + " compile_options.letterbox_value = 0\n", + " compile_options.output_layout = \"NHWC\" # \"NHWC\"\n", + "\n", + " # quantize options\n", + " ptq_options = nncase.PTQTensorOptions()\n", + " ptq_options.quant_type = \"uint8\" # datatype : \"float32\", \"int8\", \"int16\"\n", + " ptq_options.w_quant_type = \"uint8\" # datatype : \"float32\", \"int8\", \"int16\"\n", + " ptq_options.calibrate_method = \"NoClip\" # \"Kld\"\n", + " ptq_options.finetune_weights_method = \"NoFineTuneWeights\"\n", + " ptq_options.dump_quant_error = False\n", + " ptq_options.dump_quant_error_symmetric_for_signed = False\n", + "\n", + " # mix quantize options\n", + " # more details in docs/MixQuant.md\n", + " ptq_options.quant_scheme = \"\"\n", + " ptq_options.export_quant_scheme = False\n", + " ptq_options.export_weight_range_by_channel = False\n", + " ############################################\n", + " \n", + " ptq_options.samples_count = len(calib_data[0])\n", + " ptq_options.set_tensor_data(calib_data)\n", + " \n", + " print(\"Compiling...\")\n", + " compiler = nncase.Compiler(compile_options)\n", + " # import\n", + " model_content = read_model_file(model_file)\n", + " if model_path.split(\".\")[-1] == \"onnx\":\n", + " compiler.import_onnx(model_content, import_options)\n", + " elif model_path.split(\".\")[-1] == \"tflite\":\n", + " compiler.import_tflite(model_content, 
import_options)\n", + " \n", + " compiler.use_ptq(ptq_options)\n", + " \n", + " # compile\n", + " compiler.compile()\n", + " kmodel = compiler.gencode_tobytes()\n", + " \n", + " kmodel_path = os.path.join(dump_path, \"test.kmodel\")\n", + " with open(kmodel_path, 'wb') as f:\n", + " f.write(kmodel)\n", + " print(\"----------------end-----------------\")\n", + " return kmodel_path\n" + ] + }, + { + "attachments": { + "1855346b-3785-4867-9c92-0e811d00b9ab.png": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA5AAAAGOCAIAAABubXuxAAAACXBIWXMAAA4mAAAN/wHwU+XzAAAgAElEQVR4nOzdf0BN9/8H8Nd2c0/KvdXq9kndrWjimkuRoaEyYUu2uCNlm9g0mzCu+Ez4Un4lm5+jGZmtmF3tox+Te/ehmIToko9rWT9s19XcmtxLOleX7x8VocjPe+n5+EvnvM/7/TrX2NP7vs/7vHDjxg0CAIC7XL16taqqytRVAAAAvWjqAgAAAAAA7gWBFQAAAADMGgIrAAAAAJg1BFYAAAAAMGsIrAAAAABg1ixMXQAAwFOl0+ma2bJVq1ZPtBIAAGgmzLACAAAAgFlDYAUAAAAAs4bACgAAAABmDYEVAAAAAMwaAisAAAAAmDUEVgAASk1NNXUJAADQJARWAGjpatMqMisAgNlCYAWAFq1hTkVmBQAwTwisANBy3Z1QkVkBAMzQCzdu3DB1DQAAT88DvemqqqrqiRYDAABNuKIprf6Xmz2HiDDDCgAAAABm5kphZnLST8myIxeMRITACgAAAABmxtLeyc76xeo/s36U5V0wYkkAALQ0T3FJgL708NFSg7O4r4d9/aEKVU7B31yPXt7OresPGSsK9x/IK65g27iIe/XxduXduvxoQemFcp2BLNs4tBWJxc43Txk0ypzCygZD8dx8erhx647be/QTO3Mar6jgeOn5Sl21kfiCdh63utSXHj5aeEFXbSRLG2c3cVcPe26DywwaZVbOyXN64rl08fHzdL55ruJMXkGpRldFxOU7e4i7utddpi/KO/rX5QY91JZkqDiTV1BarqsisuK38+ghrrvZisLfCjQ1t5dq5dbjdTcqyjv6l8FZ7ONhT1RRmFOgMdxqwa07TqQvytl7+PeKqwzPVezjI7712QLAM8xYofz5x/+WXLlu+Yr/KARWAGhZnl5gNebMHTJNruf5xaQsCeAREemz5obMklc4S9anSD2JiOhs6tzp8XJ1fQzj8MRjFn850ZtHRPrUaUMW5Rhvdsd17hs+f164mEdEmuTw4atUDcbqELHt+3C32uOF3tLUNRJ7asRvcwOkcv3NH1u7Dfviyy8CnO8ci2PvPX7J4nFiHhFdLUye8/mq3ypuXmTvM/mrhaEerRspw75HxJL4cHFrKlg5/OOtmlsnOLUl3dGe69x38pJFEg+Szxo4N6tBFCUiEoZu+HEyrRn+8dZyv4X7lrx5V/HErT2uSZ/18eKsivrixVNSNox2buzmAeCZczOzkoWpSwEAMAGlUnnvBp6eno86RqXmnyoi0mcly0oDwt2INDuTsiqIqLxcayDikrEwMTpermY8Rkonv9OV/9fe9V8m5Hw3b5nHtgVv1k+mCoctiJG0rTi9d2tC8m8J05a4bFs4qC6LcjxCF0z2sSUiYqycm5/ReG9+8eWYdvqCn+JXylNXbB404AvvBmO9pN67eXli3sZlaYO2hAoNeetmrfqtwr5H+IyJg92odPe6hYk5q2atE2+bJubeLGNZtL9F6d5Ny5KPJiakDVszsq5A7wlrxnarbcNvZ1s/fF3707u/iZf9tmq9fPCXQ3uMi/9yWA1pdsXHKzQeo5d80pNLVs5uHCq9u3i/yYvf8yAiIsbBnchYmLY1q4LcQr/8MuxVg+ZUKYmRVgGeGxx7z+AhZQk7Cq4gsAIAPBn/VJQTcXk8g0omOxwm9TyRvKOAeDzS6ysu6onsqSAz7YyBunyyZNowZyJyD59PJcNnybN2HdS/OagusbZu6yHycCMPcQ+Hy5JpqVmpey8MkjjWnmN4zu3a2RMRcW1v+wr/3rg2zmKRmDxYnx/ksspyzc1J5NqxRG01uxLzcio0fxM55qTu0lBrn4gFEX72ROQWsUBXIFmUJ0/LmyT2qRuS79zRQ2zv4axOTVbmaf7WENUGVi7X3rndywwRUWue/a0lCnXteSqZ7GRh+YUKIjeP1308iEpPconIwb2Hjw+PmsBt7ez8cjuGiDhce1si4nJfJDLqC0+X6sU+Yj+3Zn8MAPAMqDy967//u0KEGVYAgCdEp9cbyfktiZs8cXdKlqRiz+4L9kPGvHHgu1R9RQWRvV5dqiFyFt+aEuSJu3bkyPPOl5w30p2RrbW4S0dKzdGUaohqA6uxICE8MIGIiLyjMtYMb3QRQCMMZ3NSd5ZWKFN3XyBy7djJiqg2s2pzklfq6ULe3kNE9j283YkuaM5XEXmIu97s275rDyHlFZdoKuvLoPIT8lQylubsyCPiebzqdnOcnMXDA4mIiPvmkn0L/eoOGzVZG1eVGkrzsguJ49y1+4NNiFbsmjV8FxEROUrW/Cz15rgNGz9s9/zUvG+mhWy2F78VPnmiRGx7n04A4JlQeTr9pwxV5XVL5zfeQWAFAHgiDJXleiLntoMlg3Zn7Vg/6w+Nvn24xI9O/EB6vY6IGIbLJWKr2FvXsAaWiF7kcht5ZMpgqCLiEJepP8Bx9psQ6t2aiMihW5NTknfTH01edJSIiOc+SBod7nFzrMqC1O0FZCSy95GuivazJari1o5sMBLVNjMa9FeJiLlVobFUvnKRnIi49t6jZ0wedKsSj6GTh3lwiYiEHg3G1+TtTM4zEnGcB/3fGqln86eGiYh4npLwAW5cIrLq2I5DRGTv98WWzkGZ//kpLT2rYGf8NC1325fDmhveAcBcVf4v/afM2rQ6IsTHGYEVAOCJYC8bDEQ8K3vvEUEeOxIK1VzvKIkHbzePSKO/TERcUVc3TlbhYXmO3rv2O/BSxd7fjWTfsePds44GlSz1FBGvS1e3m8ecvYdKGn+46p54b0q/HNPV3tHN+Y51BB0itn3tvfvTzxLP/J5XrJe429O/OorbUkHp3r2F4R4iLhEZCjJzzhMJxZ0arkldGj2kbZu2LzvzbuuP69wnVPLmXcNzvKU/L3be+OG0nZqT+Rp9gPMDZG0ibnu/0JHedx50FA+bIB42Uj4reG5Wfl6BcZhfo5skAMAz48r54vOV1y2dfUaE+DhzsCQAAOAJ0VfpibhcK4Zch0n8ZKtPvREaYE9XuQyR4YpOT8QTBo0blDRrV+qsj3TD3
mzHPZ+3W15gaC0OG+1zK/hpjyavM7S5UJCVlacx8LwnjvO7tR9WSda6+NLaphbOfuNCvW3rju9esaiwdiLWbVDkGO87EiHXxk0s8qBG8cThMyV7I5KzVq/M6r3AjyeWvO+Ttjgn8fOPzr31hguVHEjPKjXyfN4P8mi4JlXk4dFIbjYUZiyKP1p3Kx5B0mGi+jMcns8k6bCD01J3xicO2jL5QSZZ9cdl8XFZdTcilkx+y16++N97GHHXtjz2fE7BVSJXFzx1BfDssxYFvsd0Mjh3cKz9ywaBFQDgibis1xMRtxWXyH7YwoxhtUeNXC4RVRn0RDzi+f177QLb+PU7smSbsoiI18Fv8tTZoe61TS25VkSVeanf5RFx7Tv4hIZO/uQttwYjVOSly/Jqf8kRO4fUB1aqKFCkFtT+0tMjYsyd85H3xu0SPvmt3dPS5eu3jvGb4OH8zvy1tHrZN5nyrYVExLUXD4uaETm0WZlQk5Mqq+/VRzz5VmAlIp5PxASfvbE5sg2pkrWS5kdMQ1GWrKi+D4PP5EGGcnVJztG8rNph2vpERIV6YHoV4Dnwom37Drd+wj6sANCy1O7D2pxtrR75xQHNZtRXnNezrR3u/I6eyFBZoTdyuTwe78GWej52Br2mXE88B2dTF9IofYXmHz21tnd2fKD1BQDwzMAMKwCAqXF49sLGkxbX1t48nh/i8pwfbLHpU8Wzd+aZx+cEAE/Gi6YuAAAAAADgXhBYAQAAAMCsIbACAAAAgFlDYAUAAAAAs4bACgAAAABmDYEVAAAAAMwaAisAAAAAmDUEVgAAAAAwawisAAAAAGDWEFgBAAAAwKwhsAIAAACAWUNgBQAAAACzhsAKAAAAAGYNgRUAAAAAzBoCKwAAAACYNQRWAAAAADBrCKwAAAAAYNYQWAEAAADArCGwAgAAAIBZQ2AFAAAAALOGwAoAAAAAZg2BFQAAAADMGgIrAAAAAJg1BFYAAAAAMGsIrAAAAABg1hBYAQAAAMCsIbACAAAAgFlDYAUAAAAAs4bACgAAAABmDYEVAAAAAMwaAisAAAAAmDULUxcAAABwm1atWllbW5u6CgAwI5hhBQAA84K0CgB3QGAFAAAAALOGwAoAAAAAZg2BFQAAAADMGgIrAADcolcmTpMEBgQGTvqh1NS1mAVtTpLiDPvYujM+tp4AWhTsEgAAADdV7N6QTO9vU7xj/5g7NpbKYlMdJk32e7iOdbmblshONUh7lq5BUZ/68h9TdTepM+LW77tQXfsDp/OYeeO0O1bK3ggK6MA8THenZTHZwikRvevr1MkmDZT12LrtI74yQ8n4B4isHqw/tlixPkF2sPSSTcdB4ZHjegsepiiAZxECKwAA1DNWaMrbduzyuNMqEZGm8Gjhw88vWrt3DwhyJ8Oh9dKDXRdO9eHRS+4PFSHvo7xAnsuGznzHnUtEHEcXC9I+Snfnc9P2eX0S0bv+Z37QghRfayEZs2XLkkQ9HjiwMhyu+5CpI9qTMkE6Y8nLvy4PeBIfAoAZwpIAAAC4iSVjqWxyYOCw4YuyDKQvSJ79QWBA/wDJpFVZGiKiiqz48OEBAQEBgbPk+lLZrJDAwICAgIDh0xbFz/pgeGBAQPCkhMN6IjJkLQqRDA8MDAyZLSs0EBGR4UTCR4GBwR8knHzwujgCT39fX//+nduSY4f+vv6+vt2EDKtWLIsMeTcweEJMejFLREXboyPCggODAkMmxMjOsETEFqfHTAgOfDsw5AuZmoiILUqJiZAEBo6KiNtZxBIRaRVfRY6VBAa+O3bdUSIixrVPgL+vr7+vb3+RgNOgBqNa8VVkSFBg8LjopOM6Mqo2TY9RlBERaTPjorcWERFV5a6cvk7Z9AqC4p0rkg/piIiMp5Knjx07QZp0mohY1faYiFHBgaMiV+5V32yszYiRblbV/rpoe3RMhppcfYf6i4SuooD+7Vn9Jd2Df5AAzygEVgAAaIDjFroqIyM15Qs/ylkzL7X1uC2Z+zJiehQuWyTTEF3V/F7pszhToUhdMshKrynhSlYpFJnfhbKZJzwWpGQoto3nyjalaYi4fl9sk6VkZGwJvZq4ab+BiIjbNeLbjIyft0R0eSyFsspVEUvLBi/csjXeX73030lqIvVR+aX+i7Zu2TzPR70yelMRsftWLSh6PT4lLWX15EFCIt2+mIiES0GLN2+eH3Dh64i4HJaM5/IVxZ1nbN6auCysCxGRTiVP2pqUtDVJdkjdcDzlqoilhb1nbdi8ejQjmzI7/R8XR4M89aiOSHcoI0mWmaslogJF2kW+S9PTnhfOHMovY4mIOO0HTYuNnT01qB3p9sZEfk+hy7/b+kXng3NjZPWTugIho9ohVxmJSJ29I5echEREZ7M3LZN+uFYf/lkQVgRAy4HACgAAjTH+nneU6z/Kz55DXJFkmOj3nGP6W2cbTj1ynLt2cTawBiKyF4vdyjUVRqr4LWFWeEjIqM82n9JfrtDf2fljKK9YvlcrFFzKzUjLZXn8M/nKKiJiLPkOfDuB6J0A95KiYiMj9HApykyUHdDynfhElJ+Z7fDe1KEdBIJOkqnDHeSZ+URExOW9JODbCfi1QZPV6S7qdBd1uoYTmEaVXEGDPgvzdBIIA6aEeRzafZjp5eOuOpDPsvm550We/+Qe0pEqJ9fBx7d5OdLSwUUodBXyGcpXyMmNr96flnaKBFanlKr6Jl0G+Rqz5GeI1HJ5pW+QJxERWdk4Ct1fe0kt35WLGVZoObCGFQAAHhWXw6Xa78FbMWQk1piTsPiA88INSzx5eXHDNxMRcYmIah7jmJdYlmEYohoijlfoDKGIQ7dmRK0secQSkejTbZtFSYkJoweuDV29ZZyhirW0rpv/ZKws2St6Iss7+uV7SiZ+KmpsOK5D/aWWVmS4wgr69XfYvC/3EP0hnjiVWSHbryw6wPVdLHzAG2ENVSxxuFRDRPzeY6a4t68/w/EMGkLSXcqhVnL9wHmetf9IEHgOHe05NPDlyMGJ8vG+EsyyQsuAGVYAAGgMp6N3D8PeH7MqjGQ4I0tVdfTpzmvutUaqNjI8u4bt+fY8TWGpgchgMDyW8jp7daZLVr0l74eFvR8WNtq3iYewGHf/cbFbVgdVpstPM6IenYsUcrWRyKiW7y3q3MvzvuNwLYhlWeJ09hKXy3epWCIqU+xWunh68UkYFOSSveLLY6/69/YK6K7aMF/OGTSoQ3Oqt+Ez2nPldRWKPEVslcA3NCzs/bCw9yW9GyRe0XCJIHPJ7HQaMVJE1GBXrPK/tMSzxCNX0GJghhUAABrF9Zk0v3ThwpCBc8m26+BpX0icidT3v4yIiOs9bkLm3MhAGTEMh9vFi0scj6DRXaYtCAwgnv+s777wa3b2bRJ/6MyZuVPHDtzp4MI1kO/CbZ/fnT516dNHr1MLeKy63GlEfCcSdpoz5ejk0W8m80lHPaasGiEgOnfvYTr3769eMiPJZ3OYdG7upAkDM/mMnnUfv3pcByISBL0jWvmV48xeDGMR5HtVdmqUxP2O65Vxga+vJCLi8IOW/9q/9iBHFDSc
9+HHAw86MO6fbI0fvfAT5eTRbyY6OHANLwWt+mbcrU6EQRLRygXGuRJXIiLV2pBIBetgZTinIc/PVwU89m29AMzVCzdu3DB1DQAAT0/tykSlUnnvZp6enq1ataqqqnoqRcFtbG1tm9+YvajVEV9g1+Rko06rZS1ua3DfS+4ao271ARHpyrR0c7XrI9JptcYGZVRptVcY/v36Zi+qtTriOwkfTw0AzwgEVgBoWRBYzd8DBVYAaAmwhhUAAAAAzBoCKwAAAACYNQRWAAAAADBrCKwAAAAAYNYQWAEAAADArCGwAgCAebly5YqpSwAA84IXBwAAgHm5du1aZWWlqasAADOCGVYAAAAAMGsIrAAAAABg1hBYAQAAAMCsIbACAAAAgFlDYAUAAAAAs4ZdAgAAwLy0atXK2tra1FUAgBnBDCsAAJgXpFUAuAMCKwAAAACYNQRWAAAAADBrCKwAAAAAYNYQWAEAnkul8i8Tch71/aZ6+Zzhk7Zr7t/QmBc/6oME1SMOB3fQKlPSVTpTVwFgBrBLAADA88hYfmJ/gduYR+yF1+eDL9raODd6Tv9bwrK//BeM9iAi4nSVRM3guj/icE3T5W5aIjtlvHXA0jUo6lNfPqvO/j4x7WhRudHBvcfg0PcD3K3qrzidnpiYdrD0kuVLr3oN/2RKgJCablyPLVJsSkw59Mc/1TbC7kGfTBnagXkC96JS5LC9hnjy79vSWJS2VtbeZ6jo/k0BnnMIrAAA0CReB29xE6f0f+UdLe1R/xPXrUdTDR8Ha/fuAUHuZDi0Xnqw68KpPjx6yZ0xFiVNHJtoHTrzk9D2HHX2t3Gjw//4bstEEUO6Q3GjpQe9Pp06L0JIZar/1RA13bgeq0r4cMJOYfjnU8PdGa3qmJ7zBNIqEf0pX7Ge4gM8+Zwn0j3AcwmBFQDgOVKRl7BwkSxfz3W0p0qHcCIi0uyKX7R5b8llsu8evmCOxI2jkS+etf5gBXF5PjO2SH24hqLU+MUJe88auE5BSzZLSv5vVsKR8wYjb/DCbT67gpPFW9YM18tmzUpUVhiMxPMY/EnU5EGuXCLSy/8deJDLdJuy4f/aJoUu4y3aEu5Omv/GL1qz+8RlrlvvsOioUA8eFWyaNG/H73oDSzZdJVGLI17nPfB9cQSe/r5ErH4nFXXo7+vPJyJWIV1fFrT654meDBG5uy931I/4cEVGWMJwbdIymSAyI3akgIiovbvoXo3rZy+1aSs2sqFJ8eM6EBG5txcRERnVilVLN+4pZgVekilRYd34dDF75fzEg6VaPQlEgz6Z+WlvyohZqpXEjxURUdH26GTrT+YMJMWqpRsPFLOOvcfPihranmFPJsWsTfufWk/iid+EEmnS5n+cb8n0mrpmoqc2e92yRHnhJRuxZOasMBGfiC1Kj4/bmKtm2gr0emr/qP9NADwPsIYVAOC5oc9aM2+vk3Tbr4qfvwoV137frU6e94Mh9OuMjNQNkquJq3ZVUHHapgLv+T9npPy8TdqLS8bCxLmbdUEbMhSKjI2fiKmitJAkaxUKRYr0dW5dx0a9poRbe3BJn9KV0YmFRiIi3qDFGakZKTGD7G+WoE5e9GWpT1zGvswt4xjZrHV5BiL9XyXO47cpFPu2RbRJXZlcaLyr8IeiOqqkXv09b06DckSD3hCcysuni/nK4s4B/QXNanzTydz8f/kNuj0eKldFLC3sPWvD5tWjGdmU2elaIl3RQaXD+PVbt64Ms9k1Y+leViBkVDvkKiMRqbN35JKTQLkqYmnZ4IVbtsb7q5f+O0lNxBbukxuDVidt/ebz/gIicvCdGB0bO1si4hRtmh6j9Jz5XdJqiTFRmqAkYpXxESu0AYs2bI6P6CXALCwAESGwAgA8P4yleQU8/2Afew5x/+XswCUi0h/N+11zID5i+PBRkzYXUvnfFSTs4cPNWrY4UX5GTxwi9YEDem/JUGcuEXG59xmCeB4jQ3v8k5d3vvHT+qN5v4uHSTpwiWPvN2Iw92jO7w3iqX1Pn44XSprxDFezVLMs38qy4RGuNZdlDcSyLDGMVfMa12NZlrXiMQ0DolElV9Cgz8I8nQTCgClhHod2H2aJiDg2PAGf7xoQ1JspKlRTl0G+xiz5GSK1XF7pGyQulu/VCgWXcjPSclke/0y+soqIiKwchXy+QMAnIuLyHV2FQqGAOZstV/GEdCwtI/uSlYNWeUxtVMkPMIM+koicBEJPr7tW2QK0UFgSAADw/GgsbxrIK+K7L4c1/Bp+8uYNBXJZ4r9DUkeuXdPrAcfgUF3ue4glnlaW903Ezefe3kW7/w8tedZPpepUqnMuovZkxxVaFxWfIerRjMb1GOHLQs0pVRUJb2XESyzLdai7S8bSigxX2IYFMFYMsUQcz6AhJN2lHGol1w+c58m5lMYyDENUQ8TxCp0hFN0xS2pBdDPEG1iWYep+bB80pVd3PrENBgWAOphhBQB4XnA6evcw7N4i1xiIiAy139p7+nQsSEtW6omI9Ho9ERn1eqO9+K2I2e+3O3G4UC/09m59QJauMRCRwdB075cr/tYTkUaeeqCNWNyWeLw2BnWhxnjbVbwe3m4FqbIzBjJWZO3Ybejh0/GJfaktGDKmV+H6pRnq2hSpVcSty/MKG+ZOTG9JICP7ct3NDaHYKrbJxjeJRgx13vf1qlxt/UVsTWcvcbl8l4olojLFbqWLp1fjj+uLhksEmUtmp9OIkSLidPbqTJesekveDwt7PyxstK/7HemzNY+n116oISKiVzq/xugZsSSstvEQEZ/Tvre4XP4f5W2bWbFq5VE1SwAtF2ZYAQCeG1yfSfMlccs+DlxmIOI6+vtwiRwls6dpFs4fLrtMXJs+UxIWDPon7d9RSSUGojYdw6L68Di8T/4vPDbu48A1BmrTZ8bG9xrv26jPWflhwDw917FH2JxwMYeob5gkfdaHQxK57uFfrarfIkAYOn+qZl5U4PpKrrNP2Pwp3o9xSvVOgqELV16YHT164FoHByo/Z+wcvmKZREhEjOfnq6LmSycMTua7ONDFcrZXbMaiphrX47hPXL5QP3NGoB/fxYGr/7tc9HnKaunc3EkTBmbyGT3rPn71uA5EZxurRBgkEa1cYJwrcSUi/tCZM3Onjh2408GFayDfhds+97ytsesgiWj0jLeDXRih5JvVUxYciowaOJDnwjPqX5uWEuvPD5g1N3dKZOBgB4fW+nNV7lEcYvetmDCTnZu1eij2t4KW6oUbN26YugYAgKdHp9MRkVKpvHczT0/PVq1aVVVVPZWizJ6xYFX9JgBPga2t7QO1Zy9qdcQX2N31PXqVVq1liS8QNjjVZOObDbRqbRUxAqHg5pauZVqyE/Af8Gv6ew/EarWstYBfNwSr0+pYa4HA6vbLaxh+3bCsTkf8B60A4DmCGVYAAHi2MXYCQaMnrARC12Y3vtlAIBTefoTvdO8rHrCqulEEDeInw7+r7e2XM3zMrULLhsD6AIqLixMTEw0Gg7v7U5lkuCeWZRmG8fDw8PPzM3UtAPC844gn/7jF1EUAQMuFwNpcUVFRpaW
ltra2bdq0uXr1qqnLIYPB8Oeff+7du3fFihVSqbRv376mrggAAADgiUBgbZbFixefP39++fLlbdq0MXUtd0pJSfnuu++uX7/ev39/U9cCAAAA8PghsN7ff/7znyNHjmzYsOHFF81xF7Dhw4dfu3bt66+/RmAFAACA55I5JjBzc+jQoW7duplnWq01atSo69evHzx40NSFAAAAADx+5hvCzMcff/zx1ltvNXKi5qmX0rRXXnnlzz//NHUVAACPwZUrV0xdAgCYFywJaBbuXe/XPrvu3cEHJYe2jLF52D4v7Jcdd5EEtL/96JUja+d9/ztLRGTZ86O4D7rWHS9OW7jT8uPPAxyb6M3Ozg5b6gLA8+HatWuVlZWmrgIAzAhmWB+S6wff7I5/76HTKhG7b9OMnwruetPepeOKI/T6SIlk5Pvv9XElIqq5cGRT1ODhE9bsOoG/vwEAAKAFQmB9SOyx7xf+eIKICpOiPhw+2M/Xb/AHc2QqlojYvG8nh7472Ndv8PAJy9PPskRUc+LbyIWKCiIiyl3+4Vf7WCKi6n2rPhgVOipqR4M3/V26dPVfXQf169+/X8+eHWyIiK6cPvx319mxH7liNhwAAABaJISgh1StOaE45UFEZ4+kXvLfvjPU6ez2yJHTv+35y2e2RfvTLCRZKYPo1E9zPx81xyY7rk9l4f4jxBIRsX//vu9E7atXLLuNiokbyLS2b/AmlvK/LxTKo2f9/brfe+8P8WCIyKb/Z/8myptjgpsEAAAAMAOYYX0MGBsnG3vHriODuhUfL6x9EsvaxtXe0bXfZ7PDLBU7DzV1oc2/XMVmMkwAACAASURBVF3buzo2XFjQXfrj97FjetoeXjZs2FcnnnTlAAAAAOYPM6yPTxsbSyNV337M1tr26hWWiCELYu/aVeCuFaxE1o4eYkcPcc/+LmW9Z6adiOzaFb9FAAAA0LJhhvXJqGZZIqo5K//1dKeeHmTh5Op4+nj+pQYtGJs2zCVN2Z0X1ofaC2f/rLS2tUFaBQAAgBYPgeiJqM6K9n1jjWXNperO0m/CXIlozLT3fpraq/daV8sLhYzfe0TUf+R7Cz8e3FvmZNN3we75tS+pYtOkvgtPOTlR2el/XD9eM8b1nqMAAAAAtAQvYPPO+3rvvfdmz57t6trc9Hjpxw89sySn4/tfqrF0tGFunahhL1y4xNg42ljXH2EvXbhENrc1IlZztqymtdMrjg0P3tfXX3/t7u4eEhLyIBcBtEQ6nY6IlErlvZt5enq2atWqqqrqqRQFAAD3ghnWJ4Wxtrlzk38LxtH59mOMjeNdbwJgnJsdjQEAAABaAATWZjEYDM1vbDPqu5IRT66WxlVWVr7wwgtPe1QAAACAJw8PXd1fhw4ddu3a9WDXPPV/CPz555+vvPLK0x4VAAAA4MlDYL2/119//fjx49evXzd1IU3atm0bEfXp08fUhQAAAAA8fgis9/fuu+/27Nlz6tSper3e1LU0YseOHUePHp00aZKpCwEAAAB4IrBLQHPNmDHj7NmzdnZ21tbWzd8x4MkxGAwXLlz466+/qqurpVJp3759TV0RwLMBuwQAADxzEFgfQHFxcWJiosFgcHd3f5R+NBpNenr6hAkTHqUTlmUZhvHw8PDz83uUfgBaGgRWAIBnDnYJeADt27ePiYl59H6+//57Lpf7iIEVAAAAoIXAGlYTEIlE165dM3UVAAAAAM8GBFYAAAAAMGsIrCZQ+6SUqasAAAAAeDZgDasJvPzyy5aWlqauAgCeLIMyOX5nqcfILyQijXz15jxrv4hxPvZEZCyQLf6p0OM96UixRrEqOUtTftXAbe3gLPIZ9o6fG48qfktIyGK9x04eJCQylqYu33TSeVhkH03CD3m6hgNwOr0XFdrxL3niD5l5f+kZ27Yeb4755C0PronuFwDgycEMKwDAE8Gq81J3ZeapDWSsOJ2dmvrNrM+/KTAQkfF8nlyeeew8EWly0lL/m1N6Ua/544BszawPPk0sNJK+8EBq+u4T5bXdaE4q5KkHSw3687+fOnmy4GjWLrn8v0dPFpw8qdJUXMqJnzw3MauUcbDnVp7MKWSRVgHguYQZVgCAp8NQ+N28+M7fffH67Yc5XUOXrpE46uWzh8/N2nugNNy/0as9Izb8GEHGrLkDZ8nFERtWDbMnIlXC+kriDZi8OMaP9+RvAADAVDDDCgDwVLj6DRPrUxcvlJ+/4wRLNQbDhd9LNCwRw7R+kD7dxN6OpFfMDflobsKuggrjYywXAMCMILACADwV3I6SBTP8KGvZ4lRNw2RpLIgf3r//sEmJKnIbGjbY+UH6bO0jTVgjHd6VOStPnP9xyOeyUmRWAHgeYUkAAMBTwnUcNHvGbyGz5IVEtxabcpz9xki8nR2cO/fw6WBPRGxrhsigu2wg4pLhso4lYrhNLk519JZEeUsiC5OlH606KttdLIno8DTuBQDgaUJgNQGVSlVTU2PqKgDABHh+M2YEHJ2lqGhwzNl7RKjEscHPYrEzFWQlxCdXdmEPJecYyKNL18aXqBryklcc5Xbu6MCUn64kIh7vgVYUAAA8IxBYTUAkEllY4JMHaJl4fpERPrmL8u7RpEv4/Iml8zamropNJeK6vSmNHu3WeMu/CnOyk/JSDEREXHvv8ZODhI+/YgAAk3vhxo0bpq6hxcnLy5s4ceKRI0dMXQhAS6TT6YhIqVTeu5mnp2erVq2qqqqeSlGNMegryvXEc7a/3/P/+gsavYHhOdrzsKkVADynMM8HAGCWuDx752btVcVzbF47AIBnFnYJAAAAAACzhsAKAAAAAGYNgRUAAAAAzBrWsAJAS+Tp6WnqEgAAoLkww2oChYWFpnz0GAAAAOCZgsBqAh4eHlZWVqauAgAAAODZgMAKAAAAAGYNgRUAAAAAzBoCKwAAAACYNQRWAAAAADBrCKwAAAAAYNYQWAEAAADArCGwPmuqq6ur79fk8n1aVF+urK65vdM7L7n7yO1qqivvNwoAAADAY4HAal4q9y4M7CPdU9P4yV+i3uzeZ8AAr/bdP00pu3W87JdPu7ef8AsR0eX85cO6+wT4dO4n/aWssT5q8pcP8xkw7O3uou6Tfi4jopKtH/mIfQb4d27fL3pPJRERnd4Y1rP7AP/u3T9ILmmsksqfJ/n0GzDCv3vnt5fnI7UCAADAE4bAaj6q98x98+2vcs6XNZUBbbtP+DYnPycn/1ufvWuT/qg7qlr92Vq1HVVXE1HJ99FJr8bnHDyWMTI/Ou5gI31YeE1PycnZk3Pq+wE5q5NKiNr1m5105FjOkWPxLyevTaskqkxZsJyicnKO5EirYxemNVKMbdCanIM5/z2SM91y7dq9j+fmAQAAAJqCwGoCKpWqTZs2dx229InKyNk+XmRZf+DyL9KR0XsqqVIhHfHFnmoip1fbWRLR5cqLFk7tHIiISLlQevDtNZ/XXlSdv1/V3d/Hkqid/wDLIzkqopLVYR8lllRXqzZ+ELaxlIiILKi6smTPXpWlt1dbIhK2a2dJRNXnKy3bCi2p5tieI+0G+NsS2Q4IaJ
ezP//uMsiCqKa67OQvB7VefURP+tMCAACAls7C1AW0RCKR6PLly3cft2xjSQ2/gm/ztjR8T9hbPldb+8RuH1CXY6tVayfEVk/bMdyWiErWLjg2/Msd7UomERFR5cXLlpZtLImI2ti2vlxSWUN9Po72+iBswDfUdtzGJDciIqrJ3/ipdOORi90XtavPxpUHY8I2CmN/8bekmsqLl23t2hAR2baxvFh5sdEyynZFj4/7pYQ3frztY/94AAAAAG6DGVazZmlBRNVkYVn3L4uako0fhP3Sb2NSeDsior3Ll5eQ6pvo6MRj5wt2rP3tBbs29Y9kXa682sbO1qKuk+oasrS0rIunFl6fJf/32MHoq3Ojk8uJqDr/q7DPVKE7vh7uRERka9em+mI1EVHl5Wo7W7tGyiByCorP2H9qR78dn32Z/5Q+CwAAAGipEFjN2OVfYtdZzk47lvJ59dpFe6qJVKs/2iiMT/rcqy56ij9LWi0d8XbggJ5t7dp293nVxqtnu2P7D1YTlR3MqfTq046oZEPsnoCNOfs3+uyKXlvaoHNLy9ZUebWGqnNjP0vz2bhpfLvaMGrR3cdLdXB/NVFlzv4Srz6iu8to2Md9dyQAAAAAeEQv3Lhxw9Q1tDh5eXkTJ048cuRII+dqUsLEe8YXrBnQyGKNkrUDusdqRe3aEBF1j/plTXD99/F7J3VOHHBqy3Cq3BM9fNIey7bVWifpT0mhbnf1od4Y9l5yZVvL6jMllqEbk/7dp2Ru9wHfk8jJkohaB8T+d8GAauXyEe8nUVs632Z80vbPRHdVcnDum9PzbNvWlKgudo/+6dtGRgEwYzqdrpktW7VqVVVV9USLAQCA5kBgNYF7BdbHoLpSXWnp5GTZ1Prk6sqysovk0M7p7ue+GrYpJ1uhrWVT58tKztfYtW26AYDZQmAFAHjm4KGr54+lrdDp3ued3O73qJSlrZPwnued2rV74MIAAAAAHgbWsAIAAACAWcMMKwAAmJdWrVpZW1ubugoAMCOYYQUAAPOCtAoAd0BgBQAAAACzhsBqAoWFhXj0GAAAAKCZEFhNwMPDw8rKytRVAAAAADwbEFgBAFq2M4kfSOJzDKYuwxxolekZSq3R1GUAwF0QWAEAWjbXwTP+LRFzH6EHjTx+carm5o9ZcwP8AwIDAwMDh89V6OsOGjXy+SHDvyx4pFKfsNyvI6XTI1ceepQ+WO1xhSxDqX1cNQEAEWFbKwCAlo7rLO7xaD1cKsk7RcOMRBwiIv1Vg/27X26bIr553qCSzY1NLq1hqdejDfSEeb0/N8qVfB/p09CmLYqM00/8eYingPO46gIAzLACALRw6uSPg+PzjGTIWfVBcGBAQED/ISGzfigwEBGVymaFBA4JCAgIGP5ZvPysgcggnxUwS2EgIrogmzRsUU7tF+hnkz4PDgyUxOcYSV95mcfjNRyBa99VsuC7JW87P0qZ7PFN0gkhwUGBwWGRcZlqIiIqkk0PCX47MPhDadJx3W1tPpSu3LwuekJI8Lshkauy6+Y7y7LXTR8bHBQ89osklY6IdNmrpHGKW5OhWuVu+a7d+Vqis7LoD4MD3+zXz29gyNR1uReJjKpNkSHBg/v16zcwcFyM7KSOiOj4urGjQuL2sWRUrZsQErIkm63t6JxMOio4ZKZM/Sg3DAANILACAAAREftP6fnOMzIUin3fSi7/kJB6gcio15RwJWsVCkXKkj6lK6MTC5ta3+ka9tXPGRkyqQ+HDEbS7PgscNTHczflVNSedfTwduc1cWWzyys6tI8jWb1l6+rJrx6bHyPTEpFw0Ixvtv2cEh/Iro9JKrrVZnN8ICtLUIqmrf5m5RhmZ8z6Q0RUtGl6jNJz5ndJqyXGRGmCkogllqUGd8SWFSlPFmmvEpWrsg+pWPcgSX+H4syV0V8ridSnDihVHE/JsN48VVL0x7PTLxJdOnfquKroIhFdOFegVJ3V1gVWxqG9uPtr7QXMI94zABAZdWWlJaUIrAAAcAuXiITePYSaUk3DwzyPkaE9/snLO3//HtzGrMnIyNi2KIy/d968FM39L2g+xkZoxxf2GurrUqT6k4gYtvxQ2vak7IsMv7y87mEpxkZoJ3AfMrizkWvTTiBwDRjsqVOf1dHZbLmKJ6RjaRnZl6wctMpjahL4zlgdNUTQ1GhC//Ap86YGCUhd+L/aaVjGY9iUGbHfzPBlLu7bncM2WaeD36fRc+ZE+DbZNQA02/VrV6qu6BFYTUClUtXU1Ji6CgCApvB4DNEdk6kcsqz/pYG9PatZEF2/qwt3v9DBbr8XlD6B8vh8a6Ia0u2NHj1dds6Cz797JtOa4ZKBWCJiuAzDGlkysCzDkJGohqh90JT3+/CbNZYlY0V0+1/YDI9Htz4Elox3fRrGprMsADygVvbunbuI8dCVCYhEIgsLfPIA8Ey4XPG3ntx5GnnqgTbioLZcpr397wcPVrw1yP5mE2ser+JEqZ48rAwGLqvXkL0zj66W5hzTtO32SOtW701b8D/qP2/KaE/SsunfF9+n9SudX2PSGbEkrMeteKs7k3vOtreo2ROhrDo3PfPSqa37WE57TzGfOC4uHMrPTFLYUf1mWHxHPkMnD6ZlZl/gOXr1FzUvFgPAfSA2AQBA04z6nJUfBszTcx17hM0JF3OIRkqHzV0UMnAZMVyeu8SBiJwHhw3KXPZewLI2PSJXj6tYPEtWqDdweG6+EQtGuz250tzfkrh/EjnwhLvQqD5Fvpb3bs34TllwKDJq4ECeC8+of21aSqy/OmlGRLrvtozPRc0dUiWTTmWJI+j92bywDkQkmRoqn5G8MnIfEZGAb8MQv//YcaKj6zZNj9hkJ0nIivXFOlaAx+GFGzdumLqGFicvL2/ixIlHjhwxdSEALZFOp2tmy1atWrX0tygbC1aFLuMt2hLu/lSHtbW1bXZbVqfVsRZ8gV0zgyGr0+pYa4Gg9m2DVTodw+c3Z/8po0LaM1LRf/X+Bb2IuX0RQpVOe4UYfoODRlar1TF2gkbWKgDAQ8EMKwAAPLsYvuCBHm26vb0V/0G/sufffYUVX3DHy7Y5jMAJD1wBPE4IrAAA0ASOePKPW0xdhHngBMQfOW3qIgBaLuwS8FSVlpaaugQAAHgonLpXeQHA04fA+rTNnz//r7/+qq6uLi0tDQ8PN3U5AAAAAOYOgfWpcnNzy8rKyszMfOGFF/z9/X19fU1dEQAAAIC5wy4BT1tWVpa/vz8Rubm5lZSUmLocgBYHuwSYvwfZJQAAWgQ8dPW0+fn5iUQilUo1b948U9cCAGCmLl+5YuoSAMCMtNDA+p///EetVnO5XJOM3rdv38uXLxsMhm+++ebpj15UVGRnZ9e3b9++ffs+/dEBAJqjjbW1qUsAADPS4pYEpKSkrFq99kYr3pVrrciitanLMQWD7oaRtbe1frmtw4L/m+Pi4mLqggCeKiwJMH9YEgAAd2hZgTU1NfXrDVvKr7cla6GpazED2jwX2+tLFsWKRM1+LSHAsw+B1fwhsALAHVrQLgE1NTXLv1pZft0ZabWOwPtc+dUdKT+bu
g4AAACAe2lBgTUtLe06pw1Z4xvwBuw679q1q6amxtR1AACYL51Wq9Oxpq4CoEVrQYG1qKj46vUWuWj1HiwFnFatz507Z+o6AADugy3OTj+kfdqjqtOlg7u9PnDgwIH9gr9SEak3hXXr9FqnTt36xeQ8RHda1V7lHffAqpXZmQpFjkp7KxLr1IcUisxsZXGD5SuHYgZ269TptU7dXpcqjA2uPpl7W7PGsGWq7Mx0xb6GQ9RerMo9pFTfvJrVqvYpFHuV6nsvhNGplXsVCkVu0cV7D8Gqj2crMrNVZffO+qz2dLYiU5F9sv5SozLu7W6dXuvU6bVukZn4dwLUaUG7BFy/cYNaNfrYKdPR3+f9N1za8S3YinOZKfu2n3mYPyFcJ48xkq59XdpQxdntW/dnnqshS9dxH3XvZstwjaxGc+7AnuN7/jK/P3svvPDCCy+YuggAgPvQ7lu3tHjq0F6CpzmoOnNjuloUtXvbuPqlZOM2HZbkxAROyn6Yv81PJkun/zH1yOqA+le8qndGjv5CoXtJwPyjZV3DViXN8eWrZZNGR+9jBS+RVku9Z2/d/L57bWO2xn3clq1TuvAZDhERe7Fo3/cxC77O1Q1ZfXxFQFNj6g7FjZ6wSW0lYKq0rEvYqqQ5vnZERHQxO+bDCNk5gWTT/lgfoiplXOiHm/7kCyy0Wp5k9bbYgEY/6TObxn4Ql2sUCGq0WhJNTNw2pRvT2BBaxRejpTt1/JdYrU44blNKVA+m0eqy54+evFXNODFsmY4/JD5lxVABxzPq58NTypIi3l5JxsYughapBc2wNsGi55jR64PtNfv2L92g+CHvEvuwfz4cXnN1OXdixbd7d1Z3nBvR3ZlDZPmvnn1szv33yA9ZRSUvvjL1/96f0wcbtQAAPCTdvqVjx42NWJatI9Id3RQ9ITjw3RBpQq6OiEitWBI5VhIcGBQ89oskpY6ISLt3ZeSowMCg4MjvVURERrXiq8iQoMDgcdFJx3VERLrcTTMjQt4NDBwVl/23ctMXMeln6wczFm2aGDz2u2IyFidPDAwMCo5R6IiIGIZvdXv2MmqzV0UGD+7Xzy9w7FyZqoqISHcySRo2sF+/foEfxqQXExHpFDHBkzYVVR1a+m5goCRacZGItKpC6hwa/+v+/b+uGcovlqXlEZ3NVpzlD12UsX9/SlQPXe4OeVGDoRiGz9QOzqZL/YYvPeng7nTvz4zN//Ug22Xid1n796+W1A1BRKRVxEZn24mE9dFZl7ku6Uz7KbL9+zNifS/Kvk4parQ79alzjPfEhF/3798d60uqtHRV40OcTft6p9ZrXsb+rJQpHYqSEtIanxuvOnXqojBoQcrhrP3fjXXXKmTZF+vuk7FqNOBCy9WCZlgb59R93JsWOxf95+szNUREZ84REVn+653R/UZ6OXArz+2S7dl04grZe0yd8HpPxzY8qi45mrN0ayHj99ZUh1MzfjxrIOo6LPid6n0xckUMERGdPnj20w9tXF4kDREZq0tPFB24QgcOnthzTpI0pn9m3q4j10x3vwAAzyx+j7A5k3szVgJ+mSxiurzz8m+2upxa+cmMOI9fY/uX5+8r7jxv8yftdfIlEdKvX/t1RvX62KyXv9y6uhNpdQwRKVdFLC0Mjd8w16FgfeSU2TY/rR5adUp+iDd+29ZeHGKsi0+xbPXNKQuOMGjaIsfvI6U7hCNiZ/pxiOfCb7Qq9fYZkxPUQXHfhDvIZ0+MjrR69ddZDvJvk1X80G9+eDlpXOSCVb0DVgTwvUM/9c+N3C4YETvTj6ntTBAwY3WAUatSpB/MVOmc+vuJiZzCEn4JIyL25Dr5GRIGejX+mDATsHB/AN+6eGVQev69PjPGd/bPvkRk1CoycrVWXl4eRETajAULlL3mznt5xURZbbti1Sn2Jd9XDbnZfws6u1Cuqpgl97szo/CdOQnvsOqjCtkBxSmjcFC/9o0Owaryi0k4zqk8+xC92lnA7lMVGUnAuas7q94TV/Smi0XZGTJFrprvHerV+GcM0OJnWLkeLp0qi/aUNnzqyKLnmOBPXzm3YtHW6F9r3pr01kgBEc+pZ7sr2+O//2TlYf3rb099gzmnpXZveHRrRcT5V9++LlRxqe5qjs1bA911h08dvyuVag4UnuC59Hzl6dwZAMBzx1ro7ioUChjdIUWulZBXKE/bq2ZeonxlMRERcXkvCfgCd8kQT13hH1qOsL1refYWmeJPEggYMqrkChr0WZink0AYMCXM49DuwywREceGJ+DzBXzGynPi8lhJ+5uDMYIOIpEDQxa8V7uIRF1EQrtGa9Ip9+WzwkGSISL3XiMGiUidd7CIhEHSORN7sVkpuWoj6bQXdERk595ewK3rrZOQfzO9scqNC2bH/arzChzmWf8tvHZvXMi4lee6RK2e0buJmUaGz2/2HGSVKmnq6EgFI1kUG+ZKpJZFLykeFDM3wPFWk0tXWOIyf/0nZkZiETFE7KWm1zxosxNmxyTkMj4jAkT1AfOOIVgDEZcKN86YL/uLw1AVe68VFKrkmDlxsr/bBw3vLbw71AIQEQIrteIw14zs9YZHXPy70n9/PnKk7NLv+/btPOfq78UQERmryy9c0ZQW7lJVt3vFwaA69Ru5+rtbkHPHN6yL9vyvhoiIYzNgwnvjWx2O/vGc4e6xDDXsNYs2rVr8rDYAwKNhq1jiWlINUQ25+H8S3t/httPWlgyxRMKwdVvneJfLJgUGLszW0SWW5fLqMh5jaUWGK4/noYJqI0scYoiIGMaCqIZIp5g9euyCfQaBSCTkNWx71/8ZjCzLCYjff/x4kuTSZqn0ezURaTOkoyclUeDqrRvGiaweub4q5bqPP4wpcI/asjV2iJCIcjeuzK4h9dbZkXFytVGXu1aadJoc7fikv+T+ecbhuO6snhi+oKm5TrZKEPbN4eO/rfItXhmxIF3X2BCMnYAhPdsz/vDuKPcqHdnxbZoqj9Wxveb8euzwr5/ayL6YvP7kI98vPKdafGDVXiq3t293298IFkyrGsPV2jnXGr2BmNsjpqG6hjjEvXYu8yj17ePStW97Xv6pI9VExPQcEzyz7f+i4w//3tiX/ly3f7V7sby0DHtIAQA8MD7fhtXWPdEu8PIS6qtfHhoW9n5Y2Pthkh5NPInFCHuPjkpYF8pNT8unzl7icvkuFUtEZYrdShfPO79+1hUdVWmb8xSDDZ9POq1ayxqJiO/VxZ3OHcsqZkl7KFdFgq6dhX//UfQPufiMCBKRtpyopm5+keFyqUZ/QUtkZFlj3ePwr4fFZRer1We1emLZGqKzSTPmpGs9x306VFCuVCqPq5vYAoDVlanV6gs6IxF7Qa1W62of7WeVSfOj43beXIHK5i6TrlTyh34a3p0tVh5VFmlZof8nUeNH9Orm5eUh4BM5uHsJ21D7Xr0FVw6lZRapD6Rlq/m9fLxqr1dnroyeuym3rgg2+4vXu/lFrDuqVp9TX6giYqsbHYLEvXtZq7PTs9XFirRDOkGvXiJOY+Wd3RTS/fXAOelFanXRuUsssSyesoImtPTAalCdyqxw
HR/q4Vz7Z4nD8LhlJ/5s84aPK4+InF4b0O7SicIrjV1ac2LP/8pf95/ZhzJ/PWsg4r7WP6pHxdIvD56obqQ1z63zzI+7MQfyd1U+wdsBAHhe8ftJ+hfHBQYFB364TtUhfOFI/dJhA4MlwYFvR8oafaKHzY4JCgwJCwmeuIPeCfLi8IdK5762Z8LAtwMHvrfCMHbRuA63t9fKY8ZNWHmoGaV0GjHxnfanFvbr1jV43Rly/2hhlE/5+ndf7+Y3W9lhXGykL9PeL8ibr4of+PrE3F5jA/gnV0auUhGRMCAswCk/5s1Onbr2izlAxPEMnz1O9HdSxNsDA6ensb2mzBwpZE/m5lcRe3RdZFhISFhIyMfr8xvNcMbcuGEDBw6OSDpL7N6YgQMHzkjXERGdTFu/PZfc3OubFR/KU5NRnT53bEhYSEhYyOydWmH/sHEfjRv30bhx73rxOfxXh4T5ConpP2VuqMO+LwIHTpDR0Lkzh9ameW3u9k1pZXz3umzP+H46V/LKqZVhAwdKYvZZBURNC+I3NgTZDZ05ayjtiBj4tnSfQ1hspG/d1PYd5blKZn3uSxnSwIGBEd+Xe4bOCe/WjM8fWqQW9GrWuGXx239VkY3HHce5L3ed+Wn/AfY15foaxtbynOz7iSdcY6UDenKqydqiWJ4W9eNZvVv/pJnWX0/edeCaRc/wsVHXd4V9d85A1u/8++OZnH2S2GMaoo6jxiYNs624UkNEZCz/cem2TZXdV67y71rNGjgW3OrKI/tyVqQUaszsiSur8j0/bN7wyitYWgstBV7Nav6afDWrkdX9o6OXBHULQI06rZZlBAJ+UwsfjaxOqyO+gN/gazRdmZbsBI2u/2Qv6siu+StDb1el09Xctq70Vm8sS0yD41UsMQxzq2ZWp9WxjKDJ7+BrHYrpNy5X8mPGlC73alX0dfDoPEnKprCHfKNjlU5H/FsfV5VC6rfUZnXGnF63fSrsRa2O+AK7+31URp1Ox/AbNGu8vNrfxzt+U7RJY/3ieHGHVwdiuwAgQmC9iSewd7YmfcUljb72K3sLeycbRl+haXR29QFYcHkc5hqrb2za1RwgsEJLg8BqLcWs5wAAIABJREFU/poMrC3ZoZh+H8qok8jd0XfquomejWd0rWziWNXolDn9H1PIOxoXvNZl9UPH3zs1rzxjUdL0mLTz6uLj2l7LEVihDgJrS4fACi0NAqv5Q2BthK4oV6lmichC6OXj/lzv/qQrOpqvvkJEjKOot+ipvikCzBeeWAcAADB7fPfe/d3v3+x5wHfv4dtCbhWar6U/dAUAAAAAZg6BFQAAAADMGgIrAAAAAJi1FrSGtebaNbpaRtfNbFspU7vG4pkSAAAAMGstKLBaWHD6dGsvFotNXYh5+fHHc6YuAQDgTpevPOqeggDwPGlBgbVVq1a9e/cOCwszdSHmJTMz09QlAADcqY21talLAAAz0rLWsL7wwgumLgEAAAAAHkzLCqwAAAAA8MxBYH0ySn9JyTPly1hVaSkqc30ZLAAAAMADafGBtVq18YPubXmt20/7Zc/iN30mpJSpk8PEPtG5RERUU11d8xCdlmxctIfcLOt+KleVVDY4WVNddlJV1liarDy5J2Vryp7TlY2cI6ouy//lp5RffitpNIjecbad3bHlq1UPUToAQItjfIpXAcBDaemBtVqxXPqT5fiUnIwon7ZuXt1FbS2rz5eUVlYT0cmFPnY+y08/cJ9l3y/PD5AOdyCi6pJdC0f08YneW3eqcu/CEW+HxW7PKbk7lFYfTErOv1hdsvH9txcq7+61MmfrDtXli/lfjhixruS+Zy37Rodqlz9E8QAAzz31IVl2cf0Px+MC+0qz2ftfxRZnpx/S1v1gzI72Gxh3/OFr0J2UxUSODfkwMm5nUTMGfwSsOvvb6MgPQ0LGRcZsV+mIiEi9My56plQ6UyqdGR23s4iI6HRS/RFp9CqF9p5dAjx9LT6wVl6srin5ZVl09FZVdfn5Eu3F287WlGyc+GbgtJQyKtuz7KM3+3T3GRn9i5qocs/yDwJ9+nT3Cd145zRmWUrsQZ/o95yIiC7n71G2GxEkqjtVmSKNOf/Z9h1rFozv40RElfm/qaqJqDz/4Olqsuzz2aLp48OnT/Knkr+q7zxLtgM+j50ePn56ePeLJWVEVJZ7sKSa/p+9e49r4kr/B/50SZlRIQYlbdC0ElBrqK6Atkq+9UIqVkF0BWqVVItgvSFYJWIr0a4SXUXwAlJFDVrdoKuAW1G0oFzbAFUJijW2gsE2Sn4GSwxUZ9jQ/v4AFbmJVg2B5/36/uHMnDnzzPT7cj+eOTkDRm3Bd+qWRwFI4Sqfyi3x6mcZIUYIoS6MLvl3VLLqQUocvjDh8JpxxJNP0+Xt3HSyvHHDYlz4v/cvHP6sJehORHwqSb4CbKokcVVAZJ6hZRPD9Vz5xpDIb54uOrY8S3cyckVcro7J7q3Nk6+ZH5VHA0B5UXJyZkm5RqPRaH6trgMAw095ad/klVRoNBqNRnv3xWZohJ5eN1rWqk2kk3CWyHUQp/pIVlaZ8PFjNi6e/r7O/FeOhomitEGxkh4Hg4NWuarWqFOOFtutifF1duE9foLyK7nd/BROw4aVW9AXbso1O04CAACVnXJSp70/3yveSiiOCnOzheoMsShdaFei4G2QuwGANl2yWJqq95KvIwGo5kdBnRS6JDoTgtPcAAB08iWBWUJSUey+w+29FkcBgOUpdhFJM4N2TCYBIYTMHl1+ete2QznXfwNrO0fv0GgRX5UYnvaGJNzDBuDCzpAil+jFo0GxfcGWDB1tyR27ePVyD66FJnPjJvl5jY4G9jC/zz4XOTMBgC7atSAghXCctnq1QJX01W3fDX66+NBdJQ05jXCavTncXZccFiFX1QCb77d8jWg4EwAMeZsCAnsT/LmbV3AzZEnk/NVTuGC4KI/anqzUEQ4eQSuDPbgWUH5EEvXNjxpDnbXdaL8V4X6DCM3JqG2asWsWjGYCAIDuTHJeNds7JkE6NDNkvDgtNS987BRm413qSs4kJx9JSSvSQH9n78EGAPaDJ2AoORS1LamwXE8Tffvzfb+Mm/1gQKTts9g+m1PfJbhcAhSSMYHJmhsGAGZNDQ18/zh5IPdh19V3aWLUZ/vipvR84f8hEXoG3X2EFQCA5Aln+/u/x2vlEIPl6h3kP5l/vaBAS11J2iCRK/V6laoSABg2PHd//8n8ZmHQxV+oTspqdRaqXqt3CtwrP3Iy/p2ssJ1KAJZwcRB5VKoYEhzkTAIAcDylifJo5yzJHnUrR4HnHyWXr7KTrUnSAnAmh3nW7pBWCMQf8loeBQCglLJzTiJ3TKsIoa6AzotcsPmKy4Lo/Ylx3pBZeIMGuH39wpXbNAAArb1SpKoCAGKo/+bE1NTElY7fr9uWRUN9lTLvutOK3YcSo6fQ+8RfNc64cpqxWvrl6oXuXLj3a/H56zogXGatln4pXTmNq7lu6TCICcCduGL34WOp0V70rkh5w8gqc4Ro9ZfS1QGjmfU1188X/3ofQHciYmkyMStu/57PXVS
bQuJVAKC5kHF37IZDB/Z/KdBslySWA4CRppsMWpb/XE5bOPIHEdDLcWB/oMvLdQ9mxGoOhswM2572m3P412d+OHtY6uP48CzdyYj5a5PLmaO9fZyJayVFl28/PNTOWQBMLpeA+nL5/gwd4Tz2/9gAutu3aVDtCwkURx0qabh0ja4K6KJtcxeItyarWhnwRcjEMLC2wCB7GKsrf9UDSZJGvfqaVq+nOHYc0so1eGfKyfzi4qNh/HZOHxIcxk6Kzm7ll1Esrl31r2oAYHFYUEMBqOPDUwRHVdFW8ZKjDyIuiyf04FWq1K0fJVl8b6HdtSuVRlBuFitnKYqXVUrXZFEtjgKAMi76fmCYG+ZVhFCXUHg613bW6kCBI5vN7t2z7Vf4DLo8O1l+8gr0pquqGpKXpXUfNpPt6DfJ2fBzmQ4AgLDuw+UO4LKZj84j2Fwus1yeoBy3ab0fFwAIuqoo7Yg8t5pgVlU1BspeXMcBXC770dUNirQiftBSDy6b4xy4cGJNZoaqHgAIkmnLtGHzp3k4qsuv1wN32uq40NEPr9aQXgkGgIWlpQVAPdQ9OMQe7ScScOHaiaiV4oityYU3HuZcuuh0noEYt3SHNHzxBw6Pvx9t+6yGU8vlSwIivyc81kUHOgAA0/Wj8KWzJ3J/L0pc+0lArAoArP9v7tJQv9F9buftlcxclKjp4H8VhF4WnBLQAlfgOQYk66KV50Qij2ixj51sSIQiP0aS6SMZ4yQG0i2qWDG9vQ74IcE95ksL3KTNwiLpESzaE+w13wlUELTPDQCCE+XAAHBOEQJAmWzeqiywotQqEO0UAJCPHTUqY+ZIi61ISqUmF8tdGABfpOwFANiR4t3a0asx8bqg6PcwriKEuoi6ezTZq1lOJQAo+rFsVp44NyDNca5oFLPKCNBsEn8vkoB2JmfqTqyNLJsW9/UoJgAYsiWzNuq8Azz6tzu9lb5HA0E0NiEI67rHy+lJWrd2xdc4bADdzSoApq7qDoCDra3Fg1sa6rc60S9cU5h2VC5PjYyEt08uaxgkoSmaBsKazWxxX+2dBQC6EysDIvPAI/JQ3LSGKQBMZ59AZwCo947ynJmYm1O+jO8oEC0SAADtsWrMgm/yCqsD/Wzau3GEXrLuHlhZs1Puz278szC28j4AAIRlVAZTFEmSLscrg6r0lBWLRYJbRmVYlVYPLI4tCcBX1IS12SnpErZYISug3NxJAHBZp5A/3H88K0irB1sOq+HBN338A4P2Jvpq9cDishpjZtOjDJewA3KtVk/aclgtU2iLo8oC0n+VkPUsjwQhhDojvrNDeV6uboYf24JuTKkW7P62N5UqAwx4MHZJX1eqHUU7Av3YUFiy60rrPRHWPUFXqQPgNt2r+2bdNp1/Qgy/IX3qSn+EsV8uneUMOvrEwesAwGT2pnUaA0CTYVlgu7j0TzieqfOYwqZVGTlVQxc7WICy5TW1qpKa/s6DGk/ljxvHTUhM2yV34udl6AjnT0Y/nKZKXy/M+7kGAHrzpy7+fNxda+sHR5hv8x0hL29f7Im6AYWaxzNr22cBFO3adFrHHBE4GgqTU4HsP3oKv+rEaZ31AGu4mVmgBULwBht0ham5NRyuNV2edsEAfbhsnMmKOpnuHljbQpKNqZC0fZQPSVsOp4Onjw4ObuMIq50+rFgcq7Y7ZZAcbtvnPn7UZW4b10cIIfPEnfXl3KIQn4lyW0bNTR2MmgIAjn7BE+ev8vLaZQv6mzBqKhCj/Hy+knw4PXkAUXPdwA1pvSsXH7+az2Z5nWBajl59bFbDvvLkvZm6KtWCiUkA4Dg7IWGyn+PCkAmXHLn1miswjgRgjvEbmxDh5S1n9pkYvefBz1uHLPzy41Cxz4Sd1rTBcuKaXR7M1q5YIgv55Mrck3JRY0YevlC6oDg0ITLkJLDHhn85+9GUU03mppCtj5afIdylE8f6NfTJD1i96HzozgRxoQUB9Y/l5nbO0l0v1wHAhcTICwAAhEe0x7zynet3NiymxRzkt37FFCade2KrJLlhaQEbZ9G6pR1ZNgGhl+mVP//809Q1vCRbt259/fXX/f39TV1I5+Lj47Nt27Y333zT1IUg9JIYDB39Rcmrr7567969F1oMahWL1eb7IbpaZyCYRavGfOuRH+dFAADUG3Q6mmCymQ8HBe8ZdAaasGEz20ldtEFnAGa7TRoa0Qwm2+ZBq3ra8JsB+rCZFi0aVgOb02pYbTjRYKCZzGbDlgadhmY2nRHbEXS1gYa8iPHiokkJP2wa91TnNunFoNPqaILNfVQzbdBodPVMLpdNWLR3KkImgSOsCCGEzAZhw2Y3mxVqwWQ3e/nUk/nkN9oEk81+UpuWjSzaOI1oUUMzFi3SKgAw2dxWmj6pJhsm8ddXSSWY7AHM5ru4jm0nboRMDFcJeE4oVWraEz+Fqs06VtDqh1URQgh1GDEl5ofG4dXuiZgSV3z1hw3POryKkBnq7oGVOrXEddiS9Iez143KGE+BaF/Lb58+gSouptjmwUqulFZV8VgupapUyjI9AMeuTCZtbcUrhBBCTwHfWVvgQ0DdS7cPrFWVKm2TBMlg8ZxdnbhPuZjH1ZgYnb/kPRIAqKupkqkCr1hF4yGjKmmxl+9yWXppJTSseHUwBkdZEUIIIYQ6rrsHVgCA2izJRIHAc178eQqAqtaqK/UUlCXNE7q6ugi8FstUFIAmXeIjcHURvP9wlf6HjOr4qEqfVUISAECvyFS7fih8OJdJtXmJfNiOkweiI6bzAQBIl7D5INuqxMiKEEIIIdRBGFgBGBzh3CVC40nxohiVUa8uyCquuA8cF/8QafQqQfVBsTSTUh2UxpyzC4qShk11arYKKpUZo3hH7Nn4k1aWMCTMx/5hE1XqcXV1pth3qq/4UOM0A3J0sOe1+NSql3V3CCGEEEJmDlcJACCdhLP8BZASs7hYRQkb9ulLUuOjZErt/WojZVNF8Ty8BLFS8WJ98LodwpGcppmVdBc5LZYp50a4tFzP31hZSQklSXs9jVlL3CVJk+X+LIAyeTrLJ9r25dwbQgiZpdrffzd1CQihTgQDKwDcp/TqrGwlcHzsGp8HnbVbmm6zQ53vFM0XqgBI+6AUlb9ija/v8hjhh3s9mz420i0s8GRQnFK+wqV5xww7nk1lpR6AY2dHUveNAKCWRak8NwTjN6gQQqgdVr16mboEhFAn0t2nBJAkSeqTfNkOolOc4C1it8YkSgi8Rbz8JTy2MF5Dskgo2CK0Yzt47VTZeQhdW4R88j1xkC4+5mrL7vmiEF7SHNG8GUuyxgT72oL24Hqlh8QHh1cRQgghhDoMv3QFQOn1eiBtWWSzJGqk9HqKZDXup6q0egaLw2r54h8AAPRZ8cdsgue2GGQFgFq9liI5tiSAOmmrUrjMp4Pfd3058EtXqLvBL111fu186Qoh1D11rykBv/76ayt7SRar1QjJIFm2j+IpactpL2iyhMFz2zhkxeJYNfyJ57+M10Yjk/njjz+6zz9aEEIIIWSOutGUAB6PV1FRYeoqOpfi4uLq6ur+/fubuhCEEHqS67
nybM3z6ar++XSDEHppulFgnTJlilqtzs3NNXUhnUhCQoKXlxeD0b0G2hFC5shwPmnX6fJnPVmVebrkwVwQQ/KSd2fufdauAOCqXLJSLF4pFq8US2IzdY176fJvtkvWJ5djGkboBehGgZXBYCxevFgul585c8bUtXQKUqm0pqbG29vb1IUghNAL9kvGtl25NxujJNN7XWqcv+Mzd2b4KS/tm7ySCo1Go9Fo79IA9I3cnZ/5+KzcmXyi5DkNAiOEHtO9htamTp1qNBo3b9589OjR/v37c9qdldpVlZWV1dTU/PHHHwRBbN68GecDIITMDV2eGhWVVKix4I7zD186zZEAAF1hYvSutFIdDPDdsCMQ9i1Y9Y2mzoI9es7qcB9HAgBupa39VEkSoz7bsYj4ZluG45ql7kzQFu7cvCtDdbf3MO+F4sDRbKAvJkbEZ5RX1gDTwW32yvBJXLiWLNlHi74U8YnGyxuq79LEqM/2xU3p2bhHlRq164KlAwdUtGmeCEJdXjcaYW3g4+Pz7bffTpo0qU+fPg93/vLLLwkJCf/973//+OMPE9b2IpSVlcXFxZ06darpzgEDBsyZMyc2NhbTKkLI7BjyIhck3PX+1/79az1uf7UgSkEDaBLFK/IGLIw7fGh3hLeDBThMk36dnHpo46jyzVFpDe/sbcctkkilEX58C7h9rUippQHKE8NW5A5YGPd13ML+GSvEcg0AXV6UZ+EXd+BQXOjA4rWRyTqAepqiH8uhNboqoIu2zV0g3pqsMgAA8Bccys/5+lGkRQg9b91rhPWh6dOnP/zzhg0bYmNjY2NjAwICTFfRC7R27drQ0NCvvvoqNjZ27Nixpi4HIYT+EuXpXNsPD00ZxAbw+8wnedZp5eo3r2RcH7do92guAcAEAKDrbxZ9k/HrvTqi5+2begAAsGS+NoDLtWjS0Y3cjF/GLUoczSWAu2DRqAk7c7UibwAgenNtmDBqyrj+aapfAEaIomMeK8D6/+Yu7XXzZmlBxl5J5nnDSXkgtyeTCR1dLg0h9Ay63QhrUzk5OR988EFBQcHFixe7aloFgB49euzZsycsLGzatGmRkZGmLgchhP4Kuu4eTfZqHMskepL07zVwn6YtCeLhCIw2OXRmxPEq4jUmQD1APQCjtZUB6pqcxSB6W9CGx1bdZTJ7ARhbqYArEC1aHC7deXjzNCZdkldY/RzvDiHUuu4bWPfs2fP5559PmDAhLS2Nx+t0y6M+d7Nnz7548eLZs2cnTJigUqlMXQ5CCD0lBtB0HQ0Ef4RTeWaGph6gXpORXe40yhnedHq7vjDz/IMxzhuqctspny32m+Izxbnhy4I9rK1rdLebpc83XVyhMLPIAACGosxCwtVtQGvXpTUlFzRN5gToClOTMxWFhdnJaRcM0IfL7tnaWQih56o7Tgmorq4OCQkpKyuLjY199913TV3Oy/Pmm2/m5ORs3br173//e1xc3MKFC01dEUIIdRTTeazT1nURJ0dFz1i99ELorPeTmGCAEUtjfdlgMW7puqKQVV4TrG2t6xyD9n7sS4QEeOY62tDlv4C/BcCAiX78WSs8p/cnuH6747gNPRKjl0aOm7/Ka4I1QdfY+m3Y7WzRynt9Om/b/JX0mpy4KcyG7SsntkqSG+bF2jiL1i0dhzNXEXrxutGnWRusWrXq4MGD/v7+mzZtMnUtJlNcXBwaGmpjY5OYmMhms01dDkIvFX6atfNr89Os9TRtJAgCAICu1hmAybZpmhZpg84AvdjMng8aGAlmHybxYN4qrdPRD4427VOnMzDZbMIC2kAbDMBkPn4hjUZXz+Ry2zkLIfQ8da/AGhsbu27dutDQ0DVr1pi6FtP76KOPlErl3r178ZdYqFvBwNr5tRlYEULdVTeaErBs2bL8/PzvvvtuyJAhpq6lU/jPf/6TlJQ0adKkTZs2hYSEmLochBBCCKHWdYsR1tra2tmzZzMYjIMHD5IkaepyOpcff/xx3rx5Q4cO3bNnj6lrQehlwBHWzg9HWBFCzXT9VQIuX748ZsyYgQMHHj16FNNqS2+//XZBQYGFhcW777576dIlU5eDEEIIIdRcFw+saWlp77333qeffrp582ZT19Kp7dq1KyAgICQk5MCBA6auBSGEEELoMV15DmtCQsLnn3+elJTk6elp6lrMwOLFi/v27btmzZqffvpp/fr1pi4HIYQQQqhRl53DGhMTs2vXruTk5OHDh5u6FnNSXV398ccf29jY/Pvf/zZ1LQi9EDiHtfN79dVXe/XqZeoqEEKdSNcMrFu2bJHJZGlpaQ4ODqauxSzNnz+/oqLi6NGjvXv3NnUtCD1nGFgRQsjsdME5rDKZLD4+/vjx45hWn9nu3btdXV0nTZp0+/ZtU9eCEEIIoe6uqwXWb7/99rPPPjty5Iijo6OpazFvGzdu/OCDD7y9ve/cuWPqWhBCCCHUrXWpwPrrr78GBAQcPHhwxIgRpq6lK/jnP/85ceJEHx+furo6U9eCEEIIoe6rS81h9fLycnNzk0gkpi6kS1m6dKlGo0lJSXluPVIUBU9aEpeiKAZJtr2IBVWrB5LVpAFF1QJp1bTTlnsQAsA5rAghZIa6zgjrpk2bXnnlFUyrz9327dtfeeWVsLCwDrbXZ6/3chNnGVs/mB7+vqubUOji4Lo4VftovzZ9savD/PSGNlmr3ndyEwpc3l//nb6VPozKmKkC4VRPV77rkmNaAFAfmicYJhC6OzmMkWQ1nHFVJnrHVeju6jonSd16JQCUKilQ4LvnURXU1VTJnPcFwnlJFR28V4QQQgi9DF0ksF6+fFkikWzdutXUhXRNBw4cyMjI2L9//5MaUllr3vfcqqjUUm00YLnO36tQKhTKvYLseHlZ415VXHC8xgYoCgCgJF6cL0w5p1Dsczv5RbyqZR8Ml7BUhSJLceWgUBEnVwPwxkTIzxUrzhVHv5EUn6YH0Keui4FwheKcQkxJ16e1VkyZTOQeJL+m1VP3H+7xnS23C01RZO31t3/SjSKEEELoJeoigTUiImLjxo2DBg0ydSFdU8+ePXfv3r148eKrV6+225AUhJ9UHAniP3wPX5suniHJ0oM+U+y7KosC4AzkkQBQq69mcHi2AABQsl5c4LljWeNJ2gIF5SbkM4B0FrpWKRRVoI4TzdunpiiVbI5IVgEAAAyg9OqsbBU50sUOALg8HgkAVKWetOOSYCzOOscTurMAWEIPniJf2bIMsBfJshXRnpyHpWfFxsAMsQDU6tZGdRFCCCFkQl3hS1cpKSkajabj76zRM3Bzc1u7dq1YLD5x4kQ7zUgrEpq+grfyFM/NEk0W3O8hkB4RNuZYShU/X0otT/FhAYA6fl2xz5YUnnpJw8FqfTXLhgUAADYsK0qvB96nEpc5IuFusAuUye0BAMColC0Wy85Vu27gPcjG+oJIkYwrTXcnwaivrmXZWAEAsKzIan11K2U0nyCrKjhXXamVyyspZaZauO9kxEic/IoQQgh1Fl1hhHXjxo04dfUlWLFiRU1NjUwme6qzSAYAUMAgG/9xZFTL5ojSx8jkc
3kAANkxMWpQ7ZZI9hVXlqbEf6e3YfXQVzcMclJULcmyauyEMgL58JdaDJfgpLPFBZL7ayRJVQBAKbeKglX+KV/5cAAAWDZWVDUFAKCvpWxYNq2U0YyRum90EkXtiN6yVx5Kyg8onvLZIIQQQugFMvvAmpSUZGVlNX36dFMX0i1ERkZGRkb+8ccfHT2hNl26k4xIK05dRsVvyKIAVHHzZNxo+TKXxug5LFgeJ/b19BK+Y2dj5yoYSHLecSPPK9RGgKtZCiuBqy2o90izPGSKfJnglCS+oknnJNkD9PeNQBVKg9MEssQgXkMYZbgKXFQF+RSAXpGvdnHjtyyjOQbPiVepKqUA4H4tRbJsnv0ZIYQQQuh5M/tlrdzc3FauXPmPf/zD1IV0F7NmzRo5cmR7EzCMqaJhWUGlO4StjGWq44WuUh2fZwUA4BqevmM6q/FI9hKnfcIrB3wAtOmLfcSlNix9tSAqPXoyq3kfGpnowyS9HUldU5P+MvkXbuo1rsKDwOeQANDDQ3p2nZAqifGdLQc7qLQKkh8J5rcx80X1L8ESK/nZEB4AUCUxvnNTSH4P9TWe5NheH+7TPhhkNnBZK4QQMjvmHVjPnj0bFhZWUlJi6kK6kcLCwo8//risrOzJTf8CSqvVW3E4Vm0d1mu11WDLa7NBQ5sqYHFZTzEX1ajXaigWl9PO+q+oC8DAihBCZse8pwR8/fXXgYGBpq6iexk9enS/fv2OHz/+Qq9CctpOqwBAsjj27abVhjZPlVYBgMHi2GNaRQghhDodMx5hrampsbGxuX37dp8+fUxdS/eyY8eO4uLixMREUxeC0LPAEVaEEDI7ZjzCmpqaOm3aNEyrL9+kSZPOnDlj6ioQQggh1F2YcWBNS0ubNm2aqavojgYOHGg0Gn/55RdTF4IQQgihbsGMA+vp06cnT55s6iq6KaFQeO3aNVNXgRBCCKFuwVwDa25u7tChQ9lstqkL6aYsLS3VarWpq0AIIYRQt2CugfX7778fM2aMqavovng8XkVFhamrQAghhFC3YK5L+BQVFc2ePdvUVXRfPB7v1KlTpq4CoU6v7pYiJfn0hZ8ray3t3hZ86O83rC8A1J3/d/TxXwd/HO432ALgVk5sooJ4b4F3vXz/d1VNv8RGvjEpxMcybffxS7dr68DS6jXeyIl+U537giYjdv95YvyCBe/1BagrPRR9tHzwx2FvnY85erW+6eWZIz8WT33t5+OJiRmqO7RFX97wSQFB4/tZvNyHgBBCf5m5jrAqlcoRI0aYuorui8fj4ZQAhJ6gviJp2SfLtycVVNKEsSJHHv3p3DUZtwGg5tYPxzMyLt1qaHb3Us6p49k/19TdrrhUevnyhYKMUxnSLMG1AAAgAElEQVQ5BZcvl16+pK6qq/05+5uMHNWtGv2ty6f2bVg4Z83ZO1B16dsTx7//uQYAAGj1hdMZGedv1d+5pbp8ufTyhbMZGadyLpRevnzlp8qaO8fXBm84dKHKsm9fi1vf/6Cuw7SKEDJDZjnC+ttvvxkMBh6PZ+pCuqOcnJzx48fX19dXV1ebuhaEOrU7p2J3Xaixn7Fjz/KR1gC3vln+yb8ytv976sTlrf/dZT9ry+FZACXRXguT+87YciDQHgBAkwQAfd8T7/liJFxLmBmwLyertO7D1s7vMV4sHw9w53io14bS8Sv+s268BUC9YsPlGuD6RUSLh1m+qDtFCKEXzSxHWH/++efBgwebuopuavz48WvXrn3ttdcAoKKiYu7cuaauCKFO6qcLpXUWgz+YNtIaAAD6TfZzY8EdZWnFs3RWV1dXU/GTuqYeiB6WT5E8Ld4aOcIaNMnBHy3Z8HVORc2zXBshhEzOLAOrWq12cHAwdRXdV05OTkhICJPJdHd3HzdunKnLQahzqqu9TwMQRI8HOyysmZYAdTV19e2d1qo73ywfO9ZjpjTnDmvk3I9GPs2pfSd++fWWRRPfgkvHd34+U9QwJwEhhMyMWU4J+PXXX9944w1TV9F97du3r2E+hr29fUBAgKnLQahzsrQf0A9yKi5drPHvZw0AUHH+0h2wdLO3s7CssLSE+lpDDQALoKaWrgcror1hU8uhUxe+P9jK7q2Ro4b16wGgsiQsoLamYby0rq62Doi2h10t+wk+WSf4pKY0LvhTecbxnKUTZ/R9vreKEEIvmlkGVq1Wy+VyTV1F92Vvb//uu+/+8MMPX375palrQajzGjzNb2RqdM6W4A23/Yb2qMg5kvwz9Jvq424N1oOd34LvFPs37wM34nJK9h3o5z68XztdWTtO9J/VZGDVfuRQVlLGqdjoN7wHV+UklYLliJFvtf5rqpqc3Qlq27/zeoP6Zi2ApWVPnMqKEDI/ZhlY79y5M3z4cFNX0YkUFRV9//33v/3225tvvvlyruju7n7r1q26urrdu3e/nCuqVCpra+uZM2c6OTm9nCsi9Ff18/vX5rr1m/cd37nhOAD0HTb1iwixwBoA7GdEiFWfx55N2HAWwLKfYP7aBc5P03MPwdII/8r1SclRpQBgPWiqJHxq66OmdRU/FXy7T5UMAACW/caHLpxo/RdvCyGEXr5X/vzzT1PX8NSmTZsWGBg4bdo0UxdiehqNZuXKlTdu3Lh9+7bRaKQo6snnmKeePXv+7W9/69+/v4uLy5YtW0xdDjJjBoOhgy1fffXVe/fuPYdL1ty5dd/S9jXr5mObNXdu1YC1bd8WBzqoruZWVQ1h26/vE86v09+pqqWJ3v36YlhFCJknsxxhvXv3LovFMnUVpnf58uUvvvji0qVLv/zyi6lreUnKy8vv3r27fPlyzKzInFj37ddqUmxrf0dZWvfrUAeWrL798K9MhJA5M8tVAgwGA5PJNHUVpnfw4MGrV692n7QKAH/88YdSqVQoFEeOHDF1LQghhBB6Scw1sFpbd/c3W3fv3s3NzdVoNKYuxARu3bp1+vRpU1eBEEIIoZfELANrbW0tBtabN2++8sorf3XSKoPnGRIs5Dynml4WrVZbXl5u6ioQQggh9JKYa2C1srIydRWdFG929N4vhOSjHaTLoh17F7m03prB91keZHaBFSGEEELdilkG1t9//71Xr16mrqJzIvnuoqA10WEPl8ixD5JuCBZN5pPtnYUQQggh1HmZX2C9d+9ez549TV1F52XHYamukkHhPiwAAFK4PJivUelt7OwAeHP3pqx2IwGAwQ8+IA9+qnUfEUIIIYRMxPwC6/3793v06PHkdt2UHcdGr/wqRukmDh4CwBEFe1fGb8iqZtlxGEByXQV8OwAABov/jhvfBkddEUIIIWQGzC+wUhRFkpi02sDg2Fjdry6TRx9liUKFLouDXbJjZKV6yoplY5ZL7iKEEEIImeGHAyiKIgjC1FV0VgwblpVeW0MVfBVfmb9DbqSSpqfr9fxqK37DquEkA7M+QgghhMyM+Y2w1tXVYWBtG2lDUvdrASrk8ZmkjTJ+RwlArZ4iWTYkVFaoYZirS/PIigPWCCGEEOrUzDKwWlo+44e3uz6GDWkFVC0A6FMDHex8ZFoAMN6nqB4sW9Afi46u
FKWrr1xRpoq4ev19Cqji9HwyaF+0EDMrQgghhDor85sS8L///e/VV181dRWdVa3My1rWYmeSLzsJAAAK1gvt4rk8O/J+ZYVWbwQAbeoch3RbFvy17w8ghBBCCL045hdYjUYjg2F+ZXceeo1a//geqkrfelOEEEIIoU7A/KYE1NfXW1hYmLoKhBBCCCH0kphfYP3jjz/+9rdnLZuiqCe9+6Zqn9CC0jdrQDU/xfjkqyCEEEIIoQ7qUoFVn73ey02cZWz9YHr4+65uQqGLg+viVO2j/dr0xa4O89MBAGqVMVNdBR4CpzHidG1rfVTIRG4CLx+Bk4tIdhUAQH1onmCYQOju5DBGkqUHANAeWyJwEQjfcfWNU7aZWilVUqDAd8+ja6hPxczzFAhmxKtaLx4hhBBCqPsyv8D6559/vvLKKy12U1lr3vfcqqjUtpUSWa7z9yqUCoVyryA7Xl7WuFcVFxyvsQGKAgD1QYl8YLSioPjkDKUkqqCVPuyD5PmKs1nFJ+do4w8oAYA3JkJ+rlhxrjj6jaT4ND1QWdI16qBTxYqCaLtEibzV1FsmE7kHya9p9dT9hh3aY/N84yj/RIXiSDAfZ+cihBBCCD2uy+QjUhB+UsFIF7lkNe6oTRcHKjx3S13PiYOyPeUbhJyBPACAWn01gyO0BQCAkvXiAs8dy5Re+wCAUuarXGcJSACeu5A8olCBGxknWm8l3TGLks+Xwjp5kD0Ag9JrlOkF911n8QAAuDweAABVqSftuCSUFRSzhMFcABAIh81LOUcFuWc1K4O0F8myg9RbBUsa6jSqZFFXPNcE22jUelse62n+g9TX19vZ2T2Pp2dm/va3vxmNOBaNEEIIdRfmF1j/9re//fHHHy33k1YkNM0wVp7iuVmiyYL7PQTSIw+WGaVU8fOl1PIUHxYAqOPXFftsSeGpG6KjvrqWJK1IAAArVo9atd4Ibp9KXOaIhLvBLlAmtwcAgLJUSeiOrEpe2KCHK5fqCyJFMq403Z2E76r11nwbAACSZQWUnmqlDAZJNn3qeoWiTA9pcsqoSi/l7zgVLWR19FH07dt33rx5HW3dhfz+++/Hjx83dRUIIYQQekm6TmBtiWQAAAUMsvEujWrZHFH6GFnKXB4AQHZMjBp8dkskmuLK0ur47/7PzurBj6Vq9fetbFgMACOQDKCMQD78GtRA/x3p/lS2WLBcJswI5gGl3CoKVvmnHPDhAIAVSVL6+wAAlL72fkP8bV5GM7UUxfWJjpW6MCjBHCdZtlQ4vaOL+LPZ7Pnz53ewcVdy+/btzMxMU1eBEEIIoZfE/OawMhiMDr0Ork2X7iQj0opTl1HxG7IoAFXcPBk3Wr7swadJhwXL48S+nl7Cd+xs7FwFA3u7vMMrzi+gALQFCr2LGw9AvUea5SFT5MsEpyTxFY/6Jq1JqNFTAFShNDhNIEsM4jWE0SEC10pFlhaAUipKXQXvkC3LaI7D51Oq4ioAoKha0sYKPzmFEEIIIfQY8xth7WhgtfKMTvUEAJgcnTIZANRZacVqncTTDQDANTx9x3S+23sAAPA/O/Icz4VDwtwIT58goacdpeNIjgpJAF6IXA4AAMFJcgCg0pYIt6rtrCj1tfvCLal8AGV6ukoNwe4nAaCHh/TsOs+IcLnv5PdTyUrKRyblAkCzMloghWHhMl9vryy7ShUZJnf/q88HIYQQQqiLeeXPP/80dQ1P59KlS7Nnz7548eKL6Z7Sa/Qkh0O2leT1WrUe7LhtNwAAvVZrZHFsn2astFar1pM8boenrwJcuXJl48aNBw4ceIqrdBW3b98OCAhIT083dSHILBkMhg62fPXVV+/du/dCi0EIIdQR5jfCSpLki1yXn2RxOe0dZ3F4T0yVLE67XbTGisOzetpz2larVmpsXIZ0JP5S6ux0pZEv9OA3a60vU1P2PA4DAEB/OSvrGrh4CBuL1CrT8ytt3IRuXPLJm5S2IFNR/YbQ05n15E2EEAJ49dVXe/XqZeoqEEKdiPnNYX3BgbUrUG0VCSeL02s70HKz17xD6spTS3xXFTx6pkZtQZxI6LbkZBUAAPWd2Hd5emVZ0rwZMSojQFXqvA8lCq1yxyyRrOJJm6CWzfHZcblSscZzXpr+SZsIIQQAgGkVIdSM+Y2w9uzZE1/StYfKis8XRAcqZYe0np+2O9RrLJAd4QRnh/mQLio3WdYaN8+GWQyXs7KsfD1dZI3dJWa5fFEc7E7ZlQpk58MkKvkVz+i9IXwtw0t0VO1r295mkHdqEhUk/yKIc1kvWJOiH6Rvb9M7CEdZEUIIIdSS+Y2w9urV6/fffzd1FZ2X/phM5R4UNNefOiRTPdxbEj/Px9e34f9Ck9QNOym1Gng8EoDB49uo1Q+/y+XsHzFXYNf4b5lKtcaONwgASJ49VKoptVrNs+cBAGcgr1qtbn8TflVXc3kcALDn8X5Vqyva3cRPASCEEEKoNeY3wtqjR4+6ujqj0chgmF/xL5425ZCS46FKPwe8mhT5+QjpSAAAGBoUvduXMgIAkCSrcSCTQT78XRgFJNn6j8R6AINqOJEyAjAAXiUpIwVAgpF68iYDwEgBABiBYvQgGfr2Nl/UM0EIIYSQeTO/EVYAYDKZNTU1pq6iUyqTJ+kEArK6sgr4Y2zSDzxY+bUsVbpKIlklkaySiLekN46lknwnUllcBVCrLK7mu9oCtDI5mMMfUl2spAC0Vy7bOA0j+UN5aqWKAlCVXLEb5tT+JgxysStTqgAoZbGaz+cNaXeTAWBsDMet/AEhhBBC3ZVZDlL27t377t27NjY2pi6k01EdTScD5cENU1f1Ngr3pKxaoacVwBD/6N3+LZrzg8J5Ph/6pr9aSYbK3SBrybD1vFNnwwY+1ki4OFg228v34P1Ke0n6ECDtxaJ9wV7+NverhDGpHJLR3iZY+YaN9g2aeqWHTi/aKSW5VHubAAVrXMOsUxRf8Fv+4eU8QITQ06m/lbM7eteJ0lt1lm95LF0bNrGfBdxRJHy5/fit+0TfdwMivphqbwFwKyN6za7sO2Ddb/zCL0PHv2bqsv8KWpN7cFdyfpnOgv32pMVLZ/CZpq4Ioe7A/NZhBQAXF5d9+/Y5OzubuhBTem7rsNZqtUYOp2GWAEVBqzMDjHptFcnhPJpBoNdSJIdFdmgTqCotZdXkcDubRooCkmS09ocmcB1W9FfgOqzPU/2tnG9+tvcYb1+Xs2HeZgg7tmrE+Q0f7bLasDeUX3V85acZ7+7ZMYM4Lp6T5hwf/3G/W/8ODlZ9eHj9ROt2e2WxOu8vMHWpC7zWXnEY69y7PC/3OtNv9xnpWMLURSHU9ZnllAAbG5vffvvN1FWY3v/+97/n0IvVg7QK0HpaBQAGq0laBQCS1SSPPmkTSNvHD7ezyXiQTVv+ASH00t068qnH6pw6AIC6nNVey1PObJg6Z185AADcTl4ydYOivt94n/H21gB9Bw9+vc5AA9wqvWo50p1vCRb93Me/9dMPl2vq1T+X9xOMsbcES/vx7v1KzpfWP0sx9MVE8fyZ0729potCok5rAACgPDls5nR
Pr+mfiOUXDY+1+US8ff9OyfyZ0/8xMyQ2V9fQhTZ3Z1jAdO/pAavkKgMAGHJjxVGZjQfhXm7UJzNnbsylAUB7QvLRzJBD5zPXzpw5f2dJPQCUyz+bOXPVCfDZnHryzOG4uASJNxt0mhsd/fcPQuivMMvA2rdvXwys/fv312g0N27cMHUhJnDw4MG33nrL1FUg1PX1GyWwv6g4XwdQ/9N5Vb+Ro8YKht+5cOEOANRcUKiHjRxm2diyRrE/ufoDP4ElvMbrd7/0/PU6AKirB7hvqLPoZ9/vVumFWwBQV0/DPUPdswXW8qI8C7+4A4fiQgcWr41M1gEAd+KK3YePpUZ70bsi5eWP2uyP9qKTE0r4y+N2b/+Y+CZyVxEAlCeGRZY4r/xaHudXv0+cUAJAA03Dw2J6OjlYqEq+SSukwZCXlnax6rXBI/n9oeT7lIzLADdy0zJLajiObGByuQTUl8v3Z+gI57H/x/6rTxkh1K56g7ZCXWGWgdXW1raqqsrUVZhY7969J0yYsG3bNlMXYgKFhYWjR482dRUIdQMDPnB/7XxOSR1ozl/qMXKknaXg/RHqwu/vQE3BdxXDxrs1vNmvKUlYvln9wT9DR/YAsJ4YumJY6bqZPh/NXHawFFi21tBvaviCfmeX+3w0c96q5J+sba0snrUeojfXhskdNWVc/3LVLwBA0FVFaUfkudUEs6pKV/+wDdtx0gdO9Za9eWz2AI8PnA2aGwa4kZuhsuZCcdrJ3Ls9bXUlxRpgj1sRFz7pYeJkj5symqguyizS5SkK6f7jvJ2B6zHRGTS5WSpNdkYJ8KdM5gMA0OXyJQGR3xMe66IDHf7SA0YIPdEf//v93u81ZhlY2Wz27du3TV2F6fn6+t65c2fTpk2mLuTloShq0aJFb7311kcffWTqWhDqDvp94Nnv/ClFheJCncB9sAVYjpr0f+qc78sLsn4aOmm0NQCA5viXa78f9uWWufzG4dZ+40O3HExN/U+8n73lsHeHWQJYDpgqjj+c+p/DGyf3sx4+8q1nDqyNmMxeAEYwZEtmhSXfZDCZLSeR9iIsoQ5oACAsCYKup6GOpgkC6gGMAA7eS2e7tfyxFHuCn4eNLuPopjQFzfXwdrYAGDDR25koPy3bdqoE+BMnDgIA3YmVAZF54BF5KG4a9y/eCULoiV7t6+g0dJhZTg98/fXXr1y5YuoqTG/o0KEbNmxYv379ggULGAyGjY3Nm2++aeqiXhSVSkVRVG1trZ2dnVQqNXU5CHUXfT38hh5MXFMGH/xzMABAj5ETx0RHS/9dNyJ0nTUA3MnYsR9m7wh1fvxnVHW3FPs37L8zde3Evg921VScTViTQoi2jW//F1cdpyv9EcZ+uXSWM+joEwevP6H1m05vEyeIYX6iEY/ireFa4U3WaP7DMVamx8c+jidkJ3ItHBf9o+F3vVzvGWO3rzxx4gYxep2fIwAU7dp0WsccETgaCpNTgew/esoojK0IvXBmGVg5HE5WVpapq+gUuFzuzp07i4qKvv/++5c5r7eqqurw4cNLlix5aVcEADabHRIS4uTk9DIvilB3Zz3+48m75hV84O7YsG05cpq35X+/dV810hIA6n/67sKt8yWfeskAAKzfjzi8fFjG2k92lVrZC/w2xk4d3AMA4NaR5UsO3err5P7xti0TBzy30hwn+zkuDJlwyZFbr7kC457w8RFi3NJ1RSHhEyZY97eur3l7earUXSNfseDEuMMnlz1aOM95hq/z/ijVSH/fIY17mJOC/L7KTKS9505hA4DuerkOAC4kRl4AACA8oj1GcXGZAIReNLNc1qqoqCgkJOSHH34wdSHd19WrV6dPn65SqZ7cFKFOBpe1ekp3cqTLTjtv3Til75PbPidPs6wVbdAZaAaTbdPB0EgbdAa6F5vdEwAA7hkMBJPZOEVBU5haWF4qjzpS5b3zpHQsrq+KUCdiliOsDg4OZWVlpq4CIYS6tjrFljnrc+p474vXvsS0+pQIJvupfqf/ePuezEexVFu4TyrJrWePnrc5HNMqQp2MWY6wAkC/fv2KioreeOMNUxfSTV29enX8+PFardbUhSD01HCEtfPrzB8OQAiZhFmuEgAALi4uFy5cMHUV3ReXy+VwOKauAiGEEELdgrkGVjc3N4VCYeoquq+Kioq+fTvtK0KEEEIIdSnmGljHjRuXnZ1t6iq6r59//rlfv36mrgIhhBBC3YK5BtYxY8ZUVFRUVFSYupBu6scffxww4PktToMQQggh1DazXCWgwbRp044dO7Zs2TJTF9IdlZaWTp482dRVIIS6rNrffzd1CQihTsSMA+uMGTNWr16NgdUkLl26FBoaauoqEEJdllWvXqYuASHUiZjrlAAAmDhxYk1NTX5+vqkL6XZqa2t/+eWX9957z9SFIIQQQqhbMOPACgCffvrprl27TF1Ft5Ofnz9q1ChTV4EQQgih7sK8A+uSJUsyMzNLSkpMXUj3kpKSMmXKFFNXgRBCCKHuwly/dPXQ5s2bL1y4cPjwYVMX0l1UVlZyudxbt269/vrrpq4FoWeBX7rq/Drbl64MOh0QTCaTMHUhCHVfZh9YAWDIkCExMTFeXl6mLqRbCA4O7tmz5+bNm01dCELPCANr59dqYKWv52bqnKaMYr/UUjQnxEERJ7TAJIj+s74+tsw6UeQVVUIDgy3amb9a8LTd6VTZN23dnZveA60pKbysq2NynUfw2Y2R2KApKlJVW7IHuzg7MBvbFUVOmC/XGIHoNSW6INrD4sHZl5VVPZ0eNWuO1l1Vld+lHmyS7MHOjjbtFFhe8kud7VA+90E4N9woLFLVWNo7jxvyhCdvuF5ypcbWaTj3QSkt7uKeRlWquft4JYbUBWNW59IAxKClh1MW8S3avwjqvsx4lYCHpFLpF198gYH1JThz5szx48cvX75s6kIQQt2OLm/npuufveTAqjktO6Hhh397OJDbuCcw8Qc/RaTXklz6Gbq7nCQOK/vsXNyDuAmab0Jmrco09GETv+noAaJY+epxTE3yklmSPJrdB3Q6GB1xaP9sx4bGtNEx8MChpUOZhAUAAF1dnncwct1XhYZJcRe3ebRxSV2aZGbUo7+ziXGbfkiY1tpQcT2tuZy2SxKZfN1h6X+PLRoEAKBJDZm1Oo9mM2kdzV/29eF5/NYvQutUZ3atXSsv6Snaf3b1aAuA+tbuonTf/E/kNJtNWAAAMe7Lk1J3gjkt9odJdOFGrwXKumd4oqj7MO85rA38/PxcXV3FYrGpC+niampqFi1aFBMT07t3b1PXghDqjgx5mwICAxZszjUAGC4kSuZP9/rHTHFCoQEAQJO5MSTAb7qX9/SAVfISAwCALnt7yEdeXt7TQw6qAADqNZlbQ2Z6e00PlMgvGgAADIWJKxfM/IeX10dRuf+vJHFV5IkbDy5WX564aHrA19eh/nrSIi8v7+mRmQYAAIJg9nw88NXrcmNDpn8wZsx4r4A1yap7AACGy3KxaMKYMWO8Pok8cR0AwJAZOX1JYvm9ok3/8PLyk2RWA4BO9TM4+Uefyc8/s2MK83
py2nmAG7mZN5hTNpzMz08NH2EoTMkob3IpgmASDRenT4jH+2y6bOvIaf+ZcQOTrzY4GTGayfHwE7Q+sUEV6+O1UE45OjS5r5KkhEx6UvSZnDNfz2aWyGSZrb9v0MkDJ3yy6zZ3UJNR3lbvwnDXYOG88FB+fk5+fs4ZqTsBAGBBED0bIzhC7egKI6wAsH37dhcXFzc3N19fX1PX0mV9/PHHvr6+M2bM+KsdURQFJEk+qQ2DJNv+f0+qVg8kq0kDiqoF0qpppy33IITMG3OEaHXoaKInm6lNXhCW4RSz+1D/K9sXrogafEY6tkqZd93py/0LHQwZGxeIv3r7zApqlzTnjS2H4oaAzkAAQEnsgk0/+0fvWWNbuitkaUTvo3FT7l3JKLIOOnxolAUQva5foWmq/sHFLLjeyze8djBEnML1la4cbwHW/Vt/7a45siI0QeMdtXuubUbEIklIz4FnPrfN2JukYvrv/vcb8sCQdbGjPbZ5MEf6L3YvDDnC9pWuHE80dMb2WBHnUa9TZZ4oOK0ycMaOHwbAESWkiwCAvrwz4xpwvVy4rV6V8Fif78HsdX279wllR57dtZ0RW8tHRW72aGOEmr/g0A+hTDixIDPzduOu38quaMBhFvt6tooYzCcM5WU3wWNQy1PZfjvzRUxm7qp3T9x8sM+hlbsw1NTQUJUXHZBWfre3i2jl5378nh0pHSGArjHCCgC9e/dOSEiYN2/e1atXTV1L1/Tpp5/26tVr48aNT2ypz17v5SbOMrZ+MD38fVc3odDFwXVxqvbRfm36YleH+ekNbbJWve/kJhS4vL/+O30rfRiVMVMFwqmernzXJce0AKA+NE8wTCB0d3IYI8lqOOOqTPSOq9Dd1XVOkrr1SgAoVVKgwHfPoyqoq6mSOe8LhPOSKp54lwghU+jFdRzA5bIJQ1FmYU+u9c8Zadkaog8oS64DAICldR82k+3oN8nZ8HOZzoLrMKAq90By5i/AZhNQr8rIhInBImcOm+uxVDS46NsfaAAAi97WbCaTzSR6Oi+Kkfo9GmEk2IP4fFsCGNYDh/L5Q/nc1qd+GkrylDR3ot8kvuMo34l80JwvKAeut3j1olF0Tmqhph4MutsGALBxdGBbNvY2hMt8OKZIl8jWRUSdMbh4TX04uVWXHTUzcPvNoeFxK0a38VMv4ml+BKZL3rxPNWjuSq+2J1S0HOakDRQAaSzeGbYx5zcAqKlpY0Y3wWw9yje7CxosHbnWRP+xfh6vaVIkIbG4wg96Cl0ksAKAh4fHP//5z9mzZ+OPJJ674OBgrVablJT0pIZU1pr3PbcqKrVUGw1YrvP3KpQKhXKvIDteXta4VxUXHK+xAYoCACiJF+cLU84pFPvcTn4Rr2rZB8MlLFWhyFJcOShUxMnVALwxEfJzxYpzxdFvJMWn6QH0qetiIFyhOKcQU9L1aa0VUyYTuQfJr2n11P2He3xny+1CUxRZe/3tn/hIEEKmRN+jwZIEI4AR+rsvnDvW9rHDvUgCaACuaOeh1SOrkpd4ea3PNcBdmra0bsx4BNkT6n5/lmmoLVH1NFgAAQBAEAwAI4AhM2JWwLq8Ojafz7Vu2rbFNM16mrbwiM6/eFHud3e/WHxQAwC6k+JZS+TgFXdoT+DzGRtYIWgAABdGSURBVIO8mrTvexg7x6/1wdq2MF/rzQAKJiYUHxb1qaPB1vZpFm9oeRdsn7iT3x5LWBEoCg335YOm9EfdU9WDureuE1gBYOnSpePGjXsO76xRE4GBgRqN5tixYx1oSwrCTyqOBPEfvoevTRfPkGTpQZ8p9l2VRQFwBvJIAKjVVzM4vIb/iSlZLy7w3LGs8SRtgYJyE/IZQDoLXasUiipQx4nm7VNTlEo2RySrAAAABlB6dVa2ihzpYgcAXB6PBACqUk/acUkwFmed4wndWQAsoQdPka9sWQbYi2TZimjPR5O/smJjYIZYAGp1a6O6CCGTYzJ70zpNwxIPbBcXbg31xhSRaLZINFvkN6KNgUOCO3pWeMJOf8sTaUpwchlWlXFKRQOANvPbkv7OLs3GBQ3lF1S6+lY7elxvJhMMOo2OrgcApstQR7hZnHOdBl1RoQrYf3fi/r+y8t+gv8DXmw+6KgAj3RCNCUtLMNbc1gHU03Q9QH1JlOfwd0VRudc1mhu6GqBpI8AN+YrVJ3TOgYunsKtKSkouatpY1YI2aDUazW1DPQB9W6PRGBrGaugS+VpJ1DdNJ75CeVZOOcNlnKDp/RoKEySS2MxHkfGeTqPRaKppGupqKjUarQGYzqOGEqrMtBKNKi1TCQPc3LitX4Ku1mg0Gt09gPq7VRqNrppu9S5UW72Gvxuw84JGU5RRdB3Yjo5tLW2AUEtdKrACQHR09Ouvv/7RRx+ZupCuoKamZsqUKUaj8ZtvvmEwOjTdmbQiH5sXbeUpnkutnyzw/BcELxc25lhKFT9fSi2X+rAAQB2/rthnQxDvwRnV+mqWTcO/4m1YVpReD7xPJS6nREL3oPQxEpE9AAAYlbLF88SJlU7v8B5kY31BpEjGlUrcSQB9dS3LxgoAgGVFVuurWymD0WwSrargXHXlObk8KX7emPfXn29rhBghZDLMMX5jr0d5eU/3+mSnatDc9TNqNk2dMN1vupdnSHKrI3V0bqS310zRzOmLUmCat4sFc4p4zdtZ8yd4ek34cFtdwIbAZtMxdRmRgfO3F3WglCG+i6Y5XFk/Zvjfp++8Bo7z1ocLqnb9493h4yNKBgVKQ8YRDuO9RzJV0RPeXVQ4KsCDeXl7SKwKALgeIg+OMvL9IUP+PibyewAL57kRgfz/J1/gOcErLI0etXTlDC59uVB5D+gLO0NEM2eKZs78dJey1QxdXxg1dcKEDxbIbwCdHTlhwoQVJwwAAJfTdh0pBHvHpg/ievl1sOFym8bDe0XJsrQfezo8TPqapJAJEyZ4bSyE+vLE+RMmiHaV1HNFq5eOrto1c8L0yMtOiyLnNq451fwShrRVXhMmTJCcNoDuhPiDCROkea3eBf/jcNGg8u2iCRM+2VXuvGjzsrZmOyDUii7yo6umZDLZJ5988uGHHx49etTUtZix8+fPf/LJJ97e3h2Zt9oOkgEAFDAeBFmjWjZHlD5GljKXBwCQHROjBp/dEommuLK0Ov47oS+rh75xkJOiakmWVWMnlBHIhyGT4RKcdDa4KlU0RpLkLve3pZRbRcEq/5QDPhwAAJaNFVVNAZCgr6VsWDatlNGMkbpvdBJF7QizB+0eL+EBRcRI4V+5a4TQ88f2iE4fa/jNAH3YTAuABQln5hl0Oppgs5kWAOAcnv7gRZBAmi8AAFj939EGnQGYbGbDi3Wuh/S/HgatDmzYjfM/BwQePvuwf7+EnInw+ERVx9CTV0NblsKdsuHYlA0PN50Dd54JvGcwGB/OK+UHfv2DqNoANkwCIDCAhobf9g/wizvrR9+jgSAaJoyyx4Yfzl9q0Blogs1uCJRecRc7skijxTjpD1elLXaXFxXTo+b6D2+6j/CIudj85x2lhSXE2JW+j3Itd97hq/NadDckMOGsy
FBNEzaPJsy2uATTb+dFv+ZnerRyF+xx4fL8pQYDzWAy8edW6Cl1tRHWBl9//XWfPn0++OADvR7f7z6L3bt3v/vuu8uXL/+LaRVq06U7yYi04tRlVPyGLApAFTdPxo2WL3NpjJ7DguVxYl9PL+E7djZ2roKBJOcdN/K8Qm0EuJqlsBK42oJ6jzTLQ6bIlwlOSeIrmnROkj1Af98IVKE0OE0gSwziNYRRhqvARVWQTwHoFflqFzd+yzKaY/CceJWqUgoA7tdSJKudZbURQqZjQTAb42nDJpPNabLZansOu1k2YnLYbf1aqWkse2o9m38K61FvxOP7exKP/7yJYLIfpNX21WuSJTMD5u8saXPegk5ZWucd8OS5quWlP1p/uNijQ3/VEczHHktHL9F2d4+lVUN2VIBoZuQZnM6KnqArfOmqLatWrTpx4sSBAwecnZ1NXYvZqK2tXbJkyZUrV7766quRI0c+SxfGVNGwrKDSHcJWxjLV8UJXqY7PswIAcA1P3zH9wRz+7CVO+4RXDvgAaNMX+4hLbVj6akFUevTkFpP8NTLRh0l6O5K6pib9ZfIv3NRrXIUHgc8hAaCHh/TsOiFVEuM7Ww52UGkVJD8SzG/jRYLqX4IlVvKzITwAoEpifOemkPwe6ms8ybG9Ps/8lzHq9PBLV51fZ/s0a6dgKC8s0dAAwOC6CLrQ7E+tKvfn2wAAvRxHj+DiJAHUlq4cWAEgISHhs88+27t3r0gkMnUtZiA5OXn58uUffvhhTEyMaSuhtFq9FYdj1dZhvVZbDba8Nhs0tKkCFpf1FAuxGvVaDcXictpZ/xV1ARhYOz8MrAihZrp4YAWAvLy8BQsWTJw4cfv27aaupfO6efPmF198oVQqo6KiJk+ebOpyEHqBMLB2fhhYEULNdM05rE2NHTv2woULtbW1w4cPP3PmjKnL6Yyio6MHDhw4YMCA0tJSTKsIIfT/27v7oKbOPQ/gPySYI8WYdEhLxmhBZMewqwOovPW6SFq9LWhdFF2RKwXjBRHQtkBfBGwroTO1odoiarkGeukgW1dwHJE6OvJSdwMCa6B1ErsKUUFJxWo2xXKCod0/UGsltOKNngDfz1/wnJznfMk/fj3nOecAgKMZ+4WViFxdXdVq9dtvv52QkJCamoo7se45cODA7NmzGxoaTp8+nZuby3UcAAAAABvGRWEdFBMTo9freTyet7d3UVER13E4Vl5eLpfLP/7447y8vIqKijlz5nCdCAAAAMC2sb+GdaiWlpZt27ZdunTpww8/fOmll7iO86TV1dUNrgHw8/OLj4/nOg7Ak4Y1rI5PKBT23rrFdQoAcCDjsbAOKi8v37Fjh0gkUqlUs2fP5jrOk9DQ0KBSqdra2jIzM5OSkriOA8ANFFbHh5uuAOAB42hJwANiYmKampqWLFmybNmytWvXXr9+netEj5FWq12zZs2qVavCwsIuXLiAtgoAAACjyPgtrIPS0tIaGxt9fHx8fHw2b97MdRz70+l0CQkJixcv9vf3v3z58qZNNl4yCAAAAODIxnthJaJnnnlm69atTU1NLi4uPB4vJyeH60T2ce7cudWrVwcHB8+YMaOzszMzM9PJyYnrUAAAAAAjhsJ6h4+Pj0qlam5u/uGHH/h8vlKp5DrRo2tqaoqLiwsJCent7e3s7MzJyWGYEbzvCQDAEXXUl9V22WeqAftMAwBPDArrb/j7++/evbu+vv7y5csMwzzwaNK6ujqOcg2rrq7u4sWL9349ceLEsmXLVq5cOWfOnO7u7qqqqilTpnCXDgDAbswt+/cea3/UnfUnjrXevdvOfDA1cPW+R52KiIz1e96KXx29OmnLnkbjMCMAYFcorDYEBwcXFRXV1tZeuXJlwoQJaWlp33333eCm999/n9tsD0hISPD09CSiioqKwMDA9PT0yMjIS5cuZWRk4KwqAMAdl4/v3Ft/5c6JVcHSbZUFa7wfcaoB/Scbkj6p/T/xP4m7jn2SlLqnvX/ICM7gAtgbCuuwQkJC9u7d293dPWXKlKCgoFdffdVsNr/33nuff/4519HuCA8PX7hwYUlJydy5c3fs2PHGG2988803iYmJXOcCAHisLO2VuUnRkZH/nrT9cLtlcKynsfit+KiIyKjkYv0A6fclRS2NjPy3+NzKux+4euT9v8bHJ+9pHaCOwzv3nzYTERkb96THR0VExb9V3NhDRGRpK85IXB21NDIqNm37sS4iovMHs7eU6S13D36pru48ef9FVfBBgWqtt0VfVfXfQ0Y6nuz3ATAO8LgO4OieffZZpVKZlZW1a9eu1NRUV1fXd955x9PTc+HChdwGe/fdd+vq6p5++unvv/9eqVS+/PLL3OYBAHgyzF/nJn3Gvrbr86CB+g83J20XH80J7SnOyPw66KOCLF++2SJwJlqm/PtaARmKNyVsP7Lgs2gicg9Lzk7wnsgXO1Pj+dPapyxE7cXpmfVBHxW8Le0qz8jM4Jf/PVbQfvpr5+jK0sX0vyUZr+UenPtZ9ICFtVh+PbyrYAqRTt+o7+Jb+JNp4ErPT0NGeoh8OPt+AMYkFNY/YDabS0tLzWbz2bNn3dzcrFYrEcXFxcXFxU2fPp2rVIcPH9ZqtRKJZPDXqqqqjo6O+fPnBwYGchUJAODJ0B6rd19ZvsRHTBT92vKDMce0OdN1xzvCkouCpXwiARGRZeDK6cPHO3/q57teu2IiIqKJgmeek0qd75voUv3xy2HJxcFSPkmTkoNe3FNvjF1KRPwpUpGAgpaETT2iv0w0N1aVf99eHks3rD+yaV9uVG0uEZGzVPDckBHBk/kmAMYRFNbfo1KpDh061NfXd+PGjf7+/nvjVqu1uLiYw2D3a21t1el0x48fF4vFEokkLy9v1qxZXIcCAHhMLP0/WZin+IO/8F0Zy60fqc9imcjn3/sHzXhw0+qSiasS/uxBNEA0QMSz9WSA/vv24vGnOFvMv3mvmUDwFF23Dg0gCH79P5oUXe1X2yuyNxVf8QuaMXTEnn8wABAK6+9Qq9WVlZVtbW2j5d2MFy5cmDZtWnp6em5ubkBAANdxAADsikcWS7+F+LK5vu0njnetipVS1/Hadt+X/Gj6xH8e2H6ixRwcJCAiuqRvd1/y2cZob2rv3F/XT0STJk/+seOalWT3n2Gd7h9AuSdOm4NDBebTJxr5AXnPEbUOOa6lq/UsyeZK+XcHetqq6rXtjV8drDrLD85KDnO1MQIA9oXCaltTU1NJSUlLS8vt27e5zjICnZ2dEyZM2Lt3b1FREddZAADsSeD3r747tmUdDVKtytn8P5tiXtgvIDPN3fzpCjE5h23edjptS+SLk90n93sr9v1lBT8tPqLeW2Rpv0xrnImeWxwti8mMiJrKl0YXFUgHZ+QHb84NS9wS+eJkvuVH9+gPivycyTzkuJavdya+ZdlaV7Dk7oX+K1U7cyss0tlhm/dsWBcutTkCAPbl9Msvv3CdwREVFhbm5+cbDAaugzyK4ODgL7/8ksMltgCOzGwe2klsc3FxGS0XWMYYoVBoe8OAxWLl8/lERJabPWYSiEX8
+zZbzD1mekoscL37AStf8LSAf/esqqWnx3J36/1z9vSYBWIx35mGYTGbSSDgD7cZAJ4AnGG1rbW1tbe3196zMgzDsqy9Zx3CarV2dXWhsALAWOPMv1cr+SKx+MHNfMF9Y0M/wBeLbbROZ77YY8hMD0yLm6gAuIbnsNr2888/21gMwMizSlVrZt434hGhLFUud3+oOf23abqr073sFfF34cQ5AAAAjBkorCPh4R8Zk56/bfndi1WMfIsqK2Z5qOdD7a39OFaeWDgqFxkAAAAAcAeFdSSkEqFRf3NBdoofERF5KtKjSHtRJJQwRCRcuUvzbUfHed3JIoWMISY4veJAVogbEeOfUlqR9SdGuChFuU7OEBPyesXJZp3uvO5M9S7lB/tONut02pP7/urPEJFfSllpioxHRExITsW+BC/iyRRFJzXf6jrO604WK5VFRzXf6nSnytL/NMwaLwAAAICxBYV1BBixRNRVmf0FKTZFMEQhG1NkR7LV5xmJOxGRqTZfsSggYFG2YYFKuVLINlZW8xTqbfKQzMJ0YfX+/2IZD5n8XyRE5BUiF9UqIsNjC00R6YtMeWsjV3xgCN2ujHUnRiQLmS8T8oiIJLLQAClDPEnIIi/d1sjQpdn6+elreGrF0hWpDbLsjxReWIEMAAAA4wAK60gIhcyP3TWfFmrD0xV+a1JXmgo/re7uZUXuEiIiq1AWlZKyTkYmkkhFRAb1pmxt1NGadd3ZG9UPrARge4yGLm3ZES1rvdl9waA/UqkxeXl5Dndg1tRtMJ6rrjzVzfZ2Gy7qaw7UdHvIZCisAAAAMA6gsI6AUMhQ703WWFb4lSS9XBnSsEt9jlhTHzNZSLwQZW21MnwS28MSj4iYwV0YK8vyhJLhrt6zLMubdOcnKzE8IiKWR5OG+XgfyzKDn2dZFm0VAMaoW7ducR0BABwLWs8IiEQilmWJ2JpP1YaoWO3uShPRTZYVCkUM4+U/06COys6/yCyfn+FLROSl2K0UFsnlVlX137JqwvO6H+IQrMFgcl8R4kk15x7zHwMA4Khu375tMpm4TgEADgSFdQSEbkxfL0tEdC7/BUn+4CDby06SCqn3aNl/phc26GIvssKZQuO3rMdaVZZ4/4odWi2lqpbXqNIqYx/mCawX1cqS2LJT3bHGmyIpo2t+/E9tBQAAAHBseNOVbQqForKycqT/xWeEHhL3SX1Gg/Efe+cA4+4lEdIjzzNv3rydO3c+//zz/1AIgDEKb7oCABh1cIbVnliT0WCPq1jsdYPhuh3mAQAAABgDcNOVbRMmTHBxceE6xaNzcnLiOgIAAACAfaCw2ubv7+/m5sZ1ikfk7OwslUq5TgEAAABgHyists2bN2/q1Kmj8SSrp6fnnDlzpk+fznUQAAAAAPvAGlbbAgMD4+PjBwYG2traRtFdF9OmTfP19d2wYQPXQQAAAADsBoV1WAqF4ubNm05OTn19fTdu3Ojv7+c60bAmTpzo4uIiFoslEkleXt6sWbO4TgQAAABgN3is1R8wm82lpaVms9nd3d1ec169erWqqioxMdFeE167dk0kEs2fPz8wMNBecwKMVXisFQDAqIPCyoGWlpbk5OTm5maugwCMRyisAACjDm66AgAAAACHhsLKAb1e7+rqynUKAAAAgNEBhZUDMpkM1xkBAAAAHhIKKwesViuWDgMAAAA8JBRWDvB4PLw6FQAAAOAhobACAAAAgENDYQUAAAAAh4bCCgAAAAAODYWVA3q9nsfDS3EBAAAAHgoKKwdkMpnVauU6BQAAAMDogMIKAAAAAA4NhRUAAAAAHBoKKwAAAAA4NBRWDuj1eoZhuE4BAAAAMDqgsHJAJpOxLMt1CgAAAIDRAYV1tGH/uOuyvX/wCbbXxP7mKQXskF2GjgAAAABwA4XVsZhq8yJDMmpsP/PKVP3mCwEhcrn/jICNlcZfx43VGwNmJFYTEfVq818JCF0U6rsgo9poaw6rNv+VUPkrEQGygNRDRiIylK8PnR0qD/edsSC7xkREROfUsfMD5OEBAXH7DcM9fYvV718XuuJvvx7D8FX++ojQ0FWFejywCwAAAOwKhdVxsDVbX4jYoek2DndqUxiQuE+j1Wi0+0JrC8su3BnVF6QUdomIZYnI8EV22UyVpuHM0VXa7O0NNubg+adXajQ1Gt0Xck1BmYHIa0FWWfMZTfMZ1bT9hUdMRKbKbfn0pkbTrMlglXlHbIW5oI4NV5SdN5rYvsEB46H1KwrYNcUazYEUGV6JAAAAAHaFwsoBvV7v5uY2ZJgJffOo5oBCdu92rN7qjFXZNSYynchYsaWGJfKY6cUQUa/pJs/Dy52IiFrzMhoidr0+uBOrPaUPCA9liLzC5UyzRk9kKIhdX2JgWb06LlZ9kYiIeMSaDDW1emaev4SIpF5eDBGx3SZGImXIeqam2UseLiQSyhd5aU5ph8Ygz1h1rUYV4XEnp1Wv3q6LSIsQdRlMOL0KAAAA9vb/t/LURK+7o2gAAAAASUVORK5CYII=" + } + }, + "cell_type": "markdown", + "id": "e248f46c-fb41-4efc-88b1-bd7f8adbccbb", + "metadata": {}, + "source": [ + "# 3. 编译单输入模型\n", + "在开始之前,我们需要获取模型的输入输出信息。
\n", + "使用[netron](https://netron.app)进行模型可视化,将你的模型直接拖入网页即可。
\n", + "点击任意输入节点就可以在右侧看到模型所有的输入输出节点信息。
\n", + "![image.png](attachment:1855346b-3785-4867-9c92-0e811d00b9ab.png)
\n", + "\n", + "在编译`kmodel`之前,我们需要设置`CompileOptions` 和 `PTQTensorOptions`来指定`kmodel`的各个属性。
\n", + "\n", + "然后需要设置`model_path`, `dump_path`, and `calib_data`。
\n", + "校正集数据的格式为:`[[x1, x2,...]]`。
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c957fe20-99c9-4a54-bae8-38361a8f8830", + "metadata": {}, + "outputs": [], + "source": [ + "# compile kmodel single input\n", + "model_path = \"./test.tflite\"\n", + "dump_path = \"./tmp_tflite\"\n", + "\n", + "# 校正集的数量为2\n", + "calib_data = [[np.random.rand(1, 240, 320, 3).astype(np.float32), np.random.rand(1, 240, 320, 3).astype(np.float32)]]\n", + "\n", + "kmodel_path = compile_kmodel(model_path, dump_path, calib_data)\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "015b6422-3bf1-4f28-93c9-dc6ef6e27856", + "metadata": {}, + "source": [ + "# 4. PC上推理单输入kmodel\n", + "需要设置 `kmodel_path` 和 `input_data`。执行完毕以后,会打印出每个输出结果的shape信息,同时输出结果会保存到`dump_path`目录下。" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7f617edc-781c-4b8b-b45d-fef2f0b36a46", + "metadata": {}, + "outputs": [], + "source": [ + "# run kmodel(simulate)\n", + "import os\n", + "\n", + "kmodel_path = \"./tmp_tflite/test.kmodel\"\n", + "input_data = [np.random.rand(1, 240, 320, 3).astype(np.float32)]\n", + "\n", + "result = run_kmodel(kmodel_path, input_data)\n", + "\n", + "for idx, i in enumerate(result):\n", + " print(i.shape)\n", + " i.tofile(os.path.join(dump_path,\"nncase_result_{}.bin\".format(idx)))" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "80216eab-2738-4167-ba03-7a0e218c8d5c", + "metadata": {}, + "source": [ + "# 5. 比较TF结果和kmodel的推理结果\n", + "\n", + "这里,我们使用TensorFlow框架来推理`.tflite`模型,然后计算TensorFlow输入结果和kmodel的输出结果的余弦。" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4003a7ba-a1b4-4488-b3ca-d9a00a55e964", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install tensorflow\n", + "import tensorflow as tf\n", + "interp = tf.lite.Interpreter(model_path=model_path)\n", + "\n", + "inputs = []\n", + "for idx, item in enumerate(interp.get_input_details()):\n", + " input_dict = {}\n", + " input_dict['index'] = item['index']\n", + " input_dict['value'] = input_data[idx]\n", + " inputs.append(input_dict)\n", + " \n", + "# print(input_dict)\n", + "\n", + "interp.allocate_tensors()\n", + "for input in inputs:\n", + " interp.set_tensor(input['index'], input['value'])\n", + "interp.invoke()\n", + "\n", + "tflite_result = []\n", + "for item in interp.get_output_details():\n", + " tflite_result.append(interp.get_tensor(item['index']))\n", + "\n", + "for index, (i, j) in enumerate(zip(tflite_result, result)):\n", + " print(\"result {} cosine = \".format(index), get_cosine(i, j))\n" + ] + }, + { + "attachments": { + "562afee7-c078-4323-bc19-49e03c80d0e9.png": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAA/UAAAGgCAIAAABkBdDDAAAACXBIWXMAAA4mAAAN/wHwU+XzAAAgAElEQVR4nOzdeVzM+R8H8DejmRwzYppUBh1EVhTtSuuoXWXp2JVByq5yxUossVapda6jdYR1rWPbX7E27SpHW4sKI0TR0hpSyJSOVTOkGc32+2OSVKxSTU2v5x89vn3ne7xn+vad13zm8/18W5WVlREAAAC8NYlE8pZLamhoFBcXN2gxAABVtFF1AQAAAPBaBWnC1EdkYG5toPViVm6q8GYB28TGTL9iKbk4JU7410Mpsbv2s7Yx12dWrH47KTVTLCkmYnL0e5r178WteEiannTlwZNKu+KaDDPTZ8jFKUJRoXK6xoqkmVeuiLIlJQrS7KBtYGZpoqPcpLzgdlJqZr6kmKgdx9BkkFkP9iur3Uu6cDH14RMW18jyw2Em3Bcbl4tTk9Ky85+UEENTm29iZmbAVj5UIBKmiuUvN8DUN7M24b66vEE/M1N9NkP5CghFhVVK5ZpYm+lLRcJUMXUbZG3MJkVB6oXUgtJKG1XOJ6ICUVx8UkahjMU1tBxi/eJJATRLrdB+DwAAUCuN2H4vjVxgt0ZI+i7bDi22ZBIRFUQuGLtGKDeZfShkigER0TNR2LKvgs8VVKzDtfbZtNrNpC0RicM8XYLTXm6OaWzv862/oBeTiFK3uMw4KH75GMPSN3KbgCsO83QJFimnX1PRJ2uEioq12CYTlm+aZ82tui+m/lCftWsEJkwikibtXvDNT6nSF2sx+fa+369w7kHVy2Dq2fgErRAYM+lcgJ1vjLRS7TarE9Z+XG35Hvb+36+w5xeEz3IISnm1VIaZzy973DID7HxjmC7bji+2rFo8EVc5/3aY95fBSdKKPa04vtaeTQDNFdrvAQAA6iIlJeXNC5ibm7/zTgoK8oiIxCfDoj0snXWI/goNuygnooJH+UQGRPKkHUuCzxVwB3kumj3KgDL/2LF6vzB4yQ6zQwvMypugGSZuG/w/YYmvnwrdFRETtFhTP2ypddvyHVjO3OYxQLkYx1CregGvwXde8a1T50dJYRt3CQ8Hh4+x9ur1cl+2bf7+Y3dQ+LngnTGjNjqypac2BO5LlerZ+Cycat1Fcv234KCImKBvDfv/6GlQ3orPtPTe7mMu/TtqZ9DRuOB9caNWl8drto3Pd+NNiIiIpW1csfvy5a//Hhx8LGbD3k9sAi1t523Uf0x0Ozxwh1D2gdda195EbAMdoswail/6jb3yyw9Wl95ElPRbaJKUaTZz+3JHrlz8VybLBuEemjXkewAAgKZKkV/wD1FbNvuZMOx3kfNMvbiDUZlMNvOZVPrPP3IiplwYeVJMba29VnjZcInIwGuFJFWwJikmKsnbzLo84HP0e5uYcE1MBpkxc8euORcdeW6utZ0ywTKZXH3DbiwiorZsbs0dcmrSVs+kn5lBv95PTu8XniooeETU6+W+zLgm7LTw8L9E+bkFRPIzx+MKiOu8cIXbUCYRGSxcUZDmuksUFSny9DEt314HPTOTfmTS9lb4MZHoUb6UqLy+tvr63QxZRMRgcit9/ChfvjQp7FimuEAsJSbX1NqaiFhCJhFxe1tbW7+2eEYHfT1DwzZExGRzK5K8vCD9lrjU2dLc3uCtXwaApgn5HgAAoKlSPMmXEvV1EjDC9x8LF35sEn5WajDe0+DX/XGFEikRN1ecXUxkYta/oi8Nt/8gPiXdzRAXEulU2RzX0tyAzonE4oIX+Vku/M7FgYiImB+vTVht87aFFd06czSSJRZGCuXE7G1iVFGwOG5vcKY8MyleRAz9/gP1SXEr876cGL379XvRo51hYGbKpbR8cZacTMv77ouvREYWFYhiwkUK4hobaL/YXsHJJS4niYhIR7DtN1/L8k8g8szY4OBr+SJhnJiYJoMsa+xJ9Fr3wrxdwoiIGCZeISGexmTp5mVzISjuVJD3qWB9a8GsObPsjdH/Hpox5HsAAICmSpr/REHMDn2cRtuE+0UHByRlkqXvWDNRNJG0QKogbmsmEZFcLlcQKbOvQi59RkQsZk2N8U+kciJiMV6GVxNHH2cTJhER36QWheXG7foujoiIa+I828/55ZW+4qSjYUkKIoa+/bfbfM2ZpGCxGEQklVe6VFau/KX1yzmiiDVriIiY+kO9ls62rqiPbS7w/MiASUTtehtWekaZZ8MyFUTENJmwdtNkg1pUTkQ61p6TrblE1IbTX/kRiO+8NswyKSY8PDwqThgWcCufeWgF+uhA84V8DwAA0FQ9kUqIiM3UHzZ+lF5MeLqYbeczSo8tbksklUqJqEtvMz1KzTxzRuRpYsokInlqtDCbiG/Wp3pnemlS5KlMIv3e/SryOFN/iJvg49oXxnde8a3ARIurz+e+0tDNsPT97Tv9vVMWHBX/lSyW2umzGQZmfdl079Yfp8SCSfpERIVx0RekxDDrb1qxKtNy9nYfazZX14D7aqpmGtm4TbCstnumzYo/F8kDvlgel5maWqCwrkXPIiLqaDZqgsCgysy2+paf+lh+Kgib5RKc8tf1dLJ596snAFQE+R4AAKCpKpY+IWIzOxDTTDDB8o99+U4TrdkkZrGIiiRSImKYCT63jvpOuP+r6Q9Hf9iVMs4fi8tUsK0/dzKpiLwKsfB/uwoUD5Pi4lJziWszy/NlcpWLjq8JulKes02cfJ2VHeIVGX9sXiNiERGRgf3cyZZV27Lb6pn0MzGosWYG29rb1/nCgsijQfvtQ3zMmdaT3czidqVumzEjfZS1jjz1VKSwgLiO7k4vW/2pA9/MpFcNG5NeCw9aH6ecZpoJfEZX7JPJHb3I69SVNedCNxwetadWTfh5Z/atL+Aop7WtPadayyIC1iRrD+qtzZJnRqcTMbja2m/eBECThnwPAADQVBXLZQpisVhEZDBpW+wk5Vwmm0kkkz+RE7Ul/U+Xb6etG3ZHxxwUERGTa+a8eNFcx/LszGzHJBILD+4XEjH1zOxnus+abFO5t7pYGBlePsm0NvMpz/dUkBobmaqcNDfxmly9Bf2N2NZeM63PrBKG74kUbBfoG3tu3MTa8P3+mGNhqUTE1LecMMvX+636v8jT48LTX2xVbl0p3xMR13muZ9Sl4NT/7YxxXGv/9oP/FIpiIkTl0z3YzlMsJQ8zM+JjkmKJiKitgc28RQL+W28NoOnB+PcAAAC1oxz//m3Gx2zE+9fKpeJ8KbG19dnMqg9IpXI5sx2X3bbmNRuNvECc/4zF1uOya9WdpnHIpQW5BVJic5tmeQC1gfZ7AAAANcBk6+vX2CLOZLNrN7xMg2Fy9fX/eykVYbK5/KbyQgG8o9b/vQgAAAAAADQTyPcAAAAAAOoD+R4AAAAAQH0g3wMAAAAAqA/kewAAAAAA9YF8DwAAAACgPpDvAQAAAADUB/I9AAAAAID6QL4HAAAAAFAfyPcAAAAAAOoD+R4AAAAAQH0g3wMAAAAAqA/kewAAAAAA9YF8DwAAAACgPpDvAQAAAADUB/I9AAAAAID6QL4HAAAAAFAfyPcAAAAAAO
oD+R4AAAAAQH0g3wMAAAAAqA/kewAAAAAA9YF8DwAAAACgPpDvAQAAAADUB/I9AAAAAID6QL4HAAAAAFAfyPcAAAAAAOoD+R4AAAAAQH0g3wMAAAAAqA/kewAAAAAA9YF8DwAAAACgPpDvAQAAAADURxtVFwAAAABQzzQ0NNq3b6/qKgBUA+33AAAAoG4Q7qElQ74HAAAAAFAfyPcAAAAAAOoD+R4AAAAAQH0g3wMAAEC9KhAGz3FxcHBwWBJZoOpaAFog5HsAAACoT5lHdv6h43Po+PHja5259blheepPS3Zdkr/bRmTpx3esXOjlMcXLd+2++CyZcm7W8fVbjmdVLJR3Zsf6o1mvna9ID1+7Pvx2+bqSK/tWBsfm/deO8/46tm/TyvUR6e9WP8B/Q74HAACA+iTOLTA0M2M3wJaz065kFL3LBmRpwa6TdqYbOc5f5j97VLuLKyf5hN8jIspPjdmx2GtlgkS5nCQtJuav/NfOZxhbGaVv+WZfmoyoOHHLsjCZ2WDem/YrSdzkOmlloqzHcPtBb1wQoD5g/HsAAACoVwr59R1fOBxgce2Wh3hQ2NoNoZcy5R37O3kv9bHRp4K4IN/gP7KkxBy06NAsyeol+1ML5HJim9laM/8WpmZLmb0F33zn9QGbciOXzD0geiaTcT+ct3KpPZ+IZMLvxzpsZfWetHHjJINaF5YXtfl/rFmHgtyNiIiMewWzHzss2hXvtGYEERl/ZHpj9aJwo10C/isr1TifP2HVvLMuS/cOmS9ZGW++LMKW8+IRSfwmv5TBQfOsWUR5x1ZvKZqwSpC33j9+yNZf5pmy6vR6AtQS2u8BAACgfjH7zw45HhkRMre3cFtgZNupIdEJx1cOEm1YEy4meia+VWj9XXRsbORa+3ZScQZTEBwbG/2Tmyz6usmKiOOxh6Yxw/dFiYlIx3ntLxERkce322TsDEsiIiKW9cLfjkdG1CXcE9Fficn6I0YYVfzOsrK1opSUuwoiYjFNZwd5MXd+vS9dUXmd183nCfy+5h+e4nPGYtmSEZyX8znG7LzwY4kyInocHxVbotNVlng0RtY5LzzA13/tvvgsAmhoyPcAAADQMBS3kq4wbSfacBnENBU4m94SXpW+fJRRaUmGfv9++nKZnIi4ZmYG+eICBVFWTNCCL1wnui75PTO/6Mk79rsnIplMJmOxXmlDZ7FYMonsxW98l9Vf9zjiG5xSZV81zy8tkSuIioukxa8ubGff9WJCsowkCfHpg0YNbpeXnkVdzeycPKc56l5cOXt92iufEwDqH/I9AAAAqB6TwaRSIiLSYJGCZCSN2bbh1sAVIb8cCvEexKJ3j/fEMujJF99MqxTH80Q38noYdX35SYNjt2TxwFi/zalVdld9flboii1FUw79ND5r3fLwVy6u7WHv1CP+2MWsmOg0q0+HK5v2+f1GmPcxtfrczUp69UL2uz8VgDdBvgcAAICGwehtOUh+5pe4AgXJb4dHpvW2HliLy25L5MRms5lEzPIZzA4dmeI7mXIied3Sfp9xbkYXf9gUn6cgIpLdC19x4KHdJKdXrnjljJi3xCI9odooN6/Ozzu8cmeR22pPU3OvNYLsLSuOVk74fKdxponb/UOz7AUfsoj4A804aSkpMiLKSr+j4HfrXKfiAd4a8j0AAAA0EKa193LnJztdRw53WHzFYMFSgf7br8u2nepGYV84OLu4bBNxuR2YRJYCT+1TcxzsHL7YlVSnevjuQRvs7610+Nhh7GcjR07ez/LYvdqOU2UhzvDFXzvWMMrNy/mS2HU/ZDktnWrMIGKZzlpsnxa8JV5SacmR7iMeJbPGuZkziIjMp31tdd7XZYrH2JlHeHNn27WrU+0Ab61VWVmZqmsAAABoTiQSCRGlpKS8eTFzc3MNDY3i4uI3LwYNQUtL600PyyRZj4mny2nE8WxkeffyqBOfV/XTBED9w/iYAAAA0MKwOHzdxt4lrwf/v5cCqA/onwMAAAAAoD6Q7wEAAAAA1AfyPQAAAACA+kC+BwAAAABQH8j3AAAAAADqA/keAAAA1M3Tp09VXQKAymB8TAAAAFA3z58/LywsVHUVAKqB9nsAAAAAAPWBfA8AAAAAoD6Q7wEAAAAA1AfyPQAAAACA+kC+BwAAAABQHxg/BwAAANSNhoZG+/btVV0FgGqg/R4AAADUDcI9tGTI9wAAAAAA6gP5HgAAAABAfSDfAwAAAACoD+R7AAAAaDyiiDVhKaouooEUp8ceTsxSdRUAyPcAAADQaOSZV85cz5OruozXkt2NP3Yxr44r58Xv3RWbpXj9AveObfkhvtrW8+J/8Pf92tf3a1//tcfw8QDeHfI9AAAAQLm8hB3rjqfX/3YVeYkH/V0n++07fVNS9aH8lJgbOrYCgYvAcbQ5r/73DS0O8j0AAADUgugnbxcHOzs7O4eJC3adKyAiKogL8nSxs7Ozc1gSIyV5euSa6Q52tnYO09dEpsuJiBTiuI0zXOyG2zl8sS9FRkSkEK5x9g7PJSKSxy6xWxIjJyJpapjfFw52dnbO3mHpRIWp+5d84eLs4CDwDhYWEJH8yq4ZAgc7Ozs7n/C6lP44ccd8D9fPHBwEHv67EvMUlHd8pe+BNOWDaQd815+REJEkYZ3HVA+vDfESIsm1UP+pYx2cXOcGlzfMpx9dOXfKWAensXN/TieSpR9d7zXRwcHJdWV0HhGRNH7dF2MdPvPw/SHxlXb6p+k3c8zn+wlqiu9Fkmf8vrZWVoOtrAbwWXV5YgCvwP2tAAAAoBbyMzP0px3a5sItuBL8lV8gd982wb/iW4XW30X7WhIRiXYF7JJM2HP8U23x0YA53+7vc8CLeyxow03LtRF7zNpkhn31xfWaNyxP2hEY2dYnJNqGS3I5Qy78bvWVfmsPrTWglOAvVu23HexrkH0rs/ei46ttmG/oA/MGkpvxKdqT/7dhsPxm2LJFM2W7fxvBStsek/a5qSmlxfyeRqs4dI84g9yX+Vix2vE4ecfmzgvX8dt6wCw/arnv3O3833xMs4RRedY/HZzUVUYcWYK/1y6av+XgcG2JhHgkIWpnPm3N14MpZaf3onWmfwbZvojrHKupXxFdW19DVaXS3Mdp4QEr083txrlYIeDDu0P7PQAAANQFd5CnoO914VXpy1kMoqzz56UfChz1mcQ0cHT+8J+kpGxKTUo1tBeYsYna6mtzXxNgFbeEl5i2E224DCIGk6m4lXRJfOvIAleBi+uqOOmz/Ozi8gWZyh3VDaOjth6PZzRino+9NDYmzdRpBMXEpBHlXU1+ZjXclIiI2vONe/D5PJZEGHXRdNo8Oz5P13zqLHtpbEyagohYmhxtDofD41BidLz2hFmOvTicTnx+pxfb5/N4PeycrFjporfrS88aHBC6df4nppTgP2l2KPrfw7tD+z0AAADU1Vu3E8qqzmAySSZ/9TpbOVHl7C9naDt9G+HT7+UcKdWfjhyWXCZjmI5zZPocS3EzSciy9rRgUOVONbJiGbFY5SWxWGy57NVnIZMXyzRZNX9cYbVjVX/Or8Hh9eLwe
pmaD+545+O9MVnuU/m1fzoAlaD9HgAAAGpFLskvkBPJ06Ni0npbmrNfeZD/4Yfs8+HHxHKSi49Fnu9saalHg6wHiY/uF+YSEZGyaw1D36BbpvCs+GXCZ/S2NpOeCY8rUBCRXE69rc1kfxyMEcuJSC6V1tOQOwqZrJSIZGmn4/JM+xoxyHi8Oz9284qjWfaOViwiDqejLC9LeQksz8Kia0pkbB4RydJi4vL7WRi98r0By9TcKD025k0D5ryBLCvlSpaMXrwgRJT3IE/RkdO+zs8NoBza7wEAAKB2smMCxv5aIG+rZzt7hYBPVLlPCcPE81uvVWtmOGyWMw1svb71NGEQjV60PHt1kKddoJyorZ7TeCaRvrO3Z9K3U0buJhaLrecgIGJaz1t+a23QF5+sljO5owJDfOctF6wNmuGwQc5gGkzYtGeqST2U/jjGf0yydjtJPg0O2ObIISKe0+yPdromOv1mTkTEGSYYvsvPwSmU09k+aN+swMk+vi4jd7BlEqZ9wE47zqsb409aPS/FZ5J9mDabWCNWH3KpRSGyhM0zv5YFxG0dHO016UCuNpseiqXmc7c6daqHZwktXKuysjJV1wAAANCcSCQSIkpJ+Y+7NJmbm2toaBQXF795sWZHuNwhzCxkmwtX1YW8iZaWVg1z7+1znfpwftQ846fE41VkdVnKWtfNXbce+PxFtxiFTPKPhDrzOMrWepkk7zHxdDk1bFCpOC/vKavSBt+STCIhDodFRCTJy3os4+jyObi4FuoD2u8BAACgJWFxeO2UU7LEtVPWJeST2aytiyr1eWewODzeK8vrvnGD7XgvNljLOio+EXB4/Np+OgB4PZW13xcWFoaEhEilUh6voe7kkJGR0blz56FDhw4ZMqSBdgEA0Kw9efIkJCSkqKiIy63/ttirV69qa2s7OjpaWVnV+8ZVq4W33zcLNbffE5Gi6tg7Mhm95hJZgOZKNfl+w4YNsbGxxsbGnTp16tKlSwPt5f79+1Kp9N9//9XQ0PDz8+PzcTk6AMBLmzdvjoyMNDEx4XK5Ojo69b791NRUhUJBRFwuNygoqN63r0LI903fa/M9QAuggny/d+/es2fPent7GxsbN84eg4ODb9++/c0337z33nuNs0cAgCbuwIEDcXFxXl5effr0adAdlZWVrVy5Ui6Xb9q0iclkNui+Gg3yfdOHfA8tWWOPj3nhwoXff/89ICCg0cI9Efn4+HTo0OH3339vtD0CADRlV69ejYiI+Prrrxs63BNRq1atAgICHj9+HBYW1tD7AgAAavx8n5SU1KdPn06dGnvwp+nTp588eRKNKAAARHTx4sVevXrp6r75msH6ZG1tffr06UbbHQBAS9bY+f7u3bv9+/ev4YHSht2vsbGxrq5uVhbu+gwAQOnp6W/XX7Ho8pHwywXVZtf+jO3q6ioSiWq9GkBdPX36VNUlAKhMY4+PWVZWpq2tXWXmvR2fjboguBgyuWNdNyu7mxD1sI9g2JuuD2vTBoOBAgAQEZWVlb3VBbWlopiNezT7Ct6vNLhOtTO2THR027Zfzt4v6Wg2bqGve//XnclxuxVoTM+fPy8sLFR1FQCq0djt9zXq8cXuP4LG1zncE1HOnxtXH/+73goCAIDXqHrGlt1KuER2C9av/+oD0XczgxJVWBoAABA1kXwvu/rz6l+uE5EodPEUl1E2I2xGfbEsPE1GRFSQ8P3siaM+trEZM9Fna0JuKVHp9R/nro5Vfl+c+P2UTQkyIiIqjF490W3ilJWxRSp7HgAAzZpM9MuyKWNsbMZMWXZEJCufmRuzcuKoETafzfg+9j5RpTN2OVb/6asXOlmamAwb79T30f2Hsho2DAAAjahJ5PsS8fXYmzlEdO9yZJHthqMRhzcMEfkv/PEeERWlnr3cxfvno4e3TO9yZubnm64TFYrOXr4nIyKSPbqVcD1XuRGtITNWrlq/wmv4u3wPAADQYsn+XPZFcJHT94cPf+9UtPGLZWfLk3oPB7+fI35a0Pfs4lnbRZXO2FVXT/3553t2AhvcKAgAQMWaRL6vjNVRtyNXp/8EpwF3r4mUl3AxOurqdNTpZef31fjcY1HXX3ddV0cdE6MePXTw1gIAUBcJxyJ1Jy8UmOromAoWunaMPXqRiIh0eg/sr8PtMfzLOe/fjU24X/O6stv/mzkrZnjQBqf6vw0uAADUTpPL9+U6dNRUUEmVmVqaHUtKZETUhmSvpvy21OAj8AAAqDVZiUzGat9W+Uvb9m2fyV7tacNgaWk+k9V4phVHzfX4scvyw37D8AUqAIDqNYchZRSyZwqiNjLRnwk5/b1NNHV76Px9LbmIur98I2F17PgsN7eICO8tAACVyeXyR48e5eTk5OTkKCcePXokEolsbW1fXZA1+P0PAqIj77lN70H3IqPvDXAcQHSv4uGiP8NjWR/s7k50+cWctMv3uO/31ymKClxdNO2X3SNxAoYmRENDo3379qquAkA1mkG+f/bo15nDL/XoUJRbOmzVfqeORJMXjP91/mCr7T00c0Usm/FE1NFWYL9xkc2IbR25TsGHF/av6WmVlpZOnjzZ0NDQwMBA+VM5wWKhSw8ANGOPHj2qkuArTxQVFenq6urq6nbp0kU5YWJiUuO4gTruKxde/mKU1f90qYjeX7jbTYcoR7O9aM/0UTHti3KLekzesvL9NvRiDAPRj/Oco2z/iFv0KPbsvUuXP7P4gYio46gtcauH11hnWVkZh8Pp0aOH8vRbeYLH4zXUqwMtFcI9tGStGnlA4vnz548aNcrKyuptV7i7/bNJOQtO+/Z5Sjo6lRqHSmW5uUWsjjod27+cU/RPkWZnHdZrPrPMmjVrwoQJz58/z8zMzMjIyMzMVE7o6OhUZH2DFwwNDVu3bqqdlwCgJSksLKwxuFdMaGtrV2T3yjleOVH9liNEtHDhQltb2w8//LD6Q7KC3KI2HXU6sl6Zo2BVzBJtGjXxkV/y2uH0tEjG6vi6U24VCoVizJgx48aN69KlS7t27R49epT5wr179+RyeZXEr5yosfimQCKREFFKSsqbFzM3N9fQ0MCt01VCS0tL1SUAqEyjtt9nZWXl5eXVYcW2rI46VT6Ht2Hp6OtUmdPxjbdradWqlZmZmYmJSfWqlHE/IyPj/PnzoaGhyl9rDP3dunWrQ/0AAK9TXFxcPbLfvHnz+fPnyjlMJrNKcO/Vq1flEM9gMOqxHha36pn05ZzSy987z9yTazJ3/2AiovYda/vt5/3793///ferV6/27t174MCBI0aM+OqrrwYOHKhsebl3754y8QuFQuXE8+fPqyT+bt26GRkZNdncDwDQFDR4vs/KykpISEhISDh79mxubq6FhUV+fn4t1jea8/v5+imztLTmK3D5fD6fzx82bFjlmf/++29FA39mZmZsbKxywtjYODs7u3ru19XVrYcSAUAdKSN7dna28md2dnZubm5FmpfL5VVa3M3NzbW0tD777DPlr23btq33klq1apWbm1vr1dq8v/C3xIV17dPYunXr7du3K6eTk5OvXr165cqVsLCw5ORk
ZdxXmj59ert27ZSLFRUVVbTxZ2Zmnj9/XiQSZWdnl5aWVm/vZ7PZ1VtwAABaoAbJ9w8ePFAG+oSEhPz8/GHDhg0fPtzLy8vCwmLbtm3Xrl1zdHRs5BrT09NzcnL4fP5bLt+6dWsjIyMjI6Mq8yUSSXZ2dkX3nqNHjyqn5XJ5165djYyMDA0NDQ0NKyY6dOhQD9UDQBP27NmziuxeOcpXTPB4PF1dXT09PeVPHo83dOjQijSvkl4ExsbGN27cGDt2bK3XrGu4P3ToUOXwbWFhYWFhMW3aNOWvV18ICwu7e/eurq7uwEoGDBhQZWuFhYUVjf337t07d+5cZmZmu3btbgqkdcUAACAASURBVNy4oWxzqbjUSqniAwM0FEVSkFsw+9sQL1NVV1IHCqL6/A4MQPXqLd/fv39f2U5/7ty5/Pz84cOHV2T6yosNGjQoJibm8ePHnTp1qq9dv429e/caGxvr6emNHz9+woQJ9vb2ddsOh8PhcDi9e/euMv/Ro0e5ubkZGRl3797NyMg4depURkZGRkYGh8OpHPc7deo0cODAHj16vPMTAoDG8/jx44qm98oT//zzz7179549e1Y5vuvq6lpZWSnb45W/1m//mXoxePDgkydPPnr0qEuXLo2zR6FQ+IYTrzLHV/xaOe7fvHnTyMho0KBBli+0atVKS0tLS0ureu7/559/lN+7ZmRk/P3339HR0crpTp06VcT9tm3bDhkyRDndUM+2BWL0FyxexDSu/w1Lz+3a8MB2xaSG+mYm60dXl0uC47sFjX2Jd3Fa+PYL/DlTrZSfPSVpoZs2R4mop93sxR7mnEYuBtTOO11fm5ubG/fCP//8o2ynHzZsmLm5+RvW2rt379mzZ+fMmdOzZ88677pWtmzZcufOnW+++YbD4fz666+//vqrWCweP368g4NDtRHi6plYLFa+u9y9e1ckEj148ODu3bu5ubnK0K/M/RUTHA7+owFUIy8v79SpUx07dqwe4rOzs1ksljKsV4R45URJScmQIUM6d+6s6vLr4sCBA/Hx8TNnzuzTp0+D7qisrGzFihWlpaUbN25kMpl12EJSJTdu3FCmfB0dHRcXl/fee+9ttvDw4cOK66zS09Pv3buXkZHx4MEDw5rovPFSLiVcX9toxAdnzMj0Ov6NZW1XfNtvxorzsiQcvm5jjqQnyzoTunnLzmMZfVfFHRB0IiJJ7EKXHzp/vWYCM+ob/wefR2z9FCNKwTupdb5/8uRJXFzcjz/++ODBgzt37ti8UKWd/s02bNgQGxvbs2fPTp06vc2ZtG7u378vlUr//fdfDQ0NPz+/yp1zUlNTf/3117i4uKKiImWLfmP22iwpKVEmfmVj/90X2rdv37179z59+phUghG+AOpLTk5OVlbWw4cPs7KyKk9kZWW1bdu2e/fulZvhK0+oZe+OXbt2PXv2LCoqysTEpHPnzg1xKk5NTVVe+KStrR0UFFQv2ywuLk5KShIKhampqZcvX87NzX3//feVif/999/v3r3722+qtLQ0oyYymczMzKxr1669evXq1auXiYlJr169qozgiXxflSI12G0De02Ip1bMGp+d5wulcjnTYKit3gPhlSwp6Y1atM7XRodEP3kvOXxLKidmZzPnuX5eQ7lEJD4VtGbbH9efMA2s3P0Xu5mwSfS/BQEHUwvkpDd++1r2BtfdmWw2kzVg3p75HUL9gv4Qy1jMQbN2rbB/482SX5Pv08MX+oWmSYlnKlgQ4D6AIxNu8U0dsdWrUrtkTuKODTtj0oo6mjnN8p1qxSPZtX1+22PSs6XEMRry+deLP+HT4/gty/dfyMyTEs/UftbXX1rxbof67yf3QHfTNlnHVu0smrjMvQ+LiOhuuP+P5LlGYExE1/b5njYO+KrvxR/CZB92jZt1zOrEAUEnIsmxuaOiRpzYJehEkuNzRx4d8eduARr84F28bf+cTZs2FRYWnjlzRigU2traDhkyZPHixdbW1nXb66JFi2bMmBESEiKVShvi0jElJpPZq1evoUOHDhkypMpDZmZmZmZmRHTu3LnDhw+PGDHCxMSkT58+W7Zs0dTUbKB6KmhqapqampqaVu2lmJOTExUVxWQyRSJReHi4SCQSiURdunQxeVX1qwIAoIJYLL5x44ZEIqkc35XTWlpayuvpu3btyufzR44cyX9BLRP86xw5cmTNmjU6OjpbtmyZPn16SEhIUVFRQ5yKNTQ0dHV1HR0dazEm8n9p166dsv+n8te8vDxlu/5PP/00d+7c1q1bjxgx4sMPPxw6dGj//v3fvKk2bdooE3yV+UVFRSdPnmzVqpVIJDp9+vSuXbtEIlHr1q0rx/1PPvmkvp6RunmWn1FsvTzS17IwZon7Ts7qQ8cHyYSrpgQfEdnMNsnPzNCfdmibC7fgSvBXfoHcfdsE/4at2Zhpvfn4NiNp3HczluwwObTYMj/9lva0Q4dcuKQg8WFi23+nbL8XH57xh57PbzttmHXvMc+3X7TbqRMr66ivx8pQq/DZvJybF9P6Vlogfd/CRfGDN2xdws866LvIl3XwJ3dO+sUEhiAixJ5E+33nrwwftEtQnH4hRXvaoeDBsotbvBetM/0zyNZphMJl6a731nA275C4H+jz4guBrrySszviswTGfEo5eSSdE8whnt2X80gRH1exz0fpWe34fA4REYffjZN196GCOE2uWx80J2/K9xKJ5OTJkydOnDhx4kSvXr1sbGyWLVtmY2OjoaHx7jvW0tLy8fF59+28o6FDhw4dOjQ4OPjAgQPR0dHa2tqenp5Tp06t1dcR9UVXV3fGjBlVZmZkZCiD/q1bt44dOyYSiXJycgwMDPr162diYtK7d29l6Ody39iOAaBG/v333+oN8BVzdHR0OnfubGJiogzu5ubmyjTP5/NxM7uEhIQlS5Y8f/7c39+/4uLaL7/8UrVVvQsejzd69OjRo0crf719+/a5c+fOnTsXHBz8zz//KIP+gAEDRo4c+fbb7Nixo6ura5WZubm5t2/fFolEt2/fPnz4MPL9f+P2628gy1QwiZhmZgYFSWKil9+Tcwd5Cvo6xF2VjlIk3TJz3tiLScS1GTdq57fCW4pKXXFezbj6Frb6B3cG7JC4j/vETKcufb2IWLL8hJjYB5JiFic/P09BVfvB3IuPuT9i9j4rPov4XrMHj9wRn+PuRESsjvxOHBrsOKJrVNp9Im0iRkc2j8Nh2DlZrVsuyiJbY7sly+JdZ04h+9WH7F5ulmXlNMx//5m8qW75MeeZ9htrukZBRvI2VN64yGCxFDJZnZ4bQIUa8v2dO3c2b958586dP//8c8yYMaNHj165cmWtvvRsjjw8PDw8PO7fv79///7PPvusV69enp6e7u7uqq6LlP1BR40aVTGnuLg4KipK2bZ06tSpHTt23Lp1i8lkVm7jV+b+Nm2awf2JAV4nISGhVatW1aO8WCyuaINX+uCDDyrm4LCv0YMHD6ZOnXr79u2lS5fOnDlT1eU0FGUTu6enJxFlZWWdP3/+3Llzv/zyy2effTZ06FBl3B86dGgdWql0dHR
wsBf5xcXEhEAgaGxs7OztRZ6EG4iRjouu73e5AIMDlcjEMI15DfgcJ+j0AgMz+mJ6e1mq1ZrM5JydnaGhIIBCgjvRzPp9PqVSiThEmMTExXV1dxHuz2azX66urq2NiYgoKChoaGsJwJwEAwtnZmd1uf6n1HA4Hw7CqqqqMjIzghyNByDGZzL6+vs7OTolE8ufr+wsDGo1Go7HZbDabLZfLiY+Hh4cOh8PhcExOTm5sbLwUfS6X++VvigIAQIRQKJRKpTKZLCoqCnWYX3I4HAKBwO/3ow6CktPp7Onp8Xg8TCZTJpPJZDIGg4E6VAjA/J5sXC4XjuMmk8nlcnm9Xh6Px+PxMAzj8XiUW3VDdXV1dUwmc3BwEHUQant4eMBxnKj7OI5HRUURRZ/FYpWUlHxszSfM7wEAZBaC82vDQKFQGI3GmZkZ1EFIwWAwaLVatVotkUhkMll5eTnqRP8L9Hvk/H7/y5Aex/G4uDiizSclJYnFYtTpvrXz83MWi7W4uPirM7PBBxCjfaLxb25uvoz2MQxLSEh4549AvwcAkBk1+n1bW1t2dnZTUxPqICRydXWl0Wg0Gs3p6Skxzk9PT0cd6iOg3yNxfX09MTHhdrvtdvv6+nrwkJ7qe159McPDw1NTUzabDXWQrykQCNiCxMfH5+bm8vn83Nzc1NTUVy6Efg8AIDNq9Hs+nz89PQ3nm/zU9va2SqVSqVR5eXn19fWVlZWoE/0e6PfhtL6+bjKZTCbT6uqqSCTicDgVFRUYhr1/91sQfhwOZ2BgoKysDHWQr8/lcq2srFitVqvVSqfT839ITEz8zzeh3wMAyIwC/f7k5CQtLQ3+H980Pj6uVCr9fn9zc3NLS0tERATqRO8C/f6zHR4eqtVqj8djMpnodLpQKBSJREKhkMyP3IBgY2NjBoNBp9OhDvK97O7uWiyWxcVFi8XCYrGKfoiOjqZBvwcAkBsF+v3S0lJra+ve3h7qINRgsVhGRkY2Nzfb29s7OjrIP5ctLCyk0WhLS0uog3w1NpvNYrGYzWan0ykWi4uLi4VC4etLDgA5BQKB2NjYg4MDuIeJisPhMBqNCwsLW1tbIpGotLRUIpHQoN8DAMgqEnWAtx0dHWVlZaFOQRlFRUWzs7MzMzNOpzMlJWV0dBR1ojcIBALS7spKRcvLy93d3ZmZmXK5/PLysr+///7+XqfTtbW1QbmnqMjIyJqamvn5edRBvi8ul9vb27u2tub1emtqalZWVlAnAgCA1/wFRz579bPKhBwAAAAASUVORK5CYII=" + } + }, + "cell_type": "markdown", + "id": "905aedaf-591e-4d64-8e2c-f6cb3f1491b5", + "metadata": {}, + "source": [ + "# 6. 编译多输入模型\n", + "\n", + "在阅读 [编译单输入模型](#编译单输入模型)之后,你已经清楚了如何设置单输入模型的校正集。
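For quick reference, the single-input case uses exactly the same nesting, just with one outer entry. A minimal sketch, assuming a hypothetical NHWC input of shape [1, 240, 320, 3] and random data standing in for real calibration samples:

```python
import numpy as np

# Single-input model: the outer list has one entry (the only input),
# and the inner list holds that input's calibration samples (two here).
# Shape and dtype are placeholders -- use data representative of your dataset.
calib_data = [[np.random.rand(1, 240, 320, 3).astype(np.float32),
               np.random.rand(1, 240, 320, 3).astype(np.float32)]]
```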
\n", + "这里我们会介绍如何设置多输入模型的校正集。
\n", + "\n", + "如果模型存在多个输入,那么校正集的格式就是`[[x1, x2,...], [y1, y2,...], ...]`。
\n", + "\n", + "```\n", + "例如,模型有三个输入,分别是(x, y, z),并且这三个输入的详细信息如下:\n", + "x:{shape: [3,100], range: [1,5], dtype: int64}\n", + "y:{shape: [100, 3, 192], range: [0,1), dtype: float32}\n", + "z:{shape: [3,100], dtype: bool}\n", + "\n", + "那么校正集就是下面这个样子:\n", + "calib_data = [\n", + "[ np.random.randint(1, 5, size=[3,100], dtype='int64'), np.random.randint(1, 5, size=[3,100], dtype='int64')],\n", + "[ np.random.rand(100, 3, 192).astype(np.float32), np.random.rand(100, 3, 192).astype(np.float32)],\n", + "[ np.random.rand(3,100).astype(np.float32)>0.5, np.random.rand(3,100).astype(np.float32)>0.5],] # bool\n", + "\n", + "如果你numpy有一些使用经验,想必你在看到这里已经知道如何设置正确的数据了。\n", + "```\n", + "\n", + "下面,我们用一个简单一点的模型来运行个示例,模型结构如下所示。\n", + "\n", + "![image.png](attachment:562afee7-c078-4323-bc19-49e03c80d0e9.png)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "73f758da-bd81-44f9-9eff-734a23c427c2", + "metadata": {}, + "outputs": [], + "source": [ + "# compile kmodel multiple inputs\n", + "model_path = \"./test.onnx\"\n", + "dump_path = \"./tmp_onnx\"\n", + "\n", + "# 校正集的数量为2\n", + "calib_data = [[np.random.rand(1, 1, 1024).astype(np.float32), np.random.rand(1, 1, 1024).astype(np.float32)],\n", + " [np.random.rand(1, 1, 320).astype(np.float32), np.random.rand(1, 1, 320).astype(np.float32)]]\n", + "\n", + "kmodel_path = compile_kmodel(model_path, dump_path, calib_data)\n", + "\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "351e2e49-7869-4c86-a5be-e8b7412b4d06", + "metadata": {}, + "source": [ + "# 7. PC上推理多输入kmodel" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8fc1f43a-99fa-463a-b1b1-c6a9174ecd7a", + "metadata": {}, + "outputs": [], + "source": [ + "# run kmodel(simulate)\n", + "import os\n", + "\n", + "kmodel_path = \"./tmp_onnx/test.kmodel\"\n", + "input_data = [np.random.rand(1, 1, 1024).astype(np.float32), np.random.rand(1, 1, 320).astype(np.float32)]\n", + "\n", + "results = run_kmodel(kmodel_path, input_data)\n", + "\n", + "for idx, i in enumerate(results):\n", + " print(i.shape)\n", + " i.tofile(os.path.join(dump_path,\"nncase_result_{}.bin\".format(idx)))\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "dd5a79f4-0094-476f-ac79-6ea6b100b5fc", + "metadata": {}, + "source": [ + "# 8. 比较ONNX结果和kmodel推理结果\n", + "\n", + "这里给出如何调用ONNX框架推理的示例代码,以及如何将ONNX的推理结果和kmodel的推理结果进行比较,该比较结果为余弦值的形式。
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ee2b661d-f6eb-4dee-b014-0fd3f95d589d", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install onnxruntime\n", + "import onnxruntime as rt\n", + "\n", + "onnx_model = model_simplify(model_path)\n", + "onnx_model = model_path\n", + "_, input_info = parse_model_input_output(model_path)\n", + "onnx_sess = rt.InferenceSession(onnx_model)\n", + "\n", + "input_dict = {}\n", + "for i, info in enumerate(input_info):\n", + " print(info['shape'])\n", + " input_dict[info[\"name\"]] = input_data[i]\n", + "\n", + "onnx_results = onnx_sess.run(None, input_dict)\n", + "for index, (i, j) in enumerate(zip(onnx_results, results)):\n", + " print(\"result {} cosine = \".format(index), get_cosine(i, j))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/user_guide/k230_simulate.ipynb b/examples/user_guide/k230_simulate.ipynb deleted file mode 100644 index 10ef9b5bde..0000000000 --- a/examples/user_guide/k230_simulate.ipynb +++ /dev/null @@ -1,213 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "82a8f9c1-c2bf-4270-9f1f-ac25c9fdd898", - "metadata": {}, - "outputs": [], - "source": [ - "!pip install --upgrade pip\n", - "# !pip uninstall -y nncase\n", - "!pip install nncase --timeout=1000\n", - "!pip install nncase-kpu --timeout=1000\n", - "!pip install onnx onnxsim\n", - "\n", - "# nncase-2.x need dotnet-7\n", - "!sudo apt-get install -y dotnet-sdk-7.0\n", - "import _nncase\n", - "print(_nncase.__version__)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a7eff82e-295c-4cce-afbc-ce64c84dc40a", - "metadata": {}, - "outputs": [], - "source": [ - "import nncase\n", - "import numpy as np\n", - "from nncase_base_func import *\n", - "\n", - "def compile_kmodel(model_path, dump_path, calib_data):\n", - " \"\"\"\n", - " Set compile options and ptq options.\n", - " Compile kmodel.\n", - " Dump the compile-time result to 'compile_options.dump_dir'\n", - " \"\"\"\n", - " print(\"\\n---------- compile ----------\")\n", - " print(\"Simplify...\")\n", - " model_file = model_simplify(model_path)\n", - "\n", - " print(\"Set options...\")\n", - " # import_options\n", - " import_options = nncase.ImportOptions()\n", - " \n", - " # compile_options\n", - " compile_options = nncase.CompileOptions()\n", - " compile_options.target = \"k230\" #\"cpu\"\n", - " compile_options.dump_ir = True # if False, will not dump the compile-time result.\n", - " compile_options.dump_asm = True\n", - " compile_options.dump_dir = dump_path\n", - " compile_options.input_file = \"\"\n", - " \n", - " # preprocess args\n", - " compile_options.preprocess = False\n", - " if compile_options.preprocess:\n", - " compile_options.input_type = \"uint8\" # \"uint8\" \"float32\"\n", - " compile_options.input_shape = [1,224,320,3]\n", - " compile_options.input_range = [0,1]\n", - " compile_options.input_layout = \"NHWC\" # \"NHWC\"\n", - " compile_options.swapRB = False\n", - " compile_options.mean = [0,0,0]\n", - " compile_options.std = [1,1,1]\n", - " compile_options.letterbox_value = 0\n", - " compile_options.output_layout = 
\"NHWC\" # \"NHWC\"\n", - " \n", - " # quant\n", - " ptq_options = nncase.PTQTensorOptions()\n", - " \n", - " ptq_options.quant_type = \"uint8\" # datatype : \"float32\", \"int8\", \"int16\"\n", - " ptq_options.w_quant_type = \"uint8\" # datatype : \"float32\", \"int8\", \"int16\"\n", - " ptq_options.calibrate_method = \"NoClip\" # \"Kld\"\n", - " ptq_options.finetune_weights_method = \"NoFineTuneWeights\"\n", - " ptq_options.dump_quant_error = False\n", - " ptq_options.dump_quant_error_symmetric_for_signed = False\n", - " \n", - " # detail in docs/MixQuant.md\n", - " ptq_options.quant_scheme = \"\"\n", - " ptq_options.export_quant_scheme = False\n", - " ptq_options.export_weight_range_by_channel = False\n", - " \n", - " ptq_options.samples_count = len(calib_data[0])\n", - " ptq_options.set_tensor_data(calib_data)\n", - "\n", - " \n", - " print(\"Compiling...\")\n", - " compiler = nncase.Compiler(compile_options)\n", - " # import\n", - " model_content = read_model_file(model_file)\n", - " if model_path.split(\".\")[-1] == \"onnx\":\n", - " compiler.import_onnx(model_content, import_options)\n", - " elif model_path.split(\".\")[-1] == \"tflite\":\n", - " compiler.import_tflite(model_content, import_options)\n", - " \n", - " compiler.use_ptq(ptq_options)\n", - " \n", - " # compile\n", - " compiler.compile()\n", - " kmodel = compiler.gencode_tobytes()\n", - " \n", - " kmodel_path = os.path.join(dump_path, \"test.kmodel\")\n", - " with open(kmodel_path, 'wb') as f:\n", - " f.write(kmodel)\n", - " print(\"----------------end-----------------\")\n", - " return kmodel_path\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c957fe20-99c9-4a54-bae8-38361a8f8830", - "metadata": {}, - "outputs": [], - "source": [ - "# compile kmodel single input\n", - "model_path = \"./test.tflite\"\n", - "dump_path = \"./tmp_tflite\"\n", - "\n", - "# If model has multiple inputs, calib_data format is \"[[x1, x2,...], [y1, y2,...], ...]\"\n", - "# e.g. Model has three inputs (x, y, z), the calib_data is '[[x1, x2, x3],[y1, y2, y3],[z1, z2, z3]]'\n", - "\n", - "calib_data = [[np.random.rand(1, 240, 320, 3).astype(np.float32), np.random.rand(1, 240, 320, 3).astype(np.float32)]]\n", - "\n", - "kmodel_path = compile_kmodel(model_path, dump_path, calib_data)\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7f617edc-781c-4b8b-b45d-fef2f0b36a46", - "metadata": {}, - "outputs": [], - "source": [ - "# run kmodel(simulate)\n", - "import os\n", - "\n", - "kmodel_path = \"./tmp_tflite/test.kmodel\"\n", - "input_data = [np.random.rand(1, 240, 320, 3).astype(np.float32)]\n", - "\n", - "result = run_kmodel(kmodel_path, input_data)\n", - "\n", - "for idx, i in enumerate(result):\n", - " print(i.shape)\n", - " i.tofile(os.path.join(dump_path,\"nncase_result_{}.bin\".format(idx)))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "73f758da-bd81-44f9-9eff-734a23c427c2", - "metadata": {}, - "outputs": [], - "source": [ - "# compile kmodel multiple inputs\n", - "model_path = \"./test.onnx\"\n", - "dump_path = \"./tmp_onnx\"\n", - "\n", - "# If model has multiple inputs, calib_data format is \"[[x1, x2,...], [y1, y2,...], ...]\"\n", - "# e.g. 
Model has three inputs (x, y, z), the calib_data is '[[x1, x2, x3],[y1, y2, y3],[z1, z2, z3]]'\n", - "# calib_data = [[ np.random.randint(1, 5, size=[3,100], dtype='int64'), np.random.randint(1, 5, size=[3,100], dtype='int64')],\n", - "# [ np.random.rand(100, 3, 192).astype(np.float32), np.random.rand(100, 3, 192).astype(np.float32)],\n", - "# [ np.random.rand(3,100).astype(np.float32)>0.5, np.random.rand(3,100).astype(np.float32)>0.5],] # bool\n", - "calib_data = [[np.random.rand(1, 1, 1024).astype(np.float32), np.random.rand(1, 1, 1024).astype(np.float32)],\n", - " [np.random.rand(1, 1, 320).astype(np.float32), np.random.rand(1, 1, 320).astype(np.float32)]]\n", - "\n", - "kmodel_path = compile_kmodel(model_path, dump_path, calib_data)\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8fc1f43a-99fa-463a-b1b1-c6a9174ecd7a", - "metadata": {}, - "outputs": [], - "source": [ - "# run kmodel(simulate)\n", - "import os\n", - "\n", - "kmodel_path = \"./tmp_onnx/test.kmodel\"\n", - "input_data = [np.random.rand(1, 1, 1024).astype(np.float32), np.random.rand(1, 1, 320).astype(np.float32)]\n", - "\n", - "result = run_kmodel(kmodel_path, input_data)\n", - "\n", - "for idx, i in enumerate(result):\n", - " print(i.shape)\n", - " i.tofile(os.path.join(dump_path,\"nncase_result_{}.bin\".format(idx)))\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/examples/user_guide/nncase_base_func.py b/examples/user_guide/nncase_base_func.py index da03c92b28..e785130825 100644 --- a/examples/user_guide/nncase_base_func.py +++ b/examples/user_guide/nncase_base_func.py @@ -15,7 +15,6 @@ def get_cosine(vec1, vec2): return cosine_similarity(vec1.reshape(1, -1), vec2.reshape(1, -1)) - def read_model_file(model_file): """ read model @@ -58,18 +57,18 @@ def model_simplify(model_file): input_shapes = {} for input in inputs: input_shapes[input['name']] = input['shape'] - - onnx_model, check = onnxsim.simplify(onnx_model, overwrite_input_shapes=input_shapes) + + onnx_model, check = onnxsim.simplify(onnx_model, input_shapes=input_shapes) assert check, "Simplified ONNX model could not be validated" - + model_file = os.path.join(os.path.dirname(model_file), 'simplified.onnx') onnx.save_model(onnx_model, model_file) print("[ onnx done ]") elif model_file.split('.')[-1] == "tflite": - print("[ tflite pass ]") + print("[ tflite skip ]") else: raise Exception(f"Unsupport type {model_file.split('.')[-1]}") - + return model_file def run_kmodel(kmodel_path, input_data): @@ -78,14 +77,14 @@ def run_kmodel(kmodel_path, input_data): model_sim = nncase.Simulator() with open(kmodel_path, 'rb') as f: model_sim.load_model(f.read()) - + print("Set input data...") for i, p_d in enumerate(input_data): model_sim.set_input_tensor(i, nncase.RuntimeTensor.from_numpy(p_d)) - + print("Run...") model_sim.run() - + print("Get output result...") all_result = [] for i in range(model_sim.outputs_size): diff --git a/modules/Nncase.Modules.StackVM/CodeGen/StackVM/CodeGenVisitor.g.cs b/modules/Nncase.Modules.StackVM/CodeGen/StackVM/CodeGenVisitor.g.cs index f73d629c40..b900856080 100644 --- 
a/modules/Nncase.Modules.StackVM/CodeGen/StackVM/CodeGenVisitor.g.cs +++ b/modules/Nncase.Modules.StackVM/CodeGen/StackVM/CodeGenVisitor.g.cs @@ -1,6 +1,6 @@ // Copyright (c) Canaan Inc. All rights reserved. // Licensed under the Apache license. See LICENSE file in the project root for full license information. -/* This file is generated by tools/stackvm_gen/IsaGen at 2023/7/12 15:04:16 +08:00. */ +/* This file is generated by tools/stackvm_gen/IsaGen at 2023/7/12 17:07:39 +08:00. */ using System; using System.Collections.Generic; @@ -262,6 +262,18 @@ private void EmitTensorCall(Op op) case IR.Tensors.Where top: Emitter.T.Where(top.IsTfWhere); break; + case IR.ShapeExpr.BroadcastShape top: + Emitter.T.BroadcastShape(); + break; + case IR.ShapeExpr.Conv2DShape top: + Emitter.T.Conv2DShape(); + break; + case IR.ShapeExpr.Conv2DTransposeShape top: + Emitter.T.Conv2DTransposeShape(); + break; + case IR.ShapeExpr.MatMulShape top: + Emitter.T.MatMulShape(); + break; case IR.Random.Normal top: Emitter.T.Normal(top.Type); break; diff --git a/modules/Nncase.Modules.StackVM/CodeGen/StackVM/CodegenVisitor.cs b/modules/Nncase.Modules.StackVM/CodeGen/StackVM/CodegenVisitor.cs index e382e6ab45..2b6a4e6eda 100644 --- a/modules/Nncase.Modules.StackVM/CodeGen/StackVM/CodegenVisitor.cs +++ b/modules/Nncase.Modules.StackVM/CodeGen/StackVM/CodegenVisitor.cs @@ -152,7 +152,7 @@ internal partial class CodeGenVisitor : ExprVisitor private readonly BaseFunction _function; private readonly CodeGenContext _context; - private readonly HashSet _refTextSnippets = new(); + private readonly List _refTextSnippets = new(); private TextSnippet? _currentTextSnippet; private BasicBlock? _currentBasicBlock; @@ -172,7 +172,7 @@ public CodeGenVisitor(BaseFunction function, CodeGenContext context) private StackVMEmitter Emitter => CurrentTextSnippet.Emitter; - public (BasicBlock BB, HashSet SnippetSet) SubBlock(Expr expr) + public (BasicBlock BB, List SnippetSet) SubBlock(Expr expr) { var visitor = new CodeGenVisitor(_function, _context); var subBlockFirst = visitor.CurrentBasicBlock; @@ -185,7 +185,8 @@ public CodeGenVisitor(BaseFunction function, CodeGenContext context) visitor.Visit(expr); var refTextSnippets = visitor._refTextSnippets; - return (subBlockFirst, refTextSnippets); + var subBlockEnd = visitor.CurrentBasicBlock; + return (subBlockEnd, refTextSnippets); } protected override TextSnippet VisitLeafConst(Const expr) @@ -389,9 +390,9 @@ protected override TextSnippet VisitLeafIf(If @if) return endSnippet; } - private void MergeSnippetSet(HashSet thenSet, HashSet elseSet, TextSnippet endSnippet) + private void MergeSnippetSet(List thenSet, List elseSet, TextSnippet endSnippet) { - var useSnippetSet = thenSet.Union(elseSet).ToHashSet(); + var useSnippetSet = thenSet.Concat(elseSet).ToHashSet(); foreach (var snippet in useSnippetSet) { snippet.AddUseCount(); diff --git a/modules/Nncase.Modules.StackVM/CodeGen/StackVM/StackVMEmitter.g.cs b/modules/Nncase.Modules.StackVM/CodeGen/StackVM/StackVMEmitter.g.cs index 0e1782487f..d69739ded8 100644 --- a/modules/Nncase.Modules.StackVM/CodeGen/StackVM/StackVMEmitter.g.cs +++ b/modules/Nncase.Modules.StackVM/CodeGen/StackVM/StackVMEmitter.g.cs @@ -1,6 +1,6 @@ // Copyright (c) Canaan Inc. All rights reserved. // Licensed under the Apache license. See LICENSE file in the project root for full license information. -/* This file is generated by tools/stackvm_gen/IsaGen at 2023/7/12 15:04:16 +08:00. */ +/* This file is generated by tools/stackvm_gen/IsaGen at 2023/7/12 17:07:39 +08:00. 
*/ using System; using System.Collections.Generic; @@ -678,17 +678,24 @@ public void Broadcast() } ///. - public void BucketPad() + public void BroadcastShape() { _emitter.Write((byte)100); _emitter.Write((ushort)5); } ///. - public void Cast(DataType newType, CastMode castMode) + public void BucketPad() { _emitter.Write((byte)100); _emitter.Write((ushort)6); + } + + ///. + public void Cast(DataType newType, CastMode castMode) + { + _emitter.Write((byte)100); + _emitter.Write((ushort)7); _emitter.Write(newType); _emitter.Write((int)castMode); } @@ -697,21 +704,21 @@ public void Cast(DataType newType, CastMode castMode) public void Celu() { _emitter.Write((byte)100); - _emitter.Write((ushort)7); + _emitter.Write((ushort)8); } ///. public void Clamp() { _emitter.Write((byte)100); - _emitter.Write((ushort)8); + _emitter.Write((ushort)9); } ///. public void Compare(CompareOp compareOp) { _emitter.Write((byte)100); - _emitter.Write((ushort)9); + _emitter.Write((ushort)10); _emitter.Write((byte)compareOp); } @@ -719,14 +726,14 @@ public void Compare(CompareOp compareOp) public void Concat() { _emitter.Write((byte)100); - _emitter.Write((ushort)10); + _emitter.Write((ushort)11); } ///. public void Condition(bool canFoldConstCall) { _emitter.Write((byte)100); - _emitter.Write((ushort)11); + _emitter.Write((ushort)12); _emitter.Write(canFoldConstCall); } @@ -734,37 +741,51 @@ public void Condition(bool canFoldConstCall) public void ConstantOfShape() { _emitter.Write((byte)100); - _emitter.Write((ushort)12); + _emitter.Write((ushort)13); } ///. public void Conv2D(PadMode padMode) { _emitter.Write((byte)100); - _emitter.Write((ushort)13); + _emitter.Write((ushort)14); _emitter.Write((byte)padMode); } + ///. + public void Conv2DShape() + { + _emitter.Write((byte)100); + _emitter.Write((ushort)15); + } + ///. public void Conv2DTranspose(PadMode padMode) { _emitter.Write((byte)100); - _emitter.Write((ushort)14); + _emitter.Write((ushort)16); _emitter.Write((byte)padMode); } + ///. + public void Conv2DTransposeShape() + { + _emitter.Write((byte)100); + _emitter.Write((ushort)17); + } + ///. public void CumSum() { _emitter.Write((byte)100); - _emitter.Write((ushort)15); + _emitter.Write((ushort)18); } ///. public void Dequantize(DataType targetType) { _emitter.Write((byte)100); - _emitter.Write((ushort)16); + _emitter.Write((ushort)19); _emitter.Write(targetType); } @@ -772,28 +793,28 @@ public void Dequantize(DataType targetType) public void Elu() { _emitter.Write((byte)100); - _emitter.Write((ushort)17); + _emitter.Write((ushort)20); } ///. public void Erf() { _emitter.Write((byte)100); - _emitter.Write((ushort)18); + _emitter.Write((ushort)21); } ///. public void Expand() { _emitter.Write((byte)100); - _emitter.Write((ushort)19); + _emitter.Write((ushort)22); } ///. public void FakeDequantize(DataType targetType) { _emitter.Write((byte)100); - _emitter.Write((ushort)20); + _emitter.Write((ushort)23); _emitter.Write(targetType); } @@ -801,7 +822,7 @@ public void FakeDequantize(DataType targetType) public void FakeQuantize(DataType targetType) { _emitter.Write((byte)100); - _emitter.Write((ushort)21); + _emitter.Write((ushort)24); _emitter.Write(targetType); } @@ -809,98 +830,98 @@ public void FakeQuantize(DataType targetType) public void FixShape() { _emitter.Write((byte)100); - _emitter.Write((ushort)22); + _emitter.Write((ushort)25); } ///. public void Flatten() { _emitter.Write((byte)100); - _emitter.Write((ushort)23); + _emitter.Write((ushort)26); } ///. 
public void Gather() { _emitter.Write((byte)100); - _emitter.Write((ushort)24); + _emitter.Write((ushort)27); } ///. public void GatherElements() { _emitter.Write((byte)100); - _emitter.Write((ushort)25); + _emitter.Write((ushort)28); } ///. public void GatherND() { _emitter.Write((byte)100); - _emitter.Write((ushort)26); + _emitter.Write((ushort)29); } ///. public void Gelu() { _emitter.Write((byte)100); - _emitter.Write((ushort)27); + _emitter.Write((ushort)30); } ///. public void GetItem() { _emitter.Write((byte)100); - _emitter.Write((ushort)28); + _emitter.Write((ushort)31); } ///. public void Hardmax() { _emitter.Write((byte)100); - _emitter.Write((ushort)29); + _emitter.Write((ushort)32); } ///. public void HardSigmoid() { _emitter.Write((byte)100); - _emitter.Write((ushort)30); + _emitter.Write((ushort)33); } ///. public void HardSwish() { _emitter.Write((byte)100); - _emitter.Write((ushort)31); + _emitter.Write((ushort)34); } ///. public void IndexOf() { _emitter.Write((byte)100); - _emitter.Write((ushort)32); + _emitter.Write((ushort)35); } ///. public void InstanceNormalization() { _emitter.Write((byte)100); - _emitter.Write((ushort)33); + _emitter.Write((ushort)36); } ///. public void L2Normalization() { _emitter.Write((byte)100); - _emitter.Write((ushort)34); + _emitter.Write((ushort)37); } ///. public void LayerNorm(int axis, float epsilon) { _emitter.Write((byte)100); - _emitter.Write((ushort)35); + _emitter.Write((ushort)38); _emitter.Write(axis); _emitter.Write(epsilon); } @@ -909,35 +930,35 @@ public void LayerNorm(int axis, float epsilon) public void LeakyRelu() { _emitter.Write((byte)100); - _emitter.Write((ushort)36); + _emitter.Write((ushort)39); } ///. public void LogSoftmax() { _emitter.Write((byte)100); - _emitter.Write((ushort)37); + _emitter.Write((ushort)40); } ///. public void LpNormalization() { _emitter.Write((byte)100); - _emitter.Write((ushort)38); + _emitter.Write((ushort)41); } ///. public void LRN() { _emitter.Write((byte)100); - _emitter.Write((ushort)39); + _emitter.Write((ushort)42); } ///. public void LSTM(LSTMDirection direction, LSTMLayout layout, string[] activations) { _emitter.Write((byte)100); - _emitter.Write((ushort)40); + _emitter.Write((ushort)43); _emitter.Write((int)direction); _emitter.Write((int)layout); _emitter.Write(activations); @@ -947,14 +968,21 @@ public void LSTM(LSTMDirection direction, LSTMLayout layout, string[] activation public void MatMul() { _emitter.Write((byte)100); - _emitter.Write((ushort)41); + _emitter.Write((ushort)44); + } + + ///. + public void MatMulShape() + { + _emitter.Write((byte)100); + _emitter.Write((ushort)45); } ///. 
public void Normal(DataType type) { _emitter.Write((byte)100); - _emitter.Write((ushort)42); + _emitter.Write((ushort)46); _emitter.Write(type); } @@ -962,7 +990,7 @@ public void Normal(DataType type) public void NormalLike(DataType type) { _emitter.Write((byte)100); - _emitter.Write((ushort)43); + _emitter.Write((ushort)47); _emitter.Write(type); } @@ -970,7 +998,7 @@ public void NormalLike(DataType type) public void OneHot(OneHotMode oneHotMode) { _emitter.Write((byte)100); - _emitter.Write((ushort)44); + _emitter.Write((ushort)48); _emitter.Write((byte)oneHotMode); } @@ -978,7 +1006,7 @@ public void OneHot(OneHotMode oneHotMode) public void Pad(PadMode padMode) { _emitter.Write((byte)100); - _emitter.Write((ushort)45); + _emitter.Write((ushort)49); _emitter.Write((byte)padMode); } @@ -986,21 +1014,21 @@ public void Pad(PadMode padMode) public void PRelu() { _emitter.Write((byte)100); - _emitter.Write((ushort)46); + _emitter.Write((ushort)50); } ///. public void Prod() { _emitter.Write((byte)100); - _emitter.Write((ushort)47); + _emitter.Write((ushort)51); } ///. public void Quantize(DataType targetType) { _emitter.Write((byte)100); - _emitter.Write((ushort)48); + _emitter.Write((ushort)52); _emitter.Write(targetType); } @@ -1008,7 +1036,7 @@ public void Quantize(DataType targetType) public void QuantParamOf(QuantMode quantMode) { _emitter.Write((byte)100); - _emitter.Write((ushort)49); + _emitter.Write((ushort)53); _emitter.Write((int)quantMode); } @@ -1016,14 +1044,14 @@ public void QuantParamOf(QuantMode quantMode) public void Range() { _emitter.Write((byte)100); - _emitter.Write((ushort)50); + _emitter.Write((ushort)54); } ///. public void RangeOf(bool isRangeOfWeight) { _emitter.Write((byte)100); - _emitter.Write((ushort)51); + _emitter.Write((ushort)55); _emitter.Write(isRangeOfWeight); } @@ -1031,14 +1059,14 @@ public void RangeOf(bool isRangeOfWeight) public void Rank() { _emitter.Write((byte)100); - _emitter.Write((ushort)52); + _emitter.Write((ushort)56); } ///. public void Reduce(ReduceOp reduceOp) { _emitter.Write((byte)100); - _emitter.Write((ushort)53); + _emitter.Write((ushort)57); _emitter.Write((byte)reduceOp); } @@ -1046,7 +1074,7 @@ public void Reduce(ReduceOp reduceOp) public void ReduceArg(ReduceArgOp reduceArgOp, DataType destType) { _emitter.Write((byte)100); - _emitter.Write((ushort)54); + _emitter.Write((ushort)58); _emitter.Write((byte)reduceArgOp); _emitter.Write(destType); } @@ -1055,7 +1083,7 @@ public void ReduceArg(ReduceArgOp reduceArgOp, DataType destType) public void ReduceWindow2D(ReduceOp reduceOp) { _emitter.Write((byte)100); - _emitter.Write((ushort)55); + _emitter.Write((ushort)59); _emitter.Write((byte)reduceOp); } @@ -1063,21 +1091,21 @@ public void ReduceWindow2D(ReduceOp reduceOp) public void Relu() { _emitter.Write((byte)100); - _emitter.Write((ushort)56); + _emitter.Write((ushort)60); } ///. public void Relu6() { _emitter.Write((byte)100); - _emitter.Write((ushort)57); + _emitter.Write((ushort)61); } ///. public void Require(string message, bool canFoldConstCall) { _emitter.Write((byte)100); - _emitter.Write((ushort)58); + _emitter.Write((ushort)62); _emitter.Write(message); _emitter.Write(canFoldConstCall); } @@ -1086,14 +1114,14 @@ public void Require(string message, bool canFoldConstCall) public void Reshape() { _emitter.Write((byte)100); - _emitter.Write((ushort)59); + _emitter.Write((ushort)63); } ///. 
public void ResizeImage(ImageResizeMode resizeMode, ImageResizeTransformationMode transformationMode, ImageResizeNearestMode nearestMode, bool isTFResize) { _emitter.Write((byte)100); - _emitter.Write((ushort)60); + _emitter.Write((ushort)64); _emitter.Write((byte)resizeMode); _emitter.Write((int)transformationMode); _emitter.Write((int)nearestMode); @@ -1104,147 +1132,147 @@ public void ResizeImage(ImageResizeMode resizeMode, ImageResizeTransformationMod public void ReverseSequence() { _emitter.Write((byte)100); - _emitter.Write((ushort)61); + _emitter.Write((ushort)65); } ///. public void ScatterND() { _emitter.Write((byte)100); - _emitter.Write((ushort)62); + _emitter.Write((ushort)66); } ///. public void Select() { _emitter.Write((byte)100); - _emitter.Write((ushort)63); + _emitter.Write((ushort)67); } ///. public void Selu() { _emitter.Write((byte)100); - _emitter.Write((ushort)64); + _emitter.Write((ushort)68); } ///. public void ShapeOf() { _emitter.Write((byte)100); - _emitter.Write((ushort)65); + _emitter.Write((ushort)69); } ///. public void Sigmoid() { _emitter.Write((byte)100); - _emitter.Write((ushort)66); + _emitter.Write((ushort)70); } ///. public void SizeOf() { _emitter.Write((byte)100); - _emitter.Write((ushort)67); + _emitter.Write((ushort)71); } ///. public void Slice() { _emitter.Write((byte)100); - _emitter.Write((ushort)68); + _emitter.Write((ushort)72); } ///. public void Softmax() { _emitter.Write((byte)100); - _emitter.Write((ushort)69); + _emitter.Write((ushort)73); } ///. public void Softplus() { _emitter.Write((byte)100); - _emitter.Write((ushort)70); + _emitter.Write((ushort)74); } ///. public void Softsign() { _emitter.Write((byte)100); - _emitter.Write((ushort)71); + _emitter.Write((ushort)75); } ///. public void SpaceToBatch() { _emitter.Write((byte)100); - _emitter.Write((ushort)72); + _emitter.Write((ushort)76); } ///. public void Split() { _emitter.Write((byte)100); - _emitter.Write((ushort)73); + _emitter.Write((ushort)77); } ///. public void Squeeze() { _emitter.Write((byte)100); - _emitter.Write((ushort)74); + _emitter.Write((ushort)78); } ///. public void Stack() { _emitter.Write((byte)100); - _emitter.Write((ushort)75); + _emitter.Write((ushort)79); } ///. public void Swish() { _emitter.Write((byte)100); - _emitter.Write((ushort)76); + _emitter.Write((ushort)80); } ///. public void Tile() { _emitter.Write((byte)100); - _emitter.Write((ushort)77); + _emitter.Write((ushort)81); } ///. public void TopK() { _emitter.Write((byte)100); - _emitter.Write((ushort)78); + _emitter.Write((ushort)82); } ///. public void Transpose() { _emitter.Write((byte)100); - _emitter.Write((ushort)79); + _emitter.Write((ushort)83); } ///. public void Trilu() { _emitter.Write((byte)100); - _emitter.Write((ushort)80); + _emitter.Write((ushort)84); } ///. 
public void Unary(UnaryOp unaryOp) { _emitter.Write((byte)100); - _emitter.Write((ushort)81); + _emitter.Write((ushort)85); _emitter.Write((byte)unaryOp); } @@ -1252,7 +1280,7 @@ public void Unary(UnaryOp unaryOp) public void Uniform(DataType type) { _emitter.Write((byte)100); - _emitter.Write((ushort)82); + _emitter.Write((ushort)86); _emitter.Write(type); } @@ -1260,7 +1288,7 @@ public void Uniform(DataType type) public void UniformLike(DataType type) { _emitter.Write((byte)100); - _emitter.Write((ushort)83); + _emitter.Write((ushort)87); _emitter.Write(type); } @@ -1268,14 +1296,14 @@ public void UniformLike(DataType type) public void Unsqueeze() { _emitter.Write((byte)100); - _emitter.Write((ushort)84); + _emitter.Write((ushort)88); } ///. public void Where(bool isTfWhere) { _emitter.Write((byte)100); - _emitter.Write((ushort)85); + _emitter.Write((ushort)89); _emitter.Write(isTfWhere); } } diff --git a/pyproject.toml b/pyproject.toml index 180e5c44b6..04a03089f8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,7 +24,8 @@ dependencies = ["numpy"] homepage = "https://github.com/kendryte/nncase" [build-system] -requires = ["setuptools>=42", "wheel", "conan<=1.59", "ninja"] +requires = ["setuptools>=42", "wheel", "conan<=1.59", "ninja", "gitpython"] +build-backend = "setuptools.build_meta" [tool.cibuildwheel] build = ["cp37*", "cp38*", "cp39*", "cp310*"] diff --git a/python/nncase/__init__.py b/python/nncase/__init__.py index 8653d3b301..5784a37915 100644 --- a/python/nncase/__init__.py +++ b/python/nncase/__init__.py @@ -152,6 +152,16 @@ def __init__(self, compile_options: CompileOptions) -> None: self._compiler = self._session.compiler self._quantize_options = None self._shape_bucket_options = _nncase.ShapeBucketOptions() + self.init_shape_bucket_options(compile_options) + + def init_shape_bucket_options(self, compile_options: CompileOptions) -> None: + self._shape_bucket_options = _nncase.ShapeBucketOptions() + self._shape_bucket_options.segments_count = compile_options.shape_bucket_segments_count + self._shape_bucket_options.enable = compile_options.shape_bucket_enable + self._shape_bucket_options.range_info = compile_options.shape_bucket_range_info + self._shape_bucket_options.segments_count = compile_options.shape_bucket_segments_count + self._shape_bucket_options.fix_var_map = compile_options.shape_bucket_fix_var_map + self._compile_options.shape_bucket_options = self._shape_bucket_options def compile(self) -> None: self._compiler.compile() diff --git a/requirements.test.txt b/requirements.test.txt index 96087104fc..5afc08e761 100644 --- a/requirements.test.txt +++ b/requirements.test.txt @@ -18,3 +18,5 @@ pyyaml pythonnet==3.0.1 clr_loader==0.2.4 toml==0.10.2 +pandas +tabulate diff --git a/setup.py b/setup.py index 2380f65b17..6724cbac65 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,8 @@ import io import re import time - +import subprocess +from git.repo import Repo # See ref: https://stackoverflow.com/a/51575996 @@ -277,8 +278,19 @@ def find_version(): version_prefix = re.findall(r"NNCASE_VERSION \"(.+)\"", version_file) if version_prefix: + repo_path = os.getcwd() + repo = Repo(repo_path) + if repo.tags: + latest_commit = subprocess.check_output( + ['git', 'rev-parse', 'HEAD']).decode('utf-8').strip() + tagged_commit = subprocess.check_output( + ['git', 'rev-list', '-n', '1', repo.tags[-1].name]).decode('utf-8').strip() + if latest_commit == tagged_commit: + return version_prefix[0] + version_suffix = time.strftime("%Y%m%d", time.localtime()) return version_prefix[0] 
+ "." + version_suffix + raise RuntimeError("Unable to find version string.") diff --git a/src/Native/include/nncase/kernels/stackvm/tensor_ops.h b/src/Native/include/nncase/kernels/stackvm/tensor_ops.h index 8de16e8c34..db59425a29 100644 --- a/src/Native/include/nncase/kernels/stackvm/tensor_ops.h +++ b/src/Native/include/nncase/kernels/stackvm/tensor_ops.h @@ -1,4 +1,4 @@ -/* This file is generated by tools/stackvm_gen/IsaGen at 2023/7/12 15:04:16 +/* This file is generated by tools/stackvm_gen/IsaGen at 2023/7/12 17:07:39 * +08:00. * * Copyright 2019-2021 Canaan Inc. @@ -51,6 +51,10 @@ NNCASE_API result broadcast(value_t input, value_t shape, value_t output = nullptr, kernel_context &context = default_kernel_context()); +NNCASE_API result +broadcast_shape(value_t inputs, value_t output = nullptr, + kernel_context &context = default_kernel_context()); + NNCASE_API result bucket_pad(value_t input, value_t shape, value_t output = nullptr, kernel_context &context = default_kernel_context()); @@ -92,6 +96,11 @@ conv2d(runtime::stackvm::pad_mode_t pad_mode, value_t input, value_t weights, value_t groups, value_t fused_clamp, value_t output = nullptr, kernel_context &context = default_kernel_context()); +NNCASE_API result +conv2d_shape(value_t input, value_t weights, value_t padding, value_t stride, + value_t dilation, value_t groups, value_t output = nullptr, + kernel_context &context = default_kernel_context()); + NNCASE_API result conv2d_transpose(runtime::stackvm::pad_mode_t pad_mode, value_t input, value_t weights, value_t bias, value_t output_shape, @@ -100,6 +109,13 @@ conv2d_transpose(runtime::stackvm::pad_mode_t pad_mode, value_t input, value_t output = nullptr, kernel_context &context = default_kernel_context()); +NNCASE_API result +conv2d_transpose_shape(value_t input, value_t weights, value_t stride, + value_t dilation, value_t padding, + value_t output_padding, value_t groups, + value_t output = nullptr, + kernel_context &context = default_kernel_context()); + NNCASE_API result cum_sum(value_t input, value_t axis, value_t exclusive, value_t reverse, value_t output = nullptr, @@ -225,6 +241,10 @@ NNCASE_API result mat_mul(value_t lhs, value_t rhs, value_t output = nullptr, kernel_context &context = default_kernel_context()); +NNCASE_API result +mat_mul_shape(value_t lhs, value_t rhs, value_t output = nullptr, + kernel_context &context = default_kernel_context()); + NNCASE_API result normal(typecode_t type, value_t mean, value_t scale, value_t seed, value_t shape, value_t output = nullptr, diff --git a/src/Native/include/nncase/runtime/bfloat16.h b/src/Native/include/nncase/runtime/bfloat16.h index 4d15803a44..b97a4e7c3c 100644 --- a/src/Native/include/nncase/runtime/bfloat16.h +++ b/src/Native/include/nncase/runtime/bfloat16.h @@ -61,6 +61,8 @@ struct bfloat16 { explicit bfloat16(const T &val) noexcept : bfloat16(static_cast(val)) {} + bfloat16(int &&val) noexcept : bfloat16(static_cast(val)) {} + constexpr bfloat16(from_raw_t, uint16_t value) noexcept : value_(value) {} operator float() const noexcept { @@ -153,6 +155,10 @@ struct bfloat16 { return (value_ & 0x7FFF) == ZERO_VALUE; } + void operator=(const float &v) noexcept { + value_ = (round_to_bfloat16(v).value_); + } + private: uint16_t value_; }; diff --git a/src/Native/include/nncase/runtime/half.h b/src/Native/include/nncase/runtime/half.h index 8ee136e471..2a53417192 100644 --- a/src/Native/include/nncase/runtime/half.h +++ b/src/Native/include/nncase/runtime/half.h @@ -59,6 +59,8 @@ struct half { 
std::is_floating_point::value>> explicit half(const T &val) noexcept : half(static_cast(val)) {} + half(int &&val) noexcept : half(static_cast(val)) {} + constexpr half(fp16_from_raw_t, uint16_t value) noexcept : value_(value) {} operator float() const noexcept { @@ -156,6 +158,10 @@ struct half { return (value_ & 0x7FFF) == ZERO_VALUE; } + void operator=(const float &v) noexcept { + value_ = (round_to_half(v).value_); + } + private: uint16_t value_; }; diff --git a/src/Native/include/nncase/runtime/stackvm/op_reader.h b/src/Native/include/nncase/runtime/stackvm/op_reader.h index 0ba037d227..5de273cab1 100644 --- a/src/Native/include/nncase/runtime/stackvm/op_reader.h +++ b/src/Native/include/nncase/runtime/stackvm/op_reader.h @@ -1,4 +1,4 @@ -/* This file is generated by tools/stackvm_gen/IsaGen at 2023/7/12 15:04:16 +/* This file is generated by tools/stackvm_gen/IsaGen at 2023/7/12 17:07:39 * +08:00. * * Copyright 2019-2021 Canaan Inc. @@ -786,6 +786,14 @@ template <> struct tensor_op_reader { } }; +template <> struct tensor_op_reader { + tensor_broadcast_shape_op_t + operator()(NNCASE_UNUSED span_reader &reader) const { + tensor_broadcast_shape_op_t op; + return op; + } +}; + template <> struct tensor_op_reader { tensor_bucket_pad_op_t operator()(NNCASE_UNUSED span_reader &reader) const { tensor_bucket_pad_op_t op; @@ -857,6 +865,14 @@ template <> struct tensor_op_reader { } }; +template <> struct tensor_op_reader { + tensor_conv2d_shape_op_t + operator()(NNCASE_UNUSED span_reader &reader) const { + tensor_conv2d_shape_op_t op; + return op; + } +}; + template <> struct tensor_op_reader { tensor_conv2d_transpose_op_t operator()(NNCASE_UNUSED span_reader &reader) const { @@ -866,6 +882,14 @@ template <> struct tensor_op_reader { } }; +template <> struct tensor_op_reader { + tensor_conv2d_transpose_shape_op_t + operator()(NNCASE_UNUSED span_reader &reader) const { + tensor_conv2d_transpose_shape_op_t op; + return op; + } +}; + template <> struct tensor_op_reader { tensor_cum_sum_op_t operator()(NNCASE_UNUSED span_reader &reader) const { tensor_cum_sum_op_t op; @@ -1076,6 +1100,14 @@ template <> struct tensor_op_reader { } }; +template <> struct tensor_op_reader { + tensor_mat_mul_shape_op_t + operator()(NNCASE_UNUSED span_reader &reader) const { + tensor_mat_mul_shape_op_t op; + return op; + } +}; + template <> struct tensor_op_reader { tensor_normal_op_t operator()(NNCASE_UNUSED span_reader &reader) const { tensor_normal_op_t op; @@ -1449,6 +1481,10 @@ class NNCASE_API tensor_op_visitor { return default_visit(tensor_function_t::broadcast, &op); } virtual result + visit(NNCASE_UNUSED const tensor_broadcast_shape_op_t &op) noexcept { + return default_visit(tensor_function_t::broadcast_shape, &op); + } + virtual result visit(NNCASE_UNUSED const tensor_bucket_pad_op_t &op) noexcept { return default_visit(tensor_function_t::bucket_pad, &op); } @@ -1485,10 +1521,18 @@ class NNCASE_API tensor_op_visitor { return default_visit(tensor_function_t::conv2d, &op); } virtual result + visit(NNCASE_UNUSED const tensor_conv2d_shape_op_t &op) noexcept { + return default_visit(tensor_function_t::conv2d_shape, &op); + } + virtual result visit(NNCASE_UNUSED const tensor_conv2d_transpose_op_t &op) noexcept { return default_visit(tensor_function_t::conv2d_transpose, &op); } virtual result + visit(NNCASE_UNUSED const tensor_conv2d_transpose_shape_op_t &op) noexcept { + return default_visit(tensor_function_t::conv2d_transpose_shape, &op); + } + virtual result visit(NNCASE_UNUSED const tensor_cum_sum_op_t &op) 
noexcept { return default_visit(tensor_function_t::cum_sum, &op); } @@ -1597,6 +1641,10 @@ class NNCASE_API tensor_op_visitor { return default_visit(tensor_function_t::mat_mul, &op); } virtual result + visit(NNCASE_UNUSED const tensor_mat_mul_shape_op_t &op) noexcept { + return default_visit(tensor_function_t::mat_mul_shape, &op); + } + virtual result visit(NNCASE_UNUSED const tensor_normal_op_t &op) noexcept { return default_visit(tensor_function_t::normal, &op); } diff --git a/src/Native/include/nncase/runtime/stackvm/opcode.h b/src/Native/include/nncase/runtime/stackvm/opcode.h index dfc066b913..26e98927f1 100644 --- a/src/Native/include/nncase/runtime/stackvm/opcode.h +++ b/src/Native/include/nncase/runtime/stackvm/opcode.h @@ -1,4 +1,4 @@ -/* This file is generated by tools/stackvm_gen/IsaGen at 2023/7/12 15:04:15 +/* This file is generated by tools/stackvm_gen/IsaGen at 2023/7/12 17:07:38 * +08:00. * * Copyright 2019-2021 Canaan Inc. @@ -130,90 +130,94 @@ enum class opcode_t : uint8_t { enum class tensor_function_t : uint16_t { batch_normalization = 0, batch_to_space = 1, - celu = 7, - conv2d = 13, - conv2d_transpose = 14, - elu = 17, - erf = 18, - gelu = 27, - hardmax = 29, - hard_sigmoid = 30, - hard_swish = 31, - instance_normalization = 33, - l2_normalization = 34, - layer_norm = 35, - leaky_relu = 36, - log_softmax = 37, - lp_normalization = 38, - lrn = 39, - one_hot = 44, - pad = 45, - prelu = 46, - reduce_window2d = 55, - relu = 56, - relu6 = 57, - selu = 64, - sigmoid = 66, - softmax = 69, - softplus = 70, - softsign = 71, - space_to_batch = 72, - swish = 76, + celu = 8, + conv2d = 14, + conv2d_transpose = 16, + elu = 20, + erf = 21, + gelu = 30, + hardmax = 32, + hard_sigmoid = 33, + hard_swish = 34, + instance_normalization = 36, + l2_normalization = 37, + layer_norm = 38, + leaky_relu = 39, + log_softmax = 40, + lp_normalization = 41, + lrn = 42, + one_hot = 48, + pad = 49, + prelu = 50, + reduce_window2d = 59, + relu = 60, + relu6 = 61, + selu = 68, + sigmoid = 70, + softmax = 73, + softplus = 74, + softsign = 75, + space_to_batch = 76, + swish = 80, binary = 2, - clamp = 8, - compare = 9, - condition = 11, - cum_sum = 15, - dequantize = 16, - fake_dequantize = 20, - fake_quantize = 21, - mat_mul = 41, - quantize = 48, - quant_param_of = 49, - range_of = 51, - reduce = 53, - reduce_arg = 54, - require = 58, - select = 63, - unary = 81, + clamp = 9, + compare = 10, + condition = 12, + cum_sum = 18, + dequantize = 19, + fake_dequantize = 23, + fake_quantize = 24, + mat_mul = 44, + quantize = 52, + quant_param_of = 53, + range_of = 55, + reduce = 57, + reduce_arg = 58, + require = 62, + select = 67, + unary = 85, bitcast = 3, broadcast = 4, - bucket_pad = 5, - cast = 6, - concat = 10, - constant_of_shape = 12, - expand = 19, - fix_shape = 22, - flatten = 23, - gather = 24, - gather_elements = 25, - gather_nd = 26, - get_item = 28, - index_of = 32, - lstm = 40, - prod = 47, - range = 50, - rank = 52, - reshape = 59, - reverse_sequence = 61, - scatter_nd = 62, - shape_of = 65, - size_of = 67, - slice = 68, - split = 73, - squeeze = 74, - stack = 75, - tile = 77, - top_k = 78, - transpose = 79, - trilu = 80, - unsqueeze = 84, - where = 85, - normal = 42, - normal_like = 43, - uniform = 82, - uniform_like = 83, - resize_image = 60, + bucket_pad = 6, + cast = 7, + concat = 11, + constant_of_shape = 13, + expand = 22, + fix_shape = 25, + flatten = 26, + gather = 27, + gather_elements = 28, + gather_nd = 29, + get_item = 31, + index_of = 35, + lstm = 43, + prod = 51, + range = 54, 
+ rank = 56, + reshape = 63, + reverse_sequence = 65, + scatter_nd = 66, + shape_of = 69, + size_of = 71, + slice = 72, + split = 77, + squeeze = 78, + stack = 79, + tile = 81, + top_k = 82, + transpose = 83, + trilu = 84, + unsqueeze = 88, + where = 89, + broadcast_shape = 5, + conv2d_shape = 15, + conv2d_transpose_shape = 17, + mat_mul_shape = 45, + normal = 46, + normal_like = 47, + uniform = 86, + uniform_like = 87, + resize_image = 64, }; enum class binary_op_t : uint8_t { @@ -588,6 +592,8 @@ struct tensor_bitcast_op_t { struct tensor_broadcast_op_t {}; +struct tensor_broadcast_shape_op_t {}; + struct tensor_bucket_pad_op_t {}; struct tensor_cast_op_t { @@ -615,10 +621,14 @@ struct tensor_conv2d_op_t { pad_mode_t pad_mode; }; +struct tensor_conv2d_shape_op_t {}; + struct tensor_conv2d_transpose_op_t { pad_mode_t pad_mode; }; +struct tensor_conv2d_transpose_shape_op_t {}; + struct tensor_cum_sum_op_t {}; struct tensor_dequantize_op_t { @@ -686,6 +696,8 @@ struct tensor_lstm_op_t { struct tensor_mat_mul_op_t {}; +struct tensor_mat_mul_shape_op_t {}; + struct tensor_normal_op_t { typecode_t type; }; @@ -975,6 +987,14 @@ inline std::string to_string(tensor_function_t tensor_funct) { return "unsqueeze"; case tensor_function_t::where: return "where"; + case tensor_function_t::broadcast_shape: + return "broadcast_shape"; + case tensor_function_t::conv2d_shape: + return "conv2d_shape"; + case tensor_function_t::conv2d_transpose_shape: + return "conv2d_transpose_shape"; + case tensor_function_t::mat_mul_shape: + return "mat_mul_shape"; case tensor_function_t::normal: return "normal"; case tensor_function_t::normal_like: diff --git a/src/Native/include/nncase/runtime/util.h b/src/Native/include/nncase/runtime/util.h index 8740233cbc..68bd808025 100644 --- a/src/Native/include/nncase/runtime/util.h +++ b/src/Native/include/nncase/runtime/util.h @@ -468,6 +468,10 @@ inline bool is_contiguous(tensor tensor) { switch (_typecode) { \ case dt_float32: \ _impl(float); \ + case dt_float16: \ + _impl(half); \ + case dt_bfloat16: \ + _impl(bfloat16); \ case dt_int8: \ _impl(int8_t); \ case dt_int16: \ diff --git a/src/Native/src/kernels/stackvm/optimized/dequantize.cpp b/src/Native/src/kernels/stackvm/optimized/dequantize.cpp index 84a63b3dfc..b60862d2e3 100644 --- a/src/Native/src/kernels/stackvm/optimized/dequantize.cpp +++ b/src/Native/src/kernels/stackvm/optimized/dequantize.cpp @@ -75,5 +75,6 @@ result optimized::dequantize( NNCASE_UNUSED kernel_context &context) noexcept { DEQUANTIZE_IMPL(uint8_t, float) DEQUANTIZE_IMPL(int8_t, float) + DEQUANTIZE_IMPL(int16_t, float) return err(std::errc::not_supported); } diff --git a/src/Native/src/kernels/stackvm/optimized/quantize.cpp b/src/Native/src/kernels/stackvm/optimized/quantize.cpp index 81efee18aa..ff7960cdda 100644 --- a/src/Native/src/kernels/stackvm/optimized/quantize.cpp +++ b/src/Native/src/kernels/stackvm/optimized/quantize.cpp @@ -88,5 +88,6 @@ result optimized::quantize( NNCASE_UNUSED kernel_context &context) noexcept { QUANTIZE_IMPL(float, uint8_t) QUANTIZE_IMPL(float, int8_t) + QUANTIZE_IMPL(float, int16_t) return err(std::errc::not_supported); } \ No newline at end of file diff --git a/src/Native/src/kernels/stackvm/optimized/resize_image.cpp b/src/Native/src/kernels/stackvm/optimized/resize_image.cpp index 603d95e8ab..57d87462d4 100644 --- a/src/Native/src/kernels/stackvm/optimized/resize_image.cpp +++ b/src/Native/src/kernels/stackvm/optimized/resize_image.cpp @@ -221,7 +221,10 @@ inline result resize_bilinear_impl( auto a3 = (in_y - 
in_y0) * (in_x - in_x0); *output_ptr = bfloat16::round_to_bfloat16( - v0 * a0 + v1 * a1 + v2 * a2 + v3 * a3); + static_cast(v0) * a0 + + static_cast(v1) * a1 + + static_cast(v2) * a2 + + static_cast(v3) * a3); ++output_ptr; } } diff --git a/src/Native/src/kernels/stackvm/optimized/riscv64/unary.cpp b/src/Native/src/kernels/stackvm/optimized/riscv64/unary.cpp index 78b3c95cfe..b83cbbbf36 100644 --- a/src/Native/src/kernels/stackvm/optimized/riscv64/unary.cpp +++ b/src/Native/src/kernels/stackvm/optimized/riscv64/unary.cpp @@ -171,57 +171,68 @@ result optimized::unary(typecode_t dtype, runtime::stackvm::unary_op_t op, gsl::span out_shape, gsl::span out_strides, kernel_context &context) noexcept { + if (dtype == dt_float32) { #if __riscv_vector - auto *input = IN_CAST(float, in); - auto *output = OUT_CAST(float, out); - switch (op) { - case unary_op_t::abs: { - return optimized_unary_impl(input, output, shape); - } - case unary_op_t::ceil: { - return optimized_unary_impl(input, output, shape); - } - case unary_op_t::cos: { - return optimized_unary_impl(input, output, shape); - } - case unary_op_t::exp: { - return optimized_unary_impl(input, output, shape); - } - case unary_op_t::floor: { - return optimized_unary_impl(input, output, shape); - } - case unary_op_t::log: { - return optimized_unary_impl(input, output, shape); - } - case unary_op_t::neg: { - return optimized_unary_impl(input, output, shape); - } - case unary_op_t::round: { - return optimized_unary_impl(input, output, shape); - } - case unary_op_t::rsqrt: { - return optimized_unary_impl(input, output, shape); - } - case unary_op_t::sign: { - return optimized_unary_impl(input, output, shape); - } - case unary_op_t::sin: { - return optimized_unary_impl(input, output, shape); - } - case unary_op_t::sqrt: { - return optimized_unary_impl(input, output, shape); - } - case unary_op_t::square: { - return optimized_unary_impl(input, output, shape); - } - case unary_op_t::tanh: { - return optimized_unary_impl(input, output, shape); - } - default:; - // std::cout << "Unsupported unary op: " + unary_op_to_string(op) - // + " for optimizing, fallback to reference" << std::endl; - } + auto *input = IN_CAST(float, in); + auto *output = OUT_CAST(float, out); + switch (op) { + case unary_op_t::abs: { + return optimized_unary_impl(input, output, shape); + } + case unary_op_t::ceil: { + return optimized_unary_impl(input, output, + shape); + } + case unary_op_t::cos: { + return optimized_unary_impl(input, output, shape); + } + case unary_op_t::exp: { + return optimized_unary_impl(input, output, shape); + } + case unary_op_t::floor: { + return optimized_unary_impl(input, output, + shape); + } + case unary_op_t::log: { + return optimized_unary_impl(input, output, shape); + } + case unary_op_t::neg: { + return optimized_unary_impl(input, output, shape); + } + case unary_op_t::round: { + return optimized_unary_impl(input, output, + shape); + } + case unary_op_t::rsqrt: { + return optimized_unary_impl(input, output, + shape); + } + case unary_op_t::sign: { + return optimized_unary_impl(input, output, + shape); + } + case unary_op_t::sin: { + return optimized_unary_impl(input, output, shape); + } + case unary_op_t::sqrt: { + return optimized_unary_impl(input, output, + shape); + } + case unary_op_t::square: { + return optimized_unary_impl(input, output, + shape); + } + case unary_op_t::tanh: { + return optimized_unary_impl(input, output, + shape); + } + default:; + // std::cout << "Unsupported unary op: " + + // unary_op_to_string(op) + // + " for 
optimizing, fallback to reference" << std::endl; + } #endif + } return stackvm::reference::unary(dtype, op, in, out, shape, in_strides, out_shape, out_strides, context); } \ No newline at end of file diff --git a/src/Native/src/kernels/stackvm/optimized/x86_64/unary.cpp b/src/Native/src/kernels/stackvm/optimized/x86_64/unary.cpp index 78bedf4457..01d5d1af4e 100644 --- a/src/Native/src/kernels/stackvm/optimized/x86_64/unary.cpp +++ b/src/Native/src/kernels/stackvm/optimized/x86_64/unary.cpp @@ -104,13 +104,30 @@ struct unary_op_neg { } }; +static float round_onnx(float v) { + if (v > 0 && v - (int32_t)v == 0.5) { + float result = (int32_t)v + 1.0; + if ((int32_t)result % 2 == 0) + return result; + else + return result - 1; + } else if (v < 0 && (int32_t)v - v == 0.5) { + float result = (int32_t)v + 1.0; + if ((int32_t)result % 2 == 0) + return result; + else + return result - 1; + } else + return roundf(v); +} + struct unary_op_round { - float operator()(float x) const { return roundf(x); } + float operator()(float x) const { return round_onnx(x); } void pack(const float *a, float *b) { __m256 vector_a = _mm256_loadu_ps(a); __m256 dst_a = _mm256_round_ps( - vector_a, (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC)); + vector_a, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); _mm256_storeu_ps(b, dst_a); } }; diff --git a/src/Native/src/kernels/stackvm/reference/CMakeLists.txt b/src/Native/src/kernels/stackvm/reference/CMakeLists.txt index e7a2f660fb..84dd07203c 100644 --- a/src/Native/src/kernels/stackvm/reference/CMakeLists.txt +++ b/src/Native/src/kernels/stackvm/reference/CMakeLists.txt @@ -1,6 +1,7 @@ cmake_minimum_required(VERSION 3.13) set(SRCS ../tensor_ops.cpp + ../shape_ops.cpp activation.cpp batchnorm.cpp batch_to_space.cpp diff --git a/src/Native/src/kernels/stackvm/reference/activation.cpp b/src/Native/src/kernels/stackvm/reference/activation.cpp index b0b468305d..af96c6639c 100644 --- a/src/Native/src/kernels/stackvm/reference/activation.cpp +++ b/src/Native/src/kernels/stackvm/reference/activation.cpp @@ -29,30 +29,30 @@ using namespace nncase::runtime::stackvm; using namespace nncase::kernels; using namespace nncase::kernels::stackvm; -FLOAT_UNARY_TEMPLATE(relu, std::max((float)0, x)) -FLOAT_UNARY_TEMPLATE(softsign, x / (1 + std::abs(x))) -FLOAT_UNARY_TEMPLATE(softplus, std::log(1 + std::exp(x))) -FLOAT_UNARY_TEMPLATE(sigmoid, 1 / (1 + exp(-x))) -FLOAT_UNARY_TEMPLATE(swish, x / (1 + exp(-x))) -FLOAT_UNARY_TEMPLATE(hard_swish, - x *std::max(0.f, std::min((float)1.f, - (float)(1.f / 6 * x + 0.5)))) -FLOAT_UNARY_TEMPLATE(erf, erff(x)) // for k510 toolchain -FLOAT_UNARY_WITH_MUL_TEMPLATE(elu, alpha, x < 0 ? alpha * (exp(x) - 1) : x) +UNARY_TEMPLATE(relu, std::max((double)0, x)) +UNARY_TEMPLATE(softsign, x / (1 + std::abs(x))) +UNARY_TEMPLATE(softplus, std::log(1 + std::exp(x))) +UNARY_TEMPLATE(sigmoid, 1 / (1 + exp(-x))) +UNARY_TEMPLATE(swish, x / (1 + exp(-x))) +UNARY_TEMPLATE(hard_swish, + x *std::max((double)0.f, + std::min((double)1.f, (double)(1.f / 6 * x + 0.5)))) +UNARY_TEMPLATE(erf, erff(x)) // for k510 toolchain +UNARY_WITH_MUL_TEMPLATE_V2(elu, alpha, x < 0 ? alpha * (exp(x) - 1) : x) // FLOAT_UNARY_WITH_MUL_TEMPLATE(prelu, slope, x < 0 ? slope * x : x) -FLOAT_UNARY_WITH_MUL_TEMPLATE( - celu, alpha, - std::max((float)0, x) + - std::min((float)0, (float)(alpha *(exp(x / alpha) - 1)))) -FLOAT_UNARY_WITH_MUL_TEMPLATE(leaky_relu, alpha, x < 0 ? 
alpha * x : x) -FLOAT_UNARY_WITH_MUL_TEMPLATE(gelu, alpha, - 0.5f * (alpha * x) * - (1.f + erff(alpha * x / sqrtf(2.f)))) -FLOAT_ACTIVATION_TEMPLATE(selu, - x <= 0 ? gamma * (alpha * std::exp(x) - alpha) - : x * gamma, - alpha, gamma) -FLOAT_ACTIVATION_TEMPLATE(hard_sigmoid, - std::max((float)0, - std::min((float)1, x *alpha + beta)), - alpha, beta) \ No newline at end of file +UNARY_WITH_MUL_TEMPLATE_V2(celu, alpha, + std::max((double)0, x) + + std::min((double)0, + (double)(alpha *(exp(x / alpha) - 1)))) +UNARY_WITH_MUL_TEMPLATE_V2(leaky_relu, alpha, x < 0 ? alpha * x : x) +UNARY_WITH_MUL_TEMPLATE_V2(gelu, alpha, + 0.5f * (alpha * x) * + (1.f + erff(alpha * x / sqrtf(2.f)))) +ACTIVATION_TEMPLATE_V2(selu, + x <= 0 ? gamma * (alpha * std::exp(x) - alpha) + : x * gamma, + alpha, gamma) +ACTIVATION_TEMPLATE_V2(hard_sigmoid, + std::max((double)0, + std::min((double)1, x *alpha + gamma)), + alpha, gamma) \ No newline at end of file diff --git a/src/Native/src/kernels/stackvm/reference/batch_to_space.cpp b/src/Native/src/kernels/stackvm/reference/batch_to_space.cpp index 12df871f8f..d113cba4a6 100644 --- a/src/Native/src/kernels/stackvm/reference/batch_to_space.cpp +++ b/src/Native/src/kernels/stackvm/reference/batch_to_space.cpp @@ -103,8 +103,13 @@ batch_to_space_impl(datatype_t type, const gsl::byte *input, gsl::byte *output, dims_t infer_shape(gsl::span origin_in_shape, gsl::span block_shape, const paddings_t &crops) { - auto in_shape = kernels::stackvm::transpose_infer_shape( - origin_in_shape, fixed_dims(0, 2, 3, 1)); + auto d4 = fixed_dims(0, 2, 3, 1); + auto d3 = fixed_dims(0, 2, 1); + auto inPerm = origin_in_shape.size() == 4 + ? gsl::span{d4.data(), d4.size()} + : gsl::span{d3.data(), d3.size()}; + auto in_shape = + kernels::stackvm::transpose_infer_shape(origin_in_shape, inPerm); auto batch = in_shape[0] / compute_size(block_shape); auto out_shape = dims_t{batch}; auto m = block_shape.size(); @@ -117,8 +122,12 @@ dims_t infer_shape(gsl::span origin_in_shape, out_shape.insert(out_shape.end(), in_shape.end() - remain_size, in_shape.end()); } - return kernels::stackvm::transpose_infer_shape(out_shape, - fixed_dims(0, 3, 1, 2)); + auto outd4 = fixed_dims(0, 3, 1, 2); + auto outd3 = fixed_dims(0, 2, 1); + auto outPerm = origin_in_shape.size() == 4 + ? 
gsl::span{outd4.data(), outd4.size()} + : gsl::span{outd3.data(), outd3.size()}; + return kernels::stackvm::transpose_infer_shape(out_shape, outPerm); } result kernels::stackvm::batch_to_space(value_t input, diff --git a/src/Native/src/kernels/stackvm/reference/clamp.cpp b/src/Native/src/kernels/stackvm/reference/clamp.cpp index c9d42450e8..eb3bdfc72f 100644 --- a/src/Native/src/kernels/stackvm/reference/clamp.cpp +++ b/src/Native/src/kernels/stackvm/reference/clamp.cpp @@ -34,7 +34,9 @@ result clamp_impl(const T *input, T min, T max, T *output, NNCASE_UNUSED kernel_context &context) { return apply(in_shape, [&](gsl::span index) -> result { const auto v = input[offset(index, in_strides)]; - output[offset(index, out_strides)] = std::min(std::max(v, min), max); + output[offset(index, out_strides)] = static_cast( + std::min(std::max(static_cast(v), static_cast(min)), + static_cast(max))); return ok(); }); } diff --git a/src/Native/src/kernels/stackvm/reference/dequantize.cpp b/src/Native/src/kernels/stackvm/reference/dequantize.cpp index 3ee7202804..3c799b5a16 100644 --- a/src/Native/src/kernels/stackvm/reference/dequantize.cpp +++ b/src/Native/src/kernels/stackvm/reference/dequantize.cpp @@ -55,5 +55,6 @@ result nncase::kernels::stackvm::reference::dequantize( float scale, float bias, kernel_context &context) noexcept { DEQUANTIZE_IMPL(uint8_t, float); DEQUANTIZE_IMPL(int8_t, float); + DEQUANTIZE_IMPL(int16_t, float); return err(std::errc::not_supported); } \ No newline at end of file diff --git a/src/Native/src/kernels/stackvm/reference/kernel_template.h b/src/Native/src/kernels/stackvm/reference/kernel_template.h index b636007284..f01fc7a339 100644 --- a/src/Native/src/kernels/stackvm/reference/kernel_template.h +++ b/src/Native/src/kernels/stackvm/reference/kernel_template.h @@ -16,7 +16,7 @@ #include -#define FLOAT_UNARY_IMPL_TEMPLATE(_name, _compute) \ +#define UNARY_IMPL_TEMPLATE(_name, _compute) \ template \ result _name##_impl( \ const T *input, T *output, gsl::span in_shape, \ @@ -30,8 +30,8 @@ kernels::detail::get_reduced_offset(index, in_shape); \ auto src_idx = offset(input_strides, in_index); \ auto dst_idx = offset(out_strides, in_index); \ - auto x = input[src_idx]; \ - output[dst_idx] = _compute; \ + auto x = static_cast(input[src_idx]); \ + output[dst_idx] = static_cast(_compute); \ return ok(); \ }); \ } \ @@ -43,34 +43,62 @@ [[maybe_unused]] gsl::span out_strides, \ NNCASE_UNUSED kernel_context &context) noexcept { \ for (int i = 0; i < compute_size(in_shape); ++i) { \ - auto x = input[i]; \ - output[i] = _compute; \ + auto x = static_cast(input[i]); \ + output[i] = static_cast(_compute); \ } \ return ok(); \ } -#define FLOAT_UNARY_OP_TEMPLATE(_name) \ +#define UNARY_OP_TEMPLATE(_name) \ result nncase::kernels::stackvm::_name( \ value_t input, value_t output, kernel_context &context) { \ - try_f32_input(input_mem, input); \ + try_input(input_mem, input); \ auto dtype = input_tensor->dtype(); \ - try_f32_output(out_mem, output, input_tensor->shape()); \ + try_output_like_input(output_mem, output, input_tensor); \ + try_var(typecode, to_typecode(input_tensor->dtype())); \ if (is_contiguous(input_tensor)) { \ - try_(_name##_opt_impl(input_mem, out_mem, input_tensor->shape(), \ - input_tensor->strides(), \ - output_tensor->shape(), \ - output_tensor->strides(), context)); \ + try_(UNARY_WITH_DISPTCH(_name##_opt_impl)); \ } else { \ - try_(_name##_impl(input_mem, out_mem, input_tensor->shape(), \ - input_tensor->strides(), output_tensor->shape(), \ - output_tensor->strides(), 
context)); \ + try_(UNARY_WITH_DISPTCH(_name##_impl)); \ } \ return ok(output); \ } -#define FLOAT_UNARY_TEMPLATE(_name, _compute) \ - FLOAT_UNARY_IMPL_TEMPLATE(_name, _compute) \ - FLOAT_UNARY_OP_TEMPLATE(_name) +#define UNARY_TEMPLATE(_name, _compute) \ + UNARY_IMPL_TEMPLATE(_name, _compute) \ + UNARY_WITH_DISPTCH_OP_TEMPLATE_V2(_name##_opt_impl) \ + UNARY_WITH_DISPTCH_OP_TEMPLATE_V2(_name##_impl) \ + UNARY_OP_TEMPLATE(_name) + +#define UNARY_WITH_DISPTCH(_impl_func) \ + _impl_func##_disptch(typecode, input_mem, output_mem, \ + input_tensor->shape(), input_tensor->strides(), \ + output_tensor->shape(), output_tensor->strides(), \ + context) + +#define UNARY_WITH_DISPTCH_OP_TEMPLATE_V2(_impl_func) \ + result _impl_func##_disptch( \ + typecode_t type, const gsl::byte *input, gsl::byte *output, \ + gsl::span in_shape, gsl::span in_strides, \ + gsl::span out_shape, \ + gsl::span out_strides, \ + NNCASE_UNUSED kernel_context &context) noexcept { \ + TYPE_SELECT_WITH_IMPL(type, UNARY_IMPL_FUNC_WRAPPER_V2, _impl_func); \ + } + +#define UNARY_WITH_DISPTCH_OP_TEMPLATE_V2(_impl_func) \ + result _impl_func##_disptch( \ + typecode_t type, const gsl::byte *input, gsl::byte *output, \ + gsl::span in_shape, gsl::span in_strides, \ + gsl::span out_shape, \ + gsl::span out_strides, \ + NNCASE_UNUSED kernel_context &context) noexcept { \ + TYPE_SELECT_WITH_IMPL(type, UNARY_IMPL_FUNC_WRAPPER_V2, _impl_func); \ + } + +#define UNARY_IMPL_FUNC_WRAPPER_V2(_impl_func, type) \ + return _impl_func(IN_CAST(type, input), OUT_CAST(type, output), in_shape, \ + in_strides, out_shape, out_strides, context) #define FLOAT_UNARY_WITH_MUL_IMPL_TEMPLATE(_name, _alpha_name, _compute) \ template \ @@ -107,38 +135,119 @@ return ok(); \ } -#define FLOAT_UNARY_WITH_MUL_OP_TEMPLATE(_name, _alpha_name) \ - result _name##_impl(const float *input, float *output, \ - gsl::span input_shape, \ - gsl::span input_strides, \ - gsl::span out_shape, \ - gsl::span out_strides, \ - NNCASE_UNUSED kernel_context &context); \ +#define UNARY_WITH_MUL_IMPL_TEMPLATE_V2(_name, _alpha_name, _compute) \ + template \ + result _name##_impl( \ + const T *input, T *output, T _alpha_name, \ + gsl::span in_shape, \ + gsl::span input_strides, \ + gsl::span out_shape, \ + gsl::span out_strides, \ + NNCASE_UNUSED kernel_context &context) noexcept { \ + return apply( \ + out_shape, [&](gsl::span index) -> result { \ + const auto in_index = \ + kernels::detail::get_reduced_offset(index, in_shape); \ + auto src_idx = offset(input_strides, in_index); \ + auto dst_idx = offset(out_strides, in_index); \ + const auto alpha = static_cast(_alpha_name); \ + const auto x = static_cast(input[src_idx]); \ + output[dst_idx] = static_cast(_compute); \ + return ok(); \ + }); \ + } \ + template \ + result _name##_contiguous_impl( \ + const T *input, T *output, T _alpha_name, \ + gsl::span in_shape, \ + [[maybe_unused]] gsl::span input_strides, \ + [[maybe_unused]] gsl::span out_shape, \ + [[maybe_unused]] gsl::span out_strides, \ + NNCASE_UNUSED kernel_context &context) noexcept { \ + for (int i = 0; i < compute_size(in_shape); ++i) { \ + const auto alpha = static_cast(_alpha_name); \ + const auto x = static_cast(input[i]); \ + output[i] = static_cast(_compute); \ + } \ + return ok(); \ + } + +#define UNARY_IMPL_FUNC_WRAPPER(_impl_func, type) \ + return _impl_func(IN_CAST(type, input), OUT_CAST(type, output), \ + *IN_CAST(type, _alpha), in_shape, in_strides, out_shape, \ + out_strides, context) + +#define TYPE_SELECT_WITH_IMPL(_typecode, _impl, _impl_func) \ + switch 
(_typecode) { \ + case dt_float32: \ + _impl(_impl_func, float); \ + case dt_float16: \ + _impl(_impl_func, half); \ + case dt_bfloat16: \ + _impl(_impl_func, bfloat16); \ + case dt_int8: \ + _impl(_impl_func, int8_t); \ + case dt_int16: \ + _impl(_impl_func, int16_t); \ + case dt_int32: \ + _impl(_impl_func, int32_t); \ + case dt_int64: \ + _impl(_impl_func, int64_t); \ + case dt_uint8: \ + _impl(_impl_func, uint8_t); \ + case dt_uint16: \ + _impl(_impl_func, uint16_t); \ + case dt_uint32: \ + _impl(_impl_func, uint32_t); \ + case dt_uint64: \ + _impl(_impl_func, uint64_t); \ + case dt_float64: \ + _impl(_impl_func, double); \ + case dt_boolean: \ + _impl(_impl_func, uint8_t); \ + default: \ + return err(std::errc::not_supported); \ + } + +#define UNARY_WITH_MUL_DISPTCH_OP_TEMPLATE_V2(_impl_func) \ + result _impl_func##_disptch( \ + typecode_t type, const gsl::byte *input, gsl::byte *output, \ + const gsl::byte *_alpha, gsl::span in_shape, \ + gsl::span in_strides, gsl::span out_shape, \ + gsl::span out_strides, \ + NNCASE_UNUSED kernel_context &context) noexcept { \ + TYPE_SELECT_WITH_IMPL(type, UNARY_IMPL_FUNC_WRAPPER, _impl_func); \ + } + +#define UNARY_WITH_MUL_DISPTCH(_impl_func) \ + _impl_func##_disptch(typecode, input_mem, output_mem, _alpha_name_mem, \ + input_tensor->shape(), input_tensor->strides(), \ + output_tensor->shape(), output_tensor->strides(), \ + context) + +#define UNARY_WITH_MUL_OP_TEMPLATE_V2(_name, _alpha_name) \ result nncase::kernels::stackvm::_name( \ value_t input, value_t _alpha_name, value_t output, \ kernel_context &context) { \ - try_f32_input(input_mem, input); \ - try_to_scalar(_alpha_name##_value, _alpha_name, float); \ + try_input(input_mem, input); \ + try_input(_alpha_name_mem, _alpha_name); \ auto dtype = input_tensor->dtype(); \ - try_f32_output(out_mem, output, input_tensor->shape()); \ + try_output_like_input(output_mem, output, input_tensor); \ + try_var(typecode, to_typecode(input_tensor->dtype())); \ if (is_contiguous(input_tensor)) { \ - try_(_name##_contiguous_impl( \ - input_mem, out_mem, _alpha_name##_value, \ - input_tensor->shape(), input_tensor->strides(), \ - output_tensor->shape(), output_tensor->strides(), context)); \ + try_(UNARY_WITH_MUL_DISPTCH(_name##_contiguous_impl)); \ } else { \ - try_(_name##_impl(input_mem, out_mem, _alpha_name##_value, \ - input_tensor->shape(), input_tensor->strides(), \ - output_tensor->shape(), \ - output_tensor->strides(), context)); \ + try_(UNARY_WITH_MUL_DISPTCH(_name##_impl)); \ } \ return ok(output); \ } // _alpha_name is a var used in kernel -#define FLOAT_UNARY_WITH_MUL_TEMPLATE(_name, _alpha_name, _compute) \ - FLOAT_UNARY_WITH_MUL_IMPL_TEMPLATE(_name, _alpha_name, _compute) \ - FLOAT_UNARY_WITH_MUL_OP_TEMPLATE(_name, _alpha_name) +#define UNARY_WITH_MUL_TEMPLATE_V2(_name, _alpha_name, _compute) \ + UNARY_WITH_MUL_IMPL_TEMPLATE_V2(_name, _alpha_name##_arg, _compute) \ + UNARY_WITH_MUL_DISPTCH_OP_TEMPLATE_V2(_name##_contiguous_impl) \ + UNARY_WITH_MUL_DISPTCH_OP_TEMPLATE_V2(_name##_impl) \ + UNARY_WITH_MUL_OP_TEMPLATE_V2(_name, _alpha_name) #define MKFNS(fn, ...) 
\ MKFN_N(fn, ##__VA_ARGS__, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)(__VA_ARGS__) @@ -179,6 +288,29 @@ }); \ } +#define ACTIVATION_IMPL_TEMPLATE_V2(_name, _compute, _alpha_name, _gamma_name) \ + template \ + result _name##_impl( \ + const T *input, T *output, T _alpha_name, T _gamma_name, \ + gsl::span in_shape, \ + gsl::span input_strides, \ + gsl::span out_shape, \ + gsl::span out_strides, \ + NNCASE_UNUSED kernel_context &context) noexcept { \ + return apply( \ + out_shape, [&](gsl::span index) -> result { \ + const auto in_index = \ + kernels::detail::get_reduced_offset(index, in_shape); \ + auto src_idx = offset(input_strides, in_index); \ + auto dst_idx = offset(out_strides, in_index); \ + auto x = static_cast(input[src_idx]); \ + const auto alpha = static_cast(_alpha_name); \ + const auto gamma = static_cast(_gamma_name); \ + output[dst_idx] = static_cast(_compute); \ + return ok(); \ + }); \ + } + #define VALUE_ARGS_EXPAND(...) MKFNS(VALUE_ARGS_EXPAND, ##__VA_ARGS__) #define VALUE_ARGS_EXPAND0(_a) value_t _a @@ -236,10 +368,52 @@ return ok(output); \ } +#define UNARY_WITH_DISPTCH_V2(_impl_func) \ + _impl_func##_disptch(typecode, input_mem, output_mem, _alpha_name_mem, \ + _gamma_name_mem, input_tensor->shape(), \ + input_tensor->strides(), output_tensor->shape(), \ + output_tensor->strides(), context) + +#define ACTIVATION_OP_TEMPLATE_V2(_name, _alpha_name, _gamma_name) \ + result nncase::kernels::stackvm::_name( \ + value_t input, value_t _alpha_name, value_t _gamma_name, \ + value_t output, kernel_context &context) { \ + try_input(input_mem, input); \ + auto dtype = input_tensor->dtype(); \ + try_input(_alpha_name_mem, _alpha_name); \ + try_input(_gamma_name_mem, _gamma_name); \ + try_var(typecode, to_typecode(input_tensor->dtype())); \ + try_output_like_input(output_mem, output, input_tensor); \ + try_(UNARY_WITH_DISPTCH_V2(_name##_impl)); \ + return ok(output); \ + } + +#define UNARY_IMPL_FUNC_WRAPPER_V3(_impl_func, type) \ + return _impl_func(IN_CAST(type, input), OUT_CAST(type, output), \ + *IN_CAST(type, _alpha), *IN_CAST(type, _gamma), \ + in_shape, in_strides, out_shape, out_strides, context) + +#define UNARY_WITH_MUL_DISPTCH_OP_ACTIVATION_OP_TEMPLATE_V2(_impl_func) \ + result _impl_func##_disptch( \ + typecode_t type, const gsl::byte *input, gsl::byte *output, \ + const gsl::byte *_alpha, const gsl::byte *_gamma, \ + gsl::span in_shape, gsl::span in_strides, \ + gsl::span out_shape, \ + gsl::span out_strides, \ + NNCASE_UNUSED kernel_context &context) noexcept { \ + TYPE_SELECT_WITH_IMPL(type, UNARY_IMPL_FUNC_WRAPPER_V3, _impl_func); \ + } + #define FLOAT_ACTIVATION_TEMPLATE(_name, _compute, ...) 
\ FLOAT_ACTIVATION_IMPL_TEMPLATE(_name, _compute, __VA_ARGS__) \ FLOAT_ACTIVATION_OP_TEMPLATE(_name, __VA_ARGS__) +#define ACTIVATION_TEMPLATE_V2(_name, _compute, _alpha_name, _gamma_name) \ + ACTIVATION_IMPL_TEMPLATE_V2(_name, _compute, _alpha_name##arg, \ + _gamma_name##arg) \ + UNARY_WITH_MUL_DISPTCH_OP_ACTIVATION_OP_TEMPLATE_V2(_name##_impl) \ + ACTIVATION_OP_TEMPLATE_V2(_name, _alpha_name, _gamma_name) + #define BASIC_PARAM \ const gsl::byte *input, gsl::byte *output, \ gsl::span in_shape, gsl::span out_shape, \ diff --git a/src/Native/src/kernels/stackvm/reference/matmul.cpp b/src/Native/src/kernels/stackvm/reference/matmul.cpp index 52d6088f16..670e702503 100644 --- a/src/Native/src/kernels/stackvm/reference/matmul.cpp +++ b/src/Native/src/kernels/stackvm/reference/matmul.cpp @@ -52,8 +52,17 @@ result matmul_unit_impl(const T *input_a, const T *input_b, T *output, template result matmul_impl(const T *input_a, const T *input_b, T *output, - gsl::span in_a_shape, - gsl::span in_b_shape) noexcept { + gsl::span in_a_shape_, + gsl::span in_b_shape_) noexcept { + dims_t in_a_shape = in_a_shape_; + dims_t in_b_shape = in_b_shape_; + if (in_a_shape.size() == 1) { + in_a_shape.insert(in_a_shape.begin(), 1); + } + + if (in_b_shape.size() == 1) { + in_b_shape.insert(in_b_shape.end(), 1); + } auto new_a_shape = to_4d(in_a_shape); auto new_b_shape = to_4d(in_b_shape); auto a_unit_size = new_a_shape[2] * new_a_shape[3]; diff --git a/src/Native/src/kernels/stackvm/reference/pad.cpp b/src/Native/src/kernels/stackvm/reference/pad.cpp index db7bf2999c..0f11590fb4 100644 --- a/src/Native/src/kernels/stackvm/reference/pad.cpp +++ b/src/Native/src/kernels/stackvm/reference/pad.cpp @@ -208,8 +208,9 @@ result nncase::kernels::stackvm::reference::pad( kernel_context &context) noexcept { auto unit = runtime::get_bytes(type); bool padding_before_is_zero = - std::all_of(paddings.begin(), paddings.end(), - [](const padding &p) { return p.before == 0; }) && + std::all_of( + paddings.begin(), paddings.end(), + [](const padding &p) { return p.before == 0 && p.after >= 0; }) && mode == pad_mode_t::constant && in_shape.size() >= 3; if (std::all_of(paddings.begin(), paddings.end(), diff --git a/src/Native/src/kernels/stackvm/reference/quantize.cpp b/src/Native/src/kernels/stackvm/reference/quantize.cpp index 4754661899..0bd62af45f 100644 --- a/src/Native/src/kernels/stackvm/reference/quantize.cpp +++ b/src/Native/src/kernels/stackvm/reference/quantize.cpp @@ -59,5 +59,6 @@ result nncase::kernels::stackvm::reference::quantize( float scale, float bias, kernel_context &context) noexcept { QUANTIZE_IMPL(float, uint8_t); QUANTIZE_IMPL(float, int8_t); + QUANTIZE_IMPL(float, int16_t); return err(std::errc::not_supported); } \ No newline at end of file diff --git a/src/Native/src/kernels/stackvm/reference/reduce_arg.cpp b/src/Native/src/kernels/stackvm/reference/reduce_arg.cpp index 6322017fe6..74cd0bd0de 100644 --- a/src/Native/src/kernels/stackvm/reference/reduce_arg.cpp +++ b/src/Native/src/kernels/stackvm/reference/reduce_arg.cpp @@ -60,10 +60,8 @@ result reduce_arg_impl(TReducer &&reducer, T init_value, const T *input, out_map[out_idx].clear(); out_map[out_idx].push_back(index[axes[0]]); dst = src; - } else if constexpr (std::is_same_v) { - if (fabs(src - dst) < epsilon) { - out_map[out_idx].push_back(index[axes[0]]); - } + } else if (std::fabs(src - dst) < epsilon) { + out_map[out_idx].push_back(index[axes[0]]); } return ok(); })); diff --git a/src/Native/src/kernels/stackvm/reference/unary.cpp 
b/src/Native/src/kernels/stackvm/reference/unary.cpp index 8af2011926..f9da11ba02 100644 --- a/src/Native/src/kernels/stackvm/reference/unary.cpp +++ b/src/Native/src/kernels/stackvm/reference/unary.cpp @@ -45,6 +45,23 @@ result unary_impl(TOp &&op, const T *input, T *output, return unary_impl(funct, input, output, input_shape, input_strides, \ out_shape, out_strides, context) +static float round_onnx(float v) { + if (v > 0 && v - (int32_t)v == 0.5) { + float result = (int32_t)v + 1.0; + if ((int32_t)result % 2 == 0) + return result; + else + return result - 1; + } else if (v < 0 && (int32_t)v - v == 0.5) { + float result = (int32_t)v + 1.0; + if ((int32_t)result % 2 == 0) + return result; + else + return result - 1; + } else + return roundf(v); +} + template result unary_impl(unary_op_t op, const T *input, T *output, gsl::span input_shape, @@ -66,7 +83,7 @@ result unary_impl(unary_op_t op, const T *input, T *output, UNARY_IMPL_OP(log, logf); UNARY_IMPL_OP(logical_not, [](float v) { return !v; }); UNARY_IMPL_OP(neg, std::negate()); - UNARY_IMPL_OP(round, roundf); + UNARY_IMPL_OP(round, [](float v) { return round_onnx(v); }); UNARY_IMPL_OP(rsqrt, [](float v) { return 1.f / sqrtf(v); }); UNARY_IMPL_OP(sign, [](float v) { return (0.f < v) - (v < 0.f); }); UNARY_IMPL_OP(sin, sinf); @@ -93,6 +110,8 @@ result nncase::kernels::stackvm::reference::unary( kernel_context &context) noexcept { switch (dtype) { UNARY_IMPL_DTYPE(dt_float32, float) + UNARY_IMPL_DTYPE(dt_float16, half) + // UNARY_IMPL_DTYPE(dt_bfloat16, bfloat16) UNARY_IMPL_DTYPE(dt_float64, double) UNARY_IMPL_DTYPE(dt_int32, int32_t) UNARY_IMPL_DTYPE(dt_int64, int64_t) diff --git a/src/Native/src/kernels/stackvm/shape_infer.h b/src/Native/src/kernels/stackvm/shape_infer.h index 01c5591b1e..d88a5378b6 100644 --- a/src/Native/src/kernels/stackvm/shape_infer.h +++ b/src/Native/src/kernels/stackvm/shape_infer.h @@ -287,13 +287,24 @@ inline dims_t onehot_infer_shape(gsl::span indices_shape, return new_shape; } -inline result matmul_infer_shape(gsl::span lhs_shape, - gsl::span rhs_shape) { +inline result matmul_infer_shape(gsl::span lhs_shape_, + gsl::span rhs_shape_) { + dims_t lhs_shape = lhs_shape_; + dims_t rhs_shape = rhs_shape_; + if (lhs_shape.size() == 2 && rhs_shape.size() == 2) { auto new_shape = dims_t{lhs_shape[0], rhs_shape[1]}; return ok(new_shape); } + if (lhs_shape.size() == 1) { + lhs_shape.insert(lhs_shape.begin(), 1); + } + + if (rhs_shape.size() == 1) { + rhs_shape.insert(rhs_shape.end(), 1); + } + auto new_a_shape = runtime::to_4d(lhs_shape); auto new_b_shape = runtime::to_4d(rhs_shape); auto big_shape = std::max(lhs_shape.size(), rhs_shape.size()); @@ -304,6 +315,14 @@ inline result matmul_infer_shape(gsl::span lhs_shape, } new_shape.push_back(lhs_shape[lhs_shape.size() - 2]); new_shape.push_back(rhs_shape.back()); + if (lhs_shape_.size() == 1) { + new_shape.erase(new_shape.begin() + big_shape - 2); + big_shape--; + } + + if (rhs_shape_.size() == 1) { + new_shape.erase(new_shape.begin() + big_shape - 1); + } return ok(new_shape); } diff --git a/src/Native/src/kernels/stackvm/shape_ops.cpp b/src/Native/src/kernels/stackvm/shape_ops.cpp new file mode 100644 index 0000000000..7677b10c2b --- /dev/null +++ b/src/Native/src/kernels/stackvm/shape_ops.cpp @@ -0,0 +1,121 @@ +#include "shape_infer.h" +#include + +using namespace nncase; +using namespace nncase::kernels; +using namespace nncase::kernels::stackvm; +using namespace nncase::runtime; +using namespace nncase::runtime::stackvm; + +result 
nncase::kernels::stackvm::conv2d_shape( + value_t input, value_t weights, value_t padding, value_t stride, + value_t dilation, [[maybe_unused]] value_t groups, value_t output, + kernel_context &) { + try_dims(in_shape, input); + try_dims(w_shape, weights); + try_strides(strides_value, stride); + try_paddings(pads, padding); + try_strides(strides, stride); + try_strides(dilations, dilation); + try_output(out_mem, output, dt_int64, dims_t{4}); + auto out_shape = + conv2d_infer_shape(in_shape, w_shape, strides_value, dilations, pads); + for (int i = 0; i < 4; ++i) { + OUT_CAST(int64_t, out_mem)[i] = out_shape[i]; + } + KERNEL_FINISH; +} + +size_t compute_out_size(int input_size, int weights_size, + const strides_t &strides, dims_t out_paddings, + paddings_t paddings, const strides_t &dilations, + int offset) { + return (strides[offset] * (input_size - 1L)) + out_paddings[offset] + + (((weights_size - 1L) * dilations[offset]) + 1L) - + paddings[offset].before - paddings[offset].after; +} + +dims_t conv2d_transpose_infer_shape(gsl::span in_shape, + gsl::span w_shape, + const strides_t &strides, + paddings_t paddings, + const dims_t &outPadding, + const strides_t &dilations, int group) { + auto in = in_shape[0]; + auto ih = in_shape[2]; + auto iw = in_shape[3]; + auto oc = w_shape[0] * group; + auto wh = w_shape[2]; + auto ww = w_shape[3]; + + auto oh = + compute_out_size(ih, wh, strides, outPadding, paddings, dilations, 0); + auto ow = + compute_out_size(iw, ww, strides, outPadding, paddings, dilations, 1); + auto out_shape = dims_t{in, oc, oh, ow}; + return out_shape; +} + +result nncase::kernels::stackvm::conv2d_transpose_shape( + value_t input, value_t weights, value_t stride, value_t dilation, + value_t padding, value_t output_padding, value_t groups, value_t output, + kernel_context &) { + try_dims(input_shape, input); + try_dims(weights_shape, weights); + try_strides(strides_value, stride); + try_paddings(pads, padding); + try_dims(out_padding, output_padding); + try_to_integer(groups_value, groups); + try_strides(strides, stride); + try_strides(dilations, dilation); + + auto out_shape = + conv2d_transpose_infer_shape(input_shape, weights_shape, strides, pads, + out_padding, dilations, groups_value); + try_output(out_mem, output, dt_int64, dims_t{4}); + for (int i = 0; i < 4; ++i) { + OUT_CAST(int64_t, out_mem)[i] = out_shape[i]; + } + KERNEL_FINISH; +} + +result to_dims(tensor shape) { + try_dims(shape_value, shape); + return ok(shape_value); +} + +result nncase::kernels::stackvm::broadcast_shape(value_t inputs, + value_t output, + kernel_context &) { + try_tuple_input(tuple_mem, inputs); + auto begin = inputs_tuple->fields().begin(); + auto out_shape = std::accumulate( + std::next(begin), inputs_tuple->fields().end(), + to_dims(begin->as().unwrap()).unwrap(), + [&](auto sum, auto field) { + auto shape = to_dims(field.template as().unwrap()).unwrap(); + auto result = kernels::detail::get_binary_output_shape(shape, sum); + + return dims_t(result.begin(), result.end()); + }); + try_output(out_mem, output, dt_int64, dims_t{out_shape.size()}); + for (int i = 0; i < out_shape.size(); ++i) { + OUT_CAST(int64_t, out_mem)[i] = out_shape[i]; + } + + KERNEL_FINISH; +} + +result nncase::kernels::stackvm::mat_mul_shape(value_t lhs, + value_t rhs, + value_t output, + kernel_context &) { + try_dims(lhs_shape, lhs); + try_dims(rhs_shape, rhs); + try_var(out_shape, matmul_infer_shape(lhs_shape, rhs_shape)); + try_output(out_mem, output, dt_int64, dims_t{out_shape.size()}); + for (int i = 0; i < 
out_shape.size(); ++i) { + OUT_CAST(int64_t, out_mem)[i] = out_shape[i]; + } + KERNEL_FINISH; +} \ No newline at end of file diff --git a/src/Native/src/kernels/stackvm/tensor_ops.cpp b/src/Native/src/kernels/stackvm/tensor_ops.cpp index 0658fe6e35..7f668492c3 100644 --- a/src/Native/src/kernels/stackvm/tensor_ops.cpp +++ b/src/Native/src/kernels/stackvm/tensor_ops.cpp @@ -27,7 +27,7 @@ using namespace nncase::kernels::stackvm; using namespace nncase::runtime; using namespace nncase::runtime::stackvm; -//#define ENABLE_NOP +// #define ENABLE_NOP result nncase::kernels::stackvm::batch_normalization( value_t input, value_t scale, value_t bias, value_t input_mean, @@ -1233,6 +1233,13 @@ result kernels::stackvm::unary(unary_op_t unary_op, value_t input, auto dtype = input_tensor->dtype(); try_output(out_mem, output, dtype, input_tensor->shape()); + if (typoecode != dt_float32) { + try_(reference::unary(typoecode, unary_op, input_mem, out_mem, + input_tensor->shape(), input_tensor->strides(), + output_tensor->shape(), output_tensor->strides(), + context)); + return ok(output); + } CONTIGUOUS_KERNEL(unary, input_tensor, typoecode, unary_op, input_mem, out_mem, input_tensor->shape(), input_tensor->strides(), output_tensor->shape(), output_tensor->strides(), diff --git a/src/Native/src/runtime/stackvm/op_reader.cpp b/src/Native/src/runtime/stackvm/op_reader.cpp index f430b1c64b..04776e0f7d 100644 --- a/src/Native/src/runtime/stackvm/op_reader.cpp +++ b/src/Native/src/runtime/stackvm/op_reader.cpp @@ -1,4 +1,4 @@ -/* This file is generated by tools/stackvm_gen/IsaGen at 2023/7/12 15:04:16 +/* This file is generated by tools/stackvm_gen/IsaGen at 2023/7/12 17:07:39 * +08:00. * * Copyright 2019-2021 Canaan Inc. @@ -36,6 +36,9 @@ result tensor_op_visitor::visit(tensor_function_t tensor_funct, return visit(tensor_op_reader()(reader)); case tensor_function_t::broadcast: return visit(tensor_op_reader()(reader)); + case tensor_function_t::broadcast_shape: + return visit( + tensor_op_reader()(reader)); case tensor_function_t::bucket_pad: return visit(tensor_op_reader()(reader)); case tensor_function_t::cast: @@ -55,9 +58,16 @@ result tensor_op_visitor::visit(tensor_function_t tensor_funct, tensor_op_reader()(reader)); case tensor_function_t::conv2d: return visit(tensor_op_reader()(reader)); + case tensor_function_t::conv2d_shape: + return visit( + tensor_op_reader()(reader)); case tensor_function_t::conv2d_transpose: return visit( tensor_op_reader()(reader)); + case tensor_function_t::conv2d_transpose_shape: + return visit( + tensor_op_reader()( + reader)); case tensor_function_t::cum_sum: return visit(tensor_op_reader()(reader)); case tensor_function_t::dequantize: @@ -121,6 +131,9 @@ result tensor_op_visitor::visit(tensor_function_t tensor_funct, return visit(tensor_op_reader()(reader)); case tensor_function_t::mat_mul: return visit(tensor_op_reader()(reader)); + case tensor_function_t::mat_mul_shape: + return visit( + tensor_op_reader()(reader)); case tensor_function_t::normal: return visit(tensor_op_reader()(reader)); case tensor_function_t::normal_like: diff --git a/src/Native/src/runtime/stackvm/ops/tensor.cpp b/src/Native/src/runtime/stackvm/ops/tensor.cpp index 92bbe33a21..0439e7887c 100644 --- a/src/Native/src/runtime/stackvm/ops/tensor.cpp +++ b/src/Native/src/runtime/stackvm/ops/tensor.cpp @@ -1,4 +1,4 @@ -/* This file is generated by tools/stackvm_gen/IsaGen at 2023/7/12 15:04:16 +/* This file is generated by tools/stackvm_gen/IsaGen at 2023/7/12 17:07:39 * +08:00. 
* * Copyright 2019-2021 Canaan Inc. @@ -120,6 +120,18 @@ result stackvm_runtime_function::visit( return ok(); } +result stackvm_runtime_function::visit( + [[maybe_unused]] const tensor_broadcast_shape_op_t &op) noexcept { + dump_op("broadcast_shape"); + try_var(inputs, pop_value()); + dump_input(inputs); + try_var(output, kernels::stackvm::broadcast_shape( + inputs, nullptr, module().kernel_context())); + dump_output(output); + stack_.push(std::move(output)); + return ok(); +} + result stackvm_runtime_function::visit( [[maybe_unused]] const tensor_bucket_pad_op_t &op) noexcept { dump_op("bucket_pad"); @@ -261,6 +273,29 @@ result stackvm_runtime_function::visit( return ok(); } +result stackvm_runtime_function::visit( + [[maybe_unused]] const tensor_conv2d_shape_op_t &op) noexcept { + dump_op("conv2d_shape"); + try_var(input, pop_value()); + dump_input(input); + try_var(weights, pop_value()); + dump_input(weights); + try_var(padding, pop_value()); + dump_input(padding); + try_var(stride, pop_value()); + dump_input(stride); + try_var(dilation, pop_value()); + dump_input(dilation); + try_var(groups, pop_value()); + dump_input(groups); + try_var(output, kernels::stackvm::conv2d_shape( + input, weights, padding, stride, dilation, groups, + nullptr, module().kernel_context())); + dump_output(output); + stack_.push(std::move(output)); + return ok(); +} + result stackvm_runtime_function::visit( [[maybe_unused]] const tensor_conv2d_transpose_op_t &op) noexcept { dump_op("conv2d_transpose"); @@ -293,6 +328,32 @@ result stackvm_runtime_function::visit( return ok(); } +result stackvm_runtime_function::visit( + [[maybe_unused]] const tensor_conv2d_transpose_shape_op_t &op) noexcept { + dump_op("conv2d_transpose_shape"); + try_var(input, pop_value()); + dump_input(input); + try_var(weights, pop_value()); + dump_input(weights); + try_var(stride, pop_value()); + dump_input(stride); + try_var(dilation, pop_value()); + dump_input(dilation); + try_var(padding, pop_value()); + dump_input(padding); + try_var(output_padding, pop_value()); + dump_input(output_padding); + try_var(groups, pop_value()); + dump_input(groups); + try_var(output, + kernels::stackvm::conv2d_transpose_shape( + input, weights, stride, dilation, padding, output_padding, + groups, nullptr, module().kernel_context())); + dump_output(output); + stack_.push(std::move(output)); + return ok(); +} + result stackvm_runtime_function::visit( [[maybe_unused]] const tensor_cum_sum_op_t &op) noexcept { dump_op("cum_sum"); @@ -727,6 +788,20 @@ result stackvm_runtime_function::visit( return ok(); } +result stackvm_runtime_function::visit( + [[maybe_unused]] const tensor_mat_mul_shape_op_t &op) noexcept { + dump_op("mat_mul_shape"); + try_var(lhs, pop_value()); + dump_input(lhs); + try_var(rhs, pop_value()); + dump_input(rhs); + try_var(output, kernels::stackvm::mat_mul_shape(lhs, rhs, nullptr, + module().kernel_context())); + dump_output(output); + stack_.push(std::move(output)); + return ok(); +} + result stackvm_runtime_function::visit( [[maybe_unused]] const tensor_normal_op_t &op) noexcept { dump_op("normal"); diff --git a/src/Native/src/runtime/stackvm/runtime_function_ops.h b/src/Native/src/runtime/stackvm/runtime_function_ops.h index 8a5b37d76c..02841ebe35 100644 --- a/src/Native/src/runtime/stackvm/runtime_function_ops.h +++ b/src/Native/src/runtime/stackvm/runtime_function_ops.h @@ -1,4 +1,4 @@ -/* This file is generated by tools/stackvm_gen/IsaGen at 2023/7/12 15:04:16 +/* This file is generated by tools/stackvm_gen/IsaGen at 2023/7/12 17:07:39 
* +08:00. * * Copyright 2019-2021 Canaan Inc. @@ -21,6 +21,7 @@ result visit(const tensor_batch_to_space_op_t &op) noexcept override; result visit(const tensor_binary_op_t &op) noexcept override; result visit(const tensor_bitcast_op_t &op) noexcept override; result visit(const tensor_broadcast_op_t &op) noexcept override; +result visit(const tensor_broadcast_shape_op_t &op) noexcept override; result visit(const tensor_bucket_pad_op_t &op) noexcept override; result visit(const tensor_cast_op_t &op) noexcept override; result visit(const tensor_celu_op_t &op) noexcept override; @@ -30,7 +31,10 @@ result visit(const tensor_concat_op_t &op) noexcept override; result visit(const tensor_condition_op_t &op) noexcept override; result visit(const tensor_constant_of_shape_op_t &op) noexcept override; result visit(const tensor_conv2d_op_t &op) noexcept override; +result visit(const tensor_conv2d_shape_op_t &op) noexcept override; result visit(const tensor_conv2d_transpose_op_t &op) noexcept override; +result +visit(const tensor_conv2d_transpose_shape_op_t &op) noexcept override; result visit(const tensor_cum_sum_op_t &op) noexcept override; result visit(const tensor_dequantize_op_t &op) noexcept override; result visit(const tensor_elu_op_t &op) noexcept override; @@ -59,6 +63,7 @@ result visit(const tensor_lp_normalization_op_t &op) noexcept override; result visit(const tensor_lrn_op_t &op) noexcept override; result visit(const tensor_lstm_op_t &op) noexcept override; result visit(const tensor_mat_mul_op_t &op) noexcept override; +result visit(const tensor_mat_mul_shape_op_t &op) noexcept override; result visit(const tensor_normal_op_t &op) noexcept override; result visit(const tensor_normal_like_op_t &op) noexcept override; result visit(const tensor_one_hot_op_t &op) noexcept override; diff --git a/src/Nncase.Compiler/Compiler.cs b/src/Nncase.Compiler/Compiler.cs index db653f4962..74eccd2190 100644 --- a/src/Nncase.Compiler/Compiler.cs +++ b/src/Nncase.Compiler/Compiler.cs @@ -2,6 +2,7 @@ // Licensed under the Apache license. See LICENSE file in the project root for full license information. 
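For reference, the 1-D promotion rule that the new `matmul_infer_shape` / `mat_mul_shape` path follows is the usual NumPy/ONNX MatMul convention: prepend a 1 to a 1-D LHS, append a 1 to a 1-D RHS, broadcast the batch dimensions, then drop the dimensions that were only inserted by the promotion. A minimal standalone sketch of that rule (plain C++; `infer_matmul_shape` is a hypothetical helper, not part of nncase, and it assumes the shapes are broadcastable):

```cpp
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

using shape_t = std::vector<size_t>;

// NumPy/ONNX-style matmul shape rule: promote 1-D operands, broadcast the
// batch dimensions, then drop the dimensions inserted by the promotion.
shape_t infer_matmul_shape(shape_t a, shape_t b) {
    const bool a_was_1d = a.size() == 1;
    const bool b_was_1d = b.size() == 1;
    if (a_was_1d) a.insert(a.begin(), 1); // [K] -> [1, K]
    if (b_was_1d) b.push_back(1);         // [K] -> [K, 1]

    const size_t rank = std::max(a.size(), b.size());
    shape_t out;
    for (size_t i = 0; i + 2 < rank; ++i) { // batch dims, right-aligned
        size_t da = i + a.size() >= rank ? a[i + a.size() - rank] : 1;
        size_t db = i + b.size() >= rank ? b[i + b.size() - rank] : 1;
        out.push_back(std::max(da, db));    // assumes broadcastable shapes
    }
    out.push_back(a[a.size() - 2]);         // M
    out.push_back(b[b.size() - 1]);         // N

    if (a_was_1d) out.erase(out.end() - 2); // drop the prepended 1
    if (b_was_1d) out.erase(out.end() - 1); // drop the appended 1
    return out;
}

int main() {
    assert((infer_matmul_shape({3, 4}, {4, 5}) == shape_t{3, 5}));
    assert((infer_matmul_shape({4}, {4, 5}) == shape_t{5}));
    assert((infer_matmul_shape({2, 3, 4}, {4}) == shape_t{2, 3}));
    assert((infer_matmul_shape({4}, {4}) == shape_t{})); // scalar result
}
```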
using System.Security.AccessControl; +using System.Text.Json; using Microsoft.Extensions.Configuration; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Hosting; @@ -11,13 +12,16 @@ using Nncase.Evaluator; using Nncase.Hosting; using Nncase.IR; +using Nncase.IR.Tensors; using Nncase.Passes; using Nncase.Passes.Mutators; using Nncase.Passes.Rules.Lower; using Nncase.Passes.Rules.Neutral; +using Nncase.Passes.Rules.ShapeBucket; using Nncase.Passes.Rules.ShapeExpr; using Nncase.Passes.Transforms; using Nncase.Quantization; +using static Nncase.Passes.Rules.ShapeBucket.ShapeBucketRegister; using FoldConstCall = Nncase.Passes.Rules.Neutral.FoldConstCall; namespace Nncase.Compiler; @@ -88,11 +92,16 @@ public void AddPreAndPostProcess(IPassManager passManager) public void TargetIndependentPass(IPassManager passManager) { - var quantMode = _compileSession.CompileOptions.QuantizeOptions.ModelQuantMode; + passManager.AddWithName("ReshapeMatMul").Configure(p => + { + p.Add(); + }); + passManager.AddWithName("SqueezeShape").Configure(p => { p.Add(); p.Add(); + p.Add(); p.Add(); p.Add(); p.Add(); @@ -148,6 +157,14 @@ public void TargetIndependentPass(IPassManager passManager) p.Add(); }); + _compileSession.Target.RegisterTargetInDependentPass(passManager, _compileSession.CompileOptions); + + passManager.AddWithName("BroadcastMarker").Configure(p => + { + p.Add(); + p.Add(); + }); + // passManager.AddWithName("NeutralOptimizeClamp").Configure(p => // { // p.Add(); @@ -158,85 +175,29 @@ public void TargetIndependentPass(IPassManager passManager) // p.Add(); // p.Add(); // }); - _compileSession.Target.RegisterTargetInDependentPass(passManager, _compileSession.CompileOptions); - - if (quantMode == ModelQuantMode.UsePTQ) - { - passManager.AddWithName("AddRangeOfMarker").Configure(p => - { - p.Add(); - }); - passManager.AddWithName("AssignRanges"); - } } public void RegisterShapeBucket(IPassManager p) { - void MergeOp(IPassManager iPassManager) - { - iPassManager.AddWithName("MergeCallToFusion").Configure(c => - { - c.Add(); - c.Add(); - }); - } - - if (!_compileSession.CompileOptions.ShapeBucketOptions.Enable) + var options = _compileSession.CompileOptions.ShapeBucketOptions; + var singleVar = options.VarMap.Values.SelectMany(x => x).OfType().ToHashSet().Count <= 1; + if (!options.Enable) { return; } - p.AddWithName("ToFusion").Configure(c => - { - c.Add(); - c.Add(); - c.Add(); - }); - + CheckShapeBucketOptions(options); + ToFusion(p); MergeOp(p); - - var singleVar = _compileSession.CompileOptions.ShapeBucketOptions.VarMap.Values.OfType().ToHashSet().Count <= 1; - p.AddWithName("LostToFusion").Configure(c => - { - c.Add(); - c.Add(); - c.Add(); - c.Add(); - if (singleVar) - { - c.Add(); - } - }); - + LostToFusion(p, singleVar); MergeOp(p); + ClearMarker(p); - if (singleVar) - { - p.AddWithName("ClearSomeMarker").Configure(p => - { - p.Add(); - }); - p.AddWithName("MergeFusion"); - } + // MergeFusion(p, singleVar); + Bucket(p); - p.AddWithName("FusionBucket").Configure(c => - { - c.Add(); - }); - - p.AddWithName("Simplify").Configure(c => - { - c.Add(); - c.Add(); - c.Add(); - c.Add(); - c.Add(); - c.Add(); - c.Add(); - c.Add(); - c.Add(); - c.Add(); - }); + // Rebuild(p); + Simplify(p); } public void ClearFixShape(IPassManager p) @@ -257,10 +218,13 @@ public async Task CompileAsync() { var target = _compileSession.Target; await RunPassAsync(p => TargetIndependentPass(p), "TargetIndependentPass"); - await RunPassAsync(p => RegisterShapeBucket(p), "ShapeBucket"); - await 
RunPassAsync( - p => _compileSession.Target.RegisterTargetInDependentPass(p, _compileSession.CompileOptions), - "TargetIndependtPass"); + await RunPassAsync(p => RegisterTargetIndependQuantPass(p), "TargetIndependentQuantPass"); + if (_compileSession.CompileOptions.ShapeBucketOptions.Enable) + { + await RunPassAsync(p => RegisterShapeBucket(p), "ShapeBucket"); + await RunPassAsync(p => TargetIndependentPass(p), "TargetIndependentPass"); + } + await RunPassAsync( p => target.RegisterTargetDependentPass(p, _compileSession.CompileOptions), "TargetDependentPass"); @@ -284,6 +248,19 @@ public void Gencode(Stream output) linkedModel.Serialize(output); } + private void RegisterTargetIndependQuantPass(IPassManager passManager) + { + var quantMode = _compileSession.CompileOptions.QuantizeOptions.ModelQuantMode; + if (quantMode == ModelQuantMode.UsePTQ) + { + passManager.AddWithName("AddRangeOfMarker").Configure(p => + { + p.Add(); + }); + passManager.AddWithName("AssignRanges"); + } + } + private IRModule ImportModel(Stream content) { _module = _compileSession.CompileOptions.InputFormat switch diff --git a/src/Nncase.Core/CompilerServices.cs b/src/Nncase.Core/CompilerServices.cs index 3cbaca536e..5d1bd6cb83 100644 --- a/src/Nncase.Core/CompilerServices.cs +++ b/src/Nncase.Core/CompilerServices.cs @@ -134,7 +134,7 @@ public interface ICompilerServicesProvider /// Evaluate result. Metric EvaluateOpMetric(Op op, IMetricEvaluateContext context); - Expr EvaluateShapeExpr(Expr expr, IReadOnlyDictionary? varsMap); + Expr EvaluateShapeExpr(Expr expr, ShapeExprCache? cache); Expr EvaluateOpShapeExpr(Op expr, IShapeEvaluateContext context); @@ -317,9 +317,9 @@ public static Cost EvaluateOpCost(Op op, ICostEvaluateContext context) return Provider.EvaluateOpCost(op, context); } - public static Expr EvaluateShapeExpr(this Expr expr, IReadOnlyDictionary? varsMap = null) + public static Expr EvaluateShapeExpr(this Expr expr, ShapeExprCache? cache = null) { - return Provider.EvaluateShapeExpr(expr, varsMap); + return Provider.EvaluateShapeExpr(expr, cache); } public static Expr EvaluateOpShapeExpr(Op op, IShapeEvaluateContext context) @@ -626,9 +626,9 @@ public Cost EvaluateOpCost(Op op, ICostEvaluateContext context) public Metric EvaluateOpMetric(Op op, IMetricEvaluateContext context) => _metricEvaluateProvider.EvaluateOpMetric(op, context); /// - public Expr EvaluateShapeExpr(Expr expr, IReadOnlyDictionary? varsMap = null) + public Expr EvaluateShapeExpr(Expr expr, ShapeExprCache? cache = null) { - return _shapeEvaluateProvider.EvaluateShapeExpr(expr, varsMap ?? new Dictionary()); + return _shapeEvaluateProvider.EvaluateShapeExpr(expr, cache ?? ShapeExprCache.Default); } /// diff --git a/src/Nncase.Core/CostModel/Cost.cs b/src/Nncase.Core/CostModel/Cost.cs index 45c6ebcc0b..b989507457 100644 --- a/src/Nncase.Core/CostModel/Cost.cs +++ b/src/Nncase.Core/CostModel/Cost.cs @@ -309,6 +309,14 @@ public static Cost GetReshapeCost() }; } + public static Cost GetShapeExprCost() + { + return new() + { + [CostFactorNames.CPUCycles] = 1, + }; + } + public static Cost GetActivationCost(TensorType ret, uint macPerElement) { return new() diff --git a/src/Nncase.Core/Evaluator/IShapeEvaluateProvider.cs b/src/Nncase.Core/Evaluator/IShapeEvaluateProvider.cs index ee37160c81..367572c61f 100644 --- a/src/Nncase.Core/Evaluator/IShapeEvaluateProvider.cs +++ b/src/Nncase.Core/Evaluator/IShapeEvaluateProvider.cs @@ -19,9 +19,9 @@ public interface IShapeEvaluateProvider /// Evaluate Shape of the expression tree. /// /// Expression. 
- /// VarMap. + /// VarMap. /// Evaluate result. - Expr EvaluateShapeExpr(Expr expr, IReadOnlyDictionary varMap); + Expr EvaluateShapeExpr(Expr expr, ShapeExprCache cache); /// /// Evaluate Shape of operator. diff --git a/src/Nncase.Core/IR/ShapeExpr/BroadcastShape.cs b/src/Nncase.Core/IR/ShapeExpr/BroadcastShape.cs new file mode 100644 index 0000000000..d319e6b92f --- /dev/null +++ b/src/Nncase.Core/IR/ShapeExpr/BroadcastShape.cs @@ -0,0 +1,11 @@ +// Copyright (c) Canaan Inc. All rights reserved. +// Licensed under the Apache license. See LICENSE file in the project root for full license information. + +using static Nncase.IR.TypePatternUtility; + +namespace Nncase.IR.ShapeExpr; + +public class BroadcastShape : ShapeExprOp +{ + public static readonly ParameterInfo Inputs = new(typeof(BroadcastShape), 0, "inputs"); +} diff --git a/src/Nncase.Core/IR/ShapeExpr/Conv2DShape.cs b/src/Nncase.Core/IR/ShapeExpr/Conv2DShape.cs new file mode 100644 index 0000000000..838476354c --- /dev/null +++ b/src/Nncase.Core/IR/ShapeExpr/Conv2DShape.cs @@ -0,0 +1,39 @@ +// Copyright (c) Canaan Inc. All rights reserved. +// Licensed under the Apache license. See LICENSE file in the project root for full license information. + +using static Nncase.IR.TypePatternUtility; + +namespace Nncase.IR.ShapeExpr; + +public class Conv2DShape : ShapeExprOp +{ + /// + /// Gets input. + /// + public static readonly ParameterInfo Input = new(typeof(Conv2DShape), 0, "input"); + + /// + /// Gets Weights. + /// + public static readonly ParameterInfo Weights = new(typeof(Conv2DShape), 1, "weights"); + + /// + /// Gets Padding. + /// + public static readonly ParameterInfo Padding = new(typeof(Conv2DShape), 2, "padding", HasRank(2) & IsIntegral()); + + /// + /// Gets Stride. + /// + public static readonly ParameterInfo Stride = new(typeof(Conv2DShape), 3, "stride", HasRank(1) & IsIntegral()); + + /// + /// Gets Dilation. + /// + public static readonly ParameterInfo Dilation = new(typeof(Conv2DShape), 4, "dilation", HasRank(1) & IsIntegral()); + + /// + /// Gets Groups. + /// + public static readonly ParameterInfo Groups = new(typeof(Conv2DShape), 5, "groups", IsScalar() & IsIntegral()); +} diff --git a/src/Nncase.Core/IR/ShapeExpr/Conv2DTransposeShape.cs b/src/Nncase.Core/IR/ShapeExpr/Conv2DTransposeShape.cs new file mode 100644 index 0000000000..5807b348e3 --- /dev/null +++ b/src/Nncase.Core/IR/ShapeExpr/Conv2DTransposeShape.cs @@ -0,0 +1,42 @@ +// Copyright (c) Canaan Inc. All rights reserved. +// Licensed under the Apache license. See LICENSE file in the project root for full license information. + +namespace Nncase.IR.ShapeExpr; + +public class Conv2DTransposeShape : ShapeExprOp +{ + /// + /// Gets input. + /// + public static readonly ParameterInfo Input = new(typeof(Conv2DTransposeShape), 0, "input"); + + /// + /// Gets Weights. + /// + public static readonly ParameterInfo Weights = new(typeof(Conv2DTransposeShape), 1, "weights"); + + /// + /// Gets Stride. + /// + public static readonly ParameterInfo Stride = new(typeof(Conv2DTransposeShape), 2, "stride"); + + /// + /// Gets Dilation. + /// + public static readonly ParameterInfo Dilation = new(typeof(Conv2DTransposeShape), 3, "dilation"); + + /// + /// Gets Padding. + /// + public static readonly ParameterInfo Padding = new(typeof(Conv2DTransposeShape), 4, "padding"); + + /// + /// Gets Output Padding. + /// + public static readonly ParameterInfo OutputPadding = new(typeof(Conv2DTransposeShape), 5, "output_padding"); + + /// + /// Gets Groups. 
+ /// + public static readonly ParameterInfo Groups = new(typeof(Conv2DTransposeShape), 6, "groups"); +} diff --git a/src/Nncase.Core/IR/ShapeExpr/Functional.cs b/src/Nncase.Core/IR/ShapeExpr/Functional.cs new file mode 100644 index 0000000000..8631e5e26f --- /dev/null +++ b/src/Nncase.Core/IR/ShapeExpr/Functional.cs @@ -0,0 +1,17 @@ +// Copyright (c) Canaan Inc. All rights reserved. +// Licensed under the Apache license. See LICENSE file in the project root for full license information. + +using Nncase.IR.ShapeExpr; + +namespace Nncase.IR.F; + +public static class ShapeExpr +{ + public static Call BroadcastShape(Expr[] inputs) => new(new BroadcastShape(), new IR.Tuple(inputs)); + + public static Call Conv2DShape(Expr input, Expr weights, Expr padding, Expr stride, Expr dilation, Expr groups) => new(new Conv2DShape(), input, weights, padding, stride, dilation, groups); + + public static Call Conv2DTransposeShape(Expr input, Expr weights, Expr stride, Expr dilation, Expr padding, Expr outputPadding, Expr groups) => new(new Conv2DTransposeShape(), input, weights, stride, dilation, padding, outputPadding, groups); + + public static Call MatMulShape(Expr lhs, Expr rhs) => new(new MatMulShape(), lhs, rhs); +} diff --git a/src/Nncase.Core/IR/ShapeExpr/MatMulShape.cs b/src/Nncase.Core/IR/ShapeExpr/MatMulShape.cs new file mode 100644 index 0000000000..4468ef15d4 --- /dev/null +++ b/src/Nncase.Core/IR/ShapeExpr/MatMulShape.cs @@ -0,0 +1,13 @@ +// Copyright (c) Canaan Inc. All rights reserved. +// Licensed under the Apache license. See LICENSE file in the project root for full license information. + +using static Nncase.IR.TypePatternUtility; + +namespace Nncase.IR.ShapeExpr; + +public class MatMulShape : ShapeExprOp +{ + public static readonly ParameterInfo Lhs = new(typeof(MatMulShape), 0, "lhs"); + + public static readonly ParameterInfo Rhs = new(typeof(MatMulShape), 1, "rhs"); +} diff --git a/src/Nncase.Core/IR/ShapeExpr/ShapeExprOp.cs b/src/Nncase.Core/IR/ShapeExpr/ShapeExprOp.cs new file mode 100644 index 0000000000..69b2fa4a6c --- /dev/null +++ b/src/Nncase.Core/IR/ShapeExpr/ShapeExprOp.cs @@ -0,0 +1,8 @@ +// Copyright (c) Canaan Inc. All rights reserved. +// Licensed under the Apache license. See LICENSE file in the project root for full license information. 
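`Conv2DTransposeShape` is the IR-level counterpart of the C++ `conv2d_transpose_infer_shape` added above, whose per-axis size is the standard ConvTranspose formula: `stride*(in - 1) + output_padding + (kernel - 1)*dilation + 1 - pad_begin - pad_end`. A small standalone sketch with a worked example (hypothetical helper, not the nncase implementation):

```cpp
#include <cstdint>
#include <cstdio>

// Standard ConvTranspose output size for one spatial axis:
// out = stride*(in - 1) + output_padding + (kernel - 1)*dilation + 1
//       - pad_begin - pad_end
int64_t conv_transpose_out_size(int64_t in, int64_t kernel, int64_t stride,
                                int64_t dilation, int64_t pad_begin,
                                int64_t pad_end, int64_t output_padding) {
    return stride * (in - 1) + output_padding +
           (kernel - 1) * dilation + 1 - pad_begin - pad_end;
}

int main() {
    // A 2x upsampling deconvolution: 14 -> 28
    // 2*(14-1) + 1 + (3-1)*1 + 1 - 1 - 1 = 26 + 1 + 2 + 1 - 2 = 28
    std::printf("%lld\n", static_cast<long long>(
                              conv_transpose_out_size(14, 3, 2, 1, 1, 1, 1)));
}
```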
+ +namespace Nncase.IR.ShapeExpr; + +public abstract class ShapeExprOp : Op +{ +} diff --git a/src/Nncase.Core/IR/Tensors/Functional.cs b/src/Nncase.Core/IR/Tensors/Functional.cs index 303143d801..71c0bfc51c 100644 --- a/src/Nncase.Core/IR/Tensors/Functional.cs +++ b/src/Nncase.Core/IR/Tensors/Functional.cs @@ -22,9 +22,43 @@ public static class Tensors { public static Call Transpose(Expr input, Expr perm) => new Call(new Transpose(), input, perm); - public static Expr NHWCToNCHW(Expr input) => Transpose(input, new[] { 0, 3, 1, 2 }); + public static Expr NHWCToNCHW(Expr input) + { + int[] perm; + if (input.CheckedShape.Rank == 4) + { + perm = new[] { 0, 3, 1, 2 }; + } + else if (input.CheckedShape.Rank == 3) + { + perm = new[] { 0, 2, 1 }; + } + else + { + throw new InvalidOperationException(); + } + + return Transpose(input, perm); + } - public static Expr NCHWToNHWC(Expr input) => Transpose(input, new[] { 0, 2, 3, 1 }); + public static Expr NCHWToNHWC(Expr input) + { + int[] perm; + if (input.CheckedShape.Rank == 4) + { + perm = new[] { 0, 2, 3, 1 }; + } + else if (input.CheckedShape.Rank == 3) + { + perm = new[] { 0, 2, 1 }; + } + else + { + throw new InvalidOperationException(); + } + + return Transpose(input, perm); + } public static Expr NHWCToWNCH(Expr input) => Transpose(input, new[] { 2, 0, 3, 1 }); diff --git a/src/Nncase.Core/IR/Util.cs b/src/Nncase.Core/IR/Util.cs index cfd91b7148..1a7929d0f3 100644 --- a/src/Nncase.Core/IR/Util.cs +++ b/src/Nncase.Core/IR/Util.cs @@ -3,6 +3,7 @@ using System.Runtime.InteropServices; using static Nncase.IR.F.Tensors; +using Cast = Nncase.IR.Tensors.Cast; namespace Nncase.IR { @@ -32,14 +33,18 @@ public static Expr GetWindowedOutputSize(Expr size, Expr filter, Expr stride, Ex return same ? trueBranch : falseBranch; } - public static Expr GetConvTransposeOutputShape(Expr input, Expr weights, Expr strides, Expr outPadding, Expr paddings, Expr dilations, string autoPad, Expr group) + public static Expr GetConvTransposeOutputShape(Expr inShape, Expr wShape, Expr strides, Expr outPadding, Expr paddings, Expr dilations, string autoPad, Expr group) { - var inShape = ShapeOf(input); + inShape = Cast(inShape, DataTypes.Int64); + wShape = Cast(wShape, DataTypes.Int64); + strides = Cast(strides, DataTypes.Int64); + dilations = Cast(dilations, DataTypes.Int64); + paddings = Cast(paddings, DataTypes.Int64); + outPadding = Cast(outPadding, DataTypes.Int64); var iN = inShape[0]; _ = inShape[1]; var iH = inShape[2]; var iW = inShape[3]; - var wShape = ShapeOf(weights); var oc = wShape[0] * group; var wH = wShape[2]; var wW = wShape[3]; diff --git a/src/Nncase.Core/ShapeExprCache.cs b/src/Nncase.Core/ShapeExprCache.cs new file mode 100644 index 0000000000..b0daaba988 --- /dev/null +++ b/src/Nncase.Core/ShapeExprCache.cs @@ -0,0 +1,36 @@ +// Copyright (c) Canaan Inc. All rights reserved. +// Licensed under the Apache license. See LICENSE file in the project root for full license information. + +using Nncase.IR; + +namespace Nncase; + +public class ShapeExprCache +{ + public ShapeExprCache(IReadOnlyDictionary varMap, Dictionary? cache = null) + { + VarMap = varMap; + Cache = cache ?? 
new(); + } + + public static ShapeExprCache Default => new(new Dictionary(), new()); + + public Dictionary Cache { get; set; } + + public IReadOnlyDictionary VarMap { get; set; } + + public static implicit operator ShapeExprCache(Dictionary varMap) => new(varMap); + + public static ShapeExprCache operator +(ShapeExprCache cache, Dictionary varMap) + { + var newVarMap = cache.VarMap.Concat(varMap).ToDictionary(pair => pair.Key, pair => pair.Value); + return new ShapeExprCache(newVarMap, cache.Cache); + } + + public static ShapeExprCache operator +(Dictionary varMap, ShapeExprCache cache) => cache + varMap; + + public void Add(Expr expr, Expr shape) + { + Cache[expr] = shape; + } +} diff --git a/src/Nncase.Core/Utilities/ReplaceUtility.cs b/src/Nncase.Core/Utilities/ReplaceUtility.cs index 3199652e21..c07cd9850e 100644 --- a/src/Nncase.Core/Utilities/ReplaceUtility.cs +++ b/src/Nncase.Core/Utilities/ReplaceUtility.cs @@ -4,6 +4,8 @@ using System.Collections.Immutable; using System.Linq; using System.Reactive; +using DryIoc.ImTools; +using Microsoft.Toolkit.HighPerformance; using NetFabric.Hyperlinq; using Nncase.IR; using Fx = System.Func; @@ -95,6 +97,11 @@ public static Call ReplaceCallParams(Expr target, IReadOnlyList oldParams, return new Call(target, ReplaceItems(oldParams, pairs)); } + public static Call ReplaceCallParams(Call call, params (int, Expr)[] pairs) + { + return new Call(call.Target, ReplaceItems(call.Arguments.ToArray(), pairs)); + } + /// /// replace the call params with parameter info. /// @@ -117,6 +124,11 @@ public static Call ReplaceCallParams(Expr target, IReadOnlyList oldParams, public static Call ReplaceCallFirstParam(Expr target, IReadOnlyList oldParams, Expr expr) => ReplaceCallParams(target, oldParams, (oldParams[0], expr)); + public static Expr ReplaceCallFirstParam(Call call, Expr expr) + { + return ReplaceCallFirstParam(call.Target, call.Arguments.ToArray(), expr); + } + /// /// Replace target in body with expr. 
/// @@ -137,4 +149,9 @@ public static Expr ReplaceExpr(Expr body, Expr target, Expr expr) }); return mutator.Visit(body, Unit.Default); } + + public static void ReplaceAllUsesWith(Expr expr, Expr newOperand) + { + expr.ReplaceAllUsesWith(newOperand); + } } diff --git a/src/Nncase.Core/Utilities/ShapeExprUtility.cs b/src/Nncase.Core/Utilities/ShapeExprUtility.cs index ef52290dcb..28d953a8ad 100644 --- a/src/Nncase.Core/Utilities/ShapeExprUtility.cs +++ b/src/Nncase.Core/Utilities/ShapeExprUtility.cs @@ -13,10 +13,7 @@ public static class ShapeExprUtility { public static Expr BroadcastShape(Expr lhsShape, params Expr[] rhsShape) { - var tmpTensor = new[] { ConstantOfShape(lhsShape, 0) } - .Concat(rhsShape) - .Aggregate((sum, shape) => ConstantOfShape(shape, 0) * sum); - return Cast(IR.F.Tensors.ShapeOf(tmpTensor), DataTypes.Int32); + return IR.F.ShapeExpr.BroadcastShape(new[] { lhsShape }.Concat(rhsShape).ToArray()); } public static Expr Positive(Expr axis, Expr inShape) @@ -43,6 +40,11 @@ public static Expr Replace(Expr shapeExpr, Expr index, Expr value) public static Expr Insert(Expr shapeExpr, Expr index, Expr value) { + if (shapeExpr.CheckedShape.IsScalar) + { + return SliceAndMerge(StackScalar(shapeExpr), index, value, 0); + } + return SliceAndMerge(shapeExpr, index, value, 0); } @@ -58,18 +60,22 @@ public static Expr Remove(Expr shapeExpr, Expr index) return Concat(new IR.Tuple(front, last), 0); } - public static Expr ShapeOf(Expr expr) => expr.EvaluateShapeExpr(); + public static Expr StackOne(Expr expr) + { + return Stack(new IR.Tuple(expr), 0); + } private static Expr SliceAndMerge(Expr shapeExpr, Expr index, Expr value, Expr indexOffset, bool valueIsList = true) { var front = Slice(shapeExpr, 0, index); var last = Slice(shapeExpr, Cast(index, DataTypes.Int32) + indexOffset, int.MaxValue); - return Concat(new IR.Tuple(front, valueIsList ? StackOne(value) : value, last), 0); - } + var c = valueIsList ? StackOne(value) : value; + if (c.CheckedShape.IsScalar) + { + c = StackOne(c); + } - private static Expr StackOne(Expr expr) - { - return Stack(new IR.Tuple(expr), 0); + return Concat(new IR.Tuple(front, c, last), 0); } private static Expr CheckShape(Expr shape) diff --git a/src/Nncase.Diagnostics/Diagnostics/CSharpPrintVisitor.cs b/src/Nncase.Diagnostics/Diagnostics/CSharpPrintVisitor.cs index 2c5501da75..1aac1d12be 100644 --- a/src/Nncase.Diagnostics/Diagnostics/CSharpPrintVisitor.cs +++ b/src/Nncase.Diagnostics/Diagnostics/CSharpPrintVisitor.cs @@ -78,7 +78,7 @@ public override string VisitType(CallableType type) => public override string VisitType(TensorType type) => type.DType switch { PrimType ptype => ptype.GetDisplayName() + (type.Shape.IsScalar ? string.Empty : type.Shape.ToString()), - PointerType { ElemType: PrimType etype } ptype => $"*{etype.GetDisplayName()}", + PointerType { ElemType: PrimType etype } => $"*{etype.GetDisplayName()}", ValueType => $"{type.DType.ToString()}", _ => throw new NotSupportedException(type.DType.GetType().Name), }; diff --git a/src/Nncase.Diagnostics/Diagnostics/ILPrintVisitor.cs b/src/Nncase.Diagnostics/Diagnostics/ILPrintVisitor.cs index ce1860626a..93fa794679 100644 --- a/src/Nncase.Diagnostics/Diagnostics/ILPrintVisitor.cs +++ b/src/Nncase.Diagnostics/Diagnostics/ILPrintVisitor.cs @@ -265,7 +265,7 @@ public override string VisitType(CallableType type) => public override string VisitType(TensorType type) => type.DType switch { PrimType ptype => ptype.GetDisplayName() + (type.Shape.IsScalar ? 
string.Empty : type.Shape.ToString()), - PointerType { ElemType: PrimType etype } ptype => $"*{etype.GetDisplayName()}", + PointerType { ElemType: PrimType etype } => $"*{etype.GetDisplayName()}", ValueType => $"{type.DType.ToString()}", _ => throw new NotSupportedException(type.DType.GetType().Name), }; diff --git a/src/Nncase.Diagnostics/Diagnostics/ScriptPrintVisitor.cs b/src/Nncase.Diagnostics/Diagnostics/ScriptPrintVisitor.cs index 21047b326d..62dac6f69c 100644 --- a/src/Nncase.Diagnostics/Diagnostics/ScriptPrintVisitor.cs +++ b/src/Nncase.Diagnostics/Diagnostics/ScriptPrintVisitor.cs @@ -129,7 +129,7 @@ public ScriptPrintVisitor(TextWriter textWriter, bool display_callable) public override string VisitType(TensorType type) => type.DType switch { PrimType ptype => ptype.GetDisplayName() + (type.Shape.IsScalar ? string.Empty : type.Shape.ToString()), - PointerType { ElemType: PrimType etype } ptype => $"*{etype.GetDisplayName()}", + PointerType { ElemType: PrimType etype } => $"*{etype.GetDisplayName()}", ValueType vtype => vtype.GetDisplayName() + (type.Shape.IsScalar ? string.Empty : type.Shape.ToString()), _ => throw new NotSupportedException(type.DType.GetType().Name), }; diff --git a/src/Nncase.Evaluator/CostEvaluateProvider.cs b/src/Nncase.Evaluator/CostEvaluateProvider.cs index a055a0479d..06bea9dc06 100644 --- a/src/Nncase.Evaluator/CostEvaluateProvider.cs +++ b/src/Nncase.Evaluator/CostEvaluateProvider.cs @@ -31,7 +31,7 @@ public Cost EvaluateCost(Expr expr) if (expr.CheckedType is InvalidType) { - throw new InvalidOperationException("Expr in Evaluator need a valid type"); + throw new InvalidOperationException("Expr in Cost Evaluator need a valid type"); } var evaluatorVisitor = new CostEvaluateVisitor(); diff --git a/src/Nncase.Evaluator/EvaluateProvider.cs b/src/Nncase.Evaluator/EvaluateProvider.cs index 3bee6d15d4..02e7f8b372 100644 --- a/src/Nncase.Evaluator/EvaluateProvider.cs +++ b/src/Nncase.Evaluator/EvaluateProvider.cs @@ -31,6 +31,11 @@ public IValue Evaluate(Expr expr, IReadOnlyDictionary? varsValues = if (expr.CheckedType is InvalidType) { + if (DumpScope.Current.IsEnabled(DumpFlags.Compile)) + { + DumpScope.Current.DumpIR(expr, "EvaluateInvalid"); + } + throw new InvalidOperationException("Expr in Evaluator need a valid type"); } diff --git a/src/Nncase.Evaluator/EvaluatorApplicationPart.cs b/src/Nncase.Evaluator/EvaluatorApplicationPart.cs index 28a2940c38..09bcddc39b 100644 --- a/src/Nncase.Evaluator/EvaluatorApplicationPart.cs +++ b/src/Nncase.Evaluator/EvaluatorApplicationPart.cs @@ -15,6 +15,7 @@ using Nncase.Evaluator.NN; using Nncase.Evaluator.Random; using Nncase.Evaluator.RNN; +using Nncase.Evaluator.ShapeExpr; using Nncase.Evaluator.TIR; using Nncase.Hosting; @@ -40,6 +41,7 @@ public static IRegistrator AddEvaluator(this IRegistrator registrator) .RegisterModule() .RegisterModule() .RegisterModule() + .RegisterModule() .RegisterModule(); } } diff --git a/src/Nncase.Evaluator/EvaluatorUtil.cs b/src/Nncase.Evaluator/EvaluatorUtil.cs index 54922972b0..e216b8ebe9 100644 --- a/src/Nncase.Evaluator/EvaluatorUtil.cs +++ b/src/Nncase.Evaluator/EvaluatorUtil.cs @@ -2,7 +2,9 @@ // Licensed under the Apache license. See LICENSE file in the project root for full license information. 
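Several of the shape expressions in this diff (the `broadcast_shape` kernel, `ShapeExprUtility.BroadcastShape`, and the Binary shape evaluator just below) rely on the usual NumPy-style broadcasting rule: shapes are aligned from the right, and each pair of dimensions must be equal or contain a 1. A minimal standalone sketch of that rule (hypothetical `broadcast_shapes` helper, not the nncase implementation):

```cpp
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

using shape_t = std::vector<size_t>;

// NumPy-style broadcasting of two shapes: align from the right; each pair
// of dimensions must be equal, or one of them must be 1.
shape_t broadcast_shapes(const shape_t &a, const shape_t &b) {
    const size_t rank = std::max(a.size(), b.size());
    shape_t out(rank);
    for (size_t i = 0; i < rank; ++i) {
        size_t da = i + a.size() >= rank ? a[i + a.size() - rank] : 1;
        size_t db = i + b.size() >= rank ? b[i + b.size() - rank] : 1;
        assert(da == db || da == 1 || db == 1);
        out[i] = std::max(da, db);
    }
    return out;
}

int main() {
    assert((broadcast_shapes({1, 3, 1}, {2, 1, 4}) == shape_t{2, 3, 4}));
    assert((broadcast_shapes({5, 4}, {1}) == shape_t{5, 4}));
    assert((broadcast_shapes({8, 1, 6, 1}, {7, 1, 5}) == shape_t{8, 7, 6, 5}));
}
```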
using System; +using System.Collections.Generic; using NetFabric.Hyperlinq; +using Nncase.IR; using OrtKISharp; using static Nncase.IR.F.Tensors; @@ -23,4 +25,11 @@ public static long[] ToOnnxPadFormat(OrtKISharp.Tensor pads) // note the pads will be int or long, need cast to long return OrtKI.Transpose(pads.Cast(OrtDataType.Int64), new long[] { 1, 0 }).ToArray(); } + + public static Dictionary GetMemo(Expr input, Dictionary varValues) + { + var visitor = new EvaluateVisitor(varValues, new()); + visitor.Visit(input); + return visitor.ExprMemo; + } } diff --git a/src/Nncase.Evaluator/Math/Binary.cs b/src/Nncase.Evaluator/Math/Binary.cs index f5f9d3c65b..0b5515ca28 100755 --- a/src/Nncase.Evaluator/Math/Binary.cs +++ b/src/Nncase.Evaluator/Math/Binary.cs @@ -118,7 +118,7 @@ public Expr Visit(IShapeEvaluateContext context, Binary target) { var lhs = context.GetArgumentShape(target, Binary.Lhs); var rhs = context.GetArgumentShape(target, Binary.Rhs); - return ShapeExprUtility.BroadcastShape(lhs, rhs); + return IR.F.Tensors.Cast(ShapeExprUtility.BroadcastShape(lhs, rhs), DataTypes.Int32); } private int Compute(BinaryOp op, int a, int b) => op switch diff --git a/src/Nncase.Evaluator/Math/MatMul.cs b/src/Nncase.Evaluator/Math/MatMul.cs index 760ac34619..444d221c28 100644 --- a/src/Nncase.Evaluator/Math/MatMul.cs +++ b/src/Nncase.Evaluator/Math/MatMul.cs @@ -69,32 +69,9 @@ public Metric Visit(IMetricEvaluateContext context, MatMul target) public Expr Visit(IShapeEvaluateContext context, MatMul target) { - var lhsRank = context.GetArgument(target, MatMul.Lhs).CheckedShape.Rank; - var rhsRank = context.GetArgument(target, MatMul.Rhs).CheckedShape.Rank; - var lhsShape = context.GetArgumentShape(target, MatMul.Lhs); - var rhsShape = context.GetArgumentShape(target, MatMul.Rhs); - - Expr lhs, rhs; - Expr front; - if (lhsRank == rhsRank) - { - lhs = ShapeExprUtility.Slice(lhsShape, 0, lhsRank - 2); - rhs = ShapeExprUtility.Slice(rhsShape, 0, rhsRank - 2); - front = IR.F.Math.Max(lhs, rhs); - } - else if (lhsRank > rhsRank) - { - lhs = ShapeExprUtility.Slice(lhsShape, 0, lhsRank - 2); - front = lhs; - } - else - { - lhs = Enumerable.Repeat(1, rhsRank - lhsRank).ToArray(); - front = lhs; - } - - var end = Stack(new IR.Tuple(lhsShape[lhsRank - 2], rhsShape[rhsRank - 1]), 0); - return Concat(new IR.Tuple(front, end), 0); + var lhs = context.GetArgumentShape(target, MatMul.Lhs); + var rhs = context.GetArgumentShape(target, MatMul.Rhs); + return Cast(IR.F.ShapeExpr.MatMulShape(lhs, rhs), DataTypes.Int32); } private IRType Visit(TensorType lhs, TensorType rhs) diff --git a/src/Nncase.Evaluator/Math/Reduce.cs b/src/Nncase.Evaluator/Math/Reduce.cs index 1506b12fc9..69e512b717 100644 --- a/src/Nncase.Evaluator/Math/Reduce.cs +++ b/src/Nncase.Evaluator/Math/Reduce.cs @@ -102,9 +102,18 @@ public Expr Visit(IShapeEvaluateContext context, Reduce target) axis is TensorConst axisValue) { var outShape = context.GetArgumentShape(target, Reduce.Input); + var input = context.GetArgument(target, Reduce.Input); var inShape = context.GetArgumentShape(target, Reduce.Input); var axes = axisValue.Value.Cast(); var keepDimsValue = keepDimsV.Value.ToScalar(); + if (input.CheckedShape.IsRanked) + { + if (axes.Length == input.CheckedShape.Count && keepDimsValue == 0) + { + return Array.Empty(); + } + } + foreach (var axValue in axes) { var ax = ShapeExprUtility.Positive(axValue, inShape); diff --git a/src/Nncase.Evaluator/Math/Require.cs b/src/Nncase.Evaluator/Math/Require.cs index bbac242bdd..e286fcdabf 100644 --- 
a/src/Nncase.Evaluator/Math/Require.cs +++ b/src/Nncase.Evaluator/Math/Require.cs @@ -38,6 +38,11 @@ public Expr Visit(IShapeEvaluateContext context, Require target) => public Metric Visit(IMetricEvaluateContext context, Require target) => Metric.Zero; + public IValue Visit(IEvaluateContext context, Require target) + { + return context.GetArgumentValue(target, Require.Value); + } + private IValue Visit(bool predicate, IValue value, Require target) { if (!predicate) diff --git a/src/Nncase.Evaluator/Math/Unary.cs b/src/Nncase.Evaluator/Math/Unary.cs index cf4fab0735..64c3bfbfbf 100644 --- a/src/Nncase.Evaluator/Math/Unary.cs +++ b/src/Nncase.Evaluator/Math/Unary.cs @@ -43,7 +43,7 @@ public IValue Visit(IEvaluateContext context, Unary unary) UnaryOp.Cos => OrtKI.Cos(input), UnaryOp.Cosh => OrtKI.Cosh(input), UnaryOp.Exp => OrtKI.Exp(input), - UnaryOp.Floor => OrtKI.Floor(input), + UnaryOp.Floor => OrtKI.Floor(input.Cast(OrtDataType.Float)).Cast(input.DataType), UnaryOp.Log => OrtKI.Log(input), UnaryOp.Neg => OrtKI.Neg(input), UnaryOp.Round => OrtKI.Round(input), diff --git a/src/Nncase.Evaluator/MetricEvaluateProvider.cs b/src/Nncase.Evaluator/MetricEvaluateProvider.cs index ed6df77b82..6709059a10 100644 --- a/src/Nncase.Evaluator/MetricEvaluateProvider.cs +++ b/src/Nncase.Evaluator/MetricEvaluateProvider.cs @@ -30,7 +30,7 @@ public Dictionary EvaluateMetric(Expr expr) if (expr.CheckedType is InvalidType) { - throw new InvalidOperationException("Expr in Evaluator need a valid type"); + throw new InvalidOperationException("Expr in Metric Evaluator need a valid type"); } var evaluatorVisitor = new MetricEvaluateVisitor(); diff --git a/src/Nncase.Evaluator/NN/BatchToSpace.cs b/src/Nncase.Evaluator/NN/BatchToSpace.cs index 7e070e0c83..d60473b41a 100644 --- a/src/Nncase.Evaluator/NN/BatchToSpace.cs +++ b/src/Nncase.Evaluator/NN/BatchToSpace.cs @@ -4,11 +4,13 @@ using System; using System.Collections.Generic; using System.Diagnostics; +using System.Drawing; using System.Linq; using Nncase.CostModel; using Nncase.IR; using Nncase.IR.NN; using Nncase.IR.Tensors; +using Nncase.Utilities; using OrtKISharp; using static Nncase.IR.F.Tensors; @@ -17,15 +19,15 @@ namespace Nncase.Evaluator.NN; /// /// Evaluator for . 
/// -public class BatchToSpaceEvaluator : IEvaluator, ITypeInferencer, ICostEvaluator, IMetricEvaluator +public class BatchToSpaceEvaluator : IEvaluator, ITypeInferencer, ICostEvaluator, IMetricEvaluator, IShapeEvaluator { /// public IValue Visit(IEvaluateContext context, BatchToSpace s) { - var input = context.GetOrtArgumentValue(s, BatchToSpace.Input); + var input = context.GetArgumentValue(s, BatchToSpace.Input); // to nhwc - var input0 = OrtKI.Transpose(input, new long[] { 0, 2, 3, 1 }); + var input0 = NCHWToNHWC(input.AsTensor()).Evaluate().AsTensor().ToOrtTensor(); var blockShape = context.GetArgumentValueAsArray(s, BatchToSpace.BlockShape); var crop = context.GetOrtArgumentValue(s, BatchToSpace.Crops).Cast(OrtDataType.Int32); @@ -65,8 +67,8 @@ public IValue Visit(IEvaluateContext context, BatchToSpace s) var result = OrtKI.Slice(x3, cropStart, endRange, axesConst, strideConst); // to nchw - var transposeResult = OrtKI.Transpose(result, new long[] { 0, 3, 1, 2 }); - return transposeResult.ToValue(); + var transposeResult = NHWCToNCHW(result.ToTensor()).Evaluate(); + return transposeResult; } /// @@ -99,6 +101,53 @@ public Metric Visit(IMetricEvaluateContext context, BatchToSpace target) }; } + public Expr Visit(IShapeEvaluateContext context, BatchToSpace target) + { + var inShape = context.GetArgumentShape(target, BatchToSpace.Input); + var input = context.GetArgument(target, BatchToSpace.Input); + if (input.CheckedShape.Rank == 4) + { + inShape = Stack(new IR.Tuple(inShape[0], inShape[2], inShape[3], inShape[1]), 0); + } + + if (input.CheckedShape.Rank == 3) + { + inShape = Stack(new IR.Tuple(inShape[0], inShape[2], inShape[1]), 0); + } + + var blockShape = context.GetArgument(target, BatchToSpace.BlockShape); + if (!blockShape.CheckedShape.IsFixed) + { + throw new NotImplementedException(); + } + + var crops = context.GetArgument(target, BatchToSpace.Crops); + var blockSize = Prod(blockShape); + var batch = inShape[0]; + var d0 = batch / blockSize; + var m = blockShape.CheckedShape[0].FixedValue; + var cropSection = Enumerable.Range(0, m).Select( + i => (inShape[i + 1] * blockShape[0]) - crops[i, 0] - crops[i, 1]).ToArray(); + + var inRank = Cast(ShapeOf(inShape)[0], DataTypes.Int32); + var remainSize = inRank - 1 - m; + var remainShape = new If(remainSize > 0, ShapeExprUtility.Slice(inShape, 1 + m, int.MaxValue), Array.Empty()); + + var outShapeList = Concat(new IR.Tuple(Stack(new IR.Tuple(new[] { d0 }), 0), Stack(new IR.Tuple(cropSection), 0), remainShape), 0); + + if (input.CheckedShape.Rank == 4) + { + return Stack(new IR.Tuple(outShapeList[0], outShapeList[3], outShapeList[1], outShapeList[2]), 0); + } + + if (input.CheckedShape.Rank == 3) + { + return Stack(new IR.Tuple(outShapeList[0], outShapeList[2], outShapeList[1]), 0); + } + + throw new NotImplementedException(); + } + private static IEnumerable BoostRange(int start, int end, int step = 1) { int x = start; @@ -135,8 +184,9 @@ private long[] GetPerm(int xLen, int blockLen) private IRType Visit(ITypeInferenceContext context, BatchToSpace target, TensorType input, TensorType blockShape, TensorType crops) { - // todo: - var inShape = TypeInference.ApplyPerm(input.Shape, new[] { 0, 2, 3, 1 }); + var inShape = input.Shape.Rank == 4 + ? 
TypeInference.ApplyPerm(input.Shape, new[] { 0, 2, 3, 1 }) + : TypeInference.ApplyPerm(input.Shape, new[] { 0, 2, 1 }); var batch = inShape[0]; if (context.GetArgument(target, BatchToSpace.BlockShape) is TensorConst blockShapeValue && context.GetArgument(target, BatchToSpace.Crops) is TensorConst cropsValue) @@ -158,12 +208,15 @@ private IRType Visit(ITypeInferenceContext context, BatchToSpace target, TensorT var remainSize = inShape.Rank - 1 - m; var remainShape = remainSize > 0 ? inShape.Skip(1 + m) : Array.Empty(); var outShapeList = new[] { d0 }.Concat(cropSection).Concat(remainShape).ToArray(); - var outShape = TypeInference.ApplyPerm(outShapeList, new[] { 0, 3, 1, 2 }); + var outShape = + outShapeList.Length == 4 + ? TypeInference.ApplyPerm(outShapeList, new[] { 0, 3, 1, 2 }) + : TypeInference.ApplyPerm(outShapeList, new[] { 0, 2, 1 }); return input with { Shape = outShape }; } else { - return new InvalidType("BatchToSpace can't infer shape with dynamic crops"); + return new TensorType(input.DType, Enumerable.Repeat(Dimension.Unknown, input.Shape.Count).ToArray()); } } } diff --git a/src/Nncase.Evaluator/NN/Conv2D.cs b/src/Nncase.Evaluator/NN/Conv2D.cs index c46f22c3c9..67f361adc8 100644 --- a/src/Nncase.Evaluator/NN/Conv2D.cs +++ b/src/Nncase.Evaluator/NN/Conv2D.cs @@ -90,16 +90,13 @@ public Metric Visit(IMetricEvaluateContext context, Conv2D target) public Expr Visit(IShapeEvaluateContext context, Conv2D target) { - var inShape = context.GetArgumentShape(target, Conv2D.Input); - var wShape = context.GetArgumentShape(target, Conv2D.Weights); + var input = context.GetArgumentShape(target, Conv2D.Input); + var weights = context.GetArgumentShape(target, Conv2D.Weights); var pad = Cast(context.GetArgument(target, Conv2D.Padding), DataTypes.Int32); var stride = Cast(context.GetArgument(target, Conv2D.Stride), DataTypes.Int32); var dilation = Cast(context.GetArgument(target, Conv2D.Dilation), DataTypes.Int32); - var n = inShape[0]; - var oc = wShape[0]; - var h = Util.GetWindowedOutputSize(inShape[2] + pad[0, 0] + pad[0, 1], wShape[2], stride[0], dilation[0], false, false); - var w = Util.GetWindowedOutputSize(inShape[3] + pad[1, 0] + pad[1, 1], wShape[3], stride[1], dilation[1], false, false); - return Stack(new IR.Tuple(n, oc, h, w), 0); + var groups = Cast(context.GetArgument(target, Conv2D.Groups), DataTypes.Int32); + return IR.F.ShapeExpr.Conv2DShape(input, weights, pad, stride, dilation, groups); } private IRType Visit(ITypeInferenceContext context, Conv2D target, TensorType input, TensorType weights) diff --git a/src/Nncase.Evaluator/NN/Conv2DTranspose.cs b/src/Nncase.Evaluator/NN/Conv2DTranspose.cs index 369a4efbb2..626043680e 100644 --- a/src/Nncase.Evaluator/NN/Conv2DTranspose.cs +++ b/src/Nncase.Evaluator/NN/Conv2DTranspose.cs @@ -93,13 +93,13 @@ public Metric Visit(IMetricEvaluateContext context, Conv2DTranspose target) public Expr Visit(IShapeEvaluateContext context, Conv2DTranspose target) { - var input = context.GetArgument(target, Conv2DTranspose.Input); - var weights = context.GetArgument(target, Conv2DTranspose.Weights); + var input = context.GetArgumentShape(target, Conv2DTranspose.Input); + var weights = context.GetArgumentShape(target, Conv2DTranspose.Weights); var stride = context.GetArgument(target, Conv2DTranspose.Stride); var dilation = context.GetArgument(target, Conv2DTranspose.Dilation); var padding = context.GetArgument(target, Conv2DTranspose.Padding); var outputPadding = context.GetArgument(target, Conv2DTranspose.OutputPadding); var groups = 
context.GetArgument(target, Conv2DTranspose.Groups); - return Util.GetConvTransposeOutputShape(input, weights, stride, outputPadding, padding, dilation, string.Empty, groups); + return IR.F.ShapeExpr.Conv2DTransposeShape(IR.F.Tensors.Cast(input, DataTypes.Int64), weights, stride, dilation, padding, outputPadding, groups); } } diff --git a/src/Nncase.Evaluator/NN/LayerNorm.cs b/src/Nncase.Evaluator/NN/LayerNorm.cs index eaa7ef2b2f..4088d8f9e4 100644 --- a/src/Nncase.Evaluator/NN/LayerNorm.cs +++ b/src/Nncase.Evaluator/NN/LayerNorm.cs @@ -12,7 +12,8 @@ namespace Nncase.Evaluator.NN; /// /// Evaluator for . /// -public class LayerNormEvaluator : IEvaluator, ITypeInferencer, ICostEvaluator, IShapeEvaluator, IMetricEvaluator +public class LayerNormEvaluator : IEvaluator, ITypeInferencer, ICostEvaluator, + IShapeEvaluator, IMetricEvaluator { /// public IValue Visit(IEvaluateContext context, LayerNorm layerNorm) @@ -77,6 +78,11 @@ private Tensor LayerNormImpl(Tensor input, Tensor scale, Tensor bias, int axis, float[] inputArray = input.ToArray(); float[] outputArray = new float[inputArray.Length]; int[] inShape = input.Shape.ToValueArray(); + if (axis < 0) + { + axis += inShape.Length; + } + for (int i = 0; i < axis; i++) { outputSize *= inShape[i]; @@ -84,14 +90,7 @@ private Tensor LayerNormImpl(Tensor input, Tensor scale, Tensor bias, int axis, for (int i = axis; i < inShape.Length; i++) { - if (i < 0) - { - innerSize *= inShape[^System.Math.Abs(i)]; - } - else - { - innerSize *= inShape[i]; - } + innerSize *= inShape[i]; } for (int batch = 0; batch < outputSize; batch++) @@ -131,7 +130,8 @@ private Tensor LayerNormImpl(Tensor input, Tensor scale, Tensor bias, int axis, for (int i = 0; i < innerSize; i++) { - outputArray[(i + (batch * innerSize)) % outputArray.Length] = (div[i] * scale.ToArray()[i % scale.Length]) + bias.ToArray()[i % bias.Length]; + outputArray[(i + (batch * innerSize)) % outputArray.Length] = + (div[i] * scale.ToArray()[i % scale.Length]) + bias.ToArray()[i % bias.Length]; } } diff --git a/src/Nncase.Evaluator/NN/Pad.cs b/src/Nncase.Evaluator/NN/Pad.cs index f82aaeb3e7..13a4111456 100644 --- a/src/Nncase.Evaluator/NN/Pad.cs +++ b/src/Nncase.Evaluator/NN/Pad.cs @@ -123,7 +123,6 @@ public Expr Visit(IShapeEvaluateContext context, Pad target) // outShape = inShape + paddings var padsSumShape = StackScalar(Cast(ShapeOf(paddings)[0], DataTypes.Int32)); var outShape = inShape + Cast(Reshape(paddings, padsSumShape), DataTypes.Int32); - DumpScope.Current.DumpIR(outShape, "paddings"); return outShape; } diff --git a/src/Nncase.Evaluator/NN/SpaceToBatch.cs b/src/Nncase.Evaluator/NN/SpaceToBatch.cs index 60b5c2b624..756f8c1472 100644 --- a/src/Nncase.Evaluator/NN/SpaceToBatch.cs +++ b/src/Nncase.Evaluator/NN/SpaceToBatch.cs @@ -9,7 +9,9 @@ using Nncase.IR; using Nncase.IR.NN; using Nncase.IR.Tensors; +using Nncase.Utilities; using OrtKISharp; +using static Nncase.IR.F.Tensors; using Range = System.Range; namespace Nncase.Evaluator.NN; @@ -17,7 +19,7 @@ namespace Nncase.Evaluator.NN; /// /// Evaluator for . 
/// -public class SpaceToBatchEvaluator : IEvaluator, ITypeInferencer, ICostEvaluator, IMetricEvaluator +public class SpaceToBatchEvaluator : IEvaluator, ITypeInferencer, ICostEvaluator, IMetricEvaluator, IShapeEvaluator { /// public Cost Visit(ICostEvaluateContext context, SpaceToBatch target) @@ -92,6 +94,43 @@ public IRType Visit(ITypeInferenceContext context, SpaceToBatch target) return Visit(context, target, input, blockShape, paddings); } + public Expr Visit(IShapeEvaluateContext context, SpaceToBatch target) + { + var inShape = context.GetArgumentShape(target, SpaceToBatch.Input); + var blockShape = context.GetArgument(target, SpaceToBatch.BlockShape); + var padding = context.GetArgument(target, SpaceToBatch.Paddings); + var input = context.GetArgument(target, SpaceToBatch.Input); + if (blockShape is TensorConst blockConst) + { + var blockShapeValue = blockConst.Value.ToArray(); + var m = blockShapeValue.Length; + var inRank = input.CheckedShape.Rank; + + var paddedShape = new[] { inShape[0] } + .Concat(Enumerable.Range(0, inRank) + .Select(i => + { + return inShape[i + 1] + padding[2 * i, 0] + padding[2 * i, 1]; + })) + .ToArray(); + var outFirst = new[] { paddedShape[0] * IR.F.Tensors.Prod(blockShapeValue) }; + + // var inRank = Cast(ShapeOf(inShape)[0], DataTypes.Int32); + var outMid = Enumerable.Range(0, m).Select(i => + { + return paddedShape[i + 1] / blockShapeValue[i]; + }).ToArray(); + + var remainSize = inRank - 1 - m; + var remainShape = new If(remainSize > 0, ShapeExprUtility.Slice(inShape, 1 + m, int.MaxValue), Array.Empty()); + var outLast = remainShape; + var outShape = Concat(new IR.Tuple(Stack(new IR.Tuple(outFirst.Concat(outMid).ToArray()), 0), outLast), 0); + return outShape; + } + + throw new NotImplementedException(); + } + private T[] RangeExec(long end, Func f) { return EndRange(0, (int)end).Select(f).ToArray(); @@ -142,6 +181,6 @@ private IRType Visit(ITypeInferenceContext context, SpaceToBatch target, TensorT return input with { Shape = new Shape(outshape) }; } - return new InvalidType("Can't Infer Shape With Dynamic Input!"); + return new TensorType(input.DType, Enumerable.Repeat(Dimension.Unknown, input.Shape.Count).ToArray()); } } diff --git a/src/Nncase.Evaluator/ShapeEvaluateContext.cs b/src/Nncase.Evaluator/ShapeEvaluateContext.cs index 65e54eb698..e9919fb3d6 100644 --- a/src/Nncase.Evaluator/ShapeEvaluateContext.cs +++ b/src/Nncase.Evaluator/ShapeEvaluateContext.cs @@ -19,16 +19,20 @@ internal sealed class ShapeEvaluateContext : IShapeEvaluateContext { private readonly Dictionary _memo; - public ShapeEvaluateContext(Dictionary memo, IReadOnlyDictionary varMap) + public ShapeEvaluateContext(Dictionary memo, ShapeExprCache cache) { _memo = memo; - VarMap = varMap; + Cache = cache.Cache; + VarMap = cache.VarMap; } public IReadOnlyDictionary VarMap { get; } public Call? 
CurrentCall { get; set; } + // memo used by reference, can't make new _memo with memo.concat(cache) + public Dictionary Cache { get; set; } + public Expr GetArgument(Op op, ParameterInfo parameter) { if (op.GetType() == parameter.OwnerType) @@ -51,16 +55,38 @@ public Expr GetArgumentShape(Op op, ParameterInfo parameter) var expr = GetArgument(op, parameter); if (expr is Tuple tuple) { - return new Tuple(tuple.Fields.ToArray().Select(v => Cast(_memo[v], DataTypes.Int32)).ToArray()); + return new Tuple(tuple.Fields.ToArray().Select(v => Cast(GetResultFromMemo(v), DataTypes.Int32)).ToArray()); } + // call if (expr.CheckedType is TupleType) { - var tupleShapeExpr = (Tuple)expr.EvaluateShapeExpr(VarMap); - return new Tuple(tupleShapeExpr.Fields.ToArray().Select(expr => Cast(expr, DataTypes.Int32)).ToArray()); + var shape = expr.EvaluateShapeExpr(new ShapeExprCache(VarMap)); + if (shape is Call c && c.Target is IR.Math.Require && c.Arguments[IR.Math.Require.Value.Index] is Tuple tupleShapeExpr) + { + return new Tuple(tupleShapeExpr.Fields.ToArray().Select(expr => Cast(expr, DataTypes.Int32)).ToArray()); + } + + // for split + else + { + // when it is if, it not tuple + if (shape is If @if && @if.CheckedType is TupleType tupleType) + { + return new Tuple( + Enumerable + .Range(0, tupleType.Fields.Count) + .Select(i => Cast(shape[i], DataTypes.Int32)) + .ToArray()); + } + else + { + return new Tuple(((Tuple)shape).Fields.ToArray().Select(expr => Cast(expr, DataTypes.Int32)).ToArray()); + } + } } - var shapeExpr = _memo[expr]; + var shapeExpr = GetResultFromMemo(expr); return Cast(shapeExpr, DataTypes.Int32); } @@ -70,4 +96,14 @@ public Expr GetArgumentRank(Op op, ParameterInfo parameter) } private Call GetCurrentCall() => CurrentCall ?? throw new InvalidOperationException("Current call is not set."); + + private Expr GetResultFromMemo(Expr expr) + { + if (_memo.ContainsKey(expr)) + { + return _memo[expr]; + } + + throw new InvalidOperationException("Expr not found in memo and cache"); + } } diff --git a/src/Nncase.Evaluator/ShapeEvaluateVisitor.cs b/src/Nncase.Evaluator/ShapeEvaluateVisitor.cs index a458f12cb6..f5e8c3ee4d 100644 --- a/src/Nncase.Evaluator/ShapeEvaluateVisitor.cs +++ b/src/Nncase.Evaluator/ShapeEvaluateVisitor.cs @@ -6,6 +6,7 @@ using System.Linq; using System.Reactive; using Nncase.IR; +using Nncase.IR.Tensors; namespace Nncase.Evaluator; @@ -13,9 +14,9 @@ internal sealed class ShapeEvaluateVisitor : ExprVisitor { private readonly ShapeEvaluateContext _context; - public ShapeEvaluateVisitor(IReadOnlyDictionary varMap) + public ShapeEvaluateVisitor(ShapeExprCache cache) { - _context = new ShapeEvaluateContext(ExprMemo, varMap); + _context = new ShapeEvaluateContext(ExprMemo, cache); } protected override Expr VisitLeafBaseFunction(BaseFunction expr) @@ -23,6 +24,16 @@ protected override Expr VisitLeafBaseFunction(BaseFunction expr) return None.Default; } + protected override Expr DispatchVisit(Expr expr) + { + if (_context.Cache.TryGetValue(expr, out var value)) + { + return value; + } + + return base.DispatchVisit(expr); + } + protected override Expr VisitLeafIf(If expr) { return new If(expr.Condition, Visit(expr.Then), Visit(expr.Else)); @@ -31,7 +42,13 @@ protected override Expr VisitLeafIf(If expr) /// protected override Expr VisitLeafConst(Const expr) { - return expr.CheckedShape.ToValueArray(); + var shape = expr.CheckedShape; + if (shape.IsScalar) + { + return 1; + } + + return shape.ToValueArray(); } /// @@ -79,8 +96,13 @@ protected override Expr VisitLeafVar(Var expr) 
return shape.ToValueArray(); } - // PrintNotExistVarMap(expr); - var shapeExpr = shape.Select((x, i) => x.IsFixed ? x.FixedValue : _context.VarMap[expr][i]).ToArray(); + PrintNotExistVarMap(expr); + if (shape.Count != _context.VarMap[expr].Length) + { + throw new InvalidOperationException(); + } + + var shapeExpr = shape.Select((x, i) => x.IsFixed ? x.FixedValue : _context.VarMap[expr][i]).Select(x => IR.F.Tensors.Cast(x, DataTypes.Int32)).ToArray(); return IR.F.Tensors.Stack(new IR.Tuple(shapeExpr), 0); } diff --git a/src/Nncase.Evaluator/ShapeEvaluatorProvider.cs b/src/Nncase.Evaluator/ShapeEvaluatorProvider.cs index 3f224d6ba4..6c9a785a5a 100644 --- a/src/Nncase.Evaluator/ShapeEvaluatorProvider.cs +++ b/src/Nncase.Evaluator/ShapeEvaluatorProvider.cs @@ -22,7 +22,7 @@ public ShapeEvaluateProvider(IServiceProvider serviceProvider) _serviceProvider = serviceProvider; } - public Expr EvaluateShapeExpr(Expr expr, IReadOnlyDictionary varsMap) + public Expr EvaluateShapeExpr(Expr expr, ShapeExprCache cache) { if (expr.CheckedType is null) { @@ -36,7 +36,7 @@ public Expr EvaluateShapeExpr(Expr expr, IReadOnlyDictionary varsMa DumpScope.Current.DumpIR(expr, "EvaluateShapeExprInvalid"); } - throw new InvalidOperationException("Expr in Evaluator need a valid type"); + throw new InvalidOperationException("Expr in Shape Evaluator need a valid type"); } if (expr.CheckedType is TensorType && expr.CheckedShape.IsFixed) @@ -55,7 +55,7 @@ public Expr EvaluateShapeExpr(Expr expr, IReadOnlyDictionary varsMa } } - var evaluatorVisitor = new ShapeEvaluateVisitor(varsMap); + var evaluatorVisitor = new ShapeEvaluateVisitor(cache); return evaluatorVisitor.Visit(expr); } diff --git a/src/Nncase.Evaluator/ShapeExpr/BroadcastShape.cs b/src/Nncase.Evaluator/ShapeExpr/BroadcastShape.cs new file mode 100644 index 0000000000..61cb3e9438 --- /dev/null +++ b/src/Nncase.Evaluator/ShapeExpr/BroadcastShape.cs @@ -0,0 +1,57 @@ +// Copyright (c) Canaan Inc. All rights reserved. +// Licensed under the Apache license. See LICENSE file in the project root for full license information. 
+ +using System; +using System.Linq; +using Nncase.CostModel; +using Nncase.Diagnostics; +using Nncase.IR; +using Nncase.IR.ShapeExpr; +using Nncase.IR.Tensors; +using Nncase.Utilities; + +namespace Nncase.Evaluator.ShapeExpr; + +public partial class BroadcastShapeEvaluator : IEvaluator, ITypeInferencer, ICostEvaluator, IShapeEvaluator, IMetricEvaluator +{ + public IValue Visit(IEvaluateContext context, BroadcastShape broadcastShape) + { + var inputs = context.GetArgumentValueAsTensors(broadcastShape, BroadcastShape.Inputs); + var type = TypeInference.BroadcastType(inputs.Select(input => new TensorType(DataTypes.Int32, input.ToArray())).ToArray()); + var shape = type switch + { + TensorType tt => tt.Shape.ToValueArray().Select(x => (long)x).ToArray(), + InvalidType it => throw new InvalidOperationException(it.Reason), + _ => throw new InvalidOperationException("Unknown IRType"), + }; + + return Value.FromTensor(shape); + } + + public IRType Visit(ITypeInferenceContext context, BroadcastShape target) + { + return new TensorType(DataTypes.Int64, new[] { Dimension.Unknown }); + } + + public Cost Visit(ICostEvaluateContext context, BroadcastShape target) + { + return CostUtility.GetShapeExprCost(); + } + + public Expr Visit(IShapeEvaluateContext context, BroadcastShape target) + { + var inShape = context.GetArgumentShape(target, BroadcastShape.Inputs); + var len = ((IR.Tuple)inShape).Fields.ToArray().Aggregate((Expr)1, (i, call) => IR.F.Math.Max(i, call)); + var bn = IR.F.Tensors.Cast(len, DataTypes.Int32); + return bn; + } + + public Metric Visit(IMetricEvaluateContext context, BroadcastShape target) + { + var returnType = context.GetReturnType(); + return new() + { + [MetricFactorNames.OffChipMemoryTraffic] = CostUtility.GetMemoryAccess(returnType), + }; + } +} diff --git a/src/Nncase.Evaluator/ShapeExpr/Conv2DShape.cs b/src/Nncase.Evaluator/ShapeExpr/Conv2DShape.cs new file mode 100644 index 0000000000..0be22ac567 --- /dev/null +++ b/src/Nncase.Evaluator/ShapeExpr/Conv2DShape.cs @@ -0,0 +1,60 @@ +// Copyright (c) Canaan Inc. All rights reserved. +// Licensed under the Apache license. See LICENSE file in the project root for full license information. 
+ +using System; +using System.Linq; +using Nncase.CostModel; +using Nncase.IR; +using Nncase.IR.ShapeExpr; + +namespace Nncase.Evaluator.ShapeExpr; + +[EvaluatorGenerator] +[TypeInferGenerator] +public partial class Conv2DShapeEvaluator : IEvaluator, ITypeInferencer, ICostEvaluator, IShapeEvaluator, IMetricEvaluator +{ + public IValue Visit(Tensor input, Tensor weights, Tensor padding, Tensor stride, Tensor dilation, Tensor groups) + { + var ty = TypeInference.Conv2DType(GetTensorType(input), GetTensorType(weights), stride, padding, dilation, groups); + var shape = ty switch + { + TensorType tensorType => tensorType.Shape, + _ => throw new InvalidOperationException(), + }; + if (!shape.IsFixed) + { + throw new InvalidOperationException(); + } + + return Value.FromTensor(shape.Select(x => (long)x.FixedValue).ToArray()); + } + + public IRType Visit() + { + return new TensorType(DataTypes.Int64, new Shape(4)); + } + + public Cost Visit(ICostEvaluateContext context, Conv2DShape target) + { + return CostUtility.GetShapeExprCost(); + } + + public Expr Visit(IShapeEvaluateContext context, Conv2DShape target) + { + return new[] { 4 }; + } + + public Metric Visit(IMetricEvaluateContext context, Conv2DShape target) + { + var returnType = context.GetReturnType(); + return new() + { + [MetricFactorNames.OffChipMemoryTraffic] = CostUtility.GetMemoryAccess(returnType), + }; + } + + private TensorType GetTensorType(Tensor input) + { + return new TensorType(DataTypes.Float32, input.ToArray()); + } +} diff --git a/src/Nncase.Evaluator/ShapeExpr/Conv2DTransposeShape.cs b/src/Nncase.Evaluator/ShapeExpr/Conv2DTransposeShape.cs new file mode 100644 index 0000000000..a61f731cb5 --- /dev/null +++ b/src/Nncase.Evaluator/ShapeExpr/Conv2DTransposeShape.cs @@ -0,0 +1,48 @@ +// Copyright (c) Canaan Inc. All rights reserved. +// Licensed under the Apache license. See LICENSE file in the project root for full license information. 
+ +using System; +using System.Collections.Generic; +using System.Linq; +using Nncase.CostModel; +using Nncase.Diagnostics; +using Nncase.IR; +using Nncase.IR.ShapeExpr; +using static Nncase.IR.F.Tensors; + +namespace Nncase.Evaluator.ShapeExpr; + +[EvaluatorGenerator] +[TypeInferGenerator] +public partial class Conv2DTransposeShapeEvaluator : IEvaluator, ITypeInferencer, ICostEvaluator, IShapeEvaluator, IMetricEvaluator +{ + public IValue Visit(Tensor input, Tensor weights, Tensor stride, Tensor dilation, Tensor padding, Tensor outputPadding, int groups) + { + var outShape = Util.GetConvTransposeOutputShape(input, weights, stride, outputPadding, padding, dilation, string.Empty, Cast(groups, DataTypes.Int64)); + return Cast(outShape, DataTypes.Int64).Evaluate(); + } + + public IRType Visit() + { + return new TensorType(DataTypes.Int64, new Shape(4)); + } + + public Cost Visit(ICostEvaluateContext context, Conv2DTransposeShape target) + { + return CostUtility.GetShapeExprCost(); + } + + public Expr Visit(IShapeEvaluateContext context, Conv2DTransposeShape target) + { + return new[] { 4 }; + } + + public Metric Visit(IMetricEvaluateContext context, Conv2DTransposeShape target) + { + var returnType = context.GetReturnType(); + return new() + { + [MetricFactorNames.OffChipMemoryTraffic] = CostUtility.GetMemoryAccess(returnType), + }; + } +} diff --git a/src/Nncase.Evaluator/ShapeExpr/MatMulShape.cs b/src/Nncase.Evaluator/ShapeExpr/MatMulShape.cs new file mode 100644 index 0000000000..8708d689a2 --- /dev/null +++ b/src/Nncase.Evaluator/ShapeExpr/MatMulShape.cs @@ -0,0 +1,71 @@ +// Copyright (c) Canaan Inc. All rights reserved. +// Licensed under the Apache license. See LICENSE file in the project root for full license information. + +using System; +using System.Collections.Generic; +using System.Linq; +using NetFabric.Hyperlinq; +using Nncase.CostModel; +using Nncase.IR; +using Nncase.IR.Math; +using Nncase.IR.ShapeExpr; +using Nncase.Utilities; +using static Nncase.IR.F.Tensors; + +namespace Nncase.Evaluator.ShapeExpr; + +[EvaluatorGenerator] +[TypeInferGenerator] +public partial class MatMulShapeEvaluator : IEvaluator, ITypeInferencer, ICostEvaluator, IShapeEvaluator, IMetricEvaluator +{ + public IValue Visit(Tensor lhs, Tensor rhs) + { + var lhsShape = lhs.ToArray(); + var rhsShape = rhs.ToArray(); + var newLhs = To4D(lhsShape); + var newRhs = To4D(rhsShape); + var bigShapeSize = System.Math.Max(lhsShape.Length, rhsShape.Length); + var newShape = new List(); + for (int i = 0; i < bigShapeSize - 2; i++) + { + newShape.Add(System.Math.Max(newLhs[i + 4 - bigShapeSize], newRhs[i + 4 - bigShapeSize])); + } + + newShape.Add(lhsShape[^2]); + newShape.Add(rhsShape[^1]); + return Value.FromTensor(newShape.Select(x => (long)x).ToArray()); + } + + public IRType Visit(TensorType lhs, TensorType rhs) + { + var shape = new[] { lhs, rhs }.MaxBy(tt => tt.Shape.Rank)!.Shape; + return new TensorType(DataTypes.Int64, shape); + } + + public Cost Visit(ICostEvaluateContext context, MatMulShape target) + { + return CostUtility.GetShapeExprCost(); + } + + public Expr Visit(IShapeEvaluateContext context, MatMulShape target) + { + // todo: broadcast + var lhsRank = context.GetArgumentShape(target, MatMulShape.Lhs); + var rhsRank = context.GetArgumentShape(target, MatMulShape.Rhs); + return IR.F.Math.Max(lhsRank, rhsRank); + } + + public Metric Visit(IMetricEvaluateContext context, MatMulShape target) + { + var returnType = context.GetReturnType(); + return new() + { + [MetricFactorNames.OffChipMemoryTraffic] = 
CostUtility.GetMemoryAccess(returnType), + }; + } + + private int[] To4D(int[] shape) + { + return Enumerable.Repeat(0, 4 - shape.Length).Concat(shape).ToArray(); + } +} diff --git a/src/Nncase.Evaluator/ShapeExpr/ShapeExprModule.cs b/src/Nncase.Evaluator/ShapeExpr/ShapeExprModule.cs new file mode 100644 index 0000000000..bbc3631372 --- /dev/null +++ b/src/Nncase.Evaluator/ShapeExpr/ShapeExprModule.cs @@ -0,0 +1,24 @@ +// Copyright (c) Canaan Inc. All rights reserved. +// Licensed under the Apache license. See LICENSE file in the project root for full license information. + +using DryIoc; +using Nncase.Evaluator.Tensors; +using Nncase.Hosting; +using Nncase.IR.ShapeExpr; +using Nncase.IR.Tensors; + +namespace Nncase.Evaluator.ShapeExpr; + +/// +/// ShapeExpr module. +/// +internal class ShapeExprModule : IApplicationPart +{ + public void ConfigureServices(IRegistrator registrator) + { + registrator.RegisterManyInterface(reuse: Reuse.Singleton); + registrator.RegisterManyInterface(reuse: Reuse.Singleton); + registrator.RegisterManyInterface(reuse: Reuse.Singleton); + registrator.RegisterManyInterface(reuse: Reuse.Singleton); + } +} diff --git a/src/Nncase.Evaluator/Tensors/BucketPad.cs b/src/Nncase.Evaluator/Tensors/BucketPad.cs index 7ec5565367..30595c208e 100644 --- a/src/Nncase.Evaluator/Tensors/BucketPad.cs +++ b/src/Nncase.Evaluator/Tensors/BucketPad.cs @@ -21,6 +21,11 @@ public class BucketPadEvaluator : IEvaluator, ITypeInferencer(bucketPad, BucketPad.Shape); var pads = shape - (Expr)input.Shape; var paddings = Transpose( diff --git a/src/Nncase.Evaluator/Tensors/GetItem.cs b/src/Nncase.Evaluator/Tensors/GetItem.cs index a1e932fc3a..4c3d3a76de 100644 --- a/src/Nncase.Evaluator/Tensors/GetItem.cs +++ b/src/Nncase.Evaluator/Tensors/GetItem.cs @@ -9,6 +9,7 @@ using Nncase.CostModel; using Nncase.IR; using Nncase.IR.Tensors; +using Nncase.Utilities; using Tuple = System.Tuple; namespace Nncase.Evaluator.Tensors; @@ -45,6 +46,10 @@ public Metric Visit(IMetricEvaluateContext context, GetItem target) public Expr Visit(IShapeEvaluateContext context, GetItem target) { + // [n] 1 -> 1 + // [n, c] 1 -> c + // [n, c] 2 -> 1 + // the leading dims are reduced by the length of the index value var input = context.GetArgumentShape(target, GetItem.Input); var index = context.GetArgument(target, GetItem.Index); if (input is IR.Tuple) { @@ -53,7 +58,18 @@ public Expr Visit(IShapeEvaluateContext context, GetItem target) } else { - return IR.F.Tensors.ShapeOf(input[index]); + _ = 0; + Expr len; + if (index.CheckedShape.IsScalar) + { + len = 1; + } + else + { + len = context.GetArgumentShape(target, GetItem.Index)[0]; + } + + return ShapeExprUtility.Slice(input, len, int.MaxValue); } } @@ -82,6 +98,7 @@ private IValue Visit(IValue input, IValue index) private IRType Visit(ITypeInferenceContext context, GetItem target, IRType input, TensorType index) { IRType ret = new InvalidType("Need Be Reset!"); + var indexExpr = context.GetArgument(target, GetItem.Index); switch (input) { case TensorType tensorType: @@ -90,6 +107,26 @@ private IRType Visit(ITypeInferenceContext context, GetItem target, IRType input return input; } + if (indexExpr is TensorConst indexV) + { + var indices = indexV.Value.ToArray(); + if (indices.Length > tensorType.Shape.Rank) + { + return new InvalidType("GetItem index count should be smaller than the input shape rank"); + } + + if (indices.Length == tensorType.Shape.Rank) + { + foreach (var (i, dim) in indices.Zip(tensorType.Shape)) + { + if (dim.IsFixed && i >= dim.FixedValue) + { + return new InvalidType("GetItem index value should be smaller 
than shape dim"); + } + } + } + } + var shape = index.Shape switch { { IsScalar: true } => new Shape(tensorType.Shape.Skip(1)), @@ -101,7 +138,7 @@ private IRType Visit(ITypeInferenceContext context, GetItem target, IRType input ret = new TensorType(tensorType.DType, shape); break; case TupleType tupleType: - if (context.GetArgument(target, GetItem.Index) is TensorConst @const) + if (indexExpr is TensorConst @const) { var indexValue = @const.Value.ToScalar(); if (indexValue < tupleType.Count) diff --git a/src/Nncase.Evaluator/Tensors/IndexOf.cs b/src/Nncase.Evaluator/Tensors/IndexOf.cs index 4d01d33354..b87f288149 100644 --- a/src/Nncase.Evaluator/Tensors/IndexOf.cs +++ b/src/Nncase.Evaluator/Tensors/IndexOf.cs @@ -42,7 +42,7 @@ public Cost Visit(ICostEvaluateContext context, IndexOf target) }; } - public Expr Visit(IShapeEvaluateContext context, IndexOf target) => 1; + public Expr Visit(IShapeEvaluateContext context, IndexOf target) => System.Array.Empty(); public Metric Visit(IMetricEvaluateContext context, IndexOf target) { @@ -56,6 +56,6 @@ public Metric Visit(IMetricEvaluateContext context, IndexOf target) private IRType Visit(ITypeInferenceContext context, IndexOf target, TensorType input) { - return new TensorType(input.DType, Shape.Scalar); + return new TensorType(DataTypes.Int32, Shape.Scalar); } } diff --git a/src/Nncase.Evaluator/Tensors/Range.cs b/src/Nncase.Evaluator/Tensors/Range.cs index d2e452561f..5f185fe350 100644 --- a/src/Nncase.Evaluator/Tensors/Range.cs +++ b/src/Nncase.Evaluator/Tensors/Range.cs @@ -3,7 +3,9 @@ using Nncase.CostModel; using Nncase.IR; +using Nncase.Utilities; using OrtKISharp; +using static Nncase.Utilities.ShapeExprUtility; using Range = Nncase.IR.Tensors.Range; namespace Nncase.Evaluator.Tensors; @@ -11,7 +13,7 @@ namespace Nncase.Evaluator.Tensors; /// /// Evaluator for . /// -public class RangeEvaluator : IEvaluator, ITypeInferencer, ICostEvaluator, IMetricEvaluator +public class RangeEvaluator : IEvaluator, ITypeInferencer, ICostEvaluator, IMetricEvaluator, IShapeEvaluator { /// public IValue Visit(IEvaluateContext context, Range range) @@ -87,4 +89,12 @@ public Metric Visit(IMetricEvaluateContext context, Range target) [MetricFactorNames.OffChipMemoryTraffic] = CostUtility.GetMemoryAccess(ret), }; } + + public Expr Visit(IShapeEvaluateContext context, Range target) + { + var begin = context.GetArgument(target, Range.Begin); + var end = context.GetArgument(target, Range.End); + var step = context.GetArgument(target, Range.Step); + return ShapeExprUtility.StackOne((end - begin) / step); + } } diff --git a/src/Nncase.Evaluator/Tensors/Reshape.cs b/src/Nncase.Evaluator/Tensors/Reshape.cs index 7a5ee6d8bb..615728f7c7 100644 --- a/src/Nncase.Evaluator/Tensors/Reshape.cs +++ b/src/Nncase.Evaluator/Tensors/Reshape.cs @@ -65,7 +65,7 @@ public Expr Visit(IShapeEvaluateContext context, Reshape target) } var dim = Prod(inputShape) / System.Math.Abs(shapeArray.Aggregate((s, x) => x * s)); - var rhs = Enumerable.Repeat((Expr)0, negIndex).Append(dim + 1).ToArray(); + var rhs = shapeArray.Select((_, i) => i == negIndex ? 
dim + 1 : (Expr)0).ToArray(); var newShape = Stack(new IR.Tuple(rhs), 0); // dim = Product(inShape) / Produce(Reshape.Shape) diff --git a/src/Nncase.Evaluator/Tensors/Slice.cs b/src/Nncase.Evaluator/Tensors/Slice.cs index ccbdaae8a3..219894f0da 100644 --- a/src/Nncase.Evaluator/Tensors/Slice.cs +++ b/src/Nncase.Evaluator/Tensors/Slice.cs @@ -6,6 +6,7 @@ using System.Linq; using DryIoc.FastExpressionCompiler.LightExpression; using NetFabric.Hyperlinq; +using Nncase.CodeGen; using Nncase.CostModel; using Nncase.Evaluator.Math; using Nncase.IR; @@ -140,19 +141,45 @@ private IRType Visit(ITypeInferenceContext context, Slice target, TensorType inp return new InvalidType("Slice Input should not scalar"); } + var begin = context.GetArgument(target, Slice.Begins); + var end = context.GetArgument(target, Slice.Ends); + var stride = context.GetArgument(target, Slice.Strides); + if (begin.CheckedShape.IsFixed) + { + if (end.CheckedShape.IsFixed) + { + if (begin.CheckedShape[0].FixedValue != end.CheckedShape[0].FixedValue) + { + return new InvalidType("Slice begin, end, strides should be same length"); + } + } + + if (stride.CheckedShape.IsFixed) + { + if (begin.CheckedShape[0].FixedValue != stride.CheckedShape[0].FixedValue) + { + return new InvalidType("Slice begin, end, strides should be same length"); + } + } + } + if (context.GetArgument(target, Slice.Axes) is TensorConst axes_con) { if (input.Shape.IsRanked) { - if (context.GetArgument(target, Slice.Begins) is TensorConst begins_con && - context.GetArgument(target, Slice.Ends) is TensorConst ends_con && - context.GetArgument(target, Slice.Strides) is TensorConst strides_con) + if (begin is TensorConst begins_con && + end is TensorConst ends_con && + stride is TensorConst strides_con) { // end in onnx may be the maximum value of int64 // when use int, result value is -1 var ts_begins = begins_con.Value.Cast(); var ts_ends = ends_con.Value.Cast(); var ts_strides = strides_con.Value.Cast(); + if (ts_begins.Length != ts_ends.Length || ts_begins.Length != ts_strides.Length) + { + return new InvalidType("Slice begin, end, strides should be same length"); + } outShape = ApplyAxis(axes_con, input, (i, axis, inDim) => { diff --git a/src/Nncase.Evaluator/Tensors/Squeeze.cs b/src/Nncase.Evaluator/Tensors/Squeeze.cs index b6035f10be..05517c5b4d 100644 --- a/src/Nncase.Evaluator/Tensors/Squeeze.cs +++ b/src/Nncase.Evaluator/Tensors/Squeeze.cs @@ -43,14 +43,19 @@ public Expr Visit(IShapeEvaluateContext context, Squeeze target) var dims = context.GetArgument(target, Squeeze.Dim); if (dims is TensorConst dimConst) { - var dimValue = dimConst.Value.ToArray(); var rank = input.CheckedShape.Count; + var dimValue = dimConst.Value.ToArray().Select(x => x < 0 ? 
x + rank : x).ToArray(); var outDims = Enumerable.Range(0, rank).Where(i => !dimValue.Contains(i)).Select(i => inShape[i]).ToArray(); if (outDims.Length == 0) { return 1; } + if (outDims.Length == input.CheckedShape.Rank) + { + throw new InvalidOperationException("Bad Squeeze Shape Expr"); + } + return IR.F.Tensors.Stack(new IR.Tuple(outDims), 0); } diff --git a/src/Nncase.Evaluator/Tensors/Stack.cs b/src/Nncase.Evaluator/Tensors/Stack.cs index 17795cf3ff..00cb5c4472 100644 --- a/src/Nncase.Evaluator/Tensors/Stack.cs +++ b/src/Nncase.Evaluator/Tensors/Stack.cs @@ -58,7 +58,13 @@ public Cost Visit(ICostEvaluateContext context, Stack target) public Expr Visit(IShapeEvaluateContext context, Stack target) { var inShape = context.GetArgumentShape(target, Stack.Inputs); - return IR.F.Tensors.Concat(new IR.Tuple(inShape[0], Tensor.From(new[] { 1 })), 0); + Expr one = new[] { 1 }; + if (inShape[0].CheckedShape.IsScalar) + { + one = 1; + } + + return IR.F.Tensors.Concat(new IR.Tuple(inShape[0], one), 0); } public Metric Visit(IMetricEvaluateContext context, Stack target) @@ -72,6 +78,11 @@ public Metric Visit(IMetricEvaluateContext context, Stack target) private IRType Visit(ITypeInferenceContext context, Stack target, TupleType inputs) { + if (inputs.Count == 0) + { + return new InvalidType("Tuple count should not be zero"); + } + if (context.GetArgument(target, Stack.Axis) is TensorConst axis_con) { var axis_v = axis_con.Value.ToScalar(); diff --git a/src/Nncase.Evaluator/Tensors/Trilu.cs b/src/Nncase.Evaluator/Tensors/Trilu.cs index 980ae79986..3963c4a983 100644 --- a/src/Nncase.Evaluator/Tensors/Trilu.cs +++ b/src/Nncase.Evaluator/Tensors/Trilu.cs @@ -14,7 +14,7 @@ namespace Nncase.Evaluator.Tensors; /// /// Evaluator for . /// -public class TriluEvaluator : IEvaluator, ITypeInferencer, ICostEvaluator, IShapeEvaluator +public class TriluEvaluator : IEvaluator, ITypeInferencer, ICostEvaluator, IShapeEvaluator, IMetricEvaluator { public IValue Visit(IEvaluateContext context, Trilu target) { @@ -49,4 +49,13 @@ public Expr Visit(IShapeEvaluateContext context, Trilu target) { return context.GetArgumentShape(target, Trilu.Input); } + + public Metric Visit(IMetricEvaluateContext context, Trilu target) + { + var returnType = context.GetReturnType(); + return new() + { + [MetricFactorNames.OffChipMemoryTraffic] = CostUtility.GetMemoryAccess(returnType), + }; + } } diff --git a/src/Nncase.Evaluator/Tensors/UnSqueeze.cs b/src/Nncase.Evaluator/Tensors/UnSqueeze.cs index 11e600e833..b8c937c44a 100644 --- a/src/Nncase.Evaluator/Tensors/UnSqueeze.cs +++ b/src/Nncase.Evaluator/Tensors/UnSqueeze.cs @@ -57,7 +57,7 @@ public Expr Visit(IShapeEvaluateContext context, Unsqueeze target) } else { - var index = IR.F.Math.Max(new Call(new Rank(), outShape) + dimVal + 1, 0); + var index = IR.F.Math.Max(IR.F.Tensors.Cast(IR.F.Tensors.ShapeOf(outShape)[0], DataTypes.Int32) + dimVal + 1, 0); outShape = ShapeExprUtility.Insert(outShape, index, 1); } } diff --git a/src/Nncase.Evaluator/Tensors/Where.cs b/src/Nncase.Evaluator/Tensors/Where.cs index a9b7acf2cb..db614bc6d9 100644 --- a/src/Nncase.Evaluator/Tensors/Where.cs +++ b/src/Nncase.Evaluator/Tensors/Where.cs @@ -72,12 +72,19 @@ public Cost Visit(ICostEvaluateContext context, Where target) public Expr Visit(IShapeEvaluateContext context, Where target) { + var x = context.GetArgumentShape(target, Where.X); if (target.IsTfWhere) { + var condValue = context.GetArgument(target, Where.Cond); + var condShape = context.GetArgumentShape(target, Where.Cond); + if 
(condValue.CheckedShape.Rank == 1) + { + return IR.F.Tensors.Stack(new IR.Tuple(new[] { condShape[0], x[0] }), 0); + } + throw new NotImplementedException(); } - var x = context.GetArgumentShape(target, Where.X); var y = context.GetArgumentShape(target, Where.Y); var cond = context.GetArgumentShape(target, Where.Cond); return ShapeExprUtility.BroadcastShape(x, y, cond); diff --git a/src/Nncase.Evaluator/TypeInference.cs b/src/Nncase.Evaluator/TypeInference.cs index ce54f0aae6..a0749e8fdd 100644 --- a/src/Nncase.Evaluator/TypeInference.cs +++ b/src/Nncase.Evaluator/TypeInference.cs @@ -132,7 +132,7 @@ public static IRType BroadcastType(DataType dataType, params TensorType[] inputs var inDim = inDimIndex < 0 ? 1 : inShape[inDimIndex].Value!.Value; if (inDim == 0) { - throw new InvalidOperationException("Input dimension should not be 0."); + return new InvalidType("Input dimension should not be 0."); } inputDims[i] = inDim; @@ -352,6 +352,11 @@ public static IRType TransposeType(TensorType input, Expr perm) } var permt = permValue.Value.ToArray(); + if (input.Shape.Count != permt.Length) + { + return new InvalidType("Transpose should have perm.size == inShape.size"); + } + var outShape = ApplyPerm(input.Shape, permt); return input with { Shape = outShape }; } diff --git a/src/Nncase.Evaluator/TypeInferenceVisitor.cs b/src/Nncase.Evaluator/TypeInferenceVisitor.cs index 208d085058..8ffd28f715 100644 --- a/src/Nncase.Evaluator/TypeInferenceVisitor.cs +++ b/src/Nncase.Evaluator/TypeInferenceVisitor.cs @@ -123,7 +123,7 @@ protected override IRType VisitLeafCall(Call expr) var type = expr.Target switch { Op op => CompilerServices.InferenceOp(op, _context, _inferencer_cache), - BaseFunction func => ((CallableType)func.CheckedType).ReturnType, + BaseFunction func => BaseFunctionInfer(expr, func), _ => new InvalidType("Target of call expression should be either a function or an op."), }; _context.CurrentCall = null; @@ -393,4 +393,14 @@ private void SetCheckedType(Expr expr, IRType type) IRHelpers.SetRawCheckedType(expr, type); IsFullyInferenced &= type is not InvalidType; } + + private IRType BaseFunctionInfer(Call call, BaseFunction func) + { + if (func.CheckedType is InvalidType) + { + return func.CheckedType; + } + + return ((CallableType)func.CheckedType).ReturnType; + } } diff --git a/src/Nncase.Importer/Onnx/Conv2DTranspose.cs b/src/Nncase.Importer/Onnx/Conv2DTranspose.cs index 370a7c5bdf..0ba62bfc6e 100644 --- a/src/Nncase.Importer/Onnx/Conv2DTranspose.cs +++ b/src/Nncase.Importer/Onnx/Conv2DTranspose.cs @@ -46,8 +46,8 @@ private Expr VisitConv2DTranspose(NodeProto op) .Match( o => Tensor.From(o), () => IR.Util.GetConvTransposeOutputShape( - input, - weights, + ShapeOf(input), + ShapeOf(weights), strides.ToArray(), outputPadding, pads, diff --git a/src/Nncase.Importer/TFLite/Conv2DTranspose.cs b/src/Nncase.Importer/TFLite/Conv2DTranspose.cs index 50c781d3b0..d5687e7cdb 100644 --- a/src/Nncase.Importer/TFLite/Conv2DTranspose.cs +++ b/src/Nncase.Importer/TFLite/Conv2DTranspose.cs @@ -17,14 +17,19 @@ public partial class TFLiteImporter { private Expr VisitConv2DTranspose(in tflite.Operator op) { - var outShape = ((TensorConst)GetInputExprs(op, 0)).Value.ToArray(); + var outShape = GetInputExprs(op, 0); var newOutShape = new[] { outShape[0], outShape[3], outShape[1], outShape[2] }; var (input, weights) = GetInputExprs(op, 2, 1); - Expr bias = Enumerable.Repeat(0f, newOutShape[1]).ToArray(); + Expr bias; if (op.InputsLength > 3) { bias = GetInputExprs(op, 3); } + else + { + var oc = 
IR.F.Tensors.ShapeOf(weights)[0]; + bias = IR.F.Tensors.Expand(new[] { 0f }, IR.F.Tensors.StackScalar(oc)); + } var options = op.BuiltinOptionsAsTransposeConvOptions(); var (_, _) = Util.GetHW(input); @@ -45,7 +50,7 @@ private Expr VisitConv2DTranspose(in tflite.Operator op) F.Tensors.NHWCToNCHW(input), F.Tensors.NHWCToNCHW(weights), bias, - newOutShape, + IR.F.Tensors.Stack(new IR.Tuple(newOutShape), 0), stride, padding, Tensor.From(new long[] { 0, 0, 0, 0 }), diff --git a/src/Nncase.Importer/TFLite/TFLiteImporter.cs b/src/Nncase.Importer/TFLite/TFLiteImporter.cs index d4eee88c46..8a6343933e 100644 --- a/src/Nncase.Importer/TFLite/TFLiteImporter.cs +++ b/src/Nncase.Importer/TFLite/TFLiteImporter.cs @@ -66,17 +66,40 @@ public TFLiteImporter(byte[] tfliteModel, CompileSession compileSession) protected override (IEnumerable Inputs, Dictionary VarMap) CreateInputs() { var inputsCount = _subGraph.InputsLength; - var created_inputs = new Var[inputsCount]; + var createdInputs = new Var[inputsCount]; + var dynVarMap = Enumerable + .Range(0, inputsCount) + .SelectMany(i => + { + var tensor = _subGraph.Tensors(_subGraph.Inputs(i))!.Value; + return Enumerable.Range(0, tensor.ShapeSignatureLength).Select(i => tensor.ShapeSignature(i)).Where(i => i < 0); + }) + .ToHashSet() + .ToArray() + .Order() + .Select(i => new Var(i.ToString(), new TensorType(DataTypes.Int32, Shape.Scalar))) + .ToDictionary(v => v.Name, v => v); + + if (dynVarMap.Count > 1) + { + throw new NotImplementedException(); + } + + var varMap = new Dictionary(); for (int i = 0; i < inputsCount; i++) { var inputId = _subGraph.Inputs(i); var tensor = _subGraph.Tensors(inputId)!.Value; var input = new Var(tensor.Name, GetIRType(tensor)); - created_inputs[i] = input; + var shape = input.CheckedShape.Select(x => x.IsFixed ? (Expr)x.FixedValue : dynVarMap.First().Value).ToArray(); + varMap[input] = shape; + createdInputs[i] = input; _outputTensors.Add(inputId, input); } - return (created_inputs, new()); + CompileSession.CompileOptions.ShapeBucketOptions = + CompileSession.CompileOptions.ShapeBucketOptions with { VarMap = varMap }; + return (createdInputs, varMap); } protected override void ConvertOp() @@ -133,7 +156,7 @@ private static Dimension[] GetShapeArray(tflite.Tensor tensor) } return Enumerable.Range(0, tensor.ShapeLength).Select(i => - tensor.Shape(i) == -1 ? Dimension.Unknown : tensor.Shape(i)).ToArray(); + tensor.ShapeSignature(i) < 0 ? Dimension.Unknown : tensor.Shape(i)).ToArray(); } private void Visit(in tflite.Operator op) diff --git a/src/Nncase.Importer/TFLite/Tile.cs b/src/Nncase.Importer/TFLite/Tile.cs index 09ef628f71..2495df0edb 100644 --- a/src/Nncase.Importer/TFLite/Tile.cs +++ b/src/Nncase.Importer/TFLite/Tile.cs @@ -12,7 +12,7 @@ public partial class TFLiteImporter private Expr VisitTile(in tflite.Operator op) { var (input, multiples) = GetInputExprs(op, 0, 1); - return Tile(input, multiples); + return Tile(input, Cast(multiples, DataTypes.Int64)); } } } diff --git a/src/Nncase.Passes/Rules/Lower/BroadcastMarker.cs b/src/Nncase.Passes/Rules/Lower/BroadcastMarker.cs new file mode 100644 index 0000000000..b83a062c7a --- /dev/null +++ b/src/Nncase.Passes/Rules/Lower/BroadcastMarker.cs @@ -0,0 +1,79 @@ +// Copyright (c) Canaan Inc. All rights reserved. +// Licensed under the Apache license. See LICENSE file in the project root for full license information. 
+ +using System; +using Nncase.IR; +using Nncase.IR.Math; +using Nncase.IR.Tensors; +using Nncase.PatternMatch; +using Nncase.Utilities; +using static Nncase.Passes.Rules.Lower.BroadcastMarkerHelper; +using static Nncase.PatternMatch.Utility; +using static Nncase.Utilities.ReplaceUtility; + +namespace Nncase.Passes.Rules.Lower; + +// e.g. matmul(reshape(marker(x))) -> matmul(marker(reshape(marker(x)))) +[RuleGenerator] +public partial class BroadcastInputMarker : RewriteRule +{ + public override Pattern Pattern => IsCallWildcard( + "outer", + IsWildcard(), + InputPattern); + + public Pattern InputPattern => IsCallWildcard( + "call", + IsWildcard(), + IsRangeOfMarker( + "marker", + IsWildcard(), + IsWildcard())); + + public Expr? GetReplace(Call outer, Call call, Marker marker) + { + if (!NotChangeRangeOp(call.Target)) + { + return null; + } + + if (outer.Target is MatMul && CompilerServices.TryMatchRoot(outer.Arguments[1], InputPattern, new(), out var matchResult)) + { + var rhsMarker = (Marker)matchResult["marker"]; + var rhsCall = (Call)matchResult["call"]; + var lhs = marker.With(target: ReplaceCallFirstParam(call, marker)); + var rhs = rhsMarker.With(target: ReplaceCallFirstParam(rhsCall, rhsMarker)); + return ReplaceCallParams(outer, (0, lhs), (1, rhs)); + } + + return ReplaceCallFirstParam(outer, marker.With(target: ReplaceCallFirstParam(call, marker))); + } +} + +// e.g. marker(reshape(matmul(x))) -> marker(reshape(marker(matmul(x)))) +[RuleGenerator] +public partial class BroadcastOutputMarker : RewriteRule +{ + public override Pattern Pattern => IsRangeOfMarker( + "marker", + IsCallWildcard("input", IsWildcard(), IsCallWildcard(null, IsWildcard())), + IsWildcard()); + + public Expr? GetReplace(Call input, Marker marker) + { + if (!NotChangeRangeOp(input.Target)) + { + return null; + } + + return ReplaceCallFirstParam(input, marker.With(target: input.Arguments[0])); + } +} + +internal static class BroadcastMarkerHelper +{ + public static bool NotChangeRangeOp(Expr op) + { + return op is Squeeze || op is Unsqueeze || op is Reshape || op is Broadcast; + } +} diff --git a/src/Nncase.Passes/Rules/Neutral/AddRangeOfAndMarker.cs b/src/Nncase.Passes/Rules/Neutral/AddRangeOfAndMarker.cs index a53c2d3166..656a452263 100644 --- a/src/Nncase.Passes/Rules/Neutral/AddRangeOfAndMarker.cs +++ b/src/Nncase.Passes/Rules/Neutral/AddRangeOfAndMarker.cs @@ -125,8 +125,13 @@ public static bool CheckOp(Op op) { if (!pairs.ContainsKey(callParams[i])) { + // With dynamic shapes, ranges are collected first and bucketing happens afterwards, while matmul-to-conv2d conversion requires the shape to be known + // Under dynamic shapes the execution order is: range -> bucketing -> matmul-to-conv2d + // The rhs of matmul must be checked here: a dynamic matmul will not go through quantization, while a static one will always be converted to conv2d + // Therefore a matmul whose rhs is const is assumed to always be convertible to conv2d bool isWeights = ((call.Target is Conv2D || call.Target is Conv2DTranspose) && (i == 1)) - || (call.Target is LSTM && i > 0); + || (call.Target is LSTM && i > 0) + || (call.Target is MatMul && i == 1 && callParams[1] is TensorConst); if (!configExist && !useAutoMixQuant) { diff --git a/src/Nncase.Passes/Rules/Neutral/CombineReshape.cs b/src/Nncase.Passes/Rules/Neutral/CombineReshape.cs index 3f0b39eb42..3f0c2be75d 100644 --- a/src/Nncase.Passes/Rules/Neutral/CombineReshape.cs +++ b/src/Nncase.Passes/Rules/Neutral/CombineReshape.cs @@ -94,7 +94,8 @@ public CombineConstBinaryReshape() { var significantInputShape = input.CheckedShape.ToValueArray().Where(x => x > 1).ToArray(); var constSize = constInput.CheckedShape.ToValueArray()[0]; - if (significantShape.SequenceEqual(significantInputShape) && oldShape[^1] == constSize) + + if 
(significantShape.SequenceEqual(significantInputShape) && oldShape.Length > 0 && oldShape[^1] == constSize) { var broadcastIndex = Array.LastIndexOf(input.CheckedShape.ToValueArray(), constSize); var newConstShape = Enumerable.Repeat(1, input.CheckedShape.Rank - 1 - broadcastIndex).ToList(); diff --git a/src/Nncase.Passes/Rules/Neutral/CombineTranspose.cs b/src/Nncase.Passes/Rules/Neutral/CombineTranspose.cs index c90986b45d..dbe323c1ee 100644 --- a/src/Nncase.Passes/Rules/Neutral/CombineTranspose.cs +++ b/src/Nncase.Passes/Rules/Neutral/CombineTranspose.cs @@ -81,7 +81,13 @@ public CombineConstBinaryTranspose() private Expr? GetReplace(Binary binary, Call binaryCall, Expr x, Expr y, Expr perm) { - var expandDim = perm.CheckedShape.Size - ((TensorConst)perm).Value.ToArray()[perm.CheckedShape.Size - 1] - 1; + var permV = ((TensorConst)perm).Value.ToArray(); + if (permV.Length == 0) + { + return null; + } + + var expandDim = perm.CheckedShape.Size - permV[perm.CheckedShape.Size - 1] - 1; if (x is Const) { diff --git a/src/Nncase.Passes/Rules/Neutral/FoldGatherReshape.cs b/src/Nncase.Passes/Rules/Neutral/FoldGatherReshape.cs new file mode 100644 index 0000000000..f0b23530f0 --- /dev/null +++ b/src/Nncase.Passes/Rules/Neutral/FoldGatherReshape.cs @@ -0,0 +1,29 @@ +// Copyright (c) Canaan Inc. All rights reserved. +// Licensed under the Apache license. See LICENSE file in the project root for full license information. + +using System.Linq; +using Nncase.IR; +using Nncase.PatternMatch; +using static Nncase.IR.TypePatternUtility; +using static Nncase.PatternMatch.F.Tensors; +using static Nncase.PatternMatch.Utility; + +namespace Nncase.Passes.Rules.Neutral; + +[RuleGenerator] +public sealed partial class FoldGatherReshape : RewriteRule +{ + // Reshape(Gather(Shape, 0, 0), new[] { 0 }) -> GetItem(Shape, 0) + public override Pattern Pattern => IsGather( + IsReshape(IsWildcard("input"), IsTensorConst("newShape")), IsTensorConst("axis"), IsTensorConst("index")); + + private Expr? GetReplace(Expr input, int[] newShape, int axis, int index) + { + if (newShape.SequenceEqual(new[] { 1 }) && axis == 1) + { + return input[index]; + } + + return null; + } +} diff --git a/src/Nncase.Passes/Rules/Neutral/FoldIf.cs b/src/Nncase.Passes/Rules/Neutral/FoldIf.cs new file mode 100644 index 0000000000..ca6fde86c9 --- /dev/null +++ b/src/Nncase.Passes/Rules/Neutral/FoldIf.cs @@ -0,0 +1,24 @@ +// Copyright (c) Canaan Inc. All rights reserved. +// Licensed under the Apache license. See LICENSE file in the project root for full license information. + +using System; +using Nncase.IR; +using Nncase.IR.Math; +using Nncase.IR.NN; +using Nncase.PatternMatch; +using static Nncase.IR.TypePatternUtility; +using static Nncase.PatternMatch.Utility; + +namespace Nncase.Passes.Rules.Neutral; + +[RuleGenerator] +public sealed partial class FoldIf : RewriteRule +{ + public override Pattern Pattern => IsWildcard("expr", expr => expr is If @if && @if.Condition is TensorConst); + + private Expr? GetReplace(If expr) + { + var cond = ((TensorConst)expr.Condition).Value.ToScalar(); + return cond ? expr.Then : expr.Else; + } +} diff --git a/src/Nncase.Passes/Rules/Neutral/ShapeBucket.cs b/src/Nncase.Passes/Rules/Neutral/ShapeBucket.cs deleted file mode 100644 index 739e9dfd80..0000000000 --- a/src/Nncase.Passes/Rules/Neutral/ShapeBucket.cs +++ /dev/null @@ -1,1227 +0,0 @@ -// Copyright (c) Canaan Inc. All rights reserved. -// Licensed under the Apache license. See LICENSE file in the project root for full license information. 
- -using System; -using System.Collections; -using System.Collections.Generic; -using System.Diagnostics; -using System.Globalization; -using System.IO; -using System.Linq; -using System.Reactive; -using System.Threading.Tasks; -using System.Transactions; -using DryIoc; -using DryIoc.ImTools; -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Toolkit.HighPerformance; -using NetFabric.Hyperlinq; -using Nncase.Diagnostics; -using Nncase.IR; -using Nncase.IR.Math; -using Nncase.IR.NN; -using Nncase.IR.Tensors; -using Nncase.Passes.Analysis; -using Nncase.Passes.Mutators; -using Nncase.Passes.Transforms; -using Nncase.PatternMatch; -using Nncase.Utilities; -using static Nncase.IR.F.Tensors; -using static Nncase.Passes.Rules.Neutral.ShapeBucketHelper; -using static Nncase.PatternMatch.F.Math; -using static Nncase.PatternMatch.Utility; -using static Nncase.Utilities.ReplaceUtility; -using Dimension = Nncase.IR.Dimension; -using Tuple = System.Tuple; - -namespace Nncase.Passes.Rules.Neutral; - -public class BucketFusion : Fusion -{ - public BucketFusion(string name, string moduleKind, Expr body, ReadOnlySpan parameters, Var[] effectVar) - : base( - name, moduleKind, body, parameters) - { - EffectVar = effectVar; - } - - public BucketFusion(string moduleKind, Expr body, ReadOnlySpan parameters, Var[] effectVar) - : base( - moduleKind, - body, - parameters) - { - EffectVar = effectVar; - } - - public BucketFusion(string name, string moduleKind, Var[] effectVar, Expr body, params Var[] parameters) - : base(name, moduleKind, body, parameters) - { - EffectVar = effectVar; - } - - public BucketFusion(string moduleKind, Var[] effectVar, Expr body, params Var[] parameters) - : base(moduleKind, body, parameters) - { - EffectVar = effectVar; - } - - public Var[] EffectVar { get; set; } - - public bool IsSimple - { - get - { - var names = Name.Split("_"); - return names.Length == 2 && (names[0] == "Binary" || names[0] == "Unary"); - } - } - - public static BucketFusion FromNormalFusion(Fusion f, Var[] effectVars) - { - return new BucketFusion(f.Name, "stackvm", f.Body, f.Parameters.ToArray(), effectVars); - } - - public new BucketFusion With(string? name = null, string? moduleKind = null, Expr? body = null, Var[]? parameters = null) - => new BucketFusion(name ?? Name, moduleKind ?? ModuleKind, body ?? Body, parameters ?? Parameters, EffectVar); -} - -[RuleGenerator] -public partial class MarkerCallToFusion : RewriteRule - where T : Op -{ - private static int _counter; - - private Call? _currentCall; - - public string ModuleKind => "stackvm"; - - public override Pattern Pattern => IsRangeOfMarker( - "callMarker", - IsCallWildcard(null, IsOp()), - IsTensorConst()); - - protected virtual bool MustHaveMarker => true; - - private string Name => _currentCall!.Target.GetType().Name; - - private string RelPath => $"{_counter}_{_currentCall!.Target.GetType().Name}"; - - public static Marker[] GetCallInputs(Call call) => - call.Arguments.ToArray().OfType().Where(x => x.Target is not TensorConst).ToArray(); - - public virtual bool Check(Call call) - { - return true; - } - - public Expr? 
GetReplace(Marker callMarker) - { - var call = (Call)callMarker.Target; - _currentCall = call; - DumpIR(callMarker, "0_origin", RelPath); - if (!Check(call)) - { - return null; - } - - var argsMarker = GetCallInputs(call); - var args = argsMarker.Select(arg => arg.Target).ToArray(); - var varMap = CompileSession.CompileOptions.ShapeBucketOptions.VarMap; - var set = MakeEffectVarArray(varMap, args); - var fusionVars = argsMarker.Select(arg => new Var(arg.CheckedType)).ToArray(); - var inputsWithMarker = - fusionVars.Zip(argsMarker).Select(pair => pair.Second.With(target: pair.First)).ToArray(); - - var pairs = inputsWithMarker.Select((input, i) => (i, (Expr)input)).ToArray(); - - // arguments用到其他input的地方就要replace对应的input - var newCall = ReplaceUtility.ReplaceCallParams(call.Target, call.Arguments.ToArray(), pairs); - var newCallWithMarker = callMarker.With(target: newCall); - - // 处理其他的参数用到了分段的input的情况 - // 即便body只有一个call,但这里是针对所有参数的表达式进行替换,比如反卷积的output shape是一个用到了需要分段的input的表达式 - // 如果不加这个则output shape引用的原始的未分段的输入会再次塞进来 - var body = fusionVars.Zip(args).Aggregate((Expr)newCallWithMarker, (newBody, tuple) => - { - var (fusionVar, arg) = tuple; - return ReplaceUtility.ReplaceExpr(newBody, arg, fusionVar); - }); - - var f = new BucketFusion($"{Name}_{_counter}", ModuleKind, set, body, fusionVars); - - // PrintEffectVar(f.Name, set); - Expr outerCall = newCallWithMarker.With(target: new Call(f, argsMarker)); - DumpIR(outerCall, "1_after", RelPath); - _counter++; - if (body.Users.Count > 1) - { - throw new InvalidOperationException(); - } - - return outerCall; - } -} - -public class Conv2DToFusion : MarkerCallToFusion -{ -} - -public class Conv2DTransposeToFusion : MarkerCallToFusion -{ - // when OutputShape is Const, it means output shape is not effected by input. - public override bool Check(Call call) => call.Arguments[Conv2DTranspose.OutputShape.Index] is not Const; -} - -public class MatmulToFusion : MarkerCallToFusion -{ -} - -public class SigmoidToFusion : MarkerCallToFusion -{ -} - -public class LeakyReluToFusion : MarkerCallToFusion -{ -} - -public class TransposeToFusion : MarkerCallToFusion -{ - protected override bool MustHaveMarker => false; -} - -public class PadToFusion : MarkerCallToFusion -{ - protected override bool MustHaveMarker => false; - - public override bool Check(Call call) => ((Pad)call.Target).PadMode == PadMode.Constant; -} - -public class UnaryToFusion : MarkerCallToFusion -{ - public override bool Check(Call call) - { - var list = new[] { UnaryOp.Abs, UnaryOp.Neg, UnaryOp.Acos, UnaryOp.Asin }; - var op = ((Unary)call.Target).UnaryOp; - return call.CheckedShape.Rank > 1 && list.Contains(op); - } -} - -// todo: do more check for binary -public class BinaryToFusion : MarkerCallToFusion -{ - public override bool Check(Call call) => call.CheckedShape.Rank > 1; -} - -[RuleGenerator] -public partial class ClearRequire : RewriteRule -{ - public override Pattern Pattern { get; } = IsRequire(require => true, IsTensorConst("predicate"), IsWildcard("expr")); - - public Expr? GetReplace(bool predicate, Expr expr) - { - if (expr is If && predicate) - { - return expr; - } - - return null; - } -} - -[RuleGenerator] -public partial class FoldRepeatMarker : RewriteRule -{ - public override Pattern Pattern { get; } = IsRangeOfMarker( - "markerA", - IsRangeOfMarker( - "markerB", - IsWildcard(), - IsWildcard("rangeB")), - IsWildcard("rangeA")); - - public Expr? 
GetReplace(Expr rangeA, Expr rangeB, Marker markerB) - { - if (rangeA == rangeB) - { - return markerB; - } - - return null; - } -} - -[RuleGenerator] -public partial class ClearFusionOuterMarker : RewriteRule -{ - public static Pattern CallerPattern => IsCall( - "caller", - IsFusion(null, "stackvm", IsWildcard(), GenerateParameters(null)), - GenerateParameters(null)); - - public override Pattern Pattern { get; } = IsRangeOfMarker("marker", CallerPattern, IsWildcard()); - - public Expr? GetReplace(Marker marker, Call caller) - { - return caller; - } -} - -[RuleGenerator] -public partial class FusionBucket : RewriteRule -{ - private static int _counter; - - private string _relPath = string.Empty; - - public override Pattern Pattern => IsCall( - "outerCall", - IsFusion( - "fusion", - "stackvm", - IsWildcard("fusionBody"), - GenerateParameters(null)), - GenerateParameters(null)); - - public static int[] ComputeSegmentList(int segmentCount, int min, int max) - { - var size = (max - min) / segmentCount; - return Enumerable.Range(0, segmentCount - 1).Select(i => min + (i * size)).Append(max).ToArray(); - } - - public static Expr PreProcess(Var input, Dictionary inputInfo, Dictionary varValues, Dictionary fusionInputData, Expr[] fusionInputs, int i) - { - var fixedShape = ShapeEvaluate(input, inputInfo, varValues, fusionInputData); - return new Call(new BucketPad(), input, fixedShape); - } - - // info:(InputVar -> DimVar) - // VarInfo:(DimVar -> Value) - // fusionInfo:(InputVar -> DimVar) - public static int[] ShapeEvaluate(Expr expr, Dictionary info, Dictionary varInfo, Dictionary fusionInfo) - { - // var info is used for compute shape expr - var dummyInput = MakeDummyInput(info, varInfo); - var fusionDummyInput = - MakeDummyInput( - fusionInfo, - varInfo.Concat(dummyInput).ToDictionary(pair => pair.Key, pair => pair.Value)); - var shapeExpr = - expr.EvaluateShapeExpr(info.Concat(fusionInfo).ToDictionary(pair => pair.Key, pair => pair.Value)); - - if (!shapeExpr.InferenceType()) - { - throw new InvalidOperationException(); - } - - // used for shape expr evaluate - // 1. main input - // 2. fusion input - // 3. 
shape var - var newEvaluatorInfo = dummyInput.Concat(fusionDummyInput).Concat(varInfo) - .ToDictionary(pair => pair.Key, pair => pair.Value); - - var shape = shapeExpr.Evaluate(newEvaluatorInfo); - return shape.AsTensor().ToArray(); - } - - public static (Dictionary MinDict, Dictionary MaxDict) GetBoundDict( - Dictionary inputInfo, ShapeBucketOptions options) - { - // find vars in Input ShapeExpr - var vars = inputInfo.Values.SelectMany(x => x).OfType().ToHashSet().ToArray(); - - // DimVarName -> Dict.key -> Dict.Value - var minDict = options.RangeInfo.ToDictionary( - pair => vars.FindFirst(v => v.Name == pair.Key), - pair => (IValue)Value.FromTensor(pair.Value.Min)); - var maxDict = options.RangeInfo.ToDictionary( - pair => vars.FindFirst(v => v.Name == pair.Key), - pair => (IValue)Value.FromTensor(pair.Value.Max)); - return (minDict, maxDict); - } - - public static Expr MakeSplitEntry(Expr originBody, Var[] fusionVars, Dictionary inputInfo, Dictionary varInfo, Dictionary fusionInputdata, Dictionary fusionInputsShape, Expr[] fusionInputs, string relPath, int seg) - { - // 避免这里的修改影响到原始的body,每个分支需要进行自己的修改 - // todo: 但是或许是这里引起的复制 - var call = originBody.Clone(); - - // 找到拷贝的call里面所有var,和fusion的原始var要对应上 - var finder = new FindVar(); - finder.Visit(call); - var newVars = finder.Vars; - - var fixInputs = fusionVars - .Select((arg, i) => PreProcess(arg, inputInfo, varInfo, fusionInputdata, fusionInputs, i)).ToArray(); - - // 替换逻辑:新的body中的var -> fusion原始的var -> target为fusion的call的input - // 本质上只是对这个body的所有输入做替换 - call = fusionVars.Select(v => newVars.FindFirst(newVar => newVar.Name == v.Name)).Zip(fixInputs).Aggregate( - call, - (sum, pair) => - { - return ReplaceExpr(sum, pair.First, pair.Second); - }); - if (!call.InferenceType()) - { - DumpIR(call, "InvalidType"); - throw new InvalidOperationException(); - } - - var originShape = originBody.EvaluateShapeExpr(fusionInputsShape); - originShape.InferenceType(); - - var rank = call.CheckedShape.Rank; - - // 对body的输出进行slice - var body = (Expr)Slice(call, Enumerable.Repeat(0, rank).ToArray(), Cast(originShape, DataTypes.Int32), rank); - return body; - } - - public Expr FixInput(Expr body, int[][] shapeList, Var[] fusionVars, Expr[] outerArgs) - { - var result = fusionVars.Zip(outerArgs).Zip(shapeList).Aggregate(body, (sum, data) => - { - var ((fusionVar, arg), fixShape) = data; - Expr expr = new Call(new FixShape(), arg, fixShape); - if (arg is Marker m) - { - expr = m.With(target: expr); - } - - return ReplaceExpr(sum, fusionVar, expr); - }); - return result; - } - - public Expr? 
GetReplace(Call outerCall, BucketFusion fusion, Expr fusionBody) - { - if (fusion.IsSimple) - { - return fusion.Parameters.ToArray().Zip(outerCall.Arguments.ToArray()).Aggregate(fusion.Body, (sum, data) => - { - var (fusionVar, arg) = data; - return ReplaceExpr(sum, fusionVar, arg); - }); - } - - // Console.WriteLine($"FusionBucketGetReplace {_counter} {fusion.Name}"); - _relPath = $"{_counter}"; - DumpIR(outerCall, $"BucketOriginFusion_{fusion.Name}", _relPath); - - var varMap = CompileSession.CompileOptions.ShapeBucketOptions.VarMap; - - var fusionInputsShapeExpr = MakeFusionInputShapeExpr(outerCall, fusion, varMap); - CheckAlive(fusionInputsShapeExpr); - - // ensure alive in rewrite, release when return - // using var pinner = new ExprPinner(fusionInputsShapeExpr.Values.SelectMany(x => x).ToArray()); - var options = CompileSession.CompileOptions.ShapeBucketOptions; - var (minDict, maxDict) = GetBoundDict(varMap, options); - - var fusionVars = fusion.Parameters.ToArray(); - - // compute fixed Shape - var minFixedShapeList = ComputeFixedShape(fusionVars, minDict, varMap, fusionInputsShapeExpr); - var maxFixedShapeList = ComputeFixedShape(fusionVars, maxDict, varMap, fusionInputsShapeExpr); - - // PrintMinMaxShape(minFixedShapeList, maxFixedShapeList, _relPath); - // 2. get dim info(inputIndex, (dimIndex, range) - var counts = ComputeCounts(minFixedShapeList, maxFixedShapeList, out int totalCount); - if (totalCount == 0 || (minFixedShapeList[0].SequenceEqual(maxFixedShapeList[0]) && - minFixedShapeList[1].SequenceEqual(maxFixedShapeList[1]))) - { - DumpIR(fusionBody, "BucketResultFix", _relPath); - var fix = FixInput(fusionBody, minFixedShapeList, fusionVars, outerCall.Arguments.ToArray()); - _counter++; - return fix; - } - - // todo: process total count, matmul maybe multi count, but other should check this - if (totalCount > 1) - { - // Console.WriteLine($"{fusion.Name} totalCount > 1"); - // return null; - } - - var args = outerCall.Arguments.ToArray(); - var fusionInputShapes = MakeShapeOfFusionInput(fusion, args); - - var dimVarValues = MakeVarValuesForAllSegment(options); - var info = ComputeSegmentInfo(counts, options); - var body = Split(fusionBody, fusionVars, info, 0, 1, dimVarValues, args, varMap, fusionInputsShapeExpr, fusionInputShapes); - body.InferenceType(); - - if (body.Users.Count > 1) - { - throw new InvalidOperationException(); - } - - // FixInput Replace Var - var newBody = ReplaceFusionVarWithCallArgs(fusion, args, body); - - // let bind - if (newBody is If @if) - { - newBody = IR.F.Math.Require(true, @if.With(paramList: args)); - } - - _counter++; - return newBody; - } - - private static void PrintMinMaxShape(int[][] minFixedShapeList, int[][] maxFixedShapeList, string relPath) - { - string str = string.Empty; - Console.Write("min "); - str += "min "; - foreach (int[] shape in minFixedShapeList) - { - var s = DumpUtility.SerializeShape(shape) + " "; - str += s; - Console.Write(s); - } - - Console.Write("max "); - str += "max "; - foreach (int[] shape in maxFixedShapeList) - { - var s = DumpUtility.SerializeShape(shape) + " "; - str += s; - Console.Write(s); - } - } - - // 计算出使用哪个位置的input进行分段 - private static SegmentInfo ComputeSegmentInfo( - (int InputIndex, (int First, (int First, int Second) Second)[] Range)[] counts, ShapeBucketOptions options) - { - var (iIndex, dimIndex, (min, max)) = counts.Select(x => - { - Debug.Assert(x.Range.Length <= 2, "x.range.Length <= 2"); - return (inputIndex: x.InputIndex, x.Range[0].First, x.Range[0].Second); - }).ToArray().First(); 
- - var segments = ComputeSegmentList(options.SegmentsCount, min, max); - var info = new SegmentInfo(iIndex, dimIndex, segments); - return info; - } - - // make dummy value from InputInfo - // VarInfo:(DimVar -> Value) - private static Dictionary - MakeDummyInput(Dictionary info, Dictionary varInfo) => - info.ToDictionary( - pair => pair.Key, - pair => - { - var shapeExpr = Stack(new IR.Tuple(pair.Value.Select(x => Cast(x, DataTypes.Int32)).ToArray()), 0); - var shape = shapeExpr.Evaluate(varInfo).AsTensor(); - return ConstantOfShape( - shape, - Cast(0, pair.Key.CheckedDataType)).Evaluate(varInfo); - }); - - private static (int InputIndex, (int First, (int First, int Second) Second)[] Range)[] ComputeCounts( - int[][] minFixedShapeList, int[][] maxFixedShapeList, out int totalCount) - { - (int InputIndex, (int First, (int First, int Second) Second)[] Range)[] counts = minFixedShapeList.Zip(maxFixedShapeList).Select((pair, inputIndex) => - { - var (minShape, maxShape) = pair; - - // (range, dimIndex) - var range = Enumerable.Range(0, minShape.Length).Zip(minShape.Zip(maxShape)).Where(data => - { - var (dimIndex, pair) = data; - return pair.First != pair.Second; - }).ToArray(); - return (inputIndex, range); - }).Where(pair => pair.range.Length > 0).ToArray(); - totalCount = counts.Length; - return counts; - } - - private static Expr ReplaceFusionVarWithCallArgs(BucketFusion fusion, Expr[] args, Expr newBody) => - fusion.Parameters.ToArray().Zip(args).Aggregate(newBody, (sum, pair) => - { - var (param, arg) = pair; - var result = ReplaceExpr(sum, param, arg); - return result; - }); - - // 计算shape,而不是shape表达式 - private static Dictionary MakeShapeOfFusionInput(BucketFusion fusion, Expr[] args) - { - var fusionInputShapes = fusion.Parameters - .ToArray() - .Zip(args) - .ToDictionary(pair => pair.First, pair => - { - var shape = Cast((Expr)ShapeOf(pair.Second), DataTypes.Int32); - return Enumerable.Range(0, pair.Second.CheckedShape.Rank).Select(i => shape[i]).ToArray(); - }); - return fusionInputShapes; - } - - private static void CheckAlive(Dictionary fusionInputInfo) - { - foreach (var value in fusionInputInfo.Values) - { - foreach (var expr in value) - { - if (!expr.IsAlive) - { - throw new NotImplementedException(); - } - } - } - } - - private static Dictionary MakeFusionInputShapeExpr(Call call, BucketFusion fusion, Dictionary varMap) - { - var data = fusion.Parameters.ToArray().Zip(call.Arguments.ToArray().Select((arg, i) => - { - var result = arg.EvaluateShapeExpr(varMap); - return Enumerable.Range(0, arg.CheckedShape.Rank).Select(i => - { - var res = result[i]; - return res; - }).ToArray(); - })).Select(pair => new KeyValuePair(pair.First, pair.Second)); - var fusionInputData = data.ToDictionary(pair => pair.Key, pair => pair.Value); - return fusionInputData; - } - - private int[][] ComputeFixedShape(Expr[] fusionVars, Dictionary varInfo, Dictionary varMap, Dictionary fusionInputData) => - fusionVars.Select((arg, i) => - { - var fixedShape = ShapeEvaluate(arg, varMap, varInfo, fusionInputData); - return fixedShape; - }).ToArray(); - - // 计算每个var在不同的段下的值 - private Dictionary MakeVarValuesForAllSegment(ShapeBucketOptions options) - { - int segmentCount = options.SegmentsCount; - var varRange = options.RangeInfo; - var varMap = options.VarMap; - var varAndInputAllSegment = varRange.ToDictionary(pair => pair.Key, pair => - { - var (min, max) = pair.Value; - var segments = ComputeSegmentList(segmentCount, min, max); - return segments; - }); - - var vars = varMap.Values.SelectMany(x => 
x).OfType().ToHashSet().ToArray(); - - // DimVarName -> Dict.key -> Dict.Value - var varValues = varAndInputAllSegment.ToDictionary( - pair => vars.FindFirst(v => v.Name == pair.Key), - pair => { return pair.Value.OrderByDescending(x => x).ToArray(); }); - return varValues; - } - - private Expr Split(Expr fusionBody, Var[] fusionVars, SegmentInfo info, int current, int limit, Dictionary varValues, Expr[] fusionInputs, Dictionary varMap, Dictionary fusionInputData, Dictionary fusionInputsShape) - { - // do with marker - // 分段是针对input做的,而不是替换了input。 - // arg var -> compute - // arg var -> bucket -> compute - // arg -> bucket -> compute - var (inputIndex, dimIndex, segments) = info; - var dim = ShapeOf(fusionInputs[inputIndex])[dimIndex]; - var sp = ConstantOfShape(new[] { 1 }, Cast(0, fusionBody.CheckedDataType)); - int i = 0; - - var body = segments.OrderByDescending(x => x).Aggregate( - (Expr)IR.F.Math.Require(false, sp, "input dim large than limit"), - (sum, seg) => - { - // 根据var,也就是target为这个fusion的call的参数来进行判断落在哪个段 - var cond = dim <= (long)seg; - - // select var value for current segment - var varInfo = varValues.ToDictionary(pair => pair.Key, pair => (IValue)Value.FromTensor(pair.Value[i])); - var thenBody = current + 1 < limit - ? Split(fusionBody, fusionVars, info, current + 1, limit, varValues, fusionInputs, varMap, fusionInputData, fusionInputsShape) - : MakeSplitEntry(fusionBody, fusionVars, varMap, varInfo, fusionInputData, fusionInputsShape, fusionInputs, _relPath, seg); - var elseBody = sum; - i++; - var result = new If(cond, thenBody, elseBody); - return result; - }); - - return body; - } -} - -public class FindVar : ExprVisitor -{ - public HashSet Vars { get; set; } = new(); - - // todo: if visit call(VarFusion), then return EffectVar - protected override Expr VisitLeafVar(Var expr) - { - Vars.Add(expr); - return expr; - } - - protected override Expr DefaultVisitLeaf(Expr expr) => expr; -} - -public abstract class MergeFusionBase : RewriteRule -{ - protected static readonly Dictionary OpList = new() - { - { typeof(Reshape).TypeHandle, 0 }, - { typeof(Unsqueeze).TypeHandle, 0 }, - { typeof(Squeeze).TypeHandle, 0 }, - - // { typeof(Slice).TypeHandle, 0 }, - { typeof(Concat).TypeHandle, 0 }, - { typeof(Cast).TypeHandle, 0 }, - { typeof(IR.Tensors.Stack).TypeHandle, 0 }, - { typeof(Expand).TypeHandle, 0 }, - { typeof(ConstantOfShape).TypeHandle, 0 }, - { typeof(Where).TypeHandle, 0 }, - { typeof(Compare).TypeHandle, 0 }, - { typeof(Gather).TypeHandle, 0 }, - - // compute - { typeof(Transpose).TypeHandle, 1 }, - { typeof(Unary).TypeHandle, 1 }, - { typeof(Binary).TypeHandle, 2 }, - { typeof(Clamp).TypeHandle, 2 }, - { typeof(Pad).TypeHandle, 2 }, - - // ... 
- { typeof(Conv2D).TypeHandle, 2 }, - { typeof(MatMul).TypeHandle, 2 }, - { typeof(Tile).TypeHandle, 0 }, - }; - - protected int Counter { get; set; } - - public static bool AllConst(Call originCall) - { - // 暂时只能合并其他输入为const的 - if (originCall.Arguments.Length == 1) - { - return true; - } - - var otherArgs = originCall.Arguments[1..].ToArray(); - if (otherArgs.All(x => x is Const || x is Marker { Target: Const })) - { - return true; - } - - return false; - } - - public bool ValidTarget(Expr target) - { - if (target is ActivationOp) - { - return true; - } - - if (OpList.TryGetValue(target.GetType().TypeHandle, out _)) - { - return true; - } - - return false; - } -} - -[RuleGenerator] -public partial class MergeNextMarkerToFusion : MergeFusionBase -{ - // 用于将fusion call 外部的marker合并进来 - public override Pattern Pattern => IsRangeOfMarker("marker", new MergeNextCallToFusion().FusionCall, IsWildcard()); - - // 外部保留marker给下一个使用 - public Expr? GetReplace(Marker marker, Call fusionOuterCall, BucketFusion fusion, RunPassContext context) - { - if (fusion.Body is Marker) - { - return null; - } - - // marker - if (fusionOuterCall.Users.Count > 1 || marker.Users.Count > 1) - { - return null; - } - - var result = - marker.With(target: fusionOuterCall.With(target: fusion.With(body: marker.With(target: fusion.Body)))); - return result; - } -} - -[RuleGenerator] -public partial class MergePrevMarkerToFusion : MergeFusionBase -{ - public override Pattern Pattern => IsCall( - "fusionOuterCall", - IsFusion( - "fusion", - "stackvm", - IsWildcard(), - GenerateParameters(null)), - GenerateParameters(null, IsRangeOfMarker("marker", IsWildcard(), IsWildcard()))); - - // fusion(marker(xx)) { var } -> fusion(marker(xx)) { marker(var) } - public Expr? GetReplace(Marker marker, Call fusionOuterCall, BucketFusion fusion) - { - var hasVisited = fusion.Parameters[0].Users.Where(u => u is not Fusion).All(u => u is Marker); - if (hasVisited) - { - return null; - } - - // 不更改原始的input中的marker,要拷贝到fusion里面,将所有的var替换为marker(var) - // 同时将fusion的body中用到原始var的地方替换为marker(var) - // MergeCall的时候是支持marker的 - var newBody = ReplaceExpr(fusion.Body, fusion.Parameters[0], marker.With(target: fusion.Parameters[0])); - - // 重新构建fusion - var newFusion = fusion.With(body: newBody); - - // 返回新的call - DumpIR(newFusion, $"{Counter++}_{fusion.Name}"); - return fusionOuterCall.With(target: newFusion); - } -} - -[RuleGenerator] -public partial class MergeNextCallToFusion : MergeFusionBase -{ - public Pattern FusionCall => IsCall( - "fusionOuterCall", - IsFusion( - "fusion", - "stackvm", - IsWildcard(), - GenerateParameters(null)), - GenerateParameters(null)); - - public override Pattern Pattern => IsCallWildcard( - "nextCall", - IsWildcard("target"), - IsAlt( - "maybeFusionCallMarker", - FusionCall, - IsRangeOfMarker(FusionCall, IsWildcard()))); - - // nextCall(fusion(x)) -> fusion(nextCall(x)) - // nextCall(marker(fusion(x))) -> fusion(nextCall(marker(x))) - public Expr? 
GetReplace(Call nextCall, Expr maybeFusionCallMarker, Expr target, Call fusionOuterCall, BucketFusion fusion) - { - // todo:next call and marker maybe cause dup - if (!ValidTarget(target)) - { - return null; - } - - // todo: only for single input, effect var must be same - if (MultiUser(maybeFusionCallMarker)) - { - return null; - } - - if (!AllConst(nextCall)) - { - return null; - } - - DumpIR(nextCall, $"{Counter}_{fusion.Name}_{target.GetType().Name}_origin"); - - // 将call里面call fusion的部分替换为fusion的body - var oldBody = DupExpr(fusion.Body); - - // 这里必须新构建一个Expr,不能使用原始的nextCall Replace掉参数,不然如果外面有marker,那么replace以后的call还是会被外面的marker引用,因此会出现重复的情况 - // arg0可能是marker,如果是marker的话不能替换marker的参数,而是重新构造marker - var newBody = ReplaceCallParams(nextCall.Target, nextCall.Arguments.ToArray(), (0, (Expr)oldBody)); - - // 除了第一个参数的部分,其他参数可能会用到外面的东西,是不是可以作为var直接传进来??但是这会影响后面ToFusion的部分... - - // 更新fusion的body - var newFusion = fusion.With(body: newBody); - - // 创建新的call,target为fusion,参数为fusion的参数 // todo:针对非const的情况要处理这里 - // 但是第一个参数要注意,如果有marker那么需要处理marker // 这里如果arg是marker的话则需要copy一份,不然会导致marker的user重复,进而复制了if - // var newArgs = fusionOuterCall.Arguments.ToArray().Select(arg => arg is Marker m ? m.With() : arg).ToArray(); - var newArgs = fusionOuterCall.Arguments.ToArray().Select(DupExpr).ToArray(); - var call = (Expr)nextCall.With(target: newFusion, arguments: newArgs); - - // 附加next call的外面marker - DumpIR(call, $"{Counter++}_{fusion.Name}_{target.GetType().Name}_after"); - if (newBody.Users.Count > 1) - { - throw new InvalidOperationException($"{newFusion.Name} is Invalid"); - } - - return call; - } - - private static bool MultiUser(Expr nextCall) - { - // Marker(LeakyRelu(Marker(Fusion))) - // 如果user > 1 - if (nextCall.Users.Count > 1) - { - return true; - } - - // 只有一个user也可能是一个marker - if (nextCall.Users.First() is Marker m) - { - // 判断marker的user - if (m.Users.Count > 1) - { - return true; - } - } - - // 不是marker那就没问题,一定不是多个user - return false; - } - - private bool SameEffectVar(Call originCall, Fusion fusion) - { - var array = MakeEffectVarArray( - CompileSession.CompileOptions.ShapeBucketOptions.VarMap, - originCall.Arguments[^1..].ToArray()); - if (fusion is BucketFusion varFusion) - { - if (array.Length != 0 && !Enumerable.SequenceEqual(varFusion.EffectVar, array)) - { - return true; - } - } - else - { - return true; - } - - return false; - } -} - -[RuleGenerator] -public partial class MergePrevCallToFusion : MergeFusionBase -{ - public override Pattern Pattern => IsCall( - "fusionOuterCall", - IsFusion( - "fusion", - "stackvm", - IsWildcard(), - GenerateParameters(null)), - GenerateParameters( - null, - MaybeMarker("lhsArg", PrevCall("lhs")))); - - public Pattern PrevCall(string prefix) => IsCallWildcard($"{prefix}PrevCall", IsWildcard($"{prefix}Target")); - - public Pattern MaybeMarker(string exprName, Pattern exprPatten) => IsAlt( - exprName, - IsRangeOfMarker(exprPatten, IsWildcard()), - exprPatten); - - // 输入必须匹配marker,因为即便合并marker也是要在外面保留一份副本 - // fusion(marker(prevCall()) { var } -> fusion(var) { marker(prevCall()) } - // fusion((prevCall()) { var } -> fusion(var) { prevCall() } - - // dfs - // xx | marker(xx)不行, 会先匹配到xx - // xx(marker) | xx 可以 - public Expr? 
GetReplace(Call fusionOuterCall, BucketFusion fusion, Call lhsPrevCall, Expr lhsTarget, Expr lhsArg) - { - // 从inputs中筛选出所有需要合并的 - var (prevCallsInfo, prevOutputMaybeMarker) = CollectInputsInfo(fusionOuterCall); - if (prevCallsInfo.Length == 0) - { - return null; - } - - // 要被合并的call - var prevCalls = prevCallsInfo.Select(x => x.Item1).ToArray(); - var prevCallStr = string.Join("_", prevCalls.Select(call => call.Target.GetType().Name)); - DumpIR(fusionOuterCall, $"{Counter}_{prevCallStr}_{fusion.Name}_origin"); - - // 新的fusion var,根据要被合并的call的argument来构建 - var newVars = prevCalls.Select(arg => new Var(arg.Arguments[0].CheckedType)).ToArray(); - - // 被合并的call更新参数,arg0替换为新的fusion的var,创建新的被合并call - // 并且保存marker - var newPrevCalls = prevCalls.Select((prevCall, i) => - { - // 每个prevCall的arg0替换为fusionVar - var oldArgs = prevCall.Arguments.ToArray(); - var newArg = oldArgs[0] is Marker marker ? (Expr)marker.With(target: newVars[i]) : newVars[i]; - var newArgs = ReplaceItems(oldArgs, (0, newArg)); - - // newArgs,是var,但是可能需要保存var自身的range - var newPrevCall = prevCall.With(arguments: newArgs); - return prevOutputMaybeMarker[i] is Marker m ? (Expr)m.With(target: newPrevCall) : newPrevCall; - }).ToArray(); - - var dupFusionBody = fusion.Body; - - // 新的fusion body将原来的var换成prevCall - var newBody = prevCallsInfo.Select(pair => fusion.Parameters[pair.Item2]).Zip(newPrevCalls).Aggregate( - (Expr)dupFusionBody, (sum, pair) => - { - // 此时prevCall携带新的var - var (fusionVar, newPrevCall) = pair; - return ReplaceExpr(sum, fusionVar, newPrevCall); - }); - - // 新的fusion的param更换为新的var - var newParams = ReplaceItems( - fusion.Parameters.ToArray(), - newVars.Zip(prevCallsInfo).Select(tuple => (tuple.Second.Item2, (Expr)tuple.First)).ToArray()) - .OfType().ToArray(); - var newFusion = fusion.With(body: newBody, parameters: newParams); - - // 新的args为原来所有的prev call的arg[0] - var newArgs = ReplaceItems( - fusionOuterCall.Arguments.ToArray(), - prevCallsInfo.Select(pair => - { - return (pair.Item2, DupExpr(pair.Item1.Arguments[0])); - }).ToArray()); - - // 原始的fusion的call更换target为新的fusion,以及arg0替换为prevCall的arg0,其他不变 - var call = fusionOuterCall.With(target: newFusion, arguments: newArgs); - DumpIR(call, $"{Counter++}_{prevCallStr}_{fusion.Name}_after"); - return call; - } - - private ((Call, int)[] PrevCalls, Expr[] MayBeMarkers) CollectInputsInfo(Call fusionOuterCall) - { - // todo: 判断rhs的effect var才行 - var prevCalls = new List<(Call, int)>(); - var maybeMarkers = new List(); - var args = fusionOuterCall.Arguments.ToArray(); - for (int i = 0; i < args.Length; ++i) - { - var rhsArg = args[i]; - if (rhsArg is Marker marker && marker.Target is Call rhsPrevCall) - { - if (marker.Users.Count > 1) - { - continue; - } - - var rhsTarget = rhsPrevCall.Target; - - if (!IsInvalid(rhsPrevCall, rhsTarget)) - { - prevCalls.Add((DupExpr(rhsPrevCall), i)); - maybeMarkers.Add(DupExpr(marker)); - } - } - - if (rhsArg is Call rhsCall) - { - var rhsTarget = rhsCall.Target; - - if (!IsInvalid(rhsCall, rhsTarget)) - { - var rhs = DupExpr(rhsCall); - prevCalls.Add((rhs, i)); - maybeMarkers.Add((Expr)rhs); - } - } - } - - return (prevCalls.ToArray(), maybeMarkers.ToArray()); - } - - private bool IsInvalid(Call lhsPrevCall, Expr lhsTarget) - { - if (lhsPrevCall.Users.Count > 1) - { - return true; - } - - if (!ValidTarget(lhsTarget)) - { - return true; - } - - if (!AllConst(lhsPrevCall)) - { - return true; - } - - return false; - } -} - -public class MergeBucketFusion : ModulePass -{ - protected override Task RunCoreAsync(IRModule input, 
RunPassContext context) - { - // 1. save effect var info - var main = (Function)input.Entry!; - var s = new SearchBucketFusion(); - s.Visit(main); - var set = s.FusionEffectVars(); - - // 2. merge - var analyzerMananger = CompileSession.GetRequiredService(); - var analysis = new Dictionary - { - [typeof(IExprUserAnalysisResult)] = analyzerMananger.GetAnaylsis(main), - }; - var rewriter = new DataFlowMergeRewriter(); - var post = (Function)rewriter.Rewrite( - main, - new IMergeRewriteRule[] - { - new SameInputFusionMergeRule(), - new MultiInputFusionMergeRule(), - new ShortCutFusionMergeRuleLeft(), - new ShortCutFusionMergeRuleRight(), - }, - (rule, option) => new BucketFusionGroupMutator(rule, option), - new() { AnalysisResults = analysis }); - - DumpIR(post, "AfterMergeFusion"); - - // 3. translate fusion to BucketFusion - var mutator = new Passes.Mutators.Substitutor(e => - { - if (e is Call c && c.Target is Fusion f) - { - CompilerServices.Rewrite(f.Body, new[] { new FoldRepeatMarker() }, new()); - var effectVars = f.Name.Split("_").Chunk(2).SelectMany(list => - { - var originName = string.Join("_", list); - return set[originName]; - }).ToHashSet().ToArray(); - return c.With(target: BucketFusion.FromNormalFusion(f, effectVars)); - } - - return null; - }); - mutator.Visit(post, Unit.Default); - DumpIR(post, "AfterTranslateFusion"); - return Task.FromResult(input); - } -} - -internal sealed class BucketFusionGroupMutator : Passes.Mutators.FusionGroupMutator -{ - public BucketFusionGroupMutator(IMergeRewriteRule preOrderfusionRule, RunPassContext passOptions) - : base(preOrderfusionRule, passOptions) - { - } - - public override bool MergedFusionCheckCallBack(Fusion merged_fusion, HashSet candidate_fusions) - { - // 回避反卷积,反卷积的shape表达式目前会引起重复的计算 - if (merged_fusion.Name.Contains("Conv2DTranspose", StringComparison.Ordinal) || - candidate_fusions.Any(f => f.Name.Contains("Conv2DTranspose", StringComparison.Ordinal))) - { - return false; - } - - return true; - } -} - -internal record SegmentInfo(int InputIndex, int DimIndex, int[] Segments); - -internal static class ShapeBucketHelper -{ - public static void PrintEffectVar(string name, Var[] set) - { - Console.WriteLine($"{name} EffectVar:"); - foreach (var var in set) - { - Console.WriteLine(var.Name); - } - } - - // avoid dup marker user - public static T DupExpr(T body) - where T : Expr - { - T dupFusionBody = body switch - { - Marker m => (T)(object)m.With(target: DupExpr(m.Target)), - Call c => (T)(object)c.With(), - _ => body, - }; - return dupFusionBody; - } - - public static Var[] MakeEffectVarArray(Dictionary varMap, params Expr[] args) - { - var visitor = new FindVar(); - args.ForEach(arg => - { - var argShapeExpr = arg.EvaluateShapeExpr(varMap); - visitor.Visit(argShapeExpr); - }); - var vars = visitor.Vars.ToHashSet(); - - // PrintEffectVar("VisitorVars", vars.ToArray()); - var inputAndDimVarMap = - varMap.ToDictionary(pair => pair.Key, pair => pair.Value.OfType().ToHashSet().ToArray()); - var allDimVars = varMap.Values.SelectMany(x => x).OfType(); - var afterProcessVars = vars.SelectMany(var => - { - if (inputAndDimVarMap.TryGetValue(var, out var dimVars)) - { - return dimVars; - } - - if (allDimVars.Contains(var)) - { - return new[] { var }; - } - - return new[] { var }; - }).ToHashSet(); - return afterProcessVars.Intersect(allDimVars).ToHashSet().ToArray(); - } - - internal static void DumpIR(Expr expr, string prefix, string? 
reletivePath = null) - { - if (DumpScope.Current.IsEnabled(DumpFlags.Rewrite)) - { - DumpScope.Current.DumpIR(expr, prefix, reletivePath); - } - } -} - -internal class SearchBucketFusion : ExprVisitor -{ - private HashSet FusionSet { get; set; } = new(); - - public Dictionary FusionEffectVars() - { - return FusionSet.ToDictionary(s => s.Name, s => s.EffectVar); - } - - protected override Expr DefaultVisitLeaf(Expr expr) => expr; - - protected override Expr VisitLeafCall(Call expr) - { - if (expr.Target is BucketFusion f) - { - FusionSet.Add(f); - } - - return expr; - } -} diff --git a/src/Nncase.Passes/Rules/Neutral/SqueezeShape.cs b/src/Nncase.Passes/Rules/Neutral/SqueezeShape.cs index 7351304b15..b4885bcc8b 100644 --- a/src/Nncase.Passes/Rules/Neutral/SqueezeShape.cs +++ b/src/Nncase.Passes/Rules/Neutral/SqueezeShape.cs @@ -12,6 +12,7 @@ using Nncase.IR.NN; using Nncase.IR.Tensors; using Nncase.PatternMatch; +using static Nncase.IR.F.Math; using static Nncase.IR.F.Tensors; using static Nncase.IR.TypePatternUtility; using static Nncase.PatternMatch.F.Math; @@ -245,3 +246,158 @@ private Tuple, List> SqueezeTranspose(List oldShape, L return Reshape(Transpose(Reshape(input, new_shape.ToArray()), new_perm.ToArray()), newOutputShape); } } + +[RuleGenerator] +public sealed partial class SqueezeBinaryShape : IRewriteRule +{ + /// + public IPattern Pattern { get; } = IsBinary("binary", "binaryCall", x => true, IsWildcard("lhs") with { TypePattern = HasFixedShape() }, IsWildcard("rhs") with { TypePattern = HasFixedShape() }); + + /// + /// Squeeze input shape. + /// + /// left input shape. + /// right input shape. + /// Squeeze flag, new lhs, new rhs. + public (bool SqueezeOrNot, List NewAShape, List NewBShape) SqueezeInputShape(List a, List b) + { + var aSize = a.Count; + var bSize = b.Count; + + var squeezeTimes = Math.Max( + aSize > 4 ? aSize - 4 : 0, + bSize > 4 ? 
bSize - 4 : 0); + + if (squeezeTimes <= 0) + { + return (false, a, b); + } + + List newA = a; + List newB = b; + + if (aSize == bSize) + { + if (a.SequenceEqual(b)) + { + newA = SqueezeShape(a); + newB = SqueezeShape(b); + } + else + { + var canFold = Enumerable.Repeat(true, aSize).ToArray(); + var foldIndexCouples = new List<(int, int)>(); + + for (int i = 0; i < aSize; i++) + { + if (a[i] != b[i]) + { + canFold[i] = false; + } + } + + for (int i = aSize - 1; i > 0; i--) + { + if (canFold[i] && canFold[i - 1]) + { + foldIndexCouples.Add((i - 1, i)); + } + } + + while (squeezeTimes > 0 && foldIndexCouples.Count > 0) + { + var (front, back) = foldIndexCouples[0]; + newA[front] *= newA[back]; + newB[front] *= newB[back]; + + newA.RemoveAt(back); + newB.RemoveAt(back); + + foldIndexCouples.RemoveAt(0); + squeezeTimes--; + } + + for (int i = newA.Count - 1, count = newA.Count - 5; i >= 0 && count >= 0; i--) + { + if (newA[i] * newB[i] == 1) + { + newA.RemoveAt(i); + newB.RemoveAt(i); + count--; + } + } + + if (newA.Count > 4) + { + return (false, newA, newB); + } + } + } + else + { + if (aSize != 1) + { + newA = SqueezeShape(a); + } + + if (bSize != 1) + { + newB = SqueezeShape(b); + } + } + + return (true, newA, newB); + } + + private static List SqueezeShape(List shape) + { + var newShape = new List { 1, 1, 1, 1 }; + + for (int i = shape.Count - 1, k = 3; i >= 0; i--) + { + newShape[k] *= shape[i]; + if (k > 0) + { + k--; + } + } + + return newShape; + } + + private static List GetOutputShape(List a, List b) + { + if (a.Count == 1) + { + return b; + } + + if (b.Count == 1) + { + return a; + } + + var outputShape = a; + for (int i = 0; i < a.Count; i++) + { + outputShape[i] = Math.Max(a[i], b[i]); + } + + return outputShape; + } + + private Expr? GetReplace(Binary binary, Call binaryCall, Expr lhs, Expr rhs) + { + var lShape = lhs.CheckedShape.Count == 0 ? new Shape(new List { 1 }) : lhs.CheckedShape; + var rShape = rhs.CheckedShape.Count == 0 ? new Shape(new List { 1 }) : rhs.CheckedShape; + var (result, newLShape, newRShape) = SqueezeInputShape(lShape.ToValueList(), rShape.ToValueList()); + if (!result) + { + return null; + } + + var outputShape = GetOutputShape(lShape.ToValueList(), rShape.ToValueList()); + + return Reshape(Binary(binary.BinaryOp, Reshape(lhs, newLShape.ToArray()), Reshape(rhs, newRShape.ToArray())), outputShape.ToArray()); + } +} diff --git a/src/Nncase.Passes/Rules/ShapeBucket/FoldBucketReshape.cs b/src/Nncase.Passes/Rules/ShapeBucket/FoldBucketReshape.cs new file mode 100644 index 0000000000..2c78c5ca35 --- /dev/null +++ b/src/Nncase.Passes/Rules/ShapeBucket/FoldBucketReshape.cs @@ -0,0 +1,44 @@ +// Copyright (c) Canaan Inc. All rights reserved. +// Licensed under the Apache license. See LICENSE file in the project root for full license information. + +using System.Linq; +using Nncase.IR; +using Nncase.IR.Tensors; +using Nncase.PatternMatch; +using Nncase.Utilities; +using static Nncase.IR.TypePatternUtility; +using static Nncase.PatternMatch.F.Tensors; +using static Nncase.PatternMatch.Utility; + +namespace Nncase.Passes.Rules.ShapeBucket; + +[RuleGenerator] +public sealed partial class FoldBucketPadReshape : RewriteRule +{ + // Reshape(Gather(Shape, 0, 0), new[] { 0 }) -> GetItem(Shape, 0) + public override Pattern Pattern => IsReshape( + IsBucketPad(null, "bucketPad", IsWildcard(), IsTensorConst()), + IsTensorConst("newShape")); + + private Expr? 
GetReplace(Call bucketPad, Expr newShape) + { + return ReplaceUtility.ReplaceCallParams(bucketPad, (BucketPad.Shape.Index, newShape)); + } +} + +// todo: squeeze +[RuleGenerator] +public sealed partial class FoldBucketPadUnsqueeze : RewriteRule +{ + // Reshape(Gather(Shape, 0, 0), new[] { 0 }) -> GetItem(Shape, 0) + public override Pattern Pattern => IsUnsqueeze( + null, + "unsqueeze", + IsBucketPad(null, "bucketPad", IsWildcard(), IsTensorConst()), + IsTensorConst()); + + private Expr? GetReplace(Call bucketPad, Call unsqueeze) + { + return ReplaceUtility.ReplaceCallParams(bucketPad, (BucketPad.Shape.Index, unsqueeze.CheckedShape.ToValueArray())); + } +} diff --git a/src/Nncase.Passes/Rules/ShapeBucket/MergeBucketFusion.cs b/src/Nncase.Passes/Rules/ShapeBucket/MergeBucketFusion.cs new file mode 100644 index 0000000000..f22e2d4c70 --- /dev/null +++ b/src/Nncase.Passes/Rules/ShapeBucket/MergeBucketFusion.cs @@ -0,0 +1,743 @@ +// Copyright (c) Canaan Inc. All rights reserved. +// Licensed under the Apache license. See LICENSE file in the project root for full license information. + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Reactive; +using System.Threading.Tasks; +using DryIoc.FastExpressionCompiler.LightExpression; +using DryIoc.ImTools; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Toolkit.HighPerformance; +using NetFabric.Hyperlinq; +using Nncase.IR; +using Nncase.IR.Tensors; +using Nncase.Passes.Analysis; +using Nncase.Passes.Mutators; +using Nncase.PatternMatch; +using static Nncase.Passes.Rules.ShapeBucket.ShapeBucketHelper; +using static Nncase.PatternMatch.Utility; +using static Nncase.Utilities.ReplaceUtility; +using Tuple = Nncase.IR.Tuple; + +namespace Nncase.Passes.Rules.ShapeBucket; + +public class MergeBucketFusionPass : FunctionPass +{ + protected override async Task RunCoreAsync(BaseFunction input, RunPassContext context) + { + var main = (Function)input; + while (true) + { + var preHash = main.GetHashCode(); + CompilerServices.Rewrite(main, new IRewriteRule[] { new MultiUserCallToFusion(), new MergeTupleFusion() }, new()); + await new MergeSeqBucketFusion().RunAsync(main, context); + IRHelpers.DCE(main); + await new MergeMultiUsersFusion().RunAsync(main, context); + var postHash = main.GetHashCode(); + if (preHash == postHash) + { + break; + } + } + + return main; + } +} + +[RuleGenerator] +public partial class MergeTupleFusion : RewriteRule +{ + public override Pattern Pattern => IsTuple( + "tuple", + new VArgsPattern( + list => + { + return Enumerable.Range(0, list.Length).Select(_ => + IsWildcard(null, field => field is Call { Target: BucketFusion } && field.Users.Count == 1)).ToArray(); + }, + null)); + + private Expr? 
GetReplace(Tuple tuple) + { + var fields = tuple.Fields.ToArray().OfType().ToArray(); + + // merge input + var newArgs = new List(); + var newParams = new List(); + var oldParamsToNewArg = new List<(Var, Expr)>(); + foreach (var field in fields) + { + var fieldArgs = field.Arguments.ToArray(); + var fieldParams = ((BucketFusion)field.Target).Parameters; + for (var i = 0; i < fieldArgs.Length; i++) + { + var fieldArg = fieldArgs[i]; + if (!newArgs.Contains(fieldArg)) + { + var newVar = new Var(fieldArg.CheckedType); + newParams.Add(newVar); + newArgs.Add(fieldArg); + oldParamsToNewArg.Add((fieldParams[i], newVar)); + } + + oldParamsToNewArg.Add((fieldParams[i], newParams[newArgs.IndexOf(fieldArg)])); + } + } + + var fieldBodys = fields.Select(c => c.Target).OfType().Select(x => x.Body).ToArray(); + var newBody = ReplaceClone(new IR.Tuple(fieldBodys), oldParamsToNewArg.ToArray()); + var newFusion = new BucketFusion("stackvm", newBody, newParams.ToArray(), Array.Empty()); + var newCall = new Call(newFusion, newArgs.ToArray()); + return newCall; + } +} + +public class MergeSeqBucketFusion : FunctionPass +{ + protected override Task RunCoreAsync(BaseFunction input, RunPassContext context) + { + var main = (Function)input; + + // todo: fix + var mergeRelPath = string.Empty; + + // 1. get origin info + var s = new SearchBucketFusion(); + s.Visit(main); + var set = s.FusionEffectVars(); + + // 2. merge + var post = MergeFusion(main); + DumpIR(post, "AfterMergeFusion", mergeRelPath); + + // 3. translate fusion to BucketFusion + TranslateFusionToBucket(set, post, CompileSession); + DumpIR(post, "AfterTranslateFusion", mergeRelPath); + return Task.FromResult(post); + } + + private static void TranslateFusionToBucket(Dictionary set, Function post, CompileSession seesion) + { + var inputDimsVars = InputDimVars(seesion); + var mutator = new Passes.Mutators.Substitutor(e => + { + if (e is Call c && c.Target is Fusion f) + { + var effectVars = Array.Empty(); + if (inputDimsVars.Length <= 1) + { + effectVars = inputDimsVars; + } + else + { + effectVars = f.Name.Split("_").Chunk(2).SelectMany(list => + { + var originName = string.Join("_", list); + return set[originName]; + }).ToHashSet().ToArray(); + } + + return c.With(target: BucketFusion.FromNormalFusion(f, effectVars)); + } + + return null; + }); + mutator.Visit(post, Unit.Default); + } + + private Function MergeFusion(Function main) + { + var analyzerMananger = CompileSession.GetRequiredService(); + var analysis = new Dictionary + { + [typeof(IExprUserAnalysisResult)] = analyzerMananger.GetAnaylsis(main), + }; + CompilerServices.Rewrite(main, new[] { new ClearFusionOuterMarker() }, new()); + var rewriter = new DataFlowMergeRewriter(); + var post = (Function)rewriter.Rewrite( + main, + new IMergeRewriteRule[] + { + new SameInputFusionMergeRule(), new MultiInputFusionMergeRule(), new ShortCutFusionMergeRuleLeft(), + new ShortCutFusionMergeRuleRight(), + }, + (rule, option) => new BucketFusionGroupMutator(rule, option), + new() { AnalysisResults = analysis }); + + return post; + } +} + +public class MergeMultiUsersFusion : FunctionPass +{ + private static string MergeRelPath => MultiUserCallToFusion.Counter.ToString(); + + public static bool DetectedRing(Call outerCall, Expr[] users) + { + // var users = outerCall.Users.ToArray(); + // todo: fix this,TestComplexExpr + // var userArgs = users.SelectMany(user => ((Call)user).Arguments.ToArray()).Except(users).ToArray(); + // 用这个不过,但是好像会引起其他问题?? 
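+        // The block below collects every argument of every user and searches each argument's
+        // sub-expression for another member of `users`. If one user is reachable through the
+        // inputs of another, merging them all into a single fusion would make the merged call
+        // both a producer and a consumer of that user, i.e. a ring, so the merge is rejected.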
+ var userArgs = users.SelectMany(user => ((Call)user).Arguments.ToArray()).ToArray(); + foreach (var arg in userArgs) + { + var list = new FindExpr().Run(arg, users, outerCall, expr => + { + if (expr is Const) + { + return false; + } + + return users.Contains(expr); + }); + if (list.Count > 0) + { + return true; + } + } + + return false; + } + + protected override Task RunCoreAsync(BaseFunction input, RunPassContext context) + { + var main = (Function)input; + var c = new ReplaceVisitor(); + c.Replace(main); + DumpIR(main, "AfterMergeUser", MergeRelPath); + return Task.FromResult(input); + } + + private static (Expr? NewCall, UserInfo[] AllUsers) MergeMultiUserFusion(Call outerCall, BucketFusion fusion) + { + var users = outerCall.Users.ToArray(); + + var notSupport = ((Expr?)null, Array.Empty()); + if (users.Length == 0) + { + return notSupport; + } + + if (users.OfType().All(user => user.Target is GetItem)) + { + // 需要去重,可能一个getItem的user使用了多个getItem + // 但是去重的过程需要在collect info的时候做 + // 不然被去重的user不能正确的被替换掉 + users = users.SelectMany(user => user.Users.ToArray()).ToArray(); + } + + // todo: not support + if (users.Any(user => user is Tuple)) + { + // Console.WriteLine("HasTuple"); + return notSupport; + } + + var userInfos = CollectUsers(outerCall, users); + + // todo: support only one user, because merge fusion rule is not enough + // maybe a error + // if (userInfos.Length < 2) + // { + // return null; + // } + + // has invalid + if (userInfos.Length != users.Distinct().ToArray().Length) + { + Console.WriteLine("not all fusion call and getItemMode"); + return notSupport; + } + + if (outerCall.Users.Any(user => user is Tuple) || users.Any(user => user.CheckedType is TupleType)) + { + return notSupport; + } + + if (users.Any(user => + user is Call c && c.Arguments.ToArray().Any(arg => arg is Tuple || arg.CheckedType is TupleType))) + { + // todo: not implement + return notSupport; + } + + if (DetectedRing(outerCall, users)) + { + // Console.WriteLine("HasRing"); + return notSupport; + } + + if (outerCall.Users.ToArray().OfType().All(user => user.Target is GetItem)) + { + // Console.WriteLine("MeregForGetItem"); + Console.WriteLine(MergeRelPath); + } + + // todo: with tuple + // 1. all user are fusion + // 2. some user are fusion + // 3. 
no fusion + var oldUsers = userInfos.Select(x => x.User).ToArray(); + + var otherName = string.Join("\n", oldUsers.Select(x => x.Target switch + { + BucketFusion f => f.Name, + Op op => op.GetType().Name, + _ => string.Empty, + })); + Console.WriteLine($"Merge {fusion.Name}"); + Console.WriteLine(otherName); + var fusionDict = outerCall.Arguments.ToArray().Zip(fusion.Parameters.ToArray()).ToArray(); + + // 这个vars用于确定output的args里面哪些要加入,哪些要消除,另外还要包含多个user的那个 + // todo: 目前newParams已经去除重复 + var argMap = MakeNewVarsMap(userInfos, fusionDict, outerCall); + var newUsers = MakeNewUserExpr(userInfos, outerCall, argMap); + var newBody = MakeNewBody(newUsers); + + // todo: args去除重复,在不需要更新的情况下不进行更新 + var newArgs = argMap.NewArgs(); + var newParams = argMap.NewParams; + var newFusion = MakeNewFusion(newBody, fusion, newParams, oldUsers); + var newCall = MakeNewCall(newFusion, newArgs); + if (newArgs.ToHashSet().Count != newArgs.Length) + { + throw new InvalidOperationException("Has Repeat args"); + } + + if (!newCall.InferenceType()) + { + DumpIR(newCall, "newCallInvalid"); + throw new InvalidOperationException("InvalidNewCallInMergeMultiUser"); + } + + DumpIR(newCall, "newCall", MergeRelPath); + ArgsChecker(newArgs); + return (newCall, userInfos); + } + + private static FusionVarMapper MakeNewVarsMap(UserInfo[] userInfos, (Expr, Var)[] fusionDict, Call outerCall) + { + var originUsers = outerCall.Users.ToArray(); + var originArgs = fusionDict.Concat(userInfos.SelectMany(info => + { + var user = info.User; + if (user.Target is BucketFusion fusion) + { + if (info.GetItem != null) + { + var args = user.Arguments.ToArray().OfNoConst().ToArray(); + + // 这里需要删除所有outerCall user里面的getItem,有可能一个call用了多个getItem + return args.Zip(fusion.Parameters.ToArray()).Where(pair => !originUsers.Contains(pair.First)).ToArray(); + } + + return user.Arguments.ToArray().OfNoConst().Zip(fusion.Parameters.ToArray()); + } + + throw new NotImplementedException(); + })).ToArray(); + + var oldVars = originArgs.Select(item => item.Item2).ToArray(); + if (oldVars.ToHashSet().Count != oldVars.Length) + { + throw new InvalidOperationException("oldVar has dup"); + } + + var users = userInfos.Select(x => x.User).ToArray(); + + // 保证var顺序以及字典 + // 初始param应该包含fusion的 + var fusionParams = fusionDict.Select(pair => pair.Item2).ToArray(); + + // arg到var的映射,并不需要使用fusion的信息设置为初始值,因为合并的是user + var result = originArgs.Aggregate( + (Array.Empty<(Expr UserArg, Var RelativeNewVar, Var OldVar)>(), fusionParams), + (sum, pair) => + { + var (totalDict, totalVars) = sum; + var (arg, oldVar) = pair; + var fusionResult = FindFirst(fusionDict, arg); + + // fusion的参数中查看是否有这个arg,有的话则用fusion对应的var来替代 + if (fusionResult != null) + { + // 这个时候会有多个arg指向同一个RelativeNewVar,但是为了把这个arg替换为oldvar, 暂时要保留,后面需要去重 + // 不能在这里就去重,会引发错误 + return (totalDict.Append((arg, fusionResult!, oldVar)), totalVars); + } + + // 查看当前累积的参数中是否有这个arg,有的话用arg对应的新var来替代 + var result = FindFirst(totalDict.Select(tuple => (tuple.UserArg, tuple.RelativeNewVar)).ToArray(), arg); + if (result != null) + { + return (totalDict.Append((arg, result!, oldVar)), totalVars); + } + + // todo: maybe put fusion body in this is better? 
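+                    // Each entry accumulated here is an (UserArg, RelativeNewVar, OldVar) triple:
+                    // OldVar is a parameter of one of the original fusions (the merged call's or a
+                    // user's), RelativeNewVar is the variable it is rewritten to inside the merged
+                    // fusion, and UserArg is the outer expression later passed as the argument
+                    // bound to that variable.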
+ // arg是outerCall那就不需要添加任何var,因为这个arg会被替换为fusion的body + if (arg == outerCall) + { + return (totalDict, totalVars); + } + + // 其他参数被合并进来了,也就不需要再创建新的var,但是后面替换的时候也要对这种情况进行替换 + if (users.Contains(arg)) + { + return (totalDict, totalVars); + } + + // 普通的其他输入,进行替换 + var newVar = new Var(arg.CheckedType); + return (totalDict.Append((arg, newVar, oldVar)), totalVars.Append(newVar)); + }); + return new FusionVarMapper(result.fusionParams, result.Item1); + } + + private static Var? FindFirst((Expr, Var)[] totalDict, Expr arg) + { + var result = totalDict.Where(pair => pair.Item1 == arg).ToArray(); + if (result.Length > 0) + { + return result.First().Item2; + } + + return null; + } + + private static BucketFusion MakeNewFusion(Expr body, BucketFusion fusion, Var[] newParams, Call[] oldUsers) + { + // todo: EffectVar + var name = fusion.Name + "_" + string.Join("_", oldUsers.Select(x => x.Target switch + { + BucketFusion f => f.Name, + Op op => op.GetType().Name, + _ => string.Empty, + })); + if (name.Length > 100) + { + name = name.Substring(0, 100); + } + + return new BucketFusion(name, "stackvm", body, newParams, fusion.EffectVar); + } + + private static Call MakeNewCall(BucketFusion fusion, Expr[] args) + { + return new Call(fusion, args); + } + + private static Expr MakeNewBody(Expr[] newUsers) + { + if (newUsers.Length == 1) + { + return newUsers.First(); + } + + return new IR.Tuple(newUsers); + } + + private static Expr[] MakeNewUserExpr(UserInfo[] userInfos, Call outerCall, FusionVarMapper argMap) + { + var users = userInfos.Select(x => x.User).ToArray(); + + // 不能加入原始的body,因为所有的输出都被合并了,只有合并进来的输出了 + // todo: 如果user是getItem,那么需要换成getItem的target才行 + var originBody = new IR.Tuple(users.ToArray()); + + var newOriginBody = originBody.Clone(); + var finder = new FindVar(); + finder.Visit(newOriginBody); + + var map = argMap.OldToNewParam(); + + // replace + var fusionMap = newOriginBody.Fields.ToArray().Append(outerCall).OfType().Where(call => call.Target is BucketFusion) + .ToDictionary(call => call.Target, call => ((BucketFusion)call.Target).Body); + var mutator = new Passes.Mutators.Substitutor(e => + { + // 因为替换一个以后其他的都会发生变化,因此不能用call来匹配 + if (e is Call c) + { + if (c.Target is not BucketFusion) + { + return null; + } + + // 只有在call是属于users中才行,否则是 + var fusion = (BucketFusion)c.Target; + if (fusionMap.TryGetValue(fusion, out var body)) + { + // 被合并的fusion不需要做额外的操作 + if (fusion.Name == ((BucketFusion)outerCall.Target).Name) + { + return body; + } + + var paramList = fusion.Parameters.ToArray(); + + // todo: 注意,如果arg target不是users里面的fusion,那么就不替换。目前没实现这个部分 + var pairs = paramList.Zip(c.Arguments.ToArray()).Select((pair) => + { + // 默认是替换为arg,但是如果有其他可以替换的,那么替换为其他的 + var (param, arg) = pair; + if (map.TryGetValue(param, out var newVar)) + { + return (param, newVar); + } + + return pair; + }); + + // body中的var需要换成call对应的arg + return pairs.Aggregate(body, (sum, pair) => + { + return ReplaceExpr(sum, pair.First, pair.Second); + }); + + // return body; + } + } + + return null; + }); + mutator.Visit(newOriginBody, Unit.Default); + + // DumpIR(newOriginBody, "newOriginBody", relPath); + return newOriginBody.Fields.ToArray(); + } + + private static UserInfo[] CollectUsers(Call outerCall, Expr[] users) + { + var originUsers = outerCall.Users.ToArray(); + var outputs = users + .Distinct() // 去重,避免多个getItem被一个expr使用 + .Select((user, userIndex) => + { + // 找到fusion在user的arguments的哪个index + if (user is Call userCall) + { + var valid = userCall.Target switch + { + Op op => false, + BucketFusion => true, // 
todo: check effect var + _ => throw new NotImplementedException(), + }; + return (user, userIndex, valid); + } + else if (user is IR.Tuple) + { + // tuple should in other merge rule + var valid = false; + return (user, userIndex, valid); + } + else + { + var valid = false; + return (user, userIndex, valid); + } + }) + .Where(tuple => tuple.valid) + .Select(tuple => (tuple.user, tuple.userIndex)) + .Select(pair => + { + var user = (Call)pair.user; + var getItem = user.Arguments.ToArray().FindFirst(userArg => originUsers.Contains(userArg)); + return new UserInfo(user, pair.userIndex, getItem); + }) + .ToArray(); + return outputs; + } + + private record UserInfo(Call User, int UserIndex, Expr? GetItem); + + private class ReplaceVisitor : ExprVisitor + { + private Function? _fn; + + private bool _changed; + + private static int Counter => MultiUserCallToFusion.Counter; + + private static string RelPath => Counter.ToString(); + + private Expr Root => _fn!.Body; + + public void Replace(Function fn) + { + _fn = fn; + Visit(Root); + } + + protected override Expr VisitCall(Call expr) + { + if (_changed) + { + return expr; + } + + return base.VisitCall(expr); + } + + protected override Expr VisitLeafCall(Call expr) + { + // 回避错误的user计数与visitor的访问 + // visitor的逻辑会先访问叶子节点 + // 因此如果VisitLeaf叶子改了其user + // 那么expr同级的其他operand即便被修改了也还是会访问到 + // 因此一次只能做一个 + if (_changed) + { + return expr; + } + + if (expr is Call outerCall && outerCall.Target is BucketFusion fusion) + { + if (outerCall.Users.Count == 1 && outerCall.Users.First() is Function) + { + return expr; + } + + // Console.WriteLine($"Match {fusion.Name} counter:{Counter}"); + DumpIR(Root, "OriginRoot", RelPath); + + var (newCall, users) = MergeMultiUserFusion(outerCall, fusion); + if (newCall != null) + { + UpdateUse(users, newCall, outerCall); + DumpIR(Root, "rootAfterMerge", RelPath); + Root.InferenceType(); + if (Root.CheckedType is InvalidType) + { + throw new InvalidOperationException("InvalidRoot"); + } + + // todo: 检查已经被合并的fusion的名字是否还存在,存在就是错误 + AddCounter(); + + // CheckRepeat(Root); + _changed = true; + return newCall; + } + } + + return expr; + } + + protected override Expr DefaultVisitLeaf(Expr expr) => expr; + + private static void UpdateUse(UserInfo[] users, Expr newCall, Call outerCall) + { + // ref TestTupleGetItemOutputIsSingle + if (users.Distinct().ToArray().Length == 1) + { + ReplaceAllUsesWith(users[0].User, newCall); + return; + } + + var getItemMode = outerCall.Users.First() is Call c && c.Target is GetItem; + if (getItemMode) + { + // todo: getItemMode + partial merge maybe error + foreach ((var user, int userIndex, var _) in users) + { + ReplaceAllUsesWith(user, newCall[userIndex]); + } + } + else + { + for (var i = 0; i < users.Length; i++) + { + var newOperand = newCall.CheckedType is TupleType ? 
newCall[i] : newCall; + ReplaceAllUsesWith(users[i].User, newOperand); + } + } + } + + private void AddCounter() + { + MultiUserCallToFusion.Counter++; + } + } +} + +internal sealed class BucketFusionGroupMutator : Passes.Mutators.FusionGroupMutator +{ + public BucketFusionGroupMutator(IMergeRewriteRule preOrderfusionRule, RunPassContext passOptions) + : base(preOrderfusionRule, passOptions) + { + } + + public override bool MergedFusionCheckCallBack(Fusion mergedFusion, HashSet candidateFusions) + { + // ShowMergeCandidate(mergedFusion, candidateFusions); + + // 回避反卷积,反卷积的shape表达式目前会引起重复的计算 + if (mergedFusion.Name.Contains("Conv2DTranspose", StringComparison.Ordinal) || + candidateFusions.Any(f => f.Name.Contains("Conv2DTranspose", StringComparison.Ordinal))) + { + return false; + } + + return true; + } + + private static void ShowMergeCandidate(Fusion mergedFusion, HashSet candidateFusions) + { + Console.WriteLine("-----------------"); + Console.WriteLine(mergedFusion.Name); + Console.WriteLine("-----------------"); + foreach (var candidateFusion in candidateFusions) + { + Console.WriteLine(candidateFusion.Name); + } + + Console.WriteLine("-----------------"); + } +} + +// userArg -> oldVar 原始fusion的var +// oldVar -> RelativeNewVar 替换的过程 +internal record FusionVarMapper(Var[] NewParams, (Expr UserArg, Var RelativeNewVar, Var OldVar)[] ArgMap) +{ + public Dictionary OldToNewParam() + { + return ArgMap.ToDictionary(info => info.OldVar, info => info.RelativeNewVar); + } + + public Expr[] NewArgs() + { + // 多个arg指向相同的RelativeNewVar的情况,去重 + var data = ArgMap.Where(pair => NewParams.Contains(pair.RelativeNewVar)).DistinctBy(x => x.RelativeNewVar).ToArray(); + if (data.Length != NewParams.Length) + { + Console.WriteLine("error"); + } + + return data.Select(pair => pair.UserArg).ToArray(); + } +} + +internal class SearchBucketFusion : ExprVisitor +{ + private HashSet FusionSet { get; set; } = new(); + + public Dictionary FusionEffectVars() + { + return FusionSet.ToDictionary(s => s.Name, s => s.EffectVar); + } + + protected override Expr DefaultVisitLeaf(Expr expr) => expr; + + protected override Expr VisitLeafCall(Call expr) + { + if (expr.Target is BucketFusion f) + { + FusionSet.Add(f); + } + + return expr; + } +} diff --git a/src/Nncase.Passes/Rules/ShapeBucket/MergeCallToFusion.cs b/src/Nncase.Passes/Rules/ShapeBucket/MergeCallToFusion.cs new file mode 100644 index 0000000000..5118cb44e9 --- /dev/null +++ b/src/Nncase.Passes/Rules/ShapeBucket/MergeCallToFusion.cs @@ -0,0 +1,612 @@ +// Copyright (c) Canaan Inc. All rights reserved. +// Licensed under the Apache license. See LICENSE file in the project root for full license information. 
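// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the original patch): the
// FusionVarMapper record above stores (UserArg, RelativeNewVar, OldVar) entries,
// and NewArgs() has to collapse several user args that were mapped onto the same
// new fusion var before they become call arguments. A standalone model of that
// de-duplication, using strings instead of nncase Expr/Var types (all names
// below are hypothetical; DistinctBy needs System.Linq on .NET 6+):

using System.Collections.Generic;
using System.Linq;

internal sealed record ArgMappingSketch(string UserArg, string RelativeNewVar, string OldVar);

internal static class VarMapperSketch
{
    // Keep only mappings whose new var really is a fusion parameter, then keep
    // one representative user arg per new var, mirroring NewArgs() above.
    public static string[] NewArgs(IEnumerable<ArgMappingSketch> argMap, ISet<string> newParams) =>
        argMap.Where(m => newParams.Contains(m.RelativeNewVar))
              .DistinctBy(m => m.RelativeNewVar)
              .Select(m => m.UserArg)
              .ToArray();
}

// Example: mappings ("x", "v0", "p0") and ("y", "v0", "p1") with newParams = {"v0"}
// yield the single argument "x", matching the DistinctBy(RelativeNewVar) behaviour.
// ---------------------------------------------------------------------------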
+ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Xml; +using NetFabric.Hyperlinq; +using Nncase.IR; +using Nncase.IR.Tensors; +using Nncase.PatternMatch; +using static Nncase.Passes.Rules.ShapeBucket.ShapeBucketHelper; +using static Nncase.PatternMatch.F.Math; +using static Nncase.PatternMatch.Utility; +using static Nncase.Utilities.ReplaceUtility; + +namespace Nncase.Passes.Rules.ShapeBucket; +public abstract class MergeFusionBase : RewriteRule +{ + protected int Counter { get; set; } + + public static bool AllConst(Call originCall) + { + // 暂时只能合并其他输入为const的 + if (originCall.Arguments.Length == 1) + { + return true; + } + + var otherArgs = originCall.Arguments[1..].ToArray(); + if (otherArgs.All(x => x is Const || x is Marker { Target: Const })) + { + return true; + } + + return false; + } + + public bool ValidTarget(Expr target) + { + return CallValidator.ValidTarget(target); + } +} + +[RuleGenerator] +public partial class MergeNextMarkerToFusion : MergeFusionBase +{ + // 用于将fusion call 外部的marker合并进来 + public override Pattern Pattern => IsRangeOfMarker("marker", new MergeNextCallToFusion().FusionCall, IsWildcard()); + + // 外部保留marker给下一个使用 + public Expr? GetReplace(Marker marker, Call fusionOuterCall, BucketFusion fusion, RunPassContext context) + { + if (fusion.Body is Marker) + { + return null; + } + + // marker + if (fusionOuterCall.Users.Count > 1 || marker.Users.Count > 1) + { + return null; + } + + var result = + marker.With(target: fusionOuterCall.With(target: fusion.With(body: marker.With(target: fusion.Body)))); + return result; + } +} + +[RuleGenerator] +public partial class MergePrevMarkerToFusion : MergeFusionBase +{ + public override Pattern Pattern => IsCall( + "fusionOuterCall", + IsFusion( + "fusion", + "stackvm", + IsWildcard(), + GenerateParameters(null)), + GenerateParameters(null, IsRangeOfMarker("marker", IsWildcard(), IsWildcard()))); + + // fusion(marker(xx)) { var } -> fusion(marker(xx)) { marker(var) } + public Expr? GetReplace(Marker marker, Call fusionOuterCall, BucketFusion fusion) + { + var hasVisited = fusion.Parameters[0].Users.Where(u => u is not Fusion).All(u => u is Marker); + if (hasVisited) + { + return null; + } + + // 不更改原始的input中的marker,要拷贝到fusion里面,将所有的var替换为marker(var) + // 同时将fusion的body中用到原始var的地方替换为marker(var) + // MergeCall的时候是支持marker的 + var newBody = ReplaceExpr(fusion.Body, fusion.Parameters[0], marker.With(target: fusion.Parameters[0])); + + // 重新构建fusion + var newFusion = fusion.With(body: newBody); + + // 返回新的call + DumpIR(newFusion, $"{Counter++}_{fusion.Name}"); + return fusionOuterCall.With(target: newFusion); + } +} + +[RuleGenerator] +public partial class MergeNextCallToFusion : MergeFusionBase +{ + public Pattern FusionCall => IsCall( + "fusionOuterCall", + IsFusion( + "fusion", + "stackvm", + IsWildcard(), + GenerateParameters(null)), + GenerateParameters(null)); + + public override Pattern Pattern => IsCallWildcard( + "nextCall", + IsWildcard("target"), + IsAlt( + "maybeFusionCallMarker", + FusionCall, + IsRangeOfMarker(FusionCall, IsWildcard()))); + + // nextCall(fusion(x)) -> fusion(nextCall(x)) + // nextCall(marker(fusion(x))) -> fusion(nextCall(marker(x))) + public Expr? 
GetReplace(Call nextCall, Expr maybeFusionCallMarker, Expr target, Call fusionOuterCall, BucketFusion fusion) + { + var singleVar = CompileSession.CompileOptions.ShapeBucketOptions.VarMap.Values.SelectMany(x => x).OfType().ToHashSet().Count <= 1; + if (!singleVar && nextCall.Arguments.ToArray().OfType().Count() > 1) + { + return null; + } + + if (!ValidTarget(target)) + { + return null; + } + + // todo: only for single input, effect var must be same + if (MultiUser(maybeFusionCallMarker)) + { + return null; + } + + // ref test TestMergeNextWithUserHasMultiUser + if (MultiUser(nextCall)) + { + // 会复制 + return null; + } + + if (!AllConst(nextCall)) + { + return null; + } + + DumpIR(nextCall, $"{Counter}_{fusion.Name}_{target.GetType().Name}_origin"); + + // 将call里面call fusion的部分替换为fusion的body + var oldBody = fusion.Body; + + // 这里必须新构建一个Expr,不能使用原始的nextCall Replace掉参数,不然如果外面有marker,那么replace以后的call还是会被外面的marker引用,因此会出现重复的情况 + // arg0可能是marker,如果是marker的话不能替换marker的参数,而是重新构造marker + Expr newBody = ReplaceCallParams(nextCall.Target, nextCall.Arguments.ToArray(), (0, (Expr)oldBody)); + + // todo: 针对marker的测试 + if (nextCall.Users.Count == 1 && nextCall.Users.First() is Marker m) + { + newBody = m.With(target: newBody); + } + + // 除了第一个参数的部分,其他参数可能会用到外面的东西,是不是可以作为var直接传进来??但是这会影响后面ToFusion的部分... + + // 更新fusion的body + var newFusion = fusion.With(body: newBody); + + // 创建新的call,target为fusion,参数为fusion的参数 // todo:针对非const的情况要处理这里 + // 但是第一个参数要注意,如果有marker那么需要处理marker // 这里如果arg是marker的话则需要copy一份,不然会导致marker的user重复,进而复制了if + // var newArgs = fusionOuterCall.Arguments.ToArray().Select(arg => arg is Marker m ? m.With() : arg).ToArray(); + var newArgs = fusionOuterCall.Arguments.ToArray().ToArray(); + var call = (Expr)nextCall.With(target: newFusion, arguments: newArgs); + + // 附加next call的外面marker + DumpIR(call, $"{Counter++}_{fusion.Name}_{target.GetType().Name}_after"); + if (newBody.Users.Count > 1) + { + throw new InvalidOperationException($"{newFusion.Name} is Invalid"); + } + + ArgsChecker(newArgs); + + return call; + } + + private static bool MultiUser(Expr call) + { + // 不是marker直接判断count + if (call is Call && call.Users.Count > 1) + { + return true; + } + + // 是marker那么判断marker的users + if (call is Marker { Users.Count: > 1 }) + { + return true; + } + + // 不是marker那就没问题,一定不是多个user + return false; + } + + private bool SameEffectVar(Call originCall, Fusion fusion) + { + var array = MakeEffectVarArray( + CompileSession, + CompileSession.CompileOptions.ShapeBucketOptions.VarMap, + originCall.Arguments[^1..].ToArray()); + if (fusion is BucketFusion varFusion) + { + if (array.Length != 0 && !Enumerable.SequenceEqual(varFusion.EffectVar, array)) + { + return true; + } + } + else + { + return true; + } + + return false; + } +} + +[RuleGenerator] +public partial class MergePrevCallToFusion : MergeFusionBase +{ + private string _prevCallStr = string.Empty; + + public override Pattern Pattern => IsCall( + "fusionOuterCall", + IsFusion( + "fusion", + "stackvm", + IsWildcard(), + GenerateParameters(null)), + GenerateParameters( + null, + IsWildcard())); + + public Pattern PrevCall(string prefix) => IsCallWildcard($"{prefix}PrevCall", IsWildcard($"{prefix}Target")); + + public Pattern MaybeMarker(string exprName, Pattern exprPatten) => IsAlt( + exprName, + IsRangeOfMarker(exprPatten, IsWildcard()), + exprPatten); + + // 输入必须匹配marker,因为即便合并marker也是要在外面保留一份副本 + // fusion(marker(prevCall()) { var } -> fusion(var) { marker(prevCall()) } + // fusion((prevCall()) { var } -> fusion(var) { prevCall() } + + // dfs + 
// xx | marker(xx)不行, 会先匹配到xx + // xx(marker) | xx 可以 + public Expr? GetReplace(Call fusionOuterCall, BucketFusion fusion) + { + // multi var的情况下,matmul的var一定是由输入构成,所以一定可以合并 + var (fusionArgsInfo, prevOutputMaybeMarker) = CollectInputsInfo(fusionOuterCall); + if (fusionArgsInfo.Length == 0) + { + return null; + } + + // FusionArgs + var inputShouldBeMerge = CollectInputShouldBeMerge(fusionArgsInfo); + var prefix = $"{Counter}_{_prevCallStr}_{fusion.Name}_origin"; + + DumpIR(fusionOuterCall, prefix, printPrefix: "MergePrevCallToFusion"); + + var indices = fusionArgsInfo.Select(x => x.Item2).ToHashSet(); + var fusionDict = fusionOuterCall.Arguments.ToArray().Zip(fusion.Parameters.ToArray()) + .Where((expr, i) => !indices.Contains(i)) + .ToDictionary(pair => pair.First, pair => pair.Second); + + // (InputArg -> NewFusionVar[]), InputArg is part of newArgs. + var newVarsMap = MakeNewFusionVarsMap(fusionArgsInfo, fusionDict); + + // 所有要被合并的call替换args为Fusion的Var + var newPrevCalls = MakeNewPrevCalls(inputShouldBeMerge, prevOutputMaybeMarker, newVarsMap); + DumpIR(new IR.Tuple(newPrevCalls), "newPrevCalls"); + + var newVarsMapFlatten = newVarsMap.SelectMany(x => x).ToArray(); + var newBody = MakeNewBody(fusion, newVarsMapFlatten.Select(v => v.InputIndex).ToHashSet().ToArray(), newPrevCalls); + DumpIR(newBody, "newBody"); + var newParams = MakeNewParam(fusion, newVarsMapFlatten, newBody).ToHashSet().ToArray(); + var newFusion = fusion.With(body: newBody, parameters: newParams); + var newArgs = MakeNewArgs(fusionOuterCall, newVarsMapFlatten, inputShouldBeMerge).ToHashSet().ToArray(); + + Expr call = MakeNewCall(fusionOuterCall, fusion, newFusion, newArgs); + + // fusion var to arg + // 左边的arg的表达式是右边arg的一部分的时候,在将左边的arg替换为var的时候 + // 右边的表达式中引用左边的arg的情况下右边的表达式也会被替换为fusion的var,参考TestMalMulReshape + call = newParams.Zip(newArgs).Aggregate(call, (sum, pair) => + { + var (param, arg) = pair; + return ReplaceExpr(sum, param, arg); + }); + + DumpIR(call, $"{Counter++}_{_prevCallStr}_{fusion.Name}_after"); + ArgsChecker(newArgs); + return call; + } + + internal static T[] FusionVarsOperation(VarReplaceInfo[] newVars, T[] fusionVars, Func f) + where T : Expr + { + var inputIndices = newVars.Select(v => v.InputIndex).ToArray(); + return fusionVars.ToArray().SelectMany((fusionArg, inputIndex) => + { + // no change + if (!inputIndices.Contains(inputIndex)) + { + return new[] { fusionArg }; + } + + return newVars.Where(v => v.InputIndex == inputIndex).SelectMany(f).ToArray(); + }).ToArray(); + } + + internal static VarReplaceInfo[][] NewVarsDeduplication(VarReplaceInfo[][] newVars, Dictionary fusionDict) + { + var dict = newVars + .SelectMany(x => x) + .Select(info => new KeyValuePair(info.Expr, info.Vars)) + .ToHashSet(new KeyValuePairKeyComparer()) + .ToDictionary(pair => pair.Key, pair => pair.Value); + var newVarsDeduplication = newVars.Select(list => list.Select(info => + { + // todo: tuple的情况消除重复 + var defaultVar = Array.Empty(); + if (info.Expr is IR.Tuple tuple) + { + // FusionArg为tuple的时候,tuple中部分参数已经是fusion的参数的情况 + // TestMergeInputWhichHadBeMerged + // TestMergeInputInTupleWhichHadBeMerged + var callFields = tuple.Fields.ToArray().Where(ShouldBeInput).ToArray(); + + // dict里有这个expr,也就是说其他FusionArg中出现过,有对应的vars + if (dict.TryGetValue(info.Expr, out defaultVar)) + { + if (defaultVar.Length != callFields.Length) + { + throw new InvalidOperationException(); + } + } + else + { + // dict中没有这个var,那么只需要info中的vars是否有在fusionVar中出现过的 + defaultVar = info.Vars; + } + + var newVars = 
callFields.Zip(defaultVar).Select(pair => + { + // 如果tuple中有的元素已经在FusionArg中,那么优先替换 + var (field, defaultVar) = pair; + if (fusionDict.TryGetValue(field, out var fusionVar)) + { + return fusionVar; + } + + // 否则使用默认的var todo: 添加这种test + return defaultVar; + }).ToArray(); + return info with { Vars = newVars }; + } + + if (fusionDict.TryGetValue(info.Expr, out var fusionVar)) + { + return info with { Vars = new[] { fusionVar } }; + } + + // TestSameInputMerge + if (dict.TryGetValue(info.Expr, out var vars)) + { + return info with { Vars = vars }; + } + + return info; + }).ToArray()).ToArray(); + return newVarsDeduplication; + } + + private static Expr[] MakeNewPrevCalls(Call[] inputsShouldBeMerge, Expr[] prevOutputMaybeMarker, VarReplaceInfo[][] newVarsOrigin) + { + var tuple = new IR.Tuple(inputsShouldBeMerge); + + if (inputsShouldBeMerge.Length != newVarsOrigin.Length) + { + Console.WriteLine(); + } + + return inputsShouldBeMerge.Zip(newVarsOrigin).Select((pair, i) => + { + var (input, varsInfoList) = pair; + int outCounter = 0; + var newArgs = input.Arguments.ToArray().Select(x => + { + if (x is TensorConst) + { + return x; + } + + if (x is Marker m && m.Target is TensorConst) + { + return m; + } + + if (outCounter >= varsInfoList.Length) + { + throw new InvalidOperationException(); + } + + var newVar = varsInfoList[outCounter++].Vars; + if (x is IR.Tuple tuple) + { + int counter = 0; + var newFields = tuple.Fields.ToArray().Select(field => + { + if (field is TensorConst) + { + return field; + } + + return (Expr)newVar[counter++]; + }).ToArray(); + return new IR.Tuple(newFields); + } + + return newVar.First(); + }).ToArray(); + var newCall = input.With(arguments: newArgs); + + var call = prevOutputMaybeMarker[i] is Marker m ? m.With(target: newCall) : (Expr)newCall; + if (!call.InferenceType()) + { + DumpIR(call, "InvalidInMakeNewPrevCalls"); + throw new InvalidOperationException(); + } + + return call; + }).ToArray(); + } + + private static Call MakeNewCall(Call fusionOuterCall, BucketFusion fusion, BucketFusion newFusion, Expr[] newArgs) + { + // 原始的fusion的call更换target为新的fusion,以及arg0替换为prevCall的arg0,其他不变 + var call = fusionOuterCall.With(target: newFusion, arguments: newArgs); + return call; + } + + private static Expr[] MakeNewArgs(Call fusionOuterCall, VarReplaceInfo[] newVars, Call[] fusionArgs) + { + return FusionVarsOperation(newVars, fusionOuterCall.Arguments.ToArray(), newVar => + { + if (newVar.Expr is IR.Tuple tuple) + { + return tuple.Fields.ToArray().Where(ShouldBeInput).ToArray(); + } + + return new[] { newVar.Expr }; + }); + } + + private static Var[] MakeNewParam(BucketFusion fusion, VarReplaceInfo[] newVars, Expr newBody) + { + var newParams = FusionVarsOperation(newVars, fusion.Parameters.ToArray(), newVar => newVar.Vars); + return newParams; + } + + private static Expr MakeNewBody(BucketFusion fusion, int[] inputIndices, Expr[] newPrevCalls) + { + // 新的fusion body将原来的var换成prevCall + var newBody = inputIndices.Select(index => fusion.Parameters[index]).Zip(newPrevCalls).Aggregate( + fusion.Body, (sum, pair) => + { + // 此时prevCall携带新的var + var (fusionVar, newPrevCall) = pair; + return ReplaceExpr(sum, fusionVar, newPrevCall); + }); + return newBody; + } + + // todo: add test for this + private static bool ShouldBeInput(Expr expr) + { + if (expr is Marker m) + { + return m.Target is not TensorConst; + } + + return expr is not TensorConst; + } + + // PrevCall(input1, input2, ...) + // input: input1, input2, ... + // call => [arg] + // tuple => [arg1, arg2, ...] 
+ // VarReplaceInfo[InputIndex][InputArgIndex] + private static VarReplaceInfo[][] MakeNewFusionVarsMap((Call, int)[] fusionInputsInfo, Dictionary fusionDict) + { + var newVars = fusionInputsInfo.Select(fusionInputInfo => + { + var (fusionInput, inputIndex) = fusionInputInfo; + return fusionInput.Arguments.ToArray().Where(ShouldBeInput).Select((inputArg) => + { + // add condition to limit + var vars = new[] { new Var(inputArg.CheckedType) }; + if (inputArg is IR.Tuple tuple) + { + vars = tuple.Fields.ToArray().Where(ShouldBeInput).Select(field => new Var(field.CheckedType)).ToArray(); + } + + return new VarReplaceInfo(inputArg, vars, inputIndex); + }).ToArray(); + }).ToArray(); + var newVarsDeduplication = NewVarsDeduplication(newVars, fusionDict); + return newVarsDeduplication; + } + + private Call[] CollectInputShouldBeMerge((Call, int)[] prevCallsInfo) + { + var prevCalls = prevCallsInfo.Select(x => x.Item1).ToArray(); + _prevCallStr = string.Join("_", prevCalls.Select(call => call.Target.GetType().Name)); + return prevCalls; + } + + // 只需要替换被合并的call的args中的call,所以搜索和返回的都是Call + // 记录index,和原始的call的arg对应,多个输入的情况可能中间会有const隔开 + private ((Call, int)[] PrevCalls, Expr[] MayBeMarkers) CollectInputsInfo(Call fusionOuterCall) + { + // todo: 判断rhs的effect var才行 + var prevCalls = new List<(Call, int)>(); + var maybeMarkers = new List(); + var args = fusionOuterCall.Arguments.ToArray(); + for (int i = 0; i < args.Length; ++i) + { + var rhsArg = args[i]; + if (rhsArg is Marker marker && marker.Target is Call rhsPrevCall) + { + if (marker.Users.Count > 1) + { + continue; + } + + var rhsTarget = rhsPrevCall.Target; + + if (!IsInvalid(rhsPrevCall, rhsTarget)) + { + // prevCalls.Add((DupExpr(rhsPrevCall), i)); + if (rhsPrevCall.CheckedType is TupleType) + { + Console.WriteLine("1065 Error"); + throw new NotImplementedException(); + } + + prevCalls.Add((rhsPrevCall, i)); + maybeMarkers.Add(marker); + } + } + + if (rhsArg is Call rhsCall) + { + var rhsTarget = rhsCall.Target; + + if (!IsInvalid(rhsCall, rhsTarget)) + { + if (rhsCall.CheckedType is TupleType) + { + Console.WriteLine("1080 Error"); + throw new NotImplementedException(); + } + + // var rhs = DupExpr(rhsCall); + prevCalls.Add((rhsCall, i)); + maybeMarkers.Add((Expr)rhsCall); + } + } + } + + return (prevCalls.ToArray(), maybeMarkers.ToArray()); + } + + private bool IsInvalid(Call lhsPrevCall, Expr lhsTarget) + { + if (lhsPrevCall.Users.Count > 1) + { + return true; + } + + if (!ValidTarget(lhsTarget)) + { + return true; + } + + return false; + } +} + +internal record VarReplaceInfo(Expr Expr, Var[] Vars, int InputIndex); diff --git a/src/Nncase.Passes/Rules/ShapeBucket/RecordFusionShape.cs b/src/Nncase.Passes/Rules/ShapeBucket/RecordFusionShape.cs new file mode 100644 index 0000000000..110e37026a --- /dev/null +++ b/src/Nncase.Passes/Rules/ShapeBucket/RecordFusionShape.cs @@ -0,0 +1,151 @@ +// Copyright (c) Canaan Inc. All rights reserved. +// Licensed under the Apache license. See LICENSE file in the project root for full license information. 
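// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the original patch): the
// RecordFusionShape pass below evaluates the main function once per segment of
// the dynamic dimension and then merges the per-segment observations into one
// table keyed by fusion. The grouping step, reduced to plain collections
// (fusions are represented by their names and FusionShapeData by int[] here;
// both stand-ins are hypothetical):

using System.Collections.Generic;
using System.Linq;

internal static class ShapeGroupingSketch
{
    // perSegmentShapes[i] maps fusion name -> shape observed for segment i;
    // the result maps fusion name -> shapes over all segments, in segment order,
    // mirroring the SelectMany/ToLookup/ToDictionary chain in RecordFusionShape.
    public static Dictionary<string, int[][]> Group(
        IEnumerable<Dictionary<string, int[]>> perSegmentShapes) =>
        perSegmentShapes
            .SelectMany(seg => seg)
            .ToLookup(kv => kv.Key, kv => kv.Value)
            .ToDictionary(g => g.Key, g => g.ToArray());
}
// ---------------------------------------------------------------------------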
+ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Reactive; +using System.Threading.Tasks; +using Google.OrTools.Algorithms; +using NetFabric.Hyperlinq; +using Nncase.Diagnostics; +using Nncase.Evaluator; +using Nncase.IR; +using static Nncase.IR.F.Tensors; + +namespace Nncase.Passes.Rules.ShapeBucket; + +public record FusionShapeData(IValue Outshape, IValue[] InputShapes); + +public class FusionShapeUpdater : ExprVisitor +{ + private readonly Dictionary _memo; + + public FusionShapeUpdater(Dictionary memo) + { + _memo = memo; + } + + public Dictionary FusionShape { get; set; } = new(); + + protected override Expr DefaultVisitLeaf(Expr expr) => expr; + + protected override Expr VisitLeafCall(Call expr) + { + if (expr.Target is BucketFusion f) + { + var argShape = expr.Arguments.ToArray().Select(arg => GetShape(_memo[arg])).ToArray(); + var shape = GetShape(_memo[expr]); + FusionShape[f] = new FusionShapeData(shape, argShape); + } + + return expr; + } + + private IValue GetShape(IValue value) + { + var shapes = value.AsTensors().Select(x => x.Shape.ToValueArray()).ToArray(); + if (shapes.Length == 1) + { + return Value.FromTensor(shapes[0]); + } + + return new TupleValue(shapes.Select(x => Value.FromTensor(x)).ToArray()); + } +} + +public class SimpleTimer : IDisposable +{ + private readonly DateTime _startTime; + private readonly string _name; + + public SimpleTimer(string name) + { + _startTime = System.DateTime.Now; + _name = name; + } + + public void Dispose() + { + var endTime = System.DateTime.Now; + var time = endTime - _startTime; + Console.WriteLine($"{_name} tooks {time.Seconds}"); + } +} + +public class RecordFusionShape : FunctionPass +{ + private Dictionary _dimVarValues = new(); + + public RecordFusionShape(Dictionary shapeList) + { + FusionShapeInfo = shapeList; + } + + public Dictionary FusionShapeInfo { get; set; } + + protected override Task RunCoreAsync(BaseFunction main, RunPassContext context) + { + var options = CompileSession.CompileOptions.ShapeBucketOptions; + var varMap = options.VarMap; + _dimVarValues = ShapeBucketHelper.MakeVarValuesForAllSegment(options); + + // 一共有多组key seg + var list = Enumerable.Range(0, _dimVarValues.First().Value.Length).Select(i => + { + // 一组里面多个key seg + return _dimVarValues.Select(pair => (pair.Key, Value: pair.Value[i])).ToArray(); + }).ToArray(); + var tmpFusionShapeList = list.Select((seg, i) => + { + var varValues = seg.ToDictionary(pair => pair.Key, pair => (IValue)Value.FromTensor(pair.Value)); + var exprValues = seg.ToDictionary(pair => (Expr)pair.Key, pair => (IValue)Value.FromTensor(pair.Value)); + var input = MakeDummyInput(varMap, varValues); + var body = ((Function)main).Body; + var memo = EvaluatorUtil.GetMemo(body, input); + var f = new FusionShapeUpdater(ConcatDictionary(memo, exprValues)); + f.Visit(main); + return f.FusionShape; + }).SelectMany(x => x) + .ToLookup(x => x.Key, x => x.Value) + .ToDictionary(pair => pair.Key, pair => pair.ToArray()); + + foreach (var (f, shapeInfo) in tmpFusionShapeList) + { + FusionShapeInfo[f] = shapeInfo; + } + + return Task.FromResult(main); + } + + private static Dictionary ConcatDictionary(Dictionary memo, Dictionary exprValues) + { + foreach (var (key, value) in exprValues) + { + memo[key] = value; + } + + return memo; + } + + // make dummy value from InputInfo + // VarInfo:(DimVar -> Value) + private static Dictionary + MakeDummyInput(IReadOnlyDictionary info, Dictionary varInfo) + { + return info.ToDictionary( + pair => pair.Key, + pair => + { 
+ // todo: dummy input可能会有问题... + var shapeExpr = pair.Key.CheckedShape.IsScalar + ? (Expr)Array.Empty() + : Stack(new IR.Tuple(pair.Value.Select(x => Cast(x, DataTypes.Int32)).ToArray()), 0); + + var shape = shapeExpr.Evaluate(varInfo).AsTensor(); + return ConstantOfShape( + shape, + Cast(1, pair.Key.CheckedDataType)).Evaluate(varInfo); + }); + } +} diff --git a/src/Nncase.Passes/Rules/ShapeBucket/ShapeBucket.cs b/src/Nncase.Passes/Rules/ShapeBucket/ShapeBucket.cs new file mode 100644 index 0000000000..197a5e0e8b --- /dev/null +++ b/src/Nncase.Passes/Rules/ShapeBucket/ShapeBucket.cs @@ -0,0 +1,1126 @@ +// Copyright (c) Canaan Inc. All rights reserved. +// Licensed under the Apache license. See LICENSE file in the project root for full license information. + +using System; +using System.Collections; +using System.Collections.Generic; +using System.Diagnostics; +using System.Globalization; +using System.IO; +using System.Linq; +using System.Reactive; +using System.Threading.Tasks; +using System.Transactions; +using DryIoc; +using DryIoc.ImTools; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Toolkit.HighPerformance; +using NetFabric.Hyperlinq; +using Nncase.Diagnostics; +using Nncase.Evaluator; +using Nncase.IR; +using Nncase.IR.Math; +using Nncase.IR.NN; +using Nncase.IR.Tensors; +using Nncase.Passes.Analysis; +using Nncase.Passes.Rules.Lower; +using Nncase.Passes.Rules.Neutral; +using Nncase.Passes.Rules.ShapeExpr; +using Nncase.Passes.Transforms; +using Nncase.PatternMatch; +using Nncase.Utilities; +using static Nncase.IR.F.Tensors; +using static Nncase.Passes.Rules.ShapeBucket.ShapeBucketHelper; +using static Nncase.PatternMatch.F.Math; +using static Nncase.PatternMatch.F.Tensors; +using static Nncase.PatternMatch.Utility; +using static Nncase.Utilities.ReplaceUtility; +using Dimension = Nncase.IR.Dimension; +using FoldConstCall = Nncase.Passes.Mutators.FoldConstCall; +using Stack = Nncase.IR.Tensors.Stack; +using Tuple = System.Tuple; + +namespace Nncase.Passes.Rules.ShapeBucket; + +public class BucketFusion : Fusion, IEquatable +{ + public BucketFusion(string name, string moduleKind, Expr body, ReadOnlySpan parameters, Var[] effectVar) + : base( + name, moduleKind, body, parameters) + { + EffectVar = effectVar; + } + + public BucketFusion(string moduleKind, Expr body, ReadOnlySpan parameters, Var[] effectVar) + : base( + moduleKind, + body, + parameters) + { + EffectVar = effectVar; + } + + public BucketFusion(string name, string moduleKind, Var[] effectVar, Expr body, params Var[] parameters) + : base(name, moduleKind, body, parameters) + { + EffectVar = effectVar; + } + + public BucketFusion(string moduleKind, Var[] effectVar, Expr body, params Var[] parameters) + : base(moduleKind, body, parameters) + { + EffectVar = effectVar; + } + + public Var[] EffectVar { get; set; } + + public bool IsSimple + { + get + { + // todo: change list + var names = Name.Split("_"); + var list = new[] { "MatMul", "Conv2D", "Conv2DTranspose", "Transpose" }; + foreach (string name in names) + { + if (list.Contains(name)) + { + return false; + } + } + + return true; + } + } + + public static BucketFusion FromNormalFusion(Fusion f, Var[] effectVars) + { + return new BucketFusion(f.Name, "stackvm", f.Body, f.Parameters.ToArray(), effectVars); + } + + public new BucketFusion With(string? name = null, string? moduleKind = null, Expr? body = null, Var[]? parameters = null) + => new BucketFusion(name ?? Name, moduleKind ?? ModuleKind, body ?? Body, parameters ?? 
Parameters, EffectVar); + + public bool Equals(BucketFusion? other) + { + if (other == null) + { + return false; + } + + return Name == other.Name && ModuleKind == other.ModuleKind && Body.GetHashCode() == other.Body.GetHashCode() && + Parameters.SequenceEqual(other.Parameters) && EffectVar.SequenceEqual(other.EffectVar); + } + + public override bool Equals(object? obj) + { + return Equals(obj as BucketFusion); + } +} + +[RuleGenerator] +public partial class CallToFusion : RewriteRule +{ + private readonly bool _onlyDynamic; + + public CallToFusion(bool onlyDynamic) + { + _onlyDynamic = onlyDynamic; + } + + public CallToFusion() + { + _onlyDynamic = false; + } + + public static int Counter { get; set; } + + public string ModuleKind => "stackvm"; + + public override Pattern Pattern => throw new InvalidOperationException(); + + protected virtual bool MustHaveMarker => true; + + private Call? CurrentCall { get; set; } + + private string Name => CurrentCall!.Target.GetType().Name; + + private string RelPath => $"{Counter}_{CurrentCall!.Target.GetType().Name}"; + + public virtual bool Check(Call call) + { + return true; + } + + public Expr? GetReplace(Call call, IMatchResult matchResult) + { + // 第二轮的时候再开 + if (_onlyDynamic && call.CheckedShape.IsFixed) + { + return null; + } + + var originType = call.CheckedType; + CurrentCall = call; + DumpIR((Expr)matchResult.Root, "origin", RelPath); + if (!Check(call)) + { + return null; + } + + Init(matchResult); + + Console.WriteLine(call.Target.GetType().Name); + var argsMarkerData = CollectInputs(call); + var args = argsMarkerData.Select(pair => pair.Item1).ToArray(); + + // var argsMarker = argsMarkerData.Select(pair => pair.Item1).ToArray(); + // var args = argsMarker.Select(arg => arg.Target).ToArray(); + var varMap = CompileSession.CompileOptions.ShapeBucketOptions.VarMap; + var set = MakeEffectVarArray(CompileSession, varMap, args); + var fusionVars = MakeNewParam(args); + var newCall = MakeNewCall(call, fusionVars, argsMarkerData); + var f = MakeNewFusion(fusionVars, args, newCall, set); + var outerCall = MakeNewOuterCall(newCall, f, args); + DumpIR(outerCall, "after", RelPath); + Counter++; + + if (!outerCall.InferenceType()) + { + DumpIR(outerCall, "InvalidType"); + throw new InvalidOperationException(); + } + + if (outerCall.CheckedType != originType) + { + DumpIR(outerCall, "TypeChanged"); + throw new InvalidOperationException(); + } + + return outerCall; + } + + protected virtual Expr ProcessForNewBody(Var[] fusionVars, Expr[] args, Expr expr) => expr; + + protected virtual Expr ProcessForOuterCall(Expr expr) => expr; + + protected virtual (Expr, int)[] CollectInputs(Call call) => + call.Arguments.ToArray().Select((arg, i) => + { + if (arg is Marker m && m.Target is not TensorConst) + { + return (m, i); + } + + return (arg, -1); + }).Where(pair => pair.Item2 != -1).Select(pair => (pair.arg, pair.Item2)).ToArray(); + + protected virtual void Init(IMatchResult result) + { + } + + protected virtual Expr ReplaceVarsWithArg(Var[] fusionVars, Expr[] args, Expr newCall) => + fusionVars.Zip(args).Aggregate(newCall, (newBody, tuple) => + { + var (fusionVar, arg) = tuple; + return ReplaceUtility.ReplaceExpr(newBody, arg, fusionVar); + }); + + private static Var[] MakeNewParam(Expr[] args) + { + var fusionVars = args.Select(arg => new Var(arg.CheckedType)).ToArray(); + return fusionVars; + } + + private Expr MakeNewOuterCall(Expr call, BucketFusion f, Expr[] argsMarker) + { + // PrintEffectVar(f.Name, set); + Expr outerCall = ProcessForOuterCall(new 
Call(f, argsMarker)); + return outerCall; + } + + private BucketFusion MakeNewFusion(Var[] fusionVars, Expr[] args, Expr newCall, Var[] set) + { + // 处理其他的参数用到了分段的input的情况 + // 即便body只有一个call,但这里是针对所有参数的表达式进行替换,比如反卷积的output shape是一个用到了需要分段的input的表达式 + // 如果不加这个则output shape引用的原始的未分段的输入会再次塞进来 + + // todo: 如果其中一个arg有多个user,并且有在fusion之外的部分,如果被替换为var,那么fusion外的那部分表达式operand都会跟着变成var + // args是call的话重新构造就好了, 能够解决目前的情况,但是不知道更复杂的情况会不会出问题 + var body = ReplaceVarsWithArg(fusionVars, args, newCall); + + var f = new BucketFusion($"{Name}_{Counter}", ModuleKind, set, body, fusionVars); + return f; + } + + private Expr MakeNewCall(Call call, Var[] fusionVars, (Expr, int)[] argsMarkerData) + { + var inputsWithMarkerAndIndex = + fusionVars.Zip(argsMarkerData).Select(pair => + { + var (arg, originIndex) = pair.Second; + if (arg is Marker m) + { + return (originIndex, arg: m.With(target: pair.First)); + } + + return (originIndex, arg: (Expr)pair.First); + }).ToArray(); + + // index should map to origin input, not inputsWithMarker index + // var pairs = inputsWithMarkerAndIndex.Select((input, i) => (i, (Expr)input)).ToArray(); + var indices = inputsWithMarkerAndIndex.Select(x => x.originIndex).ToArray(); + var newArgs = call.Arguments.ToArray().Select((arg, i) => + { + if (indices.Contains(i)) + { + var fields = inputsWithMarkerAndIndex.Where(x => x.originIndex == i).ToArray(); + + // todo: tuple type(split) maybe error + if (arg is IR.Tuple tup) + { + // 包含tuple中所有元素,const以及非const + var newFields = new List(); + int inputCounter = 0; + foreach (var inputField in tup.Fields.ToArray()) + { + if (inputField is TensorConst) + { + newFields.Add(inputField); + } + else + { + newFields.Add(fields[inputCounter++].arg); + } + } + + // var newFields = inputsWithMarkerAndIndex.Select(x => x.originIndex == i ? x.arg : arg).ToArray(); + return new IR.Tuple(newFields.ToArray()); + } + + if (fields.Length > 1) + { + throw new InvalidOperationException(); + } + + return fields.First().arg; + } + + return arg; + }).ToArray(); + + // arguments用到其他input的地方就要replace对应的input + var newCall = call.With(arguments: newArgs); + + // var newCall = ReplaceUtility.ReplaceCallParams(call.Target, call.Arguments.ToArray(), inputsWithMarkerAndIndex); + var newCallWithMarker = ProcessForOuterCall(newCall); + return newCallWithMarker; + } +} + +public class MarkerCallToFusion : CallToFusion + where T : Op +{ + public MarkerCallToFusion(bool isDynamic = false) + : base(isDynamic) + { + } + + public MarkerCallToFusion() + : base(false) + { + } + + public override Pattern Pattern => IsRangeOfMarker( + "callMarker", + IsCallWildcard("call", IsOp()), + IsTensorConst()); + + protected Marker? 
CallMarker { get; set; } + + protected override Expr ProcessForNewBody(Var[] fusionVars, Expr[] args, Expr expr) => + CallMarker!.With(target: expr); + + protected override Expr ProcessForOuterCall(Expr expr) => CallMarker!.With(target: expr); + + protected override void Init(IMatchResult result) + { + CallMarker = (Marker)result["callMarker"]; + } +} + +public class MultiUserCallToFusion : CallToFusion +{ + public MultiUserCallToFusion(bool onlyDynamic = false) + : base(onlyDynamic) + { + } + + public MultiUserCallToFusion() + { + } + + public override Pattern Pattern => IsWildcard("call", expr => + { + if (expr is Call c && c.Target is not BucketFusion) + { + if (c.Target is Binary) + { + if (c.Arguments[0] is not Const && c.Arguments[1] is not Const) + { + return false; + } + + return true; + } + + if (c.Target is IR.Tensors.Reshape) + { + if (c.Arguments[IR.Tensors.Reshape.Shape.Index] is TensorConst) + { + return CallValidator.ValidTarget(c.Target); + } + } + else + { + return CallValidator.ValidTarget(c.Target); + } + } + + return false; + }); + + public override bool Check(Call call) + { + return !call.Users.ToArray().OfType().Any(); + } + + protected override (Expr, int)[] CollectInputs(Call call) => + call.Arguments.ToArray().SelectMany((arg, i) => + { + if (arg is IR.Tuple tuple) + { + return tuple.Fields + .ToArray() + .Where(field => field is not TensorConst) + .Select(field => (field, i)) + .ToArray(); + } + + if (arg is not TensorConst) + { + return new[] { (arg, i) }; + } + + return new[] { (arg, -1) }; + }).Where(pair => pair.Item2 != -1).Select(pair => (pair.Item1, pair.Item2)).ToArray(); +} + +public class Conv2DToFusion : MarkerCallToFusion +{ + public Conv2DToFusion(bool isDynamic = false) + : base(isDynamic) + { + } + + public Conv2DToFusion() + { + } +} + +// tflite相比于onnx的比较特殊,output shape是原图进行计算的,而不是自行创建表达式计算。 +// 如果采用一样的处理方法会导致复制输入中的function和call +// 对于tflite的所有反卷积的通用性不能确保,暂且这样硬编码,另外tflite的动态shape也很少见 +// 这里本质的问题是因为output shape所指向的很可能并不是input,或者说是input并不是output shape所指向的表达式的子表达式 +public class TFConv2DTransposeToFusion : MarkerCallToFusion +{ + private Call? _transpose; + + private Call? _originCall; + + private Marker? _transposeInputMarker; + + public TFConv2DTransposeToFusion(bool isDynamic = false) + : base(isDynamic) + { + } + + public override Pattern Pattern => IsRangeOfMarker( + "callMarker", + IsCallWildcard( + "call", + IsOp(), + IsRangeOfMarker( + IsCallWildcard( + "transpose", + IsOp(), + IsRangeOfMarker( + "transposeInputMarker", + IsCallWildcard("originCall", IsWildcard(), IsWildcard()), + IsWildcard())), + IsWildcard())), + IsTensorConst()); + + protected override (Expr, int)[] CollectInputs(Call call) + { + return new[] { (_originCall!.Arguments[0], 0) }; + } + + protected override void Init(IMatchResult result) + { + _transpose = (Call)result["transpose"]; + _originCall = (Call)result["originCall"]; + _transposeInputMarker = (Marker)result["transposeInputMarker"]; + base.Init(result); + } + + protected override Expr ReplaceVarsWithArg(Var[] fusionVars, Expr[] args, Expr newCall) + { + var convTranspose = (Call)CallMarker!.Target; + var c = ReplaceCallFirstParam( + convTranspose, + _transposeInputMarker!.With(target: + ReplaceCallFirstParam( + _transpose!, + _transposeInputMarker.With(target: + ReplaceCallFirstParam(_originCall!, fusionVars[0]))))); + return CallMarker.With(target: base.ReplaceVarsWithArg(fusionVars, args, c)); + } + + protected override Expr ProcessForNewBody(Var[] fusionVars, Expr[] args, Expr expr) + { + // 1. 
reconstruct new body + + // 2. replace + var newBody = fusionVars.Zip(args).Aggregate(expr, (newBody, tuple) => + { + var (fusionVar, arg) = tuple; + return ReplaceUtility.ReplaceExpr(newBody, arg, fusionVar); + }); + return CallMarker!.With(target: newBody); + + // return ReplaceClone(callMarker.With(target: newBody), fusionVars.Zip(args).ToArray()); + } +} + +public class Conv2DTransposeToFusion : MarkerCallToFusion +{ + public Conv2DTransposeToFusion(bool isDynamic = false) + : base(isDynamic) + { + } + + // when OutputShape is Const, it means output shape is not effected by input. + public override bool Check(Call call) => call.Arguments[Conv2DTranspose.OutputShape.Index] is not Const; +} + +public class MatmulToFusion : MarkerCallToFusion +{ + public MatmulToFusion(bool isDynamic = false) + : base(isDynamic) + { + } +} + +public class ActToFusion : MarkerCallToFusion +{ + public ActToFusion(bool isDynamic = false) + : base(isDynamic) + { + } +} + +public class TransposeToFusion : MarkerCallToFusion +{ + public TransposeToFusion(bool isDynamic = false) + : base(isDynamic) + { + } + + protected override bool MustHaveMarker => false; +} + +public class UnaryToFusion : MarkerCallToFusion +{ + public UnaryToFusion(bool isDynamic = false) + : base(isDynamic) + { + } + + public UnaryToFusion() + { + } +} + +// todo: do more check for binary +public class BinaryToFusion : MarkerCallToFusion +{ + public BinaryToFusion(bool isDynamic = false) + : base(isDynamic) + { + } + + // public override bool Check(Call call) => call.CheckedShape.Rank > 1; +} + +[RuleGenerator] +public partial class ClearRequire : RewriteRule +{ + // for require(true, value, msg) + public override Pattern Pattern { get; } = + IsRequire(require => true, IsTensorConst("predicate"), IsWildcard("expr")); + + public Expr? GetReplace(bool predicate, Expr expr) + { + if (predicate) + { + return expr; + } + + return null; + } +} + +[RuleGenerator] +public partial class FoldRepeatMarker : RewriteRule +{ + public override Pattern Pattern { get; } = IsRangeOfMarker( + "markerA", + IsRangeOfMarker( + "markerB", + IsWildcard(), + IsWildcard("rangeB")), + IsWildcard("rangeA")); + + public Expr? GetReplace(Expr rangeA, Expr rangeB, Marker markerB) + { + if (rangeA == rangeB) + { + return markerB; + } + + return null; + } +} + +[RuleGenerator] +public partial class ClearFusionOuterMarker : RewriteRule +{ + public static Pattern CallerPattern => IsCall( + "caller", + IsFusion(null, "stackvm", IsWildcard(), GenerateParameters(null)), + GenerateParameters(null)); + + public override Pattern Pattern { get; } = IsRangeOfMarker("marker", CallerPattern, IsWildcard()); + + public Expr? 
GetReplace(Marker marker, Call caller) + { + return caller; + } +} + +public class FusionBucketContext +{ + public FusionBucketContext(Call outerCall, BucketFusion fusion, Dictionary varMap, Dictionary dimVarValues, ShapeExprCache cache) + { + OuterCall = outerCall; + Fusion = fusion; + VarMap = varMap; + Cache = cache; + Cache.VarMap = varMap; + FusionInputShapeExpr = MakeFusionInputShapeExpr(outerCall, fusion, cache); + CheckAlive(FusionInputShapeExpr); + DimVarValues = dimVarValues; + Arguments = OuterCall.Arguments.ToArray(); + Parameters = Fusion.Parameters.ToArray(); + FixedShapeCache = new(); + SliceShape = ComputeSliceShape(); + } + + public Expr SliceShape { get; } + + public Call OuterCall { get; } + + public BucketFusion Fusion { get; } + + public Dictionary VarMap { get; } + + public Dictionary FusionInputShapeExpr { get; } + + public Dictionary DimVarValues { get; } + + public Expr[] Arguments { get; } + + public Var[] Parameters { get; } + + public ShapeExprCache Cache { get; } + + // segIndex -> fixed shape list + public Dictionary FixedShapeCache { get; } + + public Expr FusionBody => Fusion.Body; + + public Dictionary DimVarValue(int i) => + DimVarValues.ToDictionary(pair => pair.Key, pair => (IValue)Value.FromTensor(pair.Value[i])); + + // ShapeOf而不是shape表达式,用于计算Slice的shape + private static Dictionary MakeShapeOfFusionInput(Var[] parameters, Expr[] args) + { + var fusionInputShapes = parameters + .Zip(args) + .ToDictionary(pair => pair.First, pair => + { + var shape = Cast((Expr)ShapeOf(pair.Second), DataTypes.Int32); + return Enumerable.Range(0, pair.Second.CheckedShape.Rank).Select(i => shape[i]).ToArray(); + }); + return fusionInputShapes; + } + + private static Dictionary MakeFusionInputShapeExpr(Call call, BucketFusion fusion, ShapeExprCache cache) + { + var data = fusion.Parameters.ToArray().Zip(call.Arguments.ToArray().Select((arg, i) => + { + // DumpIR(arg, "MakeFusionInputShapeExprArg"); + var result = arg.EvaluateShapeExpr(cache); + if (!result.InferenceType()) + { + DumpIR(result, "InvalidInputShapeExpr"); + throw new InvalidOperationException(); + } + + return Enumerable.Range(0, arg.CheckedShape.Rank).Select(i => + { + var res = result[i]; + return res; + }).ToArray(); + })).Select(pair => new KeyValuePair(pair.First, pair.Second)); + var fusionInputData = data.ToDictionary(pair => pair.Key, pair => pair.Value); + return fusionInputData; + } + + private static void CheckAlive(Dictionary fusionInputInfo) + { + foreach (var value in fusionInputInfo.Values) + { + foreach (var expr in value) + { + if (!expr.IsAlive) + { + throw new NotImplementedException(); + } + } + } + } + + private Expr ComputeSliceShape() + { + var originBody = FusionBody; + var shapeOfFusionInput = MakeShapeOfFusionInput(Parameters, Arguments); + var originShape = originBody.EvaluateShapeExpr(shapeOfFusionInput); + originShape.InferenceType(); + + return originShape; + } +} + +[RuleGenerator] +public partial class FusionBucket : RewriteRule +{ + private static int _counter; + + private static string _relPath = string.Empty; + + private readonly ShapeExprCache _cache = ShapeExprCache.Default; + + public FusionBucket(Dictionary list) + { + FusionShapeInfo = list; + } + + public Dictionary FusionShapeInfo { get; set; } + + public override Pattern Pattern => IsCall( + "outerCall", + IsFusion( + "fusion", + "stackvm", + IsWildcard("fusionBody"), + GenerateParameters(null)), + GenerateParameters(null)); + + internal Dictionary VarMap => CompileSession.CompileOptions.ShapeBucketOptions.VarMap; + + 
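    // Editor's note (illustrative only, not part of the original patch): at
    // runtime the If chain built by Split() below behaves like "take the branch
    // of the smallest segment boundary that covers the actual dimension,
    // otherwise fail". A plain C# sketch of that selection rule over integer
    // boundaries (assumes the ascending boundary list that ComputeSegmentList
    // produces):
    private static int PickSegmentSketch(int actualDim, int[] segments)
    {
        foreach (var seg in segments.OrderBy(x => x))
        {
            if (actualDim <= seg)
            {
                return seg; // this is the branch whose padded, fixed-shape body runs
            }
        }

        // mirrors the Require(false, ...) failure branch ("input dim large than limit")
        throw new InvalidOperationException("input dim larger than limit");
    }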
public static Expr PreProcess(FusionBucketContext context, Var param, Dictionary inputInfo, Dictionary varValues, Dictionary fusionInputData, int segIndex, int inputIndex) + { + // Console.WriteLine($"seg index{segIndex}"); + if (context.FixedShapeCache.TryGetValue(segIndex, out var cachedFixedShape)) + { + // var cachedShape = cachedFixedShape[inputIndex]; + // Console.WriteLine(string.Join(",", cachedShape)); + // Console.WriteLine("Cache ok"); + return new Call(new BucketPad(), param, cachedFixedShape[inputIndex]); + } + + throw new InvalidDataException("Shape Cache not found"); + } + + public static (Dictionary MinDict, Dictionary MaxDict) GetBoundDict( + Dictionary inputInfo, Dictionary rangeInfo) + { + // find vars in Input ShapeExpr + var vars = inputInfo.Values.SelectMany(x => x).OfType().ToHashSet().ToArray(); + + // DimVarName -> Dict.key -> Dict.Value + var minDict = rangeInfo.ToDictionary( + pair => vars.FindFirst(v => v.Name == pair.Key), + pair => (IValue)Value.FromTensor(pair.Value.Min)); + var maxDict = rangeInfo.ToDictionary( + pair => vars.FindFirst(v => v.Name == pair.Key), + pair => (IValue)Value.FromTensor(pair.Value.Max)); + return (minDict, maxDict); + } + + public static Expr MakeSplitEntry(FusionBucketContext context, Dictionary varInfo, int segIndex) + { + var originBody = context.FusionBody; + var fusionVars = context.Parameters; + var fixInputs = fusionVars + .Select((arg, i) => + PreProcess(context, arg, context.VarMap, varInfo, context.FusionInputShapeExpr, segIndex, i)).ToArray(); + + // 替换逻辑:新的body中的var -> fusion原始的var -> target为fusion的call的input + // 本质上只是对这个body的所有输入做替换 + // 避免这里的修改影响到原始的body,每个分支需要进行自己的修改,所以要clone处理 + // DumpIR(originBody, "originBody", _relPath); + var call = ReplaceClone(originBody, fusionVars.Zip(fixInputs).ToArray()); + if (!call.InferenceType()) + { + DumpIR(call, "InvalidType"); + throw new InvalidOperationException(); + } + + var slice = MakeSlice(context, call, originBody); + DumpIR(slice, $"slice_{segIndex}", _relPath); + return slice; + } + + public Expr? 
GetReplace(Call outerCall, BucketFusion fusion, Expr fusionBody) + { + if (ShouldRestore(outerCall, fusion)) + { + return RestoreBodyWithArgs(outerCall.Arguments.ToArray(), fusion.Parameters.ToArray(), fusion.Body); + } + + fusionBody = CompilerServices.Rewrite( + fusionBody, + new IRewriteRule[] + { + new FoldStackGetItem(), new FoldShapeOf(), new FoldTwoReshapes(), new FoldTwoCasts(), + new FoldTwoSlices(), new FoldNopBinary(), new FoldNopCast(), new Neutral.FoldConstCall(), + new FoldNopReshape(), new FoldNopSlice(), new FoldIf(), + }, + new()); + Console.WriteLine($"FusionBucketGetReplace {_counter} {fusion.Name}"); + _relPath = $"{_counter}"; + + DumpIR(outerCall, $"BucketOriginFusion_{fusion.Name}", _relPath); + + var options = CompileSession.CompileOptions.ShapeBucketOptions; + var dimVarValues = MakeVarValuesForAllSegment(options); + var context = new FusionBucketContext(outerCall, fusion, VarMap, dimVarValues, _cache); + + var shapeInfos = Array.Empty(); + if (!FusionShapeInfo.TryGetValue(fusion, out shapeInfos)) + { + // todo: 不知道为什么有的时候无法从key中获取 + var list = FusionShapeInfo.Where(x => x.Key == fusion).ToArray(); + if (list.Length != 1) + { + throw new InvalidOperationException($"NoKey{fusion.Name}"); + } + + shapeInfos = list[0].Value; + } + + var allFixedShapes = shapeInfos + .Select(x => + x.InputShapes.Select(iShape => iShape.AsTensor().ToArray().ToArray()).ToArray()).ToArray(); + for (int i = 0; i < shapeInfos.Length; i++) + { + for (int j = 0; j < allFixedShapes.Length; j++) + { + context.FixedShapeCache[j] = allFixedShapes[j]; + } + } + + // reverse + var minFixedShapeList = allFixedShapes[^1]; + var maxFixedShapeList = allFixedShapes[0]; + + // PrintMinMaxShape(minFixedShapeList, maxFixedShapeList, _relPath); + + // 2. get dim info(inputIndex, (dimIndex, range) + var counts = ComputeCounts(minFixedShapeList, maxFixedShapeList, out int totalCount); + if (IsFixed(totalCount, minFixedShapeList, maxFixedShapeList)) + { + var fix = FixInput(context, minFixedShapeList); + DumpIR(fix, "BucketResultFix", _relPath); + _counter++; + return fix; + } + + // todo: process total count, matmul maybe multi count, but other should check this + if (totalCount > 1) + { + // Console.WriteLine($"{fusion.Name} totalCount > 1"); + } + + var info = ComputeSegmentInfo(counts, options); + var body = Split(context, info); + body.InferenceType(); + + if (body.Users.Count > 1 || body.CheckedType is InvalidType) + { + throw new InvalidOperationException(); + } + + if (body is not If) + { + _counter++; + DumpIR(body, "Rebuild", _relPath); + return body; + } + + // DumpIR(body, "newBodyBeforeReplace", _relPath); + // FixInput Replace Var + var newBody = ReplaceFusionVarWithCallArgs(fusion, context.Arguments, body); + + // let bind + if (newBody is If @if) + { + newBody = IR.F.Math.Require(true, @if.With(paramList: context.Arguments)); + } + + DumpIR(newBody, "BucketResult", _relPath); + _counter++; + if (newBody.CheckedType is InvalidType) + { + throw new InvalidOperationException("InvalidBucketBody"); + } + + return newBody; + } + + public Expr FixInput(FusionBucketContext context, int[][] shapeList) + { + var fixedShapeInput = context.Arguments.Zip(shapeList).Select(pair => + { + var (arg, fixedShape) = pair; + return (Expr)new Call(new FixShape(), arg, fixedShape); + }).ToArray(); + return ReplaceClone(context.FusionBody, context.Parameters.Zip(fixedShapeInput).ToArray()); + } + + private static void PrintShapeInfos(FusionShapeData[] shapeInfos) + { + for (var i = 0; i < shapeInfos.Length; i++) + { + 
Console.WriteLine($"Segment Index {i}"); + var inShapes = shapeInfos[i].InputShapes; + for (int j = 0; j < inShapes.Length; j++) + { + var shape = inShapes[j].AsTensor().ToArray(); + Console.WriteLine($"Input {j} shape:"); + Console.WriteLine(string.Join(",", shape)); + } + } + } + + private static Expr MakeSlice(FusionBucketContext context, Expr call, Expr originBody) + { + if (call.CheckedType is TupleType tuple) + { + var fields = Enumerable.Range(0, tuple.Count) + .Select(i => MakeSliceForTensor(originBody[i], call[i], context)).ToArray(); + return new IR.Tuple(fields); + } + + return MakeSliceForTensor(originBody, call, context); + } + + private static Expr MakeSliceForTensor(Expr originBody, Expr call, FusionBucketContext context) + { + var sliceShape = context.SliceShape; + var rank = call.CheckedShape.Rank; + var simplifyCall = CompilerServices.Rewrite( + call, + new IRewriteRule[] + { + new FoldStackGetItem(), + new FoldShapeOf(), + new FoldTwoReshapes(), + new FoldTwoCasts(), + new FoldTwoSlices(), + new FoldNopBinary(), + new FoldNopCast(), + new Neutral.FoldConstCall(), + new FoldNopReshape(), + new FoldNopSlice(), + new FoldIf(), + }, + new()); + + var body = (Expr)Slice(simplifyCall, Enumerable.Repeat(0, rank).ToArray(), Cast(sliceShape, DataTypes.Int32), rank); + return body; + } + + private static bool IsFixed(int totalCount, int[][] minFixedShapeList, int[][] maxFixedShapeList) => + totalCount == 0 || (minFixedShapeList[0].SequenceEqual(maxFixedShapeList[0]) && + minFixedShapeList[1].SequenceEqual(maxFixedShapeList[1])); + + private static bool ShouldRestore(Call outerCall, BucketFusion fusion) + { + return fusion.IsSimple || + outerCall.CheckedType is TupleType || + outerCall.CheckedShape.Rank == 0 || + outerCall.Arguments.ToArray().Any(arg => + arg.CheckedType is TupleType); + } + + private static Expr RestoreBodyWithArgs(Expr[] args, Var[] parameters, Expr body) => + ReplaceClone(body, parameters.Zip(args).ToArray()); + + private static void PrintMinMaxShape(int[][] minFixedShapeList, int[][] maxFixedShapeList, string relPath) + { + string str = string.Empty; + Console.Write("min "); + str += "min "; + foreach (int[] shape in minFixedShapeList) + { + var s = DumpUtility.SerializeShape(shape) + " "; + str += s; + Console.Write(s); + } + + Console.Write("max "); + str += "max "; + foreach (int[] shape in maxFixedShapeList) + { + var s = DumpUtility.SerializeShape(shape) + " "; + str += s; + Console.Write(s); + } + } + + // 计算出使用哪个位置的input进行分段 + private static SegmentInfo ComputeSegmentInfo( + (int InputIndex, (int First, (int First, int Second) Second)[] Range)[] counts, ShapeBucketOptions options) + { + var (iIndex, dimIndex, (min, max)) = counts.Select(x => + { + Debug.Assert(x.Range.Length <= 2, "x.range.Length <= 2"); + return (inputIndex: x.InputIndex, x.Range[0].First, x.Range[0].Second); + }).ToArray().First(); + + var segments = ComputeSegmentList(options.SegmentsCount, min, max); + var info = new SegmentInfo(iIndex, dimIndex, segments); + return info; + } + + private static (int InputIndex, (int First, (int First, int Second) Second)[] Range)[] ComputeCounts( + int[][] minFixedShapeList, int[][] maxFixedShapeList, out int totalCount) + { + (int InputIndex, (int First, (int First, int Second) Second)[] Range)[] counts = minFixedShapeList + .Zip(maxFixedShapeList).Select((pair, inputIndex) => + { + var (minShape, maxShape) = pair; + + // (range, dimIndex) + var range = Enumerable.Range(0, minShape.Length).Zip(minShape.Zip(maxShape)).Where(data => + { + var 
(dimIndex, pair) = data; + return pair.First != pair.Second; + }).ToArray(); + return (inputIndex, range); + }).Where(pair => pair.range.Length > 0).ToArray(); + totalCount = counts.Length; + return counts; + } + + private static Expr ReplaceFusionVarWithCallArgs(BucketFusion fusion, Expr[] args, Expr newBody) => + fusion.Parameters.ToArray().Zip(args).Aggregate(newBody, (sum, pair) => + { + var (param, arg) = pair; + var result = ReplaceExpr(sum, param, arg); + return result; + }); + + private static Expr Split(FusionBucketContext context, SegmentInfo info) + { + var fusionInputs = context.Arguments; + var (inputIndex, dimIndex, segments) = info; + var dim = ShapeOf(fusionInputs[inputIndex])[dimIndex]; + var failure = MakeFailure(context.FusionBody); + + int i = 0; + + // 1. 普通情况不应该rebuild + // 2. rebuild的正确性 + // if (ShouldBeRebuild(context)) + // { + // Console.WriteLine("Rebuild"); + // return RestoreBodyWithArgs(context.Arguments, context.Parameters, context.FusionBody); + // } + var body = segments.OrderByDescending(x => x).Aggregate( + failure, + (sum, seg) => + { + // 根据var,也就是target为这个fusion的call的参数来进行判断落在哪个段 + var cond = dim <= (long)seg; + + // select var value for current segment + var varInfo = context.DimVarValue(i); + var thenBody = MakeSplitEntry(context, varInfo, i); + var elseBody = sum; + i++; + + var result = new If(cond, thenBody, elseBody); + return result; + }); + + return body; + } + + private static bool ShouldBeRebuild(FusionBucketContext context) + { + var varInfo = context.DimVarValue(0); + var entry = MakeSplitEntry(context, varInfo, 0); + return entry switch + { + IR.Tuple tuple => tuple.Fields.ToArray().Any(ShouldBeRebuild), + Call => ShouldBeRebuild(entry), + _ => throw new ArgumentOutOfRangeException("context"), + }; + } + + private static bool ShouldBeRebuild(Expr entry) => entry is Call { Target: IR.Tensors.Slice } c && + (!c.Arguments[IR.Tensors.Slice.Input.Index].CheckedShape + .IsFixed); + + private static Expr MakeFailure(Expr fusionBody) + { + var failure = fusionBody.CheckedType switch + { + TupleType tuple => new IR.Tuple(tuple.Fields.ToArray() + .Select(x => + { + return ConstantOfShape(new[] { 1 }, Cast(0, ((TensorType)x).DType)); + }).ToArray()), + TensorType tensorType => (Expr)ConstantOfShape(new[] { 1 }, Cast(0, tensorType.DType)), + _ => throw new ArgumentOutOfRangeException("fusionBody"), + }; + return IR.F.Math.Require(false, failure, "input dim large than limit"); + } +} + +internal record SegmentInfo(int InputIndex, int DimIndex, int[] Segments); diff --git a/src/Nncase.Passes/Rules/ShapeBucket/ShapeBucketHelper.cs b/src/Nncase.Passes/Rules/ShapeBucket/ShapeBucketHelper.cs new file mode 100644 index 0000000000..3d9cbd389e --- /dev/null +++ b/src/Nncase.Passes/Rules/ShapeBucket/ShapeBucketHelper.cs @@ -0,0 +1,457 @@ +// Copyright (c) Canaan Inc. All rights reserved. +// Licensed under the Apache license. See LICENSE file in the project root for full license information. 
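// ---------------------------------------------------------------------------
// Editor's note (worked example, not part of the original patch): the
// ComputeSegmentList helper below splits a dimension range with integer
// division and keeps the maximum as the last boundary:
//
//     size       = (max - min) / segmentCount
//     boundaries = { min, min + size, ..., min + (segmentCount - 2) * size, max }
//
// For instance, assuming min = 1, max = 100 and segmentCount = 4:
//     size = (100 - 1) / 4 = 24, boundaries = { 1, 25, 49, 100 }
// so a runtime dimension of 30 falls into the 49 bucket chosen by the If chain
// in FusionBucket.Split, and the inputs are padded up to that segment's shape.
// ---------------------------------------------------------------------------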
+ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Reactive; +using DryIoc.ImTools; +using Nncase.Diagnostics; +using Nncase.IR; +using Nncase.IR.Math; +using Nncase.IR.NN; +using Nncase.IR.Tensors; +using Nncase.Passes.Rules.Lower; +using Nncase.Passes.Rules.Neutral; +using Nncase.Passes.Rules.ShapeExpr; +using Nncase.PatternMatch; +using static Nncase.PatternMatch.Utility; +using static Nncase.Utilities.ReplaceUtility; + +namespace Nncase.Passes.Rules.ShapeBucket; + +public static class CallValidator +{ + private static readonly HashSet ForceConvert = new() + { + typeof(Conv2D).TypeHandle, + typeof(MatMul).TypeHandle, + typeof(Unsqueeze).TypeHandle, + typeof(Squeeze).TypeHandle, + typeof(Cast).TypeHandle, + typeof(Unary).TypeHandle, + typeof(Transpose).TypeHandle, + typeof(Pad).TypeHandle, + }; + + // todo: add debug mode + private static readonly HashSet MaybeDynamic = new() + { + // typeof(SpaceToBatch).TypeHandle, + // typeof(BatchToSpace).TypeHandle, + typeof(Concat).TypeHandle, + typeof(Stack).TypeHandle, + typeof(Binary).TypeHandle, + typeof(Slice).TypeHandle, + typeof(Gather).TypeHandle, + typeof(ShapeOf).TypeHandle, + + // typeof(Reshape).TypeHandle, + typeof(Expand).TypeHandle, + typeof(ConstantOfShape).TypeHandle, + typeof(Where).TypeHandle, + typeof(Compare).TypeHandle, + typeof(Reduce).TypeHandle, + typeof(Clamp).TypeHandle, + typeof(Tile).TypeHandle, + typeof(CumSum).TypeHandle, + typeof(IR.Tensors.Range).TypeHandle, + }; + + public static bool IsMaybeDynamic(Expr target) => MaybeDynamic.Contains(target.GetType().TypeHandle); + + public static bool IsForceConvert(Expr target) => ForceConvert.Contains(target.GetType().TypeHandle); + + public static bool ValidTarget(Expr target) + { + if (target is ActivationOp) + { + return true; + } + + if (IsMaybeDynamic(target) || IsForceConvert(target)) + { + return true; + } + + return false; + } +} + +public static class ShapeBucketRegister +{ + public static void CheckShapeBucketOptions(ShapeBucketOptions options) + { + if (options.Enable) + { + if (options.SegmentsCount < 2) + { + throw new InvalidOperationException("SegmentsCount should >= 2"); + } + } + } + + public static void MergeOp(IPassManager iPassManager) + { + iPassManager.AddWithName("MergeNextCall").Configure(c => + { + c.Add(); + c.Add(); + }); + iPassManager.AddWithName("MergePrevCall").Configure(c => + { + c.Add(); + c.Add(); + }); + } + + public static void ToFusion(IPassManager p, bool onlyDynamic = false) => + p.AddWithName("ToFusion").Configure(c => + { + c.Add(onlyDynamic); + c.Add(onlyDynamic); + c.Add(onlyDynamic); + c.Add(onlyDynamic); + }); + + public static void Bucket(IPassManager p) + { + var shapeList = new Dictionary(); + p.Add(shapeList); + p.AddWithName("FusionBucket").Configure(c => + { + c.Add(shapeList); + }); + } + + public static void Rebuild(IPassManager p) + { + // rebuild + ToFusion(p, true); + Bucket(p); + } + + public static void MergeFusion(IPassManager p, bool singleVar) + { + if (!singleVar) + { + return; + } + + p.AddWithName("MergeBucketFusionPass"); + } + + public static void LostToFusion(IPassManager p, bool singleVar) => + p.AddWithName("LostToFusion").Configure(c => + { + c.Add(); + c.Add(); + c.Add(); + if (singleVar) + { + c.Add(); + } + }); + + public static void ClearMarker(IPassManager p) => + p.AddWithName("ClearSomeMarker").Configure(p => + { + p.Add(); + p.Add(); + }); + + public static void Simplify(IPassManager p) => + p.AddWithName("Simplify").Configure(c => + { + c.Add(); + c.Add(); + 
c.Add(); + c.Add(); + c.Add(); + c.Add(); + c.Add(); + c.Add(); + c.Add(); + c.Add(); + c.Add(); + }); +} + +public static class ShapeBucketHelper +{ + public static Dictionary MakeVarValuesForAllSegment(ShapeBucketOptions options) + { + int segmentCount = options.SegmentsCount; + var varRange = options.RangeInfo; + var varMap = options.VarMap; + var varAndInputAllSegment = varRange.ToDictionary(pair => pair.Key, pair => + { + var (min, max) = pair.Value; + var segments = ComputeSegmentList(segmentCount, min, max); + return segments; + }); + + var vars = varMap.Values.SelectMany(x => x).OfType().ToHashSet().ToArray(); + + // DimVarName -> Dict.key -> Dict.Value + var varValues = varAndInputAllSegment.ToDictionary( + pair => vars.FindFirst(v => v.Name == pair.Key), + pair => { return pair.Value.OrderByDescending(x => x).ToArray(); }); + return varValues; + } + + public static int[] ComputeSegmentList(int segmentCount, int min, int max) + { + var size = (max - min) / segmentCount; + return Enumerable.Range(0, segmentCount - 1).Select(i => min + (i * size)).Append(max).ToArray(); + } + + public static void ArgsChecker(Expr[] newArgs) + { + if (newArgs.Length == 0) + { + throw new InvalidOperationException("Empty Arg"); + } + + if (newArgs.Any(arg => arg is Var v && v.Name.StartsWith("var_"))) + { + throw new InvalidOperationException("Args has Var in fusion"); + } + + if (newArgs.Any(arg => arg is Marker m && m.Target is Const)) + { + throw new InvalidOperationException("Args has tuple"); + } + + if (newArgs.Any(arg => arg is IR.Tuple)) + { + throw new InvalidOperationException("Args has tuple"); + } + + if (newArgs.ToHashSet().Count != newArgs.Length) + { + throw new InvalidOperationException("Has Repeat args"); + } + } + + // clone origin Expr and Do replace for var + public static Expr ReplaceClone(Expr originBody, params (Var, Expr)[] originVarAndExpr) + { + var call = originBody.Clone(); + var finder = new FindVar(); + finder.Visit(call); + var newVars = finder.Vars; + originVarAndExpr.ForEach(pair => + { + var (v, newExpr) = pair; + var varShouldBeReplaced = newVars.FindFirst(newVar => newVar.Name == v.Name); + if (varShouldBeReplaced == null) + { + throw new InvalidOperationException(); + } + + ReplaceExpr(call, varShouldBeReplaced, newExpr); + }); + return call; + } + + public static void PrintEffectVar(string name, Var[] set) + { + Console.WriteLine($"{name} EffectVar:"); + foreach (var var in set) + { + Console.WriteLine(var.Name); + } + } + + public static Var[] InputDimVars(CompileSession session) + { + return session.CompileOptions.ShapeBucketOptions.VarMap.Values.SelectMany(x => x).OfType() + .ToHashSet().ToArray(); + } + + public static Var[] MakeEffectVarArray(CompileSession session, Dictionary varMap, params Expr[] args) + { + var dimVars = InputDimVars(session); + if (dimVars.Length == 1) + { + return dimVars; + } + + if (dimVars.Length == 0) + { + // todo: process this, in test should not have this + // throw new InvalidOperationException("MaybeError"); + } + + var visitor = new FindVar(); + args.ForEach(arg => + { + DumpIR(arg, "argExpr"); + var argShapeExpr = arg.EvaluateShapeExpr(varMap); + visitor.Visit(argShapeExpr); + }); + var vars = visitor.Vars.ToHashSet(); + + // PrintEffectVar("VisitorVars", vars.ToArray()); + var inputAndDimVarMap = + varMap.ToDictionary(pair => pair.Key, pair => pair.Value.OfType().ToHashSet().ToArray()); + var allDimVars = varMap.Values.SelectMany(x => x).OfType(); + var afterProcessVars = vars.SelectMany(var => + { + if 
(inputAndDimVarMap.TryGetValue(var, out var dimVars)) + { + return dimVars; + } + + if (allDimVars.Contains(var)) + { + return new[] { var }; + } + + return new[] { var }; + }).ToHashSet(); + return afterProcessVars.Intersect(allDimVars).ToHashSet().ToArray(); + } + + public static void DumpIR(Expr expr, string prefix, string? reletivePath = null, string? printPrefix = null) + { + if (DumpScope.Current.IsEnabled(DumpFlags.Rewrite)) + { + var s = prefix; + if (prefix.Length > 80) + { + s = s[..80]; + } + + Console.WriteLine($"{printPrefix} {prefix}"); + DumpScope.Current.DumpIR(expr, s, reletivePath); + } + } +} + +public class FindExpr : ExprVisitor +{ + private readonly List _list = new(); + private Func? _f; + private Expr[] _limit = Array.Empty(); + private Expr? _outerCall; + + public List Run(Expr expr, Expr[] limit, Expr outerCall, Func checker) + { + _f = checker; + _outerCall = outerCall; + _limit = limit; + Visit(expr); + return _list; + } + + protected override Expr DefaultVisitLeaf(Expr expr) + { + if (_f!(expr)) + { + _list.Add(expr); + } + + return expr; + } + + protected override Expr DispatchVisit(Expr expr) + { + if (_limit.Contains(expr)) + { + _list.Add(expr); + return expr; + } + + if (expr == _outerCall) + { + return expr; + } + + if (HasVisited(expr, out var result)) + { + return result; + } + + return MarkVisited(expr, base.DispatchVisit(expr)); + } +} + +public class FindVar : ExprVisitor +{ + public HashSet Vars { get; set; } = new(); + + // todo: if visit call(VarFusion), then return EffectVar + protected override Expr VisitLeafVar(Var expr) + { + Vars.Add(expr); + return expr; + } + + protected override Expr DefaultVisitLeaf(Expr expr) => expr; +} + +[RuleGenerator] +public sealed partial class ForceConvertOpChecker : RewriteRule +{ + public override Pattern Pattern => IsCall( + "call", + IsOp(op => CallValidator.IsForceConvert(op)), + GenerateParameters(null, IsWildcard())); + + // todo: is slice + public Expr? GetReplace(Call call) + { + if (!call.CheckedShape.IsFixed) + { + throw new InvalidOperationException("ForceConvertCall should has fixed shape after bucket"); + } + + return call; + } +} + +internal static class ExprArrayExtension +{ + public static IEnumerable OfNoConst(this IEnumerable args) + { + return args.Where(x => x is not TensorConst); + } +} + +internal class KeyValuePairKeyComparer : IEqualityComparer> +{ + public bool Equals(KeyValuePair x, KeyValuePair y) + { + return Equals(x.Key, y.Key); + } + + public int GetHashCode(KeyValuePair obj) + { + return HashCode.Combine(obj.Key); + } +} + +internal class OpCounter : ExprVisitor +{ + private readonly Dictionary _counter = new(); + + protected override Expr VisitCall(Call expr) + { + if (expr.Target is Op) + { + var handle = expr.Target.GetType().TypeHandle; + if (_counter.ContainsKey(handle)) + { + _counter[handle] += 1; + } + else + { + _counter[handle] = 1; + } + } + + return base.VisitCall(expr); + } +} diff --git a/src/Nncase.Passes/Rules/ShapeExpr/FoldGetItem.cs b/src/Nncase.Passes/Rules/ShapeExpr/FoldGetItem.cs index 4f09df3b2d..1e4ad2107d 100644 --- a/src/Nncase.Passes/Rules/ShapeExpr/FoldGetItem.cs +++ b/src/Nncase.Passes/Rules/ShapeExpr/FoldGetItem.cs @@ -1,6 +1,7 @@ // Copyright (c) Canaan Inc. All rights reserved. // Licensed under the Apache license. See LICENSE file in the project root for full license information. 
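+// FoldStackGetItem folds a Stack whose fields are GetItems 0..n-1 of one common
+// input back into that input, roughly provided those GetItems are the input's
+// only users. A sketch of the two outcomes:
+//
+//   Stack(x[0], x[1], x[2], x[3])  with len(x) == 4  ->  x
+//   Stack(x[0], x[1], x[2])        with len(x) == 4  ->  Slice(x, [0], [3], 1)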
+using System; using System.Linq; using Nncase.IR; using Nncase.PatternMatch; @@ -17,6 +18,7 @@ namespace Nncase.Passes.Rules.ShapeExpr; public partial class FoldStackGetItem : RewriteRule { public override Pattern Pattern => IsStack( + null, "stack", IsTuple("tuple", IsVArgsRepeat(list => Enumerable.Range(0, list.Length) @@ -26,7 +28,7 @@ public partial class FoldStackGetItem : RewriteRule private Pattern InputPattern => IsWildcard(); - private Expr? GetReplace(IR.Tuple tuple) + private Expr? GetReplace(Call stack, IR.Tuple tuple) { var getItems = tuple.Fields.ToArray().Select(x => (Call)x).ToArray(); var index = getItems.Select(x => ((TensorConst)x.Arguments[GetItem.Index.Index]).Value.ToScalar()); @@ -36,11 +38,18 @@ public partial class FoldStackGetItem : RewriteRule } var input = getItems[0].Arguments[GetItem.Input.Index]; - if (input.CheckedShape[0] != getItems.Length) + if (input.Users.Count != getItems.Length) { return null; } + // [1, 2, 3, 4] -> Stack(1, 2, 3) + // slice for more, (2, 3) is ok, but is fast?? + if (input.CheckedShape[0] != getItems.Length) + { + return IR.F.Tensors.Slice(input, new[] { 0 }, new[] { getItems.Length }, 1); + } + return input; } } diff --git a/src/Nncase.Tests.TestFixture/TransformBase/Compare.cs b/src/Nncase.Tests.TestFixture/TransformBase/Compare.cs index 23e3b91795..e8f7213cb6 100644 --- a/src/Nncase.Tests.TestFixture/TransformBase/Compare.cs +++ b/src/Nncase.Tests.TestFixture/TransformBase/Compare.cs @@ -128,10 +128,7 @@ public static bool TensorValueCompare(TensorValue pre, TensorValue post, float t public static bool TupleValueCompare(TupleValue a, TupleValue b, float thresh = 0.99f) { - if (a.Count != b.Count) - { - return false; - } + Assert.Equal(a.Count, b.Count); foreach (var (t1, t2) in a.AsTensors().Zip(b.AsTensors())) { diff --git a/src/Nncase.Tests/Evaluator/UnitTestShapeEvaluator.cs b/src/Nncase.Tests/Evaluator/UnitTestShapeEvaluator.cs index bb5bfe4ce5..97c9a8a5c0 100644 --- a/src/Nncase.Tests/Evaluator/UnitTestShapeEvaluator.cs +++ b/src/Nncase.Tests/Evaluator/UnitTestShapeEvaluator.cs @@ -21,6 +21,12 @@ public class UnitTestShapeEvaluator : TestClassBase { private readonly int _defaultDim = 4; + public static IEnumerable RangeData => new[] + { + new object[] { 1, 7, 1 }, + new object[] { 1, 7, 2 }, + }; + [Fact] public void TestConstant1() { @@ -46,7 +52,10 @@ public void TestWithVar() var dimVar = new Var(new TensorType(DataTypes.Int32, Shape.Scalar)); var newShape = new Expr[] { 1, 3, dimVar, 6 }; var varMap = new Dictionary { { input, newShape } }; - Assert.Equal(Stack(new IR.Tuple(newShape), 0), input.EvaluateShapeExpr(varMap)); + var dict = new Dictionary { { dimVar, Value.FromTensor(4) } }; + var expect = Stack(new IR.Tuple(newShape), 0).Evaluate(dict); + var result = input.EvaluateShapeExpr(varMap).Evaluate(dict); + Assert.Equal(expect, result); } [Fact] @@ -82,6 +91,7 @@ public void UnitTestReduceMean() public void UnitTestUnsqueeze() { TestOpShapeEval(input => Unsqueeze(input, new[] { 4 })); + TestOpShapeEval(input => Unsqueeze(input, new[] { -1 })); } [Fact] @@ -158,12 +168,50 @@ public void UnitTestShapeOf() public void UnitTestTranspose() { TestOpShapeEval(input => Transpose(input, new[] { 0, 2, 3, 1 })); + TestOpShapeEval(input => Transpose(input, new[] { 0, 2, 1, 3 })); } [Fact] public void UnitTestReshape() { TestOpShapeEval(input => Reshape(input, new[] { 1, 3, 12, -1 })); + TestOpShapeEval(input => Reshape(input, new[] { 1, -1, 12, 3 })); + } + + [Fact] + public void UnitTestGetItem() + { + var dimVar = new Var(new 
TensorType(DataTypes.Int32, Shape.Scalar)); + var input = new Var(new TensorType(DataTypes.Int32, new[] { Dimension.Unknown })); + var expr = input[1]; + var dict = new Dictionary { { input, new[] { dimVar } } }; + var shape = expr.EvaluateShapeExpr(dict); + var varValues = new Dictionary { { input, Value.FromTensor(new[] { 4 }) } }; + var shapeValue = shape.Evaluate(varValues).AsTensor().ToArray(); + var evalShape = expr + .Evaluate(new Dictionary { { input, Value.FromTensor(new[] { 2, 3, 4, 5 }) } }) + .AsTensor() + .Shape; + var fixedShape = evalShape.ToValueArray(); + Assert.Equal(fixedShape, shapeValue); + } + + [Fact] + public void UnitTestGetItemSingle() + { + var dimVar = new Var(new TensorType(DataTypes.Int32, Shape.Scalar)); + var input = new Var(new TensorType(DataTypes.Int32, new[] { Dimension.Unknown })); + var expr = input[0]; + var dict = new Dictionary { { input, new[] { dimVar } } }; + var shape = expr.EvaluateShapeExpr(dict); + var varValues = new Dictionary { { input, Value.FromTensor(new[] { 1 }) } }; + var shapeValue = shape.Evaluate(varValues).AsTensor().ToArray(); + var evalShape = expr + .Evaluate(new Dictionary { { input, Value.FromTensor(new[] { 2 }) } }) + .AsTensor() + .Shape; + var fixedShape = evalShape.ToValueArray(); + Assert.Equal(fixedShape, shapeValue); } [Fact] @@ -208,10 +256,72 @@ public void UnitTestPad() TestOpShapeEval(input => Pad(input, new[,] { { 1, 2 }, { 1, 3 }, { 2, 4 }, { 6, 1 } }, PadMode.Constant, 0f)); } + [Fact] + public void TestSpaceTobatch() + { + var dimVar = new Var(new TensorType(DataTypes.Int32, Shape.Scalar)); + var input = new Var(new TensorType(DataTypes.Float32, new[] { 1, Dimension.Unknown, 192 })); + var paddings = Tensor.From(new[] { 0, 1 }, new[] { 1, 2 }); + var expr = SpaceToBatch(input, new[] { 3 }, paddings); + var dict = new Dictionary { { input, new Expr[] { 1, dimVar, 192 } } }; + var shape = expr.EvaluateShapeExpr(dict); + var varValues = new Dictionary { { dimVar, Value.FromTensor(8) } }; + Dumpper.DumpIR(shape, "Shape"); + var shapeValue = shape.Evaluate(varValues).AsTensor().ToArray(); + var evalShape = expr + .Evaluate(new Dictionary { { input, Value.FromTensor(Testing.Rand(1, 8, 192)) } }) + .AsTensor() + .Shape; + var fixedShape = evalShape.ToValueArray(); + Assert.Equal(fixedShape, shapeValue); + } + + [Fact] + public void TestBatchToSpace() + { + var dimVar = new Var(new TensorType(DataTypes.Int32, Shape.Scalar)); + var input = new Var(new TensorType(DataTypes.Float32, new[] { Dimension.Unknown, 69, 192 })); + var paddings = Tensor.From(new[] { 0, 1 }, new[] { 1, 2 }); + var expr = BatchToSpace(input, new[] { 3 }, paddings); + var dict = new Dictionary { { input, new Expr[] { dimVar, 69, 192 } } }; + var shape = expr.EvaluateShapeExpr(dict); + var varValues = new Dictionary { { dimVar, Value.FromTensor(3) } }; + Dumpper.DumpIR(shape, "Shape"); + var shapeValue = shape.Evaluate(varValues).AsTensor().ToArray(); + var evalShape = expr + .Evaluate(new Dictionary { { input, Value.FromTensor(Testing.Rand(3, 69, 192)) } }) + .AsTensor() + .Shape; + var fixedShape = evalShape.ToValueArray(); + Assert.Equal(fixedShape, shapeValue); + } + [Fact] public void UnitTestSqueeze() { TestOpShapeEval(input => Squeeze(input, new[] { 0 })); + TestOpShapeEval(input => Squeeze(input, new[] { -4 })); + } + + [Theory] + [MemberData(nameof(RangeData))] + public void UnitTestRange(int beginV, int endV, int stepV) + { + var begin = new Var(new TensorType(DataTypes.Int32, Shape.Scalar)); + var end = new Var(new 
TensorType(DataTypes.Int32, Shape.Scalar)); + var step = new Var(new TensorType(DataTypes.Int32, Shape.Scalar)); + var expr = Range(begin, end, step); + var shape = expr.EvaluateShapeExpr(); + var varValues = new Dictionary + { + { begin, Value.FromTensor(beginV) }, + { end, Value.FromTensor(endV) }, + { step, Value.FromTensor(stepV) }, + }; + + var shapeValue = shape.Evaluate(varValues).AsTensor().ToArray(); + var fixedShape = expr.Evaluate(varValues).AsTensor().Shape.ToValueArray(); + Assert.Equal(fixedShape, shapeValue); } private Expr MakeDim() => new Var(new TensorType(DataTypes.Int32, Shape.Scalar)); diff --git a/src/Nncase.Tests/Rules/Neutral/UnitTestSqueezeTransposeShape.cs b/src/Nncase.Tests/Rules/Neutral/UnitTestSqueezeTransposeShape.cs index 07cf46fed0..166cc666d4 100644 --- a/src/Nncase.Tests/Rules/Neutral/UnitTestSqueezeTransposeShape.cs +++ b/src/Nncase.Tests/Rules/Neutral/UnitTestSqueezeTransposeShape.cs @@ -9,6 +9,7 @@ using System.Text; using System.Threading.Tasks; using Nncase.Diagnostics; +using Nncase.IR.Math; using Nncase.Passes; using Nncase.Passes.Rules.Neutral; using Nncase.Tests.TestFixture; @@ -60,3 +61,53 @@ public void TestSqueezeTransposeShapeNegative(int[] shape, int[] perm) TestNotMatch(rootPre); } } + +public class UnitTestSqueezeBinaryShape : TransformTestBase +{ + public static IEnumerable TestSqueezeBinaryShapePosivateData => + new[] + { + new object[] { new[] { 1 }, new[] { 1, 2, 4, 8, 3 } }, + new object[] { new[] { 1, 2, 4, 8, 3 }, new[] { 1 } }, + new object[] { new[] { 1, 2, 4, 8, 3 }, new[] { 1, 1, 4, 1, 1 } }, + new object[] { new[] { 1, 2, 4, 8, 3 }, new[] { 1, 1, 4, 8, 1 } }, + new object[] { new[] { 1, 2, 4, 8, 3 }, new[] { 1, 2, 4, 8, 3 } }, + new object[] { new[] { 1, 2, 1, 8, 1 }, new[] { 3, 1, 6, 1, 1 } }, + new object[] { new[] { 1, 2, 4, 1, 3, 1, 3 }, new[] { 1, 2, 4, 1, 1, 5, 1 } }, + new object[] { new[] { 2, 3, 4, 8, 3, 5, 3, 5 }, new[] { 2, 3, 4, 8, 1, 5, 3, 5 } }, + }; + + public static IEnumerable TestSqueezeBinaryShapeNegativeData => + new[] + { + new object[] { new[] { 2 }, new[] { 2 } }, + new object[] { new[] { 1, 2 }, new[] { 1 } }, + new object[] { new[] { 1, 2 }, new[] { 1, 2 } }, + new object[] { new[] { 2 }, new[] { 1 } }, + new object[] { new[] { 1, 2, 4 }, new[] { 1 } }, + new object[] { new[] { 1, 2, 4 }, new[] { 1, 2, 1 } }, + new object[] { new[] { 1, 2, 4 }, new[] { 1, 2, 4 } }, + new object[] { new[] { 1, 2, 1, 5, 8 }, new[] { 3, 1, 4, 1, 8 } }, + new object[] { new[] { 2, 3, 4, 8, 3, 5, 3, 5 }, new[] { 2, 1, 4, 8, 1, 5, 3, 5 } }, + }; + + [Theory] + [MemberData(nameof(TestSqueezeBinaryShapePosivateData))] + public void TestSqueezeBinaryShapePositivate(int[] lShape, int[] rShape) + { + var a = Random.Normal(DataTypes.Float32, 0, 1, 0, lShape); + var b = Random.Normal(DataTypes.Float32, 0, 1, 0, rShape); + var rootPre = Math.Binary(BinaryOp.Add, a, b); + TestMatched(rootPre); + } + + [Theory] + [MemberData(nameof(TestSqueezeBinaryShapeNegativeData))] + public void TestSqueezeBinaryShapeNegative(int[] lShape, int[] rShape) + { + var a = Random.Normal(DataTypes.Float32, 0, 1, 0, lShape); + var b = Random.Normal(DataTypes.Float32, 0, 1, 0, rShape); + var rootPre = Math.Binary(BinaryOp.Add, a, b); + TestNotMatch(rootPre); + } +} diff --git a/src/Nncase.Tests/Rules/ShapeBucket/ShapeBucketTest.cs b/src/Nncase.Tests/Rules/ShapeBucket/ShapeBucketTest.cs new file mode 100644 index 0000000000..b014ad2d6f --- /dev/null +++ b/src/Nncase.Tests/Rules/ShapeBucket/ShapeBucketTest.cs @@ -0,0 +1,366 @@ +// Copyright (c) Canaan Inc. 
All rights reserved. +// Licensed under the Apache license. See LICENSE file in the project root for full license information. + +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using Nncase.Diagnostics; +using Nncase.IR; +using Nncase.IR.Tensors; +using Nncase.Passes; +using Nncase.Passes.Rules.Neutral; +using Nncase.Passes.Rules.ShapeBucket; +using Nncase.Quantization; +using Nncase.Tests.ReWrite.FusionTest; +using Nncase.Tests.TestFixture; +using Nncase.Tests.TransformTest; +using Nncase.Utilities; +using Xunit; +using Xunit.Abstractions; +using static Nncase.IR.F.Math; +using static Nncase.IR.F.NN; +using static Nncase.IR.F.Tensors; +using static Nncase.Tests.Rules.ShapeBucket.ShapeBucketTestHelper; + +namespace Nncase.Tests.Rules.ShapeBucket; + +[AutoSetupTestMethod(InitSession = true)] +public class ShapeBucketTest : TransformTestBase +{ + [Fact] + public void TestFindVar() + { + var v1 = new Var(new TensorType(DataTypes.Int32, Shape.Scalar)); + var v2 = new Var(new TensorType(DataTypes.Int32, Shape.Scalar)); + var expr = ((v1 * 4) + (v2 * 3)) * 2; + var visitor = new FindVar(); + visitor.Visit(expr); + Assert.Equal(visitor.Vars, new HashSet(new[] { v1, v2 })); + } + + [Fact] + public void TestBucketPad() + { + var input = Testing.Rand(1, 2, 16, 16); + var fixedShape = new[] { 1, 3, 24, 24 }; + var p = new Call(new BucketPad(), input, fixedShape); + var (_, kmodel) = Testing.BuildKModel("test", new IRModule(new Function(p)), CompileSession); + var result = Testing.RunKModel(kmodel, "call_arg", Array.Empty()); + var pads = fixedShape - Cast(ShapeOf(input), DataTypes.Int32); + var paddings = Transpose( + Stack(new IR.Tuple(Enumerable.Repeat(0, fixedShape.Length).ToArray(), pads), 0), + new[] { 1, 0 }); + var fixedInput = IR.F.NN.Pad(input, paddings, PadMode.Constant, Cast(0, input.ElementType)); + var fixedResult = new Call(new FixShape(), fixedInput, fixedShape); + var origin = fixedResult.Evaluate(); + var cos = Comparator.CosSimilarity(origin, result)[0]; + Assert.True(cos > 0.999); + } + + private Var Scalar(string name) => new Var(new TensorType(DataTypes.Int32, Shape.Scalar)); +} + +[AutoSetupTestMethod(InitSession = true)] +public class TestMergePrevCallToFusion : TransformTestBase +{ + [Fact] + public void TestMergePrevCallSingleInput() + { + var input = Testing.Rand(1, 3, 24, 24); + var inputVar = MakeVar(input); + var transpose = Transpose(inputVar, new[] { 3, 2, 1, 0 }); + var v = new Var(transpose.CheckedType); + var abs = Abs(v); + var f = new BucketFusion("stackvm", abs, new[] { v }, Array.Empty()); + var c = new Call(f, transpose); + TestMatched(c, new Dictionary { { inputVar, Value.FromTensor(input) } }); + } + + [Fact] + public void TestBodyMultiInputMergeLeft() + { + var mainInput0 = Testing.Rand(1, 3, 24, 24); + var mainInput1 = Testing.Rand(1, 3, 24, 24); + var mainVar0 = MakeVar(mainInput0); + var mainVar1 = MakeVar(mainInput0); + var fusionVar0 = MakeVar(mainInput0); + var fusionVar1 = MakeVar(mainInput1); + var concat = Concat(new IR.Tuple(fusionVar0, fusionVar1), 0); + var f = new BucketFusion("stackvm", concat, new[] { fusionVar0, fusionVar1 }, Array.Empty()); + var abs = Abs(mainVar0); + var c = new Call(f, abs, mainVar1); + TestMatched( + c, + new Dictionary + { + { mainVar0, Value.FromTensor(mainInput0) }, { mainVar1, Value.FromTensor(mainInput1) }, + }); + } + + [Fact] + public void TestBodyMultiInputMergeRight() + { + var mainInput0 = Testing.Rand(1, 3, 24, 24); 
+ var mainInput1 = Testing.Rand(1, 3, 24, 24); + var mainVar0 = MakeVar(mainInput0); + var mainVar1 = MakeVar(mainInput0); + var fusionVar0 = MakeVar(mainInput0); + var fusionVar1 = MakeVar(mainInput1); + var concat = Concat(new IR.Tuple(fusionVar0, fusionVar1), 0); + var f = new BucketFusion("stackvm", concat, new[] { fusionVar0, fusionVar1 }, Array.Empty()); + var abs = Abs(mainVar1); + var c = new Call(f, Softmax(mainInput0, 0), abs); + TestMatched( + c, + new Dictionary + { + { mainVar0, Value.FromTensor(mainInput0) }, { mainVar1, Value.FromTensor(mainInput1) }, + }); + } + + [Fact(Skip = "Reshape is not stable")] + public void TestPrevMultiInputForDynamicReshape() + { + // fusion + var fusionVar = new Var(new TensorType(DataTypes.Float32, new[] { 1, 3, 24, 24 })); + var transpose = Transpose(fusionVar, new[] { 3, 2, 1, 0 }); + var f = new BucketFusion("stackvm", transpose, new[] { fusionVar }, Array.Empty()); + + // input + var input = Testing.Rand(3, 24, 24); + var inputVar = new Var(new TensorType(input.ElementType, input.Shape)); + var newShape = Concat(new IR.Tuple(new[] { 1L }, ShapeOf(inputVar)), 0); + var reshape = Reshape(Abs(inputVar), newShape); + var c = new Call(f, reshape); + TestMatched(c, new Dictionary { { inputVar, Value.FromTensor(input) } }); + } + + [Fact] + public void TestForMergeConcat() + { + var input0 = Testing.Rand(1, 3, 24, 24); + var input1 = Testing.Rand(1, 3, 24, 24); + var inputVar0 = MakeVar(input0); + var inputVar1 = MakeVar(input1); + var concat = Concat(new IR.Tuple(inputVar0, inputVar1), 0); + var v = new Var(concat.CheckedType); + var abs = Abs(v); + var f = new BucketFusion("stackvm", abs, new[] { v }, Array.Empty()); + var c = new Call(f, concat); + TestMatched( + c, + new Dictionary + { + { inputVar0, Value.FromTensor(input0) }, { inputVar1, Value.FromTensor(input1) }, + }); + } + + [Fact] + public void TestMatMulAndConcat() + { + var lhs = new Var("i0", new TensorType(DataTypes.Float32, new[] { 1, 3, 24, 24 })); + var rhs = new Var("i1", new TensorType(DataTypes.Float32, new[] { 2, 3, 24, 24 })); + var mm = IR.F.Math.MatMul(lhs, rhs); + var f = new BucketFusion("stackvm", mm, new[] { lhs, rhs }, Array.Empty()); + + var input0 = Testing.Rand(1, 3, 24, 24); + var input1 = Testing.Rand(1, 3, 24, 24); + var input2 = Testing.Rand(1, 3, 24, 24); + var inputVar0 = MakeVar(input0); + var inputVar1 = MakeVar(input1); + var inputVar2 = MakeVar(input2); + var concat = Concat(new IR.Tuple(inputVar1, inputVar2), 0); + var c = new Call(f, Softmax(inputVar0, 0), concat); + TestMatched( + c, + new Dictionary + { + { inputVar0, Value.FromTensor(input0) }, + { inputVar1, Value.FromTensor(input1) }, + { inputVar2, Value.FromTensor(input2) }, + }); + } + + [Fact] + public void TestAfterMergeSameInput() + { + var input = Testing.Rand(1, 3, 24, 24); + var inputVar = new Var(new TensorType(input.ElementType, input.Shape)); + var abs = Abs(inputVar); + + var fusionVar0 = new Var(new TensorType(input.ElementType, input.Shape)); + var fusionVar1 = new Var(new TensorType(input.ElementType, input.Shape)); + var concat = Concat(new IR.Tuple(fusionVar0, fusionVar1), 0); + var f = new BucketFusion("stackvm", concat, new Var[] { fusionVar0, fusionVar1 }, Array.Empty()); + var c = new Call(f, Sqrt(abs), Neg(abs)); + TestMatched(c, new Dictionary { { inputVar, Value.FromTensor(input) } }); + } + + [Fact(Skip = "Reshape is not stable")] + public void TestMatMulReshape() + { + // 左边的表达式是右边表达式的一部分 + // 重新构造prev的call,使用新的var来替换,因此在替换%0的时候,ShapeOf()的参数也变成了var,但实际上ShapeOf的参数应当还是原始的 
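+ // (English gloss of the notes above: the left-hand expression is a
+ // sub-expression of the right-hand one. The prev call is rebuilt with a new
+ // var substituted in, so while replacing %0 the argument of ShapeOf() turns
+ // into that var as well, although ShapeOf's argument should still be the
+ // original expression.)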
+ // %0 = Add(BinaryOp.Add, %var_88: f32[1,3,24,24], const(f32[1] : {1f})) 2 -1378211376: // f32[1,3,24,24] + // %1 = ShapeOf(%0) 1 2067123334: // i64[4] + // %2 = Gather(%1, const(i64 : 0), const(i64 : 0)) 1 805902410: // i64 + // %3 = Reshape(%2, const(i64[1] : {1L})) 1 -466610003: // i64[1] + // %4 = (%3, const(i64[1] : {3L}), const(i64[1] : {24L}), const(i64[1] : {24L})): // (i64[1], i64[1], i64[1], i64[1]) + // + // %5 = Concat(%4, const(i32 : 0)) 1 80776753: // i64[4] + // %6 = Reshape(%0, %5) 2 -1638748643: // f32[?,?,?,?] + // %7 = (%6): // (f32[?,?,?,?]) + var input = Testing.Rand(1, 3, 24, 24); + var lhs = MakeVar(input); + var add = Add(lhs, new[] { 1f }); + var rhs = Reshape(add, Concat( + new IR.Tuple(Reshape(Gather(ShapeOf(add), 0L, 0L), new[] { 1L }), new[] { 3L }, new[] { 24L }, new[] { 24L }), 0)); + + var lhsVar = new Var("lhs", new TensorType(input.ElementType, input.Shape)); + var rhsVar = new Var("rhs", new TensorType(input.ElementType, input.Shape)); + var mm = IR.F.Math.MatMul(lhsVar, rhsVar); + var f = new BucketFusion("stackvm", mm, new Var[] { lhsVar, rhsVar }, Array.Empty()); + var c = new Call(f, lhs, rhs); + TestMatched(c, new Dictionary { { lhs, Value.FromTensor(input) } }); + } + + [Fact] + public void TestMergeStackWithConstant() + { + var input0 = Testing.Rand(1, 3, 24, 24); + var input1 = Testing.Rand(1, 3, 24, 24); + var lhs = MakeVar(input0); + var rhs = MakeVar(input1); + + var scalarInput = Testing.Rand(); + var scalarInputVar = MakeVar(scalarInput); + var other = Stack(new IR.Tuple(scalarInputVar, 1f, 2f), 0); + + var lhsVar = MakeVar(input0); + var rhsVar = MakeVar(input1); + var otherVar = MakeVar(other); + var mm = IR.F.Math.MatMul(lhsVar, rhsVar); + var f = new BucketFusion("stackvm", mm * otherVar[1], new Var[] { lhsVar, rhsVar, otherVar }, Array.Empty()); + var c = new Call(f, lhs, rhs, other); + var result = TestMatched(c, new Dictionary + { + { lhs, Value.FromTensor(input0) }, + { rhs, Value.FromTensor(input1) }, + { scalarInputVar, Value.FromTensor(scalarInput) }, + }); + var fusion = GetResultFusion(result); + + // constant should not be var + Assert.Equal(3, fusion.Parameters.Length); + } + + // v1:add(a1, an) v2:add(a2, an) + // fusion(v1, v2) + // | + // fusion(a1, a2, an) + [Fact] + public void TestSameInputMerge() + { + var input0 = Testing.Rand(1, 3, 24, 24); + var input1 = Testing.Rand(1, 3, 24, 24); + var other = Testing.Rand(1, 3, 24, 24); + var in0Var = MakeVar(input0); + var in1Var = MakeVar(input1); + var otherVar = MakeVar(other); + var add0 = in0Var + otherVar; + var add1 = in1Var + otherVar; + var lhs = MakeVar(add0); + var rhs = MakeVar(add1); + var mm = IR.F.Math.MatMul(lhs, rhs); + var f = new BucketFusion("stackvm", mm, new Var[] { lhs, rhs }, Array.Empty()); + var c = new Call(f, add0, add1); + var result = TestMatched(c, new Dictionary + { + { in0Var, Value.FromTensor(input0) }, + { in1Var, Value.FromTensor(input1) }, + { otherVar, Value.FromTensor(other) }, + }); + var fusion = GetResultFusion(result); + Assert.Equal(3, fusion.Parameters.Length); + } + + [Fact] + public void TestMergeInputWhichHadBeMerged() + { + // fusion(add(input, other), other) -> fusion(input, other) + var input0 = Testing.Rand(1, 3, 24, 24); + var other = Testing.Rand(1, 3, 24, 24); + var in0Var = MakeVar(input0); + var otherVar = MakeVar(other); + var add0 = in0Var + otherVar; + var lhs = MakeVar(add0); + var rhs = MakeVar(otherVar); + var mm = IR.F.Math.MatMul(lhs, rhs); + var f = new BucketFusion("stackvm", mm, new Var[] { lhs, rhs }, 
Array.Empty()); + var c = new Call(f, add0, otherVar); + var result = TestMatched(c, new Dictionary + { + { in0Var, Value.FromTensor(input0) }, + { otherVar, Value.FromTensor(other) }, + }); + var fusion = GetResultFusion(result); + Assert.Equal(2, fusion.Parameters.Length); + } + + // avoid + // fusion -> nextCall -> nextCallUser -> user1 + // -> user2 + // 这种情况在合并nextCall后,如果nextCallUser被匹配到了,那么此时只会有一个user,因此暂时不rewrite + [Fact] + public void TestMergeNextWithUserHasMultiUser() + { + var input0 = Testing.Rand(1, 3, 24, 24); + var in0Var = new Var(new TensorType(input0.ElementType, input0.Shape)); + var a = MakeSingleSimpleFusionCall(Abs, Softmax(in0Var, 0)); + var s = Sqrt(a); + var sin = Sin(s); + var e = Exp(sin); + var f = Floor(sin); + var body = e + f; + var newBody = TestMatched(body, new Dictionary { { in0Var, Value.FromTensor(input0) } }); + var c = new FusionCounterVisitor(); + c.Visit(newBody); + Assert.Equal(1, c.Count); + TestNotMatch(newBody); + } + + [Fact] + public void TestMergeInputInTupleWhichHadBeMerged() + { + var lhs = new Var(new TensorType(DataTypes.Int32, new[] { 1 })); + var rhs = new Var(new TensorType(DataTypes.Int32, new[] { 2 })); + var bn = lhs + rhs; + var f = new BucketFusion("stackvm", bn, new Var[] { lhs, rhs }, Array.Empty()); + + var input = new Var(new TensorType(DataTypes.Int32, Shape.Scalar)); + var r = Reshape(input, new[] { 1 }); + var concat = Concat(new IR.Tuple(r, (Expr)new[] { 1 }), 0); + var c = new Call(f, r, concat); + var result = TestMatched(c, new Dictionary + { + { input, Value.FromTensor(2) }, + }); + var call = (Call)result; + var fusion = (BucketFusion)call.Target; + Assert.Equal(1, fusion.Parameters.Length); + Assert.Equal(1, call.Arguments.Length); + } + + private static BucketFusion GetResultFusion(Expr result) + { + var fusion = (BucketFusion)((Call)result).Target; + return fusion; + } + + private static Var MakeVar(Tensor mainInput0) => new("input", new TensorType(mainInput0.ElementType, mainInput0.Shape)); + + private static Var MakeVar(Expr expr) => new("input", expr.CheckedType); +} diff --git a/src/Nncase.Tests/Rules/ShapeBucket/ShapeBucketTestHelper.cs b/src/Nncase.Tests/Rules/ShapeBucket/ShapeBucketTestHelper.cs new file mode 100644 index 0000000000..af7c01783c --- /dev/null +++ b/src/Nncase.Tests/Rules/ShapeBucket/ShapeBucketTestHelper.cs @@ -0,0 +1,32 @@ +// Copyright (c) Canaan Inc. All rights reserved. +// Licensed under the Apache license. See LICENSE file in the project root for full license information. 
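+// Shared helpers for the shape-bucket tests: MakeFun builds a "main" Function,
+// and the fusion helpers wrap a small expression body in a BucketFusion over
+// fresh Vars, returning a Call of that fusion with the given arguments,
+// e.g. (illustrative):
+//
+//   var call = MakeSingleSimpleFusionCall(Abs, Softmax(inputVar, 0));
+//   // => Call(BucketFusion("stackvm", Abs(v), new[] { v }), Softmax(inputVar, 0))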
+ +using System; +using System.Linq; +using Nncase.IR; +using Nncase.Passes.Rules.ShapeBucket; + +namespace Nncase.Tests.Rules.ShapeBucket; + +public static class ShapeBucketTestHelper +{ + internal static Function MakeFun(Expr output, Var[] inputVar) => new Function("main", output, inputVar); + + internal static Call MakeSingleSimpleFusionCall(Func ctor, Expr arg) + { + var v = new Var(arg.CheckedType); + var abs = ctor(v); + var f = new BucketFusion("stackvm", abs, new[] { v }, Array.Empty()); + var c = new Call(f, arg); + return c; + } + + internal static Call MakeSimpleFusionCall(Func ctor, params Expr[] args) + { + var paramList = args.Select(x => new Var(x.CheckedType)).ToArray(); + var abs = ctor(paramList); + var f = new BucketFusion("stackvm", abs, paramList, Array.Empty()); + var c = new Call(f, args); + return c; + } +} diff --git a/src/Nncase.Tests/Rules/ShapeBucket/UnitCallToFusionTest.cs b/src/Nncase.Tests/Rules/ShapeBucket/UnitCallToFusionTest.cs new file mode 100644 index 0000000000..511f6b2e87 --- /dev/null +++ b/src/Nncase.Tests/Rules/ShapeBucket/UnitCallToFusionTest.cs @@ -0,0 +1,160 @@ +// Copyright (c) Canaan Inc. All rights reserved. +// Licensed under the Apache license. See LICENSE file in the project root for full license information. + +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using Nncase.Diagnostics; +using Nncase.IR; +using Nncase.IR.Tensors; +using Nncase.Passes; +using Nncase.Passes.Rules.Neutral; +using Nncase.Passes.Rules.ShapeBucket; +using Nncase.Quantization; +using Nncase.Tests.ReWrite.FusionTest; +using Nncase.Tests.TestFixture; +using Nncase.Tests.TransformTest; +using Nncase.Utilities; +using Xunit; +using Xunit.Abstractions; +using static Nncase.IR.F.Math; +using static Nncase.IR.F.NN; +using static Nncase.IR.F.Tensors; + +namespace Nncase.Tests.Rules.ShapeBucket; + +[AutoSetupTestMethod(InitSession = true)] +public class UnitCallToFusionTest : TransformTestBase +{ + [Fact] + public void TestMarkerCallToFusion() + { + var input = Testing.Rand(1, 3, 24, 24); + var inputVar = new Var(new TensorType(input.ElementType, input.Shape)); + var m1 = new Marker("RangeOf", inputVar, new[] { 0.1f, 0.2f }); + var abs = Abs(m1); + var m2 = new Marker("RangeOf", abs, new[] { -0.1f, 0.2f }); + var post = TestMatched( + m2, + new Dictionary { { inputVar, Value.FromTensor(input) } }); + Assert.True(post is Marker); + var postCall = (Call)((Marker)post).Target; + var fusion = (BucketFusion)postCall.Target; + Assert.True(postCall.Arguments[0] is Marker); + Assert.True(fusion.Body is Marker); + } + + [Fact] + public void TestMutliUserCallToFusion() + { + var input = Testing.Rand(1, 3, 24, 24); + var inputVar = new Var(new TensorType(input.ElementType, input.Shape)); + var abs = Abs(Softmax(inputVar, 0)); + var fusionVar1 = new Var(new TensorType(input.ElementType, input.Shape)); + var c1 = new Call(new BucketFusion("stackvm", fusionVar1 + 1f, new[] { fusionVar1 }, Array.Empty()), abs); + var fusionVar2 = new Var(new TensorType(input.ElementType, input.Shape)); + var c2 = new Call(new BucketFusion("stackvm", fusionVar2 - 1f, new[] { fusionVar2 }, Array.Empty()), abs); + var body = new IR.Tuple(c1, c2); + Dumpper.DumpIR(body, "Body"); + TestMatched(body, new Dictionary { { inputVar, Value.FromTensor(input) } }); + } + + [Fact] + public void TestConcatToFusion() + { + var input0 = Testing.Rand(1, 3, 24, 24); + var inputVar0 = new Var(new TensorType(input0.ElementType, 
input0.Shape)); + var input1 = Testing.Rand(1, 3, 24, 24); + var inputVar1 = new Var(new TensorType(input1.ElementType, input1.Shape)); + var input2 = Testing.Rand(1, 3, 24, 24); + var inputVar2 = new Var(new TensorType(input2.ElementType, input2.Shape)); + var inputs = new[] { inputVar0, inputVar1, inputVar2 }.Select(x => Softmax(x, 0)).ToArray(); + var cat = Concat(new IR.Tuple(inputs), 0); + TestMatched( + cat, + new Dictionary + { + { inputVar0, Value.FromTensor(input0) }, + { inputVar1, Value.FromTensor(input1) }, + { inputVar2, Value.FromTensor(input2) }, + }); + } + + [Fact] + public void TestConcatToFusionWithConst() + { + var input0 = Testing.Rand(1, 3, 24, 24); + var inputVar0 = new Var(new TensorType(input0.ElementType, input0.Shape)); + var input1 = Testing.Rand(1, 3, 24, 24); + var inputVar1 = new Var(new TensorType(input1.ElementType, input1.Shape)); + var input2 = Testing.Rand(1, 3, 24, 24); + var inputs = new Expr[] { inputVar0, inputVar1 }.Select(x => (Expr)Softmax(x, 0)).Append(input2).ToArray(); + var cat = Concat(new IR.Tuple(inputs), 0); + TestMatched( + cat, + new Dictionary + { + { inputVar0, Value.FromTensor(input0) }, + { inputVar1, Value.FromTensor(input1) }, + }); + } + + [Fact] + public void TestConcatSingleInputToFusion() + { + var input0 = Testing.Rand(1, 3, 24, 24); + var inputVar0 = new Var(new TensorType(input0.ElementType, input0.Shape)); + var inputs = new[] { inputVar0 }.Select(x => Softmax(x, 0)).ToArray(); + var cat = Concat(new IR.Tuple(inputs), 0); + TestMatched( + cat, + new Dictionary + { + { inputVar0, Value.FromTensor(input0) }, + }); + } + + [Fact] + public void TestReshapeToFusion() + { + var input0 = Testing.Rand(1, 3, 24, 24); + var inputVar0 = new Var(new TensorType(input0.ElementType, input0.Shape)); + var s = Softmax(inputVar0, 0); + var r = Reshape(s, Require(true, ShapeOf(s))); + TestMatched(r, new Dictionary { { inputVar0, Value.FromTensor(input0) } }); + } + + [Fact] + public void TestComplexReshapeToFusion() + { + var input0 = Testing.Rand(1, 3, 24, 24); + var inputVar0 = new Var(new TensorType(input0.ElementType, input0.Shape)); + var s = Softmax(inputVar0, 0); + var r = Reshape(s, Require(true, ShapeOf(s))); + TestMatched(r, new Dictionary { { inputVar0, Value.FromTensor(input0) } }); + } + + [Fact] + public void TestNoNest() + { + var input0 = Testing.Rand(1, 3, 24, 24); + var inputVar0 = new Var(new TensorType(input0.ElementType, input0.Shape)); + var after = TestMatched( + Sqrt(Abs(Softmax(inputVar0, 0))), + new Dictionary { { inputVar0, Value.FromTensor(input0) } }); + + var v = new FusionCounterVisitor(); + v.Visit(after); + Assert.Equal(2, v.Count); + if (after is Call { Target: BucketFusion fusion } c) + { + Assert.Equal(1, c.Arguments.Length); + var n = new FusionCounterVisitor(); + n.Visit(fusion.Body); + Assert.Equal(0, n.Count); + } + } +} diff --git a/src/Nncase.Tests/Rules/ShapeBucket/UnitTestMergeMultiUserFusion.cs b/src/Nncase.Tests/Rules/ShapeBucket/UnitTestMergeMultiUserFusion.cs new file mode 100644 index 0000000000..d348593805 --- /dev/null +++ b/src/Nncase.Tests/Rules/ShapeBucket/UnitTestMergeMultiUserFusion.cs @@ -0,0 +1,303 @@ +// Copyright (c) Canaan Inc. All rights reserved. +// Licensed under the Apache license. See LICENSE file in the project root for full license information. 
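+// Tests for MergeMultiUsersFusion: when several BucketFusion calls consume the
+// same producer fusion (and merging would not create a cycle), they are expected
+// to collapse into a single fusion. RunTest evaluates the body before and after
+// the pass, checks the results still match and counts the remaining fusions;
+// RunTestNotMatch asserts the body hash is unchanged.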
+ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using Nncase.Diagnostics; +using Nncase.IR; +using Nncase.IR.Tensors; +using Nncase.Passes; +using Nncase.Passes.Rules.Neutral; +using Nncase.Passes.Rules.ShapeBucket; +using Nncase.Quantization; +using Nncase.Tests.ReWrite.FusionTest; +using Nncase.Tests.TestFixture; +using Nncase.Tests.TransformTest; +using Nncase.Utilities; +using Xunit; +using Xunit.Abstractions; +using static Nncase.IR.F.Math; +using static Nncase.IR.F.NN; +using static Nncase.IR.F.Tensors; +using static Nncase.Tests.Rules.ShapeBucket.ShapeBucketTestHelper; +using Tuple = Nncase.IR.Tuple; + +namespace Nncase.Tests.Rules.ShapeBucket; + +[AutoSetupTestMethod(InitSession = true)] +public class UnitTestMergeMultiUserFusion : TransformTestBase +{ + [Fact] + public async Task TestSimple() + { + var input = Testing.Rand(1, 3, 24, 24); + var inputVar = new Var("inputVar", new TensorType(input.ElementType, input.Shape)); + + var callee = MakeSingleSimpleFusionCall(Abs, inputVar); + var caller0 = MakeSingleSimpleFusionCall(Sqrt, callee); + var caller1 = MakeSingleSimpleFusionCall(Ceil, callee); + var output = new IR.Tuple(caller0, caller1); + var dict = new Dictionary { { inputVar, Value.FromTensor(input) } }; + await RunTest(output, new[] { inputVar }, dict); + } + + [Fact] + public async Task TestHasSameInput() + { + // tr = transpose(input) + // callee = Abs(tr) + // callee + tr | callee - tr + var input = Testing.Rand(1, 3, 24, 24); + var inputVar = new Var("inputVar", new TensorType(input.ElementType, input.Shape)); + var tr = Transpose(inputVar, new[] { 3, 2, 1, 0 }); + var callee = MakeSingleSimpleFusionCall(Abs, tr); + var caller0 = MakeSimpleFusionCall(args => args[0] + args[1], callee, tr); + var caller1 = MakeSimpleFusionCall(args => args[0] - args[1], callee, tr); + var output = new IR.Tuple(caller0, caller1); + var dict = new Dictionary { { inputVar, Value.FromTensor(input) } }; + await RunTest(output, new[] { inputVar }, dict); + } + + [Fact] + public async Task TestWithRing() + { + var input = Testing.Rand(1, 3, 24, 24); + var inputVar = new Var(new TensorType(input.ElementType, input.Shape)); + var leakyRelu = MakeSingleSimpleFusionCall(expr => LeakyRelu(expr, 0.1), inputVar); + var abs = MakeSingleSimpleFusionCall(Abs, leakyRelu); + var sp = ShapeOf(abs); + var data = ConstantOfShape(sp, 0f); + var binary = MakeSimpleFusionCall(args => args[0] - args[1], leakyRelu, data); + var output = binary; + var dict = new Dictionary { { inputVar, Value.FromTensor(input) } }; + await RunTestNotMatch(output, new[] { inputVar }, dict); + } + + [Fact] + public async Task TestSeqNoRing() + { + var input = Testing.Rand(1, 3, 24, 24); + var con = Testing.Rand(1, 3, 24, 24); + var inputVar = new Var(new TensorType(input.ElementType, input.Shape)); + var abs0 = Softmax(new[] { 1f }, 0); + var abs1 = Softmax(new[] { 2f }, 0); + var mm1 = MakeSingleSimpleFusionCall(expr => IR.F.Math.MatMul(expr, con), Softmax(inputVar, 0)); + var body = MakeSimpleFusionCall( + expr => IR.F.Math.MatMul(expr[0], Testing.Rand(1, 3, 24, 24)) * expr[1] * expr[2], mm1, abs0, abs1); + await RunTest(body, new[] { inputVar }, new Dictionary { { inputVar, Value.FromTensor(input) } }); + } + + [Fact] + public void MergeFusionTuple() + { + var input0 = Testing.Rand(1, 3, 24, 24); + var inputVar0 = new Var(new TensorType(input0.ElementType, input0.Shape)); + var input1 = Testing.Rand(1, 3, 24, 24); + var inputVar1 = 
new Var(new TensorType(input1.ElementType, input1.Shape)); + var input2 = Testing.Rand(1, 3, 24, 24); + var inputVar2 = new Var(new TensorType(input2.ElementType, input2.Shape)); + var a1 = MakeSingleSimpleFusionCall(Abs, inputVar0); + var a2 = MakeSingleSimpleFusionCall(Abs, inputVar1); + var a3 = MakeSingleSimpleFusionCall(Abs, inputVar2); + TestMatched( + new IR.Tuple(a1, a2, a3), + new Dictionary + { + { inputVar0, Value.FromTensor(input0) }, + { inputVar1, Value.FromTensor(input1) }, + { inputVar2, Value.FromTensor(input2) }, + }); + } + + [Fact] + public void MergeFusionTupleWithSameInput() + { + var input0 = Testing.Rand(1, 3, 24, 24); + var inputVar0 = new Var(new TensorType(input0.ElementType, input0.Shape)); + var a1 = MakeSingleSimpleFusionCall(Abs, inputVar0); + var a2 = MakeSingleSimpleFusionCall(Abs, inputVar0); + var a3 = MakeSingleSimpleFusionCall(Abs, inputVar0); + TestMatched( + new IR.Tuple(a1, a2, a3), + new Dictionary { { inputVar0, Value.FromTensor(input0) } }); + } + + [Fact] + public async Task MergeUserWithSameInput() + { + var input0 = Testing.Rand(1, 3, 24, 24); + var inputVar0 = new Var(new TensorType(input0.ElementType, input0.Shape)); + var input1 = Testing.Rand(1, 3, 24, 24); + var inputVar1 = new Var(new TensorType(input1.ElementType, input1.Shape)); + var s0 = Softmax(inputVar0, 0); + var s1 = Softmax(inputVar1, 0); + var s2 = Softmax(inputVar1, 1); + var call = MakeSimpleFusionCall(expr => expr[0] + expr[1], s0, s1); + var user = MakeSimpleFusionCall(expr => (expr[0] / expr[1]) + expr[2], call, s1, s2); + await RunTest( + user, + new[] { inputVar0, inputVar1 }, + new Dictionary + { + { inputVar0, Value.FromTensor(input0) }, { inputVar1, Value.FromTensor(input1) }, + }); + } + + [Fact] + public async Task TestTupleGetItemFusionSimple() + { + var input0 = Testing.Rand(1, 3, 24, 24); + var inputVar0 = new Var(new TensorType(input0.ElementType, input0.Shape)); + var call = MakeSingleSimpleFusionCall(expr => new IR.Tuple(expr + 1f, expr - 1f), Softmax(inputVar0, 0)); + var abs0 = MakeSingleSimpleFusionCall(Abs, call[0]); + var abs1 = MakeSingleSimpleFusionCall(Abs, call[1]); + await RunTest( + new IR.Tuple( + new[] { abs0, abs1 }), + new[] { inputVar0 }, + new Dictionary { { inputVar0, Value.FromTensor(input0) } }); + } + + [Fact] + public async Task TestTupleGetItemMultiUser() + { + var input0 = Testing.Rand(1, 3, 24, 24); + var inputVar0 = new Var(new TensorType(input0.ElementType, input0.Shape)); + var call = MakeSingleSimpleFusionCall(expr => new IR.Tuple(expr + 1f, expr - 1f), Softmax(inputVar0, 0)); + var abs00 = MakeSingleSimpleFusionCall(Abs, call[0]); + var abs01 = MakeSingleSimpleFusionCall(Abs, call[0]); + var abs10 = MakeSingleSimpleFusionCall(Abs, call[1]); + var abs11 = MakeSingleSimpleFusionCall(Abs, call[1]); + await RunTest( + new IR.Tuple( + new[] { abs00, abs01, abs10, abs11 }), + new[] { inputVar0 }, + new Dictionary { { inputVar0, Value.FromTensor(input0) } }); + } + + // %91 = %Binary_156_Conv2D_76_Conv2D_75_Conv2D_83_Conv2D_82_Squeeze_265_Binary_157(%85, %90, %75) 2 -1614309361: // f32[?,?] + // %92 = %Squeeze_272_Slice_271_Binary_277_ConstantOfShape_276_Slice_275_Cast_274_ShapeOf_273_Cast_305_Compare(%80, %91) 2 -1199809018: // (i32[?], i32[?,?]) + // %93 = GetItem(%92, const(i32 : 0)) 1 259791103: // i32[?] + // %94 = GetItem(%92, const(i32 : 1)) 3 1708502831: // i32[?,?] + // %95 = %Binary_279_Reduce_278_Tile_282_Unsqueeze_266_Cast_281_Stack_280_Reshape_293_Where_292_Reshape_291_Co(%93, %91, %94) 1 -1815551698: // f32[?,?,?] 
+ // %108 = %Binary_166_Binary_165(%94) 4 -1861428933: // f32[?,?,?,?] + [Fact] + public async Task TestTupleGetItemUsersLargeThanOutputs() + { + var input0 = Testing.Rand(1, 3, 24, 24); + var inputVar0 = new Var("inputVar", new TensorType(input0.ElementType, input0.Shape)); + var call = MakeSingleSimpleFusionCall(expr => new IR.Tuple(expr + 1f, expr - 1f), Softmax(inputVar0, 0)); + var n93 = call[0]; + var n94 = call[1]; + var n95 = MakeSimpleFusionCall(expr => expr[0] * expr[1] * expr[2], n93, n94, inputVar0); + var n108 = MakeSimpleFusionCall(expr => expr[0] * expr[0], n94); + var n108User = MakeSingleSimpleFusionCall(Abs, n108); + await RunTest( + new IR.Tuple(new[] { n95, n108User }), + new[] { inputVar0 }, + new Dictionary { { inputVar0, Value.FromTensor(input0) } }, + count: 2); + } + + [Fact] + public async Task TestGetItemWithRing() + { + // 29用到了28,所以其实是有环的 + // %26 = %ShapeOf_269_Gather_270_Gather_272(%23) 2 -1002975247: // (i64, i64) + // %27 = GetItem(%26, const(i32 : 0)) 1 -1105276988: // i64 + // %28 = %Reshape_271(%27) 2 761954617: // i64[1] + // %29 = GetItem(%26, const(i32 : 1)) 2 195526821: // i64 + // %30 = %Binary_166_MatMul_1(%23) 2 1373803502: // f32[?,?,?] + // %31 = %Reshape_276_Binary_275_Gather_274_ShapeOf_273(%30) 1 1087046740: // i64[1] + // %32 = (const(i64[1] : {-1L}), %31, const(i64[1] : {24L})): // (i64[1], i64[1], i64[1]) + // %33 = %Concat_277(%32) 2 708748629: // i64[3] + // %34 = %MatMul_0_MatMul_2(%23, %24, %25, %28, %29, %30, %33) 1 1042934643: // f32[?,?,?] + // %39 = %MatMul_3_MatMul_4_Binary_167_MatMul_5_Binary_168(%23, %37, %35, %38, %33, %28, %29) 1 274933965: // f32[?,?,?] + var input0 = Testing.Rand(1, 3, 24, 24); + var inputVar0 = new Var(new TensorType(input0.ElementType, input0.Shape)); + var s = Softmax(inputVar0, 0); + + // 26 + var call = MakeSingleSimpleFusionCall(expr => new IR.Tuple(expr + 1f, expr - 1f), s); + + // 27 + var a0 = call[0]; + + // 28 + var abs = MakeSingleSimpleFusionCall(Abs, a0); + + // 29 + var a1 = call[1]; + + // 39 + var compute0 = MakeSimpleFusionCall(expr => expr[0] * expr[1], a1, abs); + var compute1 = MakeSimpleFusionCall(expr => expr[0] * expr[1], a1, abs); + var res = MakeSimpleFusionCall(expr => expr[0] + expr[1], compute0, compute1); + await RunTest( + res, + new[] { inputVar0 }, + new Dictionary { { inputVar0, Value.FromTensor(input0) } }, + count: 3); + } + + [Fact] + public async Task TestTupleGetItemOutputIsSingle() + { + var input0 = Testing.Rand(1, 3, 24, 24); + var inputVar0 = new Var("input", new TensorType(input0.ElementType, input0.Shape)); + var abs = MakeSingleSimpleFusionCall(expr => new IR.Tuple(Abs(expr), Sqrt(expr)), inputVar0); + var bn = MakeSimpleFusionCall(expr => expr[0] + expr[1], abs[0], abs[1]); + var sf = Softmax(bn, 0); + await RunTest( + sf, + new[] { inputVar0 }, + new Dictionary { { inputVar0, Value.FromTensor(input0) } }); + } + + private static async Task RunTestNotMatch(Expr body, Var[] inputVar, Dictionary dict) + { + var module = MakeFun(body, inputVar); + _ = body.Evaluate(dict); + var preHash = body.GetHashCode(); + var post = await new MergeMultiUsersFusion().RunAsync(module, new()); + var postHash = ((Function)post).Body.GetHashCode(); + Assert.Equal(postHash, preHash); + } + + private static async Task RunTest(Expr body, Var[] inputVar, Dictionary dict, int repeatTimes = 1, int count = 1) + { + var fun = MakeFun(body, inputVar); + DumpScope.Current.DumpIR(fun, "origin"); + var preResult = body.Evaluate(dict); + var preHash = body.GetHashCode(); + var post = fun; + for 
(int i = 0; i < repeatTimes; i++) + { + post = (Function)await new MergeMultiUsersFusion().RunAsync(fun, new()); + } + + DumpScope.Current.DumpIR(post, "post"); + var newBody = ((Function)post).Body; + var postHash = newBody.GetHashCode(); + Assert.NotEqual(postHash, preHash); + var postResult = ((Function)post).Body.Evaluate(dict); + if (!Comparator.AllEqual(preResult, postResult)) + { + ValueDumper.DumpTensors( + preResult.AsTensors().Select(Value.FromTensor).ToArray(), + Path.Join(DumpScope.Current.Directory, "preResult")); + ValueDumper.DumpTensors( + postResult.AsTensors().Select(Value.FromTensor).ToArray(), + Path.Join(DumpScope.Current.Directory, "postResult")); + Comparator.Compare(preResult, postResult); + } + + var visitor = new FusionCounterVisitor(); + visitor.Visit(newBody); + Assert.Equal(count, visitor.Count); + } +} diff --git a/src/Nncase.Tests/Rules/ShapeBucketTest.cs b/src/Nncase.Tests/Rules/ShapeBucketTest.cs deleted file mode 100644 index 5f4bbbf245..0000000000 --- a/src/Nncase.Tests/Rules/ShapeBucketTest.cs +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) Canaan Inc. All rights reserved. -// Licensed under the Apache license. See LICENSE file in the project root for full license information. - -using System; -using System.Collections.Generic; -using System.IO; -using System.Linq; -using System.Threading; -using System.Threading.Tasks; -using Nncase.Diagnostics; -using Nncase.IR; -using Nncase.IR.Tensors; -using Nncase.Passes; -using Nncase.Passes.Rules.Neutral; -using Nncase.Quantization; -using Nncase.Tests.TestFixture; -using Nncase.Tests.TransformTest; -using Nncase.Utilities; -using Xunit; -using Xunit.Abstractions; -using static Nncase.IR.F.Math; -using static Nncase.IR.F.Tensors; - -namespace Nncase.Tests.Rules; - -[AutoSetupTestMethod(InitSession = true)] -public class ShapeBucketTest : TransformTestBase -{ - [Fact] - public void TestFindVar() - { - var v1 = new Var(new TensorType(DataTypes.Int32, Shape.Scalar)); - var v2 = new Var(new TensorType(DataTypes.Int32, Shape.Scalar)); - var expr = ((v1 * 4) + (v2 * 3)) * 2; - var visitor = new FindVar(); - visitor.Visit(expr); - Assert.Equal(visitor.Vars, new HashSet(new[] { v1, v2 })); - } - - [Fact] - public void TestBucketPad() - { - var input = Testing.Rand(1, 2, 16, 16); - var fixedShape = new[] { 1, 3, 24, 24 }; - var p = new Call(new BucketPad(), input, fixedShape); - var (_, kmodel) = Testing.BuildKModel("test", new IRModule(new Function(p)), CompileSession); - var result = Testing.RunKModel(kmodel, "call_arg", Array.Empty()); - var pads = fixedShape - Cast(ShapeOf(input), DataTypes.Int32); - var paddings = Transpose( - Stack(new IR.Tuple(Enumerable.Repeat(0, fixedShape.Length).ToArray(), pads), 0), - new[] { 1, 0 }); - var fixedInput = IR.F.NN.Pad(input, paddings, PadMode.Constant, Cast(0, input.ElementType)); - var fixedResult = new Call(new FixShape(), fixedInput, fixedShape); - var origin = fixedResult.Evaluate(); - var cos = Comparator.CosSimilarity(origin, result)[0]; - Assert.True(cos > 0.999); - } - - private Var Scalar(string name) => new Var(new TensorType(DataTypes.Int32, Shape.Scalar)); -} diff --git a/src/Nncase.Tests/Rules/ShapeExpr/UnitTestFoldGetItem.cs b/src/Nncase.Tests/Rules/ShapeExpr/UnitTestFoldGetItem.cs index 572f88a6ff..3a4037a5ce 100644 --- a/src/Nncase.Tests/Rules/ShapeExpr/UnitTestFoldGetItem.cs +++ b/src/Nncase.Tests/Rules/ShapeExpr/UnitTestFoldGetItem.cs @@ -2,6 +2,7 @@ // Licensed under the Apache license. See LICENSE file in the project root for full license information. 
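+// FoldStackGetItem also matches when the stacked GetItems only cover a leading
+// prefix of the input (the stack then folds to a Slice of it), which is why
+// TestFoldSqueezeGetItem below expects a match.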
using System; +using System.Collections.Generic; using System.Linq; using System.Threading.Tasks; using Nncase.Diagnostics; @@ -24,14 +25,24 @@ public void TestFoldStackGetItem() TestMatched(s); } + [Fact] + public void TestFoldStackGetItemDyn() + { + var input = Tensor.From(new[] { 1, 2, 3 }); + var inputVar = new Var(new TensorType(input.ElementType, input.Shape)); + var abs = IR.F.Math.Abs(inputVar); + var s = Stack(new IR.Tuple(new[] { abs[0], abs[1], abs[2] }), 0); + var body = new If(true, new[] { 3, 2, 1 }, s); + TestMatched(body, new Dictionary { { inputVar, Value.FromTensor(input) } }); + } + [Fact] public void TestFoldSqueezeGetItem() { var shape = new Expr[] { 1, 80, 4, 1 }; var s = Stack(new IR.Tuple(shape), 0); var result = Stack(new IR.Tuple(s[0], s[1], s[2]), 0); - DumpScope.Current.DumpIR(result, "result"); - TestNotMatch(result); + TestMatched(result); } [Fact] diff --git a/src/Nncase.Tests/Rules/UnitTestBroadcastMarker.cs b/src/Nncase.Tests/Rules/UnitTestBroadcastMarker.cs new file mode 100644 index 0000000000..88bbfcb350 --- /dev/null +++ b/src/Nncase.Tests/Rules/UnitTestBroadcastMarker.cs @@ -0,0 +1,35 @@ +// Copyright (c) Canaan Inc. All rights reserved. +// Licensed under the Apache license. See LICENSE file in the project root for full license information. + +using Nncase.IR; +using Nncase.Passes.Rules.Lower; +using Nncase.Tests.TestFixture; +using Xunit; +using static Nncase.IR.F.Math; +using static Nncase.IR.F.Tensors; + +namespace Nncase.Tests.Rules; + +[AutoSetupTestMethod(InitSession = true)] +public class UnitTestBroadcastMarker : TransformTestBase +{ + [Fact] + public void TestBroadcastInputMarker() + { + var input = Testing.Rand(1, 3, 24, 24); + var a = IR.F.Math.MatMul( + Reshape(new Marker(WellknownMarkerNames.RangeOf, input, new[] { -1f, 1f }), input.Shape), + Reshape(new Marker(WellknownMarkerNames.RangeOf, input, new[] { -2f, 2f }), input.Shape)); + var result = TestMatched(a); + TestNotMatch(result); + } + + [Fact] + public void TestBroadcastOutputMarker() + { + var input = Testing.Rand(1, 3, 24, 24); + var a = new Marker(WellknownMarkerNames.RangeOf, Reshape(Abs(input), input.Shape), new[] { -1f, 1f }); + var result = TestMatched(a); + TestNotMatch(result); + } +} diff --git a/src/Nncase.Tests/Targets/UnitTestCPUTarget.cs b/src/Nncase.Tests/Targets/UnitTestCPUTarget.cs index b3be7dff7e..9ea99522c9 100644 --- a/src/Nncase.Tests/Targets/UnitTestCPUTarget.cs +++ b/src/Nncase.Tests/Targets/UnitTestCPUTarget.cs @@ -10,6 +10,7 @@ using System.Threading.Tasks; using Microsoft.Extensions.DependencyInjection; using Nncase.CodeGen; +using Nncase.Diagnostics; using Nncase.IR; using Nncase.IR.Tensors; using Nncase.Runtime.Interop; @@ -198,6 +199,31 @@ public void TestStackVMNestIf() GenerateKModelAndRunFromFn(main, input, output); } + [Fact] + public void TestNestIfWithThenBegin() + { + CompileOptions.DumpFlags = DumpFlags.CodeGen; + var condVar = new Var(new TensorType(DataTypes.Boolean, Shape.Scalar)); + var cast = Cast(condVar, DataTypes.Int32); + var i = new If(condVar, cast * new If(condVar, 3 + cast, 2), 6); + var main = new Function("main", i, new[] { condVar }); + Dumpper.DumpIR(main, "main"); + var input = (Tensor)true; + var output = (Tensor)4; + GenerateKModelAndRunFromFn(main, input, output); + } + + [Fact] + public void TestNestIfWithElseBegin() + { + var condVar = new Var(new TensorType(DataTypes.Boolean, Shape.Scalar)); + var i = new If(condVar, 3, new If(condVar, 1, 2)); + var main = new Function("main", i, new[] { condVar }); + var input = 
(Tensor)false; + var output = (Tensor)2; + GenerateKModelAndRunFromFn(main, input, output); + } + private void TestCodeGen(Expr body, Var[] vars, [CallerMemberName] string? name = null) { var main = new Function("main", body, vars); diff --git a/tests/ci_proxy.py b/tests/ci_proxy.py deleted file mode 100644 index 40c6714e9f..0000000000 --- a/tests/ci_proxy.py +++ /dev/null @@ -1,150 +0,0 @@ -import os -import argparse -import stat -import socket -import json -import threading -import queue -import logging -import logging.handlers -import time - - -def recv_file(conn, target_root, mylogger): - conn.sendall(f"pls send file info".encode()) - header = conn.recv(1024) - file_dict = json.loads(header.decode()) - file_name = file_dict['file_name'] - file_size = file_dict['file_size'] - mylogger.debug('recv: file = {0}, size = {1}'.format(file_name, file_size)) - conn.sendall(f"pls send {file_name}".encode()) - - full_file = os.path.join(target_root, file_name) - with open(full_file, 'wb') as f: - recv_size = 0 - while recv_size < file_size: - slice = conn.recv(4096) - f.write(slice) - recv_size += len(slice) - - os.chmod(full_file, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) - return file_name - - -def Consumer(target, q, nfs_root, ip, port): - # create target root - target_root = os.path.join(nfs_root, target) - if not os.path.exists(target_root): - os.makedirs(target_root) - - # logging - mylogger = logging.getLogger() - mylogger.setLevel(logging.DEBUG) - rf_handler = logging.handlers.RotatingFileHandler( - f'ci_proxy_{target}.log', mode='a', maxBytes=32 * 1024 * 1024, backupCount=10) - # rf_handler.setLevel(logging.INFO) - mylogger.setLevel(logging.DEBUG) - rf_handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')) - mylogger.addHandler(rf_handler) - - # telnet_client = TelnetClient(mylogger) - while True: - cmd = './' - conn = q.get() - - # recv header - conn.sendall(f"pls send header".encode()) - header = conn.recv(1024) - header_dict = json.loads(header.decode()) - mylogger.info("test case = {0}".format(header_dict['case'])) - file_num = header_dict['app'] + header_dict['kmodel'] + header_dict['inputs'] - - # recv all kinds of files(app + kmodel + inputs) - for i in range(file_num): - file = recv_file(conn, target_root, mylogger) - if i == 0: - cmd = cmd + file - else: - cmd = cmd + ' ' + file - - # print('cmd = {0}'.format(cmd)) - - # connect nuc_proxy server - client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - client_socket.connect((ip, int(port))) - - # send target - dummy = client_socket.recv(1024) - target_dict = {} - target_dict['target'] = target - client_socket.sendall(json.dumps(target_dict).encode()) - - # send header - dummy = client_socket.recv(1024) - dict = {} - dict['cmd'] = cmd - client_socket.sendall(json.dumps(dict).encode()) - - infer_result = client_socket.recv(1024).decode() - client_socket.close() - # print('infer_result = {0}'.format(infer_result)) - if infer_result.find('succeed') == -1: - conn.sendall(f'infer failed on {target} board: {infer_result}'.encode()) - else: - conn.sendall(f'infer succeed'.encode()) - dummy = conn.recv(1024) - - # send outputs - for i in range(header_dict['outputs']): - file = os.path.join(target_root, f'nncase_result_{i}.bin') - file_size = os.path.getsize(file) - conn.sendall(str(file_size).encode()) - dummy = conn.recv(1024) - - with open(file, 'rb') as f: - conn.sendall(f.read()) - dummy = conn.recv(1024) - mylogger.debug('send: file = {0}, size = {1}'.format(file, file_size)) - - 
conn.close() - - -def main(): - # args - parser = argparse.ArgumentParser(prog="ci_proxy") - parser.add_argument("--ci_proxy_port", help='listening port of ci_proxy', - type=int, default=10000) - parser.add_argument("--nfs_root", help='nfs root on pc', type=str, default='/data/nfs') - parser.add_argument("--nuc_proxy_ip", help='ip of nuc_proxy', type=str, default='localhost') - parser.add_argument("--nuc_proxy_port", help='listening port of nuc_proxy', - type=int, default=10001) - - args = parser.parse_args() - - dict = {} - size = 256 - - server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - server_socket.bind(('localhost', args.ci_proxy_port)) - server_socket.listen(size) - while True: - conn, addr = server_socket.accept() - - # recv target - conn.sendall(f"pls send your target".encode()) - info = conn.recv(1024) - target_dict = json.loads(info.decode()) - target = target_dict['target'] - - if target not in dict: - q = queue.Queue(maxsize=size) - t_consumer = threading.Thread(target=Consumer, args=( - target, q, args.nfs_root, args.nuc_proxy_ip, args.nuc_proxy_port)) - t_consumer.start() - dict[target] = q - - dict[target].put(conn) - - -if __name__ == '__main__': - main() diff --git a/tests/compare_util.py b/tests/compare_util.py index 6398bf65b4..9395cbca21 100644 --- a/tests/compare_util.py +++ b/tests/compare_util.py @@ -95,7 +95,7 @@ def compare_ndarray(expected: np.ndarray, if dump_hist: y, x = np.histogram(expected - actual, 100) np.savetxt(dump_file, np.stack((x[:-1], y)).T, fmt='%f', delimiter=',') - similarity_info = f"\n{similarity_name} similarity = {similarity}, threshold = {threshold}\n" + similarity_info = f"{similarity_name} similarity = {similarity}, threshold = {threshold}" if similarity_name in ['cosine', 'euclidean', 'segment']: compare_op = lt diff --git a/tests/config.toml b/tests/config.toml index c4cb6ac0c2..1e4ec50d67 100644 --- a/tests/config.toml +++ b/tests/config.toml @@ -1,6 +1,7 @@ name = 'default_config' root = 'tests_output' -dump_hist = true +dump_hist = false +dump_infer = false [compile_opt] preprocess = false @@ -17,6 +18,10 @@ model_layout = 'NHWC' letterbox_value = 0 dump_asm = true dump_ir = false +shape_bucket_enable = false +shape_bucket_range_info = { } +shape_bucket_segments_count = 4 +shape_bucket_fix_var_map = { } [ptq_opt] use_mix_quant = false diff --git a/tests/generator.py b/tests/generator.py index 631883c757..bcccb4ad4d 100644 --- a/tests/generator.py +++ b/tests/generator.py @@ -11,7 +11,7 @@ def from_random(self, shape: List[int], dtype: np.dtype, abs: bool = False) -> n data = np.random.randint(0, 256, shape) elif dtype == np.int8: data = np.random.randint(-128, 128, shape) - elif dtype == np.bool: + elif dtype == bool: data = np.random.rand(*shape) > 0.5 elif dtype == np.int32: data = np.random.randint(1, 5, size=shape, dtype='int32') diff --git a/tests/importer/onnx_/basic/test_binary.py b/tests/importer/onnx_/basic/test_binary.py index c629691f04..6a4cecf9c0 100644 --- a/tests/importer/onnx_/basic/test_binary.py +++ b/tests/importer/onnx_/basic/test_binary.py @@ -23,45 +23,42 @@ def _make_module(v_shape): class BinaryModule(torch.nn.Module): def __init__(self): super(BinaryModule, self).__init__() - # self.v = torch.from_numpy(np.random.rand(*v_shape).astype(np.float32)) self.v = torch.from_numpy(np.ones(v_shape).astype(np.float32)) def forward(self, x): outs = [] outs.append(torch.add(x, self.v)) - # outs.append(torch.mul(x, self.v)) - # outs.append(torch.sub(x, self.v)) - # outs.append(torch.max(x, self.v)) - # 
outs.append(torch.div(x, self.v)) - # outs.append(torch.min(x, self.v)) - # outs.append(torch.fmod(x, self.v)) + outs.append(torch.mul(x, self.v)) + outs.append(torch.sub(x, self.v)) + outs.append(torch.max(x, self.v)) + outs.append(torch.div(x, self.v)) + outs.append(torch.min(x, self.v)) + outs.append(torch.fmod(x, self.v)) return outs return BinaryModule() lhs_shapes = [ - # [3], - # [64, 3], - # [3, 64, 3], - # [8, 3, 64, 3] - [1, 3, 24, 24] + [3], + [64, 3], + [3, 64, 3], + [8, 3, 64, 3], ] rhs_shapes = [ - # [1], - # [3], - # [1, 3], - # [64, 1], - # [64, 3], - # [3, 64, 1], - # [3, 64, 3], - # [8, 3, 64, 1], - # [8, 3, 64, 3], - # [8, 3, 1, 3], - # [8, 1, 64, 3], - # [1, 3, 64, 1] - [1, 3, 24, 24] + [1], + [3], + [1, 3], + [64, 1], + [64, 3], + [3, 64, 1], + [3, 64, 3], + [8, 3, 64, 1], + [8, 3, 64, 3], + [8, 3, 1, 3], + [8, 1, 64, 3], + [1, 3, 64, 1], ] diff --git a/tests/inference.py b/tests/inference.py index f602c1fe68..603e73d674 100644 --- a/tests/inference.py +++ b/tests/inference.py @@ -7,21 +7,47 @@ import socket import json from test_utils import * +import time + + +def data_shape_list_string(data): + return '\n'.join(map(lambda d: ' '.join(map(lambda x: str(x), d['model_shape'])), data)) + + +def generate_kmodel_data_info(inputs, outputs, infer_dir): + input_shapes = data_shape_list_string(inputs) + output_shapes = data_shape_list_string(outputs) +# input_shapes = '\n'.join(map(lambda input: ' '.join(map(lambda x: str(x), input['model_shape'])), inputs)) + s = f"{len(inputs)} {len(outputs)}\n{input_shapes}\n{output_shapes}" + with open(os.path.join(infer_dir, "kmodel.desc"), "w+") as f: + f.write(s) class Inference: def run_inference(self, compiler, target, ptq_enabled, infer_dir): in_ci = test_utils.in_ci() kpu_targets = test_utils.kpu_targets() - port = test_utils.port() + nuc_ip = test_utils.nuc_ip() + nuc_port = test_utils.nuc_port() test_executable = test_utils.test_executable(target) - running_on_evb = in_ci and target in kpu_targets and port is not None and test_executable is not None and len( + running_on_evb = in_ci and target in kpu_targets and nuc_ip is not None and nuc_port is not None and test_executable is not None and len( self.inputs) > 0 and len(self.outputs) > 0 + if self.cfg['dump_infer']: + self.infer_dict['case'] = os.path.basename(self.case_dir) + self.infer_dict['target'] = target if ptq_enabled: self.set_quant_opt(compiler) + + if self.cfg['dump_infer']: + case = os.path.basename(self.case_dir) + self.infer_dict['if_quant_type'] = self.cfg['ptq_opt']['quant_type'] + self.infer_dict['w_quant_type'] = self.cfg['ptq_opt']['w_quant_type'] + compiler.compile() kmodel = compiler.gencode_tobytes() + if self.dynamic: + generate_kmodel_data_info(self.inputs, self.outputs, infer_dir) os.makedirs(infer_dir, exist_ok=True) if not in_ci: with open(os.path.join(infer_dir, 'test.kmodel'), 'wb') as f: @@ -34,7 +60,17 @@ def run_inference(self, compiler, target, ptq_enabled, infer_dir): sim = nncase.Simulator() sim.load_model(kmodel) self.set_infer_input(sim, compile_opt) + + if self.cfg['dump_infer']: + t1 = time.perf_counter() + sim.run() + + if self.cfg['dump_infer']: + t = (time.perf_counter() - t1) * 1000 + self.infer_dict['time(ms)'] = str(t) + self.infer_dict['fps'] = str(round(1000 / t, 2)) + outputs = self.dump_infer_output(sim, compile_opt, infer_dir) return outputs @@ -70,12 +106,13 @@ def dump_infer_output(self, sim, compile_opt, infer_dir): return outputs def run_evb(self, target, kmodel, compile_opt): - port = test_utils.port() + ip = 
test_utils.nuc_ip() + port = test_utils.nuc_port() test_executable = test_utils.test_executable(target) # connect server client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - client_socket.connect(('localhost', int(port))) + client_socket.connect((ip, int(port))) # send target dummy = client_socket.recv(1024) @@ -124,8 +161,15 @@ def run_evb(self, target, kmodel, compile_opt): # get infer result outputs = [] - cmd_result = client_socket.recv(1024).decode() - if cmd_result.find('succeed') != -1: + result_dict = {} + ret = client_socket.recv(1024) + result_dict = json.loads(ret.decode()) + if result_dict['type'].find('finish') != -1: + if self.cfg['dump_infer']: + t = result_dict['time'] + self.infer_dict['time(ms)'] = str(t) + self.infer_dict['fps'] = str(round(1000 / t, 2)) + client_socket.sendall(f"pls send outputs".encode()) # recv outputs @@ -148,6 +192,11 @@ def run_evb(self, target, kmodel, compile_opt): client_socket.close() else: client_socket.close() - raise Exception(f'{cmd_result}') + + if self.cfg['dump_infer']: + self.infer_dict['result'] = 'Fail' + self.infer_dict['remark'] = result_dict['error'] + dump_dict_to_json(self.infer_dict, self.infer_file) + raise Exception(result_dict['error']) return outputs diff --git a/tests/json2md.py b/tests/json2md.py new file mode 100644 index 0000000000..983d859166 --- /dev/null +++ b/tests/json2md.py @@ -0,0 +1,24 @@ +import argparse +import json +import pandas as pd + + +def json2md(json_file): + json_list = [] + with open(json_file, 'r') as f: + json_list = json.load(f) + + json_list = sorted(json_list, key=lambda d: d['case']) + df = pd.DataFrame.from_records(json_list) + md = df.to_markdown() + md_file = json_file.split('/')[-1].split('.')[0] + '.md' + + with open(md_file, 'w') as f: + f.write(md) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(prog="json2md") + parser.add_argument("--json", help='json file', type=str) + args = parser.parse_args() + json2md(args.json) diff --git a/tests/kernels/CMakeLists.txt b/tests/kernels/CMakeLists.txt index fdd840324d..1a94858873 100644 --- a/tests/kernels/CMakeLists.txt +++ b/tests/kernels/CMakeLists.txt @@ -1,10 +1,11 @@ enable_testing() find_package(ortki) +find_package(rapidjson) macro(add_test_exec name) add_executable(${name} ${name}.cpp) - target_link_libraries(${name} PRIVATE GTest::gtest_main nncaseruntime ortki::ortki) + target_link_libraries(${name} PRIVATE GTest::gtest_main nncaseruntime ortki::ortki rapidjson::rapidjson) add_test(NAME ${name} COMMAND ${CMAKE_COMMAND} -DTEST_EXECUTABLE=$ -P ${CMAKE_CURRENT_SOURCE_DIR}/../../toolchains/run_test.cmake) endmacro() diff --git a/tests/kernels/kernel_test.h b/tests/kernels/kernel_test.h index faf2d552a1..fa3e491cc1 100644 --- a/tests/kernels/kernel_test.h +++ b/tests/kernels/kernel_test.h @@ -18,6 +18,7 @@ #include "nncase/shape.h" #include #include +#include #include #include #include @@ -32,11 +33,16 @@ #include #include #include +#include // rapidjson's DOM-style API +#include +#include +#include #include #include using namespace nncase::runtime; using namespace nncase::kernels; +using namespace rapidjson; namespace nncase { typedef enum { RANDOM, NOZERO, NONEG, NOPOS } initial_mode; @@ -211,8 +217,8 @@ class KernelTest { NNCASE_UNUSED auto res = kernels::stackvm::apply( tensor.shape(), [&](gsl::span index) -> result { - get(tensor, index) = - static_cast(dis(gen)) >= 0; + get(tensor, index) = + static_cast(dis(gen)); return ok(); }); break; @@ -237,7 +243,7 @@ class KernelTest { if (shape.size() == 1 && 
(shape[0] == initvalue.size())) { // One dim array attribute T *tmp = new T[shape[0]]; - for (int i = 0; i < (int)shape[0]; ++i) { + for (size_t i = 0; i < shape[0]; ++i) { tmp[i] = initvalue[i]; } return tmp; @@ -426,675 +432,6 @@ class KernelTest { } } - void cast_copy_tensor(runtime::runtime_tensor &source_tensor, - runtime::runtime_tensor &destination_tensor) { - auto destination_tensor_dtype = destination_tensor.datatype(); - auto source_tensor_dtype = source_tensor.datatype(); - switch (destination_tensor_dtype) { - case dt_int8: { - NNCASE_UNUSED auto res = kernels::stackvm::apply( - destination_tensor.shape(), - [&](gsl::span index) -> result { - switch (source_tensor_dtype) { - case dt_int8: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_int16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_int32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_int64: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint64: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_float16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_float32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - default: { - } - } - return ok(); - }); - break; - } - case dt_int16: { - NNCASE_UNUSED auto res = kernels::stackvm::apply( - destination_tensor.shape(), - [&](gsl::span index) -> result { - switch (source_tensor_dtype) { - case dt_int8: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_int16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_int32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_int64: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint64: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_float16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_float32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - default: { - } - } - return ok(); - }); - break; - } - case dt_int32: { - NNCASE_UNUSED auto res = kernels::stackvm::apply( - destination_tensor.shape(), - [&](gsl::span index) -> result { - switch (source_tensor_dtype) { - case dt_int8: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_int16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_int32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_int64: { - get(destination_tensor, 
index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint64: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_float16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_float32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - default: { - } - } - return ok(); - }); - break; - } - case dt_int64: { - NNCASE_UNUSED auto res = kernels::stackvm::apply( - destination_tensor.shape(), - [&](gsl::span index) -> result { - switch (source_tensor_dtype) { - case dt_int8: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_int16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_int32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_int64: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint64: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_float16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_float32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - default: { - } - } - return ok(); - }); - break; - } - case dt_uint8: { - NNCASE_UNUSED auto res = kernels::stackvm::apply( - destination_tensor.shape(), - [&](gsl::span index) -> result { - switch (source_tensor_dtype) { - case dt_int8: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_int16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_int32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_int64: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint64: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_float16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_float32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - default: { - } - } - return ok(); - }); - break; - } - case dt_uint16: { - NNCASE_UNUSED auto res = kernels::stackvm::apply( - destination_tensor.shape(), - [&](gsl::span index) -> result { - switch (source_tensor_dtype) { - case dt_int8: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_int16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; 
- } - case dt_int32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_int64: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint64: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_float16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_float32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - default: { - } - } - return ok(); - }); - break; - } - case dt_uint32: { - NNCASE_UNUSED auto res = kernels::stackvm::apply( - destination_tensor.shape(), - [&](gsl::span index) -> result { - switch (source_tensor_dtype) { - case dt_int8: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_int16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_int32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_int64: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint64: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_float16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_float32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - default: { - } - } - return ok(); - }); - break; - } - case dt_uint64: { - NNCASE_UNUSED auto res = kernels::stackvm::apply( - destination_tensor.shape(), - [&](gsl::span index) -> result { - switch (source_tensor_dtype) { - case dt_int8: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_int16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_int32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_int64: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint64: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_float16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_float32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - default: { - } - } - return ok(); - }); - break; - } - case dt_float32: { - NNCASE_UNUSED auto res = kernels::stackvm::apply( - destination_tensor.shape(), - [&](gsl::span index) -> result { - switch (source_tensor_dtype) { - case dt_int8: { - get(destination_tensor, index) = - 
static_cast( - get(source_tensor, index)); - break; - } - case dt_int16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_int32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_int64: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint64: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_float16: { - get(destination_tensor, index) = - static_cast(get(source_tensor, index)); - break; - } - case dt_float32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - default: { - } - } - return ok(); - }); - break; - } - case dt_float64: { - NNCASE_UNUSED auto res = kernels::stackvm::apply( - destination_tensor.shape(), - [&](gsl::span index) -> result { - switch (source_tensor_dtype) { - case dt_int8: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_int16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_int32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_int64: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_uint64: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_float16: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - case dt_float32: { - get(destination_tensor, index) = - static_cast( - get(source_tensor, index)); - break; - } - default: { - } - } - return ok(); - }); - break; - } - default: { - } - } - } - static ortki::OrtKITensor * runtime_tensor_2_ort_tensor(runtime::runtime_tensor &tensor) { auto mapped = @@ -1265,6 +602,76 @@ class KernelTest { return ok(); } + template + std::vector tensor_to_array(runtime::runtime_tensor &lhs) { + + std::vector vec1; + vec1.reserve(compute_size(lhs.shape())); + + kernels::stackvm::apply( + lhs.shape(), + [&](gsl::span index) -> result { + auto dtype = lhs.datatype(); + switch (dtype) { + case dt_int8: { + vec1.push_back(static_cast(get(lhs, index))); + break; + } + case dt_int16: { + vec1.push_back(static_cast(get(lhs, index))); + break; + } + case dt_int32: { + vec1.push_back(static_cast(get(lhs, index))); + break; + } + case dt_int64: { + vec1.push_back(static_cast(get(lhs, index))); + break; + } + case dt_uint8: { + vec1.push_back(static_cast(get(lhs, index))); + break; + } + case dt_uint16: { + vec1.push_back(static_cast(get(lhs, index))); + break; + } + case dt_uint32: { + vec1.push_back(static_cast(get(lhs, index))); + break; + } + case dt_uint64: { + vec1.push_back(static_cast(get(lhs, index))); + break; + } + case dt_float16: { + vec1.push_back(static_cast(get(lhs, index))); + break; + } + case dt_bfloat16: { + vec1.push_back(static_cast(get(lhs, index))); + break; + } + case dt_float32: { + 
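+                // Read the float32 element at this index in its stored precision and
+                // append it converted to the vector's element type (a descriptive note;
+                // the conversion mirrors the other branches of this switch).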
vec1.push_back(static_cast(get(lhs, index))); + break; + } + case dt_float64: { + vec1.push_back(static_cast(get(lhs, index))); + break; + } + default: { + return err(std::errc::not_supported); + } + } + return ok(); + }) + .is_ok(); + + return vec1; + } + bool is_same_tensor(runtime::runtime_tensor &lhs, runtime::runtime_tensor &rhs) { if (lhs.shape() != rhs.shape()) { @@ -1353,9 +760,8 @@ class KernelTest { } case dt_float16: { if (get(lhs, index) == get(rhs, index) || - fabs(get(lhs, index) - - get(rhs, index)) <= - std::numeric_limits::epsilon()) { + fabs((float)get(lhs, index) - + (float)get(rhs, index)) <= 0.01f) { return ok(); } else if (std::isnan(get(lhs, index)) && std::isnan(get(rhs, index))) { @@ -1384,8 +790,8 @@ class KernelTest { if (get(lhs, index) == get(rhs, index) || fabs(get(lhs, index) - - get(rhs, index)) <= - std::numeric_limits::epsilon()) { + get(rhs, index)) <= 0.0001f + /*std::numeric_limits::epsilon()*/) { return ok(); } else if (std::isnan(get(lhs, index)) && std::isnan(get(rhs, index))) { @@ -1497,13 +903,15 @@ class KernelTest { break; } case dt_float16: { - vec1.push_back(get(lhs, index)); - vec2.push_back(get(rhs, index)); + vec1.push_back(static_cast(get(lhs, index))); + vec2.push_back(static_cast(get(rhs, index))); break; } case dt_bfloat16: { - vec1.push_back(get(lhs, index)); - vec2.push_back(get(rhs, index)); + vec1.push_back( + static_cast(get(lhs, index))); + vec2.push_back( + static_cast(get(rhs, index))); break; } case dt_float32: { @@ -1516,13 +924,6 @@ class KernelTest { vec2.push_back(static_cast(get(rhs, index))); break; } - case dt_boolean: { - vec1.push_back( - static_cast(get(lhs, index) ? 2 : 1)); - vec2.push_back( - static_cast(get(rhs, index) ? 2 : 1)); - break; - } default: { return err(std::errc::not_supported); } @@ -1546,7 +947,10 @@ class KernelTest { } void print_runtime_tensor(runtime::runtime_tensor lhs) { - std::cout << "tensor:" << std::endl; + std::cout << "tensor (shape:[ "; + for (auto a : lhs.shape()) + std::cout << a << " "; + std::cout << "]):" << std::endl; kernels::stackvm::apply( lhs.shape(), [&](gsl::span index) -> result { @@ -1630,5 +1034,138 @@ class KernelTest { std::cout << std::endl; } + + virtual void quantize_to_int16(runtime::runtime_tensor &expected, + runtime::runtime_tensor &input, int16_t zero, + float scale) { + if (expected.datatype() != dt_int16) + return; + NNCASE_UNUSED auto res = kernels::stackvm::apply( + expected.shape(), + [&](gsl::span index) -> result { + get(expected, index) = static_cast( + get(input, index) / scale + zero); + return ok(); + }); + } + + virtual void int16_dequantize_to_float(runtime::runtime_tensor &expected, + runtime::runtime_tensor &input, + int16_t zero, float scale) { + if (input.datatype() != dt_int16) + return; + NNCASE_UNUSED auto res = kernels::stackvm::apply( + expected.shape(), + [&](gsl::span index) -> result { + get(expected, index) = static_cast( + (get(input, index) - zero) * scale); + return ok(); + }); + } + + static std::string ReadFromJsonFile(std::ifstream &file) { + std::string content((std::istreambuf_iterator(file)), + std::istreambuf_iterator()); + file.close(); + return content; + } + + static void ParseJson(Document &document, std::string js_str) { + if (document.Parse(js_str.c_str()).HasParseError()) + std::cout << "Parsing Error: " + << (unsigned)document.GetErrorOffset() << " " + << GetParseError_En(document.GetParseError()) + << std::endl; + assert(document.IsObject()); + } + + void ParseJson(std::string js_str) { + if 
(_document.Parse(js_str.c_str()).HasParseError()) + std::cout << "Parsing Error: " + << (unsigned)_document.GetErrorOffset() << " " + << GetParseError_En(_document.GetParseError()) + << std::endl; + assert(_document.IsObject()); + } + + typecode_t Str2DataType(std::string type) { + std::cout << type << std::endl; + if (str_2_datatype.find(type) != str_2_datatype.end()) { + return str_2_datatype[type]; + } else { + return dt_int8; + } + } + + int64_t GetNumber(const char *key) { + assert(_document[key].IsInt64()); + return _document[key].GetInt64(); + } + + float GetFloatNumber(const char *key) { + assert(_document[key].IsDouble()); + return _document[key].GetFloat(); + } + + typecode_t GetDataType(const char *key) { + assert(_document[key].IsString()); + return Str2DataType(_document[key].GetString()); + } + + dims_t GetShapeArray(const char *key) { + assert(_document[key].IsArray()); + + Value &array = _document[key]; + size_t arraySize = array.Size(); + dims_t cArray(arraySize); + for (rapidjson::SizeType i = 0; i < arraySize; i++) { + if (array[i].IsUint()) { + cArray[i] = array[i].GetUint(); + } else { + std::cout << "Invalid JSON format. Expected unsigned integer " + "values in the array." + << std::endl; + } + } + return cArray; + } + + axes_t GetAxesArray(const char *key) { + assert(_document[key].IsArray()); + + Value &array = _document[key]; + size_t arraySize = array.Size(); + axes_t cArray(arraySize); + for (rapidjson::SizeType i = 0; i < arraySize; i++) { + if (array[i].IsUint()) { + cArray[i] = array[i].GetUint(); + } else { + std::cout << "Invalid JSON format. Expected unsigned integer " + "values in the array." + << std::endl; + } + } + return cArray; + } + + static std::string GetFileNameFromMacro(const char *filePath) { + std::string fullFilePath(filePath); + size_t lastSlashIndex = fullFilePath.find_last_of("/\\"); + if (lastSlashIndex != std::string::npos) { + return fullFilePath.substr(lastSlashIndex + 1); + } + return fullFilePath; + } + + public: + Document _document; + std::map str_2_datatype = { + {"dt_int8", dt_int8}, {"dt_int16", dt_int16}, + {"dt_int32", dt_int32}, {"dt_int64", dt_int64}, + {"dt_uint8", dt_uint8}, {"dt_uint16", dt_uint16}, + {"dt_uint32", dt_uint32}, {"dt_uint64", dt_uint64}, + {"dt_float16", dt_float16}, {"dt_float32", dt_float32}, + {"dt_float64", dt_float64}, {"dt_bfloat16", dt_bfloat16}, + {"dt_boolean", dt_boolean}}; }; } // namespace nncase diff --git a/tests/kernels/macro_util.h b/tests/kernels/macro_util.h index cbd7b69135..a3c5ca9c60 100644 --- a/tests/kernels/macro_util.h +++ b/tests/kernels/macro_util.h @@ -296,3 +296,84 @@ {reinterpret_cast(ptr_ort), size}, true, \ host_runtime_tensor::pool_cpu_only) \ .expect("create expected tensor failed"); + +#define MAX_CASE_NUM 10000 +#define ENDFIX ".json" +#define PARENT_DIR_1 "../../../tests/kernels/" +#define PARENT_DIR_2 "../../../../tests/kernels/" + +#define SPLIT_ELEMENT(key, idx) \ + rapidjson::Value copiedArray##key(rapidjson::kArrayType); \ + copiedArray##key.CopyFrom(key[idx], write_doc.GetAllocator()); \ + write_doc.AddMember(Value(#key, write_doc.GetAllocator()), \ + copiedArray##key, write_doc.GetAllocator()); + +#define FOR_LOOP(key, idx) \ + assert(document[#key].IsArray()); \ + Value &key = document[#key]; \ + for (SizeType idx = 0; idx < key.Size(); ++idx) { + +#define FOR_LOOP_END() } + +#define FILE_NAME_GEN(PARENT_DIR, name) \ + std::string(PARENT_DIR) + std::string(name) + std::string(ENDFIX) + +#define FILE_NAME_GEN_SUBCASE(case_name, filename, idx) \ + 
std::string(case_name) + "_" + std::string(filename) + "_" + \ + std::to_string(idx) + std::string(ENDFIX) + +#define READY_TEST_CASE_GENERATE() \ + std::string content; \ + auto filename1 = FILE_NAME_GEN(PARENT_DIR_1, TEST_CASE_NAME); \ + std::ifstream file1(filename1); \ + if (file1.fail()) { \ + file1.close(); \ + auto filename2 = FILE_NAME_GEN(PARENT_DIR_2, TEST_CASE_NAME); \ + std::ifstream file2(filename2); \ + if (file2.fail()) { \ + file2.close(); \ + std::cout << "File does not exist: " << filename2 << std::endl; \ + } else { \ + content = KernelTest::ReadFromJsonFile(file2); \ + std::cout << "File exists: " << filename2 << std::endl; \ + } \ + } else { \ + content = KernelTest::ReadFromJsonFile(file1); \ + std::cout << "File exists: " << filename1 << std::endl; \ + } \ + Document document; \ + KernelTest::ParseJson(document, content); \ + unsigned case_num = 0; \ + Document write_doc; \ + write_doc.SetObject(); + +#define WRITE_SUB_CASE() \ + std::ofstream ofs(FILE_NAME_GEN_SUBCASE( \ + TEST_CASE_NAME, KernelTest::GetFileNameFromMacro(__FILE__), \ + case_num)); \ + OStreamWrapper osw(ofs); \ + Writer writer(osw); \ + write_doc.Accept(writer); \ + case_num++; \ + write_doc.RemoveAllMembers(); + +#define READY_SUBCASE() \ + auto &&[idx] = GetParam(); \ + auto filename = FILE_NAME_GEN_SUBCASE( \ + TEST_CASE_NAME, KernelTest::GetFileNameFromMacro(__FILE__), idx); \ + std::ifstream file(filename); \ + if (file.is_open()) { \ + std::cout << "Open file: " << filename << std::endl; \ + ParseJson(ReadFromJsonFile(file)); \ + } else { \ + file.close(); \ + GTEST_SKIP(); \ + } + +#define CLEAR_SUBCASE() \ + auto &&[idx] = GetParam(); \ + auto filename = FILE_NAME_GEN_SUBCASE( \ + TEST_CASE_NAME, KernelTest::GetFileNameFromMacro(__FILE__), idx); \ + if (std::remove(filename.c_str()) == 0) { \ + printf("File deleted successfully: %s\n", filename.c_str()); \ + } diff --git a/tests/kernels/test_batch_normalization.cpp b/tests/kernels/test_batch_normalization.cpp index a755ed6992..2b8df77efe 100644 --- a/tests/kernels/test_batch_normalization.cpp +++ b/tests/kernels/test_batch_normalization.cpp @@ -22,16 +22,21 @@ #include #include +#define TEST_CASE_NAME "test_batch_normalization" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; class BatchNormalizationTest : public KernelTest, - public ::testing::TestWithParam> { + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, input_shape] = GetParam(); + READY_SUBCASE() + + auto input_shape = GetShapeArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); input = hrt::create(typecode, input_shape, host_runtime_tensor::pool_cpu_only) @@ -59,7 +64,7 @@ class BatchNormalizationTest init_tensor_var(var); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } virtual void init_tensor_var(runtime::runtime_tensor &tensor) { std::random_device rd; @@ -80,12 +85,8 @@ class BatchNormalizationTest runtime_tensor var; }; -INSTANTIATE_TEST_SUITE_P( - batch_normalization, BatchNormalizationTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1, 8, 24, 24}, dims_t{1, 3, 3, 16}, - dims_t{2, 4, 8, 8}, dims_t{8, 8}, - dims_t{1, 3, 16, 1}, dims_t{1, 1}))); +INSTANTIATE_TEST_SUITE_P(batch_normalization, BatchNormalizationTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(BatchNormalizationTest, batch_normalization) { auto input_ort = runtime_tensor_2_ort_tensor(input); @@ -94,9 +95,12 @@ TEST_P(BatchNormalizationTest, 
batch_normalization) { auto mean_ort = runtime_tensor_2_ort_tensor(mean); auto var_ort = runtime_tensor_2_ort_tensor(var); + auto eps = 0.01f; + auto momentum = 0.9f; + // expected - auto output_ort = ortki_BatchNormalization(input_ort, scale_ort, b_ort, - mean_ort, var_ort, 0.01f, 0.9f); + auto output_ort = ortki_BatchNormalization( + input_ort, scale_ort, b_ort, mean_ort, var_ort, eps, momentum); size_t size = 0; void *ptr_ort = tensor_buffer(output_ort, &size); dims_t shape(tensor_rank(output_ort)); @@ -106,14 +110,14 @@ TEST_P(BatchNormalizationTest, batch_normalization) { true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - float epsilon_ptr[] = {0.01f}; + float epsilon_ptr[] = {eps}; auto epsilon = hrt::create(nncase::dt_float32, {1}, {reinterpret_cast(epsilon_ptr), sizeof(epsilon_ptr)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - float monentum_ptr[] = {0.9f}; + float monentum_ptr[] = {momentum}; auto monentum = hrt::create(nncase::dt_float32, {1}, {reinterpret_cast(monentum_ptr), sizeof(monentum_ptr)}, @@ -142,6 +146,15 @@ TEST_P(BatchNormalizationTest, batch_normalization) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(lhs_type, j) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(lhs_type, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_batch_normalization.json b/tests/kernels/test_batch_normalization.json new file mode 100644 index 0000000000..dc1ed67809 --- /dev/null +++ b/tests/kernels/test_batch_normalization.json @@ -0,0 +1,4 @@ +{ + "lhs_shape":[[1, 8, 24, 24], [1, 3, 3, 16], [2, 4, 8, 8], [8, 8], [1, 3, 16, 1], [1, 1]], + "lhs_type":["dt_float32"] +} \ No newline at end of file diff --git a/tests/kernels/test_batch_to_space.cpp b/tests/kernels/test_batch_to_space.cpp index a33beb5e16..a1955782e8 100644 --- a/tests/kernels/test_batch_to_space.cpp +++ b/tests/kernels/test_batch_to_space.cpp @@ -22,16 +22,21 @@ #include #include +#define TEST_CASE_NAME "test_batch_to_space" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; class BatchToSpaceTest : public KernelTest, - public ::testing::TestWithParam< - std::tuple> { + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, input_shape, expect_shape] = GetParam(); + READY_SUBCASE() + + auto input_shape = GetShapeArray("lhs_shape"); + auto expect_shape = GetShapeArray("rhs_shape"); + auto typecode = GetDataType("lhs_type"); input = hrt::create(typecode, input_shape, host_runtime_tensor::pool_cpu_only) @@ -44,7 +49,7 @@ class BatchToSpaceTest : public KernelTest, init_tensor(expect); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; @@ -52,14 +57,12 @@ class BatchToSpaceTest : public KernelTest, }; INSTANTIATE_TEST_SUITE_P(BatchToSpace, BatchToSpaceTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{4, 1, 2, 2}), - testing::Values(dims_t{1, 1, 4, 4}))); + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(BatchToSpaceTest, BatchToSpace) { // expected - float_t b[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; + float b[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; auto b_ptr = b; auto expected = hrt::create(input.datatype(), expect.shape(), {reinterpret_cast(b_ptr), 64}, @@ -67,7 +70,7 @@ 
TEST_P(BatchToSpaceTest, BatchToSpace) { .expect("create tensor failed"); // actual - float_t a[] = {1, 3, 9, 11, 2, 4, 10, 12, 5, 7, 13, 15, 6, 8, 14, 16}; + float a[] = {1, 3, 9, 11, 2, 4, 10, 12, 5, 7, 13, 15, 6, 8, 14, 16}; auto input_tensor = hrt::create(input.datatype(), input.shape(), {reinterpret_cast(a), sizeof(a)}, true, @@ -105,6 +108,18 @@ TEST_P(BatchToSpaceTest, BatchToSpace) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(rhs_shape, j) + FOR_LOOP(lhs_type, k) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(rhs_shape, j) + SPLIT_ELEMENT(lhs_type, k) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_batch_to_space.json b/tests/kernels/test_batch_to_space.json new file mode 100644 index 0000000000..a2df26523c --- /dev/null +++ b/tests/kernels/test_batch_to_space.json @@ -0,0 +1,5 @@ +{ + "lhs_shape":[[4, 1, 2, 2]], + "rhs_shape":[[1, 1, 4, 4]], + "lhs_type":["dt_float32"] +} \ No newline at end of file diff --git a/tests/kernels/test_broadcast.cpp b/tests/kernels/test_broadcast.cpp index 1476e6ea47..7e8eb4e7ca 100644 --- a/tests/kernels/test_broadcast.cpp +++ b/tests/kernels/test_broadcast.cpp @@ -22,52 +22,184 @@ #include #include +#define TEST_CASE_NAME "test_broadcast" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; class BroadCastTest : public KernelTest, - public ::testing::TestWithParam< - std::tuple> { + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape, r_shape] = GetParam(); - - float input_ptr[] = {3, 2, 1}; - input = hrt::create(typecode, l_shape, - {reinterpret_cast(input_ptr), - sizeof(input_ptr)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - - float output_ptr[] = {3, 2, 1, 3, 2, 1, 3, 2, 1}; - expected = hrt::create(typecode, r_shape, - {reinterpret_cast(output_ptr), - sizeof(output_ptr)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); + READY_SUBCASE() + + auto typecode = GetDataType("lhs_type"); + auto l_shape = GetShapeArray("lhs_shape"); + auto r_shape = GetShapeArray("rhs_shape"); + + input = + hrt::create(typecode, r_shape, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + init_tensor(input); + + one = hrt::create(typecode, r_shape, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + init_tensor_one(one); + + size_t shape_size = r_shape.size(); + int64_t *shape_array = (int64_t *)malloc(shape_size * sizeof(int64_t)); + std::copy(r_shape.begin(), r_shape.end(), shape_array); + new_shape = hrt::create(dt_int64, {shape_size}, + {reinterpret_cast(shape_array), + shape_size * sizeof(int64_t)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } + + void init_tensor_one(runtime::runtime_tensor &tensor) { + auto dtype = tensor.datatype(); + switch (dtype) { + case dt_int8: { + NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = static_cast(1); + return ok(); + }); + break; + } + case dt_int16: { + NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = static_cast(1); + return ok(); + }); + break; + } + case dt_int32: { + 
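+            // int32 case: as in the other branches, fill `one` with the constant 1 so that
+            // the ortki_Mul(input, one) reference built in the test body reproduces `input`.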
NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = 1; + return ok(); + }); + break; + } + case dt_int64: { + NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = static_cast(1); + return ok(); + }); + break; + } + case dt_uint8: { + NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = static_cast(1); + return ok(); + }); + break; + } + case dt_uint16: { + NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = static_cast(1); + return ok(); + }); + break; + } + case dt_uint32: { + NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = static_cast(1); + return ok(); + }); + break; + } + case dt_uint64: { + NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = static_cast(1); + return ok(); + }); + break; + } + case dt_float16: { + NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = static_cast(1); + return ok(); + }); + break; + } + case dt_float32: { + NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = static_cast(1); + return ok(); + }); + break; + } + case dt_float64: { + NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = static_cast(1); + return ok(); + }); + break; + } + case dt_bfloat16: { + NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = static_cast(1); + return ok(); + }); + break; + } + default: { + } + } + } protected: runtime_tensor input; - runtime_tensor expected; + runtime_tensor one; + runtime_tensor new_shape; }; INSTANTIATE_TEST_SUITE_P(BroadCast, BroadCastTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{3}), - testing::Values(dims_t{1, 3, 3}))); + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(BroadCastTest, BroadCast) { - // actual - int64_t a_ptr[] = {1, 3, 3}; - auto a = hrt::create(nncase::dt_int64, {3}, - {reinterpret_cast(a_ptr), sizeof(a_ptr)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - auto output = kernels::stackvm::broadcast(input.impl(), a.impl()) + + // expected + auto output_ort = ortki_Mul(runtime_tensor_2_ort_tensor(input), + runtime_tensor_2_ort_tensor(one)); + size_t size = 0; + void *ptr_ort = tensor_buffer(output_ort, &size); + dims_t shape(tensor_rank(output_ort)); + tensor_shape(output_ort, reinterpret_cast(shape.data())); + auto expected = hrt::create(input.datatype(), shape, + {reinterpret_cast(ptr_ort), size}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + // actual + auto output = kernels::stackvm::broadcast(input.impl(), new_shape.impl()) .expect("broadcast failed"); runtime_tensor actual(output.as().expect("as tensor failed")); @@ -86,6 +218,18 @@ TEST_P(BroadCastTest, BroadCast) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_type, i) + FOR_LOOP(lhs_shape, j) + FOR_LOOP(rhs_shape, k) + SPLIT_ELEMENT(lhs_type, i) + SPLIT_ELEMENT(lhs_shape, j) + SPLIT_ELEMENT(rhs_shape, k) + 
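+    // The macros above come from macro_util.h: each FOR_LOOP iterates one JSON array from
+    // test_broadcast.json, SPLIT_ELEMENT copies the currently selected element into
+    // write_doc, and WRITE_SUB_CASE below serializes write_doc into a numbered per-case
+    // JSON file that READY_SUBCASE loads again in SetUp(). A generated sub-case might look
+    // like this (illustrative values only):
+    //   { "lhs_type": "dt_float32", "lhs_shape": [3], "rhs_shape": [1, 8, 24, 24] }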
WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_broadcast.json b/tests/kernels/test_broadcast.json new file mode 100644 index 0000000000..5c63307334 --- /dev/null +++ b/tests/kernels/test_broadcast.json @@ -0,0 +1,5 @@ +{ + "lhs_shape":[[3], [1, 3], [2, 4, 8, 8], [8, 8], [1, 3, 16, 1], [1, 1]], + "rhs_shape":[[1, 8, 24, 24], [1, 3, 3, 16], [2, 4, 8, 8], [8, 8], [1, 3, 16, 1], [1, 1]], + "lhs_type":["dt_float32", "dt_float64", "dt_int32", "dt_int64", "dt_float16"] +} \ No newline at end of file diff --git a/tests/kernels/test_bucket_pad.cpp b/tests/kernels/test_bucket_pad.cpp new file mode 100644 index 0000000000..2f9f2a38de --- /dev/null +++ b/tests/kernels/test_bucket_pad.cpp @@ -0,0 +1,124 @@ +/* Copyright 2019-2021 Canaan Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "kernel_test.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#define TEST_CASE_NAME "test_bucket_pad" + +using namespace nncase; +using namespace nncase::runtime; +using namespace ortki; + +class BucketPadTest : public KernelTest, + public ::testing::TestWithParam> { + public: + void SetUp() override { + READY_SUBCASE() + + auto l_shape = GetShapeArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); + + input = + hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + init_tensor(input); + + float value_array[] = {0}; + value = hrt::create(dt_float32, {1}, + {reinterpret_cast(value_array), + sizeof(value_array)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + } + + void TearDown() override {} + + protected: + runtime_tensor input; + runtime_tensor value; +}; + +INSTANTIATE_TEST_SUITE_P(BucketPad, BucketPadTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); + +TEST_P(BucketPadTest, BucketPad) { + + // expected + size_t size = 0; + int64_t pad_ptr[] = {0, 0, 0, 0, 0, 0, 0, 0}; + auto pad = + hrt::create(dt_int64, {8}, + {reinterpret_cast(pad_ptr), sizeof(pad_ptr)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + auto l_ort = runtime_tensor_2_ort_tensor(input); + auto pad_ort = runtime_tensor_2_ort_tensor(pad); + auto value_ort = runtime_tensor_2_ort_tensor(value); + auto output_ort = ortki_Pad(l_ort, pad_ort, value_ort, "constant"); + void *ptr_ort = tensor_buffer(output_ort, &size); + dims_t shape(tensor_rank(output_ort)); + tensor_shape(output_ort, reinterpret_cast(shape.data())); + auto expected = hrt::create(input.datatype(), shape, + {reinterpret_cast(ptr_ort), size}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + // actual + int64_t new_shape_array[] = {1, 3, 16, 16}; + auto new_shape = + hrt::create(dt_int64, {4}, + {reinterpret_cast(new_shape_array), + sizeof(new_shape_array)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create 
tensor failed"); + auto output = kernels::stackvm::bucket_pad(input.impl(), new_shape.impl()) + .expect("pad failed"); + runtime_tensor actual(output.as().expect("as tensor failed")); + + bool result = is_same_tensor(expected, actual) || + cosine_similarity_tensor(expected, actual); + + if (!result) { + std::cout << "actual "; + print_runtime_tensor(actual); + std::cout << "expected "; + print_runtime_tensor(expected); + } + + // compare + EXPECT_TRUE(result); +} + +int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, j) + FOR_LOOP(lhs_type, i) + SPLIT_ELEMENT(lhs_shape, j) + SPLIT_ELEMENT(lhs_type, i) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} \ No newline at end of file diff --git a/tests/kernels/test_bucket_pad.json b/tests/kernels/test_bucket_pad.json new file mode 100644 index 0000000000..9729bcd176 --- /dev/null +++ b/tests/kernels/test_bucket_pad.json @@ -0,0 +1,4 @@ +{ + "lhs_shape":[[1, 3, 16, 16]], + "lhs_type":["dt_float32"] +} \ No newline at end of file diff --git a/tests/kernels/test_cast.cpp b/tests/kernels/test_cast.cpp index 9d6fb0f4c2..adcec1f2f1 100644 --- a/tests/kernels/test_cast.cpp +++ b/tests/kernels/test_cast.cpp @@ -23,16 +23,21 @@ #include #include +#define TEST_CASE_NAME "test_cast" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; class CastTest : public KernelTest, - public ::testing::TestWithParam< - std::tuple> { + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode_input, typecode_output, l_shape] = GetParam(); + READY_SUBCASE() + + auto typecode_input = GetDataType("lhs_type"); + auto typecode_output = GetDataType("rhs_type"); + auto l_shape = GetShapeArray("i_shape"); input = hrt::create(typecode_input, l_shape, host_runtime_tensor::pool_cpu_only) @@ -54,7 +59,7 @@ class CastTest : public KernelTest, .expect("create tensor failed"); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; @@ -63,12 +68,8 @@ class CastTest : public KernelTest, runtime_tensor expected; }; -INSTANTIATE_TEST_SUITE_P( - cast, CastTest, - testing::Combine(testing::Values(dt_int16, dt_int8, dt_float32, dt_uint8), - testing::Values(dt_int16, dt_int8, dt_float32, dt_uint8), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{1, 3, 8, 8}, - dims_t{1, 3, 1}))); +INSTANTIATE_TEST_SUITE_P(cast, CastTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(CastTest, cast) { // actual @@ -129,8 +130,10 @@ TEST_P(CastTest, cast) { cosine_similarity_tensor(expected1, actual1); if (!result1) { - print_runtime_tensor(actual1); - print_runtime_tensor(expected1); + std::cout << "actual "; + print_runtime_tensor(actual); + std::cout << "expected "; + print_runtime_tensor(expected); } // compare @@ -138,6 +141,18 @@ TEST_P(CastTest, cast) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(lhs_type, j) + FOR_LOOP(rhs_type, k) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(lhs_type, j) + SPLIT_ELEMENT(rhs_type, k) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_cast.json b/tests/kernels/test_cast.json new file mode 100644 index 0000000000..174c15a235 --- /dev/null +++ b/tests/kernels/test_cast.json @@ -0,0 +1,5 @@ +{ + "lhs_shape":[[1, 8, 24, 24], [1, 3, 3, 16], 
[2, 4, 8, 8], [8, 8], [1, 3, 16, 1], [1, 1]], + "lhs_type":["dt_int16", "dt_float32", "dt_uint8", "dt_int8"], + "rhs_type":["dt_int16", "dt_float32", "dt_uint8", "dt_int8"] +} \ No newline at end of file diff --git a/tests/kernels/test_celu.cpp b/tests/kernels/test_celu.cpp index 2481f11ca5..8727460204 100644 --- a/tests/kernels/test_celu.cpp +++ b/tests/kernels/test_celu.cpp @@ -22,42 +22,47 @@ #include #include +#define TEST_CASE_NAME "test_celu" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class CeluTest - : public KernelTest, - public ::testing::TestWithParam> { +class CeluTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, input_shape] = GetParam(); + READY_SUBCASE() + + auto input_shape = GetShapeArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); input = hrt::create(typecode, input_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); + + alpha = hrt::create(typecode, {1}, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + init_tensor(alpha); } void TearDown() override {} protected: runtime_tensor input; + runtime_tensor alpha; }; -INSTANTIATE_TEST_SUITE_P( - celu, CeluTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1}, dims_t{1, 2}, - dims_t{1, 3, 16, 16}, dims_t{16, 16}, - dims_t{3, 16}, dims_t{1, 3, 16, 1}, - dims_t{}))); +INSTANTIATE_TEST_SUITE_P(celu, CeluTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(CeluTest, celu) { auto input_ort = runtime_tensor_2_ort_tensor(input); // expected - auto output_ort = ortki_Celu(input_ort, 1.2f); + OrtKITensor *output_ort; + output_ort = ortki_Celu(input_ort, tensor_to_array(alpha)[0]); size_t size = 0; void *ptr_ort = tensor_buffer(output_ort, &size); dims_t shape(tensor_rank(output_ort)); @@ -68,13 +73,8 @@ TEST_P(CeluTest, celu) { .expect("create tensor failed"); // actual - float_t a_ptr[] = {1.2f}; - auto a = hrt::create(nncase::dt_float32, {1}, - {reinterpret_cast(a_ptr), sizeof(a_ptr)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - auto output = - kernels::stackvm::celu(input.impl(), a.impl()).expect("celu failed"); + auto output = kernels::stackvm::celu(input.impl(), alpha.impl()) + .expect("celu failed"); runtime_tensor actual(output.as().expect("as tensor failed")); bool result = is_same_tensor(expected, actual) || @@ -92,6 +92,15 @@ TEST_P(CeluTest, celu) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(lhs_type, j) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(lhs_type, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_celu.json b/tests/kernels/test_celu.json new file mode 100644 index 0000000000..c6ec765242 --- /dev/null +++ b/tests/kernels/test_celu.json @@ -0,0 +1,4 @@ +{ + "lhs_shape":[[1], [1, 2], [1, 3, 16, 16], [16, 16], [1, 16], [1, 3, 16, 1], []], + "lhs_type":["dt_float32"] +} \ No newline at end of file diff --git a/tests/kernels/test_clamp.cpp b/tests/kernels/test_clamp.cpp index 6e3896cb37..94807ce2f5 100644 --- a/tests/kernels/test_clamp.cpp +++ b/tests/kernels/test_clamp.cpp @@ -22,75 +22,92 @@ #include #include +#define TEST_CASE_NAME "test_clamp" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class ClampTest - : public KernelTest, - 
public ::testing::TestWithParam> { +class ClampTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto l_shape = GetShapeArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); + + auto value1 = GetFloatNumber("min"); + auto value2 = GetFloatNumber("max"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); + + min_value = value1; + max_value = value2; } void TearDown() override {} protected: runtime_tensor input; + float min_value; + float max_value; }; -INSTANTIATE_TEST_SUITE_P( - clamp, ClampTest, - testing::Combine(testing::Values(dt_float32, dt_int32, dt_int16, dt_int8, - dt_uint8, dt_uint16, dt_uint32, dt_uint64, - dt_int64, dt_float64, dt_boolean), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{1}, - dims_t{1, 3}, dims_t{8, 8}, - dims_t{1, 3, 8}, dims_t{16, 16}, dims_t{}, - dims_t{16}))); +INSTANTIATE_TEST_SUITE_P(clamp, ClampTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(ClampTest, clamp) { // expected - float_t min1[] = {-1.0f}; - auto min_tensor1 = + float min[] = {min_value}; + auto min_tensor_float = hrt::create(nncase::dt_float32, {1}, - {reinterpret_cast(min1), sizeof(min1)}, true, + {reinterpret_cast(min), sizeof(min)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - float_t max1[] = {1.0f}; - auto max_tensor1 = + float max[] = {max_value}; + auto max_tensor_float = hrt::create(nncase::dt_float32, {1}, - {reinterpret_cast(max1), sizeof(max1)}, true, + {reinterpret_cast(max), sizeof(max)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - auto output1 = kernels::stackvm::clamp(input.impl(), min_tensor1.impl(), - max_tensor1.impl()) - .expect("clamp failed"); - runtime_tensor expected(output1.as().expect("as tensor failed")); + auto output_ort = + ortki_Clip(runtime_tensor_2_ort_tensor(input), + ortki_CastLike(runtime_tensor_2_ort_tensor(min_tensor_float), + runtime_tensor_2_ort_tensor(input)), + ortki_CastLike(runtime_tensor_2_ort_tensor(max_tensor_float), + runtime_tensor_2_ort_tensor(input))); + size_t size = 0; + void *ptr_ort = tensor_buffer(output_ort, &size); + dims_t shape(tensor_rank(output_ort)); + tensor_shape(output_ort, reinterpret_cast(shape.data())); + auto expected = hrt::create(input.datatype(), shape, + {reinterpret_cast(ptr_ort), size}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); // actual - float_t min[] = {-1.0f}; - auto min_tensor = - hrt::create(nncase::dt_float32, {1}, - {reinterpret_cast(min), sizeof(min)}, true, - host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - - float_t max[] = {1.0f}; - auto max_tensor = - hrt::create(nncase::dt_float32, {1}, - {reinterpret_cast(max), sizeof(max)}, true, - host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); + runtime_tensor min_tensor( + kernels::stackvm::cast(input.datatype(), + runtime::stackvm::cast_mode_t::kdefault, + min_tensor_float.impl()) + .expect("cast failed") + .as() + .expect("as tensor failed")); + + runtime_tensor max_tensor( + kernels::stackvm::cast(input.datatype(), + runtime::stackvm::cast_mode_t::kdefault, + max_tensor_float.impl()) + .expect("cast failed") + .as() + .expect("as tensor failed")); auto output = kernels::stackvm::clamp(input.impl(), min_tensor.impl(), max_tensor.impl()) @@ -112,6 +129,21 @@ TEST_P(ClampTest, clamp) { } int main(int argc, char 
*argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(lhs_type, j) + FOR_LOOP(min, k) + FOR_LOOP(max, l) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(lhs_type, j) + SPLIT_ELEMENT(min, k) + SPLIT_ELEMENT(max, l) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_clamp.json b/tests/kernels/test_clamp.json new file mode 100644 index 0000000000..6c62ab02d6 --- /dev/null +++ b/tests/kernels/test_clamp.json @@ -0,0 +1,6 @@ +{ + "lhs_shape":[[1, 3, 16, 16], [1], [1, 3], [8, 8], [1, 3, 8], [16, 16], [16], []], + "lhs_type":["dt_float32"], + "min": [-1, -2, -3, -4, -5, -6], + "max": [0, 1, 2, 3, 4, 5, 6] +} \ No newline at end of file diff --git a/tests/kernels/test_concat.cpp b/tests/kernels/test_concat.cpp index c9508c36ab..c7da18b3bf 100644 --- a/tests/kernels/test_concat.cpp +++ b/tests/kernels/test_concat.cpp @@ -22,16 +22,21 @@ #include #include +#define TEST_CASE_NAME "test_concat" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class ConcatTest - : public KernelTest, - public ::testing::TestWithParam> { +class ConcatTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, shape] = GetParam(); + READY_SUBCASE() + + auto shape = GetShapeArray("lhs_shape"); + auto value = GetNumber("axis"); + auto typecode = GetDataType("lhs_type"); lhs = hrt::create(typecode, shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); @@ -40,21 +45,22 @@ class ConcatTest rhs = hrt::create(typecode, shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(rhs); + + axis_value = value > 0 ? value >= (int64_t)shape.size() ? 0 : value + : -value > (int64_t)shape.size() ? 
0 + : value; } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor lhs; runtime_tensor rhs; + int64_t axis_value; }; -INSTANTIATE_TEST_SUITE_P( - Concat, ConcatTest, - testing::Combine(testing::Values(dt_float32, dt_int64, dt_int32, dt_float64, - dt_int16, dt_uint32, dt_boolean), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{1, 3}, - dims_t{1, 3, 16}, dims_t{1}))); +INSTANTIATE_TEST_SUITE_P(Concat, ConcatTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(ConcatTest, Concat) { auto l_ort = runtime_tensor_2_ort_tensor(lhs); @@ -62,7 +68,7 @@ TEST_P(ConcatTest, Concat) { OrtKITensor *ls_ort[2] = {l_ort, r_ort}; // expected - auto output_ort = ortki_Concat(ls_ort, 2, 0); + auto output_ort = ortki_Concat(ls_ort, 2, axis_value); size_t size = 0; void *ptr_ort = tensor_buffer(output_ort, &size); dims_t shape(tensor_rank(output_ort)); @@ -80,7 +86,7 @@ TEST_P(ConcatTest, Concat) { fields.push_back(field2); auto output_tuple = tuple(std::in_place, std::move(fields)); - int64_t axis_ptr[] = {0}; + int64_t axis_ptr[] = {axis_value}; auto axis = hrt::create(dt_int64, {1}, {reinterpret_cast(axis_ptr), sizeof(axis_ptr)}, @@ -107,6 +113,18 @@ TEST_P(ConcatTest, Concat) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(axis, j) + FOR_LOOP(lhs_type, k) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(axis, j) + SPLIT_ELEMENT(lhs_type, k) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_concat.json b/tests/kernels/test_concat.json new file mode 100644 index 0000000000..c17d767ff2 --- /dev/null +++ b/tests/kernels/test_concat.json @@ -0,0 +1,5 @@ +{ + "lhs_shape":[[1, 8, 24, 24], [1, 3, 3, 16], [2, 4, 8, 8], [8, 8], [1, 3, 16, 1], [1, 1], [16]], + "axis":[-1, 0, 1, 2, 3, -2, -3, -4], + "lhs_type":["dt_float32", "dt_int64", "dt_int32", "dt_float64", "dt_int16", "dt_uint32", "dt_boolean", "dt_float16"] +} \ No newline at end of file diff --git a/tests/kernels/test_constant_of_shape.cpp b/tests/kernels/test_constant_of_shape.cpp index f839138768..7506c9a28e 100644 --- a/tests/kernels/test_constant_of_shape.cpp +++ b/tests/kernels/test_constant_of_shape.cpp @@ -22,52 +22,165 @@ #include #include +#define TEST_CASE_NAME "test_constant_of_shape" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class ConstantOfShapeTest - : public KernelTest, - public ::testing::TestWithParam> { +class ConstantOfShapeTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, shape] = GetParam(); - - const int size = 768; - int32_t array[size]; + READY_SUBCASE() - for (int32_t &i : array) { - i = 1; - } + auto shape = GetShapeArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); expected = - hrt::create(dt_int32, shape, - {reinterpret_cast(array), sizeof(array)}, - true, host_runtime_tensor::pool_cpu_only) + hrt::create(typecode, shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); + init_tensor_one(expected); + + size_t shape_size = shape.size(); + int64_t *shape_array = (int64_t *)malloc(shape_size * sizeof(int64_t)); + std::copy(shape.begin(), shape.end(), shape_array); + shape_tensor = hrt::create(dt_int64, {shape_size}, + {reinterpret_cast(shape_array), + shape_size * sizeof(int64_t)}, + true, host_runtime_tensor::pool_cpu_only) + 
.expect("create tensor failed"); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } + + void init_tensor_one(runtime::runtime_tensor &tensor) { + auto dtype = tensor.datatype(); + switch (dtype) { + case dt_int8: { + NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = static_cast(1); + return ok(); + }); + break; + } + case dt_int16: { + NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = static_cast(1); + return ok(); + }); + break; + } + case dt_int32: { + NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = 1; + return ok(); + }); + break; + } + case dt_int64: { + NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = static_cast(1); + return ok(); + }); + break; + } + case dt_uint8: { + NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = static_cast(1); + return ok(); + }); + break; + } + case dt_uint16: { + NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = static_cast(1); + return ok(); + }); + break; + } + case dt_uint32: { + NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = static_cast(1); + return ok(); + }); + break; + } + case dt_uint64: { + NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = static_cast(1); + return ok(); + }); + break; + } + case dt_float16: { + NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = static_cast(1); + return ok(); + }); + break; + } + case dt_float32: { + NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = static_cast(1); + return ok(); + }); + break; + } + case dt_float64: { + NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = static_cast(1); + return ok(); + }); + break; + } + case dt_bfloat16: { + NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = static_cast(1); + return ok(); + }); + break; + } + default: { + } + } + } protected: runtime_tensor expected; + runtime_tensor shape_tensor; }; INSTANTIATE_TEST_SUITE_P(constant_of_shape, ConstantOfShapeTest, - testing::Combine(testing::Values(dt_int32), - testing::Values(dims_t{1, 3, 16, - 16}))); + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(ConstantOfShapeTest, constant_of_shape) { // actual - int64_t shape1[] = {1, 3, 16, 16}; - auto shape_ptr = - hrt::create(dt_int64, {4}, - {reinterpret_cast(shape1), sizeof(shape1)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - int32_t value[] = {1}; auto value_ptr = hrt::create(dt_int32, {1}, @@ -75,9 +188,9 @@ TEST_P(ConstantOfShapeTest, constant_of_shape) { host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - auto output = - kernels::stackvm::constant_of_shape(shape_ptr.impl(), value_ptr.impl()) - .expect("constant_of_shape failed"); + auto output = 
kernels::stackvm::constant_of_shape(shape_tensor.impl(), + value_ptr.impl()) + .expect("constant_of_shape failed"); runtime_tensor actual(output.as().expect("as tensor failed")); bool result = is_same_tensor(expected, actual) || @@ -95,6 +208,15 @@ TEST_P(ConstantOfShapeTest, constant_of_shape) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(lhs_type, j) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(lhs_type, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_constant_of_shape.json b/tests/kernels/test_constant_of_shape.json new file mode 100644 index 0000000000..8ddf991217 --- /dev/null +++ b/tests/kernels/test_constant_of_shape.json @@ -0,0 +1,4 @@ +{ + "lhs_shape":[[1, 8, 24, 24], [1, 3, 3, 16], [2, 4, 8, 8], [8, 8], [1, 3, 16, 1], [1, 1], [16]], + "lhs_type":["dt_int32"] +} \ No newline at end of file diff --git a/tests/kernels/test_conv2d.cpp b/tests/kernels/test_conv2d.cpp index a9abc07d7c..3d4cf62f78 100644 --- a/tests/kernels/test_conv2d.cpp +++ b/tests/kernels/test_conv2d.cpp @@ -28,10 +28,12 @@ using namespace ortki; class Conv2DTest : public KernelTest, public ::testing::TestWithParam< - std::tuple> { + std::tuple> { public: void SetUp() override { - auto &&[typecode, input_shape, weight_shape, bias_shape] = GetParam(); + auto &&[typecode, input_shape, weight_shape, bias_shape, value1, value2, + value3, value4] = GetParam(); input = hrt::create(typecode, input_shape, host_runtime_tensor::pool_cpu_only) @@ -47,6 +49,11 @@ class Conv2DTest : public KernelTest, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(bais); + + dilations_value = value1; + pad_value = value2; + strides_value = value3; + group_value = value4; } void TearDown() override {} @@ -55,14 +62,22 @@ class Conv2DTest : public KernelTest, runtime_tensor input; runtime_tensor weight; runtime_tensor bais; + dims_t dilations_value; + dims_t pad_value; + dims_t strides_value; + int64_t group_value; }; -INSTANTIATE_TEST_SUITE_P(conv2d, Conv2DTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1, 4, 5, 5}), - testing::Values(dims_t{8, 4, 3, 3}, - dims_t{8, 4, 1, 1}), - testing::Values(dims_t{8}))); +INSTANTIATE_TEST_SUITE_P( + conv2d, Conv2DTest, + testing::Combine( + testing::Values(dt_float32), + testing::Values(dims_t{1, 4, 5, 5}, dims_t{1, 4, 16, 16}), + testing::Values(dims_t{8, 4, 3, 3}, dims_t{8, 4, 1, 1}), + testing::Values(dims_t{8}), testing::Values(dims_t{2, 2}, dims_t{1, 1}), + testing::Values(dims_t{1, 1, 1, 1} /*, dims_t{0, 0, 1, 0}*/), + testing::Values(dims_t{1, 1}, dims_t{2, 2}), + testing::Values(1 /*, 2*/))); // todo result error TEST_P(Conv2DTest, conv2d) { auto input_ort = runtime_tensor_2_ort_tensor(input); @@ -71,14 +86,25 @@ TEST_P(Conv2DTest, conv2d) { // expected const char auto_pad[7] = "NOTSET"; - int64_t dilations[] = {1, 1}; + + size_t dilations_size = dilations_value.size(); + int64_t *dilations = (int64_t *)malloc(dilations_size * sizeof(int64_t)); + std::copy(dilations_value.begin(), dilations_value.end(), dilations); + int64_t kernel_shape[] = {(int64_t)weight.shape()[2], (int64_t)weight.shape()[3]}; - int64_t pad[] = {1, 1, 1, 1}; - int64_t strides[] = {1, 1}; - auto output_ort = - ortki_Conv(input_ort, weight_ort, bais_ort, auto_pad, dilations, 2, 1, - kernel_shape, 2, pad, 4, strides, 2); + + size_t pad_size = pad_value.size(); + int64_t *pad = 
(int64_t *)malloc(pad_size * sizeof(int64_t)); + std::copy(pad_value.begin(), pad_value.end(), pad); + + size_t strides_size = strides_value.size(); + int64_t *strides = (int64_t *)malloc(strides_size * sizeof(int64_t)); + std::copy(strides_value.begin(), strides_value.end(), strides); + + auto output_ort = ortki_Conv( + input_ort, weight_ort, bais_ort, auto_pad, dilations, dilations_size, + group_value, kernel_shape, 2, pad, pad_size, strides, strides_size); size_t size = 0; void *ptr_ort = tensor_buffer(output_ort, &size); dims_t shape(tensor_rank(output_ort)); @@ -89,41 +115,49 @@ TEST_P(Conv2DTest, conv2d) { .expect("create tensor failed"); // actual - int64_t group[] = {1}; - float_t fused_clamp[] = {-std::numeric_limits::infinity(), - std::numeric_limits::infinity()}; + int64_t group[] = {group_value}; + + float fused_clamp[] = {-std::numeric_limits::infinity(), + std::numeric_limits::infinity()}; + auto dilations_ptr = hrt::create(nncase::dt_int64, {2}, {reinterpret_cast(dilations), - sizeof(dilations)}, + dilations_size * sizeof(int64_t)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); + auto kernel_shape_ptr = hrt::create( nncase::dt_int64, {2}, {reinterpret_cast(kernel_shape), sizeof(kernel_shape)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - auto pad_ptr = - hrt::create(nncase::dt_int64, {4}, - {reinterpret_cast(pad), sizeof(pad)}, true, - host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - auto strides_ptr = - hrt::create(nncase::dt_int64, {2}, - {reinterpret_cast(strides), sizeof(strides)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); + + auto pad_ptr = hrt::create(nncase::dt_int64, {4}, + {reinterpret_cast(pad), + pad_size * sizeof(int64_t)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + auto strides_ptr = hrt::create(nncase::dt_int64, {2}, + {reinterpret_cast(strides), + strides_size * sizeof(int64_t)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + auto group_ptr = hrt::create(nncase::dt_int64, {1}, {reinterpret_cast(group), sizeof(group)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); + auto fused_clamp_ptr = hrt::create( nncase::dt_float32, {2}, {reinterpret_cast(fused_clamp), sizeof(fused_clamp)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); + auto output = kernels::stackvm::conv2d( runtime::stackvm::pad_mode_t::constant, input.impl(), weight.impl(), diff --git a/tests/kernels/test_conv2d_transpose.cpp b/tests/kernels/test_conv2d_transpose.cpp index 1900217889..96e60a1d86 100644 --- a/tests/kernels/test_conv2d_transpose.cpp +++ b/tests/kernels/test_conv2d_transpose.cpp @@ -29,10 +29,12 @@ using namespace ortki; class Conv2DTransposeTest : public KernelTest, public ::testing::TestWithParam< - std::tuple> { + std::tuple> { public: void SetUp() override { - auto &&[typecode, input_shape, weight_shape, bias_shape] = GetParam(); + auto &&[typecode, input_shape, weight_shape, bias_shape, value1, value2, + value3, value4, value5, value6] = GetParam(); input = hrt::create(typecode, input_shape, host_runtime_tensor::pool_cpu_only) @@ -48,6 +50,13 @@ class Conv2DTransposeTest host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(bais); + + dilations_value = value1; + pad_value = value2; + strides_value = value3; + group_value = value4; + output_padding_value = value5; + output_shape_value = value6; } void 
TearDown() override {} @@ -56,13 +65,24 @@ class Conv2DTransposeTest runtime_tensor input; runtime_tensor weight; runtime_tensor bais; + dims_t dilations_value; + dims_t pad_value; + dims_t strides_value; + dims_t output_padding_value; + dims_t output_shape_value; + int64_t group_value; }; -INSTANTIATE_TEST_SUITE_P(conv2d_transpose, Conv2DTransposeTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1, 1, 5, 5}), - testing::Values(dims_t{1, 2, 3, 3}), - testing::Values(dims_t{2}))); +INSTANTIATE_TEST_SUITE_P( + conv2d_transpose, Conv2DTransposeTest, + testing::Combine( + testing::Values(dt_float32), testing::Values(dims_t{1, 1, 5, 5}), + testing::Values(dims_t{1, 2, 3, 3}), testing::Values(dims_t{2}), + testing::Values(/*dims_t{2, 2} ,*/ dims_t{1, 1}), + testing::Values(dims_t{1, 1, 1, 1} /*, dims_t{0, 0, 1, 0}*/), + testing::Values(dims_t{1, 1} /*, dims_t{2, 2}*/), + testing::Values(1 /*, 2*/), testing::Values(dims_t{0, 0}), + testing::Values(dims_t{1, 2, 5, 5}))); TEST_P(Conv2DTransposeTest, conv2d_transpose) { auto input_ort = runtime_tensor_2_ort_tensor(input); @@ -71,16 +91,37 @@ TEST_P(Conv2DTransposeTest, conv2d_transpose) { // expected const char *auto_pad = "NOTSET"; - int64_t dilations[] = {1, 1}; - int64_t kernel_shape[] = {3, 3}; - int64_t pad[] = {1, 1, 1, 1}; - int64_t strides[] = {1, 1}; - int64_t output_padding[] = {0, 0}; - int64_t output_shape[] = {1, 2, 5, 5}; - auto output_ort = - ortki_ConvTranspose(input_ort, weight_ort, bais_ort, auto_pad, - dilations, 2, 1, kernel_shape, 2, output_padding, 2, - output_shape, 4, pad, 4, strides, 2); + size_t dilations_size = dilations_value.size(); + int64_t *dilations = (int64_t *)malloc(dilations_size * sizeof(int64_t)); + std::copy(dilations_value.begin(), dilations_value.end(), dilations); + + int64_t kernel_shape[] = {(int64_t)weight.shape()[2], + (int64_t)weight.shape()[3]}; + + size_t pad_size = pad_value.size(); + int64_t *pad = (int64_t *)malloc(pad_size * sizeof(int64_t)); + std::copy(pad_value.begin(), pad_value.end(), pad); + + size_t strides_size = strides_value.size(); + int64_t *strides = (int64_t *)malloc(strides_size * sizeof(int64_t)); + std::copy(strides_value.begin(), strides_value.end(), strides); + + size_t output_padding_size = output_padding_value.size(); + int64_t *output_padding = + (int64_t *)malloc(output_padding_size * sizeof(int64_t)); + std::copy(output_padding_value.begin(), output_padding_value.end(), + output_padding); + + size_t output_shape_size = output_shape_value.size(); + int64_t *output_shape = + (int64_t *)malloc(output_shape_size * sizeof(int64_t)); + std::copy(output_shape_value.begin(), output_shape_value.end(), + output_shape); + + auto output_ort = ortki_ConvTranspose( + input_ort, weight_ort, bais_ort, auto_pad, dilations, dilations_size, + group_value, kernel_shape, 2, output_padding, output_padding_size, + output_shape, output_shape_size, pad, pad_size, strides, strides_size); size_t size = 0; void *ptr_ort = tensor_buffer(output_ort, &size); dims_t shape(tensor_rank(output_ort)); @@ -91,52 +132,60 @@ TEST_P(Conv2DTransposeTest, conv2d_transpose) { .expect("create tensor failed"); // actual - int64_t group[] = {1}; - float_t fused_clamp[] = {-FLT_MAX, FLT_MAX}; + int64_t group[] = {group_value}; + float fused_clamp[] = {-FLT_MAX, FLT_MAX}; auto dilations_ptr = hrt::create(nncase::dt_int64, {2}, {reinterpret_cast(dilations), - sizeof(dilations)}, + dilations_size * sizeof(int64_t)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); + 
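// The hunks above and below convert each dims_t parameter (dilations_value,
// pad_value, strides_value, output_padding_value, output_shape_value) into an
// int64_t buffer for the ortki_* C API via malloc + std::copy, and those
// buffers are never freed. A sketch of the same conversion with std::vector,
// which keeps the pointer/length pair but releases the memory automatically
// (assumption: the C API only needs the buffer to stay valid for the call;
// the helper name to_int64 is illustrative, not from nncase):
#include <cstdint>
#include <vector>

template <typename Dims>
std::vector<int64_t> to_int64(const Dims &dims) {
    // dims_t exposes begin()/end(), so the range constructor converts each
    // dimension to int64_t without a manual copy loop.
    return std::vector<int64_t>(dims.begin(), dims.end());
}
// usage sketch: auto pads = to_int64(pad_value);
//               // pass pads.data() where `pad` was passed and pads.size()
//               // where `pad_size` was passed.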
auto kernel_shape_ptr = hrt::create( nncase::dt_int64, {2}, {reinterpret_cast(kernel_shape), sizeof(kernel_shape)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - auto pad_ptr = - hrt::create(nncase::dt_int64, {4}, - {reinterpret_cast(pad), sizeof(pad)}, true, - host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - auto strides_ptr = - hrt::create(nncase::dt_int64, {2}, - {reinterpret_cast(strides), sizeof(strides)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); + + auto pad_ptr = hrt::create(nncase::dt_int64, {4}, + {reinterpret_cast(pad), + pad_size * sizeof(int64_t)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + auto strides_ptr = hrt::create(nncase::dt_int64, {2}, + {reinterpret_cast(strides), + strides_size * sizeof(int64_t)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + auto group_ptr = hrt::create(nncase::dt_int64, {1}, {reinterpret_cast(group), sizeof(group)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); + auto fused_clamp_ptr = hrt::create( nncase::dt_float32, {2}, {reinterpret_cast(fused_clamp), sizeof(fused_clamp)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); + auto output_padding_ptr = hrt::create(nncase::dt_int64, {2}, {reinterpret_cast(output_padding), - sizeof(output_padding)}, + output_padding_size * sizeof(int64_t)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); + auto output_shape_ptr = - hrt::create( - nncase::dt_int64, {4}, - {reinterpret_cast(output_shape), sizeof(output_shape)}, - true, host_runtime_tensor::pool_cpu_only) + hrt::create(nncase::dt_int64, {4}, + {reinterpret_cast(output_shape), + output_shape_size * sizeof(int64_t)}, + true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); + auto output = kernels::stackvm::conv2d_transpose( runtime::stackvm::pad_mode_t::constant, input.impl(), weight.impl(), @@ -161,6 +210,18 @@ TEST_P(Conv2DTransposeTest, conv2d_transpose) { } int main(int argc, char *argv[]) { + // READY_TEST_CASE_GENERATE() + // FOR_LOOP(lhs_shape, i) + // FOR_LOOP(lhs_type, j) + // FOR_LOOP(rhs_type, k) + // SPLIT_ELEMENT(lhs_shape, i) + // SPLIT_ELEMENT(lhs_type, j) + // SPLIT_ELEMENT(rhs_type, k) + // WRITE_SUB_CASE() + // FOR_LOOP_END() + // FOR_LOOP_END() + // FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_conv2d_transpose.json b/tests/kernels/test_conv2d_transpose.json new file mode 100644 index 0000000000..8ddf991217 --- /dev/null +++ b/tests/kernels/test_conv2d_transpose.json @@ -0,0 +1,4 @@ +{ + "lhs_shape":[[1, 8, 24, 24], [1, 3, 3, 16], [2, 4, 8, 8], [8, 8], [1, 3, 16, 1], [1, 1], [16]], + "lhs_type":["dt_int32"] +} \ No newline at end of file diff --git a/tests/kernels/test_cum_sum.cpp b/tests/kernels/test_cum_sum.cpp index b5900e8792..e6a287e32c 100644 --- a/tests/kernels/test_cum_sum.cpp +++ b/tests/kernels/test_cum_sum.cpp @@ -22,16 +22,20 @@ #include #include +#define TEST_CASE_NAME "test_cum_sum" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class CumSumTest - : public KernelTest, - public ::testing::TestWithParam> { +class CumSumTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto l_shape = GetShapeArray("lhs_shape"); + auto 
typecode = GetDataType("lhs_type"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) @@ -39,20 +43,14 @@ class CumSumTest init_tensor(input); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - cum_sum, CumSumTest, - testing::Combine(testing::Values(dt_float32, dt_int32, dt_int64, - dt_float64 /*, - dt_uint32, dt_uint64, dt_float16, - dt_bfloat16*/), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{2, 2}, - dims_t{1, 3, 2}))); +INSTANTIATE_TEST_SUITE_P(cum_sum, CumSumTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(CumSumTest, cum_sum) { auto l_ort = runtime_tensor_2_ort_tensor(input); @@ -108,6 +106,15 @@ TEST_P(CumSumTest, cum_sum) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(lhs_type, j) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(lhs_type, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_cum_sum.json b/tests/kernels/test_cum_sum.json new file mode 100644 index 0000000000..d5df7d717c --- /dev/null +++ b/tests/kernels/test_cum_sum.json @@ -0,0 +1,4 @@ +{ + "lhs_shape":[[1, 3, 16, 16], [2, 2], [1, 3, 2]], + "lhs_type":["dt_float32", "dt_int32", "dt_int64", "dt_float64", "dt_float16"] +} \ No newline at end of file diff --git a/tests/kernels/test_dequantize.cpp b/tests/kernels/test_dequantize.cpp index 065bf91c7b..13cee1c24a 100644 --- a/tests/kernels/test_dequantize.cpp +++ b/tests/kernels/test_dequantize.cpp @@ -26,12 +26,16 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class DequantizeTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_dequantize" + +class DequantizeTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("lhs_type"); + auto l_shape = GetShapeArray("lhs_shape"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) @@ -39,87 +43,147 @@ class DequantizeTest init_tensor(input); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; }; INSTANTIATE_TEST_SUITE_P(dequantize, DequantizeTest, - testing::Combine(testing::Values(dt_uint8, dt_int8), - testing::Values(dims_t{1, 3, 16, - 16}))); + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(DequantizeTest, dequantize) { auto l_ort = runtime_tensor_2_ort_tensor(input); + auto zero_point_value = 127; + auto scale_value = 0.01f; - // expected runtime_tensor zero_point_ptr; if (input.datatype() == dt_uint8) { - uint8_t zero_point[] = {127}; + uint8_t zero_point[] = {(uint8_t)zero_point_value}; zero_point_ptr = hrt::create(nncase::dt_uint8, {1}, {reinterpret_cast(zero_point), sizeof(zero_point)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); } else if (input.datatype() == dt_int8) { - int8_t zero_point[] = {127}; + int8_t zero_point[] = {(int8_t)zero_point_value}; zero_point_ptr = hrt::create(nncase::dt_int8, {1}, {reinterpret_cast(zero_point), sizeof(zero_point)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); + } else { + int16_t zero_point[] = {(int16_t)zero_point_value}; + zero_point_ptr = hrt::create(nncase::dt_int16, {1}, + 
{reinterpret_cast(zero_point), + sizeof(zero_point)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); } - float_t scale[] = {0.01f}; + float scale[] = {scale_value}; auto scale_ptr = hrt::create(nncase::dt_float32, {1}, {reinterpret_cast(scale), sizeof(scale)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - auto output_ort = - ortki_DequantizeLinear(l_ort, runtime_tensor_2_ort_tensor(scale_ptr), - runtime_tensor_2_ort_tensor(zero_point_ptr), 0); - size_t size = 0; - void *ptr_ort = tensor_buffer(output_ort, &size); - dims_t shape(tensor_rank(output_ort)); - tensor_shape(output_ort, reinterpret_cast(shape.data())); - auto expected = hrt::create(dt_float32, shape, - {reinterpret_cast(ptr_ort), size}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - - // actual - quant_param_t quantParam; - quantParam.zero_point = 127; - quantParam.scale = 0.01f; - quant_param_t quant_param[] = {quantParam}; - auto quant_param_ptr = - hrt::create( - dt_int64, {1}, - {reinterpret_cast(quant_param), sizeof(quant_param)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - auto output = kernels::stackvm::dequantize(dt_float32, input.impl(), - quant_param_ptr.impl()) - .expect("dequantize failed"); - runtime_tensor actual(output.as().expect("as tensor failed")); - - bool result = is_same_tensor(expected, actual) || - cosine_similarity_tensor(expected, actual); - - if (!result) { - std::cout << "actual "; - print_runtime_tensor(actual); - std::cout << "expected "; - print_runtime_tensor(expected); - } - // compare - EXPECT_TRUE(result); + if (zero_point_ptr.datatype() != dt_int16) { + + // expected + auto output_ort = ortki_DequantizeLinear( + l_ort, runtime_tensor_2_ort_tensor(scale_ptr), + runtime_tensor_2_ort_tensor(zero_point_ptr), 0); + size_t size = 0; + void *ptr_ort = tensor_buffer(output_ort, &size); + dims_t shape(tensor_rank(output_ort)); + tensor_shape(output_ort, reinterpret_cast(shape.data())); + auto expected = + hrt::create(dt_float32, shape, + {reinterpret_cast(ptr_ort), size}, true, + host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + // actual + quant_param_t quantParam; + quantParam.zero_point = zero_point_value; + quantParam.scale = scale_value; + quant_param_t quant_param[] = {quantParam}; + auto quant_param_ptr = + hrt::create(dt_int64, {1}, + {reinterpret_cast(quant_param), + sizeof(quant_param)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + auto output = kernels::stackvm::dequantize(dt_float32, input.impl(), + quant_param_ptr.impl()) + .expect("dequantize failed"); + runtime_tensor actual(output.as().expect("as tensor failed")); + + bool result = is_same_tensor(expected, actual) || + cosine_similarity_tensor(expected, actual); + + if (!result) { + std::cout << "actual "; + print_runtime_tensor(actual); + std::cout << "expected "; + print_runtime_tensor(expected); + } + + // compare + EXPECT_TRUE(result); + } else { + + quant_param_t quantParam; + quantParam.zero_point = zero_point_value; + quantParam.scale = scale_value; + quant_param_t quant_param[] = {quantParam}; + auto quant_param_ptr = + hrt::create(dt_int64, {1}, + {reinterpret_cast(quant_param), + sizeof(quant_param)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + // expected + runtime_tensor expected; + expected = hrt::create(dt_float32, input.shape(), + host_runtime_tensor::pool_cpu_only) + .expect("create tensor 
failed"); + int16_dequantize_to_float(expected, input, 127, 0.01f); + + // actual + auto output = kernels::stackvm::dequantize(dt_float32, input.impl(), + quant_param_ptr.impl()) + .expect("dequantize failed"); + runtime_tensor actual(output.as().expect("as tensor failed")); + + bool result = is_same_tensor(expected, actual) || + cosine_similarity_tensor(expected, actual); + + if (!result) { + print_runtime_tensor(input); + std::cout << "actual "; + print_runtime_tensor(actual); + std::cout << "expected "; + print_runtime_tensor(expected); + } + + // compare + EXPECT_TRUE(result); + } } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_type, i) + FOR_LOOP(lhs_shape, j) + SPLIT_ELEMENT(lhs_type, i) + SPLIT_ELEMENT(lhs_shape, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_dequantize.json b/tests/kernels/test_dequantize.json new file mode 100644 index 0000000000..84ad46c434 --- /dev/null +++ b/tests/kernels/test_dequantize.json @@ -0,0 +1,4 @@ +{ + "lhs_shape":[[1, 3, 16, 16], [1, 3, 16], [3, 16], [16, 16], [1], []], + "lhs_type":["dt_int8", "dt_int16", "dt_uint8"] +} \ No newline at end of file diff --git a/tests/kernels/test_elu.cpp b/tests/kernels/test_elu.cpp index 8fc22de136..b7d51de804 100644 --- a/tests/kernels/test_elu.cpp +++ b/tests/kernels/test_elu.cpp @@ -22,41 +22,53 @@ #include #include +#define TEST_CASE_NAME "test_elu" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class EluTest - : public KernelTest, - public ::testing::TestWithParam> { +class EluTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto l_shape = GetShapeArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); + + alpha = hrt::create(typecode, {1}, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + init_tensor(alpha); } void TearDown() override {} protected: runtime_tensor input; + runtime_tensor alpha; }; -INSTANTIATE_TEST_SUITE_P( - elu, EluTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{1}, - dims_t{8, 8}, dims_t{1, 4, 16}, - dims_t{1, 3, 24, 24}, dims_t{}))); +INSTANTIATE_TEST_SUITE_P(elu, EluTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(EluTest, elu) { auto l_ort = runtime_tensor_2_ort_tensor(input); // expected - auto output_ort = ortki_Elu(l_ort, 0.8f); + OrtKITensor *output_ort; + if (input.datatype() == dt_float16) { + output_ort = ortki_Elu(l_ort, tensor_to_array(alpha)[0]); + } else if (input.datatype() == dt_float32) { + output_ort = ortki_Elu(l_ort, tensor_to_array(alpha)[0]); + } else { + output_ort = ortki_Elu(l_ort, tensor_to_array(alpha)[0]); + } size_t size = 0; void *ptr_ort = tensor_buffer(output_ort, &size); dims_t shape(tensor_rank(output_ort)); @@ -67,13 +79,8 @@ TEST_P(EluTest, elu) { .expect("create tensor failed"); // actual - float_t a_ptr[] = {0.8f}; - auto a = hrt::create(nncase::dt_float32, {1}, - {reinterpret_cast(a_ptr), sizeof(a_ptr)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); auto output = - kernels::stackvm::elu(input.impl(), a.impl()).expect("elu failed"); + kernels::stackvm::elu(input.impl(), 
alpha.impl()).expect("elu failed"); runtime_tensor actual(output.as().expect("as tensor failed")); bool result = is_same_tensor(expected, actual) || @@ -91,6 +98,15 @@ TEST_P(EluTest, elu) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, j) + FOR_LOOP(lhs_type, i) + SPLIT_ELEMENT(lhs_shape, j) + SPLIT_ELEMENT(lhs_type, i) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_elu.json b/tests/kernels/test_elu.json new file mode 100644 index 0000000000..2694fe1f34 --- /dev/null +++ b/tests/kernels/test_elu.json @@ -0,0 +1,4 @@ +{ + "lhs_shape":[[1, 3, 16, 16], [1, 3, 16], [8, 8], [16, 16], [1], [1, 3, 24, 24], []], + "lhs_type":["dt_float32", "dt_float16", "dt_float64"] +} \ No newline at end of file diff --git a/tests/kernels/test_erf.cpp b/tests/kernels/test_erf.cpp index e400d41b91..2bf6f994e6 100644 --- a/tests/kernels/test_erf.cpp +++ b/tests/kernels/test_erf.cpp @@ -22,16 +22,20 @@ #include #include +#define TEST_CASE_NAME "test_erf" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class ErfTest - : public KernelTest, - public ::testing::TestWithParam> { +class ErfTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto l_shape = GetShapeArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) @@ -45,12 +49,8 @@ class ErfTest runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - erf, ErfTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{1}, - dims_t{8, 8}, dims_t{1, 4, 16}, - dims_t{1, 3, 24, 24}, dims_t{}))); +INSTANTIATE_TEST_SUITE_P(erf, ErfTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(ErfTest, erf) { auto l_ort = runtime_tensor_2_ort_tensor(input); @@ -85,6 +85,15 @@ TEST_P(ErfTest, erf) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, j) + FOR_LOOP(lhs_type, i) + SPLIT_ELEMENT(lhs_shape, j) + SPLIT_ELEMENT(lhs_type, i) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_erf.json b/tests/kernels/test_erf.json new file mode 100644 index 0000000000..8f88180b02 --- /dev/null +++ b/tests/kernels/test_erf.json @@ -0,0 +1,4 @@ +{ + "lhs_shape":[[1, 3, 16, 16], [1, 3, 16], [8, 8], [16, 16], [1], [1, 3, 24, 24], []], + "lhs_type":["dt_float32", "dt_float16"] +} \ No newline at end of file diff --git a/tests/kernels/test_expand.cpp b/tests/kernels/test_expand.cpp index 672ad1e142..bef9760863 100644 --- a/tests/kernels/test_expand.cpp +++ b/tests/kernels/test_expand.cpp @@ -22,49 +22,52 @@ #include #include +#define TEST_CASE_NAME "test_expand" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class ExpandTest - : public KernelTest, - public ::testing::TestWithParam> { +class ExpandTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, input_shape] = GetParam(); + READY_SUBCASE() + + auto input_shape = GetShapeArray("lhs_shape"); + auto shape = GetShapeArray("rhs_shape"); + auto typecode = GetDataType("lhs_type"); input = hrt::create(typecode, input_shape, 
host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); + + size_t shape_size = shape.size(); + int64_t *shape_array = (int64_t *)malloc(shape_size * sizeof(int64_t)); + std::copy(shape.begin(), shape.end(), shape_array); + new_shape = hrt::create(dt_int64, {shape_size}, + {reinterpret_cast(shape_array), + shape_size * sizeof(int64_t)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; + runtime_tensor new_shape; }; -INSTANTIATE_TEST_SUITE_P( - Expand, ExpandTest, - testing::Combine(testing::Values(dt_float32, dt_int32, dt_int64, dt_uint8, - dt_int8, dt_int16, dt_uint16, dt_uint32, - dt_uint64, dt_float16, dt_float64, - dt_boolean), - testing::Values(dims_t{3, 1}, dims_t{1, 1}, - dims_t{1, 1, 1}, dims_t{3, 1, 1, 1}))); +INSTANTIATE_TEST_SUITE_P(Expand, ExpandTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(ExpandTest, expand) { auto input_ort = runtime_tensor_2_ort_tensor(input); // expected - int64_t new_shape[] = {1}; - auto new_shape_ptr = hrt::create(nncase::dt_int64, {1}, - {reinterpret_cast(new_shape), - sizeof(new_shape)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - auto new_shape_ort = runtime_tensor_2_ort_tensor(new_shape_ptr); + auto new_shape_ort = runtime_tensor_2_ort_tensor(new_shape); auto output_ort = ortki_Expand(input_ort, new_shape_ort); size_t size = 0; void *ptr_ort = tensor_buffer(output_ort, &size); @@ -76,7 +79,7 @@ TEST_P(ExpandTest, expand) { .expect("create tensor failed"); // actual - auto output = kernels::stackvm::expand(input.impl(), new_shape_ptr.impl()) + auto output = kernels::stackvm::expand(input.impl(), new_shape.impl()) .expect("expand failed"); runtime_tensor actual(output.as().expect("as tensor failed")); @@ -92,123 +95,21 @@ TEST_P(ExpandTest, expand) { // compare EXPECT_TRUE(result); - - // expected - int64_t new_shape1[] = {1, 1}; - auto new_shape_ptr1 = - hrt::create( - nncase::dt_int64, {2}, - {reinterpret_cast(new_shape1), sizeof(new_shape1)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - auto new_shape_ort1 = runtime_tensor_2_ort_tensor(new_shape_ptr1); - auto output_ort1 = ortki_Expand(input_ort, new_shape_ort1); - size_t size1 = 0; - void *ptr_ort1 = tensor_buffer(output_ort1, &size1); - dims_t shape1(tensor_rank(output_ort1)); - tensor_shape(output_ort1, reinterpret_cast(shape1.data())); - auto expected1 = - hrt::create(input.datatype(), shape1, - {reinterpret_cast(ptr_ort1), size1}, true, - host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - - // actual - auto output1 = kernels::stackvm::expand(input.impl(), new_shape_ptr1.impl()) - .expect("expand failed"); - runtime_tensor actual1(output.as().expect("as tensor failed")); - - bool result1 = is_same_tensor(expected1, actual1) || - cosine_similarity_tensor(expected1, actual1); - - if (!result1) { - std::cout << "actual1 "; - print_runtime_tensor(actual1); - std::cout << "expected1 "; - print_runtime_tensor(expected1); - } - - // compare - EXPECT_TRUE(result1); - - // expected - int64_t new_shape2[] = {3, 4}; - auto new_shape_ptr2 = - hrt::create( - nncase::dt_int64, {2}, - {reinterpret_cast(new_shape2), sizeof(new_shape2)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - auto new_shape_ort2 = runtime_tensor_2_ort_tensor(new_shape_ptr2); - auto output_ort2 = 
ortki_Expand(input_ort, new_shape_ort2); - size_t size2 = 0; - void *ptr_ort2 = tensor_buffer(output_ort2, &size2); - dims_t shape2(tensor_rank(output_ort2)); - tensor_shape(output_ort2, reinterpret_cast(shape2.data())); - auto expected2 = - hrt::create(input.datatype(), shape2, - {reinterpret_cast(ptr_ort2), size2}, true, - host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - - // actual - auto output2 = kernels::stackvm::expand(input.impl(), new_shape_ptr2.impl()) - .expect("expand failed"); - runtime_tensor actual2(output2.as().expect("as tensor failed")); - - bool result2 = is_same_tensor(expected2, actual2) || - cosine_similarity_tensor(expected2, actual2); - - if (!result) { - std::cout << "actual2 "; - print_runtime_tensor(actual2); - std::cout << "expected2 "; - print_runtime_tensor(expected2); - } - - // compare - EXPECT_TRUE(result2); - - // expected - int64_t new_shape3[] = {2, 1, 6}; - auto new_shape_ptr3 = - hrt::create( - nncase::dt_int64, {3}, - {reinterpret_cast(new_shape3), sizeof(new_shape3)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - auto new_shape_ort3 = runtime_tensor_2_ort_tensor(new_shape_ptr3); - auto output_ort3 = ortki_Expand(input_ort, new_shape_ort3); - size_t size3 = 0; - void *ptr_ort3 = tensor_buffer(output_ort3, &size3); - dims_t shape3(tensor_rank(output_ort3)); - tensor_shape(output_ort3, reinterpret_cast(shape3.data())); - auto expected3 = - hrt::create(input.datatype(), shape3, - {reinterpret_cast(ptr_ort3), size3}, true, - host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - - // actual - auto output3 = kernels::stackvm::expand(input.impl(), new_shape_ptr3.impl()) - .expect("expand failed"); - runtime_tensor actual3(output3.as().expect("as tensor failed")); - - bool result3 = is_same_tensor(expected3, actual3) || - cosine_similarity_tensor(expected3, actual3); - - if (!result3) { - std::cout << "actual3 "; - print_runtime_tensor(actual3); - std::cout << "expected3 "; - print_runtime_tensor(expected3); - } - - // compare - EXPECT_TRUE(result3); } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(rhs_shape, j) + FOR_LOOP(lhs_type, k) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(rhs_shape, j) + SPLIT_ELEMENT(lhs_type, k) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_expand.json b/tests/kernels/test_expand.json new file mode 100644 index 0000000000..ec698ea329 --- /dev/null +++ b/tests/kernels/test_expand.json @@ -0,0 +1,5 @@ +{ + "lhs_shape":[[3, 1], [1, 1], [1, 1, 1], [3, 1, 1, 1]], + "rhs_shape":[[1, 3, 3], [1, 3, 3, 3], [3, 4], [1, 1], [2, 1, 6]], + "lhs_type":["dt_float32", "dt_int32", "dt_int64", "dt_uint8", "dt_int8", "dt_int16", "dt_uint16", "dt_uint32", "dt_uint64", "dt_float16", "dt_float64", "dt_boolean"] +} \ No newline at end of file diff --git a/tests/kernels/test_flatten.cpp b/tests/kernels/test_flatten.cpp index d1e05f37e9..6354c331b8 100644 --- a/tests/kernels/test_flatten.cpp +++ b/tests/kernels/test_flatten.cpp @@ -22,16 +22,21 @@ #include #include +#define TEST_CASE_NAME "test_flatten" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; class FlattenTest : public KernelTest, - public ::testing::TestWithParam< - std::tuple> { + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape, value] = 
GetParam(); + READY_SUBCASE() + + auto l_shape = GetShapeArray("lhs_shape"); + auto value = GetNumber("axis"); + auto typecode = GetDataType("lhs_type"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) @@ -49,7 +54,7 @@ class FlattenTest : public KernelTest, .expect("create tensor failed"); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; @@ -57,15 +62,8 @@ class FlattenTest : public KernelTest, int32_t axis_value; }; -INSTANTIATE_TEST_SUITE_P( - flatten, FlattenTest, - testing::Combine(testing::Values(dt_float32, dt_int8, dt_int32, dt_uint8, - dt_int16, dt_uint16, dt_uint32, dt_uint64, - dt_int64, dt_float16, dt_float64, - dt_bfloat16, dt_boolean), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{1, 3, 48, 48}, - dims_t{1, 2}, dims_t{1, 3, 16}), - testing::Values(0, 1, -1, 2, 3, -2, -3, -4))); +INSTANTIATE_TEST_SUITE_P(flatten, FlattenTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(FlattenTest, flatten) { auto l_ort = runtime_tensor_2_ort_tensor(input); @@ -101,6 +99,18 @@ TEST_P(FlattenTest, flatten) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(axis, j) + FOR_LOOP(lhs_type, k) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(axis, j) + SPLIT_ELEMENT(lhs_type, k) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_flatten.json b/tests/kernels/test_flatten.json new file mode 100644 index 0000000000..84fd36277c --- /dev/null +++ b/tests/kernels/test_flatten.json @@ -0,0 +1,5 @@ +{ + "lhs_shape":[[1, 3, 16, 16], [1, 3, 48, 48], [1, 2], [1, 3, 16]], + "axis":[0, 1, -1, 2, 3, -2, -3, -4], + "lhs_type":["dt_float32", "dt_int8", "dt_int32", "dt_uint8", "dt_int16", "dt_uint16", "dt_uint32", "dt_uint64", "dt_int64", "dt_float16", "dt_float64", "dt_bfloat16", "dt_boolean"] +} \ No newline at end of file diff --git a/tests/kernels/test_gather.cpp b/tests/kernels/test_gather.cpp index 7da9953266..09632db6d2 100644 --- a/tests/kernels/test_gather.cpp +++ b/tests/kernels/test_gather.cpp @@ -38,7 +38,7 @@ class GatherTest : public KernelTest, init_tensor(input); int64_t indices_array[] = {0, 0, 1, 1}; - indices = hrt::create(dt_int64, shape, + indices = hrt::create(dt_int64, {2, 2}, {reinterpret_cast(indices_array), sizeof(indices_array)}, true, host_runtime_tensor::pool_cpu_only) @@ -68,7 +68,13 @@ INSTANTIATE_TEST_SUITE_P( dt_int8, dt_int16, dt_uint8, dt_uint16, dt_uint32, dt_float16, dt_float64, dt_bfloat16, dt_boolean), - testing::Values(dims_t{2, 2}), testing::Values(-1, 0, 1))); + testing::Values(dims_t{ + 2, + 2} /*, dims_t{3, 5}, + dims_t{2, 3, 1}, dims_t{5, 7, 5}, + dims_t{5, 4, 3, 2}, dims_t{5, 5, 7, 7}, + dims_t{2, 3, 3, 5}*/), + testing::Values(-1, 0, 1))); TEST_P(GatherTest, gather) { auto input_ort = runtime_tensor_2_ort_tensor(input); diff --git a/tests/kernels/test_gather_elements.cpp b/tests/kernels/test_gather_elements.cpp new file mode 100644 index 0000000000..1a17edaecf --- /dev/null +++ b/tests/kernels/test_gather_elements.cpp @@ -0,0 +1,119 @@ +/* Copyright 2019-2021 Canaan Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
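// (Refers to the test_gather.cpp hunk above.) The indices tensor wraps a
// fixed array of four int64_t values {0, 0, 1, 1}, so its shape must cover
// exactly four elements; creating it with the parameterized input shape only
// worked when that shape happened to be {2, 2}, hence the change to a
// hard-coded {2, 2}. A sketch of the element-count invariant behind that fix
// (standard C++ only; the helper is illustrative, not part of the patch):
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <numeric>
#include <vector>

inline void check_element_count(const std::vector<int64_t> &shape,
                                size_t data_len) {
    // product of the dimensions must equal the number of backing elements
    const int64_t count =
        std::accumulate(shape.begin(), shape.end(), int64_t{1},
                        [](int64_t a, int64_t b) { return a * b; });
    assert(static_cast<size_t>(count) == data_len &&
           "tensor shape does not cover the backing data");
}
// usage sketch: check_element_count({2, 2}, 4);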
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "kernel_test.h" +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace nncase; +using namespace nncase::runtime; +using namespace ortki; + +class GatherElementsTest + : public KernelTest, + public ::testing::TestWithParam< + std::tuple> { + public: + void SetUp() override { + auto &&[typecode, shape, value] = GetParam(); + + input = hrt::create(typecode, shape, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + init_tensor(input); + + int64_t indices_array[] = {0, 0, 1, 1}; + indices = hrt::create(dt_int64, {2, 2}, + {reinterpret_cast(indices_array), + sizeof(indices_array)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + batchDims_value = value; + int64_t batchDims_array[1] = {value}; + batchDims = hrt::create(dt_int64, dims_t{1}, + {reinterpret_cast(batchDims_array), + sizeof(batchDims_array)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + } + + void TearDown() override {} + + protected: + runtime_tensor input; + runtime_tensor indices; + runtime_tensor batchDims; + int64_t batchDims_value; +}; + +INSTANTIATE_TEST_SUITE_P( + gather_elements, GatherElementsTest, + testing::Combine(testing::Values(dt_int32, dt_int64, dt_float32, dt_uint64, + dt_int8, dt_int16, dt_uint8, dt_uint16, + dt_uint32, dt_float16, dt_float64, + dt_bfloat16, dt_boolean), + testing::Values(dims_t{ + 2, + 2} /*, dims_t{3, 5}, + dims_t{2, 3, 1}, dims_t{5, 7, 5}, + dims_t{5, 4, 3, 2}, dims_t{5, 5, 7, 7}, + dims_t{2, 3, 3, 5}*/), + testing::Values(-1, 0, 1))); + +TEST_P(GatherElementsTest, gather_elements) { + auto input_ort = runtime_tensor_2_ort_tensor(input); + auto indices_ort = runtime_tensor_2_ort_tensor(indices); + + // expected + auto output_ort = + ortki_GatherElements(input_ort, indices_ort, batchDims_value); + size_t size = 0; + void *ptr_ort = tensor_buffer(output_ort, &size); + dims_t shape(tensor_rank(output_ort)); + tensor_shape(output_ort, reinterpret_cast(shape.data())); + auto expected = hrt::create(input.datatype(), shape, + {reinterpret_cast(ptr_ort), size}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + // actual + auto output = kernels::stackvm::gather_elements( + input.impl(), batchDims.impl(), indices.impl()) + .expect("gather failed"); + runtime_tensor actual(output.as().expect("as tensor failed")); + + bool result = is_same_tensor(expected, actual) || + cosine_similarity_tensor(expected, actual); + + if (!result) { + std::cout << "actual "; + print_runtime_tensor(actual); + std::cout << "expected "; + print_runtime_tensor(expected); + } + + // compare + EXPECT_TRUE(result); +} + +int main(int argc, char *argv[]) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} \ No newline at end of file diff --git a/tests/kernels/test_gather_nd.cpp b/tests/kernels/test_gather_nd.cpp index 3432e5239e..fb4687f5f7 100644 --- a/tests/kernels/test_gather_nd.cpp +++ b/tests/kernels/test_gather_nd.cpp @@ -26,19 +26,16 @@ using namespace nncase; using namespace nncase::runtime; using namespace 
ortki; -class GatherNDTest - : public KernelTest, - public ::testing::TestWithParam> { +class GatherNDTest : public KernelTest, + public ::testing::TestWithParam< + std::tuple> { public: void SetUp() override { - auto &&[typecode, shape] = GetParam(); + auto &&[typecode, shape, value] = GetParam(); - int32_t input_array[] = {0, 1, 2, 3}; - input = hrt::create(dt_int32, shape, - {reinterpret_cast(input_array), - sizeof(input_array)}, - true, host_runtime_tensor::pool_cpu_only) + input = hrt::create(typecode, shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); + init_tensor(input); int64_t indices_array[] = {0, 0, 1, 1}; indices = hrt::create(dt_int64, shape, @@ -47,8 +44,9 @@ class GatherNDTest true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - int64_t batchDims_array[] = {0}; - batchDims = hrt::create(dt_int64, {1}, + batchDims_value = value; + int64_t batchDims_array[1] = {value}; + batchDims = hrt::create(dt_int64, dims_t{1}, {reinterpret_cast(batchDims_array), sizeof(batchDims_array)}, true, host_runtime_tensor::pool_cpu_only) @@ -61,6 +59,7 @@ class GatherNDTest runtime_tensor input; runtime_tensor indices; runtime_tensor batchDims; + int64_t batchDims_value; }; INSTANTIATE_TEST_SUITE_P( @@ -69,14 +68,17 @@ INSTANTIATE_TEST_SUITE_P( dt_int8, dt_int16, dt_uint8, dt_uint16, dt_uint32, dt_float16, dt_float64, dt_bfloat16, dt_boolean), - testing::Values(dims_t{2, 2}))); + testing::Values(dims_t{2, 2} /*, dims_t{3, 5}, + dims_t{2, 3, 1}, dims_t{5, 7, 5}, + dims_t{5, 4, 3, 2}, dims_t{5, 5, 7, 7}, + dims_t{2, 3, 3, 5}*/), testing::Values(0 /*, -1, 1*/))); TEST_P(GatherNDTest, gather_nd) { auto input_ort = runtime_tensor_2_ort_tensor(input); auto indices_ort = runtime_tensor_2_ort_tensor(indices); // expected - auto output_ort = ortki_GatherND(input_ort, indices_ort, 0); + auto output_ort = ortki_GatherND(input_ort, indices_ort, batchDims_value); size_t size = 0; void *ptr_ort = tensor_buffer(output_ort, &size); dims_t shape(tensor_rank(output_ort)); diff --git a/tests/kernels/test_gelu.cpp b/tests/kernels/test_gelu.cpp index aa587bf857..a3e0ee0594 100644 --- a/tests/kernels/test_gelu.cpp +++ b/tests/kernels/test_gelu.cpp @@ -22,59 +22,145 @@ #include #include +#define TEST_CASE_NAME "test_gelu" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class GeluTest - : public KernelTest, - public ::testing::TestWithParam> { +class GeluTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto l_shape = GetShapeArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); + + alpha = hrt::create(typecode, {1}, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + init_tensor_alpha(alpha); } void TearDown() override {} + virtual void init_tensor_alpha(runtime::runtime_tensor &tensor) { + auto dtype = tensor.datatype(); + switch (dtype) { + case dt_float16: { + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_real_distribution dis(0.0f, 2.0f); + NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = static_cast(dis(gen)); + return ok(); + }); + break; + } + case dt_float32: { + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_real_distribution dis(0.0f, 2.0f); + 
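// Each dtype case in init_tensor_alpha (above and below) follows the same
// recipe: seed std::mt19937 from std::random_device, sample uniformly from
// [0, 2), and cast the sample to the tensor's element type (half, float,
// double, bfloat16). A minimal standalone sketch of that draw (standard C++
// only; the function name draw_alpha is illustrative, not from nncase):
#include <random>

inline float draw_alpha(float lo = 0.0f, float hi = 2.0f) {
    static std::mt19937 gen{std::random_device{}()};
    std::uniform_real_distribution<float> dis(lo, hi);
    return dis(gen); // callers cast the result to the target element type
}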
NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = static_cast(dis(gen)); + return ok(); + }); + break; + } + case dt_float64: { + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_real_distribution dis(0.0, 2.0); + NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = static_cast(dis(gen)); + return ok(); + }); + break; + } + case dt_bfloat16: { + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_real_distribution<> dis(0.0, 2.0); + NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = + static_cast(dis(gen)); + return ok(); + }); + break; + } + default: { + } + } + } + protected: runtime_tensor input; + runtime_tensor alpha; }; -INSTANTIATE_TEST_SUITE_P( - gelu, GeluTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{1}, - dims_t{8, 8}, dims_t{1, 4, 16}, - dims_t{1, 3, 24, 24}, dims_t{}))); +INSTANTIATE_TEST_SUITE_P(gelu, GeluTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(GeluTest, gelu) { auto l_ort = runtime_tensor_2_ort_tensor(input); // expected - float_t a_ptr[] = {0.5f}; - auto a = hrt::create(nncase::dt_float32, {1}, - {reinterpret_cast(a_ptr), sizeof(a_ptr)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - auto a_ort = runtime_tensor_2_ort_tensor(a); - - float_t b_ptr[] = {2.0f}; - auto b = hrt::create(nncase::dt_float32, {1}, - {reinterpret_cast(b_ptr), sizeof(b_ptr)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - auto b_ort = runtime_tensor_2_ort_tensor(b); + auto a_ort = runtime_tensor_2_ort_tensor(alpha); + + runtime_tensor b; + runtime_tensor c; + if (input.datatype() == dt_float16) { + half b_ptr[] = {(half)2.0f}; + b = hrt::create(nncase::dt_float16, {1}, + {reinterpret_cast(b_ptr), sizeof(b_ptr)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + half c_ptr[] = {(half)1.0f}; + c = hrt::create(nncase::dt_float16, {1}, + {reinterpret_cast(c_ptr), sizeof(c_ptr)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + } else if (input.datatype() == dt_float32) { + float b_ptr[] = {2.0f}; + b = hrt::create(nncase::dt_float32, {1}, + {reinterpret_cast(b_ptr), sizeof(b_ptr)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); - float_t c_ptr[] = {1.0f}; - auto c = hrt::create(nncase::dt_float32, {1}, - {reinterpret_cast(c_ptr), sizeof(c_ptr)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); + float c_ptr[] = {1.0f}; + c = hrt::create(nncase::dt_float32, {1}, + {reinterpret_cast(c_ptr), sizeof(c_ptr)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + } else if (input.datatype() == dt_float64) { + double b_ptr[] = {2.0f}; + b = hrt::create(nncase::dt_float64, {1}, + {reinterpret_cast(b_ptr), sizeof(b_ptr)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + double c_ptr[] = {1.0f}; + c = hrt::create(nncase::dt_float64, {1}, + {reinterpret_cast(c_ptr), sizeof(c_ptr)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + } + + auto b_ort = runtime_tensor_2_ort_tensor(b); auto c_ort = runtime_tensor_2_ort_tensor(c); auto scaledInput = ortki_Mul(a_ort, l_ort); @@ -93,8 +179,8 @@ TEST_P(GeluTest, gelu) 
{ .expect("create tensor failed"); // actual - auto output = - kernels::stackvm::gelu(input.impl(), a.impl()).expect("gelu failed"); + auto output = kernels::stackvm::gelu(input.impl(), alpha.impl()) + .expect("gelu failed"); runtime_tensor actual(output.as().expect("as tensor failed")); bool result = is_same_tensor(expected, actual) || @@ -112,6 +198,15 @@ TEST_P(GeluTest, gelu) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, j) + FOR_LOOP(lhs_type, i) + SPLIT_ELEMENT(lhs_shape, j) + SPLIT_ELEMENT(lhs_type, i) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_gelu.json b/tests/kernels/test_gelu.json new file mode 100644 index 0000000000..8f88180b02 --- /dev/null +++ b/tests/kernels/test_gelu.json @@ -0,0 +1,4 @@ +{ + "lhs_shape":[[1, 3, 16, 16], [1, 3, 16], [8, 8], [16, 16], [1], [1, 3, 24, 24], []], + "lhs_type":["dt_float32", "dt_float16"] +} \ No newline at end of file diff --git a/tests/kernels/test_get_item.cpp b/tests/kernels/test_get_item.cpp index 948ce197c9..6ad5403cf8 100644 --- a/tests/kernels/test_get_item.cpp +++ b/tests/kernels/test_get_item.cpp @@ -61,15 +61,18 @@ TEST_P(GetItemTest, get_item) { sizeof(index_ptr)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); + int64_t shape_ort[] = {1}; auto shape = hrt::create(dt_int64, {1}, {reinterpret_cast(shape_ort), sizeof(shape_ort)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); + auto get_item_output = kernels::stackvm::get_item(input.impl(), index.impl()) .expect("get_item failed"); + auto output = kernels::stackvm::reshape(get_item_output, shape.impl()) .expect("get_item failed"); runtime_tensor actual(output.as().expect("as tensor failed")); diff --git a/tests/kernels/test_hard_sigmoid.cpp b/tests/kernels/test_hard_sigmoid.cpp index b282fa45c4..1b96601fff 100644 --- a/tests/kernels/test_hard_sigmoid.cpp +++ b/tests/kernels/test_hard_sigmoid.cpp @@ -22,55 +22,94 @@ #include #include +#define TEST_CASE_NAME "test_hard_sigmoid" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class HardSigmoidTest - : public KernelTest, - public ::testing::TestWithParam> { +class HardSigmoidTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto l_shape = GetShapeArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); + auto value1 = GetFloatNumber("alpha"); + auto value2 = GetFloatNumber("gamma"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); + + alpha_value = value1; + gamma_value = value2; } void TearDown() override {} protected: runtime_tensor input; + float alpha_value; + float gamma_value; }; -INSTANTIATE_TEST_SUITE_P( - hard_sigmoid, HardSigmoidTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{1}, - dims_t{1, 3}, dims_t{1, 3, 16}, - dims_t{}))); +INSTANTIATE_TEST_SUITE_P(hard_sigmoid, HardSigmoidTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(HardSigmoidTest, hard_sigmoid) { auto l_ort = runtime_tensor_2_ort_tensor(input); // expected - float_t alpha_ptr[] = {0.5f}; - auto alpha = hrt::create(nncase::dt_float32, {1}, - {reinterpret_cast(alpha_ptr), - sizeof(alpha_ptr)}, - true, host_runtime_tensor::pool_cpu_only) - 
.expect("create tensor failed"); - - float_t gamma_ptr[] = {0.6f}; - auto gamma = hrt::create(nncase::dt_float32, {1}, - {reinterpret_cast(gamma_ptr), - sizeof(gamma_ptr)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - - auto output_ort = ortki_HardSigmoid(l_ort, 0.5f, 0.6f); + runtime_tensor alpha; + runtime_tensor gamma; + if (input.datatype() == dt_float32) { + float alpha_ptr[] = {alpha_value}; + alpha = hrt::create(nncase::dt_float32, {1}, + {reinterpret_cast(alpha_ptr), + sizeof(alpha_ptr)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + float gamma_ptr[] = {gamma_value}; + gamma = hrt::create(nncase::dt_float32, {1}, + {reinterpret_cast(gamma_ptr), + sizeof(gamma_ptr)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + } else if (input.datatype() == dt_float16) { + half alpha_ptr[] = {(half)alpha_value}; + alpha = hrt::create(nncase::dt_float16, {1}, + {reinterpret_cast(alpha_ptr), + sizeof(alpha_ptr)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + half gamma_ptr[] = {(half)gamma_value}; + gamma = hrt::create(nncase::dt_float16, {1}, + {reinterpret_cast(gamma_ptr), + sizeof(gamma_ptr)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + } else { + double alpha_ptr[] = {(double)alpha_value}; + alpha = hrt::create(nncase::dt_float64, {1}, + {reinterpret_cast(alpha_ptr), + sizeof(alpha_ptr)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + double gamma_ptr[] = {(double)gamma_value}; + gamma = hrt::create(nncase::dt_float64, {1}, + {reinterpret_cast(gamma_ptr), + sizeof(gamma_ptr)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + } + + auto output_ort = ortki_HardSigmoid(l_ort, alpha_value, gamma_value); size_t size = 0; void *ptr_ort = tensor_buffer(output_ort, &size); dims_t shape(tensor_rank(output_ort)); @@ -101,6 +140,21 @@ TEST_P(HardSigmoidTest, hard_sigmoid) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(lhs_type, j) + FOR_LOOP(alpha, k) + FOR_LOOP(gamma, l) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(lhs_type, j) + SPLIT_ELEMENT(alpha, k) + SPLIT_ELEMENT(gamma, l) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_hard_sigmoid.json b/tests/kernels/test_hard_sigmoid.json new file mode 100644 index 0000000000..641b60c599 --- /dev/null +++ b/tests/kernels/test_hard_sigmoid.json @@ -0,0 +1,6 @@ +{ + "lhs_shape":[[1, 3, 16, 16], [1, 3, 16], [8, 8], [16, 16], [1], [1, 3, 24, 24], []], + "lhs_type":["dt_float32", "dt_float16", "dt_float64"], + "alpha":[1.2, 0.8, 0.5, 0.6], + "gamma":[1.2, 0.8, 0.5, 0.6] +} \ No newline at end of file diff --git a/tests/kernels/test_hard_swish.cpp b/tests/kernels/test_hard_swish.cpp index d547d4f9e2..23c591bc4e 100644 --- a/tests/kernels/test_hard_swish.cpp +++ b/tests/kernels/test_hard_swish.cpp @@ -22,16 +22,20 @@ #include #include +#define TEST_CASE_NAME "test_hard_swish" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class HardSwishTest - : public KernelTest, - public ::testing::TestWithParam> { +class HardSwishTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + 
READY_SUBCASE() + + auto l_shape = GetShapeArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) @@ -44,32 +48,14 @@ class HardSwishTest runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - hard_swish, HardSwishTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{1, 2}, - dims_t{1}, dims_t{16, 16}, dims_t{}))); +INSTANTIATE_TEST_SUITE_P(hard_swish, HardSwishTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(HardSwishTest, hard_swish) { auto l_ort = runtime_tensor_2_ort_tensor(input); // expected - float_t alpha_ptr[] = {1.0f / 6.0f}; - auto alpha = hrt::create(nncase::dt_float32, {1}, - {reinterpret_cast(alpha_ptr), - sizeof(alpha_ptr)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - - float_t beta_ptr[] = {0.5f}; - auto beta = - hrt::create(nncase::dt_float32, {1}, - {reinterpret_cast(beta_ptr), sizeof(beta_ptr)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - - auto output_ort = - ortki_Mul(l_ort, ortki_HardSigmoid(l_ort, 1.0f / 6.0f, 0.5f)); + auto output_ort = ortki_HardSwish(l_ort); size_t size = 0; void *ptr_ort = tensor_buffer(output_ort, &size); dims_t shape(tensor_rank(output_ort)); @@ -88,6 +74,8 @@ TEST_P(HardSwishTest, hard_swish) { cosine_similarity_tensor(expected, actual); if (!result) { + std::cout << "input "; + print_runtime_tensor(input); std::cout << "actual "; print_runtime_tensor(actual); std::cout << "expected "; @@ -99,6 +87,15 @@ TEST_P(HardSwishTest, hard_swish) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(lhs_type, j) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(lhs_type, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_hard_swish.json b/tests/kernels/test_hard_swish.json new file mode 100644 index 0000000000..3ef2fca0a7 --- /dev/null +++ b/tests/kernels/test_hard_swish.json @@ -0,0 +1,31 @@ +{ + "lhs_shape": [ + [ + 1, + 2 + ], + [ + 1, + 3, + 16, + 16 + ], + [ + 1, + 3, + 16 + ], + [ + 1 + ], + [ + 16, + 16 + ], + [] + ], + "lhs_type": [ + "dt_float32", + "dt_float16" + ] +} \ No newline at end of file diff --git a/tests/kernels/test_hardmax.cpp b/tests/kernels/test_hardmax.cpp index f9a21d0a9d..b8190f8499 100644 --- a/tests/kernels/test_hardmax.cpp +++ b/tests/kernels/test_hardmax.cpp @@ -22,24 +22,30 @@ #include #include +#define TEST_CASE_NAME "test_hardmax" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; class HardmaxTest : public KernelTest, - public ::testing::TestWithParam< - std::tuple> { + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, shape, value] = GetParam(); + READY_SUBCASE() + + auto l_shape = GetShapeArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); + auto value = GetNumber("axis_value"); - input = hrt::create(typecode, shape, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); + input = + hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); init_tensor(input); - axis_value = value > 0 ? value < (int64_t)shape.size() ? value : 0 - : -value <= (int64_t)shape.size() ? value - : 0; + axis_value = value > 0 ? value < (int64_t)l_shape.size() ? value : 0 + : -value <= (int64_t)l_shape.size() ? 
value + : 0; } void TearDown() override {} @@ -49,12 +55,8 @@ class HardmaxTest : public KernelTest, int64_t axis_value; }; -INSTANTIATE_TEST_SUITE_P( - hardmax, HardmaxTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{1, 3, 16}, - dims_t{2, 6}, dims_t{1}), - testing::Values(-4, -3, -2, -1, 0, 1, 2, 3))); +INSTANTIATE_TEST_SUITE_P(hardmax, HardmaxTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(HardmaxTest, hardmax) { auto l_ort = runtime_tensor_2_ort_tensor(input); @@ -96,6 +98,18 @@ TEST_P(HardmaxTest, hardmax) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(lhs_type, j) + FOR_LOOP(axis_value, k) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(lhs_type, j) + SPLIT_ELEMENT(axis_value, k) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_hardmax.json b/tests/kernels/test_hardmax.json new file mode 100644 index 0000000000..49fedb4158 --- /dev/null +++ b/tests/kernels/test_hardmax.json @@ -0,0 +1,5 @@ +{ + "lhs_shape":[[1, 3, 16, 16], [1, 3, 16], [2, 16], [1]], + "lhs_type":["dt_float32"], + "axis_value": [-4, -3, -2, -1, 0, 1, 2, 3] +} \ No newline at end of file diff --git a/tests/kernels/test_instance_normalization.cpp b/tests/kernels/test_instance_normalization.cpp index 792898beb2..14af095e7a 100644 --- a/tests/kernels/test_instance_normalization.cpp +++ b/tests/kernels/test_instance_normalization.cpp @@ -57,19 +57,22 @@ class InstanceNormalizationTest runtime_tensor b; }; -INSTANTIATE_TEST_SUITE_P(instance_normalization, InstanceNormalizationTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1, 3, 16, 16}, - dims_t{1, 2, 4, 8}))); +INSTANTIATE_TEST_SUITE_P( + instance_normalization, InstanceNormalizationTest, + testing::Combine(testing::Values(dt_float32), + testing::Values(dims_t{1, 3, 16, 16}, dims_t{1, 2, 4, 8}, + dims_t{1, 3, + 16} /*, dims_t{24, 16, 16}*/))); +// todo when in_shape[0] is not 1,cos is about 0.96 TEST_P(InstanceNormalizationTest, instance_normalization) { auto l_ort = runtime_tensor_2_ort_tensor(input); auto scale_ort = runtime_tensor_2_ort_tensor(scale); auto b_ort = runtime_tensor_2_ort_tensor(b); + auto eps = 1e-4f; // expected - auto output_ort = - ortki_InstanceNormalization(l_ort, scale_ort, b_ort, 1e-4f); + auto output_ort = ortki_InstanceNormalization(l_ort, scale_ort, b_ort, eps); size_t size = 0; void *ptr_ort = tensor_buffer(output_ort, &size); dims_t shape(tensor_rank(output_ort)); @@ -80,7 +83,7 @@ TEST_P(InstanceNormalizationTest, instance_normalization) { .expect("create tensor failed"); // actual - float epsilon_ptr[] = {1e-4f}; + float epsilon_ptr[] = {eps}; auto epsilon = hrt::create(nncase::dt_float32, {1}, {reinterpret_cast(epsilon_ptr), sizeof(epsilon_ptr)}, diff --git a/tests/kernels/test_layer_norm.cpp b/tests/kernels/test_layer_norm.cpp index 586a875a69..55d73cb483 100644 --- a/tests/kernels/test_layer_norm.cpp +++ b/tests/kernels/test_layer_norm.cpp @@ -22,16 +22,21 @@ #include #include +#define TEST_CASE_NAME "test_layer_norm" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; class LayerNormTest : public KernelTest, - public ::testing::TestWithParam< - std::tuple> { + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape, axis] = GetParam(); + READY_SUBCASE() + + auto l_shape 
= GetShapeArray("lhs_shape"); + auto axis = GetNumber("axis"); + auto typecode = GetDataType("lhs_type"); axis_value = axis > (int64_t)l_shape.size() - 1 ? (int64_t)l_shape.size() - 1 @@ -67,7 +72,7 @@ class LayerNormTest : public KernelTest, init_tensor(b); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; @@ -76,22 +81,18 @@ class LayerNormTest : public KernelTest, int64_t axis_value; }; -INSTANTIATE_TEST_SUITE_P( - layer_norm, LayerNormTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{1, 2, 4, 8}, - dims_t{2, 2, 4, 4}, dims_t{1, 3, 16}, - dims_t{1, 16}, dims_t{16}), - testing::Values(-3, -2, -1, 0, 1, 2, 3))); +INSTANTIATE_TEST_SUITE_P(layer_norm, LayerNormTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(LayerNormTest, layer_norm) { auto l_ort = runtime_tensor_2_ort_tensor(input); auto scale_ort = runtime_tensor_2_ort_tensor(scale); auto b_ort = runtime_tensor_2_ort_tensor(b); + auto eps = 1e-05f; // expected - auto output_ort = ortki_LayerNormalization(l_ort, scale_ort, b_ort, - axis_value, 1e-05f, 1L); + auto output_ort = + ortki_LayerNormalization(l_ort, scale_ort, b_ort, axis_value, eps, 1L); size_t size = 0; void *ptr_ort = tensor_buffer(tensor_seq_get_value(output_ort, 0), &size); dims_t shape(tensor_rank(tensor_seq_get_value(output_ort, 0))); @@ -103,9 +104,10 @@ TEST_P(LayerNormTest, layer_norm) { .expect("create tensor failed"); // actual - auto output = kernels::stackvm::layer_norm(axis_value, 1e-05f, input.impl(), - scale.impl(), b.impl()) - .expect("layer_norm failed"); + auto output = + kernels::stackvm::layer_norm((int32_t)axis_value, eps, input.impl(), + scale.impl(), b.impl()) + .expect("layer_norm failed"); runtime_tensor actual(output.as().expect("as tensor failed")); bool result = is_same_tensor(expected, actual) || @@ -121,6 +123,18 @@ TEST_P(LayerNormTest, layer_norm) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(axis, j) + FOR_LOOP(lhs_type, k) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(axis, j) + SPLIT_ELEMENT(lhs_type, k) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_layer_norm.json b/tests/kernels/test_layer_norm.json new file mode 100644 index 0000000000..53b71d38cd --- /dev/null +++ b/tests/kernels/test_layer_norm.json @@ -0,0 +1,5 @@ +{ + "lhs_shape":[[1, 3, 16, 16], [1, 2, 4, 8], [2, 2, 4, 4], [1, 3, 16], [1, 16], [16]], + "axis":[0, 1, -1, 2, 3, -2, -3], + "lhs_type":["dt_float32"] +} \ No newline at end of file diff --git a/tests/kernels/test_leaky_relu.cpp b/tests/kernels/test_leaky_relu.cpp index b7456c388b..035263d2a5 100644 --- a/tests/kernels/test_leaky_relu.cpp +++ b/tests/kernels/test_leaky_relu.cpp @@ -22,47 +22,56 @@ #include #include +#define TEST_CASE_NAME "test_leaky_relu" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class LeakyReluTest - : public KernelTest, - public ::testing::TestWithParam> { +class LeakyReluTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto l_shape = GetShapeArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); 
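// Note: READY_SUBCASE(), GetShapeArray(), GetDataType() and GetFloatNumber(), together with the
// FOR_LOOP/SPLIT_ELEMENT/WRITE_SUB_CASE macros used in each main(), appear to come from the shared
// KernelTest base and are not defined in this diff. The apparent flow, inferred only from their use
// here: main() reads the per-kernel JSON (e.g. tests/kernels/test_leaky_relu.json), expands the
// cartesian product of the listed arrays into numbered sub-cases (up to MAX_CASE_NUM), and each test
// instance created by testing::Range(0, MAX_CASE_NUM) loads its own combination via READY_SUBCASE()
// in SetUp().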
init_tensor(input); + + alpha = hrt::create(typecode, {1}, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + init_tensor(alpha); } void TearDown() override {} protected: runtime_tensor input; + runtime_tensor alpha; }; -INSTANTIATE_TEST_SUITE_P( - leaky_relu, LeakyReluTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{3, 16, 16}, - dims_t{16, 16}, dims_t{16}, dims_t{1}, - dims_t{}))); +INSTANTIATE_TEST_SUITE_P(leaky_relu, LeakyReluTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(LeakyReluTest, leaky_relu) { auto l_ort = runtime_tensor_2_ort_tensor(input); // expected - float_t alpha_ptr[] = {0.6f}; - auto alpha = hrt::create(nncase::dt_float32, {1}, - {reinterpret_cast(alpha_ptr), - sizeof(alpha_ptr)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - auto output_ort = ortki_LeakyRelu(l_ort, 0.6f); + OrtKITensor *output_ort; + if (input.datatype() == dt_float16) { + output_ort = ortki_LeakyRelu(l_ort, tensor_to_array(alpha)[0]); + } else if (input.datatype() == dt_float32) { + output_ort = ortki_LeakyRelu(l_ort, tensor_to_array(alpha)[0]); + } else if (input.datatype() == dt_bfloat16) { + output_ort = + ortki_LeakyRelu(l_ort, tensor_to_array(alpha)[0]); + } else { + output_ort = ortki_LeakyRelu(l_ort, tensor_to_array(alpha)[0]); + } size_t size = 0; void *ptr_ort = tensor_buffer(output_ort, &size); dims_t shape(tensor_rank(output_ort)); @@ -92,6 +101,15 @@ TEST_P(LeakyReluTest, leaky_relu) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(lhs_type, j) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(lhs_type, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_leaky_relu.json b/tests/kernels/test_leaky_relu.json new file mode 100644 index 0000000000..2694fe1f34 --- /dev/null +++ b/tests/kernels/test_leaky_relu.json @@ -0,0 +1,4 @@ +{ + "lhs_shape":[[1, 3, 16, 16], [1, 3, 16], [8, 8], [16, 16], [1], [1, 3, 24, 24], []], + "lhs_type":["dt_float32", "dt_float16", "dt_float64"] +} \ No newline at end of file diff --git a/tests/kernels/test_log_softmax.cpp b/tests/kernels/test_log_softmax.cpp index a07227da3e..c77c619263 100644 --- a/tests/kernels/test_log_softmax.cpp +++ b/tests/kernels/test_log_softmax.cpp @@ -26,12 +26,17 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; +#define TEST_CASE_NAME "test_softmax" + class LogSoftmaxTest : public KernelTest, - public ::testing::TestWithParam< - std::tuple> { + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape, value] = GetParam(); + READY_SUBCASE() + + auto l_shape = GetShapeArray("lhs_shape"); + auto value = GetNumber("axis"); + auto typecode = GetDataType("lhs_type"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) @@ -48,7 +53,7 @@ class LogSoftmaxTest : public KernelTest, .expect("create tensor failed"); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; @@ -56,12 +61,8 @@ class LogSoftmaxTest : public KernelTest, int64_t axis_value; }; -INSTANTIATE_TEST_SUITE_P( - log_softmax, LogSoftmaxTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1}, dims_t{1, 3}, - dims_t{1, 3, 16, 16}, dims_t{1, 3, 16}), - testing::Values(0, 1, 2, 3, -4, -3, -2, -1))); 
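// For reference, LeakyRelu computes f(x) = x for x >= 0 and f(x) = alpha * x otherwise; the
// LeakyReluTest above now draws alpha from a randomly initialized one-element tensor and reads it
// back with tensor_to_array (assumed here to be a KernelTest helper) instead of hard-coding 0.6f.
// A minimal scalar sketch of that definition, for illustration only:
static float leaky_relu_ref(float x, float alpha) {
    // identity on the positive side, scaled by alpha on the negative side
    return x >= 0.0f ? x : alpha * x;
}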
+INSTANTIATE_TEST_SUITE_P(log_softmax, LogSoftmaxTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(LogSoftmaxTest, log_softmax) { auto l_ort = runtime_tensor_2_ort_tensor(input); @@ -97,6 +98,18 @@ TEST_P(LogSoftmaxTest, log_softmax) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(axis, j) + FOR_LOOP(lhs_type, k) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(axis, j) + SPLIT_ELEMENT(lhs_type, k) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_lrn.cpp b/tests/kernels/test_lrn.cpp index 514961b79e..e06cbe5080 100644 --- a/tests/kernels/test_lrn.cpp +++ b/tests/kernels/test_lrn.cpp @@ -22,16 +22,20 @@ #include #include +#define TEST_CASE_NAME "test_lrn" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class LrnTest - : public KernelTest, - public ::testing::TestWithParam> { +class LrnTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto l_shape = GetShapeArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) @@ -39,22 +43,25 @@ class LrnTest init_tensor(input); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; }; INSTANTIATE_TEST_SUITE_P(lrn, LrnTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1, 3, 16, 16}, - dims_t{1, 3, 8, 8}))); + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(LrnTest, lrn) { auto l_ort = runtime_tensor_2_ort_tensor(input); + auto alpha_value = 0.22f; + auto beta_value = 0.20f; + auto bias_value = 0.75f; + auto output_size_value = 3L; // expected - auto output_ort = ortki_LRN(l_ort, 0.22f, 0.20f, 0.75f, 3); + auto output_ort = ortki_LRN(l_ort, alpha_value, beta_value, bias_value, + output_size_value); size_t size = 0; void *ptr_ort = tensor_buffer(output_ort, &size); dims_t shape(tensor_rank(output_ort)); @@ -65,32 +72,36 @@ TEST_P(LrnTest, lrn) { .expect("create tensor failed"); // actual - float_t alpha_ptr[] = {0.22f}; + float alpha_ptr[] = {alpha_value}; auto alpha = hrt::create(dt_float32, {1}, {reinterpret_cast(alpha_ptr), sizeof(alpha_ptr)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - float_t beta_ptr[] = {0.20f}; + + float beta_ptr[] = {beta_value}; auto beta = hrt::create(dt_float32, {1}, {reinterpret_cast(beta_ptr), sizeof(beta_ptr)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - float_t bias_ptr[] = {0.75f}; + + float bias_ptr[] = {bias_value}; auto bias = hrt::create(dt_float32, {1}, {reinterpret_cast(bias_ptr), sizeof(bias_ptr)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - int64_t size_ptr[] = {3l}; - auto size0 = + + int64_t size_ptr[] = {output_size_value}; + auto output_size = hrt::create(dt_int64, {1}, {reinterpret_cast(size_ptr), sizeof(size_ptr)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); + auto output = kernels::stackvm::lrn(input.impl(), alpha.impl(), beta.impl(), - bias.impl(), size0.impl()) + bias.impl(), output_size.impl()) .expect("lrn failed"); runtime_tensor actual(output.as().expect("as tensor failed")); @@ -109,6 +120,15 @@ TEST_P(LrnTest, lrn) { } int main(int argc, char 
*argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(lhs_type, j) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(lhs_type, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_lrn.json b/tests/kernels/test_lrn.json new file mode 100644 index 0000000000..e501b1feef --- /dev/null +++ b/tests/kernels/test_lrn.json @@ -0,0 +1,4 @@ +{ + "lhs_shape":[[1, 3, 16, 16], [1, 3, 8, 8]], + "lhs_type":["dt_float32"] +} \ No newline at end of file diff --git a/tests/kernels/test_lstm.cpp b/tests/kernels/test_lstm.cpp index ce1aa5c451..d57f0d11c2 100644 --- a/tests/kernels/test_lstm.cpp +++ b/tests/kernels/test_lstm.cpp @@ -73,14 +73,18 @@ class LstmTest runtime_tensor r; }; -INSTANTIATE_TEST_SUITE_P(lstm, LstmTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1, 1, 2}), - testing::Values(dims_t{1, 1, 1}), - testing::Values(dims_t{1, 1, 1}), - testing::Values(dims_t{1, 8}), - testing::Values(dims_t{1, 4, 2}), - testing::Values(dims_t{1, 4, 1}))); +INSTANTIATE_TEST_SUITE_P( + lstm, LstmTest, + testing::Combine(testing::Values(dt_float32), + testing::Values(dims_t{1, 1, + 2} /*, + dims_t{1, 1, 3} , dims_t{1, 1, + 8}*/), + testing::Values(dims_t{1, 1, 1}), + testing::Values(dims_t{1, 1, 1}), + testing::Values(dims_t{1, 8}), + testing::Values(dims_t{1, 4, 2} /*,dims_t{1,8,2}*/), + testing::Values(dims_t{1, 4, 1} /*,dims_t{1,8,1}*/))); TEST_P(LstmTest, lstm) { auto x_ort = runtime_tensor_2_ort_tensor(x); @@ -99,16 +103,16 @@ TEST_P(LstmTest, lstm) { true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); auto seqLength_ort = runtime_tensor_2_ort_tensor(seqLength); - float_t p_ptr[] = {{}, {}, {}}; + float p_ptr[] = {{}, {}, {}}; auto p = hrt::create(dt_float32, {1, 3}, {reinterpret_cast(p_ptr), sizeof(p_ptr)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); auto p_ort = runtime_tensor_2_ort_tensor(p); - float_t alpha[] = {0.0f}; - float_t beta[] = {0.0f}; + float alpha[] = {0.0f}; + float beta[] = {0.0f}; const char *activations_ptr[] = {"Sigmoid", "Tanh", "Tanh"}; - float_t clip = std::numeric_limits::quiet_NaN(); + float clip = std::numeric_limits::quiet_NaN(); const char *direction = "forward"; auto output_ort = ortki_LSTM(x_ort, w_ort, r_ort, b_ort, seqLength_ort, initH_ort, @@ -162,7 +166,7 @@ TEST_P(LstmTest, lstm) { {reinterpret_cast(beta), sizeof(beta)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - float_t f[] = {clip}; + float f[] = {clip}; auto clip_ptr = hrt::create(dt_float32, {1}, {reinterpret_cast(f), sizeof(f)}, true, host_runtime_tensor::pool_cpu_only) diff --git a/tests/kernels/test_matmul.cpp b/tests/kernels/test_matmul.cpp index d3eef1f514..56cb9ed892 100644 --- a/tests/kernels/test_matmul.cpp +++ b/tests/kernels/test_matmul.cpp @@ -26,12 +26,17 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; +#define TEST_CASE_NAME "test_matmul" + class MatMulTest : public KernelTest, - public ::testing::TestWithParam< - std::tuple> { + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape, r_shape] = GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("lhs_type"); + auto l_shape = GetShapeArray("lhs_shape"); + auto r_shape = GetShapeArray("rhs_shape"); lhs = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); @@ -42,20 
+47,15 @@ class MatMulTest : public KernelTest, init_tensor(rhs); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor lhs; runtime_tensor rhs; }; -INSTANTIATE_TEST_SUITE_P( - mat_mul, MatMulTest, - testing::Combine( - testing::Values(dt_int32, dt_int64, dt_float32, dt_float64, dt_int32, - dt_uint32, dt_uint64), - testing::Values(dims_t{1, 3}, dims_t{1, 3, 3}, dims_t{1, 2, 3, 3}), - testing::Values(dims_t{3, 1}, dims_t{1, 3, 3}, dims_t{1, 2, 3, 3}))); +INSTANTIATE_TEST_SUITE_P(mat_mul, MatMulTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(MatMulTest, mat_mul) { auto l_ort = runtime_tensor_2_ort_tensor(lhs); @@ -92,6 +92,18 @@ TEST_P(MatMulTest, mat_mul) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_type, i) + FOR_LOOP(lhs_shape, j) + FOR_LOOP(rhs_shape, k) + SPLIT_ELEMENT(lhs_type, i) + SPLIT_ELEMENT(lhs_shape, j) + SPLIT_ELEMENT(rhs_shape, k) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_matmul.json b/tests/kernels/test_matmul.json new file mode 100644 index 0000000000..8c89dea255 --- /dev/null +++ b/tests/kernels/test_matmul.json @@ -0,0 +1,5 @@ +{ + "lhs_shape":[[3], [1, 3], [3, 3], [1, 3, 3], [4, 2, 3, 3]], + "lhs_type":["dt_int32", "dt_int64", "dt_float32", "dt_float64", "dt_uint32", "dt_uint64"], + "rhs_shape":[[3], [3, 1], [3, 3], [1, 3, 3], [4, 2, 3, 3]] +} \ No newline at end of file diff --git a/tests/kernels/test_normal.cpp b/tests/kernels/test_normal.cpp index ed26f74b09..3103c364f9 100644 --- a/tests/kernels/test_normal.cpp +++ b/tests/kernels/test_normal.cpp @@ -22,52 +22,65 @@ #include #include +#define TEST_CASE_NAME "test_normal" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; class NormalTest : public KernelTest, - public ::testing::TestWithParam< - std::tuple> { + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape, r_shape] = GetParam(); + READY_SUBCASE() + + auto l_shape = GetAxesArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); + auto value1 = GetFloatNumber("mean_value"); + auto value2 = GetFloatNumber("scale_value"); + auto value3 = GetFloatNumber("seed_value"); - float_t mean_ptr[] = {0.5f}; - float_t scale_ptr[] = {1.0f}; - float_t seed_ptr[] = {1.0f}; + mean_value = value1; + float_t mean_ptr[] = {mean_value}; mean = hrt::create( typecode, {1}, {reinterpret_cast(mean_ptr), sizeof(mean_ptr)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); + + scale_value = value2; + float_t scale_ptr[] = {scale_value}; scale = hrt::create(typecode, {1}, {reinterpret_cast(scale_ptr), sizeof(scale_ptr)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); + + seed_value = value3; + float_t seed_ptr[] = {seed_value}; seed = hrt::create( typecode, {1}, {reinterpret_cast(seed_ptr), sizeof(seed_ptr)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); + shape_array = l_shape; } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor mean; runtime_tensor scale; runtime_tensor seed; axes_t shape_array; + float mean_value; + float scale_value; + float seed_value; }; INSTANTIATE_TEST_SUITE_P(normal, NormalTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(axes_t{1, 3, 16, 16}), - testing::Values(dims_t{1, 
3, 16, - 16}))); + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(NormalTest, normal) { @@ -75,7 +88,8 @@ TEST_P(NormalTest, normal) { std::vector vec(shape_array.begin(), shape_array.end()); int64_t shape_ptr[4]; std::copy(vec.begin(), vec.end(), shape_ptr); - auto output_ort = ortki_RandomNormal(1, 0.5f, 1.0f, 1.0f, shape_ptr, 4); + auto output_ort = ortki_RandomNormal(1, mean_value, scale_value, seed_value, + shape_ptr, shape_array.size()); size_t size = 0; void *ptr_ort = tensor_buffer(output_ort, &size); dims_t shape(tensor_rank(output_ort)); @@ -112,6 +126,24 @@ TEST_P(NormalTest, normal) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(lhs_type, j) + FOR_LOOP(mean_value, k) + FOR_LOOP(scale_value, l) + FOR_LOOP(seed_value, m) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(lhs_type, j) + SPLIT_ELEMENT(mean_value, k) + SPLIT_ELEMENT(scale_value, l) + SPLIT_ELEMENT(seed_value, m) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_normal.json b/tests/kernels/test_normal.json new file mode 100644 index 0000000000..ad45bb2530 --- /dev/null +++ b/tests/kernels/test_normal.json @@ -0,0 +1,7 @@ +{ + "lhs_shape":[[1, 3, 16, 16], [1, 2, 3, 4]], + "lhs_type":["dt_float32"], + "mean_value":[0.5, 0.1], + "scale_value":[1.0, 2.0], + "seed_value":[1.0, 2.0] +} \ No newline at end of file diff --git a/tests/kernels/test_normal_like.cpp b/tests/kernels/test_normal_like.cpp index a042e90837..2230c32df6 100644 --- a/tests/kernels/test_normal_like.cpp +++ b/tests/kernels/test_normal_like.cpp @@ -22,41 +22,76 @@ #include #include +#define TEST_CASE_NAME "test_normal_like" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class NormalLikeTest - : public KernelTest, - public ::testing::TestWithParam> { +class NormalLikeTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto l_shape = GetShapeArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); + auto value1 = GetFloatNumber("mean_value"); + auto value2 = GetFloatNumber("scale_value"); + auto value3 = GetFloatNumber("seed_value"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); + + mean_value = value1; + float mean_ptr[] = {mean_value}; + mean = hrt::create( + typecode, {1}, + {reinterpret_cast(mean_ptr), sizeof(mean_ptr)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + scale_value = value2; + float scale_ptr[] = {scale_value}; + scale = hrt::create(typecode, {1}, + {reinterpret_cast(scale_ptr), + sizeof(scale_ptr)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + seed_value = value3; + float seed_ptr[] = {seed_value}; + seed = hrt::create( + typecode, {1}, + {reinterpret_cast(seed_ptr), sizeof(seed_ptr)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; + runtime_tensor mean; + runtime_tensor scale; + runtime_tensor seed; + axes_t shape_array; + float mean_value; + float scale_value; + float seed_value; }; -INSTANTIATE_TEST_SUITE_P( - normal_like, NormalLikeTest, - 
testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{1, 2, 4, 8}, - dims_t{1, 3, 16}, dims_t{1, 3}, dims_t{1}, - dims_t{}))); +INSTANTIATE_TEST_SUITE_P(normal_like, NormalLikeTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(NormalLikeTest, normal_like) { auto l_ort = runtime_tensor_2_ort_tensor(input); // expected - auto output_ort = ortki_RandomNormalLike(l_ort, 1, 0.5f, 1.0f, 1.0f); + auto output_ort = + ortki_RandomNormalLike(l_ort, 1, mean_value, scale_value, seed_value); size_t size = 0; void *ptr_ort = tensor_buffer(output_ort, &size); dims_t shape(tensor_rank(output_ort)); @@ -67,24 +102,6 @@ TEST_P(NormalLikeTest, normal_like) { .expect("create tensor failed"); // actual - float_t mean_ptr[] = {0.5f}; - float_t scale_ptr[] = {1.0f}; - float_t seed_ptr[] = {1.0f}; - auto mean = - hrt::create(input.datatype(), {1}, - {reinterpret_cast(mean_ptr), sizeof(mean_ptr)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - auto scale = hrt::create(input.datatype(), {1}, - {reinterpret_cast(scale_ptr), - sizeof(scale_ptr)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - auto seed = - hrt::create(input.datatype(), {1}, - {reinterpret_cast(seed_ptr), sizeof(seed_ptr)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); auto output = kernels::stackvm::normal_like(dt_float32, input.impl(), mean.impl(), scale.impl(), seed.impl()) @@ -106,6 +123,24 @@ TEST_P(NormalLikeTest, normal_like) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(lhs_type, j) + FOR_LOOP(mean_value, k) + FOR_LOOP(scale_value, l) + FOR_LOOP(seed_value, m) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(lhs_type, j) + SPLIT_ELEMENT(mean_value, k) + SPLIT_ELEMENT(scale_value, l) + SPLIT_ELEMENT(seed_value, m) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_normal_like.json b/tests/kernels/test_normal_like.json new file mode 100644 index 0000000000..5dad86909e --- /dev/null +++ b/tests/kernels/test_normal_like.json @@ -0,0 +1,7 @@ +{ + "lhs_shape":[[1, 3, 16, 16], [1, 2, 4, 8], [1, 3, 16], [1, 3], [1], []], + "lhs_type":["dt_float32"], + "mean_value":[0.5, 0.3], + "scale_value":[1.0, 2.0], + "seed_value":[1.0, 2.0] +} \ No newline at end of file diff --git a/tests/kernels/test_one_hot.cpp b/tests/kernels/test_one_hot.cpp index e2618cd4eb..af1324da7b 100644 --- a/tests/kernels/test_one_hot.cpp +++ b/tests/kernels/test_one_hot.cpp @@ -26,14 +26,19 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class OneHotTest - : public KernelTest, - public ::testing::TestWithParam< - std::tuple> { +#define TEST_CASE_NAME "test_one_hot" + +class OneHotTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[value_typecode, index_typecode, l_shape, values_shape, - axis_value] = GetParam(); + + READY_SUBCASE() + auto l_shape = GetShapeArray("lhs_shape"); + auto values_shape = GetShapeArray("rhs_shape"); + auto axis_value = GetNumber("axis"); + auto value_typecode = GetDataType("lhs_type"); + auto index_typecode = GetDataType("rhs_type"); int64_t a[] = {3, 2, 4, 0}; indices = hrt::create(index_typecode, l_shape, @@ -56,7 +61,7 @@ class OneHotTest axis = axis_value; } - void TearDown() 
override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor indices; @@ -65,11 +70,8 @@ class OneHotTest int32_t axis; }; -INSTANTIATE_TEST_SUITE_P( - OneHot, OneHotTest, - testing::Combine(testing::Values(dt_float16, dt_float32), - testing::Values(dt_int64), testing::Values(dims_t{4}), - testing::Values(dims_t{2}), testing::Values(0, 1))); +INSTANTIATE_TEST_SUITE_P(OneHot, OneHotTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(OneHotTest, OneHot) { auto indices_ort = runtime_tensor_2_ort_tensor(indices); @@ -115,6 +117,24 @@ TEST_P(OneHotTest, OneHot) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(rhs_shape, j) + FOR_LOOP(axis, k) + FOR_LOOP(lhs_type, l) + FOR_LOOP(rhs_type, m) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(rhs_shape, j) + SPLIT_ELEMENT(axis, k) + SPLIT_ELEMENT(lhs_type, l) + SPLIT_ELEMENT(rhs_type, m) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_one_hot.json b/tests/kernels/test_one_hot.json new file mode 100644 index 0000000000..e317d3151b --- /dev/null +++ b/tests/kernels/test_one_hot.json @@ -0,0 +1,7 @@ +{ + "lhs_shape":[[4]], + "rhs_shape":[[2]], + "axis":[0, 1], + "lhs_type":["dt_float32", "dt_float16"], + "rhs_type":["dt_int64"] +} \ No newline at end of file diff --git a/tests/kernels/test_pad.cpp b/tests/kernels/test_pad.cpp index 57d5ad32ad..c4f1ed7930 100644 --- a/tests/kernels/test_pad.cpp +++ b/tests/kernels/test_pad.cpp @@ -22,16 +22,20 @@ #include #include +#define TEST_CASE_NAME "test_pad" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class PadTest - : public KernelTest, - public ::testing::TestWithParam> { +class PadTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto l_shape = GetShapeArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) @@ -42,19 +46,15 @@ class PadTest init_tensor(value); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; runtime_tensor value; }; -INSTANTIATE_TEST_SUITE_P( - Pad, PadTest, - testing::Combine( - testing::Values(dt_int32, dt_int8, dt_uint8, dt_float64, dt_uint64, - dt_int64, dt_float32, dt_float16, dt_uint32), - testing::Values(dims_t{1, 3, 24, 24}, dims_t{1, 3, 16, 16}))); +INSTANTIATE_TEST_SUITE_P(Pad, PadTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(PadTest, Pad) { @@ -100,6 +100,15 @@ TEST_P(PadTest, Pad) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(lhs_type, j) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(lhs_type, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_pad.json b/tests/kernels/test_pad.json new file mode 100644 index 0000000000..04e75eaadf --- /dev/null +++ b/tests/kernels/test_pad.json @@ -0,0 +1,4 @@ +{ + "lhs_shape":[[1, 3, 24, 24], [1, 3, 16, 16]], + "lhs_type":["dt_int32", "dt_int8", "dt_uint8", "dt_float64", "dt_uint64", "dt_int64", "dt_float32", "dt_float16", "dt_uint32"] +} \ No newline at end of file diff --git 
a/tests/kernels/test_prelu.cpp b/tests/kernels/test_prelu.cpp index 9b59c0a3b9..b5d1df3ef4 100644 --- a/tests/kernels/test_prelu.cpp +++ b/tests/kernels/test_prelu.cpp @@ -22,44 +22,73 @@ #include #include +#define TEST_CASE_NAME "test_prelu" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; +using slope_t = itlib::small_vector; -class PreluTest - : public KernelTest, - public ::testing::TestWithParam> { +class PreluTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto l_shape = GetShapeArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); + auto slope_value = GetSlopeArray("slope"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); + + if (slope_value.size() == 1 || + slope_value.size() == l_shape[l_shape.size() - 1]) { + slope = slope_value; + } else { + slope = slope_t{0.1}; + } } void TearDown() override {} + slope_t GetSlopeArray(const char *key) { + assert(_document[key].IsArray()); + Value &array = _document[key]; + size_t arraySize = array.Size(); + slope_t cArray(arraySize); + for (rapidjson::SizeType i = 0; i < arraySize; i++) { + if (array[i].IsFloat()) { + cArray[i] = array[i].GetFloat(); + } else { + std::cout << "Invalid JSON format. Expected unsigned float " + "values in the array." + << std::endl; + } + } + return cArray; + } + protected: runtime_tensor input; + slope_t slope; }; -INSTANTIATE_TEST_SUITE_P( - Prelu, PreluTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{1}, - dims_t{8, 8}, dims_t{1, 4, 16}, - dims_t{1, 3, 24, 24}))); +INSTANTIATE_TEST_SUITE_P(Prelu, PreluTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(PreluTest, Prelu) { auto l_ort = runtime_tensor_2_ort_tensor(input); // expected - float_t slope_ptr[] = {0.2f}; - auto slope = hrt::create(nncase::dt_float32, {1}, - {reinterpret_cast(slope_ptr), - sizeof(slope_ptr)}, + size_t slope_size = slope.size(); + float *slope_array = (float *)malloc(slope_size * sizeof(float)); + std::copy(slope.begin(), slope.end(), slope_array); + auto slope = hrt::create(dt_float32, {slope_size}, + {reinterpret_cast(slope_array), + slope_size * sizeof(float)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); auto slope_ort = runtime_tensor_2_ort_tensor(slope); @@ -90,36 +119,21 @@ TEST_P(PreluTest, Prelu) { // compare EXPECT_TRUE(result); - - // // expected - // float_t slope_ptr1[] = {0.2f, 0.2f}; - // auto slope1 = hrt::create(nncase::dt_float32, {2}, - // {reinterpret_cast(slope_ptr1), - // sizeof(slope_ptr1)}, - // true, host_runtime_tensor::pool_cpu_only) - // .expect("create tensor failed"); - // auto slope_ort1 = runtime_tensor_2_ort_tensor(slope1); - // auto output_ort = ortki_PRelu(l_ort, slope_ort1); - // size_t size = 0; - // void *ptr_ort = tensor_buffer(output_ort, &size); - // dims_t shape(tensor_rank(output_ort)); - // tensor_shape(output_ort, reinterpret_cast(shape.data())); - // auto expected = hrt::create(input.datatype(), shape, - // {reinterpret_cast(ptr_ort), - // size}, true, - // host_runtime_tensor::pool_cpu_only) - // .expect("create tensor failed"); - // - // // actual - // auto output = kernels::stackvm::prelu(input.impl(), slope1.impl()) - // .expect("prelu failed"); - // runtime_tensor actual(output.as().expect("as tensor failed")); - // - // // compare - // 
EXPECT_TRUE(is_same_tensor(expected, actual)); } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(lhs_type, j) + FOR_LOOP(slope, k) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(lhs_type, j) + SPLIT_ELEMENT(slope, k) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_prelu.json b/tests/kernels/test_prelu.json new file mode 100644 index 0000000000..b728e936d6 --- /dev/null +++ b/tests/kernels/test_prelu.json @@ -0,0 +1,5 @@ +{ + "lhs_shape":[[1, 3, 16, 16], [1, 3, 16], [8, 8], [1, 4, 16], [1], [1, 3, 24, 24]], + "lhs_type":["dt_float32"], + "slope":[[0.2], [0.1], [0.3], [0.2, 0.1, 0.3, 0.2, 0.1, 0.3, 0.2, 0.1, 0.3, 0.2, 0.1, 0.3, 0.2, 0.1, 0.3, 0.2, 0.1, 0.3, 0.2, 0.1, 0.3, 0.2, 0.1, 0.3], [0.1, 0.2, 0.2, 0.4, 0.2, 0.2, 0.3, 0.8], [0.1, 0.2, 0.2, 0.2, 0.1, 0.2, 0.2, 0.4, 0.1, 0.2, 0.2, 0.8, 0.2, 0.12, 0.2, 0.21], [0.1, 0.2, 0.3, 0.1, 0.2, 0.3, 0.1, 0.2]] +} \ No newline at end of file diff --git a/tests/kernels/test_quantize.cpp b/tests/kernels/test_quantize.cpp index a067f9ba51..13939dad22 100644 --- a/tests/kernels/test_quantize.cpp +++ b/tests/kernels/test_quantize.cpp @@ -26,93 +26,166 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class QuantizeTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_quantize" + +class QuantizeTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto input_typecode = GetDataType("input_type"); + auto quant_type = GetDataType("lhs_type"); + auto l_shape = GetShapeArray("lhs_shape"); + + input = hrt::create(input_typecode, l_shape, + host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + init_tensor(input); + + auto zero_point_value = 127; + auto scale_value = 0.01f; - float input_array[] = {1.0F, 1.2F, 1.4F, 1.5F, 1.6F, 1.8F, 1.9F, 2.0F}; - input = hrt::create(typecode, {2, 4}, - {reinterpret_cast(input_array), - sizeof(input_array)}, + if (quant_type == dt_uint8) { + uint8_t zero_point[] = {(uint8_t)zero_point_value}; + zero_point_ptr = + hrt::create(nncase::dt_uint8, {1}, + {reinterpret_cast(zero_point), + sizeof(zero_point)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + } else if (quant_type == dt_int8) { + int8_t zero_point[] = {(int8_t)zero_point_value}; + zero_point_ptr = + hrt::create(nncase::dt_int8, {1}, + {reinterpret_cast(zero_point), + sizeof(zero_point)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + } else { + int16_t zero_point[] = {(int16_t)zero_point_value}; + zero_point_ptr = + hrt::create(nncase::dt_int16, {1}, + {reinterpret_cast(zero_point), + sizeof(zero_point)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); + } + + float scale[] = {scale_value}; + scale_ptr = + hrt::create(nncase::dt_float32, {1}, + {reinterpret_cast(scale), sizeof(scale)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + quant_param_t quantParam; + quantParam.zero_point = zero_point_value; + quantParam.scale = scale_value; + quant_param_t quant_param[] = {quantParam}; + quant_param_ptr = + hrt::create(dt_int64, {1}, + {reinterpret_cast(quant_param), + sizeof(quant_param)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor 
failed"); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; + runtime_tensor scale_ptr; + runtime_tensor zero_point_ptr; + runtime_tensor quant_param_ptr; }; INSTANTIATE_TEST_SUITE_P(quantize, QuantizeTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1, 3, 16, - 16}))); + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(QuantizeTest, quantize) { auto l_ort = runtime_tensor_2_ort_tensor(input); - // expected - uint8_t zero_point[] = {127}; - auto zero_point_ptr = - hrt::create( - nncase::dt_uint8, {1}, - {reinterpret_cast(zero_point), sizeof(zero_point)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - - float_t scale[] = {0.01f}; - auto scale_ptr = - hrt::create(nncase::dt_float32, {1}, - {reinterpret_cast(scale), sizeof(scale)}, true, - host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - auto output_ort = - ortki_QuantizeLinear(l_ort, runtime_tensor_2_ort_tensor(scale_ptr), - runtime_tensor_2_ort_tensor(zero_point_ptr), 0); - size_t size = 0; - void *ptr_ort = tensor_buffer(output_ort, &size); - dims_t shape(tensor_rank(output_ort)); - tensor_shape(output_ort, reinterpret_cast(shape.data())); - auto expected = hrt::create(dt_uint8, shape, - {reinterpret_cast(ptr_ort), size}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - - // actual - quant_param_t quantParam; - quantParam.zero_point = 127; - quantParam.scale = 0.01f; - quant_param_t quant_param[] = {quantParam}; - auto quant_param_ptr = - hrt::create( - dt_int64, {1}, - {reinterpret_cast(quant_param), sizeof(quant_param)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - auto output = kernels::stackvm::quantize(dt_uint8, input.impl(), - quant_param_ptr.impl()) - .expect("quantize failed"); - runtime_tensor actual(output.as().expect("as tensor failed")); - - bool result = is_same_tensor(expected, actual) || - cosine_similarity_tensor(expected, actual); - - if (!result) { - std::cout << "actual "; - print_runtime_tensor(actual); - std::cout << "expected "; - print_runtime_tensor(expected); - } + if (zero_point_ptr.datatype() != dt_int16) { - // compare - EXPECT_TRUE(result); + // expected + runtime_tensor expected; + auto output_ort = ortki_QuantizeLinear( + l_ort, runtime_tensor_2_ort_tensor(scale_ptr), + runtime_tensor_2_ort_tensor(zero_point_ptr), 0); + size_t size = 0; + void *ptr_ort = tensor_buffer(output_ort, &size); + dims_t shape(tensor_rank(output_ort)); + tensor_shape(output_ort, reinterpret_cast(shape.data())); + expected = hrt::create(zero_point_ptr.datatype(), shape, + {reinterpret_cast(ptr_ort), size}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + // actual + auto output = + kernels::stackvm::quantize(zero_point_ptr.datatype(), input.impl(), + quant_param_ptr.impl()) + .expect("quantize failed"); + runtime_tensor actual(output.as().expect("as tensor failed")); + + bool result = is_same_tensor(expected, actual) || + cosine_similarity_tensor(expected, actual); + + if (!result) { + std::cout << "actual "; + print_runtime_tensor(actual); + std::cout << "expected "; + print_runtime_tensor(expected); + } + + // compare + EXPECT_TRUE(result); + + } else { + + // expected + runtime_tensor expected; + expected = hrt::create(dt_int16, input.shape(), + host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + quantize_to_int16(expected, input, 127, 0.01f); + + // 
actual + auto output = + kernels::stackvm::quantize(zero_point_ptr.datatype(), input.impl(), + quant_param_ptr.impl()) + .expect("quantize failed"); + runtime_tensor actual(output.as().expect("as tensor failed")); + + bool result = is_same_tensor(expected, actual) || + cosine_similarity_tensor(expected, actual); + + if (!result) { + std::cout << "actual "; + print_runtime_tensor(actual); + std::cout << "expected "; + print_runtime_tensor(expected); + } + + // compare + EXPECT_TRUE(result); + } } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_type, i) + FOR_LOOP(lhs_shape, j) + FOR_LOOP(input_type, k) + SPLIT_ELEMENT(lhs_type, i) + SPLIT_ELEMENT(lhs_shape, j) + SPLIT_ELEMENT(input_type, k) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_quantize.json b/tests/kernels/test_quantize.json new file mode 100644 index 0000000000..3ca742485e --- /dev/null +++ b/tests/kernels/test_quantize.json @@ -0,0 +1,5 @@ +{ + "lhs_shape":[[1, 3, 16, 16], [1, 3, 16], [3, 16], [16, 16], [1], []], + "lhs_type":["dt_int8", "dt_int16", "dt_uint8"], + "input_type":["dt_float32"] +} \ No newline at end of file diff --git a/tests/kernels/test_range.cpp b/tests/kernels/test_range.cpp index d481707270..b9574a0c05 100644 --- a/tests/kernels/test_range.cpp +++ b/tests/kernels/test_range.cpp @@ -22,32 +22,39 @@ #include #include +#define TEST_CASE_NAME "test_range" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class RangeTest - : public KernelTest, - public ::testing::TestWithParam> { +class RangeTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, shape] = GetParam(); + READY_SUBCASE() + + auto shape = GetShapeArray("lhs_shape"); + auto begin_value = GetFloatNumber("begin"); + auto end_value = GetFloatNumber("end"); + auto step_value = GetFloatNumber("step"); + auto typecode = GetDataType("lhs_type"); - float_t begin_array[] = {0.0f}; + float_t begin_array[] = {begin_value}; begin = hrt::create(typecode, shape, {reinterpret_cast(begin_array), sizeof(begin_array)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - float_t end_array[] = {100.0f}; + float_t end_array[] = {end_value}; end = hrt::create( typecode, shape, {reinterpret_cast(end_array), sizeof(end_array)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - float_t step_array[] = {100.0f}; + float_t step_array[] = {step_value}; step = hrt::create(typecode, shape, {reinterpret_cast(step_array), sizeof(step_array)}, @@ -55,7 +62,7 @@ class RangeTest .expect("create tensor failed"); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor begin; @@ -64,8 +71,7 @@ class RangeTest }; INSTANTIATE_TEST_SUITE_P(Range, RangeTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1}))); + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(RangeTest, Range) { auto begin_ort = runtime_tensor_2_ort_tensor(begin); @@ -103,6 +109,24 @@ TEST_P(RangeTest, Range) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(begin, j) + FOR_LOOP(end, k) + FOR_LOOP(step, l) + FOR_LOOP(lhs_type, m) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(begin, j) + SPLIT_ELEMENT(end, k) + SPLIT_ELEMENT(step, l) + SPLIT_ELEMENT(lhs_type, m) + 
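// Note: the dt_int16 branch above checks kernels::stackvm::quantize against quantize_to_int16
// (assumed here to be a KernelTest helper), presumably because the ortki QuantizeLinear reference
// used by the other branches only covers the 8-bit zero-point types. All branches share the usual
// affine scheme with zero_point = 127 and scale = 0.01f; a minimal illustrative sketch (rounding
// and saturation details may differ from the real kernels):
static int32_t affine_quantize_ref(float x, float scale, int32_t zero_point,
                                   int32_t lo, int32_t hi) {
    // q = clamp(round(x / scale) + zero_point, lo, hi); std::lround requires <cmath>
    int32_t q = static_cast<int32_t>(std::lround(x / scale)) + zero_point;
    return q < lo ? lo : (q > hi ? hi : q);
}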
WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_range.json b/tests/kernels/test_range.json new file mode 100644 index 0000000000..b96c82eb68 --- /dev/null +++ b/tests/kernels/test_range.json @@ -0,0 +1,7 @@ +{ + "lhs_shape":[[1]], + "begin":[0.0, 10.0, 20.0], + "end":[100.0, 90.0, 80.0], + "step":[1.0, 10.0, 5.0], + "lhs_type":["dt_float32"] +} \ No newline at end of file diff --git a/tests/kernels/test_rank.cpp b/tests/kernels/test_rank.cpp index 2b4d2bc46e..22bfa5d9df 100644 --- a/tests/kernels/test_rank.cpp +++ b/tests/kernels/test_rank.cpp @@ -27,12 +27,16 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class RankTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_rank" + +class RankTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode_input, l_shape] = GetParam(); + READY_SUBCASE() + + auto l_shape = GetShapeArray("lhs_shape"); + auto typecode_input = GetDataType("lhs_type"); input = hrt::create(typecode_input, l_shape, host_runtime_tensor::pool_cpu_only) @@ -47,19 +51,15 @@ class RankTest .expect("create tensor failed"); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; runtime_tensor expected; }; -INSTANTIATE_TEST_SUITE_P( - rank, RankTest, - testing::Combine(testing::Values(dt_int16, dt_int8, dt_float32, dt_uint8), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{1, 3, 8, 8}, - dims_t{1, 3, 1}, dims_t{1, 3, 16}, - dims_t{1, 3}, dims_t{1}, dims_t{}))); +INSTANTIATE_TEST_SUITE_P(rank, RankTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(RankTest, rank) { // actual @@ -92,6 +92,15 @@ TEST_P(RankTest, rank) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(lhs_type, m) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(lhs_type, m) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_rank.json b/tests/kernels/test_rank.json new file mode 100644 index 0000000000..cc74c3c634 --- /dev/null +++ b/tests/kernels/test_rank.json @@ -0,0 +1,4 @@ +{ + "lhs_shape":[[1, 3, 16, 16], [1, 3, 8, 8], [1, 3, 1], [1, 3, 16], [1, 3], [1], []], + "lhs_type":["dt_float32", "dt_int8", "dt_int32", "dt_uint8", "dt_int16", "dt_uint16", "dt_uint32", "dt_uint64", "dt_int64", "dt_float16", "dt_float64", "dt_bfloat16", "dt_boolean"] +} \ No newline at end of file diff --git a/tests/kernels/test_reduce.json b/tests/kernels/test_reduce.json new file mode 100644 index 0000000000..760e75445e --- /dev/null +++ b/tests/kernels/test_reduce.json @@ -0,0 +1,8 @@ +{ + "lhs_type":["dt_float32"], + "rhs_type":["dt_int64"], + "lhs_shape":[[1, 3, 16, 16]], + "rhs_shape":[[1]], + "bool_value": [0,1], + "axis_value":[[0], [-1], [-2], [-3], [1], [2], [3], [2, 3], [-2, -1], [1, 2, 3], [-1, -2, -3], [0, 1, 2, 3], [-1, -2, -3, -4]] +} \ No newline at end of file diff --git a/tests/kernels/test_reduce_arg.json b/tests/kernels/test_reduce_arg.json new file mode 100644 index 0000000000..c0ede8cda5 --- /dev/null +++ b/tests/kernels/test_reduce_arg.json @@ -0,0 +1,9 @@ +{ + "lhs_type":["dt_float32","dt_float64","dt_int32"], + "rhs_type":["dt_int64"], + "lhs_shape":[[1, 3, 
16, 16], [1, 2, 3, 4], [1, 3, 16], [3, 16], [16]], + "rhs_shape":[[1]], + "bool1_value": [0,1], + "bool2_value": [0,1], + "axis_value":[-1, 0, 1, 2, 3, -2, -3, -4] +} \ No newline at end of file diff --git a/tests/kernels/test_reduce_arg_max.cpp b/tests/kernels/test_reduce_arg_max.cpp index 6b4beec7b7..f5a546fca1 100644 --- a/tests/kernels/test_reduce_arg_max.cpp +++ b/tests/kernels/test_reduce_arg_max.cpp @@ -26,19 +26,29 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; +#define TEST_CASE_NAME "test_reduce_arg" + class ReduceArgMaxTest : public KernelTest, - public ::testing::TestWithParam< - std::tuple> { + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode1, typecode2, l_shape, r_shape, value1, value2, - value3] = GetParam(); + + READY_SUBCASE() + + auto typecode1 = GetDataType("lhs_type"); + auto typecode2 = GetDataType("rhs_type"); + auto l_shape = GetShapeArray("lhs_shape"); + auto r_shape = GetShapeArray("rhs_shape"); + auto value1 = GetNumber("axis_value"); + auto value2 = GetNumber("bool1_value"); + auto value3 = GetNumber("bool2_value"); a = hrt::create(typecode1, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(a); - axis_value = value1; + axis_value = value1 > 0 ? value1 >= (int64_t)l_shape.size() ? 0 : value1 + : -value1 > (int64_t)l_shape.size() ? 0 + : value1; int64_t axis_array[] = {axis_value}; axis = hrt::create(typecode2, r_shape, {reinterpret_cast(axis_array), @@ -62,7 +72,7 @@ class ReduceArgMaxTest : public KernelTest, .expect("create tensor failed"); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor a; @@ -74,12 +84,8 @@ class ReduceArgMaxTest : public KernelTest, int64_t select_last_idx_value; }; -INSTANTIATE_TEST_SUITE_P( - ReduceArgMax, ReduceArgMaxTest, - testing::Combine(testing::Values(dt_float32), testing::Values(dt_int64), - testing::Values(dims_t{1, 3, 16, 16}), - testing::Values(dims_t{1}), testing::Values(-1, 0), - testing::Values(1, 0), testing::Values(1, 0))); +INSTANTIATE_TEST_SUITE_P(ReduceArgMax, ReduceArgMaxTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(ReduceArgMaxTest, ReduceArgMax) { @@ -90,7 +96,7 @@ TEST_P(ReduceArgMaxTest, ReduceArgMax) { void *ptr_ort = tensor_buffer(output_ort, &size); dims_t shape(tensor_rank(output_ort)); tensor_shape(output_ort, reinterpret_cast(shape.data())); - auto expected = hrt::create(dt_float64, shape, + auto expected = hrt::create(dt_int64, shape, {reinterpret_cast(ptr_ort), size}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); @@ -107,6 +113,8 @@ TEST_P(ReduceArgMaxTest, ReduceArgMax) { cosine_similarity_tensor(expected, actual); if (!result) { + std::cout << "input "; + print_runtime_tensor(a); std::cout << "actual "; print_runtime_tensor(actual); std::cout << "expected "; @@ -118,6 +126,30 @@ TEST_P(ReduceArgMaxTest, ReduceArgMax) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_type, i) + FOR_LOOP(rhs_type, j) + FOR_LOOP(lhs_shape, k) + FOR_LOOP(rhs_shape, l) + FOR_LOOP(bool1_value, m) + FOR_LOOP(bool2_value, n) + FOR_LOOP(axis_value, o) + SPLIT_ELEMENT(lhs_type, i) + SPLIT_ELEMENT(rhs_type, j) + SPLIT_ELEMENT(lhs_shape, k) + SPLIT_ELEMENT(rhs_shape, l) + SPLIT_ELEMENT(bool1_value, m) + SPLIT_ELEMENT(bool2_value, n) + SPLIT_ELEMENT(axis_value, o) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + 
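ReduceArgMaxTest (and ReduceArgMinTest below) now clamp the JSON-supplied axis with a nested conditional before building the axis tensor. The helper below is a hypothetical restatement of exactly that expression, not code from the patch: positive axes at or beyond the input rank, and negative axes whose magnitude exceeds the rank, fall back to axis 0; everything else passes through. For lhs_shape [1, 3, 16] (rank 3), axis 3 becomes 0 while -3 stays -3.

```cpp
// Hypothetical helper mirroring the ternary in ReduceArgMax/MinTest::SetUp.
#include <cstddef>
#include <cstdint>

int64_t clamp_reduce_axis(int64_t axis, std::size_t rank) {
    if (axis > 0)
        return axis >= static_cast<int64_t>(rank) ? 0 : axis; // too large -> 0
    return -axis > static_cast<int64_t>(rank) ? 0 : axis;     // too negative -> 0
}
```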
FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_reduce_arg_min.cpp b/tests/kernels/test_reduce_arg_min.cpp index 6f36dfcb4c..65e6d33cb7 100644 --- a/tests/kernels/test_reduce_arg_min.cpp +++ b/tests/kernels/test_reduce_arg_min.cpp @@ -22,23 +22,33 @@ #include #include +#define TEST_CASE_NAME "test_reduce_arg" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; class ReduceArgMinTest : public KernelTest, - public ::testing::TestWithParam< - std::tuple> { + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode1, typecode2, l_shape, r_shape, value1, value2, - value3] = GetParam(); + + READY_SUBCASE() + + auto typecode1 = GetDataType("lhs_type"); + auto typecode2 = GetDataType("rhs_type"); + auto l_shape = GetShapeArray("lhs_shape"); + auto r_shape = GetShapeArray("rhs_shape"); + auto value1 = GetNumber("axis_value"); + auto value2 = GetNumber("bool1_value"); + auto value3 = GetNumber("bool2_value"); a = hrt::create(typecode1, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(a); - axis_value = value1; + axis_value = value1 > 0 ? value1 >= (int64_t)l_shape.size() ? 0 : value1 + : -value1 > (int64_t)l_shape.size() ? 0 + : value1; int64_t axis_array[] = {axis_value}; axis = hrt::create(typecode2, r_shape, {reinterpret_cast(axis_array), @@ -62,7 +72,7 @@ class ReduceArgMinTest : public KernelTest, .expect("create tensor failed"); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor a; @@ -74,12 +84,8 @@ class ReduceArgMinTest : public KernelTest, int64_t select_last_idx_value; }; -INSTANTIATE_TEST_SUITE_P( - ReduceArgMin, ReduceArgMinTest, - testing::Combine(testing::Values(dt_float32), testing::Values(dt_int64), - testing::Values(dims_t{1, 3, 16, 16}), - testing::Values(dims_t{1}), testing::Values(-1, 0), - testing::Values(1, 0), testing::Values(1, 0))); +INSTANTIATE_TEST_SUITE_P(ReduceArgMin, ReduceArgMinTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(ReduceArgMinTest, ReduceArgMin) { @@ -90,7 +96,7 @@ TEST_P(ReduceArgMinTest, ReduceArgMin) { void *ptr_ort = tensor_buffer(output_ort, &size); dims_t shape(tensor_rank(output_ort)); tensor_shape(output_ort, reinterpret_cast(shape.data())); - auto expected = hrt::create(dt_float64, shape, + auto expected = hrt::create(dt_int64, shape, {reinterpret_cast(ptr_ort), size}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); @@ -107,6 +113,8 @@ TEST_P(ReduceArgMinTest, ReduceArgMin) { cosine_similarity_tensor(expected, actual); if (!result) { + std::cout << "input "; + print_runtime_tensor(a); std::cout << "actual "; print_runtime_tensor(actual); std::cout << "expected "; @@ -118,6 +126,30 @@ TEST_P(ReduceArgMinTest, ReduceArgMin) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_type, i) + FOR_LOOP(rhs_type, j) + FOR_LOOP(lhs_shape, k) + FOR_LOOP(rhs_shape, l) + FOR_LOOP(bool1_value, m) + FOR_LOOP(bool2_value, n) + FOR_LOOP(axis_value, o) + SPLIT_ELEMENT(lhs_type, i) + SPLIT_ELEMENT(rhs_type, j) + SPLIT_ELEMENT(lhs_shape, k) + SPLIT_ELEMENT(rhs_shape, l) + SPLIT_ELEMENT(bool1_value, m) + SPLIT_ELEMENT(bool2_value, n) + SPLIT_ELEMENT(axis_value, o) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return 
RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_reduce_max.cpp b/tests/kernels/test_reduce_max.cpp index b85365f768..dca0a52b1d 100644 --- a/tests/kernels/test_reduce_max.cpp +++ b/tests/kernels/test_reduce_max.cpp @@ -26,14 +26,21 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class ReduceMaxTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_reduce" + +class ReduceMaxTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode1, typecode2, l_shape, r_shape, value, axis_arry] = - GetParam(); + + READY_SUBCASE() + + auto typecode1 = GetDataType("lhs_type"); + auto typecode2 = GetDataType("rhs_type"); + auto l_shape = GetShapeArray("lhs_shape"); + auto r_shape = GetShapeArray("rhs_shape"); + auto value = GetNumber("bool_value"); + auto axis_value = GetAxesArray("axis_value"); a = hrt::create(typecode1, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); @@ -55,37 +62,28 @@ class ReduceMaxTest true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - axis_arry1 = axis_arry; + axis_value_array = axis_value; } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor a; - axes_t axis_arry1; + axes_t axis_value_array; int64_t keepDims_value; runtime_tensor keepDims; runtime_tensor init_value; }; -INSTANTIATE_TEST_SUITE_P( - ReduceMax, ReduceMaxTest, - testing::Combine(testing::Values(dt_float32), testing::Values(dt_int64), - testing::Values(dims_t{1, 3, 16, 16}), - testing::Values(dims_t{1}), testing::Values(0, 1), - testing::Values(axes_t{0}, axes_t{-1}, axes_t{-2}, - axes_t{-3}, axes_t{1}, axes_t{2}, - axes_t{3}, axes_t{2, 3}, axes_t{-2, -1}, - axes_t{1, 2, 3}, axes_t{-1, -2, -3}, - axes_t{0, 1, 2, 3}, - axes_t{-1, -2, -3, -4}))); +INSTANTIATE_TEST_SUITE_P(ReduceMax, ReduceMaxTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(ReduceMaxTest, ReduceMax) { - size_t axis_size = axis_arry1.size(); + + size_t axis_size = axis_value_array.size(); if (axis_size <= a.shape().size()) { int64_t *axis_array = (int64_t *)malloc(axis_size * sizeof(int64_t)); - size_t size = 0; - std::copy(axis_arry1.begin(), axis_arry1.end(), axis_array); + std::copy(axis_value_array.begin(), axis_value_array.end(), axis_array); auto axis = hrt::create(dt_int64, {axis_size}, {reinterpret_cast(axis_array), axis_size * sizeof(int64_t)}, @@ -94,6 +92,9 @@ TEST_P(ReduceMaxTest, ReduceMax) { auto output_ort = ortki_ReduceMax(runtime_tensor_2_ort_tensor(a), axis_array, axis_size, keepDims_value); + + // expected + size_t size = 0; void *ptr_ort = tensor_buffer(output_ort, &size); dims_t shape(tensor_rank(output_ort)); tensor_shape(output_ort, reinterpret_cast(shape.data())); @@ -127,6 +128,27 @@ TEST_P(ReduceMaxTest, ReduceMax) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_type, i) + FOR_LOOP(rhs_type, j) + FOR_LOOP(lhs_shape, k) + FOR_LOOP(rhs_shape, l) + FOR_LOOP(bool_value, m) + FOR_LOOP(axis_value, n) + SPLIT_ELEMENT(lhs_type, i) + SPLIT_ELEMENT(rhs_type, j) + SPLIT_ELEMENT(lhs_shape, k) + SPLIT_ELEMENT(rhs_shape, l) + SPLIT_ELEMENT(bool_value, m) + SPLIT_ELEMENT(axis_value, n) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git 
a/tests/kernels/test_reduce_mean.cpp b/tests/kernels/test_reduce_mean.cpp index abce548339..be40249440 100644 --- a/tests/kernels/test_reduce_mean.cpp +++ b/tests/kernels/test_reduce_mean.cpp @@ -26,14 +26,21 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class ReduceMeanTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_reduce" + +class ReduceMeanTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode1, typecode2, l_shape, r_shape, value, axis_arry] = - GetParam(); + + READY_SUBCASE() + + auto typecode1 = GetDataType("lhs_type"); + auto typecode2 = GetDataType("rhs_type"); + auto l_shape = GetShapeArray("lhs_shape"); + auto r_shape = GetShapeArray("rhs_shape"); + auto value = GetNumber("bool_value"); + auto axis_value = GetAxesArray("axis_value"); a = hrt::create(typecode1, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); @@ -47,7 +54,7 @@ class ReduceMeanTest true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - float init_value_array[] = {0}; // the mean of input's range + float init_value_array[] = {0}; // the min of input's range init_value = hrt::create(typecode1, r_shape, {reinterpret_cast(init_value_array), @@ -55,37 +62,28 @@ class ReduceMeanTest true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - axis_arry1 = axis_arry; + axis_value_array = axis_value; } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor a; - axes_t axis_arry1; + axes_t axis_value_array; int64_t keepDims_value; runtime_tensor keepDims; runtime_tensor init_value; }; -INSTANTIATE_TEST_SUITE_P( - ReduceMean, ReduceMeanTest, - testing::Combine(testing::Values(dt_float32), testing::Values(dt_int64), - testing::Values(dims_t{1, 2, 3, 4}), - testing::Values(dims_t{1}), testing::Values(0, 1), - testing::Values(axes_t{0}, axes_t{-1}, axes_t{-2}, - axes_t{-3}, axes_t{1}, axes_t{2}, - axes_t{3}, axes_t{2, 3}, axes_t{-2, -1}, - axes_t{1, 2, 3}, axes_t{-1, -2, -3}, - axes_t{0, 1, 2, 3}, - axes_t{-1, -2, -3, -4}))); +INSTANTIATE_TEST_SUITE_P(ReduceMean, ReduceMeanTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(ReduceMeanTest, ReduceMean) { - size_t axis_size = axis_arry1.size(); + + size_t axis_size = axis_value_array.size(); if (axis_size <= a.shape().size()) { int64_t *axis_array = (int64_t *)malloc(axis_size * sizeof(int64_t)); - size_t size = 0; - std::copy(axis_arry1.begin(), axis_arry1.end(), axis_array); + std::copy(axis_value_array.begin(), axis_value_array.end(), axis_array); auto axis = hrt::create(dt_int64, {axis_size}, {reinterpret_cast(axis_array), axis_size * sizeof(int64_t)}, @@ -94,6 +92,9 @@ TEST_P(ReduceMeanTest, ReduceMean) { auto output_ort = ortki_ReduceMean(runtime_tensor_2_ort_tensor(a), axis_array, axis_size, keepDims_value); + + // expected + size_t size = 0; void *ptr_ort = tensor_buffer(output_ort, &size); dims_t shape(tensor_rank(output_ort)); tensor_shape(output_ort, reinterpret_cast(shape.data())); @@ -114,8 +115,6 @@ TEST_P(ReduceMeanTest, ReduceMean) { cosine_similarity_tensor(expected, actual); if (!result) { - std::cout << "input tensor:"; - print_runtime_tensor(a); std::cout << "actual "; print_runtime_tensor(actual); std::cout << "expected "; @@ -129,6 +128,27 @@ TEST_P(ReduceMeanTest, ReduceMean) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_type, i) + FOR_LOOP(rhs_type, 
j) + FOR_LOOP(lhs_shape, k) + FOR_LOOP(rhs_shape, l) + FOR_LOOP(bool_value, m) + FOR_LOOP(axis_value, n) + SPLIT_ELEMENT(lhs_type, i) + SPLIT_ELEMENT(rhs_type, j) + SPLIT_ELEMENT(lhs_shape, k) + SPLIT_ELEMENT(rhs_shape, l) + SPLIT_ELEMENT(bool_value, m) + SPLIT_ELEMENT(axis_value, n) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_reduce_min.cpp b/tests/kernels/test_reduce_min.cpp index 258d47b35a..7ba7f1f1db 100644 --- a/tests/kernels/test_reduce_min.cpp +++ b/tests/kernels/test_reduce_min.cpp @@ -26,14 +26,21 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class ReduceMinTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_reduce" + +class ReduceMinTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode1, typecode2, l_shape, r_shape, value, axis_arry] = - GetParam(); + + READY_SUBCASE() + + auto typecode1 = GetDataType("lhs_type"); + auto typecode2 = GetDataType("rhs_type"); + auto l_shape = GetShapeArray("lhs_shape"); + auto r_shape = GetShapeArray("rhs_shape"); + auto value = GetNumber("bool_value"); + auto axis_value = GetAxesArray("axis_value"); a = hrt::create(typecode1, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); @@ -55,37 +62,28 @@ class ReduceMinTest true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - axis_arry1 = axis_arry; + axis_value_array = axis_value; } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor a; - axes_t axis_arry1; + axes_t axis_value_array; int64_t keepDims_value; runtime_tensor keepDims; runtime_tensor init_value; }; -INSTANTIATE_TEST_SUITE_P( - ReduceMin, ReduceMinTest, - testing::Combine(testing::Values(dt_float32), testing::Values(dt_int64), - testing::Values(dims_t{1, 3, 16, 16}), - testing::Values(dims_t{1}), testing::Values(0, 1), - testing::Values(axes_t{0}, axes_t{-1}, axes_t{-2}, - axes_t{-3}, axes_t{1}, axes_t{2}, - axes_t{3}, axes_t{2, 3}, axes_t{-2, -1}, - axes_t{1, 2, 3}, axes_t{-1, -2, -3}, - axes_t{0, 1, 2, 3}, - axes_t{-1, -2, -3, -4}))); +INSTANTIATE_TEST_SUITE_P(ReduceMin, ReduceMinTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(ReduceMinTest, ReduceMin) { - size_t axis_size = axis_arry1.size(); + + size_t axis_size = axis_value_array.size(); if (axis_size <= a.shape().size()) { int64_t *axis_array = (int64_t *)malloc(axis_size * sizeof(int64_t)); - size_t size = 0; - std::copy(axis_arry1.begin(), axis_arry1.end(), axis_array); + std::copy(axis_value_array.begin(), axis_value_array.end(), axis_array); auto axis = hrt::create(dt_int64, {axis_size}, {reinterpret_cast(axis_array), axis_size * sizeof(int64_t)}, @@ -94,6 +92,9 @@ TEST_P(ReduceMinTest, ReduceMin) { auto output_ort = ortki_ReduceMax(runtime_tensor_2_ort_tensor(a), axis_array, axis_size, keepDims_value); + + // expected + size_t size = 0; void *ptr_ort = tensor_buffer(output_ort, &size); dims_t shape(tensor_rank(output_ort)); tensor_shape(output_ort, reinterpret_cast(shape.data())); @@ -127,6 +128,27 @@ TEST_P(ReduceMinTest, ReduceMin) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_type, i) + FOR_LOOP(rhs_type, j) + FOR_LOOP(lhs_shape, k) + FOR_LOOP(rhs_shape, l) + FOR_LOOP(bool_value, m) + 
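Each reduce test stages the JSON-driven axes_t into a malloc'd int64_t buffer before wrapping it in a host tensor for ortki. A minimal alternative sketch with automatic storage is below; it assumes only that the tensor constructor needs a contiguous pointer plus a byte count (data() and size() * sizeof(int64_t) here) and is not part of the patch — the tests as written keep the raw buffer.

```cpp
// Illustrative alternative (not in the patch): scope the axis buffer to the
// test body instead of leaving a malloc'd array behind.
#include <cstdint>
#include <vector>

template <typename Axes>
std::vector<int64_t> stage_axes(const Axes &axes) {
    return std::vector<int64_t>(axes.begin(), axes.end());
}
```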
FOR_LOOP(axis_value, n) + SPLIT_ELEMENT(lhs_type, i) + SPLIT_ELEMENT(rhs_type, j) + SPLIT_ELEMENT(lhs_shape, k) + SPLIT_ELEMENT(rhs_shape, l) + SPLIT_ELEMENT(bool_value, m) + SPLIT_ELEMENT(axis_value, n) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_reduce_prod.cpp b/tests/kernels/test_reduce_prod.cpp index 43d3d4a934..1ffa0579a2 100644 --- a/tests/kernels/test_reduce_prod.cpp +++ b/tests/kernels/test_reduce_prod.cpp @@ -26,14 +26,21 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class ReduceProdTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_reduce" + +class ReduceProdTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode1, typecode2, l_shape, r_shape, value, axis_arry] = - GetParam(); + + READY_SUBCASE() + + auto typecode1 = GetDataType("lhs_type"); + auto typecode2 = GetDataType("rhs_type"); + auto l_shape = GetShapeArray("lhs_shape"); + auto r_shape = GetShapeArray("rhs_shape"); + auto value = GetNumber("bool_value"); + auto axis_value = GetAxesArray("axis_value"); a = hrt::create(typecode1, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); @@ -47,7 +54,7 @@ class ReduceProdTest true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - float init_value_array[] = {0}; // the min of input's range + float init_value_array[] = {-1}; // the min of input's range init_value = hrt::create(typecode1, r_shape, {reinterpret_cast(init_value_array), @@ -55,37 +62,28 @@ class ReduceProdTest true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - axis_arry1 = axis_arry; + axis_value_array = axis_value; } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor a; - axes_t axis_arry1; + axes_t axis_value_array; int64_t keepDims_value; runtime_tensor keepDims; runtime_tensor init_value; }; -INSTANTIATE_TEST_SUITE_P( - ReduceProd, ReduceProdTest, - testing::Combine(testing::Values(dt_float32), testing::Values(dt_int64), - testing::Values(dims_t{1, 3, 16, 16}), - testing::Values(dims_t{1}), testing::Values(0, 1), - testing::Values(axes_t{0}, axes_t{-1}, axes_t{-2}, - axes_t{-3}, axes_t{1}, axes_t{2}, - axes_t{3}, axes_t{2, 3}, axes_t{-2, -1}, - axes_t{1, 2, 3}, axes_t{-1, -2, -3}, - axes_t{0, 1, 2, 3}, - axes_t{-1, -2, -3, -4}))); +INSTANTIATE_TEST_SUITE_P(ReduceMin, ReduceProdTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(ReduceProdTest, ReduceProd) { - size_t axis_size = axis_arry1.size(); + + size_t axis_size = axis_value_array.size(); if (axis_size <= a.shape().size()) { int64_t *axis_array = (int64_t *)malloc(axis_size * sizeof(int64_t)); - size_t size = 0; - std::copy(axis_arry1.begin(), axis_arry1.end(), axis_array); + std::copy(axis_value_array.begin(), axis_value_array.end(), axis_array); auto axis = hrt::create(dt_int64, {axis_size}, {reinterpret_cast(axis_array), axis_size * sizeof(int64_t)}, @@ -94,6 +92,9 @@ TEST_P(ReduceProdTest, ReduceProd) { auto output_ort = ortki_ReduceProd(runtime_tensor_2_ort_tensor(a), axis_array, axis_size, keepDims_value); + + // expected + size_t size = 0; void *ptr_ort = tensor_buffer(output_ort, &size); dims_t shape(tensor_rank(output_ort)); tensor_shape(output_ort, 
reinterpret_cast(shape.data())); @@ -127,6 +128,27 @@ TEST_P(ReduceProdTest, ReduceProd) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_type, i) + FOR_LOOP(rhs_type, j) + FOR_LOOP(lhs_shape, k) + FOR_LOOP(rhs_shape, l) + FOR_LOOP(bool_value, m) + FOR_LOOP(axis_value, n) + SPLIT_ELEMENT(lhs_type, i) + SPLIT_ELEMENT(rhs_type, j) + SPLIT_ELEMENT(lhs_shape, k) + SPLIT_ELEMENT(rhs_shape, l) + SPLIT_ELEMENT(bool_value, m) + SPLIT_ELEMENT(axis_value, n) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_reduce_sum.cpp b/tests/kernels/test_reduce_sum.cpp index 13b9f148bf..a2539b714e 100644 --- a/tests/kernels/test_reduce_sum.cpp +++ b/tests/kernels/test_reduce_sum.cpp @@ -26,14 +26,21 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class ReduceSumTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_reduce" + +class ReduceSumTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode1, typecode2, l_shape, r_shape, value, axis_arry] = - GetParam(); + + READY_SUBCASE() + + auto typecode1 = GetDataType("lhs_type"); + auto typecode2 = GetDataType("rhs_type"); + auto l_shape = GetShapeArray("lhs_shape"); + auto r_shape = GetShapeArray("rhs_shape"); + auto value = GetNumber("bool_value"); + auto axis_value = GetAxesArray("axis_value"); a = hrt::create(typecode1, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); @@ -47,45 +54,36 @@ class ReduceSumTest true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - int64_t init_value_array[] = {0}; // the sum of input's range + float init_value_array[] = {0}; // the min of input's range init_value = - hrt::create(typecode2, r_shape, + hrt::create(typecode1, r_shape, {reinterpret_cast(init_value_array), sizeof(init_value_array)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - axis_arry1 = axis_arry; + axis_value_array = axis_value; } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor a; - axes_t axis_arry1; + axes_t axis_value_array; int64_t keepDims_value; runtime_tensor keepDims; runtime_tensor init_value; }; -INSTANTIATE_TEST_SUITE_P( - ReduceSum, ReduceSumTest, - testing::Combine(testing::Values(dt_float32), testing::Values(dt_int64), - testing::Values(dims_t{1, 2, 3, 4}), - testing::Values(dims_t{1}), testing::Values(0, 1), - testing::Values(axes_t{0}, axes_t{-1}, axes_t{-2}, - axes_t{-3}, axes_t{1}, axes_t{2}, - axes_t{3}, axes_t{2, 3}, axes_t{-2, -1}, - axes_t{1, 2, 3}, axes_t{-1, -2, -3}, - axes_t{0, 1, 2, 3}, - axes_t{-1, -2, -3, -4}))); +INSTANTIATE_TEST_SUITE_P(ReduceMin, ReduceSumTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(ReduceSumTest, ReduceSum) { - size_t axis_size = axis_arry1.size(); + + size_t axis_size = axis_value_array.size(); if (axis_size <= a.shape().size()) { int64_t *axis_array = (int64_t *)malloc(axis_size * sizeof(int64_t)); - size_t size = 0; - std::copy(axis_arry1.begin(), axis_arry1.end(), axis_array); + std::copy(axis_value_array.begin(), axis_value_array.end(), axis_array); auto axis = hrt::create(dt_int64, {axis_size}, {reinterpret_cast(axis_array), axis_size * sizeof(int64_t)}, @@ -94,6 +92,9 @@ TEST_P(ReduceSumTest, 
ReduceSum) { auto output_ort = ortki_ReduceSum(runtime_tensor_2_ort_tensor(a), runtime_tensor_2_ort_tensor(axis), keepDims_value, 0); + + // expected + size_t size = 0; void *ptr_ort = tensor_buffer(output_ort, &size); dims_t shape(tensor_rank(output_ort)); tensor_shape(output_ort, reinterpret_cast(shape.data())); @@ -114,8 +115,6 @@ TEST_P(ReduceSumTest, ReduceSum) { cosine_similarity_tensor(expected, actual); if (!result) { - std::cout << "input tensor:"; - print_runtime_tensor(a); std::cout << "actual "; print_runtime_tensor(actual); std::cout << "expected "; @@ -129,6 +128,27 @@ TEST_P(ReduceSumTest, ReduceSum) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_type, i) + FOR_LOOP(rhs_type, j) + FOR_LOOP(lhs_shape, k) + FOR_LOOP(rhs_shape, l) + FOR_LOOP(bool_value, m) + FOR_LOOP(axis_value, n) + SPLIT_ELEMENT(lhs_type, i) + SPLIT_ELEMENT(rhs_type, j) + SPLIT_ELEMENT(lhs_shape, k) + SPLIT_ELEMENT(rhs_shape, l) + SPLIT_ELEMENT(bool_value, m) + SPLIT_ELEMENT(axis_value, n) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_reduce_window2D.json b/tests/kernels/test_reduce_window2D.json new file mode 100644 index 0000000000..5b4773fe43 --- /dev/null +++ b/tests/kernels/test_reduce_window2D.json @@ -0,0 +1,8 @@ +{ + "lhs_type":["dt_float32"], + "lhs_shape":[[1, 3, 16, 16]], + "dilations":[[1, 1]], + "filter": [[3, 3]], + "stride": [[1, 1]], + "onnxPads":[[1, 1, 1, 1], [0, 0, 0, 0]] +} \ No newline at end of file diff --git a/tests/kernels/test_reduce_window_2d.cpp b/tests/kernels/test_reduce_window_2d.cpp index e02b8bff43..950ee59530 100644 --- a/tests/kernels/test_reduce_window_2d.cpp +++ b/tests/kernels/test_reduce_window_2d.cpp @@ -26,38 +26,63 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class ReduceWindow2DTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_reduce_window2D" + +class ReduceWindow2DTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + auto typecode = GetDataType("lhs_type"); + auto l_shape = GetShapeArray("lhs_shape"); + auto value1 = GetAxesArray("dilations"); + auto value2 = GetAxesArray("filter"); + auto value3 = GetAxesArray("stride"); + auto value4 = GetAxesArray("onnxPads"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); + + dilations_value = value1; + filter_value = value2; + stride_value = value3; + onnxPads_value = value4; } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; + axes_t dilations_value; + axes_t filter_value; + axes_t stride_value; + axes_t onnxPads_value; }; INSTANTIATE_TEST_SUITE_P(ReduceWindow2D, ReduceWindow2DTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1, 3, 16, - 16}))); + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(ReduceWindow2DTest, ReduceWindow2D) { auto l_ort = runtime_tensor_2_ort_tensor(input); // expected - int64_t dilations[] = {1, 1}; - int64_t filter[] = {3, 3}; - int64_t stride[] = {1, 1}; - int64_t onnxPads[] = {1, 1, 1, 1}; + size_t dilations_size = dilations_value.size(); + int64_t *dilations = (int64_t *)malloc(dilations_size * 
sizeof(int64_t)); + std::copy(dilations_value.begin(), dilations_value.end(), dilations); + + size_t filter_size = filter_value.size(); + int64_t *filter = (int64_t *)malloc(filter_size * sizeof(int64_t)); + std::copy(filter_value.begin(), filter_value.end(), filter); + + size_t stride_size = stride_value.size(); + int64_t *stride = (int64_t *)malloc(stride_size * sizeof(int64_t)); + std::copy(stride_value.begin(), stride_value.end(), stride); + + size_t onnxPads_size = onnxPads_value.size(); + int64_t *onnxPads = (int64_t *)malloc(onnxPads_size * sizeof(int64_t)); + std::copy(onnxPads_value.begin(), onnxPads_value.end(), onnxPads); auto output_ort = ortki_MaxPool(l_ort, "NOTSET", 0, dilations, 2, filter, 2, onnxPads, 4, 0, stride, 2); size_t size = 0; @@ -72,33 +97,38 @@ TEST_P(ReduceWindow2DTest, ReduceWindow2D) { // actual auto dilations_tensor = - hrt::create( - dt_int64, {2}, - {reinterpret_cast(dilations), sizeof(dilations)}, true, - host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - auto filter_tensor = - hrt::create(dt_int64, {2}, - {reinterpret_cast(filter), sizeof(filter)}, + hrt::create(dt_int64, {dilations_size}, + {reinterpret_cast(dilations), + dilations_size * sizeof(int64_t)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - auto stride_tensor = - hrt::create(dt_int64, {2}, - {reinterpret_cast(stride), sizeof(stride)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - auto onnxPads_tensor = - hrt::create(dt_int64, {4}, - {reinterpret_cast(onnxPads), sizeof(onnxPads)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - float_t init_value[] = {0.0f}; + + auto filter_tensor = hrt::create(dt_int64, {filter_size}, + {reinterpret_cast(filter), + filter_size * sizeof(int64_t)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + auto stride_tensor = hrt::create(dt_int64, {stride_size}, + {reinterpret_cast(stride), + stride_size * sizeof(int64_t)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + auto onnxPads_tensor = hrt::create(dt_int64, {onnxPads_size}, + {reinterpret_cast(onnxPads), + onnxPads_size * sizeof(int64_t)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + float init_value[] = {0.0f}; auto init_value_tensor = hrt::create( dt_float32, {1}, {reinterpret_cast(init_value), sizeof(init_value)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); + bool ceil_mode_value[] = {false}; auto ceil_mode_value_tensor = hrt::create(dt_boolean, {1}, @@ -106,6 +136,7 @@ TEST_P(ReduceWindow2DTest, ReduceWindow2D) { sizeof(ceil_mode_value)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); + bool count_include_pad[] = {false}; auto count_include_pad_tensor = hrt::create(dt_boolean, {1}, @@ -113,6 +144,7 @@ TEST_P(ReduceWindow2DTest, ReduceWindow2D) { sizeof(count_include_pad)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); + auto output = kernels::stackvm::reduce_window2d( runtime::stackvm::reduce_op_t::max, input.impl(), init_value_tensor.impl(), filter_tensor.impl(), @@ -137,6 +169,27 @@ TEST_P(ReduceWindow2DTest, ReduceWindow2D) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_type, i) + FOR_LOOP(lhs_shape, j) + FOR_LOOP(dilations, k) + FOR_LOOP(filter, l) + FOR_LOOP(stride, m) + FOR_LOOP(onnxPads, n) + SPLIT_ELEMENT(lhs_type, i) + SPLIT_ELEMENT(lhs_shape, j) + 
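ReduceWindow2D now takes dilations, filter, stride and onnxPads from test_reduce_window2D.json instead of hard-coded arrays. For those values the spatial size ortki_MaxPool returns per dimension (with ceil_mode off) follows the standard floor-mode pooling formula; the helper below is an illustrative restatement, not code from the patch. With a 16-wide input, 3-wide filter, stride 1, dilation 1 and pads {1, 1} the output stays 16 wide, while pads {0, 0} give 14.

```cpp
// Illustrative only: floor-mode pooled extent for one spatial dimension,
// computed from the JSON fields dilations/filter/stride/onnxPads.
#include <cstdint>

int64_t pooled_extent(int64_t in, int64_t filter, int64_t stride,
                      int64_t dilation, int64_t pad_begin, int64_t pad_end) {
    const int64_t window = dilation * (filter - 1) + 1; // effective dilated window
    return (in + pad_begin + pad_end - window) / stride + 1;
}
```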
SPLIT_ELEMENT(dilations, k) + SPLIT_ELEMENT(filter, l) + SPLIT_ELEMENT(stride, m) + SPLIT_ELEMENT(onnxPads, n) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_relu.cpp b/tests/kernels/test_relu.cpp index 0a8a8dcdeb..90e8c2bfc1 100644 --- a/tests/kernels/test_relu.cpp +++ b/tests/kernels/test_relu.cpp @@ -22,16 +22,20 @@ #include #include +#define TEST_CASE_NAME "test_relu" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class ReluTest - : public KernelTest, - public ::testing::TestWithParam> { +class ReluTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto l_shape = GetShapeArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) @@ -45,12 +49,8 @@ class ReluTest runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - Relu, ReluTest, - testing::Combine(testing::Values(dt_float32, dt_int32), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{1}, - dims_t{8, 8}, dims_t{1, 4, 16}, - dims_t{1, 3, 24, 24}, dims_t{}))); +INSTANTIATE_TEST_SUITE_P(Relu, ReluTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(ReluTest, Relu) { auto l_ort = runtime_tensor_2_ort_tensor(input); @@ -85,6 +85,15 @@ TEST_P(ReluTest, Relu) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(lhs_type, j) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(lhs_type, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_relu.json b/tests/kernels/test_relu.json new file mode 100644 index 0000000000..47193c3ce4 --- /dev/null +++ b/tests/kernels/test_relu.json @@ -0,0 +1,4 @@ +{ + "lhs_shape":[[1, 3, 16, 16], [1, 3, 16], [8, 8], [16, 16], [1], [1, 3, 24, 24], []], + "lhs_type":["dt_float32", "dt_float16","dt_float64", "dt_int32"] +} \ No newline at end of file diff --git a/tests/kernels/test_require.cpp b/tests/kernels/test_require.cpp index ffaa069a41..12d2b7d6a5 100644 --- a/tests/kernels/test_require.cpp +++ b/tests/kernels/test_require.cpp @@ -22,37 +22,33 @@ #include #include +#define TEST_CASE_NAME "test_require" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class RequireTest - : public KernelTest, - public ::testing::TestWithParam> { +class RequireTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + auto typecode = GetDataType("lhs_type"); + auto l_shape = GetShapeArray("lhs_shape"); lhs = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(lhs); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor lhs; }; -INSTANTIATE_TEST_SUITE_P( - Require, RequireTest, - testing::Combine(testing::Values(dt_float32, dt_int32, dt_int64, dt_uint32, - dt_int8, dt_uint8, dt_int16, dt_uint16, - dt_uint64, dt_float16, dt_float64, - dt_boolean), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{1}, - dims_t{1, 3}, dims_t{16, 16}, - dims_t{1, 3, 16}, dims_t{}))); 
+INSTANTIATE_TEST_SUITE_P(Require, RequireTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(RequireTest, Require) { @@ -87,6 +83,15 @@ TEST_P(RequireTest, Require) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_type, i) + FOR_LOOP(lhs_shape, j) + SPLIT_ELEMENT(lhs_type, i) + SPLIT_ELEMENT(lhs_shape, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_require.json b/tests/kernels/test_require.json new file mode 100644 index 0000000000..4d19ec4f78 --- /dev/null +++ b/tests/kernels/test_require.json @@ -0,0 +1,4 @@ +{ + "lhs_type":["dt_float32", "dt_int32", "dt_int64", "dt_uint32", "dt_int8", "dt_uint8", "dt_int16", "dt_uint16", "dt_uint64", "dt_float16", "dt_float64", "dt_boolean"], + "lhs_shape":[[1, 3, 16, 16], [1], [16, 16], [1, 3, 16], [], [1, 3]] +} \ No newline at end of file diff --git a/tests/kernels/test_reshape.cpp b/tests/kernels/test_reshape.cpp index 2d20e75a5a..71a43d81a9 100644 --- a/tests/kernels/test_reshape.cpp +++ b/tests/kernels/test_reshape.cpp @@ -26,12 +26,16 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class ReshapeTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_reshape" + +class ReshapeTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("lhs_type"); + auto l_shape = GetShapeArray("i_shape"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) @@ -39,21 +43,14 @@ class ReshapeTest init_tensor(input); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - Reshape, ReshapeTest, - testing::Combine(testing::Values(dt_float32, dt_int32, dt_int64, dt_boolean, - dt_float16, dt_int8, dt_uint16, dt_int16, - dt_uint8, dt_uint64, dt_float64, - dt_uint32), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{1, 16, 3, 16}, - dims_t{3, 16, 16}, dims_t{768}, - dims_t{48, 16}))); +INSTANTIATE_TEST_SUITE_P(Reshape, ReshapeTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(ReshapeTest, Reshape) { auto l_ort = runtime_tensor_2_ort_tensor(input); @@ -97,6 +94,15 @@ TEST_P(ReshapeTest, Reshape) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_type, i) + FOR_LOOP(i_shape, j) + SPLIT_ELEMENT(lhs_type, i) + SPLIT_ELEMENT(i_shape, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_reshape.json b/tests/kernels/test_reshape.json new file mode 100644 index 0000000000..9911eae939 --- /dev/null +++ b/tests/kernels/test_reshape.json @@ -0,0 +1,4 @@ +{ + "i_shape":[[1, 3, 16, 16], [1, 16, 3, 16], [3, 16, 16], [768], [48, 16]], + "lhs_type":["dt_float32", "dt_int32", "dt_int16", "dt_int64", "dt_int8", "dt_uint8", "dt_uint16", "dt_uint32", "dt_uint64", "dt_bfloat16", "dt_float64", "dt_float16", "dt_boolean"] +} \ No newline at end of file diff --git a/tests/kernels/test_resize_image.cpp b/tests/kernels/test_resize_image.cpp index b50ae7062e..947020b3a0 100644 --- a/tests/kernels/test_resize_image.cpp +++ b/tests/kernels/test_resize_image.cpp @@ -26,28 +26,30 @@ using namespace nncase; using namespace nncase::runtime; 
using namespace ortki; -class ResizeImageTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_resize_image" + +class ResizeImageTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("lhs_type"); + auto l_shape = GetShapeArray("i_shape"); lhs = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(lhs); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor lhs; }; INSTANTIATE_TEST_SUITE_P(ResizeImage, ResizeImageTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1, 3, 224, - 224}))); + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(ResizeImageTest, ResizeImage) { @@ -60,7 +62,7 @@ TEST_P(ResizeImageTest, ResizeImage) { true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - float_t roi_array[1]; + float roi_array[1]; auto roi = hrt::create(dt_float32, {1}, {reinterpret_cast(roi_array), sizeof(roi_array)}, @@ -75,7 +77,7 @@ TEST_P(ResizeImageTest, ResizeImage) { true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - float_t cubic_coeff_a_array[] = {-0.75f}; + float cubic_coeff_a_array[] = {-0.75f}; auto cubic_coeff_a = hrt::create(dt_float32, {1}, {reinterpret_cast(cubic_coeff_a_array), @@ -83,7 +85,7 @@ TEST_P(ResizeImageTest, ResizeImage) { true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - float_t extrapolation_value_array[] = {0.0f}; + float extrapolation_value_array[] = {0.0f}; auto extrapolation_value = hrt::create(dt_float32, {1}, {reinterpret_cast(extrapolation_value_array), @@ -132,9 +134,188 @@ TEST_P(ResizeImageTest, ResizeImage) { // compare EXPECT_TRUE(result); + + // actual + + // actual + int64_t new_shape_array1[] = {1, 3, 112, 112}; + auto new_shape1 = + hrt::create(dt_int64, {4}, + {reinterpret_cast(new_shape_array1), + sizeof(new_shape_array1)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + float roi_array1[1]; + auto roi1 = hrt::create(dt_float32, {1}, + {reinterpret_cast(roi_array1), + sizeof(roi_array1)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + int32_t exclude_outside_array1[] = {0}; + + auto exclude_outside1 = + hrt::create(dt_int32, {1}, + {reinterpret_cast(exclude_outside_array1), + sizeof(exclude_outside_array1)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + float cubic_coeff_a_array1[] = {-0.75f}; + auto cubic_coeff_a1 = + hrt::create(dt_float32, {1}, + {reinterpret_cast(cubic_coeff_a_array1), + sizeof(cubic_coeff_a_array1)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + float_t extrapolation_value_array1[] = {0.0f}; + auto extrapolation_value1 = + hrt::create(dt_float32, {1}, + {reinterpret_cast(extrapolation_value_array1), + sizeof(extrapolation_value_array1)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + auto output1 = + kernels::stackvm::resize_image( + runtime::stackvm::image_resize_mode_t::bilinear, + runtime::stackvm::image_resize_transformation_mode_t:: + pytorch_half_pixel, + runtime::stackvm::image_resize_nearest_mode_t::round_prefer_ceil, + false, lhs.impl(), roi1.impl(), new_shape1.impl(), + cubic_coeff_a1.impl(), exclude_outside1.impl(), + extrapolation_value1.impl()) + 
.expect("resize_image failed"); + runtime_tensor actual1(output1.as().expect("as tensor failed")); + + const char *transformation_mode1 = "pytorch_half_pixel"; + const char *resize_mode_t1 = "linear"; + const char *nearest_mode_t1 = "round_prefer_floor"; + + // expected + auto output_ort1 = ortki_ResizeWithSizes( + runtime_tensor_2_ort_tensor(lhs), runtime_tensor_2_ort_tensor(roi1), + runtime_tensor_2_ort_tensor(new_shape1), transformation_mode1, -0.75f, + 0l, 0.0f, resize_mode_t1, nearest_mode_t1); + + size_t size1 = 0; + void *ptr_ort1 = tensor_buffer(output_ort1, &size1); + dims_t shape1(tensor_rank(output_ort1)); + tensor_shape(output_ort1, reinterpret_cast(shape1.data())); + auto expected1 = + hrt::create(lhs.datatype(), shape1, + {reinterpret_cast(ptr_ort1), size1}, true, + host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + bool result1 = is_same_tensor(expected1, actual1) || + cosine_similarity_tensor(expected1, actual1); + + if (!result1) { + std::cout << "actual "; + print_runtime_tensor(actual1); + std::cout << "expected "; + print_runtime_tensor(expected1); + } + + // compare + EXPECT_TRUE(result1); + + // actual + int64_t new_shape_array2[] = {1, 3, 112, 112}; + auto new_shape2 = + hrt::create(dt_int64, {4}, + {reinterpret_cast(new_shape_array2), + sizeof(new_shape_array2)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + float_t roi_array2[1]; + auto roi2 = hrt::create(dt_float32, {1}, + {reinterpret_cast(roi_array2), + sizeof(roi_array2)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + int32_t exclude_outside_array2[] = {0}; + + auto exclude_outside2 = + hrt::create(dt_int32, {1}, + {reinterpret_cast(exclude_outside_array2), + sizeof(exclude_outside_array2)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + float_t cubic_coeff_a_array2[] = {-0.75f}; + auto cubic_coeff_a2 = + hrt::create(dt_float32, {1}, + {reinterpret_cast(cubic_coeff_a_array2), + sizeof(cubic_coeff_a_array2)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + float_t extrapolation_value_array2[] = {0.0f}; + auto extrapolation_value2 = + hrt::create(dt_float32, {1}, + {reinterpret_cast(extrapolation_value_array2), + sizeof(extrapolation_value_array2)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + auto output2 = + kernels::stackvm::resize_image( + runtime::stackvm::image_resize_mode_t::bilinear, + runtime::stackvm::image_resize_transformation_mode_t:: + pytorch_half_pixel, + runtime::stackvm::image_resize_nearest_mode_t::round_prefer_ceil, + false, lhs.impl(), roi2.impl(), new_shape2.impl(), + cubic_coeff_a2.impl(), exclude_outside2.impl(), + extrapolation_value2.impl()) + .expect("resize_image failed"); + runtime_tensor actual2(output2.as().expect("as tensor failed")); + + const char *transformation_mode2 = "pytorch_half_pixel"; + const char *resize_mode_t2 = "linear"; + const char *nearest_mode_t2 = "round_prefer_ceil"; + + // expected + auto output_ort2 = ortki_ResizeWithSizes( + runtime_tensor_2_ort_tensor(lhs), runtime_tensor_2_ort_tensor(roi2), + runtime_tensor_2_ort_tensor(new_shape2), transformation_mode2, -0.75f, + 0l, 0.0f, resize_mode_t2, nearest_mode_t2); + + size_t size2 = 0; + void *ptr_ort2 = tensor_buffer(output_ort2, &size2); + dims_t shape2(tensor_rank(output_ort2)); + tensor_shape(output_ort2, reinterpret_cast(shape2.data())); + auto expected2 = + hrt::create(lhs.datatype(), shape2, + 
{reinterpret_cast(ptr_ort2), size2}, true, + host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + bool result2 = is_same_tensor(expected2, actual2) || + cosine_similarity_tensor(expected2, actual2); + + if (!result2) { + std::cout << "actual "; + print_runtime_tensor(actual2); + std::cout << "expected "; + print_runtime_tensor(expected2); + } + + // compare + EXPECT_TRUE(result2); } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_type, i) + FOR_LOOP(i_shape, j) + SPLIT_ELEMENT(lhs_type, i) + SPLIT_ELEMENT(i_shape, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_resize_image.json b/tests/kernels/test_resize_image.json new file mode 100644 index 0000000000..8dcce05246 --- /dev/null +++ b/tests/kernels/test_resize_image.json @@ -0,0 +1,4 @@ +{ + "i_shape":[[1, 3, 224, 224]], + "lhs_type":["dt_float32"] +} \ No newline at end of file diff --git a/tests/kernels/test_reverse_sequence.cpp b/tests/kernels/test_reverse_sequence.cpp index cb399b54ba..8c2d3647d5 100644 --- a/tests/kernels/test_reverse_sequence.cpp +++ b/tests/kernels/test_reverse_sequence.cpp @@ -26,12 +26,16 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class ReverseSequenceTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_reverse_sequence" + +class ReverseSequenceTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("lhs_type"); + auto l_shape = GetShapeArray("i_shape"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) @@ -39,15 +43,14 @@ class ReverseSequenceTest init_tensor(input); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; }; INSTANTIATE_TEST_SUITE_P(ReverseSequence, ReverseSequenceTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{4, 4}))); + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(ReverseSequenceTest, ReverseSequence) { auto l_ort = runtime_tensor_2_ort_tensor(input); @@ -106,6 +109,15 @@ TEST_P(ReverseSequenceTest, ReverseSequence) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_type, i) + FOR_LOOP(i_shape, j) + SPLIT_ELEMENT(lhs_type, i) + SPLIT_ELEMENT(i_shape, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_reverse_sequence.json b/tests/kernels/test_reverse_sequence.json new file mode 100644 index 0000000000..bfc3f9b151 --- /dev/null +++ b/tests/kernels/test_reverse_sequence.json @@ -0,0 +1,4 @@ +{ + "i_shape":[[4, 4]], + "lhs_type":["dt_float32"] +} \ No newline at end of file diff --git a/tests/kernels/test_scatter_nd.cpp b/tests/kernels/test_scatter_nd.cpp index c9b8aa6e51..926d692bef 100644 --- a/tests/kernels/test_scatter_nd.cpp +++ b/tests/kernels/test_scatter_nd.cpp @@ -26,14 +26,19 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class ScatterNDTest - : public KernelTest, - public ::testing::TestWithParam< - std::tuple> { +#define TEST_CASE_NAME "test_scatter_nd" + +class ScatterNDTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - 
auto &&[typecode1, typecode2, input_shape, indices_shape, - updates_shape] = GetParam(); + READY_SUBCASE() + + auto typecode1 = GetDataType("lhs_type"); + auto typecode2 = GetDataType("rhs_type"); + auto input_shape = GetShapeArray("input_shape"); + auto indices_shape = GetShapeArray("indices_shape"); + auto updates_shape = GetShapeArray("updates_shape"); input = hrt::create(typecode1, input_shape, host_runtime_tensor::pool_cpu_only) @@ -53,7 +58,7 @@ class ScatterNDTest init_tensor(updates); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; @@ -61,14 +66,8 @@ class ScatterNDTest runtime_tensor updates; }; -INSTANTIATE_TEST_SUITE_P( - ScatterND, ScatterNDTest, - testing::Combine( - testing::Values(dt_float32, dt_uint8, dt_int8, dt_float16, dt_uint32, - dt_uint64, dt_uint16, dt_int16, dt_int32, dt_int64, - dt_float64, dt_boolean, dt_bfloat16), - testing::Values(dt_int64), testing::Values(dims_t{2, 1, 10}), - testing::Values(dims_t{2, 1, 1, 3}), testing::Values(dims_t{2, 1, 1}))); +INSTANTIATE_TEST_SUITE_P(ScatterND, ScatterNDTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(ScatterNDTest, ScatterND) { @@ -108,6 +107,24 @@ TEST_P(ScatterNDTest, ScatterND) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_type, i) + FOR_LOOP(rhs_type, j) + FOR_LOOP(input_shape, k) + FOR_LOOP(indices_shape, l) + FOR_LOOP(updates_shape, m) + SPLIT_ELEMENT(lhs_type, i) + SPLIT_ELEMENT(rhs_type, j) + SPLIT_ELEMENT(input_shape, k) + SPLIT_ELEMENT(indices_shape, l) + SPLIT_ELEMENT(updates_shape, m) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_scatter_nd.json b/tests/kernels/test_scatter_nd.json new file mode 100644 index 0000000000..d1b5f2c96f --- /dev/null +++ b/tests/kernels/test_scatter_nd.json @@ -0,0 +1,7 @@ +{ + "lhs_type":["dt_float32", "dt_uint8", "dt_int8", "dt_float16", "dt_uint32", "dt_uint64", "dt_uint16", "dt_int16", "dt_int32", "dt_int64", "dt_float64", "dt_boolean", "dt_bfloat16"], + "rhs_type":["dt_int64"], + "input_shape":[[2, 1, 10], [2, 5, 10]], + "indices_shape":[[2, 1, 1, 3]], + "updates_shape":[[2, 1, 1]] +} \ No newline at end of file diff --git a/tests/kernels/test_selu.cpp b/tests/kernels/test_selu.cpp index 5d63d18d36..76abef35d6 100644 --- a/tests/kernels/test_selu.cpp +++ b/tests/kernels/test_selu.cpp @@ -22,54 +22,93 @@ #include #include +#define TEST_CASE_NAME "test_selu" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class SeluTest - : public KernelTest, - public ::testing::TestWithParam> { +class SeluTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto l_shape = GetShapeArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); + auto value1 = GetFloatNumber("alpha"); + auto value2 = GetFloatNumber("gamma"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); + + alpha_value = value1; + gamma_value = value2; } void TearDown() override {} protected: runtime_tensor input; + float alpha_value; + float gamma_value; }; -INSTANTIATE_TEST_SUITE_P( - Selu, SeluTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{1, 3, 16}, - 
dims_t{1, 3}, dims_t{1}, dims_t{8, 8}, - dims_t{}))); +INSTANTIATE_TEST_SUITE_P(Selu, SeluTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(SeluTest, Selu) { auto l_ort = runtime_tensor_2_ort_tensor(input); // expected - float_t alpha_ptr[] = {1.5f}; - auto alpha = hrt::create(nncase::dt_float32, {1}, - {reinterpret_cast(alpha_ptr), - sizeof(alpha_ptr)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - - float_t gamma_ptr[] = {1.5f}; - auto gamma = hrt::create(nncase::dt_float32, {1}, - {reinterpret_cast(gamma_ptr), - sizeof(gamma_ptr)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - auto output_ort = ortki_Selu(l_ort, 1.5f, 1.5f); + runtime_tensor alpha; + runtime_tensor gamma; + if (input.datatype() == dt_float32) { + float alpha_ptr[] = {alpha_value}; + alpha = hrt::create(nncase::dt_float32, {1}, + {reinterpret_cast(alpha_ptr), + sizeof(alpha_ptr)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + float gamma_ptr[] = {gamma_value}; + gamma = hrt::create(nncase::dt_float32, {1}, + {reinterpret_cast(gamma_ptr), + sizeof(gamma_ptr)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + } else if (input.datatype() == dt_float16) { + half alpha_ptr[] = {(half)alpha_value}; + alpha = hrt::create(nncase::dt_float16, {1}, + {reinterpret_cast(alpha_ptr), + sizeof(alpha_ptr)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + half gamma_ptr[] = {(half)gamma_value}; + gamma = hrt::create(nncase::dt_float16, {1}, + {reinterpret_cast(gamma_ptr), + sizeof(gamma_ptr)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + } else { + double alpha_ptr[] = {(double)alpha_value}; + alpha = hrt::create(nncase::dt_float64, {1}, + {reinterpret_cast(alpha_ptr), + sizeof(alpha_ptr)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + double gamma_ptr[] = {(double)gamma_value}; + gamma = hrt::create(nncase::dt_float64, {1}, + {reinterpret_cast(gamma_ptr), + sizeof(gamma_ptr)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + } + auto output_ort = ortki_Selu(l_ort, alpha_value, gamma_value); size_t size = 0; void *ptr_ort = tensor_buffer(output_ort, &size); dims_t shape(tensor_rank(output_ort)); @@ -100,6 +139,21 @@ TEST_P(SeluTest, Selu) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(lhs_type, j) + FOR_LOOP(alpha, k) + FOR_LOOP(gamma, l) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(lhs_type, j) + SPLIT_ELEMENT(alpha, k) + SPLIT_ELEMENT(gamma, l) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_selu.json b/tests/kernels/test_selu.json new file mode 100644 index 0000000000..69c710cf54 --- /dev/null +++ b/tests/kernels/test_selu.json @@ -0,0 +1,6 @@ +{ + "lhs_shape":[[1, 3, 16, 16], [1, 3, 16], [8, 8], [16, 16], [1], [1, 3, 24, 24], []], + "lhs_type":["dt_float32", "dt_float16", "dt_float64"], + "alpha":[1.2, 0.8, 0.5, 0.6, 1.5], + "gamma":[1.2, 0.8, 0.5, 0.6, 1.5] +} \ No newline at end of file diff --git a/tests/kernels/test_shape_of.cpp b/tests/kernels/test_shape_of.cpp index 496174d667..fb509d5ac4 100644 --- a/tests/kernels/test_shape_of.cpp +++ b/tests/kernels/test_shape_of.cpp @@ -26,42 +26,45 @@ using namespace nncase; 
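The Selu test above now materializes alpha and gamma in whichever element type the input uses (float32, float16 or float64) while still passing the plain float values to ortki_Selu. The sketch below compresses that per-dtype branch into one hypothetical helper; the function's name and shape are assumptions, and only the pointer-plus-byte-size result mirrors what hrt::create is given in the test.

```cpp
// Hypothetical sketch: one-element buffer of the requested element type,
// as the Selu test builds for alpha and gamma before calling hrt::create.
#include <cstring>
#include <vector>

template <typename T>
std::vector<unsigned char> scalar_bytes(float value) {
    T typed = static_cast<T>(value); // T is float, half or double in the test
    std::vector<unsigned char> bytes(sizeof(T));
    std::memcpy(bytes.data(), &typed, sizeof(T));
    return bytes;
}
```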
using namespace nncase::runtime; using namespace ortki; -class ShapeOfTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_shape_of" + +class ShapeOfTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, shape] = GetParam(); + + READY_SUBCASE() + + auto typecode = GetDataType("lhs_type"); + auto shape = GetShapeArray("i_shape"); lhs = hrt::create(typecode, shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(lhs); + + // expected + size_t shape_size = shape.size(); + int64_t *shape_array = (int64_t *)malloc(shape_size * sizeof(int64_t)); + std::copy(shape.begin(), shape.end(), shape_array); + expected = hrt::create(nncase::dt_int64, {shape_size}, + {reinterpret_cast(shape_array), + shape_size * sizeof(int64_t)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor lhs; + runtime_tensor expected; }; -INSTANTIATE_TEST_SUITE_P( - ShapeOf, ShapeOfTest, - testing::Combine(testing::Values(dt_float32, dt_int32, dt_int8, dt_uint8, - dt_float64, dt_int16, dt_uint32, dt_uint64, - dt_uint16, dt_boolean, dt_int64, - dt_float16), - testing::Values(dims_t{1, 3, 16, 16}))); +INSTANTIATE_TEST_SUITE_P(ShapeOf, ShapeOfTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(ShapeOfTest, ShapeOf) { - // expected - int64_t expected_array[] = {1, 3, 16, 16}; - auto expected = hrt::create(nncase::dt_int64, {4}, - {reinterpret_cast(expected_array), - sizeof(expected_array)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - // actual auto output = kernels::stackvm::shape_of(lhs.impl()).expect("ShapeOf failed"); @@ -82,6 +85,15 @@ TEST_P(ShapeOfTest, ShapeOf) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_type, i) + FOR_LOOP(input_shape, k) + SPLIT_ELEMENT(lhs_type, i) + SPLIT_ELEMENT(input_shape, k) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_shape_of.json b/tests/kernels/test_shape_of.json new file mode 100644 index 0000000000..e369b54de0 --- /dev/null +++ b/tests/kernels/test_shape_of.json @@ -0,0 +1,4 @@ +{ + "lhs_type":["dt_float32", "dt_uint8", "dt_int8", "dt_float16", "dt_uint32", "dt_uint64", "dt_uint16", "dt_int16", "dt_int32", "dt_int64", "dt_float64", "dt_boolean", "dt_bfloat16"], + "input_shape":[[1, 3, 16, 16], [1], [1, 3], [1, 2, 3, 4, 5], [1, 3, 16]] +} \ No newline at end of file diff --git a/tests/kernels/test_sigmoid.cpp b/tests/kernels/test_sigmoid.cpp index a2ec67aaf0..c9d5b1180d 100644 --- a/tests/kernels/test_sigmoid.cpp +++ b/tests/kernels/test_sigmoid.cpp @@ -22,16 +22,20 @@ #include #include +#define TEST_CASE_NAME "test_sigmoid" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class SigmoidTest - : public KernelTest, - public ::testing::TestWithParam> { +class SigmoidTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto l_shape = GetShapeArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) @@ -45,12 +49,8 @@ class SigmoidTest runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - Sigmoid, 
SigmoidTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1}, dims_t{1, 3}, - dims_t{1, 3, 16, 16}, dims_t{1, 3, 16}, - dims_t{16, 16}, dims_t{}))); +INSTANTIATE_TEST_SUITE_P(Sigmoid, SigmoidTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(SigmoidTest, Sigmoid) { auto l_ort = runtime_tensor_2_ort_tensor(input); @@ -86,6 +86,15 @@ TEST_P(SigmoidTest, Sigmoid) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(lhs_type, j) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(lhs_type, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_sigmoid.json b/tests/kernels/test_sigmoid.json new file mode 100644 index 0000000000..2694fe1f34 --- /dev/null +++ b/tests/kernels/test_sigmoid.json @@ -0,0 +1,4 @@ +{ + "lhs_shape":[[1, 3, 16, 16], [1, 3, 16], [8, 8], [16, 16], [1], [1, 3, 24, 24], []], + "lhs_type":["dt_float32", "dt_float16", "dt_float64"] +} \ No newline at end of file diff --git a/tests/kernels/test_size_of.cpp b/tests/kernels/test_size_of.cpp index 9efacb4dc2..3f81183f63 100644 --- a/tests/kernels/test_size_of.cpp +++ b/tests/kernels/test_size_of.cpp @@ -26,12 +26,17 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class SizeOfTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_size_of" + +class SizeOfTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + + READY_SUBCASE() + + auto typecode = GetDataType("lhs_type"); + auto l_shape = GetShapeArray("input_shape"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) @@ -45,13 +50,8 @@ class SizeOfTest runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - SizeOf, SizeOfTest, - testing::Combine(testing::Values(dt_float32, dt_int8, dt_uint8, dt_boolean, - dt_int32, dt_int16, dt_float64, dt_uint16), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{1, 3, 3, 3}, - dims_t{1, 3, 16}, dims_t{1}, - dims_t{1, 3}))); +INSTANTIATE_TEST_SUITE_P(SizeOf, SizeOfTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(SizeOfTest, SizeOf) { @@ -91,6 +91,15 @@ TEST_P(SizeOfTest, SizeOf) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_type, i) + FOR_LOOP(input_shape, j) + SPLIT_ELEMENT(lhs_type, i) + SPLIT_ELEMENT(input_shape, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_size_of.json b/tests/kernels/test_size_of.json new file mode 100644 index 0000000000..e0e54ee403 --- /dev/null +++ b/tests/kernels/test_size_of.json @@ -0,0 +1,4 @@ +{ + "lhs_type":["dt_float32", "dt_uint8", "dt_int8", "dt_float16", "dt_uint32", "dt_uint64", "dt_uint16", "dt_int16", "dt_int32", "dt_int64", "dt_float64", "dt_boolean", "dt_bfloat16"], + "input_shape":[[1, 3, 16, 16], [1], [1, 3], [1, 2, 3, 4, 5], [1, 3, 16], [1, 3, 3, 3]] +} \ No newline at end of file diff --git a/tests/kernels/test_slice.cpp b/tests/kernels/test_slice.cpp index daaa549644..62723b84ad 100644 --- a/tests/kernels/test_slice.cpp +++ b/tests/kernels/test_slice.cpp @@ -22,16 +22,24 @@ #include #include +#define TEST_CASE_NAME "test_slice" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class SliceTest 
- : public KernelTest, - public ::testing::TestWithParam> { +class SliceTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("lhs_type"); + auto l_shape = GetShapeArray("input_shape"); + auto value1 = GetShapeArray("value1"); + auto value2 = GetShapeArray("value2"); + auto value3 = GetShapeArray("value3"); + auto value4 = GetShapeArray("value4"); int32_t input_array[120]; @@ -44,20 +52,57 @@ class SliceTest sizeof(input_array)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); + + size_t begin_size = value1.size(); + int64_t *begin_array = (int64_t *)malloc(begin_size * sizeof(int64_t)); + std::copy(value1.begin(), value1.end(), begin_array); + begin = hrt::create(dt_int64, {begin_size}, + {reinterpret_cast(begin_array), + begin_size * sizeof(int64_t)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create1 tensor failed"); + + size_t end_size = value2.size(); + int64_t *end_array = (int64_t *)malloc(end_size * sizeof(int64_t)); + std::copy(value2.begin(), value2.end(), end_array); + end = hrt::create(dt_int64, {begin_size}, + {reinterpret_cast(end_array), + end_size * sizeof(int64_t)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create2 tensor failed"); + + size_t axes_size = value3.size(); + int64_t *axes_array = (int64_t *)malloc(axes_size * sizeof(int64_t)); + std::copy(value3.begin(), value3.end(), axes_array); + axes = hrt::create(dt_int64, {begin_size}, + {reinterpret_cast(axes_array), + axes_size * sizeof(int64_t)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create3 tensor failed"); + + size_t strides_size = value4.size(); + int64_t *strides_array = + (int64_t *)malloc(strides_size * sizeof(int64_t)); + std::copy(value4.begin(), value4.end(), strides_array); + strides = hrt::create(dt_int64, {begin_size}, + {reinterpret_cast(strides_array), + strides_size * sizeof(int64_t)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create4 tensor failed"); } void TearDown() override {} protected: runtime_tensor input; + runtime_tensor begin; + runtime_tensor end; + runtime_tensor axes; + runtime_tensor strides; }; -INSTANTIATE_TEST_SUITE_P( - Slice, SliceTest, - testing::Combine(testing::Values(dt_int32), - testing::Values(dims_t{2, 3, 4, 5}, dims_t{1, 4, 5, 6}, - dims_t{1, 1, 1, 120}, dims_t{2, 2, 5, 6}, - dims_t{1, 1, 2, 60}))); +INSTANTIATE_TEST_SUITE_P(Slice, SliceTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(SliceTest, Slice) { @@ -70,30 +115,6 @@ TEST_P(SliceTest, Slice) { .expect("create tensor failed"); // actual - int32_t begin_array[] = {0, 0, 0, 0}; - auto begin = hrt::create(input.datatype(), {4}, - {reinterpret_cast(begin_array), - sizeof(begin_array)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - int32_t end_array[] = {1, 1, 1, 5}; - auto end = hrt::create(input.datatype(), {4}, - {reinterpret_cast(end_array), - sizeof(end_array)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - int32_t axes_array[] = {0, 1, 2, 3}; - auto axes = hrt::create(input.datatype(), {4}, - {reinterpret_cast(axes_array), - sizeof(axes_array)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - int32_t strides_array[] = {1, 1, 1, 1}; - auto strides = hrt::create(input.datatype(), {4}, - {reinterpret_cast(strides_array), - sizeof(strides_array)}, - true, host_runtime_tensor::pool_cpu_only) - 
.expect("create tensor failed"); auto output = kernels::stackvm::slice(input.impl(), begin.impl(), end.impl(), axes.impl(), strides.impl()) @@ -115,6 +136,27 @@ TEST_P(SliceTest, Slice) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_type, i) + FOR_LOOP(input_shape, j) + FOR_LOOP(value1, k) + FOR_LOOP(value2, l) + FOR_LOOP(value3, m) + FOR_LOOP(value4, n) + SPLIT_ELEMENT(lhs_type, i) + SPLIT_ELEMENT(input_shape, j) + SPLIT_ELEMENT(value1, k) + SPLIT_ELEMENT(value2, l) + SPLIT_ELEMENT(value3, m) + SPLIT_ELEMENT(value4, n) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_slice.json b/tests/kernels/test_slice.json new file mode 100644 index 0000000000..3d64b6de70 --- /dev/null +++ b/tests/kernels/test_slice.json @@ -0,0 +1,8 @@ +{ + "lhs_type":["dt_int32"], + "input_shape":[[2, 3, 4, 5], [1, 4, 5, 6], [1, 1, 1, 120], [2, 2, 5, 6], [1, 1, 2, 60]], + "value1": [[0, 0, 0, 0]], + "value2": [[1, 1, 1, 5]], + "value3": [[0, 1, 2, 3]], + "value4": [[1, 1, 1, 1]] +} \ No newline at end of file diff --git a/tests/kernels/test_softmax.cpp b/tests/kernels/test_softmax.cpp index 2c11217092..7640f20e69 100644 --- a/tests/kernels/test_softmax.cpp +++ b/tests/kernels/test_softmax.cpp @@ -26,12 +26,17 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class SoftmaxTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_softmax" + +class SoftmaxTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape, value] = GetParam(); + READY_SUBCASE() + + auto l_shape = GetShapeArray("lhs_shape"); + auto value = GetNumber("axis"); + auto typecode = GetDataType("lhs_type"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) @@ -47,7 +52,7 @@ class SoftmaxTest .expect("create tensor failed"); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; @@ -55,12 +60,8 @@ class SoftmaxTest int64_t axis_value; }; -INSTANTIATE_TEST_SUITE_P( - Softmax, SoftmaxTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1}, dims_t{1, 3}, - dims_t{1, 3, 16, 16}, dims_t{1, 3, 16}), - testing::Values(0, 1, 2, 3, -4, -3, -2, -1))); +INSTANTIATE_TEST_SUITE_P(Softmax, SoftmaxTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(SoftmaxTest, Softmax) { auto l_ort = runtime_tensor_2_ort_tensor(input); @@ -96,6 +97,18 @@ TEST_P(SoftmaxTest, Softmax) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(axis, j) + FOR_LOOP(lhs_type, k) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(axis, j) + SPLIT_ELEMENT(lhs_type, k) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_softmax.json b/tests/kernels/test_softmax.json new file mode 100644 index 0000000000..e1c271bc01 --- /dev/null +++ b/tests/kernels/test_softmax.json @@ -0,0 +1,5 @@ +{ + "lhs_shape":[[1, 8, 24, 24], [1, 3, 3, 16], [2, 4, 8, 8], [8, 8], [1, 3, 16, 1], [1, 1], [16], [1, 16]], + "axis":[0, 1, 2, 3, -4, -3, -2, -1], + "lhs_type":["dt_float32"] +} \ No newline at end of file diff --git 
a/tests/kernels/test_softplus.cpp b/tests/kernels/test_softplus.cpp index 6f43ebf672..5b2bd8b06b 100644 --- a/tests/kernels/test_softplus.cpp +++ b/tests/kernels/test_softplus.cpp @@ -22,16 +22,20 @@ #include #include +#define TEST_CASE_NAME "test_softplus" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class SoftplusTest - : public KernelTest, - public ::testing::TestWithParam> { +class SoftplusTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto l_shape = GetShapeArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) @@ -45,12 +49,8 @@ class SoftplusTest runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - Softplus, SoftplusTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1}, dims_t{1, 3}, - dims_t{1, 3, 16, 16}, dims_t{1, 3, 16}, - dims_t{}))); +INSTANTIATE_TEST_SUITE_P(Softplus, SoftplusTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(SoftplusTest, Softplus) { auto l_ort = runtime_tensor_2_ort_tensor(input); @@ -86,6 +86,15 @@ TEST_P(SoftplusTest, Softplus) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(lhs_type, j) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(lhs_type, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_softplus.json b/tests/kernels/test_softplus.json new file mode 100644 index 0000000000..2694fe1f34 --- /dev/null +++ b/tests/kernels/test_softplus.json @@ -0,0 +1,4 @@ +{ + "lhs_shape":[[1, 3, 16, 16], [1, 3, 16], [8, 8], [16, 16], [1], [1, 3, 24, 24], []], + "lhs_type":["dt_float32", "dt_float16", "dt_float64"] +} \ No newline at end of file diff --git a/tests/kernels/test_softsign.cpp b/tests/kernels/test_softsign.cpp index 9c41331a7c..6ab4ee32ca 100644 --- a/tests/kernels/test_softsign.cpp +++ b/tests/kernels/test_softsign.cpp @@ -22,16 +22,20 @@ #include #include +#define TEST_CASE_NAME "test_softsign" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class SoftsignTest - : public KernelTest, - public ::testing::TestWithParam> { +class SoftsignTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto l_shape = GetShapeArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) @@ -45,12 +49,8 @@ class SoftsignTest runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - Softsign, SoftsignTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1}, dims_t{1, 3}, - dims_t{1, 3, 16, 16}, dims_t{1, 3, 16}, - dims_t{}))); +INSTANTIATE_TEST_SUITE_P(Softsign, SoftsignTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(SoftsignTest, Softsign) { auto l_ort = runtime_tensor_2_ort_tensor(input); @@ -86,6 +86,15 @@ TEST_P(SoftsignTest, Softsign) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(lhs_type, j) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(lhs_type, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff 
--git a/tests/kernels/test_softsign.json b/tests/kernels/test_softsign.json new file mode 100644 index 0000000000..2694fe1f34 --- /dev/null +++ b/tests/kernels/test_softsign.json @@ -0,0 +1,4 @@ +{ + "lhs_shape":[[1, 3, 16, 16], [1, 3, 16], [8, 8], [16, 16], [1], [1, 3, 24, 24], []], + "lhs_type":["dt_float32", "dt_float16", "dt_float64"] +} \ No newline at end of file diff --git a/tests/kernels/test_space_to_batch.cpp b/tests/kernels/test_space_to_batch.cpp index d32ec514fe..e195be82f1 100644 --- a/tests/kernels/test_space_to_batch.cpp +++ b/tests/kernels/test_space_to_batch.cpp @@ -22,25 +22,40 @@ #include #include +#define TEST_CASE_NAME "test_space_to_batch" + using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class SpaceToBatchTest - : public KernelTest, - public ::testing::TestWithParam> { +class SpaceToBatchTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto l_shape = GetShapeArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); // expected - float_t expected_array[] = {1, 3, 9, 11, 2, 4, 10, 12, - 5, 7, 13, 15, 6, 8, 14, 16}; - expected = hrt::create(typecode, l_shape, - {reinterpret_cast(expected_array), - sizeof(expected_array)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); + if (l_shape.size() == 3) { + float expected_array[] = {1, 5, 2, 6, 3, 7, 4, 8}; + expected = + hrt::create(typecode, l_shape, + {reinterpret_cast(expected_array), + sizeof(expected_array)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + } else if (l_shape.size() == 4) { + float expected_array[] = {1, 3, 9, 11, 2, 4, 10, 12, + 5, 7, 13, 15, 6, 8, 14, 16}; + expected = + hrt::create(typecode, l_shape, + {reinterpret_cast(expected_array), + sizeof(expected_array)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + } } void TearDown() override {} @@ -50,29 +65,40 @@ class SpaceToBatchTest }; INSTANTIATE_TEST_SUITE_P(SpaceToBatch, SpaceToBatchTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{4, 2, 2, 1}))); + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(SpaceToBatchTest, SpaceToBatch) { // actual - float_t a[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; - auto input = hrt::create(dt_float32, {1, 4, 4, 1}, - {reinterpret_cast(a), sizeof(a)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); + runtime_tensor input; + if (expected.shape().size() == 3) { + float a[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; + input = hrt::create(dt_float32, {1, 4, 3}, + {reinterpret_cast(a), sizeof(a)}, true, + host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + } else if (expected.shape().size() == 4) { + float a[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; + input = hrt::create(dt_float32, {1, 4, 4, 1}, + {reinterpret_cast(a), sizeof(a)}, true, + host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + } + int64_t shape_array[] = {2, 2}; auto shape = hrt::create(dt_int64, {2}, {reinterpret_cast(shape_array), sizeof(shape_array)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); + int64_t crops_array[] = {0, 0, 0, 0}; auto crops = hrt::create(dt_int64, {2, 2}, {reinterpret_cast(crops_array), sizeof(crops_array)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); + auto output = 
kernels::stackvm::space_to_batch(input.impl(), shape.impl(), crops.impl()) .expect("space_to_batch failed"); @@ -93,6 +119,15 @@ TEST_P(SpaceToBatchTest, SpaceToBatch) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(lhs_type, j) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(lhs_type, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_space_to_batch.json b/tests/kernels/test_space_to_batch.json new file mode 100644 index 0000000000..c53c1b850a --- /dev/null +++ b/tests/kernels/test_space_to_batch.json @@ -0,0 +1,4 @@ +{ + "lhs_shape":[[4, 2, 1], [4, 2, 2, 1]], + "lhs_type":["dt_float32"] +} \ No newline at end of file diff --git a/tests/kernels/test_split.cpp b/tests/kernels/test_split.cpp index f8dc59b7ba..549fae278c 100644 --- a/tests/kernels/test_split.cpp +++ b/tests/kernels/test_split.cpp @@ -26,12 +26,17 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; +#define TEST_CASE_NAME "test_split" + class SplitTest : public KernelTest, - public ::testing::TestWithParam< - std::tuple> { + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape, value] = GetParam(); + READY_SUBCASE() + + auto l_shape = GetShapeArray("lhs_shape"); + auto value = GetNumber("axis"); + auto typecode = GetDataType("lhs_type"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) @@ -47,7 +52,7 @@ class SplitTest : public KernelTest, .expect("create tensor failed"); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; @@ -56,9 +61,7 @@ class SplitTest : public KernelTest, }; INSTANTIATE_TEST_SUITE_P(Split, SplitTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{4, 8, 8}), - testing::Values(0, -3))); + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(SplitTest, Split) { auto l_ort = runtime_tensor_2_ort_tensor(input); @@ -66,14 +69,14 @@ TEST_P(SplitTest, Split) { // expected size_t size = 0; int64_t sections_array[] = {2, 2}; - auto sextions = hrt::create(dt_int64, {2}, + auto sections = hrt::create(dt_int64, {2}, {reinterpret_cast(sections_array), sizeof(sections_array)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); auto output_ort1 = tensor_seq_get_value( - ortki_Split(l_ort, runtime_tensor_2_ort_tensor(sextions), axis_value), + ortki_Split(l_ort, runtime_tensor_2_ort_tensor(sections), axis_value), 0); void *ptr_ort1 = tensor_buffer(output_ort1, &size); dims_t shape1(tensor_rank(output_ort1)); @@ -85,7 +88,7 @@ TEST_P(SplitTest, Split) { .expect("create tensor failed"); auto output_ort2 = tensor_seq_get_value( - ortki_Split(l_ort, runtime_tensor_2_ort_tensor(sextions), axis_value), + ortki_Split(l_ort, runtime_tensor_2_ort_tensor(sections), axis_value), 1); void *ptr_ort2 = tensor_buffer(output_ort2, &size); dims_t shape2(tensor_rank(output_ort2)); @@ -101,7 +104,7 @@ TEST_P(SplitTest, Split) { // actual auto output = - kernels::stackvm::split(input.impl(), axis.impl(), sextions.impl()) + kernels::stackvm::split(input.impl(), axis.impl(), sections.impl()) .expect("split failed"); tuple actual(output.as().expect("as tensor failed")); @@ -109,6 +112,18 @@ TEST_P(SplitTest, Split) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(axis, j) + FOR_LOOP(lhs_type, k) 
+ SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(axis, j) + SPLIT_ELEMENT(lhs_type, k) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_split.json b/tests/kernels/test_split.json new file mode 100644 index 0000000000..8335c11a75 --- /dev/null +++ b/tests/kernels/test_split.json @@ -0,0 +1,5 @@ +{ + "lhs_shape":[[4, 8, 8]], + "axis":[0, -3], + "lhs_type":["dt_float32"] +} \ No newline at end of file diff --git a/tests/kernels/test_squeeze.cpp b/tests/kernels/test_squeeze.cpp index 1b74092121..74e22b473c 100644 --- a/tests/kernels/test_squeeze.cpp +++ b/tests/kernels/test_squeeze.cpp @@ -26,45 +26,51 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class SqueezeTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_squeeze" + +class squeezeTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto axis_value = GetAxesArray("axis_array"); + auto l_shape = GetShapeArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); + + axis_array = axis_value; } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; + axes_t axis_array; }; -INSTANTIATE_TEST_SUITE_P( - Squeeze, SqueezeTest, - testing::Combine(testing::Values(dt_float32, dt_int32, dt_int16, dt_float64, - dt_int8, dt_uint8, dt_uint16, dt_uint32, - dt_uint64, dt_int64, dt_bfloat16, - dt_float16, dt_boolean), - testing::Values(dims_t{1, 3, 1, 16}, dims_t{1, 3, 1, 1}))); +INSTANTIATE_TEST_SUITE_P(squeeze, squeezeTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); -TEST_P(SqueezeTest, Squeeze) { +TEST_P(squeezeTest, squeeze) { auto l_ort = runtime_tensor_2_ort_tensor(input); // expected - size_t size = 0; - int64_t axes_array[] = {0, 2}; - auto axes = hrt::create(dt_int64, {2}, - {reinterpret_cast(axes_array), - sizeof(axes_array)}, + size_t axis_size = axis_array.size(); + int64_t *axis_array1 = (int64_t *)malloc(axis_size * sizeof(int64_t)); + std::copy(axis_array.begin(), axis_array.end(), axis_array1); + auto axes = hrt::create(dt_int64, {axis_size}, + {reinterpret_cast(axis_array1), + axis_size * sizeof(int64_t)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); auto output_ort = ortki_Squeeze(l_ort, runtime_tensor_2_ort_tensor(axes)); + + size_t size = 0; void *ptr_ort = tensor_buffer(output_ort, &size); dims_t shape(tensor_rank(output_ort)); tensor_shape(output_ort, reinterpret_cast(shape.data())); @@ -93,6 +99,18 @@ TEST_P(SqueezeTest, Squeeze) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(axis_array, j) + FOR_LOOP(lhs_shape, i) + FOR_LOOP(lhs_type, k) + SPLIT_ELEMENT(axis_array, j) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(lhs_type, k) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_squeeze.json b/tests/kernels/test_squeeze.json new file mode 100644 index 0000000000..14a0b7a1fb --- /dev/null +++ b/tests/kernels/test_squeeze.json @@ -0,0 +1,5 @@ +{ + "axis_array":[[0], [0, 1]], + "lhs_shape":[[1, 1], [1, 1, 24], 
[1, 1], [1, 1, 16], [1, 1, 3, 16], [1, 1, 16, 16]], + "lhs_type":["dt_float32", "dt_int32", "dt_int64", "dt_uint32", "dt_int8", "dt_uint8", "dt_int16", "dt_uint16", "dt_uint64", "dt_float16", "dt_float64", "dt_bfloat16", "dt_boolean"] +} \ No newline at end of file diff --git a/tests/kernels/test_stack.cpp b/tests/kernels/test_stack.cpp index 10f667b835..a7901cdefb 100644 --- a/tests/kernels/test_stack.cpp +++ b/tests/kernels/test_stack.cpp @@ -26,33 +26,35 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class StackTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_stack" + +class StackTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto value = GetNumber("axis_array"); + auto l_shape = GetShapeArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); + + axes_value = value; } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; + int64_t axes_value; }; -INSTANTIATE_TEST_SUITE_P( - Stack, StackTest, - testing::Combine(testing::Values(dt_float32, dt_int32, dt_int16, dt_float64, - dt_int8, dt_uint8, dt_uint16, dt_uint32, - dt_uint64, dt_int64, dt_bfloat16, - dt_float16, dt_boolean), - testing::Values(dims_t{1}, dims_t{2}, dims_t{1, 1}, - dims_t{1, 2, 4, 8}, dims_t{4, 4, 8}))); +INSTANTIATE_TEST_SUITE_P(Stack, StackTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(StackTest, Stack) { @@ -61,7 +63,7 @@ TEST_P(StackTest, Stack) { std::vector fields; fields.push_back(field1); auto output_tuple = tuple(std::in_place, std::move(fields)); - int64_t axes_array[] = {-1}; + int64_t axes_array[] = {axes_value}; auto axes = hrt::create(dt_int64, {1}, {reinterpret_cast(axes_array), sizeof(axes_array)}, @@ -82,7 +84,7 @@ TEST_P(StackTest, Stack) { auto expected1 = kernels::stackvm::reshape(input.impl(), output_shape.impl()) .expect("stack failed"); - runtime_tensor expected(output.as().expect("as tensor failed")); + runtime_tensor expected(expected1.as().expect("as tensor failed")); bool result = is_same_tensor(expected, actual) || cosine_similarity_tensor(expected, actual); @@ -99,6 +101,18 @@ TEST_P(StackTest, Stack) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(axis_array, j) + FOR_LOOP(lhs_shape, i) + FOR_LOOP(lhs_type, k) + SPLIT_ELEMENT(axis_array, j) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(lhs_type, k) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_stack.json b/tests/kernels/test_stack.json new file mode 100644 index 0000000000..097aaf63bd --- /dev/null +++ b/tests/kernels/test_stack.json @@ -0,0 +1,5 @@ +{ + "axis_array":[0, -1], + "lhs_shape":[[1]], + "lhs_type":["dt_float32", "dt_int32", "dt_int64", "dt_uint32", "dt_int8", "dt_uint8", "dt_int16", "dt_uint16", "dt_uint64", "dt_float16", "dt_float64", "dt_bfloat16", "dt_boolean"] +} \ No newline at end of file diff --git a/tests/kernels/test_swish.cpp b/tests/kernels/test_swish.cpp index 10f9a4bee4..b8815527fe 100644 --- a/tests/kernels/test_swish.cpp +++ b/tests/kernels/test_swish.cpp @@ -22,16 +22,20 @@ #include #include +#define TEST_CASE_NAME "test_swish" + using 
namespace nncase; using namespace nncase::runtime; using namespace ortki; -class SwishTest - : public KernelTest, - public ::testing::TestWithParam> { +class SwishTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto l_shape = GetShapeArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) @@ -45,12 +49,8 @@ class SwishTest runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - Swish, SwishTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1}, dims_t{1, 3}, - dims_t{1, 3, 16, 16}, dims_t{1, 3, 16}, - dims_t{}))); +INSTANTIATE_TEST_SUITE_P(Swish, SwishTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(SwishTest, Swish) { auto l_ort = runtime_tensor_2_ort_tensor(input); @@ -85,6 +85,15 @@ TEST_P(SwishTest, Swish) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(lhs_type, j) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(lhs_type, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_swish.json b/tests/kernels/test_swish.json new file mode 100644 index 0000000000..2694fe1f34 --- /dev/null +++ b/tests/kernels/test_swish.json @@ -0,0 +1,4 @@ +{ + "lhs_shape":[[1, 3, 16, 16], [1, 3, 16], [8, 8], [16, 16], [1], [1, 3, 24, 24], []], + "lhs_type":["dt_float32", "dt_float16", "dt_float64"] +} \ No newline at end of file diff --git a/tests/kernels/test_tile.cpp b/tests/kernels/test_tile.cpp index dd3795848f..6fca5370f8 100644 --- a/tests/kernels/test_tile.cpp +++ b/tests/kernels/test_tile.cpp @@ -26,12 +26,16 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class TileTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_tile" + +class TileTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto l_shape = GetShapeArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) @@ -39,19 +43,14 @@ class TileTest init_tensor(input); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - Tile, TileTest, - testing::Combine(testing::Values(dt_float32, dt_int32, dt_int16, dt_float64, - dt_int8, dt_uint8, dt_uint16, dt_uint32, - dt_uint64, dt_int64, dt_boolean), - testing::Values(dims_t{1, 2, 4, 8}, - dims_t{1, 3, 16, 16}))); +INSTANTIATE_TEST_SUITE_P(Tile, TileTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(TileTest, Tile) { auto input_ort = runtime_tensor_2_ort_tensor(input); @@ -274,6 +273,15 @@ TEST_P(TileTest, Tile) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(lhs_type, k) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(lhs_type, k) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_tile.json b/tests/kernels/test_tile.json new file mode 100644 index 0000000000..5ec2eb400d --- /dev/null +++ b/tests/kernels/test_tile.json @@ -0,0 +1,4 @@ +{ + 
"lhs_shape":[[1, 2, 4, 8], [1, 3, 16, 16]], + "lhs_type":["dt_float32", "dt_int32", "dt_int16", "dt_float64", "dt_int8", "dt_uint8", "dt_uint16", "dt_uint32", "dt_uint64", "dt_int64", "dt_boolean"] +} \ No newline at end of file diff --git a/tests/kernels/test_topK.cpp b/tests/kernels/test_topK.cpp index 9edf0e855c..ecf0ed9ff5 100644 --- a/tests/kernels/test_topK.cpp +++ b/tests/kernels/test_topK.cpp @@ -26,20 +26,29 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class TopKTest - : public KernelTest, - public ::testing::TestWithParam< - std::tuple> { +#define TEST_CASE_NAME "test_topk" + +class TopKTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape, value1, value2, value3] = GetParam(); + READY_SUBCASE() + + auto l_shape = GetShapeArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); + auto value1 = GetNumber("value1"); + auto value2 = GetNumber("value2"); + auto value3 = GetNumber("value3"); + auto value4 = GetNumber("value4"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); - axis_value = value1; + axis_value = value1 > 0 ? value1 >= (int64_t)l_shape.size() ? 0 : value1 + : -value1 > (int64_t)l_shape.size() ? 0 + : value1; int64_t axis_array[] = {value1}; axis = hrt::create(dt_int64, {1}, {reinterpret_cast(axis_array), @@ -62,6 +71,8 @@ class TopKTest sizeof(sorted_array)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); + + k_value = value4; } void TearDown() override {} @@ -74,23 +85,26 @@ class TopKTest runtime_tensor largest; int64_t sorted_value; runtime_tensor sorted; + int64_t k_value; }; -INSTANTIATE_TEST_SUITE_P( - TopK, TopKTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1, 2, 4, 8}, dims_t{1, 3, 16, 16}, - dims_t{3, 3, 6}, dims_t{16, 16}, dims_t{1}, - dims_t{1, 3}), - testing::Values(0, -1), testing::Values(0, 1), - testing::Values(0, 1))); +INSTANTIATE_TEST_SUITE_P(TopK, TopKTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); + +// testing::Combine(testing::Values(dt_float32), +// testing::Values(dims_t{1, 2, 4, 8}, dims_t{1, 3, 16, 16}, +// dims_t{3, 3, 6}, dims_t{16, 16}, +// dims_t{1}, dims_t{1, 3}), +// testing::Values(0, -1 /*, 1, 2, 3*/), +// testing::Values(0, 1), testing::Values(0, 1), +// testing::Values(1 /*, 2, 4, 16*/))); TEST_P(TopKTest, TopK) { auto l_ort = runtime_tensor_2_ort_tensor(input); // expected size_t size = 0; - int64_t k_array[] = {1}; + int64_t k_array[] = {k_value}; auto k = hrt::create(dt_int64, {1}, {reinterpret_cast(k_array), sizeof(k_array)}, @@ -136,6 +150,27 @@ TEST_P(TopKTest, TopK) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_shape, i) + FOR_LOOP(lhs_type, k) + FOR_LOOP(value1, l) + FOR_LOOP(value2, m) + FOR_LOOP(value3, n) + FOR_LOOP(value4, o) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(lhs_type, k) + SPLIT_ELEMENT(value1, l) + SPLIT_ELEMENT(value2, m) + SPLIT_ELEMENT(value3, n) + SPLIT_ELEMENT(value4, o) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_topk.json b/tests/kernels/test_topk.json new file mode 100644 index 0000000000..e81402d777 --- /dev/null +++ b/tests/kernels/test_topk.json @@ -0,0 +1,8 @@ +{ + "lhs_shape":[[1, 2, 4, 8], [1, 3, 
16, 16], [3, 3, 6], [16, 16], [1], [1, 3]], + "lhs_type":["dt_float32"], + "value1": [0, -1], + "value2": [0, 1], + "value3": [0, 1], + "value4": [1] +} \ No newline at end of file diff --git a/tests/kernels/test_transpose.cpp b/tests/kernels/test_transpose.cpp index 9967b27f5a..78d918441c 100644 --- a/tests/kernels/test_transpose.cpp +++ b/tests/kernels/test_transpose.cpp @@ -26,82 +26,101 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class TransposeTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_transpose" + +class TransposeTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("lhs_type"); + auto l_shape = GetShapeArray("i_shape"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); + + perm_value = GetAxesArray("perm"); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; + axes_t perm_value; }; -INSTANTIATE_TEST_SUITE_P( - Transpose, TransposeTest, - testing::Combine(testing::Values(dt_float32, dt_int32, dt_int16, dt_float64, - dt_int8, dt_uint8, dt_uint16, dt_uint32, - dt_uint64, dt_int64, dt_bfloat16, - dt_float16, dt_boolean), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{1, 2, 4, 8}, - dims_t{2, 2, 4, 4}))); +INSTANTIATE_TEST_SUITE_P(Transpose, TransposeTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(TransposeTest, Transpose) { - auto input_ort = runtime_tensor_2_ort_tensor(input); - int64_t perm[] = {1, 0, 3, 2}; - size_t perm_size = 4; - - // expected - auto output_ort = ortki_Transpose(input_ort, perm, perm_size); - size_t size = 0; - void *ptr_ort = tensor_buffer(output_ort, &size); - dims_t shape(tensor_rank(output_ort)); - tensor_shape(output_ort, reinterpret_cast(shape.data())); - auto expected = hrt::create(input.datatype(), shape, - {reinterpret_cast(ptr_ort), size}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - - auto perm1 = - hrt::create(nncase::dt_int64, {4}, - {reinterpret_cast(perm), sizeof(perm)}, true, - host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - - int32_t perm_size_ptr[] = {4}; - auto perm_size1 = hrt::create(nncase::dt_int32, {1}, - {reinterpret_cast(perm_size_ptr), - sizeof(perm_size_ptr)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - - auto output = kernels::stackvm::transpose(input.impl(), perm1.impl()) - .expect("transpose failed"); - runtime_tensor actual(output.as().expect("as tensor failed")); - - bool result = is_same_tensor(expected, actual) || - cosine_similarity_tensor(expected, actual); - - if (!result) { - std::cout << "actual "; - print_runtime_tensor(actual); - std::cout << "expected "; - print_runtime_tensor(expected); - } - // compare - EXPECT_TRUE(result); + if (input.shape().size() == perm_value.size()) { + size_t perm_size = perm_value.size(); + int64_t *perm = (int64_t *)malloc(perm_size * sizeof(int64_t)); + std::copy(perm_value.begin(), perm_value.end(), perm); + auto input_ort = runtime_tensor_2_ort_tensor(input); + + auto perm_ptr = hrt::create(nncase::dt_int64, {perm_size}, + {reinterpret_cast(perm), + perm_size * sizeof(int64_t)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + // expected + auto output_ort = ortki_Transpose(input_ort, 
perm, perm_size); + size_t size = 0; + void *ptr_ort = tensor_buffer(output_ort, &size); + dims_t shape(tensor_rank(output_ort)); + tensor_shape(output_ort, reinterpret_cast(shape.data())); + auto expected = + hrt::create(input.datatype(), shape, + {reinterpret_cast(ptr_ort), size}, true, + host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + int32_t perm_size_ptr[] = {4}; + auto perm_size1 = + hrt::create(nncase::dt_int32, {1}, + {reinterpret_cast(perm_size_ptr), + sizeof(perm_size_ptr)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + auto output = kernels::stackvm::transpose(input.impl(), perm_ptr.impl()) + .expect("transpose failed"); + runtime_tensor actual(output.as().expect("as tensor failed")); + + bool result = is_same_tensor(expected, actual) || + cosine_similarity_tensor(expected, actual); + + if (!result) { + std::cout << "actual "; + print_runtime_tensor(actual); + std::cout << "expected "; + print_runtime_tensor(expected); + } + + // compare + EXPECT_TRUE(result); + } } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_type, i) + FOR_LOOP(i_shape, j) + FOR_LOOP(perm, k) + SPLIT_ELEMENT(lhs_type, i) + SPLIT_ELEMENT(i_shape, j) + SPLIT_ELEMENT(perm, k) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_transpose.json b/tests/kernels/test_transpose.json new file mode 100644 index 0000000000..6e2ba436b3 --- /dev/null +++ b/tests/kernels/test_transpose.json @@ -0,0 +1,5 @@ +{ + "i_shape":[[1, 3, 16, 16], [1, 2, 4, 8], [2, 2, 4, 4], [1, 3, 16], [1, 2, 4], [2, 2, 4], [16, 16], [2, 4], [4, 4]], + "lhs_type":["dt_float32", "dt_int32", "dt_int16", "dt_int64", "dt_int8", "dt_uint8", "dt_uint16", "dt_uint32", "dt_uint64", "dt_bfloat16", "dt_float64", "dt_float16", "dt_boolean"], + "perm": [[0, 1, 2, 3], [0, 1, 3, 2], [0, 2, 1, 3], [0, 2, 3, 1], [0, 3, 1, 2], [0, 3, 2, 1], [1, 0, 3, 2], [1, 3, 2, 0], [1, 0, 2, 3], [1, 2, 0, 3], [1, 2, 3, 0], [1, 3, 0, 2], [2, 0, 1, 3], [2, 0, 3, 1], [2, 1, 0, 3], [2, 1, 3, 0], [2, 3, 0, 1], [2, 3, 1, 0], [3, 0, 1, 2], [3, 0, 2, 1], [3, 1, 2, 0], [3, 1, 0, 2], [3, 2, 1, 0], [3, 2, 0, 1], [0, 1, 2], [0, 2, 1], [1, 0, 2], [1, 2, 0], [2, 0, 1], [2, 1, 0], [0, 1], [1, 0]] +} \ No newline at end of file diff --git a/tests/kernels/test_trilu.cpp b/tests/kernels/test_trilu.cpp new file mode 100644 index 0000000000..34ffa3b50b --- /dev/null +++ b/tests/kernels/test_trilu.cpp @@ -0,0 +1,110 @@ +/* Copyright 2019-2021 Canaan Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "kernel_test.h" +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace nncase; +using namespace nncase::runtime; +using namespace ortki; + +class TriluTest : public KernelTest, + public ::testing::TestWithParam< + std::tuple> { + public: + void SetUp() override { + auto &&[typecode, l_shape, alpha_value] = GetParam(); + + input = + hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + init_tensor(input); + + alpha = alpha_value; + } + + void TearDown() override {} + + protected: + runtime_tensor input; + int64_t alpha; +}; + +INSTANTIATE_TEST_SUITE_P(trilu, TriluTest, + testing::Combine(testing::Values(dt_uint8), + testing::Values(dims_t{4, 5}), + testing::Values(-1))); + +TEST_P(TriluTest, trilu) { + // auto l_ort = runtime_tensor_2_ort_tensor(input); + + int64_t a_ptr[] = {alpha}; + auto a = hrt::create(nncase::dt_int64, {1}, + {reinterpret_cast(a_ptr), sizeof(a_ptr)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + // todo ort no implement Trilu + // expected + // auto output_ort = ortki_Trilu(l_ort, runtime_tensor_2_ort_tensor(a), + // 0); size_t size = 0; void *ptr_ort = tensor_buffer(output_ort, &size); + // dims_t shape(tensor_rank(output_ort)); + // tensor_shape(output_ort, reinterpret_cast(shape.data())); + // auto expected = hrt::create(input.datatype(), shape, + // {reinterpret_cast(ptr_ort), + // size}, true, + // host_runtime_tensor::pool_cpu_only) + // .expect("create tensor failed"); + + // actual + int32_t upper_ptr[] = {0}; + auto upper = hrt::create(nncase::dt_int32, {1}, + {reinterpret_cast(upper_ptr), + sizeof(upper_ptr)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + + auto output = kernels::stackvm::trilu(input.impl(), a.impl(), upper.impl()) + .expect("trilu failed"); + runtime_tensor actual(output.as().expect("as tensor failed")); + + auto output1 = kernels::stackvm::trilu(input.impl(), a.impl(), upper.impl()) + .expect("trilu failed"); + runtime_tensor expected(output.as().expect("as tensor failed")); + + bool result = is_same_tensor(expected, actual) || + cosine_similarity_tensor(expected, actual); + + if (!result) { + std::cout << "actual "; + print_runtime_tensor(actual); + std::cout << "expected "; + print_runtime_tensor(expected); + } + + // compare + EXPECT_TRUE(result); +} + +int main(int argc, char *argv[]) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} \ No newline at end of file diff --git a/tests/kernels/test_unary.json b/tests/kernels/test_unary.json new file mode 100644 index 0000000000..26585afb47 --- /dev/null +++ b/tests/kernels/test_unary.json @@ -0,0 +1,4 @@ +{ + "i_shape":[[1, 3, 16, 16], [3, 16, 16], [3, 16, 1], [16, 16], [16, 1], [1, 16, 1], [16], [1], []], + "lhs_type":["dt_float32", "dt_int32", "dt_int64", "dt_float64", "dt_float16"] +} \ No newline at end of file diff --git a/tests/kernels/test_unary_abs.cpp b/tests/kernels/test_unary_abs.cpp index 84d2b13525..ae12d4ed59 100644 --- a/tests/kernels/test_unary_abs.cpp +++ b/tests/kernels/test_unary_abs.cpp @@ -26,32 +26,31 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class UnaryTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_unary" + +class UnaryTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, i_shape] = GetParam(); + READY_SUBCASE() + + auto 
typecode = GetDataType("lhs_type"); + auto l_shape = GetShapeArray("i_shape"); input = - hrt::create(typecode, i_shape, host_runtime_tensor::pool_cpu_only) + hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - Unary, UnaryTest, - testing::Combine(testing::Values(dt_float32), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{3, 16, 16}, - dims_t{3, 16, 1}, dims_t{16, 16}, - dims_t{16, 1}, dims_t{1, 16, 1}, - dims_t{16}, dims_t{1}, dims_t{}))); +INSTANTIATE_TEST_SUITE_P(Unary, UnaryTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(UnaryTest, abs) { OrtKITensor *orts[1]; @@ -89,6 +88,15 @@ TEST_P(UnaryTest, abs) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_type, i) + FOR_LOOP(i_shape, j) + SPLIT_ELEMENT(lhs_type, i) + SPLIT_ELEMENT(i_shape, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_unary_acos.cpp b/tests/kernels/test_unary_acos.cpp index 2176c45664..cf406cd756 100644 --- a/tests/kernels/test_unary_acos.cpp +++ b/tests/kernels/test_unary_acos.cpp @@ -26,32 +26,31 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class UnaryTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_unary_other_type" + +class UnaryTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, i_shape] = GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("other_type"); + auto l_shape = GetShapeArray("i_shape"); input = - hrt::create(typecode, i_shape, host_runtime_tensor::pool_cpu_only) + hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - Unary, UnaryTest, - testing::Combine( - testing::Values(dt_float32 /*, dt_int32, dt_int64, dt_float64*/), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{3, 16, 16}, - dims_t{3, 16, 1}, dims_t{16, 16}, dims_t{16, 1}, - dims_t{1, 16, 1}, dims_t{16}, dims_t{1}, dims_t{}))); +INSTANTIATE_TEST_SUITE_P(Unary, UnaryTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(UnaryTest, acos) { OrtKITensor *orts[1]; @@ -89,6 +88,15 @@ TEST_P(UnaryTest, acos) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(other_type, i) + FOR_LOOP(i_shape, j) + SPLIT_ELEMENT(other_type, i) + SPLIT_ELEMENT(i_shape, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_unary_acosh.cpp b/tests/kernels/test_unary_acosh.cpp index c60f04ec24..baefa786fd 100644 --- a/tests/kernels/test_unary_acosh.cpp +++ b/tests/kernels/test_unary_acosh.cpp @@ -26,20 +26,24 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class UnaryTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_unary_other_type" + +class UnaryTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, i_shape] = 
GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("other_type"); + auto l_shape = GetShapeArray("i_shape"); input = - hrt::create(typecode, i_shape, host_runtime_tensor::pool_cpu_only) + hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } void init_tensor(runtime_tensor &tensor) override { auto dtype = tensor.datatype(); @@ -179,13 +183,8 @@ class UnaryTest runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - Unary, UnaryTest, - testing::Combine( - testing::Values(dt_float32 /*, dt_int32, dt_int64, dt_float64*/), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{3, 16, 16}, - dims_t{3, 16, 1}, dims_t{16, 16}, dims_t{16, 1}, - dims_t{1, 16, 1}, dims_t{16}, dims_t{1}, dims_t{}))); +INSTANTIATE_TEST_SUITE_P(Unary, UnaryTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(UnaryTest, acosh) { OrtKITensor *orts[1]; @@ -223,6 +222,15 @@ TEST_P(UnaryTest, acosh) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(other_type, i) + FOR_LOOP(i_shape, j) + SPLIT_ELEMENT(other_type, i) + SPLIT_ELEMENT(i_shape, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_unary_asin.cpp b/tests/kernels/test_unary_asin.cpp index 438996c346..a7841ee9f0 100644 --- a/tests/kernels/test_unary_asin.cpp +++ b/tests/kernels/test_unary_asin.cpp @@ -26,32 +26,31 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class UnaryTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_unary_other_type" + +class UnaryTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, i_shape] = GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("other_type"); + auto l_shape = GetShapeArray("i_shape"); input = - hrt::create(typecode, i_shape, host_runtime_tensor::pool_cpu_only) + hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - Unary, UnaryTest, - testing::Combine( - testing::Values(dt_float32 /*, dt_int32, dt_int64, dt_float64*/), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{3, 16, 16}, - dims_t{3, 16, 1}, dims_t{16, 16}, dims_t{16, 1}, - dims_t{1, 16, 1}, dims_t{16}, dims_t{1}, dims_t{}))); +INSTANTIATE_TEST_SUITE_P(Unary, UnaryTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(UnaryTest, asin) { OrtKITensor *orts[1]; @@ -89,6 +88,15 @@ TEST_P(UnaryTest, asin) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(other_type, i) + FOR_LOOP(i_shape, j) + SPLIT_ELEMENT(other_type, i) + SPLIT_ELEMENT(i_shape, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_unary_asinh.cpp b/tests/kernels/test_unary_asinh.cpp index c10da0c4d0..9a5fb65bf6 100644 --- a/tests/kernels/test_unary_asinh.cpp +++ b/tests/kernels/test_unary_asinh.cpp @@ -26,32 +26,31 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class UnaryTest - : public KernelTest, - public ::testing::TestWithParam> 
{ +#define TEST_CASE_NAME "test_unary_other_type" + +class UnaryTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, i_shape] = GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("other_type"); + auto l_shape = GetShapeArray("i_shape"); input = - hrt::create(typecode, i_shape, host_runtime_tensor::pool_cpu_only) + hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - Unary, UnaryTest, - testing::Combine( - testing::Values(dt_float32 /*, dt_int32, dt_int64, dt_float64*/), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{3, 16, 16}, - dims_t{3, 16, 1}, dims_t{16, 16}, dims_t{16, 1}, - dims_t{1, 16, 1}, dims_t{16}, dims_t{1}, dims_t{}))); +INSTANTIATE_TEST_SUITE_P(Unary, UnaryTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(UnaryTest, asinh) { OrtKITensor *orts[1]; @@ -89,6 +88,15 @@ TEST_P(UnaryTest, asinh) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(other_type, i) + FOR_LOOP(i_shape, j) + SPLIT_ELEMENT(other_type, i) + SPLIT_ELEMENT(i_shape, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_unary_ceil.cpp b/tests/kernels/test_unary_ceil.cpp index f4400cb4a1..043850ddc5 100644 --- a/tests/kernels/test_unary_ceil.cpp +++ b/tests/kernels/test_unary_ceil.cpp @@ -26,32 +26,31 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class UnaryTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_unary_other_type" + +class UnaryTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, i_shape] = GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("other_type"); + auto l_shape = GetShapeArray("i_shape"); input = - hrt::create(typecode, i_shape, host_runtime_tensor::pool_cpu_only) + hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - Unary, UnaryTest, - testing::Combine( - testing::Values(dt_float32 /*, dt_int32, dt_int64, dt_float64*/), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{3, 16, 16}, - dims_t{3, 16, 1}, dims_t{16, 16}, dims_t{16, 1}, - dims_t{1, 16, 1}, dims_t{16}, dims_t{1}, dims_t{}))); +INSTANTIATE_TEST_SUITE_P(Unary, UnaryTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(UnaryTest, ceil) { OrtKITensor *orts[1]; @@ -89,6 +88,15 @@ TEST_P(UnaryTest, ceil) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(other_type, i) + FOR_LOOP(i_shape, j) + SPLIT_ELEMENT(other_type, i) + SPLIT_ELEMENT(i_shape, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_unary_cos.cpp b/tests/kernels/test_unary_cos.cpp index f3b5d6e881..11de46cb33 100644 --- a/tests/kernels/test_unary_cos.cpp +++ b/tests/kernels/test_unary_cos.cpp @@ -26,32 +26,31 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; 
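
> Note (not part of the patch): from here on, every unary test file repeats the same JSON-driven fixture and `main()`; only the `TEST_CASE_NAME` group (`test_unary_other_type`, `..._type1`, `..._type2`) and the operator exercised in the `TEST_P` body change. One possible follow-up would be to hoist the common part into a shared header. The sketch below is illustrative only: the header name is hypothetical, and it assumes the `kernel_test.h` macros (`READY_SUBCASE`, `CLEAR_SUBCASE`, `GetDataType`, `GetShapeArray`, `MAX_CASE_NUM`) behave exactly as they are used in the surrounding tests.

```cpp
// tests/kernels/unary_test_fixture.h -- hypothetical shared header, not in
// this patch. Each test_unary_*.cpp would #define TEST_CASE_NAME before
// including it and then only supply its own TEST_P body (plus an
// init_tensor override where needed, e.g. acosh/log).
#pragma once
#include "kernel_test.h"
#include <gtest/gtest.h>
#include <tuple>

// The existing .cpp files already pull in these namespaces; repeated here so
// the sketch is self-contained.
using namespace nncase;
using namespace nncase::runtime;

class UnaryTest : public KernelTest,
                  public ::testing::TestWithParam<std::tuple<int>> {
  public:
    void SetUp() override {
        READY_SUBCASE() // load the current sub-case written by main()

        auto typecode = GetDataType("other_type");
        auto l_shape = GetShapeArray("i_shape");

        input =
            hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only)
                .expect("create tensor failed");
        init_tensor(input);
    }

    void TearDown() override { CLEAR_SUBCASE() }

  protected:
    runtime_tensor input;
};

INSTANTIATE_TEST_SUITE_P(Unary, UnaryTest,
                         testing::Combine(testing::Range(0, MAX_CASE_NUM)));
```
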
-class UnaryTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_unary_other_type" + +class UnaryTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, i_shape] = GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("other_type"); + auto l_shape = GetShapeArray("i_shape"); input = - hrt::create(typecode, i_shape, host_runtime_tensor::pool_cpu_only) + hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - Unary, UnaryTest, - testing::Combine( - testing::Values(dt_float32 /*, dt_int32, dt_int64, dt_float64*/), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{3, 16, 16}, - dims_t{3, 16, 1}, dims_t{16, 16}, dims_t{16, 1}, - dims_t{1, 16, 1}, dims_t{16}, dims_t{1}, dims_t{}))); +INSTANTIATE_TEST_SUITE_P(Unary, UnaryTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(UnaryTest, cos) { OrtKITensor *orts[1]; @@ -89,6 +88,15 @@ TEST_P(UnaryTest, cos) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(other_type, i) + FOR_LOOP(i_shape, j) + SPLIT_ELEMENT(other_type, i) + SPLIT_ELEMENT(i_shape, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_unary_cosh.cpp b/tests/kernels/test_unary_cosh.cpp index 7ad13608da..57f097f3d2 100644 --- a/tests/kernels/test_unary_cosh.cpp +++ b/tests/kernels/test_unary_cosh.cpp @@ -26,34 +26,33 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class UnaryTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_unary_other_type" + +class UnaryTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, i_shape] = GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("other_type"); + auto l_shape = GetShapeArray("i_shape"); input = - hrt::create(typecode, i_shape, host_runtime_tensor::pool_cpu_only) + hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - Unary, UnaryTest, - testing::Combine( - testing::Values(dt_float32 /*, dt_int32, dt_int64, dt_float64*/), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{3, 16, 16}, - dims_t{3, 16, 1}, dims_t{16, 16}, dims_t{16, 1}, - dims_t{1, 16, 1}, dims_t{16}, dims_t{1}, dims_t{}))); +INSTANTIATE_TEST_SUITE_P(Unary, UnaryTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); -TEST_P(UnaryTest, abs) { +TEST_P(UnaryTest, cosh) { OrtKITensor *orts[1]; orts[0] = runtime_tensor_2_ort_tensor(input); @@ -89,6 +88,15 @@ TEST_P(UnaryTest, abs) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(other_type, i) + FOR_LOOP(i_shape, j) + SPLIT_ELEMENT(other_type, i) + SPLIT_ELEMENT(i_shape, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_unary_exp.cpp b/tests/kernels/test_unary_exp.cpp index 6b30c585bb..bc825c453a 100644 --- a/tests/kernels/test_unary_exp.cpp 
+++ b/tests/kernels/test_unary_exp.cpp @@ -26,32 +26,31 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class UnaryTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_unary_other_type1" + +class UnaryTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, i_shape] = GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("other_type"); + auto l_shape = GetShapeArray("i_shape"); input = - hrt::create(typecode, i_shape, host_runtime_tensor::pool_cpu_only) + hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - Unary, UnaryTest, - testing::Combine( - testing::Values(dt_float32 /*, dt_int32, dt_int64, dt_float64*/), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{3, 16, 16}, - dims_t{3, 16, 1}, dims_t{16, 16}, dims_t{16, 1}, - dims_t{1, 16, 1}, dims_t{16}, dims_t{1}, dims_t{}))); +INSTANTIATE_TEST_SUITE_P(Unary, UnaryTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(UnaryTest, exp) { OrtKITensor *orts[1]; @@ -89,6 +88,15 @@ TEST_P(UnaryTest, exp) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(other_type, i) + FOR_LOOP(i_shape, j) + SPLIT_ELEMENT(other_type, i) + SPLIT_ELEMENT(i_shape, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_unary_floor.cpp b/tests/kernels/test_unary_floor.cpp index faa16840f6..7ec8b089a6 100644 --- a/tests/kernels/test_unary_floor.cpp +++ b/tests/kernels/test_unary_floor.cpp @@ -26,32 +26,31 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class UnaryTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_unary_other_type1" + +class UnaryTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, i_shape] = GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("other_type"); + auto l_shape = GetShapeArray("i_shape"); input = - hrt::create(typecode, i_shape, host_runtime_tensor::pool_cpu_only) + hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - Unary, UnaryTest, - testing::Combine( - testing::Values(dt_float32 /*, dt_int32, dt_int64, dt_float64*/), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{3, 16, 16}, - dims_t{3, 16, 1}, dims_t{16, 16}, dims_t{16, 1}, - dims_t{1, 16, 1}, dims_t{16}, dims_t{1}, dims_t{}))); +INSTANTIATE_TEST_SUITE_P(Unary, UnaryTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(UnaryTest, floor) { OrtKITensor *orts[1]; @@ -89,6 +88,15 @@ TEST_P(UnaryTest, floor) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(other_type, i) + FOR_LOOP(i_shape, j) + SPLIT_ELEMENT(other_type, i) + SPLIT_ELEMENT(i_shape, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_unary_log.cpp b/tests/kernels/test_unary_log.cpp 
index e85e1d730c..65c0c4e50c 100644 --- a/tests/kernels/test_unary_log.cpp +++ b/tests/kernels/test_unary_log.cpp @@ -26,20 +26,24 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class UnaryTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_unary_other_type1" + +class UnaryTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, i_shape] = GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("other_type"); + auto l_shape = GetShapeArray("i_shape"); input = - hrt::create(typecode, i_shape, host_runtime_tensor::pool_cpu_only) + hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } void init_tensor(runtime_tensor &tensor) override { auto dtype = tensor.datatype(); @@ -179,13 +183,8 @@ class UnaryTest runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - Unary, UnaryTest, - testing::Combine( - testing::Values(dt_float32 /*, dt_int32, dt_int64, dt_float64*/), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{3, 16, 16}, - dims_t{3, 16, 1}, dims_t{16, 16}, dims_t{16, 1}, - dims_t{1, 16, 1}, dims_t{16}, dims_t{1}, dims_t{}))); +INSTANTIATE_TEST_SUITE_P(Unary, UnaryTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(UnaryTest, log) { OrtKITensor *orts[1]; @@ -223,6 +222,15 @@ TEST_P(UnaryTest, log) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(other_type, i) + FOR_LOOP(i_shape, j) + SPLIT_ELEMENT(other_type, i) + SPLIT_ELEMENT(i_shape, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_unary_logical_not.cpp b/tests/kernels/test_unary_logical_not.cpp index dd9f9caf2f..5b957079a1 100644 --- a/tests/kernels/test_unary_logical_not.cpp +++ b/tests/kernels/test_unary_logical_not.cpp @@ -26,32 +26,31 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class UnaryTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_unary_other_type2" + +class UnaryTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, i_shape] = GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("other_type"); + auto l_shape = GetShapeArray("i_shape"); input = - hrt::create(typecode, i_shape, host_runtime_tensor::pool_cpu_only) + hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - Unary, UnaryTest, - testing::Combine(testing::Values(dt_boolean), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{3, 16, 16}, - dims_t{3, 16, 1}, dims_t{16, 16}, - dims_t{16, 1}, dims_t{1, 16, 1}, - dims_t{16}, dims_t{1}, dims_t{}))); +INSTANTIATE_TEST_SUITE_P(Unary, UnaryTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(UnaryTest, logical_not) { OrtKITensor *orts[1]; @@ -90,6 +89,15 @@ TEST_P(UnaryTest, logical_not) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(other_type, i) + FOR_LOOP(i_shape, j) + SPLIT_ELEMENT(other_type, i) + SPLIT_ELEMENT(i_shape, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + 
FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_unary_neg.cpp b/tests/kernels/test_unary_neg.cpp index f2b607a3d7..c8631001b3 100644 --- a/tests/kernels/test_unary_neg.cpp +++ b/tests/kernels/test_unary_neg.cpp @@ -26,32 +26,31 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class UnaryTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_unary" + +class UnaryTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, i_shape] = GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("lhs_type"); + auto l_shape = GetShapeArray("i_shape"); input = - hrt::create(typecode, i_shape, host_runtime_tensor::pool_cpu_only) + hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - Unary, UnaryTest, - testing::Combine( - testing::Values(dt_float32 /*, dt_int32, dt_int64, dt_float64*/), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{3, 16, 16}, - dims_t{3, 16, 1}, dims_t{16, 16}, dims_t{16, 1}, - dims_t{1, 16, 1}, dims_t{16}, dims_t{1}, dims_t{}))); +INSTANTIATE_TEST_SUITE_P(Unary, UnaryTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(UnaryTest, neg) { OrtKITensor *orts[1]; @@ -89,6 +88,15 @@ TEST_P(UnaryTest, neg) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_type, i) + FOR_LOOP(i_shape, j) + SPLIT_ELEMENT(lhs_type, i) + SPLIT_ELEMENT(i_shape, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_unary_other_type.json b/tests/kernels/test_unary_other_type.json new file mode 100644 index 0000000000..5b84555925 --- /dev/null +++ b/tests/kernels/test_unary_other_type.json @@ -0,0 +1,4 @@ +{ + "i_shape":[[1, 3, 16, 16], [3, 16, 16], [3, 16, 1], [16, 16], [16, 1], [1, 16, 1], [16], [1], []], + "other_type": ["dt_float32", "dt_float16"] +} \ No newline at end of file diff --git a/tests/kernels/test_unary_other_type1.json b/tests/kernels/test_unary_other_type1.json new file mode 100644 index 0000000000..ce8779a683 --- /dev/null +++ b/tests/kernels/test_unary_other_type1.json @@ -0,0 +1,4 @@ +{ + "i_shape":[[1, 3, 16, 16], [3, 16, 16], [3, 16, 1], [16, 16], [16, 1], [1, 16, 1], [16], [1], []], + "other_type": ["dt_float32", "dt_float16", "dt_float64"] +} \ No newline at end of file diff --git a/tests/kernels/test_unary_other_type2.json b/tests/kernels/test_unary_other_type2.json new file mode 100644 index 0000000000..fa8b0fed1b --- /dev/null +++ b/tests/kernels/test_unary_other_type2.json @@ -0,0 +1,4 @@ +{ + "i_shape":[[1, 3, 16, 16], [3, 16, 16], [3, 16, 1], [16, 16], [16, 1], [1, 16, 1], [16], [1], []], + "other_type": ["dt_boolean"] +} \ No newline at end of file diff --git a/tests/kernels/test_unary_round.cpp b/tests/kernels/test_unary_round.cpp index f205a132cd..b5ab62aa01 100644 --- a/tests/kernels/test_unary_round.cpp +++ b/tests/kernels/test_unary_round.cpp @@ -26,34 +26,33 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class UnaryTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_unary_other_type1" + 
+class UnaryTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, i_shape] = GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("other_type"); + auto l_shape = GetShapeArray("i_shape"); input = - hrt::create(typecode, i_shape, host_runtime_tensor::pool_cpu_only) + hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - Unary, UnaryTest, - testing::Combine( - testing::Values(dt_float32 /*, dt_int32, dt_int64, dt_float64*/), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{3, 16, 16}, - dims_t{3, 16, 1}, dims_t{16, 16}, dims_t{16, 1}, - dims_t{1, 16, 1}, dims_t{16}, dims_t{1}, dims_t{}))); +INSTANTIATE_TEST_SUITE_P(Unary, UnaryTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); -TEST_P(UnaryTest, roound) { +TEST_P(UnaryTest, round) { OrtKITensor *orts[1]; orts[0] = runtime_tensor_2_ort_tensor(input); @@ -78,6 +77,7 @@ TEST_P(UnaryTest, roound) { cosine_similarity_tensor(expected, actual); if (!result) { + print_runtime_tensor(input); std::cout << "actual "; print_runtime_tensor(actual); std::cout << "expected "; @@ -89,6 +89,15 @@ TEST_P(UnaryTest, roound) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(other_type, i) + FOR_LOOP(i_shape, j) + SPLIT_ELEMENT(other_type, i) + SPLIT_ELEMENT(i_shape, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_unary_rsqrt.cpp b/tests/kernels/test_unary_rsqrt.cpp index b50be7902e..aa7fdd7d59 100644 --- a/tests/kernels/test_unary_rsqrt.cpp +++ b/tests/kernels/test_unary_rsqrt.cpp @@ -26,20 +26,24 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class UnaryTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_unary_other_type" + +class UnaryTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, i_shape] = GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("other_type"); + auto l_shape = GetShapeArray("i_shape"); input = - hrt::create(typecode, i_shape, host_runtime_tensor::pool_cpu_only) + hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } void init_tensor(runtime_tensor &tensor) override { auto dtype = tensor.datatype(); @@ -158,6 +162,18 @@ class UnaryTest }); break; } + case dt_float16: { + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_real_distribution dis(1.0f, 2.0f); + NNCASE_UNUSED auto res = kernels::stackvm::apply( + tensor.shape(), + [&](gsl::span index) -> result { + get(tensor, index) = static_cast(dis(gen)); + return ok(); + }); + break; + } case dt_float64: { std::random_device rd; std::mt19937 gen(rd()); @@ -179,25 +195,31 @@ class UnaryTest runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - Unary, UnaryTest, - testing::Combine( - testing::Values(dt_float32 /*, dt_int32, dt_int64, dt_float64*/), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{3, 16, 16}, - dims_t{3, 16, 1}, dims_t{16, 16}, dims_t{16, 1}, - dims_t{1, 16, 1}, dims_t{16}, dims_t{1}, dims_t{}))); +INSTANTIATE_TEST_SUITE_P(Unary, 
UnaryTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(UnaryTest, rsqrt) { OrtKITensor *orts[1]; orts[0] = runtime_tensor_2_ort_tensor(input); // expected - float one_array[] = {1}; - auto one = hrt::create(input.datatype(), {1}, - {reinterpret_cast(one_array), - sizeof(one_array)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); + runtime_tensor one; + if (input.datatype() == dt_float16) { + half one_array[] = {(half)1}; + one = hrt::create( + input.datatype(), {1}, + {reinterpret_cast(one_array), sizeof(one_array)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + } else { + float one_array[] = {1}; + one = hrt::create( + input.datatype(), {1}, + {reinterpret_cast(one_array), sizeof(one_array)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); + } + auto output_ort = ortki_Sqrt(ortki_Div(runtime_tensor_2_ort_tensor(one), orts[0])); size_t size = 0; @@ -220,6 +242,7 @@ TEST_P(UnaryTest, rsqrt) { cosine_similarity_tensor(expected, actual); if (!result) { + print_runtime_tensor(input); std::cout << "actual "; print_runtime_tensor(actual); std::cout << "expected "; @@ -231,6 +254,15 @@ TEST_P(UnaryTest, rsqrt) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(other_type, i) + FOR_LOOP(i_shape, j) + SPLIT_ELEMENT(other_type, i) + SPLIT_ELEMENT(i_shape, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_unary_sign.cpp b/tests/kernels/test_unary_sign.cpp index 6ffae85e7d..100178a35d 100644 --- a/tests/kernels/test_unary_sign.cpp +++ b/tests/kernels/test_unary_sign.cpp @@ -26,32 +26,31 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class UnaryTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_unary" + +class UnaryTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, i_shape] = GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("lhs_type"); + auto l_shape = GetShapeArray("i_shape"); input = - hrt::create(typecode, i_shape, host_runtime_tensor::pool_cpu_only) + hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - Unary, UnaryTest, - testing::Combine( - testing::Values(dt_float32 /*, dt_int32, dt_int64, dt_float64*/), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{3, 16, 16}, - dims_t{3, 16, 1}, dims_t{16, 16}, dims_t{16, 1}, - dims_t{1, 16, 1}, dims_t{16}, dims_t{1}, dims_t{}))); +INSTANTIATE_TEST_SUITE_P(Unary, UnaryTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(UnaryTest, sign) { OrtKITensor *orts[1]; @@ -89,6 +88,15 @@ TEST_P(UnaryTest, sign) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_type, i) + FOR_LOOP(i_shape, j) + SPLIT_ELEMENT(lhs_type, i) + SPLIT_ELEMENT(i_shape, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_unary_sin.cpp b/tests/kernels/test_unary_sin.cpp index 67ccca4ede..61cf2d165c 100644 --- a/tests/kernels/test_unary_sin.cpp +++ b/tests/kernels/test_unary_sin.cpp @@ 
-26,32 +26,31 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class UnaryTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_unary_other_type1" + +class UnaryTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, i_shape] = GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("other_type"); + auto l_shape = GetShapeArray("i_shape"); input = - hrt::create(typecode, i_shape, host_runtime_tensor::pool_cpu_only) + hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - Unary, UnaryTest, - testing::Combine( - testing::Values(dt_float32 /*, dt_int32, dt_int64, dt_float64*/), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{3, 16, 16}, - dims_t{3, 16, 1}, dims_t{16, 16}, dims_t{16, 1}, - dims_t{1, 16, 1}, dims_t{16}, dims_t{1}, dims_t{}))); +INSTANTIATE_TEST_SUITE_P(Unary, UnaryTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(UnaryTest, sin) { OrtKITensor *orts[1]; @@ -89,6 +88,15 @@ TEST_P(UnaryTest, sin) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(other_type, i) + FOR_LOOP(i_shape, j) + SPLIT_ELEMENT(other_type, i) + SPLIT_ELEMENT(i_shape, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_unary_sinh.cpp b/tests/kernels/test_unary_sinh.cpp index f149e3448b..a7c3c1b6d5 100644 --- a/tests/kernels/test_unary_sinh.cpp +++ b/tests/kernels/test_unary_sinh.cpp @@ -26,32 +26,31 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class UnaryTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_unary_other_type" + +class UnaryTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, i_shape] = GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("other_type"); + auto l_shape = GetShapeArray("i_shape"); input = - hrt::create(typecode, i_shape, host_runtime_tensor::pool_cpu_only) + hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - Unary, UnaryTest, - testing::Combine( - testing::Values(dt_float32 /*, dt_int32, dt_int64, dt_float64*/), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{3, 16, 16}, - dims_t{3, 16, 1}, dims_t{16, 16}, dims_t{16, 1}, - dims_t{1, 16, 1}, dims_t{16}, dims_t{1}, dims_t{}))); +INSTANTIATE_TEST_SUITE_P(Unary, UnaryTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(UnaryTest, sin) { OrtKITensor *orts[1]; @@ -89,6 +88,15 @@ TEST_P(UnaryTest, sin) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(other_type, i) + FOR_LOOP(i_shape, j) + SPLIT_ELEMENT(other_type, i) + SPLIT_ELEMENT(i_shape, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_unary_sqrt.cpp b/tests/kernels/test_unary_sqrt.cpp index 4ca5dde243..d0f19a3e23 100644 --- 
a/tests/kernels/test_unary_sqrt.cpp +++ b/tests/kernels/test_unary_sqrt.cpp @@ -26,21 +26,23 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class UnaryTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_unary_other_type" + +class UnaryTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, i_shape] = GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("other_type"); + auto l_shape = GetShapeArray("i_shape"); input = - hrt::create(typecode, i_shape, host_runtime_tensor::pool_cpu_only) + hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); } - void TearDown() override {} - void init_tensor(runtime_tensor &tensor) override { auto dtype = tensor.datatype(); switch (dtype) { @@ -175,17 +177,14 @@ class UnaryTest } } + void TearDown() override { CLEAR_SUBCASE() } + protected: runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - Unary, UnaryTest, - testing::Combine( - testing::Values(dt_float32 /*, dt_int32, dt_int64, dt_float64*/), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{3, 16, 16}, - dims_t{3, 16, 1}, dims_t{16, 16}, dims_t{16, 1}, - dims_t{1, 16, 1}, dims_t{16}, dims_t{1}, dims_t{}))); +INSTANTIATE_TEST_SUITE_P(Unary, UnaryTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(UnaryTest, sqrt) { OrtKITensor *orts[1]; @@ -224,6 +223,15 @@ TEST_P(UnaryTest, sqrt) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(other_type, i) + FOR_LOOP(i_shape, j) + SPLIT_ELEMENT(other_type, i) + SPLIT_ELEMENT(i_shape, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_unary_square.cpp b/tests/kernels/test_unary_square.cpp index a6004859e5..30d3813b93 100644 --- a/tests/kernels/test_unary_square.cpp +++ b/tests/kernels/test_unary_square.cpp @@ -26,20 +26,24 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class UnaryTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_unary" + +class UnaryTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, i_shape] = GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("lhs_type"); + auto l_shape = GetShapeArray("i_shape"); input = - hrt::create(typecode, i_shape, host_runtime_tensor::pool_cpu_only) + hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } void init_tensor(runtime_tensor &tensor) override { auto dtype = tensor.datatype(); @@ -179,13 +183,8 @@ class UnaryTest runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - Unary, UnaryTest, - testing::Combine( - testing::Values(dt_float32 /*, dt_int32, dt_int64, dt_float64*/), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{3, 16, 16}, - dims_t{3, 16, 1}, dims_t{16, 16}, dims_t{16, 1}, - dims_t{1, 16, 1}, dims_t{16}, dims_t{1}, dims_t{}))); +INSTANTIATE_TEST_SUITE_P(Unary, UnaryTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(UnaryTest, sqrt) { OrtKITensor *orts[1]; @@ -225,6 +224,15 @@ TEST_P(UnaryTest, sqrt) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_type, i) + FOR_LOOP(i_shape, j) + 
SPLIT_ELEMENT(lhs_type, i) + SPLIT_ELEMENT(i_shape, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_unary_tanh.cpp b/tests/kernels/test_unary_tanh.cpp index ac99524d24..10c8f1d67b 100644 --- a/tests/kernels/test_unary_tanh.cpp +++ b/tests/kernels/test_unary_tanh.cpp @@ -26,32 +26,31 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class UnaryTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_unary_other_type1" + +class UnaryTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, i_shape] = GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("other_type"); + auto l_shape = GetShapeArray("i_shape"); input = - hrt::create(typecode, i_shape, host_runtime_tensor::pool_cpu_only) + hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; }; -INSTANTIATE_TEST_SUITE_P( - Unary, UnaryTest, - testing::Combine( - testing::Values(dt_float32 /*, dt_int32, dt_int64, dt_float64*/), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{3, 16, 16}, - dims_t{3, 16, 1}, dims_t{16, 16}, dims_t{16, 1}, - dims_t{1, 16, 1}, dims_t{16}, dims_t{1}, dims_t{}))); +INSTANTIATE_TEST_SUITE_P(Unary, UnaryTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(UnaryTest, tanh) { OrtKITensor *orts[1]; @@ -90,6 +89,15 @@ TEST_P(UnaryTest, tanh) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(other_type, i) + FOR_LOOP(i_shape, j) + SPLIT_ELEMENT(other_type, i) + SPLIT_ELEMENT(i_shape, j) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_uniform.cpp b/tests/kernels/test_uniform.cpp index 16ef311a47..a8c566b7c6 100644 --- a/tests/kernels/test_uniform.cpp +++ b/tests/kernels/test_uniform.cpp @@ -26,28 +26,40 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class UniformTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_uniform" + +class UniformTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); - float_t high_array[] = {1.0f}; - high = hrt::create(dt_float32, {1}, + READY_SUBCASE() + + auto typecode = GetDataType("lhs_type"); + auto l_shape = GetAxesArray("l_shape"); + auto shape = GetShapeArray("shape"); + auto value1 = GetFloatNumber("value1"); + auto value2 = GetFloatNumber("value2"); + auto value3 = GetFloatNumber("value3"); + + high_value = value1; + float high_array[] = {high_value}; + high = hrt::create(typecode, shape, {reinterpret_cast(high_array), sizeof(high_array)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - float_t low_array[] = {0.0f}; + low_value = value2; + float low_array[] = {low_value}; low = hrt::create( - dt_float32, {1}, + typecode, shape, {reinterpret_cast(low_array), sizeof(low_array)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - float_t seed_array[] = {1.0f}; - seed = hrt::create(dt_float32, {1}, + seed_value = value3; + float seed_array[] = {seed_value}; + seed 
= hrt::create(typecode, shape, {reinterpret_cast(seed_array), sizeof(seed_array)}, true, host_runtime_tensor::pool_cpu_only) @@ -56,30 +68,28 @@ class UniformTest shape_array = l_shape; } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor high; runtime_tensor low; runtime_tensor seed; axes_t shape_array; + float high_value; + float low_value; + float seed_value; }; -INSTANTIATE_TEST_SUITE_P( - Uniform, UniformTest, - testing::Combine(testing::Values(dt_float32, dt_int32, dt_int16, dt_float64, - dt_int8, dt_uint8, dt_uint16, dt_uint32, - dt_uint64, dt_int64, dt_float16, - dt_boolean), - testing::Values(axes_t{1, 3, 16, 16}))); +INSTANTIATE_TEST_SUITE_P(Uniform, UniformTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(UniformTest, Uniform) { // expected std::vector vec(shape_array.begin(), shape_array.end()); int64_t shape_u_array[4]; std::copy(vec.begin(), vec.end(), shape_u_array); - auto output_ort = - ortki_RandomUniform(1, 1.0f, 0.0f, 1.0f, shape_u_array, 4); + auto output_ort = ortki_RandomUniform(1, high_value, low_value, seed_value, + shape_u_array, 4); size_t size = 0; void *ptr_ort = tensor_buffer(output_ort, &size); dims_t shape(tensor_rank(output_ort)); @@ -115,6 +125,27 @@ TEST_P(UniformTest, Uniform) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_type, i) + FOR_LOOP(shape, j) + FOR_LOOP(l_shape, k) + FOR_LOOP(value1, l) + FOR_LOOP(value2, m) + FOR_LOOP(value3, n) + SPLIT_ELEMENT(lhs_type, i) + SPLIT_ELEMENT(shape, j) + SPLIT_ELEMENT(l_shape, k) + SPLIT_ELEMENT(value1, l) + SPLIT_ELEMENT(value2, m) + SPLIT_ELEMENT(value3, n) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_uniform.json b/tests/kernels/test_uniform.json new file mode 100644 index 0000000000..5938d01054 --- /dev/null +++ b/tests/kernels/test_uniform.json @@ -0,0 +1,8 @@ +{ + "lhs_type":["dt_float32"], + "shape":[[1]], + "l_shape":[[1, 3, 16, 16]], + "value1":[1.0, 2.0, 3.0, 4.0, 5.0], + "value2":[0.0, 0.5, 0.8, 1.0], + "value3":[1.0, 2.0] +} \ No newline at end of file diff --git a/tests/kernels/test_uniform_like.cpp b/tests/kernels/test_uniform_like.cpp index e643e8c969..46c129e571 100644 --- a/tests/kernels/test_uniform_like.cpp +++ b/tests/kernels/test_uniform_like.cpp @@ -26,44 +26,71 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; +#define TEST_CASE_NAME "test_uniform_like" + class UniformLikeTest : public KernelTest, - public ::testing::TestWithParam< - std::tuple> { + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape, r_shape] = GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("lhs_type"); + auto shape = GetShapeArray("shape"); + auto l_shape = GetShapeArray("l_shape"); + auto value1 = GetFloatNumber("value1"); + auto value2 = GetFloatNumber("value2"); + auto value3 = GetFloatNumber("value3"); lhs = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(lhs); - rhs = hrt::create(typecode, r_shape, host_runtime_tensor::pool_cpu_only) + high_value = value1; + float high_array[] = {high_value}; + high = hrt::create(typecode, shape, + {reinterpret_cast(high_array), + sizeof(high_array)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); 
+ + low_value = value2; + float low_array[] = {low_value}; + low = hrt::create( + typecode, shape, + {reinterpret_cast(low_array), sizeof(low_array)}, + true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); - init_tensor(rhs); + + seed_value = value3; + float seed_array[] = {seed_value}; + seed = hrt::create(typecode, shape, + {reinterpret_cast(seed_array), + sizeof(seed_array)}, + true, host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor lhs; - runtime_tensor rhs; + runtime_tensor high; + runtime_tensor low; + runtime_tensor seed; + float high_value; + float low_value; + float seed_value; }; -INSTANTIATE_TEST_SUITE_P( - UniformLike, UniformLikeTest, - testing::Combine(testing::Values(dt_float32, dt_int32/*, dt_int16, dt_float64, - dt_int8, dt_uint8, dt_uint16, dt_uint32, - dt_uint64, dt_int64, dt_float16*/), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{3, 16, 16}, - dims_t{16, 16}, dims_t{16}, dims_t{1}), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{3, 16, 16}, - dims_t{16, 16}, dims_t{16}, dims_t{1}))); +INSTANTIATE_TEST_SUITE_P(UniformLike, UniformLikeTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(UniformLikeTest, UniformLike) { auto l_ort = runtime_tensor_2_ort_tensor(lhs); // expected - auto output_ort = ortki_RandomUniformLike(l_ort, 1, 1.0f, 0.0f, 1.0f); + auto output_ort = + ortki_RandomUniformLike(l_ort, 1, high_value, low_value, seed_value); size_t size = 0; void *ptr_ort = tensor_buffer(output_ort, &size); dims_t shape(tensor_rank(output_ort)); @@ -74,26 +101,8 @@ TEST_P(UniformLikeTest, UniformLike) { .expect("create tensor failed"); // actual - float_t high_array[] = {1.0f}; - auto high = hrt::create(dt_float32, {1}, - {reinterpret_cast(high_array), - sizeof(high_array)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - float_t low_array[] = {0.0f}; - auto low = hrt::create(dt_float32, {1}, - {reinterpret_cast(low_array), - sizeof(low_array)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - float_t seed_array[] = {1.0f}; - auto seed = hrt::create(dt_float32, {1}, - {reinterpret_cast(seed_array), - sizeof(seed_array)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); auto output = - kernels::stackvm::uniform_like(dt_float32, lhs.impl(), high.impl(), + kernels::stackvm::uniform_like(lhs.datatype(), lhs.impl(), high.impl(), low.impl(), seed.impl()) .expect("uniform_like failed"); runtime_tensor actual(output.as().expect("as tensor failed")); @@ -113,6 +122,27 @@ TEST_P(UniformLikeTest, UniformLike) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_type, i) + FOR_LOOP(l_shape, k) + FOR_LOOP(shape, j) + FOR_LOOP(value1, l) + FOR_LOOP(value2, m) + FOR_LOOP(value3, n) + SPLIT_ELEMENT(lhs_type, i) + SPLIT_ELEMENT(l_shape, k) + SPLIT_ELEMENT(shape, j) + SPLIT_ELEMENT(value1, l) + SPLIT_ELEMENT(value2, m) + SPLIT_ELEMENT(value3, n) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_uniform_like.json b/tests/kernels/test_uniform_like.json new file mode 100644 index 0000000000..9f02ffa84c --- /dev/null +++ b/tests/kernels/test_uniform_like.json @@ -0,0 +1,8 @@ +{ + "lhs_type":["dt_float32"], + 
"l_shape":[[1, 3, 16, 16], [3, 16, 16], [16, 16], [16], [1]], + "shape":[[1]], + "value1":[1.0, 2.0, 3.0, 4.0, 5.0], + "value2":[0.0, 0.5, 0.8, 1.0], + "value3":[1.0, 2.0] +} \ No newline at end of file diff --git a/tests/kernels/test_unsqueeze.cpp b/tests/kernels/test_unsqueeze.cpp index e0d0d3c7d5..5f0523a75a 100644 --- a/tests/kernels/test_unsqueeze.cpp +++ b/tests/kernels/test_unsqueeze.cpp @@ -26,74 +26,108 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; -class UnsqueezeTest - : public KernelTest, - public ::testing::TestWithParam> { +#define TEST_CASE_NAME "test_unsqueeze" + +class UnsqueezeTest : public KernelTest, + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, l_shape] = GetParam(); + READY_SUBCASE() + + auto axis_value = GetAxesArray("axis_array"); + auto l_shape = GetShapeArray("lhs_shape"); + auto typecode = GetDataType("lhs_type"); input = hrt::create(typecode, l_shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); init_tensor(input); + + axis_array = axis_value; } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor input; + axes_t axis_array; }; -INSTANTIATE_TEST_SUITE_P( - Unsqueeze, UnsqueezeTest, - testing::Combine(testing::Values(dt_float32, dt_int32, dt_int16, dt_float64, - dt_int8, dt_uint8, dt_uint16, dt_uint32, - dt_uint64, dt_int64, dt_bfloat16, - dt_float16, dt_boolean), - testing::Values(dims_t{1, 3, 16, 16}, dims_t{1, 3}, - dims_t{1}, dims_t{1, 3, 16}))); +INSTANTIATE_TEST_SUITE_P(Unsqueeze, UnsqueezeTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); + +// testing::Combine( +// testing::Values(dt_float32, dt_int32, dt_int16, dt_float64, dt_int8, +// dt_uint8, dt_uint16, dt_uint32, dt_uint64, dt_int64, +// dt_bfloat16, dt_float16, dt_boolean), +// testing::Values(dims_t{24, 24}, dims_t{3, 24, 24}, dims_t{24, 24}, +// dims_t{24}, dims_t{1, 3, 16}), +// testing::Values(axes_t{0}, axes_t{-1}, axes_t{-2}, axes_t{-3}, +// axes_t{1}, axes_t{2}, axes_t{3}, axes_t{0, 1}, +// axes_t{0, 2}, axes_t{1, -1}, +// /*axes_t{-2, -1}, +// axes_t{2, 1},*/ +// axes_t{-4}))); TEST_P(UnsqueezeTest, Unsqueeze) { auto l_ort = runtime_tensor_2_ort_tensor(input); - // expected - size_t size = 0; - int64_t axes_array[] = {0, 2}; - auto axes = hrt::create(dt_int64, {2}, - {reinterpret_cast(axes_array), - sizeof(axes_array)}, - true, host_runtime_tensor::pool_cpu_only) - .expect("create tensor failed"); - auto output_ort = ortki_Unsqueeze(l_ort, runtime_tensor_2_ort_tensor(axes)); - void *ptr_ort = tensor_buffer(output_ort, &size); - dims_t shape(tensor_rank(output_ort)); - tensor_shape(output_ort, reinterpret_cast(shape.data())); - auto expected = hrt::create(input.datatype(), shape, - {reinterpret_cast(ptr_ort), size}, + if (axis_array.size() + input.shape().size() == 4) { + // expected + size_t axis_size = axis_array.size(); + int64_t *axis_array1 = (int64_t *)malloc(axis_size * sizeof(int64_t)); + std::copy(axis_array.begin(), axis_array.end(), axis_array1); + auto axes = hrt::create(dt_int64, {axis_size}, + {reinterpret_cast(axis_array1), + axis_size * sizeof(int64_t)}, true, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); + auto output_ort = + ortki_Unsqueeze(l_ort, runtime_tensor_2_ort_tensor(axes)); - // actual - auto output = kernels::stackvm::unsqueeze(input.impl(), axes.impl()) - .expect("unsqueeze failed"); - runtime_tensor actual(output.as().expect("as tensor failed")); + size_t size = 0; + void *ptr_ort 
= tensor_buffer(output_ort, &size); + dims_t shape(tensor_rank(output_ort)); + tensor_shape(output_ort, reinterpret_cast(shape.data())); + auto expected = + hrt::create(input.datatype(), shape, + {reinterpret_cast(ptr_ort), size}, true, + host_runtime_tensor::pool_cpu_only) + .expect("create tensor failed"); - bool result = is_same_tensor(expected, actual) || - cosine_similarity_tensor(expected, actual); + // actual + auto output = kernels::stackvm::unsqueeze(input.impl(), axes.impl()) + .expect("unsqueeze failed"); + runtime_tensor actual(output.as().expect("as tensor failed")); - if (!result) { - std::cout << "actual "; - print_runtime_tensor(actual); - std::cout << "expected "; - print_runtime_tensor(expected); - } + bool result = is_same_tensor(expected, actual) || + cosine_similarity_tensor(expected, actual); + + if (!result) { + std::cout << "actual "; + print_runtime_tensor(actual); + std::cout << "expected "; + print_runtime_tensor(expected); + } - // compare - EXPECT_TRUE(result); + // compare + EXPECT_TRUE(result); + } } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(axis_array, j) + FOR_LOOP(lhs_shape, i) + FOR_LOOP(lhs_type, k) + SPLIT_ELEMENT(axis_array, j) + SPLIT_ELEMENT(lhs_shape, i) + SPLIT_ELEMENT(lhs_type, k) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_unsqueeze.json b/tests/kernels/test_unsqueeze.json new file mode 100644 index 0000000000..184a324bdd --- /dev/null +++ b/tests/kernels/test_unsqueeze.json @@ -0,0 +1,5 @@ +{ + "axis_array":[[0], [-1], [-2], [-3], [1], [2], [3], [0, 1], [0, 2], [1, -1], [-4]], + "lhs_shape":[[1, 12], [3, 24, 24], [24], [1, 3, 16]], + "lhs_type":["dt_float32", "dt_int32", "dt_int16", "dt_float64", "dt_int8", "dt_uint8", "dt_uint16", "dt_uint32", "dt_uint64", "dt_int64", "dt_bfloat16", "dt_float16", "dt_boolean"] +} \ No newline at end of file diff --git a/tests/kernels/test_where.cpp b/tests/kernels/test_where.cpp index b0af2a1832..915b77ab1f 100644 --- a/tests/kernels/test_where.cpp +++ b/tests/kernels/test_where.cpp @@ -26,12 +26,17 @@ using namespace nncase; using namespace nncase::runtime; using namespace ortki; +#define TEST_CASE_NAME "test_where" + class WhereTest : public KernelTest, - public ::testing::TestWithParam< - std::tuple> { + public ::testing::TestWithParam> { public: void SetUp() override { - auto &&[typecode, typecode_bool, shape] = GetParam(); + READY_SUBCASE() + + auto typecode = GetDataType("lhs_type"); + auto typecode_bool = GetDataType("rhs_type"); + auto shape = GetShapeArray("i_shape"); lhs = hrt::create(typecode, shape, host_runtime_tensor::pool_cpu_only) .expect("create tensor failed"); @@ -47,7 +52,7 @@ class WhereTest : public KernelTest, init_tensor(con); } - void TearDown() override {} + void TearDown() override { CLEAR_SUBCASE() } protected: runtime_tensor lhs; @@ -55,14 +60,8 @@ class WhereTest : public KernelTest, runtime_tensor con; }; -INSTANTIATE_TEST_SUITE_P( - Where, WhereTest, - testing::Combine( - testing::Values(dt_float32, dt_int32, dt_float64, dt_uint8, dt_int64), - testing::Values(dt_boolean), - testing::Values(dims_t{4}, dims_t{1}, dims_t{1, 3}, dims_t{1, 3, 16}, - dims_t{1, 3, 16, 16}, dims_t{1, 16, 16}, dims_t{16, 16}, - dims_t{3, 16, 16}, dims_t{1, 16}))); +INSTANTIATE_TEST_SUITE_P(Where, WhereTest, + testing::Combine(testing::Range(0, MAX_CASE_NUM))); TEST_P(WhereTest, Where) { auto l_ort = 
runtime_tensor_2_ort_tensor(lhs); @@ -101,6 +100,18 @@ TEST_P(WhereTest, Where) { } int main(int argc, char *argv[]) { + READY_TEST_CASE_GENERATE() + FOR_LOOP(lhs_type, j) + FOR_LOOP(rhs_type, i) + FOR_LOOP(i_shape, k) + SPLIT_ELEMENT(lhs_type, j) + SPLIT_ELEMENT(rhs_type, i) + SPLIT_ELEMENT(i_shape, k) + WRITE_SUB_CASE() + FOR_LOOP_END() + FOR_LOOP_END() + FOR_LOOP_END() + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tests/kernels/test_where.json b/tests/kernels/test_where.json new file mode 100644 index 0000000000..451a44fae8 --- /dev/null +++ b/tests/kernels/test_where.json @@ -0,0 +1,5 @@ +{ + "lhs_type":["dt_float32", "dt_int32", "dt_float64", "dt_uint8", "dt_int64"], + "rhs_type":["dt_boolean"], + "i_shape":[[1, 3, 16, 16], [3, 16, 16], [3, 16, 1], [16, 16], [16, 1], [1, 16, 1], [16], [1], [4], [1, 3], [1, 3, 16]] +} \ No newline at end of file diff --git a/tests/nuc_proxy.py b/tests/nuc_proxy.py index ca5419a122..f2ffdac1fa 100644 --- a/tests/nuc_proxy.py +++ b/tests/nuc_proxy.py @@ -7,226 +7,250 @@ import queue import logging import logging.handlers -import telnetlib -import time import serial +import shutil +import time import toml -class TelnetClient(): - def __init__(self, mylogger): - self.tn = telnetlib.Telnet() - self.logger = mylogger - self.ip = '10.99.105.216' - self.timeout = 60 - - def login(self, ip, username, password): - try: - self.tn.open(ip, port=23) - except: - self.logger.error('telnet {0} failed'.format(ip)) - return False - - self.ip = ip - self.tn.read_until(b'login: ', timeout=self.timeout) - self.tn.write(username.encode() + b'\r\n') - - cmd_result = self.tn.read_very_eager().decode() - if 'Login incorrect' not in cmd_result: - self.logger.info('{0} login succeed'.format(ip)) - return True - else: - self.logger.error('{0} login failed'.format(ip)) - return False - - def logout(self): - self.tn.close() - self.logger.info('{0} logout succeed'.format(self.ip)) - - def execute(self, cmd, flag): - self.logger.debug('execute: cmd = {0}, flag = {1}'.format(cmd, flag)) - self.tn.write(cmd.encode() + b'\r\n') - cmd_result = self.tn.read_until(flag.encode(), timeout=self.timeout).decode() - if flag not in cmd_result: - # time out - self.tn.write(telnetlib.IP) - cmd_result = f'timeout for {self.timeout} seconds' - self.logger.error('execute {0} failed: {1}'.format(cmd, cmd_result)) - return cmd_result, False - else: - self.tn.write('echo $?'.encode() + b'\r\n') - cmd_status = self.tn.read_until(flag.encode(), self.timeout).decode() - if cmd_status.find('\r\n0\r\n') == -1: - self.logger.error('execute {0} failed: {1}'.format(cmd, cmd_result)) - return cmd_result, False - else: - return cmd_result, True - - -def recv_file(conn, target_root, mylogger): - conn.sendall(f"pls send file info".encode()) - header = conn.recv(1024) - file_dict = json.loads(header.decode()) - file_name = file_dict['file_name'] - file_size = file_dict['file_size'] - mylogger.debug('recv: file = {0}, size = {1}'.format(file_name, file_size)) - conn.sendall(f"pls send {file_name}".encode()) - - full_file = os.path.join(target_root, file_name) - with open(full_file, 'wb') as f: - recv_size = 0 - while recv_size < file_size: - slice = conn.recv(4096) - f.write(slice) - recv_size += len(slice) - - # conn.sendall(f"recv {file_name} succeed".encode()) - os.chmod(full_file, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) - return file_name - - class MySerial: - def __init__(self, target, port, baudrate): + def __init__(self, port, baudrate, 
logger): self.s = None - self.target = target self.port = port self.baudrate = baudrate + self.logger = logger self.timeout = 60 - # self.log_file = '{0}_uart.log'.format(target) def open(self): + self.logger.debug(f'open {self.port} begin') self.s = serial.Serial(port=self.port, baudrate=self.baudrate, timeout=self.timeout) - # if (self.s.isOpen()): - # print('open {0} succeed'.format(self.port)) - # else: - # print('open {0} failed'.format(self.port)) + if (self.s.isOpen()): + self.logger.debug(f'open {self.port} succeed end') + else: + self.logger.debug(f'open {self.port} failed end') def close(self): + self.logger.debug(f'close {self.port} begin') self.s.close() + self.logger.debug(f'close {self.port} end') def write(self, cmd): + self.logger.debug(f'write {cmd} begin') + cmd = cmd + '\r' self.s.write(cmd.encode()) - - # def readall(self): - # f = open(self.log_file, 'a') - # while True: - # # line = self.s.readline().decode() - # #print('readline from serial {0}: {1}'.format(self.port, line)) - # print('{0}'.format(line), end='') - # f.write(line) - # if '}\n' in line: - # break - # f.close() + self.s.flush() + self.logger.debug('write end') def read_until(self, expected): - data = self.s.read_until(expected).decode() - # print('read: {0}'.format(data), end='') + self.logger.debug('read begin') + data = self.s.read_until(expected.encode()).decode() + self.logger.debug('read end: data = {0}'.format(data)) return data + def run_cmd(self, cmd, expected=''): + data = '' + self.open() + self.write(cmd) + if expected != '': + data = self.read_until(expected) -def run_cmds(s, cmds): - s.open() + self.close() + return data + + +class Target: + def __init__(self, name, cfg, nfs, clear_queue): + self.name = name + self.infer_queue = queue.Queue(maxsize=clear_queue.maxsize) + self.clear_queue = clear_queue + self.working_dir = cfg['working_dir'] + self.separator = cfg['separator'] + + # nfs_dir + self.nfs_dir = os.path.join(nfs, name) + if not os.path.exists(self.nfs_dir): + os.makedirs(self.nfs_dir) + + # logging + mylogger = logging.getLogger() + mylogger.setLevel(logging.INFO) + rf_handler = logging.handlers.RotatingFileHandler( + f'nuc_proxy_{name}.log', mode='a', maxBytes=32 * 1024 * 1024, backupCount=10) + rf_handler.setLevel(logging.INFO) + rf_handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')) + mylogger.addHandler(rf_handler) + self.logger = mylogger + + # serial + self.s0 = MySerial(cfg['uart0'], cfg['baudrate0'], self.logger) + self.s1 = MySerial(cfg['uart1'], cfg['baudrate1'], self.logger) + + +def recv_file(conn, case_dir, logger): + conn.sendall(f"pls send file info".encode()) + header = conn.recv(1024) + file_dict = json.loads(header.decode()) + file_name = file_dict['file_name'] + file_size = file_dict['file_size'] + logger.debug('recv begin: file = {0}, size = {1}'.format(file_name, file_size)) + conn.sendall(f"pls send {file_name}".encode()) - for cmd in cmds.split('&&'): - s.write(cmd + '\r') + full_file = os.path.join(case_dir, file_name) + with open(full_file, 'wb') as f: + recv_size = 0 + while recv_size < file_size: + slice = conn.recv(4096) + f.write(slice) + recv_size += len(slice) - data = s.read_until(b'}') - s.close() + os.chmod(full_file, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) + logger.debug('recv end') + return file_name - return data +def recv_worker(conn, target): + # recv header + conn.sendall(f"pls send header".encode()) + header = conn.recv(1024) + header_dict = json.loads(header.decode()) + new_case = header_dict['case'] + 
str(int(time.time())) + target.logger.info("test case = {0}".format(new_case)) + case_dir = os.path.join(target.nfs_dir, new_case) + os.makedirs(case_dir) + file_num = header_dict['app'] + header_dict['kmodel'] + header_dict['inputs'] + + # recv all kinds of files(app + kmodel + inputs) + cmds = f'cd {target.working_dir}/{target.name}/{new_case};./' + for i in range(file_num): + file = recv_file(conn, case_dir, target.logger) + if i == 0: + cmds = cmds + file + else: + cmds = cmds + ' ' + file -def Consumer(target, q, ip, username, password, working_dir, uart, baudrate): - # logging - mylogger = logging.getLogger() - mylogger.setLevel(logging.DEBUG) - rf_handler = logging.handlers.RotatingFileHandler( - f'nuc_proxy_{target}.log', mode='a', maxBytes=32 * 1024 * 1024, backupCount=10) - rf_handler.setLevel(logging.INFO) - rf_handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')) - mylogger.addHandler(rf_handler) - telnet_client = TelnetClient(mylogger) + target.logger.debug("cmds = {0}".format(cmds)) + target.infer_queue.put((cmds, conn, case_dir, header_dict['outputs'])) - # serial - s = MySerial(target, uart, baudrate) +def infer_worker(target): while True: - conn = q.get() + cmds, conn, case_dir, output_num = target.infer_queue.get() + separator = os.path.basename(case_dir) + target.separator + ret = '' - # recv header - conn.sendall(f"pls send cmd".encode()) - header = conn.recv(1024) - dict = json.loads(header.decode()) - cmd = dict['cmd'] - mylogger.info("cmd = {0}".format(dict['cmd'])) + # exit from face_detect after rebooting + # target.s1.run_cmd('q') + target.s1.run_cmd('') - # run cmds on device - cmds = 'cd {0}/{1} && {2}'.format(working_dir, target, cmd) - ret_str = run_cmds(s, cmds) + for cmd in cmds.split(';'): + ret = target.s1.run_cmd(cmd, separator) # infer result - if ret_str.find('terminate') != -1 or ret_str.find('Exception') != -1: - conn.sendall(f'infer exception: {ret_str}'.encode()) - elif ret_str.find('}') == -1: + dict = {'type': 'finish', 'time': 0.0, 'error': ''} + if ret.find('terminate') != -1 or ret.find('Exception') != -1: + target.logger.error('infer exception') + err = f'infer exception: {ret}' + dict['type'] = 'exception' + dict['error'] = err[0:1024] + conn.sendall(json.dumps(dict).encode()) + elif ret.find(separator) == -1: # reboot target when timeout - conn.sendall(f'infer timeout'.encode()) - mylogger.error('reboot {0}({1}) for timeout'.format(target, ip)) - telnet_client.login(ip, username, password) - flag = f'[{username}@canaan ~ ]' - telnet_client.execute('reboot', flag) - telnet_client.logout() - time.sleep(30) + target.logger.error('reboot for timeout') + dict['type'] = 'timeout' + dict['error'] = 'infer timeout' + conn.sendall(json.dumps(dict).encode()) + + # reboot after login + target.s0.run_cmd('root') + target.s0.run_cmd('') + target.s0.run_cmd('reboot') + time.sleep(20) else: - conn.sendall(f'infer succeed'.encode()) + dict['time'] = float(ret.split('\n')[1].split()[1]) + conn.sendall(json.dumps(dict).encode()) + dummy = conn.recv(1024) + + # send outputs + for i in range(output_num): + file = os.path.join(case_dir, f'nncase_result_{i}.bin') + file_size = os.path.getsize(file) + conn.sendall(str(file_size).encode()) + dummy = conn.recv(1024) + + target.logger.debug('send begin: file = {0}, size = {1}'.format(file, file_size)) + with open(file, 'rb') as f: + conn.sendall(f.read()) + target.logger.debug('send end') + dummy = conn.recv(1024) + target.logger.debug('infer finish') conn.close() + 
target.clear_queue.put(case_dir) + + +def clear_worker(q): + while True: + case_dir = q.get() + if os.path.exists(case_dir): + shutil.rmtree(case_dir) def main(): # default config config = ''' + ip = '10.99.105.216' + port = 10000 + nfs = '/data/nfs' [k230] - ip = '192.168.1.230' - username = 'root' - password = '' working_dir = '/sharefs' - uart = '/dev/ttyUSB1' - baudrate = 2500000 + separator = '>' + uart0 = '/dev/ttyUSB0' + baudrate0 = 115200 + uart1 = '/dev/ttyUSB1' + baudrate1 = 115200 ''' # args parser = argparse.ArgumentParser(prog="nuc_proxy") - parser.add_argument("--port", help='listening port', type=int, default=10001) - parser.add_argument("--config", help='config str or file', type=str, default=config) + parser.add_argument("--config", help='config string or file', type=str, default=config) args = parser.parse_args() size = 256 + cfg = {} + dict = {} # load config - cfg = {} if os.path.isfile(args.config): cfg = toml.load(args.config) else: cfg = toml.loads(args.config) - # create queue and thread - for k in cfg: - q = queue.Queue(maxsize=size) - t_consumer = threading.Thread(target=Consumer, args=( - k, q, cfg[k]["ip"], cfg[k]['username'], cfg[k]['password'], cfg[k]['working_dir'], cfg[k]['uart'], cfg[k]['baudrate'])) - t_consumer.start() - cfg[k]['queue'] = q + # clear thread + clear_queue = queue.Queue(maxsize=size) + clear_thread = threading.Thread(target=clear_worker, args=(clear_queue,)) + clear_thread.start() - # server socket + # start server server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - server_socket.bind(('localhost', args.port)) + server_socket.bind((cfg['ip'], cfg['port'])) server_socket.listen(size) while True: conn, addr = server_socket.accept() - # recv target + # recv target name conn.sendall(f"pls send your target".encode()) info = conn.recv(1024) - dict = json.loads(info.decode()) - cfg[dict['target']]['queue'].put(conn) + target_dict = json.loads(info.decode()) + target_name = target_dict['target'] + + # create target instance + if target_name not in dict: + target = Target(target_name, cfg[target_name], cfg['nfs'], clear_queue) + infer_thread = threading.Thread(target=infer_worker, args=(target,)) + infer_thread.start() + dict[target_name] = target + + # start recv thread + recv_thread = threading.Thread(target=recv_worker, args=(conn, dict[target_name])) + recv_thread.start() if __name__ == '__main__': diff --git a/tests/test_runner.py b/tests/test_runner.py index 0829dd213e..2e0b89055e 100644 --- a/tests/test_runner.py +++ b/tests/test_runner.py @@ -55,6 +55,19 @@ def __init__(self, case_name, override_cfg: str = None) -> None: # used for tag dynamic model for onnx simplify self.dynamic = False + if self.cfg['dump_infer']: + self.infer_file = test_utils.infer_file() + self.infer_dict = { + 'case': 'unknown', + 'target': 'cpu', + 'if_quant_type': 'uint8', + 'w_quant_type': 'uint8', + 'time(ms)': 'N/A', + 'fps': 'N/A', + 'result': 'Pass', + 'remark': 'N/A' + } + def transform_input(self, values: List[np.ndarray], type: str, stage: str) -> List[np.ndarray]: new_values = [] compile_opt = self.cfg['compile_opt'] @@ -250,8 +263,12 @@ def run(self, model_file: Union[List[str], str]): mode_dir = os.path.join(target_dir, k_mode) shutil.move(tmp_dir, mode_dir) judge, result = self.compare_results( - expected, actual, stage, k_target, v_target['similarity_name'], k_mode, v_mode['threshold'], self.cfg['dump_hist'], mode_dir) + expected, actual, stage, k_target, v_target['similarity_name'], k_mode, v_mode['threshold'], dump_hist, mode_dir) + if 
stage == 'infer' and self.cfg['dump_infer']: + self.infer_dict['result'] = 'Pass' if judge else 'Fail' + self.infer_dict['remark'] = result.replace('\n', ' ') + dump_dict_to_json(self.infer_dict, self.infer_file) if not judge: if test_utils.in_ci(): self.clear(self.case_dir) @@ -407,17 +424,19 @@ def compare_results(self, stage, target, similarity_name, mode, threshold, dump_hist, dump_dir) -> Tuple[bool, str]: i = 0 judges = [] + result = '' for expected, actual in zip(ref_ouputs, test_outputs): expected = expected.astype(np.float32) actual = actual.astype(np.float32) dump_file = os.path.join(dump_dir, 'nncase_result_{0}_hist.csv'.format(i)) judge, similarity_info = compare_ndarray( expected, actual, similarity_name, threshold, dump_hist, dump_file) - result_info = "\n{0} [ {1} {2} {3} ] Output: {4}!!\n".format( + result_info = "{0} [ {1} {2} {3} ] Output {4}:".format( 'Pass' if judge else 'Fail', stage, target, mode, i) - result = similarity_info + result_info - with open(os.path.join(self.case_dir, 'test_result.txt'), 'a+') as f: - f.write(result) + result += result_info + similarity_info i = i + 1 judges.append(judge) + + with open(os.path.join(self.case_dir, 'test_result.txt'), 'a+') as f: + f.write(result) return sum(judges) == len(judges), result diff --git a/tests/test_utils.py b/tests/test_utils.py index 474387a4e8..3cd04c54bd 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -1,4 +1,5 @@ import os +import json import numpy as np @@ -33,6 +34,17 @@ def _cast_bfloat16_then_float32(values: np.array): values[i] = value +def dump_dict_to_json(dict, json_file): + json_list = [] + if os.path.exists(json_file): + with open(json_file, 'r') as f: + json_list = json.load(f) + + json_list.append(dict) + with open(json_file, 'w') as f: + json.dump(json_list, f) + + def in_ci(): return os.getenv('CI', False) @@ -41,9 +53,17 @@ def kpu_targets(): return os.getenv('KPU_TARGETS', "").split(',') -def port(): - return os.getenv('CI_PROXY_PORT') +def nuc_ip(): + return os.getenv('NUC_PROXY_IP') + + +def nuc_port(): + return os.getenv('NUC_PROXY_PORT') def test_executable(target): return os.getenv('TEST_EXECUTABLE_{0}'.format(target.upper())) + + +def infer_file(): + return os.getenv('INFER_FILE', 'infer_report.json') diff --git a/tests/kernels/functions.sh b/tools/TestMacroGenrator/functions.sh similarity index 100% rename from tests/kernels/functions.sh rename to tools/TestMacroGenrator/functions.sh diff --git a/tests/kernels/generate_macro.sh b/tools/TestMacroGenrator/generate_macro.sh similarity index 89% rename from tests/kernels/generate_macro.sh rename to tools/TestMacroGenrator/generate_macro.sh index 50e6a6f452..8b53c22283 100644 --- a/tests/kernels/generate_macro.sh +++ b/tools/TestMacroGenrator/generate_macro.sh @@ -1,7 +1,7 @@ . ./functions.sh . ./kernel_op_config.sh mkdir -p generated -generated_file=generated/generated_macro.h +generated_file=../../tests/kernels/generated/generated_macro.h ARGS=0 ATTR=0 GenerateHeader diff --git a/tests/kernels/kernel_op_config.sh b/tools/TestMacroGenrator/kernel_op_config.sh similarity index 100% rename from tests/kernels/kernel_op_config.sh rename to tools/TestMacroGenrator/kernel_op_config.sh