From 299a4b3962723d7d2eefa9c76c0543293cf5e0f1 Mon Sep 17 00:00:00 2001
From: Michael Poluektov
Date: Wed, 16 Aug 2023 14:49:37 +0000
Subject: [PATCH 01/15] move xformer

---
 Makefile | 2 +-
 experimental/README.md | 13 -------------
 third_party/lib_tflite_micro | 2 +-
 {experimental/xformer => xformer}/.bazelrc | 0
 {experimental/xformer => xformer}/.bazelversion | 0
 {experimental/xformer => xformer}/.clang-format | 0
 {experimental/xformer => xformer}/.gitignore | 0
 .../xformer => xformer}/Analysis/MemoryPlan.cpp | 0
 .../xformer => xformer}/Analysis/MemoryPlan.h | 0
 {experimental/xformer => xformer}/BUILD | 0
 {experimental/xformer => xformer}/IR/XCoreOps.cpp | 0
 {experimental/xformer => xformer}/IR/XCoreOps.h | 0
 {experimental/xformer => xformer}/IR/XCoreOps.td | 0
 {experimental/xformer => xformer}/README.md | 0
 {experimental => xformer}/TESTING.rst | 0
 {experimental/xformer => xformer}/Test/BUILD | 0
 .../Test/activation-lowering.mlir | 0
 .../xformer => xformer}/Test/avg_pool_2d.mlir | 0
 {experimental/xformer => xformer}/Test/conv2d.mlir | 0
 .../xformer => xformer}/Test/convpatterns.mlir | 0
 .../xformer => xformer}/Test/fc-to-conv2d.mlir | 0
 .../Test/invalid-loadconstantop.mlir | 0
 .../Test/invalid-loadflashop.mlir | 0
 .../xformer => xformer}/Test/invalid-op_split.mlir | 0
 .../xformer => xformer}/Test/lit_test.bzl | 0
 .../xformer => xformer}/Test/loadconstantop.mlir | 0
 .../xformer => xformer}/Test/loadflashop.mlir | 0
 .../xformer => xformer}/Test/mlir_example.tflite | Bin
 .../xformer => xformer}/Test/op_split.mlir | 0
 .../xformer => xformer}/Test/padding.mlir | 0
 .../Test/pytorch_transpose.mlir | 0
 {experimental/xformer => xformer}/Test/run_lit.sh | 0
 .../xformer => xformer}/Test/tfl_fc.tflite | Bin
 .../xformer => xformer}/Test/tflpatterns.mlir | 0
 .../Test/translate-to-customops.mlir | 0
 .../Test/valid_strided_slice.mlir | 0
 .../Transforms/ApplyLoadConstantOpPatterns.cpp | 0
 .../Transforms/ApplyTFLPatterns.cpp | 0
 .../Transforms/ApplyXCPatterns.cpp | 0
 .../xformer => xformer}/Transforms/ConvPatterns.h | 0
 .../xformer => xformer}/Transforms/ConvPatterns.td | 0
 .../Transforms/ConvPatternsLCE.cpp | 0
 .../Transforms/ConvPatternsTFL.cpp | 0
 .../Transforms/ConvRevertPatterns.td | 0
 .../Transforms/LoadConstantOpPatterns.td | 0
 .../xformer => xformer}/Transforms/OpSplit.cpp | 0
 .../Transforms/OptimizeConv2D.cpp | 0
 .../Transforms/OptimizeTranspose.cpp | 0
 .../xformer => xformer}/Transforms/Options.h | 0
 .../xformer => xformer}/Transforms/Passes.cpp | 0
 .../xformer => xformer}/Transforms/Passes.h | 0
 .../xformer => xformer}/Transforms/ReplaceAdd.cpp | 0
 .../Transforms/ReplaceAvgPoolWithConv2D.cpp | 0
 .../Transforms/ReplaceConv2D.cpp | 0
 .../Transforms/ReplaceFCWithConv2D.cpp | 0
 .../xformer => xformer}/Transforms/ReplaceMul.cpp | 0
 .../Transforms/ReplaceStridedSlice.cpp | 0
 .../xformer => xformer}/Transforms/TFLPatterns.td | 0
 .../Transforms/TranslateToCustomOp.cpp | 0
 .../Transforms/WriteFlashImage.cpp | 0
 .../xformer => xformer}/Transforms/XCPatterns.td | 0
 .../xformer => xformer}/Utils/Diagnostics.h | 0
 {experimental/xformer => xformer}/Utils/FileIO.cpp | 0
 {experimental/xformer => xformer}/Utils/FileIO.h | 0
 .../xformer => xformer}/Utils/ThreadSupport.cpp | 0
 .../xformer => xformer}/Utils/ThreadSupport.h | 0
 {experimental/xformer => xformer}/Utils/Util.cpp | 0
 {experimental/xformer => xformer}/Utils/Util.h | 0
 {experimental/xformer => xformer}/Utils/Utils.td | 0
 {experimental/xformer => xformer}/Version.h | 0
 {experimental/xformer => xformer}/WORKSPACE | 0
 {experimental/xformer => xformer}/XCoreOptMain.cpp | 0
 xformer/external | 1 +
 {experimental/xformer => xformer}/lib_nn.BUILD | 0
 .../xformer => xformer}/lib_tflite_micro.BUILD | 0
 {experimental/xformer => xformer}/lib_tflmc.BUILD | 0
 .../xformer => xformer}/model_maker/save_model.py | 0
 .../train_samples/COCO_train2014_000000002377.jpg | Bin
 .../train_samples/COCO_train2014_000000003157.jpg | Bin
 .../train_samples/COCO_train2014_000000026132.jpg | Bin
 .../train_samples/COCO_train2014_000000026924.jpg | Bin
 .../train_samples/COCO_train2014_000000027622.jpg | Bin
 .../train_samples/COCO_train2014_000000049478.jpg | Bin
 .../train_samples/COCO_train2014_000000050562.jpg | Bin
 .../train_samples/COCO_train2014_000000066822.jpg | Bin
 .../train_samples/COCO_train2014_000000068502.jpg | Bin
 .../train_samples/COCO_train2014_000000070322.jpg | Bin
 .../train_samples/COCO_train2014_000000070659.jpg | Bin
 .../train_samples/COCO_train2014_000000083002.jpg | Bin
 .../train_samples/COCO_train2014_000000085589.jpg | Bin
 .../train_samples/COCO_train2014_000000090498.jpg | Bin
 .../train_samples/COCO_train2014_000000098956.jpg | Bin
 .../train_samples/COCO_train2014_000000100703.jpg | Bin
 .../train_samples/COCO_train2014_000000105708.jpg | Bin
 .../train_samples/COCO_train2014_000000106003.jpg | Bin
 .../train_samples/COCO_train2014_000000115218.jpg | Bin
 .../train_samples/COCO_train2014_000000116003.jpg | Bin
 .../train_samples/COCO_train2014_000000118104.jpg | Bin
 .../train_samples/COCO_train2014_000000123579.jpg | Bin
 .../train_samples/COCO_train2014_000000125586.jpg | Bin
 .../train_samples/COCO_train2014_000000126936.jpg | Bin
 .../train_samples/COCO_train2014_000000131099.jpg | Bin
 .../train_samples/COCO_train2014_000000143944.jpg | Bin
 .../train_samples/COCO_train2014_000000151669.jpg | Bin
 .../train_samples/COCO_train2014_000000157016.jpg | Bin
 .../train_samples/COCO_train2014_000000158107.jpg | Bin
 .../train_samples/COCO_train2014_000000180609.jpg | Bin
 .../train_samples/COCO_train2014_000000190617.jpg | Bin
 .../train_samples/COCO_train2014_000000192322.jpg | Bin
 .../train_samples/COCO_train2014_000000195233.jpg | Bin
 .../train_samples/COCO_train2014_000000205297.jpg | Bin
 .../train_samples/COCO_train2014_000000210258.jpg | Bin
 .../train_samples/COCO_train2014_000000213344.jpg | Bin
 .../train_samples/COCO_train2014_000000217672.jpg | Bin
 .../train_samples/COCO_train2014_000000218057.jpg | Bin
 .../train_samples/COCO_train2014_000000219385.jpg | Bin
 .../train_samples/COCO_train2014_000000223458.jpg | Bin
 .../train_samples/COCO_train2014_000000229347.jpg | Bin
 .../train_samples/COCO_train2014_000000230708.jpg | Bin
 .../train_samples/COCO_train2014_000000232143.jpg | Bin
 .../train_samples/COCO_train2014_000000238843.jpg | Bin
 .../train_samples/COCO_train2014_000000243361.jpg | Bin
 .../train_samples/COCO_train2014_000000243527.jpg | Bin
 .../train_samples/COCO_train2014_000000244528.jpg | Bin
 .../train_samples/COCO_train2014_000000246384.jpg | Bin
 .../train_samples/COCO_train2014_000000247338.jpg | Bin
 .../train_samples/COCO_train2014_000000266880.jpg | Bin
 .../train_samples/COCO_train2014_000000269561.jpg | Bin
 .../train_samples/COCO_train2014_000000271006.jpg | Bin
 .../train_samples/COCO_train2014_000000279672.jpg | Bin
 .../train_samples/COCO_train2014_000000287427.jpg | Bin
 .../train_samples/COCO_train2014_000000292620.jpg | Bin
 .../train_samples/COCO_train2014_000000293805.jpg | Bin
 .../train_samples/COCO_train2014_000000297266.jpg | Bin
 .../train_samples/COCO_train2014_000000300028.jpg | Bin
 .../train_samples/COCO_train2014_000000305287.jpg | Bin
 .../train_samples/COCO_train2014_000000305600.jpg | Bin
 .../train_samples/COCO_train2014_000000307999.jpg | Bin
 .../train_samples/COCO_train2014_000000308302.jpg | Bin
 .../train_samples/COCO_train2014_000000314852.jpg | Bin
 .../train_samples/COCO_train2014_000000316113.jpg | Bin
 .../train_samples/COCO_train2014_000000330391.jpg | Bin
 .../train_samples/COCO_train2014_000000348670.jpg | Bin
 .../train_samples/COCO_train2014_000000355550.jpg | Bin
 .../train_samples/COCO_train2014_000000365512.jpg | Bin
 .../train_samples/COCO_train2014_000000372229.jpg | Bin
 .../train_samples/COCO_train2014_000000378396.jpg | Bin
 .../train_samples/COCO_train2014_000000384475.jpg | Bin
 .../train_samples/COCO_train2014_000000385239.jpg | Bin
 .../train_samples/COCO_train2014_000000387976.jpg | Bin
 .../train_samples/COCO_train2014_000000421619.jpg | Bin
 .../train_samples/COCO_train2014_000000423637.jpg | Bin
 .../train_samples/COCO_train2014_000000444312.jpg | Bin
 .../train_samples/COCO_train2014_000000448461.jpg | Bin
 .../train_samples/COCO_train2014_000000461549.jpg | Bin
 .../train_samples/COCO_train2014_000000466191.jpg | Bin
 .../train_samples/COCO_train2014_000000476125.jpg | Bin
 .../train_samples/COCO_train2014_000000490434.jpg | Bin
 .../train_samples/COCO_train2014_000000503274.jpg | Bin
 .../train_samples/COCO_train2014_000000505655.jpg | Bin
 .../train_samples/COCO_train2014_000000506149.jpg | Bin
 .../train_samples/COCO_train2014_000000508870.jpg | Bin
 .../train_samples/COCO_train2014_000000510211.jpg | Bin
 .../train_samples/COCO_train2014_000000513371.jpg | Bin
 .../train_samples/COCO_train2014_000000517882.jpg | Bin
 .../train_samples/COCO_train2014_000000521048.jpg | Bin
 .../train_samples/COCO_train2014_000000521327.jpg | Bin
 .../train_samples/COCO_train2014_000000532457.jpg | Bin
 .../train_samples/COCO_train2014_000000535649.jpg | Bin
 .../train_samples/COCO_train2014_000000537337.jpg | Bin
 .../train_samples/COCO_train2014_000000542931.jpg | Bin
 .../train_samples/COCO_train2014_000000546147.jpg | Bin
 .../train_samples/COCO_train2014_000000547979.jpg | Bin
 .../train_samples/COCO_train2014_000000549236.jpg | Bin
 .../train_samples/COCO_train2014_000000551869.jpg | Bin
 .../train_samples/COCO_train2014_000000563597.jpg | Bin
 .../train_samples/COCO_train2014_000000563771.jpg | Bin
 {experimental/xformer => xformer}/modelrunner.py | 0
 {experimental/xformer => xformer}/patches/BUILD | 0
 .../patches/flatbuffer_export.patch | 0
 .../xformer => xformer}/patches/softmax.patch | 0
 .../xformer => xformer}/patches/tf_update.patch | 0
 {experimental/xformer => xformer}/toolchain/BUILD | 0
 .../toolchain/cc_toolchain_config.bzl | 0
 {experimental/xformer => xformer}/version_check.sh | 0
 185 files changed, 3 insertions(+), 15 deletions(-)
 delete mode 100644 experimental/README.md
 rename {experimental/xformer => xformer}/.bazelrc (100%)
 rename {experimental/xformer => xformer}/.bazelversion (100%)
 rename {experimental/xformer => xformer}/.clang-format (100%)
 rename {experimental/xformer => xformer}/.gitignore (100%)
 rename {experimental/xformer => xformer}/Analysis/MemoryPlan.cpp (100%)
 rename {experimental/xformer => xformer}/Analysis/MemoryPlan.h (100%)
 rename {experimental/xformer => xformer}/BUILD (100%)
 rename {experimental/xformer => xformer}/IR/XCoreOps.cpp (100%)
 rename {experimental/xformer => xformer}/IR/XCoreOps.h (100%)
 rename {experimental/xformer => xformer}/IR/XCoreOps.td (100%)
 rename {experimental/xformer => xformer}/README.md (100%)
 rename {experimental => xformer}/TESTING.rst (100%)
 rename {experimental/xformer => xformer}/Test/BUILD (100%)
 rename {experimental/xformer => xformer}/Test/activation-lowering.mlir (100%)
 rename {experimental/xformer => xformer}/Test/avg_pool_2d.mlir (100%)
 rename {experimental/xformer => xformer}/Test/conv2d.mlir (100%)
 rename {experimental/xformer => xformer}/Test/convpatterns.mlir (100%)
 rename {experimental/xformer => xformer}/Test/fc-to-conv2d.mlir (100%)
 rename {experimental/xformer => xformer}/Test/invalid-loadconstantop.mlir (100%)
 rename {experimental/xformer => xformer}/Test/invalid-loadflashop.mlir (100%)
 rename {experimental/xformer => xformer}/Test/invalid-op_split.mlir (100%)
 rename {experimental/xformer => xformer}/Test/lit_test.bzl (100%)
 rename {experimental/xformer => xformer}/Test/loadconstantop.mlir (100%)
 rename {experimental/xformer => xformer}/Test/loadflashop.mlir (100%)
 rename {experimental/xformer => xformer}/Test/mlir_example.tflite (100%)
 rename {experimental/xformer => xformer}/Test/op_split.mlir (100%)
 rename {experimental/xformer => xformer}/Test/padding.mlir (100%)
 rename {experimental/xformer => xformer}/Test/pytorch_transpose.mlir (100%)
 rename {experimental/xformer => xformer}/Test/run_lit.sh (100%)
 rename {experimental/xformer => xformer}/Test/tfl_fc.tflite (100%)
 rename {experimental/xformer => xformer}/Test/tflpatterns.mlir (100%)
 rename {experimental/xformer => xformer}/Test/translate-to-customops.mlir (100%)
 rename {experimental/xformer => xformer}/Test/valid_strided_slice.mlir (100%)
 rename {experimental/xformer => xformer}/Transforms/ApplyLoadConstantOpPatterns.cpp (100%)
 rename {experimental/xformer => xformer}/Transforms/ApplyTFLPatterns.cpp (100%)
 rename {experimental/xformer => xformer}/Transforms/ApplyXCPatterns.cpp (100%)
 rename {experimental/xformer => xformer}/Transforms/ConvPatterns.h (100%)
 rename {experimental/xformer => xformer}/Transforms/ConvPatterns.td (100%)
 rename {experimental/xformer => xformer}/Transforms/ConvPatternsLCE.cpp (100%)
 rename {experimental/xformer => xformer}/Transforms/ConvPatternsTFL.cpp (100%)
 rename {experimental/xformer => xformer}/Transforms/ConvRevertPatterns.td (100%)
 rename {experimental/xformer => xformer}/Transforms/LoadConstantOpPatterns.td (100%)
 rename {experimental/xformer => xformer}/Transforms/OpSplit.cpp (100%)
 rename {experimental/xformer => xformer}/Transforms/OptimizeConv2D.cpp (100%)
 rename {experimental/xformer => xformer}/Transforms/OptimizeTranspose.cpp (100%)
 rename {experimental/xformer => xformer}/Transforms/Options.h (100%)
 rename {experimental/xformer => xformer}/Transforms/Passes.cpp (100%)
 rename {experimental/xformer => xformer}/Transforms/Passes.h (100%)
 rename {experimental/xformer => xformer}/Transforms/ReplaceAdd.cpp (100%)
 rename {experimental/xformer => xformer}/Transforms/ReplaceAvgPoolWithConv2D.cpp (100%)
 rename {experimental/xformer => xformer}/Transforms/ReplaceConv2D.cpp (100%)
 rename {experimental/xformer => xformer}/Transforms/ReplaceFCWithConv2D.cpp (100%)
 rename {experimental/xformer => xformer}/Transforms/ReplaceMul.cpp (100%)
 rename {experimental/xformer => xformer}/Transforms/ReplaceStridedSlice.cpp (100%)
 rename {experimental/xformer => xformer}/Transforms/TFLPatterns.td (100%)
 rename {experimental/xformer => xformer}/Transforms/TranslateToCustomOp.cpp (100%)
 rename {experimental/xformer => xformer}/Transforms/WriteFlashImage.cpp (100%)
 rename {experimental/xformer => xformer}/Transforms/XCPatterns.td (100%)
 rename {experimental/xformer => xformer}/Utils/Diagnostics.h (100%)
 rename {experimental/xformer => xformer}/Utils/FileIO.cpp (100%)
 rename {experimental/xformer => xformer}/Utils/FileIO.h (100%)
 rename {experimental/xformer => xformer}/Utils/ThreadSupport.cpp (100%)
 rename {experimental/xformer => xformer}/Utils/ThreadSupport.h (100%)
 rename {experimental/xformer => xformer}/Utils/Util.cpp (100%)
 rename {experimental/xformer => xformer}/Utils/Util.h (100%)
 rename {experimental/xformer => xformer}/Utils/Utils.td (100%)
 rename {experimental/xformer => xformer}/Version.h (100%)
 rename {experimental/xformer => xformer}/WORKSPACE (100%)
 rename {experimental/xformer => xformer}/XCoreOptMain.cpp (100%)
 create mode 120000 xformer/external
 rename {experimental/xformer => xformer}/lib_nn.BUILD (100%)
 rename {experimental/xformer => xformer}/lib_tflite_micro.BUILD (100%)
 rename {experimental/xformer => xformer}/lib_tflmc.BUILD (100%)
 rename {experimental/xformer => xformer}/model_maker/save_model.py (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000002377.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000003157.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000026132.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000026924.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000027622.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000049478.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000050562.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000066822.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000068502.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000070322.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000070659.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000083002.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000085589.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000090498.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000098956.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000100703.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000105708.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000106003.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000115218.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000116003.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000118104.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000123579.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000125586.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000126936.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000131099.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000143944.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000151669.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000157016.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000158107.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000180609.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000190617.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000192322.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000195233.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000205297.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000210258.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000213344.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000217672.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000218057.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000219385.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000223458.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000229347.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000230708.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000232143.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000238843.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000243361.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000243527.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000244528.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000246384.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000247338.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000266880.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000269561.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000271006.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000279672.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000287427.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000292620.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000293805.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000297266.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000300028.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000305287.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000305600.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000307999.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000308302.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000314852.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000316113.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000330391.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000348670.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000355550.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000365512.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000372229.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000378396.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000384475.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000385239.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000387976.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000421619.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000423637.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000444312.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000448461.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000461549.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000466191.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000476125.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000490434.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000503274.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000505655.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000506149.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000508870.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000510211.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000513371.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000517882.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000521048.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000521327.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000532457.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000535649.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000537337.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000542931.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000546147.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000547979.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000549236.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000551869.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000563597.jpg (100%)
 rename {experimental/xformer => xformer}/model_maker/train_samples/COCO_train2014_000000563771.jpg (100%)
 rename {experimental/xformer => xformer}/modelrunner.py (100%)
 rename {experimental/xformer => xformer}/patches/BUILD (100%)
 rename {experimental/xformer => xformer}/patches/flatbuffer_export.patch (100%)
 rename {experimental/xformer => xformer}/patches/softmax.patch (100%)
 rename {experimental/xformer => xformer}/patches/tf_update.patch (100%)
 rename {experimental/xformer => xformer}/toolchain/BUILD (100%)
 rename {experimental/xformer => xformer}/toolchain/cc_toolchain_config.bzl (100%)
 rename {experimental/xformer => xformer}/version_check.sh (100%)

diff --git a/Makefile b/Makefile
index c64691146..84784cdf3 100644
--- a/Makefile
+++ b/Makefile
@@ -35,7 +35,7 @@ xformer2_integration_test:
 #**************************
 .PHONY: version_check
 version_check:
-	cd ./experimental/xformer && ./version_check.sh
+	cd ./xformer && ./version_check.sh
 
 .PHONY: build
 build: version_check xcore_interpreters_build
diff --git a/experimental/README.md b/experimental/README.md
deleted file mode 100644
index c0715ef37..000000000
--- a/experimental/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
-DISCLAIMER
---
-
-This is the experimental folder. Nothing in here is expected to work, and playing with it may even
-mess up things that you previously thought were working. You are welcome to try out projects in this
-area, but you do so at your own risk.
-
-
-Contents
---
-
-* xformer - a TFLite to XCore MLIR based optimizer
-
diff --git a/third_party/lib_tflite_micro b/third_party/lib_tflite_micro
index e82ecfae0..a3d467d27 160000
--- a/third_party/lib_tflite_micro
+++ b/third_party/lib_tflite_micro
@@ -1 +1 @@
-Subproject commit e82ecfae0b8dbb8ea65868fda267c22568e0460e
+Subproject commit a3d467d2770e0afedac6b53bf286da2d81c5dc57
diff --git a/experimental/xformer/.bazelrc b/xformer/.bazelrc
similarity index 100%
rename from experimental/xformer/.bazelrc
rename to xformer/.bazelrc
diff --git a/experimental/xformer/.bazelversion b/xformer/.bazelversion
similarity index 100%
rename from experimental/xformer/.bazelversion
rename to xformer/.bazelversion
diff --git a/experimental/xformer/.clang-format b/xformer/.clang-format
similarity index 100%
rename from experimental/xformer/.clang-format
rename to xformer/.clang-format
diff --git a/experimental/xformer/.gitignore b/xformer/.gitignore
similarity index 100%
rename from experimental/xformer/.gitignore
rename to xformer/.gitignore
diff --git a/experimental/xformer/Analysis/MemoryPlan.cpp b/xformer/Analysis/MemoryPlan.cpp
similarity index 100%
rename from experimental/xformer/Analysis/MemoryPlan.cpp
rename to xformer/Analysis/MemoryPlan.cpp
diff --git a/experimental/xformer/Analysis/MemoryPlan.h b/xformer/Analysis/MemoryPlan.h
similarity index 100%
rename from experimental/xformer/Analysis/MemoryPlan.h
rename to xformer/Analysis/MemoryPlan.h
diff --git a/experimental/xformer/BUILD b/xformer/BUILD
similarity index 100%
rename from experimental/xformer/BUILD
rename to xformer/BUILD
diff --git a/experimental/xformer/IR/XCoreOps.cpp b/xformer/IR/XCoreOps.cpp
similarity index 100%
rename from experimental/xformer/IR/XCoreOps.cpp
rename to xformer/IR/XCoreOps.cpp
diff --git a/experimental/xformer/IR/XCoreOps.h b/xformer/IR/XCoreOps.h
similarity index 100%
rename from experimental/xformer/IR/XCoreOps.h
rename to xformer/IR/XCoreOps.h
diff --git a/experimental/xformer/IR/XCoreOps.td b/xformer/IR/XCoreOps.td
similarity index 100%
rename from experimental/xformer/IR/XCoreOps.td
rename to xformer/IR/XCoreOps.td
diff --git a/experimental/xformer/README.md b/xformer/README.md
similarity index 100%
rename from experimental/xformer/README.md
rename to xformer/README.md
diff --git a/experimental/TESTING.rst b/xformer/TESTING.rst
similarity index 100%
rename from experimental/TESTING.rst
rename to xformer/TESTING.rst
diff --git a/experimental/xformer/Test/BUILD b/xformer/Test/BUILD
similarity index 100%
rename from experimental/xformer/Test/BUILD
rename to xformer/Test/BUILD
diff --git a/experimental/xformer/Test/activation-lowering.mlir b/xformer/Test/activation-lowering.mlir
similarity index 100%
rename from experimental/xformer/Test/activation-lowering.mlir
rename to xformer/Test/activation-lowering.mlir
diff --git a/experimental/xformer/Test/avg_pool_2d.mlir b/xformer/Test/avg_pool_2d.mlir
similarity index 100%
rename from experimental/xformer/Test/avg_pool_2d.mlir
rename to xformer/Test/avg_pool_2d.mlir
diff --git a/experimental/xformer/Test/conv2d.mlir b/xformer/Test/conv2d.mlir
similarity index 100%
rename from experimental/xformer/Test/conv2d.mlir
rename to xformer/Test/conv2d.mlir
diff --git a/experimental/xformer/Test/convpatterns.mlir b/xformer/Test/convpatterns.mlir
similarity index 100%
rename from experimental/xformer/Test/convpatterns.mlir
rename to xformer/Test/convpatterns.mlir
diff --git a/experimental/xformer/Test/fc-to-conv2d.mlir b/xformer/Test/fc-to-conv2d.mlir
similarity index 100%
rename from experimental/xformer/Test/fc-to-conv2d.mlir
rename to xformer/Test/fc-to-conv2d.mlir
diff --git a/experimental/xformer/Test/invalid-loadconstantop.mlir b/xformer/Test/invalid-loadconstantop.mlir
similarity index 100%
rename from experimental/xformer/Test/invalid-loadconstantop.mlir
rename to xformer/Test/invalid-loadconstantop.mlir
diff --git a/experimental/xformer/Test/invalid-loadflashop.mlir b/xformer/Test/invalid-loadflashop.mlir
similarity index 100%
rename from experimental/xformer/Test/invalid-loadflashop.mlir
rename to xformer/Test/invalid-loadflashop.mlir
diff --git a/experimental/xformer/Test/invalid-op_split.mlir b/xformer/Test/invalid-op_split.mlir
similarity index 100%
rename from experimental/xformer/Test/invalid-op_split.mlir
rename to xformer/Test/invalid-op_split.mlir
diff --git a/experimental/xformer/Test/lit_test.bzl b/xformer/Test/lit_test.bzl
similarity index 100%
rename from experimental/xformer/Test/lit_test.bzl
rename to xformer/Test/lit_test.bzl
diff --git a/experimental/xformer/Test/loadconstantop.mlir b/xformer/Test/loadconstantop.mlir
similarity index 100%
rename from experimental/xformer/Test/loadconstantop.mlir
rename to xformer/Test/loadconstantop.mlir
diff --git a/experimental/xformer/Test/loadflashop.mlir b/xformer/Test/loadflashop.mlir
similarity index 100%
rename from experimental/xformer/Test/loadflashop.mlir
rename to xformer/Test/loadflashop.mlir
diff --git a/experimental/xformer/Test/mlir_example.tflite b/xformer/Test/mlir_example.tflite
similarity index 100%
rename from experimental/xformer/Test/mlir_example.tflite
rename to xformer/Test/mlir_example.tflite
diff --git a/experimental/xformer/Test/op_split.mlir b/xformer/Test/op_split.mlir
similarity index 100%
rename from experimental/xformer/Test/op_split.mlir
rename to xformer/Test/op_split.mlir
diff --git a/experimental/xformer/Test/padding.mlir b/xformer/Test/padding.mlir
similarity index 100%
rename from experimental/xformer/Test/padding.mlir
rename to xformer/Test/padding.mlir
diff --git a/experimental/xformer/Test/pytorch_transpose.mlir b/xformer/Test/pytorch_transpose.mlir
similarity index 100%
rename from experimental/xformer/Test/pytorch_transpose.mlir
rename to xformer/Test/pytorch_transpose.mlir
diff --git a/experimental/xformer/Test/run_lit.sh b/xformer/Test/run_lit.sh
similarity index 100%
rename from experimental/xformer/Test/run_lit.sh
rename to xformer/Test/run_lit.sh
diff --git a/experimental/xformer/Test/tfl_fc.tflite b/xformer/Test/tfl_fc.tflite
similarity index 100%
rename from experimental/xformer/Test/tfl_fc.tflite
rename to xformer/Test/tfl_fc.tflite
diff --git a/experimental/xformer/Test/tflpatterns.mlir b/xformer/Test/tflpatterns.mlir
similarity index 100%
rename from experimental/xformer/Test/tflpatterns.mlir
rename to xformer/Test/tflpatterns.mlir
diff --git a/experimental/xformer/Test/translate-to-customops.mlir b/xformer/Test/translate-to-customops.mlir
similarity index 100%
rename from experimental/xformer/Test/translate-to-customops.mlir
rename to xformer/Test/translate-to-customops.mlir
diff --git a/experimental/xformer/Test/valid_strided_slice.mlir b/xformer/Test/valid_strided_slice.mlir
similarity index 100%
rename from experimental/xformer/Test/valid_strided_slice.mlir
rename to xformer/Test/valid_strided_slice.mlir
diff --git a/experimental/xformer/Transforms/ApplyLoadConstantOpPatterns.cpp b/xformer/Transforms/ApplyLoadConstantOpPatterns.cpp
similarity index 100%
rename from experimental/xformer/Transforms/ApplyLoadConstantOpPatterns.cpp
rename to xformer/Transforms/ApplyLoadConstantOpPatterns.cpp
diff --git a/experimental/xformer/Transforms/ApplyTFLPatterns.cpp b/xformer/Transforms/ApplyTFLPatterns.cpp
similarity index 100%
rename from experimental/xformer/Transforms/ApplyTFLPatterns.cpp
rename to xformer/Transforms/ApplyTFLPatterns.cpp
diff --git a/experimental/xformer/Transforms/ApplyXCPatterns.cpp b/xformer/Transforms/ApplyXCPatterns.cpp
similarity index 100%
rename from experimental/xformer/Transforms/ApplyXCPatterns.cpp
rename to xformer/Transforms/ApplyXCPatterns.cpp
diff --git a/experimental/xformer/Transforms/ConvPatterns.h b/xformer/Transforms/ConvPatterns.h
similarity index 100%
rename from experimental/xformer/Transforms/ConvPatterns.h
rename to xformer/Transforms/ConvPatterns.h
diff --git a/experimental/xformer/Transforms/ConvPatterns.td b/xformer/Transforms/ConvPatterns.td
similarity index 100%
rename from experimental/xformer/Transforms/ConvPatterns.td
rename to xformer/Transforms/ConvPatterns.td
diff --git a/experimental/xformer/Transforms/ConvPatternsLCE.cpp b/xformer/Transforms/ConvPatternsLCE.cpp
similarity index 100%
rename from experimental/xformer/Transforms/ConvPatternsLCE.cpp
rename to xformer/Transforms/ConvPatternsLCE.cpp
diff --git a/experimental/xformer/Transforms/ConvPatternsTFL.cpp b/xformer/Transforms/ConvPatternsTFL.cpp
similarity index 100%
rename from experimental/xformer/Transforms/ConvPatternsTFL.cpp
rename to xformer/Transforms/ConvPatternsTFL.cpp
diff --git a/experimental/xformer/Transforms/ConvRevertPatterns.td b/xformer/Transforms/ConvRevertPatterns.td
similarity index 100%
rename from experimental/xformer/Transforms/ConvRevertPatterns.td
rename to xformer/Transforms/ConvRevertPatterns.td
diff --git a/experimental/xformer/Transforms/LoadConstantOpPatterns.td b/xformer/Transforms/LoadConstantOpPatterns.td
similarity index 100%
rename from experimental/xformer/Transforms/LoadConstantOpPatterns.td
rename to xformer/Transforms/LoadConstantOpPatterns.td
diff --git a/experimental/xformer/Transforms/OpSplit.cpp b/xformer/Transforms/OpSplit.cpp
similarity index 100%
rename from experimental/xformer/Transforms/OpSplit.cpp
rename to xformer/Transforms/OpSplit.cpp
diff --git a/experimental/xformer/Transforms/OptimizeConv2D.cpp b/xformer/Transforms/OptimizeConv2D.cpp
similarity index 100%
rename from experimental/xformer/Transforms/OptimizeConv2D.cpp
rename to xformer/Transforms/OptimizeConv2D.cpp
diff --git a/experimental/xformer/Transforms/OptimizeTranspose.cpp b/xformer/Transforms/OptimizeTranspose.cpp
similarity index 100%
rename from experimental/xformer/Transforms/OptimizeTranspose.cpp
rename to xformer/Transforms/OptimizeTranspose.cpp
diff --git a/experimental/xformer/Transforms/Options.h b/xformer/Transforms/Options.h
similarity index 100%
rename from experimental/xformer/Transforms/Options.h
rename to xformer/Transforms/Options.h
diff --git a/experimental/xformer/Transforms/Passes.cpp b/xformer/Transforms/Passes.cpp
similarity index 100%
rename from experimental/xformer/Transforms/Passes.cpp
rename to xformer/Transforms/Passes.cpp
diff --git a/experimental/xformer/Transforms/Passes.h b/xformer/Transforms/Passes.h
similarity index 100%
rename from experimental/xformer/Transforms/Passes.h
rename to xformer/Transforms/Passes.h
diff --git a/experimental/xformer/Transforms/ReplaceAdd.cpp b/xformer/Transforms/ReplaceAdd.cpp
similarity index 100%
rename from experimental/xformer/Transforms/ReplaceAdd.cpp
rename to xformer/Transforms/ReplaceAdd.cpp
diff --git a/experimental/xformer/Transforms/ReplaceAvgPoolWithConv2D.cpp b/xformer/Transforms/ReplaceAvgPoolWithConv2D.cpp
similarity index 100%
rename from experimental/xformer/Transforms/ReplaceAvgPoolWithConv2D.cpp
rename to xformer/Transforms/ReplaceAvgPoolWithConv2D.cpp
diff --git a/experimental/xformer/Transforms/ReplaceConv2D.cpp b/xformer/Transforms/ReplaceConv2D.cpp
similarity index 100%
rename from experimental/xformer/Transforms/ReplaceConv2D.cpp
rename to xformer/Transforms/ReplaceConv2D.cpp
diff --git a/experimental/xformer/Transforms/ReplaceFCWithConv2D.cpp b/xformer/Transforms/ReplaceFCWithConv2D.cpp
similarity index 100%
rename from experimental/xformer/Transforms/ReplaceFCWithConv2D.cpp
rename to xformer/Transforms/ReplaceFCWithConv2D.cpp
diff --git a/experimental/xformer/Transforms/ReplaceMul.cpp b/xformer/Transforms/ReplaceMul.cpp
similarity index 100%
rename from experimental/xformer/Transforms/ReplaceMul.cpp
rename to xformer/Transforms/ReplaceMul.cpp
diff --git a/experimental/xformer/Transforms/ReplaceStridedSlice.cpp b/xformer/Transforms/ReplaceStridedSlice.cpp
similarity index 100%
rename from experimental/xformer/Transforms/ReplaceStridedSlice.cpp
rename to xformer/Transforms/ReplaceStridedSlice.cpp
diff --git a/experimental/xformer/Transforms/TFLPatterns.td b/xformer/Transforms/TFLPatterns.td
similarity index 100%
rename from experimental/xformer/Transforms/TFLPatterns.td
rename to xformer/Transforms/TFLPatterns.td
diff --git a/experimental/xformer/Transforms/TranslateToCustomOp.cpp b/xformer/Transforms/TranslateToCustomOp.cpp
similarity index 100%
rename from experimental/xformer/Transforms/TranslateToCustomOp.cpp
rename to xformer/Transforms/TranslateToCustomOp.cpp
diff --git a/experimental/xformer/Transforms/WriteFlashImage.cpp b/xformer/Transforms/WriteFlashImage.cpp
similarity index 100%
rename from experimental/xformer/Transforms/WriteFlashImage.cpp
rename to xformer/Transforms/WriteFlashImage.cpp
diff --git a/experimental/xformer/Transforms/XCPatterns.td b/xformer/Transforms/XCPatterns.td
similarity index 100%
rename from experimental/xformer/Transforms/XCPatterns.td
rename to xformer/Transforms/XCPatterns.td
diff --git a/experimental/xformer/Utils/Diagnostics.h b/xformer/Utils/Diagnostics.h
similarity index 100%
rename from experimental/xformer/Utils/Diagnostics.h
rename to xformer/Utils/Diagnostics.h
diff --git a/experimental/xformer/Utils/FileIO.cpp b/xformer/Utils/FileIO.cpp
similarity index 100%
rename from experimental/xformer/Utils/FileIO.cpp
rename to xformer/Utils/FileIO.cpp
diff --git a/experimental/xformer/Utils/FileIO.h b/xformer/Utils/FileIO.h
similarity index 100%
rename from experimental/xformer/Utils/FileIO.h
rename to xformer/Utils/FileIO.h
diff --git a/experimental/xformer/Utils/ThreadSupport.cpp b/xformer/Utils/ThreadSupport.cpp
similarity index 100%
rename from experimental/xformer/Utils/ThreadSupport.cpp
rename to xformer/Utils/ThreadSupport.cpp
diff --git a/experimental/xformer/Utils/ThreadSupport.h b/xformer/Utils/ThreadSupport.h
similarity index 100%
rename from experimental/xformer/Utils/ThreadSupport.h
rename to xformer/Utils/ThreadSupport.h
diff --git a/experimental/xformer/Utils/Util.cpp b/xformer/Utils/Util.cpp
similarity index 100%
rename from experimental/xformer/Utils/Util.cpp
rename to xformer/Utils/Util.cpp
diff --git a/experimental/xformer/Utils/Util.h b/xformer/Utils/Util.h
similarity index 100%
rename from experimental/xformer/Utils/Util.h
rename to xformer/Utils/Util.h
diff --git a/experimental/xformer/Utils/Utils.td b/xformer/Utils/Utils.td
similarity index 100%
rename from experimental/xformer/Utils/Utils.td
rename to xformer/Utils/Utils.td
diff --git a/experimental/xformer/Version.h b/xformer/Version.h
similarity index 100%
rename from experimental/xformer/Version.h
rename to xformer/Version.h
diff --git a/experimental/xformer/WORKSPACE b/xformer/WORKSPACE
similarity index 100%
rename from experimental/xformer/WORKSPACE
rename to xformer/WORKSPACE
diff --git a/experimental/xformer/XCoreOptMain.cpp b/xformer/XCoreOptMain.cpp
similarity index 100%
rename from experimental/xformer/XCoreOptMain.cpp
rename to xformer/XCoreOptMain.cpp
diff --git a/xformer/external b/xformer/external
new file mode 120000
index 000000000..3ee3af63e
--- /dev/null
+++ b/xformer/external
@@ -0,0 +1 @@
+bazel-out/../../../external
\ No newline at end of file
diff --git a/experimental/xformer/lib_nn.BUILD b/xformer/lib_nn.BUILD
similarity index 100%
rename from experimental/xformer/lib_nn.BUILD
rename to xformer/lib_nn.BUILD
diff --git a/experimental/xformer/lib_tflite_micro.BUILD b/xformer/lib_tflite_micro.BUILD
similarity index 100%
rename from experimental/xformer/lib_tflite_micro.BUILD
rename to xformer/lib_tflite_micro.BUILD
diff --git a/experimental/xformer/lib_tflmc.BUILD b/xformer/lib_tflmc.BUILD
similarity index 100%
rename from experimental/xformer/lib_tflmc.BUILD
rename to xformer/lib_tflmc.BUILD
diff --git a/experimental/xformer/model_maker/save_model.py b/xformer/model_maker/save_model.py
similarity index 100%
rename from experimental/xformer/model_maker/save_model.py
rename to xformer/model_maker/save_model.py
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000002377.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000002377.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000002377.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000002377.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000003157.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000003157.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000003157.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000003157.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000026132.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000026132.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000026132.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000026132.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000026924.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000026924.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000026924.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000026924.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000027622.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000027622.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000027622.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000027622.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000049478.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000049478.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000049478.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000049478.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000050562.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000050562.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000050562.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000050562.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000066822.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000066822.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000066822.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000066822.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000068502.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000068502.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000068502.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000068502.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000070322.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000070322.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000070322.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000070322.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000070659.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000070659.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000070659.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000070659.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000083002.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000083002.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000083002.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000083002.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000085589.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000085589.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000085589.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000085589.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000090498.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000090498.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000090498.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000090498.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000098956.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000098956.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000098956.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000098956.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000100703.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000100703.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000100703.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000100703.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000105708.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000105708.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000105708.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000105708.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000106003.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000106003.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000106003.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000106003.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000115218.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000115218.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000115218.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000115218.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000116003.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000116003.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000116003.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000116003.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000118104.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000118104.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000118104.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000118104.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000123579.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000123579.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000123579.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000123579.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000125586.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000125586.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000125586.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000125586.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000126936.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000126936.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000126936.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000126936.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000131099.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000131099.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000131099.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000131099.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000143944.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000143944.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000143944.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000143944.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000151669.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000151669.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000151669.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000151669.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000157016.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000157016.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000157016.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000157016.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000158107.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000158107.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000158107.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000158107.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000180609.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000180609.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000180609.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000180609.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000190617.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000190617.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000190617.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000190617.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000192322.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000192322.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000192322.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000192322.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000195233.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000195233.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000195233.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000195233.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000205297.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000205297.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000205297.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000205297.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000210258.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000210258.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000210258.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000210258.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000213344.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000213344.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000213344.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000213344.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000217672.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000217672.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000217672.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000217672.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000218057.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000218057.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000218057.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000218057.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000219385.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000219385.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000219385.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000219385.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000223458.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000223458.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000223458.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000223458.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000229347.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000229347.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000229347.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000229347.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000230708.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000230708.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000230708.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000230708.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000232143.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000232143.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000232143.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000232143.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000238843.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000238843.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000238843.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000238843.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000243361.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000243361.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000243361.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000243361.jpg
diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000243527.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000243527.jpg
similarity index 100%
rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000243527.jpg
rename to xformer/model_maker/train_samples/COCO_train2014_000000243527.jpg
experimental/xformer/model_maker/train_samples/COCO_train2014_000000243527.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000243527.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000244528.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000244528.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000244528.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000244528.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000246384.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000246384.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000246384.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000246384.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000247338.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000247338.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000247338.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000247338.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000266880.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000266880.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000266880.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000266880.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000269561.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000269561.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000269561.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000269561.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000271006.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000271006.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000271006.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000271006.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000279672.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000279672.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000279672.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000279672.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000287427.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000287427.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000287427.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000287427.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000292620.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000292620.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000292620.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000292620.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000293805.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000293805.jpg similarity index 100% rename from 
experimental/xformer/model_maker/train_samples/COCO_train2014_000000293805.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000293805.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000297266.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000297266.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000297266.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000297266.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000300028.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000300028.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000300028.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000300028.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000305287.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000305287.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000305287.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000305287.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000305600.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000305600.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000305600.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000305600.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000307999.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000307999.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000307999.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000307999.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000308302.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000308302.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000308302.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000308302.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000314852.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000314852.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000314852.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000314852.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000316113.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000316113.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000316113.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000316113.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000330391.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000330391.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000330391.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000330391.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000348670.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000348670.jpg similarity index 100% rename from 
experimental/xformer/model_maker/train_samples/COCO_train2014_000000348670.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000348670.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000355550.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000355550.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000355550.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000355550.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000365512.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000365512.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000365512.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000365512.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000372229.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000372229.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000372229.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000372229.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000378396.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000378396.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000378396.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000378396.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000384475.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000384475.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000384475.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000384475.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000385239.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000385239.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000385239.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000385239.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000387976.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000387976.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000387976.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000387976.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000421619.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000421619.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000421619.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000421619.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000423637.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000423637.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000423637.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000423637.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000444312.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000444312.jpg similarity index 100% rename from 
experimental/xformer/model_maker/train_samples/COCO_train2014_000000444312.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000444312.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000448461.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000448461.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000448461.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000448461.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000461549.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000461549.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000461549.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000461549.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000466191.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000466191.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000466191.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000466191.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000476125.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000476125.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000476125.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000476125.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000490434.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000490434.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000490434.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000490434.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000503274.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000503274.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000503274.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000503274.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000505655.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000505655.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000505655.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000505655.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000506149.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000506149.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000506149.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000506149.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000508870.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000508870.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000508870.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000508870.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000510211.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000510211.jpg similarity index 100% rename from 
experimental/xformer/model_maker/train_samples/COCO_train2014_000000510211.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000510211.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000513371.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000513371.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000513371.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000513371.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000517882.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000517882.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000517882.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000517882.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000521048.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000521048.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000521048.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000521048.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000521327.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000521327.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000521327.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000521327.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000532457.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000532457.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000532457.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000532457.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000535649.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000535649.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000535649.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000535649.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000537337.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000537337.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000537337.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000537337.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000542931.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000542931.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000542931.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000542931.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000546147.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000546147.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000546147.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000546147.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000547979.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000547979.jpg similarity index 100% rename from 
experimental/xformer/model_maker/train_samples/COCO_train2014_000000547979.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000547979.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000549236.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000549236.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000549236.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000549236.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000551869.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000551869.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000551869.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000551869.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000563597.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000563597.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000563597.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000563597.jpg diff --git a/experimental/xformer/model_maker/train_samples/COCO_train2014_000000563771.jpg b/xformer/model_maker/train_samples/COCO_train2014_000000563771.jpg similarity index 100% rename from experimental/xformer/model_maker/train_samples/COCO_train2014_000000563771.jpg rename to xformer/model_maker/train_samples/COCO_train2014_000000563771.jpg diff --git a/experimental/xformer/modelrunner.py b/xformer/modelrunner.py similarity index 100% rename from experimental/xformer/modelrunner.py rename to xformer/modelrunner.py diff --git a/experimental/xformer/patches/BUILD b/xformer/patches/BUILD similarity index 100% rename from experimental/xformer/patches/BUILD rename to xformer/patches/BUILD diff --git a/experimental/xformer/patches/flatbuffer_export.patch b/xformer/patches/flatbuffer_export.patch similarity index 100% rename from experimental/xformer/patches/flatbuffer_export.patch rename to xformer/patches/flatbuffer_export.patch diff --git a/experimental/xformer/patches/softmax.patch b/xformer/patches/softmax.patch similarity index 100% rename from experimental/xformer/patches/softmax.patch rename to xformer/patches/softmax.patch diff --git a/experimental/xformer/patches/tf_update.patch b/xformer/patches/tf_update.patch similarity index 100% rename from experimental/xformer/patches/tf_update.patch rename to xformer/patches/tf_update.patch diff --git a/experimental/xformer/toolchain/BUILD b/xformer/toolchain/BUILD similarity index 100% rename from experimental/xformer/toolchain/BUILD rename to xformer/toolchain/BUILD diff --git a/experimental/xformer/toolchain/cc_toolchain_config.bzl b/xformer/toolchain/cc_toolchain_config.bzl similarity index 100% rename from experimental/xformer/toolchain/cc_toolchain_config.bzl rename to xformer/toolchain/cc_toolchain_config.bzl diff --git a/experimental/xformer/version_check.sh b/xformer/version_check.sh similarity index 100% rename from experimental/xformer/version_check.sh rename to xformer/version_check.sh From 6c0647f4a3b07f4b70b23f105cda961ef6a4decf Mon Sep 17 00:00:00 2001 From: Michael Poluektov Date: Wed, 16 Aug 2023 15:23:27 +0000 Subject: [PATCH 02/15] replace experimental/xformer with xformer, fix relative paths --- .github/scripts/build_linux_wheel.sh | 4 ++-- .github/workflows/build-and-test.yml | 6 +++--- .github/workflows/release.yml | 8 ++++---- Jenkinsfile 
From 6c0647f4a3b07f4b70b23f105cda961ef6a4decf Mon Sep 17 00:00:00 2001
From: Michael Poluektov
Date: Wed, 16 Aug 2023 15:23:27 +0000
Subject: [PATCH 02/15] replace experimental/xformer with xformer, fix
 relative paths

---
 .github/scripts/build_linux_wheel.sh |  4 ++--
 .github/workflows/build-and-test.yml |  6 +++---
 .github/workflows/release.yml        |  8 ++++----
 Jenkinsfile                          |  4 ++--
 Makefile                             | 20 ++++++++++----------
 docs/rst/build-xformer.rst           |  2 +-
 python/setup.py                      | 19 ++++++++++++-------
 xformer/README.md                    |  4 ++--
 8 files changed, 36 insertions(+), 31 deletions(-)

diff --git a/.github/scripts/build_linux_wheel.sh b/.github/scripts/build_linux_wheel.sh
index 30acf36e9..26f6cefa3 100755
--- a/.github/scripts/build_linux_wheel.sh
+++ b/.github/scripts/build_linux_wheel.sh
@@ -27,10 +27,10 @@ CC=/dt9/usr/bin/gcc CXX=/dt9/usr/bin/g++ make build
 # Build xcore-opt
 # Crosstool toolchain info is mentioned here, "--crosstool_top"
 # https://github.com/tensorflow/tensorflow/blob/master/.bazelrc
-cd experimental/xformer
+cd xformer
 bazel build //:xcore-opt --verbose_failures --linkopt=-lrt --crosstool_top=@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain --//:disable_version_check
 
 # Build python wheel
-cd ../../python
+cd ../python
 python setup.py bdist_wheel
diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml
index 336cda19d..9833d5925 100644
--- a/.github/workflows/build-and-test.yml
+++ b/.github/workflows/build-and-test.yml
@@ -58,7 +58,7 @@ jobs:
       - name: Build xformer
         shell: bash
         run: |
-          cd $GITHUB_WORKSPACE/experimental/xformer
+          cd $GITHUB_WORKSPACE/xformer
           if [[ -n $GOOGLE_APPLICATION_CREDENTIALS ]]; then
             echo "using Google Cloud Storage cache"
             echo -e 'build --remote_cache=${{ env.BAZEL_CACHE_URL }}/${{ runner.os }}-${{ runner.arch }}-python${{ matrix.python-version }}' >> .bazelrc.user
@@ -115,7 +115,7 @@ jobs:
         # auditwheel requires patchelf and repairs the created wheel file to
         # be compatible for manylinux.
         run: |
-          pushd $GITHUB_WORKSPACE/experimental/xformer
+          pushd $GITHUB_WORKSPACE/xformer
           if [[ -n $GOOGLE_APPLICATION_CREDENTIALS ]]; then
             echo "using Google Cloud Storage cache"
             echo -e 'build --remote_cache=${{ env.BAZEL_CACHE_URL }}/${{ runner.os }}-${{ runner.arch }}-python${{ matrix.python-version }}' >> .bazelrc.user
@@ -199,7 +199,7 @@ jobs:
       - name: Build xformer
         shell: cmd
        run: |
-          cd %GITHUB_WORKSPACE%/experimental/xformer
+          cd %GITHUB_WORKSPACE%/xformer
           IF DEFINED GOOGLE_APPLICATION_CREDENTIALS set "BAZEL_EXTRA_ARGS=--remote_cache=${{ env.BAZEL_CACHE_URL }}/${{ runner.os }}-${{ runner.arch }}-python${{ matrix.python-version }} --google_default_credentials --remote_upload_local_results=true --experimental_remote_cache_compression"
           set BAZEL_VC=C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC
           bazelisk build //:xcore-opt --verbose_failures --local_ram_resources=5120 --action_env PYTHON_BIN_PATH="C:/hostedtoolcache/windows/Python/3.8.10/x64/python.exe" %BAZEL_EXTRA_ARGS% --//:disable_version_check
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 8e0296ee2..a0d6ea369 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -52,7 +52,7 @@ jobs:
           cd $GITHUB_WORKSPACE
           make build
 
-          cd $GITHUB_WORKSPACE/experimental/xformer
+          cd $GITHUB_WORKSPACE/xformer
           if [[ -n $GOOGLE_APPLICATION_CREDENTIALS ]]; then
             echo "using Google Cloud Storage cache"
             echo -e 'build --remote_cache=${{ env.BAZEL_CACHE_URL }}/${{ runner.os }}-${{ runner.arch }}-python${{ matrix.python-version }}' >> .bazelrc.user
@@ -105,7 +105,7 @@ jobs:
           cd $GITHUB_WORKSPACE
           make build
 
-          cd $GITHUB_WORKSPACE/experimental/xformer
+          cd $GITHUB_WORKSPACE/xformer
           if [[ -n $GOOGLE_APPLICATION_CREDENTIALS ]]; then
             echo "using Google Cloud Storage cache"
             echo -e 'build --remote_cache=${{ env.BAZEL_CACHE_URL }}/${{ runner.os }}-arm64-python${{ matrix.python-version }}' >> .bazelrc.user
@@ -151,7 +151,7 @@ jobs:
         # auditwheel requires patchelf and repairs the created wheel file to
         # be compatible for manylinux.
         run: |
-          pushd $GITHUB_WORKSPACE/experimental/xformer
+          pushd $GITHUB_WORKSPACE/xformer
           if [[ -n $GOOGLE_APPLICATION_CREDENTIALS ]]; then
             echo "using Google Cloud Storage cache"
             echo -e 'build --remote_cache=${{ env.BAZEL_CACHE_URL }}/${{ runner.os }}-${{ runner.arch }}-python${{ matrix.python-version }}' >> .bazelrc.user
@@ -227,7 +227,7 @@ jobs:
           cmake ..
           cmake --build . -t install --parallel --config Release
-          cd %GITHUB_WORKSPACE%/experimental/xformer
+          cd %GITHUB_WORKSPACE%/xformer
           set BAZEL_VC=C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC
           bazelisk build //:xcore-opt --local_ram_resources=5120 --action_env PYTHON_BIN_PATH="C:/hostedtoolcache/windows/Python/3.8.10/x64/python.exe" --remote_cache=${{ env.BAZEL_CACHE_URL }}/${{ runner.os }}-${{ runner.arch }}-python${{ matrix.python-version }} --google_default_credentials --//:disable_version_check
 
       - name: Build windows wheels
diff --git a/Jenkinsfile b/Jenkinsfile
index f3ed326b0..6aba21825 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -54,7 +54,7 @@ pipeline {
             // build dll_interpreter for python interface
             sh "make build"
             // build xformer
-            dir("experimental/xformer") {
+            dir("xformer") {
               sh "wget https://github.com/bazelbuild/bazelisk/releases/download/v1.16.0/bazelisk-linux-amd64"
               sh "chmod +x bazelisk-linux-amd64"
               sh "./bazelisk-linux-amd64 build --remote_cache=${env.BAZEL_CACHE_URL} //:xcore-opt --verbose_failures --//:disable_version_check"
@@ -80,7 +80,7 @@ pipeline {
     stage("Host Test") {
       steps {
         withVenv {
-          dir("experimental/xformer") {
+          dir("xformer") {
             // xformer2 unit tests
            sh "./bazelisk-linux-amd64 test --remote_cache=${env.BAZEL_CACHE_URL} //Test:all --verbose_failures --test_output=errors --//:disable_version_check"
          }
diff --git a/Makefile b/Makefile
index 84784cdf3..b691c6e89 100644
--- a/Makefile
+++ b/Makefile
@@ -73,21 +73,21 @@ help:
 
 .PHONY: init_linux
 init_linux:
-	export BAZEL_VERSION=`cat experimental/xformer/.bazelversion` ;\
+	export BAZEL_VERSION=`cat xformer/.bazelversion` ;\
	curl -fLO "https://github.com/bazelbuild/bazel/releases/download/$${BAZEL_VERSION}/bazel-$${BAZEL_VERSION}-installer-linux-x86_64.sh" && \
	chmod +x bazel-$${BAZEL_VERSION}-installer-linux-x86_64.sh && \
	./bazel-$${BAZEL_VERSION}-installer-linux-x86_64.sh --prefix=$$PWD/bazel
 
 .PHONY: init_darwin
 init_darwin:
-	export BAZEL_VERSION=`cat experimental/xformer/.bazelversion` ;\
+	export BAZEL_VERSION=`cat xformer/.bazelversion` ;\
	curl -fLO "https://github.com/bazelbuild/bazel/releases/download/$${BAZEL_VERSION}/bazel-$${BAZEL_VERSION}-installer-darwin-x86_64.sh" && \
	chmod +x bazel-$${BAZEL_VERSION}-installer-darwin-x86_64.sh && \
	./bazel-$${BAZEL_VERSION}-installer-darwin-x86_64.sh --prefix=$$PWD/bazel
 
 .PHONY: init_windows
 init_windows:
-	export BAZEL_VERSION=`cat experimental/xformer/.bazelversion` ;\
+	export BAZEL_VERSION=`cat xformer/.bazelversion` ;\
	curl -fLO 'https://github.com/bazelbuild/bazel/releases/download/${BAZEL_VERSION}/bazel-${BAZEL_VERSION}-windows-x86_64.exe'
	mv bazel-${BAZEL_VERSION}-windows-x86_64.exe bazel.exe
@@ -102,10 +102,10 @@ build_release_linux:
	python3 -m venv .venv && \
	. .venv/bin/activate && \
	pip install -r requirements.txt && \
-	cd experimental/xformer && ../../bazel/bin/bazel build --remote_cache=http://srv-bri-bld-cache:8080 //:xcore-opt --verbose_failures)
+	cd xformer && ../bazel/bin/bazel build --remote_cache=http://srv-bri-bld-cache:8080 //:xcore-opt --verbose_failures)
	rm -rf ../Installs/Linux/External/xformer
	mkdir -p ../Installs/Linux/External/xformer
-	cp experimental/xformer/bazel-bin/xcore-opt ../Installs/Linux/External/xformer
+	cp xformer/bazel-bin/xcore-opt ../Installs/Linux/External/xformer
 
 .PHONY: build_release_darwin
 build_release_darwin:
@@ -113,24 +113,24 @@ build_release_darwin:
	. .venv/bin/activate && \
	pip3 install --upgrade pip && \
	pip3 install -r requirements.txt && \
-	cd experimental/xformer && ../../bazel/bin/bazel build --remote_cache=http://srv-bri-bld-cache:8080 --config=darwin_config //:xcore-opt --verbose_failures)
+	cd xformer && ../bazel/bin/bazel build --remote_cache=http://srv-bri-bld-cache:8080 --config=darwin_config //:xcore-opt --verbose_failures)
	rm -rf ../Installs/Mac/External/xformer
	mkdir -p ../Installs/Mac/External/xformer
-	cp experimental/xformer/bazel-bin/xcore-opt ../Installs/Mac/External/xformer
+	cp xformer/bazel-bin/xcore-opt ../Installs/Mac/External/xformer
 
 .PHONY: build_release_windows
 build_release_windows:
	python3 -m venv .venv
	(. .venv/bin/activate && pip install -r requirements.txt)
-	(. .venv/bin/activate && cd experimental/xformer && ../../bazel build --remote_cache=http://srv-bri-bld-cache:8080 --config=windows_config //:xcore-opt --verbose_failures)
+	(. .venv/bin/activate && cd xformer && ../bazel build --remote_cache=http://srv-bri-bld-cache:8080 --config=windows_config //:xcore-opt --verbose_failures)
	mkdir -p ../Installs/Linux/External/xformer
-	cp experimental/xformer/bazel-bin/xcore-opt ../Installs/Windows/External/xformer
+	cp xformer/bazel-bin/xcore-opt ../Installs/Windows/External/xformer
 
 TEST_SCRIPT= \
 (cd xmos_ai_tools/src/xinterpreters/host/ && make build)&& \
 (cd xmos_ai_tools && python3 setup.py bdist_wheel &&\
 pip install ./xmos_ai_tools/dist/*"&& \
-(cd experimental/xformer && ../../bazel/bin/bazel test --remote_cache=http://srv-bri-bld-cache:8080 //Test:all --verbose_failures)&& \
+(cd xformer && ../bazel/bin/bazel test --remote_cache=http://srv-bri-bld-cache:8080 //Test:all --verbose_failures)&& \
 (pytest integration_tests/runner.py --models_path integration_tests/models/non-bnns -n $(NUM_PROCS) --junitxml=integration_non_bnns_junit.xml)&& \
 (pytest integration_tests/runner.py --models_path integration_tests/models/bnns --bnn -n $(NUM_PROCS) --junitxml=integration_bnns_junit.xml)
diff --git a/docs/rst/build-xformer.rst b/docs/rst/build-xformer.rst
index fa03cb1b7..1221e09cf 100644
--- a/docs/rst/build-xformer.rst
+++ b/docs/rst/build-xformer.rst
@@ -28,4 +28,4 @@ Building xmos-ai-tools
 
 * Build the XCore host tflm interpreter libraries with default settings, run:: ``make build``
 
-After following the above instructions, to build xformer, please follow the build instructions `here <../../experimental/xformer#readme>`_. Upon building xformer, you may install ``xmos-ai-tools`` via ``cd python && pip install .``
+After following the above instructions, to build xformer, please follow the build instructions `here <../../xformer#readme>`_. Upon building xformer, you may install ``xmos-ai-tools`` via ``cd python && pip install .``
diff --git a/python/setup.py b/python/setup.py
index e4e4b646f..e49a1f0e1 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -16,15 +16,19 @@ here = pathlib.Path(__file__).parent.resolve()
 exe_suffix = ".exe" if platform.system() == "Windows" else ""
 
 XCOREOPT_BINARY: Union[pathlib.Path, str] = pathlib.Path.joinpath(
-    here.parent, "experimental", "xformer", "bazel-bin", "xcore-opt"
+    here.parent, "xformer", "bazel-bin", "xcore-opt"
 )
 XCOREOPT_BINARY = str(XCOREOPT_BINARY) + exe_suffix
 
 # Get the long description from the README file
 LONG_README = (here / "README.md").read_text(encoding="utf-8")
+
+
 # Fix link in Readme to current commit hash
 def get_git_revision_hash() -> str:
-    return subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('ascii').strip()
+    return subprocess.check_output(["git", "rev-parse", "HEAD"]).decode("ascii").strip()
+
+
 LONG_README = LONG_README.replace("!!COMMIT_HASH!!", get_git_revision_hash())
 
 # xtflm_interpreter path and libs from lib_tflite_micro
@@ -71,10 +75,10 @@ def finalize_options(self):
 
 # add device lib and headers as package data
-device_files = {root.replace(os.sep, '.'):
-                ['*.h', '*.a', '*.make', '*.cmake']
-                for root, d, f in os.walk(os.path.join("xmos_ai_tools", "xinterpreters", "device"))
-                }
+device_files = {
+    root.replace(os.sep, "."): ["*.h", "*.a", "*.make", "*.cmake"]
+    for root, d, f in os.walk(os.path.join("xmos_ai_tools", "xinterpreters", "device"))
+}
 
 # add host interpreter lib
 package_files = {"xmos_ai_tools.xinterpreters.host": XTFLM_INTERPRETER_LIBS}
@@ -118,7 +122,8 @@ def finalize_options(self):
     install_requires=REQUIRED_PACKAGES,
     package_data=package_files,
     data_files=[
-        ("Scripts" if platform.system() == "Windows" else "bin", [XCOREOPT_BINARY])
+        ("Scripts" if platform.system() ==
+         "Windows" else "bin", [XCOREOPT_BINARY])
     ],
     cmdclass={
         "bdist_wheel": bdist_wheel,
diff --git a/xformer/README.md b/xformer/README.md
index 01a081553..3f7f43e44 100644
--- a/xformer/README.md
+++ b/xformer/README.md
@@ -11,7 +11,7 @@ packages from `requirements.txt`.
 With Bazel installed (check `.bazelversion` for current version),
 you can build with the following command (make sure you run it
-from the directory /ai_tools/experimental/xformer):
+from the directory /ai_tools/xformer):
 
     bazel build //:xcore-opt
 
@@ -29,7 +29,7 @@ Python package instructions
 
 After building the xcore-opt binary, the python package can be built
 with the following command (make sure you run it from the directory
-/ai_tools/experimental/xformer/python):
+/ai_tools/xformer/python):
 
     cd python
     python setup.py bdist_wheel
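Taken together, the path fixes in this patch all drop one directory level: xformer now lives at the repository root, so every `../../` that previously climbed out of `experimental/xformer` becomes `../`. A minimal sketch of the resulting local build flow, assuming a Linux host with Bazel on PATH and the repository checked out as `ai_tools`; the CI-only flags (remote cache, crosstool, version-check override) are omitted here:

    # Build the xcore-opt binary from the new top-level xformer directory
    cd ai_tools/xformer
    bazel build //:xcore-opt

    # Package the Python wheel; python/setup.py now resolves the binary
    # from ../xformer/bazel-bin/xcore-opt (one level up, not two)
    cd ../python
    python setup.py bdist_wheel
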
From 54f9c7178a22017207a92e2ab5e4f3611ffc4590 Mon Sep 17 00:00:00 2001
From: Michael Poluektov
Date: Wed, 16 Aug 2023 15:24:02 +0000
Subject: [PATCH 03/15] remove archived folder

---
 archived/README.md | 23 -
 archived/test/README.md | 54 -
 archived/test/generate.ipynb | 1455 ---
 archived/test/integration_test/__init__.py | 438 -
 archived/test/integration_test/conftest.py | 278 -
 archived/test/integration_test/pytest.ini | 4 -
 .../test_directed/__init__.py | 8 -
 .../test_directed/bnn_model/saved_model.pb | Bin 479766 -> 0 bytes
 .../variables/variables.data-00000-of-00001 | Bin 168778 -> 0 bytes
 .../bnn_model/variables/variables.index | Bin 1548 -> 0 bytes
 .../test_directed/test_bnn.py | 128 -
 .../test_directed/test_mobilenet_v1.py | 142 -
 .../test_offset_saturating_ops.py | 54 -
 .../test_directed/test_zero_weights.py | 52 -
 .../integration_test/test_directed/tmp.txt | 19 -
 .../test_single_op_models/__init__.py | 111 -
 .../test_activation/__init__.py | 29 -
 .../test_activation/conftest.py | 13 -
 .../test_activation/test_prelu.py | 43 -
 .../test_activation/test_relu.py | 32 -
 .../test_activation/test_relu.yml | 203 -
 .../test_activation/test_relu6.py | 29 -
 .../test_activation/test_relu6.yml | 203 -
 .../test_activation/test_sigmoid.py | 29 -
 .../test_activation/test_sigmoid.yml | 203 -
 .../test_activation/test_tanh.py | 29 -
 .../test_activation/test_tanh.yml | 203 -
 .../test_single_op_models/test_add.py | 38 -
 .../test_single_op_models/test_add.yml | 103 -
 .../test_binarized/__init__.py | 215 -
 .../test_binarized/conftest.py | 19 -
 .../test_binarized/padded/__init__.py | 9 -
 .../padded/test_bconv2d_bin_DI_padded.py | 17 -
 .../padded/test_bconv2d_bin_DI_padded.yml | 243 -
 .../padded/test_bconv2d_bin_padded.py | 17 -
 .../padded/test_bconv2d_bin_padded.yml | 243 -
 .../padded/test_bconv2d_int8_DIDO_padded.py | 18 -
 .../padded/test_bconv2d_int8_DIDO_padded.yml | 303 -
 .../padded/test_bconv2d_int8_padded.py | 18 -
 .../padded/test_bconv2d_int8_padded.yml | 323 -
 .../test_binarized/test_bconv2d_bin.py | 78 -
 .../test_binarized/test_bconv2d_bin.yml | 223 -
 .../test_binarized/test_bconv2d_bin_DI.py | 41 -
 .../test_binarized/test_bconv2d_bin_DI.yml | 223 -
 .../test_binarized/test_bconv2d_int8.py | 55 -
 .../test_binarized/test_bconv2d_int8.yml | 303 -
 .../test_binarized/test_bconv2d_int8_DIDO.py | 36 -
 .../test_binarized/test_bconv2d_int8_DIDO.yml | 303 -
 .../test_bconv2d_int8_DIDO_activation.py | 18 -
 .../test_bconv2d_int8_DIDO_activation.yml | 303 -
 .../test_bconv2d_int8_activation.py | 18 -
 .../test_bconv2d_int8_activation.yml | 303 -
 .../test_binarized/test_bsign.py | 66 -
 .../test_binarized/test_bsign.yml | 103 -
 .../test_conv2d/__init__.py | 94 -
 .../test_conv2d/conftest.py | 4 -
 .../test_conv2d/test_conv2d.py | 38 -
 .../test_conv2d/test_conv2d.yml | 483 -
 .../test_conv2d/test_conv2d_1x1.py | 37 -
 .../test_conv2d/test_conv2d_1x1.yml | 243 -
 .../test_conv2d/test_conv2d_shallowin.py | 38 -
 .../test_conv2d/test_conv2d_shallowin.yml | 483 -
 .../test_conv2d/test_depthwise_conv2d.py | 47 -
 .../test_conv2d/test_depthwise_conv2d.yml | 443 -
 .../test_explicit_padding/__init__.py | 52 -
 .../test_padded_conv2d.py | 28 -
 .../test_padded_conv2d.yml | 303 -
 .../test_padded_conv2d_shallowin.py | 28 -
 .../test_padded_conv2d_shallowin.yml | 303 -
 .../test_padded_depthwise_conv2d.py | 28 -
 .../test_padded_depthwise_conv2d.yml | 283 -
 .../test_conv2d/test_fused_relu/__init__.py | 36 -
 .../test_custom_relu_conv2d.py | 39 -
 .../test_custom_relu_conv2d.yml | 263 -
 .../test_conv2d/test_single_pixel_conv2d.py | 43 -
 .../test_conv2d/test_single_pixel_conv2d.yml | 124 -
 .../test_fully_connected.py | 57 -
 .../test_fully_connected.yml | 244 -
 .../test_single_op_models/test_pad.py | 38 -
 .../test_single_op_models/test_pad.yml | 163 -
 .../test_pool2d/__init__.py | 58 -
 .../test_pool2d/test_avgpool2d.py | 29 -
 .../test_pool2d/test_avgpool2d.yml | 443 -
 .../test_pool2d/test_global_avgpool2d.py | 48 -
 .../test_pool2d/test_global_avgpool2d.yml | 203 -
 .../test_pool2d/test_maxpool2d.py | 29 -
 .../test_pool2d/test_maxpool2d.yml | 443 -
 .../test_strided_slice.py | 46 -
 .../test_strided_slice.yml | 11 -
 archived/tflite2xcore/LICENSE.txt | 133 -
 archived/tflite2xcore/README.md | 5 -
 archived/tflite2xcore/build_dist.sh | 13 -
 archived/tflite2xcore/setup.py | 64 -
 .../tflite2xcore/tflite2xcore/__init__.py | 30 -
 archived/tflite2xcore/tflite2xcore/analyze.py | 145 -
 .../tflite2xcore/tflite2xcore/converter.py | 359 -
 .../tflite2xcore/execution_planning.py | 66 -
 .../tflite2xcore/model_generation/__init__.py | 20 -
 .../model_generation/converters.py | 190 -
 .../model_generation/data_factories.py | 57 -
 .../model_generation/evaluators.py | 198 -
 .../model_generation/model_generators.py | 72 -
 .../tflite2xcore/model_generation/runners.py | 168 -
 .../tflite2xcore/model_generation/utils.py | 56 -
 .../tflite2xcore/parallelization.py | 343 -
 .../tflite2xcore/tflite2xcore/pass_manager.py | 105 -
 archived/tflite2xcore/tflite2xcore/py.typed | 0
 .../tflite2xcore/tests/__init__.py | 0
 .../tflite2xcore/tests/conftest.py | 37 -
 .../tflite2xcore/tests/pytest.ini | 0
 .../tflite2xcore/tests/runtests.py | 194 -
 .../tests/test_execution_planning/__init__.py | 0
 .../test_ReverseDepthFirstPlanner.py | 152 -
 .../tflite2xcore/tests/test_ir/__init__.py | 0
 .../tests/test_ir/builtin_operators.tflite | Bin 96560 -> 0 bytes
 .../tests/test_ir/test_BuiltinOptions.py | 119 -
 .../tests/test_ir/test_serialization.py | 149 -
 .../test_ChannelGroupSlicePlanner.py | 67 -
 .../test_ElementWisePlanner.py | 71 -
 .../test_parallelization/test_SlicePlanner.py | 95 -
 .../test_transformation_passes/__init__.py | 0
 .../test_transformation_passes/conftest.py | 136 -
 .../model_builders.py | 877 --
 .../test_add_passes/__init__.py | 4 -
 .../test_add_passes/test_ReplaceAddPass.py | 142 -
 .../__init__.py | 38 -
 .../test_ConstantPropagationPass.py | 70 -
 .../test_conv2d_passes/__init__.py | 0
 .../test_conv2d_passes/conftest.py | 218 -
 .../test_CanonicalizeSinglePixelConv2DPass.py | 160 -
 ...CanonicalizeSingleinDepthwiseConv2DPass.py | 99 -
 .../test_Replace1x1Conv2dPass.py | 113 -
 .../test_ReplaceDeepConv2dPass.py | 86 -
 .../test_ReplaceDepthwiseConv2dPass.py | 143 -
 .../test_ReplaceShallowinConv2dPass.py | 126 -
 .../test_dce_passes/__init__.py | 52 -
 .../test_dce_passes/conftest.py | 19 -
 .../test_dce_passes/dce_composite_test.py | 71 -
 .../test_EliminateDeadBuffersPass.py | 53 -
 .../test_EliminateDeadOperatorsPass.py | 45 -
 .../test_EliminateDeadTensorsPass.py | 43 -
 .../test_fully_connected_passes/__init__.py | 0
 .../test_fully_connected_passes/conftest.py | 71 -
 .../test_LegalizeXCFullyConnectedPass.py | 61 -
 .../test_ReplaceFullyConnectedPass.py | 33 -
 .../test_lce_passes/__init__.py | 259 -
 .../test_lce_passes/conftest.py | 17 -
 ...test_LegalizeBconv2dBitpackedDeepInPass.py | 36 -
 .../test_LegalizeBconv2dBitpackedPass.py | 105 -
 ...st_LegalizeBconv2dInt8DeepInDeepOutPass.py | 52 -
 .../test_LegalizeBconv2dInt8Pass.py | 136 -
 .../test_LegalizeXCBconv2DPaddingPass.py | 128 -
 .../test_ReplaceBconv2DBitpackedDeepInPass.py | 53 -
 .../test_ReplaceBconv2DBitpackedPass.py | 99 -
 ...est_ReplaceBconv2DInt8DeepInDeepOutPass.py | 57 -
 .../test_ReplaceBconv2DInt8Pass.py | 101 -
 .../test_ReplaceLceQuantizePass.py | 80 -
 .../test_lut_passes/__init__.py | 0
 .../test_lut_passes/conftest.py | 108 -
 .../test_lut_passes/test_ReplaceReLU6Pass.py | 35 -
 .../test_lut_passes/test_ReplaceReLUPass.py | 35 -
 .../test_lut_passes/test_ReplaceTanhPass.py | 35 -
 .../test_op_version_passes/__init__.py | 0
 .../test_op_version_passes/conftest.py | 12 -
 .../test_LegalizeQuantizeVersionPass.py | 88 -
 .../test_padding_passes/__init__.py | 25 -
 .../test_padding_passes/conftest.py | 115 -
 .../padding_composite_test.py | 122 -
 .../test_FuseConsecutivePadsPass.py | 108 -
 .../test_FuseConv2dPaddingPass.py | 204 -
 .../test_RemovePaddingInputPass.py | 87 -
 .../test_ReplacePadPass.py | 97 -
 .../test_SplitPaddingPass.py | 107 -
 .../test_parallelization_passes/__init__.py | 0
 .../test_parallelization_passes/conftest.py | 36 -
 .../test_ParallelizeConv2dPass.py | 67 -
 .../test_ParallelizeDepthwiseConv2dPass.py | 55 -
 .../test_ParallelizeFullyConnectedPass.py | 46 -
 ...test_ParallelizeGlobalAveragePool2DPass.py | 53 -
 .../test_ParallelizePool2DPass.py | 60 -
 .../test_ParallelizeRequant16To8Pass.py | 36 -
 .../test_pooling_passes/__init__.py | 0
 .../test_pooling_passes/conftest.py | 270 -
 .../test_ReplaceAveragePool2D2x2Pass.py | 94 -
 .../test_ReplaceAveragePool2DPass.py | 38 -
 .../test_ReplaceGlobalAveragePool2DPass.py | 110 -
 .../test_ReplaceMaxPool2D2x2Pass.py | 49 -
 .../test_ReplaceMaxPool2DPass.py | 38 -
 .../__init__.py | 0
 .../conftest.py | 12 -
 .../test_CanonicalizeQuantizedInputPass.py | 157 -
 .../test_CanonicalizeQuantizedOutputPass.py | 167 -
 .../test_LegalizeFloatInputPass.py | 153 -
 .../test_LegalizeFloatOutputPass.py | 112 -
 ...t_RemoveRedundantInt8RequantizationPass.py | 106 -
 .../test_renaming_passes/__init__.py | 0
 .../test_renaming_passes/conftest.py | 4 -
 ...st_LegalizeOperatorOutputTensorNamePass.py | 183 -
 .../test_reshape_passes/__init__.py | 0
 .../test_reshape_passes/conftest.py | 106 -
 .../test_CanonializeReshapePass.py | 93 -
 .../test_RemovePrecedingReshapePass.py | 109 -
 .../test_RemoveSubsequentReshapePass.py | 51 -
 .../test_scratch_memory_passes/__init__.py | 0
 .../test_scratch_memory_passes/conftest.py | 23 -
 .../test_ScratchMemoryConv2d1x1Pass.py | 45 -
 .../test_ScratchMemoryConv2dPass.py | 59 -
 .../test_ScratchMemoryDepthwiseConv2dPass.py | 44 -
 .../test_ScratchMemoryFullyConnectedPass.py | 34 -
 .../test_warning_passes/__init__.py | 0
 .../test_FloatingPointWarningPass.py | 64 -
 .../test_word_alignment_passes/__init__.py | 0
 .../test_word_alignment_passes/conftest.py | 8 -
 .../test_CanonicalizeConv2DInputChannels.py | 139 -
 .../tflite2xcore/tflite_visualize.py | 824 --
 .../transformation_passes/__init__.py | 119 -
 .../transformation_passes/add_passes.py | 88 -
 .../constant_propagation_passes.py | 80 -
 .../transformation_passes/conv2d_passes.py | 358 -
 .../transformation_passes/dce_passes.py | 51 -
 .../fully_connected_passes.py | 45 -
 .../transformation_passes/lce_passes.py | 709 --
 .../transformation_passes/lut_passes.py | 96 -
 .../minification_passes.py | 65 -
 .../op_version_passes.py | 24 -
 .../transformation_passes/padding_passes.py | 339 -
 .../parallelization_passes.py | 166 -
 .../transformation_passes/pooling_passes.py | 193 -
 .../quantize_dequantize_passes.py | 185 -
 .../transformation_passes/renaming_passes.py | 39 -
 .../transformation_passes/reshape_passes.py | 110 -
 .../scratch_memory_passes.py | 167 -
 .../transformation_passes.py | 512 -
 .../transformation_passes/warning_passes.py | 30 -
 .../word_alignment_passes.py | 95 -
 archived/tflite2xcore/tflite2xcore/utils.py | 392 -
 archived/tflite2xcore/tflite2xcore/version.py | 24 -
 .../tflite2xcore/tflite2xcore/xcore_model.py | 4 -
 .../tflite2xcore/xcore_schema/README.md | 27 -
 .../tflite2xcore/xcore_schema/__init__.py | 25 -
 .../xcore_schema/builtin_options.py | 146 -
 .../xcore_schema/builtin_options.pyi | 12 -
 .../xcore_schema/data_container.py | 98 -
 .../xcore_schema/dict_conversion.py | 223 -
 .../tflite2xcore/xcore_schema/flexbuffers.py | 165 -
 .../tflite2xcore/xcore_schema/ir_object.py | 64 -
 .../tflite2xcore/xcore_schema/misc_enums.py | 42 -
 .../tflite2xcore/xcore_schema/misc_enums.pyi | 20 -
 .../tflite2xcore/xcore_schema/op_codes.py | 59 -
 .../tflite2xcore/xcore_schema/op_codes.pyi | 163 -
 .../tflite2xcore/xcore_schema/operator.py | 67 -
 .../xcore_schema/operator_code.py | 37 -
 .../xcore_schema/schema_py_generated.py | 9738 -----------------
 .../tflite2xcore/xcore_schema/subgraph.py | 214 -
 .../tflite2xcore/xcore_schema/tensor.py | 119 -
 .../tflite2xcore/xcore_schema/tensor_type.py | 73 -
 .../tflite2xcore/xcore_schema/tensor_type.pyi | 24 -
 .../tflite2xcore/xcore_schema/xcore_model.py | 463 -
 archived/tflite2xcore/xformer.py | 96 -
 archived/utils/lib_flexbuffers/CMakeLists.txt | 50 -
 archived/utils/lib_flexbuffers/README.md | 24 -
 archived/utils/lib_flexbuffers/build.sh | 11 -
 .../src/flexbuffer_helpers.cpp | 107 -
 263 files changed, 39945 deletions(-)
 delete mode 100644 archived/README.md
 delete mode 100644 archived/test/README.md
 delete mode 100644 archived/test/generate.ipynb
 delete mode 100644 archived/test/integration_test/__init__.py
 delete mode 100644 archived/test/integration_test/conftest.py
 delete mode 100644 archived/test/integration_test/pytest.ini
 delete mode 100644 archived/test/integration_test/test_directed/__init__.py
 delete mode 100644 archived/test/integration_test/test_directed/bnn_model/saved_model.pb
 delete mode 100644 archived/test/integration_test/test_directed/bnn_model/variables/variables.data-00000-of-00001
 delete mode 100644 archived/test/integration_test/test_directed/bnn_model/variables/variables.index
 delete mode 100644 archived/test/integration_test/test_directed/test_bnn.py
 delete mode 100644 archived/test/integration_test/test_directed/test_mobilenet_v1.py
 delete mode 100644 archived/test/integration_test/test_directed/test_offset_saturating_ops.py
 delete mode 100644 archived/test/integration_test/test_directed/test_zero_weights.py
 delete mode 100644 archived/test/integration_test/test_directed/tmp.txt
 delete mode 100644 archived/test/integration_test/test_single_op_models/__init__.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_activation/__init__.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_activation/conftest.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_activation/test_prelu.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_activation/test_relu.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_activation/test_relu.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_activation/test_relu6.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_activation/test_relu6.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_activation/test_sigmoid.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_activation/test_sigmoid.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_activation/test_tanh.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_activation/test_tanh.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_add.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_add.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_binarized/__init__.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_binarized/conftest.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_binarized/padded/__init__.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_bin_DI_padded.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_bin_DI_padded.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_bin_padded.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_bin_padded.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_int8_DIDO_padded.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_int8_DIDO_padded.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_int8_padded.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_int8_padded.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_bin.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_bin.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_bin_DI.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_bin_DI.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8_DIDO.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8_DIDO.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8_DIDO_activation.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8_DIDO_activation.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8_activation.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8_activation.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_binarized/test_bsign.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_binarized/test_bsign.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_conv2d/__init__.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_conv2d/conftest.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_conv2d/test_conv2d.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_conv2d/test_conv2d.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_conv2d/test_conv2d_1x1.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_conv2d/test_conv2d_1x1.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_conv2d/test_conv2d_shallowin.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_conv2d/test_conv2d_shallowin.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_conv2d/test_depthwise_conv2d.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_conv2d/test_depthwise_conv2d.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_conv2d/test_explicit_padding/__init__.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_conv2d/test_explicit_padding/test_padded_conv2d.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_conv2d/test_explicit_padding/test_padded_conv2d.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_conv2d/test_explicit_padding/test_padded_conv2d_shallowin.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_conv2d/test_explicit_padding/test_padded_conv2d_shallowin.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_conv2d/test_explicit_padding/test_padded_depthwise_conv2d.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_conv2d/test_explicit_padding/test_padded_depthwise_conv2d.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_conv2d/test_fused_relu/__init__.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_conv2d/test_fused_relu/test_custom_relu_conv2d.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_conv2d/test_fused_relu/test_custom_relu_conv2d.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_conv2d/test_single_pixel_conv2d.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_conv2d/test_single_pixel_conv2d.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_fully_connected.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_fully_connected.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_pad.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_pad.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_pool2d/__init__.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_pool2d/test_avgpool2d.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_pool2d/test_avgpool2d.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_pool2d/test_global_avgpool2d.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_pool2d/test_global_avgpool2d.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_pool2d/test_maxpool2d.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_pool2d/test_maxpool2d.yml
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_strided_slice.py
 delete mode 100644 archived/test/integration_test/test_single_op_models/test_strided_slice.yml
 delete mode 100644 archived/tflite2xcore/LICENSE.txt
 delete mode 100644 archived/tflite2xcore/README.md
 delete mode 100755 archived/tflite2xcore/build_dist.sh
 delete mode 100644 archived/tflite2xcore/setup.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/__init__.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/analyze.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/converter.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/execution_planning.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/model_generation/__init__.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/model_generation/converters.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/model_generation/data_factories.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/model_generation/evaluators.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/model_generation/model_generators.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/model_generation/runners.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/model_generation/utils.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/parallelization.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/pass_manager.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/py.typed
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/__init__.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/conftest.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/pytest.ini
 delete mode 100755 archived/tflite2xcore/tflite2xcore/tests/runtests.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_execution_planning/__init__.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_execution_planning/test_ReverseDepthFirstPlanner.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_ir/__init__.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_ir/builtin_operators.tflite
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_ir/test_BuiltinOptions.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_ir/test_serialization.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_parallelization/test_ChannelGroupSlicePlanner.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_parallelization/test_ElementWisePlanner.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_parallelization/test_SlicePlanner.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/__init__.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/conftest.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/model_builders.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_add_passes/__init__.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_add_passes/test_ReplaceAddPass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_constant_propagation_passes/__init__.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_constant_propagation_passes/test_ConstantPropagationPass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/__init__.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/conftest.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/test_CanonicalizeSinglePixelConv2DPass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/test_CanonicalizeSingleinDepthwiseConv2DPass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/test_Replace1x1Conv2dPass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/test_ReplaceDeepConv2dPass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/test_ReplaceDepthwiseConv2dPass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/test_ReplaceShallowinConv2dPass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_dce_passes/__init__.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_dce_passes/conftest.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_dce_passes/dce_composite_test.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_dce_passes/test_EliminateDeadBuffersPass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_dce_passes/test_EliminateDeadOperatorsPass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_dce_passes/test_EliminateDeadTensorsPass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_fully_connected_passes/__init__.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_fully_connected_passes/conftest.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_fully_connected_passes/test_LegalizeXCFullyConnectedPass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_fully_connected_passes/test_ReplaceFullyConnectedPass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/__init__.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/conftest.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_LegalizeBconv2dBitpackedDeepInPass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_LegalizeBconv2dBitpackedPass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_LegalizeBconv2dInt8DeepInDeepOutPass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_LegalizeBconv2dInt8Pass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_LegalizeXCBconv2DPaddingPass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_ReplaceBconv2DBitpackedDeepInPass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_ReplaceBconv2DBitpackedPass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_ReplaceBconv2DInt8DeepInDeepOutPass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_ReplaceBconv2DInt8Pass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_ReplaceLceQuantizePass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lut_passes/__init__.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lut_passes/conftest.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lut_passes/test_ReplaceReLU6Pass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lut_passes/test_ReplaceReLUPass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lut_passes/test_ReplaceTanhPass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_op_version_passes/__init__.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_op_version_passes/conftest.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_op_version_passes/test_LegalizeQuantizeVersionPass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/__init__.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/conftest.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/padding_composite_test.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/test_FuseConsecutivePadsPass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/test_FuseConv2dPaddingPass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/test_RemovePaddingInputPass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/test_ReplacePadPass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/test_SplitPaddingPass.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/__init__.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/conftest.py
 delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/test_ParallelizeConv2dPass.py
 delete mode 100644
archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/test_ParallelizeDepthwiseConv2dPass.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/test_ParallelizeFullyConnectedPass.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/test_ParallelizeGlobalAveragePool2DPass.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/test_ParallelizePool2DPass.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/test_ParallelizeRequant16To8Pass.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_pooling_passes/__init__.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_pooling_passes/conftest.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_pooling_passes/test_ReplaceAveragePool2D2x2Pass.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_pooling_passes/test_ReplaceAveragePool2DPass.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_pooling_passes/test_ReplaceGlobalAveragePool2DPass.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_pooling_passes/test_ReplaceMaxPool2D2x2Pass.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_pooling_passes/test_ReplaceMaxPool2DPass.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_quantize_dequantize_passes/__init__.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_quantize_dequantize_passes/conftest.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_quantize_dequantize_passes/test_CanonicalizeQuantizedInputPass.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_quantize_dequantize_passes/test_CanonicalizeQuantizedOutputPass.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_quantize_dequantize_passes/test_LegalizeFloatInputPass.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_quantize_dequantize_passes/test_LegalizeFloatOutputPass.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_quantize_dequantize_passes/test_RemoveRedundantInt8RequantizationPass.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_renaming_passes/__init__.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_renaming_passes/conftest.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_renaming_passes/test_LegalizeOperatorOutputTensorNamePass.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_reshape_passes/__init__.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_reshape_passes/conftest.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_reshape_passes/test_CanonializeReshapePass.py delete mode 100644 
archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_reshape_passes/test_RemovePrecedingReshapePass.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_reshape_passes/test_RemoveSubsequentReshapePass.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_scratch_memory_passes/__init__.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_scratch_memory_passes/conftest.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_scratch_memory_passes/test_ScratchMemoryConv2d1x1Pass.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_scratch_memory_passes/test_ScratchMemoryConv2dPass.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_scratch_memory_passes/test_ScratchMemoryDepthwiseConv2dPass.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_scratch_memory_passes/test_ScratchMemoryFullyConnectedPass.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_warning_passes/__init__.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_warning_passes/test_FloatingPointWarningPass.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_word_alignment_passes/__init__.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_word_alignment_passes/conftest.py delete mode 100644 archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_word_alignment_passes/test_CanonicalizeConv2DInputChannels.py delete mode 100755 archived/tflite2xcore/tflite2xcore/tflite_visualize.py delete mode 100644 archived/tflite2xcore/tflite2xcore/transformation_passes/__init__.py delete mode 100644 archived/tflite2xcore/tflite2xcore/transformation_passes/add_passes.py delete mode 100644 archived/tflite2xcore/tflite2xcore/transformation_passes/constant_propagation_passes.py delete mode 100644 archived/tflite2xcore/tflite2xcore/transformation_passes/conv2d_passes.py delete mode 100644 archived/tflite2xcore/tflite2xcore/transformation_passes/dce_passes.py delete mode 100644 archived/tflite2xcore/tflite2xcore/transformation_passes/fully_connected_passes.py delete mode 100644 archived/tflite2xcore/tflite2xcore/transformation_passes/lce_passes.py delete mode 100644 archived/tflite2xcore/tflite2xcore/transformation_passes/lut_passes.py delete mode 100644 archived/tflite2xcore/tflite2xcore/transformation_passes/minification_passes.py delete mode 100644 archived/tflite2xcore/tflite2xcore/transformation_passes/op_version_passes.py delete mode 100644 archived/tflite2xcore/tflite2xcore/transformation_passes/padding_passes.py delete mode 100644 archived/tflite2xcore/tflite2xcore/transformation_passes/parallelization_passes.py delete mode 100644 archived/tflite2xcore/tflite2xcore/transformation_passes/pooling_passes.py delete mode 100644 archived/tflite2xcore/tflite2xcore/transformation_passes/quantize_dequantize_passes.py delete mode 100644 archived/tflite2xcore/tflite2xcore/transformation_passes/renaming_passes.py delete mode 100644 archived/tflite2xcore/tflite2xcore/transformation_passes/reshape_passes.py delete mode 100644 archived/tflite2xcore/tflite2xcore/transformation_passes/scratch_memory_passes.py delete mode 100644 
archived/tflite2xcore/tflite2xcore/transformation_passes/transformation_passes.py delete mode 100644 archived/tflite2xcore/tflite2xcore/transformation_passes/warning_passes.py delete mode 100644 archived/tflite2xcore/tflite2xcore/transformation_passes/word_alignment_passes.py delete mode 100644 archived/tflite2xcore/tflite2xcore/utils.py delete mode 100644 archived/tflite2xcore/tflite2xcore/version.py delete mode 100644 archived/tflite2xcore/tflite2xcore/xcore_model.py delete mode 100644 archived/tflite2xcore/tflite2xcore/xcore_schema/README.md delete mode 100644 archived/tflite2xcore/tflite2xcore/xcore_schema/__init__.py delete mode 100644 archived/tflite2xcore/tflite2xcore/xcore_schema/builtin_options.py delete mode 100644 archived/tflite2xcore/tflite2xcore/xcore_schema/builtin_options.pyi delete mode 100644 archived/tflite2xcore/tflite2xcore/xcore_schema/data_container.py delete mode 100644 archived/tflite2xcore/tflite2xcore/xcore_schema/dict_conversion.py delete mode 100644 archived/tflite2xcore/tflite2xcore/xcore_schema/flexbuffers.py delete mode 100644 archived/tflite2xcore/tflite2xcore/xcore_schema/ir_object.py delete mode 100644 archived/tflite2xcore/tflite2xcore/xcore_schema/misc_enums.py delete mode 100644 archived/tflite2xcore/tflite2xcore/xcore_schema/misc_enums.pyi delete mode 100644 archived/tflite2xcore/tflite2xcore/xcore_schema/op_codes.py delete mode 100644 archived/tflite2xcore/tflite2xcore/xcore_schema/op_codes.pyi delete mode 100644 archived/tflite2xcore/tflite2xcore/xcore_schema/operator.py delete mode 100644 archived/tflite2xcore/tflite2xcore/xcore_schema/operator_code.py delete mode 100644 archived/tflite2xcore/tflite2xcore/xcore_schema/schema_py_generated.py delete mode 100644 archived/tflite2xcore/tflite2xcore/xcore_schema/subgraph.py delete mode 100644 archived/tflite2xcore/tflite2xcore/xcore_schema/tensor.py delete mode 100644 archived/tflite2xcore/tflite2xcore/xcore_schema/tensor_type.py delete mode 100644 archived/tflite2xcore/tflite2xcore/xcore_schema/tensor_type.pyi delete mode 100644 archived/tflite2xcore/tflite2xcore/xcore_schema/xcore_model.py delete mode 100755 archived/tflite2xcore/xformer.py delete mode 100644 archived/utils/lib_flexbuffers/CMakeLists.txt delete mode 100644 archived/utils/lib_flexbuffers/README.md delete mode 100755 archived/utils/lib_flexbuffers/build.sh delete mode 100644 archived/utils/lib_flexbuffers/src/flexbuffer_helpers.cpp diff --git a/archived/README.md b/archived/README.md deleted file mode 100644 index b344495da..000000000 --- a/archived/README.md +++ /dev/null @@ -1,23 +0,0 @@ -DISCLAIMER --- - -These are the archived xformer1 integration tests. They are not currently used for testing, but they can be used for generating tflite models. - - -Generating models --- -Please build xformer following the instructions mentioned [here](https://github.com/xmos/ai_tools#readme). 
-Then install the tflite2xcore Python package with the following command, as it is needed for generating models: -```shell -pip install "./archived/tflite2xcore[examples]" -``` - -To generate models for a particular test, use the following command, defining the MODEL_DUMP_PATH env variable: -```shell -MODEL_DUMP_PATH= pytest archived/test/integration_test/test_directed/test_mobilenet_v1.py --cache-clear --only-experimental-xformer2 -``` - -To dump all models, point pytest at the outermost directory: -```shell -MODEL_DUMP_PATH= pytest archived/test/integration_test --cache-clear --only-experimental-xformer2 -``` \ No newline at end of file diff --git a/archived/test/README.md b/archived/test/README.md deleted file mode 100644 index 4365f51fe..000000000 --- a/archived/test/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# Integration Tests - -Before running the integration tests, you must first install the `xcore_interpreter` python module as described in the main [`README.md`](../README.md). - -## Running all tests with the host interpreter - -To run all tests in the suite: -``` -pytest integration_test -``` - -When modifying the test suite itself (or the model generation framework in `tflite2xcore`), it is usually necessary to clear cached models and data: -``` -pytest integration_test --cache-clear -``` - -The model cache is currently not process-safe. Thus, when running tests on multiple processes with `pytest-xdist`, it is usually necessary to specify `--dist loadfile`. -To run all tests using NUM worker processes: -``` -pytest integration_test -n NUM --dist loadfile -``` - -## Running tests with the device interpreter - -*To do this, the XMOS tools need to be installed and on the path.* - -To run on the device, specify the `--use-device` flag for any test. Some tests are skipped on the device to speed up execution or to avoid known errors associated with insufficient arena size or other HW-related problems. - -Do not use the `-n` option when running on the device. - -## Running specific tests or configurations - -To run a specific test (function) in a module, use the `::` notation, e.g.: -``` -pytest integration_test/test_single_op_models/test_fully_connected.py::test_output -``` - -Test modules typically include multiple configurations, either defined by the `CONFIGS` variable in the module, or in an adjacent `.yml` file with an identical base name (e.g. `test_fully_connected.{py,yml}`). -To run a single configuration with index `0` of a particular test (function): -``` -pytest integration_test/test_single_op_models/test_fully_connected.py::test_output[CONFIGS[0]] -``` - -## Dumping test models - -Specifying `--dump models` will save the converted `.tflite` models in the model cache directory. To print the location of these saved models, set the logging level to `info` by passing `--log-cli-level=info`. - -## Other testing options - -For additional logging output, including the case index for failures, specify a desired log level with `--log-cli-level={warning,info,debug}`. - -To halt at the first failure or error, use the `-x` flag. - -For more info on built-in and custom options, run `pytest -h`, or consult the documentation at https://docs.pytest.org/en/stable/contents.html. 
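For reference, the per-test `.yml` config files mentioned above are produced by `dump_configs()` in the generator notebook deleted below: each file holds a `default` mapping from config index to a parameter dict. A hypothetical excerpt (parameter values invented for illustration, not taken from a real generated file):

```yaml
# Copyright (c) 2020, XMOS Ltd, All rights reserved
# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK
default:
  0:
    K_h: 3
    K_w: 5
    height: 8
    input_channels: 16
    num_threads: 2
    output_channels: 32
    padding: same
    strides:
    - 1
    - 2
    width: 14
```

The integer keys are, presumably, the config indices referenced by test IDs such as `CONFIGS[0]` above.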
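The notebook below draws these configurations by Latin Hypercube sampling over the parameter grids (via the `lhsmdu` package). As a rough mental model only, here is a minimal numpy-only sketch of the idea; it is not the notebook's actual implementation, and the parameter values are toy placeholders:

```python
import numpy as np

def lhs_unit_samples(n_dims: int, n_samples: int, seed: int = 0) -> np.ndarray:
    """Latin Hypercube sampling: one point per stratum [i/n, (i+1)/n) in every
    dimension, with the strata shuffled independently per dimension."""
    rng = np.random.default_rng(seed)
    strata = np.stack([rng.permutation(n_samples) for _ in range(n_dims)])
    return (strata + rng.random((n_dims, n_samples))) / n_samples

# Toy parameter grids (placeholders, not the notebook's real value lists).
params = {"height": [1, 4, 8], "width": [1, 7, 20], "channels": [4, 16, 32]}

# Map each unit-interval sample to an index into the parameter's value list,
# so every sampled column becomes one config dict.
u = lhs_unit_samples(len(params), n_samples=5)
configs = [
    {k: vals[min(int(x * len(vals)), len(vals) - 1)]
     for (k, vals), x in zip(params.items(), u[:, j])}
    for j in range(u.shape[1])
]
print(configs)
```

This stratification is why the notebook can cover wide parameter grids with a few dozen samples rather than a full cross product.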
diff --git a/archived/test/generate.ipynb b/archived/test/generate.ipynb deleted file mode 100644 index 278e2fa93..000000000 --- a/archived/test/generate.ipynb +++ /dev/null @@ -1,1455 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import yaml\n", - "import lhsmdu\n", - "import numpy as np\n", - "from scipy.stats import distributions" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def make_configs(params, conditions, *, N):\n", - " A = lhsmdu.sample(len(params), N)\n", - " distros = {\n", - " k: distributions.randint(0, len(p_vals))\n", - " for k, p_vals in params.items()\n", - " }\n", - " idx_map = {\n", - " key: np.int32(lhsmdu.inverseTransformSample(distros[key], floats)).flatten().tolist()\n", - " for floats, key in zip(A, params)\n", - " }\n", - "\n", - " configs = []\n", - " for b in zip(*idx_map.values()):\n", - " d = {k: params[j] for j, (k, params) in zip(b, params.items())}\n", - " if conditions(d):\n", - " configs.append(d)\n", - " return configs" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "noalias_dumper = yaml.dumper.SafeDumper\n", - "noalias_dumper.ignore_aliases = lambda self, data: True\n", - "def dump_configs(configs, filename):\n", - " with open(filename, \"w\") as f:\n", - " f.write(\n", - " \"# Copyright (c) 2020, XMOS Ltd, All rights reserved\\n\"\n", - " \"# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK\\n\"\n", - " )\n", - " yaml.dump({\"default\": dict(enumerate(configs))}, stream=f, Dumper=noalias_dumper)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# test_conv2d.yml" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params = dict(\n", - " height=[1,4,5,8,12,15],\n", - " width=[1,4,7,8,14,20],\n", - " K_h=[1,2,3,5,7,9],\n", - " K_w=[1,3,4,5,7,9],\n", - " input_channels=[4,8,12,16,20,32,36,48],\n", - " output_channels=[4,8,12,16,28,32,48,52,64],\n", - " padding=[\"same\", \"valid\"],\n", - " strides=[(1,1), (1,2), (2,1), (2,2)],\n", - " num_threads=[1,2,5],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def conditions(d):\n", - " if (d[\"K_w\"] == d[\"K_h\"] == 1 or d[\"K_w\"] * d[\"input_channels\"] <= 32):\n", - " return False\n", - " if d[\"num_threads\"] > d[\"height\"] or d[\"num_threads\"] > d[\"width\"]:\n", - " return False\n", - " if d[\"padding\"] == \"valid\":\n", - " if d[\"K_w\"] == d[\"width\"] and d[\"K_h\"] == d[\"height\"]:\n", - " # this would be single pixel conv2d\n", - " return False\n", - " return d[\"K_w\"] <= d[\"width\"] and d[\"K_h\"] <= d[\"height\"]\n", - " return True" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "configs = make_configs(params, conditions, N=100)\n", - "len(configs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dump_configs(configs, \"test_conv2d.yml\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# test_custom_relu_conv2d.yml" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params[\"max_value\"] = [j/2 for j in range(1, 12)]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - 
"metadata": {}, - "outputs": [], - "source": [ - "configs = make_configs(params, conditions, N=50)\n", - "len(configs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dump_configs(configs, \"test_custom_relu_conv2d.yml\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# test_padded_conv2d.yml" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params = dict(\n", - " height=[1,4,5,8,12,15],\n", - " width=[1,4,7,8,14,20],\n", - " K_h=[1,2,3,5,7,9],\n", - " K_w=[1,3,4,5,7,9],\n", - " input_channels=[4,8,12,16,20,32,36,48],\n", - " output_channels=[4,8,12,16,28,32,48,52,64],\n", - " pad_t=[0,1,2],\n", - " pad_b=[0,1,2],\n", - " pad_l=[0,1,2],\n", - " pad_r=[0,1,2],\n", - " strides=[(1,1), (1,2), (2,1), (2,2)],\n", - " num_threads=[1,2,5],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def conditions(d):\n", - " if (d[\"K_w\"] == d[\"K_h\"] == 1 or d[\"K_w\"] * d[\"input_channels\"] <= 32):\n", - " return False\n", - " if d[\"num_threads\"] > d[\"height\"] or d[\"num_threads\"] > d[\"width\"]:\n", - " return False\n", - " if d[\"pad_t\"] == d[\"pad_b\"] == d[\"pad_l\"] == d[\"pad_r\"] == 0:\n", - " return False\n", - " padded_width = d[\"width\"] + d[\"pad_l\"] + d[\"pad_r\"]\n", - " padded_height = d[\"height\"] + d[\"pad_t\"] + d[\"pad_b\"]\n", - " if d[\"K_w\"] == padded_width and d[\"K_h\"] == padded_height:\n", - " # this would be single pixel conv2d\n", - " return False\n", - " return (\n", - " d[\"K_w\"] <= padded_width\n", - " and d[\"K_h\"] <= padded_height\n", - " and d[\"pad_r\"] < d[\"K_w\"] > d[\"pad_l\"]\n", - " and d[\"pad_t\"] < d[\"K_h\"] > d[\"pad_b\"]\n", - " ) " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "configs = make_configs(params, conditions, N=70)\n", - "len(configs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dump_configs(configs, \"test_padded_conv2d.yml\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# test_conv2d_1x1.yml" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params = dict(\n", - " height=[1,4,5,8,12,15],\n", - " width=[1,4,7,8,14,20],\n", - " input_channels=[4,8,12,16,20,32,36,48],\n", - " output_channels=[4,8,12,16,28,32,48,52,64],\n", - " num_threads=[1,2,5],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def conditions(d):\n", - " if d[\"width\"] == d[\"height\"] == 1:\n", - " # this would be single pixel conv2d\n", - " return False\n", - " return not (d[\"num_threads\"] > d[\"height\"] or d[\"num_threads\"] > d[\"width\"])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "configs = make_configs(params, conditions, N=55)\n", - "len(configs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dump_configs(configs, \"test_conv2d_1x1.yml\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# test_conv2d_shallowin.yml" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params = dict(\n", - " 
height=[1,4,5,8,12,15],\n", - " width=[1,4,7,8,14,20],\n", - " K_h=[1,2,3,5,7,9],\n", - " K_w=[1,2,3,4,5,7,8],\n", - " input_channels=[4,8,16,32],\n", - " output_channels=[4,8,12,16,28,32,48,52,64],\n", - " padding=[\"same\", \"valid\"],\n", - " strides=[(1,1), (1,2), (2,1), (2,2)],\n", - " num_threads=[1,2,5],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def conditions(d):\n", - " if (d[\"K_w\"] == d[\"K_h\"] == 1 or d[\"K_w\"] * d[\"input_channels\"] > 32):\n", - " return False\n", - " if d[\"num_threads\"] > d[\"height\"] or d[\"num_threads\"] > d[\"width\"]:\n", - " return False\n", - " if d[\"padding\"] == \"valid\":\n", - " if d[\"K_w\"] == d[\"width\"] and d[\"K_h\"] == d[\"height\"]:\n", - " # this would be single pixel conv2d\n", - " return False\n", - " return d[\"K_w\"] <= d[\"width\"] and d[\"K_h\"] <= d[\"height\"]\n", - " return True" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "configs = make_configs(params, conditions, N=150)\n", - "len(configs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dump_configs(configs, \"test_conv2d_shallowin.yml\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# test_padded_conv2d_shallowin.yml" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params = dict(\n", - " height=[1,4,5,8,12,15],\n", - " width=[1,4,7,8,14,20],\n", - " K_h=[1,2,3,5,7,9],\n", - " K_w=[1,2,3,4,5,7,8],\n", - " input_channels=[4,8,16,32],\n", - " output_channels=[4,8,12,16,28,32,48,52,64],\n", - " pad_t=[0,1,2],\n", - " pad_b=[0,1,2],\n", - " pad_l=[0,1,2],\n", - " pad_r=[0,1,2],\n", - " strides=[(1,1), (1,2), (2,1), (2,2)],\n", - " num_threads=[1,2,5],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def conditions(d):\n", - " if (d[\"K_w\"] == d[\"K_h\"] == 1 or d[\"K_w\"] * d[\"input_channels\"] > 32):\n", - " return False\n", - " if d[\"num_threads\"] > d[\"height\"] or d[\"num_threads\"] > d[\"width\"]:\n", - " return False\n", - " if d[\"pad_t\"] == d[\"pad_b\"] == d[\"pad_l\"] == d[\"pad_r\"] == 0:\n", - " return False\n", - " padded_width = d[\"width\"] + d[\"pad_l\"] + d[\"pad_r\"]\n", - " padded_height = d[\"height\"] + d[\"pad_t\"] + d[\"pad_b\"]\n", - " if d[\"K_w\"] == padded_width and d[\"K_h\"] == padded_height:\n", - " # this would be single pixel conv2d\n", - " return False\n", - " return (\n", - " d[\"K_w\"] <= padded_width\n", - " and d[\"K_h\"] <= padded_height\n", - " and d[\"pad_r\"] < d[\"K_w\"] > d[\"pad_l\"]\n", - " and d[\"pad_t\"] < d[\"K_h\"] > d[\"pad_b\"]\n", - " ) " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "configs = make_configs(params, conditions, N=150)\n", - "len(configs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dump_configs(configs, \"test_padded_conv2d_shallowin.yml\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# test_depthwise_conv2d.yml" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params = dict(\n", - " height=[1,4,5,8,12,15],\n", - " width=[1,4,7,8,14,20],\n", - " K_h=[1,2,3,5,7,9],\n", - " 
K_w=[1,2,3,4,5,7,8],\n", - " channels=[4,8,12,16,28,32,48,52,64],\n", - " padding=[\"same\", \"valid\"],\n", - " strides=[(1,1), (1,2), (2,1), (2,2)],\n", - " num_threads=[1,2,5],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def conditions(d):\n", - " if d[\"num_threads\"] > d[\"height\"] or d[\"num_threads\"] > d[\"width\"]:\n", - " return False\n", - " if d[\"padding\"] == \"valid\":\n", - " return d[\"K_w\"] <= d[\"width\"] and d[\"K_h\"] <= d[\"height\"]\n", - " return True" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "configs = make_configs(params, conditions, N=70)\n", - "len(configs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dump_configs(configs, \"test_depthwise_conv2d.yml\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# test_padded_depthwise_conv2d.yml" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params = dict(\n", - " height=[1,4,5,8,12,15],\n", - " width=[1,4,7,8,14,20],\n", - " K_h=[1,2,3,5,7,9],\n", - " K_w=[1,2,3,4,5,7,8],\n", - " channels=[4,8,12,16,28,32,48,52,64],\n", - " pad_t=[0,1,2],\n", - " pad_b=[0,1,2],\n", - " pad_l=[0,1,2],\n", - " pad_r=[0,1,2],\n", - " strides=[(1,1), (1,2), (2,1), (2,2)],\n", - " num_threads=[1,2,5],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def conditions(d):\n", - " if d[\"num_threads\"] > d[\"height\"] or d[\"num_threads\"] > d[\"width\"]:\n", - " return False\n", - " if d[\"pad_t\"] == d[\"pad_b\"] == d[\"pad_l\"] == d[\"pad_r\"] == 0:\n", - " return False\n", - " return (\n", - " d[\"K_w\"] <= d[\"width\"] + d[\"pad_l\"] + d[\"pad_r\"]\n", - " and d[\"K_h\"] <= d[\"height\"] + d[\"pad_t\"] + d[\"pad_b\"]\n", - " and d[\"pad_r\"] < d[\"K_w\"] > d[\"pad_l\"]\n", - " and d[\"pad_t\"] < d[\"K_h\"] > d[\"pad_b\"]\n", - " )" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "configs = make_configs(params, conditions, N=70)\n", - "len(configs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dump_configs(configs, \"test_padded_depthwise_conv2d.yml\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# test_maxpool2d.yml" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params = dict(\n", - " height=[1,4,5,8,12,15],\n", - " width=[1,4,7,8,14,20],\n", - " K_h=[1,2,3],\n", - " K_w=[1,2,3],\n", - " channels=[4,8,12,16,28,32,48,52,64],\n", - " padding=[\"valid\"],\n", - " strides=[(1,1), (1,2), (2,1), (2,2), (1, 3), (3, 2)],\n", - " num_threads=[1,2,5],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def conditions(d):\n", - " if d[\"num_threads\"] > d[\"height\"] or d[\"num_threads\"] > d[\"width\"]:\n", - " return False\n", - " return d[\"K_w\"] <= d[\"width\"] and d[\"K_h\"] <= d[\"height\"]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "configs = make_configs(params, conditions, N=62)\n", - "len(configs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, 
- "outputs": [], - "source": [ - "dump_configs(configs, \"test_maxpool2d.yml\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# test_avgpool2d.yml" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "configs = make_configs(params, conditions, N=62)\n", - "len(configs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dump_configs(configs, \"test_avgpool2d.yml\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# test_global_avgpool2d.yml" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params = dict(\n", - " height=[1,4,5,8,12,15],\n", - " width=[1,4,7,8,14,20],\n", - " channels=[4,8,12,16,28,32,48,52,64],\n", - " num_threads=[1,2,5],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def conditions(d):\n", - " return not (\n", - " d[\"num_threads\"] > d[\"height\"] or d[\"num_threads\"] > d[\"width\"]\n", - " )" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "configs = make_configs(params, conditions, N=55)\n", - "len(configs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dump_configs(configs, \"test_global_avgpool2d.yml\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Activations" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params = dict(\n", - " height=[1,4,5,8,12,15],\n", - " width=[1,4,7,8,14,20],\n", - " channels=[1,4,7,12,16,25,32,49],\n", - " num_threads=[1,2,5],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "for file in [\"test_relu.yml\", \"test_relu6.yml\", \"test_sigmoid.yml\", \"test_tanh.yml\"]:\n", - " configs = make_configs(params, conditions=lambda _: True, N=40)\n", - " print(file, len(configs))\n", - " dump_configs(configs, file)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# test_fully_connected.yml" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params = dict(\n", - " height=[1,4,5,8,12,15],\n", - " width=[1,4,7,8,14,20],\n", - " channels=[1,4,7,12,16,25,32,49],\n", - " outputs=[1,4,7,12,16,25,32,49],\n", - " num_threads=[1,2,5],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "configs = make_configs(params, conditions=lambda _: True, N=40)\n", - "len(configs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dump_configs(configs, \"test_fully_connected.yml\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# test_single_pixel_conv2d.yml" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params = dict(\n", - " K_h=[1,2,4,5,8,12,15],\n", - " K_w=[1,3,4,7,9,14,20],\n", - " input_channels=[1,4,7,12,16,25,32,49],\n", - " output_channels=[1,4,7,12,16,25,32,49],\n", - " num_threads=[1,2,5],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - 
"configs = make_configs(params, conditions=lambda _: True, N=20)\n", - "len(configs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dump_configs(configs, \"test_single_pixel_conv2d.yml\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# test_bsign.yml" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params = dict(\n", - " height=[1,4,5,8],\n", - " width=[1,4,7,8],\n", - " input_channels=[i*32 for i in (1,4,7)],\n", - " num_threads=[1,2,5],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "configs = make_configs(params, conditions=lambda _: True, N=20)\n", - "len(configs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dump_configs(configs, \"test_bsign.yml\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# test_bconv2d_bin_DI.yml" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params = dict(\n", - " height=[7, 10, 12],\n", - " width=[6, 8, 11],\n", - " K_h=[1,2,3,6],\n", - " K_w=[1,3,4,5],\n", - " input_channels=[256,512],\n", - " output_channels=[32, 64],\n", - " strides=[(1,1), (1,2), (2,1), (2,2)],\n", - " num_threads=[1,2,5],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "configs = make_configs(params, conditions=lambda _: True, N=20)\n", - "len(configs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dump_configs(configs, \"test_bconv2d_bin_DI.yml\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# test_bconv2d_bin_DI_padded.yml" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params = dict(\n", - " height=[7, 10, 12],\n", - " width=[6, 8, 11],\n", - " K_h=[2,3,6],\n", - " K_w=[3,4,5],\n", - " input_channels=[256,512],\n", - " output_channels=[32, 64],\n", - " padding=[\"same\"],\n", - " strides=[(1,1), (1,2), (2,1), (2,2)],\n", - " num_threads=[1,2,5],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "configs = make_configs(params, conditions=lambda _: True, N=20)\n", - "len(configs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dump_configs(configs, \"test_bconv2d_bin_DI_padded.yml\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# test_bconv2d_bin.yml" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params = dict(\n", - " height=[7, 10, 12],\n", - " width=[6, 8, 11],\n", - " K_h=[1,2,3,6],\n", - " K_w=[1,3,4,5],\n", - " input_channels=[32, 128],\n", - " output_channels=[32, 64],\n", - " strides=[(1,1), (1,2), (2,1), (2,2)],\n", - " num_threads=[1,2,5],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "configs = make_configs(params, conditions=lambda _: True, N=20)\n", - "len(configs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dump_configs(configs, 
\"test_bconv2d_bin.yml\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# test_bconv2d_bin_padded.yml" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params = dict(\n", - "    height=[7, 10, 12],\n", - "    width=[6, 8, 11],\n", - "    K_h=[1,2,3,6],\n", - "    K_w=[1,3,4,5],\n", - "    input_channels=[32, 128],\n", - "    output_channels=[32, 64],\n", - "    padding=[\"same\"],\n", - "    strides=[(1,1), (1,2), (2,1), (2,2)],\n", - "    num_threads=[1,2,5],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "configs = make_configs(params, conditions=lambda _: True, N=20)\n", - "len(configs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dump_configs(configs, \"test_bconv2d_bin_padded.yml\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# test_pad.yml" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params = dict(\n", - "    height=[1,4,5,8,12,15],\n", - "    width=[1,4,7,8,14,20],\n", - "    channels=[4,8,12,16,28,32,48,52,64],\n", - "    pad_t=[0,1,2],\n", - "    pad_b=[0,1,2],\n", - "    pad_l=[0,1,2],\n", - "    pad_r=[0,1,2],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def conditions(d):\n", - "    # require at least one nonzero pad\n", - "    return sum(d[\"pad_\" + k] for k in \"tblr\") > 0" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "configs = make_configs(params, conditions, N=20)\n", - "len(configs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dump_configs(configs, \"test_pad.yml\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# test_bconv2d_int8_DIDO.yml" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params = dict(\n", - "    height=[7, 10, 12],\n", - "    width=[6, 8, 11],\n", - "    K_h=[1,2,3,6],\n", - "    K_w=[1,3,4,5],\n", - "    input_channels=[256,512],\n", - "    output_channels=[16, 48],\n", - "    strides=[(1,1), (1,2), (2,1), (2,2)],\n", - "    output_range = [(range_min, range_max) for range_min in range(-4, 1, 2) for range_max in range(1, 6, 2)],\n", - "    num_threads=[1,2,5],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "configs = make_configs(params, conditions=lambda _: True, N=20)\n", - "len(configs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dump_configs(configs, \"test_bconv2d_int8_DIDO.yml\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# test_bconv2d_int8.yml" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params = dict(\n", - "    height=[7, 10, 12],\n", - "    width=[6, 8, 11],\n", - "    K_h=[1,2,3,6],\n", - "    K_w=[1,3,4,5],\n", - "    input_channels=[32, 128, 256+64],\n", - "    output_channels=[4, 28, 32],\n", - "    strides=[(1,1), (1,2), (2,1), (2,2)],\n", - "    output_range = [(range_min, range_max) for range_min in range(-4, 1, 2) for range_max in range(1, 6, 2)],\n", - "    num_threads=[1,2,5],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {
- "scrolled": true - }, - "outputs": [], - "source": [ - "configs = make_configs(params, conditions=lambda _: True, N=20)\n", - "len(configs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dump_configs(configs, \"test_bconv2d_int8.yml\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# test_bconv2d_int8_DIDO_padded.yml" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params = dict(\n", - " height=[7, 10, 12],\n", - " width=[6, 8, 11],\n", - " K_h=[1,2,3,6],\n", - " K_w=[1,3,4,5],\n", - " input_channels=[256,512],\n", - " output_channels=[16, 48],\n", - " strides=[(1,1), (1,2), (2,1), (2,2)],\n", - " padding=[\"same\"],\n", - " output_range = [(range_min, range_max) for range_min in range(-4, 1, 2) for range_max in range(1, 6, 2)]\n", - "# num_threads=[1,2,5],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "configs = make_configs(params, conditions=lambda _: True, N=20)\n", - "len(configs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dump_configs(configs, \"test_bconv2d_int8_DIDO_padded.yml\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# test_bconv2d_int8_padded.yml" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params = dict(\n", - " height=[7, 10, 12],\n", - " width=[6, 8, 11],\n", - " K_h=[1,2,3,6],\n", - " K_w=[1,3,4,5],\n", - " input_channels=[32, 128, 256+64],\n", - " output_channels=[4, 28, 32],\n", - " strides=[(1,1), (1,2), (2,1), (2,2)],\n", - " padding=[\"same\"],\n", - " output_range = [(range_min, range_max) for range_min in range(-4, 1, 2) for range_max in range(1, 6, 2)],\n", - " num_threads=[1,2,5],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "configs = make_configs(params, conditions=lambda _: True, N=20)\n", - "len(configs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dump_configs(configs, \"test_bconv2d_int8_padded.yml\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# test_bconv2d_int8_activation.yml" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params = dict(\n", - " height=[7, 10, 12],\n", - " width=[6, 8, 11],\n", - " K_h=[1,2,3,6],\n", - " K_w=[1,3,4,5],\n", - " input_channels=[32, 128, 256+64],\n", - " output_channels=[4, 28, 32],\n", - " strides=[(1,1), (1,2), (2,1), (2,2)],\n", - " output_range = [(range_min, range_max) for range_min in range(-4, 1, 2) for range_max in range(1, 6, 2)],\n", - " activation = [\"relu\", \"relu6\"],\n", - " num_threads=[1,2,5],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "configs = make_configs(params, conditions=lambda _: True, N=20)\n", - "len(configs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dump_configs(configs, \"test_bconv2d_int8_activation.yml\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# test_bconv2d_int8_DIDO_activation.yml" - ] - }, - { - "cell_type": "code", - "execution_count": null, 
- "metadata": {}, - "outputs": [], - "source": [ - "params = dict(\n", - "    height=[7, 10, 12],\n", - "    width=[6, 8, 11],\n", - "    K_h=[1,2,3,6],\n", - "    K_w=[1,3,4,5],\n", - "    input_channels=[256,512],\n", - "    output_channels=[16, 48],\n", - "    strides=[(1,1), (1,2), (2,1), (2,2)],\n", - "    output_range = [(range_min, range_max) for range_min in range(-4, 1, 2) for range_max in range(1, 6, 2)],\n", - "    activation = [\"relu\", \"relu6\"],\n", - "    num_threads=[1,2,5],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "configs = make_configs(params, conditions=lambda _: True, N=20)\n", - "len(configs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dump_configs(configs, \"test_bconv2d_int8_DIDO_activation.yml\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# test_add.yml" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params = dict(\n", - "    height=[1,3,5,8,13,15],\n", - "    width=[1,5,6,7,9,11],\n", - "    channels=[4,11,13,51,53,64],\n", - "    num_threads=[1,2,5],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "configs = make_configs(params, conditions=lambda _: True, N=20)\n", - "len(configs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dump_configs(configs, \"test_add.yml\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# test_strided_slice.yml" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params = dict(\n", - "    height=[1,4,5,8,12,15],\n", - "    width=[4,7,8,14,20],\n", - "    channels=[4,8,12,16,20,32,36,48],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def conditions(d):\n", - "    return True" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "configs = make_configs(params, conditions, N=100)\n", - "len(configs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dump_configs(configs, \"test_strided_slice.yml\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.12" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/archived/test/integration_test/__init__.py b/archived/test/integration_test/__init__.py deleted file mode 100644 index 242bbbaee..000000000 --- a/archived/test/integration_test/__init__.py +++ /dev/null @@ -1,438 +0,0 @@ -# Copyright 2019-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
- -import logging -import pytest -import _pytest -import numpy as np -import tensorflow as tf -from abc import abstractmethod -from pathlib import Path -from typing import ( - Union, - List, - NamedTuple, - Tuple, - Dict, - Iterable, - Type, - Any, -) - -from tflite2xcore.utils import unpack_bits -from tflite2xcore.model_generation import ( - TFLiteModel, - ModelGenerator, -) -from tflite2xcore.model_generation.runners import Runner -from tflite2xcore.model_generation.evaluators import ( - LarqEvaluator, - TFLiteEvaluator, - TFLiteQuantEvaluator, - XCoreEvaluator, -) -from tflite2xcore.model_generation.converters import ( - LarqConverter, - TFLiteFloatConverter, - TFLiteQuantConverter, - XCoreConverter, -) -from tflite2xcore.model_generation.data_factories import ( - TensorDataFactory, - InputInitializerDataFactory, -) - - -# ---------------------------------------------------------------------------- -# RUNNERS -# ---------------------------------------------------------------------------- - - -class IntegrationTestRunner(Runner): - _model_generator: "IntegrationTestModelGenerator" - _representative_data: tf.Tensor - - def __init__( - self, - generator: Type["IntegrationTestModelGenerator"], - *, - use_device: bool = False, - experimental_xformer2: bool = False, - only_experimental_xformer2: bool = False, - ) -> None: - super().__init__(generator) - self._use_device = use_device - - self._repr_data_factory = self.make_repr_data_factory() - self.register_data_factory(self._repr_data_factory) - - self._xcore_converter = XCoreConverter( - self, - self.get_xcore_reference_model, - experimental_xformer2=experimental_xformer2, - only_experimental_xformer2=only_experimental_xformer2, - ) - self.register_converter(self._xcore_converter) - - self._identity_converter = XCoreConverter( - self, - self._xcore_converter.get_converted_model, - experimental_xformer2=experimental_xformer2, - only_experimental_xformer2=only_experimental_xformer2, - ) - self.register_converter(self._identity_converter) - - self._xcore_evaluator = XCoreEvaluator( - self, - self.get_xcore_evaluation_data, - self._xcore_converter.get_converted_model, - self._xcore_converter.get_converted_model_params, - use_device=self._use_device, - ) - self.register_evaluator(self._xcore_evaluator) - - @abstractmethod - def get_xcore_reference_model(self) -> TFLiteModel: - raise NotImplementedError() - - def make_repr_data_factory(self) -> TensorDataFactory: - # representative data (e.g. for quantization and test) - return InputInitializerDataFactory( - self, lambda: self._model_generator.input_shape - ) - - @property - def repr_data_example_count(self) -> int: - return 10 - - def get_representative_data(self) -> tf.Tensor: - try: - return self._representative_data - except AttributeError: - try: - self._representative_data = self._repr_data_factory.make_data( - self.repr_data_example_count - ) - except AttributeError: - raise Exception( - "Cannot get quantization data before runner is run!" 
- ) from None - return self._representative_data - - @abstractmethod - def get_xcore_evaluation_data(self) -> Union[np.ndarray, tf.Tensor]: - raise NotImplementedError() - - @classmethod - def load(cls, dirpath: Union[Path, str]) -> "IntegrationTestRunner": - runner = super().load(dirpath) - assert isinstance(runner, IntegrationTestRunner) - return runner - - def rerun_post_cache(self) -> None: - self._xcore_converter.convert() - self._xcore_evaluator.evaluate() - - -class DefaultOutputData(NamedTuple): - reference_float: np.ndarray - reference_quant: np.ndarray - xcore: np.ndarray - - -class DefaultIntegrationTestRunner(IntegrationTestRunner): - outputs: DefaultOutputData - _xcore_evaluation_data: np.ndarray - - def __init__( - self, - generator: Type["IntegrationTestModelGenerator"], - **kwargs: Any, - ) -> None: - super().__init__(generator, **kwargs) - - # floating point reference - self._reference_float_converter = TFLiteFloatConverter( - self, self.get_built_model - ) - self.register_converter(self._reference_float_converter) - - self._reference_float_evaluator = TFLiteEvaluator( - self, - self.get_representative_data, - self._reference_float_converter.get_converted_model, - None, - ) - self.register_evaluator(self._reference_float_evaluator) - - # quantized reference - self._reference_quant_converter = TFLiteQuantConverter( - self, self.get_built_model, self.get_representative_data - ) - self.register_converter(self._reference_quant_converter) - - self._reference_quant_evaluator = TFLiteQuantEvaluator( - self, - self.get_representative_data, - self._reference_quant_converter.get_converted_model, - None, - ) - self.register_evaluator(self._reference_quant_evaluator) - - def get_xcore_reference_model(self) -> TFLiteModel: - return self._reference_quant_converter.get_converted_model() - - def get_xcore_evaluation_data(self) -> Union[np.ndarray, tf.Tensor]: - return self._reference_quant_evaluator.input_data - - def run(self) -> None: - """Defines how a DefaultIntegrationTestRunner should be run. - - Most integration tests require self.outputs to be set. 
- """ - super().run() - self._reference_float_converter.convert() - self._reference_quant_converter.convert() - - self._reference_quant_evaluator.evaluate() - self._reference_float_evaluator.evaluate() - - self.rerun_post_cache() - - def rerun_post_cache(self) -> None: - super().rerun_post_cache() - - self.outputs = DefaultOutputData( - self._reference_float_evaluator.output_data, - self._reference_quant_evaluator.output_data, - self._xcore_evaluator.output_data, - ) - self.converted_models.update( - { - "reference_quant": self._reference_quant_converter._model, - "xcore": self._xcore_converter._model, - } - ) - - def dump( - self, - dirpath: Path, - example_idx: Union[int, Iterable[int]] = [], - *, - dump_models: bool = True, - dump_visualizations: bool = True, - ) -> None: - # if dump_models: - # self.dump_models(dirpath, visualize=dump_visualizations) - - self.dump_data( - dirpath, - data={ - "input": self._xcore_evaluator.input_data, - "reference_quant_output": self.outputs.reference_quant, - "xcore_output": self.outputs.xcore, - }, - example_idx=example_idx, - ) - - -class BinarizedOutputData(NamedTuple): - reference_quant: np.ndarray - xcore: np.ndarray - - -class BinarizedTestRunner(IntegrationTestRunner): - outputs: BinarizedOutputData - - def __init__( - self, - generator: Type["IntegrationTestModelGenerator"], - **kwargs: Any, - ) -> None: - super().__init__(generator, **kwargs) - - self._lce_converter = self.make_lce_converter() - self.register_converter(self._lce_converter) - - self._lce_evaluator = LarqEvaluator( - self, self.get_representative_data, self._lce_converter.get_converted_model, None - ) - self.register_evaluator(self._lce_evaluator) - - def get_xcore_evaluation_data(self) -> Union[np.ndarray, tf.Tensor]: - return self.get_representative_data() - - def make_lce_converter(self) -> LarqConverter: - return LarqConverter(self, self.get_built_model) - - def get_xcore_reference_model(self) -> TFLiteModel: - return self._lce_converter.get_converted_model() - - def run(self) -> None: - super().run() - self._lce_converter.convert() - self._lce_evaluator.evaluate() - - self.rerun_post_cache() - - def rerun_post_cache(self) -> None: - super().rerun_post_cache() - - self.outputs = BinarizedOutputData( - self._lce_evaluator.output_data, - self._xcore_evaluator.output_data, - ) - self.converted_models.update( - { - "reference_lce": self._lce_converter._model, - "xcore": self._xcore_converter._model, - } - ) - - -# ---------------------------------------------------------------------------- -# GENERATORS -# ---------------------------------------------------------------------------- - - -class IntegrationTestModelGenerator(ModelGenerator): - @abstractmethod - def _build_core_model(self) -> tf.keras.Model: - raise NotImplementedError() - - def build(self) -> None: - self._prep_backend() - self._model = self._build_core_model() - self._model.build(self._model.input_shape) - - -# ---------------------------------------------------------------------------- -# HELPERS -# ---------------------------------------------------------------------------- - - -class FailedElement(NamedTuple): - idx: Tuple[int, ...] 
-
-
-# ----------------------------------------------------------------------------
-# HELPERS
-# ----------------------------------------------------------------------------
-
-
-class FailedElement(NamedTuple):
-    idx: Tuple[int, ...]
-    diff: Union[int, float]
-    expected: Union[int, float]
-    predicted: Union[int, float]
-
-
-def __log_deviations(diff: np.ndarray, level: int, *, msg: str) -> None:
-    logger = logging.getLogger()
-    if logger.isEnabledFor(level):
-        if np.issubdtype(diff.dtype, np.integer):
-            devs = [
-                f"{c}/{diff.size} ({c / diff.size:.2%}) with diff={v}"
-                for v, c in zip(*np.unique(diff, return_counts=True))
-                if v
-            ]
-            msg += " deviations: " + (", ".join(devs) if devs else "None")
-        else:
-            stats = {
-                "mean": np.mean(diff),
-                "stdev": np.std(diff),
-                "median": np.median(diff),
-                "min": np.min(diff),
-                "max": np.max(diff),
-            }
-            msg += f" deviation stats: {stats}"
-
-        logger.log(level, msg)
-
-
-class BatchedArrayComparison(NamedTuple):
-    failures: Dict[int, List[FailedElement]]
-    mean_abs_diffs: List[float]
-
-
-def _compare_batched_arrays(
-    predicted: np.ndarray,
-    expected: np.ndarray,
-    tolerance: Union[int, float],
-    per_bits: bool = False,
-) -> BatchedArrayComparison:
-    assert tolerance >= 0
-    assert predicted.shape == expected.shape
-
-    output_type = predicted.dtype
-    assert output_type == expected.dtype  # NOTE: 'is' operator can be buggy, use ==
-    if per_bits:
-        predicted = unpack_bits(predicted)
-        expected = unpack_bits(expected)
-        diffs = np.bitwise_xor(predicted, expected)
-    elif np.issubdtype(output_type, np.integer):
-        diffs = np.int64(predicted) - np.int64(expected)
-    elif np.issubdtype(output_type, np.floating):
-        tolerance = np.float32(tolerance)
-        diffs = np.float32(predicted) - np.float32(expected)
-    else:
-        raise TypeError("Only integer and float types are supported")
-
-    failures: Dict[int, List[FailedElement]] = {}
-    mean_abs_diffs: List[float] = []
-    for j, (arr, arr_ref, diff) in enumerate(zip(predicted, expected, diffs)):
-        __log_deviations(diff, logging.DEBUG, msg=f"Example {j}")
-        abs_diff = np.abs(diff)
-        # NOTE: zip(...) is a one-shot iterator; it must not be consumed
-        # (e.g. by stray debug logging) before this comprehension runs.
-        diff_idx = zip(*np.where(abs_diff > tolerance))
-        failed_elements = [
-            FailedElement(idx, diff[idx], arr_ref[idx], arr[idx]) for idx in diff_idx
-        ]
-        if failed_elements:
-            failures[j] = failed_elements
-
-        mean_abs_diffs.append(float(np.mean(abs_diff)))
-
-    __log_deviations(diffs, logging.INFO, msg="Total")
-    return BatchedArrayComparison(failures, mean_abs_diffs)
-
-
-# ----------------------------------------------------------------------------
-# TESTS
-# ----------------------------------------------------------------------------
-
-
-def test_mean_abs_diffs(
-    compared_outputs: BatchedArrayComparison, mean_abs_diff_tolerance: float
-) -> None:
-    msg = [
-        f"\nidx={j}: mean_abs_diff={mean_abs_diff} > tolerance={mean_abs_diff_tolerance}"
-        for j, mean_abs_diff in enumerate(compared_outputs.mean_abs_diffs)
-        if mean_abs_diff > mean_abs_diff_tolerance
-    ]
-    if msg:
-        pytest.fail(
-            f"The following examples have excessive mean deviations:{''.join(msg)}",
-            pytrace=False,
-        )
-
-
-def test_output(
-    compared_outputs: BatchedArrayComparison, request: _pytest.fixtures.SubRequest
-) -> None:
-    verbose = request.config.getoption("verbose") > 0
-
-    if compared_outputs.failures:
-        msg = "The following examples have failed elements:"
-        msg += "".join(
-            f"\n{request.node.fspath}::{request.node.name} Example {j}"
-            + (
-                "".join(
-                    f"\nidx={e.idx}: diff={e.diff}, "
-                    f"expected={e.expected}, predicted={e.predicted}"
-                    for e in elements
-                )
-                if verbose
-                else ""
-            )
-            for j, elements in compared_outputs.failures.items()
-        )
-        if not verbose:
-            msg += "\nSet verbosity > 0 for more details."
-        pytest.fail(msg, pytrace=False)
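Stripped of the logging and NamedTuple plumbing, the per-example check in _compare_batched_arrays boils down to a few numpy calls. A self-contained sketch with made-up int8 data (values illustrative):

    import numpy as np

    tolerance = 1
    predicted = np.array([[10, 12, -5], [0, 3, 9]], dtype=np.int8)  # e.g. xcore outputs
    expected = np.array([[10, 14, -5], [1, 3, 9]], dtype=np.int8)  # quantized reference

    # Widen to int64 before subtracting, as above, so int8 arithmetic cannot wrap.
    diffs = np.int64(predicted) - np.int64(expected)

    for j, diff in enumerate(diffs):
        failed_idx = list(zip(*np.where(np.abs(diff) > tolerance)))
        print(j, failed_idx)  # example 0 fails at (1,): |12 - 14| = 2 > 1; example 1 passes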
diff --git a/archived/test/integration_test/conftest.py b/archived/test/integration_test/conftest.py
deleted file mode 100644
index b29dd7f60..000000000
--- a/archived/test/integration_test/conftest.py
+++ /dev/null
@@ -1,278 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import yaml
-import logging
-import portalocker
-import pytest
-import _pytest
-import numpy as np
-from pathlib import Path
-from typing import Dict, Type, Optional, Union, List
-
-from tflite2xcore.utils import dequantize
-from tflite2xcore.model_generation.utils import stringify_config
-
-from . import (
-    IntegrationTestRunner,
-    DefaultIntegrationTestRunner,
-    _compare_batched_arrays,
-    BatchedArrayComparison,
-)
-
-
-# ----------------------------------------------------------------------------
-# HOOKS
-# ----------------------------------------------------------------------------
-
-
-def pytest_addoption(parser: _pytest.config.argparsing.Parser) -> None:
-    parser.addoption(
-        "-C",
-        "--coverage",
-        action="store",
-        default="default",
-        choices=["reduced", "default", "extended"],
-        help="Set the coverage level",
-    )
-
-    parser.addoption(
-        "-D",
-        "--dump",
-        action="store",
-        default=None,
-        choices=[None, "models"],
-        help="Set what contents of the model generation runs should be dumped into cache for easier access.",
-    )
-
-    parser.addoption(
-        "--config-only",
-        action="store_true",
-        help="The model generators are configured but not run",
-    )
-
-    parser.addoption(
-        "--generate-only",
-        action="store_true",
-        help="The model generators are run and cached but outputs are not evaluated for correctness",
-    )
-
-    parser.addoption(
-        "--use-device",
-        action="store_true",
-        help="Execute interpreter on hardware device",
-    )
-
-    parser.addoption(
-        "--experimental-xformer2",
-        action="store_true",
-        help="Use MLIR-based xformer 2.0 for part of the optimization pipeline. Experimental.",
-    )
-
-    parser.addoption(
-        "--only-experimental-xformer2",
-        action="store_true",
-        help="Use MLIR-based xformer 2.0 exclusively for the optimization pipeline. Experimental.",
-    )
-
-
-def pytest_generate_tests(metafunc: _pytest.python.Metafunc) -> None:
-    if "run" in metafunc.fixturenames:
-        try:
-            configs = metafunc.module.__configs
-        except AttributeError:
-            try:
-                CONFIGS = metafunc.module.CONFIGS
-                config_file = Path(metafunc.module.__file__)
-            except AttributeError:
-                logging.debug(f"CONFIGS undefined in {metafunc.module}")
-                config_file = Path(metafunc.module.__file__).with_suffix(".yml")
-                try:
-                    with open(config_file, "r") as f:
-                        CONFIGS = yaml.load(f, Loader=yaml.FullLoader)
-                except FileNotFoundError:
-                    logging.info(
-                        "Cannot find .yml test config file and "
-                        "test module does not contain CONFIGS"
-                    )
-                    CONFIGS = {}
-
-            coverage = metafunc.config.getoption("coverage")
-            try:
-                configs = list(CONFIGS[coverage].values()) if CONFIGS else [{}]
-            except KeyError:
-                raise KeyError(
-                    "CONFIGS does not define coverage level "
-                    f"'{coverage}' in {config_file.resolve()}"
-                ) from None
-            metafunc.module.__configs = configs
-
-        metafunc.parametrize(
-            "run",
-            configs,
-            indirect=True,
-            ids=[f"CONFIGS[{j}]" for j, _ in enumerate(configs)],
-        )
-
-
-def pytest_collection_modifyitems(
-    config: _pytest.config.Config, items: List[pytest.Item]
-) -> None:
-    use_device = config.getoption("--use-device")
-    use_xformer2 = config.getoption("--experimental-xformer2") or config.getoption("--only-experimental-xformer2")
-    skip_on_device = pytest.mark.skip(reason="Test skipped on device")
-    skip_on_xformer2 = pytest.mark.skip(reason="Test skipped when using xformer2")
-    for item in items:
-        if use_device and "skip_on_device" in item.keywords:
-            item.add_marker(skip_on_device)
-        elif use_xformer2 and "skip_on_xformer2" in item.keywords:
-            item.add_marker(skip_on_xformer2)
-
-
-# ----------------------------------------------------------------------------
-# FIXTURES
-# ----------------------------------------------------------------------------
-
-
-@pytest.fixture(autouse=True)
-def disable_gpus(monkeypatch: _pytest.monkeypatch.MonkeyPatch) -> None:
-    monkeypatch.setenv("CUDA_VISIBLE_DEVICES", "-1")
-
-
-@pytest.fixture
-def use_device(request: _pytest.fixtures.SubRequest) -> bool:
-    return bool(request.config.getoption("--use-device"))
-
-
-@pytest.fixture
-def experimental_xformer2(request: _pytest.fixtures.SubRequest) -> bool:
-    return bool(request.config.getoption("--experimental-xformer2"))
-
-@pytest.fixture
-def only_experimental_xformer2(request: _pytest.fixtures.SubRequest) -> bool:
-    return bool(request.config.getoption("--only-experimental-xformer2"))
-
-_WORKER_CACHE: Dict[Path, IntegrationTestRunner] = {}
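pytest_generate_tests above resolves parameter sets from a module-level CONFIGS mapping (or a sibling .yml file) keyed by the -C/--coverage level. A hypothetical definition and the derivation the hook performs on it (the parameter names are illustrative):

    # Hypothetical per-module CONFIGS; outer keys are coverage levels.
    CONFIGS = {
        "reduced": {0: {"height": 5, "width": 5}},
        "default": {0: {"height": 5, "width": 5}, 1: {"height": 9, "width": 9}},
        "extended": {0: {"height": 5, "width": 5}, 1: {"height": 9, "width": 9}, 2: {"height": 17, "width": 17}},
    }

    coverage = "default"  # from the --coverage option
    configs = list(CONFIGS[coverage].values())  # what the hook computes
    # -> two parameter sets, so the "run" fixture is parametrized twice,
    #    with ids "CONFIGS[0]" and "CONFIGS[1]"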
-
-
-@pytest.fixture
-def run(
-    request: _pytest.fixtures.SubRequest, use_device: bool
-) -> IntegrationTestRunner:
-    try:
-        GENERATOR = request.module.GENERATOR
-    except AttributeError:
-        raise NameError("GENERATOR not designated in test") from None
-
-    try:
-        RUNNER: Type[IntegrationTestRunner] = request.module.RUNNER
-    except AttributeError:
-        RUNNER = DefaultIntegrationTestRunner
-
-    pytest_config = request.config
-
-    if request.param.pop("skip_on_device", False) and use_device:
-        pytest.skip()
-
-    runner = RUNNER(
-        GENERATOR,
-        use_device=use_device,
-        experimental_xformer2=pytest_config.getoption("--experimental-xformer2"),
-        only_experimental_xformer2=pytest_config.getoption("--only-experimental-xformer2"),
-    )
-    runner.set_config(**request.param)
-
-    logging.info(f"Config: {runner._config}")
-    if pytest_config.getoption("--config-only"):
-        pytest.skip()
-
-    config_str = stringify_config(runner._config)
-    file_path = Path(request.module.__file__)
-    key = file_path.relative_to(pytest_config.rootdir) / config_str
-
-    try:
-        runner = _WORKER_CACHE[key]
-    except KeyError:
-        pytest_cache = pytest_config.cache
-        if pytest_cache is None:
-            raise TypeError("pytest cache is not available")
-
-        dirpath = pytest_cache.get(str(key), "")
-        if dirpath:
-            runner = runner.load(dirpath)
-            logging.debug(f"cached runner loaded from {dirpath}")
-            runner.rerun_post_cache()
-        else:
-            runner.run()
-            try:
-                with portalocker.BoundedSemaphore(1, hash(key), timeout=0):
-                    dirpath = str(pytest_cache.makedir("model_cache") / key)
-                    dirpath = runner.save(dirpath)
-                    if pytest_config.getoption("dump") == "models":
-                        runner.dump_models(dirpath)
-
-                    logging.debug(f"runner cached to {dirpath}")
-                    pytest_cache.set(str(key), str(dirpath))
-            except portalocker.AlreadyLocked:
-                # another process will write to the cache
-                pass
-        _WORKER_CACHE[key] = runner
-
-    if pytest_config.getoption("--generate-only"):
-        pytest.skip()
-
-    return runner
-
-
-@pytest.fixture
-def abs_output_tolerance() -> int:
-    return 1
-
-
-@pytest.fixture
-def bitpacked_outputs() -> bool:
-    return False
-
-
-@pytest.fixture
-def implicit_tolerance_margin() -> float:
-    return 0.05
-
-
-@pytest.fixture
-def compared_outputs(
-    run: DefaultIntegrationTestRunner,
-    abs_output_tolerance: Optional[Union[int, float]],
-    bitpacked_outputs: bool,
-    implicit_tolerance_margin: float,
-) -> BatchedArrayComparison:
-    if bitpacked_outputs:
-        return _compare_batched_arrays(
-            run.outputs.xcore, run.outputs.reference_quant, tolerance=0, per_bits=True
-        )
-    if abs_output_tolerance is None:
-        # use implicitly derived tolerance
-        output_quantization = run._xcore_evaluator.output_quant
-        y_quant = run.outputs.reference_quant
-        y_float = run.outputs.reference_float
-
-        # The implicit tolerance is derived from how much the quantized reference
-        # deviates from the floating point reference.
-        # max_diff is usually at least one quantization step; enforce that lower bound and add some room for error
-        max_diff = np.max(np.abs(dequantize(y_quant, *output_quantization) - y_float))
-        abs_output_tolerance = max(float(max_diff), output_quantization.scale) * (
-            1 + implicit_tolerance_margin
-        )
-        logging.info(
-            f"Using implicit absolute output tolerance: {abs_output_tolerance}"
-        )
-
-        return _compare_batched_arrays(
-            dequantize(run.outputs.xcore, *output_quantization),
-            run.outputs.reference_float,
-            abs_output_tolerance,
-        )
-
-    return _compare_batched_arrays(
-        run.outputs.xcore, run.outputs.reference_quant, abs_output_tolerance
-    )
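To make the implicit tolerance derivation in compared_outputs concrete, here is the same arithmetic on made-up numbers, with a stand-in for tflite2xcore.utils.dequantize (scale, zero point and data are all hypothetical):

    import numpy as np

    def dequantize(q: np.ndarray, scale: float, zero_point: int) -> np.ndarray:
        # Stand-in for tflite2xcore.utils.dequantize: map int8 back to float.
        return (np.float32(q) - zero_point) * scale

    scale, zero_point = 0.05, -3  # hypothetical output quantization
    y_float = np.array([0.10, 0.52, -0.25], dtype=np.float32)  # float reference
    y_quant = np.array([-1, 7, -8], dtype=np.int8)  # quantized reference

    max_diff = np.max(np.abs(dequantize(y_quant, scale, zero_point) - y_float))  # ~0.02
    # Enforce at least one quantization step, then add the 5% implicit margin:
    abs_output_tolerance = max(float(max_diff), scale) * (1 + 0.05)  # 0.0525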
diff --git a/archived/test/integration_test/pytest.ini b/archived/test/integration_test/pytest.ini
deleted file mode 100644
index 64ff58573..000000000
--- a/archived/test/integration_test/pytest.ini
+++ /dev/null
@@ -1,4 +0,0 @@
-[pytest]
-markers =
-    skip_on_device: skips test on device (i.e. when --use-device is specified)
-    skip_on_xformer2: skips tests when using new xformer 2.0 (i.e. when --experimental-xformer2 is specified)
diff --git a/archived/test/integration_test/test_directed/__init__.py b/archived/test/integration_test/test_directed/__init__.py
deleted file mode 100644
index 3bc495e21..000000000
--- a/archived/test/integration_test/test_directed/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-from .. import (
-    BinarizedTestRunner,
-    IntegrationTestModelGenerator,
-    test_output,
-)
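The run fixture looks up GENERATOR and RUNNER on the test module, and this __init__.py re-exports the pieces a directed test needs (importing test_output is what makes pytest collect the shared output check in each module). A hypothetical directed test module wiring it together, reusing a generator like the sketch earlier:

    # test_directed/test_bnn_example.py -- hypothetical module layout
    from . import (
        BinarizedTestRunner,
        IntegrationTestModelGenerator,
        test_output,  # collected by pytest in this module
    )

    class BnnExampleGenerator(IntegrationTestModelGenerator):
        def _build_core_model(self):
            ...  # build a binarized keras model here

    GENERATOR = BnnExampleGenerator
    RUNNER = BinarizedTestRunner  # omit RUNNER to fall back to DefaultIntegrationTestRunner

    CONFIGS = {"default": {0: {}}, "reduced": {0: {}}, "extended": {0: {}}}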
diff --git a/archived/test/integration_test/test_directed/bnn_model/saved_model.pb b/archived/test/integration_test/test_directed/bnn_model/saved_model.pb
deleted file mode 100644
index fee6d36f84fb08cffc8036356e375d3384c4b602..0000000000000000000000000000000000000000
Binary files a/archived/test/integration_test/test_directed/bnn_model/saved_model.pb and /dev/null differ
zZl(RpD+TM}wlEj5DUV>($8lNU8S|ch_{>$}g1vr6INT4v+DabayR`G&9f`AB7t+?t zg{#uTe&ePzX!}~nx>;?c({hGsC0?n}xN!>I`vUpK38=lu&yw`ovIm$$6MV&_T-eA_NjuD;aIrOt`i-c~A5}V?=TiPAxzTqnBfw-#IKna*td|{n6 z2cWavQ+BB740C)L3Tc?7?EP>Dd5f6UJ6}7>jm7K|-+myv&oK2Thw$Ky zE4UY;)-rMo(Qm#d$?CsNc=Z07q-IGIYy2Xkw-2Fi(+7Qqn-p-#ekx|}?}a(9D%tS> zxYTXmJeUyER$#U8e&i^%?df^;FTz&j(ZM ziv@0nCT6jutzMxCk5G$5>7XzSv#hCKA4HSi(Rz(B=DOQrWEYw)=oszH$5$wZ^4<;;5*L3K%8ZMK!kQ*|0dp6h4`{YMkAcB5uIJbXsKVD=p|z3 zpEEJXYwl@@cViEDZ%R44mzyR3l!Ta%+9lL9Ah(e|fZRXegZ#~C{vqbBmEj!al9NiE zQ71d&RITXXg8~@VM-EGlC()8!E2v@gIuIk*w;1_}9`R>2HUCh%eyAPILGQ#4IINvJ zYprm&y*0E$EPs+>A?&$j#wz>mWj*msJbruwR5(0g28$<(-|dW{j`kzjy+cRH_OYof zu5_xfyBYHe$U|aO$h4i1KLwlJptvWedY|*iZG47I+a8Elx8WJI)Q;+<#=*)84_<

39CF}_HeR)q+m-AYVUFvipc=tfB{{R&!6A@_i>%{ju9uGRFRx+Q; z6EUX1K%fN#w@7JjnmuwWRrLJF@Bf7ATEcMO$?vH3vg3YA;M) z3Z>IgC#Q$`%Z2r9-Mz7_bqV%ud0}2@sS_U<;{?ypYxEfJcEfldws3n8WZ--}bNV{T zS4*i!o&1ffxLU^LmONE&t-{~)KjhknMv71Ag@9%9V!G|^Ychg=D1?qV%A5?(iY;%i z;^to0pT6KZC=xqd zL($`Z>ywBt;NW<2G-tZUOEM|Rhu&HBSvZxG5B6hw(}{?wD-=7$wZoRs<*~cq%&Gg@ z2a04G<6^}-mA#j|_>;!|i#CQi!xobGKd!Xb!|4!zq6^*ne}1kW@ugA!!K-Q|H5xIQ zt2`PG64U`0ZbiT8ja|ev2(uLS8EO~GIuU~v>u9pK4-^@caVH9=0UYj4rwmmA`7?w5 z8F7+3Is!E>k2HeWo*MV<`3s=k3QqFlnZM_~i%V$0!QPT?lX0&pwuR99j@0jG2l#|q zp)KP-sBgq5=yE-0GI!^3QK)<+*?QuF&}WE7a9SGxmKj(1_QPHYzV1p&6w}z%t~WGX z`$HO88Fp}tP-?s^LSfn!859?9A)Kf?@x|ZBQeMmJ)b!WR811i-8M*T3C&$3OG7H|- z2r-fBJ!GeevDS40Vjgz`>4?x6*4Rr;8i!S}v6?LOLG}SJt72Zqe<<`*VdkM0_t9)! z5^(e*A7S6kJ!0f7?&q`pl$$nGJjbYof4&JZ)wU-d?|j~|Z|=+3G;NO1qYg9nCR@{e z!8xojFBZ0--!rd2c0raR*0?|#VYa}!-6UoJ@L zYE%5%7f%vze>4!j7$N>W>pZdizEE;-$9~PavzJ-cdDIg5RZE&GvqS;-_wC&8PQT-P z95G}uT=||37xpg!t!px=o>kBOwD{3AE{U|$e$=d^f5qU$o{~?Gm(bmT=zoy>WLJ+k zaazAP=(nT;$Mv5EkDdgHAIIPs(F3)hsBe`dCyHl8*NO68KNA*TdrZ!D8c126bYhCS z|IXGirSoP;gcgSd;vrkj;iqK;ysVOvuhd$SIL(&)cwr3D-^Xaqv`vTGL5WcF9?;)L##X z*`T>pr#65N?7mgA^6fzJjy)}`A0!{};}ODKA>UZD;)B8$`CJsLTZV0z=|Ptrkk zpL94igcNUnOU75=d-)4}I}32XchsYk|Gp>5T7O#fN}oR9KDaAy%sB_aK- z)K+<=oWqiO?d$t8I%n8OYEqs@|9gSDjBTIT=c?}fabwiGBd+i*1oLOC!nI9^KPwu% z&tuLzo!c&(SVUoV-YqE%--k2Hy-}Vwnwy0|Rp>|FA1;`#!5oM^Tj*AtQ+!{U@u|&! zMc4Kz>EMgT5{q_-TTD?vz_{&fFTN9pM|THuqCeBWGZy@MnsTiPNnl%8PdpSRtUN1~ zF8uZ#y+mQ~r6iAQn!KAaGn|7DPVp3acc1b^V1C*I=m#;${!ZX?wjFoVyNjq% zUcrEi5#;|?iypm|!i64c?!mP*VSS&&T;9sV^i`LK>aB7eb|lna{BU0zYZsWuS00su z-k#nPlaswf>#(DCY3>n;&gy+k@*omSaLy7uUhVa1IWB%;uUr23~!}{@!{=u1BGj_`og|ow5(Pyop;_Xr}<}_yj2w zB3Cx@(l)xbvUjQVpk5HR8u7+5qPY`0RG3(cxC8PO;(LGj>+O&yIA16M#*>ZU7vjYBV9_wRBRyIo1I4~P>U z;clOAsHD}e%2?Sf%-7y=lBHx7k^iQm=61KCaMwx4WBmxNdM@tap_xjSpR}1yt$fXV zHzQAnTw`w6C~e+~m6GnKdrQ{TC2LL%ny$?qI}Mb0_B}w&&yvX;jG7=4Ex0d*M6JDzUvrOlR^R8^?Kh%~S;5|$Qg9r6hi#vN`l|k8)p-xbviWfa z&=7o>Ie!eHY*@nC$rkNOdLXVHH{XVija#mHkt?QFH_;n?ZZOP0;Y_2>pnp@ULxq1* zuy4pF-D+Q+-@PQ8HikLSWjM>;KD8QVZG0tp7qx;tAALIu%<#^fKQv0b)(AP(5@4<2MZ~$XmR*fZW=4D4 zX$!lpCi9-}X6_+~Z~V!Eg{@oIoqrS9-M~?v(-Gsp%@nYFt0V2`d{Z;mAXM_PyhsqG z^V;*@>i9d}!=TVRkNEw#Nv7J=iZk}6YVX<1rBA!o3j=MZl7D9_wNEEvj$_~(F7Uh` zIXrC#8`d~jtsPZH+;#4V)*w#1tp8~;VFGGc{KAQ2c8Tb?Q>A#};AY->18Q|iW9jTM z#gd_4my|~I+7BP($UC85u=Q&W8_-EcKcKeEW6mG)eQY*3h04^`+Z3?zcu%NtS;&7t zeZqnGHEhT$DU>uKPv7K9SI7M03I6x~>`kN+O&IniuhS?!uy__syQqFj3j!J2VkxVlB?l4=MuJ@TM$QM}}LnLX!jU@J`a z!|u4oSmF^#Va$~%V%prQ-hNIF3D-Np{;N_t6g>o8_ex>!F&XvR@Ju`gH3UnFRIv8B zu2{Y?7s$W=-=WhyJ3S63E?GTk$IsS~@g|0TFO8#dF`ZzU*L8J;SEmJr?u3 z!l5FuR(LzcinJ{L&B~*b8O(KH>T2|y*y4FLS44dVDd>vTt!zrvcq0C>(LH;+m@A^5 zoLkgky4-a*Yb}$}Qq0<&+#{AOc~nQ{)hCJX;n}gsw8-;k_yIn!@hFQJkx306E9i-# zDjHq=gRGzJBt8?S3+m^cG#js8Cp)vRv$(R|aNYQpBnRg%k(CuOI*A%^)M?GB@S&nk zHe%blbDp-&vuVa@^trcnf{3^Qk|6Z(#B}HlDVEs9@dJB4PS|U2e%>M}`YY($fIx-k_petWW68oGbLx$&5-_djU zA=%TQ#8*i_WPK;`#;8AOS;e-S70?OkYO>l}LA**Nd?lU>nS-3z(j6*LXy=q%bud;h zs^o>9*i+{5*+U&T`=xL)d;+QJu!5#pk7FzGoU?n+9p_`{xs1bpmkn~T%Kk&DA{R+shb_~ruX#b9YgO>s96P0oF{{tE zlx;xH{%N|2P?0+x`@WEOsv1K)zoACHbTKs1JutW{dMFW_GnG1e=(b0n#Sl47`W3_n zp^xE%-bXTE7iy^{j9}WSBefpwHqrB&V#R$&ci42@uuCpxf^Q1>o53LEj;QU7xYosgu*w})>^!`B^=$7AS z>)Vf^5!OSfWULgvL>N)ODT9Q_7vESg>J#Jp`U_z#Rzk;ROUSkVQFPvMHGh8`r_z#) zq%`*L8T#;22xUrGD}25LsNSXcQlEldGGyv&dJPP*;_^##n%dj-}(LF zPabuh`}w@j>-~B?UlDdsB0aJ+}L9Ho$6I zYq;&CzWf5wqUX*if%E)pq4~Hx%IB*R=Opox$GWND7l|5J;fWAS-6gEg(al|TyOn{r zt^lV>PXL#QAE|TlUU2z?IL5iS-`KnxqYGaPr?s=4gZx8Wc%lY|mqw@_8 zHPMIoF(hESpow@E%Gs>Rmu$!O%2VQUz)oqHjSK}I)Z-Z9IiEm0NDX`7NPRBtMCMGv 
zW1@p(2qEI`lw=I+Le1mJ;w>MH0Cc!U{Un~G;nxtKnO6-w?%JS@MnZ7-JA*UA!(E#*(CI5B=;#|haPlreLqd{$IkpgUH~Qc=%`Kd$m@Dj(L>_KZ zQ3TY$TsU)h7o4|)><%9hUc>JI&>H((bmz=QXjK#dxh;p_+^Hcr-(42}YyqIsNDZF- zv`$<$bOwFh^PJK=w;v}oJZB%Knq%gy2J9dn)pg|D{5oHTW7|A!-+i_Md(@WV*Jn*p zS8yc+WjDA-c1&lZF9u>abuw#-hHd5z5N@33Yx-w>3Y<7q2CSQR7{4&-155AC1Ic~F z<29v{o7aB_%u=gjcR)ohyEhfxj3ZvK^%T;XI1y^t)Z$({DHN1v0pF+)Hhx?g)uCSm zolp7DyDab0s%ELy4@P#ud+8}uTHQ`m?7kJt{Ndq>mIqXyb`{LYEfi0?SWWpYaD*pl z4?I~zjg1aYW_-z9wUA+eWG#w-_LW>wGQ8E;iAi1W#Ei9O|gx1I-dTE>>TUk z;PJY1U`oS7w1RjOUXXkO$xT63Ei=h6{Z#Sxr&A=e6em&|FDEk5q<_bKVlA}3QHLMo zAELMKdJis&|D*T1Si*NFZNQ|^6lBuFp?9C-(+)44A%BFA|6EH1zhpe|nPr1W=SLEd zapB?14m&vWJ`!(rM>Df=FwW*V)$Q=qpe_T_M>9!d;@bZ->3QP&Y=eQaU#k7PoL zejHeGd;(kXn#bnvUW*OA&U3|Wf$*d6^)9NE@F!{xz`;wdHeu;SxIG|PW8 z)&GkJ;%C^gHkM(~<5?o$EWFMrDU{H6O36FZOa~@B%7yPtmEnQ$2XLC&4YtR(lFiGO zfuhM#ct*bvcds}pK7FScEqc<%s*F_v^_@O!%+kp)Yd8$dB%X_GhIHnvC5%r?8~m5_ z!7*Jg!7?!i{hq{!=;CY8P#lF$dDO!QDF-lX@_kWrb2+k9-i|B$CZP8L>%e?PYdqC0 z6-B(~!z{8x=)7JbDUHcw6f5mfxB4_uu1qC9S1f?h@i#hykMOZ|OeI`z%)@iiWgotf~=Xtz>4N$p^QtMgCgEBR)vPu;jyW+6q?TUI>-ZusEcICsi;mLUDtPXyg z@D2U=ev&0y1DxbaI#c|JhI7an_@|OQJ4_SroP4 z04M*Q5;yRj7Cv_34xOlO43E_F@JH`iSlsi4by0ZC>ck=R^`C%End=YaW=3;V=gor4 zlkbV)0ws_hG8>;O8bj&36(uqDyP%62VKa*5ko>SQHYWGoP3HC3QtJyfbL&2|mEpOvh2Qy7X z3;q_delrHxqE|{Z%RUDkGmXH)-fNcg!>)1aEgInb<`m?b!pBEm8d0Z0O=)DeUUamH z%*MxvXOw7=Ti5&HnMy-IiR@VWc9K0#AIV^Px??kvf10vIQ6hJNK@V^D+jeVKg3H%F z(s==yXuZ=$wo=X#`o@s_7tvB0aud1(!fIf&aDwfgfkwKgUkgUhkbJK;FDx>ULX^UUYH>PLvL+mtJI8=AO2Ut-lButQ1+>4#B~(Ay zL|7XFEH{1z8+o!G&X3#9*pBb*{_*rCyJp)h@N{!}pxu}}5CcW3qa&4T?KBr!*qOty4U6bL6#$m`PSGVA?sr{Z`6$J@)3qWm}Tr6s;=O<@$JNmPUB%BR0(P1I_5o zUtPv_{yEDZ-V_Ftbg(wjjC$WzA&)tjoy#|%UsjvqbmCjPZ)Yf$jya8%5a08oo%bcZ zK3q`zeiI1vodSNQohKJE;1ZqmFM zZ9LgCh!#C^)brY~$_XQJ{A}=wS zo`>DekY4c^J>)q#+g5FQw&-65N8*qWimsLM;A_bR)SS8kPgxrYWl09+Mgb4ot0hP{ zZXuEl13K{C_eQ8vBSs&>@{p$BF#fFe2AxzV`rmwY_T#C!VyB92VDsQmXVz0cJaB~g z9N#~sf2h0B>HA5y!n#NtJNW^s@tZ?071iS%#22BsG#EZg;9=cys!Zj{B;?&5#{TI) zBJo&5d|qr6wc=hM`0JU0v!^CwnT`;&%4juw`?4PPtv`sE9mF&IFc@WuzJdjLoxsy( z2~4YwgK4^ZA$R9BdU&1|-u_@B9O*ibyeo({dxH2dW)g4lkTRA&d==^CC*T{S>EQC+ zG-#0jg<2fafg&cUL;km7cHX`QPW`qr2&BKGQ!YOEX!;iD+SG)aXPUqpt%PBkT0^JI zx1efN9YIyAA$!s26{x&-oU>%yE0mH%IOB#%*z?*^^yKzmU>I4BJG=|<(mW^JWp*C< zcgtYfxCj`adUkSU7K-Sej5B_uf~1x<_E6_@dW!2Bw3;(2TC(jT2wZXy-}~=2_i>a5 zdZ%p$$AY&5-Ue@QzQR+|y4D6|MYwaHH_E|84>_=1b42oAVS%L0hupDN459cDInJWH zrO-b-59Te3T~9LGtx_NoB?8~Y9}n>8cz{8`X-k|pJOPW3WUd*y1$*aDxRxo>)(&qonjuob>L>#Ome>ZmM4Lf zy8(DbZ4m9tlwt>wJvLn3!qK~92RD)Z%lRn{AjwnPcI0L@4EIaI6NnFYXyh>b_JefR zR{moA=JcU+X+7LsFWRWsr0qC~GfOgKp8*_7`prD7GQoH9&w*oW9#iG=k!+jEZnU6G z7d+wRcaJ=2pj?&@fxn6xAZh40N-!lprv7S_nZ64c5ik4TvT(Sb_>1P2CMD0acmXEnB z)#cIX#}~lOoG_32Xxi$25*>e{7JU7eC)vQ7!syW3Y?0$jdfr$$6>78%FR54pj>Vke zmMI-T&koeX{?ChmISsx-2j5UD(SB=nSc&DjfAHkgZ*3OVafrL z(>y+Ct!C8LJ@d8`-u)sHPK(WgQPnmuO@WVdiynv!x80KP7(+Nex(b)dH_*=ud_`;f zdPSAwu2Fqb0abSGq@R79jBbEEO#2|sM_Uw{KO;_#W;2_Wcq1~dO#Dmcaps8juQ zu;^2SICPLONnFpNN5t2Y(!;}hk1D|kyLR@|gltf@gk<5bW&`i88o=88pmxxXu;_vh zm~vfUTP=5l8TsXj<{#=3?oD=!CaZ9dPa^$NgR)Rst&>_cw~slevIi>! 
zT1tu%<-xoiBwsPb3I8-DUBGV2U`0F+fBtR=&lk!{Qlh*lrF)Cv_eNP5XSSAcclJhq zTD`y6ROmW9b)PdC8UmvJz?!_V_f-DAQt=);_>&^fw!%eSR+{* zKGl+N1Lki70=JuJ+0_E}bJ8Q^_}dYEeY6k`lkfPI=c8!9O$K_kFa~n4DLxue4{aq& zVa6p9Ge0?m>n{-Cn!bAc*;WWlA<3WIPJ!&CWEiLs1+Oj@LNDzzX!Ze!M#;{scV#X7 z^)Qs1Ogw%Ir%%G`=BqL%iGQPVNd_Jm(BQ@jc<|_K6}rVn2*2zptX%&g3x-Mn=YPGG7=^wI-+XgZR&e+(ydEsqQ zQuOMirEJ!wG~I z>vLF%aF*!Wz#y;PK$2PCz1H-EIgv1%A6W_j0y;G{$k`=>gkL=cHOxgN!r1K zXl+OaoU)0J_byDu7cCE#O)HFZ#lD`AKzej+*$-Sv7o<2Z1>;g!U|^^F4J(tOBS8v^6RHTPi`363*pVvW}rLct$3BbG23#)2PSH->sC3|geH^y@519YaJtVpxT4U-X7-o0-A1K# z$avQ}bSh+}Xm*4WS8ECJs4P4PFTRd}%S5AK;CZikY>Nv9l z@3C*2g}8SDIpe3Q>1Wrf@h^8r+^#H*_iaoB#pPj~=(l2aYKgw&qUCyE$H~S!)3fld z$9m9{ssX!1o!u|s6ik9!P+|N@Ry@}pTCWMjPD=Xt$bBg`B)e8zx<#4}S=IxTGh9Jv z{Aqy5nVpi>LqF{%KKJ>Dn2c0=;9>p=r26t0b3@`6v!9Na-*tyj>Hz3yl%wyeo&;OR z@8HIdP*7LPv{LDG$nVT#ItiDI_BsMH@zbI#*{!wF3~U!f(u~g3@6|) zr=ojB;XX-kz77*KO_s`ZCqC=kKpeVv5 z`(K*)^xzV>riSb|n#q3taVUIKO8V3?4*~GX!*+&83Ons?D)*Dt1nhe90%{2hgq>v< zL48;y)#mUYoufze&!5x8Z-x%zfO#a7el0uMK^lrfLEW{!ht@Z_B(*haNQ58cW2uEeRc>J z2aM5C9z^3<^&fk#unwC1_``0>5z_Tb=V0?EgnPnrv6i_JmOU~TifWyh(7Nf8m(pav z9&XHv?8y87*mZEsCIA)p4zqTzGwADt5h==ZTQ~@%`AeiDcX3+_?NVny^?@3Xgnp<4zdo2Qr=)1H)PQjH}Qa>s&m{tgwf; z)QWHkx_07BdOvltc98v3Ybf5o@*vxHRoyznpLBLrB;hs*=_-l+Dc*gSgM9)BL-NvL zn;pBIp@ht1t2PpSev3Xe+slEPHcigz-Gd^~KI>qXa zE!qe_4d%d8KS_pKx)z7b$f4HS*J3)=LS(2~Lpxuc1|^ecp^T^)jgFR#xv@%Yn z6)3L51pR&xWt-USfmL5rBk_vy_~{nXGiYy#R+SPyi$BT28gk)B;uQm-TsV1HfE@b+ zM7Q1%=J{}7_eMAehcp);^*0rw#0A7JbQH2Of7^h4^cKAM?`LjUwH}6FNncln2-tPc zfmw;yxrLEJ($o1JTpV{7t;r>Mn5T<5$8#s(ubV!BsYN6Ud44?U#Qn}bIUCKTpC&R% z`+0D%Ob$4^1kf(G2scpO3C^0X4hJ3(uiS+ks7O2_Hhwj5_bo55{B;JtM0DmGvTwn- z3&UtP=NMA=bHs|X`46QM)-0zX%lKJCjd|yQj9W8VhaM@2Y%! zC*Fm-qGBDi>Yj}*S}b5DYwTl9LZ)Kt-`hFS$}~EHrJ!|@6ngE~4tDI7XGa>-QRrX7 zUmPS^{yGc#N=6#=Ju1)rpfA8PHl#6g56r{(Logk%>?Pf@e+74o&MI8c-NY7O!)#VH z58m3I!hIZ|$vkYzV0Rw1V)*ON0Nb*AtO9QjbUS5@rEiD8k-iC%zoJpjd;=@sGMp(< zTiwxhRhf^UxuwGL+-H)YKtAb{Ux6n*i3D%_TG)~u3i$CfqNO`!FyFT5fIfONDt7w^ z9Jd8oE&CM*cKJ?(P9R*e7gC*;8Yw65=FBp@HwP^Lx zo#@OzHHo%WJGgtZkW!9Z&Q@>Eg9}31(MaGy(KCfzcvfEn%uCMW>R;u<=0^$W@1PXv za*FRzx|oDT2V3aZ&Pk#it#NSng=`za-g~yG-@dU;#M>b?77oum?PUYsB;)pSW1P37 z19fd(hdmSYupoF5QxUzLo_epLW34vv34RF0+oh#|UYjrH{ibf}itl+QVDUrr>?se& z-R0xzZ;g1$f?w!QVhD0JCg07HYv9sX4X|K8@osA^#lgL;=e)^`aDU{vwhep1U z=x%s97A_6NmDYd3aJ~_kdpw6dPWqUFXOT`@xe`EmJ!gLtO;0ClIqter+*NUh?D2Qa z#%@l;(~G;1=Hz4Dg&DCp`Aj43_wQ$q949`2r?-(G(FnF$crj1w%h+wcsxVKggw-pa z$?gq0ED6ctaK8ml0+TUeE50N>Mr76xQ$GR>s29vbqJJOwVhRn({B=m#1~z`+!RI9F zck;(s^vH_%ep<`$+lM2_?89_841RKt7Iw22yjNgz;yu&}1Z-pZQaEdgH{smyfb7q7 zTv4_Wnz3im;jd?CyR}{D<$mJF`)Pw`H$0@>9O9^HiuROF!&&Y@T#Eu4QrUKr6>Xk2 ziMg>M9V+}T>{?Z*3O{ExU>o8+)qN^}8}m-11IC19Rnded+$4Q98Y1?^(|Tm`I+G1F zAz5u59<+Sefnuvn(4cJ=cm3~bdf8v%?f+v#$vF%FqgFH8*XcgWJ5!qN@=#-NG4#1o2k#E>pj=5VUhyXu2P#M4V{*N$ z^SnkT*j<1OOLpQv3p|mgXbQWj%9kphWPk_A?s4^VA#zU~LG9vm;=#Xr7@5G!=vGP~ z=H`%{;xi5yHZsS*-6Qas;3;t1D)9u<(AC24DMbyjt+R}v4>WUhuT%1 z?8X~AQQJT*bn`etcyUp%)$anBO5Vc}?kljO%@y(WD@k3Vd2{fyxvM#K<1De$^#%0n zefz<4S{KHb{{_6S5H26JqF?J-fwkX7%nPE$`t2Q$3Zzr8Mv5X^yvYE8vza)?bPeuI zQKYm6kE7kq^Wm?&bl^$1GjpdUfgZ0?{A!sLK6m;8yQTahl9}Af=qmS%XEn_f{kkTE zR(jRUdGZ;5!9CWimJ=O*fQMr)DCSkMAsjyOybR85g#NI8=Kzm&s zHQ=5l*>Q@8rPX_$>e*rcv}}?1ND;*)?tz}M@O09Ls_8PqlA%G6SJM? zI>b)5ma%I&d`9ARkqt6h2+P`akm-DDm~wkJ{_lw^+S9*@2~6ss57gG-*cd*%et>lG zh6aJBS)tsYE<)%)3JN)9^>9;YKBdu<5ARo#ER3!Y=iB;Y-;qU_;RoJ5|j@$#zp=1M0xX~C85HRDcMA+2Tjlwo&wxhdgGE+Mr-}Y|k7Ou@P9Z8d5u4bJ#|mV(dL|)N5eeyesCK$?5f3|k{~v{s*ITnDOlN! 
zbf~_X2Rn$z^eoX-V+vcqc|$&ov~h%UJp|aL!V-+sv6Y_%K{)WZz58=!xu7A|!?2TZn+nfb=j94a zpTi+)Ri}+@bI@+)w(duu-AVRa23Flsv-BA5;?v;5(H3_9N&v=I2%ueWJf8D6SJH20 z4T9r&Fm|&7atRUQ%FtvKQ9yilFBUQe!94Eo;uK1s5<(AeS9te@5_C0YP-3kY{5qP- zt&%7Al5Y*LNq@GLhI5PPv4bWq8z=x-D<^aQ_G{z7;mT7>nGr1u0{x{t$i_^Q zz@h8i8QvG^$X&hkV8eH|*nbPCT2zA7G&6wkBJmAe3TDd5y|d`2BUDRfCH-fWnbCQL z=vYOUmBNmt_)~o{-r%H49UZ*{gr`Q)?n2Ua+*m^QmZh^F0#C4d$>nsSdxd4~UgvHn z4U(5JJR;gqMfhN3md$=vg_;Pjsx4JnbU!v0J5HXAE2D{j#`_%D(zeisM{ znL9U5WDH{q`+>eL$sm)v!1bnyK+4ck5~BHy31?a*E3Z;;LoMlyi~Ry@we3Z{g=diV zs#ol1m9@-Q9VZqK@$ulTmE2VZG5DEk9L}QzFy6q172XJ@TpBluAH|XPSL{n9uB!m8 znxx}wV;wF;>R90i;az=IXBA$&7k@VOz?XmXpjMI&(p2%_ZriGdwO?UZbX!rr zwWacopkwC|8PWv_&7jeA02QhPl7JYYrG`qps4uU|IZ-6UcBw~lZ**CM??r-A;s z<=9uT+vcL;QTprD1`v0c~|0J)5V$F+CVgK z3jthzb&AB`CWFf3Bca4pSCUhy0nZYhEozeuTNkne23L~qwDW7g>Q<5|{3rv5+k3eG z>d8)mOSC_V=tN}qerHKFQ@BJM)jRPZ%0B^0j?}}4J;Y=9TNW%Oxrh%R0&wDwC6K$t zk`42)fEylEbmyA}zyuqXz5Pmn_uM{&8~%GwbtFf@xBX{qg4<0=FXU!e_L>L4)6MAB z$wdH$kiNx+di-*23*&Foh@bry;%hT$ptUZCxnD1Yv(rLx+%7Y?GUYIQ;IxKyE{vdO zRTsmu(;9eRt15hck??=-oC2M}^Ki$FGhm&TEM-d0&)D)D+)+jtS!q-88*~e7`uhi@ zcE1O0x0m6P1BuLu+kHs0tpwg^s>9i!1TI@ug-`FS#g|T}A-xCzEKzb{?-8H5O4noh zfr2_VdUO*Er)q$zm%G@5LCI)VU=?=*@uVHd)`4?Q%D{uZd^l1_a+{CT82JsKIG@7> zIB4A?^wyUAJ}#kn2jP5eS+ohNz3UTSi=K&#|9%6J$CEiX_Igr97i(a)Ts(H<2Eb1@ zDM{ZlAud)SEc~Zo>@W9vw3>M9gU2aA#n>pU1y-Sv6g_cMn;sln$YsyV^)ZgRLRjDs z0_zT@u%#x1Q*+51Re18?soS%m>xTmDw!t4Bdz8!RH`ZnI6zdq;eKlt>(fEhn#jpn& zFLci_9+2eL5Pj>k0Jo9v_6qe&NU1Ivr>{GNoZV^qBb*+LHl@^dF?maqfb0yA*6S4j|Kcec3uyCh_BjbJhHJN-}$I@;(71FLH;E2 zKC%x)^ZW}W(%u5>w4G$yNw(v>7wO&VBRb#TDsWYXFv9Qiaa=axmK`Hn`R^*&^GS#` zo_V7$nfK7u6XU@0>%E)@{ijein<)-dU_tHgDx9VF$J!t>14^+q?3xzB8h$mOyYfmW zXZ~mt8kHkiG_QHKdv{pkuh+6sldt zC6%z6co}tO*K+m3cEU4BUHqMKSfUz543;nkbTNNqbIlW%+ z+e<$Dr7=nJai11mJGT~&se7|^yoF#S{5-8@M!X3>{?82;Lf%87pS{|M486#F?Gj6U z)7wv{B(yOnGxyt+3&0hl1SD!#hfCMklD!3C_@B+Ry-WUo1xD4B(Of^c z;7BgYx!WeHa+-(jb9Zz$eQ<)h6AyuFGCX*kcnjgZ=jcDzP*hMsIPXrE(M?OpE?Ee0 z@MIzOq)l*xd6I2r?;k*yZbrevtx)l1E@6m`fwa>G5M?AV&+50(x7>5^GxsB+l3V+L ze#2FkE1f`}zcs}6jFY9`kgU<4z3pgK4dF2?YNrhoWkp$G-DoLyFEBaKLqjJ%e!8L- zC)U^D&W@cR-ZO^@pZ)}`TB!kl(@EUFhJ5^fc{20ux)$Z*cZSO!EQfL)?|>R%GCe<9 zMk^oQK&`VR`&Bsu82rnSk~mt3UAP*yk@+Q9U{Z_wL*JmnU(sx6r86gV73n1Y#iz9O zw$f$0-LTSs)!5@ZD+!s|AzD7YhePxh_g?t%hi*ieV+onX@Dfda|SG@%^ zhP}vfqX0h%X_wr#97LDXpJmaUr zv)f(a@jf3Y6(5F1-*wQvOJjjs65#=E4PqqHnQ(D;J+-I%FDQMt5!ipf$+rKq!;_=z ztiSzD073PHadt!&FBFc$|0YaCzXB8K^s!ptv7`)cz0iYR@d{yu5Am~ix6q4H2AD7( z0bHl$(Gh$@BL9 zBKsDDBNC~uX&|A;6s-Ie$yz?3q2FBr{y3)y(Zd4VZ|*B;&@e(WZ>Mrh8ZXk%)>MhJ zpYic&l7(6ENe0Z^ZOxu^OTozrgt=UsasX+~#h$Pkg^b6Bz6E2eriutaR-k z+APa?0QCG`qg^ED*B!3J+OMvKABH!B74Q5(K$Z!Rd3zYLnNMiVo+Pw3CklLO^hdt_ z*>G&F0LN_k!6n{4d?}s6szp^#F8&6#u zCxGs`cfgE?Yv9hWL{q%LgX5oVWd7O);AKU6_}_np;7nXSRHsj}&-&AGk~8scPxYXY zu_L$==}kLc6u{yxS#VIBrX9Zcg7@U-(9Jo41`gK3${!Cp^DL)=b7W`#ySt1wI<*$` ztx3XfpD9tdI?K_M#|^O6lEd6QErh%8Fi7yV2Yu1kr3}cK?o+l$zFLdnJ|EISbms%7 zLrsqR{IN5>Ra%QZxJQ|@X<%aJ|H|nlQ8E=inEcLu|*D)PkOWbx@)I z4EPq>fEBN5qe&S?;A;uVEa4NcMt#L1W0( z*PSC<*93Xw*}e)i;9{9NSS~1$uvZ2-LXI-fsgnot8u!7-pc>TdyM?~+?^BZEQMRsI z9ycFc4jw7p17(do=uq}gqPb!Ur*$LY^|s1j^^UWgwMn7JaYG6q~gQvGn44C__6Z-Wl^TEuk6S=L zLO{Axc|;Gl!e@384IwHEKfOL4V5b}yX?%ckOXXpM^|{z_zAi^hcnBw)Nycu{QD*Ub zFFbeRVNgPNZbp&IDb*T21mWa!mg-=9{2tH&8o`onzcb-)r8~$_FN_}ckfD=dE>8Mp zi?^5mW>f|iP=zuTDByD$oGqmb=Z?sLKP&9<*bzTCbTkF6OxMTd_wHKBTU&ugDwjF^ zf0lzDem!oo^FKGpiE8CrGkBo_x4;^buR?5D6E>M}lSLWTqjyteHzOUjA<;lz;vPEUZz6 zL+>$Vt6B)t&V`CQ@oL+5r{bvVKldO})Dh-;uKKFpQ&Mpx%B 
z?1*L|IzG&U+yy*nrNx^5}HxLyGJT?muHdlUGqS%DO97;t*0k`7(IG|s9P;)AZBRK)Q{xGg*#aZU)}^6d@y_&N$k&RW7i;V-%@OOb85>a1fa#*E=Yg0Sl;47sw%Nn6CMD=Ht^lmjRRcY{YEk50SM*D-guYfT z&l+!91wHleT5RbfIU`{rJ^zy~tll;eOVrXiLz+Cy4w0E?ZHp*phZ|IH-N}A@mq_!; zJFn9>N5ZY)VcRmoTWBUa-rV)@_UJ}QdF~CgOpinPwCw=8O);QJt{KTKZU9~T_|WXJ zguT3I6*|8qpQHRq#(IDB1XxM-1Lj96=#!BrB~O$#Q|UDtwyT$p$Iq<{K--6S>UD$? zRF=zuqQok+UW&tRKN$vLdL1@dSb^p~T1E#NDzI~wo~2dPZgYRt9t0n)s&G|+6rJV9 zgKz7JzDhK>Ek-^#c1Bxw^6ItB_6NsU)>&vJj1IS1nn8NSUd7-iC2^AHd-(XWTnQ4r z3 zNtym-@v~Z4$%gjXu+xF~A|xYdjQA(NPn(VL3*s;PdepkHd7NdtlsnzVQKWnBnPJ7u zLOjzo4+glBS!!Put`}#s?&NpdBQgBHfcZr$r89KY9ZO{5oq_NF$v`$~yRdyo4VGF<*qpp;woCrZ zqh}9ZXU54qL74;O&Uc9TLa#4iAJ9C!d6p8Kd5N(2*N4)fggbhFaGdx;2oG<4bH(cY z3N91Bw-jt$t4EpN`wAih1>C5-xpd|0f9y#zpPI~=&E=}8psbm@B>sLjP}=SUct5@t zR~;P&!=ps=q=fiFus?ROsAR`Ys({IauXgNqFnT`El`|Nrk8g;1Fn%H*j{T^^&SWOD zyT+%!de!3GU>{_WpGZ$QPBV)t;&J8K@NV5Ch7CNu8Dy7_1F^C<(fZ^=*ioe(M&8^e zF1GWeH>VW>&(k~D)3XBLZ8GB{7{aB^r=ujL25iu?5pr}|Ej4FJvn`7cBHH7MX#3xn z?Cd)gl%48A&e8@RULHmM?5oAvB%ht2Q;lX%bmT-^Z$=aNGT;cDjWhO;PB7DlWUaba zTr_V8MGhrNj7EgGU!V!qh*ki7xG-dD7OQ`;7M}v2x&NNsq-#9aCKA`KCY^{n zjgaRj#3y=$;O`?_Y;h`^NsG%9C2YB5p{>`AhP#GX!(!qor1pRTa#vIy<#Toro&HJ^ z4;M7?@XZfG_%2Y7UTM_Ew*9Qb)u%FTI!7ep(IF?;&^HdKaCtO7lMZxNAGF!y`UJp^ zC8QH5QS$lVIygzR2N!xA;!JkzKsra)BIiVHVEQhNGM&MvK0lofGYBK5d0-|Z`&Jr` z=p3Q%IugBUMlsIDd*IJ>+kI+Q`Bd(V)DmjErHoUd#(A{kK`g0ar z{$MYzKm7*1m_H3J`I7>_kZznQ8nyWN_-XLB+HAsJA{iQ4!d0C}X2e1h>Ji~dJ=ZD4 zE}waD_19Qbba?{asr3wfzafC7V~z004{4A{Ps3kN9Txq17=jNi%VnNs3E^G^7Z4s% z2ePj7@w(shId#W4m_8*BHudwcWt%JHmw#e4PEV#~jAek6F4=JoZpCL)a>0gNFZk!= zE3nyOo5UmLB-;|j(v`J5k)vUTIHvp{%=)Lr1YQimCvyxX{1bCw;e-}yII7VZ?(r#w||0F{Wj>tPd#Qz=q=9aJ2AGN<)lvs>;UW1!%$p<06jU= z$cFRDZt7_kn$vg?S*<@#NsG^Sm*3CD;$hAH(^_z*aVff@ zUW>iLh8dST6FF(;uCVK-@gaVqild&9oO-+_`b6iU_K;8FQKB>W(tpsgg@@s?uXE5T zg+8&Xkg$4uNY7~abV-F=Eam4eFSZ!6!$u>KHq#?a@EMZr-F=L7J*w_zd`R~9sZb7+ z)C^<{Q=rtHL@Ze1fD^4sQSIdY*o1iEwcaZ*C*tGTWw&?1m6vOA1|_Ppu9jGsA1L$5oiZF4Vup>1~RIvYm@rr@-)wVZHt-H6;u z-cyCU1Xy!?K0D1JoRAzUSS7<=l>craO4^u2R}+S03(=0UKrC<%dnN8VpwDfeEx>cu zLvbU~Fo#{N!P*1ak_l!Ukg@Zrts2wJow1*IG5LJ_VR9|r(HH|S7phV16CY8co?5!F zfiU(w$TM=D2X33%PsH#bj2UCmRJC74C3yk0K`0ll0ROt(%`! 
ziyf8Y^x9LUus4aEMc zu=lemr)|HG`*{-Se=88caH55Gm68dyRDiz{zE4hcA`UQPK zFnX6n!LR$w*l5c}xc_S^V{SxoIzCQg{IiMEP{& z=u0Ez$+q}%^kF)U`3^c46J76CIAQ5tU^q_G@Qyhg(lMKe+j$4TWot3hK=w>(*B7HV zwL(0-@HXojG}oECJoV(HV2}-yk(IQ|?Cfs6H)_o%oWbd6Tap zshUHi%ltA3I@ihWZrsSYOD%&_4TRX`k~(bkF~iS}>;>UXH@L5EtcFA43DAvw2p)vq zVuMt)#nFn-yDEO|MCZz#ae#o_ePYv?k)0`6_4O2JEfw62TZzv`=``)tEumTriO)}~ zoz2%&q)!D;z_6Q(jI|ol&IhVDWc@*vH?MK~vNLMMAC)shKsHa@Q5+W*P zyMK)MKbGLI>hIhE>u2B=VaB=U@L-ze1yGcwjJ=0uO3JSgFKB5seYZLjuV+jFtP!zg z>o2kG`;MaUT?olO6yopiYgjf&fjvyLd#YhNl7H#S%t}5gkyZ%94rIr3gzQMQo}^%f zqOIt)nHBmF4WUY03I3+Lp1bO!5$sa*!Cn77>vnV7gq{2jH5NXixSdc&ek{s~fKi@ddu1C<%t|rixANe>rzZGQv z$_7rPPc+-T1GSjgAq}11l+?`>mdC3CL4Q}^nGavmVJj_=OQV4Oc$Tn&&B&heKVvrJ zQfQZOyC*#ubyDIWv|?_bV3E?zGH{1H7u)>Raj=CWUToXN$%)<#wvp!~N>OEOBMPY# zMg5WkD34aZJfFI}uMU6N?+618Nw?=X6MXgEHEK&(28idVQNfNEkW76!`ge2|cvUb9 z?Nk=xs*_~C6J{b0w;KGy?jn+#GY7tuzJxX?)WY5V%JfVW4m+FHUoAq#fZy+vuyd8>Pq)9l@?Lbj| zKmIB;1-~J4v1M5_m}a__sk`TZD%T2OZH+Rz2mQoamHDWb)5YpeF@gI!3vt$J9U^cpGx{az+v$}^0O?|^F_z5dIM844)cV(qhnVs>;6$#1NL`L z!jTn$c+~DGUAmR@jXfEmluXn>^6rV~Fh>r&*+Te|Asl!xav2-F4gls5TRC4kLTRVl_PdayZnvMV@`KV@PHT(>*Tyxx~bH99=(Q z65O#y1&`jHkC4N7wDVUkn7W{v{cr}evC--{0&0Twmt$ZFAo=Zium4BUdHCh@zHywU z77eM4mdZ#B*4oc%Z2tX> zGs#beV?E<+#%@n%n`8tpaM($StG^BZe3uxX9SI|seQhbNz5!Lj&rFy|#5%h>@qXW6 zK6QU6D>q4n%C8l&K}sHs(~uHw*)`!l69-E`{OKIA^}JH`cB7yl?2uv;JdB|>a~ICCY~Ui_ zG~u_O!}&l1F&D7a+qP{|F&Vcwl7B7uaU>(^t)$j?@aw2O<;V?HN_B6!?G?M(MnOxMm{AD5<4594t48|YIe~4ND27n;LwrU+CpqSF z7_~+yGS{@1PpkSiikqHn$VQy1#Y4CAc+Im{pzhBM{&-fm_z7psf4f?av+w)!M+u*IcCeEADg*6{2}z@TE--6-w3I;v(P9z zi9P8%i0yRV553xN$mC^ZaA56@60wW~YWZiJAGBIOs5Qc{39Zcf`5 zdD!-DqVRplSCt6*4l}bBYd;Adx3L?cZR}4d+%*8*`(9%Ky1(etq!m49ExL#n|m$Z#xM;+^cQ9@6G>(MXh-?E&kT>BXxO;+SX zr#R!NUwf=_QbVcZ`;E9wQQhX-S$oz);AD5?n(>==De$p#h3r-QHxWCe99M?!v2&H* z&!!#nqzTF8R*mto=)1C^%BE*6)(Cgzh|U5S`C=F*=$Fz(8k+2xcx|q|B^kpOTEV{S zzscRx!`bBa4BqB*BYD)SiX%(b3wUq*Ak;u=NCfS8RGSdKu=gkv+<}~>C zV}|m_R-C2JC&;oVHF}x5p1J&OAsdyjG?)MSsuUxG77^uj%YY*z|CjB8^MUoaF;q6yFg>7TaxnOY2`r;udP{ChuTd4|vHZut|Ogk}JR>-f){K99#|HI%xPq8nx z*)}fq7WL`37E2pm#;+Gb?eaIB<_8fwrgqVBHo$&7|6rOUdqmEa9U}4MrD}y7tTurU zk;eS=LmT zob5qwu;BIX#c`YR*&2PFN;8|ZsvYja9;eMYk4yhXkGDyr-8rPBQi-%4+g_`^+ieVmX|B`j+8DOjuP;Xff7T3!KE|j^I?B z$c~xTERnarOMaddxJwnOl?B7PnNO}7;+KU&r{=7u70k@a@`$KdgdBp?VZ5PcxYMNzNXE_FQ$LH~Pw@yP@ z|9t*}od>Qtq{DN*cC32b2^K9T6T_p+(DLyiv|vLq^ZOb;a3$e`br3D=1f5awj6|<< zA0LuGlJ7IzOL7f77>B^m^yXwi&-ML?kq-hyizBA8Ex)p$-6V%qYRhG`K#YeWB%E^2%QA*pI!|G>%6E@*mMhw(RM61B$x{`)}m!!e!^gxFHX@+Yb5O#+!S3TQm&&j1fhnByC z!Q}Z^-ZkwJcKUvyF~VM3N?;?b^=<}Vd4cbEBO0X3BT%YV;3K`v;Wy9S%Fi?3&nNT> zEX))a(eQ(NnM~1nI``}e60X-Rv9%b%t2FQCr-qyWV}Uz5$RP@!{uKOv!tY!cm_c^> z3p?eie(qiTayF^co;mm7BPQ=n2Hz(;#JlT)@uNj7U-l%3k%|?%SgHr8%Ohj-7qnK5 z%F(u7uEFA+U(nXE;E1hu#1V2KX)b&3vCwU@Xck*AL*Vetp2p7Xe~;-}5xmFWe0J#6 z$v866%*OYqklEvc`1;*~cfI2{pR%i?Dp=1$*neozqUrhULP?j{C&rCEHpZ9z(WT3T z)oy}ug{JV~qrlXhkd7BhwBgOvk2JzS0YP{M*GL+0YSeN5s&EI-Np}XP{dxRxKPArW zdnIO8%wrGh3}JODlAyFvg$7vcvQ}_(XL{LucJuNywEHg1Va_Mn)Y6%}q55HHuMn|@ zX`5|dKD z`7ysMphocNo3}j0DnWZ$QCz@&7V^Z0gC(NC_sQ5Spo{d$ z(Upd|>@S(ymG%NSN2Nu`S*Xl{(bgO2;3?_+%J3E&HUH;m{wAN*S|L3Ddxib6@h7Uh z@hZ&-=mk^1Fm~?3^9Ui!VU1Kaj(B>AeBOP9yc_k#?zAvtZgii7X;+kaof+Fnx@s<~ z6ExlSq2NcdOrAqXX2D|U~ zC$rvGD{w(uU~H*vz?~rl{1A0Np87YCmbsC%a+#Q>33r_l=fxOMP=-|S zfre=uq;kQQFd@twL(U&zk1rd_XZJlLGtS>4c-fCQY+EU6|0gg0(Qt+Qh~7`llGXSN zLbj!3y>eAcw<0I%two0r;hug{*!kYIz}hFHiDPLP4H;>G#v4@_P3c^|Wk;ptj#Us} zefv9}G;$CpTIcbPGyABm(?R%jQQ%*$q@sm29FtV#!>bCa z;!e!^tu1<&DCFy#17WMdp{n4C4tz&I9-lf=3%+C=0f$j}e7!Kk+;Wv-L&Ju#?{_-$ zG;k;Dx%C)y^y%_RSDnQHe~pR!V?VmI#{e=WmW%F8P#}R)3RQ-)3t_v!7vD8#uHEX1 
zH^Ao0RA!XuDK1=-&yJi>z>`NpK4ozp`JVn7>d)@tZ5D4Mqwh>+;{!!#Gwdy6kRj~y z&I|js#4MJPWWk?OQ+D+4@AQ}aWV@J>5H_ybo@U4a``nALU;5^<%X{TmYZC!8)jyAQ z4)�`njy3+Y&0K;pn9%^fD?5pNlO=&lYyz(-q@+k>Fn&mMzRGhYI+3lQYq`K~B*m*g3>>m6M!Ae!3L)Yjt&2sK!X5{>Wxi4eb@5^d2?ejQ(-V+^e zOv4K1{Fo@vkrDU|>%@!`6Gv)>Pv)CP_(QirJnx?Uq{2UoVl&r7MVoVhZit^E3G;8-aYJnvvraxcVqT< z@kRLIk;9j(l!D2J>vpN@WvWIxFJ}Ex-@;!j(l(}$DfJ;<^*UnU^79c4X4`|-;U+Ojr0poX01W&o;(*p5=G$NucAMhw!UQV%W0n ztNGx=%cr-WTv}yVbQs(1Z^OOjbY4B*h?UD24C#hi>{}}n_6cZkS!+zhjwd6@$!(GB z{i9-1rF4ir>zP3n3+KRhX-BrVzKfdtvt<9%YQoIK6F9b-hl?xjFdvxXtF%2x0nuCDs`A)R&DKb1Zn?8JA7$J?5B zxbvc0Vf_5>Cgk?(3MM&2u}XU3AI8DWmf!qNI8&jg`H;6}7#w5AmJK_Fe|G{f?xUzw zNe=(6e-x8*|24D8MYU>-TLxcVuun8z?=UAfVT0ttG7*3D(h-T>osXz~GXpj;WsIqb z3-hsNKJ!J7<&*D6;iQ}#KB~u0qJAzy^!nsB2fNIHh){mpMM`Wk*yW(lbLo_ zG@aQcN>)3;>u&p8mF$ww=bsdjT8QIc7@9$`pjia1^yh2bkK6w3*OWxxYCwOjyC6S$ zJd-Lnk9F6L;B~eL-BSMAypzfXKB&f(y^u0Rv}~3qKAe`$?661>J$t2w>w2!hIolj| z{Apv%{bh%4AAH0sk7VOR<#bdoazo>Z-y!08DMt9^iSj!a+i=1?WYyq2J|@6|PV1S^ zuRQ(-44ySW*zu+0n*Azn|Mn<;?3YxL`x*td(POlYY>%MRJ7%KUQYTi&A_u?R@fEVO zMQEb+7-zme4gY?+h)llCyx5FJ;(S_lMx0oib$ooF+1VdITRYbmBhqccMKH>|nhxkHnNNv6Hq}Vpkc+ zLe?YTr)Bn|#kw|X(0YO`Ipv9wg3n-Z*JY85ps`+6-^LqTpy-b<|6faOW|F25Hs_lj zY!*D$kuy>%l`TVq^@v=IoW}-NGwW;J&LJ5&0j9hD~ZNV)?Kq zVwaQA_{U7>vbQ)c8Tv+oiIsb8|8jx{tjY>Dts71((=-HHb{?O5vKJe#q;czKA4W#4 z5PwfkCYPqfqf2_6gs%|v#PD+XVo|^=(4p+y%GtQ)w+hb$3-5F1N}_D|L43gC7Iglp zCZ_edqH9ujxUPr$p(wo@9aD0l#8A-r4YQyk-kh4n$Kx7im?$xGiJkFc6ZW6#6J*Xg zP;DksgHzvlXWD6Tqg4e9^HcJTW4Uq z=n>pk{Yd;{!mjh&H8^!(H{5$V8Y>KAu;EuW6QFZ~37OalPglr`0;ash z2iLEPIbpvyz4aO$mOPG04AKz=*emcirYGWS_e36alieqxq+6 zyJ5$~R&p_^A5U3Y@ywp7bhKrw_-qJ=PTXvG@@ohG@7O!r<1J%oXk#ETXfvR;_x}%oHfk|r z`S7&u%(XWHCqXFpBK;Bk>RKQ~K5 z^3^+n547`Vx9igvD>mxWZC65ibvBP@i{!chm zI}clR3fK_~28gx1zzON%00fS~)k{aP&DxMLejIB%mRf}pGKO&0bE%bvrTCJVgraSM-w z0*Lkh*|zoe?9K)`kXy4;yj#fDp4%o%4*fK)pr*3WvG){syxhZon_ew+`UrfDKufzR z*>|~ATOa7Uswa{4(`SO8Xo2m!XuF)B`uyXeM~HTygzV`W&)Wx#5U;T`g0#irC1b(;n{+s0MoYQ=8gb}~2Nbo;=8pbcK zzrchJ)hgF2Wmx&LD(+m$75IAe8Z%{+z#lUCO0`bsf~#OE*z_%3VxcE^>Sv|FcpKmi zNju6f(qPl)?E~@r2uA0zt%QsZBv}`?vkL7+c0UEiWRd?&epT#JzUzAmMtkS*@y8E? zwR4>0c4IJf^c%6AMw(bCyk8~>c@Ddw<#tiCEZ|(vd5rFRFPSa!qHisdd7GY>%z(ls z{9B&GfAAQ_Mo&~_^=7Ivo6fgkhoz8Po>M7OtukU4{XEUzRZnL>ca&2<BbzS)5D+>|! zX7Ki-D_HA{g``Mjc)o81xa`t^V@E?EYJ@37)>*@X8G5j6sg=+puLXS%2y#wd=1XY-99 z@uwm@X;*?zeJW6TY9#E^oC0?^9q7Jm0Ju~GvUUy-h?(mqe?-dPuQS@V+c>9d z+Dw;eC^L1_ASV2w3v<~0kjP%%MAAJcMWXPgmT6tk%&g$`m@O`cMAIhdlXqPOqKG$J zB@N59M1K|zClPPo*e#b7GiNk=L_hQl7&C`V?%pW_NolkRb5hoT{MWOEG4kzT4(3Kn z8lzj8!oG0kKt>dEv}`=t-tbUl*YlKFyGJZZUwefSpHU&>-It1&tJa9ZQnQ(V1^uFb zleC#{FZ7t5>~H2^%^1exkGZ(ThA>U97fW(xH!==AtxT1sAvq%+FP>~Qlv%QMh(u2# zg}L`%G?N$fg1J$aYMW;7O=iNksLI;o4Pu(nqpVS2>wTKWdGQ4p~LnCS@x4_jqJ78wAu$6+_MkVYP8=q^oYIr+VA$J4e#w^ri`>_ z%oOd>_mn;P@WFn-pxvJPp2$AUkadWN2(*`VG_#MWF}D9;tKe|#=uP_oRj@x;*K6NV z0QMrexehZ_*4UrAP-A~R`hoqh37hQKPJiy0dFX_rx=o_f%_*|Zkp-!aMNUtg8XSi? 
zSr2{eI4R?_W7gPbPS;IF2&Ra8g&UbQ*W{o8#7d#ZC^rpB!ggJmb_H zSm(I$>u{$&zX3zGqQobF$?b24)7sXDorSV23r_YgjJ7o<>mxNCfwx}-!h|80oD_gl<)s~eB#_E(X>?0AK>oAM!~ zCKP9{I!%lZT!LQ%PwBSUSI<4LP6TZbcOLP>Cm* zk5TlEuOmyByORkQC`?{AKnz;LVOv={U1hnNMt9ruVSx+as-R~wO}FUq+b4+6?AtW_ zXExrs1kO-0UT1)R778EsIe>$i4-(oFn3onKDwB7dy zx#A<@Lsn){bxlXgw~v4z&jqk*tqOl&=P5XoCyR=aBRR#@$@tZ$9k%JOpq{SYW4RFW?}U;B}nYO3lH*!VE?Wecqe8ANu6E; zyPkff>O04Q+JFVWGIc8xzf;6-e>D^9dh6-%jVg4$`!8~1OdaZ|m_lg^sgpCu_b6C* zDxZF>mypn9S-9nU1yTOjMiRehLUpAVF52G-sh{%5D&;yleCavdENHG%b^hU`={dCI za|teXmu2TaR=~?sTw$7bCd#JuG6QAa=u>`J^z82qk{Xi&3Hn<}aIY@-sk<=A75izN z%Vfc4xB$nTnTsm3l*n1icZm~JMIBQ00aY0V)=)QGmiO4Y5zYmwR;HZg@Whl$$c8T8g~}4B@UK- z$fwn%m#DXcJkB%f#2TA}5+CfJh-(*JB@O!0kkBm+OKBONs8eTa91dWDts7>q z(gMfm2-uLq;Hn{Ou=>SXG}-c+bNiiwB`(_`F@G5;m@xz%PgsF=p2x^O11I9~$O%$k zoFL-#Ci2&D7I`}NFm8HRiO+r+pzO~uew#uec^wf9#akx9?r%RZ?*31>BV?W??NZ?8 zjg;pHRb)~7OlkH;Y8uX}upuMO;z?WRYB>1l5)lQe(&3#98JDWe|6BYLzmJyTi%&Iy z^V~lAyb$IN!_w>el`W&%WuKh8e>$m_ktl(w{gcOUCz@zjqY$f0@r#q`F|&L7`OHM zpr?5lxk)K_L%kGyXNy3^SP?HJUtnSe9Y@a@yO`Yut*GZQl(qiy4xeN;5y@hKshquz zyD0b&-ekQ7X|wm#AchjB>eX1#za3ncnsS_}1h=G9j1fBlXBQ~Y zv}6hQE&nJ)F8P8H^M|mLHd@o$8x-Nky+ioyt2z`t{l{$UI)_GchC%S;AdJ$Bgn(@g zw2=mo)jgl!mQ@p4rMcjInIU{q%K)=@SOS$A^&EzTvUJI%NwC=eBb{Jvf>T?j(#9%P zNRPWtz58yHdu4h;|C2B$Pb;GrA1dN&rSr&yeV}6VX}Er~I!=;YMwjcs(7AgGn>{fP z@>}J=dHM)$?x`e<(>Md)!mi_K-HVJ_mm8Kf7ebwFF}E!%pIQ%#Wa6glgFhz+_TE>q zbFCYuca~!NZ&~!TIfke1U4g>FkuWbzijCZJ55>QuVW^O?eLOP)76)%9b2gQuxrgwc zYHP*)hKl@#y&MGCE5VBo5d;rxcczA1Bg_wz&8 z0n_2^7vEwy_pS$X-gc5^91i-E=fZK*V{o7_S(2(FLDxl>an!V5l1=)<$*>7K;74=^ zqa6~>Ne@aTyOfus^Z^@qxj>>-`=bgUb+q$9j`^pAfvn3neEpLT_ghQCy#lR_lUtH61f;i2*i{CbC zW6qqnw9;fMcwbdOpDBMx^|zyB)siFdWcN8pIc(0)(0PP1rgI?n)n-gn|4tS@ScB8N zMvIQNo`m!tC1mQP224}+1zQbD1A@J%nc#4kO3T#k&|U zaB(tVD@>)vDvtcW#~zUQ;uo&_H-f5ul!AeL9!7V$;Lj6^P@-)~4G$S{p1X7))#oNk z*Q&GkL)Y^opFM_SIWsZS><`?~T!Q!7N5U|w3`K*?iQK<-(WgOKFsCYn*IU~KH$B5q ztwTf>eb`R5`yauj*hA?3`3Qb}wuk$7axru~P-FI4Sdh<|!VG1&pO{4T5&_)-HwAxA z$>1bhDQ-r^>|y-A?FnSn{;5!XYa~77WQgV3okWG5BFtGg>66vDkkK{`8lPyx+B*Vc zq4qp8u<-=?H3ra!{CzMuxe(u#l;EmaiRh*O8JUJA>STTh<4mex4Q(MuWX=+qfjOYE zyMxxm2NCy3bNH_|fhY=`0U7WEGV-n9=dz9=sj2E=<;MM(|2vfH8l-_T1$`i|ln&Wh&Uh$66&{9NXD;9OLmym&Ev8#w@jPi@ zF#}^;+aTzD3Hg3o#_rd#3EZp*1N1RdN8?^;l(};e>Rsh%_VEH(xaT3xz7T{*9*qKj z-v>f}s69EfS?FYWd7N-Euj%e_L-?)eE;{!tmYVAa;f{fw^xD61tWuIO9REHB@`~+o zz0OCPkspo47d>fO2MZSEZQRt9MW|pD1J@HLqHjzzaj@Nr8;-`%xO-x+URFz`AM=9Z!&P12@gV>Ol8R*+yCfrdX@yTXEGhAIo7Uh|t+<$4<{ACG79W+9N`(E-|oP_(NYRIjP zmBg=a2y3log|!z3#O}&Nc>_ahctxM!hA#@-rF~J*c&83NuKr70JzikWJ!Qyi?#Aey zeO%aZj{F8EVa6L{4r_fSh6%f&z!yaK0waFE)j+MHU#`p9IDiQgMlzF?^ZV zgF5-nbmFw@i;8L2;T=!}wpH@BQ z{#@Nk;j#*R+)+<6``z$*;&MDqTU#bXK*U(1ZJ4m8j+Wn+BLfP5Df@ml8~r(-iTQH{6YIQ5#26hc&fb9u z*P^ic>Ivq3<6C$s>rW4NOQ8R6E&kW4jIt5K;6bSHyr`AK4&%w>XZk<#-J=0Dw;K`d zo$|b1&LtYQQXWVAc?k#Wdr3{RH<(J~K`Eh@xc4^Uk^Bh|_Wm2zwkVLYH~Y~!eG#Y+ ziGnppW^hB6tw7ZaqruZuA7^dRfyli37=PCkb+Mi;77dKN~$iZiihS zT_OWsfz|GbBWqV&lMH>W%q#S!;l$(t+O(h!6wId*E7S!q4F}$*%@VXPRMH}?TzdL3 ziyp=Iz`9HmkMI6QcoKuDfgE*fC?PGyXP_x@CsxgN#J^wvae?-8VbIO(u*tIvyQ-7% z<~5Fbkt*u*;2^5=c5K|!(^O-R9{QSSK}6bEEW3Ob7Tn9llw+^KPIm|1+$+Lqr-H!$ zWfpGMUr!gzNhjaRBDt~B4wyGn1K;gC#T{QSu(G3v;?vM5nzC&ezqxA>3Gc{)E#~Xt zg;@*Al=VPV?_v<&tr0CdsSjc27SZ`rZ6R^=0GfA?;v40ph27LADkG~!9dDVFHvSoT zZa56So}GnNedZi@z!tU~Jx?D7R58P?`_VwyEk&MsMDX<=#z1NznHA{6%M=Y~GxRUf zyRA78SZYiyUwGp1!ix}lt^$IJ3UIlz0{D1%pho;KcyqZFKTg*}%M@?8T=szY9V zl%xN}1vvlsY)BMXXY-u>aoBmD@XyT9ezO+L-FBNBTwg|)4Gn;E8~@UV`6KxvAwRO^ z!ZB2DW8nVC7#h!=hpD9rnCY8LHmNr zlCvisUG-aZtF{T{l`O~}lX@B|!^7KwTD)UD6!IQj#vVO&e&&jw-0dF`5H`~gm*Fie 
zdgqCflr2~$_%t0VBH-Z(oM-7zPt_ziKwQ%~1FLd@?Rg`WlfrEj!=!GRY zR6qS9y?E~paq^9YeZ8`D;m|JXQ!LLvXc~+L7Iq}p{W|mJ>k#a|>q~~d{SL8me@H;S z23w-4#!H=(!I*`0-0vwPV1}L;9o&-8pBHq*oSpFQkvc589E%OvV}Qo~q_#8$M=#Q5 zf?u>?LsSOg6;k2r)`@U1TVCh}x^e$O zbz3269GpmQDakLk8h7zbM_3+UNcS7dvhqMOfX>M&XaYn9y4;nEy%s8oR^ z?Teu#{w1f|oPuU<)9}Ap%AlrEiTh}fyFqWdyPDE%fv3u z1EP%gFy2~%N7U#t_SMX!>IG^r{Ieo?T@XpGA6|(!R56z#i!~0Bjq>*(XG$`L=7!?c<9CSZpa>%J@uTq*Gcd(NoxLDe zgsykzz$_ahCiIRf-u8;aez5}I-?srn2Bwo;!QV+tVmaBXI|w%JFC}k;&cGjohvU7G zp15A}jC7aSio`U{9&hZ3KsaT2a?S_YS%gyRCI zOzv-GnZ(aVxQCPv1)26}W=Z24aB(>gV~1(;#P2S2T?wG4mv5qew~s*OSs7m4APpS; z>!*Q^BK&wDp9CI^7CGNG!B0ONQD>qXdS4GEHZ@1lJ<1ix1 zt%--mBa*&sFi}>~!`|rAWLpWL-Ks0_Y-K5(^Tvej>n}s;zN5q_D-!I=r@@rHPwBMU z3%Ipanr+v=iz#bAV&>Z2INIMX3E_ zE@^sl1pRR>?rATEnRN-IaQ8%Z)SgOAJhlcF`PZOD^GOuH?B}k2tR`Q4&fu$jRXX-r zE>U#Sr@AikY*LB^w9V7Rs987Z-l#1gduuB`RAzPftu=0!Oq&pqXgiaQ`g`Ay>`g)vfr@=Z@jDSx&VsHu7 zf*gSzsJE?%1{bE&rX@d7)j9?&caO*6Pd|~;qXK8FYzKzC^C$cBK9gGt`SkmjOp+pK zL+{+;$ar%hmn#W@Ht(-E+gY0wY;eX?A(~LC;!ez{6v>7~7>dcF}Scj0vu7}o~GrNQ=7C3c)nT|K1Y9~)qTpqRpgOhaEsO)XeNWE zI1!bARQk6@i?{`SCfZ*H3;S?^Gu@{ps3-CGT)m!?O0|YdYWJ>3}w#y7mfVvA=%)=i;@n7G1TxQ%vOHGae z+totmoYsUIak}WCLM!}hHo&`M?%=NFBIeEsY1Y&1FX=Hah0h+jbe;EMOq*En|4u%T z>F~e?9gt8{BxS@oTF&$!bT`<+c02q-Pt2wv?3)dYbgKW={m&|31d|?Pz zboQZVxCeaH+J$q{7h>1lG`QoU11{6-VBEJ$*l_A68M$2OrShpH%l}G&L7jyBwlab` zb`osLD#oMU-RNn#jXwD`9exEA(>rVB;F(!Ib(=Pvjt?s0Ce|$^Hw4dISnOTOCTjD4 z*K+VacO^a_P_}b@yc0QxWRUxO5Jnkk!5j5_Y9r-Fj#(H|x$bS`#j_qJd9D*#S*;K8 zW2TZ_O;<&~|3s7V31zPdwD(=Y?yn3s@q4eV$_{Qo#h>BeZ z8~?tB%bk_zDeGc)K&Kup{fCiZBkbX@@eRgn#2M-ne-;K+G?43MGsK&$-GqJe3ud}f zJsq*7mhAg49dv&#pmjg*;MCMXP_HZjucRUpy+j51ge-Vf*TH>D+bpmZj3E5uW9-bB z0sdN9qBB2*&fLfuaAkKZ{e6Ux$1emX{@w5N9(#q}7xIQ7F9DpaDu~9TNn}sDJg7Wc zNMqESNZnQ`{G2ES8KZBLnD)0sL&gAKhE5|3w`y~`MpmNoCqeL0F^-OVB7?cJC{?Uf zK}Po-9a?gZ9Bdy9!EN`bb8rB;_{)Tm7vJdRDRl*@vJ>=7cY*=N&6G}8NDR=_j7#am<=gnk}U%5@PI$Nm9`ngo$ zkQcrhPv}S8e7rJDid~*tO^>b`geN^B$%Qrod(BnIz^XKS8>9&aKQ6*Wynv75s%+b{ zyP2A+nZ$+ zPVXDXo1!fxDH=49)li-LI#E_6p(U~tN+ltyMMWAa5-p{rNJARWeVxWP6-p_}NQ#i` zkO;r?`uzpZ>v>+!xzBZ9pU>z0*@wx;&e4>8#|8VI->3HdkErF&OtPzNGTqf1MLQH! 
zQBJf+aO71oJ?~LU^2~zCeyu<7Xz(@Fj$WN4!X7n@g{%r`@EdE1 zJKC05j5v?Q8lT%FbWS|>Zkmgq&Y!Jv89s+^zn;djd3R{}t^2THb3cp@>EknxQ6_14 z0eVT_A=`>xk>0dI;+lUzP@T&P4j=iu!!JNoWST%JGM>6iCDB_AVO7_U zWRRUk8dT0*4p#lHp`ls@xS_j&lw1*p_+`5EsnTqs!oSltMFo%v zRt=av8p~-Nhydbo2om$^$zER>612Jq?NdI`-ka~MX1{q&vl>R3#g?Kpd6o)vySk9c zEgTK{D+$J_C(v?AAlVWBg1i@#$1iis&0Bwmf<#3Y^>VibE0q~g{_rP#wtT6@%1u-7 zvB7@WyetL1mpS6}vpdM-5=vrZN|<7uSg^MACdYfXkfq}!>C&5X@qP3;3d)n2f+g$V zx<&+6O*0VOGjXT#&Y#JUO&LWgLs)UjMX>4fOh_rdL`{v0Y2(u!f>(EU^0U%ua#DRe zzn4@)kMYN=tkopgkAVlF=;0IElXD&SixyLslK_M9+o;;GExBFYP2&r4NKSna^~q3& zU3&&d`m0b9Q9q4JHX7l{m4|UkR409Lbq?lz_#*Il-37Y>z6chZJwZ8DU8d2vL0~r~ zf=I4b;5x?%;89pGxD?C;HUCj&;Mys2yzdI$O_>MOM-Rnyq~W_`FKR@)Gn!#z$3H4Oh=_gfo{Ydo0SCG0rrfk|| zGxDs(08a=h65($OAltqh&7xM|th2YVNVfzYJY66#{r-g>5}tq^Dkt&K*58ouxfH|K zD?@y<5ybKytgetrte{Jfl{RPDP6#x{gKNIkVu#_7I%^`y{B|;7~S4 z7u*f{!S>M|3$~~oMaO@j`?SZiqS-f?|Dq=ISxf=U_~?e+pLF56^c>7MD9_yr2&4sNogF06Ucd0V)wYr{-*!QT&kl>NZ*SA5FNZKSZ#-Nm z90yNjuQ4;93nN<^LbhFQBAN0PU^3}HoPKOP?rn`g?H{4wu_79FZkhx$O)iti%AWK< z&snN=`zI-8>ab$+6g<860WJ>DfOUs8f%Q*7bxOdKh{3;wP2?##fj*iN^e?*ys!cc1 z@W;bsPtr8V=e()^!B^<|W-cUO=XJ-DO|ZwjuWI4mYJ9xX6uWMQf%Ut2P%}3IGGEy- zN7`>wk!|a7y5Bs|yzfT5N(pKly@sZ>OG%#8Fu9>B&$G~#(BZ%hsM%zIri%xl#kB+J zP#H+9G=wit7&7d99^<~3!uIW%Xt+U$_y6ak4>60siu3B#=yHfmS}2s z03~Pi;F51iVBk4K-WV|?Fz+<{6`X=s*Y%*i&x!gE2yx+((p=5csoWBqdfYny1>V{5 z4{Wc;K%e+WU_-^t|9+1F|8voJZ>9#Et{lVkyqpKDnH@eobPDAi0&&!A22-WhjPnc2 znTX|Xbk=V-!3C*A z&doBC26&d^KaX^xcgqp3mwuq@n)EQuTbFI@zXfeHl-RE*!ikSFana}?Exq&4e8H;4 z^rPv0cpDvu&5f(EH&Kc0$APN54vH|Z;70l3DVGE_>o}Y&p9E1YIh2b;aZQ8H8>GK0KmI{%t$oi#v?ERn*kjeL)n4IcZG;+x#ZP@LAW$N1lByfj}FW@Lf&Daxvj1r?7DvUt&DM&5b?dr6+I?c|N-N=9#EzK8;y+;}|rnhT)%i#yGEeEpGeh0RKIV!inh_pme_uhGo}) z(5g(VFHJ|`A2skrJ_Ayw-$UgeJp26U1m>q?H2#W}U?&VcCsTu7;VOMood1;fDwL?O z^*`IuvGx)^n(~mI6!$<)Q5o)73&&?zj!~Dy$HZ^dQrMEZ7xw=ii?1g(GPzY6DC*Tf zeWLk@p=J+uE0>|DTne3%a|icMj>FwX9C;&^gYp;o%-v}L{UtAgHhZJc?yMH5gf>w%J4z(Gg5cWn z^AI(3p4Y&KpyKE|sI6R#2d$9oZJJ9i>&G(@T9kyY)ZrZL9jVBN3M`G8k8ipAIJ7bf z3Pi@im=~TTJkS)}PetHI-2+(tDwO_SxeCt6C!xslNc4Zl=YBTD5dDt%*qC?}mOFXj z>2nbfx_k$&70Mxob6u-uPnX1?p%FTB(s)R`rGp0#v@wIqN0G;(fM3mI!H(EG$nwy_ zF`gT7_c=Evvi~3$)Z|f}a1&VgIS-;m7Qw}^%Xm2?7X zi}#`hZGBCI3=iW{g%CWO_JiKwpMz)PYsjOK8|eGzI;QP+0pT<%DCvBIAz3C+n;t=J zR~bOJST3q}iL=(Bw{iJpTiheOoz(bN_JxCy16!?AO=1~0AGJ)MX z$ahBQl+b{%5K<9+2`=sNL5qhhTN#)&qv=WXG|UDqGbIi__<>PCJ8bl=B|Z*XT=a2yxbJ^%l=6$Pv#X0ev@e0yMVf;*T9^;A0^JL;7*>=T{VN7!yGq zR~)3v=Uv98R!j2kgCRTj+5@UsV@THClmxww48gpf%kYP>;EGEksdbtV=TWML?l;uw zd@9UYugIW&u`-;=)#r58{zfozIK&>@(u0#CZ{w?jAHa2cKGj(l3T?Ig@2@P+mMT_~ zOXp0v=$G5MKeIdVSuoQ4g-7wirYkU16ASx~=VSD{Ev&VJ7gr*dMbf74#aF)LU|otI z%=8pNW4@E^m68Mw@7@kywdX?f^h@aS@d(I_slrDWc?*EM6UzOVPb)gpAo9?1GC@a( z@5nG_i!)@*UH@&3$r z81X8c6sV@*i-D)meaxJkj=u-o`4JLQI3E`UpBKdc{l|Nl6H)5rDUy1(g^V`7z&1Z) zYJXc9R!9r6;WP5_O~P-=r@qK8UrVxe+Z#~X)Pba_9Sh#vhDY0WqW#*(JnP08cWSCw zyr)q>R|3Awkw>dwNiYv<#zUL}+ocu+My5GT@z0M{(I3jN@O}xEmyo2zCW>VG`!nDg zZ-vsPiD0v+7duwVAbp`r{iBm1HS?<=T=p(~nRx>ztcb$#$28c1^-ef2U!42d^NX2M z&I8|!t=PBo%7(#)OrklJ1+Y3wCx1ol(rf4pF-JxH3fM)|}2)Bdg-b(0C zsH4Otn6X!AfRt7hPJGCXd39Z%>+GHhk|EDvP467McR3vme=t4!{9eO z3^;*pFO<1o?@TyRZ81)0q6p`>)tP&J$Az_v)wIZd-%rL4UZQtgJs@6VF|KI;P3(Us z!^yM!+1{`avnBVy;*_tbbkZ4T{g;fnolP{Dze6@mN+sjVbm@VtSLn{?A>3jvA@@F# zZE{%Dzc{@C^6INAuxyO9+M?`bpC&Pr`X+ zYi?cmSaxUHA-et{$9s;~a*vzuVMoAY2uV4GcTcIX5A#pZ6Wb%Qa%qc5Amq{f~U1egnRy{q!+nxwC&x8?wuu}+E1*jGp11dru}gyKPF z`cAH{^$(0c<-$Ffk_A&Wo!KL-8ROu;m7c2j1K&%x;*$b7PVZd>+>QQ6G`hP|K6YhfbXivuxG-003(9_@qt zfp)0wmE!JPZN^u;cg`&#g8Lxj%l>VZ;Oygd*qU;8@Y?Vk+Y>Z7pIlq|cV(BL>dqOu zz1oa^73rYMb<-iieHA;WG8Vi~$b+Ya8oOzYChiM4k4;u4oWutoh-mZ0^40Ij>xymo 
z<8v@vdifHxew3o5+*E8A9e~p96h{uep^LHk z-;OuF{0%gt8={-YX z++(ZDnpmsB@ahHlllrjXWs5oSF{v1$dkLqwchEUclu_@&Qz%y~AkUi)Q`^3|5E6Tx z?)e>s>3a95<}(8l92Ux)uiSz5QNiGmvLAvqJ+WM5CydPMMYXtI!HzIDyfKRbrgD)gcdj_+wh+q;LkhUA5`Uc3y9s^Ooh$ifia2sk z3{9&((MwMh+4;ixxcPP(9xK&_Zr>0xGHDU#IrK=7__+Yr^{oYMZE1-8cM0{=$3g4n zFj)G0As$gk!^KXQiGxilh)TYL{P{}U&$B#}XY(BFm@@^RCeOi|pv0X`#3@ z=^2iT>7?~@G+CLh1ky8Jo>(Wka^0h!8R0Q4e4qGCyc4I%Djf}n+V5Jt&(xCb7%M{Z zU2daYPAg2^uLu80_JN$;d$LDyBca#y$$oY$x2%38m(r3E9ZQ&$gr{^A)BtlF5yP%r=-liAH5CwkK?er z`~ja85u!h&wXjrQlui6*NLqI<0M+D)VCZRp-E;~XzsbQ(SNGwoZgtu$pGO}6uUq-1 z!G8H@xP9^?Ryb&5iS7FxQUaI`cm*0~=#~3%BJDU6pGg7Lt zsrnq9I{7+C_8Nod)gqb}dK=!S`JheGVKeQyM{wg_F?PcJ1@O38A53-j;MMgjcrRxN z8MV4h{7-MiyS#@wY+)-tPqR4Wq7hLw|7-*1A22Z@M96;w6X|QV_=aS+cWv-wy42~Tf z6+}I*TP^{W9j|D^kQ-W;@*1Qd z1(I#10bl*cY!#LVHfQ|%_Zd2UuZE1sb5!b?9(Uem99Ix`9}Y@*6GbC` zTJ!uHk;OSo*M|!*%;$Fug!IWtu7XH=#NkFQaW>@hL$EwlPCX1W;P{1prno8)jlMoY z6*);RWBLIqW)uUjT^517s{lJp8$dZ!m}gO&a^vzY<8cQhA8bP@vwRO~)lS20%P(R1 zqi`x?$I(?TlqNmRft%ykL65j3oAt*EUJrePs4<72FKQAp)-$-r&kJ$&#m$@$e>YgT zN|GCv4+p=2W^zL$89RT(!|K0}=+$kCU`Kc3p1nD+UA-DR(sxxYeRmEF-p@t%<{on9 zy+7yqRD|r8wIMskIWaT;YJus-LUex`2NzRE>8AQyuzSsns@AMxeAXk)5%C`SZI(P5 zy-r{R%9EJGQ@h~bp$bU5d5$c)`j+(RzJjvgMpVvR4I3@jBAQuYzboS4lhyEH>_$O_ zeKfXZ%j4?lZ-~)5W85L74%_pR$+U4ix9rO`L3Gju)NyzUNoOw7^%decYHd!*HE)a^ zwkOiAKIotp2_H6QVb{o0@^Q`?Xj;1&a#|&DeU}YB6)RvGcHSmq_AS7S=*wi)qA{%7 z8y`Ab&jPCE$f05EJ`7#;1a@2*z(=yZ*s#eBF26pD726YViuFXe6!?~y&T51u?g}9J zwV&LqWzlonWTIww5|2miAdXxjmb{1)99rRm4)fI^>*67@Z=@MHn<-?kb~&Ua>R>VZ zm2sD?#mevU+}^TLC>EC>!nwTGC@ujWL5Wy-BNAtqW>FQ#UxJD}AFMg%2g^(1=(^~$ zFlY7^_#LE*nKR<>WU~{d{`rrb+xHxeJfol{Wjee}ih!qa7X`kD9|%6}Y6t7wt2C3} zp9KrX;z9KSqS`6W?Xr~MwYxpUXJH2{(r&^bV?GBpdn2m68pjsKXoGj>3ugavLtL^i z7~?gTf!)L)bUCJn%O4&fr@F#n^iPgOnVS|2+ZN;S`d%Sx!FIUV(n?># zMzkx6hMzD(_TBr3_1<$qi{E$8;?I^K%L$zGx*l>uvJP24OD>b|)TS0)(0+R#+P6<+ zziJiJOeaZr-aW`X?7fC6swt?py@P+ZTqCPgFVU~EjaVkw2iN+hfJ6RMqV4?!mX~Kj zTa2wmYT-(Y2Ul;BJH^t}Z@m}PgnlK>qdll+#Cyj^643hZB+!WvpwsgU0&N9jWF@lr zdw?QaH{uR%>b&1ms~qq0xrW67m++`kqG~lt>!8_IUG7(jyXBC5HsUBw%vOVR*SmHLf@l=j7HG6*Q2YP zyIM$<#1071Pp1Fz&)kw*Ks$K8Qq^KP&dK;2h=jf(0Y^rtTg@eqcySZ5Cv^$zJ>Lqv zFFi)#_x$rr1}odnMB#z=anyg^Sn}zX3p_YHLdQEbA?yf%;Cdi016KOLY|Ka|1mp+D{ztHyT;im(IU$AMZS zk|>LvP>^sC_3s`aZ?)EAQ= zR|Q8y^)(Uh;qpju%n(AYSNy(c;zb%CoJ!VgU4<=^>Ot(#Kiq2Z3B%r;hx&nL+LWNd zZ9FUnoo>sqLhc--9?(zmUbc6iJ|%jb^rA7}KG+qjwT;Ggh_3X<#bTu20{d`0Hd`ExvbI}kI^oPfYHDG+9)LN6EYhd1G$82yb+kTKVS ztCR<{`Z*6pp1-s3)|970syXzz)oi}&p_8U-J|QWSld=5HS6Fi7BxIP~r+(Q%I9Z8+ z(XE~2k_nRkyadGPb2^ImB}1lg436yoWnunoEqLyGK*KMDfpN-gdNsHTHg1?{u{-!A zS=4YEH{W%oi+$xd{gf%_Up!e*(4R}&-c&DL5V)jeX zeKW1`P{=%xvcC+QJDN#svjaJ-9gF3YMZjfB9jWE_Ghd80LMl5S!-CF0`Z*gqe}OJE z`@bM{%Vh*NgAc*(F&9zsLyI8o+Ep6-RF$h1J7%%~sS^Du7GLrg-=9S4MH020gvXl+W9mTIBzC znS|uk;@^~wbl>P+yfANcit%|882Yud5@S@xdvIj^C^Al^o$18 z%_EhcS_Ebnu8=e4D{$Y+RI-1+5_C?LgJZtkG?9PDn%$gA_F77Sf8|<|Gk*>`SDis+ za@|7rO%31gev>v}Cb4c^LVk!?g6QuvRIPY2&esj5&mXM7 zr&S?%?6eks-gu0RXRnZrT`CZJ(*-A+G6Pnuc}>qQZ6kBjZ3U+fwvuJZ@A>b(1%m`O zkaqhAU29Qf@#BOh2FQz$V6m%okCQf7ms;V5m2ZjNt~h$PK%Z`3lf?LF7vqq05Rvt$ zgv&9J$bctQ1nk2GUgt@=ri!1RJu)je`i6$BEQJ+r7h&NpDH6SD9CuJz8Yl8T-M{yR zad=k-=np)n+n5L}(NSgv61H$D{0w+fJN$Wm0$e;b1~nH~nfuObAf3u*$)%*(FH)=ZD`=#l z0)`#9M%O5u2a7Y|X#Afon2acr0YwukqvZ%61M+oG;=2Jlfb8&O_DZ!HAc^JCr z6iv*w6MX9{gJ~LrRY@ysp*V3ZHqY_F?}lq(Y*jVYakweilA(>OPF_I0(qQ5orv{x) zg>;EfHM9QQ36R~azzs~E$iAsbC*kF3xWQEtqvRbd?k39N9UU8dpt%I><=^tTs%H9Q z`)R1oRmH*A51?>ma+T!0$pU581ac-g(KZ?Y`3fA?^V}1CCW0Pkax65@JjAtO9L;;) ziB<~sVA=AL6t{688yp17JY0#Xk2m(8iKpMBw745B;jn4$3XFVq1yrpRvHHXni=#!x zjApPYDxa{im@B0VTbgz8xY{4yf8-C*Oaw$G8N=_~$D~2P|BnJrkhmiprd7nDUEeL- 
zx_dVbS-b?U^D|S3f*82iXwka!x#WtbFSO5%=k?69q@+!h3*$Rl9(QTd1#kC2+3~Rg zk*5xD{F?+cNxI?!rBtxwys&YL5Et_z7j2ryL79CVHP!x)mIPgflDaMAR-Y>7uovk_ zRt%Io>;;K{5}GG2hM>r67|+LHbd3%iIKCDBs>$Q1`Y#x>IiDC6hSL%4Sh(9+EkH?c z`X3_(6KiJRDt~3_9-~h89&^XJ@;3OxP!8TmIzdwC0CxY%1F~HbJtF^+XSMeb1|;b+ zp2@URtqPC4o(oE^7YZ8s6KI8B4EV@4(EFB^Wc~FjniF%d>gi%rOf20CEpq=z6q!k; z>ZOwDy>;|J#%m_`cpM&Ow$t^m6JWu|!!$&w3g1RVkXOqrp}x8hetlkzU6LZ$c(I1s z4u7m#djAZzUBya;xd}wcHIs}C_R;RPCc4JFo774R!^>~^^ocLub-X==tal@*Hd&UN zlzoN%2;_4+=IX>nDvI{ehS=|#S_;>2l3isL(Y+y!|O2zU~9Docf`0D&&;)? zv#toBRe2FPb0iHsc4Wf9G(UKja*{;PtVQRIUx?$(WH34+16Lv%@#4V-A|sWCweFW; zKurZprdm)1J5yNDJsm&%38#;*%hNaF+GOmVv-DxVJ}Ft0O=i9urV}sEg@(YLL|;6e z3|Ne?Xvk-+91g&Nd`%eD-JqxbgKjThEI23LfeVK~4iUrhIbc&$=v=n~1sYmDjv?gj*Z{eJiI9w35f*V1zN#&wEG>jC-lnCTm zP+@3N_Jl+pZ^B&>i69rc0xtyrg648uP3Fzhb`_nL4Wg4DdHWTM` zPk`wQnn}_3K2o^E2K{Dj1exbT;N!mn(IRZ`+GF9LTV$R&Jwjj< zRS27v2Cye+94wM=7Q9}29YjuSL)S-E^nr6Lc--YRl2u_iF4Bz|9 z_BQ-y9z<8jj5S+es*dVTYoIA$1IB;v$9gsq{uz(MA0npcNX6*2f?{aBX^*>3OhoSs z;%vo7J^0#ID$pwKfZe|an74^tw0lQ1u6eNqoln$LheZlxEi8b&-=@IL;9&gsGZJsc zq(NKxH;V#;SLCQ?4K*&4r2cBv@FuJVQ!@fFQHn#uZ^vPa!dJ!Z?t2}E2x zlP3T9N-j*$;l^~f!$;Lm=u^Ahjb`qu5-_%NX91N~atD4sn zUX@rfS0J+@0H<58VS{I6p!MmW>>-5fDxaO`C&&aUE?PiHYVe}KE9hS%VC7Y|7?aza@at~4cc9lF^ z^9$uq4e$$P=@`sc)+&*Gz$4G7*yJ8epO+@A(Rw zg_@CHsKxh-M5}Nb9@}M!W7C5%_vSP#xpV_{KMRpySyfiA?wMfPnjqXDmJAp5OUaVL zKa5IwBtD$jM&GtCz&Vc2@K*m470B;^2qQC6TbYB0N;O#Nx)tcA(SSlu{`e&^3}4t4 zgYgnI%(+YBiOs?dU}1%@;5}vI~b-e8aV=94>zPlIAJQ;~Dc)z~kzD z%zHBp-X$CX$uHjUv*BJEkL69MS{auC%5!pLTUIJ@-1FdFb@g?sN)nzO!& z*YI|usQ5{`B+{Joo1?_89=HMKv-saSC<5f28tHKfS2B6tIy&5tgGcxt$2&c;TwAFl z4dyuqXNFy1{r%@SJL2!HYfL^>49umypr-heM#4%QWF0+9?nGhgO|Q2d3|$~ zyilJCqorFpu?h_`Upt6Q()d7{7fys`BTL+y^OktK%|mr5T{N_MOAU?R3Pw?kRcWy& zl`FSU-CgB^p0*+8=R$_@->-;Q>{B6ZMg_`$kHHhmPJ)__k6@3I99=!6FUa|F99NW2 zBJ3JP_TIL3EWG%dXkT|lZ<$Ei>phNrJURyyPR&5aw|B|*&V}@(bP8twu&r(O68@-%lgp2s*sKjiovwoQZXup!U}LDyRKxNldU3cr6To?`64k zL*OnD*5tjH7wsAKnF&l$RT`8`noJCR=R?EN4m7fEg(}n7Rn^vnb{yoj zzWPA(qf1T_Q`;9bXbjI|oj;xkge{35?_XSTkuA8o&+5IaSe?c{-djht*}Y-DtZ3+E==;OW3sciGClj_h@o)- z7Iem8Y^)wx)GCUz=QiO=VR@XSah3!9bYF`~B^CrNAq&4nk)<29*_>34hL z6d?`5LbKt*D}D4e@W!6bSzPz=pVapEAdPn7a}sr@EynTtCB2g{h>G4a8m1NcYH8A`>7gnAA6mCY>#Z)_QCFGd3SIJRL#0 zR*sud?SzA^j=1!{Q$+dHL#A)}RM31{fv>_CaJ~PY$fm5|ZaibyX`=;n!mUasBCZG$ zTlsl(=3!Lcl?0BVK6L+*Cny`h^G?JfD-(>?!tWb#RBB|s!2G^E>WQe(!=K8CgG(YY z7}tiuF@5xKeKnjWZq)y2s=(~PO}aO9I=VQoMEAOQ>MME@JthfpmT4+*J5`tXsV`w2 zb#8%;<`w)_ngaJ${;0BEIUjSAmtlcTAT;K^C&67wq0`#_y32Zx!Y)ZktV#e$~+}=ft?wK{s}1^S;ajzm^o#bCwL6;(@zXJspvvKd};;M)VQ5bh*2fdnJ zLw9>>gIx;6?2INN9bV5o*fJ4ZK2Crr$5JvRX^#poC*Xv0S@_IqvY^4@F8-6g0$ZSk z7++dQw?-BSCKT3Hp|Kv$&pL;jnra2nY9)jYDZ$m#wwQ2y5uN|o4)sO#uoY|{benDdcThE ziVU+@{3i;tCM2Lzmn^D^ECXdB1GebhL{9GZOscr_G1=8_N-ZvnR(=1bfi=<#z_E3N z>Xtuc`lm$W&Xgs*FC?Axuh~Vi6qc}p@@brXfiqOZ2BP-;D8V!hd9)l!qL2Pv!@NHg zpl17s$Y^n(yYdj{{q_ae9!$h`y^CbRg%41;ssJUeys;?d8+5s=a%qRt;nuqb8t&2q zS>k{3hmsLzYOToMZz(h1BbDu2`-a(KvWiL$Dzb*159s{ZZO}P~5oDyErl-V?VfRNH zY)G!5=bir}r^FUut_$ChQG6TXA1l)VwH$efV_M#i|t% ze7ESlW$gWRpGfyNUN4hq#+P&K(fe5-(|hSS%#%vTqJvjKC-^g7e=z}5XacFZ{saDQ z&V&i}<2en_gTx@QiOe6cU_`3BET-|E)F)piV90$3ko|oQ4J`MNxuYgTslFG_RNAw- zbC%$*Rg>`BT5s^mYQoqqH4u9}k^5kI0$-mW5(HNM#sJG9$QCXW>>B$Y?r{x)Q=2|e z*GY5eSxY`Io34c?uOMhiMw4GDGufmUQTTFOJM#qZQs|jPk7;#M=gX$t17;N}Yw$S( zBTE`_YYc1N;g5sIt-*8gdaz5Zr)3@w$()1v7zN#esY_ndsSbU(t1OQSeR&Jk<1$e= zF^4^5=t?X0eZVKzSa9UCA2*X?@PM-dd$fE7x6P=LD5YOTN8eO9vp|;{vYr8M6QAR@ z?rIq4w+i<1o}r_o?Sg$~M6&8I@sm;El1BMr;=-+< zEWe)%$$c#dU3HUQ%$|h4vroc4qhgrIGZK>2ywOi~3w<#)nLggL6Xn;FV$`^+ zcsX}AoR?5V8G8qCTO$kHu05Fcl_w@9pQ;koc!bxR`|000TO5D(J6Nw>g?Bo~!&JqW 
z#B*u}Zl}7eh;trpjl7Nr!I!=^E5yP1NledoA&~!bo8Z)!)BsP@gU+{zZn-t-JY5Tl zg>?{rawhlQJey|EJ&Ue(b3mf^BDxsGQtwCOxE7teFtYayI{y6*VK#EOuSXsxJj>j_q3FNu*tl31?nmy$3I$QNQm7Vp zq??luu65MNTpEO0Dlu;6N4lc(CQhn73meUwLAU^j_3}+H7&Q@QpE2e-whMvcxNkI0 zBoD`gby9KNUgl-vSMun{Ep#gqg86?-Fte)#y1w|)rh?aKJpTlw8uu}KAMYpQZ>kd` zpL$w#R|TpXyU2adKwMx_1I%@M+*{4k)hD0QqTXo&@5Ci+vcW=JCY()vXlIkurm^gm z)C7_^=*PW~HimA^LF|*t!+M_8(%v2j@edv{9Uq>M!=)*h^e736W^LdnNHZFBY=m0q z$nt%7-k`8kgKhP^MHlm(So+)buxq>k2l%sj%w+>M$T_`orO;Bkt}L5*{|@n3&?Fqt z?I+8R_K=Hw7oE867})o*2w$l7(^7pQaDHY$N0QaqV^K1kpSB1@YP~|MS-c1h>g9Fba@e<_7mHV&#qO3&I&WbH`Ddp^<<{sy?U@>gH#DQa*SkPZegl>M=a0cg zPH^apHsrHPXdwEK#2n2hqq$LJaQRQi~vFhJE z(&%*nmIg|}f)C>%87301H%>4wsDSo|EWuh^15y{(iN_Mp;;lxcQZsIXj#4z9QP_YH z+U5A#NtfSyW`a+#5LzWnh2TG1`Fw#PnHd<4lS)INbx$C5OBp6xOkC;V%w)KA%>qN~ zZ;@1`iCFsSHBsAX&irgq6Pzj;BBzabhS*w$o@+g05$bJ>-yQpqw2wpW$j9`%wWxG)Y&j6MViMjSGLgr8|Et6xjG=;jbIK=VgTv zUUj}eTI+S$0Oj4(J7^0^%_v6m^p%3ArVDY>WwDk;bH-|)q&Jty&)@ieB?B@2i9-%v4?T8Ps9NFCumuILVD zSJ}vL2d7ukrD@7|P39?H);x(xDn-0^_YjQqrtm(=NU*Hu^|9@VXx;r9=O)JD0K?)! zR*~R)J_EaAA$lLb2S&TjLwTg|&}93L9>fQO|HyDg7nMk^fNvfV zn3;9}#`reS)^IKIO!_VHm8>+Ey0{3CUcik_2s;~;>Fdm57*a(hVRJ00hp7g*vIw_^g%iDx=}`PkmRVQ25Qd7D!_Pr+F8y8&%o{raeXTE`$Gse? z8g&^oXATPPOI45~r`0JF+`_D@zXWeDBw@BrBpt_Vrs)bXjACc9Uzhc~%{iy#<3C>74aNCC)1(7oru{N?&m^*e7TKP@E@{tn3 zwq2*e%U2ZUeEdf~%zp=qw-jUlk0Cm_NSij!$^s2(L;9gqncn%cAI>S=WxjR1Wqd>u zVd@Qe&TCkO*xc!ah(Gn%QgNBkh4Zn#*N(WatgXs(4917YJHRu4H@=p$Wybb>VyeBi zVgIb{{9Kj5Yv&h1MkpIM7mP!p-|29WpUZu0&(Luz0$@;2gUHpF)Y&B*r2oa?fY&m77~{-q;T0f$ zPn(YadyrJTcu!L%TBCfAfRqMJ$ArLi(AIm+XKlOT#maFswQCdNs%vPbY=ZtvmZ6I! z@5{ck45w|JNlJr7xGQ_ZVd<?`8RCdnKN5h=c>izSR6= zGpShmf>^Jp!6{PdFu$CD(j*;jRP7BMn|d8{n{4pCZ97<=cq}O0da?4s_=mSu;_EJV4r;*Iey`fMeO7{s@IxQwfy}7a&1rsTq1vfvhx;P z+x`P`&S?XG-hiU=7*2S58Ok0}<+h(R!8JY7__EBH)YB_;B?SDhw>N>yse1y)mCDo3 z(}J|19jTPod*&(1l06YZqM}`!$kv9kBq}6pD*G-{_snA}k$uUMHG3j!cKy!tdcEIz z-@@MCHWq!n3oSx4&~9i3#zwI`O42D{ zyU-C|?C1xJv+vS3M;3zBL1x!2w}9m5CYXEuCdvMM9CGhPLF&+c^zPA}sG92zy8@kY z%7=W+pRfh?9?oVpM<$|$z7*PQutT*A=ke~q4MVu_baX2m>?F2=0CegRxcq?W_SSamX7w$yvg;yp;GqQ# z>Y53IHfGW}DPgRB&qq4*>>_x1g(vSmydyOK8orI%4D*)Hpu=1|(X;*mt$%1n9ttmm z>*gMqAX7u9vV8Ke#Rgtq4ghoS3atFje4ph{(5Pc96?GBfNs}rJe%uiPvWiIe`i`)? zb_KpaFE38fu7?5nzCc&V;@B4gY%_Qt^9k&y$Bl+z!B_#hFL?3p#5k)>Y^i!l@qP-{gFPH-6DsSdGj#l$|L+9tSq{? 
zK!N6*9R>Yv9iWT3*7#Ua27O{!4C>r;^|HM|I{zGbugiW=z9dlj*-zCeW=axrFv9gccv0*>qMqwNT7`klK*SFDc# zgLxWslizAw?jtMO`=LMaxWQtgTbGiqpK>7SL>tW6Y>XyF%i;2jt$c<|J*vsEnCq{b zXcwbQe%tNcR6{x$>h5{t>Cp+KsEr%WJU53?6;A$|-T1Ip@R z%3tUTf`xGy(mS5k8196PLBqu_Q%=FSr6Xwl?Cp4U{tX)E@Rl4enFO(0_miY;6G+bi zQlND6W^qcZB8+!LFI1L=I&ST{o=8dm@a`aSxMw%*gksT zTn*S|w8wVx#aNhh0W0UPf?1tEkV8{|NOzb7T4O}W`|hM!Ge3}vq8!{Pm_%Ll{lUA< z5vuuVEA2A#BDnbmVf#ntan6p8B9qP;7#|;pTfrYy^3LLgy#}aN%tEC{w1MYCJHeKV z_rdDIWfGit0^1fu;EnFb=^1TA^Ag012W`Ro#5OvpMoQ!-eV=?ivYD*x7!Cfb%`nI< z0{5r25Pg`TBF-zxM1>FQ;A2KN-rVCV_9?tgN)El{ZY$(~jBgBBjUG+A|?YJUxiZ?XIYY+ z>V{w}A1rFI?Od^%S6^{As~qT-xDq@HPH_r%)kG#UjEVbrQ@j|$&I`Q0fftL{K%UoO z+!NLg#snXC)SKx_4nB{7u*b1DcE~fxdZZ-YJ$fFt854#h^_Rj)-EgYZY7MHy<>0dE zs$!}4oya4-}!P7-mh4V1+ zNjc8YW#>Xqwi4Z3oa5-WULD)*J&U93I-={ud^nXGE&lxdBuu}UMJ}@S|GfNCEHWI8 zhgRD|M&w&+HgO)jD__eE+!;qR=RP6nx)H?RsUv*9e+Ea+O(y4OL=xQ=3#jLr0o?HM zLa-m<3C53}a3)f#aK!vN++bD+En=lPqg{H?ZN)e;WVa27XU?YDQN_iR?O5)vr?up` z&I3oSMONg(u~f(%noONO%m<5eseHH37l(iWZGe2sq=sv|OHb>ntgdEpfqYh3QGL$f>&(rwpD zVJzBVsog@h>i5A|@pD>R%FajCEr;|mXW=fRzf<8_91{7GROfxiLGY4msTN86R5#*B z+g&)N#DwhVVh&a>?V+IUa(uaS3RKOHfNs@Qv^=DiT-A!^xWW{^=h)_@N%l3okDbK*1?Rqme@Ysh3Oi* zscdaIi>+ygKCyermuN$-i@{)AB@;}Km?Uxjm9wcy_F8VD>udVz<$GGtuOgC!KgGZ9!ghf04(jFI@TRE2Ij^4-Z(%j7@4H^i#xBKHG=NM92 zw2ABH)|dE2t-+&Sp0KD-898`UNFP^6pvP5NI{ZynvVLPIf3D}gVqHHEXuG6@E=~PP z96X(h+rSl?8d*)w_PtDY4k@Cc-R>9bJ!}WJ<|JTE^gQe~(*dXT>;z^#2g5!2WKQ)< z3MpOuzPNP%8v4Cz2A^AEk2y|8;OnyvZiLRI(;qA&=URUwAJW4#OSYD+zd*NDw3$Dp5aI}w`hv!!Y;@0{Hh~Jk#iXOgnhqSe~2A$bb5c{Ay@=a|h`Z|wJ54lV$m1E%6z=bq$TV%1NMklNs#){sCbV2K?1iH@V7ENBil>gAL zn#xQeFyeYHG41DvaYBy2r!fIOwcJg7N?macDdg(AFT-?0jy`R33N*D>(`7SOl1>iP0 zO;I1KMKI2l#q7=U;e|`w*qQaMu;a>G>eoSzpR1bBYUC)2>vp#1%AEESN8vH>h^yt# zo>wSNW3^)zI)!7g_cToF)eDW24T#b`c~+w(lhXSv|HaH?XvGgAqkCt&6o&Au# zwu5-;;rGz@V@q;K(+1iFh2TD)n@|&e4GisCiHEnnitftm`DwYQsNE#Wo!MAPCal~| zdhb_*dArwxV^vG+bZ{Iv_8THDe}9O+oTUdT9!`*;-U1E;bb=Fu*U>4?J)uqcF5_q!@$pkcTXeRZhqoZeMZukdCo*fcMI8m ztqp_(4d_8W0{{LC2jkhuT?JXi9YzXb4Mv*~U=nWd^S_ zjIiA`wpYH_PCW8m8433{z?9gPcw@~7jMi-rwg;AT4`-ev2X1*2_Xt(+Vl@Mv)mXr| zA#u2FX)&4HYREKi{^N!l=iZvUc>rrZ!X zVm1=mY&8Kxo7Xh9OC@>D&J#@QJ_GwHEG`x-_vc^tF=g=?hHyTeb4*&*8ZE?1&~?lS z+8=w+%?C8mSbiZ?$(*6~@7KWl39fK#R5Er6ZHYslE~H73Vc7b>LfA8YISSL3bFyRO z!P|WRO?lG~Gg@t+Yi!Dxue~P-Wmh=#+SUVgyaqz_sp+8qp);DiXR)Wno5FHeRWHI|w@gV* zHQM<@DtgA2!Em=FIJsXUHF#S}?-p+4T0TqWT=i#T_>6OKIdKQvn>_<-#;it}=NG7> z!Oqw-n8~Y>20d5^!g#3FJO~Mz%|pLqD6_WZ&)^#52SmzwOMW zSGL}zXRcM?wLLnxHk#RTLgdkTODOkl&3uwxdx3wNw}~9SEn<0gy3*(iX|!~WH018> z!3ADR$9A?-Fm_5N(J+$7n@c=Nr=6MHC&3~}tqVY(4y;z-?%g=)@Dn=C)d0tgvxcpy zFX%xtcc>&OymkHyzH=`xFnL(Yl^=MIyds6nQ57_(ml26E{m8%GLFj3#kud+{bu1p0 z0B&;x7%ck?@+(xuoSm5HUQ3}m*1F8rA4G<*T>BOsBVf^{bgbIb8<+H8U7oDvlPZ;9 zg7!gvLVag)C~pJCe~xFhB|p)W1>G^Jw6`*^Xf>-T-|-AJ+P@p_pHzqAQ(n=^_KQjI zu07xb?pU?vs1zR$bE9X+h)f*e#(4c_q{MK(iMt!d$B}?msou*mV><0cYdaw z9^%ObIOC=+oZ598qwbw_e0%G)WB$%$-YwG)ZhUmX>t;*A+2}6Ooz)Q|jr7Tj9s=z7 zOvKKZQ_^kx99kg5;fEm;aKM%JL~6-%l00(;wV%@;l$lQcs$CR(&C8&gFBg+DYgt@3 zJ9B@rL|ydtW)-gT7>#D-br@@HC`x3t<@VnjR_v|z9Q{?E(}BJm49PVUr|sDZ;%zNN z&ug!c%Nif)y%qvupfd)Ihps1XURJ!GdNR>Iok2}ZD!KAgvfRrpZE*IQ#qh*Z6*SwO zJEaFDz%=4}{CmrgW*8Hz^KGfqadLb)?3x9*H{+>WpqY6qOzu>`+( zAlYzg0(N@dRa6!|7B2UGMK9juA#AfLEsh~t0JhOr>esl^KGx==$ zpgn%5?hl(tJn5mTO1=jv;^;zwxW1DUYWppB(MTO1-Je4oHs#XiDmi4; z83#_~el=a3zZ6QhBw+TXc#^Y!1$kq|>bJCDc`|!OPvkgv97ix zxZg7LW4V4FtVkEVspoN5Z3`UTSy5b~7mtdU5=g;L6LIUZdmz^@^D&owG2l5TKv*c2M8zfM+9Hh)bOwLcmrtF#qO`{+&i*=YGe* zd@@D#9Z9syw|JJTU0URMKo_jWYoNtAJCr@WkPcNJg%!Ocai8G{Vlr(add)otdKr3D zCnBF(2HYp~zCTpzY(SrVrjSy~bGGGD~Xd 
[GIT binary patch data omitted: the base85-encoded blob data here, together with the head of the archived test_bnn.py diff, is garbled beyond recovery.] ... - def _build_core_model(self) -> tf.keras.Model: - # tf may complain about missing
gradients, so silence it - with LoggingContext(tf.get_logger(), logging.ERROR): - return tf.keras.models.load_model( - Path(__file__).parent / "bnn_model", compile=False - ) - - -GENERATOR = BNNModelGenerator - -# ---------------------------------------------------------------------------- -# DATA FACTORIES -# ---------------------------------------------------------------------------- - - -class CIFAR10TestDataFactory(TensorDataFactory): - def make_data(self, batch: Optional[int] = None) -> tf.Tensor: - _, (test_images, _) = tf.keras.datasets.cifar10.load_data() - assert self._shape_hook() == test_images.shape[1:] - return tf.cast(test_images - 128.0, tf.int8)[:batch] - - -class CIFAR10TestLabelFactory(TensorDataFactory): - def make_data(self, batch: Optional[int] = None) -> tf.Tensor: - _, (_, test_labels) = tf.keras.datasets.cifar10.load_data() - return tf.cast(test_labels, tf.int8)[:batch, 0] - - -# ---------------------------------------------------------------------------- -# RUNNERS -# ---------------------------------------------------------------------------- - - -class CIFAR10BinarizedTestRunner(BinarizedTestRunner): - def __init__( - self, - generator: Type[IntegrationTestModelGenerator], - **kwargs: Any, - ) -> None: - super().__init__(generator, **kwargs) - - self._ground_truth_data_factory = CIFAR10TestLabelFactory(self, lambda: tuple()) - self.register_data_factory(self._ground_truth_data_factory) - - @property - def repr_data_example_count(self) -> int: - # TODO: fix this when tools are more stable (on the device this tends to time out) - return 100 if self._use_device else 10000 - - def make_repr_data_factory(self) -> TensorDataFactory: - return CIFAR10TestDataFactory(self, lambda: self._model_generator.input_shape) - - def get_ground_truth_data(self) -> tf.Tensor: - return self._ground_truth_data_factory.make_data(self.repr_data_example_count) - - -RUNNER = CIFAR10BinarizedTestRunner - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture -def abs_output_tolerance(use_device: bool) -> int: - return 13 if use_device else 49 - - -@pytest.fixture -def expected_accuracy(use_device: bool) -> float: - return 0.79 if use_device else 0.6882 - - -@pytest.fixture -def expected_prediction_deviation(use_device: bool) -> int: - return 0 if use_device else 84 - - -# ---------------------------------------------------------------------------- -# TESTS -# ---------------------------------------------------------------------------- - - -def test_prediction_deviation( - run: CIFAR10BinarizedTestRunner, expected_prediction_deviation: int -) -> None: - xcore_labels = np.argmax(run.outputs.xcore, axis=1) - reference_labels = np.argmax(run.outputs.reference_quant, axis=1) - deviation_indices = (reference_labels != xcore_labels).nonzero()[0] - assert len(deviation_indices) == expected_prediction_deviation - - -def test_accuracy(run: CIFAR10BinarizedTestRunner, expected_accuracy: float) -> None: - metric = tf.keras.metrics.Accuracy() - metric.update_state( - y_true=run.get_ground_truth_data(), y_pred=np.argmax(run.outputs.xcore, axis=1) - ) - assert metric.result().numpy() == np.float32(expected_accuracy) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_directed/test_mobilenet_v1.py b/archived/test/integration_test/test_directed/test_mobilenet_v1.py deleted file mode 100644 index 0cf1ac21c..000000000 --- 
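[Note: the archived test_bnn.py above loads its saved model inside a LoggingContext that silences TensorFlow's logger; the helper's definition is not part of this patch. A minimal sketch of such a helper, assuming it simply overrides the logger's level for the duration of a with-block:]

import logging

class LoggingContext:
    # Temporarily override a logger's level; restore the old level on exit.
    def __init__(self, logger: logging.Logger, level: int) -> None:
        self._logger = logger
        self._level = level

    def __enter__(self) -> None:
        self._old_level = self._logger.level
        self._logger.setLevel(self._level)

    def __exit__(self, *exc_info: object) -> None:
        self._logger.setLevel(self._old_level)

[Used as in the diff, with LoggingContext(tf.get_logger(), logging.ERROR): the "missing gradients" warnings are suppressed only while the model is being loaded.]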
a/archived/test/integration_test/test_directed/test_mobilenet_v1.py +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import os -import pytest -import tensorflow as tf -from typing import Optional, Tuple, Any -from tensorflow.python.keras.utils import ( # pylint: disable=no-name-in-module - data_utils, -) - -from tflite2xcore.model_generation import Configuration - -from . import IntegrationTestModelGenerator -from . import ( # pylint: disable=unused-import - test_output, -) - -# ---------------------------------------------------------------------------- -# HELPERS -# ---------------------------------------------------------------------------- - -BASE_WEIGHT_URL = ( - "https://storage.googleapis.com/tensorflow/keras-applications/mobilenet/" ) - - -def _MobileNet_safe( - input_shape: Optional[Tuple[int, int, int]] = None, - alpha: float = 1.0, - depth_multiplier: int = 1, - dropout: float = 1e-3, - include_top: bool = True, - weights: Optional[str] = "imagenet", - input_tensor: Optional[tf.Tensor] = None, - pooling: Optional[str] = None, - classes: int = 1000, - *args: Any, - **kwargs: Any, -) -> tf.keras.Model: - if weights == "imagenet" and (not include_top or classes == 1000): - input_shape = input_shape or (224, 224, 3) - rows = input_shape[0] - if ( - input_shape[0] == input_shape[1] - and depth_multiplier == 1 - and alpha in [0.25, 0.50, 0.75, 1.0] - and rows in [128, 160, 192, 224] - ): - if alpha == 1.0: - alpha_text = "1_0" - elif alpha == 0.75: - alpha_text = "7_5" - elif alpha == 0.50: - alpha_text = "5_0" - else: - alpha_text = "2_5" - - model_name = f"mobilenet_{alpha_text}_{rows}_tf" - if not include_top: - model_name += "_no_top" - model_name += ".h5" - weight_url = BASE_WEIGHT_URL + model_name - weights = data_utils.get_file( - model_name, - weight_url, - cache_subdir=f"/tmp/.keras/{os.getpid()}/", - ) - - return tf.keras.applications.MobileNet( - input_shape, - alpha, - depth_multiplier, - dropout, - include_top, - weights, - input_tensor, - pooling, - classes, - *args, - **kwargs, - ) - - -def MobileNet(*args: Any, **kwargs: Any) -> tf.keras.Model: - """ Wrapper for tf.keras.applications.MobileNet to work around h5 multiprocess issues.
""" - try: - return tf.keras.applications.MobileNet(*args, **kwargs) - except (KeyError, OSError) as e: - if e.args[0].startswith("Unable to open"): - return _MobileNet_safe(*args, **kwargs) - else: - raise - - -# ---------------------------------------------------------------------------- -# GENERATORS -# ---------------------------------------------------------------------------- - - -class MobileNetV1ModelGenerator(IntegrationTestModelGenerator): - def _set_config(self, cfg: Configuration) -> None: - self._config["input_size"] = cfg.pop("input_size") - self._config["alpha"] = cfg.pop("alpha") - super()._set_config(cfg) - - def _build_core_model(self) -> tf.keras.Model: - input_size = self._config["input_size"] - return MobileNet( - input_shape=(input_size, input_size, 3), alpha=self._config["alpha"] - ) - - -GENERATOR = MobileNetV1ModelGenerator - -# ---------------------------------------------------------------------------- -# CONFIGS -# ---------------------------------------------------------------------------- - - -CONFIGS = { - "default": {0: {"input_size": 128, "alpha": 0.25, "skip_on_device": True}}, -} - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture -def abs_output_tolerance() -> None: - return - - -# TODO: try removing this when global average pool is improved -@pytest.fixture -def implicit_tolerance_margin() -> float: - return 0.15 - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_directed/test_offset_saturating_ops.py b/archived/test/integration_test/test_directed/test_offset_saturating_ops.py deleted file mode 100644 index 9e6135eb5..000000000 --- a/archived/test/integration_test/test_directed/test_offset_saturating_ops.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest -import tensorflow as tf - -from tflite2xcore.model_generation import Configuration - -from .test_mobilenet_v1 import MobileNet -from . import IntegrationTestModelGenerator -from . 
import ( # pylint: disable=unused-import - test_output, -) - - - # ---------------------------------------------------------------------------- - # GENERATORS - # ---------------------------------------------------------------------------- - - - class OffsetSaturatingModelGenerator(IntegrationTestModelGenerator): - def _set_config(self, cfg: Configuration) -> None: - self._config["layers"] = cfg.pop("layers") - super()._set_config(cfg) - - def _build_core_model(self) -> tf.keras.Model: - source_model = MobileNet(input_shape=(128, 128, 3), alpha=0.25) - layers = [source_model.layers[idx] for idx in self._config["layers"]] - input_shape = layers[0].input_shape[1:] - - return tf.keras.models.Sequential( - layers=[tf.keras.layers.InputLayer(input_shape), *layers] - ) - - -GENERATOR = OffsetSaturatingModelGenerator - # ---------------------------------------------------------------------------- - # CONFIGS - # ---------------------------------------------------------------------------- - - -CONFIGS = { - "default": { - 0: {"layers": [1, 2, 3, 4]}, - 1: {"layers": [8, 9, 10]}, - 2: {"layers": [15, 16, 17]}, - 3: {"layers": [21, 22, 23]}, - }, -} - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_directed/test_zero_weights.py b/archived/test/integration_test/test_directed/test_zero_weights.py deleted file mode 100644 index 04e90a0b9..000000000 --- a/archived/test/integration_test/test_directed/test_zero_weights.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest -import tensorflow as tf - -from . import IntegrationTestModelGenerator - -from . import ( # pylint: disable=unused-import - test_output, -) - - -# ---------------------------------------------------------------------------- -# GENERATORS -# ---------------------------------------------------------------------------- - - -class ZeroWeightsModelGenerator(IntegrationTestModelGenerator): - def _build_core_model(self) -> tf.keras.Model: - initializer = tf.keras.initializers.Constant(0) - return tf.keras.Sequential( - [ - tf.keras.layers.Conv2D( - 4, - 4, - kernel_initializer=initializer, - bias_initializer=initializer, - input_shape=(20, 20, 20), - ), - tf.keras.layers.BatchNormalization(), - ] - ) - - -GENERATOR = ZeroWeightsModelGenerator - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture(autouse=True) -def skip_module(experimental_xformer2: bool) -> None: - major_version = tf.version.VERSION[:3] - if major_version == "2.4" and experimental_xformer2: - pytest.skip("Skipping test due to a tf 2.4 bug") - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_directed/tmp.txt b/archived/test/integration_test/test_directed/tmp.txt deleted file mode 100644 index cb9182aba..000000000 --- a/archived/test/integration_test/test_directed/tmp.txt +++ /dev/null @@ -1,19 +0,0 @@ -============================= test session starts ============================== -platform darwin -- Python 3.6.8, pytest-5.2.0, py-1.8.0, pluggy-0.13.0 -rootdir: /Users/laszlokindrat/ai_tools/test/model_generation/integration_test, inifile: pytest.ini -plugins: xdist-1.30.0, forked-1.1.3 -collected 1 item - -test_bnn.py ->>>>>>>>>>>>>>>>>>> PDB set_trace (IO-capturing turned off) >>>>>>>>>>>>>>>>>>>> ->
/Users/laszlokindrat/ai_tools/test/model_generation/integration_test/conftest.py(149)run() --> dirpath = pytest_config.cache.get(key, "") -(Pdb) --KeyboardInterrupt-- -(Pdb) --KeyboardInterrupt-- -(Pdb) --KeyboardInterrupt-- -(Pdb) --KeyboardInterrupt-- -(Pdb) --KeyboardInterrupt-- -(Pdb) --KeyboardInterrupt-- -(Pdb) --KeyboardInterrupt-- -(Pdb) --KeyboardInterrupt-- -(Pdb) \ No newline at end of file diff --git a/archived/test/integration_test/test_single_op_models/__init__.py b/archived/test/integration_test/test_single_op_models/__init__.py deleted file mode 100644 index 4ce065c4d..000000000 --- a/archived/test/integration_test/test_single_op_models/__init__.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright 2019-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest -import tensorflow as tf -from abc import abstractmethod -from typing import Tuple, Optional - -from tflite2xcore.utils import asserting_cast -from tflite2xcore.model_generation import Configuration - -from .. import ( - IntegrationTestRunner, - BinarizedTestRunner, - _compare_batched_arrays, - BatchedArrayComparison, - IntegrationTestModelGenerator, - test_output, - test_mean_abs_diffs, -) - - -# ---------------------------------------------------------------------------- -# GENERATORS -# ---------------------------------------------------------------------------- - - -class ImageInputOpTestModelGenerator(IntegrationTestModelGenerator): - def _set_config(self, cfg: Configuration) -> None: - self._config.update({key: cfg.pop(key) for key in ["height", "width"]}) - super()._set_config(cfg) - - @property - @abstractmethod - def _input_channels(self) -> int: - raise NotImplementedError() - - @property - def _input_shape(self) -> Tuple[int, int, int]: - cfg = self._config - return cfg["height"], cfg["width"], self._input_channels - - @abstractmethod - def _op_layer( - self, *, input_shape: Optional[Tuple[int, int, int]] = None - ) -> tf.keras.layers.Layer: - raise NotImplementedError() - - def _build_core_model(self) -> tf.keras.Model: - return tf.keras.Sequential( - layers=[self._op_layer(input_shape=self._input_shape)] - ) - - -class PaddingMixin(ImageInputOpTestModelGenerator): - _PAD_KEYS = ("pad_t", "pad_b", "pad_l", "pad_r") - - def _set_config(self, cfg: Configuration) -> None: - self._config.update({key: cfg.pop(key, 1) for key in self._PAD_KEYS}) - super()._set_config(cfg) - - def check_config(self) -> None: - super().check_config() - for key in self._PAD_KEYS: - assert self._config[key] >= 0, f"{key} must be non-negative" - - def _pad_layer( - self, *, input_shape: Optional[Tuple[int, int, int]] = None - ) -> tf.keras.layers.Layer: - kwargs = {"input_shape": input_shape} if input_shape else {} - cfg = self._config - return tf.keras.layers.ZeroPadding2D( - padding=((cfg["pad_t"], cfg["pad_b"]), (cfg["pad_l"], cfg["pad_r"])), - **kwargs, - ) - - -class ChannelAgnosticOpTestModelGenerator(ImageInputOpTestModelGenerator): - def _set_config(self, cfg: Configuration) -> None: - self._config["channels"] = cfg.pop("channels", 4) - super()._set_config(cfg) - - @property - def _input_channels(self) -> int: - return asserting_cast(int, self._config["channels"]) - - -class ChannelPreservingOpTestModelGenerator(ChannelAgnosticOpTestModelGenerator): - def check_config(self) -> None: - super().check_config() - assert self._config["channels"] % 4 == 0, "# of channels must be multiple of 4" - - -class FilterOpTestModelGenerator(ImageInputOpTestModelGenerator): - def 
_set_config(self, cfg: Configuration) -> None: - self._config.update( - {key: cfg.pop(key) for key in ["K_h", "K_w", "padding", "strides"]} - ) - super()._set_config(cfg) - - def build(self) -> None: - try: - super().build() - except ValueError as e: - if e.args[0].startswith("Negative dimension size caused by"): - raise ValueError( - "Negative dimension size (Hint: if using 'valid' padding " - "verify that the kernel is no larger than the input image)" - ) from e - else: - raise diff --git a/archived/test/integration_test/test_single_op_models/test_activation/__init__.py b/archived/test/integration_test/test_single_op_models/test_activation/__init__.py deleted file mode 100644 index bdb465920..000000000 --- a/archived/test/integration_test/test_single_op_models/test_activation/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import tensorflow as tf -from abc import abstractmethod -from typing import Callable, Optional, Tuple - -from .. import ( - ChannelAgnosticOpTestModelGenerator, - test_output, -) - - -# ---------------------------------------------------------------------------- -# GENERATORS -# ---------------------------------------------------------------------------- - - -class LUTActivationOpTestModelGenerator(ChannelAgnosticOpTestModelGenerator): - @property - @abstractmethod - def act_fun(self) -> Callable[[tf.Tensor], tf.Tensor]: - raise NotImplementedError() - - def _op_layer( - self, *, input_shape: Optional[Tuple[int, int, int]] = None - ) -> tf.keras.layers.Layer: - kwargs = {"input_shape": input_shape} if input_shape else {} - return tf.keras.layers.Lambda(self.act_fun, **kwargs) diff --git a/archived/test/integration_test/test_single_op_models/test_activation/conftest.py b/archived/test/integration_test/test_single_op_models/test_activation/conftest.py deleted file mode 100644 index 5e3d94e0c..000000000 --- a/archived/test/integration_test/test_single_op_models/test_activation/conftest.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - -@pytest.fixture -def abs_output_tolerance() -> int: - return 0 diff --git a/archived/test/integration_test/test_single_op_models/test_activation/test_prelu.py b/archived/test/integration_test/test_single_op_models/test_activation/test_prelu.py deleted file mode 100644 index 44edb0d90..000000000 --- a/archived/test/integration_test/test_single_op_models/test_activation/test_prelu.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest -import tensorflow as tf -from typing import Optional, Tuple - -from tflite2xcore.model_generation import Configuration -from tflite2xcore.model_generation.utils import parse_init_config - -from . import ChannelAgnosticOpTestModelGenerator -from .
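[Note: the 'valid'-padding guard in FilterOpTestModelGenerator.build above follows from the convolution output-size arithmetic: with no padding, the spatial output is floor((image - kernel) / stride) + 1, so TensorFlow reports a negative dimension exactly when the kernel is larger than the image. A quick check of the rule:]

def valid_output_size(image: int, kernel: int, stride: int = 1) -> int:
    # 'valid' padding invents no border pixels, so the kernel must fit.
    return (image - kernel) // stride + 1

assert valid_output_size(5, 3) == 3  # 5x5 image, 3x3 kernel -> 3x3 output
assert valid_output_size(5, 7) < 1   # kernel larger than image -> invalid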
import ( # pylint: disable=unused-import - test_output, -) - - -# ---------------------------------------------------------------------------- -# GENERATORS -# ---------------------------------------------------------------------------- - - -class PReluTestModelGenerator(ChannelAgnosticOpTestModelGenerator): - def _set_config(self, cfg: Configuration) -> None: - self._config["alpha_init"] = cfg.pop("alpha_init", ("RandomUniform", -1, 1)) - super()._set_config(cfg) - - def _op_layer( - self, *, input_shape: Optional[Tuple[int, int, int]] = None - ) -> tf.keras.layers.Layer: - kwargs = {"input_shape": input_shape} if input_shape else {} - return tf.keras.layers.PReLU( - alpha_initializer=parse_init_config(*self._config["alpha_init"]), **kwargs - ) - - -GENERATOR = PReluTestModelGenerator - -# TODO: fix this if/when we support prelu -CONFIGS = {"default": {0: {"height": 5, "width": 5, "channels": 3}}} - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_single_op_models/test_activation/test_relu.py b/archived/test/integration_test/test_single_op_models/test_activation/test_relu.py deleted file mode 100644 index 30e91562a..000000000 --- a/archived/test/integration_test/test_single_op_models/test_activation/test_relu.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest -import tensorflow as tf -from typing import Callable - -from . import LUTActivationOpTestModelGenerator -from . import ( # pylint: disable=unused-import - test_output, -) - - -# ---------------------------------------------------------------------------- -# GENERATORS -# ---------------------------------------------------------------------------- - - -class ReluTestModelGenerator(LUTActivationOpTestModelGenerator): - @property - def act_fun(self) -> Callable[[tf.Tensor], tf.Tensor]: - return lambda x: tf.nn.relu(x) - - -GENERATOR = ReluTestModelGenerator - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_single_op_models/test_activation/test_relu.yml b/archived/test/integration_test/test_single_op_models/test_activation/test_relu.yml deleted file mode 100644 index fb3767686..000000000 --- a/archived/test/integration_test/test_single_op_models/test_activation/test_relu.yml +++ /dev/null @@ -1,203 +0,0 @@ -# Copyright (c) 2020, XMOS Ltd, All rights reserved -# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK -default: - 0: - channels: 12 - height: 4 - num_threads: 5 - width: 1 - 1: - channels: 49 - height: 8 - num_threads: 1 - width: 8 - 2: - channels: 1 - height: 8 - num_threads: 1 - width: 8 - 3: - channels: 7 - height: 1 - num_threads: 1 - width: 4 - 4: - channels: 1 - height: 5 - num_threads: 2 - width: 7 - 5: - channels: 32 - height: 12 - num_threads: 2 - width: 8 - 6: - channels: 7 - height: 12 - num_threads: 5 - width: 4 - 7: - channels: 12 - height: 4 - num_threads: 1 - width: 14 - 8: - channels: 32 - height: 8 - num_threads: 5 - width: 14 - 9: - channels: 32 - height: 1 - num_threads: 1 - width: 8 - 10: - channels: 49 - height: 1 - num_threads: 1 - width: 8 - 11: - channels: 25 - height: 1 - num_threads: 2 - width: 20 - 12: - channels: 4 - height: 12 - num_threads: 1 - width: 7 - 13: - channels: 25 - height: 5 - num_threads: 2 - width: 4 - 14: - 
channels: 32 - height: 12 - num_threads: 5 - width: 14 - 15: - channels: 16 - height: 1 - num_threads: 1 - width: 4 - 16: - channels: 12 - height: 4 - num_threads: 1 - width: 20 - 17: - channels: 1 - height: 4 - num_threads: 5 - width: 7 - 18: - channels: 4 - height: 15 - num_threads: 1 - width: 7 - 19: - channels: 4 - height: 8 - num_threads: 2 - width: 1 - 20: - channels: 25 - height: 15 - num_threads: 1 - width: 4 - 21: - channels: 49 - height: 8 - num_threads: 5 - width: 7 - 22: - channels: 16 - height: 8 - num_threads: 5 - width: 1 - 23: - channels: 12 - height: 4 - num_threads: 5 - width: 7 - 24: - channels: 4 - height: 1 - num_threads: 5 - width: 4 - 25: - channels: 25 - height: 5 - num_threads: 2 - width: 14 - 26: - channels: 32 - height: 12 - num_threads: 1 - width: 1 - 27: - channels: 49 - height: 15 - num_threads: 1 - width: 14 - 28: - channels: 1 - height: 5 - num_threads: 5 - width: 20 - 29: - channels: 16 - height: 4 - num_threads: 2 - width: 20 - 30: - channels: 1 - height: 15 - num_threads: 2 - width: 14 - 31: - channels: 7 - height: 1 - num_threads: 2 - width: 8 - 32: - channels: 12 - height: 15 - num_threads: 2 - width: 1 - 33: - channels: 49 - height: 12 - num_threads: 2 - width: 4 - 34: - channels: 16 - height: 15 - num_threads: 5 - width: 20 - 35: - channels: 16 - height: 15 - num_threads: 2 - width: 14 - 36: - channels: 7 - height: 8 - num_threads: 5 - width: 20 - 37: - channels: 7 - height: 5 - num_threads: 5 - width: 1 - 38: - channels: 25 - height: 5 - num_threads: 5 - width: 1 - 39: - channels: 4 - height: 5 - num_threads: 1 - width: 14 diff --git a/archived/test/integration_test/test_single_op_models/test_activation/test_relu6.py b/archived/test/integration_test/test_single_op_models/test_activation/test_relu6.py deleted file mode 100644 index ba89ccecd..000000000 --- a/archived/test/integration_test/test_single_op_models/test_activation/test_relu6.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest -import tensorflow as tf -from typing import Callable - -from . import LUTActivationOpTestModelGenerator -from . 
import ( # pylint: disable=unused-import - test_output, -) - - -# ---------------------------------------------------------------------------- -# GENERATORS -# ---------------------------------------------------------------------------- - - -class Relu6TestModelGenerator(LUTActivationOpTestModelGenerator): - @property - def act_fun(self) -> Callable[[tf.Tensor], tf.Tensor]: - return lambda x: tf.nn.relu6(x) - - -GENERATOR = Relu6TestModelGenerator - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_single_op_models/test_activation/test_relu6.yml b/archived/test/integration_test/test_single_op_models/test_activation/test_relu6.yml deleted file mode 100644 index 886f026ff..000000000 --- a/archived/test/integration_test/test_single_op_models/test_activation/test_relu6.yml +++ /dev/null @@ -1,203 +0,0 @@ -# Copyright (c) 2020, XMOS Ltd, All rights reserved -# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK -default: - 0: - channels: 4 - height: 1 - num_threads: 5 - width: 4 - 1: - channels: 7 - height: 15 - num_threads: 2 - width: 1 - 2: - channels: 1 - height: 1 - num_threads: 2 - width: 14 - 3: - channels: 1 - height: 4 - num_threads: 2 - width: 4 - 4: - channels: 25 - height: 8 - num_threads: 1 - width: 1 - 5: - channels: 25 - height: 8 - num_threads: 1 - width: 4 - 6: - channels: 25 - height: 5 - num_threads: 5 - width: 7 - 7: - channels: 49 - height: 5 - num_threads: 1 - width: 8 - 8: - channels: 32 - height: 5 - num_threads: 5 - width: 7 - 9: - channels: 12 - height: 15 - num_threads: 2 - width: 4 - 10: - channels: 32 - height: 4 - num_threads: 5 - width: 4 - 11: - channels: 12 - height: 8 - num_threads: 1 - width: 7 - 12: - channels: 16 - height: 5 - num_threads: 5 - width: 8 - 13: - channels: 12 - height: 8 - num_threads: 5 - width: 4 - 14: - channels: 16 - height: 5 - num_threads: 2 - width: 14 - 15: - channels: 7 - height: 1 - num_threads: 1 - width: 20 - 16: - channels: 4 - height: 12 - num_threads: 2 - width: 1 - 17: - channels: 49 - height: 8 - num_threads: 5 - width: 7 - 18: - channels: 7 - height: 4 - num_threads: 2 - width: 1 - 19: - channels: 7 - height: 8 - num_threads: 1 - width: 20 - 20: - channels: 25 - height: 1 - num_threads: 2 - width: 14 - 21: - channels: 16 - height: 12 - num_threads: 5 - width: 20 - 22: - channels: 32 - height: 5 - num_threads: 1 - width: 8 - 23: - channels: 32 - height: 5 - num_threads: 2 - width: 7 - 24: - channels: 49 - height: 12 - num_threads: 5 - width: 14 - 25: - channels: 12 - height: 12 - num_threads: 5 - width: 20 - 26: - channels: 1 - height: 4 - num_threads: 5 - width: 8 - 27: - channels: 25 - height: 15 - num_threads: 2 - width: 8 - 28: - channels: 16 - height: 12 - num_threads: 1 - width: 1 - 29: - channels: 7 - height: 15 - num_threads: 1 - width: 14 - 30: - channels: 4 - height: 15 - num_threads: 1 - width: 14 - 31: - channels: 16 - height: 12 - num_threads: 5 - width: 8 - 32: - channels: 4 - height: 1 - num_threads: 2 - width: 1 - 33: - channels: 1 - height: 12 - num_threads: 1 - width: 1 - 34: - channels: 49 - height: 15 - num_threads: 5 - width: 7 - 35: - channels: 1 - height: 4 - num_threads: 1 - width: 14 - 36: - channels: 32 - height: 15 - num_threads: 5 - width: 20 - 37: - channels: 49 - height: 1 - num_threads: 2 - width: 7 - 38: - channels: 12 - height: 4 - num_threads: 1 - width: 20 - 39: - channels: 4 - height: 1 - num_threads: 2 - width: 8 diff --git a/archived/test/integration_test/test_single_op_models/test_activation/test_sigmoid.py 
b/archived/test/integration_test/test_single_op_models/test_activation/test_sigmoid.py deleted file mode 100644 index 41396b2ba..000000000 --- a/archived/test/integration_test/test_single_op_models/test_activation/test_sigmoid.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest -import tensorflow as tf -from typing import Callable - -from . import LUTActivationOpTestModelGenerator -from . import ( # pylint: disable=unused-import - test_output, -) - - -# ---------------------------------------------------------------------------- -# GENERATORS -# ---------------------------------------------------------------------------- - - -class SigmoidTestModelGenerator(LUTActivationOpTestModelGenerator): - @property - def act_fun(self) -> Callable[[tf.Tensor], tf.Tensor]: - return lambda x: tf.nn.sigmoid(x) - - -GENERATOR = SigmoidTestModelGenerator - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_single_op_models/test_activation/test_sigmoid.yml b/archived/test/integration_test/test_single_op_models/test_activation/test_sigmoid.yml deleted file mode 100644 index ab62ede3e..000000000 --- a/archived/test/integration_test/test_single_op_models/test_activation/test_sigmoid.yml +++ /dev/null @@ -1,203 +0,0 @@ -# Copyright (c) 2020, XMOS Ltd, All rights reserved -# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK -default: - 0: - channels: 49 - height: 4 - num_threads: 1 - width: 8 - 1: - channels: 7 - height: 4 - num_threads: 5 - width: 8 - 2: - channels: 4 - height: 15 - num_threads: 1 - width: 1 - 3: - channels: 1 - height: 5 - num_threads: 1 - width: 8 - 4: - channels: 1 - height: 5 - num_threads: 5 - width: 1 - 5: - channels: 32 - height: 12 - num_threads: 1 - width: 20 - 6: - channels: 1 - height: 15 - num_threads: 1 - width: 8 - 7: - channels: 4 - height: 12 - num_threads: 2 - width: 1 - 8: - channels: 7 - height: 12 - num_threads: 2 - width: 20 - 9: - channels: 1 - height: 5 - num_threads: 1 - width: 8 - 10: - channels: 12 - height: 4 - num_threads: 2 - width: 4 - 11: - channels: 12 - height: 1 - num_threads: 1 - width: 7 - 12: - channels: 4 - height: 4 - num_threads: 1 - width: 8 - 13: - channels: 25 - height: 8 - num_threads: 5 - width: 1 - 14: - channels: 16 - height: 8 - num_threads: 5 - width: 20 - 15: - channels: 4 - height: 1 - num_threads: 2 - width: 20 - 16: - channels: 16 - height: 15 - num_threads: 5 - width: 20 - 17: - channels: 7 - height: 4 - num_threads: 2 - width: 14 - 18: - channels: 16 - height: 8 - num_threads: 2 - width: 14 - 19: - channels: 32 - height: 12 - num_threads: 2 - width: 8 - 20: - channels: 49 - height: 4 - num_threads: 5 - width: 4 - 21: - channels: 12 - height: 1 - num_threads: 2 - width: 14 - 22: - channels: 12 - height: 1 - num_threads: 5 - width: 7 - 23: - channels: 25 - height: 1 - num_threads: 2 - width: 1 - 24: - channels: 32 - height: 5 - num_threads: 1 - width: 7 - 25: - channels: 16 - height: 15 - num_threads: 5 - width: 14 - 26: - channels: 12 - height: 5 - num_threads: 2 - width: 4 - 27: - channels: 32 - height: 15 - num_threads: 5 - width: 7 - 28: - channels: 4 - height: 8 - num_threads: 5 - width: 14 - 29: - channels: 49 - height: 4 - num_threads: 2 - width: 4 - 30: - channels: 49 - height: 8 - num_threads: 1 - width: 4 - 31: - channels: 25 - height: 12 - num_threads: 1 - width: 14 - 32: - channels: 1 - height: 8 - num_threads: 5 - width: 7 - 33: - channels: 25 - height: 5 - num_threads: 2 - 
width: 4 - 34: - channels: 16 - height: 15 - num_threads: 1 - width: 14 - 35: - channels: 32 - height: 12 - num_threads: 1 - width: 7 - 36: - channels: 49 - height: 12 - num_threads: 2 - width: 7 - 37: - channels: 7 - height: 1 - num_threads: 2 - width: 1 - 38: - channels: 25 - height: 8 - num_threads: 5 - width: 1 - 39: - channels: 7 - height: 5 - num_threads: 5 - width: 20 diff --git a/archived/test/integration_test/test_single_op_models/test_activation/test_tanh.py b/archived/test/integration_test/test_single_op_models/test_activation/test_tanh.py deleted file mode 100644 index e6dfaa468..000000000 --- a/archived/test/integration_test/test_single_op_models/test_activation/test_tanh.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest -import tensorflow as tf -from typing import Callable - -from . import LUTActivationOpTestModelGenerator -from . import ( # pylint: disable=unused-import - test_output, -) - - -# ---------------------------------------------------------------------------- -# GENERATORS -# ---------------------------------------------------------------------------- - - -class TanhTestModelGenerator(LUTActivationOpTestModelGenerator): - @property - def act_fun(self) -> Callable[[tf.Tensor], tf.Tensor]: - return lambda x: tf.nn.tanh(x) - - -GENERATOR = TanhTestModelGenerator - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_single_op_models/test_activation/test_tanh.yml b/archived/test/integration_test/test_single_op_models/test_activation/test_tanh.yml deleted file mode 100644 index 38897ffb4..000000000 --- a/archived/test/integration_test/test_single_op_models/test_activation/test_tanh.yml +++ /dev/null @@ -1,203 +0,0 @@ -# Copyright (c) 2020, XMOS Ltd, All rights reserved -# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK -default: - 0: - channels: 1 - height: 1 - num_threads: 1 - width: 4 - 1: - channels: 7 - height: 1 - num_threads: 1 - width: 14 - 2: - channels: 32 - height: 5 - num_threads: 2 - width: 14 - 3: - channels: 16 - height: 4 - num_threads: 5 - width: 1 - 4: - channels: 4 - height: 1 - num_threads: 5 - width: 1 - 5: - channels: 25 - height: 8 - num_threads: 5 - width: 7 - 6: - channels: 32 - height: 15 - num_threads: 1 - width: 8 - 7: - channels: 4 - height: 15 - num_threads: 5 - width: 4 - 8: - channels: 12 - height: 8 - num_threads: 2 - width: 8 - 9: - channels: 7 - height: 12 - num_threads: 5 - width: 7 - 10: - channels: 25 - height: 15 - num_threads: 1 - width: 4 - 11: - channels: 4 - height: 8 - num_threads: 5 - width: 7 - 12: - channels: 1 - height: 4 - num_threads: 1 - width: 14 - 13: - channels: 16 - height: 8 - num_threads: 2 - width: 20 - 14: - channels: 49 - height: 15 - num_threads: 2 - width: 7 - 15: - channels: 49 - height: 4 - num_threads: 5 - width: 14 - 16: - channels: 16 - height: 4 - num_threads: 5 - width: 8 - 17: - channels: 1 - height: 1 - num_threads: 5 - width: 1 - 18: - channels: 16 - height: 5 - num_threads: 5 - width: 20 - 19: - channels: 49 - height: 15 - num_threads: 2 - width: 8 - 20: - channels: 1 - height: 12 - num_threads: 1 - width: 20 - 21: - channels: 7 - height: 1 - num_threads: 2 - width: 14 - 22: - channels: 4 - height: 8 - num_threads: 2 - width: 1 - 23: - channels: 12 - height: 12 - num_threads: 1 - width: 7 - 24: - channels: 32 - height: 5 - num_threads: 5 - width: 8 - 25: - channels: 7 - height: 5 - num_threads: 5 - width: 4 - 26: - channels: 7 - height: 12 - 
num_threads: 1 - width: 14 - 27: - channels: 25 - height: 12 - num_threads: 2 - width: 8 - 28: - channels: 16 - height: 4 - num_threads: 1 - width: 4 - 29: - channels: 4 - height: 5 - num_threads: 5 - width: 20 - 30: - channels: 25 - height: 4 - num_threads: 1 - width: 4 - 31: - channels: 32 - height: 5 - num_threads: 1 - width: 20 - 32: - channels: 25 - height: 15 - num_threads: 1 - width: 20 - 33: - channels: 12 - height: 15 - num_threads: 2 - width: 20 - 34: - channels: 32 - height: 8 - num_threads: 2 - width: 1 - 35: - channels: 49 - height: 12 - num_threads: 2 - width: 4 - 36: - channels: 12 - height: 1 - num_threads: 2 - width: 8 - 37: - channels: 1 - height: 8 - num_threads: 1 - width: 1 - 38: - channels: 12 - height: 5 - num_threads: 1 - width: 1 - 39: - channels: 49 - height: 1 - num_threads: 2 - width: 7 diff --git a/archived/test/integration_test/test_single_op_models/test_add.py b/archived/test/integration_test/test_single_op_models/test_add.py deleted file mode 100644 index 237aecf02..000000000 --- a/archived/test/integration_test/test_single_op_models/test_add.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -import tensorflow as tf -from typing import Optional, Tuple -import random - -from . import ChannelAgnosticOpTestModelGenerator -from . import ( # pylint: disable=unused-import - test_output, -) - - -# ---------------------------------------------------------------------------- -# GENERATORS -# ---------------------------------------------------------------------------- - - -class AddModelGenerator(ChannelAgnosticOpTestModelGenerator): - def _build_core_model(self) -> tf.keras.Model: - input = tf.keras.Input(shape=self._input_shape) - x2 = tf.random.normal([1, *self._input_shape], mean=random.random()) - out = self._op_layer()([input, x2]) - return tf.keras.models.Model(inputs=input, outputs=out) - - def _op_layer( - self, *, input_shape: Optional[Tuple[int, int, int]] = None - ) -> tf.keras.layers.Layer: - return tf.keras.layers.Add() - - -GENERATOR = AddModelGenerator - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_single_op_models/test_add.yml b/archived/test/integration_test/test_single_op_models/test_add.yml deleted file mode 100644 index 2a8cdd04c..000000000 --- a/archived/test/integration_test/test_single_op_models/test_add.yml +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright (c) 2020, XMOS Ltd, All rights reserved -# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK -default: - 0: - channels: 4 - height: 15 - num_threads: 1 - width: 5 - 1: - channels: 51 - height: 3 - num_threads: 1 - width: 1 - 2: - channels: 64 - height: 15 - num_threads: 2 - width: 7 - 3: - channels: 53 - height: 5 - num_threads: 1 - width: 6 - 4: - channels: 11 - height: 8 - num_threads: 2 - width: 6 - 5: - channels: 64 - height: 1 - num_threads: 1 - width: 6 - 6: - channels: 13 - height: 15 - num_threads: 2 - width: 1 - 7: - channels: 11 - height: 1 - num_threads: 2 - width: 9 - 8: - channels: 11 - height: 13 - num_threads: 5 - width: 7 - 9: - channels: 13 - height: 13 - num_threads: 5 - width: 9 - 10: - channels: 53 - height: 1 - num_threads: 1 - width: 7 - 11: - channels: 51 - height: 3 - num_threads: 1 - width: 6 - 12: - channels: 64 - height: 8 - num_threads: 5 - width: 9 - 13: - channels: 4 - height: 5 - num_threads: 1 - width: 11 - 14: - channels: 53 - height: 13 - num_threads: 2 - width: 11 - 15: - channels: 13 - height: 13 
- num_threads: 5 - width: 1 - 16: - channels: 51 - height: 5 - num_threads: 5 - width: 11 - 17: - channels: 51 - height: 8 - num_threads: 5 - width: 9 - 18: - channels: 13 - height: 3 - num_threads: 5 - width: 5 - 19: - channels: 4 - height: 5 - num_threads: 2 - width: 5 diff --git a/archived/test/integration_test/test_single_op_models/test_binarized/__init__.py b/archived/test/integration_test/test_single_op_models/test_binarized/__init__.py deleted file mode 100644 index d3cdb651b..000000000 --- a/archived/test/integration_test/test_single_op_models/test_binarized/__init__.py +++ /dev/null @@ -1,215 +0,0 @@ -# Copyright 2019-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import _pytest -import larq -import numpy as np -import tensorflow as tf -import larq_compute_engine as lce -from typing import Optional, Tuple, Type, Any, Union, NamedTuple - -from tflite2xcore.utils import get_bitpacked_shape -from tflite2xcore.xcore_schema import ( - Tensor, - Buffer, - ExternalOpCodes, - TensorType, - XCOREModel, -) -from tflite2xcore.pass_manager import PassManager -from tflite2xcore.transformation_passes import ( - CanonicalizeLceQuantizedInputPass, - CanonicalizeLceQuantizedOutputPass, - UnifyEmptyBuffersPass, -) -from tflite2xcore.transformation_passes.transformation_passes import ( - OutputTensorMatchingPass, -) -from tflite2xcore.converter import CleanupManager - -from tflite2xcore.model_generation import Configuration, TFLiteModel, Hook -from tflite2xcore.model_generation.evaluators import LarqEvaluator -from tflite2xcore.model_generation.runners import Runner -from tflite2xcore.model_generation.converters import KerasModelConverter, LarqConverter -from tflite2xcore.model_generation.data_factories import InputInitializerDataFactory -from tflite2xcore.model_generation.utils import parse_init_config - -from .. 
import ( - BinarizedTestRunner, - test_mean_abs_diffs, - test_output, -) -from ..test_conv2d import Conv2dWordAlignedTestModelGenerator - - -# ---------------------------------------------------------------------------- -# GENERATORS -# ---------------------------------------------------------------------------- - - -class LarqCompositeTestModelGenerator(Conv2dWordAlignedTestModelGenerator): - def _set_config(self, cfg: Configuration) -> None: - self._config["activation"] = cfg.pop("activation", "linear") - - self._config["output_range"] = cfg.pop("output_range", (-3, 3)) - - self._config["input_range"] = input_range = cfg.pop("input_range") - cfg["input_init"] = cfg.pop("input_init", ("RandomUniform", *input_range)) - - self._config.update( - {"bias_init": cfg.pop("bias_init", ("RandomUniform", -1, 1))} - ) - super()._set_config(cfg) - - def check_config(self) -> None: - super().check_config() - - assert self._config["activation"] in ("linear", "relu", "relu6") - - input_range = self._config["input_range"] - assert len(input_range) == 2 - assert input_range[0] <= 0 <= input_range[1] - - output_range = self._config["output_range"] - assert len(output_range) == 2 - assert output_range[0] <= 0 <= output_range[1] - - def _op_layer( - self, *, input_shape: Optional[Tuple[int, int, int]] = None - ) -> tf.keras.layers.Conv2D: - kwargs = {"input_shape": input_shape} if input_shape else {} - cfg = self._config - return larq.layers.QuantConv2D( - filters=cfg["output_channels"], - kernel_size=(cfg["K_h"], cfg["K_w"]), - padding=cfg["padding"], - pad_values=1, - strides=cfg["strides"], - input_quantizer="ste_sign", - kernel_quantizer="ste_sign", - kernel_constraint="weight_clip", - use_bias=False, - kernel_initializer=parse_init_config(*cfg["weight_init"]), - **kwargs, - ) - - def _fake_quant( - self, x: tf.Tensor, range_min: int = 0, range_max: int = 1 - ) -> tf.Tensor: - return tf.quantization.fake_quant_with_min_max_vars(x, range_min, range_max) - - def _build_core_model(self) -> tf.keras.Model: - img = tf.keras.layers.Input(shape=self._input_shape) - x = self._fake_quant(img, *self._config["input_range"]) - x = self._op_layer()(x) - if self._config["activation"] == "relu": - x = tf.keras.layers.ReLU()(x) - elif self._config["activation"] == "relu6": - x = tf.keras.layers.ReLU(6)(x) - x = tf.keras.layers.BatchNormalization( - beta_initializer=parse_init_config(*self._config["bias_init"]) - )(x) - x = self._fake_quant(x, *self._config["output_range"]) - return tf.keras.Model(img, x) - - -class BConv2dGenericTestModelGenerator(LarqCompositeTestModelGenerator): - def _set_config(self, cfg: Configuration) -> None: - assert ( - "input_range" not in cfg - ), f"input_range cannot be specified for BConv2d tests" - cfg["input_range"] = (np.iinfo(np.int32).min, np.iinfo(np.int32).max) - - assert ( - "input_init" not in cfg - ), f"input_init cannot be specified for BConv2d tests" - super()._set_config(cfg) - - def check_config(self) -> None: - super().check_config() - assert ( - self._config["input_channels"] % 32 == 0 - ), "# of input channels must be multiple of 32" - - -# ---------------------------------------------------------------------------- -# CONVERTERS -# ---------------------------------------------------------------------------- - - -class RemoveSingleOutputOperatorPass(OutputTensorMatchingPass): - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self._done = False - - def match(self, tensor: Tensor) -> bool: - return (not self._done) and 
super().match(tensor) - - def mutate(self, tensor: Tensor) -> None: - subgraph = tensor.subgraph - out_op = tensor.producers[0] - - subgraph.outputs.remove(tensor) - subgraph.outputs.append(out_op.inputs[0]) - subgraph.remove_operator(out_op) - - self._done = True - - -class LarqSingleOpConverter(LarqConverter): - """Converts a larq composite TFL model to a single op TFL model. - - This converter works around the fact that some larq ops - cannot be generated directly from keras layers. - """ - - def __init__( - self, - runner: Runner, - input_model_hook: Hook[tf.keras.Model], - strip: bool = False, - remove_last_op: bool = False, - ) -> None: - super().__init__(runner, input_model_hook) - self._strip = strip - self._remove_last_op = remove_last_op - - def convert(self) -> None: - super().convert() - - # import pathlib - # with open(pathlib.Path("/Users/deepakpanickal/code/ai_tools/experimental/xformer/xformer_input1.tflite").resolve(), "wb") as fd: - # fd.write(self._model) - - model_ir = XCOREModel.deserialize(self._model) - pass_mgr = PassManager(model_ir) - - if self._strip: - pass_mgr.register_pass(CanonicalizeLceQuantizedInputPass()) - pass_mgr.register_pass(CanonicalizeLceQuantizedOutputPass()) - if self._remove_last_op: - pass_mgr.register_pass(RemoveSingleOutputOperatorPass()) - - pass_mgr.register_pass(UnifyEmptyBuffersPass()) - pass_mgr.register_passes(CleanupManager()) - - pass_mgr.run_passes() - - self._model = model_ir.serialize() - - # with open(pathlib.Path("/Users/deepakpanickal/code/ai_tools/experimental/xformer/xformer_input3.tflite").resolve(), "wb") as fd: - # fd.write(self._model) - - -# ---------------------------------------------------------------------------- -# RUNNERS -# ---------------------------------------------------------------------------- - - -class BinarizedSingleOpRunner(BinarizedTestRunner): - def make_repr_data_factory(self) -> InputInitializerDataFactory: - return InputInitializerDataFactory( - self, - lambda: get_bitpacked_shape(self._model_generator.input_shape), - dtype=tf.int32, - ) diff --git a/archived/test/integration_test/test_single_op_models/test_binarized/conftest.py b/archived/test/integration_test/test_single_op_models/test_binarized/conftest.py deleted file mode 100644 index d67efb8e3..000000000 --- a/archived/test/integration_test/test_single_op_models/test_binarized/conftest.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture -def mean_abs_diff_tolerance() -> float: - return 0.0 - - -@pytest.fixture -def bitpacked_outputs() -> bool: - return True diff --git a/archived/test/integration_test/test_single_op_models/test_binarized/padded/__init__.py b/archived/test/integration_test/test_single_op_models/test_binarized/padded/__init__.py deleted file mode 100644 index 6dc6ebc93..000000000 --- a/archived/test/integration_test/test_single_op_models/test_binarized/padded/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from ..
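[Note: BinarizedSingleOpRunner above generates bitpacked int32 input data via get_bitpacked_shape. In the larq-compute-engine layout, 32 binary channels are packed into one int32 word along the channel axis, which is also why the BConv2d generators assert that input channels are a multiple of 32. A sketch of the shape transformation; bitpacked_shape here is a hypothetical stand-in for the real helper:]

from typing import Tuple

def bitpacked_shape(shape: Tuple[int, ...]) -> Tuple[int, ...]:
    # Pack the trailing (channel) axis: 32 binary values per int32 word.
    *leading, channels = shape
    return (*leading, (channels + 31) // 32)

assert bitpacked_shape((10, 11, 256)) == (10, 11, 8)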
import ( - test_mean_abs_diffs, - test_output, -) diff --git a/archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_bin_DI_padded.py b/archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_bin_DI_padded.py deleted file mode 100644 index 4dc47a3b8..000000000 --- a/archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_bin_DI_padded.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from ..test_bconv2d_bin_DI import ( # pylint: disable=unused-import - GENERATOR, - RUNNER, -) - -from . import ( # pylint: disable=unused-import - test_mean_abs_diffs, -) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_bin_DI_padded.yml b/archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_bin_DI_padded.yml deleted file mode 100644 index 1a8e7efbe..000000000 --- a/archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_bin_DI_padded.yml +++ /dev/null @@ -1,243 +0,0 @@ -# Copyright (c) 2020, XMOS Ltd, All rights reserved -# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK -default: - 0: - K_h: 2 - K_w: 3 - height: 10 - input_channels: 256 - num_threads: 2 - output_channels: 32 - padding: same - strides: - - 1 - - 1 - width: 11 - 1: - K_h: 3 - K_w: 3 - height: 10 - input_channels: 512 - num_threads: 2 - output_channels: 32 - padding: same - strides: - - 1 - - 2 - width: 6 - 2: - K_h: 6 - K_w: 3 - height: 10 - input_channels: 256 - num_threads: 1 - output_channels: 64 - padding: same - strides: - - 2 - - 2 - width: 8 - 3: - K_h: 3 - K_w: 4 - height: 7 - input_channels: 256 - num_threads: 1 - output_channels: 64 - padding: same - strides: - - 1 - - 1 - width: 8 - 4: - K_h: 2 - K_w: 3 - height: 10 - input_channels: 256 - num_threads: 5 - output_channels: 64 - padding: same - strides: - - 1 - - 1 - width: 6 - 5: - K_h: 2 - K_w: 3 - height: 7 - input_channels: 512 - num_threads: 5 - output_channels: 64 - padding: same - strides: - - 1 - - 2 - width: 8 - 6: - K_h: 3 - K_w: 4 - height: 7 - input_channels: 256 - num_threads: 2 - output_channels: 64 - padding: same - strides: - - 2 - - 1 - width: 6 - 7: - K_h: 6 - K_w: 5 - height: 12 - input_channels: 256 - num_threads: 1 - output_channels: 64 - padding: same - strides: - - 2 - - 1 - width: 8 - 8: - K_h: 6 - K_w: 3 - height: 12 - input_channels: 512 - num_threads: 2 - output_channels: 32 - padding: same - strides: - - 1 - - 2 - width: 6 - 9: - K_h: 6 - K_w: 5 - height: 12 - input_channels: 512 - num_threads: 5 - output_channels: 32 - padding: same - strides: - - 2 - - 2 - width: 8 - 10: - K_h: 3 - K_w: 4 - height: 10 - input_channels: 256 - num_threads: 1 - output_channels: 64 - padding: same - strides: - - 1 - - 2 - width: 6 - 11: - K_h: 6 - K_w: 5 - height: 12 - input_channels: 256 - num_threads: 1 - output_channels: 32 - padding: same - strides: - - 2 - - 1 - width: 11 - 12: - K_h: 2 - K_w: 4 - height: 7 - input_channels: 512 - num_threads: 5 - output_channels: 32 - padding: same - strides: - - 2 - - 2 - width: 11 - 13: - K_h: 3 - K_w: 5 - height: 10 - input_channels: 256 - num_threads: 2 - output_channels: 32 - padding: same - strides: - - 1 - - 2 - width: 11 - 14: - K_h: 2 - K_w: 4 - height: 10 - input_channels: 512 - num_threads: 2 - output_channels: 32 - padding: same - strides: - - 2 
- - 1 - width: 11 - 15: - K_h: 6 - K_w: 4 - height: 12 - input_channels: 512 - num_threads: 5 - output_channels: 64 - padding: same - strides: - - 2 - - 1 - width: 11 - 16: - K_h: 6 - K_w: 5 - height: 7 - input_channels: 512 - num_threads: 2 - output_channels: 32 - padding: same - strides: - - 2 - - 2 - width: 8 - 17: - K_h: 3 - K_w: 5 - height: 12 - input_channels: 256 - num_threads: 5 - output_channels: 64 - padding: same - strides: - - 1 - - 1 - width: 11 - 18: - K_h: 2 - K_w: 5 - height: 7 - input_channels: 512 - num_threads: 2 - output_channels: 64 - padding: same - strides: - - 2 - - 2 - width: 6 - 19: - K_h: 2 - K_w: 4 - height: 12 - input_channels: 512 - num_threads: 1 - output_channels: 32 - padding: same - strides: - - 1 - - 1 - width: 6 diff --git a/archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_bin_padded.py b/archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_bin_padded.py deleted file mode 100644 index 910196812..000000000 --- a/archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_bin_padded.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from ..test_bconv2d_bin import ( # pylint: disable=unused-import - GENERATOR, - RUNNER, -) - -from . import ( # pylint: disable=unused-import - test_mean_abs_diffs, -) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_bin_padded.yml b/archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_bin_padded.yml deleted file mode 100644 index 2712a2a0d..000000000 --- a/archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_bin_padded.yml +++ /dev/null @@ -1,243 +0,0 @@ -# Copyright (c) 2020, XMOS Ltd, All rights reserved -# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK -default: - 0: - K_h: 2 - K_w: 4 - height: 12 - input_channels: 32 - num_threads: 2 - output_channels: 32 - padding: same - strides: - - 2 - - 2 - width: 6 - 1: - K_h: 6 - K_w: 1 - height: 7 - input_channels: 32 - num_threads: 1 - output_channels: 64 - padding: same - strides: - - 1 - - 2 - width: 8 - 2: - K_h: 3 - K_w: 1 - height: 12 - input_channels: 128 - num_threads: 2 - output_channels: 64 - padding: same - strides: - - 2 - - 1 - width: 11 - 3: - K_h: 6 - K_w: 3 - height: 10 - input_channels: 32 - num_threads: 5 - output_channels: 64 - padding: same - strides: - - 1 - - 1 - width: 8 - 4: - K_h: 1 - K_w: 3 - height: 7 - input_channels: 128 - num_threads: 2 - output_channels: 32 - padding: same - strides: - - 2 - - 2 - width: 8 - 5: - K_h: 1 - K_w: 5 - height: 7 - input_channels: 32 - num_threads: 1 - output_channels: 32 - padding: same - strides: - - 2 - - 1 - width: 11 - 6: - K_h: 3 - K_w: 1 - height: 7 - input_channels: 128 - num_threads: 2 - output_channels: 64 - padding: same - strides: - - 1 - - 1 - width: 11 - 7: - K_h: 1 - K_w: 4 - height: 12 - input_channels: 128 - num_threads: 1 - output_channels: 32 - padding: same - strides: - - 2 - - 2 - width: 8 - 8: - K_h: 2 - K_w: 5 - height: 12 - input_channels: 32 - num_threads: 2 - output_channels: 32 - padding: same - strides: - - 1 - - 2 - width: 6 - 9: - K_h: 6 - K_w: 3 - height: 10 - input_channels: 32 - num_threads: 5 - output_channels: 64 - padding: same - strides: - - 2 - - 2 - width: 6 - 10: - K_h: 2 - K_w: 3 - height: 10 - 
input_channels: 128 - num_threads: 5 - output_channels: 32 - padding: same - strides: - - 2 - - 1 - width: 11 - 11: - K_h: 6 - K_w: 4 - height: 10 - input_channels: 128 - num_threads: 2 - output_channels: 64 - padding: same - strides: - - 2 - - 2 - width: 8 - 12: - K_h: 2 - K_w: 3 - height: 7 - input_channels: 32 - num_threads: 5 - output_channels: 32 - padding: same - strides: - - 1 - - 1 - width: 8 - 13: - K_h: 1 - K_w: 4 - height: 7 - input_channels: 128 - num_threads: 5 - output_channels: 64 - padding: same - strides: - - 1 - - 1 - width: 6 - 14: - K_h: 3 - K_w: 1 - height: 12 - input_channels: 32 - num_threads: 5 - output_channels: 64 - padding: same - strides: - - 1 - - 2 - width: 6 - 15: - K_h: 6 - K_w: 4 - height: 10 - input_channels: 128 - num_threads: 1 - output_channels: 32 - padding: same - strides: - - 1 - - 1 - width: 11 - 16: - K_h: 3 - K_w: 5 - height: 10 - input_channels: 128 - num_threads: 1 - output_channels: 64 - padding: same - strides: - - 1 - - 2 - width: 8 - 17: - K_h: 1 - K_w: 5 - height: 10 - input_channels: 32 - num_threads: 5 - output_channels: 64 - padding: same - strides: - - 2 - - 1 - width: 11 - 18: - K_h: 2 - K_w: 1 - height: 10 - input_channels: 128 - num_threads: 1 - output_channels: 32 - padding: same - strides: - - 1 - - 2 - width: 11 - 19: - K_h: 3 - K_w: 5 - height: 12 - input_channels: 32 - num_threads: 1 - output_channels: 32 - padding: same - strides: - - 2 - - 1 - width: 6 diff --git a/archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_int8_DIDO_padded.py b/archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_int8_DIDO_padded.py deleted file mode 100644 index 4e83acd34..000000000 --- a/archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_int8_DIDO_padded.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from ..test_bconv2d_int8_DIDO import ( # pylint: disable=unused-import - GENERATOR, - RUNNER, - bitpacked_outputs, -) - -from . 
import ( # pylint: disable=unused-import - test_output, -) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_int8_DIDO_padded.yml b/archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_int8_DIDO_padded.yml deleted file mode 100644 index 91ba069c7..000000000 --- a/archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_int8_DIDO_padded.yml +++ /dev/null @@ -1,303 +0,0 @@ -# Copyright (c) 2020, XMOS Ltd, All rights reserved -# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK -default: - 0: - K_h: 1 - K_w: 4 - activation: relu - height: 10 - input_channels: 256 - output_channels: 48 - output_range: - - -4 - - 1 - padding: same - strides: - - 1 - - 1 - width: 8 - 1: - K_h: 6 - K_w: 1 - activation: relu - height: 7 - input_channels: 256 - output_channels: 48 - output_range: - - -2 - - 5 - padding: same - strides: - - 1 - - 2 - width: 11 - 2: - K_h: 6 - K_w: 3 - activation: relu - height: 10 - input_channels: 256 - output_channels: 48 - output_range: - - -4 - - 3 - padding: same - strides: - - 1 - - 1 - width: 8 - 3: - K_h: 6 - K_w: 4 - activation: relu - height: 12 - input_channels: 256 - output_channels: 16 - output_range: - - 0 - - 5 - padding: same - strides: - - 1 - - 2 - width: 6 - 4: - K_h: 1 - K_w: 3 - activation: relu - height: 12 - input_channels: 512 - output_channels: 48 - output_range: - - 0 - - 1 - padding: same - strides: - - 1 - - 2 - width: 11 - 5: - K_h: 6 - K_w: 1 - activation: relu - height: 7 - input_channels: 256 - output_channels: 16 - output_range: - - -4 - - 3 - padding: same - strides: - - 2 - - 2 - width: 6 - 6: - K_h: 3 - K_w: 5 - activation: relu - height: 7 - input_channels: 256 - output_channels: 16 - output_range: - - -2 - - 1 - padding: same - strides: - - 1 - - 1 - width: 11 - 7: - K_h: 1 - K_w: 1 - activation: relu - height: 7 - input_channels: 512 - output_channels: 48 - output_range: - - -4 - - 3 - padding: same - strides: - - 2 - - 2 - width: 8 - 8: - K_h: 2 - K_w: 5 - activation: relu - height: 10 - input_channels: 512 - output_channels: 48 - output_range: - - -2 - - 5 - padding: same - strides: - - 2 - - 2 - width: 8 - 9: - K_h: 2 - K_w: 1 - activation: relu - height: 10 - input_channels: 512 - output_channels: 16 - output_range: - - 0 - - 3 - padding: same - strides: - - 1 - - 1 - width: 8 - 10: - K_h: 3 - K_w: 1 - activation: relu - height: 12 - input_channels: 512 - output_channels: 16 - output_range: - - -4 - - 5 - padding: same - strides: - - 2 - - 1 - width: 11 - 11: - K_h: 3 - K_w: 4 - activation: relu - height: 12 - input_channels: 512 - output_channels: 48 - output_range: - - -4 - - 5 - padding: same - strides: - - 1 - - 1 - width: 8 - 12: - K_h: 1 - K_w: 4 - activation: relu - height: 7 - input_channels: 512 - output_channels: 48 - output_range: - - -2 - - 1 - padding: same - strides: - - 2 - - 2 - width: 11 - 13: - K_h: 3 - K_w: 5 - activation: relu - height: 10 - input_channels: 256 - output_channels: 16 - output_range: - - -2 - - 3 - padding: same - strides: - - 2 - - 2 - width: 6 - 14: - K_h: 6 - K_w: 4 - activation: relu - height: 12 - input_channels: 256 - output_channels: 16 - output_range: - - 0 - - 3 - padding: same - strides: - - 1 - - 2 - width: 11 - 15: - K_h: 2 - K_w: 3 - activation: relu - height: 10 - input_channels: 512 - output_channels: 16 - output_range: - - -2 - - 3 - padding: same - strides: - - 2 - - 1 - width: 11 - 16: - K_h: 2 - K_w: 5 - activation: relu - 
height: 10 - input_channels: 256 - output_channels: 48 - output_range: - - 0 - - 1 - padding: same - strides: - - 2 - - 1 - width: 6 - 17: - K_h: 2 - K_w: 3 - activation: relu - height: 7 - input_channels: 512 - output_channels: 48 - output_range: - - 0 - - 1 - padding: same - strides: - - 2 - - 1 - width: 8 - 18: - K_h: 3 - K_w: 3 - activation: relu - height: 12 - input_channels: 512 - output_channels: 16 - output_range: - - 0 - - 5 - padding: same - strides: - - 2 - - 1 - width: 6 - 19: - K_h: 1 - K_w: 5 - activation: relu - height: 7 - input_channels: 256 - output_channels: 16 - output_range: - - -4 - - 1 - padding: same - strides: - - 1 - - 2 - width: 6 diff --git a/archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_int8_padded.py b/archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_int8_padded.py deleted file mode 100644 index 9074ca416..000000000 --- a/archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_int8_padded.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from ..test_bconv2d_int8 import ( # pylint: disable=unused-import - GENERATOR, - RUNNER, - bitpacked_outputs, -) - -from . import ( # pylint: disable=unused-import - test_output, -) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_int8_padded.yml b/archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_int8_padded.yml deleted file mode 100644 index e3d2da861..000000000 --- a/archived/test/integration_test/test_single_op_models/test_binarized/padded/test_bconv2d_int8_padded.yml +++ /dev/null @@ -1,323 +0,0 @@ -# Copyright (c) 2020, XMOS Ltd, All rights reserved -# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK -default: - 0: - K_h: 2 - K_w: 4 - activation: relu - height: 10 - input_channels: 32 - num_threads: 5 - output_channels: 4 - output_range: - - -4 - - 1 - padding: same - strides: - - 2 - - 1 - width: 8 - 1: - K_h: 3 - K_w: 5 - activation: relu - height: 10 - input_channels: 128 - num_threads: 5 - output_channels: 32 - output_range: - - -4 - - 5 - padding: same - strides: - - 2 - - 2 - width: 6 - 2: - K_h: 1 - K_w: 5 - activation: relu - height: 12 - input_channels: 320 - num_threads: 1 - output_channels: 4 - output_range: - - -2 - - 3 - padding: same - strides: - - 1 - - 2 - width: 8 - 3: - K_h: 3 - K_w: 4 - activation: relu - height: 7 - input_channels: 128 - num_threads: 5 - output_channels: 32 - output_range: - - 0 - - 5 - padding: same - strides: - - 2 - - 2 - width: 6 - 4: - K_h: 2 - K_w: 3 - activation: relu - height: 10 - input_channels: 320 - num_threads: 1 - output_channels: 28 - output_range: - - -2 - - 5 - padding: same - strides: - - 1 - - 2 - width: 8 - 5: - K_h: 2 - K_w: 1 - activation: relu - height: 7 - input_channels: 32 - num_threads: 1 - output_channels: 4 - output_range: - - -4 - - 1 - padding: same - strides: - - 1 - - 2 - width: 6 - 6: - K_h: 3 - K_w: 3 - activation: relu - height: 10 - input_channels: 128 - num_threads: 1 - output_channels: 4 - output_range: - - -4 - - 3 - padding: same - strides: - - 1 - - 1 - width: 11 - 7: - K_h: 1 - K_w: 3 - activation: relu - height: 12 - input_channels: 320 - num_threads: 5 - output_channels: 4 - output_range: - - -2 - - 1 - padding: same - strides: - - 2 - - 2 - width: 6 - 8: - K_h: 1 - K_w: 5 - 
activation: relu - height: 7 - input_channels: 128 - num_threads: 2 - output_channels: 28 - output_range: - - 0 - - 1 - padding: same - strides: - - 1 - - 1 - width: 11 - 9: - K_h: 3 - K_w: 1 - activation: relu - height: 12 - input_channels: 320 - num_threads: 1 - output_channels: 4 - output_range: - - -2 - - 5 - padding: same - strides: - - 1 - - 2 - width: 11 - 10: - K_h: 6 - K_w: 5 - activation: relu - height: 10 - input_channels: 320 - num_threads: 2 - output_channels: 32 - output_range: - - 0 - - 1 - padding: same - strides: - - 2 - - 1 - width: 11 - 11: - K_h: 1 - K_w: 5 - activation: relu - height: 12 - input_channels: 32 - num_threads: 2 - output_channels: 32 - output_range: - - -4 - - 1 - padding: same - strides: - - 1 - - 1 - width: 11 - 12: - K_h: 6 - K_w: 1 - activation: relu - height: 7 - input_channels: 32 - num_threads: 5 - output_channels: 28 - output_range: - - -4 - - 3 - padding: same - strides: - - 2 - - 1 - width: 11 - 13: - K_h: 6 - K_w: 1 - activation: relu - height: 7 - input_channels: 128 - num_threads: 5 - output_channels: 28 - output_range: - - -2 - - 1 - padding: same - strides: - - 2 - - 1 - width: 8 - 14: - K_h: 6 - K_w: 3 - activation: relu - height: 10 - input_channels: 128 - num_threads: 2 - output_channels: 28 - output_range: - - 0 - - 3 - padding: same - strides: - - 1 - - 2 - width: 8 - 15: - K_h: 2 - K_w: 4 - activation: relu - height: 10 - input_channels: 32 - num_threads: 2 - output_channels: 28 - output_range: - - -2 - - 3 - padding: same - strides: - - 1 - - 1 - width: 8 - 16: - K_h: 3 - K_w: 4 - activation: relu - height: 12 - input_channels: 32 - num_threads: 5 - output_channels: 4 - output_range: - - 0 - - 3 - padding: same - strides: - - 2 - - 2 - width: 6 - 17: - K_h: 2 - K_w: 4 - activation: relu - height: 7 - input_channels: 32 - num_threads: 1 - output_channels: 32 - output_range: - - 0 - - 3 - padding: same - strides: - - 1 - - 1 - width: 8 - 18: - K_h: 1 - K_w: 3 - activation: relu - height: 12 - input_channels: 320 - num_threads: 1 - output_channels: 28 - output_range: - - -2 - - 1 - padding: same - strides: - - 2 - - 2 - width: 11 - 19: - K_h: 6 - K_w: 1 - activation: relu - height: 12 - input_channels: 128 - num_threads: 2 - output_channels: 32 - output_range: - - 0 - - 5 - padding: same - strides: - - 2 - - 1 - width: 6 diff --git a/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_bin.py b/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_bin.py deleted file mode 100644 index bf9453216..000000000 --- a/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_bin.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest -import larq -import tensorflow as tf - -from tflite2xcore.model_generation import Configuration - -from . import ( - BinarizedSingleOpRunner, - BConv2dGenericTestModelGenerator, - LarqSingleOpConverter, -) - -from . 
import (  # pylint: disable=unused-import
-    test_mean_abs_diffs,
-)
-
-
-# ----------------------------------------------------------------------------
-# GENERATORS
-# ----------------------------------------------------------------------------
-
-
-class BConv2dBitpackedTestModelGenerator(BConv2dGenericTestModelGenerator):
-    def _set_config(self, cfg: Configuration) -> None:
-        cfg.setdefault("padding", "valid")
-        for forbidden_key in ("activation", "output_range"):
-            assert (
-                forbidden_key not in cfg
-            ), f"{forbidden_key} cannot be specified for BConv2dBitpacked tests"
-        super()._set_config(cfg)
-
-    def check_config(self) -> None:
-        super().check_config()
-        assert (
-            self._config["input_channels"] % 32 == 0
-        ), "# of input channels must be multiple of 32"
-
-    def _build_core_model(self) -> tf.keras.Model:
-        img = tf.keras.layers.Input(shape=self._input_shape)
-        x = self._fake_quant(img, *self._config["input_range"])
-        x = self._op_layer()(x)
-        # NOTE: we need the next dummy layer in order to produce a bconv2d with bitpacked output
-        x = larq.layers.QuantConv2D(
-            filters=32,
-            kernel_size=(1, 1),
-            padding="valid",
-            pad_values=1,
-            strides=(1, 1),
-            input_quantizer="ste_sign",
-            kernel_quantizer="ste_sign",
-            kernel_constraint="weight_clip",
-        )(x)
-        x = self._fake_quant(x, *self._config["output_range"])
-        return tf.keras.Model(img, x)
-
-
-GENERATOR = BConv2dBitpackedTestModelGenerator
-
-# ----------------------------------------------------------------------------
-# RUNNERS
-# ----------------------------------------------------------------------------
-
-
-class BConv2dBitpackedTestRunner(BinarizedSingleOpRunner):
-    def make_lce_converter(self) -> LarqSingleOpConverter:
-        return LarqSingleOpConverter(
-            self, self.get_built_model, strip=True, remove_last_op=True
-        )
-
-
-RUNNER = BConv2dBitpackedTestRunner
-
-
-if __name__ == "__main__":
-    pytest.main()
diff --git a/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_bin.yml b/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_bin.yml
deleted file mode 100644
index 6a097a2d0..000000000
--- a/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_bin.yml
+++ /dev/null
@@ -1,223 +0,0 @@
-# Copyright (c) 2020, XMOS Ltd, All rights reserved
-# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK
-default:
-  0:
-    K_h: 3
-    K_w: 3
-    height: 12
-    input_channels: 128
-    num_threads: 2
-    output_channels: 32
-    strides:
-    - 2
-    - 1
-    width: 11
-  1:
-    K_h: 3
-    K_w: 3
-    height: 10
-    input_channels: 32
-    num_threads: 2
-    output_channels: 64
-    strides:
-    - 2
-    - 1
-    width: 8
-  2:
-    K_h: 6
-    K_w: 1
-    height: 7
-    input_channels: 128
-    num_threads: 5
-    output_channels: 32
-    strides:
-    - 2
-    - 2
-    width: 6
-  3:
-    K_h: 2
-    K_w: 4
-    height: 10
-    input_channels: 128
-    num_threads: 2
-    output_channels: 32
-    strides:
-    - 1
-    - 1
-    width: 6
-  4:
-    K_h: 1
-    K_w: 1
-    height: 12
-    input_channels: 32
-    num_threads: 1
-    output_channels: 64
-    strides:
-    - 1
-    - 2
-    width: 11
-  5:
-    K_h: 6
-    K_w: 5
-    height: 7
-    input_channels: 128
-    num_threads: 1
-    output_channels: 32
-    strides:
-    - 1
-    - 2
-    width: 11
-  6:
-    K_h: 6
-    K_w: 4
-    height: 10
-    input_channels: 32
-    num_threads: 2
-    output_channels: 32
-    strides:
-    - 1
-    - 1
-    width: 8
-  7:
-    K_h: 6
-    K_w: 5
-    height: 7
-    input_channels: 32
-    num_threads: 5
-    output_channels: 64
-    strides:
-    - 1
-    - 2
-    width: 8
-  8:
-    K_h: 2
-    K_w: 5
-    height: 12
-    input_channels: 32
-    num_threads: 5
-    output_channels: 64
-    strides:
-    - 2
-    - 1
-    width:
6 - 9: - K_h: 6 - K_w: 5 - height: 7 - input_channels: 128 - num_threads: 2 - output_channels: 32 - strides: - - 1 - - 1 - width: 8 - 10: - K_h: 1 - K_w: 1 - height: 10 - input_channels: 128 - num_threads: 1 - output_channels: 64 - strides: - - 2 - - 2 - width: 11 - 11: - K_h: 1 - K_w: 5 - height: 10 - input_channels: 128 - num_threads: 1 - output_channels: 64 - strides: - - 1 - - 1 - width: 11 - 12: - K_h: 3 - K_w: 1 - height: 10 - input_channels: 32 - num_threads: 1 - output_channels: 64 - strides: - - 1 - - 2 - width: 6 - 13: - K_h: 2 - K_w: 3 - height: 12 - input_channels: 128 - num_threads: 2 - output_channels: 32 - strides: - - 2 - - 2 - width: 6 - 14: - K_h: 3 - K_w: 4 - height: 12 - input_channels: 128 - num_threads: 5 - output_channels: 64 - strides: - - 1 - - 2 - width: 11 - 15: - K_h: 2 - K_w: 3 - height: 12 - input_channels: 32 - num_threads: 2 - output_channels: 32 - strides: - - 2 - - 1 - width: 8 - 16: - K_h: 3 - K_w: 1 - height: 12 - input_channels: 32 - num_threads: 2 - output_channels: 32 - strides: - - 2 - - 1 - width: 11 - 17: - K_h: 1 - K_w: 4 - height: 7 - input_channels: 32 - num_threads: 1 - output_channels: 64 - strides: - - 2 - - 2 - width: 8 - 18: - K_h: 1 - K_w: 3 - height: 10 - input_channels: 32 - num_threads: 5 - output_channels: 32 - strides: - - 1 - - 1 - width: 6 - 19: - K_h: 2 - K_w: 4 - height: 7 - input_channels: 128 - num_threads: 5 - output_channels: 64 - strides: - - 2 - - 2 - width: 6 diff --git a/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_bin_DI.py b/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_bin_DI.py deleted file mode 100644 index b181d20a9..000000000 --- a/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_bin_DI.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - - -from .test_bconv2d_bin import ( - BConv2dBitpackedTestRunner, - BConv2dBitpackedTestModelGenerator, -) - -from . 
import ( # pylint: disable=unused-import - test_mean_abs_diffs, -) - - -# ---------------------------------------------------------------------------- -# GENERATORS -# ---------------------------------------------------------------------------- - - -class BConv2dBitpackedDeepInTestModelGenerator(BConv2dBitpackedTestModelGenerator): - def check_config(self) -> None: - super().check_config() - assert ( - self._config["input_channels"] % 256 == 0 - ), "# of input channels must be multiple of 256" - - -GENERATOR = BConv2dBitpackedDeepInTestModelGenerator - -# ---------------------------------------------------------------------------- -# RUNNERS -# ---------------------------------------------------------------------------- - - -RUNNER = BConv2dBitpackedTestRunner - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_bin_DI.yml b/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_bin_DI.yml deleted file mode 100644 index 9cde4504c..000000000 --- a/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_bin_DI.yml +++ /dev/null @@ -1,223 +0,0 @@ -# Copyright (c) 2020, XMOS Ltd, All rights reserved -# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK -default: - 0: - K_h: 6 - K_w: 3 - height: 10 - input_channels: 256 - num_threads: 5 - output_channels: 64 - strides: - - 1 - - 1 - width: 6 - 1: - K_h: 6 - K_w: 3 - height: 10 - input_channels: 256 - num_threads: 1 - output_channels: 32 - strides: - - 1 - - 1 - width: 6 - 2: - K_h: 6 - K_w: 3 - height: 12 - input_channels: 256 - num_threads: 1 - output_channels: 64 - strides: - - 1 - - 2 - width: 6 - 3: - K_h: 1 - K_w: 4 - height: 10 - input_channels: 512 - num_threads: 2 - output_channels: 64 - strides: - - 1 - - 2 - width: 11 - 4: - K_h: 2 - K_w: 1 - height: 12 - input_channels: 256 - num_threads: 1 - output_channels: 32 - strides: - - 1 - - 2 - width: 8 - 5: - K_h: 6 - K_w: 1 - height: 7 - input_channels: 256 - num_threads: 5 - output_channels: 64 - strides: - - 2 - - 2 - width: 11 - 6: - K_h: 1 - K_w: 4 - height: 10 - input_channels: 256 - num_threads: 2 - output_channels: 64 - strides: - - 2 - - 2 - width: 11 - 7: - K_h: 3 - K_w: 5 - height: 7 - input_channels: 256 - num_threads: 1 - output_channels: 32 - strides: - - 2 - - 2 - width: 8 - 8: - K_h: 3 - K_w: 1 - height: 12 - input_channels: 512 - num_threads: 2 - output_channels: 32 - strides: - - 1 - - 2 - width: 8 - 9: - K_h: 1 - K_w: 5 - height: 10 - input_channels: 512 - num_threads: 1 - output_channels: 64 - strides: - - 1 - - 2 - width: 8 - 10: - K_h: 2 - K_w: 1 - height: 12 - input_channels: 512 - num_threads: 5 - output_channels: 64 - strides: - - 2 - - 2 - width: 6 - 11: - K_h: 3 - K_w: 4 - height: 7 - input_channels: 256 - num_threads: 1 - output_channels: 64 - strides: - - 1 - - 1 - width: 11 - 12: - K_h: 6 - K_w: 4 - height: 12 - input_channels: 512 - num_threads: 5 - output_channels: 64 - strides: - - 2 - - 2 - width: 8 - 13: - K_h: 3 - K_w: 5 - height: 7 - input_channels: 512 - num_threads: 2 - output_channels: 32 - strides: - - 1 - - 1 - width: 11 - 14: - K_h: 2 - K_w: 4 - height: 7 - input_channels: 512 - num_threads: 1 - output_channels: 32 - strides: - - 2 - - 1 - width: 11 - 15: - K_h: 2 - K_w: 3 - height: 7 - input_channels: 256 - num_threads: 2 - output_channels: 32 - strides: - - 1 - - 1 - width: 6 - 16: - K_h: 2 - K_w: 1 - height: 12 - input_channels: 512 - num_threads: 2 - output_channels: 64 - strides: - - 2 - - 1 - width: 11 - 17: 
-    K_h: 3
-    K_w: 5
-    height: 10
-    input_channels: 256
-    num_threads: 2
-    output_channels: 32
-    strides:
-    - 2
-    - 1
-    width: 8
-  18:
-    K_h: 1
-    K_w: 3
-    height: 10
-    input_channels: 512
-    num_threads: 5
-    output_channels: 32
-    strides:
-    - 2
-    - 1
-    width: 6
-  19:
-    K_h: 1
-    K_w: 5
-    height: 12
-    input_channels: 512
-    num_threads: 5
-    output_channels: 32
-    strides:
-    - 2
-    - 1
-    width: 6
diff --git a/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8.py b/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8.py
deleted file mode 100644
index d95c8c5f9..000000000
--- a/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import pytest
-
-from tflite2xcore.model_generation import Configuration
-
-from . import (
-    BinarizedSingleOpRunner,
-    BConv2dGenericTestModelGenerator,
-    LarqSingleOpConverter,
-)
-
-from . import (  # pylint: disable=unused-import
-    test_output,
-)
-
-
-# ----------------------------------------------------------------------------
-# GENERATORS
-# ----------------------------------------------------------------------------
-
-
-class BConv2dInt8TestModelGenerator(BConv2dGenericTestModelGenerator):
-    def _set_config(self, cfg: Configuration) -> None:
-        cfg.setdefault("padding", "valid")
-        super()._set_config(cfg)
-
-
-GENERATOR = BConv2dInt8TestModelGenerator
-
-# ----------------------------------------------------------------------------
-# RUNNERS
-# ----------------------------------------------------------------------------
-
-
-class BConv2dInt8TestRunner(BinarizedSingleOpRunner):
-    def make_lce_converter(self) -> LarqSingleOpConverter:
-        return LarqSingleOpConverter(self, self.get_built_model, strip=True)
-
-
-RUNNER = BConv2dInt8TestRunner
-
-# ----------------------------------------------------------------------------
-# FIXTURES
-# ----------------------------------------------------------------------------
-
-
-@pytest.fixture
-def bitpacked_outputs() -> bool:
-    return False
-
-
-if __name__ == "__main__":
-    pytest.main()
diff --git a/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8.yml b/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8.yml
deleted file mode 100644
index 0bbd56b4d..000000000
--- a/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8.yml
+++ /dev/null
@@ -1,303 +0,0 @@
-# Copyright (c) 2020, XMOS Ltd, All rights reserved
-# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK
-default:
-  0:
-    K_h: 3
-    K_w: 1
-    activation: relu
-    height: 7
-    input_channels: 320
-    num_threads: 1
-    output_channels: 4
-    output_range:
-    - -4
-    - 5
-    strides:
-    - 1
-    - 1
-    width: 11
-  1:
-    K_h: 1
-    K_w: 5
-    activation: relu
-    height: 10
-    input_channels: 128
-    num_threads: 2
-    output_channels: 4
-    output_range:
-    - -4
-    - 1
-    strides:
-    - 2
-    - 1
-    width: 11
-  2:
-    K_h: 3
-    K_w: 3
-    activation: relu
-    height: 7
-    input_channels: 128
-    num_threads: 5
-    output_channels: 28
-    output_range:
-    - -2
-    - 5
-    strides:
-    - 2
-    - 1
-    width: 8
-  3:
-    K_h: 1
-    K_w: 4
-    activation: relu
-    height: 12
-    input_channels: 320
-    num_threads: 5
-    output_channels: 32
-    output_range:
-    - -4
-    - 1
-    strides:
-    - 2
-    - 1
-    width: 8
-  4:
-    K_h: 2
-    K_w: 4
-    activation: relu
-    height: 12
-    input_channels: 32
-    num_threads: 1
-    output_channels:
28 - output_range: - - 0 - - 3 - strides: - - 1 - - 1 - width: 6 - 5: - K_h: 6 - K_w: 1 - activation: relu - height: 10 - input_channels: 320 - num_threads: 2 - output_channels: 28 - output_range: - - 0 - - 1 - strides: - - 2 - - 2 - width: 6 - 6: - K_h: 2 - K_w: 1 - activation: relu - height: 12 - input_channels: 128 - num_threads: 1 - output_channels: 28 - output_range: - - -4 - - 5 - strides: - - 2 - - 2 - width: 6 - 7: - K_h: 2 - K_w: 5 - activation: relu - height: 12 - input_channels: 32 - num_threads: 2 - output_channels: 4 - output_range: - - 0 - - 1 - strides: - - 2 - - 1 - width: 6 - 8: - K_h: 1 - K_w: 3 - activation: relu - height: 10 - input_channels: 320 - num_threads: 5 - output_channels: 32 - output_range: - - 0 - - 5 - strides: - - 2 - - 2 - width: 8 - 9: - K_h: 1 - K_w: 4 - activation: relu - height: 7 - input_channels: 32 - num_threads: 2 - output_channels: 4 - output_range: - - -2 - - 3 - strides: - - 1 - - 2 - width: 11 - 10: - K_h: 6 - K_w: 1 - activation: relu - height: 10 - input_channels: 128 - num_threads: 1 - output_channels: 4 - output_range: - - 0 - - 5 - strides: - - 1 - - 2 - width: 11 - 11: - K_h: 6 - K_w: 3 - activation: relu - height: 12 - input_channels: 320 - num_threads: 5 - output_channels: 28 - output_range: - - -2 - - 1 - strides: - - 1 - - 2 - width: 11 - 12: - K_h: 6 - K_w: 5 - activation: relu - height: 12 - input_channels: 32 - num_threads: 1 - output_channels: 32 - output_range: - - 0 - - 1 - strides: - - 1 - - 1 - width: 6 - 13: - K_h: 2 - K_w: 5 - activation: relu - height: 7 - input_channels: 128 - num_threads: 5 - output_channels: 28 - output_range: - - -2 - - 3 - strides: - - 2 - - 1 - width: 8 - 14: - K_h: 3 - K_w: 4 - activation: relu - height: 10 - input_channels: 32 - num_threads: 5 - output_channels: 4 - output_range: - - 0 - - 5 - strides: - - 2 - - 2 - width: 8 - 15: - K_h: 6 - K_w: 3 - activation: relu - height: 10 - input_channels: 32 - num_threads: 2 - output_channels: 4 - output_range: - - -4 - - 3 - strides: - - 1 - - 1 - width: 11 - 16: - K_h: 3 - K_w: 1 - activation: relu - height: 12 - input_channels: 128 - num_threads: 5 - output_channels: 32 - output_range: - - -2 - - 1 - strides: - - 1 - - 1 - width: 6 - 17: - K_h: 1 - K_w: 4 - activation: relu - height: 7 - input_channels: 128 - num_threads: 2 - output_channels: 32 - output_range: - - -4 - - 5 - strides: - - 1 - - 2 - width: 11 - 18: - K_h: 2 - K_w: 3 - activation: relu - height: 10 - input_channels: 320 - num_threads: 2 - output_channels: 28 - output_range: - - -4 - - 3 - strides: - - 2 - - 2 - width: 8 - 19: - K_h: 3 - K_w: 5 - activation: relu - height: 7 - input_channels: 320 - num_threads: 1 - output_channels: 32 - output_range: - - -2 - - 5 - strides: - - 1 - - 2 - width: 8 diff --git a/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8_DIDO.py b/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8_DIDO.py deleted file mode 100644 index 7e7f4211a..000000000 --- a/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8_DIDO.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from .test_bconv2d_int8 import BConv2dInt8TestModelGenerator -from .test_bconv2d_int8 import ( # pylint: disable=unused-import - bitpacked_outputs, - RUNNER, -) -from . 
import ( # pylint: disable=unused-import - test_output, -) - - -# ---------------------------------------------------------------------------- -# GENERATORS -# ---------------------------------------------------------------------------- - - -class BConv2dInt8DeepInDeepOutTestModelGenerator(BConv2dInt8TestModelGenerator): - def check_config(self) -> None: - super().check_config() - assert ( - self._config["input_channels"] % 256 == 0 - ), "# of input channels must be multiple of 256" - assert ( - self._config["output_channels"] % 16 == 0 - ), "# of input channels must be multiple of 16" - - -GENERATOR = BConv2dInt8DeepInDeepOutTestModelGenerator - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8_DIDO.yml b/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8_DIDO.yml deleted file mode 100644 index 0e6bdbea2..000000000 --- a/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8_DIDO.yml +++ /dev/null @@ -1,303 +0,0 @@ -# Copyright (c) 2020, XMOS Ltd, All rights reserved -# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK -default: - 0: - K_h: 2 - K_w: 4 - activation: relu - height: 7 - input_channels: 256 - num_threads: 1 - output_channels: 16 - output_range: - - -2 - - 5 - strides: - - 2 - - 2 - width: 8 - 1: - K_h: 3 - K_w: 3 - activation: relu - height: 7 - input_channels: 512 - num_threads: 2 - output_channels: 16 - output_range: - - -2 - - 1 - strides: - - 1 - - 2 - width: 8 - 2: - K_h: 2 - K_w: 4 - activation: relu - height: 7 - input_channels: 256 - num_threads: 1 - output_channels: 16 - output_range: - - 0 - - 1 - strides: - - 2 - - 2 - width: 8 - 3: - K_h: 6 - K_w: 5 - activation: relu - height: 12 - input_channels: 256 - num_threads: 1 - output_channels: 16 - output_range: - - 0 - - 1 - strides: - - 1 - - 2 - width: 8 - 4: - K_h: 1 - K_w: 1 - activation: relu - height: 7 - input_channels: 256 - num_threads: 2 - output_channels: 48 - output_range: - - -4 - - 3 - strides: - - 1 - - 1 - width: 11 - 5: - K_h: 3 - K_w: 4 - activation: relu - height: 10 - input_channels: 256 - num_threads: 1 - output_channels: 48 - output_range: - - -2 - - 1 - strides: - - 2 - - 2 - width: 8 - 6: - K_h: 3 - K_w: 1 - activation: relu - height: 12 - input_channels: 512 - num_threads: 2 - output_channels: 48 - output_range: - - -2 - - 1 - strides: - - 1 - - 1 - width: 6 - 7: - K_h: 1 - K_w: 5 - activation: relu - height: 7 - input_channels: 512 - num_threads: 5 - output_channels: 48 - output_range: - - 0 - - 3 - strides: - - 2 - - 1 - width: 6 - 8: - K_h: 6 - K_w: 4 - activation: relu - height: 10 - input_channels: 256 - num_threads: 5 - output_channels: 16 - output_range: - - -2 - - 3 - strides: - - 2 - - 1 - width: 6 - 9: - K_h: 6 - K_w: 3 - activation: relu - height: 12 - input_channels: 256 - num_threads: 5 - output_channels: 48 - output_range: - - 0 - - 3 - strides: - - 2 - - 1 - width: 11 - 10: - K_h: 1 - K_w: 3 - activation: relu - height: 10 - input_channels: 512 - num_threads: 2 - output_channels: 48 - output_range: - - -4 - - 3 - strides: - - 2 - - 2 - width: 11 - 11: - K_h: 3 - K_w: 5 - activation: relu - height: 12 - input_channels: 512 - num_threads: 1 - output_channels: 16 - output_range: - - -4 - - 1 - strides: - - 1 - - 2 - width: 11 - 12: - K_h: 1 - K_w: 1 - activation: relu - height: 10 - input_channels: 512 - num_threads: 5 - output_channels: 48 - output_range: - - -4 - - 1 - strides: - - 1 - - 2 - width: 8 - 13: - K_h: 3 - K_w: 5 - 
activation: relu - height: 12 - input_channels: 512 - num_threads: 5 - output_channels: 16 - output_range: - - 0 - - 1 - strides: - - 1 - - 2 - width: 6 - 14: - K_h: 2 - K_w: 5 - activation: relu - height: 10 - input_channels: 512 - num_threads: 5 - output_channels: 16 - output_range: - - -4 - - 5 - strides: - - 1 - - 1 - width: 6 - 15: - K_h: 2 - K_w: 1 - activation: relu - height: 12 - input_channels: 256 - num_threads: 1 - output_channels: 48 - output_range: - - 0 - - 5 - strides: - - 2 - - 1 - width: 11 - 16: - K_h: 2 - K_w: 3 - activation: relu - height: 10 - input_channels: 512 - num_threads: 2 - output_channels: 16 - output_range: - - 0 - - 5 - strides: - - 2 - - 2 - width: 6 - 17: - K_h: 6 - K_w: 4 - activation: relu - height: 7 - input_channels: 256 - num_threads: 2 - output_channels: 48 - output_range: - - -2 - - 5 - strides: - - 1 - - 1 - width: 6 - 18: - K_h: 1 - K_w: 3 - activation: relu - height: 7 - input_channels: 512 - num_threads: 1 - output_channels: 48 - output_range: - - -2 - - 3 - strides: - - 1 - - 1 - width: 11 - 19: - K_h: 6 - K_w: 1 - activation: relu - height: 12 - input_channels: 256 - num_threads: 5 - output_channels: 16 - output_range: - - -4 - - 3 - strides: - - 2 - - 1 - width: 11 diff --git a/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8_DIDO_activation.py b/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8_DIDO_activation.py deleted file mode 100644 index 245eb31b5..000000000 --- a/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8_DIDO_activation.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from .test_bconv2d_int8_DIDO import ( # pylint: disable=unused-import - GENERATOR, - RUNNER, - bitpacked_outputs, -) - -from . 
import ( # pylint: disable=unused-import - test_output, -) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8_DIDO_activation.yml b/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8_DIDO_activation.yml deleted file mode 100644 index 11ad35ba7..000000000 --- a/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8_DIDO_activation.yml +++ /dev/null @@ -1,303 +0,0 @@ -# Copyright (c) 2020, XMOS Ltd, All rights reserved -# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK -default: - 0: - K_h: 3 - K_w: 3 - activation: relu - height: 7 - input_channels: 512 - num_threads: 2 - output_channels: 48 - output_range: - - -4 - - 1 - strides: - - 1 - - 2 - width: 6 - 1: - K_h: 2 - K_w: 4 - activation: relu - height: 10 - input_channels: 512 - num_threads: 2 - output_channels: 48 - output_range: - - -2 - - 1 - strides: - - 2 - - 2 - width: 8 - 2: - K_h: 2 - K_w: 5 - activation: relu - height: 12 - input_channels: 256 - num_threads: 1 - output_channels: 16 - output_range: - - 0 - - 5 - strides: - - 1 - - 1 - width: 6 - 3: - K_h: 1 - K_w: 4 - activation: relu - height: 12 - input_channels: 256 - num_threads: 5 - output_channels: 16 - output_range: - - -2 - - 1 - strides: - - 1 - - 1 - width: 8 - 4: - K_h: 6 - K_w: 4 - activation: relu - height: 12 - input_channels: 512 - num_threads: 1 - output_channels: 48 - output_range: - - -2 - - 3 - strides: - - 1 - - 1 - width: 8 - 5: - K_h: 1 - K_w: 3 - activation: relu - height: 12 - input_channels: 512 - num_threads: 2 - output_channels: 48 - output_range: - - 0 - - 1 - strides: - - 1 - - 2 - width: 8 - 6: - K_h: 2 - K_w: 5 - activation: relu - height: 10 - input_channels: 256 - num_threads: 5 - output_channels: 48 - output_range: - - -4 - - 5 - strides: - - 2 - - 2 - width: 8 - 7: - K_h: 3 - K_w: 1 - activation: relu - height: 7 - input_channels: 256 - num_threads: 2 - output_channels: 16 - output_range: - - -2 - - 5 - strides: - - 2 - - 1 - width: 11 - 8: - K_h: 6 - K_w: 1 - activation: relu - height: 12 - input_channels: 512 - num_threads: 5 - output_channels: 48 - output_range: - - -2 - - 5 - strides: - - 2 - - 1 - width: 6 - 9: - K_h: 1 - K_w: 5 - activation: relu - height: 10 - input_channels: 512 - num_threads: 5 - output_channels: 48 - output_range: - - 0 - - 3 - strides: - - 1 - - 1 - width: 11 - 10: - K_h: 2 - K_w: 3 - activation: relu - height: 7 - input_channels: 512 - num_threads: 1 - output_channels: 16 - output_range: - - -4 - - 5 - strides: - - 2 - - 1 - width: 6 - 11: - K_h: 1 - K_w: 4 - activation: relu - height: 10 - input_channels: 256 - num_threads: 1 - output_channels: 48 - output_range: - - 0 - - 5 - strides: - - 2 - - 1 - width: 11 - 12: - K_h: 6 - K_w: 1 - activation: relu - height: 12 - input_channels: 512 - num_threads: 1 - output_channels: 16 - output_range: - - -2 - - 3 - strides: - - 1 - - 2 - width: 11 - 13: - K_h: 6 - K_w: 3 - activation: relu - height: 7 - input_channels: 512 - num_threads: 2 - output_channels: 48 - output_range: - - -4 - - 3 - strides: - - 2 - - 2 - width: 8 - 14: - K_h: 2 - K_w: 1 - activation: relu - height: 10 - input_channels: 256 - num_threads: 1 - output_channels: 16 - output_range: - - -2 - - 1 - strides: - - 2 - - 2 - width: 8 - 15: - K_h: 3 - K_w: 5 - activation: relu - height: 7 - input_channels: 256 - num_threads: 2 - output_channels: 48 - output_range: - - 0 - - 3 - strides: - - 1 - - 2 - width: 11 - 16: - K_h: 1 - K_w: 1 - activation: relu - 
height: 12 - input_channels: 256 - num_threads: 2 - output_channels: 16 - output_range: - - 0 - - 3 - strides: - - 1 - - 1 - width: 6 - 17: - K_h: 3 - K_w: 5 - activation: relu - height: 10 - input_channels: 256 - num_threads: 5 - output_channels: 16 - output_range: - - -4 - - 1 - strides: - - 2 - - 1 - width: 11 - 18: - K_h: 3 - K_w: 3 - activation: relu - height: 7 - input_channels: 512 - num_threads: 1 - output_channels: 16 - output_range: - - 0 - - 1 - strides: - - 1 - - 2 - width: 11 - 19: - K_h: 6 - K_w: 4 - activation: relu - height: 7 - input_channels: 256 - num_threads: 5 - output_channels: 16 - output_range: - - -4 - - 3 - strides: - - 2 - - 2 - width: 6 diff --git a/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8_activation.py b/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8_activation.py deleted file mode 100644 index c784ce657..000000000 --- a/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8_activation.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from .test_bconv2d_int8 import ( # pylint: disable=unused-import - GENERATOR, - RUNNER, - bitpacked_outputs, -) - -from . import ( # pylint: disable=unused-import - test_output, -) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8_activation.yml b/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8_activation.yml deleted file mode 100644 index fff1ef76e..000000000 --- a/archived/test/integration_test/test_single_op_models/test_binarized/test_bconv2d_int8_activation.yml +++ /dev/null @@ -1,303 +0,0 @@ -# Copyright (c) 2020, XMOS Ltd, All rights reserved -# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK -default: - 0: - K_h: 6 - K_w: 3 - activation: relu - height: 7 - input_channels: 32 - num_threads: 1 - output_channels: 4 - output_range: - - -2 - - 1 - strides: - - 2 - - 1 - width: 6 - 1: - K_h: 3 - K_w: 1 - activation: relu - height: 7 - input_channels: 128 - num_threads: 2 - output_channels: 4 - output_range: - - 0 - - 3 - strides: - - 2 - - 2 - width: 6 - 2: - K_h: 6 - K_w: 4 - activation: relu - height: 10 - input_channels: 320 - num_threads: 2 - output_channels: 28 - output_range: - - -4 - - 3 - strides: - - 2 - - 1 - width: 8 - 3: - K_h: 3 - K_w: 5 - activation: relu - height: 7 - input_channels: 320 - num_threads: 1 - output_channels: 32 - output_range: - - -2 - - 3 - strides: - - 1 - - 2 - width: 8 - 4: - K_h: 1 - K_w: 5 - activation: relu - height: 7 - input_channels: 320 - num_threads: 5 - output_channels: 32 - output_range: - - -4 - - 5 - strides: - - 1 - - 1 - width: 8 - 5: - K_h: 2 - K_w: 4 - activation: relu - height: 10 - input_channels: 32 - num_threads: 1 - output_channels: 32 - output_range: - - -4 - - 3 - strides: - - 2 - - 2 - width: 8 - 6: - K_h: 2 - K_w: 4 - activation: relu - height: 10 - input_channels: 128 - num_threads: 2 - output_channels: 4 - output_range: - - -4 - - 1 - strides: - - 2 - - 1 - width: 11 - 7: - K_h: 6 - K_w: 4 - activation: relu - height: 10 - input_channels: 320 - num_threads: 2 - output_channels: 4 - output_range: - - -4 - - 3 - strides: - - 1 - - 2 - width: 11 - 8: - K_h: 2 - K_w: 5 - activation: relu - height: 7 - input_channels: 32 - num_threads: 1 - output_channels: 32 - output_range: - - 0 - - 5 - strides: - - 2 - - 2 - 
width: 6 - 9: - K_h: 3 - K_w: 4 - activation: relu - height: 10 - input_channels: 32 - num_threads: 1 - output_channels: 28 - output_range: - - 0 - - 5 - strides: - - 1 - - 1 - width: 8 - 10: - K_h: 6 - K_w: 1 - activation: relu - height: 10 - input_channels: 320 - num_threads: 5 - output_channels: 28 - output_range: - - -2 - - 3 - strides: - - 2 - - 1 - width: 11 - 11: - K_h: 1 - K_w: 3 - activation: relu - height: 12 - input_channels: 128 - num_threads: 5 - output_channels: 28 - output_range: - - 0 - - 1 - strides: - - 1 - - 1 - width: 11 - 12: - K_h: 1 - K_w: 3 - activation: relu - height: 12 - input_channels: 128 - num_threads: 5 - output_channels: 4 - output_range: - - 0 - - 3 - strides: - - 2 - - 1 - width: 11 - 13: - K_h: 3 - K_w: 3 - activation: relu - height: 7 - input_channels: 128 - num_threads: 1 - output_channels: 32 - output_range: - - -4 - - 5 - strides: - - 1 - - 1 - width: 11 - 14: - K_h: 2 - K_w: 1 - activation: relu - height: 12 - input_channels: 128 - num_threads: 5 - output_channels: 28 - output_range: - - 0 - - 3 - strides: - - 1 - - 1 - width: 6 - 15: - K_h: 6 - K_w: 5 - activation: relu - height: 12 - input_channels: 320 - num_threads: 1 - output_channels: 28 - output_range: - - -2 - - 1 - strides: - - 1 - - 2 - width: 6 - 16: - K_h: 1 - K_w: 1 - activation: relu - height: 12 - input_channels: 128 - num_threads: 5 - output_channels: 28 - output_range: - - -2 - - 5 - strides: - - 2 - - 2 - width: 6 - 17: - K_h: 1 - K_w: 1 - activation: relu - height: 12 - input_channels: 320 - num_threads: 2 - output_channels: 4 - output_range: - - 0 - - 1 - strides: - - 1 - - 2 - width: 8 - 18: - K_h: 3 - K_w: 5 - activation: relu - height: 7 - input_channels: 32 - num_threads: 5 - output_channels: 32 - output_range: - - -4 - - 1 - strides: - - 2 - - 2 - width: 11 - 19: - K_h: 2 - K_w: 3 - activation: relu - height: 10 - input_channels: 32 - num_threads: 2 - output_channels: 4 - output_range: - - -2 - - 5 - strides: - - 1 - - 2 - width: 8 diff --git a/archived/test/integration_test/test_single_op_models/test_binarized/test_bsign.py b/archived/test/integration_test/test_single_op_models/test_binarized/test_bsign.py deleted file mode 100644 index a09312891..000000000 --- a/archived/test/integration_test/test_single_op_models/test_binarized/test_bsign.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest -import tensorflow as tf -import numpy as np - -from tflite2xcore.model_generation import Configuration -from tflite2xcore.model_generation.data_factories import InputInitializerDataFactory - -from . import ( - BinarizedSingleOpRunner, - LarqCompositeTestModelGenerator, - LarqSingleOpConverter, -) - -from . 
import (  # pylint: disable=unused-import
-    test_mean_abs_diffs,
-)
-
-
-# ----------------------------------------------------------------------------
-# GENERATORS
-# ----------------------------------------------------------------------------
-
-
-class BSignTestModelGenerator(LarqCompositeTestModelGenerator):
-    def _set_config(self, cfg: Configuration) -> None:
-        for key in ("K_w", "K_h", "output_channels", "activation"):
-            assert key not in cfg, f"{key} should not be specified for bsign tests"
-        cfg["output_channels"] = 32
-        cfg["K_w"] = cfg["K_h"] = 1
-        super()._set_config(cfg)
-
-
-GENERATOR = BSignTestModelGenerator
-
-# ----------------------------------------------------------------------------
-# RUNNERS
-# ----------------------------------------------------------------------------
-
-
-class BSignTestRunner(BinarizedSingleOpRunner):
-    def make_lce_converter(self) -> LarqSingleOpConverter:
-        return LarqSingleOpConverter(self, self.get_built_model, remove_last_op=True)
-
-    def _set_config(self, cfg: Configuration) -> None:
-        cfg["input_range"] = cfg.pop(
-            "input_range", (np.iinfo(np.int8).min, np.iinfo(np.int8).max)
-        )
-        assert (
-            "output_range" not in cfg
-        ), f"output_range cannot be specified for Bsign tests"
-        super()._set_config(cfg)
-
-    def make_repr_data_factory(self) -> InputInitializerDataFactory:
-        return InputInitializerDataFactory(
-            self, lambda: self._model_generator.input_shape, dtype=tf.int8
-        )
-
-
-RUNNER = BSignTestRunner
-
-
-if __name__ == "__main__":
-    pytest.main()
diff --git a/archived/test/integration_test/test_single_op_models/test_binarized/test_bsign.yml b/archived/test/integration_test/test_single_op_models/test_binarized/test_bsign.yml
deleted file mode 100644
index e89f48598..000000000
--- a/archived/test/integration_test/test_single_op_models/test_binarized/test_bsign.yml
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright (c) 2020, XMOS Ltd, All rights reserved
-# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK
-default:
-  0:
-    height: 8
-    input_channels: 32
-    num_threads: 1
-    width: 4
-  1:
-    height: 4
-    input_channels: 128
-    num_threads: 1
-    width: 1
-  2:
-    height: 8
-    input_channels: 224
-    num_threads: 2
-    width: 7
-  3:
-    height: 4
-    input_channels: 224
-    num_threads: 1
-    width: 4
-  4:
-    height: 5
-    input_channels: 32
-    num_threads: 2
-    width: 4
-  5:
-    height: 1
-    input_channels: 224
-    num_threads: 1
-    width: 4
-  6:
-    height: 8
-    input_channels: 128
-    num_threads: 2
-    width: 1
-  7:
-    height: 1
-    input_channels: 32
-    num_threads: 2
-    width: 7
-  8:
-    height: 8
-    input_channels: 32
-    num_threads: 5
-    width: 7
-  9:
-    height: 8
-    input_channels: 128
-    num_threads: 5
-    width: 8
-  10:
-    height: 1
-    input_channels: 224
-    num_threads: 1
-    width: 7
-  11:
-    height: 1
-    input_channels: 128
-    num_threads: 1
-    width: 4
-  12:
-    height: 5
-    input_channels: 224
-    num_threads: 5
-    width: 8
-  13:
-    height: 4
-    input_channels: 32
-    num_threads: 1
-    width: 8
-  14:
-    height: 5
-    input_channels: 224
-    num_threads: 2
-    width: 8
-  15:
-    height: 5
-    input_channels: 128
-    num_threads: 5
-    width: 1
-  16:
-    height: 4
-    input_channels: 128
-    num_threads: 5
-    width: 8
-  17:
-    height: 5
-    input_channels: 128
-    num_threads: 5
-    width: 7
-  18:
-    height: 1
-    input_channels: 128
-    num_threads: 5
-    width: 1
-  19:
-    height: 4
-    input_channels: 32
-    num_threads: 2
-    width: 1
diff --git a/archived/test/integration_test/test_single_op_models/test_conv2d/__init__.py b/archived/test/integration_test/test_single_op_models/test_conv2d/__init__.py
deleted file mode 100644
index 1c49c1f27..000000000
--- a/archived/test/integration_test/test_single_op_models/test_conv2d/__init__.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# Copyright 2019-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import tensorflow as tf
-from abc import abstractmethod
-from typing import Tuple, Optional
-
-from tflite2xcore.utils import asserting_cast
-from tflite2xcore.model_generation import Configuration
-from tflite2xcore.model_generation.utils import parse_init_config
-
-from .. import (
-    PaddingMixin,
-    FilterOpTestModelGenerator,
-    ChannelPreservingOpTestModelGenerator,
-    test_output,
-)
-
-
-# ----------------------------------------------------------------------------
-# GENERATORS
-# ----------------------------------------------------------------------------
-
-
-class AbstractConv2dTestModelGenerator(FilterOpTestModelGenerator):
-    @property
-    def _total_width(self) -> int:
-        return asserting_cast(int, self._config["width"])
-
-    @property
-    def _total_height(self) -> int:
-        return asserting_cast(int, self._config["height"])
-
-    def _set_config(self, cfg: Configuration) -> None:
-        self._config.update(
-            {
-                "weight_init": cfg.pop("weight_init", ("RandomUniform", -1, 1)),
-                "bias_init": cfg.pop("bias_init", ("Constant", 0)),
-            }
-        )
-        cfg.setdefault("padding", "same")
-        cfg.setdefault("strides", (1, 1))
-        super()._set_config(cfg)
-
-
-class Conv2dGenericTestModelGenerator(AbstractConv2dTestModelGenerator):
-    def _set_config(self, cfg: Configuration) -> None:
-        self._config.update(
-            {
-                "input_channels": cfg.pop("input_channels", 4),
-                "output_channels": cfg.pop("output_channels", 4),
-            }
-        )
-        super()._set_config(cfg)
-
-    @property
-    def _input_channels(self) -> int:
-        return asserting_cast(int, self._config["input_channels"])
-
-    def _op_layer(
-        self, *, input_shape: Optional[Tuple[int, int, int]] = None
-    ) -> tf.keras.layers.Conv2D:
-        kwargs = {"input_shape": input_shape} if input_shape else {}
-        cfg = self._config
-        return tf.keras.layers.Conv2D(
-            filters=cfg["output_channels"],
-            kernel_size=(cfg["K_h"], cfg["K_w"]),
-            padding=cfg["padding"],
-            strides=cfg["strides"],
-            bias_initializer=parse_init_config(*cfg["bias_init"]),
-            kernel_initializer=parse_init_config(*cfg["weight_init"]),
-            **kwargs,
-        )
-
-
-class Conv2dWordAlignedTestModelGenerator(Conv2dGenericTestModelGenerator):
-    def check_config(self) -> None:
-        super().check_config()
-        assert (
-            self._config["input_channels"] % 4 == 0
-        ), "# of input channels must be multiple of 4"
-        assert (
-            self._config["output_channels"] % 4 == 0
-        ), "# of output channels must be multiple of 4"
-
-
-class Conv2dProperTestModelGenerator(Conv2dWordAlignedTestModelGenerator):
-    def check_config(self) -> None:
-        super().check_config()
-        if self._config["padding"] == "valid":
-            assert (
-                self._config["K_h"] != self._total_height
-                or self._config["K_w"] != self._total_width
-            ), "identical kernel and image size with valid padding is reserved for single pixel testing"
diff --git a/archived/test/integration_test/test_single_op_models/test_conv2d/conftest.py b/archived/test/integration_test/test_single_op_models/test_conv2d/conftest.py
deleted file mode 100644
index c2419d085..000000000
--- a/archived/test/integration_test/test_single_op_models/test_conv2d/conftest.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
- -import pytest diff --git a/archived/test/integration_test/test_single_op_models/test_conv2d/test_conv2d.py b/archived/test/integration_test/test_single_op_models/test_conv2d/test_conv2d.py deleted file mode 100644 index cd2e0b12c..000000000 --- a/archived/test/integration_test/test_single_op_models/test_conv2d/test_conv2d.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from tflite2xcore.model_generation import Configuration - -from . import Conv2dProperTestModelGenerator -from . import ( # pylint: disable=unused-import - test_output, -) - - -# ---------------------------------------------------------------------------- -# GENERATORS -# ---------------------------------------------------------------------------- - - -class Conv2dTestModelGenerator(Conv2dProperTestModelGenerator): - def _set_config(self, cfg: Configuration) -> None: - cfg.setdefault("input_channels", 20) - super()._set_config(cfg) - - def check_config(self) -> None: - super().check_config() - assert ( - self._config["K_w"] * self._config["input_channels"] > 32 - ), "K_w * input_channels <= 32 is reserved for conv2d_shallowin testing" - assert ( - self._config["K_h"] != 1 or self._config["K_w"] != 1 - ), "1x1 kernel is reserved for conv2d_1x1 testing" - - -GENERATOR = Conv2dTestModelGenerator - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_single_op_models/test_conv2d/test_conv2d.yml b/archived/test/integration_test/test_single_op_models/test_conv2d/test_conv2d.yml deleted file mode 100644 index 41fe5c09c..000000000 --- a/archived/test/integration_test/test_single_op_models/test_conv2d/test_conv2d.yml +++ /dev/null @@ -1,483 +0,0 @@ -# Copyright (c) 2020, XMOS Ltd, All rights reserved -# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK -default: - 0: - K_h: 1 - K_w: 4 - height: 1 - input_channels: 12 - num_threads: 1 - output_channels: 32 - padding: same - strides: - - 1 - - 2 - width: 8 - 1: - K_h: 1 - K_w: 3 - height: 8 - input_channels: 48 - num_threads: 1 - output_channels: 64 - padding: valid - strides: - - 2 - - 2 - width: 14 - 2: - K_h: 2 - K_w: 4 - height: 8 - input_channels: 20 - num_threads: 1 - output_channels: 8 - padding: valid - strides: - - 1 - - 1 - width: 4 - 3: - K_h: 9 - K_w: 9 - height: 5 - input_channels: 16 - num_threads: 2 - output_channels: 4 - padding: same - strides: - - 1 - - 1 - width: 8 - 4: - K_h: 9 - K_w: 9 - height: 15 - input_channels: 32 - num_threads: 1 - output_channels: 4 - padding: same - strides: - - 2 - - 2 - width: 20 - 5: - K_h: 3 - K_w: 4 - height: 15 - input_channels: 16 - num_threads: 5 - output_channels: 64 - padding: valid - strides: - - 2 - - 2 - width: 20 - 6: - K_h: 5 - K_w: 4 - height: 12 - input_channels: 20 - num_threads: 5 - output_channels: 52 - padding: same - strides: - - 1 - - 2 - width: 20 - 7: - K_h: 3 - K_w: 7 - height: 5 - input_channels: 16 - num_threads: 5 - output_channels: 16 - padding: valid - strides: - - 2 - - 2 - width: 7 - 8: - K_h: 2 - K_w: 7 - height: 1 - input_channels: 12 - num_threads: 1 - output_channels: 16 - padding: same - strides: - - 1 - - 1 - width: 20 - 9: - K_h: 3 - K_w: 7 - height: 4 - input_channels: 8 - num_threads: 2 - output_channels: 32 - padding: same - strides: - - 2 - - 1 - width: 20 - 10: - K_h: 5 - K_w: 5 - height: 12 - input_channels: 36 - num_threads: 5 - output_channels: 8 - padding: same - strides: - - 2 - - 1 - width: 20 - 11: - K_h: 7 - K_w: 7 - height: 15 
- input_channels: 48 - num_threads: 2 - output_channels: 48 - padding: same - strides: - - 1 - - 2 - width: 20 - 12: - K_h: 3 - K_w: 9 - height: 12 - input_channels: 20 - num_threads: 1 - output_channels: 12 - padding: valid - strides: - - 2 - - 1 - width: 14 - 13: - K_h: 7 - K_w: 3 - height: 12 - input_channels: 32 - num_threads: 2 - output_channels: 48 - padding: same - strides: - - 2 - - 2 - width: 8 - 14: - K_h: 2 - K_w: 5 - height: 8 - input_channels: 12 - num_threads: 5 - output_channels: 64 - padding: same - strides: - - 2 - - 1 - width: 20 - 15: - K_h: 7 - K_w: 9 - height: 8 - input_channels: 32 - num_threads: 2 - output_channels: 32 - padding: same - strides: - - 2 - - 2 - width: 8 - 16: - K_h: 9 - K_w: 4 - height: 8 - input_channels: 48 - num_threads: 2 - output_channels: 12 - padding: same - strides: - - 1 - - 2 - width: 7 - 17: - K_h: 9 - K_w: 5 - height: 8 - input_channels: 12 - num_threads: 5 - output_channels: 16 - padding: same - strides: - - 1 - - 2 - width: 20 - 18: - K_h: 9 - K_w: 1 - height: 12 - input_channels: 36 - num_threads: 1 - output_channels: 4 - padding: valid - strides: - - 1 - - 2 - width: 8 - 19: - K_h: 5 - K_w: 3 - height: 8 - input_channels: 12 - num_threads: 2 - output_channels: 32 - padding: same - strides: - - 2 - - 1 - width: 7 - 20: - K_h: 5 - K_w: 1 - height: 4 - input_channels: 36 - num_threads: 2 - output_channels: 16 - padding: same - strides: - - 1 - - 2 - width: 20 - 21: - K_h: 1 - K_w: 7 - height: 5 - input_channels: 16 - num_threads: 2 - output_channels: 52 - padding: same - strides: - - 1 - - 1 - width: 8 - 22: - K_h: 2 - K_w: 5 - height: 5 - input_channels: 20 - num_threads: 1 - output_channels: 12 - padding: valid - strides: - - 2 - - 1 - width: 14 - 23: - K_h: 9 - K_w: 5 - height: 12 - input_channels: 36 - num_threads: 2 - output_channels: 8 - padding: valid - strides: - - 1 - - 1 - width: 7 - 24: - K_h: 3 - K_w: 9 - height: 12 - input_channels: 8 - num_threads: 2 - output_channels: 48 - padding: same - strides: - - 1 - - 2 - width: 20 - 25: - K_h: 5 - K_w: 7 - height: 8 - input_channels: 8 - num_threads: 2 - output_channels: 28 - padding: valid - strides: - - 1 - - 2 - width: 20 - 26: - K_h: 1 - K_w: 9 - height: 4 - input_channels: 36 - num_threads: 1 - output_channels: 8 - padding: same - strides: - - 2 - - 1 - width: 8 - 27: - K_h: 3 - K_w: 4 - height: 8 - input_channels: 12 - num_threads: 2 - output_channels: 48 - padding: valid - strides: - - 2 - - 2 - width: 14 - 28: - K_h: 1 - K_w: 4 - height: 8 - input_channels: 20 - num_threads: 5 - output_channels: 52 - padding: same - strides: - - 1 - - 2 - width: 20 - 29: - K_h: 7 - K_w: 9 - height: 15 - input_channels: 48 - num_threads: 1 - output_channels: 16 - padding: same - strides: - - 2 - - 2 - width: 4 - 30: - K_h: 5 - K_w: 1 - height: 8 - input_channels: 48 - num_threads: 1 - output_channels: 64 - padding: valid - strides: - - 1 - - 2 - width: 8 - 31: - K_h: 9 - K_w: 5 - height: 5 - input_channels: 48 - num_threads: 5 - output_channels: 4 - padding: same - strides: - - 2 - - 2 - width: 7 - 32: - K_h: 1 - K_w: 7 - height: 15 - input_channels: 36 - num_threads: 2 - output_channels: 16 - padding: valid - strides: - - 1 - - 1 - width: 7 - 33: - K_h: 5 - K_w: 4 - height: 5 - input_channels: 48 - num_threads: 2 - output_channels: 64 - padding: valid - strides: - - 1 - - 2 - width: 14 - 34: - K_h: 1 - K_w: 3 - height: 1 - input_channels: 32 - num_threads: 1 - output_channels: 48 - padding: same - strides: - - 2 - - 2 - width: 7 - 35: - K_h: 3 - K_w: 1 - height: 15 - input_channels: 48 - 
num_threads: 1 - output_channels: 12 - padding: same - strides: - - 2 - - 2 - width: 14 - 36: - K_h: 1 - K_w: 7 - height: 12 - input_channels: 32 - num_threads: 1 - output_channels: 48 - padding: same - strides: - - 2 - - 2 - width: 4 - 37: - K_h: 7 - K_w: 3 - height: 5 - input_channels: 12 - num_threads: 1 - output_channels: 32 - padding: same - strides: - - 2 - - 2 - width: 7 - 38: - K_h: 1 - K_w: 3 - height: 5 - input_channels: 48 - num_threads: 1 - output_channels: 8 - padding: valid - strides: - - 1 - - 1 - width: 4 - 39: - K_h: 5 - K_w: 5 - height: 15 - input_channels: 36 - num_threads: 1 - output_channels: 12 - padding: same - strides: - - 1 - - 1 - width: 20 diff --git a/archived/test/integration_test/test_single_op_models/test_conv2d/test_conv2d_1x1.py b/archived/test/integration_test/test_single_op_models/test_conv2d/test_conv2d_1x1.py deleted file mode 100644 index 51d59715c..000000000 --- a/archived/test/integration_test/test_single_op_models/test_conv2d/test_conv2d_1x1.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from tflite2xcore.model_generation import Configuration - -from . import Conv2dProperTestModelGenerator -from . import ( # pylint: disable=unused-import - test_output, -) - - -# ---------------------------------------------------------------------------- -# GENERATORS -# ---------------------------------------------------------------------------- - - -class Conv2d1x1TestModelGenerator(Conv2dProperTestModelGenerator): - def _set_config(self, cfg: Configuration) -> None: - cfg.setdefault("K_h", 1) - cfg.setdefault("K_w", 1) - cfg.setdefault("strides", (1, 1)) - super()._set_config(cfg) - - def check_config(self) -> None: - super().check_config() - assert self._config["K_h"] == 1, "Kernel height must be 1" - assert self._config["K_w"] == 1, "Kernel width must be 1" - assert self._config["strides"] == (1, 1), "strides must be (1, 1)" - - -GENERATOR = Conv2d1x1TestModelGenerator - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_single_op_models/test_conv2d/test_conv2d_1x1.yml b/archived/test/integration_test/test_single_op_models/test_conv2d/test_conv2d_1x1.yml deleted file mode 100644 index 5bbc817a5..000000000 --- a/archived/test/integration_test/test_single_op_models/test_conv2d/test_conv2d_1x1.yml +++ /dev/null @@ -1,243 +0,0 @@ -# Copyright (c) 2020, XMOS Ltd, All rights reserved -# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK -default: - 0: - height: 4 - input_channels: 16 - num_threads: 1 - output_channels: 12 - width: 7 - 1: - height: 15 - input_channels: 8 - num_threads: 2 - output_channels: 52 - width: 7 - 2: - height: 8 - input_channels: 36 - num_threads: 1 - output_channels: 52 - width: 7 - 3: - height: 8 - input_channels: 8 - num_threads: 1 - output_channels: 16 - width: 4 - 4: - height: 5 - input_channels: 48 - num_threads: 1 - output_channels: 48 - width: 14 - 5: - height: 5 - input_channels: 8 - num_threads: 5 - output_channels: 28 - width: 20 - 6: - height: 8 - input_channels: 32 - num_threads: 2 - output_channels: 64 - width: 20 - 7: - height: 12 - input_channels: 36 - num_threads: 2 - output_channels: 52 - width: 8 - 8: - height: 5 - input_channels: 36 - num_threads: 1 - output_channels: 4 - width: 8 - 9: - height: 15 - input_channels: 4 - num_threads: 1 - output_channels: 12 - width: 7 - 10: - height: 12 - input_channels: 8 - num_threads: 2 - output_channels: 28 - width: 14 - 11: - 
height: 8 - input_channels: 32 - num_threads: 5 - output_channels: 16 - width: 20 - 12: - height: 5 - input_channels: 48 - num_threads: 2 - output_channels: 4 - width: 4 - 13: - height: 15 - input_channels: 36 - num_threads: 5 - output_channels: 32 - width: 8 - 14: - height: 12 - input_channels: 48 - num_threads: 2 - output_channels: 16 - width: 7 - 15: - height: 15 - input_channels: 20 - num_threads: 2 - output_channels: 64 - width: 8 - 16: - height: 12 - input_channels: 36 - num_threads: 5 - output_channels: 28 - width: 8 - 17: - height: 15 - input_channels: 20 - num_threads: 2 - output_channels: 8 - width: 14 - 18: - height: 15 - input_channels: 32 - num_threads: 2 - output_channels: 64 - width: 14 - 19: - height: 12 - input_channels: 48 - num_threads: 5 - output_channels: 52 - width: 20 - 20: - height: 15 - input_channels: 20 - num_threads: 1 - output_channels: 48 - width: 20 - 21: - height: 4 - input_channels: 16 - num_threads: 2 - output_channels: 8 - width: 8 - 22: - height: 5 - input_channels: 12 - num_threads: 1 - output_channels: 52 - width: 1 - 23: - height: 15 - input_channels: 4 - num_threads: 5 - output_channels: 48 - width: 7 - 24: - height: 5 - input_channels: 36 - num_threads: 2 - output_channels: 28 - width: 7 - 25: - height: 5 - input_channels: 48 - num_threads: 1 - output_channels: 4 - width: 1 - 26: - height: 1 - input_channels: 12 - num_threads: 1 - output_channels: 48 - width: 20 - 27: - height: 12 - input_channels: 48 - num_threads: 1 - output_channels: 32 - width: 8 - 28: - height: 8 - input_channels: 12 - num_threads: 5 - output_channels: 32 - width: 8 - 29: - height: 8 - input_channels: 12 - num_threads: 1 - output_channels: 64 - width: 1 - 30: - height: 8 - input_channels: 20 - num_threads: 2 - output_channels: 64 - width: 4 - 31: - height: 4 - input_channels: 12 - num_threads: 2 - output_channels: 32 - width: 8 - 32: - height: 15 - input_channels: 32 - num_threads: 5 - output_channels: 8 - width: 14 - 33: - height: 5 - input_channels: 4 - num_threads: 5 - output_channels: 16 - width: 20 - 34: - height: 4 - input_channels: 20 - num_threads: 1 - output_channels: 4 - width: 1 - 35: - height: 5 - input_channels: 20 - num_threads: 1 - output_channels: 48 - width: 20 - 36: - height: 1 - input_channels: 16 - num_threads: 1 - output_channels: 28 - width: 4 - 37: - height: 12 - input_channels: 12 - num_threads: 1 - output_channels: 4 - width: 4 - 38: - height: 4 - input_channels: 16 - num_threads: 1 - output_channels: 12 - width: 4 - 39: - height: 1 - input_channels: 4 - num_threads: 1 - output_channels: 12 - width: 14 diff --git a/archived/test/integration_test/test_single_op_models/test_conv2d/test_conv2d_shallowin.py b/archived/test/integration_test/test_single_op_models/test_conv2d/test_conv2d_shallowin.py deleted file mode 100644 index 641551c78..000000000 --- a/archived/test/integration_test/test_single_op_models/test_conv2d/test_conv2d_shallowin.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from tflite2xcore.model_generation import Configuration - -from . import Conv2dProperTestModelGenerator -from . 
import ( # pylint: disable=unused-import - test_output, -) - - -# ---------------------------------------------------------------------------- -# GENERATORS -# ---------------------------------------------------------------------------- - - -class Conv2dShallowinTestModelGenerator(Conv2dProperTestModelGenerator): - def _set_config(self, cfg: Configuration) -> None: - cfg.setdefault("input_channels", 4) - super()._set_config(cfg) - - def check_config(self) -> None: - super().check_config() - assert ( - self._config["K_w"] * self._config["input_channels"] <= 32 - ), "K_w * input_channels > 32 is reserved for general conv2d testing" - assert ( - self._config["K_h"] != 1 or self._config["K_w"] != 1 - ), "1x1 kernel is reserved for conv2d_1x1 testing" - - -GENERATOR = Conv2dShallowinTestModelGenerator - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_single_op_models/test_conv2d/test_conv2d_shallowin.yml b/archived/test/integration_test/test_single_op_models/test_conv2d/test_conv2d_shallowin.yml deleted file mode 100644 index d5e70ba65..000000000 --- a/archived/test/integration_test/test_single_op_models/test_conv2d/test_conv2d_shallowin.yml +++ /dev/null @@ -1,483 +0,0 @@ -# Copyright (c) 2020, XMOS Ltd, All rights reserved -# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK -default: - 0: - K_h: 7 - K_w: 2 - height: 15 - input_channels: 4 - num_threads: 1 - output_channels: 64 - padding: same - strides: - - 1 - - 2 - width: 7 - 1: - K_h: 1 - K_w: 4 - height: 5 - input_channels: 8 - num_threads: 2 - output_channels: 28 - padding: valid - strides: - - 1 - - 2 - width: 14 - 2: - K_h: 3 - K_w: 4 - height: 15 - input_channels: 4 - num_threads: 1 - output_channels: 64 - padding: same - strides: - - 2 - - 1 - width: 20 - 3: - K_h: 2 - K_w: 4 - height: 8 - input_channels: 8 - num_threads: 2 - output_channels: 52 - padding: same - strides: - - 2 - - 1 - width: 8 - 4: - K_h: 7 - K_w: 1 - height: 8 - input_channels: 16 - num_threads: 1 - output_channels: 64 - padding: valid - strides: - - 1 - - 1 - width: 20 - 5: - K_h: 2 - K_w: 2 - height: 8 - input_channels: 8 - num_threads: 2 - output_channels: 8 - padding: same - strides: - - 1 - - 1 - width: 8 - 6: - K_h: 7 - K_w: 1 - height: 12 - input_channels: 16 - num_threads: 5 - output_channels: 28 - padding: valid - strides: - - 1 - - 2 - width: 14 - 7: - K_h: 2 - K_w: 3 - height: 1 - input_channels: 4 - num_threads: 1 - output_channels: 28 - padding: same - strides: - - 2 - - 2 - width: 8 - 8: - K_h: 7 - K_w: 1 - height: 1 - input_channels: 8 - num_threads: 1 - output_channels: 16 - padding: same - strides: - - 1 - - 1 - width: 1 - 9: - K_h: 5 - K_w: 8 - height: 12 - input_channels: 4 - num_threads: 5 - output_channels: 64 - padding: same - strides: - - 2 - - 1 - width: 8 - 10: - K_h: 2 - K_w: 3 - height: 4 - input_channels: 8 - num_threads: 2 - output_channels: 52 - padding: same - strides: - - 2 - - 1 - width: 7 - 11: - K_h: 9 - K_w: 1 - height: 4 - input_channels: 8 - num_threads: 2 - output_channels: 8 - padding: same - strides: - - 1 - - 1 - width: 20 - 12: - K_h: 5 - K_w: 4 - height: 15 - input_channels: 8 - num_threads: 2 - output_channels: 4 - padding: same - strides: - - 2 - - 2 - width: 8 - 13: - K_h: 2 - K_w: 1 - height: 15 - input_channels: 32 - num_threads: 1 - output_channels: 28 - padding: same - strides: - - 1 - - 1 - width: 1 - 14: - K_h: 2 - K_w: 3 - height: 5 - input_channels: 4 - num_threads: 5 - output_channels: 16 - padding: same - strides: - - 2 - - 2 - width: 20 - 15: - K_h: 3 - K_w: 1 - 
height: 5 - input_channels: 8 - num_threads: 2 - output_channels: 16 - padding: valid - strides: - - 1 - - 1 - width: 20 - 16: - K_h: 3 - K_w: 1 - height: 8 - input_channels: 32 - num_threads: 5 - output_channels: 12 - padding: valid - strides: - - 2 - - 1 - width: 8 - 17: - K_h: 5 - K_w: 3 - height: 5 - input_channels: 4 - num_threads: 2 - output_channels: 12 - padding: same - strides: - - 1 - - 2 - width: 8 - 18: - K_h: 2 - K_w: 8 - height: 1 - input_channels: 4 - num_threads: 1 - output_channels: 12 - padding: same - strides: - - 2 - - 2 - width: 7 - 19: - K_h: 5 - K_w: 4 - height: 15 - input_channels: 4 - num_threads: 2 - output_channels: 4 - padding: valid - strides: - - 1 - - 1 - width: 7 - 20: - K_h: 3 - K_w: 5 - height: 8 - input_channels: 4 - num_threads: 1 - output_channels: 52 - padding: same - strides: - - 2 - - 2 - width: 4 - 21: - K_h: 3 - K_w: 3 - height: 8 - input_channels: 8 - num_threads: 2 - output_channels: 16 - padding: same - strides: - - 1 - - 2 - width: 4 - 22: - K_h: 2 - K_w: 4 - height: 8 - input_channels: 4 - num_threads: 2 - output_channels: 8 - padding: valid - strides: - - 1 - - 2 - width: 14 - 23: - K_h: 5 - K_w: 2 - height: 15 - input_channels: 16 - num_threads: 5 - output_channels: 16 - padding: same - strides: - - 2 - - 1 - width: 7 - 24: - K_h: 2 - K_w: 2 - height: 15 - input_channels: 8 - num_threads: 1 - output_channels: 64 - padding: same - strides: - - 2 - - 2 - width: 20 - 25: - K_h: 3 - K_w: 8 - height: 8 - input_channels: 4 - num_threads: 2 - output_channels: 12 - padding: valid - strides: - - 2 - - 1 - width: 20 - 26: - K_h: 7 - K_w: 2 - height: 4 - input_channels: 16 - num_threads: 2 - output_channels: 4 - padding: same - strides: - - 2 - - 1 - width: 7 - 27: - K_h: 1 - K_w: 2 - height: 15 - input_channels: 16 - num_threads: 5 - output_channels: 28 - padding: same - strides: - - 2 - - 1 - width: 20 - 28: - K_h: 3 - K_w: 5 - height: 12 - input_channels: 4 - num_threads: 1 - output_channels: 48 - padding: same - strides: - - 1 - - 1 - width: 8 - 29: - K_h: 5 - K_w: 2 - height: 8 - input_channels: 16 - num_threads: 2 - output_channels: 48 - padding: same - strides: - - 2 - - 1 - width: 20 - 30: - K_h: 7 - K_w: 5 - height: 1 - input_channels: 4 - num_threads: 1 - output_channels: 16 - padding: same - strides: - - 1 - - 2 - width: 4 - 31: - K_h: 9 - K_w: 3 - height: 5 - input_channels: 4 - num_threads: 2 - output_channels: 4 - padding: same - strides: - - 2 - - 2 - width: 14 - 32: - K_h: 3 - K_w: 4 - height: 4 - input_channels: 4 - num_threads: 2 - output_channels: 32 - padding: same - strides: - - 1 - - 1 - width: 4 - 33: - K_h: 9 - K_w: 2 - height: 15 - input_channels: 4 - num_threads: 1 - output_channels: 16 - padding: same - strides: - - 1 - - 1 - width: 1 - 34: - K_h: 2 - K_w: 1 - height: 8 - input_channels: 16 - num_threads: 1 - output_channels: 28 - padding: same - strides: - - 1 - - 1 - width: 4 - 35: - K_h: 5 - K_w: 1 - height: 8 - input_channels: 32 - num_threads: 1 - output_channels: 12 - padding: valid - strides: - - 2 - - 2 - width: 8 - 36: - K_h: 3 - K_w: 2 - height: 1 - input_channels: 4 - num_threads: 1 - output_channels: 8 - padding: same - strides: - - 2 - - 2 - width: 1 - 37: - K_h: 1 - K_w: 2 - height: 5 - input_channels: 8 - num_threads: 5 - output_channels: 12 - padding: valid - strides: - - 1 - - 2 - width: 14 - 38: - K_h: 2 - K_w: 1 - height: 12 - input_channels: 8 - num_threads: 5 - output_channels: 8 - padding: same - strides: - - 2 - - 1 - width: 8 - 39: - K_h: 7 - K_w: 8 - height: 8 - input_channels: 4 - num_threads: 5 - 
output_channels: 28 - padding: same - strides: - - 2 - - 1 - width: 8 diff --git a/archived/test/integration_test/test_single_op_models/test_conv2d/test_depthwise_conv2d.py b/archived/test/integration_test/test_single_op_models/test_conv2d/test_depthwise_conv2d.py deleted file mode 100644 index 1dacd6cb8..000000000 --- a/archived/test/integration_test/test_single_op_models/test_conv2d/test_depthwise_conv2d.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest -import tensorflow as tf -from typing import Tuple, Optional - -from tflite2xcore.model_generation.utils import parse_init_config - -from . import ( - AbstractConv2dTestModelGenerator, - ChannelPreservingOpTestModelGenerator, -) -from . import ( # pylint: disable=unused-import - test_output, -) - - -# ---------------------------------------------------------------------------- -# GENERATORS -# ---------------------------------------------------------------------------- - - -class DepthwiseConv2dTestModelGenerator( - ChannelPreservingOpTestModelGenerator, AbstractConv2dTestModelGenerator -): - def _op_layer( - self, *, input_shape: Optional[Tuple[int, int, int]] = None - ) -> tf.keras.layers.DepthwiseConv2D: - kwargs = {"input_shape": input_shape} if input_shape else {} - cfg = self._config - return tf.keras.layers.DepthwiseConv2D( - kernel_size=(cfg["K_h"], cfg["K_w"]), - depth_multiplier=1, - padding=cfg["padding"], - strides=cfg["strides"], - bias_initializer=parse_init_config(*cfg["bias_init"]), - kernel_initializer=parse_init_config(*cfg["weight_init"]), - **kwargs - ) - - -GENERATOR = DepthwiseConv2dTestModelGenerator - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_single_op_models/test_conv2d/test_depthwise_conv2d.yml b/archived/test/integration_test/test_single_op_models/test_conv2d/test_depthwise_conv2d.yml deleted file mode 100644 index 60f650e19..000000000 --- a/archived/test/integration_test/test_single_op_models/test_conv2d/test_depthwise_conv2d.yml +++ /dev/null @@ -1,443 +0,0 @@ -# Copyright (c) 2020, XMOS Ltd, All rights reserved -# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK -default: - 0: - K_h: 3 - K_w: 5 - channels: 4 - height: 15 - num_threads: 5 - padding: same - strides: - - 1 - - 2 - width: 7 - 1: - K_h: 9 - K_w: 2 - channels: 16 - height: 5 - num_threads: 5 - padding: same - strides: - - 2 - - 1 - width: 8 - 2: - K_h: 2 - K_w: 1 - channels: 48 - height: 8 - num_threads: 2 - padding: same - strides: - - 2 - - 1 - width: 7 - 3: - K_h: 1 - K_w: 2 - channels: 28 - height: 12 - num_threads: 5 - padding: same - strides: - - 2 - - 1 - width: 7 - 4: - K_h: 1 - K_w: 2 - channels: 4 - height: 8 - num_threads: 2 - padding: same - strides: - - 2 - - 1 - width: 14 - 5: - K_h: 9 - K_w: 2 - channels: 16 - height: 15 - num_threads: 1 - padding: valid - strides: - - 2 - - 2 - width: 14 - 6: - K_h: 5 - K_w: 5 - channels: 16 - height: 5 - num_threads: 1 - padding: same - strides: - - 2 - - 1 - width: 14 - 7: - K_h: 5 - K_w: 1 - channels: 8 - height: 4 - num_threads: 1 - padding: same - strides: - - 2 - - 1 - width: 20 - 8: - K_h: 5 - K_w: 8 - channels: 28 - height: 4 - num_threads: 1 - padding: same - strides: - - 1 - - 2 - width: 8 - 9: - K_h: 5 - K_w: 1 - channels: 4 - height: 5 - num_threads: 5 - padding: same - strides: - - 1 - - 1 - width: 20 - 10: - K_h: 2 - K_w: 3 - channels: 16 - height: 15 - num_threads: 2 - padding: same - strides: - - 2 - - 1 - width: 4 - 
11: - K_h: 5 - K_w: 4 - channels: 28 - height: 8 - num_threads: 2 - padding: same - strides: - - 1 - - 2 - width: 7 - 12: - K_h: 7 - K_w: 3 - channels: 8 - height: 15 - num_threads: 1 - padding: valid - strides: - - 1 - - 1 - width: 8 - 13: - K_h: 3 - K_w: 4 - channels: 48 - height: 5 - num_threads: 2 - padding: same - strides: - - 1 - - 1 - width: 8 - 14: - K_h: 7 - K_w: 3 - channels: 4 - height: 8 - num_threads: 5 - padding: valid - strides: - - 2 - - 2 - width: 8 - 15: - K_h: 9 - K_w: 7 - channels: 32 - height: 5 - num_threads: 2 - padding: same - strides: - - 1 - - 1 - width: 7 - 16: - K_h: 1 - K_w: 8 - channels: 16 - height: 5 - num_threads: 2 - padding: same - strides: - - 1 - - 1 - width: 7 - 17: - K_h: 9 - K_w: 4 - channels: 32 - height: 15 - num_threads: 5 - padding: valid - strides: - - 1 - - 2 - width: 14 - 18: - K_h: 1 - K_w: 7 - channels: 52 - height: 5 - num_threads: 1 - padding: same - strides: - - 2 - - 2 - width: 4 - 19: - K_h: 7 - K_w: 4 - channels: 64 - height: 12 - num_threads: 5 - padding: valid - strides: - - 1 - - 1 - width: 20 - 20: - K_h: 7 - K_w: 8 - channels: 64 - height: 4 - num_threads: 1 - padding: same - strides: - - 2 - - 2 - width: 20 - 21: - K_h: 3 - K_w: 4 - channels: 64 - height: 5 - num_threads: 2 - padding: valid - strides: - - 2 - - 1 - width: 4 - 22: - K_h: 9 - K_w: 3 - channels: 12 - height: 8 - num_threads: 1 - padding: same - strides: - - 1 - - 2 - width: 4 - 23: - K_h: 1 - K_w: 7 - channels: 28 - height: 4 - num_threads: 2 - padding: same - strides: - - 2 - - 1 - width: 8 - 24: - K_h: 3 - K_w: 5 - channels: 48 - height: 4 - num_threads: 2 - padding: valid - strides: - - 2 - - 2 - width: 14 - 25: - K_h: 9 - K_w: 7 - channels: 16 - height: 12 - num_threads: 1 - padding: valid - strides: - - 2 - - 2 - width: 20 - 26: - K_h: 5 - K_w: 4 - channels: 4 - height: 12 - num_threads: 1 - padding: valid - strides: - - 2 - - 2 - width: 7 - 27: - K_h: 2 - K_w: 1 - channels: 52 - height: 15 - num_threads: 2 - padding: valid - strides: - - 1 - - 1 - width: 4 - 28: - K_h: 1 - K_w: 3 - channels: 64 - height: 12 - num_threads: 2 - padding: valid - strides: - - 1 - - 2 - width: 8 - 29: - K_h: 7 - K_w: 1 - channels: 64 - height: 5 - num_threads: 1 - padding: same - strides: - - 1 - - 2 - width: 8 - 30: - K_h: 2 - K_w: 3 - channels: 4 - height: 5 - num_threads: 2 - padding: same - strides: - - 2 - - 2 - width: 14 - 31: - K_h: 1 - K_w: 2 - channels: 52 - height: 15 - num_threads: 2 - padding: valid - strides: - - 2 - - 2 - width: 4 - 32: - K_h: 5 - K_w: 7 - channels: 32 - height: 15 - num_threads: 2 - padding: valid - strides: - - 2 - - 2 - width: 14 - 33: - K_h: 7 - K_w: 4 - channels: 12 - height: 5 - num_threads: 1 - padding: same - strides: - - 1 - - 2 - width: 7 - 34: - K_h: 2 - K_w: 4 - channels: 48 - height: 4 - num_threads: 1 - padding: same - strides: - - 1 - - 2 - width: 14 - 35: - K_h: 3 - K_w: 5 - channels: 8 - height: 12 - num_threads: 5 - padding: same - strides: - - 1 - - 2 - width: 8 - 36: - K_h: 9 - K_w: 5 - channels: 16 - height: 15 - num_threads: 1 - padding: same - strides: - - 2 - - 1 - width: 14 - 37: - K_h: 2 - K_w: 2 - channels: 8 - height: 8 - num_threads: 1 - padding: same - strides: - - 2 - - 1 - width: 7 - 38: - K_h: 3 - K_w: 3 - channels: 8 - height: 15 - num_threads: 1 - padding: valid - strides: - - 2 - - 2 - width: 14 - 39: - K_h: 1 - K_w: 1 - channels: 48 - height: 1 - num_threads: 1 - padding: valid - strides: - - 2 - - 1 - width: 1 diff --git 
a/archived/test/integration_test/test_single_op_models/test_conv2d/test_explicit_padding/__init__.py b/archived/test/integration_test/test_single_op_models/test_conv2d/test_explicit_padding/__init__.py deleted file mode 100644 index 7fc239069..000000000 --- a/archived/test/integration_test/test_single_op_models/test_conv2d/test_explicit_padding/__init__.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest -import tensorflow as tf - -from tflite2xcore.utils import asserting_cast -from tflite2xcore.model_generation import Configuration - -from .. import ( - PaddingMixin, - AbstractConv2dTestModelGenerator, - test_output, -) - - -# ---------------------------------------------------------------------------- -# GENERATORS -# ---------------------------------------------------------------------------- - - -class ExplicitlyPaddedConv2dMixin(PaddingMixin, AbstractConv2dTestModelGenerator): - def _set_config(self, cfg: Configuration) -> None: - assert ( - "padding" not in cfg - ), f"padding config should be defined by {self._PAD_KEYS}" - cfg["padding"] = "valid" - - super()._set_config(cfg) - - @property - def _total_width(self) -> int: - return ( - super()._total_width - + asserting_cast(int, self._config["pad_l"]) - + asserting_cast(int, self._config["pad_r"]) - ) - - @property - def _total_height(self) -> int: - return ( - super()._total_height - + asserting_cast(int, self._config["pad_t"]) - + asserting_cast(int, self._config["pad_b"]) - ) - - def _build_core_model(self) -> tf.keras.Model: - - return tf.keras.Sequential( - layers=[self._pad_layer(input_shape=self._input_shape), self._op_layer()] - ) - diff --git a/archived/test/integration_test/test_single_op_models/test_conv2d/test_explicit_padding/test_padded_conv2d.py b/archived/test/integration_test/test_single_op_models/test_conv2d/test_explicit_padding/test_padded_conv2d.py deleted file mode 100644 index fbf6e93dc..000000000 --- a/archived/test/integration_test/test_single_op_models/test_conv2d/test_explicit_padding/test_padded_conv2d.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from ..test_conv2d import Conv2dTestModelGenerator -from . import ExplicitlyPaddedConv2dMixin -from . 
import ( # pylint: disable=unused-import - test_output, -) - - -# ---------------------------------------------------------------------------- -# GENERATORS -# ---------------------------------------------------------------------------- - - -class PaddedConv2dTestModelGenerator( - ExplicitlyPaddedConv2dMixin, Conv2dTestModelGenerator -): - pass - - -GENERATOR = PaddedConv2dTestModelGenerator - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_single_op_models/test_conv2d/test_explicit_padding/test_padded_conv2d.yml b/archived/test/integration_test/test_single_op_models/test_conv2d/test_explicit_padding/test_padded_conv2d.yml deleted file mode 100644 index 4d7105d0e..000000000 --- a/archived/test/integration_test/test_single_op_models/test_conv2d/test_explicit_padding/test_padded_conv2d.yml +++ /dev/null @@ -1,303 +0,0 @@ -# Copyright (c) 2020, XMOS Ltd, All rights reserved -# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK -default: - 0: - K_h: 9 - K_w: 7 - height: 8 - input_channels: 32 - num_threads: 1 - output_channels: 28 - pad_b: 2 - pad_l: 1 - pad_r: 0 - pad_t: 0 - strides: - - 2 - - 2 - width: 7 - 1: - K_h: 2 - K_w: 5 - height: 8 - input_channels: 16 - num_threads: 5 - output_channels: 16 - pad_b: 1 - pad_l: 0 - pad_r: 1 - pad_t: 1 - strides: - - 2 - - 1 - width: 14 - 2: - K_h: 9 - K_w: 7 - height: 12 - input_channels: 16 - num_threads: 2 - output_channels: 28 - pad_b: 2 - pad_l: 0 - pad_r: 0 - pad_t: 1 - strides: - - 1 - - 2 - width: 20 - 3: - K_h: 5 - K_w: 7 - height: 5 - input_channels: 16 - num_threads: 1 - output_channels: 48 - pad_b: 1 - pad_l: 0 - pad_r: 2 - pad_t: 0 - strides: - - 1 - - 2 - width: 7 - 4: - K_h: 3 - K_w: 5 - height: 8 - input_channels: 16 - num_threads: 1 - output_channels: 64 - pad_b: 2 - pad_l: 2 - pad_r: 1 - pad_t: 2 - strides: - - 1 - - 2 - width: 20 - 5: - K_h: 9 - K_w: 5 - height: 15 - input_channels: 36 - num_threads: 5 - output_channels: 12 - pad_b: 2 - pad_l: 1 - pad_r: 1 - pad_t: 2 - strides: - - 1 - - 1 - width: 20 - 6: - K_h: 7 - K_w: 5 - height: 12 - input_channels: 8 - num_threads: 1 - output_channels: 16 - pad_b: 1 - pad_l: 1 - pad_r: 2 - pad_t: 2 - strides: - - 2 - - 2 - width: 8 - 7: - K_h: 7 - K_w: 5 - height: 15 - input_channels: 48 - num_threads: 5 - output_channels: 4 - pad_b: 0 - pad_l: 2 - pad_r: 2 - pad_t: 1 - strides: - - 1 - - 2 - width: 7 - 8: - K_h: 9 - K_w: 9 - height: 5 - input_channels: 8 - num_threads: 5 - output_channels: 12 - pad_b: 2 - pad_l: 0 - pad_r: 2 - pad_t: 2 - strides: - - 1 - - 2 - width: 8 - 9: - K_h: 3 - K_w: 7 - height: 5 - input_channels: 36 - num_threads: 2 - output_channels: 4 - pad_b: 1 - pad_l: 2 - pad_r: 2 - pad_t: 1 - strides: - - 1 - - 2 - width: 4 - 10: - K_h: 9 - K_w: 4 - height: 12 - input_channels: 32 - num_threads: 2 - output_channels: 32 - pad_b: 0 - pad_l: 1 - pad_r: 0 - pad_t: 2 - strides: - - 2 - - 1 - width: 20 - 11: - K_h: 7 - K_w: 9 - height: 12 - input_channels: 16 - num_threads: 2 - output_channels: 52 - pad_b: 1 - pad_l: 2 - pad_r: 2 - pad_t: 2 - strides: - - 2 - - 2 - width: 20 - 12: - K_h: 5 - K_w: 4 - height: 15 - input_channels: 48 - num_threads: 1 - output_channels: 52 - pad_b: 0 - pad_l: 1 - pad_r: 2 - pad_t: 0 - strides: - - 1 - - 2 - width: 1 - 13: - K_h: 9 - K_w: 9 - height: 12 - input_channels: 12 - num_threads: 2 - output_channels: 48 - pad_b: 2 - pad_l: 1 - pad_r: 0 - pad_t: 0 - strides: - - 1 - - 1 - width: 14 - 14: - K_h: 5 - K_w: 4 - height: 15 - input_channels: 20 - num_threads: 5 - output_channels: 28 - pad_b: 1 - pad_l: 1 - 
pad_r: 2 - pad_t: 2 - strides: - - 2 - - 2 - width: 14 - 15: - K_h: 3 - K_w: 4 - height: 15 - input_channels: 16 - num_threads: 5 - output_channels: 48 - pad_b: 2 - pad_l: 0 - pad_r: 0 - pad_t: 2 - strides: - - 2 - - 2 - width: 14 - 16: - K_h: 5 - K_w: 3 - height: 8 - input_channels: 16 - num_threads: 1 - output_channels: 48 - pad_b: 1 - pad_l: 1 - pad_r: 1 - pad_t: 2 - strides: - - 1 - - 1 - width: 20 - 17: - K_h: 7 - K_w: 3 - height: 8 - input_channels: 36 - num_threads: 1 - output_channels: 64 - pad_b: 0 - pad_l: 1 - pad_r: 1 - pad_t: 1 - strides: - - 1 - - 1 - width: 8 - 18: - K_h: 1 - K_w: 3 - height: 5 - input_channels: 16 - num_threads: 1 - output_channels: 32 - pad_b: 0 - pad_l: 0 - pad_r: 1 - pad_t: 0 - strides: - - 1 - - 1 - width: 20 - 19: - K_h: 3 - K_w: 7 - height: 5 - input_channels: 20 - num_threads: 1 - output_channels: 8 - pad_b: 1 - pad_l: 0 - pad_r: 1 - pad_t: 2 - strides: - - 1 - - 2 - width: 8 diff --git a/archived/test/integration_test/test_single_op_models/test_conv2d/test_explicit_padding/test_padded_conv2d_shallowin.py b/archived/test/integration_test/test_single_op_models/test_conv2d/test_explicit_padding/test_padded_conv2d_shallowin.py deleted file mode 100644 index 6ce537062..000000000 --- a/archived/test/integration_test/test_single_op_models/test_conv2d/test_explicit_padding/test_padded_conv2d_shallowin.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from ..test_conv2d_shallowin import Conv2dShallowinTestModelGenerator -from . import ExplicitlyPaddedConv2dMixin -from . import ( # pylint: disable=unused-import - test_output, -) - - -# ---------------------------------------------------------------------------- -# GENERATORS -# ---------------------------------------------------------------------------- - - -class PaddedConv2dShallowinTestModelGenerator( - ExplicitlyPaddedConv2dMixin, Conv2dShallowinTestModelGenerator -): - pass - - -GENERATOR = PaddedConv2dShallowinTestModelGenerator - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_single_op_models/test_conv2d/test_explicit_padding/test_padded_conv2d_shallowin.yml b/archived/test/integration_test/test_single_op_models/test_conv2d/test_explicit_padding/test_padded_conv2d_shallowin.yml deleted file mode 100644 index 973491da9..000000000 --- a/archived/test/integration_test/test_single_op_models/test_conv2d/test_explicit_padding/test_padded_conv2d_shallowin.yml +++ /dev/null @@ -1,303 +0,0 @@ -# Copyright (c) 2020, XMOS Ltd, All rights reserved -# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK -default: - 0: - K_h: 9 - K_w: 4 - height: 12 - input_channels: 8 - num_threads: 1 - output_channels: 12 - pad_b: 0 - pad_l: 2 - pad_r: 1 - pad_t: 1 - strides: - - 2 - - 1 - width: 14 - 1: - K_h: 5 - K_w: 5 - height: 15 - input_channels: 4 - num_threads: 1 - output_channels: 52 - pad_b: 1 - pad_l: 0 - pad_r: 1 - pad_t: 2 - strides: - - 1 - - 2 - width: 7 - 2: - K_h: 5 - K_w: 3 - height: 8 - input_channels: 4 - num_threads: 5 - output_channels: 8 - pad_b: 1 - pad_l: 2 - pad_r: 2 - pad_t: 2 - strides: - - 2 - - 2 - width: 7 - 3: - K_h: 2 - K_w: 4 - height: 4 - input_channels: 4 - num_threads: 1 - output_channels: 52 - pad_b: 0 - pad_l: 2 - pad_r: 2 - pad_t: 1 - strides: - - 2 - - 1 - width: 1 - 4: - K_h: 3 - K_w: 4 - height: 4 - input_channels: 8 - num_threads: 1 - output_channels: 32 - pad_b: 2 - pad_l: 2 - pad_r: 1 - pad_t: 1 - strides: - - 2 - - 1 - width: 
14 - 5: - K_h: 9 - K_w: 3 - height: 12 - input_channels: 4 - num_threads: 5 - output_channels: 8 - pad_b: 1 - pad_l: 2 - pad_r: 2 - pad_t: 1 - strides: - - 1 - - 2 - width: 7 - 6: - K_h: 7 - K_w: 4 - height: 5 - input_channels: 8 - num_threads: 5 - output_channels: 16 - pad_b: 2 - pad_l: 0 - pad_r: 0 - pad_t: 0 - strides: - - 2 - - 2 - width: 7 - 7: - K_h: 3 - K_w: 7 - height: 5 - input_channels: 4 - num_threads: 5 - output_channels: 28 - pad_b: 2 - pad_l: 2 - pad_r: 0 - pad_t: 1 - strides: - - 2 - - 2 - width: 8 - 8: - K_h: 5 - K_w: 2 - height: 5 - input_channels: 4 - num_threads: 2 - output_channels: 16 - pad_b: 2 - pad_l: 0 - pad_r: 1 - pad_t: 2 - strides: - - 1 - - 2 - width: 7 - 9: - K_h: 7 - K_w: 2 - height: 8 - input_channels: 16 - num_threads: 2 - output_channels: 48 - pad_b: 2 - pad_l: 0 - pad_r: 0 - pad_t: 1 - strides: - - 2 - - 2 - width: 20 - 10: - K_h: 3 - K_w: 4 - height: 15 - input_channels: 8 - num_threads: 2 - output_channels: 12 - pad_b: 1 - pad_l: 0 - pad_r: 1 - pad_t: 2 - strides: - - 2 - - 2 - width: 14 - 11: - K_h: 7 - K_w: 4 - height: 15 - input_channels: 4 - num_threads: 2 - output_channels: 64 - pad_b: 0 - pad_l: 2 - pad_r: 2 - pad_t: 0 - strides: - - 1 - - 2 - width: 4 - 12: - K_h: 7 - K_w: 3 - height: 8 - input_channels: 4 - num_threads: 1 - output_channels: 16 - pad_b: 2 - pad_l: 0 - pad_r: 2 - pad_t: 2 - strides: - - 2 - - 1 - width: 1 - 13: - K_h: 3 - K_w: 4 - height: 5 - input_channels: 8 - num_threads: 2 - output_channels: 12 - pad_b: 1 - pad_l: 1 - pad_r: 0 - pad_t: 0 - strides: - - 2 - - 1 - width: 20 - 14: - K_h: 1 - K_w: 8 - height: 8 - input_channels: 4 - num_threads: 1 - output_channels: 64 - pad_b: 0 - pad_l: 1 - pad_r: 1 - pad_t: 0 - strides: - - 2 - - 1 - width: 8 - 15: - K_h: 5 - K_w: 8 - height: 15 - input_channels: 4 - num_threads: 5 - output_channels: 48 - pad_b: 0 - pad_l: 2 - pad_r: 0 - pad_t: 0 - strides: - - 2 - - 2 - width: 14 - 16: - K_h: 3 - K_w: 5 - height: 8 - input_channels: 4 - num_threads: 1 - output_channels: 16 - pad_b: 0 - pad_l: 0 - pad_r: 2 - pad_t: 0 - strides: - - 2 - - 1 - width: 8 - 17: - K_h: 3 - K_w: 3 - height: 15 - input_channels: 4 - num_threads: 1 - output_channels: 52 - pad_b: 2 - pad_l: 0 - pad_r: 2 - pad_t: 0 - strides: - - 2 - - 1 - width: 8 - 18: - K_h: 7 - K_w: 4 - height: 12 - input_channels: 4 - num_threads: 2 - output_channels: 4 - pad_b: 1 - pad_l: 1 - pad_r: 1 - pad_t: 1 - strides: - - 2 - - 1 - width: 8 - 19: - K_h: 5 - K_w: 2 - height: 8 - input_channels: 4 - num_threads: 2 - output_channels: 52 - pad_b: 2 - pad_l: 0 - pad_r: 0 - pad_t: 1 - strides: - - 2 - - 2 - width: 8 diff --git a/archived/test/integration_test/test_single_op_models/test_conv2d/test_explicit_padding/test_padded_depthwise_conv2d.py b/archived/test/integration_test/test_single_op_models/test_conv2d/test_explicit_padding/test_padded_depthwise_conv2d.py deleted file mode 100644 index de6952efc..000000000 --- a/archived/test/integration_test/test_single_op_models/test_conv2d/test_explicit_padding/test_padded_depthwise_conv2d.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from ..test_depthwise_conv2d import DepthwiseConv2dTestModelGenerator -from . import ExplicitlyPaddedConv2dMixin -from . 
import ( # pylint: disable=unused-import - test_output, -) - - -# ---------------------------------------------------------------------------- -# GENERATORS -# ---------------------------------------------------------------------------- - - -class PaddedDepthwiseConv2dTestModelGenerator( - ExplicitlyPaddedConv2dMixin, DepthwiseConv2dTestModelGenerator -): - pass - - -GENERATOR = PaddedDepthwiseConv2dTestModelGenerator - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_single_op_models/test_conv2d/test_explicit_padding/test_padded_depthwise_conv2d.yml b/archived/test/integration_test/test_single_op_models/test_conv2d/test_explicit_padding/test_padded_depthwise_conv2d.yml deleted file mode 100644 index 8ea18d925..000000000 --- a/archived/test/integration_test/test_single_op_models/test_conv2d/test_explicit_padding/test_padded_depthwise_conv2d.yml +++ /dev/null @@ -1,283 +0,0 @@ -# Copyright (c) 2020, XMOS Ltd, All rights reserved -# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK -default: - 0: - K_h: 7 - K_w: 4 - channels: 12 - height: 8 - num_threads: 1 - pad_b: 1 - pad_l: 1 - pad_r: 0 - pad_t: 1 - strides: - - 2 - - 1 - width: 8 - 1: - K_h: 9 - K_w: 1 - channels: 12 - height: 8 - num_threads: 1 - pad_b: 1 - pad_l: 0 - pad_r: 0 - pad_t: 0 - strides: - - 1 - - 2 - width: 4 - 2: - K_h: 3 - K_w: 3 - channels: 12 - height: 8 - num_threads: 2 - pad_b: 2 - pad_l: 2 - pad_r: 1 - pad_t: 2 - strides: - - 2 - - 2 - width: 14 - 3: - K_h: 7 - K_w: 3 - channels: 12 - height: 15 - num_threads: 5 - pad_b: 2 - pad_l: 2 - pad_r: 2 - pad_t: 2 - strides: - - 2 - - 2 - width: 14 - 4: - K_h: 9 - K_w: 4 - channels: 52 - height: 12 - num_threads: 1 - pad_b: 0 - pad_l: 1 - pad_r: 2 - pad_t: 2 - strides: - - 2 - - 1 - width: 14 - 5: - K_h: 9 - K_w: 3 - channels: 28 - height: 12 - num_threads: 2 - pad_b: 1 - pad_l: 2 - pad_r: 1 - pad_t: 0 - strides: - - 2 - - 2 - width: 20 - 6: - K_h: 5 - K_w: 2 - channels: 16 - height: 12 - num_threads: 2 - pad_b: 0 - pad_l: 0 - pad_r: 1 - pad_t: 0 - strides: - - 2 - - 1 - width: 20 - 7: - K_h: 5 - K_w: 3 - channels: 4 - height: 8 - num_threads: 1 - pad_b: 0 - pad_l: 1 - pad_r: 1 - pad_t: 2 - strides: - - 1 - - 2 - width: 20 - 8: - K_h: 7 - K_w: 5 - channels: 52 - height: 5 - num_threads: 1 - pad_b: 2 - pad_l: 0 - pad_r: 2 - pad_t: 0 - strides: - - 2 - - 2 - width: 20 - 9: - K_h: 7 - K_w: 7 - channels: 8 - height: 15 - num_threads: 1 - pad_b: 1 - pad_l: 1 - pad_r: 2 - pad_t: 0 - strides: - - 1 - - 1 - width: 7 - 10: - K_h: 5 - K_w: 3 - channels: 48 - height: 15 - num_threads: 5 - pad_b: 0 - pad_l: 2 - pad_r: 1 - pad_t: 1 - strides: - - 1 - - 1 - width: 14 - 11: - K_h: 7 - K_w: 7 - channels: 52 - height: 8 - num_threads: 1 - pad_b: 1 - pad_l: 0 - pad_r: 1 - pad_t: 0 - strides: - - 1 - - 1 - width: 8 - 12: - K_h: 7 - K_w: 4 - channels: 32 - height: 15 - num_threads: 1 - pad_b: 2 - pad_l: 2 - pad_r: 2 - pad_t: 1 - strides: - - 2 - - 2 - width: 8 - 13: - K_h: 2 - K_w: 8 - channels: 32 - height: 8 - num_threads: 2 - pad_b: 1 - pad_l: 0 - pad_r: 1 - pad_t: 0 - strides: - - 1 - - 2 - width: 7 - 14: - K_h: 9 - K_w: 5 - channels: 12 - height: 8 - num_threads: 2 - pad_b: 2 - pad_l: 0 - pad_r: 2 - pad_t: 2 - strides: - - 2 - - 1 - width: 8 - 15: - K_h: 3 - K_w: 7 - channels: 12 - height: 12 - num_threads: 2 - pad_b: 2 - pad_l: 1 - pad_r: 1 - pad_t: 0 - strides: - - 2 - - 2 - width: 20 - 16: - K_h: 7 - K_w: 5 - channels: 8 - height: 5 - num_threads: 1 - pad_b: 2 - pad_l: 0 - pad_r: 2 - pad_t: 1 - strides: - - 2 - - 1 - width: 14 - 17: - K_h: 5 - 
K_w: 4 - channels: 28 - height: 4 - num_threads: 2 - pad_b: 0 - pad_l: 2 - pad_r: 1 - pad_t: 2 - strides: - - 1 - - 2 - width: 8 - 18: - K_h: 3 - K_w: 1 - channels: 32 - height: 12 - num_threads: 5 - pad_b: 2 - pad_l: 0 - pad_r: 0 - pad_t: 2 - strides: - - 2 - - 2 - width: 7 - 19: - K_h: 3 - K_w: 3 - channels: 28 - height: 1 - num_threads: 1 - pad_b: 0 - pad_l: 1 - pad_r: 1 - pad_t: 2 - strides: - - 2 - - 1 - width: 1 diff --git a/archived/test/integration_test/test_single_op_models/test_conv2d/test_fused_relu/__init__.py b/archived/test/integration_test/test_single_op_models/test_conv2d/test_fused_relu/__init__.py deleted file mode 100644 index fdcfb1eb2..000000000 --- a/archived/test/integration_test/test_single_op_models/test_conv2d/test_fused_relu/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import tensorflow as tf - -from tflite2xcore.model_generation import Configuration - -from .. import ( - AbstractConv2dTestModelGenerator, - test_output, -) - - -# ---------------------------------------------------------------------------- -# GENERATORS -# ---------------------------------------------------------------------------- - - -class FusedCustomReluMixin(AbstractConv2dTestModelGenerator): - def _set_config(self, cfg: Configuration) -> None: - self._config["max_value"] = cfg.pop("max_value") - super()._set_config(cfg) - - def check_config(self) -> None: - super().check_config() - max_value = self._config["max_value"] - assert max_value > 0, f"max_value must be greater than 0, got {max_value}" - assert max_value != 6, f"max_value cannot be equal to 6 (Relu6 is not custom)" - - def _build_core_model(self) -> tf.keras.Model: - return tf.keras.Sequential( - layers=[ - self._op_layer(input_shape=self._input_shape), - tf.keras.layers.ReLU(max_value=self._config["max_value"]), - ] - ) diff --git a/archived/test/integration_test/test_single_op_models/test_conv2d/test_fused_relu/test_custom_relu_conv2d.py b/archived/test/integration_test/test_single_op_models/test_conv2d/test_fused_relu/test_custom_relu_conv2d.py deleted file mode 100644 index 0b0702b15..000000000 --- a/archived/test/integration_test/test_single_op_models/test_conv2d/test_fused_relu/test_custom_relu_conv2d.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest -import tensorflow as tf - -from ..test_conv2d import Conv2dTestModelGenerator -from . import FusedCustomReluMixin -from . 
import ( # pylint: disable=unused-import - test_output, -) - - -# ---------------------------------------------------------------------------- -# GENERATORS -# ---------------------------------------------------------------------------- - - -class CustomReluConv2dTestModelGenerator( - FusedCustomReluMixin, Conv2dTestModelGenerator -): - pass - - -GENERATOR = CustomReluConv2dTestModelGenerator - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture -def abs_output_tolerance() -> None: - return - - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_single_op_models/test_conv2d/test_fused_relu/test_custom_relu_conv2d.yml b/archived/test/integration_test/test_single_op_models/test_conv2d/test_fused_relu/test_custom_relu_conv2d.yml deleted file mode 100644 index e90de9f07..000000000 --- a/archived/test/integration_test/test_single_op_models/test_conv2d/test_fused_relu/test_custom_relu_conv2d.yml +++ /dev/null @@ -1,263 +0,0 @@ -# Copyright (c) 2020, XMOS Ltd, All rights reserved -# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK -default: - 0: - K_h: 2 - K_w: 3 - height: 5 - input_channels: 32 - max_value: 1.0 - num_threads: 1 - output_channels: 4 - padding: same - strides: - - 1 - - 2 - width: 20 - 1: - K_h: 5 - K_w: 4 - height: 12 - input_channels: 16 - max_value: 0.5 - num_threads: 2 - output_channels: 4 - padding: valid - strides: - - 1 - - 2 - width: 4 - 2: - K_h: 7 - K_w: 7 - height: 8 - input_channels: 36 - max_value: 2.5 - num_threads: 5 - output_channels: 8 - padding: same - strides: - - 2 - - 2 - width: 7 - 3: - K_h: 1 - K_w: 3 - height: 5 - input_channels: 12 - max_value: 1.5 - num_threads: 5 - output_channels: 8 - padding: same - strides: - - 1 - - 1 - width: 20 - 4: - K_h: 3 - K_w: 4 - height: 5 - input_channels: 12 - max_value: 1.0 - num_threads: 1 - output_channels: 32 - padding: same - strides: - - 2 - - 2 - width: 7 - 5: - K_h: 5 - K_w: 3 - height: 1 - input_channels: 36 - max_value: 4.5 - num_threads: 1 - output_channels: 12 - padding: same - strides: - - 1 - - 2 - width: 20 - 6: - K_h: 5 - K_w: 7 - height: 15 - input_channels: 48 - max_value: 1.0 - num_threads: 1 - output_channels: 52 - padding: valid - strides: - - 1 - - 1 - width: 7 - 7: - K_h: 3 - K_w: 4 - height: 12 - input_channels: 48 - max_value: 2.5 - num_threads: 1 - output_channels: 48 - padding: same - strides: - - 1 - - 1 - width: 14 - 8: - K_h: 1 - K_w: 9 - height: 4 - input_channels: 36 - max_value: 2.5 - num_threads: 1 - output_channels: 48 - padding: same - strides: - - 2 - - 2 - width: 8 - 9: - K_h: 1 - K_w: 5 - height: 8 - input_channels: 12 - max_value: 1.5 - num_threads: 1 - output_channels: 32 - padding: valid - strides: - - 2 - - 1 - width: 7 - 10: - K_h: 3 - K_w: 5 - height: 12 - input_channels: 12 - max_value: 4.0 - num_threads: 2 - output_channels: 48 - padding: same - strides: - - 1 - - 2 - width: 14 - 11: - K_h: 7 - K_w: 7 - height: 15 - input_channels: 36 - max_value: 2.5 - num_threads: 1 - output_channels: 16 - padding: same - strides: - - 2 - - 2 - width: 20 - 12: - K_h: 1 - K_w: 9 - height: 15 - input_channels: 20 - max_value: 3.0 - num_threads: 2 - output_channels: 8 - padding: valid - strides: - - 1 - - 2 - width: 14 - 13: - K_h: 9 - K_w: 9 - height: 5 - input_channels: 4 - max_value: 5.0 - num_threads: 1 - output_channels: 8 - padding: same - strides: - - 1 - - 1 - width: 8 - 14: - K_h: 3 - K_w: 7 - height: 5 - 
input_channels: 32 - max_value: 5.5 - num_threads: 5 - output_channels: 32 - padding: valid - strides: - - 1 - - 2 - width: 20 - 15: - K_h: 2 - K_w: 7 - height: 8 - input_channels: 36 - max_value: 1.5 - num_threads: 5 - output_channels: 12 - padding: same - strides: - - 1 - - 1 - width: 14 - 16: - K_h: 2 - K_w: 3 - height: 5 - input_channels: 16 - max_value: 4.5 - num_threads: 5 - output_channels: 52 - padding: same - strides: - - 1 - - 1 - width: 8 - 17: - K_h: 7 - K_w: 4 - height: 15 - input_channels: 36 - max_value: 5.0 - num_threads: 2 - output_channels: 16 - padding: valid - strides: - - 1 - - 2 - width: 4 - 18: - K_h: 5 - K_w: 3 - height: 15 - input_channels: 20 - max_value: 0.5 - num_threads: 2 - output_channels: 12 - padding: valid - strides: - - 2 - - 1 - width: 20 - 19: - K_h: 2 - K_w: 5 - height: 12 - input_channels: 20 - max_value: 1.5 - num_threads: 1 - output_channels: 28 - padding: same - strides: - - 2 - - 2 - width: 4 diff --git a/archived/test/integration_test/test_single_op_models/test_conv2d/test_single_pixel_conv2d.py b/archived/test/integration_test/test_single_op_models/test_conv2d/test_single_pixel_conv2d.py deleted file mode 100644 index 66b997af3..000000000 --- a/archived/test/integration_test/test_single_op_models/test_conv2d/test_single_pixel_conv2d.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from tflite2xcore.model_generation import Configuration - -from . import Conv2dGenericTestModelGenerator -from . import ( # pylint: disable=unused-import - test_output, -) - - -# ---------------------------------------------------------------------------- -# GENERATORS -# ---------------------------------------------------------------------------- - - -class SinglePixelConv2dTestModelGenerator(Conv2dGenericTestModelGenerator): - def _set_config(self, cfg: Configuration) -> None: - assert "height" not in cfg and "width" not in cfg, ( - "height and width should not be specified " - "(they are inferred from kernel height and width)" - ) - cfg["height"] = cfg.setdefault("K_h", 1) - cfg["width"] = cfg.setdefault("K_w", 1) - - cfg.setdefault("padding", "valid") - super()._set_config(cfg) - - def check_config(self) -> None: - super().check_config() - assert ( - self._config["padding"] == "valid" - ), "Only valid padding is allowed in single pixel conv2d tests" - - -GENERATOR = SinglePixelConv2dTestModelGenerator - - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_single_op_models/test_conv2d/test_single_pixel_conv2d.yml b/archived/test/integration_test/test_single_op_models/test_conv2d/test_single_pixel_conv2d.yml deleted file mode 100644 index 19e08830b..000000000 --- a/archived/test/integration_test/test_single_op_models/test_conv2d/test_single_pixel_conv2d.yml +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright (c) 2020, XMOS Ltd, All rights reserved -# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK -default: - 0: - K_h: 15 - K_w: 20 - input_channels: 49 - num_threads: 1 - output_channels: 12 - skip_on_device: True - 1: - K_h: 2 - K_w: 9 - input_channels: 25 - num_threads: 5 - output_channels: 49 - 2: - K_h: 12 - K_w: 3 - input_channels: 4 - num_threads: 1 - output_channels: 7 - 3: - K_h: 12 - K_w: 9 - input_channels: 12 - num_threads: 5 - output_channels: 32 - 4: - K_h: 8 - K_w: 14 - input_channels: 4 - num_threads: 2 - output_channels: 4 - 5: - K_h: 8 - K_w: 7 - input_channels: 49 - num_threads: 2 - 
output_channels: 49 - 6: - K_h: 2 - K_w: 3 - input_channels: 1 - num_threads: 2 - output_channels: 49 - 7: - K_h: 4 - K_w: 1 - input_channels: 32 - num_threads: 5 - output_channels: 16 - 8: - K_h: 1 - K_w: 7 - input_channels: 7 - num_threads: 2 - output_channels: 4 - 9: - K_h: 5 - K_w: 1 - input_channels: 12 - num_threads: 1 - output_channels: 1 - 10: - K_h: 8 - K_w: 14 - input_channels: 25 - num_threads: 5 - output_channels: 12 - 11: - K_h: 15 - K_w: 1 - input_channels: 32 - num_threads: 2 - output_channels: 12 - 12: - K_h: 15 - K_w: 4 - input_channels: 12 - num_threads: 2 - output_channels: 1 - 13: - K_h: 5 - K_w: 14 - input_channels: 1 - num_threads: 1 - output_channels: 25 - 14: - K_h: 2 - K_w: 9 - input_channels: 49 - num_threads: 5 - output_channels: 25 - 15: - K_h: 5 - K_w: 20 - input_channels: 25 - num_threads: 2 - output_channels: 7 - 16: - K_h: 5 - K_w: 3 - input_channels: 7 - num_threads: 1 - output_channels: 32 - 17: - K_h: 1 - K_w: 4 - input_channels: 16 - num_threads: 2 - output_channels: 4 - 18: - K_h: 1 - K_w: 20 - input_channels: 4 - num_threads: 5 - output_channels: 16 - 19: - K_h: 4 - K_w: 7 - input_channels: 16 - num_threads: 1 - output_channels: 16 diff --git a/archived/test/integration_test/test_single_op_models/test_fully_connected.py b/archived/test/integration_test/test_single_op_models/test_fully_connected.py deleted file mode 100644 index bfde67a08..000000000 --- a/archived/test/integration_test/test_single_op_models/test_fully_connected.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest -import tensorflow as tf -from typing import Optional, Tuple - -from tflite2xcore.model_generation import Configuration -from tflite2xcore.model_generation.utils import parse_init_config - -from . import ChannelAgnosticOpTestModelGenerator -from . 
import ( # pylint: disable=unused-import - test_output, -) - - -# ---------------------------------------------------------------------------- -# GENERATORS -# ---------------------------------------------------------------------------- - - -class FullyConnectedTestModelGenerator(ChannelAgnosticOpTestModelGenerator): - def _set_config(self, cfg: Configuration) -> None: - self._config.update( - { - "weight_init": cfg.pop("weight_init", ("RandomUniform", -1, 1)), - "bias_init": cfg.pop("bias_init", ("RandomUniform", -1, 1)), - "outputs": cfg.pop("outputs"), - } - ) - super()._set_config(cfg) - - def _build_core_model(self) -> tf.keras.Model: - return tf.keras.Sequential( - layers=[ - tf.keras.layers.Flatten(input_shape=self._input_shape), - self._op_layer(), - ] - ) - - def _op_layer( - self, *, input_shape: Optional[Tuple[int, int, int]] = None - ) -> tf.keras.layers.Layer: - cfg = self._config - return tf.keras.layers.Dense( - cfg["outputs"], - activation="linear", - bias_initializer=parse_init_config(*cfg["bias_init"]), - kernel_initializer=parse_init_config(*cfg["weight_init"]), - ) - - -GENERATOR = FullyConnectedTestModelGenerator - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_single_op_models/test_fully_connected.yml b/archived/test/integration_test/test_single_op_models/test_fully_connected.yml deleted file mode 100644 index fd20d6441..000000000 --- a/archived/test/integration_test/test_single_op_models/test_fully_connected.yml +++ /dev/null @@ -1,244 +0,0 @@ -# Copyright (c) 2020, XMOS Ltd, All rights reserved -# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK -default: - 0: - channels: 1 - height: 4 - num_threads: 2 - outputs: 32 - width: 8 - 1: - channels: 49 - height: 8 - num_threads: 1 - outputs: 12 - width: 8 - 2: - channels: 4 - height: 4 - num_threads: 1 - outputs: 12 - width: 1 - 3: - channels: 4 - height: 5 - num_threads: 1 - outputs: 16 - width: 7 - 4: - channels: 1 - height: 5 - num_threads: 1 - outputs: 12 - width: 8 - 5: - channels: 25 - height: 5 - num_threads: 2 - outputs: 32 - width: 8 - 6: - channels: 25 - height: 1 - num_threads: 5 - outputs: 4 - width: 14 - 7: - channels: 32 - height: 5 - num_threads: 5 - outputs: 25 - width: 7 - 8: - channels: 32 - height: 12 - num_threads: 5 - outputs: 4 - width: 4 - 9: - channels: 49 - height: 8 - num_threads: 2 - outputs: 1 - width: 20 - 10: - channels: 49 - height: 1 - num_threads: 5 - outputs: 4 - width: 1 - 11: - channels: 25 - height: 8 - num_threads: 1 - outputs: 1 - width: 7 - 12: - channels: 32 - height: 5 - num_threads: 2 - outputs: 12 - width: 14 - 13: - channels: 12 - height: 12 - num_threads: 2 - outputs: 25 - width: 4 - 14: - channels: 7 - height: 12 - num_threads: 5 - outputs: 1 - width: 20 - 15: - channels: 1 - height: 4 - num_threads: 1 - outputs: 7 - width: 4 - 16: - channels: 16 - height: 15 - num_threads: 1 - outputs: 16 - width: 4 - 17: - channels: 4 - height: 8 - num_threads: 2 - outputs: 25 - width: 7 - 18: - channels: 16 - height: 15 - num_threads: 5 - outputs: 4 - width: 1 - 19: - channels: 49 - height: 1 - num_threads: 5 - outputs: 25 - width: 7 - 20: - channels: 12 - height: 8 - num_threads: 2 - outputs: 7 - width: 4 - 21: - channels: 7 - height: 1 - num_threads: 5 - outputs: 7 - width: 8 - 22: - channels: 16 - height: 4 - num_threads: 2 - outputs: 25 - width: 8 - 23: - channels: 25 - height: 8 - num_threads: 1 - outputs: 49 - width: 1 - 24: - channels: 32 - height: 4 - num_threads: 2 - outputs: 4 - width: 14 - 25: - channels: 49 - height: 12 - 
num_threads: 5 - outputs: 32 - width: 14 - skip_on_device: True - 26: - channels: 7 - height: 12 - num_threads: 1 - outputs: 32 - width: 20 - 27: - channels: 16 - height: 15 - num_threads: 5 - outputs: 16 - width: 20 - 28: - channels: 7 - height: 1 - num_threads: 2 - outputs: 49 - width: 20 - 29: - channels: 12 - height: 15 - num_threads: 2 - outputs: 16 - width: 14 - 30: - channels: 4 - height: 15 - num_threads: 1 - outputs: 16 - width: 4 - 31: - channels: 12 - height: 4 - num_threads: 5 - outputs: 49 - width: 7 - 32: - channels: 32 - height: 8 - num_threads: 5 - outputs: 1 - width: 20 - 33: - channels: 12 - height: 15 - num_threads: 1 - outputs: 7 - width: 8 - 34: - channels: 16 - height: 15 - num_threads: 2 - outputs: 7 - width: 1 - 35: - channels: 1 - height: 12 - num_threads: 1 - outputs: 49 - width: 20 - 36: - channels: 7 - height: 5 - num_threads: 5 - outputs: 49 - width: 1 - 37: - channels: 1 - height: 1 - num_threads: 2 - outputs: 12 - width: 7 - 38: - channels: 4 - height: 4 - num_threads: 5 - outputs: 32 - width: 4 - 39: - channels: 25 - height: 5 - num_threads: 1 - outputs: 1 - width: 14 diff --git a/archived/test/integration_test/test_single_op_models/test_pad.py b/archived/test/integration_test/test_single_op_models/test_pad.py deleted file mode 100644 index c1586cab0..000000000 --- a/archived/test/integration_test/test_single_op_models/test_pad.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest -import tensorflow as tf -from typing import Optional, Tuple - -from . import ChannelAgnosticOpTestModelGenerator, PaddingMixin -from . import ( # pylint: disable=unused-import - test_output, -) - - -# ---------------------------------------------------------------------------- -# GENERATORS -# ---------------------------------------------------------------------------- - - -class PadTestModelGenerator(ChannelAgnosticOpTestModelGenerator, PaddingMixin): - def _op_layer( - self, *, input_shape: Optional[Tuple[int, int, int]] = None - ) -> tf.keras.layers.Layer: - return self._pad_layer(input_shape=input_shape) - - -GENERATOR = PadTestModelGenerator - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture -def abs_output_tolerance() -> int: - return 0 - -if __name__ == "__main__": - pytest.main() diff --git a/archived/test/integration_test/test_single_op_models/test_pad.yml b/archived/test/integration_test/test_single_op_models/test_pad.yml deleted file mode 100644 index ca5a08d11..000000000 --- a/archived/test/integration_test/test_single_op_models/test_pad.yml +++ /dev/null @@ -1,163 +0,0 @@ -# Copyright (c) 2020, XMOS Ltd, All rights reserved -# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK -default: - 0: - channels: 48 - height: 4 - pad_b: 0 - pad_l: 2 - pad_r: 0 - pad_t: 0 - width: 1 - 1: - channels: 48 - height: 5 - pad_b: 2 - pad_l: 0 - pad_r: 0 - pad_t: 1 - width: 1 - 2: - channels: 64 - height: 15 - pad_b: 0 - pad_l: 1 - pad_r: 0 - pad_t: 0 - width: 8 - 3: - channels: 52 - height: 5 - pad_b: 2 - pad_l: 2 - pad_r: 2 - pad_t: 0 - width: 7 - 4: - channels: 64 - height: 8 - pad_b: 0 - pad_l: 1 - pad_r: 1 - pad_t: 1 - width: 4 - 5: - channels: 32 - height: 1 - pad_b: 2 - pad_l: 2 - pad_r: 0 - pad_t: 1 - width: 4 - 6: - channels: 8 - height: 1 - pad_b: 2 - pad_l: 2 - pad_r: 2 - pad_t: 1 - width: 8 - 7: - channels: 12 - height: 5 - 
pad_b: 1 - pad_l: 1 - pad_r: 2 - pad_t: 2 - width: 20 - 8: - channels: 32 - height: 15 - pad_b: 1 - pad_l: 0 - pad_r: 1 - pad_t: 2 - width: 8 - 9: - channels: 16 - height: 8 - pad_b: 2 - pad_l: 0 - pad_r: 2 - pad_t: 2 - width: 7 - 10: - channels: 16 - height: 15 - pad_b: 2 - pad_l: 1 - pad_r: 2 - pad_t: 0 - width: 4 - 11: - channels: 64 - height: 1 - pad_b: 1 - pad_l: 1 - pad_r: 2 - pad_t: 2 - width: 8 - 12: - channels: 28 - height: 12 - pad_b: 1 - pad_l: 0 - pad_r: 0 - pad_t: 0 - width: 1 - 13: - channels: 8 - height: 4 - pad_b: 0 - pad_l: 2 - pad_r: 2 - pad_t: 0 - width: 20 - 14: - channels: 48 - height: 12 - pad_b: 1 - pad_l: 2 - pad_r: 1 - pad_t: 1 - width: 20 - 15: - channels: 12 - height: 15 - pad_b: 0 - pad_l: 0 - pad_r: 1 - pad_t: 2 - width: 20 - 16: - channels: 28 - height: 4 - pad_b: 1 - pad_l: 2 - pad_r: 1 - pad_t: 1 - width: 14 - 17: - channels: 4 - height: 8 - pad_b: 0 - pad_l: 1 - pad_r: 0 - pad_t: 1 - width: 14 - 18: - channels: 12 - height: 8 - pad_b: 2 - pad_l: 0 - pad_r: 1 - pad_t: 2 - width: 7 - 19: - channels: 4 - height: 1 - pad_b: 1 - pad_l: 0 - pad_r: 0 - pad_t: 2 - width: 1 diff --git a/archived/test/integration_test/test_single_op_models/test_pool2d/__init__.py b/archived/test/integration_test/test_single_op_models/test_pool2d/__init__.py deleted file mode 100644 index 0e506aec6..000000000 --- a/archived/test/integration_test/test_single_op_models/test_pool2d/__init__.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import tensorflow as tf -from abc import abstractmethod -from typing import Tuple, Optional, Type, Union - -from tflite2xcore.model_generation import Configuration - -from .. import ( - FilterOpTestModelGenerator, - ChannelPreservingOpTestModelGenerator, - test_output, -) - - -# ---------------------------------------------------------------------------- -# GENERATORS -# ---------------------------------------------------------------------------- - - -class Pool2dGenericTestModelGenerator( - ChannelPreservingOpTestModelGenerator, FilterOpTestModelGenerator -): - def _set_config(self, cfg: Configuration) -> None: - cfg.setdefault("strides", (2, 2)) - cfg.setdefault("K_h", 2) - cfg.setdefault("K_w", 2) - cfg.setdefault("padding", "valid") - super()._set_config(cfg) - - def check_config(self) -> None: - super().check_config() - if self._config["padding"] == "same": - assert ( - self._config["height"] % 2 == self._config["width"] % 2 == 0 - and self._config["K_h"] == self._config["K_w"] == 2 - and self._config["strides"][0] == self._config["strides"][1] == 2 - ), "same padding is only allowed for the common 2x2 case" - - @property - @abstractmethod - def _op_class( - self, - ) -> Union[Type[tf.keras.layers.MaxPool2D], Type[tf.keras.layers.AvgPool2D]]: - raise NotImplementedError() - - def _op_layer( - self, *, input_shape: Optional[Tuple[int, int, int]] = None - ) -> tf.keras.layers.Layer: - kwargs = {"input_shape": input_shape} if input_shape else {} - cfg = self._config - return self._op_class( - pool_size=(cfg["K_h"], cfg["K_w"]), - strides=cfg["strides"], - padding=cfg["padding"], - **kwargs - ) diff --git a/archived/test/integration_test/test_single_op_models/test_pool2d/test_avgpool2d.py b/archived/test/integration_test/test_single_op_models/test_pool2d/test_avgpool2d.py deleted file mode 100644 index 5437ac01c..000000000 --- a/archived/test/integration_test/test_single_op_models/test_pool2d/test_avgpool2d.py +++ /dev/null @@ -1,29 +0,0 @@ -# 
diff --git a/archived/test/integration_test/test_single_op_models/test_pool2d/test_avgpool2d.py b/archived/test/integration_test/test_single_op_models/test_pool2d/test_avgpool2d.py
deleted file mode 100644
index 5437ac01c..000000000
--- a/archived/test/integration_test/test_single_op_models/test_pool2d/test_avgpool2d.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import pytest
-import tensorflow as tf
-from typing import Type
-
-from . import Pool2dGenericTestModelGenerator
-from . import (  # pylint: disable=unused-import
-    test_output,
-)
-
-
-# ----------------------------------------------------------------------------
-# GENERATORS
-# ----------------------------------------------------------------------------
-
-
-class AvgPool2dTestModelGenerator(Pool2dGenericTestModelGenerator):
-    @property
-    def _op_class(self) -> Type[tf.keras.layers.AvgPool2D]:
-        return tf.keras.layers.AvgPool2D  # type: ignore
-
-
-GENERATOR = AvgPool2dTestModelGenerator
-
-
-if __name__ == "__main__":
-    pytest.main()
diff --git a/archived/test/integration_test/test_single_op_models/test_pool2d/test_avgpool2d.yml b/archived/test/integration_test/test_single_op_models/test_pool2d/test_avgpool2d.yml
deleted file mode 100644
index c7b9b8b6d..000000000
--- a/archived/test/integration_test/test_single_op_models/test_pool2d/test_avgpool2d.yml
+++ /dev/null
@@ -1,443 +0,0 @@
-# Copyright (c) 2020, XMOS Ltd, All rights reserved
-# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK
-default:
-  0: {K_h: 3, K_w: 3, channels: 12, height: 8, num_threads: 1, padding: valid, strides: [2, 1], width: 20}
-  1: {K_h: 3, K_w: 1, channels: 4, height: 8, num_threads: 5, padding: valid, strides: [2, 2], width: 14}
-  2: {K_h: 2, K_w: 1, channels: 12, height: 12, num_threads: 1, padding: valid, strides: [1, 2], width: 7}
-  3: {K_h: 3, K_w: 2, channels: 16, height: 12, num_threads: 1, padding: valid, strides: [1, 2], width: 8}
-  4: {K_h: 2, K_w: 3, channels: 64, height: 15, num_threads: 2, padding: valid, strides: [1, 1], width: 4}
-  5: {K_h: 3, K_w: 1, channels: 8, height: 5, num_threads: 5, padding: valid, strides: [1, 2], width: 8}
-  6: {K_h: 2, K_w: 1, channels: 64, height: 8, num_threads: 5, padding: valid, strides: [1, 3], width: 8}
-  7: {K_h: 3, K_w: 1, channels: 4, height: 12, num_threads: 2, padding: valid, strides: [1, 3], width: 4}
-  8: {K_h: 1, K_w: 2, channels: 16, height: 12, num_threads: 1, padding: valid, strides: [3, 2], width: 4}
-  9: {K_h: 1, K_w: 2, channels: 48, height: 5, num_threads: 2, padding: valid, strides: [2, 1], width: 7}
-  10: {K_h: 1, K_w: 2, channels: 4, height: 12, num_threads: 1, padding: valid, strides: [1, 2], width: 8}
-  11: {K_h: 2, K_w: 3, channels: 4, height: 4, num_threads: 2, padding: valid, strides: [1, 3], width: 7}
-  12: {K_h: 3, K_w: 1, channels: 28, height: 5, num_threads: 1, padding: valid, strides: [1, 1], width: 8}
-  13: {K_h: 3, K_w: 1, channels: 28, height: 15, num_threads: 5, padding: valid, strides: [1, 3], width: 7}
-  14: {K_h: 1, K_w: 1, channels: 52, height: 15, num_threads: 1, padding: valid, strides: [1, 2], width: 4}
-  15: {K_h: 1, K_w: 2, channels: 64, height: 12, num_threads: 5, padding: valid, strides: [2, 2], width: 7}
-  16: {K_h: 3, K_w: 3, channels: 28, height: 12, num_threads: 5, padding: valid, strides: [3, 2], width: 7}
-  17: {K_h: 1, K_w: 2, channels: 8, height: 12, num_threads: 1, padding: valid, strides: [2, 2], width: 20}
-  18: {K_h: 2, K_w: 2, channels: 12, height: 8, num_threads: 5, padding: valid, strides: [2, 2], width: 20}
-  19: {K_h: 3, K_w: 3, channels: 52, height: 15,
-       num_threads: 1, padding: valid, strides: [3, 2], width: 7}
-  20: {K_h: 2, K_w: 1, channels: 8, height: 8, num_threads: 5, padding: valid, strides: [1, 3], width: 20}
-  21: {K_h: 2, K_w: 3, channels: 8, height: 5, num_threads: 5, padding: valid, strides: [2, 1], width: 7}
-  22: {K_h: 2, K_w: 2, channels: 52, height: 8, num_threads: 1, padding: valid, strides: [1, 1], width: 20}
-  23: {K_h: 3, K_w: 3, channels: 48, height: 4, num_threads: 2, padding: valid, strides: [2, 2], width: 20}
-  24: {K_h: 1, K_w: 1, channels: 64, height: 12, num_threads: 5, padding: valid, strides: [1, 3], width: 8}
-  25: {K_h: 2, K_w: 3, channels: 48, height: 15, num_threads: 5, padding: valid, strides: [1, 1], width: 14}
-  26: {K_h: 3, K_w: 2, channels: 28, height: 4, num_threads: 1, padding: valid, strides: [2, 2], width: 14}
-  27: {K_h: 3, K_w: 3, channels: 52, height: 5, num_threads: 2, padding: valid, strides: [1, 1], width: 4}
-  28: {K_h: 2, K_w: 3, channels: 12, height: 8, num_threads: 5, padding: valid, strides: [2, 1], width: 8}
-  29: {K_h: 2, K_w: 2, channels: 52, height: 8, num_threads: 2, padding: valid, strides: [1, 3], width: 8}
-  30: {K_h: 2, K_w: 1, channels: 64, height: 4, num_threads: 2, padding: valid, strides: [1, 2], width: 4}
-  31: {K_h: 1, K_w: 2, channels: 16, height: 15, num_threads: 2, padding: valid, strides: [1, 2], width: 14}
-  32: {K_h: 1, K_w: 3, channels: 12, height: 12, num_threads: 2, padding: valid, strides: [1, 2], width: 8}
-  33: {K_h: 1, K_w: 1, channels: 8, height: 15, num_threads: 1, padding: valid, strides: [1, 3], width: 1}
-  34: {K_h: 1, K_w: 1, channels: 32, height: 5, num_threads: 5, padding: valid, strides: [2, 2], width: 14}
-  35: {K_h: 2, K_w: 3, channels: 48, height: 15, num_threads: 5, padding: valid, strides: [3, 2], width: 14}
-  36: {K_h: 1, K_w: 3, channels: 52, height: 8, num_threads: 1, padding: valid, strides: [2, 1], width: 14}
-  37: {K_h: 2, K_w: 2, channels: 32, height: 5, num_threads: 1, padding: valid, strides: [2, 2], width: 7}
-  38: {K_h: 1, K_w: 3, channels: 52, height: 4, num_threads: 1, padding: valid, strides: [2, 2], width: 7}
-  39: {K_h: 2, K_w: 1, channels: 8, height: 5, num_threads: 1, padding: valid, strides: [2, 1], width: 20}
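All of the configs above use valid padding, so the output spatial extent follows the usual formula out = (in - K) // stride + 1. A quick sanity check against config 0 (a sketch of the arithmetic, not part of the deleted test suite):

    def valid_out_size(in_size: int, kernel: int, stride: int) -> int:
        """Output length of one spatial axis under VALID padding."""
        return (in_size - kernel) // stride + 1

    # config 0 above: height 8, K_h 3, row stride 2 -> 3 output rows
    assert valid_out_size(8, 3, 2) == 3
    # config 0 above: width 20, K_w 3, column stride 1 -> 18 output columns
    assert valid_out_size(20, 3, 1) == 18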
diff --git a/archived/test/integration_test/test_single_op_models/test_pool2d/test_global_avgpool2d.py b/archived/test/integration_test/test_single_op_models/test_pool2d/test_global_avgpool2d.py
deleted file mode 100644
index fd7a8c886..000000000
--- a/archived/test/integration_test/test_single_op_models/test_pool2d/test_global_avgpool2d.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import pytest
-import tensorflow as tf
-from typing import Optional, Tuple
-
-from . import ChannelPreservingOpTestModelGenerator
-from . import (  # pylint: disable=unused-import
-    test_output as _test_output,
-)
-
-
-# ----------------------------------------------------------------------------
-# GENERATORS
-# ----------------------------------------------------------------------------
-
-
-class GlobalAveragePooling2dTestModelGenerator(ChannelPreservingOpTestModelGenerator):
-    def _op_layer(
-        self, *, input_shape: Optional[Tuple[int, int, int]] = None
-    ) -> tf.keras.layers.Layer:
-        kwargs = {"input_shape": input_shape} if input_shape else {}
-        return tf.keras.layers.GlobalAveragePooling2D(**kwargs)
-
-
-GENERATOR = GlobalAveragePooling2dTestModelGenerator
-
-
-# ----------------------------------------------------------------------------
-# TESTS
-# ----------------------------------------------------------------------------
-
-# TODO: fix this
-def test_output(compared_outputs, request):  # type: ignore
-    name = request.node.name
-    if tf.version.VERSION[:3] in ("2.4", "2.5"):
-        if (
-            name.endswith("[CONFIGS[14]]")
-            or name.endswith("[CONFIGS[16]]")
-            or name.endswith("[CONFIGS[21]]")
-        ):
-            request.applymarker(pytest.mark.xfail(run=False))
-    _test_output(compared_outputs, request)
-
-
-if __name__ == "__main__":
-    pytest.main()
diff --git a/archived/test/integration_test/test_single_op_models/test_pool2d/test_global_avgpool2d.yml b/archived/test/integration_test/test_single_op_models/test_pool2d/test_global_avgpool2d.yml
deleted file mode 100644
index 63c07017d..000000000
--- a/archived/test/integration_test/test_single_op_models/test_pool2d/test_global_avgpool2d.yml
+++ /dev/null
@@ -1,203 +0,0 @@
-# Copyright (c) 2020, XMOS Ltd, All rights reserved
-# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK
-default:
-  0: {channels: 4, height: 8, num_threads: 5, width: 8}
-  1: {channels: 32, height: 15, num_threads: 1, width: 4}
-  2: {channels: 64, height: 8, num_threads: 2, width: 7}
-  3: {channels: 64, height: 8, num_threads: 1, width: 4}
-  4: {channels: 16, height: 15, num_threads: 5, width: 20}
-  5: {channels: 12, height: 8, num_threads: 1, width: 1}
-  6: {channels: 8, height: 5, num_threads: 2, width: 7}
-  7: {channels: 52, height: 8, num_threads: 1, width: 14}
-  8: {channels: 48, height: 15, num_threads: 5, width: 20}
-  9: {channels: 32, height: 12, num_threads: 2, width: 8}
-  10: {channels: 52, height: 8, num_threads: 1, width: 1}
-  11: {channels: 48, height: 15, num_threads: 1, width: 14}
-  12: {channels: 16, height: 15, num_threads: 2, width: 7}
-  13: {channels: 8, height: 5, num_threads: 1, width: 14}
-  14: {channels: 8, height: 8, num_threads: 5, width: 14}
-  15: {channels: 28, height: 15, num_threads: 1, width: 20}
-  16: {channels: 8, height: 12, num_threads: 5, width: 7}
-  17: {channels: 28, height: 4, num_threads: 2, width: 8}
-  18: {channels: 28, height: 12, num_threads: 1, width: 4}
-  19: {channels: 12, height: 5, num_threads: 1, width: 8}
-  20: {channels: 12, height: 8, num_threads: 2, width: 4}
-  21: {channels: 16, height: 5, num_threads: 5, width: 14}
-  22: {channels: 64, height: 15, num_threads: 1, width: 20}
-  23: {channels: 4, height: 12, num_threads: 1, width: 4}
-  24: {channels: 52, height: 5, num_threads: 2, width: 7}
-  25: {channels: 28, height: 15, num_threads: 2, width: 7}
-  26: {channels: 48, height: 1, num_threads: 1, width: 1}
-  27: {channels: 4, height: 8, num_threads: 5, width: 20}
-  28: {channels: 4, height: 12, num_threads: 5, width: 8}
-  29: {channels: 8, height: 5, num_threads: 5, width: 14}
-  30: {channels: 48, height: 5, num_threads: 1, width: 7}
-  31: {channels: 32, height: 15, num_threads: 1, width: 4}
-  32: {channels: 48, height: 5, num_threads: 1, width: 7}
-  33: {channels: 32, height: 4, num_threads: 2, width: 4}
-  34: {channels: 52, height: 8, num_threads: 2, width: 8}
-  35: {channels: 12, height: 5, num_threads: 5, width: 8}
-  36: {channels: 12, height: 12, num_threads: 2, width: 14}
-  37: {channels: 64, height: 4, num_threads: 1, width: 1}
-  38: {channels: 28, height: 4, num_threads: 1, width: 7}
-  39: {channels: 64, height: 4, num_threads: 2, width: 8}
diff --git a/archived/test/integration_test/test_single_op_models/test_pool2d/test_maxpool2d.py b/archived/test/integration_test/test_single_op_models/test_pool2d/test_maxpool2d.py
deleted file mode 100644
index 462d4a5f6..000000000
--- a/archived/test/integration_test/test_single_op_models/test_pool2d/test_maxpool2d.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import pytest
-import tensorflow as tf
-from typing import Type
-
-from . import Pool2dGenericTestModelGenerator
-from . import (  # pylint: disable=unused-import
-    test_output,
-)
-
-
-# ----------------------------------------------------------------------------
-# GENERATORS
-# ----------------------------------------------------------------------------
-
-
-class MaxPool2dTestModelGenerator(Pool2dGenericTestModelGenerator):
-    @property
-    def _op_class(self) -> Type[tf.keras.layers.MaxPool2D]:
-        return tf.keras.layers.MaxPool2D  # type: ignore
-
-
-GENERATOR = MaxPool2dTestModelGenerator
-
-
-if __name__ == "__main__":
-    pytest.main()
diff --git a/archived/test/integration_test/test_single_op_models/test_pool2d/test_maxpool2d.yml b/archived/test/integration_test/test_single_op_models/test_pool2d/test_maxpool2d.yml
deleted file mode 100644
index 9a98b4876..000000000
--- a/archived/test/integration_test/test_single_op_models/test_pool2d/test_maxpool2d.yml
+++ /dev/null
@@ -1,443 +0,0 @@
-# Copyright (c) 2020, XMOS Ltd, All rights reserved
-# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK
-default:
-  0: {K_h: 3, K_w: 1, channels: 12, height: 12, num_threads: 1, padding: valid, strides: [1, 3], width: 4}
-  1: {K_h: 1, K_w: 3, channels: 52, height: 4, num_threads: 1, padding: valid, strides: [1, 3], width: 4}
-  2: {K_h: 3, K_w: 2, channels: 8, height: 5, num_threads: 2, padding: valid, strides: [2, 1], width: 20}
-  3: {K_h: 1, K_w: 2, channels: 8, height: 12, num_threads: 1, padding: valid, strides: [3, 2], width: 7}
-  4: {K_h: 2, K_w: 2, channels: 8, height: 4, num_threads: 1, padding: valid, strides: [3, 2], width: 4}
-  5: {K_h: 1, K_w: 3, channels: 16, height: 5, num_threads: 2, padding: valid, strides: [3, 2], width: 4}
-  6: {K_h: 3, K_w: 2, channels: 28, height: 15, num_threads: 5, padding: valid, strides: [2, 1], width: 8}
-  7: {K_h: 2, K_w: 1, channels: 16, height: 12, num_threads: 5, padding: valid, strides: [1, 2], width: 7}
-  8: {K_h: 2, K_w: 1, channels: 32, height: 4, num_threads: 1, padding: valid, strides: [3, 2], width: 14}
-  9: {K_h: 1, K_w: 3, channels: 28, height: 12, num_threads: 5, padding: valid, strides: [2, 1], width: 7}
-  10: {K_h: 1, K_w: 3, channels: 4, height: 8, num_threads: 2, padding: valid, strides: [1, 3], width: 20}
-  11: {K_h: 1, K_w: 3, channels: 52, height: 5,
-       num_threads: 2, padding: valid, strides: [1, 1], width: 4}
-  12: {K_h: 3, K_w: 2, channels: 12, height: 8, num_threads: 1, padding: valid, strides: [1, 2], width: 8}
-  13: {K_h: 2, K_w: 1, channels: 64, height: 4, num_threads: 1, padding: valid, strides: [2, 2], width: 14}
-  14: {K_h: 3, K_w: 3, channels: 64, height: 4, num_threads: 1, padding: valid, strides: [1, 3], width: 7}
-  15: {K_h: 2, K_w: 3, channels: 12, height: 15, num_threads: 2, padding: valid, strides: [1, 3], width: 14}
-  16: {K_h: 1, K_w: 2, channels: 52, height: 1, num_threads: 1, padding: valid, strides: [1, 1], width: 7}
-  17: {K_h: 3, K_w: 3, channels: 32, height: 5, num_threads: 2, padding: valid, strides: [2, 2], width: 20}
-  18: {K_h: 2, K_w: 2, channels: 12, height: 8, num_threads: 2, padding: valid, strides: [3, 2], width: 8}
-  19: {K_h: 3, K_w: 1, channels: 28, height: 8, num_threads: 2, padding: valid, strides: [2, 1], width: 20}
-  20: {K_h: 2, K_w: 1, channels: 12, height: 5, num_threads: 5, padding: valid, strides: [3, 2], width: 20}
-  21: {K_h: 2, K_w: 3, channels: 4, height: 5, num_threads: 2, padding: valid, strides: [2, 1], width: 8}
-  22: {K_h: 2, K_w: 3, channels: 8, height: 12, num_threads: 1, padding: valid, strides: [1, 1], width: 14}
-  23: {K_h: 3, K_w: 3, channels: 48, height: 15, num_threads: 2, padding: valid, strides: [2, 1], width: 4}
-  24: {K_h: 3, K_w: 1, channels: 48, height: 12, num_threads: 2, padding: valid, strides: [1, 2], width: 20}
-  25: {K_h: 2, K_w: 3, channels: 48, height: 12, num_threads: 5, padding: valid, strides: [1, 3], width: 7}
-  26: {K_h: 3, K_w: 2, channels: 32, height: 15, num_threads: 5, padding: valid, strides: [1, 3], width: 7}
-  27: {K_h: 3, K_w: 3, channels: 4, height: 12, num_threads: 5, padding: valid, strides: [1, 3], width: 14}
-  28: {K_h: 1, K_w: 2, channels: 52, height: 8, num_threads: 2, padding: valid, strides: [2, 1], width: 14}
-  29: {K_h: 1, K_w: 3, channels: 16, height: 8, num_threads: 5, padding: valid, strides: [1, 2], width: 7}
-  30: {K_h: 2, K_w: 1, channels: 64, height: 12, num_threads: 5, padding: valid, strides: [2, 2], width: 14}
-  31: {K_h: 2, K_w: 3, channels: 12, height: 8, num_threads: 1, padding: valid, strides: [2, 1], width: 14}
-  32: {K_h: 2, K_w: 2, channels: 64, height: 5, num_threads: 5, padding: valid, strides: [2, 2], width: 8}
-  33: {K_h: 1, K_w: 2, channels: 64, height: 15, num_threads: 5, padding: valid, strides: [2, 2], width: 20}
-  34: {K_h: 3, K_w: 2, channels: 8, height: 15, num_threads: 1, padding: valid, strides: [1, 3], width: 8}
-  35: {K_h: 1, K_w: 2, channels: 48, height: 15, num_threads: 2, padding: valid, strides: [1, 1], width: 20}
-  36: {K_h: 1, K_w: 1, channels: 4, height: 8, num_threads: 5, padding: valid, strides: [3, 2], width: 14}
-  37: {K_h: 3, K_w: 3, channels: 32, height: 8, num_threads: 1, padding: valid, strides: [1, 2], width: 8}
-  38: {K_h: 1, K_w: 2, channels: 64, height: 15, num_threads: 2, padding: valid, strides: [1, 1], width: 7}
-  39: {K_h: 2, K_w: 1, channels: 28, height: 4, num_threads: 1, padding: valid, strides: [1, 2], width: 8}
diff --git a/archived/test/integration_test/test_single_op_models/test_strided_slice.py b/archived/test/integration_test/test_single_op_models/test_strided_slice.py
deleted file mode 100644
index 38ca73347..000000000
--- a/archived/test/integration_test/test_single_op_models/test_strided_slice.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import pytest
-
-import tensorflow as tf
-from typing import Optional, Tuple
-from math import ceil
-
-from . import ChannelAgnosticOpTestModelGenerator
-from . import (  # pylint: disable=unused-import
-    test_output,
-)
-
-
-# ----------------------------------------------------------------------------
-# GENERATORS
-# ----------------------------------------------------------------------------
-
-
-class StridedSliceModelGenerator(ChannelAgnosticOpTestModelGenerator):
-    def _build_core_model(self) -> tf.keras.Model:
-
-        input_shape = self._input_shape
-
-        inputs = tf.keras.Input(shape=input_shape)
-        x_0 = tf.strided_slice(inputs, begin=[0, 0, 0], end=[input_shape[0], input_shape[1] // 2, input_shape[2]], strides=[1, 1, 1])
-        x_1 = tf.strided_slice(inputs, begin=[0, ceil(input_shape[1] / 2), 0], end=[input_shape[0], input_shape[1], input_shape[2]], strides=[1, 1, 1])
-        x = tf.keras.layers.Concatenate(axis=1)([x_0, x_1])
-        outputs = tf.keras.layers.Flatten()(x)
-        return tf.keras.Model(inputs=inputs, outputs=[outputs])
-
-    def _op_layer(
-        self, *, input_shape: Optional[Tuple[int, int, int]] = None
-    ) -> tf.keras.layers.Layer:
-        # never called: _build_core_model above is overridden instead
-        return tf.strided_slice()
-
-GENERATOR = StridedSliceModelGenerator
-
-
-# ----------------------------------------------------------------------------
-# FIXTURES
-# ----------------------------------------------------------------------------
-
-if __name__ == "__main__":
-    pytest.main()
diff --git a/archived/test/integration_test/test_single_op_models/test_strided_slice.yml b/archived/test/integration_test/test_single_op_models/test_strided_slice.yml
deleted file mode 100644
index 433bc9d03..000000000
--- a/archived/test/integration_test/test_single_op_models/test_strided_slice.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-# Copyright (c) 2020, XMOS Ltd, All rights reserved
-# RANDOMLY GENERATED CONFIGS, MODIFY AT OWN RISK
-default:
-  0: {height: 160, width: 160, channels: 4}
-  1: {height: 150, width: 150, channels: 4}
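The generator above halves the input along the first spatial axis with two strided slices and concatenates the halves back together. The index arithmetic, extracted into a standalone sketch (the function name is ours, not from the deleted module):

    from math import ceil

    def split_bounds(h: int):
        """Half-split used by StridedSliceModelGenerator: rows [0, h // 2)
        and [ceil(h / 2), h). For even h the two slices tile the input
        exactly; for odd h the middle row is dropped."""
        return (0, h // 2), (ceil(h / 2), h)

    assert split_bounds(160) == ((0, 80), (80, 160))
    assert split_bounds(15) == ((0, 7), (8, 15))  # row 7 is dropped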
- -"License Fee" means the fee for the XMOS Software as detailed in any schedules or annexes to this Software License - -"Licensee Modifications" means all developments and modifications of the XMOS Software developed independently by the Customer. - -"XMOS Modifications" means all developments and modifications of the XMOS Software developed or co-developed by XMOS. - -"XMOS Hardware" means any XMOS hardware devices supplied by XMOS from time to time and/or the particular XMOS devices detailed in any schedules or annexes to this Software License. - -"XMOS Software" comprises the XMOS owned circuit designs, schematics, source code, object code, reference designs, (including related programmer comments and documentation, if any), error corrections, improvements, modifications (including XMOS Modifications) and updates. - -The headings in this License do not affect its interpretation. Save where the context otherwise requires, references to clauses and schedules are to clauses and schedules of this License. - -Unless the context otherwise requires: - -- references to XMOS and the Customer include their permitted successors and assigns; -- references to statutory provisions include those statutory provisions as amended or re-enacted; and -- references to any gender include all genders. - -Words in the singular include the plural and in the plural include the singular. - -2. License - -XMOS grants the Customer a non-exclusive license to use, develop, modify and distribute the XMOS Software in object code only with, or for the purpose of being used with, XMOS Hardware. - -Open Source Software (OSS) must be used and dealt with in accordance with any license terms under which OSS is distributed. - -3. Consideration - -In consideration of the mutual obligations contained in this License, the parties agree to its terms. - -4. Term - -Subject to clause 12 below, this License shall be perpetual. - -5. Restrictions on Use - -The Customer will adhere to all applicable import and export laws and regulations of the country in which it resides and of the United States and United Kingdom, without limitation. The Customer agrees that it is its responsibility to obtain copies of and to familiarise itself fully with these laws and regulations to avoid violation. - -6. Modifications - -The Customer will own all intellectual property rights in the Licensee Modifications but will undertake to provide XMOS with any fixes made to correct any bugs found in the XMOS Software on a non-exclusive, perpetual and royalty free license basis. - -XMOS will own all intellectual property rights in the XMOS Modifications. -The Customer may only use the Licensee Modifications and XMOS Modifications on, or in relation to, XMOS Hardware. - -7. Support - -Support of the XMOS Software may be provided by XMOS pursuant to a separate support agreement. - -8. Warranty and Disclaimer - -The XMOS Software is provided "AS IS" without a warranty of any kind. XMOS and its licensors' entire liability and Customer's exclusive remedy under this warranty to be determined in XMOS's sole and absolute discretion, will be either (a) the corrections of defects in media or replacement of the media, or (b) the refund of the license fee paid (if any). - -Whilst XMOS gives the Customer the ability to load their own software and applications onto XMOS devices, the security of such software and applications when on the XMOS devices is the Customer's own responsibility and any breach of security shall not be deemed a defect or failure of the hardware. 
XMOS shall have no liability whatsoever in relation to any costs, damages or other losses Customer may incur as a result of any breaches of security in relation to your software or applications.
-
-XMOS AND ITS LICENSORS DISCLAIM ALL OTHER WARRANTIES, EXPRESS OR IMPLIED, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY/ SATISFACTORY QUALITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT EXCEPT TO THE EXTENT THAT THESE DISCLAIMERS ARE HELD TO BE LEGALLY INVALID UNDER APPLICABLE LAW.
-
-9. High Risk Activities
-
-The XMOS Software is not designed or intended for use in conjunction with on-line control equipment in hazardous environments requiring fail-safe performance, including without limitation the operation of nuclear facilities, aircraft navigation or communication systems, air traffic control, life support machines, or weapons systems (collectively "High Risk Activities") in which the failure of the XMOS Software could lead directly to death, personal injury, or severe physical or environmental damage. XMOS and its licensors specifically disclaim any express or implied warranties relating to use of the XMOS Software in connection with High Risk Activities.
-
-10. Liability
-
-TO THE EXTENT NOT PROHIBITED BY APPLICABLE LAW, NEITHER XMOS NOR ITS LICENSORS SHALL BE LIABLE FOR ANY LOST REVENUE, BUSINESS, PROFIT, CONTRACTS OR DATA, ADMINISTRATIVE OR OVERHEAD EXPENSES, OR FOR SPECIAL, INDIRECT, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE DAMAGES HOWEVER CAUSED AND REGARDLESS OF THEORY OF LIABILITY ARISING OUT OF THIS LICENSE, EVEN IF XMOS HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. In no event shall XMOS's liability to the Customer whether in contract, tort (including negligence), or otherwise exceed the License Fee.
-
-Customer agrees to indemnify, hold harmless, and defend XMOS and its licensors from and against any claims or lawsuits, including attorneys' fees and any other liabilities, demands, proceedings, damages, losses, costs, expenses fines and charges which are made or brought against or incurred by XMOS as a result of your use or distribution of the Licensee Modifications or your use or distribution of XMOS Software, or any development of it, other than in accordance with the terms of this License.
-
-11. Ownership
-
-The copyrights and all other intellectual and industrial property rights for the protection of information with respect to the XMOS Software (including the methods and techniques on which they are based) are retained by XMOS and/or its licensors. Nothing in this Agreement serves to transfer such rights. Customer may not sell, mortgage, underlet, sublease, sublicense, lend or transfer possession of the XMOS Software in any way whatsoever to any third party who is not bound by this Agreement.
-12. Termination
-
-Either party may terminate this License at any time on written notice to the other if the other:
-
-- is in material or persistent breach of any of the terms of this License and either that breach is incapable of remedy, or the other party fails to remedy that breach within 30 days after receiving written notice requiring it to remedy that breach; or
-- is unable to pay its debts (within the meaning of section 123 of the Insolvency Act 1986), or becomes insolvent, or is subject to an order or a resolution for its liquidation, administration, winding-up or dissolution (otherwise than for the purposes of a solvent amalgamation or reconstruction), or has an administrative or other receiver, manager, trustee, liquidator, administrator or similar officer appointed over all or any substantial part of its assets, or enters into or proposes any composition or arrangement with its creditors generally, or is subject to any analogous event or proceeding in any applicable jurisdiction.
-
-Termination by either party in accordance with the rights contained in clause 12 shall be without prejudice to any other rights or remedies of that party accrued prior to termination.
-
-On termination for any reason:
-
-- all rights granted to the Customer under this License shall cease;
-- the Customer shall cease all activities authorised by this License;
-- the Customer shall immediately pay any sums due to XMOS under this License; and
-- the Customer shall immediately destroy or return to the XMOS (at the XMOS's option) all copies of the XMOS Software then in its possession, custody or control and, in the case of destruction, certify to XMOS that it has done so.
-
-Clauses 5, 8, 9, 10 and 11 shall survive any effective termination of this Agreement.
-
-13. Third party rights
-
-No term of this License is intended to confer a benefit on, or to be enforceable by, any person who is not a party to this license.
-
-14. Confidentiality and publicity
-
-Each party shall, during the term of this License and thereafter, keep confidential all, and shall not use for its own purposes nor without the prior written consent of the other disclose to any third party any, information of a confidential nature (including, without limitation, trade secrets and information of commercial value) which may become known to such party from the other party and which relates to the other party, unless such information is public knowledge or already known to such party at the time of disclosure, or subsequently becomes public knowledge other than by breach of this license, or subsequently comes lawfully into the possession of such party from a third party.
-
-The terms of this license are confidential and may not be disclosed by the Customer without the prior written consent of XMOS.
-The provisions of clause 14 shall remain in full force and effect notwithstanding termination of this license for any reason.
-
-15. Entire agreement
-
-This License and the documents annexed as appendices to this License or otherwise referred to herein contain the whole agreement between the parties relating to the subject matter hereof and supersede all prior agreements, arrangements and understandings between the parties relating to that subject matter.
-
-16. Assignment
-
-The Customer shall not assign this License or any of the rights granted under it without XMOS's prior written consent.
-17. Governing law and jurisdiction
-
-This License shall be governed by and construed in accordance with English law and each party hereby submits to the non-exclusive jurisdiction of the English courts.
-
-This License has been entered into on the date stated at the beginning of it.
-
-Schedule
-XMOS tflite2xcore Library software
\ No newline at end of file
diff --git a/archived/tflite2xcore/README.md b/archived/tflite2xcore/README.md
deleted file mode 100644
index acaddafa0..000000000
--- a/archived/tflite2xcore/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# AI Extensions Python module
-
-# Python module info
-
-`tflite2xcore` is a Python module containing utilities for converting and optimizing a TensorFlow Lite model (.tflite) to deploy on an xCORE microprocessor.
diff --git a/archived/tflite2xcore/build_dist.sh b/archived/tflite2xcore/build_dist.sh
deleted file mode 100755
index d6811e0f9..000000000
--- a/archived/tflite2xcore/build_dist.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/bash
-
-rm -rf dist
-
-python setup.py sdist bdist_wheel
-
-# if twine check dist/* ; then
-#     if [ "$1" = "--test" ] ; then
-#         twine upload --repository-url https://test.pypi.org/legacy/ dist/*
-#     else
-#         twine upload dist/* ;
-#     fi
-# fi
diff --git a/archived/tflite2xcore/setup.py b/archived/tflite2xcore/setup.py
deleted file mode 100644
index b430aa819..000000000
--- a/archived/tflite2xcore/setup.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Copyright 2019-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-import setuptools
-
-LIB_FLEXBUFFERS = [
-    "libs/linux/libflexbuffers.so",
-    "libs/linux/libflexbuffers.so.1.0.1",
-    "libs/macos/libflexbuffers.dylib",
-    "libs/macos/libflexbuffers.1.0.1.dylib",
-]
-
-EXCLUDES = ["*tests", "*tests.*", "*model_generation", "*model_generation.*"]
-SCRIPTS = ["xformer.py", "tflite2xcore/tflite_visualize.py"]
-
-INSTALL_REQUIRES = [
-    "aenum>=2.2.4",
-    "dill>=0.3.1.1",
-    "flatbuffers==1.12",
-    "numpy>=1.19.5",
-    "tensorflow-macos>=2.8.0,<=2.8.0",
-    "larq-compute-engine>=0.6.2",
-]
-
-setuptools.setup(
-    name="tflite2xcore",
-    packages=setuptools.find_packages(exclude=EXCLUDES),
-    scripts=SCRIPTS,
-    python_requires=">=3.8.0",
-    install_requires=INSTALL_REQUIRES,
-    extras_require={
-        "test": [
-            "pytest>=5.2.0",
-            "pytest-xdist>=1.30.0",
-            "portalocker==2.0.0",
-            "keras-applications>=1.0.8",
-            "PyYAML>=5.3.1",
-            "larq>=0.11.1",
-        ],
-        "examples": [
-            "scipy>=1.4.1",
-            "keras-preprocessing>=1.1.2",
-            "tqdm>=4.41.1",
-            "matplotlib>=3.1.1",
-            "jupyter>=1.0.0",
-        ],
-        "dev": [
-            "mypy>=0.782",
-            "lhsmdu>=1.1",
-            "netron>=4.8.8",
-        ],
-    },
-    package_data={"": LIB_FLEXBUFFERS},
-    author="XMOS",
-    author_email="support@xmos.com",
-    description="XMOS Tools to convert TensorFlow Lite models for deployment on xCORE microcontrollers.",
-    license="LICENSE.txt",
-    keywords="xmos xcore",
-    use_scm_version={
-        "root": "../..",
-        "relative_to": __file__,
-        "version_scheme": "post-release",
-    },
-    setup_requires=["setuptools_scm"],
-)
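The extras_require table above defines optional dependency sets that would have been installed with pip's extras syntax, e.g. (paths illustrative, relative to the archived package directory):

    pip install ./tflite2xcore              # runtime dependencies only
    pip install "./tflite2xcore[test]"      # adds pytest, PyYAML, larq, ...
    pip install "./tflite2xcore[dev]"       # adds mypy, netron, ...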
diff --git a/archived/tflite2xcore/tflite2xcore/__init__.py b/archived/tflite2xcore/tflite2xcore/__init__.py
deleted file mode 100644
index da2fe08cd..000000000
--- a/archived/tflite2xcore/tflite2xcore/__init__.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2019-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-import sys
-import ctypes
-
-from pathlib import Path
-
-__PARENT_DIR = Path(__file__).parent.absolute()
-if sys.platform.startswith("linux"):
-    lib_path = str(__PARENT_DIR / "libs" / "linux" / "libflexbuffers.so")
-elif sys.platform == "darwin":
-    lib_path = str(__PARENT_DIR / "libs" / "macos" / "libflexbuffers.dylib")
-else:
-    raise RuntimeError("tflite2xcore is not yet supported on Windows!")
-
-libflexbuffers = ctypes.cdll.LoadLibrary(lib_path)
-from . import version
-
-__version__ = version.get_version()
-
-from . import xcore_schema
-from . import xcore_model
-from . import execution_planning
-from . import converter
-from . import pass_manager
-from . import parallelization
-from . import tflite_visualize
-from . import utils
-from . import analyze
-from . import model_generation
diff --git a/archived/tflite2xcore/tflite2xcore/analyze.py b/archived/tflite2xcore/tflite2xcore/analyze.py
deleted file mode 100644
index 1f2e732e9..000000000
--- a/archived/tflite2xcore/tflite2xcore/analyze.py
+++ /dev/null
@@ -1,145 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import logging
-from copy import copy
-
-from tflite2xcore.xcore_model import XCOREModel
-
-
-def calc_subgraph_mem_req(subgraph):
-    operators = copy(subgraph.operators)
-    coexisting = set(subgraph.inputs)
-    op_mem_reqs = dict()
-    while operators:
-        op = operators.pop(0)
-        needed_tensors = op.inputs + op.outputs
-        coexisting.update(needed_tensors)
-
-        op_mem_reqs[op.name] = {
-            "buffer_tensors": {
-                tensor.name: len(tensor.buffer.data)
-                for tensor in coexisting
-                if len(tensor.buffer.data)
-            },
-            "arena_tensors": {
-                tensor.name: tensor.size
-                for tensor in coexisting
-                if not len(tensor.buffer.data)
-            },
-            "init": 500,  # TODO: this is a rough estimate
-        }
-        op_mem_reqs[op.name]["buffers"] = sum(
-            v for v in op_mem_reqs[op.name]["buffer_tensors"].values()
-        )
-        op_mem_reqs[op.name]["arena"] = sum(
-            v for v in op_mem_reqs[op.name]["arena_tensors"].values()
-        )
-
-        coexisting = {
-            tensor
-            for tensor in coexisting
-            if (
-                [consumer for consumer in tensor.consumers if consumer in operators]
-                or tensor in subgraph.outputs
-            )
-        }
-
-    return {
-        "op_mem_reqs": op_mem_reqs,
-        "buffers": sum(
-            len(buffer.data)
-            for buffer in subgraph.model.buffers
-            if [owner for owner in buffer.owners if owner in subgraph.tensors]
-        ),
-        "arena": max(op_info["arena"] for op_info in op_mem_reqs.values()),
-        "init": sum(op_info["init"] for op_info in op_mem_reqs.values()),
-    }
-
-
-def analyze_model(model):
-    analysis = {
-        "subgraphs": [calc_subgraph_mem_req(subgraph) for subgraph in model.subgraphs]
-    }
-    analysis["buffers"] = sum(len(buffer.data) for buffer in model.buffers)
-    analysis["arena"] = max(
-        subgraph_info["arena"] for subgraph_info in analysis["subgraphs"]
-    )
-    analysis["init"] = sum(
-        subgraph_info["init"] for subgraph_info in analysis["subgraphs"]
-    )
-
-    return analysis
-
-
-# TODO: remove this someday since analysis should not rely on an interpreter
-# however, currently the interpreter is the only method to determine the
-# size of the tensor arena
-def calc_arena_size(model_content):
-    try:
-        from xcore_interpreters import XCOREInterpreter
-
-        with XCOREInterpreter(model_content=model_content) as interpreter:
-            logger = logging.getLogger("tensor_arena_allocations")
-            [logger.info(line) for line in interpreter.get_allocations().split("\n")]
-            return interpreter.tensor_arena_size
-    except RuntimeError as e:
-        print("Runtime Error: Failed calculating tensor arena size.")
calculating tensor arena size.") - print(str(e)) - - return None - - -def calc_weight_and_bias_fetch_sizes(model_content): - max_weights_size = 0 - max_bias_size = 0 - model = XCOREModel.deserialize(model_content) - for subgraph in model.subgraphs: - for op in subgraph.operators: - if "mem" in op.custom_options: - max_weights_size = max(max_weights_size, op.custom_options["mem"][0]) - max_bias_size = max(max_bias_size, op.custom_options["mem"][1]) - - return max_weights_size, max_bias_size - - -def print_report(tflite_output_path): - indent = " " * 2 - - with open(tflite_output_path, "rb") as fd: - model_content = fd.read() - model_size = len(model_content) - try: - tensor_arena_size = calc_arena_size(model_content) - max_weights_size, max_bias_size = calc_weight_and_bias_fetch_sizes( - model_content - ) - print(f"Model size: {model_size} (bytes)") - print() - if tensor_arena_size: - ram_used = model_size + tensor_arena_size - print("Model stored in RAM") - print(f"{indent}Tensor arena size: {tensor_arena_size} (bytes)") - print() - print(f"{indent}Total RAM required: {ram_used} (bytes)") - print() - if max_weights_size and max_bias_size: - print("Model stored in external memory (Flash or LPDDR)") - tensor_arena_size += max_weights_size + max_bias_size - print(f"{indent}Tensor arena size: {tensor_arena_size} (bytes)") - print() - print(f"{indent}Total RAM required: {tensor_arena_size}") - print(f"{indent}Total external memory required: {model_size}") - print() - else: - print("Unable to determine model memory requirements.") - except RuntimeError as e: - prefix = "Didn't find op for builtin opcode " - msg = e.args[0] - if msg.startswith(prefix): - op_details = msg.split("\n", 1)[0][len(prefix) :] - logging.warning( - f"Arena size calculation failed because of unknown op in the interpreter: {op_details}" - ) - else: - raise diff --git a/archived/tflite2xcore/tflite2xcore/converter.py b/archived/tflite2xcore/tflite2xcore/converter.py deleted file mode 100644 index adad9c73f..000000000 --- a/archived/tflite2xcore/tflite2xcore/converter.py +++ /dev/null @@ -1,359 +0,0 @@ -# Copyright 2019-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
diff --git a/archived/tflite2xcore/tflite2xcore/converter.py b/archived/tflite2xcore/tflite2xcore/converter.py
deleted file mode 100644
index adad9c73f..000000000
--- a/archived/tflite2xcore/tflite2xcore/converter.py
+++ /dev/null
@@ -1,359 +0,0 @@
-# Copyright 2019-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import os
-import logging
-import tempfile
-import subprocess
-
-from pathlib import Path
-from typing import Optional, Union, Any
-
-from tflite2xcore.pass_manager import PassManager
-from tflite2xcore.xcore_model import XCOREModel
-from tflite2xcore import transformation_passes as passes
-
-
-XFORMER2_PATH = (
-    Path(__file__).resolve().parents[3]
-    / "experimental"
-    / "xformer"
-    / "bazel-bin"
-    / "xcore-opt"
-)
-
-
-class CleanupManager(PassManager):
-    def __init__(self, model: Optional[XCOREModel] = None, **kwargs: Any) -> None:
-        super().__init__(model, **kwargs)
-        self.register_pass(passes.EliminateDeadOperatorsPass())
-        self.register_pass(passes.EliminateDeadTensorsPass())
-        self.register_pass(passes.EliminateDeadBuffersPass())
-
-
-class BasicCanonicalizationManager(PassManager):
-    def __init__(
-        self,
-        model: Optional[XCOREModel] = None,
-        *,
-        remove_float_interface: bool,
-        **kwargs: Any,
-    ) -> None:
-        super().__init__(model, **kwargs)
-        self.register_pass(passes.CanonicalizeEmptyBuffersPass())
-
-        if remove_float_interface:
-            self.register_pass(passes.CanonicalizeQuantizedInputPass())
-            self.register_pass(passes.CanonicalizeQuantizedOutputPass())
-
-        # start with a round of constant folding
-        self.register_pass(passes.ConstantPropagationPass())
-
-        # canonicalize single pixel convolution
-        # 1x1 convolutions acting on 1x1 inputs (without padding) map trivially
-        # to a fully connected, so we canonicalize these to a builtin FULLY_CONNECTED
-        self.register_pass(passes.CanonicalizeSinglePixelConv2DPass())
-
-        # canonicalize reshape
-        # this ensures that RESHAPE has a single input tensor
-        # (no dynamic reshapes are currently supported)
-        self.register_pass(passes.CanonicalizeReshapePass())
-        self.register_passes(CleanupManager())  # this is needed
-
-        # canonicalize fully connected shapes
-        # the FC implementation flattens implicitly, so we remove RESHAPEs before
-        # and after FULLY_CONNECTED ops
-        self.register_pass(passes.RemovePrecedingReshapePass())
-        self.register_pass(passes.RemoveSubsequentReshapePass())
-
-        # canonicalize single channel depthwise convolutions
-        # depthwise convolutions with one input channel map trivially to ordinary
-        # convolutions with `depth_multiplier` output channels
-        self.register_pass(passes.CanonicalizeSingleinDepthwiseConv2DPass())
-        self.register_pass(passes.LegalizeSingleinConv2DPass())
-
-        # canonicalize quantize ops
-        # two consecutive quantize ops have no effect besides adding error
-        self.register_pass(passes.RemoveRedundantInt8RequantizationPass())
-        # the TFLM interpreter does not support newer versions of QUANTIZE,
-        # so we downgrade where safe
-        self.register_pass(passes.LegalizeQuantizeVersionPass())
-
-        # need to clean up after the initial canonicalization
-        self.register_passes(CleanupManager())
-
-
-class WordAlignmentCanonicalizationManager(PassManager):
-    def __init__(self, model: Optional[XCOREModel] = None, **kwargs: Any) -> None:
-        super().__init__(model, **kwargs)
-
-        # canonicalize word alignment of inputs
-        # we insert explicit channel-wise padding to ensure that
-        # input channel counts to convolutions are divisible by 4
-        # (this is currently required by our kernels)
-        self.register_pass(passes.CanonicalizeConv2DInputChannels())
-
-
-class ActivationLoweringManager(PassManager):
-    def __init__(self, model: Optional[XCOREModel] = None, experimental_xformer2: bool = False, **kwargs: Any) -> None:
-        super().__init__(model, **kwargs)
-
-        # first we match ops and replace them
-        if not experimental_xformer2:
-            self.register_pass(passes.ReplaceReLUPass())
-            self.register_pass(passes.ReplaceReLU6Pass())
-            self.register_pass(passes.ReplaceTanhPass())
-            self.register_pass(passes.ReplaceLogisticPass())
-            # second we legalize the op by calculating the LUT
-            self.register_pass(passes.LegalizeXCLookupTablePass())
-
-
-class PoolingLoweringManager(PassManager):
-    def __init__(self, model: Optional[XCOREModel] = None, **kwargs: Any) -> None:
-        super().__init__(model, **kwargs)
-
-        self.register_pass(passes.ReplaceMaxPool2D2x2Pass())
-        self.register_pass(passes.ReplaceMaxPool2DPass())
-        self.register_pass(passes.ReplaceAveragePool2D2x2Pass())
-        self.register_pass(passes.ReplaceAveragePool2DPass())
-        self.register_pass(passes.ReplaceGlobalAveragePool2DPass())
-
-
-class ParametricOperatorLoweringManager(PassManager):
-    def __init__(
-        self,
-        model: Optional[XCOREModel] = None,
-        *,
-        experimental_xformer2: bool = False,
-        **kwargs: Any,
-    ) -> None:
-        super().__init__(model, **kwargs)
-
-        # first we match ops and replace them
-        if not experimental_xformer2:
-            self.register_pass(passes.ReplaceFullyConnectedPass())
-            self.register_pass(passes.Replace1x1Conv2dPass())
-            self.register_pass(passes.ReplaceShallowinConv2dPass())
-            self.register_pass(passes.ReplaceDepthwiseConv2dPass())
-            self.register_pass(passes.ReplaceDeepConv2dPass())
-
-        # second we legalize them by reshaping weight/bias tensors,
-        # calculating parameters specific to our kernels,
-        # and populating the custom options
-        if not experimental_xformer2:
-            self.register_pass(passes.LegalizeXCFullyConnectedPass())
-            self.register_pass(passes.LegalizeXC1x1ConvPass())
-            self.register_pass(passes.LegalizeXCShallowinConvPass())
-            self.register_pass(passes.LegalizeXCDepthwiseConvPass())
-            self.register_pass(passes.LegalizeXCDeepConvPass())
-
-
-class PaddingOptimizationManager(PassManager):
-    def __init__(
-        self,
-        model: Optional[XCOREModel] = None,
-        *,
-        remove_input_alignment_pad: bool,
-        experimental_xformer2: bool = False,
-        **kwargs: Any,
-    ) -> None:
-        super().__init__(model, **kwargs)
-
-        # canonicalize by ensuring that spatial and other dims are decoupled
-        # first fuse consecutive PAD ops
-        # (injected by word alignment, bconv2d padding legalization, etc.)
-        self.register_pass(passes.FuseConsecutivePadsPass())
-        # second split batch/channel-wise padding from spatial padding
-        self.register_pass(passes.SplitPaddingPass())
-
-        # we optimize the convolutions by fusing them with spatial padding
-        self.register_pass(passes.FuseConv2dPaddingPass())
-        if remove_input_alignment_pad:
-            # remove word alignment padding on the input
-            self.register_pass(passes.RemovePaddingInputPass())
-        # replace with optimized implementation where possible
-        self.register_pass(passes.ReplacePadPass())
-
-        # fuse back any remaining PAD operators
-        self.register_pass(passes.FuseConsecutivePadsPass())
-
-
-class ParallelizationManager(PassManager):
-    def __init__(
-        self, model: Optional[XCOREModel] = None, *, num_threads: int, **kwargs: Any
-    ) -> None:
-        super().__init__(model, **kwargs)
-
-        self.register_pass(
-            passes.ParallelizeFullyConnectedPass(num_threads=num_threads)
-        )
-        self.register_pass(passes.ParallelizeConv2dPass(num_threads=num_threads))
-        self.register_pass(
-            passes.ParallelizeDepthwiseConv2dPass(num_threads=num_threads)
-        )
-        self.register_pass(passes.ParallelizePooling2DPass(num_threads=num_threads))
-        self.register_pass(
-            passes.ParallelizeGlobalAveragePool2DPass(num_threads=num_threads)
-        )
-
-        self.register_pass(passes.ParallelizeBConv2dBinPass(num_threads=num_threads))
-        self.register_pass(passes.ParallelizeBConv2dInt8Pass(num_threads=num_threads))
-        self.register_pass(passes.ParallelizeAddPass(num_threads=num_threads))
-        self.register_pass(passes.ParallelizeLUTPass(num_threads=num_threads))
-        # pass_mgr.register_pass(passes.ParallelizeRequant16To8Pass(num_threads=num_threads))  # intentionally disabled
-
-        # NOTE: scratch memory passes must be registered after parallelization passes
-        # TODO: it would be better if scratch memory calculation could be decoupled from parallelization
-        self.register_pass(passes.ScratchMemoryConv2dPass())
-        self.register_pass(passes.ScratchMemoryConv2d1x1Pass())
-        self.register_pass(passes.ScratchMemoryDepthwiseConv2dPass())
-        self.register_pass(passes.ScratchMemoryFullyConnectedPass())
-
-
-class BinarizedOperatorLoweringManager(PassManager):
-    def __init__(self, model: Optional[XCOREModel] = None, **kwargs: Any) -> None:
-        super().__init__(model, **kwargs)
-
-        # map LceQuantize to our bsign op
-        self.register_pass(passes.ReplaceLceQuantizePass())
-
-        # match bconv2d ops and replace them
-        self.register_pass(passes.ReplaceBconv2DBitpackedDeepInPass())
-        self.register_pass(passes.ReplaceBconv2DBitpackedPass())
-        self.register_pass(passes.ReplaceBconv2DInt8DeepInDeepOutPass())
-        self.register_pass(passes.ReplaceBconv2DInt8Pass())
-
-        # we legalize the padding by injecting an explicit PAD where needed
-        self.register_pass(passes.LegalizeXCBconv2DPaddingPass())
-
-        # legalize the parameter tensors and custom options
-        self.register_pass(passes.LegalizeBconv2dBitpackedDeepInPass())
-        self.register_pass(passes.LegalizeBconv2dBitpackedPass())
-        self.register_pass(passes.LegalizeBconv2dInt8DeepInDeepOutPass())
-        self.register_pass(passes.LegalizeBconv2dInt8Pass())
-
-
-class ExternalMemoryOptimizationManager(PassManager):
-    def __init__(self, model: Optional[XCOREModel] = None, **kwargs: Any) -> None:
-        super().__init__(model, **kwargs)
-        self.register_pass(passes.InsertExternalMemoryFetchPass())
-
-
-class FinalizationManager(PassManager):
-    def __init__(
-        self,
-        model: Optional[XCOREModel] = None,
-        *,
-        cleanup: bool,
-        minification: bool,
-        **kwargs: Any,
-    ) -> None:
-        super().__init__(model, **kwargs)
-        if cleanup:
-            self.register_passes(CleanupManager())
-
-        # TODO: this is actually a canonicalization pass
-        self.register_pass(passes.LegalizeOperatorOutputTensorNamePass())
-
-        self.register_pass(passes.FloatingPointWarningPass())
-
-        self.register_pass(passes.UnifyEmptyBuffersPass())
-        self.register_pass(passes.EliminateDeadBuffersPass())
-
-        if minification:
-            self.register_pass(passes.MinifyQuantInfoPass())
-            self.register_pass(passes.MinifyTensorNamesPass())
-
-
-def optimize_for_xcore(
-    model: XCOREModel,
-    *,
-    cleanup: bool = True,
-    minification: bool = False,
-    num_threads: Optional[int] = None,
-    intermediates_path: Optional[Union[str, Path]] = None,
-    remove_input_alignment_pad: bool = False,
-    remove_float_interface: bool = False,
-    external_memory: bool = False,
-    experimental_xformer2: bool = False,
-) -> XCOREModel:
-    num_threads = num_threads or 1
-    intermediates_path = Path(intermediates_path) if intermediates_path else None
-
-    pass_mgr = PassManager(model, keep_intermediates=bool(intermediates_path))
-
-    # canonicalization
-    pass_mgr.register_passes(
-        BasicCanonicalizationManager(remove_float_interface=remove_float_interface)
-    )
-    pass_mgr.register_passes(WordAlignmentCanonicalizationManager())
-
-    # lowering to the xcore ops
-    pass_mgr.register_passes(
-        ActivationLoweringManager(experimental_xformer2=experimental_xformer2)
-    )
-    pass_mgr.register_passes(PoolingLoweringManager())
-    pass_mgr.register_passes(BinarizedOperatorLoweringManager())
-
-    if experimental_xformer2:
-        try:
-            pass_mgr.run_passes()
-            model.sanity_check()
-        finally:
-            if intermediates_path:
-                pass_mgr.save_intermediates(intermediates_path / "pre_xformer2")
-                intermediates_path /= "post_xformer2"
-
-        with tempfile.TemporaryDirectory(suffix=str(os.getpid())) as dirname:
-            input_path = Path(dirname) / "input.tflite"
-            model.write_flatbuffer(input_path)
-
-            output_path = Path(dirname) / "output.tflite"
-            cmd = [str(XFORMER2_PATH), str(input_path), "-o", str(output_path)]
-            p = subprocess.run(cmd, capture_output=True, check=True)
-            logging.debug(p.stdout)
-
-            model = XCOREModel.read_flatbuffer(output_path)
-
-        pass_mgr = PassManager(model, keep_intermediates=bool(intermediates_path))
-
-    pass_mgr.register_passes(
-        ParametricOperatorLoweringManager(experimental_xformer2=experimental_xformer2)
-    )
-
-    # TODO: finish these and find a manager for them:
-    pass_mgr.register_pass(passes.ReplaceAddPass())
-
-    # optimizations on xcore ops
-    pass_mgr.register_passes(
-        PaddingOptimizationManager(
-            remove_input_alignment_pad=remove_input_alignment_pad,
-            experimental_xformer2=experimental_xformer2,
-        )
-    )
-    pass_mgr.register_passes(ParallelizationManager(num_threads=num_threads))
-    if external_memory:
-        pass_mgr.register_passes(ExternalMemoryOptimizationManager())
-
-    # finalize (cleanup, minification, renaming, etc.)
-    pass_mgr.register_passes(
-        FinalizationManager(minification=minification, cleanup=cleanup)
-    )
-
-    try:
-        pass_mgr.run_passes()
-        model.sanity_check()
-    finally:
-        if intermediates_path:
-            pass_mgr.save_intermediates(intermediates_path)
-
-    model.description = model.description + " + XMOS optimized."
-
-    return model
-
-
-def convert(
-    tflite_input_path: Union[str, Path],
-    tflite_output_path: Union[str, Path],
-    **kwargs: Any,
-) -> None:
-    model = XCOREModel.read_flatbuffer(tflite_input_path)
-    model = optimize_for_xcore(model, **kwargs)
-    model.write_flatbuffer(tflite_output_path)
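The convert wrapper above is the whole public path from a quantized .tflite to an xcore-optimized one. A usage sketch (file names and option values are illustrative; the keyword arguments are those accepted by optimize_for_xcore above):

    from tflite2xcore.converter import convert

    convert(
        "model_quant.tflite",
        "model_xcore.tflite",
        num_threads=5,
        remove_float_interface=True,
        experimental_xformer2=False,
    )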
diff --git a/archived/tflite2xcore/tflite2xcore/execution_planning.py b/archived/tflite2xcore/tflite2xcore/execution_planning.py
deleted file mode 100644
index 1cc60d080..000000000
--- a/archived/tflite2xcore/tflite2xcore/execution_planning.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-from abc import ABC, abstractmethod
-from typing import Sequence, Dict
-
-from tflite2xcore import xcore_schema as xir
-
-
-class ExecutionPlanner(ABC):
-    def __init__(self, subgraph: xir.Subgraph):
-        self._graph = subgraph
-
-    @abstractmethod
-    def make_plan(self) -> Sequence[xir.Operator]:
-        raise NotImplementedError()
-
-
-class ReverseDepthFirstPlanner(ExecutionPlanner):
-    def make_plan(self) -> Sequence[xir.Operator]:
-        # rely on dict's insertion order guarantee (CPython 3.6+)
-        reverse_op_order: Dict[xir.Operator, None] = {}
-
-        # initialize the op stack with a sentinel that we'll remove later
-        sentinel_op = self._graph.create_operator(
-            xir.OperatorCode(xir.XCOREOpCodes.DUMMY),
-            inputs=self._graph.outputs,
-        )
-        sentinel_op.name = "SENTINEL"
-        op_stack = [sentinel_op]
-
-        # dependency counts to be used to resolve ops that have multiple consumers
-        dependency_counts: Dict[xir.Operator, int] = {sentinel_op: 1}
-
-        while op_stack:
-            op = op_stack.pop(-1)
-            if op in reverse_op_order:
-                # op already scheduled
-                continue
-
-            if op not in dependency_counts:
-                # this is the first time we see this op, so count the dependencies
-                dependency_counts[op] = len(
-                    {c for t in op.outputs for c in t.consumers}
-                )
-
-            if dependency_counts[op] <= 0:
-                raise Exception(
-                    "Found operator with 0 or fewer dependencies (the graph may be corrupted)"
-                )
-
-            dependency_counts[op] -= 1
-            if dependency_counts[op]:
-                # skip scheduling of op if there are other dependents
-                continue
-
-            reverse_op_order[op] = None
-            for tin in sorted(op.inputs, key=lambda t: t.size):
-                op_stack.extend(tin.producers)
-
-        # remove sentinel op
-        self._graph.remove_operator(sentinel_op)
-        del reverse_op_order[sentinel_op]
-
-        # return ops in reverse order
-        return list(reversed(list(reverse_op_order.keys())))
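A usage sketch for the planner above (assumes `model` is an already-deserialized XCOREModel, as in the earlier analyze.py sketch):

    from tflite2xcore.execution_planning import ReverseDepthFirstPlanner

    planner = ReverseDepthFirstPlanner(model.subgraphs[0])
    for op in planner.make_plan():  # operators in a valid execution order
        print(op.name)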
diff --git a/archived/tflite2xcore/tflite2xcore/model_generation/__init__.py b/archived/tflite2xcore/tflite2xcore/model_generation/__init__.py
deleted file mode 100644
index 8368df080..000000000
--- a/archived/tflite2xcore/tflite2xcore/model_generation/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright 2019-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import tensorflow as tf
-from typing import Union, Dict, Any, TypeVar
-from typing_extensions import Protocol
-
-from tflite2xcore.utils import TFLiteModel
-
-Configuration = Dict[str, Any]
-
-T_co = TypeVar("T_co", covariant=True)
-
-
-class Hook(Protocol[T_co]):
-    def __call__(self) -> T_co:
-        ...
-
-
-from .model_generators import ModelGenerator
diff --git a/archived/tflite2xcore/tflite2xcore/model_generation/converters.py b/archived/tflite2xcore/tflite2xcore/model_generation/converters.py
deleted file mode 100644
index 53d7ec851..000000000
--- a/archived/tflite2xcore/tflite2xcore/model_generation/converters.py
+++ /dev/null
@@ -1,190 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import os
-import logging
-import pathlib
-import tempfile
-import subprocess
-
-import tensorflow as tf
-import larq_compute_engine as lce
-from abc import abstractmethod
-from typing import Union
-
-from tflite2xcore.xcore_model import XCOREModel
-from tflite2xcore.converter import optimize_for_xcore, XFORMER2_PATH
-from tflite2xcore.utils import quantize_converter
-
-from . import TFLiteModel, Configuration, Hook
-
-from .runners import Runner, RunnerDependent
-
-class Converter(RunnerDependent):
-    """Superclass for defining model conversion logic and storing converted models.
-
-    Converter objects are registered in Runner objects.
-    """
-
-    _model: TFLiteModel
-    _model_params: str
-
-    def __init__(
-        self,
-        runner: Runner,
-        input_model_hook: Hook[Union[TFLiteModel, tf.keras.Model]],
-    ) -> None:
-        self._runner = runner
-        self._input_model_hook = input_model_hook
-
-    def get_converted_model(self) -> TFLiteModel:
-        try:
-            return self._model
-        except AttributeError:
-            raise Exception(
-                "Cannot get converted model before converter is run!"
-            ) from None
-
-    def get_converted_model_params(self) -> str:
-        return self._model_params
-
-    @abstractmethod
-    def convert(self) -> None:
-        """Sets self._model as defined in subclasses.
-
-        This method should be called after the set_config method has prepared
-        the converter.
-        """
-        raise NotImplementedError()
-
-
-class KerasModelConverter(Converter):
-    """Converts a Keras model to a TFLite model."""
-
-    _input_model_hook: Hook[tf.keras.Model]
-
-    def __init__(self, runner: Runner, input_model_hook: Hook[tf.keras.Model]) -> None:
-        super().__init__(runner, input_model_hook)
-
-
-class TFLiteFloatConverter(KerasModelConverter):
-    """Converts a Keras model to a floating point TFLite model."""
-
-    def convert(self) -> None:
-        self._model = tf.lite.TFLiteConverter.from_keras_model(
-            self._input_model_hook()
-        ).convert()
-
-
-class TFLiteQuantConverter(KerasModelConverter):
-    """Converts a Keras model to a quantized TFLite model."""
-
-    def __init__(
-        self,
-        runner: Runner,
-        input_model_hook: Hook[tf.keras.Model],
-        repr_data_hook: Hook[tf.Tensor],
-    ) -> None:
-        super().__init__(runner, input_model_hook)
-        self._repr_data_hook = repr_data_hook
-
-    def convert(self) -> None:
-        converter = tf.lite.TFLiteConverter.from_keras_model(self._input_model_hook())
-        quantize_converter(converter, representative_data=self._repr_data_hook())
-        self._model = converter.convert()
-
-
-class XCoreConverter(Converter):
-    """Converts a (quantized) TFLite model to an xcore.ai-optimized TFLite model."""
""" - - def __init__( - self, - runner: Runner, - input_model_hook: Hook[TFLiteModel], - *, - experimental_xformer2: bool = False, - only_experimental_xformer2: bool = False - ) -> None: - super().__init__(runner, input_model_hook) - self._experimental_xformer2 = experimental_xformer2 - self._only_experimental_xformer2 = only_experimental_xformer2 - - def _set_config(self, cfg: Configuration) -> None: - if "num_threads" not in self._config: - self._config["num_threads"] = cfg.pop("num_threads", 1) - - def convert(self) -> None: - model = self._input_model_hook() - if self._only_experimental_xformer2: - with tempfile.TemporaryDirectory(suffix=str(os.getpid())) as dirname: - input_path = pathlib.Path(dirname) / "input.tflite" - flash_image_file_path = pathlib.Path(dirname) / "model.params" - - with open(pathlib.Path(input_path).resolve(), "wb") as fd: - fd.write(model) - - # get model dump path - model_dump_path = os.getenv('MODEL_DUMP_PATH') - if not model_dump_path: - logger = logging.getLogger() - logger.error("Model dump path not provided!") - model_dump_path= pathlib.Path(model_dump_path) - if(not model_dump_path.exists() or not model_dump_path.is_dir() or not model_dump_path.is_absolute()): - logger = logging.getLogger() - logger.error("Invalid model dump path - should be an absolute path to a directory!") - - # extract current test name and count from pytest env - test = os.getenv('PYTEST_CURRENT_TEST') - test = test.split(':')[0] - test = test.rsplit('/', 1)[1] - test = test.split('.')[0] - - # create dir for test - model_dump_path = model_dump_path.joinpath(test) - model_dump_path.mkdir(exist_ok=True) - - test_count = os.getenv('PYTEST_CURRENT_TEST') - test_count = test_count.rsplit('[', 1)[1] - test_count = test_count.split(']', maxsplit=1)[0] - test_file_name = test + "_" + str(test_count) + ".tflite" - dump_path = model_dump_path.joinpath(test_file_name) - import shutil - shutil.copyfile(input_path, dump_path) - - output_path = pathlib.Path(dirname) / "output.tflite" - cmd = [str(XFORMER2_PATH), str(input_path), "-o", str(output_path),"--xcore-flash-image-file", - str(flash_image_file_path), "--xcore-thread-count=5"] - p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True) - logger = logging.getLogger() - logger.debug(p.stdout) - - with open(pathlib.Path(output_path).resolve(), "rb") as fd: - bits = bytes(fd.read()) - with open(pathlib.Path(flash_image_file_path).resolve(), "rb") as fd: - params = bytes(fd.read()) - - self._model = bits - self._model_params = params - - else: - model = XCOREModel.deserialize(model) - model = optimize_for_xcore( - model, - num_threads=self._config["num_threads"], - experimental_xformer2=self._experimental_xformer2, - ) - self._model = model.serialize() - self._model_params = None - - -class LarqConverter(KerasModelConverter): - """ Converts a Larq model to a TFLite model. """ - - def convert(self) -> None: - self._model = lce.convert_keras_model( - self._input_model_hook(), - inference_input_type=tf.int8, - inference_output_type=tf.int8, - target="xcore", - experimental_enable_bitpacked_activations=True, - ) \ No newline at end of file diff --git a/archived/tflite2xcore/tflite2xcore/model_generation/data_factories.py b/archived/tflite2xcore/tflite2xcore/model_generation/data_factories.py deleted file mode 100644 index 6060381a0..000000000 --- a/archived/tflite2xcore/tflite2xcore/model_generation/data_factories.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. 
-# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import tensorflow as tf -from abc import abstractmethod -from typing import Tuple, Optional, Any - -from . import Configuration, Hook -from .utils import parse_init_config -from .runners import Runner, RunnerDependent - - -class DataFactory(RunnerDependent): - @abstractmethod - def make_data(self) -> Any: - raise NotImplementedError() - - -class TensorDataFactory(DataFactory): - def __init__( - self, - runner: Runner, - shape_hook: Hook[Tuple[int, ...]], - dtype: tf.dtypes.DType = tf.float32, - ): - super().__init__(runner) - self._shape_hook = shape_hook - self._dtype = dtype - - @abstractmethod - def make_data(self, batch: Optional[int] = None) -> tf.Tensor: - raise NotImplementedError() - - -class InitializerDataFactory(TensorDataFactory): - @property - @abstractmethod - def initializer(self) -> tf.keras.initializers.Initializer: - raise NotImplementedError() - - def make_data(self, batch: Optional[int] = None) -> tf.Tensor: - shape = self._shape_hook() - if batch is not None: - shape = (batch, *shape) - - return self.initializer(shape, dtype=self._dtype) - - -class InputInitializerDataFactory(InitializerDataFactory): - def _set_config(self, cfg: Configuration) -> None: - if "input_init" not in self._config: - self._config["input_init"] = cfg.pop("input_init", ("RandomUniform", -1, 1)) - super()._set_config(cfg) - - @property - def initializer(self) -> tf.keras.initializers.Initializer: - return parse_init_config(*self._config["input_init"]) diff --git a/archived/tflite2xcore/tflite2xcore/model_generation/evaluators.py b/archived/tflite2xcore/tflite2xcore/model_generation/evaluators.py deleted file mode 100644 index 72be5f7ac..000000000 --- a/archived/tflite2xcore/tflite2xcore/model_generation/evaluators.py +++ /dev/null @@ -1,198 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import tensorflow as tf -import numpy as np -import larq_compute_engine as lce -from abc import abstractmethod -from typing import Union - -from xtflm_interpreter import XTFLMInterpreter - -from tflite2xcore.utils import ( - quantize, - QuantizationTuple, - apply_interpreter_to_examples, -) - -from . import TFLiteModel, Hook - -from .runners import Runner, RunnerDependent - - -class Evaluator(RunnerDependent): - """Superclass for defining model evaluation logic. - - Evaluator objects are registered in Runner objects. - Evaluation means that output data is generated for a given - input, but it does not mean that a model is compared to another one. 
- """ - - _input_data: np.ndarray - _output_data: np.ndarray - - def __init__( - self, - runner: Runner, - input_data_hook: Hook[Union[tf.Tensor, np.ndarray]], - model_hook: Hook[Union[tf.keras.Model, TFLiteModel]], - model_params_hook: Hook[str], - ) -> None: - self._runner = runner - self._input_data_hook = input_data_hook - self._model_hook = model_hook - self._model_params_hook = model_params_hook - - @property - def input_data(self) -> np.ndarray: - try: - return self._input_data - except AttributeError: - raise Exception("Cannot get input data before evaluator is run!") from None - - @input_data.setter - def input_data(self, data: Union[tf.Tensor, np.ndarray]) -> None: - self._input_data = np.array(data) - - @property - def output_data(self) -> np.ndarray: - try: - return self._output_data - except AttributeError: - raise Exception("Cannot get output data before evaluator is run!") from None - - @output_data.setter - def output_data(self, data: Union[tf.Tensor, np.ndarray]) -> None: - self._output_data = np.array(data) - - @abstractmethod - def evaluate(self) -> None: - """ Populates self._input_data and self._output_data. """ - raise NotImplementedError() - - -class TFLiteEvaluator(Evaluator): - """ Defines the evaluation logic for a TFLite float model. """ - - _interpreter: tf.lite.Interpreter - - def __init__( - self, - runner: Runner, - input_data_hook: Hook[Union[tf.Tensor, np.ndarray]], - model_hook: Hook[TFLiteModel], - model_params_hook: Hook[str], - ) -> None: - super().__init__(runner, input_data_hook, model_hook, model_params_hook) - - def set_interpreter(self) -> None: - self._interpreter = tf.lite.Interpreter(model_content=self._model_hook()) - - def set_input_data(self) -> None: - self.input_data = self._input_data_hook() - - def evaluate(self) -> None: - self.set_interpreter() - self._interpreter.allocate_tensors() - self.set_input_data() - self.output_data = apply_interpreter_to_examples( - self._interpreter, self.input_data - ) - del self._interpreter - - -class TFLiteQuantEvaluator(TFLiteEvaluator): - """Defines the evaluation logic for a quantized TFLite model. - - The input and output quantization parameters are inferred from the model. - """ - - _input_type: np.dtype - _input_quant: QuantizationTuple - _output_quant: QuantizationTuple - - def __init__( - self, - runner: Runner, - input_data_hook: Hook[Union[tf.Tensor, np.ndarray]], - model_hook: Hook[TFLiteModel], - model_params_hook: Hook[str], - ) -> None: - super().__init__(runner, input_data_hook, model_hook, model_params_hook) - - @property - def input_quant(self) -> QuantizationTuple: - try: - return self._input_quant - except AttributeError: - raise Exception( - "Cannot get input quantization before evaluator is run!" - ) from None - - @property - def output_quant(self) -> QuantizationTuple: - try: - return self._output_quant - except AttributeError: - raise Exception( - "Cannot get output quantization before evaluator is run!" 
-            ) from None
-
-    def set_input_data(self) -> None:
-        input_details = self._interpreter.get_input_details()[0]
-        self._input_quant = QuantizationTuple(*input_details["quantization"])
-        self._input_type = np.dtype(input_details["dtype"])
-        self._output_quant = QuantizationTuple(
-            *self._interpreter.get_output_details()[0]["quantization"]
-        )
-
-        super().set_input_data()
-        if (
-            self._input_type in (np.int8, np.int16)
-            and self.input_data.dtype == np.float32
-        ):
-            self.input_data = quantize(
-                self.input_data, *self._input_quant, dtype=self._input_type
-            )
-
-
-class XCoreEvaluator(TFLiteQuantEvaluator):
-    """Defines the evaluation logic for an xcore.ai-optimized TFLite model.
-
-    The input and output quantization parameters are inferred from the model.
-    """
-
-    def __init__(
-        self,
-        runner: Runner,
-        input_data_hook: Hook[Union[tf.Tensor, np.ndarray]],
-        model_hook: Hook[TFLiteModel],
-        model_params_hook: Hook[str],
-        use_device: bool = False,
-    ) -> None:
-        super().__init__(runner, input_data_hook, model_hook, model_params_hook)
-        self._use_device = use_device
-
-    def evaluate(self) -> None:
-        if self._use_device:
-            print("Warning: use_device is deprecated and has no effect")
-
-        if self._model_params_hook():
-            self._interpreter = XTFLMInterpreter(
-                model_content=self._model_hook(),
-                params_content=self._model_params_hook(),
-            )
-        else:
-            self._interpreter = XTFLMInterpreter(model_content=self._model_hook())
-
-        with self._interpreter:
-            self.set_input_data()
-            self.output_data = apply_interpreter_to_examples(
-                self._interpreter, self.input_data
-            )
-
-        del self._interpreter
-
-
-class LarqEvaluator(Evaluator):
-    def evaluate(self) -> None:
-        interpreter = lce.tflite.python.interpreter.Interpreter(self._model_hook())
-        self.input_data = self._input_data_hook()
-        self.output_data = interpreter.predict(self.input_data)
diff --git a/archived/tflite2xcore/tflite2xcore/model_generation/model_generators.py b/archived/tflite2xcore/tflite2xcore/model_generation/model_generators.py
deleted file mode 100644
index 77486ce2b..000000000
--- a/archived/tflite2xcore/tflite2xcore/model_generation/model_generators.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import logging
-import tensorflow as tf
-from pathlib import Path
-from abc import abstractmethod
-from typing import Tuple, Iterator
-from contextlib import contextmanager
-
-from tflite2xcore.utils import set_all_seeds, LoggingContext
-
-from .runners import Runner, RunnerDependent
-
-
-class ModelGenerator(RunnerDependent):
-    """Superclass for defining parameterized automatic model generation.
-
-    The main use case of this class is generation of the integration test
-    models.
-    """
-
-    _model: tf.keras.Model
-
-    def __init__(self, runner: Runner) -> None:
-        self._runner = runner
-
-    @abstractmethod
-    def build(self) -> None:
-        """Sets the _model field as needed by the subclass.
-
-        The configuration should be set using the _set_config method before
-        calling this.
-        """
-        raise NotImplementedError()
-
-    def get_model(self) -> tf.keras.Model:
-        try:
-            return self._model
-        except AttributeError:
-            raise Exception(
-                "Cannot get generated model before build is called!"
-            ) from None
-
-    def _prep_backend(self) -> None:
-        tf.keras.backend.clear_session()
-        set_all_seeds()
-
-    @property
-    def input_shape(self) -> Tuple[int, ...]:
-        return self._model.input_shape[1:]  # type:ignore # pylint: disable=no-member
-
-    @property
-    def output_shape(self) -> Tuple[int, ...]:
-        return self._model.output_shape[1:]  # type:ignore # pylint: disable=no-member
-
-    @contextmanager
-    def save_model(self, dirpath: Path) -> Iterator[None]:
-        """Saves the underlying model contents and makes the object temporarily pickleable.
-
-        A model subdirectory is created.
-        """
-        self._model.save(dirpath / "model")
-        tmp = self._model
-        del self._model
-        yield
-        self._model = tmp
-
-    def load_model(self, dirpath: Path) -> None:
-        # tf may complain about missing training config, so silence it
-        with LoggingContext(tf.get_logger(), logging.ERROR):
-            self._model = tf.keras.models.load_model(dirpath / "model", compile=False)
diff --git a/archived/tflite2xcore/tflite2xcore/model_generation/runners.py b/archived/tflite2xcore/tflite2xcore/model_generation/runners.py
deleted file mode 100644
index a61e06825..000000000
--- a/archived/tflite2xcore/tflite2xcore/model_generation/runners.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import logging
-import dill
-import numpy as np
-import tensorflow as tf
-from pathlib import Path
-from abc import ABC, abstractmethod
-from typing import TYPE_CHECKING, Dict, Any, List, Union, Type, Iterable
-
-from tflite2xcore import tflite_visualize
-
-from . import Configuration, TFLiteModel
-
-if TYPE_CHECKING:
-    from .evaluators import Evaluator
-    from .converters import Converter
-    from .data_factories import DataFactory
-    from .model_generators import ModelGenerator
-
-ConvertedModels = Dict[str, TFLiteModel]
-
-
-class Runner(ABC):
-    """Superclass for defining the behavior of model generation runs.
-
-    A Runner registers a ModelGenerator object along with all the
-    converters, evaluators, and data factories.
-    """
-
-    converted_models: ConvertedModels
-    _converters: List["Converter"]
-    _evaluators: List["Evaluator"]
-    _data_factories: List["DataFactory"]
-    _config: Configuration
-
-    def __init__(self, generator: Type["ModelGenerator"]) -> None:
-        self._model_generator = generator(self)
-        self._converters = []
-        self._evaluators = []
-        self._data_factories = []
-
-    def register_converter(self, converter: "Converter") -> None:
-        self._converters.append(converter)
-
-    def register_evaluator(self, evaluator: "Evaluator") -> None:
-        self._evaluators.append(evaluator)
-
-    def register_data_factory(self, data_factory: "DataFactory") -> None:
-        self._data_factories.append(data_factory)
-
-    @abstractmethod
-    def run(self) -> None:
-        """ Defines how the runner should be run once configured. """
-        self._model_generator.build()
-        self.converted_models = {}
-
-    def get_built_model(self) -> tf.keras.Model:
-        return self._model_generator.get_model()
-
-    def check_config(self) -> None:
-        """ Checks if the current configuration parameters are legal. """
-        # TODO: extend to converters and evaluators
-        self._model_generator.check_config()
-
-    def _set_config(self, cfg: Configuration) -> None:
-        """Sets the relevant configuration parameters.
-
-        This method operates on the config input argument in-place.
-        Subclasses should override this instead of the set_config method.
- """ - self._model_generator._set_config(cfg) - for converter in self._converters: - converter._set_config(cfg) - for evaluator in self._evaluators: - evaluator._set_config(cfg) - for data_factory in self._data_factories: - data_factory._set_config(cfg) - - def set_config(self, **config: Any) -> None: - """Configures the runner before it is called. - - Default values for missing configuration parameters are set. - Subclasses should override set_config instead of this method. - """ - self._config = {} - self._set_config(config) - if config: - raise ValueError( - f"Unexpected configuration parameter(s): {', '.join(config.keys())}" - ) - self.check_config() - - def save(self, dirpath: Union[Path, str]) -> Path: - """Saves the Runner contents to the specified directory. - - If the directory doesn't exist, it is created. - """ - dirpath = Path(dirpath) - dirpath.mkdir(parents=True, exist_ok=True) - - with self._model_generator.save_model(dirpath): - with open(dirpath / "runner.dill", "wb") as f: - dill.dump(self, f) - - return dirpath - - @classmethod - def load(cls, dirpath: Union[Path, str]) -> "Runner": - dirpath = Path(dirpath) - with open(dirpath / "runner.dill", "rb") as f: - obj: "Runner" = dill.load(f) - assert isinstance(obj, cls) - - obj._model_generator.load_model(dirpath) - return obj - - def dump_models(self, dirpath: Path, *, visualize: bool = True) -> None: - for name, model in self.converted_models.items(): - name = "model_" + name - model_path = (dirpath / name).with_suffix(".tflite") - model_html = model_path.with_suffix(".html") - with open(model_path, "wb") as f: - f.write(model) - logging.debug(f"{name} dumped to {model_path}") - if visualize: - tflite_visualize.main(model_path, model_html) - logging.debug(f"{name} visualization dumped to {model_html}") - - all_models_name = ",".join(self.converted_models.keys()) - all_models_path = (dirpath / f"model_{{{all_models_name}}}").with_suffix( - ".tflite" - ) - logging.info(f"Models dumped to {all_models_path}") - - @staticmethod - def dump_data( - dirpath: Path, - *, - data: Dict[str, Union[tf.Tensor, np.ndarray]], - example_idx: Union[int, Iterable[int]] = [], - ) -> None: - example_idx = [example_idx] if isinstance(example_idx, int) else example_idx - for key, arr in data.items(): - for j in example_idx: - np.save(dirpath / f"example_{j}.{key}", arr[j]) - # with open(dirpath / f"example_{j}.{key}", "wb") as f: - # f.write(np.array(arr[j]).tobytes()) - - -class RunnerDependent(ABC): - def __init__(self, runner: "Runner") -> None: - self._runner = runner - - @property - def _config(self) -> Configuration: - return self._runner._config - - def check_config(self) -> None: - pass - - def _set_config(self, cfg: Configuration) -> None: - """Sets the relevant configuration parameters and returns the unused ones. - - This method operates on the cfg input argument in-place. - """ - pass diff --git a/archived/tflite2xcore/tflite2xcore/model_generation/utils.py b/archived/tflite2xcore/tflite2xcore/model_generation/utils.py deleted file mode 100644 index d53d39426..000000000 --- a/archived/tflite2xcore/tflite2xcore/model_generation/utils.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import os -import tensorflow as tf -from collections.abc import Iterable -from typing import Union, Any, Tuple - -from . 
import Configuration
-
-
-class RandomUniform(tf.keras.initializers.RandomUniform):  # type: ignore
-    def __call__(
-        self, shape: Tuple[int, ...], dtype: tf.dtypes.DType = None
-    ) -> tf.Tensor:
-        try:
-            return super().__call__(shape, dtype)
-        except Exception as e:
-            if e.args and e.args[0].startswith("Invalid dtype "):
-                dtype = tf.dtypes.as_dtype(dtype)
-                if dtype in (tf.int8, tf.int16):
-                    if self.minval < dtype.min:
-                        raise ValueError(
-                            f"initializer minval = {self.minval} < {dtype.min} = dtype.min"
-                        ) from None
-                    elif self.maxval > dtype.max:
-                        raise ValueError(
-                            f"initializer maxval = {self.maxval} > {dtype.max} = dtype.max"
-                        ) from None
-                    else:
-                        return tf.cast(
-                            self._random_generator.random_uniform(
-                                shape, self.minval, self.maxval, tf.int32
-                            ),
-                            dtype,
-                        )
-            raise
-
-
-def parse_init_config(
-    name: str, *args: Union[int, float]
-) -> tf.keras.initializers.Initializer:
-    if name == "RandomUniform":
-        init = RandomUniform
-    else:
-        init = getattr(tf.keras.initializers, name)
-    return init(*args)
-
-
-def stringify_config(cfg: Configuration) -> str:
-    def stringify_value(v: Any) -> str:
-        if not isinstance(v, str) and isinstance(v, Iterable):
-            v = "(" + ",".join(str(c) for c in v) + ")"
-        return str(v).replace(" ", "_")
-
-    return os.sep.join(k + "=" + stringify_value(v) for k, v in sorted(cfg.items()))
diff --git a/archived/tflite2xcore/tflite2xcore/parallelization.py b/archived/tflite2xcore/tflite2xcore/parallelization.py
deleted file mode 100644
index 71febad62..000000000
--- a/archived/tflite2xcore/tflite2xcore/parallelization.py
+++ /dev/null
@@ -1,343 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import math
-import logging
-from abc import ABC, abstractmethod
-from functools import partial
-from typing import (
-    Dict,
-    Any,
-    Callable,
-    List,
-    Tuple,
-    Optional,
-    SupportsFloat,
-    NamedTuple,
-    Generic,
-    TypeVar,
-    Sequence,
-)
-
-from tflite2xcore.utils import ACC_PERIOD_INT8, WORD_SIZE_BYTES
-
-MAX_THREADS = 5
-CHANNEL_GROUP_SIZE = ACC_PERIOD_INT8
-
-
-class ParallelizationPlan(ABC):
-    def __init__(
-        self, num_threads: int, *, fixed_cost_per_thread: SupportsFloat
-    ) -> None:
-        self._num_threads = num_threads
-        self._fixed_cost_per_thread = fixed_cost_per_thread
-
-    def estimate_fixed_cost(self) -> float:
-        return self._num_threads * float(self._fixed_cost_per_thread)
-
-    @abstractmethod
-    def estimate_cost(self) -> SupportsFloat:
-        raise NotImplementedError()
-
-    def __repr__(self) -> str:
-        return f"{type(self).__name__} (num_threads={self._num_threads}, cost={self.estimate_cost()})"
-
-    def to_dict(self) -> Dict[str, Any]:
-        return {"th": self._num_threads}
-
-
-class ElementWiseParallelizationPlan(ParallelizationPlan):
-    def __init__(
-        self,
-        num_threads: int,
-        *,
-        job_sizes: Optional[Sequence[int]] = None,
-        **kwargs: Any,
-    ) -> None:
-        super().__init__(num_threads, **kwargs)
-        self._job_sizes = list(job_sizes or [])
-
-    def estimate_cost(self) -> SupportsFloat:
-        return max(self._job_sizes) + self.estimate_fixed_cost()
-
-    def to_dict(self) -> Dict[str, Any]:
-        d = super().to_dict()
-        if self._job_sizes:
-            d["eg"] = self._job_sizes
-        return d
-
-
-class _ChannelGroup(NamedTuple):
-    begin: int
-    end: int
-
-
-class ChannelGroupParallelizationPlan(ParallelizationPlan):
-    def __init__(
-        self,
-        num_threads: int,
-        *,
-        channel_groups: Optional[Sequence[_ChannelGroup]] = None,
-        **kwargs: Any,
-    ) -> None:
-        super().__init__(num_threads, **kwargs)
-        self._channel_groups = list(channel_groups or [])
-
-    def _estimate_channel_group_cost(self, changrp: _ChannelGroup) -> int:
-        if changrp.end - changrp.begin + 1 == CHANNEL_GROUP_SIZE:
-            return 1
-        else:
-            return 2  # NOTE: 2 might be a bit aggressive
-
-    def estimate_cost(self) -> SupportsFloat:
-        return (
-            sum(
-                self._estimate_channel_group_cost(changrp)
-                for changrp in self._channel_groups
-            )
-            / self._num_threads
-        )
-
-    def to_dict(self) -> Dict[str, Any]:
-        d = super().to_dict()
-        if self._channel_groups:
-            d["cg"] = [tuple(t) for t in self._channel_groups]
-        return d
-
-
-class _RowColumnSlice(NamedTuple):
-    top: int
-    left: int
-    rows: int
-    cols: int
-
-
-class RowColumnParallelizationPlan(ChannelGroupParallelizationPlan):
-    def __init__(
-        self,
-        num_threads: int,
-        *,
-        row_column_slices: Optional[Sequence[_RowColumnSlice]] = None,
-        **kwargs: Any,
-    ) -> None:
-        super().__init__(num_threads, **kwargs)
-        self._row_col_slices = row_column_slices or []
-
-    def to_dict(self) -> Dict[str, Any]:
-        d = super().to_dict()
-        if self._row_col_slices:
-            d["rc"] = [tuple(t) for t in self._row_col_slices]
-        return d
-
-    def _estimate_row_slice_cost(self, row_col_slice: _RowColumnSlice) -> int:
-        return row_col_slice.rows * row_col_slice.cols
-
-    def estimate_cost(self) -> SupportsFloat:
-        cost = 0
-        for changrp_slice in self._channel_groups:
-            changrp_cost = self._estimate_channel_group_cost(changrp_slice)
-
-            # TODO: make this cost estimate more general
-            assert len(self._row_col_slices) <= self._num_threads
-            cost += changrp_cost * max(
-                self._estimate_row_slice_cost(row_slice)
-                for row_slice in self._row_col_slices
-            )
-
-        return cost + self.estimate_fixed_cost()
-
-
-class ParallelizationPlanner(ABC):
-    def __init__(
-        self,
-        *,
-        num_threads: int,
-        forced: bool = False,
-        fixed_cost_per_thread: SupportsFloat = 0,
-    ) -> None:
-        assert 0 < num_threads <= MAX_THREADS
-        self.logger = logging.getLogger(self.__class__.__name__)
-        self._num_threads = num_threads
-        self._forced = forced
-        self._fixed_cost_per_thread = fixed_cost_per_thread
-
-    @abstractmethod
-    def create_n_thread_candidates(self, num_threads: int) -> None:
-        pass
-
-    def create_candidate_plans(self) -> None:
-        for n in range(self._num_threads):
-            self.create_n_thread_candidates(n + 1)
-
-    @abstractmethod
-    def find_optimal_plan(self) -> ParallelizationPlan:
-        raise NotImplementedError()
-
-
-_P = TypeVar("_P", bound=ParallelizationPlan)
-
-
-class NaiveParallelizationPlanner(ParallelizationPlanner, Generic[_P]):
-    def __init__(self, **kwargs: Any) -> None:
-        super().__init__(**kwargs)
-        self._candidate_plans: List[_P] = []
-
-    def add_candidate_plan(self, plan: _P) -> None:
-        self._candidate_plans.append(plan)
-
-    def find_optimal_plan(self) -> _P:
-        if not self._candidate_plans:
-            self.create_candidate_plans()
-
-        best_plan = min(self._candidate_plans, key=lambda plan: plan.estimate_cost())
-
-        if best_plan._num_threads == self._num_threads:
-            self.logger.debug(f"found best plan: {repr(best_plan)}")
-            return best_plan
-        else:
-            forced_candidates = [
-                plan
-                for plan in self._candidate_plans
-                if plan._num_threads == self._num_threads
-            ]
-            best_forced_plan = None
-            if forced_candidates:
-                best_forced_plan = min(
-                    forced_candidates, key=lambda plan: plan.estimate_cost()
-                )
-
-            if self._forced:
-                if best_forced_plan:
-                    self.logger.warning(
-                        f"forcing suboptimal plan {repr(best_forced_plan)} "
-                        f"when better alternative {repr(best_plan)} exists."
- ) - return best_forced_plan - - self.logger.warning( - f"no forced plan could be found, resolving to {repr(best_plan)}" - ) - else: - self.logger.debug( - f"replacing suboptimal plan {repr(best_forced_plan)} " - f"with better alternative {repr(best_plan)}." - ) - return best_plan - - -class ElementWisePlanner(NaiveParallelizationPlanner[ElementWiseParallelizationPlan]): - def __init__( - self, num_elements: int, *, alignment: int = WORD_SIZE_BYTES, **kwargs: Any - ) -> None: - super().__init__(**kwargs) - assert num_elements > 0 - self._num_elements = num_elements - self._alignment = alignment - - def create_n_thread_candidates(self, num_threads: int) -> None: - r = self._num_elements % self._alignment - full_vectors = (self._num_elements - r) // self._alignment - p = full_vectors % num_threads - k = (full_vectors - p) // num_threads - - job_sizes = [ - k * self._alignment + (idx < p) * self._alignment - for idx in range(num_threads) - ] - job_sizes[-1] += r - assert sum(job_sizes) == self._num_elements - - if 0 not in job_sizes: - self.add_candidate_plan( - ElementWiseParallelizationPlan( - num_threads, - job_sizes=job_sizes, - fixed_cost_per_thread=self._fixed_cost_per_thread, - ) - ) - - -class ChannelGroupSlicePlanner( - NaiveParallelizationPlanner[ChannelGroupParallelizationPlan] -): - def __init__(self, num_channels_out: int, **kwargs: Any) -> None: - super().__init__(**kwargs) - self._cout = num_channels_out - - def split_channelwise(self) -> List[_ChannelGroup]: - changrps = [] - num_changrps = math.ceil(self._cout / CHANNEL_GROUP_SIZE) - for i in range(num_changrps): - Cbegin = i * CHANNEL_GROUP_SIZE - Cend = min(Cbegin + CHANNEL_GROUP_SIZE - 1, self._cout - 1) - changrps.append(_ChannelGroup(Cbegin, Cend)) - - return changrps - - def create_n_thread_candidates(self, num_threads: int) -> None: - changrps = self.split_channelwise() - if len(changrps) >= num_threads: - self.add_candidate_plan( - ChannelGroupParallelizationPlan( - num_threads, - channel_groups=changrps, - fixed_cost_per_thread=self._fixed_cost_per_thread, - ) - ) - - -class SlicePlanner(NaiveParallelizationPlanner[RowColumnParallelizationPlan]): - def __init__( - self, num_channels_out: int, height: int, width: int, **kwargs: Any - ) -> None: - super().__init__(**kwargs) - assert height * width > 0, f"received height={height}, width={width}" - self._height, self._width = height, width - kwargs.pop("num_threads") - self._ch_group_planner = partial( - ChannelGroupSlicePlanner, num_channels_out, **kwargs - ) - - def _split_unidirectionally( - self, dim: int, num_threads: int - ) -> Tuple[List[int], List[int]]: - adjustments: Dict[int, Callable[[int], List[int]]] = { - 1: lambda rem: [0], - 2: lambda rem: [int(rem >= 1), 0], - 3: lambda rem: [int(rem >= 1), 0, int(rem >= 2)], - 4: lambda rem: [int(rem >= 1), int(rem == 3), 0, int(rem >= 2)], - 5: lambda rem: [ - int(rem >= 1), - int(rem >= 3), - 0, - int(rem >= 4), - int(rem >= 2), - ], - } - - base, rem = dim // num_threads, dim % num_threads - block_lengths = [base + a for a in adjustments[num_threads](rem)] - block_starts = [0] - for j in range(num_threads - 1): - block_starts.append(block_starts[j] + block_lengths[j]) - return block_starts, block_lengths - - def _split_vertically(self, num_threads: int) -> List[_RowColumnSlice]: - starts, heights = self._split_unidirectionally(self._height, num_threads) - return [ - _RowColumnSlice(starts[j], 0, heights[j], self._width) - for j in range(num_threads) - if heights[j] > 0 - ] - - def create_n_thread_candidates(self, 
num_threads: int) -> None: - self.add_candidate_plan( - RowColumnParallelizationPlan( - num_threads, - channel_groups=self._ch_group_planner( - num_threads=num_threads - ).split_channelwise(), - row_column_slices=self._split_vertically(num_threads), - fixed_cost_per_thread=self._fixed_cost_per_thread, - ) - ) diff --git a/archived/tflite2xcore/tflite2xcore/pass_manager.py b/archived/tflite2xcore/tflite2xcore/pass_manager.py deleted file mode 100644 index 1d234b5d3..000000000 --- a/archived/tflite2xcore/tflite2xcore/pass_manager.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import logging -from pathlib import Path -from collections import deque -from math import log10, ceil -from typing import TYPE_CHECKING, Iterable, Optional, List, Tuple, Deque - -from tflite2xcore import tflite_visualize - -if TYPE_CHECKING: - from tflite2xcore.xcore_model import XCOREModel - from tflite2xcore.transformation_passes import ModelTransformationPass - - -class PassManager: - def __init__( - self, - model: Optional["XCOREModel"] = None, - *, - debug: bool = False, - keep_intermediates: bool = False, - ) -> None: - self._queue: Deque["ModelTransformationPass"] = deque() - self.logger = logging.getLogger(self.__class__.__name__) - self._model: Optional["XCOREModel"] = None - if model: - self.register_model(model) - self._mutating_passes: List[Tuple[int, str]] = [] - self._intermediates: List[bytes] = [] - self.keep_intermediates = keep_intermediates - - def register_model(self, model: "XCOREModel") -> None: - assert model - self._model = model - - @property - def passes(self) -> Iterable["ModelTransformationPass"]: - for trf_pass in self._queue: - yield trf_pass - - def register_passes(self, other_mgr: "PassManager") -> None: - for trf_pass in other_mgr.passes: - self.register_pass(trf_pass) - - def register_pass(self, trf_pass: "ModelTransformationPass") -> None: - self._queue.append(trf_pass) - - def pop_pass(self) -> "ModelTransformationPass": - return self._queue.popleft() - - def save_intermediates(self, dirpath: Path, *, visualize: bool = True) -> None: - if len(self._intermediates) == 0: - self.logger.warning("No intermediate models were recorded!") - - dirpath.mkdir(parents=True, exist_ok=True) - - fill_width = ceil(log10(self._mutating_passes[-1][0])) - for (j, _), bits in zip(self._mutating_passes, self._intermediates): - basepath = dirpath.joinpath( - f"model_{self.__class__.__name__}_{j:0{fill_width}d}" - ).resolve() - filepath = basepath.with_suffix(".tflite") - with open(filepath, "wb") as f: - f.write(bits) - if visualize: - tflite_visualize.main(filepath, basepath.with_suffix(".html")) - self.logger.debug(f"Saved {filepath}") - - def run_passes(self) -> None: - if not self._model: - raise Exception("No model registered!") - - num_passes = len(self._queue) - self.logger.debug(f"Running {num_passes} passes...") - for n in range(num_passes): - trf_pass = self.pop_pass() - - self.logger.debug(f"Running pass #{n}/{num_passes}: {trf_pass}..") - - modified = trf_pass.run(self._model) - if __debug__: - try: - self._model.sanity_check() - except AssertionError as e: - self.logger.exception(e) - - if modified: - self._mutating_passes.append((n, trf_pass.__class__.__name__)) - if self.keep_intermediates: - # switch descriptions for the intermediate models - new_desc = str(self._mutating_passes) - self._model.description, old_desc = ( - new_desc, - self._model.description, - ) - 
self._intermediates.append(self._model.serialize()) - self._model.description = old_desc - - msg = "\n".join( - [f" #{p[0]}/{num_passes}: {p[1]}" for p in self._mutating_passes] - ) - if msg: - self.logger.info(f"The following passes mutated the model:\n{msg}") diff --git a/archived/tflite2xcore/tflite2xcore/py.typed b/archived/tflite2xcore/tflite2xcore/py.typed deleted file mode 100644 index e69de29bb..000000000 diff --git a/archived/tflite2xcore/tflite2xcore/tests/__init__.py b/archived/tflite2xcore/tflite2xcore/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/archived/tflite2xcore/tflite2xcore/tests/conftest.py b/archived/tflite2xcore/tflite2xcore/tests/conftest.py deleted file mode 100644 index ace842d51..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/conftest.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - - -# ---------------------------------------------------------------------------- -# HOOKS -# ---------------------------------------------------------------------------- - - -def pytest_addoption(parser): - parser.addoption("--smoke", action="store_true", help="smoke test") - parser.addoption("--extended", action="store_true", help="extended test") - - -# TODO: this is deprecated, find a better way -def pytest_cmdline_preparse(config, args): - if "--smoke" in args and "--extended" in args: - raise pytest.UsageError('Only one of "--smoke" and "--extended" can be used') - - -def pytest_generate_tests(metafunc): - try: - PARAMS = metafunc.module.PARAMS - if metafunc.config.getoption("smoke"): - params = PARAMS.get("smoke", PARAMS["default"]) - elif metafunc.config.getoption("extended"): - params = PARAMS.get("extended", PARAMS["default"]) - else: - params = PARAMS["default"] - except AttributeError: - params = {} - - for name, values in params.items(): - if name in metafunc.fixturenames: - metafunc.parametrize(name, values) diff --git a/archived/tflite2xcore/tflite2xcore/tests/pytest.ini b/archived/tflite2xcore/tflite2xcore/tests/pytest.ini deleted file mode 100644 index e69de29bb..000000000 diff --git a/archived/tflite2xcore/tflite2xcore/tests/runtests.py b/archived/tflite2xcore/tflite2xcore/tests/runtests.py deleted file mode 100755 index 61618123d..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/runtests.py +++ /dev/null @@ -1,194 +0,0 @@ -#!/usr/bin/env python -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
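To make the pytest_generate_tests hook above concrete: a test module governed by this conftest declares a PARAMS dict keyed by coverage level, and the hook picks one parameter set per run. A minimal sketch (fixture names and values hypothetical):

    # in some test module under the conftest above
    PARAMS = {
        "default": {"num_threads": [1, 2, 5]},
        "smoke": {"num_threads": [1]},  # selected by --smoke
        "extended": {"num_threads": [1, 2, 3, 4, 5]},  # selected by --extended
    }

    def test_plan(num_threads: int) -> None:
        # parametrized with whichever value list was chosen for this run
        assert 1 <= num_threads <= 5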
- -import os -import pytest -import argparse -import sys -import atexit - -import multiprocessing as mp - -from io import StringIO -from enum import Enum, auto -from timeit import default_timer as timer -from typing import Counter, List, Tuple, Optional, NamedTuple, Callable, Sequence - - -class CollectionMode(Enum): - FILES = auto() - TESTS = auto() - - -class CollectorPlugin: - def __init__(self, *, mode: CollectionMode = CollectionMode.FILES) -> None: - self.counter = Counter[str]() - self.mode = mode - - def tests(self) -> List[Tuple[str, int]]: - return self.counter.most_common() - - def pytest_collection_modifyitems(self, items: List[pytest.Item]) -> None: - if self.mode is CollectionMode.FILES: - self.counter = Counter(item.nodeid.split("::")[0] for item in items) - elif self.mode is CollectionMode.TESTS: - self.counter = Counter(item.nodeid.split("[")[0] for item in items) - else: - raise ValueError(f"Unsupported collection mode {self.mode}") - - -Job = List[str] - - -class JobCollector: - def __init__( - self, - path: str, - *, - coverage_options: Optional[Sequence[str]] = None, - verbose: bool = False, - junit: bool = False, - ) -> None: - if not (os.path.exists(path) and os.path.isdir(path)): - raise ValueError(f"Invalid directory path: {path}") - - self.plugin = CollectorPlugin() - self.verbose = verbose - self.jobs: List[Job] = [] - self.path = path - self.junit = junit - - coverage_options = list(coverage_options or []) - self.optional_args = ["-qq"] + coverage_options - self.collection_job = [self.path, "--collect-only"] + self.optional_args - - def collect(self) -> int: - print("Collecting test cases...") - start = timer() - sys.stdout = StringIO() - try: - exit_code = pytest.main(self.collection_job, plugins=[self.plugin]) - finally: - output = sys.stdout.getvalue() - sys.stdout = sys.__stdout__ - print(f"Collection finished in {timer() - start:.2f}s.") - - if self.verbose or exit_code: - print(output) - - if not exit_code: - self.jobs = [] - tests = self.plugin.tests() - for path, _ in tests: - full_path = os.path.join(self.path, path) - cmd = [full_path, "--tb=short"] + self.optional_args - if self.junit: - cmd += ["--junitxml", full_path + "_junit.xml"] - self.jobs.append(cmd) - - print(f"{sum(cnt for _, cnt in tests)} CASES IN {len(self.jobs)} JOBS:") - for job, (_, cnt) in zip(self.jobs, tests): - print(f"{cnt} CASES IN: {' '.join(job)}") - - return exit_code - - -class JobResult(NamedTuple): - job: Job - output: str - time: float - exit_code: int - - -class JobExecutor: - def __init__( - self, - job_fun: Callable[[Job], JobResult], - *, - workers: int = 1, - verbose: bool = False, - ) -> None: - cpu_count = mp.cpu_count() - if workers == -1 or workers > cpu_count: - workers = cpu_count - elif workers < -1 or workers == 0: - raise ValueError(f"Invalid number of workers: {workers}") - - self.workers = workers - self.verbose = verbose - self.pool = mp.Pool(self.workers) - atexit.register(self.pool.close) - self.job_fun = job_fun - - def execute(self, jobs: Sequence[Job]) -> Sequence[JobResult]: - print(f"Executing {len(jobs)} jobs on {self.workers} workers...") - - start = timer() - outputs = self.pool.map(self.job_fun, jobs) - total = timer() - start - - passed = failed = 0 - for job, output, t, exit_code in outputs: - job_str = f"TIME={t:.2f}s in {' '.join(job)}" - if exit_code: - failed += 1 - print("FAILED:", job_str) - if self.verbose: - print(output) - print() - else: - passed += 1 - print("PASSED:", job_str) - - print(f"TOTAL: PASSED={passed}, FAILED={failed}, 
TIME={total:.2f}s") - return outputs - - -def run_job(job: Job) -> JobResult: - sys.stdout = StringIO() - try: - start = timer() - exit_code = pytest.main(job) - t = timer() - start - finally: - output = sys.stdout.getvalue() - sys.stdout = sys.__stdout__ - return JobResult(job, output, t, exit_code) - - -def main(raw_args: Optional[Sequence[str]] = None) -> None: - parser = argparse.ArgumentParser() - parser.add_argument("dir", nargs="?", default=os.path.curdir) - parser.add_argument("--smoke", action="store_true", default=False) - parser.add_argument("--extended", action="store_true", default=False) - parser.add_argument("--collect-only", action="store_true", default=False) - parser.add_argument("--junit", action="store_true", default=False) - parser.add_argument("-n", "--workers", type=int, default=1) - parser.add_argument("-v", "--verbose", action="store_true", default=False) - args = parser.parse_args(raw_args) - - coverage_options = [] - if args.smoke and args.extended: - raise ValueError('Only one of "--smoke" and "--extended" can be used') - elif args.smoke: - coverage_options.append("--smoke") - elif args.extended: - coverage_options.append("--extended") - - collector = JobCollector( - args.dir, - coverage_options=coverage_options, - verbose=args.verbose, - junit=args.junit, - ) - exit_code = collector.collect() - if exit_code or args.collect_only or not args.workers: - exit(exit_code) - - executor = JobExecutor(run_job, workers=args.workers, verbose=args.verbose) - executor.execute(collector.jobs) - - -if __name__ == "__main__": - main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_execution_planning/__init__.py b/archived/tflite2xcore/tflite2xcore/tests/test_execution_planning/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_execution_planning/test_ReverseDepthFirstPlanner.py b/archived/tflite2xcore/tflite2xcore/tests/test_execution_planning/test_ReverseDepthFirstPlanner.py deleted file mode 100644 index d3944a4cc..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_execution_planning/test_ReverseDepthFirstPlanner.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
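For reference, the collect-then-execute flow implemented above boils down to an invocation like the following (directory path hypothetical):

    # collect test files, then run one pytest job per file on all cores
    python runtests.py tflite2xcore/tests --smoke -n -1 --junit

Each collected file becomes one pytest.main job executed in the multiprocessing pool, so a single slow file does not serialize the whole run.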
- -import pytest -from typing import Type - -from tflite2xcore.execution_planning import ExecutionPlanner, ReverseDepthFirstPlanner - -from tflite2xcore import xcore_schema as xir - -DUMMY_OPERATOR_CODE = xir.OperatorCode(xir.XCOREOpCodes.DUMMY) - - -@pytest.fixture() # type: ignore -def PlannerUnderTest() -> Type[ReverseDepthFirstPlanner]: - return ReverseDepthFirstPlanner - - -def test_single_op_with_const(PlannerUnderTest: Type[ExecutionPlanner]) -> None: - subgraph = xir.Subgraph(model=xir.XCOREModel()) - - tin = subgraph.create_tensor( - "input", xir.TensorType.FLOAT32, shape=(1,), isinput=True - ) - tconst = subgraph.create_tensor("const", tin.type, tin.shape) - tout = subgraph.create_tensor("output", tin.type, tin.shape, isoutput=True) - op = subgraph.create_operator( - DUMMY_OPERATOR_CODE, inputs=[tin, tconst], outputs=[tout] - ) - - planner = PlannerUnderTest(subgraph) - assert planner.make_plan() == [op] - - -def test_single_op_with_two_outputs(PlannerUnderTest: Type[ExecutionPlanner]) -> None: - subgraph = xir.Subgraph(model=xir.XCOREModel()) - - tin = subgraph.create_tensor( - "input", xir.TensorType.FLOAT32, shape=(1,), isinput=True - ) - tout1 = subgraph.create_tensor("output1", tin.type, tin.shape, isoutput=True) - tout2 = subgraph.create_tensor("output2", tin.type, tin.shape, isoutput=True) - op = subgraph.create_operator( - DUMMY_OPERATOR_CODE, inputs=[tin], outputs=[tout1, tout2] - ) - - planner = PlannerUnderTest(subgraph) - assert planner.make_plan() == [op] - - -def test_linear_graph(PlannerUnderTest: Type[ExecutionPlanner]) -> None: - subgraph = xir.Subgraph(model=xir.XCOREModel()) - - tin = subgraph.create_tensor( - "input", xir.TensorType.FLOAT32, shape=(1,), isinput=True - ) - - t1 = subgraph.create_tensor("intermediate1", tin.type, tin.shape) - op1 = subgraph.create_operator(DUMMY_OPERATOR_CODE, inputs=[tin], outputs=[t1]) - - t2 = subgraph.create_tensor("intermediate2", t1.type, t1.shape) - op2 = subgraph.create_operator(DUMMY_OPERATOR_CODE, inputs=[t1], outputs=[t2]) - - tout = subgraph.create_tensor("output", t2.type, t2.shape, isoutput=True) - op3 = subgraph.create_operator(DUMMY_OPERATOR_CODE, inputs=[t2], outputs=[tout]) - - planner = PlannerUnderTest(subgraph) - assert planner.make_plan() == [op1, op2, op3] - - -def test_order_by_size(PlannerUnderTest: Type[ExecutionPlanner]) -> None: - subgraph = xir.Subgraph(model=xir.XCOREModel()) - - tin = subgraph.create_tensor( - "input", xir.TensorType.FLOAT32, shape=(1,), isinput=True - ) - - tout1 = subgraph.create_tensor("output1", tin.type, (1,), isoutput=True) - op1 = subgraph.create_operator(DUMMY_OPERATOR_CODE, inputs=[tin], outputs=[tout1]) - - tout2 = subgraph.create_tensor("output2", tin.type, (3,), isoutput=True) - op2 = subgraph.create_operator(DUMMY_OPERATOR_CODE, inputs=[tin], outputs=[tout2]) - - tout3 = subgraph.create_tensor("output3", tin.type, (2,), isoutput=True) - op3 = subgraph.create_operator(DUMMY_OPERATOR_CODE, inputs=[tin], outputs=[tout3]) - - planner = PlannerUnderTest(subgraph) - assert planner.make_plan() == [op1, op3, op2] - - -def test_symmetric_parallel_block(PlannerUnderTest: Type[ExecutionPlanner]) -> None: - subgraph = xir.Subgraph(model=xir.XCOREModel()) - - tin = subgraph.create_tensor( - "input", xir.TensorType.FLOAT32, shape=(1,), isinput=True - ) - t1_in = subgraph.create_tensor("branch1_input", tin.type, tin.shape) - t2_in = subgraph.create_tensor("branch2_input", tin.type, tin.shape) - op0 = subgraph.create_operator( - DUMMY_OPERATOR_CODE, inputs=[tin], outputs=[t1_in, 
t2_in]
-    )
-
-    t1_out = subgraph.create_tensor("branch1_output", t1_in.type, (2,))  # bigger
-    op1 = subgraph.create_operator(
-        DUMMY_OPERATOR_CODE, inputs=[t1_in], outputs=[t1_out]
-    )
-
-    t2_out = subgraph.create_tensor("branch2_output", t1_out.type, (1,))  # smaller
-    op2 = subgraph.create_operator(
-        DUMMY_OPERATOR_CODE, inputs=[t2_in], outputs=[t2_out]
-    )
-
-    tout = subgraph.create_tensor("output", tin.type, tin.shape, isoutput=True)
-    op3 = subgraph.create_operator(
-        DUMMY_OPERATOR_CODE, inputs=[t1_out, t2_out], outputs=[tout]
-    )
-
-    planner = PlannerUnderTest(subgraph)
-    assert planner.make_plan() == [op0, op2, op1, op3]
-
-
-def test_asymmetric_parallel_block(PlannerUnderTest: Type[ExecutionPlanner]) -> None:
-    subgraph = xir.Subgraph(model=xir.XCOREModel())
-
-    tin = subgraph.create_tensor(
-        "input", xir.TensorType.FLOAT32, shape=(1,), isinput=True
-    )
-    tmid = subgraph.create_tensor("branch_input", tin.type, tin.shape)
-    op0 = subgraph.create_operator(DUMMY_OPERATOR_CODE, inputs=[tin], outputs=[tmid])
-
-    t1_mid = subgraph.create_tensor("branch1_mid", tmid.type, tmid.shape)
-    op1 = subgraph.create_operator(DUMMY_OPERATOR_CODE, inputs=[tmid], outputs=[t1_mid])
-
-    t1_out = subgraph.create_tensor("branch1_out", t1_mid.type, shape=(1,))  # smaller
-    op2 = subgraph.create_operator(
-        DUMMY_OPERATOR_CODE, inputs=[t1_mid], outputs=[t1_out]
-    )
-
-    t2_out = subgraph.create_tensor("branch2_out", tmid.type, shape=(2,))  # bigger
-    op3 = subgraph.create_operator(DUMMY_OPERATOR_CODE, inputs=[tmid], outputs=[t2_out])
-
-    tout = subgraph.create_tensor("output", tin.type, tin.shape, isoutput=True)
-    op4 = subgraph.create_operator(
-        DUMMY_OPERATOR_CODE, inputs=[t1_out, t2_out], outputs=[tout]
-    )
-
-    planner = PlannerUnderTest(subgraph)
-    assert planner.make_plan() == [op0, op1, op2, op3, op4]
-
-
-if __name__ == "__main__":
-    pytest.main()
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_ir/__init__.py b/archived/tflite2xcore/tflite2xcore/tests/test_ir/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_ir/builtin_operators.tflite b/archived/tflite2xcore/tflite2xcore/tests/test_ir/builtin_operators.tflite
deleted file mode 100644
index 0aec83443dc155a40ac0efd6fce8ff2414c2e0fb..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

[96560 bytes of base85-encoded binary patch data omitted]
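Stepping back to parallelization.py above, the planners are driven by constructing one with the workload dimensions and asking for the cheapest candidate plan. A minimal sketch, assuming the archived package is importable and a 16-channel accumulator period (ACC_PERIOD_INT8 == 16):

    from tflite2xcore.parallelization import SlicePlanner

    # 32 output channels over a 16x16 spatial grid, allowing up to 4 threads
    planner = SlicePlanner(32, height=16, width=16, num_threads=4)
    plan = planner.find_optimal_plan()

    # Serializes to the dict consumed downstream, e.g. (with 16-channel groups):
    # {"th": 4, "cg": [(0, 15), (16, 31)], "rc": [(0, 0, 4, 16), ...]}
    print(plan.to_dict())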
ze`r|Q9lQA@{I@Fv|J>{a^O5&nZLZFa^tT!ZYrZeWHol+P=Uxf;FK<0mpLjp;cvZDml2&(XRF~s1ubP-%vKvTGe z1-wWnaK&qCe!Jjm1hgN;f$qgtx9DCD%741y7W_bP6=<04fFZDYK1YQ}NU1zHBvBcdyFs__^*sDh;=ifbBO zLk@z|bJQtWWvoz!>gZ--zHemn1k{uW#b{-as?QzN(sCZ-)e~E=XmHLgccP+BD`sA= zS>q)GYH&G^`v?M$>?BiF+FUxW?yT75k9_@BA$+J*V~}^ej$47I=g|tPI|9&S{qaI@L3g6}a{_hOy--ev==6DDEwX@ot{SFG&i^30D zMHw_KZVCIp>>cgv)(?6?f;`ZRvg0}@N!!5NmfDt9i~cB2?F(FUI6GwS1ZXp|*G^S& zfl+cirU|i}TX3~t{4FauH?ZtklW-G@N}9u+#Ww)6*Jy^^sL)`uCy>`V@buwj`Fwy6mY^gyrzz$Xlp^*)T2G?%Mq7rGT01JYpoHR2ekKs~ofk1hH(xA_f{$kEe?#LQj2h=8Y7)(!j?Yd0Q42*R?BF*Ua)6xK%C}n{(p!Sm5nmi?`Q& zL=~}l(Z^e1si&>B1y`FcF4u+k7~guf`Qp;Dq5A+CRgK~;MK*=e8#!-tc($#1b5*jl z2+huOxLeA1d(TA1EW5rlk;wi^^PNcnGYW~?2_-a*61&JIHbvj4f2xRuu!}$YyGJkE zaUT_k_?AtD<%Hr4fy{k%${QQi2Xow4Hb30?iA8aGn1$(Ee&$Fop0{ZY#Wa^pp3hHv z-^Q}Fc9{}N(z{)M!q>Ynvp>Tn`7wQ}u;we;V)=*n5gI~+W<5A!V<>}(BoCZdpl+R1 z7LOdTPr3Z4_Wru(%eegf9qk7NW6grd*bOh!t50;jXPkRHr zMz)u@{D>k&K|>4Nbb?aCE*spQoyq|TSYWawN}akNV!G`Md(HC>@?;~oQFgQ4Lv#Xv zJEWj%C1PuTQL889Ko&3KZE}~q`5k?o0?wMVXP?P0 z2>5X~iANzSS5u%epYM7tv0 zJT$Iq9ViHhSRkyiPH}V}PGDYB;tsu4bvC`4X|xGYZ6jdkl$u)^_{fJ(L4=PG1WIR3 z2JDVO+s}y>H;!%aCCN{KJ+pNNFQ%y&rE{n#FsN%yqcB>JY>$t~q4Ri@_PL<_Th6CR z$vTDHV<0St>qe@~H(a0A#%7_I&FiMuWtHfc?Bf>Xfy@>ia%7NAx3nc7GKkmv0}{{L zg$#Ns8|4s+@*Vrl_6gS4@hf#3s}%SgXByPonTugVmkJ6*0Ffk{M4+76pgCUhmKX3D zjp}Lr%eIcdg}AOw;a&QKq0Wq({ob#~41PLhcV&VLVQXiJXZQA4)EcIqkRI7Dg^-A6 z^4x-V#2W$JU-n_1=k>t5w+L}2Hf_u961E^TdCWPyRR@X8n66;R%sCr0C>n)v*}l`Q zXr@m90gjf%J;}fs-rRL!V9^;7WLv-{VhQwwVNU`T%Sa=THyEAZuYRPV`i9AnZx2j8 zywBkGnx}V7DTUr}=Oxc<_4e7wM$K zC;OVa5%51JMa*z=mf$la&K;;yiwu zXJMqL?3jEa1}a(Mq=&9M9vqpBQWKQDG`F|c^d6$sM}DY6b}IK81!?as-}g70x9Sg) z;JEnk7hC*OA6dHs(Jg~-Y}{_|VX0>0&%aj^s~3%MizqD{gr&Te57>Q zonPJO96h)XKT{L7``AoRZ>1Tc*XVO8vLxp-hV{dmS%3*;jsO5-7PWE(=CVg|L>i)n zCj3ZbEo}?zlxv(-22IeW4;N*232d1s+I42ihSZ;ZT+YW+e!%JU&QKCbT(7$C4a18G?KLOs2$4dm12DY$J}Nn@?B1$$-swC}HX@M!f zVji=fN8qnxomyvvbXXe`F0tE{5BJ)MkOp4e{>UfARjqA5pTeGP9AV60g4GQMn@&EeJfSjYf(AxosDxlKv3PE!mIbC3nQ++ zJzzFwGmkQjodML7#veAc*9| zdRKW?if=TWPKzm&HVQKc2CjJ;*2Z4~vE46xgpuSmG;{v}vcSe(MIG&k#}bYG?0 zd#-M}hBEl1f$ex+kc&{Go;MosZeif%c^xip@*n3V1~SYBm3;JBjweic$;8&|1P?(Q zOc3JuYZ#zdtifqN;;11zMYI;!-Pf@echZ0Ny;)Zx`L+vi3Gr~iBhxh}$<&SlM|;?S zQBS&e|9Y}#&Q@U1lz~kh?*+>JQiGBSU9w%I?wh0y8=FHIJ5m7*25k)B#>SR{g9z*v zMayLPS4KNz>x!N{o;il>k}u}3*+;Kn{jsPI4u(CG8?b||*9of`T9+0R+KMgJ!RXEx z-NTjh%IDm25SVO&Egi@egsy4j*5q@t%T4w3FZ9b_9VztHTq@4Z03&GK7Qdu^T9$449>spKFv7A#qqTclbNK@ae% z2sHagvg8>ZcWYXqr#IarlMb_6ZrA(HQ9^4NP!u@sn*s~=0jv^J)C&tw%($Hh01hNa zJ>Kjn-u`gxl{1>PYeWgB?awD8#&xW_vbfA!YLjwnoaO)yJ#yTxLp5$whuOAd?K^%++Ps z@3mPDyEtzv1(m*OTN3bECSRH4z?@*t^|gjwIoO)wOiY8SYjz38`lhF>*B^M_K(>8U zx?%}y=+?#pP1^^319P-A-zztr_V<2B{lAunUPM*>BMfo>PQ-3?(z46DH#*sq3Gj@m^w#^wj4;wAYi7$|sLIdY?@ujef!XGn~c&2XB+& zYma*K?oxNYx+rz<0q7dJpe4J4x!h>*etr$XR;xrz5sA>cYq;;2z))|pu3)e=fFmi$ ze5cS9j+-gxZh-F%Qe2B@4tx^jJn$N#aHEpNuXp&&<^eA8sL*d8t=DaxM$)DJ~s`}K&B-k&1$=0rDQBzc?pW-G8!vm&q{Q&u^{lq>==u-VM! 
zad84rAWvhr9?n9}#sKT;MzFLB0#4)s35J7F1kw>Pj7sG^sTqJRyC&=w4OLbQ=?yrE z)(Y+dsIvt)yRbq{n8vqT=NWU*bh6+0)80|K?$=9+r zWiZ@_7}*>#v{BYbiwF)#J<3U=lEomiuvaOuWd}bnxM4X(76zPx^Ib%i4D&#z&RS4dE&_^GVYt2p<|C9F zrK)V6u9$rdw{M9^h|T~ua(bz z!E-(NY$Q8eMi!S}-u$EY;TzYzbh9sR@3=ccM|tKe(s6650x<^bX!Qt;)ha~6zOJ;| zOtTCLL3Ktk5EOah;zsQbJV4-4)kIMH-Th3($Y=>-9A!WTXOE{Pmr3+m@(mj6V4+=a zxh#(KbM<8p;;P*_?e_WBRWab{l%Wh+X68sL-`C6+@DrdE^p@ zoAlgxe9JX1I41IvU_-olp)rk}>Rhjbwj1r31REWR)9%#3Jz^>qTSi7#WhdI#1weE% zZQ2Y1<&wbX>}@DKorP)=OGuE>2%7p<7V0Xvk!I6SFcUh(UK3qw#U;w>&pI z#f>$Pb4YPvV{AXb84p9{+Q5wSf=#!UW^E2Zx8Z$$!ZnyG*dE5xZV9!qE`U+ntPxNq zQKY1U9k7PP`VnogW~u`~QwK@aW@$8mDQ7zVzv#WN(mt(Zj~5p>J+VeJl@!wr*kvE7$-yEJ3c zbm&aN*%-1loeVRSn>IWdj&_imGp}-HVJ_BepyPY9>u|To8zBMG5~18ttD$s2l{BdQ zY=<{0!L^a9xfyH+!aM^U`%*$}%T`oiY>YwYQ&xiwJ;a?J1Z2l2^gZGsT5B-ZsQOMC zaKyB+L!0lghYTlBktzV%JXNZp$RK_nqm~8Tzti8RpTw8Dm>lwNtW_U^I|sA{ojejxXeqv-nH?YY8&#RL zge|zmMQU3HZa?M%7s{5yDY{ZF0JG~7P&12x!1)P_#}w-Lxng7lJ&VnQOCYbGa!(y< zIZ;y$v5{~X^&5)D{;AQ^g$AYp;4TfyNk5|X+UBlkv(R9fZlY37F@t3(H{gaLims+8 zE12lnq%0~lv+F#L1TtR5-bxRg0~q!<2wq$;9rbd;C|0{=-VvMHdQu2-r*c zRa*-K4kgYl*!vVdHTeOKuT%U8Rve#*7B?AN7+tnpyat`V(A%^HrFpq z?#zz67km&Xx*Q|>GCN_R&Dj9Ds&CkVOi-gfo37ZMpw}kTi)t=5N%_8@*HKle`8H@ zyeU7fO?BNVOu3)(coI9S?bt5%zA`OM`#^))4K-=qTzcM2zT+^G z_*f6zgl(0;vFyOxvH?<`(fFP)TYt{Y#KTkto`fAcLAPnqjgJLXNK=hZ4)p*iS~y4d z8&II03Ldw<-fyQ>%c_OQ>Q@@JzFc(smWrk7@_*|RyTp(0r_ zkjVg9(2kXB9k>%a-u`35;q$p8AVw!u10ZxmMXBeixHCDc>5)~q5o{kJJF{B}+CEdr z2hges6?*Z0D|lS8Nom>ksu#^;Z;ziALIv-UD^^=^>uKg*D`V{RVX*^5lXl8RzBd90 zPYz~R7e>;JPuMIXGHth6tP!=~F^_PTgrV=6ll}10hM&&YU3s&?txzHl+qy;fkmEWI zuD2I~D_e;TD_%eZ%E~GJX}ll9-t1TBL5HOtO55s#i))`T+C8l0GL#rK_+WT!>B*x} zU!XI`ufXhVE%xP|%jQ9!_L;ryn0G-MggR=}?i9C>*4pF#nZW$h<@r#HpSr+d;h9S@ z{qySJa3-f#4q6*qnHoM7b2P;L#CIKy)PX&O4J>EtiJ#9D>=#J4u&v1EK+s*Hip~KL zAgJC<=uAvdX-EE(!cgCU(iQ!(;(xlkMB0m?xl2jUrMdIC=?pnPdNs0t;_R=k$(AQS z`&GQkboU%*m4|;Av(HM#iM8IBtjl{Jj8*P`U!wl_i5RqbB0rX&ef-|PT++{4mcaVM zPaIu4dL`6ay)HRE-%EFaE$h$Aw*1dt2yTSIX(snR`S<}cFmE16Wo2*Nf&u-?_1Ohf za!pt#9X;h%!Sq70bUL|}-rbp%J?G3dJ@@vxtLW}a?yYB)@cdiCc{<7ex6g9rzkOg{ zbUthSFZVZoWSM#A?(b0te;~H!KiEE7dzpGQNHu<(?|)vRXRcl6f8Dmd^X`OIdug;! 
z%I`wQt5erteC2V#HWb=r*ShbQ%NWeI1ESv+fh<4noQM!;xryV)*#5NsaG;hemnT3l zWK#*HUgh}SY{gP@O;DtXf=0;)1!UrEJ z*=FN<-O`X34NcmU>Fj^Ud0-4X%X7Rv*RYi0Bl)}B&Hai<(xWZl;(G10 z_pOHmv+~)-ssR})kw6La3r}w%S*vpfdSKA@kkSnJ(BwZHzvv-y4B71;)vA199f8e5 z3CwI~Br@cpv5a7&NXD*mvJr5MJHfmJ^a=KY)#u)HqRLo1H;oiml0;eQ2sSA=DIVmu zyNb93O?<6E6^gj5J%Q{Wwwu|ukhvJKeih;kOkFNtVWa<)GM*-~7XItuTnl8JdySKl zqc;c3Ru3t5gUdCuX#(I^$6^_?v)s)e7ruR?+^gkV>?TgIeec_akT?qs6u~Yz^}{mY zXMfGYd`~_qby&Sms8y47PeYE0kSnXqmFK8wxM>UE{8JiDx7%J{+a&4jM05a|JdNLo|UAE!|`k%YWvpAS8Io6{CfNJnHw9|O4&DmDV~Z;Q^G+~ z5>`92vPJ$RzcS*_j_tkh-Rr-wF#q{G>xn&>@=a714vuer?^RD@w%N_FoM$p}QSEZZ z`qbCmRmdGtnpe`+dmq(t?RSh=(xB*(uR57WJyDIl*z8xZP8*(4sh#6yotspQn!S+D zjeAS~O=C_~+ZK{jvPR70f;QQ$eCLH}bESE&B**&4%kjNb?tX#ug88CR!}(K}bj*Kc zl7rM@h8jJ&_kc+ks^4qBKLu*${U|W_@Mqh!i3avNL_5rQmDZ6=zTq5U!UQG*hZC4# z`)(Ff!l6>zbZqoFnBu;|y{ceYv(=)t_alm!*hny<=F$mOFWwo_mdm(ut-}{_)R^ik zW?OlmE2f0;XHl`kDZdfoH(Sh&?4xQQ862;HgrYJLB2yiEfiRegKwfihv9=L~ci@Dg&`9tqaMNX>3Gp4L1CMjHTwV&0Ht+wmI% zC4f}=lV@wALt7YC!87RUzA&NtT4ePnpmgts0DC)w?lOBsHVKR>z_dA+_#%iL z9d;aKq~~d_c-6iA6J-P-`G@^O-0B~|rvPB+pV4IF$6gwMJD@vq)oSGv?dm!WEU%iL zUNPXs;FRN6(6;?SI{Y_cZPrVq5LAlt+~p$^H|YHybu&_-w*WzV6y4{Y<-zWhWp7|?4-J0MOrZlB6#MA|mp)~i9IsE8 z3k3kBMrIL<*Tj`&^jEN3x^sA@J>tNdblnv#!1~iZ-<)P^m^Ot(%dTN8N!eYTGBKRC zNRJo2^pmC#z2VAPYX@oef`^Pt6W&c`zpMCVjHqZ9+0C|(SJa_wdj!q-&6rj(NqcDZ z)G%w%M^EEirjtcFi9{EHMC*Xnde)gPlomXVX9SsxjAfc9)J%^FQfW|D9LVhBaS!KHqI95ac39#&+%XLA!u)l9KOm+-XoQu66BA-XOa?(#WDd znr(Jf8`n;vOuD6uqJMCD0oYbG^!K^yKuf;`wo3jq>M1UI{io|BxPA9|SH zdWVP^ss*j0 zVCUG7=*%|AuR7w?vXQi;Ra#Du3ZgvLQ6b!%;EznZoV&omPE7-!dgH^RhCuli! zGzt!UO6^mRwC6YWx0{1wJT6!Z?vBT;Sr|D3jkm_imCU3&DpAD*}+&TzonyQYR? zvQL()hli%{IPgL+H(3sg|MAt{5~t!NdBkgcC-A!mWTsSlvufZ2?(w$!Sl)c&XL`i4 z+aDDCQMpCU1pxV9VBp9M{uwUk~p9v?qQ+=&i8#gDn@ z{9{w%_0ld!J{3~4*$@!+*YW{hb>Oc>f4eb0ds*Y<8>_a}9~y@R`M|8E^=migwM2W*|tzWVee zn)TYB6nFd<+Md=7ow?`V#b;9b+q7lKeRVA36UI%~tdp-J9Ij_{A!Qwadz*RYv(CWf5H9niJ3!OIv});Gnp-K4Z7dgjA(eE7!NzJfr3w zKdyg&<<=^sKiBFV)JwA4GeP*#joYWkShqHGlK|SZr4_W1JwlwUXlll4~-&*E~~ayhw6<(>~C!iOsT;@9rKJUT;w z4?UN^b19dtPW5dgvf=s8x1B_pCeL7_us>&jaPz>_6ejz7s~EfIZ=G@7eOF0kjWa{v zAJ}-T0@bO{LKKaUG zR*=q`V4=0>6Mx+pwN#|DS_7RgcFkOVhDblNGQ*Zpm>l>x#n%yVg zwD?kAy0dbuf0JpkKP~?YTn;xK?EhBWDkkF3rMLxjA$-^RRvywny)N{>z5n|JnCH}6*9m^muX zJK9Myl4q;VC4&B73qg7HUQ&3dF?HtF=(`s3$iv+Oe8twL`m`(i>w5%y-}vr*`RC~A z&+h%5`7R0G3Z+?B01!XOU6nK6OBxODHt)Pv*n|r|{`QZk3MoJGVoBe)-ZFF-#_|84 zM+4i@pY*Z_BsUUqsY>V-7eRua0&8weQm)s?Pga{j9u%DxmxB+KUpX;ZTdFmof~9le7T02H23@uB!z|;YK)K9x3{i9GZGQ$eNk+p`uv}((%{`Iu|roJ3h^9BubOS zI`jw}syr#E-x5;sHeZ~}ydOf**L2}OEasPSq6r$EZddV?lfbz)A#^3~B3N|Z{hi$ntRbZ(` zHNZmBjLDsei*Pf8AfVA2DO}s`O;Xx>S$Z8bRleUlI_W6e_EAvX-m?y6FB7`bWxcxl z<&Oy~5$?RoWZIDU0bkmsg&#pRk*2(RYvm$Ge005GAZ+KP=F_MyFAd_#(E8&3mG@+r%w3sn$^Y!EzLA)FZI;zB2TUiz0vI^EkbNb%mWb( z6fPy8#m0v?N0#no1+kT5egy~$38owO0BW!S*F1SBCRt47cJODnQ?}(o=Fp%EAh5ah zZxb~1b09}`RZY`6_q0au#=7C>+Zh7us%|uY_??V=xC@WYIP}4~-^h_CSOt{tuaEjD zNw=f^nY_=QGn%7w=8$d*&v=bZ3DzeM?FdfRoutK76AzVH+7UF@y56aZ=kIw=JEtj! zE4CB>kYmo#=HZ?el1Nmnsm_qy{YFdUjS`uC6bd*G1$!+>RQMqQlhWsO1Oz%k>Vb`hfn!0(*B=#QGt zd1rArrncv=q;(bP3xP9cn^*2zxk&lZcIzPTdR)=dzFaW;8@mY8lIpX+#1#NC|K)*m5EO? 
zh5-{`NI$RAHfNn{Wgz!i!c+6we2@`qUAt82Y;L;L%F|ia>D)^KP@A|MD>+$|TuBgk z>P}laP3)zLho5mCPk4W+%@1tWkm*=x1=+_XOPDocqbtjAD)#GV>U(zWjbVW081s)j zY~xV#w%{|oi+F^!mB#>aKZ>0=l)LoV$uEp7Ufd?3!P$Z6$izSK39@=nl;)>;yfJG; zxXs6d9qPHt6Ni9jD$7|LH!js(ja*l;VhL*sxVd4dVkzqUu5%|x!>bC`dW#^EySX>^ zwSdsKPk*m9UAB?Z++jEmLkHJUqoH59YRDbE;$Wza%+~3P> zHt@4VuwhMPlcnaKyn3IHb<)WZg_`YY zH$s2!6Jtx4gA;ehW`7+!gTr;#%UlpW0~$S72;Agp%+z8dGahh}bA9cVkk)%*o&NU< z;=a;PArXOhpRb3PY;L|)accTM*>^ABU3^b}<9^G!Xz!go%reMR{6`VL;go+p;HJMK zsyD>Y-O=^EuG?@%kl(7g^cOa6M1wF<>p&O=g+n`tcHlY`o3NlQV}o*;2t-fSR;5Yo zX~Wgmj^h^w||!mTJTp2dr9Un--kPqu4K;#dA;lv80)^C>SYG%oqJvtQ+ zvkWv$Z_#BXbF0qYnj9TCGu$o5EbDvLa_3rOKx);}h59fYw04d_@eVOG{mkzH3#CM@@?@tWE`=(A&lM}a1+?=k%%>a;gIspy%|;G$XxR62UVh@6a0$S`3B z4o}S0(rCo18HJA9d;t!*&z-W&QCPxKJ}*2Zd~V`+|g55^-y z5kKu{p$>;>V#d5(bYyiK^C5X48cqFAlKcKZ*Gf^)<{22Suvb*a0V3Crsd?N*na$$3 zb1vR6_1maaaP+KaCH_YL!I1k|d(l-1(KWoFQT~0S<@8Q)H`mYuv3}=cxZnq&9lG$Q z!!P(g$WGY|jH!McR9~~CA;1n&a#}C6vts9BjVka2)u;sK@3RlDL+U-Fqyahd1=Wk&BPF zlfhe4q~YVK%VgLCfT<<{ukG{M!$3CUJc$_VRDCJ?R7_8qJu>XVvAdib7t3I=8S5rp z4?qR(eQK#?hDzJuD9Og_^?V6Fo9oI0=-60B9uMO=_R3Vl@_1v72|aPLX^=Q`$+Wdq z2bF!c8gl_>`?iRKb>*cVDh|y)xL-WyCpahlS*ou{X1MKWAYrzkE5>TgZ1<9OQtpi! zj7K>~O9>(jT`h9FXMKmKbLwf-Y21YlY>!XZos|;FNls(R%+=uj^j>0sk{;CyDz*@3 z6)|zL%iH?6I#idaDTTA{T}zm-pe(z(8Y*HYIuGH)bV#{w3GNf4eVC(MZ8<-PvxCvb zkhg15Q10O9QipAOnyK2Bq6~EIo;}fx_o$eJY@iP)Pp0qzd!#s~AF}$R+G;hO+P1Es zD;R0NBmQ{w)%P<({N7$#!7kMp>4DU$v>V$d1ZPPo*c!>qH#vvlxYlfj#HDgp>q`05 z?#eBf3Y6MybBz|Y6 z5nErIx2J=w?}9h8nPe__2EPy|f2HNFHbra!y1*gbSeK2zbo=CF>hb+_YciHAkg>Fp zxWXf@xy2WKQ2$Afop@R``OkG*ocxlhsEFpg0>p%#Fh9Cvub8bc%+3uQqTyti1ZS+A zm`hC{4;-WB)X8}rH5{I;H-cyeu#4{&?o$CX?%;TsHPZukQRXR>2!6tEeqdqUK@j6^ z*@(!QeFa*TGd>BjXp9l?2l2YT`PAlKXSng+BGm9$L$w?_c&LK~^Qgn7*)pb3eKyfU zBXM;rx}dqGI$cnio@Bu(NY~mPxn?dBLzi*yxc(^G#3?H8@EX-19JhKaC&6f0ds1d9 z&s_yThqcP#_8x=1#K3p`q{<DUegx8oZ10$y*Z=T!)C8#L?a_W7Iy+1g@k#=Bw4Lkz7QDLVA86)SZnRDcTi7cFl za$Ge&71~obsY}Nu-Z7CIbf!jqTFT+q!i!&!aQ1XJz8_sVxfhK+M45YwQyH@tHm;n= zafr0d%FPC53nY-RDUjaZpLl5Rj_qO)g<0mUEN1jT#Hcark+asQVjl4EKDelsDb0V; z>YstUhmTxSuAv=nzD$5q^JSGhiiR(dtCu{N&-rOQZ0<%cp9_9&l#OO%9$?||OJ9EQ zC8wV(d}ZPyI>ebK$?5M)t7D;_RR?^1;BD7@Dg6m?aKc zOq-T5NbTRaH?rGXnN=!72h_W>IzVES(j1CE7V2>R$zH;oz3vu#j^?C{EF8z3Cx)VV z6g!x(&5a?4-}ux-2B+V{jKI~ykaxlL_|!xmw=9#$;g>y%B;F}aoD~-q|C0h79Cris z1g_ke4ji>l=rDpDJTyJt<6?8CG2~^;dPkY6`AuhLJ~W}AMWBaf_RLI*OkE-y(D~d6 zSx>_M;DPIkScvPOd0;mwiJ^6-Dy5=yFELwM=Z_^%jO)?R>S1TS(o7-XEe|i0dSck$ zJ@Vu?vnQZi7YhX}j*izqJnAv_JL=)#L=AEnFtL|?c&pYoL&(P6rvP`Lu(HlE{^RW) z7tgMI`+cgeMYhQt8#&pp|2)l*xv*z=}Bu*oGXF4$0dqUcRm6pK!AL_3XF(RBiQ?1lq`LpMA0z*#wmV&sTr zm`kV*6U~7FJzQ~b7D^$>0_E=&oUift*}nD2r9(5oPv6^84DHivhdc2C)I^2HTic_r z0iw0Ce-a&^nAuf!n7!qNWOA@@_R7S6x;Gnwgkt>Hrasl8_OmSNoRY4;)$BoR~n6sU%?K zyy|u1xHWGoE~00$W(2ev+Xl^xN?>F$Lz-}9fm>B-k8*5E@v>rl(1es@=U35&$}4Jj z1nUPD<@A6~*J}B?UTxteEo{(J^5_ttd6Dw)8S@e>BIUNIj+y$_mb)W&67wAzbVZUV zG;2ao%C5Rm8tnToYiDfQaEvwRH#+(DWgSgmubk%noXAeQ-QEN~aXP}~2baPNV27{d zpiIWRQ7^Y)to|&y)6y*_$Dy%ua{7vUM5;=BqkG;kovCz9M!-}q6NfHl^kxeQhbF)o znX`xtM_`aV6WX%i69c>}+vZAny#*YZ)2#SR(RE-ifo{1g(RUbGZM793 zU57k0c3?>?%=ci@$vf>AoCT+q8hILOqP2YLjpQ=1iTrAvfilqF{68bfkx1-!N58kb zR(?ws(dNO~$g`F5XTZ;LoWL1=eU}<&&-1n7uWFV{|B0v=hbQkFKOR3k7>|9aXql(g zRHQfOWhOtQm}XyS9zH98*-hI&?{_$#{ex7oFl8RJ%?||x{wu;W&GxZRA)vag zk4xN&*&ZJ?r6K_rrBe));~>z&MjDXyV;Q|?D}GEW_3pg~CNU&7SW|h#<#`|5yS}i# zE`RCFKVeQD8H+=5_5TPCfXP(L(~J&A@2bbXKY#dXZNvIyvd#aN%5jd)!>Ju=xhdRi zO%F`(s?gXU|30;vD{nTd$cc6c4HJLRo1t52DZ-D_^Xy!RP|PiM)_^oyYgA zCS$}WG%CMz*FF1vD9v$}peG@ZL-0*JcVh>*C=)A5f;&WO9n 
zQ)e-&V)Psl&Y4D*L@a!bSz?Du3eX9P4eS+J{c|!0m9z<#ZLlS_Yp~rlqp7e1ujWiJI`I}K<5}ybW~d#Huh?XWp>M)kHoRa0F~em_a!VL! zOo*EbF1+?UFz8dyF)M0A${3CI0WZoS6Bf3$SQcL@;!t>GY{?Gy+9ul#@vWb>hf+w` z1-rB`n{)OKf-Lz`+x4ixale`Hk^X(>sL7~I9Gv=ghCJ^3deUgurb(SxCg*1Pbctyi2T$B1J@|!UYhN) z$m_(;0+^e%ysiJeDR|kNu0Un58PY}}xl1i*gWL*qcw<)dIPFf!h67Gji;0fR-)hQ8 z3o5iSWqJoDti|x7cKTopif=G}SupKkflVEv`0*V`Li=H-n=Rsl9%+6QlBWzh(dpa# zvIL&U>ct&Bk&mF%SH)j=C_A*7!Y2Hu2dzx#+3yaCV=!lRwOXs+8`ScRKg^q!nY-or zxv6Y+FGjTDN;t>wN?dR6#SJ78pDXLTUDIj{)w?(7!-0S5R;vaxz31w1pv%E50$#YE zt8BRQ>jyOesGEKh!bngqS@lC3?}89f&L11EN`VPB**h7I@1SBpBY zgjZxg>2doT;Q(qzyW+TKg)oe|yOQ;_>H0@F&*{{WnuNw2W<$4f=*7)&bDh>ZIkE5GtENln0B&=d2l}3*dexB(YomSjmLi2BY4g z#wP0MO3vdTFFmm=?+bz!_5(6L*m_p0mm*!TIze|xv>mg-J9h)l2Z?4M9taOE5S9aw z8fj-=^Cg9VmYmGT8bccBqs~ikTc0z<CQ4We5r8^kk{_*7X@`pHo<5yyF%`|aCS53F)@Z| z3cEmFaZRO?>LvEfL^WWO%SKwhb~VDzP=7F#q|jpMavN=P!ylh!_`|xD^5KkU%hM%W z@~3<;hoc*sb#WwMaST z+`8XvMqD;yw3RB7+@hRHj$5>k`2}EqBP&!=LF8lPOG<{a$J)%_>0W}6KCd%P8X8hB4e%W z>k();9w3RTZ?17vlfZfKw7~aF@_dsUU`JZE13(HuxMVvg>zYo>n=UIyt*$a=WAK1Q zpSF|~*+GcGqnpo>Gj`D{xkGFkFABGbM970$nL)DHkLY##Lcf}v@6=jMlshx`2Uj?O z>gx^@Di89(qPNG7YdL|rWT&Pshk1}iwY$~%xi%B!*jC-K?;aas-eN(Ay~TOmQouq7>bN6Dl#@o*8{*3NHi=CeHwgTn^E*>y)bYMcGEqR zpBi&%Q8>t>i1~pUZ5v)Fs-|P)Y9ogX;d|p>Z42XzX4YJl`vp|jvQo-O7j-O`zryxx zpnh>m-Q|Ozt$zBnX!3RW)L!!dt#+mFq`$6mQ{9)nLvD+2zbK%G9$o)c5vx9GoDEJo z#C08%hNi!HP&>9+9hUn;k-aqN8CzzLKyEc*!p2&B$^`ibUevkN3p28(*)W#4YFnOn zg)$bE+!EZp@5E9)|U8CZp{~+XtZOnMyrVg2;u`TNgmsXs>6}-^w9C^Hl zqrt7SEZmkd^}IT)hEy?P`D*w@9#!hUo7Vh2zhSt-Lx`$z1WDzoKu}XDNuznoBmoqtI8oil1prbm55=se(m! zjKCHQq6T_^*3*t9Y(ihV==UlDlYVnp5?M#tW2k!EAhL|MZ#GrRB3Q-T&r7g#w2|t_ zXj#j1jLxr<=S?jbAz$`Hg`x92{~M*`rr1XqH4j>RlubRRisG5Up7O}yL2MD>Ij9!U zFe7*J3n^4EjXl%c(K@C%N6}VWvlua3FzitvZ~eFCQ@kxHed|-pRfIFj;nB?%CuBQ8 zE)6I0RDR-Z!@y&AF*XdCm=;y~L(dHZgL0SS>Ssfy z?!Vz?TB#DG+x2(fGxr0&yr9K}a_2(&zGI>^0PQyv7kt)hzq9AtO*$}2%jucN#l|^T ztVJh*xW|N92|jooW%ypI>5ehB2U9FDl-;`3VbaUyT z2Kogg4k<;!ad`QL+y^^Dd6@PLzHZY(M;da{f+nJ_5e5eo1-zHiA>YsuH$D$xUv>rN zqOR!=L+vz}t2w5mA(1Dlflz}>Ol3gQP02LD@?(*5zTO-tDsKKD$S}4xV?!OS1Kj;X z2KHt`$d7OBu_X7|$#^){s=9%eqAa~z2*q$~jw`a~ao9OX({9hn6?!im`+5dR?PFC`*dKX+bemLoS!}rYu z5Bi7Bk&9j=gT$rsMeQiy=vMgVr8h{(%-Y~6GK+Xey*%jt$FC1it{;;`njQCj)2e*&5r)G z`Vx%{!)sr{eOv9ibP_<~RjB5YRo zVWny5a2zZavN~r`yBtev1HslTBNC`Po|Jc|&Olqizz2x|)Jrun@50t6Wze!=b$b9G zXLr>n>v@8w^enKSOlmMMGmaJGu&WT%GDT(jaXRj)a5}JnTjg&CykwJgJl4)TPzd#G zVhFn%*V%@&>~0L8^y8`u=~0kqY96?7D2{#U-^ma=k!<5GxfJ?6IN1=(}SeUuG1I~os5G8e~DtZgXOLzuyK z^m+aFj!xwA#qkHqs}+PUvLw+D!9pE{OPIow@{9U?%YJ0mqwfw^4!h^6^FE@I`XM&7 z6$@sI5sP)Il4aEE)N0diWBT(3vb&@v1 zKOn};_&cJBPR(#Leq#2@l(9u+7v@C2-s&7v!=lwjkAC;sQtlf^YzD8r@xQLn<+nPk zm(QJED?Y`IpUCKa4>;#O{bm1!@Kn8g#rLB57umbP)q#MQ$!eg}mnUDV1 zXM}=X8-q3zkExwx|9RtSxd}EiEv0k-5j%&LK)+6WzRt#rCcm*aFu!m$OVgqupxR2c z++u4Ob<4Z<=nw5@xDBEI^h%CTbuKl;mVR!_opl z8DVxv>Jd$8Xc7g@7uoBfEzv-o5=gUt2qSvDRRIv)08x;SZTTXL4F{x>w@@lhn7E;c zQieoP)jq!>Mt^Cob_A$e#n`CX#G*Qv2=g34^mqXP)D9#xJ%b`FT4|gihq#5Z_*5J- zN{TY%Js1y=NK+66atJ}R=u?POS!31JAXJM&YWKc8k0P{ zHfUhY)1-e@cMHW=RA1Q7=f5 zuWFV_ux$Yt$$&91EOgDPf$!?v0@Wf#gQi#wDJKAPl44O^3PpfcD`x>IqegjaD_x>) zlT|4IIJH%=T}tT@MvpgGEh+9WBJBZzfvg9^$T9+g2&jW9L$+)z3o@)g@djuRU2RDa zT76Ru8cwKy!y4$7Gt|T+Vw6M&k47PVf1Kyb01{?feye?77wk710s^rUK(tg*CnCPt zl5S??c#bK7nlbhSHEU7&e75uX((j^ZoOhTLhH}}733K>1e{go(43I@+b90^g_2l17 z**mJUm3NEhaXPJ{_mto79~)iOJxl&D7X6j|V*n1xqld>M)P4tCZ`t_3Y3Y572zCXmWh9;s4hxfm(7nBy-M%8TF*{hv$CULb8>v4Q5wZZ*y3?ZHQV1NhCEMJs@fM|)-b8>m7QUaFcbm(2W0oe z0S9jvd2N-}YLz-VKpnr@2H%(n92^J(!_QH9f1(X~3MUVayWQ9j@wjPyxJ{GYj2K<`sbewf$blNqOPTS9R%et%Du~D0X-9mAG*=!cvovX4A 
zSJUd#T-#PzOTl8Mg$K~G0_#Pdt?CVRTbtwXDv{F`ZF-COsKCJFxRf)SA`rl6KheS% ziPa3O-2};!*GeJ*v=daiBgSeU(9v90&DztfSrl@5MIU6$o zceRkC2DvkKI=QyVLpmiH5DXq$XQ=7gT~r~8AQaInD9Gfi&%jY9;WIRCg07+X`(Q}a z2DC6En;NBn&<89e4wiEep(O|M{aGB>E5oA8F$v*-t~WX)NxLe0LG35xD<7Nkq$XYL z+lDn$O)+u((aoOhsiq2@oLVGX=e)~P`D(~#zeSeO-PZib-jxUSfj&_y_sC23Ipo4`Wm3V*MlAY(vuy=Z4YiPckzzT`s*> z8GSf>CS=l>|8c$bFF$^xo8H`cwOx7hZsz8w_7q(mej6szwno-a133ltyJbgQc3L2F zP0>KfJi^+|;bj=aQQ~;CiTp{j=l~+lErCD3u1n>N$r{i+HLD1V6wQXH=#Er*V>}w9 zacehdBif#JjRKv;-!U}vbbq-r1fIcD#kES$g96cK>IBfrY#vpR{6USaW0X zR;#?%T@qs}tMc1!rnGm)kK=|uT2GM!M~|AadUe*E1fp4~0SP{WEy$uw&gk_JDjv9o zwxwJaMrIeESK!nLwsqhulEYk{3u(8qU!%=8P3}gGpy#gi4OlPiC(cX)E~1{JuH4f> zMe@9g3nzT@rL4BBe7CFG4|;34T(P#-w()i&OLaE)>8S%nz5nDdqBnvllR{KF1skjlOGM&{qs^ z%sV*j&Na&iq^_|$S&SU3_+=&cAqs8nKViEAU+YoT zu}Rn0lXM8s=}qW)$$PhG7Hqfh86r~N!SiS4>v}9k+#Yw@u3bzmzcV8t!H{w6o$;H) zD~+1ROD!3{8#7<~{$=NlKl`7+oB!s2Tj{<^wBT<=kavAgJI*VxH}mQ}&mEKE3VVWQ z%gf)t@`bmt*1THV|ERAaFGl*)CNgbuzC>^9YOee%zE_q$W%?f*SAGF+I|oTnIcQ&o z+u_7E=4ev9_p(N7f7dc<*K5FZW$x46JSvC1D0Plc{^wiOgE4F#Jmt_}EGb|cZ` z(14X5-Z+R`J@nwD{G7B`#vH*TVfzp)>Uq~LRiq)?p}WxZl}n~vcb#xBBd9CW(fxC~ zfdHm4LXbS3Vmjam3 z2!qkM3?$xC?kN^D%3tI#`WX4>5d9K@0 z&m0Zq%Q~g5@N~C0)oGg?w^6rWKxbGTa)**$FaAMr;1D*{)!p990&VU&KO8uc-qy~- zcGtJV4eVE?{XTat^KfELfLq+}BP_0M7U)Ngzph6A>G4z`3}`!+Y6-PIT6{L2>aPVC zef|(G?Yiy?-#dGH(dTtwC^oDHBH(b}bxL>qGZU|^WNBD_X#F8Pd}jg#E{;*h4e!9# z>~-i4(U~QGxI__@p! zE=I{WQAVH znR?D&I@$ojUA}umLCcNL z$rE1)jqn#*fqjhXp+)V`l1#BL6do5&f1F@C|UqpUQOJ;h(+9sYUj@-@6`WPDwb$W~;hwIR)!v@sAksM>j5&UbQ zFj#r;5rCNr$8AvRW%zAY!9A?(+E|x_fwI->3o`Vc+V31>OlfDrGzawxlD(NJDuvV? zlsOZ^efN6B^^ATD49aMPdXs6kJ0s4V_Kk!(h}l52^y2lw?A)aV7pyQ~Ss%|9^&#DE zr{5dpg^tYmy7H~UY-91tltQ4gk}^Ckd0j$lKw2-c4V_f=0mg{xYWgli#n!3S)M@TZ zZbCG$6lL!3`yma^<%|^7p6McIVWyce)KohJm_i|EQNr3m>&4zkyNlWa$vb_|r$&51 z$SWP09QulaYC`hJUv)TG=&%D5kw5D&Y5sv^L9Cj_RlX}=ZMGhUjGBvVC)1fI zsVNE8IbQC$yu@IOe9J57m-s_Z*$Z9{*KwG^r=Ji;{spOLrv_XVb?I( zu-L+?rHOQ!v_`GW;zY%+QEK$Lkn4ueT`im9L)F-@T+t;zE)}Y|UO-))Xm!@Bg(!8+ zzmH>*M>9!vHbVXSKgv_}KQxEl=h44|p9tbR70=;zy{F7u;K%y4S8BtA@C^D%=~Ue_ zaO|O>@2}S_PSq0aE?RbB>AR;>quWQeHRnC!S+G1TARjULPfRalkMS~iiX;Q$#`+&Q zGwrdf3x(eOq_oR^NU|wS=mFCHW5o2aVG>~fhoxfQzVKuI!?lw~UE%!2Em#&Mg`k9x zs~xoPL@-=gj+_2sN8Bs0Cg+uB3;L-%f}M6=L$#-{MWtZf`nMF6(s13~eU&qs6tjgD z`ax9GnuL^|)!_CHD|X7|UT%wQuS%qgNc3RsyhVRC&6vRo&bVH;nT?zO!9(#IL*vi& z=$j}1_UZkUqFTobQ0oY2S*M0j=lSJnqnbr#kNcyqZOrB$?@rW~IE@}y^p>U`6VtW& z&^)b}G_l?2cDYFVS-*JW#;HZ}1oytG+E!l+X_#Vwp6HEZo_;&X7O&_cexhg}xL^oS z&?2qmj{lpnoCsD%%wo^)jag!RtJtL-gG8`QUzCZX!|bb+V|YaZ#vBzOlrwL@R%zbp)ozKT3rZOtO1{2{!{FEvfnaEFUikz=BZ;GW7`^j#KlHw7hFV= z20I@obK*rc<2g$zR%CJ)xAgIA0YTj)PfLHwEID%Lr+WVg*X(=q*jQ4po0l>9fo-?| zTts=_A#ctwASej{bv{}ds^=lI)|+Eb5yO_pN-!GOp7dr&=pof)Wh~8+!X(ou2Ykyq zdH_IK#JTAvwLvP;q`I5Kc6DPJ9%06G*z03VXgn1ivQSp(!2=cRTyHiXHekI0eKT;! 
zbSlYe&~0h`yXzYTtKJCcp+jGZd(bf5famaY%W>%htD7x}B8(Zh8Kd;p%=54sX#yspqv@C5?gE~|+mkL@EleTS(x>DPm z_h*gumYiy-E20L`EKHp)YSf!KD=x9(%;-K|_S<`SVr~EAORz4>HQm`+;3Ybw5@y{ZTH2i2Z{Dlw z0uN&~y5rpKgv)%XE|Si4qDO1vGIRuEmC46NU#mFkOlxda^3&C-)zfx$^O&bc4+u3{ zgGQe8ElAHU?C)Xtj4ij(;suwRL-7h`oeLoNg@4RffsBqUY(rLyEUX#J;|#9_-NtzGRDg#1>mVc$zff;R19S?MyWzV}>8DctHLW%`K=42Mo@`=5AV3oE?X- zP3~U5*G(nnTt^-H3k)wL4eg{M>8KQq1zJdTDNL*CsAVj6CrkVKO>Mq8$cxei!U)22 zf34bMF*W#f+GvU*aoE^M3W#sF-)@J%{R?x%i$M!-PbN|w^J7QF?v0-MulBXIg3X~# zQVAts7BrytF9U%l##qV7h+eB0u_5AC%_3@vn`5N4E#i4&ERSm0=n0|YtX4Z+Z6%Pb zV70LC;63ZL*V^1E>BWab7v7483SQ zdEwOgm|SP?6Vl)4HqPmWi^i~iwcgQFRO5#4^U`?CUpDr6V5#?1t1#&zqh`0GtvZ{5 zrguOM+f5Uw^i25J$xIigQsa#1AtOOz!#J2_y{skN0V(s-)vk3Vj8qgzV@j}KpSeEo z&u6ZSuG#+7fnQ|t)XTiyptX=-JuL)4D2Qy2rA#l&f?%w*J6Ort>6fQ_`gTL}7+V`H z`z<=DwK+9PQEAj%Z@Ks|a=0Hv<#HbBaj3qeSTNpGt?Aa6`+9FvPf4Vij?Jg^39U8& z5`gs*conMOOJ3IVlH*2v{&}jSKAE~a05jr}O*$ApqPL~e9~V!TsIa5>c6}_yoiJy4 zM~pFAeHbAlCEfi+RqAXcK#tKsIc{+COGxee4}@PXLE*+V!a7pAw3pc3tA^F=Rc7~@ z^<`RDo}Mz7o{l_?bSG>TNB{L*?y$p63&r3fxY6jHdUq}bAlrHkkYj1raf{Ox*t_w@ z)XN9~np1mR%Zy^Wyk!&ju24ApyjWA*2D$rPl5-v4Q|7ZQW>rVsdacH846kR1b*31J zle7Z!#(ujxM!~6kpPZ_&biznsN&Z5i>sGh?dx(FToW`(7&ma5 ztmeJ?qH6Q;>0vy2&Sz5g+7$w{Y)Og+MkC9$r?&c|grlqLuK1ur;Rs*(O zpiT$6-)JF@zDvNS_)g2COy;1VzR4U zx?VJ^Xph}c<^v|K{gPiESeuh{d+d&7W5pP)nv<-USX8DeS^bZ5YtH5EN z@8<91)An}14utSqLsYbAtu9kj5nbCn%_FT1rPX)ubFWqU`UIgpxxmOuG6SdppbIa+lNuT&k5 zTU$!Kr*EE}Y1C26;!Qv^2_sHPi^t?5gy5m9oTg|+msn5VL-)O#2UypvH@3#2%X7}O zl9|SnQ2Zijhcqhq7F!NB*DlK_Bx#|Gq01=uR!qk%j%c-1_ z5DW&wnyypWGS0DjC3NZ=DtdP~*_ymperU-hDnn`fZO~+lDNbspL;DKo%s^m6TxDd# zS#x!gw6ic!2&rX*V+#cB%-HVddr;oIc2XW@!6iNuFizfSeulgpZ=Wj%KdLdw?2G6J zt<#n>HpOCcQD^vY4Ld9hKRu0a(Z9SQ`tK&rQr*H^FRE|%A2jAZ4mFR~V^w|=PMjAt zl+ga>sTS__zDs)cty|p_XZ%R}_<%=mNXBwnbHmm@Q9C_@GkILI;zy^J0Z?<%wlmC( zHsVd+D#SBOMWw7qu1T4gX4pH)k9!+hKjx1=W36AAoP1sSmNLN+q-(6Pfup~v0c`?1%WR-^Mrc=_JV5m=Ie*p|k?FW7nj2gV}wPwXK|1cVB~xKvN{> z;aK;kEuDCMbmm(AgGyt)G zYj*yNX#Tr)?UU}+r04j&F{{penDmxBR?I=RQtw#d{a3f8uV#PG)AqmY`A>8qtEtv# znK^deDYsrIW|+N{jeW7O=1`8+ti6+3A)JYqZz+RdX%IA~?x>dh9hn8a|N9*On2w?G z0r(hdaCV<-l#|Zq3#A`p74N ze`JD@p(VB^zn;7E>(aHK_+Ha8Bd~b0K6u}J&ZnbozVOA(m&x~9%NEa9WaR$>XRiB? zI>|X>>1fo1#<4i&Y_?NR5{(-xHy+df;Q=%{rVs5T-?6*;?V>IZBgWgi`4_3OxM=uO zcQB)OTK?F>)75D*7mV~@|MJRnOI?@Qi^l!Y0RH`Nqsx|tqcG#>mMosaPu22T@ECa~ z4nnnN-dX&Rs)!5%UhPSvxo&#wG7Ip*`; zQIdt`f%ToMI~Pg|-?g3`d{BZ7{0!Ru6HhMe>|}yJ7<>5%IjIMa5y2^ouLIV zP9^5@`6N7EUm3uTHCH9MnXM!p3munT=DW*>Ur%D=^iY550B2i^a6Q*L#a6R1+lZ*(0?fr%9k20x<&8Ye`FH3^gx`v*m86lxZPP$HigX(_k*3A^}T*?PjExWKUX?>77 zt+I2PXMWnkvxTF?{*Ru#QsI@m7eni3)_l+6dvtS!D^lJ%*`I_0w~Tb-!pRXs z^Gg>P4PN7tDu~DCBG2irQ!Pa&CmL!OL`uJ{@i)HnUNkSYH^Uk}TpLPHQIKjcWoJ8(dK%0^* zexJdxCxDw`gKG)Yk#;#V%Q^EKbE84t7Ja@&-yC#<>xFmra%HAUZ>G_DhYuuPbG+ep z<4exd&DILf$qh6n)kDUNafq&&1+f#@dv$HnbiSPQ4g&v~?vKCa-suSL@d;!Vk2kDp z@%7;{yzsJOH$`{4nJXbG&nH%S{Ro=7|Lo)AJRN|ulyA5W+C_g=HJUr!HL6$8=Nv@; z(GwnJwxZ$on2EQ?bi-_-#gzF02b&o(gQji2T3?*cAGlk$U2k3>G$Y1QLAGij+aUQh zaGUCP&9LrVU~9IcxaFL4WUWtAq1B)7j3_^zeKCv@&M9|8s5h(?EkJLqv4;8yAc>E6 zoRL^w@;*^eGyra7Q;@QVJx@8AgX7C0_DkoIm2)&DyJKhIoBU?(zd_39&9h4vG? zu4oKS@ac?xb&Yb-x21W?)PQ&TovJ<|gVx5?<6%0N4>r4Y`tg3|iS(_r9VodY)X%a< zbkK?1nN)qjZX5|WXD06V4-WTd;PUR)p}pP7$UyZliMV9 zk{QvJ&ar5Hs%=p)ttw-p-8ZfQ^t0AvqS<9E6NUQPWK$0r2jwa?&H%hU z&ILx@V@IzHwu@%HLC7#tns;)nZpt&;}VA5m2 z6t1YABmR3=GMPB?vWH|kMm-guLT8J0#rW0uj1|*b8GcjzUw)w24lJqZDzrPNnA+pa zvPhiY%&hbzPW|+H4Cxfk!)1-OWj`I!-U{o&O$!)Vi1-(@YIo-S&d%*A+&)0&pt=!Q zMIooL#{}6Ws~0DstqHBHI-l;xYVN^V-&J0rlGVGf$x+E5z zw;O{yjji5ARM5+pz*Fb18@=7l_W4M!zhDOZQrK9zXZRxJzrA6Y^YPM$>iuv2X%+mJ zx%$!5*Mgq{8<9T@3sYnU~MYO@ZDZavp? 
zL^c@U6FSz*<@O43$;lMX_IlCmWb(U2%i1g)x?lfre(alOv7Z=*bDM7)=aJTcIKEqN zkC-{l%FEdAY))L-@F~cV|K^LGiE1WJKdDxP#diV(ZoUYf|M8Ei}^l!11O9o;N*`3%ILIf=sL2=UeqB&y;xi zIs0=?_et7aPcH1Loj=lI-gih1L%sqyfhICJ)i>Ni*%&zzIj z3*Tsl0$FCALyHgf6FcXP5mdEv%E;#7UBzcH>sGa<$pB0pYtFO|DU_Yvbh)+fk(WDX z|JCel2lTek1+8WiGvxH5xE#}EhdHewU_X~ujN(EW>rYqCJ2W-R*7#8&S6ukEy$OO_Nk;?lE5I|%m^85L`@AK?0v21(t`YBP($6`|2s8p=9M@F+kjMj?C z7|6fV6wMkf*EgPj^X!?CBc5{0ig487BeYS`b2ps?R{MtAS}}1xD^-dL@p%oprAq;q zvrJK7Efn;VM^8oXl$%kvf$VE%CrvjRy3yigL+pEEt7#6Wq1 zH($}S+w22#ttmDMRfMTZsqGtJX`OM{$#6OW!|AZLQgb=Jp>gd_?e$Rvef!n40Bmo% z*MDN34en$fm~Fs<#&E??Dqq?9xLP2Wk|OtEgtdNH8qr~&bav(zC9%j9X@%$X&7?G( zZRLj(&vW2!rA^KA2$hAV=59!{6= zA`w(8ia_6+X9t>6bF{jEpFq_Rg8Cd?f?jRKgyec_oa5)$dN~0kzPmrC{$`J4G>U40 zwfvdn;*#?J`mgmHp<$!36l3OhYas=geIk=jBBA4W+izETi0&BL&fqFzyK@2je;fR$v*wRhwiKCRLU0){IS$!1=9DsE#j>kix=htht{Yddmy zqWP(^PQ;GSZ?l5J zSVIMm$u5#O2|mnCRigFtaHrcw-5_a8l$T5;1>98>hy`Ren|$6jcCrtN9w+kr=gHgQ zrO113>k&;Q#+p&Df)=-8QL{+M>H|`-;Se+C6`faAm6zR$Gt;|k;}sT@mJ{C@ow_%w zG$_-Yb&IH)liK2Lk1SNqMSh!`t~n;*@s%x;W*}IT4o%8lr71@ZK>W%2n9k6m5*L&O zyU0d6byl!N^6hB}e%a8Rv2ui!73>?TUfi6$#?;H}BAIy&@Jj-XMKN0OvVvb8V{|&d zT-rVLR+PglV(q0@7AiUzvGBPwcZ;nDJpZ_f9MbzvC@b8CP`D*UO}o+^Ay$T{I5NEB zWxKo3+PN&>P=dJ=H`E;ONsw7T+zImi(nbEjl_AmLNsks5-du=J$@|m*ap-S!#?E46 z_7<%&XVkjFSwS^Z&GnQkKzxMdAQT=a) zJgKvDc2ZFNFU55_8x7`vc2+21A*-tkYhEzY5!pWXp{eqx*`(^Ls7=oP6}XzR_Q#m{ zRJXd)(1T!+E(T>8iB#Za!Lu`^eDx&hn`IGIHdpJd3KyC;cU$D5`}1guUm2e>aJ`-O zn5$TxG@_dF@xa=a7EA^qRH0o=vPLCOKOs+svpiVU9%`>4S1bBbj=nPX1=5oH%G3)76}okEYev{OP2ZmB6r^uL_=vo0g~7z6 zILdxlUTRt2Ouc!vj;J>JU~vxchP zp?G%ol6^GOMRW-f(8R@gskZ!QU*(6Xpu}cC{*4+6IXlCjeXqj{QNjG6<4XXKK{RxhLS{=fU;j_8Hl@shR!pU*y%2d^tc=+@fbYF~#z$<8>Y0`!wQ}ifjZ>?c+vC^^-7G5fwNI}um*#Mbp?dOr1J=Pol9&pO{^N0J= zAH03NC_FQFwy(Jtial)?Ja%GgPPx#|U0dlHuEk{gwh7!oBN=|L3pyzB`50^R)+LV;zDsXl)=UTZ5_Ln!6wvq&mkI^81 zgY9X3Xtmx23O10?kBcBpars4&>=|Opv^`FiQ(FQvEa_$F{8T_?lscqL!Ai4;q8c{| zy)$m2bTrM30Gc-^r50U-?sT(2d4O-J%b{GOtr?V3z7PO1{hSI(npDyiJo zUH$`g4-|fO1;kReQBmwmnx^o_1`*r_Cvhr~0MLwkiYT zsyp_*+&GpA=Yo!0Uu6nQlIH8>GApO;IV7DPfSo~n<$TippDsm|Mn-fKd-JefyrdL!|umG@k+{=yV*YhicL|YSoe$Y z>NFveAt;bSHQe#M_0sZq)0BLurj;f*;*&h^sFkA@RS7jGyDIRnS8M@yVO7@oox)QA zOhLORV{j^7MPx;ILhF}9=d|rDO=Q<;*^royV=5vxJ_9(e)t~VZcxOQ+i)k`qS;(3Z zJPd0_aeq-)x^QF>_KrnSg}d3f5ZN5xQ7B9CsKn9FkmW|RIu)%e6$=Y@ALtSg<`rIU z@$nP*9I}qnV;;~2c|o(u0xPwe-M5bvl71T@+gDLeg)exI~TXWD}DZwE%-MxP!ao`pEziwZ8r3dc zpX2!VcSOjh^4D_MB&p>iri|j3^@M7y5&ZSAHt4Wx^^Jo@aDd%JaL_(pZK=LG-U!Mz z3R_#_xd=!?Za85Hq z=x7WhfN*j#Bffkhukf z`;uN^7dbCIZ4CE_p`~EYk|Wp@(~R%5SL$HEzz{MRR{0nwdYUCz>9l>9BCA+7tQ3|s zC6$+i4(bE7&n|x?sx~*$5=*l8Aw^uL%8EA!=OrE121>AFS=99Y)*-0sSx2#A&Udw9 z0bLIP!+ED3;xdH7-e~$BA#$tCPp2&Xi{WSqnd}dVt!wkp2tWhEj`dNM*0tr6L2-ev z-9N7{LdxLxk1?mNz!CNT^ByM1AAQtAYxvmG7DhbgFFlSYXTKGjN$VJ*nA? zQNzPNaF2z+pEdH>0BIae)Zi$55A(@O!!Kpc`wiREV033ngde9u8X%Wk=YG=9@!zzL zJu~-@M7iI_Yu9VypxN{OQm}K$+RY5+p9-lEO5VMp&9*A$L?ZSc)i|J3AH9|kt(9p6 zy|Jo%%JdhCeCbG?RuzWww5`^+*u^m#3)ks^NcAg6?u=G+RIpH#$et5^o7N^Rd8nK! 
z7puBCk+o2S;{B{aQiG*bDR*`Q#|aKpne${!_fRk3!z*R{*+w^SG8B{D(H9$m5}-Ji zlX;}n9C8Gqo}++tF-t~gI$*^r{pO4{YNq#S-TF{ldlVS-?JJqmS5*>399)xi2-VDS zXTuqvC+Pt&)YgFswX~rf#H@*Ogb9vQryw#rSDO$aE)<)??OnpWP~~D8!oFW?)@y@` z58@UaCq)UtDS#mnz%_#w0fg0$ip`?eNF;{VBVa@>su40-&N1BO5lo_nIYFn3(1OFL z-4$6p3#Xga0RdkGBoU>jCN$ok&4($m37ZpY8?kKJwK202t zYN7lm;e)Mzl9dN9TOJQ|h5s^91AliinEByWkGl7&wVb@)6j>|1VDR~etz9KC!M)zE zoNb@vcK+`Bff9N9$g{YEsJ@%d(z}R2%ejmxR_xICn&8WB;yM#rwMqke9i+S4<+ROS zN9~#-y;p*VSF^>nWcw}O(I-Qddvti!_nr()tg@8)=f5?bXU;^AuB%_UazA+7vhcO7 zf7Vi~|CJk>{?)NF-(8nPj!WQ^LFj+;@8vCz-{!ZXA1^Z0r*E%GKHUEx7RFpp73QZHwA4~Rq?7aM9_eoqdh5TsELgJjM zbM?sefkXGpSS`y5C}-KLSiKHJT6W*+r%Q_H#!Otc1RVtB^5NQxXT;D}P!&FJ>|;r- zLLHy}p86wnyEZGBa49(yC5e!sg+|XyH;<;^`#YumXn83mPSVB^?=}Eo3%f9;7W|pno zt|$a(pX4UjIiuX>vqwTPo)FEPj)5KV6-B#wEX`YJTfYRK6we#ZC9`v?U+=19*i@o2 zCoZ0wS*B&BJL#9m++^{fE~Z9&Sj1e-5?WbPj3v0Gka*sFv$fu68L4%c9lkZCHw&89 zerqWkbFEkFfdvBL*IW9xOpW7?>A)Tp%)a7kX|^D+%l^~6~cQV1m%RI z^yVBfiRpW7WV13MeY%jSoD?vrITk|>36m!Pf`@v{jukoE*f8F5?A2ASOa7BTuwm`P z{+9^o$A>ni|7AaG_JIarJGFi@z$xD}Ahy&Hv?Wx$)?Pcd8cv(_%D>jtA>u_6H{rEj zKkUn0>A|ZSrN|}UfR{qZY23(b2I9_KgHCuN2GrG;69(qbWkiLqJ5D2x?#rrlHzCMAtcd`+Vm<9jsKOv^EVc z>q44(^I|uD$)!=J9v8(|Wa{Vtt0c0_`LtgEz_8z`vZcjkP0(zSq$20DR$E0_l!+P& zbXsC<987{myCV6T3^nwW(Ai>*=2qSi$liKu7qE0sBii~?q*;-xKRDe#5nhIFNW@Ny zf3drTn3p7Urt!Yy^nO@{Tc`euNW>}Dgs2dv=HIg6Cze}(+Kc8+!qZ$sik?pE@nqJa z&l{-t@rFaP^u=AmIQlr8Kb;!YbHZ(ED&4^7z}t1vP(Q&>o_KH>udEll?Vjkcs*-A( z+cJhBb5>OO_E5P&i`}Owt-&H0VWY?Jf)aqwr%1FF4Zfjt-m;mMxYA#!>5^lbKtu5p zsjzT@y$j4g@V)w_s_wEUr6MjLNM6F4sRhqa1(`TywrX~?5X+PFxtz&8EeT5T-g>3x z>X>R==C=42Pd(h?znNx3WHw6-(^Ni}mn2(9A7@!Uk%{)zO-g$-pPeS~ zJzi9gQ>t0db3J|hFD36>Ay(DPpjfX!y-8hH`1c(PgYBrCUq%hm!v9&#x_Mdb)uniG z7^VWWt%j6ly@$Kpiqww`=4(k=isR4S6SXD`^go=GG*eBYhqwRX;=5{8C?IS?0@PlT zCTG5OHp_iN_8shvwZ$Pd2*Z~0ry54{^}*jaK%vxO6R9I<%&jaQp}>V}pvp9rR^cd= zJZ_HFBwCPx^bxN-EPy&H+oN>;GP)FXA%S$h`O*UUb!GBRY&b5n&-HDOq2Dq8pZ>5Nd+#MRmv>q zxV3KA4ZYGL{%NT*w91AGgVAPk7|BNGhPEUHs*ENu;f>{~mr;i@Db^q8_$`&A?}js_ z$!5L<^x76bCrMI#sjNG!xQ!K{vMDHVpembE%6iqz%RHTf3bJxD{}4T^%CN8^wOj0* zHV3HO$!TkN@hPp7*2WtHp}4A2sVrqrkbp`gXwCE%{d)6I*-#=QqNRp+h|vQUP*EC0 zPp3^L!8=-O$bbMfs}`vy#K~Bs>;L?i^XbpxbsSMB8!w-z5BiwtSi?JpApW8+-dqTbBpR1^y6D$mAmBN9UEN)r~J*E_@RT-a`&u(j78=B;OMf$k#^1WktFMj+i!BDsl{V)shOME0wz-Vbb9*4F}+c zRDq7v5evDUkv?4(iN+EQEg-3RyZ1`UpIAPTu6h}VD*_~R{Sv2X>Rwfy0J<$7cCGrw zOS53f5h<|``cnmrf2t@=JOjOsYQ$|EpXzm`RW!j5c zMaVNqpI^6loHk;ECdwtud%o?%K`uidKFMR~(;i84#`*>1v`o*=6o zWh2weF3FFKH9^%Ob^EVJlV<@ON@eScl(ly3=$(_8b`%Yd^-Ua@5l)D8OH<3piI_%v z!t2iAP@mw21OPg)8rH8IAU(T22v&{xqiLa`EzkM&kb++o5FW^ISfI$d&0^+i0?>pk zX~~VU3%@bUcO~{cuvzcFN@ovgfRe&Q7wMhqh383&AFDXJ&F=A%Z5ThCFf#qVTBocv z`vX<6B5-pib>edCv};va^{r}26>V+u-RdnzgX~a{AjQV(V4Y5G#pJ3{`)KdWa**8W z_Or{)wRVATBDi{@O$=QJ5sL!pnVg#U!J>p#f|QFGaCN~fUsDEiqJdmIL^QTX1`zvV zro~(gL)9*}l0rlJ5iW*rZRwBwroFjSMcsZ>X21fL2CDWV$;ag>iSXNrPBkZo)h>9I z>ek7Hv_;kYBB`Yp{6Op}_?bb==gQ^>0wP+bMuk;*vEMGBP-Y|r5KeouCS$wP-aO9K zqXW9ox7kiwr;U`jc5Ztx>X-tvPe(wZAimveg*MCQR@63%yE0mM2sT5_$tl`ET0C%{ zSvwS-Ea4%p<8lTH+P~)3Qt=mRS`~%Ma~fY=l36c}$4Xb-o=q?(8xXLq4{-a|t6+sW zY3)Mv#dLUH)=+OO_3(iauD8hnTvL>cAkGWQ(x+-ps!dO74esB(yS=-gABu*NVS=+M zbc=N*B7+{F7(A~V5ma+qqvGEE)7*koSB7)bnBJu|8bh_cfKN^hOh7k(^txK~X+~@+ z%RX7^z8$NiE~M&6q0d)dpCR^}(K&m6YRwsk@>lfT_n$l4c9M)nB%z5LxGx8Q)=_X( zYJ!Y8pV7#I4XQ(Y)+Xk;h!%FLC+)ers(IT(Jsmmd*ECEMF=^G3m$s5;zzX&xt#sjV z8EZLP-u7U%5=M{2g8-u+fK)Zw$@O3~YUaNIDv}P^vt7|9wD>u8U*EiGBC{9Arp7jU z@aaw85mCgnbIl__VNF&hxb8_FP9}(SCT;8KXJ;4My-nZfWU>} zvY#!H-NP~XgVc|!tl@f;9!rfgnv1V5_9o-}1ARSROE6ye=WK8Nl zHf9l(OBWO}rZU{O*Qm`7xVsoZIY6vhje zW${Rm7!FN~bt$sFhvj$!|BgXqywnwBHAcG|D$(JKdPkt=3xE@B4D5BxsK52>okr|Y 
zUbgu<^R)Nc4C2G-!HlBJzF}OYGu8g|pwNqy_D6`+C@O?yt$89JBd0YA(U>$kPyBWo zrR3~I^Hi_W(ax5%Nr%UG_ zf2PYsR_w#rgHO`_%9YoyPtiVuOo?;*qsM<0?XYm24AB7t2PHSt=kB@73k>{S>lC`R z_{jg|k6kKW*{WBBn!T8vDS_uV)hPvB?jLx_Splvxw|Ll_kxCrdqdAA?=+P_2xe?)V zKfr+t;uOiiLc#-T=2G_Q5nkmUupjnI5?hNz&cMo@PClb;aW7nQD)jZ#gs7+Tj28dO zS`v#xwxp&9zQ*yf!lC>lLhVFV`Yp}F3?#Ys4V`}~5ctGO==(~wKDt@wOD=aX^_sY3 zD1~c`vk&|6Ki9MMsbO7td(r1`Q~cCPJT|Bf_8N|n2WMwyoAfohi^C_GXb$P&@6LIZ znf<4^)&)C0p(|&b0)^aM=xHqnr{lj-i?XCg`1A8`Z-uuGe=s$Fl}*xk501Z$>5fWs zX^HZ%C_hX2J*Ee_pVUV>4x6vl0n+}y%Ah*ePUt}ERxcMD4KU{g308ETP@^M}+FRh* z+e;Bo0g$$$ccvY{SL5f7WT~V`X=LASGkWzXK1TU+r-`y83no_VIn>u)V4o=jOR0UC z5s?B(TgsaB`NLC1Rq>5sRQNpZkJi5obES8+$JgBxsl>G@&1FqGaWuvSm|nWn`;F)? zUVL*;w|}Qz4A|PMQHfVtnaArnH*9>K!KlJ6@~80-B|BMYHK5LE%7R@2`sa4R1VWd+ zH!E*d7kt<1VwD>Uh?bs_NDg4*>&UayZLy(9$*$pg_R5VY`!BxBuD5Vl z4GC>7seF-MXn)aLM&G{!2QI=)`iV;UZwmKj4d(^(I2F8qTCVg*-Y6v+t(@DY7v7IG zs7cdm@|KENMzyq})VP{+OzI81Z+ihNT5{9sXqS>RB(j*QCN+)Tu~MVa1S}|jp`I1G zW=AgikjNjgO>XxjqVsGH*qOc@TR>OKXM#aCr5*BUqeHn=2K1Pns!Eh0uOGURa_T&d1pc9RmF1Ul)Bp38P5o()32ZozU@K! zdxZrubzeT2oUa_KFq)vNJIg?}H~M~4$I#NSsNb;48>|c3To8;o8Fh4+dZOh2w!3{l zuT%TQWhmG`p6bHwJOIx3@bt99Q7u8r_vNyBp0?!cUCkOBR!61s%)tK&s6V*j<3vfn z>0#hNhWD;)w@>Fws@iW{7Aj5GD4wSLOSJ(h&n^U)*LkF|&-zr63#7*b6TRbU)liNM z2Dcj*B4_B?;-*j>yM`NQ3+*iSJGIPp@22AxKg@*QD4c4bFM^#_llAz|PKa4zyCeT> z?07n2Zp`cLorJD3R8^k1o&Nw7D0fnzIFJbb__YrfTUNyCsS?3Pu3u=y)xDqMV3bb< z$A-lhJ04rD$QVbvcw@w#uAnQiSHtd}_tcTN@SzYJRbGmSt<~53>_C|KTp{aF?&5Tw zL_pCvN*L8ct8x2xB7p~6;w`?vP@<#mg^Y7JS+91ppDS?aW9 zG8N4H;~NLkREI_5J8nsBqBmIXXiN=}MQIp4>3y5PtzU(Aq0YG$)rtCKXF|tx)+<6uJHw_}K;j4{CnHMN9< z(^o7mq>pNutY_QiLo0wRi34Zz`o!=`X7BdqYhZjdT^Sd!qfzJI zvl47ot9wwU99m~{bWRE^y`-QQml2Vfd0F^C^D(x^_i<;wDW3!+$K&7DP15T>W!}Hy zdTh_0g$5V?1 z#;n`0_p3ITvns~O%xv9}*YuUvbLHj0iv%|o380tp+Qv^W7WCef-tl}q;@!JqtZL8v zmU)Ky`1Q@V`C$IhkH3~rR*CJTeo%O7DL8CRrmMmr&rHGw_<>#MG#RH}$B3?OQbz(> zk5Wa-_|Pi`3G~o8EA|!?T!hZ}lAD>H@~|&wL4wwV7g}Qyvk5l6S%Hhkk)RRN~@786>uBmG+DNS(-Gu zCiG(I5T@t<_?HXO`g1HGc)cE_1qXZl*S15VSke{gG~27e7c2YT!%tU>>~W%8KO^QW z;(cH>={hxV86>XAAs2r*q8t3$bKkZdu9y9v^LFpUS9t4h+LN~-C~jnbbbo$26W)Gn zeYmr3O#7R9j~i1HtyItERWk7g{etI3wqN=TAtLjSz9WBQV@_0hT7`cLcs-{xxw-9g z=zi2`3p1UcSX?KV8-M7I)2&cCcshbQfv%}i6f{6?anf@^xQdd#O_jEv#^#>p5aS5l zx78)p61F$Vr3-K?Yl^kc$+V7lD7b;Pp}1LPe+B&ca;AvkKWV9xeISL~38`<&(r0qTD#!-G!Etw(gr{ zJ&Y7J*jTD3IlIc2@Jm8FO>P%p_XsBMDn~k_HBZPs-DY}?(Ywu+6G)(g&5X=v_dAie zYz(P>;A~hO`@gU1h`B%e!EmX5fl1lST}QO|yg`U_(5EHrmkSHO)oR|K1*H9+c({c@ zMC;#OoPxZ>MTV^pUs3&3G`%a9l`J><&)fd7OjsfPeG`=(TG*Ll$^P|wUdO*ZG}a{H z5I)j=<5m~H(zU%*^rTUs&Tgr-wgljMJEcIBRi#)NV5sn$%0htWZ=!HoHr8bn1_pQ` zYQY}qy2kizZ#&y`pGX8tg2wJRo#`|g_^2cL>RsE#t<|3tt(J86OhaXhM>1xwsVm9@ zaQ^0l-_vhjM`}FxSw=J3Ott0`qP6c$bB$d1yb;w&mi3pu)c}uA z4W+(CiCsSs$zAzHhg>SJ(k`)_DVCNivfZI9+F~0J0(30>)7IJc^mYz!?y7Y`UO@n8 zv%6MJuZ6zZlw0A_&f_leF!x*AJD=h9Z0u8&p7C|4H82lZNB>y zD2UfobwzWjXskf9R9}cflAQ|&*Q&QDFk-4KKHw2g!U}p~j&Zk?(F&Ah0m=KVT7Ob(Ja2=? 
zN+Ya7YXAog6=EIURP_PF!om<$5ta6ulANz8sC%$iY5cUP=ms$zLYS;jEalQ0Bd_Yz#y!^ z(tSjOgn5U!sgcO3ZsIDd00ersF=J)BQo)TPkI1|N6mC*i?18AJ-5V1KbrOeIPyrW( zPFcQ_Dt05~*dzGPLtNiY29B=IOsL$i|YAq&@0Y0wY`Ctmz`NU zU3GLtNKrw=wy2wfvC1PD+$y4g5J_qIS2Y71l^%`6WfVduvU!_?6MgMa+XboKuoenW zrD2W~HI6~Sq&P&=pdf1P(-$nw+)@qp-GTc3F8G zu0cIC1ax3jrL?S-3^?2pRU18|T2D{|gR}B9vQM)TrPBs^sBTXn;pRJ}s46@M(CmK0 zZIwiJibV=iT6z>GwL8o51Uxh=hHOSBhoO#%rt||%mlpIh=}t)uHsZ>h+9U#1<-NLt z%1sZ9nPvNx06ZHnA8Px#QG&FkLY(P3N4(Rnl>cUSQZHnn6Dl!@?i}@a?0E8X-)zs& z_oXhQRFX+6uDhN*B)raVITX^0#j1@Pjju%}Mby1Nx7K+Qd%|vsGk7i~r^%Wi(c0N04`(WM+V|Hf|DCLDCXg z(_31%Tzj2n8M#|`5$8n5HqP@NZo}A_3`<7kFJ8UPywMx12k!fINbmKBfKVIV5fn7q zd4QmBgC#!pdddA5-E^h-S|C4V=q9Nz>o-XyOEZq&Q%ZJUh=zk-Y+F|1zl&EJC*f>J$zn1$*%6)w^f2#d*!XkRXd=F^wa1XoE z=&lh0W)|M4B`az_K~`;yFhRG3#Q)L%vmlnDZ*4Fib>Hq8KJKO_g*exu>#P0Y{~P@N z%}>gQ?HXTx$w#?TBG*7+BMmo_YerUGB52^7@0ste-YL zDSJxf_@>@J6#jl#hh|dIX*zk5Qb82SS>@yjiaI6A4K%*VCMh&;zkDNs3kyL#M-P(Z zL|3p3$|yxQ=Y;S&Epv#PzSm^qf`; zg~p|e*#jh*5AkYIw`(KJ#uk>BKONFxpT4&%Im&3Y4vJw3ST`7hhK%AuhxSXf5~xC$ z+kso}waa@s6{fLPqS@JUkHPkPFt7R7KI`ls?n@Mf2IQ2 z@n%zP+sw&Yc$(ocPqEOc+>i>dzqMB1)=@|Lv{P^Km+{B)ddS$;meTEf#h|X$NnQ!g zdR1l1MXN&+3>Zr9Y#UsxTw)Se3;h;+fWTpu^SFLsD7IU>H=TIt;o%W;A~Vhy6`vk7 z_AnO9eZ|P_@7r^Sm1hFI(de^Nku?@b-#v|EhEU6p0XkdbR#qT~c73a0nYd<|6aH`g z-^k-NJ>0-Rt<0LzADFr_L`gSHXN>xh@*+eJF}cQwoR>3%uAvR!{47`~nyC_4)OX5F z`$J0AUPkK0j%HWI14CD!=mE-w{;K>8?ms9LU9k>5)EGj*8Cti?14k;9i3BYoIuG=I zh-%GwQKJLLRF(KJif~HUa6lSCH>O8L?hC@I)Y-%_nP%8Ps{>!r*R}-8Se2#iISZyg zWNz;((5Nic$MZt8&Bf;X)UgsMK!OvoBc*s8)MX|}hA=-B?d1b5DWQazPERgyXKl)! zT;9sm&YgzsBz)sscpu;geWRM_x?FB5fPAVZ~(rj|M2JGo47&M>z4UUT8qy z+bEYKDj|Oty@hZ5v@q0qtjjCs6xzO!QLgqtI^C*w(~z%W&+GkK5ql8v7LOgrg8d;) zDU@4<++=Ge5~@4e;_CM+HRlK`@2dxA;-kVPFM$j{Yno=nIb|9y-cgKxjK4K zI_RsbWZ#oz>7BwUX&4}sXw}5`w335km!$S(MIfA*EJqrLl-Sz_hy^Q4cJBq&CB+^c z^k{@n`tSOX_tuFeq>V?-X(K3AH-6O#*{cBx5smPP1K*h>f1{|jdDAfK&zf4WlRSW- z=?kBp?WXmL9@S9rH&9Cy;xT=9gbZ}3eRT1`nYTSHO$3lq zXQ99=Moz6An9oO*#+)AhB$Ir@u#@92CAAi8vrIStq#f%u)Kd`7rmU?g?AldcMQ-#OPW`{mke1~Ht^8D7BpNarkI-bN)=c09y#*rJ?Hzc zj0^f4fWl`+etz$?Pwtx_5m&zWK-W?3~MbHWRwUl4#|h-utY0 zF#K0Qbp1M4y!9WJs%iQ1ke_H%0*=S4r z>U~4+dNtG2wvP3Z>ujJot-Rj;ZX;A)ewnlV;_U7X!^BfIBUPu?bg`I?_m++XUsX47 z;pIZ~8{n2AEva5C^v+au_0!S-#SD|bE$KM3cT-HZ_m%Xp%y#IM+SDP-=&V3Fk z#sSSkOtEF3{YZg7(Dy)9F0XWpGwhBf;m3x{2|4fWjw`+Ldblaca17TZm|7hYc6cZY zm|Zr))UXKf=^>lC*ER@#Opi5{OrNQTor*#r$i{yxicZ zG8L`C=p9!lv?=?%{dw!I_YXyVam_FqK`TqXM(eF}e8M2)DK*V9mu6~KzIj^dJ4Z!= zY{(&#kW2xvDLa;F0whEF*1p<$t~zG~jNeqftz;&6tB_&xlikOYpsxnp6ScfTA0Y}3 zA_=fq9xC%SXa6Y`3WqZ?%z*i~y)U445#T7P=5cZC{sEr1eJAl@gmmNd%{#u~#bsTk zd%pX_`L5O-=`X%7WGAo0G|z%*+@$qVUc%6yaad$k)0VPeX9>AhFNbQ9PD$tt%=l!A zS-K+|`bRx^dCAsQlPw8@jT!Tn#wQsvI%!hRPY;==CS+X0c*|p*&(50_CcdmGgT<{E zM6&<%)b>Yi<<=D<*|BoiV2XuP$!H4i?i3W7uBR<2sB60YicKd|rdt|ibbPHH6`$7cQGM(4R;A!T4)+3izFE`d_MEIET( zoA!{WWnsLWR8+nOEt6k1X0cI;Yfz+B`{rx(9+ z?X>TolM*YO#K$Yr(9gRa4XS<9tj@w;R`NkuYgp(shXKx~GMHMj>RVe{qqZ>uxFnR8 z&~*&*ROX|_@i385JG29b%tA)`3KxaxyBMKtc@Wysh-sXoc8_3oIMZf23+{9GcF*BggjZQ zv#xc!=c=sQZI61<6&pb>S(a~%7;<{`6UWWkMcyLJR5(?e6C`=J&2+787zr9h3JMx% zDt2=rK5#g)1{$fb;~wdXu9%!4S;D5DZKV6Wq(Xo!)og*gYAKSw=!nryp*;;8C&6uJ z+}37)a5Nk^`OR)`dtnxA;GVMRFd^4uokfo!?q<8xUvhac-(dg@ktrmYP91J;_atbo z*`$;SkQX*gY`>}U3o?O18NDNtC8R3|l>&<08qAIH(u5K*!lKE**cc2Hx{3i=lv$NP z$&lWlx1mdhuZ1?(b{NtO7gfp+V`=`lVC}zmoq>WMHAX#t$11v z9%6I`?3*HLf5$3}z>=UCZ5JXU@$s{=;rrR*=l8OEiZ4^c361!XOk!>{Y8Mv1Uz}=) z|87=)i61uN2S2;C`Sm}43u&<%)EHi>5C(hv;D4yU`?tJEXG$O4_4X)1@YXson&IrZ zCf|Z+7Bzlu*|c_!vDs$&TRtY+9k)Qut7R?O>E(FykuYQIlwk?iK4V&vaZ?u&s7O_K z+0lsTnBE8-9w#M3KNd2}F14A)KJ@^93>}P4@>*Y7Gynfn^j^V|n`fFP5{XP84bppW 
zj`yZcRh{xIlA=gaqFPc*ZD&KhE@n1jcW)+QV)vqh6Vct%6SLFP)0$DsJE|qINEXRr z70Xv0SI4{b-g}EQNB{{Sv6$;~cQWxO{_lI==Lwx2TGn}?pI-9tJ93*N*&vD!<^tIL zf8}5;%^0lz`4_wH1+KWJ{>n#%?3sUt@7N`*j7r8Yc;w=bh#TlYI6VvRN?J)T>N?eV zRkD+{!w@@6m%L{yiNP)Sp}k)*`j3KOc^8w&t`D@{JooP<2j_A9G|x{4LSF!_L#r_q zWWz;~8uC0{oXRa=MgkqqjE!_vV<#(v{*!ck#J^BYPu37W^zNRZ-TW&9n-X1>4s->6 zO|s5RK2X5%X=5#T)=bG|#7(uzMXaHDrG{NlyYP-z|7@fcOus9+pL-DCIug}UZ`JNS3ml)^zej!#1C^6g zy|?PkkIwht+9LA^%APkriPpcnhxE$;Vv(hruk8fq{I@9MY*$6lrnUj@82e|H-`Qk; z229X{I_SBXT2iCKNLa@w7YpI8ppA9Dm64r)QC3FV#G}4mg}r?}Ld-1Kn*HGj!X(y7 z@@7LVEJ>$imX@#Hfj&Dt11>4W!+oxU3uB#}^t}dE{La1V`>rb^X)g^uD${Pd$2$@%3vW72IoJoS4M(GnfV%fmVBA?3;p%0 z`V|W`U*jbL$M;*7Wa8*EHtB~e! zJdF!gpriXwk_;*5Wh7m_0HQrZoUf3h0u?ABajo1loNxBHazQ(Kpb=IvL8i~wJ1D1c zWfYP2bWXX#!u|!X+{OHJoVh*{SR{&b5fa9!_D3fwP}K%AJoNkGb?Y4gw@#K>okl`#3>urtDX*ru~j)^M2-u@ytrh!_$m^m(R(BjRjVV5yYIjCrmHnySW; zlg^1aEydp)7%3C#i*VM)#qCI#x(4yub#%btjb*q;39-uwHdP&zmB<+ED+&UAhqP+s z&5gzQkYH0w2QB&;*+|9i4w7TKe6k-SdgiXKmNn@rI6D4h_b9H&r^xBM13VRPu} zrUqP^kv!red=T9Usp4sw_mRClfQ{s;jxH>C6{;%2ORn&p!^7Ml*NBLFf&%E-ye1{a zw>g}+uBVlm@I^&h0b);+aK&|V8>tw1!uj_P3gZou=Mf@ZQ6UVd*FGc ztuNuN7c?7U4l0--AM=v$BUGoP62zTf@{H{}I?hlhpG3V+g7sw4)BA&G5a{nVzu>Ql z;=lp9b*H;fn-Y=8RG}?0uTQ>GOsV`}?7a#Oq0(*LwwCL<~XvRl`r@Kt(?HyGP$>4mWnVJ=yqp_`sfO zRu1Fn*YHyNqYgGCRn(WKvj+d^3n-}XjtaDP$h7U-Gha;uz*+80_GIo>B{+&wkI6I& ziVd_-JwLf(8rWQ<-GH^_?uhxiAZ8%ATcCS!ET}5&Q7pG8^^GBryC8&ZZxVH=)j*q$ zfaK0NC7a$ipDXg|mq+HtQ+Vyn;zGsQm!Mc&U8yvdUPP)IJLW{o^AL>6=1#4sJ zmqp|GHtO7v3&;<5hf2p0L(4SDezPU5*u;b7-wx9k&77z6e9}}dCXaa?s2XiM^}`{A zqf)zafO4r0@zk|@uAWUd!6W3kqTKvNE?Ipn+ShR1e$RYHE6TE;K0oVyt?2waE_?O= z5Xsb(cuM)sS^j4RbADo|OdUYKjh#wum5yK-Ut+?&Muizpj?qou2eu_!GY!NVX?3Zv zkutiZJGzfWSE`sZR<{^6WfPKJ*uD?>W^p?)?l>vAA68EtLQd>+_)55Uv zk&HB#0Y8?_I0@5TM^3A_HNVR?33{{>t!bs5_sb5+mb1KEE&;Ry5hZO8{?cg+d2xbqv}JpHap9Pb(){$nr^Z7N6e=6xD}cB=eMLBtzPd zVmiV)YPRbVX<={{veev-B$WPrxLgxdc+w(KZX15Xd56=!1o~q5&oOaJ3ISOao1iLI zCBpE6T=SJP-lIO-Z`R6|XEk2EY)plXhUi&p^T7stjCjJHmvgy?G<(XdHW7>@&{5=jU)zkOL+;?%9AQe5`iYWr`(8K z%G-?!48tS>5Rgw;dx3#W*_@gf1!c>Y?SN$E-B=nz%erCNtZB7%qh_w%#?k)m%-2hQ z*)!0PCA?XfM5%krKg>sp1Q3esj~9fqUC=S9u;|#dOxQFd3^CkBQN3H;5^GRBQJ-%} zOv)(>uBdvPFEYhN7|`=wU7!j}CWLKE)g$`!Mhb?UaUwd=I$F~jUdyQa;FRi#cImPN zYKI(|tkkGF86WBRahX9VO@d(oh}q;YH7>8~XJz?_JI=Vv)kVbD*SC;nr>r^^FfNRL z{&8dI8ef@5vzEm{{bSg_EP7HHkP$*ZvuW>o5&}e%a$^%MSs%8pjXnmmUv)FJbT03Y zYc+fsZV)QAU%lU6iuKbviIEaKQm0#v3U7EL^p!afhGc@l!~HMeGol#Mi@syXwRZi6xLy&7*WRX|ebWFI%K3n`mF0xtrx~;Zfv;X6x;0F*jPbL_X|#89c1r zr=ImXm*L+~m)T|lI^&h-WQoob^x?Q}J0GZu^G|sV{Udk5z=2Q_jJ6enu zGXJUF;uL|sHp~(Qqnv<+OiAEL^HjrxpvZ@Do2Fi%pxrroG%WQ><0`2*mc+TCh$-e6 z@2F{%S0o}LiS1)W%eDwyXRS`A$0X6%fczP}P)=W{sDmxh_Wr95%hvo6mDn*D7akw& z6pd86#jlZroiT;C!m3&+8%`0Oo*its6vY@NQ3$gmL@Mkk4faKax!vH@fZ9h5Y>pZP z#;=akrz)OB4J)SEx2TMH}|LItqE0J6~I;Jiha@fd$%j; zI{JgO0te@U*454a1uliQPB^eNpGdWZrCC|>Ox8I{H>786+tm2?@W@BHSbpc*K0Z3sO3jR9t0OADV`(BfY~KJ!77#$|{n-wYl|_El1=|4?wL zUhj$W7i-1y>pZD^a3Q~Vd_2B4IH+NddDHz>tvL`OPit48(8rb61LQGrV%zp>cakHZ znH%PBTF$|{nbA3xQ4UkhKRd{p3%}q`s$Dv|aPe(e4rg+W^0HGz0a@e3zi`%GZC1pm zKyPmEX;5ZS8XWGQVfeSEUn2x1Z@;EC68pEW8@M*;8{CZEB3%ejuVLB>wCGo_+2o;8 zt&`WxEWafW^6F%qkvtD9UNcFVQhDZ7_eT@mpNz|Vy~H;Mu8;qAPJL*vhTH!FyyhnG z&+m;05^d|V#8=u0eZ?n_e{QuOirlo}P`>GscOF*w-rTrTCIdXJFFiXpLloJj<;ILb z-JQ-u>+GBXl}#Zw>&0pX^=Dw7*xwzhP4) z%QuccCiW*29~%-PvrD|5>V2krJHLLc{Wlrz-}x3vvft$ef^*G3T;rq(Q4?z6x8-Wl zUp=hEUe#I;P8oi-Ywd^^WmUxt6~u&UZ}5KrLpx#~Hd+Z+wo(fDFBMSq!uu>A@9#KV z>VF$zkSW8il(7ps-hk|@A;sO2nok_}YtjEOfXlD-&M&7fS+@=ZSpNbZ>kQBnbyJ=- zz1x&yk&w0Uwo0n@ugm$rYvZ2u*?3{>jyz3NkT+veXuddpGNx!$qs8WZJi~>ITYcOM zE9CyOSE(x-g&g2E@(B!ZZ-~cpdYYsa0gPBIdY!{he#`a#b2H+aRJH?1b%1y4sX;a~ 
z(-sM7R7>VlY<@s1!Vnj554U_Ww>Bi)@L zI@~IwSSR4Yx=BQN2wabmVc zHEF(_!p|)QV@sKlhhO-MOCxnVA_65+E%CW}oZi+r@W-w0n3WAkWU|U}*R)6zeh+2P zc(9*SrSXu4Qqcv=3Yuv??I`o_cO>uXcbk(;q8Sa{bSgkk^4sTjVzN`8pX>C-3!4o>lrL9MhHvLJkU6Mo zP+A+BzM`YM;`UeI(2$PXMY7enTv+|RZh9n~vOQ8@L%HAuVa4skkb{h6H1kGp-x9zc z>l^$dE~z-8!tCX~a!R~hC@HHwKVKL;-YFi3(=(Oxux2{yX!%H{kome+=Ii@|oo!XL zA~+PEeRh7Szuz+>=GWS!Wv-GiSZhc4Gt__X4A-lJLbJsyxSyNq9-h}!;QiSZ<0zgn z9d`54Wwx$d(A0;i)0}0uy*?i^b-&CTRL%N-6Xc$bhFt$OCA0W%i5k8llI*V%!wxP# zT_F)!2+M_|w-G}^epr!yH$|i@Me>vL;HJcKcQmS0?b^<&O?ast*+Uzr z9IU6e#0Cu~j@7ECRltSEYEB<5>`~{(Q={CJj781QX+laXehGcgRxW##?;JnlACC>^ ztA_$*UHT%j^?dw6cUbstA9-(gGl9?-ylk@$Q4&uCVV`w=%vaQpnG5Yl+1~W)WmeO# zMZV2)x)NWKYGLK$)2(x|(;}ZL32^lNtH~bORrOcwoDKWBB)lQ$FWwupRsDwfJ|>yI zZ=9*cDoPQH>h|4x<7eq-Myax@AeSlZ69ShXylT;kLHaaq9k7#8Wn-1%x6O_jP4~2A zc^~MHPsSf>u1HOGF>m&s)M$&q>z%oRR&O*CX8)F{)h~~vuv)X8r^Ni6Vzm2dT#beL zkC#rOgEv);92d=cZEcnOZT+w|hm-)WS*?`w{D!Zolo@ofAGB3YwZhDQ zf4nYt9f9u*b+}HZJ(_)Uk;~DV)92qQPfh~mlT%VN8iX?fIhQF*G4_Gy56#+y$x`bY zql5ao>^Z+t2H_s^)#DQArGA}y^?hNHP+CR87kB&F$^vpP+0W`qnx;2bm=%APDL0*S z|2|e081P>oIAw~)Nd}jQ$ws2OaWP@&7ncQGTcE`Bt#e?7f_rl0h%YP8mncQ9dQk-- zSz?}u@mz&W5zwvbAZVD}uOd&)wis}h&kf+*tFnsD@mlTp)7`2;FC8eT!g-TX4Ml%% z?n?ZkVryTgc4>@iwJW*kE*b^`G04;EhrL{VjDNS%K&6;irK=m2d9UbVp(K_lAz4EB zhCE+86#SurBc~W6g_4pt+G3_+NtX!)>D1!-1x0p4x*8)-R+hk4>SR$)_b5pq>)H@Oe*&5 zPa#Q$pchTECR~l0XlbD$lnn5wvJ1A1m71=ts(ZOHR-oFYz@Cc3n8=3KLRP7cA<-n- zL4vU5D`;DF}P8Z#KocF079JkN{1smCr{|`Nw0~Y{@0La%^HuL zsI&IvQnwOl`z8IN5agbUbb=lTkFp(9LjlHS_V8p-m2|V3UZp5VC`W2K4>jcoZ-VBA z!hu9c5_I-RD5hYUj#M9x3`E}yuvRGm2o*7{C0E5roYJ@2eWwJ;>as&)tAy|*Jp;yZ z9#UZIl|%)of2l)|t~A!>8p&Li?4`$mzVBs1foMCq5|EJ#ZTfOd*l-EZKtY~VI0q7g z-0o*MkTo*HX8U)Nt1W>_0w+sAzp14WEot%dlEew8 zSUB=ag+1{pN@a_fw_?xfB$AR|8}5ZLen9O|Ns*pa*tGQ_ZGA*mYXY^F>Jm%`V?H1_b`5^h65k#YQULAst$YoF`c; zYm%mU3RI`YKpu4LAuX^(hBN`9YcNU-gc4L;B>X?S7n5pRXO&gq8QOqXLTApRTr7ALGyMdzi69d*h7@@m64WSX{Yzy{~XsO!IPlVU&#zX1k# z4wFZuC_aj~K*vjQP3rbRrMjaRPl^tNGg@4BTM>!nn+ton5b4jX3)4JxA6n4s z)0YRdO?*U7ys7%OLg=@s|8IXFd2C!;klk+2@~HP?L_r;?kJ4GuxjhE65TZf`ikHB2 zx3rw3UmCPfO{3c?-lab8@HVX$$4NZh6Wo8Ks>La0vc{Qbc-`69fedg( zd=_c^VH@f;g)CYi@bSMx8jCesxRK7t$S2b-FLRJ7#;*Y4%NIlQxe&F{0-FHpx_D6YoslTnkNH95}zWl3M zTj;KQ?s&v_6G{IglnLWENcjvJ$N)Tt`1Z`>UsR`!qR2yw21Qn8v*4L@P0t_kx)GQ z#@*jJrTEQ;O~f8svHeU-?F1sl%ln0b8BDQycQ^LKiyfm~!*e+{#Y48JgAOLAl#{aI z(Xh%?^@euf2O|8Dnk(`5X=7Jb;!o$rG7cQey-}e^{)veih^5zB+&$&>k!-2da*mr7 zMvb=X&7q4{k}AY=xv&gMkHPMhE;*vkMtbx0*?A>tZ*wwDOvF>|K6uD4`EA0SoQLWa z6dUd}Yd|MJ=ZHCDxXT{fz+(!Of%vqFNA@QmtQF@+b7tecs~E6`DpK8CT^7*Yj^&Xx zxnMp~l9o4!cR<%ym45+Kl7k+Dm739$k&a8|Fj1W{M-)@$Dpk`o4wM7`M-_xjJfV+E z=g>b5JCycGpx4s%ghvYzvy9Pps6PZH^+ti-PUOig;niThu+!uRAR@o|7i9}@a0`R3GSlSb;Snh*6>f7mH1{U(OEI$rGtwo`qBeX*~7mcvY>C zucfMngHGNkAn!7g1zF;Dkp>a(m*aRPcJS;(R5sTw*$SE$7ow+e=t7*AJW{_xfr>)g z+Q9yR83k>B5u8F3Rd5eJ?>T6Cc$~e{@^5wN1{V2EQv$gFN?`YrimC2TyDFRgWHsK~ zPv$=!0-DnHaVjW~bpeQ<+Z05|%irjUbnKIfid-)4DNpm;sC=McEeei! 
z_I36krXsLGXcwceRdTkjkV<9zSDB&-f(Nz(-3jTfCX zOs%NkVZ$FsKcJw-`soq`{JD3{nS!&5`@J+M>j{dm&Cb;=!vun8K zZH(`&i3^c#uY`l;BV;4Lec z8i#X?7VBO3@)d6nlO7^ykMv7#EU{@%G90CU=LB z@ZDbjG#H-G7vhPtuW)<+-woy8|6fPlqlx_JisBd9X-WlZKmGjPB}Q!BF*Jml%-A~G zC=y7a6g8M?Fh0JG3%Zv~@{-P0=d}@>$GUtfXzAebq$qI|wix%G)FcJEU{GXd_{902ILPNdW8&v%PRP8huWCB!Sle}DZr1E>-_Y6JJE5_o zTx_Rto`<&PK7O`RqBRp0_oAUUKemD1mPz;87mkl7{c4wczX)ihBal=3vp)*c28C?m zXN^;rYqWB^ZSf5b*NN$Y!JI^&9k$Y$mnBcHBk|qe7ECC<=4w`c^Yo4D#s~J z+OT(YX*joKOE;nzr_~k|-x%6V0gGcTP04y)Mp^P0&@pwFYwbgW!ys*Fx9POoSk+x( z$K--s$$i9Ut^LEFt1-x8ZZcxEov{BKr5 z9r2Gmt*PTTaCCOF&YmbcNa`*Nh+il!suNt_pV}kxePR?Y%5>AQP2QJ+9=zR{1HP-wPp7aCJ}j2L zUO40Vudgamf@v=n69yBnxR>-h1!qNy|2Eq!&}l(k_kyONTFOh%LMUr6^@sL@?bSe( zmwm^9n8fhH`T1PV6xzFT_w49YD;R|`R|EfS3?9ukZ`3Q3@WuB>@!N7bK&#S%L2mK( z$xD}OYeyMAyU5q+WeTe?0qZ>-{WYsF`7&oN3R2K_%(7{~a5O3Yd1$a4?>`B4So270 zSiMXnSNhG_@I{Z$1CtP6wo7#`i|A*ej3BdOor6qm)dIZtE}tFRasZ-EJ0(5EBI`_i zAH>sST##$V_*ronG!5)2%UQkZOU&fF#pzKwZV8!vCsdu6shzEQKSHn=zjdhPrLJK^ zP0L+@9@;cW82uZhrqmVJbwMbu9icaOPAW$ z`rnx<)QA+FmEu)@)y9)<%_oMbp1!aGjajEv*NKpNtFVE}WZp5_Nrt55{~nk_o|^!j;y+I2OiAg?{L#1W>Y;xg2lW=!Cj|3H`7hu4Fw<7N4&C!EC^}az z0n=&9p&Ffzq$#_J=BM-5cgd8CzZdi%74HS98aq;!#$@9XySkNN3KyNv+cRgMG7?NPl(v{*fQ)NA0~_erljy2;&`e;@Pnq zUfO-GS{GdO0hMgH3-7hXP+4&AtJiz0ks9+sq1e4Fpi+Z4o!gcJm3r?*LM6{>661!O zJn>&_mW382GTIycnlK8mh}d~B9n4UJhP?D^1)vk2W+2-~s<$+HpW?-OH zCO@CKd+689g+_x_wdDC`-A(0Za9}Ub!@B3ZzphpS+4!^LGo7NQv)3mg!hPGNf%jUpZCm53Y&If{xTz zT*7rRg>Og+hufpxgxhOJ{UFi0ygvyJ8FKyz@e=OuC2}_146MhABEHcR_SfZnu6Hip z?iQSboM36?E#WxkHN?px#tyf6&8^(57&9mX=(N1Pa^uN5qH*V$*4kJ~I7Ba0 zOKvv401I!>uJZ)d#I~o7dP8!4GM&Gmf^-tTArJnIVsi!>(k)T`{;}|Vs4u|h=p~mN z6_)8`Wlf-Z@W&0MW17Y+wT<}lDZ-ij46D4{N`d|ATHA+ivTILc8Z(g)3Xguo6R_!G zQ)FMkBIir`T|+h0A;F(Wd-54|t`Tc1p5mvR)7n+N{yR>R_bN@7r&ROE!PF8o!*d)z z8>za#0$x(=uMN4Z{Ic?)!Y4W@OFtKUmXcKLz3x9Bs$@TGAB;&K3pZ?XydO;HwVe|5 z^R+FkDo6)f?8{R3e*5VXeu4q({-7^fdH1)Q%|q&0R4RL zd`4mhLiU1bLuooO%LnRl4O`GkGKVQk$AA1R!=Xzlo5`Zki3jHLPBzbYQlFxX@a`OV7 z{>AG+3I#TTL{x50BgiDM#d6jn#=yriv6;9KBzD`I1pE8`prBrpRJ4Pvk&ah-u+75``zWvVJ%8AOw+c1u8@ z^}L?zNP(_9g=praR1g>ry1iFD2+gTtSP?OLht`ji?hYwN(V-6Ga!aYv_f{Cws`Bh*<=D(vb+{9V z#iiCsoKOj$&ZUGZ1FEZ3)|;a=YRcYMIG`JRT*z^9cglpG$mX-nV4|DuGixSgtFBLwCp=sj6S@tHqS%)wnQgYzP2S$!MICf~=-NMlQx6iyazpMc=FeN%1Tm z?Dks*%g9s`Wb0?EL*+gU$%E2td3$3*85+Z&T94(^$WCSdqF9k`ftMu0IuyVSHh7=_ zReVaPNs?{3`8mpoy^XZ6AbD*wk%D?KhKF}c1h+6R>NN)?b-AT<_2%@Sm9}p*rbclK zBruRk-jWwyq;Il8{pCRgA#A)K=3@CGq(9h|!ToV3QlC6qykKOK0O09(sQ6?z+QAxZ z%1GDnie*!AF;x}A-WggjUieCk1>xImoG-6xq!P@hfwQ%~v8r{9+)Q=2PGr(uvxE*=41rZbBMH) z-Q^YZF?Gh&ud611Z$vpnB_} zEHdo8I=GVr@9%N3CxlA~pNBF#3mTfI1A=m-&=lu%>9P{9=7nVy7rZWLpB;&p@pGEh zRud|8FtpbvTfEh_SUUy>E+TtaHL8KiD3Xy(oX6dY2H78xZ(IRW?R9}Zb7`Vrs>ZF9 zPr()43UN@XHmwM**R|G6T7B6Z9l2*j_EAzXo8=oGado|PskvifhKBn^Vzh_sclotU zx;XZb9w4{?7hCH>blB^R9w3l;;K0H$v$(hPe#x9m7#=CPZRP6$z$0u52OW=c#$<;^ zHpOE#d2mRUsQLTM=9kjjTuxjfbdR;8Dc8+0Xlik#r!46z&M&tFzXtld3guelshE;H znFl>Roi1k5GlaQrFNx|!+gX*t?npJ_3?AoOUttVI-LJB_Sm0_ywBP%gmNrNE#0!*F zAJ(2`aYEX8%Vfm}aL#~D-XGgBjbC{*>B@X-)xGV|#qprA7W@9bZjd1yMa5S0!*$<%^*df?3A)v<}6n!b<>=1`z4q-v(hu$f0m>|y}(wHv?i zb|})tR%c}rUNk2_n%}bV;3%E@W-4uiVaw_7F7XRDGoSE6+PSARep$RgZ`T|n(h)mQ zaS8RkvJ-ufvz;J}!5-ebS@b+i6+Uv3HgKYotB3P-S~8>*+=X!GO;S*v}{S?-(sQYt4W!4lT;RW z%ZyTis@DD(WJtVA!6p!nOc1BV;#H*$lL1)714vIMML8|%YY6MLqEkcmv=@Gv%THWT zNE}~e^K08HMM398z~p+$8kX^>=33_I4~s9<`syv!x%kA;6n2TTSZb8ugi%f%I9@w3 z4O>xP%>{}~%JFJf^+YwsQRj?bY^}3ippF(<~11O2K|P=#7bF>IsZVAms>e(1Fw3i zxZ7aJ0pNF@DP0dX&T>+iKTAhpv|U@u1GK$otEhs4H= zbPMbvmcC;&1(7_NS4bQ!6ev=%d1)lBVqeRio#Pr6QZ0Zhk1WDN+V}7@pPj7Ai3b%;g0zv 
zcls)ed}(XW(II0t$Tcc%yMAGgI!&3*z(JXb5*eflKKKTjP|)&B)GTxMoydyoPsT=U zUreLd{>=-Mq%NTX>yI`e5X~ra`9f0l%f!F#iv3`zsj2^IgdG*&c;fYe~nTdHKc|GT*4XzooXz4CY%~TT|Y;I#jr5L+m++MZgV@S@7y$wAY;5rfm~6s zf&U|nWD^H$NmR4Y`y2Z~7JA*8TJF?kNikmP|F)E#6FH!VeFG1YQig(toZNS7kmPAL zR7Uy0B)vl{y$$`ZVgjDz+UnjT&Ej&b-B8wIPwkQY-%78%CvkRa!#_5}^a^o#F22cU$&>An zKt1ZF9yc2ufnBRNEBdRd2GK7(aJVE6fK14EbEeM{#zrQWUXPl9Xc?AF|LRC|Moo=g zxULfOV-CwH^iavQwHDVZ-;@lr#wXQ#X~B)B`9k3>q%aSr=d&BU&U_~0kl*=#G`?tr zkn-zZq=8%F-FlulWs={JL<7ZFuh~bkhaynU9joAm!U*28yx<*#rfjADmtds2s*0pF z(r{k3WE~#n`9AJf;_b9NYkWgEp1Y2)#iX#dH z&qMqsVE=mg8!A}iAV+F%4i|Gd&1v5ZYF)&pN)3~G^ysqJDe7iWZ#IFHQ=23pImen~ za;x$;9PP5ALuN+*>+ zSB92n+^)v3%7tdW`kqu|lbYkcg^=!u`U*SS-!V=7fa$&SmFY_#Q0{Asqpn=1a(^MY z6ny0~{B`&3SFTC>!Qt_;(D}}Ps1wAW$uw1=Kb0I_&$B#gUmUeGS@Y`%_$Bg(mjTyi z4->sAQ>RGg&dV!;2gAM8IE>4K>Qgt-K6v=a{-?9TuA<&QAJad=_`t(T`Sf-F&c`SC z6W{*lEk1A-ydBNj()fJ?Rl)Ju{9Fxisr_eNU*UJYgrA!q3EtShs|r#qTX{2_8GYCX zEgt2=mp6scUbQO!;&v0B_*q*4);owy#`6}c&jnq+5|DJX1w zeg7q_-ELnpPHZ!LQ&cD)TR9mExNh=+ovP>JooCYRA8PNDyZ@Savs4KMd7@hXdgtqB z)8(dijG9@EJQ6geodg<^wjra(wh>pEi(lPd5+SYW@P~waMkbIhPJeQMo&B_V&R^k= zFb9jfpII2guEBE*WK9w?1egRrp<>R*35~k4`*x>mq4t#Ko>#)TvYw_dFKsDx4`S!7 zzj-<`C&!SJk;~hobeT)bc9xG{_{dB_iKgH@{`#JwbeZ>f##c8mN;dSK1BEuT%wb&>U}yNmOM zIfw@n4rB?ech@OM`*@0Ugo53Z!=6k`0OdDf}K4y9#}<-GY-XAS*2M_u^MsQa{zU z1>8!(&Ar08RMhEAsLnBxl%(tRcUIyj{$-OaK3%%wC8ezkts+XeM)HBSG6*k%d@((! z49CCT+ED2XB5!6SWr4>E2O!5WLHl{PKd((gkbCMa2n%`9nb}HRXMs2jX}&VlCH}+q z7(fULqJiT3iiUUkP0|cL+^Br#`P@S7#|Gv0>HU`tdrsNu^?4OfQ?%>oEqpUn`rLhh zx?3flurzKvxDT_j8Z&v=W=w;=hLdw=}ASjcJ221z|V0^iDrtkvdU zC8*hX%x!6mJy6WdqRaJ=sDI8?e|5-;jUFo8MfvC?6KLXI9`;DqA{dn3}3P2Jl{6Y2+eU<_gU{_vx#JYp6KZ2x#Qv55sfRun2TGb5jpXlAwY4@OW z3yY-}o|Y6Y$VjOn;&Q1gN?JptcS9M?mc78oZOUT2c-$Sn6}*_0SN7&|wViY;kpHG& z4VAV9@=4252zghh zfxBHwH^wI@fHTQV&0+6`B|@<(yKHm=JFOX-5Nmb33r%PpBEQm*XDh>Dcwks?6l7B( z{=ec~a`D)8R*;)E@txaw?oj*!8u2id#IqtvdVbQKn*9G+dJibc?fcFXsj5&3m2>Rs zoYQnq4h%z56lGe0maJqsuf69x+t1$fZ7_HIb}gTF*WOcbTFVkCiV{hYB1LjId3r)m z=iJqmbIvFf3WY*E=-%~vcs*SW{Ob?Ap28o$-}n3ZdLD&npM{c1RgzD@>u7t5tCo9^ zvI^4nw#oU>aaWzgjpxsq_G3d2hhjBPmpK+EwhZc|*%OS{8dh`&LabA&DpBL#?luZO zJ6o@|JF48!{`e;(p<}HS9bOkF?%3M$gEiixcxxE3D}d$=Ftc0QEJL0D`reH{yVVQ2QgCxsc)Sz zJY`7623GhcN~eOC2Ne5BA=4))I8t%RMcqhuG}RV16ecE|ELh~r zN+|EuQZw^}5lB_2*&!5!1Dc_xknuRUT#D?L`j`wL8KKiZ40if@nZs`n1hF5}hY{i;%86VpzAUQP{Ra#p4}en^7@1tH&2xul3E zC0?+xsr_;8uIyJEFTKZeCU|OQUcRqo_W=4ac6z}M;5$=Tx|7zqRAINEE^VWjE2!K` z-41J=Uuhal30v`Obj*CtX+~QDlT1GL9=?>`mNoy{9#@+TDX|o|j_c>b`<8`t#*SLEO_bhcv$KlR|HO(HG;g zD^qiQ^p2PrAJ_Fm_eTe^?0AzI7^&|)XnJWJzaa2glaWyoHq5gVR=fC{x^C(wpFA!Z z(Xt;%Ic&AoNW_0m^$U1&S^jIDGHI*H*pCt(v(Dc%>ciWZRHZlaCS?VHoS<0Lk6eVQ zmH*Nr*A42?2@kQ_o^FNxWWk2W0>SZPHS1b{8i>(VxWV+0DK)e&IZHm4)nGy5PqV6z zUN~ zbrkD6`ArXR>*eD7H>IgE(Ga4CUJ4~!X`;f_x6v-e4* z(%*e6(qL4h^1Xw6>hejd2>P)GX#cD+iYzZ$=-gU=4vso&S z=0m8tKWLI8byvn#rR0E%T?fo^It{COP^kG>M(Snbi}R7xCR2jp+rtTnMg5wVy85tO zQSv1->yEOqDK{Z48ap~{&2{23!A2h%>7O*W#JWuCcQ{+C9h8sD7)Um!(41EGYQ3iT zzx356exxXomWnoFB)CS7mMtA(s8A8m_Oy+0z{+=QkBuF>MK;6$$XP?Xa(2QwoG<}& zuGg8W*&GKyzQ4EJq~6!gjH%0W*)sfe?q%ckd5ygiL^K$ZcoIw3*_G}}qhUFraHuqd zyz6kEFkH6K9Xdt51JG&p$DR#UX9$fk77dMf;E2ZN;yj<+B+UP|S>9_#&c3$%WL`VR zD|g`ZX%&ZB=fC4PXkD1lZ)xE@ZR}_;IQCivd)?+G;FHF$h~2A(VcL1YqVM+GU;p5kh+I7$N*2CAPWT4* zB>bwtnlQ6{b^#Czz0oB@#thik24$tOb%BO7r$1ZMAJ*h5DDp}WfVyXT!qE|l{SQy4 zOobbR)n3Ng8xJ1EiM>yjccV9k(b3$;Cz*ykw4?OCi(ibKdN+1tid3tipC>9Hun#LW zHvoZvP7hT&BL%es{amU#eD=p~3oOQBMBW}Q!RZ{3Q=aDawSa5FF>2c#8Nzr6Sexs$ z#|Ifep_{xLeNOuu+P?xQ3lq3^x0PjxeRqD27`c zMi1BX=u&Ihjy=1B4R=(UtkQ?Pt16LwjbdpIWbKZd>R{E!2o8P;CryLsW`39j#!X;j{WQU 
z`?Yj#tPuO&EuB%P(GJv~>EC3K=WXop+Te~d^hWCL(Ou_IA1d9lK3E6`H^)`TstWq_ zlDquwTx9U?-^7n1Q6HwCxNNOHoT={hu+c)+O)k3Kbb0nBRy+>vmqESEJvWO*j>KfG zVR7W!HKe`B+UYTJNBK_rqp0mcBnk;{x9Vo?ZwE}y*y(+KiWr_c_e8r?dj);77Fj8u zH1K)CO&)U?nkLh}a@RMm`+ z$aY4PM3erB>d+=}H{@GDvJAzY?wGG0@6^WmmOV9`UHNX~wj#whbccL|FxU+TD&Gf+ zX>sh>Ws{N&xLRyoJSLsH4cL*b-VMnuZZ)juN<1+VE^4W4wU#hE2r}#!?C#Wu=h~>QW-MvT(cPvg(Kj;gUl5jxz6HqSC0jwU z5*-y&!%e+0)7X{vv0GhZ*WH!_S0T1)?5H0WbERhxK?i-tDnB}i5D#(Vz^@Js6hA;y zcziaHC_T~M@4qk3krKs;QfG6&-g+&mzB@BjF!k?gcwh(NyZL9h=V}Fi4EamGwP@y#f_AO5zuqlGD4@Qb?2iY{sV|rlu4DEqZep^>qwYBYLx>TQ z9IU0oUdM)LM&`xR2-#b>y!T{-xi|oSjn$nM;!{tzlzGhIc*hVF``%MjL{%C)OtT?} zeeTTgeE+=7`U|&LZOK$iV6hnFo8IaLzffs@bRAE96gGj|sr#$L6D^?zDm--cjr92J zd8>-$^-#5M`PjcBweiEir`s8y>&^>|(AsPy+lzOk(0Gc6rXAcuV@#tF?$)Y)!!bT{=^%}TfV4=|Sq#`tF01 zD*AR{N!$MT=XcQFD@2k%n%*F{VT4JYeI`0De8donof%_eG_rRX+WqrxS2LyRx75XjE*mzuOo}jikoE%{Y-?19(H?=hF+i}W}ej?rpi zK0U^6y4LQ|JG;4L{Zp?yl!acI{Q1u1r1P1ypY7Ax<)<+f`=hqnBVsDaw2@Ft{JGea zXSnXDbrEg&r=()8Z7amuDTTf`J>OdOpH$DQ#@ejGh*Q{>YcJ~MlFDZXHBWBFZB~ZQ zoQtgo;FQ`|_idAIdpy;+$qKFsb;RO9Pywc&}h>-=k^No=@GK@ z!BN5ylxWWQOpf=~6IT+=V(9BixqkN%X(53%*;1+f*0&ARPlu|~bIHS#=144wdxJu2 zy&aAS5G0nAPH&vj=r8~SHyQ)NxRGp>X_3pg;~)LYN>gnBh$8XQsp z{>c#uKZhFSdn%npG^k2F4ROv;VR|N?d~~YNzoeTR(BhN2gnmrfKneyBs7|lNdt0 z=Z;I^2S{-`*%7BQv&l|cuLJXZN{6ZggZW5{HDqd@EZt|QvC6E{X>TM{CN+;hGEY3C z(=@irrcc<(u3v4X>PblkKo9Llr*vhGa_%*$@=Sg^SKNx}`c&%a_Ctf5>}W~)P@lWs zuwlWRnpksG>Fw3SiSX|0bnbQI#LDc=R0fAuwYg+kvcp2xA1KKk6YFPh=%!C@z`ly@ z?NO`+hI_rB9ni5e(Lykz*O@)3g8`I_QG8N!Xrl#M3sQxO^mO9DDs5KZl6+ap;2`ix z_+zWVGzdhybYnlNyTb@d!y2Bda38gz)!2)wYZHZ9yoZ{%ayO||{(Q);AdXADVEyRu zQd4%k=DueauluUH>|m){?eon>Of?78V)}q;_bjxjFxAVJUm)Px)9ygb;jr*8hGm^| zPHGjtqSl*<$mbO$gXxZhAGRU6kDZ+ki@ZG$FkxC89g=WRS^bilsHE@CqUVgg)va+L zQ(+Dy1Bj03YAGo4ohBh#*4U1i1@gaKeF5|$I1F<=I8 z-fyzXRAzrK>~J;Lo0U_$1xRnQ?F)EXtEqYC$8!9blNinvW%?OIm!M|2&lq|_ePu&;nZ5Zlyn<9Qt31%|qmDx>!s_l6H0){3eJK2zvu zW&31FD>Jt{2M!&G3oi~43RhsQ=T)Qhyz&Qh56_g~#AV-L|BR&CSIET4&-B1-Kd(dz zbL?PpT&qu|2BJ!ZF7Ilg|Hd&44VDHC3vjgi3%h;y> zz^WbSh3b=`$QA`Ok5bM0la@ZDC1gPi7^&ct$VFWLCS~QGpb9<#hSDV zUu$^oY)$tFQxQXY<{oR!@y?7P(hMb<7wj+xDwzhUIh_N_)Jx9YBS87c{^_O0T%6Gf zcl}R_hN)@-a8bF-0}ofDRlZ#eU4f||XA}yCrNi{5+S_-d>1g}jSC1<(Ee*HQBC=j= zoC1jMr<^>u?^A4GAEx(VLI1V^>MJ&thjZ9DWOgc}z0~fy@%6@G?d~<-O$V_3uo7lt zrw8?Zzdb&m85lO!%Ck{30T!+`QJqQ=I%?!#6z-92*_p5_@5- z>aSpvz2VtCSJt{*H6N?U=td?*3`}aq%euaNzCv`6(g~C0J7U7ADHQEnzK&c4EIG80 zcLYJ~J$1JiT^sc*)JJowu}eC4p?|Es< zr?N-SFCK$Vje@@Q86DcXA5^G2V5WD?SvJ;*0u>?owroxjjtdtmAWIB{Q%Z*UVE z;N3e0y>Ok42m^*FHj;CWQPkC|wV8CRb zCYPW5y8gq-D|x|1ueQ`54;qRRnKtL}-=_?i4wxTV+&tHMQ_|*iEvf7K4-a)A!|`b$ zpGWF(3$5Kz)yyBy*nW6XRnEide6GrMAI|<@ph5=)${Ner<{N^1?BS0rLwAR(>`!vW z?>QW1#f13ik)lB(T=pe@O9^HVRtM!f6Py{l(ce6DUWkYM>wj%%3Qw=JoRfu1`G2q- z$gTTTQ_dWS6m#<ywzi;#(;*>o2elZs0xKhB|IV)%IaT6Mh-7!fC zAZDK%j}s_&lk9c9Xyq)uN9LRTpr58HA39-#ubc=bZ56>+=JsRiPbx#g$9(?aTz65O zDkX<9au0sPj{3Ld#3zc)*UUoMPTzUS8vbcRXG*oqGQdCVtd8JQBR=cR6nWqEX?ir4 zP%5CK6@X42o*4-6=ODIp;yg@0TEW9j41RA0Ulsiu_G5l3Q+?6`W`E2Y)%{5*YOHA< zndcqJ%l3MTKZdg%k23iSr_)d;tc3Z>fZBNi+}W8@wPvU1^OY?rMBJnA$A(mFy%d~6 zzhWMTZ*tjt?kkiJKI2`;?{1NXqueBxjqJ%kp0CcI`Sdp=)7Q@`G;WqV_>!hWP=$kz zY$1P?<}~n4W4|_1KrJ;d%$(W!-mcO$e7LqaPRsg~Ya0_QL~mf#MZsEZvaGtA-NXTA zBKi;0bNZwMg3^nlt=YlO;i9M?zJJxyS?XUAi2{-I(^Cw`xC}YnI0;2% z)SiZ^R7^EsJ~gnrL01P$FhqwiSiLpSm31P7wRgoIr_m$r8JDfpddp#usY)hdq1`h9 zSr`$+7O(Zi)zf!YK7|?t!IQ)8m&M6Zg_G7@RK6CUwCVb~YW&m!a{tUky%ik7r`4u! 
zHZ+4dm66{VI_ZT{I}!@U+!A5ifaJt@w&+kz{V3nnjLn<*y^E3l%I=5sRIv~~qi0ez ztc(?dFZWCDC9@`zY5$!>l74sh9o0GW>toBbaoycQ%K6kVUp@96ok=nWTrAXkzbcfh zXYRkM+4@rA<(jU|=Sx}KHrzGEqux^gSU7liK{r#-i`5FkoP%aELn@ueJsfuzI<@M> zwD7>bslJ2`afF_pza+W}?#rayIG`M@N-$3Iph>L$!$kGAZ(`CqKeBVK={DMpUVUzE zXn4hiK;%m_t4Q$O&CZ}Bf&bDP-5SU7{`>(ft7t(@9WocSg?eK0`a@G=wIe>#u2o7D z49=~})Hj==_V0{*0jTVg;@Xc|jvXbwGi#=b*l<$S;JYn8dFG#C30>5|QfUcPg=f{B zDX-HBGxm>EI&Z9Ru_Reo-pfvhwYD;&dc_mwDViD3hec9|f#HSf+V<{(&-MhsfI0U}Ntsy1iMkViy+?Gw>ibAF6cjKD=eR@#t{pcqr0Q$2v zdB|XTkId5lXzkpx0(t({VI-(zm4rFlMv+T@-?wtK|9wS$P9w8x)GI~XkZ4foN`--D z!{*Avu-k}h7uPYSw&%CGiWMzBhfH=ou?8NbmkF#DXscnA7`cr_8kX~=P+NYak!f@6 zF}3!u2Ma||I1UZ)%^X%N>S7n&DTf@)9@6r|4_gk6^3FvmYcaJ0Q)c^NZQUT)+xupl z$^G3EQl5S|-gRXy^{eVf6u31xagE1}pC4?8k#faVPG`}Ej@9Q`;-pifwCS3v@sXoL zHMy(-QE_IeTBYWjk(T5zwUKA~;?F*>-j&ymPI{9u(Z$zX1jvx)a=r`h*{r}sq*k*v zy@kk+HTK1OV@Q2bB>`{hg<1~ArS`tD7ub~4FC*6Y+p^`shrSRoD&GJGn@s^1vt8Bs zAw{6RxyvpO3OlCX(AP_i`sfguXKg=IIKp0ILyT&5vZ@3_sf)=-6nwvd^f&eWI^#~Q z*I}-Y?0dCy-AaAX;7V7U>2h-zFh%b^%P&4&jSEvn6=_L7RV@#CjdPkBoTafkpcz#i#H97Md zO6}w-W+KIQ(qha>p^K)3l!ESQnPLsOZe6L(yggV~6s`GWeqjF_~Z~Pp2gB^ zbXKQbADwnTG5%|A%fRx$qki049?++`(WqAWM9!Zw_6`eEBb2%H>xb^m*uWu%4eR;~Kz-Z^FqkN7p2^bhHGKn_1CMe#Q5dpUeWu>* zB4N`{W^INvD*F_wZKq#=^2K~(EZ0_nJwHiYsb%xs#ctWI`5VAtm%77;?biN&7;5v~ za?wVcM#t-3nY|ITBlmb$TSDk}Q>Be|<&vb*^+R$s5gA+`GK}>k_B5!RN_4Unj18@r zyECeosyfiKB$D10A<34GNggvVieRLwpE&uzazO+4M$BCRZm6v{X|aY39?-t zOlnjreNpMv^%0)~w<|@>$+oI!!0dEQpa5XH&`sK%Ikm%q0v2+I7+{ql+}q3ul5Pet z${b@vXBEIq==R7!Uq_fYgOorGShBgAl&B!rn-oI{DO&|=kF0q{$y^oiMd5+Du z03suKK~j}!`1~cmp(n9wlvQT+t6#rpch0?W>wd3|IHwhprgPW+>itNr_S`dba@4rK zzk77t`10?49qq&c0b};q*`)x=K($2N1nR^hBQ|9AR>G7xfWxlN=KRt2S>pJD0k?6t zF62Ktm)`vixc#cVc~!o5%9K2B-u*T4=xc$(X9g;eIXkG15BMGrIUWdJReKq&Pgu*t zKmlUwS^=Mxb+4#{^ZmGkNJKag1#MF%{Uu*>3Mk*qkKQJodC}u=jE!myJ97UI)4ppV z{YqGyMhp%FQ-zzKJpTCnrvi*k0lK{}UwL`!jd#BFcR%{#=bkyfpZ?m*ul(h!fA+l} zetSC+TRX_U_Rj}j`_nIf`Qq7h@X^GJGoQp(??wrqY2G1`a&Djf%=e#p=3mZ@|7a4q ze&N~g^?v?k=j>X~Go`~`5p&jfe1H<(|V>l=vUUY zd8s_!TzlBY(?UIzYgM%Y{^qTR@r~OBBb3Bs`o&QlvM~SR^Dn*ne=fcHm(TsD|46n8 z6@{2IT7VTevR}Y&mTDhrY@pSnwyFDN?$%E6S_!|eQQ~?Hq4vspGKn4-Cox>BLJVFP zXD~ECS&dUDHQIj1{P!EwO-VXfwT*N74n1&f2YxH1kypKAxS>Zv*SCpxw`=7*t8OSH z-exjgX-hSE2C@;-!!QJbeT7o)mFw&<3hNYYGB7gyhcCbUAE3?!?Vy>{zM}R2xqtZ8 zC-u`!rMc*w+R)B5hClO8(^P70-t9+z#7LM*ex+I5%D;Ih_|tcG-bwKJC$g=yVX)l& zB@;ncYVl^099Vo~+Ehk{@cHry7~v_F3qXiXfYPg-P*8Sd5UwK=BH*9b|u^`dwEoN5>nkJfsZ*= zyzNHMFV>gOhla-+i_W~CDqH~L%S*S;EI*mD2ffjg@!IPO?i#H8Cxhm$n(a&GpuyI@ zn-7hN5{DimYT5eh_m02+kGV?TF+0pYf66=LpL%US^1Xk$YmnzpyV+O29{B3#t~PO3 zBC3&kC^FT-)G0ZJ`gb{mGO3JWS;Eg+Tzx~{BdJrlK~(|F!IZ6TRqT5bSs6Lanu8C?E>jaT9Na|4KE0;Adc-7

`dBqz$8l;H##T6Is~x>D(WI`?!o zL{X;Qxi7sv@A}}G)E|EG?)Z1MS`Fue-+cAWmji#=+W8OnZ*>0qqr>uq=kzVs_YYn~pe-Hii)#&RN%j*)-_C16955sT1QheAKeD94s zeQ)5E`EG`127cgw;X5;*dm9^vThJ%exu3X~9}c=|E)7AeiwM^5X}xM@N}w|`2?Fla z=v>1LA*0grsb_2x(^?IJ)v8SJCx#b(-+bzMRFHLI*CY3?>84)fT<(rk%wB(|xB41b z!-CI-Lo^VKh%Uh-JZPAmBi&gK}9Dpx~X zr>&|ssu8K{TP$za+MroCGJN`JQy!>>SKnjg#gVJ3tv8pB2?LcVF z0a!rFT{Fle&^BtxAvpgA>J{n`O?*^ukIxOrI5JOM*AwuhGX2!8!u+V1`1 zgS3A3%;U+x(L3+%xzD^BOG{|4poa^^q5!h^bRy|s*jCM>KQfv>Q2-%fkhkl~mo7dq zjKA)_us?M*9nci!FTSY`yr&z>oq0Ye1Mi@~Kg4v`INz#mdc+UQhs`0mZBK8L`DPR> z)@!KSd$M-8kh)tNH5$2&Ccux4l$L4-R6pNf6T+qD+*ci6l=?Dgr$e{nUmTfv_2Q?^ zea|Kz&ZIIN(DvBPaYFK?>`9rIIXd$5=qHKPEpW&#es($axj)!7`L`Nq1d45*nd$%8 zU$p-H?|-P^8SZB0X{!0}{DoIPojZ$S{x?0p^1lDsC*I#kyU%CC-;r)Xf#00{)vuG& zQ?&xJrTvqqfA`hT{BLI(l0KF4z=O{ZKW|lxKn0r07x})#D3rB|z4{47X4)(Z_xn6c zQLP%1$*|Xpg^%FXHDv*RPPa8b*+bqU)DP`fE9YMYbk8cD)qQu|^xg9_umAa16f@5k zpS`#{J$~bbuk8MtFZa(ahKK7bUd_9l^Mi0N^|SY`69GwPXiNSe@1WV4F?;taIOSM= z^k06aI$~dXX+Aa8KXv}8o7Wyh-+J=g*G?xU$roR^M17Jzd24;-ndQ8>ZF_oF#G$o! z@1thjOF>cmRcZTzwEeBL{c_#)pQUX_2LN`dgKBUBz(H3O{|^WN4%Gl);zjWnFaX@? zi(-uwMd>BrFEIdk4+H?O6#%|w2Y|Qz0PrW$aepVh{?8pz6i-C)vNWC)1qB?)0pOc1 z0Du$#;4lNg^|~mYla7(1HARcG-uwNsC?+^jbV|qkaxQ>0ZcqA$C;>!oN&Cf208o+v zfK@v7UnGodOB(mL1?e25WAoDP?@Dt!lID%ur0-(@@E1PmcuExi<0Jt5ZU6w7k|^Q@ zQG8aK-%EM`I4h0)n+5>*u5^AMNn>1)-eWT-ir>-z!28l1|3Di1?J$I@I7>6}REJpX(i0RF4=T1a}$?v<{CbS>VI-gi@)r^*8WZ%XI&P3b*Y zY5S~nNmSA`V5R+EcT1B_N%N8BvLXHae@WN%r_%5KowUynq;vQm(tJlk|Npg+&PR<2 z88FoE)GY=n!u6)?Filyh1MfE0rPuL+pqg#j$Xr3mbtt5d00s%RuIblJB#pu?rAa*{ zA{GSE94m00(q<(6ZveF&jt7{>tI0ttTy_H3o^<<*yrbSg52BF zs5O#)tKC(V%jve-z_RR3UK7l!tVnZHOOzO~SE4$mqQj7p`!&iArgh8hoQiRw9nb{# zmLRdH%j@h$-a%?>c2c7@DX#-m3>G$C`N~v01D)@Ip{pSGQdj4_>c8aZVwr zBn`GfwG>U>GRmu!(winGAvrx+BIiB2b*_~vaQjx7I#{;FHNd0|l!%87g>s-;+OeHf zU=xk`3fdtFGJHV;p-z^EmI`>sU&q-8rYGN5&PIoa8d zXj2(q?@#mhbaQ=)C{Wlj;Y-gM~7o&kZtSL$1SEq$&jHtrEi`Q2<=T^^2B+T%G{OJmrc%))77Clko@fgWlT4BPwiJzZX@ zl1bhuoHE+rY*yCI+stTUIOUXr5|91Crg_wq#g!Fd1IA6}lW=}V*&7)H$Pb%_lf+%0 zJnd?(sJBp!z47ETT?q;Z9Jbw9+v}rsP$)yVwAgF{%76GA9sUJa5ZpYXg<}7W#nI$v+Y+X?$9t7%~ z4w)sXR#_=U{mvfPw=e8`d*`AEXMVnR$?%KObE2P(FW%Oaaph|Fx zfn*yTU_o@A6Ai(Pp-{pXO0=Is!AACYL&lG z93@1FS%SCAxy9PcbC|lz%e5=*ZpSFd)pb`2S2I_v)}x+cxdcDAQ!gO$7Ue1x+nBpU zj42>s9t3HCN>_ywOpxp0k%Csk1Y;ddhZECL{EQqIEVaJrMw4ESaHe5Fk+wr6`~{n zn3zqe6j6JrOK=F@76;qatTh$)Q$Rs(P&c`DRDPGK;SSyoPBu%3(}wSSFd!i6-sHYn z2OD-numA}I$1I$URR!LNOhA;^KZsR6bgED`*+2 z%eZBXlG*L>Amu2sq7Ck+aKNCD_yK~l9!u`4lP;9(H%Oo_n3d;gjkYQ4lNkOsYF%TrxjN@6+{#XJxy=8M|u;+bYkW212}p=ea@*32dwqg50Xv@HZE z#;kf#+Y{YIZ9gbB+c)YR^J$h`LFElEOV#_e^7@BdTU#dN!g3VYQ$MDUi({xo_Kcvk zP~;|#TV1*fa@~|XMx?UncD0JkTQ_^wD844N9?H#vzCHs ztN&@5?h1_qSq-(p3#i>*kBDHq)drO~ehQUykg#XQsQ9@8O~t<&91yU*_ZM4RR`F^E_yx@a?!I!RkN%3@H<#z(D>5qD}yX65>sE2GYBlGm$jQTa0` zA00ajaG>E@}2QpuI^s(QtdMLIfY*(GTZZSnzyW#4YY zSeC0^dJv=_0-$tM*9gr?GADXtmiSNekcQ#OA!BknBRAw0Liv9fEeJy9Ew!k)`4TxHFAeCiqxHUQpBvn>JQ8 z)iZj2#cgW(rNBa7-DJA8vFrWs-M_j_lc}m51w^WpOCMd8P_nH`EH^YxR>X#_imUZ<`AIyG;3d{d@@)THsMQxh|D$*?x@K~TAP>vFj%`V7I4YS2G>zzOg^q2y)uqLw{gpG(aa#5k} zOa7v!GF8?n%9^S~CWA_0WhH_Ts8KH{`WY|=9HOMsQvz zJkaNpF)VXg&k%ZJm-P*zLjz9nsMXRu@%Emb!-5c8gW{b}h4JyiP*I(sJ+y8xF`+l; z6E7B}m03|quCBWK=g|aeHI|wVUEk-0`C=ySIM$zBb%<=e1L<{bbKT;^3s z6DNEeKpME9PXQ#pq*PQXP@S7TyBf{X?QT=annf zHjPqKWle^jz5>f8TaP?YwPBa&MO{%x)O-a52h?oAxkefa(kc}en@#%3MueNXmF&na zo>2f*sf|I@dq#DyJXl*EhC){QNuDo1LuB^{$3@;YiSv%Ze0aZSiE0V^r(_wvx%F|m z9@r1L08hr$g{44*a`uA96=KicnOIlVJqx=#*2F^NVGbSCKf%0A%YN=C(_95NkuvhE zz^7VOn9C~aFaii#3rL>90G)`uWUSZ2aW1dw(71-qHhtKgh%xl%$L&9vFh@Q8X8ZBvPzDLTMlBJ!ZLnS;3?B9^5?$glHSsbL0!cicIs%d?Z_I0JUU*#psf;vkG^cAl4_DT}lY(3BOcv 
z7*iI&gC1iDNQNBcP9x*h^oh7^Ttzkk=|;$p$$ZVOz!U%*f?y2ZXQ|Fbb3r`PaU7aa zRz@;QsLGu*RV03kz9A%aT2O6pN>a);CaAP6V~f=5)Hr~#&8h>y%cF9m%+Rq38cEt@ zhanHDkQ6L>$O4(VthxYzdz6B~qR(=sJf&p-u2Ik+CFw-HP`q|nv8jlK0g>Vx5DpH+ zAW}vsf|MO-O5l;SKALk#)S-PE;9M1l!dH=D9+`#aFg{jQfJ&e!uJ%WDrE&FWmj-Q# zo-rf;4T3TPkCPO2up7xlT63n@~u{0wOA! z7BHJR>WBL_RM)!B*&~l1gamx617d zmKE%uV{vVlN#g*o>|acF(s@^5EOh+L!1gVDtM>Y(-&Z7sF|M&;y zl0?iIjVx7eqrF1_S#E2ACI=K1%|T)8gkv_FqLjD|8FUi6BO!# znbJxsY&C=yQ3eIuZb8Zr5M{Uw1?7tFk(mT!DjC@-VvOD@k@0!BEonelJJRd+eB~6G zAw<1N25MLhq^Wi#wjHePCT))JH2kGwW_E<#e6?+FlB};uH1~N zF36h(f$lnL6w3_wsOFGuNI?P-+_v2(`X3|tSz94T2aK@4$Fb|(br{kWnwc=JG>s*ph1#lA49ZbHSfDx0%6 zD=Mbm5lBg>P&c+bEoIy9?!Y3LDOsQvIp0^%y;!HqoTLZjj8_SsNY~8TX{Le5q#9QY z_GsFj@G-ZM97kJKDeq_1$$+^tzA*u;ep+yx$<3^8&-i9D?;{F?P|W95X{f!9Y(7tZwN<+ zmscf@RIw^ZftGC7SUqr7;&?wZ!ZC!mK+jdik(E9duy+X-1YLwYTZ$oaqi3RoG=zo+ zj|ysq@gxQ&{aj8SXN`+&I~Jsfys6&HsVFt9^7Qqh($Hi=q!ns;dup`KYuw#()uBg? zCRrP7S4r?883*T@B4k2DPNojX9RNzCF_yCb~^564UAttKL_Jd|GJMOdBtaF zG;E^4T?Ii|!Q<3PI24wbfPSl6dS^CP>Q?d^yL?N!ZYt7jJKRw!r7&&R%L_E`qFddf z@lf~`4!t9%4OJwa36O`;e5hHNR#eTjb@#hq{W%=Appea~ogOC!mWby&V|yCs6byg)El zfMnSX!71D^zD)u3q= znZ+8bQPVmalZY+#jaVju>RF4WxuR1faZKOGmqT%l_c(><5RqR|KVwU%BwWrHQ=Ny_ z6POCCmlT^OrBU1yDa36Z`HW48=f~+9)0(u(b6K#0sqtbxoGYXTTT0Pf#WWczqezfG zNe2(rP1|@;RAGzNd$ZF`L#I^LcGWbTA`wD{EqvuG=f#e$JVt|gygZD@Rkle99p=O% zdCV(`aA{QR5TSCLOcAUtOA45z#+l9H;!3006myS6Ri(=EKV-+c5o5}X>3F*3Xk3qU zr7*-5iZ!Q86;4WV-BOdx%2gFQUR9}SD#o!kpT*0R6k^HOvZV&2BE~4-6?m0^rCAm-|Xane|VS2w&;MOB#2!^o@hq#SnqQ{WY}sWnU4x>9#%gK&bfBC9Mb z@=f~j9@>#Jn&o4a4vj`~l6Tj_u7D-F0iO%#N7-Tx!-Kt#CR(MGwbe$E(Nu0I*rh&} z$*CEMP1QgZJ5&->hbmH~rhI`al%%coRdF(fne}jXl{iu6$_7v=1pRTPL{_2Gm|DJC z5y!czyCtm?2C7D|mkp`eV%#T(O}KP*jM9M0I3{UHYsHs2eNL*Zwq`{Wa3T(-BW6Vk zb?kC+rgBtF5wuKn6fXtv*jyfso(s+D<2rF=)jp|b-85GW)z+x0qM;BhfMmMkvUem! 
zKV!Qofapu-+N-m!>0F3RtEw_xF)=sgLyMKf*zAh93db!&%-BH6Fl*;z@Vz!DVp6q~ zJtaAh6!KzpbyC8L#bExyXQ;!Ml@d}@O782arDbJ>+f`6SV@bUxpUbcG=Mz%c3`qjk zLz$|9m8Wr#4MJB_7KE)Py^-g2XBBKgHOv9AWIk~}`q2L8-S)DS=i_bZE zN?jaRsTAdu)$N24z)edem_jOBlB@D%IPI>?xWl@lIKhgQR2Xlff;c;oO6jz%eo2TE zTT4=!i(!>vnGWN$>2z3AWlES!nwM$Pc!AbBxkO)Bgjrd2Orb#ww1{U+G~)JC;b2fZ z8OqBulsr|foGt2RbFC7V_Q=JNq}Zd&iO~#vvl7Q`?*%h-H24U==-lpn_A z3A)rfkv)L+I(S;y8ugr#7n)@U-8I?Mh?-g%@Xl(OyjkAFSlhFqoT8UIlqnvb6;W)V zV2l)5o=-45E>GoZbjgA|pi);8btQMgsTJYHWjXU@bAGm(wTYwo;R#(sD2mxSXj}b^ zuAaw>6#KrcnbB8lDx_ZN!K#Wt{!~g94-_tI)$4<}$}e%3MkAgR=UJDly-?HEUtbIu z(Mo2VP90ZG#K)G@o~WRwBMK?UjwUl`OR%P9akJ zaYISXl#}>1LrrD*6iqRemQsp^PfNtdt8;ETuSbHKrCn%lP?N9qs-h8trHq)ep~;k5 zG-)Yij6;d62roEwZ0RGJP2!dMQiT-MBc0JKOQbz4ltO+9VwuHLR3loJWMm;W%GK8U zj0mgDWHMeZN7uutu~JSejZnS5l*-8-JBAp}>cU#4mPwzCM`u4Mm%?8CVWalGj7(yo zGybU!{Y$jfV%@Y%dNdHTM?|qgOeKGNI@dPGWK#$nET%tW%S+=#jtw7F9GD4U6|6pm z^8E>Mk zRgu!9JC%GxBsS-fSMyqFR>xC37m~SMInSzc6Pk-n)_d?7F0!-(3k_D-wRx&6m*C|Y zlN+y4-Pkg|0YmsXH>>WAx@#3>SR<@0a!HrCj;bO^yV$I!U{ecfcwbu%jj<9F5mJ;y zVVUjiw0E5|s4vXUY4WBb3>Qg>cuc2FEbHkW!Yzf5qLyqS;V5fM3>Rgbg~*xyN;+W3 zAWgPLJ}e0ljU^nkR>To&IJm_l`6Z4a$DWq z+W4G&wPZE|JIAOMhKgi{*PCQmZNS9FXm?7|hebY7)rYdtm4$SWOHaa<_l9JFqi(NJ z@tnWlZq2~kqoOmFooFw4tMU?)vMNm?6NTCQwDfq#xhSTL^fafW2t5f!A`2Buj8toZ z(+R}VN;I!cD|xFs#;=xDmF;SPv`8XD>6Bq8*QM74BAKzqgBb%WxmtPN;~GyF&>^Qq61GUDZw-HYnr>UpCgnHr zKE#2FK^v=((5~h3(0SM$(GUtX@;PUsdLnRCQ=8IWsS!nn4~z#ZIPV;bToXL9p~WSq zg$31GiHKg3lX3a97Iq(t*yWj6Mk)d$Q%r=Eq~j?O>?{g%8BSRY#%7Sx)Viz*@9k~5 z$5WPw;!9+C4D&h@VWV&rI&6yQ_fhkb8sIwV!u6`~ZLpt3DS9q=&EvqC7G#v9xjU8)>;7gI= z(;r1cEHfg{bIRod$Nvcytk)RTL%ewD_)$1+CDq_ixL~JxtB47n>Y6rv91P0gtfVlP z2nV%c`*o;i^3d4SX`M~al-gAF2QO1jaBbdX=8QOxik8l(BO0nY%%c&!5~Smutaz9z zs9VLEQFvuBNT+M+w0<}Z-j_@9{hnqyU+lb@(Wf=R8s3ns(p*)=+Gwm|(r20|F3Ut# zn3ywiSNNcS0d)io{}|PI`=W7v?h49b5`z} z)mRaqJW{f$&T>9i>@53X3PFMlk{|J+seC;xOI4Uoq{Bzav|yTINAluK@APj`rmZQd zDD$~SN>QGiO6=GBv`EQ)zvdoU2sQO&BXN?6ByRVQmX(~BKrp}0Hs+fPg&7xyOMs=jqsf;u2*&<%+ z$xnFsDcF82k2a1uVqGCz>MtLnQgdTs_-qwUHAEtH=|gh0QQB?eEF*H*BB83+;}%{s zEiVX*v75csxMY?t91VrWBK)w}YW|9(F@lE0Z)Ha-nSd_~Z>yT9QIyW((Mms~m~N+K z@-?t!e_AXqvs!nP#X1z0XG>LI=|ks|5o>o=+$TGqaIthbnM?;MYhhd*!#D6Lmsfs@ zG>K}=hI;BU6fwHZ9lmnMxZI-jg*p>mDMj{e-jb^6v$?9R=I+ew!99O<#8)|OnT2`?R)NQj*px=3L{|z{UF=kw!zTr?JBBV z+DUT&)w1G7RWDcu6_RR^q^y(`+tN{azrt}k!__F8^yeE_$%<5~i*tHP2d9lAxKTg& z^{dc*1(7BRGFBCctYdjz^)>bI?4&W;SzB!*xn;$#vIlF2Y%9~)<;OIJbiUjnwI1>p z&+j*|=qg&Ze7!>So?l|T)|3p4u8fVW4oU~|)8W8|xf+XokWRID4d?!-Ro&8IH?KUK z%;sZL*Ut7$s58?+n@c5=Z;-```M6grGHJlPv?hjdJ|*K}CM-(rNTng0$wiN|3a+Z` z>@_0c)4ccnQ3tGNI8>cDD>KiHB+__`J!0i?Mc4{$SZ%M#)tqkWdN(F%n^B(|W+M1GoCM28}mtADS4WkW(W8y{R-6;~t72?-ksWSibJr^2B)vXpb6(e`q+c%p`Bxj9yh z+Ne%>nnOL_v^=NQloXSuA0H}=j2CCsnEgPXW!y@0a47b;Z?z5ft_p}Nxri-k_#j`? 
zSj>lN%V`HwQfrJ37LO)MnmW$YA1lnZh+&)QR3(nDOHkA_)wFh3P(yY_5dSNA>}hENnES4W+pQ6 zrfjO!DXZk8z2#s=T3s@Nu>Ke(=VD@RGz7aq9E4M>w=y#$Wh6yf??o(4%Urt^z*!h%&b2nO0%>jc_T0C$r6ZM;`iWRh^1vyxfnTltE zwWAf7a*h;dv2Ol)x56+w%#^&Pc%73@YM3miRAwfY)SA0|R7qoY`NNT5)T!t!*c%#0 zXAKrH?Ju0lYP59T8T$*d-)?3Cs!9xxDW#f}dHjqNM-%7yp@h09QN`V6 zZA_8i?C2olX^?45^u&3-hAvGdn(Ca5jwUl~jT9P>PZ~9F(xFu)3sxKJGNP8OM>(!l z%NmBFGi}n$%m_w1I!mF}R)?G&mE^6JiUCH)P^(|AsZ#Fgq9a_UbskJ7EuEg}vW23x z7FEk|Dwk5l(R^=h{%{6fBD?TlefMNb$C->A%S$WCxZTs`vY;hRm=5xOGouF#nsQp( zr79yhZN=h~^@+A%P4>o43ieu-#Rjc8(~_As&A2~TN3USREo;L?xkeXdRQaSj#(RqG zh_8a3)a8#y>)GK`uv8n>xFhEAR$U;kFDumDlHX4TO&xhpxUS8j=%w^ajw(S7Qf9Oj zN;!kdbV>(SOC>pT=5$DP1gpf+k~x=a|DZip^LECWW>-~0Gv;z{TQXs?*h|Z(R^pVA zPBoUpN)c8`WD%2v6?IfjVQW$+WWB{Y3&jqnO4a3W>3Pe$<6%dvTX{O!J=0m|nnf+9 zbX~By)hD4^r0GBmcNxQ2QdLcrZ4JENUi3;;Nvqgw@g!4`mcxX!Yo=P63FviaCn{0G z4t-UGA`CY9d=o5^<}>=zK_9{Al4 z&w!aEa@$SoZ)#ru-~*3r+41m}&3%GtrH};jA-u$#gl9hndIv}dQvf{y*F8WdA>yMz ztAGL!z87dSkbpsm`xfv5(@4;>F8mhKH2?{|;GP5w;K9Oj1Ze{TGe-o>E5S(-98v(2 zP;h&$Sd1_DV0wvM2e`q*h2Mgiq>w%iB$@wPNMDF2q${K++=HG7d2Hc1czp)^K+Qk` z&d>r5R0zMAg!F~y1z(WU1%l+Rr*@b~=iU#|S0 za4i2E4xwHN)C_d~A{;{4B96e{iKF@B*gTHS=c1qD5X$co^0Rovsho6f$#40U*sdy_p5;f zJpTgy_1{xpE>%u;c>Zt5=u<=jA3@F+*Bb?_Nq`{H?HBF|Wi&4zzhf$9++~qroHP_xt)HLoUzT5g7al>z( zC4PP5E5yj7JBj$QpAyG!`6aRU{-=rC{Vx&UAf6(wzT-(^$(_#=*U0t~tssNXw*L!n z2rHHeGlLaZFLX2W{fr-TPkslqE8)F#v3(6e_?PVq1oEZ(R08YZ>UHJwWdH0h z$PLS$B^z(tODc_f$ZPjMMLxaZdUD${KP8|2%0H0TxgH_cFME+3d4C&OUGcAE=!MyOEM9k-B&?9Y-t|NMLMnm>G>JahMJhobmhOFyH1EHI^qjec zyq~>`e8{wi{P@uyklVLDP43CxL1Nn7WZmp@@oyU1_O?j$4e$4SrH z=Sg9_|C{aWb3?2kyu~pJrY7_A$Y$_wp%=!rQ5fii`+G0=S3|h&!nF|ZO9y=t2x05t zG;#RF|00eBhX~@UGsNoMZxQdd?IZHr&JjH?NY|P~PC|6}1QEFQePZ1CI??}2Z0){H z|3>wn$xD$$VJOKg8_+1hVh{vT`BcKnohT$LwY`H`I1{@iWE zrqDZUc7Cgd@CM#obKQNL*4Te@mN@b1_tw;Xs3Kxp8;SawH;Cq=9mM5N?jV*@M~H1d zJ4Q^nF(My(nb>ky5Ao>cvuplYx}Tswd2UVUKSji^0(Y*#cJEqac;ostR&E_3V7OF& z{@GUj7k!FiK~E^3hVm8Wsf*<+%#Xh;Ul7Pet{JWyW?mUw`t`koW6{3Ltv~sH2Y0W2XTW;II>?f54tSpW&cOE_LxX*L9~ro-TQ&IOQw@XS@Beb} zTMZq9AH4Y8!4)fy49=cBI#~QiY*6-%mj^{3bPpO|9U9nm?I(k4`=1-QqD?gT5q9Um zGfzA`@bKt{0o&lZ!KHxVZ4ouJ&}@FO-UdPT3<$cfMP1{f-cFe3L$#Wp_X;|~hk$MaB7lVXzn~)*@STPk zHh5AY{(_EOc0o6d>=<6SXA|z_7OwRR@f3htn9bw!>BEIwNK4=*=x#3x7Ylj*Z`Fuw ATL1t6 diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_ir/test_BuiltinOptions.py b/archived/tflite2xcore/tflite2xcore/tests/test_ir/test_BuiltinOptions.py deleted file mode 100644 index cedb9f0d0..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_ir/test_BuiltinOptions.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
- -import pytest -from typing import List, Dict, Any - -from tflite2xcore.xcore_schema import ( - BuiltinOpCodes, - BuiltinOptions, -) - - -# ---------------------------------------------------------------------------- -# PARAMETER VALUES -# ---------------------------------------------------------------------------- - -_BUILTIN_OP_CODE_NONES = [ - BuiltinOpCodes.CUSTOM, - BuiltinOpCodes.DELEGATE, - BuiltinOpCodes.EMBEDDING_LOOKUP, - BuiltinOpCodes.FLOOR, - BuiltinOpCodes.CEIL, - BuiltinOpCodes.HASHTABLE_LOOKUP, - BuiltinOpCodes.LOGISTIC, - BuiltinOpCodes.RELU, - BuiltinOpCodes.RELU_N1_TO_1, - BuiltinOpCodes.RELU6, - BuiltinOpCodes.ROUND, - BuiltinOpCodes.TANH, - BuiltinOpCodes.PRELU, - BuiltinOpCodes.SIN, - BuiltinOpCodes.LOG, - BuiltinOpCodes.SQRT, - BuiltinOpCodes.RSQRT, - BuiltinOpCodes.ELU, -] - -_BUILTIN_OP_CODE_REDUCERS = [ - BuiltinOpCodes.MEAN, - BuiltinOpCodes.REDUCE_MAX, - BuiltinOpCodes.REDUCE_MIN, - BuiltinOpCodes.REDUCE_PROD, - BuiltinOpCodes.REDUCE_ANY, - BuiltinOpCodes.SUM, -] - -_BUILTIN_OP_CODE_POOLS = [ - BuiltinOpCodes.AVERAGE_POOL_2D, - BuiltinOpCodes.L2_POOL_2D, - BuiltinOpCodes.MAX_POOL_2D, -] - - -PARAMS = { - level: { - "builtin_op_code": list(BuiltinOpCodes), - "builtin_option_type": list(BuiltinOptions), - "builtin_op_code_none": _BUILTIN_OP_CODE_NONES, - "builtin_op_code_reducer": _BUILTIN_OP_CODE_REDUCERS, - "builtin_op_code_pool": _BUILTIN_OP_CODE_POOLS, - } - for level in ["extended", "default", "smoke"] -} # type: Dict[str, Dict[str, List[Any]]] - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture -def option_type_map_values() -> List[BuiltinOptions]: - return [BuiltinOptions.from_BuiltinOpCodes(op_code) for op_code in BuiltinOpCodes] - - -# ---------------------------------------------------------------------------- -# TEST FUNCTIONS -# ---------------------------------------------------------------------------- - - -def test_option_type_map(builtin_op_code: BuiltinOpCodes) -> None: - option_type = BuiltinOptions.from_BuiltinOpCodes(builtin_op_code) - assert option_type in BuiltinOptions - if option_type is BuiltinOptions.NONE: - assert builtin_op_code in _BUILTIN_OP_CODE_NONES - if option_type is BuiltinOptions.ReducerOptions: - assert builtin_op_code in _BUILTIN_OP_CODE_REDUCERS - if option_type is BuiltinOptions.Pool2DOptions: - assert builtin_op_code in _BUILTIN_OP_CODE_POOLS - - -def test_option_type_map_values( - option_type_map_values: List[BuiltinOptions], builtin_option_type: BuiltinOptions -) -> None: - assert builtin_option_type in option_type_map_values - - -def test_option_type_map_nones(builtin_op_code_none: BuiltinOpCodes) -> None: - assert ( - BuiltinOptions.from_BuiltinOpCodes(builtin_op_code_none) is BuiltinOptions.NONE - ) - - -def test_option_type_map_reducers(builtin_op_code_reducer: BuiltinOpCodes) -> None: - assert ( - BuiltinOptions.from_BuiltinOpCodes(builtin_op_code_reducer) - is BuiltinOptions.ReducerOptions - ) - - -def test_option_type_map_pools(builtin_op_code_pool: BuiltinOpCodes) -> None: - assert ( - BuiltinOptions.from_BuiltinOpCodes(builtin_op_code_pool) - is BuiltinOptions.Pool2DOptions - ) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_ir/test_serialization.py b/archived/tflite2xcore/tflite2xcore/tests/test_ir/test_serialization.py deleted file mode 100644 index d5288094d..000000000 --- 
a/archived/tflite2xcore/tflite2xcore/tests/test_ir/test_serialization.py +++ /dev/null @@ -1,149 +0,0 @@ -# Copyright 2019-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import os -import tempfile -import pytest - -from pathlib import Path - -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.xcore_schema import ( - ActivationFunctionType, - TensorType, - OperatorCode, - BuiltinOpCodes, - XCOREOpCodes, - Subgraph, -) - -import tensorflow as tf - -BUILTIN_OPERATORS_TEST_FILE = os.path.join( - Path(__file__).parent.absolute(), "builtin_operators.tflite" -) - - -def _test_read_flatbuffer(model): - - assert model.version == 3 - assert len(model.metadata) == 1 - assert len(model.operator_codes) == 6 - - assert len(model.buffers) == 18 - assert len(model.buffers[0].data) == 0 - assert len(model.buffers[4].data) == 128 - - assert len(model.subgraphs) == 1 - subgraph = model.subgraphs[0] - assert len(subgraph.operators) == 10 - assert len(subgraph.tensors) == 19 - assert len(subgraph.inputs) == 1 - assert len(subgraph.outputs) == 1 - assert len(subgraph.intermediates) == len(subgraph.tensors) - len( - subgraph.inputs - ) - len(subgraph.outputs) - - tensor = subgraph.tensors[2] - assert tensor.name == "arm_benchmark/conv2d/Conv2D_bias" - assert tensor.sanitized_name == "arm_benchmark_conv2d_Conv2D_bias" - assert tensor.type is TensorType.INT32 - assert tensor.shape == (32,) - assert len(tensor.buffer.data) == 128 - - operator = subgraph.operators[1] - assert operator.operator_code.code is BuiltinOpCodes.CONV_2D - assert operator.operator_code.version == 3 - - assert ( - operator.builtin_options["fused_activation_function"] - is ActivationFunctionType.RELU - ) - assert len(operator.inputs) == 3 - assert len(operator.outputs) == 1 - assert operator.outputs[0].name == "arm_benchmark/re_lu/Relu" - - -def test_read_flatbuffer(): - model = XCOREModel.read_flatbuffer(BUILTIN_OPERATORS_TEST_FILE) - - _test_read_flatbuffer(model) - - -def test_write_flatbuffer(): - model = XCOREModel.read_flatbuffer(BUILTIN_OPERATORS_TEST_FILE) - - tmp_file = os.path.join(tempfile.mkdtemp(), "test_write_flatbuffer.tflite") - bytes_expected = os.path.getsize(BUILTIN_OPERATORS_TEST_FILE) - bytes_written = model.write_flatbuffer(tmp_file) - - _test_read_flatbuffer(XCOREModel.read_flatbuffer(tmp_file)) - - # assert bytes_written <= bytes_expected - - # make sure it can be read by tensorflow interpreter - interpreter = tf.lite.Interpreter(model_path=tmp_file) - - assert interpreter is not None - - os.remove(tmp_file) - - -def test_custom_options(): - model = XCOREModel() - subgraph = Subgraph(model=model) - - input_tensor = subgraph.create_tensor( - "input_tensor", TensorType.INT16, [1, 5, 5, 4], isinput=True - ) - output_tensor = subgraph.create_tensor( - "output_tensor", TensorType.INT8, [1, 5, 5, 4], isoutput=True - ) - expected_operator = subgraph.create_operator( - OperatorCode(XCOREOpCodes.XC_requantize_16_to_8), - inputs=[input_tensor], - outputs=[output_tensor], - ) - - expected_operator.custom_options = { - "int": 1, - "bool": True, - "float": 1.100000023842, - "string": "test string", - "vector_of_ints": [3, 2, 1], - "vector_of_bools": [True, False], - "vector_of_floats": [1.100000023842, 1.100000023842], - "vector_of_strings": ["str1", "str2", "str3"], - "map": {"one": 1, "two": 2}, - "vector_of_vectors": [[3, 2, 1], [1, 2, 3], [3, 2, 1]], - "vector_of_maps": [ - {"map1": [1, 2, 3]}, - {"map2": [1, 2, 3]}, - {"map3": [1, 2, 3]}, - ], - "enum": 
BuiltinOpCodes.CONV_2D,
-        "vector_of_enums": [BuiltinOpCodes.CONV_2D, BuiltinOpCodes.ADD],
-        "map_of_enums": {"conv_2d": BuiltinOpCodes.CONV_2D, "add": BuiltinOpCodes.ADD},
-    }
-
-    tmp_file = os.path.join(tempfile.mkdtemp(), "test_custom_options.tflite")
-    bytes_written = model.write_flatbuffer(tmp_file)
-
-    assert bytes_written > 0
-
-    model = XCOREModel.read_flatbuffer(tmp_file)
-
-    loaded_operator = model.subgraphs[0].operators[0]
-    loaded_options = loaded_operator.custom_options
-    loaded_options["enum"] = BuiltinOpCodes(loaded_options["enum"])
-    loaded_options["vector_of_enums"] = [
-        BuiltinOpCodes(e) for e in loaded_options["vector_of_enums"]
-    ]
-    loaded_options["map_of_enums"] = {
-        k: BuiltinOpCodes(v) for k, v in loaded_options["map_of_enums"].items()
-    }
-    assert loaded_options == expected_operator.custom_options
-
-
-if __name__ == "__main__":
-    pytest.main()
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_parallelization/test_ChannelGroupSlicePlanner.py b/archived/tflite2xcore/tflite2xcore/tests/test_parallelization/test_ChannelGroupSlicePlanner.py
deleted file mode 100644
index 9540538a8..000000000
--- a/archived/tflite2xcore/tflite2xcore/tests/test_parallelization/test_ChannelGroupSlicePlanner.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import pytest
-
-import numpy as np
-
-from tflite2xcore.parallelization import ChannelGroupSlicePlanner, MAX_THREADS
-
-# ----------------------------------------------------------------------------
-# PARAMETER VALUES
-# ----------------------------------------------------------------------------
-
-MAX_OUTPUT_CHANNELS = 75
-
-PARAMS = {"default": {"num_channels": list(range(1, MAX_OUTPUT_CHANNELS + 1))}}
-
-
-# ----------------------------------------------------------------------------
-# FIXTURES
-# ----------------------------------------------------------------------------
-
-
-def generate_thread_cost_array(max_channels: int = MAX_OUTPUT_CHANNELS) -> np.ndarray:
-    thread_costs = np.zeros((max_channels, MAX_THREADS), dtype=np.float64)
-
-    for num_channels in range(1, max_channels + 1):
-        for num_threads in list(range(1, MAX_THREADS + 1)):
-            planner = ChannelGroupSlicePlanner(
-                num_channels, num_threads=num_threads, forced=True
-            )
-            plan = planner.find_optimal_plan()
-            thread_costs[num_channels - 1, num_threads - 1] = plan.estimate_cost()
-
-    return thread_costs
-
-
-@pytest.fixture(scope="session")  # type: ignore
-def thread_cost_array() -> np.ndarray:
-    return generate_thread_cost_array()
-
-
-# ----------------------------------------------------------------------------
-# TESTS
-# ----------------------------------------------------------------------------
-
-
-def test_channel_coverage(num_channels: int) -> None:
-    planner = ChannelGroupSlicePlanner(num_channels, num_threads=MAX_THREADS)
-    planner.create_candidate_plans()
-    for plan in planner._candidate_plans:
-        coverage_map = np.zeros(num_channels, dtype=bool)
-        for changrp in plan._channel_groups:
-            coverage_map[changrp.begin : changrp.end + 1] = True
-        assert np.all(coverage_map)
-
-
-def test_optimal_thread_count(num_channels: int, thread_cost_array: np.ndarray) -> None:
-    planner = ChannelGroupSlicePlanner(num_channels, num_threads=MAX_THREADS)
-    plan = planner.find_optimal_plan()
-    costs = thread_cost_array[num_channels - 1, :]
-    assert np.min(costs) == plan.estimate_cost()
-    assert np.argmin(costs) == plan._num_threads - 1
-
-
-if __name__ == "__main__":
-    pytest.main()
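The planner tests in this directory all share one pattern: exhaustively precompute the cost of every (problem size, thread count) pair, then assert that find_optimal_plan picks the argmin. A minimal, self-contained sketch of that pattern follows; the cost model here is illustrative only, not the library's.

import numpy as np

MAX_THREADS = 5  # stand-in; the tests take this from tflite2xcore.parallelization

def cost(num_channels: int, num_threads: int) -> float:
    # Toy cost model: fixed per-thread overhead plus the largest per-thread slice.
    largest_slice = -(-num_channels // num_threads)  # ceiling division
    return 10.0 * num_threads + largest_slice

# Precompute every (size, thread count) combination, as generate_thread_cost_array does.
costs = np.array(
    [[cost(c, t) for t in range(1, MAX_THREADS + 1)] for c in range(1, 76)]
)

# The optimal thread count for a given size is then the argmin over the thread
# axis; test_optimal_thread_count asserts the planner's choice agrees with it.
best_threads = costs.argmin(axis=1) + 1
assert best_threads[0] == 1  # one channel never benefits from more threads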
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_parallelization/test_ElementWisePlanner.py b/archived/tflite2xcore/tflite2xcore/tests/test_parallelization/test_ElementWisePlanner.py
deleted file mode 100644
index 94a4fedda..000000000
--- a/archived/tflite2xcore/tflite2xcore/tests/test_parallelization/test_ElementWisePlanner.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import pytest
-
-import numpy as np
-
-from tflite2xcore.parallelization import ElementWisePlanner, MAX_THREADS
-
-# ----------------------------------------------------------------------------
-# PARAMETER VALUES
-# ----------------------------------------------------------------------------
-
-MAX_ELEMENTS = 75
-
-PARAMS = {"default": {"num_elements": list(range(1, MAX_ELEMENTS + 1))}}
-
-
-# ----------------------------------------------------------------------------
-# FIXTURES
-# ----------------------------------------------------------------------------
-
-
-def generate_thread_cost_array(max_elements: int = MAX_ELEMENTS) -> np.ndarray:
-    thread_costs = np.zeros((max_elements, MAX_THREADS), dtype=np.float64)
-
-    for num_elements in range(1, max_elements + 1):
-        for num_threads in list(range(1, MAX_THREADS + 1)):
-            planner = ElementWisePlanner(
-                num_elements,
-                num_threads=num_threads,
-                forced=True,
-                fixed_cost_per_thread=10,
-            )
-            plan = planner.find_optimal_plan()
-            thread_costs[num_elements - 1, num_threads - 1] = plan.estimate_cost()
-
-    return thread_costs
-
-
-@pytest.fixture(scope="session")  # type: ignore
-def thread_cost_array() -> np.ndarray:
-    return generate_thread_cost_array()
-
-
-# ----------------------------------------------------------------------------
-# TESTS
-# ----------------------------------------------------------------------------
-
-
-def test_element_coverage(num_elements: int) -> None:
-    planner = ElementWisePlanner(
-        num_elements, num_threads=MAX_THREADS, fixed_cost_per_thread=10
-    )
-    planner.create_candidate_plans()
-    for plan in planner._candidate_plans:
-        assert num_elements == sum(plan._job_sizes)
-
-
-def test_optimal_thread_count(num_elements: int, thread_cost_array: np.ndarray) -> None:
-    planner = ElementWisePlanner(
-        num_elements, num_threads=MAX_THREADS, fixed_cost_per_thread=10
-    )
-    plan = planner.find_optimal_plan()
-    costs = thread_cost_array[num_elements - 1, :]
-    assert np.min(costs) == plan.estimate_cost()
-    assert np.argmin(costs) == plan._num_threads - 1
-
-
-if __name__ == "__main__":
-    pytest.main()
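The coverage tests here (test_channel_coverage, test_element_coverage, and test_layout_coverage in the next file) all use the same boolean-map idiom: mark everything a candidate plan claims to process and assert nothing was missed. Reduced to its essentials, with (begin, end) pairs standing in for a plan's channel groups (end inclusive, as in the planners):

import numpy as np

def assert_full_coverage(num_elements, slices):
    # Each (begin, end) pair is inclusive of `end`, mirroring the plans' channel groups.
    covered = np.zeros(num_elements, dtype=bool)
    for begin, end in slices:
        covered[begin : end + 1] = True
    assert covered.all(), "plan leaves elements unprocessed"

assert_full_coverage(8, [(0, 3), (4, 7)])      # complete partition: passes
# assert_full_coverage(8, [(0, 3), (5, 7)])    # element 4 uncovered: would fail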
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_parallelization/test_SlicePlanner.py b/archived/tflite2xcore/tflite2xcore/tests/test_parallelization/test_SlicePlanner.py
deleted file mode 100644
index e316f155a..000000000
--- a/archived/tflite2xcore/tflite2xcore/tests/test_parallelization/test_SlicePlanner.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import pytest
-import itertools
-
-import numpy as np
-
-from tflite2xcore.parallelization import SlicePlanner, MAX_THREADS
-
-# ----------------------------------------------------------------------------
-# PARAMETER VALUES
-# ----------------------------------------------------------------------------
-
-MAX_OUTPUT_CHANNELS = 20
-MAX_HEIGHT = MAX_WIDTH = 10
-
-PARAMS = {
-    "default": {
-        "num_channels": list(range(1, MAX_OUTPUT_CHANNELS + 1)),
-        "height": list(range(1, MAX_HEIGHT + 1)),
-        "width": list(range(1, MAX_WIDTH + 1)),
-    }
-}
-
-# ----------------------------------------------------------------------------
-# FIXTURES
-# ----------------------------------------------------------------------------
-
-
-def generate_thread_cost_array(
-    max_channel: int = MAX_OUTPUT_CHANNELS,
-    max_height: int = MAX_HEIGHT,
-    max_width: int = MAX_WIDTH,
-) -> np.ndarray:
-    thread_costs = np.zeros(
-        (max_channel, max_height, max_width, MAX_THREADS), dtype=np.float64
-    )
-
-    for c, y, x in itertools.product(
-        range(max_channel), range(max_height), range(max_width)
-    ):
-        for num_threads in list(range(1, MAX_THREADS + 1)):
-            planner = SlicePlanner(
-                num_channels_out=c + 1,
-                height=y + 1,
-                width=x + 1,
-                num_threads=num_threads,
-                forced=True,
-            )
-            plan = planner.find_optimal_plan()
-            thread_costs[c, y, x, num_threads - 1] = plan.estimate_cost()
-
-    return thread_costs
-
-
-@pytest.fixture(scope="session")  # type: ignore
-def thread_cost_array() -> np.ndarray:
-    return generate_thread_cost_array()
-
-
-# ----------------------------------------------------------------------------
-# TESTS
-# ----------------------------------------------------------------------------
-
-
-def test_layout_coverage(num_channels: int, height: int, width: int) -> None:
-    planner = SlicePlanner(num_channels, height, width, num_threads=MAX_THREADS)
-    planner.create_candidate_plans()
-    for plan in planner._candidate_plans:
-        coverage_map = np.zeros((height, width), dtype=bool)
-        for block in plan._row_col_slices:
-            y_start, y_end = block.top, block.top + block.rows
-            x_start, x_end = block.left, block.left + block.cols
-            coverage_map[y_start:y_end, x_start:x_end] = True
-        assert np.all(coverage_map)
-
-        coverage_map = np.zeros(num_channels, dtype=bool)
-        for changrp in plan._channel_groups:
-            coverage_map[changrp.begin : changrp.end + 1] = True
-        assert np.all(coverage_map)
-
-
-def test_optimal_thread_count(
-    num_channels: int, height: int, width: int, thread_cost_array: np.ndarray
-) -> None:
-    planner = SlicePlanner(num_channels, height, width, num_threads=MAX_THREADS)
-    plan = planner.find_optimal_plan()
-    costs = thread_cost_array[num_channels - 1, height - 1, width - 1, :]
-    assert np.min(costs) == plan.estimate_cost()
-    assert np.argmin(costs) == plan._num_threads - 1
-
-
-if __name__ == "__main__":
-    pytest.main()
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/__init__.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/conftest.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/conftest.py
deleted file mode 100644
index be670b599..000000000
--- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/conftest.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
- -import pytest -import itertools -from typing import List, Dict, Iterator, Tuple, Any - -from tflite2xcore.transformation_passes import ModelTransformationPass -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.xcore_schema import TensorType, ValidOpCodes -from tflite2xcore.converter import CleanupManager - - -# ---------------------------------------------------------------------------- -# HELPERS -# ---------------------------------------------------------------------------- - - -def _make_name_type_pairs( - name: str, types: List[TensorType] -) -> Iterator[Dict[str, TensorType]]: - for n, t in zip(itertools.cycle([name]), types): - yield {n: t} - - -def _test_non_matching_params( - trf_pass: ModelTransformationPass, model: XCOREModel, *, op_idx: int = -1 -) -> None: - assert not trf_pass.match(model.subgraphs[0].operators[op_idx]) - - -# ---------------------------------------------------------------------------- -# PARAMETER VALUES -# ---------------------------------------------------------------------------- - - -NON_INT8_TEST_TYPES = [ - TensorType.UINT8, - TensorType.INT32, - TensorType.FLOAT32, - TensorType.INT16, -] - -NON_INT32_TEST_TYPES = [ - TensorType.INT8, - TensorType.UINT8, - TensorType.INT16, - TensorType.FLOAT32, -] - -NON_FLOAT32_TEST_TYPES = [ - TensorType.INT8, - TensorType.INT16, - TensorType.INT32, - TensorType.UINT8, -] - -ParamsType = Dict[str, Dict[str, List[Any]]] - -PARAMS = { - "extended": { - "input_height": [9, 20, 32], - "input_width": [7, 17, 32], - "input_channels": [4, 16, 32, 36], - }, - "default": { - "input_height": [9, 20], - "input_width": [7, 17], - "input_channels": [4, 16, 32], - }, - "smoke": { - "input_height": [9, 20], - "input_width": [7, 17], - "input_channels": [4, 32], - }, -} # type: ParamsType - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def strides(stride_h: int, stride_w: int) -> Tuple[int, int]: - return (stride_h, stride_w) - - -@pytest.fixture() -def input_size(input_height: int, input_width: int) -> Tuple[int, int]: - return (input_height, input_width) - - -@pytest.fixture() -def input_shape( - input_size: Tuple[int, int], input_channels: int -) -> Tuple[int, int, int]: - return (*input_size, input_channels) - - -# ---------------------------------------------------------------------------- -# TESTS -# ---------------------------------------------------------------------------- - - -def test_matching_params( - trf_pass: ModelTransformationPass, model: XCOREModel, *, op_idx: int = -1 -) -> None: - assert trf_pass.match(model.subgraphs[0].operators[op_idx]) - - -def test_non_matching_tensors( - trf_pass: ModelTransformationPass, - model: XCOREModel, - non_matching_tensors: Dict[str, TensorType], -) -> None: - subgraph = model.subgraphs[0] - for name, type_ in non_matching_tensors.items(): - subgraph.get_tensor(name).type = type_ - _test_non_matching_params(trf_pass, model) - - -def test_replace_mutate( - trf_pass: ModelTransformationPass, model: XCOREModel, new_opcode: ValidOpCodes -) -> None: - # run replacement pass - trf_pass.run(model) - model.sanity_check() - - # clean up dangling op - CleanupManager(model).run_passes() - model.sanity_check() - - # check new op - op_code = model.subgraphs[0].operators[-1].operator_code.code - assert op_code is new_opcode, f"expected: {new_opcode}, got: {op_code}" diff --git 
a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/model_builders.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/model_builders.py deleted file mode 100644 index d2d70b873..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/model_builders.py +++ /dev/null @@ -1,877 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import numpy as np -from typing import Callable, Tuple, Optional, Union -from copy import deepcopy - - -from tflite2xcore.utils import QuantizationTuple -from tflite2xcore.xcore_schema import ( - XCOREModel, - Subgraph, - ActivationFunctionType, - Padding, - TensorType, - OperatorCode, - BuiltinOpCodes, - XCOREOpCodes, -) - -ModelBuilder = Callable[..., XCOREModel] - - -def generate_dummy_data( - shape: Tuple[int, ...], dtype: Union[type, np.dtype] -) -> np.ndarray: - if np.issubdtype(dtype, np.int8): - t_max = np.iinfo(dtype).max - t_range = t_max - np.iinfo(dtype).min - return np.int8(np.arange(np.prod(shape)) % t_range - t_max) - else: - return np.arange(np.prod(shape), dtype=dtype) - - -def build_split(subgraph=None, *, input_shape, tensor_type, axis, num_splits): - assert 0 <= axis < len(input_shape) - assert 1 < num_splits <= input_shape[axis] - assert input_shape[axis] % num_splits == 0 - subgraph = subgraph or Subgraph(model=XCOREModel()) - - input_shape = [1, *input_shape] - tin = subgraph.create_tensor("input", tensor_type, shape=input_shape, isinput=True) - t_axis = subgraph.create_tensor("axis", TensorType.INT32, shape=[]) - t_axis.buffer.data = np.array([axis], dtype=np.int32) - - out_shape = ( - *input_shape[:axis], - int(input_shape[axis] // num_splits), - *input_shape[axis + 1 :], - ) - outputs = [ - subgraph.create_tensor(f"output_{j}", tin.type, out_shape, isoutput=True) - for j in range(num_splits) - ] - op = subgraph.create_operator( - OperatorCode(BuiltinOpCodes.SPLIT), inputs=[t_axis, tin], outputs=outputs - ) - op.builtin_options = {"num_splits": num_splits} - - return subgraph.model - - -def build_dequantize( - subgraph: Optional[Subgraph] = None, - *, - input_shape: Tuple[int, ...], - input_quantization: Optional[QuantizationTuple] = None, -) -> XCOREModel: - subgraph = subgraph or Subgraph(model=XCOREModel()) - - quant = input_quantization or QuantizationTuple(0.12, -35) - input_shape = [1, *input_shape] - qin = subgraph.create_tensor( - "input", - TensorType.INT8, - input_shape, - isinput=True, - quantization={"scale": [quant.scale], "zero_point": [quant.zero_point]}, - ) - - fout = subgraph.create_tensor( - "output_dequantized", TensorType.FLOAT32, qin.shape, isoutput=True - ) - - subgraph.create_operator( - OperatorCode(BuiltinOpCodes.DEQUANTIZE), inputs=[qin], outputs=[fout] - ) - - return subgraph.model - - -def build_quantize( - subgraph: Optional[Subgraph] = None, - *, - input_shape: Tuple[int, ...], - output_quantization: Optional[QuantizationTuple] = None, -) -> XCOREModel: - subgraph = subgraph or Subgraph(model=XCOREModel()) - - input_shape = [1, *input_shape] - tin = subgraph.create_tensor("input", TensorType.FLOAT32, input_shape, isinput=True) - - quant = output_quantization or QuantizationTuple(0.12, -35) - qout = subgraph.create_tensor( - "output_quantized", - TensorType.INT8, - tin.shape, - isoutput=True, - quantization={"scale": [quant.scale], "zero_point": [quant.zero_point]}, - ) - - subgraph.create_operator( - OperatorCode(BuiltinOpCodes.QUANTIZE), inputs=[tin], outputs=[qout] - ) 
-
-    return subgraph.model
-
-
-def build_elementwise_op(builtin_opcode, subgraph=None, *, input_shape, tensor_type):
-    subgraph = subgraph or Subgraph(model=XCOREModel())
-
-    input_shape = [1, *input_shape]
-    quantization = {"scale": [0.35], "zero_point": [0]}
-    tin = subgraph.create_tensor(
-        "input",
-        tensor_type,
-        shape=input_shape,
-        isinput=True,
-        quantization=deepcopy(quantization),
-    )
-    tout = subgraph.create_tensor(
-        "output",
-        tin.type,
-        shape=tin.shape,
-        isoutput=True,
-        quantization=deepcopy(quantization),
-    )
-    subgraph.create_operator(OperatorCode(builtin_opcode), inputs=[tin], outputs=[tout])
-
-    return subgraph.model
-
-
-def build_relu(subgraph=None, **kwargs):
-    return build_elementwise_op(BuiltinOpCodes.RELU, subgraph, **kwargs)
-
-
-def build_relu6(subgraph=None, **kwargs):
-    return build_elementwise_op(BuiltinOpCodes.RELU6, subgraph, **kwargs)
-
-
-def build_tanh(subgraph=None, **kwargs):
-    return build_elementwise_op(BuiltinOpCodes.TANH, subgraph, **kwargs)
-
-
-def build_logistic(subgraph=None, **kwargs):
-    return build_elementwise_op(BuiltinOpCodes.LOGISTIC, subgraph, **kwargs)
-
-
-def build_abs(subgraph=None, **kwargs):
-    return build_elementwise_op(BuiltinOpCodes.ABS, subgraph, **kwargs)
-
-
-def build_mean(subgraph=None, *, input_shape, reduction_dims):
-    subgraph = subgraph or Subgraph(model=XCOREModel())
-
-    input_shape = [1, *input_shape]
-    tin = subgraph.create_tensor(
-        "input",
-        type_=TensorType.INT8,
-        shape=input_shape,
-        isinput=True,
-        quantization={"scale": [0.65], "zero_point": [-12]},
-    )
-    tred = subgraph.create_tensor(
-        "reduction_dims", TensorType.INT32, [len(reduction_dims)]
-    )
-    tout = subgraph.create_tensor(
-        "output",
-        tin.type,
-        [tin.shape[0], tin.shape[3]],
-        isoutput=True,
-        quantization={"scale": [0.42], "zero_point": [-11]},
-    )
-    tred.buffer.data = np.array(reduction_dims, dtype=np.int32)
-    subgraph.create_operator(
-        OperatorCode(BuiltinOpCodes.MEAN), inputs=[tin, tred], outputs=[tout]
-    )
-
-    return subgraph.model
-
-
-def build_XC_avgpool2d_global(subgraph=None, *, input_shape, reduction_dims):
-    subgraph = subgraph or Subgraph(model=XCOREModel())
-
-    input_shape = [1, *input_shape]
-    tin = subgraph.create_tensor(
-        "input", type_=TensorType.INT8, shape=input_shape, isinput=True
-    )
-    tred = subgraph.create_tensor(
-        "reduction_dims", TensorType.INT32, [len(reduction_dims)]
-    )
-    tout = subgraph.create_tensor(
-        "output", tin.type, [tin.shape[0], tin.shape[3]], isoutput=True
-    )
-    tred.buffer.data = np.array(reduction_dims, dtype=np.int32)
-    subgraph.create_operator(
-        OperatorCode(XCOREOpCodes.XC_avgpool2d_global),
-        inputs=[tin, tred],
-        outputs=[tout],
-    )
-
-    return subgraph.model
-
-
-def build_argmax(subgraph=None, *, input_shape, input_type):
-    subgraph = subgraph or Subgraph(model=XCOREModel())
-
-    input_shape = [1, *input_shape]
-    tin = subgraph.create_tensor(
-        "input", type_=input_type, shape=input_shape, isinput=True
-    )
-    tout = subgraph.create_tensor("output", TensorType.INT32, tin.shape, isoutput=True)
-    dim_tensor = subgraph.create_tensor("axis", TensorType.INT32, shape=[])
-    dim_tensor.buffer.data = np.int32([1])
-    subgraph.create_operator(
-        OperatorCode(BuiltinOpCodes.ARG_MAX), inputs=[tin, dim_tensor], outputs=[tout]
-    )
-
-    return subgraph.model
-
-
-def build_pool(
-    builtin_opcode,
-    subgraph=None,
-    *,
-    input_shape,
-    padding,
-    pool_size,
-    strides,
-    fused_activation=ActivationFunctionType.NONE,
-):
-    assert len(strides) == len(pool_size) == 2
-    assert padding in Padding
-    assert fused_activation in [
-        ActivationFunctionType.NONE,
-        ActivationFunctionType.RELU,
-        ActivationFunctionType.RELU6,
-    ]
-    subgraph = subgraph or Subgraph(model=XCOREModel())
-
-    input_shape = [1, *input_shape]
-    output_shape = [  # TODO: fix this: calculate based on strides and pool_size
-        input_shape[0],
-        input_shape[1] // 2,
-        input_shape[2] // 2,
-        input_shape[3],
-    ]
-    quantization = {"scale": [0.35], "zero_point": [0]}
-    tin = subgraph.create_tensor(
-        "input",
-        TensorType.INT8,
-        input_shape,
-        isinput=True,
-        quantization=deepcopy(quantization),
-    )
-    tout = subgraph.create_tensor(
-        "output",
-        tin.type,
-        output_shape,
-        isoutput=True,
-        quantization=deepcopy(quantization),
-    )
-
-    op = subgraph.create_operator(
-        OperatorCode(builtin_opcode), inputs=[tin], outputs=[tout]
-    )
-    op.builtin_options = {
-        "padding": padding,
-        "stride_h": strides[0],
-        "stride_w": strides[1],
-        "filter_height": pool_size[0],
-        "filter_width": pool_size[1],
-        "fused_activation_function": fused_activation,
-    }
-
-    return subgraph.model
-
-
-def build_maxpool(subgraph=None, **kwargs):
-    return build_pool(BuiltinOpCodes.MAX_POOL_2D, subgraph, **kwargs)
-
-
-def build_avgpool(subgraph=None, **kwargs):
-    return build_pool(BuiltinOpCodes.AVERAGE_POOL_2D, subgraph, **kwargs)
-
-
-def build_XC_pool(opcode, subgraph=None, *, input_shape, pool_size, strides):
-    subgraph = subgraph or Subgraph(model=XCOREModel())
-
-    input_shape = [1, *input_shape]
-    output_shape = [
-        input_shape[0],
-        input_shape[1] // 2,
-        input_shape[2] // 2,
-        input_shape[3],
-    ]
-    quantization = {"scale": [0.35], "zero_point": [0]}
-    tin = subgraph.create_tensor(
-        "input",
-        TensorType.INT8,
-        input_shape,
-        isinput=True,
-        quantization=deepcopy(quantization),
-    )
-    tout = subgraph.create_tensor(
-        "output",
-        tin.type,
-        output_shape,
-        isoutput=True,
-        quantization=deepcopy(quantization),
-    )
-
-    op = subgraph.create_operator(OperatorCode(opcode), inputs=[tin], outputs=[tout])
-    op.add_custom_options(
-        pool=[pool_size[0], pool_size[1]], stride=[strides[0], strides[1]]
-    )
-
-    return subgraph.model
-
-
-def build_XC_maxpool2d(subgraph=None, **kwargs):
-    return build_XC_pool(XCOREOpCodes.XC_maxpool2d, subgraph, **kwargs)
-
-
-def build_XC_avgpool2d(subgraph=None, **kwargs):
-    return build_XC_pool(XCOREOpCodes.XC_avgpool2d, subgraph, **kwargs)
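The pool builders above hard-code a halved output shape (hence the TODO in build_pool); for arbitrary pool_size and strides, the VALID output size is ceil((input - pool + 1) / stride) per spatial dimension. A sketch of the general calculation, as a hypothetical helper that is not part of model_builders:

import math

def pooled_output_size(input_hw, pool_size, strides):
    # VALID pooling: the window must fit entirely inside the input.
    return tuple(
        math.ceil((i - p + 1) / s)
        for i, p, s in zip(input_hw, pool_size, strides)
    )

# A 2x2 pool with stride 2 reproduces the halving the builders currently assume:
assert pooled_output_size((8, 8), (2, 2), (2, 2)) == (4, 4)
# ...but the hard-coded shape would be wrong for, e.g., a 3x3 pool with stride 1:
assert pooled_output_size((8, 8), (3, 3), (1, 1)) == (6, 6)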
-
-
-def build_fc(subgraph=None, *, outputs, input_shape, add_batch_dim=True):
-    subgraph = subgraph or Subgraph(model=XCOREModel())
-
-    if add_batch_dim:
-        # TODO unify this behaviour
-        input_shape = [1, *input_shape]
-
-    weight_shape = [outputs, np.prod(input_shape[1:])]
-
-    tin = subgraph.create_tensor(
-        "input",
-        TensorType.INT8,
-        input_shape,
-        isinput=True,
-        quantization={"scale": [0.02874], "zero_point": [-2]},
-    )
-    w = subgraph.create_tensor(
-        "weights",
-        TensorType.INT8,
-        weight_shape,
-        quantization={"scale": [0.00836], "zero_point": [0]},
-    )
-    b = subgraph.create_tensor(
-        "biases",
-        TensorType.INT32,
-        weight_shape[:1],
-        quantization={"scale": [0.00024], "zero_point": [0]},
-    )
-    tout = subgraph.create_tensor(
-        "output",
-        tin.type,
-        shape=[1, weight_shape[0]],
-        isoutput=True,
-        quantization={"scale": [0.11332], "zero_point": [6]},
-    )
-    subgraph.create_operator(
-        OperatorCode(BuiltinOpCodes.FULLY_CONNECTED), inputs=[tin, w, b], outputs=[tout]
-    )
-
-    # add dummy data so that the op can be mutated
-    w.buffer.data = generate_dummy_data(w.shape, np.int8)
-    b.buffer.data = generate_dummy_data(b.shape, np.int32)
-
-    return subgraph.model
-
-
-def build_XC_fc(subgraph=None, *, outputs, input_channels):
-    subgraph = subgraph or Subgraph(model=XCOREModel())
-
-    input_shape = [1, input_channels, 1, 1]
-    weight_shape = [outputs, np.prod(input_shape[1:])]
-    bso_shape = [int(np.ceil(outputs / 16)), 7, 16]
-
-    tin = subgraph.create_tensor(
-        "input",
-        TensorType.INT8,
-        input_shape,
-        isinput=True,
-        quantization={"scale": [0.02874], "zero_point": [-2]},
-    )
-    w = subgraph.create_tensor(
-        "weights",
-        TensorType.INT8,
-        weight_shape,
-        quantization={"scale": [0.00836], "zero_point": [0]},
-    )
-    b = subgraph.create_tensor(
-        "biases",
-        TensorType.INT32,
-        bso_shape,
-        quantization={"scale": [0.00024], "zero_point": [0]},
-    )
-    tout = subgraph.create_tensor(
-        "output",
-        TensorType.INT8,
-        shape=[1, weight_shape[0]],
-        isoutput=True,
-        quantization={"scale": [0.11332], "zero_point": [6]},
-    )
-    subgraph.create_operator(
-        OperatorCode(XCOREOpCodes.XC_fc), inputs=[tin, w, b], outputs=[tout]
-    )
-
-    return subgraph.model
-
-
-def build_XC_requantize_16_to_8(subgraph=None, *, outputs, input_channels):
-    subgraph = subgraph or Subgraph(model=XCOREModel())
-
-    input_shape = [1, input_channels, 1, 1]
-    weight_shape = [outputs, np.prod(input_shape[1:])]
-
-    tin = subgraph.create_tensor(
-        "input",
-        TensorType.INT16,
-        input_shape,
-        isinput=True,
-        quantization={"scale": [0.02874], "zero_point": [-2]},
-    )
-    tout = subgraph.create_tensor(
-        "output",
-        TensorType.INT8,
-        shape=[1, weight_shape[0]],
-        isoutput=True,
-        quantization={"scale": [0.11332], "zero_point": [6]},
-    )
-    subgraph.create_operator(
-        OperatorCode(XCOREOpCodes.XC_requantize_16_to_8), inputs=[tin], outputs=[tout]
-    )
-
-    return subgraph.model
-
-
-def build_intermediate_fc(subgraph=None, *, outputs, input_shape):
-    model = build_fc(subgraph, outputs=outputs, input_shape=input_shape)
-    subgraph = subgraph or model.subgraphs[0]
-
-    subgraph.get_tensor("weights").name = "weights_1"
-    subgraph.get_tensor("biases").name = "biases_1"
-
-    tmid = subgraph.get_tensor("output")
-    tmid.name = "intermediate"
-    subgraph.outputs.remove(tmid)
-
-    return model
-
-
-def build_conv2d(subgraph=None, *, weight_shape, input_size, padding, strides):
-    subgraph = subgraph or Subgraph(model=XCOREModel())
-    assert padding in Padding
-
-    height, width = input_size
-    C_out, K_h, K_w, C_in = weight_shape
-
-    input_shape = [1, height, width, C_in]
-    tin = subgraph.create_tensor(
-        "input",
-        TensorType.INT8,
-        input_shape,
-        isinput=True,
-        quantization={"scale": [0.63], "zero_point": [-5]},
-    )
-    np.random.seed(42)
-    w = subgraph.create_tensor(
-        "weights",
-        TensorType.INT8,
-        weight_shape,
-        quantization={
-            "scale": np.random.uniform(size=(C_out,)).astype(float).tolist(),
-            "zero_point": [0] * C_out,
-        },
-    )
-    b = subgraph.create_tensor(
-        "biases",
-        TensorType.INT32,
-        shape=[C_out],
-        quantization={
-            "scale": [
-                tin.quantization["scale"][0] * s for s in w.quantization["scale"]
-            ],
-            "zero_point": [0] * C_out,
-        },
-    )
-
-    # add dummy data so that the op can be mutated
-    w.buffer.data = generate_dummy_data(w.shape, np.int8)
-    b.buffer.data = generate_dummy_data(b.shape, np.int32)
-
-    if padding is Padding.SAME:
-        # TODO: this is incorrect if stride > 1
-        output_shape = [1, height, width, C_out]
-    elif padding is Padding.VALID:
-        output_shape = [
-            1,
-            int(np.ceil((height - K_h + 1) / strides[0])),
-            int(np.ceil((width - K_w + 1) / strides[1])),
-            C_out,
-        ]
-
-    tout = subgraph.create_tensor("output", tin.type, shape=output_shape, isoutput=True)
-
-    op = subgraph.create_operator(
-        OperatorCode(BuiltinOpCodes.CONV_2D),
inputs=[tin, w, b], outputs=[tout] - ) - op.builtin_options = { - "padding": padding, - "fused_activation_function": ActivationFunctionType.NONE, - "stride_h": strides[0], - "stride_w": strides[1], - "dilation_w_factor": 1, - "dilation_h_factor": 1, - } - - return subgraph.model - - -def build_depthwise_conv2d( - subgraph=None, *, weight_shape, input_size, padding, strides=(1, 1) -): - assert len(strides) == 2 - assert padding in Padding - subgraph = subgraph or Subgraph(model=XCOREModel()) - - # NOTE: weight_shape uses channel order HWIM (following TensorFlow DepthwiseConv) - height, width = input_size - K_h, K_w, C_in, depth_multiplier = weight_shape - C_out = C_in * depth_multiplier - - input_shape = [1, input_size[0], input_size[1], C_in] - weight_shape = [1, K_h, K_w, C_out] - tin = subgraph.create_tensor( - "input", - TensorType.INT8, - input_shape, - isinput=True, - quantization={"scale": [0.48], "zero_point": [15]}, - ) - np.random.seed(42) - w = subgraph.create_tensor( - "weights", - TensorType.INT8, - weight_shape, - quantization={ - "scale": np.random.uniform(size=(C_out,)).astype(float).tolist(), - "zero_point": [0] * C_out, - }, - ) - b = subgraph.create_tensor( - "biases", - TensorType.INT32, - shape=[C_out], - quantization={ - "scale": [ - tin.quantization["scale"][0] * s for s in w.quantization["scale"] - ], - "zero_point": [0] * C_out, - }, - ) - - # add dummy data so that the op can be mutated - w.buffer.data = generate_dummy_data(w.shape, np.int8) - b.buffer.data = generate_dummy_data(b.shape, np.int32) - - if padding is Padding.SAME: - output_shape = [1, height, width, C_out] - elif padding is Padding.VALID: - output_shape = [ - 1, - int(np.ceil((height - K_h + 1) / strides[0])), - int(np.ceil((width - K_w + 1) / strides[1])), - C_out, - ] - tout = subgraph.create_tensor("output", tin.type, output_shape, isoutput=True) - - op = subgraph.create_operator( - OperatorCode(BuiltinOpCodes.DEPTHWISE_CONV_2D), - inputs=[tin, w, b], - outputs=[tout], - ) - op.builtin_options = { - "padding": padding, - "depth_multiplier": depth_multiplier, - "stride_h": strides[0], - "stride_w": strides[1], - "dilation_w_factor": 1, - "dilation_h_factor": 1, - } - - return subgraph.model - - -SpatialPadding = Tuple[Tuple[int, int], Tuple[int, int]] - - -def _calculate_implicit_pads( - strides: Tuple[int, int], input_size: Tuple[int, int], kernel_size: Tuple[int, int] -) -> SpatialPadding: - return tuple( - (0, int(np.ceil((i - k) / s) * s - i + k)) - for s, i, k in zip(strides, input_size, kernel_size) - ) - - -def _calculate_out_size( - spatial_pads: SpatialPadding, - strides: Tuple[int, int], - input_size: Tuple[int, int], - kernel_size: Tuple[int, int], -) -> Tuple[int, int]: - return tuple( - int((i - k + p[0] + p[1]) / s + 1) - for p, s, i, k in zip(spatial_pads, strides, input_size, kernel_size) - ) - - -def build_XC_conv2d(opcode, subgraph=None, *, weight_shape, input_size, strides): - subgraph = subgraph or Subgraph(model=XCOREModel()) - - height, width = input_size - C_out, _, _, C_in = weight_shape - - input_shape = [1, height, width, C_in] - bso_shape = [int(np.ceil(C_out / 16)), 7, 16] - tin = subgraph.create_tensor("input", TensorType.INT8, input_shape, isinput=True) - w = subgraph.create_tensor("weights", TensorType.INT8, weight_shape) - b = subgraph.create_tensor("bso", TensorType.INT16, bso_shape) - - # valid padding - pads = _calculate_implicit_pads(strides, input_size, weight_shape[1:3]) - out_size = _calculate_out_size(pads, strides, input_size, weight_shape[1:3]) - 
output_shape = [1, *out_size, C_out]
-    tout = subgraph.create_tensor("output", tin.type, output_shape, isoutput=True)
-
-    op = subgraph.create_operator(
-        OperatorCode(opcode), inputs=[tin, w, b], outputs=[tout]
-    )
-    op.add_custom_options(pad=(pads[0][0], pads[1][0]), stride=(strides[0], strides[1]))
-
-    return subgraph.model
-
-
-def build_XC_conv2d_deep(subgraph=None, **kwargs):
-    return build_XC_conv2d(XCOREOpCodes.XC_conv2d_deep, subgraph, **kwargs)
-
-
-def build_XC_conv2d_shallowin(subgraph=None, **kwargs):
-    return build_XC_conv2d(XCOREOpCodes.XC_conv2d_shallowin, subgraph, **kwargs)
-
-
-def build_XC_conv2d_1x1(subgraph=None, **kwargs):
-    return build_XC_conv2d(XCOREOpCodes.XC_conv2d_1x1, subgraph, **kwargs)
-
-
-def build_XC_conv2d_depthwise(subgraph=None, *, weight_shape, input_size, strides):
-    subgraph = subgraph or Subgraph(model=XCOREModel())
-
-    height, width = input_size
-    C_in = weight_shape[2]
-
-    input_shape = [1, height, width, C_in]
-    bso_shape = [int(np.ceil(C_in / 16)), 7, 16]
-    tin = subgraph.create_tensor("input", TensorType.INT8, input_shape, isinput=True)
-    w = subgraph.create_tensor("weights", TensorType.INT8, weight_shape)
-    b = subgraph.create_tensor("bso", TensorType.INT16, bso_shape)
-
-    # valid padding
-    pads = _calculate_implicit_pads(strides, input_size, weight_shape[:2])
-    out_size = _calculate_out_size(pads, strides, input_size, weight_shape[:2])
-    output_shape = [1, *out_size, C_in]
-    tout = subgraph.create_tensor("output", tin.type, output_shape, isoutput=True)
-
-    op = subgraph.create_operator(
-        OperatorCode(XCOREOpCodes.XC_conv2d_depthwise),
-        inputs=[tin, w, b],
-        outputs=[tout],
-    )
-    op.add_custom_options(pad=(pads[0][0], pads[1][0]), stride=(strides[0], strides[1]))
-
-    return subgraph.model
-
-
-def build_pad(subgraph=None, *, input_shape, paddings):
-    assert len(paddings) == len(input_shape) == 4
-    for j, p in enumerate(paddings):
-        assert len(p) == 2, f"padding[{j}] is not a pair"
-
-    subgraph = subgraph or Subgraph(model=XCOREModel())
-
-    output_shape = [i + sum(p) for i, p in zip(input_shape, paddings)]
-    tin = subgraph.create_tensor(
-        "unpadded",
-        TensorType.INT8,
-        input_shape,
-        isinput=True,
-        quantization={"scale": [0.09], "zero_point": [-15]},
-    )
-    tout = subgraph.create_tensor("padded", tin.type, output_shape, isoutput=True)
-    p = subgraph.create_tensor("paddings", TensorType.INT32, shape=[4, 2])
-    p.buffer.data = np.int32(paddings)
-
-    subgraph.create_operator(
-        OperatorCode(BuiltinOpCodes.PAD), inputs=[tin, p], outputs=[tout]
-    )
-
-    return subgraph.model
-
-
-def _glue_ops(op1, op2):
-    subgraph = op1.subgraph
-    assert subgraph is op2.subgraph
-
-    old_input, old_output = op2.inputs[0], op1.outputs[0]
-    op2.inputs[0] = old_output
-    subgraph.remove_tensor(old_input)
-    if old_output in subgraph.outputs:
-        subgraph.outputs.remove(old_output)
-    old_output.consumers.append(op2)
-
-
-def _glue_quantize(op):
-    subgraph = op.subgraph
-    intermediate = subgraph.outputs[0]
-    build_quantize(subgraph, input_shape=intermediate.shape)
-    _glue_ops(op, subgraph.operators[-1])
-
-
-def build_consecutive_pads(subgraph=None, *, input_shape, paddings_1, paddings_2):
-    model = build_pad(subgraph, input_shape=input_shape, paddings=paddings_1)
-    subgraph = subgraph or model.subgraphs[0]
-
-    build_pad(subgraph, input_shape=subgraph.outputs[0].shape, paddings=paddings_2)
-
-    pad_1, pad_2 = subgraph.operators[:2]
-    _glue_ops(pad_1, pad_2)
-
-    return model
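_glue_ops above is the splice primitive the compound builders rely on: feed op1's output straight into op2, drop op2's now-dangling input tensor, and demote op1's output from graph output to intermediate. The same logic over plain data structures, with hypothetical Tensor/Op stand-ins for illustration only:

from dataclasses import dataclass, field
from typing import List

@dataclass
class Tensor:
    name: str
    consumers: List["Op"] = field(default_factory=list)

@dataclass
class Op:
    inputs: List[Tensor]
    outputs: List[Tensor]

def glue(op1, op2, tensors, graph_outputs):
    old_input, old_output = op2.inputs[0], op1.outputs[0]
    op2.inputs[0] = old_output            # feed op1's result straight into op2
    tensors.remove(old_input)             # op2's original input is now dangling
    if old_output in graph_outputs:       # op1's output becomes an intermediate
        graph_outputs.remove(old_output)
    old_output.consumers.append(op2)

t_in, t_mid1, t_mid2, t_out = (Tensor(n) for n in ("in", "mid1", "mid2", "out"))
op1, op2 = Op([t_in], [t_mid1]), Op([t_mid2], [t_out])
tensors, outputs = [t_in, t_mid1, t_mid2, t_out], [t_mid1, t_out]
glue(op1, op2, tensors, outputs)
assert op2.inputs[0] is t_mid1 and outputs == [t_out] and t_mid2 not in tensors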
-
-
-def build_non_input_pad(subgraph=None, *, input_shape, paddings):
-    model = build_pad(subgraph, input_shape=input_shape, paddings=paddings)
-    subgraph = subgraph or model.subgraphs[0]
-
-    build_abs(subgraph, input_shape=input_shape, tensor_type=TensorType.INT8)
-
-    pad1, abs1 = subgraph.operators[:2]
-    _glue_ops(abs1, pad1)
-
-    return model
-
-
-def build_reshape(
-    subgraph=None,
-    *,
-    input_shape,
-    output_shape,
-    add_batch_dim=False,
-    input_shape_tensor=True,
-):
-
-    if add_batch_dim:
-        # Prepend dims with batch dimension 1
-        input_shape = [1, *input_shape]
-
-    assert 0 < len(output_shape) < 5
-
-    assert np.prod(input_shape) == np.prod(output_shape), "Inconsistent shapes"
-
-    subgraph = subgraph or Subgraph(model=XCOREModel())
-
-    tin = subgraph.create_tensor(
-        "original_shape", TensorType.INT8, input_shape, isinput=True
-    )
-    tout = subgraph.create_tensor("reshaped", tin.type, output_shape, isoutput=True)
-
-    if input_shape_tensor:
-        p = subgraph.create_tensor("shape", TensorType.INT32, shape=[len(output_shape)])
-        p.buffer.data = np.int32(output_shape)
-        inputs = [tin, p]
-    else:
-        inputs = [tin]
-
-    subgraph.create_operator(
-        OperatorCode(BuiltinOpCodes.RESHAPE),
-        inputs=inputs,
-        outputs=[tout],
-        builtin_options={"new_shape": output_shape},
-    )
-    return subgraph.model
-
-
-def build_fc_with_preceding_reshape(
-    subgraph=None, *, input_shape, fc_outputs, reshaped_input_shape
-):
-    model = build_reshape(
-        subgraph,
-        input_shape=input_shape,
-        output_shape=reshaped_input_shape,
-        add_batch_dim=False,
-    )
-    subgraph = model.subgraphs[0]
-
-    build_fc(
-        model.subgraphs[0],
-        outputs=fc_outputs,
-        input_shape=reshaped_input_shape,
-        add_batch_dim=False,
-    )
-
-    _glue_ops(*subgraph.operators[:2])
-
-    return model
-
-
-def build_fc_with_subsequent_reshape(
-    subgraph=None, *, fc_output_shape, reshaped_output_shape
-):
-    model = build_fc(
-        subgraph,
-        outputs=np.prod(fc_output_shape),
-        input_shape=(1, 4),
-        add_batch_dim=False,
-    )
-    subgraph = model.subgraphs[0]
-
-    build_reshape(
-        subgraph,
-        input_shape=fc_output_shape,
-        output_shape=reshaped_output_shape,
-        add_batch_dim=False,
-    )
-    fc, reshape = subgraph.operators[:2]
-    _glue_ops(fc, reshape)
-
-    fc.outputs[0].shape = fc_output_shape
-
-    return model
-
-
-def build_padded_DW(subgraph=None, *, weight_shape, input_size, paddings, strides):
-    input_shape = [1, *input_size, weight_shape[-1]]
-    model = build_pad(subgraph, input_shape=input_shape, paddings=paddings)
-    subgraph = subgraph or model.subgraphs[0]
-    output_shape = subgraph.outputs[0].shape
-
-    build_XC_conv2d_depthwise(
-        subgraph,
-        weight_shape=weight_shape,
-        input_size=output_shape[1:3],
-        strides=strides,
-    )
-
-    pad_op, conv_op = subgraph.operators[:2]
-    _glue_ops(pad_op, conv_op)
-
-    old_input = conv_op.inputs[0]
-    pad_op.outputs[0].quantization = old_input.quantization
-    pad_op.inputs[0].quantization = old_input.quantization
-
-    return model
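generate_dummy_data at the top of model_builders.py fills int8 buffers with a deterministic ramp wrapped into the representable range; the same arithmetic spelled out (a re-derivation, not new behaviour):

import numpy as np

def int8_ramp(n: int) -> np.ndarray:
    t_max = np.iinfo(np.int8).max              # 127
    t_range = t_max - np.iinfo(np.int8).min    # 255
    # (k mod 255) - 127 keeps every value inside [-127, 127] for k = 0, 1, 2, ...
    return (np.arange(n) % t_range - t_max).astype(np.int8)

assert int8_ramp(3).tolist() == [-127, -126, -125]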
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_add_passes/__init__.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_add_passes/__init__.py
deleted file mode 100644
index 46b12a650..000000000
--- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_add_passes/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# Copyright 2019-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-from ..conftest import PARAMS, test_replace_mutate
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_add_passes/test_ReplaceAddPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_add_passes/test_ReplaceAddPass.py
deleted file mode 100644
index 5240b20a7..000000000
--- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_add_passes/test_ReplaceAddPass.py
+++ /dev/null
@@ -1,142 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import pytest
-from typing import Tuple, Optional
-
-
-from tflite2xcore.transformation_passes import ReplaceAddPass
-from tflite2xcore.xcore_schema import (
-    XCOREModel,
-    Subgraph,
-    BuiltinOpCodes,
-    OperatorCode,
-    TensorType,
-    XCOREOpCodes,
-)
-from . import test_replace_mutate as _test_replace_mutate
-
-
-# ----------------------------------------------------------------------------
-# HELPERS
-# ----------------------------------------------------------------------------
-
-
-def build_add(
-    subgraph: Optional[Subgraph] = None,
-    *,
-    input_shape: Tuple[int, ...],
-    tensor_type: TensorType
-) -> XCOREModel:
-    subgraph = subgraph or Subgraph(model=XCOREModel())
-    input_tensor_0 = subgraph.create_tensor(
-        "input_0",
-        tensor_type,
-        input_shape,
-        isinput=True,
-        quantization={"scale": [0.35], "zero_point": [0]},
-    )
-    input_tensor_1 = subgraph.create_tensor(
-        "input_1",
-        tensor_type,
-        input_shape,
-        isinput=True,
-        quantization={"scale": [0.15], "zero_point": [2]},
-    )
-    output_tensor = subgraph.create_tensor(
-        "output",
-        tensor_type,
-        input_shape,
-        isoutput=True,
-        quantization={"scale": [0.25], "zero_point": [1]},
-    )
-    subgraph.create_operator(
-        OperatorCode(BuiltinOpCodes.ADD),
-        inputs=[input_tensor_0, input_tensor_1],
-        outputs=[output_tensor],
-    )
-    return subgraph.model
-
-
-# ----------------------------------------------------------------------------
-# PARAMETER VALUES
-# ----------------------------------------------------------------------------
-
-PARAMS = {
-    "default": {
-        "tensor_type": [TensorType.INT8],
-        "non_matching_tensor_type": [TensorType.INT16, TensorType.FLOAT32],
-        "input_height": [9, 20],
-        "input_width": [7, 17],
-        "input_channels": [4, 16, 32],
-    }
-}
-
-# ----------------------------------------------------------------------------
-# FIXTURES
-# ----------------------------------------------------------------------------
-
-
-@pytest.fixture()
-def trf_pass() -> ReplaceAddPass:
-    return ReplaceAddPass()
-
-
-@pytest.fixture()
-def new_opcode() -> XCOREOpCodes:
-    return XCOREOpCodes.XC_add_8
-
-
-@pytest.fixture()
-def model(input_shape: Tuple[int, int, int], tensor_type: TensorType) -> XCOREModel:
-    return build_add(input_shape=input_shape, tensor_type=tensor_type)
-
-
-# ----------------------------------------------------------------------------
-# TESTS
-# ----------------------------------------------------------------------------
-
-
-def test_matching_params(trf_pass: ReplaceAddPass, model: XCOREModel) -> None:
-    assert trf_pass.match(model.subgraphs[0].operators[0])
-
-
-def test_non_matching_tensor_type(
-    trf_pass: ReplaceAddPass, non_matching_tensor_type: TensorType, model: XCOREModel
-) -> None:
-    model.subgraphs[0].get_tensor("input_1").type = non_matching_tensor_type
-    assert not trf_pass.match(model.subgraphs[0].operators[0])
-
-
-def test_non_matching_tensor_shape(trf_pass: ReplaceAddPass, model: XCOREModel) -> None:
-    current_shape =
model.subgraphs[0].get_tensor("input_1").shape - new_shape = (current_shape[0] + 1, *current_shape[1:]) - model.subgraphs[0].get_tensor("input_1").shape = new_shape - assert not trf_pass.match(model.subgraphs[0].operators[0]) - - -def test_mutate( - trf_pass: ReplaceAddPass, model: XCOREModel, new_opcode: XCOREOpCodes -) -> None: - # run mutating pass - trf_pass.run(model) - model.sanity_check() - - _test_replace_mutate(trf_pass, model, new_opcode) - - # check operators - subgraph = model.subgraphs[0] - operators = subgraph.operators - assert len(operators) == 1 - op = operators[0] - - # check tensors - assert len(op.inputs) == 3 - assert len(op.outputs) == 1 - assert op.inputs[2].shape == (6,) - assert op.inputs[2].type is TensorType.INT32 - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_constant_propagation_passes/__init__.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_constant_propagation_passes/__init__.py deleted file mode 100644 index e613cb693..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_constant_propagation_passes/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import numpy as np -from typing import Tuple, Optional - -from tflite2xcore.utils import QuantizationTuple -from tflite2xcore.xcore_model import XCOREModel - -from tflite2xcore.tests.test_transformation_passes.model_builders import ( - generate_dummy_data, - build_dequantize, - build_quantize, - _glue_ops, -) - -from ..conftest import ( # pylint: disable=unused-import - PARAMS, - test_matching_params, - _test_non_matching_params, -) - - -def build_quantize_dequantize_identity( - *, input_shape: Tuple[int, ...], quantization: Optional[QuantizationTuple] = None -) -> XCOREModel: - model = build_dequantize(input_shape=input_shape, input_quantization=quantization) - subgraph = model.subgraphs[0] - subgraph.operators[0].inputs[0].buffer.data = generate_dummy_data( - input_shape, np.int8 - ) - - build_quantize(subgraph, input_shape=input_shape, output_quantization=quantization) - _glue_ops(*subgraph.operators) - - subgraph.inputs = [] - - return model diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_constant_propagation_passes/test_ConstantPropagationPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_constant_propagation_passes/test_ConstantPropagationPass.py deleted file mode 100644 index d0425dc4c..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_constant_propagation_passes/test_ConstantPropagationPass.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest -from typing import Tuple - -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.transformation_passes import ConstantPropagationPass -from tflite2xcore.converter import CleanupManager - -from . import build_quantize_dequantize_identity - -from . 
import ( # pylint: disable=unused-import - PARAMS, - test_matching_params as _test_matching_params, - _test_non_matching_params, -) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def trf_pass() -> ConstantPropagationPass: - return ConstantPropagationPass() - - -@pytest.fixture() -def model(input_shape: Tuple[int, int, int]) -> XCOREModel: - return build_quantize_dequantize_identity(input_shape=input_shape) - - -# ---------------------------------------------------------------------------- -# TESTS -# ---------------------------------------------------------------------------- - - -def test_matching_params(trf_pass: ConstantPropagationPass, model: XCOREModel) -> None: - _test_matching_params(trf_pass, model, op_idx=0) - - -def test_non_matching_input_tensor( - trf_pass: ConstantPropagationPass, model: XCOREModel -) -> None: - _test_non_matching_params(trf_pass, model, op_idx=1) - - -def test_mutate(trf_pass: ConstantPropagationPass, model: XCOREModel) -> None: - subgraph = model.subgraphs[0] - operators = subgraph.operators - old_output = subgraph.outputs[0] - assert not old_output.buffer.data - - data = operators[0].inputs[0].buffer.data - - # run mutating pass, then clean up - trf_pass.run(model) - model.sanity_check() - CleanupManager(model).run_passes() - - assert not operators - assert len(subgraph.tensors) == 1 - assert subgraph.tensors[0] is old_output - assert old_output.buffer.data == data - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/__init__.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/conftest.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/conftest.py deleted file mode 100644 index 38729d7c8..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/conftest.py +++ /dev/null @@ -1,218 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
- -import pytest -from typing import Tuple -from copy import deepcopy - -from tflite2xcore.transformation_passes import ModelTransformationPass -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.xcore_schema import Padding, XCOREOpCodes - -from tflite2xcore.tests.test_transformation_passes.model_builders import ModelBuilder -from ..test_fully_connected_passes.conftest import PARAMS as FC_PARAMS -from ..conftest import ( - PARAMS, - _test_non_matching_params, - test_matching_params, - test_non_matching_tensors, - test_replace_mutate as _test_replace_mutate, -) - - -# ---------------------------------------------------------------------------- -# PARAMETER VALUES -# ---------------------------------------------------------------------------- - -PARAMS = deepcopy(PARAMS) - -# NOTE: this is intentional to reduce test counts -PARAMS["extended"]["non_matching_tensors"] = FC_PARAMS["default"][ - "non_matching_tensors" -] -PARAMS["default"]["non_matching_tensors"] = FC_PARAMS["smoke"]["non_matching_tensors"] -PARAMS["smoke"]["non_matching_tensors"] = FC_PARAMS["smoke"]["non_matching_tensors"] - -PARAMS["extended"].update( - { - "kernel_height": [2, 3, 5, 7], - "kernel_width": [2, 3, 5, 7], - "non_matching_input_channels": [3, 9, 15], - "output_channels": PARAMS["extended"]["input_channels"], - "non_matching_output_channels": [3, 9, 15], - "padding": list(Padding), - "stride_h": [1, 2, 3], - "stride_w": [1, 2, 3], - } -) - -PARAMS["default"].update( - { - "kernel_height": [2, 3, 5], - "kernel_width": [2, 3, 5], - "non_matching_input_channels": [3, 15], - "input_channels": PARAMS["default"]["input_channels"][1:], - "output_channels": PARAMS["default"]["input_channels"][1:], - "non_matching_output_channels": [3, 15], - "padding": list(Padding), - "stride_h": [1, 2], - "stride_w": [1, 2], - } -) - -PARAMS["smoke"].update( - { - "kernel_height": [2, 3], - "kernel_width": [2, 3], - "non_matching_input_channels": [9], - "output_channels": PARAMS["smoke"]["input_channels"], - "non_matching_output_channels": [9], - "padding": list(Padding), - "stride_h": [1], - "stride_w": [1], - } -) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def weight_shape( - output_channels: int, kernel_height: int, kernel_width: int, input_channels: int -) -> Tuple[int, int, int, int]: - return [output_channels, kernel_height, kernel_width, input_channels] - - -# ---------------------------------------------------------------------------- -# TESTS -# ---------------------------------------------------------------------------- - - -def test_non_matching_output_channels( - trf_pass: ModelTransformationPass, - build_model: ModelBuilder, - non_matching_output_channels: int, - kernel_height: int, - kernel_width: int, - input_channels: int, - input_size: Tuple[int, int], - padding: Padding, - strides: Tuple[int, int], -) -> None: - model = build_model( - weight_shape=[ - non_matching_output_channels, - kernel_height, - kernel_width, - input_channels, - ], - input_size=input_size, - padding=padding, - strides=strides, - ) - _test_non_matching_params(trf_pass, model) - - -def test_non_matching_kernel_height( - trf_pass: ModelTransformationPass, - build_model: ModelBuilder, - output_channels: int, - non_matching_kernel_height: int, - kernel_width: int, - input_channels: int, - input_size: Tuple[int, int], - padding: Padding, - strides: Tuple[int, int], -) -> None: - model 
= build_model( - weight_shape=[ - output_channels, - non_matching_kernel_height, - kernel_width, - input_channels, - ], - input_size=input_size, - padding=padding, - strides=strides, - ) - _test_non_matching_params(trf_pass, model) - - -def test_non_matching_kernel_width( - trf_pass: ModelTransformationPass, - build_model: ModelBuilder, - output_channels: int, - kernel_height: int, - non_matching_kernel_width: int, - input_channels: int, - input_size: Tuple[int, int], - padding: Padding, - strides: Tuple[int, int], -) -> None: - model = build_model( - weight_shape=[ - output_channels, - kernel_height, - non_matching_kernel_width, - input_channels, - ], - input_size=input_size, - padding=padding, - strides=strides, - ) - _test_non_matching_params(trf_pass, model) - - -def test_non_matching_input_channels( - trf_pass: ModelTransformationPass, - build_model: ModelBuilder, - output_channels: int, - kernel_height: int, - kernel_width: int, - non_matching_input_channels: int, - input_size: Tuple[int, int], - padding: Padding, - strides: Tuple[int, int], -) -> None: - model = build_model( - weight_shape=[ - output_channels, - kernel_height, - kernel_width, - non_matching_input_channels, - ], - input_size=input_size, - padding=padding, - strides=strides, - ) - _test_non_matching_params(trf_pass, model) - - -def test_non_matching_stride_w( - trf_pass: ModelTransformationPass, model: XCOREModel, non_matching_stride_w: int -) -> None: - op = model.subgraphs[0].operators[0] - op.builtin_options["stride_w"] = non_matching_stride_w - _test_non_matching_params(trf_pass, model) - - -def test_non_matching_stride_h( - trf_pass: ModelTransformationPass, model: XCOREModel, non_matching_stride_h: int -) -> None: - op = model.subgraphs[0].operators[0] - op.builtin_options["stride_h"] = non_matching_stride_h - _test_non_matching_params(trf_pass, model) - - -def test_replace_mutate( - trf_pass: ModelTransformationPass, model: XCOREModel, custom_opcode: XCOREOpCodes -) -> None: - _test_replace_mutate(trf_pass, model, custom_opcode) - - # check custom options - custom_options = model.subgraphs[0].operators[-1].custom_options - assert "illegal_params" in custom_options - assert custom_options["illegal_params"] is True diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/test_CanonicalizeSinglePixelConv2DPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/test_CanonicalizeSinglePixelConv2DPass.py deleted file mode 100644 index db92b9eaa..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/test_CanonicalizeSinglePixelConv2DPass.py +++ /dev/null @@ -1,160 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
- -import pytest -import numpy as np -from copy import deepcopy -from typing import Tuple - -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.xcore_schema import Padding, BuiltinOpCodes -from tflite2xcore.transformation_passes import CanonicalizeSinglePixelConv2DPass - -from tflite2xcore.tests.test_transformation_passes.model_builders import ( - build_conv2d, - ModelBuilder, -) - -from .conftest import ( - PARAMS, - _test_non_matching_params, - _test_replace_mutate, - test_matching_params, - test_non_matching_tensors, -) - - -# ---------------------------------------------------------------------------- -# PARAMETER VALUES -# ---------------------------------------------------------------------------- - - -PARAMS = deepcopy(PARAMS) - -for params in PARAMS.values(): - params.update( - { - "input_channels": params["input_channels"] - + params["non_matching_input_channels"], - "output_channels": params["input_channels"] - + params["non_matching_output_channels"], - "input_height": params["kernel_height"] + params["input_height"], - "input_width": params["kernel_width"] + params["input_width"], - } - ) - -PARAMS["extended"].update( - {"input_height_deviation": [1, 2, 4], "input_width_deviation": [1, 2, 5]} -) -PARAMS["default"].update( - {"input_height_deviation": [1, 4], "input_width_deviation": [2, 5]} -) -PARAMS["smoke"].update({"input_height_deviation": [1], "input_width_deviation": [2]}) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def build_model() -> ModelBuilder: - return build_conv2d - - -@pytest.fixture() -def trf_pass() -> CanonicalizeSinglePixelConv2DPass: - return CanonicalizeSinglePixelConv2DPass() - - -@pytest.fixture() -def weight_shape( - output_channels: int, input_size: Tuple[int, int], input_channels: int -) -> Tuple[int, int, int, int]: - return [output_channels, *input_size, input_channels] - - -@pytest.fixture() -def model(weight_shape: Tuple[int, int, int, int]) -> XCOREModel: - return build_conv2d( - weight_shape=weight_shape, - input_size=weight_shape[1:3], - # padding and stride should not matter for this model - # but using this builder guarantees the 1x1 output - padding=Padding.VALID, - strides=(1, 1), - ) - - -# ---------------------------------------------------------------------------- -# TESTS -# ---------------------------------------------------------------------------- - - -def test_mutate(trf_pass: CanonicalizeSinglePixelConv2DPass, model: XCOREModel) -> None: - subgraph = model.subgraphs[0] - old_op = subgraph.operators[0] - - old_input, old_weights, old_bias = old_op.inputs[:3] - old_output = old_op.outputs[0] - - old_weight_shape = old_weights.shape - old_weight_quantization = old_weights.quantization - old_fused_activation = old_op.builtin_options["fused_activation_function"] - - _test_replace_mutate(trf_pass, model, BuiltinOpCodes.FULLY_CONNECTED) - new_op = subgraph.operators[0] - new_weights = new_op.inputs[1] - - # check tensor objects - assert old_input == new_op.inputs[0] - assert old_weights != new_weights - assert old_bias == new_op.inputs[2] - assert old_output == new_op.outputs[0] - - # check weight tensor - new_weight_shape = new_weights.shape - assert len(new_weight_shape) == 2 - assert old_weight_shape[0] == new_weight_shape[0] - assert np.prod(old_weight_shape[1:]) == new_weight_shape[1] - assert old_fused_activation == 
new_op.builtin_options["fused_activation_function"] - assert old_weight_quantization == new_weights.quantization - - -def test_non_matching_input_size( - trf_pass: CanonicalizeSinglePixelConv2DPass, - build_model: ModelBuilder, - weight_shape: Tuple[int, int, int, int], - input_height_deviation: int, - input_width_deviation: int, -) -> None: - input_size = ( - weight_shape[1] + input_height_deviation, - weight_shape[2] + input_width_deviation, - ) - model = build_conv2d( - weight_shape=weight_shape, - input_size=input_size, - # valid padding and strides equal to the input size ensure (1, 1) output - padding=Padding.VALID, - strides=input_size, - ) - _test_non_matching_params(trf_pass, model) - - -def test_non_matching_output_size( - trf_pass: CanonicalizeSinglePixelConv2DPass, - build_model: ModelBuilder, - weight_shape: Tuple[int, int, int, int], -) -> None: - model = build_conv2d( - weight_shape=weight_shape, - input_size=weight_shape[1:3], - padding=Padding.SAME, # this assumes that input is never 1x1 - strides=(1, 1), - ) - _test_non_matching_params(trf_pass, model) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/test_CanonicalizeSingleinDepthwiseConv2DPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/test_CanonicalizeSingleinDepthwiseConv2DPass.py deleted file mode 100644 index 73efa9558..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/test_CanonicalizeSingleinDepthwiseConv2DPass.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from copy import deepcopy - -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.transformation_passes import ( - CanonicalizeSingleinDepthwiseConv2DPass, - LegalizeSingleinConv2DPass, -) - -from .test_ReplaceDepthwiseConv2dPass import ( - weight_shape, - build_model, - model, - test_non_matching_input_channels, - test_non_matching_depth_multiplier, -) -from .conftest import ( - PARAMS, - test_matching_params, - test_non_matching_tensors, -) - - -# ---------------------------------------------------------------------------- -# PARAMETER VALUES -# ---------------------------------------------------------------------------- - -PARAMS = deepcopy(PARAMS) - -for k in PARAMS: - non_matching_input_channels = [ - c - for c in PARAMS[k]["input_channels"] + PARAMS[k]["non_matching_input_channels"] - if c > 1 - ] - PARAMS[k].update( - { - "input_channels": [1], - "non_matching_input_channels": non_matching_input_channels, - "depth_multiplier": PARAMS[k]["output_channels"], - "non_matching_depth_multiplier": PARAMS[k]["non_matching_output_channels"], - } - ) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def trf_pass() -> CanonicalizeSingleinDepthwiseConv2DPass: - return CanonicalizeSingleinDepthwiseConv2DPass() - - -# ---------------------------------------------------------------------------- -# TESTS -# ---------------------------------------------------------------------------- - - -def test_mutate( - trf_pass: CanonicalizeSingleinDepthwiseConv2DPass, model: XCOREModel -) -> None: - # extract reference data - subgraph = model.subgraphs[0] - old_op = subgraph.operators[0] - 
old_weight_shape = old_op.inputs[1].shape - old_bias = old_op.inputs[2] - - # run transformation passes - trf_pass.run(model) - model.sanity_check() - assert len(subgraph.operators) == 1 - - LegalizeSingleinConv2DPass().run(model) - model.sanity_check() - assert len(subgraph.operators) == 1 - - # check operator - op = subgraph.operators[0] - assert len(op.inputs) == 3 - assert len(op.outputs) == 1 - - # check weight shape - new_weight_shape = op.inputs[1].shape - assert new_weight_shape[0] == old_weight_shape[3] - assert new_weight_shape[1:3] == old_weight_shape[1:3] - assert new_weight_shape[3] == old_weight_shape[0] - - # check bias - assert old_bias is op.inputs[2] - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/test_Replace1x1Conv2dPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/test_Replace1x1Conv2dPass.py deleted file mode 100644 index 617235486..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/test_Replace1x1Conv2dPass.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest -from typing import Tuple -from copy import deepcopy - -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.xcore_schema import XCOREOpCodes, Padding -from tflite2xcore.transformation_passes import Replace1x1Conv2dPass - -from tflite2xcore.tests.test_transformation_passes.model_builders import ( - build_conv2d, - ModelBuilder, -) -from .conftest import ( - PARAMS, - test_replace_mutate as test_mutate, - test_matching_params, - test_non_matching_output_channels, - test_non_matching_kernel_height, - test_non_matching_kernel_width, - test_non_matching_input_channels, - test_non_matching_stride_h, - test_non_matching_stride_w, - test_non_matching_tensors, -) - - -# ---------------------------------------------------------------------------- -# PARAMETER VALUES -# ---------------------------------------------------------------------------- - -PARAMS = deepcopy(PARAMS) - -PARAMS["extended"].update( - { - "kernel_width": [1], - "non_matching_kernel_width": [2, 3, 5, 7], - "kernel_height": [1], - "non_matching_kernel_height": [2, 3, 5, 7], - "stride_h": [1], - "non_matching_stride_h": [2, 3], - "stride_w": [1], - "non_matching_stride_w": [2, 3], - } -) - -PARAMS["default"].update( - { - "kernel_width": [1], - "non_matching_kernel_width": [2, 3, 7], - "kernel_height": [1], - "non_matching_kernel_height": [2, 3, 7], - "stride_h": [1], - "non_matching_stride_h": [2, 3], - "stride_w": [1], - "non_matching_stride_w": [2, 3], - } -) - -PARAMS["smoke"].update( - { - "kernel_width": [1], - "non_matching_kernel_width": [2, 3], - "kernel_height": [1], - "non_matching_kernel_height": [2, 3], - "stride_h": [1], - "non_matching_stride_h": [2], - "stride_w": [1], - "non_matching_stride_w": [2], - } -) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def build_model() -> ModelBuilder: - return build_conv2d - - -@pytest.fixture() -def trf_pass() -> Replace1x1Conv2dPass: - return Replace1x1Conv2dPass() - - -@pytest.fixture() -def model( - weight_shape: Tuple[int, int, int, int], - input_size: Tuple[int, int], - padding: Padding, - strides: Tuple[int, int], -) -> 
XCOREModel: - model = build_conv2d( - weight_shape=weight_shape, - input_size=input_size, - padding=padding, - strides=strides, - ) - return model - - -@pytest.fixture() -def custom_opcode() -> XCOREOpCodes: - return XCOREOpCodes.XC_conv2d_1x1 - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/test_ReplaceDeepConv2dPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/test_ReplaceDeepConv2dPass.py deleted file mode 100644 index a4c83f7d5..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/test_ReplaceDeepConv2dPass.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest -from typing import Tuple - -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.xcore_schema import XCOREOpCodes, Padding -from tflite2xcore.transformation_passes import ( - ModelTransformationPass, - ReplaceDeepConv2dPass, -) - -from tflite2xcore.tests.test_transformation_passes.model_builders import ( - build_conv2d, - ModelBuilder, -) -from .conftest import ( - PARAMS, - test_replace_mutate as _test_mutate, - test_matching_params, - test_non_matching_output_channels, - test_non_matching_input_channels, - test_non_matching_tensors, -) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() # type: ignore -def build_model() -> ModelBuilder: - return build_conv2d - - -@pytest.fixture() # type: ignore -def trf_pass() -> ReplaceDeepConv2dPass: - return ReplaceDeepConv2dPass() - - -@pytest.fixture() # type: ignore -def model( - weight_shape: Tuple[int, int, int, int], - input_size: Tuple[int, int], - padding: Padding, - strides: Tuple[int, int], -) -> XCOREModel: - return build_conv2d( - weight_shape=weight_shape, - input_size=input_size, - padding=padding, - strides=strides, - ) - - -@pytest.fixture() # type: ignore -def custom_opcode() -> XCOREOpCodes: - return XCOREOpCodes.XC_conv2d_deep - - -# ---------------------------------------------------------------------------- -# TESTS -# ---------------------------------------------------------------------------- - - -def test_mutate( - trf_pass: ModelTransformationPass, model: XCOREModel, custom_opcode: XCOREOpCodes -) -> None: - subgraph = model.subgraphs[0] - old_op = subgraph.operators[0] - strides = tuple(old_op.builtin_options[f"stride_{ax}"] for ax in ("h", "w")) - - _test_mutate(trf_pass, model, custom_opcode) - - custom_options = subgraph.operators[-1].custom_options - assert "stride" in custom_options - assert custom_options["stride"] == strides - - assert "pad" in custom_options - assert len(custom_options["pad"]) == 2 - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/test_ReplaceDepthwiseConv2dPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/test_ReplaceDepthwiseConv2dPass.py deleted file mode 100644 index d8a2b0168..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/test_ReplaceDepthwiseConv2dPass.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. 
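The single-input-channel canonicalization above asserts a pure axis swap on the stored weights. Assuming the builder stores depthwise weights in TFLite's (1, K_h, K_w, C_in * depth_multiplier) layout, the equivalence is just a transpose; a hypothetical numpy illustration:

    import numpy as np

    K_h, K_w, mult = 3, 3, 16
    dw = np.random.rand(1, K_h, K_w, mult)  # depthwise, single input channel

    # Equivalent regular Conv2D weights: (C_out, K_h, K_w, C_in) with
    # C_out == depth_multiplier and C_in == 1, matching the asserts
    # new[0] == old[3], new[1:3] == old[1:3], new[3] == old[0].
    conv = dw.transpose(3, 1, 2, 0)
    assert conv.shape == (mult, K_h, K_w, 1)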
-# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest -from typing import Tuple -from copy import deepcopy - -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.xcore_schema import XCOREOpCodes, Padding -from tflite2xcore.transformation_passes import ( - ModelTransformationPass, - ReplaceDepthwiseConv2dPass, -) - -from tflite2xcore.tests.test_transformation_passes.model_builders import ( - ModelBuilder, - build_depthwise_conv2d, -) -from .conftest import ( - PARAMS, - _test_non_matching_params, - test_matching_params, - test_non_matching_tensors, -) -from .test_ReplaceDeepConv2dPass import test_mutate - - -# ---------------------------------------------------------------------------- -# PARAMETER VALUES -# ---------------------------------------------------------------------------- - -PARAMS = deepcopy(PARAMS) - -PARAMS["extended"].update( - {"depth_multiplier": [1], "non_matching_depth_multiplier": [2, 5, 16]} -) - -PARAMS["default"].update( - {"depth_multiplier": [1], "non_matching_depth_multiplier": [2, 16]} -) - -PARAMS["smoke"].update({"depth_multiplier": [1], "non_matching_depth_multiplier": [16]}) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def build_model() -> ModelBuilder: - return build_depthwise_conv2d - - -@pytest.fixture() -def trf_pass() -> ReplaceDepthwiseConv2dPass: - return ReplaceDepthwiseConv2dPass() - - -@pytest.fixture() -def weight_shape( - depth_multiplier: int, kernel_height: int, kernel_width: int, input_channels: int -) -> Tuple[int, int, int, int]: - return [kernel_height, kernel_width, input_channels, depth_multiplier] - - -@pytest.fixture() -def model( - weight_shape: Tuple[int, int, int, int], - input_size: Tuple[int, int], - padding: Padding, - strides: Tuple[int, int], -) -> XCOREModel: - return build_depthwise_conv2d( - weight_shape=weight_shape, - input_size=input_size, - padding=padding, - strides=strides, - ) - - -@pytest.fixture() -def custom_opcode() -> XCOREOpCodes: - return XCOREOpCodes.XC_conv2d_depthwise - - -# ---------------------------------------------------------------------------- -# TEST FUNCTIONS -# ---------------------------------------------------------------------------- - - -def test_non_matching_input_channels( - trf_pass: ModelTransformationPass, - build_model: ModelBuilder, - depth_multiplier: int, - kernel_height: int, - kernel_width: int, - non_matching_input_channels: int, - input_size: Tuple[int, int], - padding: Padding, - strides: Tuple[int, int], -) -> None: - model = build_model( - weight_shape=[ - kernel_height, - kernel_width, - non_matching_input_channels, - depth_multiplier, - ], - input_size=input_size, - padding=padding, - strides=strides, - ) - _test_non_matching_params(trf_pass, model) - - -def test_non_matching_depth_multiplier( - trf_pass: ModelTransformationPass, - build_model: ModelBuilder, - non_matching_depth_multiplier: int, - kernel_height: int, - kernel_width: int, - input_channels: int, - input_size: Tuple[int, int], - padding: Padding, - strides: Tuple[int, int], -) -> None: - model = build_model( - weight_shape=[ - kernel_height, - kernel_width, - input_channels, - non_matching_depth_multiplier, - ], - input_size=input_size, - padding=padding, - strides=strides, - ) - _test_non_matching_params(trf_pass, model) - - -if __name__ == "__main__": - pytest.main() diff --git 
a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/test_ReplaceShallowinConv2dPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/test_ReplaceShallowinConv2dPass.py deleted file mode 100644 index d3dd18bd7..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_conv2d_passes/test_ReplaceShallowinConv2dPass.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest -from typing import Tuple -from copy import deepcopy - -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.xcore_schema import XCOREOpCodes, Padding -from tflite2xcore.transformation_passes import ReplaceShallowinConv2dPass - -from tflite2xcore.tests.test_transformation_passes.model_builders import ( - ModelBuilder, - build_conv2d, -) -from .conftest import ( - PARAMS, - _test_non_matching_params, - test_matching_params, - test_non_matching_output_channels, - test_non_matching_input_channels, - test_non_matching_tensors, -) -from .test_ReplaceDeepConv2dPass import test_mutate as _test_mutate - - -# ---------------------------------------------------------------------------- -# PARAMETER VALUES -# ---------------------------------------------------------------------------- - -PARAMS = deepcopy(PARAMS) - -PARAMS["extended"].update( - {"input_channels": list(range(4, 36, 4)), "kernel_width": list(range(1, 9))} -) - -PARAMS["default"].update({"input_channels": [4, 8, 16], "kernel_width": [2, 3, 5]}) - -PARAMS["smoke"].update({"input_channels": [4, 8], "kernel_width": [3, 5]}) - -for k in PARAMS: - all_tails = [ - (kw, cin) - for cin in PARAMS[k]["input_channels"] - for kw in PARAMS[k]["kernel_width"] - ] - PARAMS[k].update( - weight_tail=[t for t in all_tails if t[0] * t[1] <= 32], - non_matching_weight_tail=[t for t in all_tails if t[0] * t[1] > 32], - ) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def build_model() -> ModelBuilder: - return build_conv2d - - -@pytest.fixture() -def trf_pass() -> ReplaceShallowinConv2dPass: - return ReplaceShallowinConv2dPass() - - -@pytest.fixture() -def weight_shape( - output_channels: int, kernel_height: int, weight_tail: Tuple[int, int] -) -> Tuple[int, int, int, int]: - return [output_channels, kernel_height, *weight_tail] - - -@pytest.fixture() -def model( - weight_shape: Tuple[int, int, int, int], - input_size: Tuple[int, int], - padding: Padding, - strides: Tuple[int, int], -) -> XCOREModel: - return build_conv2d( - weight_shape=weight_shape, - input_size=input_size, - padding=padding, - strides=strides, - ) - - -# ---------------------------------------------------------------------------- -# TESTS -# ---------------------------------------------------------------------------- - - -def test_mutate(trf_pass: ReplaceShallowinConv2dPass, model: XCOREModel) -> None: - subgraph = model.subgraphs[0] - K_w = subgraph.operators[0].inputs[1].shape[2] - - _test_mutate(trf_pass, model, custom_opcode=XCOREOpCodes.XC_conv2d_shallowin) - - custom_options = subgraph.operators[-1].custom_options - assert "Kw" in custom_options - assert custom_options["Kw"] == K_w - - -def test_non_matching_weight_tail( - trf_pass: ReplaceShallowinConv2dPass, - build_model: ModelBuilder, - output_channels: int, - kernel_height: int, - 
non_matching_weight_tail: Tuple[int, int], - input_size: Tuple[int, int], - padding: Padding, - strides: Tuple[int, int], -) -> None: - model = build_model( - weight_shape=[output_channels, kernel_height, *non_matching_weight_tail], - input_size=input_size, - padding=padding, - strides=strides, - ) - _test_non_matching_params(trf_pass, model) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_dce_passes/__init__.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_dce_passes/__init__.py deleted file mode 100644 index b01e5a73d..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_dce_passes/__init__.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2019-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.xcore_schema import TensorType - -from tflite2xcore.tests.test_transformation_passes.model_builders import ( - build_tanh, - _glue_ops, -) - -from ..conftest import ( # pylint: disable=unused-import - _test_non_matching_params, - test_matching_params, -) - -# ---------------------------------------------------------------------------- -# HELPERS -# ---------------------------------------------------------------------------- - - -def count_tensors(model: XCOREModel) -> int: - return sum(len(subgraph.tensors) for subgraph in model.subgraphs) - - -def count_operators(model: XCOREModel) -> int: - return sum(len(subgraph.operators) for subgraph in model.subgraphs) - - -def add_dangling_tensor(model: XCOREModel) -> None: - model.subgraphs[0].create_tensor( - "dangling_tensor", TensorType.INT16, shape=[1, 32, 1, 1] - ) - - -def add_dangling_ops(model: XCOREModel) -> None: - subgraph = model.subgraphs[0] - tin, tout = subgraph.inputs[0], subgraph.outputs[0] - - # add first op - build_tanh(subgraph, input_shape=tout.shape, tensor_type=tout.type) - _glue_ops(subgraph.operators[0], subgraph.operators[1]) - dangling_tensor = subgraph.operators[1].outputs[0] - - # add second op - build_tanh( - subgraph, input_shape=dangling_tensor.shape, tensor_type=dangling_tensor.type - ) - _glue_ops(subgraph.operators[1], subgraph.operators[2]) - - # fix inputs and outputs - subgraph.inputs, subgraph.outputs = [tin], [tout] diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_dce_passes/conftest.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_dce_passes/conftest.py deleted file mode 100644 index 96f3631b1..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_dce_passes/conftest.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
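The weight-tail partition above encodes the shallow-input constraint: for XC_conv2d_shallowin a kernel row of int8 weights must fit in one vector, which these tests express as K_w * C_in <= 32. A quick sketch of the same partition (values illustrative):

    kernel_widths = [2, 3, 5]
    input_channels = [4, 8, 16]
    all_tails = [(kw, cin) for cin in input_channels for kw in kernel_widths]

    matching = [t for t in all_tails if t[0] * t[1] <= 32]
    non_matching = [t for t in all_tails if t[0] * t[1] > 32]

    assert (2, 16) in matching      # 32 bytes: exactly fits
    assert (3, 16) in non_matching  # 48 bytes: too wide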
- -import pytest - -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.xcore_schema import TensorType - -from tflite2xcore.tests.test_transformation_passes.model_builders import build_relu - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture()  # type: ignore -def model() -> XCOREModel: - return build_relu(input_shape=[2, 2, 4], tensor_type=TensorType.INT8) diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_dce_passes/dce_composite_test.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_dce_passes/dce_composite_test.py deleted file mode 100644 index 09d5bb127..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_dce_passes/dce_composite_test.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright 2019-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from tflite2xcore.xcore_schema import XCOREModel, Metadata - -from tflite2xcore.transformation_passes import ( - EliminateDeadBuffersPass, - EliminateDeadTensorsPass, - EliminateDeadOperatorsPass, -) - -from . import ( - count_tensors, - count_operators, - add_dangling_ops, - add_dangling_tensor, -) - - -# ---------------------------------------------------------------------------- -# TEST FUNCTIONS -# ---------------------------------------------------------------------------- - - -def test_mutate(model: XCOREModel) -> None: - add_dangling_ops(model) - add_dangling_tensor(model) - Metadata("dummy", model) - num_ops = count_operators(model) - num_tensors = count_tensors(model) - num_buffers = len(model.buffers) - - # add_dangling_ops leaves two dead buffers, remove those first - pass2 = EliminateDeadBuffersPass() - pass2.run(model) - model.sanity_check() - - assert num_ops == count_operators(model) - assert num_tensors == count_tensors(model) - assert num_buffers == len(model.buffers) + 2 - - # this will remove two dead ops, and leave their outputs dangling - EliminateDeadOperatorsPass().run(model) - model.sanity_check() - - assert num_ops == count_operators(model) + 2 - assert num_tensors == count_tensors(model) - assert num_buffers == len(model.buffers) + 2 - - # this cleans up the original dangling tensor, plus the other two left by the previous pass - EliminateDeadTensorsPass().run(model) - model.sanity_check() - - assert num_ops == count_operators(model) + 2 - assert num_tensors == count_tensors(model) + 3 - assert num_buffers == len(model.buffers) + 2 - - # each tensor leaves one dead buffer - pass2 = EliminateDeadBuffersPass() - pass2.run(model) - model.sanity_check() - - assert num_ops == count_operators(model) + 2 - assert num_tensors == count_tensors(model) + 3 - assert num_buffers == len(model.buffers) + 2 + 3 - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_dce_passes/test_EliminateDeadBuffersPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_dce_passes/test_EliminateDeadBuffersPass.py deleted file mode 100644 index 0b3328976..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_dce_passes/test_EliminateDeadBuffersPass.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2019-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
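The composite test above depends on pass order: eliminating dead operators strands their output tensors, and eliminating tensors strands their buffers, so each pass can expose more work for the next. A generic fixpoint loop over such passes might look like this (a sketch reusing the count helpers above, not the actual CleanupManager implementation):

    def run_until_stable(model, passes):
        # repeat all passes until an entire sweep changes nothing
        while True:
            before = (count_operators(model), count_tensors(model), len(model.buffers))
            for p in passes:
                p.run(model)
                model.sanity_check()
            if (count_operators(model), count_tensors(model), len(model.buffers)) == before:
                return model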
- -import pytest - -from tflite2xcore.xcore_schema import XCOREModel, Buffer, Metadata -from tflite2xcore.transformation_passes import EliminateDeadBuffersPass - -from . import add_dangling_tensor - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() # type: ignore -def trf_pass() -> EliminateDeadBuffersPass: - return EliminateDeadBuffersPass() - - -# ---------------------------------------------------------------------------- -# TEST FUNCTIONS -# ---------------------------------------------------------------------------- - - -def test_non_matching(model: XCOREModel, trf_pass: EliminateDeadBuffersPass) -> None: - add_dangling_tensor(model) - Metadata("dummy", model) - num_buffers = len(model.buffers) - trf_pass.run(model) - model.sanity_check() - assert num_buffers == len(model.buffers) - - -def test_mutate_identity(model: XCOREModel, trf_pass: EliminateDeadBuffersPass) -> None: - num_buffers = len(model.buffers) - trf_pass.run(model) - model.sanity_check() - assert num_buffers == len(model.buffers) - - -def test_mutate(model: XCOREModel, trf_pass: EliminateDeadBuffersPass) -> None: - Buffer(model) - Metadata("dummy", model) - num_buffers = len(model.buffers) - trf_pass.run(model) - model.sanity_check() - assert num_buffers == len(model.buffers) + 1 - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_dce_passes/test_EliminateDeadOperatorsPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_dce_passes/test_EliminateDeadOperatorsPass.py deleted file mode 100644 index aa920de18..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_dce_passes/test_EliminateDeadOperatorsPass.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2019-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.transformation_passes import EliminateDeadOperatorsPass - -from . 
import count_operators, add_dangling_ops - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() # type:ignore -def trf_pass() -> EliminateDeadOperatorsPass: - return EliminateDeadOperatorsPass() - - -# ---------------------------------------------------------------------------- -# TEST FUNCTIONS -# ---------------------------------------------------------------------------- - - -def test_mutate_identity( - model: XCOREModel, trf_pass: EliminateDeadOperatorsPass -) -> None: - num_ops = count_operators(model) - trf_pass.run(model) - model.sanity_check() - assert num_ops == count_operators(model) - - -def test_mutate(model: XCOREModel, trf_pass: EliminateDeadOperatorsPass) -> None: - add_dangling_ops(model) - num_ops = count_operators(model) - trf_pass.run(model) - model.sanity_check() - assert num_ops == count_operators(model) + 2 - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_dce_passes/test_EliminateDeadTensorsPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_dce_passes/test_EliminateDeadTensorsPass.py deleted file mode 100644 index 8cfc311e0..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_dce_passes/test_EliminateDeadTensorsPass.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2019-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.transformation_passes import EliminateDeadTensorsPass - -from . import count_tensors, add_dangling_tensor - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() # type:ignore -def trf_pass() -> EliminateDeadTensorsPass: - return EliminateDeadTensorsPass() - - -# ---------------------------------------------------------------------------- -# TEST FUNCTIONS -# ---------------------------------------------------------------------------- - - -def test_mutate_identity(model: XCOREModel, trf_pass: EliminateDeadTensorsPass) -> None: - num_tensors = count_tensors(model) - trf_pass.run(model) - model.sanity_check() - assert num_tensors == count_tensors(model) - - -def test_mutate(model: XCOREModel, trf_pass: EliminateDeadTensorsPass) -> None: - add_dangling_tensor(model) - num_tensors = count_tensors(model) - trf_pass.run(model) - model.sanity_check() - assert num_tensors == count_tensors(model) + 1 - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_fully_connected_passes/__init__.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_fully_connected_passes/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_fully_connected_passes/conftest.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_fully_connected_passes/conftest.py deleted file mode 100644 index 7d61f9d16..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_fully_connected_passes/conftest.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. 
-# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest -import itertools -from copy import deepcopy -from typing import Tuple - -from tflite2xcore.xcore_model import XCOREModel - -from tflite2xcore.tests.test_transformation_passes.model_builders import build_fc -from ..conftest import ( - PARAMS, - _test_non_matching_params, - _make_name_type_pairs, - NON_INT8_TEST_TYPES, - NON_INT32_TEST_TYPES, - test_matching_params, - test_non_matching_tensors, - test_replace_mutate, -) - - -# ---------------------------------------------------------------------------- -# PARAMETER VALUES -# ---------------------------------------------------------------------------- - -_NON_MATCHING_TENSORS = list( - itertools.chain( - _make_name_type_pairs("input", NON_INT8_TEST_TYPES), - _make_name_type_pairs("weights", NON_INT8_TEST_TYPES), - _make_name_type_pairs("biases", NON_INT32_TEST_TYPES), - _make_name_type_pairs("output", NON_INT8_TEST_TYPES), - ) -) - -PARAMS = deepcopy(PARAMS) - -PARAMS["extended"].update( - { - "input_channels": [5, 8, 10, 16, 29, 64], - "outputs": [1, 2, 10, 16, 29, 100], - "non_matching_tensors": _NON_MATCHING_TENSORS, - } -) - -PARAMS["default"].update( - { - "input_channels": [5, 10, 29, 64], - "outputs": [2, 10, 16, 100], - "non_matching_tensors": _NON_MATCHING_TENSORS[::2], - } -) - -PARAMS["smoke"].update( - { - "input_channels": [5, 29], - "outputs": [2, 10], - "non_matching_tensors": _NON_MATCHING_TENSORS[::4], - } -) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def model(input_shape: Tuple[int, int, int], outputs: int) -> XCOREModel: - return build_fc(input_shape=input_shape, outputs=outputs) diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_fully_connected_passes/test_LegalizeXCFullyConnectedPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_fully_connected_passes/test_LegalizeXCFullyConnectedPass.py deleted file mode 100644 index de3a47d01..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_fully_connected_passes/test_LegalizeXCFullyConnectedPass.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
- -import pytest -import numpy as np - -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.transformation_passes import ( - ReplaceFullyConnectedPass, - LegalizeXCFullyConnectedPass, -) - -from .conftest import PARAMS - - -# ---------------------------------------------------------------------------- -# TEST FUNCTIONS -# ---------------------------------------------------------------------------- - - -def test_mutate(model: XCOREModel) -> None: - # extract original parameters - subgraph = model.subgraphs[0] - op = subgraph.operators[0] - - weight_shape_old = op.inputs[1].shape - assert len(weight_shape_old) == 2 - dim_out, dim_in = weight_shape_old - - bias_shape_old = op.inputs[2].shape - assert len(bias_shape_old) == 1 - assert bias_shape_old[0] == dim_out - - # run replacement pass - ReplaceFullyConnectedPass().run(model) - model.sanity_check() - assert len(subgraph.operators) == 1 - - # run legalization pass - LegalizeXCFullyConnectedPass().run(model) - model.sanity_check() - assert len(subgraph.operators) == 1 - new_op = subgraph.operators[0] - assert len(new_op.inputs) == 3 - - # check weight tensors - weight_shape_new = new_op.inputs[1].shape - assert len(weight_shape_new) == 2 - assert weight_shape_new[0] == dim_out - assert weight_shape_new[1] == int(np.ceil(dim_in / 4)) * 4 - - # check bias/scale/offset tensor - bso_shape = new_op.inputs[2].shape - assert len(bso_shape) == 3 - assert bso_shape[0] == int(np.ceil(dim_out / 16)) - assert bso_shape[1] == 7 - assert bso_shape[2] == 16 - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_fully_connected_passes/test_ReplaceFullyConnectedPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_fully_connected_passes/test_ReplaceFullyConnectedPass.py deleted file mode 100644 index 680139a12..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_fully_connected_passes/test_ReplaceFullyConnectedPass.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from tflite2xcore.xcore_schema import XCOREOpCodes -from tflite2xcore.transformation_passes import ReplaceFullyConnectedPass - -from .conftest import ( - PARAMS, - test_matching_params, - test_non_matching_tensors, - test_replace_mutate as test_mutate, -) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def trf_pass() -> ReplaceFullyConnectedPass: - return ReplaceFullyConnectedPass() - - -@pytest.fixture() -def new_opcode() -> XCOREOpCodes: - return XCOREOpCodes.XC_fc - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/__init__.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/__init__.py deleted file mode 100644 index 017f84a96..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/__init__.py +++ /dev/null @@ -1,259 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
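The shape asserts in test_mutate above come down to ceiling-rounding: the legalized weight row is padded to a multiple of 4 bytes, and the bias/scale/offset tensor carries one (7, 16) block per group of 16 outputs. For example (illustrative values):

    import math

    dim_in, dim_out = 29, 10
    assert math.ceil(dim_in / 4) * 4 == 32  # padded weight row length
    assert math.ceil(dim_out / 16) == 1     # number of (7, 16) BSO blocks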
- -import numpy as np -from copy import deepcopy -from typing import Tuple, Optional, Dict, Any - -from tflite2xcore.transformation_passes.lce_passes import ( - ReplaceBconv2DPass, - XC_BCONV2D_OPCODES, -) -from tflite2xcore.transformation_passes import ModelTransformationPass -from tflite2xcore.xcore_schema import ( - XCOREModel, - Subgraph, - TensorType, - ActivationFunctionType, - Padding, - OperatorCode, - ValidOpCodes, - ExternalOpCodes, - XCOREOpCodes, -) -from tflite2xcore.utils import calculate_same_output_size, calculate_valid_output_size - -from tflite2xcore.tests.test_transformation_passes.model_builders import ( - generate_dummy_data, -) - -from ..conftest import ParamsType -from ..conftest import ( # pylint: disable=unused-import - _make_name_type_pairs, - _test_non_matching_params, - test_matching_params, - test_non_matching_tensors, - test_replace_mutate as _test_mutate, -) -from ..test_conv2d_passes.conftest import test_replace_mutate as test_conv2d_mutate -from ..test_conv2d_passes.conftest import ( # pylint: disable=unused-import - PARAMS, - test_non_matching_input_channels, - test_non_matching_output_channels, -) - - -# ---------------------------------------------------------------------------- -# PARAMETER VALUES -# ---------------------------------------------------------------------------- - - -PARAMS = deepcopy(PARAMS) - - -def update_lce_params(PARAMS: ParamsType) -> ParamsType: - for key in ( - "input_channels", - "non_matching_input_channels", - "output_channels", - "non_matching_output_channels", - ): - PARAMS["default"][key] = PARAMS["extended"][key][:-1] - PARAMS["smoke"][key] = PARAMS["default"][key][:-1] - non_matching_tensors = PARAMS["extended"]["non_matching_tensors"][::2] - PARAMS["default"]["non_matching_tensors"] = non_matching_tensors - PARAMS["smoke"]["non_matching_tensors"] = non_matching_tensors - - return PARAMS - - -PARAMS["extended"].update( - {"input_channels": [32, 128, 256], "non_matching_input_channels": [48, 130, 245]} -) - -# ---------------------------------------------------------------------------- -# MODEL BUILDERS -# ---------------------------------------------------------------------------- - - -def build_LceQuantize( - subgraph: Optional[Subgraph] = None, - *, - input_shape: Tuple[int, int, int], - input_tensor_type: TensorType = TensorType.INT8, -) -> XCOREModel: - subgraph = subgraph or Subgraph(model=XCOREModel()) - height, width, channels = input_shape - input_shape = (1, height, width, channels) - output_shape = (1, height, width, int(np.ceil(channels / 32))) - - tin = subgraph.create_tensor("input", input_tensor_type, input_shape, isinput=True) - tout = subgraph.create_tensor( - "output", TensorType.INT32, output_shape, isoutput=True - ) - - subgraph.create_operator( - OperatorCode(ExternalOpCodes.LceQuantize), inputs=[tin], outputs=[tout] - ) - - return subgraph.model - - -def build_bconv2d( - subgraph: Optional[Subgraph] = None, - *, - weight_shape: Tuple[int, int, int, int], - input_size: Tuple[int, int], - padding: Optional[Padding], - strides: Tuple[int, int], - opcode: ValidOpCodes, - output_tensor_type: TensorType = TensorType.INT8, -) -> XCOREModel: - subgraph = subgraph or Subgraph(model=XCOREModel()) - - # the given shapes are not bitpacked (i.e. 
true channel counts) - # so we bitpack them - C_out, _, _, C_in = weight_shape - bitpacked_input_channels = int(np.ceil(C_in / 32)) - weight_shape = (*weight_shape[:3], bitpacked_input_channels) - - # create input tensors - input_shape = [1, *input_size, bitpacked_input_channels] - tin = subgraph.create_tensor("input", TensorType.INT32, input_shape, isinput=True) - - w = subgraph.create_tensor("weights", TensorType.INT32, weight_shape) - w.buffer.data = generate_dummy_data(w.shape, np.int32) - - input_tensors = [tin, w] - if output_tensor_type is TensorType.INT32: - output_threshold = subgraph.create_tensor( - "output_threshold", TensorType.INT32, weight_shape[:1] - ) - output_threshold.buffer.data = generate_dummy_data( - output_threshold.shape, np.int32 - ) - - input_tensors.append(output_threshold) - - output_quantization = None - elif output_tensor_type is TensorType.INT8: - post_act_params: Dict[str, Any] = {"shape": weight_shape[:1]} - if opcode in XC_BCONV2D_OPCODES: - post_act_params["type_"] = TensorType.INT16 - dummy_data = generate_dummy_data(post_act_params["shape"], np.int16) - else: - post_act_params["type_"] = TensorType.FLOAT32 - dummy_data = generate_dummy_data(post_act_params["shape"], np.float32) - - post_act_mult = subgraph.create_tensor("post_act_mult", **post_act_params) - post_act_mult.buffer.data = dummy_data - - post_act_bias = subgraph.create_tensor("post_act_bias", **post_act_params) - post_act_bias.buffer.data = dummy_data - - input_tensors.extend([post_act_mult, post_act_bias]) - - output_quantization = {"scale": [0.46], "zero_point": [-54]} - else: - raise ValueError( - f"output_tensor_type must be {TensorType.INT32} or {TensorType.INT8}" - ) - - # check padding and determine output size - if padding is Padding.SAME: - output_size = calculate_same_output_size(input_size, strides) - else: - if padding is None: - assert opcode in XC_BCONV2D_OPCODES - elif padding is not Padding.VALID: - raise ValueError(f"Unsupported padding: {padding}") - output_size = calculate_valid_output_size( - input_size, strides, weight_shape[1:3] - ) - - tout = subgraph.create_tensor( - "output", - output_tensor_type, - shape=(1, *output_size, C_out), - isoutput=True, - quantization=output_quantization, - ) - - # create custom options - custom_options = {"padding": padding} if padding else {} - if opcode is ExternalOpCodes.LceBconv2d: - custom_options.update( - { - "channels_in": C_in, - "fused_activation_function": ActivationFunctionType.NONE, - "stride_height": strides[0], - "stride_width": strides[1], - "dilation_width_factor": 1, - "dilation_height_factor": 1, - } - ) - else: - custom_options["stride"] = strides - - # create operator - subgraph.create_operator( - OperatorCode(opcode), - inputs=input_tensors, - outputs=[tout], - custom_options=custom_options, - ) - - return subgraph.model - - -def build_lceBconv2d( - subgraph: Optional[Subgraph] = None, *, padding: Padding, **kwargs -) -> XCOREModel: - return build_bconv2d( - subgraph, padding=padding, opcode=ExternalOpCodes.LceBconv2d, **kwargs - ) - - -def build_XC_bconv2d( - subgraph: Optional[Subgraph] = None, - *, - opcode: XCOREOpCodes = XCOREOpCodes.XC_bconv2d_int8, - padding: Optional[Padding] = None, - **kwargs, -) -> XCOREModel: - return build_bconv2d(subgraph, padding=padding, opcode=opcode, **kwargs) - - -# ---------------------------------------------------------------------------- -# TESTS -# ---------------------------------------------------------------------------- - - -def test_mutate( - trf_pass: 
ModelTransformationPass, model: XCOREModel, new_opcode: XCOREOpCodes -) -> None: - subgraph = model.subgraphs[0] - assert len(subgraph.operators) == 1 - - _test_mutate(trf_pass, model, new_opcode) - - -def test_bconv2d_mutate( - trf_pass: ReplaceBconv2DPass, model: XCOREModel, new_opcode: XCOREOpCodes -) -> None: - subgraph = model.subgraphs[0] - operators = subgraph.operators - op = operators[-1] - strides = op.custom_options["stride_height"], op.custom_options["stride_width"] - padding = op.custom_options["padding"] - - test_conv2d_mutate(trf_pass, model, new_opcode) - - assert len(operators) == 1 - - new_op = operators[-1] - assert "illegal_params" in new_op.custom_options - assert "stride" in new_op.custom_options - assert strides == new_op.custom_options["stride"] - assert "padding" in new_op.custom_options - assert padding == new_op.custom_options["padding"] diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/conftest.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/conftest.py deleted file mode 100644 index 42916a8ea..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/conftest.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from . import build_lceBconv2d -from ..test_conv2d_passes.conftest import weight_shape # pylint: disable=unused-import - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def build_model(): - return build_lceBconv2d diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_LegalizeBconv2dBitpackedDeepInPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_LegalizeBconv2dBitpackedDeepInPass.py deleted file mode 100644 index cbcb3c8b9..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_LegalizeBconv2dBitpackedDeepInPass.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
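The binary builders above pack 32 one-bit channels into each int32 word, which is where the np.ceil(channels / 32) terms come from, and why channel counts that are not multiples of 32 appear as non-matching parameters. A small sketch:

    import math

    def bitpacked_words(channels: int) -> int:
        # one int32 word holds 32 binary channels
        return math.ceil(channels / 32)

    assert [bitpacked_words(c) for c in (32, 128, 256)] == [1, 4, 8]
    assert all(c % 32 != 0 for c in (48, 130, 245))  # these need padding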
-import pytest - -from tflite2xcore.transformation_passes import ( - LegalizeBconv2dBitpackedDeepInPass, - ReplaceBconv2DBitpackedDeepInPass, -) - -from .test_LegalizeBconv2dBitpackedPass import ( # pylint: disable=unused-import - test_mutate, -) -from .test_ReplaceBconv2DBitpackedDeepInPass import ( # pylint: disable=unused-import - model, - new_opcode, - PARAMS, -) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() # type: ignore -def replacement_pass() -> ReplaceBconv2DBitpackedDeepInPass: - return ReplaceBconv2DBitpackedDeepInPass() - - -@pytest.fixture() # type: ignore -def legalization_pass() -> LegalizeBconv2dBitpackedDeepInPass: - return LegalizeBconv2dBitpackedDeepInPass() - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_LegalizeBconv2dBitpackedPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_LegalizeBconv2dBitpackedPass.py deleted file mode 100644 index 137f3e36c..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_LegalizeBconv2dBitpackedPass.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. -import pytest -import numpy as np - -from tflite2xcore.utils import VECTOR_SIZE_WORDS, WORD_SIZE_BITS -from tflite2xcore.converter import CleanupManager -from tflite2xcore.transformation_passes import ( - LegalizeBconv2dBitpackedPass, - ReplaceBconv2DBitpackedPass, -) -from tflite2xcore.transformation_passes.lce_passes import FILLER -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.xcore_schema import TensorType, XCOREOpCodes - -from .test_ReplaceBconv2DBitpackedPass import ( # pylint: disable=unused-import - model, - new_opcode, - PARAMS, -) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() # type: ignore -def replacement_pass() -> ReplaceBconv2DBitpackedPass: - return ReplaceBconv2DBitpackedPass() - - -@pytest.fixture() # type: ignore -def legalization_pass() -> LegalizeBconv2dBitpackedPass: - return LegalizeBconv2dBitpackedPass() - - -# ---------------------------------------------------------------------------- -# TESTS -# ---------------------------------------------------------------------------- - - -def test_mutate( - replacement_pass: ReplaceBconv2DBitpackedPass, - legalization_pass: LegalizeBconv2dBitpackedPass, - model: XCOREModel, - new_opcode: XCOREOpCodes, -) -> None: - subgraph = model.subgraphs[0] - - # run replacement pass - replacement_pass.mutate(subgraph.operators[0]) - CleanupManager(model).run_passes() - - bconv2d_op = subgraph.operators[0] - - op_code = bconv2d_op.operator_code.code - assert op_code is new_opcode, f"expected: {new_opcode}, got: {op_code}" - - old_weights = bconv2d_op.inputs[1] - old_thresholds = bconv2d_op.inputs[2] - - # ensure that legalization pass matches - assert legalization_pass.match(bconv2d_op) - - # run legalization pass - legalization_pass.mutate(bconv2d_op) - CleanupManager(model).run_passes() - - # basic checks - assert len(subgraph.operators) == 1 - assert bconv2d_op is subgraph.operators[0] - assert len(bconv2d_op.inputs) == 3 
- - # check custom options - options = bconv2d_op.custom_options - assert "illegal_params" not in options - assert options["K"][:3] == old_weights.shape[:3] - assert options["K"][3] == old_weights.shape[3] * WORD_SIZE_BITS - - # check biases - new_thresholds = bconv2d_op.inputs[2] - assert new_thresholds is not old_thresholds - assert new_thresholds.type is TensorType.INT32 - assert new_thresholds.shape == old_thresholds.shape - - # check weights - new_weights = bconv2d_op.inputs[1] - assert new_weights is not old_weights - assert new_weights.type is TensorType.INT32 - assert len(new_weights.shape) == 1 - - kernel_channel_size = np.prod(old_weights.shape[1:]) - filler_size = ( - VECTOR_SIZE_WORDS - kernel_channel_size % VECTOR_SIZE_WORDS - ) % VECTOR_SIZE_WORDS - assert new_weights.shape[0] - np.prod(old_weights.shape) == filler_size - - if filler_size: - filler_bits = new_weights.as_array()[-filler_size:] - assert np.all(filler_bits == FILLER) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_LegalizeBconv2dInt8DeepInDeepOutPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_LegalizeBconv2dInt8DeepInDeepOutPass.py deleted file mode 100644 index 00a9ce0ed..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_LegalizeBconv2dInt8DeepInDeepOutPass.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. -import pytest - -from tflite2xcore.transformation_passes import ( - LegalizeBconv2dInt8DeepInDeepOutPass, - ReplaceBconv2DInt8DeepInDeepOutPass, -) -from tflite2xcore.xcore_schema import XCOREModel, XCOREOpCodes - -from .test_LegalizeBconv2dInt8Pass import _test_mutate -from .test_ReplaceBconv2DInt8DeepInDeepOutPass import ( # pylint: disable=unused-import - model, - new_opcode, - PARAMS, -) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() # type: ignore -def replacement_pass() -> ReplaceBconv2DInt8DeepInDeepOutPass: - return ReplaceBconv2DInt8DeepInDeepOutPass() - - -@pytest.fixture() # type: ignore -def legalization_pass() -> LegalizeBconv2dInt8DeepInDeepOutPass: - return LegalizeBconv2dInt8DeepInDeepOutPass() - - -# ---------------------------------------------------------------------------- -# TESTS -# ---------------------------------------------------------------------------- - - -def test_mutate( - replacement_pass: ReplaceBconv2DInt8DeepInDeepOutPass, - legalization_pass: LegalizeBconv2dInt8DeepInDeepOutPass, - model: XCOREModel, - new_opcode: XCOREOpCodes, -) -> None: - _test_mutate(replacement_pass, legalization_pass, model, new_opcode) - - bconv2d_op = model.subgraphs[0].operators[0] - assert len(bconv2d_op.inputs) == 5 - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_LegalizeBconv2dInt8Pass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_LegalizeBconv2dInt8Pass.py deleted file mode 100644 index 271fb7563..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_LegalizeBconv2dInt8Pass.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. 
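The filler check above pads each kernel channel's bitpacked weights up to a whole hardware vector; the (V - k % V) % V idiom yields zero when the size is already aligned. Assuming VECTOR_SIZE_WORDS were 8, for instance:

    VECTOR_SIZE_WORDS = 8  # assumed value, for illustration only

    def filler_words(kernel_channel_words: int) -> int:
        # words of filler needed to round up to a whole vector
        return (VECTOR_SIZE_WORDS - kernel_channel_words % VECTOR_SIZE_WORDS) % VECTOR_SIZE_WORDS

    assert filler_words(16) == 0  # already aligned
    assert filler_words(18) == 6  # 18 -> 24 words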
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-import pytest
-import numpy as np
-
-from tflite2xcore.utils import VECTOR_SIZE_WORDS, WORD_SIZE_BITS
-from tflite2xcore.converter import CleanupManager
-from tflite2xcore.transformation_passes import (
-    LegalizeBconv2dInt8Pass,
-    ReplaceBconv2DInt8Pass,
-)
-from tflite2xcore.transformation_passes.lce_passes import FILLER
-from tflite2xcore.xcore_schema import XCOREModel, TensorType, XCOREOpCodes
-
-from .test_ReplaceBconv2DInt8Pass import (  # pylint: disable=unused-import
-    model,
-    new_opcode,
-    PARAMS,
-)
-
-
-# ----------------------------------------------------------------------------
-# FIXTURES
-# ----------------------------------------------------------------------------
-
-
-@pytest.fixture()  # type: ignore
-def replacement_pass() -> ReplaceBconv2DInt8Pass:
-    return ReplaceBconv2DInt8Pass()
-
-
-@pytest.fixture()  # type: ignore
-def legalization_pass() -> LegalizeBconv2dInt8Pass:
-    return LegalizeBconv2dInt8Pass()
-
-
-# ----------------------------------------------------------------------------
-# HELPERS
-# ----------------------------------------------------------------------------
-
-
-def _test_mutate(
-    replacement_pass: ReplaceBconv2DInt8Pass,
-    legalization_pass: LegalizeBconv2dInt8Pass,
-    model: XCOREModel,
-    new_opcode: XCOREOpCodes,
-) -> None:
-    subgraph = model.subgraphs[0]
-
-    # run replacement pass
-    replacement_pass.mutate(subgraph.operators[0])
-    CleanupManager(model).run_passes()
-
-    bconv2d_op = subgraph.operators[0]
-
-    op_code = bconv2d_op.operator_code.code
-    assert op_code is new_opcode, f"expected: {new_opcode}, got: {op_code}"
-
-    old_weights = bconv2d_op.inputs[1]
-    old_multipliers = bconv2d_op.inputs[2]
-    old_biases = bconv2d_op.inputs[3]
-
-    # ensure that legalization pass matches
-    assert legalization_pass.match(bconv2d_op)
-
-    # run legalization pass
-    legalization_pass.mutate(bconv2d_op)
-    CleanupManager(model).run_passes()
-
-    # basic checks
-    assert len(subgraph.operators) == 1
-    assert bconv2d_op is subgraph.operators[0]
-
-    # check custom options
-    options = bconv2d_op.custom_options
-    assert "illegal_params" not in options
-    assert "fused_activation_function" not in options
-    assert options["K"][:3] == old_weights.shape[:3]
-    assert options["K"][3] == old_weights.shape[3] * WORD_SIZE_BITS
-
-    # check multipliers
-    new_multipliers = bconv2d_op.inputs[2]
-    assert new_multipliers is not old_multipliers
-    assert new_multipliers.type is TensorType.INT16
-    assert new_multipliers.shape == old_multipliers.shape
-
-    # check biases
-    new_biases = bconv2d_op.inputs[3]
-    assert new_biases is not old_biases
-    assert new_biases.type is TensorType.INT16
-    assert new_biases.shape == old_biases.shape
-
-    # check output trf params
-    output_trf = bconv2d_op.inputs[4]
-    assert output_trf.type is TensorType.INT16
-    assert output_trf.shape == (6 * 16 + 2,)
-
-    # check weights
-    new_weights = bconv2d_op.inputs[1]
-    assert new_weights is not old_weights
-    assert new_weights.type is TensorType.INT32
-    assert len(new_weights.shape) == 1
-
-    # TODO: find a more precise test
-    old_weight_size = np.prod(old_weights.shape)
-    assert new_weights.shape[0] - old_weight_size >= 8
-
-    fill = new_weights.as_array().ravel()[old_weight_size:]
-    assert np.all(fill == FILLER)
-
-
-# ----------------------------------------------------------------------------
-# TESTS
-# ----------------------------------------------------------------------------
-
-
-def test_mutate(
-    replacement_pass: ReplaceBconv2DInt8Pass,
-    legalization_pass: LegalizeBconv2dInt8Pass,
-    model: XCOREModel,
-    new_opcode: XCOREOpCodes,
-) -> None:
-    _test_mutate(replacement_pass, legalization_pass, model, new_opcode)
-
-    bconv2d_op = model.subgraphs[0].operators[0]
-    assert len(bconv2d_op.inputs) == 6
-
-    # check accu_modifier
-    new_biases = bconv2d_op.inputs[3]
-    new_accu_modifier = bconv2d_op.inputs[5]
-    assert new_accu_modifier.type is TensorType.INT16
-    assert new_accu_modifier.shape == new_biases.shape
-
-
-if __name__ == "__main__":
-    pytest.main()
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_LegalizeXCBconv2DPaddingPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_LegalizeXCBconv2DPaddingPass.py
deleted file mode 100644
index 5cd506b31..000000000
--- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_LegalizeXCBconv2DPaddingPass.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import pytest
-from typing import Tuple
-from copy import deepcopy
-
-from tflite2xcore.transformation_passes.lce_passes import LegalizeXCBconv2DPaddingPass
-from tflite2xcore.xcore_schema import Padding, XCOREOpCodes
-from tflite2xcore.xcore_model import XCOREModel
-
-from . import build_XC_bconv2d, _test_non_matching_params
-from . import (  # pylint: disable=unused-import
-    PARAMS,
-    test_matching_params,
-    test_mutate as _test_mutate,
-)
-
-
-# ----------------------------------------------------------------------------
-# PARAMETER VALUES
-# ----------------------------------------------------------------------------
-
-PARAMS = deepcopy(PARAMS)
-
-# NOTE: this is intentional to reduce test counts
-PARAMS["extended"] = PARAMS["default"]
-PARAMS["default"] = PARAMS["smoke"]
-
-for params in PARAMS.values():
-    params["opcode"] = [XCOREOpCodes.XC_bconv2d_int8, XCOREOpCodes.XC_bconv2d_bin]
-
-# ----------------------------------------------------------------------------
-# FIXTURES
-# ----------------------------------------------------------------------------
-
-
-@pytest.fixture()
-def trf_pass() -> LegalizeXCBconv2DPaddingPass:
-    return LegalizeXCBconv2DPaddingPass()
-
-
-@pytest.fixture()
-def model(
-    weight_shape: Tuple[int, int, int, int],
-    input_size: Tuple[int, int],
-    padding: Padding,
-    strides: Tuple[int, int],
-    opcode: XCOREOpCodes,
-) -> XCOREModel:
-    return build_XC_bconv2d(
-        weight_shape=weight_shape,
-        input_size=input_size,
-        padding=padding,
-        strides=strides,
-        opcode=opcode,
-    )
-
-
-# ----------------------------------------------------------------------------
-# TESTS
-# ----------------------------------------------------------------------------
-
-
-def test_mutate(
-    trf_pass: LegalizeXCBconv2DPaddingPass,
-    model: XCOREModel,
-    padding: Padding,
-    opcode: XCOREOpCodes,
-) -> None:
-    subgraph = model.subgraphs[0]
-    old_input = subgraph.inputs[0]
-    old_output = subgraph.outputs[0]
-
-    _test_mutate(trf_pass, model, opcode)
-
-    operators = subgraph.operators
-    bconv2d_op = operators[-1]
-
-    opcode_after = bconv2d_op.operator_code.code
-    assert opcode_after is opcode, f"expected: {opcode}, got: {opcode_after}"
-
-    assert "padding" not in bconv2d_op.custom_options
-    assert old_output is bconv2d_op.outputs[0]
-
-    if padding is Padding.VALID:
-        assert len(operators) == 1
-        assert old_input is bconv2d_op.inputs[0]
-    else:
-        assert len(operators) == 2
-
-        pad_op = operators[0]
-        assert old_input is pad_op.inputs[0]
-        assert len(pad_op.inputs) == 2
-        assert len(pad_op.outputs) == 1
-
-        intermediate = bconv2d_op.inputs[0]
-        assert intermediate is pad_op.outputs[0]
-
-        # check that padding is sane
-        paddings = pad_op.inputs[1].as_array().tolist()
-        for j, (size, pads, padded_size) in enumerate(
-            zip(old_input.shape, paddings, intermediate.shape)
-        ):
-            assert (
-                size + sum(pads) == padded_size
-            ), f"incorrect padded size in dimension {j}"
-
-
-def test_non_matching_legal(
-    trf_pass: LegalizeXCBconv2DPaddingPass,
-    weight_shape: Tuple[int, int, int, int],
-    input_size: Tuple[int, int],
-    strides: Tuple[int, int],
-    opcode: XCOREOpCodes,
-) -> None:
-    model = build_XC_bconv2d(
-        weight_shape=weight_shape,
-        input_size=input_size,
-        strides=strides,
-        opcode=opcode,
-    )
-
-    _test_non_matching_params(trf_pass, model)
-
-
-if __name__ == "__main__":
-    pytest.main()
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_ReplaceBconv2DBitpackedDeepInPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_ReplaceBconv2DBitpackedDeepInPass.py
deleted file mode 100644
index 20bfb1f4f..000000000
--- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_ReplaceBconv2DBitpackedDeepInPass.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-import pytest
-from copy import deepcopy
-
-from tflite2xcore.transformation_passes import ReplaceBconv2DBitpackedDeepInPass
-from tflite2xcore.xcore_schema import XCOREOpCodes
-
-from .test_ReplaceBconv2DBitpackedPass import (  # pylint: disable=unused-import
-    PARAMS,
-    model,
-    test_matching_params,
-    test_non_matching_tensors,
-    test_non_matching_input_channels,
-    test_mutate,
-)
-from .test_ReplaceBconv2DInt8DeepInDeepOutPass import PARAMS as DEEP_PARAMS
-
-# ----------------------------------------------------------------------------
-# PARAMETER VALUES
-# ----------------------------------------------------------------------------
-
-
-PARAMS = deepcopy(PARAMS)
-
-for key in PARAMS:
-    PARAMS[key]["input_channels"] = DEEP_PARAMS[key]["input_channels"]
-    PARAMS[key]["non_matching_input_channels"] = DEEP_PARAMS[key][
-        "non_matching_input_channels"
-    ]
-
-# NOTE: this is intentional to reduce test counts
-PARAMS["extended"] = PARAMS["default"]
-PARAMS["default"] = PARAMS["smoke"]
-
-
-# ----------------------------------------------------------------------------
-# FIXTURES
-# ----------------------------------------------------------------------------
-
-
-@pytest.fixture()
-def trf_pass() -> ReplaceBconv2DBitpackedDeepInPass:
-    return ReplaceBconv2DBitpackedDeepInPass()
-
-
-@pytest.fixture()
-def new_opcode() -> XCOREOpCodes:
-    return XCOREOpCodes.XC_bconv2d_bin_DI
-
-
-if __name__ == "__main__":
-    pytest.main()
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_ReplaceBconv2DBitpackedPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_ReplaceBconv2DBitpackedPass.py
deleted file mode 100644
index 9f743bb60..000000000
--- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_ReplaceBconv2DBitpackedPass.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-import pytest
-from copy import deepcopy
-from typing import Tuple
-
-from tflite2xcore.transformation_passes import ReplaceBconv2DBitpackedPass
-from tflite2xcore.xcore_model import XCOREModel
-from tflite2xcore.xcore_schema import TensorType, XCOREOpCodes, Padding
-
-from . import (
-    build_lceBconv2d,
-    _make_name_type_pairs,
-    update_lce_params,
-    test_bconv2d_mutate as _test_mutate,
-)
-from . import (  # pylint: disable=unused-import
-    PARAMS,
-    test_matching_params,
-    test_non_matching_tensors,
-    test_non_matching_input_channels,
-)
-
-# ----------------------------------------------------------------------------
-# PARAMETER VALUES
-# ----------------------------------------------------------------------------
-
-
-PARAMS = deepcopy(PARAMS)
-
-PARAMS["extended"].update(
-    {
-        "output_channels": [32, 128, 256],
-        "non_matching_output_channels": [16, 20, 33],
-        "non_matching_tensors": list(
-            _make_name_type_pairs("output", [TensorType.FLOAT32, TensorType.INT8])
-        ),
-    }
-)
-
-PARAMS = update_lce_params(PARAMS)
-
-
-# ----------------------------------------------------------------------------
-# FIXTURES
-# ----------------------------------------------------------------------------
-
-
-@pytest.fixture()
-def trf_pass() -> ReplaceBconv2DBitpackedPass:
-    return ReplaceBconv2DBitpackedPass()
-
-
-@pytest.fixture()
-def new_opcode() -> XCOREOpCodes:
-    return XCOREOpCodes.XC_bconv2d_bin
-
-
-@pytest.fixture()
-def model(
-    weight_shape: Tuple[int, int, int, int],
-    input_size: Tuple[int, int],
-    padding: Padding,
-    strides: Tuple[int, int],
-) -> XCOREModel:
-    return build_lceBconv2d(
-        weight_shape=weight_shape,
-        input_size=input_size,
-        padding=padding,
-        strides=strides,
-        output_tensor_type=TensorType.INT32,
-    )
-
-
-# ----------------------------------------------------------------------------
-# TESTS
-# ----------------------------------------------------------------------------
-
-
-def test_mutate(
-    trf_pass: ReplaceBconv2DBitpackedPass, model: XCOREModel, new_opcode: XCOREOpCodes
-) -> None:
-    subgraph = model.subgraphs[0]
-    operators = subgraph.operators
-
-    _test_mutate(trf_pass, model, new_opcode)
-
-    assert len(operators) == 1
-
-    new_op = operators[-1]
-
-    assert len(new_op.inputs) == 3
-    assert new_op.inputs[1].type is TensorType.INT32
-    assert new_op.inputs[2].type is TensorType.INT32
-    assert new_op.outputs[0].type is TensorType.INT32
-
-
-if __name__ == "__main__":
-    pytest.main()
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_ReplaceBconv2DInt8DeepInDeepOutPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_ReplaceBconv2DInt8DeepInDeepOutPass.py
deleted file mode 100644
index a943f77db..000000000
--- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_ReplaceBconv2DInt8DeepInDeepOutPass.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-import pytest
-from copy import deepcopy
-
-from tflite2xcore.transformation_passes import ReplaceBconv2DInt8DeepInDeepOutPass
-from tflite2xcore.xcore_schema import XCOREOpCodes
-
-from . import update_lce_params
-from .test_ReplaceBconv2DInt8Pass import (  # pylint: disable=unused-import
-    PARAMS,
-    model,
-    test_matching_params,
-    test_non_matching_tensors,
-    test_non_matching_input_channels,
-    test_non_matching_output_channels,
-    test_mutate,
-)
-
-# ----------------------------------------------------------------------------
-# PARAMETER VALUES
-# ----------------------------------------------------------------------------
-
-PARAMS = deepcopy(PARAMS)
-
-PARAMS["extended"].update(
-    {
-        "output_channels": [16, 64, 128],
-        "non_matching_output_channels": [8, 17, 28],
-        "input_channels": [256, 512, 1024],
-        "non_matching_input_channels": [16, 21, 128],
-    }
-)
-
-PARAMS = update_lce_params(PARAMS)
-
-# NOTE: this is intentional to reduce test counts
-PARAMS["extended"] = PARAMS["default"]
-PARAMS["default"] = PARAMS["smoke"]
-
-# ----------------------------------------------------------------------------
-# FIXTURES
-# ----------------------------------------------------------------------------
-
-
-@pytest.fixture()
-def trf_pass() -> ReplaceBconv2DInt8DeepInDeepOutPass:
-    return ReplaceBconv2DInt8DeepInDeepOutPass()
-
-
-@pytest.fixture()
-def new_opcode() -> XCOREOpCodes:
-    return XCOREOpCodes.XC_bconv2d_int8_DIDO
-
-
-if __name__ == "__main__":
-    pytest.main()
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_ReplaceBconv2DInt8Pass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_ReplaceBconv2DInt8Pass.py
deleted file mode 100644
index a4e873117..000000000
--- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_ReplaceBconv2DInt8Pass.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-import pytest
-from copy import deepcopy
-from typing import Tuple
-
-from tflite2xcore.transformation_passes import ReplaceBconv2DInt8Pass
-from tflite2xcore.xcore_model import XCOREModel
-from tflite2xcore.xcore_schema import XCOREOpCodes, Padding, TensorType
-
-from . import (
-    build_lceBconv2d,
-    _make_name_type_pairs,
-    update_lce_params,
-    test_bconv2d_mutate as _test_mutate,
-)
-from . import (  # pylint: disable=unused-import
-    PARAMS,
-    test_matching_params,
-    test_non_matching_tensors,
-    test_non_matching_input_channels,
-    test_non_matching_output_channels,
-)
-
-# ----------------------------------------------------------------------------
-# PARAMETER VALUES
-# ----------------------------------------------------------------------------
-
-PARAMS = deepcopy(PARAMS)
-
-PARAMS["extended"].update(
-    {
-        "output_channels": [4, 16, 32],
-        "non_matching_output_channels": [5, 9, 27],
-        "non_matching_tensors": list(
-            _make_name_type_pairs("output", [TensorType.FLOAT32, TensorType.INT32])
-        ),
-    }
-)
-
-PARAMS = update_lce_params(PARAMS)
-
-
-# ----------------------------------------------------------------------------
-# FIXTURES
-# ----------------------------------------------------------------------------
-
-
-@pytest.fixture()
-def trf_pass() -> ReplaceBconv2DInt8Pass:
-    return ReplaceBconv2DInt8Pass()
-
-
-@pytest.fixture()
-def new_opcode() -> XCOREOpCodes:
-    return XCOREOpCodes.XC_bconv2d_int8
-
-
-@pytest.fixture()
-def model(
-    weight_shape: Tuple[int, int, int, int],
-    input_size: Tuple[int, int],
-    padding: Padding,
-    strides: Tuple[int, int],
-) -> XCOREModel:
-    return build_lceBconv2d(
-        weight_shape=weight_shape,
-        input_size=input_size,
-        padding=padding,
-        strides=strides,
-    )
-
-
-# ----------------------------------------------------------------------------
-# TESTS
-# ----------------------------------------------------------------------------
-
-
-def test_mutate(
-    trf_pass: ReplaceBconv2DInt8Pass, model: XCOREModel, new_opcode: XCOREOpCodes
-) -> None:
-    subgraph = model.subgraphs[0]
-    operators = subgraph.operators
-
-    _test_mutate(trf_pass, model, new_opcode)
-
-    assert len(operators) == 1
-
-    new_op = operators[-1]
-
-    assert "fused_activation_function" in new_op.custom_options
-
-    assert len(new_op.inputs) == 4
-    assert new_op.inputs[1].type is TensorType.INT32
-    assert new_op.inputs[2].type is TensorType.INT16
-    assert new_op.inputs[3].type is TensorType.INT16
-    assert new_op.outputs[0].type is TensorType.INT8
-
-
-if __name__ == "__main__":
-    pytest.main()
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_ReplaceLceQuantizePass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_ReplaceLceQuantizePass.py
deleted file mode 100644
index 299ddb730..000000000
--- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lce_passes/test_ReplaceLceQuantizePass.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-import pytest
-from copy import deepcopy
-from typing import Tuple
-
-from tflite2xcore.transformation_passes.lce_passes import ReplaceLceQuantizePass
-from tflite2xcore.xcore_model import XCOREModel
-from tflite2xcore.xcore_schema import (
-    TensorType,
-    XCOREOpCodes,
-)
-
-from . import (
-    build_LceQuantize,
-    _make_name_type_pairs,
-    _test_non_matching_params,
-    update_lce_params,
-)
-from . import (  # pylint: disable=unused-import
-    PARAMS,
-    test_matching_params,
-    test_non_matching_tensors,
-    test_mutate,
-)
-
-
-# ----------------------------------------------------------------------------
-# PARAMETER VALUES
-# ----------------------------------------------------------------------------
-
-PARAMS = deepcopy(PARAMS)
-
-PARAMS["extended"].update(
-    {
-        "non_matching_tensors": list(
-            _make_name_type_pairs("input", [TensorType.FLOAT32, TensorType.INT32])
-        ),
-    }
-)
-
-PARAMS = update_lce_params(PARAMS)
-
-
-# ----------------------------------------------------------------------------
-# FIXTURES
-# ----------------------------------------------------------------------------
-
-
-@pytest.fixture()
-def trf_pass() -> ReplaceLceQuantizePass:
-    return ReplaceLceQuantizePass()
-
-
-@pytest.fixture()
-def new_opcode() -> XCOREOpCodes:
-    return XCOREOpCodes.XC_bsign_8
-
-
-@pytest.fixture()
-def model(input_shape: Tuple[int, int, int]) -> XCOREModel:
-    return build_LceQuantize(input_shape=input_shape)
-
-
-# ----------------------------------------------------------------------------
-# TESTS
-# ----------------------------------------------------------------------------
-
-
-def test_non_matching_input_channels(
-    trf_pass: ReplaceLceQuantizePass,
-    non_matching_input_channels: int,
-    input_size: Tuple[int, int],
-) -> None:
-    model = build_LceQuantize(input_shape=(*input_size, non_matching_input_channels))
-    _test_non_matching_params(trf_pass, model)
-
-
-if __name__ == "__main__":
-    pytest.main()
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lut_passes/__init__.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lut_passes/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lut_passes/conftest.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lut_passes/conftest.py
deleted file mode 100644
index f274753c4..000000000
--- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lut_passes/conftest.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import pytest
-from copy import deepcopy
-
-from tflite2xcore.transformation_passes import LegalizeXCLookupTablePass
-from tflite2xcore.xcore_schema import TensorType, XCOREOpCodes
-
-from ..conftest import test_matching_params, _test_non_matching_params
-
-
-# ----------------------------------------------------------------------------
-# PARAMETER VALUES
-# ----------------------------------------------------------------------------
-
-PARAMS = {
-    "default": {
-        "input_channels": [1, 2, 3, 4, 8, 16, 32],
-        "input_height": [1, 2, 3, 4, 5, 9],
-        "input_width": [1, 2, 3, 4, 5, 9],
-        "non_matching_input_type": [
-            TensorType.INT16,
-            TensorType.INT32,
-            TensorType.UINT8,
-            TensorType.FLOAT32,
-        ],
-        "non_matching_output_type": [
-            TensorType.INT16,
-            TensorType.INT32,
-            TensorType.UINT8,
-            TensorType.FLOAT32,
-        ],
-    },
-    "smoke": {
-        "input_channels": [1, 4, 32],
-        "input_height": [1, 9],
-        "input_width": [1, 9],
-        "non_matching_input_type": [TensorType.INT16, TensorType.FLOAT32],
-        "non_matching_output_type": [TensorType.INT16, TensorType.FLOAT32],
-    },
-}
-
-
-# ----------------------------------------------------------------------------
-# FIXTURES
-# ----------------------------------------------------------------------------
-
-
-@pytest.fixture()
-def legalize_table_pass():
-    return LegalizeXCLookupTablePass()
-
-
-# ----------------------------------------------------------------------------
-# TEST FUNCTIONS
-# ----------------------------------------------------------------------------
-
-
-def test_non_matching_input_type(trf_pass, model, non_matching_input_type):
-    op = model.subgraphs[0].operators[0]
-    op.inputs[0].type = non_matching_input_type
-    _test_non_matching_params(trf_pass, model)
-
-
-def test_non_matching_output_type(trf_pass, model, non_matching_output_type):
-    op = model.subgraphs[0].operators[0]
-    op.outputs[0].type = non_matching_output_type
-    _test_non_matching_params(trf_pass, model)
-
-
-def test_mutate(trf_pass, legalize_table_pass, model):
-    # extract original parameters
-    subgraph = model.subgraphs[0]
-    tin_shape = deepcopy(subgraph.get_tensor("input").shape)
-    tout_shape = deepcopy(subgraph.get_tensor("output").shape)
-    original_opcode = subgraph.operators[0].operator_code.code
-
-    # run replacement pass
-    trf_pass.run(model)
-    model.sanity_check()
-
-    # check new op
-    op = subgraph.operators[-1]
-    assert op.operator_code.code is XCOREOpCodes.XC_lookup_8
-    assert "original_opcode" in op.custom_options
-    assert op.custom_options["original_opcode"] is original_opcode
-
-    # run table legalization pass
-    legalize_table_pass.run(model)
-    model.sanity_check()
-    assert "original_opcode" not in op.custom_options
-
-    # check input/output tensors
-    tin = subgraph.get_tensor("input")
-    tout = subgraph.get_tensor("output")
-
-    assert len(subgraph.operators) == 1
-    assert len(subgraph.tensors) == 3
-    assert tin in subgraph.inputs and tin not in subgraph.outputs
-    assert tout in subgraph.outputs and tout not in subgraph.inputs
-    assert tin.shape == tin_shape
-    assert tout.shape == tout_shape
-
-    # check LUT shape
-    lut_tensor = op.inputs[1]
-    assert len(lut_tensor.buffer.data) == 256
-    assert lut_tensor.shape == (256,)
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lut_passes/test_ReplaceReLU6Pass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lut_passes/test_ReplaceReLU6Pass.py
deleted file mode 100644
index 29fc00522..000000000
--- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lut_passes/test_ReplaceReLU6Pass.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import pytest
-
-from tflite2xcore.xcore_schema import TensorType
-from tflite2xcore.transformation_passes import ReplaceReLU6Pass
-
-from tflite2xcore.tests.test_transformation_passes.model_builders import build_relu6
-from .conftest import (
-    PARAMS,
-    test_matching_params,
-    test_non_matching_input_type,
-    test_non_matching_output_type,
-    test_mutate,
-)
-
-
-# ----------------------------------------------------------------------------
-# FIXTURES
-# ----------------------------------------------------------------------------
-
-
-@pytest.fixture()
-def trf_pass():
-    return ReplaceReLU6Pass()
-
-
-@pytest.fixture()
-def model(input_shape):
-    return build_relu6(input_shape=input_shape, tensor_type=TensorType.INT8)
-
-
-if __name__ == "__main__":
-    pytest.main()
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lut_passes/test_ReplaceReLUPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lut_passes/test_ReplaceReLUPass.py
deleted file mode 100644
index 3e91c7664..000000000
--- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lut_passes/test_ReplaceReLUPass.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import pytest
-
-from tflite2xcore.xcore_schema import TensorType
-from tflite2xcore.transformation_passes import ReplaceReLUPass
-
-from tflite2xcore.tests.test_transformation_passes.model_builders import build_relu
-from .conftest import (
-    PARAMS,
-    test_matching_params,
-    test_non_matching_input_type,
-    test_non_matching_output_type,
-    test_mutate,
-)
-
-
-# ----------------------------------------------------------------------------
-# FIXTURES
-# ----------------------------------------------------------------------------
-
-
-@pytest.fixture()
-def trf_pass():
-    return ReplaceReLUPass()
-
-
-@pytest.fixture()
-def model(input_shape):
-    return build_relu(input_shape=input_shape, tensor_type=TensorType.INT8)
-
-
-if __name__ == "__main__":
-    pytest.main()
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lut_passes/test_ReplaceTanhPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lut_passes/test_ReplaceTanhPass.py
deleted file mode 100644
index 0f46d3be6..000000000
--- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_lut_passes/test_ReplaceTanhPass.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import pytest
-
-from tflite2xcore.xcore_schema import TensorType
-from tflite2xcore.transformation_passes import ReplaceTanhPass
-
-from tflite2xcore.tests.test_transformation_passes.model_builders import build_tanh
-from .conftest import (
-    PARAMS,
-    test_matching_params,
-    test_non_matching_input_type,
-    test_non_matching_output_type,
-    test_mutate,
-)
-
-
-# ----------------------------------------------------------------------------
-# FIXTURES
-# ----------------------------------------------------------------------------
-
-
-@pytest.fixture()
-def trf_pass():
-    return ReplaceTanhPass()
-
-
-@pytest.fixture()
-def model(input_shape):
-    return build_tanh(input_shape=input_shape, tensor_type=TensorType.INT8)
-
-
-if __name__ == "__main__":
-    pytest.main()
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_op_version_passes/__init__.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_op_version_passes/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_op_version_passes/conftest.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_op_version_passes/conftest.py
deleted file mode 100644
index 8e0040582..000000000
--- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_op_version_passes/conftest.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-from ..conftest import (
-    PARAMS,
-    _test_non_matching_params,
-    _make_name_type_pairs,
-    NON_INT8_TEST_TYPES,
-    NON_FLOAT32_TEST_TYPES,
-    test_matching_params,
-    test_non_matching_tensors,
-)
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_op_version_passes/test_LegalizeQuantizeVersionPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_op_version_passes/test_LegalizeQuantizeVersionPass.py
deleted file mode 100644
index 2f651f9eb..000000000
--- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_op_version_passes/test_LegalizeQuantizeVersionPass.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import pytest
-
-from typing import Tuple
-from copy import deepcopy
-
-from tflite2xcore.xcore_model import XCOREModel
-from tflite2xcore.xcore_schema import BuiltinOpCodes
-from tflite2xcore.transformation_passes import LegalizeQuantizeVersionPass
-
-from tflite2xcore.tests.test_transformation_passes.model_builders import build_quantize
-
-from .conftest import (
-    PARAMS,
-    _test_non_matching_params,
-    _make_name_type_pairs,
-    NON_INT8_TEST_TYPES,
-    NON_FLOAT32_TEST_TYPES,
-    test_matching_params,
-    test_non_matching_tensors,
-)
-
-
-# ----------------------------------------------------------------------------
-# PARAMETER VALUES
-# ----------------------------------------------------------------------------
-
-PARAMS = deepcopy(PARAMS)
-
-_NON_MATCHING_TENSORS = list(
-    _make_name_type_pairs("input", NON_FLOAT32_TEST_TYPES)
-) + list(_make_name_type_pairs("output_quantized", NON_INT8_TEST_TYPES))
-
-for params in PARAMS.values():
-    params.update(
-        {"non_matching_version": [1, 3], "non_matching_tensors": _NON_MATCHING_TENSORS}
-    )
-
-
-# ----------------------------------------------------------------------------
-# FIXTURES
-# ----------------------------------------------------------------------------
-
-
-@pytest.fixture()
-def model(input_shape: Tuple[int, int, int]) -> XCOREModel:
-    model = build_quantize(input_shape=input_shape)
-    model.subgraphs[0].operators[0].operator_code.version = 2
-    return model
-
-
-@pytest.fixture()
-def trf_pass() -> LegalizeQuantizeVersionPass:
-    return LegalizeQuantizeVersionPass()
-
-
-# ----------------------------------------------------------------------------
-# TEST FUNCTIONS
-# ----------------------------------------------------------------------------
-
-
-def test_mutate(model: XCOREModel, trf_pass: LegalizeQuantizeVersionPass) -> None:
-    subgraph = model.subgraphs[0]
-    trf_pass.mutate(subgraph.operators[0])
-    subgraph.sanity_check()
-
-    assert len(subgraph.operators) == 1
-    assert len(subgraph.tensors) == 2
-
-    op_code = subgraph.operators[0].operator_code
-    assert op_code.code is BuiltinOpCodes.QUANTIZE
-    assert op_code.version == 1
-
-
-def test_non_matching_version(
-    trf_pass: LegalizeQuantizeVersionPass,
-    input_shape: Tuple[int, int, int],
-    non_matching_version: int,
-) -> None:
-    model = build_quantize(input_shape=input_shape)
-    model.subgraphs[0].operators[0].operator_code.version = non_matching_version
-    _test_non_matching_params(trf_pass, model)
-
-
-if __name__ == "__main__":
-    pytest.main()
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/__init__.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/__init__.py
deleted file mode 100644
index 161afb976..000000000
--- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/__init__.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-from typing import Tuple
-
-from tflite2xcore.transformation_passes import ModelTransformationPass
-
-from ..model_builders import build_pad
-from ..conftest import _test_non_matching_params, test_replace_mutate
-
-PaddingType = Tuple[Tuple[int, int], Tuple[int, int], Tuple[int, int], Tuple[int, int]]
-
-
-# ----------------------------------------------------------------------------
-# FIXTURES
-# ----------------------------------------------------------------------------
-
-
-def test_non_matching_paddings(
-    trf_pass: ModelTransformationPass,
-    input_shape: Tuple[int, int, int, int],
-    non_matching_paddings: PaddingType,
-) -> None:
-    model = build_pad(input_shape=input_shape, paddings=non_matching_paddings)
-    _test_non_matching_params(trf_pass, model)
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/conftest.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/conftest.py
deleted file mode 100644
index d3e666fea..000000000
--- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/conftest.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import pytest
-
-from copy import deepcopy
-from itertools import product
-
-from ..conftest import (
-    PARAMS,
-    ParamsType,
-    _test_non_matching_params,
-    test_matching_params,
-)
-
-
-# ----------------------------------------------------------------------------
-# PARAMETER VALUES
-# ----------------------------------------------------------------------------
-
-PARAMS = deepcopy(PARAMS)
-
-PADS = [0, 1, 2]
-
-PARAMS["extended"].update(
-    {
-        "input_batches": [1, 4],
-        "pad_t": PADS,
-        "pad_b": PADS,
-        "pad_l": PADS,
-        "pad_r": PADS,
-        "pad_batch_l": [0],
-        "pad_batch_r": PADS,
-        "pad_channel_l": [0],
-        "pad_channel_r": PADS,
-    }
-)
-
-PARAMS["default"].update(
-    {
-        "input_batches": [1],
-        "pad_t": PADS,
-        "pad_b": PADS[:2],
-        "pad_l": PADS,
-        "pad_r": PADS[:2],
-        "pad_batch_l": [0],
-        "pad_batch_r": [0],
-        "pad_channel_l": [0],
-        "pad_channel_r": PADS,
-    }
-)
-
-PARAMS["smoke"].update(
-    {
-        "input_batches": [1],
-        "pad_t": PADS[:1],
-        "pad_b": PADS,
-        "pad_l": PADS[:1],
-        "pad_r": PADS,
-        "pad_batch_l": [0],
-        "pad_batch_r": [0],
-        "pad_channel_l": [0],
-        "pad_channel_r": PADS[:2],
-    }
-)
-
-
-# ----------------------------------------------------------------------------
-# FIXTURES
-# ----------------------------------------------------------------------------
-
-
-@pytest.fixture()
-def input_shape(input_batches, input_size, input_channels):
-    return [input_batches, *input_size, input_channels]
-
-
-@pytest.fixture()
-def paddings_HW(pad_t, pad_b, pad_l, pad_r):
-    return [(0, 0), (pad_t, pad_b), (pad_l, pad_r), (0, 0)]
-
-
-@pytest.fixture()
-def paddings_NC(pad_batch_l, pad_batch_r, pad_channel_l, pad_channel_r):
-    return [(pad_batch_l, pad_batch_r), (0, 0), (0, 0), (pad_channel_l, pad_channel_r)]
-
-
-# ----------------------------------------------------------------------------
-# HELPERS
-# ----------------------------------------------------------------------------
-
-
-def update_params_with_paddings(PARAMS, *, is_matching):
-    for params in PARAMS.values():
-        all_paddings = list(
-            product(
-                product(params["pad_batch_l"], params["pad_batch_r"]),
-                product(params["pad_t"], params["pad_b"]),
-                product(params["pad_l"], params["pad_r"]),
-                product(params["pad_channel_l"], params["pad_channel_r"]),
-            )
-        )
-
-        params.update(
-            {
-                "paddings": [
-                    padding for padding in all_paddings if is_matching(padding)
-                ],
-                "non_matching_paddings": [
-                    padding for padding in all_paddings if not is_matching(padding)
-                ],
-            }
-        )
-
-    return PARAMS
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/padding_composite_test.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/padding_composite_test.py
deleted file mode 100644
index 0caf615a5..000000000
--- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/padding_composite_test.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import pytest
-
-from copy import deepcopy
-
-from tflite2xcore.converter import CleanupManager
-from tflite2xcore.transformation_passes import (
-    SplitPaddingPass,
-    FuseConsecutivePadsPass,
-    FuseConv2dPaddingPass,
-)
-from tflite2xcore.xcore_schema import BuiltinOpCodes, XCOREOpCodes
-
-from ..model_builders import build_pad, build_padded_DW
-from .test_SplitPaddingPass import (
-    is_matching as is_split_matching,
-    PARAMS as SPLIT_PARAMS,
-)
-from .test_FuseConv2dPaddingPass import PARAMS as CONV_PARAMS, has_excessive_padding
-from .conftest import PARAMS
-
-
-# ----------------------------------------------------------------------------
-# PARAMETER VALUES
-# ----------------------------------------------------------------------------
-
-PARAMS = deepcopy(PARAMS)
-
-for k in PARAMS:
-    PARAMS[k]["paddings"] = deepcopy(SPLIT_PARAMS[k]["paddings"])
-    PARAMS[k]["splittable_spatial_params"] = [
-        param_combo
-        for param_combo in CONV_PARAMS[k]["non_matching_channel_batch_params"]
-        if is_split_matching(param_combo.padding)
-        and not has_excessive_padding(param_combo)
-    ]
-
-# ----------------------------------------------------------------------------
-# TEST FUNCTIONS
-# ----------------------------------------------------------------------------
-
-
-def test_split_fuse_pad(input_shape, paddings):
-    model = build_pad(input_shape=input_shape, paddings=paddings)
-    operators = model.subgraphs[0].operators
-    assert len(operators) == 1
-    pad_ori = operators[0].inputs[1]
-    paddings_ori = pad_ori.as_array()
-
-    split_pass = SplitPaddingPass()
-    split_pass.run(model)
-    model.sanity_check()
-    assert len(operators) == 2
-    op1, op2 = operators
-    assert op1.operator_code.code is op2.operator_code.code is BuiltinOpCodes.PAD
-
-    fuse_pass = FuseConsecutivePadsPass()
-    fuse_pass.run(model)
-    model.sanity_check()
-
-    # need to clean up dangling ops/tensors
-    CleanupManager(model).run_passes()
-    model.sanity_check()
-
-    assert len(operators) == 1
-    pad_new = operators[0].inputs[1]
-    assert pad_new is not pad_ori
-    paddings_new = pad_new.as_array()
-    assert paddings_new[0][0] == paddings_ori[0][0]
-    assert paddings_new[0][1] == paddings_ori[0][1]
-    assert paddings_new[1][0] == paddings_ori[1][0]
-    assert paddings_new[1][1] == paddings_ori[1][1]
-    assert paddings_new[2][0] == paddings_ori[2][0]
-    assert paddings_new[2][1] == paddings_ori[2][1]
-    assert paddings_new[3][0] == paddings_ori[3][0]
-    assert paddings_new[3][1] == paddings_ori[3][1]
-
-
-def test_split_fuse_conv2d(splittable_spatial_params, input_channels):
-    model = build_padded_DW(
-        weight_shape=[*splittable_spatial_params.kernel_size, input_channels],
-        input_size=splittable_spatial_params.input_size,
-        paddings=splittable_spatial_params.padding,
-        strides=splittable_spatial_params.stride,
-    )
-    operators = model.subgraphs[0].operators
-    assert len(operators) == 2
-    paddings_ori = operators[0].inputs[1].as_array()
-
-    split_pass = SplitPaddingPass()
-    split_pass.run(model)
-    model.sanity_check()
-    assert len(operators) == 3
-    op1, op2, op3 = operators
-    assert op1.operator_code.code is op2.operator_code.code is BuiltinOpCodes.PAD
-    assert op3.operator_code.code is XCOREOpCodes.XC_conv2d_depthwise
-
-    fuse_pass = FuseConv2dPaddingPass()
-    fuse_pass.run(model)
-    model.sanity_check()
-
-    # need to clean up dangling ops/tensors
-    CleanupManager(model).run_passes()
-    model.sanity_check()
-
-    assert len(operators) == 2
-    op1, op2 = operators
-    assert op1.operator_code.code is BuiltinOpCodes.PAD
-    assert op2.operator_code.code is XCOREOpCodes.XC_conv2d_depthwise
-    paddings_new = operators[0].inputs[1].as_array()
-    assert paddings_new[1][0] == paddings_new[2][0] == 0
-    assert paddings_new[1][1] == paddings_new[2][1] == 0
-    assert paddings_new[0][0] == paddings_ori[0][0]
-    assert paddings_new[0][1] == paddings_ori[0][1]
-    assert paddings_new[3][0] == paddings_ori[3][0]
-    assert paddings_new[3][1] == paddings_ori[3][1]
-
-
-if __name__ == "__main__":
-    pytest.main()
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/test_FuseConsecutivePadsPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/test_FuseConsecutivePadsPass.py
deleted file mode 100644
index e041b29d9..000000000
--- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/test_FuseConsecutivePadsPass.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import pytest
-
-from copy import deepcopy
-
-from tflite2xcore.converter import CleanupManager
-from tflite2xcore.transformation_passes import FuseConsecutivePadsPass
-
-from ..model_builders import build_pad, build_consecutive_pads
-from .conftest import (
-    PARAMS,
-    _test_non_matching_params,
-    test_matching_params,
-    update_params_with_paddings,
-)
-
-
-# ----------------------------------------------------------------------------
-# PARAMETER VALUES
-# ----------------------------------------------------------------------------
-
-PARAMS = update_params_with_paddings(deepcopy(PARAMS), is_matching=lambda padding: True)
-
-# NOTE: this is intentional to keep test case count lower
-PARAMS["default"]["paddings"] = PARAMS["smoke"]["paddings"]
-
-
-# ----------------------------------------------------------------------------
-# FIXTURES
-# ----------------------------------------------------------------------------
-
-
-@pytest.fixture()
-def build_model():
-    return build_pad
-
-
-@pytest.fixture()
-def trf_pass():
-    return FuseConsecutivePadsPass()
-
-
-@pytest.fixture()
-def model(input_shape, paddings, paddings_NC):
-    return build_consecutive_pads(
-        input_shape=input_shape, paddings_1=paddings, paddings_2=paddings_NC
-    )
-
-
-# ----------------------------------------------------------------------------
-# TEST FUNCTIONS
-# ----------------------------------------------------------------------------
-
-
-def test_mutate(trf_pass, model):
-    # extract original padding values
-    subgraph = model.subgraphs[0]
-    assert len(subgraph.operators) == 2
-    pad_1_ori = subgraph.operators[0].inputs[1]
-    pad_2_ori = subgraph.operators[1].inputs[1]
-    paddings_1_ori = pad_1_ori.as_array()
-    paddings_2_ori = pad_2_ori.as_array()
-    in_ori, out_ori = subgraph.inputs[0], subgraph.outputs[0]
-
-    # run mutating pass
-    trf_pass.run(model)
-    model.sanity_check()
-
-    # need to clean up dangling ops/tensors
-    CleanupManager(model).run_passes()
-    model.sanity_check()
-
-    # check operator
-    assert len(subgraph.operators) == 1
-    op = subgraph.operators[0]
-    assert len(op.inputs) == 2
-    assert len(op.outputs) == 1
-
-    # check input/output tensors
-    assert len(subgraph.inputs) == len(subgraph.outputs) == 1
-    in_new, out_new = subgraph.inputs[0], subgraph.outputs[0]
-    assert in_ori is in_new is op.inputs[0]
-    assert out_ori is out_new is op.outputs[0]
-
-    # check parameters
-    pad_new = subgraph.operators[0].inputs[1]
-    paddings_new = pad_new.as_array()
-    assert pad_new is not pad_1_ori
-    assert pad_new is not pad_2_ori
-    assert paddings_new[0][0] == paddings_1_ori[0][0] + paddings_2_ori[0][0]
-    assert paddings_new[0][1] == paddings_1_ori[0][1] + paddings_2_ori[0][1]
-    assert paddings_new[1][0] == paddings_1_ori[1][0] + paddings_2_ori[1][0]
-    assert paddings_new[1][1] == paddings_1_ori[1][1] + paddings_2_ori[1][1]
-    assert paddings_new[2][0] == paddings_1_ori[2][0] + paddings_2_ori[2][0]
-    assert paddings_new[2][1] == paddings_1_ori[2][1] + paddings_2_ori[2][1]
-    assert paddings_new[3][0] == paddings_1_ori[3][0] + paddings_2_ori[3][0]
-    assert paddings_new[3][1] == paddings_1_ori[3][1] + paddings_2_ori[3][1]
-
-
-def test_non_matching_single_pad(trf_pass, input_shape, paddings):
-    model = build_pad(input_shape=input_shape, paddings=paddings)
-    _test_non_matching_params(trf_pass, model)
-
-
-if __name__ == "__main__":
-    pytest.main()
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/test_FuseConv2dPaddingPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/test_FuseConv2dPaddingPass.py
deleted file mode 100644
index f7fed77e9..000000000
--- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/test_FuseConv2dPaddingPass.py
+++ /dev/null
@@ -1,204 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import pytest
-from itertools import product
-from copy import deepcopy
-from typing import Tuple, NamedTuple
-
-from tflite2xcore.converter import CleanupManager
-from tflite2xcore.transformation_passes import FuseConv2dPaddingPass
-from tflite2xcore.xcore_schema import XCOREOpCodes
-from tflite2xcore.xcore_model import XCOREModel
-
-from . import PaddingType
-from ..model_builders import build_padded_DW, _calculate_implicit_pads, ModelBuilder
-from ..test_conv2d_passes.conftest import (
-    PARAMS as CONV_PARAMS,
-    _test_non_matching_params,
-    test_matching_params,
-)
-from .conftest import PARAMS, ParamsType
-
-
-# ----------------------------------------------------------------------------
-# PARAMETER VALUES
-# ----------------------------------------------------------------------------
-
-CONV_PARAMS = deepcopy(CONV_PARAMS)
-PARAMS = deepcopy(PARAMS)
-
-# NOTE: this is intentional to keep test case count lower
-PARAMS["extended"].update(CONV_PARAMS["default"])
-PARAMS["default"].update(CONV_PARAMS["smoke"])
-PARAMS["smoke"].update(CONV_PARAMS["smoke"])
-
-
-class ParamCombination(NamedTuple):
-    input_channels: int
-    input_size: Tuple[int, int]
-    kernel_size: Tuple[int, int]
-    stride: Tuple[int, int]
-    padding: PaddingType
-
-
-def has_channel_batch_pad(padding: PaddingType) -> bool:
-    return not padding[0] == padding[3] == (0, 0)
-
-
-def has_excessive_padding(param_combo: ParamCombination) -> bool:
-    implicit_pads = _calculate_implicit_pads(
-        param_combo.stride, param_combo.input_size, param_combo.kernel_size
-    )
-    effective_pads = tuple(
-        tuple(sum(t) for t in zip(*pad_pairs))
-        for pad_pairs in zip(implicit_pads, param_combo.padding[1:3])
-    )
-
-    for p, k in zip(effective_pads, param_combo.kernel_size):
-        if p[0] >= k or p[1] >= k:
-            return True
-    return False
-
-
-def update_params_with_paddings(PARAMS: ParamsType) -> ParamsType:
-    for params in PARAMS.values():
-        input_sizes = product(params["input_height"], params["input_width"])
-        kernel_sizes = product(params["kernel_height"], params["kernel_width"])
-        strides = product(params["stride_h"], params["stride_w"])
-        paddings = product(
-            product(params["pad_batch_l"], params["pad_batch_r"]),
-            product(params["pad_t"], params["pad_b"]),
-            product(params["pad_l"], params["pad_r"]),
-            product(params["pad_channel_l"], params["pad_channel_r"]),
-        )
-
-        non_matching_channel_batch_params = []
-        non_matching_spatial_params = []
-        matching_spatial_params = []
-
-        for t in product(
-            params["input_channels"], input_sizes, kernel_sizes, strides, paddings
-        ):
-            param_combo = ParamCombination(*t)
-            if has_channel_batch_pad(param_combo.padding):
-                non_matching_channel_batch_params.append(param_combo)
-            elif has_excessive_padding(param_combo):
-                non_matching_spatial_params.append(param_combo)
-            else:
-                matching_spatial_params.append(param_combo)
-
-        params.update(
-            {
-                "non_matching_channel_batch_params": non_matching_channel_batch_params,
-                "non_matching_spatial_params": non_matching_spatial_params,
-                "matching_spatial_params": matching_spatial_params,
-            }
-        )
-    return PARAMS
-
-
-PARAMS = update_params_with_paddings(PARAMS)
-
-
-# ----------------------------------------------------------------------------
-# FIXTURES
-# ----------------------------------------------------------------------------
-
-
-@pytest.fixture()
-def build_model() -> ModelBuilder:
-    return build_padded_DW
-
-
-@pytest.fixture()
-def trf_pass() -> FuseConv2dPaddingPass:
-    return FuseConv2dPaddingPass()
-
-
-@pytest.fixture()
-def model(matching_spatial_params: ParamCombination, input_channels: int) -> XCOREModel:
-    return build_padded_DW(
-        weight_shape=[*matching_spatial_params.kernel_size, input_channels],
-        input_size=matching_spatial_params.input_size,
-        paddings=matching_spatial_params.padding,
-        strides=matching_spatial_params.stride,
-    )
-
-
-# ----------------------------------------------------------------------------
-# TEST FUNCTIONS
-# ----------------------------------------------------------------------------
-
-
-def test_mutate(trf_pass: FuseConv2dPaddingPass, model: XCOREModel) -> None:
-    # extract original model info
-    subgraph = model.subgraphs[0]
-    assert len(subgraph.operators) == 2
-    pad_params_pad_ori = subgraph.operators[0].inputs[1].as_array()
-    pad_params_conv_ori = subgraph.operators[-1].custom_options["pad"]
-    in_ori, out_ori = subgraph.inputs[0], subgraph.outputs[0]
-    in_shape_ori, out_shape_ori = deepcopy(in_ori.shape), deepcopy(out_ori.shape)
-
-    # run mutating pass
-    trf_pass.run(model)
-    model.sanity_check()
-
-    # need to clean up dangling ops/tensors
-    CleanupManager(model).run_passes()
-    model.sanity_check()
-
-    # check operator
-    assert len(subgraph.operators) == 1
-    op = subgraph.operators[0]
-    assert op.operator_code.code is XCOREOpCodes.XC_conv2d_depthwise
-    assert len(op.inputs) == 3
-    assert len(op.outputs) == 1
-
-    # check input/output tensors
-    assert len(subgraph.inputs) == len(subgraph.outputs) == 1
-    in_new, out_new = subgraph.inputs[0], subgraph.outputs[0]
-    assert in_ori is in_new is op.inputs[0]
-    assert out_ori is out_new is op.outputs[0]
-    assert in_shape_ori == in_new.shape
-    assert out_shape_ori == out_new.shape
-
-    # check 'pad' parameters
-    pad_params_new = op.custom_options["pad"]
-    assert len(pad_params_new) == 2
-    assert -pad_params_new[0] - pad_params_conv_ori[0] == pad_params_pad_ori[1][0]
-    assert -pad_params_new[1] - pad_params_conv_ori[1] == pad_params_pad_ori[2][0]
-
-
-def test_non_matching_channel_batch_params(
-    trf_pass: FuseConv2dPaddingPass,
-    build_model: ModelBuilder,
-    non_matching_channel_batch_params: ParamCombination,
-    input_channels: int,
-) -> None:
-    model = build_model(
-        weight_shape=[*non_matching_channel_batch_params.kernel_size, input_channels],
-        input_size=non_matching_channel_batch_params.input_size,
-        paddings=non_matching_channel_batch_params.padding,
-        strides=non_matching_channel_batch_params.stride,
-    )
-    _test_non_matching_params(trf_pass, model)
-
-
-def test_non_matching_spatial_params(
-    trf_pass: FuseConv2dPaddingPass,
-    build_model: ModelBuilder,
-    non_matching_spatial_params: ParamCombination,
-    input_channels: int,
-) -> None:
-    model = build_model(
-        weight_shape=[*non_matching_spatial_params.kernel_size, input_channels],
-        input_size=non_matching_spatial_params.input_size,
-        paddings=non_matching_spatial_params.padding,
-        strides=non_matching_spatial_params.stride,
-    )
-    _test_non_matching_params(trf_pass, model)
-
-
-if __name__ == "__main__":
-    pytest.main()
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/test_RemovePaddingInputPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/test_RemovePaddingInputPass.py
deleted file mode 100644
index 57a7ef9db..000000000
--- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/test_RemovePaddingInputPass.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import pytest
-
-from copy import deepcopy
-
-from tflite2xcore.converter import CleanupManager
-from tflite2xcore.transformation_passes.padding_passes import RemovePaddingInputPass
-
-from ..model_builders import build_pad, build_non_input_pad
-from .conftest import (
-    PARAMS,
-    _test_non_matching_params,
-    test_matching_params,
-    update_params_with_paddings,
-)
-
-
-# ----------------------------------------------------------------------------
-# PARAMETER VALUES
-# ----------------------------------------------------------------------------
-
-
-def only_channel_pad(padding):
-    return padding[-1] != (0, 0) and (all(pad == (0, 0) for pad in padding[:-1]))
-
-
-PARAMS = update_params_with_paddings(deepcopy(PARAMS), is_matching=only_channel_pad)
-
-# NOTE: this is intentional to keep test case count lower
-PARAMS["default"]["paddings"] = PARAMS["smoke"]["paddings"]
-
-
-# ----------------------------------------------------------------------------
-# FIXTURES
-# ----------------------------------------------------------------------------
-
-
-@pytest.fixture()
-def trf_pass():
-    return RemovePaddingInputPass()
-
-
-@pytest.fixture()
-def model(input_shape, paddings):
-    return build_pad(input_shape=input_shape, paddings=paddings)
-
-
-# ----------------------------------------------------------------------------
-# TEST FUNCTIONS
-# ----------------------------------------------------------------------------
-
-
-def test_mutate(trf_pass, model):
-    # extract original padding values
-    subgraph = model.subgraphs[0]
-    assert len(subgraph.operators) == 1
-    out_ori = subgraph.outputs[0]
-
-    # run mutating pass
-    trf_pass.run(model)
-    model.sanity_check()
-
-    # need to clean up dangling ops/tensors
-    CleanupManager(model).run_passes()
-    model.sanity_check()
-
-    # check pad operator has been removed
-    assert len(subgraph.operators) == 0
-
-    # check input/output tensors - new input/output should be old output
-    assert len(subgraph.inputs) == len(subgraph.outputs) == 1
-    in_new, out_new = subgraph.inputs[0], subgraph.outputs[0]
-    assert in_new is out_ori
-    assert out_ori is out_new
-
-
-def test_non_matching_non_input_pad(trf_pass, input_shape, paddings):
-    model = build_non_input_pad(input_shape=input_shape, paddings=paddings)
-
-    for op in model.subgraphs[0].operators:
-        assert not trf_pass.match(op)
-
-
-if __name__ == "__main__":
-    pytest.main()
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/test_ReplacePadPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/test_ReplacePadPass.py
deleted file mode 100644
index 9e9c16763..000000000
--- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/test_ReplacePadPass.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import pytest
-import numpy as np
-from typing import Tuple
-from copy import deepcopy
-
-from tflite2xcore.transformation_passes import ReplacePadPass
-from tflite2xcore.xcore_schema import BuiltinOpCodes, XCOREModel, XCOREOpCodes
-
-from . import PaddingType
-from ..model_builders import build_pad, ModelBuilder
-
-from . import test_non_matching_paddings, test_replace_mutate as _test_replace_mutate
-from .conftest import (
-    PARAMS,
-    test_matching_params,
-    update_params_with_paddings,
-)
-
-
-# ----------------------------------------------------------------------------
-# PARAMETER VALUES
-# ----------------------------------------------------------------------------
-
-
-def is_matching(padding: PaddingType) -> bool:
-    return padding[0] == padding[3] == (0, 0)
-
-
-PARAMS = update_params_with_paddings(deepcopy(PARAMS), is_matching=is_matching)
-
-
-# ----------------------------------------------------------------------------
-# FIXTURES
-# ----------------------------------------------------------------------------
-
-
-@pytest.fixture()  # type: ignore
-def build_model() -> ModelBuilder:
-    return build_pad
-
-
-@pytest.fixture()  # type: ignore
-def trf_pass() -> ReplacePadPass:
-    return ReplacePadPass()
-
-
-@pytest.fixture()
-def new_opcode() -> XCOREOpCodes:
-    return XCOREOpCodes.XC_pad
-
-
-@pytest.fixture()  # type: ignore
-def model(input_shape: Tuple[int, int, int, int], paddings: PaddingType) -> XCOREModel:
-    return build_pad(input_shape=input_shape, paddings=paddings)
-
-
-# ----------------------------------------------------------------------------
-# TEST FUNCTIONS
-# ----------------------------------------------------------------------------
-
-
-def test_mutate(
-    trf_pass: ReplacePadPass, model: XCOREModel, new_opcode: XCOREOpCodes
-) -> None:
-    # extract original padding values
-    subgraph = model.subgraphs[0]
-    params_ori = subgraph.operators[-1].inputs[1].as_array().tolist()
-
-    # run mutating pass
-    trf_pass.run(model)
-    model.sanity_check()
-
-    _test_replace_mutate(trf_pass, model, new_opcode)
-
-    # check operators
-    operators = subgraph.operators
-    assert len(operators) == 1
-    op = operators[0]
-
-    # check tensors
-    assert len(op.inputs) == 2
-    assert len(op.outputs) == 1
-
-    # check parameters
-    params_new = op.inputs[1].as_array().tolist()
-    assert params_new == params_ori
-
-    zero_point_byte = np.int8(op.inputs[0].quantization["zero_point"][0]).tobytes()
-    pad_value_bytes = np.int32(op.custom_options["pad_value"]).tobytes()
-    assert pad_value_bytes == zero_point_byte * 4
-
-
-if __name__ == "__main__":
-    pytest.main()
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/test_SplitPaddingPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/test_SplitPaddingPass.py
deleted file mode 100644
index c7d27936f..000000000
--- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_padding_passes/test_SplitPaddingPass.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import pytest
-
-from copy import deepcopy
-
-from tflite2xcore.transformation_passes import SplitPaddingPass
-from tflite2xcore.xcore_schema import BuiltinOpCodes
-
-from ..model_builders import build_pad
-from . import test_non_matching_paddings
-from .conftest import (
-    PARAMS,
-    _test_non_matching_params,
-    test_matching_params,
-    update_params_with_paddings,
-)
-
-
-# ----------------------------------------------------------------------------
-# PARAMETER VALUES
-# ----------------------------------------------------------------------------
-
-
-def is_matching(padding):
-    return (padding[0] != (0, 0) or padding[3] != (0, 0)) and (
-        padding[1] != (0, 0) or padding[2] != (0, 0)
-    )
-
-
-PARAMS = update_params_with_paddings(deepcopy(PARAMS), is_matching=is_matching)
-
-
-# ----------------------------------------------------------------------------
-# FIXTURES
-# ----------------------------------------------------------------------------
-
-
-@pytest.fixture()
-def build_model():
-    return build_pad
-
-
-@pytest.fixture()
-def trf_pass():
-    return SplitPaddingPass()
-
-
-@pytest.fixture()
-def model(input_shape, paddings):
-    return build_pad(input_shape=input_shape, paddings=paddings)
-
-
-# ----------------------------------------------------------------------------
-# TEST FUNCTIONS
-# ----------------------------------------------------------------------------
-
-
-def test_mutate(trf_pass, model):
-    # extract original padding values
-    subgraph = model.subgraphs[0]
-    params_ori = subgraph.operators[-1].inputs[1].as_array()
-
-    # run mutating pass
-    trf_pass.run(model)
-    model.sanity_check()
-
-    # check operators
-    operators = subgraph.operators
-    assert len(operators) == 2
-    op_NC, op_HW = operators
-    assert op_NC.operator_code.code is op_HW.operator_code.code is BuiltinOpCodes.PAD
-    assert len(op_NC.inputs) == len(op_HW.inputs) == 2
-    assert len(op_NC.outputs) == len(op_HW.outputs) == 1
-
-    # check input/output tensors
-    assert len(subgraph.inputs) == len(subgraph.outputs) == 1
-    input_tensor, output_tensor = subgraph.inputs[0], subgraph.outputs[0]
-    assert input_tensor in op_NC.inputs
-    assert input_tensor not in op_NC.outputs + op_HW.inputs + op_HW.outputs
-    assert output_tensor in op_HW.outputs
-    assert output_tensor not in op_HW.inputs + op_NC.inputs + op_NC.outputs
-
-    # check wiring
-    assert op_NC.outputs[0] is op_HW.inputs[0]
-
-    # check parameters
-    params_NC = op_NC.inputs[1].as_array()
-    assert params_NC[1][0] == params_NC[2][0] == 0
-    assert params_NC[1][1] == params_NC[2][1] == 0
-    assert params_NC[0][0] == params_ori[0][0]
-    assert params_NC[0][1] == params_ori[0][1]
-    assert params_NC[3][0] == params_ori[3][0]
-    assert params_NC[3][1] == params_ori[3][1]
-
-    params_HW = op_HW.inputs[1].as_array()
-    assert params_HW[0][0] == params_HW[3][0] == 0
-    assert params_HW[0][1] == params_HW[3][1] == 0
-    assert params_HW[1][0] == params_ori[1][0]
-    assert params_HW[1][1] == params_ori[1][1]
-    assert params_HW[2][0] == params_ori[2][0]
-    assert params_HW[2][1] == params_ori[2][1]
-
-
-if __name__ == "__main__":
-    pytest.main()
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/__init__.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/conftest.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/conftest.py
deleted file mode 100644
index 8e3597913..000000000
--- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/conftest.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-from tflite2xcore.transformation_passes import ModelTransformationPass
-from tflite2xcore.xcore_model import XCOREModel
-
-from ..conftest import PARAMS, test_matching_params, _test_non_matching_params
-
-
-# ----------------------------------------------------------------------------
-# PARAMETER VALUES
-# ----------------------------------------------------------------------------
-
-PARAMS["extended"].update({"num_threads": [1, 2, 3, 4, 5]})
-
-PARAMS["default"].update({"num_threads": [1, 4, 5]})
-
-PARAMS["smoke"].update({"num_threads": [5]})
-
-
-# ----------------------------------------------------------------------------
-# TESTS
-# ----------------------------------------------------------------------------
-
-
-def test_mutate(
-    trf_pass: ModelTransformationPass, model: XCOREModel, num_threads: int
-) -> None:
-    op = model.subgraphs[0].operators[0]
-    assert "par" not in op.custom_options
-
-    trf_pass.run(model)
-    model.sanity_check()
-
-    _test_non_matching_params(trf_pass, model)
-    assert "par" in op.custom_options
diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/test_ParallelizeConv2dPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/test_ParallelizeConv2dPass.py
deleted file mode 100644
index 6b71ccd12..000000000
--- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/test_ParallelizeConv2dPass.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
- -import pytest - -from copy import deepcopy -from typing import Tuple, Callable - -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.transformation_passes import ( - ModelTransformationPass, - ParallelizeConv2dPass, -) - -from tflite2xcore.tests.test_transformation_passes.model_builders import ( - build_XC_conv2d_deep, - build_XC_conv2d_shallowin, - build_XC_conv2d_1x1, -) - -from ..test_conv2d_passes.conftest import PARAMS, weight_shape -from .conftest import test_matching_params, test_mutate, PARAMS as PAR_PARAMS - - -# ---------------------------------------------------------------------------- -# PARAMETER VALUES -# ---------------------------------------------------------------------------- - -PARAMS = deepcopy(PARAMS) - -for k in PARAMS: - PARAMS[k].update( - { - "num_threads": PAR_PARAMS[k]["num_threads"], - "model_builder": [ - build_XC_conv2d_deep, - build_XC_conv2d_shallowin, - build_XC_conv2d_1x1, - ], - } - ) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def trf_pass(num_threads: int) -> ModelTransformationPass: - return ParallelizeConv2dPass(num_threads=num_threads) - - -@pytest.fixture() -def model( - model_builder: Callable[..., XCOREModel], - weight_shape: Tuple[int, int, int, int], - input_size: Tuple[int, int], - strides: Tuple[int, int], -) -> XCOREModel: - return model_builder( - weight_shape=weight_shape, input_size=input_size, strides=strides - ) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/test_ParallelizeDepthwiseConv2dPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/test_ParallelizeDepthwiseConv2dPass.py deleted file mode 100644 index 591c85543..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/test_ParallelizeDepthwiseConv2dPass.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
- -import pytest - -from copy import deepcopy -from typing import Tuple - -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.transformation_passes import ( - ModelTransformationPass, - ParallelizeDepthwiseConv2dPass, -) - -from tflite2xcore.tests.test_transformation_passes.model_builders import ( - build_XC_conv2d_depthwise, -) - -from ..test_conv2d_passes.test_ReplaceDepthwiseConv2dPass import PARAMS, weight_shape -from .conftest import test_matching_params, test_mutate, PARAMS as PAR_PARAMS - - -# ---------------------------------------------------------------------------- -# PARAMETER VALUES -# ---------------------------------------------------------------------------- - -PARAMS = deepcopy(PARAMS) - -for k in PARAMS: - PARAMS[k].update({"num_threads": PAR_PARAMS[k]["num_threads"]}) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def trf_pass(num_threads: int) -> ModelTransformationPass: - return ParallelizeDepthwiseConv2dPass(num_threads=num_threads) - - -@pytest.fixture() -def model( - weight_shape: Tuple[int, int, int, int], - input_size: Tuple[int, int], - strides: Tuple[int, int], -) -> XCOREModel: - return build_XC_conv2d_depthwise( - weight_shape=weight_shape, input_size=input_size, strides=strides - ) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/test_ParallelizeFullyConnectedPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/test_ParallelizeFullyConnectedPass.py deleted file mode 100644 index f7e1acebe..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/test_ParallelizeFullyConnectedPass.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
- -import pytest - -from copy import deepcopy - -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.transformation_passes import ( - ModelTransformationPass, - ParallelizeFullyConnectedPass, -) - -from tflite2xcore.tests.test_transformation_passes.model_builders import build_XC_fc - -from ..test_fully_connected_passes.conftest import PARAMS -from .conftest import test_matching_params, test_mutate, PARAMS as PAR_PARAMS - - -# ---------------------------------------------------------------------------- -# PARAMETER VALUES -# ---------------------------------------------------------------------------- - -PARAMS = deepcopy(PARAMS) - -for k in PARAMS: - PARAMS[k].update({"num_threads": PAR_PARAMS[k]["num_threads"]}) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def trf_pass(num_threads: int) -> ModelTransformationPass: - return ParallelizeFullyConnectedPass(num_threads=num_threads) - - -@pytest.fixture() -def model(outputs: int, input_channels: int) -> XCOREModel: - return build_XC_fc(outputs=outputs, input_channels=input_channels) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/test_ParallelizeGlobalAveragePool2DPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/test_ParallelizeGlobalAveragePool2DPass.py deleted file mode 100644 index 66e8c9ae7..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/test_ParallelizeGlobalAveragePool2DPass.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from copy import deepcopy -from typing import Tuple - -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.transformation_passes import ( - ModelTransformationPass, - ParallelizeGlobalAveragePool2DPass, -) - -from tflite2xcore.tests.test_transformation_passes.model_builders import ( - build_XC_avgpool2d_global, -) - -from ..test_pooling_passes.test_ReplaceGlobalAveragePool2DPass import PARAMS -from .conftest import test_matching_params, test_mutate, PARAMS as PAR_PARAMS - - -# ---------------------------------------------------------------------------- -# PARAMETER VALUES -# ---------------------------------------------------------------------------- - -PARAMS = deepcopy(PARAMS) - -for k in PARAMS: - PARAMS[k].update({"num_threads": PAR_PARAMS[k]["num_threads"]}) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def trf_pass(num_threads: int) -> ModelTransformationPass: - return ParallelizeGlobalAveragePool2DPass(num_threads=num_threads) - - -@pytest.fixture() -def model( - input_shape: Tuple[int, int, int], reduction_dims: Tuple[int, ...]
-) -> XCOREModel: - return build_XC_avgpool2d_global( - input_shape=input_shape, reduction_dims=reduction_dims - ) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/test_ParallelizePool2DPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/test_ParallelizePool2DPass.py deleted file mode 100644 index 56c20beed..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/test_ParallelizePool2DPass.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from copy import deepcopy -from typing import Tuple, Callable - -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.transformation_passes import ( - ModelTransformationPass, - ParallelizePooling2DPass, -) - -from tflite2xcore.tests.test_transformation_passes.model_builders import ( - build_XC_maxpool2d, - build_XC_avgpool2d, -) - -from ..test_pooling_passes.conftest import PARAMS, pool_size -from .conftest import test_matching_params, test_mutate, PARAMS as PAR_PARAMS - - -# ---------------------------------------------------------------------------- -# PARAMETER VALUES -# ---------------------------------------------------------------------------- - -PARAMS = deepcopy(PARAMS) - -for k in PARAMS: - PARAMS[k].update( - { - "num_threads": PAR_PARAMS[k]["num_threads"], - "model_builder": [build_XC_maxpool2d, build_XC_avgpool2d], - } - ) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def trf_pass(num_threads: int) -> ModelTransformationPass: - return ParallelizePooling2DPass(num_threads=num_threads) - - -@pytest.fixture() -def model( - model_builder: Callable[..., XCOREModel], - input_shape: Tuple[int, int, int], - pool_size: Tuple[int, int], - strides: Tuple[int, int], -) -> XCOREModel: - return model_builder(input_shape=input_shape, pool_size=pool_size, strides=strides) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/test_ParallelizeRequant16To8Pass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/test_ParallelizeRequant16To8Pass.py deleted file mode 100644 index b68bd1933..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_parallelization_passes/test_ParallelizeRequant16To8Pass.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1.
- -import pytest - -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.transformation_passes import ( - ModelTransformationPass, - ParallelizeRequant16To8Pass, -) - -from tflite2xcore.tests.test_transformation_passes.model_builders import ( - build_XC_requantize_16_to_8, -) - -from .test_ParallelizeFullyConnectedPass import PARAMS -from .conftest import test_matching_params, test_mutate - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def trf_pass(num_threads: int) -> ModelTransformationPass: - return ParallelizeRequant16To8Pass(num_threads=num_threads) - - -@pytest.fixture() -def model(outputs: int, input_channels: int) -> XCOREModel: - return build_XC_requantize_16_to_8(outputs=outputs, input_channels=input_channels) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_pooling_passes/__init__.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_pooling_passes/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_pooling_passes/conftest.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_pooling_passes/conftest.py deleted file mode 100644 index d92fe4dad..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_pooling_passes/conftest.py +++ /dev/null @@ -1,270 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest -from typing import Tuple -from copy import deepcopy - -from tflite2xcore.transformation_passes import ModelTransformationPass -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.xcore_schema import Padding, ActivationFunctionType, XCOREOpCodes - -from tflite2xcore.tests.test_transformation_passes.model_builders import ModelBuilder -from ..conftest import ( - PARAMS, - _test_non_matching_params, - test_matching_params, - test_replace_mutate as _test_replace_mutate, -) - - -# ---------------------------------------------------------------------------- -# PARAMETER VALUES -# ---------------------------------------------------------------------------- - -PARAMS = deepcopy(PARAMS) - -PARAMS["default"].update( - { - "non_matching_input_channels": [1, 3, 9, 15], - "padding": [Padding.VALID], - "non_matching_padding": [Padding.SAME], - "fused_activation": [ActivationFunctionType.NONE], - "non_matching_fused_activation": [ - ActivationFunctionType.RELU, - ActivationFunctionType.RELU6, - ], - "stride_h": [1, 2], - "stride_w": [1, 2], - "pool_h": [1, 2, 3], - "pool_w": [1, 2, 3], - } -) - -PARAMS["smoke"].update( - { - "non_matching_input_channels": [1, 9], - "padding": [Padding.VALID], - "non_matching_padding": [Padding.SAME], - "fused_activation": [ActivationFunctionType.NONE], - "non_matching_fused_activation": [ActivationFunctionType.RELU], - "stride_h": [2], - "stride_w": [2], - "pool_h": [2, 3], - "pool_w": [2, 3], - } -) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def pool_size(pool_h: int, pool_w: int) -> Tuple[int, int]: - return (pool_h, pool_w) - - -@pytest.fixture() -def model( - build_model: ModelBuilder, - 
input_shape: Tuple[int, int, int], - pool_size: Tuple[int, int], - strides: Tuple[int, int], - padding: Padding, - fused_activation: ActivationFunctionType, -) -> XCOREModel: - return build_model( - input_shape=input_shape, - padding=padding, - pool_size=pool_size, - strides=strides, - fused_activation=fused_activation, - ) - - -# ---------------------------------------------------------------------------- -# TEST FUNCTIONS -# ---------------------------------------------------------------------------- - - -def test_mutate( - trf_pass: ModelTransformationPass, model: XCOREModel, custom_opcode: XCOREOpCodes -) -> None: - subgraph = model.subgraphs[0] - old_op = subgraph.operators[0] - strides = tuple(old_op.builtin_options[f"stride_{ax}"] for ax in ("h", "w")) - pool = tuple(old_op.builtin_options[f"filter_{ax}"] for ax in ("height", "width")) - - _test_replace_mutate(trf_pass, model, custom_opcode) - - custom_options = subgraph.operators[-1].custom_options - assert "pool" in custom_options - assert custom_options["pool"] == pool - assert "stride" in custom_options - assert custom_options["stride"] == strides - - -def test_non_matching_input_channels( - trf_pass: ModelTransformationPass, - build_model: ModelBuilder, - input_size: Tuple[int, int], - non_matching_input_channels: int, - pool_size: Tuple[int, int], - strides: Tuple[int, int], - padding: Padding, - fused_activation: ActivationFunctionType, -) -> None: - input_shape = (*input_size, non_matching_input_channels) - model = build_model( - input_shape=input_shape, - padding=padding, - pool_size=pool_size, - strides=strides, - fused_activation=fused_activation, - ) - _test_non_matching_params(trf_pass, model) - - -def test_non_matching_fused_activation( - trf_pass: ModelTransformationPass, - build_model: ModelBuilder, - input_shape: Tuple[int, int, int], - pool_size: Tuple[int, int], - strides: Tuple[int, int], - padding: Padding, - non_matching_fused_activation: ActivationFunctionType, -) -> None: - model = build_model( - input_shape=input_shape, - padding=padding, - pool_size=pool_size, - strides=strides, - fused_activation=non_matching_fused_activation, - ) - _test_non_matching_params(trf_pass, model) - - -def test_non_matching_input_height( - trf_pass: ModelTransformationPass, - build_model: ModelBuilder, - input_width: int, - non_matching_input_height: int, - input_channels: int, - pool_size: Tuple[int, int], - strides: Tuple[int, int], - padding: Padding, - fused_activation: ActivationFunctionType, -) -> None: - input_shape = (input_width, non_matching_input_height, input_channels) - model = build_model( - input_shape=input_shape, - padding=padding, - pool_size=pool_size, - strides=strides, - fused_activation=fused_activation, - ) - _test_non_matching_params(trf_pass, model) - - -def test_non_matching_input_width( - trf_pass: ModelTransformationPass, - build_model: ModelBuilder, - non_matching_input_width: int, - input_height: int, - input_channels: int, - pool_size: Tuple[int, int], - strides: Tuple[int, int], - padding: Padding, - fused_activation: ActivationFunctionType, -) -> None: - input_shape = (non_matching_input_width, input_height, input_channels) - model = build_model( - input_shape=input_shape, - padding=padding, - pool_size=pool_size, - strides=strides, - fused_activation=fused_activation, - ) - _test_non_matching_params(trf_pass, model) - - -def test_non_matching_pool_h( - trf_pass: ModelTransformationPass, - build_model: ModelBuilder, - input_shape: Tuple[int, int, int], - non_matching_pool_h: int, - pool_w: int, - 
strides: Tuple[int, int], - padding: Padding, - fused_activation: ActivationFunctionType, -) -> None: - model = build_model( - input_shape=input_shape, - padding=padding, - pool_size=(non_matching_pool_h, pool_w), - strides=strides, - fused_activation=fused_activation, - ) - _test_non_matching_params(trf_pass, model) - - -def test_non_matching_pool_w( - trf_pass: ModelTransformationPass, - build_model: ModelBuilder, - input_shape: Tuple[int, int, int], - pool_h: int, - non_matching_pool_w: int, - strides: Tuple[int, int], - padding: Padding, - fused_activation: ActivationFunctionType, -) -> None: - model = build_model( - input_shape=input_shape, - padding=padding, - pool_size=(pool_h, non_matching_pool_w), - strides=strides, - fused_activation=fused_activation, - ) - _test_non_matching_params(trf_pass, model) - - -def test_non_matching_stride_h( - trf_pass: ModelTransformationPass, - build_model: ModelBuilder, - input_shape: Tuple[int, int, int], - pool_size: Tuple[int, int], - non_matching_stride_h: int, - stride_w: int, - padding: Padding, - fused_activation: ActivationFunctionType, -) -> None: - model = build_model( - input_shape=input_shape, - padding=padding, - pool_size=pool_size, - strides=(non_matching_stride_h, stride_w), - fused_activation=fused_activation, - ) - _test_non_matching_params(trf_pass, model) - - -def test_non_matching_stride_w( - trf_pass: ModelTransformationPass, - build_model: ModelBuilder, - input_shape: Tuple[int, int, int], - pool_size: Tuple[int, int], - stride_h: int, - non_matching_stride_w: int, - padding: Padding, - fused_activation: ActivationFunctionType, -) -> None: - model = build_model( - input_shape=input_shape, - padding=padding, - pool_size=pool_size, - strides=(stride_h, non_matching_stride_w), - fused_activation=fused_activation, - ) - _test_non_matching_params(trf_pass, model) diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_pooling_passes/test_ReplaceAveragePool2D2x2Pass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_pooling_passes/test_ReplaceAveragePool2D2x2Pass.py deleted file mode 100644 index da57251ef..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_pooling_passes/test_ReplaceAveragePool2D2x2Pass.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
- -import pytest - -from copy import deepcopy - -from tflite2xcore.xcore_schema import Padding, XCOREOpCodes -from tflite2xcore.transformation_passes import ReplaceAveragePool2D2x2Pass - -from tflite2xcore.tests.test_transformation_passes.model_builders import ( - build_avgpool, - ModelBuilder, -) -from .conftest import ( - PARAMS, - test_matching_params, - test_mutate, - test_non_matching_input_channels, - test_non_matching_fused_activation, - test_non_matching_input_height, - test_non_matching_input_width, - test_non_matching_pool_h, - test_non_matching_pool_w, - test_non_matching_stride_h, - test_non_matching_stride_w, -) - - -# ---------------------------------------------------------------------------- -# PARAMETER VALUES -# ---------------------------------------------------------------------------- - -PARAMS = deepcopy(PARAMS) - -PARAMS["default"].update( - { - "input_height": [2, 4, 8, 12], - "non_matching_input_height": [3, 9, 13, 23], - "input_width": [2, 4, 8, 12], - "non_matching_input_width": [3, 9, 13, 23], - "padding": list(Padding), - "stride_h": [2], - "non_matching_stride_h": [1, 3], - "stride_w": [2], - "non_matching_stride_w": [1, 3], - "pool_h": [2], - "non_matching_pool_h": [1, 3], - "pool_w": [2], - "non_matching_pool_w": [1, 3], - } -) - -PARAMS["smoke"].update( - { - "input_height": [2, 12], - "non_matching_input_height": [3, 13], - "input_width": [2, 12], - "non_matching_input_width": [3, 13], - "padding": list(Padding), - "stride_h": [2], - "non_matching_stride_h": [3], - "stride_w": [2], - "non_matching_stride_w": [3], - "pool_h": [2], - "non_matching_pool_h": [3], - "pool_w": [2], - "non_matching_pool_w": [3], - } -) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def build_model() -> ModelBuilder: - return build_avgpool - - -@pytest.fixture() -def trf_pass() -> ReplaceAveragePool2D2x2Pass: - return ReplaceAveragePool2D2x2Pass() - - -@pytest.fixture() -def custom_opcode() -> XCOREOpCodes: - return XCOREOpCodes.XC_avgpool2d - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_pooling_passes/test_ReplaceAveragePool2DPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_pooling_passes/test_ReplaceAveragePool2DPass.py deleted file mode 100644 index edaedff3e..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_pooling_passes/test_ReplaceAveragePool2DPass.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
- -import pytest - -from tflite2xcore.transformation_passes import ReplaceAveragePool2DPass - -from tflite2xcore.tests.test_transformation_passes.model_builders import ( - build_avgpool, - ModelBuilder, -) -from .test_ReplaceAveragePool2D2x2Pass import custom_opcode -from .conftest import ( - PARAMS, - test_matching_params, - test_mutate, - test_non_matching_input_channels, - test_non_matching_fused_activation, -) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def build_model() -> ModelBuilder: - return build_avgpool - - -@pytest.fixture() -def trf_pass() -> ReplaceAveragePool2DPass: - return ReplaceAveragePool2DPass() - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_pooling_passes/test_ReplaceGlobalAveragePool2DPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_pooling_passes/test_ReplaceGlobalAveragePool2DPass.py deleted file mode 100644 index fd24baeee..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_pooling_passes/test_ReplaceGlobalAveragePool2DPass.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest -import itertools -from typing import Tuple -from copy import deepcopy - -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.xcore_schema import XCOREOpCodes -from tflite2xcore.transformation_passes import ReplaceGlobalAveragePool2DPass - -from tflite2xcore.tests.test_transformation_passes.model_builders import build_mean -from .conftest import ( - PARAMS, - test_matching_params, - _test_non_matching_params, - _test_replace_mutate as _test_mutate, -) - - -# ---------------------------------------------------------------------------- -# PARAMETER VALUES -# ---------------------------------------------------------------------------- - -PARAMS = deepcopy(PARAMS) - -PARAMS["default"].update({"reduction_dims": [(1, 2), (2, 1)]}) -PARAMS["default"].update( - { - "non_matching_reduction_dims": [ - t - for t in itertools.chain( - itertools.product(range(4)), - itertools.product(range(4), repeat=2), - itertools.product(range(4), repeat=3), - ) - if t not in PARAMS["default"]["reduction_dims"] - ] - } -) - -PARAMS["smoke"].update({"reduction_dims": PARAMS["default"]["reduction_dims"]}) -PARAMS["smoke"].update( - { - "non_matching_reduction_dims": [ - t - for t in itertools.chain( - itertools.product(range(1, 4)), itertools.product(range(1, 4), repeat=2) - ) - if t not in PARAMS["smoke"]["reduction_dims"] - ] - } -) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def trf_pass() -> ReplaceGlobalAveragePool2DPass: - return ReplaceGlobalAveragePool2DPass() - - -@pytest.fixture() -def model( - input_shape: Tuple[int, int, int], reduction_dims: Tuple[int, int] -) -> XCOREModel: - return build_mean(input_shape=input_shape, reduction_dims=reduction_dims) - - -# ---------------------------------------------------------------------------- -# TEST FUNCTIONS -# ---------------------------------------------------------------------------- - - -def test_mutate(trf_pass: ReplaceGlobalAveragePool2DPass, model: XCOREModel) -> None: 
- _test_mutate(trf_pass, model, new_opcode=XCOREOpCodes.XC_avgpool2d_global) - - # check bias/scale/offset tensor - op = model.subgraphs[0].operators[-1] - assert op.inputs[1].shape == (7,) - - -def test_non_matching_input_channels( - trf_pass: ReplaceGlobalAveragePool2DPass, - input_size: Tuple[int, int], - non_matching_input_channels: int, - reduction_dims: Tuple[int, int], -) -> None: - input_shape = (*input_size, non_matching_input_channels) - model = build_mean(input_shape=input_shape, reduction_dims=reduction_dims) - _test_non_matching_params(trf_pass, model) - - -def test_non_matching_reduction_dims( - trf_pass: ReplaceGlobalAveragePool2DPass, - input_shape: Tuple[int, int, int], - non_matching_reduction_dims: Tuple[int, ...], -) -> None: - model = build_mean( - input_shape=input_shape, reduction_dims=non_matching_reduction_dims - ) - _test_non_matching_params(trf_pass, model) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_pooling_passes/test_ReplaceMaxPool2D2x2Pass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_pooling_passes/test_ReplaceMaxPool2D2x2Pass.py deleted file mode 100644 index 5dd169ca5..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_pooling_passes/test_ReplaceMaxPool2D2x2Pass.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from tflite2xcore.xcore_schema import XCOREOpCodes -from tflite2xcore.transformation_passes import ReplaceMaxPool2D2x2Pass - -from tflite2xcore.tests.test_transformation_passes.model_builders import ( - build_maxpool, - ModelBuilder, -) -from .test_ReplaceAveragePool2D2x2Pass import PARAMS -from .conftest import ( - test_matching_params, - test_mutate, - test_non_matching_input_channels, - test_non_matching_fused_activation, - test_non_matching_input_height, - test_non_matching_input_width, - test_non_matching_pool_h, - test_non_matching_pool_w, - test_non_matching_stride_h, - test_non_matching_stride_w, -) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def build_model() -> ModelBuilder: - return build_maxpool - - -@pytest.fixture() -def trf_pass() -> ReplaceMaxPool2D2x2Pass: - return ReplaceMaxPool2D2x2Pass() - - -@pytest.fixture() -def custom_opcode() -> XCOREOpCodes: - return XCOREOpCodes.XC_maxpool2d - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_pooling_passes/test_ReplaceMaxPool2DPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_pooling_passes/test_ReplaceMaxPool2DPass.py deleted file mode 100644 index 81231b9d1..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_pooling_passes/test_ReplaceMaxPool2DPass.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
- -import pytest - -from tflite2xcore.transformation_passes import ReplaceMaxPool2DPass - -from tflite2xcore.tests.test_transformation_passes.model_builders import ( - build_maxpool, - ModelBuilder, -) -from .test_ReplaceMaxPool2D2x2Pass import custom_opcode -from .conftest import ( - PARAMS, - test_matching_params, - test_mutate, - test_non_matching_input_channels, - test_non_matching_fused_activation, -) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def build_model() -> ModelBuilder: - return build_maxpool - - -@pytest.fixture() -def trf_pass() -> ReplaceMaxPool2DPass: - return ReplaceMaxPool2DPass() - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_quantize_dequantize_passes/__init__.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_quantize_dequantize_passes/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_quantize_dequantize_passes/conftest.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_quantize_dequantize_passes/conftest.py deleted file mode 100644 index 8e0040582..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_quantize_dequantize_passes/conftest.py +++ /dev/null @@ -1,12 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -from ..conftest import ( - PARAMS, - _test_non_matching_params, - _make_name_type_pairs, - NON_INT8_TEST_TYPES, - NON_FLOAT32_TEST_TYPES, - test_matching_params, - test_non_matching_tensors, -) diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_quantize_dequantize_passes/test_CanonicalizeQuantizedInputPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_quantize_dequantize_passes/test_CanonicalizeQuantizedInputPass.py deleted file mode 100644 index 6b360077d..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_quantize_dequantize_passes/test_CanonicalizeQuantizedInputPass.py +++ /dev/null @@ -1,157 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
- -import pytest - -from tflite2xcore.xcore_schema import ( - XCOREModel, - Subgraph, - TensorType, - OperatorCode, - BuiltinOpCodes, -) -from tflite2xcore.transformation_passes import CanonicalizeQuantizedInputPass - - -@pytest.fixture() -def simple_model(): - subgraph = Subgraph(model=XCOREModel()) - - fin = subgraph.create_tensor( - "input", TensorType.FLOAT32, [1, 5, 5, 3], isinput=True - ) - qin = subgraph.create_tensor("quantized_input", TensorType.INT8, fin.shape) - subgraph.create_operator( - OperatorCode(BuiltinOpCodes.QUANTIZE), inputs=[fin], outputs=[qin] - ) - - qout = subgraph.create_tensor( - "quantized_output", TensorType.INT8, qin.shape, isoutput=True - ) - subgraph.create_operator( - OperatorCode(BuiltinOpCodes.ABS), inputs=[qin], outputs=[qout] - ) - - return subgraph.model - - -@pytest.fixture() -def dual_input_model(): - subgraph = Subgraph(model=XCOREModel()) - - fin1 = subgraph.create_tensor( - "input_1", TensorType.FLOAT32, [1, 5, 5, 3], isinput=True - ) - qin1 = subgraph.create_tensor("quantized_input_1", TensorType.INT8, fin1.shape) - subgraph.create_operator( - OperatorCode(BuiltinOpCodes.QUANTIZE), inputs=[fin1], outputs=[qin1] - ) - - fin2 = subgraph.create_tensor( - "input_2", TensorType.FLOAT32, fin1.shape, isinput=True - ) - qin2 = subgraph.create_tensor("quantized_input_2", TensorType.INT8, fin2.shape) - subgraph.create_operator( - OperatorCode(BuiltinOpCodes.QUANTIZE), inputs=[fin2], outputs=[qin2] - ) - - qout = subgraph.create_tensor( - "quantized_output", TensorType.INT8, qin1.shape, isoutput=True - ) - subgraph.create_operator( - OperatorCode(BuiltinOpCodes.ADD), inputs=[qin1, qin2], outputs=[qout] - ) - - return subgraph.model - - -@pytest.fixture() -def non_matching_model(): - subgraph = Subgraph(model=XCOREModel()) - - fin1 = subgraph.create_tensor( - "input_1", TensorType.FLOAT32, [1, 5, 5, 3], isinput=True - ) - qout1 = subgraph.create_tensor( - "quantized_output_1", TensorType.INT8, fin1.shape, isoutput=True - ) - subgraph.create_operator( - OperatorCode(BuiltinOpCodes.QUANTIZE), inputs=[fin1], outputs=[qout1] - ) - - fin2 = subgraph.create_tensor( - "input_2", TensorType.FLOAT32, [1, 3, 3, 8], isinput=True - ) - qout2 = subgraph.create_tensor( - "quantized_output_2", TensorType.INT8, fin2.shape, isoutput=True - ) - subgraph.create_operator( - OperatorCode(BuiltinOpCodes.QUANTIZE), inputs=[fin2], outputs=[qout2] - ) - - return subgraph.model - - -@pytest.fixture() -def trf_pass(): - return CanonicalizeQuantizedInputPass() - - -def test_match(simple_model, trf_pass): - assert trf_pass.match(simple_model.subgraphs[0].operators[0]) - - -def test_mutate(simple_model, trf_pass): - subgraph = simple_model.subgraphs[0] - trf_pass.mutate(subgraph.operators[0]) - subgraph.sanity_check() - - qin = subgraph.get_tensor("quantized_input") - qout = subgraph.get_tensor("quantized_output") - - assert len(subgraph.operators) == 1 - assert subgraph.operators[0].operator_code.code is BuiltinOpCodes.ABS - assert len(subgraph.tensors) == 2 - assert qin in subgraph.inputs and qin not in subgraph.outputs - assert qout in subgraph.outputs and qout not in subgraph.inputs - - -def test_run_simple(simple_model, trf_pass): - trf_pass.run(simple_model) - simple_model.sanity_check() - subgraph = simple_model.subgraphs[0] - - qin = subgraph.get_tensor("quantized_input") - qout = subgraph.get_tensor("quantized_output") - - assert len(subgraph.operators) == 1 - assert subgraph.operators[0].operator_code.code is BuiltinOpCodes.ABS - assert len(subgraph.tensors) == 2 - assert qin 
in subgraph.inputs and qin not in subgraph.outputs - assert qout in subgraph.outputs and qout not in subgraph.inputs - - -def test_run_dual_input(dual_input_model, trf_pass): - trf_pass.run(dual_input_model) - dual_input_model.sanity_check() - subgraph = dual_input_model.subgraphs[0] - - qin1 = subgraph.get_tensor("quantized_input_1") - qin2 = subgraph.get_tensor("quantized_input_2") - qout = subgraph.get_tensor("quantized_output") - - assert len(subgraph.operators) == 1 - assert subgraph.operators[0].operator_code.code is BuiltinOpCodes.ADD - assert len(subgraph.tensors) == 3 - assert qin1 in subgraph.inputs and qin1 not in subgraph.outputs - assert qin2 in subgraph.inputs and qin2 not in subgraph.outputs - assert qout in subgraph.outputs and qout not in subgraph.inputs - - -def test_non_match(trf_pass, non_matching_model): - for op in non_matching_model.subgraphs[0].operators: - assert not trf_pass.match(op) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_quantize_dequantize_passes/test_CanonicalizeQuantizedOutputPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_quantize_dequantize_passes/test_CanonicalizeQuantizedOutputPass.py deleted file mode 100644 index 0b26c9498..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_quantize_dequantize_passes/test_CanonicalizeQuantizedOutputPass.py +++ /dev/null @@ -1,167 +0,0 @@ -# Copyright 2019-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from copy import deepcopy - -from tflite2xcore.xcore_schema import TensorType, OperatorCode, BuiltinOpCodes -from tflite2xcore.transformation_passes import CanonicalizeQuantizedOutputPass - -from tflite2xcore.tests.test_transformation_passes.model_builders import ( - build_split, - build_dequantize, - build_abs, - _glue_ops, -) - -from .conftest import ( - PARAMS, - _test_non_matching_params, - _make_name_type_pairs, - NON_INT8_TEST_TYPES, - NON_FLOAT32_TEST_TYPES, - test_matching_params, - test_non_matching_tensors, -) - - -# ---------------------------------------------------------------------------- -# PARAMETER VALUES -# ---------------------------------------------------------------------------- - -PARAMS = deepcopy(PARAMS) - -_NON_MATCHING_TENSORS = [ - {**d1, **d2} # the types of "input" and "output" are changed concurrently - for d1, d2 in zip( - _make_name_type_pairs("input", NON_INT8_TEST_TYPES), - _make_name_type_pairs("output", NON_INT8_TEST_TYPES), - ) -] + list(_make_name_type_pairs("output_dequantized", NON_FLOAT32_TEST_TYPES)) - -PARAMS["default"].update( - {"num_splits": [2, 4], "non_matching_tensors": _NON_MATCHING_TENSORS} -) - -PARAMS["smoke"].update( - {"num_splits": [2], "non_matching_tensors": _NON_MATCHING_TENSORS[::2]} -) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def model(input_shape): - model = build_abs(input_shape=input_shape, tensor_type=TensorType.INT8) - subgraph = model.subgraphs[0] - - build_dequantize(subgraph, input_shape=input_shape) - - _glue_ops(*subgraph.operators[:2]) - - return model - - -@pytest.fixture() -def model_multi_out(input_shape, num_splits): - model = build_split( - input_shape=input_shape, - num_splits=num_splits, - tensor_type=TensorType.INT8, - axis=2, - ) - subgraph = 
model.subgraphs[0] - - op = model.subgraphs[0].operators[0] - for qout in op.outputs: - subgraph.outputs.remove(qout) - fout = subgraph.create_tensor( - qout.name + "_float", TensorType.FLOAT32, qout.shape, isoutput=True - ) - subgraph.create_operator( - OperatorCode(BuiltinOpCodes.DEQUANTIZE), inputs=[qout], outputs=[fout] - ) - - return model - - -@pytest.fixture() -def model_non_matching_consumer(model, input_shape): - subgraph = model.subgraphs[0] - op_deq = subgraph.operators[-1] - - build_abs(subgraph, input_shape=input_shape, tensor_type=op_deq.outputs[0].type) - op_abs_2 = subgraph.operators[-1] - _glue_ops(op_deq, op_abs_2) # this removes op_deq.outputs[0] - subgraph.outputs.append(op_abs_2.inputs[0]) # so we put back its replacement - - return model - - -@pytest.fixture() -def trf_pass(): - return CanonicalizeQuantizedOutputPass() - - -# ---------------------------------------------------------------------------- -# TEST FUNCTIONS -# ---------------------------------------------------------------------------- - - -def test_mutate(model, trf_pass): - subgraph = model.subgraphs[0] - trf_pass.mutate(subgraph.operators[1]) - subgraph.sanity_check() - - qin = subgraph.get_tensor("input") - qout = subgraph.get_tensor("output") - - assert len(subgraph.operators) == 1 - assert subgraph.operators[0].operator_code.code is BuiltinOpCodes.ABS - assert len(subgraph.tensors) == 2 - assert qin in subgraph.inputs - assert qin not in subgraph.outputs - assert qout in subgraph.outputs - assert qout not in subgraph.inputs - - -def test_mutate_multi_out(model_multi_out, num_splits, trf_pass): - trf_pass.run(model_multi_out) - model_multi_out.sanity_check() - subgraph = model_multi_out.subgraphs[0] - - assert len(subgraph.operators) == 1 - assert subgraph.operators[0].operator_code.code is BuiltinOpCodes.SPLIT - assert len(subgraph.tensors) == 2 + num_splits # split has two inputs - - taxis = subgraph.get_tensor("axis") - assert taxis not in subgraph.outputs - assert taxis not in subgraph.inputs - - tin = subgraph.get_tensor("input") - assert tin in subgraph.inputs - assert tin not in subgraph.outputs - - assert len(subgraph.outputs) == num_splits - for j, tout in enumerate(subgraph.outputs): - assert tout not in subgraph.inputs, f"subgraph.outputs[{j}]" - - -def test_non_matching_input(trf_pass, input_shape): - # NOTE: a single DEQUANTIZE will always have an input tensor that is an input - # to the subgraph, hence it should not be matched - model = build_dequantize(input_shape=input_shape) - _test_non_matching_params(trf_pass, model) - - -def test_non_matching_consumers(trf_pass, model_non_matching_consumer): - for j, op in enumerate(model_non_matching_consumer.subgraphs[0].operators): - assert not trf_pass.match(op), f"subgraphs[0].operators[{j}]" - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_quantize_dequantize_passes/test_LegalizeFloatInputPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_quantize_dequantize_passes/test_LegalizeFloatInputPass.py deleted file mode 100644 index 4eb1869e7..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_quantize_dequantize_passes/test_LegalizeFloatInputPass.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright 2019-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
- -import pytest - -from tflite2xcore.xcore_schema import ( - XCOREModel, - Subgraph, - TensorType, - OperatorCode, - BuiltinOpCodes, -) -from tflite2xcore.transformation_passes import LegalizeFloatInputPass - - -@pytest.fixture() -def simple_model(): - subgraph = Subgraph(model=XCOREModel()) - - qin = subgraph.create_tensor( - "quantized_input", TensorType.INT8, [1, 5, 5, 3], isinput=True - ) - qout = subgraph.create_tensor( - "quantized_output", TensorType.INT8, qin.shape, isoutput=True - ) - subgraph.create_operator( - OperatorCode(BuiltinOpCodes.ABS), inputs=[qin], outputs=[qout] - ) - - return subgraph.model - - -@pytest.fixture() -def dual_input_model(): - subgraph = Subgraph(model=XCOREModel()) - - qin1 = subgraph.create_tensor( - "quantized_input_1", TensorType.INT8, [1, 5, 5, 3], isinput=True - ) - qin2 = subgraph.create_tensor( - "quantized_input_2", TensorType.INT8, qin1.shape, isinput=True - ) - qout = subgraph.create_tensor( - "quantized_output", TensorType.INT8, qin1.shape, isoutput=True - ) - subgraph.create_operator( - OperatorCode(BuiltinOpCodes.ADD), inputs=[qin1, qin2], outputs=[qout] - ) - - return subgraph.model - - -@pytest.fixture() -def non_matching_model(): - subgraph = Subgraph(model=XCOREModel()) - - qin = subgraph.create_tensor( - "quantized_input", TensorType.INT32, [1, 5, 5, 3], isinput=True - ) - qout = subgraph.create_tensor( - "quantized_output", TensorType.INT32, qin.shape, isoutput=True - ) - subgraph.create_operator( - OperatorCode(BuiltinOpCodes.ABS), inputs=[qin], outputs=[qout] - ) - - fin = subgraph.create_tensor( - "float_input", TensorType.FLOAT32, [1, 3, 3, 8], isinput=True - ) - fout = subgraph.create_tensor( - "float_output", TensorType.FLOAT32, fin.shape, isoutput=True - ) - subgraph.create_operator( - OperatorCode(BuiltinOpCodes.ABS), inputs=[fin], outputs=[fout] - ) - - return subgraph.model - - -@pytest.fixture() -def trf_pass(): - return LegalizeFloatInputPass() - - -def test_match(simple_model, trf_pass): - assert trf_pass.match(simple_model.subgraphs[0].inputs[0]) - - -def test_mutate(simple_model, trf_pass): - subgraph = simple_model.subgraphs[0] - trf_pass.mutate(subgraph.inputs[0]) - simple_model.sanity_check() - - qin = subgraph.get_tensor("quantized_input") - fin = subgraph.get_tensor("quantized_input_float") - qout = subgraph.get_tensor("quantized_output") - - assert len(subgraph.operators) == 2 - assert len(subgraph.tensors) == 3 - assert len(subgraph.inputs) == 1 - assert len(subgraph.outputs) == 1 - assert fin in subgraph.inputs and fin not in subgraph.outputs - assert qin not in (subgraph.inputs + subgraph.outputs) - assert qout in subgraph.outputs and qout not in subgraph.inputs - - -def test_run_simple(simple_model, trf_pass): - trf_pass.run(simple_model) - simple_model.sanity_check() - subgraph = simple_model.subgraphs[0] - - qin = subgraph.get_tensor("quantized_input") - fin = subgraph.get_tensor("quantized_input_float") - qout = subgraph.get_tensor("quantized_output") - - assert len(subgraph.operators) == 2 - assert len(subgraph.tensors) == 3 - assert len(subgraph.inputs) == 1 - assert len(subgraph.outputs) == 1 - assert fin in subgraph.inputs and fin not in subgraph.outputs - assert qin not in (subgraph.inputs + subgraph.outputs) - assert qout in subgraph.outputs and qout not in subgraph.inputs - - -def test_run_dual_input(dual_input_model, trf_pass): - trf_pass.run(dual_input_model) - dual_input_model.sanity_check() - subgraph = dual_input_model.subgraphs[0] - - qin1 = subgraph.get_tensor("quantized_input_1") - qin2 = 
subgraph.get_tensor("quantized_input_2") - fin1 = subgraph.get_tensor("quantized_input_1_float") - fin2 = subgraph.get_tensor("quantized_input_2_float") - qout = subgraph.get_tensor("quantized_output") - - assert len(subgraph.operators) == 3 - assert len(subgraph.tensors) == 5 - assert len(subgraph.inputs) == 2 - assert len(subgraph.outputs) == 1 - assert qin1 not in (subgraph.inputs + subgraph.outputs) - assert qin2 not in (subgraph.inputs + subgraph.outputs) - assert fin1 in subgraph.inputs and fin1 not in subgraph.outputs - assert fin2 in subgraph.inputs and fin2 not in subgraph.outputs - assert qout in subgraph.outputs and qout not in subgraph.inputs - - -def test_non_match(trf_pass, non_matching_model): - for input_tensor in non_matching_model.subgraphs[0].inputs: - assert not trf_pass.match(input_tensor) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_quantize_dequantize_passes/test_LegalizeFloatOutputPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_quantize_dequantize_passes/test_LegalizeFloatOutputPass.py deleted file mode 100644 index a6f40b9b8..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_quantize_dequantize_passes/test_LegalizeFloatOutputPass.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright 2019-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from tflite2xcore.xcore_schema import ( - XCOREModel, - Subgraph, - TensorType, - OperatorCode, - BuiltinOpCodes, -) -from tflite2xcore.transformation_passes import LegalizeFloatOutputPass - -from .test_LegalizeFloatInputPass import simple_model, non_matching_model - - -@pytest.fixture() -def dual_output_model(): - subgraph = Subgraph(model=XCOREModel()) - - # TODO: add operator options to specify split axis and number - qin = subgraph.create_tensor( - "quantized_input", TensorType.INT8, [1, 5, 5, 4], isinput=True - ) - qout1 = subgraph.create_tensor( - "quantized_output_1", TensorType.INT8, [1, 5, 5, 2], isoutput=True - ) - qout2 = subgraph.create_tensor( - "quantized_output_2", TensorType.INT8, [1, 5, 5, 2], isoutput=True - ) - subgraph.create_operator( - OperatorCode(BuiltinOpCodes.SPLIT), inputs=[qin], outputs=[qout1, qout2] - ) - - return subgraph.model - - -@pytest.fixture() -def trf_pass(): - return LegalizeFloatOutputPass() - - -def test_match(simple_model, trf_pass): - assert trf_pass.match(simple_model.subgraphs[0].outputs[0]) - - -def test_mutate(simple_model, trf_pass): - subgraph = simple_model.subgraphs[0] - trf_pass.mutate(subgraph.outputs[0]) - subgraph.sanity_check() - - qin = subgraph.get_tensor("quantized_input") - qout = subgraph.get_tensor("quantized_output") - fout = subgraph.get_tensor("quantized_output_float") - - assert len(subgraph.operators) == 2 - assert len(subgraph.tensors) == 3 - assert len(subgraph.inputs) == 1 - assert len(subgraph.outputs) == 1 - assert qin in subgraph.inputs and qin not in subgraph.outputs - assert qout not in (subgraph.inputs + subgraph.outputs) - assert fout in subgraph.outputs and fout not in subgraph.inputs - - -def test_run(simple_model, trf_pass): - trf_pass.run(simple_model) - simple_model.sanity_check() - subgraph = simple_model.subgraphs[0] - - qin = subgraph.get_tensor("quantized_input") - qout = subgraph.get_tensor("quantized_output") - fout = subgraph.get_tensor("quantized_output_float") - - assert len(subgraph.operators) == 2 - assert 
len(subgraph.tensors) == 3 - assert len(subgraph.inputs) == 1 - assert len(subgraph.outputs) == 1 - assert qin in subgraph.inputs and qin not in subgraph.outputs - assert qout not in (subgraph.inputs + subgraph.outputs) - assert fout in subgraph.outputs and fout not in subgraph.inputs - - -def test_run_dual_output(dual_output_model, trf_pass): - trf_pass.run(dual_output_model) - dual_output_model.sanity_check() - subgraph = dual_output_model.subgraphs[0] - - qin = subgraph.get_tensor("quantized_input") - qout_1 = subgraph.get_tensor("quantized_output_1") - qout_2 = subgraph.get_tensor("quantized_output_2") - fout_1 = subgraph.get_tensor("quantized_output_1_float") - fout_2 = subgraph.get_tensor("quantized_output_2_float") - - assert len(subgraph.operators) == 3 - assert len(subgraph.tensors) == 5 - assert len(subgraph.inputs) == 1 - assert len(subgraph.outputs) == 2 - assert qin in subgraph.inputs and qin not in subgraph.outputs - assert qout_1 not in (subgraph.inputs + subgraph.outputs) - assert qout_2 not in (subgraph.inputs + subgraph.outputs) - assert fout_1 in subgraph.outputs and fout_1 not in subgraph.inputs - assert fout_2 in subgraph.outputs and fout_2 not in subgraph.inputs - - -def test_non_match(trf_pass, non_matching_model): - for output_tensor in non_matching_model.subgraphs[0].outputs: - assert not trf_pass.match(output_tensor) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_quantize_dequantize_passes/test_RemoveRedundantInt8RequantizationPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_quantize_dequantize_passes/test_RemoveRedundantInt8RequantizationPass.py deleted file mode 100644 index 6dd039653..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_quantize_dequantize_passes/test_RemoveRedundantInt8RequantizationPass.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
- -import pytest -import itertools -from copy import deepcopy -from typing import Tuple - -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.xcore_schema import TensorType, BuiltinOpCodes -from tflite2xcore.transformation_passes import RemoveRedundantInt8RequantizationPass - -from tflite2xcore.tests.test_transformation_passes.model_builders import ( - build_fc, - build_abs, - _glue_quantize, -) - -from ..test_fully_connected_passes.conftest import PARAMS -from .conftest import ( - NON_INT8_TEST_TYPES, - _make_name_type_pairs, - _test_non_matching_params, - test_matching_params, - test_non_matching_tensors, -) - - -# ---------------------------------------------------------------------------- -# PARAMETER VALUES -# ---------------------------------------------------------------------------- - -PARAMS = deepcopy(PARAMS) - -_NON_MATCHING_TENSORS = list( - itertools.chain( - _make_name_type_pairs("output_quantized", NON_INT8_TEST_TYPES), - _make_name_type_pairs("output", NON_INT8_TEST_TYPES), - ) -) - -PARAMS["extended"].update({"non_matching_tensors": _NON_MATCHING_TENSORS}) - -PARAMS["default"].update({"non_matching_tensors": _NON_MATCHING_TENSORS[::2]}) - -PARAMS["smoke"].update({"non_matching_tensors": _NON_MATCHING_TENSORS[::4]}) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def trf_pass() -> RemoveRedundantInt8RequantizationPass: - return RemoveRedundantInt8RequantizationPass() - - -@pytest.fixture() -def model(input_shape: Tuple[int, int, int], outputs: int) -> XCOREModel: - model = build_fc(input_shape=input_shape, outputs=outputs) - _glue_quantize(model.subgraphs[0].operators[0]) - return model - - -# ---------------------------------------------------------------------------- -# TESTS -# ---------------------------------------------------------------------------- - - -def test_mutate( - model: XCOREModel, trf_pass: RemoveRedundantInt8RequantizationPass -) -> None: - subgraph = model.subgraphs[0] - qin = subgraph.get_tensor("input") - qout = subgraph.get_tensor("output_quantized") - - trf_pass.mutate(subgraph.operators[1]) - subgraph.sanity_check() - - assert len(subgraph.operators) == 1 - assert subgraph.operators[0].operator_code.code is BuiltinOpCodes.FULLY_CONNECTED - assert len(subgraph.tensors) == 3 + 1 - assert qin in subgraph.inputs - assert qin not in subgraph.outputs - assert qout in subgraph.outputs - assert qout not in subgraph.inputs - - -def test_non_matching_consumers( - trf_pass: RemoveRedundantInt8RequantizationPass, model: XCOREModel -) -> None: - _glue_quantize(model.subgraphs[0].operators[0]) - _test_non_matching_params(trf_pass, model) - - -def test_non_matching_op( - trf_pass: RemoveRedundantInt8RequantizationPass, input_shape: Tuple[int, int, int] -) -> None: - model = build_abs(input_shape=input_shape, tensor_type=TensorType.INT8) - _glue_quantize(model.subgraphs[0].operators[0]) - _test_non_matching_params(trf_pass, model) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_renaming_passes/__init__.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_renaming_passes/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_renaming_passes/conftest.py 
b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_renaming_passes/conftest.py deleted file mode 100644 index e377bccc9..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_renaming_passes/conftest.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -from ..conftest import PARAMS, test_matching_params, _test_non_matching_params diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_renaming_passes/test_LegalizeOperatorOutputTensorNamePass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_renaming_passes/test_LegalizeOperatorOutputTensorNamePass.py deleted file mode 100644 index 48009243f..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_renaming_passes/test_LegalizeOperatorOutputTensorNamePass.py +++ /dev/null @@ -1,183 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from copy import deepcopy - -from tflite2xcore.xcore_schema import TensorType -from tflite2xcore.transformation_passes import LegalizeOperatorOutputTensorNamePass - -from tflite2xcore.tests.test_transformation_passes.model_builders import ( - build_relu, - build_consecutive_pads, - build_split, -) - -from .conftest import ( - PARAMS, - test_matching_params as _test_matching_params, - _test_non_matching_params, -) - -# ---------------------------------------------------------------------------- -# PARAMETER VALUES -# ---------------------------------------------------------------------------- - -PARAMS = deepcopy(PARAMS) - -PARAMS["default"].update({"num_splits": [2, 4]}) - -PARAMS["smoke"].update({"num_splits": [2]}) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def trf_pass(): - return LegalizeOperatorOutputTensorNamePass() - - -@pytest.fixture() -def model_simple(input_shape): - return build_relu(input_shape=input_shape, tensor_type=TensorType.INT8) - - -@pytest.fixture() -def model_multi_op(input_shape): - paddings = [[0] * 2] * 4 - return build_consecutive_pads( - input_shape=[1, *input_shape], paddings_1=paddings, paddings_2=paddings - ) - - -@pytest.fixture() -def model_multi_out(input_shape, num_splits): - return build_split( - input_shape=input_shape, - num_splits=num_splits, - tensor_type=TensorType.INT8, - axis=2, - ) - - -@pytest.fixture() -def model_multi_out_partial(input_shape, num_splits): - model = build_split( - input_shape=input_shape, - num_splits=num_splits, - tensor_type=TensorType.INT8, - axis=2, - ) - op = model.subgraphs[0].operators[0] - for j, tensor in enumerate(op.outputs[1:]): - tensor.name = f"{op.name}/output_{j+1}" - return model - - -# ---------------------------------------------------------------------------- -# TEST FUNCTIONS -# ---------------------------------------------------------------------------- - - -def test_matching_simple(trf_pass, model_simple): - _test_matching_params(trf_pass, model_simple) - - -def test_matching_multi_op(trf_pass, model_multi_op): - operators = model_multi_op.subgraphs[0].operators - assert trf_pass.match(operators[0]) - assert trf_pass.match(operators[1]) - - -def test_matching_multi_out(trf_pass, model_multi_out): - _test_matching_params(trf_pass, 
model_multi_out) - - -def test_matching_multi_out_partial(trf_pass, model_multi_out_partial): - _test_matching_params(trf_pass, model_multi_out_partial) - - -def test_non_matching_simple(trf_pass, model_simple): - subgraph = model_simple.subgraphs[0] - op = subgraph.operators[0] - t_out = op.outputs[0] - t_out.name = f"{op.name}/output" - _test_non_matching_params(trf_pass, model_simple) - - -def test_non_matching_multi_op(trf_pass, model_multi_op): - subgraph = model_multi_op.subgraphs[0] - for op in subgraph.operators: - assert len(op.outputs) == 1 - t_out = op.outputs[0] - t_out.name = f"{op.name}/output" - - for j, op in enumerate(subgraph.operators): - assert not trf_pass.match(op), f"op {j} should not be matched" - - -def test_non_matching_multi_out(trf_pass, model_multi_out): - subgraph = model_multi_out.subgraphs[0] - op = subgraph.operators[0] - for j, tensor in enumerate(op.outputs): - tensor.name = f"{op.name}/output_{j}" - - _test_non_matching_params(trf_pass, model_multi_out) - - -def test_mutate_simple(trf_pass, model_simple): - # run mutating pass - trf_pass.run(model_simple) - model_simple.sanity_check() - - op = model_simple.subgraphs[0].operators[-1] - assert op.outputs[0].name == f"{op.name}/output" - - -def test_mutate_multi_op(trf_pass, model_multi_op): - # run mutating pass - trf_pass.run(model_multi_op) - model_multi_op.sanity_check() - - for j, op in enumerate(model_multi_op.subgraphs[0].operators): - expected_name = f"{op.name}/output" - name = op.outputs[0].name - assert ( - name == expected_name - ), f"op {j} name: expected '{expected_name}', found '{name}'" - - -def test_mutate_multi_out(trf_pass, model_multi_out): - # run mutating pass - trf_pass.run(model_multi_out) - model_multi_out.sanity_check() - - op = model_multi_out.subgraphs[0].operators[-1] - for j, tensor in enumerate(op.outputs): - expected_name = f"{op.name}/output_{j}" - name = tensor.name - assert ( - name == expected_name - ), f"tensor {j} name: expected '{expected_name}', found '{name}'" - - -def test_mutate_multi_out_partial(trf_pass, model_multi_out_partial): - # run mutating pass - trf_pass.run(model_multi_out_partial) - model_multi_out_partial.sanity_check() - - op = model_multi_out_partial.subgraphs[0].operators[-1] - for j, tensor in enumerate(op.outputs): - expected_name = f"{op.name}/output_{j}" - name = tensor.name - assert ( - name == expected_name - ), f"tensor {j} name: expected '{expected_name}', found '{name}'" - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_reshape_passes/__init__.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_reshape_passes/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_reshape_passes/conftest.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_reshape_passes/conftest.py deleted file mode 100644 index 9d10ecea3..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_reshape_passes/conftest.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
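The renaming tests above encode a simple convention: an operator's outputs are named after the operator, with an index suffix only for multi-output ops. A minimal sketch of that rule (hypothetical helper, derived from the test expectations):

    # Legal name for output j of an operator with n outputs.
    def legal_output_name(op, j: int) -> str:
        if len(op.outputs) == 1:
            return f"{op.name}/output"
        return f"{op.name}/output_{j}"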
- -import numpy as np -from copy import deepcopy -from itertools import product, chain -from typing import Tuple, NamedTuple, Callable - -from ..conftest import ( - ParamsType, - PARAMS, - _test_non_matching_params, - test_matching_params, -) - - -# ---------------------------------------------------------------------------- -# PARAMETER VALUES -# ---------------------------------------------------------------------------- - -PARAMS = deepcopy(PARAMS) - -PARAMS["extended"].update( - { - "input_batch": [1, 2], - "input_channels": [4, 8, 16], - "outputs": [2, 10], - } -) - -PARAMS["default"].update( - { - "input_batch": [1, 2], - "input_channels": [4, 32], - "outputs": [2, 10], - } -) - -PARAMS["smoke"].update( - { - "input_batch": [1], - "input_channels": [4, 32], - "outputs": [10], - } -) - - -# ---------------------------------------------------------------------------- -# HELPERS -# ---------------------------------------------------------------------------- - - -class ReshapeTuple(NamedTuple): - input: Tuple[int, ...] - output: Tuple[int, ...] - - -def update_params_with_reshape( - PARAMS: ParamsType, *, is_matching: Callable[[ReshapeTuple], bool] -) -> ParamsType: - for params in PARAMS.values(): - - def get_product_shape(*, dim=4, order="NHWC"): - if dim == 4: - if order == "NHWC": - return product( - params["input_batch"], - params["input_height"], - params["input_width"], - params["input_channels"], - ) - else: - return product( - params["input_batch"], - params["input_channels"], - params["input_height"], - params["input_width"], - ) - else: - return ( - (*p[: dim - 1], np.prod(p[dim - 1 :])) - for p in get_product_shape(dim=dim + 1, order=order) - ) - - all_reshapes = ( - ReshapeTuple(*p) - for p in chain( - product( - get_product_shape(dim=4), get_product_shape(dim=4, order="NCHW") - ), - product(get_product_shape(dim=4), get_product_shape(dim=3)), - product(get_product_shape(dim=3), get_product_shape(dim=2)), - ) - ) - - matching_reshape = params["reshape"] = [] - non_matching_reshape = params["non_matching_reshape"] = [] - for reshape in all_reshapes: - # this is a bit wasteful - if np.prod(reshape.input) == np.prod(reshape.output): - if is_matching(reshape): - matching_reshape.append(reshape) - else: - non_matching_reshape.append(reshape) - - return PARAMS diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_reshape_passes/test_CanonializeReshapePass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_reshape_passes/test_CanonializeReshapePass.py deleted file mode 100644 index 49beb7555..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_reshape_passes/test_CanonializeReshapePass.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
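The pass tested below reduces RESHAPE to its canonical single-input form: when the output shape is already static, the second (shape-tensor) input can be dropped, which is why test_mutate checks that the op goes from two inputs to one. A hedged sketch of the mutation (hypothetical, not the archived implementation):

    # Hypothetical sketch: detach the shape tensor from a RESHAPE op;
    # the dangling tensor is collected later by the cleanup passes.
    def canonicalize_reshape(op) -> None:
        shape_tensor = op.inputs[1]
        shape_tensor.consumers.remove(op)
        op.inputs = op.inputs[:1]  # keep only the data input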
- -import pytest - -from copy import deepcopy - -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.converter import CleanupManager -from tflite2xcore.transformation_passes.reshape_passes import CanonicalizeReshapePass - -from ..model_builders import build_reshape -from .conftest import ( - ReshapeTuple, - PARAMS, - _test_non_matching_params, - test_matching_params, - update_params_with_reshape, -) - - -# ---------------------------------------------------------------------------- -# PARAMETER VALUES -# ---------------------------------------------------------------------------- - -PARAMS = update_params_with_reshape(deepcopy(PARAMS), is_matching=lambda _: True) - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def trf_pass() -> CanonicalizeReshapePass: - return CanonicalizeReshapePass() - - -@pytest.fixture() -def model(reshape: ReshapeTuple) -> XCOREModel: - return build_reshape(input_shape=reshape.input, output_shape=reshape.output) - - -# ---------------------------------------------------------------------------- -# TESTS -# ---------------------------------------------------------------------------- - - -def test_mutate(trf_pass: CanonicalizeReshapePass, model: XCOREModel) -> None: - - subgraph = model.subgraphs[0] - assert len(subgraph.operators) == 1 - - in_ori, out_ori = subgraph.inputs[0], subgraph.outputs[0] - - assert (len(subgraph.operators[0].inputs)) == 2 - - # run mutating pass - trf_pass.run(model) - model.sanity_check() - - # need to clean up dangling ops/tensors - CleanupManager(model).run_passes() - model.sanity_check() - - assert len(subgraph.operators) == 1 - op = subgraph.operators[0] - assert len(op.inputs) == 1 - assert len(op.outputs) == 1 - - # check input/output tensors - assert len(subgraph.inputs) == 1 - assert len(subgraph.outputs) == 1 - - assert in_ori is op.inputs[0] - assert in_ori in subgraph.inputs - - assert out_ori is op.outputs[0] - assert out_ori in subgraph.outputs - - -def test_non_matching_no_shape_tensor( - trf_pass: CanonicalizeReshapePass, reshape: ReshapeTuple -) -> None: - model = build_reshape( - input_shape=reshape.input, - output_shape=reshape.output, - input_shape_tensor=False, - ) - _test_non_matching_params(trf_pass, model) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_reshape_passes/test_RemovePrecedingReshapePass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_reshape_passes/test_RemovePrecedingReshapePass.py deleted file mode 100644 index 782c8206f..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_reshape_passes/test_RemovePrecedingReshapePass.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
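The pass tested below relies on FULLY_CONNECTED flattening its non-batch dimensions anyway, so a preceding batch-preserving RESHAPE is a no-op. A small numpy check of that claim (illustration only; shapes are made up):

    import numpy as np

    rng = np.random.default_rng(0)
    x = rng.standard_normal((2, 4, 8))             # batch of 2
    w = rng.standard_normal((10, 32))              # 10 outputs, 4*8 = 32 inputs
    flat = x.reshape(2, -1)
    reshaped = x.reshape(2, 8, 4).reshape(2, -1)   # batch dim unchanged
    assert np.allclose(flat @ w.T, reshaped @ w.T)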
- -import pytest -from copy import deepcopy - -from tflite2xcore.transformation_passes import ModelTransformationPass -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.converter import CleanupManager -from tflite2xcore.transformation_passes.reshape_passes import RemovePrecedingReshapePass -from tflite2xcore.xcore_schema import BuiltinOpCodes - -from ..model_builders import build_fc_with_preceding_reshape, build_reshape -from .conftest import ( - PARAMS, - ReshapeTuple, - _test_non_matching_params, - test_matching_params, - update_params_with_reshape, -) - - -# ---------------------------------------------------------------------------- -# PARAMETER VALUES -# ---------------------------------------------------------------------------- - - -def is_matching_reshape(reshape: ReshapeTuple) -> bool: - # Check batch dim is unchanged - return reshape.input[0] == reshape.output[0] - - -PARAMS = update_params_with_reshape(deepcopy(PARAMS), is_matching=is_matching_reshape) - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def trf_pass() -> RemovePrecedingReshapePass: - return RemovePrecedingReshapePass() - - -@pytest.fixture() -def model(outputs: int, reshape: ReshapeTuple) -> XCOREModel: - return build_fc_with_preceding_reshape( - input_shape=reshape.input, - fc_outputs=outputs, - reshaped_input_shape=reshape.output, - ) - - -# ---------------------------------------------------------------------------- -# TESTS -# ---------------------------------------------------------------------------- - - -def test_mutate(trf_pass: ModelTransformationPass, model: XCOREModel) -> None: - subgraph = model.subgraphs[0] - assert len(subgraph.operators) == 2 - - in_ori, out_ori = subgraph.inputs[0], subgraph.outputs[0] - - # run mutating pass - trf_pass.run(model) - model.sanity_check() - - # need to clean up dangling ops/tensors - CleanupManager(model).run_passes() - model.sanity_check() - - # Check FC operator and that RESHAPE has been removed - assert len(subgraph.operators) == 1 - op = subgraph.operators[0] - assert len(op.inputs) == 3 - assert len(op.outputs) == 1 - assert op.operator_code.code is BuiltinOpCodes.FULLY_CONNECTED - - # check input/output tensors - assert len(subgraph.inputs) == 1 - assert len(subgraph.outputs) == 1 - - assert in_ori is op.inputs[0] - assert in_ori in subgraph.inputs - assert out_ori is op.outputs[0] - assert out_ori in subgraph.outputs - - -def test_non_matching_reshape_only( - trf_pass: ModelTransformationPass, reshape: ReshapeTuple -) -> None: - model = build_reshape(input_shape=reshape.input, output_shape=reshape.output) - _test_non_matching_params(trf_pass, model) - - -def test_non_matching_simple( - trf_pass: ModelTransformationPass, outputs: int, non_matching_reshape: ReshapeTuple -) -> None: - model = build_fc_with_preceding_reshape( - input_shape=non_matching_reshape.input, - fc_outputs=outputs, - reshaped_input_shape=non_matching_reshape.output, - ) - _test_non_matching_params(trf_pass, model) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_reshape_passes/test_RemoveSubsequentReshapePass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_reshape_passes/test_RemoveSubsequentReshapePass.py deleted file mode 100644 index bb8a453a4..000000000 --- 
a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_reshape_passes/test_RemoveSubsequentReshapePass.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.transformation_passes.reshape_passes import ( - RemoveSubsequentReshapePass, -) - -from ..model_builders import build_fc_with_subsequent_reshape -from .conftest import test_matching_params as _test_matching_params -from .test_RemovePrecedingReshapePass import ( - PARAMS, - ReshapeTuple, - test_non_matching_reshape_only, - # test_non_matching_simple, # TODO: fix this - test_mutate, -) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def trf_pass() -> RemoveSubsequentReshapePass: - return RemoveSubsequentReshapePass() - - -@pytest.fixture() -def model(outputs: int, reshape: ReshapeTuple) -> XCOREModel: - return build_fc_with_subsequent_reshape( - fc_output_shape=reshape.input, reshaped_output_shape=reshape.output - ) - - -# ---------------------------------------------------------------------------- -# TESTS -# ---------------------------------------------------------------------------- - - -def test_matching_params( - trf_pass: RemoveSubsequentReshapePass, model: XCOREModel -) -> None: - _test_matching_params(trf_pass, model, op_idx=0) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_scratch_memory_passes/__init__.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_scratch_memory_passes/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_scratch_memory_passes/conftest.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_scratch_memory_passes/conftest.py deleted file mode 100644 index ae52377f9..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_scratch_memory_passes/conftest.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
- -from tflite2xcore.transformation_passes import ModelTransformationPass -from tflite2xcore.xcore_model import XCOREModel - -from ..conftest import PARAMS, test_matching_params, _test_non_matching_params - - -# ---------------------------------------------------------------------------- -# TESTS -# ---------------------------------------------------------------------------- - - -def test_mutate(trf_pass: ModelTransformationPass, model: XCOREModel) -> None: - op = model.subgraphs[0].operators[0] - assert "mem" not in op.custom_options - - trf_pass.run(model) - model.sanity_check() - - _test_non_matching_params(trf_pass, model) - assert "mem" in op.custom_options diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_scratch_memory_passes/test_ScratchMemoryConv2d1x1Pass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_scratch_memory_passes/test_ScratchMemoryConv2d1x1Pass.py deleted file mode 100644 index 673955fb5..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_scratch_memory_passes/test_ScratchMemoryConv2d1x1Pass.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from typing import Tuple - -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.transformation_passes import ( - ModelTransformationPass, - ScratchMemoryConv2d1x1Pass, -) - -from tflite2xcore.tests.test_transformation_passes.model_builders import ( - build_XC_conv2d_1x1, -) - -from ..test_conv2d_passes.test_Replace1x1Conv2dPass import PARAMS -from ..test_conv2d_passes.conftest import weight_shape -from .conftest import test_matching_params, test_mutate - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def trf_pass() -> ModelTransformationPass: - return ScratchMemoryConv2d1x1Pass() - - -@pytest.fixture() -def model( - weight_shape: Tuple[int, int, int, int], - input_size: Tuple[int, int], - strides: Tuple[int, int], -) -> XCOREModel: - return build_XC_conv2d_1x1( - weight_shape=weight_shape, input_size=input_size, strides=strides - ) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_scratch_memory_passes/test_ScratchMemoryConv2dPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_scratch_memory_passes/test_ScratchMemoryConv2dPass.py deleted file mode 100644 index 9cf4023e8..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_scratch_memory_passes/test_ScratchMemoryConv2dPass.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
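The shared test_mutate above captures the idiom all of these scratch-memory passes follow: an op matches only until its "mem" custom option has been set, so running the pass twice is a no-op. A hedged skeleton of that shape (hypothetical; the scratch-size computation is an assumption):

    # Hypothetical pass skeleton: match-until-mutated via a custom option.
    class ScratchMemoryPassSketch:
        def match(self, op) -> bool:
            return "mem" not in op.custom_options

        def mutate(self, op) -> None:
            scratch_bytes = 0  # assumption: derived from operand shapes
            op.add_custom_options(mem=scratch_bytes)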
- -import pytest - -from copy import deepcopy -from typing import Tuple, Callable - -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.transformation_passes import ( - ModelTransformationPass, - ScratchMemoryConv2dPass, -) - -from tflite2xcore.tests.test_transformation_passes.model_builders import ( - build_XC_conv2d_deep, - build_XC_conv2d_shallowin, -) - -from ..test_conv2d_passes.conftest import PARAMS, weight_shape -from .conftest import test_matching_params, test_mutate - - -# ---------------------------------------------------------------------------- -# PARAMETER VALUES -# ---------------------------------------------------------------------------- - -PARAMS = deepcopy(PARAMS) - -for k in PARAMS: - PARAMS[k].update( - {"model_builder": [build_XC_conv2d_deep, build_XC_conv2d_shallowin]} - ) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def trf_pass() -> ModelTransformationPass: - return ScratchMemoryConv2dPass() - - -@pytest.fixture() -def model( - model_builder: Callable[..., XCOREModel], - weight_shape: Tuple[int, int, int, int], - input_size: Tuple[int, int], - strides: Tuple[int, int], -) -> XCOREModel: - return model_builder( - weight_shape=weight_shape, input_size=input_size, strides=strides - ) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_scratch_memory_passes/test_ScratchMemoryDepthwiseConv2dPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_scratch_memory_passes/test_ScratchMemoryDepthwiseConv2dPass.py deleted file mode 100644 index a95055fcd..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_scratch_memory_passes/test_ScratchMemoryDepthwiseConv2dPass.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
- -import pytest - -from typing import Tuple - -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.transformation_passes import ( - ModelTransformationPass, - ScratchMemoryDepthwiseConv2dPass, -) - -from tflite2xcore.tests.test_transformation_passes.model_builders import ( - build_XC_conv2d_depthwise, -) - -from ..test_conv2d_passes.test_ReplaceDepthwiseConv2dPass import PARAMS, weight_shape -from .conftest import test_matching_params, test_mutate - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def trf_pass() -> ModelTransformationPass: - return ScratchMemoryDepthwiseConv2dPass() - - -@pytest.fixture() -def model( - weight_shape: Tuple[int, int, int, int], - input_size: Tuple[int, int], - strides: Tuple[int, int], -) -> XCOREModel: - return build_XC_conv2d_depthwise( - weight_shape=weight_shape, input_size=input_size, strides=strides - ) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_scratch_memory_passes/test_ScratchMemoryFullyConnectedPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_scratch_memory_passes/test_ScratchMemoryFullyConnectedPass.py deleted file mode 100644 index e0c3a76d3..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_scratch_memory_passes/test_ScratchMemoryFullyConnectedPass.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import pytest - -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.transformation_passes import ( - ModelTransformationPass, - ScratchMemoryFullyConnectedPass, -) - -from tflite2xcore.tests.test_transformation_passes.model_builders import build_XC_fc - -from ..test_fully_connected_passes.conftest import PARAMS -from .conftest import test_matching_params, test_mutate - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def trf_pass() -> ModelTransformationPass: - return ScratchMemoryFullyConnectedPass() - - -@pytest.fixture() -def model(outputs: int, input_channels: int) -> XCOREModel: - return build_XC_fc(outputs=outputs, input_channels=input_channels) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_warning_passes/__init__.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_warning_passes/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_warning_passes/test_FloatingPointWarningPass.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_warning_passes/test_FloatingPointWarningPass.py deleted file mode 100644 index 03ff50264..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_warning_passes/test_FloatingPointWarningPass.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
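The warning pass tested below only needs a type predicate over tensors; its PARAMS table lists exactly the float types that should match. A minimal sketch of such a matcher (hypothetical, not the archived code):

    _FLOAT_TYPES = (TensorType.FLOAT16, TensorType.FLOAT32, TensorType.FLOAT64)

    # Hypothetical matcher: flag any floating-point tensor in the graph.
    def matches_float_tensor(tensor) -> bool:
        return tensor.type in _FLOAT_TYPES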
- -import pytest - -from tflite2xcore.transformation_passes import FloatingPointWarningPass -from tflite2xcore.xcore_schema import XCOREModel, Subgraph, TensorType - -_TENSOR_SHAPE = (1, 1, 1, 1) - -# ---------------------------------------------------------------------------- -# PARAMETER VALUES -# ---------------------------------------------------------------------------- - -PARAMS = { - "default": { - "tensor_type": [TensorType.FLOAT32, TensorType.FLOAT16, TensorType.FLOAT64], - "non_matching_tensor_type": [ - TensorType.INT8, - TensorType.INT16, - TensorType.INT32, - TensorType.UINT8, - ], - } -} - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def trf_pass() -> FloatingPointWarningPass: - return FloatingPointWarningPass() - - -@pytest.fixture() -def model(tensor_type: TensorType) -> XCOREModel: - subgraph = Subgraph(model=XCOREModel()) - subgraph.create_tensor("test_tensor", tensor_type, _TENSOR_SHAPE) - return subgraph.model - - -# ---------------------------------------------------------------------------- -# TESTS -# ---------------------------------------------------------------------------- - - -def test_matching_params(trf_pass: FloatingPointWarningPass, model: XCOREModel) -> None: - assert trf_pass.match(model.subgraphs[0].tensors[0]) - - -def test_non_matching_tensor_type( - trf_pass: FloatingPointWarningPass, non_matching_tensor_type: TensorType -) -> None: - subgraph = Subgraph(model=XCOREModel()) - test_tensor = subgraph.create_tensor( - "test_tensor", non_matching_tensor_type, _TENSOR_SHAPE - ) - assert not trf_pass.match(test_tensor) - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_word_alignment_passes/__init__.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_word_alignment_passes/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_word_alignment_passes/conftest.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_word_alignment_passes/conftest.py deleted file mode 100644 index 89d151a4f..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_word_alignment_passes/conftest.py +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -from ..conftest import ( - PARAMS, - _test_non_matching_params, - test_matching_params, -) diff --git a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_word_alignment_passes/test_CanonicalizeConv2DInputChannels.py b/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_word_alignment_passes/test_CanonicalizeConv2DInputChannels.py deleted file mode 100644 index f1c2a2b70..000000000 --- a/archived/tflite2xcore/tflite2xcore/tests/test_transformation_passes/test_word_alignment_passes/test_CanonicalizeConv2DInputChannels.py +++ /dev/null @@ -1,139 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
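The pass tested below zero-pads the input channels (and the matching kernel channels) to reach word alignment; the convolution result is unchanged because the padded channels contribute zero. A small numpy check, using the [Cout, kH, kW, Cin] weight layout from these tests (illustration only):

    import numpy as np

    rng = np.random.default_rng(0)
    x = rng.integers(-8, 8, (1, 5, 5, 3))   # NHWC input, Cin = 3
    k = rng.integers(-8, 8, (8, 3, 3, 3))   # [Cout, kH, kW, Cin]

    def conv_valid(x, k):
        out = np.zeros((1, 3, 3, k.shape[0]))
        for co in range(k.shape[0]):
            for i in range(3):
                for j in range(3):
                    out[0, i, j, co] = np.sum(x[0, i:i+3, j:j+3, :] * k[co])
        return out

    x_pad = np.pad(x, [(0, 0), (0, 0), (0, 0), (0, 1)])  # pad Cin 3 -> 4
    k_pad = np.pad(k, [(0, 0), (0, 0), (0, 0), (0, 1)])
    assert np.array_equal(conv_valid(x, k), conv_valid(x_pad, k_pad))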
- -import pytest - -import numpy as np - -from copy import deepcopy - -from tflite2xcore.transformation_passes import CanonicalizeConv2DInputChannels -from tflite2xcore.xcore_schema import BuiltinOpCodes - -from tflite2xcore.tests.test_transformation_passes.model_builders import build_conv2d -from tflite2xcore.tests.test_transformation_passes.test_conv2d_passes.conftest import ( - PARAMS as CONV_PARAMS, - test_non_matching_input_channels, -) -from .conftest import ( - _test_non_matching_params, - test_matching_params, -) - - -# ---------------------------------------------------------------------------- -# PARAMETER VALUES -# ---------------------------------------------------------------------------- - -PARAMS = deepcopy(CONV_PARAMS) - -for k in PARAMS: - PARAMS[k]["input_channels"] = deepcopy( - CONV_PARAMS[k]["non_matching_input_channels"] - ) - PARAMS[k]["non_matching_input_channels"] = deepcopy( - CONV_PARAMS[k]["input_channels"] - ) - PARAMS[k]["output_channels"] = deepcopy( - CONV_PARAMS["smoke"]["output_channels"] - + CONV_PARAMS["smoke"]["non_matching_output_channels"] - ) - - -# ---------------------------------------------------------------------------- -# FIXTURES -# ---------------------------------------------------------------------------- - - -@pytest.fixture() -def trf_pass(): - return CanonicalizeConv2DInputChannels() - - -@pytest.fixture() -def build_model(): - return build_conv2d - - -@pytest.fixture() -def weight_shape(output_channels, kernel_height, kernel_width, input_channels): - return [output_channels, kernel_height, kernel_width, input_channels] - - -@pytest.fixture() -def model(weight_shape, input_size, padding, strides): - model = build_conv2d( - weight_shape=weight_shape, - input_size=input_size, - padding=padding, - strides=strides, - ) - return model - - -# ---------------------------------------------------------------------------- -# TEST FUNCTIONS -# ---------------------------------------------------------------------------- - - -def test_mutate(trf_pass, model): - subgraph = model.subgraphs[0] - assert len(subgraph.operators) == 1 - old_conv_op = subgraph.operators[0] - old_weight_shape = old_conv_op.inputs[1].shape - old_weights = old_conv_op.inputs[1].as_array() - assert old_weights.dtype is np.dtype(np.int8) - - # run padding pass - trf_pass.run(model) - model.sanity_check() - assert len(subgraph.operators) == 2 - - # test pad operator - pad_op = subgraph.operators[0] - assert pad_op.operator_code.code is BuiltinOpCodes.PAD - assert len(pad_op.inputs) == 2 - assert len(pad_op.outputs) == 1 - assert pad_op.inputs[0] in subgraph.inputs - - # test conv operator - conv_op = subgraph.operators[1] - assert conv_op.operator_code.code is BuiltinOpCodes.CONV_2D - assert len(conv_op.inputs) == 3 - assert len(conv_op.outputs) == 1 - assert conv_op.outputs[0] in subgraph.outputs - assert conv_op.inputs[0] is pad_op.outputs[0] - - # get channel counts - input_channels = pad_op.inputs[0].shape[3] - padded_channels = conv_op.inputs[0].shape[3] - pad_size = padded_channels - input_channels - - # test weight tensor shape - new_weight_shape = conv_op.inputs[1].shape - assert old_weight_shape[:3] == new_weight_shape[:3] - assert new_weight_shape[3] == padded_channels - new_weights = conv_op.inputs[1].as_array() - assert old_weights.dtype is np.dtype(np.int8) - assert np.all(new_weights[..., :input_channels] == old_weights) - assert np.all( - new_weights[..., input_channels:] - == np.zeros([*old_weight_shape[:3], pad_size], dtype=np.int8) - ) - - # test paddings tensor 
- paddings = pad_op.inputs[1] - pads_arr = paddings.as_array() - assert pads_arr.shape == paddings.shape == (4, 2) - assert pads_arr[0][0] == [0] - assert pads_arr[0][1] == [0] - assert pads_arr[1][0] == [0] - assert pads_arr[1][1] == [0] - assert pads_arr[2][0] == [0] - assert pads_arr[2][1] == [0] - assert pads_arr[3][0] == [0] - assert pads_arr[3][1] == [pad_size] - - -if __name__ == "__main__": - pytest.main() diff --git a/archived/tflite2xcore/tflite2xcore/tflite_visualize.py b/archived/tflite2xcore/tflite2xcore/tflite_visualize.py deleted file mode 100755 index 53dff4414..000000000 --- a/archived/tflite2xcore/tflite2xcore/tflite_visualize.py +++ /dev/null @@ -1,824 +0,0 @@ -#!/usr/bin/env python -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -import json -import os -import enum -import webbrowser -import tempfile -from pathlib import Path -from collections import Counter -from typing import Optional, Union - -from tflite2xcore.xcore_schema.flexbuffers import FlexbufferParser -from tflite2xcore.xcore_model import XCOREModel -from tflite2xcore.xcore_schema import XCOREOpCodes -from tflite2xcore.utils import VerbosityParser - -# A CSS description for making the visualizer -_CSS = """ - - - -""" -# TODO: reference d3.js script locally - -_D3_HTML_TEMPLATE = """ -""" - - -class OpCodeMapper: - """Maps an opcode index to a text representation.""" - - def __init__(self, data): - self.opcode_idx_to_name = [ - d["custom_code"] if d["builtin_code"] == "CUSTOM" else d["builtin_code"] - for d in data["operator_codes"] - ] - - self.color = [] - for d in data["operator_codes"]: - if d["builtin_code"] == "CUSTOM": - try: - XCOREOpCodes(d["custom_code"]) - color = "#00a000" # xcore optimized custom opcode - except ValueError: - color = "#a00000" # unknown custom opcode - else: - color = "#0000a0" - self.color.append(color) - - def __call__(self, opcode_idx, op_idx=None): - s = ( - self.opcode_idx_to_name[opcode_idx] - if opcode_idx < len(self.opcode_idx_to_name) - else "UNKNOWN" - ) - return f"{s} [{opcode_idx}]" if op_idx is None else f"({op_idx}) {s}" - - -class OpCodeTooltipMapper: - """Maps a list of opcode indices to a tooltip hoverable indicator of more.""" - - def __init__(self, model_dict, subgraph): - self.operators = subgraph["operators"] - self.opcode_mapper = OpCodeMapper(model_dict) - - def __call__(self, idx_list): - html = "" - for idx in idx_list: - html += self.opcode_mapper(self.operators[idx]["opcode_index"], idx) - html += "
" - html += f"
{idx_list}
" - return html - - -class DataSizeMapper: - """For buffers, report the number of bytes.""" - - @classmethod - def _format_bytes(cls, n): - return f"{n:,d} bytes" - - def __call__(self, x): - return "--" if x is None else self._format_bytes(len(x)) - - -class BufferOwnerMapper: - """For buffers, report the owners with tooltips.""" - - def __init__(self, model_dict): - self.subgraphs = model_dict["subgraphs"] - - def __call__(self, d): - if not isinstance(d, dict): - return "N/A" - - html_list = [] - for k, owners in d.items(): - subgraph = self.subgraphs[k] - tensor_mapper = TensorTooltipMapper(subgraph) - html_list.append(f"{k}: {tensor_mapper(owners)}") - - return ", ".join(html_list) if html_list else "--" - - -class TensorMapper: - """Maps a tensor index to a text representation.""" - - def __init__(self, subgraph, node_text=False): - self.tensors = subgraph["tensors"] - self.node_text = node_text - - def __call__(self, idx): - tensor = self.tensors[idx] - if self.node_text: - return ( - f"{tensor['name']}" - + "\n" - + f"({idx:d}) <{tensor['type']}> {tensor['shape']}" - ) - else: - return f"({idx:d}) {tensor['name']} <{tensor['type']}> {tensor['shape']}
" - - -class TensorTooltipMapper: - """Maps a list of tensor indices to a tooltip hoverable indicator of more.""" - - def __init__(self, subgraph): - self.tensor_mapper = TensorMapper(subgraph) - - def __call__(self, idx_list): - html = "" - for idx in idx_list: - html += self.tensor_mapper(idx) - html += f"{idx_list}" - return html - - -class DictMapper: - def __call__(self, d): - if d: - return { - k: (v.name if isinstance(v, enum.Enum) else v) for k, v in d.items() - } - else: - return d - - -class CustomOptionsMapper: - """Maps a list of bytes representing a flexbuffer to a dictionary.""" - - def __call__(self, custom_options): - return ( - FlexbufferParser().parse(bytes(custom_options)) if custom_options else None - ) - - -def GenerateGraph(subgraph_idx, g, opcode_mapper): - """Produces the HTML required to have a d3 visualization of the dag.""" - - def TensorID(idx): - return f"t{idx:d}" - - def OperatorID(idx): - return f"o{idx:d}" - - def NodeWidth(node_text): - return int(max(len(line) * 12 / 16 * 10 + 5 for line in node_text.split("\n"))) - - def NodeHeight(node_text): - return node_text.count("\n") * 15 + 25 - - edges, nodes = [], [] - tensor_nodes_info, op_nodes_info = [], [] - first, second = {}, {} - pixel_mult = 200 # TODO(aselle): multiplier for initial placement - width_mult = 170 # TODO(aselle): multiplier for initial placement - - tensor_mapper = TensorMapper(g, node_text=True) - - for tensor_index, tensor in enumerate(g["tensors"]): - t_node_text = f"({tensor_index:d}) {tensor['name']} {tensor['shape']}" - t_node_text = tensor_mapper(tensor_index) - tensor_nodes_info.append( - { - "text": t_node_text, - "width": NodeWidth(t_node_text), - "height": NodeHeight(t_node_text), - "color": "#fffacd", # default tensor color, should be only for parameters - } - ) - - for op_idx, op in enumerate(g["operators"]): - o_node_text = opcode_mapper(op["opcode_index"], op_idx) - op_nodes_info.append( - { - "text": o_node_text, - "width": NodeWidth(o_node_text), - "height": NodeHeight(o_node_text), - "color": opcode_mapper.color[op["opcode_index"]], - } - ) - - # coloring intermediate tensors - for tensor_index in op["outputs"]: - tensor_nodes_info[tensor_index]["color"] = "#dddddd" - - # coloring input/output tensors - for tensor_index in range(len(g["tensors"])): - if tensor_index in g["inputs"]: - tensor_nodes_info[tensor_index]["color"] = "#ccccff" - elif tensor_index in g["outputs"]: - tensor_nodes_info[tensor_index]["color"] = "#ffcccc" - - for op_index, op in enumerate(g["operators"]): - x = width_mult - for tensor_index in op["inputs"]: - if tensor_index not in first: - first[tensor_index] = ((op_index - 0.5 + 1) * pixel_mult, x) - x += tensor_nodes_info[tensor_index]["width"] + 10 # offset - edges.append( - {"source": TensorID(tensor_index), "target": OperatorID(op_index)} - ) - - x = width_mult - for tensor_index in op["outputs"]: - if tensor_index not in second: - second[tensor_index] = ((op_index + 0.5 + 1) * pixel_mult, x) - x += tensor_nodes_info[tensor_index]["width"] + 10 # offset - edges.append( - {"target": TensorID(tensor_index), "source": OperatorID(op_index)} - ) - - nodes.append( - { - "id": OperatorID(op_index), - "name": op_nodes_info[op_index]["text"], - "text_color": "#eeeeee", - "fill_color": op_nodes_info[op_index]["color"], - "edge_radius": 10, - "x": pixel_mult, - "y": (op_index + 1) * pixel_mult, - "node_width": op_nodes_info[op_index]["width"], - "node_height": op_nodes_info[op_index]["height"], - } - ) - - for tensor_index, tensor in 
enumerate(g["tensors"]): - initial_y = ( - first[tensor_index] - if tensor_index in first - else second[tensor_index] - if tensor_index in second - else (0, 0) - ) - - nodes.append( - { - "id": TensorID(tensor_index), - "name": tensor_nodes_info[tensor_index]["text"], - "text_color": "#000000", - "fill_color": tensor_nodes_info[tensor_index]["color"], - "edge_radius": 1, - "x": initial_y[1], - "y": initial_y[0], - "node_width": tensor_nodes_info[tensor_index]["width"], - "node_height": tensor_nodes_info[tensor_index]["height"], - } - ) - - graph_str = json.dumps({"nodes": nodes, "edges": edges}, indent=2) - html = _D3_HTML_TEMPLATE % (graph_str, subgraph_idx) - return html - - -def GenerateTableHtml(items, keys_to_print, display_index=True): - """Given a list of object values and keys to print, make an HTML table. - - Args: - items: Items to print an array of dicts. - keys_to_print: (key, display_fn). `key` is a key in the object. i.e. - items[0][key] should exist. display_fn is the mapping function on display. - i.e. the displayed html cell will have the string returned by - `mapping_fn(items[0][key])`. - display_index: add a column which is the index of each row in `items`. - Returns: - An html table. - """ - indent = " " * 2 - - # Print the list of items - html = "" - html += "\n" - if display_index: - html += f"{indent}\n" - for h, mapper in keys_to_print: - html += f"{indent}\n" - html += "" - - # print rows - for idx, tensor in enumerate(items): - html += "\n" - if display_index: - html += f"{indent}\n" - # print tensor.keys() - for h, mapper in keys_to_print: - val = tensor[h] if h in tensor else None - val = val if mapper is None else mapper(val) - html += f"{indent}\n" - - html += "" - html += "
index{h}
{idx}{val}
\n\n" - return html - - -def dict_to_html(data): - """Given a tflite model as a dictionary, produce html description.""" - - indent = " " * 2 - - html = "\n" - html += _CSS - html += "\n\n" - html += "

TensorFlow Lite Model

\n" - - toplevel_stuff = [ - ("filename", None), - ("filesize", DataSizeMapper()._format_bytes), - ("version", None), - ("description", None), - ] - - html += "" - for key, mapping in toplevel_stuff: - html += "\n" - html += f"{indent}\n" - val = data.get(key) if mapping is None else mapping(data.get(key)) - html += f"{indent}\n" - html += "" - html += "
{key}{val}
\n" - - for subgraph_idx, g in enumerate(data["subgraphs"]): - # Subgraph local specs on what to display - html += "\n
" - tensor_mapper = TensorTooltipMapper(g) - opcode_mapper = OpCodeMapper(data) - opcode_tooltip_mapper = OpCodeTooltipMapper(data, g) - custom_options_mapper = CustomOptionsMapper() - op_keys_to_display = [ - ("inputs", tensor_mapper), - ("outputs", tensor_mapper), - ("opcode_index", opcode_mapper), - ("builtin_options", DictMapper()), - ("custom_options", custom_options_mapper), - ] - tensor_keys_to_display = [ - ("name", None), - ("consumers", opcode_tooltip_mapper), - ("producers", opcode_tooltip_mapper), - ("type", None), - ("shape", None), - ("buffer", None), - ("quantization", DictMapper()), - ] - - html += "

Subgraph %d

\n" % subgraph_idx - - # Inputs and outputs. - html += "

Inputs/Outputs

\n" - html += GenerateTableHtml( - [{"inputs": g["inputs"], "outputs": g["outputs"]}], - [("inputs", tensor_mapper), ("outputs", tensor_mapper)], - display_index=False, - ) - - # Print the tensors. - html += "

Tensors

\n" - html += GenerateTableHtml(g["tensors"], tensor_keys_to_display) - - # Print the ops. - html += "

Ops

\n" - html += GenerateTableHtml(g["operators"], op_keys_to_display) - - # Visual graph. - html += "\n" % ( - subgraph_idx, - ) - html += GenerateGraph(subgraph_idx, g, opcode_mapper) - html += "
\n\n" - - # Buffers - size_mapper = DataSizeMapper() - buffer_keys_to_display = [ - ("data", size_mapper), - ("owners", BufferOwnerMapper(data)), - ] - total_bytes = sum(len(d["data"]) for d in data["buffers"]) - html += ( - "

Buffers " - f"(total: {size_mapper._format_bytes(total_bytes)}, " - f"{total_bytes/data['filesize']:.2%} of filesize)" - "

\n" - ) - html += GenerateTableHtml(data["buffers"], buffer_keys_to_display) - - # Operator codes - operator_keys_to_display = [ - ("builtin_code", None), - ("custom_code", None), - ("version", None), - ("count", None), - ] - op_cnt = sorted( - Counter( - op["opcode_index"] - for subgraph in data["subgraphs"] - for op in subgraph["operators"] - ).items() - ) - for d, p in zip(data["operator_codes"], op_cnt): - d["count"] = p[1] - html += "

Operator Codes

\n" - html += GenerateTableHtml(data["operator_codes"], operator_keys_to_display) - - html += "\n\n" - - return html - - -def model_to_html(model, filename=None): - if isinstance(model, (bytes, bytearray)): - model = XCOREModel.deserialize(model) - elif not isinstance(model, XCOREModel): - raise TypeError("model musy be XCOREModel or serialized flatbuffer model") - - try: - data = model.to_dict(extended=True) - except AttributeError as e: - if e.args[0] == "'Buffer' object has no attribute 'owners'": - data = model.to_dict(extended=False) - else: - raise - - if filename: - data["filename"] = filename - data["filesize"] = os.stat(filename).st_size - else: - data["filename"] = data["filesize"] = "--" - - return dict_to_html(data) - - -def main( - tflite_input: Union[str, Path], - html_output: Optional[Union[str, Path]] = None, - *, - open_browser: bool = False, -) -> None: - if html_output: # TODO: do this with a context manager - html_path = html_output - else: - html_file = tempfile.NamedTemporaryFile(delete=False) - html_path = html_file.name - - if not os.path.exists(tflite_input): - raise RuntimeError(f"Invalid filename {tflite_input}") - - html = model_to_html(XCOREModel.read_flatbuffer(tflite_input), tflite_input) - with open(html_path, "w") as f: - f.write(html) - - if open_browser: - webbrowser.open_new_tab("file://" + os.path.realpath(html_path)) - - if not html_output: - html_file.close() - - -if __name__ == "__main__": - parser = VerbosityParser( - verbosity_config=dict(action="store_true", default=False, help="Verbose mode.") - ) - parser.add_argument("tflite_input", help="Input .tflite file.") - parser.add_argument( - "-o", - "--html_output", - required=False, - default=None, - help="Output .html file. If not specified, a temporary file is created.", - ) - parser.add_argument( - "-b", - "--browser", - action="store_true", - help="Open browser after the .html is created.", - ) - args = parser.parse_args() - tflite_input, html_output = args.tflite_input, args.html_output - - main(tflite_input, html_output, open_browser=args.browser) diff --git a/archived/tflite2xcore/tflite2xcore/transformation_passes/__init__.py b/archived/tflite2xcore/tflite2xcore/transformation_passes/__init__.py deleted file mode 100644 index 9f76df6f9..000000000 --- a/archived/tflite2xcore/tflite2xcore/transformation_passes/__init__.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
- -from .transformation_passes import ModelTransformationPass, CanonicalizeEmptyBuffersPass - -from .lut_passes import ( - ReplaceTanhPass, - ReplaceLogisticPass, - ReplaceReLUPass, - ReplaceReLU6Pass, - LegalizeXCLookupTablePass, -) -from .conv2d_passes import ( - CanonicalizeSinglePixelConv2DPass, - CanonicalizeSingleinDepthwiseConv2DPass, - LegalizeSingleinConv2DPass, - Replace1x1Conv2dPass, - LegalizeXC1x1ConvPass, - ReplaceDepthwiseConv2dPass, - LegalizeXCDepthwiseConvPass, - ReplaceDeepConv2dPass, - LegalizeXCDeepConvPass, - ReplaceShallowinConv2dPass, - LegalizeXCShallowinConvPass, -) - -from .fully_connected_passes import ( - ReplaceFullyConnectedPass, - LegalizeXCFullyConnectedPass, -) - -from .pooling_passes import ( - ReplaceMaxPool2DPass, - ReplaceMaxPool2D2x2Pass, - ReplaceAveragePool2DPass, - ReplaceAveragePool2D2x2Pass, - ReplaceGlobalAveragePool2DPass, -) -from .padding_passes import ( - FuseConv2dPaddingPass, - SplitPaddingPass, - FuseConsecutivePadsPass, - RemovePaddingInputPass, - ReplacePadPass, -) - -from .quantize_dequantize_passes import ( - RemoveRedundantInt8RequantizationPass, - CanonicalizeQuantizedInputPass, - CanonicalizeQuantizedOutputPass, - CanonicalizeLceQuantizedOutputPass, - CanonicalizeLceQuantizedInputPass, - LegalizeFloatInputPass, - LegalizeFloatOutputPass, -) - -from .op_version_passes import LegalizeQuantizeVersionPass - -from .dce_passes import ( - EliminateDeadOperatorsPass, - EliminateDeadTensorsPass, - EliminateDeadBuffersPass, -) - -from .reshape_passes import ( - RemoveSubsequentReshapePass, - RemovePrecedingReshapePass, - CanonicalizeReshapePass, -) - -from .renaming_passes import LegalizeOperatorOutputTensorNamePass - -from .minification_passes import ( - MinifyQuantInfoPass, - MinifyTensorNamesPass, - UnifyEmptyBuffersPass, -) - -from .word_alignment_passes import CanonicalizeConv2DInputChannels - -from .parallelization_passes import ( - ParallelizeConv2dPass, - ParallelizeDepthwiseConv2dPass, - ParallelizeFullyConnectedPass, - ParallelizeRequant16To8Pass, - ParallelizePooling2DPass, - ParallelizeGlobalAveragePool2DPass, - ParallelizeBConv2dBinPass, - ParallelizeBConv2dInt8Pass, - ParallelizeLUTPass, - ParallelizeAddPass, -) - -from .scratch_memory_passes import ( - ScratchMemoryFullyConnectedPass, - ScratchMemoryConv2dPass, - ScratchMemoryConv2d1x1Pass, - ScratchMemoryDepthwiseConv2dPass, - InsertExternalMemoryFetchPass, -) - -from .constant_propagation_passes import ConstantPropagationPass - -from .lce_passes import ( - ReplaceBconv2DInt8Pass, - ReplaceBconv2DInt8DeepInDeepOutPass, - ReplaceBconv2DBitpackedPass, - ReplaceBconv2DBitpackedDeepInPass, - ReplaceLceQuantizePass, - LegalizeXCBconv2DPaddingPass, - LegalizeBconv2dInt8Pass, - LegalizeBconv2dInt8DeepInDeepOutPass, - LegalizeBconv2dBitpackedPass, - LegalizeBconv2dBitpackedDeepInPass, -) - -from .warning_passes import FloatingPointWarningPass - -from .add_passes import ReplaceAddPass diff --git a/archived/tflite2xcore/tflite2xcore/transformation_passes/add_passes.py b/archived/tflite2xcore/tflite2xcore/transformation_passes/add_passes.py deleted file mode 100644 index efd6edf13..000000000 --- a/archived/tflite2xcore/tflite2xcore/transformation_passes/add_passes.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
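The ReplaceAddPass deleted below packs the two input scales into 16-bit fixed-point multipliers plus a combined bias. A numpy sanity check of that arithmetic, assuming the XC_add_8 kernel evaluates y = (b + m0*(x0 << -s0) + m1*(x1 << -s1)) >> s_out (the kernel formula is an assumption; the parameter construction mirrors the pass):

    import numpy as np

    scale_in, zp_in = (0.05, 0.08), (3, -7)   # made-up int8 quantization params
    scale_out, zp_out = 0.1, 5

    s0 = s1 = -6
    ratios = (scale_in[0] / scale_out, scale_in[1] / scale_out)
    sm = 14 - int(np.floor(np.log2(max(ratios))))
    m0, m1 = (round(r * 2 ** sm) for r in ratios)
    s_out = max(0, sm - s0)
    b = (zp_out << s_out) - m0 * (zp_in[0] << -s0) - m1 * (zp_in[1] << -s1)

    x0, x1 = 17, -42                          # sample int8 activations
    fixed = (b + m0 * (x0 << -s0) + m1 * (x1 << -s1)) >> s_out
    ref = zp_out + round(
        (scale_in[0] * (x0 - zp_in[0]) + scale_in[1] * (x1 - zp_in[1])) / scale_out
    )
    assert abs(fixed - ref) <= 1              # off-by-one rounding tolerated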
- -import numpy as np - -from tflite2xcore.xcore_model import Operator -from tflite2xcore.xcore_schema import ( - BuiltinOpCodes, - OperatorCode, - XCOREOpCodes, - TensorType, -) - -from .transformation_passes import ReplaceQuantizedOperatorPass - - -class ReplaceAddPass(ReplaceQuantizedOperatorPass): - @property - def matching_opcode(self) -> BuiltinOpCodes: - return BuiltinOpCodes.ADD - - @property - def new_opcode(self) -> OperatorCode: - return OperatorCode(XCOREOpCodes.XC_add_8) - - def match(self, op: Operator) -> bool: - return ( - super().match(op) - and len(op.inputs) == 2 - and op.inputs[0].type is self.matching_input_type - and op.inputs[0].type is op.inputs[1].type is op.outputs[0].type - and op.inputs[0].shape == op.inputs[1].shape == op.outputs[0].shape - ) - - def mutate(self, op: Operator) -> Operator: - new_op = super().mutate(op) - - # constant picked so 8 bit number fits in 16 bits - s_0 = s_1 = -6 - - input_scales = ( - new_op.inputs[0].quantization["scale"][0], - new_op.inputs[1].quantization["scale"][0], - ) - - output_scale = new_op.outputs[0].quantization["scale"][0] - - scale_ratios = (input_scales[0] / output_scale, input_scales[1] / output_scale) - - max_ratio = max(scale_ratios) - - msb_max_ratio = int(np.floor(np.log2(max_ratio))) - - # constant picked for number fits in 16 bits - scale_mismatch = 14 - msb_max_ratio - - m_0 = np.round(scale_ratios[0] * 2 ** scale_mismatch) - m_1 = np.round(scale_ratios[1] * 2 ** scale_mismatch) - - s_out = max(0, scale_mismatch - s_0) - - output_zero_point = new_op.outputs[0].quantization["zero_point"][0] - - inputs_zero_points = ( - new_op.inputs[0].quantization["zero_point"][0], - new_op.inputs[1].quantization["zero_point"][0], - ) - - b = ( - (output_zero_point << s_out) - - m_0 * (inputs_zero_points[0] << -s_0) - - m_1 * (inputs_zero_points[1] << -s_1) - ) - - params = np.int32([s_0, m_0, s_1, m_1, b, s_out]) - - subgraph = new_op.subgraph - bias_scale_shift_tensor = subgraph.create_tensor( - f"{new_op.name}/bias_scale_shift", - TensorType.INT32, - consumers=[new_op], - shape=params.shape, - ) - new_op.inputs.append(bias_scale_shift_tensor) - - new_op.inputs[2].buffer.data = params - - return new_op diff --git a/archived/tflite2xcore/tflite2xcore/transformation_passes/constant_propagation_passes.py b/archived/tflite2xcore/tflite2xcore/transformation_passes/constant_propagation_passes.py deleted file mode 100644 index 65dc05249..000000000 --- a/archived/tflite2xcore/tflite2xcore/transformation_passes/constant_propagation_passes.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
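The constant-propagation pass deleted below folds an op whose inputs are all constant by running a cloned single-op model through the builtin TFLite interpreter. A toy demonstration of the same trick with stock TensorFlow (illustration only, not the archived code path):

    import numpy as np
    import tensorflow as tf

    # A graph with no runtime inputs: its output can be computed once.
    @tf.function(input_signature=[])
    def const_graph():
        return tf.abs(tf.constant([[-1.0, 2.0, -3.0]]))

    converter = tf.lite.TFLiteConverter.from_concrete_functions(
        [const_graph.get_concrete_function()]
    )
    interp = tf.lite.Interpreter(model_content=converter.convert())
    interp.allocate_tensors()
    interp.invoke()
    out = interp.get_tensor(interp.get_output_details()[0]["index"])
    assert np.array_equal(out, [[1.0, 2.0, 3.0]])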
- -import numpy as np -import tensorflow as tf -from copy import deepcopy -from typing import Iterable - -from tflite2xcore.xcore_schema import ( - BuiltinOpCodes, - XCOREModel, - Subgraph, - Operator, - Tensor, - Buffer, -) - -from .transformation_passes import OperatorMatchingPass - - -class ConstantPropagationPass(OperatorMatchingPass): - def match(self, op: Operator) -> bool: - if super().match(op): - for t in op.inputs: - if not t.is_constant: - return False - elif not t.buffer.data: - self.logger.warning("Found constant tensor with empty buffer") - - if op.operator_code.code in BuiltinOpCodes: - return True - else: - self.logger.warning( - f"Found unsupported operator {op.operator_code.code}" - ) - - return False - - def mutate(self, op: Operator) -> None: - # we first clone a single op model from the op - new_model = XCOREModel() - new_subgraph = Subgraph(model=new_model) - - def clone_tensors(old_tensors: Iterable[Tensor]) -> Iterable[Tensor]: - return (new_subgraph.clone_tensor(t) for t in old_tensors) - - new_op = new_subgraph.create_operator( - op.operator_code, - inputs=clone_tensors(op.inputs), - outputs=clone_tensors(op.outputs), - builtin_options=deepcopy(op.builtin_options), - ) - - # the new model will have no inputs (all op inputs are constant) - # all op outputs will be subgraph outputs as well - for tensor in new_op.outputs: - new_subgraph.outputs.append(tensor) - # rearranging buffers to satisfy the builtin interpreter - new_model.buffers.remove(tensor.buffer) - new_model.buffers = [ - tensor.buffer, - *new_model.buffers, - ] - - # run the single op model thourhg the builtin interpreter - # to get the propagated values - self.logger.debug("Propagating constant using tf.lite.Interpreter...") - interp = tf.lite.Interpreter(model_content=new_model.serialize()) - interp.allocate_tensors() - interp.invoke() - output_values = [ - interp.get_tensor(det["index"]) for det in interp.get_output_details() - ] - - # finally, mutate the original graph - assert len(op.outputs) == len(output_values) # sanity check - for tensor, data in zip(op.outputs, output_values): - tensor.buffer.owners.remove(tensor) - tensor.buffer = Buffer(op.model, np.array(data), owners=[tensor]) - op.subgraph.remove_operator(op) diff --git a/archived/tflite2xcore/tflite2xcore/transformation_passes/conv2d_passes.py b/archived/tflite2xcore/tflite2xcore/transformation_passes/conv2d_passes.py deleted file mode 100644 index b47b637e5..000000000 --- a/archived/tflite2xcore/tflite2xcore/transformation_passes/conv2d_passes.py +++ /dev/null @@ -1,358 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
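CanonicalizeSinglePixelConv2DPass in the file below rewrites a CONV_2D whose kernel spans the whole input (and thus yields a 1x1 output) as a FULLY_CONNECTED op with the weights reshaped to (Cout, prod(rest)). A small numpy check of the equivalence (illustration only):

    import numpy as np

    rng = np.random.default_rng(0)
    x = rng.standard_normal((1, 4, 5, 3))   # NHWC input
    w = rng.standard_normal((8, 4, 5, 3))   # kernel spans the full input

    # VALID conv with a full-size kernel gives one output value per Cout:
    conv = np.array([np.sum(x[0] * w[co]) for co in range(8)])

    # which is exactly a fully connected layer on the flattened input:
    fc = w.reshape(8, -1) @ x.reshape(-1)
    assert np.allclose(conv, fc)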
- -import numpy as np -from copy import deepcopy -from typing import Tuple - -from tflite2xcore.xcore_schema import ( - BuiltinOpCodes, - OperatorCode, - XCOREOpCodes, - FullyConnectedOptionsWeightsFormat, -) -from tflite2xcore.utils import WORD_SIZE_BYTES - -from .transformation_passes import ( - ReplaceQuantizedWeightBiasOperatorPass, - ReplaceXCWeightBiasOperatorPass, - LegalizeWeightBiasPass, - LegalizeXCWeightBiasPass, -) - - -class CanonicalizeSinglePixelConv2DPass(ReplaceQuantizedWeightBiasOperatorPass): - @property - def matching_opcode(self): - return BuiltinOpCodes.CONV_2D - - @property - def new_opcode(self): - return OperatorCode(BuiltinOpCodes.FULLY_CONNECTED, version=7) - - def match(self, op): - with self.using(op): - return ( - super().match(op) - and self._weights.shape[1:3] == self._input.shape[1:3] - and self._output.shape[1] == self._output.shape[2] == 1 - ) - - def mutate(self, op): - builtin_options = { - "fused_activation_function": op.builtin_options[ - "fused_activation_function" - ], - "weights_format": FullyConnectedOptionsWeightsFormat.DEFAULT, - "keep_num_dims": False, - "asymmetric_quantize_inputs": False, - } - - new_op = super().mutate(op) - with self.using(new_op): - old_weight_tensor = self._weights - self._op.builtin_options = builtin_options - - new_weight_tensor = self._op.subgraph.create_tensor( - f"{self._op.name}/weights", - old_weight_tensor.type, - shape=( - old_weight_tensor.shape[0], - np.prod(old_weight_tensor.shape[1:]), - ), - quantization=old_weight_tensor.quantization, - consumers=[self._op], - buffer=old_weight_tensor.buffer, - ) - - # rewire old and new kernel tensors - old_weight_tensor.consumers.remove(self._op) - self._op.inputs[1] = new_weight_tensor - - return new_op - - -class CanonicalizeSingleinDepthwiseConv2DPass(ReplaceXCWeightBiasOperatorPass): - @property - def matching_opcode(self): - return BuiltinOpCodes.DEPTHWISE_CONV_2D - - @property - def new_opcode(self): - return OperatorCode(BuiltinOpCodes.CONV_2D, version=3) - - @property - def _depth_multiplier(self): - return self._op.builtin_options["depth_multiplier"] - - def match(self, op): - with self.using(op): - # TODO: update this when conv2d output channel word alignment is done - return ( - super().match(op) - and self._input.shape[3] == 1 - and self._output.shape[3] % WORD_SIZE_BYTES == 0 # Cout divisible by 4 - ) - - def mutate(self, op): - with self.using(op): - builtin_options = deepcopy(self._op.builtin_options) - depth_multiplier = builtin_options.pop("depth_multiplier") - assert depth_multiplier == self._weights.shape[3] - - # create new op and update builtin options - new_op = super().mutate(op) - new_op.builtin_options = builtin_options - - return new_op - - -class LegalizeSingleinConv2DPass(LegalizeWeightBiasPass): - @property - def matching_opcode(self): - return BuiltinOpCodes.CONV_2D - - def mutate_biases(self, op): - # NOTE: nothing to be done on the biases - pass - - def mutate_weights(self, op): - with self.using(op): - self._replace_weights(np.transpose(self._weights.as_array(), [3, 1, 2, 0])) - - -class ReplaceConv2DPass(ReplaceXCWeightBiasOperatorPass): - @property - def _strides(self): - options = self._op.builtin_options - return options["stride_h"], options["stride_w"] - - @property - def _dilation(self): - options = self._op.builtin_options - return options["dilation_h_factor"], options["dilation_w_factor"] - - @property - def _padding(self): - return self._op.builtin_options["padding"] - - def match(self, op): - if super().match(op): - with 
self.using(op): - if self._dilation != (1, 1): - self.logger.warning( - f"Found non-supported dilation: {self._dilation}" - ) - else: - return True - - return False - - -class LegalizeXCConvPass(LegalizeXCWeightBiasPass): - @property - def _new_weight_shape(self): - # by default, no reshaping is done - return self._weights.shape - - def mutate_weights(self, op): - with self.using(op): - self._replace_weights( - self._weights.as_array().reshape(self._new_weight_shape) - ) - - -class Replace1x1Conv2dPass(ReplaceConv2DPass): - @property - def matching_opcode(self): - return BuiltinOpCodes.CONV_2D - - @property - def new_opcode(self): - return OperatorCode(XCOREOpCodes.XC_conv2d_1x1) - - def match(self, op): - if super().match(op): - with self.using(op): - return ( - self._strides == (1, 1) - and self._weights.shape[0] % WORD_SIZE_BYTES - == 0 # Cout divisible by 4 - and self._weights.shape[1] == 1 - and self._weights.shape[2] == 1 - and self._weights.shape[3] % WORD_SIZE_BYTES - == 0 # Cin divisible by 4 - ) - - return False - - -class LegalizeXC1x1ConvPass(LegalizeXCConvPass): - @property - def matching_opcode(self): - return XCOREOpCodes.XC_conv2d_1x1 - - def _zero_point_bias(self): - return np.sum( - self._weights.as_array(np.int64) * self._input_zero_point, axis=3 - ).squeeze() - - @property - def _new_weight_shape(self): - # NOTE: The reshape is not strictly necessary since the first dimension of - # the kernel should be 1 in TFLite - old_shape = self._weights.shape - return [old_shape[0], old_shape[3]] - - -class ReplacePaddedConv2DPass(ReplaceConv2DPass): - def _pad(self) -> Tuple[int, int]: - # pad: [top, left] - return tuple( - # first arg of max is <= for valid padding - max(int((o - 1) * s - i + k) // 2, 0) - for o, s, i, k in zip( - self._output.shape[1:3], - self._op.custom_options["stride"], - self._input.shape[1:3], - self._weights.shape[1:3], - ) - ) - - def mutate(self, op): - new_op = super().mutate(op) - - with self.using(op): - new_op.add_custom_options(stride=self._strides) - with self.using(new_op): - new_op.add_custom_options(pad=tuple(-p for p in self._pad())) - return new_op - - -class ReplaceDepthwiseConv2dPass(ReplacePaddedConv2DPass): - @property - def matching_opcode(self): - return BuiltinOpCodes.DEPTHWISE_CONV_2D - - @property - def new_opcode(self): - return OperatorCode(XCOREOpCodes.XC_conv2d_depthwise) - - @property - def _depth_multiplier(self): - return self._op.builtin_options["depth_multiplier"] - - def match(self, op): - if super().match(op): - with self.using(op): - if self._depth_multiplier != 1: - self.logger.warning( - f"Found non-supported depthwise multiplier: {self._depth_multiplier}" - ) - else: - return ( - self._weights.shape[3] % WORD_SIZE_BYTES == 0 - ) # Cin divisible by 4 - - return False - - -class LegalizeXCDepthwiseConvPass(LegalizeXCConvPass): - @property - def matching_opcode(self): - return XCOREOpCodes.XC_conv2d_depthwise - - def _zero_point_bias(self): - # NOTE: first dimension of the kernel is always 1 in depthwise conv2d - return np.sum( - self._weights.as_array(np.int64) * self._input_zero_point, axis=(1, 2) - ).squeeze() - - @property - def _new_weight_shape(self): - # NOTE: The reshape is not strictly necessary since the first dimension of - # the kernel should be 1 in TFLite - return self._weights.shape[1:] - - -class ReplaceDeepConv2dPass(ReplacePaddedConv2DPass): - @property - def matching_opcode(self): - return BuiltinOpCodes.CONV_2D - - @property - def new_opcode(self): - return OperatorCode(XCOREOpCodes.XC_conv2d_deep) - 
- def match(self, op): - if super().match(op): - with self.using(op): - return ( - self._weights.shape[0] % WORD_SIZE_BYTES == 0 # Cout divisible by 4 - and self._weights.shape[3] % WORD_SIZE_BYTES - == 0 # Cin divisible by 4 - ) - - return False - - -class LegalizeXCDeepConvPass(LegalizeXCConvPass): - @property - def matching_opcode(self): - return XCOREOpCodes.XC_conv2d_deep - - def _zero_point_bias(self): - return np.sum( - self._weights.as_array(np.int64) * self._input_zero_point, axis=(1, 2, 3) - ) - - -class ReplaceShallowinConv2dPass(ReplacePaddedConv2DPass): - @property - def matching_opcode(self): - return BuiltinOpCodes.CONV_2D - - @property - def new_opcode(self): - return OperatorCode(XCOREOpCodes.XC_conv2d_shallowin) - - def match(self, op): - if super().match(op): - with self.using(op): - return ( - self._weights.shape[0] % WORD_SIZE_BYTES == 0 # Cout divisible by 4 - and self._weights.shape[3] % WORD_SIZE_BYTES - == 0 # Cin divisible by 4 - and np.prod(self._weights.shape[2:]) <= 32 # K_w * Cin <= 32 - ) - - return False - - def mutate(self, op): - new_op = super().mutate(op) - - with self.using(new_op): - new_op.add_custom_options(Kw=int(self._weights.shape[2])) - return new_op - - -class LegalizeXCShallowinConvPass(LegalizeXCConvPass): - @property - def matching_opcode(self): - return XCOREOpCodes.XC_conv2d_shallowin - - def _zero_point_bias(self): - return np.sum( - self._weights.as_array(np.int64) * self._input_zero_point, axis=(1, 2, 3) - ) - - def mutate_weights(self, op): - with self.using(op): - Kw_pad = int(32 / self._weights.shape[3] - self._weights.shape[2]) - unpadded_weights = self._weights.as_array().reshape(self._new_weight_shape) - self._replace_weights( - np.pad( - unpadded_weights, - pad_width=[(0, 0), (0, 0), (0, Kw_pad), (0, 0)], - ) - ) diff --git a/archived/tflite2xcore/tflite2xcore/transformation_passes/dce_passes.py b/archived/tflite2xcore/tflite2xcore/transformation_passes/dce_passes.py deleted file mode 100644 index f7f19637f..000000000 --- a/archived/tflite2xcore/tflite2xcore/transformation_passes/dce_passes.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
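Taken together, the `Replace*Conv2dPass` matchers above partition `CONV_2D` by weight shape. A hypothetical helper (not part of the codebase) summarising that dispatch, with weights in TFLite's `[Cout, K_h, K_w, Cin]` layout:

```python
def pick_xc_conv_kernel(weights_shape, strides):
    cout, k_h, k_w, cin = weights_shape     # TFLite layout: [Cout, K_h, K_w, Cin]
    if cout % 4 or cin % 4:
        return None                         # channel counts must be word-aligned
    if (k_h, k_w) == (1, 1) and strides == (1, 1):
        return "XC_conv2d_1x1"
    if k_w * cin <= 32:
        return "XC_conv2d_shallowin"
    return "XC_conv2d_deep"

print(pick_xc_conv_kernel((16, 3, 3, 8), (1, 1)))  # XC_conv2d_shallowin (3 * 8 <= 32)
```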
- -from .transformation_passes import ( - BufferMatchingPass, - TensorMatchingPass, - OperatorMatchingPass, -) - - -class EliminateDeadOperatorsPass(OperatorMatchingPass): - def match(self, op): - if super().match(op): - interface_tensors = set(op.subgraph.inputs + op.subgraph.outputs) - for t in op.outputs: - if t in interface_tensors or t.consumers: - return False - else: - return True - - return False - - def mutate(self, op): - op.subgraph.remove_operator(op) - - -class EliminateDeadTensorsPass(TensorMatchingPass): - def match(self, tensor): - return ( - super().match(tensor) - and tensor not in tensor.subgraph.inputs - and tensor not in tensor.subgraph.outputs - and not tensor.consumers - and not tensor.producers - ) - - def mutate(self, tensor): - tensor.subgraph.remove_tensor(tensor) - - -class EliminateDeadBuffersPass(BufferMatchingPass): - def match(self, buffer): - return super().match(buffer) and not buffer.owners - - def mutate(self, buffer): - buffer.model.buffers.remove(buffer) - - def run(self, model): - modified_cnt = super().run(model) - self.logger.debug(f"Removed {modified_cnt} dead buffers") - return modified_cnt diff --git a/archived/tflite2xcore/tflite2xcore/transformation_passes/fully_connected_passes.py b/archived/tflite2xcore/tflite2xcore/transformation_passes/fully_connected_passes.py deleted file mode 100644 index bfe1ba06b..000000000 --- a/archived/tflite2xcore/tflite2xcore/transformation_passes/fully_connected_passes.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import numpy as np - -from tflite2xcore.xcore_schema import ( - BuiltinOpCodes, - OperatorCode, - XCOREOpCodes, -) -from tflite2xcore.utils import WORD_SIZE_BYTES - -from .transformation_passes import ( - ReplaceXCWeightBiasOperatorPass, - LegalizeXCWeightBiasPass, -) - - -class ReplaceFullyConnectedPass(ReplaceXCWeightBiasOperatorPass): - @property - def matching_opcode(self): - return BuiltinOpCodes.FULLY_CONNECTED - - @property - def new_opcode(self): - return OperatorCode(XCOREOpCodes.XC_fc) - - -class LegalizeXCFullyConnectedPass(LegalizeXCWeightBiasPass): - @property - def matching_opcode(self): - return XCOREOpCodes.XC_fc - - def _zero_point_bias(self): - return np.sum(self._weights.as_array(np.int64) * self._input_zero_point, axis=1) - - def mutate_weights(self, op): - with self.using(op): - # zero_padding weight tensor - col_pad = ( - WORD_SIZE_BYTES - 1 - (self._weights.shape[1] - 1) % WORD_SIZE_BYTES - ) - arr = np.pad(self._weights.as_array(), pad_width=[(0, 0), (0, col_pad)]) - - self._replace_weights(arr) diff --git a/archived/tflite2xcore/tflite2xcore/transformation_passes/lce_passes.py b/archived/tflite2xcore/tflite2xcore/transformation_passes/lce_passes.py deleted file mode 100644 index 4ddd2fe8a..000000000 --- a/archived/tflite2xcore/tflite2xcore/transformation_passes/lce_passes.py +++ /dev/null @@ -1,709 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
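The column padding in `LegalizeXCFullyConnectedPass.mutate_weights` above rounds each weight row up to the next word boundary. The same arithmetic as a standalone sketch:

```python
import numpy as np

WORD_SIZE_BYTES = 4
weights = np.ones((10, 7), dtype=np.int8)   # (Cout, Cin); Cin == 7 is unaligned
col_pad = WORD_SIZE_BYTES - 1 - (weights.shape[1] - 1) % WORD_SIZE_BYTES
padded = np.pad(weights, pad_width=[(0, 0), (0, col_pad)])
print(col_pad, padded.shape)                # 1 (10, 8)
```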
-import numpy as np -from math import ceil -from typing import Tuple, List, NamedTuple, Dict - -from tflite2xcore.utils import ( - WORD_SIZE_BITS, - WORD_SIZE_BYTES, - VECTOR_SIZE_BITS, - VECTOR_SIZE_WORDS, - ACC_PERIOD_INT8, - xor_popcount, - calculate_same_padding, - get_unpacked_shape, - clrsb, -) -from tflite2xcore.xcore_schema import ( - Operator, - Padding, - TensorType, - ExternalOpCodes, - XCOREOpCodes, - OperatorCode, - BuiltinOpCodes, - ActivationFunctionType, -) - -from .transformation_passes import ( - OperatorMatchingPass, - ReplaceQuantizedOperatorPass, - LegalizeWeightBiasPass, -) -from .conv2d_passes import ReplaceConv2DPass - -FILLER = 0x55555555 - -XC_BCONV2D_OPCODES = ( - XCOREOpCodes.XC_bconv2d_bin, - XCOREOpCodes.XC_bconv2d_bin_DI, - XCOREOpCodes.XC_bconv2d_int8, - XCOREOpCodes.XC_bconv2d_int8_DIDO, -) - - -class ReplaceBconv2DPass(ReplaceConv2DPass): - @property - def matching_opcode(self) -> ExternalOpCodes: - return ExternalOpCodes.LceBconv2d - - @property - def matching_input_type(self) -> TensorType: - return TensorType.INT32 - - @property - def matching_biases_type(self) -> TensorType: - return TensorType.INT32 - - @property - def matching_weights_type(self) -> TensorType: - return TensorType.INT32 - - @property - def _strides(self) -> Tuple[int, int]: - options = self._op.custom_options - return options["stride_height"], options["stride_width"] - - @property - def _dilation(self) -> Tuple[int, int]: - options = self._op.custom_options - return options["dilation_height_factor"], options["dilation_width_factor"] - - @property - def _padding(self) -> Padding: - return Padding(self._op.custom_options["padding"]) - - @property - def _fused_activation_function(self) -> ActivationFunctionType: - return ActivationFunctionType( - self._op.custom_options["fused_activation_function"] - ) - - @property - def _input_channels(self) -> int: - return self._op.custom_options["channels_in"] - - @property - def _output_channels(self) -> int: - return self._weights.shape[0] - - def match(self, op: Operator) -> bool: - if super().match(op): - with self.using(op): - if self._input_channels != self._weights.shape[3] * WORD_SIZE_BITS: - self.logger.warning( - f"Found {self.matching_opcode} operator " - f"with {self._input_channels} input channels " - f"(not a multiple of {WORD_SIZE_BITS})." 
- ) - elif self._output_channels % WORD_SIZE_BYTES != 0: - self.logger.warning( - f"Found {self.matching_opcode} operator " - f"with {self._output_channels} output channels " - f"(not a multiple of {WORD_SIZE_BYTES})" - ) - else: - return True - - return False - - def mutate(self, op: Operator) -> Operator: - new_op = super().mutate(op) - with self.using(op): - new_op.add_custom_options(stride=self._strides, padding=self._padding) - return new_op - - -class ReplaceBconv2DInt8Pass(ReplaceBconv2DPass): - @property - def new_opcode(self) -> OperatorCode: - return OperatorCode(XCOREOpCodes.XC_bconv2d_int8) - - def _match_non_weight_inputs(self) -> bool: - return len(self._op.inputs) == 4 and all( - params_tensor.type is TensorType.FLOAT32 - and params_tensor.is_constant - and params_tensor not in self._op.subgraph.outputs - for params_tensor in self._op.inputs[2:] - ) - - def mutate(self, op: Operator) -> Operator: - new_op = super().mutate(op) - with self.using(op): - new_op.add_custom_options( - fused_activation_function=self._fused_activation_function - ) - return new_op - - -class ReplaceBconv2DInt8DeepInDeepOutPass(ReplaceBconv2DInt8Pass): - @property - def new_opcode(self) -> OperatorCode: - return OperatorCode(XCOREOpCodes.XC_bconv2d_int8_DIDO) - - def match(self, op: Operator) -> bool: - with self.using(op): - return ( - super().match(op) - and self._input_channels % VECTOR_SIZE_BITS == 0 - and self._output_channels % ACC_PERIOD_INT8 == 0 - ) - - -class ReplaceBconv2DBitpackedPass(ReplaceBconv2DPass): - @property - def matching_output_type(self) -> TensorType: - return TensorType.INT32 - - @property - def new_opcode(self) -> OperatorCode: - return OperatorCode(XCOREOpCodes.XC_bconv2d_bin) - - def _match_non_weight_inputs(self) -> bool: - return ( - len(self._op.inputs) == 3 - and self._op.inputs[2].type is TensorType.INT32 - and self._op.inputs[2].is_constant - and self._op.inputs[2] not in self._op.subgraph.outputs - ) - - def match(self, op: Operator) -> bool: - if super().match(op): - with self.using(op): - if self._output_channels % WORD_SIZE_BITS == 0: - return True - self.logger.warning( - f"Found {self.matching_opcode} operator with bitpacked output " - f"and {self._output_channels} output channels " - f"(not a multiple of {WORD_SIZE_BITS})" - ) - return False - - -class ReplaceBconv2DBitpackedDeepInPass(ReplaceBconv2DBitpackedPass): - @property - def new_opcode(self) -> OperatorCode: - return OperatorCode(XCOREOpCodes.XC_bconv2d_bin_DI) - - def match(self, op: Operator) -> bool: - with self.using(op): - return super().match(op) and self._input_channels % VECTOR_SIZE_BITS == 0 - - -class ReplaceLceQuantizePass(ReplaceQuantizedOperatorPass): - @property - def new_opcode(self) -> OperatorCode: - return OperatorCode(XCOREOpCodes.XC_bsign_8) - - @property - def matching_opcode(self) -> ExternalOpCodes: - return ExternalOpCodes.LceQuantize - - @property - def matching_output_type(self) -> TensorType: - return TensorType.INT32 - - def match(self, op: Operator) -> bool: - if super().match(op): - input_shape = op.inputs[0].shape - if len(input_shape) == 4 and input_shape[3] % WORD_SIZE_BITS == 0: - return True - self.logger.warning( - f"Found LceQuantize with illegal input shape {input_shape}" - ) - return False - - -class LegalizeBconv2dPass(LegalizeWeightBiasPass): - @property - def matching_input_type(self) -> TensorType: - return TensorType.INT32 - - @property - def _kernel_channel_size(self) -> int: - # call only after custom options are set with weights shape - return 
np.prod(self._op.custom_options["K"][1:])  # type: ignore
-
-    @property
-    def _overlap_size(self) -> int:
-        return (
-            VECTOR_SIZE_WORDS
-            - 1
-            - (self._kernel_channel_size // WORD_SIZE_BITS - 1) % VECTOR_SIZE_WORDS
-        )
-
-    @property
-    def _fill_size(self) -> int:
-        return self._overlap_size
-
-    @staticmethod
-    def __c_out_group_bounds(c_out_group: int, num_c_out: int) -> Tuple[int, int]:
-        c_out_group_start = c_out_group * ACC_PERIOD_INT8
-        c_out_group_end = min(num_c_out, (c_out_group + 1) * ACC_PERIOD_INT8)
-        return c_out_group_start, c_out_group_end
-
-    def mutate_weights(self, op: Operator) -> None:
-        with self.using(op):
-            weights = self._weights.as_array()
-
-            num_c_out = weights.shape[0]
-            num_cout_groups = ceil(num_c_out / ACC_PERIOD_INT8)
-
-            # first we reorder the weights
-            reordered_weight_channels: List[np.ndarray] = []
-            for c_out_group in range(num_cout_groups):
-                c_start, c_end = self.__c_out_group_bounds(c_out_group, num_c_out)
-                chan_group = weights.reshape(num_c_out, -1)[c_start:c_end]
-                reordered_weight_channels.extend(
-                    a.ravel()
-                    for a in np.split(
-                        np.flip(chan_group, axis=0),
-                        [
-                            i * VECTOR_SIZE_WORDS
-                            for i in range(
-                                ceil(chan_group.shape[-1] / VECTOR_SIZE_WORDS)
-                            )
-                        ],
-                        axis=1,
-                    )
-                )
-
-            # then we need to add filler bits at the end of the last channel
-            # NOTE: this means that this tensor is no longer rectangular
-            reordered_weight_channels.append(
-                # TODO: fix this filler value
-                np.full(self._fill_size, FILLER, dtype=weights.dtype)
-            )
-            self._replace_weights(np.concatenate(reordered_weight_channels))
-
-    def _calculate_overlap_correction(self, boggled_weights: np.ndarray) -> np.ndarray:
-        channel_size_words = self._kernel_channel_size // WORD_SIZE_BITS
-        tail_size = VECTOR_SIZE_WORDS - self._overlap_size
-        overlap_correction = np.empty(self._biases.shape, dtype=np.int32)
-        num_channels_out = self._biases.shape[0]
-        for c_out in range(num_channels_out):
-            c_out_group = c_out // ACC_PERIOD_INT8
-            c_start, c_end = self.__c_out_group_bounds(c_out_group, num_channels_out)
-            reversed_offset = c_out % ACC_PERIOD_INT8 % (c_end - c_start) * tail_size
-            overlap_start = c_end * channel_size_words - reversed_offset
-
-            junk = boggled_weights[overlap_start : overlap_start + self._overlap_size]
-            overlap_correction[c_out] = (
-                xor_popcount(junk, np.zeros_like(junk)) - junk.size * WORD_SIZE_BITS / 2
-            )
-        return overlap_correction
-
-    def mutate(self, op: Operator) -> Operator:
-        with self.using(op):
-            op.add_custom_options(K=get_unpacked_shape(self._weights.shape))
-        # NOTE: the order of these mutations is strict
-        self.mutate_weights(op)
-        self.mutate_biases(op)
-        op.custom_options.pop("illegal_params")
-        return op
-
-
-class LegalizeBconv2dInt8Pass(LegalizeBconv2dPass):
-    @property
-    def matching_opcode(self) -> XCOREOpCodes:
-        return XCOREOpCodes.XC_bconv2d_int8
-
-    @property
-    def matching_output_type(self) -> TensorType:
-        return TensorType.INT8
-
-    @property
-    def _fill_size(self) -> int:
-        k_p_adjust = (
-            self._kernel_channel_size // WORD_SIZE_BITS - 1
-        ) % VECTOR_SIZE_WORDS + 1
-        patch_loop_counter = ceil(self._kernel_channel_size / VECTOR_SIZE_BITS) - 1
-        out_tail_chans = int(self._weights.shape[0] - 1) % ACC_PERIOD_INT8 + 1
-        fill_words = (patch_loop_counter > 0) * (
-            ACC_PERIOD_INT8 - out_tail_chans
-        ) * VECTOR_SIZE_WORDS - k_p_adjust * out_tail_chans
-        return max(fill_words, VECTOR_SIZE_WORDS)
-
-    def _calculate_accu_clamps(self) -> Tuple[float, float]:
-        # follow larq's implementation to get the output transform clamps
-        INT32_MIN, INT32_MAX = 
np.iinfo(np.int32).min, np.iinfo(np.int32).max - activation_range_map: Dict[ActivationFunctionType, Tuple[int, int]] = { - ActivationFunctionType.NONE: (INT32_MIN, INT32_MAX), - ActivationFunctionType.RELU: (0, INT32_MAX), - ActivationFunctionType.RELU_N1_TO_1: (-1, 1), - ActivationFunctionType.RELU6: (0, 6), - } - nominal_clamps = activation_range_map[ - self._op.custom_options["fused_activation_function"] - ] - output_trf_clamps = ( - self._kernel_channel_size - - min(nominal_clamps[1], self._kernel_channel_size), - self._kernel_channel_size - - max(nominal_clamps[0], -self._kernel_channel_size), - ) - - # transform to xcore vpu accumulator space - return ( - (self._kernel_channel_size - output_trf_clamps[0]) / 2, - (self._kernel_channel_size - output_trf_clamps[1]) / 2, - ) - - @staticmethod - def __calculate_exp_bounds(arr: np.ndarray, bound_width: int) -> Tuple[int, int]: - min_exp = -1 - int(np.max(np.frexp(arr)[1])) - return min_exp, min_exp + bound_width - - def _calculate_MBA( - self, adjusted_pam: np.ndarray, adjusted_pab: np.ndarray - ) -> Tuple[int, int, int]: - # calculate bounds on A - accu_clamps = self._calculate_accu_clamps() - max_out = int(max(self._kernel_channel_size / 2, *accu_clamps)) - min_out = int(min(-self._kernel_channel_size / 2, *accu_clamps)) - rsb = min(clrsb(max_out), clrsb(min_out)) - Amin, Amax = rsb - 32 + 1, rsb - 16 - - # calculate bounds on M - Mmin, Mmax = self.__calculate_exp_bounds(adjusted_pam, bound_width=16) - - # calculate bounds on B - Bmin, Bmax = self.__calculate_exp_bounds(adjusted_pab, bound_width=16 + 14) - # ensure A + M = B, and that the addition is fine - Bmax = max(Bmax, Amax + Mmax - 1) - - for A in range(Amax, Amin - 1, -1): - for M in range(Mmax, Mmin - 1, -1): - B = A + M - if Bmin <= B <= Bmax: - return M, B, A - raise ValueError("quantized exponents cannot be determined") - - def _calculate_clamp_offsets(self, A: int) -> Tuple[int, int, int]: - shifted_accu_limits = tuple(c * 2 ** A for c in self._calculate_accu_clamps()) - - INT16_MAX = np.iinfo(np.int16).max - clamp_offsets = ( - int(INT16_MAX - shifted_accu_limits[0]), - int(-INT16_MAX - shifted_accu_limits[1]), - ) - - if abs(clamp_offsets[0]) >= abs(clamp_offsets[1]): - clamp_offsets = clamp_offsets[::-1] - clamp_far_half = clamp_offsets[1] // 2 - return (-clamp_offsets[0], -clamp_offsets[1] + clamp_far_half, clamp_far_half) - - class _ScalarQuantParams(NamedTuple): - M: int - clamp_offset_near: int - clamp_offset_far0: int - clamp_offset_far1: int - bias_multiplier: int - accu_shr: int - accu_shl: int - final_shr: int - adjusted_B: int - - def _calculate_scalar_quant_params( - self, adjusted_pam: np.ndarray, adjusted_pab: np.ndarray - ) -> "_ScalarQuantParams": - M, B, A = self._calculate_MBA(adjusted_pam, adjusted_pab) - assert B >= 8 - - _, Bmax_16 = self.__calculate_exp_bounds(adjusted_pab, bound_width=16) - accu_shift_signed = -A - - return self._ScalarQuantParams( - M, - *self._calculate_clamp_offsets(A), - bias_multiplier=2 ** max(0, B - Bmax_16), - accu_shr=max(0, accu_shift_signed), - accu_shl=min(accu_shift_signed, 0), - final_shr=B - 8, - adjusted_B=min(B, Bmax_16), - ) - - class _QuantParams(NamedTuple): - post_act_mult_quant: np.ndarray - post_act_bias_quant: np.ndarray - output_trf_params: np.ndarray - accu_modifier: np.ndarray - - def _calculate_quant_parameters( - self, adjusted_pam: np.ndarray, adjusted_pab: np.ndarray - ) -> "_QuantParams": - # first we calculate the scalar quantization parameters - q_params = 
self._calculate_scalar_quant_params(adjusted_pam, adjusted_pab)
-
-        # then quantize the post activation multiplier and bias
-        pam_q = np.round(adjusted_pam * 2.0 ** q_params.M)
-        post_act_mult_quant = pam_q.astype(np.int16)
-        assert np.all(post_act_mult_quant == pam_q)
-
-        # TODO: fix this so there is no need to clip
-        pab_q = np.round(adjusted_pab * 2.0 ** q_params.adjusted_B)
-        post_act_bias_quant = np.clip(
-            pab_q, np.iinfo(np.int16).min, np.iinfo(np.int16).max
-        ).astype(np.int16)
-        if np.any(post_act_bias_quant != pab_q):
-            self.logger.warning("clipped post_act_bias_quant")
-
-        # output transform parameters need to be replicated and concatenated for efficiency
-        def fill_int16_vector(val: int) -> np.ndarray:
-            return np.full(16, val, dtype=np.int16)
-
-        # TODO: fix this by reordering the underlying lib_nn struct
-        output_trf_params = np.concatenate(
-            [
-                fill_int16_vector(getattr(q_params, field))
-                for field in (
-                    "clamp_offset_near",
-                    "clamp_offset_far0",
-                    "clamp_offset_far1",
-                    "bias_multiplier",
-                    "final_shr",
-                    "accu_shr",
-                )
-            ]
-            + [np.frombuffer(np.int32(q_params.accu_shl).tobytes(), dtype=np.int16)]
-        )
-
-        # calculate quantized accumulator modifier
-        weights = self._weights.as_array()  # already boggled
-        overlap_corrections = self._calculate_overlap_correction(weights)
-        accu_modifier = np.int16(
-            overlap_corrections / 2 ** (q_params.accu_shr + q_params.accu_shl)
-        )
-
-        return self._QuantParams(
-            post_act_mult_quant, post_act_bias_quant, output_trf_params, accu_modifier
-        )
-
-    def mutate_biases(self, op: Operator) -> None:
-        with self.using(op):
-            # first we adjust pam/pab as the larq kernel's output transform requires
-            output_scale = self._output.quantization["scale"][0]
-            output_zero_point = self._output.quantization["zero_point"][0]
-            post_act_mult_float = self._op.inputs[2].as_array()
-            post_act_bias_float = self._op.inputs[3].as_array()
-
-            output_trf_pam = -post_act_mult_float / output_scale
-            output_trf_pab = (
-                post_act_bias_float / output_scale
-                - output_trf_pam * self._kernel_channel_size
-                + output_zero_point
-            )
-
-            # then adjust pam/pab as required by our kernels
-            adjusted_pam = -2 * output_trf_pam
-            adjusted_pab = output_trf_pab + output_trf_pam * self._kernel_channel_size
-
-            # calculate quantization parameters as required by the kernel
-            q_params = self._calculate_quant_parameters(adjusted_pam, adjusted_pab)
-
-            # TODO: refactor the rest of this function
-            # create and populate new post_act_mult tensor
-            new_pam_tensor = self._op.subgraph.create_tensor(
-                f"{self._op.name}/post_act_mult",
-                TensorType.from_numpy_dtype(q_params.post_act_mult_quant.dtype),
-                q_params.post_act_mult_quant.shape,
-                consumers=[self._op],
-            )
-            new_pam_tensor.buffer.data = q_params.post_act_mult_quant
-
-            # replace old pam tensor
-            self._op.inputs[2].consumers.remove(self._op)
-            self._op.inputs[2] = new_pam_tensor
-
-            # create and populate new post_act_bias tensor
-            new_pab_tensor = self._op.subgraph.create_tensor(
-                f"{self._op.name}/post_act_bias",
-                TensorType.from_numpy_dtype(q_params.post_act_bias_quant.dtype),
-                q_params.post_act_bias_quant.shape,
-                consumers=[self._op],
-            )
-            new_pab_tensor.buffer.data = q_params.post_act_bias_quant
-
-            # replace old pab tensor
-            self._op.inputs[3].consumers.remove(self._op)
-            self._op.inputs[3] = new_pab_tensor
-
-            # create and populate new output_trf_params tensor
-            output_trf_tensor = self._op.subgraph.create_tensor(
-                f"{self._op.name}/output_trf_params",
-                TensorType.from_numpy_dtype(q_params.output_trf_params.dtype),
-                q_params.output_trf_params.shape,
-                consumers=[self._op],
-            )
-            output_trf_tensor.buffer.data = q_params.output_trf_params
-            self._op.inputs.append(output_trf_tensor)
-
-            # create and populate new accu_modifier tensor
-            accu_modifier_tensor = self._op.subgraph.create_tensor(
-                f"{self._op.name}/accu_modifier",
-                TensorType.from_numpy_dtype(q_params.accu_modifier.dtype),
-                q_params.accu_modifier.shape,
-                consumers=[self._op],
-            )
-            accu_modifier_tensor.buffer.data = q_params.accu_modifier
-            self._op.inputs.append(accu_modifier_tensor)
-
-    def mutate(self, op: Operator) -> Operator:
-        new_op = super().mutate(op)
-        new_op.custom_options.pop("fused_activation_function")
-        return new_op
-
-
-class LegalizeBconv2dInt8DeepInDeepOutPass(LegalizeBconv2dInt8Pass):
-    @property
-    def matching_opcode(self) -> XCOREOpCodes:
-        return XCOREOpCodes.XC_bconv2d_int8_DIDO
-
-    def mutate_biases(self, op: Operator) -> None:
-        super().mutate_biases(op)
-
-        # we just need to make sure that there is no overlap
-        accu_modifier_tensor = op.inputs[5]
-        assert np.all(accu_modifier_tensor.as_array() == 0)
-        accu_modifier_tensor.consumers.remove(op)
-        del op.inputs[5]
-
-
-class LegalizeBconv2dBitpackedPass(LegalizeBconv2dPass):
-    @property
-    def matching_output_type(self) -> TensorType:
-        return TensorType.INT32
-
-    @property
-    def matching_opcode(self) -> XCOREOpCodes:
-        return XCOREOpCodes.XC_bconv2d_bin
-
-    def mutate_biases(self, op: Operator) -> None:
-        with self.using(op):
-            thresholds = self._biases.as_array()
-            weights = self._weights.as_array()  # already boggled
-
-            # first we need to calculate a correction term
-            # due to how our HW popcount differs from the Larq reference
-            popcount_correction = self._kernel_channel_size / 2
-
-            # second we need to calculate correction terms
-            # due to how we handle incomplete weights registers
-            # (the data register is padded with zeros, so the loaded kernel
-            # coeffs can have some junk loaded, and we correct that)
-            overlap_correction = self._calculate_overlap_correction(weights)
-            thresholds += np.int32(overlap_correction - popcount_correction)
-
-            # boggle the lower and higher 2 bytes in every ACC_PERIOD_INT8 consecutive value
-            thresholds = np.concatenate(
-                [
-                    np.frombuffer(
-                        np.frombuffer(cgroup.tobytes(), dtype=np.int16)
-                        .reshape(ACC_PERIOD_INT8, 2)
-                        .T.tobytes(),
-                        dtype=np.int32,
-                    )
-                    for cgroup in thresholds.reshape(
-                        thresholds.shape[0] // ACC_PERIOD_INT8, ACC_PERIOD_INT8
-                    )
-                ]
-            )
-
-            # create and populate new thresholds tensor
-            new_thresholds = self._op.subgraph.create_tensor(
-                f"{self._op.name}/thresholds",
-                TensorType.INT32,
-                thresholds.shape,
-                consumers=[self._op],
-            )
-            new_thresholds.buffer.data = thresholds
-
-            # replace old tensor
-            self._op.inputs[2].consumers.remove(self._op)
-            self._op.inputs[2] = new_thresholds
-
-
-class LegalizeBconv2dBitpackedDeepInPass(LegalizeBconv2dBitpackedPass):
-    @property
-    def matching_opcode(self) -> XCOREOpCodes:
-        return XCOREOpCodes.XC_bconv2d_bin_DI
-
-    @property
-    def _overlap_size(self) -> int:
-        return 0
-
-
-# Split out padding to a separate op from BConv
-# TODO: this currently only matches with XC_bconv2d_*
-# but going forward we might like to extend this to other conv ops
-# and make it a more general pass for all convolutions.
-class LegalizeXCBconv2DPaddingPass(OperatorMatchingPass): - @property - def _strides(self) -> Tuple[int, int]: - return self._op.custom_options["stride"] - - @property - def _padding(self) -> Padding: - return self._op.custom_options["padding"] - - MATCHING_OPCODES = XC_BCONV2D_OPCODES - - def match(self, op: Operator) -> bool: - return ( - super().match(op) - and op.operator_code.code in self.MATCHING_OPCODES - and "padding" in op.custom_options - ) - - def mutate(self, op: Operator) -> Operator: - padding = Padding(op.custom_options.pop("padding")) - if padding is Padding.VALID: - return op - - old_input = op.inputs[0] - - # calculate paddings - with self.using(op): - input_and_strides = old_input.shape[1:3], self._strides - - paddings = np.int32( - [ - (0, 0), - *calculate_same_padding(*input_and_strides, op.inputs[1].shape[1:3]), - (0, 0), - ] - ) - - # return early if mode is SAME, but has no effect - if np.all(paddings == 0): - return op - - subgraph = op.subgraph - - # Construct paddings parameter tensor and padded input tensor - padding_tensor = subgraph.create_tensor( - f"{op.name}/paddings", TensorType.INT32, shape=paddings.shape - ) - padding_tensor.buffer.data = paddings - - padded_shape = tuple( - int(size + sum(pads)) for size, pads in zip(old_input.shape, paddings) - ) - padded_input_tensor = subgraph.create_tensor( - f"{op.name}/input", TensorType.INT32, shape=padded_shape, consumers=[op] - ) - - # create new PAD op and inject it before the convolution - pad_op = subgraph.create_operator( - OperatorCode(BuiltinOpCodes.PAD), - inputs=[old_input, padding_tensor], - outputs=[padded_input_tensor], - ) - subgraph.insert_operator(op, pad_op) - - # Cut connection from old input to the op - old_input.consumers.remove(op) - op.inputs[0] = padded_input_tensor - - return op diff --git a/archived/tflite2xcore/tflite2xcore/transformation_passes/lut_passes.py b/archived/tflite2xcore/tflite2xcore/transformation_passes/lut_passes.py deleted file mode 100644 index 91f2efa71..000000000 --- a/archived/tflite2xcore/tflite2xcore/transformation_passes/lut_passes.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
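`LegalizeXCBconv2DPaddingPass` above materialises SAME padding into an explicit PAD op via `calculate_same_padding`, which is not shown in this patch. Presumably it implements the standard per-dimension arithmetic, sketched here under that assumption:

```python
from math import ceil

def same_padding(in_size: int, stride: int, kernel: int):
    out_size = ceil(in_size / stride)
    total = max((out_size - 1) * stride + kernel - in_size, 0)
    return total // 2, total - total // 2  # (begin, end); end gets the odd pixel

print(same_padding(in_size=9, stride=2, kernel=3))  # (1, 1)
```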
- -import numpy as np - -from tflite2xcore.utils import quantize, dequantize -from tflite2xcore.xcore_schema import ( - TensorType, - BuiltinOpCodes, - OperatorCode, - XCOREOpCodes, -) - -from .transformation_passes import ( - ReplaceQuantizedOperatorPass, - QuantizedOperatorMatchingPass, -) - - -ACTIVATIONS = { - BuiltinOpCodes.RELU: lambda x: np.maximum(x, 0.0), - BuiltinOpCodes.RELU6: lambda x: np.minimum(np.maximum(x, 0.0), 6.0), - BuiltinOpCodes.TANH: lambda x: np.tanh(x), - BuiltinOpCodes.LOGISTIC: lambda x: 1.0 / (1.0 + np.exp(-x)), -} - - -class ReplaceWithXCLookupPass(ReplaceQuantizedOperatorPass): - @property - def new_opcode(self): - return OperatorCode(XCOREOpCodes.XC_lookup_8) - - def mutate(self, op): - new_op = super().mutate(op) - new_op.add_custom_options(original_opcode=self.matching_opcode) - return new_op - - -class LegalizeXCLookupTablePass(QuantizedOperatorMatchingPass): - @property - def matching_opcode(self): - return XCOREOpCodes.XC_lookup_8 - - def match(self, op): - return super().match(op) and "original_opcode" in op.custom_options - - def _dequantize_input(self, int_arr): - input_quant = self._input.quantization - return dequantize( - int_arr, input_quant["scale"][0], input_quant["zero_point"][0] - ) - - def _quantize_output(self, float_arr): - output_quant = self._output.quantization - return quantize( - float_arr, output_quant["scale"][0], output_quant["zero_point"][0] - ) - - def mutate(self, op): - inputs_int = np.arange(-128, 128, dtype=np.int8) - activation = ACTIVATIONS[op.custom_options.pop("original_opcode")] - with self.using(op): - outputs_int = self._quantize_output( - activation(self._dequantize_input(inputs_int)) - ) - outputs_int = np.concatenate([outputs_int[128:], outputs_int[0:128]]) - - lut_tensor = op.subgraph.create_tensor( - f"{op.name}/LUT", TensorType.INT8, shape=[len(outputs_int)], consumers=[op] - ) - lut_tensor.buffer.data = outputs_int - op.inputs.append(lut_tensor) - - -class ReplaceReLUPass(ReplaceWithXCLookupPass): - @property - def matching_opcode(self): - return BuiltinOpCodes.RELU - - -class ReplaceReLU6Pass(ReplaceWithXCLookupPass): - @property - def matching_opcode(self): - return BuiltinOpCodes.RELU6 - - -class ReplaceTanhPass(ReplaceWithXCLookupPass): - @property - def matching_opcode(self): - return BuiltinOpCodes.TANH - - -class ReplaceLogisticPass(ReplaceWithXCLookupPass): - @property - def matching_opcode(self): - return BuiltinOpCodes.LOGISTIC diff --git a/archived/tflite2xcore/tflite2xcore/transformation_passes/minification_passes.py b/archived/tflite2xcore/tflite2xcore/transformation_passes/minification_passes.py deleted file mode 100644 index 79c95786d..000000000 --- a/archived/tflite2xcore/tflite2xcore/transformation_passes/minification_passes.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
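`LegalizeXCLookupTablePass.mutate` above builds its 256-entry table by dequantising every possible int8 input, applying the float activation, and requantising the result. A standalone sketch with made-up quantization parameters (scale 0.02, zero point -5 on both input and output):

```python
import numpy as np

scale, zero_point = 0.02, -5                 # same quantization on input and output
inputs_int = np.arange(-128, 128, dtype=np.int8)
x = scale * (inputs_int.astype(np.float64) - zero_point)   # dequantize
y = np.tanh(x)                                             # float activation
outputs_int = np.int8(np.clip(np.round(y / scale) + zero_point, -128, 127))
# rotate so the table is indexed by the uint8 view of the int8 input
lut = np.concatenate([outputs_int[128:], outputs_int[0:128]])
```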
- -from tflite2xcore.xcore_schema import XCOREOpCodes, BuiltinOpCodes, Buffer, XCOREModel -from .transformation_passes import TensorMatchingPass, BufferMatchingPass - - -class MinifyQuantInfoPass(TensorMatchingPass): - # NOTE: it's risky to include the builtin ops here, but (at least in the - # micro interpreter), min/max info does not seem to be used - SAFE_OP_CODES = [c for c in XCOREOpCodes] + [c for c in BuiltinOpCodes] - - def match(self, tensor): - dependents = tensor.consumers + tensor.producers - quantization = tensor.quantization - - if super().match(tensor) and quantization and dependents: - for op in dependents: - if op.operator_code.code not in self.SAFE_OP_CODES: - # min/max info is removed if tensor only interacts with XC ops - return False - else: - return "min" in quantization or "max" in quantization - return False - - def mutate(self, tensor): - tensor.quantization.pop("min", None) - tensor.quantization.pop("max", None) - - -class MinifyTensorNamesPass(TensorMatchingPass): - def __new_tensor_name(self, tensor): - return str(self._obj_index) - - def match(self, tensor): - return super().match(tensor) and tensor.name != self.__new_tensor_name(tensor) - - def mutate(self, tensor): - tensor.name = self.__new_tensor_name(tensor) - - -# TODO: add tests -class UnifyEmptyBuffersPass(BufferMatchingPass): - def match(self, buffer: Buffer) -> bool: - return ( - super().match(buffer) - and not buffer - and buffer is not buffer.model.buffers[0] - and buffer.owners - ) - - def mutate(self, buffer: Buffer) -> None: - sentinel = buffer.model.buffers[0] - - for owner in buffer.owners: - owner.buffer = sentinel - sentinel.owners.append(owner) - - buffer.owners = [] - - def run(self, model: XCOREModel) -> int: - model.buffers.insert(0, Buffer()) - modified_cnt = super().run(model) - self.logger.debug(f"Unified {modified_cnt} empty buffers") - return modified_cnt diff --git a/archived/tflite2xcore/tflite2xcore/transformation_passes/op_version_passes.py b/archived/tflite2xcore/tflite2xcore/transformation_passes/op_version_passes.py deleted file mode 100644 index a0a8fae87..000000000 --- a/archived/tflite2xcore/tflite2xcore/transformation_passes/op_version_passes.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -from tflite2xcore.xcore_model import Operator -from tflite2xcore.xcore_schema import TensorType, BuiltinOpCodes - -from .transformation_passes import OperatorMatchingPass - - -class LegalizeQuantizeVersionPass(OperatorMatchingPass): - def match(self, op: Operator) -> bool: - if not super().match(op): - return False - - opcode = op.operator_code - return ( - opcode.code is BuiltinOpCodes.QUANTIZE - and opcode.version == 2 - and op.inputs[0].type is TensorType.FLOAT32 - and op.outputs[0].type is TensorType.INT8 - ) - - def mutate(self, op: Operator) -> None: - op.operator_code.version = 1 diff --git a/archived/tflite2xcore/tflite2xcore/transformation_passes/padding_passes.py b/archived/tflite2xcore/tflite2xcore/transformation_passes/padding_passes.py deleted file mode 100644 index ac4d1bf6f..000000000 --- a/archived/tflite2xcore/tflite2xcore/transformation_passes/padding_passes.py +++ /dev/null @@ -1,339 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
- -import numpy as np -from typing import Tuple - -from tflite2xcore.xcore_schema import ( - Padding, - TensorType, - BuiltinOpCodes, - XCOREOpCodes, - OperatorCode, - Operator, -) - -from .transformation_passes import OperatorMatchingPass - - -class FuseConv2dPaddingPass(OperatorMatchingPass): - MATCHING_OPCODES = ( - XCOREOpCodes.XC_conv2d_depthwise, - XCOREOpCodes.XC_conv2d_deep, - XCOREOpCodes.XC_conv2d_shallowin, - ) - - @property - def _producer(self): - return self._op.inputs[0].producers[0] - - @property - def _pad_params(self): - return self._producer.inputs[1].as_array().tolist() - - @property - def _conv_pad(self) -> Tuple[int, int]: - return tuple(-p for p in self._op.custom_options["pad"]) - - @property - def _kernel_size(self): - opcode = self._op.operator_code.code - weights = self._op.inputs[1] - if opcode is XCOREOpCodes.XC_conv2d_depthwise: - return weights.shape[0:2] - elif opcode in (XCOREOpCodes.XC_conv2d_deep, XCOREOpCodes.XC_conv2d_shallowin): - return weights.shape[1:3] - - @staticmethod - def _calculate_end_padding(out_size, strides, in_size, kernel_size): - return tuple( - (o - 1) * s - i + k - for o, s, i, k in zip(out_size, strides, in_size, kernel_size) - ) - - def match(self, op): - if not super().match(op): - return False - - with self.using(op): - opcode = self._op.operator_code.code - if opcode not in self.MATCHING_OPCODES: - return False - - try: - if self._producer.operator_code.code is not BuiltinOpCodes.PAD: - return False - except IndexError: - # No producers found for input - return False - - pad_params = self._pad_params - if len(pad_params) != 4: - return False - - if pad_params[0] != [0, 0] or pad_params[3] != [0, 0]: - # NOTE: SplitPaddingPass decouples channel- and batch-wise padding - return False - - kernel_size = self._kernel_size - implicit_end_pads = self._calculate_end_padding( - out_size=op.outputs[0].shape[1:3], - strides=op.custom_options["stride"], - in_size=op.inputs[0].shape[1:3], - kernel_size=kernel_size, - ) - - conv_pad = self._conv_pad - - all_pads = ( - [conv_pad[0] + pad_params[1][0], implicit_end_pads[0] + pad_params[1][1]], - [conv_pad[1] + pad_params[2][0], implicit_end_pads[1] + pad_params[2][1]], - ) - - for p, k in zip(all_pads, kernel_size): - if p[0] >= k or p[1] >= k: - # kernels currently don't support this - self.logger.warning( - f"While fusing, found implicit padding={p}" - f" not smaller than kernel={kernel_size}" - ) - return False - - if len(conv_pad) == 2 and not isinstance(conv_pad, str): - return True - elif conv_pad in ["SAME", "VALID"] + list(Padding): - raise ValueError(f"Deprecated 'pad' option in {opcode}: 'pad'={conv_pad}") - else: - self.logger.warning(f"Invalid option in {opcode}: 'pad'={conv_pad}") - - return False - - def mutate(self, op): - with self.using(op): - producer = self._producer - pad_params = self._pad_params - old_pad = self._conv_pad - - # cut connection to old input - op.inputs[0].consumers.remove(op) - - # add connection from unpadded input to convolution operator - op.inputs[0] = producer.inputs[0] - op.inputs[0].consumers.append(op) - - # set padding: [top, left, zero_point] - op.custom_options["pad"] = [ - -(old_pad[0] + pad_params[1][0]), - -(old_pad[1] + pad_params[2][0]), - ] - - -class SplitPaddingPass(OperatorMatchingPass): - @property - def _pad_params(self): - return self._op.inputs[1].as_array().tolist() - - def match(self, op): - if not super().match(op): - return False - - with self.using(op): - opcode = self._op.operator_code.code - if opcode is not BuiltinOpCodes.PAD: 
- return False - - pad_params = self._pad_params - if len(pad_params) != 4: - return False - - return (pad_params[0] != [0, 0] or pad_params[3] != [0, 0]) and ( - pad_params[1] != [0, 0] or pad_params[2] != [0, 0] - ) - - def mutate(self, op): - subgraph = op.subgraph - - with self.using(op): - pad_params = self._pad_params - pads_NC = [pad_params[0], [0, 0], [0, 0], pad_params[3]] - pads_HW = [[0, 0], pad_params[1], pad_params[2], [0, 0]] - - # cut connection from old input to the op - old_input = op.inputs[0] - old_input.consumers.remove(op) - - # create new parameter tensor for the op, and replace old - # the old op will become the spatial padding - # this is needed because multiple ops can share the same parameter tensor - # NOTE: the old paddings tensor might be dangling and will be cleaned up later - op.inputs[1].consumers.remove(op) - op.inputs[1] = subgraph.create_tensor( - f"{op.name}/paddings", TensorType.INT32, shape=[4, 2], consumers=[op] - ) - op.inputs[1].buffer.data = np.int32(pads_HW) - - # create new (batch/channel-wise) operator - new_op = subgraph.create_operator( - OperatorCode(BuiltinOpCodes.PAD), inputs=[old_input] - ) - subgraph.insert_operator(op, new_op) - - # assign padding tensor to new op - new_op.inputs.append( - subgraph.create_tensor( - f"{new_op.name}/paddings", - TensorType.INT32, - shape=[4, 2], - consumers=[new_op], - ) - ) - new_op.inputs[1].buffer.data = np.int32(pads_NC) - - # create intermediate tensor and wire it up - intermediate_shape = [ - size + pad[0] + pad[1] for size, pad in zip(old_input.shape, pads_NC) - ] - op.inputs[0] = subgraph.create_tensor( - f"{new_op.name}/output", - old_input.type, - intermediate_shape, - consumers=[op], - producers=[new_op], - quantization=old_input.quantization, - ) - new_op.outputs.append(op.inputs[0]) - - -class FuseConsecutivePadsPass(OperatorMatchingPass): - @property - def _producer(self): - return self._op.inputs[0].producers[0] - - @property - def _pad_params(self): - return self._op.inputs[1].as_array() - - def match(self, op): - # the anchor is the second of two consecutive PAD ops - try: - with self.using(op): - return ( - super().match(op) - and self._op.operator_code.code is BuiltinOpCodes.PAD - and self._producer.operator_code.code is BuiltinOpCodes.PAD - ) - except IndexError: - # No producers found for input - return False - - def mutate(self, op): - subgraph = op.subgraph - with self.using(op): - producer = self._producer - this_params = self._pad_params - with self.using(producer): - producer_params = self._pad_params - new_params = this_params + producer_params - - # cut connection from old inputs to the anchor op - intermediate = op.inputs[0] - intermediate.consumers.remove(op) - op.inputs[1].consumers.remove(op) - - # create new parameter tensor for the op, and replace old - # this is needed because multiple ops can share the same parameter tensor - # NOTE: the old paddings tensor might be dangling and will be cleaned up later - op.inputs[1] = subgraph.create_tensor( - f"{op.name}/paddings", - TensorType.INT32, - shape=new_params.shape, - consumers=[op], - ) - op.inputs[1].buffer.data = new_params.astype(np.int32) - - # set up bypass connection - op.inputs[0] = producer.inputs[0] - producer.inputs[0].consumers.append(op) - - -class RemovePaddingInputPass(OperatorMatchingPass): - def match(self, op): - if op.operator_code.code is BuiltinOpCodes.PAD: - padding = op.inputs[1].as_array().tolist() - return ( - super().match(op) - # Match padding only where it is the first operator in the subgraph - 
and op.inputs[0] in op.subgraph.inputs - # Make sure no other op uses this input - and len(op.inputs[0].consumers) == 1 - # Match only padding in channel direction i.e. inserted for VPU alignment - and len(padding) == 4 - and padding[-1] != [0, 0] - and all(pad == [0, 0] for pad in padding[:-1]) - ) - else: - return False - - def mutate(self, op): - subgraph = op.subgraph - subgraph.inputs.append(op.outputs[0]) - subgraph.remove_tensor(op.inputs[0]) # DCE doesn't clean up subgraph inputs - subgraph.remove_operator(op) - - -class ReplacePadPass(OperatorMatchingPass): - @property - def new_opcode(self) -> OperatorCode: - return OperatorCode(XCOREOpCodes.XC_pad) - - def match(self, op: Operator) -> bool: - if super().match and op.operator_code.code is BuiltinOpCodes.PAD: - padding = op.inputs[1].as_array().tolist() - - try: - pad_value = op.inputs[0].quantization["zero_point"][0] - except KeyError: - pad_value = 0 - - input_type = op.inputs[0].type - if ( - np.can_cast(pad_value, input_type.to_numpy_dtype()) - and input_type.sizeof() <= 4 - ): - # match spatial pad only - if len(padding) == 4 and padding[-1] == [0, 0] and padding[0] == [0, 0]: - bytes_per_pixel = input_type.sizeof() * op.inputs[0].shape[3] - return bytes_per_pixel % 4 == 0 - else: - raise ValueError( - f"zero_point is out of bounds for tensor with type {input_type}" - ) - - return False - - def mutate(self, op: Operator) -> Operator: - new_op = op.subgraph.create_operator( - self.new_opcode, inputs=op.inputs, outputs=op.outputs - ) - new_op.subgraph.replace_operator(op, new_op) - - input_type = new_op.inputs[0].type - try: - pad_value = new_op.inputs[0].quantization["zero_point"][0] - except KeyError: - pad_value = 0 - - new_op.add_custom_options( - pad_value=int( - np.frombuffer( - np.full( - 4 // input_type.sizeof(), - pad_value, - dtype=input_type.to_numpy_dtype(), - ).tobytes(), - dtype=np.int32, - ) - ) - ) - - return new_op diff --git a/archived/tflite2xcore/tflite2xcore/transformation_passes/parallelization_passes.py b/archived/tflite2xcore/tflite2xcore/transformation_passes/parallelization_passes.py deleted file mode 100644 index b4eac01a7..000000000 --- a/archived/tflite2xcore/tflite2xcore/transformation_passes/parallelization_passes.py +++ /dev/null @@ -1,166 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
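The `pad_value` packing at the end of `ReplacePadPass.mutate` above replicates the zero point until it fills a 4-byte word, then reinterprets that word as a single int32. Standalone, for an int8 tensor with a hypothetical zero point of -5:

```python
import numpy as np

pad_value = -5                               # hypothetical int8 zero point
word = np.full(4, pad_value, dtype=np.int8)  # 4 // sizeof(int8) copies
packed = np.frombuffer(word.tobytes(), dtype=np.int32)[0]
print(hex(packed & 0xFFFFFFFF))              # 0xfbfbfbfb
```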
- -import numpy as np -from abc import abstractmethod -from typing import Tuple, Optional, Any - -from tflite2xcore.xcore_schema import XCOREOpCodes, Operator -from tflite2xcore.parallelization import ( - ParallelizationPlanner, - SlicePlanner, - ChannelGroupSlicePlanner, - ElementWisePlanner, -) -from tflite2xcore.utils import WORD_SIZE_BITS, WORD_SIZE_BYTES, ACC_PERIOD_INT8 - -from .transformation_passes import OperatorMatchingPass - - -class ParallelizationPass(OperatorMatchingPass): - FIXED_COST_PER_THREAD = 0 - - @property - @abstractmethod - def MATCHING_OPCODES(self) -> Tuple[XCOREOpCodes, ...]: - return tuple() - - def __init__( - self, - *args: Any, - num_threads: Optional[int] = None, - forced: bool = False, - **kwargs: Any - ) -> None: - super().__init__(*args, **kwargs) - num_threads = num_threads or 1 - assert num_threads > 0 - self._planner_args = dict( - forced=forced, - num_threads=num_threads, - fixed_cost_per_thread=self.FIXED_COST_PER_THREAD, - ) - - def match(self, op: Operator) -> bool: - return ( - super().match(op) - and op.operator_code.code in self.MATCHING_OPCODES - and "par" not in op.custom_options - ) - - @property - @abstractmethod - def _planner(self) -> ParallelizationPlanner: - raise NotImplementedError() - - def mutate(self, op: Operator) -> None: - with self.using(op): - op.add_custom_options(par=self._planner.find_optimal_plan().to_dict()) - - -class ParallelizeElementWisePass(ParallelizationPass): - JOB_SIZE_ALIGNMENT = WORD_SIZE_BYTES - - @property - def _num_elements(self) -> int: - return int(np.prod(self._op.outputs[0].shape[1:])) - - @property - def _planner(self) -> ElementWisePlanner: - return ElementWisePlanner( - self._num_elements, alignment=self.JOB_SIZE_ALIGNMENT, **self._planner_args - ) - - -class ParallelizeLUTPass(ParallelizeElementWisePass): - MATCHING_OPCODES = (XCOREOpCodes.XC_lookup_8,) - FIXED_COST_PER_THREAD = 10 - JOB_SIZE_ALIGNMENT = 1 - - -class ParallelizeAddPass(ParallelizeElementWisePass): - MATCHING_OPCODES = (XCOREOpCodes.XC_add_8,) - FIXED_COST_PER_THREAD = 100 - - -class ParallelizeChannelWisePass(ParallelizeElementWisePass): - FIXED_COST_PER_THREAD = 0 - JOB_SIZE_ALIGNMENT = ACC_PERIOD_INT8 - - @property - def _num_elements(self) -> int: - num_channels = self._op.outputs[0].shape[-1] - assert num_channels % WORD_SIZE_BYTES == 0 - return num_channels - - -class ParallelizeGlobalAveragePool2DPass(ParallelizeChannelWisePass): - MATCHING_OPCODES = (XCOREOpCodes.XC_avgpool2d_global,) - - -class ChannelGroupParallelizationPass(ParallelizationPass): - @property - def _planner(self) -> ChannelGroupSlicePlanner: - output_shape = self._op.outputs[0].shape - Cout = np.prod(output_shape[1:]) # works even if output is (1, 1, 1, Cout) - assert output_shape[-1] == Cout - return ChannelGroupSlicePlanner(Cout, **self._planner_args) - - -class SpatialParallelizationPass(ParallelizationPass): - @property - def _cout(self) -> int: - return self._op.outputs[0].shape[3] - - @property - def _planner(self) -> SlicePlanner: - _, height, width, _ = self._op.outputs[0].shape - return SlicePlanner(self._cout, height, width, **self._planner_args) - - -class ParallelizeFullyConnectedPass(ChannelGroupParallelizationPass): - MATCHING_OPCODES = (XCOREOpCodes.XC_fc,) - - -class ParallelizeRequant16To8Pass(ChannelGroupParallelizationPass): - MATCHING_OPCODES = (XCOREOpCodes.XC_requantize_16_to_8,) - - -class ParallelizeConv2dPass(SpatialParallelizationPass): - MATCHING_OPCODES = ( - XCOREOpCodes.XC_conv2d_shallowin, - XCOREOpCodes.XC_conv2d_deep, - 
XCOREOpCodes.XC_conv2d_1x1, - ) - - -class ParallelizeBConv2dInt8Pass(SpatialParallelizationPass): - MATCHING_OPCODES = ( - XCOREOpCodes.XC_bconv2d_int8, - XCOREOpCodes.XC_bconv2d_int8_DIDO, - ) - - def mutate(self, op: Operator) -> None: - with self.using(op): - par = self._planner.find_optimal_plan().to_dict() - par.pop("cg") - op.add_custom_options(par=par) - - -class ParallelizeBConv2dBinPass(ParallelizeBConv2dInt8Pass): - MATCHING_OPCODES = ( - XCOREOpCodes.XC_bconv2d_bin, - XCOREOpCodes.XC_bconv2d_bin_DI, - ) - - @property - def _cout(self) -> int: - return super()._cout * WORD_SIZE_BITS - - -class ParallelizeDepthwiseConv2dPass(SpatialParallelizationPass): - MATCHING_OPCODES = (XCOREOpCodes.XC_conv2d_depthwise,) - - -class ParallelizePooling2DPass(SpatialParallelizationPass): - MATCHING_OPCODES = (XCOREOpCodes.XC_maxpool2d, XCOREOpCodes.XC_avgpool2d) diff --git a/archived/tflite2xcore/tflite2xcore/transformation_passes/pooling_passes.py b/archived/tflite2xcore/tflite2xcore/transformation_passes/pooling_passes.py deleted file mode 100644 index 36c544e67..000000000 --- a/archived/tflite2xcore/tflite2xcore/transformation_passes/pooling_passes.py +++ /dev/null @@ -1,193 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import numpy as np - -from tflite2xcore.xcore_schema import ( - ActivationFunctionType, - Padding, - TensorType, - BuiltinOpCodes, - OperatorCode, - XCOREOpCodes, -) -from tflite2xcore.utils import WORD_SIZE_BYTES - -from .transformation_passes import ReplaceQuantizedOperatorPass - - -class ReplacePool2DPass(ReplaceQuantizedOperatorPass): - @property - def _strides(self): - options = self._op.builtin_options - return options["stride_h"], options["stride_w"] - - @property - def _pool_size(self): - options = self._op.builtin_options - return options["filter_height"], options["filter_width"] - - @property - def _padding(self): - return self._op.builtin_options["padding"] - - @property - def _fused_activation(self): - return self._op.builtin_options["fused_activation_function"] - - def match(self, op): - if super().match(op): - with self.using(op): - return ( - self._input.quantization == self._output.quantization - and self._fused_activation is ActivationFunctionType.NONE - and self._input.shape[3] % 4 == 0 - ) - - return False - - def mutate(self, op): - new_op = super().mutate(op) - - with self.using(op): - new_op.add_custom_options(stride=self._strides, pool=self._pool_size) - - -class ReplacePool2D2x2Pass(ReplacePool2DPass): - def match(self, op): - if super().match(op): - with self.using(op): - return ( - self._strides == (2, 2) - and self._pool_size == (2, 2) - and self._input.shape[1] % 2 == 0 - and self._input.shape[2] % 2 == 0 - ) - - return False - - -class ReplaceMaxPool2DPass(ReplacePool2DPass): - @property - def matching_opcode(self): - return BuiltinOpCodes.MAX_POOL_2D - - @property - def new_opcode(self): - return OperatorCode(XCOREOpCodes.XC_maxpool2d) - - def match(self, op): - if super().match(op): - with self.using(op): - return self._padding is Padding.VALID - - return False - - -class ReplaceMaxPool2D2x2Pass(ReplacePool2D2x2Pass): - @property - def matching_opcode(self): - return BuiltinOpCodes.MAX_POOL_2D - - @property - def new_opcode(self): - return OperatorCode(XCOREOpCodes.XC_maxpool2d) - - -class ReplaceAveragePool2DPass(ReplacePool2DPass): - @property - def matching_opcode(self): - return BuiltinOpCodes.AVERAGE_POOL_2D - - @property - def new_opcode(self): - return 
OperatorCode(XCOREOpCodes.XC_avgpool2d) - - def match(self, op): - if super().match(op): - with self.using(op): - return self._padding is Padding.VALID - - return False - - -class ReplaceAveragePool2D2x2Pass(ReplacePool2D2x2Pass): - @property - def matching_opcode(self): - return BuiltinOpCodes.AVERAGE_POOL_2D - - @property - def new_opcode(self): - return OperatorCode(XCOREOpCodes.XC_avgpool2d) - - -class ReplaceGlobalAveragePool2DPass(ReplaceQuantizedOperatorPass): - @property - def matching_opcode(self): - return BuiltinOpCodes.MEAN - - @property - def new_opcode(self): - return OperatorCode(XCOREOpCodes.XC_avgpool2d_global) - - def match(self, op): - if super().match(op): - with self.using(op): - axis = self._op.inputs[1].as_array().flatten().tolist() - if axis == [1, 2] or axis == [2, 1]: - return self._input.shape[3] % WORD_SIZE_BYTES == 0 - else: - self.logger.warning("Axis is not either [1, 2] or [2, 1]") - - return False - - @property - def _bias_scale_shift(self): - num_pixels = self._input.shape[1] * self._input.shape[2] - rescaling = ( - self._input.quantization["scale"][0] / self._output.quantization["scale"][0] - ) - multiplier = rescaling / num_pixels - - scale = np.round(multiplier * 2 ** (7 - np.ceil(np.log2(multiplier)))) - if scale == 128.0: - scale /= 2 - shift = np.round(np.log2(scale / multiplier)) - bias = np.round( - ( - self._output.quantization["zero_point"][0] - - self._input.quantization["zero_point"][0] * rescaling - + 0.5 # needed because the tflite ref adds 0.5 to the bias - ) - * 2 ** shift - ) - - if shift > 24 or shift < 0: - raise ValueError( - f"Global Average Pool shift must be between 0 and 24, got {shift}." - ) - if scale > 127 or scale < 64: - raise ValueError( - f"Global Average Pool scale must be between 64 and 127, got {scale}." - ) - - return bias.astype(np.int32), scale.astype(np.int8), shift.astype(np.int16) - - def mutate(self, op): - new_op = super().mutate(op) - subgraph = new_op.subgraph - - with self.using(new_op): - # replace reduction_indices tensor with bias_scale_shift - new_op.inputs[1].consumers.remove(new_op) - new_op.inputs[1] = subgraph.create_tensor( - f"{new_op.name}/bias_scale_shift", - TensorType.INT8, - shape=[7], - consumers=[new_op], - ) - new_op.inputs[1].buffer.data = b"".join( - p.tobytes() for p in self._bias_scale_shift - ) - - return new_op diff --git a/archived/tflite2xcore/tflite2xcore/transformation_passes/quantize_dequantize_passes.py b/archived/tflite2xcore/tflite2xcore/transformation_passes/quantize_dequantize_passes.py deleted file mode 100644 index 6aee1a931..000000000 --- a/archived/tflite2xcore/tflite2xcore/transformation_passes/quantize_dequantize_passes.py +++ /dev/null @@ -1,185 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
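For reference, the fixed-point parameters computed by _bias_scale_shift above can be reproduced standalone. The sketch below is a minimal rework of that arithmetic, assuming hypothetical quantization values; the scales, zero points, and the 8x8 input size are illustrative assumptions, not taken from the source:

    import numpy as np

    # hypothetical quantization parameters (illustrative assumptions only)
    input_scale, output_scale = 0.023528, 0.047056
    input_zp, output_zp = -128, -128
    num_pixels = 8 * 8  # H * W of the averaged input

    rescaling = input_scale / output_scale  # 0.5 here
    multiplier = rescaling / num_pixels     # contribution of each pixel

    # scale is forced into [64, 127] so it fits a 7-bit mantissa
    scale = np.round(multiplier * 2 ** (7 - np.ceil(np.log2(multiplier))))
    if scale == 128.0:
        scale /= 2
    shift = np.round(np.log2(scale / multiplier))
    bias = np.round(
        (output_zp - input_zp * rescaling + 0.5) * 2 ** shift
    )
    print(bias, scale, shift)  # -520192.0 64.0 13.0 for these inputs

The same range checks as in the pass apply: shift must land in [0, 24] and scale in [64, 127], otherwise a ValueError is raised.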
- -from tflite2xcore.xcore_model import Operator, Tensor -from tflite2xcore.xcore_schema import ( - TensorType, - BuiltinOpCodes, - OperatorCode, - ExternalOpCodes, - ValidOpCodes, -) - -from .transformation_passes import ( - InputTensorMatchingPass, - OutputTensorMatchingPass, - QuantizedOperatorMatchingPass, -) - - -class RemoveRedundantInt8RequantizationPass(QuantizedOperatorMatchingPass): - @property - def matching_opcode(self) -> BuiltinOpCodes: - return BuiltinOpCodes.QUANTIZE - - _PRECEDING_OPCODES = ( - BuiltinOpCodes.CONV_2D, - BuiltinOpCodes.DEPTHWISE_CONV_2D, - BuiltinOpCodes.FULLY_CONNECTED, - BuiltinOpCodes.QUANTIZE, - ) - - def match(self, op: Operator) -> bool: - if super().match(op): - intermediate_tensor = op.inputs[0] - if ( - len(intermediate_tensor.consumers) == 1 - and len(intermediate_tensor.producers) == 1 - and intermediate_tensor not in op.subgraph.outputs - ): - producer_op = intermediate_tensor.producers[0] - return producer_op.operator_code.code in self._PRECEDING_OPCODES - return False - - def mutate(self, op: Operator) -> None: - subgraph = op.subgraph - intermediate_tensor = op.inputs[0] - output_tensor = op.outputs[0] - - producer_op = intermediate_tensor.producers[0] - producer_op.outputs[0] = output_tensor - output_tensor.producers.append(producer_op) - - # it is safe to remove the tensor and the intermediate op - # since the match checked that there are no other consumers - subgraph.remove_operator(op) - subgraph.remove_tensor(intermediate_tensor) - - -# TODO: improve tests for this -class CanonicalizeQuantizedInputPass(QuantizedOperatorMatchingPass): - @property - def matching_opcode(self) -> BuiltinOpCodes: - return BuiltinOpCodes.QUANTIZE - - @property - def matching_input_type(self) -> TensorType: - return TensorType.FLOAT32 - - def match(self, op: Operator) -> bool: - if super().match(op): - input_tensor, output_tensor = op.inputs[0], op.outputs[0] - return ( - input_tensor in op.subgraph.inputs - and len(input_tensor.consumers) == 1 - and output_tensor not in op.subgraph.outputs - ) - - return False - - def mutate(self, op: Operator) -> None: - subgraph = op.subgraph - subgraph.inputs.append(op.outputs[0]) - subgraph.remove_tensor(op.inputs[0]) # DCE doesn't clean up subgraph inputs - subgraph.remove_operator(op) - - -# TODO consider adding tests for this -class CanonicalizeLceQuantizedInputPass(CanonicalizeQuantizedInputPass): - @property - def matching_input_type(self) -> TensorType: - return TensorType.INT8 - - @property - def matching_output_type(self) -> TensorType: - return TensorType.INT32 - - @property - def matching_opcode(self) -> ValidOpCodes: - return ExternalOpCodes.LceQuantize - - -class CanonicalizeQuantizedOutputPass(QuantizedOperatorMatchingPass): - @property - def matching_opcode(self) -> BuiltinOpCodes: - return BuiltinOpCodes.DEQUANTIZE - - @property - def matching_output_type(self) -> TensorType: - return TensorType.FLOAT32 - - def match(self, op: Operator) -> bool: - if super().match(op): - try: - if op.operator_code.code is not self.matching_opcode: - return False - except AttributeError: - return False - - output_tensor = op.outputs[0] - if ( - output_tensor in op.subgraph.outputs - and not output_tensor.consumers - and op.inputs[0] not in op.subgraph.inputs - ): - if len(output_tensor.producers) == 1: - return True - else: - self.logger.warning( - f"Encountered output of removable {self.matching_opcode} " - "with more than one producer." 
- ) - - return False - - def mutate(self, op: Operator) -> None: - subgraph = op.subgraph - subgraph.outputs.append(op.inputs[0]) - subgraph.remove_tensor(op.outputs[0]) # DCE doesn't clean up subgraph outputs - subgraph.remove_operator(op) - - -# TODO consider adding tests for this -class CanonicalizeLceQuantizedOutputPass(CanonicalizeQuantizedOutputPass): - @property - def matching_input_type(self) -> TensorType: - return TensorType.INT32 - - @property - def matching_opcode(self) -> ValidOpCodes: - return ExternalOpCodes.LceDequantize - - -# TODO: improve tests for this -class LegalizeFloatInputPass(InputTensorMatchingPass): - def match(self, input_tensor: Tensor) -> bool: - return super().match(input_tensor) and input_tensor.type is TensorType.INT8 - - def mutate(self, qin: Tensor) -> None: - subgraph = qin.subgraph - fin = subgraph.create_tensor( - f"{qin.name}_float", TensorType.FLOAT32, qin.shape, isinput=True - ) - subgraph.inputs.remove(qin) - op = subgraph.create_operator( - OperatorCode(BuiltinOpCodes.QUANTIZE), inputs=[fin], outputs=[qin] - ) - # builtin interpreter prefers ops ordered this way - subgraph.operators.remove(op) - subgraph.operators.insert(0, op) - - -# TODO: improve tests for this -class LegalizeFloatOutputPass(OutputTensorMatchingPass): - def match(self, input_tensor: Tensor) -> bool: - return super().match(input_tensor) and input_tensor.type is TensorType.INT8 - - def mutate(self, qout: Tensor) -> None: - subgraph = qout.subgraph - fout = subgraph.create_tensor( - f"{qout.name}_float", TensorType.FLOAT32, qout.shape, isoutput=True - ) - subgraph.outputs.remove(qout) - subgraph.create_operator( - OperatorCode(BuiltinOpCodes.DEQUANTIZE), inputs=[qout], outputs=[fout] - ) diff --git a/archived/tflite2xcore/tflite2xcore/transformation_passes/renaming_passes.py b/archived/tflite2xcore/tflite2xcore/transformation_passes/renaming_passes.py deleted file mode 100644 index 09670b88c..000000000 --- a/archived/tflite2xcore/tflite2xcore/transformation_passes/renaming_passes.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
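The LegalizeFloatInputPass / LegalizeFloatOutputPass pair above only rewires the graph; the numeric effect of the QUANTIZE and DEQUANTIZE ops they insert is the usual affine int8 mapping. A minimal sketch, assuming an illustrative scale and zero point (not values from the source):

    import numpy as np

    scale, zero_point = 0.02, -128  # hypothetical input quantization

    def quantize(x):
        # float32 -> int8, what the inserted QUANTIZE op computes
        t = np.round(np.float32(x) / np.float32(scale)).astype(np.int32) + zero_point
        return np.clip(t, -128, 127).astype(np.int8)

    def dequantize(q):
        # int8 -> float32, what the inserted DEQUANTIZE op computes
        return np.float32(q.astype(np.int32) - zero_point) * np.float32(scale)

    x = np.array([0.0, 0.5, 1.0], dtype=np.float32)
    q = quantize(x)          # [-128, -103, -78]
    print(q, dequantize(q))  # round-trip error is bounded by scale / 2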
- -from .transformation_passes import OperatorMatchingPass - - -class LegalizeOperatorOutputTensorNamePass(OperatorMatchingPass): - def match(self, op): - if super().match(op): - if len(op.outputs) == 1: - return not op.outputs[0].name.startswith(f"{op.name}/output") - - for j, tensor in enumerate(op.outputs): - candidate_name = f"{op.name}/output_{j}" - if not tensor.name.startswith(candidate_name): - return True - - return False - - def __mutate_tensor_name(self, tensor, candidate_name): - subgraph = tensor.subgraph - if tensor.name != candidate_name: - unique_name = subgraph.make_unique_tensor_name(candidate_name) - - if unique_name != candidate_name: - self.logger.warning( - f"candidate_name {candidate_name} is already used by " - f"tensor {subgraph.tensors.index(tensor)}, " - f"defaulting to {unique_name}" - ) - - tensor.name = unique_name - - def mutate(self, op): - if len(op.outputs) == 1: - self.__mutate_tensor_name(op.outputs[0], f"{op.name}/output") - else: - for j, tensor in enumerate(op.outputs): - self.__mutate_tensor_name(tensor, f"{op.name}/output_{j}") diff --git a/archived/tflite2xcore/tflite2xcore/transformation_passes/reshape_passes.py b/archived/tflite2xcore/tflite2xcore/transformation_passes/reshape_passes.py deleted file mode 100644 index 995fd3fd6..000000000 --- a/archived/tflite2xcore/tflite2xcore/transformation_passes/reshape_passes.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import numpy as np - -from tflite2xcore.xcore_schema import BuiltinOpCodes -from tflite2xcore.xcore_model import Operator - -from .transformation_passes import OperatorMatchingPass - - -class AdjacentReshapeMatchingPass(OperatorMatchingPass): - @property - def MATCHING_OPCODES(self): - return (BuiltinOpCodes.FULLY_CONNECTED,) - - @property - def _reshape_op(self): - return self._op.inputs[0].producers[0] - - def match(self, op: Operator) -> bool: - if super().match(op) and op.operator_code.code in self.MATCHING_OPCODES: - try: - with self.using(op): - reshape_op = self._reshape_op - except IndexError: - return False - - return ( - reshape_op.operator_code.code is BuiltinOpCodes.RESHAPE - and reshape_op.inputs[0].shape[0] == reshape_op.outputs[0].shape[0] - ) - - return False - - -class RemoveSubsequentReshapePass(AdjacentReshapeMatchingPass): - @property - def _reshape_op(self): - return self._op.outputs[0].consumers[0] - - def match(self, op: Operator) -> bool: - if super().match(op): - with self.using(op): - if len(self._reshape_op.inputs[0].consumers) == 1: - return True - self.logger.warning( - "Subsequent RESHAPE found with more than 1 consumer" - ) - return False - - def mutate(self, op: Operator) -> None: - with self.using(op): - reshape_op = self._reshape_op - - # Remove connection from old output to the anchor op - # then create the new connection - op.outputs[0].producers.remove(op) - op.outputs[0] = reshape_op.outputs[0] - op.outputs[0].producers.append(op) - - op.subgraph.remove_operator(reshape_op) - - -class RemovePrecedingReshapePass(AdjacentReshapeMatchingPass): - def mutate(self, op: Operator) -> None: - reshape_op = op.inputs[0].producers[0] - - # Remove connection from old input to the anchor op - # then create the new connection - op.inputs[0].consumers.remove(op) - op.inputs[0] = reshape_op.inputs[0] - op.inputs[0].consumers.append(op) - - -class CanonicalizeReshapePass(OperatorMatchingPass): - def match(self, op: Operator) -> bool: - if not (super().match(op) 
and op.operator_code.code is BuiltinOpCodes.RESHAPE): - return False - - try: - if list(op.builtin_options["new_shape"]) != list(op.outputs[0].shape): - raise ValueError( - "new_shape option to RESHAPE doesn't match output tensor shape" - ) - except (KeyError, TypeError): - # in tf2.2 the builtin options seem unused - self.logger.debug( - "Expected new_shape option to RESHAPE was not found " - "(ensure you are running tf2.2 or newer)" - ) - - if -1 in op.inputs[0].shape + op.outputs[0].shape: - self.logger.warning("Dynamically sized tensors are not supported") - return False - - assert np.prod(op.inputs[0].shape) == np.prod( - op.outputs[0].shape - ), "RESHAPE input and output shapes are not consistent" - - # NOTE: we used to check if op.inputs[1] is constant. - # However, since neither we nor the runtime currently supports - # dynamic shapes, this is disabled for now to enable better - # conversion of certain models (e.g. mobilenet v1) in tf2.3 and newer - return len(op.inputs) == 2 - - def mutate(self, op: Operator) -> None: - # Remove connection between RESHAPE and input tensor[1], the new shape - op.inputs[1].consumers.remove(op) - op.inputs = op.inputs[:1] diff --git a/archived/tflite2xcore/tflite2xcore/transformation_passes/scratch_memory_passes.py b/archived/tflite2xcore/tflite2xcore/transformation_passes/scratch_memory_passes.py deleted file mode 100644 index 6b2cc89ed..000000000 --- a/archived/tflite2xcore/tflite2xcore/transformation_passes/scratch_memory_passes.py +++ /dev/null @@ -1,167 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -from abc import abstractmethod -from typing import Tuple - -from tflite2xcore.xcore_schema import XCOREOpCodes, Operator, Tensor, Buffer -from tflite2xcore.parallelization import CHANNEL_GROUP_SIZE -from tflite2xcore.xcore_schema.operator_code import OperatorCode - -from .transformation_passes import OperatorMatchingPass, TensorMatchingPass - - -class ScratchMemoryCalculationPass(OperatorMatchingPass): - @property - def _input(self): - return self._op.inputs[0] - - @property - def _weights(self): - return self._op.inputs[1] - - @property - def _biases(self): - return self._op.inputs[2] - - @property - @abstractmethod - def MATCHING_OPCODES(self) -> Tuple[XCOREOpCodes, ...]: - return tuple() - - def match(self, op: Operator) -> bool: - return ( - super().match(op) - and op.operator_code.code in self.MATCHING_OPCODES - and "mem" not in op.custom_options - ) - - @property - def _bias_scratch_size(self) -> int: - _, Bv, Bl = self._biases.shape - return Bv * Bl * self._biases.type.sizeof() - - @property - @abstractmethod - def _weights_scratch_size(self) -> int: - raise NotImplementedError() - - def mutate(self, op: Operator) -> None: - with self.using(op): - op.add_custom_options( - mem=[self._weights_scratch_size, self._bias_scratch_size] - ) - - -class ScratchMemoryFullyConnectedPass(ScratchMemoryCalculationPass): - MATCHING_OPCODES = (XCOREOpCodes.XC_fc,) - - @property - def _weights_scratch_size(self) -> int: - Cout, Cin = self._weights.shape - - custom_options = self._op.custom_options - if "par" in custom_options: - # NOTE: number of channel groups is at least number of threads - i_cg = custom_options["par"]["th"] - return Cin * (custom_options["par"]["cg"][i_cg - 1][1] + 1) - else: - return Cin * Cout - - @property - def _bias_scratch_size(self) -> int: - _, Bv, Bl = self._biases.shape - - custom_options = self._op.custom_options - if "par" in custom_options: - # NOTE: number 
of channel groups is at least number of threads - i_cg = custom_options["par"]["th"] - return Bv * Bl * self._biases.type.sizeof() * i_cg - else: - return Bv * Bl * self._biases.type.sizeof() - - -class Conv2dScratchMemoryCalculationPass(ScratchMemoryCalculationPass): - @property - @abstractmethod - def _kernel_size(self) -> Tuple[int, int]: - raise NotImplementedError() - - @property - def _max_channel_group_size(self): - custom_options = self._op.custom_options - if "par" in custom_options: - max_cg_size = max([cg[1] - cg[0] + 1 for cg in custom_options["par"]["cg"]]) - else: - max_cg_size = CHANNEL_GROUP_SIZE - - return max_cg_size - - @property - def _weights_scratch_size(self) -> int: - _, _, _, Cin = self._input.shape - Kh, Kw = self._kernel_size - return Cin * Kh * Kw * self._max_channel_group_size - - -class ScratchMemoryConv2dPass(Conv2dScratchMemoryCalculationPass): - MATCHING_OPCODES = ( - XCOREOpCodes.XC_conv2d_deep, - XCOREOpCodes.XC_conv2d_shallowin, - ) - - @property - def _kernel_size(self) -> Tuple[int, int]: - return self._weights.shape[1:3] - - -class ScratchMemoryDepthwiseConv2dPass(Conv2dScratchMemoryCalculationPass): - MATCHING_OPCODES = (XCOREOpCodes.XC_conv2d_depthwise,) - - @property - def _kernel_size(self) -> Tuple[int, int]: - return self._weights.shape[0:2] - - @property - def _weights_scratch_size(self) -> int: - Kh, Kw = self._kernel_size - return Kh * Kw * self._max_channel_group_size - - -class ScratchMemoryConv2d1x1Pass(Conv2dScratchMemoryCalculationPass): - MATCHING_OPCODES = (XCOREOpCodes.XC_conv2d_1x1,) - - @property - def _kernel_size(self) -> Tuple[int, int]: - return 1, 1 - - -class InsertExternalMemoryFetchPass(TensorMatchingPass): - MATCHING_OPCODES = ( - XCOREOpCodes.XC_bconv2d_bin, - XCOREOpCodes.XC_bconv2d_bin_DI, - XCOREOpCodes.XC_bconv2d_int8, - XCOREOpCodes.XC_bconv2d_int8_DIDO, - ) - - def match(self, tensor: Tensor) -> bool: - if super().match(tensor) and tensor.is_constant and len(tensor.consumers) == 1: - consumer = tensor.consumers[0] - return consumer.operator_code.code in self.MATCHING_OPCODES - return False - - def mutate(self, tensor: Tensor) -> None: - subgraph = tensor.subgraph - - # clone the constant tensor, rename the original, remove old buffer - new_tensor = subgraph.clone_tensor(tensor) - tensor.name = subgraph.make_unique_tensor_name(tensor.name) - tensor.buffer.owners = [] - tensor.buffer = Buffer(tensor.model, owners=[tensor]) - - # create and insert fetch op - fetch_op = subgraph.create_operator( - OperatorCode(XCOREOpCodes.XC_fetch), inputs=[new_tensor], outputs=[tensor] - ) - fetch_op.name = f"{new_tensor.name}/fetch" - subgraph.insert_operator(tensor.consumers[0], fetch_op) diff --git a/archived/tflite2xcore/tflite2xcore/transformation_passes/transformation_passes.py b/archived/tflite2xcore/tflite2xcore/transformation_passes/transformation_passes.py deleted file mode 100644 index 77e43604c..000000000 --- a/archived/tflite2xcore/tflite2xcore/transformation_passes/transformation_passes.py +++ /dev/null @@ -1,512 +0,0 @@ -# Copyright 2019-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
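To make the scratch-size formulas above concrete, here is a worked example for a hypothetical XC_conv2d_deep operator with no "par" plan. All geometry values are illustrative assumptions; (Bv, Bl) = (7, 16) is assumed to match the INT16 bias/shift/scale layout produced elsewhere in these passes:

    # hypothetical operator geometry (illustrative assumptions only)
    Cin, Kh, Kw = 32, 3, 3  # input depth and kernel size
    max_cg_size = 16        # CHANNEL_GROUP_SIZE when no "par" option is present

    # ScratchMemoryConv2dPass: one channel group's worth of kernel weights
    weights_scratch = Cin * Kh * Kw * max_cg_size  # 4608 bytes

    # _bias_scratch_size: the full (Bv, Bl) INT16 bias/shift/scale block
    Bv, Bl, sizeof_int16 = 7, 16, 2
    bias_scratch = Bv * Bl * sizeof_int16          # 224 bytes

    # the mutate step records both values as the "mem" custom option
    print({"mem": [weights_scratch, bias_scratch]})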
- -import logging -import numpy as np -from abc import ABC, abstractmethod -from contextlib import contextmanager -from typing import Any - -from tflite2xcore.xcore_schema import TensorType, OperatorCode, Operator, Buffer -from tflite2xcore.utils import ACC_PERIOD_INT8, format_array - - -class ModelTransformationPass(ABC): - def __init__(self): - self.logger = logging.getLogger(self.__class__.__name__) - - def _sanity_check(self, obj): - if __debug__: - try: - obj.sanity_check() - except AssertionError as e: - self.logger.exception(e) - - @abstractmethod - def run(self, model): - return 0 - - def __str__(self): - return self.__class__.__name__ - - -class SubgraphPass(ModelTransformationPass): - def __init__(self, *args: Any, **kwargs: Any): - super().__init__(*args, **kwargs) - self._subgraph_idx = -1 - self._obj_index = -1 - self._num_matches = 0 - - @abstractmethod - def match(self, obj) -> bool: - return True - - @abstractmethod - def target_iterable(self, subgraph): - pass - - def log_match(self, obj): - self.logger.debug(f"matched {obj}") - - @abstractmethod - def run_subgraph(self, subgraph): - pass - - def run(self, model): - modified_cnt = 0 - for self._subgraph_idx, subgraph in enumerate(model.subgraphs): - self.logger.debug(f"running on subgraph {self._subgraph_idx}") - if self.run_subgraph(subgraph): - modified_cnt += 1 - - self._subgraph_idx = -1 - return modified_cnt - - -class SubgraphAnalysisPass(SubgraphPass): - def run_subgraph(self, subgraph): - self._num_matches = 0 - for self._obj_index, obj in enumerate(self.target_iterable(subgraph)): - if self.match(obj): - self._num_matches += 1 - self.log_match(obj) - self._sanity_check(obj) - return 0 - - -class SubgraphTransformationPass(SubgraphPass): - @abstractmethod - def mutate(self, obj): - pass - - def run_subgraph(self, subgraph): - self._num_matches = 0 - while True: - for self._obj_index, obj in enumerate(self.target_iterable(subgraph)): - if self.match(obj): - self._num_matches += 1 - self.log_match(obj) - self._sanity_check(obj) - self.mutate(obj) - self._sanity_check(subgraph) - break - else: - self._obj_index = -1 - return self._num_matches - - -class OperatorMatchingPass(SubgraphTransformationPass): - _op: Operator - - def __init__(self, *args: Any, **kwargs: Any): - super().__init__(*args, **kwargs) - self._op = None - - def target_iterable(self, subgraph): - return subgraph.operators - - @contextmanager - def using(self, op): - self._op, original_op = op, self._op - yield - self._op = original_op - - def log_match(self, op): - super().log_match(f"operator [{self._obj_index}]: {op.operator_code}") - - -class TensorMatchingPass(SubgraphTransformationPass): - def target_iterable(self, subgraph): - return subgraph.tensors - - def log_match(self, tensor): - super().log_match(f"tensor [{self._obj_index}]: {tensor.name}") - - -class BufferMatchingPass(ModelTransformationPass): - def __init__(self, *args: Any, **kwargs: Any): - super().__init__(*args, **kwargs) - self._buffer_idx = -1 - - @abstractmethod - def match(self, buffer): - return True - - @abstractmethod - def mutate(self, buffer): - pass - - def log_match(self, buffer): - self.logger.debug( - f"matched buffer [{self._buffer_idx}] of length " - f"{len(buffer)} with {len(buffer.owners)} owners" - ) - - def run(self, model): - modified_cnt = 0 - while True: - for self._buffer_idx, buffer in enumerate(model.buffers): - if self.match(buffer): - self.log_match(buffer) - modified_cnt += 1 - - self._sanity_check(buffer) - self.mutate(buffer) - 
self._sanity_check(model) - break - else: - self._buffer_idx = -1 - return modified_cnt - - -# TODO: add tests -class CanonicalizeEmptyBuffersPass(ModelTransformationPass): - def run(self, model): - if model.buffers: - sentinel = model.buffers[0] - if not sentinel: # buffer 0 has to be empty - for tensor in sentinel.owners: - tensor.buffer = Buffer(model) - tensor.buffer.owners.append(tensor) - del model.buffers[0] - return 1 - return 0 - - -class InputTensorMatchingPass(SubgraphTransformationPass): - def target_iterable(self, subgraph): - return subgraph.inputs - - def log_match(self, tensor): - super().log_match(f"input [{self._obj_index}]: {tensor.name}") - - -class OutputTensorMatchingPass(SubgraphTransformationPass): - def target_iterable(self, subgraph): - return subgraph.outputs - - def log_match(self, tensor): - super().log_match(f"output [{self._obj_index}]: {tensor.name}") - - -class QuantizedOperatorMatchingPass(OperatorMatchingPass): - @property - def _output(self): - return self._op.outputs[0] - - @property - def _input(self): - return self._op.inputs[0] - - @property - def _input_zero_point(self): - return int(self._input.quantization["zero_point"][0]) - - @property - def _output_zero_point(self): - return int(self._output.quantization["zero_point"][0]) - - @property - @abstractmethod - def matching_opcode(self): - raise NotImplementedError() - - @property - def matching_input_type(self) -> TensorType: - return TensorType.INT8 - - @property - def matching_output_type(self) -> TensorType: - return TensorType.INT8 - - def match(self, op): - if super().match(op) and op.operator_code.code is self.matching_opcode: - with self.using(op): - return ( - self._input.type is self.matching_input_type - and self._output.type is self.matching_output_type - ) - return False - - -class ReplaceQuantizedOperatorPass(QuantizedOperatorMatchingPass): - @property - @abstractmethod - def new_opcode(self) -> OperatorCode: - raise NotImplementedError() - - def mutate(self, op): - new_op = op.subgraph.create_operator( - self.new_opcode, inputs=op.inputs, outputs=op.outputs - ) - new_op.subgraph.replace_operator(op, new_op) - return new_op - - -class ReplaceQuantizedWeightBiasOperatorPass(ReplaceQuantizedOperatorPass): - @property - def _weights(self): - return self._op.inputs[1] - - @property - def _biases(self): - return self._op.inputs[2] - - @property - def matching_biases_type(self) -> TensorType: - return TensorType.INT32 - - @property - def matching_weights_type(self) -> TensorType: - return TensorType.INT8 - - def _match_non_weight_inputs(self) -> bool: - try: - return ( - self._biases.type is self.matching_biases_type - and self._biases.is_constant - and self._biases not in self._op.subgraph.outputs - ) - except IndexError: - # if bias is missing, the operator should match - return True - - def match(self, op): - with self.using(op): - return ( - super().match(op) - and self._weights.type is self.matching_weights_type - # NOTE: the current implementations don't allow mutating ops - # if one of the parameter tensors is an output or not constant - and self._weights.is_constant - and self._weights not in op.subgraph.outputs - and self._match_non_weight_inputs() - ) - - -class ReplaceXCWeightBiasOperatorPass(ReplaceQuantizedWeightBiasOperatorPass): - def mutate(self, op): - new_op = super().mutate(op) - new_op.add_custom_options(illegal_params=True) - return new_op - - -# TODO: refactor properties -class LegalizeWeightBiasPass(QuantizedOperatorMatchingPass): - @property - def _biases(self): - 
return self._op.inputs[2] - - @property - def _weights(self): - return self._op.inputs[1] - - @abstractmethod - def mutate_biases(self, op): - pass - - @abstractmethod - def mutate_weights(self, op): - pass - - def _replace_weights(self, arr) -> None: - # create and populate new weight tensor - subgraph = self._op.subgraph - new_weights = subgraph.create_tensor( - f"{self._op.name}/weights", - TensorType.from_numpy_dtype(arr.dtype), - arr.shape, - consumers=[self._op], - ) - new_weights.buffer.data = arr - - # replace old tensor - self._weights.consumers.remove(self._op) - self._op.inputs[1] = new_weights - - def match(self, op) -> bool: - if super().match(op) and "illegal_params" in op.custom_options: - return op.custom_options["illegal_params"] - return False - - def mutate(self, op): - # NOTE: the order of these mutations is strict - self.mutate_biases(op) - self.mutate_weights(op) - op.custom_options.pop("illegal_params") - return op - - -class LegalizeXCWeightBiasPass(LegalizeWeightBiasPass): - def _multiplier(self): - output_scale = self._output.quantization["scale"][0] - bias_scale = np.array(self._biases.quantization["scale"]) - return bias_scale / output_scale - - @abstractmethod - def _zero_point_bias(self): - pass - - def _unified_bias(self): - arr_64 = self._biases.as_array(np.int64) - self._zero_point_bias().astype( - np.int64 - ) - arr_32 = np.clip(arr_64, -(2 ** 31), 2 ** 31 - 1).astype(np.int32) - if np.any(arr_32 != arr_64): - self.logger.warning("_unified_bias saturated 32 bit!") - return arr_32 - - @staticmethod - def __pad_to_acc_period(arr): - pad = ACC_PERIOD_INT8 - 1 - (arr.shape[0] - 1) % ACC_PERIOD_INT8 - return np.pad(arr, pad_width=[(0, pad)]) - - def _bias_arr(self): - # calculate bias values with the effect of quantization changes - bias = self._unified_bias() - - # zero pad and reshape - bias = self.__pad_to_acc_period(bias) - - # splitting lower and upper 16 bits of each 32 bit value - tmp_shape = (bias.shape[0] // ACC_PERIOD_INT8, ACC_PERIOD_INT8, -1) - new_bias = np.frombuffer(bias.flatten().tobytes(), dtype=np.int16).reshape( - tmp_shape - ) - return np.stack([new_bias[:, :, 1], new_bias[:, :, 0]], axis=1) - - @property - def _SHIFT_ADJUSTMENT(self): - # NOTE: If we would not need to add the offset separately, the intermediate - # could never saturate, and this value would be 8. But decreasing to 7 - # means that we get an extra bit of headroom in the intermediate. 
- # TODO: investigate if this could be calculated/estimated from the parameters - return 7 - - def _shift_scale(self): - multiplier = self._multiplier() - # NOTE: VLMUL expects one factor in Q2.14 - # we have 1 <= scale < 2 represented in Q2.14 - - multiplier_mask = multiplier != 0 - rshift = np.full(multiplier.shape, 16) - rshift[multiplier_mask] = -np.ceil(np.log2(multiplier[multiplier_mask])) + 1 - scale = np.full(multiplier.shape, 2 ** 15 - 1) - scale[multiplier_mask] = np.round( - multiplier[multiplier_mask] * 2 ** (14 + rshift[multiplier_mask]) - ) - - for j in range(len(scale)): - if scale[j] == 2 ** 15: - rshift[j] -= 1 - scale[j] /= 2 - # we are using 16 bits instead of 8 so we need to adjust the shift - rshift[j] -= self._SHIFT_ADJUSTMENT - - bias_size = np.prod(self._biases.shape) - if len(scale) == 1: - rshift = np.repeat(rshift, bias_size) - scale = np.repeat(scale, bias_size) - rshift, scale = np.int16(rshift), np.int16(scale) - if rshift.shape != scale.shape: - raise ValueError( - f"Shift and scale shapes don't match: {rshift.shape} != {scale.shape}" - ) - return rshift, scale - - @property - def _OUTPUT_BITS(self): - return 8 - - @property - def _MAX_POST_SHIFT(self): - return 22 + self._SHIFT_ADJUSTMENT - self._OUTPUT_BITS - - def _scale_offset_arr(self): - # calculate right shift/scale - rshift, scale = self._shift_scale() - - # zero pad and reshape into appropriate array - rshift = self.__pad_to_acc_period(rshift) - scale = self.__pad_to_acc_period(scale) - - # split left and right shift into pre and post scaling shifts - shift_pre = np.maximum(rshift, 0) - shift_post = self._MAX_POST_SHIFT * np.ones( - rshift.shape, dtype=rshift.dtype - ) + np.minimum(rshift, 0) - if np.any(shift_post.flatten() < 0): - raise ValueError( - "Negative shift_post encountered: " f"{format_array(shift_post)}" - ) - - # calculate offset - raw_offset = ( - np.float64(self._output_zero_point) - * 2 ** shift_post.astype(np.float64) - * 2 ** (self._OUTPUT_BITS - 8) - ).flatten() - - offset_scale = np.round(np.sqrt(np.abs(raw_offset))).astype(np.int16) - offset = np.zeros(offset_scale.shape, dtype=offset_scale.dtype) - pos_ind = offset_scale > 0 - offset[pos_ind] = np.round(raw_offset[pos_ind] / offset_scale[pos_ind]).astype( - np.int16 - ) - - new_shape = (-1, ACC_PERIOD_INT8) - return np.stack( - [ - shift_pre.reshape(new_shape), - scale.reshape(new_shape), - offset_scale.reshape(new_shape), - offset.reshape(new_shape), - shift_post.reshape(new_shape), - ], - axis=1, - ) - - def _bso_arr(self): - return np.concatenate([self._bias_arr(), self._scale_offset_arr()], axis=1) - - def __add_const_zero_bias(self): - out_channels = self._output.shape[-1] - input_scale = self._input.quantization["scale"][0] - new_biases = self._op.subgraph.create_tensor( - f"{self._op.name}/const_zero_bias", - TensorType.INT32, - shape=(out_channels,), - consumers=[self._op], - quantization={ - "scale": [ - input_scale * weight_scale - for weight_scale in self._weights.quantization["scale"] - ], - "zero_point": [0] * out_channels, - }, - ) - new_biases.buffer.data = np.zeros(new_biases.shape, dtype=np.int32) - self._op.inputs.append(new_biases) - - def mutate_biases(self, op): - with self.using(op): - try: - self._biases - except IndexError: - self.__add_const_zero_bias() - - # calculate the bias/scale/offset tensor - bso = self._bso_arr() - - # create and populate new bias tensor - new_biases = self._op.subgraph.create_tensor( - f"{self._op.name}/bias_shift_scale", - TensorType.INT16, - bso.shape, - consumers=[self._op], 
- ) - new_biases.buffer.data = bso - - # replace old tensor - self._biases.consumers.remove(self._op) - self._op.inputs[2] = new_biases diff --git a/archived/tflite2xcore/tflite2xcore/transformation_passes/warning_passes.py b/archived/tflite2xcore/tflite2xcore/transformation_passes/warning_passes.py deleted file mode 100644 index 3d70214c8..000000000 --- a/archived/tflite2xcore/tflite2xcore/transformation_passes/warning_passes.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -from typing import Iterable - -from tflite2xcore.xcore_model import Tensor, Subgraph -from tflite2xcore.xcore_schema import TensorType - -from .transformation_passes import SubgraphAnalysisPass - - -class FloatingPointWarningPass(SubgraphAnalysisPass): - def match(self, tensor: Tensor) -> bool: - return super().match(tensor) and tensor.type in ( - TensorType.FLOAT64, - TensorType.FLOAT32, - TensorType.FLOAT16, - ) - - def target_iterable(self, subgraph: Subgraph) -> Iterable[Tensor]: - return subgraph.tensors - - def log_match(self, tensor: Tensor) -> None: - self.logger.info(f"Floating Point Tensor: {tensor}") - - def run_subgraph(self, subgraph: Subgraph) -> int: - super().run_subgraph(subgraph) - if self._num_matches: - self.logger.warning(f"Floating Point Tensors Found: {self._num_matches}") - return 0 diff --git a/archived/tflite2xcore/tflite2xcore/transformation_passes/word_alignment_passes.py b/archived/tflite2xcore/tflite2xcore/transformation_passes/word_alignment_passes.py deleted file mode 100644 index 163678cbc..000000000 --- a/archived/tflite2xcore/tflite2xcore/transformation_passes/word_alignment_passes.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import numpy as np -from copy import deepcopy - -from tflite2xcore.xcore_schema import TensorType, BuiltinOpCodes, OperatorCode - -from .transformation_passes import QuantizedOperatorMatchingPass - - -class CanonicalizeConv2DInputChannels(QuantizedOperatorMatchingPass): - @property - def _weights(self): - return self._op.inputs[1] - - @property - def _biases(self): - return self._op.inputs[2] - - @property - def matching_opcode(self): - return BuiltinOpCodes.CONV_2D - - def match(self, op): - if super().match(op): - with self.using(op): - input_shape = self._input.shape - return ( - len(input_shape) == 4 - and input_shape[-1] % 4 - # NOTE: the current implementation doesn't allow mutating - # if the weight tensor is an output or not a constant - and self._weights.is_constant - and self._weights not in op.subgraph.outputs - ) - return False - - def mutate(self, op): - subgraph = op.subgraph - - with self.using(op): - old_weight_tensor = self._weights - old_shape = old_weight_tensor.shape - new_shape = [*old_shape[:3], int(4 * np.ceil(old_shape[3] / 4))] - pad_size = new_shape[3] - old_shape[3] - - pads = [[0, 0], [0, 0], [0, 0], [0, pad_size]] - - # create new zero padded kernel tensor - # TODO: this could be done better if we had constant folding, by - # adding an appropriate padding op between the original and - # the new weights, and let it be folded later. 
- # (this would also work if the weight/bias is an input/output) - new_weight_tensor = subgraph.create_tensor( - f"{self._op.name}/weights", - old_weight_tensor.type, - new_shape, - quantization=old_weight_tensor.quantization, - consumers=[self._op], - ) - new_weight_tensor.buffer.data = np.pad(self._weights.as_array(), pads) - - # rewire old and new kernel tensors - old_weight_tensor.consumers.remove(self._op) - self._op.inputs[1] = new_weight_tensor - - # create new channel-wise padding operator - old_input = self._input - pad_op = subgraph.create_operator( - OperatorCode(BuiltinOpCodes.PAD), inputs=[old_input] - ) - subgraph.insert_operator(self._op, pad_op) - old_input.consumers.remove(self._op) - - # create paddings tensor and connect to op - paddings_tensor = subgraph.create_tensor( - f"{pad_op.name}/paddings", - TensorType.INT32, - shape=[4, 2], - consumers=[pad_op], - ) - paddings_tensor.buffer.data = np.int32(pads) - pad_op.inputs.append(paddings_tensor) - - # create intermediate tensor and wire up to conv and pad ops - self._op.inputs[0] = subgraph.create_tensor( - f"{self._op.name}/xc_padded_input", - TensorType.INT8, - shape=[*old_input.shape[:3], new_shape[3]], - producers=[pad_op], - consumers=[self._op], - quantization=deepcopy(old_input.quantization), - ) - pad_op.outputs.append(self._op.inputs[0]) diff --git a/archived/tflite2xcore/tflite2xcore/utils.py b/archived/tflite2xcore/tflite2xcore/utils.py deleted file mode 100644 index faf49a889..000000000 --- a/archived/tflite2xcore/tflite2xcore/utils.py +++ /dev/null @@ -1,392 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import os -import re -import random -import argparse -import logging -import numpy as np -import tensorflow as tf -from math import log2, ceil -from functools import wraps -from types import TracebackType -from typing import ( - Union, - Optional, - Dict, - Any, - TypeVar, - Callable, - cast, - Type, - NamedTuple, - Iterator, - List, - Tuple, -) - -# ----------------------------------------------------------------------------- -# WIDELY USED TYPES AND TYPING HELPERS -# ----------------------------------------------------------------------------- - -_T = TypeVar("_T") - - -def asserting_cast(t: Type[_T], x: Any) -> _T: - """WARNING: this function should be used sparingly!""" - assert isinstance(x, t), type(x) - return x - - -class QuantizationTuple(NamedTuple): - scale: float - zero_point: int - - -TFLiteModel = Union[bytes, bytearray] -PaddingTuple = Tuple[Tuple[int, int], ...] -ShapeTuple = Tuple[int, ...] 
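The channel-padding arithmetic in CanonicalizeConv2DInputChannels above is easy to check in isolation. A short sketch, assuming a hypothetical kernel with 3 input channels (the shapes are illustrative, not from the source):

    import numpy as np

    old_shape = (8, 3, 3, 3)  # hypothetical (Cout, Kh, Kw, Cin) CONV_2D kernel
    new_cin = int(4 * np.ceil(old_shape[3] / 4))  # Cin rounded up to a word
    pads = [(0, 0), (0, 0), (0, 0), (0, new_cin - old_shape[3])]

    weights = np.ones(old_shape, dtype=np.int8)
    padded = np.pad(weights, pads)  # zeros appended on the channel axis only
    print(padded.shape)             # (8, 3, 3, 4)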
- - -# ----------------------------------------------------------------------------- -# XCORE MAGIC NUMBERS -# ----------------------------------------------------------------------------- - -ACC_PERIOD_INT8 = 16 - -WORD_SIZE_BYTES = 4 -WORD_SIZE_BITS = WORD_SIZE_BYTES * 8 -VECTOR_SIZE_WORDS = 8 -VECTOR_SIZE_BYTES = VECTOR_SIZE_WORDS * 4 -VECTOR_SIZE_BITS = VECTOR_SIZE_BYTES * 8 - -# ----------------------------------------------------------------------------- -# REPRODUCIBILITY -# ----------------------------------------------------------------------------- - -DEFAULT_SEED = 123 - - -def set_all_seeds(seed: int = DEFAULT_SEED) -> None: - tf.random.set_seed(seed) - np.random.seed(seed) - random.seed(seed) - - -def set_gpu_usage(use_gpu: bool, verbose: Union[bool, int]) -> None: - # can throw annoying error if CUDA cannot be initialized - try: - default_log_level = os.environ["TF_CPP_MIN_LOG_LEVEL"] - if not verbose: - os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" - gpus = tf.config.experimental.list_physical_devices("GPU") - os.environ["TF_CPP_MIN_LOG_LEVEL"] = default_log_level - except KeyError: - gpus = tf.config.experimental.list_physical_devices("GPU") - - if gpus: - if use_gpu: - for gpu in gpus: - tf.config.experimental.set_memory_growth(gpu, enable=True) - else: - logging.info("GPUs disabled.") - tf.config.experimental.set_visible_devices([], "GPU") - elif use_gpu: - logging.warning("No available GPUs found, defaulting to CPU.") - logging.debug(f"Eager execution enabled: {tf.executing_eagerly()}") - - -# ----------------------------------------------------------------------------- -# LOGGING & STRING FORMATTING -# ----------------------------------------------------------------------------- - - -def set_verbosity(verbosity: int = 0) -> None: - verbosities = [logging.WARNING, logging.INFO, logging.DEBUG] - verbosity = min(verbosity, len(verbosities) - 1) - - logging.basicConfig(level=verbosities[verbosity]) - if not verbosity: - logging.getLogger("tensorflow").setLevel(logging.ERROR) - - -class VerbosityParser(argparse.ArgumentParser): - def __init__( - self, - *args: Any, - verbosity_config: Optional[Dict[str, Any]] = None, - **kwargs: Any, - ) -> None: - self.logger = logging.getLogger(self.__class__.__name__) - kwargs.setdefault("formatter_class", argparse.ArgumentDefaultsHelpFormatter) - kwargs.setdefault("conflict_handler", "resolve") - super().__init__(*args, **kwargs) - - verbosity_config = verbosity_config or dict() - verbosity_config.setdefault("action", "count") - verbosity_config.setdefault("default", 0) - verbosity_config.setdefault( - "help", - "Set verbosity level. 
" - "-v: summary info of mutations; -vv: detailed mutation and debug info.", - ) - self.add_argument("-v", "--verbose", **verbosity_config) - - def parse_args(self, *args, **kwargs): # type: ignore - args = super().parse_args(*args, **kwargs) - set_verbosity(args.verbose) # type: ignore - set_gpu_usage(args.use_gpu if hasattr(args, "use_gpu") else False, args.verbose) # type: ignore - return args - - -def snake_to_camel(word: str) -> str: - output = "".join(x.capitalize() or "_" for x in word.split("_")) - return output[0].lower() + output[1:] - - -def camel_to_snake(name: str) -> str: - name = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name) - return re.sub("([a-z0-9])([A-Z])", r"\1_\2", name).lower() - - -def format_array(arr: np.ndarray, style: str = "") -> str: - msg = f"numpy.ndarray, shape={arr.shape}, dtype={arr.dtype}:\n" - if style.endswith("_scale_offset_arr"): - msg += f"shift_pre:\n{arr[:, 0]}\n" - msg += f"scale:\n{arr[:, 1]}\n" - msg += f"offset_scale:\n{arr[:, 2]}\n" - msg += f"offset:\n{arr[:, 3]}\n" - msg += f"shift_post:\n{arr[:, 4]}" - else: - msg += f"{arr}" - return msg + "\n" - - -_RT = TypeVar("_RT") -_DecoratedFunc = TypeVar("_DecoratedFunc", bound=Callable[..., _RT]) - - -def log_method_output( - level: int = logging.DEBUG, logger: Optional[logging.Logger] = None -) -> Callable[[_DecoratedFunc], _DecoratedFunc]: - def _log_method_output(func: _DecoratedFunc) -> _DecoratedFunc: - @wraps(func) - def wrapper(self: Any, *args: Any, **kwargs: Any) -> _RT: - try: - logger = logger or self.logger - except AttributeError: - logger = logging.getLogger() - - out: _RT = func(self, *args, **kwargs) - msg = f"{func.__name__} output:\n" - if isinstance(out, np.ndarray): - msg += format_array(out, func.__name__) - else: - msg += f"{out}\n" - - logger.log(level, msg) - return out - - return cast(_DecoratedFunc, wrapper) - - return _log_method_output - - -class LoggingContext: - def __init__( - self, - logger: logging.Logger, - level: Optional[int] = None, - handler: Optional[logging.Handler] = None, - close: bool = True, - ) -> None: - self.logger = logger - self.level = level - self.handler = handler - self.close = close - - def __enter__(self) -> None: - if self.level is not None: - self.old_level = self.logger.level - self.logger.setLevel(self.level) - if self.handler: - self.logger.addHandler(self.handler) - - def __exit__( - self, - exception_type: Optional[Type[BaseException]], - exception_value: Optional[BaseException], - traceback: Optional[TracebackType], - ) -> None: - if self.level is not None: - self.logger.setLevel(self.old_level) - if self.handler: - self.logger.removeHandler(self.handler) - if self.handler and self.close: - self.handler.close() - - -# ----------------------------------------------------------------------------- -# BINARY OPERATION HELPERS -# ----------------------------------------------------------------------------- - - -def unpack_bits(arr: np.ndarray) -> np.ndarray: - assert arr.dtype == np.int32 - unpacked_shape = (*arr.shape[:-1], arr.shape[-1] * WORD_SIZE_BITS) - return np.unpackbits( # pylint: disable=no-member - np.frombuffer(arr.tobytes(), dtype=np.uint8) - ).reshape(unpacked_shape) - - -def xor_popcount(a: np.ndarray, b: np.ndarray) -> int: - assert a.dtype == b.dtype == np.int32 - return np.count_nonzero(unpack_bits(np.bitwise_xor(a, b))) # type: ignore - - -def clrsb(a: int, bitwidth: int = 32) -> int: - """ counts leading redundant sign bits """ - return bitwidth - ceil(log2(abs(a))) - 1 - - -# 
----------------------------------------------------------------------------- -# QUANTIZATION HELPERS -# ----------------------------------------------------------------------------- - - -def quantize( - arr: np.ndarray, - scale: float, - zero_point: int, - dtype: Union[type, "np.dtype"] = np.int8, -) -> np.ndarray: - t = np.round(np.float32(arr) / np.float32(scale)).astype(np.int32) + zero_point - return np.clip(t, np.iinfo(dtype).min, np.iinfo(dtype).max).astype(dtype) - - -def dequantize(arr: np.ndarray, scale: float, zero_point: int) -> np.ndarray: - return np.float32(arr.astype(np.int32) - np.int32(zero_point)) * np.float32(scale) - - -# ----------------------------------------------------------------------------- -# MODEL CONVERSION AND EVALUATION HELPERS -# ----------------------------------------------------------------------------- - - -def quantize_converter( - converter: tf.lite.TFLiteConverter, - representative_data: Union[tf.Tensor, np.ndarray], - *, - show_progress_step: int = 0, -) -> None: - converter.optimizations = [tf.lite.Optimize.DEFAULT] - converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] - converter.inference_input_type = tf.int8 - converter.inference_output_type = tf.int8 - x_train_ds = tf.data.Dataset.from_tensor_slices(representative_data).batch(1) - - def representative_data_gen() -> Iterator[List[tf.Tensor]]: - for j, input_value in enumerate(x_train_ds.take(representative_data.shape[0])): - if show_progress_step and (j + 1) % show_progress_step == 0: - logging.info( - "Converter quantization processed examples " - f"{j+1:6d}/{representative_data.shape[0]}" - ) - yield [input_value] - - converter.representative_dataset = representative_data_gen - - -def apply_interpreter_to_examples( - interpreter: tf.lite.Interpreter, - examples: Union[tf.Tensor, np.ndarray], - *, - interpreter_input_ind: Optional[int] = None, - interpreter_output_ind: Optional[int] = None, - show_progress_step: int = 0, - show_pid: bool = False, -) -> np.ndarray: - interpreter.allocate_tensors() - if interpreter_input_ind is None: - interpreter_input_ind = interpreter.get_input_details()[0]["index"] - if interpreter_output_ind is None: - interpreter_output_ind = interpreter.get_output_details()[0]["index"] - - outputs = [] - for j, x in enumerate(examples): - if show_progress_step and (j + 1) % show_progress_step == 0: - if show_pid: - logging.info( - f"(PID {os.getpid()}) Evaluated examples {j+1:6d}/{examples.shape[0]}" - ) - else: - logging.info(f"Evaluated examples {j+1:6d}/{examples.shape[0]}") - interpreter.set_tensor(interpreter_input_ind, np.expand_dims(x, 0)) - interpreter.invoke() - y = interpreter.get_tensor(interpreter_output_ind) - outputs.append(y) - - return np.vstack(outputs) if isinstance(examples, np.ndarray) else outputs - - -def quantize_keras_model( - model: tf.keras.Model, - representative_data: Union[tf.Tensor, np.ndarray], - show_progress_step: int = 0, -) -> TFLiteModel: - converter = tf.lite.TFLiteConverter.from_keras_model(model) - quantize_converter( - converter, representative_data, show_progress_step=show_progress_step - ) - return converter.convert() # type: ignore - - -# ----------------------------------------------------------------------------- -# SHAPE COMPUTATION HELPERS -# ----------------------------------------------------------------------------- - - -def _calculate_valid_output_size(in_size: int, stride: int, k_dim: int) -> int: - assert in_size >= k_dim - return ceil((in_size - k_dim + 1) / stride) - - -def 
calculate_valid_output_size( - input_size: ShapeTuple, strides: ShapeTuple, kernel_size: ShapeTuple -) -> ShapeTuple: - return tuple( - _calculate_valid_output_size(*t) for t in zip(input_size, strides, kernel_size) - ) - - -def _calculate_same_output_size(in_size: int, stride: int) -> int: - return ceil(in_size / stride) - - -def calculate_same_output_size( - input_size: ShapeTuple, strides: ShapeTuple -) -> ShapeTuple: - return tuple(_calculate_same_output_size(*t) for t in zip(input_size, strides)) - - -def calculate_same_padding( - input_size: ShapeTuple, strides: ShapeTuple, kernel_size: ShapeTuple -) -> PaddingTuple: - def calc_axis_pad(in_size: int, stride: int, k_dim: int) -> Tuple[int, int]: - out_size = _calculate_same_output_size(in_size, stride) - total_pad = max((out_size - 1) * stride + k_dim - in_size, 0) - pad_start = total_pad // 2 - return (pad_start, total_pad - pad_start) - - return tuple(calc_axis_pad(*t) for t in zip(input_size, strides, kernel_size)) - - -def get_bitpacked_shape(shape: Tuple[int, ...]) -> Tuple[int, ...]: - channels = shape[-1] - assert channels % WORD_SIZE_BITS == 0 - return (*shape[:-1], channels // WORD_SIZE_BITS) - - -def get_unpacked_shape(shape: Tuple[int, ...]) -> Tuple[int, ...]: - return (*shape[:-1], shape[-1] * WORD_SIZE_BITS) diff --git a/archived/tflite2xcore/tflite2xcore/version.py b/archived/tflite2xcore/tflite2xcore/version.py deleted file mode 100644 index 5c231f2c9..000000000 --- a/archived/tflite2xcore/tflite2xcore/version.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. -from contextlib import suppress - - -def get_version() -> str: - with suppress(Exception): - try: - # use setuptools_scm if installed - # setuptools_scm will append commit info the base version number - from setuptools_scm import get_version - - return get_version( - root="../..", relative_to=__file__, version_scheme="post-release" - ) - except ImportError: - # fall back to the builtin importlib_metadata module - # importlib_metadata returns the version number in the package metadata - from importlib_metadata import version - - try: - return version(__name__) - except: - return "Unable to determine version from package." diff --git a/archived/tflite2xcore/tflite2xcore/xcore_model.py b/archived/tflite2xcore/tflite2xcore/xcore_model.py deleted file mode 100644 index a976a3ee4..000000000 --- a/archived/tflite2xcore/tflite2xcore/xcore_model.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright 2019-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
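Returning to the shape helpers in utils.py above: they follow the standard TensorFlow SAME/VALID conventions. As a quick sanity check, here is the SAME-padding computation traced by hand, assuming a hypothetical 9x9 input, stride 2, and a 3x3 kernel (values are illustrative):

    from math import ceil

    input_size, strides, kernel_size = (9, 9), (2, 2), (3, 3)

    def axis_pad(in_size, stride, k_dim):
        out_size = ceil(in_size / stride)  # SAME output size per axis
        total = max((out_size - 1) * stride + k_dim - in_size, 0)
        return (total // 2, total - total // 2)

    padding = tuple(axis_pad(*t) for t in zip(input_size, strides, kernel_size))
    print(padding)  # ((1, 1), (1, 1)): one pixel of padding on every edge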
- -from tflite2xcore.xcore_schema import * diff --git a/archived/tflite2xcore/tflite2xcore/xcore_schema/README.md b/archived/tflite2xcore/tflite2xcore/xcore_schema/README.md deleted file mode 100644 index 115eb1d42..000000000 --- a/archived/tflite2xcore/tflite2xcore/xcore_schema/README.md +++ /dev/null @@ -1,27 +0,0 @@ -# Generating schema_py_generated.py - -## Install flatbuffers version 1.12.0 or newer - -Pull/clone & check out the release branch - -Build the latest flatc - - > mkdir build - > cd build - > cmake ../ - > make install - -## Generate schema - -Run - - > ./flatc --python --gen-mutable --gen-object-api --no-includes path/to/schema.fbs - -Concatenate the generated .py files into a single module - - > find ./tflite -name '*.py' -exec cat {} + | sed '/import flatbuffers/d' | sed '/from flatbuffers.compat import import_numpy/d' | sed '/np = import_numpy()/d' | sed '/# namespace: tflite/d' | sed '/# automatically generated by the FlatBuffers compiler, do not modify/d' > schema_py_generated.py - -Add the following lines to schema_py_generated.py - - import flatbuffers - import numpy as np diff --git a/archived/tflite2xcore/tflite2xcore/xcore_schema/__init__.py b/archived/tflite2xcore/tflite2xcore/xcore_schema/__init__.py deleted file mode 100644 index 55504c46f..000000000 --- a/archived/tflite2xcore/tflite2xcore/xcore_schema/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -from . import flexbuffers -from .tensor_type import TensorType -from .op_codes import BuiltinOpCodes, ExternalOpCodes, XCOREOpCodes -from .misc_enums import ( - QuantizationDetails, - ActivationFunctionType, - FullyConnectedOptionsWeightsFormat, - Padding, -) -from .builtin_options import BuiltinOptions - -from .ir_object import _IRObject, _ModelDependent, _SubgraphDependent -from .operator_code import OperatorCode, ValidOpCodes, CustomOpCodes -from .data_container import Buffer, Metadata, _DataContainer -from .operator import _OpOptionsType, Operator -from .tensor import Tensor, _ShapeInputType -from .subgraph import Subgraph - - -from .xcore_model import XCOREModel - -from . import xcore_model diff --git a/archived/tflite2xcore/tflite2xcore/xcore_schema/builtin_options.py b/archived/tflite2xcore/tflite2xcore/xcore_schema/builtin_options.py deleted file mode 100644 index d2669601f..000000000 --- a/archived/tflite2xcore/tflite2xcore/xcore_schema/builtin_options.py +++ /dev/null @@ -1,146 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -# type: ignore - -from enum import Enum - -from . 
import schema_py_generated as schema, BuiltinOpCodes - -BuiltinOptions = Enum( - "BuiltinOptions", - {k: v for k, v in vars(schema.BuiltinOptions).items() if not k.startswith("__")}, -) - -# this mapping should follow the schema and: -# tensorflow/tensorflow/lite/core/api/flatbuffer_conversions.cc -__BuiltinOptions_from_BuiltinOpCodes = { - BuiltinOpCodes.ADD: BuiltinOptions.AddOptions, - BuiltinOpCodes.AVERAGE_POOL_2D: BuiltinOptions.Pool2DOptions, - BuiltinOpCodes.CONCATENATION: BuiltinOptions.ConcatenationOptions, - BuiltinOpCodes.CONV_2D: BuiltinOptions.Conv2DOptions, - BuiltinOpCodes.DEPTHWISE_CONV_2D: BuiltinOptions.DepthwiseConv2DOptions, - BuiltinOpCodes.DEPTH_TO_SPACE: BuiltinOptions.DepthToSpaceOptions, - BuiltinOpCodes.DEQUANTIZE: BuiltinOptions.DequantizeOptions, - BuiltinOpCodes.EMBEDDING_LOOKUP: BuiltinOptions.NONE, - BuiltinOpCodes.FLOOR: BuiltinOptions.NONE, - BuiltinOpCodes.FULLY_CONNECTED: BuiltinOptions.FullyConnectedOptions, - BuiltinOpCodes.HASHTABLE_LOOKUP: BuiltinOptions.NONE, - BuiltinOpCodes.L2_NORMALIZATION: BuiltinOptions.L2NormOptions, - BuiltinOpCodes.L2_POOL_2D: BuiltinOptions.Pool2DOptions, - BuiltinOpCodes.LOCAL_RESPONSE_NORMALIZATION: BuiltinOptions.LocalResponseNormalizationOptions, - BuiltinOpCodes.LOGISTIC: BuiltinOptions.NONE, - BuiltinOpCodes.LSH_PROJECTION: BuiltinOptions.LSHProjectionOptions, - BuiltinOpCodes.LSTM: BuiltinOptions.LSTMOptions, - BuiltinOpCodes.MAX_POOL_2D: BuiltinOptions.Pool2DOptions, - BuiltinOpCodes.MUL: BuiltinOptions.MulOptions, - BuiltinOpCodes.RELU: BuiltinOptions.NONE, - BuiltinOpCodes.RELU_N1_TO_1: BuiltinOptions.NONE, - BuiltinOpCodes.RELU6: BuiltinOptions.NONE, - BuiltinOpCodes.RESHAPE: BuiltinOptions.ReshapeOptions, - BuiltinOpCodes.RESIZE_BILINEAR: BuiltinOptions.ResizeBilinearOptions, - BuiltinOpCodes.RNN: BuiltinOptions.RNNOptions, - BuiltinOpCodes.SOFTMAX: BuiltinOptions.SoftmaxOptions, - BuiltinOpCodes.SPACE_TO_DEPTH: BuiltinOptions.SpaceToDepthOptions, - BuiltinOpCodes.SVDF: BuiltinOptions.SVDFOptions, - BuiltinOpCodes.TANH: BuiltinOptions.NONE, - BuiltinOpCodes.CONCAT_EMBEDDINGS: BuiltinOptions.ConcatEmbeddingsOptions, - BuiltinOpCodes.SKIP_GRAM: BuiltinOptions.SkipGramOptions, - BuiltinOpCodes.CALL: BuiltinOptions.CallOptions, - BuiltinOpCodes.CUSTOM: BuiltinOptions.NONE, - BuiltinOpCodes.EMBEDDING_LOOKUP_SPARSE: BuiltinOptions.EmbeddingLookupSparseOptions, - BuiltinOpCodes.PAD: BuiltinOptions.PadOptions, - BuiltinOpCodes.UNIDIRECTIONAL_SEQUENCE_RNN: BuiltinOptions.SequenceRNNOptions, - BuiltinOpCodes.GATHER: BuiltinOptions.GatherOptions, - BuiltinOpCodes.BATCH_TO_SPACE_ND: BuiltinOptions.BatchToSpaceNDOptions, - BuiltinOpCodes.SPACE_TO_BATCH_ND: BuiltinOptions.SpaceToBatchNDOptions, - BuiltinOpCodes.TRANSPOSE: BuiltinOptions.TransposeOptions, - BuiltinOpCodes.MEAN: BuiltinOptions.ReducerOptions, - BuiltinOpCodes.SUB: BuiltinOptions.SubOptions, - BuiltinOpCodes.DIV: BuiltinOptions.DivOptions, - BuiltinOpCodes.SQUEEZE: BuiltinOptions.SqueezeOptions, - BuiltinOpCodes.UNIDIRECTIONAL_SEQUENCE_LSTM: BuiltinOptions.UnidirectionalSequenceLSTMOptions, - BuiltinOpCodes.STRIDED_SLICE: BuiltinOptions.StridedSliceOptions, - BuiltinOpCodes.BIDIRECTIONAL_SEQUENCE_RNN: BuiltinOptions.BidirectionalSequenceRNNOptions, - BuiltinOpCodes.EXP: BuiltinOptions.ExpOptions, - BuiltinOpCodes.TOPK_V2: BuiltinOptions.TopKV2Options, - BuiltinOpCodes.SPLIT: BuiltinOptions.SplitOptions, - BuiltinOpCodes.LOG_SOFTMAX: BuiltinOptions.LogSoftmaxOptions, - BuiltinOpCodes.DELEGATE: BuiltinOptions.NONE, - BuiltinOpCodes.BIDIRECTIONAL_SEQUENCE_LSTM: 
BuiltinOptions.BidirectionalSequenceLSTMOptions, - BuiltinOpCodes.CAST: BuiltinOptions.CastOptions, - BuiltinOpCodes.PRELU: BuiltinOptions.NONE, - BuiltinOpCodes.MAXIMUM: BuiltinOptions.MaximumMinimumOptions, - BuiltinOpCodes.ARG_MAX: BuiltinOptions.ArgMaxOptions, - BuiltinOpCodes.MINIMUM: BuiltinOptions.MaximumMinimumOptions, - BuiltinOpCodes.LESS: BuiltinOptions.LessOptions, - BuiltinOpCodes.NEG: BuiltinOptions.NegOptions, - BuiltinOpCodes.PADV2: BuiltinOptions.PadV2Options, - BuiltinOpCodes.GREATER: BuiltinOptions.GreaterOptions, - BuiltinOpCodes.GREATER_EQUAL: BuiltinOptions.GreaterEqualOptions, - BuiltinOpCodes.LESS_EQUAL: BuiltinOptions.LessEqualOptions, - BuiltinOpCodes.SELECT: BuiltinOptions.SelectOptions, - BuiltinOpCodes.SLICE: BuiltinOptions.SliceOptions, - BuiltinOpCodes.SIN: BuiltinOptions.NONE, - BuiltinOpCodes.TRANSPOSE_CONV: BuiltinOptions.TransposeConvOptions, - BuiltinOpCodes.SPARSE_TO_DENSE: BuiltinOptions.SparseToDenseOptions, - BuiltinOpCodes.TILE: BuiltinOptions.TileOptions, - BuiltinOpCodes.EXPAND_DIMS: BuiltinOptions.ExpandDimsOptions, - BuiltinOpCodes.EQUAL: BuiltinOptions.EqualOptions, - BuiltinOpCodes.NOT_EQUAL: BuiltinOptions.NotEqualOptions, - BuiltinOpCodes.LOG: BuiltinOptions.NONE, - BuiltinOpCodes.SUM: BuiltinOptions.ReducerOptions, - BuiltinOpCodes.SQRT: BuiltinOptions.NONE, - BuiltinOpCodes.RSQRT: BuiltinOptions.NONE, - BuiltinOpCodes.SHAPE: BuiltinOptions.ShapeOptions, - BuiltinOpCodes.POW: BuiltinOptions.PowOptions, - BuiltinOpCodes.ARG_MIN: BuiltinOptions.ArgMinOptions, - BuiltinOpCodes.FAKE_QUANT: BuiltinOptions.FakeQuantOptions, - BuiltinOpCodes.REDUCE_PROD: BuiltinOptions.ReducerOptions, - BuiltinOpCodes.REDUCE_MAX: BuiltinOptions.ReducerOptions, - BuiltinOpCodes.PACK: BuiltinOptions.PackOptions, - BuiltinOpCodes.LOGICAL_OR: BuiltinOptions.LogicalOrOptions, - BuiltinOpCodes.ONE_HOT: BuiltinOptions.OneHotOptions, - BuiltinOpCodes.LOGICAL_AND: BuiltinOptions.LogicalAndOptions, - BuiltinOpCodes.LOGICAL_NOT: BuiltinOptions.LogicalNotOptions, - BuiltinOpCodes.UNPACK: BuiltinOptions.UnpackOptions, - BuiltinOpCodes.REDUCE_MIN: BuiltinOptions.ReducerOptions, - BuiltinOpCodes.FLOOR_DIV: BuiltinOptions.FloorDivOptions, - BuiltinOpCodes.REDUCE_ANY: BuiltinOptions.ReducerOptions, - BuiltinOpCodes.SQUARE: BuiltinOptions.SquareOptions, - BuiltinOpCodes.ZEROS_LIKE: BuiltinOptions.ZerosLikeOptions, - BuiltinOpCodes.FILL: BuiltinOptions.FillOptions, - BuiltinOpCodes.FLOOR_MOD: BuiltinOptions.FloorModOptions, - BuiltinOpCodes.RANGE: BuiltinOptions.RangeOptions, - BuiltinOpCodes.RESIZE_NEAREST_NEIGHBOR: BuiltinOptions.ResizeNearestNeighborOptions, - BuiltinOpCodes.LEAKY_RELU: BuiltinOptions.LeakyReluOptions, - BuiltinOpCodes.SQUARED_DIFFERENCE: BuiltinOptions.SquaredDifferenceOptions, - BuiltinOpCodes.MIRROR_PAD: BuiltinOptions.MirrorPadOptions, - BuiltinOpCodes.ABS: BuiltinOptions.AbsOptions, - BuiltinOpCodes.SPLIT_V: BuiltinOptions.SplitVOptions, - BuiltinOpCodes.UNIQUE: BuiltinOptions.UniqueOptions, - BuiltinOpCodes.CEIL: BuiltinOptions.NONE, - BuiltinOpCodes.REVERSE_V2: BuiltinOptions.ReverseV2Options, - BuiltinOpCodes.ADD_N: BuiltinOptions.AddNOptions, - BuiltinOpCodes.GATHER_ND: BuiltinOptions.GatherNdOptions, - BuiltinOpCodes.COS: BuiltinOptions.CosOptions, - BuiltinOpCodes.WHERE: BuiltinOptions.WhereOptions, - BuiltinOpCodes.RANK: BuiltinOptions.RankOptions, - BuiltinOpCodes.ELU: BuiltinOptions.NONE, - BuiltinOpCodes.REVERSE_SEQUENCE: BuiltinOptions.ReverseSequenceOptions, - BuiltinOpCodes.MATRIX_DIAG: BuiltinOptions.MatrixDiagOptions, - 
BuiltinOpCodes.QUANTIZE: BuiltinOptions.QuantizeOptions, - BuiltinOpCodes.MATRIX_SET_DIAG: BuiltinOptions.MatrixSetDiagOptions, - BuiltinOpCodes.ROUND: BuiltinOptions.NONE, - BuiltinOpCodes.HARD_SWISH: BuiltinOptions.HardSwishOptions, - BuiltinOpCodes.IF: BuiltinOptions.IfOptions, - BuiltinOpCodes.WHILE: BuiltinOptions.WhileOptions, - BuiltinOpCodes.NON_MAX_SUPPRESSION_V4: BuiltinOptions.NonMaxSuppressionV4Options, - BuiltinOpCodes.NON_MAX_SUPPRESSION_V5: BuiltinOptions.NonMaxSuppressionV5Options, - BuiltinOpCodes.SCATTER_ND: BuiltinOptions.ScatterNdOptions, - BuiltinOpCodes.SELECT_V2: BuiltinOptions.SelectV2Options, - BuiltinOpCodes.DENSIFY: BuiltinOptions.DensifyOptions, - BuiltinOpCodes.SEGMENT_SUM: BuiltinOptions.SegmentSumOptions, - BuiltinOpCodes.BATCH_MATMUL: BuiltinOptions.BatchMatMulOptions, -} -BuiltinOptions.from_BuiltinOpCodes = lambda x: __BuiltinOptions_from_BuiltinOpCodes[x] diff --git a/archived/tflite2xcore/tflite2xcore/xcore_schema/builtin_options.pyi b/archived/tflite2xcore/tflite2xcore/xcore_schema/builtin_options.pyi deleted file mode 100644 index 4269e451e..000000000 --- a/archived/tflite2xcore/tflite2xcore/xcore_schema/builtin_options.pyi +++ /dev/null @@ -1,12 +0,0 @@ -# Copyright (c) 2020, XMOS Ltd, All rights reserved - -from enum import IntEnum -from typing import Callable, Any - -from . import BuiltinOpCodes - -class BuiltinOptions(IntEnum): - # TODO: consider adding fields for IDE support - @classmethod - def __call__(cls, x: Any) -> BuiltinOptions: ... - from_BuiltinOpCodes: Callable[[BuiltinOpCodes], BuiltinOptions] diff --git a/archived/tflite2xcore/tflite2xcore/xcore_schema/data_container.py b/archived/tflite2xcore/tflite2xcore/xcore_schema/data_container.py deleted file mode 100644 index eb9d2d468..000000000 --- a/archived/tflite2xcore/tflite2xcore/xcore_schema/data_container.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import logging -import numpy as np -from typing import TYPE_CHECKING, Iterable, Optional, Union, List, Any - -from . import _ModelDependent, TensorType - -if TYPE_CHECKING: - from .xcore_model import XCOREModel - from . 
import Tensor - - -_BufferDataType = Union[list, tuple, bytes, bytearray, np.ndarray] - - -class _DataContainer(_ModelDependent): - def __init__( - self, - name: Optional[str] = None, - model: Optional["XCOREModel"] = None, - data: Optional[_BufferDataType] = None, - ) -> None: - super().__init__(name, model) - self.data = data # type: ignore # see https://github.com/python/mypy/issues/3004 - - @property - def data(self) -> bytes: - return self._data - - @data.setter - def data(self, data: Optional[_BufferDataType]) -> None: - if data is None: - self._data = b"" - elif isinstance(data, (list, tuple, bytes, bytearray)): - # this ensures immutability and that lists have uint8 elements only - self._data = bytes(data) - elif isinstance(data, np.ndarray): - try: - TensorType.from_numpy_dtype(data.dtype) - except KeyError: - # we throw a warning if a non-convertible datatype is used - logging.getLogger("XCOREModel").warning( - f"Numpy array of type {data.dtype} stored in buffer" - ) - self._data = data.tobytes() - else: - raise TypeError(f"data must be list/tuple of bytes or numpy array") - - def __len__(self) -> int: - return len(self.data) - - def is_equal(self, other: Any) -> bool: - return super().is_equal(other) and self.data == other.data - - -class Buffer(_DataContainer): - def __init__( - self, - model: Optional["XCOREModel"] = None, - data: Optional[_BufferDataType] = None, - *, - owners: Optional[Iterable["Tensor"]] = None, - ) -> None: - super().__init__(None, model, data) - self.owners: List["Tensor"] = list( - owners or [] - ) # TODO: should this be managed by Tensor? - - def __str__(self) -> str: - return f"Buffer[{len(self.data)}]" - - def is_equal(self, other: Any) -> bool: - # check owner length only to avoid circular dependencies - return super().is_equal(other) and len(self.owners) == len(other.owners) - - def sanity_check(self) -> None: - assert self in self.model.buffers - for owner in self.owners: - assert owner.buffer is self - - -class Metadata(_DataContainer): - def __init__( - self, - name: str, - model: Optional["XCOREModel"] = None, - data: Optional[_BufferDataType] = None, - ) -> None: - super().__init__(name, model, data) - - def __str__(self) -> str: - return f"name={self.name}, data={list(self.data)}" - - def sanity_check(self) -> None: - super().sanity_check - assert self in self.model.metadata diff --git a/archived/tflite2xcore/tflite2xcore/xcore_schema/dict_conversion.py b/archived/tflite2xcore/tflite2xcore/xcore_schema/dict_conversion.py deleted file mode 100644 index 038ad8386..000000000 --- a/archived/tflite2xcore/tflite2xcore/xcore_schema/dict_conversion.py +++ /dev/null @@ -1,223 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import numpy as np -from enum import Enum -from typing import TYPE_CHECKING, Dict, Union, Any - -from tflite2xcore.utils import camel_to_snake, snake_to_camel - -from .flexbuffers import FlexbufferBuilder -from . import ( - schema_py_generated as schema, - Buffer, - Subgraph, - Tensor, - Operator, - Metadata, - ActivationFunctionType, - Padding, - OperatorCode, - QuantizationDetails, - BuiltinOpCodes, - BuiltinOptions, -) - -if TYPE_CHECKING: - from . 
import XCOREModel - - -def create_dict_from_operator_code( - operator_code: OperatorCode, -) -> Dict[str, Union[str, int]]: - operator_code_dict: Dict[str, Union[str, int]] = {"version": operator_code.version} - - if operator_code.code in BuiltinOpCodes: - operator_code_dict["builtin_code"] = operator_code.name - else: - operator_code_dict["builtin_code"] = BuiltinOpCodes.CUSTOM.name - operator_code_dict["custom_code"] = operator_code.name - - return operator_code_dict - - -def create_dict_from_operator(operator: Operator) -> Dict[str, Any]: - tensors = operator.subgraph.tensors - operator_codes = operator.subgraph.model.operator_codes - - operator_dict = { - "opcode_index": operator_codes.index(operator.operator_code), - "inputs": [tensors.index(input_tensor) for input_tensor in operator.inputs], - "outputs": [tensors.index(input_tensor) for input_tensor in operator.outputs], - "custom_options_format": "FLEXBUFFERS", - } - - if operator.builtin_options: - operator_dict["builtin_options"] = operator.builtin_options - - if operator.custom_options: - fbb = FlexbufferBuilder(operator.custom_options) - operator_dict["custom_options"] = fbb.get_bytes() - - return operator_dict - - -def create_dict_from_tensor( - tensor: Tensor, *, extended: bool = False -) -> Dict[str, Any]: - subgraph = tensor.subgraph - buffers = subgraph.model.buffers - - tensor_dict = { - "name": tensor.name, - "type": tensor.type.name, - "shape": tensor.shape, - "buffer": buffers.index(tensor.buffer), - } - - if tensor.quantization: - tensor_dict["quantization"] = tensor.quantization - - if extended: - operators = subgraph.operators - tensor_dict["consumers"] = sorted(operators.index(t) for t in tensor.consumers) - tensor_dict["producers"] = sorted(operators.index(t) for t in tensor.producers) - - return tensor_dict - - -def create_dict_from_subgraph( - subgraph: Subgraph, *, extended: bool = False -) -> Dict[str, Any]: - tensors = subgraph.tensors - - subgraph_dict = { - "tensors": [ - create_dict_from_tensor(tensor, extended=extended) for tensor in tensors - ], - "inputs": [tensors.index(input_tensor) for input_tensor in subgraph.inputs], - "outputs": [tensors.index(output_tensor) for output_tensor in subgraph.outputs], - "operators": [ - create_dict_from_operator(operator) for operator in subgraph.operators - ], - } - - if subgraph.name: - subgraph_dict["name"] = subgraph.name - - return subgraph_dict - - -def create_dict_from_buffer( - buffer: Buffer, *, extended: bool = False -) -> Dict[str, Any]: - buffer_dict: Dict[str, Any] = ( - {"data": buffer.data} if buffer.data is not None else {} - ) - - if extended: - owners_dict: Dict[Union[int, str], Any] = dict() - model = buffer.model - - # track down and tally all owners - for owner in buffer.owners: - subgraph = owner.subgraph - owner_idx = model.subgraphs.index(subgraph) - owners_in_subgraph = owners_dict.setdefault(owner_idx, []) - owners_in_subgraph.append(subgraph.tensors.index(owner)) - - # sort the ordering - owners_dict = dict(sorted(owners_dict.items())) - for subgraph_idx in owners_dict: - owners_dict[subgraph_idx].sort() - - buffer_dict["owners"] = owners_dict - - return buffer_dict - - -def create_dict_from_metadata(metadata: Metadata) -> Dict[str, Union[bytes, str, None]]: - return { - "name": metadata.name, - "data": metadata.data, - } - - -def create_dict_from_model( - model: "XCOREModel", *, extended: bool = False -) -> Dict[str, Any]: - return { - "version": model.version, - "description": model.description, - "metadata": [ - 
create_dict_from_metadata(metadata) for metadata in model.metadata - ], - "buffers": [ - create_dict_from_buffer(buffer, extended=extended) - for buffer in model.buffers - ], - "subgraphs": [ - create_dict_from_subgraph(subgraph, extended=extended) - for subgraph in model.subgraphs - ], - "operator_codes": [ - create_dict_from_operator_code(operator_code) - for operator_code in model.operator_codes - ], - } - - -def builtin_options_to_dict(builtin_options: Any) -> Dict[str, Any]: - dict_ = {camel_to_snake(k): v for k, v in vars(builtin_options).items()} - if "fused_activation_function" in dict_: - dict_["fused_activation_function"] = ActivationFunctionType( - dict_["fused_activation_function"] - ) - if "padding" in dict_: - dict_["padding"] = Padding(dict_["padding"]) - - return dict_ - - -def dict_to_builtin_options(type_: int, dict_: Dict[str, Any]) -> Any: - class_identifier = BuiltinOptions(type_).name + "T" - - builtin_class = getattr(schema, class_identifier) - builtin_options = builtin_class() - - for k, v in dict_.items(): - if isinstance(v, Enum): - v = v.value - - setattr(builtin_options, snake_to_camel(k), v) - - return builtin_options - - -def quantization_to_dict( - quantization: schema.QuantizationParametersT, -) -> Dict[str, Any]: - def value_map(k: str, v: Any) -> Any: - if k == "detailsType": - v = QuantizationDetails(v) - elif isinstance(v, np.ndarray): - v = v.tolist() - return v - - return { - camel_to_snake(k): value_map(k, v) - for k, v in vars(quantization).items() - if v is not None - } - - -def dict_to_quantization(dict_: Dict[str, Any]) -> schema.QuantizationParametersT: - quantization: schema.QuantizationParametersT = schema.QuantizationParametersT() # type: ignore - - for k, v in dict_.items(): - if isinstance(v, Enum): - v = v.value - - setattr(quantization, snake_to_camel(k), v) - - return quantization diff --git a/archived/tflite2xcore/tflite2xcore/xcore_schema/flexbuffers.py b/archived/tflite2xcore/tflite2xcore/xcore_schema/flexbuffers.py deleted file mode 100644 index a17157eb9..000000000 --- a/archived/tflite2xcore/tflite2xcore/xcore_schema/flexbuffers.py +++ /dev/null @@ -1,165 +0,0 @@ -# Copyright 2019-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
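
The dict_conversion helpers removed above lean on two small case-conversion utilities, camel_to_snake and snake_to_camel, imported from tflite2xcore.utils but not shown in this patch. A minimal sketch of what they need to do, so the builtin-options round trip is easier to follow (the regex is an assumption, not the archived implementation):

    import re

    def camel_to_snake(name: str) -> str:
        # "fusedActivationFunction" -> "fused_activation_function"
        return re.sub(r"(?<!^)(?=[A-Z])", "_", name).lower()

    def snake_to_camel(name: str) -> str:
        # "fused_activation_function" -> "fusedActivationFunction"
        first, *rest = name.split("_")
        return first + "".join(word.capitalize() for word in rest)

With these, dict_to_builtin_options looks up the schema object-API class by name (BuiltinOptions(type_).name + "T", e.g. "AddOptions" becomes schema.AddOptionsT) and sets each snake_case key back as a camelCase attribute.
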
- -import json -import struct -import ctypes -import numpy as np -from enum import Enum -from typing import Dict, Any, List, Optional, Union - -from tflite2xcore import libflexbuffers as lib - - -class FlexbufferBuilder: - def __init__(self, data: Optional[Dict[str, Any]] = None) -> None: - lib.new_builder.restype = ctypes.c_void_p - - lib.builder_clear.argtypes = [ctypes.c_void_p] - lib.builder_clear.restype = ctypes.c_void_p - - lib.builder_start_map.argtypes = [ctypes.c_void_p, ctypes.c_char_p] - lib.builder_start_map.restype = ctypes.c_size_t - - lib.builder_end_map.argtypes = [ctypes.c_void_p, ctypes.c_size_t] - lib.builder_string.restype = ctypes.c_size_t - - lib.builder_start_vector.argtypes = [ctypes.c_void_p, ctypes.c_char_p] - lib.builder_string.restype = ctypes.c_size_t - - lib.builder_end_vector.argtypes = [ - ctypes.c_void_p, - ctypes.c_size_t, - ctypes.c_bool, - ctypes.c_bool, - ] - lib.builder_string.restype = ctypes.c_size_t - - lib.builder_int.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_int] - lib.builder_int.restype = ctypes.c_void_p - - lib.builder_uint.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_uint] - lib.builder_uint.restype = ctypes.c_void_p - - lib.builder_bool.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_bool] - lib.builder_bool.restype = ctypes.c_void_p - - lib.builder_float.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_float] - lib.builder_float.restype = ctypes.c_void_p - - lib.builder_string.argtypes = [ - ctypes.c_void_p, - ctypes.c_char_p, - ctypes.c_char_p, - ] - lib.builder_string.restype = ctypes.c_void_p - - lib.builder_vector_int.argtypes = [ctypes.c_void_p, ctypes.c_int] - lib.builder_vector_int.restype = ctypes.c_void_p - - lib.builder_vector_bool.argtypes = [ctypes.c_void_p, ctypes.c_bool] - lib.builder_vector_bool.restype = ctypes.c_void_p - - lib.builder_vector_float.argtypes = [ctypes.c_void_p, ctypes.c_float] - lib.builder_vector_float.restype = ctypes.c_void_p - - lib.builder_vector_string.argtypes = [ctypes.c_void_p, ctypes.c_char_p] - lib.builder_vector_string.restype = ctypes.c_void_p - - lib.builder_finish.argtypes = [ctypes.c_void_p] - lib.builder_finish.restype = ctypes.c_void_p - - lib.builder_get_buffer.argtypes = [ctypes.c_void_p, ctypes.c_char_p] - lib.builder_get_buffer.restype = ctypes.c_size_t - - self.obj = lib.new_builder() - - if data: - self.set_data(data) - - def _add_vector(self, data: List[Any], key_ascii: Optional[bytes] = None) -> int: - size = lib.builder_start_vector(self.obj, key_ascii) - for list_item in data: - if isinstance(list_item, Enum): - list_item = list_item.value - - list_item_type = type(list_item) # TODO: fix this - if np.issubdtype(list_item_type, np.signedinteger): - lib.builder_vector_int(self.obj, int(list_item)) - elif np.issubdtype(list_item_type, np.bool_): - lib.builder_vector_bool(self.obj, bool(list_item)) - elif np.issubdtype(list_item_type, np.floating): - lib.builder_vector_float(self.obj, float(list_item)) - elif list_item_type is str: - lib.builder_vector_string(self.obj, list_item.encode("ascii")) - elif list_item_type is dict: - self._add_map(list_item) - elif list_item_type in (list, tuple, np.ndarray): - self._add_vector(list(list_item)) - else: - raise Exception( - f"Type {list_item_type} not supported (list item={list_item})" - ) - size = lib.builder_end_vector(self.obj, size, False, False) - - return size # type: ignore - - def _add_map(self, data: Dict[str, Any], key_ascii: Optional[bytes] = None) -> int: - msize = 
lib.builder_start_map(self.obj, key_ascii) - - for key, value in data.items(): - key_ascii = key.encode("ascii") - if isinstance(value, Enum): - value = value.value - - value_type = type(value) - if np.issubdtype(value_type, np.signedinteger): - lib.builder_int(self.obj, key_ascii, int(value)) - elif np.issubdtype(value_type, np.bool_): - lib.builder_bool(self.obj, key_ascii, bool(value)) - elif np.issubdtype(value_type, np.floating): - lib.builder_float(self.obj, key_ascii, float(value)) - elif value_type is str: - lib.builder_string(self.obj, key_ascii, value.encode("ascii")) - elif value_type is dict: - self._add_map(value, key_ascii) - elif value_type in (list, tuple, np.ndarray): - self._add_vector(list(value), key_ascii) - else: - raise Exception( - f"Type {value_type} not supported (key={key}, value={value})" - ) - - size = lib.builder_end_map(self.obj, msize) - return size # type: ignore - - def set_data(self, data: Dict[str, Any]) -> None: - lib.builder_clear(self.obj) - - self._add_map(data) - - lib.builder_finish(self.obj) - - def get_bytes(self, size: int = 1024) -> List[int]: - buf = ctypes.create_string_buffer(size) - actual_size = lib.builder_get_buffer(self.obj, buf) - return [int(ubyte[0]) for ubyte in struct.iter_unpack("B", buf[0:actual_size])] # type: ignore - - -class FlexbufferParser: - def __init__(self) -> None: - lib.parse_flexbuffer.argtypes = [ctypes.c_char_p, ctypes.c_size_t, ctypes.c_char_p, ctypes.c_size_t] - lib.parse_flexbuffer.restype = ctypes.c_size_t - - def parse(self, buffer: bytes, size: int = 100000) -> Any: - if not buffer: - return {} - - char_array = ctypes.c_char * len(buffer) - json_buffer = ctypes.create_string_buffer(size) - - actual_size = lib.parse_flexbuffer( - char_array.from_buffer_copy(buffer), len(buffer), json_buffer, size - ) - - return json.loads(json_buffer[0:actual_size]) # type: ignore diff --git a/archived/tflite2xcore/tflite2xcore/xcore_schema/ir_object.py b/archived/tflite2xcore/tflite2xcore/xcore_schema/ir_object.py deleted file mode 100644 index 7edf036e5..000000000 --- a/archived/tflite2xcore/tflite2xcore/xcore_schema/ir_object.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. 
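
The FlexbufferBuilder/FlexbufferParser pair deleted above drives a native libflexbuffers through ctypes, registering every argtypes/restype pair by hand (note the copy-paste slip in the constructor, where lib.builder_string.restype is assigned three times in place of the end_map/start_vector/end_vector restypes). The same round trip is now available in pure Python from the flatbuffers pip package; a sketch, assuming a flatbuffers release recent enough to ship the flexbuffers module (roughly 2.0 onwards):

    from flatbuffers import flexbuffers

    # dict -> flexbuffer bytes, as FlexbufferBuilder.get_bytes() produced
    options = {"illegal_params": True, "pads": [1, 1, 0, 0]}
    buf = flexbuffers.Dumps(options)

    # flexbuffer bytes -> Python objects, as FlexbufferParser.parse()
    # produced via its intermediate JSON buffer
    print(flexbuffers.Loads(buf))  # -> {'illegal_params': True, 'pads': [1, 1, 0, 0]}
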
- -from abc import ABC, abstractmethod -from typing import TYPE_CHECKING, TypeVar, Sequence, List, Any, Optional - -_S = TypeVar("_S", bound="_IRObject") - -if TYPE_CHECKING: - from .xcore_model import XCOREModel, Subgraph - - -class _IRObject(ABC): - def __init__(self, name: Optional[str] = None) -> None: - self.name = name or "" - - @abstractmethod - def sanity_check(self) -> None: - raise NotImplementedError() - - @staticmethod - def sequence_equal(l1: Sequence[_S], l2: Sequence[_S]) -> bool: - return len(l1) == len(l2) and all(a.is_equal(b) for a, b in zip(l1, l2)) - - @staticmethod - def _remove_if_contained(ll: List[_S], obj: _S) -> None: - try: - ll.remove(obj) - except ValueError: - pass - - def is_equal(self, other: Any) -> bool: - if type(self) is type(other): - self.sanity_check() - other.sanity_check() - return True - return False - - -class _ModelDependent(_IRObject): - _model: "XCOREModel" - - def __init__( - self, name: Optional[str] = None, model: Optional["XCOREModel"] = None - ): - super().__init__(name) - if model: - model.register_dependent(self) - - @property - def model(self) -> "XCOREModel": - return self._model - - -class _SubgraphDependent(_IRObject): - _subgraph: "Subgraph" - - @property - def subgraph(self) -> "Subgraph": - return self._subgraph - - @property - def model(self) -> "XCOREModel": - return self.subgraph._model diff --git a/archived/tflite2xcore/tflite2xcore/xcore_schema/misc_enums.py b/archived/tflite2xcore/tflite2xcore/xcore_schema/misc_enums.py deleted file mode 100644 index c88b8decd..000000000 --- a/archived/tflite2xcore/tflite2xcore/xcore_schema/misc_enums.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -# type: ignore - -from enum import Enum - -from . import schema_py_generated as schema - - -ActivationFunctionType = Enum( - "ActivationFunctionType", - { - k: v - for k, v in vars(schema.ActivationFunctionType).items() - if not k.startswith("__") - }, -) - - -QuantizationDetails = Enum( - "QuantizationDetails", - { - k: v - for k, v in vars(schema.QuantizationDetails).items() - if not k.startswith("__") - }, -) - - -FullyConnectedOptionsWeightsFormat = Enum( - "FullyConnectedOptionsWeightsFormat", - { - k: v - for k, v in vars(schema.FullyConnectedOptionsWeightsFormat).items() - if not k.startswith("__") - }, -) - -Padding = Enum( - "Padding", {k: v for k, v in vars(schema.Padding).items() if not k.startswith("__")} -) diff --git a/archived/tflite2xcore/tflite2xcore/xcore_schema/misc_enums.pyi b/archived/tflite2xcore/tflite2xcore/xcore_schema/misc_enums.pyi deleted file mode 100644 index f59f179a6..000000000 --- a/archived/tflite2xcore/tflite2xcore/xcore_schema/misc_enums.pyi +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) 2019-2020, XMOS Ltd, All rights reserved - -from enum import IntEnum - -# TODO: consider adding fields to this enums for IDE support - -class ActivationFunctionType(IntEnum): - NONE: ActivationFunctionType - RELU: ActivationFunctionType - RELU_N1_TO_1: ActivationFunctionType - RELU6: ActivationFunctionType - TANH: ActivationFunctionType - SIGN_BIT: ActivationFunctionType - -class QuantizationDetails(IntEnum): ... -class FullyConnectedOptionsWeightsFormat(IntEnum): ... 
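
misc_enums.py above shows the idiom this whole xcore_schema package is built on: flatc's generated Python "enums" (schema.Padding and friends) are plain classes holding int class attributes, so the archived code lifts them into real enum.Enum types by filtering the dunder names out of vars(). A self-contained illustration with a stand-in class (the _SchemaPadding mock replaces the schema import):

    from enum import Enum

    class _SchemaPadding:  # stand-in for flatc-generated schema.Padding
        SAME = 0
        VALID = 1

    Padding = Enum(
        "Padding",
        {k: v for k, v in vars(_SchemaPadding).items() if not k.startswith("__")},
    )

    assert Padding(0) is Padding.SAME
    assert Padding.VALID.value == 1

The functional Enum() call accepts this name-to-value mapping directly, which is why the same one-liner serves ActivationFunctionType, QuantizationDetails, FullyConnectedOptionsWeightsFormat, Padding, and BuiltinOpCodes alike.
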
- -class Padding(IntEnum): - SAME: Padding - VALID: Padding diff --git a/archived/tflite2xcore/tflite2xcore/xcore_schema/op_codes.py b/archived/tflite2xcore/tflite2xcore/xcore_schema/op_codes.py deleted file mode 100644 index e8d2f12b4..000000000 --- a/archived/tflite2xcore/tflite2xcore/xcore_schema/op_codes.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -# type: ignore - -import enum -import aenum -from typing import Union - -from . import schema_py_generated as schema - -BuiltinOpCodes = enum.Enum( - "BuiltinOpCodes", - {k: v for k, v in vars(schema.BuiltinOperator).items() if not k.startswith("__")}, -) - - -class ExternalOpCodes(aenum.Enum): - def _generate_next_value_(name: str, *_) -> str: # pylint: disable=no-self-argument - return name - - LceQuantize = aenum.auto() - LceBconv2d = aenum.auto() - LceDequantize = aenum.auto() - - @classmethod - def add_new_opcode(cls, name: str) -> "ExternalOpCodes": - assert name.isidentifier() - try: - return cls[name] - except KeyError: - aenum.extend_enum(cls, name) - return cls[name] - - -class XCOREOpCodes(enum.Enum): - def _generate_next_value_(name: str, *_) -> str: # pylint: disable=no-self-argument - return name - - DUMMY = enum.auto() # for testing purposes only - XC_lookup_8 = enum.auto() - XC_argmax_16 = enum.auto() # currently not used by any passes - XC_maxpool2d = enum.auto() - XC_avgpool2d = enum.auto() - XC_avgpool2d_global = enum.auto() - XC_fc = enum.auto() - XC_requantize_16_to_8 = enum.auto() # currently unused - XC_conv2d_shallowin = enum.auto() - XC_conv2d_deep = enum.auto() - XC_conv2d_1x1 = enum.auto() - XC_conv2d_depthwise = enum.auto() - XC_bsign_8 = enum.auto() - XC_bconv2d_int8 = enum.auto() - XC_bconv2d_int8_DIDO = enum.auto() - XC_bconv2d_bin = enum.auto() - XC_bconv2d_bin_DI = enum.auto() - XC_add_8 = enum.auto() - XC_pad = enum.auto() - XC_fetch = enum.auto() diff --git a/archived/tflite2xcore/tflite2xcore/xcore_schema/op_codes.pyi b/archived/tflite2xcore/tflite2xcore/xcore_schema/op_codes.pyi deleted file mode 100644 index 515be397d..000000000 --- a/archived/tflite2xcore/tflite2xcore/xcore_schema/op_codes.pyi +++ /dev/null @@ -1,163 +0,0 @@ -# Copyright (c) 2020, XMOS Ltd, All rights reserved - -import enum -import aenum -from typing import Type - -class BuiltinOpCodes(enum.IntEnum): - ADD: BuiltinOpCodes - AVERAGE_POOL_2D: BuiltinOpCodes - CONCATENATION: BuiltinOpCodes - CONV_2D: BuiltinOpCodes - DEPTHWISE_CONV_2D: BuiltinOpCodes - DEPTH_TO_SPACE: BuiltinOpCodes - DEQUANTIZE: BuiltinOpCodes - EMBEDDING_LOOKUP: BuiltinOpCodes - FLOOR: BuiltinOpCodes - FULLY_CONNECTED: BuiltinOpCodes - HASHTABLE_LOOKUP: BuiltinOpCodes - L2_NORMALIZATION: BuiltinOpCodes - L2_POOL_2D: BuiltinOpCodes - LOCAL_RESPONSE_NORMALIZATION: BuiltinOpCodes - LOGISTIC: BuiltinOpCodes - LSH_PROJECTION: BuiltinOpCodes - LSTM: BuiltinOpCodes - MAX_POOL_2D: BuiltinOpCodes - MUL: BuiltinOpCodes - RELU: BuiltinOpCodes - RELU_N1_TO_1: BuiltinOpCodes - RELU6: BuiltinOpCodes - RESHAPE: BuiltinOpCodes - RESIZE_BILINEAR: BuiltinOpCodes - RNN: BuiltinOpCodes - SOFTMAX: BuiltinOpCodes - SPACE_TO_DEPTH: BuiltinOpCodes - SVDF: BuiltinOpCodes - TANH: BuiltinOpCodes - CONCAT_EMBEDDINGS: BuiltinOpCodes - SKIP_GRAM: BuiltinOpCodes - CALL: BuiltinOpCodes - CUSTOM: BuiltinOpCodes - EMBEDDING_LOOKUP_SPARSE: BuiltinOpCodes - PAD: BuiltinOpCodes - UNIDIRECTIONAL_SEQUENCE_RNN: BuiltinOpCodes - GATHER: BuiltinOpCodes - BATCH_TO_SPACE_ND: BuiltinOpCodes - 
SPACE_TO_BATCH_ND: BuiltinOpCodes - TRANSPOSE: BuiltinOpCodes - MEAN: BuiltinOpCodes - SUB: BuiltinOpCodes - DIV: BuiltinOpCodes - SQUEEZE: BuiltinOpCodes - UNIDIRECTIONAL_SEQUENCE_LSTM: BuiltinOpCodes - STRIDED_SLICE: BuiltinOpCodes - BIDIRECTIONAL_SEQUENCE_RNN: BuiltinOpCodes - EXP: BuiltinOpCodes - TOPK_V2: BuiltinOpCodes - SPLIT: BuiltinOpCodes - LOG_SOFTMAX: BuiltinOpCodes - DELEGATE: BuiltinOpCodes - BIDIRECTIONAL_SEQUENCE_LSTM: BuiltinOpCodes - CAST: BuiltinOpCodes - PRELU: BuiltinOpCodes - MAXIMUM: BuiltinOpCodes - ARG_MAX: BuiltinOpCodes - MINIMUM: BuiltinOpCodes - LESS: BuiltinOpCodes - NEG: BuiltinOpCodes - PADV2: BuiltinOpCodes - GREATER: BuiltinOpCodes - GREATER_EQUAL: BuiltinOpCodes - LESS_EQUAL: BuiltinOpCodes - SELECT: BuiltinOpCodes - SLICE: BuiltinOpCodes - SIN: BuiltinOpCodes - TRANSPOSE_CONV: BuiltinOpCodes - SPARSE_TO_DENSE: BuiltinOpCodes - TILE: BuiltinOpCodes - EXPAND_DIMS: BuiltinOpCodes - EQUAL: BuiltinOpCodes - NOT_EQUAL: BuiltinOpCodes - LOG: BuiltinOpCodes - SUM: BuiltinOpCodes - SQRT: BuiltinOpCodes - RSQRT: BuiltinOpCodes - SHAPE: BuiltinOpCodes - POW: BuiltinOpCodes - ARG_MIN: BuiltinOpCodes - FAKE_QUANT: BuiltinOpCodes - REDUCE_PROD: BuiltinOpCodes - REDUCE_MAX: BuiltinOpCodes - PACK: BuiltinOpCodes - LOGICAL_OR: BuiltinOpCodes - ONE_HOT: BuiltinOpCodes - LOGICAL_AND: BuiltinOpCodes - LOGICAL_NOT: BuiltinOpCodes - UNPACK: BuiltinOpCodes - REDUCE_MIN: BuiltinOpCodes - FLOOR_DIV: BuiltinOpCodes - REDUCE_ANY: BuiltinOpCodes - SQUARE: BuiltinOpCodes - ZEROS_LIKE: BuiltinOpCodes - FILL: BuiltinOpCodes - FLOOR_MOD: BuiltinOpCodes - RANGE: BuiltinOpCodes - RESIZE_NEAREST_NEIGHBOR: BuiltinOpCodes - LEAKY_RELU: BuiltinOpCodes - SQUARED_DIFFERENCE: BuiltinOpCodes - MIRROR_PAD: BuiltinOpCodes - ABS: BuiltinOpCodes - SPLIT_V: BuiltinOpCodes - UNIQUE: BuiltinOpCodes - CEIL: BuiltinOpCodes - REVERSE_V2: BuiltinOpCodes - ADD_N: BuiltinOpCodes - GATHER_ND: BuiltinOpCodes - COS: BuiltinOpCodes - WHERE: BuiltinOpCodes - RANK: BuiltinOpCodes - ELU: BuiltinOpCodes - REVERSE_SEQUENCE: BuiltinOpCodes - MATRIX_DIAG: BuiltinOpCodes - QUANTIZE: BuiltinOpCodes - MATRIX_SET_DIAG: BuiltinOpCodes - ROUND: BuiltinOpCodes - HARD_SWISH: BuiltinOpCodes - IF: BuiltinOpCodes - WHILE: BuiltinOpCodes - NON_MAX_SUPPRESSION_V4: BuiltinOpCodes - NON_MAX_SUPPRESSION_V5: BuiltinOpCodes - SCATTER_ND: BuiltinOpCodes - SELECT_V2: BuiltinOpCodes - DENSIFY: BuiltinOpCodes - SEGMENT_SUM: BuiltinOpCodes - BATCH_MATMUL: BuiltinOpCodes - -class ExternalOpCodes(aenum.Enum): # type: ignore - LceQuantize: ExternalOpCodes - LceBconv2d: ExternalOpCodes - LceDequantize: ExternalOpCodes - @classmethod - def add_new_opcode(cls: Type[ExternalOpCodes], name: str) -> ExternalOpCodes: ... 
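
ExternalOpCodes is the one deliberately mutable enum in the package: add_new_opcode registers previously unseen custom opcodes at runtime, which standard enum.Enum forbids but aenum allows via extend_enum. A minimal sketch of that mechanism under the same name-equals-value convention (the class and opcode names here are illustrative, not the archived definitions):

    import aenum

    class ExternalOps(aenum.Enum):  # illustrative stand-in for ExternalOpCodes
        LceQuantize = "LceQuantize"

    def add_new_opcode(name: str) -> ExternalOps:
        assert name.isidentifier()
        try:
            return ExternalOps[name]  # already registered
        except KeyError:
            # extend_enum mutates the enum class in place, adding a member
            aenum.extend_enum(ExternalOps, name, name)
            return ExternalOps[name]

    assert add_new_opcode("LceBconv2d") is ExternalOps["LceBconv2d"]
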
- -class XCOREOpCodes(enum.Enum): - DUMMY: XCOREOpCodes - XC_lookup_8: XCOREOpCodes - XC_argmax_16: XCOREOpCodes - XC_maxpool2d: XCOREOpCodes - XC_avgpool2d: XCOREOpCodes - XC_avgpool2d_global: XCOREOpCodes - XC_fc: XCOREOpCodes - XC_requantize_16_to_8: XCOREOpCodes - XC_conv2d_shallowin: XCOREOpCodes - XC_conv2d_deep: XCOREOpCodes - XC_conv2d_1x1: XCOREOpCodes - XC_conv2d_depthwise: XCOREOpCodes - XC_bsign_8: XCOREOpCodes - XC_bconv2d_int8: XCOREOpCodes - XC_bconv2d_int8_DIDO: XCOREOpCodes - XC_bconv2d_bin: XCOREOpCodes - XC_bconv2d_bin_DI: XCOREOpCodes - XC_add_8: XCOREOpCodes - XC_pad: XCOREOpCodes - XC_fetch: XCOREOpCodes diff --git a/archived/tflite2xcore/tflite2xcore/xcore_schema/operator.py b/archived/tflite2xcore/tflite2xcore/xcore_schema/operator.py deleted file mode 100644 index 9eb01e6b9..000000000 --- a/archived/tflite2xcore/tflite2xcore/xcore_schema/operator.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -from typing import TYPE_CHECKING, Optional, Dict, Any, Iterable - -if TYPE_CHECKING: - from . import Tensor, Subgraph, XCOREModel - -from . import _IRObject, OperatorCode - - -_OpOptionsType = Dict[str, Any] - - -class Operator(_IRObject): - name: str - - def __init__( - self, - subgraph: "Subgraph", - operator_code: OperatorCode, - name: Optional[str] = None, - inputs: Optional[Iterable["Tensor"]] = None, - outputs: Optional[Iterable["Tensor"]] = None, - builtin_options: Optional[_OpOptionsType] = None, - custom_options: Optional[_OpOptionsType] = None, - ) -> None: - # Generally, do not use this constructor to instantiate Operator! - # Use Subgraph.create_operator instead. - - super().__init__(name or "") - self.subgraph = subgraph # parent - self.operator_code = operator_code - self.inputs = list(inputs or []) - self.outputs = list(outputs or []) - self.builtin_options = builtin_options or {} - self.custom_options = custom_options or {} - - def add_custom_options(self, **kwargs: Any) -> None: - if kwargs: - self.custom_options.update(kwargs) - - @property - def model(self) -> "XCOREModel": - return self.subgraph.model - - def __str__(self) -> str: - return f"({self.subgraph.operators.index(self)}) operator_code={self.operator_code}" - - def is_equal(self, other: Any) -> bool: - return ( - super().is_equal(other) - and self.operator_code == other.operator_code - # and self.name == other.name # intentionally not compared - and self.sequence_equal(self.inputs, other.inputs) - and self.sequence_equal(self.outputs, other.outputs) - and self.builtin_options == other.builtin_options - and self.custom_options == other.custom_options - ) - - def sanity_check(self) -> None: - assert self in self.subgraph.operators - # check double links with inputs/outputs - for tensor in self.inputs: - assert self in tensor.consumers - for tensor in self.outputs: - assert self in tensor.producers diff --git a/archived/tflite2xcore/tflite2xcore/xcore_schema/operator_code.py b/archived/tflite2xcore/tflite2xcore/xcore_schema/operator_code.py deleted file mode 100644 index 7c65a630b..000000000 --- a/archived/tflite2xcore/tflite2xcore/xcore_schema/operator_code.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -from typing import Optional, Union, Any - -from . 
import XCOREOpCodes, ExternalOpCodes, BuiltinOpCodes - -CustomOpCodes = Union[XCOREOpCodes, ExternalOpCodes] - -ValidOpCodes = Union[BuiltinOpCodes, CustomOpCodes] - - -class OperatorCode: - def __init__(self, opcode: ValidOpCodes, *, version: Optional[int] = None) -> None: - self.version = version or 1 - self.code = opcode - - @property - def name(self) -> str: - return self.code.name - - @property - def value(self) -> Union[int, str]: - return self.code.value - - def __eq__(self, obj: Any) -> bool: - return ( - isinstance(obj, OperatorCode) - and obj.code is self.code - and obj.version == self.version - ) - - def __hash__(self) -> int: - return hash(str(self)) - - def __str__(self) -> str: - return f"{self.code} (version {self.version})" diff --git a/archived/tflite2xcore/tflite2xcore/xcore_schema/schema_py_generated.py b/archived/tflite2xcore/tflite2xcore/xcore_schema/schema_py_generated.py deleted file mode 100644 index a54cbac3c..000000000 --- a/archived/tflite2xcore/tflite2xcore/xcore_schema/schema_py_generated.py +++ /dev/null @@ -1,9738 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. -import flatbuffers -import numpy as np - -class BatchToSpaceNDOptions(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsBatchToSpaceNDOptions(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = BatchToSpaceNDOptions() - x.Init(buf, n + offset) - return x - - @classmethod - def BatchToSpaceNDOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) - - # BatchToSpaceNDOptions - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - -def BatchToSpaceNDOptionsStart(builder): builder.StartObject(0) -def BatchToSpaceNDOptionsEnd(builder): return builder.EndObject() - - -class BatchToSpaceNDOptionsT(object): - - # BatchToSpaceNDOptionsT - def __init__(self): - pass - - @classmethod - def InitFromBuf(cls, buf, pos): - batchToSpaceNDOptions = BatchToSpaceNDOptions() - batchToSpaceNDOptions.Init(buf, pos) - return cls.InitFromObj(batchToSpaceNDOptions) - - @classmethod - def InitFromObj(cls, batchToSpaceNDOptions): - x = BatchToSpaceNDOptionsT() - x._UnPack(batchToSpaceNDOptions) - return x - - # BatchToSpaceNDOptionsT - def _UnPack(self, batchToSpaceNDOptions): - if batchToSpaceNDOptions is None: - return - - # BatchToSpaceNDOptionsT - def Pack(self, builder): - BatchToSpaceNDOptionsStart(builder) - batchToSpaceNDOptions = BatchToSpaceNDOptionsEnd(builder) - return batchToSpaceNDOptions - - - -class MirrorPadOptions(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsMirrorPadOptions(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = MirrorPadOptions() - x.Init(buf, n + offset) - return x - - @classmethod - def MirrorPadOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) - - # MirrorPadOptions - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - - # MirrorPadOptions - def Mode(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) - return 0 - -def MirrorPadOptionsStart(builder): builder.StartObject(1) -def 
MirrorPadOptionsAddMode(builder, mode): builder.PrependInt8Slot(0, mode, 0) -def MirrorPadOptionsEnd(builder): return builder.EndObject() - - -class MirrorPadOptionsT(object): - - # MirrorPadOptionsT - def __init__(self): - self.mode = 0 # type: int - - @classmethod - def InitFromBuf(cls, buf, pos): - mirrorPadOptions = MirrorPadOptions() - mirrorPadOptions.Init(buf, pos) - return cls.InitFromObj(mirrorPadOptions) - - @classmethod - def InitFromObj(cls, mirrorPadOptions): - x = MirrorPadOptionsT() - x._UnPack(mirrorPadOptions) - return x - - # MirrorPadOptionsT - def _UnPack(self, mirrorPadOptions): - if mirrorPadOptions is None: - return - self.mode = mirrorPadOptions.Mode() - - # MirrorPadOptionsT - def Pack(self, builder): - MirrorPadOptionsStart(builder) - MirrorPadOptionsAddMode(builder, self.mode) - mirrorPadOptions = MirrorPadOptionsEnd(builder) - return mirrorPadOptions - - - -class ArgMaxOptions(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsArgMaxOptions(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = ArgMaxOptions() - x.Init(buf, n + offset) - return x - - @classmethod - def ArgMaxOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) - - # ArgMaxOptions - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - - # ArgMaxOptions - def OutputType(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) - return 0 - -def ArgMaxOptionsStart(builder): builder.StartObject(1) -def ArgMaxOptionsAddOutputType(builder, outputType): builder.PrependInt8Slot(0, outputType, 0) -def ArgMaxOptionsEnd(builder): return builder.EndObject() - - -class ArgMaxOptionsT(object): - - # ArgMaxOptionsT - def __init__(self): - self.outputType = 0 # type: int - - @classmethod - def InitFromBuf(cls, buf, pos): - argMaxOptions = ArgMaxOptions() - argMaxOptions.Init(buf, pos) - return cls.InitFromObj(argMaxOptions) - - @classmethod - def InitFromObj(cls, argMaxOptions): - x = ArgMaxOptionsT() - x._UnPack(argMaxOptions) - return x - - # ArgMaxOptionsT - def _UnPack(self, argMaxOptions): - if argMaxOptions is None: - return - self.outputType = argMaxOptions.OutputType() - - # ArgMaxOptionsT - def Pack(self, builder): - ArgMaxOptionsStart(builder) - ArgMaxOptionsAddOutputType(builder, self.outputType) - argMaxOptions = ArgMaxOptionsEnd(builder) - return argMaxOptions - - - -class ReducerOptions(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsReducerOptions(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = ReducerOptions() - x.Init(buf, n + offset) - return x - - @classmethod - def ReducerOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) - - # ReducerOptions - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - - # ReducerOptions - def KeepDims(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) - return False - -def ReducerOptionsStart(builder): builder.StartObject(1) -def ReducerOptionsAddKeepDims(builder, keepDims): builder.PrependBoolSlot(0, keepDims, 0) -def 
ReducerOptionsEnd(builder): return builder.EndObject() - - -class ReducerOptionsT(object): - - # ReducerOptionsT - def __init__(self): - self.keepDims = False # type: bool - - @classmethod - def InitFromBuf(cls, buf, pos): - reducerOptions = ReducerOptions() - reducerOptions.Init(buf, pos) - return cls.InitFromObj(reducerOptions) - - @classmethod - def InitFromObj(cls, reducerOptions): - x = ReducerOptionsT() - x._UnPack(reducerOptions) - return x - - # ReducerOptionsT - def _UnPack(self, reducerOptions): - if reducerOptions is None: - return - self.keepDims = reducerOptions.KeepDims() - - # ReducerOptionsT - def Pack(self, builder): - ReducerOptionsStart(builder) - ReducerOptionsAddKeepDims(builder, self.keepDims) - reducerOptions = ReducerOptionsEnd(builder) - return reducerOptions - - - -class QuantizeOptions(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsQuantizeOptions(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = QuantizeOptions() - x.Init(buf, n + offset) - return x - - @classmethod - def QuantizeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) - - # QuantizeOptions - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - -def QuantizeOptionsStart(builder): builder.StartObject(0) -def QuantizeOptionsEnd(builder): return builder.EndObject() - - -class QuantizeOptionsT(object): - - # QuantizeOptionsT - def __init__(self): - pass - - @classmethod - def InitFromBuf(cls, buf, pos): - quantizeOptions = QuantizeOptions() - quantizeOptions.Init(buf, pos) - return cls.InitFromObj(quantizeOptions) - - @classmethod - def InitFromObj(cls, quantizeOptions): - x = QuantizeOptionsT() - x._UnPack(quantizeOptions) - return x - - # QuantizeOptionsT - def _UnPack(self, quantizeOptions): - if quantizeOptions is None: - return - - # QuantizeOptionsT - def Pack(self, builder): - QuantizeOptionsStart(builder) - quantizeOptions = QuantizeOptionsEnd(builder) - return quantizeOptions - - - -class SubGraph(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsSubGraph(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = SubGraph() - x.Init(buf, n + offset) - return x - - @classmethod - def SubGraphBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) - - # SubGraph - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - - # SubGraph - def Tensors(self, j): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - x = self._tab.Vector(o) - x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 - x = self._tab.Indirect(x) - obj = Tensor() - obj.Init(self._tab.Bytes, x) - return obj - return None - - # SubGraph - def TensorsLength(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - return self._tab.VectorLen(o) - return 0 - - # SubGraph - def TensorsIsNone(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - return o == 0 - - # SubGraph - def Inputs(self, j): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) - if o != 0: - a = self._tab.Vector(o) - return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) - return 0 - - # 
SubGraph - def InputsAsNumpy(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) - if o != 0: - return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) - return 0 - - # SubGraph - def InputsLength(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) - if o != 0: - return self._tab.VectorLen(o) - return 0 - - # SubGraph - def InputsIsNone(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) - return o == 0 - - # SubGraph - def Outputs(self, j): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) - if o != 0: - a = self._tab.Vector(o) - return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) - return 0 - - # SubGraph - def OutputsAsNumpy(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) - if o != 0: - return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) - return 0 - - # SubGraph - def OutputsLength(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) - if o != 0: - return self._tab.VectorLen(o) - return 0 - - # SubGraph - def OutputsIsNone(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) - return o == 0 - - # SubGraph - def Operators(self, j): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) - if o != 0: - x = self._tab.Vector(o) - x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 - x = self._tab.Indirect(x) - obj = Operator() - obj.Init(self._tab.Bytes, x) - return obj - return None - - # SubGraph - def OperatorsLength(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) - if o != 0: - return self._tab.VectorLen(o) - return 0 - - # SubGraph - def OperatorsIsNone(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) - return o == 0 - - # SubGraph - def Name(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) - if o != 0: - return self._tab.String(o + self._tab.Pos) - return None - -def SubGraphStart(builder): builder.StartObject(5) -def SubGraphAddTensors(builder, tensors): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(tensors), 0) -def SubGraphStartTensorsVector(builder, numElems): return builder.StartVector(4, numElems, 4) -def SubGraphAddInputs(builder, inputs): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0) -def SubGraphStartInputsVector(builder, numElems): return builder.StartVector(4, numElems, 4) -def SubGraphAddOutputs(builder, outputs): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0) -def SubGraphStartOutputsVector(builder, numElems): return builder.StartVector(4, numElems, 4) -def SubGraphAddOperators(builder, operators): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(operators), 0) -def SubGraphStartOperatorsVector(builder, numElems): return builder.StartVector(4, numElems, 4) -def SubGraphAddName(builder, name): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0) -def SubGraphEnd(builder): return builder.EndObject() - -try: - from typing import List -except: - pass - -class SubGraphT(object): - - # SubGraphT - def __init__(self): - self.tensors = None # type: List[TensorT] - self.inputs = None # type: List[int] - self.outputs = None # type: 
List[int] - self.operators = None # type: List[OperatorT] - self.name = None # type: str - - @classmethod - def InitFromBuf(cls, buf, pos): - subGraph = SubGraph() - subGraph.Init(buf, pos) - return cls.InitFromObj(subGraph) - - @classmethod - def InitFromObj(cls, subGraph): - x = SubGraphT() - x._UnPack(subGraph) - return x - - # SubGraphT - def _UnPack(self, subGraph): - if subGraph is None: - return - if not subGraph.TensorsIsNone(): - self.tensors = [] - for i in range(subGraph.TensorsLength()): - if subGraph.Tensors(i) is None: - self.tensors.append(None) - else: - tensor_ = TensorT.InitFromObj(subGraph.Tensors(i)) - self.tensors.append(tensor_) - if not subGraph.InputsIsNone(): - if np is None: - self.inputs = [] - for i in range(subGraph.InputsLength()): - self.inputs.append(subGraph.Inputs(i)) - else: - self.inputs = subGraph.InputsAsNumpy() - if not subGraph.OutputsIsNone(): - if np is None: - self.outputs = [] - for i in range(subGraph.OutputsLength()): - self.outputs.append(subGraph.Outputs(i)) - else: - self.outputs = subGraph.OutputsAsNumpy() - if not subGraph.OperatorsIsNone(): - self.operators = [] - for i in range(subGraph.OperatorsLength()): - if subGraph.Operators(i) is None: - self.operators.append(None) - else: - operator_ = OperatorT.InitFromObj(subGraph.Operators(i)) - self.operators.append(operator_) - self.name = subGraph.Name() - - # SubGraphT - def Pack(self, builder): - if self.tensors is not None: - tensorslist = [] - for i in range(len(self.tensors)): - tensorslist.append(self.tensors[i].Pack(builder)) - SubGraphStartTensorsVector(builder, len(self.tensors)) - for i in reversed(range(len(self.tensors))): - builder.PrependUOffsetTRelative(tensorslist[i]) - tensors = builder.EndVector(len(self.tensors)) - if self.inputs is not None: - if np is not None and type(self.inputs) is np.ndarray: - inputs = builder.CreateNumpyVector(self.inputs) - else: - SubGraphStartInputsVector(builder, len(self.inputs)) - for i in reversed(range(len(self.inputs))): - builder.PrependInt32(self.inputs[i]) - inputs = builder.EndVector(len(self.inputs)) - if self.outputs is not None: - if np is not None and type(self.outputs) is np.ndarray: - outputs = builder.CreateNumpyVector(self.outputs) - else: - SubGraphStartOutputsVector(builder, len(self.outputs)) - for i in reversed(range(len(self.outputs))): - builder.PrependInt32(self.outputs[i]) - outputs = builder.EndVector(len(self.outputs)) - if self.operators is not None: - operatorslist = [] - for i in range(len(self.operators)): - operatorslist.append(self.operators[i].Pack(builder)) - SubGraphStartOperatorsVector(builder, len(self.operators)) - for i in reversed(range(len(self.operators))): - builder.PrependUOffsetTRelative(operatorslist[i]) - operators = builder.EndVector(len(self.operators)) - if self.name is not None: - name = builder.CreateString(self.name) - SubGraphStart(builder) - if self.tensors is not None: - SubGraphAddTensors(builder, tensors) - if self.inputs is not None: - SubGraphAddInputs(builder, inputs) - if self.outputs is not None: - SubGraphAddOutputs(builder, outputs) - if self.operators is not None: - SubGraphAddOperators(builder, operators) - if self.name is not None: - SubGraphAddName(builder, name) - subGraph = SubGraphEnd(builder) - return subGraph - - - -class SquareOptions(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsSquareOptions(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = SquareOptions() - x.Init(buf, n + offset) - return x - - 
@classmethod - def SquareOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) - - # SquareOptions - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - -def SquareOptionsStart(builder): builder.StartObject(0) -def SquareOptionsEnd(builder): return builder.EndObject() - - -class SquareOptionsT(object): - - # SquareOptionsT - def __init__(self): - pass - - @classmethod - def InitFromBuf(cls, buf, pos): - squareOptions = SquareOptions() - squareOptions.Init(buf, pos) - return cls.InitFromObj(squareOptions) - - @classmethod - def InitFromObj(cls, squareOptions): - x = SquareOptionsT() - x._UnPack(squareOptions) - return x - - # SquareOptionsT - def _UnPack(self, squareOptions): - if squareOptions is None: - return - - # SquareOptionsT - def Pack(self, builder): - SquareOptionsStart(builder) - squareOptions = SquareOptionsEnd(builder) - return squareOptions - - - -class OperatorCode(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsOperatorCode(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = OperatorCode() - x.Init(buf, n + offset) - return x - - @classmethod - def OperatorCodeBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) - - # OperatorCode - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - - # OperatorCode - def BuiltinCode(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) - return 0 - - # OperatorCode - def CustomCode(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) - if o != 0: - return self._tab.String(o + self._tab.Pos) - return None - - # OperatorCode - def Version(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) - if o != 0: - return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) - return 1 - -def OperatorCodeStart(builder): builder.StartObject(3) -def OperatorCodeAddBuiltinCode(builder, builtinCode): builder.PrependInt8Slot(0, builtinCode, 0) -def OperatorCodeAddCustomCode(builder, customCode): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(customCode), 0) -def OperatorCodeAddVersion(builder, version): builder.PrependInt32Slot(2, version, 1) -def OperatorCodeEnd(builder): return builder.EndObject() - - -class OperatorCodeT(object): - - # OperatorCodeT - def __init__(self): - self.builtinCode = 0 # type: int - self.customCode = None # type: str - self.version = 1 # type: int - - @classmethod - def InitFromBuf(cls, buf, pos): - operatorCode = OperatorCode() - operatorCode.Init(buf, pos) - return cls.InitFromObj(operatorCode) - - @classmethod - def InitFromObj(cls, operatorCode): - x = OperatorCodeT() - x._UnPack(operatorCode) - return x - - # OperatorCodeT - def _UnPack(self, operatorCode): - if operatorCode is None: - return - self.builtinCode = operatorCode.BuiltinCode() - self.customCode = operatorCode.CustomCode() - self.version = operatorCode.Version() - - # OperatorCodeT - def Pack(self, builder): - if self.customCode is not None: - customCode = builder.CreateString(self.customCode) - OperatorCodeStart(builder) - OperatorCodeAddBuiltinCode(builder, self.builtinCode) - if self.customCode 
is not None: - OperatorCodeAddCustomCode(builder, customCode) - OperatorCodeAddVersion(builder, self.version) - operatorCode = OperatorCodeEnd(builder) - return operatorCode - - - -class Metadata(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsMetadata(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = Metadata() - x.Init(buf, n + offset) - return x - - @classmethod - def MetadataBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) - - # Metadata - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - - # Metadata - def Name(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - return self._tab.String(o + self._tab.Pos) - return None - - # Metadata - def Buffer(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) - if o != 0: - return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos) - return 0 - -def MetadataStart(builder): builder.StartObject(2) -def MetadataAddName(builder, name): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0) -def MetadataAddBuffer(builder, buffer): builder.PrependUint32Slot(1, buffer, 0) -def MetadataEnd(builder): return builder.EndObject() - - -class MetadataT(object): - - # MetadataT - def __init__(self): - self.name = None # type: str - self.buffer = 0 # type: int - - @classmethod - def InitFromBuf(cls, buf, pos): - metadata = Metadata() - metadata.Init(buf, pos) - return cls.InitFromObj(metadata) - - @classmethod - def InitFromObj(cls, metadata): - x = MetadataT() - x._UnPack(metadata) - return x - - # MetadataT - def _UnPack(self, metadata): - if metadata is None: - return - self.name = metadata.Name() - self.buffer = metadata.Buffer() - - # MetadataT - def Pack(self, builder): - if self.name is not None: - name = builder.CreateString(self.name) - MetadataStart(builder) - if self.name is not None: - MetadataAddName(builder, name) - MetadataAddBuffer(builder, self.buffer) - metadata = MetadataEnd(builder) - return metadata - - - -class TopKV2Options(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsTopKV2Options(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = TopKV2Options() - x.Init(buf, n + offset) - return x - - @classmethod - def TopKV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) - - # TopKV2Options - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - -def TopKV2OptionsStart(builder): builder.StartObject(0) -def TopKV2OptionsEnd(builder): return builder.EndObject() - - -class TopKV2OptionsT(object): - - # TopKV2OptionsT - def __init__(self): - pass - - @classmethod - def InitFromBuf(cls, buf, pos): - topKV2Options = TopKV2Options() - topKV2Options.Init(buf, pos) - return cls.InitFromObj(topKV2Options) - - @classmethod - def InitFromObj(cls, topKV2Options): - x = TopKV2OptionsT() - x._UnPack(topKV2Options) - return x - - # TopKV2OptionsT - def _UnPack(self, topKV2Options): - if topKV2Options is None: - return - - # TopKV2OptionsT - def Pack(self, builder): - TopKV2OptionsStart(builder) - topKV2Options = TopKV2OptionsEnd(builder) - return topKV2Options - - - -class SqueezeOptions(object): - __slots__ = ['_tab'] - - 
@classmethod - def GetRootAsSqueezeOptions(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = SqueezeOptions() - x.Init(buf, n + offset) - return x - - @classmethod - def SqueezeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) - - # SqueezeOptions - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - - # SqueezeOptions - def SqueezeDims(self, j): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - a = self._tab.Vector(o) - return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) - return 0 - - # SqueezeOptions - def SqueezeDimsAsNumpy(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) - return 0 - - # SqueezeOptions - def SqueezeDimsLength(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - return self._tab.VectorLen(o) - return 0 - - # SqueezeOptions - def SqueezeDimsIsNone(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - return o == 0 - -def SqueezeOptionsStart(builder): builder.StartObject(1) -def SqueezeOptionsAddSqueezeDims(builder, squeezeDims): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(squeezeDims), 0) -def SqueezeOptionsStartSqueezeDimsVector(builder, numElems): return builder.StartVector(4, numElems, 4) -def SqueezeOptionsEnd(builder): return builder.EndObject() - -try: - from typing import List -except: - pass - -class SqueezeOptionsT(object): - - # SqueezeOptionsT - def __init__(self): - self.squeezeDims = None # type: List[int] - - @classmethod - def InitFromBuf(cls, buf, pos): - squeezeOptions = SqueezeOptions() - squeezeOptions.Init(buf, pos) - return cls.InitFromObj(squeezeOptions) - - @classmethod - def InitFromObj(cls, squeezeOptions): - x = SqueezeOptionsT() - x._UnPack(squeezeOptions) - return x - - # SqueezeOptionsT - def _UnPack(self, squeezeOptions): - if squeezeOptions is None: - return - if not squeezeOptions.SqueezeDimsIsNone(): - if np is None: - self.squeezeDims = [] - for i in range(squeezeOptions.SqueezeDimsLength()): - self.squeezeDims.append(squeezeOptions.SqueezeDims(i)) - else: - self.squeezeDims = squeezeOptions.SqueezeDimsAsNumpy() - - # SqueezeOptionsT - def Pack(self, builder): - if self.squeezeDims is not None: - if np is not None and type(self.squeezeDims) is np.ndarray: - squeezeDims = builder.CreateNumpyVector(self.squeezeDims) - else: - SqueezeOptionsStartSqueezeDimsVector(builder, len(self.squeezeDims)) - for i in reversed(range(len(self.squeezeDims))): - builder.PrependInt32(self.squeezeDims[i]) - squeezeDims = builder.EndVector(len(self.squeezeDims)) - SqueezeOptionsStart(builder) - if self.squeezeDims is not None: - SqueezeOptionsAddSqueezeDims(builder, squeezeDims) - squeezeOptions = SqueezeOptionsEnd(builder) - return squeezeOptions - - - -class CosOptions(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsCosOptions(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = CosOptions() - x.Init(buf, n + offset) - return x - - @classmethod - def CosOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, 
-
-    # CosOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def CosOptionsStart(builder): builder.StartObject(0)
-def CosOptionsEnd(builder): return builder.EndObject()
-
-
-class CosOptionsT(object):
-
-    # CosOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        cosOptions = CosOptions()
-        cosOptions.Init(buf, pos)
-        return cls.InitFromObj(cosOptions)
-
-    @classmethod
-    def InitFromObj(cls, cosOptions):
-        x = CosOptionsT()
-        x._UnPack(cosOptions)
-        return x
-
-    # CosOptionsT
-    def _UnPack(self, cosOptions):
-        if cosOptions is None:
-            return
-
-    # CosOptionsT
-    def Pack(self, builder):
-        CosOptionsStart(builder)
-        cosOptions = CosOptionsEnd(builder)
-        return cosOptions
-
-
-
-class AddOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsAddOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = AddOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def AddOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # AddOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # AddOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-def AddOptionsStart(builder): builder.StartObject(1)
-def AddOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0)
-def AddOptionsEnd(builder): return builder.EndObject()
-
-
-class AddOptionsT(object):
-
-    # AddOptionsT
-    def __init__(self):
-        self.fusedActivationFunction = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        addOptions = AddOptions()
-        addOptions.Init(buf, pos)
-        return cls.InitFromObj(addOptions)
-
-    @classmethod
-    def InitFromObj(cls, addOptions):
-        x = AddOptionsT()
-        x._UnPack(addOptions)
-        return x
-
-    # AddOptionsT
-    def _UnPack(self, addOptions):
-        if addOptions is None:
-            return
-        self.fusedActivationFunction = addOptions.FusedActivationFunction()
-
-    # AddOptionsT
-    def Pack(self, builder):
-        AddOptionsStart(builder)
-        AddOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        addOptions = AddOptionsEnd(builder)
-        return addOptions
-
-
-
-class AbsOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsAbsOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = AbsOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def AbsOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # AbsOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def AbsOptionsStart(builder): builder.StartObject(0)
-def AbsOptionsEnd(builder): return builder.EndObject()
-
-
-class AbsOptionsT(object):
-
-    # AbsOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        absOptions = AbsOptions()
-        absOptions.Init(buf, pos)
-        return cls.InitFromObj(absOptions)
-
-    @classmethod
-    def InitFromObj(cls, absOptions):
-        x = AbsOptionsT()
-        x._UnPack(absOptions)
-        return x
-
-    # AbsOptionsT
-    def _UnPack(self, absOptions):
-        if absOptions is None:
-            return
-
-    # AbsOptionsT
-    def Pack(self, builder):
-        AbsOptionsStart(builder)
-        absOptions = AbsOptionsEnd(builder)
-        return absOptions
-
-
-
-class MulOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsMulOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = MulOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def MulOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # MulOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # MulOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-def MulOptionsStart(builder): builder.StartObject(1)
-def MulOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0)
-def MulOptionsEnd(builder): return builder.EndObject()
-
-
-class MulOptionsT(object):
-
-    # MulOptionsT
-    def __init__(self):
-        self.fusedActivationFunction = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        mulOptions = MulOptions()
-        mulOptions.Init(buf, pos)
-        return cls.InitFromObj(mulOptions)
-
-    @classmethod
-    def InitFromObj(cls, mulOptions):
-        x = MulOptionsT()
-        x._UnPack(mulOptions)
-        return x
-
-    # MulOptionsT
-    def _UnPack(self, mulOptions):
-        if mulOptions is None:
-            return
-        self.fusedActivationFunction = mulOptions.FusedActivationFunction()
-
-    # MulOptionsT
-    def Pack(self, builder):
-        MulOptionsStart(builder)
-        MulOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        mulOptions = MulOptionsEnd(builder)
-        return mulOptions
-
-
-class MirrorPadMode(object):
-    REFLECT = 0
-    SYMMETRIC = 1
-
-
-
-class BuiltinOperator(object):
-    ADD = 0
-    AVERAGE_POOL_2D = 1
-    CONCATENATION = 2
-    CONV_2D = 3
-    DEPTHWISE_CONV_2D = 4
-    DEPTH_TO_SPACE = 5
-    DEQUANTIZE = 6
-    EMBEDDING_LOOKUP = 7
-    FLOOR = 8
-    FULLY_CONNECTED = 9
-    HASHTABLE_LOOKUP = 10
-    L2_NORMALIZATION = 11
-    L2_POOL_2D = 12
-    LOCAL_RESPONSE_NORMALIZATION = 13
-    LOGISTIC = 14
-    LSH_PROJECTION = 15
-    LSTM = 16
-    MAX_POOL_2D = 17
-    MUL = 18
-    RELU = 19
-    RELU_N1_TO_1 = 20
-    RELU6 = 21
-    RESHAPE = 22
-    RESIZE_BILINEAR = 23
-    RNN = 24
-    SOFTMAX = 25
-    SPACE_TO_DEPTH = 26
-    SVDF = 27
-    TANH = 28
-    CONCAT_EMBEDDINGS = 29
-    SKIP_GRAM = 30
-    CALL = 31
-    CUSTOM = 32
-    EMBEDDING_LOOKUP_SPARSE = 33
-    PAD = 34
-    UNIDIRECTIONAL_SEQUENCE_RNN = 35
-    GATHER = 36
-    BATCH_TO_SPACE_ND = 37
-    SPACE_TO_BATCH_ND = 38
-    TRANSPOSE = 39
-    MEAN = 40
-    SUB = 41
-    DIV = 42
-    SQUEEZE = 43
-    UNIDIRECTIONAL_SEQUENCE_LSTM = 44
-    STRIDED_SLICE = 45
-    BIDIRECTIONAL_SEQUENCE_RNN = 46
-    EXP = 47
-    TOPK_V2 = 48
-    SPLIT = 49
-    LOG_SOFTMAX = 50
-    DELEGATE = 51
-    BIDIRECTIONAL_SEQUENCE_LSTM = 52
-    CAST = 53
-    PRELU = 54
-    MAXIMUM = 55
-    ARG_MAX = 56
-    MINIMUM = 57
-    LESS = 58
-    NEG = 59
-    PADV2 = 60
-    GREATER = 61
-    GREATER_EQUAL = 62
-    LESS_EQUAL = 63
-    SELECT = 64
-    SLICE = 65
-    SIN = 66
-    TRANSPOSE_CONV = 67
-    SPARSE_TO_DENSE = 68
-    TILE = 69
-    EXPAND_DIMS = 70
-    EQUAL = 71
-    NOT_EQUAL = 72
-    LOG = 73
-    SUM = 74
-    SQRT = 75
-    RSQRT = 76
-    SHAPE = 77
-    POW = 78
-    ARG_MIN = 79
-    FAKE_QUANT = 80
-    REDUCE_PROD = 81
-    REDUCE_MAX = 82
-    PACK = 83
-    LOGICAL_OR = 84
-    ONE_HOT = 85
-    LOGICAL_AND = 86
-    LOGICAL_NOT = 87
-    UNPACK = 88
-    REDUCE_MIN = 89
-    FLOOR_DIV = 90
-    REDUCE_ANY = 91
-    SQUARE = 92
-    ZEROS_LIKE = 93
-    FILL = 94
-    FLOOR_MOD = 95
-    RANGE = 96
-    RESIZE_NEAREST_NEIGHBOR = 97
-    LEAKY_RELU = 98
-    SQUARED_DIFFERENCE = 99
-    MIRROR_PAD = 100
-    ABS = 101
-    SPLIT_V = 102
-    UNIQUE = 103
-    CEIL = 104
-    REVERSE_V2 = 105
-    ADD_N = 106
-    GATHER_ND = 107
-    COS = 108
-    WHERE = 109
-    RANK = 110
-    ELU = 111
-    REVERSE_SEQUENCE = 112
-    MATRIX_DIAG = 113
-    QUANTIZE = 114
-    MATRIX_SET_DIAG = 115
-    ROUND = 116
-    HARD_SWISH = 117
-    IF = 118
-    WHILE = 119
-    NON_MAX_SUPPRESSION_V4 = 120
-    NON_MAX_SUPPRESSION_V5 = 121
-    SCATTER_ND = 122
-    SELECT_V2 = 123
-    DENSIFY = 124
-    SEGMENT_SUM = 125
-    BATCH_MATMUL = 126
-
-
-
-
-class Uint16Vector(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsUint16Vector(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = Uint16Vector()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def Uint16VectorBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # Uint16Vector
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # Uint16Vector
-    def Values(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Uint16Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 2))
-        return 0
-
-    # Uint16Vector
-    def ValuesAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint16Flags, o)
-        return 0
-
-    # Uint16Vector
-    def ValuesLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # Uint16Vector
-    def ValuesIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        return o == 0
-
-def Uint16VectorStart(builder): builder.StartObject(1)
-def Uint16VectorAddValues(builder, values): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(values), 0)
-def Uint16VectorStartValuesVector(builder, numElems): return builder.StartVector(2, numElems, 2)
-def Uint16VectorEnd(builder): return builder.EndObject()
-
-try:
-    from typing import List
-except:
-    pass
-
-class Uint16VectorT(object):
-
-    # Uint16VectorT
-    def __init__(self):
-        self.values = None  # type: List[int]
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        uint16Vector = Uint16Vector()
-        uint16Vector.Init(buf, pos)
-        return cls.InitFromObj(uint16Vector)
-
-    @classmethod
-    def InitFromObj(cls, uint16Vector):
-        x = Uint16VectorT()
-        x._UnPack(uint16Vector)
-        return x
-
-    # Uint16VectorT
-    def _UnPack(self, uint16Vector):
-        if uint16Vector is None:
-            return
-        if not uint16Vector.ValuesIsNone():
-            if np is None:
-                self.values = []
-                for i in range(uint16Vector.ValuesLength()):
-                    self.values.append(uint16Vector.Values(i))
-            else:
-                self.values = uint16Vector.ValuesAsNumpy()
-
-    # Uint16VectorT
-    def Pack(self, builder):
-        if self.values is not None:
-            if np is not None and type(self.values) is np.ndarray:
-                values = builder.CreateNumpyVector(self.values)
-            else:
-                Uint16VectorStartValuesVector(builder, len(self.values))
-                for i in reversed(range(len(self.values))):
-                    builder.PrependUint16(self.values[i])
-                values = builder.EndVector(len(self.values))
-        Uint16VectorStart(builder)
-        if self.values is not None:
-            Uint16VectorAddValues(builder, values)
-        uint16Vector = Uint16VectorEnd(builder)
-        return uint16Vector
-
-
-
-class NotEqualOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsNotEqualOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = NotEqualOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def NotEqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # NotEqualOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def NotEqualOptionsStart(builder): builder.StartObject(0)
-def NotEqualOptionsEnd(builder): return builder.EndObject()
-
-
-class NotEqualOptionsT(object):
-
-    # NotEqualOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        notEqualOptions = NotEqualOptions()
-        notEqualOptions.Init(buf, pos)
-        return cls.InitFromObj(notEqualOptions)
-
-    @classmethod
-    def InitFromObj(cls, notEqualOptions):
-        x = NotEqualOptionsT()
-        x._UnPack(notEqualOptions)
-        return x
-
-    # NotEqualOptionsT
-    def _UnPack(self, notEqualOptions):
-        if notEqualOptions is None:
-            return
-
-    # NotEqualOptionsT
-    def Pack(self, builder):
-        NotEqualOptionsStart(builder)
-        notEqualOptions = NotEqualOptionsEnd(builder)
-        return notEqualOptions
-
-
-
-class ExpOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsExpOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = ExpOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def ExpOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # ExpOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def ExpOptionsStart(builder): builder.StartObject(0)
-def ExpOptionsEnd(builder): return builder.EndObject()
-
-
-class ExpOptionsT(object):
-
-    # ExpOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        expOptions = ExpOptions()
-        expOptions.Init(buf, pos)
-        return cls.InitFromObj(expOptions)
-
-    @classmethod
-    def InitFromObj(cls, expOptions):
-        x = ExpOptionsT()
-        x._UnPack(expOptions)
-        return x
-
-    # ExpOptionsT
-    def _UnPack(self, expOptions):
-        if expOptions is None:
-            return
-
-    # ExpOptionsT
-    def Pack(self, builder):
-        ExpOptionsStart(builder)
-        expOptions = ExpOptionsEnd(builder)
-        return expOptions
-
-
-
-class TileOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsTileOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = TileOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def TileOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # TileOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def TileOptionsStart(builder): builder.StartObject(0)
-def TileOptionsEnd(builder): return builder.EndObject()
-
-
-class TileOptionsT(object):
-
-    # TileOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        tileOptions = TileOptions()
-        tileOptions.Init(buf, pos)
-        return cls.InitFromObj(tileOptions)
-
-    @classmethod
-    def InitFromObj(cls, tileOptions):
-        x = TileOptionsT()
-        x._UnPack(tileOptions)
-        return x
-
-    # TileOptionsT
-    def _UnPack(self, tileOptions):
-        if tileOptions is None:
-            return
-
-    # TileOptionsT
-    def Pack(self, builder):
-        TileOptionsStart(builder)
-        tileOptions = TileOptionsEnd(builder)
-        return tileOptions
-
-
-
-class LeakyReluOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsLeakyReluOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = LeakyReluOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def LeakyReluOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # LeakyReluOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # LeakyReluOptions
-    def Alpha(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
-        return 0.0
-
-def LeakyReluOptionsStart(builder): builder.StartObject(1)
-def LeakyReluOptionsAddAlpha(builder, alpha): builder.PrependFloat32Slot(0, alpha, 0.0)
-def LeakyReluOptionsEnd(builder): return builder.EndObject()
-
-
-class LeakyReluOptionsT(object):
-
-    # LeakyReluOptionsT
-    def __init__(self):
-        self.alpha = 0.0  # type: float
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        leakyReluOptions = LeakyReluOptions()
-        leakyReluOptions.Init(buf, pos)
-        return cls.InitFromObj(leakyReluOptions)
-
-    @classmethod
-    def InitFromObj(cls, leakyReluOptions):
-        x = LeakyReluOptionsT()
-        x._UnPack(leakyReluOptions)
-        return x
-
-    # LeakyReluOptionsT
-    def _UnPack(self, leakyReluOptions):
-        if leakyReluOptions is None:
-            return
-        self.alpha = leakyReluOptions.Alpha()
-
-    # LeakyReluOptionsT
-    def Pack(self, builder):
-        LeakyReluOptionsStart(builder)
-        LeakyReluOptionsAddAlpha(builder, self.alpha)
-        leakyReluOptions = LeakyReluOptionsEnd(builder)
-        return leakyReluOptions
-
-
-
-class SelectOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsSelectOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SelectOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def SelectOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SelectOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def SelectOptionsStart(builder): builder.StartObject(0)
-def SelectOptionsEnd(builder): return builder.EndObject()
-
-
-class SelectOptionsT(object):
-
-    # SelectOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        selectOptions = SelectOptions()
-        selectOptions.Init(buf, pos)
-        return cls.InitFromObj(selectOptions)
-
-    @classmethod
-    def InitFromObj(cls, selectOptions):
-        x = SelectOptionsT()
-        x._UnPack(selectOptions)
-        return x
-
-    # SelectOptionsT
-    def _UnPack(self, selectOptions):
-        if selectOptions is None:
-            return
-
-    # SelectOptionsT
-    def Pack(self, builder):
-        SelectOptionsStart(builder)
-        selectOptions = SelectOptionsEnd(builder)
-        return selectOptions
-
-
-
-class FillOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsFillOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = FillOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def FillOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # FillOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def FillOptionsStart(builder): builder.StartObject(0)
-def FillOptionsEnd(builder): return builder.EndObject()
-
-
-class FillOptionsT(object):
-
-    # FillOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        fillOptions = FillOptions()
-        fillOptions.Init(buf, pos)
-        return cls.InitFromObj(fillOptions)
-
-    @classmethod
-    def InitFromObj(cls, fillOptions):
-        x = FillOptionsT()
-        x._UnPack(fillOptions)
-        return x
-
-    # FillOptionsT
-    def _UnPack(self, fillOptions):
-        if fillOptions is None:
-            return
-
-    # FillOptionsT
-    def Pack(self, builder):
-        FillOptionsStart(builder)
-        fillOptions = FillOptionsEnd(builder)
-        return fillOptions
-
-
-
-class GatherOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsGatherOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = GatherOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GatherOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # GatherOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # GatherOptions
-    def Axis(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-def GatherOptionsStart(builder): builder.StartObject(1)
-def GatherOptionsAddAxis(builder, axis): builder.PrependInt32Slot(0, axis, 0)
-def GatherOptionsEnd(builder): return builder.EndObject()
-
-
-class GatherOptionsT(object):
-
-    # GatherOptionsT
-    def __init__(self):
-        self.axis = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        gatherOptions = GatherOptions()
-        gatherOptions.Init(buf, pos)
-        return cls.InitFromObj(gatherOptions)
-
-    @classmethod
-    def InitFromObj(cls, gatherOptions):
-        x = GatherOptionsT()
-        x._UnPack(gatherOptions)
-        return x
-
-    # GatherOptionsT
-    def _UnPack(self, gatherOptions):
-        if gatherOptions is None:
-            return
-        self.axis = gatherOptions.Axis()
-
-    # GatherOptionsT
-    def Pack(self, builder):
-        GatherOptionsStart(builder)
-        GatherOptionsAddAxis(builder, self.axis)
-        gatherOptions = GatherOptionsEnd(builder)
-        return gatherOptions
-
-
-class TensorType(object):
-    FLOAT32 = 0
-    FLOAT16 = 1
-    INT32 = 2
-    UINT8 = 3
-    INT64 = 4
-    STRING = 5
-    BOOL = 6
-    INT16 = 7
-    COMPLEX64 = 8
-    INT8 = 9
-    FLOAT64 = 10
-
-
-
-
-class PadOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsPadOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = PadOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def PadOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # PadOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def PadOptionsStart(builder): builder.StartObject(0)
-def PadOptionsEnd(builder): return builder.EndObject()
-
-
-class PadOptionsT(object):
-
-    # PadOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        padOptions = PadOptions()
-        padOptions.Init(buf, pos)
-        return cls.InitFromObj(padOptions)
-
-    @classmethod
-    def InitFromObj(cls, padOptions):
-        x = PadOptionsT()
-        x._UnPack(padOptions)
-        return x
-
-    # PadOptionsT
-    def _UnPack(self, padOptions):
-        if padOptions is None:
-            return
-
-    # PadOptionsT
-    def Pack(self, builder):
-        PadOptionsStart(builder)
-        padOptions = PadOptionsEnd(builder)
-        return padOptions
-
-
-
-class LogicalOrOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsLogicalOrOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = LogicalOrOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def LogicalOrOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # LogicalOrOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def LogicalOrOptionsStart(builder): builder.StartObject(0)
-def LogicalOrOptionsEnd(builder): return builder.EndObject()
-
-
-class LogicalOrOptionsT(object):
-
-    # LogicalOrOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        logicalOrOptions = LogicalOrOptions()
-        logicalOrOptions.Init(buf, pos)
-        return cls.InitFromObj(logicalOrOptions)
-
-    @classmethod
-    def InitFromObj(cls, logicalOrOptions):
-        x = LogicalOrOptionsT()
-        x._UnPack(logicalOrOptions)
-        return x
-
-    # LogicalOrOptionsT
-    def _UnPack(self, logicalOrOptions):
-        if logicalOrOptions is None:
-            return
-
-    # LogicalOrOptionsT
-    def Pack(self, builder):
-        LogicalOrOptionsStart(builder)
-        logicalOrOptions = LogicalOrOptionsEnd(builder)
-        return logicalOrOptions
-
-
-
-class NegOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsNegOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = NegOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def NegOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # NegOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def NegOptionsStart(builder): builder.StartObject(0)
-def NegOptionsEnd(builder): return builder.EndObject()
-
-
-class NegOptionsT(object):
-
-    # NegOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        negOptions = NegOptions()
-        negOptions.Init(buf, pos)
-        return cls.InitFromObj(negOptions)
-
-    @classmethod
-    def InitFromObj(cls, negOptions):
-        x = NegOptionsT()
-        x._UnPack(negOptions)
-        return x
-
-    # NegOptionsT
-    def _UnPack(self, negOptions):
-        if negOptions is None:
-            return
-
-    # NegOptionsT
-    def Pack(self, builder):
-        NegOptionsStart(builder)
-        negOptions = NegOptionsEnd(builder)
-        return negOptions
-
-
-
-class DepthToSpaceOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsDepthToSpaceOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = DepthToSpaceOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def DepthToSpaceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # DepthToSpaceOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # DepthToSpaceOptions
-    def BlockSize(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-def DepthToSpaceOptionsStart(builder): builder.StartObject(1)
-def DepthToSpaceOptionsAddBlockSize(builder, blockSize): builder.PrependInt32Slot(0, blockSize, 0)
-def DepthToSpaceOptionsEnd(builder): return builder.EndObject()
-
-
-class DepthToSpaceOptionsT(object):
-
-    # DepthToSpaceOptionsT
-    def __init__(self):
-        self.blockSize = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        depthToSpaceOptions = DepthToSpaceOptions()
-        depthToSpaceOptions.Init(buf, pos)
-        return cls.InitFromObj(depthToSpaceOptions)
-
-    @classmethod
-    def InitFromObj(cls, depthToSpaceOptions):
-        x = DepthToSpaceOptionsT()
-        x._UnPack(depthToSpaceOptions)
-        return x
-
-    # DepthToSpaceOptionsT
-    def _UnPack(self, depthToSpaceOptions):
-        if depthToSpaceOptions is None:
-            return
-        self.blockSize = depthToSpaceOptions.BlockSize()
-
-    # DepthToSpaceOptionsT
-    def Pack(self, builder):
-        DepthToSpaceOptionsStart(builder)
-        DepthToSpaceOptionsAddBlockSize(builder, self.blockSize)
-        depthToSpaceOptions = DepthToSpaceOptionsEnd(builder)
-        return depthToSpaceOptions
-
-
-
-class SoftmaxOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsSoftmaxOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SoftmaxOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def SoftmaxOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SoftmaxOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # SoftmaxOptions
-    def Beta(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
-        return 0.0
-
-def SoftmaxOptionsStart(builder): builder.StartObject(1)
-def SoftmaxOptionsAddBeta(builder, beta): builder.PrependFloat32Slot(0, beta, 0.0)
-def SoftmaxOptionsEnd(builder): return builder.EndObject()
-
-
-class SoftmaxOptionsT(object):
-
-    # SoftmaxOptionsT
-    def __init__(self):
-        self.beta = 0.0  # type: float
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        softmaxOptions = SoftmaxOptions()
-        softmaxOptions.Init(buf, pos)
-        return cls.InitFromObj(softmaxOptions)
-
-    @classmethod
-    def InitFromObj(cls, softmaxOptions):
-        x = SoftmaxOptionsT()
-        x._UnPack(softmaxOptions)
-        return x
-
-    # SoftmaxOptionsT
-    def _UnPack(self, softmaxOptions):
-        if softmaxOptions is None:
-            return
-        self.beta = softmaxOptions.Beta()
-
-    # SoftmaxOptionsT
-    def Pack(self, builder):
-        SoftmaxOptionsStart(builder)
-        SoftmaxOptionsAddBeta(builder, self.beta)
-        softmaxOptions = SoftmaxOptionsEnd(builder)
-        return softmaxOptions
-
-
-
-class TransposeOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsTransposeOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = TransposeOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def TransposeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # TransposeOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def TransposeOptionsStart(builder): builder.StartObject(0)
-def TransposeOptionsEnd(builder): return builder.EndObject()
-
-
-class TransposeOptionsT(object):
-
-    # TransposeOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        transposeOptions = TransposeOptions()
-        transposeOptions.Init(buf, pos)
-        return cls.InitFromObj(transposeOptions)
-
-    @classmethod
-    def InitFromObj(cls, transposeOptions):
-        x = TransposeOptionsT()
-        x._UnPack(transposeOptions)
-        return x
-
-    # TransposeOptionsT
-    def _UnPack(self, transposeOptions):
-        if transposeOptions is None:
-            return
-
-    # TransposeOptionsT
-    def Pack(self, builder):
-        TransposeOptionsStart(builder)
-        transposeOptions = TransposeOptionsEnd(builder)
-        return transposeOptions
-
-
-
-class OneHotOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsOneHotOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = OneHotOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def OneHotOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # OneHotOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # OneHotOptions
-    def Axis(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-def OneHotOptionsStart(builder): builder.StartObject(1)
-def OneHotOptionsAddAxis(builder, axis): builder.PrependInt32Slot(0, axis, 0)
-def OneHotOptionsEnd(builder): return builder.EndObject()
-
-
-class OneHotOptionsT(object):
-
-    # OneHotOptionsT
-    def __init__(self):
-        self.axis = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        oneHotOptions = OneHotOptions()
-        oneHotOptions.Init(buf, pos)
-        return cls.InitFromObj(oneHotOptions)
-
-    @classmethod
-    def InitFromObj(cls, oneHotOptions):
-        x = OneHotOptionsT()
-        x._UnPack(oneHotOptions)
-        return x
-
-    # OneHotOptionsT
-    def _UnPack(self, oneHotOptions):
-        if oneHotOptions is None:
-            return
-        self.axis = oneHotOptions.Axis()
-
-    # OneHotOptionsT
-    def Pack(self, builder):
-        OneHotOptionsStart(builder)
-        OneHotOptionsAddAxis(builder, self.axis)
-        oneHotOptions = OneHotOptionsEnd(builder)
-        return oneHotOptions
-
-
-
-class GatherNdOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsGatherNdOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = GatherNdOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GatherNdOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # GatherNdOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def GatherNdOptionsStart(builder): builder.StartObject(0)
-def GatherNdOptionsEnd(builder): return builder.EndObject()
-
-
-class GatherNdOptionsT(object):
-
-    # GatherNdOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        gatherNdOptions = GatherNdOptions()
-        gatherNdOptions.Init(buf, pos)
-        return cls.InitFromObj(gatherNdOptions)
-
-    @classmethod
-    def InitFromObj(cls, gatherNdOptions):
-        x = GatherNdOptionsT()
-        x._UnPack(gatherNdOptions)
-        return x
-
-    # GatherNdOptionsT
-    def _UnPack(self, gatherNdOptions):
-        if gatherNdOptions is None:
-            return
-
-    # GatherNdOptionsT
-    def Pack(self, builder):
-        GatherNdOptionsStart(builder)
-        gatherNdOptions = GatherNdOptionsEnd(builder)
-        return gatherNdOptions
-
-
-
-class BidirectionalSequenceRNNOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsBidirectionalSequenceRNNOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = BidirectionalSequenceRNNOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def BidirectionalSequenceRNNOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # BidirectionalSequenceRNNOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # BidirectionalSequenceRNNOptions
-    def TimeMajor(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-    # BidirectionalSequenceRNNOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # BidirectionalSequenceRNNOptions
-    def MergeOutputs(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-    # BidirectionalSequenceRNNOptions
-    def AsymmetricQuantizeInputs(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-def BidirectionalSequenceRNNOptionsStart(builder): builder.StartObject(4)
-def BidirectionalSequenceRNNOptionsAddTimeMajor(builder, timeMajor): builder.PrependBoolSlot(0, timeMajor, 0)
-def BidirectionalSequenceRNNOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(1, fusedActivationFunction, 0)
-def BidirectionalSequenceRNNOptionsAddMergeOutputs(builder, mergeOutputs): builder.PrependBoolSlot(2, mergeOutputs, 0)
-def BidirectionalSequenceRNNOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(3, asymmetricQuantizeInputs, 0)
-def BidirectionalSequenceRNNOptionsEnd(builder): return builder.EndObject()
-
-
-class BidirectionalSequenceRNNOptionsT(object):
-
-    # BidirectionalSequenceRNNOptionsT
-    def __init__(self):
-        self.timeMajor = False  # type: bool
-        self.fusedActivationFunction = 0  # type: int
-        self.mergeOutputs = False  # type: bool
-        self.asymmetricQuantizeInputs = False  # type: bool
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        bidirectionalSequenceRNNOptions = BidirectionalSequenceRNNOptions()
-        bidirectionalSequenceRNNOptions.Init(buf, pos)
-        return cls.InitFromObj(bidirectionalSequenceRNNOptions)
-
-    @classmethod
-    def InitFromObj(cls, bidirectionalSequenceRNNOptions):
-        x = BidirectionalSequenceRNNOptionsT()
-        x._UnPack(bidirectionalSequenceRNNOptions)
-        return x
-
-    # BidirectionalSequenceRNNOptionsT
-    def _UnPack(self, bidirectionalSequenceRNNOptions):
-        if bidirectionalSequenceRNNOptions is None:
-            return
-        self.timeMajor = bidirectionalSequenceRNNOptions.TimeMajor()
-        self.fusedActivationFunction = bidirectionalSequenceRNNOptions.FusedActivationFunction()
-        self.mergeOutputs = bidirectionalSequenceRNNOptions.MergeOutputs()
-        self.asymmetricQuantizeInputs = bidirectionalSequenceRNNOptions.AsymmetricQuantizeInputs()
-
-    # BidirectionalSequenceRNNOptionsT
-    def Pack(self, builder):
-        BidirectionalSequenceRNNOptionsStart(builder)
-        BidirectionalSequenceRNNOptionsAddTimeMajor(builder, self.timeMajor)
-        BidirectionalSequenceRNNOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        BidirectionalSequenceRNNOptionsAddMergeOutputs(builder, self.mergeOutputs)
-        BidirectionalSequenceRNNOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
-        bidirectionalSequenceRNNOptions = BidirectionalSequenceRNNOptionsEnd(builder)
-        return bidirectionalSequenceRNNOptions
-
-
-
-class UnpackOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsUnpackOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = UnpackOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def UnpackOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # UnpackOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # UnpackOptions
-    def Num(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # UnpackOptions
-    def Axis(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-def UnpackOptionsStart(builder): builder.StartObject(2)
-def UnpackOptionsAddNum(builder, num): builder.PrependInt32Slot(0, num, 0)
-def UnpackOptionsAddAxis(builder, axis): builder.PrependInt32Slot(1, axis, 0)
-def UnpackOptionsEnd(builder): return builder.EndObject()
-
-
-class UnpackOptionsT(object):
-
-    # UnpackOptionsT
-    def __init__(self):
-        self.num = 0  # type: int
-        self.axis = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        unpackOptions = UnpackOptions()
-        unpackOptions.Init(buf, pos)
-        return cls.InitFromObj(unpackOptions)
-
-    @classmethod
-    def InitFromObj(cls, unpackOptions):
-        x = UnpackOptionsT()
-        x._UnPack(unpackOptions)
-        return x
-
-    # UnpackOptionsT
-    def _UnPack(self, unpackOptions):
-        if unpackOptions is None:
-            return
-        self.num = unpackOptions.Num()
-        self.axis = unpackOptions.Axis()
-
-    # UnpackOptionsT
-    def Pack(self, builder):
-        UnpackOptionsStart(builder)
-        UnpackOptionsAddNum(builder, self.num)
-        UnpackOptionsAddAxis(builder, self.axis)
-        unpackOptions = UnpackOptionsEnd(builder)
-        return unpackOptions
-
-
-
-class WhileOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsWhileOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = WhileOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def WhileOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # WhileOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # WhileOptions
-    def CondSubgraphIndex(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # WhileOptions
-    def BodySubgraphIndex(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-def WhileOptionsStart(builder): builder.StartObject(2)
-def WhileOptionsAddCondSubgraphIndex(builder, condSubgraphIndex): builder.PrependInt32Slot(0, condSubgraphIndex, 0)
-def WhileOptionsAddBodySubgraphIndex(builder, bodySubgraphIndex): builder.PrependInt32Slot(1, bodySubgraphIndex, 0)
-def WhileOptionsEnd(builder): return builder.EndObject()
-
-
-class WhileOptionsT(object):
-
-    # WhileOptionsT
-    def __init__(self):
-        self.condSubgraphIndex = 0  # type: int
-        self.bodySubgraphIndex = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        whileOptions = WhileOptions()
-        whileOptions.Init(buf, pos)
-        return cls.InitFromObj(whileOptions)
-
-    @classmethod
-    def InitFromObj(cls, whileOptions):
-        x = WhileOptionsT()
-        x._UnPack(whileOptions)
-        return x
-
-    # WhileOptionsT
-    def _UnPack(self, whileOptions):
-        if whileOptions is None:
-            return
-        self.condSubgraphIndex = whileOptions.CondSubgraphIndex()
-        self.bodySubgraphIndex = whileOptions.BodySubgraphIndex()
-
-    # WhileOptionsT
-    def Pack(self, builder):
-        WhileOptionsStart(builder)
-        WhileOptionsAddCondSubgraphIndex(builder, self.condSubgraphIndex)
-        WhileOptionsAddBodySubgraphIndex(builder, self.bodySubgraphIndex)
-        whileOptions = WhileOptionsEnd(builder)
-        return whileOptions
-
-
-
-class NonMaxSuppressionV5Options(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsNonMaxSuppressionV5Options(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = NonMaxSuppressionV5Options()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def NonMaxSuppressionV5OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # NonMaxSuppressionV5Options
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def NonMaxSuppressionV5OptionsStart(builder): builder.StartObject(0)
-def NonMaxSuppressionV5OptionsEnd(builder): return builder.EndObject()
-
-
-class NonMaxSuppressionV5OptionsT(object):
-
-    # NonMaxSuppressionV5OptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        nonMaxSuppressionV5Options = NonMaxSuppressionV5Options()
-        nonMaxSuppressionV5Options.Init(buf, pos)
-        return cls.InitFromObj(nonMaxSuppressionV5Options)
-
-    @classmethod
-    def InitFromObj(cls, nonMaxSuppressionV5Options):
-        x = NonMaxSuppressionV5OptionsT()
-        x._UnPack(nonMaxSuppressionV5Options)
-        return x
-
-    # NonMaxSuppressionV5OptionsT
-    def _UnPack(self, nonMaxSuppressionV5Options):
-        if nonMaxSuppressionV5Options is None:
-            return
-
-    # NonMaxSuppressionV5OptionsT
-    def Pack(self, builder):
-        NonMaxSuppressionV5OptionsStart(builder)
-        nonMaxSuppressionV5Options = NonMaxSuppressionV5OptionsEnd(builder)
-        return nonMaxSuppressionV5Options
-
-
-class BuiltinOptions(object):
-    NONE = 0
-    Conv2DOptions = 1
-    DepthwiseConv2DOptions = 2
-    ConcatEmbeddingsOptions = 3
-    LSHProjectionOptions = 4
-    Pool2DOptions = 5
-    SVDFOptions = 6
-    RNNOptions = 7
-    FullyConnectedOptions = 8
-    SoftmaxOptions = 9
-    ConcatenationOptions = 10
-    AddOptions = 11
-    L2NormOptions = 12
-    LocalResponseNormalizationOptions = 13
-    LSTMOptions = 14
-    ResizeBilinearOptions = 15
-    CallOptions = 16
-    ReshapeOptions = 17
-    SkipGramOptions = 18
-    SpaceToDepthOptions = 19
-    EmbeddingLookupSparseOptions = 20
-    MulOptions = 21
-    PadOptions = 22
-    GatherOptions = 23
-    BatchToSpaceNDOptions = 24
-    SpaceToBatchNDOptions = 25
-    TransposeOptions = 26
-    ReducerOptions = 27
-    SubOptions = 28
-    DivOptions = 29
-    SqueezeOptions = 30
-    SequenceRNNOptions = 31
-    StridedSliceOptions = 32
-    ExpOptions = 33
-    TopKV2Options = 34
-    SplitOptions = 35
-    LogSoftmaxOptions = 36
-    CastOptions = 37
-    DequantizeOptions = 38
-    MaximumMinimumOptions = 39
-    ArgMaxOptions = 40
-    LessOptions = 41
-    NegOptions = 42
-    PadV2Options = 43
-    GreaterOptions = 44
-    GreaterEqualOptions = 45
-    LessEqualOptions = 46
-    SelectOptions = 47
-    SliceOptions = 48
-    TransposeConvOptions = 49
-    SparseToDenseOptions = 50
-    TileOptions = 51
-    ExpandDimsOptions = 52
-    EqualOptions = 53
-    NotEqualOptions = 54
-    ShapeOptions = 55
-    PowOptions = 56
-    ArgMinOptions = 57
-    FakeQuantOptions = 58
-    PackOptions = 59
-    LogicalOrOptions = 60
-    OneHotOptions = 61
-    LogicalAndOptions = 62
-    LogicalNotOptions = 63
-    UnpackOptions = 64
-    FloorDivOptions = 65
-    SquareOptions = 66
-    ZerosLikeOptions = 67
-    FillOptions = 68
-    BidirectionalSequenceLSTMOptions = 69
-    BidirectionalSequenceRNNOptions = 70
-    UnidirectionalSequenceLSTMOptions = 71
-    FloorModOptions = 72
-    RangeOptions = 73
-    ResizeNearestNeighborOptions = 74
-    LeakyReluOptions = 75
-    SquaredDifferenceOptions = 76
-    MirrorPadOptions = 77
-    AbsOptions = 78
-    SplitVOptions = 79
-    UniqueOptions = 80
-    ReverseV2Options = 81
-    AddNOptions = 82
-    GatherNdOptions = 83
-    CosOptions = 84
-    WhereOptions = 85
-    RankOptions = 86
-    ReverseSequenceOptions = 87
-    MatrixDiagOptions = 88
-    QuantizeOptions = 89
-    MatrixSetDiagOptions = 90
-    HardSwishOptions = 91
-    IfOptions = 92
-    WhileOptions = 93
-    DepthToSpaceOptions = 94
-    NonMaxSuppressionV4Options = 95
-    NonMaxSuppressionV5Options = 96
-    ScatterNdOptions = 97
-    SelectV2Options = 98
-    DensifyOptions = 99
-    SegmentSumOptions = 100
-    BatchMatMulOptions = 101
-
-
-def BuiltinOptionsCreator(unionType, table):
-    from flatbuffers.table import Table
-    if not isinstance(table, Table):
-        return None
-    if unionType == BuiltinOptions().Conv2DOptions:
-        return Conv2DOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().DepthwiseConv2DOptions:
-        return DepthwiseConv2DOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().ConcatEmbeddingsOptions:
-        return ConcatEmbeddingsOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().LSHProjectionOptions:
-        return LSHProjectionOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().Pool2DOptions:
-        return Pool2DOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().SVDFOptions:
-        return SVDFOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().RNNOptions:
-        return RNNOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().FullyConnectedOptions:
-        return FullyConnectedOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().SoftmaxOptions:
-        return SoftmaxOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().ConcatenationOptions:
-        return ConcatenationOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().AddOptions:
-        return AddOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().L2NormOptions:
-        return L2NormOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().LocalResponseNormalizationOptions:
-        return LocalResponseNormalizationOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().LSTMOptions:
-        return LSTMOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().ResizeBilinearOptions:
-        return ResizeBilinearOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().CallOptions:
-        return CallOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().ReshapeOptions:
-        return ReshapeOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().SkipGramOptions:
-        return SkipGramOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().SpaceToDepthOptions:
-        return SpaceToDepthOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().EmbeddingLookupSparseOptions:
-        return EmbeddingLookupSparseOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().MulOptions:
-        return MulOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().PadOptions:
-        return PadOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().GatherOptions:
-        return GatherOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().BatchToSpaceNDOptions:
-        return BatchToSpaceNDOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().SpaceToBatchNDOptions:
-        return SpaceToBatchNDOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().TransposeOptions:
-        return TransposeOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().ReducerOptions:
-        return ReducerOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().SubOptions:
-        return SubOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().DivOptions:
-        return DivOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().SqueezeOptions:
-        return SqueezeOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().SequenceRNNOptions:
-        return SequenceRNNOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().StridedSliceOptions:
-        return StridedSliceOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().ExpOptions:
-        return ExpOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().TopKV2Options:
-        return TopKV2OptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().SplitOptions:
-        return SplitOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().LogSoftmaxOptions:
-        return LogSoftmaxOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().CastOptions:
-        return CastOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().DequantizeOptions:
-        return DequantizeOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().MaximumMinimumOptions:
-        return MaximumMinimumOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().ArgMaxOptions:
-        return ArgMaxOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().LessOptions:
-        return LessOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().NegOptions:
-        return NegOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().PadV2Options:
-        return PadV2OptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().GreaterOptions:
-        return GreaterOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().GreaterEqualOptions:
-        return GreaterEqualOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().LessEqualOptions:
-        return LessEqualOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().SelectOptions:
-        return SelectOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().SliceOptions:
-        return SliceOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().TransposeConvOptions:
-        return TransposeConvOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().SparseToDenseOptions:
-        return SparseToDenseOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().TileOptions:
-        return TileOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().ExpandDimsOptions:
-        return ExpandDimsOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().EqualOptions:
-        return EqualOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().NotEqualOptions:
-        return NotEqualOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().ShapeOptions:
-        return ShapeOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().PowOptions:
-        return PowOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().ArgMinOptions:
-        return ArgMinOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().FakeQuantOptions:
-        return FakeQuantOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().PackOptions:
-        return PackOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().LogicalOrOptions:
-        return LogicalOrOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().OneHotOptions:
-        return OneHotOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().LogicalAndOptions:
-        return LogicalAndOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().LogicalNotOptions:
-        return LogicalNotOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().UnpackOptions:
-        return UnpackOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().FloorDivOptions:
-        return FloorDivOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().SquareOptions:
-        return SquareOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().ZerosLikeOptions:
-        return ZerosLikeOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().FillOptions:
-        return FillOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().BidirectionalSequenceLSTMOptions:
-        return BidirectionalSequenceLSTMOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().BidirectionalSequenceRNNOptions:
-        return BidirectionalSequenceRNNOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().UnidirectionalSequenceLSTMOptions:
-        return UnidirectionalSequenceLSTMOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().FloorModOptions:
-        return FloorModOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().RangeOptions:
-        return RangeOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().ResizeNearestNeighborOptions:
-        return ResizeNearestNeighborOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().LeakyReluOptions:
-        return LeakyReluOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().SquaredDifferenceOptions:
-        return SquaredDifferenceOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().MirrorPadOptions:
-        return MirrorPadOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().AbsOptions:
-        return AbsOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().SplitVOptions:
-        return SplitVOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().UniqueOptions:
-        return UniqueOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().ReverseV2Options:
-        return ReverseV2OptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().AddNOptions:
-        return AddNOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().GatherNdOptions:
-        return GatherNdOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().CosOptions:
-        return CosOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().WhereOptions:
-        return WhereOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().RankOptions:
-        return RankOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().ReverseSequenceOptions:
-        return ReverseSequenceOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().MatrixDiagOptions:
-        return MatrixDiagOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().QuantizeOptions:
-        return QuantizeOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().MatrixSetDiagOptions:
-        return MatrixSetDiagOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().HardSwishOptions:
-        return HardSwishOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().IfOptions:
-        return IfOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().WhileOptions:
-        return WhileOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().DepthToSpaceOptions:
-        return DepthToSpaceOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().NonMaxSuppressionV4Options:
-        return NonMaxSuppressionV4OptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().NonMaxSuppressionV5Options:
-        return NonMaxSuppressionV5OptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().ScatterNdOptions:
-        return ScatterNdOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().SelectV2Options:
-        return SelectV2OptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().DensifyOptions:
-        return DensifyOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().SegmentSumOptions:
-        return SegmentSumOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == BuiltinOptions().BatchMatMulOptions:
-        return BatchMatMulOptionsT.InitFromBuf(table.Bytes, table.Pos)
-    return None
-
-
-
-class DivOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsDivOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = DivOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def DivOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # DivOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # DivOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-def DivOptionsStart(builder): builder.StartObject(1)
-def DivOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0)
-def DivOptionsEnd(builder): return builder.EndObject()
-
-
-class DivOptionsT(object):
-
-    # DivOptionsT
-    def __init__(self):
-        self.fusedActivationFunction = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        divOptions = DivOptions()
-        divOptions.Init(buf, pos)
-        return cls.InitFromObj(divOptions)
-
-    @classmethod
-    def InitFromObj(cls, divOptions):
-        x = DivOptionsT()
-        x._UnPack(divOptions)
-        return x
-
-    # DivOptionsT
-    def _UnPack(self, divOptions):
-        if divOptions is None:
-            return
-        self.fusedActivationFunction = divOptions.FusedActivationFunction()
-
-    # DivOptionsT
-    def Pack(self, builder):
-        DivOptionsStart(builder)
-        DivOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        divOptions = DivOptionsEnd(builder)
-        return divOptions
-
-
-
-class UnidirectionalSequenceLSTMOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsUnidirectionalSequenceLSTMOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = UnidirectionalSequenceLSTMOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def UnidirectionalSequenceLSTMOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # UnidirectionalSequenceLSTMOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # UnidirectionalSequenceLSTMOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # UnidirectionalSequenceLSTMOptions
-    def CellClip(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
-        return 0.0
-
-    # UnidirectionalSequenceLSTMOptions
-    def ProjClip(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
-        return 0.0
-
-    # UnidirectionalSequenceLSTMOptions
-    def TimeMajor(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-    # UnidirectionalSequenceLSTMOptions
-    def AsymmetricQuantizeInputs(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-def UnidirectionalSequenceLSTMOptionsStart(builder): builder.StartObject(5)
-def UnidirectionalSequenceLSTMOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0)
-def UnidirectionalSequenceLSTMOptionsAddCellClip(builder, cellClip): builder.PrependFloat32Slot(1, cellClip, 0.0)
-def UnidirectionalSequenceLSTMOptionsAddProjClip(builder, projClip): builder.PrependFloat32Slot(2, projClip, 0.0)
-def UnidirectionalSequenceLSTMOptionsAddTimeMajor(builder, timeMajor): builder.PrependBoolSlot(3, timeMajor, 0)
-def UnidirectionalSequenceLSTMOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(4, asymmetricQuantizeInputs, 0)
-def UnidirectionalSequenceLSTMOptionsEnd(builder): return builder.EndObject()
-
-
-class UnidirectionalSequenceLSTMOptionsT(object):
-
-    # UnidirectionalSequenceLSTMOptionsT
-    def __init__(self):
-        self.fusedActivationFunction = 0 # type: int
-        self.cellClip = 0.0 # type: float
-        self.projClip = 0.0 # type: float
-        self.timeMajor = False # type: bool
-        self.asymmetricQuantizeInputs = False # type: bool
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        unidirectionalSequenceLSTMOptions = UnidirectionalSequenceLSTMOptions()
-        unidirectionalSequenceLSTMOptions.Init(buf, pos)
-        return cls.InitFromObj(unidirectionalSequenceLSTMOptions)
-
-    @classmethod
-    def InitFromObj(cls, unidirectionalSequenceLSTMOptions):
-        x = UnidirectionalSequenceLSTMOptionsT()
-        x._UnPack(unidirectionalSequenceLSTMOptions)
-        return x
-
-    # UnidirectionalSequenceLSTMOptionsT
-    def _UnPack(self, unidirectionalSequenceLSTMOptions):
-        if unidirectionalSequenceLSTMOptions is None:
-            return
-        self.fusedActivationFunction = unidirectionalSequenceLSTMOptions.FusedActivationFunction()
-        self.cellClip = unidirectionalSequenceLSTMOptions.CellClip()
-        self.projClip = unidirectionalSequenceLSTMOptions.ProjClip()
-        self.timeMajor = unidirectionalSequenceLSTMOptions.TimeMajor()
-        self.asymmetricQuantizeInputs = unidirectionalSequenceLSTMOptions.AsymmetricQuantizeInputs()
-
-    # UnidirectionalSequenceLSTMOptionsT
-    def Pack(self, builder):
-        UnidirectionalSequenceLSTMOptionsStart(builder)
-        UnidirectionalSequenceLSTMOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        UnidirectionalSequenceLSTMOptionsAddCellClip(builder, self.cellClip)
-        UnidirectionalSequenceLSTMOptionsAddProjClip(builder, self.projClip)
-        UnidirectionalSequenceLSTMOptionsAddTimeMajor(builder, self.timeMajor)
-        UnidirectionalSequenceLSTMOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
-        unidirectionalSequenceLSTMOptions = UnidirectionalSequenceLSTMOptionsEnd(builder)
-        return unidirectionalSequenceLSTMOptions
-
-
-
-class GreaterOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsGreaterOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = GreaterOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GreaterOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # GreaterOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def GreaterOptionsStart(builder): builder.StartObject(0)
-def GreaterOptionsEnd(builder): return builder.EndObject()
-
-
-class GreaterOptionsT(object):
-
-    # GreaterOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        greaterOptions = GreaterOptions()
-        greaterOptions.Init(buf, pos)
-        return cls.InitFromObj(greaterOptions)
-
-    @classmethod
-    def InitFromObj(cls, greaterOptions):
-        x = GreaterOptionsT()
-        x._UnPack(greaterOptions)
-        return x
-
-    # GreaterOptionsT
-    def _UnPack(self, greaterOptions):
-        if greaterOptions is None:
-            return
-
-    # GreaterOptionsT
-    def Pack(self, builder):
-        GreaterOptionsStart(builder)
-        greaterOptions = GreaterOptionsEnd(builder)
-        return greaterOptions
-
-
-
-class ArgMinOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsArgMinOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = ArgMinOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def ArgMinOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # ArgMinOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # ArgMinOptions
-    def OutputType(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-def ArgMinOptionsStart(builder): builder.StartObject(1)
-def ArgMinOptionsAddOutputType(builder, outputType): builder.PrependInt8Slot(0, outputType, 0)
-def ArgMinOptionsEnd(builder): return builder.EndObject()
-
-
-class ArgMinOptionsT(object):
-
-    # ArgMinOptionsT
-    def __init__(self):
-        self.outputType = 0 # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        argMinOptions = ArgMinOptions()
-        argMinOptions.Init(buf, pos)
-        return cls.InitFromObj(argMinOptions)
-
-    @classmethod
-    def InitFromObj(cls, argMinOptions):
-        x = ArgMinOptionsT()
-        x._UnPack(argMinOptions)
-        return x
-
-    # ArgMinOptionsT
-    def _UnPack(self, argMinOptions):
-        if argMinOptions is None:
-            return
-        self.outputType = argMinOptions.OutputType()
-
-    # ArgMinOptionsT
-    def Pack(self, builder):
-        ArgMinOptionsStart(builder)
-        ArgMinOptionsAddOutputType(builder, self.outputType)
-        argMinOptions = ArgMinOptionsEnd(builder)
-        return argMinOptions
-
-
-class CustomOptionsFormat(object):
-    FLEXBUFFERS = 0
-
-
-
-
-class FloorModOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsFloorModOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = FloorModOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def FloorModOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # FloorModOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def FloorModOptionsStart(builder): builder.StartObject(0)
-def FloorModOptionsEnd(builder): return builder.EndObject()
-
-
-class FloorModOptionsT(object):
-
-    # FloorModOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        floorModOptions = FloorModOptions()
-        floorModOptions.Init(buf, pos)
-        return cls.InitFromObj(floorModOptions)
-
-    @classmethod
-    def InitFromObj(cls, floorModOptions):
-        x = FloorModOptionsT()
-        x._UnPack(floorModOptions)
-        return x
-
-    # FloorModOptionsT
-    def _UnPack(self, floorModOptions):
-        if floorModOptions is None:
-            return
-
-    # FloorModOptionsT
-    def Pack(self, builder):
-        FloorModOptionsStart(builder)
-        floorModOptions = FloorModOptionsEnd(builder)
-        return floorModOptions
-
-
-
-class QuantizationParameters(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsQuantizationParameters(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = QuantizationParameters()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def QuantizationParametersBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # QuantizationParameters
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # QuantizationParameters
-    def Min(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
-        return 0
-
-    # QuantizationParameters
-    def MinAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
-        return 0
-
-    # QuantizationParameters
-    def MinLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # QuantizationParameters
-    def MinIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        return o == 0
-
-    # QuantizationParameters
-    def Max(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
-        return 0
-
-    # QuantizationParameters
-    def MaxAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
-        return 0
-
-    # QuantizationParameters
-    def MaxLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # QuantizationParameters
-    def MaxIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        return o == 0
-
-    # QuantizationParameters
-    def Scale(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
-        return 0
-
-    # QuantizationParameters
-    def ScaleAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
-        return 0
-
-    # QuantizationParameters
-    def ScaleLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # QuantizationParameters
-    def ScaleIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        return o == 0
-
-    # QuantizationParameters
-    def ZeroPoint(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
-        return 0
-
-    # QuantizationParameters
-    def ZeroPointAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
-        return 0
-
-    # QuantizationParameters
-    def ZeroPointLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # QuantizationParameters
-    def ZeroPointIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        return o == 0
-
-    # QuantizationParameters
-    def DetailsType(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
-        return 0
-
-    # QuantizationParameters
-    def Details(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
-        if o != 0:
-            from flatbuffers.table import Table
-            obj = Table(bytearray(), 0)
-            self._tab.Union(obj, o)
-            return obj
-        return None
-
-    # QuantizationParameters
-    def QuantizedDimension(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-def QuantizationParametersStart(builder): builder.StartObject(7)
-def QuantizationParametersAddMin(builder, min): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(min), 0)
-def QuantizationParametersStartMinVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def QuantizationParametersAddMax(builder, max): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(max), 0)
-def QuantizationParametersStartMaxVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def QuantizationParametersAddScale(builder, scale): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(scale), 0)
-def QuantizationParametersStartScaleVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def QuantizationParametersAddZeroPoint(builder, zeroPoint): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(zeroPoint), 0)
-def QuantizationParametersStartZeroPointVector(builder, numElems): return builder.StartVector(8, numElems, 8)
-def QuantizationParametersAddDetailsType(builder, detailsType): builder.PrependUint8Slot(4, detailsType, 0)
-def QuantizationParametersAddDetails(builder, details): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(details), 0)
-def QuantizationParametersAddQuantizedDimension(builder, quantizedDimension): builder.PrependInt32Slot(6, quantizedDimension, 0)
-def QuantizationParametersEnd(builder): return builder.EndObject()
-
-try:
-    from typing import List, Union
-except:
-    pass
-
-class QuantizationParametersT(object):
-
-    # QuantizationParametersT
-    def __init__(self):
-        self.min = None # type: List[float]
-        self.max = None # type: List[float]
-        self.scale = None # type: List[float]
-        self.zeroPoint = None # type: List[int]
-        self.detailsType = 0 # type: int
-        self.details = None # type: Union[None, CustomQuantizationT]
-        self.quantizedDimension = 0 # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        quantizationParameters = QuantizationParameters()
-        quantizationParameters.Init(buf, pos)
-        return cls.InitFromObj(quantizationParameters)
-
-    @classmethod
-    def InitFromObj(cls, quantizationParameters):
-        x = QuantizationParametersT()
-        x._UnPack(quantizationParameters)
-        return x
-
-    # QuantizationParametersT
-    def _UnPack(self, quantizationParameters):
-        if quantizationParameters is None:
-            return
-        if not quantizationParameters.MinIsNone():
-            if np is None:
-                self.min = []
-                for i in range(quantizationParameters.MinLength()):
-                    self.min.append(quantizationParameters.Min(i))
-            else:
-                self.min = quantizationParameters.MinAsNumpy()
-        if not quantizationParameters.MaxIsNone():
-            if np is None:
-                self.max = []
-                for i in range(quantizationParameters.MaxLength()):
-                    self.max.append(quantizationParameters.Max(i))
-            else:
-                self.max = quantizationParameters.MaxAsNumpy()
-        if not quantizationParameters.ScaleIsNone():
-            if np is None:
-                self.scale = []
-                for i in range(quantizationParameters.ScaleLength()):
-                    self.scale.append(quantizationParameters.Scale(i))
-            else:
-                self.scale = quantizationParameters.ScaleAsNumpy()
-        if not quantizationParameters.ZeroPointIsNone():
-            if np is None:
-                self.zeroPoint = []
-                for i in range(quantizationParameters.ZeroPointLength()):
-                    self.zeroPoint.append(quantizationParameters.ZeroPoint(i))
-            else:
-                self.zeroPoint = quantizationParameters.ZeroPointAsNumpy()
-        self.detailsType = quantizationParameters.DetailsType()
-        self.details = QuantizationDetailsCreator(self.detailsType, quantizationParameters.Details())
-        self.quantizedDimension = quantizationParameters.QuantizedDimension()
-
-    # QuantizationParametersT
-    def Pack(self, builder):
-        if self.min is not None:
-            if np is not None and type(self.min) is np.ndarray:
-                min = builder.CreateNumpyVector(self.min)
-            else:
-                QuantizationParametersStartMinVector(builder, len(self.min))
-                for i in reversed(range(len(self.min))):
-                    builder.PrependFloat32(self.min[i])
-                min = builder.EndVector(len(self.min))
-        if self.max is not None:
-            if np is not None and type(self.max) is np.ndarray:
-                max = builder.CreateNumpyVector(self.max)
-            else:
-                QuantizationParametersStartMaxVector(builder, len(self.max))
-                for i in reversed(range(len(self.max))):
-                    builder.PrependFloat32(self.max[i])
-                max = builder.EndVector(len(self.max))
-        if self.scale is not None:
-            if np is not None and type(self.scale) is np.ndarray:
-                scale = builder.CreateNumpyVector(self.scale)
-            else:
-                QuantizationParametersStartScaleVector(builder, len(self.scale))
-                for i in reversed(range(len(self.scale))):
-                    builder.PrependFloat32(self.scale[i])
-                scale = builder.EndVector(len(self.scale))
-        if self.zeroPoint is not None:
-            if np is not None and type(self.zeroPoint) is np.ndarray:
-                zeroPoint = builder.CreateNumpyVector(self.zeroPoint)
-            else:
-                QuantizationParametersStartZeroPointVector(builder, len(self.zeroPoint))
-                for i in reversed(range(len(self.zeroPoint))):
-                    builder.PrependInt64(self.zeroPoint[i])
-                zeroPoint = builder.EndVector(len(self.zeroPoint))
-        if self.details is not None:
-            details = self.details.Pack(builder)
-        QuantizationParametersStart(builder)
-        if self.min is not None:
-            QuantizationParametersAddMin(builder, min)
-        if self.max is not None:
-            QuantizationParametersAddMax(builder, max)
-        if self.scale is not None:
-            QuantizationParametersAddScale(builder, scale)
-        if self.zeroPoint is not None:
-            QuantizationParametersAddZeroPoint(builder, zeroPoint)
-        QuantizationParametersAddDetailsType(builder, self.detailsType)
-        if self.details is not None:
-            QuantizationParametersAddDetails(builder, details)
-        QuantizationParametersAddQuantizedDimension(builder, self.quantizedDimension)
-        quantizationParameters = QuantizationParametersEnd(builder)
-        return quantizationParameters
-
-
-
-class SliceOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsSliceOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SliceOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def SliceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SliceOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def SliceOptionsStart(builder): builder.StartObject(0)
-def SliceOptionsEnd(builder): return builder.EndObject()
-
-
-class SliceOptionsT(object):
-
-    # SliceOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        sliceOptions = SliceOptions()
-        sliceOptions.Init(buf, pos)
-        return cls.InitFromObj(sliceOptions)
-
-    @classmethod
-    def InitFromObj(cls, sliceOptions):
-        x = SliceOptionsT()
-        x._UnPack(sliceOptions)
-        return x
-
-    # SliceOptionsT
-    def _UnPack(self, sliceOptions):
-        if sliceOptions is None:
-            return
-
-    # SliceOptionsT
-    def Pack(self, builder):
-        SliceOptionsStart(builder)
-        sliceOptions = SliceOptionsEnd(builder)
-        return sliceOptions
-
-
-
-class WhereOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsWhereOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = WhereOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def WhereOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # WhereOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def WhereOptionsStart(builder): builder.StartObject(0)
-def WhereOptionsEnd(builder): return builder.EndObject()
-
-
-class WhereOptionsT(object):
-
-    # WhereOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        whereOptions = WhereOptions()
-        whereOptions.Init(buf, pos)
-        return cls.InitFromObj(whereOptions)
-
-    @classmethod
-    def InitFromObj(cls, whereOptions):
-        x = WhereOptionsT()
-        x._UnPack(whereOptions)
-        return x
-
-    # WhereOptionsT
-    def _UnPack(self, whereOptions):
-        if whereOptions is None:
-            return
-
-    # WhereOptionsT
-    def Pack(self, builder):
-        WhereOptionsStart(builder)
-        whereOptions = WhereOptionsEnd(builder)
-        return whereOptions
-
-
-
-class Conv2DOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsConv2DOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = Conv2DOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def Conv2DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # Conv2DOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # Conv2DOptions
-    def Padding(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # Conv2DOptions
-    def StrideW(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # Conv2DOptions
-    def StrideH(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # Conv2DOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # Conv2DOptions
-    def DilationWFactor(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 1
-
-    # Conv2DOptions
-    def DilationHFactor(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 1
-
-def Conv2DOptionsStart(builder): builder.StartObject(6)
-def Conv2DOptionsAddPadding(builder, padding): builder.PrependInt8Slot(0, padding, 0)
-def Conv2DOptionsAddStrideW(builder, strideW): builder.PrependInt32Slot(1, strideW, 0)
-def Conv2DOptionsAddStrideH(builder, strideH): builder.PrependInt32Slot(2, strideH, 0)
-def Conv2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(3, fusedActivationFunction, 0)
-def Conv2DOptionsAddDilationWFactor(builder, dilationWFactor): builder.PrependInt32Slot(4, dilationWFactor, 1)
-def Conv2DOptionsAddDilationHFactor(builder, dilationHFactor): builder.PrependInt32Slot(5, dilationHFactor, 1)
-def Conv2DOptionsEnd(builder): return builder.EndObject()
-
-
-class Conv2DOptionsT(object):
-
-    # Conv2DOptionsT
-    def __init__(self):
-        self.padding = 0 # type: int
-        self.strideW = 0 # type: int
-        self.strideH = 0 # type: int
-        self.fusedActivationFunction = 0 # type: int
-        self.dilationWFactor = 1 # type: int
-        self.dilationHFactor = 1 # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        conv2DOptions = Conv2DOptions()
-        conv2DOptions.Init(buf, pos)
-        return cls.InitFromObj(conv2DOptions)
-
-    @classmethod
-    def InitFromObj(cls, conv2DOptions):
-        x = Conv2DOptionsT()
-        x._UnPack(conv2DOptions)
-        return x
-
-    # Conv2DOptionsT
-    def _UnPack(self, conv2DOptions):
-        if conv2DOptions is None:
-            return
-        self.padding = conv2DOptions.Padding()
-        self.strideW = conv2DOptions.StrideW()
-        self.strideH = conv2DOptions.StrideH()
-        self.fusedActivationFunction = conv2DOptions.FusedActivationFunction()
-        self.dilationWFactor = conv2DOptions.DilationWFactor()
-        self.dilationHFactor = conv2DOptions.DilationHFactor()
-
-    # Conv2DOptionsT
-    def Pack(self, builder):
-        Conv2DOptionsStart(builder)
-        Conv2DOptionsAddPadding(builder, self.padding)
-        Conv2DOptionsAddStrideW(builder, self.strideW)
-        Conv2DOptionsAddStrideH(builder, self.strideH)
-        Conv2DOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        Conv2DOptionsAddDilationWFactor(builder, self.dilationWFactor)
-        Conv2DOptionsAddDilationHFactor(builder, self.dilationHFactor)
-        conv2DOptions = Conv2DOptionsEnd(builder)
-        return conv2DOptions
-
-
-
-class PadV2Options(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsPadV2Options(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = PadV2Options()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def PadV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # PadV2Options
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def PadV2OptionsStart(builder): builder.StartObject(0)
-def PadV2OptionsEnd(builder): return builder.EndObject()
-
-
-class PadV2OptionsT(object):
-
-    # PadV2OptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        padV2Options = PadV2Options()
-        padV2Options.Init(buf, pos)
-        return cls.InitFromObj(padV2Options)
-
-    @classmethod
-    def InitFromObj(cls, padV2Options):
-        x = PadV2OptionsT()
-        x._UnPack(padV2Options)
-        return x
-
-    # PadV2OptionsT
-    def _UnPack(self, padV2Options):
-        if padV2Options is None:
-            return
-
-    # PadV2OptionsT
-    def Pack(self, builder):
-        PadV2OptionsStart(builder)
-        padV2Options = PadV2OptionsEnd(builder)
-        return padV2Options
-
-
-
-class L2NormOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsL2NormOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = L2NormOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def L2NormOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # L2NormOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # L2NormOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-def L2NormOptionsStart(builder): builder.StartObject(1)
-def L2NormOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0)
-def L2NormOptionsEnd(builder): return builder.EndObject()
-
-
-class L2NormOptionsT(object):
-
-    # L2NormOptionsT
-    def __init__(self):
-        self.fusedActivationFunction = 0 # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        l2NormOptions = L2NormOptions()
-        l2NormOptions.Init(buf, pos)
-        return cls.InitFromObj(l2NormOptions)
-
-    @classmethod
-    def InitFromObj(cls, l2NormOptions):
-        x = L2NormOptionsT()
-        x._UnPack(l2NormOptions)
-        return x
-
-    # L2NormOptionsT
-    def _UnPack(self, l2NormOptions):
-        if l2NormOptions is None:
-            return
-        self.fusedActivationFunction = l2NormOptions.FusedActivationFunction()
-
-    # L2NormOptionsT
-    def Pack(self, builder):
-        L2NormOptionsStart(builder)
-        L2NormOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        l2NormOptions = L2NormOptionsEnd(builder)
-        return l2NormOptions
-
-
-
-class NonMaxSuppressionV4Options(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsNonMaxSuppressionV4Options(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = NonMaxSuppressionV4Options()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def NonMaxSuppressionV4OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # NonMaxSuppressionV4Options
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def NonMaxSuppressionV4OptionsStart(builder): builder.StartObject(0)
-def NonMaxSuppressionV4OptionsEnd(builder): return builder.EndObject()
-
-
-class NonMaxSuppressionV4OptionsT(object):
-
-    # NonMaxSuppressionV4OptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        nonMaxSuppressionV4Options = NonMaxSuppressionV4Options()
-        nonMaxSuppressionV4Options.Init(buf, pos)
-        return cls.InitFromObj(nonMaxSuppressionV4Options)
-
-    @classmethod
-    def InitFromObj(cls, nonMaxSuppressionV4Options):
-        x = NonMaxSuppressionV4OptionsT()
-        x._UnPack(nonMaxSuppressionV4Options)
-        return x
-
-    # NonMaxSuppressionV4OptionsT
-    def _UnPack(self, nonMaxSuppressionV4Options):
-        if nonMaxSuppressionV4Options is None:
-            return
-
-    # NonMaxSuppressionV4OptionsT
-    def Pack(self, builder):
-        NonMaxSuppressionV4OptionsStart(builder)
-        nonMaxSuppressionV4Options = NonMaxSuppressionV4OptionsEnd(builder)
-        return nonMaxSuppressionV4Options
-
-
-
-class RankOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsRankOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = RankOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def RankOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # RankOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def RankOptionsStart(builder): builder.StartObject(0)
-def RankOptionsEnd(builder): return builder.EndObject()
-
-
-class RankOptionsT(object):
-
-    # RankOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        rankOptions = RankOptions()
-        rankOptions.Init(buf, pos)
-        return cls.InitFromObj(rankOptions)
-
-    @classmethod
-    def InitFromObj(cls, rankOptions):
-        x = RankOptionsT()
-        x._UnPack(rankOptions)
-        return x
-
-    # RankOptionsT
-    def _UnPack(self, rankOptions):
-        if rankOptions is None:
-            return
-
-    # RankOptionsT
-    def Pack(self, builder):
-        RankOptionsStart(builder)
-        rankOptions = RankOptionsEnd(builder)
-        return rankOptions
-
-
-
-class ConcatEmbeddingsOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsConcatEmbeddingsOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = ConcatEmbeddingsOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def ConcatEmbeddingsOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # ConcatEmbeddingsOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # ConcatEmbeddingsOptions
-    def NumChannels(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # ConcatEmbeddingsOptions
-    def NumColumnsPerChannel(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
-        return 0
-
-    # ConcatEmbeddingsOptions
-    def NumColumnsPerChannelAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
-        return 0
-
-    # ConcatEmbeddingsOptions
-    def NumColumnsPerChannelLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # ConcatEmbeddingsOptions
-    def NumColumnsPerChannelIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        return o == 0
-
-    # ConcatEmbeddingsOptions
-    def EmbeddingDimPerChannel(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
-        return 0
-
-    # ConcatEmbeddingsOptions
-    def EmbeddingDimPerChannelAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
-        return 0
-
-    # ConcatEmbeddingsOptions
-    def EmbeddingDimPerChannelLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # ConcatEmbeddingsOptions
-    def EmbeddingDimPerChannelIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        return o == 0
-
-def ConcatEmbeddingsOptionsStart(builder): builder.StartObject(3)
-def ConcatEmbeddingsOptionsAddNumChannels(builder, numChannels): builder.PrependInt32Slot(0, numChannels, 0)
-def ConcatEmbeddingsOptionsAddNumColumnsPerChannel(builder, numColumnsPerChannel): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(numColumnsPerChannel), 0)
-def ConcatEmbeddingsOptionsStartNumColumnsPerChannelVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def ConcatEmbeddingsOptionsAddEmbeddingDimPerChannel(builder, embeddingDimPerChannel): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(embeddingDimPerChannel), 0)
-def ConcatEmbeddingsOptionsStartEmbeddingDimPerChannelVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def ConcatEmbeddingsOptionsEnd(builder): return builder.EndObject()
-
-try:
-    from typing import List
-except:
-    pass
-
-class ConcatEmbeddingsOptionsT(object):
-
-    # ConcatEmbeddingsOptionsT
-    def __init__(self):
-        self.numChannels = 0 # type: int
-        self.numColumnsPerChannel = None # type: List[int]
-        self.embeddingDimPerChannel = None # type: List[int]
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        concatEmbeddingsOptions = ConcatEmbeddingsOptions()
-        concatEmbeddingsOptions.Init(buf, pos)
-        return cls.InitFromObj(concatEmbeddingsOptions)
-
-    @classmethod
-    def InitFromObj(cls, concatEmbeddingsOptions):
-        x = ConcatEmbeddingsOptionsT()
-        x._UnPack(concatEmbeddingsOptions)
-        return x
-
-    # ConcatEmbeddingsOptionsT
-    def _UnPack(self, concatEmbeddingsOptions):
-        if concatEmbeddingsOptions is None:
-            return
-        self.numChannels = concatEmbeddingsOptions.NumChannels()
-        if not concatEmbeddingsOptions.NumColumnsPerChannelIsNone():
-            if np is None:
-                self.numColumnsPerChannel = []
-                for i in range(concatEmbeddingsOptions.NumColumnsPerChannelLength()):
-                    self.numColumnsPerChannel.append(concatEmbeddingsOptions.NumColumnsPerChannel(i))
-            else:
-                self.numColumnsPerChannel = concatEmbeddingsOptions.NumColumnsPerChannelAsNumpy()
-        if not concatEmbeddingsOptions.EmbeddingDimPerChannelIsNone():
-            if np is None:
-                self.embeddingDimPerChannel = []
-                for i in range(concatEmbeddingsOptions.EmbeddingDimPerChannelLength()):
-                    self.embeddingDimPerChannel.append(concatEmbeddingsOptions.EmbeddingDimPerChannel(i))
-            else:
-                self.embeddingDimPerChannel = concatEmbeddingsOptions.EmbeddingDimPerChannelAsNumpy()
-
-    # ConcatEmbeddingsOptionsT
-    def Pack(self, builder):
-        if self.numColumnsPerChannel is not None:
-            if np is not None and type(self.numColumnsPerChannel) is np.ndarray:
-                numColumnsPerChannel = builder.CreateNumpyVector(self.numColumnsPerChannel)
-            else:
-                ConcatEmbeddingsOptionsStartNumColumnsPerChannelVector(builder, len(self.numColumnsPerChannel))
-                for i in reversed(range(len(self.numColumnsPerChannel))):
-                    builder.PrependInt32(self.numColumnsPerChannel[i])
-                numColumnsPerChannel = builder.EndVector(len(self.numColumnsPerChannel))
-        if self.embeddingDimPerChannel is not None:
-            if np is not None and type(self.embeddingDimPerChannel) is np.ndarray:
-                embeddingDimPerChannel = builder.CreateNumpyVector(self.embeddingDimPerChannel)
-            else:
-                ConcatEmbeddingsOptionsStartEmbeddingDimPerChannelVector(builder, len(self.embeddingDimPerChannel))
-                for i in reversed(range(len(self.embeddingDimPerChannel))):
-                    builder.PrependInt32(self.embeddingDimPerChannel[i])
-                embeddingDimPerChannel = builder.EndVector(len(self.embeddingDimPerChannel))
-        ConcatEmbeddingsOptionsStart(builder)
-        ConcatEmbeddingsOptionsAddNumChannels(builder, self.numChannels)
-        if self.numColumnsPerChannel is not None:
-            ConcatEmbeddingsOptionsAddNumColumnsPerChannel(builder, numColumnsPerChannel)
-        if self.embeddingDimPerChannel is not None:
-            ConcatEmbeddingsOptionsAddEmbeddingDimPerChannel(builder, embeddingDimPerChannel)
-        concatEmbeddingsOptions = ConcatEmbeddingsOptionsEnd(builder)
-        return concatEmbeddingsOptions
-
-
-
-class Tensor(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsTensor(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = Tensor()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def TensorBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # Tensor
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # Tensor
-    def Shape(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
-        return 0
-
-    # Tensor
-    def ShapeAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
-        return 0
-
-    # Tensor
-    def ShapeLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # Tensor
-    def ShapeIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        return o == 0
-
-    # Tensor
-    def Type(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # Tensor
-    def Buffer(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
-        return 0
-
-    # Tensor
-    def Name(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return self._tab.String(o + self._tab.Pos)
-        return None
-
-    # Tensor
-    def Quantization(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
-        if o != 0:
-            x = self._tab.Indirect(o + self._tab.Pos)
-            obj = QuantizationParameters()
-            obj.Init(self._tab.Bytes, x)
-            return obj
-        return None
-
-    # Tensor
-    def IsVariable(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-    # Tensor
-    def Sparsity(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
-        if o != 0:
-            x = self._tab.Indirect(o + self._tab.Pos)
-            obj = SparsityParameters()
-            obj.Init(self._tab.Bytes, x)
-            return obj
-        return None
-
-    # Tensor
-    def ShapeSignature(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
-        return 0
-
-    # Tensor
-    def ShapeSignatureAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
-        return 0
-
-    # Tensor
-    def ShapeSignatureLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # Tensor
-    def ShapeSignatureIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
-        return o == 0
-
-def TensorStart(builder): builder.StartObject(8)
-def TensorAddShape(builder, shape): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(shape), 0)
-def TensorStartShapeVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TensorAddType(builder, type): builder.PrependInt8Slot(1, type, 0)
-def TensorAddBuffer(builder, buffer): builder.PrependUint32Slot(2, buffer, 0)
-def TensorAddName(builder, name): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
-def TensorAddQuantization(builder, quantization): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(quantization), 0)
-def TensorAddIsVariable(builder, isVariable): builder.PrependBoolSlot(5, isVariable, 0)
-def TensorAddSparsity(builder, sparsity): builder.PrependUOffsetTRelativeSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(sparsity), 0)
-def TensorAddShapeSignature(builder, shapeSignature): builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(shapeSignature), 0)
-def TensorStartShapeSignatureVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TensorEnd(builder): return builder.EndObject()
-
-try:
-    from typing import List, Optional
-except:
-    pass
-
-class TensorT(object):
-
-    # TensorT
-    def __init__(self):
-        self.shape = None # type: List[int]
-        self.type = 0 # type: int
-        self.buffer = 0 # type: int
-        self.name = None # type: str
-        self.quantization = None # type: Optional[QuantizationParametersT]
-        self.isVariable = False # type: bool
-        self.sparsity = None # type: Optional[SparsityParametersT]
-        self.shapeSignature = None # type: List[int]
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        tensor = Tensor()
-        tensor.Init(buf, pos)
-        return cls.InitFromObj(tensor)
-
-    @classmethod
-    def InitFromObj(cls, tensor):
-        x = TensorT()
-        x._UnPack(tensor)
-        return x
-
-    # TensorT
-    def _UnPack(self, tensor):
-        if tensor is None:
-            return
-        if not tensor.ShapeIsNone():
-            if np is None:
-                self.shape = []
-                for i in range(tensor.ShapeLength()):
-                    self.shape.append(tensor.Shape(i))
-            else:
-                self.shape = tensor.ShapeAsNumpy()
-        self.type = tensor.Type()
-        self.buffer = tensor.Buffer()
-        self.name = tensor.Name()
-        if tensor.Quantization() is not None:
-            self.quantization = QuantizationParametersT.InitFromObj(tensor.Quantization())
-        self.isVariable = tensor.IsVariable()
-        if tensor.Sparsity() is not None:
-            self.sparsity = SparsityParametersT.InitFromObj(tensor.Sparsity())
-        if not tensor.ShapeSignatureIsNone():
-            if np is None:
-                self.shapeSignature = []
-                for i in range(tensor.ShapeSignatureLength()):
-                    self.shapeSignature.append(tensor.ShapeSignature(i))
-            else:
-                self.shapeSignature = tensor.ShapeSignatureAsNumpy()
-
-    # TensorT
-    def Pack(self, builder):
-        if self.shape is not None:
-            if np is not None and type(self.shape) is np.ndarray:
-                shape = builder.CreateNumpyVector(self.shape)
-            else:
-                TensorStartShapeVector(builder, len(self.shape))
-                for i in reversed(range(len(self.shape))):
-                    builder.PrependInt32(self.shape[i])
-                shape = builder.EndVector(len(self.shape))
-        if self.name is not None:
-            name = builder.CreateString(self.name)
-        if self.quantization is not None:
-            quantization = self.quantization.Pack(builder)
-        if self.sparsity is not None:
-            sparsity = self.sparsity.Pack(builder)
-        if self.shapeSignature is not None:
-            if np is not None and type(self.shapeSignature) is np.ndarray:
-                shapeSignature = builder.CreateNumpyVector(self.shapeSignature)
-            else:
-                TensorStartShapeSignatureVector(builder, len(self.shapeSignature))
-                for i in reversed(range(len(self.shapeSignature))):
-                    builder.PrependInt32(self.shapeSignature[i])
-                shapeSignature = builder.EndVector(len(self.shapeSignature))
-        TensorStart(builder)
-        if self.shape is not None:
-            TensorAddShape(builder, shape)
-        TensorAddType(builder, self.type)
-        TensorAddBuffer(builder, self.buffer)
-        if self.name is not None:
-            TensorAddName(builder, name)
-        if self.quantization is not None:
-            TensorAddQuantization(builder, quantization)
-        TensorAddIsVariable(builder, self.isVariable)
-        if self.sparsity is not None:
-            TensorAddSparsity(builder, sparsity)
-        if self.shapeSignature is not None:
-            TensorAddShapeSignature(builder, shapeSignature)
-        tensor = TensorEnd(builder)
-        return tensor
-
-
-
-class SplitVOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsSplitVOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SplitVOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def SplitVOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SplitVOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # SplitVOptions
-    def NumSplits(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-def SplitVOptionsStart(builder): builder.StartObject(1)
-def SplitVOptionsAddNumSplits(builder, numSplits): builder.PrependInt32Slot(0, numSplits, 0)
-def SplitVOptionsEnd(builder): return builder.EndObject()
-
-
-class SplitVOptionsT(object):
-
-    # SplitVOptionsT
-    def __init__(self):
-        self.numSplits = 0 # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        splitVOptions = SplitVOptions()
-        splitVOptions.Init(buf, pos)
-        return cls.InitFromObj(splitVOptions)
-
-    @classmethod
-    def InitFromObj(cls, splitVOptions):
-        x = SplitVOptionsT()
-        x._UnPack(splitVOptions)
-        return x
-
-    # SplitVOptionsT
-    def _UnPack(self, splitVOptions):
-        if splitVOptions is None:
-            return
-        self.numSplits = splitVOptions.NumSplits()
-
-    # SplitVOptionsT
-    def Pack(self, builder):
-        SplitVOptionsStart(builder)
-        SplitVOptionsAddNumSplits(builder, self.numSplits)
-        splitVOptions = SplitVOptionsEnd(builder)
-        return splitVOptions
-
-
-
-class SkipGramOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsSkipGramOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SkipGramOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def SkipGramOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SkipGramOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # SkipGramOptions
-    def NgramSize(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # SkipGramOptions
-    def MaxSkipSize(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # SkipGramOptions
-    def IncludeAllNgrams(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-def SkipGramOptionsStart(builder): builder.StartObject(3)
-def SkipGramOptionsAddNgramSize(builder, ngramSize): builder.PrependInt32Slot(0, ngramSize, 0)
-def SkipGramOptionsAddMaxSkipSize(builder, maxSkipSize): builder.PrependInt32Slot(1, maxSkipSize, 0)
-def SkipGramOptionsAddIncludeAllNgrams(builder, includeAllNgrams): builder.PrependBoolSlot(2, includeAllNgrams, 0)
-def SkipGramOptionsEnd(builder): return builder.EndObject()
-
-
-class SkipGramOptionsT(object):
-
-    # SkipGramOptionsT
-    def __init__(self):
-        self.ngramSize = 0 # type: int
-        self.maxSkipSize = 0 # type: int
-        self.includeAllNgrams = False # type: bool
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        skipGramOptions = SkipGramOptions()
-        skipGramOptions.Init(buf, pos)
-        return cls.InitFromObj(skipGramOptions)
-
-    @classmethod
-    def InitFromObj(cls, skipGramOptions):
-        x = SkipGramOptionsT()
-        x._UnPack(skipGramOptions)
-        return x
-
-    # SkipGramOptionsT
-    def _UnPack(self, skipGramOptions):
-        if skipGramOptions is None:
-            return
-        self.ngramSize = skipGramOptions.NgramSize()
-        self.maxSkipSize = skipGramOptions.MaxSkipSize()
-        self.includeAllNgrams = skipGramOptions.IncludeAllNgrams()
-
-    # SkipGramOptionsT
-    def Pack(self, builder):
-        SkipGramOptionsStart(builder)
-        SkipGramOptionsAddNgramSize(builder, self.ngramSize)
-        SkipGramOptionsAddMaxSkipSize(builder, self.maxSkipSize)
-        SkipGramOptionsAddIncludeAllNgrams(builder, self.includeAllNgrams)
-        skipGramOptions = SkipGramOptionsEnd(builder)
-        return skipGramOptions
-
-
-class LSHProjectionType(object):
-    UNKNOWN = 0
-    SPARSE = 1
-    DENSE = 2
-
-
-
-class LSTMKernelType(object):
-    FULL = 0
-    BASIC = 1
-
-
-
-
-class RangeOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsRangeOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = RangeOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def RangeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # RangeOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def RangeOptionsStart(builder): builder.StartObject(0)
-def RangeOptionsEnd(builder): return builder.EndObject()
-
-
-class RangeOptionsT(object):
-
-    # RangeOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        rangeOptions = RangeOptions()
-        rangeOptions.Init(buf, pos)
-        return cls.InitFromObj(rangeOptions)
-
-    @classmethod
-    def InitFromObj(cls, rangeOptions):
-        x = RangeOptionsT()
-        x._UnPack(rangeOptions)
-        return x
-
-    # RangeOptionsT
-    def _UnPack(self, rangeOptions):
-        if rangeOptions is None:
-            return
-
-    # RangeOptionsT
-    def Pack(self, builder):
-        RangeOptionsStart(builder)
-        rangeOptions = RangeOptionsEnd(builder)
-        return rangeOptions
-
-
-
-class RNNOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsRNNOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = RNNOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def RNNOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # RNNOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # RNNOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # RNNOptions
-    def AsymmetricQuantizeInputs(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-def RNNOptionsStart(builder): builder.StartObject(2)
-def RNNOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0)
-def RNNOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(1, asymmetricQuantizeInputs, 0)
-def RNNOptionsEnd(builder): return builder.EndObject()
-
-
-class RNNOptionsT(object):
-
-    # RNNOptionsT
-    def __init__(self):
-        self.fusedActivationFunction = 0 # type: int
-        self.asymmetricQuantizeInputs = False # type: bool
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        rNNOptions = RNNOptions()
-        rNNOptions.Init(buf, pos)
-        return cls.InitFromObj(rNNOptions)
-
-    @classmethod
-    def InitFromObj(cls, rNNOptions):
-        x = RNNOptionsT()
-        x._UnPack(rNNOptions)
-        return x
-
-    # RNNOptionsT
-    def _UnPack(self, rNNOptions):
-        if rNNOptions is None:
-            return
-        self.fusedActivationFunction = rNNOptions.FusedActivationFunction()
-        self.asymmetricQuantizeInputs = rNNOptions.AsymmetricQuantizeInputs()
-
-    # RNNOptionsT
-    def Pack(self, builder):
-        RNNOptionsStart(builder)
-        RNNOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        RNNOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
-        rNNOptions = RNNOptionsEnd(builder)
-        return rNNOptions
-
-
-
-class MatrixSetDiagOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsMatrixSetDiagOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = MatrixSetDiagOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def MatrixSetDiagOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # MatrixSetDiagOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def MatrixSetDiagOptionsStart(builder): builder.StartObject(0)
-def MatrixSetDiagOptionsEnd(builder): return builder.EndObject()
-
-
-class MatrixSetDiagOptionsT(object):
-
-    # MatrixSetDiagOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        matrixSetDiagOptions = MatrixSetDiagOptions()
-        matrixSetDiagOptions.Init(buf, pos)
-        return cls.InitFromObj(matrixSetDiagOptions)
-
-    @classmethod
-    def InitFromObj(cls, matrixSetDiagOptions):
-        x = MatrixSetDiagOptionsT()
-        x._UnPack(matrixSetDiagOptions)
-        return x
-
-    # MatrixSetDiagOptionsT
-    def _UnPack(self, matrixSetDiagOptions):
-        if matrixSetDiagOptions is None:
-            return
-
-    # MatrixSetDiagOptionsT
-    def Pack(self, builder):
-        MatrixSetDiagOptionsStart(builder)
-        matrixSetDiagOptions = MatrixSetDiagOptionsEnd(builder)
-        return matrixSetDiagOptions
-
-
-
-class ZerosLikeOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsZerosLikeOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = ZerosLikeOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def ZerosLikeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # ZerosLikeOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def ZerosLikeOptionsStart(builder): builder.StartObject(0)
-def ZerosLikeOptionsEnd(builder): return builder.EndObject()
-
-
-class ZerosLikeOptionsT(object):
-
-    # ZerosLikeOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        zerosLikeOptions = ZerosLikeOptions()
-        zerosLikeOptions.Init(buf, pos)
-        return cls.InitFromObj(zerosLikeOptions)
-
-    @classmethod
-    def InitFromObj(cls, zerosLikeOptions):
-        x = ZerosLikeOptionsT()
-        x._UnPack(zerosLikeOptions)
-        return x
-
-    # ZerosLikeOptionsT
-    def _UnPack(self, zerosLikeOptions):
-        if zerosLikeOptions is None:
-            return
-
-    # ZerosLikeOptionsT
-    def Pack(self, builder):
-        ZerosLikeOptionsStart(builder)
-        zerosLikeOptions = ZerosLikeOptionsEnd(builder)
-        return zerosLikeOptions
-
-
-class CombinerType(object):
-    SUM = 0
-    MEAN = 1
-    SQRTN = 2
-
-
-
-
-class HardSwishOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsHardSwishOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = HardSwishOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def HardSwishOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # HardSwishOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def HardSwishOptionsStart(builder): builder.StartObject(0)
-def HardSwishOptionsEnd(builder): return builder.EndObject()
-
-
-class HardSwishOptionsT(object):
-
-    # HardSwishOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        hardSwishOptions = HardSwishOptions()
-        hardSwishOptions.Init(buf, pos)
-        return cls.InitFromObj(hardSwishOptions)
-
-    @classmethod
-    def InitFromObj(cls, hardSwishOptions):
-        x = HardSwishOptionsT()
-        x._UnPack(hardSwishOptions)
-        return x
-
-    # HardSwishOptionsT
-    def _UnPack(self, hardSwishOptions):
-        if hardSwishOptions is None:
-            return
-
-    # HardSwishOptionsT
-    def Pack(self, builder):
-        HardSwishOptionsStart(builder)
-        hardSwishOptions = HardSwishOptionsEnd(builder)
-        return hardSwishOptions
-
-
-
-class MaximumMinimumOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsMaximumMinimumOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = MaximumMinimumOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def MaximumMinimumOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # MaximumMinimumOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def MaximumMinimumOptionsStart(builder): builder.StartObject(0)
-def MaximumMinimumOptionsEnd(builder): return builder.EndObject()
-
-
-class MaximumMinimumOptionsT(object):
-
-    # MaximumMinimumOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        maximumMinimumOptions = MaximumMinimumOptions()
-        maximumMinimumOptions.Init(buf, pos)
-        return cls.InitFromObj(maximumMinimumOptions)
-
-    @classmethod
-    def InitFromObj(cls, maximumMinimumOptions):
-        x = MaximumMinimumOptionsT()
-        x._UnPack(maximumMinimumOptions)
-        return x
-
-    # MaximumMinimumOptionsT
-    def _UnPack(self, maximumMinimumOptions):
-        if maximumMinimumOptions is None:
-            return
-
-    # MaximumMinimumOptionsT
-    def Pack(self, builder):
-        MaximumMinimumOptionsStart(builder)
-        maximumMinimumOptions = MaximumMinimumOptionsEnd(builder)
-        return maximumMinimumOptions
-
-
-
-class ResizeBilinearOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsResizeBilinearOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = ResizeBilinearOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def ResizeBilinearOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # ResizeBilinearOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # ResizeBilinearOptions
-    def AlignCorners(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-    # ResizeBilinearOptions
-    def HalfPixelCenters(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-def ResizeBilinearOptionsStart(builder): builder.StartObject(4)
-def ResizeBilinearOptionsAddAlignCorners(builder, alignCorners): builder.PrependBoolSlot(2, alignCorners, 0)
-def ResizeBilinearOptionsAddHalfPixelCenters(builder, halfPixelCenters): builder.PrependBoolSlot(3, halfPixelCenters, 0)
-def ResizeBilinearOptionsEnd(builder): return builder.EndObject()
-
-
-class ResizeBilinearOptionsT(object):
-
- # ResizeBilinearOptionsT - def __init__(self): - self.alignCorners = False # type: bool - self.halfPixelCenters = False # type: bool - - @classmethod - def InitFromBuf(cls, buf, pos): - resizeBilinearOptions = ResizeBilinearOptions() - resizeBilinearOptions.Init(buf, pos) - return cls.InitFromObj(resizeBilinearOptions) - - @classmethod - def InitFromObj(cls, resizeBilinearOptions): - x = ResizeBilinearOptionsT() - x._UnPack(resizeBilinearOptions) - return x - - # ResizeBilinearOptionsT - def _UnPack(self, resizeBilinearOptions): - if resizeBilinearOptions is None: - return - self.alignCorners = resizeBilinearOptions.AlignCorners() - self.halfPixelCenters = resizeBilinearOptions.HalfPixelCenters() - - # ResizeBilinearOptionsT - def Pack(self, builder): - ResizeBilinearOptionsStart(builder) - ResizeBilinearOptionsAddAlignCorners(builder, self.alignCorners) - ResizeBilinearOptionsAddHalfPixelCenters(builder, self.halfPixelCenters) - resizeBilinearOptions = ResizeBilinearOptionsEnd(builder) - return resizeBilinearOptions - - - -class ConcatenationOptions(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsConcatenationOptions(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = ConcatenationOptions() - x.Init(buf, n + offset) - return x - - @classmethod - def ConcatenationOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) - - # ConcatenationOptions - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - - # ConcatenationOptions - def Axis(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) - return 0 - - # ConcatenationOptions - def FusedActivationFunction(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) - if o != 0: - return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) - return 0 - -def ConcatenationOptionsStart(builder): builder.StartObject(2) -def ConcatenationOptionsAddAxis(builder, axis): builder.PrependInt32Slot(0, axis, 0) -def ConcatenationOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(1, fusedActivationFunction, 0) -def ConcatenationOptionsEnd(builder): return builder.EndObject() - - -class ConcatenationOptionsT(object): - - # ConcatenationOptionsT - def __init__(self): - self.axis = 0 # type: int - self.fusedActivationFunction = 0 # type: int - - @classmethod - def InitFromBuf(cls, buf, pos): - concatenationOptions = ConcatenationOptions() - concatenationOptions.Init(buf, pos) - return cls.InitFromObj(concatenationOptions) - - @classmethod - def InitFromObj(cls, concatenationOptions): - x = ConcatenationOptionsT() - x._UnPack(concatenationOptions) - return x - - # ConcatenationOptionsT - def _UnPack(self, concatenationOptions): - if concatenationOptions is None: - return - self.axis = concatenationOptions.Axis() - self.fusedActivationFunction = concatenationOptions.FusedActivationFunction() - - # ConcatenationOptionsT - def Pack(self, builder): - ConcatenationOptionsStart(builder) - ConcatenationOptionsAddAxis(builder, self.axis) - ConcatenationOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction) - concatenationOptions = ConcatenationOptionsEnd(builder) - return concatenationOptions - - - -class LogSoftmaxOptions(object): - __slots__ = ['_tab'] - 
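For reference, a minimal sketch (not part of the patch) of how the generated object API above is typically used, assuming the `flatbuffers` pip package and that this module is importable as `schema_py_generated` (the module name is an assumption):

import flatbuffers
from schema_py_generated import ResizeBilinearOptions, ResizeBilinearOptionsT

# Fill in a mutable "T" object, pack it, then read it back via the table API.
opts = ResizeBilinearOptionsT()
opts.alignCorners = True

builder = flatbuffers.Builder(0)
builder.Finish(opts.Pack(builder))

table = ResizeBilinearOptions.GetRootAsResizeBilinearOptions(builder.Output(), 0)
roundtrip = ResizeBilinearOptionsT.InitFromObj(table)
assert roundtrip.alignCorners is True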
-class LogSoftmaxOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsLogSoftmaxOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = LogSoftmaxOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def LogSoftmaxOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # LogSoftmaxOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def LogSoftmaxOptionsStart(builder): builder.StartObject(0)
-def LogSoftmaxOptionsEnd(builder): return builder.EndObject()
-
-
-class LogSoftmaxOptionsT(object):
-
-    # LogSoftmaxOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        logSoftmaxOptions = LogSoftmaxOptions()
-        logSoftmaxOptions.Init(buf, pos)
-        return cls.InitFromObj(logSoftmaxOptions)
-
-    @classmethod
-    def InitFromObj(cls, logSoftmaxOptions):
-        x = LogSoftmaxOptionsT()
-        x._UnPack(logSoftmaxOptions)
-        return x
-
-    # LogSoftmaxOptionsT
-    def _UnPack(self, logSoftmaxOptions):
-        if logSoftmaxOptions is None:
-            return
-
-    # LogSoftmaxOptionsT
-    def Pack(self, builder):
-        LogSoftmaxOptionsStart(builder)
-        logSoftmaxOptions = LogSoftmaxOptionsEnd(builder)
-        return logSoftmaxOptions
-
-
-
-class SplitOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsSplitOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SplitOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def SplitOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SplitOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # SplitOptions
-    def NumSplits(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-def SplitOptionsStart(builder): builder.StartObject(1)
-def SplitOptionsAddNumSplits(builder, numSplits): builder.PrependInt32Slot(0, numSplits, 0)
-def SplitOptionsEnd(builder): return builder.EndObject()
-
-
-class SplitOptionsT(object):
-
-    # SplitOptionsT
-    def __init__(self):
-        self.numSplits = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        splitOptions = SplitOptions()
-        splitOptions.Init(buf, pos)
-        return cls.InitFromObj(splitOptions)
-
-    @classmethod
-    def InitFromObj(cls, splitOptions):
-        x = SplitOptionsT()
-        x._UnPack(splitOptions)
-        return x
-
-    # SplitOptionsT
-    def _UnPack(self, splitOptions):
-        if splitOptions is None:
-            return
-        self.numSplits = splitOptions.NumSplits()
-
-    # SplitOptionsT
-    def Pack(self, builder):
-        SplitOptionsStart(builder)
-        SplitOptionsAddNumSplits(builder, self.numSplits)
-        splitOptions = SplitOptionsEnd(builder)
-        return splitOptions
-
-
-
-class SelectV2Options(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsSelectV2Options(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SelectV2Options()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def SelectV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SelectV2Options
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def SelectV2OptionsStart(builder): builder.StartObject(0)
-def SelectV2OptionsEnd(builder): return builder.EndObject()
-
-
-class SelectV2OptionsT(object):
-
-    # SelectV2OptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        selectV2Options = SelectV2Options()
-        selectV2Options.Init(buf, pos)
-        return cls.InitFromObj(selectV2Options)
-
-    @classmethod
-    def InitFromObj(cls, selectV2Options):
-        x = SelectV2OptionsT()
-        x._UnPack(selectV2Options)
-        return x
-
-    # SelectV2OptionsT
-    def _UnPack(self, selectV2Options):
-        if selectV2Options is None:
-            return
-
-    # SelectV2OptionsT
-    def Pack(self, builder):
-        SelectV2OptionsStart(builder)
-        selectV2Options = SelectV2OptionsEnd(builder)
-        return selectV2Options
-
-
-class FullyConnectedOptionsWeightsFormat(object):
-    DEFAULT = 0
-    SHUFFLED4x16INT8 = 1
-
-
-
-class SparseIndexVector(object):
-    NONE = 0
-    Int32Vector = 1
-    Uint16Vector = 2
-    Uint8Vector = 3
-
-
-def SparseIndexVectorCreator(unionType, table):
-    from flatbuffers.table import Table
-    if not isinstance(table, Table):
-        return None
-    if unionType == SparseIndexVector().Int32Vector:
-        return Int32VectorT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == SparseIndexVector().Uint16Vector:
-        return Uint16VectorT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == SparseIndexVector().Uint8Vector:
-        return Uint8VectorT.InitFromBuf(table.Bytes, table.Pos)
-    return None
-
-
-
-class SquaredDifferenceOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsSquaredDifferenceOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SquaredDifferenceOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def SquaredDifferenceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SquaredDifferenceOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def SquaredDifferenceOptionsStart(builder): builder.StartObject(0)
-def SquaredDifferenceOptionsEnd(builder): return builder.EndObject()
-
-
-class SquaredDifferenceOptionsT(object):
-
-    # SquaredDifferenceOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        squaredDifferenceOptions = SquaredDifferenceOptions()
-        squaredDifferenceOptions.Init(buf, pos)
-        return cls.InitFromObj(squaredDifferenceOptions)
-
-    @classmethod
-    def InitFromObj(cls, squaredDifferenceOptions):
-        x = SquaredDifferenceOptionsT()
-        x._UnPack(squaredDifferenceOptions)
-        return x
-
-    # SquaredDifferenceOptionsT
-    def _UnPack(self, squaredDifferenceOptions):
-        if squaredDifferenceOptions is None:
-            return
-
-    # SquaredDifferenceOptionsT
-    def Pack(self, builder):
-        SquaredDifferenceOptionsStart(builder)
-        squaredDifferenceOptions = SquaredDifferenceOptionsEnd(builder)
-        return squaredDifferenceOptions
-
-
-
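A sketch (not part of the patch) of how a union creator like SparseIndexVectorCreator above is meant to be consumed; `union_type` is the companion `*Type()` discriminant and `table` the flatbuffers.table.Table returned by a union accessor such as DimensionMetadata.ArraySegments() further down in this file. The helper name is hypothetical:

def unpack_sparse_index_vector(union_type, table):
    # Dispatch on the union discriminant; returns an Int32VectorT,
    # Uint16VectorT or Uint8VectorT, or None for SparseIndexVector.NONE.
    obj = SparseIndexVectorCreator(union_type, table)
    if obj is None:
        return None
    return obj.values  # all three generated T-classes expose .values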
-class ReverseSequenceOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsReverseSequenceOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = ReverseSequenceOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def ReverseSequenceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # ReverseSequenceOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # ReverseSequenceOptions
-    def SeqDim(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # ReverseSequenceOptions
-    def BatchDim(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-def ReverseSequenceOptionsStart(builder): builder.StartObject(2)
-def ReverseSequenceOptionsAddSeqDim(builder, seqDim): builder.PrependInt32Slot(0, seqDim, 0)
-def ReverseSequenceOptionsAddBatchDim(builder, batchDim): builder.PrependInt32Slot(1, batchDim, 0)
-def ReverseSequenceOptionsEnd(builder): return builder.EndObject()
-
-
-class ReverseSequenceOptionsT(object):
-
-    # ReverseSequenceOptionsT
-    def __init__(self):
-        self.seqDim = 0  # type: int
-        self.batchDim = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        reverseSequenceOptions = ReverseSequenceOptions()
-        reverseSequenceOptions.Init(buf, pos)
-        return cls.InitFromObj(reverseSequenceOptions)
-
-    @classmethod
-    def InitFromObj(cls, reverseSequenceOptions):
-        x = ReverseSequenceOptionsT()
-        x._UnPack(reverseSequenceOptions)
-        return x
-
-    # ReverseSequenceOptionsT
-    def _UnPack(self, reverseSequenceOptions):
-        if reverseSequenceOptions is None:
-            return
-        self.seqDim = reverseSequenceOptions.SeqDim()
-        self.batchDim = reverseSequenceOptions.BatchDim()
-
-    # ReverseSequenceOptionsT
-    def Pack(self, builder):
-        ReverseSequenceOptionsStart(builder)
-        ReverseSequenceOptionsAddSeqDim(builder, self.seqDim)
-        ReverseSequenceOptionsAddBatchDim(builder, self.batchDim)
-        reverseSequenceOptions = ReverseSequenceOptionsEnd(builder)
-        return reverseSequenceOptions
-
-
-
-class ShapeOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsShapeOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = ShapeOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def ShapeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # ShapeOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # ShapeOptions
-    def OutType(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-def ShapeOptionsStart(builder): builder.StartObject(1)
-def ShapeOptionsAddOutType(builder, outType): builder.PrependInt8Slot(0, outType, 0)
-def ShapeOptionsEnd(builder): return builder.EndObject()
-
-
-class ShapeOptionsT(object):
-
-    # ShapeOptionsT
-    def __init__(self):
-        self.outType = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        shapeOptions = ShapeOptions()
-        shapeOptions.Init(buf, pos)
-        return cls.InitFromObj(shapeOptions)
-
-    @classmethod
-    def InitFromObj(cls, shapeOptions):
-        x = ShapeOptionsT()
-        x._UnPack(shapeOptions)
-        return x
-
-    # ShapeOptionsT
-    def _UnPack(self, shapeOptions):
-        if shapeOptions is None:
-            return
-        self.outType = shapeOptions.OutType()
-
-    # ShapeOptionsT
-    def Pack(self, builder):
-        ShapeOptionsStart(builder)
-        ShapeOptionsAddOutType(builder, self.outType)
-        shapeOptions = ShapeOptionsEnd(builder)
-        return shapeOptions
-
-
-
-class ReverseV2Options(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsReverseV2Options(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = ReverseV2Options()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def ReverseV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # ReverseV2Options
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def ReverseV2OptionsStart(builder): builder.StartObject(0)
-def ReverseV2OptionsEnd(builder): return builder.EndObject()
-
-
-class ReverseV2OptionsT(object):
-
-    # ReverseV2OptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        reverseV2Options = ReverseV2Options()
-        reverseV2Options.Init(buf, pos)
-        return cls.InitFromObj(reverseV2Options)
-
-    @classmethod
-    def InitFromObj(cls, reverseV2Options):
-        x = ReverseV2OptionsT()
-        x._UnPack(reverseV2Options)
-        return x
-
-    # ReverseV2OptionsT
-    def _UnPack(self, reverseV2Options):
-        if reverseV2Options is None:
-            return
-
-    # ReverseV2OptionsT
-    def Pack(self, builder):
-        ReverseV2OptionsStart(builder)
-        reverseV2Options = ReverseV2OptionsEnd(builder)
-        return reverseV2Options
-
-
-
-class Operator(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsOperator(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = Operator()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def OperatorBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # Operator
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # Operator
-    def OpcodeIndex(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
-        return 0
-
-    # Operator
-    def Inputs(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
-        return 0
-
-    # Operator
-    def InputsAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
-        return 0
-
-    # Operator
-    def InputsLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # Operator
-    def InputsIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        return o == 0
-
-    # Operator
-    def Outputs(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
-        return 0
-
-    # Operator
-    def OutputsAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
-        return 0
-
-    # Operator
-    def OutputsLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # Operator
-    def OutputsIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        return o == 0
-
-    # Operator
-    def BuiltinOptionsType(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
-        return 0
-
-    # Operator
-    def BuiltinOptions(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
-        if o != 0:
-            from flatbuffers.table import Table
-            obj = Table(bytearray(), 0)
-            self._tab.Union(obj, o)
-            return obj
-        return None
-
-    # Operator
-    def CustomOptions(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
-        return 0
-
-    # Operator
-    def CustomOptionsAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
-        return 0
-
-    # Operator
-    def CustomOptionsLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # Operator
-    def CustomOptionsIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
-        return o == 0
-
-    # Operator
-    def CustomOptionsFormat(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # Operator
-    def MutatingVariableInputs(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.BoolFlags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
-        return 0
-
-    # Operator
-    def MutatingVariableInputsAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.BoolFlags, o)
-        return 0
-
-    # Operator
-    def MutatingVariableInputsLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # Operator
-    def MutatingVariableInputsIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
-        return o == 0
-
-    # Operator
-    def Intermediates(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
-        return 0
-
-    # Operator
-    def IntermediatesAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
-        return 0
-
-    # Operator
-    def IntermediatesLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # Operator
-    def IntermediatesIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
-        return o == 0
-
-def OperatorStart(builder): builder.StartObject(9)
-def OperatorAddOpcodeIndex(builder, opcodeIndex): builder.PrependUint32Slot(0, opcodeIndex, 0)
-def OperatorAddInputs(builder, inputs): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0)
-def OperatorStartInputsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def OperatorAddOutputs(builder, outputs): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0)
-def OperatorStartOutputsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def OperatorAddBuiltinOptionsType(builder, builtinOptionsType): builder.PrependUint8Slot(3, builtinOptionsType, 0)
-def OperatorAddBuiltinOptions(builder, builtinOptions): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(builtinOptions), 0)
-def OperatorAddCustomOptions(builder, customOptions): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(customOptions), 0)
-def OperatorStartCustomOptionsVector(builder, numElems): return builder.StartVector(1, numElems, 1)
-def OperatorAddCustomOptionsFormat(builder, customOptionsFormat): builder.PrependInt8Slot(6, customOptionsFormat, 0)
-def OperatorAddMutatingVariableInputs(builder, mutatingVariableInputs): builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(mutatingVariableInputs), 0)
-def OperatorStartMutatingVariableInputsVector(builder, numElems): return builder.StartVector(1, numElems, 1)
-def OperatorAddIntermediates(builder, intermediates): builder.PrependUOffsetTRelativeSlot(8, flatbuffers.number_types.UOffsetTFlags.py_type(intermediates), 0)
-def OperatorStartIntermediatesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def OperatorEnd(builder): return builder.EndObject()
-
-try:
-    from typing import List, Union
-except:
-    pass
-
-class OperatorT(object):
-
-    # OperatorT
-    def __init__(self):
-        self.opcodeIndex = 0  # type: int
-        self.inputs = None  # type: List[int]
-        self.outputs = None  # type: List[int]
-        self.builtinOptionsType = 0  # type: int
-        self.builtinOptions = None  # type: Union[None, Conv2DOptionsT, DepthwiseConv2DOptionsT, ConcatEmbeddingsOptionsT, LSHProjectionOptionsT, Pool2DOptionsT, SVDFOptionsT, RNNOptionsT, FullyConnectedOptionsT, SoftmaxOptionsT, ConcatenationOptionsT, AddOptionsT, L2NormOptionsT, LocalResponseNormalizationOptionsT, LSTMOptionsT, ResizeBilinearOptionsT, CallOptionsT, ReshapeOptionsT, SkipGramOptionsT, SpaceToDepthOptionsT, EmbeddingLookupSparseOptionsT, MulOptionsT, PadOptionsT, GatherOptionsT, BatchToSpaceNDOptionsT, SpaceToBatchNDOptionsT, TransposeOptionsT, ReducerOptionsT, SubOptionsT, DivOptionsT, SqueezeOptionsT, SequenceRNNOptionsT, StridedSliceOptionsT, ExpOptionsT, TopKV2OptionsT, SplitOptionsT, LogSoftmaxOptionsT, CastOptionsT, DequantizeOptionsT, MaximumMinimumOptionsT, ArgMaxOptionsT, LessOptionsT, NegOptionsT, PadV2OptionsT, GreaterOptionsT, GreaterEqualOptionsT, LessEqualOptionsT, SelectOptionsT, SliceOptionsT, TransposeConvOptionsT, SparseToDenseOptionsT, TileOptionsT, ExpandDimsOptionsT, EqualOptionsT, NotEqualOptionsT, ShapeOptionsT, PowOptionsT, ArgMinOptionsT, FakeQuantOptionsT, PackOptionsT, LogicalOrOptionsT, OneHotOptionsT, LogicalAndOptionsT, LogicalNotOptionsT, UnpackOptionsT, FloorDivOptionsT, SquareOptionsT, ZerosLikeOptionsT, FillOptionsT, BidirectionalSequenceLSTMOptionsT, BidirectionalSequenceRNNOptionsT, UnidirectionalSequenceLSTMOptionsT, FloorModOptionsT, RangeOptionsT, ResizeNearestNeighborOptionsT, LeakyReluOptionsT, SquaredDifferenceOptionsT, MirrorPadOptionsT, AbsOptionsT, SplitVOptionsT, UniqueOptionsT, ReverseV2OptionsT, AddNOptionsT, GatherNdOptionsT, CosOptionsT, WhereOptionsT, RankOptionsT, ReverseSequenceOptionsT, MatrixDiagOptionsT, QuantizeOptionsT, MatrixSetDiagOptionsT, HardSwishOptionsT, IfOptionsT, WhileOptionsT, DepthToSpaceOptionsT, NonMaxSuppressionV4OptionsT, NonMaxSuppressionV5OptionsT, ScatterNdOptionsT, SelectV2OptionsT, DensifyOptionsT, SegmentSumOptionsT, BatchMatMulOptionsT]
-        self.customOptions = None  # type: List[int]
-        self.customOptionsFormat = 0  # type: int
-        self.mutatingVariableInputs = None  # type: List[bool]
-        self.intermediates = None  # type: List[int]
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        operator = Operator()
-        operator.Init(buf, pos)
-        return cls.InitFromObj(operator)
-
-    @classmethod
-    def InitFromObj(cls, operator):
-        x = OperatorT()
-        x._UnPack(operator)
-        return x
-
-    # OperatorT
-    def _UnPack(self, operator):
-        if operator is None:
-            return
-        self.opcodeIndex = operator.OpcodeIndex()
-        if not operator.InputsIsNone():
-            if np is None:
-                self.inputs = []
-                for i in range(operator.InputsLength()):
-                    self.inputs.append(operator.Inputs(i))
-            else:
-                self.inputs = operator.InputsAsNumpy()
-        if not operator.OutputsIsNone():
-            if np is None:
-                self.outputs = []
-                for i in range(operator.OutputsLength()):
-                    self.outputs.append(operator.Outputs(i))
-            else:
-                self.outputs = operator.OutputsAsNumpy()
-        self.builtinOptionsType = operator.BuiltinOptionsType()
-        self.builtinOptions = BuiltinOptionsCreator(self.builtinOptionsType, operator.BuiltinOptions())
-        if not operator.CustomOptionsIsNone():
-            if np is None:
-                self.customOptions = []
-                for i in range(operator.CustomOptionsLength()):
-                    self.customOptions.append(operator.CustomOptions(i))
-            else:
-                self.customOptions = operator.CustomOptionsAsNumpy()
-        self.customOptionsFormat = operator.CustomOptionsFormat()
-        if not operator.MutatingVariableInputsIsNone():
-            if np is None:
-                self.mutatingVariableInputs = []
-                for i in range(operator.MutatingVariableInputsLength()):
-                    self.mutatingVariableInputs.append(operator.MutatingVariableInputs(i))
-            else:
-                self.mutatingVariableInputs = operator.MutatingVariableInputsAsNumpy()
-        if not operator.IntermediatesIsNone():
-            if np is None:
-                self.intermediates = []
-                for i in range(operator.IntermediatesLength()):
-                    self.intermediates.append(operator.Intermediates(i))
-            else:
-                self.intermediates = operator.IntermediatesAsNumpy()
-
-    # OperatorT
-    def Pack(self, builder):
-        if self.inputs is not None:
-            if np is not None and type(self.inputs) is np.ndarray:
-                inputs = builder.CreateNumpyVector(self.inputs)
-            else:
-                OperatorStartInputsVector(builder, len(self.inputs))
-                for i in reversed(range(len(self.inputs))):
-                    builder.PrependInt32(self.inputs[i])
-                inputs = builder.EndVector(len(self.inputs))
-        if self.outputs is not None:
-            if np is not None and type(self.outputs) is np.ndarray:
-                outputs = builder.CreateNumpyVector(self.outputs)
-            else:
-                OperatorStartOutputsVector(builder, len(self.outputs))
-                for i in reversed(range(len(self.outputs))):
-                    builder.PrependInt32(self.outputs[i])
-                outputs = builder.EndVector(len(self.outputs))
-        if self.builtinOptions is not None:
-            builtinOptions = self.builtinOptions.Pack(builder)
-        if self.customOptions is not None:
-            if np is not None and type(self.customOptions) is np.ndarray:
-                customOptions = builder.CreateNumpyVector(self.customOptions)
-            else:
-                OperatorStartCustomOptionsVector(builder, len(self.customOptions))
-                for i in reversed(range(len(self.customOptions))):
-                    builder.PrependUint8(self.customOptions[i])
-                customOptions = builder.EndVector(len(self.customOptions))
-        if self.mutatingVariableInputs is not None:
-            if np is not None and type(self.mutatingVariableInputs) is np.ndarray:
-                mutatingVariableInputs = builder.CreateNumpyVector(self.mutatingVariableInputs)
-            else:
-                OperatorStartMutatingVariableInputsVector(builder, len(self.mutatingVariableInputs))
-                for i in reversed(range(len(self.mutatingVariableInputs))):
-                    builder.PrependBool(self.mutatingVariableInputs[i])
-                mutatingVariableInputs = builder.EndVector(len(self.mutatingVariableInputs))
-        if self.intermediates is not None:
-            if np is not None and type(self.intermediates) is np.ndarray:
-                intermediates = builder.CreateNumpyVector(self.intermediates)
-            else:
-                OperatorStartIntermediatesVector(builder, len(self.intermediates))
-                for i in reversed(range(len(self.intermediates))):
-                    builder.PrependInt32(self.intermediates[i])
-                intermediates = builder.EndVector(len(self.intermediates))
-        OperatorStart(builder)
-        OperatorAddOpcodeIndex(builder, self.opcodeIndex)
-        if self.inputs is not None:
-            OperatorAddInputs(builder, inputs)
-        if self.outputs is not None:
-            OperatorAddOutputs(builder, outputs)
-        OperatorAddBuiltinOptionsType(builder, self.builtinOptionsType)
-        if self.builtinOptions is not None:
-            OperatorAddBuiltinOptions(builder, builtinOptions)
-        if self.customOptions is not None:
-            OperatorAddCustomOptions(builder, customOptions)
-        OperatorAddCustomOptionsFormat(builder, self.customOptionsFormat)
-        if self.mutatingVariableInputs is not None:
-            OperatorAddMutatingVariableInputs(builder, mutatingVariableInputs)
-        if self.intermediates is not None:
-            OperatorAddIntermediates(builder, intermediates)
-        operator = OperatorEnd(builder)
-        return operator
-
-
-
-class SequenceRNNOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsSequenceRNNOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SequenceRNNOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def SequenceRNNOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SequenceRNNOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # SequenceRNNOptions
-    def TimeMajor(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-    # SequenceRNNOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # SequenceRNNOptions
-    def AsymmetricQuantizeInputs(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-def SequenceRNNOptionsStart(builder): builder.StartObject(3)
-def SequenceRNNOptionsAddTimeMajor(builder, timeMajor): builder.PrependBoolSlot(0, timeMajor, 0)
-def SequenceRNNOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(1, fusedActivationFunction, 0)
-def SequenceRNNOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(2, asymmetricQuantizeInputs, 0)
-def SequenceRNNOptionsEnd(builder): return builder.EndObject()
-
-
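A sketch (not part of the patch) of reading one operator with the object API from the Operator section above; `subgraph_t` is assumed to be a SubGraphT (defined elsewhere in this file), and numeric vectors may come back as numpy arrays or plain lists, so both are normalized here:

def describe_operator(model_t, subgraph_t, op_index):
    op = subgraph_t.operators[op_index]            # an OperatorT
    opcode = model_t.operatorCodes[op.opcodeIndex] # resolves the opcode table
    inputs = list(op.inputs) if op.inputs is not None else []
    outputs = list(op.outputs) if op.outputs is not None else []
    custom = bytes(op.customOptions) if op.customOptions is not None else b""
    return opcode, inputs, outputs, custom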
-class SequenceRNNOptionsT(object):
-
-    # SequenceRNNOptionsT
-    def __init__(self):
-        self.timeMajor = False  # type: bool
-        self.fusedActivationFunction = 0  # type: int
-        self.asymmetricQuantizeInputs = False  # type: bool
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        sequenceRNNOptions = SequenceRNNOptions()
-        sequenceRNNOptions.Init(buf, pos)
-        return cls.InitFromObj(sequenceRNNOptions)
-
-    @classmethod
-    def InitFromObj(cls, sequenceRNNOptions):
-        x = SequenceRNNOptionsT()
-        x._UnPack(sequenceRNNOptions)
-        return x
-
-    # SequenceRNNOptionsT
-    def _UnPack(self, sequenceRNNOptions):
-        if sequenceRNNOptions is None:
-            return
-        self.timeMajor = sequenceRNNOptions.TimeMajor()
-        self.fusedActivationFunction = sequenceRNNOptions.FusedActivationFunction()
-        self.asymmetricQuantizeInputs = sequenceRNNOptions.AsymmetricQuantizeInputs()
-
-    # SequenceRNNOptionsT
-    def Pack(self, builder):
-        SequenceRNNOptionsStart(builder)
-        SequenceRNNOptionsAddTimeMajor(builder, self.timeMajor)
-        SequenceRNNOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        SequenceRNNOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
-        sequenceRNNOptions = SequenceRNNOptionsEnd(builder)
-        return sequenceRNNOptions
-
-
-
-class Model(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsModel(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = Model()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def ModelBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # Model
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # Model
-    def Version(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
-        return 0
-
-    # Model
-    def OperatorCodes(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            x = self._tab.Vector(o)
-            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
-            x = self._tab.Indirect(x)
-            obj = OperatorCode()
-            obj.Init(self._tab.Bytes, x)
-            return obj
-        return None
-
-    # Model
-    def OperatorCodesLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # Model
-    def OperatorCodesIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        return o == 0
-
-    # Model
-    def Subgraphs(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            x = self._tab.Vector(o)
-            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
-            x = self._tab.Indirect(x)
-            obj = SubGraph()
-            obj.Init(self._tab.Bytes, x)
-            return obj
-        return None
-
-    # Model
-    def SubgraphsLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # Model
-    def SubgraphsIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        return o == 0
-
-    # Model
-    def Description(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return self._tab.String(o + self._tab.Pos)
-        return None
-
-    # Model
-    def Buffers(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
-        if o != 0:
-            x = self._tab.Vector(o)
-            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
-            x = self._tab.Indirect(x)
-            obj = Buffer()
-            obj.Init(self._tab.Bytes, x)
-            return obj
-        return None
-
-    # Model
-    def BuffersLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # Model
-    def BuffersIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
-        return o == 0
-
-    # Model
-    def MetadataBuffer(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
-        return 0
-
-    # Model
-    def MetadataBufferAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
-        return 0
-
-    # Model
-    def MetadataBufferLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # Model
-    def MetadataBufferIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
-        return o == 0
-
-    # Model
-    def Metadata(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
-        if o != 0:
-            x = self._tab.Vector(o)
-            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
-            x = self._tab.Indirect(x)
-            obj = Metadata()
-            obj.Init(self._tab.Bytes, x)
-            return obj
-        return None
-
-    # Model
-    def MetadataLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # Model
-    def MetadataIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
-        return o == 0
-
-def ModelStart(builder): builder.StartObject(7)
-def ModelAddVersion(builder, version): builder.PrependUint32Slot(0, version, 0)
-def ModelAddOperatorCodes(builder, operatorCodes): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(operatorCodes), 0)
-def ModelStartOperatorCodesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def ModelAddSubgraphs(builder, subgraphs): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(subgraphs), 0)
-def ModelStartSubgraphsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def ModelAddDescription(builder, description): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(description), 0)
-def ModelAddBuffers(builder, buffers): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(buffers), 0)
-def ModelStartBuffersVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def ModelAddMetadataBuffer(builder, metadataBuffer): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(metadataBuffer), 0)
-def ModelStartMetadataBufferVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def ModelAddMetadata(builder, metadata): builder.PrependUOffsetTRelativeSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(metadata), 0)
-def ModelStartMetadataVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def ModelEnd(builder): return builder.EndObject()
-
-try:
-    from typing import List
-except:
-    pass
-
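A sketch (not part of the patch) of lazy, table-level reading with the Model accessors above; nothing is unpacked until a field is requested, so large models can be inspected cheaply. Assumes `buf` holds the bytes of a .tflite file and that the SubGraph accessors defined elsewhere in this file are available:

model = Model.GetRootAsModel(buf, 0)
print(model.Version(), model.Description())
for i in range(model.SubgraphsLength()):
    sg = model.Subgraphs(i)  # a SubGraph table, decoded on demand
    print(i, sg.OperatorsLength())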
-class ModelT(object):
-
-    # ModelT
-    def __init__(self):
-        self.version = 0  # type: int
-        self.operatorCodes = None  # type: List[OperatorCodeT]
-        self.subgraphs = None  # type: List[SubGraphT]
-        self.description = None  # type: str
-        self.buffers = None  # type: List[BufferT]
-        self.metadataBuffer = None  # type: List[int]
-        self.metadata = None  # type: List[MetadataT]
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        model = Model()
-        model.Init(buf, pos)
-        return cls.InitFromObj(model)
-
-    @classmethod
-    def InitFromObj(cls, model):
-        x = ModelT()
-        x._UnPack(model)
-        return x
-
-    # ModelT
-    def _UnPack(self, model):
-        if model is None:
-            return
-        self.version = model.Version()
-        if not model.OperatorCodesIsNone():
-            self.operatorCodes = []
-            for i in range(model.OperatorCodesLength()):
-                if model.OperatorCodes(i) is None:
-                    self.operatorCodes.append(None)
-                else:
-                    operatorCode_ = OperatorCodeT.InitFromObj(model.OperatorCodes(i))
-                    self.operatorCodes.append(operatorCode_)
-        if not model.SubgraphsIsNone():
-            self.subgraphs = []
-            for i in range(model.SubgraphsLength()):
-                if model.Subgraphs(i) is None:
-                    self.subgraphs.append(None)
-                else:
-                    subGraph_ = SubGraphT.InitFromObj(model.Subgraphs(i))
-                    self.subgraphs.append(subGraph_)
-        self.description = model.Description()
-        if not model.BuffersIsNone():
-            self.buffers = []
-            for i in range(model.BuffersLength()):
-                if model.Buffers(i) is None:
-                    self.buffers.append(None)
-                else:
-                    buffer_ = BufferT.InitFromObj(model.Buffers(i))
-                    self.buffers.append(buffer_)
-        if not model.MetadataBufferIsNone():
-            if np is None:
-                self.metadataBuffer = []
-                for i in range(model.MetadataBufferLength()):
-                    self.metadataBuffer.append(model.MetadataBuffer(i))
-            else:
-                self.metadataBuffer = model.MetadataBufferAsNumpy()
-        if not model.MetadataIsNone():
-            self.metadata = []
-            for i in range(model.MetadataLength()):
-                if model.Metadata(i) is None:
-                    self.metadata.append(None)
-                else:
-                    metadata_ = MetadataT.InitFromObj(model.Metadata(i))
-                    self.metadata.append(metadata_)
-
-    # ModelT
-    def Pack(self, builder):
-        if self.operatorCodes is not None:
-            operatorCodeslist = []
-            for i in range(len(self.operatorCodes)):
-                operatorCodeslist.append(self.operatorCodes[i].Pack(builder))
-            ModelStartOperatorCodesVector(builder, len(self.operatorCodes))
-            for i in reversed(range(len(self.operatorCodes))):
-                builder.PrependUOffsetTRelative(operatorCodeslist[i])
-            operatorCodes = builder.EndVector(len(self.operatorCodes))
-        if self.subgraphs is not None:
-            subgraphslist = []
-            for i in range(len(self.subgraphs)):
-                subgraphslist.append(self.subgraphs[i].Pack(builder))
-            ModelStartSubgraphsVector(builder, len(self.subgraphs))
-            for i in reversed(range(len(self.subgraphs))):
-                builder.PrependUOffsetTRelative(subgraphslist[i])
-            subgraphs = builder.EndVector(len(self.subgraphs))
-        if self.description is not None:
-            description = builder.CreateString(self.description)
-        if self.buffers is not None:
-            bufferslist = []
-            for i in range(len(self.buffers)):
-                bufferslist.append(self.buffers[i].Pack(builder))
-            ModelStartBuffersVector(builder, len(self.buffers))
-            for i in reversed(range(len(self.buffers))):
-                builder.PrependUOffsetTRelative(bufferslist[i])
-            buffers = builder.EndVector(len(self.buffers))
-        if self.metadataBuffer is not None:
-            if np is not None and type(self.metadataBuffer) is np.ndarray:
-                metadataBuffer = builder.CreateNumpyVector(self.metadataBuffer)
-            else:
-                ModelStartMetadataBufferVector(builder, len(self.metadataBuffer))
-                for i in reversed(range(len(self.metadataBuffer))):
-                    builder.PrependInt32(self.metadataBuffer[i])
-                metadataBuffer = builder.EndVector(len(self.metadataBuffer))
-        if self.metadata is not None:
-            metadatalist = []
-            for i in range(len(self.metadata)):
-                metadatalist.append(self.metadata[i].Pack(builder))
-            ModelStartMetadataVector(builder, len(self.metadata))
-            for i in reversed(range(len(self.metadata))):
-                builder.PrependUOffsetTRelative(metadatalist[i])
-            metadata = builder.EndVector(len(self.metadata))
-        ModelStart(builder)
-        ModelAddVersion(builder, self.version)
-        if self.operatorCodes is not None:
-            ModelAddOperatorCodes(builder, operatorCodes)
-        if self.subgraphs is not None:
-            ModelAddSubgraphs(builder, subgraphs)
-        if self.description is not None:
-            ModelAddDescription(builder, description)
-        if self.buffers is not None:
-            ModelAddBuffers(builder, buffers)
-        if self.metadataBuffer is not None:
-            ModelAddMetadataBuffer(builder, metadataBuffer)
-        if self.metadata is not None:
-            ModelAddMetadata(builder, metadata)
-        model = ModelEnd(builder)
-        return model
-
-
-
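A sketch (not part of the patch) of round-tripping a .tflite file through ModelT above, e.g. to tweak metadata before re-serializing; the file names are placeholders, and the `file_identifier` keyword is supported by recent `flatbuffers` runtimes (drop it on older ones):

import flatbuffers

with open("model.tflite", "rb") as f:
    buf = bytearray(f.read())

# Unpack the whole model into mutable Python objects, edit, and repack.
model_t = ModelT.InitFromObj(Model.GetRootAsModel(buf, 0))
model_t.description = "patched by example"

builder = flatbuffers.Builder(1024)
builder.Finish(model_t.Pack(builder), file_identifier=b"TFL3")
with open("model.out.tflite", "wb") as f:
    f.write(builder.Output())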
-class PackOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsPackOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = PackOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def PackOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # PackOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # PackOptions
-    def ValuesCount(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # PackOptions
-    def Axis(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-def PackOptionsStart(builder): builder.StartObject(2)
-def PackOptionsAddValuesCount(builder, valuesCount): builder.PrependInt32Slot(0, valuesCount, 0)
-def PackOptionsAddAxis(builder, axis): builder.PrependInt32Slot(1, axis, 0)
-def PackOptionsEnd(builder): return builder.EndObject()
-
-
-class PackOptionsT(object):
-
-    # PackOptionsT
-    def __init__(self):
-        self.valuesCount = 0  # type: int
-        self.axis = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        packOptions = PackOptions()
-        packOptions.Init(buf, pos)
-        return cls.InitFromObj(packOptions)
-
-    @classmethod
-    def InitFromObj(cls, packOptions):
-        x = PackOptionsT()
-        x._UnPack(packOptions)
-        return x
-
-    # PackOptionsT
-    def _UnPack(self, packOptions):
-        if packOptions is None:
-            return
-        self.valuesCount = packOptions.ValuesCount()
-        self.axis = packOptions.Axis()
-
-    # PackOptionsT
-    def Pack(self, builder):
-        PackOptionsStart(builder)
-        PackOptionsAddValuesCount(builder, self.valuesCount)
-        PackOptionsAddAxis(builder, self.axis)
-        packOptions = PackOptionsEnd(builder)
-        return packOptions
-
-
-
-class LessEqualOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsLessEqualOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = LessEqualOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def LessEqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # LessEqualOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def LessEqualOptionsStart(builder): builder.StartObject(0)
-def LessEqualOptionsEnd(builder): return builder.EndObject()
-
-
-class LessEqualOptionsT(object):
-
-    # LessEqualOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        lessEqualOptions = LessEqualOptions()
-        lessEqualOptions.Init(buf, pos)
-        return cls.InitFromObj(lessEqualOptions)
-
-    @classmethod
-    def InitFromObj(cls, lessEqualOptions):
-        x = LessEqualOptionsT()
-        x._UnPack(lessEqualOptions)
-        return x
-
-    # LessEqualOptionsT
-    def _UnPack(self, lessEqualOptions):
-        if lessEqualOptions is None:
-            return
-
-    # LessEqualOptionsT
-    def Pack(self, builder):
-        LessEqualOptionsStart(builder)
-        lessEqualOptions = LessEqualOptionsEnd(builder)
-        return lessEqualOptions
-
-
-class ActivationFunctionType(object):
-    NONE = 0
-    RELU = 1
-    RELU_N1_TO_1 = 2
-    RELU6 = 3
-    TANH = 4
-    SIGN_BIT = 5
-
-
-
-
-class LogicalAndOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsLogicalAndOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = LogicalAndOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def LogicalAndOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # LogicalAndOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def LogicalAndOptionsStart(builder): builder.StartObject(0)
-def LogicalAndOptionsEnd(builder): return builder.EndObject()
-
-
-class LogicalAndOptionsT(object):
-
-    # LogicalAndOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        logicalAndOptions = LogicalAndOptions()
-        logicalAndOptions.Init(buf, pos)
-        return cls.InitFromObj(logicalAndOptions)
-
-    @classmethod
-    def InitFromObj(cls, logicalAndOptions):
-        x = LogicalAndOptionsT()
-        x._UnPack(logicalAndOptions)
-        return x
-
-    # LogicalAndOptionsT
-    def _UnPack(self, logicalAndOptions):
-        if logicalAndOptions is None:
-            return
-
-    # LogicalAndOptionsT
-    def Pack(self, builder):
-        LogicalAndOptionsStart(builder)
-        logicalAndOptions = LogicalAndOptionsEnd(builder)
-        return logicalAndOptions
-
-
-
-class DequantizeOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsDequantizeOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = DequantizeOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def DequantizeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # DequantizeOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def DequantizeOptionsStart(builder): builder.StartObject(0)
-def DequantizeOptionsEnd(builder): return builder.EndObject()
-
-
-class DequantizeOptionsT(object):
-
-    # DequantizeOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        dequantizeOptions = DequantizeOptions()
-        dequantizeOptions.Init(buf, pos)
-        return cls.InitFromObj(dequantizeOptions)
-
-    @classmethod
-    def InitFromObj(cls, dequantizeOptions):
-        x = DequantizeOptionsT()
-        x._UnPack(dequantizeOptions)
-        return x
-
-    # DequantizeOptionsT
-    def _UnPack(self, dequantizeOptions):
-        if dequantizeOptions is None:
-            return
-
-    # DequantizeOptionsT
-    def Pack(self, builder):
-        DequantizeOptionsStart(builder)
-        dequantizeOptions = DequantizeOptionsEnd(builder)
-        return dequantizeOptions
-
-
-class QuantizationDetails(object):
-    NONE = 0
-    CustomQuantization = 1
-
-
-def QuantizationDetailsCreator(unionType, table):
-    from flatbuffers.table import Table
-    if not isinstance(table, Table):
-        return None
-    if unionType == QuantizationDetails().CustomQuantization:
-        return CustomQuantizationT.InitFromBuf(table.Bytes, table.Pos)
-    return None
-
-
-
-class StridedSliceOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsStridedSliceOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = StridedSliceOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def StridedSliceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # StridedSliceOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # StridedSliceOptions
-    def BeginMask(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # StridedSliceOptions
-    def EndMask(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # StridedSliceOptions
-    def EllipsisMask(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # StridedSliceOptions
-    def NewAxisMask(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # StridedSliceOptions
-    def ShrinkAxisMask(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-def StridedSliceOptionsStart(builder): builder.StartObject(5)
-def StridedSliceOptionsAddBeginMask(builder, beginMask): builder.PrependInt32Slot(0, beginMask, 0)
-def StridedSliceOptionsAddEndMask(builder, endMask): builder.PrependInt32Slot(1, endMask, 0)
-def StridedSliceOptionsAddEllipsisMask(builder, ellipsisMask): builder.PrependInt32Slot(2, ellipsisMask, 0)
-def StridedSliceOptionsAddNewAxisMask(builder, newAxisMask): builder.PrependInt32Slot(3, newAxisMask, 0)
-def StridedSliceOptionsAddShrinkAxisMask(builder, shrinkAxisMask): builder.PrependInt32Slot(4, shrinkAxisMask, 0)
-def StridedSliceOptionsEnd(builder): return builder.EndObject()
-
-
-class StridedSliceOptionsT(object):
-
-    # StridedSliceOptionsT
-    def __init__(self):
-        self.beginMask = 0  # type: int
-        self.endMask = 0  # type: int
-        self.ellipsisMask = 0  # type: int
-        self.newAxisMask = 0  # type: int
-        self.shrinkAxisMask = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        stridedSliceOptions = StridedSliceOptions()
-        stridedSliceOptions.Init(buf, pos)
-        return cls.InitFromObj(stridedSliceOptions)
-
-    @classmethod
-    def InitFromObj(cls, stridedSliceOptions):
-        x = StridedSliceOptionsT()
-        x._UnPack(stridedSliceOptions)
-        return x
-
-    # StridedSliceOptionsT
-    def _UnPack(self, stridedSliceOptions):
-        if stridedSliceOptions is None:
-            return
-        self.beginMask = stridedSliceOptions.BeginMask()
-        self.endMask = stridedSliceOptions.EndMask()
-        self.ellipsisMask = stridedSliceOptions.EllipsisMask()
-        self.newAxisMask = stridedSliceOptions.NewAxisMask()
-        self.shrinkAxisMask = stridedSliceOptions.ShrinkAxisMask()
-
-    # StridedSliceOptionsT
-    def Pack(self, builder):
-        StridedSliceOptionsStart(builder)
-        StridedSliceOptionsAddBeginMask(builder, self.beginMask)
-        StridedSliceOptionsAddEndMask(builder, self.endMask)
-        StridedSliceOptionsAddEllipsisMask(builder, self.ellipsisMask)
-        StridedSliceOptionsAddNewAxisMask(builder, self.newAxisMask)
-        StridedSliceOptionsAddShrinkAxisMask(builder, self.shrinkAxisMask)
-        stridedSliceOptions = StridedSliceOptionsEnd(builder)
-        return stridedSliceOptions
-
-
-
-class Int32Vector(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsInt32Vector(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = Int32Vector()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def Int32VectorBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # Int32Vector
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # Int32Vector
-    def Values(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
-        return 0
-
-    # Int32Vector
-    def ValuesAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
-        return 0
-
-    # Int32Vector
-    def ValuesLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # Int32Vector
-    def ValuesIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        return o == 0
-
-def Int32VectorStart(builder): builder.StartObject(1)
-def Int32VectorAddValues(builder, values): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(values), 0)
-def Int32VectorStartValuesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def Int32VectorEnd(builder): return builder.EndObject()
-
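A sketch (not part of the patch) of the lower-level procedural builder pattern the Int32Vector functions above follow; vectors are written back to front, hence the reversed() loop, mirroring Int32VectorT.Pack below. The helper name is hypothetical:

import flatbuffers

def build_int32_vector(values):
    builder = flatbuffers.Builder(0)
    # Vectors must be built before the table that references them.
    Int32VectorStartValuesVector(builder, len(values))
    for v in reversed(values):
        builder.PrependInt32(v)
    vec = builder.EndVector(len(values))
    Int32VectorStart(builder)
    Int32VectorAddValues(builder, vec)
    builder.Finish(Int32VectorEnd(builder))
    return builder.Output()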
reversed(range(len(self.values))): - builder.PrependInt32(self.values[i]) - values = builder.EndVector(len(self.values)) - Int32VectorStart(builder) - if self.values is not None: - Int32VectorAddValues(builder, values) - int32Vector = Int32VectorEnd(builder) - return int32Vector - - - -class LocalResponseNormalizationOptions(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsLocalResponseNormalizationOptions(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = LocalResponseNormalizationOptions() - x.Init(buf, n + offset) - return x - - @classmethod - def LocalResponseNormalizationOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) - - # LocalResponseNormalizationOptions - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - - # LocalResponseNormalizationOptions - def Radius(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) - return 0 - - # LocalResponseNormalizationOptions - def Bias(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) - if o != 0: - return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) - return 0.0 - - # LocalResponseNormalizationOptions - def Alpha(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) - if o != 0: - return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) - return 0.0 - - # LocalResponseNormalizationOptions - def Beta(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) - if o != 0: - return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) - return 0.0 - -def LocalResponseNormalizationOptionsStart(builder): builder.StartObject(4) -def LocalResponseNormalizationOptionsAddRadius(builder, radius): builder.PrependInt32Slot(0, radius, 0) -def LocalResponseNormalizationOptionsAddBias(builder, bias): builder.PrependFloat32Slot(1, bias, 0.0) -def LocalResponseNormalizationOptionsAddAlpha(builder, alpha): builder.PrependFloat32Slot(2, alpha, 0.0) -def LocalResponseNormalizationOptionsAddBeta(builder, beta): builder.PrependFloat32Slot(3, beta, 0.0) -def LocalResponseNormalizationOptionsEnd(builder): return builder.EndObject() - - -class LocalResponseNormalizationOptionsT(object): - - # LocalResponseNormalizationOptionsT - def __init__(self): - self.radius = 0 # type: int - self.bias = 0.0 # type: float - self.alpha = 0.0 # type: float - self.beta = 0.0 # type: float - - @classmethod - def InitFromBuf(cls, buf, pos): - localResponseNormalizationOptions = LocalResponseNormalizationOptions() - localResponseNormalizationOptions.Init(buf, pos) - return cls.InitFromObj(localResponseNormalizationOptions) - - @classmethod - def InitFromObj(cls, localResponseNormalizationOptions): - x = LocalResponseNormalizationOptionsT() - x._UnPack(localResponseNormalizationOptions) - return x - - # LocalResponseNormalizationOptionsT - def _UnPack(self, localResponseNormalizationOptions): - if localResponseNormalizationOptions is None: - return - self.radius = localResponseNormalizationOptions.Radius() - self.bias = localResponseNormalizationOptions.Bias() - self.alpha = localResponseNormalizationOptions.Alpha() - self.beta = localResponseNormalizationOptions.Beta() - - # LocalResponseNormalizationOptionsT - def 
-
-
-
-class SpaceToBatchNDOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsSpaceToBatchNDOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SpaceToBatchNDOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def SpaceToBatchNDOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SpaceToBatchNDOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def SpaceToBatchNDOptionsStart(builder): builder.StartObject(0)
-def SpaceToBatchNDOptionsEnd(builder): return builder.EndObject()
-
-
-class SpaceToBatchNDOptionsT(object):
-
-    # SpaceToBatchNDOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        spaceToBatchNDOptions = SpaceToBatchNDOptions()
-        spaceToBatchNDOptions.Init(buf, pos)
-        return cls.InitFromObj(spaceToBatchNDOptions)
-
-    @classmethod
-    def InitFromObj(cls, spaceToBatchNDOptions):
-        x = SpaceToBatchNDOptionsT()
-        x._UnPack(spaceToBatchNDOptions)
-        return x
-
-    # SpaceToBatchNDOptionsT
-    def _UnPack(self, spaceToBatchNDOptions):
-        if spaceToBatchNDOptions is None:
-            return
-
-    # SpaceToBatchNDOptionsT
-    def Pack(self, builder):
-        SpaceToBatchNDOptionsStart(builder)
-        spaceToBatchNDOptions = SpaceToBatchNDOptionsEnd(builder)
-        return spaceToBatchNDOptions
-
-
-
-class DimensionMetadata(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsDimensionMetadata(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = DimensionMetadata()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def DimensionMetadataBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # DimensionMetadata
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # DimensionMetadata
-    def Format(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # DimensionMetadata
-    def DenseSize(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # DimensionMetadata
-    def ArraySegmentsType(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
-        return 0
-
-    # DimensionMetadata
-    def ArraySegments(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            from flatbuffers.table import Table
-            obj = Table(bytearray(), 0)
-            self._tab.Union(obj, o)
-            return obj
-        return None
-
-    # DimensionMetadata
-    def ArrayIndicesType(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
-        return 0
-
-    # DimensionMetadata
-    def ArrayIndices(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
-        if o != 0:
-            from flatbuffers.table import Table
-            obj = Table(bytearray(), 0)
-            self._tab.Union(obj, o)
-            return obj
-        return None
-
-def DimensionMetadataStart(builder): builder.StartObject(6)
-def DimensionMetadataAddFormat(builder, format): builder.PrependInt8Slot(0, format, 0)
-def DimensionMetadataAddDenseSize(builder, denseSize): builder.PrependInt32Slot(1, denseSize, 0)
-def DimensionMetadataAddArraySegmentsType(builder, arraySegmentsType): builder.PrependUint8Slot(2, arraySegmentsType, 0)
-def DimensionMetadataAddArraySegments(builder, arraySegments): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(arraySegments), 0)
-def DimensionMetadataAddArrayIndicesType(builder, arrayIndicesType): builder.PrependUint8Slot(4, arrayIndicesType, 0)
-def DimensionMetadataAddArrayIndices(builder, arrayIndices): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(arrayIndices), 0)
-def DimensionMetadataEnd(builder): return builder.EndObject()
-
-try:
-    from typing import Union
-except:
-    pass
-
-class DimensionMetadataT(object):
-
-    # DimensionMetadataT
-    def __init__(self):
-        self.format = 0 # type: int
-        self.denseSize = 0 # type: int
-        self.arraySegmentsType = 0 # type: int
-        self.arraySegments = None # type: Union[None, Int32VectorT, Uint16VectorT, Uint8VectorT]
-        self.arrayIndicesType = 0 # type: int
-        self.arrayIndices = None # type: Union[None, Int32VectorT, Uint16VectorT, Uint8VectorT]
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        dimensionMetadata = DimensionMetadata()
-        dimensionMetadata.Init(buf, pos)
-        return cls.InitFromObj(dimensionMetadata)
-
-    @classmethod
-    def InitFromObj(cls, dimensionMetadata):
-        x = DimensionMetadataT()
-        x._UnPack(dimensionMetadata)
-        return x
-
-    # DimensionMetadataT
-    def _UnPack(self, dimensionMetadata):
-        if dimensionMetadata is None:
-            return
-        self.format = dimensionMetadata.Format()
-        self.denseSize = dimensionMetadata.DenseSize()
-        self.arraySegmentsType = dimensionMetadata.ArraySegmentsType()
-        self.arraySegments = SparseIndexVectorCreator(self.arraySegmentsType, dimensionMetadata.ArraySegments())
-        self.arrayIndicesType = dimensionMetadata.ArrayIndicesType()
-        self.arrayIndices = SparseIndexVectorCreator(self.arrayIndicesType, dimensionMetadata.ArrayIndices())
-
-    # DimensionMetadataT
-    def Pack(self, builder):
-        if self.arraySegments is not None:
-            arraySegments = self.arraySegments.Pack(builder)
-        if self.arrayIndices is not None:
-            arrayIndices = self.arrayIndices.Pack(builder)
-        DimensionMetadataStart(builder)
-        DimensionMetadataAddFormat(builder, self.format)
-        DimensionMetadataAddDenseSize(builder, self.denseSize)
-        DimensionMetadataAddArraySegmentsType(builder, self.arraySegmentsType)
-        if self.arraySegments is not None:
-            DimensionMetadataAddArraySegments(builder, arraySegments)
-        DimensionMetadataAddArrayIndicesType(builder, self.arrayIndicesType)
-        if self.arrayIndices is not None:
-            DimensionMetadataAddArrayIndices(builder, arrayIndices)
-        dimensionMetadata = DimensionMetadataEnd(builder)
-        return dimensionMetadata
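DimensionMetadata's arraySegments and arrayIndices fields are flatbuffers unions: the table stores a separate ...Type discriminator next to an untyped table offset, and _UnPack above resolves the pair through SparseIndexVectorCreator, which is defined elsewhere in this file. A hedged sketch of building one; the discriminator value is an assumption about the SparseIndexVector enum defined elsewhere in this schema:

    seg = Int32VectorT()
    seg.values = [0, 4, 8]

    dim = DimensionMetadataT()
    dim.format = DimensionType.SPARSE_CSR  # plain int enum, declared further down this file
    dim.arraySegmentsType = 1              # assumption: SparseIndexVector.Int32Vector == 1
    dim.arraySegments = seg                # Pack() serializes the union member before the table

    builder = flatbuffers.Builder(0)
    builder.Finish(dim.Pack(builder))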
-
-
-
-class Pool2DOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsPool2DOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = Pool2DOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def Pool2DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # Pool2DOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # Pool2DOptions
-    def Padding(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # Pool2DOptions
-    def StrideW(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # Pool2DOptions
-    def StrideH(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # Pool2DOptions
-    def FilterWidth(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # Pool2DOptions
-    def FilterHeight(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # Pool2DOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-def Pool2DOptionsStart(builder): builder.StartObject(6)
-def Pool2DOptionsAddPadding(builder, padding): builder.PrependInt8Slot(0, padding, 0)
-def Pool2DOptionsAddStrideW(builder, strideW): builder.PrependInt32Slot(1, strideW, 0)
-def Pool2DOptionsAddStrideH(builder, strideH): builder.PrependInt32Slot(2, strideH, 0)
-def Pool2DOptionsAddFilterWidth(builder, filterWidth): builder.PrependInt32Slot(3, filterWidth, 0)
-def Pool2DOptionsAddFilterHeight(builder, filterHeight): builder.PrependInt32Slot(4, filterHeight, 0)
-def Pool2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(5, fusedActivationFunction, 0)
-def Pool2DOptionsEnd(builder): return builder.EndObject()
-
-
-class Pool2DOptionsT(object):
-
-    # Pool2DOptionsT
-    def __init__(self):
-        self.padding = 0 # type: int
-        self.strideW = 0 # type: int
-        self.strideH = 0 # type: int
-        self.filterWidth = 0 # type: int
-        self.filterHeight = 0 # type: int
-        self.fusedActivationFunction = 0 # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        pool2DOptions = Pool2DOptions()
-        pool2DOptions.Init(buf, pos)
-        return cls.InitFromObj(pool2DOptions)
-
-    @classmethod
-    def InitFromObj(cls, pool2DOptions):
-        x = Pool2DOptionsT()
-        x._UnPack(pool2DOptions)
-        return x
-
-    # Pool2DOptionsT
-    def _UnPack(self, pool2DOptions):
-        if pool2DOptions is None:
-            return
-        self.padding = pool2DOptions.Padding()
-        self.strideW = pool2DOptions.StrideW()
-        self.strideH = pool2DOptions.StrideH()
-        self.filterWidth = pool2DOptions.FilterWidth()
-        self.filterHeight = pool2DOptions.FilterHeight()
-        self.fusedActivationFunction = pool2DOptions.FusedActivationFunction()
-
-    # Pool2DOptionsT
-    def Pack(self, builder):
-        Pool2DOptionsStart(builder)
-        Pool2DOptionsAddPadding(builder, self.padding)
-        Pool2DOptionsAddStrideW(builder, self.strideW)
-        Pool2DOptionsAddStrideH(builder, self.strideH)
-        Pool2DOptionsAddFilterWidth(builder, self.filterWidth)
-        Pool2DOptionsAddFilterHeight(builder, self.filterHeight)
-        Pool2DOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        pool2DOptions = Pool2DOptionsEnd(builder)
-        return pool2DOptions
-
-
-
-class ScatterNdOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsScatterNdOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = ScatterNdOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def ScatterNdOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # ScatterNdOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def ScatterNdOptionsStart(builder): builder.StartObject(0)
-def ScatterNdOptionsEnd(builder): return builder.EndObject()
-
-
-class ScatterNdOptionsT(object):
-
-    # ScatterNdOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        scatterNdOptions = ScatterNdOptions()
-        scatterNdOptions.Init(buf, pos)
-        return cls.InitFromObj(scatterNdOptions)
-
-    @classmethod
-    def InitFromObj(cls, scatterNdOptions):
-        x = ScatterNdOptionsT()
-        x._UnPack(scatterNdOptions)
-        return x
-
-    # ScatterNdOptionsT
-    def _UnPack(self, scatterNdOptions):
-        if scatterNdOptions is None:
-            return
-
-    # ScatterNdOptionsT
-    def Pack(self, builder):
-        ScatterNdOptionsStart(builder)
-        scatterNdOptions = ScatterNdOptionsEnd(builder)
-        return scatterNdOptions
-
-
-
-class DepthwiseConv2DOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsDepthwiseConv2DOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = DepthwiseConv2DOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def DepthwiseConv2DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # DepthwiseConv2DOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # DepthwiseConv2DOptions
-    def Padding(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # DepthwiseConv2DOptions
-    def StrideW(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # DepthwiseConv2DOptions
-    def StrideH(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # DepthwiseConv2DOptions
-    def DepthMultiplier(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # DepthwiseConv2DOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # DepthwiseConv2DOptions
-    def DilationWFactor(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 1
-
-    # DepthwiseConv2DOptions
-    def DilationHFactor(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 1
-
-def DepthwiseConv2DOptionsStart(builder): builder.StartObject(7)
-def DepthwiseConv2DOptionsAddPadding(builder, padding): builder.PrependInt8Slot(0, padding, 0)
-def DepthwiseConv2DOptionsAddStrideW(builder, strideW): builder.PrependInt32Slot(1, strideW, 0)
-def DepthwiseConv2DOptionsAddStrideH(builder, strideH): builder.PrependInt32Slot(2, strideH, 0)
-def DepthwiseConv2DOptionsAddDepthMultiplier(builder, depthMultiplier): builder.PrependInt32Slot(3, depthMultiplier, 0)
-def DepthwiseConv2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(4, fusedActivationFunction, 0)
-def DepthwiseConv2DOptionsAddDilationWFactor(builder, dilationWFactor): builder.PrependInt32Slot(5, dilationWFactor, 1)
-def DepthwiseConv2DOptionsAddDilationHFactor(builder, dilationHFactor): builder.PrependInt32Slot(6, dilationHFactor, 1)
-def DepthwiseConv2DOptionsEnd(builder): return builder.EndObject()
-
-
-class DepthwiseConv2DOptionsT(object):
-
-    # DepthwiseConv2DOptionsT
-    def __init__(self):
-        self.padding = 0 # type: int
-        self.strideW = 0 # type: int
-        self.strideH = 0 # type: int
-        self.depthMultiplier = 0 # type: int
-        self.fusedActivationFunction = 0 # type: int
-        self.dilationWFactor = 1 # type: int
-        self.dilationHFactor = 1 # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        depthwiseConv2DOptions = DepthwiseConv2DOptions()
-        depthwiseConv2DOptions.Init(buf, pos)
-        return cls.InitFromObj(depthwiseConv2DOptions)
-
-    @classmethod
-    def InitFromObj(cls, depthwiseConv2DOptions):
-        x = DepthwiseConv2DOptionsT()
-        x._UnPack(depthwiseConv2DOptions)
-        return x
-
-    # DepthwiseConv2DOptionsT
-    def _UnPack(self, depthwiseConv2DOptions):
-        if depthwiseConv2DOptions is None:
-            return
-        self.padding = depthwiseConv2DOptions.Padding()
-        self.strideW = depthwiseConv2DOptions.StrideW()
-        self.strideH = depthwiseConv2DOptions.StrideH()
-        self.depthMultiplier = depthwiseConv2DOptions.DepthMultiplier()
-        self.fusedActivationFunction = depthwiseConv2DOptions.FusedActivationFunction()
-        self.dilationWFactor = depthwiseConv2DOptions.DilationWFactor()
-        self.dilationHFactor = depthwiseConv2DOptions.DilationHFactor()
-
-    # DepthwiseConv2DOptionsT
-    def Pack(self, builder):
-        DepthwiseConv2DOptionsStart(builder)
-        DepthwiseConv2DOptionsAddPadding(builder, self.padding)
-        DepthwiseConv2DOptionsAddStrideW(builder, self.strideW)
-        DepthwiseConv2DOptionsAddStrideH(builder, self.strideH)
-        DepthwiseConv2DOptionsAddDepthMultiplier(builder, self.depthMultiplier)
-        DepthwiseConv2DOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        DepthwiseConv2DOptionsAddDilationWFactor(builder, self.dilationWFactor)
-        DepthwiseConv2DOptionsAddDilationHFactor(builder, self.dilationHFactor)
-        depthwiseConv2DOptions = DepthwiseConv2DOptionsEnd(builder)
-        return depthwiseConv2DOptions
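Unlike most fields in this file, the dilation factors default to 1 rather than 0: the accessors return 1 when the slot is absent, and the Add helpers pass 1 as the slot default, so PrependInt32Slot elides the field entirely whenever the value equals it. A sketch, under the same runtime assumptions as above:

    import flatbuffers

    opts = DepthwiseConv2DOptionsT()     # dilationWFactor and dilationHFactor start at 1
    builder = flatbuffers.Builder(0)
    builder.Finish(opts.Pack(builder))

    reread = DepthwiseConv2DOptions.GetRootAsDepthwiseConv2DOptions(builder.Output(), 0)
    assert reread.DilationWFactor() == 1  # slot was never written; the default is recovered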
-
-
-
-class Uint8Vector(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsUint8Vector(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = Uint8Vector()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def Uint8VectorBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # Uint8Vector
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # Uint8Vector
-    def Values(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
-        return 0
-
-    # Uint8Vector
-    def ValuesAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
-        return 0
-
-    # Uint8Vector
-    def ValuesLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # Uint8Vector
-    def ValuesIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        return o == 0
-
-def Uint8VectorStart(builder): builder.StartObject(1)
-def Uint8VectorAddValues(builder, values): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(values), 0)
-def Uint8VectorStartValuesVector(builder, numElems): return builder.StartVector(1, numElems, 1)
-def Uint8VectorEnd(builder): return builder.EndObject()
-
-try:
-    from typing import List
-except:
-    pass
-
-class Uint8VectorT(object):
-
-    # Uint8VectorT
-    def __init__(self):
-        self.values = None # type: List[int]
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        uint8Vector = Uint8Vector()
-        uint8Vector.Init(buf, pos)
-        return cls.InitFromObj(uint8Vector)
-
-    @classmethod
-    def InitFromObj(cls, uint8Vector):
-        x = Uint8VectorT()
-        x._UnPack(uint8Vector)
-        return x
-
-    # Uint8VectorT
-    def _UnPack(self, uint8Vector):
-        if uint8Vector is None:
-            return
-        if not uint8Vector.ValuesIsNone():
-            if np is None:
-                self.values = []
-                for i in range(uint8Vector.ValuesLength()):
-                    self.values.append(uint8Vector.Values(i))
-            else:
-                self.values = uint8Vector.ValuesAsNumpy()
-
-    # Uint8VectorT
-    def Pack(self, builder):
-        if self.values is not None:
-            if np is not None and type(self.values) is np.ndarray:
-                values = builder.CreateNumpyVector(self.values)
-            else:
-                Uint8VectorStartValuesVector(builder, len(self.values))
-                for i in reversed(range(len(self.values))):
-                    builder.PrependUint8(self.values[i])
-                values = builder.EndVector(len(self.values))
-        Uint8VectorStart(builder)
-        if self.values is not None:
-            Uint8VectorAddValues(builder, values)
-        uint8Vector = Uint8VectorEnd(builder)
-        return uint8Vector
-
-
-
-class FullyConnectedOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsFullyConnectedOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = FullyConnectedOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def FullyConnectedOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # FullyConnectedOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # FullyConnectedOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # FullyConnectedOptions
-    def WeightsFormat(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # FullyConnectedOptions
-    def KeepNumDims(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-    # FullyConnectedOptions
-    def AsymmetricQuantizeInputs(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-def FullyConnectedOptionsStart(builder): builder.StartObject(4)
-def FullyConnectedOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0)
-def FullyConnectedOptionsAddWeightsFormat(builder, weightsFormat): builder.PrependInt8Slot(1, weightsFormat, 0)
-def FullyConnectedOptionsAddKeepNumDims(builder, keepNumDims): builder.PrependBoolSlot(2, keepNumDims, 0)
-def FullyConnectedOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(3, asymmetricQuantizeInputs, 0)
-def FullyConnectedOptionsEnd(builder): return builder.EndObject()
-
-
-class FullyConnectedOptionsT(object):
-
-    # FullyConnectedOptionsT
-    def __init__(self):
-        self.fusedActivationFunction = 0 # type: int
-        self.weightsFormat = 0 # type: int
-        self.keepNumDims = False # type: bool
-        self.asymmetricQuantizeInputs = False # type: bool
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        fullyConnectedOptions = FullyConnectedOptions()
-        fullyConnectedOptions.Init(buf, pos)
-        return cls.InitFromObj(fullyConnectedOptions)
-
-    @classmethod
-    def InitFromObj(cls, fullyConnectedOptions):
-        x = FullyConnectedOptionsT()
-        x._UnPack(fullyConnectedOptions)
-        return x
-
-    # FullyConnectedOptionsT
-    def _UnPack(self, fullyConnectedOptions):
-        if fullyConnectedOptions is None:
-            return
-        self.fusedActivationFunction = fullyConnectedOptions.FusedActivationFunction()
-        self.weightsFormat = fullyConnectedOptions.WeightsFormat()
-        self.keepNumDims = fullyConnectedOptions.KeepNumDims()
-        self.asymmetricQuantizeInputs = fullyConnectedOptions.AsymmetricQuantizeInputs()
-
-    # FullyConnectedOptionsT
-    def Pack(self, builder):
-        FullyConnectedOptionsStart(builder)
-        FullyConnectedOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        FullyConnectedOptionsAddWeightsFormat(builder, self.weightsFormat)
-        FullyConnectedOptionsAddKeepNumDims(builder, self.keepNumDims)
-        FullyConnectedOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
-        fullyConnectedOptions = FullyConnectedOptionsEnd(builder)
-        return fullyConnectedOptions
-
-
-
-class CastOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsCastOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = CastOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def CastOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # CastOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # CastOptions
-    def InDataType(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # CastOptions
-    def OutDataType(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-def CastOptionsStart(builder): builder.StartObject(2)
-def CastOptionsAddInDataType(builder, inDataType): builder.PrependInt8Slot(0, inDataType, 0)
-def CastOptionsAddOutDataType(builder, outDataType): builder.PrependInt8Slot(1, outDataType, 0)
-def CastOptionsEnd(builder): return builder.EndObject()
-
-
-class CastOptionsT(object):
-
-    # CastOptionsT
-    def __init__(self):
-        self.inDataType = 0 # type: int
-        self.outDataType = 0 # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        castOptions = CastOptions()
-        castOptions.Init(buf, pos)
-        return cls.InitFromObj(castOptions)
-
-    @classmethod
-    def InitFromObj(cls, castOptions):
-        x = CastOptionsT()
-        x._UnPack(castOptions)
-        return x
-
-    # CastOptionsT
-    def _UnPack(self, castOptions):
-        if castOptions is None:
-            return
-        self.inDataType = castOptions.InDataType()
-        self.outDataType = castOptions.OutDataType()
-
-    # CastOptionsT
-    def Pack(self, builder):
-        CastOptionsStart(builder)
-        CastOptionsAddInDataType(builder, self.inDataType)
-        CastOptionsAddOutDataType(builder, self.outDataType)
-        castOptions = CastOptionsEnd(builder)
-        return castOptions
-
-
-
-class EqualOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsEqualOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = EqualOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def EqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # EqualOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def EqualOptionsStart(builder): builder.StartObject(0)
-def EqualOptionsEnd(builder): return builder.EndObject()
-
-
-class EqualOptionsT(object):
-
-    # EqualOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        equalOptions = EqualOptions()
-        equalOptions.Init(buf, pos)
-        return cls.InitFromObj(equalOptions)
-
-    @classmethod
-    def InitFromObj(cls, equalOptions):
-        x = EqualOptionsT()
-        x._UnPack(equalOptions)
-        return x
-
-    # EqualOptionsT
-    def _UnPack(self, equalOptions):
-        if equalOptions is None:
-            return
-
-    # EqualOptionsT
-    def Pack(self, builder):
-        EqualOptionsStart(builder)
-        equalOptions = EqualOptionsEnd(builder)
-        return equalOptions
-
-
-class Padding(object):
-    SAME = 0
-    VALID = 1
-
-
-
-
-class FakeQuantOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsFakeQuantOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = FakeQuantOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def FakeQuantOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # FakeQuantOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # FakeQuantOptions
-    def Min(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
-        return 0.0
-
-    # FakeQuantOptions
-    def Max(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
-        return 0.0
-
-    # FakeQuantOptions
-    def NumBits(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # FakeQuantOptions
-    def NarrowRange(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-def FakeQuantOptionsStart(builder): builder.StartObject(4)
-def FakeQuantOptionsAddMin(builder, min): builder.PrependFloat32Slot(0, min, 0.0)
-def FakeQuantOptionsAddMax(builder, max): builder.PrependFloat32Slot(1, max, 0.0)
-def FakeQuantOptionsAddNumBits(builder, numBits): builder.PrependInt32Slot(2, numBits, 0)
-def FakeQuantOptionsAddNarrowRange(builder, narrowRange): builder.PrependBoolSlot(3, narrowRange, 0)
-def FakeQuantOptionsEnd(builder): return builder.EndObject()
-
-
-class FakeQuantOptionsT(object):
-
-    # FakeQuantOptionsT
-    def __init__(self):
-        self.min = 0.0 # type: float
-        self.max = 0.0 # type: float
-        self.numBits = 0 # type: int
-        self.narrowRange = False # type: bool
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        fakeQuantOptions = FakeQuantOptions()
-        fakeQuantOptions.Init(buf, pos)
-        return cls.InitFromObj(fakeQuantOptions)
-
-    @classmethod
-    def InitFromObj(cls, fakeQuantOptions):
-        x = FakeQuantOptionsT()
-        x._UnPack(fakeQuantOptions)
-        return x
-
-    # FakeQuantOptionsT
-    def _UnPack(self, fakeQuantOptions):
-        if fakeQuantOptions is None:
-            return
-        self.min = fakeQuantOptions.Min()
-        self.max = fakeQuantOptions.Max()
-        self.numBits = fakeQuantOptions.NumBits()
-        self.narrowRange = fakeQuantOptions.NarrowRange()
-
-    # FakeQuantOptionsT
-    def Pack(self, builder):
-        FakeQuantOptionsStart(builder)
-        FakeQuantOptionsAddMin(builder, self.min)
-        FakeQuantOptionsAddMax(builder, self.max)
-        FakeQuantOptionsAddNumBits(builder, self.numBits)
-        FakeQuantOptionsAddNarrowRange(builder, self.narrowRange)
-        fakeQuantOptions = FakeQuantOptionsEnd(builder)
-        return fakeQuantOptions
-
-
-
-class AddNOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsAddNOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = AddNOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def AddNOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # AddNOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def AddNOptionsStart(builder): builder.StartObject(0)
-def AddNOptionsEnd(builder): return builder.EndObject()
-
-
-class AddNOptionsT(object):
-
-    # AddNOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        addNOptions = AddNOptions()
-        addNOptions.Init(buf, pos)
-        return cls.InitFromObj(addNOptions)
-
-    @classmethod
-    def InitFromObj(cls, addNOptions):
-        x = AddNOptionsT()
-        x._UnPack(addNOptions)
-        return x
-
-    # AddNOptionsT
-    def _UnPack(self, addNOptions):
-        if addNOptions is None:
-            return
-
-    # AddNOptionsT
-    def Pack(self, builder):
-        AddNOptionsStart(builder)
-        addNOptions = AddNOptionsEnd(builder)
-        return addNOptions
-
-
-
-class ResizeNearestNeighborOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsResizeNearestNeighborOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = ResizeNearestNeighborOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def ResizeNearestNeighborOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # ResizeNearestNeighborOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # ResizeNearestNeighborOptions
-    def AlignCorners(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-    # ResizeNearestNeighborOptions
-    def HalfPixelCenters(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-def ResizeNearestNeighborOptionsStart(builder): builder.StartObject(2)
-def ResizeNearestNeighborOptionsAddAlignCorners(builder, alignCorners): builder.PrependBoolSlot(0, alignCorners, 0)
-def ResizeNearestNeighborOptionsAddHalfPixelCenters(builder, halfPixelCenters): builder.PrependBoolSlot(1, halfPixelCenters, 0)
-def ResizeNearestNeighborOptionsEnd(builder): return builder.EndObject()
-
-
-class ResizeNearestNeighborOptionsT(object):
-
-    # ResizeNearestNeighborOptionsT
-    def __init__(self):
-        self.alignCorners = False # type: bool
-        self.halfPixelCenters = False # type: bool
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        resizeNearestNeighborOptions = ResizeNearestNeighborOptions()
-        resizeNearestNeighborOptions.Init(buf, pos)
-        return cls.InitFromObj(resizeNearestNeighborOptions)
-
-    @classmethod
-    def InitFromObj(cls, resizeNearestNeighborOptions):
-        x = ResizeNearestNeighborOptionsT()
-        x._UnPack(resizeNearestNeighborOptions)
-        return x
-
-    # ResizeNearestNeighborOptionsT
-    def _UnPack(self, resizeNearestNeighborOptions):
-        if resizeNearestNeighborOptions is None:
-            return
-        self.alignCorners = resizeNearestNeighborOptions.AlignCorners()
-        self.halfPixelCenters = resizeNearestNeighborOptions.HalfPixelCenters()
-
-    # ResizeNearestNeighborOptionsT
-    def Pack(self, builder):
-        ResizeNearestNeighborOptionsStart(builder)
-        ResizeNearestNeighborOptionsAddAlignCorners(builder, self.alignCorners)
-        ResizeNearestNeighborOptionsAddHalfPixelCenters(builder, self.halfPixelCenters)
-        resizeNearestNeighborOptions = ResizeNearestNeighborOptionsEnd(builder)
-        return resizeNearestNeighborOptions
-
-
-
-class SubOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsSubOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SubOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def SubOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SubOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # SubOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-def SubOptionsStart(builder): builder.StartObject(1)
-def SubOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0)
-def SubOptionsEnd(builder): return builder.EndObject()
-
-
-class SubOptionsT(object):
-
-    # SubOptionsT
-    def __init__(self):
-        self.fusedActivationFunction = 0 # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        subOptions = SubOptions()
-        subOptions.Init(buf, pos)
-        return cls.InitFromObj(subOptions)
-
-    @classmethod
-    def InitFromObj(cls, subOptions):
-        x = SubOptionsT()
-        x._UnPack(subOptions)
-        return x
-
-    # SubOptionsT
-    def _UnPack(self, subOptions):
-        if subOptions is None:
-            return
-        self.fusedActivationFunction = subOptions.FusedActivationFunction()
-
-    # SubOptionsT
-    def Pack(self, builder):
-        SubOptionsStart(builder)
-        SubOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        subOptions = SubOptionsEnd(builder)
-        return subOptions
-
-
-
-class LSTMOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsLSTMOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = LSTMOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def LSTMOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # LSTMOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # LSTMOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # LSTMOptions
-    def CellClip(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
-        return 0.0
-
-    # LSTMOptions
-    def ProjClip(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
-        return 0.0
-
-    # LSTMOptions
-    def KernelType(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # LSTMOptions
-    def AsymmetricQuantizeInputs(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-def LSTMOptionsStart(builder): builder.StartObject(5)
-def LSTMOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0)
-def LSTMOptionsAddCellClip(builder, cellClip): builder.PrependFloat32Slot(1, cellClip, 0.0)
-def LSTMOptionsAddProjClip(builder, projClip): builder.PrependFloat32Slot(2, projClip, 0.0)
-def LSTMOptionsAddKernelType(builder, kernelType): builder.PrependInt8Slot(3, kernelType, 0)
-def LSTMOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(4, asymmetricQuantizeInputs, 0)
-def LSTMOptionsEnd(builder): return builder.EndObject()
-
-
-class LSTMOptionsT(object):
-
-    # LSTMOptionsT
-    def __init__(self):
-        self.fusedActivationFunction = 0 # type: int
-        self.cellClip = 0.0 # type: float
-        self.projClip = 0.0 # type: float
-        self.kernelType = 0 # type: int
-        self.asymmetricQuantizeInputs = False # type: bool
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        lSTMOptions = LSTMOptions()
-        lSTMOptions.Init(buf, pos)
-        return cls.InitFromObj(lSTMOptions)
-
-    @classmethod
-    def InitFromObj(cls, lSTMOptions):
-        x = LSTMOptionsT()
-        x._UnPack(lSTMOptions)
-        return x
-
-    # LSTMOptionsT
-    def _UnPack(self, lSTMOptions):
-        if lSTMOptions is None:
-            return
-        self.fusedActivationFunction = lSTMOptions.FusedActivationFunction()
-        self.cellClip = lSTMOptions.CellClip()
-        self.projClip = lSTMOptions.ProjClip()
-        self.kernelType = lSTMOptions.KernelType()
-        self.asymmetricQuantizeInputs = lSTMOptions.AsymmetricQuantizeInputs()
-
-    # LSTMOptionsT
-    def Pack(self, builder):
-        LSTMOptionsStart(builder)
-        LSTMOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        LSTMOptionsAddCellClip(builder, self.cellClip)
-        LSTMOptionsAddProjClip(builder, self.projClip)
-        LSTMOptionsAddKernelType(builder, self.kernelType)
-        LSTMOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
-        lSTMOptions = LSTMOptionsEnd(builder)
-        return lSTMOptions
-
-
-
-class LogicalNotOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsLogicalNotOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = LogicalNotOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def LogicalNotOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # LogicalNotOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def LogicalNotOptionsStart(builder): builder.StartObject(0)
-def LogicalNotOptionsEnd(builder): return builder.EndObject()
-
-
-class LogicalNotOptionsT(object):
-
-    # LogicalNotOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        logicalNotOptions = LogicalNotOptions()
-        logicalNotOptions.Init(buf, pos)
-        return cls.InitFromObj(logicalNotOptions)
-
-    @classmethod
-    def InitFromObj(cls, logicalNotOptions):
-        x = LogicalNotOptionsT()
-        x._UnPack(logicalNotOptions)
-        return x
-
-    # LogicalNotOptionsT
-    def _UnPack(self, logicalNotOptions):
-        if logicalNotOptions is None:
-            return
-
-    # LogicalNotOptionsT
-    def Pack(self, builder):
-        LogicalNotOptionsStart(builder)
-        logicalNotOptions = LogicalNotOptionsEnd(builder)
-        return logicalNotOptions
-
-
-
-class IfOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsIfOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = IfOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def IfOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # IfOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # IfOptions
-    def ThenSubgraphIndex(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # IfOptions
-    def ElseSubgraphIndex(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-def IfOptionsStart(builder): builder.StartObject(2)
-def IfOptionsAddThenSubgraphIndex(builder, thenSubgraphIndex): builder.PrependInt32Slot(0, thenSubgraphIndex, 0)
-def IfOptionsAddElseSubgraphIndex(builder, elseSubgraphIndex): builder.PrependInt32Slot(1, elseSubgraphIndex, 0)
-def IfOptionsEnd(builder): return builder.EndObject()
-
-
-class IfOptionsT(object):
-
-    # IfOptionsT
-    def __init__(self):
-        self.thenSubgraphIndex = 0 # type: int
-        self.elseSubgraphIndex = 0 # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        ifOptions = IfOptions()
-        ifOptions.Init(buf, pos)
-        return cls.InitFromObj(ifOptions)
-
-    @classmethod
-    def InitFromObj(cls, ifOptions):
-        x = IfOptionsT()
-        x._UnPack(ifOptions)
-        return x
-
-    # IfOptionsT
-    def _UnPack(self, ifOptions):
-        if ifOptions is None:
-            return
-        self.thenSubgraphIndex = ifOptions.ThenSubgraphIndex()
-        self.elseSubgraphIndex = ifOptions.ElseSubgraphIndex()
-
-    # IfOptionsT
-    def Pack(self, builder):
-        IfOptionsStart(builder)
-        IfOptionsAddThenSubgraphIndex(builder, self.thenSubgraphIndex)
-        IfOptionsAddElseSubgraphIndex(builder, self.elseSubgraphIndex)
-        ifOptions = IfOptionsEnd(builder)
-        return ifOptions
-
-
-
-class BatchMatMulOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsBatchMatMulOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = BatchMatMulOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def BatchMatMulOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # BatchMatMulOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # BatchMatMulOptions
-    def AdjX(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-    # BatchMatMulOptions
-    def AdjY(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-def BatchMatMulOptionsStart(builder): builder.StartObject(2)
-def BatchMatMulOptionsAddAdjX(builder, adjX): builder.PrependBoolSlot(0, adjX, 0)
-def BatchMatMulOptionsAddAdjY(builder, adjY): builder.PrependBoolSlot(1, adjY, 0)
-def BatchMatMulOptionsEnd(builder): return builder.EndObject()
-
-
-class BatchMatMulOptionsT(object):
-
-    # BatchMatMulOptionsT
-    def __init__(self):
-        self.adjX = False # type: bool
-        self.adjY = False # type: bool
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        batchMatMulOptions = BatchMatMulOptions()
-        batchMatMulOptions.Init(buf, pos)
-        return cls.InitFromObj(batchMatMulOptions)
-
-    @classmethod
-    def InitFromObj(cls, batchMatMulOptions):
-        x = BatchMatMulOptionsT()
-        x._UnPack(batchMatMulOptions)
-        return x
-
-    # BatchMatMulOptionsT
-    def _UnPack(self, batchMatMulOptions):
-        if batchMatMulOptions is None:
-            return
-        self.adjX = batchMatMulOptions.AdjX()
-        self.adjY = batchMatMulOptions.AdjY()
-
-    # BatchMatMulOptionsT
-    def Pack(self, builder):
-        BatchMatMulOptionsStart(builder)
-        BatchMatMulOptionsAddAdjX(builder, self.adjX)
-        BatchMatMulOptionsAddAdjY(builder, self.adjY)
-        batchMatMulOptions = BatchMatMulOptionsEnd(builder)
-        return batchMatMulOptions
-
-
-class DimensionType(object):
-    DENSE = 0
-    SPARSE_CSR = 1
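Enums like Padding and DimensionType in this generated module are plain namespace classes over ints, with no validation anywhere: code simply compares the raw int8 read from the buffer against these constants. A short sketch:

    meta = DimensionMetadataT()
    meta.format = DimensionType.DENSE
    is_sparse = meta.format == DimensionType.SPARSE_CSR  # plain int comparison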
-
-
-
-
-class ExpandDimsOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsExpandDimsOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = ExpandDimsOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def ExpandDimsOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # ExpandDimsOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def ExpandDimsOptionsStart(builder): builder.StartObject(0)
-def ExpandDimsOptionsEnd(builder): return builder.EndObject()
-
-
-class ExpandDimsOptionsT(object):
-
-    # ExpandDimsOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        expandDimsOptions = ExpandDimsOptions()
-        expandDimsOptions.Init(buf, pos)
-        return cls.InitFromObj(expandDimsOptions)
-
-    @classmethod
-    def InitFromObj(cls, expandDimsOptions):
-        x = ExpandDimsOptionsT()
-        x._UnPack(expandDimsOptions)
-        return x
-
-    # ExpandDimsOptionsT
-    def _UnPack(self, expandDimsOptions):
-        if expandDimsOptions is None:
-            return
-
-    # ExpandDimsOptionsT
-    def Pack(self, builder):
-        ExpandDimsOptionsStart(builder)
-        expandDimsOptions = ExpandDimsOptionsEnd(builder)
-        return expandDimsOptions
-
-
-
-class FloorDivOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsFloorDivOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = FloorDivOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def FloorDivOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # FloorDivOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def FloorDivOptionsStart(builder): builder.StartObject(0)
-def FloorDivOptionsEnd(builder): return builder.EndObject()
-
-
-class FloorDivOptionsT(object):
-
-    # FloorDivOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        floorDivOptions = FloorDivOptions()
-        floorDivOptions.Init(buf, pos)
-        return cls.InitFromObj(floorDivOptions)
-
-    @classmethod
-    def InitFromObj(cls, floorDivOptions):
-        x = FloorDivOptionsT()
-        x._UnPack(floorDivOptions)
-        return x
-
-    # FloorDivOptionsT
-    def _UnPack(self, floorDivOptions):
-        if floorDivOptions is None:
-            return
-
-    # FloorDivOptionsT
-    def Pack(self, builder):
-        FloorDivOptionsStart(builder)
-        floorDivOptions = FloorDivOptionsEnd(builder)
-        return floorDivOptions
-
-
-
-class DensifyOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsDensifyOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = DensifyOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def DensifyOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # DensifyOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def DensifyOptionsStart(builder): builder.StartObject(0)
-def DensifyOptionsEnd(builder): return builder.EndObject()
-
-
-class DensifyOptionsT(object):
-
-    # DensifyOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        densifyOptions = DensifyOptions()
-        densifyOptions.Init(buf, pos)
-        return cls.InitFromObj(densifyOptions)
-
-    @classmethod
-    def InitFromObj(cls, densifyOptions):
-        x = DensifyOptionsT()
-        x._UnPack(densifyOptions)
-        return x
-
-    # DensifyOptionsT
-    def _UnPack(self, densifyOptions):
-        if densifyOptions is None:
-            return
-
-    # DensifyOptionsT
-    def Pack(self, builder):
-        DensifyOptionsStart(builder)
-        densifyOptions = DensifyOptionsEnd(builder)
-        return densifyOptions
-
-
-
-class LessOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsLessOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = LessOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def LessOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # LessOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def LessOptionsStart(builder): builder.StartObject(0)
-def LessOptionsEnd(builder): return builder.EndObject()
-
-
-class LessOptionsT(object):
-
-    # LessOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        lessOptions = LessOptions()
-        lessOptions.Init(buf, pos)
-        return cls.InitFromObj(lessOptions)
-
-    @classmethod
-    def InitFromObj(cls, lessOptions):
-        x = LessOptionsT()
-        x._UnPack(lessOptions)
-        return x
-
-    # LessOptionsT
-    def _UnPack(self, lessOptions):
-        if lessOptions is None:
-            return
-
-    # LessOptionsT
-    def Pack(self, builder):
-        LessOptionsStart(builder)
-        lessOptions = LessOptionsEnd(builder)
-        return lessOptions
-
-
-
-class BidirectionalSequenceLSTMOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsBidirectionalSequenceLSTMOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = BidirectionalSequenceLSTMOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def BidirectionalSequenceLSTMOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # BidirectionalSequenceLSTMOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # BidirectionalSequenceLSTMOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # BidirectionalSequenceLSTMOptions
-    def CellClip(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
-        return 0.0
-
-    # BidirectionalSequenceLSTMOptions
-    def ProjClip(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
-        return 0.0
-
-    # BidirectionalSequenceLSTMOptions
-    def MergeOutputs(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-    # BidirectionalSequenceLSTMOptions
-    def TimeMajor(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return True
-
-    # BidirectionalSequenceLSTMOptions
-    def AsymmetricQuantizeInputs(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-def BidirectionalSequenceLSTMOptionsStart(builder): builder.StartObject(6)
-def BidirectionalSequenceLSTMOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0)
-def BidirectionalSequenceLSTMOptionsAddCellClip(builder, cellClip): builder.PrependFloat32Slot(1, cellClip, 0.0)
-def BidirectionalSequenceLSTMOptionsAddProjClip(builder, projClip): builder.PrependFloat32Slot(2, projClip, 0.0)
-def BidirectionalSequenceLSTMOptionsAddMergeOutputs(builder, mergeOutputs): builder.PrependBoolSlot(3, mergeOutputs, 0)
-def BidirectionalSequenceLSTMOptionsAddTimeMajor(builder, timeMajor): builder.PrependBoolSlot(4, timeMajor, 1)
-def BidirectionalSequenceLSTMOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(5, asymmetricQuantizeInputs, 0)
-def BidirectionalSequenceLSTMOptionsEnd(builder): return builder.EndObject()
-
-
-class BidirectionalSequenceLSTMOptionsT(object):
-
-    # BidirectionalSequenceLSTMOptionsT
-    def __init__(self):
-        self.fusedActivationFunction = 0 # type: int
-        self.cellClip = 0.0 # type: float
-        self.projClip = 0.0 # type: float
-        self.mergeOutputs = False # type: bool
-        self.timeMajor = True # type: bool
-        self.asymmetricQuantizeInputs = False # type: bool
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        bidirectionalSequenceLSTMOptions = BidirectionalSequenceLSTMOptions()
-        bidirectionalSequenceLSTMOptions.Init(buf, pos)
-        return cls.InitFromObj(bidirectionalSequenceLSTMOptions)
-
-    @classmethod
-    def InitFromObj(cls, bidirectionalSequenceLSTMOptions):
-        x = BidirectionalSequenceLSTMOptionsT()
-        x._UnPack(bidirectionalSequenceLSTMOptions)
-        return x
-
-    # BidirectionalSequenceLSTMOptionsT
-    def _UnPack(self, bidirectionalSequenceLSTMOptions):
-        if bidirectionalSequenceLSTMOptions is None:
-            return
-        self.fusedActivationFunction = bidirectionalSequenceLSTMOptions.FusedActivationFunction()
-        self.cellClip = bidirectionalSequenceLSTMOptions.CellClip()
-        self.projClip = bidirectionalSequenceLSTMOptions.ProjClip()
-        self.mergeOutputs = bidirectionalSequenceLSTMOptions.MergeOutputs()
-        self.timeMajor = bidirectionalSequenceLSTMOptions.TimeMajor()
-        self.asymmetricQuantizeInputs = bidirectionalSequenceLSTMOptions.AsymmetricQuantizeInputs()
-
-    # BidirectionalSequenceLSTMOptionsT
-    def Pack(self, builder):
-        BidirectionalSequenceLSTMOptionsStart(builder)
-        BidirectionalSequenceLSTMOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        BidirectionalSequenceLSTMOptionsAddCellClip(builder, self.cellClip)
-        BidirectionalSequenceLSTMOptionsAddProjClip(builder, self.projClip)
-        BidirectionalSequenceLSTMOptionsAddMergeOutputs(builder, self.mergeOutputs)
-        BidirectionalSequenceLSTMOptionsAddTimeMajor(builder, self.timeMajor)
-        BidirectionalSequenceLSTMOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
-        bidirectionalSequenceLSTMOptions = BidirectionalSequenceLSTMOptionsEnd(builder)
-        return bidirectionalSequenceLSTMOptions
-
-
-
-class SegmentSumOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsSegmentSumOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SegmentSumOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def SegmentSumOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SegmentSumOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def SegmentSumOptionsStart(builder): builder.StartObject(0)
-def SegmentSumOptionsEnd(builder): return builder.EndObject()
-
-
-class SegmentSumOptionsT(object):
-
-    # SegmentSumOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        segmentSumOptions = SegmentSumOptions()
-        segmentSumOptions.Init(buf, pos)
-        return cls.InitFromObj(segmentSumOptions)
-
-    @classmethod
-    def InitFromObj(cls, segmentSumOptions):
-        x = SegmentSumOptionsT()
-        x._UnPack(segmentSumOptions)
-        return x
-
-    # SegmentSumOptionsT
-    def _UnPack(self, segmentSumOptions):
-        if segmentSumOptions is None:
-            return
-
-    # SegmentSumOptionsT
-    def Pack(self, builder):
-        SegmentSumOptionsStart(builder)
-        segmentSumOptions = SegmentSumOptionsEnd(builder)
-        return segmentSumOptions
-
-
-
-class EmbeddingLookupSparseOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsEmbeddingLookupSparseOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = EmbeddingLookupSparseOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def EmbeddingLookupSparseOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # EmbeddingLookupSparseOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # EmbeddingLookupSparseOptions
-    def Combiner(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-def EmbeddingLookupSparseOptionsStart(builder): builder.StartObject(1)
-def EmbeddingLookupSparseOptionsAddCombiner(builder, combiner): builder.PrependInt8Slot(0, combiner, 0)
-def EmbeddingLookupSparseOptionsEnd(builder): return builder.EndObject()
-
-
-class EmbeddingLookupSparseOptionsT(object):
-
-    # EmbeddingLookupSparseOptionsT
-    def __init__(self):
-        self.combiner = 0 # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        embeddingLookupSparseOptions = EmbeddingLookupSparseOptions()
-        embeddingLookupSparseOptions.Init(buf, pos)
-        return cls.InitFromObj(embeddingLookupSparseOptions)
-
-    @classmethod
-    def InitFromObj(cls, embeddingLookupSparseOptions):
-        x = EmbeddingLookupSparseOptionsT()
-        x._UnPack(embeddingLookupSparseOptions)
-        return x
-
-    # EmbeddingLookupSparseOptionsT
-    def _UnPack(self, embeddingLookupSparseOptions):
-        if embeddingLookupSparseOptions is None:
-            return
-        self.combiner = embeddingLookupSparseOptions.Combiner()
-
-    # EmbeddingLookupSparseOptionsT
-    def Pack(self, builder):
-        EmbeddingLookupSparseOptionsStart(builder)
-        EmbeddingLookupSparseOptionsAddCombiner(builder, self.combiner)
-        embeddingLookupSparseOptions = EmbeddingLookupSparseOptionsEnd(builder)
-        return embeddingLookupSparseOptions
= flatbuffers.table.Table(buf, pos) - - # SparseToDenseOptions - def ValidateIndices(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) - return False - -def SparseToDenseOptionsStart(builder): builder.StartObject(1) -def SparseToDenseOptionsAddValidateIndices(builder, validateIndices): builder.PrependBoolSlot(0, validateIndices, 0) -def SparseToDenseOptionsEnd(builder): return builder.EndObject() - - -class SparseToDenseOptionsT(object): - - # SparseToDenseOptionsT - def __init__(self): - self.validateIndices = False # type: bool - - @classmethod - def InitFromBuf(cls, buf, pos): - sparseToDenseOptions = SparseToDenseOptions() - sparseToDenseOptions.Init(buf, pos) - return cls.InitFromObj(sparseToDenseOptions) - - @classmethod - def InitFromObj(cls, sparseToDenseOptions): - x = SparseToDenseOptionsT() - x._UnPack(sparseToDenseOptions) - return x - - # SparseToDenseOptionsT - def _UnPack(self, sparseToDenseOptions): - if sparseToDenseOptions is None: - return - self.validateIndices = sparseToDenseOptions.ValidateIndices() - - # SparseToDenseOptionsT - def Pack(self, builder): - SparseToDenseOptionsStart(builder) - SparseToDenseOptionsAddValidateIndices(builder, self.validateIndices) - sparseToDenseOptions = SparseToDenseOptionsEnd(builder) - return sparseToDenseOptions - - - -class MatrixDiagOptions(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsMatrixDiagOptions(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = MatrixDiagOptions() - x.Init(buf, n + offset) - return x - - @classmethod - def MatrixDiagOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) - - # MatrixDiagOptions - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - -def MatrixDiagOptionsStart(builder): builder.StartObject(0) -def MatrixDiagOptionsEnd(builder): return builder.EndObject() - - -class MatrixDiagOptionsT(object): - - # MatrixDiagOptionsT - def __init__(self): - pass - - @classmethod - def InitFromBuf(cls, buf, pos): - matrixDiagOptions = MatrixDiagOptions() - matrixDiagOptions.Init(buf, pos) - return cls.InitFromObj(matrixDiagOptions) - - @classmethod - def InitFromObj(cls, matrixDiagOptions): - x = MatrixDiagOptionsT() - x._UnPack(matrixDiagOptions) - return x - - # MatrixDiagOptionsT - def _UnPack(self, matrixDiagOptions): - if matrixDiagOptions is None: - return - - # MatrixDiagOptionsT - def Pack(self, builder): - MatrixDiagOptionsStart(builder) - matrixDiagOptions = MatrixDiagOptionsEnd(builder) - return matrixDiagOptions - - - -class LSHProjectionOptions(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsLSHProjectionOptions(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = LSHProjectionOptions() - x.Init(buf, n + offset) - return x - - @classmethod - def LSHProjectionOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) - - # LSHProjectionOptions - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - - # LSHProjectionOptions - def Type(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - return 
self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) - return 0 - -def LSHProjectionOptionsStart(builder): builder.StartObject(1) -def LSHProjectionOptionsAddType(builder, type): builder.PrependInt8Slot(0, type, 0) -def LSHProjectionOptionsEnd(builder): return builder.EndObject() - - -class LSHProjectionOptionsT(object): - - # LSHProjectionOptionsT - def __init__(self): - self.type = 0 # type: int - - @classmethod - def InitFromBuf(cls, buf, pos): - lSHProjectionOptions = LSHProjectionOptions() - lSHProjectionOptions.Init(buf, pos) - return cls.InitFromObj(lSHProjectionOptions) - - @classmethod - def InitFromObj(cls, lSHProjectionOptions): - x = LSHProjectionOptionsT() - x._UnPack(lSHProjectionOptions) - return x - - # LSHProjectionOptionsT - def _UnPack(self, lSHProjectionOptions): - if lSHProjectionOptions is None: - return - self.type = lSHProjectionOptions.Type() - - # LSHProjectionOptionsT - def Pack(self, builder): - LSHProjectionOptionsStart(builder) - LSHProjectionOptionsAddType(builder, self.type) - lSHProjectionOptions = LSHProjectionOptionsEnd(builder) - return lSHProjectionOptions - - - -class SparsityParameters(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsSparsityParameters(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = SparsityParameters() - x.Init(buf, n + offset) - return x - - @classmethod - def SparsityParametersBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) - - # SparsityParameters - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - - # SparsityParameters - def TraversalOrder(self, j): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - a = self._tab.Vector(o) - return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) - return 0 - - # SparsityParameters - def TraversalOrderAsNumpy(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) - return 0 - - # SparsityParameters - def TraversalOrderLength(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - return self._tab.VectorLen(o) - return 0 - - # SparsityParameters - def TraversalOrderIsNone(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - return o == 0 - - # SparsityParameters - def BlockMap(self, j): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) - if o != 0: - a = self._tab.Vector(o) - return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) - return 0 - - # SparsityParameters - def BlockMapAsNumpy(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) - if o != 0: - return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) - return 0 - - # SparsityParameters - def BlockMapLength(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) - if o != 0: - return self._tab.VectorLen(o) - return 0 - - # SparsityParameters - def BlockMapIsNone(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) - return o == 0 - - # SparsityParameters - def DimMetadata(self, j): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) - if 
o != 0: - x = self._tab.Vector(o) - x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 - x = self._tab.Indirect(x) - obj = DimensionMetadata() - obj.Init(self._tab.Bytes, x) - return obj - return None - - # SparsityParameters - def DimMetadataLength(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) - if o != 0: - return self._tab.VectorLen(o) - return 0 - - # SparsityParameters - def DimMetadataIsNone(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) - return o == 0 - -def SparsityParametersStart(builder): builder.StartObject(3) -def SparsityParametersAddTraversalOrder(builder, traversalOrder): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(traversalOrder), 0) -def SparsityParametersStartTraversalOrderVector(builder, numElems): return builder.StartVector(4, numElems, 4) -def SparsityParametersAddBlockMap(builder, blockMap): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(blockMap), 0) -def SparsityParametersStartBlockMapVector(builder, numElems): return builder.StartVector(4, numElems, 4) -def SparsityParametersAddDimMetadata(builder, dimMetadata): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(dimMetadata), 0) -def SparsityParametersStartDimMetadataVector(builder, numElems): return builder.StartVector(4, numElems, 4) -def SparsityParametersEnd(builder): return builder.EndObject() - -try: - from typing import List -except: - pass - -class SparsityParametersT(object): - - # SparsityParametersT - def __init__(self): - self.traversalOrder = None # type: List[int] - self.blockMap = None # type: List[int] - self.dimMetadata = None # type: List[DimensionMetadataT] - - @classmethod - def InitFromBuf(cls, buf, pos): - sparsityParameters = SparsityParameters() - sparsityParameters.Init(buf, pos) - return cls.InitFromObj(sparsityParameters) - - @classmethod - def InitFromObj(cls, sparsityParameters): - x = SparsityParametersT() - x._UnPack(sparsityParameters) - return x - - # SparsityParametersT - def _UnPack(self, sparsityParameters): - if sparsityParameters is None: - return - if not sparsityParameters.TraversalOrderIsNone(): - if np is None: - self.traversalOrder = [] - for i in range(sparsityParameters.TraversalOrderLength()): - self.traversalOrder.append(sparsityParameters.TraversalOrder(i)) - else: - self.traversalOrder = sparsityParameters.TraversalOrderAsNumpy() - if not sparsityParameters.BlockMapIsNone(): - if np is None: - self.blockMap = [] - for i in range(sparsityParameters.BlockMapLength()): - self.blockMap.append(sparsityParameters.BlockMap(i)) - else: - self.blockMap = sparsityParameters.BlockMapAsNumpy() - if not sparsityParameters.DimMetadataIsNone(): - self.dimMetadata = [] - for i in range(sparsityParameters.DimMetadataLength()): - if sparsityParameters.DimMetadata(i) is None: - self.dimMetadata.append(None) - else: - dimensionMetadata_ = DimensionMetadataT.InitFromObj(sparsityParameters.DimMetadata(i)) - self.dimMetadata.append(dimensionMetadata_) - - # SparsityParametersT - def Pack(self, builder): - if self.traversalOrder is not None: - if np is not None and type(self.traversalOrder) is np.ndarray: - traversalOrder = builder.CreateNumpyVector(self.traversalOrder) - else: - SparsityParametersStartTraversalOrderVector(builder, len(self.traversalOrder)) - for i in reversed(range(len(self.traversalOrder))): - builder.PrependInt32(self.traversalOrder[i]) - traversalOrder = 
builder.EndVector(len(self.traversalOrder)) - if self.blockMap is not None: - if np is not None and type(self.blockMap) is np.ndarray: - blockMap = builder.CreateNumpyVector(self.blockMap) - else: - SparsityParametersStartBlockMapVector(builder, len(self.blockMap)) - for i in reversed(range(len(self.blockMap))): - builder.PrependInt32(self.blockMap[i]) - blockMap = builder.EndVector(len(self.blockMap)) - if self.dimMetadata is not None: - dimMetadatalist = [] - for i in range(len(self.dimMetadata)): - dimMetadatalist.append(self.dimMetadata[i].Pack(builder)) - SparsityParametersStartDimMetadataVector(builder, len(self.dimMetadata)) - for i in reversed(range(len(self.dimMetadata))): - builder.PrependUOffsetTRelative(dimMetadatalist[i]) - dimMetadata = builder.EndVector(len(self.dimMetadata)) - SparsityParametersStart(builder) - if self.traversalOrder is not None: - SparsityParametersAddTraversalOrder(builder, traversalOrder) - if self.blockMap is not None: - SparsityParametersAddBlockMap(builder, blockMap) - if self.dimMetadata is not None: - SparsityParametersAddDimMetadata(builder, dimMetadata) - sparsityParameters = SparsityParametersEnd(builder) - return sparsityParameters - - - -class UniqueOptions(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsUniqueOptions(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = UniqueOptions() - x.Init(buf, n + offset) - return x - - @classmethod - def UniqueOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) - - # UniqueOptions - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - - # UniqueOptions - def IdxOutType(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) - return 2 - -def UniqueOptionsStart(builder): builder.StartObject(1) -def UniqueOptionsAddIdxOutType(builder, idxOutType): builder.PrependInt8Slot(0, idxOutType, 2) -def UniqueOptionsEnd(builder): return builder.EndObject() - - -class UniqueOptionsT(object): - - # UniqueOptionsT - def __init__(self): - self.idxOutType = 2 # type: int - - @classmethod - def InitFromBuf(cls, buf, pos): - uniqueOptions = UniqueOptions() - uniqueOptions.Init(buf, pos) - return cls.InitFromObj(uniqueOptions) - - @classmethod - def InitFromObj(cls, uniqueOptions): - x = UniqueOptionsT() - x._UnPack(uniqueOptions) - return x - - # UniqueOptionsT - def _UnPack(self, uniqueOptions): - if uniqueOptions is None: - return - self.idxOutType = uniqueOptions.IdxOutType() - - # UniqueOptionsT - def Pack(self, builder): - UniqueOptionsStart(builder) - UniqueOptionsAddIdxOutType(builder, self.idxOutType) - uniqueOptions = UniqueOptionsEnd(builder) - return uniqueOptions - - - -class ReshapeOptions(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsReshapeOptions(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = ReshapeOptions() - x.Init(buf, n + offset) - return x - - @classmethod - def ReshapeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) - - # ReshapeOptions - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - - # ReshapeOptions - def NewShape(self, j): - o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - a = self._tab.Vector(o) - return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) - return 0 - - # ReshapeOptions - def NewShapeAsNumpy(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) - return 0 - - # ReshapeOptions - def NewShapeLength(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - return self._tab.VectorLen(o) - return 0 - - # ReshapeOptions - def NewShapeIsNone(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - return o == 0 - -def ReshapeOptionsStart(builder): builder.StartObject(1) -def ReshapeOptionsAddNewShape(builder, newShape): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(newShape), 0) -def ReshapeOptionsStartNewShapeVector(builder, numElems): return builder.StartVector(4, numElems, 4) -def ReshapeOptionsEnd(builder): return builder.EndObject() - -try: - from typing import List -except: - pass - -class ReshapeOptionsT(object): - - # ReshapeOptionsT - def __init__(self): - self.newShape = None # type: List[int] - - @classmethod - def InitFromBuf(cls, buf, pos): - reshapeOptions = ReshapeOptions() - reshapeOptions.Init(buf, pos) - return cls.InitFromObj(reshapeOptions) - - @classmethod - def InitFromObj(cls, reshapeOptions): - x = ReshapeOptionsT() - x._UnPack(reshapeOptions) - return x - - # ReshapeOptionsT - def _UnPack(self, reshapeOptions): - if reshapeOptions is None: - return - if not reshapeOptions.NewShapeIsNone(): - if np is None: - self.newShape = [] - for i in range(reshapeOptions.NewShapeLength()): - self.newShape.append(reshapeOptions.NewShape(i)) - else: - self.newShape = reshapeOptions.NewShapeAsNumpy() - - # ReshapeOptionsT - def Pack(self, builder): - if self.newShape is not None: - if np is not None and type(self.newShape) is np.ndarray: - newShape = builder.CreateNumpyVector(self.newShape) - else: - ReshapeOptionsStartNewShapeVector(builder, len(self.newShape)) - for i in reversed(range(len(self.newShape))): - builder.PrependInt32(self.newShape[i]) - newShape = builder.EndVector(len(self.newShape)) - ReshapeOptionsStart(builder) - if self.newShape is not None: - ReshapeOptionsAddNewShape(builder, newShape) - reshapeOptions = ReshapeOptionsEnd(builder) - return reshapeOptions - - - -class PowOptions(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsPowOptions(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = PowOptions() - x.Init(buf, n + offset) - return x - - @classmethod - def PowOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) - - # PowOptions - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - -def PowOptionsStart(builder): builder.StartObject(0) -def PowOptionsEnd(builder): return builder.EndObject() - - -class PowOptionsT(object): - - # PowOptionsT - def __init__(self): - pass - - @classmethod - def InitFromBuf(cls, buf, pos): - powOptions = PowOptions() - powOptions.Init(buf, pos) - return cls.InitFromObj(powOptions) - - @classmethod - def InitFromObj(cls, powOptions): - x = PowOptionsT() - x._UnPack(powOptions) - return x - - # PowOptionsT - def _UnPack(self, 
powOptions): - if powOptions is None: - return - - # PowOptionsT - def Pack(self, builder): - PowOptionsStart(builder) - powOptions = PowOptionsEnd(builder) - return powOptions - - - -class GreaterEqualOptions(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsGreaterEqualOptions(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = GreaterEqualOptions() - x.Init(buf, n + offset) - return x - - @classmethod - def GreaterEqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) - - # GreaterEqualOptions - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - -def GreaterEqualOptionsStart(builder): builder.StartObject(0) -def GreaterEqualOptionsEnd(builder): return builder.EndObject() - - -class GreaterEqualOptionsT(object): - - # GreaterEqualOptionsT - def __init__(self): - pass - - @classmethod - def InitFromBuf(cls, buf, pos): - greaterEqualOptions = GreaterEqualOptions() - greaterEqualOptions.Init(buf, pos) - return cls.InitFromObj(greaterEqualOptions) - - @classmethod - def InitFromObj(cls, greaterEqualOptions): - x = GreaterEqualOptionsT() - x._UnPack(greaterEqualOptions) - return x - - # GreaterEqualOptionsT - def _UnPack(self, greaterEqualOptions): - if greaterEqualOptions is None: - return - - # GreaterEqualOptionsT - def Pack(self, builder): - GreaterEqualOptionsStart(builder) - greaterEqualOptions = GreaterEqualOptionsEnd(builder) - return greaterEqualOptions - - - -class SVDFOptions(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsSVDFOptions(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = SVDFOptions() - x.Init(buf, n + offset) - return x - - @classmethod - def SVDFOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) - - # SVDFOptions - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - - # SVDFOptions - def Rank(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) - return 0 - - # SVDFOptions - def FusedActivationFunction(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) - if o != 0: - return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) - return 0 - - # SVDFOptions - def AsymmetricQuantizeInputs(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) - if o != 0: - return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) - return False - -def SVDFOptionsStart(builder): builder.StartObject(3) -def SVDFOptionsAddRank(builder, rank): builder.PrependInt32Slot(0, rank, 0) -def SVDFOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(1, fusedActivationFunction, 0) -def SVDFOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(2, asymmetricQuantizeInputs, 0) -def SVDFOptionsEnd(builder): return builder.EndObject() - - -class SVDFOptionsT(object): - - # SVDFOptionsT - def __init__(self): - self.rank = 0 # type: int - self.fusedActivationFunction = 0 # type: int - self.asymmetricQuantizeInputs = False # type: bool - - @classmethod - def InitFromBuf(cls, buf, pos): - sVDFOptions = 
SVDFOptions() - sVDFOptions.Init(buf, pos) - return cls.InitFromObj(sVDFOptions) - - @classmethod - def InitFromObj(cls, sVDFOptions): - x = SVDFOptionsT() - x._UnPack(sVDFOptions) - return x - - # SVDFOptionsT - def _UnPack(self, sVDFOptions): - if sVDFOptions is None: - return - self.rank = sVDFOptions.Rank() - self.fusedActivationFunction = sVDFOptions.FusedActivationFunction() - self.asymmetricQuantizeInputs = sVDFOptions.AsymmetricQuantizeInputs() - - # SVDFOptionsT - def Pack(self, builder): - SVDFOptionsStart(builder) - SVDFOptionsAddRank(builder, self.rank) - SVDFOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction) - SVDFOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs) - sVDFOptions = SVDFOptionsEnd(builder) - return sVDFOptions - - - -class CustomQuantization(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsCustomQuantization(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = CustomQuantization() - x.Init(buf, n + offset) - return x - - @classmethod - def CustomQuantizationBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) - - # CustomQuantization - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - - # CustomQuantization - def Custom(self, j): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - a = self._tab.Vector(o) - return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1)) - return 0 - - # CustomQuantization - def CustomAsNumpy(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o) - return 0 - - # CustomQuantization - def CustomLength(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - return self._tab.VectorLen(o) - return 0 - - # CustomQuantization - def CustomIsNone(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - return o == 0 - -def CustomQuantizationStart(builder): builder.StartObject(1) -def CustomQuantizationAddCustom(builder, custom): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(custom), 0) -def CustomQuantizationStartCustomVector(builder, numElems): return builder.StartVector(1, numElems, 1) -def CustomQuantizationEnd(builder): return builder.EndObject() - -try: - from typing import List -except: - pass - -class CustomQuantizationT(object): - - # CustomQuantizationT - def __init__(self): - self.custom = None # type: List[int] - - @classmethod - def InitFromBuf(cls, buf, pos): - customQuantization = CustomQuantization() - customQuantization.Init(buf, pos) - return cls.InitFromObj(customQuantization) - - @classmethod - def InitFromObj(cls, customQuantization): - x = CustomQuantizationT() - x._UnPack(customQuantization) - return x - - # CustomQuantizationT - def _UnPack(self, customQuantization): - if customQuantization is None: - return - if not customQuantization.CustomIsNone(): - if np is None: - self.custom = [] - for i in range(customQuantization.CustomLength()): - self.custom.append(customQuantization.Custom(i)) - else: - self.custom = customQuantization.CustomAsNumpy() - - # CustomQuantizationT - def Pack(self, builder): - if self.custom is not None: - if np is not None and type(self.custom) is 
np.ndarray: - custom = builder.CreateNumpyVector(self.custom) - else: - CustomQuantizationStartCustomVector(builder, len(self.custom)) - for i in reversed(range(len(self.custom))): - builder.PrependUint8(self.custom[i]) - custom = builder.EndVector(len(self.custom)) - CustomQuantizationStart(builder) - if self.custom is not None: - CustomQuantizationAddCustom(builder, custom) - customQuantization = CustomQuantizationEnd(builder) - return customQuantization - - - -class SpaceToDepthOptions(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsSpaceToDepthOptions(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = SpaceToDepthOptions() - x.Init(buf, n + offset) - return x - - @classmethod - def SpaceToDepthOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) - - # SpaceToDepthOptions - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - - # SpaceToDepthOptions - def BlockSize(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) - return 0 - -def SpaceToDepthOptionsStart(builder): builder.StartObject(1) -def SpaceToDepthOptionsAddBlockSize(builder, blockSize): builder.PrependInt32Slot(0, blockSize, 0) -def SpaceToDepthOptionsEnd(builder): return builder.EndObject() - - -class SpaceToDepthOptionsT(object): - - # SpaceToDepthOptionsT - def __init__(self): - self.blockSize = 0 # type: int - - @classmethod - def InitFromBuf(cls, buf, pos): - spaceToDepthOptions = SpaceToDepthOptions() - spaceToDepthOptions.Init(buf, pos) - return cls.InitFromObj(spaceToDepthOptions) - - @classmethod - def InitFromObj(cls, spaceToDepthOptions): - x = SpaceToDepthOptionsT() - x._UnPack(spaceToDepthOptions) - return x - - # SpaceToDepthOptionsT - def _UnPack(self, spaceToDepthOptions): - if spaceToDepthOptions is None: - return - self.blockSize = spaceToDepthOptions.BlockSize() - - # SpaceToDepthOptionsT - def Pack(self, builder): - SpaceToDepthOptionsStart(builder) - SpaceToDepthOptionsAddBlockSize(builder, self.blockSize) - spaceToDepthOptions = SpaceToDepthOptionsEnd(builder) - return spaceToDepthOptions - - - -class TransposeConvOptions(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsTransposeConvOptions(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = TransposeConvOptions() - x.Init(buf, n + offset) - return x - - @classmethod - def TransposeConvOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) - - # TransposeConvOptions - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - - # TransposeConvOptions - def Padding(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) - return 0 - - # TransposeConvOptions - def StrideW(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) - if o != 0: - return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) - return 0 - - # TransposeConvOptions - def StrideH(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) - if o != 0: - return 
self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) - return 0 - -def TransposeConvOptionsStart(builder): builder.StartObject(3) -def TransposeConvOptionsAddPadding(builder, padding): builder.PrependInt8Slot(0, padding, 0) -def TransposeConvOptionsAddStrideW(builder, strideW): builder.PrependInt32Slot(1, strideW, 0) -def TransposeConvOptionsAddStrideH(builder, strideH): builder.PrependInt32Slot(2, strideH, 0) -def TransposeConvOptionsEnd(builder): return builder.EndObject() - - -class TransposeConvOptionsT(object): - - # TransposeConvOptionsT - def __init__(self): - self.padding = 0 # type: int - self.strideW = 0 # type: int - self.strideH = 0 # type: int - - @classmethod - def InitFromBuf(cls, buf, pos): - transposeConvOptions = TransposeConvOptions() - transposeConvOptions.Init(buf, pos) - return cls.InitFromObj(transposeConvOptions) - - @classmethod - def InitFromObj(cls, transposeConvOptions): - x = TransposeConvOptionsT() - x._UnPack(transposeConvOptions) - return x - - # TransposeConvOptionsT - def _UnPack(self, transposeConvOptions): - if transposeConvOptions is None: - return - self.padding = transposeConvOptions.Padding() - self.strideW = transposeConvOptions.StrideW() - self.strideH = transposeConvOptions.StrideH() - - # TransposeConvOptionsT - def Pack(self, builder): - TransposeConvOptionsStart(builder) - TransposeConvOptionsAddPadding(builder, self.padding) - TransposeConvOptionsAddStrideW(builder, self.strideW) - TransposeConvOptionsAddStrideH(builder, self.strideH) - transposeConvOptions = TransposeConvOptionsEnd(builder) - return transposeConvOptions - - - -class Buffer(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsBuffer(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = Buffer() - x.Init(buf, n + offset) - return x - - @classmethod - def BufferBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) - - # Buffer - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - - # Buffer - def Data(self, j): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - a = self._tab.Vector(o) - return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1)) - return 0 - - # Buffer - def DataAsNumpy(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o) - return 0 - - # Buffer - def DataLength(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - return self._tab.VectorLen(o) - return 0 - - # Buffer - def DataIsNone(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - return o == 0 - -def BufferStart(builder): builder.StartObject(1) -def BufferAddData(builder, data): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(data), 0) -def BufferStartDataVector(builder, numElems): return builder.StartVector(1, numElems, 1) -def BufferEnd(builder): return builder.EndObject() - -try: - from typing import List -except: - pass - -class BufferT(object): - - # BufferT - def __init__(self): - self.data = None # type: List[int] - - @classmethod - def InitFromBuf(cls, buf, pos): - buffer = Buffer() - buffer.Init(buf, pos) - return cls.InitFromObj(buffer) - - @classmethod - def InitFromObj(cls, 
buffer):
-        x = BufferT()
-        x._UnPack(buffer)
-        return x
-
-    # BufferT
-    def _UnPack(self, buffer):
-        if buffer is None:
-            return
-        if not buffer.DataIsNone():
-            if np is None:
-                self.data = []
-                for i in range(buffer.DataLength()):
-                    self.data.append(buffer.Data(i))
-            else:
-                self.data = buffer.DataAsNumpy()
-
-    # BufferT
-    def Pack(self, builder):
-        if self.data is not None:
-            if np is not None and type(self.data) is np.ndarray:
-                data = builder.CreateNumpyVector(self.data)
-            else:
-                BufferStartDataVector(builder, len(self.data))
-                for i in reversed(range(len(self.data))):
-                    builder.PrependUint8(self.data[i])
-                data = builder.EndVector(len(self.data))
-        BufferStart(builder)
-        if self.data is not None:
-            BufferAddData(builder, data)
-        buffer = BufferEnd(builder)
-        return buffer
-
-
-
-class CallOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAsCallOptions(cls, buf, offset):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = CallOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def CallOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # CallOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # CallOptions
-    def Subgraph(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
-        return 0
-
-def CallOptionsStart(builder): builder.StartObject(1)
-def CallOptionsAddSubgraph(builder, subgraph): builder.PrependUint32Slot(0, subgraph, 0)
-def CallOptionsEnd(builder): return builder.EndObject()
-
-
-class CallOptionsT(object):
-
-    # CallOptionsT
-    def __init__(self):
-        self.subgraph = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        callOptions = CallOptions()
-        callOptions.Init(buf, pos)
-        return cls.InitFromObj(callOptions)
-
-    @classmethod
-    def InitFromObj(cls, callOptions):
-        x = CallOptionsT()
-        x._UnPack(callOptions)
-        return x
-
-    # CallOptionsT
-    def _UnPack(self, callOptions):
-        if callOptions is None:
-            return
-        self.subgraph = callOptions.Subgraph()
-
-    # CallOptionsT
-    def Pack(self, builder):
-        CallOptionsStart(builder)
-        CallOptionsAddSubgraph(builder, self.subgraph)
-        callOptions = CallOptionsEnd(builder)
-        return callOptions
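The generated classes above all follow the same flatbuffers object-API pattern: a flat accessor class (e.g. CallOptions) reads fields lazily out of the buffer, while a mutable companion (CallOptionsT) supports InitFromBuf/_UnPack/Pack for round-tripping. A minimal sketch of that round-trip; the import path for the generated module is an assumption:

    import flatbuffers
    import schema_py_generated as schema  # hypothetical import path

    # Build a CallOptions table via the mutable object API.
    opts = schema.CallOptionsT()
    opts.subgraph = 3

    builder = flatbuffers.Builder(0)
    builder.Finish(opts.Pack(builder))
    buf = builder.Output()

    # Read back through the flat accessor, then unpack into the object API.
    root = schema.CallOptions.GetRootAsCallOptions(buf, 0)
    assert schema.CallOptionsT.InitFromObj(root).subgraph == 3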
diff --git a/archived/tflite2xcore/tflite2xcore/xcore_schema/subgraph.py b/archived/tflite2xcore/tflite2xcore/xcore_schema/subgraph.py
deleted file mode 100644
index a87c8dcba..000000000
--- a/archived/tflite2xcore/tflite2xcore/xcore_schema/subgraph.py
+++ /dev/null
@@ -1,214 +0,0 @@
-# Copyright 2019-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-from copy import deepcopy
-from typing import TYPE_CHECKING, Any, Optional, Iterable, List
-
-from . import (
-    _ModelDependent,
-    TensorType,
-    OperatorCode,
-    Buffer,
-    _ShapeInputType,
-    Tensor,
-    Operator,
-    _OpOptionsType,
-)
-
-if TYPE_CHECKING:
-    from . import XCOREModel
-
-
-class Subgraph(_ModelDependent):
-    def __init__(
-        self,
-        name: Optional[str] = None,
-        model: Optional["XCOREModel"] = None,
-        inputs: Optional[Iterable[Tensor]] = None,
-        outputs: Optional[Iterable[Tensor]] = None,
-        operators: Optional[Iterable[Operator]] = None,
-        tensors: Optional[Iterable[Tensor]] = None,
-    ) -> None:
-        super().__init__(name, model)
-        self.inputs: List[Tensor] = list(inputs or [])
-        self.outputs: List[Tensor] = list(outputs or [])
-        self.operators: List[Operator] = list(operators or [])
-        self.tensors: List[Tensor] = list(tensors or [])
-
-    @property
-    def intermediates(self) -> List[Tensor]:
-        # intermediates are any tensors that are not an input or an output
-        return [t for t in self.tensors if t not in (self.inputs + self.outputs)]
-
-    def is_equal(self, other: Any) -> bool:
-        return (
-            super().is_equal(other)
-            # and self.name == other.name  # intentionally not compared
-            and self.sequence_equal(self.inputs, other.inputs)
-            and self.sequence_equal(self.outputs, other.outputs)
-            and self.sequence_equal(self.operators, other.operators)
-            and self.sequence_equal(self.tensors, other.tensors)
-        )
-
-    def create_tensor(
-        self,
-        name: str,
-        type_: TensorType,
-        shape: _ShapeInputType,
-        *,
-        buffer: Optional[Buffer] = None,
-        quantization: Optional[_OpOptionsType] = None,
-        isinput: bool = False,
-        isoutput: bool = False,
-        producers: Optional[Iterable[Operator]] = None,
-        consumers: Optional[Iterable[Operator]] = None,
-    ) -> Tensor:
-
-        name = self.make_unique_tensor_name(name)
-        tensor = Tensor(self, name, type_, shape, quantization, producers, consumers)
-
-        if buffer is None:
-            try:
-                buffer = Buffer(self._model)
-            except AttributeError:
-                buffer = Buffer()
-        tensor.buffer = buffer
-        tensor.buffer.owners.append(tensor)
-
-        self.tensors.append(tensor)
-        if isinput:
-            self.inputs.append(tensor)
-        if isoutput:
-            self.outputs.append(tensor)
-        return tensor
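create_tensor is the intended entry point for building graphs: it deduplicates the name, attaches a Buffer (tied to the model when one is available), and registers the tensor in the relevant lists. A usage sketch, assuming the archived tflite2xcore package is importable and that constructing a Subgraph with model=... registers it on the model, as XCOREModel.register_dependent suggests:

    from tflite2xcore.xcore_schema import XCOREModel, Subgraph, TensorType

    sg = Subgraph(model=XCOREModel())
    x = sg.create_tensor("x", TensorType.INT8, shape=(1, 8), isinput=True)
    y = sg.create_tensor("x", TensorType.INT8, shape=(1, 8))

    assert x in sg.inputs and x in sg.tensors
    assert y.name == "x_1"  # deduplicated by make_unique_tensor_name, defined below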
- """ - assert tensor in self.tensors - self.tensors.remove(tensor) - self._remove_if_contained(self.inputs, tensor) - self._remove_if_contained(self.outputs, tensor) - for op in tensor.consumers: - self._remove_if_contained(op.inputs, tensor) - for op in tensor.producers: - self._remove_if_contained(op.outputs, tensor) - tensor.buffer.owners.remove(tensor) - - # TODO: fix this - # del tensor.consumers, tensor.producers, tensor.subgraph, tensor.buffer - del tensor.consumers, tensor.producers, tensor.buffer - - def generate_unique_op_name(self, operator_code: OperatorCode) -> str: - existing_names = [op.name for op in self.operators] - j = 0 - while True: - j, new_name = j + 1, f"{operator_code.name}_{j}" - if new_name not in existing_names: - return new_name - - def make_unique_tensor_name(self, candidate_name: str) -> str: - existing_names = [ - name - for tensor in self.tensors - for name in (tensor.name, tensor.sanitized_name) - ] - - j, new_name = 1, candidate_name - while True: - if new_name not in existing_names: - return new_name - j, new_name = j + 1, f"{candidate_name}_{j}" - - def create_operator( - self, - operator_code: OperatorCode, - *, - inputs: Optional[Iterable[Tensor]] = None, - outputs: Optional[Iterable[Tensor]] = None, - builtin_options: Optional[_OpOptionsType] = None, - custom_options: Optional[_OpOptionsType] = None, - ) -> Operator: - name = self.generate_unique_op_name(operator_code) - operator = Operator( - self, operator_code, name, inputs, outputs, builtin_options, custom_options - ) - self.operators.append(operator) - for input_tensor in operator.inputs: - input_tensor.consumers.append(operator) - for output_tensor in operator.outputs: - output_tensor.producers.append(operator) - return operator - - def remove_operator(self, op: Operator) -> None: - """Removes the operator from the subgraph and cuts all its connections. - - Note that the operator will be left in an illegal state. 
- """ - assert op in self.operators - self.operators.remove(op) - for t in op.inputs: - self._remove_if_contained(t.consumers, op) - for t in op.outputs: - self._remove_if_contained(t.producers, op) - del op.inputs, op.outputs, op.subgraph - - def insert_operator( - self, ref_op: Operator, new_op: Operator, after: bool = False - ) -> None: - """NOTE: this does not rewire inputs/outputs""" - # find location of reference op - try: - ref_idx = self.operators.index(ref_op) - except ValueError as e: - raise ValueError("Cannot find reference operator in the subgraph") from e - - # remove new_op from list if already in the subgraph - if new_op in self.operators: - self.operators.remove(new_op) - - # (re)insert new op before/after reference op - self.operators.insert(ref_idx + (1 if after else 0), new_op) - - def replace_operator(self, op: Operator, new_op: Operator) -> None: - """NOTE: this does not rewire inputs/outputs""" - # insert new op - try: - self.insert_operator(op, new_op) - except ValueError: - raise ValueError("Cannot find operator to replace in the subgraph") - # remove old op - self.remove_operator(op) - - def clone_tensor(self, tensor: Tensor) -> Tensor: - return self.create_tensor( - tensor.name, - tensor.type, - tensor.shape, - quantization=deepcopy(tensor.quantization), - buffer=Buffer(self.model, tensor.buffer.data), - ) - - def get_tensor(self, name: str) -> Tensor: - for t in self.tensors: - if t.name == name: - return t - raise ValueError(f"Tensor with name {name} not found!") - - def sanity_check(self) -> None: - assert self in self.model.subgraphs - # check for duplicates - assert len(self.inputs) == len(set(self.inputs)) - assert len(self.outputs) == len(set(self.outputs)) - assert len(self.operators) == len(set(self.operators)) - assert len(self.tensors) == len(set(self.tensors)) - # make sure inputs and outputs are not misplaced - for tensor in self.inputs + self.outputs: - assert tensor in self.tensors - # the subgraph is sane as long as all its objects are sane - for op in self.operators: - op.sanity_check() - for tensor in self.tensors: - tensor.sanity_check() diff --git a/archived/tflite2xcore/tflite2xcore/xcore_schema/tensor.py b/archived/tflite2xcore/tflite2xcore/xcore_schema/tensor.py deleted file mode 100644 index ab4a03bd9..000000000 --- a/archived/tflite2xcore/tflite2xcore/xcore_schema/tensor.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -import numpy as np -from typing import TYPE_CHECKING, Optional, Iterable, Union, List, Tuple, Any - -from . import ( - TensorType, - _SubgraphDependent, - Buffer, - Operator, - _OpOptionsType, -) - -if TYPE_CHECKING: - from . import Subgraph - -_ShapeInputType = Union[None, Iterable[Union[int, np.integer]], np.ndarray] - - -class Tensor(_SubgraphDependent): - name: str - buffer: Buffer - - def __init__( - self, - subgraph: "Subgraph", - name: str, - type_: TensorType, - shape: _ShapeInputType, - quantization: Optional[_OpOptionsType] = None, - producers: Optional[Iterable[Operator]] = None, - consumers: Optional[Iterable[Operator]] = None, - ) -> None: - # Generally, do not use this constructor to instantiate Tensor! - # Use Subgraph.create_tensor instead. - - super().__init__(name or "") - self._subgraph = subgraph # parent - assert isinstance(type_, TensorType) - self.type = type_ - self.shape: Tuple[int, ...] 
diff --git a/archived/tflite2xcore/tflite2xcore/xcore_schema/tensor.py b/archived/tflite2xcore/tflite2xcore/xcore_schema/tensor.py
deleted file mode 100644
index ab4a03bd9..000000000
--- a/archived/tflite2xcore/tflite2xcore/xcore_schema/tensor.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import numpy as np
-from typing import TYPE_CHECKING, Optional, Iterable, Union, List, Tuple, Any
-
-from . import (
-    TensorType,
-    _SubgraphDependent,
-    Buffer,
-    Operator,
-    _OpOptionsType,
-)
-
-if TYPE_CHECKING:
-    from . import Subgraph
-
-_ShapeInputType = Union[None, Iterable[Union[int, np.integer]], np.ndarray]
-
-
-class Tensor(_SubgraphDependent):
-    name: str
-    buffer: Buffer
-
-    def __init__(
-        self,
-        subgraph: "Subgraph",
-        name: str,
-        type_: TensorType,
-        shape: _ShapeInputType,
-        quantization: Optional[_OpOptionsType] = None,
-        producers: Optional[Iterable[Operator]] = None,
-        consumers: Optional[Iterable[Operator]] = None,
-    ) -> None:
-        # Generally, do not use this constructor to instantiate Tensor!
-        # Use Subgraph.create_tensor instead.
-
-        super().__init__(name or "")
-        self._subgraph = subgraph  # parent
-        assert isinstance(type_, TensorType)
-        self.type = type_
-        self.shape: Tuple[int, ...] = shape  # type: ignore  # see https://github.com/python/mypy/issues/3004
-
-        self.quantization = quantization or {}
-        self.producers: List[Operator] = list(producers or [])
-        self.consumers: List[Operator] = list(consumers or [])
-
-    @property
-    def shape(self) -> Tuple[int, ...]:
-        return self._shape
-
-    @shape.setter
-    def shape(self, shape: _ShapeInputType) -> None:
-        if shape is None:
-            shape = []
-        elif isinstance(shape, np.ndarray):
-            shape = shape.tolist()
-        else:
-            shape = list(shape)
-
-        for j, s in enumerate(shape):
-            if not isinstance(s, (int, np.integer)):
-                raise TypeError(
-                    "Tensor.shape must be an iterable of integers, "
-                    f"got shape[{j}] = {s} with type {type(s)}"
-                )
-
-        self._shape = tuple(int(s) for s in shape)
-
-    def is_equal(self, other: Any) -> bool:
-        return (
-            super().is_equal(other)
-            # and self.name == other.name  # intentionally not compared
-            and self.buffer.is_equal(other.buffer)
-            and self.type == other.type
-            and self.shape == other.shape
-            and self.quantization == other.quantization
-            and len(self.producers) == len(other.producers)  # avoids circular deps
-            and len(self.consumers) == len(other.consumers)  # avoids circular deps
-        )
-
-    def __str__(self) -> str:
-        return f"name={self.name}, type={self.type.name}, shape={self.shape}, buffer={self.buffer}"
-
-    def sanity_check(self) -> None:
-        assert self in self.subgraph.tensors
-        assert self in self.buffer.owners
-        # check double links with consumers/producers
-        for op in self.producers:
-            assert self in op.outputs
-        for op in self.consumers:
-            assert self in op.inputs
-
-    @property
-    def sanitized_name(self) -> str:
-        """Return a name that is safe to use in source code"""
-        return self.name.replace("/", "_")
-
-    @property
-    def size(self) -> int:
-        return self.type.sizeof() * np.prod(self.shape)  # type: ignore
-
-    def as_array(self, dtype: Optional[type] = None) -> np.ndarray:
-        arr = np.copy(
-            np.frombuffer(self.buffer._data, dtype=self.type.to_numpy_dtype())
-        )
-        if dtype:
-            arr = arr.astype(dtype)
-        return arr.reshape(self.shape)
-
-    @property
-    def is_constant(self) -> bool:
-        # There is an esoteric case where by a tensor without any producers could potentially be
-        # modified if it shares a buffer with a tensor from another subgraph.
-        # As such we also check if all owners of its buffer have no producers and are not inputs
-        return all(
-            not t.producers and t not in self.subgraph.inputs
-            for t in self.buffer.owners
-        )
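The shape setter above normalizes None, numpy arrays, and generic iterables into a plain tuple of ints, which keeps shapes hashable and comparable. A sketch, under the same package assumption as before:

    import numpy as np
    from tflite2xcore.xcore_schema import XCOREModel, Subgraph, TensorType

    sg = Subgraph(model=XCOREModel())
    w = sg.create_tensor("w", TensorType.FLOAT32, shape=np.array([2, 3]))

    assert w.shape == (2, 3)    # ndarray coerced to a tuple of ints
    assert w.size == 4 * 2 * 3  # sizeof(FLOAT32) * prod(shape)
    w.shape = None
    assert w.shape == ()        # None normalizes to an empty tuple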
diff --git a/archived/tflite2xcore/tflite2xcore/xcore_schema/tensor_type.py b/archived/tflite2xcore/tflite2xcore/xcore_schema/tensor_type.py
deleted file mode 100644
index 6c92f1bc6..000000000
--- a/archived/tflite2xcore/tflite2xcore/xcore_schema/tensor_type.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright 2020-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-# type: ignore
-
-import numpy as np
-from enum import Enum
-
-from . import schema_py_generated as schema
-
-TensorType = Enum(
-    "TensorType",
-    {k: v for k, v in vars(schema.TensorType).items() if not k.startswith("__")},
-)
-
-__TensorType_to_stdint_type = {
-    # TensorType.STRING: None,  # intentionally not supported
-    TensorType.FLOAT64: "float64_t",
-    TensorType.FLOAT32: "float32_t",
-    TensorType.FLOAT16: "float16_t",
-    TensorType.COMPLEX64: "complex64_t",
-    TensorType.INT64: "int64_t",
-    TensorType.INT32: "int32_t",
-    TensorType.INT16: "int16_t",
-    TensorType.INT8: "int8_t",
-    TensorType.UINT8: "uint8_t",
-    TensorType.BOOL: "uint8_t",
-}
-TensorType.to_stdint_type = lambda self: __TensorType_to_stdint_type[self]
-
-__TensorType_sizeof = {
-    # TensorType.STRING: None,  # intentionally not supported
-    TensorType.FLOAT64: 8,
-    TensorType.FLOAT32: 4,
-    TensorType.FLOAT16: 2,
-    TensorType.COMPLEX64: 8,
-    TensorType.INT64: 8,
-    TensorType.INT32: 4,
-    TensorType.INT16: 2,
-    TensorType.INT8: 1,
-    TensorType.UINT8: 1,
-    TensorType.BOOL: 1,
-}
-TensorType.sizeof = lambda self: __TensorType_sizeof[self]
-
-__TensorType_to_numpy_dtype = {
-    # TensorType.STRING: None,  # intentionally not supported
-    TensorType.FLOAT64: np.dtype(np.float64),
-    TensorType.FLOAT32: np.dtype(np.float32),
-    TensorType.FLOAT16: np.dtype(np.float16),
-    TensorType.COMPLEX64: np.dtype(np.complex64),
-    TensorType.INT64: np.dtype(np.int64),
-    TensorType.INT32: np.dtype(np.int32),
-    TensorType.INT16: np.dtype(np.int16),
-    TensorType.INT8: np.dtype(np.int8),
-    TensorType.UINT8: np.dtype(np.uint8),
-    TensorType.BOOL: np.dtype(np.bool_),
-}
-TensorType.to_numpy_dtype = lambda self: __TensorType_to_numpy_dtype[self]
-
-__TensorType_from_numpy_dtype = {
-    np.dtype(np.float64): TensorType.FLOAT64,
-    np.dtype(np.float32): TensorType.FLOAT32,
-    np.dtype(np.float16): TensorType.FLOAT16,
-    np.dtype(np.complex64): TensorType.COMPLEX64,
-    np.dtype(np.int64): TensorType.INT64,
-    np.dtype(np.int32): TensorType.INT32,
-    np.dtype(np.int16): TensorType.INT16,
-    np.dtype(np.int8): TensorType.INT8,
-    np.dtype(np.uint8): TensorType.UINT8,
-    np.dtype(np.bool_): TensorType.BOOL,
-}
-TensorType.from_numpy_dtype = lambda x: __TensorType_from_numpy_dtype[np.dtype(x)]
diff --git a/archived/tflite2xcore/tflite2xcore/xcore_schema/tensor_type.pyi b/archived/tflite2xcore/tflite2xcore/xcore_schema/tensor_type.pyi
deleted file mode 100644
index a384b6db5..000000000
--- a/archived/tflite2xcore/tflite2xcore/xcore_schema/tensor_type.pyi
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright (c) 2020, XMOS Ltd, All rights reserved
-
-import enum
-import numpy as np
-from typing import Callable, Any
-
-class TensorType(enum.IntEnum):
-    STRING: TensorType
-    FLOAT64: TensorType
-    FLOAT32: TensorType
-    FLOAT16: TensorType
-    COMPLEX64: TensorType
-    INT64: TensorType
-    INT32: TensorType
-    INT16: TensorType
-    INT8: TensorType
-    UINT8: TensorType
-    BOOL: TensorType
-    @classmethod
-    def __call__(cls, x: Any) -> TensorType: ...
-    def to_stdint_type(self) -> str: ...
-    def sizeof(self) -> int: ...
-    def to_numpy_dtype(self) -> np.dtype: ...
-    from_numpy_dtype: Callable[[np.dtype], TensorType]
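tensor_type.py builds TensorType dynamically from the generated schema and then attaches the conversion helpers as plain function attributes, which is why the .pyi stub above is needed to give them signatures. The intended round-trip, assuming the archived package is importable:

    import numpy as np
    from tflite2xcore.xcore_schema import TensorType

    tt = TensorType.from_numpy_dtype(np.int8)  # dtype -> enum member
    assert tt is TensorType.INT8
    assert tt.to_numpy_dtype() == np.dtype(np.int8)
    assert tt.sizeof() == 1
    assert tt.to_stdint_type() == "int8_t"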
diff --git a/archived/tflite2xcore/tflite2xcore/xcore_schema/xcore_model.py b/archived/tflite2xcore/tflite2xcore/xcore_schema/xcore_model.py
deleted file mode 100644
index a59b03168..000000000
--- a/archived/tflite2xcore/tflite2xcore/xcore_schema/xcore_model.py
+++ /dev/null
@@ -1,463 +0,0 @@
-# Copyright 2019-2021 XMOS LIMITED.
-# This Software is subject to the terms of the XMOS Public Licence: Version 1.
-
-import pathlib
-import flatbuffers
-import logging
-from typing import (
-    Dict,
-    Any,
-    Union,
-    Optional,
-    Iterable,
-    List,
-    Counter,
-    TypeVar,
-    Type,
-    overload,
-    cast,
-    MutableSequence,
-)
-
-from . import (
-    _IRObject,
-    _ModelDependent,
-    _DataContainer,
-    OperatorCode,
-    Buffer,
-    Subgraph,
-    Metadata,
-    BuiltinOpCodes,
-    XCOREOpCodes,
-    ExternalOpCodes,
-    ValidOpCodes,
-    TensorType,
-    schema_py_generated as schema,
-)
-from .dict_conversion import (
-    builtin_options_to_dict,
-    dict_to_builtin_options,
-    quantization_to_dict,
-    dict_to_quantization,
-    create_dict_from_model,
-)
-from .flexbuffers import FlexbufferBuilder, FlexbufferParser
-from .builtin_options import BuiltinOptions
-
-from tflite2xcore.utils import asserting_cast
-from tflite2xcore.execution_planning import ReverseDepthFirstPlanner
-
-_R = TypeVar("_R", bound="XCOREModel")
-
-_T = TypeVar("_T", bound="_ModelDependent")
-
-
-class _ModelDependentContainer(MutableSequence[_T]):
-    def __init__(self, model: "XCOREModel", objects: Optional[Iterable[_T]] = None):
-        self._model = model
-        self._objects: List[_T] = []
-        if objects:
-            self.extend(objects)  # pylint: disable=no-member
-
-    @overload
-    def __getitem__(self, key: int) -> _T:
-        ...
-
-    @overload
-    def __getitem__(self, key: slice) -> "_ModelDependentContainer[_T]":
-        ...
-
-    def __getitem__(
-        self, key: Union[int, slice]
-    ) -> Union[_T, "_ModelDependentContainer[_T]"]:
-        if isinstance(key, int):
-            return self._objects[key]
-
-        return _ModelDependentContainer(self._model, self._objects[key])
-
-    def __delitem__(self, key: Union[int, slice]) -> None:
-        objects = [self[key]] if isinstance(key, int) else self[key]
-        for obj in objects:
-            del obj._model
-        del self._objects[key]
-
-    @overload
-    def __setitem__(self, key: int, obj: _T) -> None:
-        ...
-
-    @overload
-    def __setitem__(self, key: slice, objects: Iterable[_T]) -> None:
-        ...
-
-    def __setitem__(
-        self, key: Union[int, slice], objects: Union[_T, Iterable[_T]]
-    ) -> None:
-        # NOTE: mypy cannot correctly infer the type of objects given the type of key
-        # so the casts below are to suppress the resulting mypy errors
-
-        if isinstance(key, int):
-            del self._objects[key]._model
-            obj = cast(_T, objects)
-            self._objects[key] = obj
-            obj._model = self._model
-            return
-
-        # NOTE: since key must be a slice now, there is guarantee that
-        # self._objects[key] has the same length as objects
-
-        old_objects = self._objects[key]
-        for old_obj in old_objects:
-            del old_obj._model
-
-        # objects can be an iterator, so we need to set _model on the fly
-        def set_model(obj: _T) -> _T:
-            obj._model = self._model
-            return obj
-
-        self._objects[key] = (set_model(obj) for obj in cast(Iterable[_T], objects))
-
-    def insert(self, idx: int, obj: _T) -> None:
-        self._objects.insert(idx, obj)
-        obj._model = self._model
-
-    def __len__(self) -> int:
-        return len(self._objects)
-
-
-class XCOREModel(_IRObject):
-    def __init__(
-        self,
-        version: Optional[int] = None,
-        description: Optional[str] = None,
-        subgraphs: Optional[Iterable[Subgraph]] = None,
-        buffers: Optional[Iterable[Buffer]] = None,
-        metadata: Optional[Iterable[Metadata]] = None,
-    ) -> None:
-        super().__init__()
-        self.version = version or 3
-        self.description = description or ""
-        self.buffers = _ModelDependentContainer[Buffer](self, buffers)
-        self.metadata = _ModelDependentContainer[Metadata](self, metadata)
-        self.subgraphs = _ModelDependentContainer[Subgraph](self, subgraphs)
-
-    def register_dependent(self, dependent: _ModelDependent) -> None:
-        if isinstance(dependent, Buffer):
-            self.buffers.append(dependent)  # pylint: disable=no-member
-        elif isinstance(dependent, Metadata):
-            self.metadata.append(dependent)  # pylint: disable=no-member
-        elif isinstance(dependent, Subgraph):
-            self.subgraphs.append(dependent)  # pylint: disable=no-member
-        else:
-            raise TypeError(f"Unsupported model dependent with type {type(dependent)}")
-
-    def is_equal(self, other: Any) -> bool:
-        return (
-            super().is_equal(other)
-            and self.version == other.version
-            # and self.description == other.description  # intentionally not compared
-            and self.sequence_equal(self.buffers, other.buffers)
-            and self.sequence_equal(self.subgraphs, other.subgraphs)
-            and self.sequence_equal(self.metadata, other.metadata)
-        )
-
-    def count_operator_codes(self) -> Counter[OperatorCode]:
-        return Counter(
-            operator.operator_code
-            for subgraph in self.subgraphs
-            for operator in subgraph.operators
-        )
-
-    @property
-    def operator_codes(self) -> List[OperatorCode]:
-        # sort the operators codes from most frequent to least frequent
-        # why? because the flatbuffer is a tiny bit smaller if we do
-        return [op_code for op_code, _ in self.count_operator_codes().most_common()]
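count_operator_codes feeds the most-frequent-first ordering in operator_codes above, which the comment notes makes the serialized flatbuffer slightly smaller. The ordering itself is just Counter.most_common; a sketch of the idea (ABS/NEG membership in BuiltinOpCodes and the .code attribute semantics are assumptions drawn from the surrounding code):

    from collections import Counter
    from tflite2xcore.xcore_schema import OperatorCode, BuiltinOpCodes

    counts = Counter(
        [OperatorCode(BuiltinOpCodes.ABS)] * 3 + [OperatorCode(BuiltinOpCodes.NEG)]
    )
    ordered = [code for code, _ in counts.most_common()]
    assert ordered[0].code is BuiltinOpCodes.ABS  # most frequent first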
-
-    def sanity_check(self) -> None:
-        # check for duplicates
-        assert len(self.subgraphs) == len(set(self.subgraphs))
-        assert len(self.buffers) == len(set(self.buffers))
-        assert len(self.metadata) == len(set(self.metadata))
-        # the model is sane as long as all its subgraphs are sane
-        for subgraph in self.subgraphs:
-            subgraph.sanity_check()
-        for buffer in self.buffers:
-            buffer.sanity_check()
-        for metadata in self.metadata:
-            metadata.sanity_check()
-
-    @classmethod
-    def _from_flatbuffer_model(cls: Type[_R], modelT: schema.ModelT) -> _R:
-        model = cls(
-            version=modelT.version,
-            description=modelT.description.decode("utf-8")  # type: ignore
-            if modelT.description
-            else None,
-        )
-
-        # load metadata
-        metadata_map = {
-            metadataT.buffer: Metadata(
-                name=metadataT.name.decode("utf-8")  # type: ignore
-                if metadataT.name
-                else None,
-                model=model,
-                data=modelT.buffers[metadataT.buffer].data,
-            )
-            for metadataT in modelT.metadata or []
-        }
-
-        # check that buffer 0 is empty
-        # NOTE: BufferT.data can be np.ndarray
-        buffer_0_data = modelT.buffers[0].data
-        if buffer_0_data is not None and len(buffer_0_data) > 0:
-            logging.warning("Non-empty buffer 0 in flatbuffer!")
-
-        # create all non-metadata buffers
-        buffer_map = {
-            idx: Buffer(model, bufferT.data)
-            for idx, bufferT in enumerate(modelT.buffers)
-            if idx not in metadata_map
-        }
-
-        # create operator codes lookup
-        operator_codes_lut = []
-        for operator_codeT in modelT.operatorCodes:
-            opcode: ValidOpCodes = BuiltinOpCodes(operator_codeT.builtinCode)
-            if opcode is BuiltinOpCodes.CUSTOM:
-                custom_code = operator_codeT.customCode.decode("utf-8")  # type: ignore
-                try:
-                    opcode = XCOREOpCodes(custom_code)
-                except ValueError:
-                    opcode = ExternalOpCodes.add_new_opcode(custom_code)
-            operator_codes_lut.append(
-                OperatorCode(opcode, version=operator_codeT.version)
-            )
-
-        # load subgraphs
-        for subgraph_index, subgraphT in enumerate(modelT.subgraphs):
-            subgraph = Subgraph(
-                name=subgraphT.name.decode("utf-8")  # type: ignore
-                if subgraphT.name
-                else None,
-                model=model,
-            )
-
-            # load tensors
-            tensors = []
-            for tensor_index, tensorT in enumerate(subgraphT.tensors):
-                is_input = tensor_index in subgraphT.inputs
-                is_output = tensor_index in subgraphT.outputs
-
-                # load quantization
-                quantization = None
-                if hasattr(tensorT, "quantization") and tensorT.quantization:
-                    quantization = quantization_to_dict(tensorT.quantization)
-
-                if tensorT.buffer in metadata_map:
-                    # a tensor is referencing a metadata buffer
-                    # this shouldn't happen, but we can work around it
-                    metadata = metadata_map[tensorT.buffer]
-                    logging.warning(
-                        f"Tensor {tensor_index} referencing "
-                        f'metadata "{metadata.name}" with buffer {tensorT.buffer}'
-                    )
-                    buffer_map[tensorT.buffer] = Buffer(model, metadata.data)
-
-                tensor = subgraph.create_tensor(
-                    name=tensorT.name.decode("utf-8"),  # type: ignore
-                    type_=TensorType(tensorT.type),
-                    shape=tensorT.shape,
-                    buffer=buffer_map[tensorT.buffer],
-                    quantization=quantization,
-                    isinput=is_input,
-                    isoutput=is_output,
-                )
-                tensors.append(tensor)
) - if ( - hasattr(operatorT, "customOptions") - and operatorT.customOptions is not None - ): - options["custom_options"] = FlexbufferParser().parse( - bytes(operatorT.customOptions) - ) - - def is_valid_tensor_index( - idx: int, lower: int = -1, upper: int = len(tensors) - ) -> bool: - if idx < lower or idx >= upper: - raise ValueError( - f"Invalid input tensor index [{idx}]: " - f"subgraph [{subgraph_index}], " - f"operator [{operator_index}], " - f"bounds: [{lower}, {upper}]" - ) - - return idx != -1 # -1 encodes optional for input indices - - subgraph.create_operator( - operator_code=operator_codes_lut[operatorT.opcodeIndex], - inputs=[ - tensors[input_index] - for input_index in operatorT.inputs - if is_valid_tensor_index(input_index) - ], - outputs=[ - tensors[output_index] - for output_index in operatorT.outputs - if is_valid_tensor_index(output_index, lower=0) - ], - **options, - ) - - model.sanity_check() - return model - - @classmethod - def deserialize(cls: Type[_R], bits: bytes) -> _R: - model_obj = schema.Model.GetRootAsModel(bits, 0) # type: ignore - modelT = schema.ModelT.InitFromObj(model_obj) # type: ignore - return cls._from_flatbuffer_model(modelT) - - @classmethod - def read_flatbuffer(cls: Type[_R], filename: Union[pathlib.Path, str]) -> _R: - with open(pathlib.Path(filename).resolve(), "rb") as fd: - bits = bytes(fd.read()) - - return cls.deserialize(bits) - - def _to_flatbuffer_model(self) -> schema.ModelT: - modelT = schema.ModelT() # type: ignore - modelT.version = self.version - modelT.description = self.description - - modelT.buffers = [] - - def create_buffer_from_container(data_container: _DataContainer) -> int: - """ returns the index of the serialized bufferT object""" - bufferT = schema.BufferT() # type: ignore - if len(data_container.data) > 0: - bufferT.data = list(data_container.data) - modelT.buffers.append(bufferT) - return len(modelT.buffers) - 1 - - # check that buffer 0 is empty - if self.buffers[0]: - logging.warning("Non-empty buffer 0 in model!") - - # create tensor buffers - buffer_idx_map: Dict[Buffer, int] = {} - for buffer in self.buffers: - buffer_idx_map[buffer] = create_buffer_from_container(buffer) - - # create metadata and their buffers - modelT.metadata = [] - for metadata in self.metadata: - metadataT = schema.MetadataT() # type: ignore - metadataT.name = metadata.name - metadataT.buffer = create_buffer_from_container(metadata) - modelT.metadata.append(metadataT) - - # create operator_codes - modelT.operatorCodes = [] - for operator_code in self.operator_codes: - operatorCodeT = schema.OperatorCodeT() # type: ignore - if operator_code.code in BuiltinOpCodes: - operatorCodeT.builtinCode = asserting_cast(int, operator_code.value) - else: - operatorCodeT.builtinCode = BuiltinOpCodes.CUSTOM.value - operatorCodeT.customCode = operator_code.name - operatorCodeT.version = operator_code.version - modelT.operatorCodes.append(operatorCodeT) - - # create subgraphs - modelT.subgraphs = [] - for subgraph in self.subgraphs: - subgraphT = schema.SubGraphT() # type: ignore - subgraphT.name = subgraph.name - - # set inputs and outputs - subgraphT.inputs = [subgraph.tensors.index(t) for t in subgraph.inputs] - subgraphT.outputs = [subgraph.tensors.index(t) for t in subgraph.outputs] - - # set tensors - subgraphT.tensors = [] - for tensor in subgraph.tensors: - tensorT = schema.TensorT() # type: ignore - tensorT.name = tensor.name - tensorT.shape = list(tensor.shape) - tensorT.buffer = buffer_idx_map[tensor.buffer] - tensorT.type = tensor.type.value - if 
tensor.quantization: - tensorT.quantization = dict_to_quantization(tensor.quantization) - subgraphT.tensors.append(tensorT) - - # set operators - subgraphT.operators = [] - planner = ReverseDepthFirstPlanner(subgraph) - for operator in planner.make_plan(): - operatorT = schema.OperatorT() # type: ignore - op_code = operator.operator_code - operatorT.opcodeIndex = self.operator_codes.index(op_code) - - operatorT.inputs = [subgraph.tensors.index(t) for t in operator.inputs] - operatorT.outputs = [ - subgraph.tensors.index(t) for t in operator.outputs - ] - - # TODO: fix this hack - # we need a better data structure to represent inputs/outputs of operators - if op_code.code is ExternalOpCodes.LceBconv2d: - if len(operatorT.inputs) == 3: - # bitpacked output - operatorT.inputs = ( - operatorT.inputs[:2] + [-1, -1] + operatorT.inputs[-1:] - ) - else: - # int8 output - operatorT.inputs = operatorT.inputs + [-1] - - if op_code.code in BuiltinOpCodes: - builtin_options_type = BuiltinOptions.from_BuiltinOpCodes( - op_code.code - ) - operatorT.builtinOptionsType = builtin_options_type.value - - if operator.builtin_options: - operatorT.builtinOptions = dict_to_builtin_options( - builtin_options_type, operator.builtin_options - ) - - if operator.custom_options: - fbb = FlexbufferBuilder(operator.custom_options) - operatorT.customOptions = fbb.get_bytes() - subgraphT.operators.append(operatorT) - - modelT.subgraphs.append(subgraphT) - - return modelT - - def serialize(self) -> bytes: - modelT = self._to_flatbuffer_model() - builder = flatbuffers.Builder(1024 * 1024) - model_offset = modelT.Pack(builder) # type: ignore - builder.Finish(model_offset, file_identifier=b"TFL3") - return bytes(builder.Output()) - - def write_flatbuffer(self, filename: Union[pathlib.Path, str]) -> int: - with open(pathlib.Path(filename).resolve(), "wb") as fd: - return fd.write(self.serialize()) - - def to_dict(self, *args: Any, **kwargs: Any) -> Dict[Any, Any]: - return create_dict_from_model(self, *args, **kwargs) diff --git a/archived/tflite2xcore/xformer.py b/archived/tflite2xcore/xformer.py deleted file mode 100755 index b4ee28af4..000000000 --- a/archived/tflite2xcore/xformer.py +++ /dev/null @@ -1,96 +0,0 @@ -#!/usr/bin/env python -O -# Copyright 2020-2021 XMOS LIMITED. -# This Software is subject to the terms of the XMOS Public Licence: Version 1. - -from pathlib import Path - -from tflite2xcore import utils, analyze, version -import tflite2xcore.converter as xcore_conv - -if __name__ == "__main__": - parser = utils.VerbosityParser() - parser.add_argument("tflite_input", help="Input .tflite file.") - parser.add_argument("tflite_output", help="Output .tflite file.") - parser.add_argument( - "--minify", - action="store_true", - default=False, - help="Make the model smaller at the expense of readability.", - ) - parser.add_argument( - "-par", - "--num_threads", - type=int, - default=1, - help="Number of parallel threads for xcore.ai optimization.", - ) - parser.add_argument( - "--intermediates_path", - default=None, - help="Path to directory for storing intermediate models. If not given " - "intermediate models will not be saved. If path doesn't exists, " - "it will be created. Contents may be overwritten.", - ) - parser.add_argument( - "--analyze", - action="store_true", - default=False, - help="Analyze the output model. 
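The deserialize/serialize pair above gives the archived package a full flatbuffer round trip: GetRootAsModel plus ModelT.InitFromObj unpack the schema object tree, and Pack with the b"TFL3" file identifier re-emits it. A minimal round-trip sketch, assuming the surrounding class is the package's XCOREModel (the class name and exact import path are not visible in this hunk):

    from tflite2xcore.xcore_model import XCOREModel  # import path assumed

    # read_flatbuffer() feeds the raw bytes through deserialize(), which
    # unpacks the schema objects and rebuilds subgraphs/tensors/buffers.
    model = XCOREModel.read_flatbuffer("input.tflite")
    model.sanity_check()  # the same duplicate/consistency checks as above

    print(len(model.subgraphs), "subgraph(s),", len(model.buffers), "buffer(s)")

    # write_flatbuffer() serializes back via Pack() and the b"TFL3" identifier.
    model.write_flatbuffer("output.tflite")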
" - "A report is printed showing the runtime memory footprint of the model.", - ) - parser.add_argument( - "--remove_input_alignment_pad", - action="store_true", - default=False, - help="Remove channel-wise padding on the input tensor(s). " - "The new input tensor will have the padded size, " - "so the padding should be implemented by the application developer.", - ) - parser.add_argument( - "--version", - action="version", - version=version.get_version(), - help="Display the version of the xformer", - ) - parser.add_argument( - "--ext_mem", - action="store_true", - default=False, - help="Experimental flag for better external memory support.", - ) - parser.add_argument( - "--experimental-xformer2", - action="store_true", - default=False, - help="Use MLIR-based xformer 2.0 for part of the optimization pipeline. Experimental.", - ) - parser.add_argument( - "--only-experimental-xformer2", - action="store_true", - default=False, - help="Use only MLIR-based xformer 2.0. Experimental.", - ) - - args = parser.parse_args() - - utils.set_verbosity(args.verbose) - - tflite_input_path = Path(args.tflite_input) - tflite_output_path = Path(args.tflite_output) - - xcore_conv.convert( - tflite_input_path, - tflite_output_path, - minification=args.minify, - num_threads=args.num_threads, - intermediates_path=args.intermediates_path, - remove_input_alignment_pad=args.remove_input_alignment_pad, - external_memory=args.ext_mem, - experimental_xformer2=args.experimental_xformer2, - only_experimental_xformer2=args.only_experimental_xformer2 - ) - - print(f"Conversion successful, output: {tflite_output_path}") - - if args.analyze: - analyze.print_report(tflite_output_path) diff --git a/archived/utils/lib_flexbuffers/CMakeLists.txt b/archived/utils/lib_flexbuffers/CMakeLists.txt deleted file mode 100644 index 7a80cdbc6..000000000 --- a/archived/utils/lib_flexbuffers/CMakeLists.txt +++ /dev/null @@ -1,50 +0,0 @@ -cmake_minimum_required(VERSION 3.14) - -project(libflexbuffers VERSION 1.0.1) - -# This variable is ignored on platforms other than Apple -set(CMAKE_OSX_SYSROOT /Library/Developer/CommandLineTools/SDKs/MacOSX.sdk) - -#********************** -# Paths -#********************** - -set(AI_TOOLS_ROOT_DIR "${CMAKE_CURRENT_LIST_DIR}/../..") - -#********************** -# Build flags -#********************** -set(CMAKE_CXX_STANDARD 11) -set(CMAKE_CXX_STANDARD_REQUIRED ON) -set(CMAKE_CXX_FLAGS "-g -O0") -set(CMAKE_C_FLAGS "-std=c99 -g -O0") - -#********************** -# Build shared library -#********************** - -add_library(libflexbuffers SHARED) -set_target_properties(libflexbuffers PROPERTIES VERSION ${PROJECT_VERSION}) -set_target_properties(libflexbuffers PROPERTIES PREFIX "") -target_link_libraries(libflexbuffers stdc++ m) - - -target_sources(libflexbuffers - PRIVATE "${AI_TOOLS_ROOT_DIR}/third_party/flatbuffers/src/util.cpp" - PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/src/flexbuffer_helpers.cpp" -) - -target_include_directories(libflexbuffers - PRIVATE "${AI_TOOLS_ROOT_DIR}/third_party/flatbuffers/include" - PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/src" -) - -if (${CMAKE_SYSTEM_NAME} MATCHES "Linux") - set(INSTALL_DIR "${PROJECT_SOURCE_DIR}/../../tflite2xcore/tflite2xcore/libs/linux") -elseif (${CMAKE_SYSTEM_NAME} MATCHES "Darwin") - set(INSTALL_DIR "${PROJECT_SOURCE_DIR}/../../tflite2xcore/tflite2xcore/libs/macos") -else() # must be Windows - set(INSTALL_DIR "${PROJECT_SOURCE_DIR}/../../tflite2xcore/tflite2xcore/libs/windows") -endif() - -install(TARGETS libflexbuffers DESTINATION ${INSTALL_DIR}) diff --git 
a/archived/utils/lib_flexbuffers/README.md b/archived/utils/lib_flexbuffers/README.md deleted file mode 100644 index 4852b5a6d..000000000 --- a/archived/utils/lib_flexbuffers/README.md +++ /dev/null @@ -1,24 +0,0 @@ -# XCORE Flexbuffer Python Bindings - -This project contains Python bindings for a subset of the Flexbuffer classes. This subset is used to serialize model custom options using the Flexbuffer format. - -## Building - -Install CMake version 3.14 or newer (https://cmake.org/download/). - -Modify your path to include the CMake binaries. Run the CMake application and -click Tools...How to Install For Command Line Use. - -Make a directory for the build - - > mkdir build - > cd build - -Run cmake - - > cmake ../ - > make - -To install - - > make install diff --git a/archived/utils/lib_flexbuffers/build.sh b/archived/utils/lib_flexbuffers/build.sh deleted file mode 100755 index ab23399b7..000000000 --- a/archived/utils/lib_flexbuffers/build.sh +++ /dev/null @@ -1,11 +0,0 @@ -set -e - -echo "****************************" -echo "* Building lib_flexbuffers" -echo "****************************" - -mkdir -p build -cd build -cmake ../ -make install -cd .. diff --git a/archived/utils/lib_flexbuffers/src/flexbuffer_helpers.cpp b/archived/utils/lib_flexbuffers/src/flexbuffer_helpers.cpp deleted file mode 100644 index 83157063a..000000000 --- a/archived/utils/lib_flexbuffers/src/flexbuffer_helpers.cpp +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2020-2021 XMOS LIMITED. -// This Software is subject to the terms of the XMOS Public Licence: Version 1. -#include -#include - -#include "flatbuffers/flexbuffers.h" - -extern "C" { -// ************************ -// flexbuffers::Builder API -// see -// https://github.com/google/flatbuffers/blob/master/include/flatbuffers/flexbuffers.h -// see: https://google.github.io/flatbuffers/flexbuffers.html -// ************************ - -flexbuffers::Builder *new_builder() { return new flexbuffers::Builder(); } - -size_t builder_start_map(flexbuffers::Builder *fbb, const char *key = nullptr) { - if (key) - return fbb->StartMap(key); - else - return fbb->StartMap(); -} - -size_t builder_end_map(flexbuffers::Builder *fbb, size_t size) { - return fbb->EndMap(size); -} - -size_t builder_start_vector(flexbuffers::Builder *fbb, - const char *key = nullptr) { - if (key) - return fbb->StartVector(key); - else - return fbb->StartVector(); -} - -size_t builder_end_vector(flexbuffers::Builder *fbb, size_t size, bool typed, - bool fixed) { - return fbb->EndVector(size, typed, fixed); -} - -void builder_clear(flexbuffers::Builder *fbb) { fbb->Clear(); } - -void builder_int(flexbuffers::Builder *fbb, const char *key, int64_t val) { - fbb->Int(key, val); -} - -void builder_vector_int(flexbuffers::Builder *fbb, int64_t val) { - fbb->Int(val); -} - -void builder_uint(flexbuffers::Builder *fbb, const char *key, uint64_t val) { - fbb->UInt(key, val); -} - -void builder_vector_uint(flexbuffers::Builder *fbb, uint64_t val) { - fbb->UInt(val); -} - -void builder_bool(flexbuffers::Builder *fbb, const char *key, bool val) { - fbb->Bool(key, val); -} - -void builder_vector_bool(flexbuffers::Builder *fbb, bool val) { - fbb->Bool(val); -} - -void builder_float(flexbuffers::Builder *fbb, const char *key, float val) { - fbb->Float(key, val); -} - -void builder_vector_float(flexbuffers::Builder *fbb, float val) { - fbb->Float(val); -} - -void builder_string(flexbuffers::Builder *fbb, const char *key, - const char *val) { - fbb->String(key, val); -} - -void 
builder_vector_string(flexbuffers::Builder *fbb, const char *val) { - fbb->String(val); -} - -void builder_finish(flexbuffers::Builder *fbb) { fbb->Finish(); } - -size_t builder_get_buffer(flexbuffers::Builder *fbb, char *buf) { - std::vector bytes = fbb->GetBuffer(); - - for (std::size_t i = 0; i < bytes.size(); ++i) { - buf[i] = bytes[i]; - } - - return bytes.size(); -} - -size_t parse_flexbuffer(const uint8_t *fb, size_t size, char *buf) { - std::vector bytes(fb, fb + size); - std::string json; - - flexbuffers::GetRoot(bytes).ToString(true, true, json); - strncpy(buf, json.c_str(), json.length()); - - return json.length(); -} - -} // extern "C" From e7998f7bb8318c887152ad1166331502fa54f17a Mon Sep 17 00:00:00 2001 From: Michael Poluektov Date: Wed, 16 Aug 2023 17:11:06 +0000 Subject: [PATCH 04/15] fix relative paths --- xformer/WORKSPACE | 8 ++++---- xformer/version_check.sh | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/xformer/WORKSPACE b/xformer/WORKSPACE index 80ff32c9e..b0844347b 100644 --- a/xformer/WORKSPACE +++ b/xformer/WORKSPACE @@ -73,28 +73,28 @@ tf_workspace0() ################################# lib_nn ####################################### new_local_repository( name = "lib_nn", - path = "../../third_party/lib_nn", + path = "../third_party/lib_nn", build_file = "lib_nn.BUILD", ) ############################# lib_tflite_micro ################################# new_local_repository( name = "lib_tflite_micro", - path = "../../third_party/lib_tflite_micro", + path = "../third_party/lib_tflite_micro", build_file = "lib_tflite_micro.BUILD", ) ########################### tflite_micro_compiler ############################## new_local_repository( name = "lib_tflmc", - path = "../../third_party/lib_tflite_micro/tflite_micro_compiler", + path = "../third_party/lib_tflite_micro/tflite_micro_compiler", build_file = "lib_tflmc.BUILD", ) ############################## tflite_micro #################################### local_repository( name = "tflite_micro", - path = "../../third_party/lib_tflite_micro/lib_tflite_micro/submodules/tflite-micro", + path = "../third_party/lib_tflite_micro/lib_tflite_micro/submodules/tflite-micro", ) ########################### larq compute engine ################################ diff --git a/xformer/version_check.sh b/xformer/version_check.sh index b7ddfd086..74952acd1 100755 --- a/xformer/version_check.sh +++ b/xformer/version_check.sh @@ -6,7 +6,7 @@ if [ "$1" == "BAZEL_BUILD" ] then LIB_TFLITE_MICRO_DIR="../../external/lib_tflite_micro/lib_tflite_micro" else - LIB_TFLITE_MICRO_DIR="../../third_party/lib_tflite_micro/lib_tflite_micro" + LIB_TFLITE_MICRO_DIR="../third_party/lib_tflite_micro/lib_tflite_micro" fi cd $LIB_TFLITE_MICRO_DIR From 5a221ada87af390abcae11c5e9baa94f1a346a2d Mon Sep 17 00:00:00 2001 From: Michael Poluektov Date: Wed, 16 Aug 2023 18:54:03 +0000 Subject: [PATCH 05/15] remove artifact --- xformer/external | 1 - 1 file changed, 1 deletion(-) delete mode 120000 xformer/external diff --git a/xformer/external b/xformer/external deleted file mode 120000 index 3ee3af63e..000000000 --- a/xformer/external +++ /dev/null @@ -1 +0,0 @@ -bazel-out/../../../external \ No newline at end of file From 11abe60ae6d88977398dc04454ada2818d520403 Mon Sep 17 00:00:00 2001 From: Michael Poluektov Date: Wed, 16 Aug 2023 14:15:51 +0000 Subject: [PATCH 06/15] black, __version__ setup --- python/xmos_ai_tools/__init__.py | 7 +++ python/xmos_ai_tools/xformer/__init__.py | 11 +++- python/xmos_ai_tools/xformer/flash.py | 55 +++++++++++++++---- 
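The extern "C" functions deleted above were the surface the Python bindings loaded through ctypes. A minimal sketch of driving that API directly, assuming a Linux build of the libflexbuffers shared library; the argument types are inferred from the C signatures above and the map key is illustrative:

    import ctypes

    lib = ctypes.cdll.LoadLibrary("libflexbuffers.so")  # platform-specific name
    lib.new_builder.restype = ctypes.c_void_p
    lib.builder_start_map.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
    lib.builder_start_map.restype = ctypes.c_size_t
    lib.builder_int.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_int64]
    lib.builder_end_map.argtypes = [ctypes.c_void_p, ctypes.c_size_t]
    lib.builder_finish.argtypes = [ctypes.c_void_p]
    lib.builder_get_buffer.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
    lib.builder_get_buffer.restype = ctypes.c_size_t

    fbb = lib.new_builder()
    m = lib.builder_start_map(fbb, None)      # StartMap(), no key
    lib.builder_int(fbb, b"thread_count", 4)  # Int("thread_count", 4); key illustrative
    lib.builder_end_map(fbb, m)
    lib.builder_finish(fbb)

    buf = ctypes.create_string_buffer(1024)
    n = lib.builder_get_buffer(fbb, buf)
    custom_options = buf.raw[:n]  # flexbuffer bytes, e.g. an op's custom options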
.../xinterpreters/base/base_interpreter.py | 28 ++++++---- .../device/device_interpreter.py | 51 ++++++++++++----- .../xinterpreters/host/host_interpreter.py | 30 +++++++--- 6 files changed, 136 insertions(+), 46 deletions(-) diff --git a/python/xmos_ai_tools/__init__.py b/python/xmos_ai_tools/__init__.py index e69de29bb..4b6c40f8f 100644 --- a/python/xmos_ai_tools/__init__.py +++ b/python/xmos_ai_tools/__init__.py @@ -0,0 +1,7 @@ +from importlib.metadata import version, PackageNotFoundError + +try: + __version__ = version(__name__) +except PackageNotFoundError: + # package is not installed + pass diff --git a/python/xmos_ai_tools/xformer/__init__.py b/python/xmos_ai_tools/xformer/__init__.py index e44ebfdd4..ff0137ec0 100644 --- a/python/xmos_ai_tools/xformer/__init__.py +++ b/python/xmos_ai_tools/xformer/__init__.py @@ -8,6 +8,7 @@ __compilation_output = "" __arena_size = 0 + def convert( filename: Union[str, Path], outfile: Union[str, Path], @@ -29,22 +30,28 @@ def convert( args.append(str(filename)) process_call: subprocess.CompletedProcess = subprocess.run( - [arg for arg in args], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True + [arg for arg in args], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + check=True, ) global __compilation_output, __arena_size __compilation_output = process_call.stdout.decode("utf-8") - size_str = re.sub('((.|\n|\r)*)Tensor arena size :', '', __compilation_output) + size_str = re.sub("((.|\n|\r)*)Tensor arena size :", "", __compilation_output) __arena_size = int(size_str.strip()) return process_call.returncode + def tensor_arena_size() -> int: return __arena_size + def print_optimization_report(): print(__compilation_output) + def print_help(show_hidden: Optional[bool] = False) -> int: if show_hidden: return subprocess.run(["xcore-opt", "--help-list-hidden"]).returncode diff --git a/python/xmos_ai_tools/xformer/flash.py b/python/xmos_ai_tools/xformer/flash.py index a11664d16..b33434296 100644 --- a/python/xmos_ai_tools/xformer/flash.py +++ b/python/xmos_ai_tools/xformer/flash.py @@ -72,7 +72,7 @@ def tobytes(integr): @staticmethod def swap_nibbles(x): - return ( (x & 0x0F)<<4 | (x & 0xF0)>>4 ) + return (x & 0x0F) << 4 | (x & 0xF0) >> 4 def add_params(self, engine, params=None, filename=None): image = FlashBuilder.create_params_image(params, filename) @@ -89,7 +89,11 @@ def flash_image(self): The whole thing should be written as is to flash """ headers = [None] * self.engines - start = BYTES_FOR_MAGIC_PATTERN + BYTES_FOR_VERSION + BYTES_PER_ENGINE_HEADER * self.engines + start = ( + BYTES_FOR_MAGIC_PATTERN + + BYTES_FOR_VERSION + + BYTES_PER_ENGINE_HEADER * self.engines + ) for i in range(self.engines): headers[i] = FlashBuilder.Header( len(self.models[i]), @@ -103,14 +107,41 @@ def flash_image(self): # We add the magic fast flash pattern of 32 bytes at the very beginning # After that comes the version output = bytes( - [0xff, 0x00, 0x0f, 0x0f, - 0x0f, 0x0f, 0x0f, 0x0f, - 0xff, 0x00, 0xff, 0x00, - 0xff, 0x00, 0xff, 0x00, - 0x31, 0xf7, 0xce, 0x08, - 0x31, 0xf7, 0xce, 0x08, - 0x9c, 0x63, 0x9c, 0x63, - 0x9c, 0x63, 0x9c, 0x63]) + [ + 0xFF, + 0x00, + 0x0F, + 0x0F, + 0x0F, + 0x0F, + 0x0F, + 0x0F, + 0xFF, + 0x00, + 0xFF, + 0x00, + 0xFF, + 0x00, + 0xFF, + 0x00, + 0x31, + 0xF7, + 0xCE, + 0x08, + 0x31, + 0xF7, + 0xCE, + 0x08, + 0x9C, + 0x63, + 0x9C, + 0x63, + 0x9C, + 0x63, + 0x9C, + 0x63, + ] + ) output += bytes( [VERSION_MAJOR, VERSION_MINOR, 0xFF ^ VERSION_MAJOR, 0xFF ^ VERSION_MINOR] @@ -148,7 +179,9 @@ def flash_file(self, filename): def 
generate_flash(*, output_file, model_files, param_files): - assert(len(model_files)==len(param_files)), "The number of provided model files must match the number of param files!" + assert len(model_files) == len( + param_files + ), "The number of provided model files must match the number of param files!" num_of_engines = len(model_files) fb = FlashBuilder(engines=num_of_engines) for i in range(num_of_engines): diff --git a/python/xmos_ai_tools/xinterpreters/base/base_interpreter.py b/python/xmos_ai_tools/xinterpreters/base/base_interpreter.py index bff9267f8..c45fbea7c 100644 --- a/python/xmos_ai_tools/xinterpreters/base/base_interpreter.py +++ b/python/xmos_ai_tools/xinterpreters/base/base_interpreter.py @@ -10,10 +10,12 @@ from tflite.TensorType import TensorType import numpy as np + class XTFLMInterpreterStatus(Enum): OK = 0 ERROR = 1 + class xcore_tflm_base_interpreter(ABC): """! The xcore interpreters base class. Defines a common base interface to be used by the host and device interpreters. @@ -26,7 +28,7 @@ def __init__(self) -> None: self.models: List[xcore_tflm_base_interpreter.modelData] = [] @abstractmethod - def initialise_interpreter(self, model_index: int=0) -> None: + def initialise_interpreter(self, model_index: int = 0) -> None: """! Abstract initialising interpreter with model associated with model_index. @param model_index The engine to target, for interpreters that support multiple models running concurrently. Defaults to 0 for use with a single model. @@ -44,7 +46,9 @@ def set_tensor(self, tensor_index: int, value, model_index: int = 0) -> None: raise NotImplementedError @abstractmethod - def get_tensor(self, tensor_index: int = 0, model_index: int = 0, tensor: ndarray = None) -> ndarray: + def get_tensor( + self, tensor_index: int = 0, model_index: int = 0, tensor: ndarray = None + ) -> ndarray: """! Abstract method for reading data from the output tensor of a model. @param tensor_index The index of output tensor to target. @param model_index The model to target, for interpreters that support multiple models @@ -55,7 +59,9 @@ def get_tensor(self, tensor_index: int = 0, model_index: int = 0, tensor: ndarra raise NotImplementedError @abstractmethod - def get_input_tensor(self, input_index: int = 0, model_index: int = 0) -> Union[ndarray, List[Union[int, Tuple[float]]]]: + def get_input_tensor( + self, input_index: int = 0, model_index: int = 0 + ) -> Union[ndarray, List[Union[int, Tuple[float]]]]: """! Abstract for reading the data in the input tensor of a model. @param input_index The index of input tensor to target. 
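generate_flash() above packs one model/params pair per engine: the image starts with the 32-byte fast-flash magic pattern, then the 4 version bytes, then one fixed-size header per engine, so engine data begins at BYTES_FOR_MAGIC_PATTERN + BYTES_FOR_VERSION + BYTES_PER_ENGINE_HEADER * engines. A minimal usage sketch with illustrative file names:

    from xmos_ai_tools.xformer.flash import generate_flash

    # Two engines deployed side by side; each gets its own header and
    # model/params blob in the image written to output_file.
    generate_flash(
        output_file="xcore_flash.bin",
        model_files=["model0.tflite", "model1.tflite"],
        param_files=["model0.params", "model1.params"],
    )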
@param model_index The engine to target, for interpreters that support multiple models @@ -218,7 +224,6 @@ def get_input_details(self, model_index: int = 0) -> List[Dict[str, Any]]: inputsList = [] for input_ in range(0, modelBuf.Subgraphs(0).InputsLength()): - tensorIndex = modelBuf.Subgraphs(0).Inputs(input_) # Generate dictioary of tensor details @@ -227,7 +232,9 @@ def get_input_details(self, model_index: int = 0) -> List[Dict[str, Any]]: dtype = np.int8 elif modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.INT32: dtype = np.int32 - elif modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.FLOAT32: + elif ( + modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.FLOAT32 + ): dtype = np.float32 else: raise Exception @@ -271,9 +278,7 @@ def get_input_details(self, model_index: int = 0) -> List[Dict[str, Any]]: return inputsList - def get_output_details( - self, model_index: int = 0 - ) -> List[Dict[str, Any]]: + def get_output_details(self, model_index: int = 0) -> List[Dict[str, Any]]: """! Reads the output tensor details from the model. @param output_index The index of output tensor to target. @param model_index The model to target, for interpreters that support multiple models @@ -288,7 +293,6 @@ def get_output_details( outputsList = [] for output_ in range(0, modelBuf.Subgraphs(0).OutputsLength()): - # Output tensor is last tensor tensorIndex = modelBuf.Subgraphs(0).Outputs(output_) @@ -298,7 +302,9 @@ def get_output_details( dtype = np.int8 elif modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.INT32: dtype = np.int32 - elif modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.FLOAT32: + elif ( + modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.FLOAT32 + ): dtype = np.float32 details = { @@ -466,7 +472,7 @@ def pathToContent(self) -> None: if self.params_content is None and self.params_path is not None: with open(self.params_path, "rb") as input_fd2: self.params_content = input_fd2.read() - + # If params_content is None, set to empty byte array if self.params_content is None: self.params_content = bytes([]) diff --git a/python/xmos_ai_tools/xinterpreters/device/device_interpreter.py b/python/xmos_ai_tools/xinterpreters/device/device_interpreter.py index 5608599c8..d13ada64b 100644 --- a/python/xmos_ai_tools/xinterpreters/device/device_interpreter.py +++ b/python/xmos_ai_tools/xinterpreters/device/device_interpreter.py @@ -93,8 +93,11 @@ def set_tensor(self, tensor_index, value: ndarray, model_index=0) -> None: count: Optional[int] tensor_details: Optional[Dict[str, Any]] count, tensor_details = next( - filter(lambda x: x[1]["index"] == tensor_index, enumerate(self.get_input_details())), - (None, None) + filter( + lambda x: x[1]["index"] == tensor_index, + enumerate(self.get_input_details()), + ), + (None, None), ) if count is None or tensor_details is None: @@ -115,7 +118,9 @@ def set_tensor(self, tensor_index, value: ndarray, model_index=0) -> None: print("Setting Input Tensor") return - def get_tensor(self, tensor_index: int = 0, model_index: int = 0, tensor: ndarray = None) -> ndarray: + def get_tensor( + self, tensor_index: int = 0, model_index: int = 0, tensor: ndarray = None + ) -> ndarray: """! Abstract for reading the data in the output tensor of a model. @param tensor_index The index of output tensor to target. 
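The abstract base class above pins down the calling convention that both the device and host subclasses implement. A typical inference loop against a concrete subclass looks like the sketch below; the import path is assumed, the details dictionaries are taken to carry the "index", "shape" and "dtype" keys built above, and a model is assumed to be attached to engine 0 before use:

    import numpy as np
    from xmos_ai_tools.xinterpreters import xcore_tflm_host_interpreter  # path assumed

    interpreter = xcore_tflm_host_interpreter()
    interpreter.initialise_interpreter(model_index=0)  # model attached beforehand

    (inp,) = interpreter.get_input_details()
    x = np.zeros(inp["shape"], dtype=inp["dtype"])
    interpreter.set_tensor(inp["index"], x)

    interpreter.invoke()

    (out,) = interpreter.get_output_details()
    y = interpreter.get_tensor(out["index"])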
@param tensor Tensor of correct shape to write into (optional) @@ -127,8 +132,11 @@ def get_tensor(self, tensor_index: int = 0, model_index: int = 0, tensor: ndarra count: Optional[int] tensor_details: Optional[Dict[str, Any]] count, tensor_details = next( - filter(lambda x: x[1]["index"] == tensor_index, enumerate(self.get_output_details())), - (None, None) + filter( + lambda x: x[1]["index"] == tensor_index, + enumerate(self.get_output_details()), + ), + (None, None), ) if count is None or tensor_details is None: @@ -154,7 +162,9 @@ def get_tensor(self, tensor_index: int = 0, model_index: int = 0, tensor: ndarra return np.reshape(output, tensor_details["shape"]) - def get_input_tensor(self, input_index=0, model_index=0) -> List[Union[int, Tuple[float]]]: + def get_input_tensor( + self, input_index=0, model_index=0 + ) -> List[Union[int, Tuple[float]]]: """! Abstract for reading the data in the input tensor of a model. @param input_index The index of output tensor to target. @param model_index The engine to target, for interpreters that support multiple models @@ -219,7 +229,11 @@ def _clear_error(self): pass def download_model( - self, model_bytes: bytearray, secondary_memory: bool = False, flash: bool = False, model_index: int = 0 + self, + model_bytes: bytearray, + secondary_memory: bool = False, + flash: bool = False, + model_index: int = 0, ): """! Download a model on to the device. @param model_bytes The byte array containing the model. @@ -284,7 +298,9 @@ def read_times(self, model_index: int = 0) -> List[Union[int, Tuple[float]]]: return output.tolist() @abstractmethod - def _upload_data(self, cmd, length, sign=False, tensor_num=0, engine_num=0) -> bytes: + def _upload_data( + self, cmd, length, sign=False, tensor_num=0, engine_num=0 + ) -> bytes: raise NotImplementedError @abstractmethod @@ -343,6 +359,7 @@ def _upload_data(self, cmd, length, sign=False, tensor_num=0, engine_num=0): def _clear_error(self): import usb + usb.util.dispose_resources(self._dev) self._dev.clear_halt(self._out_ep) self._dev.clear_halt(self._in_ep) @@ -371,13 +388,21 @@ def connect(self): if e.backend_error_code == usb.backend.libusb1.LIBUSB_ERROR_BUSY: for cfg in self._dev: for intf in cfg: - if self._dev.is_kernel_driver_active(intf.bInterfaceNumber): + if self._dev.is_kernel_driver_active( + intf.bInterfaceNumber + ): try: - self._dev.detach_kernel_driver(intf.bInterfaceNumber) + self._dev.detach_kernel_driver( + intf.bInterfaceNumber + ) self._dev.set_configuration() except usb.core.USBError: self._clear_error() - print("USB error : Could not detach kernel driver from interface({0})".format(intf.bInterfaceNumber)) + print( + "USB error : Could not detach kernel driver from interface({0})".format( + intf.bInterfaceNumber + ) + ) raise IOError() cfg = self._dev.get_active_configuration() @@ -388,14 +413,14 @@ def connect(self): intf, # match the first OUT endpoint custom_match=lambda e: usb.util.endpoint_direction(e.bEndpointAddress) - == usb.util.ENDPOINT_OUT, + == usb.util.ENDPOINT_OUT, ) self._in_ep = usb.util.find_descriptor( intf, # match the first IN endpoint custom_match=lambda e: usb.util.endpoint_direction(e.bEndpointAddress) - == usb.util.ENDPOINT_IN, + == usb.util.ENDPOINT_IN, ) assert self._out_ep is not None diff --git a/python/xmos_ai_tools/xinterpreters/host/host_interpreter.py b/python/xmos_ai_tools/xinterpreters/host/host_interpreter.py index fd1141f50..92ec0d681 100644 --- a/python/xmos_ai_tools/xinterpreters/host/host_interpreter.py +++ 
b/python/xmos_ai_tools/xinterpreters/host/host_interpreter.py @@ -10,7 +10,8 @@ from numpy import ndarray from xmos_ai_tools.xinterpreters.base.base_interpreter import ( - xcore_tflm_base_interpreter, XTFLMInterpreterStatus, + xcore_tflm_base_interpreter, + XTFLMInterpreterStatus, ) # DLL path for different platforms @@ -37,6 +38,7 @@ MAX_TENSOR_ARENA_SIZE = 10000000 + class xcore_tflm_host_interpreter(xcore_tflm_base_interpreter): """! The xcore interpreters host class. The interpreter to be used on a host, inherits from base interpreter. @@ -109,7 +111,7 @@ def __init__(self, max_tensor_arena_size: int = MAX_TENSOR_ARENA_SIZE) -> None: super().__init__() - def __enter__(self) -> 'xcore_tflm_host_interpreter': + def __enter__(self) -> "xcore_tflm_host_interpreter": return self def __exit__(self, exc_type, exc_value, exc_traceback) -> None: @@ -156,11 +158,16 @@ def set_tensor(self, tensor_index: int, value: ndarray, model_index=0) -> None: length = len(val) length2 = self.get_input_tensor_size(tensor_index) if length != length2: - print("ERROR: mismatching size in set_input_tensor %d vs %d" % (length, length2)) + print( + "ERROR: mismatching size in set_input_tensor %d vs %d" + % (length, length2) + ) self._check_status(lib.set_input_tensor(self.obj, tensor_index, val, length)) - def get_tensor(self, tensor_index: int = 0, model_index: int = 0, tensor: ndarray = None) -> ndarray: + def get_tensor( + self, tensor_index: int = 0, model_index: int = 0, tensor: ndarray = None + ) -> ndarray: """! Read data from the output tensor of a model. @param tensor_index The index of output tensor to target. @param model_index The model to target, for interpreters that support multiple models @@ -172,8 +179,11 @@ def get_tensor(self, tensor_index: int = 0, model_index: int = 0, tensor: ndarra count: Optional[int] tensor_details: Optional[Dict[str, Any]] count, tensor_details = next( - filter(lambda x: x[1]["index"] == tensor_index, enumerate(self.get_output_details())), - (None, None) + filter( + lambda x: x[1]["index"] == tensor_index, + enumerate(self.get_output_details()), + ), + (None, None), ) if count is None or tensor_details is None: @@ -186,7 +196,10 @@ def get_tensor(self, tensor_index: int = 0, model_index: int = 0, tensor: ndarra else: length = len(tensor.tobytes()) if length != length: - print("ERROR: mismatching size in get_output_tensor %d vs %d" % (length, length)) + print( + "ERROR: mismatching size in get_output_tensor %d vs %d" + % (length, length) + ) data_ptr = tensor.ctypes.data_as(ctypes.c_void_p) self._check_status(lib.get_output_tensor(self.obj, count, data_ptr, length)) @@ -208,8 +221,7 @@ def get_input_tensor(self, input_index: int = 0, model_index: int = 0) -> ndarra return tensor def reset(self, model_index: int = 0) -> None: - """! Resets the model. - """ + """! 
Resets the model.""" self._check_status(lib.reset(self.obj)) def invoke(self, model_index: int = 0) -> None: From 07fda7bad6886e9a1c488ff7dc58ceaabadcfc82 Mon Sep 17 00:00:00 2001 From: panickal-xmos Date: Tue, 15 Aug 2023 15:23:52 +0100 Subject: [PATCH 07/15] Better memory planning and bug fixes --- xformer/.bazelrc | 2 +- xformer/Analysis/MemoryPlan.cpp | 55 ++++++++++------------ xformer/Analysis/MemoryPlan.h | 6 ++- xformer/Transforms/ApplyTFLPatterns.cpp | 3 ++ xformer/Transforms/Options.h | 1 - xformer/Transforms/Passes.cpp | 1 + xformer/Transforms/Passes.h | 1 + xformer/Transforms/TFLPatterns.td | 6 ++- xformer/Transforms/TranslateToCustomOp.cpp | 10 ---- xformer/Transforms/XCPatterns.td | 3 +- xformer/XCoreOptMain.cpp | 7 ++- 11 files changed, 45 insertions(+), 50 deletions(-) diff --git a/xformer/.bazelrc b/xformer/.bazelrc index 0bc6f548c..2e6a7fef0 100644 --- a/xformer/.bazelrc +++ b/xformer/.bazelrc @@ -74,7 +74,7 @@ build:asan --copt -Wno-macro-redefined build:asan --copt -fsanitize=address build:asan --copt -DADDRESS_SANITIZER build:asan --copt -g -build:asan --copt -O1 +build:asan --copt -O0 build:asan --copt -fno-omit-frame-pointer build:asan --copt -fno-sanitize-recover=all build:asan --copt -gmlt diff --git a/xformer/Analysis/MemoryPlan.cpp b/xformer/Analysis/MemoryPlan.cpp index 70fef14e9..35f6ad3b3 100644 --- a/xformer/Analysis/MemoryPlan.cpp +++ b/xformer/Analysis/MemoryPlan.cpp @@ -29,7 +29,7 @@ void MemoryPlan::build() { auto type = v.getType().dyn_cast(); size_t k = static_cast(utils::getShapedTypeSize(type)); // Align size up to double word = 8 bytes - k = ((k + 15) / 16) * 16; + k = ((k + 7) / 8) * 8; return k; }; @@ -80,15 +80,6 @@ void MemoryPlan::build() { valueInfo[v].firstUsed = operationIds[startOp]; valueInfo[v].lastUsed = operationIds[lvb->getEndOperation(v, startOp)]; } - - LLVM_DEBUG(llvm::dbgs() << "\n\n"); - for (auto v : values) { - LLVM_DEBUG(llvm::dbgs() << "\nvalue " << valueInfo[v].id - << " size = " << valueInfo[v].size - << " start op = " << valueInfo[v].firstUsed - << " end op = " << valueInfo[v].lastUsed); - } - LLVM_DEBUG(llvm::dbgs() << "\n\n"); } Operation *MemoryPlan::getOpWithMaxMemoryUsed() { @@ -122,6 +113,7 @@ Operation *MemoryPlan::getOpWithMaxMemoryUsed() { } int MemoryPlan::getOffset(Value v, int size, + DenseMap &valueInfo, ValuesOrderedByOffset &allocatedValues) { int offset = 0; @@ -138,7 +130,7 @@ int MemoryPlan::getOffset(Value v, int size, } // Found an overlapping buffer - if (allocatedOffset - offset > size) { + if (allocatedOffset - offset >= size) { // There is a gap break; } else { @@ -153,8 +145,11 @@ int MemoryPlan::getOffset(Value v, int size, return offset; } -std::vector MemoryPlan::getAllocatedOffsets() { +std::vector MemoryPlan::getAllocatedOffsets(const bool overlapOps, + int &peakMemoryUsed) { std::vector offsets; + // Copy of valueInfo + auto vInfo = valueInfo; // Overlap buffers llvm::DenseMap> outInVals; @@ -177,16 +172,16 @@ std::vector MemoryPlan::getAllocatedOffsets() { } } - if (overlapOption) { + if (overlapOps) { for (auto o : operations) { if (llvm::isa(o)) { auto in = o->getOperand(0); if (in.hasOneUse()) { auto out = o->getResult(0); - int offset = valueInfo[out].size - valueInfo[in].size; + int offset = vInfo[out].size - vInfo[in].size; outInVals[out] = {in, offset}; - valueInfo[in].size += offset; - valueInfo[in].lastUsed = valueInfo[out].lastUsed; + vInfo[in].size += offset; + vInfo[in].lastUsed = vInfo[out].lastUsed; } } @@ -204,11 +199,11 @@ std::vector MemoryPlan::getAllocatedOffsets() { auto 
firstVal = outInVals[in].first; auto firstOffset = outInVals[in].second; - offset += valueInfo[out].size - valueInfo[firstVal].size; + offset += vInfo[out].size - vInfo[firstVal].size; outInInVals[out] = {{in, offset}, {firstVal, firstOffset}}; - valueInfo[firstVal].size += offset; - valueInfo[firstVal].lastUsed = valueInfo[out].lastUsed; + vInfo[firstVal].size += offset; + vInfo[firstVal].lastUsed = vInfo[out].lastUsed; } } } @@ -220,7 +215,7 @@ std::vector MemoryPlan::getAllocatedOffsets() { if (lhs.second != rhs.second) { return lhs.second < rhs.second; } - return valueInfo[lhs.first].id < valueInfo[rhs.first].id; + return vInfo[lhs.first].id < vInfo[rhs.first].id; }; // The top item is the largest one. llvm::PriorityQueue, @@ -229,9 +224,8 @@ std::vector MemoryPlan::getAllocatedOffsets() { // Insert values and their sizes into priority queue for (auto v : values) { - if (!outInVals.count(v) && !outInInVals.count(v) && - !valueInfo[v].isConstant) { - queue.push({v, valueInfo[v].size}); + if (!outInVals.count(v) && !outInInVals.count(v) && !vInfo[v].isConstant) { + queue.push({v, vInfo[v].size}); } } @@ -246,7 +240,7 @@ std::vector MemoryPlan::getAllocatedOffsets() { queue.pop(); // check with allocatedValues list - int newOffset = getOffset(v, size, allocatedValues); + int newOffset = getOffset(v, size, vInfo, allocatedValues); allocatedValues.insert({v, newOffset}); } @@ -300,28 +294,31 @@ std::vector MemoryPlan::getAllocatedOffsets() { } for (auto v : values) { - if (valueInfo[v].isConstant) { + if (vInfo[v].isConstant) { allocatedValues.insert({v, -1}); } } // Sort the allocated offsets by id, i.e., execution order auto cmp = [&](QueueItem a, QueueItem b) { - return valueInfo[a.first].id < valueInfo[b.first].id; + return vInfo[a.first].id < vInfo[b.first].id; }; std::multiset allocatedValuesOrderedByID(cmp); for (auto i : allocatedValues) { allocatedValuesOrderedByID.insert(i); } + size_t peakUsed = 0; LLVM_DEBUG(llvm::dbgs() << "\nAllocated offsets : "); for (auto i : allocatedValuesOrderedByID) { offsets.push_back(i.second); - LLVM_DEBUG(llvm::dbgs() - << "\nValue " << valueInfo[i.first].id << ", size " - << valueInfo[i.first].size << ", offset " << i.second); + peakUsed = std::max(peakUsed, vInfo[i.first].size + i.second); + LLVM_DEBUG(llvm::dbgs() << "\nValue " << vInfo[i.first].id << ", size " + << vInfo[i.first].size << ", offset " << i.second); } + LLVM_DEBUG(llvm::dbgs() << "\n\nPEAK USED : " << peakUsed << "\n\n"); LLVM_DEBUG(llvm::dbgs() << "\n\n"); + peakMemoryUsed = peakUsed; return offsets; } diff --git a/xformer/Analysis/MemoryPlan.h b/xformer/Analysis/MemoryPlan.h index 996c4af14..61a307152 100644 --- a/xformer/Analysis/MemoryPlan.h +++ b/xformer/Analysis/MemoryPlan.h @@ -40,7 +40,8 @@ class MemoryPlan { // - If no large-enough gap is found, the current buffer is placed after the // last buffer that's simultaneously active. // - This continues until all buffers are placed, and the offsets stored. 
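Two details of these hunks matter for correctness: buffer sizes are now aligned up to 8 bytes instead of 16, and the gap test becomes allocatedOffset - offset >= size, so an exactly-sized gap is reused rather than skipped. Restated as a minimal Python sketch, the planner is a largest-first, first-fit-with-gaps scan over the buffers whose lifetimes overlap:

    def plan_offsets(buffers):
        """buffers: list of (size, first_used, last_used); sizes pre-aligned to 8."""
        placed = []                      # (offset, size, first, last) tuples
        offsets = [None] * len(buffers)
        largest_first = sorted(range(len(buffers)),
                               key=lambda i: buffers[i][0], reverse=True)
        for i in largest_first:
            size, first, last = buffers[i]
            offset = 0
            for o, s, f, l in sorted(placed):    # ordered by offset
                if l < first or f > last:
                    continue                     # lifetimes never overlap
                if o - offset >= size:
                    break                        # found a large-enough gap
                offset = max(offset, o + s)      # step past this buffer
            placed.append((offset, size, first, last))
            offsets[i] = offset
        peak = max((o + s for o, s, _, _ in placed), default=0)
        return offsets, peak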
- std::vector getAllocatedOffsets(); + std::vector getAllocatedOffsets(const bool overlapOps, + int &peakMemoryUsed); Operation *getOpWithMaxMemoryUsed(); @@ -69,7 +70,8 @@ class MemoryPlan { int lastUsed; }; - int getOffset(Value v, int size, ValuesOrderedByOffset &allocatedOffsets); + int getOffset(Value v, int size, DenseMap &valueInfo, + ValuesOrderedByOffset &allocatedOffsets); DenseMap valueInfo; diff --git a/xformer/Transforms/ApplyTFLPatterns.cpp b/xformer/Transforms/ApplyTFLPatterns.cpp index 8397efa01..5e6e2763a 100644 --- a/xformer/Transforms/ApplyTFLPatterns.cpp +++ b/xformer/Transforms/ApplyTFLPatterns.cpp @@ -116,6 +116,9 @@ struct HoistQuantizeAboveConcatPattern if (!concatOp) { return failure(); } + if (!concatOp.getOutput().getType().getElementType().isa()) { + return failure(); + } SmallVector quantizeOps; TFL::QuantizeOp newQuantizeOp; diff --git a/xformer/Transforms/Options.h b/xformer/Transforms/Options.h index b698a2d2b..5dbf94c7f 100644 --- a/xformer/Transforms/Options.h +++ b/xformer/Transforms/Options.h @@ -22,7 +22,6 @@ extern llvm::cl::list opSplitTopOpsOption; extern llvm::cl::list opSplitNumSplitsOption; extern llvm::cl::opt allowInputModificationOption; extern llvm::cl::opt convDebugOption; -extern llvm::cl::opt overlapOption; extern llvm::cl::opt overlapConvOption; extern llvm::cl::opt offlineOffsetsOption; extern llvm::cl::opt convChannelwiseSplitSizeOption; diff --git a/xformer/Transforms/Passes.cpp b/xformer/Transforms/Passes.cpp index 4e1b99172..0a828ca49 100644 --- a/xformer/Transforms/Passes.cpp +++ b/xformer/Transforms/Passes.cpp @@ -41,6 +41,7 @@ void buildXCorePassPipeline(OpPassManager &pm) { } // Run canonicalization, which includes combining Reshapes pm.addPass(mlir::createCanonicalizerPass()); + pm.addPass(createPlanMemoryPass()); pm.addPass(createTranslateToCustomOpPass()); } diff --git a/xformer/Transforms/Passes.h b/xformer/Transforms/Passes.h index 33f5dfeb8..b777a6c9a 100644 --- a/xformer/Transforms/Passes.h +++ b/xformer/Transforms/Passes.h @@ -37,6 +37,7 @@ std::unique_ptr> createApplyXCPatternsPass(); std::unique_ptr> createApplyLoadConstantOpPatternsPass(); std::unique_ptr> createWriteFlashImagePass(); +std::unique_ptr> createPlanMemoryPass(); std::unique_ptr> createTranslateToCustomOpPass(); //===----------------------------------------------------------------------===// diff --git a/xformer/Transforms/TFLPatterns.td b/xformer/Transforms/TFLPatterns.td index 0b3821834..85d074051 100644 --- a/xformer/Transforms/TFLPatterns.td +++ b/xformer/Transforms/TFLPatterns.td @@ -19,12 +19,16 @@ class UpdateShapeWithAxis "quant::CastQuantizedTypeAttrFromExpressedType($_builder, $0, " "quant::QuantizedType::castToExpressedType($1.getType()), " #i #")">; +def IsQuantizedType : Constraint($0.getType()." 
+ "cast().getElementType())">>; + // Convert Quantize(Reshape()) -> Reshape(Quantize()) // This is to merge Quantize with Conv2D if possible def : Pat<(TFL_QuantizeOp(TFL_ReshapeOp $input, $shape), $qtype), (TFL_ReshapeOp(TFL_QuantizeOp $input, (UpdateShapeWithAxis<-1> $qtype, $input)), - $shape)>; + $shape), + [(IsQuantizedType $input)]>; // Fuse Quantize(Conv2D()) -> Conv2D() def : Pat<(TFL_QuantizeOp(TFL_Conv2DOp $input, $f, $b, $dh, $dw, $faf, $p, $sh, diff --git a/xformer/Transforms/TranslateToCustomOp.cpp b/xformer/Transforms/TranslateToCustomOp.cpp index a324705d4..70a1dee1c 100644 --- a/xformer/Transforms/TranslateToCustomOp.cpp +++ b/xformer/Transforms/TranslateToCustomOp.cpp @@ -162,16 +162,6 @@ void TranslateToCustomOp::runOnOperation() { RewritePatternSet patterns(ctx); func::FuncOp func = getOperation(); - if (offlineOffsetsOption) { - auto &m = getAnalysis(); - auto offsets = m.getAllocatedOffsets(); - - // Store as an attribute in the module - auto module = func->getParentOfType(); - OpBuilder builder(func); - module->setAttr("xc.offsets", builder.getI32VectorAttr(offsets)); - } - patterns.insert>(ctx); patterns.insert>(ctx); patterns.insert>(ctx); diff --git a/xformer/Transforms/XCPatterns.td b/xformer/Transforms/XCPatterns.td index a9677f56c..d3784e5b4 100644 --- a/xformer/Transforms/XCPatterns.td +++ b/xformer/Transforms/XCPatterns.td @@ -14,8 +14,7 @@ include "Utils/Utils.td" def getLookupTable : NativeCodeCall<"getLookupTable($_builder, $0.getDefiningOp())">; -def getThreadCount - : NativeCodeCall<"getThreadCount($_builder)">; +def getThreadCount : NativeCodeCall<"getThreadCount($_builder)">; foreach activationOp = [TFL_ReluOp, TFL_Relu6Op, TFL_TanhOp, TFL_LogisticOp, TFL_HardSwishOp] in { diff --git a/xformer/XCoreOptMain.cpp b/xformer/XCoreOptMain.cpp index d9623637d..cf434d3ea 100644 --- a/xformer/XCoreOptMain.cpp +++ b/xformer/XCoreOptMain.cpp @@ -115,15 +115,12 @@ cl::opt convDebugOption("xcore-conv-debug", cl::init(false), cl::cat(XformerCategory), cl::Hidden); -cl::opt overlapOption("xcore-overlap", cl::desc("Overlap buffers."), - cl::init(true), cl::cat(XformerCategory)); - cl::opt overlapConvOption("xcore-overlap-conv", cl::desc("Overlap conv also."), cl::init(false), cl::cat(XformerCategory), cl::Hidden); cl::opt offlineOffsetsOption("xcore-offline-offsets", - cl::desc("Offline offsets"), cl::init(false), + cl::desc("Offline offsets"), cl::init(true), cl::cat(XformerCategory)); cl::opt convChannelwiseSplitSizeOption( @@ -245,6 +242,8 @@ int main(int argc, char **argv) { // Register any command line options. registerPassManagerCLOptions(); + registerAsmPrinterCLOptions(); + registerMLIRContextCLOptions(); xcore::registerXCorePassPipeline(); PassPipelineCLParser passPipeline("", "Compiler passes to run"); cl::SetVersionPrinter(PrintVersion); From 2464efe4781304fd926f889051fdde406f94f8be Mon Sep 17 00:00:00 2001 From: panickal-xmos Date: Tue, 15 Aug 2023 15:25:33 +0100 Subject: [PATCH 08/15] Add new pass --- .../xformer/Transforms/PlanMemory.cpp | 59 +++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 experimental/xformer/Transforms/PlanMemory.cpp diff --git a/experimental/xformer/Transforms/PlanMemory.cpp b/experimental/xformer/Transforms/PlanMemory.cpp new file mode 100644 index 000000000..57e073068 --- /dev/null +++ b/experimental/xformer/Transforms/PlanMemory.cpp @@ -0,0 +1,59 @@ +// Copyright 2021 XMOS LIMITED. 
This Software is subject to the terms of the +// XMOS Public License: Version 1 + +#include "Analysis/MemoryPlan.h" +#include "IR/XCoreOps.h" +#include "Transforms/Options.h" + +#include "mlir/Pass/Pass.h" +#include "mlir/Transforms/GreedyPatternRewriteDriver.h" +#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h" + +namespace mlir { +namespace xcore { + +namespace { +// Write flash image +struct PlanMemory + : public PassWrapper> { + MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(PlanMemory) + + void getDependentDialects(DialectRegistry ®istry) const final { + registry.insert(); + } + StringRef getArgument() const final { return "xcore-plan-memory"; } + StringRef getDescription() const final { return "Plan memory"; } + void runOnOperation() override; +}; + +void PlanMemory::runOnOperation() { + auto func = getOperation(); + auto module = func->getParentOfType(); + OpBuilder builder(module); + + auto &m = getAnalysis(); + int peakMemoryUsedWithOverlap, peakMemoryUsedWithoutOverlap; + auto offlineOffsetsWithOverlap = + m.getAllocatedOffsets(/*overlapOps=*/true, peakMemoryUsedWithOverlap); + auto offlineOffsetsWithoutOverlap = + m.getAllocatedOffsets(/*overlapOps=*/false, peakMemoryUsedWithoutOverlap); + + if (peakMemoryUsedWithOverlap < peakMemoryUsedWithoutOverlap) { + module->setAttr("xc.offsets", + builder.getI32VectorAttr(offlineOffsetsWithOverlap)); + } else { + module->setAttr("xc.offsets", + builder.getI32VectorAttr(offlineOffsetsWithoutOverlap)); + } +} +} // namespace + +// Creates an instance of the PlanMemory pass. +std::unique_ptr> createPlanMemoryPass() { + return std::make_unique(); +} + +static PassRegistration pass; + +} // namespace xcore +} // namespace mlir From a97c50e901c1129f42d7861790208cd8fa261c04 Mon Sep 17 00:00:00 2001 From: panickal-xmos Date: Wed, 16 Aug 2023 16:59:35 +0100 Subject: [PATCH 09/15] Disable memory planning for unsupported ops --- .../xformer/Transforms/PlanMemory.cpp | 47 ++++++++++++------- xformer/XCoreOptMain.cpp | 5 +- 2 files changed, 32 insertions(+), 20 deletions(-) diff --git a/experimental/xformer/Transforms/PlanMemory.cpp b/experimental/xformer/Transforms/PlanMemory.cpp index 57e073068..8ae6d5f43 100644 --- a/experimental/xformer/Transforms/PlanMemory.cpp +++ b/experimental/xformer/Transforms/PlanMemory.cpp @@ -27,23 +27,36 @@ struct PlanMemory }; void PlanMemory::runOnOperation() { - auto func = getOperation(); - auto module = func->getParentOfType(); - OpBuilder builder(module); - - auto &m = getAnalysis(); - int peakMemoryUsedWithOverlap, peakMemoryUsedWithoutOverlap; - auto offlineOffsetsWithOverlap = - m.getAllocatedOffsets(/*overlapOps=*/true, peakMemoryUsedWithOverlap); - auto offlineOffsetsWithoutOverlap = - m.getAllocatedOffsets(/*overlapOps=*/false, peakMemoryUsedWithoutOverlap); - - if (peakMemoryUsedWithOverlap < peakMemoryUsedWithoutOverlap) { - module->setAttr("xc.offsets", - builder.getI32VectorAttr(offlineOffsetsWithOverlap)); - } else { - module->setAttr("xc.offsets", - builder.getI32VectorAttr(offlineOffsetsWithoutOverlap)); + if (offlineOffsetsOption) { + auto func = getOperation(); + + bool unSupportedOpsInGraph = false; + func.walk([&](Operation *op) { + if (llvm::isa(op)) { + unSupportedOpsInGraph = true; + } + }); + + if (!unSupportedOpsInGraph) { + auto module = func->getParentOfType(); + OpBuilder builder(module); + + auto &m = getAnalysis(); + int peakMemoryUsedWithOverlap, peakMemoryUsedWithoutOverlap; + auto offlineOffsetsWithOverlap = + m.getAllocatedOffsets(/*overlapOps=*/true, peakMemoryUsedWithOverlap); + 
auto offlineOffsetsWithoutOverlap = m.getAllocatedOffsets( + /*overlapOps=*/false, peakMemoryUsedWithoutOverlap); + + if (peakMemoryUsedWithOverlap < peakMemoryUsedWithoutOverlap) { + module->setAttr("xc.offsets", + builder.getI32VectorAttr(offlineOffsetsWithOverlap)); + } else { + module->setAttr("xc.offsets", + builder.getI32VectorAttr(offlineOffsetsWithoutOverlap)); + } + } } } } // namespace diff --git a/xformer/XCoreOptMain.cpp b/xformer/XCoreOptMain.cpp index cf434d3ea..b3ecbf7a7 100644 --- a/xformer/XCoreOptMain.cpp +++ b/xformer/XCoreOptMain.cpp @@ -121,7 +121,7 @@ cl::opt overlapConvOption("xcore-overlap-conv", cl::opt offlineOffsetsOption("xcore-offline-offsets", cl::desc("Offline offsets"), cl::init(true), - cl::cat(XformerCategory)); + cl::cat(XformerCategory), cl::Hidden); cl::opt convChannelwiseSplitSizeOption( "xcore-conv-channelwise-split-size", @@ -389,8 +389,7 @@ int main(int argc, char **argv) { // std::vector offline_offsets = { // 73728, -1, -1, -1, -1, -1, -1, 0, 129024, 73728, 166272, 132096, // 73728, 153984, 132096, 73728, 132096, 73728, 0, 52224, 0}; - if (mlir::xcore::offlineOffsetsOption) { - auto attr = module->getAttr("xc.offsets"); + if (auto attr = module->getAttr("xc.offsets")) { auto offline_offsets = std::vector{ attr.cast().getValues().begin(), attr.cast().getValues().end()}; From c54bf6e418419127e4b286d9df34855b7e913c8b Mon Sep 17 00:00:00 2001 From: Michael Poluektov Date: Wed, 16 Aug 2023 19:08:18 +0000 Subject: [PATCH 10/15] move PlanMemory.cpp back to xformer --- {experimental/xformer => xformer}/Transforms/PlanMemory.cpp | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename {experimental/xformer => xformer}/Transforms/PlanMemory.cpp (100%) diff --git a/experimental/xformer/Transforms/PlanMemory.cpp b/xformer/Transforms/PlanMemory.cpp similarity index 100% rename from experimental/xformer/Transforms/PlanMemory.cpp rename to xformer/Transforms/PlanMemory.cpp From 839a287f21816f5c9201fbb8827ad1f22412a3b2 Mon Sep 17 00:00:00 2001 From: Michael Date: Wed, 16 Aug 2023 23:18:32 +0100 Subject: [PATCH 11/15] fix experimental/xformer in hidden files --- .github/workflows/release-beta.yml | 8 ++++---- .gitignore | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/release-beta.yml b/.github/workflows/release-beta.yml index 6bfaac27b..101f8a7fe 100644 --- a/.github/workflows/release-beta.yml +++ b/.github/workflows/release-beta.yml @@ -89,7 +89,7 @@ jobs: cd $GITHUB_WORKSPACE make build - cd $GITHUB_WORKSPACE/experimental/xformer + cd $GITHUB_WORKSPACE/xformer if [[ -n $GOOGLE_APPLICATION_CREDENTIALS ]]; then echo "using Google Cloud Storage cache" echo -e 'build --remote_cache=${{ env.BAZEL_CACHE_URL }}/${{ runner.os }}-${{ runner.arch }}-python${{ matrix.python-version }}' >> .bazelrc.user @@ -152,7 +152,7 @@ jobs: cd $GITHUB_WORKSPACE make build - cd $GITHUB_WORKSPACE/experimental/xformer + cd $GITHUB_WORKSPACE/xformer if [[ -n $GOOGLE_APPLICATION_CREDENTIALS ]]; then echo "using Google Cloud Storage cache" echo -e 'build --remote_cache=${{ env.BAZEL_CACHE_URL }}/${{ runner.os }}-arm64-python${{ matrix.python-version }}' >> .bazelrc.user @@ -206,7 +206,7 @@ jobs: # auditwheel requires patchelf and repairs the created wheel file to # be compatible for manylinux. 
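The PlanMemory pass above simply runs the planner twice and keeps whichever layout peaks lower. In terms of the plan_offsets() sketch given earlier, the selection amounts to the following; the two buffer lists are hypothetical inputs, one with producer/consumer pairs overlapped and one without:

    def choose_plan(buffers, buffers_overlapped):
        offsets_overlap, peak_overlap = plan_offsets(buffers_overlapped)  # overlapOps=True
        offsets_plain, peak_plain = plan_offsets(buffers)                 # overlapOps=False
        # The winner is stored on the module as the "xc.offsets" attribute
        # and read back in XCoreOptMain as the offline offsets vector.
        return offsets_overlap if peak_overlap < peak_plain else offsets_plain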
run: | - pushd $GITHUB_WORKSPACE/experimental/xformer + pushd $GITHUB_WORKSPACE/xformer if [[ -n $GOOGLE_APPLICATION_CREDENTIALS ]]; then echo "using Google Cloud Storage cache" echo -e 'build --remote_cache=${{ env.BAZEL_CACHE_URL }}/${{ runner.os }}-${{ runner.arch }}-python${{ matrix.python-version }}' >> .bazelrc.user @@ -293,7 +293,7 @@ jobs: cmake .. cmake --build . -t install --parallel --config Release - cd %GITHUB_WORKSPACE%/experimental/xformer + cd %GITHUB_WORKSPACE%/xformer set BAZEL_VC=C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC bazelisk build //:xcore-opt --local_ram_resources=5120 --action_env PYTHON_BIN_PATH="C:/hostedtoolcache/windows/Python/3.8.10/x64/python.exe" --remote_cache=${{ env.BAZEL_CACHE_URL }}/${{ runner.os }}-${{ runner.arch }}-python${{ matrix.python-version }} --google_default_credentials --//:disable_version_check - name: Build windows wheels diff --git a/.gitignore b/.gitignore index d2a5fc446..eecd172b2 100644 --- a/.gitignore +++ b/.gitignore @@ -10,7 +10,7 @@ **/*.eggs/* Pipfile.lock ai_tools_venv/* -experimental/xformer/.venv +xformer/.venv # Build cruft **/.lock* @@ -57,7 +57,7 @@ python/xmos_ai_tools/xinterpreters/device/include *.h5 *.html *training_history.png -experimental/xformer/model_maker/*.tflite +xformer/model_maker/*.tflite # Others *.npz From 9ff53c1bbe561892a74993b8183129980b068e96 Mon Sep 17 00:00:00 2001 From: panickal-xmos Date: Thu, 17 Aug 2023 13:52:19 +0100 Subject: [PATCH 12/15] Add beta ops for conv f32 --- xformer/IR/XCoreOps.td | 41 ++++++++++++++++++++++ xformer/Transforms/ConvPatterns.td | 23 +++++++++--- xformer/Transforms/TranslateToCustomOp.cpp | 8 +++++ 3 files changed, 68 insertions(+), 4 deletions(-) diff --git a/xformer/IR/XCoreOps.td b/xformer/IR/XCoreOps.td index 78f732972..ec7a918bc 100644 --- a/xformer/IR/XCoreOps.td +++ b/xformer/IR/XCoreOps.td @@ -132,6 +132,47 @@ def XC_MulOp : XC_Op<"mul", [Pure]> { let results = (outs TensorOf<[QI8]> : $output); } +def XC_Beta_ConvF32Op : XC_Op<"beta_convf32", [Pure]> { + let summary = "Beta ConvF32 op"; + + let description = [{Beta ConvF32 op.}]; + + let arguments = (ins + TensorOf<[F32]>:$input, + TensorOf<[F32]>:$kernels, + TensorOf<[F32]>:$bias + ); + + let results = (outs TensorOf<[F32]> : $output); +} + +def XC_Beta_TransposeConvF32Op : XC_Op<"beta_transposeconvf32", [Pure]> { + let summary = "Beta Transpose ConvF32 op"; + + let description = [{Beta Transpose ConvF32 op.}]; + + let arguments = (ins + TensorOf<[F32]>:$input, + TensorOf<[F32]>:$kernels, + TensorOf<[F32]>:$bias + ); + + let results = (outs TensorOf<[F32]> : $output); +} + +def XC_Beta_FcF32Op : XC_Op<"beta_fcf32", [Pure]> { + let summary = "Beta Fc F32 op"; + + let description = [{Beta Fc ConvF32 op.}]; + + let arguments = (ins + TensorOf<[F32]>:$input, + TensorOf<[F32]>:$kernels + ); + + let results = (outs TensorOf<[F32]> : $output); +} + def XC_Conv2DV2Op : XC_Op<"conv2d_v2", [Pure]> { let summary = "Conv2D V2 op"; diff --git a/xformer/Transforms/ConvPatterns.td b/xformer/Transforms/ConvPatterns.td index e072a2d3e..a9725a5d6 100644 --- a/xformer/Transforms/ConvPatterns.td +++ b/xformer/Transforms/ConvPatterns.td @@ -25,8 +25,8 @@ def IsConstOp // TFL_Conv2D() -> XC_FakeConv2D() def : -Pat<(TFL_Conv2DOp: $output TensorOf<[QI8]>:$input, TensorOf<[QI8]>:$f, AnyTypeOf<[TensorOf<[I32,QI32]>, NoneType]>:$b, $dh, $dw, $faf, $p, $sh, $sw), - (XC_FakeConv2DOp $input, $f, $b, $dh, $dw, $faf, $p, (CreateNoneValue $input), $sh, $sw),[ +Pat<(TFL_Conv2DOp: $output TensorOf<[QI8]>:$input, 
TensorOf<[QI8]>:$f, AnyTypeOf<[TensorOf<[I32,QI32]>, NoneType]>:$b, $dh, $dw, $faf, $wf, $sh, $sw), + (XC_FakeConv2DOp $input, $f, $b, $dh, $dw, $faf, $wf, (CreateNoneValue $input), $sh, $sw),[ (HasMultipleOfNBytesPerPixel<4> $input), (HasMultipleOfNBytesPerPixel<4> $output), (HasEqualChannels $input, $f), @@ -35,9 +35,24 @@ Pat<(TFL_Conv2DOp: $output TensorOf<[QI8]>:$input, TensorOf<[QI8]>:$f, AnyTypeOf // TFL_DepthwiseConv2D() -> XC_FakeDepthwiseConv2D() def : -Pat<(TFL_DepthwiseConv2DOp: $output TensorOf<[QI8]>:$input, TensorOf<[QI8]>:$f, TensorOf<[I32,QI32]>:$b, $dh, $dw, $faf, $p, $sh, $sw, $dm), - (XC_FakeDepthwiseConv2DOp $input, $f, $b, $dh, $dw, $faf, $p, (CreateNoneValue $input), $sh, $sw, $dm),[ +Pat<(TFL_DepthwiseConv2DOp: $output TensorOf<[QI8]>:$input, TensorOf<[QI8]>:$f, TensorOf<[I32,QI32]>:$b, $dh, $dw, $faf, $wf, $sh, $sw, $dm), + (XC_FakeDepthwiseConv2DOp $input, $f, $b, $dh, $dw, $faf, $wf, (CreateNoneValue $input), $sh, $sw, $dm),[ (HasMultipleOfNBytesPerPixel<4> $input), (HasMultipleOfNBytesPerPixel<4> $output), (IsConstOp $f), ]>; + +// F32 TFL_Conv2D() -> XC_Beta_ConvF32() +def : +Pat<(TFL_Conv2DOp: $output TensorOf<[F32]>:$input, TensorOf<[F32]>:$f, TensorOf<[F32]>:$b, $dh, $dw, $faf, $wf, $sh, $sw), + (XC_Beta_ConvF32Op $input, $f, $b)>; + +// F32 TFL_TransposeConv2D() -> XC_Beta_TransposeConvF32() +def : +Pat<(TFL_TransposeConvOp: $output $outshape, TensorOf<[F32]>:$f, TensorOf<[F32]>:$input, TensorOf<[F32]>:$b, $wf, $sh, $sw, $faf), + (XC_Beta_TransposeConvF32Op $input, $f, $b)>; + +// F32 TFL_FullyConnected() -> XC_Beta_FcF32() +def : +Pat<(TFL_FullyConnectedOp: $output TensorOf<[F32]>:$input, TensorOf<[F32]>:$f, $b, $faf, $wf, $knd, $aqi), + (XC_Beta_FcF32Op $input, $f)>; diff --git a/xformer/Transforms/TranslateToCustomOp.cpp b/xformer/Transforms/TranslateToCustomOp.cpp index 70a1dee1c..2e459b3e5 100644 --- a/xformer/Transforms/TranslateToCustomOp.cpp +++ b/xformer/Transforms/TranslateToCustomOp.cpp @@ -14,6 +14,11 @@ namespace mlir { namespace xcore { std::vector Bsign8Op::buildCustomOptions() { return {}; } +std::vector Beta_ConvF32Op::buildCustomOptions() { return {}; } +std::vector Beta_TransposeConvF32Op::buildCustomOptions() { + return {}; +} +std::vector Beta_FcF32Op::buildCustomOptions() { return {}; } std::vector LookupOp::buildCustomOptions() { flexbuffers::Builder fbb; @@ -171,6 +176,9 @@ void TranslateToCustomOp::runOnOperation() { patterns.insert>(ctx); patterns.insert>(ctx); patterns.insert>(ctx); + patterns.insert>(ctx); + patterns.insert>(ctx); + patterns.insert>(ctx); (void)applyPatternsAndFoldGreedily(func, std::move(patterns)); } From 072caf19a37365f5caf62406e7e3230d053dafb4 Mon Sep 17 00:00:00 2001 From: panickal-xmos Date: Thu, 17 Aug 2023 13:52:37 +0100 Subject: [PATCH 13/15] Update submodule --- third_party/lib_tflite_micro | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/third_party/lib_tflite_micro b/third_party/lib_tflite_micro index a3d467d27..7106a715a 160000 --- a/third_party/lib_tflite_micro +++ b/third_party/lib_tflite_micro @@ -1 +1 @@ -Subproject commit a3d467d2770e0afedac6b53bf286da2d81c5dc57 +Subproject commit 7106a715ac286e4bf3e7d085da3b600a7fb838e9 From 3eaf6767c149ad4d16e7577c71f2ce959e1767ab Mon Sep 17 00:00:00 2001 From: panickal-xmos Date: Thu, 17 Aug 2023 15:37:23 +0100 Subject: [PATCH 14/15] Add source file --- xformer/lib_tflite_micro.BUILD | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/xformer/lib_tflite_micro.BUILD b/xformer/lib_tflite_micro.BUILD index 0990ce63f..8d8b1948b 100644 --- 
a/xformer/lib_tflite_micro.BUILD
+++ b/xformer/lib_tflite_micro.BUILD
@@ -36,5 +36,9 @@ filegroup(
         "lib_tflite_micro/src/tflite-xcore-kernels/xcore_3_to_4.cc",
         "lib_tflite_micro/src/tflite-xcore-kernels/xcore_strided_slice.cc",
         "lib_tflite_micro/src/tflite-xcore-kernels/xcore_mul.cc",
+        "lib_tflite_micro/src/tflite-xcore-kernels/xcore_beta_convf32.cc",
+        "lib_tflite_micro/src/tflite-xcore-kernels/xcore_beta_transposeconvf32.cc",
+        "lib_tflite_micro/src/tflite-xcore-kernels/xcore_beta_fcf32.cc",
+        "lib_tflite_micro/src/tflite-xcore-kernels/conv2d_float.c",
     ],
 )

From 48957ad319776bf58195a5c43db4ef43b1e84cbf Mon Sep 17 00:00:00 2001
From: panickal-xmos
Date: Fri, 18 Aug 2023 09:27:59 +0100
Subject: [PATCH 15/15] Update submodule

---
 third_party/lib_tflite_micro | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/third_party/lib_tflite_micro b/third_party/lib_tflite_micro
index 7106a715a..30f4c7cab 160000
--- a/third_party/lib_tflite_micro
+++ b/third_party/lib_tflite_micro
@@ -1 +1 @@
-Subproject commit 7106a715ac286e4bf3e7d085da3b600a7fb838e9
+Subproject commit 30f4c7cab66db8f5686f04c6537633bcdaa40abb
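With the beta float ops now lowered to custom ops on the compiler side and the matching xcore_beta_*.cc kernels pulled in through lib_tflite_micro, a quick way to confirm which custom operators ended up in a converted model is to walk its operator codes. A sketch using the same tflite schema package the interpreters above import; the exact custom-code strings are defined in lib_tflite_micro and not shown in this diff:

    from tflite.Model import Model  # same schema package used by base_interpreter.py

    with open("model_xcore.tflite", "rb") as f:
        buf = f.read()

    model = Model.GetRootAsModel(buf, 0)
    for i in range(model.OperatorCodesLength()):
        op_code = model.OperatorCodes(i)
        custom = op_code.CustomCode()
        # Builtin entries print a numeric code; the beta float ops appear as
        # custom codes whose names come from the lib_tflite_micro kernels.
        print(op_code.BuiltinCode(), custom.decode("utf-8") if custom else "")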