diff --git a/tests/tools/onert_run/CMakeLists.txt b/tests/tools/onert_run/CMakeLists.txt
index 1d536ddc0d9..8d77ff07cf2 100644
--- a/tests/tools/onert_run/CMakeLists.txt
+++ b/tests/tools/onert_run/CMakeLists.txt
@@ -12,7 +12,6 @@ list(APPEND ONERT_RUN_SRCS "src/nnfw_util.cc")
 list(APPEND ONERT_RUN_SRCS "src/randomgen.cc")
 list(APPEND ONERT_RUN_SRCS "src/rawformatter.cc")
 
-nnfw_find_package(Boost REQUIRED program_options)
 nnfw_find_package(Ruy QUIET)
 nnfw_find_package(HDF5 QUIET)
 
@@ -31,11 +30,10 @@ else()
 endif(HDF5_FOUND)
 
 target_include_directories(onert_run PRIVATE src)
-target_include_directories(onert_run PRIVATE ${Boost_INCLUDE_DIRS})
 target_link_libraries(onert_run nnfw_lib_tflite jsoncpp)
 target_link_libraries(onert_run nnfw-dev)
-target_link_libraries(onert_run ${Boost_PROGRAM_OPTIONS_LIBRARY})
+target_link_libraries(onert_run arser)
 target_link_libraries(onert_run nnfw_lib_benchmark)
 
 if(Ruy_FOUND AND PROFILE_RUY)
   target_link_libraries(onert_run ruy_instrumentation)
diff --git a/tests/tools/onert_run/src/args.cc b/tests/tools/onert_run/src/args.cc
index 5650f48c92c..ee9298c357e 100644
--- a/tests/tools/onert_run/src/args.cc
+++ b/tests/tools/onert_run/src/args.cc
@@ -17,7 +17,7 @@
 #include "args.h"
 
 #include <functional>
-#include <boost/program_options.hpp>
+#include <arser/arser.h>
 #include <iostream>
 #include <sys/stat.h>
@@ -147,272 +147,320 @@ Args::Args(const int argc, char **argv)
 
 void Args::Initialize(void)
 {
-  auto process_nnpackage = [&](const std::string &package_filename) {
-    _package_filename = package_filename;
-
-    std::cerr << "Package Filename " << _package_filename << std::endl;
-    checkPackage(package_filename);
-  };
-
-  auto process_modelfile = [&](const std::string &model_filename) {
-    _model_filename = model_filename;
-
-    std::cerr << "Model Filename " << _model_filename << std::endl;
-    checkModelfile(model_filename);
+  _arser.add_argument("path").type(arser::DataType::STR).help("NN Package or NN Modelfile path");
+
+  _arser.add_argument("--version")
+    .nargs(0)
+    .default_value(false)
+    .help("Print version and exit immediately");
+  _arser.add_argument("--nnpackage")
+    .type(arser::DataType::STR)
+    .help("NN Package file(directory) name");
+  _arser.add_argument("--modelfile").type(arser::DataType::STR).help("NN Model filename");
+#if defined(ONERT_HAVE_HDF5) && ONERT_HAVE_HDF5 == 1
+  _arser.add_argument("--dump", "-d").type(arser::DataType::STR).help("Output filename");
+  _arser.add_argument("--load", "-l").type(arser::DataType::STR).help("Input filename");
+#endif
+  _arser.add_argument("--dump:raw").type(arser::DataType::STR).help("Raw Output filename");
+  _arser.add_argument("--dump_input:raw")
+    .type(arser::DataType::STR)
+    .help("Raw Input filename for dump");
+  _arser.add_argument("--load:raw").type(arser::DataType::STR).help("Raw Input filename");
+  _arser.add_argument("--output_sizes")
+    .type(arser::DataType::STR)
+    .help({"The output buffer size in JSON 1D array",
+           "If not given, the model's output sizes are used",
+           "e.g. '[0, 40, 2, 80]' to set 0th tensor to 40 and 2nd tensor to 80."});
+  _arser.add_argument("--num_runs", "-r")
+    .type(arser::DataType::INT32)
+    .default_value(1)
+    .help("The number of runs");
+  _arser.add_argument("--fixed_input")
+    .nargs(0)
+    .default_value(false)
+    .help("Use same random input data on each run (available on random input)");
+  _arser.add_argument("--force_float")
+    .nargs(0)
+    .default_value(false)
+    .help("Ignore model's input and output type and use float type buffer");
+  _arser.add_argument("--warmup_runs", "-w")
+    .type(arser::DataType::INT32)
+    .default_value(0)
+    .help("The number of warmup runs");
+  _arser.add_argument("--minmax_runs")
+    .type(arser::DataType::INT32)
+    .default_value(0)
+    .help("The number of minmax recording runs before full quantization");
+  _arser.add_argument("--run_delay", "-t")
+    .type(arser::DataType::INT32)
+    .default_value(-1)
+    .help("Delay time(us) between runs (no delay by default)");
+  _arser.add_argument("--gpumem_poll", "-g")
+    .nargs(0)
+    .default_value(false)
+    .help("Check gpu memory polling separately");
+  _arser.add_argument("--mem_poll", "-m")
+    .nargs(0)
+    .default_value(false)
+    .help("Check memory polling");
+  _arser.add_argument("--write_report", "-p")
+    .nargs(0)
+    .default_value(false)
+    .help({"Write report", "{exec}-{nnpkg|modelfile}-{backend}.csv will be generated.",
+           "e.g. onert_run-UNIT_Add_000-acl_cl.csv.",
+           "{nnpkg|modelfile} name may be changed to realpath if you use symbolic-link."});
+  _arser.add_argument("--shape_prepare")
+    .type(arser::DataType::STR)
+    .default_value("")
+    .help("Please refer to the description of 'shape_run'");
+  _arser.add_argument("--shape_run").type(arser::DataType::STR).default_value("").help({
+    "--shape_prepare: set shape of tensors before compilation (before calling nnfw_prepare()).",
+    "--shape_run: set shape of tensors before running (before calling nnfw_run()).",
+    "Allowed value:",
+    "'[0, [1, 2], 2, []]': set 0th tensor to [1, 2] and 2nd tensor to [] (scalar).",
+#if defined(ONERT_HAVE_HDF5) && ONERT_HAVE_HDF5 == 1
+    "'h5': read shape(s) from H5 input file. '--load' should also be provided.",
+    "if '--load' option is provided but '--shape_prepare' or '--shape_run' is not provided,",
+    "'--shape_run h5' will be used by default.",
+#endif
+    "For detailed description, please consult the description of nnfw_set_input_tensorinfo()"
+  });
+  _arser.add_argument("--output_shape")
+    .type(arser::DataType::STR)
+    .default_value("")
+    .help({"Set output shape for dump. Size should be the same.",
+           "'[0, [1, 2], 2, []]': set 0th tensor to [1, 2] and 2nd tensor to [] (scalar)."});
+  _arser.add_argument("--verbose_level", "-v")
+    .type(arser::DataType::INT32)
+    .default_value(0)
+    .help({"Verbose level",
Messages btw run don't print", + "1: prints result and message btw run", "2: prints all of messages to print"}); + _arser.add_argument("--quantize", "-q") + .type(arser::DataType::STR) + .help({"Request quantization with type", "uint8, int16: full quantization", + "int8_wo, int16_wo: weight only quantization"}); + _arser.add_argument("--qpath") + .type(arser::DataType::STR) + .help({"Path to export quantized model.", + "If it is not set, the quantized model will be exported to the same directory of the " + "original model/package with q8/q16 suffix."}); + _arser.add_argument("--codegen", "-c") + .type(arser::DataType::STR) + .help({"Target backend name for code generation", + "The target string will be used to find a backend library.", + "This string should be in the following format:", "{backend extension} + '-gen'.", + "For detailed description, please see the description of nnfw_codegen()"}); + _arser.add_argument("--cpath") + .type(arser::DataType::STR) + .help({"Path to export target-dependent model.", + "If it is not set, the generated model will be exported to the same directory of the " + "original model/package with target backend extension."}); +} - _use_single_model = true; - }; +void Args::Parse(const int argc, char **argv) +{ + try + { + _arser.parse(argc, argv); - auto process_path = [&](const std::string &path) { - struct stat sb; - if (stat(path.c_str(), &sb) == 0) + if (_arser.get("--version")) { - if (sb.st_mode & S_IFDIR) - { - _package_filename = path; - checkPackage(path); - std::cerr << "Package Filename " << path << std::endl; - } - else - { - _model_filename = path; - checkModelfile(path); - std::cerr << "Model Filename " << path << std::endl; - _use_single_model = true; - } + _print_version = true; + return; } - else + + // Require modelfile, nnpackage, or path + if (!_arser["--nnpackage"] && !_arser["--modelfile"] && !_arser["path"]) { - std::cerr << "Cannot find: " << path << "\n"; + std::cerr << "Require one of options modelfile, nnpackage, or path." << std::endl; exit(1); } - }; - auto process_output_sizes = [&](const std::string &output_sizes_json_str) { - Json::Value root; - Json::Reader reader; - if (!reader.parse(output_sizes_json_str, root, false)) + // Cannot use both single model file and nnpackage at once + if (_arser["--nnpackage"] && _arser["--modelfile"]) { - std::cerr << "Invalid JSON format for output_sizes \"" << output_sizes_json_str << "\"\n"; + std::cerr << "Cannot use both single model file and nnpackage at once." 
+                << std::endl;
+      exit(1);
+    }
 
-    auto arg_map = argArrayToMap(root);
-    for (auto &pair : arg_map)
-    {
-      uint32_t key = pair.first;
-      Json::Value &val_json = pair.second;
-      if (!val_json.isUInt())
-      {
-        std::cerr << "All the values in `output_sizes` must be unsigned integers\n";
-        exit(1);
-      }
-      uint32_t val = val_json.asUInt();
-      _output_sizes[key] = val;
-    }
-  };
+    if (_arser["--nnpackage"])
+    {
+      _package_filename = _arser.get<std::string>("--nnpackage");
+      std::cout << "Package Filename " << _package_filename << std::endl;
+      checkPackage(_package_filename);
+    }
+
+    if (_arser["--modelfile"])
+    {
+      _model_filename = _arser.get<std::string>("--modelfile");
+      std::cout << "Model Filename " << _model_filename << std::endl;
+      checkModelfile(_model_filename);
+      _use_single_model = true;
+    }
+
+    if (_arser["path"])
+    {
+      auto path = _arser.get<std::string>("path");
+      struct stat sb;
+      if (stat(path.c_str(), &sb) == 0)
+      {
+        if (sb.st_mode & S_IFDIR)
+        {
+          _package_filename = path;
+          checkPackage(path);
+          std::cout << "Package Filename " << path << std::endl;
+        }
+        else
+        {
+          _model_filename = path;
+          checkModelfile(path);
+          std::cout << "Model Filename " << path << std::endl;
+          _use_single_model = true;
+        }
+      }
+      else
+      {
+        std::cerr << "Cannot find: " << path << "\n";
+        exit(1);
+      }
+    }
 
-  auto process_shape_prepare = [&](const std::string &shape_str) {
 #if defined(ONERT_HAVE_HDF5) && ONERT_HAVE_HDF5 == 1
-    if (shape_str == "H5" || shape_str == "h5")
-    {
-      _when_to_use_h5_shape = WhenToUseH5Shape::PREPARE;
-      return;
-    }
+    if (_arser["--dump"])
+      _dump_filename = _arser.get<std::string>("--dump");
+    if (_arser["--load"])
+      _load_filename = _arser.get<std::string>("--load");
 #endif
-    try
-    {
-      handleShapeJsonParam(_shape_prepare, shape_str);
-    }
-    catch (const std::exception &e)
-    {
-      std::cerr << "error with '--shape_prepare' option: " << shape_str << std::endl;
-      exit(1);
-    }
-  };
 
-  auto process_output_shape = [&](const std::string &shape_str) {
-    try
-    {
-      handleShapeJsonParam(_output_shape, shape_str);
-    }
-    catch (const std::exception &e)
-    {
-      std::cerr << "error with '--output_shape' option: " << shape_str << std::endl;
-      exit(1);
-    }
-  };
+    if (_arser["--dump:raw"])
+      _dump_raw_filename = _arser.get<std::string>("--dump:raw");
+    if (_arser["--dump_input:raw"])
+      _dump_raw_input_filename = _arser.get<std::string>("--dump_input:raw");
+    if (_arser["--load:raw"])
+      _load_raw_filename = _arser.get<std::string>("--load:raw");
+
+    if (_arser["--output_sizes"])
+    {
+      auto output_sizes_json_str = _arser.get<std::string>("--output_sizes");
+      Json::Value root;
+      Json::Reader reader;
+      if (!reader.parse(output_sizes_json_str, root, false))
+      {
+        std::cerr << "Invalid JSON format for output_sizes \"" << output_sizes_json_str << "\"\n";
+        exit(1);
+      }
+
+      auto arg_map = argArrayToMap(root);
+      for (auto &pair : arg_map)
+      {
+        uint32_t key = pair.first;
+        Json::Value &val_json = pair.second;
+        if (!val_json.isUInt())
+        {
+          std::cerr << "All the values in `output_sizes` must be unsigned integers\n";
+          exit(1);
+        }
+        uint32_t val = val_json.asUInt();
+        _output_sizes[key] = val;
+      }
+    }
+
+    _num_runs = _arser.get<int32_t>("--num_runs");
+    _fixed_input = _arser.get<bool>("--fixed_input");
+    _force_float = _arser.get<bool>("--force_float");
+    _warmup_runs = _arser.get<int32_t>("--warmup_runs");
+    _minmax_runs = _arser.get<int32_t>("--minmax_runs");
+    _run_delay = _arser.get<int32_t>("--run_delay");
+    _gpumem_poll = _arser.get<bool>("--gpumem_poll");
+    _mem_poll = _arser.get<bool>("--mem_poll");
+    _write_report = _arser.get<bool>("--write_report");
+
+    // calling, e.g., "onert_run .. --shape_prepare .. --shape_run .." should theoretically
+    // work but allowing both options together on command line makes the usage and implementation
+    // of onert_run too complicated. Therefore let's not allow those options together.
+    auto shape_prepare = _arser.get<std::string>("--shape_prepare");
+    auto shape_run = _arser.get<std::string>("--shape_run");
+    if (!shape_prepare.empty() && !shape_run.empty())
+    {
+      std::cerr << "Two options '--shape_prepare' and '--shape_run' cannot be given at once"
+                << std::endl;
+      exit(1);
+    }
 
-  auto process_shape_run = [&](const std::string &shape_str) {
-#if defined(ONERT_HAVE_HDF5) && ONERT_HAVE_HDF5 == 1
-    if (shape_str == "H5" || shape_str == "h5")
-    {
-      _when_to_use_h5_shape = WhenToUseH5Shape::RUN;
-      return;
-    }
-#endif
-    try
-    {
-      handleShapeJsonParam(_shape_run, shape_str);
-    }
-    catch (const std::exception &e)
-    {
-      std::cerr << "error with '--shape_run' option: " << shape_str << std::endl;
-      exit(1);
-    }
-  };
+    if (!shape_prepare.empty())
+    {
+#if defined(ONERT_HAVE_HDF5) && ONERT_HAVE_HDF5 == 1
+      if (shape_prepare == "H5" || shape_prepare == "h5")
+      {
+        _when_to_use_h5_shape = WhenToUseH5Shape::PREPARE;
+      }
+      else
+#endif
+      {
+        try
+        {
+          handleShapeJsonParam(_shape_prepare, shape_prepare);
+        }
+        catch (const std::exception &e)
+        {
+          std::cerr << "error with '--shape_prepare' option: " << shape_prepare << std::endl;
+          exit(1);
+        }
+      }
+    }
+
+    if (!shape_run.empty())
+    {
+#if defined(ONERT_HAVE_HDF5) && ONERT_HAVE_HDF5 == 1
+      if (shape_run == "H5" || shape_run == "h5")
+      {
+        _when_to_use_h5_shape = WhenToUseH5Shape::RUN;
+      }
+      else
+#endif
+      {
+        try
+        {
+          handleShapeJsonParam(_shape_run, shape_run);
+        }
+        catch (const std::exception &e)
+        {
+          std::cerr << "error with '--shape_run' option: " << shape_run << std::endl;
+          exit(1);
+        }
+      }
+    }
 
-  // General options
-  po::options_description general("General options", 100);
-
-  // clang-format off
-  general.add_options()
-    ("help,h", "Print available options")
-    ("version", "Print version and exit immediately")
-    ("nnpackage", po::value<std::string>()->notifier(process_nnpackage), "NN Package file(directory) name")
-    ("modelfile", po::value<std::string>()->notifier(process_modelfile), "NN Model filename")
-    ("path", po::value<std::string>()->notifier(process_path), "NN Package or NN Modelfile path")
-#if defined(ONERT_HAVE_HDF5) && ONERT_HAVE_HDF5 == 1
-    ("dump,d", po::value<std::string>()->default_value("")->notifier([&](const auto &v) { _dump_filename = v; }), "Output filename")
-    ("load,l", po::value<std::string>()->default_value("")->notifier([&](const auto &v) { _load_filename = v; }), "Input filename")
-#endif
-    ("dump:raw", po::value<std::string>()->default_value("")->notifier([&](const auto &v) { _dump_raw_filename = v; }), "Raw Output filename")
-    ("dump_input:raw", po::value<std::string>()->default_value("")->notifier([&](const auto &v) { _dump_raw_input_filename = v; }), "Raw Input filename for dump")
-    ("load:raw", po::value<std::string>()->default_value("")->notifier([&](const auto &v) { _load_raw_filename = v; }), "Raw Input filename")
-    ("output_sizes", po::value<std::string>()->notifier(process_output_sizes),
-     "The output buffer size in JSON 1D array\n"
-     "If not given, the model's output sizes are used\n"
-     "e.g. '[0, 40, 2, 80]' to set 0th tensor to 40 and 2nd tensor to 80.\n")
-    ("num_runs,r", po::value<int>()->default_value(1)->notifier([&](const auto &v) { _num_runs = v; }), "The number of runs")
-    ("fixed_input", "Use same random input data on each run (avaliable on random input)")
-    ("force_float", "Ignore model's input and output type and use float type buffer")
-    ("warmup_runs,w", po::value<int>()->default_value(0)->notifier([&](const auto &v) { _warmup_runs = v; }), "The number of warmup runs")
-    ("minmax_runs", po::value<int>()->default_value(0)->notifier([&](const auto &v) { _minmax_runs = v; }), "The number of minmax recording runs before full quantization")
-    ("run_delay,t", po::value<int>()->default_value(-1)->notifier([&](const auto &v) { _run_delay = v; }), "Delay time(us) between runs (as default no delay")
-    ("gpumem_poll,g", po::value<bool>()->default_value(false)->notifier([&](const auto &v) { _gpumem_poll = v; }), "Check gpu memory polling separately")
-    ("mem_poll,m", po::value<bool>()->default_value(false)->notifier([&](const auto &v) { _mem_poll = v; }), "Check memory polling")
-    ("write_report,p", po::value<bool>()->default_value(false)->notifier([&](const auto &v) { _write_report = v; }),
-     "Write report\n"
-     "{exec}-{nnpkg|modelfile}-{backend}.csv will be generated.\n"
-     "e.g. onert_run-UNIT_Add_000-acl_cl.csv.\n"
-     "{nnpkg|modelfile} name may be changed to realpath if you use symbolic-link.")
-    ("shape_prepare", po::value<std::string>()->default_value("[]")->notifier(process_shape_prepare),
-     "Please refer to the description of 'shape_run'")
-    ("shape_run", po::value<std::string>()->default_value("[]")->notifier(process_shape_run),
-     "'--shape_prepare: set shape of tensors before compilation (before calling nnfw_prepare()).\n"
-     "'--shape_run: set shape of tensors before running (before calling nnfw_run()).\n"
-     "Allowed value:.\n"
-     "'[0, [1, 2], 2, []]': set 0th tensor to [1, 2] and 2nd tensor to [] (scalar).\n"
-#if defined(ONERT_HAVE_HDF5) && ONERT_HAVE_HDF5 == 1
-     "'h5': read shape(s) from H5 input file. '--load' should also be provided.\n"
-     "if '--load' option is provided but '--shape_prepare' or '--shape_run' is not provided,\n"
-     "'--shape_run h5' will be used by default.\n"
-#endif
-     "For detailed description, please consutl the description of nnfw_set_input_tensorinfo()\n"
-    )
-    ("output_shape", po::value<std::string>()->default_value("[]")->notifier(process_output_shape),
-     "Set output shape for dump.\n"
-     "Size should be same.\n"
-     "'[0, [1, 2], 2, []]': set 0th tensor to [1, 2] and 2nd tensor to [] (scalar).\n")
-    ("verbose_level,v", po::value<int>()->default_value(0)->notifier([&](const auto &v) { _verbose_level = v; }),
-     "Verbose level\n"
Messages btw run don't print\n" - "1: prints result and message btw run\n" - "2: prints all of messages to print\n") - ("quantize,q", po::value()->default_value("")->notifier([&](const auto &v) { _quantize = v; }), - "Request quantization with type\n" - "uint8, int16: full quantization\n" - "int8_wo, int16_wo: weight only quantization\n") - ("qpath", po::value()->default_value("")->notifier([&](const auto &v) { _quantized_model_path = v; }), - "Path to export quantized model.\n" - "If it is not set, the quantized model will be exported to the same directory of the original model/package with q8/q16 suffix.") - ("codegen,c", po::value()->default_value("")->notifier([&](const auto &v) { _codegen = v; }), - "Target backend name for code generation\n" - "The target string will be used to find a backend library.\n" - "This string should be in the following format:\n" - "{backend extension} + '-gen'.\n" - "For detailed description, please see the description of nnfw_codegen()") - ("cpath", po::value()->default_value("")->notifier([&](const auto &v) { _codegen_model_path = v; }), - "Path to export target-dependent model.\n" - "If it is not set, the generated model will be exported to the same directory of the original model/package with target backend extension.") - ; - // clang-format on - - _options.add(general); - _positional.add("path", -1); -} - -void Args::Parse(const int argc, char **argv) -{ - po::variables_map vm; - po::store(po::command_line_parser(argc, argv).options(_options).positional(_positional).run(), - vm); - - if (vm.count("help")) - { - std::cout << "onert_run\n\n"; - std::cout << "Usage: " << argv[0] << " path to nnpackage root directory []\n\n"; - std::cout << _options; - std::cout << "\n"; - - exit(0); - } - - if (vm.count("version")) - { - _print_version = true; - return; - } - - { - auto conflicting_options = [&](const std::string &o1, const std::string &o2) { - if ((vm.count(o1) && !vm[o1].defaulted()) && (vm.count(o2) && !vm[o2].defaulted())) + try { - throw boost::program_options::error(std::string("Two options '") + o1 + "' and '" + o2 + - "' cannot be given at once."); + handleShapeJsonParam(_shape_run, shape_run); } - }; - - // calling, e.g., "onert_run .. -- shape_prepare .. --shape_run .." should theoretically - // work but allowing both options together on command line makes the usage and implemenation - // of onert_run too complicated. Therefore let's not allow those option together. 
- conflicting_options("shape_prepare", "shape_run"); - - // Cannot use both single model file and nnpackage at once - conflicting_options("modelfile", "nnpackage"); + catch (const std::exception &e) + { + std::cerr << "error with '--shape_run' option: " << shape_run << std::endl; + exit(1); + } + } - // Require modelfile, nnpackage, or path - if (!vm.count("modelfile") && !vm.count("nnpackage") && !vm.count("path")) - throw boost::program_options::error( - std::string("Require one of options modelfile, nnpackage, or path.")); - } + auto output_shape = _arser.get("--output_shape"); + if (!output_shape.empty()) + { + try + { + handleShapeJsonParam(_output_shape, output_shape); + } + catch (const std::exception &e) + { + std::cerr << "error with '--output_shape' option: " << output_shape << std::endl; + exit(1); + } + } - if (vm.count("fixed_input")) - { - _fixed_input = true; - } - if (vm.count("force_float")) - _force_float = true; + _verbose_level = _arser.get("--verbose_level"); - try - { - po::notify(vm); - } - catch (const std::bad_cast &e) - { - std::cerr << "Bad cast error - " << e.what() << '\n'; - exit(1); - } + if (_arser["--quantize"]) + _quantize = _arser.get("--quantize"); + if (_arser["--qpath"]) + _quantized_model_path = _arser.get("--qpath"); + if (_arser["--codegen"]) + _codegen = _arser.get("--codegen"); + if (_arser["--cpath"]) + _codegen_model_path = _arser.get("--cpath"); - // This must be run after `notify` as `_warm_up_runs` must have been processed before. - if (vm.count("mem_poll")) - { + // This must be run after parsing as `_warm_up_runs` must have been processed before. // Instead of EXECUTE to avoid overhead, memory polling runs on WARMUP if (_mem_poll && _warmup_runs == 0) { _warmup_runs = 1; } } + catch (const std::bad_cast &e) + { + std::cerr << "Bad cast error - " << e.what() << '\n'; + exit(1); + } } bool Args::shapeParamProvided() diff --git a/tests/tools/onert_run/src/args.h b/tests/tools/onert_run/src/args.h index 9e474c2a78d..a55cd627a82 100644 --- a/tests/tools/onert_run/src/args.h +++ b/tests/tools/onert_run/src/args.h @@ -20,11 +20,10 @@ #include #include #include -#include -#include "types.h" +#include -namespace po = boost::program_options; +#include "types.h" namespace onert_run { @@ -84,8 +83,7 @@ class Args void Parse(const int argc, char **argv); private: - po::positional_options_description _positional; - po::options_description _options; + arser::Arser _arser; std::string _package_filename; std::string _model_filename; diff --git a/tests/tools/onert_run/src/onert_run.cc b/tests/tools/onert_run/src/onert_run.cc index b32242a4eb2..1fb452d6783 100644 --- a/tests/tools/onert_run/src/onert_run.cc +++ b/tests/tools/onert_run/src/onert_run.cc @@ -30,9 +30,9 @@ #include "ruy/profiler/profiler.h" #endif -#include #include #include +#include #include #include #include @@ -507,11 +507,6 @@ int main(const int argc, char **argv) return 0; } - catch (boost::program_options::error &e) - { - std::cerr << "E: " << e.what() << std::endl; - exit(-1); - } catch (std::runtime_error &e) { std::cerr << "E: Fail to run by runtime error:" << e.what() << std::endl;