Skip to content

Commit

Permalink
Merge pull request #32 from MegEngine/update-internal
Browse files Browse the repository at this point in the history
Update internal
  • Loading branch information
yeasoon authored Jan 11, 2023
2 parents 8e8a2f8 + 16fa9ee commit 987537b
Show file tree
Hide file tree
Showing 115 changed files with 7,572 additions and 367 deletions.
1 change: 1 addition & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ MegCC supports Arm64/ArmV7/X86/BareMatal backend. You may want to check [support
* Download the release compiler suite from [release page](https://github.com/MegEngine/MegCC/releases)
* Compile from source, please follow the [compiler doc](compiler/README.md)
* Build the release tar, please follow the [release doc](doc/how-to-release.md)
* To get benchmarks of different models, please refer to [benchmark](benchmark/README.md)

#### How to use MegCC

Expand Down
4 changes: 4 additions & 0 deletions benchmark/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
model/benchmark_*
model/generated_models
config
output
97 changes: 97 additions & 0 deletions benchmark/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
cmake_minimum_required(VERSION 3.15.2)
set(CMAKE_EXPORT_COMPILE_COMMANDS
    ON
    CACHE INTERNAL "")

project(Benchmarker)

option(ENABLE_MEGENGINE_FRAMEWORK "build benchmark for megengine" OFF)
# Generate build_config.h so the C++ sources can test ENABLE_MEGENGINE_FRAMEWORK.
configure_file(src/build_config.h.in
               ${CMAKE_CURRENT_BINARY_DIR}/genfiles/build_config.h)
# set megcc lib: RUNTIME_KERNEL_DIR must point at a MegCC kernel build that
# provides runtime/install/lib/libTinyNN.a and the matching headers.
if(NOT DEFINED RUNTIME_KERNEL_DIR)
  message(
    FATAL_ERROR
      "build MegCC runtime kernel dir RUNTIME_KERNEL_DIR is empty, use -DRUNTIME_KERNEL_DIR=your_model_kernel_dir to set"
  )
else()
  message(STATUS "build MegCC runtime with kernel dir ${RUNTIME_KERNEL_DIR}")
endif()

add_library(TinyNN STATIC IMPORTED)
set_target_properties(
  TinyNN PROPERTIES IMPORTED_LOCATION
                    "${RUNTIME_KERNEL_DIR}/runtime/install/lib/libTinyNN.a")
if(ENABLE_MEGENGINE_FRAMEWORK)
  message(STATUS "build benchmark with megengine ${ENABLE_MEGENGINE_FRAMEWORK}")
  # fix: help text typo "bechmarker" -> "benchmarker"
  option(X86_BACKEND "Build benchmarker with X86 megengine lib" ON)
  # set megengine lib
  if(NOT DEFINED MEGENGINE_INSTALL_DIR)
    message(
      FATAL_ERROR
        "MEGENGINE_INSTALL_DIR is empty use -DMEGENGINE_INSTALL_DIR=your_megengine_install_dir to set"
    )
  else()
    message(STATUS "MEGENGINE_INSTALL_DIR is ${MEGENGINE_INSTALL_DIR}")
  endif()
  add_library(mgb_imported INTERFACE IMPORTED)

  # Quote the expansion so an empty CMAKE_SYSTEM_PROCESSOR cannot break if().
  if("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "aarch64")
    set(MGE_INSTALL_LIBS
        ${MEGENGINE_INSTALL_DIR}/lite/lib/aarch64/liblite_static_all_in_one.a)
    target_link_libraries(mgb_imported INTERFACE ${MGE_INSTALL_LIBS})
  elseif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "armv7-a")
    set(MGE_INSTALL_LIBS
        ${MEGENGINE_INSTALL_DIR}/lite/lib/armv7/liblite_static_all_in_one.a)
    target_link_libraries(mgb_imported INTERFACE ${MGE_INSTALL_LIBS})
  elseif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "riscv64")
    set(MGE_INSTALL_LIBS
        ${MEGENGINE_INSTALL_DIR}/lite/lib/riscv64/liblite_static_all_in_one.a)
    target_link_libraries(mgb_imported INTERFACE ${MGE_INSTALL_LIBS})
  else()
    if(X86_BACKEND)
      if("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "x86_64"
         OR "${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "AMD64")
        set(MKL_LIBS
            ${PROJECT_SOURCE_DIR}/../third_party/MegEngine/third_party/mkl/x86_64/lib/libmkl_core.a
            ${PROJECT_SOURCE_DIR}/../third_party/MegEngine/third_party/mkl/x86_64/lib/libmkl_sequential.a
            ${PROJECT_SOURCE_DIR}/../third_party/MegEngine/third_party/mkl/x86_64/lib/libmkl_intel_ilp64.a
        )
        set(MGE_INSTALL_LIBS
            ${MEGENGINE_INSTALL_DIR}/lite/lib/x86_64/liblite_static_all_in_one.a)
        # ILP64 MKL interface requires this define on every consumer.
        target_compile_definitions(mgb_imported INTERFACE -DMKL_ILP64)
        # WARNING: i386 is not tested locally
      elseif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "i386"
             OR "${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "i686")
        # fix: the sequential/interface libs previously pointed into the
        # x86_64 tree; NOTE(review): confirm the exact i386 MKL lib names.
        set(MKL_LIBS
            ${PROJECT_SOURCE_DIR}/../third_party/MegEngine/third_party/mkl/i386/lib/libmkl_core.a
            ${PROJECT_SOURCE_DIR}/../third_party/MegEngine/third_party/mkl/i386/lib/libmkl_sequential.a
            ${PROJECT_SOURCE_DIR}/../third_party/MegEngine/third_party/mkl/i386/lib/libmkl_intel_32.a
        )
        set(MGE_INSTALL_LIBS
            ${MEGENGINE_INSTALL_DIR}/lite/lib/i386/liblite_static_all_in_one.a)
      endif()
      set(MKL_DNN_LIBS ${MEGENGINE_INSTALL_DIR}/lib/libdnnl.a
                       ${MEGENGINE_INSTALL_DIR}/lib/libmkldnn.a)

      if(UNIX AND NOT APPLE)
        # MKL static archives have circular references: group them so the GNU
        # linker resolves symbols across the whole set.
        target_link_libraries(
          mgb_imported
          INTERFACE ${MGE_INSTALL_LIBS} ${MKL_DNN_LIBS} -Wl,--start-group -ldl
                    ${MKL_LIBS} -Wl,--end-group)
      else()
        target_link_libraries(mgb_imported INTERFACE ${MGE_INSTALL_LIBS}
                                                     ${MKL_DNN_LIBS} ${MKL_LIBS})
      endif()
    else()
      set(MGE_INSTALL_LIBS ${MEGENGINE_INSTALL_DIR}/lib/libmegengine.a
                           ${MEGENGINE_INSTALL_DIR}/lib/libflatbuffers.a)
      target_link_libraries(mgb_imported INTERFACE ${MGE_INSTALL_LIBS})
    endif()

  endif()

  target_include_directories(mgb_imported
                             INTERFACE ${MEGENGINE_INSTALL_DIR}/include)
endif()
# benchmarker config
# NOTE(review): an explicit source list would be preferable to a glob;
# CONFIGURE_DEPENDS at least makes CMake rescan the glob on each build (3.12+).
file(GLOB_RECURSE SOURCES CONFIGURE_DEPENDS main.cpp src/*.cpp src/*.h)
add_executable(benchmarker ${SOURCES})
target_include_directories(
  benchmarker
  PUBLIC $<BUILD_INTERFACE:${RUNTIME_KERNEL_DIR}/runtime/install/include>
         $<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}/genfiles>)
# fix: link pthread via the portable Threads package instead of a raw
# "-pthread" string, and give target_link_libraries an explicit visibility
# keyword (the keyword-less signature has legacy semantics).
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)
if(ENABLE_MEGENGINE_FRAMEWORK)
  target_link_libraries(benchmarker PRIVATE Threads::Threads TinyNN
                                            mgb_imported)
else()
  target_link_libraries(benchmarker PRIVATE Threads::Threads TinyNN)
endif()
message(STATUS "${CMAKE_TOOLCHAIN_FILE}")
if(CMAKE_TOOLCHAIN_FILE)
  if(ANDROID)
    # Android builds need the system log library.
    target_link_libraries(benchmarker PRIVATE log)
  endif()
endif()

# fix: benchmarker is an executable, so its artifact type is RUNTIME —
# LIBRARY only matches shared/module libraries, so nothing was installed.
install(TARGETS benchmarker RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX})
80 changes: 80 additions & 0 deletions benchmark/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
# How to use megcc benchmark

## introduction
megcc benchmark is an easy tool to get the benchmark results of different models in megcc
the file structure is shown below:
```
├── clean.sh
├── CMakeLists.txt
├── main.cpp
├── model
│ ├── model_arm.json
│ ├── model_riscv.json
│ ├── model_x86.json
│ └── request.txt
├── python
│ ├── example.py
│ ├── format.sh
│ └── src
│ ├── benchmark.py
│ └── models.py
├── README.md
├── src
│ ├── benchmark.h
│ ├── build_config.h.in
│ ├── CCbenchmark.cpp
│ ├── CCbenchmark.h
│ ├── MGEbenchmark.cpp
│ └── MGEbenchmark.h
└── tools
├── cc_analysis.py
└── inference_visual.py
```

`src` contains a C++ application that runs the benchmark on different platforms.
`python` provides the model conversion, other related preparation work and the benchmarker example.
`tools` contains some useful scripts to analyze benchmark results.
## supported model
mobilenetv2, resnet18, resnet50, efficientnetb0, shufflenetv2, vgg11, vgg16
## request
```bash
mgeconvert > v.1.0.2
onnx==1.11.0
torch==1.10.0
# or
git clone https://github.com/MegEngine/mgeconvert.git
cd mgeconvert
git checkout master
python3 -m pip install . --user --install-option="--targets=onnx"

```
mgeconvert can be installed by the following command:
```bash
git clone https://github.com/MegEngine/mgeconvert.git
cd mgeconvert
git checkout master
python3 -m pip install . --user --install-option="--targets=onnx"

```
## get model and run benchmark example
``` bash
cd megcc/benchmark
export MEGCC_MGB_TO_TINYNN_PATH=<your_mgb_to_tinynn_path>
python3 python/example.py
```
if you want to run on other platforms, please refer to the example and add your new run_platform_xxx function in BenchmarkRunner;
the example provides an SSH remote-device test template

## analysis megcc log

the `output` directory is generated by `example.py`

### visualize the inference result of different model
```bash
python3 benchmark/tools/inference_visual.py benchmark/output -o figure_dir
```

### visualize the profile result of different kernel in different model
```bash
python3 benchmark/tools/cc_analysis.py benchmark/output -o figure_dir
```
3 changes: 3 additions & 0 deletions benchmark/clean.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
#!/bin/bash -e
# Remove everything generated by a benchmark run: build trees, example.py
# output, dumped config, and generated benchmark kernels/models.
set -x
rm -rf ./build* ./output ./config ./model/benchmark* ./model/generate*
64 changes: 64 additions & 0 deletions benchmark/main.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
/**
 * \file benchmark/main.cpp
 *
 * This file is part of MegCC, a deep learning compiler developed by Megvii.
 *
 * \copyright Copyright (c) 2021-2022 Megvii Inc. All rights reserved.
 */
#include <cmath>
#include <cstdio>
#include <memory>
#include <string>
#include <vector>
#include "src/CCbenchmark.h"
#include "src/MGEbenchmark.h"

using namespace megcc;
using namespace Benchmark;

//! Entry point: benchmarker <input_model> [--profile] [--mge]
int main(int argc, char** argv) {
    // fix: the original condition `argc < 2 && argc > 4` can never be true,
    // so a missing model path slipped through and argv[1] was read out of
    // bounds. Accept 2..4 arguments, reject everything else.
    if (argc < 2 || argc > 4) {
        fprintf(stderr, "cmdline error, please run with:\n");
        fprintf(stderr, "benchmarker <input_model> [options] ... \n");
        fprintf(stderr,
                "tips:\n\t you can use --profile and --mge to profile model "
                "and enable megengine framework (\"megcc\" is default)\n");
        return -1;
    }
    int log_level = 3;                // 0 enables profiling output
    std::string framework = "megcc";  // which benchmark backend to run
    std::string model_path = argv[1];
    // Parse the optional flags; unknown options are reported but skipped.
    for (int idx = 2; idx < argc; ++idx) {
        std::string args = argv[idx];
        if (args == "--profile") {
            log_level = 0;
        } else if (args == "--mge") {
            framework = "mge";
        } else {
            fprintf(stderr, "invalid option: %s\n", argv[idx]);
        }
    }
    std::vector<std::shared_ptr<Benchmarker>> benchmarkers;
    if (framework == "megcc") {
        benchmarkers.push_back(
                std::make_shared<CCBenchmarker>(model_path, log_level));
    }
#if ENABLE_MEGENGINE_FRAMEWORK
    else if (framework == "mge") {
        benchmarkers.push_back(
                std::make_shared<MGEBenchmarker>(model_path, log_level));
    }
#endif
    else {
        fprintf(stderr,
                "unsupport framework: %s, megcc, mge(export "
                "ENABLE_MEGENGINE_FRAMEWORK=ON) is supported\n",
                framework.c_str());
        // fix: report the failure through the exit code instead of falling
        // through with an empty benchmarker list and returning 0.
        return -1;
    }

    for (size_t i = 0; i < benchmarkers.size(); ++i) {
        benchmarkers[i]->load_model();
        benchmarkers[i]->profile();
    }

    return 0;
}
47 changes: 47 additions & 0 deletions benchmark/model/model_arm.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
{
"dump_dir": "./benchmark_kernel_arm/",
"models": [
{
"model_name": "mobilenetv2",
"model_path": "./generated_models/mobilenetv2.mge",
"input_shape_str": "data=(1,3,224,224)",
"enable_nchw44": true
},
{
"model_name": "resnet18",
"model_path": "./generated_models/resnet18.mge",
"input_shape_str": "data=(1,3,224,224)",
"enable_nchw44": true
},
{
"model_name": "resnet50",
"model_path": "./generated_models/resnet50.mge",
"input_shape_str": "data=(1,3,224,224)",
"enable_nchw44": true
},
{
"model_name": "efficientnetb0",
"model_path": "./generated_models/efficientnetb0.mge",
"input_shape_str": "data=(1,3,256,256)",
"enable_nchw44": true
},
{
"model_name": "shufflenetv2",
"model_path": "./generated_models/shufflenetv2.mge",
"input_shape_str": "data=(1,3,224,224)",
"enable_nchw44": true
},
{
"model_name": "vgg11",
"model_path": "./generated_models/vgg11.mge",
"input_shape_str": "data=(1,3,224,224)",
"enable_nchw44": true
},
{
"model_name": "vgg16",
"model_path": "./generated_models/vgg16.mge",
"input_shape_str": "data=(1,3,224,224)",
"enable_nchw44": true
}
]
}
40 changes: 40 additions & 0 deletions benchmark/model/model_riscv.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
{
"dump_dir": "./benchmark_kernel_riscv/",
"models": [
{
"model_name": "mobilenetv2",
"model_path": "./generated_models/mobilenetv2.mge",
"input_shape_str": "data=(1,3,224,224)"
},
{
"model_name": "resnet18",
"model_path": "./generated_models/resnet18.mge",
"input_shape_str": "data=(1,3,224,224)"
},
{
"model_name": "resnet50",
"model_path": "./generated_models/resnet50.mge",
"input_shape_str": "data=(1,3,224,224)"
},
{
"model_name": "efficientnetb0",
"model_path": "./generated_models/efficientnetb0.mge",
"input_shape_str": "data=(1,3,256,256)"
},
{
"model_name": "shufflenetv2",
"model_path": "./generated_models/shufflenetv2.mge",
"input_shape_str": "data=(1,3,224,224)"
},
{
"model_name": "vgg11",
"model_path": "./generated_models/vgg11.mge",
"input_shape_str": "data=(1,3,224,224)"
},
{
"model_name": "vgg16",
"model_path": "./generated_models/vgg16.mge",
"input_shape_str": "data=(1,3,224,224)"
}
]
}
40 changes: 40 additions & 0 deletions benchmark/model/model_x86.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
{
"dump_dir": "./benchmark_kernel_x86/",
"models": [
{
"model_name": "mobilenetv2",
"model_path": "./generated_models/mobilenetv2.mge",
"input_shape_str": "data=(1,3,224,224)"
},
{
"model_name": "resnet18",
"model_path": "./generated_models/resnet18.mge",
"input_shape_str": "data=(1,3,224,224)"
},
{
"model_name": "resnet50",
"model_path": "./generated_models/resnet50.mge",
"input_shape_str": "data=(1,3,224,224)"
},
{
"model_name": "efficientnetb0",
"model_path": "./generated_models/efficientnetb0.mge",
"input_shape_str": "data=(1,3,256,256)"
},
{
"model_name": "shufflenetv2",
"model_path": "./generated_models/shufflenetv2.mge",
"input_shape_str": "data=(1,3,224,224)"
},
{
"model_name": "vgg11",
"model_path": "./generated_models/vgg11.mge",
"input_shape_str": "data=(1,3,224,224)"
},
{
"model_name": "vgg16",
"model_path": "./generated_models/vgg16.mge",
"input_shape_str": "data=(1,3,224,224)"
}
]
}
Loading

0 comments on commit 987537b

Please sign in to comment.