diff --git a/onnxruntime/python/tools/tensorrt/perf/mem_test/main.cpp b/onnxruntime/python/tools/tensorrt/perf/mem_test/main.cpp
index 61d5440690e8c..ec30b8ba0985d 100644
--- a/onnxruntime/python/tools/tensorrt/perf/mem_test/main.cpp
+++ b/onnxruntime/python/tools/tensorrt/perf/mem_test/main.cpp
@@ -6,7 +6,6 @@
 #include
 #include
 #include
-#include
 #include

 void run_ort_trt2() {
@@ -135,7 +134,7 @@ void run_ort_trt() {
   session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_EXTENDED);

-  const char* model_path = "squeezenet.onnx";
+  const char* model_path = "/data/ep-perf-models/onnx-zoo-models/squeezenet1.0-7/squeezenet/model.onnx";

   Ort::ThrowOnError(api.CreateTensorRTProviderOptions(&tensorrt_options));
   std::unique_ptr rel_trt_options(
diff --git a/onnxruntime/python/tools/tensorrt/perf/mem_test/run.sh b/onnxruntime/python/tools/tensorrt/perf/mem_test/run.sh
index 4bd3345ed066f..dd53fe6127462 100755
--- a/onnxruntime/python/tools/tensorrt/perf/mem_test/run.sh
+++ b/onnxruntime/python/tools/tensorrt/perf/mem_test/run.sh
@@ -14,9 +14,7 @@
 s) ORT_SOURCE=${OPTARG};;
 esac
 done

-ONNX_MODEL_TAR_URL="https://github.com/onnx/models/raw/main/archive/vision/classification/squeezenet/model/squeezenet1.0-7.tar.gz"
-MODEL_TAR_NAME="squeezenet1.0-7.tar.gz"
-ONNX_MODEL="squeezenet.onnx"
+ONNX_MODEL="/data/ep-perf-models/onnx-zoo-models/squeezenet1.0-7/squeezenet/model.onnx"
 ASAN_OPTIONS="protect_shadow_gap=0:new_delete_type_mismatch=0:log_path=asan.log"
 export LD_LIBRARY_PATH=${ORT_BINARY_PATH}
@@ -48,15 +46,11 @@
 cp ../squeezenet_calibration.flatbuffers .
 cmake ..
 make -j
-wget ${ONNX_MODEL_TAR_URL} -O squeezenet1.0-7.tar.gz
-tar -xzf ${MODEL_TAR_NAME} --strip-components=1
-mv model.onnx ${ONNX_MODEL}
-rm ${MODEL_TAR_NAME}
 mkdir result

 # Run valgrind
 echo $(date +"%Y-%m-%d %H:%M:%S") '[valgrind] Starting memcheck with' ${ONNX_MODEL}
-valgrind --leak-check=full --show-leak-kinds=all --log-file=valgrind.log ${ORT_SOURCE}/build/Linux/Release/onnxruntime_perf_test -e tensorrt -r 1 ${ONNX_MODEL}
+valgrind --leak-check=full --show-leak-kinds=definite --max-threads=3000 --num-callers=20 --keep-debuginfo=yes --log-file=valgrind.log ${ORT_SOURCE}/build/Linux/Release/onnxruntime_perf_test -e tensorrt -r 1 ${ONNX_MODEL}

 echo $(date +"%Y-%m-%d %H:%M:%S") '[valgrind] Analyzing valgrind log'
 found_leak_summary=false
diff --git a/onnxruntime/python/tools/tensorrt/perf/mem_test/run_mem_test_docker.sh b/onnxruntime/python/tools/tensorrt/perf/mem_test/run_mem_test_docker.sh
index f041608e24ed8..4e94c63ee6c25 100755
--- a/onnxruntime/python/tools/tensorrt/perf/mem_test/run_mem_test_docker.sh
+++ b/onnxruntime/python/tools/tensorrt/perf/mem_test/run_mem_test_docker.sh
@@ -24,4 +24,4 @@
 then
   BUILD_ORT_LATEST="true"
 fi
-docker run --rm --gpus all -v $MEM_TEST_DIR:$DOCKER_MEM_TEST_DIR $DOCKER_IMAGE /bin/bash $DOCKER_MEM_TEST_DIR'run.sh' -p $DOCKER_MEM_TEST_DIR -o $DOCKER_ORT_LIBS -s $DOCKER_ORT_SOURCE -l $BUILD_ORT_LATEST
+docker run --rm --gpus all -v $MEM_TEST_DIR:$DOCKER_MEM_TEST_DIR -v /data/ep-perf-models:/data/ep-perf-models $DOCKER_IMAGE /bin/bash $DOCKER_MEM_TEST_DIR'run.sh' -p $DOCKER_MEM_TEST_DIR -o $DOCKER_ORT_LIBS -s $DOCKER_ORT_SOURCE -l $BUILD_ORT_LATEST
diff --git a/tools/ci_build/github/azure-pipelines/linux-gpu-tensorrt-daily-perf-pipeline.yml b/tools/ci_build/github/azure-pipelines/linux-gpu-tensorrt-daily-perf-pipeline.yml
index 47061965efe26..e75bb68a8bfeb 100644
--- a/tools/ci_build/github/azure-pipelines/linux-gpu-tensorrt-daily-perf-pipeline.yml
+++ b/tools/ci_build/github/azure-pipelines/linux-gpu-tensorrt-daily-perf-pipeline.yml
@@ -126,6 +126,13 @@ jobs:
   - script: 'python3 -m pip install pandas azure-kusto-data[pandas] azure-kusto-ingest[pandas] coloredlogs'
     displayName: 'Install dashboard dependencies'

+  - script: |
+      az --version || {
+        echo "Azure CLI not found, installing..."
+        curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
+      }
+    displayName: 'Check and Install Azure CLI'
+
   - task: AzureCLI@2
     displayName: 'Azure CLI Post to Dashboard'
     inputs:
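For reference, a minimal sketch of how the updated valgrind invocation above could be reproduced outside the CI container. The /data/ep-perf-models path is a mount on the perf CI host; the ORT_SOURCE and ONNX_MODEL values below are hypothetical local paths, not part of this change.

    # Hypothetical local setup: adjust ORT_SOURCE and ONNX_MODEL to your environment.
    ORT_SOURCE=$HOME/onnxruntime          # local checkout with a Release build of onnxruntime_perf_test
    ONNX_MODEL=./model.onnx               # local copy of the SqueezeNet model
    # Same flags as the updated run.sh: report only definite leaks, keep debug info,
    # raise the thread cap, and record deeper call stacks in valgrind.log.
    valgrind --leak-check=full --show-leak-kinds=definite --max-threads=3000 \
      --num-callers=20 --keep-debuginfo=yes --log-file=valgrind.log \
      "${ORT_SOURCE}/build/Linux/Release/onnxruntime_perf_test" -e tensorrt -r 1 "${ONNX_MODEL}"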