
Commit 940ae33
Merge remote-tracking branch 'origin' into prathikrao/concat-training-bfloat16-2
Prathik Rao committed Nov 6, 2023
2 parents 5b77d98 + 3b63d85 commit 940ae33
Showing 10 changed files with 30 additions and 11 deletions.
2 changes: 1 addition & 1 deletion cmake/deps.txt
@@ -16,7 +16,7 @@ abseil_cpp;https://github.com/abseil/abseil-cpp/archive/refs/tags/20230802.0.zip
cxxopts;https://github.com/jarro2783/cxxopts/archive/3c73d91c0b04e2b59462f0a741be8c07024c1bc0.zip;6c6ca7f8480b26c8d00476e0e24b7184717fe4f0
date;https://github.com/HowardHinnant/date/archive/refs/tags/v3.0.1.zip;2dac0c81dc54ebdd8f8d073a75c053b04b56e159
dlpack;https://github.com/dmlc/dlpack/archive/refs/tags/v0.6.zip;4d565dd2e5b31321e5549591d78aa7f377173445
-eigen;https://gitlab.com/libeigen/eigen/-/archive/3.4/eigen-3.4.zip;ee201b07085203ea7bd8eb97cbcb31b07cfa3efb
+eigen;https://gitlab.com/libeigen/eigen/-/archive/3.4.0/eigen-3.4.0.zip;ef24286b7ece8737c99fa831b02941843546c081
flatbuffers;https://github.com/google/flatbuffers/archive/refs/tags/v1.12.0.zip;ba0a75fd12dbef8f6557a74e611b7a3d0c5fe7bf
fp16;https://github.com/Maratyszcza/FP16/archive/0a92994d729ff76a58f692d3028ca1b64b145d91.zip;b985f6985a05a1c03ff1bb71190f66d8f98a1494
fxdiv;https://github.com/Maratyszcza/FXdiv/archive/63058eff77e11aa15bf531df5dd34395ec3017c8.zip;a5658f4036402dbca7cebee32be57fb8149811e1
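Each deps.txt entry above is a semicolon-separated name;URL;SHA1 triple, so bumping Eigen means updating both the archive URL and its checksum. A hedged Python sketch (not part of this commit) for checking that an archive still matches the SHA1 recorded next to its URL:

    # Sketch only: verify one deps.txt entry, using the name;url;sha1 layout shown above.
    import hashlib
    import urllib.request

    entry = "eigen;https://gitlab.com/libeigen/eigen/-/archive/3.4.0/eigen-3.4.0.zip;ef24286b7ece8737c99fa831b02941843546c081"
    name, url, expected_sha1 = entry.split(";")

    with urllib.request.urlopen(url) as resp:
        actual_sha1 = hashlib.sha1(resp.read()).hexdigest()

    print(name, "OK" if actual_sha1 == expected_sha1 else f"mismatch: {actual_sha1}")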
4 changes: 3 additions & 1 deletion cmake/onnxruntime_providers_tensorrt.cmake
@@ -57,12 +57,14 @@
URL ${DEP_URL_onnx_tensorrt}
URL_HASH SHA1=${DEP_SHA1_onnx_tensorrt}
)
+if (NOT CUDA_INCLUDE_DIR)
+  set(CUDA_INCLUDE_DIR ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES}) # onnx-tensorrt repo needs this variable to build
+endif()
# The onnx_tensorrt repo contains a test program, getSupportedAPITest, which doesn't support Windows. It uses
# unistd.h. So we must exclude it from our build. onnxruntime_fetchcontent_makeavailable is for the purpose.
onnxruntime_fetchcontent_makeavailable(onnx_tensorrt)
include_directories(${onnx_tensorrt_SOURCE_DIR})
set(CMAKE_CXX_FLAGS ${OLD_CMAKE_CXX_FLAGS})
-set(CUDA_INCLUDE_DIR ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES}) # onnx-tensorrt repo needs this variable to build
if ( CMAKE_COMPILER_IS_GNUCC )
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-parameter")
endif()
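The new `if (NOT CUDA_INCLUDE_DIR)` guard only fills in a default when nothing was supplied, so a value set earlier by the caller or toolchain is no longer overwritten. A rough Python analogue of that "default only if unset" pattern (illustrative only; the function name is not from the repository):

    import os
    from typing import Optional

    def resolve_cuda_include_dir(toolkit_default: str, explicit: Optional[str] = None) -> str:
        # An explicitly provided value (or environment override) wins; otherwise
        # fall back to the toolkit default, as the guarded set() now does.
        return explicit or os.environ.get("CUDA_INCLUDE_DIR", toolkit_default)

    print(resolve_cuda_include_dir("/usr/local/cuda/include"))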
6 changes: 6 additions & 0 deletions onnxruntime/core/providers/vitisai/imp/tensor_proto.cc
@@ -16,6 +16,12 @@ gsl::span<const char> tensor_proto_as_raw(
std::vector<uint8_t> unpacked_tensor;
auto s = onnxruntime::utils::UnpackInitializerData(tensor, onnxruntime::Path(), unpacked_tensor);
mut_tensor.mutable_raw_data()->resize(unpacked_tensor.size());
+mut_tensor.clear_float_data();
+mut_tensor.clear_int32_data();
+mut_tensor.clear_string_data();
+mut_tensor.clear_int64_data();
+mut_tensor.clear_double_data();
+mut_tensor.clear_uint64_data();
memcpy(mut_tensor.mutable_raw_data()->data(), unpacked_tensor.data(), unpacked_tensor.size());
}
return gsl::span<const char>(tensor.raw_data().data(), tensor.raw_data().size());
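The added clear_* calls leave the initializer with a single canonical payload: once the unpacked bytes are copied into raw_data, no stale typed field (float_data, int32_data, ...) remains alongside it. A hedged Python analogue using the onnx package (the tensor here is a made-up example, not one from the repository):

    import numpy as np
    from onnx import TensorProto, numpy_helper

    t = TensorProto()
    t.name = "w"
    t.data_type = TensorProto.FLOAT
    t.dims.extend([2, 2])
    t.float_data.extend([1.0, 2.0, 3.0, 4.0])         # typed representation

    values = numpy_helper.to_array(t)                  # unpack whichever field is populated
    del t.float_data[:]                                # drop the typed payload, as the C++ change does
    t.raw_data = values.astype(np.float32).tobytes()   # keep only the raw representation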
6 changes: 6 additions & 0 deletions onnxruntime/test/framework/function_test.cc
@@ -582,7 +582,13 @@ TEST(FunctionTest, TestInlinedLocalFunctionNotRemoved) {

// myfun is not removed because it was claimed by InternalTestingEP
model_proto = session_object.GetModel().ToProto();
+#ifdef USE_TVM
+  // TVM EP takes the whole graph and optimizes it within its own framework.
+  // It does not retain the original graph.
+  ASSERT_EQ(0, model_proto.functions_size());
+#else
  ASSERT_EQ(1, model_proto.functions_size());
+#endif
}

} // namespace test
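For reference, functions_size() counts the local FunctionProto definitions kept in the serialized model, which is why the expectation drops to zero when the TVM EP has consumed the whole graph. A small hedged Python sketch for inspecting those entries ("model.onnx" is a placeholder path):

    import onnx

    model = onnx.load("model.onnx")   # placeholder path
    # model.functions is the repeated FunctionProto field that
    # model_proto.functions_size() counts on the C++ side.
    for fn in model.functions:
        print(fn.domain, fn.name, len(fn.node))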
5 changes: 5 additions & 0 deletions tools/ci_build/build.py
@@ -1839,6 +1839,11 @@ def run_onnxruntime_tests(args, source_dir, ctest_path, build_dir, configs):
# For CUDA or DML enabled builds test IOBinding feature
if args.use_cuda or args.use_dml:
log.info("Testing IOBinding feature")
+    if args.use_dml:
+        run_subprocess(
+            [sys.executable, "-m", "pip", "uninstall", "--yes", "onnx"], cwd=cwd, dll_path=dll_path
+        )
+        run_subprocess([sys.executable, "-m", "pip", "install", "-q", "onnx"], cwd=cwd, dll_path=dll_path)
run_subprocess([sys.executable, "onnxruntime_test_python_iobinding.py"], cwd=cwd, dll_path=dll_path)

if args.use_cuda:
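The new step force-reinstalls the onnx wheel for DML builds before the IOBinding test runs, presumably so the test sees a clean released onnx package. Outside build.py's run_subprocess helper, the equivalent plain-subprocess calls would look roughly like this (a sketch, not the script's actual helper):

    import subprocess
    import sys

    # Reinstall the onnx wheel, mirroring the two commands added to build.py.
    subprocess.run([sys.executable, "-m", "pip", "uninstall", "--yes", "onnx"], check=True)
    subprocess.run([sys.executable, "-m", "pip", "install", "-q", "onnx"], check=True)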
@@ -11,7 +11,7 @@ steps:
packageType: upack
feed: '/7424c8e4-5c62-490e-95c4-79446f31017c'
definition: '517c4f6f-5437-4392-a70d-4f15ec5be2f0'
-version: 1.0.107
+version: 1.0.110
downloadPath: $(Build.BinariesDirectory)/deps

# The private ADO project
@@ -22,7 +22,7 @@
packageType: upack
feed: '/4c7631f5-24c0-4307-8822-1aa8f180c325'
definition: 'fd9dd5ad-b73e-4678-890e-edcf680dbc1a'
-version: 1.0.107
+version: 1.0.110
downloadPath: $(Build.BinariesDirectory)/deps

# You can add more ADO accounts at here.
@@ -4,7 +4,7 @@ mypy
pytest
setuptools>=68.2.2
wheel
-git+http://github.com/onnx/onnx.git@b86cc54efce19530fb953e4b21f57e6b3888534c#egg=onnx
-protobuf==3.20.2
+onnx==1.15.0
+protobuf==4.21.12
sympy==1.12
flatbuffers
@@ -4,8 +4,8 @@ mypy
pytest
setuptools>=68.2.2
wheel
-git+http://github.com/onnx/onnx.git@b86cc54efce19530fb953e4b21f57e6b3888534c#egg=onnx
-protobuf==3.20.2
+onnx==1.15.0
+protobuf==4.21.12
sympy==1.12
flatbuffers
neural-compressor>=2.2.1
4 changes: 2 additions & 2 deletions tools/ci_build/github/linux/docker/scripts/requirements.txt
@@ -5,9 +5,9 @@ mypy
pytest
setuptools>=68.2.2
wheel>=0.35.1
-git+http://github.com/onnx/onnx.git@b86cc54efce19530fb953e4b21f57e6b3888534c#egg=onnx
+onnx==1.15.0
argparse
sympy==1.12
flatbuffers
-protobuf==3.20.2
+protobuf==4.21.12
packaging
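These requirements files now pin the released onnx==1.15.0 wheel (instead of a Git commit) together with protobuf==4.21.12. A quick hedged sanity check that an environment built from them resolved the pins as expected:

    import google.protobuf
    import onnx

    assert onnx.__version__ == "1.15.0", onnx.__version__
    assert google.protobuf.__version__ == "4.21.12", google.protobuf.__version__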
@@ -4,7 +4,7 @@ numpy==1.21.6 ; python_version < '3.11'
numpy==1.24.2 ; python_version >= '3.11'
transformers==v4.16.1
rsa==4.9
-tensorboard>=2.2.0,<2.5.0
+tensorboard==2.13.0
h5py
wget
pytorch-lightning
