diff --git a/cmake/onnxruntime_python.cmake b/cmake/onnxruntime_python.cmake
index 062cc8f9dbff3..07c65e7986b05 100644
--- a/cmake/onnxruntime_python.cmake
+++ b/cmake/onnxruntime_python.cmake
@@ -667,6 +667,15 @@ add_custom_command(
         $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/capi/
 )
 
+if (onnxruntime_BUILD_SHARED_LIB)
+  add_custom_command(
+    TARGET onnxruntime_pybind11_state POST_BUILD
+    COMMAND ${CMAKE_COMMAND} -E copy
+        $<TARGET_FILE:onnxruntime>
+        $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/capi/
+  )
+endif()
+
 if (onnxruntime_USE_OPENVINO)
   add_custom_command(
     TARGET onnxruntime_pybind11_state POST_BUILD
diff --git a/setup.py b/setup.py
index 3203993e0c4d4..5750833ce35de 100644
--- a/setup.py
+++ b/setup.py
@@ -297,11 +297,13 @@ def finalize_options(self):
         "libmklml_gnu.so",
         "libiomp5.so",
         "mimalloc.so",
+        "libonnxruntime.so*",
     ]
     dl_libs = ["libonnxruntime_providers_shared.so"]
     dl_libs.append(providers_cuda_or_rocm)
     dl_libs.append(providers_tensorrt_or_migraphx)
     dl_libs.append(providers_cann)
+    dl_libs.append("libonnxruntime.so*")
     # DNNL, TensorRT & OpenVINO EPs are built as shared libs
     libs.extend(["libonnxruntime_providers_shared.so"])
     libs.extend(["libonnxruntime_providers_dnnl.so"])
@@ -313,7 +315,12 @@ def finalize_options(self):
     if nightly_build:
         libs.extend(["libonnxruntime_pywrapper.so"])
 elif platform.system() == "Darwin":
-    libs = ["onnxruntime_pybind11_state.so", "libdnnl.2.dylib", "mimalloc.so"]  # TODO add libmklml and libiomp5 later.
+    libs = [
+        "onnxruntime_pybind11_state.so",
+        "libdnnl.2.dylib",
+        "mimalloc.so",
+        "libonnxruntime.dylib*",
+    ]  # TODO add libmklml and libiomp5 later.
     # DNNL & TensorRT EPs are built as shared libs
     libs.extend(["libonnxruntime_providers_shared.dylib"])
     libs.extend(["libonnxruntime_providers_dnnl.dylib"])
@@ -323,7 +330,13 @@ def finalize_options(self):
     if nightly_build:
         libs.extend(["libonnxruntime_pywrapper.dylib"])
 else:
-    libs = ["onnxruntime_pybind11_state.pyd", "dnnl.dll", "mklml.dll", "libiomp5md.dll"]
+    libs = [
+        "onnxruntime_pybind11_state.pyd",
+        "dnnl.dll",
+        "mklml.dll",
+        "libiomp5md.dll",
+        "onnxruntime.dll",
+    ]
     # DNNL, TensorRT & OpenVINO EPs are built as shared libs
     libs.extend(["onnxruntime_providers_shared.dll"])
     libs.extend(["onnxruntime_providers_dnnl.dll"])
@@ -376,7 +389,7 @@ def finalize_options(self):
     dl_libs.append("plugins.xml")
     dl_libs.append("usb-ma2x8x.mvcmd")
     data = ["capi/libonnxruntime_pywrapper.so"] if nightly_build else []
-    data += [path.join("capi", x) for x in dl_libs if path.isfile(path.join("onnxruntime", "capi", x))]
+    data += [path.join("capi", x) for x in dl_libs if glob(path.join("onnxruntime", "capi", x))]
     ext_modules = [
         Extension(
             "onnxruntime.capi.onnxruntime_pybind11_state",
@@ -384,7 +397,7 @@ def finalize_options(self):
         ),
     ]
 else:
-    data = [path.join("capi", x) for x in libs if path.isfile(path.join("onnxruntime", "capi", x))]
+    data = [path.join("capi", x) for x in libs if glob(path.join("onnxruntime", "capi", x))]
     ext_modules = []
 
 # Additional examples
diff --git a/tools/ci_build/build.py b/tools/ci_build/build.py
index b73a17db3ce13..ae4c9b27544ba 100644
--- a/tools/ci_build/build.py
+++ b/tools/ci_build/build.py
@@ -2593,7 +2593,16 @@ def main():
     if args.build_wheel or args.gen_doc or args.use_tvm or args.enable_training:
         args.enable_pybind = True
 
-    if args.build_csharp or args.build_nuget or args.build_java or args.build_nodejs:
+    if (
+        args.build_csharp
+        or args.build_nuget
+        or args.build_java
+        or args.build_nodejs
+        or (args.enable_pybind and not args.enable_training)
+    ):
+        # If Python bindings are enabled, we embed the shared lib in the python package.
+        # If training is enabled, we don't embed the shared lib in the python package since training requires
+        # torch interop.
         args.build_shared_lib = True
 
     if args.build_nuget and cross_compiling: