diff --git a/csharp/OnnxRuntime.CSharp.proj b/csharp/OnnxRuntime.CSharp.proj
index 5e43756ced7b1..ae9b3750ec893 100644
--- a/csharp/OnnxRuntime.CSharp.proj
+++ b/csharp/OnnxRuntime.CSharp.proj
@@ -20,6 +20,8 @@ CMake creates a target to this project
   true
   true
   None
+  false
+  NativeNuget.nuspec
   true
@@ -45,6 +47,13 @@ CMake creates a target to this project
   python
+
+
+
+  $([System.DateTime]::UtcNow.ToString(yyyyMMdd))
+  $([System.DateTime]::UtcNow.ToString(hhmm))
+
+
@@ -58,7 +67,7 @@ CMake creates a target to this project
-
+
@@ -81,8 +90,6 @@ CMake creates a target to this project
-  $([System.DateTime]::UtcNow.ToString(yyyyMMdd))
-  $([System.DateTime]::UtcNow.ToString(hhmm))
   @(MajorVersionNumber)
   $(PackageVersion)$(ReleaseVersionSuffix)
   $(PackageVersion)
@@ -91,23 +98,10 @@ CMake creates a target to this project
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
@@ -116,7 +110,7 @@ CMake creates a target to this project
@@ -144,6 +138,34 @@ CMake creates a target to this project
   />
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+

diff --git a/csharp/src/Microsoft.ML.OnnxRuntime/targets/netstandard/props.xml b/csharp/src/Microsoft.ML.OnnxRuntime/targets/netstandard/props.xml
index 9376d98c040a9..efe5c659f250a 100644
--- a/csharp/src/Microsoft.ML.OnnxRuntime/targets/netstandard/props.xml
+++ b/csharp/src/Microsoft.ML.OnnxRuntime/targets/netstandard/props.xml
@@ -37,7 +37,7 @@
   x86
   arm64
-  arm
+  arm
   $(Platform)
@@ -55,7 +55,8 @@
+       Condition="('$(PlatformTarget)' == 'x64' OR ('$(PlatformTarget)' == 'AnyCPU' AND '$(Prefer32Bit)' != 'true')) AND
+                  Exists('$(MSBuildThisFileDirectory)..\..\runtimes\win-x64\native\onnxruntime.dll')">
   onnxruntime.dll
   PreserveNewest
   false
@@ -146,8 +147,8 @@
   PreserveNewest
   false
-
-
+
+
   onnxruntime.dll

diff --git a/csharp/test/Microsoft.ML.OnnxRuntime.EndToEndTests/runtest.bat b/csharp/test/Microsoft.ML.OnnxRuntime.EndToEndTests/runtest.bat
index 07128fe1620ab..c16f12dc17f79 100755
--- a/csharp/test/Microsoft.ML.OnnxRuntime.EndToEndTests/runtest.bat
+++ b/csharp/test/Microsoft.ML.OnnxRuntime.EndToEndTests/runtest.bat
@@ -52,9 +52,12 @@ IF NOT errorlevel 0 (
 %dn% list test\Microsoft.ML.OnnxRuntime.EndToEndTests\Microsoft.ML.OnnxRuntime.EndToEndTests.csproj package
 dir test\Microsoft.ML.OnnxRuntime.EndToEndTests\packages\

-IF "%PACKAGENAME%"=="Microsoft.ML.OnnxRuntime.Gpu" (
+set gpu_package=F
+IF "%PACKAGENAME%"=="Microsoft.ML.OnnxRuntime.Gpu" set gpu_package=T
+IF "%PACKAGENAME%"=="Microsoft.ML.OnnxRuntime.Gpu.Windows" set gpu_package=T
+IF "%gpu_package%"=="T" (
   set TESTONGPU=ON
-  %dn% test -p:DefineConstants=USE_TENSORRT test\Microsoft.ML.OnnxRuntime.EndToEndTests\Microsoft.ML.OnnxRuntime.EndToEndTests.csproj --no-restore --filter TensorRT
+  %dn% test -p:DefineConstants=USE_TENSORRT test\Microsoft.ML.OnnxRuntime.EndToEndTests\Microsoft.ML.OnnxRuntime.EndToEndTests.csproj --no-restore --filter TensorRT

   IF NOT errorlevel 0 (
     @echo "Failed to build or execute the end-to-end test"
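Both test scripts now gate the GPU test path on a small set of package names: runtest.bat (above) accepts the Gpu meta-package and the Gpu.Windows sub-package, while runtest.sh (below) accepts Gpu and Gpu.Linux. A minimal Python restatement of that check, for illustration only (package names taken from the diff):

```python
# Illustrative helper mirroring the membership checks in runtest.bat (Windows)
# and runtest.sh (Linux) after this change.
WINDOWS_GPU_PACKAGES = {"Microsoft.ML.OnnxRuntime.Gpu", "Microsoft.ML.OnnxRuntime.Gpu.Windows"}
LINUX_GPU_PACKAGES = {"Microsoft.ML.OnnxRuntime.Gpu", "Microsoft.ML.OnnxRuntime.Gpu.Linux"}

def test_on_gpu(package_name: str, windows: bool) -> bool:
    # The scripts export TESTONGPU=ON when this is true.
    return package_name in (WINDOWS_GPU_PACKAGES if windows else LINUX_GPU_PACKAGES)

assert test_on_gpu("Microsoft.ML.OnnxRuntime.Gpu.Windows", windows=True)
assert not test_on_gpu("Microsoft.ML.OnnxRuntime.Gpu.Windows", windows=False)
```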
diff --git a/csharp/test/Microsoft.ML.OnnxRuntime.EndToEndTests/runtest.sh b/csharp/test/Microsoft.ML.OnnxRuntime.EndToEndTests/runtest.sh
index 39f0ff1c2f85e..a500e4bce8fbf 100755
--- a/csharp/test/Microsoft.ML.OnnxRuntime.EndToEndTests/runtest.sh
+++ b/csharp/test/Microsoft.ML.OnnxRuntime.EndToEndTests/runtest.sh
@@ -32,7 +32,7 @@ if [ $RunTestCsharp = "true" ]; then
     exit 1
   fi

-  if [ $PACKAGENAME = "Microsoft.ML.OnnxRuntime.Gpu" ]; then
+  if [ $PACKAGENAME = "Microsoft.ML.OnnxRuntime.Gpu" ] || [ $PACKAGENAME = "Microsoft.ML.OnnxRuntime.Gpu.Linux" ]; then
     export TESTONGPU=ON
     dotnet test -p:DefineConstants=USE_CUDA $BUILD_SOURCESDIRECTORY/csharp/test/Microsoft.ML.OnnxRuntime.EndToEndTests/Microsoft.ML.OnnxRuntime.EndToEndTests.csproj --no-restore --verbosity detailed
     if [ $? -ne 0 ]; then

diff --git a/tools/ci_build/github/azure-pipelines/c-api-noopenmp-packaging-pipelines.yml b/tools/ci_build/github/azure-pipelines/c-api-noopenmp-packaging-pipelines.yml
index 50ca6908520a9..badee79fd78b3 100644
--- a/tools/ci_build/github/azure-pipelines/c-api-noopenmp-packaging-pipelines.yml
+++ b/tools/ci_build/github/azure-pipelines/c-api-noopenmp-packaging-pipelines.yml
@@ -708,39 +708,52 @@ stages:
   variables:
     breakCodesignValidationInjection: ${{ parameters.DoEsrp }}
     ReleaseVersionSuffix: $[stageDependencies.Setup.Set_Variables.outputs['Set_Release_Version_Suffix.ReleaseVersionSuffix']]
+    BuildDate: $[format('{0:yyyyMMdd}', pipeline.startTime)]
+    BuildTime: $[format('{0:HHmm}', pipeline.startTime)]

   steps:
   - checkout: self
     submodules: true
-  - task: DownloadPipelineArtifact@2
-    displayName: 'Download Pipeline Artifact - NuGet'
-    inputs:
-      artifactName: 'onnxruntime-win-x64-cuda'
-      targetPath: '$(Build.BinariesDirectory)/nuget-artifact'
-
-  - task: DownloadPipelineArtifact@2
-    displayName: 'Download Pipeline Artifact - NuGet'
-    inputs:
-      artifactName: 'onnxruntime-win-x64-tensorrt'
-      targetPath: '$(Build.BinariesDirectory)/nuget-artifact'
+  - template: templates/flex-downloadPipelineArtifact.yml
+    parameters:
+      StepName: 'Download Pipeline Artifact - NuGet'
+      ArtifactName: 'onnxruntime-win-x64-cuda'
+      TargetPath: '$(Build.BinariesDirectory)/nuget-artifact'
+      SpecificArtifact: ${{ parameters.SpecificArtifact }}
+      BuildId: ${{ parameters.BuildId }}

-  - task: DownloadPipelineArtifact@2
-    displayName: 'Download Pipeline Artifact - NuGet'
-    inputs:
-      artifactName: 'onnxruntime-linux-x64-cuda'
-      targetPath: '$(Build.BinariesDirectory)/nuget-artifact'
+  - template: templates/flex-downloadPipelineArtifact.yml
+    parameters:
+      StepName: 'Download Pipeline Artifact - NuGet'
+      ArtifactName: 'onnxruntime-win-x64-tensorrt'
+      TargetPath: '$(Build.BinariesDirectory)/nuget-artifact'
+      SpecificArtifact: ${{ parameters.SpecificArtifact }}
+      BuildId: ${{ parameters.BuildId }}

-  - task: DownloadPipelineArtifact@2
-    displayName: 'Download Pipeline Artifact - NuGet'
-    inputs:
-      artifactName: 'onnxruntime-linux-x64-tensorrt'
-      targetPath: '$(Build.BinariesDirectory)/nuget-artifact'
+  - template: templates/flex-downloadPipelineArtifact.yml
+    parameters:
+      StepName: 'Download Pipeline Artifact - NuGet'
+      ArtifactName: 'onnxruntime-linux-x64-cuda'
+      TargetPath: '$(Build.BinariesDirectory)/nuget-artifact'
+      SpecificArtifact: ${{ parameters.SpecificArtifact }}
+      BuildId: ${{ parameters.BuildId }}

-  - task: DownloadPipelineArtifact@2
-    displayName: 'Download Pipeline Artifact - NuGet'
-    inputs:
-      artifactName: 'drop-extra'
-      targetPath: '$(Build.BinariesDirectory)/extra-artifact'
+  - template: templates/flex-downloadPipelineArtifact.yml
+    parameters:
+      StepName: 'Download Pipeline Artifact - NuGet'
+      ArtifactName: 'onnxruntime-linux-x64-tensorrt'
+      TargetPath: '$(Build.BinariesDirectory)/nuget-artifact'
+      SpecificArtifact: ${{ parameters.SpecificArtifact }}
+      BuildId: ${{ parameters.BuildId }}
+
+  - template: templates/flex-downloadPipelineArtifact.yml
+    parameters:
+      StepName: 'Download Pipeline Artifact - NuGet'
+      ArtifactName: 'drop-extra'
+      TargetPath: '$(Build.BinariesDirectory)/extra-artifact'
+      SpecificArtifact: ${{ parameters.SpecificArtifact }}
+      BuildId: ${{ parameters.BuildId }}

   # Reconstruct the build dir
   - task: PowerShell@2
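The two new BuildDate/BuildTime variables feed the MSBuild invocation in the next hunk. The .proj previously computed the stamp itself with [System.DateTime]::UtcNow; deriving it once from pipeline.startTime means every package produced by the stage carries the same dev-version stamp. A rough Python equivalent, for illustration only:

```python
from datetime import datetime, timezone

# Stand-in for pipeline.startTime; the pipeline formats it as yyyyMMdd / HHmm.
start_time = datetime.now(timezone.utc)
build_date = start_time.strftime("%Y%m%d")  # BuildDate, e.g. 20240131
build_time = start_time.strftime("%H%M")    # BuildTime, e.g. 0942
print(f"-p:CurrentDate={build_date} -p:CurrentTime={build_time}")
```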
@@ -797,12 +810,13 @@ stages:
       DoEsrp: ${{ parameters.DoEsrp }}

   - task: MSBuild@1
-    displayName: 'Build Nuget Packages'
+    displayName: 'Build Nuget Packages Microsoft.ML.OnnxRuntime.Gpu'
     inputs:
       solution: '$(Build.SourcesDirectory)\csharp\OnnxRuntime.CSharp.proj'
       configuration: RelWithDebInfo
       platform: 'Any CPU'
-      msbuildArguments: '-t:CreatePackage -p:OnnxRuntimeBuildDirectory="$(Build.BinariesDirectory)" -p:OrtPackageId=Microsoft.ML.OnnxRuntime.Gpu -p:IsReleaseBuild=${{ parameters.IsReleaseBuild }} -p:ReleaseVersionSuffix=$(ReleaseVersionSuffix)'
+      msbuildArguments: '-t:CreatePackage -p:OnnxRuntimeBuildDirectory="$(Build.BinariesDirectory)" -p:OrtPackageId=Microsoft.ML.OnnxRuntime.Gpu -p:IsReleaseBuild=${{ parameters.IsReleaseBuild }}
+        -p:ReleaseVersionSuffix=$(ReleaseVersionSuffix) -p:CurrentDate=$(BuildDate) -p:CurrentTime=$(BuildTime)'
       workingDirectory: '$(Build.SourcesDirectory)\csharp'

   - task: BatchScript@1
@@ -842,8 +856,25 @@ stages:
     parameters:
       PackageType: 'nuget'
       PackagePath: '$(Build.ArtifactStagingDirectory)'
-      PackageName: 'Microsoft.ML.OnnxRuntime.*nupkg'
       PlatformsSupported: 'win-x64,linux-x64'
+      # 1* matches the leading version number; it filters out the Gpu.Windows and Gpu.Linux packages
+      PackageName: 'Microsoft.ML.OnnxRuntime.Gpu.1*nupkg'
+      VerifyNugetSigning: false
+
+  - template: templates/validate-package.yml
+    parameters:
+      PackageType: 'nuget'
+      PackagePath: '$(Build.ArtifactStagingDirectory)'
+      PackageName: 'Microsoft.ML.OnnxRuntime.Gpu.Windows.*nupkg'
+      PlatformsSupported: 'win-x64'
+      VerifyNugetSigning: false
+
+  - template: templates/validate-package.yml
+    parameters:
+      PackageType: 'nuget'
+      PackagePath: '$(Build.ArtifactStagingDirectory)'
+      PackageName: 'Microsoft.ML.OnnxRuntime.Gpu.Linux.*nupkg'
+      PlatformsSupported: 'linux-x64'
       VerifyNugetSigning: false

   - task: PublishPipelineArtifact@0
@@ -1104,6 +1135,19 @@ stages:
     ArtifactSuffix: 'GPU'
     StageSuffix: 'GPU'
     Skipx86Tests: 'true'
+    SpecificArtifact: ${{ parameters.SpecificArtifact }}
+    BuildId: ${{ parameters.BuildId }}
+
+- template: nuget/templates/test_win.yml
+  parameters:
+    AgentPool : 'onnxruntime-Win2022-GPU-T4'
+    NugetPackageName : 'Microsoft.ML.OnnxRuntime.Gpu.Windows'
+    ArtifactSuffix: 'GPU'
+    StageSuffix: 'GPU'
+    MoreSuffix: '_Windows'
+    Skipx86Tests: 'true'
+    SpecificArtifact: ${{ parameters.SpecificArtifact }}
+    BuildId: ${{ parameters.BuildId }}

 - template: nuget/templates/test_linux.yml
   parameters:
@@ -1114,6 +1158,16 @@ stages:
     SpecificArtifact: ${{ parameters.specificArtifact }}
     BuildId: ${{ parameters.BuildId }}

+- template: nuget/templates/test_linux.yml
+  parameters:
+    AgentPool : Onnxruntime-Linux-GPU
+    ArtifactSuffix: 'GPU'
+    StageSuffix: 'GPU'
+    MoreSuffix: '_Linux'
+    NugetPackageName : 'Microsoft.ML.OnnxRuntime.Gpu.Linux'
+    SpecificArtifact: ${{ parameters.specificArtifact }}
+    BuildId: ${{ parameters.BuildId }}
+
 - template: nuget/templates/test_linux.yml
   parameters:
     AgentPool: AMD-GPU
@@ -1321,4 +1375,4 @@ stages:
     displayName: 'Publish Pipeline NuGet Artifact'
     inputs:
       artifactName: 'drop-signed-nuget-dml'
-      targetPath: '$(Build.ArtifactStagingDirectory)'
\ No newline at end of file
+      targetPath: '$(Build.ArtifactStagingDirectory)'
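A quick illustration of why the meta-package validation pattern above anchors on the version digit: with all three Gpu packages in the staging directory, a bare Gpu.* glob would match the sub-packages too. Here fnmatch stands in for the template's matching, and the version number is a hypothetical stand-in:

```python
import fnmatch

staged = [
    "Microsoft.ML.OnnxRuntime.Gpu.1.17.0.nupkg",          # meta-package
    "Microsoft.ML.OnnxRuntime.Gpu.Windows.1.17.0.nupkg",  # per-OS sub-packages
    "Microsoft.ML.OnnxRuntime.Gpu.Linux.1.17.0.nupkg",
]
# Anchoring on ".Gpu.1" selects only the meta-package.
assert fnmatch.filter(staged, "Microsoft.ML.OnnxRuntime.Gpu.1*nupkg") == staged[:1]
# A bare pattern would pick up all three.
assert fnmatch.filter(staged, "Microsoft.ML.OnnxRuntime.Gpu.*nupkg") == staged
```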
diff --git a/tools/ci_build/github/azure-pipelines/cuda-packaging-pipeline.yml b/tools/ci_build/github/azure-pipelines/cuda-packaging-pipeline.yml
index 8a9592282cd46..efb936a8ded3d 100644
--- a/tools/ci_build/github/azure-pipelines/cuda-packaging-pipeline.yml
+++ b/tools/ci_build/github/azure-pipelines/cuda-packaging-pipeline.yml
@@ -160,6 +160,8 @@
       StageSuffix: 'GPU'
       Skipx86Tests: 'true'
       CudaVersion: ${{ parameters.CudaVersion }}
+      SpecificArtifact: ${{ parameters.SpecificArtifact }}
+      BuildId: ${{ parameters.BuildId }}
   ## Linux GPU Testing
   - template: nuget/templates/test_linux.yml
     parameters:

diff --git a/tools/ci_build/github/azure-pipelines/nuget/templates/test_linux.yml b/tools/ci_build/github/azure-pipelines/nuget/templates/test_linux.yml
index 1e609b052b8d3..f735755b04bb3 100644
--- a/tools/ci_build/github/azure-pipelines/nuget/templates/test_linux.yml
+++ b/tools/ci_build/github/azure-pipelines/nuget/templates/test_linux.yml
@@ -3,13 +3,14 @@ parameters:
   ArtifactSuffix: ''
   NugetPackageName : ''
   StageSuffix: 'CPU'
+  MoreSuffix: ''
   NativePackagePrefix: 'onnxruntime'
   SpecificArtifact: false
   CustomOpArtifactName: 'onnxruntime-linux-x64'
   BuildId: '0'
   CudaVersion: '11.8'

 stages:
-- stage: NuGet_Test_Linux_${{ parameters.StageSuffix }}
+- stage: NuGet_Test_Linux_${{ parameters.StageSuffix }}${{ parameters.MoreSuffix }}
   dependsOn:
   - NuGet_Packaging_${{ parameters.StageSuffix }}
   condition: succeeded()
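The effect of the new MoreSuffix parameter on stage naming, restated in Python for clarity (stage names taken from the templates; the helper itself is illustrative):

```python
# Stage names as composed by the test templates: the optional MoreSuffix keeps
# the meta-package stages and the new per-OS stages from colliding.
def stage_name(os_: str, stage_suffix: str, more_suffix: str = "") -> str:
    return f"NuGet_Test_{os_}_{stage_suffix}{more_suffix}"

assert stage_name("Win", "GPU") == "NuGet_Test_Win_GPU"                      # meta-package
assert stage_name("Win", "GPU", "_Windows") == "NuGet_Test_Win_GPU_Windows"  # Gpu.Windows
assert stage_name("Linux", "GPU", "_Linux") == "NuGet_Test_Linux_GPU_Linux"  # Gpu.Linux
```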
diff --git a/tools/ci_build/github/azure-pipelines/nuget/templates/test_win.yml b/tools/ci_build/github/azure-pipelines/nuget/templates/test_win.yml
index a15c3061913f8..102a037a4a588 100644
--- a/tools/ci_build/github/azure-pipelines/nuget/templates/test_win.yml
+++ b/tools/ci_build/github/azure-pipelines/nuget/templates/test_win.yml
@@ -3,15 +3,19 @@ parameters:
   NugetPackageName : ''
   ArtifactSuffix: ''
   StageSuffix: 'CPU'
+  # MoreSuffix is used to differentiate testing for the GPU meta-package and the GPU-Windows/GPU-Linux packages
+  MoreSuffix: ''
   # For inference packages, the test data artifact name is drop-extra and no suffix is required.
   # For training packages, to differentiate the artifact name we add '-training' suffix. This needs to be passed from
   # the parent pipeline.
   TestDataArtifactSuffix: ''
   Skipx86Tests: 'false'
   CudaVersion: ''
+  SpecificArtifact: false
+  BuildId: ''

 stages:
-- stage: NuGet_Test_Win_${{ parameters.StageSuffix }}
+- stage: NuGet_Test_Win_${{ parameters.StageSuffix }}${{ parameters.MoreSuffix }}
   dependsOn:
   - NuGet_Packaging_${{ parameters.StageSuffix }}
   condition: succeeded()
@@ -58,11 +62,13 @@ stages:
         arguments: 'amd64'
         modifyEnvironment: true

-      - task: DownloadPipelineArtifact@0
-        displayName: 'Download Pipeline Artifact'
-        inputs:
-          artifactName: drop-signed-nuget-${{ parameters.ArtifactSuffix }}
-          targetPath: '$(Build.BinariesDirectory)\nuget-artifact'
+      - template: ../../templates/flex-downloadPipelineArtifact.yml
+        parameters:
+          StepName: 'Download Pipeline Artifact'
+          ArtifactName: drop-signed-nuget-${{ parameters.ArtifactSuffix }}
+          TargetPath: '$(Build.BinariesDirectory)\nuget-artifact'
+          SpecificArtifact: ${{ parameters.SpecificArtifact }}
+          BuildId: ${{ parameters.BuildId }}

       - template: get-nuget-package-version-as-variable.yml
         parameters:

diff --git a/tools/ci_build/github/azure-pipelines/templates/c-api-cpu.yml b/tools/ci_build/github/azure-pipelines/templates/c-api-cpu.yml
index fff75e62716f5..37b4bdc43afcd 100644
--- a/tools/ci_build/github/azure-pipelines/templates/c-api-cpu.yml
+++ b/tools/ci_build/github/azure-pipelines/templates/c-api-cpu.yml
@@ -779,6 +779,8 @@ stages:
       Skipx86Tests : false
       NugetPackageName : 'Microsoft.ML.OnnxRuntime'
       ArtifactSuffix: 'CPU'
+      SpecificArtifact: ${{ parameters.SpecificArtifact }}
+      BuildId: ${{ parameters.BuildId }}

   - template: ../nuget/templates/test_linux.yml
     parameters:
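The Python packaging changes that follow implement the actual split. The core rule is the new package_name parameter on is_this_file_needed() in the next diff: the Gpu meta-package takes no native binaries at all, while the sub-packages keep the existing cuda/tensorrt selection. Restated with illustrative asserts:

```python
# The new contract of is_this_file_needed(), copied from the hunk below.
def is_this_file_needed(ep, filename, package_name):
    # The Gpu meta-package carries no native binaries at all.
    if package_name == "Microsoft.ML.OnnxRuntime.Gpu":
        return False
    # Unchanged rule: onnxruntime_providers_cuda comes from the CUDA build;
    # everything else comes from the TensorRT build.
    return (ep != "cuda" or "cuda" in filename) and (ep != "tensorrt" or "cuda" not in filename)

assert not is_this_file_needed("cuda", "onnxruntime_providers_cuda.dll", "Microsoft.ML.OnnxRuntime.Gpu")
assert is_this_file_needed("cuda", "onnxruntime_providers_cuda.dll", "Microsoft.ML.OnnxRuntime.Gpu.Windows")
assert not is_this_file_needed("tensorrt", "onnxruntime_providers_cuda.dll", "Microsoft.ML.OnnxRuntime.Gpu.Windows")
assert is_this_file_needed("tensorrt", "onnxruntime.dll", "Microsoft.ML.OnnxRuntime.Gpu.Windows")
```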
diff --git a/tools/nuget/generate_nuspec_for_native_nuget.py b/tools/nuget/generate_nuspec_for_native_nuget.py
index df74e7e5599a8..66248565a3e3a 100644
--- a/tools/nuget/generate_nuspec_for_native_nuget.py
+++ b/tools/nuget/generate_nuspec_for_native_nuget.py
@@ -39,7 +39,11 @@ def get_package_name(os, cpu_arch, ep, is_training_package):
 # Currently we take onnxruntime_providers_cuda from CUDA build
 # And onnxruntime, onnxruntime_providers_shared and
 # onnxruntime_providers_tensorrt from tensorrt build
-def is_this_file_needed(ep, filename):
+# CUDA binaries are split out into the platform-dependent packages Microsoft.ML.OnnxRuntime.Gpu.{Windows|Linux}
+# and are not included in the base Microsoft.ML.OnnxRuntime.Gpu package
+def is_this_file_needed(ep, filename, package_name):
+    if package_name == "Microsoft.ML.OnnxRuntime.Gpu":
+        return False
     return (ep != "cuda" or "cuda" in filename) and (ep != "tensorrt" or "cuda" not in filename)
@@ -47,7 +51,7 @@
 # ep: cuda, tensorrt, None
 # files_list: a list of xml string pieces to append
 # This function has no return value. It updates files_list directly
-def generate_file_list_for_ep(nuget_artifacts_dir, ep, files_list, include_pdbs, is_training_package):
+def generate_file_list_for_ep(nuget_artifacts_dir, ep, files_list, include_pdbs, is_training_package, package_name):
     for child in nuget_artifacts_dir.iterdir():
         if not child.is_dir():
             continue
@@ -57,7 +61,11 @@
             child = child / "lib"  # noqa: PLW2901
             for child_file in child.iterdir():
                 suffixes = [".dll", ".lib", ".pdb"] if include_pdbs else [".dll", ".lib"]
-                if child_file.suffix in suffixes and is_this_file_needed(ep, child_file.name):
+                if (
+                    child_file.suffix in suffixes
+                    and is_this_file_needed(ep, child_file.name, package_name)
+                    and package_name != "Microsoft.ML.OnnxRuntime.Gpu.Linux"
+                ):
                     files_list.append(
                         '' % cpu_arch
                     )
@@ -83,7 +91,11 @@
             for child_file in child.iterdir():
                 if not child_file.is_file():
                     continue
-                if child_file.suffix == ".so" and is_this_file_needed(ep, child_file.name):
+                if (
+                    child_file.suffix == ".so"
+                    and is_this_file_needed(ep, child_file.name, package_name)
+                    and package_name != "Microsoft.ML.OnnxRuntime.Gpu.Windows"
+                ):
                     files_list.append(
                         '' % cpu_arch
                     )
@@ -128,6 +140,9 @@ def parse_arguments():
         help="The selected execution provider for this build.",
     )
     parser.add_argument("--sdk_info", required=False, default="", type=str, help="dependency SDK information.")
+    parser.add_argument(
+        "--nuspec_name", required=False, default="NativeNuget.nuspec", type=str, help="nuget spec name."
+    )
     return parser.parse_args()
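The add_common_dependencies() helper added below makes the meta-package depend on both per-OS packages. The literal dependency ids in its nuspec strings are not visible in this diff, so the shape sketched here is inferred from the helper's branching and is partly an assumption:

```python
# Sketch only: dependency ids assumed from the helper's branching, since the
# nuspec strings themselves are stripped from this diff.
def package_dependencies(package_name: str) -> list[str]:
    deps = ["Microsoft.ML.OnnxRuntime.Managed"]  # common dependency (assumed id)
    if package_name == "Microsoft.ML.OnnxRuntime.Gpu":
        # The meta-package pulls in both per-OS native packages, so existing
        # consumers keep referencing Microsoft.ML.OnnxRuntime.Gpu unchanged.
        deps += ["Microsoft.ML.OnnxRuntime.Gpu.Windows", "Microsoft.ML.OnnxRuntime.Gpu.Linux"]
    return deps
```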
@@ -161,11 +176,14 @@ def generate_description(line_list, package_name):
             + "See https://github.com/microsoft/onnxruntime-training-examples/tree/master/on_device_training for "
             + "more details."
         )
+    elif "Microsoft.ML.OnnxRuntime.Gpu.Linux" in package_name:
+        description = "This package contains Linux native shared library artifacts for ONNX Runtime with CUDA."
+    elif "Microsoft.ML.OnnxRuntime.Gpu.Windows" in package_name:
+        description = "This package contains Windows native shared library artifacts for ONNX Runtime with CUDA."
     elif "Microsoft.ML.OnnxRuntime" in package_name:
         # This is a Microsoft.ML.OnnxRuntime.* package
         description = (
             "This package contains native shared library artifacts for all supported platforms of ONNX Runtime."
         )
-
     line_list.append("" + description + "")
@@ -193,6 +211,13 @@ def generate_repo_url(line_list, repo_url, commit_id):
     line_list.append('')


+def add_common_dependencies(xml_text, package_name, version):
+    xml_text.append('')
+    if package_name == "Microsoft.ML.OnnxRuntime.Gpu":
+        xml_text.append('')
+        xml_text.append('')
+
+
 def generate_dependencies(xml_text, package_name, version):
     dml_dependency = ''
@@ -215,21 +240,22 @@
         include_dml = package_name == "Microsoft.ML.OnnxRuntime.DirectML"

         xml_text.append("")
+        # Support .Net Core
         xml_text.append('')
-        xml_text.append('')
+        add_common_dependencies(xml_text, package_name, version)
         if include_dml:
             xml_text.append(dml_dependency)
         xml_text.append("")
         # Support .Net Standard
         xml_text.append('')
-        xml_text.append('')
+        add_common_dependencies(xml_text, package_name, version)
         if include_dml:
             xml_text.append(dml_dependency)
         xml_text.append("")
         # Support .Net Framework
         xml_text.append('')
-        xml_text.append('')
+        add_common_dependencies(xml_text, package_name, version)
         if include_dml:
             xml_text.append(dml_dependency)
         xml_text.append("")
@@ -324,6 +350,8 @@ def generate_files(line_list, args):
     ]
     is_mklml_package = args.package_name == "Microsoft.ML.OnnxRuntime.MKLML"
     is_cuda_gpu_package = args.package_name == "Microsoft.ML.OnnxRuntime.Gpu"
+    is_cuda_gpu_win_sub_package = args.package_name == "Microsoft.ML.OnnxRuntime.Gpu.Windows"
+    is_cuda_gpu_linux_sub_package = args.package_name == "Microsoft.ML.OnnxRuntime.Gpu.Linux"
     is_rocm_gpu_package = args.package_name == "Microsoft.ML.OnnxRuntime.ROCm"
     is_dml_package = args.package_name == "Microsoft.ML.OnnxRuntime.DirectML"
     is_windowsai_package = args.package_name == "Microsoft.AI.MachineLearning"
@@ -389,24 +417,35 @@
         runtimes = f'{runtimes_target}{args.target_architecture}\\{runtimes_native_folder}"'

     # Process headers
-    files_list.append(
-        "'
-    )
-    files_list.append(
-        "'
-    )
-    files_list.append(
-        "'
-    )
+    build_dir = "buildTransitive" if "Gpu" in args.package_name else "build"
+    include_dir = f"{build_dir}\\native\\include"
+
+    # The Gpu meta-package does not include the onnxruntime headers; they ship in the Gpu.Windows/Gpu.Linux sub-packages
+    if args.package_name != "Microsoft.ML.OnnxRuntime.Gpu":
+        files_list.append(
+            "'
+        )
+        files_list.append(
+            "'
+        )
+        files_list.append(
+            "'
+        )

     if is_training_package:
         files_list.append(
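The build_dir selection just above is the packaging-layout change in miniature: the Gpu family ships its props/targets and headers under buildTransitive/, which NuGet flows to projects that reference the meta-package only transitively, while every other package keeps build/. A minimal sketch of that rule:

```python
# Mirrors build_dir selection in generate_files(): buildTransitive/ assets
# apply to transitive consumers of the Gpu meta-package; build/ does not.
def msbuild_asset_dir(package_name: str) -> str:
    return "buildTransitive" if "Gpu" in package_name else "build"

assert msbuild_asset_dir("Microsoft.ML.OnnxRuntime.Gpu.Linux") == "buildTransitive"
assert msbuild_asset_dir("Microsoft.ML.OnnxRuntime") == "build"
```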
@@ -531,37 +570,42 @@ def generate_files(line_list, args):
     if nuget_artifacts_dir.exists():
         # Code path for ADO build pipeline, the files under 'nuget-artifacts' are
         # downloaded from other build jobs
-        if is_cuda_gpu_package:
+        if is_cuda_gpu_package or is_cuda_gpu_win_sub_package or is_cuda_gpu_linux_sub_package:
             ep_list = ["tensorrt", "cuda", None]
         elif is_rocm_gpu_package:
             ep_list = ["rocm", None]
         else:
             ep_list = [None]
         for ep in ep_list:
-            generate_file_list_for_ep(nuget_artifacts_dir, ep, files_list, include_pdbs, is_training_package)
+            generate_file_list_for_ep(
+                nuget_artifacts_dir, ep, files_list, include_pdbs, is_training_package, args.package_name
+            )
         is_ado_packaging_build = True
     else:
         # Code path for local dev build
-        files_list.append(
-            ""
-        )
-        files_list.append(
-            ""
-        )
-        if include_pdbs and os.path.exists(os.path.join(args.native_build_path, "onnxruntime.pdb")):
+        # For a local dev build, the gpu linux sub-package is also generated for compatibility, though it is not used
+        if not is_cuda_gpu_linux_sub_package:
             files_list.append(
-                ""
+                ""
             )
-
+            files_list.append(
+                ""
+            )
+            if include_pdbs and os.path.exists(os.path.join(args.native_build_path, "onnxruntime.pdb")):
+                files_list.append(
+                    ""
+                )
     else:
-        files_list.append(
-            "'
-        )
+        ort_so = os.path.join(args.native_build_path, "libonnxruntime.so")
+        if os.path.exists(ort_so):
+            files_list.append(
+                "'
+            )

     if includes_winml:
         # Process microsoft.ai.machinelearning import lib, dll, and pdb
@@ -732,7 +776,7 @@
             + '\\native" />'
         )

-    if args.execution_provider == "cuda" or is_cuda_gpu_package and not is_ado_packaging_build:
+    if args.execution_provider == "cuda" or is_cuda_gpu_win_sub_package and not is_ado_packaging_build:
         files_list.append(
             "')

     # Process native targets
     files_list.append("')
@@ -857,6 +901,8 @@
     if (
         is_cpu_package
         or is_cuda_gpu_package
+        or is_cuda_gpu_linux_sub_package
+        or is_cuda_gpu_win_sub_package
         or is_rocm_gpu_package
         or is_dml_package
         or is_mklml_package
@@ -877,9 +923,10 @@
             args.package_name + ".props",
         )
         os.system(copy_command + " " + source_props + " " + target_props)
-        files_list.append("')
+        files_list.append("')
         if not is_snpe_package and not is_qnn_package:
-            files_list.append("')
+            files_list.append("')
+            files_list.append("')

     # Process targets file
     source_targets = os.path.join(
@@ -895,9 +942,10 @@
             args.package_name + ".targets",
         )
         os.system(copy_command + " " + source_targets + " " + target_targets)
-        files_list.append("')
+        files_list.append("')
         if not is_snpe_package and not is_qnn_package:
-            files_list.append("')
+            files_list.append("')
+            files_list.append("')

     # Process xamarin targets files
     if args.package_name == "Microsoft.ML.OnnxRuntime":
@@ -1148,7 +1196,8 @@ def main():
     lines = generate_nuspec(args)

     # Create the nuspec needed to generate the Nuget
-    with open(os.path.join(args.native_build_path, "NativeNuget.nuspec"), "w") as f:
+    print(f"nuspec_name: {args.nuspec_name}")
+    with open(os.path.join(args.native_build_path, args.nuspec_name), "w") as f:
         for line in lines:
             # Uncomment the printing of the line if you need to debug what's produced on a CI machine
             # print(line)
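The validate_package.py changes below encode what each NuGet flavor is now expected to contain. Summarized as a table, derived from the three new booleans in that file (library = onnxruntime.dll / libonnxruntime.so; cuda = CUDA and TensorRT EP binaries):

```python
# Expected nuget package contents after the split, per validate_package.py.
# Zip/tarball GPU archives keep all three, as before.
EXPECTED_NUGET_CONTENTS = {
    "Microsoft.ML.OnnxRuntime.Gpu":         {"library": False, "headers": False, "cuda": False},
    "Microsoft.ML.OnnxRuntime.Gpu.Windows": {"library": True,  "headers": True,  "cuda": True},
    "Microsoft.ML.OnnxRuntime.Gpu.Linux":   {"library": True,  "headers": True,  "cuda": True},
}
```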
diff --git a/tools/nuget/validate_package.py b/tools/nuget/validate_package.py
index daa96cc72e989..1079b64c9a90f 100644
--- a/tools/nuget/validate_package.py
+++ b/tools/nuget/validate_package.py
@@ -102,6 +102,7 @@ def check_if_dlls_are_present(
     platforms_supported,
     zip_file,
     package_path,
+    is_gpu_dependent_package=False,  # only used for nuget packages
 ):
     platforms = platforms_supported.strip().split(",")
     if package_type == "tarball":
@@ -110,31 +111,44 @@
         file_list_in_package += [os.path.join(dirpath, file) for file in filenames]
     else:
         file_list_in_package = zip_file.namelist()
-
+    print(file_list_in_package)
+    # In the NuGet GPU meta-package, onnxruntime.dll ships in the dependent per-OS packages.
+    package_contains_library = not bool(package_type == "nuget" and is_gpu_package)
+    # The GPU header files likewise ship in the dependent per-OS packages, not in the meta-package.
+    package_contains_headers = bool(
+        (is_gpu_package and package_type != "nuget") or (package_type == "nuget" and not is_gpu_package)
+    )
+    # The CUDA EP and TensorRT EP binaries are also in the dependent packages.
+    package_contains_cuda_binaries = bool((is_gpu_package and package_type != "nuget") or is_gpu_dependent_package)
     for platform in platforms:
         if platform.startswith("win"):
             native_folder = "_native" if is_windows_ai_package else "native"

             if package_type == "nuget":
                 folder = "runtimes/" + platform + "/" + native_folder
-                header_folder = "build/native/include"
+                build_dir = "buildTransitive" if is_gpu_dependent_package else "build"
+                header_folder = f"{build_dir}/native/include"
             else:  # zip package
                 folder = package_path + "/lib"
                 header_folder = package_path + "/include"

-            path = folder + "/" + "onnxruntime.dll"
-            print("Checking path: " + path)
-            if path not in file_list_in_package:
-                print("onnxruntime.dll not found for " + platform)
-                raise Exception("onnxruntime.dll not found for " + platform)
+            # In the NuGet GPU meta-package, onnxruntime.dll is in the dependent packages.
+            if package_contains_library:
+                path = folder + "/" + "onnxruntime.dll"
+                print("Checking path: " + path)
+                if path not in file_list_in_package:
+                    print("onnxruntime.dll not found for " + platform)
+                    raise Exception("onnxruntime.dll not found for " + platform)

-            if is_gpu_package:
+            if package_contains_cuda_binaries:
                 for dll in win_gpu_package_libraries:
                     path = folder + "/" + dll
                     print("Checking path: " + path)
                     if path not in file_list_in_package:
                         print(dll + " not found for " + platform)
                         raise Exception(dll + " not found for " + platform)
+
+            if package_contains_headers:
                 check_if_headers_are_present(gpu_related_header_files, header_folder, file_list_in_package, platform)

             if is_dml_package:
@@ -148,24 +162,28 @@
         elif platform.startswith("linux"):
             if package_type == "nuget":
                 folder = "runtimes/" + platform + "/native"
-                header_folder = "build/native/include"
+                build_dir = "buildTransitive" if is_gpu_dependent_package else "build"
+                header_folder = f"{build_dir}/native/include"
             else:  # tarball package
                 folder = package_path + "/lib"
                 header_folder = package_path + "/include"

-            path = folder + "/" + "libonnxruntime.so"
-            print("Checking path: " + path)
-            if path not in file_list_in_package:
-                print("libonnxruntime.so not found for " + platform)
-                raise Exception("libonnxruntime.so not found for " + platform)
+            if package_contains_library:
+                path = folder + "/" + "libonnxruntime.so"
+                print("Checking path: " + path)
+                if path not in file_list_in_package:
+                    print("libonnxruntime.so not found for " + platform)
+                    raise Exception("libonnxruntime.so not found for " + platform)

-            if is_gpu_package:
+            if package_contains_cuda_binaries:
                 for so in linux_gpu_package_libraries:
                     path = folder + "/" + so
                     print("Checking path: " + path)
                     if path not in file_list_in_package:
                         print(so + " not found for " + platform)
                         raise Exception(so + " not found for " + platform)
+
+            if package_contains_headers:
                 for header in gpu_related_header_files:
                     path = header_folder + "/" + header
                     print("Checking path: " + path)
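The validate_nuget() hunk below replaces the old substring test ("Gpu" anywhere in the file name) with two flags keyed on the text after ".Gpu": a leading version digit marks the meta-package, while ".windows"/".linux" mark the dependent packages. Extracted for illustration (the version number is a hypothetical stand-in):

```python
# Classification logic copied from the validate_nuget() hunk below.
def classify(package_name: str):
    name = package_name.lower()
    is_gpu_package = "microsoft.ml.onnxruntime.gpu.1" in name
    is_gpu_dependent_package = (
        "microsoft.ml.onnxruntime.gpu.windows" in name
        or "microsoft.ml.onnxruntime.gpu.linux" in name
    )
    return is_gpu_package, is_gpu_dependent_package

assert classify("Microsoft.ML.OnnxRuntime.Gpu.1.17.0.nupkg") == (True, False)
assert classify("Microsoft.ML.OnnxRuntime.Gpu.Linux.1.17.0.nupkg") == (False, True)
```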
"microsoft.ml.onnxruntime.gpu.windows" in args.package_name.lower() + or "microsoft.ml.onnxruntime.gpu.linux" in args.package_name.lower() + ) if "directml" in nuget_file_name.lower(): is_dml_package = True else: @@ -325,6 +343,7 @@ def validate_nuget(args): args.platforms_supported, zip_file, None, + is_gpu_dependent_package, ) verify_nuget_signing = args.verify_nuget_signing.lower()