diff --git a/tools/ci_build/github/azure-pipelines/cuda-packaging-pipeline.yml b/tools/ci_build/github/azure-pipelines/cuda-packaging-pipeline.yml index 520b39c39f9c2..f66952ac73b16 100644 --- a/tools/ci_build/github/azure-pipelines/cuda-packaging-pipeline.yml +++ b/tools/ci_build/github/azure-pipelines/cuda-packaging-pipeline.yml @@ -139,7 +139,5 @@ stages: CudaVersion: ${{ parameters.CudaVersion }} SpecificArtifact: ${{ parameters.SpecificArtifact }} BuildId: ${{ parameters.BuildId }} - docker_base_image: ${{ variables.docker_base_image }} - linux_trt_version: ${{ variables.linux_trt_version }} ## Win/Linux GPU Combined Publishing #- template: templates/publish-nuget.yml diff --git a/tools/ci_build/github/azure-pipelines/nuget/templates/test_linux.yml b/tools/ci_build/github/azure-pipelines/nuget/templates/test_linux.yml index 0e3a6a9dcc7b7..ae257f7ce0ef9 100644 --- a/tools/ci_build/github/azure-pipelines/nuget/templates/test_linux.yml +++ b/tools/ci_build/github/azure-pipelines/nuget/templates/test_linux.yml @@ -7,8 +7,7 @@ parameters: SpecificArtifact: false CustomOpArtifactName: 'onnxruntime-linux-x64' BuildId: '0' - DockerBaseImage: 'nvidia/cuda:11.8.0-cudnn8-devel-ubi8' - TRT_VERSION: 8.6.1.6-1.cuda11.8 + CudaVersion: '11.8' stages: - stage: NuGet_Test_Linux_${{ parameters.StageSuffix }} dependsOn: @@ -55,13 +54,18 @@ stages: - ${{if contains(parameters.StageSuffix , 'GPU') }}: - template: ../../templates/get-docker-image-steps.yml parameters: - Dockerfile: tools/ci_build/github/linux/docker/Dockerfile.package_ubuntu_cuda11_8_tensorrt8_6 + Dockerfile: tools/ci_build/github/linux/docker/Dockerfile.package_ubuntu_2004_gpu Context: tools/ci_build/github/linux/docker/ - DockerBuildArgs: " - --build-arg BASEIMAGE=${{ parameters.DockerBaseImage }} - --build-arg TRT_VERSION=${{ parameters.TRT_VERSION }} - --build-arg BUILD_UID=$( id -u ) - " + ${{if contains(parameters.CudaVersion , '12.2') }}: + DockerBuildArgs: " + --build-arg 
BASE_IMAGE=nvidia/cuda:12.2.2-cudnn8-devel-ubuntu20.04 + --build-arg TRT_VERSION=8.6.1.6-1+cuda12.0 + --build-arg BUILD_UID=$( id -u ) + " + ${{ else }}: + DockerBuildArgs: " + --build-arg BUILD_UID=$( id -u ) + " Repository: onnxruntimepackagestest - bash: | docker run --rm \ diff --git a/tools/ci_build/github/azure-pipelines/stages/cuda-testing-stage.yml b/tools/ci_build/github/azure-pipelines/stages/cuda-testing-stage.yml index 86d22a9475b6c..6b76e0c7c2592 100644 --- a/tools/ci_build/github/azure-pipelines/stages/cuda-testing-stage.yml +++ b/tools/ci_build/github/azure-pipelines/stages/cuda-testing-stage.yml @@ -2,10 +2,6 @@ parameters: - name: CudaVersion type: string default: '11.8' - - name: docker_base_image - type: string - - name: linux_trt_version - type: string # these 2 parameters are used for debugging. - name: SpecificArtifact type: boolean @@ -28,9 +24,8 @@ stages: AgentPool: Onnxruntime-Linux-GPU ArtifactSuffix: 'GPU' StageSuffix: 'GPU' - TRT_VERSION: ${{ parameters.linux_trt_version }} - DockerBaseImage: ${{ parameters.docker_base_image }} CustomOpArtifactName: onnxruntime-linux-x64-gpu NugetPackageName: 'Microsoft.ML.OnnxRuntime.Gpu' SpecificArtifact: ${{ parameters.specificArtifact }} + CudaVersion: ${{ parameters.CudaVersion }} BuildId: ${{ parameters.BuildId }} \ No newline at end of file diff --git a/tools/ci_build/github/linux/docker/Dockerfile.package_ubuntu_cuda11_8_tensorrt8_6 b/tools/ci_build/github/linux/docker/Dockerfile.package_ubuntu_2004_gpu similarity index 53% rename from tools/ci_build/github/linux/docker/Dockerfile.package_ubuntu_cuda11_8_tensorrt8_6 rename to tools/ci_build/github/linux/docker/Dockerfile.package_ubuntu_2004_gpu index 83a974469234f..7d96ceed3268b 100644 --- a/tools/ci_build/github/linux/docker/Dockerfile.package_ubuntu_cuda11_8_tensorrt8_6 +++ b/tools/ci_build/github/linux/docker/Dockerfile.package_ubuntu_2004_gpu @@ -5,8 +5,10 @@ # Dockerfile to run ONNXRuntime with TensorRT integration # Build base image with 
required system packages -FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04 AS base - +ARG BASE_IMAGE=nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04 +ARG TRT_VERSION=8.6.1.6-1+cuda11.8 +FROM $BASE_IMAGE AS base +ARG TRT_VERSION ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/src/tensorrt/bin:${PATH} ENV DEBIAN_FRONTEND=noninteractive @@ -24,12 +26,11 @@ RUN apt-get install -y --no-install-recommends \ RUN pip install --upgrade pip # Install TensorRT -RUN v="8.6.1.6-1+cuda11.8" &&\ - apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/7fa2af80.pub &&\ +RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/7fa2af80.pub &&\ apt-get update &&\ - apt-get install -y libnvinfer8=${v} libnvonnxparsers8=${v} libnvparsers8=${v} libnvinfer-plugin8=${v} libnvinfer-lean8=${v} libnvinfer-vc-plugin8=${v} libnvinfer-dispatch8=${v}\ - libnvinfer-headers-dev=${v} libnvinfer-headers-plugin-dev=${v} libnvinfer-dev=${v} libnvonnxparsers-dev=${v} libnvparsers-dev=${v} libnvinfer-plugin-dev=${v} libnvinfer-lean-dev=${v} libnvinfer-vc-plugin-dev=${v} libnvinfer-dispatch-dev=${v}\ - python3-libnvinfer=${v} libnvinfer-samples=${v} tensorrt-dev=${v} tensorrt-libs=${v} + apt-get install -y libnvinfer8=${TRT_VERSION} libnvonnxparsers8=${TRT_VERSION} libnvparsers8=${TRT_VERSION} libnvinfer-plugin8=${TRT_VERSION} libnvinfer-lean8=${TRT_VERSION} libnvinfer-vc-plugin8=${TRT_VERSION} libnvinfer-dispatch8=${TRT_VERSION}\ + libnvinfer-headers-dev=${TRT_VERSION} libnvinfer-headers-plugin-dev=${TRT_VERSION} libnvinfer-dev=${TRT_VERSION} libnvonnxparsers-dev=${TRT_VERSION} libnvparsers-dev=${TRT_VERSION} libnvinfer-plugin-dev=${TRT_VERSION} libnvinfer-lean-dev=${TRT_VERSION} libnvinfer-vc-plugin-dev=${TRT_VERSION} libnvinfer-dispatch-dev=${TRT_VERSION}\ + python3-libnvinfer=${TRT_VERSION} libnvinfer-samples=${TRT_VERSION} tensorrt-dev=${TRT_VERSION} tensorrt-libs=${TRT_VERSION} ADD scripts /tmp/scripts 
RUN cd /tmp/scripts && /tmp/scripts/install_dotnet.sh && rm -rf /tmp/scripts