From 622c6be6112f062456553c15acc70673547d58f9 Mon Sep 17 00:00:00 2001
From: Sophie Schoenmeyer <107952697+sophies927@users.noreply.github.com>
Date: Thu, 7 Nov 2024 16:43:55 -0800
Subject: [PATCH] Update ORT website w/ 1.20.0 + 1.20.1 changes (#22768)
### Description
The Getting Started support matrix and ORT Release Roadmap have changed
since the ORT 1.20.0 release.
Updated with information about the new packages and the new default CUDA version, and added the
1.20.1 patch release plans.
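
A quick way to sanity-check the CUDA 12.X default described above, for anyone verifying the updated table locally. This is only an illustrative sketch, not part of this change: it assumes `onnxruntime-gpu` >= 1.20 installed from the default index (CUDA 12.X build) and uses a placeholder `model.onnx` path.

```python
# Sanity check for the CUDA default-version switch (sketch, not part of this PR).
# Assumes: `pip install onnxruntime-gpu` pulled the CUDA 12.X (default) build;
# the CUDA 11.X build comes from the onnxruntime-cuda-11 extra index listed in the table.
import onnxruntime as ort

print(ort.__version__)                 # expect 1.20.0 or 1.20.1
print(ort.get_available_providers())   # expect 'CUDAExecutionProvider' on a CUDA 12.X machine

# Creating a session with an explicit provider list falls back to CPU if the
# matching CUDA libraries are not installed. "model.onnx" is a placeholder path.
# sess = ort.InferenceSession(
#     "model.onnx",
#     providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
# )
```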
### Motivation and Context
---
src/routes/getting-started/table.svelte | 75 ++++++----
src/routes/roadmap/+page.svelte | 187 +++++++++---------------
2 files changed, 122 insertions(+), 140 deletions(-)
diff --git a/src/routes/getting-started/table.svelte b/src/routes/getting-started/table.svelte
index e3cdd46ccc2e1..6f93326de619f 100644
--- a/src/routes/getting-started/table.svelte
+++ b/src/routes/getting-started/table.svelte
@@ -77,16 +77,16 @@
const TrainingBuildIds = ['ot_stable', 'ot_nightly'];
const validCombos = {
'windows,C-API,X64,CUDA':
- "For CUDA 11.X (default): Install Nuget package Microsoft.ML.OnnxRuntime.Gpu. We are planning to start releasing the following packages:
We are planning to upgrade ONNX Runtime support for the following (where the first value is the
highest version previously supported and the second value is the version support that will be
- added in ORT 1.20):
+ added in ORT 1.20.1):
- In addition to various bug fixes and performance improvements, ORT 1.20 will include the
- following major updates:
+ In addition to various bug fixes and performance improvements, ORT 1.20.1 will include the
+ following updates:
For CUDA 12.X: Follow instructions here.
Refer to docs for requirements.",
+ "For CUDA 12.X (default): Install Nuget package Microsoft.ML.OnnxRuntime.Gpu.Windows.
For CUDA 11.X: Follow instructions here.
Refer to docs for requirements.",
'windows,C++,X64,CUDA':
- "For CUDA 11.X (default): Install Nuget package Microsoft.ML.OnnxRuntime.Gpu.
For CUDA 12.X: Follow instructions here.
Refer to docs for requirements.",
+ "For CUDA 12.X (default): Install Nuget package Microsoft.ML.OnnxRuntime.Gpu.Windows.
For CUDA 11.X: Follow instructions here.
Refer to docs for requirements.",
'windows,C#,X64,CUDA':
- "For CUDA 11.X (default): Install Nuget package Microsoft.ML.OnnxRuntime.Gpu.
For CUDA 12.X: Follow instructions here.
Refer to docs for requirements.",
+ "For CUDA 12.X (default): Install Nuget package Microsoft.ML.OnnxRuntime.Gpu.Windows.
For CUDA 11.X: Follow instructions here.
Refer to docs for requirements.",
'windows,Python,X64,CUDA':
- "For CUDA 11.X (default):
pip install onnxruntime-gpu
For CUDA 12.X:
pip install onnxruntime-gpu --extra-index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple/
Refer to docs for requirements.",
+ "For CUDA 12.X (default):
pip install onnxruntime-gpu
For CUDA 11.X:
pip install onnxruntime-gpu --extra-index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-11/pypi/simple/
Refer to docs for requirements.",
'linux,Python,ARM64,CUDA':
"For Jetpack 4.4+, follow installation instructions from here.
Note: We do not have CUDA 12.X ARM64 support at this time.",
@@ -95,13 +95,13 @@
"Download .tgz file from Github
Refer to docs for requirements.",
'linux,C++,X64,CUDA':
- "For CUDA 11.X (default): Install Nuget package Microsoft.ML.OnnxRuntime.Gpu.
For CUDA 12.X: Follow instructions here.
Refer to docs for requirements.",
+ "For CUDA 12.X (default): Install Nuget package Microsoft.ML.OnnxRuntime.Gpu.Linux.
For CUDA 11.X: Follow instructions here.
Refer to docs for requirements.",
'linux,C#,X64,CUDA':
- "For CUDA 11.X (default): Install Nuget package Microsoft.ML.OnnxRuntime.Gpu.
For CUDA 12.X: Follow instructions here.
Refer to docs for requirements.",
+ "For CUDA 12.X (default): Install Nuget package Microsoft.ML.OnnxRuntime.Gpu.Linux.
For CUDA 11.X: Follow instructions here.
Refer to docs for requirements.",
'linux,Python,X64,CUDA':
- "For CUDA 11.X (default):
pip install onnxruntime-gpu
For CUDA 12.X:
pip install onnxruntime-gpu --extra-index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple/
Refer to docs for requirements.",
+ "For CUDA 12.X (default):
pip install onnxruntime-gpu
For CUDA 11.X:
pip install onnxruntime-gpu --extra-index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-11/pypi/simple/
Refer to docs for requirements.",
'linux,C-API,ARM32,DefaultCPU':
"Follow build instructions from here",
@@ -119,13 +119,13 @@
"Install Nuget package Microsoft.ML.OnnxRuntime",
'windows,C-API,ARM32,DefaultCPU':
- "Install Nuget package Microsoft.ML.OnnxRuntime",
+ "Follow build instructions from here",
'windows,C++,ARM32,DefaultCPU':
- "Install Nuget package Microsoft.ML.OnnxRuntime",
+ "Follow build instructions from here",
'windows,C#,ARM32,DefaultCPU':
- "Install Nuget package Microsoft.ML.OnnxRuntime",
+ "Follow build instructions from here",
'windows,C-API,ARM64,DefaultCPU':
"Install Nuget package Microsoft.ML.OnnxRuntime",
@@ -157,6 +157,12 @@
'linux,C#,X64,DefaultCPU':
"Install Nuget package Microsoft.ML.OnnxRuntime",
+ 'linux,C-API,ARM64,DefaultCPU':
+ "Download .tgz file from Github",
+
+ 'linux,C#,ARM64,DefaultCPU':
+ "Install Nuget package Microsoft.ML.OnnxRuntime",
+
'mac,C-API,X64,DefaultCPU':
"Add 'onnxruntime-c' using CocoaPods or download the .tgz file from Github.",
@@ -164,13 +170,15 @@
"Add 'onnxruntime-c' using CocoaPods or download the .tgz file from Github.",
'mac,C#,X64,DefaultCPU':
- "Download .tgz file from Github",
+ "Install Nuget package Microsoft.ML.OnnxRuntime",
'mac,C#,X64,CoreML':
- "Download .tgz file from Github",
+ "Install Nuget package Microsoft.ML.OnnxRuntime",
'windows,Python,X64,DefaultCPU': 'pip install onnxruntime',
+ 'windows,Python,X86,DefaultCPU': 'pip install onnxruntime',
+
'mac,Python,X64,DefaultCPU': 'pip install onnxruntime',
'mac,Python,X64,CoreML': 'pip install onnxruntime',
@@ -267,32 +275,29 @@
'windows,Python,X64,OpenVINO': 'pip install onnxruntime-openvino',
'windows,C-API,X64,TensorRT':
- "Install Nuget package Microsoft.ML.OnnxRuntime.Gpu
Refer to docs for usage details.",
+ "Install Nuget package Microsoft.ML.OnnxRuntime.Gpu.Windows
Refer to docs for usage details.",
'windows,C++,X64,TensorRT':
- "Install Nuget package Microsoft.ML.OnnxRuntime.Gpu
Refer to docs for usage details.",
+ "Install Nuget package Microsoft.ML.OnnxRuntime.Gpu.Windows
Refer to docs for usage details.",
'windows,C#,X64,TensorRT':
- "Install Nuget package Microsoft.ML.OnnxRuntime.Gpu
Refer to docs for usage details.",
+ "Install Nuget package Microsoft.ML.OnnxRuntime.Gpu.Windows
Refer to docs for usage details.",
'windows,Python,X64,TensorRT':
"pip install onnxruntime-gpu
Refer to docs for requirements.",
'linux,C-API,X64,TensorRT':
- "Install Nuget package Microsoft.ML.OnnxRuntime.Gpu
Refer to docs for usage details.",
+ "Install Nuget package Microsoft.ML.OnnxRuntime.Gpu.Linux
Refer to docs for usage details.",
'linux,C++,X64,TensorRT':
- "Install Nuget package Microsoft.ML.OnnxRuntime.Gpu
Refer to docs for usage details.",
+ "Install Nuget package Microsoft.ML.OnnxRuntime.Gpu.Linux
Refer to docs for usage details.",
'linux,C#,X64,TensorRT':
- "Install Nuget package Microsoft.ML.OnnxRuntime.Gpu
Refer to docs for usage details.",
+ "Install Nuget package Microsoft.ML.OnnxRuntime.Gpu.Linux
Refer to docs for usage details.",
'linux,Python,X64,TensorRT':
"pip install onnxruntime-gpu
Refer to docs for requirements.",
- 'linux,C#,ARM64,DefaultCPU':
- "Install Nuget package Microsoft.ML.OnnxRuntime",
-
'linux,Python,ARM64,TensorRT':
"pip install onnxruntime-gpu
Refer to docs for requirements.",
@@ -335,7 +340,7 @@
"Add a dependency on com.microsoft.onnxruntime:onnxruntime using Maven/Gradle",
'linux,Java,X64,CUDA':
- "Add a dependency on com.microsoft.onnxruntime:onnxruntime_gpu using Maven/Gradle.
Refer to docs for requirements.
Note: CUDA 12.X is not currently available with the Java GPU package due to low demand.",
+ "Add a dependency on com.microsoft.onnxruntime:onnxruntime_gpu using Maven/Gradle.
Refer to docs for requirements.",
'mac,Java,X64,DefaultCPU':
"Add a dependency on com.microsoft.onnxruntime:onnxruntime using Maven/Gradle",
@@ -382,7 +387,7 @@
"Add a dependency on com.microsoft.onnxruntime:onnxruntime using Maven/Gradle",
'windows,Java,X64,CUDA':
- "Add a dependency on com.microsoft.onnxruntime:onnxruntime_gpu using Maven/Gradle.
Refer to docs for requirements.
Note: CUDA 12.X is not currently available with the Java GPU package due to low demand.",
+ "Add a dependency on com.microsoft.onnxruntime:onnxruntime_gpu using Maven/Gradle.
Refer to docs for requirements.",
'windows,Java,X64,TensorRT':
"Add a dependency on com.microsoft.onnxruntime:onnxruntime_gpu using Maven/Gradle.
Refer to docs for usage details.",
@@ -635,13 +640,22 @@
//QNN
'windows,C-API,ARM64,QNN':
- "View installation instructions here",
+ "Install Nuget package Microsoft.ML.OnnxRuntime.QNN.",
'windows,C++,ARM64,QNN':
- "View installation instructions here",
+ "Install Nuget package Microsoft.ML.OnnxRuntime.QNN.",
'windows,C#,ARM64,QNN':
- "View installation instructions here",
+ "Install Nuget package Microsoft.ML.OnnxRuntime.QNN.",
+
+ 'windows,C-API,X64,QNN':
+ "Install Nuget package Microsoft.ML.OnnxRuntime.QNN.",
+
+ 'windows,C++,X64,QNN':
+ "Install Nuget package Microsoft.ML.OnnxRuntime.QNN.",
+
+ 'windows,C#,X64,QNN':
+ "Install Nuget package Microsoft.ML.OnnxRuntime.QNN.",
'linux,C-API,ARM64,QNN':
"Follow build instructions from here",
@@ -655,6 +669,15 @@
'android,C++,ARM64,QNN':
"Follow build instructions from here",
+ 'windows,Python,ARM64,QNN':
+ "pip install onnxruntime-qnn",
+
+ 'windows,Python,X64,QNN':
+ "pip install onnxruntime-qnn",
+
+ 'android,Java,ARM64,QNN':
+ "Add a dependency on com.microsoft.onnxruntime:onnxruntime-android-qnn using Maven/Gradle and refer to the mobile deployment guide",
+
//Xnnpack
'ios,C-API,ARM64,XNNPACK':
"Add 'onnxruntime-c' using CocoaPods and refer to the mobile deployment guide or Follow build instructions from here",
diff --git a/src/routes/roadmap/+page.svelte b/src/routes/roadmap/+page.svelte
index 117e993a05215..41feb697dab87 100644
--- a/src/routes/roadmap/+page.svelte
+++ b/src/routes/roadmap/+page.svelte
@@ -1,6 +1,6 @@
Announcements
- New Packages
- We are planning to start releasing the following packages:
Versioning Updates
Major Updates
Feature Requests
@@ -166,15 +150,9 @@
Note: All timelines and features listed on this page are subject to change.
- Tentative release date: 10/30/2024
- Release candidate now available on GitHub here.
+ Tentative release date: 11/20/2024
- We are planning to upgrade ONNX Runtime support for the following (where the first value is the
highest version previously supported and the second value is the version support that will be
added in ORT 1.20):
+ We are planning to upgrade ONNX Runtime support for the following (where the first value is the
highest version previously supported and the second value is the version support that will be
added in ORT 1.20.1):
- In addition to various bug fixes and performance improvements, ORT 1.20 will include the
following major updates:
+ In addition to various bug fixes and performance improvements, ORT 1.20.1 will include the
following updates:
+ No features planned for 1.20.1. Stay tuned for 1.21 features.
+ No features planned for 1.20.1. Stay tuned for 1.21 features.
+ No features planned for 1.20.1. Stay tuned for 1.21 features.
+ No features planned for 1.20.1. Stay tuned for 1.21 features.
+ No features planned for 1.20.1. Stay tuned for 1.21 features.
+ No features planned for 1.20.1. Stay tuned for 1.21 features.
- Full release notes for ONNX Runtime Extensions v0.13 will be found here once they are available (10/30 target).
+ No features planned for 1.20.1. Stay tuned for 1.21 features.
- Full release notes for Olive v0.7.0 can be found here.
+ No features planned for 1.20.1. Stay tuned for 1.21 features.