diff --git a/.eslintignore b/.eslintignore new file mode 100644 index 0000000000000..38972655faff0 --- /dev/null +++ b/.eslintignore @@ -0,0 +1,13 @@ +.DS_Store +node_modules +/build +/.svelte-kit +/package +.env +.env.* +!.env.example + +# Ignore files for PNPM, NPM and YARN +pnpm-lock.yaml +package-lock.json +yarn.lock diff --git a/.eslintrc.cjs b/.eslintrc.cjs new file mode 100644 index 0000000000000..ebc19589fa579 --- /dev/null +++ b/.eslintrc.cjs @@ -0,0 +1,30 @@ +module.exports = { + root: true, + extends: [ + 'eslint:recommended', + 'plugin:@typescript-eslint/recommended', + 'plugin:svelte/recommended', + 'prettier' + ], + parser: '@typescript-eslint/parser', + plugins: ['@typescript-eslint'], + parserOptions: { + sourceType: 'module', + ecmaVersion: 2020, + extraFileExtensions: ['.svelte'] + }, + env: { + browser: true, + es2017: true, + node: true + }, + overrides: [ + { + files: ['*.svelte'], + parser: 'svelte-eslint-parser', + parserOptions: { + parser: '@typescript-eslint/parser' + } + } + ] +}; diff --git a/.github/workflows/check-website-links.yml b/.github/workflows/check-website-links.yml index 5c102d35fe410..bdff6b7c5f723 100644 --- a/.github/workflows/check-website-links.yml +++ b/.github/workflows/check-website-links.yml @@ -7,7 +7,7 @@ on: branches: - gh-pages workflow_dispatch: - + jobs: checklinks: name: Check website links @@ -21,6 +21,22 @@ jobs: bundler-cache: true - name: Build jekyll website with drafts run: bundle exec jekyll build --drafts + + - name: Use Node.js + uses: actions/setup-node@v3 + with: + node-version: 19.x + + - name: Install dependencies + run: npm install + + - name: Generate build + run: npm run build + + - name: Move site into place + run: | + sudo mv ./build/* ./_site + - name: Check for broken links run: | - bundle exec htmlproofer --assume_extension --checks_to_ignore ImageCheck,ScriptCheck --only_4xx --http_status_ignore 429,403 --allow_hash_href --url_ignore "https://onnxruntime.ai/docs/reference/api/c-api.html,https://www.onnxruntime.ai/docs/reference/execution-providers/TensorRT-ExecutionProvider.html#c-api-example,https://www.onnxruntime.ai/docs/resources/graph-optimizations.html,onnxruntime/capi/onnxruntime_pybind11_state.html,https://github.com/microsoft/onnx-converters-private/issues/new/choose" --log-level :info ./_site + bundle exec htmlproofer --assume_extension --checks_to_ignore ImageCheck,ScriptCheck --only_4xx --http_status_ignore 429,403 --allow_hash_href --url_ignore "https://onnxruntime.ai/docs/reference/api/c-api.html,https://www.onnxruntime.ai/docs/reference/execution-providers/TensorRT-ExecutionProvider.html#c-api-example,https://www.onnxruntime.ai/docs/resources/graph-optimizations.html,onnxruntime/capi/onnxruntime_pybind11_state.html,https://github.com/microsoft/onnx-converters-private/issues/new/choose,https://aka.ms/onnx/exportissue,%sveltekit.assets%/favicon.ico,https://onnxruntime.ai/getting-started, https://aka.ms/onnx/board" --log-level :info ./_site diff --git a/.github/workflows/publish-gh-pages.yml b/.github/workflows/publish-gh-pages.yml index d459af79222b2..c92dc1b57a65e 100644 --- a/.github/workflows/publish-gh-pages.yml +++ b/.github/workflows/publish-gh-pages.yml @@ -4,7 +4,7 @@ name: Publish site on: # Runs on pushes targeting the branch where the website sources live push: - branches: ["gh-pages"] + branches: ['gh-pages'] # Allows you to run this workflow manually from the Actions tab workflow_dispatch: @@ -18,7 +18,7 @@ permissions: # Allow only one concurrent deployment, skipping runs queued between 
the run in-progress and latest queued. # However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. concurrency: - group: "pages" + group: 'pages' cancel-in-progress: false jobs: @@ -32,6 +32,14 @@ jobs: - name: Setup Pages uses: actions/configure-pages@v3 + - name: Use Node.js + uses: actions/setup-node@v3 + with: + node-version: 19.x + + - name: Install dependencies + run: npm install + - name: Build with Jekyll uses: actions/jekyll-build-pages@v1 with: @@ -101,6 +109,12 @@ jobs: sudo rm -rf _site/docs/api/js sudo mv apidocs/docs/api/js _site/docs/api + - name: Generate build + run: npm run build + + - name: Move site into place + run: | + sudo mv ./build/* ./_site - name: Upload site uses: actions/upload-pages-artifact@v1 diff --git a/.gitignore b/.gitignore index 3fbeafd5e67a5..4c873c6b31fff 100644 --- a/.gitignore +++ b/.gitignore @@ -11,4 +11,14 @@ _site .vscode /build -/cmake \ No newline at end of file +/cmake + +.DS_Store +node_modules +/.svelte-kit +/package +.env +.env.* +!.env.example +vite.config.js.timestamp-* +package-lock.json \ No newline at end of file diff --git a/.npmrc b/.npmrc new file mode 100644 index 0000000000000..0c05da457e450 --- /dev/null +++ b/.npmrc @@ -0,0 +1,2 @@ +engine-strict=true +resolution-mode=highest diff --git a/.prettierignore b/.prettierignore new file mode 100644 index 0000000000000..38972655faff0 --- /dev/null +++ b/.prettierignore @@ -0,0 +1,13 @@ +.DS_Store +node_modules +/build +/.svelte-kit +/package +.env +.env.* +!.env.example + +# Ignore files for PNPM, NPM and YARN +pnpm-lock.yaml +package-lock.json +yarn.lock diff --git a/.prettierrc b/.prettierrc new file mode 100644 index 0000000000000..a77fddea90975 --- /dev/null +++ b/.prettierrc @@ -0,0 +1,9 @@ +{ + "useTabs": true, + "singleQuote": true, + "trailingComma": "none", + "printWidth": 100, + "plugins": ["prettier-plugin-svelte"], + "pluginSearchDirs": ["."], + "overrides": [{ "files": "*.svelte", "options": { "parser": "svelte" } }] +} diff --git a/404.html b/404.html deleted file mode 100644 index 2a208139b9090..0000000000000 --- a/404.html +++ /dev/null @@ -1,111 +0,0 @@ - - - - - - - - - - - - - - - - - - - ONNX Runtime | 404 - Page Not Found - - - - - - - - - - Skip to main content -
-
- - -
- - -
-
-
-
-
-
-

Oops

-

- You've reached a page that doesn't exist. If you feel like something should be - there, please open an - issue on GitHub. -

-
-
-
-
-
-
-
-
- -
- - - - - - - - - - - - - \ No newline at end of file diff --git a/Build2023.html b/Build2023.html deleted file mode 100644 index b74078fd47f7b..0000000000000 --- a/Build2023.html +++ /dev/null @@ -1,89 +0,0 @@ - - - - - - - - - - ONNX Runtime at the 2023 Microsoft Build conference - - - - - - - - - - - Skip to main content -
-
- - -
- - - -
-
- -
- - - - - - - - - - - - - diff --git a/README.md b/README.md new file mode 100644 index 0000000000000..5c91169b0ca65 --- /dev/null +++ b/README.md @@ -0,0 +1,38 @@ +# create-svelte + +Everything you need to build a Svelte project, powered by [`create-svelte`](https://github.com/sveltejs/kit/tree/master/packages/create-svelte). + +## Creating a project + +If you're seeing this, you've probably already done this step. Congrats! + +```bash +# create a new project in the current directory +npm create svelte@latest + +# create a new project in my-app +npm create svelte@latest my-app +``` + +## Developing + +Once you've created a project and installed dependencies with `npm install` (or `pnpm install` or `yarn`), start a development server: + +```bash +npm run dev + +# or start the server and open the app in a new browser tab +npm run dev -- --open +``` + +## Building + +To create a production version of your app: + +```bash +npm run build +``` + +You can preview the production build with `npm run preview`. + +> To deploy your app, you may need to install an [adapter](https://kit.svelte.dev/docs/adapters) for your target environment. diff --git a/_config.yml b/_config.yml index 5084e8272173d..0620f40ac8af4 100644 --- a/_config.yml +++ b/_config.yml @@ -10,28 +10,28 @@ plugins: - jekyll-redirect-from kramdown: parse_block_html: true - toc_levels: "2" -logo: "/images/ONNX-Runtime-logo.svg" -aux_links: - "ONNX Runtime": - - "/" - "Install": - - "/docs/install/" - "Get Started": - - "/docs/get-started/" - "Tutorials": - - "/docs/tutorials/" - "API Docs": - - "/docs/api/" - "YouTube": - - "https://www.youtube.com/onnxruntime" - "GitHub": - - "https://github.com/microsoft/onnxruntime" -ga_tracking: UA-156955408-1 + toc_levels: '2' +logo: '/images/ONNX-Runtime-logo.svg' +aux_links: + 'ONNX Runtime': + - '/' + 'Install': + - '/docs/install/' + 'Get Started': + - '/docs/get-started/' + 'Tutorials': + - '/docs/tutorials/' + 'API Docs': + - '/docs/api/' + 'YouTube': + - 'https://www.youtube.com/onnxruntime' + 'GitHub': + - 'https://github.com/microsoft/onnxruntime' +ga_tracking: UA-156955408-1 # Footer "Edit this page on GitHub" link text gh_edit_link: true # show or hide edit this page link -gh_edit_link_text: "Edit this page on GitHub" -gh_edit_repository: "https://github.com/microsoft/onnxruntime" # the github URL for your repo -gh_edit_branch: "gh-pages" # the branch that your docs is served from +gh_edit_link_text: 'Edit this page on GitHub' +gh_edit_repository: 'https://github.com/microsoft/onnxruntime' # the github URL for your repo +gh_edit_branch: 'gh-pages' # the branch that your docs is served from # gh_edit_source: docs # the source that your files originate from -gh_edit_view_mode: "tree" # "tree" or "edit" if you want the user to jump into the editor immediately \ No newline at end of file +gh_edit_view_mode: 'tree' # "tree" or "edit" if you want the user to jump into the editor immediately diff --git a/_includes/footer_custom.html b/_includes/footer_custom.html deleted file mode 100644 index 5b0c6763e6de2..0000000000000 --- a/_includes/footer_custom.html +++ /dev/null @@ -1,3 +0,0 @@ -{%- assign url = page.url -%} - -

For documentation questions, please file an issue

\ No newline at end of file diff --git a/_includes/head_custom.html b/_includes/head_custom.html deleted file mode 100644 index 782e013b101ad..0000000000000 --- a/_includes/head_custom.html +++ /dev/null @@ -1,33 +0,0 @@ - - - - - - diff --git a/_sass/color_schemes/onnxruntime.scss b/_sass/color_schemes/onnxruntime.scss index 6a1fc4731b5f4..4e0cc934e1881 100644 --- a/_sass/color_schemes/onnxruntime.scss +++ b/_sass/color_schemes/onnxruntime.scss @@ -3,11 +3,11 @@ $btn-primary-color: #226aca; // Code is too light in default theme // .highlight .n { - color: #555 !important; + color: #555 !important; } .highlight .nn { - color: #555 !important; + color: #555 !important; } .highlight .c1 { - color: #188616 !important; + color: #188616 !important; } diff --git a/_sass/custom/custom.scss b/_sass/custom/custom.scss index 70b4a49f71317..a0a655e67b7a8 100644 --- a/_sass/custom/custom.scss +++ b/_sass/custom/custom.scss @@ -1,26 +1,28 @@ -.site-nav, .site-header, .site-footer { - width: 100%; - } - +.site-nav, +.site-header, +.site-footer { + width: 100%; +} + .side-bar { - @include mq(lg) { - width: $nav-width; - } + @include mq(lg) { + width: $nav-width; + } } .site-footer { - color: #717072 + color: #717072; } - + .main { - @include mq(lg) { - margin-left: $nav-width; - } + @include mq(lg) { + margin-left: $nav-width; + } } content blockquote { - background: #f9f9f9; - border-left: 5px solid #ccc; - margin: 1.5em 10px; - padding: 0.5em 10px; + background: #f9f9f9; + border-left: 5px solid #ccc; + margin: 1.5em 10px; + padding: 0.5em 10px; } diff --git a/about.html b/about.html deleted file mode 100644 index 22d7ab5392a4b..0000000000000 --- a/about.html +++ /dev/null @@ -1,170 +0,0 @@ - - - - - - - - - - - - - - - ONNX Runtime | About - - - - - - - - - - Skip to main content -
-
- - -
- - -
-
-
-
-
-
-

About

-

- ONNX Runtime is an open source project that is designed to accelerate machine learning across a wide range of frameworks, operating systems, and hardware platforms. It enables acceleration of machine learning inferencing across all of your deployment targets using a single set of APIs. ONNX Runtime automatically parses through your model to identify optimization opportunities and provides access to the best hardware acceleration available. -

-

- ONNX Runtime also offers training acceleration, which incorporates innovations from Microsoft Research and is proven across production workloads like Office 365, Bing and Visual Studio. -

- Join us on Github -

-

- At Microsoft, ONNX Runtime is used as the primary Machine Learning inferencing solution for product groups. ONNX Runtime serves over 1 trillion daily inferences across over 150 production models covering all task domains. -

- - Learn more about ONNX Runtime at Microsoft - -
-
- ONNX Runtime Logo Icon -
-
-
-
-
-
-
-
- Illustration of a computational graph representing ONNX format and the acceleration enabled by ONNX Runtime -
-
-

Optimization and acceleration

-

- Run any ONNX model using a single set of inference APIs that provide access to the best hardware acceleration available. Built-in optimization features trim and consolidate nodes without impacting model accuracy. Additionally, full backwards compatibility for ONNX and ONNX-ML ensures all ONNX models can be inferenced. -

-
-
-
-
- Illustration of blank boxes conveying the breadth of API and platform support -
-
-

API and platform support

-

- Take advantage of the benefits of ONNX Runtime without changing your technology stack. Access ONNX Runtime using your preferred API: C#, C++, C, Python, or Java. Support for Linux, Windows and Mac allows you to build and deploy applications without worry. -

-
-
-
-
- Illustration of a lightbulb above three computers, all connected by lines; represents the community of ONNX Runtime contributors -
-
-

Continuous community innovation

-

- Our community of partners and contributors drives constant innovation. Partners provide ONNX compatible compilers and accelerators to ensure models are as efficient as possible. Our contributor community improves ONNX Runtime by contributing code, ideas and feedback. Join us on GitHub. -

-
-
-
-
-
- -
-
-
-
-
-

Design principles

-

- ONNX Runtime abstracts custom accelerators and runtimes to maximize their benefits across an ONNX model. To do this, ONNX Runtime partitions the ONNX model graph into subgraphs that align with available custom accelerators and runtimes. When operators are not supported by custom accelerators or runtimes, ONNX Runtime provides a default runtime that is used as the fallback execution — ensuring that any model will run. Learn more. -

-
-
-
-
-
-
-
-
- -
- - - - - - - - - - - - - \ No newline at end of file diff --git a/blogs.html b/blogs.html deleted file mode 100644 index ac82db803bcbb..0000000000000 --- a/blogs.html +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - ONNX Runtime blogs - - - - - - - - - diff --git a/blogs/footer.html b/blogs/footer.html deleted file mode 100644 index ab742525560f7..0000000000000 --- a/blogs/footer.html +++ /dev/null @@ -1,68 +0,0 @@ - diff --git a/blogs/header.html b/blogs/header.html deleted file mode 100644 index 57c8af9d78186..0000000000000 --- a/blogs/header.html +++ /dev/null @@ -1,40 +0,0 @@ -
- -
diff --git a/blogs/index.html b/blogs/index.html deleted file mode 100644 index 1eac8a2512563..0000000000000 --- a/blogs/index.html +++ /dev/null @@ -1,130 +0,0 @@ - - - - - - - - - - - - - - - ONNX Runtime | Blogs - - - - - - - - - - Skip to main content -
-
- - -
- - -
-
- -
-
-

Blogs & Announcements

-
-
- -
- - - -
-
-
- - -
- - - - - - - - - - - - - - - diff --git a/blogs/pytorch-on-the-edge.html b/blogs/pytorch-on-the-edge.html deleted file mode 100644 index eee11ccc142b1..0000000000000 --- a/blogs/pytorch-on-the-edge.html +++ /dev/null @@ -1,361 +0,0 @@ - - - - - - - - - - - - - - - Run PyTorch models on the edge - - - - - - - - - - - - - - - - - Skip to main content -
-
- - -
- - -
-
- -
-
-

Run PyTorch models on the edge

- - - -
-
-

- Most modern ML models are developed with PyTorch. The agility and flexibility that PyTorch provides for creating and training models has made it the most popular deep learning framework today. The typical workflow is to train these models in the cloud and run them from the cloud as well. However, many scenarios are arising that make it more attractive – or in some cases, required – to run locally on device. These include: -

-
    -
  • Avoiding network round-trips to the cloud (for example in audio and video processing)
  • -
  • Keeping user data on device (for privacy protection or regulatory requirements)
  • -
  • High cost of cloud resources (especially when device capabilities are underutilized)
  • -
  • Application requirements to operate without internet connectivity
  • -
- - Diagram showing the PyTorch logo representing a PyTorch model, fanning out to icons for web, mobile and browser devices running ONNX Runtime - -

In this article, we'll demystify running PyTorch models on the edge. We define 'edge' as anywhere that is outside of the cloud, ranging from large, well-resourced personal computers to small footprint devices such as mobile phones. This has been a challenging task to accomplish in the past, but new advances in model optimization and software like ONNX Runtime make it more feasible – even for new generative AI and large language models like Stable Diffusion, Whisper, and Llama2.

- -

Considerations for PyTorch models on the edge

- -

There are several factors to keep in mind when thinking about running a PyTorch model on the edge:

-
    -
• Size: modern models can be several gigabytes (hence the name Large Language Models!). On the cloud, size is usually not a consideration until it becomes too large to fit on a single GPU. At that point there are various well-known solutions for running across multiple GPUs. For edge devices, we need to find models that can fit within the constraints of the device. This sometimes requires a tradeoff with quality. Most modern models come in several sizes (1 billion parameters, 13 billion parameters, 70 billion parameters, etc) so you can select a variant that fits on your device. Techniques such as quantization are usually applied to reduce the number of bits representing parameters, further reducing the model size (see the quantization sketch just after this list). The size of the application is also constrained by the app stores, so bringing in gigabytes of libraries won't work on the edge.
  • -
• API for application integration: on the cloud, models are usually packaged as Docker containers that expose an endpoint that is called by an application or service. On edge devices, Docker containers may take up too many resources or may not even be supported. By using an optimized engine, like ONNX Runtime, the dependency on Python and Docker containers can be eliminated. ONNX Runtime also has APIs in many languages including C, C++, C#, Rust, Java, JavaScript, Objective-C and Swift, making it easier to integrate natively with the hosting application.
  • -
  • Performance: with large amounts of memory, no power restrictions, and hefty compute capabilities, running non-optimized models on the cloud is possible. On edge devices, these luxuries do not exist and optimization is crucial. For example, ONNX Runtime optimizes memory allocations, fuses model operators, reduces kernel launch times, minimizes tensor transfers between processing units, and applies tuned matrix math algorithms. It’s also able to make use of compilers and engines that are device-specific, providing a common interface for your application while harnessing the best approach on each device.
  • -
  • Maintainability: on the cloud, updating a model is as simple as deploying a new container image and ramping up traffic. On the edge, you need to consider how you will distribute model updates. Sometimes this involves publishing updates to an app store, sometimes it might be possible to implement a data update mechanism within your app and download new model files or maybe even deltas. There are many possible paths, so we won’t go into much depth on this topic in this article but it’s an aspect to keep in mind as you plan for production.
  • -
  • Hybrid: instead of cloud versus device, you can choose to utilize both. There are several hybrid patterns that are used in production today by applications such as Office. One pattern is to dynamically decide whether to run on the device or in the cloud based on network conditions or input characteristics. Another pattern is to run part of the model pipeline on the device and part on the cloud. This is especially useful with modern model pipelines that have separate encoder and decoder stages. Using an engine like ONNX Runtime that works on both cloud and device simplifies development. We’ll discuss hybrid scenarios in more detail in a forthcoming article.
  • -
• Personalization: in many cases, the PyTorch model is simply being run on the device. However, you may also have scenarios where you need to personalize the model on the device without sending data to the cloud. Recommendation and content targeting are example scenarios that can improve their quality by updating models based on activity on the device. Fine tuning and training with PyTorch on the device may not be feasible (due to performance and size concerns), but using an engine like ONNX Runtime allows PyTorch models to be updated and personalized locally. The same mechanism also enables federated learning, which can help mitigate user data exposure.
  • -
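The Size item above mentions quantization. As a concrete illustration, here is a minimal sketch of post-training dynamic quantization using ONNX Runtime's quantization tooling; the file names are placeholders for your own exported model.

```python
# A minimal sketch of post-training dynamic quantization with ONNX Runtime.
from onnxruntime.quantization import QuantType, quantize_dynamic

quantize_dynamic(
    model_input="model.onnx",        # the exported FP32 model (placeholder path)
    model_output="model.int8.onnx",  # weights stored as 8-bit integers
    weight_type=QuantType.QUInt8,    # roughly 4x smaller than FP32 weights
)
```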
- -

Tools for PyTorch models on the edge

- -

We mentioned ONNX Runtime several times above. ONNX Runtime is a compact, standards-based engine that has deep integration with PyTorch. By using PyTorch's ONNX APIs, your PyTorch models can run on a spectrum of edge devices with ONNX Runtime.
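To make that concrete, here is a minimal sketch of loading and running an exported ONNX model with the ONNX Runtime Python API; the model path, input name, and shape are placeholders.

```python
import numpy as np
import onnxruntime as ort

# Create an inference session; the CPU provider is available everywhere,
# and other execution providers can be listed in preference order.
session = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])

# Run the model; passing None returns all model outputs.
dummy_input = np.random.rand(1, 3, 224, 224).astype(np.float32)
outputs = session.run(None, {"input": dummy_input})
```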

- -

The first step for running PyTorch models on the edge is to get them into a lightweight format that doesn't require the PyTorch framework and its gigabytes of dependencies. PyTorch has thought about this and includes an API that enables exactly this - torch.onnx. ONNX is an open standard that defines the operators that make up models. The PyTorch ONNX APIs take the Pythonic PyTorch code and turn it into a functional graph that captures the operators that are needed to run the model without Python. As with everything in machine learning, there are some limitations to be aware of. Some PyTorch models cannot be represented as a single graph – in this case you may need to output several graphs and stitch them together in your own pipeline.
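As a sketch of what that export step looks like, the following uses torch.onnx.export on a small torchvision model; the model choice, file name, and shapes are illustrative only.

```python
import torch
import torchvision

# Any nn.Module works here; resnet18 is just a small, convenient example.
model = torchvision.models.resnet18(weights=None).eval()
example_input = torch.randn(1, 3, 224, 224)

torch.onnx.export(
    model,
    (example_input,),          # example inputs used to trace the graph
    "resnet18.onnx",           # destination file
    input_names=["input"],
    output_names=["output"],
    dynamic_axes={"input": {0: "batch"}},  # keep the batch dimension dynamic
)
```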

- -

The popular Hugging Face library also has APIs that build on top of this torch.onnx functionality to export models to the ONNX format. Over 130,000 models are supported, making it very likely that the model you care about is one of them.
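For instance, a Hugging Face transformer can be exported through Optimum's ONNX Runtime integration (the same library used for the Stable Diffusion export below); this is a sketch, and the model id is just an example.

```python
from optimum.onnxruntime import ORTModelForSequenceClassification

# export=True converts the PyTorch weights to ONNX on the fly.
model = ORTModelForSequenceClassification.from_pretrained(
    "distilbert-base-uncased-finetuned-sst-2-english", export=True
)
model.save_pretrained("./onnx-distilbert")
```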

- -

In this article, we'll show you several examples involving state-of-the-art PyTorch models (like Whisper and Stable Diffusion) on popular devices (like Windows laptops, mobile phones, and web browsers) via various languages (from C# to JavaScript to Swift).

- -

Examples of PyTorch models on the edge

- -

Stable Diffusion on Windows

- -

The Stable Diffusion pipeline consists of five PyTorch models that build an image from a text description. The diffusion process iterates on random pixels until the output image matches the description.

- -

To run on the edge, four of the models can be exported to ONNX format from HuggingFace.

- -

-from optimum.onnxruntime import ORTStableDiffusionPipeline
-pipeline = ORTStableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", export=True)
-pipeline.save_pretrained("./onnx-stable-diffusion")
-                                    
- -

You don't have to export the fifth model, ClipTokenizer, as it is available in ONNX Runtime extensions, a library for pre- and post-processing of PyTorch models.
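If you load a model that uses these custom operators from Python (the pipeline below uses C#), the extensions library must be registered with the session first. A minimal sketch, with a placeholder model path:

```python
import onnxruntime as ort
from onnxruntime_extensions import get_library_path

# Register the ONNX Runtime extensions custom ops (e.g. the CLIP tokenizer).
so = ort.SessionOptions()
so.register_custom_ops_library(get_library_path())

session = ort.InferenceSession("tokenizer/model.onnx", sess_options=so)
```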

- -

To run this pipeline of models as a .NET application, we build the pipeline code in C#. This code can be run on CPU, GPU, or NPU, if they are available on your machine, using ONNX Runtime's device-specific hardware accelerators. This is configured with the ExecutionProviderTarget below.

- -

-static void Main(string[] args)
-{
-    var prompt = "Two golden retriever puppies playing in the grass.";
-    var config = new StableDiffusionConfig
-    {
-        NumInferenceSteps = 50,
-        GuidanceScale = 7.5,
-        ExecutionProviderTarget = StableDiffusionConfig.ExecutionProvider.Cpu,
-        DeviceId = 0,
-        TokenizerOnnxPath = @".\models\tokenizer\model.onnx",
-        TextEncoderOnnxPath = @".\models\text_encoder\model.onnx",
-        UnetOnnxPath = @".\models\unet\model.onnx",
-        VaeDecoderOnnxPath = @".\models\vae_decoder\model.onnx",
-        SafetyModelPath = @".\models\safety_checker\model.onnx",
-    };
-
-    var image = UNet.Inference(prompt, config);
-
-    if (image == null)
-    {
-        Console.WriteLine("Unable to create image, please try again.");
-    }
-}                               
-                                    
- -

This is the output of the model pipeline, running with 50 inference iterations:

- - Two golden retriever puppies playing in the grass - -

You can build the application and run it on Windows with the detailed steps shown in this tutorial.

- -

Text generation in the browser

- -

Running a PyTorch model locally in the browser is not only possible but super simple with the transformers.js library. Transformers.js uses ONNX Runtime Web as its backend. Many models are already converted to ONNX and served by the transformers.js CDN, making inference in the browser a matter of writing a few lines of HTML:

- -

-<html>
-    <body>
-        <h1>Enter starting text …</h1>
-    
-        <form id="form">
-            <input type="text" id="inputText">
-            <button type="submit" id="submitButton">Submit</button>
-        </form>
-    
-        <div id="output"></div>
-    
-        <script type="module">
-    
-            import { pipeline } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.6.2';
-    
-            let inputText = document.getElementById('inputText');
-            let outputDiv = document.getElementById('output');
-            let submitButton = document.getElementById('submitButton');
-    
-            submitButton.addEventListener('click', async (e) => {
-    
-                e.preventDefault();
-    
-                let generator = await pipeline('text-generation', 'Xenova/LaMini-Neo-125M');
-    
-                let result = await generator(inputText.value,
-                    { max_new_tokens: 200,
-                        temperature: 2,
-                        repetition_penalty: 1.5,
-                        no_repeat_ngram_size: 2,
-                        num_beams: 2,
-                        num_return_sequences: 1,
-                        });
-    
-                outputDiv.innerHTML = result[0].generated_text;
-            });
-        </script>    
-    </body>
-</html>
-                                        
-                                    
- -

You can also embed the call to the transformers pipeline using vanilla JavaScript, or in a web application, with React or Next.js, or write a browser extension.

- -

ONNX Runtime Web currently uses WebAssembly to execute the model on the CPU. This is fine for many models, but leveraging the GPU, if one exists on the device, can improve the user experience. ONNX Runtime Web support for WebGPU is coming *very* soon and enables you to tap into the GPU while using the same inference APIs.

- - Text generation in the browser using transformers.js. The prompt is Two golden retriever puppies are playing in the grass, and the response is playing in the grasslands. They are known for their playful nature and they have a playful face. - -

Speech recognition with Whisper on mobile

- -

Whisper from OpenAI is a PyTorch speech recognition model. Whisper comes in a number of different size variants - the smallest, Whisper Tiny, is suitable to run on mobile devices. All components of the Whisper Tiny model (audio decoder, encoder, decoder, and text sequence generation) can be composed and exported to a single ONNX model using the Olive framework. To run this model as part of a mobile application, you can use ONNX Runtime Mobile, which supports Android, iOS, react-native, and MAUI/Xamarin.

- -

ONNX Runtime Mobile supports hardware acceleration via NNAPI (on Android), CoreML (on iOS), and XNNPACK (both iOS and Android).

- -

The relevant snippet of an example Android mobile app that performs speech transcription on short samples of audio is shown below:

- -

-init {
-    val env = OrtEnvironment.getEnvironment()
-    val sessionOptions = OrtSession.SessionOptions()
-    sessionOptions.registerCustomOpLibrary(OrtxPackage.getLibraryPath())
-
-    session = env.createSession(modelBytes, sessionOptions)
-
-    val nMels: Long = 80
-    val nFrames: Long = 3000
-
-    baseInputs = mapOf(
-        "min_length" to createIntTensor(env, intArrayOf(1), tensorShape(1)),
-        "max_length" to createIntTensor(env, intArrayOf(200), tensorShape(1)),
-        "num_beams" to createIntTensor(env, intArrayOf(1), tensorShape(1)),
-        "num_return_sequences" to createIntTensor(env, intArrayOf(1), tensorShape(1)),
-        "length_penalty" to createFloatTensor(env, floatArrayOf(1.0f), tensorShape(1)),
-        "repetition_penalty" to createFloatTensor(env, floatArrayOf(1.0f), tensorShape(1)),
-    )
-}
-
-data class Result(val text: String, val inferenceTimeInMs: Long)
-
-fun run(audioTensor: OnnxTensor): Result {
-    val inputs = mutableMapOf<String, OnnxTensor>()
-    baseInputs.toMap(inputs)
-    inputs["audio_pcm"] = audioTensor
-    val startTimeInMs = SystemClock.elapsedRealtime()
-    val outputs = session.run(inputs)
-    val elapsedTimeInMs = SystemClock.elapsedRealtime() - startTimeInMs
-    val recognizedText = outputs.use {
-        @Suppress("UNCHECKED_CAST")
-        (outputs[0].value as Array<Array<String>>)[0][0]
-    }
-    return Result(recognizedText, elapsedTimeInMs)
-}
-
-                                    
- -

You can record a short audio clip to transcribe.

- - Screenshot of an Android app to perform speech recognition using ONNX Runtime, running a PyTorch Whisper model - -

- -

Train a model to recognize your voice on mobile

- -

ONNX Runtime can also take a pre-trained model and adapt it to new data. It can do this on the edge - specifically on mobile, where it is easy to record your voice and access your photos and other personalized data. Importantly, your data does not leave the device during training.
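For orientation, here is a hedged Python sketch of what a training step looks like with ONNX Runtime's on-device training API (the mobile example that follows does the same thing in Swift). The artifact paths and the dummy batch are placeholders, and it assumes the onnxruntime-training package plus artifacts generated as shown later in this article.

```python
import numpy as np
from onnxruntime.training.api import CheckpointState, Module, Optimizer

# Load the training artifacts (produced by artifacts.generate_artifacts below).
state = CheckpointState.load_checkpoint("MyVoice/artifacts/checkpoint")
module = Module("MyVoice/artifacts/training_model.onnx", state,
                "MyVoice/artifacts/eval_model.onnx")
optimizer = Optimizer("MyVoice/artifacts/optimizer_model.onnx", module)

module.train()  # switch to training mode

# Placeholder batch: one second of 16 kHz audio and a binary speaker label.
audio = np.zeros((1, 16000), dtype=np.float32)
label = np.array([0], dtype=np.int64)

loss = module(audio, label)   # forward pass + loss
optimizer.step()              # apply gradients
module.lazy_reset_grad()      # clear gradients for the next step
```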

- -

For example, you can train a PyTorch model to recognize just your own voice on your mobile phone, for authentication scenarios.

- -

The PyTorch model is obtained from HuggingFace in your development environment, and extra layers are added to perform the speaker classification:

- -

-from transformers import Wav2Vec2ForSequenceClassification, AutoConfig
-import torch
-
-config = AutoConfig.from_pretrained("superb/wav2vec2-base-superb-sid")
-model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-sid")
-
-model.classifier = torch.nn.Linear(256, 2)
-                                        
-                                    
- -

The model and other components necessary for training (a loss function to measure the quality of the model and an optimizer to instruct how the weights are adjusted during training) are exported with ONNX Runtime Training:

- -

-artifacts.generate_artifacts(
-    onnx_model,
-    requires_grad=requires_grad,
-    frozen_params=frozen_params,
-    loss=CustomCELoss(),
-    optimizer=artifacts.OptimType.AdamW,
-    artifact_directory="MyVoice/artifacts",
-)                                        
-                                    
- -

This set of artifacts is now ready to be loaded by the mobile app, shown here as iOS Swift code. The app asks the user for samples of their voice and the model is trained with the samples.

- -

-func trainStep(inputData: [Data], labels: [Int64]) throws  {
-
-    let inputs = [try getORTValue(dataList: inputData), try getORTValue(labels: labels)]
-    try trainingSession.trainStep(withInputValues: inputs)
-    
-    try trainingSession.optimizerStep()
-    
-    try trainingSession.lazyResetGrad()
-}                                    
-                                    
- -

Once the model is trained, you can run it to verify that a voice sample is you!

- - A screenshot of an iPhone app to perform speaker verification by recording a number of speech samples of the speaker - -

You can read the full Speaker Verification tutorial, and build and run the application from source.

- -

Where to next?

- -

In this article, we've shown why you would run PyTorch models on the edge and what aspects to consider. We shared several code examples for running state-of-the-art PyTorch models on the edge with ONNX Runtime, and we showed how ONNX Runtime is built for performance and cross-platform execution, making it the ideal engine for the job. Have fun running PyTorch models on the edge with ONNX Runtime!

-

You may have noticed that we didn't include a Llama2 example even though ONNX Runtime is optimized to run it. That's because the amazing Llama2 model deserves its own article, so stay tuned for that!

-
-
-
-
-
-
-
- -
- - - - - - - - - - - - - - diff --git a/build2023.html b/build2023.html deleted file mode 100644 index b74078fd47f7b..0000000000000 --- a/build2023.html +++ /dev/null @@ -1,89 +0,0 @@ - - - - - - - - - - ONNX Runtime at the 2023 Microsoft Build conference - - - - - - - - - - - Skip to main content -
-
- - -
- - - -
-
- -
- - - - - - - - - - - - - diff --git a/community.html b/community.html deleted file mode 100644 index a5f62dcc713ae..0000000000000 --- a/community.html +++ /dev/null @@ -1,341 +0,0 @@ - - - - - - - - - - - - - - - - ONNX Runtime | Community - - - - - - - - - - Skip to main content -
-
- - -
- - -
-
- -
-
-

Organizations and products using ONNX Runtime

-
- -
“With ONNX Runtime, Adobe Target got flexibility and standardization in one package: flexibility for our customers to train ML models in the frameworks of their choice, and standardization to robustly deploy those models at scale for fast inference, to deliver true, real-time personalized experiences.”

-
–Georgiana Copil, Senior Computer Scientist, Adobe
-
-
-
- -
“The ONNX Runtime integration with AMD’s ROCm open software ecosystem helps our customers leverage the power of AMD Instinct GPUs to accelerate and scale their large machine learning models with flexibility across multiple frameworks.”

-
–Andrew Dieckmann, Corporate Vice President and General Manager, AMD Data Center GPU & Accelerated Processing
-
-
-
- -
“Using ONNX Runtime, we have improved the inference performance of many computer vision (CV) and natural language processing (NLP) models trained by multiple deep learning frameworks. These are part of the Alipay production system. We plan to use ONNX Runtime as the high-performance inference backend for more deep learning models in broad applications, such as click-through rate prediction and cross-modal prediction.”

-
–Xiaoming Zhang, Head of Inference Team, Ant Group
-
-
-
- -
“At CERN in the ATLAS experiment, we have integrated the C++ API of ONNX Runtime into our software framework: Athena. We are currently performing inferences using ONNX models especially in the reconstruction of electrons and muons. We are benefiting from its C++ compatibility, platform*-to-ONNX converters (* Keras, TensorFlow, PyTorch, etc) and its thread safety.”

-
–ATLAS Experiment team, CERN (European Organization for Nuclear Research)
-
-
-
- -
“Building and deploying AI solutions to the cloud at scale is complex. With massive datasets and performance considerations, finding a harmonious balance is crucial. ONNX Runtime provided us with the flexibility to package a scikit-learn model built with Python, deploy it serverlessly to a Node.js environment, and run it in the cloud with impressive performance.”

-
–Matthew Leyburn, Software Engineer, Bazaarvoice
-
-
-
- -
“ClearBlade’s integration of ONNX Runtime with our Enterprise IoT and Edge Platforms enables customers and partners to build AI models using any industry AI tool they want to use. Using this solution, our customers can use the ONNX Runtime Go language APIs to seamlessly deploy any model to run on equipment in remote locations or on the factory floor!”

-
–Aaron Allsbrook, CTO & Founder, ClearBlade
-
-
-
- -
“At Deezer, we use ONNX Runtime for machine learning powered features for music recommendations in our streaming service. ONNX Runtime's C API is easy to integrate with our software stack and enables us to run and deploy transformer models with great performance for real-time use cases.”

-
–Mathieu Morlon, Software Engineer, Deezer
-
-
-
- -
“We integrate AI models in various markets and regulated industries using many stacks and frameworks, merging R&D and Ethics. With ONNX Runtime, we provide maximum performance and flexibility to use the customers' preferred technology, from cloud to embedded systems.”

-
–Mauro Bennici, AI Architect and AI Ethicist, Intelligenza Etica
-
-
-
- -
“We use ONNX Runtime to easily deploy thousands of open-source state-of-the-art models in the Hugging Face model hub and accelerate private models for customers of the Accelerated Inference API on CPU and GPU.”

-
–Morgan Funtowicz, Machine Learning Engineer, Hugging Face
-
-
-
- -
“ONNX Runtime powers many of our Natural Language Processing (NLP) and Computer Vision (CV) models that crunch the global media landscape in real-time. It is our go-to framework for scaling our production workload, providing important features ranging from built-in quantization tools to easy GPU and VNNI acceleration.”

-
–Viet Yen Nguyen, CTO, Hypefactors
-
-
-
- -
“InFarm delivers machine-learning powered solutions for intelligent farming, running computer vision models on a variety of hardware, including on-premise GPU clusters, edge computing devices like NVIDIA Jetsons, and cloud-based CPU and GPU clusters. ONNX Runtime enables InFarm to standardise the model formats and outputs of models generated across multiple teams to simplify deployment while also providing the best performance on all hardware targets.”

-
–Ashley Walker, Chief Information and Technology Officer, InFarm
-
-
-
- -
“We are excited to support ONNX Runtime on the Intel® Distribution of OpenVINO™. This accelerates machine learning inference across Intel hardware and gives developers the flexibility to choose the combination of Intel hardware that best meets their needs from CPU to VPU or FPGA.”

-
–Jonathan Ballon, Vice President and General Manager, Intel Internet of Things Group
-
-
- -
- -
“ONNX Runtime enables our customers to easily apply NVIDIA TensorRT’s powerful optimizations to machine learning models, irrespective of the training framework, and deploy across NVIDIA GPUs and edge devices.”

-
– Kari Ann Briski, Sr. Director, Accelerated Computing Software and AI Product, NVIDIA
-
-
-
- -
“The integration of ONNX Runtime into Apache OpenNLP 2.0 enables easy use of state-of-the-art Natural Language Processing (NLP) models in the Java ecosystem. For libraries and applications already using OpenNLP, such as Apache Lucene and Apache Solr, using ONNX Runtime via OpenNLP provides exciting new possibilities.”

-
–Jeff Zemerick, Search Relevance Engineer at OpenSource Connections and Chair of the Apache OpenNLP project
-
-
-
- -
“The ONNX Runtime API for Java enables Java developers and Oracle customers to seamlessly consume and execute ONNX machine-learning models, while taking advantage of the expressive power, high performance, and scalability of Java.”

-
–Stephen Green, Director of Machine Learning Research Group, Oracle
-
-
-
- -
“Using a common model and code base, the ONNX Runtime allows Peakspeed to easily flip between platforms to help our customers choose the most cost-effective solution based on their infrastructure and requirements.”

-
–Oscar Kramer, Chief Geospatial Scientist, Peakspeed
-
-
-
- -
“ONNX Runtime provides us with a lightweight runtime that focuses on performance, yet allows our ML engineers to choose the best frameworks and models for the task at hand.”

-
–Brian Lambert, Machine Learning Engineer, Pieces.app -
-
-
-
- -
“The mission of PTW is to guarantee radiation therapy safely. Bringing an AI model from research into the clinic can be a challenge, however. These are very different software and hardware environments. ONNX Runtime bridges the gap and allows us to choose the best possible tools for research and be sure deployment into any environment will just work.”

-
–Jan Weidner, Research Software Engineer, PTW Dosimetry
-
-
-
- -
“ONNX Runtime underpins RedisAI's distinctive capability to run machine-learning and deep-learning model inference seamlessly inside of Redis. This integration allows data scientists to train models in their preferred ML framework (PyTorch, TensorFlow, etc), and serve those models from Redis for low-latency inference.”

-
–Sam Partee, Principal Engineer, Applied AI, Redis
-
-
-
- -
“With support for ONNX Runtime, our customers and developers can cross the boundaries of the model training framework, easily deploy ML models in Rockchip NPU powered devices.”

-
–Feng Chen, Senior Vice President, Rockchip
-
-
-
- -
“We needed a runtime engine to handle the transition from data science land to a high-performance production runtime system. ONNX Runtime (ORT) simply ‘just worked’. Having no previous experience with ORT, I was able to easily convert my models, and had prototypes running inference in multiple languages within just a few hours. ORT will be my go-to runtime engine for the foreseeable future.”

-
–Bill McCrary, Application Architect, Samtec
-
-
-
- -
“The unique combination of ONNX Runtime and SAS Event Stream Processing changes the game for developers and systems integrators by supporting flexible pipelines and enabling them to target multiple hardware platforms for the same AI models without bundling and packaging changes. This is crucial considering the additional build and test effort saved on an ongoing basis.”

-
–Saurabh Mishra, Senior Manager, Product Management, Internet of Things, SAS
-
-
-
- -
“Teradata provides a highly extensible framework that enables importation and inference of previously trained Machine Learning (ML) and Deep Learning (DL) models. ONNX Runtime enables us to expand the capabilities of Vantage Bring Your Own Model (BYOM) and gives data scientists more options for ML and DL models integration, inference and production deployment within Teradata Vantage ecosystem.”

-
–Michael Riordan, Director, Vantage Data Science and Analytics Products, Teradata
-
-
-
- -
“ONNX Runtime’s simple C API with DirectML provider enabled Topaz Labs to add support for AMD GPUs and NVIDIA Tensor Cores in just a couple of days. Furthermore, our models load many times faster on GPU than any other frameworks. Even our larger models with about 100 million parameters load within seconds.”

-
–Suraj Raghuraman, Head of AI Engine, Topaz Labs
-
-
-
- -
“We selected ONNX Runtime as the backend of Unreal Engine’s Neural Network Interface (NNI) plugin inference system because of its extensibility to support the platforms that Unreal Engine runs on, while enabling ML practitioners to develop ML models in the frameworks of their choice. NNI evaluates neural networks in real time in Unreal Engine and acts as the foundation for game developers to use and deploy ML models to solve many development challenges, including animation, ML-based AI, camera tracking, and more.”

-
–Francisco Vicente Carrasco, Research Engineering Lead, Epic Games
-
-
-
- -
“At the USDA we use ONNX Runtime in GuideMaker, a program we developed to design pools of guide RNAs needed for large-scale gene editing experiments with CRISPR-Cas. ONNX allowed us to make an existing model more interoperable and ONNX Runtime speeds up predictions of guide RNA binding.”

-
–Adam Rivers, Computational Biologist, United States Department of Agriculture, Agricultural Research Service
-
-
-
- -
“ONNX Runtime has vastly increased Vespa.ai’s capacity for evaluating large models, both in performance and model types we support.”

-
–Lester Solbakken, Principal Engineer, Vespa.ai, Verizon Media
-
-
-
- -
“ONNX Runtime has been very helpful to us at Writer in optimizing models for production. It lets us deploy more powerful models and still deliver results to our customers with the latency they expect.”

-
–Dave Buchanan, Director of AI and NLP, Writer
-
-
-
- -
“Xilinx is excited that Microsoft has announced Vitis™ AI interoperability and runtime support for ONNX Runtime, enabling developers to deploy machine learning models for inference to FPGA IaaS such as Azure NP series VMs and Xilinx edge devices.”

-
–Sudip Nag, Corporate Vice President, Software & AI Products, Xilinx
-
-
-
-
-
-
-
-
- -
- - - - - - - - - - - - - \ No newline at end of file diff --git a/css/custom.css b/css/custom.css deleted file mode 100644 index 537914475e278..0000000000000 --- a/css/custom.css +++ /dev/null @@ -1,927 +0,0 @@ -html { - scroll-padding-top: 100px; -} - -body { - font-family: 'Lato', sans-serif; - font-size: 18px; - color: #1a1a1a; -} - -.ns-callout { - background-color: #e8e8e8; - border-color: #1e5f7b; - padding: 15px 30px 15px 15px; - border-left: 5px solid #1a1a1a; -} - -h1 { - font-size: 46px; -} - -.h1, -.h2, -.h3, -.h4, -.h5, -.h6, -h1, -h2, -h3, -h4, -h5, -h6 { - font-weight: 600; -} - -.navbar-custom { - font-family: "Open Sans", sans-serif; - max-width: 1600px; - margin: 0 auto; -} - -header.fixed-top .navbar-custom img.onnx-logo { - width: 205px; - height: 70px; - -webkit-transition: all 200ms ease-in-out; - transition: all 200ms ease-in-out; -} - -header.fixed-top { - padding: 12px 0; - -webkit-transition: all 200ms ease-in-out; - transition: all 200ms ease-in-out; -} - -header.fixed-top.scrolled { - padding: 0 0; - background-color: #D8D6D6; - -webkit-transition: all 200ms ease-in-out; - transition: all 200ms ease-in-out; -} - -.navbar-custom .navbar-nav .nav-link { - color: #1a1a1a; - font-weight: bold; - position: relative; - padding: 0; - line-height: 1; -} - -.navbar-custom .navbar-nav .nav-item { - padding: .5rem 0 .5rem 1rem; -} - -.navbar-custom .navbar-nav .nav-item.active .nav-link { - color: #0B5CA2; -} - -.navbar-custom .navbar-nav .nav-item .nav-link:hover { - color: #0B5CA2; -} - -header.scrolled .navbar-custom .navbar-nav .nav-item.active .nav-link { - color: #0B5CA2; -} - -header.scrolled .navbar-custom .navbar-nav .nav-item .nav-link:hover { - color: #0B5CA2; -} - -header.scrolled .navbar-custom .navbar-nav .nav-link { - color: #1a1a1a; -} - -.navbar-custom .navbar-nav .nav-item:not(:last-child) .nav-link { - border-right: 2px solid #0B5CA2; -} - -header.scrolled .navbar-custom .navbar-nav .nav-item:not(:last-child) .nav-link { - border-right: 2px solid #1a1a1a; -} - -.navbar-toggler .navbar-toggler-icon { - background-image: url('../images/svg/menu-icon.svg'); - width: 3em; - height: 3em; - -webkit-transition: 0.2s; - transition: 0.2s; -} - -header.header-collapse .navbar-toggler .navbar-toggler-icon { - background-image: url('../images/svg/menu-cross-icon.svg'); - width: 2.5em; - height: 2.5em; - -webkit-transition: 0.2s; - transition: 0.2s; -} - -.outer-container { - max-width: 1280px; -} - -.btn-blue { - background-color: #0F77D2; - border: 0; - border-radius: 0; - font-size: 18px; - font-weight: 600; - outline: none; -} - -.btn-blue:hover, -.btn-blue:focus, -.btn-blue:active { - background-color: #0F77D2 !important; - border: 0; - outline: none; -} - -a.link { - color: #0F77D2; - font-weight: 600; - position: relative; - text-decoration: none; -} - -a.link:hover .link-content, -a.link:focus .link-content, -a.link:active .link-content { - border-bottom: 2px solid #045DA0; - -webkit-transition: all 200ms ease-in-out; - transition: all 200ms ease-in-out; -} - -.link-arrow { - padding-left: 5px; - -webkit-transition: all 200ms ease-in-out; - transition: all 200ms ease-in-out; -} - -.link-arrow.fa-angle-right { - color: #1a1a1a; -} - -a.link:hover .link-arrow.fa-angle-right, -a.link:focus .link-arrow.fa-angle-right, -a.link:active .link-arrow.fa-angle-right { - color: #045DA0; -} - -a.link:hover, -a.link:focus, -a.link:active { - color: #045DA0; -} - -a.link:hover .link-arrow, -a.link:focus .link-arrow, -a.link:active .link-arrow { - padding-left: 10px; - 
-webkit-transition: all 200ms ease-in-out; - transition: all 200ms ease-in-out; -} - -.get-started-section h2 { - font-size: 30px; -} - -.tab-wrapper { - margin: 3rem auto; - padding: 0 70px; -} - -.carousel { - padding: 0 70px; -} - -.carousel .item, -.tab-wrapper .item { - color: #000; - overflow: hidden; - min-height: 120px; - font-size: 26px; -} - -.carousel .media img, -.tab-wrapper .media img { - max-width: 100%; - height: auto; - display: block; -} - -.carousel .testimonial, -.tab-wrapper .testimonial { - position: relative; -} - -.video { - max-width: 100%; - aspect-ratio: 16 / 9; -} - -.carousel p, -.tab-wrapper p { - font-size: 26px; - font-weight: bold; -} - -.carousel .overview, -.tab-wrapper .overview { - font-size: 16px; - margin-bottom: 30px; - font-weight: normal; -} - -.carousel-control-prev { - width: 5%; - justify-content: left; -} - -.carousel-control-next { - width: 5%; - justify-content: right; -} - -.carousel-control-prev-icon { - background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='%23000' viewBox='0 0 8 8'%3e%3cpath d='M5.25 0l-4 4 4 4 1.5-1.5-2.5-2.5 2.5-2.5-1.5-1.5z'/%3e%3c/svg%3e") -} - -.carousel-control-next-icon { - background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='%23000' viewBox='0 0 8 8'%3e%3cpath d='M2.75 0l-1.5 1.5 2.5 2.5-2.5 2.5 1.5 1.5 4-4-4-4z'/%3e%3c/svg%3e") -} - -.carousel .carousel-indicators { - bottom: -40px; -} - -.carousel-indicators li, -.carousel-indicators li.active { - width: 18px; - height: 18px; - border-radius: 50%; - margin: 1px 3px; -} - -.carousel-indicators li { - border: 2px solid #4D4D4D; - width: 11px; - height: 11px; -} - -.carousel-indicators li.active { - width: 11px; - height: 11px; - color: #fff; - background: #1fa2ff; - border: 2px solid transparent; - ; -} - -.main-wrapper .top-banner-bg { - background-image: url(../images/top-banner-background.jpg); - background-size: auto; - background-position: top center; - background-repeat: repeat no-repeat; -} - -.icon-container img { - width: 50px; - height: 55px; -} - -.customer-logo img { - max-width: 65%; - /* padding-right: 5px; - padding-left: 5px; */ -} - -.ms-logo img { - max-width: 100%; - padding-right: 20px; - padding-left: 20px; -} - - -div.customer-logo:hover { - background-color: #e8e8e8; - transition: .4s; -} - -.blue-text { - color: #1fa2ff; -} - -h3.blue-text { - font-weight: 700; -} - -.article-blurb { - color: #000000; -} - -.article-date { - color: #666666; - text-transform: uppercase; - font-size: 14px; -} - -.news { - background-color: #e8e8e8; - padding: 2rem; - font-size: 18px; - margin: 2rem; -} - -.alert { - background-color: #E6EFF4 !important; - color: #000000 !important; - border: 0px !important; - text-align: center !important; - border-radius: 0% !important; -} - -.alert a { - color: #007bff !important; - font-weight: bold !important; -} - - -.news:hover { - background-color: #fff; - box-shadow: 0 .5rem 1rem rgba(0, 0, 0, .20); - -webkit-transition: all 200ms ease-in -out; - transition: all 200ms ease-in-out; -} - -.bold-text { - font-weight: bold; -} - -.custom-list { - list-style: none; -} - -.ft-20 { - font-size: 20px; -} - -.pr-10 { - padding-right: 10%; -} - -.bg-lightgray { - background-color: #e8e8e8 -} - -.border-top { - border-top: 1px solid #b3b3b3 !important; -} - -/***Sticky Footer***/ -.footer { - background-color: #333333; -} - -.footer h2 { - font-size: 30px; -} - -.footer .footer-heading { - color: #ffffff; - font-size: 28px; - margin-right: 60px; -} - 
-.footer a { - font-size: 20px; - color: #1fa2ff; - text-decoration: none; - padding-right: 0; - position: relative; - display: inline-block; -} - -.footer a .fa-twitter { - font-size: 30px; -} - -.footer a:hover, -.footer a:focus, -.footer a:active { - color: #a0dcf9; -} - -.footer a .link-content { - box-shadow: 0 2px #1fa2ff; -} - -.footer a:hover .link-content, -.footer a:focus .link-content, -.footer a:active .link-content { - border-bottom: 2px solid #a0dcf9; - -webkit-transition: all 200ms ease-in -out; - transition: all 200ms ease-in-out; -} - -.footer .link-arrow { - padding-left: 5px; - -webkit-transition: all 200ms ease-in-out; - transition: all 200ms ease-in-out; -} - -.footer .link-arrow.fa-angle-right { - color: #ffffff; -} - -.footer a:hover .link-arrow.fa-angle-right, -.footer a:active .link-arrow.fa-angle-right, -.footer a:focus .link-arrow.fa-angle-right { - color: #a0dcf9; -} - -.footer a:hover .link-arrow, -.footer a:active .link-arrow, -.footer a:focus .link-arrow { - padding-left: 10px; -} - -.footer a.twitter-icon::after { - display: none; -} - -p.text-copyright { - font-size: 14px; - display: inline-block; -} - -.footer .onnx-footer-logo { - width: 205px; - height: 70px; -} - -.back-to-top { - position: fixed; - bottom: 25px; - right: 25px; - background-color: #007bff; - width: 50px; - height: 50px; - padding: 0; - border: 0; - border-radius: 0; - outline: transparent; - display: none; -} - -.back-to-top .fa-angle-up { - font-size: 44px; - color: #ffffff; -} - -.back-to-top:hover, -.back-to-top:focus, -.back-to-top:active { - background-color: #1FA2FF; -} - -.skip-main { - left: -999px; - position: absolute; - top: auto; - width: 1px; - height: 1px; - overflow: hidden; - z-index: -999; - cursor: pointer; -} - -.skip-main:focus, -.skip-main:active { - color: #fff; - background-color: #000; - left: auto; - top: auto; - width: 230px; - height: auto; - overflow: auto; - left: 50%; - -webkit-transform: translateX(-50%); - transform: translateX(-50%); - padding: 5px; - border-radius: 0; - text-align: center; - font-size: 1.2em; - z-index: 9999; - border: none; -} - -.append-play-buttom aside { - text-align: center; - position: relative; - top: -48px; - margin-left: 10%; - width: auto; -} - -.carousel-pause-button { - font-size: 16px; - background: none; - border: none; - position: relative; - margin: 0; - line-height: 1; - z-index: 111; -} - -.abbr[data-original-title], -abbr[title] { - text-decoration: none; - -webkit-text-decoration: none; - text-decoration: none; -} - -a .abbr[data-original-title], -a abbr[title] { - cursor: pointer; -} - - -.quick-starts .col-md-2-4 { - -webkit-box-flex: 0; - -ms-flex: 0 0 20%; - flex: 0 0 20%; - max-width: 20%; -} - -[role=radio] { - display: block; - position: relative; - margin: 0.5em; - padding-left: 20px; - border: thin transparent solid; -} - -.r-wrap .r-heading h3 { - color: #000; - font-size: 22px; - margin: 0; -} - -.r-wrap .r-heading { - position: relative; - text-align: left; - padding: 0 20px; - background-color: #E6EFF4; - color: #000; - margin: 2px 0; - -webkit-box-align: center; - -ms-flex-align: center; - align-items: center; - display: -webkit-box; - display: -ms-flexbox; - display: flex; - min-height: 40px; -} - -.r-wrap.command-block .col-md-3.r-heading { - background-color: #007bff; - border: 1px solid #fff; -} - - -.r-wrap.command-block .r-heading::before { - border-left: none; -} - -.r-wrap.command-block .r-heading h4::before { - display: block; - color: transparent; - height: 83px; - position: absolute; - left: 
15px; - border-left: 2px solid #000; - top: 0px; -} - -.r-wrap .r-content { - width: 100%; - margin: 0; - cursor: pointer; -} - -.r-wrap .r-content .row { - margin-right: 0; -} - -div.quotebox { - border: 1px solid #cccccc; - background-color: #e8e8e8; - padding-top: 15px; -} - -.quotebox img { - max-height: 180px; -} - -.r-wrap .r-content .r-option, -.r-wrap .r-content .col-disable { - margin: 0; - padding: 0; -} - -.r-wrap .r-content .r-option span, -.r-wrap .r-content .col-disable span { - cursor: pointer; - color: #000000; - padding: 0 20px; - font-weight: 400; - font-size: 16px; - margin: 2px 2px; - background-color: #fafafa; - display: -webkit-box; - display: -webkit-flex; - display: -ms-flexbox; - display: flex; - -webkit-box-align: center; - -webkit-align-items: center; - -ms-flex-align: center; - align-items: center; - min-height: 40px; - word-break: break-word; -} - -.r-wrap .r-content .r-option.selected span, -.r-wrap .r-content .r-option.selected span:hover, -.r-wrap .r-content .r-option.selected span:focus, -.r-wrap .r-content .r-option.selected span:active { - background-color: #007bff; - color: #fff; - border: 0px solid #000; - -} - -.r-wrap .r-content .r-option.unsupported span, -.r-wrap .r-content .r-option.unsupported span:hover, -.r-wrap .r-content .r-option.unsupported span:focus, -.r-wrap .r-content .r-option.unsupported span:active { - border: 2px solid #cc0000; -} - - -.r-wrap .r-content .r-option.gray span, -.r-wrap .r-content .r-option.gray span:hover, -.r-wrap .r-content .r-option.gray span:focus, -.r-wrap .r-content .r-option.gray span:active { - background-color: #eee; - color: #999; -} - -.r-wrap .r-content .r-option.command-container a { - color: #fff; - text-decoration: underline; - font-weight: bold; -} - -.r-wrap .r-content .r-option.command-container { - display: table; - padding: 2px 2px 2px 2.3px; -} - -.r-wrap .r-content .r-option.command-container span { - display: table-cell; - padding: 0 10px 0 20px; - vertical-align: middle; - background-color: #000; - color: #fff; -} - -.r-wrap .r-content #command.r-option.valid span, -.r-wrap .r-content #command.r-option.valid span:hover, -.r-wrap .r-content #ot_command.r-option.valid span, -.r-wrap .r-content #ot_command.r-option.valid span:hover { - background-color: green; - -} - -.r-wrap .r-content #command.r-option.invalid span, -.r-wrap .r-content #command.r-option.invalid span:hover, -.r-wrap .r-content #ot_command.r-option.invalid span, -.r-wrap .r-content #ot_command.r-option.invalid span:hover { - border: 2px solid #cc0000; -} - - -.r-wrap .r-content .r-option.command-container a.link:hover, -.r-wrap .r-content .r-option.command-container a.link:focus, -.r-wrap .r-content .r-option.command-container a.link:active { - color: #000; -} - -.r-wrap .r-content .r-option span:hover { - background-color: #0F77D2; - color: #fff; - /* border: 1px solid #000; */ -} - -.r-wrap .r-content .r-option.command-container.selected, -.r-wrap .r-content .r-option.command-container:hover { - color: #212529; -} - - - -.r-wrap .r-content #command.r-option span:hover, -.r-wrap .r-content #ot_command.r-option span:hover { - background-color: #000; - color: #fff; - border: 1px solid #ccc; - cursor: default; -} - - - - - -.col-disable span:hover, -.col-disable span:focus, -.col-disable span:active, -.r-wrap .r-content .r-option.selected.col-disable span, -.r-wrap .r-content .r-option.selected.col-disable span:hover, -.r-wrap .r-content .r-option.selected.col-disable span:focus, -.r-wrap .r-content .r-option.selected.col-disable 
span:active { - background-color: transparent !important; - color: transparent !important; - border: 1px solid #ccc !important; - cursor: default !important; -} - -.r-wrap.command-block { - height: auto; -} - -.col-2dot4, -.col-sm-2dot4, -.col-md-2dot4, -.col-lg-2dot4, -.col-xl-2dot4 { - position: relative; - width: 100%; - min-height: 1px; - max-height: 1; - padding-right: 15px; - padding-left: 15px; -} - -@media (min-width: 768px) { - .hardwareAcceleration .col-lg-2dot5 { - -webkit-box-flex: 0; - -ms-flex: 0 0 20%; - flex: 0 0 20%; - max-width: 20%; - } -} - - -/* new css start */ -.subheading { - font-size: 24px; -} - -.resources-img img { - max-width: 100%; -} - -.news-img img { - max-width: 100%; -} - -.news-img { - padding-left: 3rem; - padding-top: 3rem; -} - -.resources-img { - position: relative; - display: block; - width: 100%; -} - -.resources-img img { - cursor: pointer; -} - -.resources-img::before { - content: ''; - background-image: url('../images/youtube-logo.png'); - background-position: center; - background-size: 50%; - background-repeat: no-repeat; - position: absolute; - left: 0; - top: 0; - right: 0; - height: 100%; - width: auto; - background-color: rgba(0, 0, 0, 0.3); - cursor: pointer; -} - -.section-heading { - font-size: 30px; -} - -.blue-title-columns h3 { - font-size: 24px; -} - -.blue-title-columns h3.quote { - font-size: 16px; - text-align: center; -} - -.blue-title-columns h3.hardware { - font-size: 18px; -} - -.row.blue-title-columns { - text-align: center; -} - -.blue-title-columns div.quote-attribution { - font-size: 18px; - font-weight: normal; - font-style: italic; - color: #0F77D2; - text-align: right; -} - -.blue-title-columns h2 { - font-size: 24px; -} - -#myModal .close { - padding: 6px; - position: absolute; - right: 12px; - top: 8px; - z-index: 11; -} - - -.tabpanel { - margin: 20px; - padding: 0; -} - -.tbl_tablist { - margin: 2px 0; - padding: 0; - list-style: none; - display: flex; - border-bottom: 1px solid #ddd; -} - -.tbl_tab, -.custom-tab .nav-tabs .nav-link { - margin: 0; - padding: 12px 24px; - font-weight: bold; - border: 1px solid #dddddd; - background: #fff; - border-top-left-radius: .5em; - border-top-right-radius: .5em; - border-bottom: 0; - font-size: 22px; - cursor: pointer; - color: #1a1a1a; - ; -} - -.tbl_panel { - clear: both; - display: block; - margin: 0 0 0 0; - padding: 0; -} - -ul.controlList { - list-style-type: none; -} - -li[aria-selected='true'], -.custom-tab .nav-tabs .nav-link.active { - background-color: #007bff; - color: #fff; - border: 1px solid #007bff; - margin-bottom: -1px; -} - - -div.tbl_panel[aria-hidden='true'] { - display: none; -} - -div.tbl_panel[aria-hidden='false'] { - display: block; -} \ No newline at end of file diff --git a/css/fonts.css b/css/fonts.css deleted file mode 100644 index a3bda99980ca0..0000000000000 --- a/css/fonts.css +++ /dev/null @@ -1,226 +0,0 @@ -/* open-sans-300 - latin */ -@font-face { - font-family: 'Open Sans'; - font-style: normal; - font-weight: 300; - font-display: swap; - src: url('../fonts/open-sans-v18-latin-300.eot'); /* IE9 Compat Modes */ - src: local(''), - url('../fonts/open-sans-v18-latin-300.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ - url('../fonts/open-sans-v18-latin-300.woff2') format('woff2'), /* Super Modern Browsers */ - url('../fonts/open-sans-v18-latin-300.woff') format('woff'), /* Modern Browsers */ - url('../fonts/open-sans-v18-latin-300.ttf') format('truetype'), /* Safari, Android, iOS */ - url('../fonts/open-sans-v18-latin-300.svg#OpenSans') 
format('svg'); /* Legacy iOS */ - } - /* open-sans-300italic - latin */ - @font-face { - font-family: 'Open Sans'; - font-style: italic; - font-weight: 300; - font-display: swap; - src: url('../fonts/open-sans-v18-latin-300italic.eot'); /* IE9 Compat Modes */ - src: local(''), - url('../fonts/open-sans-v18-latin-300italic.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ - url('../fonts/open-sans-v18-latin-300italic.woff2') format('woff2'), /* Super Modern Browsers */ - url('../fonts/open-sans-v18-latin-300italic.woff') format('woff'), /* Modern Browsers */ - url('../fonts/open-sans-v18-latin-300italic.ttf') format('truetype'), /* Safari, Android, iOS */ - url('../fonts/open-sans-v18-latin-300italic.svg#OpenSans') format('svg'); /* Legacy iOS */ - } - /* open-sans-regular - latin */ - @font-face { - font-family: 'Open Sans'; - font-style: normal; - font-weight: 400; - font-display: swap; - src: url('../fonts/open-sans-v18-latin-regular.eot'); /* IE9 Compat Modes */ - src: local(''), - url('../fonts/open-sans-v18-latin-regular.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ - url('../fonts/open-sans-v18-latin-regular.woff2') format('woff2'), /* Super Modern Browsers */ - url('../fonts/open-sans-v18-latin-regular.woff') format('woff'), /* Modern Browsers */ - url('../fonts/open-sans-v18-latin-regular.ttf') format('truetype'), /* Safari, Android, iOS */ - url('../fonts/open-sans-v18-latin-regular.svg#OpenSans') format('svg'); /* Legacy iOS */ - } - /* open-sans-italic - latin */ - @font-face { - font-family: 'Open Sans'; - font-style: italic; - font-weight: 400; - font-display: swap; - src: url('../fonts/open-sans-v18-latin-italic.eot'); /* IE9 Compat Modes */ - src: local(''), - url('../fonts/open-sans-v18-latin-italic.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ - url('../fonts/open-sans-v18-latin-italic.woff2') format('woff2'), /* Super Modern Browsers */ - url('../fonts/open-sans-v18-latin-italic.woff') format('woff'), /* Modern Browsers */ - url('../fonts/open-sans-v18-latin-italic.ttf') format('truetype'), /* Safari, Android, iOS */ - url('../fonts/open-sans-v18-latin-italic.svg#OpenSans') format('svg'); /* Legacy iOS */ - } - /* open-sans-700italic - latin */ - @font-face { - font-family: 'Open Sans'; - font-style: italic; - font-weight: 700; - font-display: swap; - src: url('../fonts/open-sans-v18-latin-700italic.eot'); /* IE9 Compat Modes */ - src: local(''), - url('../fonts/open-sans-v18-latin-700italic.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ - url('../fonts/open-sans-v18-latin-700italic.woff2') format('woff2'), /* Super Modern Browsers */ - url('../fonts/open-sans-v18-latin-700italic.woff') format('woff'), /* Modern Browsers */ - url('../fonts/open-sans-v18-latin-700italic.ttf') format('truetype'), /* Safari, Android, iOS */ - url('../fonts/open-sans-v18-latin-700italic.svg#OpenSans') format('svg'); /* Legacy iOS */ - } - /* open-sans-700 - latin */ - @font-face { - font-family: 'Open Sans'; - font-style: normal; - font-weight: 700; - font-display: swap; - src: url('../fonts/open-sans-v18-latin-700.eot'); /* IE9 Compat Modes */ - src: local(''), - url('../fonts/open-sans-v18-latin-700.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ - url('../fonts/open-sans-v18-latin-700.woff2') format('woff2'), /* Super Modern Browsers */ - url('../fonts/open-sans-v18-latin-700.woff') format('woff'), /* Modern Browsers */ - url('../fonts/open-sans-v18-latin-700.ttf') format('truetype'), /* Safari, Android, iOS */ - 
url('../fonts/open-sans-v18-latin-700.svg#OpenSans') format('svg'); /* Legacy iOS */ - } - - -/* lato-100 - latin */ -@font-face { - font-family: 'Lato'; - font-style: normal; - font-weight: 100; - font-display: swap; - src: url('../fonts/lato-v17-latin-100.eot'); /* IE9 Compat Modes */ - src: local(''), - url('../fonts/lato-v17-latin-100.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ - url('../fonts/lato-v17-latin-100.woff2') format('woff2'), /* Super Modern Browsers */ - url('../fonts/lato-v17-latin-100.woff') format('woff'), /* Modern Browsers */ - url('../fonts/lato-v17-latin-100.ttf') format('truetype'), /* Safari, Android, iOS */ - url('../fonts/lato-v17-latin-100.svg#Lato') format('svg'); /* Legacy iOS */ - } - /* lato-100italic - latin */ - @font-face { - font-family: 'Lato'; - font-style: italic; - font-weight: 100; - font-display: swap; - src: url('../fonts/lato-v17-latin-100italic.eot'); /* IE9 Compat Modes */ - src: local(''), - url('../fonts/lato-v17-latin-100italic.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ - url('../fonts/lato-v17-latin-100italic.woff2') format('woff2'), /* Super Modern Browsers */ - url('../fonts/lato-v17-latin-100italic.woff') format('woff'), /* Modern Browsers */ - url('../fonts/lato-v17-latin-100italic.ttf') format('truetype'), /* Safari, Android, iOS */ - url('../fonts/lato-v17-latin-100italic.svg#Lato') format('svg'); /* Legacy iOS */ - } - /* lato-300 - latin */ - @font-face { - font-family: 'Lato'; - font-style: normal; - font-weight: 300; - font-display: swap; - src: url('../fonts/lato-v17-latin-300.eot'); /* IE9 Compat Modes */ - src: local(''), - url('../fonts/lato-v17-latin-300.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ - url('../fonts/lato-v17-latin-300.woff2') format('woff2'), /* Super Modern Browsers */ - url('../fonts/lato-v17-latin-300.woff') format('woff'), /* Modern Browsers */ - url('../fonts/lato-v17-latin-300.ttf') format('truetype'), /* Safari, Android, iOS */ - url('../fonts/lato-v17-latin-300.svg#Lato') format('svg'); /* Legacy iOS */ - } - /* lato-300italic - latin */ - @font-face { - font-family: 'Lato'; - font-style: italic; - font-weight: 300; - font-display: swap; - src: url('../fonts/lato-v17-latin-300italic.eot'); /* IE9 Compat Modes */ - src: local(''), - url('../fonts/lato-v17-latin-300italic.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ - url('../fonts/lato-v17-latin-300italic.woff2') format('woff2'), /* Super Modern Browsers */ - url('../fonts/lato-v17-latin-300italic.woff') format('woff'), /* Modern Browsers */ - url('../fonts/lato-v17-latin-300italic.ttf') format('truetype'), /* Safari, Android, iOS */ - url('../fonts/lato-v17-latin-300italic.svg#Lato') format('svg'); /* Legacy iOS */ - } - /* lato-italic - latin */ - @font-face { - font-family: 'Lato'; - font-style: italic; - font-weight: 400; - font-display: swap; - src: url('../fonts/lato-v17-latin-italic.eot'); /* IE9 Compat Modes */ - src: local(''), - url('../fonts/lato-v17-latin-italic.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ - url('../fonts/lato-v17-latin-italic.woff2') format('woff2'), /* Super Modern Browsers */ - url('../fonts/lato-v17-latin-italic.woff') format('woff'), /* Modern Browsers */ - url('../fonts/lato-v17-latin-italic.ttf') format('truetype'), /* Safari, Android, iOS */ - url('../fonts/lato-v17-latin-italic.svg#Lato') format('svg'); /* Legacy iOS */ - } - /* lato-regular - latin */ - @font-face { - font-family: 'Lato'; - font-style: normal; - font-weight: 400; - font-display: swap; - src: 
url('../fonts/lato-v17-latin-regular.eot'); /* IE9 Compat Modes */ - src: local(''), - url('../fonts/lato-v17-latin-regular.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ - url('../fonts/lato-v17-latin-regular.woff2') format('woff2'), /* Super Modern Browsers */ - url('../fonts/lato-v17-latin-regular.woff') format('woff'), /* Modern Browsers */ - url('../fonts/lato-v17-latin-regular.ttf') format('truetype'), /* Safari, Android, iOS */ - url('../fonts/lato-v17-latin-regular.svg#Lato') format('svg'); /* Legacy iOS */ - } - /* lato-700 - latin */ - @font-face { - font-family: 'Lato'; - font-style: normal; - font-weight: 700; - font-display: swap; - src: url('../fonts/lato-v17-latin-700.eot'); /* IE9 Compat Modes */ - src: local(''), - url('../fonts/lato-v17-latin-700.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ - url('../fonts/lato-v17-latin-700.woff2') format('woff2'), /* Super Modern Browsers */ - url('../fonts/lato-v17-latin-700.woff') format('woff'), /* Modern Browsers */ - url('../fonts/lato-v17-latin-700.ttf') format('truetype'), /* Safari, Android, iOS */ - url('../fonts/lato-v17-latin-700.svg#Lato') format('svg'); /* Legacy iOS */ - } - /* lato-900 - latin */ - @font-face { - font-family: 'Lato'; - font-style: normal; - font-weight: 900; - font-display: swap; - src: url('../fonts/lato-v17-latin-900.eot'); /* IE9 Compat Modes */ - src: local(''), - url('../fonts/lato-v17-latin-900.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ - url('../fonts/lato-v17-latin-900.woff2') format('woff2'), /* Super Modern Browsers */ - url('../fonts/lato-v17-latin-900.woff') format('woff'), /* Modern Browsers */ - url('../fonts/lato-v17-latin-900.ttf') format('truetype'), /* Safari, Android, iOS */ - url('../fonts/lato-v17-latin-900.svg#Lato') format('svg'); /* Legacy iOS */ - } - /* lato-700italic - latin */ - @font-face { - font-family: 'Lato'; - font-style: italic; - font-weight: 700; - font-display: swap; - src: url('../fonts/lato-v17-latin-700italic.eot'); /* IE9 Compat Modes */ - src: local(''), - url('../fonts/lato-v17-latin-700italic.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ - url('../fonts/lato-v17-latin-700italic.woff2') format('woff2'), /* Super Modern Browsers */ - url('../fonts/lato-v17-latin-700italic.woff') format('woff'), /* Modern Browsers */ - url('../fonts/lato-v17-latin-700italic.ttf') format('truetype'), /* Safari, Android, iOS */ - url('../fonts/lato-v17-latin-700italic.svg#Lato') format('svg'); /* Legacy iOS */ - } - /* lato-900italic - latin */ - @font-face { - font-family: 'Lato'; - font-style: italic; - font-weight: 900; - font-display: swap; - src: url('../fonts/lato-v17-latin-900italic.eot'); /* IE9 Compat Modes */ - src: local(''), - url('../fonts/lato-v17-latin-900italic.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ - url('../fonts/lato-v17-latin-900italic.woff2') format('woff2'), /* Super Modern Browsers */ - url('../fonts/lato-v17-latin-900italic.woff') format('woff'), /* Modern Browsers */ - url('../fonts/lato-v17-latin-900italic.ttf') format('truetype'), /* Safari, Android, iOS */ - url('../fonts/lato-v17-latin-900italic.svg#Lato') format('svg'); /* Legacy iOS */ - } \ No newline at end of file diff --git a/css/responsive.css b/css/responsive.css deleted file mode 100644 index b64998fd4e7f1..0000000000000 --- a/css/responsive.css +++ /dev/null @@ -1,292 +0,0 @@ - -/* Responsive css start */ - - @media only screen and (max-width: 1366px) { - .outer-container{ - padding: 0 3%; - } - .carousel .testimonial { - padding: 0 0 
0 30px; - } - .footer .footer-heading{ - margin-right: 0; - } - } - - @media only screen and (max-width: 1199px) { - .carousel { - padding: 0 40px; - } - .carousel .testimonial { - padding: 0 0 0 0; - } - .r-wrap .r-content .r-option span{ - padding: 0 10px; - } - .customer-logo img{ - max-height: 150px; - } - .news-img img { - max-width: 100%; - } - } - - @media only screen and (max-width: 991px) { - body{ - font-size: 16px; - } - .blue-title-columns h2 { - font-size: 20px; - } - .navbar-custom .onnx-logo { - width: 230px; - } - .carousel .media img, - .tab-wrapper .media img{ - max-width: 180px; - } - .get-started-section h2 { - font-size: 24px; - } - .carousel p, - .tab-wrapper p { - font-size: 20px; - } - .footer .footer-heading{ - font-size: 22px; - } - .footer a{ - font-size: 18px; - padding-right: 0; - } - .footer p{ - margin-bottom: 10px; - } - .footer .onnx-footer-logo { - width: 170px; - height: 58px; - } - /* css for single col */ - .single-col-container{ - max-width: 60%; - } - .r-wrap .r-heading h3 { - font-size: 18px; - } - .r-wrap .r-content .r-option span{ - padding: 0 5px; - } - .r-wrap .r-heading h4, - .r-wrap .r-content .r-option span{ - font-size: 16px; - } - .pr-10 { - padding-right: 15px; - } - .ft-20 { - font-size: 18px; - } - .footer h2 { - font-size: 24px; - } - .tab{ - font-size: 18px; - } - .section-heading{ - font-size: 24px; - } - .blue-title-columns h3 { - font-size: 22px; - } - .news-img img { - max-width: 100%; - } - } - - @media only screen and (max-width: 767px) { - .main-wrapper .top-banner-bg{ - background-position: top left; - } - .header-content .container{ - max-width: 100%; - } - .outer-container { - padding: 0 10%; - } - .navbar-custom .onnx-logo { - width: 210px; - } - h1 { - font-size: 30px; - } - .carousel .media img { - max-width: 180px; - } - .media .media-left{ - justify-content: center; - } - .carousel { - padding: 0 0; - margin: 32px auto 60px; - } - .footer p{ - margin-bottom: 0; - } - .text-center-sm{ - text-align: center; - } - .navbar-custom .navbar-nav .nav-link::before{ - display: none; - } - header.header-collapse{ - background-color: #D8D6D6; - } - header.header-collapse .navbar-custom .navbar-nav .nav-item.active .nav-link { - color: #ffffff; - } - header.header-collapse .navbar-custom .navbar-nav .nav-item .nav-link:hover { - color: #1a1a1a; - } - header.scrolled .navbar-custom .navbar-nav .nav-item:not(:last-child) .nav-link, - .navbar-custom .navbar-nav .nav-item:not(:last-child) .nav-link { - border-right: none; - } - .navbar-custom .navbar-nav .nav-item { - padding: .5rem 0 .5rem 0; - } - .single-col-container{ - max-width: 100%; - } - .hardwareAcceleration .col-lg-2dot5, .col-lg-2dot4{ - -webkit-box-flex: 0; - -ms-flex: 0 0 100%; - flex: 0 0 100%; - max-width: 100%; - } - .r-wrap .r-content .r-option:first-child span, .r-wrap .r-content .r-option:nth-child(6) span { - margin-left: 0; - } - .r-wrap .r-content .r-option span{ - margin: 1px 0; - } - .r-wrap .r-heading, - .r-wrap .r-content .r-option span{ - min-height: 40px; - height: auto; - padding: 5px 20px; - } - .r-wrap .r-content .r-option.command-container span { - padding-bottom: 5px; - } - .border-md-top{ - border-top: 1px solid #dee2e6!important; - } - .append-play-buttom aside{ - top: -57px; - margin-left: 100px; - } - .back-to-top{ - width: 45px; - height: 40px; - } - .back-to-top .fa-angle-up { - font-size: 38px; - } - header.fixed-top .navbar-custom img.onnx-logo { - width: 175px; - height: 60px; - } - header.fixed-top { - padding: 8px 0; - } - .r-wrap .r-content 
.r-option.command-container { - padding: 0; - } - p.text-copyright{ - display: block; - } - .tab{ - font-size: 16px; - padding: 10px 12px; - } - - .custom-tab .nav-tabs { - position: absolute; - border: 1px solid #ccc; - z-index: 11; - min-height: 50px; - left: 15px; - right: 15px; - transition: all ease .5s; - } - .custom-tab .nav-tabs li { - width: 100%; - margin: 0 !important; - } - .custom-tab .nav-tabs li > a { - display: none; - border: 0 !important; - border-radius: 0 !important; - } - .custom-tab .nav-tabs .nav-link{ - font-size: 18px; - } - .custom-tab .nav-tabs.open li > a, .custom-tab .nav-tabs li > a.active { - display: block; - transition: all ease .5s; - } - .custom-tab .nav-tabs .dropdown-arrow { - position: absolute; - top: 18px; - right: 15px; - border: solid #000000; - border-width: 0 2px 2px 0; - display: inline-block; - padding: 5px; - vertical-align: middle; - transform: rotate(45deg); - -webkit-transform: rotate(45deg); - transition: all ease .5s; - cursor: pointer; - } - .custom-tab .nav-tabs.open .dropdown-arrow { - transform: rotate(-135deg); - transition: all ease .5s; - } - .tab-wrapper{ - padding-top: 60px; - } - .news-img img { - max-width: 100%; - } - } - - @media only screen and (max-width: 480px) { - .outer-container { - padding: 0 3%; - } - .navbar-custom{ - padding: 0.5rem 10px; - } - .navbar-custom .onnx-logo { - width: 170px; - } - .navbar-toggler .navbar-toggler-icon{ - width: 2.5em; - height: 2.5em; - } - header.header-collapse .navbar-toggler .navbar-toggler-icon{ - width: 2em; - height: 2em; - } - } - - @media only screen and (max-width: 420px) { - .icon-container img { - width: 40px; - height: 45px; - } - .outer-container { - padding: 0 2%; - } - } \ No newline at end of file diff --git a/docs/install/index.md b/docs/install/index.md index 0f1574d3c7b86..a283637a303d0 100644 --- a/docs/install/index.md +++ b/docs/install/index.md @@ -355,4 +355,4 @@ In addition to general [requirements](#requirements), please note additional req ## Training install table for all languages -Refer to the getting started with [Optimized Training](../../index.html#getStartedTable) page for more fine-grained installation instructions. +Refer to the getting started with [Optimized Training](https://onnxruntime.ai/getting-started) page for more fine-grained installation instructions. diff --git a/footer.html b/footer.html deleted file mode 100644 index 54926e17a44a1..0000000000000 --- a/footer.html +++ /dev/null @@ -1,68 +0,0 @@ - diff --git a/googlee249f93c137f3996.html b/googlee249f93c137f3996.html index 60cf9f5b2bf71..3d1ee76b4b6b5 100644 --- a/googlee249f93c137f3996.html +++ b/googlee249f93c137f3996.html @@ -1 +1 @@ -google-site-verification: googlee249f93c137f3996.html \ No newline at end of file +google-site-verification: googlee249f93c137f3996.html diff --git a/header.html b/header.html deleted file mode 100644 index 46d02a8785caf..0000000000000 --- a/header.html +++ /dev/null @@ -1,40 +0,0 @@ -
- -
diff --git a/images/python-free.png b/images/python-free.png deleted file mode 100644 index 14271cbeffb65..0000000000000 Binary files a/images/python-free.png and /dev/null differ diff --git a/index.html b/index.html deleted file mode 100644 index dc09ca79a3b9c..0000000000000 --- a/index.html +++ /dev/null @@ -1,1118 +0,0 @@ - - - - - - - - - - - - - - - ONNX Runtime | Home - - - - - - - - - - - - - Skip to main content -
-
- -
- - -
-
-

- Cross-Platform Accelerated Machine Learning -

-
- -
-
-
-
-
-
-
-
- -
-
-
-

- Speed up the machine learning process -

-

- Built-in optimizations that deliver up to 17X faster - inferencing and up to 1.4X faster training -

-
-
-
-
-
-
-
- -
-
-
-

- Plug into your existing technology stack -

-

- Support for a variety of frameworks, operating systems - and hardware platforms -

-
-
-
-
-
-
-
- -
-
-
-

- Build using proven technology -

-

- Used in Office 365, Visual Studio and Bing, delivering - more than a trillion inferences every day -

-
-
-
-
-
-
- -
-
-
-
- -
-
-

Use ONNX Runtime with your favorite language

-
- -
-
- -
-
-
-
-
-import onnxruntime as ort
-
-# Load the model and create InferenceSession
-model_path = "path/to/your/onnx/model"
-session = ort.InferenceSession(model_path)
-
-# Load and preprocess the input image to inputTensor
-...
-
-# Run inference
-outputs = session.run(None, {"input": inputTensor})
-print(outputs)
- Learn more
-											
-
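The "..." placeholder in the Python snippet above elides the input preparation step. A minimal sketch of what that step might look like, assuming a vision model whose input is a 1x3x224x224 float32 tensor literally named "input" (both the shape and the name are assumptions; the real values can be read from session.get_inputs()):

import numpy as np
from PIL import Image

# Hypothetical preprocessing for an image model: decode, resize, scale to
# [0, 1], and reorder HWC -> NCHW with a leading batch dimension.
img = Image.open("image.jpg").convert("RGB").resize((224, 224))
inputTensor = (np.asarray(img, dtype=np.float32) / 255.0).transpose(2, 0, 1)[np.newaxis, :]

The same caveat applies to the Java, JS, C++, and C# tabs below, which elide the equivalent step.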
-
-
-import ai.onnxruntime.*;
-import java.util.Map;
-
-// Load the model and create InferenceSession
-String modelPath = "path/to/your/onnx/model";
-OrtEnvironment env = OrtEnvironment.getEnvironment();
-OrtSession session = env.createSession(modelPath);
-
-// Load and preprocess the input image to inputTensor
-...
-
-// Run inference
-OrtSession.Result outputs = session.run(Map.of("input", inputTensor));
-System.out.println(((OnnxTensor) outputs.get(0)).getFloatBuffer().get(0));
- Learn more
-											
-
-
-
-import * as ort from "onnxruntime-web";
-
-// Load the model and create InferenceSession
-const modelPath = "path/to/your/onnx/model";
-const session = await ort.InferenceSession.create(modelPath);
-
-// Load and preprocess the input image to inputTensor
-...
-
-// Run inference
-const outputs = await session.run({ input: inputTensor });
-console.log(outputs);
- Learn more
-											
-
-
-
-#include "onnxruntime_cxx_api.h"
-
-// Load the model and create InferenceSession
-Ort::Env env;
-std::string model_path = "path/to/your/onnx/model";
-// Note: the session constructor takes a C string (ORTCHAR_T*, i.e. wchar_t* on Windows)
-Ort::Session session(env, model_path.c_str(), Ort::SessionOptions{ nullptr });
-
-// Load and preprocess the input image to 
-// inputTensor, inputNames, and outputNames
-...
-
-// Run inference
-std::vector<Ort::Value> outputTensors =
- session.Run(Ort::RunOptions{nullptr}, 
- 			inputNames.data(), 
-			&inputTensor, 
-			inputNames.size(), 
-			outputNames.data(), 
-			outputNames.size());
-
-const float* outputDataPtr = outputTensors[0].GetTensorMutableData<float>();
-std::cout << outputDataPtr[0] << std::endl;
- Learn more
-											
-
-
-
-using System.Collections.Generic;
-using System.Linq;
-using Microsoft.ML.OnnxRuntime;
-
-// Load the model and create InferenceSession
-string model_path = "path/to/your/onnx/model";
-var session = new InferenceSession(model_path);
-
-// Load and preprocess the input image to inputTensor
-...
-
-// Run inference
-var inputs = new List<NamedOnnxValue> { NamedOnnxValue.CreateFromTensor("input", inputTensor) };
-var outputs = session.Run(inputs).ToList();
-Console.WriteLine(outputs[0].AsTensor<float>().GetValue(0));
- Learn more
-											
-
-
-
-
- -
- -
-
- -
-
-

Use ONNX Runtime with the platform of your choice

-
- -

Select the configuration you want to use and run the corresponding - installation script. -
ONNX Runtime supports a variety of hardware and architectures to fit any need (a short install-verification sketch follows the table below). -

-
- - -
-
-
-
-
-

Platform

-

- Platform list contains six items -

-
-
-
-
- Windows -
-
- Linux -
-
- Mac -
-
- Android -
-
- iOS -
-
- Web Browser -
-
-
-
- -
-
-

API

-

- API list contains eight items -

-
-
-
-
- Python -
-
- C++ -
-
- C# -
-
- C -
-
- Java -
-
- JS -
-
- Obj-C -
-
- WinRT -
-
-
-
- -
-
-

- Architecture -

-

- Architecture list contains five items -

-
-
-
-
- X64 -
-
- X86 -
-
- ARM64 -
-
- ARM32 -
-
- IBM Power -
-
-
-
- -
-
-

- Hardware Acceleration -

-

- Hardware Acceleration list contains nineteen - items -

-
-
-
-
- Default  CPU -
-
- CoreML -
-
- CUDA -
-
- DirectML -
-
- MIGraphX -
-
- NNAPI -
-
- oneDNN -
-
- OpenVINO -
-
- ROCm -
-
- QNN -
-
- TensorRT -
-
- ACL (Preview) -
-
- ArmNN (Preview) -
-
- Azure (Preview) -
-
- CANN (Preview) -
-
- Rockchip NPU (Preview) -
-
- TVM (Preview) -
-
- Vitis AI (Preview) -
-
- XNNPACK (Preview) -
-
-
-
- -
-
-

- Installation Instructions -

-
-
-
-
- - Please select a combination of resources - -
-
-
-
-
-
-
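Once a package from the table above is installed, the result can be sanity-checked from Python. A minimal sketch, assuming the Python API was chosen (the provider list depends on which package and hardware acceleration option was installed):

import onnxruntime as ort

# Print the installed version and the execution providers this build exposes;
# for example, CUDAExecutionProvider appears only in GPU-enabled packages.
print(ort.__version__)
print(ort.get_available_providers())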
- - -
-
- -
-
-
-
-
-

ONNX RUNTIME VIDEOS

-
-
-
- - -
-
- -
-
-
-
-
-

- ORGANIZATIONS & PRODUCTS USING ONNX RUNTIME -

-
-
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
-
-
-
-
-
-
- -
- - - - - - - - - - - - - - - - - - - - diff --git a/js/blogs.json b/js/blogs.json deleted file mode 100644 index 9b9d00ac308f0..0000000000000 --- a/js/blogs.json +++ /dev/null @@ -1,166 +0,0 @@ -{ - "blogs": [ - { - "title": "Run PyTorch models on the edge", - "date": "October 12th, 2023", - "blurb": "Everything you need to know about running PyTorch models on the edge with ONNX Runtime.", - "link": "./pytorch-on-the-edge" - }, - { - "title": "Accelerating over 130,000 Hugging Face models with ONNX Runtime", - "date": "October 4th, 2023", - "blurb": "Learn how ONNX Runtime helps users accelerate open source machine learning models from Hugging Face.", - "link": "https://cloudblogs.microsoft.com/opensource/2023/10/04/accelerating-over-130000-hugging-face-models-with-onnx-runtime/" - }, - { - "title": "On-Device Training with ONNX Runtime: A deep dive", - "date": "July 5th, 2023", - "blurb": "This blog presents technical details of On-Device training with ONNX Runtime. It explains how On-Device Training works and what are the different steps and artifacts involved in the training process. This information will help you train your models on edge devices.", - "link": "https://cloudblogs.microsoft.com/opensource/2023/07/05/on-device-training-with-onnx-runtime-a-deep-dive/" - }, - { - "title": "Build and deploy fast and portable speech recognition applications with ONNX Runtime and Whisper", - "date": "June 7th, 2023", - "blurb": "Learn how ONNX Runtime accelerates Whisper and makes it easy to deploy on desktop, mobile, in the cloud, and even in the browser.", - "link": "https://medium.com/microsoftazure/build-and-deploy-fast-and-portable-speech-recognition-applications-with-onnx-runtime-and-whisper-5bf0969dd56b" - }, - { - "title": "On-Device Training: Efficient training on the edge with ONNX Runtime", - "date": "May 31st, 2023", - "blurb": "This blog introduces On-Device Training to enable training models on edge devices with the data available on-edge. 
It extends ORT Inference on edge to include federated learning and personalization scenarios.", - "link": "https://cloudblogs.microsoft.com/opensource/2023/05/31/on-device-training-efficient-training-on-the-edge-with-onnx-runtime/" - }, - { - "title": "Unlocking the end-to-end Windows AI developer experience using ONNX runtime and Olive", - "date": "May 23rd, 2023", - "blurb": "This blog reviews the new capabilities of ONNX Runtime and the Olive toolchain to support hybrid inferencing, NPU EPs, and hardware aware model optimizations on Windows and other platforms", - "link": "https://blogs.windows.com/windowsdeveloper/2023/05/23/unlocking-the-end-to-end-windows-ai-developer-experience-using-onnx-runtime-and-olive" - }, - { - "title": "Bringing the power of AI to Windows 11 - unlocking a new era of productivity for customers and developers with Windows Copilot and Dev Home", - "date": "May 23rd, 2023", - "blurb": "This blog reviews AI in Windows 11, including ONNX Runtime as the gateway to Windows AI and new ONNX Runtime capabilities on Windows", - "link": "https://blogs.windows.com/windowsdeveloper/2023/05/23/bringing-the-power-of-ai-to-windows-11-unlocking-a-new-era-of-productivity-for-customers-and-developers-with-windows-copilot-and-dev-home" - }, - { - "title": "Optimize DirectML performance with Olive", - "date": "May 23rd, 2023", - "blurb": "This blog shows how to use Olive to optimize models for DML EP in ONNX Runtime", - "link": "https://devblogs.microsoft.com/windowsai/optimize-directml-performance-with-olive" - }, - { - "title": "DirectML ❤ Stable Diffusion", - "date": "May 23rd, 2023", - "blurb": "This blog shows how to use the Stable Diffusion model on DML EP using Olive to optimize the Stable Diffusion model", - "link": "https://devblogs.microsoft.com/windowsai/dml-stable-diffusion/" - }, - { - "title": "Accelerating Stable Diffusion Inference with ONNX Runtime", - "date": "May 10th, 2023", - "blurb": "This blog shows how to accelerate the Stable Diffusion models from Hugging Face on NVIDIA and AMD GPUs with ONNX Runtime. It includes benchmark results obtained on A100 and RTX3060 and MI250X.", - "link": "https://medium.com/microsoftazure/accelerating-stable-diffusion-inference-with-onnx-runtime-203bd7728540" - }, - { - "title": "Azure Container for PyTorch is now Generally Available in Azure Machine Learning!", - "date": "March 22nd, 2023", - "blurb": "ACPT provides a ready-to-use distributed training environment for users to run on the latest multi-node GPU infrastructure offered in Azure. With Nebula, a new fast checkpointing capability in ACPT, you can save your checkpoints 1000 times faster with a simple API that works asynchronously with your training process.", - "link": "https://techcommunity.microsoft.com/t5/ai-machine-learning-blog/azure-container-for-pytorch-is-now-generally-available-in-azure/ba-p/3774616" - }, - { - "title": "High-performance deep learning in Oracle Cloud with ONNX Runtime", - "date": "March 15th, 2023", - "blurb": "Enabling scenarios through the usage of Deep Neural Network (DNN) models is critical to our AI strategy at Oracle, and our Cloud AI Services team has built a solution to serve DNN models for customers in the healthcare sector. 
In this blog post, we’ll share challenges our team faced, and how ONNX Runtime solves these as the backbone of success for high-performance inferencing.", - "link": "https://cloudblogs.microsoft.com/opensource/2023/03/15/high-performance-deep-learning-in-oracle-cloud-with-onnx-runtime/" - }, - { - "title": "Inference Stable Diffusion with C# and ONNX Runtime", - "date": "March 9th, 2023", - "blurb": "In this tutorial we will learn how to do inferencing for the popular Stable Diffusion deep learning model in C#. Stable Diffusion models take a text prompt and create an image that represents the text. ", - "link": "https://onnxruntime.ai/docs/tutorials/csharp/stable-diffusion-csharp.html" - }, - { - "title": "Video super resolution in Microsoft Edge", - "date": "March 8th, 2023", - "blurb": "VSR in Microsoft Edge builds on top of ONNX Runtime and DirectML making our solution portable across GPU vendors and allowing VSR to be available to more users. Additional graphics cards which support these technologies and have sufficient computing power will receive support in the future. The ONNX Runtime and DirectML teams have fine-tuned their technology over many years, resulting in VSR making the most of the performance and capabilities of your graphics card’s processing power.", - "link": "https://blogs.windows.com/msedgedev/2023/03/08/video-super-resolution-in-microsoft-edge/" - }, - { - "title": "OctoML drives down production AI inference costs at Microsoft through new integration with ONNX Runtime ecosystem", - "date": "March 2nd, 2023", - "blurb": "Over the past year, OctoML engineers worked closely with Watch For to design and implement the TVM Execution Provider (EP) for ONNX Runtime - bringing the model optimization potential of Apache TVM to all ONNX Runtime users. This builds upon the collaboration we began in 2021, to bring the benefits of TVM’s code generation and flexible quantization support to production scale at Microsoft.", - "link": "https://octoml.ai/blog/octoml-drives-down-costs-at-microsoft-through-new-integration-with-onnx-runtime/" - }, - { - "title": "Performant on-device inferencing with ONNX Runtime", - "date": "February 8th, 2023", - "blurb": "On-device machine learning model serving is a difficult task, especially given the limited bandwidth of early-stage startups. This guest post from the team at Pieces shares the problems and solutions evaluated for their on-device model serving stack and how ONNX Runtime serves as their backbone of success.", - "link": "https://cloudblogs.microsoft.com/opensource/2023/02/08/performant-on-device-inferencing-with-onnx-runtime/" - }, - { - "title": "Improve BERT inference speed by combining the power of Optimum, OpenVINO™, ONNX Runtime, and Azure", - "date": "January 25th, 2023", - "blurb": "In this blog, we will discuss one of the ways to make huge models like BERT smaller and faster with OpenVINO™ Neural Networks Compression Framework (NNCF) and ONNX Runtime with OpenVINO™ Execution Provider through Azure Machine Learning.", - "link": "https://cloudblogs.microsoft.com/opensource/2023/01/25/improve-bert-inference-speed-by-combining-the-power-of-optimum-openvino-onnx-runtime-and-azure/" - }, - { - "title": "Optimum + ONNX Runtime: Easier, Faster training for your Hugging Face models", - "date": "January 24th, 2023", - "blurb": "Hugging Face’s Optimum library, through its integration with ONNX Runtime for training, provides an open solution to improve training times by 35% or more for many popular Hugging Face models. 
We present details of both Hugging Face Optimum and the ONNX Runtime Training ecosystem, with performance numbers highlighting the benefits of using the Optimum library.", - "link": "https://huggingface.co/blog/optimum-onnxruntime-training/" - }, - { - "title": "Live demos of machine learning models with ONNX and Hugging Face Spaces", - "date": "June 6, 2022", - "blurb": "Choosing which machine learning model to use, sharing a model with a colleague, and quickly trying out a model are all reasons why you may find yourself wanting to quickly run inference on a model. You can configure your environment and download Jupyter notebooks, but it would be nicer if there was a way to run a model with even less effort...", - "link": "https://cloudblogs.microsoft.com/opensource/2022/06/06/live-demos-of-machine-learning-models-with-onnx-and-hugging-face-spaces/" - }, - { - "title": "Optimizing and deploying transformer INT8 inference with ONNX Runtime-TensorRT on NVIDIA GPUs", - "date": "May 2, 2022", - "blurb": "Transformer-based models have revolutionized the natural language processing (NLP) domain. Ever since its inception, transformer architecture has been integrated into models like Bidirectional Encoder Representations from Transformers (BERT) and Generative Pre-trained Transformer (GPT) for performing tasks such as text generation or summarization and question and answering to name a few...", - "link": "https://cloudblogs.microsoft.com/opensource/2022/05/02/optimizing-and-deploying-transformer-int8-inference-with-onnx-runtime-tensorrt-on-nvidia-gpus/" - }, - { - "title": "Scaling-up PyTorch inference: Serving billions of daily NLP inferences with ONNX Runtime", - "date": "April 19, 2022", - "blurb": "Scale, performance, and efficient deployment of state-of-the-art Deep Learning models are ubiquitous challenges as applied machine learning grows across the industry. We’re happy to see that the ONNX Runtime Machine Learning model inferencing solution we’ve built and use in high-volume Microsoft products and services also resonates with our open source community, enabling new capabilities that drive content relevance and productivity...", - "link": "https://cloudblogs.microsoft.com/opensource/2022/04/19/scaling-up-pytorch-inference-serving-billions-of-daily-nlp-inferences-with-onnx-runtime/" - }, - { - "title": "Add AI to mobile applications with Xamarin and ONNX Runtime", - "date": "December 14, 2021", - "blurb": "ONNX Runtime now supports building mobile applications in C# with Xamarin. Support for Android and iOS is included in the ONNX Runtime release 1.10 NuGet package. This enables C# developers to build AI applications for Android and iOS to execute ONNX models on mobile devices with ONNX Runtime...", - "link": "https://cloudblogs.microsoft.com/opensource/2021/12/14/add-ai-to-mobile-applications-with-xamarin-and-onnx-runtime/" - }, - { - "title": "ONNX Runtime Web—running your machine learning model in browser", - "date": "September 2, 2021", - "blurb": "We are introducing ONNX Runtime Web (ORT Web), a new feature in ONNX Runtime to enable JavaScript developers to run and deploy machine learning models in browsers. It also helps enable new classes of on-device computation. 
ORT Web will be replacing the soon to be deprecated onnx.js...", - "link": "https://cloudblogs.microsoft.com/opensource/2021/09/02/onnx-runtime-web-running-your-machine-learning-model-in-browser/" - }, - { - "title": "Accelerate PyTorch transformer model training with ONNX Runtime – a deep dive", - "date": "July 13, 2021", - "blurb": "ONNX Runtime (ORT) for PyTorch accelerates training large scale models across multiple GPUs with up to 37% increase in training throughput over PyTorch and up to 86% speed up when combined with DeepSpeed...", - "link": "https://techcommunity.microsoft.com/t5/azure-ai/accelerate-pytorch-transformer-model-training-with-onnx-runtime/ba-p/2540471" - }, - { - "title": "Accelerate PyTorch training with torch-ort", - "date": "July 13, 2021", - "blurb": "With a simple change to your PyTorch training script, you can now speed up training large language models with torch_ort.ORTModule, running on the target hardware of your choice. Training deep learning models requires ever-increasing compute and memory resources. Today we release torch_ort.ORTModule, to accelerate distributed training of PyTorch models, reducing the time and resources needed for training...", - "link": "https://cloudblogs.microsoft.com/opensource/2021/07/13/accelerate-pytorch-training-with-torch-ort/" - }, - { - "title": "ONNX Runtime release 1.8.1 previews support for accelerated training on AMD GPUs with the AMD ROCm™ Open Software Platform", - "date": "July 13, 2021", - "blurb": "ONNX Runtime is an open-source project that is designed to accelerate machine learning across a wide range of frameworks, operating systems, and hardware platforms. Today, we are excited to announce a preview version of ONNX Runtime in release 1.8.1 featuring support for AMD Instinct™ GPUs facilitated by the AMD ROCm™ open software platform...", - "link": "https://cloudblogs.microsoft.com/opensource/2021/07/13/onnx-runtime-release-1-8-1-previews-support-for-accelerated-training-on-amd-gpus-with-the-amd-rocm-open-software-platform/" - }, - { - "title": "Journey to optimize large scale transformer model inference with ONNX Runtime", - "date": "June 30, 2021", - "blurb": "Large-scale transformer models, such as GPT-2 and GPT-3, are among the most useful self-supervised transformer language models for natural language processing tasks such as language translation, question answering, passage summarization, text generation, and so on...", - "link": "https://cloudblogs.microsoft.com/opensource/2021/06/30/journey-to-optimize-large-scale-transformer-model-inference-with-onnx-runtime/" - } -] -} diff --git a/js/bootstrap.min.js b/js/bootstrap.min.js deleted file mode 100644 index c4c0d1f95cd3c..0000000000000 --- a/js/bootstrap.min.js +++ /dev/null @@ -1,7 +0,0 @@ -/*! 
- * Bootstrap v4.3.1 (https://getbootstrap.com/) - * Copyright 2011-2019 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors) - * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) - */ -!function(t,e){"object"==typeof exports&&"undefined"!=typeof module?e(exports,require("jquery"),require("popper.js")):"function"==typeof define&&define.amd?define(["exports","jquery","popper.js"],e):e((t=t||self).bootstrap={},t.jQuery,t.Popper)}(this,function(t,g,u){"use strict";function i(t,e){for(var n=0;nthis._items.length-1||t<0))if(this._isSliding)g(this._element).one(Q.SLID,function(){return e.to(t)});else{if(n===t)return this.pause(),void this.cycle();var i=ndocument.documentElement.clientHeight;!this._isBodyOverflowing&&t&&(this._element.style.paddingLeft=this._scrollbarWidth+"px"),this._isBodyOverflowing&&!t&&(this._element.style.paddingRight=this._scrollbarWidth+"px")},t._resetAdjustments=function(){this._element.style.paddingLeft="",this._element.style.paddingRight=""},t._checkScrollbar=function(){var t=document.body.getBoundingClientRect();this._isBodyOverflowing=t.left+t.right
',trigger:"hover focus",title:"",delay:0,html:!1,selector:!1,placement:"top",offset:0,container:!1,fallbackPlacement:"flip",boundary:"scrollParent",sanitize:!0,sanitizeFn:null,whiteList:Ee},je="show",He="out",Re={HIDE:"hide"+De,HIDDEN:"hidden"+De,SHOW:"show"+De,SHOWN:"shown"+De,INSERTED:"inserted"+De,CLICK:"click"+De,FOCUSIN:"focusin"+De,FOCUSOUT:"focusout"+De,MOUSEENTER:"mouseenter"+De,MOUSELEAVE:"mouseleave"+De},xe="fade",Fe="show",Ue=".tooltip-inner",We=".arrow",qe="hover",Me="focus",Ke="click",Qe="manual",Be=function(){function i(t,e){if("undefined"==typeof u)throw new TypeError("Bootstrap's tooltips require Popper.js (https://popper.js.org/)");this._isEnabled=!0,this._timeout=0,this._hoverState="",this._activeTrigger={},this._popper=null,this.element=t,this.config=this._getConfig(e),this.tip=null,this._setListeners()}var t=i.prototype;return t.enable=function(){this._isEnabled=!0},t.disable=function(){this._isEnabled=!1},t.toggleEnabled=function(){this._isEnabled=!this._isEnabled},t.toggle=function(t){if(this._isEnabled)if(t){var e=this.constructor.DATA_KEY,n=g(t.currentTarget).data(e);n||(n=new this.constructor(t.currentTarget,this._getDelegateConfig()),g(t.currentTarget).data(e,n)),n._activeTrigger.click=!n._activeTrigger.click,n._isWithActiveTrigger()?n._enter(null,n):n._leave(null,n)}else{if(g(this.getTipElement()).hasClass(Fe))return void this._leave(null,this);this._enter(null,this)}},t.dispose=function(){clearTimeout(this._timeout),g.removeData(this.element,this.constructor.DATA_KEY),g(this.element).off(this.constructor.EVENT_KEY),g(this.element).closest(".modal").off("hide.bs.modal"),this.tip&&g(this.tip).remove(),this._isEnabled=null,this._timeout=null,this._hoverState=null,(this._activeTrigger=null)!==this._popper&&this._popper.destroy(),this._popper=null,this.element=null,this.config=null,this.tip=null},t.show=function(){var e=this;if("none"===g(this.element).css("display"))throw new Error("Please use show on visible elements");var t=g.Event(this.constructor.Event.SHOW);if(this.isWithContent()&&this._isEnabled){g(this.element).trigger(t);var n=_.findShadowRoot(this.element),i=g.contains(null!==n?n:this.element.ownerDocument.documentElement,this.element);if(t.isDefaultPrevented()||!i)return;var o=this.getTipElement(),r=_.getUID(this.constructor.NAME);o.setAttribute("id",r),this.element.setAttribute("aria-describedby",r),this.setContent(),this.config.animation&&g(o).addClass(xe);var s="function"==typeof this.config.placement?this.config.placement.call(this,o,this.element):this.config.placement,a=this._getAttachment(s);this.addAttachmentClass(a);var l=this._getContainer();g(o).data(this.constructor.DATA_KEY,this),g.contains(this.element.ownerDocument.documentElement,this.tip)||g(o).appendTo(l),g(this.element).trigger(this.constructor.Event.INSERTED),this._popper=new u(this.element,o,{placement:a,modifiers:{offset:this._getOffset(),flip:{behavior:this.config.fallbackPlacement},arrow:{element:We},preventOverflow:{boundariesElement:this.config.boundary}},onCreate:function(t){t.originalPlacement!==t.placement&&e._handlePopperPlacementChange(t)},onUpdate:function(t){return e._handlePopperPlacementChange(t)}}),g(o).addClass(Fe),"ontouchstart"in document.documentElement&&g(document.body).children().on("mouseover",null,g.noop);var c=function(){e.config.animation&&e._fixTransition();var t=e._hoverState;e._hoverState=null,g(e.element).trigger(e.constructor.Event.SHOWN),t===He&&e._leave(null,e)};if(g(this.tip).hasClass(xe)){var 
h=_.getTransitionDurationFromElement(this.tip);g(this.tip).one(_.TRANSITION_END,c).emulateTransitionEnd(h)}else c()}},t.hide=function(t){var e=this,n=this.getTipElement(),i=g.Event(this.constructor.Event.HIDE),o=function(){e._hoverState!==je&&n.parentNode&&n.parentNode.removeChild(n),e._cleanTipClass(),e.element.removeAttribute("aria-describedby"),g(e.element).trigger(e.constructor.Event.HIDDEN),null!==e._popper&&e._popper.destroy(),t&&t()};if(g(this.element).trigger(i),!i.isDefaultPrevented()){if(g(n).removeClass(Fe),"ontouchstart"in document.documentElement&&g(document.body).children().off("mouseover",null,g.noop),this._activeTrigger[Ke]=!1,this._activeTrigger[Me]=!1,this._activeTrigger[qe]=!1,g(this.tip).hasClass(xe)){var r=_.getTransitionDurationFromElement(n);g(n).one(_.TRANSITION_END,o).emulateTransitionEnd(r)}else o();this._hoverState=""}},t.update=function(){null!==this._popper&&this._popper.scheduleUpdate()},t.isWithContent=function(){return Boolean(this.getTitle())},t.addAttachmentClass=function(t){g(this.getTipElement()).addClass(Ae+"-"+t)},t.getTipElement=function(){return this.tip=this.tip||g(this.config.template)[0],this.tip},t.setContent=function(){var t=this.getTipElement();this.setElementContent(g(t.querySelectorAll(Ue)),this.getTitle()),g(t).removeClass(xe+" "+Fe)},t.setElementContent=function(t,e){"object"!=typeof e||!e.nodeType&&!e.jquery?this.config.html?(this.config.sanitize&&(e=Se(e,this.config.whiteList,this.config.sanitizeFn)),t.html(e)):t.text(e):this.config.html?g(e).parent().is(t)||t.empty().append(e):t.text(g(e).text())},t.getTitle=function(){var t=this.element.getAttribute("data-original-title");return t||(t="function"==typeof this.config.title?this.config.title.call(this.element):this.config.title),t},t._getOffset=function(){var e=this,t={};return"function"==typeof this.config.offset?t.fn=function(t){return t.offsets=l({},t.offsets,e.config.offset(t.offsets,e.element)||{}),t}:t.offset=this.config.offset,t},t._getContainer=function(){return!1===this.config.container?document.body:_.isElement(this.config.container)?g(this.config.container):g(document).find(this.config.container)},t._getAttachment=function(t){return Pe[t.toUpperCase()]},t._setListeners=function(){var i=this;this.config.trigger.split(" ").forEach(function(t){if("click"===t)g(i.element).on(i.constructor.Event.CLICK,i.config.selector,function(t){return i.toggle(t)});else if(t!==Qe){var e=t===qe?i.constructor.Event.MOUSEENTER:i.constructor.Event.FOCUSIN,n=t===qe?i.constructor.Event.MOUSELEAVE:i.constructor.Event.FOCUSOUT;g(i.element).on(e,i.config.selector,function(t){return i._enter(t)}).on(n,i.config.selector,function(t){return i._leave(t)})}}),g(this.element).closest(".modal").on("hide.bs.modal",function(){i.element&&i.hide()}),this.config.selector?this.config=l({},this.config,{trigger:"manual",selector:""}):this._fixTitle()},t._fixTitle=function(){var t=typeof this.element.getAttribute("data-original-title");(this.element.getAttribute("title")||"string"!==t)&&(this.element.setAttribute("data-original-title",this.element.getAttribute("title")||""),this.element.setAttribute("title",""))},t._enter=function(t,e){var n=this.constructor.DATA_KEY;(e=e||g(t.currentTarget).data(n))||(e=new 
this.constructor(t.currentTarget,this._getDelegateConfig()),g(t.currentTarget).data(n,e)),t&&(e._activeTrigger["focusin"===t.type?Me:qe]=!0),g(e.getTipElement()).hasClass(Fe)||e._hoverState===je?e._hoverState=je:(clearTimeout(e._timeout),e._hoverState=je,e.config.delay&&e.config.delay.show?e._timeout=setTimeout(function(){e._hoverState===je&&e.show()},e.config.delay.show):e.show())},t._leave=function(t,e){var n=this.constructor.DATA_KEY;(e=e||g(t.currentTarget).data(n))||(e=new this.constructor(t.currentTarget,this._getDelegateConfig()),g(t.currentTarget).data(n,e)),t&&(e._activeTrigger["focusout"===t.type?Me:qe]=!1),e._isWithActiveTrigger()||(clearTimeout(e._timeout),e._hoverState=He,e.config.delay&&e.config.delay.hide?e._timeout=setTimeout(function(){e._hoverState===He&&e.hide()},e.config.delay.hide):e.hide())},t._isWithActiveTrigger=function(){for(var t in this._activeTrigger)if(this._activeTrigger[t])return!0;return!1},t._getConfig=function(t){var e=g(this.element).data();return Object.keys(e).forEach(function(t){-1!==Oe.indexOf(t)&&delete e[t]}),"number"==typeof(t=l({},this.constructor.Default,e,"object"==typeof t&&t?t:{})).delay&&(t.delay={show:t.delay,hide:t.delay}),"number"==typeof t.title&&(t.title=t.title.toString()),"number"==typeof t.content&&(t.content=t.content.toString()),_.typeCheckConfig(be,t,this.constructor.DefaultType),t.sanitize&&(t.template=Se(t.template,t.whiteList,t.sanitizeFn)),t},t._getDelegateConfig=function(){var t={};if(this.config)for(var e in this.config)this.constructor.Default[e]!==this.config[e]&&(t[e]=this.config[e]);return t},t._cleanTipClass=function(){var t=g(this.getTipElement()),e=t.attr("class").match(Ne);null!==e&&e.length&&t.removeClass(e.join(""))},t._handlePopperPlacementChange=function(t){var e=t.instance;this.tip=e.popper,this._cleanTipClass(),this.addAttachmentClass(this._getAttachment(t.placement))},t._fixTransition=function(){var t=this.getTipElement(),e=this.config.animation;null===t.getAttribute("x-placement")&&(g(t).removeClass(xe),this.config.animation=!1,this.hide(),this.show(),this.config.animation=e)},i._jQueryInterface=function(n){return this.each(function(){var t=g(this).data(Ie),e="object"==typeof n&&n;if((t||!/dispose|hide/.test(n))&&(t||(t=new i(this,e),g(this).data(Ie,t)),"string"==typeof n)){if("undefined"==typeof t[n])throw new TypeError('No method named "'+n+'"');t[n]()}})},s(i,null,[{key:"VERSION",get:function(){return"4.3.1"}},{key:"Default",get:function(){return Le}},{key:"NAME",get:function(){return be}},{key:"DATA_KEY",get:function(){return Ie}},{key:"Event",get:function(){return Re}},{key:"EVENT_KEY",get:function(){return De}},{key:"DefaultType",get:function(){return ke}}]),i}();g.fn[be]=Be._jQueryInterface,g.fn[be].Constructor=Be,g.fn[be].noConflict=function(){return g.fn[be]=we,Be._jQueryInterface};var Ve="popover",Ye="bs.popover",ze="."+Ye,Xe=g.fn[Ve],$e="bs-popover",Ge=new RegExp("(^|\\s)"+$e+"\\S+","g"),Je=l({},Be.Default,{placement:"right",trigger:"click",content:"",template:''}),Ze=l({},Be.DefaultType,{content:"(string|element|function)"}),tn="fade",en="show",nn=".popover-header",on=".popover-body",rn={HIDE:"hide"+ze,HIDDEN:"hidden"+ze,SHOW:"show"+ze,SHOWN:"shown"+ze,INSERTED:"inserted"+ze,CLICK:"click"+ze,FOCUSIN:"focusin"+ze,FOCUSOUT:"focusout"+ze,MOUSEENTER:"mouseenter"+ze,MOUSELEAVE:"mouseleave"+ze},sn=function(t){var e,n;function i(){return t.apply(this,arguments)||this}n=t,(e=i).prototype=Object.create(n.prototype),(e.prototype.constructor=e).__proto__=n;var o=i.prototype;return 
o.isWithContent=function(){return this.getTitle()||this._getContent()},o.addAttachmentClass=function(t){g(this.getTipElement()).addClass($e+"-"+t)},o.getTipElement=function(){return this.tip=this.tip||g(this.config.template)[0],this.tip},o.setContent=function(){var t=g(this.getTipElement());this.setElementContent(t.find(nn),this.getTitle());var e=this._getContent();"function"==typeof e&&(e=e.call(this.element)),this.setElementContent(t.find(on),e),t.removeClass(tn+" "+en)},o._getContent=function(){return this.element.getAttribute("data-content")||this.config.content},o._cleanTipClass=function(){var t=g(this.getTipElement()),e=t.attr("class").match(Ge);null!==e&&0=this._offsets[o]&&("undefined"==typeof this._offsets[o+1]||t 50); - backTop(); -}); -$(document).ready(function () { - equalHeight(); - backTop(); - - $("button.navbar-toggler").click(function (event) { - $("header").toggleClass("header-collapse"); - }); - $("header").toggleClass("scrolled", $(this).scrollTop() > 50); - - // scroll body to 0px on click - $("#back-to-top").click(function () { - $("body,html").animate( - { - scrollTop: 0, - }, - 400 - ); - $("#ONNXLogo").focus(); - return false; - }); - - $(document).click(function (event) { - var clickover = $(event.target); - var _opened = $(".navbar-collapse").hasClass("show"); - if (_opened === true && !clickover.hasClass("navbar-toggler")) { - $(".navbar-toggler").click(); - } - }); - - $("#listbox-5").focus(function () { - var top = $(".get-started-section").offset().top; - $(window).scrollTop(top); - }); - - $(document).keyup(function (e) { - if ($("#navbarNav").hasClass("show")) { - if (e.keyCode === 27) $("button.navbar-toggler").click(); // esc - } - }); - - $(".btn-getStarted").click(function () { - var tableTop = $("#getStartedTable").offset().top; - $("body,html").animate( - { - scrollTop: tableTop - 100, - }, - 600 - ); - - }); - $(document).on("click", ".resources-img", function(e) { - var data = $(this).attr('data-src'); - var iframe = '' - $('#myModal .modal-body').html(iframe); - $(".btn-modal").click(); - }); - $("#myModal").on('hide.bs.modal', function () { - $('#myModal .modal-body').children('iframe').attr('src', ''); - }); - $(document).on("focus", function (e) { - var docTop = $(this).offset().top; - $(window).scrollTop(docTop); - }); - - getStartedScroll(); - carouselNormalization(); - - $('.nav-tabs').responsiveTabs(); - - //populate blogs - $.getJSON("../js/blogs.json", function (json) { - - var template = document.querySelector('#blog-item'); - var blogList = document.querySelector('#blog-list'); - - json.blogs.forEach(function(blog) { - var clone = template.content.cloneNode(true); - clone.querySelector('[name="title"]').textContent = blog.title; - clone.querySelector('[name="date"]').textContent = blog.date; - clone.querySelector('[name="blurb"]').textContent = blog.blurb; - clone.querySelector('[name="link"]').href = blog.link; - blogList.appendChild(clone); - }); - - }); - -// //create local json and template fill for testing -// var template = document.querySelector('#blog-item'); -// var blogList = document.querySelector('#blog-list'); -// var blog = { -// title: "Test Title", -// date: "Test Date", -// blurb: "Test Blurb", -// link: "https://cloudblogs.microsoft.com/opensource/2023/01/25/improve-bert-inference-speed-by-combining-the-power-of-optimum-openvino-onnx-runtime-and-azure/" -// }; -// var clone = template.content.cloneNode(true); -// clone.querySelector('[name="title"]').textContent = blog.title; -// 
clone.querySelector('[name="date"]').textContent = blog.date; -// clone.querySelector('[name="blurb"]').textContent = blog.blurb; -// clone.querySelector('[name="link"]').href = blog.link; -// blogList.appendChild(clone); - -}); - -function getStartedScroll() { - var windowsHash = location.hash.split("#"); - var tabelId = $("#getStartedTable"); - if (tabelId.length) { - var tableTop = tabelId.offset().top; - } - if (windowsHash[1] === "getStartedTable") { - $("body,html").animate( - { - scrollTop: tableTop - 100, - }, - 600 - ); - } -} - -$(window).resize(function () { - equalHeight(); -}); - -function backTop() { - if ($(this).scrollTop() > 50) { - $("#back-to-top").fadeIn(); - } else { - $("#back-to-top").fadeOut(); - } -} - -function equalHeight() { - if (window.innerWidth > 767) { - var maxHeight = 0; - $(".equalHeight h2").height("auto"); - $(".equalHeight h2").each(function () { - if ($(this).height() > maxHeight) { - maxHeight = $(this).height(); - } - }); - $(".equalHeight h2").height(maxHeight); - - var maxHeight = 0; - $(".equalHeight p").height("auto"); - $(".equalHeight p").each(function () { - if ($(this).height() > maxHeight) { - maxHeight = $(this).height(); - } - }); - $(".equalHeight p").height(maxHeight); - - var maxHeight = 0; - $(".equalHeight-1 h3").height("auto"); - $(".equalHeight-1 h3").each(function () { - if ($(this).height() > maxHeight) { - maxHeight = $(this).height(); - } - }); - $(".equalHeight-1 h3").height(maxHeight); - - var maxHeight = 0; - $(".equalHeight-1 .onnx-model-content").height("auto"); - $(".equalHeight-1 .onnx-model-content").each(function () { - if ($(this).height() > maxHeight) { - maxHeight = $(this).height(); - } - }); - $(".equalHeight-1 .onnx-model-content").height(maxHeight); - - var maxHeight = 0; - $(".equalHeight-2 h3").height("auto"); - $(".equalHeight-2 h3").each(function () { - if ($(this).height() > maxHeight) { - maxHeight = $(this).height(); - } - }); - $(".equalHeight-2 h3").height(maxHeight); - - var maxHeight = 0; - $(".equalHeight-2 p.first-child").height("auto"); - $(".equalHeight-2 p.first-child").each(function () { - if ($(this).height() > maxHeight) { - maxHeight = $(this).height(); - } - }); - $(".equalHeight-2 p.first-child").height(maxHeight); - } else { - $(".equalHeight h2").height("auto"); - $(".equalHeight p").height("auto"); - $(".equalHeight-1 h3").height("auto"); - $(".equalHeight-1 .onnx-model-content").height("auto"); - $(".equalHeight-2 h3").height("auto"); - $(".equalHeight-2 p.first-child").height("auto"); - } -} - -function carouselNormalization() { - var items = $("#ONNXCarousel .item"), - heights = [], - tallest; - if (items.length) { - function normalizeHeights() { - items.each(function () { - heights.push($(this).height()); - }); - tallest = Math.max.apply(null, heights); - items.each(function () { - $(this).css("min-height", tallest + "px"); - }); - } - normalizeHeights(); - $(window).on("resize orientationchange", function () { - (tallest = 0), (heights.length = 0); - items.each(function () { - $(this).css("min-height", "0"); - }); - normalizeHeights(); - }); - } -} - -(function ($){ - $.fn.responsiveTabs = function() { - this.addClass('ddTabs'), - this.append($('')), - - this.on("click", "li > a.active, span.dropdown-arrow", function (){ - this.toggleClass('open'); - }.bind(this)), this.on("click", "li > a:not(.active)", function() { - this.removeClass("open") - }.bind(this)); - } -})(jQuery); - -let copyText = (e) => { - // console.log(e.target.previousSibling.innerHTML.replace( /(<([^>]+)>)/ig, 
'')) - text = e.target.previousSibling.innerText; - navigator.clipboard.writeText(text); - $(e.target).popover({ - content: "Copied!", - placement: "Left", - trigger: "manual" - }); - $(e.target).popover("show"); - - // Hide the popover after 2 seconds - setTimeout(() => { - $(e.target).popover("hide"); - }, 1500); -} diff --git a/js/script.js b/js/script.js deleted file mode 100644 index 9b50f13c17cb0..0000000000000 --- a/js/script.js +++ /dev/null @@ -1,1712 +0,0 @@ -var supportedOperatingSystems = new Map([ - ['linux', 'linux'], - ['mac', 'macos'], - ['win', 'windows'], - ['web', 'web'], -]); -var supportedOperatingSystemsNew = [ - {key: 'linux', value: 'linux'}, - {key: 'mac', value: 'macos'}, - {key: 'win', value: 'windows'}, - {key: 'web', value: 'web'} -] - -var opts = { - os: '', - architecture: '', - language: '', - hardwareAcceleration: '', -}; -var ot_opts = { - // os: getAnchorSelectedOS() || getDefaultSelectedOS(), - ot_scenario: '', - ot_os: '', - ot_architecture: 'ot_X64', - ot_language: '', - ot_hardwareAcceleration: '', - ot_build: '' -}; - -var os = $(".os > .r-option"); - -var architecture = $(".architecture > .r-option"); -var language = $(".language > .r-option"); -var hardwareAcceleration = $(".hardwareAcceleration > .r-option"); - -var ot_os = $(".ot_os > .r-option"); -var ot_tab = $('#OT_tab'); - -var ot_scenario = $(".ot_scenario > .r-option"); -var ot_architecture = $(".ot_architecture > .r-option"); -var ot_language = $(".ot_language > .r-option"); -var ot_hardwareAcceleration = $(".ot_hardwareAcceleration > .r-option"); -var ot_build = $(".ot_build > .r-option"); - -var supported = true; -var ot_defaultSelection = true; - -function checkKeyPress(event) { - var keycode = (event.keyCode ? event.keyCode : event.which); - if (keycode == '13' || keycode == '32' || (keycode >= '37' && keycode <= '40')) { - return true; - } else { - return false; - } -} - - - -os.on("click", function () { - selectedOption(os, this, "os"); - -}); -os.on("keypress keyup", function (event) { - if (checkKeyPress(event)) { - selectedOption(os, this, "os"); - } -}); -ot_os.on("click", function () { - ot_selectedOption(ot_os, this, "ot_os"); -}); -ot_os.on("keypress keyup", function (event) { - if (checkKeyPress(event)) { - ot_selectedOption(ot_os, this, "ot_os"); - } -}); -ot_tab.on("click", function() { - ot_commandMessage(ot_buildMatcher()); - ot_checkValidity(); -}); -ot_scenario.on("click", function () { - ot_selectedOption(ot_scenario, this, "ot_scenario"); -}); -ot_scenario.on("keypress keyup", function (event) { - if (checkKeyPress(event)) { - ot_selectedOption(ot_scenario, this, "ot_scenario"); - } -}); -ot_build.on("click", function () { - ot_selectedOption(ot_build, this, "ot_build"); -}); -ot_build.on("keypress keyup", function (event) { - if (checkKeyPress(event)) { - ot_selectedOption(ot_build, this, "ot_build"); - } -}); -architecture.on("click", function () { - selectedOption(architecture, this, "architecture"); -}); -architecture.on("keypress keyup", function (event) { - if (checkKeyPress(event)) { - selectedOption(architecture, this, "architecture"); - } -}); -ot_architecture.on("click", function () { - ot_selectedOption(ot_architecture, this, "ot_architecture"); -}); -ot_architecture.on("keypress keyup", function (event) { - if (checkKeyPress(event)) { - ot_selectedOption(ot_architecture, this, "ot_architecture"); - } -}); -language.on("click", function () { - selectedOption(language, this, "language"); -}); -language.on("keypress keyup", function (event) { - if 
(checkKeyPress(event)) { - selectedOption(language, this, "language"); - } -}); -ot_language.on("click", function () { - ot_selectedOption(ot_language, this, "ot_language"); -}); -ot_language.on("keypress keyup", function (event) { - if (checkKeyPress(event)) { - ot_selectedOption(ot_language, this, "ot_language"); - } -}); -hardwareAcceleration.on("click", function () { - selectedOption(hardwareAcceleration, this, "hardwareAcceleration"); -}); -hardwareAcceleration.on("keypress keyup", function (event) { - if (checkKeyPress(event)) { - selectedOption(hardwareAcceleration, this, "hardwareAcceleration"); - } -}); -ot_hardwareAcceleration.on("click", function () { - ot_selectedOption(ot_hardwareAcceleration, this, "ot_hardwareAcceleration"); -}); -ot_hardwareAcceleration.on("keypress keyup", function (event) { - if (checkKeyPress(event)) { - ot_selectedOption(ot_hardwareAcceleration, this, "ot_hardwareAcceleration"); - } -}); - - - -// Pre-select user's operating system -// $(document).ready(function () { -// var userOsOption = document.getElementById(opts.os); -// var ot_userOsOption = document.getElementById(ot_opts.ot_os); -// if (userOsOption) { -// selectedOption(os, userOsOption, "os"); - -// } -// if (ot_userOsOption) { -// ot_selectedOption(ot_os, ot_userOsOption, "ot_os"); -// } -// }); - - -// determine os (mac, linux, windows) based on user's platform -// function getDefaultSelectedOS() { -// var platform = navigator.platform.toLowerCase(); -// for (var idx = 0; idx < supportedOperatingSystemsNew.length; idx++ ) { -// if (platform.indexOf(supportedOperatingSystemsNew[idx].key) !== -1) { -// return supportedOperatingSystemsNew[idx].value; -// } -// } -// // Just return something if user platform is not in our supported map -// return supportedOperatingSystemsNew[0].value; -// } - -// determine os based on location hash -// function getAnchorSelectedOS() { -// var anchor = location.hash; -// var ANCHOR_REGEX = /^#[^ ]+$/; -// // Look for anchor in the href -// if (!ANCHOR_REGEX.test(anchor)) { -// return false; -// } -// // Look for anchor with OS in the first portion -// var testOS = anchor.slice(1).split("-")[0]; -// for (var idx = 0; idx < supportedOperatingSystemsNew.length; idx++ ) { -// if (testOS.indexOf(supportedOperatingSystemsNew[idx].key) !== -1) { -// return supportedOperatingSystemsNew[idx].value; -// } -// } -// return false; -// } - -function checkValidity(){ - var current_os = opts['os']; - var current_lang = opts['language']; - var current_arch = opts['architecture']; - var current_hw = opts['hardwareAcceleration']; - - var valid = Object.getOwnPropertyNames(validCombos); - - //os section - for(var i =0; i=1.16.6 onnx packaging protobuf sympy setuptools>=41.4.0
pip install -i https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/ORT/pypi/simple/ onnxruntime-training
pip install torch-ort
python -m torch_ort.configure", - - "ot_linux,ot_large_model,ot_python,ot_X64,ot_CUDA118,ot_nightly": - "python -m pip install cerberus flatbuffers h5py numpy>=1.16.6 onnx packaging protobuf sympy setuptools>=41.4.0
pip install -i https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/ORT-Nightly/pypi/simple/ onnxruntime-training
pip install torch-ort
python -m torch_ort.configure", - - "ot_linux,ot_large_model,ot_python,ot_X64,ot_ROCm,ot_stable": - "pip install onnxruntime-training -f https://download.onnxruntime.ai/onnxruntime_stable_<rocm_version*>.html
pip install torch-ort
python -m torch_ort.configure
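For context, the torch-ort package installed by the commands above is used by wrapping an existing torch.nn.Module in ORTModule, which hands the training graph to ONNX Runtime; a minimal sketch (the model here is illustrative, not part of this site):

    # Assumes the onnxruntime-training / torch-ort install above has completed,
    # including `python -m torch_ort.configure`.
    import torch
    from torch_ort import ORTModule

    model = ORTModule(torch.nn.Linear(10, 2))   # wrap any torch.nn.Module
    loss = model(torch.randn(4, 10)).sum()      # forward runs through ONNX Runtime
    loss.backward()                             # and so does backward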

*Available versions", - - "ot_linux,ot_large_model,ot_python,ot_X64,ot_ROCm,ot_nightly": - "pip install onnxruntime-training -f https://download.onnxruntime.ai/onnxruntime_nightly_<rocm_version*>.html
pip install torch-ort
python -m torch_ort.configure

*Available versions", - - "ot_linux,ot_on_device,ot_python,ot_X64,ot_CPU,ot_stable": - "python -m pip install cerberus flatbuffers h5py numpy>=1.16.6 onnx packaging protobuf sympy setuptools>=41.4.0
pip install -i https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/ORT/pypi/simple/ onnxruntime-training-cpu", - - "ot_linux,ot_on_device,ot_python,ot_X64,ot_CPU,ot_nightly": - "python -m pip install cerberus flatbuffers h5py numpy>=1.16.6 onnx packaging protobuf sympy setuptools>=41.4.0
pip install -i https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/ORT-Nightly/pypi/simple/ onnxruntime-training-cpu", - - "ot_linux,ot_on_device,ot_python,ot_X64,ot_CUDA118,ot_stable": - "python -m pip install cerberus flatbuffers h5py numpy>=1.16.6 onnx packaging protobuf sympy setuptools>=41.4.0
pip install -i https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/ORT/pypi/simple/ onnxruntime-training", - - "ot_linux,ot_on_device,ot_python,ot_X64,ot_CUDA118,ot_nightly": - "python -m pip install cerberus flatbuffers h5py numpy>=1.16.6 onnx packaging protobuf sympy setuptools>=41.4.0
pip install -i https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/ORT-Nightly/pypi/simple/ onnxruntime-training", - - "ot_linux,ot_on_device,ot_cplusplus,ot_X64,ot_CPU,ot_stable": - "Download .tgz file from Github
Refer to docs for requirements.", - - "ot_linux,ot_on_device,ot_csharp,ot_X64,ot_CPU,ot_stable": - "Install Nuget package Microsoft.ML.OnnxRuntime.Training", - - "ot_linux,ot_on_device,ot_c,ot_X64,ot_CUDA118,ot_stable": - "Follow build instructions from here", - - "ot_linux,ot_on_device,ot_cplusplus,ot_X64,ot_CUDA118,ot_stable": - "Follow build instructions from here", - - "ot_linux,ot_on_device,ot_csharp,ot_X64,ot_CUDA118,ot_stable": - "Follow build instructions from here", - - "ot_linux,ot_on_device,ot_c,ot_X64,ot_CPU,ot_stable": - "Download .tgz file from Github
Refer to docs for requirements.", - - "ot_windows,ot_on_device,ot_python,ot_X64,ot_CPU,ot_stable": - "python -m pip install cerberus flatbuffers h5py numpy>=1.16.6 onnx packaging protobuf sympy setuptools>=41.4.0
pip install -i https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/ORT/pypi/simple/ onnxruntime-training-cpu", - - "ot_windows,ot_on_device,ot_python,ot_X64,ot_CPU,ot_nightly": - "python -m pip install cerberus flatbuffers h5py numpy>=1.16.6 onnx packaging protobuf sympy setuptools>=41.4.0
pip install -i https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/ORT-Nightly/pypi/simple/ onnxruntime-training-cpu", - - "ot_windows,ot_on_device,ot_python,ot_X64,ot_CUDA118,ot_stable": - "Follow build instructions from here", - - "ot_windows,ot_on_device,ot_c,ot_X64,ot_CPU,ot_stable": - "Install Nuget package Microsoft.ML.OnnxRuntime.Training", - - "ot_windows,ot_on_device,ot_cplusplus,ot_X64,ot_CPU,ot_stable": - "Install Nuget package Microsoft.ML.OnnxRuntime.Training", - - "ot_windows,ot_on_device,ot_csharp,ot_X64,ot_CPU,ot_stable": - "Install Nuget package Microsoft.ML.OnnxRuntime.Training", - - "ot_windows,ot_on_device,ot_c,ot_X64,ot_CUDA118,ot_stable": - "Follow build instructions from here", - - "ot_windows,ot_on_device,ot_cplusplus,ot_X64,ot_CUDA118,ot_stable": - "Follow build instructions from here", - - "ot_windows,ot_on_device,ot_csharp,ot_X64,ot_CUDA118,ot_stable": - "Follow build instructions from here", - - "ot_android,ot_on_device,ot_c,ot_X64,ot_CPU,ot_stable": - "Follow installation instructions from here", - - "ot_android,ot_on_device,ot_cplusplus,ot_X64,ot_CPU,ot_stable": - "Follow installation instructions from here", - - "ot_android,ot_on_device,ot_java,ot_X64,ot_CPU,ot_stable": - "Add a dependency on com.microsoft.onnxruntime:onnxruntime-training-android using Maven/Gradle and refer to the instructions here.", - - "ot_android,ot_on_device,ot_c,ot_X64,ot_CPU,ot_nightly": - "Follow build instructions from here", - - "ot_android,ot_on_device,ot_cplusplus,ot_X64,ot_CPU,ot_nightly": - "Follow build instructions from here", - - "ot_android,ot_on_device,ot_java,ot_X64,ot_CPU,ot_nightly": - "Follow build instructions from here", - - "ot_mac,ot_on_device,ot_python,ot_X64,ot_CPU,ot_stable": - "python -m pip install cerberus flatbuffers h5py numpy>=1.16.6 onnx packaging protobuf sympy setuptools>=41.4.0
pip install -i https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/ORT/pypi/simple/ onnxruntime-training-cpu", - - "ot_mac,ot_on_device,ot_python,ot_X64,ot_CPU,ot_nightly": - "python -m pip install cerberus flatbuffers h5py numpy>=1.16.6 onnx packaging protobuf sympy setuptools>=41.4.0
pip install -i https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/ORT-Nightly/pypi/simple/ onnxruntime-training-cpu", - - "ot_ios,ot_on_device,ot_objc,ot_X64,ot_CPU,ot_stable": - "Add 'onnxruntime-training-objc' using CocoaPods and refer to the mobile deployment guide", - - "ot_ios,ot_on_device,ot_c,ot_X64,ot_CPU,ot_stable": - "Add 'onnxruntime-training-c' using CocoaPods and refer to the mobile deployment guide", - - "ot_ios,ot_on_device,ot_cplusplus,ot_X64,ot_CPU,ot_stable": - "Add 'onnxruntime-training-c' using CocoaPods and refer to the mobile deployment guide", - - "ot_ios,ot_on_device,ot_objc,ot_X64,ot_CPU,ot_nightly": - "Follow build instructions from here", - - "ot_ios,ot_on_device,ot_c,ot_X64,ot_CPU,ot_nightly": - "Follow build instructions from here", - - "ot_ios,ot_on_device,ot_cplusplus,ot_X64,ot_CPU,ot_nightly": - "Follow build instructions from here", -}; - -function ot_commandMessage(key) { - $("#ot_command").removeClass("valid"); - $("#ot_command").removeClass("invalid"); - - if(ot_opts['ot_os']=='' || ot_opts['ot_scenario'] == '' || ot_opts['ot_architecture'] == '' || ot_opts['ot_language']=='' || ot_opts['ot_hardwareAcceleration'] == '' || ot_opts['ot_build'] == ''){ - $("#ot_command span").html( - "Please select a combination of resources" - ) - } - else if (!ot_validCombos.hasOwnProperty(key)) { - $("#ot_command span").html( - "This combination is not supported. De-select to make another selection." - ) - $("#ot_command").addClass("invalid"); - return false; - } - else { - $("#ot_command span").html(ot_validCombos[key]); - $("#ot_command").addClass("valid"); - return true; - } - -} - -var validCombos = { - - "windows,C-API,X64,CUDA": - "Install Nuget package Microsoft.ML.OnnxRuntime.Gpu
Refer to docs for requirements.", - - "windows,C++,X64,CUDA": - "Install Nuget package Microsoft.ML.OnnxRuntime.Gpu
Refer to docs for requirements.", - - "windows,C#,X64,CUDA": - "Install Nuget package Microsoft.ML.OnnxRuntime.Gpu
Refer to docs for requirements.", - - "windows,Python,X64,CUDA": - "pip install onnxruntime-gpu
Refer to docs for requirements.", - - "linux,Python,ARM64,CUDA": - "For Jetpack 4.4+, follow installation instructions from here", - - "linux,C-API,X64,CUDA": - "Download .tgz file from Github
Refer to docs for requirements.", - - "linux,C++,X64,CUDA": - "Install Nuget package Microsoft.ML.OnnxRuntime.Gpu
Refer to docs for requirements.", - - "linux,C#,X64,CUDA": - "Install Nuget package Microsoft.ML.OnnxRuntime.Gpu
Refer to docs for requirements.", - - "linux,Python,X64,CUDA": - "pip install onnxruntime-gpu
Refer to docs for requirements.", - - "linux,C-API,ARM32,DefaultCPU": - "Follow build instructions from here", - - "linux,C++,ARM32,DefaultCPU": - "Follow build instructions from here", - - "linux,Python,ARM32,DefaultCPU": - "Follow build instructions from here", - - "windows,C-API,X64,DefaultCPU": - "Install Nuget package Microsoft.ML.OnnxRuntime", - - "windows,C-API,X86,DefaultCPU": - "Install Nuget package Microsoft.ML.OnnxRuntime", - - "windows,C-API,ARM32,DefaultCPU": - "Install Nuget package Microsoft.ML.OnnxRuntime", - - "windows,C++,ARM32,DefaultCPU": - "Install Nuget package Microsoft.ML.OnnxRuntime", - - "windows,C#,ARM32,DefaultCPU": - "Install Nuget package Microsoft.ML.OnnxRuntime", - - "windows,C-API,ARM64,DefaultCPU": - "Install Nuget package Microsoft.ML.OnnxRuntime", - - "windows,C++,ARM64,DefaultCPU": - "Install Nuget package Microsoft.ML.OnnxRuntime", - - "windows,C#,ARM64,DefaultCPU": - "Install Nuget package Microsoft.ML.OnnxRuntime", - - "windows,C++,X64,DefaultCPU": - "Install Nuget package Microsoft.ML.OnnxRuntime", - - "windows,C++,X86,DefaultCPU": - "Install Nuget package Microsoft.ML.OnnxRuntime", - - "windows,C#,X64,DefaultCPU": - "Install Nuget package Microsoft.ML.OnnxRuntime", - - "windows,C#,X86,DefaultCPU": - "Install Nuget package Microsoft.ML.OnnxRuntime", - - "linux,C-API,X64,DefaultCPU": - "Download .tgz file from Github", - - "linux,C++,X64,DefaultCPU": - "Download .tgz file from Github", - - "linux,C#,X64,DefaultCPU": - "Install Nuget package Microsoft.ML.OnnxRuntime", - - "mac,C-API,X64,DefaultCPU": - "Download .tgz file from Github", - - "mac,C++,X64,DefaultCPU": - "Download .tgz file from Github", - - "mac,C#,X64,DefaultCPU": - "Download .tgz file from Github", - - "mac,C#,X64,CoreML": - "Download .tgz file from Github", - - "windows,Python,X64,DefaultCPU": - "pip install onnxruntime", - - "mac,Python,X64,DefaultCPU": - "pip install onnxruntime", - - "linux,Python,X64,DefaultCPU": - "pip install onnxruntime", - - "linux,Python,ARM64,DefaultCPU": - "pip install onnxruntime", - - "windows,C-API,X64,DNNL": - "Follow build instructions from here", - - "windows,C++,X64,DNNL": - "Follow build instructions from here", - - "windows,C#,X64,DNNL": - "Follow build instructions from here", - - "windows,Python,X64,DNNL": - "Follow build instructions from here", - - "linux,C-API,X64,DNNL": - "Follow build instructions from here", - - "linux,C++,X64,DNNL": - "Follow build instructions from here", - - "linux,C#,X64,DNNL": - "Follow build instructions from here", - - "linux,Python,X64,DNNL": - "Follow build instructions from here", - - "linux,Python,X64,TVM": - "Follow build instructions from here", - - "linux,Python,X86,TVM": - "Follow build instructions from here", - - "linux,Python,ARM32,TVM": - "Follow build instructions from here", - - "linux,Python,ARM64,TVM": - "Follow build instructions from here", - - "windows,Python,X64,TVM": - "Follow build instructions from here", - - "windows,Python,X86,TVM": - "Follow build instructions from here", - - "windows,Python,ARM32,TVM": - "Follow build instructions from here", - - "windows,Python,ARM64,TVM": - "Follow build instructions from here", - - "linux,C-API,X64,OpenVINO": - "Follow build instructions from here", - - "linux,C++,X64,OpenVINO": - "Follow build instructions from here", - - "linux,C#,X64,OpenVINO": - "Follow build instructions from here", - - "linux,Python,X64,OpenVINO": - "pip install onnxruntime-openvino
Docker image also available.", - - "windows,C-API,X64,OpenVINO": - "Follow build instructions from here", - - "windows,C++,X64,OpenVINO": - "Follow build instructions from here", - - "windows,C#,X64,OpenVINO": - "Follow build instructions from here", - - "windows,Python,X64,OpenVINO": - "pip install onnxruntime-openvino", - - "windows,C-API,X64,TensorRT": - "Install Nuget package Microsoft.ML.OnnxRuntime.Gpu
Refer to docs for usage details.", - - "windows,C++,X64,TensorRT": - "Install Nuget package Microsoft.ML.OnnxRuntime.Gpu
Refer to docs for usage details.", - - "windows,C#,X64,TensorRT": - "Install Nuget package Microsoft.ML.OnnxRuntime.Gpu
Refer to docs for usage details.", - - "windows,Python,X64,TensorRT": - "pip install onnxruntime-gpu
Refer to docs for requirements.", - - "linux,C-API,X64,TensorRT": - "Install Nuget package Microsoft.ML.OnnxRuntime.Gpu
Refer to docs for usage details.", - - "linux,C++,X64,TensorRT": - "Install Nuget package Microsoft.ML.OnnxRuntime.Gpu
Refer to docs for usage details.", - - "linux,C#,X64,TensorRT": - "Install Nuget package Microsoft.ML.OnnxRuntime.Gpu
Refer to docs for usage details.", - - "linux,Python,X64,TensorRT": - "pip install onnxruntime-gpu
Refer to docs for requirements.", - - "linux,C#,ARM64,DefaultCPU": - "Install Nuget package Microsoft.ML.OnnxRuntime", - - "linux,Python,ARM64,TensorRT": - "pip install onnxruntime-gpu
Refer to
docs for requirements.", - - "windows,C-API,X86,DirectML": - "Install Nuget package Microsoft.ML.OnnxRuntime.DirectML
Refer to docs for requirements.", - - "windows,C++,X86,DirectML": - "Install Nuget package Microsoft.ML.OnnxRuntime.DirectML", - - "windows,C#,X86,DirectML": - "Install Nuget package Microsoft.ML.OnnxRuntime.DirectML", - - "windows,Python,X86,DirectML": - "Follow build instructions from here", - - "windows,C-API,X64,DirectML": - "Install Nuget package Microsoft.ML.OnnxRuntime.DirectML", - - "windows,C++,X64,DirectML": - "Install Nuget package Microsoft.ML.OnnxRuntime.DirectML", - - "windows,C#,X64,DirectML": - "Install Nuget package Microsoft.ML.OnnxRuntime.DirectML", - - "windows,Python,X64,DirectML": - "pip install onnxruntime-directml", - - "windows,C-API,ARM64,DirectML": - "Install Nuget package Microsoft.ML.OnnxRuntime.DirectML", - - "windows,C++,ARM64,DirectML": - "Install Nuget package Microsoft.ML.OnnxRuntime.DirectML", - - "windows,C#,ARM64,DirectML": - "Install Nuget package Microsoft.ML.OnnxRuntime.DirectML", - - "windows,Python,ARM64,DirectML": - "Follow build instructions from here", - - "linux,Java,X64,DefaultCPU": - "Add a dependency on com.microsoft.onnxruntime:onnxruntime using Maven/Gradle", - - "linux,Java,X64,CUDA": - "Add a dependency on com.microsoft.onnxruntime:onnxruntime_gpu using Maven/Gradle.
Refer to docs for requirements.", - - "mac,Java,X64,DefaultCPU": - "Add a dependency on com.microsoft.onnxruntime:onnxruntime using Maven/Gradle", - - //javascript - "linux,JS,X64,DefaultCPU": - "npm install onnxruntime-node", - - "mac,JS,X64,DefaultCPU": - "npm install onnxruntime-node", - - "windows,JS,X64,DefaultCPU": - "npm install onnxruntime-node", - - "web,JS,,": - "npm install onnxruntime-web", - - "android,JS,ARM64,DefaultCPU": - "npm install onnxruntime-react-native", - - "android,JS,X64,DefaultCPU": - "npm install onnxruntime-react-native", - - "android,JS,X86,DefaultCPU": - "npm install onnxruntime-react-native", - - "ios,JS,ARM64,DefaultCPU": - "npm install onnxruntime-react-native", - - "windows,WinRT,X86,DefaultCPU": - "Install Nuget package Microsoft.AI.MachineLearning", - - "windows,WinRT,X64,DefaultCPU": - "Install Nuget package Microsoft.AI.MachineLearning", - - "windows,WinRT,ARM64,DefaultCPU": - "Install Nuget package Microsoft.AI.MachineLearning", - - "windows,WinRT,ARM32,DefaultCPU": - "Install Nuget package Microsoft.AI.MachineLearning", - - "windows,WinRT,X86,DirectML": - "Install Nuget package Microsoft.AI.MachineLearning", - - "windows,WinRT,X64,DirectML": - "Install Nuget package Microsoft.AI.MachineLearning", - - "windows,WinRT,ARM64,DirectML": - "Install Nuget package Microsoft.AI.MachineLearning", - - "windows,Java,X64,DefaultCPU": - "Add a dependency on com.microsoft.onnxruntime:onnxruntime using Maven/Gradle", - - "windows,Java,X64,CUDA": - "Add a dependency on com.microsoft.onnxruntime:onnxruntime_gpu using Maven/Gradle.
Refer to docs for requirements.", - - "windows,Java,X64,TensorRT": - "Add a dependency on com.microsoft.onnxruntime:onnxruntime_gpu using Maven/Gradle.
Refer to docs for usage details.", - - "windows,Java,X64,DNNL": - "Follow build and API instructions", - - "windows,Java,X64,OpenVINO": - "Follow build and API instructions", - - "linux,Java,X64,TensorRT": - "Add a dependency on com.microsoft.onnxruntime:onnxruntime_gpu using Maven/Gradle.
Refer to docs for usage details.", - - "linux,Java,X64,DNNL": - "Follow build and API instructions", - - "linux,Java,X64,OpenVINO": - "Follow build and API instructions", - - "android,C-API,ARM64,NNAPI": - "Follow build instructions from here", - - "android,C++,ARM64,NNAPI": - "Follow build instructions from here", - - "android,Java,ARM64,NNAPI": - "Add a dependency on com.microsoft.onnxruntime:onnxruntime-android or com.microsoft.onnxruntime:onnxruntime-mobile using Maven/Gradle and refer to the mobile deployment guide", - - "android,C-API,X86,NNAPI": - "Follow build instructions from here", - - "android,C++,X86,NNAPI": - "Follow build instructions from here", - - "android,C#,X86,NNAPI": - "Install Nuget package Microsoft.ML.OnnxRuntime.", - - "android,Java,X64,NNAPI": - "Add a dependency on com.microsoft.onnxruntime:onnxruntime-android or com.microsoft.onnxruntime:onnxruntime-mobile using Maven/Gradle and refer to the mobile deployment guide", - - "android,C-API,X64,NNAPI": - "Follow build instructions from here", - - "android,C++,X64,NNAPI": - "Follow build instructions from here", - - "android,C#,X64,NNAPI": - "Install Nuget package Microsoft.ML.OnnxRuntime.", - - "android,Java,X86,NNAPI": - "Add a dependency on com.microsoft.onnxruntime:onnxruntime-android or com.microsoft.onnxruntime:onnxruntime-mobile using Maven/Gradle and refer to the mobile deployment guide", - - "android,C-API,ARM32,NNAPI": - "Follow build instructions from here", - - "android,C++,ARM32,NNAPI": - "Follow build instructions from here", - - "android,C#,ARM32,NNAPI": - "Install Nuget package Microsoft.ML.OnnxRuntime.", - - "android,Java,ARM32,NNAPI": - "Add a dependency on com.microsoft.onnxruntime:onnxruntime-android or com.microsoft.onnxruntime:onnxruntime-mobile using Maven/Gradle and refer to the mobile deployment guide", - - "android,C-API,ARM64,DefaultCPU": - "Follow build instructions from here", - - "android,C++,ARM64,DefaultCPU": - "Follow build instructions from here", - - "android,Java,ARM64,DefaultCPU": - "Add a dependency on com.microsoft.onnxruntime:onnxruntime-android or com.microsoft.onnxruntime:onnxruntime-mobile using Maven/Gradle and refer to the mobile deployment guide", - - "android,C-API,ARM32,DefaultCPU": - "Follow build instructions from here", - - "android,C++,ARM32,DefaultCPU": - "Follow build instructions from here", - - "android,C#,ARM32,DefaultCPU": - "Install Nuget package Microsoft.ML.OnnxRuntime.", - - "android,Java,ARM32,DefaultCPU": - "Add a dependency on com.microsoft.onnxruntime:onnxruntime-android or com.microsoft.onnxruntime:onnxruntime-mobile using Maven/Gradle and refer to the mobile deployment guide", - - "android,C-API,X86,DefaultCPU": - "Follow build instructions from here", - - "android,C++,X86,DefaultCPU": - "Follow build instructions from here", - - "android,C#,X86,DefaultCPU": - "Install Nuget package Microsoft.ML.OnnxRuntime.", - - "android,Java,X86,DefaultCPU": - "Add a dependency on com.microsoft.onnxruntime:onnxruntime-android or com.microsoft.onnxruntime:onnxruntime-mobile using Maven/Gradle and refer to the mobile deployment guide", - - "android,C-API,X64,DefaultCPU": - "Follow build instructions from here", - - "android,C++,X64,DefaultCPU": - "Follow build instructions from here", - - "android,C#,X64,DefaultCPU": - "Install Nuget package Microsoft.ML.OnnxRuntime.", - - "android,Java,X64,DefaultCPU": - "Add a dependency on com.microsoft.onnxruntime:onnxruntime-android or com.microsoft.onnxruntime:onnxruntime-mobile using Maven/Gradle and refer to the mobile 
deployment guide", - - "android,C#,ARM64,DefaultCPU": - "Install Nuget package Microsoft.ML.OnnxRuntime.", - - "android,C#,ARM64,NNAPI": - "Install Nuget package Microsoft.ML.OnnxRuntime.", - - "ios,C-API,ARM64,DefaultCPU": - "Add 'onnxruntime-c' or 'onnxruntime-mobile-c' using CocoaPods and refer to the mobile deployment guide", - - "ios,C++,ARM64,DefaultCPU": - "Add 'onnxruntime-c' or 'onnxruntime-mobile-c' using CocoaPods and refer to the mobile deployment guide", - - "ios,C-API,ARM64,CoreML": - "Add 'onnxruntime-c' or 'onnxruntime-mobile-c' using CocoaPods and refer to the mobile deployment guide", - - "ios,C++,ARM64,CoreML": - "Add 'onnxruntime-c' or 'onnxruntime-mobile-c' using CocoaPods and refer to the mobile deployment guide", - - "ios,objectivec,ARM64,DefaultCPU": - "Add 'onnxruntime-objc' or 'onnxruntime-mobile-objc' using CocoaPods and refer to the mobile deployment guide", - - "ios,objectivec,ARM64,CoreML": - "Add 'onnxruntime-objc' or 'onnxruntime-mobile-objc' using CocoaPods and refer to the mobile deployment guide", - - "ios,C#,ARM64,DefaultCPU": - "Install Nuget package Microsoft.ML.OnnxRuntime.", - - "ios,C#,ARM64,CoreML": - "Install Nuget package Microsoft.ML.OnnxRuntime.", - - "windows,Python,X64,VitisAI": - "Follow build instructions from here", - - "windows,C++,X64,VitisAI": - "Follow build instructions from here", - - "linux,C++,ARM64,VitisAI": - "Follow build instructions from here", - - "linux,Python,ARM64,VitisAI": - "Follow build instructions from here", - - "linux,Python,X64,MIGraphX": - "Follow build instructions from here", - - "linux,C-API,X64,MIGraphX": - "Follow build instructions from here", - - "linux,C++,X64,MIGraphX": - "Follow build instructions from here", - - "linux,Python,X64,ROCm": - "Follow build instructions from here", - - "linux,C-API,X64,ROCm": - "Follow build instructions from here", - - "linux,C++,X64,ROCm": - "Follow build instructions from here", - - "linux,Python,ARM64,ACL": - "Follow build instructions from here", - - "linux,C-API,ARM64,ACL": - "Follow build instructions from here", - - "linux,C++,ARM64,ACL": - "Follow build instructions from here", - - "linux,Python,ARM32,ACL": - "Follow build instructions from here", - - "linux,C-API,ARM32,ACL": - "Follow build instructions from here", - - "linux,C++,ARM32,ACL": - "Follow build instructions from here", - - "linux,Python,ARM64,ArmNN": - "Follow build instructions from here", - - "linux,C-API,ARM64,ArmNN": - "Follow build instructions from here", - - "linux,C++,ARM64,ArmNN": - "Follow build instructions from here", - - "linux,Python,ARM32,ArmNN": - "Follow build instructions from here", - - "linux,C-API,ARM32,ArmNN": - "Follow build instructions from here", - - "linux,C++,ARM32,ArmNN": - "Follow build instructions from here", - - "linux,Python,ARM64,RockchipNPU": - "Follow build instructions from here", - - "linux,C-API,ARM64,RockchipNPU": - "Follow build instructions from here", - - "linux,C++,ARM64,RockchipNPU": - "Follow build instructions from here", - - //mac m1 - "mac,C-API,ARM64,CoreML": - "Install Nuget package Microsoft.ML.OnnxRuntime", - - "mac,C#,ARM64,CoreML": - "Install Nuget package Microsoft.ML.OnnxRuntime
Refer to docs for requirements.", - - "mac,C++,ARM64,CoreML": - "Download .tgz file from Github", - - "mac,Java,ARM64,CoreML": - "Add a dependency on com.microsoft.onnxruntime:onnxruntime using Maven/Gradle", - - "mac,C-API,ARM64,CoreML": - "Install Nuget package Microsoft.ML.OnnxRuntime", - - "mac,Python,ARM64,DefaultCPU": - "pip install onnxruntime", - - "mac,Python,ARM64,DefaultCPU": - "pip install onnxruntime", - - "mac,Java,ARM64,DefaultCPU": - "Add a dependency on com.microsoft.onnxruntime:onnxruntime using Maven/Gradle", - - "mac,C#,ARM64,DefaultCPU": - "Install Nuget package Microsoft.ML.OnnxRuntime", - - "mac,C-API,ARM64,DefaultCPU": - "Install Nuget package Microsoft.ML.OnnxRuntime", - - "mac,C++,ARM64,DefaultCPU": - "Install Nuget package Microsoft.ML.OnnxRuntime", - - //power - "linux,C-API,Power,DefaultCPU": - "Follow build instructions from here", - - "linux,C++,Power,DefaultCPU": - "Follow build instructions from here", - - "linux,Python,Power,DefaultCPU": - "pip install onnxruntime-powerpc64le", - - //QNN - "windows,C-API,ARM64,QNN": - "View installation instructions here", - - "windows,C++,ARM64,QNN": - "View installation instructions here", - - "windows,C#,ARM64,QNN": - "View installation instructions here", - - "linux,C-API,ARM64,QNN": - "Follow build instructions from here", - - "linux,C++,ARM64,QNN": - "Follow build instructions from here", - - "android,C-API,ARM64,QNN": - "Follow build instructions from here", - - "android,C++,ARM64,QNN": - "Follow build instructions from here", - - //Xnnpack - "ios,C-API,ARM64,XNNPACK": - "Add 'onnxruntime-c' using CocoaPods and refer to the mobile deployment guide or Follow build instructions from here", - - "ios,objectivec,ARM64,XNNPACK": - "Add 'onnxruntime-objc' using CocoaPods and refer to the mobile deployment guide", - - "android,C-API,ARM64,XNNPACK": - "Follow build instructions from here", - - "android,C++,ARM64,XNNPACK": - "Follow build instructions from here", - - "android,Java,ARM64,XNNPACK": - "Add a dependency on com.microsoft.onnxruntime:onnxruntime-android using Maven/Gradle and refer to the mobile deployment guide", - - "android,C-API,ARM32,XNNPACK": - "Follow build instructions from here", - - "android,C++,ARM32,XNNPACK": - "Follow build instructions from here", - - "android,Java,ARM32,XNNPACK": - "Add a dependency on com.microsoft.onnxruntime:onnxruntime-android using Maven/Gradle and refer to the mobile deployment guide", - - - "windows,C-API,X86,XNNPACK": - "Follow build instructions from here", - - "windows,C++,X86,XNNPACK": - "Follow build instructions from here", - - "linux,C-API,X86,XNNPACK": - "Follow build instructions from here", - - "linux,C++,X86,XNNPACK": - "Follow build instructions from here", - - "linux,Python,ARM64,CANN": - "pip install onnxruntime-cann
Refer to docs for requirements.", - - "linux,C-API,ARM64,CANN": - "Follow build instructions from here.", - - "linux,C++,ARM64,CANN": - "Follow build instructions from here.", - - "linux,Python,X64,CANN": - "pip install onnxruntime-cann
Refer to docs for requirements.", - - "linux,C-API,X64,CANN": - "Follow build instructions from here.", - - "linux,C++,X64,CANN": - "Follow build instructions from here.", - - "windows,Python,X64,Azure": - "Follow build instructions from here", - - "linux,Python,X64,Azure": - "Follow build instructions from here", -}; - -function commandMessage(key) { - - $("#command").removeClass("valid"); - $("#command").removeClass("invalid"); - - if(opts['os']=='web' && opts['language']=='JS' &&validCombos.hasOwnProperty(key)){ - $("#command span").html(validCombos[key]); - $("#command").addClass("valid"); - return true; - } - else if(opts['os']=='' || opts['architecture'] == '' || opts['language']=='' || opts['hardwareAcceleration'] == ''){ - - $("#command span").html( - "Please select a combination of resources" - ) - } - else if (!validCombos.hasOwnProperty(key)) { - $("#command span").html( - "This combination is not supported. De-select to make another selection." - ) - $("#command").addClass("invalid"); - return false; - } else { - $("#command span").html(validCombos[key]); - $("#command").addClass("valid"); - return true; - } -} - -//Accesibility Get started tabel -var KEYCODE = { - DOWN: 40, - LEFT: 37, - RIGHT: 39, - SPACE: 32, - UP: 38 -} -window.addEventListener('load', function () { - var radiobuttons = document.querySelectorAll('[role=option]'); - for (var i = 0; i < radiobuttons.length; i++) { - var rb = radiobuttons[i]; - rb.addEventListener('click', clickRadioGroup); - rb.addEventListener('keydown', keyDownRadioGroup); - rb.addEventListener('focus', focusRadioButton); - rb.addEventListener('blur', blurRadioButton); - } -}); - -function firstRadioButton(node) { - var first = node.parentNode.firstChild; - while (first) { - if (first.nodeType === Node.ELEMENT_NODE) { - if (first.getAttribute("role") === 'option') return first; - } - first = first.nextSibling; - } - return null; -} - -function lastRadioButton(node) { - var last = node.parentNode.lastChild; - while (last) { - if (last.nodeType === Node.ELEMENT_NODE) { - if (last.getAttribute("role") === 'option') return last; - } - last = last.previousSibling; - } - return last; -} - -function nextRadioButton(node) { - var next = node.nextSibling; - while (next) { - if (next.nodeType === Node.ELEMENT_NODE) { - if (next.getAttribute("role") === 'option') return next; - } - next = next.nextSibling; - } - return null; -} - -function previousRadioButton(node) { - var prev = node.previousSibling; - while (prev) { - if (prev.nodeType === Node.ELEMENT_NODE) { - if (prev.getAttribute("role") === 'option') return prev; - } - prev = prev.previousSibling; - } - return null; -} - -function getImage(node) { - var child = node.firstChild; - while (child) { - if (child.nodeType === Node.ELEMENT_NODE) { - if (child.tagName === 'IMG') return child; - } - child = child.nextSibling; - } - return null; -} - -function setRadioButton(node, state) { - - var image = getImage(node); - if (state == 'true') { - node.setAttribute('aria-selected', 'true'); - // $(node).trigger() - node.tabIndex = 0; - node.focus(); - } - else { - node.setAttribute('aria-selected', 'false'); - node.tabIndex = -1; - } -} - -function clickRadioGroup(event) { - - var type = event.type; - if (type === 'click') { - var node = event.currentTarget; - var radioButton = firstRadioButton(node); - while (radioButton) { - setRadioButton(radioButton, "false"); - radioButton = nextRadioButton(radioButton); - } - setRadioButton(node, "true"); - event.preventDefault(); - event.stopPropagation(); - } -} 
- -function keyDownRadioGroup(event) { - - var type = event.type; - var next = false; - if (type === "keydown") { - var node = event.currentTarget; - switch (event.keyCode) { - case KEYCODE.DOWN: - case KEYCODE.RIGHT: - next = nextRadioButton(node); - if (!next) next = firstRadioButton(node); //if node is the last node, selection cycles to the first. - break; - case KEYCODE.UP: - case KEYCODE.LEFT: - next = previousRadioButton(node); - if (!next) next = lastRadioButton(node); //if node is the first node, selection cycles to the last. - break; - case KEYCODE.SPACE: - next = node; - break; - } - if (next) { - var radioButton = firstRadioButton(node); - while (radioButton) { - setRadioButton(radioButton, "false"); - radioButton = nextRadioButton(radioButton); - } - setRadioButton(next, "true"); - event.preventDefault(); - event.stopPropagation(); - } - } -} - -function focusRadioButton(event) { - event.currentTarget.className += ' focus'; -} - -function blurRadioButton(event) { - event.currentTarget.className = event.currentTarget.className.replace(' focus', ''); -} - - - $(document).ready(function () { - $(".tbl_tablist li[role='tab']").click(function () { - $(".tbl_tablist li[role='tab']:not(this)").attr("aria-selected", "false"); - $(this).attr("aria-selected", "true"); - var tabpanid = $(this).attr("aria-controls"); - var tabpan = $("#" + tabpanid); - $("div[role='tabpanel']:not(tabpan)").attr("aria-hidden", "true"); - $("div[role='tabpanel']:not(tabpan)").addClass("hidden"); - - tabpan.removeClass("hidden"); - tabpan.attr("aria-hidden", "false"); - }); - - $(".tbl_tablist li[role='tab']").keydown(function (ev) { - if (ev.which == 13) { - $(this).click(); - } - }); - - //Pressing the left or right arrow key while a tab has focus toggles between the tabs. 
- $(".tbl_tablist li[role='tab']").keydown(function (ev) { - if (ev.which == 39 || ev.which == 37) { - var selected = $(this).attr("aria-selected"); - if (selected == "true") { - $("li[aria-selected='false']").attr("aria-selected", "true").focus(); - $(this).attr("aria-selected", "false"); - - var tabpanid = $("li[aria-selected='true']").attr("aria-controls"); - var tabpan = $("#" + tabpanid); - $("div[role='tabpanel']:not(tabpan)").attr("aria-hidden", "true"); - $("div[role='tabpanel']:not(tabpan)").addClass("hidden"); - - tabpan.attr("aria-hidden", "false"); - tabpan.removeClass("hidden"); - } - } - }); - }); - - // Modal Extension - // =============================== - - $('.modal-dialog').attr( {'role' : 'document'}) - var modalhide = $.fn.modal.Constructor.prototype.hide - $.fn.modal.Constructor.prototype.hide = function(){ - modalhide.apply(this, arguments) - $(document).off('keydown.bs.modal') - } - - var modalfocus = $.fn.modal.Constructor.prototype.enforceFocus - $.fn.modal.Constructor.prototype.enforceFocus = function(){ - var $content = this.$element.find(".modal-content") - var focEls = $content.find(":tabbable") - , $lastEl = $(focEls[focEls.length-1]) - , $firstEl = $(focEls[0]) - $lastEl.on('keydown.bs.modal', $.proxy(function (ev) { - if(ev.keyCode === 9 && !(ev.shiftKey | ev.ctrlKey | ev.metaKey | ev.altKey)) { // TAB pressed - ev.preventDefault(); - $firstEl.focus(); - } - }, this)) - $firstEl.on('keydown.bs.modal', $.proxy(function (ev) { - if(ev.keyCode === 9 && ev.shiftKey) { // SHIFT-TAB pressed - ev.preventDefault(); - $lastEl.focus(); - } - }, this)) - modalfocus.apply(this, arguments) - } - - $(function() { - var tabs = $(".custom-tab"); - - // For each individual tab DIV, set class and aria role attributes, and hide it - $(tabs).find(".tab-content > div.tab-pane").attr({ - "class": "tabPanel", - "role": "tabpanel", - "aria-hidden": "true" - }).hide(); - - // Get the list of tab links - var tabsList = tabs.find("ul:first").attr({ - "role": "tablist" - }); - - // For each item in the tabs list... 
- $(tabsList).find("li > a").each( - function(a) { - var tab = $(this); - - // Create a unique id using the tab link's href - var tabId = "tab-" + tab.attr("href").slice(1); - - // Assign tab id, aria and tabindex attributes to the tab control, but do not remove the href - tab.attr({ - "id": tabId, - "role": "tab", - "aria-selected": "false", - // "tabindex": "-1" - }).parent().attr("role", "presentation"); - - // Assign aria attribute to the relevant tab panel - $(tabs).find(".tabPanel").eq(a).attr("aria-labelledby", tabId); - - // Set the click event for each tab link - tab.click( - function(e) { - // Prevent default click event - e.preventDefault(); - - // Change state of previously selected tabList item - $(tabsList).find("> li.active").removeClass("active").find("> a").attr({ - "aria-selected": "false", - // "tabindex": "-1" - }); - - // Hide previously selected tabPanel - $(tabs).find(".tabPanel:visible").attr("aria-hidden", "true").hide(); - - // Show newly selected tabPanel - $(tabs).find(".tabPanel").eq(tab.parent().index()).attr("aria-hidden", "false").show(); - - // Set state of newly selected tab list item - tab.attr({ - "aria-selected": "true", - "tabindex": "0" - }).parent().addClass("active"); - tab.focus(); - } - ); - } - ); - - // Set keydown events on tabList item for navigating tabs - $(tabsList).delegate("a", "keydown", - function(e) { - var tab = $(this); - switch (e.which) { - case 37: - //case 38: - if (tab.parent().prev().length != 0) { - tab.parent().prev().find("> a").click(); - } else { - $(tabsList).find("li:last > a").click(); - } - break; - case 39: - //case 40: - if (tab.parent().next().length != 0) { - tab.parent().next().find("> a").click(); - } else { - $(tabsList).find("li:first > a").click(); - } - break; - } - } - ); - - // Show the first tabPanel - $(tabs).find(".tabPanel:first").attr("aria-hidden", "false").show(); - - // Set state for the first tabsList li - $(tabsList).find("li:first").addClass("active").find(" > a").attr({ - "aria-selected": "true", - "tabindex": "0" - }); - }); diff --git a/js/w3.js b/js/w3.js deleted file mode 100644 index d6d0d9e0ec70b..0000000000000 --- a/js/w3.js +++ /dev/null @@ -1,400 +0,0 @@ -/* W3.JS 1.04 April 2019 by w3schools.com */ -"use strict"; -var w3 = {}; -w3.hide = function (sel) { - w3.hideElements(w3.getElements(sel)); -}; -w3.hideElements = function (elements) { - var i, l = elements.length; - for (i = 0; i < l; i++) { - w3.hideElement(elements[i]); - } -}; -w3.hideElement = function (element) { - w3.styleElement(element, "display", "none"); -}; -w3.show = function (sel, a) { - var elements = w3.getElements(sel); - if (a) {w3.hideElements(elements);} - w3.showElements(elements); -}; -w3.showElements = function (elements) { - var i, l = elements.length; - for (i = 0; i < l; i++) { - w3.showElement(elements[i]); - } -}; -w3.showElement = function (element) { - w3.styleElement(element, "display", "block"); -}; -w3.addStyle = function (sel, prop, val) { - w3.styleElements(w3.getElements(sel), prop, val); -}; -w3.styleElements = function (elements, prop, val) { - var i, l = elements.length; - for (i = 0; i < l; i++) { - w3.styleElement(elements[i], prop, val); - } -}; -w3.styleElement = function (element, prop, val) { - element.style.setProperty(prop, val); -}; -w3.toggleShow = function (sel) { - var i, x = w3.getElements(sel), l = x.length; - for (i = 0; i < l; i++) { - if (x[i].style.display == "none") { - w3.styleElement(x[i], "display", "block"); - } else { - w3.styleElement(x[i], "display", "none"); - } - } 
-}; -w3.addClass = function (sel, name) { - w3.addClassElements(w3.getElements(sel), name); -}; -w3.addClassElements = function (elements, name) { - var i, l = elements.length; - for (i = 0; i < l; i++) { - w3.addClassElement(elements[i], name); - } -}; -w3.addClassElement = function (element, name) { - var i, arr1, arr2; - arr1 = element.className.split(" "); - arr2 = name.split(" "); - for (i = 0; i < arr2.length; i++) { - if (arr1.indexOf(arr2[i]) == -1) {element.className += " " + arr2[i];} - } -}; -w3.removeClass = function (sel, name) { - w3.removeClassElements(w3.getElements(sel), name); -}; -w3.removeClassElements = function (elements, name) { - var i, l = elements.length, arr1, arr2, j; - for (i = 0; i < l; i++) { - w3.removeClassElement(elements[i], name); - } -}; -w3.removeClassElement = function (element, name) { - var i, arr1, arr2; - arr1 = element.className.split(" "); - arr2 = name.split(" "); - for (i = 0; i < arr2.length; i++) { - while (arr1.indexOf(arr2[i]) > -1) { - arr1.splice(arr1.indexOf(arr2[i]), 1); - } - } - element.className = arr1.join(" "); -}; -w3.toggleClass = function (sel, c1, c2) { - w3.toggleClassElements(w3.getElements(sel), c1, c2); -}; -w3.toggleClassElements = function (elements, c1, c2) { - var i, l = elements.length; - for (i = 0; i < l; i++) { - w3.toggleClassElement(elements[i], c1, c2); - } -}; -w3.toggleClassElement = function (element, c1, c2) { - var t1, t2, t1Arr, t2Arr, j, arr, allPresent; - t1 = (c1 || ""); - t2 = (c2 || ""); - t1Arr = t1.split(" "); - t2Arr = t2.split(" "); - arr = element.className.split(" "); - if (t2Arr.length == 0) { - allPresent = true; - for (j = 0; j < t1Arr.length; j++) { - if (arr.indexOf(t1Arr[j]) == -1) {allPresent = false;} - } - if (allPresent) { - w3.removeClassElement(element, t1); - } else { - w3.addClassElement(element, t1); - } - } else { - allPresent = true; - for (j = 0; j < t1Arr.length; j++) { - if (arr.indexOf(t1Arr[j]) == -1) {allPresent = false;} - } - if (allPresent) { - w3.removeClassElement(element, t1); - w3.addClassElement(element, t2); - } else { - w3.removeClassElement(element, t2); - w3.addClassElement(element, t1); - } - } -}; -w3.getElements = function (id) { - if (typeof id == "object") { - return [id]; - } else { - return document.querySelectorAll(id); - } -}; -w3.filterHTML = function(id, sel, filter) { - var a, b, c, i, ii, iii, hit; - a = w3.getElements(id); - for (i = 0; i < a.length; i++) { - b = a[i].querySelectorAll(sel); - for (ii = 0; ii < b.length; ii++) { - hit = 0; - if (b[ii].innerText.toUpperCase().indexOf(filter.toUpperCase()) > -1) { - hit = 1; - } - c = b[ii].getElementsByTagName("*"); - for (iii = 0; iii < c.length; iii++) { - if (c[iii].innerText.toUpperCase().indexOf(filter.toUpperCase()) > -1) { - hit = 1; - } - } - if (hit == 1) { - b[ii].style.display = ""; - } else { - b[ii].style.display = "none"; - } - } - } -}; -w3.sortHTML = function(id, sel, sortvalue) { - var a, b, i, ii, y, bytt, v1, v2, cc, j; - a = w3.getElements(id); - for (i = 0; i < a.length; i++) { - for (j = 0; j < 2; j++) { - cc = 0; - y = 1; - while (y == 1) { - y = 0; - b = a[i].querySelectorAll(sel); - for (ii = 0; ii < (b.length - 1); ii++) { - bytt = 0; - if (sortvalue) { - v1 = b[ii].querySelector(sortvalue).innerText; - v2 = b[ii + 1].querySelector(sortvalue).innerText; - } else { - v1 = b[ii].innerText; - v2 = b[ii + 1].innerText; - } - v1 = v1.toLowerCase(); - v2 = v2.toLowerCase(); - if ((j == 0 && (v1 > v2)) || (j == 1 && (v1 < v2))) { - bytt = 1; - break; - } - } - if (bytt == 1) { - 
b[ii].parentNode.insertBefore(b[ii + 1], b[ii]); - y = 1; - cc++; - } - } - if (cc > 0) {break;} - } - } -}; -w3.slideshow = function (sel, ms, func) { - var i, ss, x = w3.getElements(sel), l = x.length; - ss = {}; - ss.current = 1; - ss.x = x; - ss.ondisplaychange = func; - if (!isNaN(ms) || ms == 0) { - ss.milliseconds = ms; - } else { - ss.milliseconds = 1000; - } - ss.start = function() { - ss.display(ss.current) - if (ss.ondisplaychange) {ss.ondisplaychange();} - if (ss.milliseconds > 0) { - window.clearTimeout(ss.timeout); - ss.timeout = window.setTimeout(ss.next, ss.milliseconds); - } - }; - ss.next = function() { - ss.current += 1; - if (ss.current > ss.x.length) {ss.current = 1;} - ss.start(); - }; - ss.previous = function() { - ss.current -= 1; - if (ss.current < 1) {ss.current = ss.x.length;} - ss.start(); - }; - ss.display = function (n) { - w3.styleElements(ss.x, "display", "none"); - w3.styleElement(ss.x[n - 1], "display", "block"); - } - ss.start(); - return ss; -}; -w3.includeHTML = function(cb) { - var z, i, elmnt, file, xhttp; - z = document.getElementsByTagName("*"); - for (i = 0; i < z.length; i++) { - elmnt = z[i]; - file = elmnt.getAttribute("w3-include-html"); - if (file) { - xhttp = new XMLHttpRequest(); - xhttp.onreadystatechange = function() { - if (this.readyState == 4) { - if (this.status == 200) {elmnt.innerHTML = this.responseText;} - if (this.status == 404) {elmnt.innerHTML = "Page not found.";} - elmnt.removeAttribute("w3-include-html"); - w3.includeHTML(cb); - } - } - xhttp.open("GET", file, true); - xhttp.send(); - return; - } - } - if (cb) cb(); -}; -w3.getHttpData = function (file, func) { - w3.http(file, function () { - if (this.readyState == 4 && this.status == 200) { - func(this.responseText); - } - }); -}; -w3.getHttpObject = function (file, func) { - w3.http(file, function () { - if (this.readyState == 4 && this.status == 200) { - func(JSON.parse(this.responseText)); - } - }); -}; -w3.displayHttp = function (id, file) { - w3.http(file, function () { - if (this.readyState == 4 && this.status == 200) { - w3.displayObject(id, JSON.parse(this.responseText)); - } - }); -}; -w3.http = function (target, readyfunc, xml, method) { - var httpObj; - if (!method) {method = "GET"; } - if (window.XMLHttpRequest) { - httpObj = new XMLHttpRequest(); - } else if (window.ActiveXObject) { - httpObj = new ActiveXObject("Microsoft.XMLHTTP"); - } - if (httpObj) { - if (readyfunc) {httpObj.onreadystatechange = readyfunc;} - httpObj.open(method, target, true); - httpObj.send(xml); - } -}; -w3.getElementsByAttribute = function (x, att) { - var arr = [], arrCount = -1, i, l, y = x.getElementsByTagName("*"), z = att.toUpperCase(); - l = y.length; - for (i = -1; i < l; i += 1) { - if (i == -1) {y[i] = x;} - if (y[i].getAttribute(z) !== null) {arrCount += 1; arr[arrCount] = y[i];} - } - return arr; -}; -w3.dataObject = {}, -w3.displayObject = function (id, data) { - var htmlObj, htmlTemplate, html, arr = [], a, l, rowClone, x, j, i, ii, cc, repeat, repeatObj, repeatX = ""; - htmlObj = document.getElementById(id); - htmlTemplate = init_template(id, htmlObj); - html = htmlTemplate.cloneNode(true); - arr = w3.getElementsByAttribute(html, "w3-repeat"); - l = arr.length; - for (j = (l - 1); j >= 0; j -= 1) { - cc = arr[j].getAttribute("w3-repeat").split(" "); - if (cc.length == 1) { - repeat = cc[0]; - } else { - repeatX = cc[0]; - repeat = cc[2]; - } - arr[j].removeAttribute("w3-repeat"); - repeatObj = data[repeat]; - if (repeatObj && typeof repeatObj == "object" && 
repeatObj.length != "undefined") { - i = 0; - for (x in repeatObj) { - i += 1; - rowClone = arr[j]; - rowClone = w3_replace_curly(rowClone, "element", repeatX, repeatObj[x]); - a = rowClone.attributes; - for (ii = 0; ii < a.length; ii += 1) { - a[ii].value = w3_replace_curly(a[ii], "attribute", repeatX, repeatObj[x]).value; - } - (i === repeatObj.length) ? arr[j].parentNode.replaceChild(rowClone, arr[j]) : arr[j].parentNode.insertBefore(rowClone, arr[j]); - } - } else { - console.log("w3-repeat must be an array. " + repeat + " is not an array."); - continue; - } - } - html = w3_replace_curly(html, "element"); - htmlObj.parentNode.replaceChild(html, htmlObj); - function init_template(id, obj) { - var template; - template = obj.cloneNode(true); - if (w3.dataObject.hasOwnProperty(id)) {return w3.dataObject[id];} - w3.dataObject[id] = template; - return template; - } - function w3_replace_curly(elmnt, typ, repeatX, x) { - var value, rowClone, pos1, pos2, originalHTML, lookFor, lookForARR = [], i, cc, r; - rowClone = elmnt.cloneNode(true); - pos1 = 0; - while (pos1 > -1) { - originalHTML = (typ == "attribute") ? rowClone.value : rowClone.innerHTML; - pos1 = originalHTML.indexOf("{{", pos1); - if (pos1 === -1) {break;} - pos2 = originalHTML.indexOf("}}", pos1 + 1); - lookFor = originalHTML.substring(pos1 + 2, pos2); - lookForARR = lookFor.split("||"); - value = undefined; - for (i = 0; i < lookForARR.length; i += 1) { - lookForARR[i] = lookForARR[i].replace(/^\s+|\s+$/gm, ''); //trim - if (x) {value = x[lookForARR[i]];} - if (value == undefined && data) {value = data[lookForARR[i]];} - if (value == undefined) { - cc = lookForARR[i].split("."); - if (cc[0] == repeatX) {value = x[cc[1]]; } - } - if (value == undefined) { - if (lookForARR[i] == repeatX) {value = x;} - } - if (value == undefined) { - if (lookForARR[i].substr(0, 1) == '"') { - value = lookForARR[i].replace(/"/g, ""); - } else if (lookForARR[i].substr(0,1) == "'") { - value = lookForARR[i].replace(/'/g, ""); - } - } - if (value != undefined) {break;} - } - if (value != undefined) { - r = "{{" + lookFor + "}}"; - if (typ == "attribute") { - rowClone.value = rowClone.value.replace(r, value); - } else { - w3_replace_html(rowClone, r, value); - } - } - pos1 = pos1 + 1; - } - return rowClone; - } - function w3_replace_html(a, r, result) { - var b, l, i, a, x, j; - if (a.hasAttributes()) { - b = a.attributes; - l = b.length; - for (i = 0; i < l; i += 1) { - if (b[i].value.indexOf(r) > -1) {b[i].value = b[i].value.replace(r, result);} - } - } - x = a.getElementsByTagName("*"); - l = x.length; - a.innerHTML = a.innerHTML.replace(r, result); - } -}; \ No newline at end of file diff --git a/news.html b/news.html deleted file mode 100644 index 2d48f599f935a..0000000000000 --- a/news.html +++ /dev/null @@ -1,100 +0,0 @@ - - - - - - - - - - - - - - - - ONNX Runtime | Community - - - - - - - - - - - -

This page has moved to Blogs

- - -
- - - - - - - - - - - - - \ No newline at end of file diff --git a/olive.html b/olive.html deleted file mode 100644 index 3474643fbd2c1..0000000000000 --- a/olive.html +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - Olive - hardware-aware model optimization - - - - - - - - - diff --git a/onnx/converterteam.html b/onnx/converterteam.html deleted file mode 100644 index 61b1204b452ae..0000000000000 --- a/onnx/converterteam.html +++ /dev/null @@ -1,118 +0,0 @@ - - - - - - - - - - - - - - ONNX Converters Team - - - -
-
- ONNX Logo -

ONNX Converters Team

-
-

Welcome to the landing page for the ONNX Converters Team at Microsoft.
We hope your stay is - short and that you quickly get what you need!

-
-
-
-
-
-

Issue Submission

-
-

- Have an issue converting a PyTorch or TensorFlow model to an ONNX model?
Submit an issue to get help ASAP. -

-
-
-
-

Microsoft Internal (Private)

-

If the issue contains information that cannot be disclosed publicly, and you're internal to Microsoft, file an issue internally:

- -
-
-

General Issue (Public)

-

Have an issue that can be publicly disclosed on GitHub? File the issue upstream:

- -
-
-

External Partner (Private)

-

If you're external to Microsoft and the issue contains information that cannot be disclosed publicly, use the following link to start an email from our template, and send it to the DRI listed below:

- -
-
-
-
-
-
- © Microsoft. All rights reserved. -
- - - - \ No newline at end of file diff --git a/ort-at-microsoft.html b/ort-at-microsoft.html deleted file mode 100644 index b359591836c43..0000000000000 --- a/ort-at-microsoft.html +++ /dev/null @@ -1,126 +0,0 @@ - - - - - - - - - - - - - - - ONNX Runtime | ORT at Microsoft - - - - - - - - - - Skip to main content -
-
- - -
- - -
-
- -
-

ONNX Runtime usage at Microsoft

- ONNX Runtime powers Machine Learning inferencing for most of Microsoft's products and services, - providing high performance and deployment versatility to support a large range of device types - across cloud, mobile, and edge. -

- -
- - - - - - - - -
-
-
-
-
-
- -
- - - - - - - - - - - \ No newline at end of file diff --git a/package.json b/package.json new file mode 100644 index 0000000000000..68d2392a3ac16 --- /dev/null +++ b/package.json @@ -0,0 +1,53 @@ +{ + "name": "website", + "version": "0.0.1", + "private": true, + "scripts": { + "dev": "vite dev", + "build": "vite build", + "preview": "vite preview", + "check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json", + "check:watch": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json --watch", + "lint": "prettier --plugin-search-dir . --check . && eslint .", + "format": "prettier --plugin-search-dir . --write ./src", + "deploy": "npx gh-pages -d build -r git@github.com:MaanavD/onnxruntime.git", + "postbuild": "npx svelte-sitemap --domain https://onnxruntime.ai" + }, + "devDependencies": { + "@sveltejs/adapter-auto": "^2.0.0", + "@sveltejs/adapter-static": "^2.0.2", + "@sveltejs/kit": "^1.20.4", + "@types/animejs": "^3.1.7", + "@types/gtag.js": "^0.0.13", + "@types/jquery": "^3.5.20", + "@typescript-eslint/eslint-plugin": "^5.45.0", + "@typescript-eslint/parser": "^5.45.0", + "autoprefixer": "^10.4.14", + "daisyui": "^3.6.3", + "eslint": "^8.28.0", + "eslint-config-prettier": "^8.5.0", + "eslint-plugin-svelte": "^2.30.0", + "gh-pages": "^5.0.0", + "jquery": "^3.7.1", + "postcss": "^8.4.26", + "prettier": "^2.8.0", + "prettier-plugin-svelte": "^2.10.1", + "svelte": "^4.0.5", + "svelte-check": "^3.4.3", + "svelte-highlight": "^7.3.0", + "svelte-preprocess-import-assets": "^1.0.1", + "svelte-sitemap": "^2.6.0", + "tailwindcss": "^3.3.3", + "tslib": "^2.4.1", + "typescript": "^5.0.0", + "vite": "^4.4.2" + }, + "type": "module", + "dependencies": { + "animejs": "^3.2.1", + "saos": "^1.3.1", + "svelte-icons": "^2.1.0", + "sveltejs-adapter-ipfs": "^0.4.12", + "theme-change": "^2.5.0" + } +} diff --git a/postcss.config.js b/postcss.config.js new file mode 100644 index 0000000000000..0f7721681d725 --- /dev/null +++ b/postcss.config.js @@ -0,0 +1,6 @@ +export default { + plugins: { + tailwindcss: {}, + autoprefixer: {} + } +}; diff --git a/pytorch.html b/pytorch.html deleted file mode 100644 index b73ca6d6c1342..0000000000000 --- a/pytorch.html +++ /dev/null @@ -1,236 +0,0 @@ - - - - - - - - - - - - - - - ONNX Runtime | PyTorch - - - - - - - - - - Skip to main content -
-
- - -
- - -
- -
-

PyTorch + ONNX Runtime = Production -

-
- -
-
-
-
-
-
-
-
- -
-
-
-

Deploy anywhere

-

Run PyTorch models on cloud, desktop, mobile, IoT, and even in the browser

-
-
-
-
-
-
-
- -
-
-
-

Boost performance

-

Accelerate PyTorch models to improve user experience and reduce costs

-
-
-
-
-
-
-
- -
-
-
-

Improve time to market

-

Used by Microsoft and many others for their production PyTorch workloads

-
-
-
-
-
- - -
- -
-
-
-
- -
-
-

Native support in PyTorch

-

PyTorch includes support for ONNX through the torch.onnx APIs to simplify exporting your PyTorch model to the portable ONNX format. The ONNX Runtime team maintains these exporter APIs to ensure a high level of compatibility with PyTorch models.

-

Get your PyTorch models ready for optimized deployment >>

-
-
-
-
- -
-
-

Python not required

-

Training PyTorch models requires Python, but that can be a significant obstacle to deploying PyTorch models to many production environments, especially Android and iOS mobile devices. ONNX Runtime is designed for production and provides APIs in C/C++, C#, Java, and Objective-C, helping create a bridge from your PyTorch training environment to a successful PyTorch production deployment.

-

See ONNX Runtime's many Python-free APIs >>

-
-
-
-
- -
-
-

Lower latency, higher throughput

-

Better performance can help improve your user experience and lower your operating costs. A wide range of models from computer vision (ResNet, MobileNet, Inception, YOLO, super resolution, etc.) to speech and NLP (BERT, RoBERTa, GPT-2, T5, etc.) can benefit from ONNX Runtime's optimized performance. The ONNX Runtime team regularly benchmarks and optimizes top models for performance. ONNX Runtime also integrates with top hardware accelerator libraries like TensorRT and OpenVINO so you can get the best performance on the hardware available while using the same common APIs across all your target platforms.

-

Check the latest on performance enhancements >>

-
-
-
-
- -
-
-

Get innovations into production faster

-

Development agility is a key factor in overall costs. ONNX Runtime was built on the experience of taking PyTorch models to production in high-scale services like Microsoft Office, Bing, and Azure. It used to take weeks or months to take a model from R&D to production. With ONNX Runtime, models can be ready to be deployed at scale in hours or days.

-

See what ONNX Runtime users are saying >>

-
-
-
-
-
-
-
-
- -
- - - - - - - - - - - - - \ No newline at end of file diff --git a/src/app.css b/src/app.css new file mode 100644 index 0000000000000..1e8234fb6d5bb --- /dev/null +++ b/src/app.css @@ -0,0 +1,14 @@ +@tailwind base; +@tailwind components; +@tailwind utilities; +/* { + primary": "#b2b2b2", + "secondary": "#fcfcfc", + "accent": "#64c0fe", + "neutral": "#d1d1d1", + "base-100": "#212933", + "info": "#1d4ed8", + "success": "#818cf8", + "warning": "#facc15", + "error": "#ef4444", +} */ diff --git a/src/app.d.ts b/src/app.d.ts new file mode 100644 index 0000000000000..f59b884c51ed3 --- /dev/null +++ b/src/app.d.ts @@ -0,0 +1,12 @@ +// See https://kit.svelte.dev/docs/types#app +// for information about these interfaces +declare global { + namespace App { + // interface Error {} + // interface Locals {} + // interface PageData {} + // interface Platform {} + } +} + +export {}; diff --git a/src/app.html b/src/app.html new file mode 100644 index 0000000000000..94ce373d1071d --- /dev/null +++ b/src/app.html @@ -0,0 +1,12 @@ + + + + + + + %sveltekit.head% + + +
%sveltekit.body%
+ + diff --git a/src/images/ONNX-Dark.svelte b/src/images/ONNX-Dark.svelte new file mode 100644 index 0000000000000..1146f894ff920 --- /dev/null +++ b/src/images/ONNX-Dark.svelte @@ -0,0 +1,104 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/images/ONNX-Icon.svg b/src/images/ONNX-Icon.svg new file mode 100644 index 0000000000000..9c310c2ff2353 --- /dev/null +++ b/src/images/ONNX-Icon.svg @@ -0,0 +1,39 @@ + +ONNX Runtime Logo + + + + diff --git a/src/images/ONNX-Light.svelte b/src/images/ONNX-Light.svelte new file mode 100644 index 0000000000000..df088b3c71430 --- /dev/null +++ b/src/images/ONNX-Light.svelte @@ -0,0 +1,194 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/images/StableDiffusion1.webp b/src/images/StableDiffusion1.webp new file mode 100644 index 0000000000000..ea69a75be1081 Binary files /dev/null and b/src/images/StableDiffusion1.webp differ diff --git a/src/images/StableDiffusion2.webp b/src/images/StableDiffusion2.webp new file mode 100644 index 0000000000000..9eae0ed9a2f84 Binary files /dev/null and b/src/images/StableDiffusion2.webp differ diff --git a/src/images/blogs/pytorch-on-the-edge-puppies.png b/src/images/blogs/pytorch-on-the-edge-puppies.png new file mode 100644 index 0000000000000..8ed4cd7c6be7f Binary files /dev/null and b/src/images/blogs/pytorch-on-the-edge-puppies.png differ diff --git a/src/images/blogs/pytorch-on-the-edge-speechrec.png b/src/images/blogs/pytorch-on-the-edge-speechrec.png new file mode 100644 index 0000000000000..201232f38af80 Binary files /dev/null and b/src/images/blogs/pytorch-on-the-edge-speechrec.png differ diff --git a/src/images/blogs/pytorch-on-the-edge-textgen.png b/src/images/blogs/pytorch-on-the-edge-textgen.png new file mode 100644 index 0000000000000..e5cefb1b4a2ba Binary files /dev/null and b/src/images/blogs/pytorch-on-the-edge-textgen.png differ diff --git a/src/images/blogs/pytorch-on-the-edge-training.png b/src/images/blogs/pytorch-on-the-edge-training.png new file mode 100644 index 0000000000000..2d04fa3b94a86 Binary files /dev/null and b/src/images/blogs/pytorch-on-the-edge-training.png differ diff --git a/src/images/blogs/pytorch-on-the-edge-with-ort.png b/src/images/blogs/pytorch-on-the-edge-with-ort.png new file mode 100644 index 0000000000000..69b7737c0089c Binary files /dev/null and b/src/images/blogs/pytorch-on-the-edge-with-ort.png differ diff --git a/src/images/foundationmodels.png b/src/images/foundationmodels.png new file mode 100644 index 0000000000000..1ecb48570a841 Binary files /dev/null and b/src/images/foundationmodels.png differ diff --git a/src/images/logos/ATLAS-logo.png b/src/images/logos/ATLAS-logo.png new file mode 100644 index 0000000000000..1b0b03e78300a Binary files /dev/null and b/src/images/logos/ATLAS-logo.png differ diff --git a/src/images/logos/GitHub-Logo.svg b/src/images/logos/GitHub-Logo.svg new file mode 100644 index 0000000000000..4b212aaf6f6f0 --- /dev/null +++ b/src/images/logos/GitHub-Logo.svg @@ -0,0 +1,3 @@ + + + diff --git a/src/images/logos/PeakSpeed_logo.png b/src/images/logos/PeakSpeed_logo.png new file mode 100644 index 0000000000000..6233171cb80a0 Binary files /dev/null and b/src/images/logos/PeakSpeed_logo.png differ diff --git a/src/images/logos/PyTorch_logo_black.svg b/src/images/logos/PyTorch_logo_black.svg new file mode 100644 index 0000000000000..27da4c9272c06 --- /dev/null +++ b/src/images/logos/PyTorch_logo_black.svg @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + + + + + diff --git 
a/src/images/logos/Rockchip-logo.png b/src/images/logos/Rockchip-logo.png new file mode 100644 index 0000000000000..f5f1deff1c9c6 Binary files /dev/null and b/src/images/logos/Rockchip-logo.png differ diff --git a/src/images/logos/Windows_logo_and_wordmark.svg b/src/images/logos/Windows_logo_and_wordmark.svg new file mode 100644 index 0000000000000..128b87a28bb72 --- /dev/null +++ b/src/images/logos/Windows_logo_and_wordmark.svg @@ -0,0 +1,87 @@ + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + diff --git a/src/images/logos/adobe-logo.png b/src/images/logos/adobe-logo.png new file mode 100644 index 0000000000000..b04b9a72e0623 Binary files /dev/null and b/src/images/logos/adobe-logo.png differ diff --git a/src/images/logos/amd-logo.png b/src/images/logos/amd-logo.png new file mode 100644 index 0000000000000..aac8547e3a390 Binary files /dev/null and b/src/images/logos/amd-logo.png differ diff --git a/src/images/logos/antgroup-logo.png b/src/images/logos/antgroup-logo.png new file mode 100644 index 0000000000000..b6d3902f40423 Binary files /dev/null and b/src/images/logos/antgroup-logo.png differ diff --git a/src/images/logos/bazaarvoice-logo.png b/src/images/logos/bazaarvoice-logo.png new file mode 100644 index 0000000000000..9e4bd85a70c1d Binary files /dev/null and b/src/images/logos/bazaarvoice-logo.png differ diff --git a/src/images/logos/clearblade-logo.png b/src/images/logos/clearblade-logo.png new file mode 100644 index 0000000000000..b7344eb2e10c2 Binary files /dev/null and b/src/images/logos/clearblade-logo.png differ diff --git a/src/images/logos/deezer-logo.png b/src/images/logos/deezer-logo.png new file mode 100644 index 0000000000000..93c3ca05ae00c Binary files /dev/null and b/src/images/logos/deezer-logo.png differ diff --git a/src/images/logos/hf-logo-with-title.svg b/src/images/logos/hf-logo-with-title.svg new file mode 100644 index 0000000000000..7e2540845e1f8 --- /dev/null +++ b/src/images/logos/hf-logo-with-title.svg @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/src/images/logos/huggingface-logo.png b/src/images/logos/huggingface-logo.png new file mode 100644 index 0000000000000..9b322328c5702 Binary files /dev/null and b/src/images/logos/huggingface-logo.png differ diff --git a/src/images/logos/hypefactors-logo.png b/src/images/logos/hypefactors-logo.png new file mode 100644 index 0000000000000..ee60d4b798a84 Binary files /dev/null and b/src/images/logos/hypefactors-logo.png differ diff --git a/src/images/logos/infarm-logo.png b/src/images/logos/infarm-logo.png new file mode 100644 index 0000000000000..265b7f13e19fc Binary files /dev/null and b/src/images/logos/infarm-logo.png differ diff --git a/src/images/logos/intel-logo.png b/src/images/logos/intel-logo.png new file mode 100644 index 0000000000000..9e41023ffef61 Binary files /dev/null and b/src/images/logos/intel-logo.png differ diff --git a/src/images/logos/intelligenza-etica-logo.png b/src/images/logos/intelligenza-etica-logo.png new file mode 100644 index 0000000000000..8a921d51ab4fd Binary files /dev/null and b/src/images/logos/intelligenza-etica-logo.png differ diff --git a/src/images/logos/navitaire-amadeus-logo.png b/src/images/logos/navitaire-amadeus-logo.png new file mode 100644 index 0000000000000..213731eb0833f Binary files /dev/null and b/src/images/logos/navitaire-amadeus-logo.png differ diff --git a/src/images/logos/nvidia.png b/src/images/logos/nvidia.png new file mode 100644 index 0000000000000..def63034fd493 Binary files /dev/null and b/src/images/logos/nvidia.png differ diff --git 
a/src/images/logos/opennlp-logo.png b/src/images/logos/opennlp-logo.png new file mode 100644 index 0000000000000..99777e1535cf4 Binary files /dev/null and b/src/images/logos/opennlp-logo.png differ diff --git a/src/images/logos/oracle-logo.png b/src/images/logos/oracle-logo.png new file mode 100644 index 0000000000000..61b4ade39e2af Binary files /dev/null and b/src/images/logos/oracle-logo.png differ diff --git a/src/images/logos/pieces-logo.png b/src/images/logos/pieces-logo.png new file mode 100644 index 0000000000000..f1cae4c514425 Binary files /dev/null and b/src/images/logos/pieces-logo.png differ diff --git a/src/images/logos/ptw-logo.png b/src/images/logos/ptw-logo.png new file mode 100644 index 0000000000000..af429ff3cee00 Binary files /dev/null and b/src/images/logos/ptw-logo.png differ diff --git a/src/images/logos/redis-logo.png b/src/images/logos/redis-logo.png new file mode 100644 index 0000000000000..a524d6c7ad011 Binary files /dev/null and b/src/images/logos/redis-logo.png differ diff --git a/src/images/logos/samtec-logo.png b/src/images/logos/samtec-logo.png new file mode 100644 index 0000000000000..25334879a2b77 Binary files /dev/null and b/src/images/logos/samtec-logo.png differ diff --git a/src/images/logos/sas-logo.png b/src/images/logos/sas-logo.png new file mode 100644 index 0000000000000..e61600aa707f2 Binary files /dev/null and b/src/images/logos/sas-logo.png differ diff --git a/src/images/logos/teradata-logo.png b/src/images/logos/teradata-logo.png new file mode 100644 index 0000000000000..f6de78d339927 Binary files /dev/null and b/src/images/logos/teradata-logo.png differ diff --git a/src/images/logos/topazlabs-logo.png b/src/images/logos/topazlabs-logo.png new file mode 100644 index 0000000000000..7a547015bd565 Binary files /dev/null and b/src/images/logos/topazlabs-logo.png differ diff --git a/src/images/logos/ue-logo.png b/src/images/logos/ue-logo.png new file mode 100644 index 0000000000000..6a28a03bc2160 Binary files /dev/null and b/src/images/logos/ue-logo.png differ diff --git a/src/images/logos/usda-logo.png b/src/images/logos/usda-logo.png new file mode 100644 index 0000000000000..dd6176a3490f4 Binary files /dev/null and b/src/images/logos/usda-logo.png differ diff --git a/src/images/logos/vespa-logo.png b/src/images/logos/vespa-logo.png new file mode 100644 index 0000000000000..9cdc6e0947275 Binary files /dev/null and b/src/images/logos/vespa-logo.png differ diff --git a/src/images/logos/visual-studio-logo.png b/src/images/logos/visual-studio-logo.png new file mode 100644 index 0000000000000..d1347e231bdaf Binary files /dev/null and b/src/images/logos/visual-studio-logo.png differ diff --git a/src/images/logos/writer-logo.png b/src/images/logos/writer-logo.png new file mode 100644 index 0000000000000..7f3dec3a4f91c Binary files /dev/null and b/src/images/logos/writer-logo.png differ diff --git a/src/images/logos/xilinx-logo.png b/src/images/logos/xilinx-logo.png new file mode 100644 index 0000000000000..95679857199c8 Binary files /dev/null and b/src/images/logos/xilinx-logo.png differ diff --git a/src/images/onnx-icon.svelte b/src/images/onnx-icon.svelte new file mode 100644 index 0000000000000..b6179bf934142 --- /dev/null +++ b/src/images/onnx-icon.svelte @@ -0,0 +1,51 @@ + + + + + + + diff --git a/src/images/prod-ready.png b/src/images/prod-ready.png new file mode 100644 index 0000000000000..f1a174a9e0ceb Binary files /dev/null and b/src/images/prod-ready.png differ diff --git a/src/images/undraw/image_HF1.svelte b/src/images/undraw/image_HF1.svelte 
new file mode 100644 index 0000000000000..17aaccdba6ab0 --- /dev/null +++ b/src/images/undraw/image_HF1.svelte @@ -0,0 +1,165 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/images/undraw/image_HF2.svelte b/src/images/undraw/image_HF2.svelte new file mode 100644 index 0000000000000..94d99dfa34208 --- /dev/null +++ b/src/images/undraw/image_HF2.svelte @@ -0,0 +1,263 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/images/undraw/image_HF3.svelte b/src/images/undraw/image_HF3.svelte new file mode 100644 index 0000000000000..ff194f8ec2e13 --- /dev/null +++ b/src/images/undraw/image_HF3.svelte @@ -0,0 +1,110 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/images/undraw/image_PT1.svelte b/src/images/undraw/image_PT1.svelte new file mode 100644 index 0000000000000..0e6f33336a463 --- /dev/null +++ b/src/images/undraw/image_PT1.svelte @@ -0,0 +1,57 @@ + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/images/undraw/image_PT2.svelte b/src/images/undraw/image_PT2.svelte new file mode 100644 index 0000000000000..58ccbd8d3edca --- /dev/null +++ b/src/images/undraw/image_PT2.svelte @@ -0,0 +1,51 @@ + + + + + + + + + diff --git a/src/images/undraw/image_PT3.svelte b/src/images/undraw/image_PT3.svelte new file mode 100644 index 0000000000000..63418f82c0d7b --- /dev/null +++ b/src/images/undraw/image_PT3.svelte @@ -0,0 +1,123 @@ + + + + + + + + + + + + + + + + + + + + + diff --git a/src/images/undraw/image_PT4.svelte b/src/images/undraw/image_PT4.svelte new file mode 100644 index 0000000000000..ac7f0f5a7cb67 --- /dev/null +++ b/src/images/undraw/image_PT4.svelte @@ -0,0 +1,81 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/images/undraw/image_PT5.svelte b/src/images/undraw/image_PT5.svelte new file mode 100644 index 0000000000000..2f3459451cc1e --- /dev/null +++ b/src/images/undraw/image_PT5.svelte @@ -0,0 +1,190 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/images/undraw/image_blogs.svelte b/src/images/undraw/image_blogs.svelte new file mode 100644 index 0000000000000..2bc994866c967 --- /dev/null +++ b/src/images/undraw/image_blogs.svelte @@ -0,0 +1,142 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/images/undraw/image_inference1.svg b/src/images/undraw/image_inference1.svg new file mode 100644 index 0000000000000..b28eaad2fa401 --- /dev/null +++ b/src/images/undraw/image_inference1.svg @@ -0,0 +1,60 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/images/undraw/image_inference2.svelte b/src/images/undraw/image_inference2.svelte new file mode 100644 index 0000000000000..e10d4d9e99b24 --- /dev/null +++ b/src/images/undraw/image_inference2.svelte @@ -0,0 +1,292 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/images/undraw/image_largemodeltraining.svelte b/src/images/undraw/image_largemodeltraining.svelte new file mode 100644 index 0000000000000..e5fa5b3ecfee2 --- /dev/null +++ b/src/images/undraw/image_largemodeltraining.svelte @@ -0,0 +1,234 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/images/undraw/image_ondevtraining.svelte b/src/images/undraw/image_ondevtraining.svelte new file mode 100644 index 0000000000000..049569d2c4d50 --- /dev/null +++ b/src/images/undraw/image_ondevtraining.svelte @@ -0,0 +1,112 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/images/undraw/image_ortcrossplatform.svelte b/src/images/undraw/image_ortcrossplatform.svelte new file mode 100644 index 0000000000000..7e65f9cc3a2c2 --- /dev/null +++ b/src/images/undraw/image_ortcrossplatform.svelte @@ -0,0 +1,112 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/images/undraw/image_ortgenerativeai.svelte b/src/images/undraw/image_ortgenerativeai.svelte new file mode 100644 index 0000000000000..929f15d6b5f87 --- /dev/null +++ b/src/images/undraw/image_ortgenerativeai.svelte @@ -0,0 +1,156 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/images/undraw/image_ortmobile.svelte b/src/images/undraw/image_ortmobile.svelte new file mode 100644 index 0000000000000..d82a8f46049fa --- /dev/null +++ b/src/images/undraw/image_ortmobile.svelte @@ -0,0 +1,220 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/images/undraw/image_ortperf.svelte b/src/images/undraw/image_ortperf.svelte new file mode 100644 index 0000000000000..267e0201ea5bd --- /dev/null +++ b/src/images/undraw/image_ortperf.svelte @@ -0,0 +1,126 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/images/undraw/image_ortweb.svelte b/src/images/undraw/image_ortweb.svelte new file mode 100644 index 0000000000000..5dbcad39298f5 --- /dev/null +++ b/src/images/undraw/image_ortweb.svelte @@ -0,0 +1,292 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/images/undraw/image_testimonials.svelte b/src/images/undraw/image_testimonials.svelte new file mode 100644 index 0000000000000..9eb40a2bf5e59 --- /dev/null +++ b/src/images/undraw/image_testimonials.svelte @@ -0,0 +1,311 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/images/undraw/image_training1.svelte b/src/images/undraw/image_training1.svelte new file mode 100644 index 0000000000000..3f358e0f0da89 --- /dev/null +++ b/src/images/undraw/image_training1.svelte @@ -0,0 +1,124 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/images/undraw/image_training2.svelte b/src/images/undraw/image_training2.svelte new file mode 100644 index 0000000000000..bf8ae9d0ceaec --- /dev/null +++ b/src/images/undraw/image_training2.svelte @@ -0,0 +1,201 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/images/undraw/image_tutorials.svelte b/src/images/undraw/image_tutorials.svelte new file mode 100644 index 0000000000000..c057c3462eb4a --- /dev/null +++ b/src/images/undraw/image_tutorials.svelte @@ -0,0 +1,197 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/images/windowsdevkit.png 
b/src/images/windowsdevkit.png new file mode 100644 index 0000000000000..0ca344f600665 Binary files /dev/null and b/src/images/windowsdevkit.png differ diff --git a/src/lib/index.ts b/src/lib/index.ts new file mode 100644 index 0000000000000..856f2b6c38aec --- /dev/null +++ b/src/lib/index.ts @@ -0,0 +1 @@ +// place files you want to import through the `$lib` alias in this folder. diff --git a/src/routes/+error.svelte b/src/routes/+error.svelte new file mode 100644 index 0000000000000..982c411ac76aa --- /dev/null +++ b/src/routes/+error.svelte @@ -0,0 +1,19 @@ + + +
+
+

4

+
+ +
+

4

+
+

We're not sure how you got here, but you're probably lost!

+

If you'd like, here's a way to get home.

+
+ If you feel like something should be here, please + open an issue on GitHub. +
diff --git a/src/routes/+layout.js b/src/routes/+layout.js new file mode 100644 index 0000000000000..dc311703240e8 --- /dev/null +++ b/src/routes/+layout.js @@ -0,0 +1,8 @@ +export const prerender = true; +export const load = ({ url }) => { + const { pathname } = url; + + return { + pathname + }; +}; diff --git a/src/routes/+layout.svelte b/src/routes/+layout.svelte new file mode 100644 index 0000000000000..061fecc21f21a --- /dev/null +++ b/src/routes/+layout.svelte @@ -0,0 +1,57 @@ + + + + {@html oneLight} + ONNX Runtime | {data.pathname == '/' + ? 'Home' + : data.pathname.substring(1).charAt(0).toUpperCase() + data.pathname.substring(2)} + + + + + + + + + + + + + + + +
+ {#if !$page.url.pathname.startsWith('/blogs/')} +
+ {/if} + {#key data.pathname} +
+ +
+ {/key} + {#if !$page.url.pathname.startsWith('/blogs/')} +
+ {/if} +
diff --git a/src/routes/+page.svelte b/src/routes/+page.svelte new file mode 100644 index 0000000000000..d920af7a1f274 --- /dev/null +++ b/src/routes/+page.svelte @@ -0,0 +1,149 @@ + + + + + +
+ + + + + + + + + + + + + +
+ + + + + + + diff --git a/src/routes/Build2023/+page.svelte b/src/routes/Build2023/+page.svelte new file mode 100644 index 0000000000000..4faea6e81accf --- /dev/null +++ b/src/routes/Build2023/+page.svelte @@ -0,0 +1,6 @@ + diff --git a/src/routes/blogs/+page.svelte b/src/routes/blogs/+page.svelte new file mode 100644 index 0000000000000..53ee531831142 --- /dev/null +++ b/src/routes/blogs/+page.svelte @@ -0,0 +1,252 @@ + + + + + +
+
+

Blogs & Announcements

+ +
+
+

Featured posts

+
+ {#each featuredblog as blog} + + {/each} +
+
+
+

Recent posts

+
+ {#each blogs as blog, i} + + {/each} +
+
+
diff --git a/src/routes/blogs/blog-post-featured.svelte b/src/routes/blogs/blog-post-featured.svelte new file mode 100644 index 0000000000000..4483124cc0fa4 --- /dev/null +++ b/src/routes/blogs/blog-post-featured.svelte @@ -0,0 +1,39 @@ + + + diff --git a/src/routes/blogs/blog-post.svelte b/src/routes/blogs/blog-post.svelte new file mode 100644 index 0000000000000..a661253f59672 --- /dev/null +++ b/src/routes/blogs/blog-post.svelte @@ -0,0 +1,39 @@ + + + diff --git a/src/routes/blogs/pytorch-on-the-edge/+page.svelte b/src/routes/blogs/pytorch-on-the-edge/+page.svelte new file mode 100644 index 0000000000000..8e668555387ab --- /dev/null +++ b/src/routes/blogs/pytorch-on-the-edge/+page.svelte @@ -0,0 +1,475 @@ + + + + + +

Run PyTorch models on the edge


12TH OCTOBER, 2023


Most modern ML models are developed with PyTorch. The agility and flexibility that PyTorch provides for creating and training models have made it the most popular deep learning framework today. The typical workflow is to train these models in the cloud and run them from the cloud as well. However, many scenarios are arising that make it more attractive - or in some cases, required - to run locally on device. These include:

  • Avoiding network round-trips to the cloud (for example in audio and video processing)
  • Keeping user data on device (for privacy protection or regulatory requirements)
  • High cost of cloud resources (especially when device capabilities are underutilized)
  • Application requirements to operate without internet connectivity
Diagram showing the PyTorch logo representing a PyTorch model, fanning out to icons for web, mobile and browser devices running ONNX Runtime

In this article, we'll demystify running PyTorch models on the edge. We define 'edge' as anywhere that is outside of the cloud, ranging from large, well-resourced personal computers to small footprint devices such as mobile phones. This has been a challenging task to accomplish in the past, but new advances in model optimization and software like ONNX Runtime make it more feasible - even for new generative AI and large language models like Stable Diffusion, Whisper, and Llama2.


Considerations for PyTorch models on the edge


There are several factors to keep in mind when thinking about running a PyTorch model on the edge:

  • Size: modern models can be several gigabytes (hence the name Large Language Models!). On the cloud, size is usually not a consideration until it becomes too large to fit on a single GPU. At that point there are various well-known solutions for running across multiple GPUs. For edge devices, we need to find models that can fit within the constraints of the device. This sometimes requires a tradeoff with quality. Most modern models come in several sizes (1 billion parameters, 13 billion parameters, 70 billion parameters, etc.) so you can select a variant that fits on your device. Techniques such as quantization are usually applied to reduce the number of bits representing parameters, further reducing the model size (see the sketch just after this list). The size of the application is also constrained by the app stores, so bringing in gigabytes of libraries won't work on the edge.
  • API for application integration: on the cloud, models are usually packaged as Docker containers that expose an endpoint that is called by an application or service. On edge devices, Docker containers may take up too many resources or may not even be supported. By using an optimized engine, like ONNX Runtime, the dependency on Python and Docker containers can be eliminated. ONNX Runtime also has APIs in many languages including C, C++, C#, Rust, Java, JavaScript, Objective-C, and Swift, making it easier to integrate natively with the hosting application.
  • Performance: with large amounts of memory, no power restrictions, and hefty compute capabilities, running non-optimized models on the cloud is possible. On edge devices, these luxuries do not exist and optimization is crucial. For example, ONNX Runtime optimizes memory allocations, fuses model operators, reduces kernel launch times, minimizes tensor transfers between processing units, and applies tuned matrix math algorithms. It's also able to make use of compilers and engines that are device-specific, providing a common interface for your application while harnessing the best approach on each device.
  • Maintainability: on the cloud, updating a model is as simple as deploying a new container image and ramping up traffic. On the edge, you need to consider how you will distribute model updates. Sometimes this involves publishing updates to an app store; sometimes it might be possible to implement a data update mechanism within your app and download new model files or even deltas. There are many possible paths, so we won't go into much depth on this topic in this article, but it's an aspect to keep in mind as you plan for production.
  • Hybrid: instead of cloud versus device, you can choose to utilize both. There are several hybrid patterns that are used in production today by applications such as Office. One pattern is to dynamically decide whether to run on the device or in the cloud based on network conditions or input characteristics. Another pattern is to run part of the model pipeline on the device and part in the cloud. This is especially useful with modern model pipelines that have separate encoder and decoder stages. Using an engine like ONNX Runtime that works on both cloud and device simplifies development. We'll discuss hybrid scenarios in more detail in a forthcoming article.
  • Personalization: in many cases, the PyTorch model is simply being run on the device. However, you may also have scenarios where you need to personalize the model on the device without sending data to the cloud. Recommendation and content targeting are example scenarios that can improve their quality by updating models based on activity on the device. Fine-tuning and training with PyTorch on the device may not be feasible (due to performance and size concerns), but using an engine like ONNX Runtime allows PyTorch models to be updated and personalized locally. The same mechanism also enables federated learning, which can help mitigate user data exposure.
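To make the quantization point above concrete, here is a minimal sketch using ONNX Runtime's dynamic quantization API; the file names are placeholders for a model you have already exported.

```python
# A minimal sketch of post-training dynamic quantization with ONNX Runtime.
# "model.onnx" stands in for a model already exported from PyTorch.
from onnxruntime.quantization import quantize_dynamic, QuantType

quantize_dynamic(
    model_input="model.onnx",         # FP32 model exported from PyTorch
    model_output="model.int8.onnx",   # weights stored as 8-bit integers
    weight_type=QuantType.QInt8,
)
```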

Tools for PyTorch models on the edge


We mentioned ONNX Runtime several times above. ONNX Runtime is a compact, standards-based engine that has deep integration with PyTorch. By using PyTorch's ONNX APIs, your PyTorch models can run on a spectrum of edge devices with ONNX Runtime.


The first step for running PyTorch models on the edge is to get them into a lightweight format that doesn't require the PyTorch framework and its gigabytes of dependencies. PyTorch has thought about this and includes an API that enables exactly this - torch.onnx. ONNX is an open standard that defines the operators that make up models. The PyTorch ONNX APIs take the Pythonic PyTorch code and turn it into a functional graph that captures the operators that are needed to run the model without Python. As with everything in machine learning, there are some limitations to be aware of. Some PyTorch models cannot be represented as a single graph - in this case you may need to output several graphs and stitch them together in your own pipeline.
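As a concrete illustration, here is a minimal sketch of the export step, using a torchvision ResNet-18 as a stand-in for your own model; the tensor names are illustrative choices, not anything required by the API.

```python
# A minimal sketch of exporting a PyTorch model with torch.onnx.
import torch
import torchvision

# Any torch.nn.Module works here; untrained ResNet-18 weights are fine
# for demonstrating the export itself.
model = torchvision.models.resnet18(weights=None).eval()

# The exporter traces the model with example inputs to capture the graph.
example_input = torch.randn(1, 3, 224, 224)

torch.onnx.export(
    model,
    example_input,
    "resnet18.onnx",                              # output file
    input_names=["pixel_values"],                 # friendlier tensor names
    output_names=["logits"],
    dynamic_axes={"pixel_values": {0: "batch"}},  # allow variable batch size
)

# The exported file now runs anywhere ONNX Runtime does, without PyTorch.
import onnxruntime as ort

session = ort.InferenceSession("resnet18.onnx")
outputs = session.run(None, {"pixel_values": example_input.numpy()})
```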


The popular Hugging Face library also has APIs that build on top of this torch.onnx functionality to export models to the ONNX format. Over 130,000 models are supported, making it very likely that the model you care about is one of them.
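For example, with Hugging Face's Optimum library (assuming the `optimum[onnxruntime]` extra is installed; the model ID here is just an illustrative choice), an export can be as short as:

```python
# A hedged sketch of exporting a Hugging Face model to ONNX via Optimum,
# which builds on torch.onnx under the hood.
from optimum.onnxruntime import ORTModelForSequenceClassification
from transformers import AutoTokenizer

model_id = "distilbert-base-uncased-finetuned-sst-2-english"

# export=True converts the PyTorch weights to ONNX on the fly.
model = ORTModelForSequenceClassification.from_pretrained(model_id, export=True)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# The saved directory contains an ONNX model that runs with ONNX Runtime,
# no PyTorch required.
model.save_pretrained("distilbert-onnx")
tokenizer.save_pretrained("distilbert-onnx")
```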


In this article, we'll show you several examples involving state-of-the-art PyTorch models (like Whisper and Stable Diffusion) on popular devices (like Windows laptops, mobile phones, and web browsers) via various languages (from C# to JavaScript to Swift).


Examples of PyTorch models on the edge


Stable Diffusion on Windows


The Stable Diffusion pipeline consists of five PyTorch models that build an image from a text description. The diffusion process iteratively refines random noise until the output image matches the description.


+ To run on the edge, four of the models can be exported to ONNX format from HuggingFace. +
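One way this export commonly looks, sketched here assuming Hugging Face's Optimum library and a Stable Diffusion v1.5 checkpoint (both assumptions, not steps prescribed by the original tutorial), is to convert the whole pipeline at once:

```python
# A hedged sketch: export the Stable Diffusion components to ONNX with
# Optimum. Assumes `optimum[onnxruntime]` and `diffusers` are installed;
# the checkpoint ID is illustrative.
from optimum.onnxruntime import ORTStableDiffusionPipeline

pipeline = ORTStableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    export=True,  # convert the PyTorch checkpoints to ONNX on the fly
)

# Saves the text encoder, U-Net, and VAE components as .onnx files.
pipeline.save_pretrained("stable-diffusion-onnx")
```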


You don't have to export the fifth model, ClipTokenizer, as it is available in ONNX Runtime extensions, a library for pre- and post-processing PyTorch models.


To run this pipeline of models as a .NET application, we build the pipeline code in C#. This code can be run on CPU, GPU, or NPU, if they are available on your machine, using ONNX Runtime's device-specific hardware accelerators. This is configured with the ExecutionProviderTarget setting.


+ This is the output of the model pipeline, running with 50 inference iterations: +

Two golden retriever puppies playing in the grass

You can build the application and run it on Windows with the detailed steps shown in this tutorial.


Text generation in the browser


Running a PyTorch model locally in the browser is not only possible but super simple with the transformers.js library. Transformers.js uses ONNX Runtime Web as its backend. Many models are already converted to ONNX and served by the transformers.js CDN, making inference in the browser a matter of writing a few lines of HTML and JavaScript.


You can also embed the call to the transformers.js pipeline in vanilla JavaScript, in a web application built with React or Next.js, or in a browser extension.


ONNX Runtime Web currently uses WebAssembly to execute the model on the CPU. This is fine for many models, but leveraging the GPU, if one exists on the device, can improve the user experience. ONNX Runtime Web support for WebGPU is coming *very* soon and enables you to tap into the GPU while using the same inference APIs.

Text generation in the browser using transformers.js. The prompt is Two golden retriever puppies are playing in the grass, and the response is playing in the grasslands. They are known for their playful nature and they have a playful face.

Speech recognition with Whisper on mobile


Whisper from OpenAI is a PyTorch speech recognition model. Whisper comes in a number of different size variants - the smallest, Whisper Tiny, is suitable for running on mobile devices. All components of the Whisper Tiny model (audio decoder, encoder, decoder, and text sequence generation) can be composed and exported to a single ONNX model using the Olive framework, as sketched below. To run this model as part of a mobile application, you can use ONNX Runtime Mobile, which supports Android, iOS, React Native, and MAUI/Xamarin.
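A rough sketch of driving this export with Olive, assuming the `olive-ai` package and a workflow configuration JSON like the one in Olive's Whisper example (the config file name here is a placeholder):

```python
# A hedged sketch of running an Olive workflow that composes and exports
# Whisper Tiny to a single optimized ONNX model. The JSON config describing
# the model components, optimization passes, and output paths is assumed to
# exist, as in Olive's Whisper sample.
from olive.workflows import run as olive_run

olive_run("whisper_cpu_int8_config.json")
```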


ONNX Runtime Mobile supports hardware acceleration via NNAPI (on Android), CoreML (on iOS), and XNNPACK (both iOS and Android).


An example Android mobile app that performs speech transcription on short samples of audio is shown below.


You can record a short audio clip to transcribe.

Screenshot of an Android app to perform speech recognition using ONNX Runtime, running a PyTorch Whisper model

Train a model to recognize your voice on mobile


ONNX Runtime can also take a pre-trained model and adapt it to new data. It can do this on the edge - on mobile specifically, where it is easy to record your voice, access your photos, and use other personalized data. Importantly, your data does not leave the device during training.


For example, you can train a PyTorch model to recognize just your own voice on your mobile phone, for authentication scenarios.


The PyTorch model is obtained from HuggingFace in your development environment, and extra layers are added to perform the speaker classification.
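A minimal sketch of what this can look like; the base model ID and the shape of the classification head here are illustrative assumptions, not the exact ones from the tutorial:

```python
# A hedged sketch: load a pre-trained speech model and add a small
# classification head for speaker verification.
import torch
from transformers import Wav2Vec2Model


class SpeakerClassifier(torch.nn.Module):
    def __init__(self, base: Wav2Vec2Model, num_classes: int = 2):
        super().__init__()
        self.base = base
        # Pool the sequence of hidden states and classify the speaker.
        self.head = torch.nn.Linear(base.config.hidden_size, num_classes)

    def forward(self, input_values: torch.Tensor) -> torch.Tensor:
        hidden = self.base(input_values).last_hidden_state
        return self.head(hidden.mean(dim=1))


base = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base")
model = SpeakerClassifier(base)
```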


The model and other components necessary for training (a loss function to measure the quality of the model and an optimizer to instruct how the weights are adjusted during training) are exported with ONNX Runtime Training.
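A hedged sketch of this artifact generation step, assuming the onnxruntime-training package, a model already exported to ONNX, and a naming convention where the new head's parameters start with "head" (an illustrative assumption):

```python
# Generate on-device training artifacts with ONNX Runtime Training:
# a training graph, an eval graph, an optimizer graph, and a checkpoint.
import onnx
from onnxruntime.training import artifacts

model = onnx.load("speaker_classifier.onnx")

# Train only the classification head on device; keep the base model frozen.
all_params = [p.name for p in model.graph.initializer]
requires_grad = [name for name in all_params if name.startswith("head")]
frozen_params = [name for name in all_params if not name.startswith("head")]

artifacts.generate_artifacts(
    model,
    requires_grad=requires_grad,
    frozen_params=frozen_params,
    loss=artifacts.LossType.CrossEntropyLoss,
    optimizer=artifacts.OptimType.AdamW,
    artifact_directory="training_artifacts",
)
```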


This set of artifacts is now ready to be loaded by the mobile app; the full tutorial linked below walks through the iOS Swift code. The app asks the user for samples of their voice and the model is trained with the samples.


+ Once the model is trained, you can run it to verify that a voice sample is you! +

A screenshot of an iPhone app to perform speaker verification by recording a number of speech samples of the speaker

You can read the full Speaker Verification tutorial, and build and run the application from source.


Where to next?


In this article we've shown why you would run PyTorch models on the edge and what aspects to consider. We shared several examples, with code, that you can use for running state-of-the-art PyTorch models on the edge with ONNX Runtime, and we showed how ONNX Runtime is built for performance and cross-platform execution, making it an ideal way to run PyTorch models on the edge. Have fun running PyTorch models on the edge with ONNX Runtime!


You may have noticed that we didn't include a Llama2 example even though ONNX Runtime is optimized to run it. That's because the amazing Llama2 model deserves its own article, so stay tuned for that!
